summaryrefslogtreecommitdiffstats
path: root/gfx/skia
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--gfx/skia/LICENSE27
-rw-r--r--gfx/skia/README3
-rw-r--r--gfx/skia/README_COMMITTING10
-rw-r--r--gfx/skia/README_MOZILLA33
-rwxr-xr-xgfx/skia/generate_mozbuild.py416
-rw-r--r--gfx/skia/moz.build620
-rw-r--r--gfx/skia/patches/README2
-rw-r--r--gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch66
-rw-r--r--gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch34
-rw-r--r--gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch26
-rw-r--r--gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch38
-rw-r--r--gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch30
-rw-r--r--gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch50
-rw-r--r--gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch39
-rw-r--r--gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch280
-rw-r--r--gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch684
-rw-r--r--gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch36
-rw-r--r--gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch449
-rw-r--r--gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch198
-rw-r--r--gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch147
-rw-r--r--gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch27
-rw-r--r--gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch702
-rw-r--r--gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch168
-rw-r--r--gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch173
-rw-r--r--gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch35
-rw-r--r--gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch28
-rw-r--r--gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch40
-rw-r--r--gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch23
-rw-r--r--gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch36
-rw-r--r--gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch698
-rw-r--r--gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch22
-rw-r--r--gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch28
-rw-r--r--gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch31
-rw-r--r--gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch29
-rw-r--r--gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch22
-rw-r--r--gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch26
-rw-r--r--gfx/skia/patches/archive/0013-Bug-761890-fonts.patch162
-rw-r--r--gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch29
-rw-r--r--gfx/skia/patches/archive/0015-Bug-766017-warnings.patch865
-rw-r--r--gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch400
-rw-r--r--gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch73
-rw-r--r--gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch14
-rw-r--r--gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch39
-rw-r--r--gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch39
-rw-r--r--gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch217
-rw-r--r--gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch26
-rw-r--r--gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch83
-rw-r--r--gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch94
-rw-r--r--gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch39
-rw-r--r--gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch148
-rw-r--r--gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch27
-rw-r--r--gfx/skia/patches/archive/SkPostConfig.patch32
-rw-r--r--gfx/skia/patches/archive/arm-fixes.patch191
-rw-r--r--gfx/skia/patches/archive/arm-opts.patch41
-rw-r--r--gfx/skia/patches/archive/fix-comma-end-enum-list.patch380
-rw-r--r--gfx/skia/patches/archive/fix-gradient-clamp.patch211
-rw-r--r--gfx/skia/patches/archive/getpostextpath.patch70
-rw-r--r--gfx/skia/patches/archive/mingw-fix.patch57
-rw-r--r--gfx/skia/patches/archive/new-aa.patch22
-rw-r--r--gfx/skia/patches/archive/old-android-fonthost.patch530
-rw-r--r--gfx/skia/patches/archive/radial-gradients.patch25
-rw-r--r--gfx/skia/patches/archive/skia_restrict_problem.patch461
-rw-r--r--gfx/skia/patches/archive/uninitialized-margin.patch22
-rw-r--r--gfx/skia/patches/archive/user-config.patch40
-rw-r--r--gfx/skia/skia/include/codec/SkAndroidCodec.h297
-rw-r--r--gfx/skia/skia/include/codec/SkCodec.h1015
-rw-r--r--gfx/skia/skia/include/codec/SkCodecAnimation.h61
-rw-r--r--gfx/skia/skia/include/codec/SkEncodedImageFormat.h36
-rw-r--r--gfx/skia/skia/include/codec/SkEncodedOrigin.h54
-rw-r--r--gfx/skia/skia/include/codec/SkPngChunkReader.h45
-rw-r--r--gfx/skia/skia/include/config/SkUserConfig.h152
-rw-r--r--gfx/skia/skia/include/core/SkAlphaType.h45
-rw-r--r--gfx/skia/skia/include/core/SkAnnotation.h52
-rw-r--r--gfx/skia/skia/include/core/SkBBHFactory.h63
-rw-r--r--gfx/skia/skia/include/core/SkBitmap.h1268
-rw-r--r--gfx/skia/skia/include/core/SkBlendMode.h112
-rw-r--r--gfx/skia/skia/include/core/SkBlender.h33
-rw-r--r--gfx/skia/skia/include/core/SkBlurTypes.h20
-rw-r--r--gfx/skia/skia/include/core/SkCanvas.h2632
-rw-r--r--gfx/skia/skia/include/core/SkCanvasVirtualEnforcer.h61
-rw-r--r--gfx/skia/skia/include/core/SkCapabilities.h44
-rw-r--r--gfx/skia/skia/include/core/SkClipOp.h19
-rw-r--r--gfx/skia/skia/include/core/SkColor.h447
-rw-r--r--gfx/skia/skia/include/core/SkColorFilter.h128
-rw-r--r--gfx/skia/skia/include/core/SkColorPriv.h167
-rw-r--r--gfx/skia/skia/include/core/SkColorSpace.h242
-rw-r--r--gfx/skia/skia/include/core/SkColorType.h67
-rw-r--r--gfx/skia/skia/include/core/SkContourMeasure.h131
-rw-r--r--gfx/skia/skia/include/core/SkCoverageMode.h28
-rw-r--r--gfx/skia/skia/include/core/SkCubicMap.h47
-rw-r--r--gfx/skia/skia/include/core/SkData.h191
-rw-r--r--gfx/skia/skia/include/core/SkDataTable.h122
-rw-r--r--gfx/skia/skia/include/core/SkDeferredDisplayList.h110
-rw-r--r--gfx/skia/skia/include/core/SkDeferredDisplayListRecorder.h97
-rw-r--r--gfx/skia/skia/include/core/SkDocument.h91
-rw-r--r--gfx/skia/skia/include/core/SkDrawLooper.h135
-rw-r--r--gfx/skia/skia/include/core/SkDrawable.h175
-rw-r--r--gfx/skia/skia/include/core/SkEncodedImageFormat.h9
-rw-r--r--gfx/skia/skia/include/core/SkExecutor.h41
-rw-r--r--gfx/skia/skia/include/core/SkFlattenable.h115
-rw-r--r--gfx/skia/skia/include/core/SkFont.h540
-rw-r--r--gfx/skia/skia/include/core/SkFontArguments.h94
-rw-r--r--gfx/skia/skia/include/core/SkFontMetrics.h139
-rw-r--r--gfx/skia/skia/include/core/SkFontMgr.h162
-rw-r--r--gfx/skia/skia/include/core/SkFontParameters.h42
-rw-r--r--gfx/skia/skia/include/core/SkFontStyle.h84
-rw-r--r--gfx/skia/skia/include/core/SkFontTypes.h25
-rw-r--r--gfx/skia/skia/include/core/SkGraphics.h169
-rw-r--r--gfx/skia/skia/include/core/SkICC.h9
-rw-r--r--gfx/skia/skia/include/core/SkImage.h1575
-rw-r--r--gfx/skia/skia/include/core/SkImageEncoder.h71
-rw-r--r--gfx/skia/skia/include/core/SkImageFilter.h114
-rw-r--r--gfx/skia/skia/include/core/SkImageGenerator.h231
-rw-r--r--gfx/skia/skia/include/core/SkImageInfo.h616
-rw-r--r--gfx/skia/skia/include/core/SkM44.h438
-rw-r--r--gfx/skia/skia/include/core/SkMallocPixelRef.h42
-rw-r--r--gfx/skia/skia/include/core/SkMaskFilter.h53
-rw-r--r--gfx/skia/skia/include/core/SkMatrix.h1996
-rw-r--r--gfx/skia/skia/include/core/SkMesh.h423
-rw-r--r--gfx/skia/skia/include/core/SkMilestone.h9
-rw-r--r--gfx/skia/skia/include/core/SkOpenTypeSVGDecoder.h30
-rw-r--r--gfx/skia/skia/include/core/SkOverdrawCanvas.h69
-rw-r--r--gfx/skia/skia/include/core/SkPaint.h695
-rw-r--r--gfx/skia/skia/include/core/SkPath.h1890
-rw-r--r--gfx/skia/skia/include/core/SkPathBuilder.h271
-rw-r--r--gfx/skia/skia/include/core/SkPathEffect.h106
-rw-r--r--gfx/skia/skia/include/core/SkPathMeasure.h88
-rw-r--r--gfx/skia/skia/include/core/SkPathTypes.h57
-rw-r--r--gfx/skia/skia/include/core/SkPathUtils.h42
-rw-r--r--gfx/skia/skia/include/core/SkPicture.h278
-rw-r--r--gfx/skia/skia/include/core/SkPictureRecorder.h115
-rw-r--r--gfx/skia/skia/include/core/SkPixelRef.h123
-rw-r--r--gfx/skia/skia/include/core/SkPixmap.h748
-rw-r--r--gfx/skia/skia/include/core/SkPoint.h568
-rw-r--r--gfx/skia/skia/include/core/SkPoint3.h157
-rw-r--r--gfx/skia/skia/include/core/SkPromiseImageTexture.h46
-rw-r--r--gfx/skia/skia/include/core/SkRRect.h516
-rw-r--r--gfx/skia/skia/include/core/SkRSXform.h69
-rw-r--r--gfx/skia/skia/include/core/SkRasterHandleAllocator.h94
-rw-r--r--gfx/skia/skia/include/core/SkRect.h1388
-rw-r--r--gfx/skia/skia/include/core/SkRefCnt.h389
-rw-r--r--gfx/skia/skia/include/core/SkRegion.h678
-rw-r--r--gfx/skia/skia/include/core/SkSamplingOptions.h105
-rw-r--r--gfx/skia/skia/include/core/SkScalar.h173
-rw-r--r--gfx/skia/skia/include/core/SkSerialProcs.h73
-rw-r--r--gfx/skia/skia/include/core/SkShader.h93
-rw-r--r--gfx/skia/skia/include/core/SkSize.h92
-rw-r--r--gfx/skia/skia/include/core/SkSpan.h13
-rw-r--r--gfx/skia/skia/include/core/SkStream.h523
-rw-r--r--gfx/skia/skia/include/core/SkString.h291
-rw-r--r--gfx/skia/skia/include/core/SkStrokeRec.h154
-rw-r--r--gfx/skia/skia/include/core/SkSurface.h1199
-rw-r--r--gfx/skia/skia/include/core/SkSurfaceCharacterization.h263
-rw-r--r--gfx/skia/skia/include/core/SkSurfaceProps.h93
-rw-r--r--gfx/skia/skia/include/core/SkSwizzle.h19
-rw-r--r--gfx/skia/skia/include/core/SkTextBlob.h506
-rw-r--r--gfx/skia/skia/include/core/SkTextureCompressionType.h30
-rw-r--r--gfx/skia/skia/include/core/SkTileMode.h41
-rw-r--r--gfx/skia/skia/include/core/SkTime.h63
-rw-r--r--gfx/skia/skia/include/core/SkTraceMemoryDump.h99
-rw-r--r--gfx/skia/skia/include/core/SkTypeface.h483
-rw-r--r--gfx/skia/skia/include/core/SkTypes.h197
-rw-r--r--gfx/skia/skia/include/core/SkUnPreMultiply.h56
-rw-r--r--gfx/skia/skia/include/core/SkVertices.h134
-rw-r--r--gfx/skia/skia/include/core/SkYUVAInfo.h304
-rw-r--r--gfx/skia/skia/include/core/SkYUVAPixmaps.h336
-rw-r--r--gfx/skia/skia/include/docs/SkPDFDocument.h202
-rw-r--r--gfx/skia/skia/include/docs/SkXPSDocument.h27
-rw-r--r--gfx/skia/skia/include/effects/Sk1DPathEffect.h40
-rw-r--r--gfx/skia/skia/include/effects/Sk2DPathEffect.h33
-rw-r--r--gfx/skia/skia/include/effects/SkBlenders.h27
-rw-r--r--gfx/skia/skia/include/effects/SkBlurDrawLooper.h26
-rw-r--r--gfx/skia/skia/include/effects/SkBlurMaskFilter.h35
-rw-r--r--gfx/skia/skia/include/effects/SkColorMatrix.h57
-rw-r--r--gfx/skia/skia/include/effects/SkColorMatrixFilter.h22
-rw-r--r--gfx/skia/skia/include/effects/SkCornerPathEffect.h32
-rw-r--r--gfx/skia/skia/include/effects/SkDashPathEffect.h43
-rw-r--r--gfx/skia/skia/include/effects/SkDiscretePathEffect.h37
-rw-r--r--gfx/skia/skia/include/effects/SkGradientShader.h342
-rw-r--r--gfx/skia/skia/include/effects/SkHighContrastFilter.h84
-rw-r--r--gfx/skia/skia/include/effects/SkImageFilters.h541
-rw-r--r--gfx/skia/skia/include/effects/SkLayerDrawLooper.h161
-rw-r--r--gfx/skia/skia/include/effects/SkLumaColorFilter.h37
-rw-r--r--gfx/skia/skia/include/effects/SkOpPathEffect.h43
-rw-r--r--gfx/skia/skia/include/effects/SkOverdrawColorFilter.h32
-rw-r--r--gfx/skia/skia/include/effects/SkPerlinNoiseShader.h54
-rw-r--r--gfx/skia/skia/include/effects/SkRuntimeEffect.h541
-rw-r--r--gfx/skia/skia/include/effects/SkShaderMaskFilter.h26
-rw-r--r--gfx/skia/skia/include/effects/SkStrokeAndFillPathEffect.h28
-rw-r--r--gfx/skia/skia/include/effects/SkTableColorFilter.h29
-rw-r--r--gfx/skia/skia/include/effects/SkTableMaskFilter.h41
-rw-r--r--gfx/skia/skia/include/effects/SkTrimPathEffect.h45
-rw-r--r--gfx/skia/skia/include/encode/SkEncoder.h63
-rw-r--r--gfx/skia/skia/include/encode/SkICC.h36
-rw-r--r--gfx/skia/skia/include/encode/SkJpegEncoder.h137
-rw-r--r--gfx/skia/skia/include/encode/SkPngEncoder.h115
-rw-r--r--gfx/skia/skia/include/encode/SkWebpEncoder.h78
-rw-r--r--gfx/skia/skia/include/gpu/GpuTypes.h72
-rw-r--r--gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h44
-rw-r--r--gfx/skia/skia/include/gpu/GrBackendSemaphore.h140
-rw-r--r--gfx/skia/skia/include/gpu/GrBackendSurface.h666
-rw-r--r--gfx/skia/skia/include/gpu/GrBackendSurfaceMutableState.h26
-rw-r--r--gfx/skia/skia/include/gpu/GrContextOptions.h374
-rw-r--r--gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h169
-rw-r--r--gfx/skia/skia/include/gpu/GrDirectContext.h908
-rw-r--r--gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h53
-rw-r--r--gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h43
-rw-r--r--gfx/skia/skia/include/gpu/GrRecordingContext.h286
-rw-r--r--gfx/skia/skia/include/gpu/GrSurfaceInfo.h166
-rw-r--r--gfx/skia/skia/include/gpu/GrTypes.h244
-rw-r--r--gfx/skia/skia/include/gpu/GrYUVABackendTextures.h124
-rw-r--r--gfx/skia/skia/include/gpu/MutableTextureState.h122
-rw-r--r--gfx/skia/skia/include/gpu/ShaderErrorHandler.h36
-rw-r--r--gfx/skia/skia/include/gpu/d3d/GrD3DBackendContext.h35
-rw-r--r--gfx/skia/skia/include/gpu/d3d/GrD3DTypes.h248
-rw-r--r--gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h95
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h11
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h39
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLConfig.h79
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h14
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLExtensions.h78
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLFunctions.h307
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLInterface.h340
-rw-r--r--gfx/skia/skia/include/gpu/gl/GrGLTypes.h208
-rw-r--r--gfx/skia/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h14
-rw-r--r--gfx/skia/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h14
-rw-r--r--gfx/skia/skia/include/gpu/graphite/BackendTexture.h153
-rw-r--r--gfx/skia/skia/include/gpu/graphite/Context.h166
-rw-r--r--gfx/skia/skia/include/gpu/graphite/ContextOptions.h87
-rw-r--r--gfx/skia/skia/include/gpu/graphite/GraphiteTypes.h105
-rw-r--r--gfx/skia/skia/include/gpu/graphite/ImageProvider.h60
-rw-r--r--gfx/skia/skia/include/gpu/graphite/Recorder.h212
-rw-r--r--gfx/skia/skia/include/gpu/graphite/Recording.h96
-rw-r--r--gfx/skia/skia/include/gpu/graphite/TextureInfo.h162
-rw-r--r--gfx/skia/skia/include/gpu/graphite/YUVABackendTextures.h139
-rw-r--r--gfx/skia/skia/include/gpu/graphite/dawn/DawnBackendContext.h25
-rw-r--r--gfx/skia/skia/include/gpu/graphite/dawn/DawnTypes.h40
-rw-r--r--gfx/skia/skia/include/gpu/graphite/dawn/DawnUtils.h26
-rw-r--r--gfx/skia/skia/include/gpu/graphite/mtl/MtlBackendContext.h25
-rw-r--r--gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteTypes.h69
-rw-r--r--gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h25
-rw-r--r--gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h64
-rw-r--r--gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h26
-rw-r--r--gfx/skia/skia/include/gpu/mock/GrMockTypes.h146
-rw-r--r--gfx/skia/skia/include/gpu/mtl/GrMtlBackendContext.h21
-rw-r--r--gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h63
-rw-r--r--gfx/skia/skia/include/gpu/mtl/MtlMemoryAllocator.h39
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h78
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkExtensions.h15
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h15
-rw-r--r--gfx/skia/skia/include/gpu/vk/GrVkTypes.h149
-rw-r--r--gfx/skia/skia/include/gpu/vk/VulkanBackendContext.h46
-rw-r--r--gfx/skia/skia/include/gpu/vk/VulkanExtensions.h67
-rw-r--r--gfx/skia/skia/include/gpu/vk/VulkanMemoryAllocator.h114
-rw-r--r--gfx/skia/skia/include/gpu/vk/VulkanTypes.h59
-rw-r--r--gfx/skia/skia/include/pathops/SkPathOps.h113
-rw-r--r--gfx/skia/skia/include/ports/SkCFObject.h180
-rw-r--r--gfx/skia/skia/include/ports/SkFontConfigInterface.h115
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h20
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_android.h45
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_data.h22
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_directory.h21
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_empty.h21
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h22
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_fuchsia.h19
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_indirect.h102
-rw-r--r--gfx/skia/skia/include/ports/SkFontMgr_mac_ct.h27
-rw-r--r--gfx/skia/skia/include/ports/SkImageGeneratorCG.h20
-rw-r--r--gfx/skia/skia/include/ports/SkImageGeneratorNDK.h40
-rw-r--r--gfx/skia/skia/include/ports/SkImageGeneratorWIC.h35
-rw-r--r--gfx/skia/skia/include/ports/SkRemotableFontMgr.h139
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_cairo.h17
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_mac.h44
-rw-r--r--gfx/skia/skia/include/ports/SkTypeface_win.h93
-rw-r--r--gfx/skia/skia/include/private/SkBitmaskEnum.h59
-rw-r--r--gfx/skia/skia/include/private/SkChecksum.h81
-rw-r--r--gfx/skia/skia/include/private/SkColorData.h386
-rw-r--r--gfx/skia/skia/include/private/SkEncodedInfo.h272
-rw-r--r--gfx/skia/skia/include/private/SkGainmapInfo.h92
-rw-r--r--gfx/skia/skia/include/private/SkGainmapShader.h53
-rw-r--r--gfx/skia/skia/include/private/SkIDChangeListener.h76
-rw-r--r--gfx/skia/skia/include/private/SkJpegGainmapEncoder.h71
-rw-r--r--gfx/skia/skia/include/private/SkOpts_spi.h23
-rw-r--r--gfx/skia/skia/include/private/SkPathRef.h539
-rw-r--r--gfx/skia/skia/include/private/SkSLDefines.h64
-rw-r--r--gfx/skia/skia/include/private/SkSLIRNode.h145
-rw-r--r--gfx/skia/skia/include/private/SkSLLayout.h93
-rw-r--r--gfx/skia/skia/include/private/SkSLModifiers.h178
-rw-r--r--gfx/skia/skia/include/private/SkSLProgramElement.h41
-rw-r--r--gfx/skia/skia/include/private/SkSLProgramKind.h36
-rw-r--r--gfx/skia/skia/include/private/SkSLSampleUsage.h85
-rw-r--r--gfx/skia/skia/include/private/SkSLStatement.h44
-rw-r--r--gfx/skia/skia/include/private/SkSLString.h59
-rw-r--r--gfx/skia/skia/include/private/SkSLSymbol.h63
-rw-r--r--gfx/skia/skia/include/private/SkShadowFlags.h27
-rw-r--r--gfx/skia/skia/include/private/SkSpinlock.h57
-rw-r--r--gfx/skia/skia/include/private/SkWeakRefCnt.h173
-rw-r--r--gfx/skia/skia/include/private/base/README.md4
-rw-r--r--gfx/skia/skia/include/private/base/SingleOwner.h75
-rw-r--r--gfx/skia/skia/include/private/base/SkAPI.h52
-rw-r--r--gfx/skia/skia/include/private/base/SkAlign.h39
-rw-r--r--gfx/skia/skia/include/private/base/SkAlignedStorage.h32
-rw-r--r--gfx/skia/skia/include/private/base/SkAssert.h93
-rw-r--r--gfx/skia/skia/include/private/base/SkAttributes.h89
-rw-r--r--gfx/skia/skia/include/private/base/SkCPUTypes.h25
-rw-r--r--gfx/skia/skia/include/private/base/SkContainers.h46
-rw-r--r--gfx/skia/skia/include/private/base/SkDebug.h27
-rw-r--r--gfx/skia/skia/include/private/base/SkDeque.h143
-rw-r--r--gfx/skia/skia/include/private/base/SkFeatures.h151
-rw-r--r--gfx/skia/skia/include/private/base/SkFixed.h143
-rw-r--r--gfx/skia/skia/include/private/base/SkFloatBits.h90
-rw-r--r--gfx/skia/skia/include/private/base/SkFloatingPoint.h247
-rw-r--r--gfx/skia/skia/include/private/base/SkLoadUserConfig.h58
-rw-r--r--gfx/skia/skia/include/private/base/SkMacros.h107
-rw-r--r--gfx/skia/skia/include/private/base/SkMalloc.h144
-rw-r--r--gfx/skia/skia/include/private/base/SkMath.h77
-rw-r--r--gfx/skia/skia/include/private/base/SkMutex.h64
-rw-r--r--gfx/skia/skia/include/private/base/SkNoncopyable.h30
-rw-r--r--gfx/skia/skia/include/private/base/SkOnce.h55
-rw-r--r--gfx/skia/skia/include/private/base/SkPathEnums.h25
-rw-r--r--gfx/skia/skia/include/private/base/SkSafe32.h49
-rw-r--r--gfx/skia/skia/include/private/base/SkSemaphore.h84
-rw-r--r--gfx/skia/skia/include/private/base/SkSpan_impl.h129
-rw-r--r--gfx/skia/skia/include/private/base/SkTArray.h696
-rw-r--r--gfx/skia/skia/include/private/base/SkTDArray.h236
-rw-r--r--gfx/skia/skia/include/private/base/SkTFitsIn.h105
-rw-r--r--gfx/skia/skia/include/private/base/SkTLogic.h56
-rw-r--r--gfx/skia/skia/include/private/base/SkTPin.h23
-rw-r--r--gfx/skia/skia/include/private/base/SkTemplates.h426
-rw-r--r--gfx/skia/skia/include/private/base/SkThreadAnnotations.h91
-rw-r--r--gfx/skia/skia/include/private/base/SkThreadID.h23
-rw-r--r--gfx/skia/skia/include/private/base/SkTo.h39
-rw-r--r--gfx/skia/skia/include/private/base/SkTypeTraits.h33
-rw-r--r--gfx/skia/skia/include/private/chromium/GrSlug.h16
-rw-r--r--gfx/skia/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h130
-rw-r--r--gfx/skia/skia/include/private/chromium/SkChromeRemoteGlyphCache.h148
-rw-r--r--gfx/skia/skia/include/private/chromium/SkDiscardableMemory.h70
-rw-r--r--gfx/skia/skia/include/private/chromium/Slug.h67
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrContext_Base.h100
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h74
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h26
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrGLTypesPriv.h108
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrImageContext.h55
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrMockTypesPriv.h32
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h75
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrTypesPriv.h1042
-rw-r--r--gfx/skia/skia/include/private/gpu/ganesh/GrVkTypesPriv.h73
-rw-r--r--gfx/skia/skia/include/private/gpu/graphite/DawnTypesPriv.h38
-rw-r--r--gfx/skia/skia/include/private/gpu/graphite/MtlGraphiteTypesPriv.h74
-rw-r--r--gfx/skia/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h55
-rw-r--r--gfx/skia/skia/include/private/gpu/vk/SkiaVulkan.h36
-rw-r--r--gfx/skia/skia/include/private/gpu/vk/VulkanTypesPriv.h57
-rw-r--r--gfx/skia/skia/include/sksl/DSL.h37
-rw-r--r--gfx/skia/skia/include/sksl/DSLBlock.h58
-rw-r--r--gfx/skia/skia/include/sksl/DSLCase.h62
-rw-r--r--gfx/skia/skia/include/sksl/DSLCore.h468
-rw-r--r--gfx/skia/skia/include/sksl/DSLExpression.h241
-rw-r--r--gfx/skia/skia/include/sksl/DSLFunction.h114
-rw-r--r--gfx/skia/skia/include/sksl/DSLLayout.h118
-rw-r--r--gfx/skia/skia/include/sksl/DSLModifiers.h72
-rw-r--r--gfx/skia/skia/include/sksl/DSLStatement.h82
-rw-r--r--gfx/skia/skia/include/sksl/DSLType.h297
-rw-r--r--gfx/skia/skia/include/sksl/DSLVar.h231
-rw-r--r--gfx/skia/skia/include/sksl/SkSLDebugTrace.h28
-rw-r--r--gfx/skia/skia/include/sksl/SkSLErrorReporter.h65
-rw-r--r--gfx/skia/skia/include/sksl/SkSLOperator.h154
-rw-r--r--gfx/skia/skia/include/sksl/SkSLPosition.h104
-rw-r--r--gfx/skia/skia/include/sksl/SkSLVersion.h27
-rw-r--r--gfx/skia/skia/include/utils/SkAnimCodecPlayer.h67
-rw-r--r--gfx/skia/skia/include/utils/SkBase64.h53
-rw-r--r--gfx/skia/skia/include/utils/SkCamera.h109
-rw-r--r--gfx/skia/skia/include/utils/SkCanvasStateUtils.h81
-rw-r--r--gfx/skia/skia/include/utils/SkCustomTypeface.h69
-rw-r--r--gfx/skia/skia/include/utils/SkEventTracer.h90
-rw-r--r--gfx/skia/skia/include/utils/SkNWayCanvas.h133
-rw-r--r--gfx/skia/skia/include/utils/SkNoDrawCanvas.h80
-rw-r--r--gfx/skia/skia/include/utils/SkNullCanvas.h22
-rw-r--r--gfx/skia/skia/include/utils/SkOrderedFontMgr.h65
-rw-r--r--gfx/skia/skia/include/utils/SkPaintFilterCanvas.h141
-rw-r--r--gfx/skia/skia/include/utils/SkParse.h37
-rw-r--r--gfx/skia/skia/include/utils/SkParsePath.h25
-rw-r--r--gfx/skia/skia/include/utils/SkShadowUtils.h88
-rw-r--r--gfx/skia/skia/include/utils/SkTextUtils.h43
-rw-r--r--gfx/skia/skia/include/utils/SkTraceEventPhase.h19
-rw-r--r--gfx/skia/skia/include/utils/mac/SkCGUtils.h78
-rw-r--r--gfx/skia/skia/modules/skcms/README.chromium5
-rw-r--r--gfx/skia/skia/modules/skcms/skcms.cc3064
-rw-r--r--gfx/skia/skia/modules/skcms/skcms.gni20
-rw-r--r--gfx/skia/skia/modules/skcms/skcms.h418
-rw-r--r--gfx/skia/skia/modules/skcms/skcms_internal.h56
-rw-r--r--gfx/skia/skia/modules/skcms/src/Transform_inl.h1628
-rwxr-xr-xgfx/skia/skia/modules/skcms/version.sha11
-rw-r--r--gfx/skia/skia/src/base/README.md4
-rw-r--r--gfx/skia/skia/src/base/SkASAN.h65
-rw-r--r--gfx/skia/skia/src/base/SkArenaAlloc.cpp173
-rw-r--r--gfx/skia/skia/src/base/SkArenaAlloc.h336
-rw-r--r--gfx/skia/skia/src/base/SkArenaAllocList.h82
-rw-r--r--gfx/skia/skia/src/base/SkAutoMalloc.h178
-rw-r--r--gfx/skia/skia/src/base/SkBezierCurves.cpp111
-rw-r--r--gfx/skia/skia/src/base/SkBezierCurves.h63
-rw-r--r--gfx/skia/skia/src/base/SkBlockAllocator.cpp302
-rw-r--r--gfx/skia/skia/src/base/SkBlockAllocator.h754
-rw-r--r--gfx/skia/skia/src/base/SkBuffer.cpp90
-rw-r--r--gfx/skia/skia/src/base/SkBuffer.h134
-rw-r--r--gfx/skia/skia/src/base/SkContainers.cpp107
-rw-r--r--gfx/skia/skia/src/base/SkCubics.cpp241
-rw-r--r--gfx/skia/skia/src/base/SkCubics.h61
-rw-r--r--gfx/skia/skia/src/base/SkDeque.cpp310
-rw-r--r--gfx/skia/skia/src/base/SkEndian.h197
-rw-r--r--gfx/skia/skia/src/base/SkFloatingPoint.cpp51
-rw-r--r--gfx/skia/skia/src/base/SkHalf.cpp97
-rw-r--r--gfx/skia/skia/src/base/SkHalf.h37
-rw-r--r--gfx/skia/skia/src/base/SkLeanWindows.h35
-rw-r--r--gfx/skia/skia/src/base/SkMSAN.h43
-rw-r--r--gfx/skia/skia/src/base/SkMalloc.cpp22
-rw-r--r--gfx/skia/skia/src/base/SkMathPriv.cpp73
-rw-r--r--gfx/skia/skia/src/base/SkMathPriv.h346
-rw-r--r--gfx/skia/skia/src/base/SkQuads.cpp69
-rw-r--r--gfx/skia/skia/src/base/SkQuads.h36
-rw-r--r--gfx/skia/skia/src/base/SkRandom.h173
-rw-r--r--gfx/skia/skia/src/base/SkRectMemcpy.h32
-rw-r--r--gfx/skia/skia/src/base/SkSafeMath.cpp20
-rw-r--r--gfx/skia/skia/src/base/SkSafeMath.h113
-rw-r--r--gfx/skia/skia/src/base/SkScopeExit.h59
-rw-r--r--gfx/skia/skia/src/base/SkSemaphore.cpp83
-rw-r--r--gfx/skia/skia/src/base/SkStringView.h51
-rw-r--r--gfx/skia/skia/src/base/SkTBlockList.h448
-rw-r--r--gfx/skia/skia/src/base/SkTDArray.cpp240
-rw-r--r--gfx/skia/skia/src/base/SkTDPQueue.h222
-rw-r--r--gfx/skia/skia/src/base/SkTInternalLList.h304
-rw-r--r--gfx/skia/skia/src/base/SkTLazy.h208
-rw-r--r--gfx/skia/skia/src/base/SkTSearch.cpp117
-rw-r--r--gfx/skia/skia/src/base/SkTSearch.h132
-rw-r--r--gfx/skia/skia/src/base/SkTSort.h214
-rw-r--r--gfx/skia/skia/src/base/SkThreadID.cpp16
-rw-r--r--gfx/skia/skia/src/base/SkUTF.cpp316
-rw-r--r--gfx/skia/skia/src/base/SkUTF.h95
-rw-r--r--gfx/skia/skia/src/base/SkUtils.cpp13
-rw-r--r--gfx/skia/skia/src/base/SkUtils.h55
-rw-r--r--gfx/skia/skia/src/base/SkVx.h1183
-rw-r--r--gfx/skia/skia/src/base/SkZip.h215
-rw-r--r--gfx/skia/skia/src/codec/SkCodec.cpp972
-rw-r--r--gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp110
-rw-r--r--gfx/skia/skia/src/codec/SkCodecImageGenerator.h128
-rw-r--r--gfx/skia/skia/src/codec/SkCodecPriv.h259
-rw-r--r--gfx/skia/skia/src/codec/SkColorTable.cpp25
-rw-r--r--gfx/skia/skia/src/codec/SkColorTable.h51
-rw-r--r--gfx/skia/skia/src/codec/SkEncodedInfo.cpp30
-rw-r--r--gfx/skia/skia/src/codec/SkFrameHolder.h206
-rw-r--r--gfx/skia/skia/src/codec/SkMaskSwizzler.cpp575
-rw-r--r--gfx/skia/skia/src/codec/SkMaskSwizzler.h76
-rw-r--r--gfx/skia/skia/src/codec/SkMasks.cpp153
-rw-r--r--gfx/skia/skia/src/codec/SkMasks.h61
-rw-r--r--gfx/skia/skia/src/codec/SkParseEncodedOrigin.cpp77
-rw-r--r--gfx/skia/skia/src/codec/SkParseEncodedOrigin.h19
-rw-r--r--gfx/skia/skia/src/codec/SkPixmapUtils.cpp69
-rw-r--r--gfx/skia/skia/src/codec/SkPixmapUtils.h61
-rw-r--r--gfx/skia/skia/src/codec/SkSampler.cpp71
-rw-r--r--gfx/skia/skia/src/codec/SkSampler.h89
-rw-r--r--gfx/skia/skia/src/codec/SkSwizzler.cpp1250
-rw-r--r--gfx/skia/skia/src/codec/SkSwizzler.h230
-rw-r--r--gfx/skia/skia/src/core/Sk4px.h249
-rw-r--r--gfx/skia/skia/src/core/SkAAClip.cpp1968
-rw-r--r--gfx/skia/skia/src/core/SkAAClip.h123
-rw-r--r--gfx/skia/skia/src/core/SkATrace.cpp87
-rw-r--r--gfx/skia/skia/src/core/SkATrace.h59
-rw-r--r--gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h74
-rw-r--r--gfx/skia/skia/src/core/SkAlphaRuns.cpp78
-rw-r--r--gfx/skia/skia/src/core/SkAnalyticEdge.cpp438
-rw-r--r--gfx/skia/skia/src/core/SkAnalyticEdge.h141
-rw-r--r--gfx/skia/skia/src/core/SkAnnotation.cpp48
-rw-r--r--gfx/skia/skia/src/core/SkAnnotationKeys.h33
-rw-r--r--gfx/skia/skia/src/core/SkAntiRun.h196
-rw-r--r--gfx/skia/skia/src/core/SkAutoBlitterChoose.h57
-rw-r--r--gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp82
-rw-r--r--gfx/skia/skia/src/core/SkAutoPixmapStorage.h92
-rw-r--r--gfx/skia/skia/src/core/SkBBHFactory.cpp18
-rw-r--r--gfx/skia/skia/src/core/SkBigPicture.cpp91
-rw-r--r--gfx/skia/skia/src/core/SkBigPicture.h74
-rw-r--r--gfx/skia/skia/src/core/SkBitmap.cpp671
-rw-r--r--gfx/skia/skia/src/core/SkBitmapCache.cpp300
-rw-r--r--gfx/skia/skia/src/core/SkBitmapCache.h67
-rw-r--r--gfx/skia/skia/src/core/SkBitmapDevice.cpp705
-rw-r--r--gfx/skia/skia/src/core/SkBitmapDevice.h167
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState.cpp694
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState.h209
-rw-r--r--gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp541
-rw-r--r--gfx/skia/skia/src/core/SkBlendMode.cpp157
-rw-r--r--gfx/skia/skia/src/core/SkBlendModeBlender.cpp118
-rw-r--r--gfx/skia/skia/src/core/SkBlendModeBlender.h42
-rw-r--r--gfx/skia/skia/src/core/SkBlendModePriv.h40
-rw-r--r--gfx/skia/skia/src/core/SkBlenderBase.h107
-rw-r--r--gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h127
-rw-r--r--gfx/skia/skia/src/core/SkBlitRow.h38
-rw-r--r--gfx/skia/skia/src/core/SkBlitRow_D32.cpp313
-rw-r--r--gfx/skia/skia/src/core/SkBlitter.cpp898
-rw-r--r--gfx/skia/skia/src/core/SkBlitter.h300
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_A8.cpp313
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_A8.h43
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp1420
-rw-r--r--gfx/skia/skia/src/core/SkBlitter_Sprite.cpp228
-rw-r--r--gfx/skia/skia/src/core/SkBlurMF.cpp1680
-rw-r--r--gfx/skia/skia/src/core/SkBlurMask.cpp661
-rw-r--r--gfx/skia/skia/src/core/SkBlurMask.h87
-rw-r--r--gfx/skia/skia/src/core/SkCachedData.cpp177
-rw-r--r--gfx/skia/skia/src/core/SkCachedData.h113
-rw-r--r--gfx/skia/skia/src/core/SkCanvas.cpp3087
-rw-r--r--gfx/skia/skia/src/core/SkCanvasPriv.cpp156
-rw-r--r--gfx/skia/skia/src/core/SkCanvasPriv.h116
-rw-r--r--gfx/skia/skia/src/core/SkCanvas_Raster.cpp54
-rw-r--r--gfx/skia/skia/src/core/SkCapabilities.cpp30
-rw-r--r--gfx/skia/skia/src/core/SkChromeRemoteGlyphCache.cpp1271
-rw-r--r--gfx/skia/skia/src/core/SkClipStack.cpp999
-rw-r--r--gfx/skia/skia/src/core/SkClipStack.h507
-rw-r--r--gfx/skia/skia/src/core/SkClipStackDevice.cpp124
-rw-r--r--gfx/skia/skia/src/core/SkClipStackDevice.h49
-rw-r--r--gfx/skia/skia/src/core/SkColor.cpp170
-rw-r--r--gfx/skia/skia/src/core/SkColorFilter.cpp633
-rw-r--r--gfx/skia/skia/src/core/SkColorFilterBase.h141
-rw-r--r--gfx/skia/skia/src/core/SkColorFilterPriv.h34
-rw-r--r--gfx/skia/skia/src/core/SkColorFilter_Matrix.cpp256
-rw-r--r--gfx/skia/skia/src/core/SkColorSpace.cpp411
-rw-r--r--gfx/skia/skia/src/core/SkColorSpacePriv.h86
-rw-r--r--gfx/skia/skia/src/core/SkColorSpaceXformSteps.cpp227
-rw-r--r--gfx/skia/skia/src/core/SkColorSpaceXformSteps.h57
-rw-r--r--gfx/skia/skia/src/core/SkCompressedDataUtils.cpp306
-rw-r--r--gfx/skia/skia/src/core/SkCompressedDataUtils.h51
-rw-r--r--gfx/skia/skia/src/core/SkContourMeasure.cpp673
-rw-r--r--gfx/skia/skia/src/core/SkConvertPixels.cpp253
-rw-r--r--gfx/skia/skia/src/core/SkConvertPixels.h21
-rw-r--r--gfx/skia/skia/src/core/SkCoreBlitters.h145
-rw-r--r--gfx/skia/skia/src/core/SkCpu.cpp161
-rw-r--r--gfx/skia/skia/src/core/SkCpu.h121
-rw-r--r--gfx/skia/skia/src/core/SkCubicClipper.cpp156
-rw-r--r--gfx/skia/skia/src/core/SkCubicClipper.h37
-rw-r--r--gfx/skia/skia/src/core/SkCubicMap.cpp81
-rw-r--r--gfx/skia/skia/src/core/SkCubicSolver.h60
-rw-r--r--gfx/skia/skia/src/core/SkData.cpp219
-rw-r--r--gfx/skia/skia/src/core/SkDataTable.cpp136
-rw-r--r--gfx/skia/skia/src/core/SkDebug.cpp14
-rw-r--r--gfx/skia/skia/src/core/SkDebugUtils.h23
-rw-r--r--gfx/skia/skia/src/core/SkDeferredDisplayList.cpp75
-rw-r--r--gfx/skia/skia/src/core/SkDeferredDisplayListPriv.h63
-rw-r--r--gfx/skia/skia/src/core/SkDeferredDisplayListRecorder.cpp260
-rw-r--r--gfx/skia/skia/src/core/SkDescriptor.cpp231
-rw-r--r--gfx/skia/skia/src/core/SkDescriptor.h112
-rw-r--r--gfx/skia/skia/src/core/SkDevice.cpp637
-rw-r--r--gfx/skia/skia/src/core/SkDevice.h637
-rw-r--r--gfx/skia/skia/src/core/SkDistanceFieldGen.cpp567
-rw-r--r--gfx/skia/skia/src/core/SkDistanceFieldGen.h81
-rw-r--r--gfx/skia/skia/src/core/SkDocument.cpp78
-rw-r--r--gfx/skia/skia/src/core/SkDraw.cpp616
-rw-r--r--gfx/skia/skia/src/core/SkDraw.h79
-rw-r--r--gfx/skia/skia/src/core/SkDrawBase.cpp776
-rw-r--r--gfx/skia/skia/src/core/SkDrawBase.h166
-rw-r--r--gfx/skia/skia/src/core/SkDrawLooper.cpp110
-rw-r--r--gfx/skia/skia/src/core/SkDrawProcs.h43
-rw-r--r--gfx/skia/skia/src/core/SkDrawShadowInfo.cpp217
-rw-r--r--gfx/skia/skia/src/core/SkDrawShadowInfo.h96
-rw-r--r--gfx/skia/skia/src/core/SkDraw_atlas.cpp237
-rw-r--r--gfx/skia/skia/src/core/SkDraw_text.cpp143
-rw-r--r--gfx/skia/skia/src/core/SkDraw_vertices.cpp551
-rw-r--r--gfx/skia/skia/src/core/SkDrawable.cpp99
-rw-r--r--gfx/skia/skia/src/core/SkEdge.cpp524
-rw-r--r--gfx/skia/skia/src/core/SkEdge.h137
-rw-r--r--gfx/skia/skia/src/core/SkEdgeBuilder.cpp394
-rw-r--r--gfx/skia/skia/src/core/SkEdgeBuilder.h92
-rw-r--r--gfx/skia/skia/src/core/SkEdgeClipper.cpp604
-rw-r--r--gfx/skia/skia/src/core/SkEdgeClipper.h69
-rw-r--r--gfx/skia/skia/src/core/SkEffectPriv.h29
-rw-r--r--gfx/skia/skia/src/core/SkEnumBitMask.h87
-rw-r--r--gfx/skia/skia/src/core/SkEnumerate.h114
-rw-r--r--gfx/skia/skia/src/core/SkExecutor.cpp153
-rw-r--r--gfx/skia/skia/src/core/SkFDot6.h78
-rw-r--r--gfx/skia/skia/src/core/SkFlattenable.cpp157
-rw-r--r--gfx/skia/skia/src/core/SkFont.cpp394
-rw-r--r--gfx/skia/skia/src/core/SkFontDescriptor.cpp277
-rw-r--r--gfx/skia/skia/src/core/SkFontDescriptor.h158
-rw-r--r--gfx/skia/skia/src/core/SkFontMetricsPriv.cpp60
-rw-r--r--gfx/skia/skia/src/core/SkFontMetricsPriv.h23
-rw-r--r--gfx/skia/skia/src/core/SkFontMgr.cpp287
-rw-r--r--gfx/skia/skia/src/core/SkFontMgrPriv.h14
-rw-r--r--gfx/skia/skia/src/core/SkFontPriv.h120
-rw-r--r--gfx/skia/skia/src/core/SkFontStream.cpp211
-rw-r--r--gfx/skia/skia/src/core/SkFontStream.h49
-rw-r--r--gfx/skia/skia/src/core/SkFont_serial.cpp117
-rw-r--r--gfx/skia/skia/src/core/SkFuzzLogging.h23
-rw-r--r--gfx/skia/skia/src/core/SkGaussFilter.cpp109
-rw-r--r--gfx/skia/skia/src/core/SkGaussFilter.h34
-rw-r--r--gfx/skia/skia/src/core/SkGeometry.cpp1780
-rw-r--r--gfx/skia/skia/src/core/SkGeometry.h543
-rw-r--r--gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp18
-rw-r--r--gfx/skia/skia/src/core/SkGlyph.cpp700
-rw-r--r--gfx/skia/skia/src/core/SkGlyph.h639
-rw-r--r--gfx/skia/skia/src/core/SkGlyphRunPainter.cpp366
-rw-r--r--gfx/skia/skia/src/core/SkGlyphRunPainter.h52
-rw-r--r--gfx/skia/skia/src/core/SkGpuBlurUtils.cpp1039
-rw-r--r--gfx/skia/skia/src/core/SkGpuBlurUtils.h113
-rw-r--r--gfx/skia/skia/src/core/SkGraphics.cpp100
-rw-r--r--gfx/skia/skia/src/core/SkIDChangeListener.cpp70
-rw-r--r--gfx/skia/skia/src/core/SkIPoint16.h57
-rw-r--r--gfx/skia/skia/src/core/SkImageFilter.cpp682
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterCache.cpp164
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterCache.h73
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterTypes.cpp430
-rw-r--r--gfx/skia/skia/src/core/SkImageFilterTypes.h799
-rw-r--r--gfx/skia/skia/src/core/SkImageFilter_Base.h492
-rw-r--r--gfx/skia/skia/src/core/SkImageGenerator.cpp123
-rw-r--r--gfx/skia/skia/src/core/SkImageInfo.cpp236
-rw-r--r--gfx/skia/skia/src/core/SkImageInfoPriv.h203
-rw-r--r--gfx/skia/skia/src/core/SkImagePriv.h62
-rw-r--r--gfx/skia/skia/src/core/SkLRUCache.h130
-rw-r--r--gfx/skia/skia/src/core/SkLatticeIter.cpp302
-rw-r--r--gfx/skia/skia/src/core/SkLatticeIter.h77
-rw-r--r--gfx/skia/skia/src/core/SkLineClipper.cpp282
-rw-r--r--gfx/skia/skia/src/core/SkLineClipper.h45
-rw-r--r--gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp73
-rw-r--r--gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h42
-rw-r--r--gfx/skia/skia/src/core/SkM44.cpp356
-rw-r--r--gfx/skia/skia/src/core/SkMD5.cpp261
-rw-r--r--gfx/skia/skia/src/core/SkMD5.h45
-rw-r--r--gfx/skia/skia/src/core/SkMallocPixelRef.cpp77
-rw-r--r--gfx/skia/skia/src/core/SkMask.cpp118
-rw-r--r--gfx/skia/skia/src/core/SkMask.h243
-rw-r--r--gfx/skia/skia/src/core/SkMaskBlurFilter.cpp1054
-rw-r--r--gfx/skia/skia/src/core/SkMaskBlurFilter.h37
-rw-r--r--gfx/skia/skia/src/core/SkMaskCache.cpp185
-rw-r--r--gfx/skia/skia/src/core/SkMaskCache.h44
-rw-r--r--gfx/skia/skia/src/core/SkMaskFilter.cpp414
-rw-r--r--gfx/skia/skia/src/core/SkMaskFilterBase.h266
-rw-r--r--gfx/skia/skia/src/core/SkMaskGamma.cpp125
-rw-r--r--gfx/skia/skia/src/core/SkMaskGamma.h232
-rw-r--r--gfx/skia/skia/src/core/SkMatrix.cpp1881
-rw-r--r--gfx/skia/skia/src/core/SkMatrixInvert.cpp144
-rw-r--r--gfx/skia/skia/src/core/SkMatrixInvert.h24
-rw-r--r--gfx/skia/skia/src/core/SkMatrixPriv.h201
-rw-r--r--gfx/skia/skia/src/core/SkMatrixProvider.h68
-rw-r--r--gfx/skia/skia/src/core/SkMatrixUtils.h39
-rw-r--r--gfx/skia/skia/src/core/SkMesh.cpp925
-rw-r--r--gfx/skia/skia/src/core/SkMeshPriv.h250
-rw-r--r--gfx/skia/skia/src/core/SkMessageBus.h153
-rw-r--r--gfx/skia/skia/src/core/SkMipmap.cpp895
-rw-r--r--gfx/skia/skia/src/core/SkMipmap.h96
-rw-r--r--gfx/skia/skia/src/core/SkMipmapAccessor.cpp110
-rw-r--r--gfx/skia/skia/src/core/SkMipmapAccessor.h53
-rw-r--r--gfx/skia/skia/src/core/SkMipmapBuilder.cpp37
-rw-r--r--gfx/skia/skia/src/core/SkMipmapBuilder.h40
-rw-r--r--gfx/skia/skia/src/core/SkModeColorFilter.cpp245
-rw-r--r--gfx/skia/skia/src/core/SkNextID.h21
-rw-r--r--gfx/skia/skia/src/core/SkOSFile.h101
-rw-r--r--gfx/skia/skia/src/core/SkOpts.cpp153
-rw-r--r--gfx/skia/skia/src/core/SkOpts.h139
-rw-r--r--gfx/skia/skia/src/core/SkOpts_erms.cpp122
-rw-r--r--gfx/skia/skia/src/core/SkOrderedReadBuffer.h9
-rw-r--r--gfx/skia/skia/src/core/SkOverdrawCanvas.cpp259
-rw-r--r--gfx/skia/skia/src/core/SkPaint.cpp294
-rw-r--r--gfx/skia/skia/src/core/SkPaintDefaults.h31
-rw-r--r--gfx/skia/skia/src/core/SkPaintPriv.cpp274
-rw-r--r--gfx/skia/skia/src/core/SkPaintPriv.h63
-rw-r--r--gfx/skia/skia/src/core/SkPath.cpp3918
-rw-r--r--gfx/skia/skia/src/core/SkPathBuilder.cpp867
-rw-r--r--gfx/skia/skia/src/core/SkPathEffect.cpp214
-rw-r--r--gfx/skia/skia/src/core/SkPathEffectBase.h146
-rw-r--r--gfx/skia/skia/src/core/SkPathMakers.h88
-rw-r--r--gfx/skia/skia/src/core/SkPathMeasure.cpp53
-rw-r--r--gfx/skia/skia/src/core/SkPathMeasurePriv.h29
-rw-r--r--gfx/skia/skia/src/core/SkPathPriv.h529
-rw-r--r--gfx/skia/skia/src/core/SkPathRef.cpp689
-rw-r--r--gfx/skia/skia/src/core/SkPathUtils.cpp87
-rw-r--r--gfx/skia/skia/src/core/SkPath_serial.cpp297
-rw-r--r--gfx/skia/skia/src/core/SkPicture.cpp352
-rw-r--r--gfx/skia/skia/src/core/SkPictureData.cpp601
-rw-r--r--gfx/skia/skia/src/core/SkPictureData.h196
-rw-r--r--gfx/skia/skia/src/core/SkPictureFlat.cpp22
-rw-r--r--gfx/skia/skia/src/core/SkPictureFlat.h223
-rw-r--r--gfx/skia/skia/src/core/SkPictureImageGenerator.cpp170
-rw-r--r--gfx/skia/skia/src/core/SkPicturePlayback.cpp739
-rw-r--r--gfx/skia/skia/src/core/SkPicturePlayback.h67
-rw-r--r--gfx/skia/skia/src/core/SkPicturePriv.h156
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecord.cpp953
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecord.h271
-rw-r--r--gfx/skia/skia/src/core/SkPictureRecorder.cpp145
-rw-r--r--gfx/skia/skia/src/core/SkPixelRef.cpp149
-rw-r--r--gfx/skia/skia/src/core/SkPixelRefPriv.h27
-rw-r--r--gfx/skia/skia/src/core/SkPixmap.cpp745
-rw-r--r--gfx/skia/skia/src/core/SkPixmapDraw.cpp87
-rw-r--r--gfx/skia/skia/src/core/SkPoint.cpp169
-rw-r--r--gfx/skia/skia/src/core/SkPoint3.cpp76
-rw-r--r--gfx/skia/skia/src/core/SkPointPriv.h127
-rw-r--r--gfx/skia/skia/src/core/SkPromiseImageTexture.cpp19
-rw-r--r--gfx/skia/skia/src/core/SkPtrRecorder.cpp73
-rw-r--r--gfx/skia/skia/src/core/SkPtrRecorder.h171
-rw-r--r--gfx/skia/skia/src/core/SkQuadClipper.cpp117
-rw-r--r--gfx/skia/skia/src/core/SkQuadClipper.h69
-rw-r--r--gfx/skia/skia/src/core/SkRRect.cpp917
-rw-r--r--gfx/skia/skia/src/core/SkRRectPriv.h67
-rw-r--r--gfx/skia/skia/src/core/SkRSXform.cpp45
-rw-r--r--gfx/skia/skia/src/core/SkRTree.cpp168
-rw-r--r--gfx/skia/skia/src/core/SkRTree.h82
-rw-r--r--gfx/skia/skia/src/core/SkRasterClip.cpp328
-rw-r--r--gfx/skia/skia/src/core/SkRasterClip.h185
-rw-r--r--gfx/skia/skia/src/core/SkRasterClipStack.h123
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipeline.cpp499
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipeline.h158
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp607
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipelineOpContexts.h205
-rw-r--r--gfx/skia/skia/src/core/SkRasterPipelineOpList.h195
-rw-r--r--gfx/skia/skia/src/core/SkReadBuffer.cpp504
-rw-r--r--gfx/skia/skia/src/core/SkReadBuffer.h264
-rw-r--r--gfx/skia/skia/src/core/SkReadPixelsRec.cpp42
-rw-r--r--gfx/skia/skia/src/core/SkReadPixelsRec.h52
-rw-r--r--gfx/skia/skia/src/core/SkRecord.cpp37
-rw-r--r--gfx/skia/skia/src/core/SkRecord.h181
-rw-r--r--gfx/skia/skia/src/core/SkRecordDraw.cpp590
-rw-r--r--gfx/skia/skia/src/core/SkRecordDraw.h83
-rw-r--r--gfx/skia/skia/src/core/SkRecordOpts.cpp314
-rw-r--r--gfx/skia/skia/src/core/SkRecordOpts.h32
-rw-r--r--gfx/skia/skia/src/core/SkRecordPattern.h177
-rw-r--r--gfx/skia/skia/src/core/SkRecordedDrawable.cpp104
-rw-r--r--gfx/skia/skia/src/core/SkRecordedDrawable.h42
-rw-r--r--gfx/skia/skia/src/core/SkRecorder.cpp423
-rw-r--r--gfx/skia/skia/src/core/SkRecorder.h179
-rw-r--r--gfx/skia/skia/src/core/SkRecords.cpp24
-rw-r--r--gfx/skia/skia/src/core/SkRecords.h362
-rw-r--r--gfx/skia/skia/src/core/SkRect.cpp309
-rw-r--r--gfx/skia/skia/src/core/SkRectPriv.h99
-rw-r--r--gfx/skia/skia/src/core/SkRegion.cpp1584
-rw-r--r--gfx/skia/skia/src/core/SkRegionPriv.h261
-rw-r--r--gfx/skia/skia/src/core/SkRegion_path.cpp586
-rw-r--r--gfx/skia/skia/src/core/SkResourceCache.cpp614
-rw-r--r--gfx/skia/skia/src/core/SkResourceCache.h293
-rw-r--r--gfx/skia/skia/src/core/SkRuntimeEffect.cpp2016
-rw-r--r--gfx/skia/skia/src/core/SkRuntimeEffectPriv.h176
-rw-r--r--gfx/skia/skia/src/core/SkSLTypeShared.cpp213
-rw-r--r--gfx/skia/skia/src/core/SkSLTypeShared.h242
-rw-r--r--gfx/skia/skia/src/core/SkSafeRange.h49
-rw-r--r--gfx/skia/skia/src/core/SkSamplingPriv.h77
-rw-r--r--gfx/skia/skia/src/core/SkScalar.cpp37
-rw-r--r--gfx/skia/skia/src/core/SkScaleToSides.h64
-rw-r--r--gfx/skia/skia/src/core/SkScalerContext.cpp1284
-rw-r--r--gfx/skia/skia/src/core/SkScalerContext.h464
-rw-r--r--gfx/skia/skia/src/core/SkScan.cpp111
-rw-r--r--gfx/skia/skia/src/core/SkScan.h142
-rw-r--r--gfx/skia/skia/src/core/SkScanPriv.h83
-rw-r--r--gfx/skia/skia/src/core/SkScan_AAAPath.cpp2033
-rw-r--r--gfx/skia/skia/src/core/SkScan_AntiPath.cpp208
-rw-r--r--gfx/skia/skia/src/core/SkScan_Antihair.cpp1014
-rw-r--r--gfx/skia/skia/src/core/SkScan_Hairline.cpp743
-rw-r--r--gfx/skia/skia/src/core/SkScan_Path.cpp784
-rw-r--r--gfx/skia/skia/src/core/SkScan_SAAPath.cpp611
-rw-r--r--gfx/skia/skia/src/core/SkSharedMutex.cpp368
-rw-r--r--gfx/skia/skia/src/core/SkSharedMutex.h102
-rw-r--r--gfx/skia/skia/src/core/SkSpecialImage.cpp458
-rw-r--r--gfx/skia/skia/src/core/SkSpecialImage.h263
-rw-r--r--gfx/skia/skia/src/core/SkSpecialSurface.cpp138
-rw-r--r--gfx/skia/skia/src/core/SkSpecialSurface.h101
-rw-r--r--gfx/skia/skia/src/core/SkSpinlock.cpp50
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter.h47
-rw-r--r--gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp120
-rw-r--r--gfx/skia/skia/src/core/SkStream.cpp986
-rw-r--r--gfx/skia/skia/src/core/SkStreamPriv.h50
-rw-r--r--gfx/skia/skia/src/core/SkStrike.cpp456
-rw-r--r--gfx/skia/skia/src/core/SkStrike.h205
-rw-r--r--gfx/skia/skia/src/core/SkStrikeCache.cpp326
-rw-r--r--gfx/skia/skia/src/core/SkStrikeCache.h108
-rw-r--r--gfx/skia/skia/src/core/SkStrikeSpec.cpp309
-rw-r--r--gfx/skia/skia/src/core/SkStrikeSpec.h178
-rw-r--r--gfx/skia/skia/src/core/SkString.cpp630
-rw-r--r--gfx/skia/skia/src/core/SkStringUtils.cpp115
-rw-r--r--gfx/skia/skia/src/core/SkStringUtils.h62
-rw-r--r--gfx/skia/skia/src/core/SkStroke.cpp1618
-rw-r--r--gfx/skia/skia/src/core/SkStroke.h78
-rw-r--r--gfx/skia/skia/src/core/SkStrokeRec.cpp172
-rw-r--r--gfx/skia/skia/src/core/SkStrokerPriv.cpp235
-rw-r--r--gfx/skia/skia/src/core/SkStrokerPriv.h43
-rw-r--r--gfx/skia/skia/src/core/SkSurfaceCharacterization.cpp182
-rw-r--r--gfx/skia/skia/src/core/SkSurfacePriv.h23
-rw-r--r--gfx/skia/skia/src/core/SkSwizzle.cpp14
-rw-r--r--gfx/skia/skia/src/core/SkSwizzlePriv.h36
-rw-r--r--gfx/skia/skia/src/core/SkTDynamicHash.h58
-rw-r--r--gfx/skia/skia/src/core/SkTHash.h591
-rw-r--r--gfx/skia/skia/src/core/SkTMultiMap.h187
-rw-r--r--gfx/skia/skia/src/core/SkTaskGroup.cpp53
-rw-r--r--gfx/skia/skia/src/core/SkTaskGroup.h48
-rw-r--r--gfx/skia/skia/src/core/SkTextBlob.cpp1009
-rw-r--r--gfx/skia/skia/src/core/SkTextBlobPriv.h261
-rw-r--r--gfx/skia/skia/src/core/SkTextBlobTrace.cpp119
-rw-r--r--gfx/skia/skia/src/core/SkTextBlobTrace.h49
-rw-r--r--gfx/skia/skia/src/core/SkTextFormatParams.h31
-rw-r--r--gfx/skia/skia/src/core/SkTime.cpp89
-rw-r--r--gfx/skia/skia/src/core/SkTraceEvent.h419
-rw-r--r--gfx/skia/skia/src/core/SkTraceEventCommon.h557
-rw-r--r--gfx/skia/skia/src/core/SkTypeface.cpp578
-rw-r--r--gfx/skia/skia/src/core/SkTypefaceCache.cpp115
-rw-r--r--gfx/skia/skia/src/core/SkTypefaceCache.h74
-rw-r--r--gfx/skia/skia/src/core/SkTypeface_remote.cpp158
-rw-r--r--gfx/skia/skia/src/core/SkTypeface_remote.h175
-rw-r--r--gfx/skia/skia/src/core/SkUnPreMultiply.cpp79
-rw-r--r--gfx/skia/skia/src/core/SkVM.cpp4117
-rw-r--r--gfx/skia/skia/src/core/SkVM.h1369
-rw-r--r--gfx/skia/skia/src/core/SkVMBlitter.cpp815
-rw-r--r--gfx/skia/skia/src/core/SkVMBlitter.h113
-rw-r--r--gfx/skia/skia/src/core/SkVM_fwd.h23
-rw-r--r--gfx/skia/skia/src/core/SkValidationUtils.h36
-rw-r--r--gfx/skia/skia/src/core/SkVertState.cpp106
-rw-r--r--gfx/skia/skia/src/core/SkVertState.h58
-rw-r--r--gfx/skia/skia/src/core/SkVertices.cpp338
-rw-r--r--gfx/skia/skia/src/core/SkVerticesPriv.h62
-rw-r--r--gfx/skia/skia/src/core/SkWriteBuffer.cpp283
-rw-r--r--gfx/skia/skia/src/core/SkWriteBuffer.h178
-rw-r--r--gfx/skia/skia/src/core/SkWritePixelsRec.cpp43
-rw-r--r--gfx/skia/skia/src/core/SkWritePixelsRec.h52
-rw-r--r--gfx/skia/skia/src/core/SkWriter32.cpp76
-rw-r--r--gfx/skia/skia/src/core/SkWriter32.h279
-rw-r--r--gfx/skia/skia/src/core/SkXfermode.cpp172
-rw-r--r--gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp50
-rw-r--r--gfx/skia/skia/src/core/SkXfermodeInterpretation.h30
-rw-r--r--gfx/skia/skia/src/core/SkXfermodePriv.h62
-rw-r--r--gfx/skia/skia/src/core/SkYUVAInfo.cpp376
-rw-r--r--gfx/skia/skia/src/core/SkYUVAInfoLocation.h63
-rw-r--r--gfx/skia/skia/src/core/SkYUVAPixmaps.cpp297
-rw-r--r--gfx/skia/skia/src/core/SkYUVMath.cpp339
-rw-r--r--gfx/skia/skia/src/core/SkYUVMath.h19
-rw-r--r--gfx/skia/skia/src/core/SkYUVPlanesCache.cpp93
-rw-r--r--gfx/skia/skia/src/core/SkYUVPlanesCache.h38
-rw-r--r--gfx/skia/skia/src/effects/Sk1DPathEffect.cpp259
-rw-r--r--gfx/skia/skia/src/effects/Sk2DPathEffect.cpp223
-rw-r--r--gfx/skia/skia/src/effects/SkBlenders.cpp78
-rw-r--r--gfx/skia/skia/src/effects/SkColorMatrix.cpp116
-rw-r--r--gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp43
-rw-r--r--gfx/skia/skia/src/effects/SkCornerPathEffect.cpp187
-rw-r--r--gfx/skia/skia/src/effects/SkDashImpl.h49
-rw-r--r--gfx/skia/skia/src/effects/SkDashPathEffect.cpp413
-rw-r--r--gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp191
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMask.cpp121
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMask.h21
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp150
-rw-r--r--gfx/skia/skia/src/effects/SkEmbossMaskFilter.h61
-rw-r--r--gfx/skia/skia/src/effects/SkHighContrastFilter.cpp104
-rw-r--r--gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp339
-rw-r--r--gfx/skia/skia/src/effects/SkLumaColorFilter.cpp36
-rw-r--r--gfx/skia/skia/src/effects/SkOpPE.h102
-rw-r--r--gfx/skia/skia/src/effects/SkOpPathEffect.cpp242
-rw-r--r--gfx/skia/skia/src/effects/SkOverdrawColorFilter.cpp57
-rw-r--r--gfx/skia/skia/src/effects/SkShaderMaskFilter.cpp156
-rw-r--r--gfx/skia/skia/src/effects/SkTableColorFilter.cpp344
-rw-r--r--gfx/skia/skia/src/effects/SkTableMaskFilter.cpp186
-rw-r--r--gfx/skia/skia/src/effects/SkTrimPE.h39
-rw-r--r--gfx/skia/skia/src/effects/SkTrimPathEffect.cpp149
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkAlphaThresholdImageFilter.cpp334
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkArithmeticImageFilter.cpp497
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkBlendImageFilter.cpp351
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkBlurImageFilter.cpp1038
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkColorFilterImageFilter.cpp180
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkComposeImageFilter.cpp141
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkCropImageFilter.cpp173
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkCropImageFilter.h20
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkDisplacementMapImageFilter.cpp600
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkDropShadowImageFilter.cpp205
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkImageImageFilter.cpp183
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkLightingImageFilter.cpp2190
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMagnifierImageFilter.cpp296
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMatrixConvolutionImageFilter.cpp529
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMatrixTransformImageFilter.cpp184
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMergeImageFilter.cpp130
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkMorphologyImageFilter.cpp768
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkPictureImageFilter.cpp150
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkRuntimeImageFilter.cpp284
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkRuntimeImageFilter.h22
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkShaderImageFilter.cpp135
-rw-r--r--gfx/skia/skia/src/effects/imagefilters/SkTileImageFilter.cpp198
-rw-r--r--gfx/skia/skia/src/encode/SkEncoder.cpp29
-rw-r--r--gfx/skia/skia/src/encode/SkICC.cpp762
-rw-r--r--gfx/skia/skia/src/encode/SkICCPriv.h63
-rw-r--r--gfx/skia/skia/src/encode/SkImageEncoder.cpp110
-rw-r--r--gfx/skia/skia/src/encode/SkImageEncoderFns.h213
-rw-r--r--gfx/skia/skia/src/encode/SkImageEncoderPriv.h51
-rw-r--r--gfx/skia/skia/src/encode/SkJPEGWriteUtility.cpp79
-rw-r--r--gfx/skia/skia/src/encode/SkJPEGWriteUtility.h42
-rw-r--r--gfx/skia/skia/src/encode/SkJpegEncoder.cpp419
-rw-r--r--gfx/skia/skia/src/encode/SkJpegGainmapEncoder.cpp413
-rw-r--r--gfx/skia/skia/src/encode/SkPngEncoder.cpp493
-rw-r--r--gfx/skia/skia/src/encode/SkWebpEncoder.cpp249
-rw-r--r--gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp185
-rw-r--r--gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp23
-rw-r--r--gfx/skia/skia/src/image/SkImage.cpp540
-rw-r--r--gfx/skia/skia/src/image/SkImage_AndroidFactories.cpp201
-rw-r--r--gfx/skia/skia/src/image/SkImage_Base.cpp367
-rw-r--r--gfx/skia/skia/src/image/SkImage_Base.h302
-rw-r--r--gfx/skia/skia/src/image/SkImage_Gpu.cpp821
-rw-r--r--gfx/skia/skia/src/image/SkImage_Gpu.h179
-rw-r--r--gfx/skia/skia/src/image/SkImage_GpuBase.cpp360
-rw-r--r--gfx/skia/skia/src/image/SkImage_GpuBase.h90
-rw-r--r--gfx/skia/skia/src/image/SkImage_GpuYUVA.cpp440
-rw-r--r--gfx/skia/skia/src/image/SkImage_GpuYUVA.h105
-rw-r--r--gfx/skia/skia/src/image/SkImage_Lazy.cpp689
-rw-r--r--gfx/skia/skia/src/image/SkImage_Lazy.h152
-rw-r--r--gfx/skia/skia/src/image/SkImage_Raster.cpp467
-rw-r--r--gfx/skia/skia/src/image/SkImage_Raster.h147
-rw-r--r--gfx/skia/skia/src/image/SkRescaleAndReadPixels.cpp166
-rw-r--r--gfx/skia/skia/src/image/SkRescaleAndReadPixels.h21
-rw-r--r--gfx/skia/skia/src/image/SkSurface.cpp300
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Base.cpp169
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Base.h237
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Gpu.cpp813
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Gpu.h99
-rw-r--r--gfx/skia/skia/src/image/SkSurface_GpuMtl.mm169
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Null.cpp52
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Raster.cpp199
-rw-r--r--gfx/skia/skia/src/image/SkSurface_Raster.h55
-rw-r--r--gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp242
-rw-r--r--gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h65
-rw-r--r--gfx/skia/skia/src/opts/SkBitmapProcState_opts.h545
-rw-r--r--gfx/skia/skia/src/opts/SkBlitMask_opts.h238
-rw-r--r--gfx/skia/skia/src/opts/SkBlitRow_opts.h256
-rw-r--r--gfx/skia/skia/src/opts/SkChecksum_opts.h145
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_avx.cpp27
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_crc32.cpp21
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_hsw.cpp58
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_skx.cpp21
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_sse42.cpp21
-rw-r--r--gfx/skia/skia/src/opts/SkOpts_ssse3.cpp38
-rw-r--r--gfx/skia/skia/src/opts/SkRasterPipeline_opts.h5666
-rw-r--r--gfx/skia/skia/src/opts/SkSwizzler_opts.h1389
-rw-r--r--gfx/skia/skia/src/opts/SkUtils_opts.h71
-rw-r--r--gfx/skia/skia/src/opts/SkVM_opts.h351
-rw-r--r--gfx/skia/skia/src/opts/SkXfermode_opts.h137
-rw-r--r--gfx/skia/skia/src/pathops/SkAddIntersections.cpp595
-rw-r--r--gfx/skia/skia/src/pathops/SkAddIntersections.h15
-rw-r--r--gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp396
-rw-r--r--gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp464
-rw-r--r--gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp45
-rw-r--r--gfx/skia/skia/src/pathops/SkDLineIntersection.cpp344
-rw-r--r--gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp478
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersectionHelper.h113
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersections.cpp175
-rw-r--r--gfx/skia/skia/src/pathops/SkIntersections.h346
-rw-r--r--gfx/skia/skia/src/pathops/SkLineParameters.h181
-rw-r--r--gfx/skia/skia/src/pathops/SkOpAngle.cpp1156
-rw-r--r--gfx/skia/skia/src/pathops/SkOpAngle.h156
-rw-r--r--gfx/skia/skia/src/pathops/SkOpBuilder.cpp212
-rw-r--r--gfx/skia/skia/src/pathops/SkOpCoincidence.cpp1456
-rw-r--r--gfx/skia/skia/src/pathops/SkOpCoincidence.h307
-rw-r--r--gfx/skia/skia/src/pathops/SkOpContour.cpp110
-rw-r--r--gfx/skia/skia/src/pathops/SkOpContour.h464
-rw-r--r--gfx/skia/skia/src/pathops/SkOpCubicHull.cpp155
-rw-r--r--gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp360
-rw-r--r--gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h83
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSegment.cpp1787
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSegment.h466
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSpan.cpp490
-rw-r--r--gfx/skia/skia/src/pathops/SkOpSpan.h578
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsAsWinding.cpp457
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsBounds.h65
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp338
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCommon.h36
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsConic.cpp197
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsConic.h206
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp763
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCubic.h252
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp143
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsCurve.h427
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp3096
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsDebug.h453
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsLine.cpp154
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsLine.h41
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsOp.cpp395
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsPoint.h281
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp423
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsQuad.h196
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsRect.cpp67
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsRect.h84
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp236
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTCurve.h49
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp2149
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTSect.h376
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp83
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp226
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsTypes.h607
-rw-r--r--gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp441
-rw-r--r--gfx/skia/skia/src/pathops/SkPathWriter.cpp434
-rw-r--r--gfx/skia/skia/src/pathops/SkPathWriter.h56
-rw-r--r--gfx/skia/skia/src/pathops/SkReduceOrder.cpp290
-rw-r--r--gfx/skia/skia/src/pathops/SkReduceOrder.h37
-rw-r--r--gfx/skia/skia/src/pdf/SkBitmapKey.h22
-rw-r--r--gfx/skia/skia/src/pdf/SkClusterator.cpp64
-rw-r--r--gfx/skia/skia/src/pdf/SkClusterator.h47
-rw-r--r--gfx/skia/skia/src/pdf/SkDeflate.cpp134
-rw-r--r--gfx/skia/skia/src/pdf/SkDeflate.h53
-rw-r--r--gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp22
-rw-r--r--gfx/skia/skia/src/pdf/SkJpegInfo.cpp128
-rw-r--r--gfx/skia/skia/src/pdf/SkJpegInfo.h25
-rw-r--r--gfx/skia/skia/src/pdf/SkKeyedImage.cpp49
-rw-r--r--gfx/skia/skia/src/pdf/SkKeyedImage.h46
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFBitmap.cpp329
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFBitmap.h22
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDevice.cpp1761
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDevice.h209
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDocument.cpp641
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFDocumentPriv.h190
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFont.cpp724
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFont.h141
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp39
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFFormXObject.h28
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGlyphUse.h49
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGradientShader.cpp1013
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGradientShader.h67
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicStackState.cpp237
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicStackState.h41
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp142
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFGraphicState.h71
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp206
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h22
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp220
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h28
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMetadata.cpp325
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFMetadata.h29
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp96
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFResourceDict.h50
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFShader.cpp367
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFShader.h67
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFSubsetFont.cpp208
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFSubsetFont.h16
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTag.cpp372
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTag.h64
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFType1Font.cpp339
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFType1Font.h11
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTypes.cpp602
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFTypes.h218
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFUnion.h112
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFUtils.cpp395
-rw-r--r--gfx/skia/skia/src/pdf/SkPDFUtils.h137
-rw-r--r--gfx/skia/skia/src/pdf/SkUUID.h18
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_android.cpp38
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_stdio.cpp25
-rw-r--r--gfx/skia/skia/src/ports/SkDebug_win.cpp34
-rw-r--r--gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp14
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface.cpp33
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp709
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h44
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp16
-rw-r--r--gfx/skia/skia/src/ports/SkFontConfigTypeface.h66
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp2365
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp2091
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h191
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_cairo.cpp681
-rw-r--r--gfx/skia/skia/src/ports/SkFontHost_win.cpp2356
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp276
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android.cpp508
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp14
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp846
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_android_parser.h216
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom.cpp228
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom.h142
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_directory.cpp104
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp21
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_embedded.cpp117
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp17
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_empty.cpp27
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp948
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp14
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_fuchsia.cpp515
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_mac_ct.cpp532
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_mac_ct_factory.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp956
-rw-r--r--gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp18
-rw-r--r--gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp62
-rw-r--r--gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp118
-rw-r--r--gfx/skia/skia/src/ports/SkImageEncoder_NDK.cpp72
-rw-r--r--gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp198
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp157
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorNDK.cpp222
-rw-r--r--gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp205
-rw-r--r--gfx/skia/skia/src/ports/SkImageGenerator_none.cpp13
-rw-r--r--gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp14
-rw-r--r--gfx/skia/skia/src/ports/SkMemory_malloc.cpp110
-rw-r--r--gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp49
-rw-r--r--gfx/skia/skia/src/ports/SkNDKConversions.cpp119
-rw-r--r--gfx/skia/skia/src/ports/SkNDKConversions.h31
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_ios.h52
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_posix.cpp220
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_stdio.cpp180
-rw-r--r--gfx/skia/skia/src/ports/SkOSFile_win.cpp286
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary.h15
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp26
-rw-r--r--gfx/skia/skia/src/ports/SkOSLibrary_win.cpp25
-rw-r--r--gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp472
-rw-r--r--gfx/skia/skia/src/ports/SkScalerContext_mac_ct.cpp789
-rw-r--r--gfx/skia/skia/src/ports/SkScalerContext_mac_ct.h112
-rw-r--r--gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp1523
-rw-r--r--gfx/skia/skia/src/ports/SkScalerContext_win_dw.h117
-rw-r--r--gfx/skia/skia/src/ports/SkTypeface_mac_ct.cpp1541
-rw-r--r--gfx/skia/skia/src/ports/SkTypeface_mac_ct.h145
-rw-r--r--gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp1094
-rw-r--r--gfx/skia/skia/src/ports/SkTypeface_win_dw.h180
-rw-r--r--gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h142
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTableTypes.h62
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h108
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h150
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h41
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h52
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h146
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h515
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h538
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h547
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h582
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h141
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_fvar.h56
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_gasp.h72
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_glyf.h218
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_head.h146
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_hhea.h54
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_hmtx.h34
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_loca.h31
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp.h34
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h30
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h48
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_name.cpp586
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_name.h577
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTTable_post.h50
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTUtils.cpp231
-rw-r--r--gfx/skia/skia/src/sfnt/SkOTUtils.h105
-rw-r--r--gfx/skia/skia/src/sfnt/SkPanose.h527
-rw-r--r--gfx/skia/skia/src/sfnt/SkSFNTHeader.h70
-rw-r--r--gfx/skia/skia/src/sfnt/SkTTCFHeader.h57
-rw-r--r--gfx/skia/skia/src/shaders/SkBitmapProcShader.cpp93
-rw-r--r--gfx/skia/skia/src/shaders/SkBitmapProcShader.h26
-rw-r--r--gfx/skia/skia/src/shaders/SkColorFilterShader.cpp145
-rw-r--r--gfx/skia/skia/src/shaders/SkColorFilterShader.h53
-rw-r--r--gfx/skia/skia/src/shaders/SkColorShader.cpp275
-rw-r--r--gfx/skia/skia/src/shaders/SkComposeShader.cpp243
-rw-r--r--gfx/skia/skia/src/shaders/SkCoordClampShader.cpp177
-rw-r--r--gfx/skia/skia/src/shaders/SkEmptyShader.cpp65
-rw-r--r--gfx/skia/skia/src/shaders/SkGainmapShader.cpp167
-rw-r--r--gfx/skia/skia/src/shaders/SkImageShader.cpp1142
-rw-r--r--gfx/skia/skia/src/shaders/SkImageShader.h106
-rw-r--r--gfx/skia/skia/src/shaders/SkLocalMatrixShader.cpp221
-rw-r--r--gfx/skia/skia/src/shaders/SkLocalMatrixShader.h81
-rw-r--r--gfx/skia/skia/src/shaders/SkPerlinNoiseShader.cpp1149
-rw-r--r--gfx/skia/skia/src/shaders/SkPictureShader.cpp501
-rw-r--r--gfx/skia/skia/src/shaders/SkPictureShader.h75
-rw-r--r--gfx/skia/skia/src/shaders/SkShader.cpp334
-rw-r--r--gfx/skia/skia/src/shaders/SkShaderBase.h494
-rw-r--r--gfx/skia/skia/src/shaders/SkTransformShader.cpp99
-rw-r--r--gfx/skia/skia/src/shaders/SkTransformShader.h59
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkGradientShader.cpp7
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkGradientShaderBase.cpp1325
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkGradientShaderBase.h192
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkLinearGradient.cpp180
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkLinearGradient.h50
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkRadialGradient.cpp218
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkSweepGradient.cpp294
-rw-r--r--gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp657
-rw-r--r--gfx/skia/skia/src/sksl/GLSL.std.450.h131
-rw-r--r--gfx/skia/skia/src/sksl/README.md158
-rw-r--r--gfx/skia/skia/src/sksl/SkSLAnalysis.cpp705
-rw-r--r--gfx/skia/skia/src/sksl/SkSLAnalysis.h261
-rw-r--r--gfx/skia/skia/src/sksl/SkSLBuiltinTypes.cpp205
-rw-r--r--gfx/skia/skia/src/sksl/SkSLBuiltinTypes.h167
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCompiler.cpp726
-rw-r--r--gfx/skia/skia/src/sksl/SkSLCompiler.h242
-rw-r--r--gfx/skia/skia/src/sksl/SkSLConstantFolder.cpp884
-rw-r--r--gfx/skia/skia/src/sksl/SkSLConstantFolder.h71
-rw-r--r--gfx/skia/skia/src/sksl/SkSLContext.cpp29
-rw-r--r--gfx/skia/skia/src/sksl/SkSLContext.h49
-rw-r--r--gfx/skia/skia/src/sksl/SkSLErrorReporter.cpp29
-rw-r--r--gfx/skia/skia/src/sksl/SkSLFileOutputStream.h78
-rw-r--r--gfx/skia/skia/src/sksl/SkSLGLSL.h58
-rw-r--r--gfx/skia/skia/src/sksl/SkSLInliner.cpp1062
-rw-r--r--gfx/skia/skia/src/sksl/SkSLInliner.h119
-rw-r--r--gfx/skia/skia/src/sksl/SkSLIntrinsicList.cpp33
-rw-r--r--gfx/skia/skia/src/sksl/SkSLIntrinsicList.h145
-rw-r--r--gfx/skia/skia/src/sksl/SkSLLexer.cpp808
-rw-r--r--gfx/skia/skia/src/sksl/SkSLLexer.h145
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMangler.cpp76
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMangler.h35
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMemoryLayout.h211
-rw-r--r--gfx/skia/skia/src/sksl/SkSLMemoryPool.h44
-rw-r--r--gfx/skia/skia/src/sksl/SkSLModifiersPool.h38
-rw-r--r--gfx/skia/skia/src/sksl/SkSLModuleLoader.cpp444
-rw-r--r--gfx/skia/skia/src/sksl/SkSLModuleLoader.h67
-rw-r--r--gfx/skia/skia/src/sksl/SkSLOperator.cpp384
-rw-r--r--gfx/skia/skia/src/sksl/SkSLOutputStream.cpp41
-rw-r--r--gfx/skia/skia/src/sksl/SkSLOutputStream.h58
-rw-r--r--gfx/skia/skia/src/sksl/SkSLParser.cpp2248
-rw-r--r--gfx/skia/skia/src/sksl/SkSLParser.h369
-rw-r--r--gfx/skia/skia/src/sksl/SkSLPool.cpp97
-rw-r--r--gfx/skia/skia/src/sksl/SkSLPool.h96
-rw-r--r--gfx/skia/skia/src/sksl/SkSLPosition.cpp34
-rw-r--r--gfx/skia/skia/src/sksl/SkSLProgramSettings.h160
-rw-r--r--gfx/skia/skia/src/sksl/SkSLSampleUsage.cpp26
-rw-r--r--gfx/skia/skia/src/sksl/SkSLString.cpp115
-rw-r--r--gfx/skia/skia/src/sksl/SkSLStringStream.h58
-rw-r--r--gfx/skia/skia/src/sksl/SkSLThreadContext.cpp126
-rw-r--r--gfx/skia/skia/src/sksl/SkSLThreadContext.h177
-rw-r--r--gfx/skia/skia/src/sksl/SkSLUtil.cpp89
-rw-r--r--gfx/skia/skia/src/sksl/SkSLUtil.h187
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLCanExitWithoutReturningValue.cpp176
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLCheckProgramStructure.cpp223
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLFinalizationChecks.cpp172
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLGetLoopControlFlowInfo.cpp79
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLGetLoopUnrollInfo.cpp280
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLGetReturnComplexity.cpp131
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLHasSideEffects.cpp65
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLIsConstantExpression.cpp113
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLIsDynamicallyUniformExpression.cpp86
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLIsSameExpressionTree.cpp99
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLIsTrivialExpression.cpp70
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLNoOpErrorReporter.h23
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLProgramUsage.cpp242
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLProgramUsage.h53
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLProgramVisitor.h77
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLSwitchCaseContainsExit.cpp98
-rw-r--r--gfx/skia/skia/src/sksl/analysis/SkSLSymbolTableStackBuilder.cpp63
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLCodeGenerator.h89
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLGLSLCodeGenerator.cpp1774
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLGLSLCodeGenerator.h210
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLMetalCodeGenerator.cpp3226
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLMetalCodeGenerator.h330
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLPipelineStageCodeGenerator.cpp814
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLPipelineStageCodeGenerator.h70
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineBuilder.cpp2861
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineBuilder.h655
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.cpp3444
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.h32
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLSPIRVCodeGenerator.cpp4365
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLSPIRVCodeGenerator.h601
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLSPIRVtoHLSL.cpp49
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLSPIRVtoHLSL.h19
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLVMCodeGenerator.cpp2302
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLVMCodeGenerator.h79
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLWGSLCodeGenerator.cpp1939
-rw-r--r--gfx/skia/skia/src/sksl/codegen/SkSLWGSLCodeGenerator.h289
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLBlock.cpp49
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLCase.cpp46
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLCore.cpp615
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLExpression.cpp295
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLFunction.cpp146
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLLayout.cpp36
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLStatement.cpp67
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLType.cpp316
-rw-r--r--gfx/skia/skia/src/sksl/dsl/DSLVar.cpp177
-rw-r--r--gfx/skia/skia/src/sksl/dsl/priv/DSLWriter.cpp132
-rw-r--r--gfx/skia/skia/src/sksl/dsl/priv/DSLWriter.h64
-rw-r--r--gfx/skia/skia/src/sksl/dsl/priv/DSL_priv.h32
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_compute.minified.sksl7
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_compute.unoptimized.sksl7
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_frag.minified.sksl5
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_frag.unoptimized.sksl5
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_gpu.minified.sksl85
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_gpu.unoptimized.sksl107
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.dehydrated.sksl3119
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.minified.sksl179
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.unoptimized.sksl314
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_graphite_vert.minified.sksl64
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_graphite_vert.unoptimized.sksl121
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_public.minified.sksl4
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_public.unoptimized.sksl4
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_rt_shader.minified.sksl2
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_rt_shader.unoptimized.sksl2
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_shared.minified.sksl143
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_shared.unoptimized.sksl163
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_vert.minified.sksl4
-rw-r--r--gfx/skia/skia/src/sksl/generated/sksl_vert.unoptimized.sksl4
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.cpp284
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h112
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBlock.cpp100
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBlock.h110
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h44
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLChildCall.cpp73
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLChildCall.h72
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructor.cpp241
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructor.h143
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorArray.cpp94
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorArray.h58
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorArrayCast.cpp73
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorArrayCast.h54
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorCompound.cpp158
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorCompound.h55
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorCompoundCast.cpp100
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorCompoundCast.h52
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.cpp45
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.h55
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorMatrixResize.cpp58
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorMatrixResize.h56
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorScalarCast.cpp93
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorScalarCast.h61
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorSplat.cpp38
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorSplat.h63
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorStruct.cpp87
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLConstructorStruct.h58
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h44
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.cpp33
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h49
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLDoStatement.cpp59
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h78
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExpression.cpp50
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExpression.h143
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.cpp57
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h66
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLExtension.h46
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLField.h56
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.cpp125
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h104
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLForStatement.cpp197
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLForStatement.h150
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.cpp1056
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h89
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.cpp598
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h153
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.cpp246
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h91
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionPrototype.h55
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h55
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIfStatement.cpp108
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h91
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.cpp178
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h97
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.cpp132
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h114
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLLayout.cpp75
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLLiteral.cpp23
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLLiteral.h145
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLMethodReference.h73
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLModifiers.cpp115
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLModifiersDeclaration.h49
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLNop.h48
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPoison.h36
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.cpp50
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h76
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.cpp282
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h73
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLProgram.cpp116
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLProgram.h171
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h65
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSetting.cpp94
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSetting.h75
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLStructDefinition.h66
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwitchCase.h86
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.cpp275
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.h102
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwizzle.cpp548
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h103
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp122
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h214
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.cpp136
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h100
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLType.cpp1208
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLType.h600
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLTypeReference.cpp32
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h70
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.cpp468
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.h164
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariable.cpp212
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariable.h179
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariableReference.cpp33
-rw-r--r--gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h87
-rw-r--r--gfx/skia/skia/src/sksl/lex/DFA.h37
-rw-r--r--gfx/skia/skia/src/sksl/lex/DFAState.h75
-rw-r--r--gfx/skia/skia/src/sksl/lex/LexUtil.h20
-rw-r--r--gfx/skia/skia/src/sksl/lex/Main.cpp238
-rw-r--r--gfx/skia/skia/src/sksl/lex/NFA.cpp44
-rw-r--r--gfx/skia/skia/src/sksl/lex/NFA.h58
-rw-r--r--gfx/skia/skia/src/sksl/lex/NFAState.h152
-rw-r--r--gfx/skia/skia/src/sksl/lex/NFAtoDFA.h168
-rw-r--r--gfx/skia/skia/src/sksl/lex/RegexNode.cpp123
-rw-r--r--gfx/skia/skia/src/sksl/lex/RegexNode.h79
-rw-r--r--gfx/skia/skia/src/sksl/lex/RegexParser.cpp183
-rw-r--r--gfx/skia/skia/src/sksl/lex/RegexParser.h89
-rw-r--r--gfx/skia/skia/src/sksl/lex/TransitionTable.cpp241
-rw-r--r--gfx/skia/skia/src/sksl/lex/TransitionTable.h18
-rw-r--r--gfx/skia/skia/src/sksl/lex/sksl.lex102
-rw-r--r--gfx/skia/skia/src/sksl/sksl_compute.sksl21
-rw-r--r--gfx/skia/skia/src/sksl/sksl_frag.sksl9
-rw-r--r--gfx/skia/skia/src/sksl/sksl_gpu.sksl324
-rw-r--r--gfx/skia/skia/src/sksl/sksl_graphite_frag.sksl1135
-rw-r--r--gfx/skia/skia/src/sksl/sksl_graphite_vert.sksl535
-rw-r--r--gfx/skia/skia/src/sksl/sksl_public.sksl10
-rw-r--r--gfx/skia/skia/src/sksl/sksl_rt_shader.sksl1
-rw-r--r--gfx/skia/skia/src/sksl/sksl_shared.sksl449
-rw-r--r--gfx/skia/skia/src/sksl/sksl_vert.sksl9
-rw-r--r--gfx/skia/skia/src/sksl/spirv.h870
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkRPDebugTrace.cpp32
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkRPDebugTrace.h48
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkSLDebugInfo.h55
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkSLTraceHook.cpp35
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkSLTraceHook.h45
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkVMDebugTrace.cpp417
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkVMDebugTrace.h78
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkVMDebugTracePlayer.cpp284
-rw-r--r--gfx/skia/skia/src/sksl/tracing/SkVMDebugTracePlayer.h136
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLAddConstToVarModifiers.cpp44
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadFunctions.cpp79
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadGlobalVariables.cpp90
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadLocalVariables.cpp169
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLEliminateEmptyStatements.cpp67
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLEliminateUnreachableCode.cpp214
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLFindAndDeclareBuiltinFunctions.cpp95
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLFindAndDeclareBuiltinVariables.cpp180
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLProgramWriter.h40
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLRenamePrivateSymbols.cpp243
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLReplaceConstVarsWithLiterals.cpp104
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLRewriteIndexedSwizzle.cpp54
-rw-r--r--gfx/skia/skia/src/sksl/transform/SkSLTransform.h103
-rw-r--r--gfx/skia/skia/src/text/GlyphRun.cpp372
-rw-r--r--gfx/skia/skia/src/text/GlyphRun.h183
-rw-r--r--gfx/skia/skia/src/text/StrikeForGPU.cpp89
-rw-r--r--gfx/skia/skia/src/text/StrikeForGPU.h133
-rw-r--r--gfx/skia/skia/src/utils/SkAnimCodecPlayer.cpp155
-rw-r--r--gfx/skia/skia/src/utils/SkBase64.cpp154
-rw-r--r--gfx/skia/skia/src/utils/SkBitSet.h147
-rw-r--r--gfx/skia/skia/src/utils/SkBlitterTrace.h62
-rw-r--r--gfx/skia/skia/src/utils/SkBlitterTraceCommon.h180
-rw-r--r--gfx/skia/skia/src/utils/SkCallableTraits.h86
-rw-r--r--gfx/skia/skia/src/utils/SkCamera.cpp239
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStack.cpp117
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStack.h82
-rw-r--r--gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp338
-rw-r--r--gfx/skia/skia/src/utils/SkCharToGlyphCache.cpp129
-rw-r--r--gfx/skia/skia/src/utils/SkCharToGlyphCache.h65
-rw-r--r--gfx/skia/skia/src/utils/SkClipStackUtils.cpp42
-rw-r--r--gfx/skia/skia/src/utils/SkClipStackUtils.h21
-rw-r--r--gfx/skia/skia/src/utils/SkCustomTypeface.cpp523
-rw-r--r--gfx/skia/skia/src/utils/SkCycles.h56
-rw-r--r--gfx/skia/skia/src/utils/SkDashPath.cpp485
-rw-r--r--gfx/skia/skia/src/utils/SkDashPathPriv.h56
-rw-r--r--gfx/skia/skia/src/utils/SkEventTracer.cpp71
-rw-r--r--gfx/skia/skia/src/utils/SkFloatToDecimal.cpp187
-rw-r--r--gfx/skia/skia/src/utils/SkFloatToDecimal.h34
-rw-r--r--gfx/skia/skia/src/utils/SkFloatUtils.h173
-rw-r--r--gfx/skia/skia/src/utils/SkGaussianColorFilter.cpp135
-rw-r--r--gfx/skia/skia/src/utils/SkJSON.cpp933
-rw-r--r--gfx/skia/skia/src/utils/SkJSON.h372
-rw-r--r--gfx/skia/skia/src/utils/SkJSONWriter.cpp47
-rw-r--r--gfx/skia/skia/src/utils/SkJSONWriter.h419
-rw-r--r--gfx/skia/skia/src/utils/SkMatrix22.cpp41
-rw-r--r--gfx/skia/skia/src/utils/SkMatrix22.h31
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp224
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocument.h51
-rw-r--r--gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h21
-rw-r--r--gfx/skia/skia/src/utils/SkNWayCanvas.cpp414
-rw-r--r--gfx/skia/skia/src/utils/SkNullCanvas.cpp17
-rw-r--r--gfx/skia/skia/src/utils/SkOSPath.cpp49
-rw-r--r--gfx/skia/skia/src/utils/SkOSPath.h55
-rw-r--r--gfx/skia/skia/src/utils/SkOrderedFontMgr.cpp109
-rw-r--r--gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp301
-rw-r--r--gfx/skia/skia/src/utils/SkParse.cpp302
-rw-r--r--gfx/skia/skia/src/utils/SkParseColor.cpp388
-rw-r--r--gfx/skia/skia/src/utils/SkParsePath.cpp305
-rw-r--r--gfx/skia/skia/src/utils/SkPatchUtils.cpp390
-rw-r--r--gfx/skia/skia/src/utils/SkPatchUtils.h60
-rw-r--r--gfx/skia/skia/src/utils/SkPolyUtils.cpp1774
-rw-r--r--gfx/skia/skia/src/utils/SkPolyUtils.h116
-rw-r--r--gfx/skia/skia/src/utils/SkShaderUtils.cpp226
-rw-r--r--gfx/skia/skia/src/utils/SkShaderUtils.h40
-rw-r--r--gfx/skia/skia/src/utils/SkShadowTessellator.cpp1191
-rw-r--r--gfx/skia/skia/src/utils/SkShadowTessellator.h49
-rw-r--r--gfx/skia/skia/src/utils/SkShadowUtils.cpp844
-rw-r--r--gfx/skia/skia/src/utils/SkTestCanvas.h62
-rw-r--r--gfx/skia/skia/src/utils/SkTextUtils.cpp60
-rw-r--r--gfx/skia/skia/src/utils/SkVMVisualizer.cpp407
-rw-r--r--gfx/skia/skia/src/utils/SkVMVisualizer.h94
-rw-r--r--gfx/skia/skia/src/utils/mac/SkCGBase.h37
-rw-r--r--gfx/skia/skia/src/utils/mac/SkCGGeometry.h52
-rw-r--r--gfx/skia/skia/src/utils/mac/SkCTFont.cpp425
-rw-r--r--gfx/skia/skia/src/utils/mac/SkCTFont.h52
-rw-r--r--gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp253
-rw-r--r--gfx/skia/skia/src/utils/mac/SkUniqueCFRef.h24
-rw-r--r--gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp32
-rw-r--r--gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h32
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWrite.cpp138
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWrite.h99
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp236
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h88
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp155
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h58
-rw-r--r--gfx/skia/skia/src/utils/win/SkDWriteNTDDI_VERSION.h31
-rw-r--r--gfx/skia/skia/src/utils/win/SkHRESULT.cpp40
-rw-r--r--gfx/skia/skia/src/utils/win/SkHRESULT.h62
-rw-r--r--gfx/skia/skia/src/utils/win/SkIStream.cpp236
-rw-r--r--gfx/skia/skia/src/utils/win/SkIStream.h101
-rw-r--r--gfx/skia/skia/src/utils/win/SkObjBase.h25
-rw-r--r--gfx/skia/skia/src/utils/win/SkTScopedComPtr.h86
-rw-r--r--gfx/skia/skia/src/utils/win/SkWGL.h165
-rw-r--r--gfx/skia/skia/src/utils/win/SkWGL_win.cpp513
1488 files changed, 373331 insertions, 0 deletions
diff --git a/gfx/skia/LICENSE b/gfx/skia/LICENSE
new file mode 100644
index 0000000000..e74c256cba
--- /dev/null
+++ b/gfx/skia/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/gfx/skia/README b/gfx/skia/README
new file mode 100644
index 0000000000..84e4ecc907
--- /dev/null
+++ b/gfx/skia/README
@@ -0,0 +1,3 @@
+Skia is a complete 2D graphic library for drawing Text, Geometries, and Images.
+
+See full details, and build instructions, at http://code.google.com/p/skia/wiki/DocRoot
diff --git a/gfx/skia/README_COMMITTING b/gfx/skia/README_COMMITTING
new file mode 100644
index 0000000000..4014ea3c7f
--- /dev/null
+++ b/gfx/skia/README_COMMITTING
@@ -0,0 +1,10 @@
+Any changes to Skia should have at a minimum both a Mozilla bug tagged with the [skia-upstream]
+whiteboard tag, and also an upstream bug and review request. Any patches that do ultimately land
+in mozilla-central must be reviewed by a Skia submodule peer.
+
+See https://wiki.mozilla.org/Modules/Core#Graphics for current peers.
+
+In most cases the patch will need to have an r+ from upstream before it is eligible to land here.
+
+For information on submitting upstream, see:
+https://sites.google.com/site/skiadocs/developer-documentation/contributing-code/how-to-submit-a-patch
diff --git a/gfx/skia/README_MOZILLA b/gfx/skia/README_MOZILLA
new file mode 100644
index 0000000000..af7a003743
--- /dev/null
+++ b/gfx/skia/README_MOZILLA
@@ -0,0 +1,33 @@
+This is an import of Skia. See skia/include/core/SkMilestone.h for the milestone number.
+
+Current upstream hash: 4655534302e6a3601c77eae70cc65b202609ab66 (milestone 79)
+
+How to update
+=============
+
+To update to a new version of Skia:
+
+- Clone Skia from upstream using the instructions here: https://skia.org/user/download
+ Usually: `git clone https://skia.googlesource.com/skia`
+- Checkout the wanted revision (`git checkout -b <hash>`). See above for the current
+  hash.
+- Copy the entire source tree from a Skia clone to mozilla-central/gfx/skia/skia
+ (make sure that . files are also copied as .gn is mandatory).
+- Download gn: cd gfx/skia/skia/bin && python fetch-gn && cd -
+ Note that these scripts might need Python 2.7. Please use a virtualenv if needed.
+- cd gfx/skia && ./generate_mozbuild.py
+
+Once that's done, use git status to view the files that have changed. Keep an eye on GrUserConfig.h
+and SkUserConfig.h as those probably don't want to be overwritten by upstream versions.
+
+This process will be made more automatic in the future.
+
+Debug
+=====
+
+In case of issues when updating, run the command manually.
+For example, if the following error occurs:
+`subprocess.CalledProcessError: Command 'cd skia && bin/gn gen out/linux --args='target_os="linux" ' > /dev/null && bin/gn desc out/linux :skia sources' returned non-zero exit status 1`
+Run:
+`cd skia && bin/gn gen out/linux --args='target_os="linux"'`
+and look at the errors.
diff --git a/gfx/skia/generate_mozbuild.py b/gfx/skia/generate_mozbuild.py
new file mode 100755
index 0000000000..39675ea9c2
--- /dev/null
+++ b/gfx/skia/generate_mozbuild.py
@@ -0,0 +1,416 @@
+#!/usr/bin/env python3
+
+import locale
+import subprocess
+from collections import defaultdict
+locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+
+header = """
+#
+# ##### ####### # # # # # #
+# ## # # # # # # # # # # # #
+# ## # # # # # # # # # # #
+# ## #### # # # # # # # # # #
+# ## # # # ####### # # # ####### # ###
+# ## # # # # # # # # # # # ###
+# # ##### ####### # # ## ## # # # ###
+#
+# Seriously. You shouldn't even be looking at this file unless you're
+# debugging generate_mozbuild.py.
+#
+# DO NOT MODIFY THIS FILE IT IS AUTOGENERATED.
+#
+
+skia_opt_flags = []
+
+if CONFIG['MOZ_OPTIMIZE']:
+ if CONFIG['CC_TYPE'] == 'clang-cl':
+ skia_opt_flags += ['-O2']
+ elif CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ skia_opt_flags += ['-O3']
+
+"""
+
+footer = """
+
+# We allow warnings for third-party code that can be updated from upstream.
+AllowCompilerWarnings()
+
+FINAL_LIBRARY = 'gkmedias'
+LOCAL_INCLUDES += [
+ 'skia',
+]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ DEFINES['UNICODE'] = True
+ DEFINES['_UNICODE'] = True
+ UNIFIED_SOURCES += [
+ 'skia/src/fonts/SkFontMgr_indirect.cpp',
+ 'skia/src/fonts/SkRemotableFontMgr.cpp',
+ ]
+
+# We should autogenerate these SSE related flags.
+
+if CONFIG['INTEL_ARCHITECTURE']:
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += ['-Dskvx=skvx_ssse3', '-mssse3']
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += ['-Dskvx=skvx_sse42', '-msse4.2']
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += ['-Dskvx=skvx_avx', '-mavx']
+ SOURCES['skia/src/opts/SkOpts_hsw.cpp'].flags += ['-Dskvx=skvx_hsw', '-mavx2', '-mf16c', '-mfma']
+ if not CONFIG["MOZ_CODE_COVERAGE"]:
+ SOURCES['skia/src/opts/SkOpts_skx.cpp'].flags += ['-Dskvx=skvx_skx', '-mavx512f', '-mavx512dq', '-mavx512cd', '-mavx512bw', '-mavx512vl']
+elif CONFIG['CPU_ARCH'] == 'aarch64' and CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ SOURCES['skia/src/opts/SkOpts_crc32.cpp'].flags += ['-Dskvx=skvx_crc32', '-march=armv8-a+crc']
+
+DEFINES['MOZ_SKIA'] = True
+
+DEFINES['SKIA_IMPLEMENTATION'] = 1
+
+DEFINES['SK_PDF_USE_HARFBUZZ_SUBSETTING'] = 1
+
+if CONFIG['MOZ_TREE_FREETYPE']:
+ DEFINES['SK_CAN_USE_DLOPEN'] = 0
+
+# Suppress warnings in third-party code.
+CXXFLAGS += [
+ '-Wno-deprecated-declarations',
+ '-Wno-overloaded-virtual',
+ '-Wno-sign-compare',
+ '-Wno-unreachable-code',
+ '-Wno-unused-function',
+]
+if CONFIG['CC_TYPE'] == 'gcc':
+ CXXFLAGS += [
+ '-Wno-logical-op',
+ '-Wno-maybe-uninitialized',
+ ]
+if CONFIG['CC_TYPE'] in ('clang', 'clang-cl'):
+ CXXFLAGS += [
+ '-Wno-implicit-fallthrough',
+ '-Wno-inconsistent-missing-override',
+ '-Wno-macro-redefined',
+ '-Wno-unused-private-field',
+ ]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('gtk', 'android'):
+ LOCAL_INCLUDES += [
+ "/gfx/cairo/cairo/src",
+ ]
+ CXXFLAGS += CONFIG['CAIRO_FT_CFLAGS']
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':
+ CXXFLAGS += CONFIG['MOZ_PANGO_CFLAGS']
+
+if CONFIG['CPU_ARCH'] in ('mips32', 'mips64'):
+ # The skia code uses `mips` as a variable, but it's a builtin preprocessor
+ # macro on mips that expands to `1`.
+ DEFINES['mips'] = False
+"""
+
+import json
+
+platforms = ['linux', 'mac', 'android', 'win']
+
+def parse_sources(output):
+ return set(v.replace('//', 'skia/') for v in output.decode('utf-8').split() if v.endswith('.cpp') or v.endswith('.S'))
+
+def generate_opt_sources():
+ cpus = [('intel', 'x86', [':ssse3', ':sse42', ':avx', ':hsw', ':skx']),
+ ('arm64', 'arm64', [':crc32'])]
+
+ opt_sources = {}
+ for key, cpu, deps in cpus:
+ subprocess.check_output('cd skia && bin/gn gen out/{0} --args=\'target_cpu="{1}"\''.format(key, cpu), shell=True)
+ opt_sources[key] = set()
+ for dep in deps:
+ try:
+ output = subprocess.check_output('cd skia && bin/gn desc out/{0} {1} sources'.format(key, dep), shell=True)
+ if output:
+ opt_sources[key].update(parse_sources(output))
+ except subprocess.CalledProcessError as e:
+ if e.output.find(b'source_set') < 0:
+ raise
+
+ return opt_sources
+
+def generate_platform_sources():
+ sources = {}
+ platform_args = {
+ 'win' : 'win_vc="C:/" win_sdk_version="00.0.00000.0" win_toolchain_version="00.00.00000"'
+ }
+ for plat in platforms:
+ args = platform_args.get(plat, '')
+ output = subprocess.check_output('cd skia && bin/gn gen out/{0} --args=\'target_os="{0}" {1}\' > /dev/null && bin/gn desc out/{0} :skia sources'.format(plat, args), shell=True)
+ if output:
+ sources[plat] = parse_sources(output)
+
+ plat_deps = {
+ ':fontmgr_win' : 'win',
+ ':fontmgr_win_gdi' : 'win',
+ ':fontmgr_mac_ct' : 'mac',
+ }
+ for dep, key in plat_deps.items():
+ output = subprocess.check_output('cd skia && bin/gn desc out/{1} {0} sources'.format(dep, key), shell=True)
+ if output:
+ sources[key].update(parse_sources(output))
+
+ deps = {':pdf' : 'pdf'}
+ for dep, key in deps.items():
+ output = subprocess.check_output('cd skia && bin/gn desc out/linux {} sources'.format(dep), shell=True)
+ if output:
+ sources[key] = parse_sources(output)
+
+ sources.update(generate_opt_sources())
+ return sources
+
+
+def generate_separated_sources(platform_sources):
+ ignorelist = [
+ 'skia/src/android/',
+ 'skia/src/effects/',
+ 'skia/src/fonts/',
+ 'skia/src/ports/SkImageEncoder',
+ 'skia/src/ports/SkImageGenerator',
+ 'SkLight',
+ 'codec',
+ 'SkWGL',
+ 'SkMemory_malloc',
+ 'third_party',
+ 'SkAnimCodecPlayer',
+ 'SkCamera',
+ 'SkCanvasStack',
+ 'SkCanvasStateUtils',
+ 'JSON',
+ 'SkMultiPictureDocument',
+ 'SkNullCanvas',
+ 'SkNWayCanvas',
+ 'SkOverdrawCanvas',
+ 'SkPaintFilterCanvas',
+ 'SkParseColor',
+ 'SkXPS',
+ 'SkCreateCGImageRef',
+ 'skia/src/ports/SkGlobalInitialization',
+ 'SkICC',
+ ]
+
+ def isignorelisted(value):
+ for item in ignorelist:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ separated = defaultdict(set, {
+ 'common': {
+ 'skia/src/codec/SkMasks.cpp',
+ 'skia/src/effects/imagefilters/SkBlurImageFilter.cpp',
+ 'skia/src/effects/imagefilters/SkComposeImageFilter.cpp',
+ 'skia/src/effects/SkDashPathEffect.cpp',
+ 'skia/src/ports/SkDiscardableMemory_none.cpp',
+ 'skia/src/ports/SkGlobalInitialization_default.cpp',
+ 'skia/src/ports/SkMemory_mozalloc.cpp',
+ 'skia/src/ports/SkImageGenerator_none.cpp',
+ 'skia/modules/skcms/skcms.cc',
+ 'skia/src/core/SkImageFilterTypes.cpp',
+ 'skia/src/ports/SkFontMgr_empty_factory.cpp',
+ },
+ 'android': {
+ # 'skia/src/ports/SkDebug_android.cpp',
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ # 'skia/src/ports/SkFontHost_FreeType.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ # 'skia/src/ports/SkTime_Unix.cpp',
+ # 'skia/src/utils/SkThreadUtils_pthread.cpp',
+ },
+ 'linux': {
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ },
+ 'win': set (),
+ 'intel': set(),
+ 'arm': set(),
+ 'arm64': set(),
+ 'none': set(),
+ 'pdf': set()
+ })
+
+ for plat in platform_sources.keys():
+ for value in platform_sources[plat]:
+ if isignorelisted(value):
+ continue
+
+ if value in separated['common']:
+ continue
+
+ key = plat
+
+ if all(value in platform_sources.get(p, {})
+ for p in platforms if p != plat):
+ key = 'common'
+
+ separated[key].add(value)
+
+ return separated
+
+def uniq(seq):
+ seen = set()
+ seen_add = seen.add
+ return [ x for x in seq if x not in seen and not seen_add(x)]
+
+def write_cflags(f, values, subsearch, cflag, indent):
+ def write_indent(indent):
+ for _ in range(indent):
+ f.write(' ')
+
+ if isinstance(subsearch, str):
+ subsearch = [ subsearch ]
+
+ def isallowlisted(value):
+ for item in subsearch:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ val_list = uniq(sorted(values, key=lambda x: x.lower()))
+
+ if len(val_list) == 0:
+ return
+
+ for val in val_list:
+ if isallowlisted(val):
+ write_indent(indent)
+ f.write("SOURCES[\'" + val + "\'].flags += " + cflag + "\n")
+
+opt_allowlist = [
+ 'SkOpts',
+ 'SkBitmapProcState',
+ 'SkBlitRow',
+ 'SkBlitter',
+ 'SkSpriteBlitter',
+ 'SkMatrix.cpp',
+ 'skcms',
+]
+
+# Unfortunately for now the gpu and pathops directories are
+# non-unifiable. Keep track of this and fix it.
+unified_ignorelist = [
+ 'FontHost',
+ 'SkBitmapProcState_matrixProcs.cpp',
+ 'SkBlitter_A8.cpp',
+ 'SkBlitter_ARGB32.cpp',
+ 'SkBlitter_Sprite.cpp',
+ 'SkCpu.cpp',
+ 'SkScan_Antihair.cpp',
+ 'SkScan_AntiPath.cpp',
+ 'SkParse.cpp',
+ 'SkPDFFont.cpp',
+ 'SkPDFDevice.cpp',
+ 'SkPDFType1Font.cpp',
+ 'SkPictureData.cpp',
+ 'SkColorSpace',
+ 'SkPath.cpp',
+ 'SkPathOpsDebug.cpp',
+ 'SkParsePath.cpp',
+ 'SkRecorder.cpp',
+ 'SkXfermode',
+ 'SkRTree.cpp',
+ 'SkVertices.cpp',
+ 'SkSLLexer.cpp',
+] + opt_allowlist
+
+def write_sources(f, values, indent):
+ def isignorelisted(value):
+ for item in unified_ignorelist:
+ if value.find(item) >= 0:
+ return True
+
+ return False
+
+ sources = {}
+ sources['nonunified'] = set()
+ sources['unified'] = set()
+
+ for item in values:
+ if isignorelisted(item):
+ sources['nonunified'].add(item)
+ else:
+ sources['unified'].add(item)
+
+ write_list(f, "UNIFIED_SOURCES", sources['unified'], indent)
+ write_list(f, "SOURCES", sources['nonunified'], indent)
+
+def write_list(f, name, values, indent):
+ def write_indent(indent):
+ for _ in range(indent):
+ f.write(' ')
+
+ val_list = uniq(sorted(values, key=lambda x: x.lower()))
+
+ if len(val_list) == 0:
+ return
+
+ write_indent(indent)
+ f.write(name + ' += [\n')
+ for val in val_list:
+ write_indent(indent + 4)
+ f.write('\'' + val + '\',\n')
+
+ write_indent(indent)
+ f.write(']\n')
+
+def write_mozbuild(sources):
+ filename = 'moz.build'
+ f = open(filename, 'w')
+
+ f.write(header)
+
+ write_sources(f, sources['common'], 0)
+ write_cflags(f, sources['common'], opt_allowlist, 'skia_opt_flags', 0)
+
+ f.write("if CONFIG['MOZ_ENABLE_SKIA_PDF']:\n")
+ write_sources(f, sources['pdf'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':\n")
+ write_sources(f, sources['android'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('cocoa', 'uikit'):\n")
+ write_sources(f, sources['mac'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':\n")
+ write_sources(f, sources['linux'], 4)
+
+ f.write("if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':\n")
+ write_list(f, "SOURCES", sources['win'], 4)
+
+ f.write("if CONFIG['INTEL_ARCHITECTURE']:\n")
+ write_sources(f, sources['intel'], 4)
+ write_cflags(f, sources['intel'], opt_allowlist, 'skia_opt_flags', 4)
+
+ if sources['arm']:
+ f.write("elif CONFIG['CPU_ARCH'] == 'arm' and CONFIG['CC_TYPE'] in ('clang', 'gcc'):\n")
+ write_sources(f, sources['arm'], 4)
+ write_cflags(f, sources['arm'], opt_allowlist, 'skia_opt_flags', 4)
+
+ if sources['arm64']:
+ f.write("elif CONFIG['CPU_ARCH'] == 'aarch64':\n")
+ write_sources(f, sources['arm64'], 4)
+ write_cflags(f, sources['arm64'], opt_allowlist, 'skia_opt_flags', 4)
+
+ if sources['none']:
+ f.write("else:\n")
+ write_sources(f, sources['none'], 4)
+
+ f.write(footer)
+
+ f.close()
+
+ print('Wrote ' + filename)
+
+def main():
+ platform_sources = generate_platform_sources()
+ separated_sources = generate_separated_sources(platform_sources)
+ write_mozbuild(separated_sources)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/gfx/skia/moz.build b/gfx/skia/moz.build
new file mode 100644
index 0000000000..1d92936263
--- /dev/null
+++ b/gfx/skia/moz.build
@@ -0,0 +1,620 @@
+
+#
+# ##### ####### # # # # # #
+# ## # # # # # # # # # # # #
+# ## # # # # # # # # # # #
+# ## #### # # # # # # # # # #
+# ## # # # ####### # # # ####### # ###
+# ## # # # # # # # # # # # ###
+# # ##### ####### # # ## ## # # # ###
+#
+# Seriously. You shouldn't even be looking at this file unless you're
+# debugging generate_mozbuild.py.
+#
+# DO NOT MODIFY THIS FILE IT IS AUTOGENERATED.
+#
+
+skia_opt_flags = []
+
+if CONFIG['MOZ_OPTIMIZE']:
+ if CONFIG['CC_TYPE'] == 'clang-cl':
+ skia_opt_flags += ['-O2']
+ elif CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ skia_opt_flags += ['-O3']
+
+UNIFIED_SOURCES += [
+ 'skia/src/base/SkArenaAlloc.cpp',
+ 'skia/src/base/SkBezierCurves.cpp',
+ 'skia/src/base/SkBlockAllocator.cpp',
+ 'skia/src/base/SkBuffer.cpp',
+ 'skia/src/base/SkContainers.cpp',
+ 'skia/src/base/SkCubics.cpp',
+ 'skia/src/base/SkDeque.cpp',
+ 'skia/src/base/SkFloatingPoint.cpp',
+ 'skia/src/base/SkHalf.cpp',
+ 'skia/src/base/SkMalloc.cpp',
+ 'skia/src/base/SkMathPriv.cpp',
+ 'skia/src/base/SkQuads.cpp',
+ 'skia/src/base/SkSafeMath.cpp',
+ 'skia/src/base/SkSemaphore.cpp',
+ 'skia/src/base/SkTDArray.cpp',
+ 'skia/src/base/SkThreadID.cpp',
+ 'skia/src/base/SkTSearch.cpp',
+ 'skia/src/base/SkUTF.cpp',
+ 'skia/src/base/SkUtils.cpp',
+ 'skia/src/codec/SkMasks.cpp',
+ 'skia/src/core/SkAAClip.cpp',
+ 'skia/src/core/SkAlphaRuns.cpp',
+ 'skia/src/core/SkAnalyticEdge.cpp',
+ 'skia/src/core/SkAnnotation.cpp',
+ 'skia/src/core/SkATrace.cpp',
+ 'skia/src/core/SkAutoPixmapStorage.cpp',
+ 'skia/src/core/SkBBHFactory.cpp',
+ 'skia/src/core/SkBigPicture.cpp',
+ 'skia/src/core/SkBitmap.cpp',
+ 'skia/src/core/SkBitmapCache.cpp',
+ 'skia/src/core/SkBitmapDevice.cpp',
+ 'skia/src/core/SkBlendMode.cpp',
+ 'skia/src/core/SkBlendModeBlender.cpp',
+ 'skia/src/core/SkBlurMask.cpp',
+ 'skia/src/core/SkBlurMF.cpp',
+ 'skia/src/core/SkCachedData.cpp',
+ 'skia/src/core/SkCanvas.cpp',
+ 'skia/src/core/SkCanvas_Raster.cpp',
+ 'skia/src/core/SkCanvasPriv.cpp',
+ 'skia/src/core/SkCapabilities.cpp',
+ 'skia/src/core/SkChromeRemoteGlyphCache.cpp',
+ 'skia/src/core/SkClipStack.cpp',
+ 'skia/src/core/SkClipStackDevice.cpp',
+ 'skia/src/core/SkColor.cpp',
+ 'skia/src/core/SkColorFilter.cpp',
+ 'skia/src/core/SkColorFilter_Matrix.cpp',
+ 'skia/src/core/SkCompressedDataUtils.cpp',
+ 'skia/src/core/SkContourMeasure.cpp',
+ 'skia/src/core/SkConvertPixels.cpp',
+ 'skia/src/core/SkCubicClipper.cpp',
+ 'skia/src/core/SkCubicMap.cpp',
+ 'skia/src/core/SkData.cpp',
+ 'skia/src/core/SkDataTable.cpp',
+ 'skia/src/core/SkDebug.cpp',
+ 'skia/src/core/SkDeferredDisplayList.cpp',
+ 'skia/src/core/SkDeferredDisplayListRecorder.cpp',
+ 'skia/src/core/SkDescriptor.cpp',
+ 'skia/src/core/SkDevice.cpp',
+ 'skia/src/core/SkDistanceFieldGen.cpp',
+ 'skia/src/core/SkDocument.cpp',
+ 'skia/src/core/SkDraw.cpp',
+ 'skia/src/core/SkDraw_atlas.cpp',
+ 'skia/src/core/SkDraw_text.cpp',
+ 'skia/src/core/SkDraw_vertices.cpp',
+ 'skia/src/core/SkDrawable.cpp',
+ 'skia/src/core/SkDrawBase.cpp',
+ 'skia/src/core/SkDrawLooper.cpp',
+ 'skia/src/core/SkDrawShadowInfo.cpp',
+ 'skia/src/core/SkEdge.cpp',
+ 'skia/src/core/SkEdgeBuilder.cpp',
+ 'skia/src/core/SkEdgeClipper.cpp',
+ 'skia/src/core/SkExecutor.cpp',
+ 'skia/src/core/SkFlattenable.cpp',
+ 'skia/src/core/SkFont.cpp',
+ 'skia/src/core/SkFont_serial.cpp',
+ 'skia/src/core/SkFontDescriptor.cpp',
+ 'skia/src/core/SkFontMetricsPriv.cpp',
+ 'skia/src/core/SkFontMgr.cpp',
+ 'skia/src/core/SkFontStream.cpp',
+ 'skia/src/core/SkGaussFilter.cpp',
+ 'skia/src/core/SkGeometry.cpp',
+ 'skia/src/core/SkGlobalInitialization_core.cpp',
+ 'skia/src/core/SkGlyph.cpp',
+ 'skia/src/core/SkGlyphRunPainter.cpp',
+ 'skia/src/core/SkGpuBlurUtils.cpp',
+ 'skia/src/core/SkGraphics.cpp',
+ 'skia/src/core/SkIDChangeListener.cpp',
+ 'skia/src/core/SkImageFilter.cpp',
+ 'skia/src/core/SkImageFilterCache.cpp',
+ 'skia/src/core/SkImageFilterTypes.cpp',
+ 'skia/src/core/SkImageGenerator.cpp',
+ 'skia/src/core/SkImageInfo.cpp',
+ 'skia/src/core/SkLatticeIter.cpp',
+ 'skia/src/core/SkLineClipper.cpp',
+ 'skia/src/core/SkLocalMatrixImageFilter.cpp',
+ 'skia/src/core/SkM44.cpp',
+ 'skia/src/core/SkMallocPixelRef.cpp',
+ 'skia/src/core/SkMask.cpp',
+ 'skia/src/core/SkMaskBlurFilter.cpp',
+ 'skia/src/core/SkMaskCache.cpp',
+ 'skia/src/core/SkMaskFilter.cpp',
+ 'skia/src/core/SkMaskGamma.cpp',
+ 'skia/src/core/SkMatrixInvert.cpp',
+ 'skia/src/core/SkMD5.cpp',
+ 'skia/src/core/SkMesh.cpp',
+ 'skia/src/core/SkMipmap.cpp',
+ 'skia/src/core/SkMipmapAccessor.cpp',
+ 'skia/src/core/SkMipmapBuilder.cpp',
+ 'skia/src/core/SkModeColorFilter.cpp',
+ 'skia/src/core/SkPaint.cpp',
+ 'skia/src/core/SkPaintPriv.cpp',
+ 'skia/src/core/SkPath_serial.cpp',
+ 'skia/src/core/SkPathBuilder.cpp',
+ 'skia/src/core/SkPathEffect.cpp',
+ 'skia/src/core/SkPathMeasure.cpp',
+ 'skia/src/core/SkPathRef.cpp',
+ 'skia/src/core/SkPathUtils.cpp',
+ 'skia/src/core/SkPicture.cpp',
+ 'skia/src/core/SkPictureFlat.cpp',
+ 'skia/src/core/SkPictureImageGenerator.cpp',
+ 'skia/src/core/SkPicturePlayback.cpp',
+ 'skia/src/core/SkPictureRecord.cpp',
+ 'skia/src/core/SkPictureRecorder.cpp',
+ 'skia/src/core/SkPixelRef.cpp',
+ 'skia/src/core/SkPixmap.cpp',
+ 'skia/src/core/SkPixmapDraw.cpp',
+ 'skia/src/core/SkPoint.cpp',
+ 'skia/src/core/SkPoint3.cpp',
+ 'skia/src/core/SkPromiseImageTexture.cpp',
+ 'skia/src/core/SkPtrRecorder.cpp',
+ 'skia/src/core/SkQuadClipper.cpp',
+ 'skia/src/core/SkRasterClip.cpp',
+ 'skia/src/core/SkRasterPipeline.cpp',
+ 'skia/src/core/SkRasterPipelineBlitter.cpp',
+ 'skia/src/core/SkReadBuffer.cpp',
+ 'skia/src/core/SkReadPixelsRec.cpp',
+ 'skia/src/core/SkRecord.cpp',
+ 'skia/src/core/SkRecordDraw.cpp',
+ 'skia/src/core/SkRecordedDrawable.cpp',
+ 'skia/src/core/SkRecordOpts.cpp',
+ 'skia/src/core/SkRecords.cpp',
+ 'skia/src/core/SkRect.cpp',
+ 'skia/src/core/SkRegion.cpp',
+ 'skia/src/core/SkRegion_path.cpp',
+ 'skia/src/core/SkResourceCache.cpp',
+ 'skia/src/core/SkRRect.cpp',
+ 'skia/src/core/SkRSXform.cpp',
+ 'skia/src/core/SkRuntimeEffect.cpp',
+ 'skia/src/core/SkScalar.cpp',
+ 'skia/src/core/SkScalerContext.cpp',
+ 'skia/src/core/SkScan.cpp',
+ 'skia/src/core/SkScan_AAAPath.cpp',
+ 'skia/src/core/SkScan_Hairline.cpp',
+ 'skia/src/core/SkScan_Path.cpp',
+ 'skia/src/core/SkScan_SAAPath.cpp',
+ 'skia/src/core/SkSharedMutex.cpp',
+ 'skia/src/core/SkSLTypeShared.cpp',
+ 'skia/src/core/SkSpecialImage.cpp',
+ 'skia/src/core/SkSpecialSurface.cpp',
+ 'skia/src/core/SkSpinlock.cpp',
+ 'skia/src/core/SkStream.cpp',
+ 'skia/src/core/SkStrike.cpp',
+ 'skia/src/core/SkStrikeCache.cpp',
+ 'skia/src/core/SkStrikeSpec.cpp',
+ 'skia/src/core/SkString.cpp',
+ 'skia/src/core/SkStringUtils.cpp',
+ 'skia/src/core/SkStroke.cpp',
+ 'skia/src/core/SkStrokeRec.cpp',
+ 'skia/src/core/SkStrokerPriv.cpp',
+ 'skia/src/core/SkSurfaceCharacterization.cpp',
+ 'skia/src/core/SkSwizzle.cpp',
+ 'skia/src/core/SkTaskGroup.cpp',
+ 'skia/src/core/SkTextBlob.cpp',
+ 'skia/src/core/SkTextBlobTrace.cpp',
+ 'skia/src/core/SkTime.cpp',
+ 'skia/src/core/SkTypeface.cpp',
+ 'skia/src/core/SkTypeface_remote.cpp',
+ 'skia/src/core/SkTypefaceCache.cpp',
+ 'skia/src/core/SkUnPreMultiply.cpp',
+ 'skia/src/core/SkVertState.cpp',
+ 'skia/src/core/SkVM.cpp',
+ 'skia/src/core/SkVMBlitter.cpp',
+ 'skia/src/core/SkWriteBuffer.cpp',
+ 'skia/src/core/SkWritePixelsRec.cpp',
+ 'skia/src/core/SkWriter32.cpp',
+ 'skia/src/core/SkYUVAInfo.cpp',
+ 'skia/src/core/SkYUVAPixmaps.cpp',
+ 'skia/src/core/SkYUVMath.cpp',
+ 'skia/src/core/SkYUVPlanesCache.cpp',
+ 'skia/src/effects/imagefilters/SkBlurImageFilter.cpp',
+ 'skia/src/effects/imagefilters/SkComposeImageFilter.cpp',
+ 'skia/src/effects/SkDashPathEffect.cpp',
+ 'skia/src/encode/SkEncoder.cpp',
+ 'skia/src/encode/SkImageEncoder.cpp',
+ 'skia/src/image/SkImage.cpp',
+ 'skia/src/image/SkImage_Base.cpp',
+ 'skia/src/image/SkImage_Lazy.cpp',
+ 'skia/src/image/SkImage_Raster.cpp',
+ 'skia/src/image/SkRescaleAndReadPixels.cpp',
+ 'skia/src/image/SkSurface.cpp',
+ 'skia/src/image/SkSurface_Base.cpp',
+ 'skia/src/image/SkSurface_Null.cpp',
+ 'skia/src/image/SkSurface_Raster.cpp',
+ 'skia/src/lazy/SkDiscardableMemoryPool.cpp',
+ 'skia/src/pathops/SkAddIntersections.cpp',
+ 'skia/src/pathops/SkDConicLineIntersection.cpp',
+ 'skia/src/pathops/SkDCubicLineIntersection.cpp',
+ 'skia/src/pathops/SkDCubicToQuads.cpp',
+ 'skia/src/pathops/SkDLineIntersection.cpp',
+ 'skia/src/pathops/SkDQuadLineIntersection.cpp',
+ 'skia/src/pathops/SkIntersections.cpp',
+ 'skia/src/pathops/SkOpAngle.cpp',
+ 'skia/src/pathops/SkOpBuilder.cpp',
+ 'skia/src/pathops/SkOpCoincidence.cpp',
+ 'skia/src/pathops/SkOpContour.cpp',
+ 'skia/src/pathops/SkOpCubicHull.cpp',
+ 'skia/src/pathops/SkOpEdgeBuilder.cpp',
+ 'skia/src/pathops/SkOpSegment.cpp',
+ 'skia/src/pathops/SkOpSpan.cpp',
+ 'skia/src/pathops/SkPathOpsAsWinding.cpp',
+ 'skia/src/pathops/SkPathOpsCommon.cpp',
+ 'skia/src/pathops/SkPathOpsConic.cpp',
+ 'skia/src/pathops/SkPathOpsCubic.cpp',
+ 'skia/src/pathops/SkPathOpsCurve.cpp',
+ 'skia/src/pathops/SkPathOpsLine.cpp',
+ 'skia/src/pathops/SkPathOpsOp.cpp',
+ 'skia/src/pathops/SkPathOpsQuad.cpp',
+ 'skia/src/pathops/SkPathOpsRect.cpp',
+ 'skia/src/pathops/SkPathOpsSimplify.cpp',
+ 'skia/src/pathops/SkPathOpsTightBounds.cpp',
+ 'skia/src/pathops/SkPathOpsTSect.cpp',
+ 'skia/src/pathops/SkPathOpsTypes.cpp',
+ 'skia/src/pathops/SkPathOpsWinding.cpp',
+ 'skia/src/pathops/SkPathWriter.cpp',
+ 'skia/src/pathops/SkReduceOrder.cpp',
+ 'skia/src/ports/SkDiscardableMemory_none.cpp',
+ 'skia/src/ports/SkFontMgr_empty_factory.cpp',
+ 'skia/src/ports/SkGlobalInitialization_default.cpp',
+ 'skia/src/ports/SkImageGenerator_none.cpp',
+ 'skia/src/ports/SkMemory_mozalloc.cpp',
+ 'skia/src/ports/SkOSFile_stdio.cpp',
+ 'skia/src/sfnt/SkOTTable_name.cpp',
+ 'skia/src/sfnt/SkOTUtils.cpp',
+ 'skia/src/shaders/gradients/SkGradientShader.cpp',
+ 'skia/src/shaders/gradients/SkGradientShaderBase.cpp',
+ 'skia/src/shaders/gradients/SkLinearGradient.cpp',
+ 'skia/src/shaders/gradients/SkRadialGradient.cpp',
+ 'skia/src/shaders/gradients/SkSweepGradient.cpp',
+ 'skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp',
+ 'skia/src/shaders/SkBitmapProcShader.cpp',
+ 'skia/src/shaders/SkColorFilterShader.cpp',
+ 'skia/src/shaders/SkColorShader.cpp',
+ 'skia/src/shaders/SkComposeShader.cpp',
+ 'skia/src/shaders/SkCoordClampShader.cpp',
+ 'skia/src/shaders/SkEmptyShader.cpp',
+ 'skia/src/shaders/SkGainmapShader.cpp',
+ 'skia/src/shaders/SkImageShader.cpp',
+ 'skia/src/shaders/SkLocalMatrixShader.cpp',
+ 'skia/src/shaders/SkPerlinNoiseShader.cpp',
+ 'skia/src/shaders/SkPictureShader.cpp',
+ 'skia/src/shaders/SkShader.cpp',
+ 'skia/src/shaders/SkTransformShader.cpp',
+ 'skia/src/sksl/analysis/SkSLCanExitWithoutReturningValue.cpp',
+ 'skia/src/sksl/analysis/SkSLCheckProgramStructure.cpp',
+ 'skia/src/sksl/analysis/SkSLFinalizationChecks.cpp',
+ 'skia/src/sksl/analysis/SkSLGetLoopControlFlowInfo.cpp',
+ 'skia/src/sksl/analysis/SkSLGetLoopUnrollInfo.cpp',
+ 'skia/src/sksl/analysis/SkSLGetReturnComplexity.cpp',
+ 'skia/src/sksl/analysis/SkSLHasSideEffects.cpp',
+ 'skia/src/sksl/analysis/SkSLIsConstantExpression.cpp',
+ 'skia/src/sksl/analysis/SkSLIsDynamicallyUniformExpression.cpp',
+ 'skia/src/sksl/analysis/SkSLIsSameExpressionTree.cpp',
+ 'skia/src/sksl/analysis/SkSLIsTrivialExpression.cpp',
+ 'skia/src/sksl/analysis/SkSLProgramUsage.cpp',
+ 'skia/src/sksl/analysis/SkSLSwitchCaseContainsExit.cpp',
+ 'skia/src/sksl/analysis/SkSLSymbolTableStackBuilder.cpp',
+ 'skia/src/sksl/codegen/SkSLRasterPipelineBuilder.cpp',
+ 'skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.cpp',
+ 'skia/src/sksl/codegen/SkSLVMCodeGenerator.cpp',
+ 'skia/src/sksl/dsl/DSLBlock.cpp',
+ 'skia/src/sksl/dsl/DSLCase.cpp',
+ 'skia/src/sksl/dsl/DSLCore.cpp',
+ 'skia/src/sksl/dsl/DSLExpression.cpp',
+ 'skia/src/sksl/dsl/DSLFunction.cpp',
+ 'skia/src/sksl/dsl/DSLLayout.cpp',
+ 'skia/src/sksl/dsl/DSLStatement.cpp',
+ 'skia/src/sksl/dsl/DSLType.cpp',
+ 'skia/src/sksl/dsl/DSLVar.cpp',
+ 'skia/src/sksl/dsl/priv/DSLWriter.cpp',
+ 'skia/src/sksl/ir/SkSLBinaryExpression.cpp',
+ 'skia/src/sksl/ir/SkSLBlock.cpp',
+ 'skia/src/sksl/ir/SkSLChildCall.cpp',
+ 'skia/src/sksl/ir/SkSLConstructor.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorArray.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorArrayCast.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorCompound.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorCompoundCast.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorMatrixResize.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorScalarCast.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorSplat.cpp',
+ 'skia/src/sksl/ir/SkSLConstructorStruct.cpp',
+ 'skia/src/sksl/ir/SkSLDiscardStatement.cpp',
+ 'skia/src/sksl/ir/SkSLDoStatement.cpp',
+ 'skia/src/sksl/ir/SkSLExpression.cpp',
+ 'skia/src/sksl/ir/SkSLExpressionStatement.cpp',
+ 'skia/src/sksl/ir/SkSLFieldAccess.cpp',
+ 'skia/src/sksl/ir/SkSLForStatement.cpp',
+ 'skia/src/sksl/ir/SkSLFunctionCall.cpp',
+ 'skia/src/sksl/ir/SkSLFunctionDeclaration.cpp',
+ 'skia/src/sksl/ir/SkSLFunctionDefinition.cpp',
+ 'skia/src/sksl/ir/SkSLIfStatement.cpp',
+ 'skia/src/sksl/ir/SkSLIndexExpression.cpp',
+ 'skia/src/sksl/ir/SkSLInterfaceBlock.cpp',
+ 'skia/src/sksl/ir/SkSLLayout.cpp',
+ 'skia/src/sksl/ir/SkSLLiteral.cpp',
+ 'skia/src/sksl/ir/SkSLModifiers.cpp',
+ 'skia/src/sksl/ir/SkSLPostfixExpression.cpp',
+ 'skia/src/sksl/ir/SkSLPrefixExpression.cpp',
+ 'skia/src/sksl/ir/SkSLProgram.cpp',
+ 'skia/src/sksl/ir/SkSLSetting.cpp',
+ 'skia/src/sksl/ir/SkSLSwitchStatement.cpp',
+ 'skia/src/sksl/ir/SkSLSwizzle.cpp',
+ 'skia/src/sksl/ir/SkSLSymbolTable.cpp',
+ 'skia/src/sksl/ir/SkSLTernaryExpression.cpp',
+ 'skia/src/sksl/ir/SkSLType.cpp',
+ 'skia/src/sksl/ir/SkSLTypeReference.cpp',
+ 'skia/src/sksl/ir/SkSLVarDeclarations.cpp',
+ 'skia/src/sksl/ir/SkSLVariable.cpp',
+ 'skia/src/sksl/ir/SkSLVariableReference.cpp',
+ 'skia/src/sksl/SkSLAnalysis.cpp',
+ 'skia/src/sksl/SkSLBuiltinTypes.cpp',
+ 'skia/src/sksl/SkSLCompiler.cpp',
+ 'skia/src/sksl/SkSLConstantFolder.cpp',
+ 'skia/src/sksl/SkSLContext.cpp',
+ 'skia/src/sksl/SkSLErrorReporter.cpp',
+ 'skia/src/sksl/SkSLInliner.cpp',
+ 'skia/src/sksl/SkSLIntrinsicList.cpp',
+ 'skia/src/sksl/SkSLMangler.cpp',
+ 'skia/src/sksl/SkSLModuleLoader.cpp',
+ 'skia/src/sksl/SkSLOperator.cpp',
+ 'skia/src/sksl/SkSLOutputStream.cpp',
+ 'skia/src/sksl/SkSLParser.cpp',
+ 'skia/src/sksl/SkSLPool.cpp',
+ 'skia/src/sksl/SkSLPosition.cpp',
+ 'skia/src/sksl/SkSLSampleUsage.cpp',
+ 'skia/src/sksl/SkSLString.cpp',
+ 'skia/src/sksl/SkSLThreadContext.cpp',
+ 'skia/src/sksl/SkSLUtil.cpp',
+ 'skia/src/sksl/tracing/SkRPDebugTrace.cpp',
+ 'skia/src/sksl/tracing/SkSLTraceHook.cpp',
+ 'skia/src/sksl/tracing/SkVMDebugTrace.cpp',
+ 'skia/src/sksl/tracing/SkVMDebugTracePlayer.cpp',
+ 'skia/src/sksl/transform/SkSLAddConstToVarModifiers.cpp',
+ 'skia/src/sksl/transform/SkSLEliminateDeadFunctions.cpp',
+ 'skia/src/sksl/transform/SkSLEliminateDeadGlobalVariables.cpp',
+ 'skia/src/sksl/transform/SkSLEliminateDeadLocalVariables.cpp',
+ 'skia/src/sksl/transform/SkSLEliminateEmptyStatements.cpp',
+ 'skia/src/sksl/transform/SkSLEliminateUnreachableCode.cpp',
+ 'skia/src/sksl/transform/SkSLFindAndDeclareBuiltinFunctions.cpp',
+ 'skia/src/sksl/transform/SkSLFindAndDeclareBuiltinVariables.cpp',
+ 'skia/src/sksl/transform/SkSLRenamePrivateSymbols.cpp',
+ 'skia/src/sksl/transform/SkSLReplaceConstVarsWithLiterals.cpp',
+ 'skia/src/sksl/transform/SkSLRewriteIndexedSwizzle.cpp',
+ 'skia/src/text/GlyphRun.cpp',
+ 'skia/src/text/StrikeForGPU.cpp',
+ 'skia/src/utils/mac/SkCTFont.cpp',
+ 'skia/src/utils/SkBase64.cpp',
+ 'skia/src/utils/SkCharToGlyphCache.cpp',
+ 'skia/src/utils/SkClipStackUtils.cpp',
+ 'skia/src/utils/SkCustomTypeface.cpp',
+ 'skia/src/utils/SkDashPath.cpp',
+ 'skia/src/utils/SkEventTracer.cpp',
+ 'skia/src/utils/SkFloatToDecimal.cpp',
+ 'skia/src/utils/SkGaussianColorFilter.cpp',
+ 'skia/src/utils/SkMatrix22.cpp',
+ 'skia/src/utils/SkOrderedFontMgr.cpp',
+ 'skia/src/utils/SkOSPath.cpp',
+ 'skia/src/utils/SkPatchUtils.cpp',
+ 'skia/src/utils/SkPolyUtils.cpp',
+ 'skia/src/utils/SkShaderUtils.cpp',
+ 'skia/src/utils/SkShadowTessellator.cpp',
+ 'skia/src/utils/SkShadowUtils.cpp',
+ 'skia/src/utils/SkTextUtils.cpp',
+ 'skia/src/utils/SkVMVisualizer.cpp',
+ 'skia/src/utils/win/SkAutoCoInitialize.cpp',
+ 'skia/src/utils/win/SkDWrite.cpp',
+ 'skia/src/utils/win/SkDWriteFontFileStream.cpp',
+ 'skia/src/utils/win/SkDWriteGeometrySink.cpp',
+ 'skia/src/utils/win/SkHRESULT.cpp',
+ 'skia/src/utils/win/SkIStream.cpp',
+]
+SOURCES += [
+ 'skia/modules/skcms/skcms.cc',
+ 'skia/src/core/SkBitmapProcState.cpp',
+ 'skia/src/core/SkBitmapProcState_matrixProcs.cpp',
+ 'skia/src/core/SkBlitRow_D32.cpp',
+ 'skia/src/core/SkBlitter.cpp',
+ 'skia/src/core/SkBlitter_A8.cpp',
+ 'skia/src/core/SkBlitter_ARGB32.cpp',
+ 'skia/src/core/SkBlitter_Sprite.cpp',
+ 'skia/src/core/SkColorSpace.cpp',
+ 'skia/src/core/SkColorSpaceXformSteps.cpp',
+ 'skia/src/core/SkCpu.cpp',
+ 'skia/src/core/SkMatrix.cpp',
+ 'skia/src/core/SkOpts.cpp',
+ 'skia/src/core/SkOpts_erms.cpp',
+ 'skia/src/core/SkPath.cpp',
+ 'skia/src/core/SkPictureData.cpp',
+ 'skia/src/core/SkRecorder.cpp',
+ 'skia/src/core/SkRTree.cpp',
+ 'skia/src/core/SkScan_Antihair.cpp',
+ 'skia/src/core/SkScan_AntiPath.cpp',
+ 'skia/src/core/SkSpriteBlitter_ARGB32.cpp',
+ 'skia/src/core/SkVertices.cpp',
+ 'skia/src/core/SkXfermode.cpp',
+ 'skia/src/core/SkXfermodeInterpretation.cpp',
+ 'skia/src/pathops/SkPathOpsDebug.cpp',
+ 'skia/src/sksl/SkSLLexer.cpp',
+ 'skia/src/utils/SkParse.cpp',
+ 'skia/src/utils/SkParsePath.cpp',
+]
+SOURCES['skia/modules/skcms/skcms.cc'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBitmapProcState.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBitmapProcState_matrixProcs.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitRow_D32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_A8.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_ARGB32.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkBlitter_Sprite.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkMatrix.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkOpts.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkOpts_erms.cpp'].flags += skia_opt_flags
+SOURCES['skia/src/core/SkSpriteBlitter_ARGB32.cpp'].flags += skia_opt_flags
+if CONFIG['MOZ_ENABLE_SKIA_PDF']:
+ UNIFIED_SOURCES += [
+ 'skia/src/pdf/SkClusterator.cpp',
+ 'skia/src/pdf/SkDeflate.cpp',
+ 'skia/src/pdf/SkJpegInfo.cpp',
+ 'skia/src/pdf/SkKeyedImage.cpp',
+ 'skia/src/pdf/SkPDFBitmap.cpp',
+ 'skia/src/pdf/SkPDFDocument.cpp',
+ 'skia/src/pdf/SkPDFFormXObject.cpp',
+ 'skia/src/pdf/SkPDFGradientShader.cpp',
+ 'skia/src/pdf/SkPDFGraphicStackState.cpp',
+ 'skia/src/pdf/SkPDFGraphicState.cpp',
+ 'skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp',
+ 'skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp',
+ 'skia/src/pdf/SkPDFMetadata.cpp',
+ 'skia/src/pdf/SkPDFResourceDict.cpp',
+ 'skia/src/pdf/SkPDFShader.cpp',
+ 'skia/src/pdf/SkPDFSubsetFont.cpp',
+ 'skia/src/pdf/SkPDFTag.cpp',
+ 'skia/src/pdf/SkPDFTypes.cpp',
+ 'skia/src/pdf/SkPDFUtils.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/pdf/SkPDFDevice.cpp',
+ 'skia/src/pdf/SkPDFFont.cpp',
+ 'skia/src/pdf/SkPDFType1Font.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_android.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('cocoa', 'uikit'):
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_stdio.cpp',
+ 'skia/src/ports/SkFontMgr_mac_ct.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ 'skia/src/ports/SkScalerContext_mac_ct.cpp',
+ 'skia/src/ports/SkTypeface_mac_ct.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':
+ UNIFIED_SOURCES += [
+ 'skia/src/ports/SkDebug_stdio.cpp',
+ 'skia/src/ports/SkOSFile_posix.cpp',
+ 'skia/src/ports/SkOSLibrary_posix.cpp',
+ ]
+ SOURCES += [
+ 'skia/src/ports/SkFontHost_cairo.cpp',
+ 'skia/src/ports/SkFontHost_FreeType_common.cpp',
+ ]
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ SOURCES += [
+ 'skia/src/ports/SkDebug_win.cpp',
+ 'skia/src/ports/SkFontHost_win.cpp',
+ 'skia/src/ports/SkFontMgr_win_dw.cpp',
+ 'skia/src/ports/SkOSFile_win.cpp',
+ 'skia/src/ports/SkOSLibrary_win.cpp',
+ 'skia/src/ports/SkScalerContext_win_dw.cpp',
+ 'skia/src/ports/SkTypeface_win_dw.cpp',
+ ]
+if CONFIG['INTEL_ARCHITECTURE']:
+ SOURCES += [
+ 'skia/src/opts/SkOpts_avx.cpp',
+ 'skia/src/opts/SkOpts_hsw.cpp',
+ 'skia/src/opts/SkOpts_skx.cpp',
+ 'skia/src/opts/SkOpts_sse42.cpp',
+ 'skia/src/opts/SkOpts_ssse3.cpp',
+ ]
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_hsw.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_skx.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += skia_opt_flags
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += skia_opt_flags
+elif CONFIG['CPU_ARCH'] == 'aarch64':
+ SOURCES += [
+ 'skia/src/opts/SkOpts_crc32.cpp',
+ ]
+ SOURCES['skia/src/opts/SkOpts_crc32.cpp'].flags += skia_opt_flags
+
+
+# We allow warnings for third-party code that can be updated from upstream.
+AllowCompilerWarnings()
+
+FINAL_LIBRARY = 'gkmedias'
+LOCAL_INCLUDES += [
+ 'skia',
+]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ DEFINES['UNICODE'] = True
+ DEFINES['_UNICODE'] = True
+ UNIFIED_SOURCES += [
+ 'skia/src/fonts/SkFontMgr_indirect.cpp',
+ 'skia/src/fonts/SkRemotableFontMgr.cpp',
+ ]
+
+# We should autogenerate these SSE related flags.
+
+if CONFIG['INTEL_ARCHITECTURE']:
+ SOURCES['skia/src/opts/SkOpts_ssse3.cpp'].flags += ['-Dskvx=skvx_ssse3', '-mssse3']
+ SOURCES['skia/src/opts/SkOpts_sse42.cpp'].flags += ['-Dskvx=skvx_sse42', '-msse4.2']
+ SOURCES['skia/src/opts/SkOpts_avx.cpp'].flags += ['-Dskvx=skvx_avx', '-mavx']
+ SOURCES['skia/src/opts/SkOpts_hsw.cpp'].flags += ['-Dskvx=skvx_hsw', '-mavx2', '-mf16c', '-mfma']
+ if not CONFIG["MOZ_CODE_COVERAGE"]:
+ SOURCES['skia/src/opts/SkOpts_skx.cpp'].flags += ['-Dskvx=skvx_skx', '-mavx512f', '-mavx512dq', '-mavx512cd', '-mavx512bw', '-mavx512vl']
+elif CONFIG['CPU_ARCH'] == 'aarch64' and CONFIG['CC_TYPE'] in ('clang', 'gcc'):
+ SOURCES['skia/src/opts/SkOpts_crc32.cpp'].flags += ['-Dskvx=skvx_crc32', '-march=armv8-a+crc']
+
+DEFINES['MOZ_SKIA'] = True
+
+DEFINES['SKIA_IMPLEMENTATION'] = 1
+
+DEFINES['SK_PDF_USE_HARFBUZZ_SUBSETTING'] = 1
+
+if CONFIG['MOZ_TREE_FREETYPE']:
+ DEFINES['SK_CAN_USE_DLOPEN'] = 0
+
+# Suppress warnings in third-party code.
+CXXFLAGS += [
+ '-Wno-deprecated-declarations',
+ '-Wno-overloaded-virtual',
+ '-Wno-sign-compare',
+ '-Wno-unreachable-code',
+ '-Wno-unused-function',
+]
+if CONFIG['CC_TYPE'] == 'gcc':
+ CXXFLAGS += [
+ '-Wno-logical-op',
+ '-Wno-maybe-uninitialized',
+ ]
+if CONFIG['CC_TYPE'] in ('clang', 'clang-cl'):
+ CXXFLAGS += [
+ '-Wno-implicit-fallthrough',
+ '-Wno-inconsistent-missing-override',
+ '-Wno-macro-redefined',
+ '-Wno-unused-private-field',
+ ]
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] in ('gtk', 'android'):
+ LOCAL_INCLUDES += [
+ "/gfx/cairo/cairo/src",
+ ]
+ CXXFLAGS += CONFIG['CAIRO_FT_CFLAGS']
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk':
+ CXXFLAGS += CONFIG['MOZ_PANGO_CFLAGS']
+
+if CONFIG['CPU_ARCH'] in ('mips32', 'mips64'):
+ # The skia code uses `mips` as a variable, but it's a builtin preprocessor
+ # macro on mips that expands to `1`.
+ DEFINES['mips'] = False
+
+# Work around bug 1841199.
+if CONFIG['CPU_ARCH'] in ('mips32', 'mips64', 'ppc64'):
+ DEFINES['musttail'] = 'nomusttail'
diff --git a/gfx/skia/patches/README b/gfx/skia/patches/README
new file mode 100644
index 0000000000..8fd2c5396a
--- /dev/null
+++ b/gfx/skia/patches/README
@@ -0,0 +1,2 @@
+We no longer keep a local patch queue of patches against upstream. The protocol now
+is to upstream all patches before they are landed in mozilla-central.
diff --git a/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch b/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch
new file mode 100644
index 0000000000..f8e76dbb90
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-687189-Implement-SkPaint-getPosTextPath.patch
@@ -0,0 +1,66 @@
+From 27a914815e757ed12523edf968c9da134dabeaf8 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:10:44 -0400
+Subject: [PATCH 01/10] Bug 755869 - [4] Re-apply bug 687189 - Implement
+ SkPaint::getPosTextPath r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPaint.h | 3 +++
+ gfx/skia/src/core/SkPaint.cpp | 27 +++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+index 1930db1..ff37d77 100644
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -813,6 +813,9 @@ public:
+ void getTextPath(const void* text, size_t length, SkScalar x, SkScalar y,
+ SkPath* path) const;
+
++ void getPosTextPath(const void* text, size_t length,
++ const SkPoint pos[], SkPath* path) const;
++
+ #ifdef SK_BUILD_FOR_ANDROID
+ const SkGlyph& getUnicharMetrics(SkUnichar);
+ const SkGlyph& getGlyphMetrics(uint16_t);
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+index 1b74fa1..4c119aa 100644
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1355,6 +1355,33 @@ void SkPaint::getTextPath(const void* textData, size_t length,
+ }
+ }
+
++void SkPaint::getPosTextPath(const void* textData, size_t length,
++ const SkPoint pos[], SkPath* path) const {
++ SkASSERT(length == 0 || textData != NULL);
++
++ const char* text = (const char*)textData;
++ if (text == NULL || length == 0 || path == NULL) {
++ return;
++ }
++
++ SkTextToPathIter iter(text, length, *this, false);
++ SkMatrix matrix;
++ SkPoint prevPos;
++ prevPos.set(0, 0);
++
++ matrix.setScale(iter.getPathScale(), iter.getPathScale());
++ path->reset();
++
++ unsigned int i = 0;
++ const SkPath* iterPath;
++ while ((iterPath = iter.next(NULL)) != NULL) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ i++;
++ }
++}
++
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch b/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch
new file mode 100644
index 0000000000..8fe0135fbb
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-777614-Re-add-our-SkUserConfig.h-r-nrc.patch
@@ -0,0 +1,34 @@
+From 2dd8c789fc4ad3b5323c2c29f3e982d185f5b5d9 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 13 Sep 2012 22:33:38 -0400
+Subject: [PATCH 1/9] Bug 777614 - Re-add our SkUserConfig.h r=nrc
+
+---
+ gfx/skia/include/config/SkUserConfig.h | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 353272c..fbfbfe0 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -184,5 +184,16 @@
+ directories from your include search path when you're not building the GPU
+ backend. Defaults to 1 (build the GPU code).
+ */
+-//#define SK_SUPPORT_GPU 1
++#define SK_SUPPORT_GPU 0
++
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch b/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch
new file mode 100644
index 0000000000..20155977e2
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-803063-Skia-cross-compilation-for-Windows-fails-.patch
@@ -0,0 +1,26 @@
+From 81ff1a8f5c2a7cc9e8b853101b995433a0c0fa37 Mon Sep 17 00:00:00 2001
+From: Jacek Caban <jacek@codeweavers.com>
+Date: Thu, 18 Oct 2012 15:25:08 +0200
+Subject: [PATCH] Bug 803063 - Skia cross compilation for Windows fails on
+ case sensitive OS
+
+---
+ gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp b/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
+index 370616e..b647ada 100644
+--- a/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
++++ b/gfx/skia/src/core/SkAdvancedTypefaceMetrics.cpp
+@@ -13,7 +13,7 @@
+ SK_DEFINE_INST_COUNT(SkAdvancedTypefaceMetrics)
+
+ #if defined(SK_BUILD_FOR_WIN)
+-#include <DWrite.h>
++#include <dwrite.h>
+ #endif
+
+ #if defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_ANDROID)
+--
+1.7.8.6
+
diff --git a/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch b/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch
new file mode 100644
index 0000000000..aa1fadb435
--- /dev/null
+++ b/gfx/skia/patches/archive/0001-Bug-895086-Remove-unused-find_from_uniqueID-function.patch
@@ -0,0 +1,38 @@
+From 58861c38751adf1f4ef3f67f8e85f5c36f1c43a5 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Wed, 17 Jul 2013 16:28:07 -0400
+Subject: [PATCH] Bug 895086 - Remove unused find_from_uniqueID() function from
+ SkFontHost_linux
+
+---
+ gfx/skia/src/ports/SkFontHost_linux.cpp | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+diff --git a/gfx/skia/src/ports/SkFontHost_linux.cpp b/gfx/skia/src/ports/SkFontHost_linux.cpp
+index df21014..05b73dc 100644
+--- a/gfx/skia/src/ports/SkFontHost_linux.cpp
++++ b/gfx/skia/src/ports/SkFontHost_linux.cpp
+@@ -117,20 +117,6 @@ static FamilyRec* find_family(const SkTypeface* member) {
+ return NULL;
+ }
+
+-static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
+- FamilyRec* curr = gFamilyHead;
+- while (curr != NULL) {
+- for (int i = 0; i < 4; i++) {
+- SkTypeface* face = curr->fFaces[i];
+- if (face != NULL && face->uniqueID() == uniqueID) {
+- return face;
+- }
+- }
+- curr = curr->fNext;
+- }
+- return NULL;
+-}
+-
+ /* Remove reference to this face from its family. If the resulting family
+ is empty (has no faces), return that family, otherwise return NULL
+ */
+--
+1.8.3.1
+
diff --git a/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch b/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch
new file mode 100644
index 0000000000..d396b4ed12
--- /dev/null
+++ b/gfx/skia/patches/archive/0002-Bug-688366-Dont-invalidate-all-radial-gradients.patch
@@ -0,0 +1,30 @@
+From f310d7e8b8d9cf6870c739650324bb585b591c0c Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:11:32 -0400
+Subject: [PATCH 02/10] Bug 755869 - [5] Re-apply bug 688366 - Fix Skia
+ marking radial gradients with the same radius as
+ invalid. r=mattwoodrow
+
+---
+ gfx/skia/src/effects/SkGradientShader.cpp | 5 ++++-
+ 1 files changed, 4 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+index 6de820b..59ba48c 100644
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1911,7 +1911,10 @@ public:
+ SkPMColor* SK_RESTRICT dstC = dstCParam;
+
+ // Zero difference between radii: fill with transparent black.
+- if (fDiffRadius == 0) {
++ // TODO: Is removing this actually correct? Two circles with the
++ // same radius, but different centers doesn't sound like it
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch b/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch
new file mode 100644
index 0000000000..6ac2c9179d
--- /dev/null
+++ b/gfx/skia/patches/archive/0002-Bug-848491-Re-apply-Bug-795549-Move-TileProc-functio.patch
@@ -0,0 +1,50 @@
+From: George Wright <george@mozilla.com>
+Date: Mon, 14 Jan 2013 17:59:09 -0500
+Subject: Bug 848491 - Re-apply Bug 795549 - Move TileProc functions into their own file to ensure they only exist once in a library
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index b9dbf1b..729ce4e 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -37,34 +37,9 @@ static inline void sk_memset32_dither(uint32_t dst[], uint32_t v0, uint32_t v1,
+ }
+ }
+
+-// Clamp
+-
+-static inline SkFixed clamp_tileproc(SkFixed x) {
+- return SkClampMax(x, 0xFFFF);
+-}
+-
+-// Repeat
+-
+-static inline SkFixed repeat_tileproc(SkFixed x) {
+- return x & 0xFFFF;
+-}
+-
+-// Mirror
+-
+-// Visual Studio 2010 (MSC_VER=1600) optimizes bit-shift code incorrectly.
+-// See http://code.google.com/p/skia/issues/detail?id=472
+-#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+-#pragma optimize("", off)
+-#endif
+-
+-static inline SkFixed mirror_tileproc(SkFixed x) {
+- int s = x << 15 >> 31;
+- return (x ^ s) & 0xFFFF;
+-}
+-
+-#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+-#pragma optimize("", on)
+-#endif
++SkFixed clamp_tileproc(SkFixed x);
++SkFixed repeat_tileproc(SkFixed x);
++SkFixed mirror_tileproc(SkFixed x);
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch b/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch
new file mode 100644
index 0000000000..dc52a8d3d0
--- /dev/null
+++ b/gfx/skia/patches/archive/0003-SkUserConfig-for-Mozilla.patch
@@ -0,0 +1,39 @@
+From ef53776c06cffc7607c3777702f93e04c0852981 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:13:49 -0400
+Subject: [PATCH 03/10] Bug 755869 - [6] Re-apply SkUserConfig (no
+ original bug) r=mattwoodrow
+
+---
+ gfx/skia/include/config/SkUserConfig.h | 10 ++++++++++
+ 1 files changed, 10 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 9fdbd0a..f98ba85 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -156,6 +156,10 @@
+ //#define SK_SUPPORT_UNITTEST
+ #endif
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
+ /* If your system embeds skia and has complex event logging, define this
+ symbol to name a file that maps the following macros to your system's
+ equivalents:
+@@ -177,4 +181,10 @@
+ #define SK_A32_SHIFT 24
+ #endif
+
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch b/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch
new file mode 100644
index 0000000000..179aeded5d
--- /dev/null
+++ b/gfx/skia/patches/archive/0004-Bug-722011-Fix-trailing-commas-in-enums.patch
@@ -0,0 +1,280 @@
+From 81d61682a94d47be5b47fb7882ea7e7c7e6c3351 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:15:28 -0400
+Subject: [PATCH 04/10] Bug 755869 - [7] Re-apply bug 722011 - Fix
+ trailing commas at end of enum lists r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkAdvancedTypefaceMetrics.h | 8 ++++----
+ gfx/skia/include/core/SkBlitRow.h | 2 +-
+ gfx/skia/include/core/SkCanvas.h | 2 +-
+ gfx/skia/include/core/SkDevice.h | 2 +-
+ gfx/skia/include/core/SkDeviceProfile.h | 4 ++--
+ gfx/skia/include/core/SkFlattenable.h | 2 +-
+ gfx/skia/include/core/SkFontHost.h | 4 ++--
+ gfx/skia/include/core/SkMaskFilter.h | 2 +-
+ gfx/skia/include/core/SkPaint.h | 4 ++--
+ gfx/skia/include/core/SkScalerContext.h | 9 +++++----
+ gfx/skia/include/core/SkTypes.h | 2 +-
+ gfx/skia/include/effects/SkLayerDrawLooper.h | 2 +-
+ gfx/skia/src/core/SkBitmap.cpp | 2 +-
+ gfx/skia/src/core/SkGlyphCache.cpp | 2 +-
+ 14 files changed, 24 insertions(+), 23 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+index 09fc9a9..5ffdb45 100644
+--- a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
++++ b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+@@ -34,7 +34,7 @@ public:
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+- kNotEmbeddable_Font,
++ kNotEmbeddable_Font
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font or
+@@ -56,7 +56,7 @@ public:
+ kItalic_Style = 0x00040,
+ kAllCaps_Style = 0x10000,
+ kSmallCaps_Style = 0x20000,
+- kForceBold_Style = 0x40000,
++ kForceBold_Style = 0x40000
+ };
+ uint16_t fStyle; // Font style characteristics.
+ int16_t fItalicAngle; // Counterclockwise degrees from vertical of the
+@@ -75,7 +75,7 @@ public:
+ kHAdvance_PerGlyphInfo = 0x1, // Populate horizontal advance data.
+ kVAdvance_PerGlyphInfo = 0x2, // Populate vertical advance data.
+ kGlyphNames_PerGlyphInfo = 0x4, // Populate glyph names (Type 1 only).
+- kToUnicode_PerGlyphInfo = 0x8, // Populate ToUnicode table, ignored
++ kToUnicode_PerGlyphInfo = 0x8 // Populate ToUnicode table, ignored
+ // for Type 1 fonts
+ };
+
+@@ -84,7 +84,7 @@ public:
+ enum MetricType {
+ kDefault, // Default advance: fAdvance.count = 1
+ kRange, // Advances for a range: fAdvance.count = fEndID-fStartID
+- kRun, // fStartID-fEndID have same advance: fAdvance.count = 1
++ kRun // fStartID-fEndID have same advance: fAdvance.count = 1
+ };
+ MetricType fType;
+ uint16_t fStartId;
+diff --git a/gfx/skia/include/core/SkBlitRow.h b/gfx/skia/include/core/SkBlitRow.h
+index 973ab4c..febc405 100644
+--- a/gfx/skia/include/core/SkBlitRow.h
++++ b/gfx/skia/include/core/SkBlitRow.h
+@@ -42,7 +42,7 @@ public:
+
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+- kSrcPixelAlpha_Flag32 = 1 << 1,
++ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+diff --git a/gfx/skia/include/core/SkCanvas.h b/gfx/skia/include/core/SkCanvas.h
+index 25cc94a..d942783 100644
+--- a/gfx/skia/include/core/SkCanvas.h
++++ b/gfx/skia/include/core/SkCanvas.h
+@@ -148,7 +148,7 @@ public:
+ * low byte to high byte: R, G, B, A.
+ */
+ kRGBA_Premul_Config8888,
+- kRGBA_Unpremul_Config8888,
++ kRGBA_Unpremul_Config8888
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkDevice.h b/gfx/skia/include/core/SkDevice.h
+index 1e4e0a3..b4d44bf 100644
+--- a/gfx/skia/include/core/SkDevice.h
++++ b/gfx/skia/include/core/SkDevice.h
+@@ -139,7 +139,7 @@ public:
+ protected:
+ enum Usage {
+ kGeneral_Usage,
+- kSaveLayer_Usage, // <! internal use only
++ kSaveLayer_Usage // <! internal use only
+ };
+
+ struct TextFlags {
+diff --git a/gfx/skia/include/core/SkDeviceProfile.h b/gfx/skia/include/core/SkDeviceProfile.h
+index 46b9781..f6a0bca 100644
+--- a/gfx/skia/include/core/SkDeviceProfile.h
++++ b/gfx/skia/include/core/SkDeviceProfile.h
+@@ -17,7 +17,7 @@ public:
+ kRGB_Horizontal_LCDConfig,
+ kBGR_Horizontal_LCDConfig,
+ kRGB_Vertical_LCDConfig,
+- kBGR_Vertical_LCDConfig,
++ kBGR_Vertical_LCDConfig
+ };
+
+ enum FontHintLevel {
+@@ -25,7 +25,7 @@ public:
+ kSlight_FontHintLevel,
+ kNormal_FontHintLevel,
+ kFull_FontHintLevel,
+- kAuto_FontHintLevel,
++ kAuto_FontHintLevel
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkFlattenable.h b/gfx/skia/include/core/SkFlattenable.h
+index 5714f9d..dc115fc 100644
+--- a/gfx/skia/include/core/SkFlattenable.h
++++ b/gfx/skia/include/core/SkFlattenable.h
+@@ -272,7 +272,7 @@ public:
+ * Instructs the writer to inline Factory names as there are seen the
+ * first time (after that we store an index). The pipe code uses this.
+ */
+- kInlineFactoryNames_Flag = 0x02,
++ kInlineFactoryNames_Flag = 0x02
+ };
+ Flags getFlags() const { return (Flags)fFlags; }
+ void setFlags(Flags flags) { fFlags = flags; }
+diff --git a/gfx/skia/include/core/SkFontHost.h b/gfx/skia/include/core/SkFontHost.h
+index 732de5c..10f9bdf 100644
+--- a/gfx/skia/include/core/SkFontHost.h
++++ b/gfx/skia/include/core/SkFontHost.h
+@@ -240,7 +240,7 @@ public:
+ */
+ enum LCDOrientation {
+ kHorizontal_LCDOrientation = 0, //!< this is the default
+- kVertical_LCDOrientation = 1,
++ kVertical_LCDOrientation = 1
+ };
+
+ static void SetSubpixelOrientation(LCDOrientation orientation);
+@@ -259,7 +259,7 @@ public:
+ enum LCDOrder {
+ kRGB_LCDOrder = 0, //!< this is the default
+ kBGR_LCDOrder = 1,
+- kNONE_LCDOrder = 2,
++ kNONE_LCDOrder = 2
+ };
+
+ static void SetSubpixelOrder(LCDOrder order);
+diff --git a/gfx/skia/include/core/SkMaskFilter.h b/gfx/skia/include/core/SkMaskFilter.h
+index 9a470a4..3422e27 100644
+--- a/gfx/skia/include/core/SkMaskFilter.h
++++ b/gfx/skia/include/core/SkMaskFilter.h
+@@ -61,7 +61,7 @@ public:
+ kNormal_BlurType, //!< fuzzy inside and outside
+ kSolid_BlurType, //!< solid inside, fuzzy outside
+ kOuter_BlurType, //!< nothing inside, fuzzy outside
+- kInner_BlurType, //!< fuzzy inside, nothing outside
++ kInner_BlurType //!< fuzzy inside, nothing outside
+ };
+
+ struct BlurInfo {
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+index ff37d77..7c96e193 100644
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -76,7 +76,7 @@ public:
+ kNo_Hinting = 0,
+ kSlight_Hinting = 1,
+ kNormal_Hinting = 2, //!< this is the default
+- kFull_Hinting = 3,
++ kFull_Hinting = 3
+ };
+
+ Hinting getHinting() const {
+@@ -289,7 +289,7 @@ public:
+ kStroke_Style, //!< stroke the geometry
+ kStrokeAndFill_Style, //!< fill and stroke the geometry
+
+- kStyleCount,
++ kStyleCount
+ };
+
+ /** Return the paint's style, used for controlling how primitives'
+diff --git a/gfx/skia/include/core/SkScalerContext.h b/gfx/skia/include/core/SkScalerContext.h
+index 2cb171b..3dbce27 100644
+--- a/gfx/skia/include/core/SkScalerContext.h
++++ b/gfx/skia/include/core/SkScalerContext.h
+@@ -182,21 +182,22 @@ public:
+ kGenA8FromLCD_Flag = 0x0800,
+
+ #ifdef SK_USE_COLOR_LUMINANCE
+- kLuminance_Bits = 3,
++ kLuminance_Bits = 3
+ #else
+ // luminance : 0 for black text, kLuminance_Max for white text
+ kLuminance_Shift = 13, // shift to land in the high 3-bits of Flags
+- kLuminance_Bits = 3, // ensure Flags doesn't exceed 16bits
++ kLuminance_Bits = 3 // ensure Flags doesn't exceed 16bits
+ #endif
+ };
+
+ // computed values
+ enum {
+- kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ #ifdef SK_USE_COLOR_LUMINANCE
++ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag
+ #else
++ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ kLuminance_Max = (1 << kLuminance_Bits) - 1,
+- kLuminance_Mask = kLuminance_Max << kLuminance_Shift,
++ kLuminance_Mask = kLuminance_Max << kLuminance_Shift
+ #endif
+ };
+
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+index 7963a7d..0c5c2d7 100644
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -438,7 +438,7 @@ public:
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+- kReuse_OnShrink,
++ kReuse_OnShrink
+ };
+
+ /**
+diff --git a/gfx/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/include/effects/SkLayerDrawLooper.h
+index 0bc4af2..6cb8ef6 100644
+--- a/gfx/skia/include/effects/SkLayerDrawLooper.h
++++ b/gfx/skia/include/effects/SkLayerDrawLooper.h
+@@ -41,7 +41,7 @@ public:
+ * - Flags and Color are always computed using the LayerInfo's
+ * fFlagsMask and fColorMode.
+ */
+- kEntirePaint_Bits = -1,
++ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+diff --git a/gfx/skia/src/core/SkBitmap.cpp b/gfx/skia/src/core/SkBitmap.cpp
+index 6b99145..aff52fd 100644
+--- a/gfx/skia/src/core/SkBitmap.cpp
++++ b/gfx/skia/src/core/SkBitmap.cpp
+@@ -1376,7 +1376,7 @@ enum {
+ SERIALIZE_PIXELTYPE_RAW_WITH_CTABLE,
+ SERIALIZE_PIXELTYPE_RAW_NO_CTABLE,
+ SERIALIZE_PIXELTYPE_REF_DATA,
+- SERIALIZE_PIXELTYPE_REF_PTR,
++ SERIALIZE_PIXELTYPE_REF_PTR
+ };
+
+ /*
+diff --git a/gfx/skia/src/core/SkGlyphCache.cpp b/gfx/skia/src/core/SkGlyphCache.cpp
+index f3363cd..1fddc9d 100644
+--- a/gfx/skia/src/core/SkGlyphCache.cpp
++++ b/gfx/skia/src/core/SkGlyphCache.cpp
+@@ -417,7 +417,7 @@ class SkGlyphCache_Globals {
+ public:
+ enum UseMutex {
+ kNo_UseMutex, // thread-local cache
+- kYes_UseMutex, // shared cache
++ kYes_UseMutex // shared cache
+ };
+
+ SkGlyphCache_Globals(UseMutex um) {
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch b/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch
new file mode 100644
index 0000000000..ad6e181274
--- /dev/null
+++ b/gfx/skia/patches/archive/0004-Bug-777614-Re-apply-bug-719872-Fix-crash-on-Android-.patch
@@ -0,0 +1,684 @@
+From 0d730a94e9f6676d5cde45f955fe025a4549817e Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 23 Aug 2012 16:45:38 -0400
+Subject: [PATCH 4/9] Bug 777614 - Re-apply bug 719872 - Fix crash on Android
+ by reverting to older FontHost r=nrc
+
+---
+ gfx/skia/src/ports/SkFontHost_android_old.cpp | 664 ++++++++++++++++++++++++++
+ 1 file changed, 664 insertions(+)
+ create mode 100644 gfx/skia/src/ports/SkFontHost_android_old.cpp
+
+diff --git a/gfx/skia/src/ports/SkFontHost_android_old.cpp b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+new file mode 100644
+index 0000000..b5c4f3c
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+@@ -0,0 +1,664 @@
++
++/*
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++
++#include "SkFontHost.h"
++#include "SkDescriptor.h"
++#include "SkMMapStream.h"
++#include "SkPaint.h"
++#include "SkString.h"
++#include "SkStream.h"
++#include "SkThread.h"
++#include "SkTSearch.h"
++#include <stdio.h>
++
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
++#ifndef SK_FONT_FILE_PREFIX
++ #define SK_FONT_FILE_PREFIX "/fonts/"
++#endif
++
++bool find_name_and_attributes(SkStream* stream, SkString* name, SkTypeface::Style* style,
++ bool* isFixedWidth);
++
++static void GetFullPathForSysFonts(SkString* full, const char name[]) {
++ full->set(getenv("ANDROID_ROOT"));
++ full->append(SK_FONT_FILE_PREFIX);
++ full->append(name);
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++struct FamilyRec;
++
++/* This guy holds a mapping of a name -> family, used for looking up fonts.
++ Since it is stored in a stretchy array that doesn't preserve object
++ semantics, we don't use constructor/destructors, but just have explicit
++ helpers to manage our internal bookkeeping.
++*/
++struct NameFamilyPair {
++ const char* fName; // we own this
++ FamilyRec* fFamily; // we don't own this, we just reference it
++
++ void construct(const char name[], FamilyRec* family) {
++ fName = strdup(name);
++ fFamily = family; // we don't own this, so just record the referene
++ }
++
++ void destruct() {
++ free((char*)fName);
++ // we don't own family, so just ignore our reference
++ }
++};
++
++// we use atomic_inc to grow this for each typeface we create
++static int32_t gUniqueFontID;
++
++// this is the mutex that protects these globals
++static SkMutex gFamilyMutex;
++static FamilyRec* gFamilyHead;
++static SkTDArray<NameFamilyPair> gNameList;
++
++struct FamilyRec {
++ FamilyRec* fNext;
++ SkTypeface* fFaces[4];
++
++ FamilyRec()
++ {
++ fNext = gFamilyHead;
++ memset(fFaces, 0, sizeof(fFaces));
++ gFamilyHead = this;
++ }
++};
++
++static SkTypeface* find_best_face(const FamilyRec* family,
++ SkTypeface::Style style) {
++ SkTypeface* const* faces = family->fFaces;
++
++ if (faces[style] != NULL) { // exact match
++ return faces[style];
++ }
++ // look for a matching bold
++ style = (SkTypeface::Style)(style ^ SkTypeface::kItalic);
++ if (faces[style] != NULL) {
++ return faces[style];
++ }
++ // look for the plain
++ if (faces[SkTypeface::kNormal] != NULL) {
++ return faces[SkTypeface::kNormal];
++ }
++ // look for anything
++ for (int i = 0; i < 4; i++) {
++ if (faces[i] != NULL) {
++ return faces[i];
++ }
++ }
++ // should never get here, since the faces list should not be empty
++ SkASSERT(!"faces list is empty");
++ return NULL;
++}
++
++static FamilyRec* find_family(const SkTypeface* member) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ if (curr->fFaces[i] == member) {
++ return curr;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Returns the matching typeface, or NULL. If a typeface is found, its refcnt
++ is not modified.
++ */
++static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ SkTypeface* face = curr->fFaces[i];
++ if (face != NULL && face->uniqueID() == uniqueID) {
++ return face;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Remove reference to this face from its family. If the resulting family
++ is empty (has no faces), return that family, otherwise return NULL
++*/
++static FamilyRec* remove_from_family(const SkTypeface* face) {
++ FamilyRec* family = find_family(face);
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
++
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
++ }
++ }
++ return family; // return the empty family
++}
++
++// maybe we should make FamilyRec be doubly-linked
++static void detach_and_delete_family(FamilyRec* family) {
++ FamilyRec* curr = gFamilyHead;
++ FamilyRec* prev = NULL;
++
++ while (curr != NULL) {
++ FamilyRec* next = curr->fNext;
++ if (curr == family) {
++ if (prev == NULL) {
++ gFamilyHead = next;
++ } else {
++ prev->fNext = next;
++ }
++ SkDELETE(family);
++ return;
++ }
++ prev = curr;
++ curr = next;
++ }
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
++}
++
++static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index >= 0) {
++ return find_best_face(list[index].fFamily, style);
++ }
++ return NULL;
++}
++
++static SkTypeface* find_typeface(const SkTypeface* familyMember,
++ SkTypeface::Style style) {
++ const FamilyRec* family = find_family(familyMember);
++ return family ? find_best_face(family, style) : NULL;
++}
++
++static void add_name(const char name[], FamilyRec* family) {
++ SkAutoAsciiToLC tolc(name);
++ name = tolc.lc();
++
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index < 0) {
++ list = gNameList.insert(~index);
++ list->construct(name, family);
++ }
++}
++
++static void remove_from_names(FamilyRec* emptyFamily)
++{
++#ifdef SK_DEBUG
++ for (int i = 0; i < 4; i++) {
++ SkASSERT(emptyFamily->fFaces[i] == NULL);
++ }
++#endif
++
++ SkTDArray<NameFamilyPair>& list = gNameList;
++
++ // must go backwards when removing
++ for (int i = list.count() - 1; i >= 0; --i) {
++ NameFamilyPair* pair = &list[i];
++ if (pair->fFamily == emptyFamily) {
++ pair->destruct();
++ list.remove(i);
++ }
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++class FamilyTypeface : public SkTypeface {
++public:
++ FamilyTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ bool isFixedWidth)
++ : SkTypeface(style, sk_atomic_inc(&gUniqueFontID) + 1, isFixedWidth) {
++ fIsSysFont = sysFont;
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyRec* rec = NULL;
++ if (familyMember) {
++ rec = find_family(familyMember);
++ SkASSERT(rec);
++ } else {
++ rec = SkNEW(FamilyRec);
++ }
++ rec->fFaces[style] = this;
++ }
++
++ virtual ~FamilyTypeface() {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // remove us from our family. If the family is now empty, we return
++ // that and then remove that family from the name list
++ FamilyRec* family = remove_from_family(this);
++ if (NULL != family) {
++ remove_from_names(family);
++ detach_and_delete_family(family);
++ }
++ }
++
++ bool isSysFont() const { return fIsSysFont; }
++
++ virtual SkStream* openStream() = 0;
++ virtual const char* getUniqueString() const = 0;
++ virtual const char* getFilePath() const = 0;
++
++private:
++ bool fIsSysFont;
++
++ typedef SkTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++
++class StreamTypeface : public FamilyTypeface {
++public:
++ StreamTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ SkStream* stream, bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkASSERT(stream);
++ stream->ref();
++ fStream = stream;
++ }
++ virtual ~StreamTypeface() {
++ fStream->unref();
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ // we just ref our existing stream, since the caller will call unref()
++ // when they are through
++ fStream->ref();
++ // must rewind each time, since the caller assumes a "new" stream
++ fStream->rewind();
++ return fStream;
++ }
++ virtual const char* getUniqueString() const { return NULL; }
++ virtual const char* getFilePath() const { return NULL; }
++
++private:
++ SkStream* fStream;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++class FileTypeface : public FamilyTypeface {
++public:
++ FileTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ const char path[], bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkString fullpath;
++
++ if (sysFont) {
++ GetFullPathForSysFonts(&fullpath, path);
++ path = fullpath.c_str();
++ }
++ fPath.set(path);
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (fPath.c_str()));
++
++ // check for failure
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ // maybe MMAP isn't supported. try FILE
++ stream = SkNEW_ARGS(SkFILEStream, (fPath.c_str()));
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ stream = NULL;
++ }
++ }
++ return stream;
++ }
++ virtual const char* getUniqueString() const {
++ const char* str = strrchr(fPath.c_str(), '/');
++ if (str) {
++ str += 1; // skip the '/'
++ }
++ return str;
++ }
++ virtual const char* getFilePath() const {
++ return fPath.c_str();
++ }
++
++private:
++ SkString fPath;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++///////////////////////////////////////////////////////////////////////////////
++
++static bool get_name_and_style(const char path[], SkString* name,
++ SkTypeface::Style* style,
++ bool* isFixedWidth, bool isExpected) {
++ SkString fullpath;
++ GetFullPathForSysFonts(&fullpath, path);
++
++ SkMMAPStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ else {
++ SkFILEStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ }
++
++ if (isExpected) {
++ SkDebugf("---- failed to open <%s> as a font\n", fullpath.c_str());
++ }
++ return false;
++}
++
++// used to record our notion of the pre-existing fonts
++struct FontInitRec {
++ const char* fFileName;
++ const char* const* fNames; // null-terminated list
++};
++
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
++// deliberately empty, but we use the address to identify fallback fonts
++static const char* gFBNames[] = { NULL };
++
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
++
++#define DEFAULT_NAMES gSansNames
++
++// these globals are assigned (once) by load_system_fonts()
++static FamilyRec* gDefaultFamily;
++static SkTypeface* gDefaultNormal;
++
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
++
++/* Called once (ensured by the sentinel check at the beginning of our body).
++ Initializes all the globals, and register the system fonts.
++ */
++static void load_system_fonts() {
++ // check if we've already be called
++ if (NULL != gDefaultNormal) {
++ return;
++ }
++
++ const FontInitRec* rec = gSystemFonts;
++ SkTypeface* firstInFamily = NULL;
++ int fallbackCount = 0;
++
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ // if we're the first in a new family, clear firstInFamily
++ if (rec[i].fNames != NULL) {
++ firstInFamily = NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++
++ // we expect all the fonts, except the "fallback" fonts
++ bool isExpected = (rec[i].fNames != gFBNames);
++ if (!get_name_and_style(rec[i].fFileName, &name, &style,
++ &isFixedWidth, isExpected)) {
++ continue;
++ }
++
++ SkTypeface* tf = SkNEW_ARGS(FileTypeface,
++ (style,
++ true, // system-font (cannot delete)
++ firstInFamily, // what family to join
++ rec[i].fFileName,
++ isFixedWidth) // filename
++ );
++
++ if (rec[i].fNames != NULL) {
++ // see if this is one of our fallback fonts
++ if (rec[i].fNames == gFBNames) {
++ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
++ // rec[i].fFileName, fallbackCount, tf->uniqueID());
++ gFallbackFonts[fallbackCount++] = tf->uniqueID();
++ }
++
++ firstInFamily = tf;
++ FamilyRec* family = find_family(tf);
++ const char* const* names = rec[i].fNames;
++
++ // record the default family if this is it
++ if (names == DEFAULT_NAMES) {
++ gDefaultFamily = family;
++ }
++ // add the names to map to this family
++ while (*names) {
++ add_name(*names, family);
++ names += 1;
++ }
++ }
++ }
++
++ // do this after all fonts are loaded. This is our default font, and it
++ // acts as a sentinel so we only execute load_system_fonts() once
++ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
++ // now terminate our fallback list with the sentinel value
++ gFallbackFonts[fallbackCount] = 0;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
++
++ stream->write8((uint8_t)face->style());
++
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
++ } else {
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
++ }
++}
++
++SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
++ load_system_fonts();
++
++ int style = stream->readU8();
++
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
++
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], (SkTypeface::Style)style);
++ }
++ }
++ }
++ }
++ }
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char familyName[],
++ SkTypeface::Style style) {
++ load_system_fonts();
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // clip to legal style bits
++ style = (SkTypeface::Style)(style & SkTypeface::kBoldItalic);
++
++ SkTypeface* tf = NULL;
++
++ if (NULL != familyFace) {
++ tf = find_typeface(familyFace, style);
++ } else if (NULL != familyName) {
++// SkDebugf("======= familyName <%s>\n", familyName);
++ tf = find_typeface(familyName, style);
++ }
++
++ if (NULL == tf) {
++ tf = find_best_face(gDefaultFamily, style);
++ }
++
++ // we ref(), since the symantic is to return a new instance
++ tf->ref();
++ return tf;
++}
++
++SkStream* SkFontHost::OpenStream(uint32_t fontID) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ SkStream* stream = tf ? tf->openStream() : NULL;
++
++ if (stream && stream->getLength() == 0) {
++ stream->unref();
++ stream = NULL;
++ }
++ return stream;
++}
++
++size_t SkFontHost::GetFileName(SkFontID fontID, char path[], size_t length,
++ int32_t* index) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ const char* src = tf ? tf->getFilePath() : NULL;
++
++ if (src) {
++ size_t size = strlen(src);
++ if (path) {
++ memcpy(path, src, SkMin32(size, length));
++ }
++ if (index) {
++ *index = 0; // we don't have collections (yet)
++ }
++ return size;
++ } else {
++ return 0;
++ }
++}
++
++SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
++ load_system_fonts();
++
++ /* First see if fontID is already one of our fallbacks. If so, return
++ its successor. If fontID is not in our list, then return the first one
++ in our list. Note: list is zero-terminated, and returning zero means
++ we have no more fonts to use for fallbacks.
++ */
++ const uint32_t* list = gFallbackFonts;
++ for (int i = 0; list[i] != 0; i++) {
++ if (list[i] == currFontID) {
++ return list[i+1];
++ }
++ }
++ return list[0];
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
++ if (NULL == stream || stream->getLength() <= 0) {
++ return NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++ find_name_and_attributes(stream, &name, &style, &isFixedWidth);
++
++ if (!name.isEmpty()) {
++ return SkNEW_ARGS(StreamTypeface, (style, false, NULL, stream, isFixedWidth));
++ } else {
++ return NULL;
++ }
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
++ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
++ // since we created the stream, we let go of our ref() here
++ stream->unref();
++ return face;
++}
++
++///////////////////////////////////////////////////////////////////////////////
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch b/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch
new file mode 100644
index 0000000000..e8b5df635b
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-731384-Fix-clang-SK_OVERRIDE.patch
@@ -0,0 +1,36 @@
+From 80350275c72921ed5ac405c029ae33727467d7c5 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:15:50 -0400
+Subject: [PATCH 05/10] Bug 755869 - [8] Re-apply bug 731384 - Fix compile
+ errors on older versions of clang r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPostConfig.h | 9 +++++++++
+ 1 files changed, 9 insertions(+), 0 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 8316f7a..041fe2a 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -288,9 +288,18 @@
+ #if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
++#if __has_feature(cxx_override_control)
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
++#elif defined(__has_extension)
++#if __has_extension(cxx_override_control)
++#define SK_OVERRIDE override
++#endif
++#endif
++#ifndef SK_OVERRIDE
++#define SK_OVERRIDE
++#endif
+ #else
+ // Linux GCC ignores "__attribute__((override))" and rejects "override".
+ #define SK_OVERRIDE
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch b/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch
new file mode 100644
index 0000000000..cd2f67131c
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-736276-Add-a-new-SkFontHost-that-takes-a-cairo_s.patch
@@ -0,0 +1,449 @@
+From: George Wright <george@mozilla.com>
+Date: Wed, 1 Aug 2012 16:43:15 -0400
+Subject: Bug 736276 - Add a new SkFontHost that takes a cairo_scaled_font_t r=karl
+
+
+diff --git a/gfx/skia/include/ports/SkTypeface_cairo.h b/gfx/skia/include/ports/SkTypeface_cairo.h
+new file mode 100644
+index 0000000..7e44f04
+--- /dev/null
++++ b/gfx/skia/include/ports/SkTypeface_cairo.h
+@@ -0,0 +1,11 @@
++#ifndef SkTypeface_cairo_DEFINED
++#define SkTypeface_cairo_DEFINED
++
++#include <cairo.h>
++
++#include "SkTypeface.h"
++
++SK_API extern SkTypeface* SkCreateTypefaceFromCairoFont(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth);
++
++#endif
++
+diff --git a/gfx/skia/moz.build b/gfx/skia/moz.build
+index 9ceba59..66efd52 100644
+--- a/gfx/skia/moz.build
++++ b/gfx/skia/moz.build
+@@ -171,10 +171,12 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ 'SkTime_win.cpp',
+ ]
+ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk2':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+- 'SkFontHost_linux.cpp',
+ 'SkThread_pthread.cpp',
+ 'SkThreadUtils_pthread.cpp',
+ 'SkThreadUtils_pthread_linux.cpp',
+@@ -183,14 +185,15 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gtk2':
+ ]
+ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'qt':
+ CPP_SOURCES += [
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+ 'SkOSFile.cpp',
+ ]
+ if CONFIG['OS_TARGET'] == 'Linux':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+- 'SkFontHost_linux.cpp',
+- 'SkFontHost_tables.cpp',
+ 'SkThread_pthread.cpp',
+ 'SkThreadUtils_pthread.cpp',
+ 'SkThreadUtils_pthread_linux.cpp',
+@@ -204,11 +207,13 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gonk':
+ # Separate 'if' from above, since the else below applies to all != 'android'
+ # toolkits.
+ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
++ EXPORTS.skia += [
++ 'include/ports/SkTypeface_cairo.h',
++ ]
+ CPP_SOURCES += [
+ 'ashmem.cpp',
+ 'SkDebug_android.cpp',
+- 'SkFontHost_android_old.cpp',
+- 'SkFontHost_FreeType.cpp',
++ 'SkFontHost_cairo.cpp',
+ 'SkFontHost_FreeType_common.cpp',
+ 'SkImageRef_ashmem.cpp',
+ 'SkTime_Unix.cpp',
+diff --git a/gfx/skia/src/ports/SkFontHost_cairo.cpp b/gfx/skia/src/ports/SkFontHost_cairo.cpp
+new file mode 100644
+index 0000000..bb5b778
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_cairo.cpp
+@@ -0,0 +1,364 @@
++
++/*
++ * Copyright 2012 Mozilla Foundation
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++#include "cairo.h"
++#include "cairo-ft.h"
++
++#include "SkFontHost_FreeType_common.h"
++
++#include "SkAdvancedTypefaceMetrics.h"
++#include "SkFontHost.h"
++#include "SkPath.h"
++#include "SkScalerContext.h"
++#include "SkTypefaceCache.h"
++
++#include <ft2build.h>
++#include FT_FREETYPE_H
++
++static cairo_user_data_key_t kSkTypefaceKey;
++
++class SkScalerContext_CairoFT : public SkScalerContext_FreeType_Base {
++public:
++ SkScalerContext_CairoFT(SkTypeface* typeface, const SkDescriptor* desc);
++ virtual ~SkScalerContext_CairoFT();
++
++protected:
++ virtual unsigned generateGlyphCount() SK_OVERRIDE;
++ virtual uint16_t generateCharToGlyph(SkUnichar uniChar) SK_OVERRIDE;
++ virtual void generateAdvance(SkGlyph* glyph) SK_OVERRIDE;
++ virtual void generateMetrics(SkGlyph* glyph) SK_OVERRIDE;
++ virtual void generateImage(const SkGlyph& glyph) SK_OVERRIDE;
++ virtual void generatePath(const SkGlyph& glyph, SkPath* path) SK_OVERRIDE;
++ virtual void generateFontMetrics(SkPaint::FontMetrics* mx,
++ SkPaint::FontMetrics* my) SK_OVERRIDE;
++ virtual SkUnichar generateGlyphToChar(uint16_t glyph) SK_OVERRIDE;
++private:
++ cairo_scaled_font_t* fScaledFont;
++ uint32_t fLoadGlyphFlags;
++};
++
++class CairoLockedFTFace {
++public:
++ CairoLockedFTFace(cairo_scaled_font_t* scaledFont)
++ : fScaledFont(scaledFont)
++ , fFace(cairo_ft_scaled_font_lock_face(scaledFont))
++ {}
++
++ ~CairoLockedFTFace()
++ {
++ cairo_ft_scaled_font_unlock_face(fScaledFont);
++ }
++
++ FT_Face getFace()
++ {
++ return fFace;
++ }
++
++private:
++ cairo_scaled_font_t* fScaledFont;
++ FT_Face fFace;
++};
++
++class SkCairoFTTypeface : public SkTypeface {
++public:
++ static SkTypeface* CreateTypeface(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth) {
++ SkASSERT(fontFace != NULL);
++ SkASSERT(cairo_font_face_get_type(fontFace) == CAIRO_FONT_TYPE_FT);
++
++ SkFontID newId = SkTypefaceCache::NewFontID();
++
++ return SkNEW_ARGS(SkCairoFTTypeface, (fontFace, style, newId, isFixedWidth));
++ }
++
++ cairo_font_face_t* getFontFace() {
++ return fFontFace;
++ }
++
++ virtual SkStream* onOpenStream(int*) const SK_OVERRIDE { return NULL; }
++
++ virtual SkAdvancedTypefaceMetrics*
++ onGetAdvancedTypefaceMetrics(SkAdvancedTypefaceMetrics::PerGlyphInfo,
++ const uint32_t*, uint32_t) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetAdvancedTypefaceMetrics unimplemented\n"));
++ return NULL;
++ }
++
++ virtual SkScalerContext* onCreateScalerContext(const SkDescriptor* desc) const SK_OVERRIDE
++ {
++ return SkNEW_ARGS(SkScalerContext_CairoFT, (const_cast<SkCairoFTTypeface*>(this), desc));
++ }
++
++ virtual void onFilterRec(SkScalerContextRec*) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onFilterRec unimplemented\n"));
++ }
++
++ virtual void onGetFontDescriptor(SkFontDescriptor*, bool*) const SK_OVERRIDE
++ {
++ SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetFontDescriptor unimplemented\n"));
++ }
++
++
++private:
++
++ SkCairoFTTypeface(cairo_font_face_t* fontFace, SkTypeface::Style style, SkFontID id, bool isFixedWidth)
++ : SkTypeface(style, id, isFixedWidth)
++ , fFontFace(fontFace)
++ {
++ cairo_font_face_set_user_data(fFontFace, &kSkTypefaceKey, this, NULL);
++ cairo_font_face_reference(fFontFace);
++ }
++
++ ~SkCairoFTTypeface()
++ {
++ cairo_font_face_set_user_data(fFontFace, &kSkTypefaceKey, NULL, NULL);
++ cairo_font_face_destroy(fFontFace);
++ }
++
++ cairo_font_face_t* fFontFace;
++};
++
++SkTypeface* SkCreateTypefaceFromCairoFont(cairo_font_face_t* fontFace, SkTypeface::Style style, bool isFixedWidth)
++{
++ SkTypeface* typeface = reinterpret_cast<SkTypeface*>(cairo_font_face_get_user_data(fontFace, &kSkTypefaceKey));
++
++ if (typeface) {
++ typeface->ref();
++ } else {
++ typeface = SkCairoFTTypeface::CreateTypeface(fontFace, style, isFixedWidth);
++ SkTypefaceCache::Add(typeface, style);
++ }
++
++ return typeface;
++}
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char famillyName[],
++ SkTypeface::Style style)
++{
++ SkDEBUGFAIL("SkFontHost::FindTypeface unimplemented");
++ return NULL;
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream*)
++{
++ SkDEBUGFAIL("SkFontHost::CreateTypeface unimplemented");
++ return NULL;
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(char const*)
++{
++ SkDEBUGFAIL("SkFontHost::CreateTypefaceFromFile unimplemented");
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++static bool isLCD(const SkScalerContext::Rec& rec) {
++ switch (rec.fMaskFormat) {
++ case SkMask::kLCD16_Format:
++ case SkMask::kLCD32_Format:
++ return true;
++ default:
++ return false;
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++SkScalerContext_CairoFT::SkScalerContext_CairoFT(SkTypeface* typeface, const SkDescriptor* desc)
++ : SkScalerContext_FreeType_Base(typeface, desc)
++{
++ SkMatrix matrix;
++ fRec.getSingleMatrix(&matrix);
++
++ cairo_font_face_t* fontFace = static_cast<SkCairoFTTypeface*>(typeface)->getFontFace();
++
++ cairo_matrix_t fontMatrix, ctMatrix;
++ cairo_matrix_init(&fontMatrix, matrix.getScaleX(), matrix.getSkewY(), matrix.getSkewX(), matrix.getScaleY(), 0.0, 0.0);
++ cairo_matrix_init_scale(&ctMatrix, 1.0, 1.0);
++
++ // We need to ensure that the font options match for hinting, as generateMetrics()
++ // uses the fScaledFont which uses these font options
++ cairo_font_options_t *fontOptions = cairo_font_options_create();
++
++ FT_Int32 loadFlags = FT_LOAD_DEFAULT;
++
++ if (SkMask::kBW_Format == fRec.fMaskFormat) {
++ // See http://code.google.com/p/chromium/issues/detail?id=43252#c24
++ loadFlags = FT_LOAD_TARGET_MONO;
++ if (fRec.getHinting() == SkPaint::kNo_Hinting) {
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_NONE);
++ loadFlags = FT_LOAD_NO_HINTING;
++ }
++ } else {
++ switch (fRec.getHinting()) {
++ case SkPaint::kNo_Hinting:
++ loadFlags = FT_LOAD_NO_HINTING;
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_NONE);
++ break;
++ case SkPaint::kSlight_Hinting:
++ loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_SLIGHT);
++ break;
++ case SkPaint::kNormal_Hinting:
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_MEDIUM);
++ if (fRec.fFlags & SkScalerContext::kAutohinting_Flag) {
++ loadFlags = FT_LOAD_FORCE_AUTOHINT;
++ }
++ break;
++ case SkPaint::kFull_Hinting:
++ cairo_font_options_set_hint_style(fontOptions, CAIRO_HINT_STYLE_FULL);
++ if (fRec.fFlags & SkScalerContext::kAutohinting_Flag) {
++ loadFlags = FT_LOAD_FORCE_AUTOHINT;
++ }
++ if (isLCD(fRec)) {
++ if (SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag)) {
++ loadFlags = FT_LOAD_TARGET_LCD_V;
++ } else {
++ loadFlags = FT_LOAD_TARGET_LCD;
++ }
++ }
++ break;
++ default:
++ SkDebugf("---------- UNKNOWN hinting %d\n", fRec.getHinting());
++ break;
++ }
++ }
++
++ fScaledFont = cairo_scaled_font_create(fontFace, &fontMatrix, &ctMatrix, fontOptions);
++
++ if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
++ loadFlags |= FT_LOAD_NO_BITMAP;
++ }
++
++ // Always using FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
++ // advances, as fontconfig and cairo do.
++ // See http://code.google.com/p/skia/issues/detail?id=222.
++ loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;
++
++ fLoadGlyphFlags = loadFlags;
++}
++
++SkScalerContext_CairoFT::~SkScalerContext_CairoFT()
++{
++ cairo_scaled_font_destroy(fScaledFont);
++}
++
++unsigned SkScalerContext_CairoFT::generateGlyphCount()
++{
++ CairoLockedFTFace faceLock(fScaledFont);
++ return faceLock.getFace()->num_glyphs;
++}
++
++uint16_t SkScalerContext_CairoFT::generateCharToGlyph(SkUnichar uniChar)
++{
++ CairoLockedFTFace faceLock(fScaledFont);
++ return SkToU16(FT_Get_Char_Index(faceLock.getFace(), uniChar));
++}
++
++void SkScalerContext_CairoFT::generateAdvance(SkGlyph* glyph)
++{
++ generateMetrics(glyph);
++}
++
++void SkScalerContext_CairoFT::generateMetrics(SkGlyph* glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ cairo_text_extents_t extents;
++ cairo_glyph_t cairoGlyph = { glyph->getGlyphID(fBaseGlyphCount), 0.0, 0.0 };
++ cairo_scaled_font_glyph_extents(fScaledFont, &cairoGlyph, 1, &extents);
++
++ glyph->fAdvanceX = SkDoubleToFixed(extents.x_advance);
++ glyph->fAdvanceY = SkDoubleToFixed(extents.y_advance);
++ glyph->fWidth = SkToU16(SkScalarCeil(extents.width));
++ glyph->fHeight = SkToU16(SkScalarCeil(extents.height));
++ glyph->fLeft = SkToS16(SkScalarCeil(extents.x_bearing));
++ glyph->fTop = SkToS16(SkScalarCeil(extents.y_bearing));
++ glyph->fLsbDelta = 0;
++ glyph->fRsbDelta = 0;
++}
++
++void SkScalerContext_CairoFT::generateImage(const SkGlyph& glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ FT_Error err = FT_Load_Glyph(face, glyph.getGlyphID(fBaseGlyphCount), fLoadGlyphFlags);
++
++ if (err != 0) {
++ memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
++ return;
++ }
++
++ generateGlyphImage(face, glyph);
++}
++
++void SkScalerContext_CairoFT::generatePath(const SkGlyph& glyph, SkPath* path)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ SkASSERT(&glyph && path);
++
++ uint32_t flags = fLoadGlyphFlags;
++ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
++ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
++
++ FT_Error err = FT_Load_Glyph(face, glyph.getGlyphID(fBaseGlyphCount), flags);
++
++ if (err != 0) {
++ path->reset();
++ return;
++ }
++
++ generateGlyphPath(face, path);
++}
++
++void SkScalerContext_CairoFT::generateFontMetrics(SkPaint::FontMetrics* mx,
++ SkPaint::FontMetrics* my)
++{
++ SkDEBUGCODE(SkDebugf("SkScalerContext_CairoFT::generateFontMetrics unimplemented\n"));
++}
++
++SkUnichar SkScalerContext_CairoFT::generateGlyphToChar(uint16_t glyph)
++{
++ SkASSERT(fScaledFont != NULL);
++ CairoLockedFTFace faceLock(fScaledFont);
++ FT_Face face = faceLock.getFace();
++
++ FT_UInt glyphIndex;
++ SkUnichar charCode = FT_Get_First_Char(face, &glyphIndex);
++ while (glyphIndex != 0) {
++ if (glyphIndex == glyph) {
++ return charCode;
++ }
++ charCode = FT_Get_Next_Char(face, charCode, &glyphIndex);
++ }
++
++ return 0;
++}
++
++#ifdef SK_BUILD_FOR_ANDROID
++SkTypeface* SkAndroidNextLogicalTypeface(SkFontID currFontID,
++ SkFontID origFontID) {
++ return NULL;
++}
++#endif
++
++///////////////////////////////////////////////////////////////////////////////
++
++#include "SkFontMgr.h"
++
++SkFontMgr* SkFontMgr::Factory() {
++ // todo
++ return NULL;
++}
++
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch b/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch
new file mode 100644
index 0000000000..cfcb40b9d7
--- /dev/null
+++ b/gfx/skia/patches/archive/0005-Bug-777614-Re-apply-bug-687188-Expand-the-gradient-c.patch
@@ -0,0 +1,198 @@
+From 1ab13a923399aa638388231baca784ba89f2c82b Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Wed, 12 Sep 2012 12:30:29 -0400
+Subject: [PATCH 5/9] Bug 777614 - Re-apply bug 687188 - Expand the gradient
+ cache by 2 to store 0/1 colour stop values for
+ clamping. r=nrc
+
+---
+ .../src/effects/gradients/SkGradientShader.cpp | 22 +++++++++++----
+ .../src/effects/gradients/SkGradientShaderPriv.h | 5 +++-
+ .../src/effects/gradients/SkLinearGradient.cpp | 32 ++++++++++++++++------
+ .../gradients/SkTwoPointConicalGradient.cpp | 11 ++++++--
+ .../effects/gradients/SkTwoPointRadialGradient.cpp | 11 ++++++--
+ 5 files changed, 61 insertions(+), 20 deletions(-)
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index f0dac4d..79e7202 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -426,15 +426,15 @@ static void complete_32bit_cache(SkPMColor* cache, int stride) {
+
+ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ if (fCache32 == NULL) {
+- // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ // double the count for dither entries, and have an extra two entries for clamp values
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kGradient32Length, fCacheAlpha);
+@@ -458,7 +458,7 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kGradient32Length; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -467,10 +467,22 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ complete_32bit_cache(fCache32, kCache32Count);
+ }
++
++ // Write the clamp colours into the first and last entries of fCache32
++ fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
++
+ return fCache32;
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index 0e7c2fc..7427935 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -133,7 +133,10 @@ public:
+ kDitherStride32 = 0,
+ #endif
+ kDitherStride16 = kCache16Count,
+- kLerpRemainderMask32 = (1 << (16 - kCache32Bits)) - 1
++ kLerpRemainderMask32 = (1 << (16 - kCache32Bits)) - 1,
++
++ kCache32ClampLower = -1,
++ kCache32ClampUpper = kCache32Count * 2
+ };
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+index bcebc26..d400b4d 100644
+--- a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+@@ -126,6 +126,17 @@ void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -144,6 +155,17 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -169,10 +191,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, SkGradientShaderBase::kGradient32Length);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ SkGradientShaderBase::kDitherStride32) + range.fV0],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -191,10 +210,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ SkGradientShaderBase::kDitherStride32) + range.fV1],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
+ }
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+index 3466d2c..764a444 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+@@ -123,9 +123,14 @@ static void twopoint_clamp(TwoPtRadial* rec, SkPMColor* SK_RESTRICT dstC,
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ }
+ }
+ }
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+index 9362ded..22b028e 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+@@ -120,9 +120,14 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch b/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch
new file mode 100644
index 0000000000..eb75691ad7
--- /dev/null
+++ b/gfx/skia/patches/archive/0006-Bug-751814-ARM-EDSP-ARMv6-Skia-fixes.patch
@@ -0,0 +1,147 @@
+From 94916fbbc7865c6fe23a57d6edc48c6daf93dda8 Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:16:08 -0400
+Subject: [PATCH 06/10] Bug 755869 - [9] Re-apply bug 751814 - Various
+ Skia fixes for ARM without EDSP and ARMv6+
+ r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkMath.h | 5 +--
+ gfx/skia/include/core/SkPostConfig.h | 45 ++++++++++++++++++++++
+ gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp | 6 +-
+ gfx/skia/src/opts/SkBlitRow_opts_arm.cpp | 9 ++++
+ 4 files changed, 58 insertions(+), 7 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkMath.h b/gfx/skia/include/core/SkMath.h
+index 5889103..7a4b707 100644
+--- a/gfx/skia/include/core/SkMath.h
++++ b/gfx/skia/include/core/SkMath.h
+@@ -153,10 +153,7 @@ static inline bool SkIsPow2(int value) {
+ With this requirement, we can generate faster instructions on some
+ architectures.
+ */
+-#if defined(__arm__) \
+- && !defined(__thumb__) \
+- && !defined(__ARM_ARCH_4T__) \
+- && !defined(__ARM_ARCH_5T__)
++#ifdef SK_ARM_HAS_EDSP
+ static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
+ SkASSERT((int16_t)x == x);
+ SkASSERT((int16_t)y == y);
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 041fe2a..03105e4 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -311,3 +311,48 @@
+ #ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
+ #endif
++
++//////////////////////////////////////////////////////////////////////
++// ARM defines
++
++#if defined(__GNUC__) && defined(__arm__)
++
++# define SK_ARM_ARCH 3
++
++# if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__) \
++ || defined(_ARM_ARCH_4)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 4
++# endif
++
++# if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
++ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
++ || defined(__ARM_ARCH_5TEJ__) || defined(_ARM_ARCH_5)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 5
++# endif
++
++# if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
++ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
++ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
++ || defined(__ARM_ARCH_6M__) || defined(_ARM_ARCH_6)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 6
++# endif
++
++# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
++ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
++ || defined(__ARM_ARCH_7EM__) || defined(_ARM_ARCH_7)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 7
++# endif
++
++# undef SK_ARM_HAS_EDSP
++# if defined(__thumb2__) && (SK_ARM_ARCH >= 6) \
++ || !defined(__thumb__) \
++ && ((SK_ARM_ARCH > 5) || defined(__ARM_ARCH_5E__) \
++ || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__))
++# define SK_ARM_HAS_EDSP 1
++# endif
++
++#endif
+diff --git a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+index 20d62e1..deb1bfe 100644
+--- a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+@@ -11,7 +11,7 @@
+ #include "SkColorPriv.h"
+ #include "SkUtils.h"
+
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ void SI8_D16_nofilter_DX_arm(
+ const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+@@ -182,7 +182,7 @@ void SI8_opaque_D32_nofilter_DX_arm(const SkBitmapProcState& s,
+
+ s.fBitmap->getColorTable()->unlockColors(false);
+ }
+-#endif //__ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#endif // SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+@@ -200,7 +200,7 @@ void SkBitmapProcState::platformProcs() {
+
+ switch (fBitmap->config()) {
+ case SkBitmap::kIndex8_Config:
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ if (justDx && !doFilter) {
+ #if 0 /* crashing on android device */
+ fSampleProc16 = SI8_D16_nofilter_DX_arm;
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+index 2490371..c928888 100644
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -675,8 +675,13 @@ static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_
+ /* dst1_scale and dst2_scale*/
+ "lsr r9, r5, #24 \n\t" /* src >> 24 */
+ "lsr r10, r6, #24 \n\t" /* src >> 24 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
+ "smulbb r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#else
++ "mul r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
++ "mul r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#endif
+ "lsr r9, r9, #8 \n\t" /* r9 >> 8 */
+ "lsr r10, r10, #8 \n\t" /* r10 >> 8 */
+ "rsb r9, r9, #256 \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
+@@ -745,7 +750,11 @@ static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_
+
+ "lsr r6, r5, #24 \n\t" /* src >> 24 */
+ "and r8, r12, r5, lsr #8 \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#else
++ "mul r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#endif
+ "and r9, r12, r5 \n\t" /* rb = r9 = r5 masked by r12 */
+ "lsr r6, r6, #8 \n\t" /* r6 >> 8 */
+ "mul r8, r8, %[alpha] \n\t" /* ag = r8 times scale */
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch b/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch
new file mode 100644
index 0000000000..2850000ace
--- /dev/null
+++ b/gfx/skia/patches/archive/0006-Bug-848491-Re-apply-Bug-777614-Add-our-SkUserConfig..patch
@@ -0,0 +1,27 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:40:12 -0400
+Subject: Bug 848491 - Re-apply Bug 777614 - Add our SkUserConfig.h
+
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index 63fc90d..c965e91 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -201,4 +201,14 @@
+ */
+ //#define SK_SUPPORT_GPU 1
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch b/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch
new file mode 100644
index 0000000000..ca34e1a457
--- /dev/null
+++ b/gfx/skia/patches/archive/0007-Bug-719872-Old-Android-FontHost.patch
@@ -0,0 +1,702 @@
+From 6982ad469adcdfa2b7bdbf8bbd843bc22d3832fc Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Fri, 18 May 2012 14:52:40 -0400
+Subject: [PATCH 07/10] Bug 755869 - [10] Re-apply bug 719872 - Fix crash
+ on Android by reverting to older FontHost impl
+ r=mattwoodrow
+
+---
+ gfx/skia/Makefile.in | 5 +-
+ gfx/skia/src/ports/SkFontHost_android_old.cpp | 664 +++++++++++++++++++++++++
+ 2 files changed, 668 insertions(+), 1 deletions(-)
+ create mode 100644 gfx/skia/src/ports/SkFontHost_android_old.cpp
+
+diff --git a/gfx/skia/Makefile.in b/gfx/skia/Makefile.in
+index 9da098a..8184f1c 100644
+--- a/gfx/skia/Makefile.in
++++ b/gfx/skia/Makefile.in
+@@ -327,7 +327,10 @@ endif
+ ifeq (android,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+ SkDebug_android.cpp \
+- SkFontHost_none.cpp \
++ SkFontHost_android_old.cpp \
++ SkFontHost_gamma.cpp \
++ SkFontHost_FreeType.cpp \
++ SkFontHost_tables.cpp \
+ SkMMapStream.cpp \
+ SkTime_Unix.cpp \
+ SkThread_pthread.cpp \
+diff --git a/gfx/skia/src/ports/SkFontHost_android_old.cpp b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+new file mode 100644
+index 0000000..b5c4f3c
+--- /dev/null
++++ b/gfx/skia/src/ports/SkFontHost_android_old.cpp
+@@ -0,0 +1,664 @@
++
++/*
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++
++#include "SkFontHost.h"
++#include "SkDescriptor.h"
++#include "SkMMapStream.h"
++#include "SkPaint.h"
++#include "SkString.h"
++#include "SkStream.h"
++#include "SkThread.h"
++#include "SkTSearch.h"
++#include <stdio.h>
++
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
++#ifndef SK_FONT_FILE_PREFIX
++ #define SK_FONT_FILE_PREFIX "/fonts/"
++#endif
++
++bool find_name_and_attributes(SkStream* stream, SkString* name, SkTypeface::Style* style,
++ bool* isFixedWidth);
++
++static void GetFullPathForSysFonts(SkString* full, const char name[]) {
++ full->set(getenv("ANDROID_ROOT"));
++ full->append(SK_FONT_FILE_PREFIX);
++ full->append(name);
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++struct FamilyRec;
++
++/* This guy holds a mapping of a name -> family, used for looking up fonts.
++ Since it is stored in a stretchy array that doesn't preserve object
++ semantics, we don't use constructor/destructors, but just have explicit
++ helpers to manage our internal bookkeeping.
++*/
++struct NameFamilyPair {
++ const char* fName; // we own this
++ FamilyRec* fFamily; // we don't own this, we just reference it
++
++ void construct(const char name[], FamilyRec* family) {
++ fName = strdup(name);
++ fFamily = family; // we don't own this, so just record the referene
++ }
++
++ void destruct() {
++ free((char*)fName);
++ // we don't own family, so just ignore our reference
++ }
++};
++
++// we use atomic_inc to grow this for each typeface we create
++static int32_t gUniqueFontID;
++
++// this is the mutex that protects these globals
++static SkMutex gFamilyMutex;
++static FamilyRec* gFamilyHead;
++static SkTDArray<NameFamilyPair> gNameList;
++
++struct FamilyRec {
++ FamilyRec* fNext;
++ SkTypeface* fFaces[4];
++
++ FamilyRec()
++ {
++ fNext = gFamilyHead;
++ memset(fFaces, 0, sizeof(fFaces));
++ gFamilyHead = this;
++ }
++};
++
++static SkTypeface* find_best_face(const FamilyRec* family,
++ SkTypeface::Style style) {
++ SkTypeface* const* faces = family->fFaces;
++
++ if (faces[style] != NULL) { // exact match
++ return faces[style];
++ }
++ // look for a matching bold
++ style = (SkTypeface::Style)(style ^ SkTypeface::kItalic);
++ if (faces[style] != NULL) {
++ return faces[style];
++ }
++ // look for the plain
++ if (faces[SkTypeface::kNormal] != NULL) {
++ return faces[SkTypeface::kNormal];
++ }
++ // look for anything
++ for (int i = 0; i < 4; i++) {
++ if (faces[i] != NULL) {
++ return faces[i];
++ }
++ }
++ // should never get here, since the faces list should not be empty
++ SkASSERT(!"faces list is empty");
++ return NULL;
++}
++
++static FamilyRec* find_family(const SkTypeface* member) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ if (curr->fFaces[i] == member) {
++ return curr;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Returns the matching typeface, or NULL. If a typeface is found, its refcnt
++ is not modified.
++ */
++static SkTypeface* find_from_uniqueID(uint32_t uniqueID) {
++ FamilyRec* curr = gFamilyHead;
++ while (curr != NULL) {
++ for (int i = 0; i < 4; i++) {
++ SkTypeface* face = curr->fFaces[i];
++ if (face != NULL && face->uniqueID() == uniqueID) {
++ return face;
++ }
++ }
++ curr = curr->fNext;
++ }
++ return NULL;
++}
++
++/* Remove reference to this face from its family. If the resulting family
++ is empty (has no faces), return that family, otherwise return NULL
++*/
++static FamilyRec* remove_from_family(const SkTypeface* face) {
++ FamilyRec* family = find_family(face);
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
++
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
++ }
++ }
++ return family; // return the empty family
++}
++
++// maybe we should make FamilyRec be doubly-linked
++static void detach_and_delete_family(FamilyRec* family) {
++ FamilyRec* curr = gFamilyHead;
++ FamilyRec* prev = NULL;
++
++ while (curr != NULL) {
++ FamilyRec* next = curr->fNext;
++ if (curr == family) {
++ if (prev == NULL) {
++ gFamilyHead = next;
++ } else {
++ prev->fNext = next;
++ }
++ SkDELETE(family);
++ return;
++ }
++ prev = curr;
++ curr = next;
++ }
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
++}
++
++static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index >= 0) {
++ return find_best_face(list[index].fFamily, style);
++ }
++ return NULL;
++}
++
++static SkTypeface* find_typeface(const SkTypeface* familyMember,
++ SkTypeface::Style style) {
++ const FamilyRec* family = find_family(familyMember);
++ return family ? find_best_face(family, style) : NULL;
++}
++
++static void add_name(const char name[], FamilyRec* family) {
++ SkAutoAsciiToLC tolc(name);
++ name = tolc.lc();
++
++ NameFamilyPair* list = gNameList.begin();
++ int count = gNameList.count();
++
++ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
++
++ if (index < 0) {
++ list = gNameList.insert(~index);
++ list->construct(name, family);
++ }
++}
++
++static void remove_from_names(FamilyRec* emptyFamily)
++{
++#ifdef SK_DEBUG
++ for (int i = 0; i < 4; i++) {
++ SkASSERT(emptyFamily->fFaces[i] == NULL);
++ }
++#endif
++
++ SkTDArray<NameFamilyPair>& list = gNameList;
++
++ // must go backwards when removing
++ for (int i = list.count() - 1; i >= 0; --i) {
++ NameFamilyPair* pair = &list[i];
++ if (pair->fFamily == emptyFamily) {
++ pair->destruct();
++ list.remove(i);
++ }
++ }
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++class FamilyTypeface : public SkTypeface {
++public:
++ FamilyTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ bool isFixedWidth)
++ : SkTypeface(style, sk_atomic_inc(&gUniqueFontID) + 1, isFixedWidth) {
++ fIsSysFont = sysFont;
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyRec* rec = NULL;
++ if (familyMember) {
++ rec = find_family(familyMember);
++ SkASSERT(rec);
++ } else {
++ rec = SkNEW(FamilyRec);
++ }
++ rec->fFaces[style] = this;
++ }
++
++ virtual ~FamilyTypeface() {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // remove us from our family. If the family is now empty, we return
++ // that and then remove that family from the name list
++ FamilyRec* family = remove_from_family(this);
++ if (NULL != family) {
++ remove_from_names(family);
++ detach_and_delete_family(family);
++ }
++ }
++
++ bool isSysFont() const { return fIsSysFont; }
++
++ virtual SkStream* openStream() = 0;
++ virtual const char* getUniqueString() const = 0;
++ virtual const char* getFilePath() const = 0;
++
++private:
++ bool fIsSysFont;
++
++ typedef SkTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++
++class StreamTypeface : public FamilyTypeface {
++public:
++ StreamTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ SkStream* stream, bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkASSERT(stream);
++ stream->ref();
++ fStream = stream;
++ }
++ virtual ~StreamTypeface() {
++ fStream->unref();
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ // we just ref our existing stream, since the caller will call unref()
++ // when they are through
++ fStream->ref();
++ // must rewind each time, since the caller assumes a "new" stream
++ fStream->rewind();
++ return fStream;
++ }
++ virtual const char* getUniqueString() const { return NULL; }
++ virtual const char* getFilePath() const { return NULL; }
++
++private:
++ SkStream* fStream;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++class FileTypeface : public FamilyTypeface {
++public:
++ FileTypeface(Style style, bool sysFont, SkTypeface* familyMember,
++ const char path[], bool isFixedWidth)
++ : INHERITED(style, sysFont, familyMember, isFixedWidth) {
++ SkString fullpath;
++
++ if (sysFont) {
++ GetFullPathForSysFonts(&fullpath, path);
++ path = fullpath.c_str();
++ }
++ fPath.set(path);
++ }
++
++ // overrides
++ virtual SkStream* openStream() {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (fPath.c_str()));
++
++ // check for failure
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ // maybe MMAP isn't supported. try FILE
++ stream = SkNEW_ARGS(SkFILEStream, (fPath.c_str()));
++ if (stream->getLength() <= 0) {
++ SkDELETE(stream);
++ stream = NULL;
++ }
++ }
++ return stream;
++ }
++ virtual const char* getUniqueString() const {
++ const char* str = strrchr(fPath.c_str(), '/');
++ if (str) {
++ str += 1; // skip the '/'
++ }
++ return str;
++ }
++ virtual const char* getFilePath() const {
++ return fPath.c_str();
++ }
++
++private:
++ SkString fPath;
++
++ typedef FamilyTypeface INHERITED;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++///////////////////////////////////////////////////////////////////////////////
++
++static bool get_name_and_style(const char path[], SkString* name,
++ SkTypeface::Style* style,
++ bool* isFixedWidth, bool isExpected) {
++ SkString fullpath;
++ GetFullPathForSysFonts(&fullpath, path);
++
++ SkMMAPStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ else {
++ SkFILEStream stream(fullpath.c_str());
++ if (stream.getLength() > 0) {
++ find_name_and_attributes(&stream, name, style, isFixedWidth);
++ return true;
++ }
++ }
++
++ if (isExpected) {
++ SkDebugf("---- failed to open <%s> as a font\n", fullpath.c_str());
++ }
++ return false;
++}
++
++// used to record our notion of the pre-existing fonts
++struct FontInitRec {
++ const char* fFileName;
++ const char* const* fNames; // null-terminated list
++};
++
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
++// deliberately empty, but we use the address to identify fallback fonts
++static const char* gFBNames[] = { NULL };
++
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
++
++#define DEFAULT_NAMES gSansNames
++
++// these globals are assigned (once) by load_system_fonts()
++static FamilyRec* gDefaultFamily;
++static SkTypeface* gDefaultNormal;
++
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
++
++/* Called once (ensured by the sentinel check at the beginning of our body).
++ Initializes all the globals, and register the system fonts.
++ */
++static void load_system_fonts() {
++ // check if we've already be called
++ if (NULL != gDefaultNormal) {
++ return;
++ }
++
++ const FontInitRec* rec = gSystemFonts;
++ SkTypeface* firstInFamily = NULL;
++ int fallbackCount = 0;
++
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ // if we're the first in a new family, clear firstInFamily
++ if (rec[i].fNames != NULL) {
++ firstInFamily = NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++
++ // we expect all the fonts, except the "fallback" fonts
++ bool isExpected = (rec[i].fNames != gFBNames);
++ if (!get_name_and_style(rec[i].fFileName, &name, &style,
++ &isFixedWidth, isExpected)) {
++ continue;
++ }
++
++ SkTypeface* tf = SkNEW_ARGS(FileTypeface,
++ (style,
++ true, // system-font (cannot delete)
++ firstInFamily, // what family to join
++ rec[i].fFileName,
++ isFixedWidth) // filename
++ );
++
++ if (rec[i].fNames != NULL) {
++ // see if this is one of our fallback fonts
++ if (rec[i].fNames == gFBNames) {
++ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
++ // rec[i].fFileName, fallbackCount, tf->uniqueID());
++ gFallbackFonts[fallbackCount++] = tf->uniqueID();
++ }
++
++ firstInFamily = tf;
++ FamilyRec* family = find_family(tf);
++ const char* const* names = rec[i].fNames;
++
++ // record the default family if this is it
++ if (names == DEFAULT_NAMES) {
++ gDefaultFamily = family;
++ }
++ // add the names to map to this family
++ while (*names) {
++ add_name(*names, family);
++ names += 1;
++ }
++ }
++ }
++
++ // do this after all fonts are loaded. This is our default font, and it
++ // acts as a sentinel so we only execute load_system_fonts() once
++ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
++ // now terminate our fallback list with the sentinel value
++ gFallbackFonts[fallbackCount] = 0;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
++
++ stream->write8((uint8_t)face->style());
++
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
++ } else {
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
++ }
++}
++
++SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
++ load_system_fonts();
++
++ int style = stream->readU8();
++
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
++
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], (SkTypeface::Style)style);
++ }
++ }
++ }
++ }
++ }
++ return NULL;
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypeface(const SkTypeface* familyFace,
++ const char familyName[],
++ SkTypeface::Style style) {
++ load_system_fonts();
++
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ // clip to legal style bits
++ style = (SkTypeface::Style)(style & SkTypeface::kBoldItalic);
++
++ SkTypeface* tf = NULL;
++
++ if (NULL != familyFace) {
++ tf = find_typeface(familyFace, style);
++ } else if (NULL != familyName) {
++// SkDebugf("======= familyName <%s>\n", familyName);
++ tf = find_typeface(familyName, style);
++ }
++
++ if (NULL == tf) {
++ tf = find_best_face(gDefaultFamily, style);
++ }
++
++ // we ref(), since the symantic is to return a new instance
++ tf->ref();
++ return tf;
++}
++
++SkStream* SkFontHost::OpenStream(uint32_t fontID) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ SkStream* stream = tf ? tf->openStream() : NULL;
++
++ if (stream && stream->getLength() == 0) {
++ stream->unref();
++ stream = NULL;
++ }
++ return stream;
++}
++
++size_t SkFontHost::GetFileName(SkFontID fontID, char path[], size_t length,
++ int32_t* index) {
++ SkAutoMutexAcquire ac(gFamilyMutex);
++
++ FamilyTypeface* tf = (FamilyTypeface*)find_from_uniqueID(fontID);
++ const char* src = tf ? tf->getFilePath() : NULL;
++
++ if (src) {
++ size_t size = strlen(src);
++ if (path) {
++ memcpy(path, src, SkMin32(size, length));
++ }
++ if (index) {
++ *index = 0; // we don't have collections (yet)
++ }
++ return size;
++ } else {
++ return 0;
++ }
++}
++
++SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
++ load_system_fonts();
++
++ /* First see if fontID is already one of our fallbacks. If so, return
++ its successor. If fontID is not in our list, then return the first one
++ in our list. Note: list is zero-terminated, and returning zero means
++ we have no more fonts to use for fallbacks.
++ */
++ const uint32_t* list = gFallbackFonts;
++ for (int i = 0; list[i] != 0; i++) {
++ if (list[i] == currFontID) {
++ return list[i+1];
++ }
++ }
++ return list[0];
++}
++
++///////////////////////////////////////////////////////////////////////////////
++
++SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
++ if (NULL == stream || stream->getLength() <= 0) {
++ return NULL;
++ }
++
++ bool isFixedWidth;
++ SkString name;
++ SkTypeface::Style style;
++ find_name_and_attributes(stream, &name, &style, &isFixedWidth);
++
++ if (!name.isEmpty()) {
++ return SkNEW_ARGS(StreamTypeface, (style, false, NULL, stream, isFixedWidth));
++ } else {
++ return NULL;
++ }
++}
++
++SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
++ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
++ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
++ // since we created the stream, we let go of our ref() here
++ stream->unref();
++ return face;
++}
++
++///////////////////////////////////////////////////////////////////////////////
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch b/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch
new file mode 100644
index 0000000000..73bca9a48d
--- /dev/null
+++ b/gfx/skia/patches/archive/0007-Bug-848491-Re-apply-bug-687188-Expand-the-gradient-c.patch
@@ -0,0 +1,168 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:47:06 -0400
+Subject: Bug 848491 - Re-apply bug 687188 - Expand the gradient cache by 2 to store 0/1 colour stop values for clamping.
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index 684355d..27a9c46 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -453,15 +453,15 @@ const uint16_t* SkGradientShaderBase::getCache16() const {
+
+ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ if (fCache32 == NULL) {
+- // double the count for dither entries
+- const int entryCount = kCache32Count * 4;
++ // double the count for dither entries, and have an extra two entries for clamp values
++ const int entryCount = kCache32Count * 4 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kCache32Count, fCacheAlpha);
+@@ -484,7 +484,7 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kCache32Count; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -495,9 +495,21 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ }
++
++ // Write the clamp colours into the first and last entries of fCache32
++ fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
++
+ return fCache32;
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+index 729ce4e..2cb6a9d 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
++++ b/gfx/skia/src/effects/gradients/SkGradientShaderPriv.h
+@@ -86,6 +86,9 @@ public:
+ /// if dithering is disabled.
+ kDitherStride32 = kCache32Count,
+ kDitherStride16 = kCache16Count,
++
++ kCache32ClampLower = -1,
++ kCache32ClampUpper = kCache32Count * 4
+ };
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+index e0f216c..40ab918 100644
+--- a/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkLinearGradient.cpp
+@@ -127,6 +127,17 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // No need to lerp or dither for clamp values
++ if (fx < 0) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
++ return;
++ } else if (fx > 0xffff) {
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -154,10 +165,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, SkGradientShaderBase::kCache32Count - 1);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[next_dither_toggle(toggle) + range.fV0],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampLower], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -176,10 +184,7 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[next_dither_toggle(toggle) + range.fV1],
+- count);
++ sk_memset32(dstC, cache[SkGradientShaderBase::kCache32ClampUpper], count);
+ }
+ }
+
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+index abd974b..601fff4 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointConicalGradient.cpp
+@@ -124,10 +124,14 @@ static void twopoint_clamp(TwoPtRadial* rec, SkPMColor* SK_RESTRICT dstC,
+ if (TwoPtRadial::DontDrawT(t)) {
+ *dstC++ = 0;
+ } else {
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[toggle +
+- (index >> SkGradientShaderBase::kCache32Shift)];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ }
+ toggle = next_dither_toggle(toggle);
+ }
+diff --git a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+index f70b67d..ec2ae75 100644
+--- a/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
++++ b/gfx/skia/src/effects/gradients/SkTwoPointRadialGradient.cpp
+@@ -120,9 +120,14 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> SkGradientShaderBase::kCache32Shift];
++ if (t < 0) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampLower];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[SkGradientShaderBase::kCache32ClampUpper];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> SkGradientShaderBase::kCache32Shift];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch b/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch
new file mode 100644
index 0000000000..0f60dbd8ea
--- /dev/null
+++ b/gfx/skia/patches/archive/0008-Bug-687188-Skia-radial-gradients.patch
@@ -0,0 +1,173 @@
+From f941ea32e44a2436d235e83ef1a434289a9d9c1e Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 11:40:25 -0400
+Subject: [PATCH 08/10] Bug 755869 - [11] Re-apply bug 687188 - Skia
+ radial gradients should use the 0/1 color stop values
+ for clamping. r=mattwoodrow
+
+---
+ gfx/skia/src/effects/SkGradientShader.cpp | 76 +++++++++++++++++++++++------
+ 1 files changed, 61 insertions(+), 15 deletions(-)
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+index 59ba48c..ea05a39 100644
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -204,6 +204,7 @@ private:
+ mutable SkMallocPixelRef* fCache32PixelRef;
+ mutable unsigned fCacheAlpha; // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value
+
++ static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha);
+ static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
+ static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
+ U8CPU alpha);
+@@ -507,6 +508,21 @@ static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
+ return ((n << 1) - (n | (n >> 8))) >> 8;
+ }
+
++SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha)
++{
++ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
++ SkFixed r = SkColorGetR(c0);
++ SkFixed g = SkColorGetG(c0);
++ SkFixed b = SkColorGetB(c0);
++
++ a = SkIntToFixed(a) + 0x8000;
++ r = SkIntToFixed(r) + 0x8000;
++ g = SkIntToFixed(g) + 0x8000;
++ b = SkIntToFixed(b) + 0x8000;
++
++ return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
++}
++
+ void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
+ int count, U8CPU paintAlpha) {
+ SkASSERT(count > 1);
+@@ -628,14 +644,14 @@ static void complete_32bit_cache(SkPMColor* cache, int stride) {
+ const SkPMColor* Gradient_Shader::getCache32() const {
+ if (fCache32 == NULL) {
+ // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kGradient32Length, fCacheAlpha);
+@@ -659,7 +675,7 @@ const SkPMColor* Gradient_Shader::getCache32() const {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kGradient32Length; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+@@ -668,10 +684,13 @@ const SkPMColor* Gradient_Shader::getCache32() const {
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ complete_32bit_cache(fCache32, kCache32Count);
+ }
++ //Write the clamp colours into the first and last entries of fCache32
++ fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha);
++ fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha);
+ return fCache32;
+ }
+
+@@ -857,6 +876,18 @@ void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // Read out clamp values from beginning/end of the cache. No need to lerp
++ // or dither
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ return;
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -875,6 +906,18 @@ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
++ if (proc == clamp_tileproc) {
++ // Read out clamp values from beginning/end of the cache. No need to lerp
++ // or dither
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ return;
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
++ return;
++ }
++ }
++
+ // We're a vertical gradient, so no change in a span.
+ // If colors change sharply across the gradient, dithering is
+ // insufficient (it subsamples the color space) and we need to lerp.
+@@ -900,10 +943,8 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ range.init(fx, dx, count, 0, Gradient_Shader::kGradient32Length);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV0],
+- count);
++ // Shouldn't be any need to dither for clamping?
++ sk_memset32(dstC, cache[-1], count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+@@ -922,10 +963,8 @@ void shadeSpan_linear_clamp(TileProc proc, SkFixed dx, SkFixed fx,
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + range.fV1],
+- count);
++ // Shouldn't be any need to dither for clamping?
++ sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
+ }
+ }
+
+@@ -1796,9 +1835,16 @@ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+ fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[Gradient_Shader::kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> Gradient_Shader::kCache32Shift];
++ }
++
+ fx += dx;
+ fy += dy;
+ b += db;
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch b/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch
new file mode 100644
index 0000000000..58961d6e06
--- /dev/null
+++ b/gfx/skia/patches/archive/0008-Bug-848491-Re-apply-759683-Handle-compilers-that-don.patch
@@ -0,0 +1,35 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:49:45 -0400
+Subject: Bug 848491 - Re-apply 759683 - Handle compilers that don't support SSSE3 intrinsics
+
+
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+index 6370058..18f68d6 100644
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -86,9 +86,13 @@ static inline bool hasSSSE3() {
+ #else
+
+ static inline bool hasSSSE3() {
++#if defined(SK_BUILD_SSSE3)
+ int cpu_info[4] = { 0 };
+ getcpuid(1, cpu_info);
+ return (cpu_info[2] & 0x200) != 0;
++#else
++ return false;
++#endif
+ }
+ #endif
+
+@@ -104,7 +108,7 @@ static bool cachedHasSSSE3() {
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if !defined(SK_BUILD_FOR_ANDROID) && defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch b/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch
new file mode 100644
index 0000000000..1e9a93f20a
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-755869-FreeBSD-Hurd.patch
@@ -0,0 +1,28 @@
+From df3be24040f7cb2f9c7ed86ad3e47206630e885f Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 14:49:57 -0400
+Subject: [PATCH 09/10] Bug 755869 - [12] Re-apply bug 749533 - Add
+ support for GNU/kFreeBSD and Hurd in Skia.
+ r=mattwoodrow
+
+---
+ gfx/skia/include/core/SkPreConfig.h | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/gfx/skia/include/core/SkPreConfig.h b/gfx/skia/include/core/SkPreConfig.h
+index 46c6929..16c4d6c 100644
+--- a/gfx/skia/include/core/SkPreConfig.h
++++ b/gfx/skia/include/core/SkPreConfig.h
+@@ -35,7 +35,8 @@
+ #elif defined(ANDROID)
+ #define SK_BUILD_FOR_ANDROID
+ #elif defined(linux) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
+- defined(__sun) || defined(__NetBSD__) || defined(__DragonFly__)
++ defined(__sun) || defined(__NetBSD__) || defined(__DragonFly__) || \
++ defined(__GLIBC__) || defined(__GNU__)
+ #define SK_BUILD_FOR_UNIX
+ #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #define SK_BUILD_FOR_IOS
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch b/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch
new file mode 100644
index 0000000000..1da208ed18
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-777614-Re-apply-759683-Handle-compilers-that-don.patch
@@ -0,0 +1,40 @@
+From 2c5a8cebc806ed287ce7c3723ea64a233266cd9e Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 13 Sep 2012 14:55:33 -0400
+Subject: [PATCH 9/9] Bug 777614 - Re-apply 759683 - Handle compilers that
+ don't support SSSE3 intrinsics r=nrc
+
+---
+ gfx/skia/src/opts/opts_check_SSE2.cpp | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+index 96d0dea..add6d5f 100644
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -86,9 +86,13 @@ static inline bool hasSSSE3() {
+ #else
+
+ static inline bool hasSSSE3() {
++#if defined(SK_BUILD_SSSE3)
+ int cpu_info[4] = { 0 };
+ getcpuid(1, cpu_info);
+ return (cpu_info[2] & 0x200) != 0;
++#else
++ return false;
++#endif
+ }
+ #endif
+
+@@ -104,7 +108,7 @@ static bool cachedHasSSSE3() {
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if !defined(SK_BUILD_FOR_ANDROID) && defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+--
+1.7.11.4
+
diff --git a/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch b/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch
new file mode 100644
index 0000000000..9778015c4f
--- /dev/null
+++ b/gfx/skia/patches/archive/0009-Bug-848491-Re-apply-bug-751418-Add-our-own-GrUserCon.patch
@@ -0,0 +1,23 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:52:32 -0400
+Subject: Bug 848491 - Re-apply bug 751418 - Add our own GrUserConfig
+
+
+diff --git a/gfx/skia/include/gpu/GrUserConfig.h b/gfx/skia/include/gpu/GrUserConfig.h
+index 11d4feb..77ab850 100644
+--- a/gfx/skia/include/gpu/GrUserConfig.h
++++ b/gfx/skia/include/gpu/GrUserConfig.h
+@@ -43,6 +43,10 @@
+ */
+ //#define GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT 96
+
++/*
++ * This allows us to set a callback to be called before each GL call to ensure
++ * that our context is set correctly
++ */
+ #define GR_GL_PER_GL_FUNC_CALLBACK 1
+
+ #endif
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch b/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch
new file mode 100644
index 0000000000..bd6604b4bd
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-689069-ARM-Opts.patch
@@ -0,0 +1,36 @@
+From dc1292fc8c2b9da900ebcac953120eaffd0d329e Mon Sep 17 00:00:00 2001
+From: George Wright <gwright@mozilla.com>
+Date: Wed, 23 May 2012 14:52:36 -0400
+Subject: [PATCH 10/10] Bug 755869 - [13] Re-apply bug 750733 - Use
+ handles in API object hooks where possible
+ r=mattwoodrow
+
+---
+ gfx/skia/src/xml/SkJS.cpp | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gfx/skia/src/xml/SkJS.cpp b/gfx/skia/src/xml/SkJS.cpp
+index f2e7a83..b2717d7 100644
+--- a/gfx/skia/src/xml/SkJS.cpp
++++ b/gfx/skia/src/xml/SkJS.cpp
+@@ -74,7 +74,7 @@ extern "C" {
+ #endif
+
+ static bool
+-global_enumerate(JSContext *cx, JSObject *obj)
++global_enumerate(JSContext *cx, JSHandleObject *obj)
+ {
+ #ifdef LAZY_STANDARD_CLASSES
+ return JS_EnumerateStandardClasses(cx, obj);
+@@ -84,7 +84,7 @@ global_enumerate(JSContext *cx, JSObject *obj)
+ }
+
+ static bool
+-global_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp)
++global_resolve(JSContext *cx, JSHandleObject obj, JSHandleId id, unsigned flags, JSObject **objp)
+ {
+ #ifdef LAZY_STANDARD_CLASSES
+ if ((flags & JSRESOLVE_ASSIGNING) == 0) {
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch b/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch
new file mode 100644
index 0000000000..a446037de0
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-836892-Add-new-blending-modes-to-SkXfermode.patch
@@ -0,0 +1,698 @@
+# HG changeset patch
+# User Rik Cabanier <cabanier@adobe.com>
+# Date 1360273929 -46800
+# Node ID 3ac8edca3a03b3d22240b5a5b95ae3b5ada9877d
+# Parent cbb67fe70b864b36165061e1fd3b083cd09af087
+Bug 836892 - Add new blending modes to SkXfermode. r=gw280
+
+diff --git a/gfx/skia/include/core/SkXfermode.h b/gfx/skia/include/core/SkXfermode.h
+--- a/gfx/skia/include/core/SkXfermode.h
++++ b/gfx/skia/include/core/SkXfermode.h
+@@ -96,33 +96,37 @@ public:
+ kDstOut_Mode, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
+ kSrcATop_Mode, //!< [Da, Sc * Da + (1 - Sa) * Dc]
+ kDstATop_Mode, //!< [Sa, Sa * Dc + Sc * (1 - Da)]
+ kXor_Mode, //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
+
+ // all remaining modes are defined in the SVG Compositing standard
+ // http://www.w3.org/TR/2009/WD-SVGCompositing-20090430/
+ kPlus_Mode,
+- kMultiply_Mode,
+
+ // all above modes can be expressed as pair of src/dst Coeffs
+ kCoeffModesCnt,
+
+- kScreen_Mode = kCoeffModesCnt,
++ kMultiply_Mode = kCoeffModesCnt,
++ kScreen_Mode,
+ kOverlay_Mode,
+ kDarken_Mode,
+ kLighten_Mode,
+ kColorDodge_Mode,
+ kColorBurn_Mode,
+ kHardLight_Mode,
+ kSoftLight_Mode,
+ kDifference_Mode,
+ kExclusion_Mode,
++ kHue_Mode,
++ kSaturation_Mode,
++ kColor_Mode,
++ kLuminosity_Mode,
+
+- kLastMode = kExclusion_Mode
++ kLastMode = kLuminosity_Mode
+ };
+
+ /**
+ * If the xfermode is one of the modes in the Mode enum, then asMode()
+ * returns true and sets (if not null) mode accordingly. Otherwise it
+ * returns false and ignores the mode parameter.
+ */
+ virtual bool asMode(Mode* mode);
+diff --git a/gfx/skia/src/core/SkXfermode.cpp b/gfx/skia/src/core/SkXfermode.cpp
+--- a/gfx/skia/src/core/SkXfermode.cpp
++++ b/gfx/skia/src/core/SkXfermode.cpp
+@@ -7,16 +7,18 @@
+ */
+
+
+ #include "SkXfermode.h"
+ #include "SkColorPriv.h"
+ #include "SkFlattenableBuffers.h"
+ #include "SkMathPriv.h"
+
++#include <algorithm>
++
+ SK_DEFINE_INST_COUNT(SkXfermode)
+
+ #define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b)
+
+ #if 0
+ // idea for higher precision blends in xfer procs (and slightly faster)
+ // see DstATop as a probable caller
+ static U8CPU mulmuldiv255round(U8CPU a, U8CPU b, U8CPU c, U8CPU d) {
+@@ -176,244 +178,439 @@ static SkPMColor xor_modeproc(SkPMColor
+ static SkPMColor plus_modeproc(SkPMColor src, SkPMColor dst) {
+ unsigned b = saturated_add(SkGetPackedB32(src), SkGetPackedB32(dst));
+ unsigned g = saturated_add(SkGetPackedG32(src), SkGetPackedG32(dst));
+ unsigned r = saturated_add(SkGetPackedR32(src), SkGetPackedR32(dst));
+ unsigned a = saturated_add(SkGetPackedA32(src), SkGetPackedA32(dst));
+ return SkPackARGB32(a, r, g, b);
+ }
+
++static inline int srcover_byte(int a, int b) {
++ return a + b - SkAlphaMulAlpha(a, b);
++}
++
++#define blendfunc_byte(sc, dc, sa, da, blendfunc) \
++ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + blendfunc(sc, dc, sa, da))
++
+ // kMultiply_Mode
++static inline int multiply_byte(int sc, int dc, int sa, int da) {
++ return sc * dc;
++}
+ static SkPMColor multiply_modeproc(SkPMColor src, SkPMColor dst) {
+- int a = SkAlphaMulAlpha(SkGetPackedA32(src), SkGetPackedA32(dst));
+- int r = SkAlphaMulAlpha(SkGetPackedR32(src), SkGetPackedR32(dst));
+- int g = SkAlphaMulAlpha(SkGetPackedG32(src), SkGetPackedG32(dst));
+- int b = SkAlphaMulAlpha(SkGetPackedB32(src), SkGetPackedB32(dst));
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, multiply_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, multiply_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, multiply_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kScreen_Mode
+-static inline int srcover_byte(int a, int b) {
+- return a + b - SkAlphaMulAlpha(a, b);
++static inline int screen_byte(int sc, int dc, int sa, int da) {
++ return sc * da + sa * dc - sc * dc;
+ }
+ static SkPMColor screen_modeproc(SkPMColor src, SkPMColor dst) {
+- int a = srcover_byte(SkGetPackedA32(src), SkGetPackedA32(dst));
+- int r = srcover_byte(SkGetPackedR32(src), SkGetPackedR32(dst));
+- int g = srcover_byte(SkGetPackedG32(src), SkGetPackedG32(dst));
+- int b = srcover_byte(SkGetPackedB32(src), SkGetPackedB32(dst));
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, screen_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, screen_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, screen_byte);
++ return SkPackARGB32(a, r, g, b);
++}
++
++// kHardLight_Mode
++static inline int hardlight_byte(int sc, int dc, int sa, int da) {
++ if(!sa || !da)
++ return sc * da;
++ float Sc = (float)sc/sa;
++ float Dc = (float)dc/da;
++ if(Sc <= 0.5)
++ Sc *= 2 * Dc;
++ else
++ Sc = -1 + 2 * Sc + 2 * Dc - 2 * Sc * Dc;
++
++ return Sc * sa * da;
++}
++static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
++ int sa = SkGetPackedA32(src);
++ int da = SkGetPackedA32(dst);
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, hardlight_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, hardlight_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, hardlight_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kOverlay_Mode
+ static inline int overlay_byte(int sc, int dc, int sa, int da) {
+- int tmp = sc * (255 - da) + dc * (255 - sa);
+- int rc;
+- if (2 * dc <= da) {
+- rc = 2 * sc * dc;
+- } else {
+- rc = sa * da - 2 * (da - dc) * (sa - sc);
+- }
+- return clamp_div255round(rc + tmp);
++ return hardlight_byte(dc, sc, da, sa);
+ }
+ static SkPMColor overlay_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = overlay_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = overlay_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = overlay_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, overlay_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, overlay_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, overlay_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kDarken_Mode
+ static inline int darken_byte(int sc, int dc, int sa, int da) {
+- int sd = sc * da;
+- int ds = dc * sa;
+- if (sd < ds) {
+- // srcover
+- return sc + dc - SkDiv255Round(ds);
+- } else {
+- // dstover
+- return dc + sc - SkDiv255Round(sd);
+- }
++ return SkMin32(sc * da, sa * dc);
+ }
+ static SkPMColor darken_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = darken_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = darken_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = darken_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, darken_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, darken_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, darken_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kLighten_Mode
+ static inline int lighten_byte(int sc, int dc, int sa, int da) {
+- int sd = sc * da;
+- int ds = dc * sa;
+- if (sd > ds) {
+- // srcover
+- return sc + dc - SkDiv255Round(ds);
+- } else {
+- // dstover
+- return dc + sc - SkDiv255Round(sd);
+- }
++ return SkMax32(sc * da, sa * dc);
+ }
+ static SkPMColor lighten_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = lighten_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = lighten_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = lighten_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, lighten_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, lighten_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, lighten_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kColorDodge_Mode
+ static inline int colordodge_byte(int sc, int dc, int sa, int da) {
+- int diff = sa - sc;
+- int rc;
+- if (0 == diff) {
+- rc = sa * da + sc * (255 - da) + dc * (255 - sa);
+- rc = SkDiv255Round(rc);
+- } else {
+- int tmp = (dc * sa << 15) / (da * diff);
+- rc = SkDiv255Round(sa * da) * tmp >> 15;
+- // don't clamp here, since we'll do it in our modeproc
+- }
+- return rc;
++ if (dc == 0)
++ return 0;
++ // Avoid division by 0
++ if (sc == sa)
++ return da * sa;
++
++ return SkMin32(sa * da, sa * sa * dc / (sa - sc));
+ }
+ static SkPMColor colordodge_modeproc(SkPMColor src, SkPMColor dst) {
+- // added to avoid div-by-zero in colordodge_byte
+- if (0 == dst) {
+- return src;
+- }
+-
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = colordodge_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = colordodge_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = colordodge_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+- r = clamp_max(r, a);
+- g = clamp_max(g, a);
+- b = clamp_max(b, a);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colordodge_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colordodge_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colordodge_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kColorBurn_Mode
+ static inline int colorburn_byte(int sc, int dc, int sa, int da) {
+- int rc;
+- if (dc == da && 0 == sc) {
+- rc = sa * da + dc * (255 - sa);
+- } else if (0 == sc) {
+- return SkAlphaMulAlpha(dc, 255 - sa);
+- } else {
+- int tmp = (sa * (da - dc) * 256) / (sc * da);
+- if (tmp > 256) {
+- tmp = 256;
+- }
+- int tmp2 = sa * da;
+- rc = tmp2 - (tmp2 * tmp >> 8) + sc * (255 - da) + dc * (255 - sa);
+- }
+- return SkDiv255Round(rc);
++ // Avoid division by 0
++ if(dc == da)
++ return sa * da;
++ if(sc == 0)
++ return 0;
++
++ return sa * da - SkMin32(sa * da, sa * sa * (da - dc) / sc);
+ }
+ static SkPMColor colorburn_modeproc(SkPMColor src, SkPMColor dst) {
+- // added to avoid div-by-zero in colorburn_byte
+- if (0 == dst) {
+- return src;
+- }
+-
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = colorburn_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = colorburn_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = colorburn_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
+- return SkPackARGB32(a, r, g, b);
+-}
+-
+-// kHardLight_Mode
+-static inline int hardlight_byte(int sc, int dc, int sa, int da) {
+- int rc;
+- if (2 * sc <= sa) {
+- rc = 2 * sc * dc;
+- } else {
+- rc = sa * da - 2 * (da - dc) * (sa - sc);
+- }
+- return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
+-}
+-static SkPMColor hardlight_modeproc(SkPMColor src, SkPMColor dst) {
+- int sa = SkGetPackedA32(src);
+- int da = SkGetPackedA32(dst);
+- int a = srcover_byte(sa, da);
+- int r = hardlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = hardlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = hardlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, colorburn_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, colorburn_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, colorburn_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // returns 255 * sqrt(n/255)
+ static U8CPU sqrt_unit_byte(U8CPU n) {
+ return SkSqrtBits(n, 15+4);
+ }
+
+ // kSoftLight_Mode
+ static inline int softlight_byte(int sc, int dc, int sa, int da) {
+ int m = da ? dc * 256 / da : 0;
+ int rc;
+- if (2 * sc <= sa) {
+- rc = dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
+- } else if (4 * dc <= da) {
++ if (2 * sc <= sa)
++ return dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
++
++ if (4 * dc <= da) {
+ int tmp = (4 * m * (4 * m + 256) * (m - 256) >> 16) + 7 * m;
+- rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+- } else {
+- int tmp = sqrt_unit_byte(m) - m;
+- rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
++ return dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ }
+- return clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));
++ int tmp = sqrt_unit_byte(m) - m;
++ return rc = dc * sa + (da * (2 * sc - sa) * tmp >> 8);
+ }
+ static SkPMColor softlight_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = softlight_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = softlight_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = softlight_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, softlight_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, softlight_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, softlight_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kDifference_Mode
+ static inline int difference_byte(int sc, int dc, int sa, int da) {
+- int tmp = SkMin32(sc * da, dc * sa);
+- return clamp_signed_byte(sc + dc - 2 * SkDiv255Round(tmp));
++ int tmp = dc * sa - sc * da;
++ if(tmp<0)
++ return - tmp;
++
++ return tmp;
+ }
+ static SkPMColor difference_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = difference_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = difference_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = difference_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, difference_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, difference_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, difference_byte);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ // kExclusion_Mode
+ static inline int exclusion_byte(int sc, int dc, int sa, int da) {
+- // this equations is wacky, wait for SVG to confirm it
+- int r = sc * da + dc * sa - 2 * sc * dc + sc * (255 - da) + dc * (255 - sa);
+- return clamp_div255round(r);
++ return sc * da + dc * sa - 2 * dc * sc;
+ }
+ static SkPMColor exclusion_modeproc(SkPMColor src, SkPMColor dst) {
+ int sa = SkGetPackedA32(src);
+ int da = SkGetPackedA32(dst);
+ int a = srcover_byte(sa, da);
+- int r = exclusion_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da);
+- int g = exclusion_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da);
+- int b = exclusion_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da);
++ int r = blendfunc_byte(SkGetPackedR32(src), SkGetPackedR32(dst), sa, da, exclusion_byte);
++ int g = blendfunc_byte(SkGetPackedG32(src), SkGetPackedG32(dst), sa, da, exclusion_byte);
++ int b = blendfunc_byte(SkGetPackedB32(src), SkGetPackedB32(dst), sa, da, exclusion_byte);
++ return SkPackARGB32(a, r, g, b);
++}
++
++///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
++struct BlendColor {
++ float r;
++ float g;
++ float b;
++
++ BlendColor(): r(0), g(0), b(0)
++ {}
++};
++
++static inline float Lum(BlendColor C)
++{
++ return C.r * 0.3 + C.g * 0.59 + C.b* 0.11;
++}
++
++static inline float SkMinFloat(float a, float b)
++{
++ if (a > b)
++ a = b;
++ return a;
++}
++
++static inline float SkMaxFloat(float a, float b)
++{
++ if (a < b)
++ a = b;
++ return a;
++}
++
++#define minimum(C) SkMinFloat(SkMinFloat(C.r, C.g), C.b)
++#define maximum(C) SkMaxFloat(SkMaxFloat(C.r, C.g), C.b)
++
++static inline float Sat(BlendColor c) {
++ return maximum(c) - minimum(c);
++}
++
++static inline void setSaturationComponents(float& Cmin, float& Cmid, float& Cmax, float s) {
++ if(Cmax > Cmin) {
++ Cmid = (((Cmid - Cmin) * s ) / (Cmax - Cmin));
++ Cmax = s;
++ } else {
++ Cmax = 0;
++ Cmid = 0;
++ }
++ Cmin = 0;
++}
++
++static inline BlendColor SetSat(BlendColor C, float s) {
++ if(C.r <= C.g) {
++ if(C.g <= C.b)
++ setSaturationComponents(C.r, C.g, C.b, s);
++ else
++ if(C.r <= C.b)
++ setSaturationComponents(C.r, C.b, C.g, s);
++ else
++ setSaturationComponents(C.b, C.r, C.g, s);
++ } else if(C.r <= C.b)
++ setSaturationComponents(C.g, C.r, C.b, s);
++ else
++ if(C.g <= C.b)
++ setSaturationComponents(C.g, C.b, C.r, s);
++ else
++ setSaturationComponents(C.b, C.g, C.r, s);
++
++ return C;
++}
++
++static inline BlendColor clipColor(BlendColor C) {
++ float L = Lum(C);
++ float n = minimum(C);
++ float x = maximum(C);
++ if(n < 0) {
++ C.r = L + (((C.r - L) * L) / (L - n));
++ C.g = L + (((C.g - L) * L) / (L - n));
++ C.b = L + (((C.b - L) * L) / (L - n));
++ }
++
++ if(x > 1) {
++ C.r = L + (((C.r - L) * (1 - L)) / (x - L));
++ C.g = L + (((C.g - L) * (1 - L)) / (x - L));
++ C.b = L + (((C.b - L) * (1 - L)) / (x - L));
++ }
++ return C;
++}
++
++static inline BlendColor SetLum(BlendColor C, float l) {
++ float d = l - Lum(C);
++ C.r += d;
++ C.g += d;
++ C.b += d;
++
++ return clipColor(C);
++}
++
++#define blendfunc_nonsep_byte(sc, dc, sa, da, blendval) \
++ clamp_div255round(sc * (255 - da) + dc * (255 - sa) + (int)(sa * da * blendval))
++
++static SkPMColor hue_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(SetSat(Cs, Sat(Cd)), Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor saturation_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(SetSat(Cd, Sat(Cs)), Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor color_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(Cs, Lum(Cd));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
++ return SkPackARGB32(a, r, g, b);
++}
++
++static SkPMColor luminosity_modeproc(SkPMColor src, SkPMColor dst) {
++ int sr = SkGetPackedR32(src);
++ int sg = SkGetPackedG32(src);
++ int sb = SkGetPackedB32(src);
++ int sa = SkGetPackedA32(src);
++
++ int dr = SkGetPackedR32(dst);
++ int dg = SkGetPackedG32(dst);
++ int db = SkGetPackedB32(dst);
++ int da = SkGetPackedA32(dst);
++
++ BlendColor Cs;
++ if(sa) {
++ Cs.r = (float)sr / sa;
++ Cs.g = (float)sg / sa;
++ Cs.b = (float)sb / sa;
++ BlendColor Cd;
++ if(da) {
++ Cd.r = (float)dr / da;
++ Cd.g = (float)dg / da;
++ Cd.b = (float)db / da;
++ Cs = SetLum(Cd, Lum(Cs));
++ }
++ }
++
++ int a = srcover_byte(sa, da);
++ int r = blendfunc_nonsep_byte(sr, dr, sa, da, Cs.r);
++ int g = blendfunc_nonsep_byte(sg, dg, sa, da, Cs.g);
++ int b = blendfunc_nonsep_byte(sb, db, sa, da, Cs.b);
+ return SkPackARGB32(a, r, g, b);
+ }
+
+ struct ProcCoeff {
+ SkXfermodeProc fProc;
+ SkXfermode::Coeff fSC;
+ SkXfermode::Coeff fDC;
+ };
+@@ -430,27 +627,31 @@ static const ProcCoeff gProcCoeffs[] = {
+ { dstin_modeproc, SkXfermode::kZero_Coeff, SkXfermode::kSA_Coeff },
+ { srcout_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kZero_Coeff },
+ { dstout_modeproc, SkXfermode::kZero_Coeff, SkXfermode::kISA_Coeff },
+ { srcatop_modeproc, SkXfermode::kDA_Coeff, SkXfermode::kISA_Coeff },
+ { dstatop_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kSA_Coeff },
+ { xor_modeproc, SkXfermode::kIDA_Coeff, SkXfermode::kISA_Coeff },
+
+ { plus_modeproc, SkXfermode::kOne_Coeff, SkXfermode::kOne_Coeff },
+- { multiply_modeproc,SkXfermode::kZero_Coeff, SkXfermode::kSC_Coeff },
++ { multiply_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF},
+ { screen_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { overlay_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { darken_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { lighten_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { colordodge_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { colorburn_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { hardlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { softlight_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { difference_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ { exclusion_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { hue_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { saturation_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { color_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
++ { luminosity_modeproc, CANNOT_USE_COEFF, CANNOT_USE_COEFF },
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ bool SkXfermode::asCoeff(Coeff* src, Coeff* dst) {
+ return false;
+ }
+
+@@ -1172,16 +1373,20 @@ static const Proc16Rec gModeProcs16[] =
+ { darken_modeproc16_0, darken_modeproc16_255, NULL }, // darken
+ { lighten_modeproc16_0, lighten_modeproc16_255, NULL }, // lighten
+ { NULL, NULL, NULL }, // colordodge
+ { NULL, NULL, NULL }, // colorburn
+ { NULL, NULL, NULL }, // hardlight
+ { NULL, NULL, NULL }, // softlight
+ { NULL, NULL, NULL }, // difference
+ { NULL, NULL, NULL }, // exclusion
++ { NULL, NULL, NULL }, // hue
++ { NULL, NULL, NULL }, // saturation
++ { NULL, NULL, NULL }, // color
++ { NULL, NULL, NULL }, // luminosity
+ };
+
+ SkXfermodeProc16 SkXfermode::GetProc16(Mode mode, SkColor srcColor) {
+ SkXfermodeProc16 proc16 = NULL;
+ if ((unsigned)mode < kModeCount) {
+ const Proc16Rec& rec = gModeProcs16[mode];
+ unsigned a = SkColorGetA(srcColor);
+
diff --git a/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch b/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch
new file mode 100644
index 0000000000..0d44b008d6
--- /dev/null
+++ b/gfx/skia/patches/archive/0010-Bug-848491-Re-apply-bug-817356-Patch-Skia-to-recogni.patch
@@ -0,0 +1,22 @@
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 25 Apr 2013 20:55:02 -0400
+Subject: Bug 848491 - Re-apply bug 817356 - Patch Skia to recognize uppercase PPC/PPC64.
+
+
+diff --git a/gfx/skia/include/core/SkPreConfig.h b/gfx/skia/include/core/SkPreConfig.h
+index 11cb223..7e95b99 100644
+--- a/gfx/skia/include/core/SkPreConfig.h
++++ b/gfx/skia/include/core/SkPreConfig.h
+@@ -99,7 +99,8 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+- #if defined (__ppc__) || defined(__ppc64__)
++ #if defined (__ppc__) || defined(__PPC__) || defined(__ppc64__) \
++ || defined(__PPC64__)
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch b/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch
new file mode 100644
index 0000000000..95cb08a36f
--- /dev/null
+++ b/gfx/skia/patches/archive/0011-Bug-719575-Fix-clang-build.patch
@@ -0,0 +1,28 @@
+From cf855f31194ff071f2c787a7413d70a43f15f204 Mon Sep 17 00:00:00 2001
+From: Ehsan Akhgari <ehsan@mozilla.com>
+Date: Tue, 29 May 2012 15:39:55 -0400
+Subject: [PATCH] Bug 755869 - Re-apply patch from bug 719575 to fix clang
+ builds for the new Skia r=gw280
+
+---
+ gfx/skia/src/ports/SkFontHost_mac_coretext.cpp | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+index c43d1a6..ce5f409 100644
+--- a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
++++ b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+@@ -807,8 +807,8 @@ CGRGBPixel* Offscreen::getCG(const SkScalerContext_Mac& context, const SkGlyph&
+ void SkScalerContext_Mac::getVerticalOffset(CGGlyph glyphID, SkIPoint* offset) const {
+ CGSize vertOffset;
+ CTFontGetVerticalTranslationsForGlyphs(fCTVerticalFont, &glyphID, &vertOffset, 1);
+- const SkPoint trans = {SkFloatToScalar(vertOffset.width),
+- SkFloatToScalar(vertOffset.height)};
++ const SkPoint trans = {SkScalar(SkFloatToScalar(vertOffset.width)),
++ SkScalar(SkFloatToScalar(vertOffset.height))};
+ SkPoint floatOffset;
+ fVerticalMatrix.mapPoints(&floatOffset, &trans, 1);
+ if (!isSnowLeopard()) {
+--
+1.7.5.4
+
diff --git a/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch b/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch
new file mode 100644
index 0000000000..854f0b1afe
--- /dev/null
+++ b/gfx/skia/patches/archive/0011-Bug-839347-no-anon-namespace-around-SkNO_RETURN_HINT.patch
@@ -0,0 +1,31 @@
+# HG changeset patch
+# Parent 2c6da9f02606f7a02f635d99ef8cf669d3bc5c4b
+# User Daniel Holbert <dholbert@cs.stanford.edu>
+Bug 839347: Move SkNO_RETURN_HINT out of anonymous namespace so that clang won't warn about it being unused. r=mattwoodrow
+
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -63,20 +63,18 @@
+ * The clang static analyzer likes to know that when the program is not
+ * expected to continue (crash, assertion failure, etc). It will notice that
+ * some combination of parameters lead to a function call that does not return.
+ * It can then make appropriate assumptions about the parameters in code
+ * executed only if the non-returning function was *not* called.
+ */
+ #if !defined(SkNO_RETURN_HINT)
+ #if SK_HAS_COMPILER_FEATURE(attribute_analyzer_noreturn)
+- namespace {
+- inline void SkNO_RETURN_HINT() __attribute__((analyzer_noreturn));
+- inline void SkNO_RETURN_HINT() {}
+- }
++ inline void SkNO_RETURN_HINT() __attribute__((analyzer_noreturn));
++ inline void SkNO_RETURN_HINT() {}
+ #else
+ #define SkNO_RETURN_HINT() do {} while (false)
+ #endif
+ #endif
+
+ #if defined(SK_ZLIB_INCLUDE) && defined(SK_SYSTEM_ZLIB)
+ #error "cannot define both SK_ZLIB_INCLUDE and SK_SYSTEM_ZLIB"
+ #elif defined(SK_ZLIB_INCLUDE) || defined(SK_SYSTEM_ZLIB)
diff --git a/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch b/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch
new file mode 100644
index 0000000000..cde2940950
--- /dev/null
+++ b/gfx/skia/patches/archive/0012-Bug-751418-Add-our-own-GrUserConfig-r-mattwoodrow.patch
@@ -0,0 +1,29 @@
+From 4c25387e6e6cdb55f19e51631a78c3fa9b4a3c73 Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Thu, 1 Nov 2012 17:29:50 -0400
+Subject: [PATCH 2/8] Bug 751418 - Add our own GrUserConfig r=mattwoodrow
+
+---
+ gfx/skia/include/gpu/GrUserConfig.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/gfx/skia/include/gpu/GrUserConfig.h b/gfx/skia/include/gpu/GrUserConfig.h
+index d514486..b729ab3 100644
+--- a/gfx/skia/include/gpu/GrUserConfig.h
++++ b/gfx/skia/include/gpu/GrUserConfig.h
+@@ -64,6 +64,12 @@
+ #define GR_TEXT_SCALAR_IS_FIXED 0
+ #define GR_TEXT_SCALAR_IS_FLOAT 1
+
++/*
++ * This allows us to set a callback to be called before each GL call to ensure
++ * that our context is set correctly
++ */
++#define GR_GL_PER_GL_FUNC_CALLBACK 1
++
+ #endif
+
+
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch b/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch
new file mode 100644
index 0000000000..dc780c5ec6
--- /dev/null
+++ b/gfx/skia/patches/archive/0012-Bug-759683-make-ssse3-conditional.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/opts/opts_check_SSE2.cpp b/gfx/skia/src/opts/opts_check_SSE2.cpp
+--- a/gfx/skia/src/opts/opts_check_SSE2.cpp
++++ b/gfx/skia/src/opts/opts_check_SSE2.cpp
+@@ -91,17 +91,17 @@ static bool cachedHasSSE2() {
+
+ static bool cachedHasSSSE3() {
+ static bool gHasSSSE3 = hasSSSE3();
+ return gHasSSSE3;
+ }
+
+ void SkBitmapProcState::platformProcs() {
+ if (cachedHasSSSE3()) {
+-#if !defined(SK_BUILD_FOR_ANDROID)
++#if defined(SK_BUILD_SSSE3)
+ // Disable SSSE3 optimization for Android x86
+ if (fSampleProc32 == S32_opaque_D32_filter_DX) {
+ fSampleProc32 = S32_opaque_D32_filter_DX_SSSE3;
+ } else if (fSampleProc32 == S32_alpha_D32_filter_DX) {
+ fSampleProc32 = S32_alpha_D32_filter_DX_SSSE3;
+ }
+
+ if (fSampleProc32 == S32_opaque_D32_filter_DXDY) {
diff --git a/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch b/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch
new file mode 100644
index 0000000000..167e22184d
--- /dev/null
+++ b/gfx/skia/patches/archive/0013-Bug-751418-Fix-compile-error-on-gcc-in-Skia-GL-r-mat.patch
@@ -0,0 +1,26 @@
+From 3d786b1f0c040205ad9ef6d4216ce06b41f7359f Mon Sep 17 00:00:00 2001
+From: George Wright <gw@gwright.org.uk>
+Date: Mon, 5 Nov 2012 15:49:42 +0000
+Subject: [PATCH 3/8] Bug 751418 - Fix compile error on gcc in Skia/GL
+ r=mattwoodrow
+
+---
+ gfx/skia/src/gpu/gl/GrGLProgram.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gfx/skia/src/gpu/gl/GrGLProgram.cpp b/gfx/skia/src/gpu/gl/GrGLProgram.cpp
+index 2703110..40cadc3 100644
+--- a/gfx/skia/src/gpu/gl/GrGLProgram.cpp
++++ b/gfx/skia/src/gpu/gl/GrGLProgram.cpp
+@@ -575,7 +575,7 @@ bool GrGLProgram::genProgram(const GrCustomStage** customStages) {
+ POS_ATTR_NAME);
+
+ builder.fVSCode.appendf("void main() {\n"
+- "\tvec3 pos3 = %s * vec3("POS_ATTR_NAME", 1);\n"
++ "\tvec3 pos3 = %s * vec3(" POS_ATTR_NAME ", 1);\n"
+ "\tgl_Position = vec4(pos3.xy, 0, pos3.z);\n",
+ viewMName);
+
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch b/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch
new file mode 100644
index 0000000000..f20293d4cc
--- /dev/null
+++ b/gfx/skia/patches/archive/0013-Bug-761890-fonts.patch
@@ -0,0 +1,162 @@
+# HG changeset patch
+# User Nicholas Cameron <ncameron@mozilla.com>
+# Date 1337146927 -43200
+# Node ID 310209abef2c2667e5de41dd2a1f071e8cd42821
+# Parent 93f3ca4d5707b2aae9c6ae52d5d29c2c802e7ef8
+Bug 746883; changes to the Skia library. r=gw280
+
+diff --git a/gfx/skia/include/core/SkDraw.h b/gfx/skia/include/core/SkDraw.h
+--- a/gfx/skia/include/core/SkDraw.h
++++ b/gfx/skia/include/core/SkDraw.h
+@@ -125,23 +125,24 @@ public:
+ #endif
+ };
+
+ class SkGlyphCache;
+
+ class SkTextToPathIter {
+ public:
+ SkTextToPathIter(const char text[], size_t length, const SkPaint& paint,
+- bool applyStrokeAndPathEffects);
++ bool applyStrokeAndPathEffects, bool useCanonicalTextSize = true);
+ ~SkTextToPathIter();
+
+ const SkPaint& getPaint() const { return fPaint; }
+ SkScalar getPathScale() const { return fScale; }
+
+ const SkPath* next(SkScalar* xpos); //!< returns nil when there are no more paths
++ bool nextWithWhitespace(const SkPath** path, SkScalar* xpos); //!< returns false when there are no more paths
+
+ private:
+ SkGlyphCache* fCache;
+ SkPaint fPaint;
+ SkScalar fScale;
+ SkFixed fPrevAdvance;
+ const char* fText;
+ const char* fStop;
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1359,30 +1359,32 @@ void SkPaint::getPosTextPath(const void*
+ const SkPoint pos[], SkPath* path) const {
+ SkASSERT(length == 0 || textData != NULL);
+
+ const char* text = (const char*)textData;
+ if (text == NULL || length == 0 || path == NULL) {
+ return;
+ }
+
+- SkTextToPathIter iter(text, length, *this, false);
++ SkTextToPathIter iter(text, length, *this, false, false);
+ SkMatrix matrix;
+ SkPoint prevPos;
+ prevPos.set(0, 0);
+
+ matrix.setScale(iter.getPathScale(), iter.getPathScale());
+ path->reset();
+
+ unsigned int i = 0;
+ const SkPath* iterPath;
+- while ((iterPath = iter.next(NULL)) != NULL) {
+- matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
+- path->addPath(*iterPath, matrix);
+- prevPos = pos[i];
++ while (iter.nextWithWhitespace(&iterPath, NULL)) {
++ if (iterPath) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ }
+ i++;
+ }
+ }
+
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+ }
+@@ -2118,30 +2120,31 @@ const SkRect& SkPaint::doComputeFastBoun
+
+ static bool has_thick_frame(const SkPaint& paint) {
+ return paint.getStrokeWidth() > 0 &&
+ paint.getStyle() != SkPaint::kFill_Style;
+ }
+
+ SkTextToPathIter::SkTextToPathIter( const char text[], size_t length,
+ const SkPaint& paint,
+- bool applyStrokeAndPathEffects)
++ bool applyStrokeAndPathEffects,
++ bool useCanonicalTextSize)
+ : fPaint(paint) {
+ fGlyphCacheProc = paint.getMeasureCacheProc(SkPaint::kForward_TextBufferDirection,
+ true);
+
+ fPaint.setLinearText(true);
+ fPaint.setMaskFilter(NULL); // don't want this affecting our path-cache lookup
+
+ if (fPaint.getPathEffect() == NULL && !has_thick_frame(fPaint)) {
+ applyStrokeAndPathEffects = false;
+ }
+
+ // can't use our canonical size if we need to apply patheffects
+- if (fPaint.getPathEffect() == NULL) {
++ if (useCanonicalTextSize && fPaint.getPathEffect() == NULL) {
+ fPaint.setTextSize(SkIntToScalar(SkPaint::kCanonicalTextSizeForPaths));
+ fScale = paint.getTextSize() / SkPaint::kCanonicalTextSizeForPaths;
+ if (has_thick_frame(fPaint)) {
+ fPaint.setStrokeWidth(SkScalarDiv(fPaint.getStrokeWidth(), fScale));
+ }
+ } else {
+ fScale = SK_Scalar1;
+ }
+@@ -2185,30 +2188,47 @@ SkTextToPathIter::SkTextToPathIter( cons
+ fXYIndex = paint.isVerticalText() ? 1 : 0;
+ }
+
+ SkTextToPathIter::~SkTextToPathIter() {
+ SkGlyphCache::AttachCache(fCache);
+ }
+
+ const SkPath* SkTextToPathIter::next(SkScalar* xpos) {
+- while (fText < fStop) {
++ const SkPath* result;
++ while (nextWithWhitespace(&result, xpos)) {
++ if (result) {
++ if (xpos) {
++ *xpos = fXPos;
++ }
++ return result;
++ }
++ }
++ return NULL;
++}
++
++bool SkTextToPathIter::nextWithWhitespace(const SkPath** path, SkScalar* xpos) {
++ if (fText < fStop) {
+ const SkGlyph& glyph = fGlyphCacheProc(fCache, &fText);
+
+ fXPos += SkScalarMul(SkFixedToScalar(fPrevAdvance + fAutoKern.adjust(glyph)), fScale);
+ fPrevAdvance = advance(glyph, fXYIndex); // + fPaint.getTextTracking();
+
+ if (glyph.fWidth) {
+ if (xpos) {
+ *xpos = fXPos;
+ }
+- return fCache->findPath(glyph);
++ *path = fCache->findPath(glyph);
++ return true;
++ } else {
++ *path = NULL;
++ return true;
+ }
+ }
+- return NULL;
++ return false;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ bool SkPaint::nothingToDraw() const {
+ if (fLooper) {
+ return false;
+ }
diff --git a/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch b/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch
new file mode 100644
index 0000000000..6cc74914d2
--- /dev/null
+++ b/gfx/skia/patches/archive/0014-Bug-765038-Fix-clang-build.patch
@@ -0,0 +1,29 @@
+# HG changeset patch
+# Parent 9ded7a9f94a863dfa1f3227d3013367f51b8b522
+# User Nicholas Cameron <ncameron@mozilla.com>
+Bug 765038; fix a Clang compilation bug in Skia; r=jwatt
+
+diff --git a/gfx/skia/src/sfnt/SkOTTable_head.h b/gfx/skia/src/sfnt/SkOTTable_head.h
+--- a/gfx/skia/src/sfnt/SkOTTable_head.h
++++ b/gfx/skia/src/sfnt/SkOTTable_head.h
+@@ -109,18 +109,18 @@ struct SkOTTableHead {
+ } raw;
+ } macStyle;
+ SK_OT_USHORT lowestRecPPEM;
+ struct FontDirectionHint {
+ SK_TYPED_ENUM(Value, SK_OT_SHORT,
+ ((FullyMixedDirectionalGlyphs, SkTEndian_SwapBE16(0)))
+ ((OnlyStronglyLTR, SkTEndian_SwapBE16(1)))
+ ((StronglyLTR, SkTEndian_SwapBE16(2)))
+- ((OnlyStronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(-1))))
+- ((StronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(-2))))
++ ((OnlyStronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(static_cast<SK_OT_USHORT>(-1)))))
++ ((StronglyRTL, static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16(static_cast<SK_OT_USHORT>(-2)))))
+ SK_SEQ_END,
+ (value)SK_SEQ_END)
+ } fontDirectionHint;
+ struct IndexToLocFormat {
+ SK_TYPED_ENUM(Value, SK_OT_SHORT,
+ ((ShortOffsets, SkTEndian_SwapBE16(0)))
+ ((LongOffsets, SkTEndian_SwapBE16(1)))
+ SK_SEQ_END,
diff --git a/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch b/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch
new file mode 100644
index 0000000000..174dcb9bce
--- /dev/null
+++ b/gfx/skia/patches/archive/0015-Bug-766017-warnings.patch
@@ -0,0 +1,865 @@
+From: David Zbarsky <dzbarsky@gmail.com>
+Bug 766017 - Fix some skia warnings r=gw280
+
+diff --git a/gfx/skia/include/utils/mac/SkCGUtils.h b/gfx/skia/include/utils/mac/SkCGUtils.h
+--- a/gfx/skia/include/utils/mac/SkCGUtils.h
++++ b/gfx/skia/include/utils/mac/SkCGUtils.h
+@@ -39,18 +39,16 @@ static inline CGImageRef SkCreateCGImage
+ /**
+ * Draw the bitmap into the specified CG context. The bitmap will be converted
+ * to a CGImage using the generic RGB colorspace. (x,y) specifies the position
+ * of the top-left corner of the bitmap. The bitmap is converted using the
+ * colorspace returned by CGColorSpaceCreateDeviceRGB()
+ */
+ void SkCGDrawBitmap(CGContextRef, const SkBitmap&, float x, float y);
+
+-bool SkPDFDocumentToBitmap(SkStream* stream, SkBitmap* output);
+-
+ /**
+ * Return a provider that wraps the specified stream. It will become an
+ * owner of the stream, so the caller must still manage its ownership.
+ *
+ * To hand-off ownership of the stream to the provider, the caller must do
+ * something like the following:
+ *
+ * SkStream* stream = new ...;
+diff --git a/gfx/skia/src/core/SkAAClip.cpp b/gfx/skia/src/core/SkAAClip.cpp
+--- a/gfx/skia/src/core/SkAAClip.cpp
++++ b/gfx/skia/src/core/SkAAClip.cpp
+@@ -246,17 +246,17 @@ static void count_left_right_zeros(const
+ zeros = 0;
+ }
+ row += 2;
+ width -= n;
+ }
+ *riteZ = zeros;
+ }
+
+-#ifdef SK_DEBUG
++#if 0
+ static void test_count_left_right_zeros() {
+ static bool gOnce;
+ if (gOnce) {
+ return;
+ }
+ gOnce = true;
+
+ const uint8_t data0[] = { 0, 0, 10, 0xFF };
+@@ -1319,22 +1319,16 @@ bool SkAAClip::setPath(const SkPath& pat
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ typedef void (*RowProc)(SkAAClip::Builder&, int bottom,
+ const uint8_t* rowA, const SkIRect& rectA,
+ const uint8_t* rowB, const SkIRect& rectB);
+
+-static void sectRowProc(SkAAClip::Builder& builder, int bottom,
+- const uint8_t* rowA, const SkIRect& rectA,
+- const uint8_t* rowB, const SkIRect& rectB) {
+-
+-}
+-
+ typedef U8CPU (*AlphaProc)(U8CPU alphaA, U8CPU alphaB);
+
+ static U8CPU sectAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+ // Multiply
+ return SkMulDiv255Round(alphaA, alphaB);
+ }
+
+ static U8CPU unionAlphaProc(U8CPU alphaA, U8CPU alphaB) {
+@@ -1429,31 +1423,16 @@ private:
+ static void adjust_row(RowIter& iter, int& leftA, int& riteA, int rite) {
+ if (rite == riteA) {
+ iter.next();
+ leftA = iter.left();
+ riteA = iter.right();
+ }
+ }
+
+-static bool intersect(int& min, int& max, int boundsMin, int boundsMax) {
+- SkASSERT(min < max);
+- SkASSERT(boundsMin < boundsMax);
+- if (min >= boundsMax || max <= boundsMin) {
+- return false;
+- }
+- if (min < boundsMin) {
+- min = boundsMin;
+- }
+- if (max > boundsMax) {
+- max = boundsMax;
+- }
+- return true;
+-}
+-
+ static void operatorX(SkAAClip::Builder& builder, int lastY,
+ RowIter& iterA, RowIter& iterB,
+ AlphaProc proc, const SkIRect& bounds) {
+ int leftA = iterA.left();
+ int riteA = iterA.right();
+ int leftB = iterB.left();
+ int riteB = iterB.right();
+
+@@ -1970,34 +1949,33 @@ static void small_bzero(void* dst, size_
+ static inline uint8_t mergeOne(uint8_t value, unsigned alpha) {
+ return SkMulDiv255Round(value, alpha);
+ }
+ static inline uint16_t mergeOne(uint16_t value, unsigned alpha) {
+ unsigned r = SkGetPackedR16(value);
+ unsigned g = SkGetPackedG16(value);
+ unsigned b = SkGetPackedB16(value);
+ return SkPackRGB16(SkMulDiv255Round(r, alpha),
+- SkMulDiv255Round(r, alpha),
+- SkMulDiv255Round(r, alpha));
++ SkMulDiv255Round(g, alpha),
++ SkMulDiv255Round(b, alpha));
+ }
+ static inline SkPMColor mergeOne(SkPMColor value, unsigned alpha) {
+ unsigned a = SkGetPackedA32(value);
+ unsigned r = SkGetPackedR32(value);
+ unsigned g = SkGetPackedG32(value);
+ unsigned b = SkGetPackedB32(value);
+ return SkPackARGB32(SkMulDiv255Round(a, alpha),
+ SkMulDiv255Round(r, alpha),
+ SkMulDiv255Round(g, alpha),
+ SkMulDiv255Round(b, alpha));
+ }
+
+ template <typename T> void mergeT(const T* SK_RESTRICT src, int srcN,
+ const uint8_t* SK_RESTRICT row, int rowN,
+ T* SK_RESTRICT dst) {
+- SkDEBUGCODE(int accumulated = 0;)
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ int n = SkMin32(rowN, srcN);
+ unsigned rowA = row[1];
+ if (0xFF == rowA) {
+ small_memcpy(dst, src, n * sizeof(T));
+diff --git a/gfx/skia/src/core/SkBlitMask_D32.cpp b/gfx/skia/src/core/SkBlitMask_D32.cpp
+--- a/gfx/skia/src/core/SkBlitMask_D32.cpp
++++ b/gfx/skia/src/core/SkBlitMask_D32.cpp
+@@ -268,107 +268,49 @@ bool SkBlitMask::BlitColor(const SkBitma
+ return true;
+ }
+ return false;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////
+
+-static void BW_RowProc_Blend(SkPMColor* SK_RESTRICT dst,
+- const uint8_t* SK_RESTRICT mask,
+- const SkPMColor* SK_RESTRICT src, int count) {
+- int i, octuple = (count + 7) >> 3;
+- for (i = 0; i < octuple; ++i) {
+- int m = *mask++;
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- if (m & 0x40) { dst[1] = SkPMSrcOver(src[1], dst[1]); }
+- if (m & 0x20) { dst[2] = SkPMSrcOver(src[2], dst[2]); }
+- if (m & 0x10) { dst[3] = SkPMSrcOver(src[3], dst[3]); }
+- if (m & 0x08) { dst[4] = SkPMSrcOver(src[4], dst[4]); }
+- if (m & 0x04) { dst[5] = SkPMSrcOver(src[5], dst[5]); }
+- if (m & 0x02) { dst[6] = SkPMSrcOver(src[6], dst[6]); }
+- if (m & 0x01) { dst[7] = SkPMSrcOver(src[7], dst[7]); }
+- src += 8;
+- dst += 8;
+- }
+- count &= 7;
+- if (count > 0) {
+- int m = *mask;
+- do {
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- m <<= 1;
+- src += 1;
+- dst += 1;
+- } while (--count > 0);
+- }
+-}
+-
+-static void BW_RowProc_Opaque(SkPMColor* SK_RESTRICT dst,
+- const uint8_t* SK_RESTRICT mask,
+- const SkPMColor* SK_RESTRICT src, int count) {
+- int i, octuple = (count + 7) >> 3;
+- for (i = 0; i < octuple; ++i) {
+- int m = *mask++;
+- if (m & 0x80) { dst[0] = src[0]; }
+- if (m & 0x40) { dst[1] = src[1]; }
+- if (m & 0x20) { dst[2] = src[2]; }
+- if (m & 0x10) { dst[3] = src[3]; }
+- if (m & 0x08) { dst[4] = src[4]; }
+- if (m & 0x04) { dst[5] = src[5]; }
+- if (m & 0x02) { dst[6] = src[6]; }
+- if (m & 0x01) { dst[7] = src[7]; }
+- src += 8;
+- dst += 8;
+- }
+- count &= 7;
+- if (count > 0) {
+- int m = *mask;
+- do {
+- if (m & 0x80) { dst[0] = SkPMSrcOver(src[0], dst[0]); }
+- m <<= 1;
+- src += 1;
+- dst += 1;
+- } while (--count > 0);
+- }
+-}
+-
+ static void A8_RowProc_Blend(SkPMColor* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT mask,
+ const SkPMColor* SK_RESTRICT src, int count) {
+ for (int i = 0; i < count; ++i) {
+ if (mask[i]) {
+ dst[i] = SkBlendARGB32(src[i], dst[i], mask[i]);
+ }
+ }
+ }
+
+ // expand the steps that SkAlphaMulQ performs, but this way we can
+-// exand.. add.. combine
++// expand.. add.. combine
+ // instead of
+ // expand..combine add expand..combine
+ //
+ #define EXPAND0(v, m, s) ((v) & (m)) * (s)
+ #define EXPAND1(v, m, s) (((v) >> 8) & (m)) * (s)
+ #define COMBINE(e0, e1, m) ((((e0) >> 8) & (m)) | ((e1) & ~(m)))
+
+ static void A8_RowProc_Opaque(SkPMColor* SK_RESTRICT dst,
+ const uint8_t* SK_RESTRICT mask,
+ const SkPMColor* SK_RESTRICT src, int count) {
+- const uint32_t rbmask = gMask_00FF00FF;
+ for (int i = 0; i < count; ++i) {
+ int m = mask[i];
+ if (m) {
+ m += (m >> 7);
+ #if 1
+ // this is slightly slower than the expand/combine version, but it
+ // is much closer to the old results, so we use it for now to reduce
+ // rebaselining.
+ dst[i] = SkAlphaMulQ(src[i], m) + SkAlphaMulQ(dst[i], 256 - m);
+ #else
++ const uint32_t rbmask = gMask_00FF00FF;
+ uint32_t v = src[i];
+ uint32_t s0 = EXPAND0(v, rbmask, m);
+ uint32_t s1 = EXPAND1(v, rbmask, m);
+ v = dst[i];
+ uint32_t d0 = EXPAND0(v, rbmask, m);
+ uint32_t d1 = EXPAND1(v, rbmask, m);
+ dst[i] = COMBINE(s0 + d0, s1 + d1, rbmask);
+ #endif
+@@ -559,17 +501,17 @@ SkBlitMask::RowProc SkBlitMask::RowFacto
+ // make this opt-in until chrome can rebaseline
+ RowProc proc = PlatformRowProcs(config, format, flags);
+ if (proc) {
+ return proc;
+ }
+
+ static const RowProc gProcs[] = {
+ // need X coordinate to handle BW
+- NULL, NULL, //(RowProc)BW_RowProc_Blend, (RowProc)BW_RowProc_Opaque,
++ NULL, NULL,
+ (RowProc)A8_RowProc_Blend, (RowProc)A8_RowProc_Opaque,
+ (RowProc)LCD16_RowProc_Blend, (RowProc)LCD16_RowProc_Opaque,
+ (RowProc)LCD32_RowProc_Blend, (RowProc)LCD32_RowProc_Opaque,
+ };
+
+ int index;
+ switch (config) {
+ case SkBitmap::kARGB_8888_Config:
+diff --git a/gfx/skia/src/core/SkConcaveToTriangles.cpp b/gfx/skia/src/core/SkConcaveToTriangles.cpp
+--- a/gfx/skia/src/core/SkConcaveToTriangles.cpp
++++ b/gfx/skia/src/core/SkConcaveToTriangles.cpp
+@@ -37,17 +37,16 @@
+ #include "SkTDArray.h"
+ #include "SkGeometry.h"
+ #include "SkTSort.h"
+
+ // This is used to prevent runaway code bugs, and can probably be removed after
+ // the code has been proven robust.
+ #define kMaxCount 1000
+
+-#define DEBUG
+ #ifdef DEBUG
+ //------------------------------------------------------------------------------
+ // Debugging support
+ //------------------------------------------------------------------------------
+
+ #include <cstdio>
+ #include <stdarg.h>
+
+diff --git a/gfx/skia/src/core/SkPath.cpp b/gfx/skia/src/core/SkPath.cpp
+--- a/gfx/skia/src/core/SkPath.cpp
++++ b/gfx/skia/src/core/SkPath.cpp
+@@ -469,17 +469,16 @@ void SkPath::incReserve(U16CPU inc) {
+ fPts.setReserve(fPts.count() + inc);
+
+ SkDEBUGCODE(this->validate();)
+ }
+
+ void SkPath::moveTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+- int vc = fVerbs.count();
+ SkPoint* pt;
+
+ // remember our index
+ fLastMoveToIndex = fPts.count();
+
+ pt = fPts.append();
+ *fVerbs.append() = kMove_Verb;
+ pt->set(x, y);
+@@ -1163,17 +1162,16 @@ void SkPath::reversePathTo(const SkPath&
+ }
+ pts -= gPtsInVerb[verbs[i]];
+ }
+ }
+
+ void SkPath::reverseAddPath(const SkPath& src) {
+ this->incReserve(src.fPts.count());
+
+- const SkPoint* startPts = src.fPts.begin();
+ const SkPoint* pts = src.fPts.end();
+ const uint8_t* startVerbs = src.fVerbs.begin();
+ const uint8_t* verbs = src.fVerbs.end();
+
+ fIsOval = false;
+
+ bool needMove = true;
+ bool needClose = false;
+diff --git a/gfx/skia/src/core/SkRegion.cpp b/gfx/skia/src/core/SkRegion.cpp
+--- a/gfx/skia/src/core/SkRegion.cpp
++++ b/gfx/skia/src/core/SkRegion.cpp
+@@ -920,20 +920,16 @@ static int operate(const SkRegion::RunTy
+ /* Given count RunTypes in a complex region, return the worst case number of
+ logical intervals that represents (i.e. number of rects that would be
+ returned from the iterator).
+
+ We could just return count/2, since there must be at least 2 values per
+ interval, but we can first trim off the const overhead of the initial TOP
+ value, plus the final BOTTOM + 2 sentinels.
+ */
+-static int count_to_intervals(int count) {
+- SkASSERT(count >= 6); // a single rect is 6 values
+- return (count - 4) >> 1;
+-}
+
+ /* Given a number of intervals, what is the worst case representation of that
+ many intervals?
+
+ Worst case (from a storage perspective), is a vertical stack of single
+ intervals: TOP + N * (BOTTOM INTERVALCOUNT LEFT RIGHT SENTINEL) + SENTINEL
+ */
+ static int intervals_to_count(int intervals) {
+diff --git a/gfx/skia/src/core/SkScalerContext.cpp b/gfx/skia/src/core/SkScalerContext.cpp
+--- a/gfx/skia/src/core/SkScalerContext.cpp
++++ b/gfx/skia/src/core/SkScalerContext.cpp
+@@ -336,44 +336,16 @@ SK_ERROR:
+ glyph->fTop = 0;
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ // put a valid value here, in case it was earlier set to
+ // MASK_FORMAT_JUST_ADVANCE
+ glyph->fMaskFormat = fRec.fMaskFormat;
+ }
+
+-static bool isLCD(const SkScalerContext::Rec& rec) {
+- return SkMask::kLCD16_Format == rec.fMaskFormat ||
+- SkMask::kLCD32_Format == rec.fMaskFormat;
+-}
+-
+-static uint16_t a8_to_rgb565(unsigned a8) {
+- return SkPackRGB16(a8 >> 3, a8 >> 2, a8 >> 3);
+-}
+-
+-static void copyToLCD16(const SkBitmap& src, const SkMask& dst) {
+- SkASSERT(SkBitmap::kA8_Config == src.config());
+- SkASSERT(SkMask::kLCD16_Format == dst.fFormat);
+-
+- const int width = dst.fBounds.width();
+- const int height = dst.fBounds.height();
+- const uint8_t* srcP = src.getAddr8(0, 0);
+- size_t srcRB = src.rowBytes();
+- uint16_t* dstP = (uint16_t*)dst.fImage;
+- size_t dstRB = dst.fRowBytes;
+- for (int y = 0; y < height; ++y) {
+- for (int x = 0; x < width; ++x) {
+- dstP[x] = a8_to_rgb565(srcP[x]);
+- }
+- srcP += srcRB;
+- dstP = (uint16_t*)((char*)dstP + dstRB);
+- }
+-}
+-
+ #define SK_FREETYPE_LCD_LERP 160
+
+ static int lerp(int start, int end) {
+ SkASSERT((unsigned)SK_FREETYPE_LCD_LERP <= 256);
+ return start + ((end - start) * (SK_FREETYPE_LCD_LERP) >> 8);
+ }
+
+ static uint16_t packLCD16(unsigned r, unsigned g, unsigned b) {
+diff --git a/gfx/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/src/core/SkScan_AntiPath.cpp
+--- a/gfx/skia/src/core/SkScan_AntiPath.cpp
++++ b/gfx/skia/src/core/SkScan_AntiPath.cpp
+@@ -230,52 +230,16 @@ void SuperBlitter::blitH(int x, int y, i
+ fOffsetX);
+
+ #ifdef SK_DEBUG
+ fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
+ fCurrX = x + width;
+ #endif
+ }
+
+-static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
+- int n, U8CPU riteA) {
+- SkASSERT(leftA <= 0xFF);
+- SkASSERT(riteA <= 0xFF);
+-
+- int16_t* run = runs.fRuns;
+- uint8_t* aa = runs.fAlpha;
+-
+- if (ileft > 0) {
+- run[0] = ileft;
+- aa[0] = 0;
+- run += ileft;
+- aa += ileft;
+- }
+-
+- SkASSERT(leftA < 0xFF);
+- if (leftA > 0) {
+- *run++ = 1;
+- *aa++ = leftA;
+- }
+-
+- if (n > 0) {
+- run[0] = n;
+- aa[0] = 0xFF;
+- run += n;
+- aa += n;
+- }
+-
+- SkASSERT(riteA < 0xFF);
+- if (riteA > 0) {
+- *run++ = 1;
+- *aa++ = riteA;
+- }
+- run[0] = 0;
+-}
+-
+ void SuperBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ SkASSERT(height > 0);
+
+ // blit leading rows
+ while ((y & MASK)) {
+ this->blitH(x, y++, width);
+ if (--height <= 0) {
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -865,45 +865,16 @@ bool Linear_Gradient::setContext(const S
+ } while (0)
+
+ namespace {
+
+ typedef void (*LinearShadeProc)(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* dstC, const SkPMColor* cache,
+ int toggle, int count);
+
+-// This function is deprecated, and will be replaced by
+-// shadeSpan_linear_vertical_lerp() once Chrome has been weaned off of it.
+-void shadeSpan_linear_vertical(TileProc proc, SkFixed dx, SkFixed fx,
+- SkPMColor* SK_RESTRICT dstC,
+- const SkPMColor* SK_RESTRICT cache,
+- int toggle, int count) {
+- if (proc == clamp_tileproc) {
+- // Read out clamp values from beginning/end of the cache. No need to lerp
+- // or dither
+- if (fx < 0) {
+- sk_memset32(dstC, cache[-1], count);
+- return;
+- } else if (fx > 0xFFFF) {
+- sk_memset32(dstC, cache[Gradient_Shader::kCache32Count * 2], count);
+- return;
+- }
+- }
+-
+- // We're a vertical gradient, so no change in a span.
+- // If colors change sharply across the gradient, dithering is
+- // insufficient (it subsamples the color space) and we need to lerp.
+- unsigned fullIndex = proc(fx);
+- unsigned fi = fullIndex >> (16 - Gradient_Shader::kCache32Bits);
+- sk_memset32_dither(dstC,
+- cache[toggle + fi],
+- cache[(toggle ^ Gradient_Shader::kDitherStride32) + fi],
+- count);
+-}
+-
+ // Linear interpolation (lerp) is unnecessary if there are no sharp
+ // discontinuities in the gradient - which must be true if there are
+ // only 2 colors - but it's cheap.
+ void shadeSpan_linear_vertical_lerp(TileProc proc, SkFixed dx, SkFixed fx,
+ SkPMColor* SK_RESTRICT dstC,
+ const SkPMColor* SK_RESTRICT cache,
+ int toggle, int count) {
+ if (proc == clamp_tileproc) {
+@@ -2131,16 +2102,18 @@ protected:
+ buffer.writePoint(fCenter);
+ }
+
+ private:
+ typedef Gradient_Shader INHERITED;
+ const SkPoint fCenter;
+ };
+
++#ifndef SK_SCALAR_IS_FLOAT
++
+ #ifdef COMPUTE_SWEEP_TABLE
+ #define PI 3.14159265
+ static bool gSweepTableReady;
+ static uint8_t gSweepTable[65];
+
+ /* Our table stores precomputed values for atan: [0...1] -> [0..PI/4]
+ We scale the results to [0..32]
+ */
+@@ -2168,20 +2141,23 @@ static const uint8_t gSweepTable[] = {
+ 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16, 17, 17, 18, 18,
+ 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 25, 26,
+ 26, 27, 27, 27, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 31, 32,
+ 32
+ };
+ static const uint8_t* build_sweep_table() { return gSweepTable; }
+ #endif
+
++#endif
++
+ // divide numer/denom, with a bias of 6bits. Assumes numer <= denom
+ // and denom != 0. Since our table is 6bits big (+1), this is a nice fit.
+ // Same as (but faster than) SkFixedDiv(numer, denom) >> 10
+
++#ifndef SK_SCALAR_IS_FLOAT
+ //unsigned div_64(int numer, int denom);
+ static unsigned div_64(int numer, int denom) {
+ SkASSERT(numer <= denom);
+ SkASSERT(numer > 0);
+ SkASSERT(denom > 0);
+
+ int nbits = SkCLZ(numer);
+ int dbits = SkCLZ(denom);
+@@ -2294,16 +2270,17 @@ static unsigned atan_0_90(SkFixed y, SkF
+ result = 64 - result;
+ // pin to 63
+ result -= result >> 6;
+ }
+
+ SkASSERT(result <= 63);
+ return result;
+ }
++#endif
+
+ // returns angle in a circle [0..2PI) -> [0..255]
+ #ifdef SK_SCALAR_IS_FLOAT
+ static unsigned SkATan2_255(float y, float x) {
+ // static const float g255Over2PI = 255 / (2 * SK_ScalarPI);
+ static const float g255Over2PI = 40.584510488433314f;
+
+ float result = sk_float_atan2(y, x);
+diff --git a/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp b/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
+--- a/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
++++ b/gfx/skia/src/opts/SkBlitRect_opts_SSE2.cpp
+@@ -112,17 +112,17 @@ void BlitRect32_OpaqueWide_SSE2(SkPMColo
+ }
+
+ void ColorRect32_SSE2(SkPMColor* destination,
+ int width, int height,
+ size_t rowBytes, uint32_t color) {
+ if (0 == height || 0 == width || 0 == color) {
+ return;
+ }
+- unsigned colorA = SkGetPackedA32(color);
++ //unsigned colorA = SkGetPackedA32(color);
+ //if (255 == colorA) {
+ //if (width < 31) {
+ //BlitRect32_OpaqueNarrow_SSE2(destination, width, height,
+ //rowBytes, color);
+ //} else {
+ //BlitRect32_OpaqueWide_SSE2(destination, width, height,
+ //rowBytes, color);
+ //}
+diff --git a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+--- a/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
++++ b/gfx/skia/src/ports/SkFontHost_mac_coretext.cpp
+@@ -75,20 +75,16 @@ static CGFloat CGRectGetMinY_inline(cons
+ static CGFloat CGRectGetMaxY_inline(const CGRect& rect) {
+ return rect.origin.y + rect.size.height;
+ }
+
+ static CGFloat CGRectGetWidth_inline(const CGRect& rect) {
+ return rect.size.width;
+ }
+
+-static CGFloat CGRectGetHeight(const CGRect& rect) {
+- return rect.size.height;
+-}
+-
+ ///////////////////////////////////////////////////////////////////////////////
+
+ static void sk_memset_rect32(uint32_t* ptr, uint32_t value, size_t width,
+ size_t height, size_t rowBytes) {
+ SkASSERT(width);
+ SkASSERT(width * sizeof(uint32_t) <= rowBytes);
+
+ if (width >= 32) {
+@@ -125,28 +121,30 @@ static void sk_memset_rect32(uint32_t* p
+ *ptr++ = value;
+ } while (--w > 0);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ }
+ }
+
++#if 0
+ // Potentially this should be made (1) public (2) optimized when width is small.
+ // Also might want 16 and 32 bit version
+ //
+ static void sk_memset_rect(void* ptr, U8CPU byte, size_t width, size_t height,
+ size_t rowBytes) {
+ uint8_t* dst = (uint8_t*)ptr;
+ while (height) {
+ memset(dst, byte, width);
+ dst += rowBytes;
+ height -= 1;
+ }
+ }
++#endif
+
+ #include <sys/utsname.h>
+
+ typedef uint32_t CGRGBPixel;
+
+ static unsigned CGRGBPixel_getAlpha(CGRGBPixel pixel) {
+ return pixel & 0xFF;
+ }
+@@ -250,23 +248,16 @@ static CGAffineTransform MatrixToCGAffin
+ return CGAffineTransformMake(ScalarToCG(matrix[SkMatrix::kMScaleX]) * sx,
+ -ScalarToCG(matrix[SkMatrix::kMSkewY]) * sy,
+ -ScalarToCG(matrix[SkMatrix::kMSkewX]) * sx,
+ ScalarToCG(matrix[SkMatrix::kMScaleY]) * sy,
+ ScalarToCG(matrix[SkMatrix::kMTransX]) * sx,
+ ScalarToCG(matrix[SkMatrix::kMTransY]) * sy);
+ }
+
+-static void CGAffineTransformToMatrix(const CGAffineTransform& xform, SkMatrix* matrix) {
+- matrix->setAll(
+- CGToScalar(xform.a), CGToScalar(xform.c), CGToScalar(xform.tx),
+- CGToScalar(xform.b), CGToScalar(xform.d), CGToScalar(xform.ty),
+- 0, 0, SK_Scalar1);
+-}
+-
+ static SkScalar getFontScale(CGFontRef cgFont) {
+ int unitsPerEm = CGFontGetUnitsPerEm(cgFont);
+ return SkScalarInvert(SkIntToScalar(unitsPerEm));
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ #define BITMAP_INFO_RGB (kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Host)
+@@ -1075,16 +1066,17 @@ static const uint8_t* getInverseTable(bo
+ if (!gInited) {
+ build_power_table(gWhiteTable, 1.5f);
+ build_power_table(gTable, 2.2f);
+ gInited = true;
+ }
+ return isWhite ? gWhiteTable : gTable;
+ }
+
++#ifdef SK_USE_COLOR_LUMINANCE
+ static const uint8_t* getGammaTable(U8CPU luminance) {
+ static uint8_t gGammaTables[4][256];
+ static bool gInited;
+ if (!gInited) {
+ #if 1
+ float start = 1.1;
+ float stop = 2.1;
+ for (int i = 0; i < 4; ++i) {
+@@ -1097,45 +1089,49 @@ static const uint8_t* getGammaTable(U8CP
+ build_power_table(gGammaTables[2], 1);
+ build_power_table(gGammaTables[3], 1);
+ #endif
+ gInited = true;
+ }
+ SkASSERT(0 == (luminance >> 8));
+ return gGammaTables[luminance >> 6];
+ }
++#endif
+
++#ifndef SK_USE_COLOR_LUMINANCE
+ static void invertGammaMask(bool isWhite, CGRGBPixel rgb[], int width,
+ int height, size_t rb) {
+ const uint8_t* table = getInverseTable(isWhite);
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ uint32_t c = rgb[x];
+ int r = (c >> 16) & 0xFF;
+ int g = (c >> 8) & 0xFF;
+ int b = (c >> 0) & 0xFF;
+ rgb[x] = (table[r] << 16) | (table[g] << 8) | table[b];
+ }
+ rgb = (CGRGBPixel*)((char*)rgb + rb);
+ }
+ }
++#endif
+
+ static void cgpixels_to_bits(uint8_t dst[], const CGRGBPixel src[], int count) {
+ while (count > 0) {
+ uint8_t mask = 0;
+ for (int i = 7; i >= 0; --i) {
+ mask |= (CGRGBPixel_getAlpha(*src++) >> 7) << i;
+ if (0 == --count) {
+ break;
+ }
+ }
+ *dst++ = mask;
+ }
+ }
+
++#ifdef SK_USE_COLOR_LUMINANCE
+ static int lerpScale(int dst, int src, int scale) {
+ return dst + (scale * (src - dst) >> 23);
+ }
+
+ static CGRGBPixel lerpPixel(CGRGBPixel dst, CGRGBPixel src,
+ int scaleR, int scaleG, int scaleB) {
+ int sr = (src >> 16) & 0xFF;
+ int sg = (src >> 8) & 0xFF;
+@@ -1147,37 +1143,31 @@ static CGRGBPixel lerpPixel(CGRGBPixel d
+ int rr = lerpScale(dr, sr, scaleR);
+ int rg = lerpScale(dg, sg, scaleG);
+ int rb = lerpScale(db, sb, scaleB);
+ return (rr << 16) | (rg << 8) | rb;
+ }
+
+ static void lerpPixels(CGRGBPixel dst[], const CGRGBPixel src[], int width,
+ int height, int rowBytes, int lumBits) {
+-#ifdef SK_USE_COLOR_LUMINANCE
+ int scaleR = (1 << 23) * SkColorGetR(lumBits) / 0xFF;
+ int scaleG = (1 << 23) * SkColorGetG(lumBits) / 0xFF;
+ int scaleB = (1 << 23) * SkColorGetB(lumBits) / 0xFF;
+-#else
+- int scale = (1 << 23) * lumBits / SkScalerContext::kLuminance_Max;
+- int scaleR = scale;
+- int scaleG = scale;
+- int scaleB = scale;
+-#endif
+
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ // bit-not the src, since it was drawn from black, so we need the
+ // compliment of those bits
+ dst[x] = lerpPixel(dst[x], ~src[x], scaleR, scaleG, scaleB);
+ }
+ src = (CGRGBPixel*)((char*)src + rowBytes);
+ dst = (CGRGBPixel*)((char*)dst + rowBytes);
+ }
+ }
++#endif
+
+ #if 1
+ static inline int r32_to_16(int x) { return SkR32ToR16(x); }
+ static inline int g32_to_16(int x) { return SkG32ToG16(x); }
+ static inline int b32_to_16(int x) { return SkB32ToB16(x); }
+ #else
+ static inline int round8to5(int x) {
+ return (x + 3 - (x >> 5) + (x >> 7)) >> 3;
+@@ -1212,22 +1202,21 @@ static inline uint32_t rgb_to_lcd32(CGRG
+ return SkPackARGB32(0xFF, r, g, b);
+ }
+
+ #define BLACK_LUMINANCE_LIMIT 0x40
+ #define WHITE_LUMINANCE_LIMIT 0xA0
+
+ void SkScalerContext_Mac::generateImage(const SkGlyph& glyph) {
+ CGGlyph cgGlyph = (CGGlyph) glyph.getGlyphID(fBaseGlyphCount);
+-
+ const bool isLCD = isLCDFormat(glyph.fMaskFormat);
++#ifdef SK_USE_COLOR_LUMINANCE
+ const bool isBW = SkMask::kBW_Format == glyph.fMaskFormat;
+ const bool isA8 = !isLCD && !isBW;
+-
+-#ifdef SK_USE_COLOR_LUMINANCE
++
+ unsigned lumBits = fRec.getLuminanceColor();
+ uint32_t xorMask = 0;
+
+ if (isA8) {
+ // for A8, we just want a component (they're all the same)
+ lumBits = SkColorGetR(lumBits);
+ }
+ #else
+diff --git a/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp b/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
+--- a/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
++++ b/gfx/skia/src/utils/mac/SkCreateCGImageRef.cpp
+@@ -163,59 +163,8 @@ private:
+ CGPDFDocumentRef fDoc;
+ };
+
+ static void CGDataProviderReleaseData_FromMalloc(void*, const void* data,
+ size_t size) {
+ sk_free((void*)data);
+ }
+
+-bool SkPDFDocumentToBitmap(SkStream* stream, SkBitmap* output) {
+- size_t size = stream->getLength();
+- void* ptr = sk_malloc_throw(size);
+- stream->read(ptr, size);
+- CGDataProviderRef data = CGDataProviderCreateWithData(NULL, ptr, size,
+- CGDataProviderReleaseData_FromMalloc);
+- if (NULL == data) {
+- return false;
+- }
+-
+- CGPDFDocumentRef pdf = CGPDFDocumentCreateWithProvider(data);
+- CGDataProviderRelease(data);
+- if (NULL == pdf) {
+- return false;
+- }
+- SkAutoPDFRelease releaseMe(pdf);
+-
+- CGPDFPageRef page = CGPDFDocumentGetPage(pdf, 1);
+- if (NULL == page) {
+- return false;
+- }
+-
+- CGRect bounds = CGPDFPageGetBoxRect(page, kCGPDFMediaBox);
+-
+- int w = (int)CGRectGetWidth(bounds);
+- int h = (int)CGRectGetHeight(bounds);
+-
+- SkBitmap bitmap;
+- bitmap.setConfig(SkBitmap::kARGB_8888_Config, w, h);
+- bitmap.allocPixels();
+- bitmap.eraseColor(SK_ColorWHITE);
+-
+- size_t bitsPerComponent;
+- CGBitmapInfo info;
+- getBitmapInfo(bitmap, &bitsPerComponent, &info, NULL);
+-
+- CGColorSpaceRef cs = CGColorSpaceCreateDeviceRGB();
+- CGContextRef ctx = CGBitmapContextCreate(bitmap.getPixels(), w, h,
+- bitsPerComponent, bitmap.rowBytes(),
+- cs, info);
+- CGColorSpaceRelease(cs);
+-
+- if (ctx) {
+- CGContextDrawPDFPage(ctx, page);
+- CGContextRelease(ctx);
+- }
+-
+- output->swap(bitmap);
+- return true;
+-}
+-
diff --git a/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch b/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch
new file mode 100644
index 0000000000..e00fd8602e
--- /dev/null
+++ b/gfx/skia/patches/archive/0016-Bug-718849-Radial-gradients.patch
@@ -0,0 +1,400 @@
+# HG changeset patch
+# User Matt Woodrow <mwoodrow@mozilla.com>
+# Date 1339988782 -43200
+# Node ID 1e9dae659ee6c992f719fd4136efbcc5410ded37
+# Parent 946750f6d95febd199fb7b748e9d2c48fd01c8a6
+[mq]: skia-windows-gradients
+
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -847,16 +847,19 @@ bool Linear_Gradient::setContext(const S
+ fFlags |= SkShader::kConstInY32_Flag;
+ if ((fFlags & SkShader::kHasSpan16_Flag) && !paint.isDither()) {
+ // only claim this if we do have a 16bit mode (i.e. none of our
+ // colors have alpha), and if we are not dithering (which obviously
+ // is not const in Y).
+ fFlags |= SkShader::kConstInY16_Flag;
+ }
+ }
++ if (fStart == fEnd) {
++ fFlags &= ~kOpaqueAlpha_Flag;
++ }
+ return true;
+ }
+
+ #define NO_CHECK_ITER \
+ do { \
+ unsigned fi = fx >> Gradient_Shader::kCache32Shift; \
+ SkASSERT(fi <= 0xFF); \
+ fx += dx; \
+@@ -976,16 +979,21 @@ void Linear_Gradient::shadeSpan(int x, i
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+ #ifdef USE_DITHER_32BIT_GRADIENT
+ int toggle = ((x ^ y) & 1) * kDitherStride32;
+ #else
+ int toggle = 0;
+ #endif
+
++ if (fStart == fEnd) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ SkFixed dxStorage[1];
+ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), dxStorage, NULL);
+@@ -1169,16 +1177,21 @@ void Linear_Gradient::shadeSpan16(int x,
+ SkASSERT(count > 0);
+
+ SkPoint srcPt;
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const uint16_t* SK_RESTRICT cache = this->getCache16();
+ int toggle = ((x ^ y) & 1) * kDitherStride16;
+
++ if (fStart == fEnd) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+ SkFixed dxStorage[1];
+ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), dxStorage, NULL);
+@@ -1739,21 +1752,25 @@ void Radial_Gradient::shadeSpan(int x, i
+ possible circles on which the point may fall. Solving for t yields
+ the gradient value to use.
+
+ If a<0, the start circle is entirely contained in the
+ end circle, and one of the roots will be <0 or >1 (off the line
+ segment). If a>0, the start circle falls at least partially
+ outside the end circle (or vice versa), and the gradient
+ defines a "tube" where a point may be on one circle (on the
+- inside of the tube) or the other (outside of the tube). We choose
+- one arbitrarily.
++ inside of the tube) or the other (outside of the tube). We choose
++ the one with the highest t value, as long as the radius that it
++ corresponds to is >=0. In the case where neither root has a positive
++ radius, we don't draw anything.
+
++ XXXmattwoodrow: I've removed this for now since it breaks
++ down when Dr == 0. Is there something else we can do instead?
+ In order to keep the math to within the limits of fixed point,
+- we divide the entire quadratic by Dr^2, and replace
++ we divide the entire quadratic by Dr, and replace
+ (x - Sx)/Dr with x' and (y - Sy)/Dr with y', giving
+
+ [Dx^2 / Dr^2 + Dy^2 / Dr^2 - 1)] * t^2
+ + 2 * [x' * Dx / Dr + y' * Dy / Dr - Sr / Dr] * t
+ + [x'^2 + y'^2 - Sr^2/Dr^2] = 0
+
+ (x' and y' are computed by appending the subtract and scale to the
+ fDstToIndex matrix in the constructor).
+@@ -1763,99 +1780,122 @@ void Radial_Gradient::shadeSpan(int x, i
+ x' and y', if x and y are linear in the span, 'B' can be computed
+ incrementally with a simple delta (db below). If it is not (e.g.,
+ a perspective projection), it must be computed in the loop.
+
+ */
+
+ namespace {
+
+-inline SkFixed two_point_radial(SkScalar b, SkScalar fx, SkScalar fy,
+- SkScalar sr2d2, SkScalar foura,
+- SkScalar oneOverTwoA, bool posRoot) {
++inline bool two_point_radial(SkScalar b, SkScalar fx, SkScalar fy,
++ SkScalar sr2d2, SkScalar foura,
++ SkScalar oneOverTwoA, SkScalar diffRadius,
++ SkScalar startRadius, SkFixed& t) {
+ SkScalar c = SkScalarSquare(fx) + SkScalarSquare(fy) - sr2d2;
+ if (0 == foura) {
+- return SkScalarToFixed(SkScalarDiv(-c, b));
++ SkScalar result = SkScalarDiv(-c, b);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
++ }
++ return false;
+ }
+
+ SkScalar discrim = SkScalarSquare(b) - SkScalarMul(foura, c);
+ if (discrim < 0) {
+- discrim = -discrim;
++ return false;
+ }
+ SkScalar rootDiscrim = SkScalarSqrt(discrim);
+- SkScalar result;
+- if (posRoot) {
+- result = SkScalarMul(-b + rootDiscrim, oneOverTwoA);
+- } else {
+- result = SkScalarMul(-b - rootDiscrim, oneOverTwoA);
++
++ // Make sure the results corresponds to a positive radius.
++ SkScalar result = SkScalarMul(-b + rootDiscrim, oneOverTwoA);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
+ }
+- return SkScalarToFixed(result);
++ result = SkScalarMul(-b - rootDiscrim, oneOverTwoA);
++ if (result * diffRadius + startRadius >= 0) {
++ t = SkScalarToFixed(result);
++ return true;
++ }
++
++ return false;
+ }
+
+ typedef void (* TwoPointRadialShadeProc)(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count);
+
+ void shadeSpan_twopoint_clamp(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+-
+- if (t < 0) {
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else if (t < 0) {
+ *dstC++ = cache[-1];
+ } else if (t > 0xFFFF) {
+ *dstC++ = cache[Gradient_Shader::kCache32Count * 2];
+ } else {
+ SkASSERT(t <= 0xFFFF);
+ *dstC++ = cache[t >> Gradient_Shader::kCache32Shift];
+ }
+
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+ void shadeSpan_twopoint_mirror(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = mirror_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = mirror_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - Gradient_Shader::kCache32Shift)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+
+ void shadeSpan_twopoint_repeat(SkScalar fx, SkScalar dx,
+ SkScalar fy, SkScalar dy,
+ SkScalar b, SkScalar db,
+- SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA, bool posRoot,
++ SkScalar fSr2D2, SkScalar foura, SkScalar fOneOverTwoA,
++ SkScalar fDiffRadius, SkScalar fRadius1,
+ SkPMColor* SK_RESTRICT dstC, const SkPMColor* SK_RESTRICT cache,
+ int count) {
+ for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = repeat_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = repeat_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - Gradient_Shader::kCache32Shift)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ }
+
+
+
+@@ -1935,17 +1975,16 @@ public:
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+
+ SkScalar foura = fA * 4;
+- bool posRoot = fDiffRadius < 0;
+ if (fDstToIndexClass != kPerspective_MatrixClass) {
+ SkPoint srcPt;
+ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+ SkScalar dx, fx = srcPt.fX;
+ SkScalar dy, fy = srcPt.fY;
+
+ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+@@ -1954,60 +1993,69 @@ public:
+ dx = SkFixedToScalar(fixedX);
+ dy = SkFixedToScalar(fixedY);
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = fDstToIndex.getScaleX();
+ dy = fDstToIndex.getSkewY();
+ }
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkScalarMul(fDiff.fY, fy) - fStartRadius * fDiffRadius) * 2;
+ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+ SkScalarMul(fDiff.fY, dy)) * 2;
+
+ TwoPointRadialShadeProc shadeProc = shadeSpan_twopoint_repeat;
+ if (proc == clamp_tileproc) {
+ shadeProc = shadeSpan_twopoint_clamp;
+ } else if (proc == mirror_tileproc) {
+ shadeProc = shadeSpan_twopoint_mirror;
+ } else {
+ SkASSERT(proc == repeat_tileproc);
+ }
+ (*shadeProc)(fx, dx, fy, dy, b, db,
+- fSr2D2, foura, fOneOverTwoA, posRoot,
++ fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1,
+ dstC, cache, count);
+ } else { // perspective case
+ SkScalar dstX = SkIntToScalar(x);
+ SkScalar dstY = SkIntToScalar(y);
+ for (; count > 0; --count) {
+ SkPoint srcPt;
+ dstProc(fDstToIndex, dstX, dstY, &srcPt);
+ SkScalar fx = srcPt.fX;
+ SkScalar fy = srcPt.fY;
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura,
+- fOneOverTwoA, posRoot);
+- SkFixed index = proc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> Gradient_Shader::kCache32Shift];
++ SkFixed t;
++ if (!two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, fDiffRadius, fRadius1, t)) {
++ *(dstC++) = 0;
++ } else {
++ SkFixed index = proc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ }
+ dstX += SK_Scalar1;
+ }
+ }
+ }
+
+ virtual bool setContext(const SkBitmap& device,
+ const SkPaint& paint,
+ const SkMatrix& matrix) SK_OVERRIDE {
+ if (!this->INHERITED::setContext(device, paint, matrix)) {
+ return false;
+ }
+
+ // we don't have a span16 proc
+ fFlags &= ~kHasSpan16_Flag;
++
++ // If we might end up wanting to draw nothing as part of the gradient
++ // then we should mark ourselves as not being opaque.
++ if (fA >= 0 || (fDiffRadius == 0 && fCenter1 == fCenter2)) {
++ fFlags &= ~kOpaqueAlpha_Flag;
++ }
+ return true;
+ }
+
+ SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(Two_Point_Radial_Gradient)
+
+ protected:
+ Two_Point_Radial_Gradient(SkFlattenableReadBuffer& buffer)
+ : INHERITED(buffer),
+@@ -2033,26 +2081,22 @@ private:
+ const SkScalar fRadius1;
+ const SkScalar fRadius2;
+ SkPoint fDiff;
+ SkScalar fStartRadius, fDiffRadius, fSr2D2, fA, fOneOverTwoA;
+
+ void init() {
+ fDiff = fCenter1 - fCenter2;
+ fDiffRadius = fRadius2 - fRadius1;
+- SkScalar inv = SkScalarInvert(fDiffRadius);
+- fDiff.fX = SkScalarMul(fDiff.fX, inv);
+- fDiff.fY = SkScalarMul(fDiff.fY, inv);
+- fStartRadius = SkScalarMul(fRadius1, inv);
++ fStartRadius = fRadius1;
+ fSr2D2 = SkScalarSquare(fStartRadius);
+- fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SK_Scalar1;
++ fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SkScalarSquare(fDiffRadius);
+ fOneOverTwoA = fA ? SkScalarInvert(fA * 2) : 0;
+
+ fPtsToUnit.setTranslate(-fCenter1.fX, -fCenter1.fY);
+- fPtsToUnit.postScale(inv, inv);
+ }
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ class Sweep_Gradient : public Gradient_Shader {
+ public:
+ Sweep_Gradient(SkScalar cx, SkScalar cy, const SkColor colors[],
+@@ -2488,16 +2532,20 @@ SkShader* SkGradientShader::CreateTwoPoi
+ int colorCount,
+ SkShader::TileMode mode,
+ SkUnitMapper* mapper) {
+ if (startRadius < 0 || endRadius < 0 || NULL == colors || colorCount < 1) {
+ return NULL;
+ }
+ EXPAND_1_COLOR(colorCount);
+
++ if (start == end && startRadius == 0) {
++ return CreateRadial(start, endRadius, colors, pos, colorCount, mode, mapper);
++ }
++
+ return SkNEW_ARGS(Two_Point_Radial_Gradient,
+ (start, startRadius, end, endRadius, colors, pos,
+ colorCount, mode, mapper));
+ }
+
+ SkShader* SkGradientShader::CreateSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[],
+ const SkScalar pos[],
diff --git a/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch b/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch
new file mode 100644
index 0000000000..719fda1650
--- /dev/null
+++ b/gfx/skia/patches/archive/0017-Bug-740194-SkMemory-mozalloc.patch
@@ -0,0 +1,73 @@
+commit 5786f516119bcb677510f3c9256b870c3b5616c8
+Author: George Wright <gwright@mozilla.com>
+Date: Wed Aug 15 23:51:34 2012 -0400
+
+ Bug 740194 - [Skia] Implement a version of SkMemory for Mozilla that uses the infallible mozalloc allocators r=cjones
+
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+index f98ba85..17be191 100644
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -35,6 +35,16 @@
+ commented out, so including it will have no effect.
+ */
+
++/*
++ Override new/delete with Mozilla's allocator, mozalloc
++
++ Ideally we shouldn't need to do this here, but until
++ http://code.google.com/p/skia/issues/detail?id=598 is fixed
++ we need to include this here to override operator new and delete
++*/
++
++#include "mozilla/mozalloc.h"
++
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /* Scalars (the fractional value type in skia) can be implemented either as
+diff --git a/gfx/skia/src/ports/SkMemory_mozalloc.cpp b/gfx/skia/src/ports/SkMemory_mozalloc.cpp
+new file mode 100644
+index 0000000..1f16ee5
+--- /dev/null
++++ b/gfx/skia/src/ports/SkMemory_mozalloc.cpp
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2011 Google Inc.
++ * Copyright 2012 Mozilla Foundation
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
++#include "SkTypes.h"
++
++#include "mozilla/mozalloc.h"
++#include "mozilla/mozalloc_abort.h"
++#include "mozilla/mozalloc_oom.h"
++
++void sk_throw() {
++ SkDEBUGFAIL("sk_throw");
++ mozalloc_abort("Abort from sk_throw");
++}
++
++void sk_out_of_memory(void) {
++ SkDEBUGFAIL("sk_out_of_memory");
++ mozalloc_handle_oom(0);
++}
++
++void* sk_malloc_throw(size_t size) {
++ return sk_malloc_flags(size, SK_MALLOC_THROW);
++}
++
++void* sk_realloc_throw(void* addr, size_t size) {
++ return moz_xrealloc(addr, size);
++}
++
++void sk_free(void* p) {
++ free(p);
++}
++
++void* sk_malloc_flags(size_t size, unsigned flags) {
++ return (flags & SK_MALLOC_THROW) ? moz_xmalloc(size) : malloc(size);
++}
++
diff --git a/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch b/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch
new file mode 100644
index 0000000000..d16ec4b3b4
--- /dev/null
+++ b/gfx/skia/patches/archive/0018-Bug-817356-PPC-defines.patch
@@ -0,0 +1,14 @@
+Index: gfx/skia/include/core/SkPreConfig.h
+===================================================================
+--- gfx/skia/include/core/SkPreConfig.h (revision 6724)
++++ gfx/skia/include/core/SkPreConfig.h (working copy)
+@@ -94,7 +94,8 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+- #if defined (__ppc__) || defined(__ppc64__)
++ #if defined (__ppc__) || defined(__PPC__) || defined(__ppc64__) \
++ || defined(__PPC64__)
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
diff --git a/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch b/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch
new file mode 100644
index 0000000000..97404c431b
--- /dev/null
+++ b/gfx/skia/patches/archive/0022-Bug-848491-Re-apply-bug-795538-Ensure-we-use-the-cor.patch
@@ -0,0 +1,39 @@
+From: George Wright <gwright@mozilla.com>
+Date: Thu, 20 Jun 2013 09:21:21 -0400
+Subject: Bug 848491 - Re-apply bug 795538 - Ensure we use the correct colour (and alpha) for the clamp values r=mattwoodrow
+
+
+diff --git a/gfx/skia/src/effects/gradients/SkGradientShader.cpp b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+index 27a9c46..ce077b5 100644
+--- a/gfx/skia/src/effects/gradients/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/gradients/SkGradientShader.cpp
+@@ -500,15 +500,17 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
+ }
+
+ // Write the clamp colours into the first and last entries of fCache32
+- fCache32[kCache32ClampLower] = SkPackARGB32(fCacheAlpha,
+- SkColorGetR(fOrigColors[0]),
+- SkColorGetG(fOrigColors[0]),
+- SkColorGetB(fOrigColors[0]));
+-
+- fCache32[kCache32ClampUpper] = SkPackARGB32(fCacheAlpha,
+- SkColorGetR(fOrigColors[fColorCount - 1]),
+- SkColorGetG(fOrigColors[fColorCount - 1]),
+- SkColorGetB(fOrigColors[fColorCount - 1]));
++ fCache32[kCache32ClampLower] = SkPremultiplyARGBInline(SkMulDiv255Round(SkColorGetA(fOrigColors[0]),
++ fCacheAlpha),
++ SkColorGetR(fOrigColors[0]),
++ SkColorGetG(fOrigColors[0]),
++ SkColorGetB(fOrigColors[0]));
++
++ fCache32[kCache32ClampUpper] = SkPremultiplyARGBInline(SkMulDiv255Round(SkColorGetA(fOrigColors[fColorCount - 1]),
++ fCacheAlpha),
++ SkColorGetR(fOrigColors[fColorCount - 1]),
++ SkColorGetG(fOrigColors[fColorCount - 1]),
++ SkColorGetB(fOrigColors[fColorCount - 1]));
+
+ return fCache32;
+ }
+--
+1.7.11.7
+
diff --git a/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch b/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch
new file mode 100644
index 0000000000..9bc7ddec46
--- /dev/null
+++ b/gfx/skia/patches/archive/0023-Bug-890539-Fix-SK_COMPILE_ASSERT-build-warning.patch
@@ -0,0 +1,39 @@
+# HG changeset patch
+# Parent e378875000890099fffcdb4cbc4ab12828ac34ee
+# User Daniel Holbert <dholbert@cs.stanford.edu>
+Bug 890539: Annotate SK_COMPILE_ASSERT's typedef as permissibly unused, to fix GCC 4.8 build warning. r=gw280
+
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -121,18 +121,29 @@ inline void operator delete(void* p) {
+ #define SkDEVCODE(code)
+ #define SK_DEVELOPER_TO_STRING()
+ #endif
+
+ template <bool>
+ struct SkCompileAssert {
+ };
+
++/*
++ * The SK_COMPILE_ASSERT definition creates an otherwise-unused typedef. This
++ * triggers compiler warnings with some versions of gcc, so mark the typedef
++ * as permissibly-unused to disable the warnings.
++ */
++# if defined(__GNUC__)
++# define SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
++# else
++# define SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE /* nothing */
++# endif
++
+ #define SK_COMPILE_ASSERT(expr, msg) \
+- typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
++ typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] SK_COMPILE_ASSERT_UNUSED_ATTRIBUTE
+
+ /*
+ * Usage: SK_MACRO_CONCAT(a, b) to construct the symbol ab
+ *
+ * SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
+ *
+ */
+ #define SK_MACRO_CONCAT(X, Y) SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
diff --git a/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch b/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch
new file mode 100644
index 0000000000..864a0af7a9
--- /dev/null
+++ b/gfx/skia/patches/archive/0024-Bug-887318-fix-bgra-readback.patch
@@ -0,0 +1,217 @@
+diff --git a/gfx/gl/GLContextSkia.cpp b/gfx/gl/GLContextSkia.cpp
+--- a/gfx/gl/GLContextSkia.cpp
++++ b/gfx/gl/GLContextSkia.cpp
+@@ -303,39 +303,47 @@ const GLubyte* glGetString_mozilla(GrGLe
+ if (name == LOCAL_GL_VERSION) {
+ if (sGLContext.get()->IsGLES2()) {
+ return reinterpret_cast<const GLubyte*>("OpenGL ES 2.0");
+ } else {
+ return reinterpret_cast<const GLubyte*>("2.0");
+ }
+ } else if (name == LOCAL_GL_EXTENSIONS) {
+ // Only expose the bare minimum extensions we want to support to ensure a functional Ganesh
+ // as GLContext only exposes certain extensions
+ static bool extensionsStringBuilt = false;
+- static char extensionsString[120];
++ static char extensionsString[256];
+
+ if (!extensionsStringBuilt) {
+ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_texture_format_BGRA8888)) {
+ strcpy(extensionsString, "GL_EXT_texture_format_BGRA8888 ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::OES_packed_depth_stencil)) {
+ strcat(extensionsString, "GL_OES_packed_depth_stencil ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_packed_depth_stencil)) {
+ strcat(extensionsString, "GL_EXT_packed_depth_stencil ");
+ }
+
+ if (sGLContext.get()->IsExtensionSupported(GLContext::OES_rgb8_rgba8)) {
+ strcat(extensionsString, "GL_OES_rgb8_rgba8 ");
+ }
+
++ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_bgra)) {
++ strcat(extensionsString, "GL_EXT_bgra ");
++ }
++
++ if (sGLContext.get()->IsExtensionSupported(GLContext::EXT_read_format_bgra)) {
++ strcat(extensionsString, "GL_EXT_read_format_bgra ");
++ }
++
+ extensionsStringBuilt = true;
+ }
+
+ return reinterpret_cast<const GLubyte*>(extensionsString);
+
+ } else if (name == LOCAL_GL_SHADING_LANGUAGE_VERSION) {
+ if (sGLContext.get()->IsGLES2()) {
+ return reinterpret_cast<const GLubyte*>("OpenGL ES GLSL ES 1.0");
+ } else {
+ return reinterpret_cast<const GLubyte*>("1.10");
+diff --git a/gfx/skia/src/gpu/gl/GrGpuGL.cpp b/gfx/skia/src/gpu/gl/GrGpuGL.cpp
+--- a/gfx/skia/src/gpu/gl/GrGpuGL.cpp
++++ b/gfx/skia/src/gpu/gl/GrGpuGL.cpp
+@@ -1,18 +1,18 @@
+ /*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+-
++#include <algorithm>
+ #include "GrGpuGL.h"
+ #include "GrGLStencilBuffer.h"
+ #include "GrGLPath.h"
+ #include "GrGLShaderBuilder.h"
+ #include "GrTemplates.h"
+ #include "GrTypes.h"
+ #include "SkTemplates.h"
+
+ static const GrGLuint GR_MAX_GLUINT = ~0U;
+ static const GrGLint GR_INVAL_GLINT = ~0;
+@@ -1381,29 +1381,67 @@ bool GrGpuGL::readPixelsWillPayForYFlip(
+ // Note the rowBytes might be tight to the passed in data, but if data
+ // gets clipped in x to the target the rowBytes will no longer be tight.
+ if (left >= 0 && (left + width) < renderTarget->width()) {
+ return 0 == rowBytes ||
+ GrBytesPerPixel(config) * width == rowBytes;
+ } else {
+ return false;
+ }
+ }
+
++static void swizzleRow(void* buffer, int byteLen) {
++ uint8_t* src = (uint8_t*)buffer;
++ uint8_t* end = src + byteLen;
++
++ GrAssert((end - src) % 4 == 0);
++
++ for (; src != end; src += 4) {
++ std::swap(src[0], src[2]);
++ }
++}
++
++bool GrGpuGL::canReadBGRA() const
++{
++ if (kDesktop_GrGLBinding == this->glBinding() ||
++ this->hasExtension("GL_EXT_bgra"))
++ return true;
++
++ if (this->hasExtension("GL_EXT_read_format_bgra")) {
++ GrGLint readFormat = 0;
++ GrGLint readType = 0;
++
++ GL_CALL(GetIntegerv(GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &readFormat));
++ GL_CALL(GetIntegerv(GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &readType));
++
++ return readFormat == GR_GL_BGRA && readType == GR_GL_UNSIGNED_BYTE;
++ }
++
++ return false;
++}
++
+ bool GrGpuGL::onReadPixels(GrRenderTarget* target,
+ int left, int top,
+ int width, int height,
+ GrPixelConfig config,
+ void* buffer,
+ size_t rowBytes) {
+ GrGLenum format;
+ GrGLenum type;
+ bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
++ bool needSwizzle = false;
++
++ if (kBGRA_8888_GrPixelConfig == config && !this->canReadBGRA()) {
++ // Read RGBA and swizzle after
++ config = kRGBA_8888_GrPixelConfig;
++ needSwizzle = true;
++ }
++
+ if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
+ return false;
+ }
+ size_t bpp = GrBytesPerPixel(config);
+ if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
+ &left, &top, &width, &height,
+ const_cast<const void**>(&buffer),
+ &rowBytes)) {
+ return false;
+ }
+@@ -1478,35 +1516,46 @@ bool GrGpuGL::onReadPixels(GrRenderTarge
+ scratch.reset(tightRowBytes);
+ void* tmpRow = scratch.get();
+ // flip y in-place by rows
+ const int halfY = height >> 1;
+ char* top = reinterpret_cast<char*>(buffer);
+ char* bottom = top + (height - 1) * rowBytes;
+ for (int y = 0; y < halfY; y++) {
+ memcpy(tmpRow, top, tightRowBytes);
+ memcpy(top, bottom, tightRowBytes);
+ memcpy(bottom, tmpRow, tightRowBytes);
++
++ if (needSwizzle) {
++ swizzleRow(top, tightRowBytes);
++ swizzleRow(bottom, tightRowBytes);
++ }
++
+ top += rowBytes;
+ bottom -= rowBytes;
+ }
+ }
+ } else {
+- GrAssert(readDst != buffer); GrAssert(rowBytes != tightRowBytes);
++ GrAssert(readDst != buffer);
++ GrAssert(rowBytes != tightRowBytes);
+ // copy from readDst to buffer while flipping y
+ // const int halfY = height >> 1;
+ const char* src = reinterpret_cast<const char*>(readDst);
+ char* dst = reinterpret_cast<char*>(buffer);
+ if (flipY) {
+ dst += (height-1) * rowBytes;
+ }
+ for (int y = 0; y < height; y++) {
+ memcpy(dst, src, tightRowBytes);
++ if (needSwizzle) {
++ swizzleRow(dst, tightRowBytes);
++ }
++
+ src += readDstRowBytes;
+ if (!flipY) {
+ dst += rowBytes;
+ } else {
+ dst -= rowBytes;
+ }
+ }
+ }
+ return true;
+ }
+diff --git a/gfx/skia/src/gpu/gl/GrGpuGL.h b/gfx/skia/src/gpu/gl/GrGpuGL.h
+--- a/gfx/skia/src/gpu/gl/GrGpuGL.h
++++ b/gfx/skia/src/gpu/gl/GrGpuGL.h
+@@ -243,20 +243,22 @@ private:
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes);
+
+ bool createRenderTargetObjects(int width, int height,
+ GrGLuint texID,
+ GrGLRenderTarget::Desc* desc);
+
+ void fillInConfigRenderableTable();
+
++ bool canReadBGRA() const;
++
+ GrGLContext fGLContext;
+
+ // GL program-related state
+ ProgramCache* fProgramCache;
+ SkAutoTUnref<GrGLProgram> fCurrentProgram;
+
+ ///////////////////////////////////////////////////////////////////////////
+ ///@name Caching of GL State
+ ///@{
+ int fHWActiveTextureUnitIdx;
diff --git a/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch b/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch
new file mode 100644
index 0000000000..aff99f75f1
--- /dev/null
+++ b/gfx/skia/patches/archive/0025-Bug-896049-Add-default-Value-SK_OVERRIDE.patch
@@ -0,0 +1,26 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -325,19 +325,19 @@
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
+ #elif defined(__has_extension)
+ #if __has_extension(cxx_override_control)
+ #define SK_OVERRIDE override
+ #endif
+ #endif
+- #else
+- // Linux GCC ignores "__attribute__((override))" and rejects "override".
+- #define SK_OVERRIDE
++ #endif
++ #ifndef SK_OVERRIDE
++ #define SK_OVERRIDE
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_PRINTF_LIKE
+ #if defined(__clang__) || defined(__GNUC__)
+ #define SK_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B))))
diff --git a/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch b/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch
new file mode 100644
index 0000000000..5c95b54014
--- /dev/null
+++ b/gfx/skia/patches/archive/0026-Bug-901208-Fix-ARM-v4t.patch
@@ -0,0 +1,83 @@
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -26,66 +26,78 @@ static void S32A_D565_Opaque(uint16_t* S
+ asm volatile (
+ "1: \n\t"
+ "ldr r3, [%[src]], #4 \n\t"
+ "cmp r3, #0xff000000 \n\t"
+ "blo 2f \n\t"
+ "and r4, r3, #0x0000f8 \n\t"
+ "and r5, r3, #0x00fc00 \n\t"
+ "and r6, r3, #0xf80000 \n\t"
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r1, #32] \n\t"
++#endif
+ "lsl r3, r4, #8 \n\t"
+ "orr r3, r3, r5, lsr #5 \n\t"
+ "orr r3, r3, r6, lsr #19 \n\t"
+ "subs %[count], %[count], #1 \n\t"
+ "strh r3, [%[dst]], #2 \n\t"
+ "bne 1b \n\t"
+ "b 4f \n\t"
+ "2: \n\t"
+ "lsrs r7, r3, #24 \n\t"
+ "beq 3f \n\t"
+ "ldrh r4, [%[dst]] \n\t"
+ "rsb r7, r7, #255 \n\t"
+ "and r6, r4, #0x001f \n\t"
+-#if SK_ARM_ARCH == 6
++#if SK_ARM_ARCH <= 6
+ "lsl r5, r4, #21 \n\t"
+ "lsr r5, r5, #26 \n\t"
+ #else
+ "ubfx r5, r4, #5, #6 \n\t"
+ #endif
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r0, #16] \n\t"
++#endif
+ "lsr r4, r4, #11 \n\t"
+ #ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, r7 \n\t"
+ "smulbb r5, r5, r7 \n\t"
+ "smulbb r4, r4, r7 \n\t"
+ #else
+ "mul r6, r6, r7 \n\t"
+ "mul r5, r5, r7 \n\t"
+ "mul r4, r4, r7 \n\t"
+ #endif
++#if SK_ARM_ARCH >= 6
+ "uxtb r7, r3, ROR #16 \n\t"
+ "uxtb ip, r3, ROR #8 \n\t"
++#else
++ "mov ip, #0xff \n\t"
++ "and r7, ip, r3, ROR #16 \n\t"
++ "and ip, ip, r3, ROR #8 \n\t"
++#endif
+ "and r3, r3, #0xff \n\t"
+ "add r6, r6, #16 \n\t"
+ "add r5, r5, #32 \n\t"
+ "add r4, r4, #16 \n\t"
+ "add r6, r6, r6, lsr #5 \n\t"
+ "add r5, r5, r5, lsr #6 \n\t"
+ "add r4, r4, r4, lsr #5 \n\t"
+ "add r6, r7, r6, lsr #5 \n\t"
+ "add r5, ip, r5, lsr #6 \n\t"
+ "add r4, r3, r4, lsr #5 \n\t"
+ "lsr r6, r6, #3 \n\t"
+ "and r5, r5, #0xfc \n\t"
+ "and r4, r4, #0xf8 \n\t"
+ "orr r6, r6, r5, lsl #3 \n\t"
+ "orr r4, r6, r4, lsl #8 \n\t"
+ "strh r4, [%[dst]], #2 \n\t"
++#ifdef SK_ARM_HAS_EDSP
+ "pld [r1, #32] \n\t"
++#endif
+ "subs %[count], %[count], #1 \n\t"
+ "bne 1b \n\t"
+ "b 4f \n\t"
+ "3: \n\t"
+ "subs %[count], %[count], #1 \n\t"
+ "add %[dst], %[dst], #2 \n\t"
+ "bne 1b \n\t"
+ "4: \n\t"
diff --git a/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch b/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch
new file mode 100644
index 0000000000..c92bf2aaeb
--- /dev/null
+++ b/gfx/skia/patches/archive/0030-Bug-939629-Add-missing-include-guards.patch
@@ -0,0 +1,94 @@
+# HG changeset patch
+# Parent 979e60d9c09f22eb139643da6de7568b603e1aa1
+
+diff --git a/gfx/skia/include/images/SkImages.h b/gfx/skia/include/images/SkImages.h
+--- a/gfx/skia/include/images/SkImages.h
++++ b/gfx/skia/include/images/SkImages.h
+@@ -1,14 +1,19 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkImages_DEFINED
++#define SkImages_DEFINED
++
+ class SkImages {
+ public:
+ /**
+ * Initializes flattenables in the images project.
+ */
+ static void InitializeFlattenables();
+ };
++
++#endif
+diff --git a/gfx/skia/src/gpu/GrAAConvexPathRenderer.h b/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
+--- a/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
++++ b/gfx/skia/src/gpu/GrAAConvexPathRenderer.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrAAConvexPathRenderer_DEFINED
++#define GrAAConvexPathRenderer_DEFINED
++
+ #include "GrPathRenderer.h"
+
+
+ class GrAAConvexPathRenderer : public GrPathRenderer {
+ public:
+ GrAAConvexPathRenderer();
+
+ virtual bool canDrawPath(const SkPath& path,
+@@ -19,8 +22,10 @@ public:
+ bool antiAlias) const SK_OVERRIDE;
+
+ protected:
+ virtual bool onDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ GrDrawTarget* target,
+ bool antiAlias) SK_OVERRIDE;
+ };
++
++#endif
+diff --git a/gfx/skia/src/gpu/GrReducedClip.h b/gfx/skia/src/gpu/GrReducedClip.h
+--- a/gfx/skia/src/gpu/GrReducedClip.h
++++ b/gfx/skia/src/gpu/GrReducedClip.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrReducedClip_DEFINED
++#define GrReducedClip_DEFINED
++
+ #include "SkClipStack.h"
+ #include "SkTLList.h"
+
+ namespace GrReducedClip {
+
+ typedef SkTLList<SkClipStack::Element> ElementList;
+
+ enum InitialState {
+@@ -33,8 +36,10 @@ enum InitialState {
+ void ReduceClipStack(const SkClipStack& stack,
+ const SkIRect& queryBounds,
+ ElementList* result,
+ InitialState* initialState,
+ SkIRect* tighterBounds = NULL,
+ bool* requiresAA = NULL);
+
+ } // namespace GrReducedClip
++
++#endif
diff --git a/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch b/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch
new file mode 100644
index 0000000000..f58e7e1659
--- /dev/null
+++ b/gfx/skia/patches/archive/0031-Bug-945588-Add-include-guard.patch
@@ -0,0 +1,39 @@
+# HG changeset patch
+# User Ehsan Akhgari <ehsan@mozilla.com>
+
+Bug 945588 - Add include guards to SkConfig8888.h
+
+diff --git a/gfx/skia/src/core/SkConfig8888.h b/gfx/skia/src/core/SkConfig8888.h
+index 96eaef2..36bc9b4 100644
+--- a/gfx/skia/src/core/SkConfig8888.h
++++ b/gfx/skia/src/core/SkConfig8888.h
+@@ -1,16 +1,18 @@
+
+ /*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkConfig8888_DEFINED
++#define SkConfig8888_DEFINED
+
+ #include "SkCanvas.h"
+ #include "SkColorPriv.h"
+
+ /**
+ * Converts pixels from one Config8888 to another Config8888
+ */
+ void SkConvertConfig8888Pixels(uint32_t* dstPixels,
+@@ -69,8 +71,10 @@ static inline void SkCopyConfig8888ToBitmap(const SkBitmap& dstBmp,
+ int h = dstBmp.height();
+ size_t dstRowBytes = dstBmp.rowBytes();
+ uint32_t* dstPixels = reinterpret_cast<uint32_t*>(dstBmp.getPixels());
+
+ SkConvertConfig8888Pixels(dstPixels, dstRowBytes, SkCanvas::kNative_Premul_Config8888, srcPixels, srcRowBytes, srcConfig8888, w, h);
+ }
+
+ }
++
++#endif
diff --git a/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch b/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch
new file mode 100644
index 0000000000..b6b8461213
--- /dev/null
+++ b/gfx/skia/patches/archive/0032-Bug-974900-More-missing-include-guards.patch
@@ -0,0 +1,148 @@
+# HG changeset patch
+# Parent c8288d0c7a1544a590a0cac9c39397ac10c8a45b
+Bug 974900 - Add missing include guards to Skia headers - r=gw280
+
+diff --git a/gfx/skia/trunk/include/images/SkImages.h b/gfx/skia/trunk/include/images/SkImages.h
+--- a/gfx/skia/trunk/include/images/SkImages.h
++++ b/gfx/skia/trunk/include/images/SkImages.h
+@@ -1,14 +1,19 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef SkImages_DEFINED
++#define SkImages_DEFINED
++
+ class SkImages {
+ public:
+ /**
+ * Initializes flattenables in the images project.
+ */
+ static void InitializeFlattenables();
+ };
++
++#endif
+diff --git a/gfx/skia/trunk/src/core/SkConvolver.h b/gfx/skia/trunk/src/core/SkConvolver.h
+--- a/gfx/skia/trunk/src/core/SkConvolver.h
++++ b/gfx/skia/trunk/src/core/SkConvolver.h
+@@ -8,16 +8,18 @@
+ #include "SkSize.h"
+ #include "SkTypes.h"
+ #include "SkTArray.h"
+
+ // avoid confusion with Mac OS X's math library (Carbon)
+ #if defined(__APPLE__)
+ #undef FloatToConvolutionFixed
+ #undef ConvolutionFixedToFloat
++#undef FloatToFixed
++#undef FixedToFloat
+ #endif
+
+ // Represents a filter in one dimension. Each output pixel has one entry in this
+ // object for the filter values contributing to it. You build up the filter
+ // list by calling AddFilter for each output pixel (in order).
+ //
+ // We do 2-dimensional convolution by first convolving each row by one
+ // SkConvolutionFilter1D, then convolving each column by another one.
+diff --git a/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h b/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
+--- a/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
++++ b/gfx/skia/trunk/src/gpu/GrAAConvexPathRenderer.h
+@@ -3,24 +3,28 @@
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+ #include "GrPathRenderer.h"
+
++#ifndef GrAAConvexPathRenderer_DEFINED
++#define GrAAConvexPathRenderer_DEFINED
+
+ class GrAAConvexPathRenderer : public GrPathRenderer {
+ public:
+ GrAAConvexPathRenderer();
+
+ virtual bool canDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ const GrDrawTarget* target,
+ bool antiAlias) const SK_OVERRIDE;
+
+ protected:
+ virtual bool onDrawPath(const SkPath& path,
+ const SkStrokeRec& stroke,
+ GrDrawTarget* target,
+ bool antiAlias) SK_OVERRIDE;
+ };
++
++#endif
+diff --git a/gfx/skia/trunk/src/gpu/GrReducedClip.h b/gfx/skia/trunk/src/gpu/GrReducedClip.h
+--- a/gfx/skia/trunk/src/gpu/GrReducedClip.h
++++ b/gfx/skia/trunk/src/gpu/GrReducedClip.h
+@@ -1,16 +1,19 @@
+
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
++#ifndef GrReducedClip_DEFINED
++#define GrReducedClip_DEFINED
++
+ #include "SkClipStack.h"
+ #include "SkTLList.h"
+
+ namespace GrReducedClip {
+
+ typedef SkTLList<SkClipStack::Element> ElementList;
+
+ enum InitialState {
+@@ -36,8 +39,10 @@ SK_API void ReduceClipStack(const SkClip
+ const SkIRect& queryBounds,
+ ElementList* result,
+ int32_t* resultGenID,
+ InitialState* initialState,
+ SkIRect* tighterBounds = NULL,
+ bool* requiresAA = NULL);
+
+ } // namespace GrReducedClip
++
++#endif
+diff --git a/gfx/skia/trunk/src/pathops/SkLineParameters.h b/gfx/skia/trunk/src/pathops/SkLineParameters.h
+--- a/gfx/skia/trunk/src/pathops/SkLineParameters.h
++++ b/gfx/skia/trunk/src/pathops/SkLineParameters.h
+@@ -1,14 +1,18 @@
+ /*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
++
++#ifndef SkLineParameters_DEFINED
++#define SkLineParameters_DEFINED
++
+ #include "SkPathOpsCubic.h"
+ #include "SkPathOpsLine.h"
+ #include "SkPathOpsQuad.h"
+
+ // Sources
+ // computer-aided design - volume 22 number 9 november 1990 pp 538 - 549
+ // online at http://cagd.cs.byu.edu/~tom/papers/bezclip.pdf
+
+@@ -164,8 +168,10 @@ public:
+ return -a;
+ }
+
+ private:
+ double a;
+ double b;
+ double c;
+ };
++
++#endif
diff --git a/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch b/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch
new file mode 100644
index 0000000000..05f17000a0
--- /dev/null
+++ b/gfx/skia/patches/archive/0033-Bug-974900-undef-interface-windows.patch
@@ -0,0 +1,27 @@
+# HG changeset patch
+# Parent b12f9a408740aa5fd93c296a7d41e1b5f54c1b20
+Bug 974900 - #undef interface defined by windows headers - r=gw280
+
+diff --git a/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h b/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
+--- a/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
++++ b/gfx/skia/trunk/src/gpu/gl/GrGLCaps.h
+@@ -9,16 +9,19 @@
+ #ifndef GrGLCaps_DEFINED
+ #define GrGLCaps_DEFINED
+
+ #include "GrDrawTargetCaps.h"
+ #include "GrGLStencilBuffer.h"
+ #include "SkTArray.h"
+ #include "SkTDArray.h"
+
++// defined in Windows headers
++#undef interface
++
+ class GrGLContextInfo;
+
+ /**
+ * Stores some capabilities of a GL context. Most are determined by the GL
+ * version and the extensions string. It also tracks formats that have passed
+ * the FBO completeness test.
+ */
+ class GrGLCaps : public GrDrawTargetCaps {
diff --git a/gfx/skia/patches/archive/SkPostConfig.patch b/gfx/skia/patches/archive/SkPostConfig.patch
new file mode 100644
index 0000000000..d32341f4ea
--- /dev/null
+++ b/gfx/skia/patches/archive/SkPostConfig.patch
@@ -0,0 +1,32 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -277,19 +277,28 @@
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_OVERRIDE
+ #if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
++#if __has_feature(cxx_override_control)
+ // Some documentation suggests we should be using __attribute__((override)),
+ // but it doesn't work.
+ #define SK_OVERRIDE override
++#elif defined(__has_extension)
++#if __has_extension(cxx_override_control)
++#define SK_OVERRIDE override
++#endif
++#endif
++#ifndef SK_OVERRIDE
++#define SK_OVERRIDE
++#endif
+ #else
+ // Linux GCC ignores "__attribute__((override))" and rejects "override".
+ #define SK_OVERRIDE
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
diff --git a/gfx/skia/patches/archive/arm-fixes.patch b/gfx/skia/patches/archive/arm-fixes.patch
new file mode 100644
index 0000000000..d9fa430df0
--- /dev/null
+++ b/gfx/skia/patches/archive/arm-fixes.patch
@@ -0,0 +1,191 @@
+diff --git a/gfx/skia/include/core/SkMath.h b/gfx/skia/include/core/SkMath.h
+--- a/gfx/skia/include/core/SkMath.h
++++ b/gfx/skia/include/core/SkMath.h
+@@ -148,20 +148,17 @@ static inline bool SkIsPow2(int value) {
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /** SkMulS16(a, b) multiplies a * b, but requires that a and b are both int16_t.
+ With this requirement, we can generate faster instructions on some
+ architectures.
+ */
+-#if defined(__arm__) \
+- && !defined(__thumb__) \
+- && !defined(__ARM_ARCH_4T__) \
+- && !defined(__ARM_ARCH_5T__)
++#ifdef SK_ARM_HAS_EDSP
+ static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
+ SkASSERT((int16_t)x == x);
+ SkASSERT((int16_t)y == y);
+ int32_t product;
+ asm("smulbb %0, %1, %2 \n"
+ : "=r"(product)
+ : "r"(x), "r"(y)
+ );
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -300,8 +300,53 @@
+ #endif
+ #endif
+
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+ #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
+ #endif
++
++//////////////////////////////////////////////////////////////////////
++// ARM defines
++
++#if defined(__GNUC__) && defined(__arm__)
++
++# define SK_ARM_ARCH 3
++
++# if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__) \
++ || defined(_ARM_ARCH_4)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 4
++# endif
++
++# if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
++ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
++ || defined(__ARM_ARCH_5TEJ__) || defined(_ARM_ARCH_5)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 5
++# endif
++
++# if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
++ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
++ || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
++ || defined(__ARM_ARCH_6M__) || defined(_ARM_ARCH_6)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 6
++# endif
++
++# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
++ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
++ || defined(__ARM_ARCH_7EM__) || defined(_ARM_ARCH_7)
++# undef SK_ARM_ARCH
++# define SK_ARM_ARCH 7
++# endif
++
++# undef SK_ARM_HAS_EDSP
++# if defined(__thumb2__) && (SK_ARM_ARCH >= 6) \
++ || !defined(__thumb__) \
++ && ((SK_ARM_ARCH > 5) || defined(__ARM_ARCH_5E__) \
++ || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__))
++# define SK_ARM_HAS_EDSP 1
++# endif
++
++#endif
+diff --git a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBitmapProcState_opts_arm.cpp
+@@ -6,17 +6,17 @@
+ * found in the LICENSE file.
+ */
+
+
+ #include "SkBitmapProcState.h"
+ #include "SkColorPriv.h"
+ #include "SkUtils.h"
+
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ void SI8_D16_nofilter_DX_arm(
+ const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+ int count,
+ uint16_t* SK_RESTRICT colors) __attribute__((optimize("O1")));
+
+ void SI8_D16_nofilter_DX_arm(const SkBitmapProcState& s,
+ const uint32_t* SK_RESTRICT xy,
+@@ -177,17 +177,17 @@ void SI8_opaque_D32_nofilter_DX_arm(cons
+ : [xx] "+r" (xx), [count] "+r" (count), [colors] "+r" (colors)
+ : [table] "r" (table), [srcAddr] "r" (srcAddr)
+ : "memory", "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11"
+ );
+ }
+
+ s.fBitmap->getColorTable()->unlockColors(false);
+ }
+-#endif //__ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#endif // SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /* If we replace a sampleproc, then we null-out the associated shaderproc,
+ otherwise the shader won't even look at the matrix/sampler
+ */
+ void SkBitmapProcState::platformProcs() {
+ bool doFilter = fDoFilter;
+@@ -195,17 +195,17 @@ void SkBitmapProcState::platformProcs()
+ bool justDx = false;
+
+ if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
+ justDx = true;
+ }
+
+ switch (fBitmap->config()) {
+ case SkBitmap::kIndex8_Config:
+-#if __ARM_ARCH__ >= 6 && !defined(SK_CPU_BENDIAN)
++#if SK_ARM_ARCH >= 6 && !defined(SK_CPU_BENDIAN)
+ if (justDx && !doFilter) {
+ #if 0 /* crashing on android device */
+ fSampleProc16 = SI8_D16_nofilter_DX_arm;
+ fShaderProc16 = NULL;
+ #endif
+ if (isOpaque) {
+ // this one is only very slighty faster than the C version
+ fSampleProc32 = SI8_opaque_D32_nofilter_DX_arm;
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -669,18 +669,23 @@ static void __attribute((noinline,optimi
+ /* Double Loop */
+ "1: \n\t" /* <double loop> */
+ "ldm %[src]!, {r5, r6} \n\t" /* loading src pointers into r5 and r6 */
+ "ldm %[dst], {r7, r8} \n\t" /* loading dst pointers into r7 and r8 */
+
+ /* dst1_scale and dst2_scale*/
+ "lsr r9, r5, #24 \n\t" /* src >> 24 */
+ "lsr r10, r6, #24 \n\t" /* src >> 24 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
+ "smulbb r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#else
++ "mul r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
++ "mul r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
++#endif
+ "lsr r9, r9, #8 \n\t" /* r9 >> 8 */
+ "lsr r10, r10, #8 \n\t" /* r10 >> 8 */
+ "rsb r9, r9, #256 \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
+ "rsb r10, r10, #256 \n\t" /* dst2_scale = r10 = 255 - r10 + 1 */
+
+ /* ---------------------- */
+
+ /* src1, src1_scale */
+@@ -739,17 +744,21 @@ static void __attribute((noinline,optimi
+ /* else get into the single loop */
+ /* Single Loop */
+ "2: \n\t" /* <single loop> */
+ "ldr r5, [%[src]], #4 \n\t" /* loading src pointer into r5: r5=src */
+ "ldr r7, [%[dst]] \n\t" /* loading dst pointer into r7: r7=dst */
+
+ "lsr r6, r5, #24 \n\t" /* src >> 24 */
+ "and r8, r12, r5, lsr #8 \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
++#ifdef SK_ARM_HAS_EDSP
+ "smulbb r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#else
++ "mul r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
++#endif
+ "and r9, r12, r5 \n\t" /* rb = r9 = r5 masked by r12 */
+ "lsr r6, r6, #8 \n\t" /* r6 >> 8 */
+ "mul r8, r8, %[alpha] \n\t" /* ag = r8 times scale */
+ "rsb r6, r6, #256 \n\t" /* r6 = 255 - r6 + 1 */
+
+ /* src, src_scale */
+ "mul r9, r9, %[alpha] \n\t" /* rb = r9 times scale */
+ "and r8, r8, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
diff --git a/gfx/skia/patches/archive/arm-opts.patch b/gfx/skia/patches/archive/arm-opts.patch
new file mode 100644
index 0000000000..02ad85c9a7
--- /dev/null
+++ b/gfx/skia/patches/archive/arm-opts.patch
@@ -0,0 +1,41 @@
+diff --git a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+--- a/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
++++ b/gfx/skia/src/opts/SkBlitRow_opts_arm.cpp
+@@ -549,17 +549,17 @@ static void S32A_Opaque_BlitRow32_neon(S
+ #define S32A_Opaque_BlitRow32_PROC S32A_Opaque_BlitRow32_neon
+
+ #else
+
+ #ifdef TEST_SRC_ALPHA
+ #error The ARM asm version of S32A_Opaque_BlitRow32 does not support TEST_SRC_ALPHA
+ #endif
+
+-static void S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
++static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+
+ SkASSERT(255 == alpha);
+
+ /* Does not support the TEST_SRC_ALPHA case */
+ asm volatile (
+ "cmp %[count], #0 \n\t" /* comparing count with 0 */
+@@ -646,17 +646,17 @@ static void S32A_Opaque_BlitRow32_arm(Sk
+ );
+ }
+ #define S32A_Opaque_BlitRow32_PROC S32A_Opaque_BlitRow32_arm
+ #endif
+
+ /*
+ * ARM asm version of S32A_Blend_BlitRow32
+ */
+-static void S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
++static void __attribute((noinline,optimize("-fomit-frame-pointer"))) S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha) {
+ asm volatile (
+ "cmp %[count], #0 \n\t" /* comparing count with 0 */
+ "beq 3f \n\t" /* if zero exit */
+
+ "mov r12, #0xff \n\t" /* load the 0xff mask in r12 */
+ "orr r12, r12, r12, lsl #16 \n\t" /* convert it to 0xff00ff in r12 */
diff --git a/gfx/skia/patches/archive/fix-comma-end-enum-list.patch b/gfx/skia/patches/archive/fix-comma-end-enum-list.patch
new file mode 100644
index 0000000000..dea36377e8
--- /dev/null
+++ b/gfx/skia/patches/archive/fix-comma-end-enum-list.patch
@@ -0,0 +1,380 @@
+diff --git a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+--- a/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
++++ b/gfx/skia/include/core/SkAdvancedTypefaceMetrics.h
+@@ -29,17 +29,17 @@ public:
+ SkString fFontName;
+
+ enum FontType {
+ kType1_Font,
+ kType1CID_Font,
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+- kNotEmbeddable_Font,
++ kNotEmbeddable_Font
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font or
+ // kNotEmbeddable_Font, the per glyph information will never be populated.
+ FontType fType;
+
+ // fMultiMaster may be true for Type1_Font or CFF_Font.
+ bool fMultiMaster;
+@@ -51,17 +51,17 @@ public:
+ kFixedPitch_Style = 0x00001,
+ kSerif_Style = 0x00002,
+ kSymbolic_Style = 0x00004,
+ kScript_Style = 0x00008,
+ kNonsymbolic_Style = 0x00020,
+ kItalic_Style = 0x00040,
+ kAllCaps_Style = 0x10000,
+ kSmallCaps_Style = 0x20000,
+- kForceBold_Style = 0x40000,
++ kForceBold_Style = 0x40000
+ };
+ uint16_t fStyle; // Font style characteristics.
+ int16_t fItalicAngle; // Counterclockwise degrees from vertical of the
+ // dominant vertical stroke for an Italic face.
+ // The following fields are all in font units.
+ int16_t fAscent; // Max height above baseline, not including accents.
+ int16_t fDescent; // Max depth below baseline (negative).
+ int16_t fStemV; // Thickness of dominant vertical stem.
+@@ -70,26 +70,26 @@ public:
+ SkIRect fBBox; // The bounding box of all glyphs (in font units).
+
+ // The type of advance data wanted.
+ enum PerGlyphInfo {
+ kNo_PerGlyphInfo = 0x0, // Don't populate any per glyph info.
+ kHAdvance_PerGlyphInfo = 0x1, // Populate horizontal advance data.
+ kVAdvance_PerGlyphInfo = 0x2, // Populate vertical advance data.
+ kGlyphNames_PerGlyphInfo = 0x4, // Populate glyph names (Type 1 only).
+- kToUnicode_PerGlyphInfo = 0x8, // Populate ToUnicode table, ignored
++ kToUnicode_PerGlyphInfo = 0x8 // Populate ToUnicode table, ignored
+ // for Type 1 fonts
+ };
+
+ template <typename Data>
+ struct AdvanceMetric {
+ enum MetricType {
+ kDefault, // Default advance: fAdvance.count = 1
+ kRange, // Advances for a range: fAdvance.count = fEndID-fStartID
+- kRun, // fStartID-fEndID have same advance: fAdvance.count = 1
++ kRun // fStartID-fEndID have same advance: fAdvance.count = 1
+ };
+ MetricType fType;
+ uint16_t fStartId;
+ uint16_t fEndId;
+ SkTDArray<Data> fAdvance;
+ SkTScopedPtr<AdvanceMetric<Data> > fNext;
+ };
+
+diff --git a/gfx/skia/include/core/SkBlitRow.h b/gfx/skia/include/core/SkBlitRow.h
+--- a/gfx/skia/include/core/SkBlitRow.h
++++ b/gfx/skia/include/core/SkBlitRow.h
+@@ -44,17 +44,17 @@ public:
+
+ //! Public entry-point to return a blit function ptr
+ static Proc Factory(unsigned flags, SkBitmap::Config);
+
+ ///////////// D32 version
+
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+- kSrcPixelAlpha_Flag32 = 1 << 1,
++ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+ @param dst array of dst 32bit colors
+ @param src array of src 32bit colors (w/ or w/o alpha)
+ @param count number of colors to blend
+ @param alpha global alpha to be applied to all src colors
+ */
+diff --git a/gfx/skia/include/core/SkCanvas.h b/gfx/skia/include/core/SkCanvas.h
+--- a/gfx/skia/include/core/SkCanvas.h
++++ b/gfx/skia/include/core/SkCanvas.h
+@@ -132,17 +132,17 @@ public:
+ * low byte to high byte: B, G, R, A.
+ */
+ kBGRA_Premul_Config8888,
+ kBGRA_Unpremul_Config8888,
+ /**
+ * low byte to high byte: R, G, B, A.
+ */
+ kRGBA_Premul_Config8888,
+- kRGBA_Unpremul_Config8888,
++ kRGBA_Unpremul_Config8888
+ };
+
+ /**
+ * On success (returns true), copy the canvas pixels into the bitmap.
+ * On failure, the bitmap parameter is left unchanged and false is
+ * returned.
+ *
+ * The canvas' pixels are converted to the bitmap's config. The only
+diff --git a/gfx/skia/include/core/SkDevice.h b/gfx/skia/include/core/SkDevice.h
+--- a/gfx/skia/include/core/SkDevice.h
++++ b/gfx/skia/include/core/SkDevice.h
+@@ -134,17 +134,17 @@ public:
+ * Return the device's origin: its offset in device coordinates from
+ * the default origin in its canvas' matrix/clip
+ */
+ const SkIPoint& getOrigin() const { return fOrigin; }
+
+ protected:
+ enum Usage {
+ kGeneral_Usage,
+- kSaveLayer_Usage, // <! internal use only
++ kSaveLayer_Usage // <! internal use only
+ };
+
+ struct TextFlags {
+ uint32_t fFlags; // SkPaint::getFlags()
+ SkPaint::Hinting fHinting;
+ };
+
+ /**
+diff --git a/gfx/skia/include/core/SkFlattenable.h b/gfx/skia/include/core/SkFlattenable.h
+--- a/gfx/skia/include/core/SkFlattenable.h
++++ b/gfx/skia/include/core/SkFlattenable.h
+@@ -216,17 +216,17 @@ public:
+ SkFactorySet* setFactoryRecorder(SkFactorySet*);
+
+ enum Flags {
+ kCrossProcess_Flag = 0x01,
+ /**
+ * Instructs the writer to inline Factory names as there are seen the
+ * first time (after that we store an index). The pipe code uses this.
+ */
+- kInlineFactoryNames_Flag = 0x02,
++ kInlineFactoryNames_Flag = 0x02
+ };
+ Flags getFlags() const { return (Flags)fFlags; }
+ void setFlags(Flags flags) { fFlags = flags; }
+
+ bool isCrossProcess() const {
+ return SkToBool(fFlags & kCrossProcess_Flag);
+ }
+ bool inlineFactoryNames() const {
+diff --git a/gfx/skia/include/core/SkFontHost.h b/gfx/skia/include/core/SkFontHost.h
+--- a/gfx/skia/include/core/SkFontHost.h
++++ b/gfx/skia/include/core/SkFontHost.h
+@@ -245,17 +245,17 @@ public:
+ vertically. When rendering subpixel glyphs we need to know which way
+ round they are.
+
+ Note, if you change this after startup, you'll need to flush the glyph
+ cache because it'll have the wrong type of masks cached.
+ */
+ enum LCDOrientation {
+ kHorizontal_LCDOrientation = 0, //!< this is the default
+- kVertical_LCDOrientation = 1,
++ kVertical_LCDOrientation = 1
+ };
+
+ static void SetSubpixelOrientation(LCDOrientation orientation);
+ static LCDOrientation GetSubpixelOrientation();
+
+ /** LCD color elements can vary in order. For subpixel text we need to know
+ the order which the LCDs uses so that the color fringes are in the
+ correct place.
+@@ -264,17 +264,17 @@ public:
+ cache because it'll have the wrong type of masks cached.
+
+ kNONE_LCDOrder means that the subpixel elements are not spatially
+ separated in any usable fashion.
+ */
+ enum LCDOrder {
+ kRGB_LCDOrder = 0, //!< this is the default
+ kBGR_LCDOrder = 1,
+- kNONE_LCDOrder = 2,
++ kNONE_LCDOrder = 2
+ };
+
+ static void SetSubpixelOrder(LCDOrder order);
+ static LCDOrder GetSubpixelOrder();
+
+ #ifdef SK_BUILD_FOR_ANDROID
+ ///////////////////////////////////////////////////////////////////////////
+
+diff --git a/gfx/skia/include/core/SkMaskFilter.h b/gfx/skia/include/core/SkMaskFilter.h
+--- a/gfx/skia/include/core/SkMaskFilter.h
++++ b/gfx/skia/include/core/SkMaskFilter.h
+@@ -57,17 +57,17 @@ public:
+
+ virtual void flatten(SkFlattenableWriteBuffer& ) {}
+
+ enum BlurType {
+ kNone_BlurType, //!< this maskfilter is not a blur
+ kNormal_BlurType, //!< fuzzy inside and outside
+ kSolid_BlurType, //!< solid inside, fuzzy outside
+ kOuter_BlurType, //!< nothing inside, fuzzy outside
+- kInner_BlurType, //!< fuzzy inside, nothing outside
++ kInner_BlurType //!< fuzzy inside, nothing outside
+ };
+
+ struct BlurInfo {
+ SkScalar fRadius;
+ bool fIgnoreTransform;
+ bool fHighQuality;
+ };
+
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -70,17 +70,17 @@ public:
+ kFull_Hinting -> <same as kNormalHinting, unless we are rendering
+ subpixel glyphs, in which case TARGET_LCD or
+ TARGET_LCD_V is used>
+ */
+ enum Hinting {
+ kNo_Hinting = 0,
+ kSlight_Hinting = 1,
+ kNormal_Hinting = 2, //!< this is the default
+- kFull_Hinting = 3,
++ kFull_Hinting = 3
+ };
+
+ Hinting getHinting() const {
+ return static_cast<Hinting>(fHinting);
+ }
+
+ void setHinting(Hinting hintingLevel);
+
+@@ -282,17 +282,17 @@ public:
+ results may not appear the same as if it was drawn twice, filled and
+ then stroked.
+ */
+ enum Style {
+ kFill_Style, //!< fill the geometry
+ kStroke_Style, //!< stroke the geometry
+ kStrokeAndFill_Style, //!< fill and stroke the geometry
+
+- kStyleCount,
++ kStyleCount
+ };
+
+ /** Return the paint's style, used for controlling how primitives'
+ geometries are interpreted (except for drawBitmap, which always assumes
+ kFill_Style).
+ @return the paint's Style
+ */
+ Style getStyle() const { return (Style)fStyle; }
+diff --git a/gfx/skia/include/core/SkScalerContext.h b/gfx/skia/include/core/SkScalerContext.h
+--- a/gfx/skia/include/core/SkScalerContext.h
++++ b/gfx/skia/include/core/SkScalerContext.h
+@@ -172,24 +172,24 @@ public:
+ kHintingBit2_Flag = 0x0100,
+
+ // these should only ever be set if fMaskFormat is LCD16 or LCD32
+ kLCD_Vertical_Flag = 0x0200, // else Horizontal
+ kLCD_BGROrder_Flag = 0x0400, // else RGB order
+
+ // luminance : 0 for black text, kLuminance_Max for white text
+ kLuminance_Shift = 11, // to shift into the other flags above
+- kLuminance_Bits = 3, // ensure Flags doesn't exceed 16bits
++ kLuminance_Bits = 3 // ensure Flags doesn't exceed 16bits
+ };
+
+ // computed values
+ enum {
+ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ kLuminance_Max = (1 << kLuminance_Bits) - 1,
+- kLuminance_Mask = kLuminance_Max << kLuminance_Shift,
++ kLuminance_Mask = kLuminance_Max << kLuminance_Shift
+ };
+
+ struct Rec {
+ uint32_t fOrigFontID;
+ uint32_t fFontID;
+ SkScalar fTextSize, fPreScaleX, fPreSkewX;
+ SkScalar fPost2x2[2][2];
+ SkScalar fFrameWidth, fMiterLimit;
+diff --git a/gfx/skia/include/core/SkTypes.h b/gfx/skia/include/core/SkTypes.h
+--- a/gfx/skia/include/core/SkTypes.h
++++ b/gfx/skia/include/core/SkTypes.h
+@@ -433,17 +433,17 @@ public:
+ */
+ kAlloc_OnShrink,
+
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+- kReuse_OnShrink,
++ kReuse_OnShrink
+ };
+
+ /**
+ * Reallocates the block to a new size. The ptr may or may not change.
+ */
+ void* reset(size_t size, OnShrink shrink = kAlloc_OnShrink) {
+ if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
+ return fPtr;
+diff --git a/gfx/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/include/effects/SkLayerDrawLooper.h
+--- a/gfx/skia/include/effects/SkLayerDrawLooper.h
++++ b/gfx/skia/include/effects/SkLayerDrawLooper.h
+@@ -36,17 +36,17 @@ public:
+
+ /**
+ * Use the layer's paint entirely, with these exceptions:
+ * - We never override the draw's paint's text_encoding, since that is
+ * used to interpret the text/len parameters in draw[Pos]Text.
+ * - Flags and Color are always computed using the LayerInfo's
+ * fFlagsMask and fColorMode.
+ */
+- kEntirePaint_Bits = -1,
++ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+
+ /**
+ * Info for how to apply the layer's paint and offset.
+ *
+ * fFlagsMask selects which flags in the layer's paint should be applied.
+diff --git a/gfx/skia/src/core/SkBitmap.cpp b/gfx/skia/src/core/SkBitmap.cpp
+--- a/gfx/skia/src/core/SkBitmap.cpp
++++ b/gfx/skia/src/core/SkBitmap.cpp
+@@ -1357,17 +1357,17 @@ bool SkBitmap::extractAlpha(SkBitmap* ds
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ enum {
+ SERIALIZE_PIXELTYPE_NONE,
+ SERIALIZE_PIXELTYPE_RAW_WITH_CTABLE,
+ SERIALIZE_PIXELTYPE_RAW_NO_CTABLE,
+ SERIALIZE_PIXELTYPE_REF_DATA,
+- SERIALIZE_PIXELTYPE_REF_PTR,
++ SERIALIZE_PIXELTYPE_REF_PTR
+ };
+
+ static void writeString(SkFlattenableWriteBuffer& buffer, const char str[]) {
+ size_t len = strlen(str);
+ buffer.write32(len);
+ buffer.writePad(str, len);
+ }
+
+diff --git a/gfx/skia/src/core/SkMatrix.cpp b/gfx/skia/src/core/SkMatrix.cpp
+--- a/gfx/skia/src/core/SkMatrix.cpp
++++ b/gfx/skia/src/core/SkMatrix.cpp
+@@ -1715,17 +1715,17 @@ SkScalar SkMatrix::getMaxStretch() const
+ const SkMatrix& SkMatrix::I() {
+ static SkMatrix gIdentity;
+ static bool gOnce;
+ if (!gOnce) {
+ gIdentity.reset();
+ gOnce = true;
+ }
+ return gIdentity;
+-};
++}
+
+ const SkMatrix& SkMatrix::InvalidMatrix() {
+ static SkMatrix gInvalid;
+ static bool gOnce;
+ if (!gOnce) {
+ gInvalid.setAll(SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+ SK_ScalarMax, SK_ScalarMax, SK_ScalarMax);
diff --git a/gfx/skia/patches/archive/fix-gradient-clamp.patch b/gfx/skia/patches/archive/fix-gradient-clamp.patch
new file mode 100644
index 0000000000..91481c2c12
--- /dev/null
+++ b/gfx/skia/patches/archive/fix-gradient-clamp.patch
@@ -0,0 +1,211 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -167,16 +167,17 @@ private:
+
+ mutable uint16_t* fCache16; // working ptr. If this is NULL, we need to recompute the cache values
+ mutable SkPMColor* fCache32; // working ptr. If this is NULL, we need to recompute the cache values
+
+ mutable uint16_t* fCache16Storage; // storage for fCache16, allocated on demand
+ mutable SkMallocPixelRef* fCache32PixelRef;
+ mutable unsigned fCacheAlpha; // the alpha value we used when we computed the cache. larger than 8bits so we can store uninitialized value
+
++ static SkPMColor PremultiplyColor(SkColor c0, U8CPU alpha);
+ static void Build16bitCache(uint16_t[], SkColor c0, SkColor c1, int count);
+ static void Build32bitCache(SkPMColor[], SkColor c0, SkColor c1, int count,
+ U8CPU alpha);
+ void setCacheAlpha(U8CPU alpha) const;
+ void initCommon();
+
+ typedef SkShader INHERITED;
+ };
+@@ -512,16 +513,31 @@ static inline U8CPU dither_fixed_to_8(Sk
+ * For dithering with premultiply, we want to ceiling the alpha component,
+ * to ensure that it is always >= any color component.
+ */
+ static inline U8CPU dither_ceil_fixed_to_8(SkFixed n) {
+ n >>= 8;
+ return ((n << 1) - (n | (n >> 8))) >> 8;
+ }
+
++SkPMColor Gradient_Shader::PremultiplyColor(SkColor c0, U8CPU paintAlpha)
++{
++ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
++ SkFixed r = SkColorGetR(c0);
++ SkFixed g = SkColorGetG(c0);
++ SkFixed b = SkColorGetB(c0);
++
++ a = SkIntToFixed(a) + 0x8000;
++ r = SkIntToFixed(r) + 0x8000;
++ g = SkIntToFixed(g) + 0x8000;
++ b = SkIntToFixed(b) + 0x8000;
++
++ return SkPremultiplyARGBInline(a >> 16, r >> 16, g >> 16, b >> 16);
++}
++
+ void Gradient_Shader::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
+ int count, U8CPU paintAlpha) {
+ SkASSERT(count > 1);
+
+ // need to apply paintAlpha to our two endpoints
+ SkFixed a = SkMulDiv255Round(SkColorGetA(c0), paintAlpha);
+ SkFixed da;
+ {
+@@ -613,24 +629,24 @@ const uint16_t* Gradient_Shader::getCach
+ }
+ }
+ return fCache16;
+ }
+
+ const SkPMColor* Gradient_Shader::getCache32() const {
+ if (fCache32 == NULL) {
+ // double the count for dither entries
+- const int entryCount = kCache32Count * 2;
++ const int entryCount = kCache32Count * 2 + 2;
+ const size_t allocSize = sizeof(SkPMColor) * entryCount;
+
+ if (NULL == fCache32PixelRef) {
+ fCache32PixelRef = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ }
+- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
++ fCache32 = (SkPMColor*)fCache32PixelRef->getAddr() + 1;
+ if (fColorCount == 2) {
+ Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
+ kCache32Count, fCacheAlpha);
+ } else {
+ Rec* rec = fRecs;
+ int prevIndex = 0;
+ for (int i = 1; i < fColorCount; i++) {
+ int nextIndex = SkFixedToFFFF(rec[i].fPos) >> (16 - kCache32Bits);
+@@ -644,28 +660,31 @@ const SkPMColor* Gradient_Shader::getCac
+ }
+ SkASSERT(prevIndex == kCache32Count - 1);
+ }
+
+ if (fMapper) {
+ SkMallocPixelRef* newPR = SkNEW_ARGS(SkMallocPixelRef,
+ (NULL, allocSize, NULL));
+ SkPMColor* linear = fCache32; // just computed linear data
+- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
++ SkPMColor* mapped = (SkPMColor*)newPR->getAddr() + 1; // storage for mapped data
+ SkUnitMapper* map = fMapper;
+ for (int i = 0; i < kCache32Count; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+ mapped[i] = linear[index];
+ mapped[i + kCache32Count] = linear[index + kCache32Count];
+ }
+ fCache32PixelRef->unref();
+ fCache32PixelRef = newPR;
+- fCache32 = (SkPMColor*)newPR->getAddr();
++ fCache32 = (SkPMColor*)newPR->getAddr() + 1;
+ }
+ }
++ //Write the clamp colours into the first and last entries of fCache32
++ fCache32[-1] = PremultiplyColor(fOrigColors[0], fCacheAlpha);
++ fCache32[kCache32Count * 2] = PremultiplyColor(fOrigColors[fColorCount - 1], fCacheAlpha);
+ return fCache32;
+ }
+
+ /*
+ * Because our caller might rebuild the same (logically the same) gradient
+ * over and over, we'd like to return exactly the same "bitmap" if possible,
+ * allowing the client to utilize a cache of our bitmap (e.g. with a GPU).
+ * To do that, we maintain a private cache of built-bitmaps, based on our
+@@ -875,28 +894,38 @@ void Linear_Gradient::shadeSpan(int x, i
+ dx = dxStorage[0];
+ } else {
+ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+ dx = SkScalarToFixed(fDstToIndex.getScaleX());
+ }
+
+ if (SkFixedNearlyZero(dx)) {
+ // we're a vertical gradient, so no change in a span
+- unsigned fi = proc(fx) >> (16 - kCache32Bits);
+- sk_memset32_dither(dstC, cache[toggle + fi],
+- cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ if (proc == clamp_tileproc) {
++ if (fx < 0) {
++ sk_memset32(dstC, cache[-1], count);
++ } else if (fx > 0xFFFF) {
++ sk_memset32(dstC, cache[kCache32Count * 2], count);
++ } else {
++ unsigned fi = proc(fx) >> (16 - kCache32Bits);
++ sk_memset32_dither(dstC, cache[toggle + fi],
++ cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ }
++ } else {
++ unsigned fi = proc(fx) >> (16 - kCache32Bits);
++ sk_memset32_dither(dstC, cache[toggle + fi],
++ cache[(toggle ^ TOGGLE_MASK) + fi], count);
++ }
+ } else if (proc == clamp_tileproc) {
+ SkClampRange range;
+- range.init(fx, dx, count, 0, 0xFF);
++ range.init(fx, dx, count, cache[-1], cache[kCache32Count * 2]);
+
+ if ((count = range.fCount0) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV0],
+- cache[(toggle ^ TOGGLE_MASK) + range.fV0],
+- count);
++ // Do we really want to dither the clamp values?
++ sk_memset32(dstC, range.fV0, count);
+ dstC += count;
+ }
+ if ((count = range.fCount1) > 0) {
+ int unroll = count >> 3;
+ fx = range.fFx1;
+ for (int i = 0; i < unroll; i++) {
+ NO_CHECK_ITER; NO_CHECK_ITER;
+ NO_CHECK_ITER; NO_CHECK_ITER;
+@@ -905,20 +934,17 @@ void Linear_Gradient::shadeSpan(int x, i
+ }
+ if ((count &= 7) > 0) {
+ do {
+ NO_CHECK_ITER;
+ } while (--count != 0);
+ }
+ }
+ if ((count = range.fCount2) > 0) {
+- sk_memset32_dither(dstC,
+- cache[toggle + range.fV1],
+- cache[(toggle ^ TOGGLE_MASK) + range.fV1],
+- count);
++ sk_memset32(dstC, range.fV1, count);
+ }
+ } else if (proc == mirror_tileproc) {
+ do {
+ unsigned fi = mirror_8bits(fx >> 8);
+ SkASSERT(fi <= 0xFF);
+ fx += dx;
+ *dstC++ = cache[toggle + fi];
+ toggle ^= TOGGLE_MASK;
+@@ -1670,19 +1699,24 @@ public:
+ }
+ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+ SkScalarMul(fDiff.fY, dy)) * 2;
+ if (proc == clamp_tileproc) {
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = SkClampMax(t, 0xFFFF);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> (16 - kCache32Bits)];
++ }
+ fx += dx;
+ fy += dy;
+ b += db;
+ }
+ } else if (proc == mirror_tileproc) {
+ for (; count > 0; --count) {
+ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+ SkFixed index = mirror_tileproc(t);
diff --git a/gfx/skia/patches/archive/getpostextpath.patch b/gfx/skia/patches/archive/getpostextpath.patch
new file mode 100644
index 0000000000..7181411ec8
--- /dev/null
+++ b/gfx/skia/patches/archive/getpostextpath.patch
@@ -0,0 +1,70 @@
+diff --git a/gfx/skia/include/core/SkPaint.h b/gfx/skia/include/core/SkPaint.h
+--- a/gfx/skia/include/core/SkPaint.h
++++ b/gfx/skia/include/core/SkPaint.h
+@@ -836,16 +836,19 @@ public:
+
+ /** Return the path (outline) for the specified text.
+ Note: just like SkCanvas::drawText, this will respect the Align setting
+ in the paint.
+ */
+ void getTextPath(const void* text, size_t length, SkScalar x, SkScalar y,
+ SkPath* path) const;
+
++ void getPosTextPath(const void* text, size_t length,
++ const SkPoint pos[], SkPath* path) const;
++
+ #ifdef SK_BUILD_FOR_ANDROID
+ const SkGlyph& getUnicharMetrics(SkUnichar);
+ const void* findImage(const SkGlyph&);
+
+ uint32_t getGenerationID() const;
+ #endif
+
+ // returns true if the paint's settings (e.g. xfermode + alpha) resolve to
+diff --git a/gfx/skia/src/core/SkPaint.cpp b/gfx/skia/src/core/SkPaint.cpp
+--- a/gfx/skia/src/core/SkPaint.cpp
++++ b/gfx/skia/src/core/SkPaint.cpp
+@@ -1242,16 +1242,43 @@ void SkPaint::getTextPath(const void* te
+ const SkPath* iterPath;
+ while ((iterPath = iter.next(&xpos)) != NULL) {
+ matrix.postTranslate(xpos - prevXPos, 0);
+ path->addPath(*iterPath, matrix);
+ prevXPos = xpos;
+ }
+ }
+
++void SkPaint::getPosTextPath(const void* textData, size_t length,
++ const SkPoint pos[], SkPath* path) const {
++ SkASSERT(length == 0 || textData != NULL);
++
++ const char* text = (const char*)textData;
++ if (text == NULL || length == 0 || path == NULL) {
++ return;
++ }
++
++ SkTextToPathIter iter(text, length, *this, false, true);
++ SkMatrix matrix;
++ SkPoint prevPos;
++ prevPos.set(0, 0);
++
++ matrix.setScale(iter.getPathScale(), iter.getPathScale());
++ path->reset();
++
++ unsigned int i = 0;
++ const SkPath* iterPath;
++ while ((iterPath = iter.next(NULL)) != NULL) {
++ matrix.postTranslate(pos[i].fX - prevPos.fX, pos[i].fY - prevPos.fY);
++ path->addPath(*iterPath, matrix);
++ prevPos = pos[i];
++ i++;
++ }
++}
++
+ static void add_flattenable(SkDescriptor* desc, uint32_t tag,
+ SkFlattenableWriteBuffer* buffer) {
+ buffer->flatten(desc->addEntry(tag, buffer->size(), NULL));
+ }
+
+ // SkFontHost can override this choice in FilterRec()
+ static SkMask::Format computeMaskFormat(const SkPaint& paint) {
+ uint32_t flags = paint.getFlags();
diff --git a/gfx/skia/patches/archive/mingw-fix.patch b/gfx/skia/patches/archive/mingw-fix.patch
new file mode 100644
index 0000000000..d91a16aa70
--- /dev/null
+++ b/gfx/skia/patches/archive/mingw-fix.patch
@@ -0,0 +1,57 @@
+diff --git a/gfx/skia/include/core/SkPostConfig.h b/gfx/skia/include/core/SkPostConfig.h
+index 0135b85..bb108f8 100644
+--- a/gfx/skia/include/core/SkPostConfig.h
++++ b/gfx/skia/include/core/SkPostConfig.h
+@@ -253,7 +253,7 @@
+ //////////////////////////////////////////////////////////////////////
+
+ #ifndef SK_OVERRIDE
+-#if defined(SK_BUILD_FOR_WIN)
++#if defined(_MSC_VER)
+ #define SK_OVERRIDE override
+ #elif defined(__clang__)
+ // Some documentation suggests we should be using __attribute__((override)),
+diff --git a/gfx/skia/src/ports/SkFontHost_win.cpp b/gfx/skia/src/ports/SkFontHost_win.cpp
+index dd9c5dc..ca2c3dc 100644
+--- a/gfx/skia/src/ports/SkFontHost_win.cpp
++++ b/gfx/skia/src/ports/SkFontHost_win.cpp
+@@ -22,7 +22,7 @@
+ #ifdef WIN32
+ #include "windows.h"
+ #include "tchar.h"
+-#include "Usp10.h"
++#include "usp10.h"
+
+ // always packed xxRRGGBB
+ typedef uint32_t SkGdiRGB;
+@@ -1033,6 +1033,10 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+ HFONT savefont = (HFONT)SelectObject(hdc, font);
+ HFONT designFont = NULL;
+
++ const char stem_chars[] = {'i', 'I', '!', '1'};
++ int16_t min_width;
++ unsigned glyphCount;
++
+ // To request design units, create a logical font whose height is specified
+ // as unitsPerEm.
+ OUTLINETEXTMETRIC otm;
+@@ -1046,7 +1050,7 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+ if (!GetOutlineTextMetrics(hdc, sizeof(otm), &otm)) {
+ goto Error;
+ }
+- const unsigned glyphCount = calculateGlyphCount(hdc);
++ glyphCount = calculateGlyphCount(hdc);
+
+ info = new SkAdvancedTypefaceMetrics;
+ info->fEmSize = otm.otmEMSquare;
+@@ -1115,9 +1119,8 @@ SkAdvancedTypefaceMetrics* SkFontHost::GetAdvancedTypefaceMetrics(
+
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+- int16_t min_width = SHRT_MAX;
++ min_width = SHRT_MAX;
+ info->fStemV = 0;
+- char stem_chars[] = {'i', 'I', '!', '1'};
+ for (size_t i = 0; i < SK_ARRAY_COUNT(stem_chars); i++) {
+ ABC abcWidths;
+ if (GetCharABCWidths(hdc, stem_chars[i], stem_chars[i], &abcWidths)) {
diff --git a/gfx/skia/patches/archive/new-aa.patch b/gfx/skia/patches/archive/new-aa.patch
new file mode 100644
index 0000000000..d5e6fbf73d
--- /dev/null
+++ b/gfx/skia/patches/archive/new-aa.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/src/core/SkScan_AntiPath.cpp
+--- a/gfx/skia/src/core/SkScan_AntiPath.cpp
++++ b/gfx/skia/src/core/SkScan_AntiPath.cpp
+@@ -31,17 +31,17 @@
+ - supersampled coordinates, scale equal to the output * SCALE
+
+ NEW_AA is a set of code-changes to try to make both paths produce identical
+ results. Its not quite there yet, though the remaining differences may be
+ in the subsequent blits, and not in the different masks/runs...
+ */
+ //#define FORCE_SUPERMASK
+ //#define FORCE_RLE
+-//#define SK_SUPPORT_NEW_AA
++#define SK_SUPPORT_NEW_AA
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ /// Base class for a single-pass supersampled blitter.
+ class BaseSuperBlitter : public SkBlitter {
+ public:
+ BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+ const SkRegion& clip);
diff --git a/gfx/skia/patches/archive/old-android-fonthost.patch b/gfx/skia/patches/archive/old-android-fonthost.patch
new file mode 100644
index 0000000000..1c64ace7dd
--- /dev/null
+++ b/gfx/skia/patches/archive/old-android-fonthost.patch
@@ -0,0 +1,530 @@
+# HG changeset patch
+# Parent 9ee29e4aace683ddf6cf8ddb2893cd34fcfc772c
+# User James Willcox <jwillcox@mozilla.com>
+diff --git a/gfx/skia/Makefile.in b/gfx/skia/Makefile.in
+--- a/gfx/skia/Makefile.in
++++ b/gfx/skia/Makefile.in
+@@ -305,21 +305,20 @@ CPPSRCS += \
+ SkFontHost_mac_coretext.cpp \
+ SkTime_Unix.cpp \
+ $(NULL)
+ endif
+
+ ifeq (android,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+ SkFontHost_FreeType.cpp \
+ SkFontHost_android.cpp \
+ SkFontHost_gamma.cpp \
+- FontHostConfiguration_android.cpp \
+ SkMMapStream.cpp \
+ SkTime_Unix.cpp \
+ $(NULL)
+
+ DEFINES += -DSK_BUILD_FOR_ANDROID_NDK
+ OS_CXXFLAGS += $(CAIRO_FT_CFLAGS)
+ endif
+
+ ifeq (gtk2,$(MOZ_WIDGET_TOOLKIT))
+ CPPSRCS += \
+diff --git a/gfx/skia/src/ports/SkFontHost_android.cpp b/gfx/skia/src/ports/SkFontHost_android.cpp
+--- a/gfx/skia/src/ports/SkFontHost_android.cpp
++++ b/gfx/skia/src/ports/SkFontHost_android.cpp
+@@ -1,38 +1,31 @@
++
+ /*
+-**
+-** Copyright 2006, The Android Open Source Project
+-**
+-** Licensed under the Apache License, Version 2.0 (the "License");
+-** you may not use this file except in compliance with the License.
+-** You may obtain a copy of the License at
+-**
+-** http://www.apache.org/licenses/LICENSE-2.0
+-**
+-** Unless required by applicable law or agreed to in writing, software
+-** distributed under the License is distributed on an "AS IS" BASIS,
+-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-** See the License for the specific language governing permissions and
+-** limitations under the License.
+-*/
++ * Copyright 2006 The Android Open Source Project
++ *
++ * Use of this source code is governed by a BSD-style license that can be
++ * found in the LICENSE file.
++ */
++
+
+ #include "SkFontHost.h"
+ #include "SkDescriptor.h"
+ #include "SkMMapStream.h"
+ #include "SkPaint.h"
+ #include "SkString.h"
+ #include "SkStream.h"
+ #include "SkThread.h"
+ #include "SkTSearch.h"
+-#include "FontHostConfiguration_android.h"
+ #include <stdio.h>
+
++#define FONT_CACHE_MEMORY_BUDGET (768 * 1024)
++
+ #ifndef SK_FONT_FILE_PREFIX
+ #define SK_FONT_FILE_PREFIX "/fonts/"
+ #endif
+
+ SkTypeface::Style find_name_and_attributes(SkStream* stream, SkString* name,
+ bool* isFixedWidth);
+
+ static void GetFullPathForSysFonts(SkString* full, const char name[]) {
+ full->set(getenv("ANDROID_ROOT"));
+ full->append(SK_FONT_FILE_PREFIX);
+@@ -99,21 +92,21 @@ static SkTypeface* find_best_face(const
+ if (faces[SkTypeface::kNormal] != NULL) {
+ return faces[SkTypeface::kNormal];
+ }
+ // look for anything
+ for (int i = 0; i < 4; i++) {
+ if (faces[i] != NULL) {
+ return faces[i];
+ }
+ }
+ // should never get here, since the faces list should not be empty
+- SkDEBUGFAIL("faces list is empty");
++ SkASSERT(!"faces list is empty");
+ return NULL;
+ }
+
+ static FamilyRec* find_family(const SkTypeface* member) {
+ FamilyRec* curr = gFamilyHead;
+ while (curr != NULL) {
+ for (int i = 0; i < 4; i++) {
+ if (curr->fFaces[i] == member) {
+ return curr;
+ }
+@@ -138,31 +131,27 @@ static SkTypeface* find_from_uniqueID(ui
+ curr = curr->fNext;
+ }
+ return NULL;
+ }
+
+ /* Remove reference to this face from its family. If the resulting family
+ is empty (has no faces), return that family, otherwise return NULL
+ */
+ static FamilyRec* remove_from_family(const SkTypeface* face) {
+ FamilyRec* family = find_family(face);
+- if (family) {
+- SkASSERT(family->fFaces[face->style()] == face);
+- family->fFaces[face->style()] = NULL;
++ SkASSERT(family->fFaces[face->style()] == face);
++ family->fFaces[face->style()] = NULL;
+
+- for (int i = 0; i < 4; i++) {
+- if (family->fFaces[i] != NULL) { // family is non-empty
+- return NULL;
+- }
++ for (int i = 0; i < 4; i++) {
++ if (family->fFaces[i] != NULL) { // family is non-empty
++ return NULL;
+ }
+- } else {
+-// SkDebugf("remove_from_family(%p) face not found", face);
+ }
+ return family; // return the empty family
+ }
+
+ // maybe we should make FamilyRec be doubly-linked
+ static void detach_and_delete_family(FamilyRec* family) {
+ FamilyRec* curr = gFamilyHead;
+ FamilyRec* prev = NULL;
+
+ while (curr != NULL) {
+@@ -172,21 +161,21 @@ static void detach_and_delete_family(Fam
+ gFamilyHead = next;
+ } else {
+ prev->fNext = next;
+ }
+ SkDELETE(family);
+ return;
+ }
+ prev = curr;
+ curr = next;
+ }
+- SkDEBUGFAIL("Yikes, couldn't find family in our list to remove/delete");
++ SkASSERT(!"Yikes, couldn't find family in our list to remove/delete");
+ }
+
+ static SkTypeface* find_typeface(const char name[], SkTypeface::Style style) {
+ NameFamilyPair* list = gNameList.begin();
+ int count = gNameList.count();
+
+ int index = SkStrLCSearch(&list[0].fName, count, name, sizeof(list[0]));
+
+ if (index >= 0) {
+ return find_best_face(list[index].fFamily, style);
+@@ -387,111 +376,90 @@ static bool get_name_and_style(const cha
+ }
+ return false;
+ }
+
+ // used to record our notion of the pre-existing fonts
+ struct FontInitRec {
+ const char* fFileName;
+ const char* const* fNames; // null-terminated list
+ };
+
++static const char* gSansNames[] = {
++ "sans-serif", "arial", "helvetica", "tahoma", "verdana", NULL
++};
++
++static const char* gSerifNames[] = {
++ "serif", "times", "times new roman", "palatino", "georgia", "baskerville",
++ "goudy", "fantasy", "cursive", "ITC Stone Serif", NULL
++};
++
++static const char* gMonoNames[] = {
++ "monospace", "courier", "courier new", "monaco", NULL
++};
++
+ // deliberately empty, but we use the address to identify fallback fonts
+ static const char* gFBNames[] = { NULL };
+
++/* Fonts must be grouped by family, with the first font in a family having the
++ list of names (even if that list is empty), and the following members having
++ null for the list. The names list must be NULL-terminated
++*/
++static const FontInitRec gSystemFonts[] = {
++ { "DroidSans.ttf", gSansNames },
++ { "DroidSans-Bold.ttf", NULL },
++ { "DroidSerif-Regular.ttf", gSerifNames },
++ { "DroidSerif-Bold.ttf", NULL },
++ { "DroidSerif-Italic.ttf", NULL },
++ { "DroidSerif-BoldItalic.ttf", NULL },
++ { "DroidSansMono.ttf", gMonoNames },
++ /* These are optional, and can be ignored if not found in the file system.
++ These are appended to gFallbackFonts[] as they are seen, so we list
++ them in the order we want them to be accessed by NextLogicalFont().
++ */
++ { "DroidSansArabic.ttf", gFBNames },
++ { "DroidSansHebrew.ttf", gFBNames },
++ { "DroidSansThai.ttf", gFBNames },
++ { "MTLmr3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "MTLc3m.ttf", gFBNames }, // Motoya Japanese Font
++ { "DroidSansJapanese.ttf", gFBNames },
++ { "DroidSansFallback.ttf", gFBNames }
++};
+
+-/* Fonts are grouped by family, with the first font in a family having the
+- list of names (even if that list is empty), and the following members having
+- null for the list. The names list must be NULL-terminated.
+-*/
+-static FontInitRec *gSystemFonts;
+-static size_t gNumSystemFonts = 0;
+-
+-#define SYSTEM_FONTS_FILE "/system/etc/system_fonts.cfg"
++#define DEFAULT_NAMES gSansNames
+
+ // these globals are assigned (once) by load_system_fonts()
+ static FamilyRec* gDefaultFamily;
+ static SkTypeface* gDefaultNormal;
+-static char** gDefaultNames = NULL;
+-static uint32_t *gFallbackFonts;
+
+-/* Load info from a configuration file that populates the system/fallback font structures
+-*/
+-static void load_font_info() {
+-// load_font_info_xml("/system/etc/system_fonts.xml");
+- SkTDArray<FontFamily*> fontFamilies;
+- getFontFamilies(fontFamilies);
+-
+- SkTDArray<FontInitRec> fontInfo;
+- bool firstInFamily = false;
+- for (int i = 0; i < fontFamilies.count(); ++i) {
+- FontFamily *family = fontFamilies[i];
+- firstInFamily = true;
+- for (int j = 0; j < family->fFileNames.count(); ++j) {
+- FontInitRec fontInfoRecord;
+- fontInfoRecord.fFileName = family->fFileNames[j];
+- if (j == 0) {
+- if (family->fNames.count() == 0) {
+- // Fallback font
+- fontInfoRecord.fNames = (char **)gFBNames;
+- } else {
+- SkTDArray<const char*> names = family->fNames;
+- const char **nameList = (const char**)
+- malloc((names.count() + 1) * sizeof(char*));
+- if (nameList == NULL) {
+- // shouldn't get here
+- break;
+- }
+- if (gDefaultNames == NULL) {
+- gDefaultNames = (char**) nameList;
+- }
+- for (int i = 0; i < names.count(); ++i) {
+- nameList[i] = names[i];
+- }
+- nameList[names.count()] = NULL;
+- fontInfoRecord.fNames = nameList;
+- }
+- } else {
+- fontInfoRecord.fNames = NULL;
+- }
+- *fontInfo.append() = fontInfoRecord;
+- }
+- }
+- gNumSystemFonts = fontInfo.count();
+- gSystemFonts = (FontInitRec*) malloc(gNumSystemFonts * sizeof(FontInitRec));
+- gFallbackFonts = (uint32_t*) malloc((gNumSystemFonts + 1) * sizeof(uint32_t));
+- if (gSystemFonts == NULL) {
+- // shouldn't get here
+- gNumSystemFonts = 0;
+- }
+- for (size_t i = 0; i < gNumSystemFonts; ++i) {
+- gSystemFonts[i].fFileName = fontInfo[i].fFileName;
+- gSystemFonts[i].fNames = fontInfo[i].fNames;
+- }
+- fontFamilies.deleteAll();
+-}
++/* This is sized conservatively, assuming that it will never be a size issue.
++ It will be initialized in load_system_fonts(), and will be filled with the
++ fontIDs that can be used for fallback consideration, in sorted order (sorted
++ meaning element[0] should be used first, then element[1], etc. When we hit
++ a fontID==0 in the array, the list is done, hence our allocation size is
++ +1 the total number of possible system fonts. Also see NextLogicalFont().
++ */
++static uint32_t gFallbackFonts[SK_ARRAY_COUNT(gSystemFonts)+1];
+
+ /* Called once (ensured by the sentinel check at the beginning of our body).
+ Initializes all the globals, and register the system fonts.
+ */
+ static void load_system_fonts() {
+ // check if we've already be called
+ if (NULL != gDefaultNormal) {
+ return;
+ }
+
+- load_font_info();
+-
+ const FontInitRec* rec = gSystemFonts;
+ SkTypeface* firstInFamily = NULL;
+ int fallbackCount = 0;
+
+- for (size_t i = 0; i < gNumSystemFonts; i++) {
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
+ // if we're the first in a new family, clear firstInFamily
+ if (rec[i].fNames != NULL) {
+ firstInFamily = NULL;
+ }
+
+ bool isFixedWidth;
+ SkString name;
+ SkTypeface::Style style;
+
+ // we expect all the fonts, except the "fallback" fonts
+@@ -515,120 +483,75 @@ static void load_system_fonts() {
+ // SkDebugf("---- adding %s as fallback[%d] fontID %d\n",
+ // rec[i].fFileName, fallbackCount, tf->uniqueID());
+ gFallbackFonts[fallbackCount++] = tf->uniqueID();
+ }
+
+ firstInFamily = tf;
+ FamilyRec* family = find_family(tf);
+ const char* const* names = rec[i].fNames;
+
+ // record the default family if this is it
+- if (names == gDefaultNames) {
++ if (names == DEFAULT_NAMES) {
+ gDefaultFamily = family;
+ }
+ // add the names to map to this family
+ while (*names) {
+ add_name(*names, family);
+ names += 1;
+ }
+ }
+ }
+
+ // do this after all fonts are loaded. This is our default font, and it
+ // acts as a sentinel so we only execute load_system_fonts() once
+ gDefaultNormal = find_best_face(gDefaultFamily, SkTypeface::kNormal);
+ // now terminate our fallback list with the sentinel value
+ gFallbackFonts[fallbackCount] = 0;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ void SkFontHost::Serialize(const SkTypeface* face, SkWStream* stream) {
+- // lookup and record if the font is custom (i.e. not a system font)
+- bool isCustomFont = !((FamilyTypeface*)face)->isSysFont();
+- stream->writeBool(isCustomFont);
++ const char* name = ((FamilyTypeface*)face)->getUniqueString();
+
+- if (isCustomFont) {
+- SkStream* fontStream = ((FamilyTypeface*)face)->openStream();
++ stream->write8((uint8_t)face->style());
+
+- // store the length of the custom font
+- uint32_t len = fontStream->getLength();
+- stream->write32(len);
+-
+- // store the entire font in the serialized stream
+- void* fontData = malloc(len);
+-
+- fontStream->read(fontData, len);
+- stream->write(fontData, len);
+-
+- fontStream->unref();
+- free(fontData);
+-// SkDebugf("--- fonthost custom serialize %d %d\n", face->style(), len);
+-
++ if (NULL == name || 0 == *name) {
++ stream->writePackedUInt(0);
++// SkDebugf("--- fonthost serialize null\n");
+ } else {
+- const char* name = ((FamilyTypeface*)face)->getUniqueString();
+-
+- stream->write8((uint8_t)face->style());
+-
+- if (NULL == name || 0 == *name) {
+- stream->writePackedUInt(0);
+-// SkDebugf("--- fonthost serialize null\n");
+- } else {
+- uint32_t len = strlen(name);
+- stream->writePackedUInt(len);
+- stream->write(name, len);
+-// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
+- }
++ uint32_t len = strlen(name);
++ stream->writePackedUInt(len);
++ stream->write(name, len);
++// SkDebugf("--- fonthost serialize <%s> %d\n", name, face->style());
+ }
+ }
+
+ SkTypeface* SkFontHost::Deserialize(SkStream* stream) {
+ load_system_fonts();
+
+- // check if the font is a custom or system font
+- bool isCustomFont = stream->readBool();
++ int style = stream->readU8();
+
+- if (isCustomFont) {
++ int len = stream->readPackedUInt();
++ if (len > 0) {
++ SkString str;
++ str.resize(len);
++ stream->read(str.writable_str(), len);
+
+- // read the length of the custom font from the stream
+- uint32_t len = stream->readU32();
+-
+- // generate a new stream to store the custom typeface
+- SkMemoryStream* fontStream = new SkMemoryStream(len);
+- stream->read((void*)fontStream->getMemoryBase(), len);
+-
+- SkTypeface* face = CreateTypefaceFromStream(fontStream);
+-
+- fontStream->unref();
+-
+-// SkDebugf("--- fonthost custom deserialize %d %d\n", face->style(), len);
+- return face;
+-
+- } else {
+- int style = stream->readU8();
+-
+- int len = stream->readPackedUInt();
+- if (len > 0) {
+- SkString str;
+- str.resize(len);
+- stream->read(str.writable_str(), len);
+-
+- const FontInitRec* rec = gSystemFonts;
+- for (size_t i = 0; i < gNumSystemFonts; i++) {
+- if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
+- // backup until we hit the fNames
+- for (int j = i; j >= 0; --j) {
+- if (rec[j].fNames != NULL) {
+- return SkFontHost::CreateTypeface(NULL,
+- rec[j].fNames[0], NULL, 0,
+- (SkTypeface::Style)style);
+- }
++ const FontInitRec* rec = gSystemFonts;
++ for (size_t i = 0; i < SK_ARRAY_COUNT(gSystemFonts); i++) {
++ if (strcmp(rec[i].fFileName, str.c_str()) == 0) {
++ // backup until we hit the fNames
++ for (int j = i; j >= 0; --j) {
++ if (rec[j].fNames != NULL) {
++ return SkFontHost::CreateTypeface(NULL,
++ rec[j].fNames[0], NULL, 0, (SkTypeface::Style)style);
+ }
+ }
+ }
+ }
+ }
+ return NULL;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+@@ -697,49 +620,32 @@ size_t SkFontHost::GetFileName(SkFontID
+ }
+ return size;
+ } else {
+ return 0;
+ }
+ }
+
+ SkFontID SkFontHost::NextLogicalFont(SkFontID currFontID, SkFontID origFontID) {
+ load_system_fonts();
+
+- const SkTypeface* origTypeface = find_from_uniqueID(origFontID);
+- const SkTypeface* currTypeface = find_from_uniqueID(currFontID);
+-
+- SkASSERT(origTypeface != 0);
+- SkASSERT(currTypeface != 0);
+-
+- // Our fallback list always stores the id of the plain in each fallback
+- // family, so we transform currFontID to its plain equivalent.
+- currFontID = find_typeface(currTypeface, SkTypeface::kNormal)->uniqueID();
+-
+ /* First see if fontID is already one of our fallbacks. If so, return
+ its successor. If fontID is not in our list, then return the first one
+ in our list. Note: list is zero-terminated, and returning zero means
+ we have no more fonts to use for fallbacks.
+ */
+ const uint32_t* list = gFallbackFonts;
+ for (int i = 0; list[i] != 0; i++) {
+ if (list[i] == currFontID) {
+- if (list[i+1] == 0)
+- return 0;
+- const SkTypeface* nextTypeface = find_from_uniqueID(list[i+1]);
+- return find_typeface(nextTypeface, origTypeface->style())->uniqueID();
++ return list[i+1];
+ }
+ }
+-
+- // If we get here, currFontID was not a fallback, so we start at the
+- // beginning of our list.
+- const SkTypeface* firstTypeface = find_from_uniqueID(list[0]);
+- return find_typeface(firstTypeface, origTypeface->style())->uniqueID();
++ return list[0];
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ SkTypeface* SkFontHost::CreateTypefaceFromStream(SkStream* stream) {
+ if (NULL == stream || stream->getLength() <= 0) {
+ return NULL;
+ }
+
+ bool isFixedWidth;
+@@ -754,10 +660,11 @@ SkTypeface* SkFontHost::CreateTypefaceFr
+ }
+
+ SkTypeface* SkFontHost::CreateTypefaceFromFile(const char path[]) {
+ SkStream* stream = SkNEW_ARGS(SkMMAPStream, (path));
+ SkTypeface* face = SkFontHost::CreateTypefaceFromStream(stream);
+ // since we created the stream, we let go of our ref() here
+ stream->unref();
+ return face;
+ }
+
++///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/patches/archive/radial-gradients.patch b/gfx/skia/patches/archive/radial-gradients.patch
new file mode 100644
index 0000000000..183923e83e
--- /dev/null
+++ b/gfx/skia/patches/archive/radial-gradients.patch
@@ -0,0 +1,25 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1665,17 +1665,20 @@ public:
+ }
+ return kRadial2_GradientType;
+ }
+
+ virtual void shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+ SkASSERT(count > 0);
+
+ // Zero difference between radii: fill with transparent black.
+- if (fDiffRadius == 0) {
++ // TODO: Is removing this actually correct? Two circles with the
++ // same radius, but different centers doesn't sound like it
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+ sk_bzero(dstC, count * sizeof(*dstC));
+ return;
+ }
+ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+ TileProc proc = fTileProc;
+ const SkPMColor* SK_RESTRICT cache = this->getCache32();
+
+ SkScalar foura = fA * 4;
diff --git a/gfx/skia/patches/archive/skia_restrict_problem.patch b/gfx/skia/patches/archive/skia_restrict_problem.patch
new file mode 100644
index 0000000000..c7639ca2ce
--- /dev/null
+++ b/gfx/skia/patches/archive/skia_restrict_problem.patch
@@ -0,0 +1,461 @@
+diff --git a/gfx/skia/src/effects/SkGradientShader.cpp b/gfx/skia/src/effects/SkGradientShader.cpp
+--- a/gfx/skia/src/effects/SkGradientShader.cpp
++++ b/gfx/skia/src/effects/SkGradientShader.cpp
+@@ -1184,116 +1184,17 @@ public:
+ {
+ // make sure our table is insync with our current #define for kSQRT_TABLE_SIZE
+ SkASSERT(sizeof(gSqrt8Table) == kSQRT_TABLE_SIZE);
+
+ rad_to_unit_matrix(center, radius, &fPtsToUnit);
+ }
+
+ virtual void shadeSpan(int x, int y, SkPMColor* dstC, int count) SK_OVERRIDE;
+- virtual void shadeSpan16(int x, int y, uint16_t* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+- SkASSERT(count > 0);
+-
+- SkPoint srcPt;
+- SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+- TileProc proc = fTileProc;
+- const uint16_t* SK_RESTRICT cache = this->getCache16();
+- int toggle = ((x ^ y) & 1) << kCache16Bits;
+-
+- if (fDstToIndexClass != kPerspective_MatrixClass) {
+- dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+- SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+- SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
+- SkFixed dy, fy = SkScalarToFixed(srcPt.fY);
+-
+- if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+- SkFixed storage[2];
+- (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &storage[0], &storage[1]);
+- dx = storage[0];
+- dy = storage[1];
+- } else {
+- SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+- dx = SkScalarToFixed(fDstToIndex.getScaleX());
+- dy = SkScalarToFixed(fDstToIndex.getSkewY());
+- }
+-
+- if (proc == clamp_tileproc) {
+- const uint8_t* SK_RESTRICT sqrt_table = gSqrt8Table;
+-
+- /* knock these down so we can pin against +- 0x7FFF, which is an immediate load,
+- rather than 0xFFFF which is slower. This is a compromise, since it reduces our
+- precision, but that appears to be visually OK. If we decide this is OK for
+- all of our cases, we could (it seems) put this scale-down into fDstToIndex,
+- to avoid having to do these extra shifts each time.
+- */
+- fx >>= 1;
+- dx >>= 1;
+- fy >>= 1;
+- dy >>= 1;
+- if (dy == 0) { // might perform this check for the other modes, but the win will be a smaller % of the total
+- fy = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
+- fy *= fy;
+- do {
+- unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
+- unsigned fi = (xx * xx + fy) >> (14 + 16 - kSQRT_TABLE_BITS);
+- fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
+- fx += dx;
+- *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- } else {
+- do {
+- unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
+- unsigned fi = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
+- fi = (xx * xx + fi * fi) >> (14 + 16 - kSQRT_TABLE_BITS);
+- fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- }
+- } else if (proc == mirror_tileproc) {
+- do {
+- SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
+- unsigned fi = mirror_tileproc(dist);
+- SkASSERT(fi <= 0xFFFF);
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- } else {
+- SkASSERT(proc == repeat_tileproc);
+- do {
+- SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
+- unsigned fi = repeat_tileproc(dist);
+- SkASSERT(fi <= 0xFFFF);
+- fx += dx;
+- fy += dy;
+- *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
+- toggle ^= (1 << kCache16Bits);
+- } while (--count != 0);
+- }
+- } else { // perspective case
+- SkScalar dstX = SkIntToScalar(x);
+- SkScalar dstY = SkIntToScalar(y);
+- do {
+- dstProc(fDstToIndex, dstX, dstY, &srcPt);
+- unsigned fi = proc(SkScalarToFixed(srcPt.length()));
+- SkASSERT(fi <= 0xFFFF);
+-
+- int index = fi >> (16 - kCache16Bits);
+- *dstC++ = cache[toggle + index];
+- toggle ^= (1 << kCache16Bits);
+-
+- dstX += SK_Scalar1;
+- } while (--count != 0);
+- }
+- }
++ virtual void shadeSpan16(int x, int y, uint16_t* dstC, int count) SK_OVERRIDE;
+
+ virtual BitmapType asABitmap(SkBitmap* bitmap,
+ SkMatrix* matrix,
+ TileMode* xy,
+ SkScalar* twoPointRadialParams) const SK_OVERRIDE {
+ if (bitmap) {
+ this->commonAsABitmap(bitmap);
+ }
+@@ -1507,16 +1408,117 @@ void Radial_Gradient::shadeSpan(int x, i
+ unsigned fi = proc(SkScalarToFixed(srcPt.length()));
+ SkASSERT(fi <= 0xFFFF);
+ *dstC++ = cache[fi >> (16 - kCache32Bits)];
+ dstX += SK_Scalar1;
+ } while (--count != 0);
+ }
+ }
+
++void Radial_Gradient::shadeSpan16(int x, int y, uint16_t* SK_RESTRICT dstC, int count) {
++ SkASSERT(count > 0);
++
++ SkPoint srcPt;
++ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
++ TileProc proc = fTileProc;
++ const uint16_t* SK_RESTRICT cache = this->getCache16();
++ int toggle = ((x ^ y) & 1) << kCache16Bits;
++
++ if (fDstToIndexClass != kPerspective_MatrixClass) {
++ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
++ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
++ SkFixed dx, fx = SkScalarToFixed(srcPt.fX);
++ SkFixed dy, fy = SkScalarToFixed(srcPt.fY);
++
++ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
++ SkFixed storage[2];
++ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &storage[0], &storage[1]);
++ dx = storage[0];
++ dy = storage[1];
++ } else {
++ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
++ dx = SkScalarToFixed(fDstToIndex.getScaleX());
++ dy = SkScalarToFixed(fDstToIndex.getSkewY());
++ }
++
++ if (proc == clamp_tileproc) {
++ const uint8_t* SK_RESTRICT sqrt_table = gSqrt8Table;
++
++ /* knock these down so we can pin against +- 0x7FFF, which is an immediate load,
++ rather than 0xFFFF which is slower. This is a compromise, since it reduces our
++ precision, but that appears to be visually OK. If we decide this is OK for
++ all of our cases, we could (it seems) put this scale-down into fDstToIndex,
++ to avoid having to do these extra shifts each time.
++ */
++ fx >>= 1;
++ dx >>= 1;
++ fy >>= 1;
++ dy >>= 1;
++ if (dy == 0) { // might perform this check for the other modes, but the win will be a smaller % of the total
++ fy = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
++ fy *= fy;
++ do {
++ unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
++ unsigned fi = (xx * xx + fy) >> (14 + 16 - kSQRT_TABLE_BITS);
++ fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
++ fx += dx;
++ *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ } else {
++ do {
++ unsigned xx = SkPin32(fx, -0xFFFF >> 1, 0xFFFF >> 1);
++ unsigned fi = SkPin32(fy, -0xFFFF >> 1, 0xFFFF >> 1);
++ fi = (xx * xx + fi * fi) >> (14 + 16 - kSQRT_TABLE_BITS);
++ fi = SkFastMin32(fi, 0xFFFF >> (16 - kSQRT_TABLE_BITS));
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (sqrt_table[fi] >> (8 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ }
++ } else if (proc == mirror_tileproc) {
++ do {
++ SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
++ unsigned fi = mirror_tileproc(dist);
++ SkASSERT(fi <= 0xFFFF);
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ } else {
++ SkASSERT(proc == repeat_tileproc);
++ do {
++ SkFixed dist = SkFixedSqrt(SkFixedSquare(fx) + SkFixedSquare(fy));
++ unsigned fi = repeat_tileproc(dist);
++ SkASSERT(fi <= 0xFFFF);
++ fx += dx;
++ fy += dy;
++ *dstC++ = cache[toggle + (fi >> (16 - kCache16Bits))];
++ toggle ^= (1 << kCache16Bits);
++ } while (--count != 0);
++ }
++ } else { // perspective case
++ SkScalar dstX = SkIntToScalar(x);
++ SkScalar dstY = SkIntToScalar(y);
++ do {
++ dstProc(fDstToIndex, dstX, dstY, &srcPt);
++ unsigned fi = proc(SkScalarToFixed(srcPt.length()));
++ SkASSERT(fi <= 0xFFFF);
++
++ int index = fi >> (16 - kCache16Bits);
++ *dstC++ = cache[toggle + index];
++ toggle ^= (1 << kCache16Bits);
++
++ dstX += SK_Scalar1;
++ } while (--count != 0);
++ }
++}
++
+ /* Two-point radial gradients are specified by two circles, each with a center
+ point and radius. The gradient can be considered to be a series of
+ concentric circles, with the color interpolated from the start circle
+ (at t=0) to the end circle (at t=1).
+
+ For each point (x, y) in the span, we want to find the
+ interpolated circle that intersects that point. The center
+ of the desired circle (Cx, Cy) falls at some distance t
+@@ -1661,109 +1663,17 @@ public:
+ info->fPoint[0] = fCenter1;
+ info->fPoint[1] = fCenter2;
+ info->fRadius[0] = fRadius1;
+ info->fRadius[1] = fRadius2;
+ }
+ return kRadial2_GradientType;
+ }
+
+- virtual void shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) SK_OVERRIDE {
+- SkASSERT(count > 0);
+-
+- // Zero difference between radii: fill with transparent black.
+- // TODO: Is removing this actually correct? Two circles with the
+- // same radius, but different centers doesn't sound like it
+- // should be cleared
+- if (fDiffRadius == 0 && fCenter1 == fCenter2) {
+- sk_bzero(dstC, count * sizeof(*dstC));
+- return;
+- }
+- SkMatrix::MapXYProc dstProc = fDstToIndexProc;
+- TileProc proc = fTileProc;
+- const SkPMColor* SK_RESTRICT cache = this->getCache32();
+-
+- SkScalar foura = fA * 4;
+- bool posRoot = fDiffRadius < 0;
+- if (fDstToIndexClass != kPerspective_MatrixClass) {
+- SkPoint srcPt;
+- dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
+- SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
+- SkScalar dx, fx = srcPt.fX;
+- SkScalar dy, fy = srcPt.fY;
+-
+- if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
+- SkFixed fixedX, fixedY;
+- (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &fixedX, &fixedY);
+- dx = SkFixedToScalar(fixedX);
+- dy = SkFixedToScalar(fixedY);
+- } else {
+- SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
+- dx = fDstToIndex.getScaleX();
+- dy = fDstToIndex.getSkewY();
+- }
+- SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkScalar db = (SkScalarMul(fDiff.fX, dx) +
+- SkScalarMul(fDiff.fY, dy)) * 2;
+- if (proc == clamp_tileproc) {
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- if (t < 0) {
+- *dstC++ = cache[-1];
+- } else if (t > 0xFFFF) {
+- *dstC++ = cache[kCache32Count * 2];
+- } else {
+- SkASSERT(t <= 0xFFFF);
+- *dstC++ = cache[t >> (16 - kCache32Bits)];
+- }
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- } else if (proc == mirror_tileproc) {
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = mirror_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- } else {
+- SkASSERT(proc == repeat_tileproc);
+- for (; count > 0; --count) {
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = repeat_tileproc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- fx += dx;
+- fy += dy;
+- b += db;
+- }
+- }
+- } else { // perspective case
+- SkScalar dstX = SkIntToScalar(x);
+- SkScalar dstY = SkIntToScalar(y);
+- for (; count > 0; --count) {
+- SkPoint srcPt;
+- dstProc(fDstToIndex, dstX, dstY, &srcPt);
+- SkScalar fx = srcPt.fX;
+- SkScalar fy = srcPt.fY;
+- SkScalar b = (SkScalarMul(fDiff.fX, fx) +
+- SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
+- SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
+- SkFixed index = proc(t);
+- SkASSERT(index <= 0xFFFF);
+- *dstC++ = cache[index >> (16 - kCache32Bits)];
+- dstX += SK_Scalar1;
+- }
+- }
+- }
++ virtual void shadeSpan(int x, int y, SkPMColor* dstC, int count) SK_OVERRIDE;
+
+ virtual bool setContext(const SkBitmap& device,
+ const SkPaint& paint,
+ const SkMatrix& matrix) SK_OVERRIDE {
+ if (!this->INHERITED::setContext(device, paint, matrix)) {
+ return false;
+ }
+
+@@ -1817,16 +1727,110 @@ private:
+ fA = SkScalarSquare(fDiff.fX) + SkScalarSquare(fDiff.fY) - SK_Scalar1;
+ fOneOverTwoA = fA ? SkScalarInvert(fA * 2) : 0;
+
+ fPtsToUnit.setTranslate(-fCenter1.fX, -fCenter1.fY);
+ fPtsToUnit.postScale(inv, inv);
+ }
+ };
+
++void Two_Point_Radial_Gradient::shadeSpan(int x, int y, SkPMColor* SK_RESTRICT dstC, int count) {
++ SkASSERT(count > 0);
++
++ // Zero difference between radii: fill with transparent black.
++ // TODO: Is removing this actually correct? Two circles with the
++ // same radius, but different centers doesn't sound like it
++ // should be cleared
++ if (fDiffRadius == 0 && fCenter1 == fCenter2) {
++ sk_bzero(dstC, count * sizeof(*dstC));
++ return;
++ }
++ SkMatrix::MapXYProc dstProc = fDstToIndexProc;
++ TileProc proc = fTileProc;
++ const SkPMColor* SK_RESTRICT cache = this->getCache32();
++
++ SkScalar foura = fA * 4;
++ bool posRoot = fDiffRadius < 0;
++ if (fDstToIndexClass != kPerspective_MatrixClass) {
++ SkPoint srcPt;
++ dstProc(fDstToIndex, SkIntToScalar(x) + SK_ScalarHalf,
++ SkIntToScalar(y) + SK_ScalarHalf, &srcPt);
++ SkScalar dx, fx = srcPt.fX;
++ SkScalar dy, fy = srcPt.fY;
++
++ if (fDstToIndexClass == kFixedStepInX_MatrixClass) {
++ SkFixed fixedX, fixedY;
++ (void)fDstToIndex.fixedStepInX(SkIntToScalar(y), &fixedX, &fixedY);
++ dx = SkFixedToScalar(fixedX);
++ dy = SkFixedToScalar(fixedY);
++ } else {
++ SkASSERT(fDstToIndexClass == kLinear_MatrixClass);
++ dx = fDstToIndex.getScaleX();
++ dy = fDstToIndex.getSkewY();
++ }
++ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
++ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkScalar db = (SkScalarMul(fDiff.fX, dx) +
++ SkScalarMul(fDiff.fY, dy)) * 2;
++ if (proc == clamp_tileproc) {
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ if (t < 0) {
++ *dstC++ = cache[-1];
++ } else if (t > 0xFFFF) {
++ *dstC++ = cache[kCache32Count * 2];
++ } else {
++ SkASSERT(t <= 0xFFFF);
++ *dstC++ = cache[t >> (16 - kCache32Bits)];
++ }
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ } else if (proc == mirror_tileproc) {
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = mirror_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ } else {
++ SkASSERT(proc == repeat_tileproc);
++ for (; count > 0; --count) {
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = repeat_tileproc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ fx += dx;
++ fy += dy;
++ b += db;
++ }
++ }
++ } else { // perspective case
++ SkScalar dstX = SkIntToScalar(x);
++ SkScalar dstY = SkIntToScalar(y);
++ for (; count > 0; --count) {
++ SkPoint srcPt;
++ dstProc(fDstToIndex, dstX, dstY, &srcPt);
++ SkScalar fx = srcPt.fX;
++ SkScalar fy = srcPt.fY;
++ SkScalar b = (SkScalarMul(fDiff.fX, fx) +
++ SkScalarMul(fDiff.fY, fy) - fStartRadius) * 2;
++ SkFixed t = two_point_radial(b, fx, fy, fSr2D2, foura, fOneOverTwoA, posRoot);
++ SkFixed index = proc(t);
++ SkASSERT(index <= 0xFFFF);
++ *dstC++ = cache[index >> (16 - kCache32Bits)];
++ dstX += SK_Scalar1;
++ }
++ }
++}
++
+ ///////////////////////////////////////////////////////////////////////////////
+
+ class Sweep_Gradient : public Gradient_Shader {
+ public:
+ Sweep_Gradient(SkScalar cx, SkScalar cy, const SkColor colors[],
+ const SkScalar pos[], int count, SkUnitMapper* mapper)
+ : Gradient_Shader(colors, pos, count, SkShader::kClamp_TileMode, mapper),
+ fCenter(SkPoint::Make(cx, cy))
diff --git a/gfx/skia/patches/archive/uninitialized-margin.patch b/gfx/skia/patches/archive/uninitialized-margin.patch
new file mode 100644
index 0000000000..b8ab213e7b
--- /dev/null
+++ b/gfx/skia/patches/archive/uninitialized-margin.patch
@@ -0,0 +1,22 @@
+diff --git a/gfx/skia/src/core/SkDraw.cpp b/gfx/skia/src/core/SkDraw.cpp
+--- a/gfx/skia/src/core/SkDraw.cpp
++++ b/gfx/skia/src/core/SkDraw.cpp
+@@ -2529,17 +2529,17 @@ static bool compute_bounds(const SkPath&
+
+ // init our bounds from the path
+ {
+ SkRect pathBounds = devPath.getBounds();
+ pathBounds.inset(-SK_ScalarHalf, -SK_ScalarHalf);
+ pathBounds.roundOut(bounds);
+ }
+
+- SkIPoint margin;
++ SkIPoint margin = SkIPoint::Make(0, 0);
+ if (filter) {
+ SkASSERT(filterMatrix);
+
+ SkMask srcM, dstM;
+
+ srcM.fBounds = *bounds;
+ srcM.fFormat = SkMask::kA8_Format;
+ srcM.fImage = NULL;
diff --git a/gfx/skia/patches/archive/user-config.patch b/gfx/skia/patches/archive/user-config.patch
new file mode 100644
index 0000000000..11c6f1f638
--- /dev/null
+++ b/gfx/skia/patches/archive/user-config.patch
@@ -0,0 +1,40 @@
+diff --git a/gfx/skia/include/config/SkUserConfig.h b/gfx/skia/include/config/SkUserConfig.h
+--- a/gfx/skia/include/config/SkUserConfig.h
++++ b/gfx/skia/include/config/SkUserConfig.h
+@@ -140,16 +140,20 @@
+ /* If SK_DEBUG is defined, then you can optionally define SK_SUPPORT_UNITTEST
+ which will run additional self-tests at startup. These can take a long time,
+ so this flag is optional.
+ */
+ #ifdef SK_DEBUG
+ //#define SK_SUPPORT_UNITTEST
+ #endif
+
++/* Don't dither 32bit gradients, to match what the canvas test suite expects.
++ */
++#define SK_DISABLE_DITHER_32BIT_GRADIENT
++
+ /* If your system embeds skia and has complex event logging, define this
+ symbol to name a file that maps the following macros to your system's
+ equivalents:
+ SK_TRACE_EVENT0(event)
+ SK_TRACE_EVENT1(event, name1, value1)
+ SK_TRACE_EVENT2(event, name1, value1, name2, value2)
+ src/utils/SkDebugTrace.h has a trivial implementation that writes to
+ the debug output stream. If SK_USER_TRACE_INCLUDE_FILE is not defined,
+@@ -161,9 +165,15 @@
+ */
+ #ifdef SK_SAMPLES_FOR_X
+ #define SK_R32_SHIFT 16
+ #define SK_G32_SHIFT 8
+ #define SK_B32_SHIFT 0
+ #define SK_A32_SHIFT 24
+ #endif
+
++/* Don't include stdint.h on windows as it conflicts with our build system.
++ */
++#ifdef SK_BUILD_FOR_WIN32
++ #define SK_IGNORE_STDINT_DOT_H
++#endif
++
+ #endif
diff --git a/gfx/skia/skia/include/codec/SkAndroidCodec.h b/gfx/skia/skia/include/codec/SkAndroidCodec.h
new file mode 100644
index 0000000000..2b8a79751c
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkAndroidCodec.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAndroidCodec_DEFINED
+#define SkAndroidCodec_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkEncodedInfo.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "modules/skcms/skcms.h"
+
+// TODO(kjlubick, bungeman) Replace these includes with forward declares
+#include "include/codec/SkEncodedImageFormat.h" // IWYU pragma: keep
+#include "include/core/SkAlphaType.h" // IWYU pragma: keep
+#include "include/core/SkColorType.h" // IWYU pragma: keep
+
+#include <cstddef>
+#include <memory>
+
+class SkData;
+class SkPngChunkReader;
+class SkStream;
+struct SkGainmapInfo;
+struct SkIRect;
+
+/**
+ * Abstract interface defining image codec functionality that is necessary for
+ * Android.
+ */
+class SK_API SkAndroidCodec : SkNoncopyable {
+public:
+ /**
+ * Deprecated.
+ *
+ * Now that SkAndroidCodec supports multiframe images, there are multiple
+ * ways to handle compositing an oriented frame on top of an oriented frame
+ * with different tradeoffs. SkAndroidCodec now ignores the orientation and
+ * forces the client to handle it.
+ */
+ enum class ExifOrientationBehavior {
+ kIgnore,
+ kRespect,
+ };
+
+ /**
+ * Pass ownership of an SkCodec to a newly-created SkAndroidCodec.
+ */
+ static std::unique_ptr<SkAndroidCodec> MakeFromCodec(std::unique_ptr<SkCodec>);
+
+ /**
+ * If this stream represents an encoded image that we know how to decode,
+ * return an SkAndroidCodec that can decode it. Otherwise return NULL.
+ *
+ * The SkPngChunkReader handles unknown chunks in PNGs.
+ * See SkCodec.h for more details.
+ *
+ * If NULL is returned, the stream is deleted immediately. Otherwise, the
+ * SkCodec takes ownership of it, and will delete it when done with it.
+ */
+ static std::unique_ptr<SkAndroidCodec> MakeFromStream(std::unique_ptr<SkStream>,
+ SkPngChunkReader* = nullptr);
+
+ /**
+ * If this data represents an encoded image that we know how to decode,
+ * return an SkAndroidCodec that can decode it. Otherwise return NULL.
+ *
+ * The SkPngChunkReader handles unknown chunks in PNGs.
+ * See SkCodec.h for more details.
+ */
+ static std::unique_ptr<SkAndroidCodec> MakeFromData(sk_sp<SkData>, SkPngChunkReader* = nullptr);
+
+ virtual ~SkAndroidCodec();
+
+ // TODO: fInfo is now just a cache of SkCodec's SkImageInfo. No need to
+ // cache and return a reference here, once Android call-sites are updated.
+ const SkImageInfo& getInfo() const { return fInfo; }
+
+ /**
+ * Return the ICC profile of the encoded data.
+ */
+ const skcms_ICCProfile* getICCProfile() const {
+ return fCodec->getEncodedInfo().profile();
+ }
+
+ /**
+ * Format of the encoded data.
+ */
+ SkEncodedImageFormat getEncodedFormat() const { return fCodec->getEncodedFormat(); }
+
+ /**
+ * @param requestedColorType Color type requested by the client
+ *
+     *  |requestedColorType| may be overridden. We will default to kF16
+ * for high precision images.
+ *
+ * In the general case, if it is possible to decode to
+ * |requestedColorType|, this returns |requestedColorType|.
+ * Otherwise, this returns a color type that is an appropriate
+     *  match for the encoded data.
+ */
+ SkColorType computeOutputColorType(SkColorType requestedColorType);
+
+ /**
+ * @param requestedUnpremul Indicates if the client requested
+ * unpremultiplied output
+ *
+ * Returns the appropriate alpha type to decode to. If the image
+ * has alpha, the value of requestedUnpremul will be honored.
+ */
+ SkAlphaType computeOutputAlphaType(bool requestedUnpremul);
+
+ /**
+ * @param outputColorType Color type that the client will decode to.
+ * @param prefColorSpace Preferred color space to decode to.
+ * This may not return |prefColorSpace| for
+ * specific color types.
+ *
+ * Returns the appropriate color space to decode to.
+ */
+ sk_sp<SkColorSpace> computeOutputColorSpace(SkColorType outputColorType,
+ sk_sp<SkColorSpace> prefColorSpace = nullptr);
+
+ /**
+ * Compute the appropriate sample size to get to |size|.
+ *
+ * @param size As an input parameter, the desired output size of
+ * the decode. As an output parameter, the smallest sampled size
+ * larger than the input.
+ * @return the sample size to set AndroidOptions::fSampleSize to decode
+ * to the output |size|.
+ */
+ int computeSampleSize(SkISize* size) const;
+
+ /**
+ * Returns the dimensions of the scaled output image, for an input
+ * sampleSize.
+ *
+ * When the sample size divides evenly into the original dimensions, the
+ * scaled output dimensions will simply be equal to the original
+ * dimensions divided by the sample size.
+ *
+     *  When the sample size does not divide evenly into the original
+ * dimensions, the codec may round up or down, depending on what is most
+ * efficient to decode.
+ *
+ * Finally, the codec will always recommend a non-zero output, so the output
+ * dimension will always be one if the sampleSize is greater than the
+ * original dimension.
+ */
+ SkISize getSampledDimensions(int sampleSize) const;
+
+ /**
+     *  Return (via desiredSubset) a subset which can be decoded from this codec,
+ * or false if the input subset is invalid.
+ *
+ * @param desiredSubset in/out parameter
+ * As input, a desired subset of the original bounds
+ * (as specified by getInfo).
+ * As output, if true is returned, desiredSubset may
+ * have been modified to a subset which is
+ * supported. Although a particular change may have
+ * been made to desiredSubset to create something
+ * supported, it is possible other changes could
+ * result in a valid subset. If false is returned,
+ * desiredSubset's value is undefined.
+ * @return true If the input desiredSubset is valid.
+ * desiredSubset may be modified to a subset
+ * supported by the codec.
+ * false If desiredSubset is invalid (NULL or not fully
+ * contained within the image).
+ */
+ bool getSupportedSubset(SkIRect* desiredSubset) const;
+ // TODO: Rename SkCodec::getValidSubset() to getSupportedSubset()
+
+ /**
+ * Returns the dimensions of the scaled, partial output image, for an
+ * input sampleSize and subset.
+ *
+ * @param sampleSize Factor to scale down by.
+ * @param subset Must be a valid subset of the original image
+ * dimensions and a subset supported by SkAndroidCodec.
+ * getSubset() can be used to obtain a subset supported
+ * by SkAndroidCodec.
+ * @return Size of the scaled partial image. Or zero size
+ * if either of the inputs is invalid.
+ */
+ SkISize getSampledSubsetDimensions(int sampleSize, const SkIRect& subset) const;
+
+ /**
+ * Additional options to pass to getAndroidPixels().
+ */
+ // FIXME: It's a bit redundant to name these AndroidOptions when this class is already
+    //       called SkAndroidCodec.  On the other hand, it may be a bit confusing to call
+ // these Options when SkCodec has a slightly different set of Options. Maybe these
+ // should be DecodeOptions or SamplingOptions?
+ struct AndroidOptions : public SkCodec::Options {
+ AndroidOptions()
+ : SkCodec::Options()
+ , fSampleSize(1)
+ {}
+
+ /**
+ * The client may provide an integer downscale factor for the decode.
+ * The codec may implement this downscaling by sampling or another
+ * method if it is more efficient.
+ *
+ * The default is 1, representing no downscaling.
+ */
+ int fSampleSize;
+ };
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format (config, size)
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale or subset. If the codec cannot perform this
+ * scaling or subsetting, it will return an error code.
+ *
+ * The AndroidOptions object is also used to specify any requested scaling or subsetting
+ * using options->fSampleSize and options->fSubset. If NULL, the defaults (as specified above
+ * for AndroidOptions) are used.
+ *
+ * @return Result kSuccess, or another value explaining the type of failure.
+ */
+ // FIXME: It's a bit redundant to name this getAndroidPixels() when this class is already
+    //       called SkAndroidCodec.  On the other hand, it may be a bit confusing to call
+ // this getPixels() when it is a slightly different API than SkCodec's getPixels().
+ // Maybe this should be decode() or decodeSubset()?
+ SkCodec::Result getAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const AndroidOptions* options);
+
+ /**
+ * Simplified version of getAndroidPixels() where we supply the default AndroidOptions as
+ * specified above for AndroidOptions. It will not perform any scaling or subsetting.
+ */
+ SkCodec::Result getAndroidPixels(const SkImageInfo& info, void* pixels, size_t rowBytes);
+
+ SkCodec::Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->getAndroidPixels(info, pixels, rowBytes);
+ }
+
+ SkCodec* codec() const { return fCodec.get(); }
+
+ /**
+ * Retrieve the gainmap for an image.
+ *
+ * @param outInfo On success, this is populated with the parameters for
+ * rendering this gainmap. This parameter must be non-nullptr.
+ *
+ * @param outGainmapImageStream On success, this is populated with a stream from which the
+ * gainmap image may be decoded. This parameter is optional, and
+ * may be set to nullptr.
+ *
+ * @return If this has a gainmap image and that gainmap image was
+ * successfully extracted then return true. Otherwise return
+ * false.
+ */
+ bool getAndroidGainmap(SkGainmapInfo* outInfo,
+ std::unique_ptr<SkStream>* outGainmapImageStream);
+
+protected:
+ SkAndroidCodec(SkCodec*);
+
+ virtual SkISize onGetSampledDimensions(int sampleSize) const = 0;
+
+ virtual bool onGetSupportedSubset(SkIRect* desiredSubset) const = 0;
+
+ virtual SkCodec::Result onGetAndroidPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const AndroidOptions& options) = 0;
+
+private:
+ const SkImageInfo fInfo;
+ std::unique_ptr<SkCodec> fCodec;
+};
+#endif // SkAndroidCodec_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkCodec.h b/gfx/skia/skia/include/codec/SkCodec.h
new file mode 100644
index 0000000000..3ed1a95a80
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkCodec.h
@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodec_DEFINED
+#define SkCodec_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkYUVAPixmaps.h"
+#include "include/private/SkEncodedInfo.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "modules/skcms/skcms.h"
+
+#include <cstddef>
+#include <functional>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+class SkData;
+class SkFrameHolder;
+class SkImage;
+class SkPngChunkReader;
+class SkSampler;
+class SkStream;
+struct SkGainmapInfo;
+enum SkAlphaType : int;
+enum class SkEncodedImageFormat;
+
+namespace SkCodecAnimation {
+enum class Blend;
+enum class DisposalMethod;
+}
+
+
+namespace DM {
+class CodecSrc;
+} // namespace DM
+
+/**
+ * Abstraction layer directly on top of an image codec.
+ */
+class SK_API SkCodec : SkNoncopyable {
+public:
+ /**
+ * Minimum number of bytes that must be buffered in SkStream input.
+ *
+ * An SkStream passed to NewFromStream must be able to use this many
+ * bytes to determine the image type. Then the same SkStream must be
+ * passed to the correct decoder to read from the beginning.
+ *
+ * This can be accomplished by implementing peek() to support peeking
+ * this many bytes, or by implementing rewind() to be able to rewind()
+ * after reading this many bytes.
+ */
+ static constexpr size_t MinBufferedBytesNeeded() { return 32; }
+
+ /**
+ * Error codes for various SkCodec methods.
+ */
+ enum Result {
+ /**
+ * General return value for success.
+ */
+ kSuccess,
+ /**
+ * The input is incomplete. A partial image was generated.
+ */
+ kIncompleteInput,
+ /**
+ * Like kIncompleteInput, except the input had an error.
+ *
+ * If returned from an incremental decode, decoding cannot continue,
+ * even with more data.
+ */
+ kErrorInInput,
+ /**
+ * The generator cannot convert to match the request, ignoring
+ * dimensions.
+ */
+ kInvalidConversion,
+ /**
+ * The generator cannot scale to requested size.
+ */
+ kInvalidScale,
+ /**
+ * Parameters (besides info) are invalid. e.g. NULL pixels, rowBytes
+ * too small, etc.
+ */
+ kInvalidParameters,
+ /**
+ * The input did not contain a valid image.
+ */
+ kInvalidInput,
+ /**
+ * Fulfilling this request requires rewinding the input, which is not
+ * supported for this input.
+ */
+ kCouldNotRewind,
+ /**
+ * An internal error, such as OOM.
+ */
+ kInternalError,
+ /**
+ * This method is not implemented by this codec.
+ * FIXME: Perhaps this should be kUnsupported?
+ */
+ kUnimplemented,
+ };
+
+ /**
+ * Readable string representing the error code.
+ */
+ static const char* ResultToString(Result);
+
+ /**
+ * For container formats that contain both still images and image sequences,
+ * instruct the decoder how the output should be selected. (Refer to comments
+ * for each value for more details.)
+ */
+ enum class SelectionPolicy {
+ /**
+ * If the container format contains both still images and image sequences,
+ * SkCodec should choose one of the still images. This is the default.
+ */
+ kPreferStillImage,
+ /**
+ * If the container format contains both still images and image sequences,
+ * SkCodec should choose one of the image sequences for animation.
+ */
+ kPreferAnimation,
+ };
+
+ /**
+ * If this stream represents an encoded image that we know how to decode,
+ * return an SkCodec that can decode it. Otherwise return NULL.
+ *
+ * As stated above, this call must be able to peek or read
+ * MinBufferedBytesNeeded to determine the correct format, and then start
+ * reading from the beginning. First it will attempt to peek, and it
+ * assumes that if less than MinBufferedBytesNeeded bytes (but more than
+ * zero) are returned, this is because the stream is shorter than this,
+ * so falling back to reading would not provide more data. If peek()
+ * returns zero bytes, this call will instead attempt to read(). This
+ * will require that the stream can be rewind()ed.
+ *
+ * If Result is not NULL, it will be set to either kSuccess if an SkCodec
+ * is returned or a reason for the failure if NULL is returned.
+ *
+ * If SkPngChunkReader is not NULL, take a ref and pass it to libpng if
+ * the image is a png.
+ *
+ * If the SkPngChunkReader is not NULL then:
+ * If the image is not a PNG, the SkPngChunkReader will be ignored.
+ * If the image is a PNG, the SkPngChunkReader will be reffed.
+ * If the PNG has unknown chunks, the SkPngChunkReader will be used
+ * to handle these chunks. SkPngChunkReader will be called to read
+ * any unknown chunk at any point during the creation of the codec
+ * or the decode. Note that if SkPngChunkReader fails to read a
+ * chunk, this could result in a failure to create the codec or a
+ * failure to decode the image.
+ * If the PNG does not contain unknown chunks, the SkPngChunkReader
+ * will not be used or modified.
+ *
+ * If NULL is returned, the stream is deleted immediately. Otherwise, the
+ * SkCodec takes ownership of it, and will delete it when done with it.
+ */
+ static std::unique_ptr<SkCodec> MakeFromStream(
+ std::unique_ptr<SkStream>, Result* = nullptr,
+ SkPngChunkReader* = nullptr,
+ SelectionPolicy selectionPolicy = SelectionPolicy::kPreferStillImage);
+
+ /**
+ * If this data represents an encoded image that we know how to decode,
+ * return an SkCodec that can decode it. Otherwise return NULL.
+ *
+ * If the SkPngChunkReader is not NULL then:
+ * If the image is not a PNG, the SkPngChunkReader will be ignored.
+ * If the image is a PNG, the SkPngChunkReader will be reffed.
+ * If the PNG has unknown chunks, the SkPngChunkReader will be used
+ * to handle these chunks. SkPngChunkReader will be called to read
+ * any unknown chunk at any point during the creation of the codec
+ * or the decode. Note that if SkPngChunkReader fails to read a
+ * chunk, this could result in a failure to create the codec or a
+ * failure to decode the image.
+ * If the PNG does not contain unknown chunks, the SkPngChunkReader
+ * will not be used or modified.
+ */
+ static std::unique_ptr<SkCodec> MakeFromData(sk_sp<SkData>, SkPngChunkReader* = nullptr);
+
+ virtual ~SkCodec();
+
+ /**
+ * Return a reasonable SkImageInfo to decode into.
+ *
+ * If the image has an ICC profile that does not map to an SkColorSpace,
+ * the returned SkImageInfo will use SRGB.
+ */
+ SkImageInfo getInfo() const { return fEncodedInfo.makeImageInfo(); }
+
+ SkISize dimensions() const { return {fEncodedInfo.width(), fEncodedInfo.height()}; }
+ SkIRect bounds() const {
+ return SkIRect::MakeWH(fEncodedInfo.width(), fEncodedInfo.height());
+ }
+
+ /**
+ * Return the ICC profile of the encoded data.
+ */
+ const skcms_ICCProfile* getICCProfile() const {
+ return this->getEncodedInfo().profile();
+ }
+
+ /**
+ * Returns the image orientation stored in the EXIF data.
+ * If there is no EXIF data, or if we cannot read the EXIF data, returns kTopLeft.
+ */
+ SkEncodedOrigin getOrigin() const { return fOrigin; }
+
+ /**
+ * Return a size that approximately supports the desired scale factor.
+ * The codec may not be able to scale efficiently to the exact scale
+ * factor requested, so return a size that approximates that scale.
+ * The returned value is the codec's suggestion for the closest valid
+ * scale that it can natively support
+ */
+ SkISize getScaledDimensions(float desiredScale) const {
+ // Negative and zero scales are errors.
+ SkASSERT(desiredScale > 0.0f);
+ if (desiredScale <= 0.0f) {
+ return SkISize::Make(0, 0);
+ }
+
+ // Upscaling is not supported. Return the original size if the client
+ // requests an upscale.
+ if (desiredScale >= 1.0f) {
+ return this->dimensions();
+ }
+ return this->onGetScaledDimensions(desiredScale);
+ }
+
+ /**
+     *  Return (via desiredSubset) a subset which can be decoded from this codec,
+ * or false if this codec cannot decode subsets or anything similar to
+ * desiredSubset.
+ *
+ * @param desiredSubset In/out parameter. As input, a desired subset of
+ * the original bounds (as specified by getInfo). If true is returned,
+ * desiredSubset may have been modified to a subset which is
+ * supported. Although a particular change may have been made to
+ * desiredSubset to create something supported, it is possible other
+ * changes could result in a valid subset.
+ * If false is returned, desiredSubset's value is undefined.
+ * @return true if this codec supports decoding desiredSubset (as
+ * returned, potentially modified)
+ */
+ bool getValidSubset(SkIRect* desiredSubset) const {
+ return this->onGetValidSubset(desiredSubset);
+ }
+
+ /**
+ * Format of the encoded data.
+ */
+ SkEncodedImageFormat getEncodedFormat() const { return this->onGetEncodedFormat(); }
+
+ /**
+ * Whether or not the memory passed to getPixels is zero initialized.
+ */
+ enum ZeroInitialized {
+ /**
+ * The memory passed to getPixels is zero initialized. The SkCodec
+ * may take advantage of this by skipping writing zeroes.
+ */
+ kYes_ZeroInitialized,
+ /**
+ * The memory passed to getPixels has not been initialized to zero,
+ * so the SkCodec must write all zeroes to memory.
+ *
+ * This is the default. It will be used if no Options struct is used.
+ */
+ kNo_ZeroInitialized,
+ };
+
+ /**
+ * Additional options to pass to getPixels.
+ */
+ struct Options {
+ Options()
+ : fZeroInitialized(kNo_ZeroInitialized)
+ , fSubset(nullptr)
+ , fFrameIndex(0)
+ , fPriorFrame(kNoFrame)
+ {}
+
+ ZeroInitialized fZeroInitialized;
+ /**
+ * If not NULL, represents a subset of the original image to decode.
+ * Must be within the bounds returned by getInfo().
+ * If the EncodedFormat is SkEncodedImageFormat::kWEBP (the only one which
+ * currently supports subsets), the top and left values must be even.
+ *
+ * In getPixels and incremental decode, we will attempt to decode the
+ * exact rectangular subset specified by fSubset.
+ *
+ * In a scanline decode, it does not make sense to specify a subset
+ * top or subset height, since the client already controls which rows
+ * to get and which rows to skip. During scanline decodes, we will
+ * require that the subset top be zero and the subset height be equal
+ * to the full height. We will, however, use the values of
+ * subset left and subset width to decode partial scanlines on calls
+ * to getScanlines().
+ */
+ const SkIRect* fSubset;
+
+ /**
+ * The frame to decode.
+ *
+ * Only meaningful for multi-frame images.
+ */
+ int fFrameIndex;
+
+ /**
+ * If not kNoFrame, the dst already contains the prior frame at this index.
+ *
+ * Only meaningful for multi-frame images.
+ *
+ * If fFrameIndex needs to be blended with a prior frame (as reported by
+ * getFrameInfo[fFrameIndex].fRequiredFrame), the client can set this to
+ * any non-kRestorePrevious frame in [fRequiredFrame, fFrameIndex) to
+ * indicate that that frame is already in the dst. Options.fZeroInitialized
+ * is ignored in this case.
+ *
+ * If set to kNoFrame, the codec will decode any necessary required frame(s) first.
+ */
+ int fPriorFrame;
+ };
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format (config, size)
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale. If the generator cannot perform this scale,
+ * it will return kInvalidScale.
+ *
+ * If the info contains a non-null SkColorSpace, the codec
+ * will perform the appropriate color space transformation.
+ *
+ * If the caller passes in the SkColorSpace that maps to the
+ * ICC profile reported by getICCProfile(), the color space
+ * transformation is a no-op.
+ *
+ * If the caller passes a null SkColorSpace, no color space
+ * transformation will be done.
+ *
+ * If a scanline decode is in progress, scanline mode will end, requiring the client to call
+ * startScanlineDecode() in order to return to decoding scanlines.
+ *
+ * @return Result kSuccess, or another value explaining the type of failure.
+ */
+ Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const Options*);
+
+ /**
+ * Simplified version of getPixels() that uses the default Options.
+ */
+ Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->getPixels(info, pixels, rowBytes, nullptr);
+ }
+
+ Result getPixels(const SkPixmap& pm, const Options* opts = nullptr) {
+ return this->getPixels(pm.info(), pm.writable_addr(), pm.rowBytes(), opts);
+ }
+
+ /**
+ * Return an image containing the pixels.
+ */
+ std::tuple<sk_sp<SkImage>, SkCodec::Result> getImage(const SkImageInfo& info,
+ const Options* opts = nullptr);
+ std::tuple<sk_sp<SkImage>, SkCodec::Result> getImage();
+
+ /**
+ * If decoding to YUV is supported, this returns true. Otherwise, this
+ * returns false and the caller will ignore output parameter yuvaPixmapInfo.
+ *
+ * @param supportedDataTypes Indicates the data type/planar config combinations that are
+ * supported by the caller. If the generator supports decoding to
+ * YUV(A), but not as a type in supportedDataTypes, this method
+ * returns false.
+ * @param yuvaPixmapInfo Output parameter that specifies the planar configuration, subsampling,
+ * orientation, chroma siting, plane color types, and row bytes.
+ */
+ bool queryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes,
+ SkYUVAPixmapInfo* yuvaPixmapInfo) const;
+
+ /**
+ * Returns kSuccess, or another value explaining the type of failure.
+ * This always attempts to perform a full decode. To get the planar
+ * configuration without decoding use queryYUVAInfo().
+ *
+ * @param yuvaPixmaps Contains preallocated pixmaps configured according to a successful call
+ * to queryYUVAInfo().
+ */
+ Result getYUVAPlanes(const SkYUVAPixmaps& yuvaPixmaps);
+
+ /**
+ * Prepare for an incremental decode with the specified options.
+ *
+ * This may require a rewind.
+ *
+ * If kIncompleteInput is returned, may be called again after more data has
+ * been provided to the source SkStream.
+ *
+ * @param dstInfo Info of the destination. If the dimensions do not match
+ * those of getInfo, this implies a scale.
+ * @param dst Memory to write to. Needs to be large enough to hold the subset,
+ * if present, or the full image as described in dstInfo.
+ * @param options Contains decoding options, including if memory is zero
+ * initialized and whether to decode a subset.
+ * @return Enum representing success or reason for failure.
+ */
+ Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ const Options*);
+
+ Result startIncrementalDecode(const SkImageInfo& dstInfo, void* dst, size_t rowBytes) {
+ return this->startIncrementalDecode(dstInfo, dst, rowBytes, nullptr);
+ }
+
+ /**
+ * Start/continue the incremental decode.
+ *
+ * Not valid to call before a call to startIncrementalDecode() returns
+ * kSuccess.
+ *
+ * If kIncompleteInput is returned, may be called again after more data has
+ * been provided to the source SkStream.
+ *
+ * Unlike getPixels and getScanlines, this does not do any filling. This is
+ * left up to the caller, since they may be skipping lines or continuing the
+ * decode later. In the latter case, they may choose to initialize all lines
+ * first, or only initialize the remaining lines after the first call.
+ *
+ * @param rowsDecoded Optional output variable returning the total number of
+ * lines initialized. Only meaningful if this method returns kIncompleteInput.
+ * Otherwise the implementation may not set it.
+ * Note that some implementations may have initialized this many rows, but
+ * not necessarily finished those rows (e.g. interlaced PNG). This may be
+ * useful for determining what rows the client needs to initialize.
+ * @return kSuccess if all lines requested in startIncrementalDecode have
+ * been completely decoded. kIncompleteInput otherwise.
+ */
+ Result incrementalDecode(int* rowsDecoded = nullptr) {
+ if (!fStartedIncrementalDecode) {
+ return kInvalidParameters;
+ }
+ return this->onIncrementalDecode(rowsDecoded);
+ }
+
+ /**
+ * The remaining functions revolve around decoding scanlines.
+ */
+
+ /**
+ * Prepare for a scanline decode with the specified options.
+ *
+ * After this call, this class will be ready to decode the first scanline.
+ *
+ * This must be called in order to call getScanlines or skipScanlines.
+ *
+ * This may require rewinding the stream.
+ *
+ * Not all SkCodecs support this.
+ *
+ * @param dstInfo Info of the destination. If the dimensions do not match
+ * those of getInfo, this implies a scale.
+ * @param options Contains decoding options, including if memory is zero
+ * initialized.
+ * @return Enum representing success or reason for failure.
+ */
+ Result startScanlineDecode(const SkImageInfo& dstInfo, const Options* options);
+
+ /**
+ * Simplified version of startScanlineDecode() that uses the default Options.
+ */
+ Result startScanlineDecode(const SkImageInfo& dstInfo) {
+ return this->startScanlineDecode(dstInfo, nullptr);
+ }
+
+ /**
+ * Write the next countLines scanlines into dst.
+ *
+ * Not valid to call before calling startScanlineDecode().
+ *
+ * @param dst Must be non-null, and large enough to hold countLines
+ * scanlines of size rowBytes.
+ * @param countLines Number of lines to write.
+ * @param rowBytes Number of bytes per row. Must be large enough to hold
+ * a scanline based on the SkImageInfo used to create this object.
+ * @return the number of lines successfully decoded. If this value is
+ * less than countLines, this will fill the remaining lines with a
+ * default value.
+ */
+ int getScanlines(void* dst, int countLines, size_t rowBytes);
+
+ /**
+ * Skip count scanlines.
+ *
+ * Not valid to call before calling startScanlineDecode().
+ *
+ * The default version just calls onGetScanlines and discards the dst.
+ * NOTE: If skipped lines are the only lines with alpha, this default
+ * will make reallyHasAlpha return true, when it could have returned
+ * false.
+ *
+ * @return true if the scanlines were successfully skipped
+ * false on failure, possible reasons for failure include:
+ * An incomplete input image stream.
+ * Calling this function before calling startScanlineDecode().
+ * If countLines is less than zero or so large that it moves
+ * the current scanline past the end of the image.
+ */
+ bool skipScanlines(int countLines);
+
+ /**
+ * The order in which rows are output from the scanline decoder is not the
+ * same for all variations of all image types. This explains the possible
+ * output row orderings.
+ */
+ enum SkScanlineOrder {
+ /*
+ * By far the most common, this indicates that the image can be decoded
+ * reliably using the scanline decoder, and that rows will be output in
+ * the logical order.
+ */
+ kTopDown_SkScanlineOrder,
+
+ /*
+ * This indicates that the scanline decoder reliably outputs rows, but
+ * they will be returned in reverse order. If the scanline format is
+ * kBottomUp, the nextScanline() API can be used to determine the actual
+ * y-coordinate of the next output row, but the client is not forced
+ * to take advantage of this, given that it's not too tough to keep
+ * track independently.
+ *
+ * For full image decodes, it is safe to get all of the scanlines at
+ * once, since the decoder will handle inverting the rows as it
+ * decodes.
+ *
+ * For subset decodes and sampling, it is simplest to get and skip
+ * scanlines one at a time, using the nextScanline() API. It is
+ * possible to ask for larger chunks at a time, but this should be used
+ * with caution. As with full image decodes, the decoder will handle
+ * inverting the requested rows, but rows will still be delivered
+ * starting from the bottom of the image.
+ *
+ * Upside down bmps are an example.
+ */
+ kBottomUp_SkScanlineOrder,
+ };
+
+ /**
+ * An enum representing the order in which scanlines will be returned by
+ * the scanline decoder.
+ *
+ * This is undefined before startScanlineDecode() is called.
+ */
+ SkScanlineOrder getScanlineOrder() const { return this->onGetScanlineOrder(); }
+
+ /**
+ * Returns the y-coordinate of the next row to be returned by the scanline
+ * decoder.
+ *
+ * This will equal fCurrScanline, except in the case of strangely
+ * encoded image types (bottom-up bmps).
+ *
+ * Results are undefined when not in scanline decoding mode.
+ */
+ int nextScanline() const { return this->outputScanline(fCurrScanline); }
+
+ /**
+ * Returns the output y-coordinate of the row that corresponds to an input
+ * y-coordinate. The input y-coordinate represents where the scanline
+ * is located in the encoded data.
+ *
+ * This will equal inputScanline, except in the case of strangely
+ * encoded image types (bottom-up bmps, interlaced gifs).
+ */
+ int outputScanline(int inputScanline) const;
+
+ /**
+ * Return the number of frames in the image.
+ *
+ * May require reading through the stream.
+ */
+ int getFrameCount() {
+ return this->onGetFrameCount();
+ }
+
+ // Sentinel value used when a frame index implies "no frame":
+ // - FrameInfo::fRequiredFrame set to this value means the frame
+ // is independent.
+ // - Options::fPriorFrame set to this value means no (relevant) prior frame
+ // is residing in dst's memory.
+ static constexpr int kNoFrame = -1;
+
+ // This transitional definition was added in August 2018, and will eventually be removed.
+#ifdef SK_LEGACY_SKCODEC_NONE_ENUM
+ static constexpr int kNone = kNoFrame;
+#endif
+
+ /**
+ * Information about individual frames in a multi-framed image.
+ */
+ struct FrameInfo {
+ /**
+ * The frame that this frame needs to be blended with, or
+ * kNoFrame if this frame is independent (so it can be
+ * drawn over an uninitialized buffer).
+ *
+ * Note that this is the *earliest* frame that can be used
+ * for blending. Any frame from [fRequiredFrame, i) can be
+ * used, unless its fDisposalMethod is kRestorePrevious.
+ */
+ int fRequiredFrame;
+
+ /**
+ * Number of milliseconds to show this frame.
+ */
+ int fDuration;
+
+ /**
+ * Whether the end marker for this frame is contained in the stream.
+ *
+ * Note: this does not guarantee that an attempt to decode will be complete.
+ * There could be an error in the stream.
+ */
+ bool fFullyReceived;
+
+ /**
+ * This is conservative; it will still return non-opaque if e.g. a
+ * color index-based frame has a color with alpha but does not use it.
+ */
+ SkAlphaType fAlphaType;
+
+ /**
+ * Whether the updated rectangle contains alpha.
+ *
+ * This is conservative; it will still be set to true if e.g. a color
+ * index-based frame has a color with alpha but does not use it. In
+ * addition, it may be set to true, even if the final frame, after
+ * blending, is opaque.
+ */
+ bool fHasAlphaWithinBounds;
+
+ /**
+ * How this frame should be modified before decoding the next one.
+ */
+ SkCodecAnimation::DisposalMethod fDisposalMethod;
+
+ /**
+ * How this frame should blend with the prior frame.
+ */
+ SkCodecAnimation::Blend fBlend;
+
+ /**
+ * The rectangle updated by this frame.
+ *
+ * It may be empty, if the frame does not change the image. It will
+ * always be contained by SkCodec::dimensions().
+ */
+ SkIRect fFrameRect;
+ };
+
+ /**
+ * Return info about a single frame.
+ *
+ * Does not read through the stream, so it should be called after
+ * getFrameCount() to parse any frames that have not already been parsed.
+ *
+ * Only supported by animated (multi-frame) codecs. Note that this is a
+ * property of the codec (the SkCodec subclass), not the image.
+ *
+ * To elaborate, some codecs support animation (e.g. GIF). Others do not
+ * (e.g. BMP). Animated codecs can still represent single frame images.
+ * Calling getFrameInfo(0, etc) will return true for a single frame GIF
+ * even if the overall image is not animated (in that the pixels on screen
+ * do not change over time). When incrementally decoding a GIF image, we
+ * might only know that there's a single frame *so far*.
+ *
+ * For non-animated SkCodec subclasses, it's sufficient but not necessary
+ * for this method to always return false.
+ */
+ bool getFrameInfo(int index, FrameInfo* info) const {
+ if (index < 0) {
+ return false;
+ }
+ return this->onGetFrameInfo(index, info);
+ }
+
+ /**
+ * Return info about all the frames in the image.
+ *
+ * May require reading through the stream to determine info about the
+ * frames (including the count).
+ *
+ * As such, future decoding calls may require a rewind.
+ *
+ * This may return an empty vector for non-animated codecs. See the
+ * getFrameInfo(int, FrameInfo*) comment.
+ */
+ std::vector<FrameInfo> getFrameInfo();
+
+ static constexpr int kRepetitionCountInfinite = -1;
+
+ /**
+ * Return the number of times to repeat, if this image is animated. This number does not
+ * include the first play through of each frame. For example, a repetition count of 4 means
+ * that each frame is played 5 times and then the animation stops.
+ *
+ * It can return kRepetitionCountInfinite, a negative number, meaning that the animation
+ * should loop forever.
+ *
+ * May require reading the stream to find the repetition count.
+ *
+ * As such, future decoding calls may require a rewind.
+ *
+ * For still (non-animated) image codecs, this will return 0.
+ */
+ int getRepetitionCount() {
+ return this->onGetRepetitionCount();
+ }
+
+ // Register a decoder at runtime by passing two function pointers:
+ // - peek() to return true if the span of bytes appears to be your encoded format;
+ // - make() to attempt to create an SkCodec from the given stream.
+ // Not thread safe.
+ static void Register(
+ bool (*peek)(const void*, size_t),
+ std::unique_ptr<SkCodec> (*make)(std::unique_ptr<SkStream>, SkCodec::Result*));
+
+protected:
+ const SkEncodedInfo& getEncodedInfo() const { return fEncodedInfo; }
+
+ using XformFormat = skcms_PixelFormat;
+
+ SkCodec(SkEncodedInfo&&,
+ XformFormat srcFormat,
+ std::unique_ptr<SkStream>,
+ SkEncodedOrigin = kTopLeft_SkEncodedOrigin);
+
+ void setSrcXformFormat(XformFormat pixelFormat);
+
+ XformFormat getSrcXformFormat() const {
+ return fSrcXformFormat;
+ }
+
+ virtual bool onGetGainmapInfo(SkGainmapInfo*, std::unique_ptr<SkStream>*) { return false; }
+
+ virtual SkISize onGetScaledDimensions(float /*desiredScale*/) const {
+ // By default, scaling is not supported.
+ return this->dimensions();
+ }
+
+ // FIXME: What to do about subsets??
+ /**
+ * Subclasses should override if they support dimensions other than the
+ * srcInfo's.
+ */
+ virtual bool onDimensionsSupported(const SkISize&) {
+ return false;
+ }
+
+ virtual SkEncodedImageFormat onGetEncodedFormat() const = 0;
+
+ /**
+ * @param rowsDecoded When the encoded image stream is incomplete, this function
+ * will return kIncompleteInput and rowsDecoded will be set to
+ * the number of scanlines that were successfully decoded.
+ * This will allow getPixels() to fill the uninitialized memory.
+ */
+ virtual Result onGetPixels(const SkImageInfo& info,
+ void* pixels, size_t rowBytes, const Options&,
+ int* rowsDecoded) = 0;
+
+ virtual bool onQueryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes&,
+ SkYUVAPixmapInfo*) const { return false; }
+
+ virtual Result onGetYUVAPlanes(const SkYUVAPixmaps&) { return kUnimplemented; }
+
+ virtual bool onGetValidSubset(SkIRect* /*desiredSubset*/) const {
+ // By default, subsets are not supported.
+ return false;
+ }
+
+ /**
+ * If the stream was previously read, attempt to rewind.
+ *
+ * If the stream needed to be rewound, call onRewind.
+ * @returns true if the codec is at the right position and can be used.
+ * false if there was a failure to rewind.
+ *
+ * This is called by getPixels(), getYUV8Planes(), startIncrementalDecode() and
+ * startScanlineDecode(). Subclasses may call if they need to rewind at another time.
+ */
+ bool SK_WARN_UNUSED_RESULT rewindIfNeeded();
+
+ /**
+ * Called by rewindIfNeeded, if the stream needed to be rewound.
+ *
+ * Subclasses should do any set up needed after a rewind.
+ */
+ virtual bool onRewind() {
+ return true;
+ }
+
+ /**
+ * Get method for the input stream
+ */
+ SkStream* stream() {
+ return fStream.get();
+ }
+
+ /**
+ * The remaining functions revolve around decoding scanlines.
+ */
+
+ /**
+ * Most images types will be kTopDown and will not need to override this function.
+ */
+ virtual SkScanlineOrder onGetScanlineOrder() const { return kTopDown_SkScanlineOrder; }
+
+ const SkImageInfo& dstInfo() const { return fDstInfo; }
+
+ const Options& options() const { return fOptions; }
+
+ /**
+ * Returns the number of scanlines that have been decoded so far.
+ * This is unaffected by the SkScanlineOrder.
+ *
+ * Returns -1 if we have not started a scanline decode.
+ */
+ int currScanline() const { return fCurrScanline; }
+
+ virtual int onOutputScanline(int inputScanline) const;
+
+ /**
+ * Return whether we can convert to dst.
+ *
+ * Will be called for the appropriate frame, prior to initializing the colorXform.
+ */
+ virtual bool conversionSupported(const SkImageInfo& dst, bool srcIsOpaque,
+ bool needsColorXform);
+
+ // Some classes never need a colorXform e.g.
+ // - ICO uses its embedded codec's colorXform
+ // - WBMP is just Black/White
+ virtual bool usesColorXform() const { return true; }
+ void applyColorXform(void* dst, const void* src, int count) const;
+
+ bool colorXform() const { return fXformTime != kNo_XformTime; }
+ bool xformOnDecode() const { return fXformTime == kDecodeRow_XformTime; }
+
+ virtual int onGetFrameCount() {
+ return 1;
+ }
+
+ virtual bool onGetFrameInfo(int, FrameInfo*) const {
+ return false;
+ }
+
+ virtual int onGetRepetitionCount() {
+ return 0;
+ }
+
+private:
+ const SkEncodedInfo fEncodedInfo;
+ XformFormat fSrcXformFormat;
+ std::unique_ptr<SkStream> fStream;
+ bool fNeedsRewind = false;
+ const SkEncodedOrigin fOrigin;
+
+ SkImageInfo fDstInfo;
+ Options fOptions;
+
+ enum XformTime {
+ kNo_XformTime,
+ kPalette_XformTime,
+ kDecodeRow_XformTime,
+ };
+ XformTime fXformTime;
+ XformFormat fDstXformFormat; // Based on fDstInfo.
+ skcms_ICCProfile fDstProfile;
+ skcms_AlphaFormat fDstXformAlphaFormat;
+
+ // Only meaningful during scanline decodes.
+ int fCurrScanline = -1;
+
+ bool fStartedIncrementalDecode = false;
+
+ // Allows SkAndroidCodec to call handleFrameIndex (potentially decoding a prior frame and
+ // clearing to transparent) without SkCodec itself calling it, too.
+ bool fUsingCallbackForHandleFrameIndex = false;
+
+ bool initializeColorXform(const SkImageInfo& dstInfo, SkEncodedInfo::Alpha, bool srcIsOpaque);
+
+ /**
+ * Return whether these dimensions are supported as a scale.
+ *
+ * The codec may choose to cache the information about scale and subset.
+ * Either way, the same information will be passed to onGetPixels/onStart
+ * on success.
+ *
+ * This must return true for a size returned from getScaledDimensions.
+ */
+ bool dimensionsSupported(const SkISize& dim) {
+ return dim == this->dimensions() || this->onDimensionsSupported(dim);
+ }
+
+ /**
+ * For multi-framed images, return the object with information about the frames.
+ */
+ virtual const SkFrameHolder* getFrameHolder() const {
+ return nullptr;
+ }
+
+ // Callback for decoding a prior frame. The `Options::fFrameIndex` is ignored,
+ // being replaced by frameIndex. This allows opts to actually be a subclass of
+ // SkCodec::Options which SkCodec itself does not know how to copy or modify,
+ // but just passes through to the caller (where it can be reinterpret_cast'd).
+ using GetPixelsCallback = std::function<Result(const SkImageInfo&, void* pixels,
+ size_t rowBytes, const Options& opts,
+ int frameIndex)>;
+
+ /**
+ * Check for a valid Options.fFrameIndex, and decode prior frames if necessary.
+ *
+ * If GetPixelsCallback is not null, it will be used to decode a prior frame instead
+ * of using this SkCodec directly. It may also be used recursively, if that in turn
+ * depends on a prior frame. This is used by SkAndroidCodec.
+ */
+ Result handleFrameIndex(const SkImageInfo&, void* pixels, size_t rowBytes, const Options&,
+ GetPixelsCallback = nullptr);
+
+ // Methods for scanline decoding.
+ virtual Result onStartScanlineDecode(const SkImageInfo& /*dstInfo*/,
+ const Options& /*options*/) {
+ return kUnimplemented;
+ }
+
+ virtual Result onStartIncrementalDecode(const SkImageInfo& /*dstInfo*/, void*, size_t,
+ const Options&) {
+ return kUnimplemented;
+ }
+
+ virtual Result onIncrementalDecode(int*) {
+ return kUnimplemented;
+ }
+
+
+ virtual bool onSkipScanlines(int /*countLines*/) { return false; }
+
+ virtual int onGetScanlines(void* /*dst*/, int /*countLines*/, size_t /*rowBytes*/) { return 0; }
+
+ /**
+ * On an incomplete decode, getPixels() and getScanlines() will call this function
+     * to fill any uninitialized memory.
+ *
+ * @param dstInfo Contains the destination color type
+ * Contains the destination alpha type
+ * Contains the destination width
+ * The height stored in this info is unused
+ * @param dst Pointer to the start of destination pixel memory
+ * @param rowBytes Stride length in destination pixel memory
+ * @param zeroInit Indicates if memory is zero initialized
+ * @param linesRequested Number of lines that the client requested
+ * @param linesDecoded Number of lines that were successfully decoded
+ */
+ void fillIncompleteImage(const SkImageInfo& dstInfo, void* dst, size_t rowBytes,
+ ZeroInitialized zeroInit, int linesRequested, int linesDecoded);
+
+ /**
+ * Return an object which will allow forcing scanline decodes to sample in X.
+ *
+ * May create a sampler, if one is not currently being used. Otherwise, does
+ * not affect ownership.
+ *
+ * Only valid during scanline decoding or incremental decoding.
+ */
+ virtual SkSampler* getSampler(bool /*createIfNecessary*/) { return nullptr; }
+
+ friend class DM::CodecSrc; // for fillIncompleteImage
+ friend class SkSampledCodec;
+ friend class SkIcoCodec;
+ friend class SkAndroidCodec; // for fEncodedInfo
+};
+#endif // SkCodec_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkCodecAnimation.h b/gfx/skia/skia/include/codec/SkCodecAnimation.h
new file mode 100644
index 0000000000..c5883e2af2
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkCodecAnimation.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodecAnimation_DEFINED
+#define SkCodecAnimation_DEFINED
+
+namespace SkCodecAnimation {
+ /**
+ * This specifies how the next frame is based on this frame.
+ *
+ * Names are based on the GIF 89a spec.
+ *
+ * The numbers correspond to values in a GIF.
+ */
+ enum class DisposalMethod {
+ /**
+ * The next frame should be drawn on top of this one.
+ *
+ * In a GIF, a value of 0 (not specified) is also treated as Keep.
+ */
+ kKeep = 1,
+
+ /**
+ * Similar to Keep, except the area inside this frame's rectangle
+ * should be cleared to the BackGround color (transparent) before
+ * drawing the next frame.
+ */
+ kRestoreBGColor = 2,
+
+ /**
+ * The next frame should be drawn on top of the previous frame - i.e.
+ * disregarding this one.
+ *
+ * In a GIF, a value of 4 is also treated as RestorePrevious.
+ */
+ kRestorePrevious = 3,
+ };
+
+ /**
+ * How to blend the current frame.
+ */
+ enum class Blend {
+ /**
+ * Blend with the prior frame as if using SkBlendMode::kSrcOver.
+ */
+ kSrcOver,
+
+ /**
+ * Blend with the prior frame as if using SkBlendMode::kSrc.
+ *
+ * This frame's pixels replace the destination pixels.
+ */
+ kSrc,
+ };
+
+} // namespace SkCodecAnimation
+#endif // SkCodecAnimation_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkEncodedImageFormat.h b/gfx/skia/skia/include/codec/SkEncodedImageFormat.h
new file mode 100644
index 0000000000..99ca44e765
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkEncodedImageFormat.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedImageFormat_DEFINED
+#define SkEncodedImageFormat_DEFINED
+
+#include <stdint.h>
+
+/**
+ * Enum describing format of encoded data.
+ */
+enum class SkEncodedImageFormat {
+#ifdef SK_BUILD_FOR_GOOGLE3
+ kUnknown,
+#endif
+ kBMP,
+ kGIF,
+ kICO,
+ kJPEG,
+ kPNG,
+ kWBMP,
+ kWEBP,
+ kPKM,
+ kKTX,
+ kASTC,
+ kDNG,
+ kHEIF,
+ kAVIF,
+ kJPEGXL,
+};
+
+#endif // SkEncodedImageFormat_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkEncodedOrigin.h b/gfx/skia/skia/include/codec/SkEncodedOrigin.h
new file mode 100644
index 0000000000..19d083672f
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkEncodedOrigin.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedOrigin_DEFINED
+#define SkEncodedOrigin_DEFINED
+
+#include "include/core/SkMatrix.h"
+
+// These values match the orientation www.exif.org/Exif2-2.PDF.
+enum SkEncodedOrigin {
+ kTopLeft_SkEncodedOrigin = 1, // Default
+ kTopRight_SkEncodedOrigin = 2, // Reflected across y-axis
+ kBottomRight_SkEncodedOrigin = 3, // Rotated 180
+ kBottomLeft_SkEncodedOrigin = 4, // Reflected across x-axis
+ kLeftTop_SkEncodedOrigin = 5, // Reflected across x-axis, Rotated 90 CCW
+ kRightTop_SkEncodedOrigin = 6, // Rotated 90 CW
+ kRightBottom_SkEncodedOrigin = 7, // Reflected across x-axis, Rotated 90 CW
+ kLeftBottom_SkEncodedOrigin = 8, // Rotated 90 CCW
+ kDefault_SkEncodedOrigin = kTopLeft_SkEncodedOrigin,
+ kLast_SkEncodedOrigin = kLeftBottom_SkEncodedOrigin,
+};
+
+/**
+ * Given an encoded origin and the width and height of the source data, returns a matrix
+ * that transforms the source rectangle with upper left corner at [0, 0] and origin to a correctly
+ * oriented destination rectangle of [0, 0, w, h].
+ */
+static inline SkMatrix SkEncodedOriginToMatrix(SkEncodedOrigin origin, int w, int h) {
+ switch (origin) {
+ case kTopLeft_SkEncodedOrigin: return SkMatrix::I();
+ case kTopRight_SkEncodedOrigin: return SkMatrix::MakeAll(-1, 0, w, 0, 1, 0, 0, 0, 1);
+ case kBottomRight_SkEncodedOrigin: return SkMatrix::MakeAll(-1, 0, w, 0, -1, h, 0, 0, 1);
+ case kBottomLeft_SkEncodedOrigin: return SkMatrix::MakeAll( 1, 0, 0, 0, -1, h, 0, 0, 1);
+ case kLeftTop_SkEncodedOrigin: return SkMatrix::MakeAll( 0, 1, 0, 1, 0, 0, 0, 0, 1);
+ case kRightTop_SkEncodedOrigin: return SkMatrix::MakeAll( 0, -1, w, 1, 0, 0, 0, 0, 1);
+ case kRightBottom_SkEncodedOrigin: return SkMatrix::MakeAll( 0, -1, w, -1, 0, h, 0, 0, 1);
+ case kLeftBottom_SkEncodedOrigin: return SkMatrix::MakeAll( 0, 1, 0, -1, 0, h, 0, 0, 1);
+ }
+ SK_ABORT("Unexpected origin");
+}
+
+/**
+ * Return true if the encoded origin includes a 90 degree rotation, in which case the width
+ * and height of the source data are swapped relative to a correctly oriented destination.
+ */
+static inline bool SkEncodedOriginSwapsWidthHeight(SkEncodedOrigin origin) {
+ return origin >= kLeftTop_SkEncodedOrigin;
+}
+
+#endif // SkEncodedOrigin_DEFINED
diff --git a/gfx/skia/skia/include/codec/SkPngChunkReader.h b/gfx/skia/skia/include/codec/SkPngChunkReader.h
new file mode 100644
index 0000000000..0ee8a9ecc7
--- /dev/null
+++ b/gfx/skia/skia/include/codec/SkPngChunkReader.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPngChunkReader_DEFINED
+#define SkPngChunkReader_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+/**
+ * SkPngChunkReader
+ *
+ * Base class for optional callbacks to retrieve meta/chunk data out of a PNG
+ * encoded image as it is being decoded.
+ * Used by SkCodec.
+ */
+class SkPngChunkReader : public SkRefCnt {
+public:
+ /**
+ * This will be called by the decoder when it sees an unknown chunk.
+ *
+ * Use by SkCodec:
+ * Depending on the location of the unknown chunks, this callback may be
+ * called by
+ * - the factory (NewFromStream/NewFromData)
+ * - getPixels
+ * - startScanlineDecode
+ * - the first call to getScanlines/skipScanlines
+ * The callback may be called from a different thread (e.g. if the SkCodec
+ * is passed to another thread), and it may be called multiple times, if
+ * the SkCodec is used multiple times.
+ *
+ * @param tag Name for this type of chunk.
+ * @param data Data to be interpreted by the subclass.
+ * @param length Number of bytes of data in the chunk.
+ * @return true to continue decoding, or false to indicate an error, which
+ * will cause the decoder to not return the image.
+ */
+ virtual bool readChunk(const char tag[], const void* data, size_t length) = 0;
+};
+#endif // SkPngChunkReader_DEFINED
diff --git a/gfx/skia/skia/include/config/SkUserConfig.h b/gfx/skia/skia/include/config/SkUserConfig.h
new file mode 100644
index 0000000000..96aaad59ae
--- /dev/null
+++ b/gfx/skia/skia/include/config/SkUserConfig.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUserConfig_DEFINED
+#define SkUserConfig_DEFINED
+
+/* SkTypes.h, the root of the public header files, includes this file
+ SkUserConfig.h after first initializing certain Skia defines, letting
+ this file change or augment those flags.
+
+ Below are optional defines that add, subtract, or change default behavior
+ in Skia. Your port can locally edit this file to enable/disable flags as
+ you choose, or these can be declared on your command line (i.e. -Dfoo).
+
+ By default, this #include file will always default to having all the flags
+ commented out, so including it will have no effect.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Skia has lots of debug-only code. Often this is just null checks or other
+ parameter checking, but sometimes it can be quite intrusive (e.g. check that
+ each 32bit pixel is in premultiplied form). This code can be very useful
+ during development, but will slow things down in a shipping product.
+
+ By default, these mutually exclusive flags are defined in SkTypes.h,
+ based on the presence or absence of NDEBUG, but that decision can be changed
+ here.
+*/
+//#define SK_DEBUG
+//#define SK_RELEASE
+
+/* To write debug messages to a console, skia will call SkDebugf(...) following
+ printf conventions (e.g. const char* format, ...). If you want to redirect
+ this to something other than printf, define yours here
+*/
+//#define SkDebugf(...) MyFunction(__VA_ARGS__)
+
+/* Skia has both debug and release asserts. When an assert fails SK_ABORT will
+ be used to report an abort message. SK_ABORT is expected not to return. Skia
+ provides a default implementation which will print the message with SkDebugf
+ and then call sk_abort_no_print.
+*/
+//#define SK_ABORT(message, ...)
+
+/* To specify a different default font strike cache memory limit, define this. If this is
+ undefined, skia will use a built-in value.
+*/
+//#define SK_DEFAULT_FONT_CACHE_LIMIT (1024 * 1024)
+
+/* To specify a different default font strike cache count limit, define this. If this is
+ undefined, skia will use a built-in value.
+*/
+// #define SK_DEFAULT_FONT_CACHE_COUNT_LIMIT 2048
+
+/* To specify the default size of the image cache, undefine this and set it to
+   the desired value (in bytes). SkGraphics.h has a runtime API to set this
+ value as well. If this is undefined, a built-in value will be used.
+*/
+//#define SK_DEFAULT_IMAGE_CACHE_LIMIT (1024 * 1024)
+
+/* Define this to set the upper limit for text to support LCD. Values that
+ are very large increase the cost in the font cache and draw slower, without
+ improving readability. If this is undefined, Skia will use its default
+ value (e.g. 48)
+*/
+//#define SK_MAX_SIZE_FOR_LCDTEXT 48
+
+/* Change the kN32_SkColorType ordering to BGRA to work in X windows.
+*/
+//#define SK_R32_SHIFT 16
+
+/* Determines whether to build code that supports the Ganesh GPU backend. Some classes
+ that are not GPU-specific, such as SkShader subclasses, have optional code
+ that is used allows them to interact with this GPU backend. If you'd like to
+ include this code, include -DSK_GANESH in your cflags or uncomment below.
+ Defaults to not set (No Ganesh GPU backend).
+ This define affects the ABI of Skia, so make sure it matches the client which uses
+ the compiled version of Skia.
+*/
+//#define SK_GANESH
+
+/* Skia makes use of histogram logging macros to trace the frequency of
+ events. By default, Skia provides no-op versions of these macros.
+ Skia consumers can provide their own definitions of these macros to
+ integrate with their histogram collection backend.
+*/
+//#define SK_HISTOGRAM_BOOLEAN(name, sample)
+//#define SK_HISTOGRAM_ENUMERATION(name, sample, enum_size)
+//#define SK_HISTOGRAM_EXACT_LINEAR(name, sample, value_max)
+//#define SK_HISTOGRAM_MEMORY_KB(name, sample)
+
+/* Skia tries to make use of some non-standard C++ language extensions.
+ By default, Skia provides msvc and clang/gcc versions of these macros.
+ Skia consumers can provide their own definitions of these macros to
+ integrate with their own compilers and build system.
+*/
+//#define SK_UNUSED [[maybe_unused]]
+//#define SK_WARN_UNUSED_RESULT [[nodiscard]]
+//#define SK_ALWAYS_INLINE inline __attribute__((always_inline))
+//#define SK_NEVER_INLINE __attribute__((noinline))
+//#define SK_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B))))
+//#define SK_NO_SANITIZE(A) __attribute__((no_sanitize(A)))
+//#define SK_TRIVIAL_ABI [[clang::trivial_abi]]
+
+/*
+ * If compiling Skia as a DLL, public APIs should be exported. Skia will set
+ * SK_API to something sensible for Clang and MSVC, but if clients need to
+ * customize it for their build system or compiler, they may.
+ * If a client needs to use SK_API (e.g. overriding SK_ABORT), then they
+ * *must* define their own, the default will not be defined prior to loading
+ * this file.
+ */
+//#define SK_API __declspec(dllexport)
+
+#define MOZ_SKIA
+
+// On all platforms we have this byte order
+#define SK_A32_SHIFT 24
+#define SK_R32_SHIFT 16
+#define SK_G32_SHIFT 8
+#define SK_B32_SHIFT 0
+
+#define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 0
+
+#define SK_RASTERIZE_EVEN_ROUNDING
+
+#define I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN
+
+#define SK_SUPPORT_GPU 0
+
+#define SK_DISABLE_SLOW_DEBUG_VALIDATION 1
+
+#define SK_DISABLE_TYPEFACE_CACHE
+
+#define SK_USE_FREETYPE_EMBOLDEN
+
+#define SK_IGNORE_MAC_BLENDING_MATCH_FIX
+
+#ifndef MOZ_IMPLICIT
+# ifdef MOZ_CLANG_PLUGIN
+# define MOZ_IMPLICIT __attribute__((annotate("moz_implicit")))
+# else
+# define MOZ_IMPLICIT
+# endif
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkAlphaType.h b/gfx/skia/skia/include/core/SkAlphaType.h
new file mode 100644
index 0000000000..0c99906dfd
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkAlphaType.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAlphaType_DEFINED
+#define SkAlphaType_DEFINED
+
+/** \enum SkAlphaType
+ Describes how to interpret the alpha component of a pixel. A pixel may
+ be opaque, or alpha, describing multiple levels of transparency.
+
+ In simple blending, alpha weights the draw color and the destination
+ color to create a new color. If alpha describes a weight from zero to one:
+
+ new color = draw color * alpha + destination color * (1 - alpha)
+
+ In practice alpha is encoded in two or more bits, where 1.0 equals all bits set.
+
+ RGB may have alpha included in each component value; the stored
+ value is the original RGB multiplied by alpha. Premultiplied color
+ components improve performance.
+*/
+enum SkAlphaType : int {
+ kUnknown_SkAlphaType, //!< uninitialized
+ kOpaque_SkAlphaType, //!< pixel is opaque
+ kPremul_SkAlphaType, //!< pixel components are premultiplied by alpha
+ kUnpremul_SkAlphaType, //!< pixel components are independent of alpha
+ kLastEnum_SkAlphaType = kUnpremul_SkAlphaType, //!< last valid value
+};
+
+/** Returns true if SkAlphaType equals kOpaque_SkAlphaType.
+
+ kOpaque_SkAlphaType is a hint that the SkColorType is opaque, or that all
+ alpha values are set to their 1.0 equivalent. If SkAlphaType is
+ kOpaque_SkAlphaType, and SkColorType is not opaque, then the result of
+   drawing any pixel with an alpha value less than 1.0 is undefined.
+*/
+static inline bool SkAlphaTypeIsOpaque(SkAlphaType at) {
+ return kOpaque_SkAlphaType == at;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkAnnotation.h b/gfx/skia/skia/include/core/SkAnnotation.h
new file mode 100644
index 0000000000..2006f309e9
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkAnnotation.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnnotation_DEFINED
+#define SkAnnotation_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkData;
+struct SkPoint;
+struct SkRect;
+class SkCanvas;
+
+/**
+ * Annotate the canvas by associating the specified URL with the
+ * specified rectangle (in local coordinates, just like drawRect).
+ *
+ * The URL is expected to be escaped and be valid 7-bit ASCII.
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateRectWithURL(SkCanvas*, const SkRect&, SkData*);
+
+/**
+ * Annotate the canvas by associating a name with the specified point.
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateNamedDestination(SkCanvas*, const SkPoint&, SkData*);
+
+/**
+ * Annotate the canvas by making the specified rectangle link to a named
+ * destination.
+ *
+ * If the backend of this canvas does not support annotations, this call is
+ * safely ignored.
+ *
+ * The caller is responsible for managing its ownership of the SkData.
+ */
+SK_API void SkAnnotateLinkToDestination(SkCanvas*, const SkRect&, SkData*);
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBBHFactory.h b/gfx/skia/skia/include/core/SkBBHFactory.h
new file mode 100644
index 0000000000..2507d0f150
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBBHFactory.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBBHFactory_DEFINED
+#define SkBBHFactory_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include <vector>
+
+class SkBBoxHierarchy : public SkRefCnt {
+public:
+ struct Metadata {
+ bool isDraw; // The corresponding SkRect bounds a draw command, not a pure state change.
+ };
+
+ /**
+ * Insert N bounding boxes into the hierarchy.
+ */
+ virtual void insert(const SkRect[], int N) = 0;
+ virtual void insert(const SkRect[], const Metadata[], int N);
+
+ /**
+ * Populate results with the indices of bounding boxes intersecting that query.
+ */
+ virtual void search(const SkRect& query, std::vector<int>* results) const = 0;
+
+ /**
+ * Return approximate size in memory of *this.
+ */
+ virtual size_t bytesUsed() const = 0;
+
+protected:
+ SkBBoxHierarchy() = default;
+ SkBBoxHierarchy(const SkBBoxHierarchy&) = delete;
+ SkBBoxHierarchy& operator=(const SkBBoxHierarchy&) = delete;
+};
+
+class SK_API SkBBHFactory {
+public:
+ /**
+ * Allocate a new SkBBoxHierarchy. Return NULL on failure.
+ */
+ virtual sk_sp<SkBBoxHierarchy> operator()() const = 0;
+ virtual ~SkBBHFactory() {}
+
+protected:
+ SkBBHFactory() = default;
+ SkBBHFactory(const SkBBHFactory&) = delete;
+ SkBBHFactory& operator=(const SkBBHFactory&) = delete;
+};
+
+class SK_API SkRTreeFactory : public SkBBHFactory {
+public:
+ sk_sp<SkBBoxHierarchy> operator()() const override;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBitmap.h b/gfx/skia/skia/include/core/SkBitmap.h
new file mode 100644
index 0000000000..d843643ca7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBitmap.h
@@ -0,0 +1,1268 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmap_DEFINED
+#define SkBitmap_DEFINED
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkCPUTypes.h"
+#include "include/private/base/SkDebug.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SkColorSpace;
+class SkImage;
+class SkMatrix;
+class SkMipmap;
+class SkPaint;
+class SkPixelRef;
+class SkShader;
+enum SkColorType : int;
+enum class SkTileMode;
+struct SkMask;
+
+/** \class SkBitmap
+ SkBitmap describes a two-dimensional raster pixel array. SkBitmap is built on
+ SkImageInfo, containing integer width and height, SkColorType and SkAlphaType
+ describing the pixel format, and SkColorSpace describing the range of colors.
+ SkBitmap points to SkPixelRef, which describes the physical array of pixels.
+ SkImageInfo bounds may be located anywhere fully inside SkPixelRef bounds.
+
+ SkBitmap can be drawn using SkCanvas. SkBitmap can be a drawing destination for SkCanvas
+ draw member functions. SkBitmap flexibility as a pixel container limits some
+ optimizations available to the target platform.
+
+ If pixel array is primarily read-only, use SkImage for better performance.
+ If pixel array is primarily written to, use SkSurface for better performance.
+
+ Declaring SkBitmap const prevents altering SkImageInfo: the SkBitmap height, width,
+ and so on cannot change. It does not affect SkPixelRef: a caller may write its
+ pixels. Declaring SkBitmap const affects SkBitmap configuration, not its contents.
+
+ SkBitmap is not thread safe. Each thread must have its own copy of SkBitmap fields,
+ although threads may share the underlying pixel array.
+*/
+class SK_API SkBitmap {
+public:
+ class SK_API Allocator;
+
+ /** Creates an empty SkBitmap without pixels, with kUnknown_SkColorType,
+ kUnknown_SkAlphaType, and with a width and height of zero. SkPixelRef origin is
+ set to (0, 0).
+
+ Use setInfo() to associate SkColorType, SkAlphaType, width, and height
+ after SkBitmap has been created.
+
+ @return empty SkBitmap
+
+ example: https://fiddle.skia.org/c/@Bitmap_empty_constructor
+ */
+ SkBitmap();
+
+ /** Copies settings from src to returned SkBitmap. Shares pixels if src has pixels
+ allocated, so both bitmaps reference the same pixels.
+
+ @param src SkBitmap to copy SkImageInfo, and share SkPixelRef
+ @return copy of src
+
+ example: https://fiddle.skia.org/c/@Bitmap_copy_const_SkBitmap
+ */
+ SkBitmap(const SkBitmap& src);
+
+ /** Copies settings from src to returned SkBitmap. Moves ownership of src pixels to
+ SkBitmap.
+
+ @param src SkBitmap to copy SkImageInfo, and reassign SkPixelRef
+ @return copy of src
+
+ example: https://fiddle.skia.org/c/@Bitmap_move_SkBitmap
+ */
+ SkBitmap(SkBitmap&& src);
+
+ /** Decrements SkPixelRef reference count, if SkPixelRef is not nullptr.
+ */
+ ~SkBitmap();
+
+ /** Copies settings from src to returned SkBitmap. Shares pixels if src has pixels
+ allocated, so both bitmaps reference the same pixels.
+
+ @param src SkBitmap to copy SkImageInfo, and share SkPixelRef
+ @return copy of src
+
+ example: https://fiddle.skia.org/c/@Bitmap_copy_operator
+ */
+ SkBitmap& operator=(const SkBitmap& src);
+
+ /** Copies settings from src to returned SkBitmap. Moves ownership of src pixels to
+ SkBitmap.
+
+ @param src SkBitmap to copy SkImageInfo, and reassign SkPixelRef
+ @return copy of src
+
+ example: https://fiddle.skia.org/c/@Bitmap_move_operator
+ */
+ SkBitmap& operator=(SkBitmap&& src);
+
+ /** Swaps the fields of the two bitmaps.
+
+ @param other SkBitmap exchanged with original
+
+ example: https://fiddle.skia.org/c/@Bitmap_swap
+ */
+ void swap(SkBitmap& other);
+
+ /** Returns a constant reference to the SkPixmap holding the SkBitmap pixel
+ address, row bytes, and SkImageInfo.
+
+ @return reference to SkPixmap describing this SkBitmap
+ */
+ const SkPixmap& pixmap() const { return fPixmap; }
+
+ /** Returns width, height, SkAlphaType, SkColorType, and SkColorSpace.
+
+ @return reference to SkImageInfo
+ */
+ const SkImageInfo& info() const { return fPixmap.info(); }
+
+ /** Returns pixel count in each row. Should be equal or less than
+ rowBytes() / info().bytesPerPixel().
+
+ May be less than pixelRef().width(). Will not exceed pixelRef().width() less
+ pixelRefOrigin().fX.
+
+ @return pixel width in SkImageInfo
+ */
+ int width() const { return fPixmap.width(); }
+
+ /** Returns pixel row count.
+
+ Maybe be less than pixelRef().height(). Will not exceed pixelRef().height() less
+ pixelRefOrigin().fY.
+
+ @return pixel height in SkImageInfo
+ */
+ int height() const { return fPixmap.height(); }
+
+ SkColorType colorType() const { return fPixmap.colorType(); }
+
+ SkAlphaType alphaType() const { return fPixmap.alphaType(); }
+
+ /** Returns SkColorSpace, the range of colors, associated with SkImageInfo. The
+ reference count of SkColorSpace is unchanged. The returned SkColorSpace is
+ immutable.
+
+ @return SkColorSpace in SkImageInfo, or nullptr
+ */
+ SkColorSpace* colorSpace() const;
+
+ /** Returns smart pointer to SkColorSpace, the range of colors, associated with
+ SkImageInfo. The smart pointer tracks the number of objects sharing this
+ SkColorSpace reference so the memory is released when the owners destruct.
+
+ The returned SkColorSpace is immutable.
+
+ @return SkColorSpace in SkImageInfo wrapped in a smart pointer
+ */
+ sk_sp<SkColorSpace> refColorSpace() const;
+
+ /** Returns number of bytes per pixel required by SkColorType.
+ Returns zero if colorType( is kUnknown_SkColorType.
+
+ @return bytes in pixel
+ */
+ int bytesPerPixel() const { return fPixmap.info().bytesPerPixel(); }
+
+ /** Returns number of pixels that fit on row. Should be greater than or equal to
+ width().
+
+ @return maximum pixels per row
+ */
+ int rowBytesAsPixels() const { return fPixmap.rowBytesAsPixels(); }
+
+ /** Returns bit shift converting row bytes to row pixels.
+ Returns zero for kUnknown_SkColorType.
+
+ @return one of: 0, 1, 2, 3; left shift to convert pixels to bytes
+ */
+ int shiftPerPixel() const { return fPixmap.shiftPerPixel(); }
+
+ /** Returns true if either width() or height() are zero.
+
+ Does not check if SkPixelRef is nullptr; call drawsNothing() to check width(),
+ height(), and SkPixelRef.
+
+ @return true if dimensions do not enclose area
+ */
+ bool empty() const { return fPixmap.info().isEmpty(); }
+
+ /** Returns true if SkPixelRef is nullptr.
+
+ Does not check if width() or height() are zero; call drawsNothing() to check
+ width(), height(), and SkPixelRef.
+
+ @return true if no SkPixelRef is associated
+ */
+ bool isNull() const { return nullptr == fPixelRef; }
+
+ /** Returns true if width() or height() are zero, or if SkPixelRef is nullptr.
+ If true, SkBitmap has no effect when drawn or drawn into.
+
+ @return true if drawing has no effect
+ */
+ bool drawsNothing() const {
+ return this->empty() || this->isNull();
+ }
+
+ /** Returns row bytes, the interval from one pixel row to the next. Row bytes
+ is at least as large as: width() * info().bytesPerPixel().
+
+ Returns zero if colorType() is kUnknown_SkColorType, or if row bytes supplied to
+ setInfo() is not large enough to hold a row of pixels.
+
+ @return byte length of pixel row
+ */
+ size_t rowBytes() const { return fPixmap.rowBytes(); }
+
+ /** Sets SkAlphaType, if alphaType is compatible with SkColorType.
+ Returns true unless alphaType is kUnknown_SkAlphaType and current SkAlphaType
+ is not kUnknown_SkAlphaType.
+
+ Returns true if SkColorType is kUnknown_SkColorType. alphaType is ignored, and
+ SkAlphaType remains kUnknown_SkAlphaType.
+
+ Returns true if SkColorType is kRGB_565_SkColorType or kGray_8_SkColorType.
+ alphaType is ignored, and SkAlphaType remains kOpaque_SkAlphaType.
+
+ If SkColorType is kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kBGRA_8888_SkColorType, or kRGBA_F16_SkColorType: returns true unless
+ alphaType is kUnknown_SkAlphaType and SkAlphaType is not kUnknown_SkAlphaType.
+ If SkAlphaType is kUnknown_SkAlphaType, alphaType is ignored.
+
+ If SkColorType is kAlpha_8_SkColorType, returns true unless
+ alphaType is kUnknown_SkAlphaType and SkAlphaType is not kUnknown_SkAlphaType.
+ If SkAlphaType is kUnknown_SkAlphaType, alphaType is ignored. If alphaType is
+ kUnpremul_SkAlphaType, it is treated as kPremul_SkAlphaType.
+
+ This changes SkAlphaType in SkPixelRef; all bitmaps sharing SkPixelRef
+ are affected.
+
+ @return true if SkAlphaType is set
+
+ example: https://fiddle.skia.org/c/@Bitmap_setAlphaType
+ */
+ bool setAlphaType(SkAlphaType alphaType);
+
+ /** Returns pixel address, the base address corresponding to the pixel origin.
+
+ @return pixel address
+ */
+ void* getPixels() const { return fPixmap.writable_addr(); }
+
+ /** Returns minimum memory required for pixel storage.
+ Does not include unused memory on last row when rowBytesAsPixels() exceeds width().
+ Returns SIZE_MAX if result does not fit in size_t.
+ Returns zero if height() or width() is 0.
+ Returns height() times rowBytes() if colorType() is kUnknown_SkColorType.
+
+ @return size in bytes of image buffer
+ */
+ size_t computeByteSize() const { return fPixmap.computeByteSize(); }
+
+ /** Returns true if pixels can not change.
+
+ Most immutable SkBitmap checks trigger an assert only on debug builds.
+
+ @return true if pixels are immutable
+
+ example: https://fiddle.skia.org/c/@Bitmap_isImmutable
+ */
+ bool isImmutable() const;
+
+ /** Sets internal flag to mark SkBitmap as immutable. Once set, pixels can not change.
+ Any other bitmap sharing the same SkPixelRef are also marked as immutable.
+ Once SkPixelRef is marked immutable, the setting cannot be cleared.
+
+ Writing to immutable SkBitmap pixels triggers an assert on debug builds.
+
+ example: https://fiddle.skia.org/c/@Bitmap_setImmutable
+ */
+ void setImmutable();
+
+ /** Returns true if SkAlphaType is set to hint that all pixels are opaque; their
+ alpha value is implicitly or explicitly 1.0. If true, and all pixels are
+ not opaque, Skia may draw incorrectly.
+
+ Does not check if SkColorType allows alpha, or if any pixel value has
+ transparency.
+
+ @return true if SkImageInfo SkAlphaType is kOpaque_SkAlphaType
+ */
+ bool isOpaque() const {
+ return SkAlphaTypeIsOpaque(this->alphaType());
+ }
+
+ /** Resets to its initial state; all fields are set to zero, as if SkBitmap had
+ been initialized by SkBitmap().
+
+ Sets width, height, row bytes to zero; pixel address to nullptr; SkColorType to
+ kUnknown_SkColorType; and SkAlphaType to kUnknown_SkAlphaType.
+
+ If SkPixelRef is allocated, its reference count is decreased by one, releasing
+ its memory if SkBitmap is the sole owner.
+
+ example: https://fiddle.skia.org/c/@Bitmap_reset
+ */
+ void reset();
+
+ /** Returns true if all pixels are opaque. SkColorType determines how pixels
+ are encoded, and whether pixel describes alpha. Returns true for SkColorType
+ without alpha in each pixel; for other SkColorType, returns true if all
+ pixels have alpha values equivalent to 1.0 or greater.
+
+ For SkColorType kRGB_565_SkColorType or kGray_8_SkColorType: always
+ returns true. For SkColorType kAlpha_8_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_8888_SkColorType: returns true if all pixel alpha values are 255.
+ For SkColorType kARGB_4444_SkColorType: returns true if all pixel alpha values are 15.
+ For kRGBA_F16_SkColorType: returns true if all pixel alpha values are 1.0 or
+ greater.
+
+ Returns false for kUnknown_SkColorType.
+
+ @param bm SkBitmap to check
+ @return true if all pixels have opaque values or SkColorType is opaque
+ */
+ static bool ComputeIsOpaque(const SkBitmap& bm) {
+ return bm.pixmap().computeIsOpaque();
+ }
+
+ /** Returns SkRect { 0, 0, width(), height() }.
+
+ @param bounds container for floating point rectangle
+
+ example: https://fiddle.skia.org/c/@Bitmap_getBounds
+ */
+ void getBounds(SkRect* bounds) const;
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @param bounds container for integral rectangle
+
+ example: https://fiddle.skia.org/c/@Bitmap_getBounds_2
+ */
+ void getBounds(SkIRect* bounds) const;
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @return integral rectangle from origin to width() and height()
+ */
+ SkIRect bounds() const { return fPixmap.info().bounds(); }
+
+ /** Returns SkISize { width(), height() }.
+
+ @return integral size of width() and height()
+ */
+ SkISize dimensions() const { return fPixmap.info().dimensions(); }
+
+ /** Returns the bounds of this bitmap, offset by its SkPixelRef origin.
+
+ @return bounds within SkPixelRef bounds
+ */
+ SkIRect getSubset() const {
+ SkIPoint origin = this->pixelRefOrigin();
+ return SkIRect::MakeXYWH(origin.x(), origin.y(), this->width(), this->height());
+ }
+
+ /** Sets width, height, SkAlphaType, SkColorType, SkColorSpace, and optional
+ rowBytes. Frees pixels, and returns true if successful.
+
+ imageInfo.alphaType() may be altered to a value permitted by imageInfo.colorSpace().
+ If imageInfo.colorType() is kUnknown_SkColorType, imageInfo.alphaType() is
+ set to kUnknown_SkAlphaType.
+ If imageInfo.colorType() is kAlpha_8_SkColorType and imageInfo.alphaType() is
+ kUnpremul_SkAlphaType, imageInfo.alphaType() is replaced by kPremul_SkAlphaType.
+ If imageInfo.colorType() is kRGB_565_SkColorType or kGray_8_SkColorType,
+ imageInfo.alphaType() is set to kOpaque_SkAlphaType.
+ If imageInfo.colorType() is kARGB_4444_SkColorType, kRGBA_8888_SkColorType,
+ kBGRA_8888_SkColorType, or kRGBA_F16_SkColorType: imageInfo.alphaType() remains
+ unchanged.
+
+ rowBytes must equal or exceed imageInfo.minRowBytes(). If imageInfo.colorSpace() is
+ kUnknown_SkColorType, rowBytes is ignored and treated as zero; for all other
+ SkColorSpace values, rowBytes of zero is treated as imageInfo.minRowBytes().
+
+ Calls reset() and returns false if:
+ - rowBytes exceeds 31 bits
+ - imageInfo.width() is negative
+ - imageInfo.height() is negative
+ - rowBytes is positive and less than imageInfo.width() times imageInfo.bytesPerPixel()
+
+ @param imageInfo contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param rowBytes imageInfo.minRowBytes() or larger; or zero
+ @return true if SkImageInfo set successfully
+
+ example: https://fiddle.skia.org/c/@Bitmap_setInfo
+ */
+ bool setInfo(const SkImageInfo& imageInfo, size_t rowBytes = 0);
+
+ /** \enum SkBitmap::AllocFlags
+ AllocFlags is obsolete. We always zero pixel memory when allocated.
+ */
+ enum AllocFlags {
+ kZeroPixels_AllocFlag = 1 << 0, //!< zero pixel memory. No effect. This is the default.
+ };
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory. Memory is zeroed.
+
+ Returns false and calls reset() if SkImageInfo could not be set, or memory could
+ not be allocated, or memory could not optionally be zeroed.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of calloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param flags kZeroPixels_AllocFlag, or zero
+ @return true if pixels allocation is successful
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixelsFlags(const SkImageInfo& info, uint32_t flags);
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory. Memory is zeroed.
+
+ Aborts execution if SkImageInfo could not be set, or memory could
+ not be allocated, or memory could not optionally
+ be zeroed. Abort steps may be provided by the user at compile time by defining
+ SK_ABORT.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of calloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param flags kZeroPixels_AllocFlag, or zero
+
+ example: https://fiddle.skia.org/c/@Bitmap_allocPixelsFlags
+ */
+ void allocPixelsFlags(const SkImageInfo& info, uint32_t flags);
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory. rowBytes must equal or exceed info.width() times info.bytesPerPixel(),
+ or equal zero. Pass in zero for rowBytes to compute the minimum valid value.
+
+ Returns false and calls reset() if SkImageInfo could not be set, or memory could
+ not be allocated.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of malloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param rowBytes size of pixel row or larger; may be zero
+ @return true if pixel storage is allocated
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo& info, size_t rowBytes);
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory. rowBytes must equal or exceed info.width() times info.bytesPerPixel(),
+ or equal zero. Pass in zero for rowBytes to compute the minimum valid value.
+
+ Aborts execution if SkImageInfo could not be set, or memory could
+ not be allocated. Abort steps may be provided by
+ the user at compile time by defining SK_ABORT.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of malloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param rowBytes size of pixel row or larger; may be zero
+
+ example: https://fiddle.skia.org/c/@Bitmap_allocPixels
+ */
+ void allocPixels(const SkImageInfo& info, size_t rowBytes);
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory.
+
+ Returns false and calls reset() if SkImageInfo could not be set, or memory could
+ not be allocated.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of malloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @return true if pixel storage is allocated
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(const SkImageInfo& info) {
+ return this->tryAllocPixels(info, info.minRowBytes());
+ }
+
+ /** Sets SkImageInfo to info following the rules in setInfo() and allocates pixel
+ memory.
+
+ Aborts execution if SkImageInfo could not be set, or memory could
+ not be allocated. Abort steps may be provided by
+ the user at compile time by defining SK_ABORT.
+
+ On most platforms, allocating pixel memory may succeed even though there is
+ not sufficient memory to hold pixels; allocation does not take place
+ until the pixels are written to. The actual behavior depends on the platform
+ implementation of malloc().
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+
+ example: https://fiddle.skia.org/c/@Bitmap_allocPixels_2
+ */
+ void allocPixels(const SkImageInfo& info);
+
+ /** Sets SkImageInfo to width, height, and native color type; and allocates
+ pixel memory. If isOpaque is true, sets SkImageInfo to kOpaque_SkAlphaType;
+ otherwise, sets to kPremul_SkAlphaType.
+
+ Calls reset() and returns false if width exceeds 29 bits or is negative,
+ or height is negative.
+
+ Returns false if allocation fails.
+
+ Use to create SkBitmap that matches SkPMColor, the native pixel arrangement on
+ the platform. SkBitmap drawn to output device skips converting its pixel format.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param isOpaque true if pixels do not have transparency
+ @return true if pixel storage is allocated
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocN32Pixels(int width, int height, bool isOpaque = false);
+
+ /** Sets SkImageInfo to width, height, and the native color type; and allocates
+ pixel memory. If isOpaque is true, sets SkImageInfo to kOpaque_SkAlphaType;
+ otherwise, sets to kPremul_SkAlphaType.
+
+ Aborts if width exceeds 29 bits or is negative, or height is negative, or
+ allocation fails. Abort steps may be provided by the user at compile time by
+ defining SK_ABORT.
+
+ Use to create SkBitmap that matches SkPMColor, the native pixel arrangement on
+ the platform. SkBitmap drawn to output device skips converting its pixel format.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param isOpaque true if pixels do not have transparency
+
+ example: https://fiddle.skia.org/c/@Bitmap_allocN32Pixels
+ */
+ void allocN32Pixels(int width, int height, bool isOpaque = false);
+
+ /** Sets SkImageInfo to info following the rules in setInfo(), and creates SkPixelRef
+ containing pixels and rowBytes. releaseProc, if not nullptr, is called
+ immediately on failure or when pixels are no longer referenced. context may be
+ nullptr.
+
+ If SkImageInfo could not be set, or rowBytes is less than info.minRowBytes():
+ calls releaseProc if present, calls reset(), and returns false.
+
+ Otherwise, if pixels equals nullptr: sets SkImageInfo, calls releaseProc if
+ present, returns true.
+
+ If SkImageInfo is set, pixels is not nullptr, and releaseProc is not nullptr:
+ when pixels are no longer referenced, calls releaseProc with pixels and context
+ as parameters.
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param pixels address or pixel storage; may be nullptr
+ @param rowBytes size of pixel row or larger
+ @param releaseProc function called when pixels can be deleted; may be nullptr
+ @param context caller state passed to releaseProc; may be nullptr
+ @return true if SkImageInfo is set to info
+ */
+ bool installPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ void (*releaseProc)(void* addr, void* context), void* context);
+
+ /** Sets SkImageInfo to info following the rules in setInfo(), and creates SkPixelRef
+ containing pixels and rowBytes.
+
+ If SkImageInfo could not be set, or rowBytes is less than info.minRowBytes():
+ calls reset(), and returns false.
+
+ Otherwise, if pixels equals nullptr: sets SkImageInfo, returns true.
+
+ Caller must ensure that pixels are valid for the lifetime of SkBitmap and SkPixelRef.
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param pixels address or pixel storage; may be nullptr
+ @param rowBytes size of pixel row or larger
+ @return true if SkImageInfo is set to info
+ */
+ bool installPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ return this->installPixels(info, pixels, rowBytes, nullptr, nullptr);
+ }
+
+ /** Sets SkImageInfo to pixmap.info() following the rules in setInfo(), and creates
+ SkPixelRef containing pixmap.addr() and pixmap.rowBytes().
+
+ If SkImageInfo could not be set, or pixmap.rowBytes() is less than
+ SkImageInfo::minRowBytes(): calls reset(), and returns false.
+
+ Otherwise, if pixmap.addr() equals nullptr: sets SkImageInfo, returns true.
+
+ Caller must ensure that pixmap is valid for the lifetime of SkBitmap and SkPixelRef.
+
+ @param pixmap SkImageInfo, pixel address, and rowBytes()
+ @return true if SkImageInfo was set to pixmap.info()
+
+ example: https://fiddle.skia.org/c/@Bitmap_installPixels_3
+ */
+ bool installPixels(const SkPixmap& pixmap);
+
+ /** Deprecated.
+ */
+ bool installMaskPixels(const SkMask& mask);
+
+ /** Replaces SkPixelRef with pixels, preserving SkImageInfo and rowBytes().
+ Sets SkPixelRef origin to (0, 0).
+
+ If pixels is nullptr, or if info().colorType() equals kUnknown_SkColorType;
+ release reference to SkPixelRef, and set SkPixelRef to nullptr.
+
+ Caller is responsible for handling ownership pixel memory for the lifetime
+ of SkBitmap and SkPixelRef.
+
+ @param pixels address of pixel storage, managed by caller
+
+ example: https://fiddle.skia.org/c/@Bitmap_setPixels
+ */
+ void setPixels(void* pixels);
+
+ /** Allocates pixel memory with HeapAllocator, and replaces existing SkPixelRef.
+ The allocation size is determined by SkImageInfo width, height, and SkColorType.
+
+ Returns false if info().colorType() is kUnknown_SkColorType, or allocation fails.
+
+ @return true if the allocation succeeds
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels() {
+ return this->tryAllocPixels((Allocator*)nullptr);
+ }
+
+ /** Allocates pixel memory with HeapAllocator, and replaces existing SkPixelRef.
+ The allocation size is determined by SkImageInfo width, height, and SkColorType.
+
+ Aborts if info().colorType() is kUnknown_SkColorType, or allocation fails.
+ Abort steps may be provided by the user at compile
+ time by defining SK_ABORT.
+
+ example: https://fiddle.skia.org/c/@Bitmap_allocPixels_3
+ */
+ void allocPixels();
+
+ /** Allocates pixel memory with allocator, and replaces existing SkPixelRef.
+ The allocation size is determined by SkImageInfo width, height, and SkColorType.
+ If allocator is nullptr, use HeapAllocator instead.
+
+ Returns false if Allocator::allocPixelRef return false.
+
+ @param allocator instance of SkBitmap::Allocator instantiation
+ @return true if custom allocator reports success
+ */
+ bool SK_WARN_UNUSED_RESULT tryAllocPixels(Allocator* allocator);
+
+ /** Allocates pixel memory with allocator, and replaces existing SkPixelRef.
+ The allocation size is determined by SkImageInfo width, height, and SkColorType.
+ If allocator is nullptr, use HeapAllocator instead.
+
+ Aborts if Allocator::allocPixelRef return false. Abort steps may be provided by
+ the user at compile time by defining SK_ABORT.
+
+ @param allocator instance of SkBitmap::Allocator instantiation
+
+ example: https://fiddle.skia.org/c/@Bitmap_allocPixels_4
+ */
+ void allocPixels(Allocator* allocator);
+
+ /** Returns SkPixelRef, which contains: pixel base address; its dimensions; and
+ rowBytes(), the interval from one row to the next. Does not change SkPixelRef
+ reference count. SkPixelRef may be shared by multiple bitmaps.
+ If SkPixelRef has not been set, returns nullptr.
+
+ @return SkPixelRef, or nullptr
+ */
+ SkPixelRef* pixelRef() const { return fPixelRef.get(); }
+
+ /** Returns origin of pixels within SkPixelRef. SkBitmap bounds is always contained
+ by SkPixelRef bounds, which may be the same size or larger. Multiple SkBitmap
+ can share the same SkPixelRef, where each SkBitmap has different bounds.
+
+ The returned origin added to SkBitmap dimensions equals or is smaller than the
+ SkPixelRef dimensions.
+
+ Returns (0, 0) if SkPixelRef is nullptr.
+
+ @return pixel origin within SkPixelRef
+
+ example: https://fiddle.skia.org/c/@Bitmap_pixelRefOrigin
+ */
+ SkIPoint pixelRefOrigin() const;
+
+ /** Replaces pixelRef and origin in SkBitmap. dx and dy specify the offset
+ within the SkPixelRef pixels for the top-left corner of the bitmap.
+
+ Asserts in debug builds if dx or dy are out of range. Pins dx and dy
+ to legal range in release builds.
+
+ The caller is responsible for ensuring that the pixels match the
+ SkColorType and SkAlphaType in SkImageInfo.
+
+ @param pixelRef SkPixelRef describing pixel address and rowBytes()
+ @param dx column offset in SkPixelRef for bitmap origin
+ @param dy row offset in SkPixelRef for bitmap origin
+
+ example: https://fiddle.skia.org/c/@Bitmap_setPixelRef
+ */
+ void setPixelRef(sk_sp<SkPixelRef> pixelRef, int dx, int dy);
+
+ /** Returns true if SkBitmap is can be drawn.
+
+ @return true if getPixels() is not nullptr
+ */
+ bool readyToDraw() const {
+ return this->getPixels() != nullptr;
+ }
+
+ /** Returns a unique value corresponding to the pixels in SkPixelRef.
+ Returns a different value after notifyPixelsChanged() has been called.
+ Returns zero if SkPixelRef is nullptr.
+
+ Determines if pixels have changed since last examined.
+
+ @return unique value for pixels in SkPixelRef
+
+ example: https://fiddle.skia.org/c/@Bitmap_getGenerationID
+ */
+ uint32_t getGenerationID() const;
+
+ /** Marks that pixels in SkPixelRef have changed. Subsequent calls to
+ getGenerationID() return a different value.
+
+ example: https://fiddle.skia.org/c/@Bitmap_notifyPixelsChanged
+ */
+ void notifyPixelsChanged() const;
+
+ /** Replaces pixel values with c, interpreted as being in the sRGB SkColorSpace.
+ All pixels contained by bounds() are affected. If the colorType() is
+ kGray_8_SkColorType or kRGB_565_SkColorType, then alpha is ignored; RGB is
+ treated as opaque. If colorType() is kAlpha_8_SkColorType, then RGB is ignored.
+
+ @param c unpremultiplied color
+ @param colorSpace SkColorSpace of c
+
+ example: https://fiddle.skia.org/c/@Bitmap_eraseColor
+ */
+ void eraseColor(SkColor4f c, SkColorSpace* colorSpace = nullptr) const;
+
+ /** Replaces pixel values with c, interpreted as being in the sRGB SkColorSpace.
+ All pixels contained by bounds() are affected. If the colorType() is
+ kGray_8_SkColorType or kRGB_565_SkColorType, then alpha is ignored; RGB is
+ treated as opaque. If colorType() is kAlpha_8_SkColorType, then RGB is ignored.
+
+ Input color is ultimately converted to an SkColor4f, so eraseColor(SkColor4f c)
+ will have higher color resolution.
+
+ @param c unpremultiplied color.
+
+ example: https://fiddle.skia.org/c/@Bitmap_eraseColor
+ */
+ void eraseColor(SkColor c) const;
+
+ /** Replaces pixel values with unpremultiplied color built from a, r, g, and b,
+ interpreted as being in the sRGB SkColorSpace. All pixels contained by
+ bounds() are affected. If the colorType() is kGray_8_SkColorType or
+ kRGB_565_SkColorType, then a is ignored; r, g, and b are treated as opaque.
+ If colorType() is kAlpha_8_SkColorType, then r, g, and b are ignored.
+
+ @param a amount of alpha, from fully transparent (0) to fully opaque (255)
+ @param r amount of red, from no red (0) to full red (255)
+ @param g amount of green, from no green (0) to full green (255)
+ @param b amount of blue, from no blue (0) to full blue (255)
+ */
+ void eraseARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) const {
+ this->eraseColor(SkColorSetARGB(a, r, g, b));
+ }
+
+    /** Replaces pixel values inside area with c, interpreted as being in the sRGB
+ SkColorSpace. If area does not intersect bounds(), call has no effect.
+
+ If the colorType() is kGray_8_SkColorType or kRGB_565_SkColorType, then alpha
+ is ignored; RGB is treated as opaque. If colorType() is kAlpha_8_SkColorType,
+ then RGB is ignored.
+
+ @param c unpremultiplied color
+ @param area rectangle to fill
+ @param colorSpace SkColorSpace of c
+
+ example: https://fiddle.skia.org/c/@Bitmap_erase
+ */
+ void erase(SkColor4f c, SkColorSpace* colorSpace, const SkIRect& area) const;
+ void erase(SkColor4f c, const SkIRect& area) const;
+
+    /** Replaces pixel values inside area with c, interpreted as being in the sRGB
+ SkColorSpace. If area does not intersect bounds(), call has no effect.
+
+ If the colorType() is kGray_8_SkColorType or kRGB_565_SkColorType, then alpha
+ is ignored; RGB is treated as opaque. If colorType() is kAlpha_8_SkColorType,
+ then RGB is ignored.
+
+ Input color is ultimately converted to an SkColor4f, so erase(SkColor4f c)
+ will have higher color resolution.
+
+ @param c unpremultiplied color
+ @param area rectangle to fill
+
+ example: https://fiddle.skia.org/c/@Bitmap_erase
+ */
+ void erase(SkColor c, const SkIRect& area) const;
+
+ /** Deprecated.
+ */
+ void eraseArea(const SkIRect& area, SkColor c) const {
+ this->erase(c, area);
+ }
+
+ /** Returns pixel at (x, y) as unpremultiplied color.
+ Returns black with alpha if SkColorType is kAlpha_8_SkColorType.
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined; and returns undefined values or may crash if
+ SK_RELEASE is defined. Fails if SkColorType is kUnknown_SkColorType or
+ pixel address is nullptr.
+
+ SkColorSpace in SkImageInfo is ignored. Some color precision may be lost in the
+ conversion to unpremultiplied color; original pixel data may have additional
+ precision.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return pixel converted to unpremultiplied color
+ */
+ SkColor getColor(int x, int y) const {
+ return this->pixmap().getColor(x, y);
+ }
+
+ /** Returns pixel at (x, y) as unpremultiplied float color.
+ Returns black with alpha if SkColorType is kAlpha_8_SkColorType.
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined; and returns undefined values or may crash if
+ SK_RELEASE is defined. Fails if SkColorType is kUnknown_SkColorType or
+ pixel address is nullptr.
+
+ SkColorSpace in SkImageInfo is ignored. Some color precision may be lost in the
+ conversion to unpremultiplied color.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return pixel converted to unpremultiplied color
+ */
+ SkColor4f getColor4f(int x, int y) const { return this->pixmap().getColor4f(x, y); }
+
+ /** Look up the pixel at (x,y) and return its alpha component, normalized to [0..1].
+        This is roughly equivalent to SkGetColorA(getColor()), but can be more efficient
+ (and more precise if the pixels store more than 8 bits per component).
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return alpha converted to normalized float
+ */
+ float getAlphaf(int x, int y) const {
+ return this->pixmap().getAlphaf(x, y);
+ }
+
+ /** Returns pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y, or kUnknown_SkColorType,
+ trigger an assert() if built with SK_DEBUG defined. Returns nullptr if
+ SkColorType is kUnknown_SkColorType, or SkPixelRef is nullptr.
+
+ Performs a lookup of pixel size; for better performance, call
+ one of: getAddr8(), getAddr16(), or getAddr32().
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return generic pointer to pixel
+
+ example: https://fiddle.skia.org/c/@Bitmap_getAddr
+ */
+ void* getAddr(int x, int y) const;
+
+ /** Returns address at (x, y).
+
+ Input is not validated. Triggers an assert() if built with SK_DEBUG defined and:
+ - SkPixelRef is nullptr
+ - bytesPerPixel() is not four
+ - x is negative, or not less than width()
+ - y is negative, or not less than height()
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return unsigned 32-bit pointer to pixel at (x, y)
+ */
+ inline uint32_t* getAddr32(int x, int y) const;
+
+ /** Returns address at (x, y).
+
+ Input is not validated. Triggers an assert() if built with SK_DEBUG defined and:
+ - SkPixelRef is nullptr
+ - bytesPerPixel() is not two
+ - x is negative, or not less than width()
+ - y is negative, or not less than height()
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return unsigned 16-bit pointer to pixel at (x, y)
+ */
+ inline uint16_t* getAddr16(int x, int y) const;
+
+ /** Returns address at (x, y).
+
+ Input is not validated. Triggers an assert() if built with SK_DEBUG defined and:
+ - SkPixelRef is nullptr
+ - bytesPerPixel() is not one
+ - x is negative, or not less than width()
+ - y is negative, or not less than height()
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return unsigned 8-bit pointer to pixel at (x, y)
+ */
+ inline uint8_t* getAddr8(int x, int y) const;
+
+ /** Shares SkPixelRef with dst. Pixels are not copied; SkBitmap and dst point
+ to the same pixels; dst bounds() are set to the intersection of subset
+ and the original bounds().
+
+ subset may be larger than bounds(). Any area outside of bounds() is ignored.
+
+ Any contents of dst are discarded.
+
+ Return false if:
+ - dst is nullptr
+ - SkPixelRef is nullptr
+ - subset does not intersect bounds()
+
+ @param dst SkBitmap set to subset
+ @param subset rectangle of pixels to reference
+ @return true if dst is replaced by subset
+
+ example: https://fiddle.skia.org/c/@Bitmap_extractSubset
+ */
+ bool extractSubset(SkBitmap* dst, const SkIRect& subset) const;
+
+ /** Copies a SkRect of pixels from SkBitmap to dstPixels. Copy starts at (srcX, srcY),
+ and does not exceed SkBitmap (width(), height()).
+
+ dstInfo specifies width, height, SkColorType, SkAlphaType, and SkColorSpace of
+        destination. dstRowBytes specifies the gap from one destination row to the next.
+ Returns true if pixels are copied. Returns false if:
+ - dstInfo has no address
+ - dstRowBytes is less than dstInfo.minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must
+ match. If SkBitmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(srcX) >= Bitmap width(), or if abs(srcY) >= Bitmap height().
+
+ @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace
+ @param dstPixels destination pixel storage
+ @param dstRowBytes destination row length
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @return true if pixels are copied to dstPixels
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY) const;
+
+ /** Copies a SkRect of pixels from SkBitmap to dst. Copy starts at (srcX, srcY), and
+ does not exceed SkBitmap (width(), height()).
+
+ dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of destination. dst.rowBytes() specifies the gap from one destination
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - dst pixel storage equals nullptr
+ - dst.rowBytes is less than SkImageInfo::minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must
+ match. If SkBitmap colorSpace() is nullptr, dst SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(srcX) >= Bitmap width(), or if abs(srcY) >= Bitmap height().
+
+ @param dst destination SkPixmap: SkImageInfo, pixels, row bytes
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @return true if pixels are copied to dst
+
+ example: https://fiddle.skia.org/c/@Bitmap_readPixels_2
+ */
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY) const;
+
+ /** Copies a SkRect of pixels from SkBitmap to dst. Copy starts at (0, 0), and
+ does not exceed SkBitmap (width(), height()).
+
+ dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of destination. dst.rowBytes() specifies the gap from one destination
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - dst pixel storage equals nullptr
+ - dst.rowBytes is less than SkImageInfo::minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must
+ match. If SkBitmap colorSpace() is nullptr, dst SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ @param dst destination SkPixmap: SkImageInfo, pixels, row bytes
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(const SkPixmap& dst) const {
+ return this->readPixels(dst, 0, 0);
+ }
+
+ /** Copies a SkRect of pixels from src. Copy starts at (dstX, dstY), and does not exceed
+ (src.width(), src.height()).
+
+ src specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of source. src.rowBytes() specifies the gap from one source
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - src pixel storage equals nullptr
+ - src.rowBytes is less than SkImageInfo::minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; src SkColorType must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, src SkColorSpace must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, src SkAlphaType must
+ match. If SkBitmap colorSpace() is nullptr, src SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ dstX and dstY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(dstX) >= Bitmap width(), or if abs(dstY) >= Bitmap height().
+
+ @param src source SkPixmap: SkImageInfo, pixels, row bytes
+ @param dstX column index whose absolute value is less than width()
+ @param dstY row index whose absolute value is less than height()
+ @return true if src pixels are copied to SkBitmap
+
+ example: https://fiddle.skia.org/c/@Bitmap_writePixels
+ */
+ bool writePixels(const SkPixmap& src, int dstX, int dstY);
+
+ /** Copies a SkRect of pixels from src. Copy starts at (0, 0), and does not exceed
+ (src.width(), src.height()).
+
+ src specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of source. src.rowBytes() specifies the gap from one source
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - src pixel storage equals nullptr
+ - src.rowBytes is less than SkImageInfo::minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkBitmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; src SkColorType must match.
+ If SkBitmap colorType() is kGray_8_SkColorType, src SkColorSpace must match.
+ If SkBitmap alphaType() is kOpaque_SkAlphaType, src SkAlphaType must
+ match. If SkBitmap colorSpace() is nullptr, src SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ @param src source SkPixmap: SkImageInfo, pixels, row bytes
+ @return true if src pixels are copied to SkBitmap
+ */
+ bool writePixels(const SkPixmap& src) {
+ return this->writePixels(src, 0, 0);
+ }
+
+ /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to
+ or dst pixels cannot be allocated.
+
+ Uses HeapAllocator to reserve memory for dst SkPixelRef.
+
+ @param dst holds SkPixelRef to fill with alpha layer
+ @return true if alpha layer was constructed in dst SkPixelRef
+ */
+ bool extractAlpha(SkBitmap* dst) const {
+ return this->extractAlpha(dst, nullptr, nullptr, nullptr);
+ }
+
+ /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to
+ or dst pixels cannot be allocated.
+
+ If paint is not nullptr and contains SkMaskFilter, SkMaskFilter
+ generates mask alpha from SkBitmap. Uses HeapAllocator to reserve memory for dst
+ SkPixelRef. Sets offset to top-left position for dst for alignment with SkBitmap;
+ (0, 0) unless SkMaskFilter generates mask.
+
+ @param dst holds SkPixelRef to fill with alpha layer
+ @param paint holds optional SkMaskFilter; may be nullptr
+ @param offset top-left position for dst; may be nullptr
+ @return true if alpha layer was constructed in dst SkPixelRef
+ */
+ bool extractAlpha(SkBitmap* dst, const SkPaint* paint,
+ SkIPoint* offset) const {
+ return this->extractAlpha(dst, paint, nullptr, offset);
+ }
+
+ /** Sets dst to alpha described by pixels. Returns false if dst cannot be written to
+ or dst pixels cannot be allocated.
+
+ If paint is not nullptr and contains SkMaskFilter, SkMaskFilter
+ generates mask alpha from SkBitmap. allocator may reference a custom allocation
+ class or be set to nullptr to use HeapAllocator. Sets offset to top-left
+ position for dst for alignment with SkBitmap; (0, 0) unless SkMaskFilter generates
+ mask.
+
+ @param dst holds SkPixelRef to fill with alpha layer
+ @param paint holds optional SkMaskFilter; may be nullptr
+ @param allocator function to reserve memory for SkPixelRef; may be nullptr
+ @param offset top-left position for dst; may be nullptr
+ @return true if alpha layer was constructed in dst SkPixelRef
+ */
+ bool extractAlpha(SkBitmap* dst, const SkPaint* paint, Allocator* allocator,
+ SkIPoint* offset) const;
+
+ /** Copies SkBitmap pixel address, row bytes, and SkImageInfo to pixmap, if address
+ is available, and returns true. If pixel address is not available, return
+ false and leave pixmap unchanged.
+
+ pixmap contents become invalid on any future change to SkBitmap.
+
+ @param pixmap storage for pixel state if pixels are readable; otherwise, ignored
+ @return true if SkBitmap has direct access to pixels
+
+ example: https://fiddle.skia.org/c/@Bitmap_peekPixels
+ */
+ bool peekPixels(SkPixmap* pixmap) const;
+
+ /**
+ * Make a shader with the specified tiling, matrix and sampling.
+ */
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions&,
+ const SkMatrix* localMatrix = nullptr) const;
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const;
+ /** Defaults to clamp in both X and Y. */
+ sk_sp<SkShader> makeShader(const SkSamplingOptions& sampling, const SkMatrix& lm) const;
+ sk_sp<SkShader> makeShader(const SkSamplingOptions& sampling,
+ const SkMatrix* lm = nullptr) const;
+
+ /**
+ * Returns a new image from the bitmap. If the bitmap is marked immutable, this will
+ * share the pixel buffer. If not, it will make a copy of the pixels for the image.
+ */
+ sk_sp<SkImage> asImage() const;
+
+ /** Asserts if internal values are illegal or inconsistent. Only available if
+ SK_DEBUG is defined at compile time.
+ */
+ SkDEBUGCODE(void validate() const;)
+
+    /** \class SkBitmap::Allocator
+        Abstract base class of HeapAllocator.
+    */
+ class Allocator : public SkRefCnt {
+ public:
+
+ /** Allocates the pixel memory for the bitmap, given its dimensions and
+ SkColorType. Returns true on success, where success means either setPixels()
+ or setPixelRef() was called.
+
+ @param bitmap SkBitmap containing SkImageInfo as input, and SkPixelRef as output
+ @return true if SkPixelRef was allocated
+ */
+ virtual bool allocPixelRef(SkBitmap* bitmap) = 0;
+ private:
+ using INHERITED = SkRefCnt;
+ };
+
+ /** \class SkBitmap::HeapAllocator
+ Subclass of SkBitmap::Allocator that returns a SkPixelRef that allocates its pixel
+ memory from the heap. This is the default SkBitmap::Allocator invoked by
+ allocPixels().
+ */
+ class HeapAllocator : public Allocator {
+ public:
+
+ /** Allocates the pixel memory for the bitmap, given its dimensions and
+ SkColorType. Returns true on success, where success means either setPixels()
+ or setPixelRef() was called.
+
+ @param bitmap SkBitmap containing SkImageInfo as input, and SkPixelRef as output
+ @return true if pixels are allocated
+
+ example: https://fiddle.skia.org/c/@Bitmap_HeapAllocator_allocPixelRef
+ */
+ bool allocPixelRef(SkBitmap* bitmap) override;
+ };
+
+private:
+ sk_sp<SkPixelRef> fPixelRef;
+ SkPixmap fPixmap;
+ sk_sp<SkMipmap> fMips;
+
+ friend class SkImage_Raster;
+ friend class SkReadBuffer; // unflatten
+ friend class GrProxyProvider; // fMips
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+inline uint32_t* SkBitmap::getAddr32(int x, int y) const {
+ SkASSERT(fPixmap.addr());
+ return fPixmap.writable_addr32(x, y);
+}
+
+inline uint16_t* SkBitmap::getAddr16(int x, int y) const {
+ SkASSERT(fPixmap.addr());
+ return fPixmap.writable_addr16(x, y);
+}
+
+inline uint8_t* SkBitmap::getAddr8(int x, int y) const {
+ SkASSERT(fPixmap.addr());
+ return fPixmap.writable_addr8(x, y);
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBlendMode.h b/gfx/skia/skia/include/core/SkBlendMode.h
new file mode 100644
index 0000000000..4abe915762
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBlendMode.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlendMode_DEFINED
+#define SkBlendMode_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * Blends are operators that take in two colors (source, destination) and return a new color.
+ * Many of these operate the same on all 4 components: red, green, blue, alpha. For these,
+ * we just document what happens to one component, rather than naming each one separately.
+ *
+ * Different SkColorTypes have different representations for color components:
+ * 8-bit: 0..255
+ * 6-bit: 0..63
+ * 5-bit: 0..31
+ * 4-bit: 0..15
+ * floats: 0...1
+ *
+ * The documentation is expressed as if the component values are always 0..1 (floats).
+ *
+ * For brevity, the documentation uses the following abbreviations
+ * s : source
+ * d : destination
+ * sa : source alpha
+ * da : destination alpha
+ *
+ * Results are abbreviated
+ * r : if all 4 components are computed in the same manner
+ * ra : result alpha component
+ * rc : result "color": red, green, blue components
+ */
+enum class SkBlendMode {
+ kClear, //!< r = 0
+ kSrc, //!< r = s
+ kDst, //!< r = d
+ kSrcOver, //!< r = s + (1-sa)*d
+ kDstOver, //!< r = d + (1-da)*s
+ kSrcIn, //!< r = s * da
+ kDstIn, //!< r = d * sa
+ kSrcOut, //!< r = s * (1-da)
+ kDstOut, //!< r = d * (1-sa)
+ kSrcATop, //!< r = s*da + d*(1-sa)
+ kDstATop, //!< r = d*sa + s*(1-da)
+ kXor, //!< r = s*(1-da) + d*(1-sa)
+ kPlus, //!< r = min(s + d, 1)
+ kModulate, //!< r = s*d
+ kScreen, //!< r = s + d - s*d
+
+ kOverlay, //!< multiply or screen, depending on destination
+ kDarken, //!< rc = s + d - max(s*da, d*sa), ra = kSrcOver
+ kLighten, //!< rc = s + d - min(s*da, d*sa), ra = kSrcOver
+ kColorDodge, //!< brighten destination to reflect source
+ kColorBurn, //!< darken destination to reflect source
+ kHardLight, //!< multiply or screen, depending on source
+ kSoftLight, //!< lighten or darken, depending on source
+ kDifference, //!< rc = s + d - 2*(min(s*da, d*sa)), ra = kSrcOver
+ kExclusion, //!< rc = s + d - two(s*d), ra = kSrcOver
+ kMultiply, //!< r = s*(1-da) + d*(1-sa) + s*d
+
+ kHue, //!< hue of source with saturation and luminosity of destination
+ kSaturation, //!< saturation of source with hue and luminosity of destination
+ kColor, //!< hue and saturation of source with luminosity of destination
+ kLuminosity, //!< luminosity of source with hue and saturation of destination
+
+ kLastCoeffMode = kScreen, //!< last porter duff blend mode
+ kLastSeparableMode = kMultiply, //!< last blend mode operating separately on components
+ kLastMode = kLuminosity, //!< last valid value
+};
+
+static constexpr int kSkBlendModeCount = static_cast<int>(SkBlendMode::kLastMode) + 1;
+
+/**
+ * For Porter-Duff SkBlendModes (those <= kLastCoeffMode), these coefficients describe the blend
+ * equation used. Coefficient-based blend modes specify an equation:
+ * ('dstCoeff' * dst + 'srcCoeff' * src), where the coefficient values are constants, functions of
+ * the src or dst alpha, or functions of the src or dst color.
+ */
+enum class SkBlendModeCoeff {
+ kZero, /** 0 */
+ kOne, /** 1 */
+ kSC, /** src color */
+ kISC, /** inverse src color (i.e. 1 - sc) */
+ kDC, /** dst color */
+ kIDC, /** inverse dst color (i.e. 1 - dc) */
+ kSA, /** src alpha */
+ kISA, /** inverse src alpha (i.e. 1 - sa) */
+ kDA, /** dst alpha */
+ kIDA, /** inverse dst alpha (i.e. 1 - da) */
+
+ kCoeffCount
+};
+
+/**
+ * Returns true if 'mode' is a coefficient-based blend mode (<= kLastCoeffMode). If true is
+ * returned, the mode's src and dst coefficient functions are set in 'src' and 'dst'.
+ */
+SK_API bool SkBlendMode_AsCoeff(SkBlendMode mode, SkBlendModeCoeff* src, SkBlendModeCoeff* dst);
+
+
+/** Returns name of blendMode as null-terminated C string.
+
+ @return C string
+*/
+SK_API const char* SkBlendMode_Name(SkBlendMode blendMode);
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBlender.h b/gfx/skia/skia/include/core/SkBlender.h
new file mode 100644
index 0000000000..7acba87f52
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBlender.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlender_DEFINED
+#define SkBlender_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkFlattenable.h"
+
+/**
+ * SkBlender represents a custom blend function in the Skia pipeline. When an SkBlender is
+ * present in a paint, the SkBlendMode is ignored. A blender combines a source color (the
+ * result of our paint) and destination color (from the canvas) into a final color.
+ */
+class SK_API SkBlender : public SkFlattenable {
+public:
+ /**
+ * Create a blender that implements the specified BlendMode.
+ */
+ static sk_sp<SkBlender> Mode(SkBlendMode mode);
+
+private:
+ SkBlender() = default;
+ friend class SkBlenderBase;
+
+ using INHERITED = SkFlattenable;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkBlurTypes.h b/gfx/skia/skia/include/core/SkBlurTypes.h
new file mode 100644
index 0000000000..f0dde10f25
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkBlurTypes.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurTypes_DEFINED
+#define SkBlurTypes_DEFINED
+
+enum SkBlurStyle : int {
+ kNormal_SkBlurStyle, //!< fuzzy inside and outside
+ kSolid_SkBlurStyle, //!< solid inside, fuzzy outside
+ kOuter_SkBlurStyle, //!< nothing inside, fuzzy outside
+ kInner_SkBlurStyle, //!< fuzzy inside, nothing outside
+
+ kLastEnum_SkBlurStyle = kInner_SkBlurStyle,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkCanvas.h b/gfx/skia/skia/include/core/SkCanvas.h
new file mode 100644
index 0000000000..650d9171ce
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCanvas.h
@@ -0,0 +1,2632 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvas_DEFINED
+#define SkCanvas_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkClipOp.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRasterHandleAllocator.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkCPUTypes.h"
+#include "include/private/base/SkDeque.h"
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <optional>
+
+#ifndef SK_SUPPORT_LEGACY_GETTOTALMATRIX
+#define SK_SUPPORT_LEGACY_GETTOTALMATRIX
+#endif
+
+namespace sktext {
+class GlyphRunBuilder;
+class GlyphRunList;
+}
+
+class AutoLayerForImageFilter;
+class GrRecordingContext;
+
+class SkBaseDevice;
+class SkBitmap;
+class SkBlender;
+class SkData;
+class SkDrawable;
+class SkFont;
+class SkImage;
+class SkMesh;
+class SkPaintFilterCanvas;
+class SkPath;
+class SkPicture;
+class SkPixmap;
+class SkRRect;
+class SkRegion;
+class SkShader;
+class SkSpecialImage;
+class SkSurface;
+class SkSurface_Base;
+class SkTextBlob;
+class SkVertices;
+struct SkDrawShadowRec;
+struct SkRSXform;
+
+namespace skgpu::graphite { class Recorder; }
+namespace sktext::gpu { class Slug; }
+namespace SkRecords { class Draw; }
+
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) && defined(SK_GANESH)
+class GrBackendRenderTarget;
+#endif
+
+// TODO:
+// This is not ideal but Chrome is depending on a forward decl of GrSlug here.
+// It should be removed once Chrome has migrated to sktext::gpu::Slug.
+using GrSlug = sktext::gpu::Slug;
+
+/** \class SkCanvas
+ SkCanvas provides an interface for drawing, and how the drawing is clipped and transformed.
+ SkCanvas contains a stack of SkMatrix and clip values.
+
+ SkCanvas and SkPaint together provide the state to draw into SkSurface or SkBaseDevice.
+ Each SkCanvas draw call transforms the geometry of the object by the concatenation of all
+ SkMatrix values in the stack. The transformed geometry is clipped by the intersection
+ of all of clip values in the stack. The SkCanvas draw calls use SkPaint to supply drawing
+ state such as color, SkTypeface, text size, stroke width, SkShader and so on.
+
+ To draw to a pixel-based destination, create raster surface or GPU surface.
+ Request SkCanvas from SkSurface to obtain the interface to draw.
+ SkCanvas generated by raster surface draws to memory visible to the CPU.
+ SkCanvas generated by GPU surface uses Vulkan or OpenGL to draw to the GPU.
+
+ To draw to a document, obtain SkCanvas from SVG canvas, document PDF, or SkPictureRecorder.
+ SkDocument based SkCanvas and other SkCanvas subclasses reference SkBaseDevice describing the
+ destination.
+
+ SkCanvas can be constructed to draw to SkBitmap without first creating raster surface.
+ This approach may be deprecated in the future.
+*/
+class SK_API SkCanvas {
+public:
+
+ /** Allocates raster SkCanvas that will draw directly into pixels.
+
+ SkCanvas is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are zero or positive;
+ info contains SkColorType and SkAlphaType supported by raster surface;
+ pixels is not nullptr;
+ rowBytes is zero or large enough to contain info width pixels of SkColorType.
+
+ Pass zero for rowBytes to compute rowBytes from info width and size of pixel.
+ If rowBytes is greater than zero, it must be equal to or greater than
+ info width times bytes required for SkColorType.
+
+ Pixel buffer size should be info height times computed rowBytes.
+ Pixels are not initialized.
+ To access pixels after drawing, call flush() or peekPixels().
+
+ @param info width, height, SkColorType, SkAlphaType, SkColorSpace, of raster surface;
+ width, or height, or both, may be zero
+ @param pixels pointer to destination pixels buffer
+ @param rowBytes interval from one SkSurface row to the next, or zero
+ @param props LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkCanvas if all parameters are valid; otherwise, nullptr
+ */
+ static std::unique_ptr<SkCanvas> MakeRasterDirect(const SkImageInfo& info, void* pixels,
+ size_t rowBytes,
+ const SkSurfaceProps* props = nullptr);
+
+ /** Allocates raster SkCanvas specified by inline image specification. Subsequent SkCanvas
+ calls draw into pixels.
+ SkColorType is set to kN32_SkColorType.
+ SkAlphaType is set to kPremul_SkAlphaType.
+ To access pixels after drawing, call flush() or peekPixels().
+
+ SkCanvas is returned if all parameters are valid.
+ Valid parameters include:
+ width and height are zero or positive;
+ pixels is not nullptr;
+ rowBytes is zero or large enough to contain width pixels of kN32_SkColorType.
+
+ Pass zero for rowBytes to compute rowBytes from width and size of pixel.
+ If rowBytes is greater than zero, it must be equal to or greater than
+ width times bytes required for SkColorType.
+
+ Pixel buffer size should be height times rowBytes.
+
+ @param width pixel column count on raster surface created; must be zero or greater
+ @param height pixel row count on raster surface created; must be zero or greater
+ @param pixels pointer to destination pixels buffer; buffer size should be height
+ times rowBytes
+ @param rowBytes interval from one SkSurface row to the next, or zero
+ @return SkCanvas if all parameters are valid; otherwise, nullptr
+ */
+ static std::unique_ptr<SkCanvas> MakeRasterDirectN32(int width, int height, SkPMColor* pixels,
+ size_t rowBytes) {
+ return MakeRasterDirect(SkImageInfo::MakeN32Premul(width, height), pixels, rowBytes);
+ }
+
+ /** Creates an empty SkCanvas with no backing device or pixels, with
+ a width and height of zero.
+
+ @return empty SkCanvas
+
+ example: https://fiddle.skia.org/c/@Canvas_empty_constructor
+ */
+ SkCanvas();
+
+ /** Creates SkCanvas of the specified dimensions without a SkSurface.
+ Used by subclasses with custom implementations for draw member functions.
+
+ If props equals nullptr, SkSurfaceProps are created with
+ SkSurfaceProps::InitType settings, which choose the pixel striping
+ direction and order. Since a platform may dynamically change its direction when
+ the device is rotated, and since a platform may have multiple monitors with
+ different characteristics, it is best not to rely on this legacy behavior.
+
+ @param width zero or greater
+ @param height zero or greater
+ @param props LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkCanvas placeholder with dimensions
+
+ example: https://fiddle.skia.org/c/@Canvas_int_int_const_SkSurfaceProps_star
+ */
+ SkCanvas(int width, int height, const SkSurfaceProps* props = nullptr);
+
+ /** Private. For internal use only.
+ */
+ explicit SkCanvas(sk_sp<SkBaseDevice> device);
+
+ /** Constructs a canvas that draws into bitmap.
+ Sets kUnknown_SkPixelGeometry in constructed SkSurface.
+
+ SkBitmap is copied so that subsequently editing bitmap will not affect
+ constructed SkCanvas.
+
+ May be deprecated in the future.
+
+ @param bitmap width, height, SkColorType, SkAlphaType, and pixel
+ storage of raster surface
+ @return SkCanvas that can be used to draw into bitmap
+
+ example: https://fiddle.skia.org/c/@Canvas_copy_const_SkBitmap
+ */
+ explicit SkCanvas(const SkBitmap& bitmap);
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ /** Private.
+ */
+ enum class ColorBehavior {
+ kLegacy, //!< placeholder
+ };
+
+ /** Private. For use by Android framework only.
+
+ @param bitmap specifies a bitmap for the canvas to draw into
+ @param behavior specializes this constructor; value is unused
+ @return SkCanvas that can be used to draw into bitmap
+ */
+ SkCanvas(const SkBitmap& bitmap, ColorBehavior behavior);
+#endif
+
+ /** Constructs a canvas that draws into bitmap.
+ Use props to match the device characteristics, like LCD striping.
+
+ bitmap is copied so that subsequently editing bitmap will not affect
+ constructed SkCanvas.
+
+ @param bitmap width, height, SkColorType, SkAlphaType,
+ and pixel storage of raster surface
+ @param props order and orientation of RGB striping; and whether to use
+ device independent fonts
+ @return SkCanvas that can be used to draw into bitmap
+
+ example: https://fiddle.skia.org/c/@Canvas_const_SkBitmap_const_SkSurfaceProps
+ */
+ SkCanvas(const SkBitmap& bitmap, const SkSurfaceProps& props);
+
+ /** Draws saved layers, if any.
+ Frees up resources used by SkCanvas.
+
+ example: https://fiddle.skia.org/c/@Canvas_destructor
+ */
+ virtual ~SkCanvas();
+
+ /** Returns SkImageInfo for SkCanvas. If SkCanvas is not associated with raster surface or
+ GPU surface, returned SkColorType is set to kUnknown_SkColorType.
+
+ @return dimensions and SkColorType of SkCanvas
+
+ example: https://fiddle.skia.org/c/@Canvas_imageInfo
+ */
+ SkImageInfo imageInfo() const;
+
+ /** Copies SkSurfaceProps, if SkCanvas is associated with raster surface or
+        GPU surface, and returns true. Otherwise, returns false and leaves props unchanged.
+
+ @param props storage for writable SkSurfaceProps
+ @return true if SkSurfaceProps was copied
+
+ DEPRECATED: Replace usage with getBaseProps() or getTopProps()
+
+ example: https://fiddle.skia.org/c/@Canvas_getProps
+ */
+ bool getProps(SkSurfaceProps* props) const;
+
+ /** Returns the SkSurfaceProps associated with the canvas (i.e., at the base of the layer
+ stack).
+
+ @return base SkSurfaceProps
+ */
+ SkSurfaceProps getBaseProps() const;
+
+ /** Returns the SkSurfaceProps associated with the canvas that are currently active (i.e., at
+ the top of the layer stack). This can differ from getBaseProps depending on the flags
+ passed to saveLayer (see SaveLayerFlagsSet).
+
+ @return SkSurfaceProps active in the current/top layer
+ */
+ SkSurfaceProps getTopProps() const;
+
+ /** Triggers the immediate execution of all pending draw operations.
+ If SkCanvas is associated with GPU surface, resolves all pending GPU operations.
+ If SkCanvas is associated with raster surface, has no effect; raster draw
+ operations are never deferred.
+
+ DEPRECATED: Replace usage with GrDirectContext::flush()
+ */
+ void flush();
+
+ /** Gets the size of the base or root layer in global canvas coordinates. The
+ origin of the base layer is always (0,0). The area available for drawing may be
+ smaller (due to clipping or saveLayer).
+
+ @return integral width and height of base layer
+
+ example: https://fiddle.skia.org/c/@Canvas_getBaseLayerSize
+ */
+ virtual SkISize getBaseLayerSize() const;
+
+ /** Creates SkSurface matching info and props, and associates it with SkCanvas.
+ Returns nullptr if no match found.
+
+ If props is nullptr, matches SkSurfaceProps in SkCanvas. If props is nullptr and SkCanvas
+ does not have SkSurfaceProps, creates SkSurface with default SkSurfaceProps.
+
+ @param info width, height, SkColorType, SkAlphaType, and SkColorSpace
+ @param props SkSurfaceProps to match; may be nullptr to match SkCanvas
+ @return SkSurface matching info and props, or nullptr if no match is available
+
+ example: https://fiddle.skia.org/c/@Canvas_makeSurface
+ */
+ sk_sp<SkSurface> makeSurface(const SkImageInfo& info, const SkSurfaceProps* props = nullptr);
+
+ /** Returns GPU context of the GPU surface associated with SkCanvas.
+
+ @return GPU context, if available; nullptr otherwise
+
+ example: https://fiddle.skia.org/c/@Canvas_recordingContext
+ */
+ virtual GrRecordingContext* recordingContext();
+
+ /** Returns Recorder for the GPU surface associated with SkCanvas.
+
+ @return Recorder, if available; nullptr otherwise
+ */
+ virtual skgpu::graphite::Recorder* recorder();
+
+ /** Sometimes a canvas is owned by a surface. If it is, getSurface() will return a bare
+ * pointer to that surface, else this will return nullptr.
+ */
+ SkSurface* getSurface() const;
+
+ /** Returns the pixel base address, SkImageInfo, rowBytes, and origin if the pixels
+ can be read directly. The returned address is only valid
+ while SkCanvas is in scope and unchanged. Any SkCanvas call or SkSurface call
+ may invalidate the returned address and other returned values.
+
+ If pixels are inaccessible, info, rowBytes, and origin are unchanged.
+
+ @param info storage for writable pixels' SkImageInfo; may be nullptr
+ @param rowBytes storage for writable pixels' row bytes; may be nullptr
+ @param origin storage for SkCanvas top layer origin, its top-left corner;
+ may be nullptr
+ @return address of pixels, or nullptr if inaccessible
+
+ example: https://fiddle.skia.org/c/@Canvas_accessTopLayerPixels_a
+ example: https://fiddle.skia.org/c/@Canvas_accessTopLayerPixels_b
+ */
+ void* accessTopLayerPixels(SkImageInfo* info, size_t* rowBytes, SkIPoint* origin = nullptr);
+
+ /** Returns custom context that tracks the SkMatrix and clip.
+
+ Use SkRasterHandleAllocator to blend Skia drawing with custom drawing, typically performed
+ by the host platform user interface. The custom context returned is generated by
+ SkRasterHandleAllocator::MakeCanvas, which creates a custom canvas with raster storage for
+ the drawing destination.
+
+ @return context of custom allocation
+
+ example: https://fiddle.skia.org/c/@Canvas_accessTopRasterHandle
+ */
+ SkRasterHandleAllocator::Handle accessTopRasterHandle() const;
+
+ /** Returns true if SkCanvas has direct access to its pixels.
+
+ Pixels are readable when SkBaseDevice is raster. Pixels are not readable when SkCanvas
+ is returned from GPU surface, returned by SkDocument::beginPage, returned by
+ SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility class
+ like DebugCanvas.
+
+ pixmap is valid only while SkCanvas is in scope and unchanged. Any
+ SkCanvas or SkSurface call may invalidate the pixmap values.
+
+ @param pixmap storage for pixel state if pixels are readable; otherwise, ignored
+ @return true if SkCanvas has direct access to pixels
+
+ example: https://fiddle.skia.org/c/@Canvas_peekPixels
+ */
+ bool peekPixels(SkPixmap* pixmap);
+
+ /** Copies SkRect of pixels from SkCanvas into dstPixels. SkMatrix and clip are
+ ignored.
+
+ Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()).
+ Destination SkRect corners are (0, 0) and (dstInfo.width(), dstInfo.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to dstInfo.colorType() and dstInfo.alphaType() if required.
+
+ Pixels are readable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not readable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ The destination pixel storage must be allocated by the caller.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. dstPixels contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down destination.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkCanvas pixels could not be converted to dstInfo.colorType() or dstInfo.alphaType().
+ - SkCanvas pixels are not readable; for instance, SkCanvas is document-based.
+ - dstRowBytes is too small to contain one row of pixels.
+
+ @param dstInfo width, height, SkColorType, and SkAlphaType of dstPixels
+ @param dstPixels storage for pixels; dstInfo.height() times dstRowBytes, or larger
+ @param dstRowBytes size of one destination row; dstInfo.width() times pixel size, or larger
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY);
+
+ /** Copies SkRect of pixels from SkCanvas into pixmap. SkMatrix and clip are
+ ignored.
+
+ Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()).
+ Destination SkRect corners are (0, 0) and (pixmap.width(), pixmap.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to pixmap.colorType() and pixmap.alphaType() if required.
+
+ Pixels are readable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not readable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ Caller must allocate pixel storage in pixmap if needed.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination SkRect
+ are copied. pixmap pixels contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down pixmap.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkCanvas pixels could not be converted to pixmap.colorType() or pixmap.alphaType().
+ - SkCanvas pixels are not readable; for instance, SkCanvas is document-based.
+ - SkPixmap pixels could not be allocated.
+ - pixmap.rowBytes() is too small to contain one row of pixels.
+
+ @param pixmap storage for pixels copied from SkCanvas
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+
+ example: https://fiddle.skia.org/c/@Canvas_readPixels_2
+ */
+ bool readPixels(const SkPixmap& pixmap, int srcX, int srcY);
+
+ /** Copies SkRect of pixels from SkCanvas into bitmap. SkMatrix and clip are
+ ignored.
+
+ Source SkRect corners are (srcX, srcY) and (imageInfo().width(), imageInfo().height()).
+ Destination SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to bitmap.colorType() and bitmap.alphaType() if required.
+
+ Pixels are readable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not readable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ Caller must allocate pixel storage in bitmap if needed.
+
+ SkBitmap values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. SkBitmap pixels outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down bitmap.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkCanvas pixels could not be converted to bitmap.colorType() or bitmap.alphaType().
+ - SkCanvas pixels are not readable; for instance, SkCanvas is document-based.
+ - bitmap pixels could not be allocated.
+ - bitmap.rowBytes() is too small to contain one row of pixels.
+
+ @param bitmap storage for pixels copied from SkCanvas
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+
+ example: https://fiddle.skia.org/c/@Canvas_readPixels_3
+ */
+ bool readPixels(const SkBitmap& bitmap, int srcX, int srcY);
+
+ /** Copies SkRect from pixels to SkCanvas. SkMatrix and clip are ignored.
+ Source SkRect corners are (0, 0) and (info.width(), info.height()).
+ Destination SkRect corners are (x, y) and
+ (imageInfo().width(), imageInfo().height()).
+
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to imageInfo().colorType() and imageInfo().alphaType() if required.
+
+ Pixels are writable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not writable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. SkCanvas pixels outside SkRect intersection are unchanged.
+
+ Pass negative values for x or y to offset pixels to the left or
+ above SkCanvas pixels.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - pixels could not be converted to SkCanvas imageInfo().colorType() or
+ imageInfo().alphaType().
+ - SkCanvas pixels are not writable; for instance, SkCanvas is document-based.
+ - rowBytes is too small to contain one row of pixels.
+
+ @param info width, height, SkColorType, and SkAlphaType of pixels
+ @param pixels pixels to copy, of size info.height() times rowBytes, or larger
+ @param rowBytes size of one row of pixels; info.width() times pixel size, or larger
+ @param x offset into SkCanvas writable pixels on x-axis; may be negative
+ @param y offset into SkCanvas writable pixels on y-axis; may be negative
+ @return true if pixels were written to SkCanvas
+
+ example: https://fiddle.skia.org/c/@Canvas_writePixels
+ */
+ bool writePixels(const SkImageInfo& info, const void* pixels, size_t rowBytes, int x, int y);
+
+ /** Copies SkRect from pixels to SkCanvas. SkMatrix and clip are ignored.
+ Source SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()).
+
+ Destination SkRect corners are (x, y) and
+ (imageInfo().width(), imageInfo().height()).
+
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to imageInfo().colorType() and imageInfo().alphaType() if required.
+
+ Pixels are writable when SkBaseDevice is raster, or backed by a GPU.
+ Pixels are not writable when SkCanvas is returned by SkDocument::beginPage,
+ returned by SkPictureRecorder::beginRecording, or SkCanvas is the base of a utility
+ class like DebugCanvas.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. SkCanvas pixels outside SkRect intersection are unchanged.
+
+ Pass negative values for x or y to offset pixels to the left or
+ above SkCanvas pixels.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - bitmap does not have allocated pixels.
+ - bitmap pixels could not be converted to SkCanvas imageInfo().colorType() or
+ imageInfo().alphaType().
+ - SkCanvas pixels are not writable; for instance, SkCanvas is document based.
+ - bitmap pixels are inaccessible; for instance, bitmap wraps a texture.
+
+ @param bitmap contains pixels copied to SkCanvas
+ @param x offset into SkCanvas writable pixels on x-axis; may be negative
+ @param y offset into SkCanvas writable pixels on y-axis; may be negative
+ @return true if pixels were written to SkCanvas
+
+ example: https://fiddle.skia.org/c/@Canvas_writePixels_2
+ example: https://fiddle.skia.org/c/@State_Stack_a
+ example: https://fiddle.skia.org/c/@State_Stack_b
+ */
+ bool writePixels(const SkBitmap& bitmap, int x, int y);
+
+ /** Saves SkMatrix and clip.
+ Calling restore() discards changes to SkMatrix and clip,
+ restoring the SkMatrix and clip to their state when save() was called.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(), setMatrix(),
+ and resetMatrix(). Clip may be changed by clipRect(), clipRRect(), clipPath(), clipRegion().
+
+        Saved SkCanvas state is put on a stack; multiple calls to save() should be balanced
+ by an equal number of calls to restore().
+
+ Call restoreToCount() with result to restore this and subsequent saves.
+
+ @return depth of saved stack
+
+ example: https://fiddle.skia.org/c/@Canvas_save
+ */
+ int save();
+
+ /** Saves SkMatrix and clip, and allocates a SkSurface for subsequent drawing.
+ Calling restore() discards changes to SkMatrix and clip, and draws the SkSurface.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(),
+ setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(),
+ clipPath(), clipRegion().
+
+ SkRect bounds suggests but does not define the SkSurface size. To clip drawing to
+ a specific rectangle, use clipRect().
+
+ Optional SkPaint paint applies alpha, SkColorFilter, SkImageFilter, and
+ SkBlendMode when restore() is called.
+
+ Call restoreToCount() with returned value to restore this and subsequent saves.
+
+ @param bounds hint to limit the size of the layer; may be nullptr
+ @param paint graphics state for layer; may be nullptr
+ @return depth of saved stack
+
+ example: https://fiddle.skia.org/c/@Canvas_saveLayer
+ example: https://fiddle.skia.org/c/@Canvas_saveLayer_4
+ */
+ int saveLayer(const SkRect* bounds, const SkPaint* paint);
+
+ /** Saves SkMatrix and clip, and allocates a SkSurface for subsequent drawing.
+ Calling restore() discards changes to SkMatrix and clip, and draws the SkSurface.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(),
+ setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(),
+ clipPath(), clipRegion().
+
+ SkRect bounds suggests but does not define the layer size. To clip drawing to
+ a specific rectangle, use clipRect().
+
+ Optional SkPaint paint applies alpha, SkColorFilter, SkImageFilter, and
+ SkBlendMode when restore() is called.
+
+ Call restoreToCount() with returned value to restore this and subsequent saves.
+
+ @param bounds hint to limit the size of layer; may be nullptr
+ @param paint graphics state for layer; may be nullptr
+ @return depth of saved stack
+ */
+ int saveLayer(const SkRect& bounds, const SkPaint* paint) {
+ return this->saveLayer(&bounds, paint);
+ }
+
+ /** Saves SkMatrix and clip, and allocates SkSurface for subsequent drawing.
+
+ Calling restore() discards changes to SkMatrix and clip,
+ and blends layer with alpha opacity onto prior layer.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(),
+ setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(),
+ clipPath(), clipRegion().
+
+ SkRect bounds suggests but does not define layer size. To clip drawing to
+ a specific rectangle, use clipRect().
+
+ alpha of zero is fully transparent, 1.0f is fully opaque.
+
+ Call restoreToCount() with returned value to restore this and subsequent saves.
+
+ @param bounds hint to limit the size of layer; may be nullptr
+ @param alpha opacity of layer
+ @return depth of saved stack
+
+ example: https://fiddle.skia.org/c/@Canvas_saveLayerAlpha
+ */
+ int saveLayerAlphaf(const SkRect* bounds, float alpha);
+ // Helper that accepts an int between 0 and 255, and divides it by 255.0
+ int saveLayerAlpha(const SkRect* bounds, U8CPU alpha) {
+ return this->saveLayerAlphaf(bounds, alpha * (1.0f / 255));
+ }
+
+ /** \enum SkCanvas::SaveLayerFlagsSet
+ SaveLayerFlags provides options that may be used in any combination in SaveLayerRec,
+ defining how layer allocated by saveLayer() operates. It may be set to zero,
+ kPreserveLCDText_SaveLayerFlag, kInitWithPrevious_SaveLayerFlag, or both flags.
+ */
+ enum SaveLayerFlagsSet {
+ kPreserveLCDText_SaveLayerFlag = 1 << 1,
+ kInitWithPrevious_SaveLayerFlag = 1 << 2, //!< initializes with previous contents
+ // instead of matching previous layer's colortype, use F16
+ kF16ColorType = 1 << 4,
+ };
+
+ typedef uint32_t SaveLayerFlags;
+
+ /** \struct SkCanvas::SaveLayerRec
+ SaveLayerRec contains the state used to create the layer.
+ */
+ struct SaveLayerRec {
+ /** Sets fBounds, fPaint, and fBackdrop to nullptr. Clears fSaveLayerFlags.
+
+ @return empty SaveLayerRec
+ */
+ SaveLayerRec() {}
+
+ /** Sets fBounds, fPaint, and fSaveLayerFlags; sets fBackdrop to nullptr.
+
+ @param bounds layer dimensions; may be nullptr
+ @param paint applied to layer when overlaying prior layer; may be nullptr
+ @param saveLayerFlags SaveLayerRec options to modify layer
+ @return SaveLayerRec with empty fBackdrop
+ */
+ SaveLayerRec(const SkRect* bounds, const SkPaint* paint, SaveLayerFlags saveLayerFlags = 0)
+ : SaveLayerRec(bounds, paint, nullptr, 1.f, saveLayerFlags) {}
+
+ /** Sets fBounds, fPaint, fBackdrop, and fSaveLayerFlags.
+
+ @param bounds layer dimensions; may be nullptr
+ @param paint applied to layer when overlaying prior layer;
+ may be nullptr
+ @param backdrop If not null, this causes the current layer to be filtered by
+ backdrop, and then drawn into the new layer
+ (respecting the current clip).
+ If null, the new layer is initialized with transparent-black.
+ @param saveLayerFlags SaveLayerRec options to modify layer
+ @return SaveLayerRec fully specified
+ */
+ SaveLayerRec(const SkRect* bounds, const SkPaint* paint, const SkImageFilter* backdrop,
+ SaveLayerFlags saveLayerFlags)
+ : SaveLayerRec(bounds, paint, backdrop, 1.f, saveLayerFlags) {}
+
+ /** hints at layer size limit */
+ const SkRect* fBounds = nullptr;
+
+ /** modifies overlay */
+ const SkPaint* fPaint = nullptr;
+
+ /**
+ * If not null, this triggers the same initialization behavior as setting
+ * kInitWithPrevious_SaveLayerFlag on fSaveLayerFlags: the current layer is copied into
+ * the new layer, rather than initializing the new layer with transparent-black.
+ * This is then filtered by fBackdrop (respecting the current clip).
+ */
+ const SkImageFilter* fBackdrop = nullptr;
+
+ /** preserves LCD text, creates with prior layer contents */
+ SaveLayerFlags fSaveLayerFlags = 0;
+
+ private:
+ friend class SkCanvas;
+ friend class SkCanvasPriv;
+
+ SaveLayerRec(const SkRect* bounds, const SkPaint* paint, const SkImageFilter* backdrop,
+ SkScalar backdropScale, SaveLayerFlags saveLayerFlags)
+ : fBounds(bounds)
+ , fPaint(paint)
+ , fBackdrop(backdrop)
+ , fSaveLayerFlags(saveLayerFlags)
+ , fExperimentalBackdropScale(backdropScale) {}
+
+        // Relative scale factor applied to the image content used to initialize the layer
+        // when the kInitFromPrevious flag or a backdrop filter is used.
+ SkScalar fExperimentalBackdropScale = 1.f;
+ };
+
+ /** Saves SkMatrix and clip, and allocates SkSurface for subsequent drawing.
+
+ Calling restore() discards changes to SkMatrix and clip,
+ and blends SkSurface with alpha opacity onto the prior layer.
+
+ SkMatrix may be changed by translate(), scale(), rotate(), skew(), concat(),
+ setMatrix(), and resetMatrix(). Clip may be changed by clipRect(), clipRRect(),
+ clipPath(), clipRegion().
+
+ SaveLayerRec contains the state used to create the layer.
+
+ Call restoreToCount() with returned value to restore this and subsequent saves.
+
+ @param layerRec layer state
+ @return depth of save state stack before this call was made.
+
+ example: https://fiddle.skia.org/c/@Canvas_saveLayer_3
+ */
+ int saveLayer(const SaveLayerRec& layerRec);
+
+ /** Removes changes to SkMatrix and clip since SkCanvas state was
+ last saved. The state is removed from the stack.
+
+ Does nothing if the stack is empty.
+
+ example: https://fiddle.skia.org/c/@AutoCanvasRestore_restore
+
+ example: https://fiddle.skia.org/c/@Canvas_restore
+ */
+ void restore();
+
+ /** Returns the number of saved states, each containing: SkMatrix and clip.
+ Equals the number of save() calls less the number of restore() calls plus one.
+ The save count of a new canvas is one.
+
+ @return depth of save state stack
+
+ example: https://fiddle.skia.org/c/@Canvas_getSaveCount
+ */
+ int getSaveCount() const;
+
+ /** Restores state to SkMatrix and clip values when save(), saveLayer(),
+ saveLayerPreserveLCDTextRequests(), or saveLayerAlpha() returned saveCount.
+
+ Does nothing if saveCount is greater than state stack count.
+ Restores state to initial values if saveCount is less than or equal to one.
+
+ @param saveCount depth of state stack to restore
+
+ example: https://fiddle.skia.org/c/@Canvas_restoreToCount
+ */
+ void restoreToCount(int saveCount);
+
+ /** Translates SkMatrix by dx along the x-axis and dy along the y-axis.
+
+ Mathematically, replaces SkMatrix with a translation matrix
+ premultiplied with SkMatrix.
+
+ This has the effect of moving the drawing by (dx, dy) before transforming
+ the result with SkMatrix.
+
+ @param dx distance to translate on x-axis
+ @param dy distance to translate on y-axis
+
+ example: https://fiddle.skia.org/c/@Canvas_translate
+ */
+ void translate(SkScalar dx, SkScalar dy);
+
+ /** Scales SkMatrix by sx on the x-axis and sy on the y-axis.
+
+ Mathematically, replaces SkMatrix with a scale matrix
+ premultiplied with SkMatrix.
+
+ This has the effect of scaling the drawing by (sx, sy) before transforming
+ the result with SkMatrix.
+
+ @param sx amount to scale on x-axis
+ @param sy amount to scale on y-axis
+
+ example: https://fiddle.skia.org/c/@Canvas_scale
+ */
+ void scale(SkScalar sx, SkScalar sy);
+
+ /** Rotates SkMatrix by degrees. Positive degrees rotates clockwise.
+
+ Mathematically, replaces SkMatrix with a rotation matrix
+ premultiplied with SkMatrix.
+
+ This has the effect of rotating the drawing by degrees before transforming
+ the result with SkMatrix.
+
+ @param degrees amount to rotate, in degrees
+
+ example: https://fiddle.skia.org/c/@Canvas_rotate
+ */
+ void rotate(SkScalar degrees);
+
+ /** Rotates SkMatrix by degrees about a point at (px, py). Positive degrees rotates
+ clockwise.
+
+ Mathematically, constructs a rotation matrix; premultiplies the rotation matrix by
+ a translation matrix; then replaces SkMatrix with the resulting matrix
+ premultiplied with SkMatrix.
+
+ This has the effect of rotating the drawing about a given point before
+ transforming the result with SkMatrix.
+
+ @param degrees amount to rotate, in degrees
+ @param px x-axis value of the point to rotate about
+ @param py y-axis value of the point to rotate about
+
+ example: https://fiddle.skia.org/c/@Canvas_rotate_2
+ */
+ void rotate(SkScalar degrees, SkScalar px, SkScalar py);
+
+ /** Skews SkMatrix by sx on the x-axis and sy on the y-axis. A positive value of sx
+ skews the drawing right as y-axis values increase; a positive value of sy skews
+ the drawing down as x-axis values increase.
+
+ Mathematically, replaces SkMatrix with a skew matrix premultiplied with SkMatrix.
+
+ This has the effect of skewing the drawing by (sx, sy) before transforming
+ the result with SkMatrix.
+
+ @param sx amount to skew on x-axis
+ @param sy amount to skew on y-axis
+
+ example: https://fiddle.skia.org/c/@Canvas_skew
+ */
+ void skew(SkScalar sx, SkScalar sy);
+
+ /** Replaces SkMatrix with matrix premultiplied with existing SkMatrix.
+
+ This has the effect of transforming the drawn geometry by matrix, before
+ transforming the result with existing SkMatrix.
+
+ @param matrix matrix to premultiply with existing SkMatrix
+
+ example: https://fiddle.skia.org/c/@Canvas_concat
+ */
+ void concat(const SkMatrix& matrix);
+ void concat(const SkM44&);
+
+ /** Replaces SkMatrix with matrix.
+ Unlike concat(), any prior matrix state is overwritten.
+
+ @param matrix matrix to copy, replacing existing SkMatrix
+
+ example: https://fiddle.skia.org/c/@Canvas_setMatrix
+ */
+ void setMatrix(const SkM44& matrix);
+
+ // DEPRECATED -- use SkM44 version
+ void setMatrix(const SkMatrix& matrix);
+
+ /** Sets SkMatrix to the identity matrix.
+ Any prior matrix state is overwritten.
+
+ example: https://fiddle.skia.org/c/@Canvas_resetMatrix
+ */
+ void resetMatrix();
+
+ /** Replaces clip with the intersection or difference of clip and rect,
+ with an aliased or anti-aliased clip edge. rect is transformed by SkMatrix
+ before it is combined with clip.
+
+ @param rect SkRect to combine with clip
+ @param op SkClipOp to apply to clip
+ @param doAntiAlias true if clip is to be anti-aliased
+
+ example: https://fiddle.skia.org/c/@Canvas_clipRect
+ */
+ void clipRect(const SkRect& rect, SkClipOp op, bool doAntiAlias);
+
+ /** Replaces clip with the intersection or difference of clip and rect.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ rect is transformed by SkMatrix before it is combined with clip.
+
+ @param rect SkRect to combine with clip
+ @param op SkClipOp to apply to clip
+ */
+ void clipRect(const SkRect& rect, SkClipOp op) {
+ this->clipRect(rect, op, false);
+ }
+
+ /** Replaces clip with the intersection of clip and rect.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ rect is transformed by SkMatrix
+ before it is combined with clip.
+
+ @param rect SkRect to combine with clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipRect(const SkRect& rect, bool doAntiAlias = false) {
+ this->clipRect(rect, SkClipOp::kIntersect, doAntiAlias);
+ }
+
+ void clipIRect(const SkIRect& irect, SkClipOp op = SkClipOp::kIntersect) {
+ this->clipRect(SkRect::Make(irect), op, false);
+ }
+
+ /** Sets the maximum clip rectangle, which can be set by clipRect(), clipRRect() and
+ clipPath() and intersect the current clip with the specified rect.
+ The maximum clip affects only future clipping operations; it is not retroactive.
+ The clip restriction is not recorded in pictures.
+
+ Pass an empty rect to disable maximum clip.
+ This private API is for use by Android framework only.
+
+ DEPRECATED: Replace usage with SkAndroidFrameworkUtils::replaceClip()
+
+ @param rect maximum allowed clip in device coordinates
+ */
+ void androidFramework_setDeviceClipRestriction(const SkIRect& rect);
+
+ /** Replaces clip with the intersection or difference of clip and rrect,
+ with an aliased or anti-aliased clip edge.
+ rrect is transformed by SkMatrix
+ before it is combined with clip.
+
+ @param rrect SkRRect to combine with clip
+ @param op SkClipOp to apply to clip
+ @param doAntiAlias true if clip is to be anti-aliased
+
+ example: https://fiddle.skia.org/c/@Canvas_clipRRect
+ */
+ void clipRRect(const SkRRect& rrect, SkClipOp op, bool doAntiAlias);
+
+ /** Replaces clip with the intersection or difference of clip and rrect.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ rrect is transformed by SkMatrix before it is combined with clip.
+
+ @param rrect SkRRect to combine with clip
+ @param op SkClipOp to apply to clip
+ */
+ void clipRRect(const SkRRect& rrect, SkClipOp op) {
+ this->clipRRect(rrect, op, false);
+ }
+
+ /** Replaces clip with the intersection of clip and rrect,
+ with an aliased or anti-aliased clip edge.
+ rrect is transformed by SkMatrix before it is combined with clip.
+
+ @param rrect SkRRect to combine with clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipRRect(const SkRRect& rrect, bool doAntiAlias = false) {
+ this->clipRRect(rrect, SkClipOp::kIntersect, doAntiAlias);
+ }
+
+ /** Replaces clip with the intersection or difference of clip and path,
+ with an aliased or anti-aliased clip edge. SkPath::FillType determines if path
+ describes the area inside or outside its contours; and if path contour overlaps
+ itself or another path contour, whether the overlaps form part of the area.
+ path is transformed by SkMatrix before it is combined with clip.
+
+ @param path SkPath to combine with clip
+ @param op SkClipOp to apply to clip
+ @param doAntiAlias true if clip is to be anti-aliased
+
+ example: https://fiddle.skia.org/c/@Canvas_clipPath
+ */
+ void clipPath(const SkPath& path, SkClipOp op, bool doAntiAlias);
+
+ /** Replaces clip with the intersection or difference of clip and path.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ SkPath::FillType determines if path
+ describes the area inside or outside its contours; and if path contour overlaps
+ itself or another path contour, whether the overlaps form part of the area.
+ path is transformed by SkMatrix
+ before it is combined with clip.
+
+ @param path SkPath to combine with clip
+ @param op SkClipOp to apply to clip
+ */
+ void clipPath(const SkPath& path, SkClipOp op) {
+ this->clipPath(path, op, false);
+ }
+
+ /** Replaces clip with the intersection of clip and path.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ SkPath::FillType determines if path
+ describes the area inside or outside its contours; and if path contour overlaps
+ itself or another path contour, whether the overlaps form part of the area.
+ path is transformed by SkMatrix before it is combined with clip.
+
+ @param path SkPath to combine with clip
+ @param doAntiAlias true if clip is to be anti-aliased
+ */
+ void clipPath(const SkPath& path, bool doAntiAlias = false) {
+ this->clipPath(path, SkClipOp::kIntersect, doAntiAlias);
+ }
+
+ void clipShader(sk_sp<SkShader>, SkClipOp = SkClipOp::kIntersect);
+
+ /** Replaces clip with the intersection or difference of clip and SkRegion deviceRgn.
+ Resulting clip is aliased; pixels are fully contained by the clip.
+ deviceRgn is unaffected by SkMatrix.
+
+ @param deviceRgn SkRegion to combine with clip
+ @param op SkClipOp to apply to clip
+
+ example: https://fiddle.skia.org/c/@Canvas_clipRegion
+ */
+ void clipRegion(const SkRegion& deviceRgn, SkClipOp op = SkClipOp::kIntersect);
+
+ /** Returns true if SkRect rect, transformed by SkMatrix, can be quickly determined to be
+ outside of clip. May return false even though rect is outside of clip.
+
+ Use to check if an area to be drawn is clipped out, to skip subsequent draw calls.
+
+ @param rect SkRect to compare with clip
+ @return true if rect, transformed by SkMatrix, does not intersect clip
+
+ example: https://fiddle.skia.org/c/@Canvas_quickReject
+ */
+ bool quickReject(const SkRect& rect) const;
+
+ /** Returns true if path, transformed by SkMatrix, can be quickly determined to be
+ outside of clip. May return false even though path is outside of clip.
+
+ Use to check if an area to be drawn is clipped out, to skip subsequent draw calls.
+
+ @param path SkPath to compare with clip
+ @return true if path, transformed by SkMatrix, does not intersect clip
+
+ example: https://fiddle.skia.org/c/@Canvas_quickReject_2
+ */
+ bool quickReject(const SkPath& path) const;
+
+ /** Returns bounds of clip, transformed by inverse of SkMatrix. If clip is empty,
+ return SkRect::MakeEmpty, where all SkRect sides equal zero.
+
+ SkRect returned is outset by one to account for partial pixel coverage if clip
+ is anti-aliased.
+
+ @return bounds of clip in local coordinates
+
+ example: https://fiddle.skia.org/c/@Canvas_getLocalClipBounds
+ */
+ SkRect getLocalClipBounds() const;
+
+ /** Returns bounds of clip, transformed by inverse of SkMatrix. If clip is empty,
+ return false, and set bounds to SkRect::MakeEmpty, where all SkRect sides equal zero.
+
+ bounds is outset by one to account for partial pixel coverage if clip
+ is anti-aliased.
+
+ @param bounds SkRect of clip in local coordinates
+ @return true if clip bounds is not empty
+ */
+ bool getLocalClipBounds(SkRect* bounds) const {
+ *bounds = this->getLocalClipBounds();
+ return !bounds->isEmpty();
+ }
+
+    /** Returns SkIRect bounds of clip, unaffected by SkMatrix. If clip is empty,
+        return SkIRect::MakeEmpty(), where all SkIRect sides equal zero.
+
+ Unlike getLocalClipBounds(), returned SkIRect is not outset.
+
+ @return bounds of clip in SkBaseDevice coordinates
+
+ example: https://fiddle.skia.org/c/@Canvas_getDeviceClipBounds
+ */
+ SkIRect getDeviceClipBounds() const;
+
+    /** Returns SkIRect bounds of clip, unaffected by SkMatrix. If clip is empty,
+        return false, and set bounds to SkIRect::MakeEmpty(), where all SkIRect sides equal zero.
+
+        Unlike getLocalClipBounds(), bounds is not outset.
+
+        @param bounds SkIRect of clip in device coordinates
+ @return true if clip bounds is not empty
+ */
+ bool getDeviceClipBounds(SkIRect* bounds) const {
+ *bounds = this->getDeviceClipBounds();
+ return !bounds->isEmpty();
+ }
+
+ /** Fills clip with color color.
+ mode determines how ARGB is combined with destination.
+
+ @param color unpremultiplied ARGB
+ @param mode SkBlendMode used to combine source color and destination
+
+ example: https://fiddle.skia.org/c/@Canvas_drawColor
+ */
+ void drawColor(SkColor color, SkBlendMode mode = SkBlendMode::kSrcOver) {
+ this->drawColor(SkColor4f::FromColor(color), mode);
+ }
+
+ /** Fills clip with color color.
+ mode determines how ARGB is combined with destination.
+
+ @param color SkColor4f representing unpremultiplied color.
+ @param mode SkBlendMode used to combine source color and destination
+ */
+ void drawColor(const SkColor4f& color, SkBlendMode mode = SkBlendMode::kSrcOver);
+
+ /** Fills clip with color color using SkBlendMode::kSrc.
+ This has the effect of replacing all pixels contained by clip with color.
+
+ @param color unpremultiplied ARGB
+ */
+ void clear(SkColor color) {
+ this->clear(SkColor4f::FromColor(color));
+ }
+
+ /** Fills clip with color color using SkBlendMode::kSrc.
+ This has the effect of replacing all pixels contained by clip with color.
+
+ @param color SkColor4f representing unpremultiplied color.
+ */
+ void clear(const SkColor4f& color) {
+ this->drawColor(color, SkBlendMode::kSrc);
+ }
+
+ /** Makes SkCanvas contents undefined. Subsequent calls that read SkCanvas pixels,
+ such as drawing with SkBlendMode, return undefined results. discard() does
+ not change clip or SkMatrix.
+
+ discard() may do nothing, depending on the implementation of SkSurface or SkBaseDevice
+ that created SkCanvas.
+
+ discard() allows optimized performance on subsequent draws by removing
+ cached data associated with SkSurface or SkBaseDevice.
+ It is not necessary to call discard() once done with SkCanvas;
+ any cached data is deleted when owning SkSurface or SkBaseDevice is deleted.
+ */
+ void discard() { this->onDiscard(); }
+
+ /** Fills clip with SkPaint paint. SkPaint components, SkShader,
+ SkColorFilter, SkImageFilter, and SkBlendMode affect drawing;
+ SkMaskFilter and SkPathEffect in paint are ignored.
+
+ @param paint graphics state used to fill SkCanvas
+
+ example: https://fiddle.skia.org/c/@Canvas_drawPaint
+ */
+ void drawPaint(const SkPaint& paint);
+
+ /** \enum SkCanvas::PointMode
+ Selects if an array of points are drawn as discrete points, as lines, or as
+ an open polygon.
+ */
+ enum PointMode {
+ kPoints_PointMode, //!< draw each point separately
+ kLines_PointMode, //!< draw each pair of points as a line segment
+        kPolygon_PointMode, //!< draw the array of points as an open polygon
+ };
+
+ /** Draws pts using clip, SkMatrix and SkPaint paint.
+ count is the number of points; if count is less than one, has no effect.
+ mode may be one of: kPoints_PointMode, kLines_PointMode, or kPolygon_PointMode.
+
+ If mode is kPoints_PointMode, the shape of point drawn depends on paint
+ SkPaint::Cap. If paint is set to SkPaint::kRound_Cap, each point draws a
+ circle of diameter SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap
+ or SkPaint::kButt_Cap, each point draws a square of width and height
+ SkPaint stroke width.
+
+ If mode is kLines_PointMode, each pair of points draws a line segment.
+ One line is drawn for every two points; each point is used once. If count is odd,
+ the final point is ignored.
+
+ If mode is kPolygon_PointMode, each adjacent pair of points draws a line segment.
+ count minus one lines are drawn; the first and last point are used once.
+
+ Each line segment respects paint SkPaint::Cap and SkPaint stroke width.
+        SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ Always draws each element one at a time; is not affected by
+ SkPaint::Join, and unlike drawPath(), does not create a mask from all points
+ and lines before drawing.
+
+ @param mode whether pts draws points or lines
+ @param count number of points in the array
+ @param pts array of points to draw
+ @param paint stroke, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawPoints
+ */
+ void drawPoints(PointMode mode, size_t count, const SkPoint pts[], const SkPaint& paint);
+
+ /** Draws point at (x, y) using clip, SkMatrix and SkPaint paint.
+
+ The shape of point drawn depends on paint SkPaint::Cap.
+ If paint is set to SkPaint::kRound_Cap, draw a circle of diameter
+ SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap or SkPaint::kButt_Cap,
+ draw a square of width and height SkPaint stroke width.
+        SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ @param x left edge of circle or square
+ @param y top edge of circle or square
+ @param paint stroke, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawPoint
+ */
+ void drawPoint(SkScalar x, SkScalar y, const SkPaint& paint);
+
+ /** Draws point p using clip, SkMatrix and SkPaint paint.
+
+ The shape of point drawn depends on paint SkPaint::Cap.
+ If paint is set to SkPaint::kRound_Cap, draw a circle of diameter
+ SkPaint stroke width. If paint is set to SkPaint::kSquare_Cap or SkPaint::kButt_Cap,
+ draw a square of width and height SkPaint stroke width.
+        SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ @param p top-left edge of circle or square
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawPoint(SkPoint p, const SkPaint& paint) {
+ this->drawPoint(p.x(), p.y(), paint);
+ }
+
+ /** Draws line segment from (x0, y0) to (x1, y1) using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint stroke width describes the line thickness;
+ SkPaint::Cap draws the end rounded or square;
+        SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ @param x0 start of line segment on x-axis
+ @param y0 start of line segment on y-axis
+ @param x1 end of line segment on x-axis
+ @param y1 end of line segment on y-axis
+ @param paint stroke, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawLine
+ */
+ void drawLine(SkScalar x0, SkScalar y0, SkScalar x1, SkScalar y1, const SkPaint& paint);
+
+ /** Draws line segment from p0 to p1 using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint stroke width describes the line thickness;
+ SkPaint::Cap draws the end rounded or square;
+        SkPaint::Style is ignored, as if it were set to SkPaint::kStroke_Style.
+
+ @param p0 start of line segment
+ @param p1 end of line segment
+ @param paint stroke, blend, color, and so on, used to draw
+ */
+ void drawLine(SkPoint p0, SkPoint p1, const SkPaint& paint) {
+ this->drawLine(p0.x(), p0.y(), p1.x(), p1.y(), paint);
+ }
+
+ /** Draws SkRect rect using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint::Style determines if rectangle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness, and
+ SkPaint::Join draws the corners rounded or square.
+
+ @param rect rectangle to draw
+ @param paint stroke or fill, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawRect
+ */
+ void drawRect(const SkRect& rect, const SkPaint& paint);
+
+ /** Draws SkIRect rect using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint::Style determines if rectangle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness, and
+ SkPaint::Join draws the corners rounded or square.
+
+ @param rect rectangle to draw
+ @param paint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawIRect(const SkIRect& rect, const SkPaint& paint) {
+ SkRect r;
+ r.set(rect); // promotes the ints to scalars
+ this->drawRect(r, paint);
+ }
+
+ /** Draws SkRegion region using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint::Style determines if rectangle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness, and
+ SkPaint::Join draws the corners rounded or square.
+
+ @param region region to draw
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawRegion
+ */
+ void drawRegion(const SkRegion& region, const SkPaint& paint);
+
+ /** Draws oval oval using clip, SkMatrix, and SkPaint.
+ In paint: SkPaint::Style determines if oval is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+
+ @param oval SkRect bounds of oval
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawOval
+ */
+ void drawOval(const SkRect& oval, const SkPaint& paint);
+
+ /** Draws SkRRect rrect using clip, SkMatrix, and SkPaint paint.
+ In paint: SkPaint::Style determines if rrect is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+
+ rrect may represent a rectangle, circle, oval, uniformly rounded rectangle, or
+ may have any combination of positive non-square radii for the four corners.
+
+ @param rrect SkRRect with up to eight corner radii to draw
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawRRect
+ */
+ void drawRRect(const SkRRect& rrect, const SkPaint& paint);
+
+ /** Draws SkRRect outer and inner
+ using clip, SkMatrix, and SkPaint paint.
+ outer must contain inner or the drawing is undefined.
+ In paint: SkPaint::Style determines if SkRRect is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+ If stroked and SkRRect corner has zero length radii, SkPaint::Join can
+ draw corners rounded or square.
+
+ GPU-backed platforms optimize drawing when both outer and inner are
+ concave and outer contains inner. These platforms may not be able to draw
+ SkPath built with identical data as fast.
+
+ @param outer SkRRect outer bounds to draw
+ @param inner SkRRect inner bounds to draw
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawDRRect_a
+ example: https://fiddle.skia.org/c/@Canvas_drawDRRect_b
+ */
+ void drawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint);
+
+ /** Draws circle at (cx, cy) with radius using clip, SkMatrix, and SkPaint paint.
+ If radius is zero or less, nothing is drawn.
+ In paint: SkPaint::Style determines if circle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+
+ @param cx circle center on the x-axis
+ @param cy circle center on the y-axis
+ @param radius half the diameter of circle
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawCircle
+ */
+ void drawCircle(SkScalar cx, SkScalar cy, SkScalar radius, const SkPaint& paint);
+
+ /** Draws circle at center with radius using clip, SkMatrix, and SkPaint paint.
+ If radius is zero or less, nothing is drawn.
+ In paint: SkPaint::Style determines if circle is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+
+ @param center circle center
+ @param radius half the diameter of circle
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawCircle(SkPoint center, SkScalar radius, const SkPaint& paint) {
+ this->drawCircle(center.x(), center.y(), radius, paint);
+ }
+
+ /** Draws arc using clip, SkMatrix, and SkPaint paint.
+
+ Arc is part of oval bounded by oval, sweeping from startAngle to startAngle plus
+ sweepAngle. startAngle and sweepAngle are in degrees.
+
+ startAngle of zero places start point at the right middle edge of oval.
+ A positive sweepAngle places arc end point clockwise from start point;
+ a negative sweepAngle places arc end point counterclockwise from start point.
+ sweepAngle may exceed 360 degrees, a full circle.
+ If useCenter is true, draw a wedge that includes lines from oval
+ center to arc end points. If useCenter is false, draw arc between end points.
+
+ If SkRect oval is empty or sweepAngle is zero, nothing is drawn.
+
+ @param oval SkRect bounds of oval containing arc to draw
+ @param startAngle angle in degrees where arc begins
+ @param sweepAngle sweep angle in degrees; positive is clockwise
+ @param useCenter if true, include the center of the oval
+ @param paint SkPaint stroke or fill, blend, color, and so on, used to draw
+ */
+ void drawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint);
+
+ /** Draws SkRRect bounded by SkRect rect, with corner radii (rx, ry) using clip,
+ SkMatrix, and SkPaint paint.
+
+ In paint: SkPaint::Style determines if SkRRect is stroked or filled;
+ if stroked, SkPaint stroke width describes the line thickness.
+ If rx or ry are less than zero, they are treated as if they are zero.
+ If rx plus ry exceeds rect width or rect height, radii are scaled down to fit.
+ If rx and ry are zero, SkRRect is drawn as SkRect and if stroked is affected by
+ SkPaint::Join.
+
+ @param rect SkRect bounds of SkRRect to draw
+ @param rx axis length on x-axis of oval describing rounded corners
+ @param ry axis length on y-axis of oval describing rounded corners
+ @param paint stroke, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawRoundRect
+ */
+ void drawRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry, const SkPaint& paint);
+
+ /** Draws SkPath path using clip, SkMatrix, and SkPaint paint.
+ SkPath contains an array of path contour, each of which may be open or closed.
+
+        In paint: SkPaint::Style determines if SkPath is stroked or filled:
+ if filled, SkPath::FillType determines whether path contour describes inside or
+ outside of fill; if stroked, SkPaint stroke width describes the line thickness,
+ SkPaint::Cap describes line ends, and SkPaint::Join describes how
+ corners are drawn.
+
+ @param path SkPath to draw
+ @param paint stroke, blend, color, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawPath
+ */
+ void drawPath(const SkPath& path, const SkPaint& paint);
+
+ void drawImage(const SkImage* image, SkScalar left, SkScalar top) {
+ this->drawImage(image, left, top, SkSamplingOptions(), nullptr);
+ }
+ void drawImage(const sk_sp<SkImage>& image, SkScalar left, SkScalar top) {
+ this->drawImage(image.get(), left, top, SkSamplingOptions(), nullptr);
+ }
+
+ /** \enum SkCanvas::SrcRectConstraint
+ SrcRectConstraint controls the behavior at the edge of source SkRect,
+ provided to drawImageRect() when there is any filtering. If kStrict is set,
+ then extra code is used to ensure it never samples outside of the src-rect.
+ kStrict_SrcRectConstraint disables the use of mipmaps and anisotropic filtering.
+ */
+ enum SrcRectConstraint {
+ kStrict_SrcRectConstraint, //!< sample only inside bounds; slower
+ kFast_SrcRectConstraint, //!< sample outside bounds; faster
+ };
+
+ void drawImage(const SkImage*, SkScalar x, SkScalar y, const SkSamplingOptions&,
+ const SkPaint* = nullptr);
+ void drawImage(const sk_sp<SkImage>& image, SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling, const SkPaint* paint = nullptr) {
+ this->drawImage(image.get(), x, y, sampling, paint);
+ }
+ void drawImageRect(const SkImage*, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions&, const SkPaint*, SrcRectConstraint);
+ void drawImageRect(const SkImage*, const SkRect& dst, const SkSamplingOptions&,
+ const SkPaint* = nullptr);
+ void drawImageRect(const sk_sp<SkImage>& image, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ this->drawImageRect(image.get(), src, dst, sampling, paint, constraint);
+ }
+ void drawImageRect(const sk_sp<SkImage>& image, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint* paint = nullptr) {
+ this->drawImageRect(image.get(), dst, sampling, paint);
+ }
+
+ /** Draws SkImage image stretched proportionally to fit into SkRect dst.
+ SkIRect center divides the image into nine sections: four sides, four corners, and
+ the center. Corners are unmodified or scaled down proportionately if their sides
+ are larger than dst; center and four sides are scaled to fit remaining space, if any.
+
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter, and
+ SkBlendMode. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds.
+ Any SkMaskFilter on paint is ignored as is paint anti-aliasing state.
+
+ If generated mask extends beyond image bounds, replicate image edge colors, just
+ as SkShader made from SkImage::makeShader with SkShader::kClamp_TileMode set
+ replicates the image edge color when it samples outside of its bounds.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param center SkIRect edge of image corners and sides
+ @param dst destination SkRect of image to draw to
+ @param filter what technique to use when sampling the image
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ SkFilterMode filter, const SkPaint* paint = nullptr);
+
+ /** \struct SkCanvas::Lattice
+ SkCanvas::Lattice divides SkBitmap or SkImage into a rectangular grid.
+ Grid entries on even columns and even rows are fixed; these entries are
+ always drawn at their original size if the destination is large enough.
+ If the destination side is too small to hold the fixed entries, all fixed
+ entries are proportionately scaled down to fit.
+ The grid entries not on even columns and rows are scaled to fit the
+ remaining space, if any.
+ */
+ struct Lattice {
+
+ /** \enum SkCanvas::Lattice::RectType
+ Optional setting per rectangular grid entry to make it transparent,
+ or to fill the grid entry with a color.
+ */
+ enum RectType : uint8_t {
+ kDefault = 0, //!< draws SkBitmap into lattice rectangle
+ kTransparent, //!< skips lattice rectangle by making it transparent
+ kFixedColor, //!< draws one of fColors into lattice rectangle
+ };
+
+ const int* fXDivs; //!< x-axis values dividing bitmap
+ const int* fYDivs; //!< y-axis values dividing bitmap
+ const RectType* fRectTypes; //!< array of fill types
+ int fXCount; //!< number of x-coordinates
+ int fYCount; //!< number of y-coordinates
+ const SkIRect* fBounds; //!< source bounds to draw from
+ const SkColor* fColors; //!< array of colors
+ };
+
+ /** Draws SkImage image stretched proportionally to fit into SkRect dst.
+
+ SkCanvas::Lattice lattice divides image into a rectangular grid.
+ Each intersection of an even-numbered row and column is fixed;
+ fixed lattice elements never scale larger than their initial
+ size and shrink proportionately when all fixed elements exceed the bitmap
+ dimension. All other grid elements scale to fill the available space, if any.
+
+ Additionally transform draw using clip, SkMatrix, and optional SkPaint paint.
+
+ If SkPaint paint is supplied, apply SkColorFilter, alpha, SkImageFilter, and
+ SkBlendMode. If image is kAlpha_8_SkColorType, apply SkShader.
+ If paint contains SkMaskFilter, generate mask from image bounds.
+ Any SkMaskFilter on paint is ignored as is paint anti-aliasing state.
+
+ If generated mask extends beyond bitmap bounds, replicate bitmap edge colors,
+ just as SkShader made from SkShader::MakeBitmapShader with
+ SkShader::kClamp_TileMode set replicates the bitmap edge color when it samples
+ outside of its bounds.
+
+ @param image SkImage containing pixels, dimensions, and format
+ @param lattice division of bitmap into fixed and variable rectangles
+ @param dst destination SkRect of image to draw to
+ @param filter what technique to use when sampling the image
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+ */
+ void drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ SkFilterMode filter, const SkPaint* paint = nullptr);
+ void drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst) {
+ // Convenience overload: forwards to the primary drawImageLattice() with
+ // nearest-neighbor filtering and no paint.
+ this->drawImageLattice(image, lattice, dst, SkFilterMode::kNearest, nullptr);
+ }
+
+ /**
+ * Experimental. Controls anti-aliasing of each edge of images in an image-set.
+ */
+ enum QuadAAFlags : unsigned {
+ // One bit per rectangle edge; values may be combined with bitwise OR to
+ // anti-alias any subset of edges.
+ kLeft_QuadAAFlag = 0b0001,
+ kTop_QuadAAFlag = 0b0010,
+ kRight_QuadAAFlag = 0b0100,
+ kBottom_QuadAAFlag = 0b1000,
+
+ kNone_QuadAAFlags = 0b0000, // no edge anti-aliased
+ kAll_QuadAAFlags = 0b1111, // all four edges anti-aliased
+ };
+
+ /** This is used by the experimental API below. */
+ struct SK_API ImageSetEntry {
+ ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect, const SkRect& dstRect,
+ int matrixIndex, float alpha, unsigned aaFlags, bool hasClip);
+
+ ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect, const SkRect& dstRect,
+ float alpha, unsigned aaFlags);
+
+ ImageSetEntry();
+ ~ImageSetEntry();
+ ImageSetEntry(const ImageSetEntry&);
+ ImageSetEntry& operator=(const ImageSetEntry&);
+
+ sk_sp<const SkImage> fImage; // image sampled for this entry (shared ownership)
+ SkRect fSrcRect; // sub-rect of fImage sampled from
+ SkRect fDstRect; // destination rect the corners of fSrcRect map onto
+ int fMatrixIndex = -1; // Index into the preViewMatrices arg, or < 0
+ float fAlpha = 1.f; // entry-specific alpha applied when drawing
+ unsigned fAAFlags = kNone_QuadAAFlags; // QuadAAFlags
+ bool fHasClip = false; // True to use next 4 points in dstClip arg as quad
+ };
+
+ /**
+ * This is an experimental API for the SkiaRenderer Chromium project, and its API will surely
+ * evolve if it is not removed outright.
+ *
+ * This behaves very similarly to drawRect() combined with a clipPath() formed by clip
+ * quadrilateral. 'rect' and 'clip' are in the same coordinate space. If 'clip' is null, then it
+ * is as if the rectangle was not clipped (or, alternatively, clipped to itself). If not null,
+ * then it must provide 4 points.
+ *
+ * In addition to combining the draw and clipping into one operation, this function adds the
+ * additional capability of controlling each of the rectangle's edges anti-aliasing
+ * independently. The edges of the clip will respect the per-edge AA flags. It is required that
+ * 'clip' be contained inside 'rect'. In terms of mapping to edge labels, the 'clip' points
+ * should be ordered top-left, top-right, bottom-right, bottom-left so that the edge between [0]
+ * and [1] is "top", [1] and [2] is "right", [2] and [3] is "bottom", and [3] and [0] is "left".
+ * This ordering matches SkRect::toQuad().
+ *
+ * This API only draws solid color, filled rectangles so it does not accept a full SkPaint.
+ */
+ void experimental_DrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags,
+ const SkColor4f& color, SkBlendMode mode);
+ void experimental_DrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags,
+ SkColor color, SkBlendMode mode) {
+ // Convenience overload: widens the 8888 SkColor to SkColor4f and forwards
+ // to the primary experimental_DrawEdgeAAQuad() declared above.
+ this->experimental_DrawEdgeAAQuad(rect, clip, aaFlags, SkColor4f::FromColor(color), mode);
+ }
+
+ /**
+ * This is a bulk variant of experimental_DrawEdgeAAQuad() that renders 'cnt' textured quads.
+ * For each entry, 'fDstRect' is rendered with its clip (determined by entry's 'fHasClip' and
+ * the current index in 'dstClip'). The entry's fImage is applied to the destination rectangle
+ * by sampling from 'fSrcRect' sub-image. The corners of 'fSrcRect' map to the corners of
+ * 'fDstRect', just like in drawImageRect(), and they will be properly interpolated when
+ * applying a clip.
+ *
+ * Like experimental_DrawEdgeAAQuad(), each entry can specify edge AA flags that apply to both
+ * the destination rect and its clip.
+ *
+ * If provided, the 'dstClips' array must have length equal 4 * the number of entries with
+ * fHasClip true. If 'dstClips' is null, every entry must have 'fHasClip' set to false. The
+ * destination clip coordinates will be read consecutively with the image set entries, advancing
+ * by 4 points every time an entry with fHasClip is passed.
+ *
+ * This entry point supports per-entry manipulations to the canvas's current matrix. If an
+ * entry provides 'fMatrixIndex' >= 0, it will be drawn as if the canvas's CTM was
+ * canvas->getTotalMatrix() * preViewMatrices[fMatrixIndex]. If 'fMatrixIndex' is less than 0,
+ * the pre-view matrix transform is implicitly the identity, so it will be drawn using just the
+ * current canvas matrix. The pre-view matrix modifies the canvas's view matrix, it does not
+ * affect the local coordinates of each entry.
+ *
+ * An optional paint may be provided, which supports the same subset of features usable with
+ * drawImageRect (i.e. assumed to be filled and no path effects). When a paint is provided, the
+ * image set is drawn as if each image used the applied paint independently, so each is affected
+ * by the image, color, and/or mask filter.
+ */
+ void experimental_DrawEdgeAAImageSet(const ImageSetEntry imageSet[], int cnt,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkSamplingOptions&, const SkPaint* paint = nullptr,
+ SrcRectConstraint constraint = kStrict_SrcRectConstraint);
+
+ /** Draws text, with origin at (x, y), using clip, SkMatrix, SkFont font,
+ and SkPaint paint.
+
+ When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32, this function uses the default
+ character-to-glyph mapping from the SkTypeface in font. It does not
+ perform typeface fallback for characters not found in the SkTypeface.
+ It does not perform kerning or other complex shaping; glyphs are
+ positioned based on their default advances.
+
+ Text meaning depends on SkTextEncoding.
+
+ Text size is affected by SkMatrix and SkFont text size. Default text
+ size is 12 point.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, and SkImageFilter; apply to text. By
+ default, draws filled black glyphs.
+
+ @param text character code points or glyphs drawn
+ @param byteLength byte length of text array
+ @param encoding text encoding used in the text array
+ @param x start of text on x-axis
+ @param y start of text on y-axis
+ @param font typeface, text size and so, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawSimpleText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkScalar x, SkScalar y, const SkFont& font, const SkPaint& paint);
+
+ /** Draws null terminated string, with origin at (x, y), using clip, SkMatrix,
+ SkFont font, and SkPaint paint.
+
+ This function uses the default character-to-glyph mapping from the
+ SkTypeface in font. It does not perform typeface fallback for
+ characters not found in the SkTypeface. It does not perform kerning;
+ glyphs are positioned based on their default advances.
+
+ String str is encoded as UTF-8.
+
+ Text size is affected by SkMatrix and font text size. Default text
+ size is 12 point.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, and SkImageFilter; apply to text. By
+ default, draws filled black glyphs.
+
+ @param str character code points drawn,
+ ending with a char value of zero
+ @param x start of string on x-axis
+ @param y start of string on y-axis
+ @param font typeface, text size and so, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawString(const char str[], SkScalar x, SkScalar y, const SkFont& font,
+ const SkPaint& paint) {
+ // Forwards to drawSimpleText() as UTF-8; str must not be nullptr since
+ // strlen() dereferences it unconditionally.
+ this->drawSimpleText(str, strlen(str), SkTextEncoding::kUTF8, x, y, font, paint);
+ }
+
+ /** Draws SkString, with origin at (x, y), using clip, SkMatrix, SkFont font,
+ and SkPaint paint.
+
+ This function uses the default character-to-glyph mapping from the
+ SkTypeface in font. It does not perform typeface fallback for
+ characters not found in the SkTypeface. It does not perform kerning;
+ glyphs are positioned based on their default advances.
+
+ SkString str is encoded as UTF-8.
+
+ Text size is affected by SkMatrix and SkFont text size. Default text
+ size is 12 point.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, and SkImageFilter; apply to text. By
+ default, draws filled black glyphs.
+
+ @param str character code points drawn,
+ ending with a char value of zero
+ @param x start of string on x-axis
+ @param y start of string on y-axis
+ @param font typeface, text size and so, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawString(const SkString& str, SkScalar x, SkScalar y, const SkFont& font,
+ const SkPaint& paint) {
+ // Forwards to drawSimpleText() as UTF-8, using the SkString's stored
+ // length rather than scanning for a terminator.
+ this->drawSimpleText(str.c_str(), str.size(), SkTextEncoding::kUTF8, x, y, font, paint);
+ }
+
+ /** Draws count glyphs, at positions relative to origin styled with font and paint with
+ supporting utf8 and cluster information.
+
+ This function draw glyphs at the given positions relative to the given origin.
+ It does not perform typeface fallback for glyphs not found in the SkTypeface in font.
+
+ The drawing obeys the current transform matrix and clipping.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, and SkImageFilter; apply to text. By
+ default, draws filled black glyphs.
+
+ @param count number of glyphs to draw
+ @param glyphs the array of glyphIDs to draw
+ @param positions where to draw each glyph relative to origin
+ @param clusters array of size count of cluster information
+ @param textByteCount size of the utf8text
+ @param utf8text utf8text supporting information for the glyphs
+ @param origin the origin of all the positions
+ @param font typeface, text size and so, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawGlyphs(int count, const SkGlyphID glyphs[], const SkPoint positions[],
+ const uint32_t clusters[], int textByteCount, const char utf8text[],
+ SkPoint origin, const SkFont& font, const SkPaint& paint);
+
+ /** Draws count glyphs, at positions relative to origin styled with font and paint.
+
+ This function draw glyphs at the given positions relative to the given origin.
+ It does not perform typeface fallback for glyphs not found in the SkTypeface in font.
+
+ The drawing obeys the current transform matrix and clipping.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, and SkImageFilter; apply to text. By
+ default, draws filled black glyphs.
+
+ @param count number of glyphs to draw
+ @param glyphs the array of glyphIDs to draw
+ @param positions where to draw each glyph relative to origin
+ @param origin the origin of all the positions
+ @param font typeface, text size and so, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawGlyphs(int count, const SkGlyphID glyphs[], const SkPoint positions[],
+ SkPoint origin, const SkFont& font, const SkPaint& paint);
+
+ /** Draws count glyphs, at positions relative to origin styled with font and paint.
+
+ This function draw glyphs using the given scaling and rotations. They are positioned
+ relative to the given origin. It does not perform typeface fallback for glyphs not found
+ in the SkTypeface in font.
+
+ The drawing obeys the current transform matrix and clipping.
+
+ All elements of paint: SkPathEffect, SkMaskFilter, SkShader,
+ SkColorFilter, and SkImageFilter; apply to text. By
+ default, draws filled black glyphs.
+
+ @param count number of glyphs to draw
+ @param glyphs the array of glyphIDs to draw
+ @param xforms where to draw and orient each glyph
+ @param origin the origin of all the positions
+ @param font typeface, text size and so, used to describe the text
+ @param paint blend, color, and so on, used to draw
+ */
+ void drawGlyphs(int count, const SkGlyphID glyphs[], const SkRSXform xforms[],
+ SkPoint origin, const SkFont& font, const SkPaint& paint);
+
+ /** Draws SkTextBlob blob at (x, y), using clip, SkMatrix, and SkPaint paint.
+
+ blob contains glyphs, their positions, and paint attributes specific to text:
+ SkTypeface, SkPaint text size, SkPaint text scale x,
+ SkPaint text skew x, SkPaint::Align, SkPaint::Hinting, anti-alias, SkPaint fake bold,
+ SkPaint font embedded bitmaps, SkPaint full hinting spacing, LCD text, SkPaint linear text,
+ and SkPaint subpixel text.
+
+ SkTextEncoding must be set to SkTextEncoding::kGlyphID.
+
+ Elements of paint: anti-alias, SkBlendMode, color including alpha,
+ SkColorFilter, SkPaint dither, SkMaskFilter, SkPathEffect, SkShader, and
+ SkPaint::Style; apply to blob. If SkPaint contains SkPaint::kStroke_Style:
+ SkPaint miter limit, SkPaint::Cap, SkPaint::Join, and SkPaint stroke width;
+ apply to SkPath created from blob.
+
+ @param blob glyphs, positions, and their paints' text size, typeface, and so on
+ @param x horizontal offset applied to blob
+ @param y vertical offset applied to blob
+ @param paint blend, color, stroking, and so on, used to draw
+
+ example: https://fiddle.skia.org/c/@Canvas_drawTextBlob
+ */
+ void drawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y, const SkPaint& paint);
+
+ /** Draws SkTextBlob blob at (x, y), using clip, SkMatrix, and SkPaint paint.
+
+ blob contains glyphs, their positions, and paint attributes specific to text:
+ SkTypeface, SkPaint text size, SkPaint text scale x,
+ SkPaint text skew x, SkPaint::Align, SkPaint::Hinting, anti-alias, SkPaint fake bold,
+ SkPaint font embedded bitmaps, SkPaint full hinting spacing, LCD text, SkPaint linear text,
+ and SkPaint subpixel text.
+
+ SkTextEncoding must be set to SkTextEncoding::kGlyphID.
+
+ Elements of paint: SkPathEffect, SkMaskFilter, SkShader, SkColorFilter,
+ and SkImageFilter; apply to blob.
+
+ @param blob glyphs, positions, and their paints' text size, typeface, and so on
+ @param x horizontal offset applied to blob
+ @param y vertical offset applied to blob
+ @param paint blend, color, stroking, and so on, used to draw
+ */
+ void drawTextBlob(const sk_sp<SkTextBlob>& blob, SkScalar x, SkScalar y, const SkPaint& paint) {
+ // Borrows the blob: passes the raw pointer through without adding a ref.
+ this->drawTextBlob(blob.get(), x, y, paint);
+ }
+
+ /** Draws SkPicture picture, using clip and SkMatrix.
+ Clip and SkMatrix are unchanged by picture contents, as if
+ save() was called before and restore() was called after drawPicture().
+
+ SkPicture records a series of draw commands for later playback.
+
+ @param picture recorded drawing commands to play
+ */
+ void drawPicture(const SkPicture* picture) {
+ // Forwards with no additional transform and no paint.
+ this->drawPicture(picture, nullptr, nullptr);
+ }
+
+ /** Draws SkPicture picture, using clip and SkMatrix.
+ Clip and SkMatrix are unchanged by picture contents, as if
+ save() was called before and restore() was called after drawPicture().
+
+ SkPicture records a series of draw commands for later playback.
+
+ @param picture recorded drawing commands to play
+ */
+ void drawPicture(const sk_sp<SkPicture>& picture) {
+ // Borrows the picture and forwards to the raw-pointer overload.
+ this->drawPicture(picture.get());
+ }
+
+ /** Draws SkPicture picture, using clip and SkMatrix; transforming picture with
+ SkMatrix matrix, if provided; and use SkPaint paint alpha, SkColorFilter,
+ SkImageFilter, and SkBlendMode, if provided.
+
+ If paint is non-null, then the picture is always drawn into a temporary layer before
+ actually landing on the canvas. Note that drawing into a layer can also change its
+ appearance if there are any non-associative blendModes inside any of the pictures elements.
+
+ @param picture recorded drawing commands to play
+ @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr
+ @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr
+
+ example: https://fiddle.skia.org/c/@Canvas_drawPicture_3
+ */
+ void drawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint);
+
+ /** Draws SkPicture picture, using clip and SkMatrix; transforming picture with
+ SkMatrix matrix, if provided; and use SkPaint paint alpha, SkColorFilter,
+ SkImageFilter, and SkBlendMode, if provided.
+
+ If paint is non-null, then the picture is always drawn into a temporary layer before
+ actually landing on the canvas. Note that drawing into a layer can also change its
+ appearance if there are any non-associative blendModes inside any of the pictures elements.
+
+ @param picture recorded drawing commands to play
+ @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr
+ @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr
+ */
+ void drawPicture(const sk_sp<SkPicture>& picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ // Borrows the picture and forwards to the raw-pointer overload above.
+ this->drawPicture(picture.get(), matrix, paint);
+ }
+
+ /** Draws SkVertices vertices, a triangle mesh, using clip and SkMatrix.
+ If paint contains an SkShader and vertices does not contain texCoords, the shader
+ is mapped using the vertices' positions.
+
+ SkBlendMode is ignored if SkVertices does not have colors. Otherwise, it combines
+ - the SkShader if SkPaint contains SkShader
+ - or the opaque SkPaint color if SkPaint does not contain SkShader
+ as the src of the blend and the interpolated vertex colors as the dst.
+
+ SkMaskFilter, SkPathEffect, and antialiasing on SkPaint are ignored.
+
+ @param vertices triangle mesh to draw
+ @param mode combines vertices' colors with SkShader if present or SkPaint opaque color
+ if not. Ignored if the vertices do not contain color.
+ @param paint specifies the SkShader, used as SkVertices texture, and SkColorFilter.
+
+ example: https://fiddle.skia.org/c/@Canvas_drawVertices
+ */
+ void drawVertices(const SkVertices* vertices, SkBlendMode mode, const SkPaint& paint);
+
+ /** Draws SkVertices vertices, a triangle mesh, using clip and SkMatrix.
+ If paint contains an SkShader and vertices does not contain texCoords, the shader
+ is mapped using the vertices' positions.
+
+ SkBlendMode is ignored if SkVertices does not have colors. Otherwise, it combines
+ - the SkShader if SkPaint contains SkShader
+ - or the opaque SkPaint color if SkPaint does not contain SkShader
+ as the src of the blend and the interpolated vertex colors as the dst.
+
+ SkMaskFilter, SkPathEffect, and antialiasing on SkPaint are ignored.
+
+ @param vertices triangle mesh to draw
+ @param mode combines vertices' colors with SkShader if present or SkPaint opaque color
+ if not. Ignored if the vertices do not contain color.
+ @param paint specifies the SkShader, used as SkVertices texture, may be nullptr
+
+ example: https://fiddle.skia.org/c/@Canvas_drawVertices_2
+ */
+ void drawVertices(const sk_sp<SkVertices>& vertices, SkBlendMode mode, const SkPaint& paint);
+
+#if defined(SK_ENABLE_SKSL)
+ /**
+ Experimental, under active development, and subject to change without notice.
+
+ Draws a mesh using a user-defined specification (see SkMeshSpecification).
+
+ SkBlender is ignored if SkMesh's specification does not output fragment shader color.
+ Otherwise, it combines
+ - the SkShader if SkPaint contains SkShader
+ - or the opaque SkPaint color if SkPaint does not contain SkShader
+ as the src of the blend and the mesh's fragment color as the dst.
+
+ SkMaskFilter, SkPathEffect, and antialiasing on SkPaint are ignored.
+
+ @param mesh the mesh vertices and compatible specification.
+ @param blender combines vertices colors with SkShader if present or SkPaint opaque color
+ if not. Ignored if the custom mesh does not output color. Defaults to
+ SkBlendMode::kModulate if nullptr.
+ @param paint specifies the SkShader, used as SkVertices texture, may be nullptr
+ */
+ void drawMesh(const SkMesh& mesh, sk_sp<SkBlender> blender, const SkPaint& paint);
+#endif
+
+ /** Draws a Coons patch: the interpolation of four cubics with shared corners,
+ associating a color, and optionally a texture SkPoint, with each corner.
+
+ SkPoint array cubics specifies four SkPath cubic starting at the top-left corner,
+ in clockwise order, sharing every fourth point. The last SkPath cubic ends at the
+ first point.
+
+ Color array color associates colors with corners in top-left, top-right,
+ bottom-right, bottom-left order.
+
+ If paint contains SkShader, SkPoint array texCoords maps SkShader as texture to
+ corners in top-left, top-right, bottom-right, bottom-left order. If texCoords is
+ nullptr, SkShader is mapped using positions (derived from cubics).
+
+ SkBlendMode is ignored if colors is null. Otherwise, it combines
+ - the SkShader if SkPaint contains SkShader
+ - or the opaque SkPaint color if SkPaint does not contain SkShader
+ as the src of the blend and the interpolated patch colors as the dst.
+
+ SkMaskFilter, SkPathEffect, and antialiasing on SkPaint are ignored.
+
+ @param cubics SkPath cubic array, sharing common points
+ @param colors color array, one for each corner
+ @param texCoords SkPoint array of texture coordinates, mapping SkShader to corners;
+ may be nullptr
+ @param mode combines patch's colors with SkShader if present or SkPaint opaque color
+ if not. Ignored if colors is null.
+ @param paint SkShader, SkColorFilter, SkBlendMode, used to draw
+ */
+ void drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode mode, const SkPaint& paint);
+
+ /** Draws a set of sprites from atlas, using clip, SkMatrix, and optional SkPaint paint.
+ paint uses anti-alias, alpha, SkColorFilter, SkImageFilter, and SkBlendMode
+ to draw, if present. For each entry in the array, SkRect tex locates sprite in
+ atlas, and SkRSXform xform transforms it into destination space.
+
+ SkMaskFilter and SkPathEffect on paint are ignored.
+
+ xform, tex, and colors if present, must contain count entries.
+ Optional colors are applied for each sprite using SkBlendMode mode, treating
+ sprite as source and colors as destination.
+ Optional cullRect is a conservative bounds of all transformed sprites.
+ If cullRect is outside of clip, canvas can skip drawing.
+
+ If atlas is nullptr, this draws nothing.
+
+ @param atlas SkImage containing sprites
+ @param xform SkRSXform mappings for sprites in atlas
+ @param tex SkRect locations of sprites in atlas
+ @param colors one per sprite, blended with sprite using SkBlendMode; may be nullptr
+ @param count number of sprites to draw
+ @param mode SkBlendMode combining colors and sprites
+ @param sampling SkSamplingOptions used when sampling from the atlas image
+ @param cullRect bounds of transformed sprites for efficient clipping; may be nullptr
+ @param paint SkColorFilter, SkImageFilter, SkBlendMode, and so on; may be nullptr
+ */
+ void drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode,
+ const SkSamplingOptions& sampling, const SkRect* cullRect, const SkPaint* paint);
+
+ /** Draws SkDrawable drawable using clip and SkMatrix, concatenated with
+ optional matrix.
+
+ If SkCanvas has an asynchronous implementation, as is the case
+ when it is recording into SkPicture, then drawable will be referenced,
+ so that SkDrawable::draw() can be called when the operation is finalized. To force
+ immediate drawing, call SkDrawable::draw() instead.
+
+ @param drawable custom struct encapsulating drawing commands
+ @param matrix transformation applied to drawing; may be nullptr
+
+ example: https://fiddle.skia.org/c/@Canvas_drawDrawable
+ */
+ void drawDrawable(SkDrawable* drawable, const SkMatrix* matrix = nullptr);
+
+ /** Draws SkDrawable drawable using clip and SkMatrix, offset by (x, y).
+
+ If SkCanvas has an asynchronous implementation, as is the case
+ when it is recording into SkPicture, then drawable will be referenced,
+ so that SkDrawable::draw() can be called when the operation is finalized. To force
+ immediate drawing, call SkDrawable::draw() instead.
+
+ @param drawable custom struct encapsulating drawing commands
+ @param x offset into SkCanvas writable pixels on x-axis
+ @param y offset into SkCanvas writable pixels on y-axis
+
+ example: https://fiddle.skia.org/c/@Canvas_drawDrawable_2
+ */
+ void drawDrawable(SkDrawable* drawable, SkScalar x, SkScalar y);
+
+ /** Associates SkRect on SkCanvas with an annotation; a key-value pair, where the key is
+ a null-terminated UTF-8 string, and optional value is stored as SkData.
+
+ Only some canvas implementations, such as recording to SkPicture, or drawing to
+ document PDF, use annotations.
+
+ @param rect SkRect extent of canvas to annotate
+ @param key string used for lookup
+ @param value data holding value stored in annotation
+
+ example: https://fiddle.skia.org/c/@Canvas_drawAnnotation_2
+ */
+ void drawAnnotation(const SkRect& rect, const char key[], SkData* value);
+
+ /** Associates SkRect on SkCanvas with an annotation; a key-value pair, where the key is
+ a null-terminated UTF-8 string, and optional value is stored as SkData.
+
+ Only some canvas implementations, such as recording to SkPicture, or drawing to
+ document PDF, use annotations.
+
+ @param rect SkRect extent of canvas to annotate
+ @param key string used for lookup
+ @param value data holding value stored in annotation
+ */
+ void drawAnnotation(const SkRect& rect, const char key[], const sk_sp<SkData>& value) {
+ // Forwards the raw SkData pointer; no reference is taken here.
+ this->drawAnnotation(rect, key, value.get());
+ }
+
+ /** Returns true if clip is empty; that is, nothing will draw.
+
+ May do work when called; it should not be called
+ more often than needed. However, once called, subsequent calls perform no
+ work until clip changes.
+
+ @return true if clip is empty
+
+ example: https://fiddle.skia.org/c/@Canvas_isClipEmpty
+ */
+ virtual bool isClipEmpty() const;
+
+ /** Returns true if clip is SkRect and not empty.
+ Returns false if the clip is empty, or if it is not SkRect.
+
+ @return true if clip is SkRect and not empty
+
+ example: https://fiddle.skia.org/c/@Canvas_isClipRect
+ */
+ virtual bool isClipRect() const;
+
+ /** Returns the current transform from local coordinates to the 'device', which for most
+ * purposes means pixels.
+ *
+ * @return transformation from local coordinates to device / pixels.
+ */
+ SkM44 getLocalToDevice() const;
+
+ /**
+ * Throws away the 3rd row and column in the matrix, so be warned.
+ */
+ SkMatrix getLocalToDeviceAs3x3() const {
+ // Collapses the 4x4 local-to-device matrix to 3x3 via asM33(), discarding
+ // the Z row and column (see the warning in the comment above).
+ return this->getLocalToDevice().asM33();
+ }
+
+#ifdef SK_SUPPORT_LEGACY_GETTOTALMATRIX
+ /** DEPRECATED
+ * Legacy version of getLocalToDevice(), which strips away any Z information, and
+ * just returns a 3x3 version.
+ *
+ * @return 3x3 version of getLocalToDevice()
+ *
+ * example: https://fiddle.skia.org/c/@Canvas_getTotalMatrix
+ * example: https://fiddle.skia.org/c/@Clip
+ */
+ SkMatrix getTotalMatrix() const;
+#endif
+
+ ///////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) && defined(SK_GANESH)
+ // These methods exist to support WebView in Android Framework.
+ SkIRect topLayerBounds() const;
+ GrBackendRenderTarget topLayerBackendRenderTarget() const;
+#endif
+
+ /**
+ * Returns the global clip as a region. If the clip contains AA, then only the bounds
+ * of the clip may be returned.
+ */
+ void temporary_internal_getRgnClip(SkRegion* region);
+
+ void private_draw_shadow_rec(const SkPath&, const SkDrawShadowRec&);
+
+
+protected:
+ // default impl defers to getDevice()->newSurface(info)
+ virtual sk_sp<SkSurface> onNewSurface(const SkImageInfo& info, const SkSurfaceProps& props);
+
+ // default impl defers to its device
+ virtual bool onPeekPixels(SkPixmap* pixmap);
+ virtual bool onAccessTopLayerPixels(SkPixmap* pixmap);
+ virtual SkImageInfo onImageInfo() const;
+ virtual bool onGetProps(SkSurfaceProps* props, bool top) const;
+ virtual void onFlush();
+
+ // Subclass save/restore notifiers.
+ // Overriders should call the corresponding INHERITED method up the inheritance chain.
+ // getSaveLayerStrategy()'s return value may suppress full layer allocation.
+ enum SaveLayerStrategy {
+ kFullLayer_SaveLayerStrategy,
+ kNoLayer_SaveLayerStrategy,
+ };
+
+ // No-op notifier hooks: subclasses override these to observe save/restore
+ // and matrix changes; the defaults intentionally do nothing.
+ virtual void willSave() {}
+ // Overriders should call the corresponding INHERITED method up the inheritance chain.
+ virtual SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec& ) {
+ // Default: request a full layer allocation (see SaveLayerStrategy above).
+ return kFullLayer_SaveLayerStrategy;
+ }
+
+ // returns true if we should actually perform the saveBehind, or false if we should just save.
+ virtual bool onDoSaveBehind(const SkRect*) { return true; }
+ virtual void willRestore() {}
+ virtual void didRestore() {}
+
+ // Matrix-change notifiers; default implementations do nothing.
+ virtual void didConcat44(const SkM44&) {}
+ virtual void didSetM44(const SkM44&) {}
+ virtual void didTranslate(SkScalar, SkScalar) {}
+ virtual void didScale(SkScalar, SkScalar) {}
+
+ // NOTE: If you are adding a new onDraw virtual to SkCanvas, PLEASE add an override to
+ // SkCanvasVirtualEnforcer (in SkCanvasVirtualEnforcer.h). This ensures that subclasses using
+ // that mechanism will be required to implement the new function.
+ virtual void onDrawPaint(const SkPaint& paint);
+ virtual void onDrawBehind(const SkPaint& paint);
+ virtual void onDrawRect(const SkRect& rect, const SkPaint& paint);
+ virtual void onDrawRRect(const SkRRect& rrect, const SkPaint& paint);
+ virtual void onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint);
+ virtual void onDrawOval(const SkRect& rect, const SkPaint& paint);
+ virtual void onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint);
+ virtual void onDrawPath(const SkPath& path, const SkPaint& paint);
+ virtual void onDrawRegion(const SkRegion& region, const SkPaint& paint);
+
+ virtual void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint);
+
+ virtual void onDrawGlyphRunList(const sktext::GlyphRunList& glyphRunList, const SkPaint& paint);
+
+ virtual void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode mode, const SkPaint& paint);
+ virtual void onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint);
+
+ virtual void onDrawImage2(const SkImage*, SkScalar dx, SkScalar dy, const SkSamplingOptions&,
+ const SkPaint*);
+ virtual void onDrawImageRect2(const SkImage*, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions&, const SkPaint*, SrcRectConstraint);
+ virtual void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect& dst,
+ SkFilterMode, const SkPaint*);
+ virtual void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect src[],
+ const SkColor[], int count, SkBlendMode, const SkSamplingOptions&,
+ const SkRect* cull, const SkPaint*);
+ virtual void onDrawEdgeAAImageSet2(const ImageSetEntry imageSet[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkSamplingOptions&, const SkPaint*,
+ SrcRectConstraint);
+
+ virtual void onDrawVerticesObject(const SkVertices* vertices, SkBlendMode mode,
+ const SkPaint& paint);
+#ifdef SK_ENABLE_SKSL
+ virtual void onDrawMesh(const SkMesh&, sk_sp<SkBlender>, const SkPaint&);
+#endif
+ virtual void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value);
+ virtual void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&);
+
+ virtual void onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix);
+ virtual void onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint);
+
+ virtual void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4], QuadAAFlags aaFlags,
+ const SkColor4f& color, SkBlendMode mode);
+
+ enum ClipEdgeStyle {
+ kHard_ClipEdgeStyle,
+ kSoft_ClipEdgeStyle
+ };
+
+ virtual void onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle);
+ virtual void onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle);
+ virtual void onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle);
+ virtual void onClipShader(sk_sp<SkShader>, SkClipOp);
+ virtual void onClipRegion(const SkRegion& deviceRgn, SkClipOp op);
+ virtual void onResetClip();
+
+ virtual void onDiscard();
+
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE))
+ /** Experimental
+ */
+ virtual sk_sp<sktext::gpu::Slug> onConvertGlyphRunListToSlug(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint);
+
+ /** Experimental
+ */
+ virtual void onDrawSlug(const sktext::gpu::Slug* slug);
+#endif
+
+private:
+
+ enum ShaderOverrideOpacity {
+ kNone_ShaderOverrideOpacity, //!< there is no overriding shader (bitmap or image)
+ kOpaque_ShaderOverrideOpacity, //!< the overriding shader is opaque
+ kNotOpaque_ShaderOverrideOpacity, //!< the overriding shader may not be opaque
+ };
+
+ // notify our surface (if we have one) that we are about to draw, so it
+ // can perform copy-on-write or invalidate any cached images
+ // returns false if the copy failed
+ bool SK_WARN_UNUSED_RESULT predrawNotify(bool willOverwritesEntireSurface = false);
+ bool SK_WARN_UNUSED_RESULT predrawNotify(const SkRect*, const SkPaint*, ShaderOverrideOpacity);
+
+ enum class CheckForOverwrite : bool {
+ kNo = false,
+ kYes = true
+ };
+ // call the appropriate predrawNotify and create a layer if needed.
+ std::optional<AutoLayerForImageFilter> aboutToDraw(
+ SkCanvas* canvas,
+ const SkPaint& paint,
+ const SkRect* rawBounds = nullptr,
+ CheckForOverwrite = CheckForOverwrite::kNo,
+ ShaderOverrideOpacity = kNone_ShaderOverrideOpacity);
+
+ // The bottom-most device in the stack, only changed by init(). Image properties and the final
+ // canvas pixels are determined by this device.
+ SkBaseDevice* baseDevice() const {
+ SkASSERT(fBaseDevice);
+ return fBaseDevice.get();
+ }
+
+ // The top-most device in the stack, will change within saveLayer()'s. All drawing and clipping
+ // operations should route to this device.
+ SkBaseDevice* topDevice() const;
+
+ // Canvases maintain a sparse stack of layers, where the top-most layer receives the drawing,
+ // clip, and matrix commands. There is a layer per call to saveLayer() using the
+ // kFullLayer_SaveLayerStrategy.
+ struct Layer {
+ sk_sp<SkBaseDevice> fDevice;
+ sk_sp<SkImageFilter> fImageFilter; // applied to layer *before* being drawn by paint
+ SkPaint fPaint;
+ bool fDiscard;
+
+ Layer(sk_sp<SkBaseDevice> device, sk_sp<SkImageFilter> imageFilter, const SkPaint& paint);
+ };
+
+ // Encapsulate state needed to restore from saveBehind()
+ struct BackImage {
+ // Out of line to avoid including SkSpecialImage.h
+ BackImage(sk_sp<SkSpecialImage>, SkIPoint);
+ BackImage(const BackImage&);
+ BackImage(BackImage&&);
+ BackImage& operator=(const BackImage&);
+ ~BackImage();
+
+ sk_sp<SkSpecialImage> fImage;
+ SkIPoint fLoc;
+ };
+
+ class MCRec {
+ public:
+ // If not null, this MCRec corresponds with the saveLayer() record that made the layer.
+ // The base "layer" is not stored here, since it is stored inline in SkCanvas and has no
+ // restoration behavior.
+ std::unique_ptr<Layer> fLayer;
+
+ // This points to the device of the top-most layer (which may be lower in the stack), or
+ // to the canvas's fBaseDevice. The MCRec does not own the device.
+ SkBaseDevice* fDevice;
+
+ std::unique_ptr<BackImage> fBackImage;
+ SkM44 fMatrix;
+ int fDeferredSaveCount = 0;
+
+ MCRec(SkBaseDevice* device);
+ MCRec(const MCRec* prev);
+ ~MCRec();
+
+ void newLayer(sk_sp<SkBaseDevice> layerDevice,
+ sk_sp<SkImageFilter> filter,
+ const SkPaint& restorePaint);
+
+ void reset(SkBaseDevice* device);
+ };
+
+ // the first N recs that can fit here mean we won't call malloc
+ static constexpr int kMCRecSize = 96; // most recent measurement
+ static constexpr int kMCRecCount = 32; // common depth for save/restores
+
+ intptr_t fMCRecStorage[kMCRecSize * kMCRecCount / sizeof(intptr_t)];
+
+ SkDeque fMCStack;
+ // points to top of stack
+ MCRec* fMCRec;
+
+ // Installed via init()
+ sk_sp<SkBaseDevice> fBaseDevice;
+ const SkSurfaceProps fProps;
+
+ int fSaveCount; // value returned by getSaveCount()
+
+ std::unique_ptr<SkRasterHandleAllocator> fAllocator;
+
+ SkSurface_Base* fSurfaceBase;
+ SkSurface_Base* getSurfaceBase() const { return fSurfaceBase; }
+ void setSurfaceBase(SkSurface_Base* sb) {
+ fSurfaceBase = sb;
+ }
+ friend class SkSurface_Base;
+ friend class SkSurface_Gpu;
+
+ SkIRect fClipRestrictionRect = SkIRect::MakeEmpty();
+ int fClipRestrictionSaveCount = -1;
+
+ void doSave();
+ void checkForDeferredSave();
+ void internalSetMatrix(const SkM44&);
+
+ friend class SkAndroidFrameworkUtils;
+ friend class SkCanvasPriv; // needs to expose android functions for testing outside android
+ friend class AutoLayerForImageFilter;
+ friend class SkSurface_Raster; // needs getDevice()
+ friend class SkNoDrawCanvas; // needs resetForNextPicture()
+ friend class SkNWayCanvas;
+ friend class SkPictureRecord; // predrawNotify (why does it need it? <reed>)
+ friend class SkOverdrawCanvas;
+ friend class SkRasterHandleAllocator;
+ friend class SkRecords::Draw;
+ template <typename Key>
+ friend class SkTestCanvas;
+
+protected:
+ // For use by SkNoDrawCanvas (via SkCanvasVirtualEnforcer, which can't be a friend)
+ SkCanvas(const SkIRect& bounds);
+private:
+ SkCanvas(const SkBitmap&, std::unique_ptr<SkRasterHandleAllocator>,
+ SkRasterHandleAllocator::Handle, const SkSurfaceProps* props);
+
+ SkCanvas(SkCanvas&&) = delete;
+ SkCanvas(const SkCanvas&) = delete;
+ SkCanvas& operator=(SkCanvas&&) = delete;
+ SkCanvas& operator=(const SkCanvas&) = delete;
+
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE))
+ friend class sktext::gpu::Slug;
+ /** Experimental
+ * Convert a SkTextBlob to a sktext::gpu::Slug using the current canvas state.
+ */
+ sk_sp<sktext::gpu::Slug> convertBlobToSlug(const SkTextBlob& blob, SkPoint origin,
+ const SkPaint& paint);
+
+ /** Experimental
+ * Draw an sktext::gpu::Slug given the current canvas state.
+ */
+ void drawSlug(const sktext::gpu::Slug* slug);
+#endif
+
+ /** Experimental
+ * Saves the specified subset of the current pixels in the current layer,
+ * and then clears those pixels to transparent black.
+ * Restores the pixels on restore() by drawing them in SkBlendMode::kDstOver.
+ *
+ * @param subset conservative bounds of the area to be saved / restored.
+ * @return depth of save state stack before this call was made.
+ */
+ int only_axis_aligned_saveBehind(const SkRect* subset);
+
+ /**
+ * Like drawPaint, but magically clipped to the most recent saveBehind buffer rectangle.
+ * If there is no active saveBehind, then this draws nothing.
+ */
+ void drawClippedToSaveBehind(const SkPaint&);
+
+ void resetForNextPicture(const SkIRect& bounds);
+
+ // needs gettotalclip()
+ friend class SkCanvasStateUtils;
+
+ void init(sk_sp<SkBaseDevice>);
+
+ // All base onDrawX() functions should call this and skip drawing if it returns true.
+ // If 'matrix' is non-null, it maps the paint's fast bounds before checking for quick rejection
+ bool internalQuickReject(const SkRect& bounds, const SkPaint& paint,
+ const SkMatrix* matrix = nullptr);
+
+ void internalDrawPaint(const SkPaint& paint);
+ void internalSaveLayer(const SaveLayerRec&, SaveLayerStrategy);
+ void internalSaveBehind(const SkRect*);
+
+ void internalConcat44(const SkM44&);
+
+ // shared by save() and saveLayer()
+ void internalSave();
+ void internalRestore();
+
+ enum class DeviceCompatibleWithFilter : bool {
+ // Check the src device's local-to-device matrix for compatibility with the filter, and if
+ // it is not compatible, introduce an intermediate image and transformation that allows the
+ // filter to be evaluated on the modified src content.
+ kUnknown = false,
+ // Assume that the src device's local-to-device matrix is compatible with the filter.
+ kYes = true
+ };
+ /**
+ * Filters the contents of 'src' and draws the result into 'dst'. The filter is evaluated
+ * relative to the current canvas matrix, and src is drawn to dst using their relative transform
+ * 'paint' is applied after the filter and must not have a mask or image filter of its own.
+ * A null 'filter' behaves as if the identity filter were used.
+ *
+ * 'scaleFactor' is an extra uniform scale transform applied to downscale the 'src' image
+ * before any filtering, or as part of the copy, and is then drawn with 1/scaleFactor to 'dst'.
+ * Must be 1.0 if 'compat' is kYes (i.e. any scale factor has already been baked into the
+ * relative transforms between the devices).
+ */
+ void internalDrawDeviceWithFilter(SkBaseDevice* src, SkBaseDevice* dst,
+ const SkImageFilter* filter, const SkPaint& paint,
+ DeviceCompatibleWithFilter compat,
+ SkScalar scaleFactor = 1.f);
+
+ /*
+ * Returns true if drawing the specified rect (or all if it is null) with the specified
+ * paint (or default if null) would overwrite the entire root device of the canvas
+ * (i.e. the canvas' surface if it had one).
+ */
+ bool wouldOverwriteEntireSurface(const SkRect*, const SkPaint*, ShaderOverrideOpacity) const;
+
+ /**
+ * Returns true if the paint's imagefilter can be invoked directly, without needed a layer.
+ */
+ bool canDrawBitmapAsSprite(SkScalar x, SkScalar y, int w, int h, const SkSamplingOptions&,
+ const SkPaint&);
+
+ /**
+ * Returns true if the clip (for any active layer) contains antialiasing.
+ * If the clip is empty, this will return false.
+ */
+ bool androidFramework_isClipAA() const;
+
+ /**
+ * Reset the clip to be wide-open (modulo any separately specified device clip restriction).
+ * This operate within the save/restore clip stack so it can be undone by restoring to an
+ * earlier save point.
+ */
+ void internal_private_resetClip();
+
+ virtual SkPaintFilterCanvas* internal_private_asPaintFilterCanvas() const { return nullptr; }
+
+ // Keep track of the device clip bounds in the canvas' global space to reject draws before
+ // invoking the top-level device.
+ SkRect fQuickRejectBounds;
+
+ // Compute the clip's bounds based on all clipped SkDevice's reported device bounds transformed
+ // into the canvas' global space.
+ SkRect computeDeviceClipBounds(bool outsetForAA=true) const;
+
+ class AutoUpdateQRBounds;
+ void validateClip() const;
+
+ std::unique_ptr<sktext::GlyphRunBuilder> fScratchGlyphRunBuilder;
+
+ using INHERITED = SkRefCnt;
+};
+
+/** \class SkAutoCanvasRestore
+ Stack helper class calls SkCanvas::restoreToCount when SkAutoCanvasRestore
+ goes out of scope. Use this to guarantee that the canvas is restored to a known
+ state.
+*/
+class SkAutoCanvasRestore {
+public:
+
+ /** Preserves SkCanvas::save() count. Optionally saves SkCanvas clip and SkCanvas matrix.
+
+ @param canvas SkCanvas to guard
+ @param doSave call SkCanvas::save()
+ @return utility to restore SkCanvas state on destructor
+ */
+ SkAutoCanvasRestore(SkCanvas* canvas, bool doSave) : fCanvas(canvas), fSaveCount(0) {
+ if (fCanvas) {
+ fSaveCount = canvas->getSaveCount();
+ if (doSave) {
+ canvas->save();
+ }
+ }
+ }
+
+ /** Restores SkCanvas to saved state. Destructor is called when container goes out of
+ scope.
+ */
+ ~SkAutoCanvasRestore() {
+ if (fCanvas) {
+ fCanvas->restoreToCount(fSaveCount);
+ }
+ }
+
+ /** Restores SkCanvas to saved state immediately. Subsequent calls and
+ ~SkAutoCanvasRestore() have no effect.
+ */
+ void restore() {
+ if (fCanvas) {
+ fCanvas->restoreToCount(fSaveCount);
+ fCanvas = nullptr;
+ }
+ }
+
+private:
+ SkCanvas* fCanvas;
+ int fSaveCount;
+
+ SkAutoCanvasRestore(SkAutoCanvasRestore&&) = delete;
+ SkAutoCanvasRestore(const SkAutoCanvasRestore&) = delete;
+ SkAutoCanvasRestore& operator=(SkAutoCanvasRestore&&) = delete;
+ SkAutoCanvasRestore& operator=(const SkAutoCanvasRestore&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkCanvasVirtualEnforcer.h b/gfx/skia/skia/include/core/SkCanvasVirtualEnforcer.h
new file mode 100644
index 0000000000..5086b4337d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCanvasVirtualEnforcer.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasVirtualEnforcer_DEFINED
+#define SkCanvasVirtualEnforcer_DEFINED
+
+#include "include/core/SkCanvas.h"
+
+// If you would ordinarily want to inherit from Base (eg SkCanvas, SkNWayCanvas), instead
+// inherit from SkCanvasVirtualEnforcer<Base>, which will make the build fail if you forget
+// to override one of SkCanvas' key virtual hooks.
+template <typename Base>
+class SkCanvasVirtualEnforcer : public Base {
+public:
+ using Base::Base;
+
+protected:
+ void onDrawPaint(const SkPaint& paint) override = 0;
+ void onDrawBehind(const SkPaint&) override {} // make zero after android updates
+ void onDrawRect(const SkRect& rect, const SkPaint& paint) override = 0;
+ void onDrawRRect(const SkRRect& rrect, const SkPaint& paint) override = 0;
+ void onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) override = 0;
+ void onDrawOval(const SkRect& rect, const SkPaint& paint) override = 0;
+ void onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) override = 0;
+ void onDrawPath(const SkPath& path, const SkPaint& paint) override = 0;
+ void onDrawRegion(const SkRegion& region, const SkPaint& paint) override = 0;
+
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override = 0;
+
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode mode,
+ const SkPaint& paint) override = 0;
+ void onDrawPoints(SkCanvas::PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) override = 0;
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // This is under active development for Chrome and not used in Android. Hold off on adding
+ // implementations in Android's SkCanvas subclasses until this stabilizes.
+ void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color, SkBlendMode mode) override {}
+#else
+ void onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color, SkBlendMode mode) override = 0;
+#endif
+
+ void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) override = 0;
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override = 0;
+
+ void onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) override = 0;
+ void onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) override = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkCapabilities.h b/gfx/skia/skia/include/core/SkCapabilities.h
new file mode 100644
index 0000000000..214b5138f0
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCapabilities.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCapabilities_DEFINED
+#define SkCapabilities_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+#ifdef SK_ENABLE_SKSL
+#include "include/sksl/SkSLVersion.h"
+namespace SkSL { struct ShaderCaps; }
+#endif
+
+#if defined(SK_GRAPHITE)
+namespace skgpu::graphite { class Caps; }
+#endif
+
+class SK_API SkCapabilities : public SkRefCnt {
+public:
+ static sk_sp<const SkCapabilities> RasterBackend();
+
+#ifdef SK_ENABLE_SKSL
+ SkSL::Version skslVersion() const { return fSkSLVersion; }
+#endif
+
+protected:
+#if defined(SK_GRAPHITE)
+ friend class skgpu::graphite::Caps; // for ctor
+#endif
+
+ SkCapabilities() = default;
+
+#ifdef SK_ENABLE_SKSL
+ void initSkCaps(const SkSL::ShaderCaps*);
+
+ SkSL::Version fSkSLVersion = SkSL::Version::k100;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkClipOp.h b/gfx/skia/skia/include/core/SkClipOp.h
new file mode 100644
index 0000000000..3da6c61131
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkClipOp.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipOp_DEFINED
+#define SkClipOp_DEFINED
+
+#include "include/core/SkTypes.h"
+
+enum class SkClipOp {
+ kDifference = 0,
+ kIntersect = 1,
+ kMax_EnumValue = kIntersect
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkColor.h b/gfx/skia/skia/include/core/SkColor.h
new file mode 100644
index 0000000000..3b46be030f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColor.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColor_DEFINED
+#define SkColor_DEFINED
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkCPUTypes.h"
+
+#include <array>
+#include <cstdint>
+
+/** \file SkColor.h
+
+ Types, consts, functions, and macros for colors.
+*/
+
+/** 8-bit type for an alpha value. 255 is 100% opaque, zero is 100% transparent.
+*/
+typedef uint8_t SkAlpha;
+
+/** 32-bit ARGB color value, unpremultiplied. Color components are always in
+ a known order. This is different from SkPMColor, which has its bytes in a configuration
+ dependent order, to match the format of kBGRA_8888_SkColorType bitmaps. SkColor
+ is the type used to specify colors in SkPaint and in gradients.
+
+ Color that is premultiplied has the same component values as color
+ that is unpremultiplied if alpha is 255, fully opaque, although may have the
+ component values in a different order.
+*/
+typedef uint32_t SkColor;
+
+/** Returns color value from 8-bit component values. Asserts if SK_DEBUG is defined
+ if a, r, g, or b exceed 255. Since color is unpremultiplied, a may be smaller
+ than the largest of r, g, and b.
+
+ @param a amount of alpha, from fully transparent (0) to fully opaque (255)
+ @param r amount of red, from no red (0) to full red (255)
+ @param g amount of green, from no green (0) to full green (255)
+ @param b amount of blue, from no blue (0) to full blue (255)
+ @return color and alpha, unpremultiplied
+*/
+static constexpr inline SkColor SkColorSetARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ return SkASSERT(a <= 255 && r <= 255 && g <= 255 && b <= 255),
+ (a << 24) | (r << 16) | (g << 8) | (b << 0);
+}
+
+/** Returns color value from 8-bit component values, with alpha set
+ fully opaque to 255.
+*/
+#define SkColorSetRGB(r, g, b) SkColorSetARGB(0xFF, r, g, b)
+
+/** Returns alpha byte from color value.
+*/
+#define SkColorGetA(color) (((color) >> 24) & 0xFF)
+
+/** Returns red component of color, from zero to 255.
+*/
+#define SkColorGetR(color) (((color) >> 16) & 0xFF)
+
+/** Returns green component of color, from zero to 255.
+*/
+#define SkColorGetG(color) (((color) >> 8) & 0xFF)
+
+/** Returns blue component of color, from zero to 255.
+*/
+#define SkColorGetB(color) (((color) >> 0) & 0xFF)
+
+/** Returns unpremultiplied color with red, blue, and green set from c; and alpha set
+ from a. Alpha component of c is ignored and is replaced by a in result.
+
+ @param c packed RGB, eight bits per component
+ @param a alpha: transparent at zero, fully opaque at 255
+ @return color with transparency
+*/
+static constexpr inline SkColor SK_WARN_UNUSED_RESULT SkColorSetA(SkColor c, U8CPU a) {
+ return (c & 0x00FFFFFF) | (a << 24);
+}
+
+/** Represents fully transparent SkAlpha value. SkAlpha ranges from zero,
+ fully transparent; to 255, fully opaque.
+*/
+constexpr SkAlpha SK_AlphaTRANSPARENT = 0x00;
+
+/** Represents fully opaque SkAlpha value. SkAlpha ranges from zero,
+ fully transparent; to 255, fully opaque.
+*/
+constexpr SkAlpha SK_AlphaOPAQUE = 0xFF;
+
+/** Represents fully transparent SkColor. May be used to initialize a destination
+ containing a mask or a non-rectangular image.
+*/
+constexpr SkColor SK_ColorTRANSPARENT = SkColorSetARGB(0x00, 0x00, 0x00, 0x00);
+
+/** Represents fully opaque black.
+*/
+constexpr SkColor SK_ColorBLACK = SkColorSetARGB(0xFF, 0x00, 0x00, 0x00);
+
+/** Represents fully opaque dark gray.
+ Note that SVG dark gray is equivalent to 0xFFA9A9A9.
+*/
+constexpr SkColor SK_ColorDKGRAY = SkColorSetARGB(0xFF, 0x44, 0x44, 0x44);
+
+/** Represents fully opaque gray.
+ Note that HTML gray is equivalent to 0xFF808080.
+*/
+constexpr SkColor SK_ColorGRAY = SkColorSetARGB(0xFF, 0x88, 0x88, 0x88);
+
+/** Represents fully opaque light gray. HTML silver is equivalent to 0xFFC0C0C0.
+ Note that SVG light gray is equivalent to 0xFFD3D3D3.
+*/
+constexpr SkColor SK_ColorLTGRAY = SkColorSetARGB(0xFF, 0xCC, 0xCC, 0xCC);
+
+/** Represents fully opaque white.
+*/
+constexpr SkColor SK_ColorWHITE = SkColorSetARGB(0xFF, 0xFF, 0xFF, 0xFF);
+
+/** Represents fully opaque red.
+*/
+constexpr SkColor SK_ColorRED = SkColorSetARGB(0xFF, 0xFF, 0x00, 0x00);
+
+/** Represents fully opaque green. HTML lime is equivalent.
+ Note that HTML green is equivalent to 0xFF008000.
+*/
+constexpr SkColor SK_ColorGREEN = SkColorSetARGB(0xFF, 0x00, 0xFF, 0x00);
+
+/** Represents fully opaque blue.
+*/
+constexpr SkColor SK_ColorBLUE = SkColorSetARGB(0xFF, 0x00, 0x00, 0xFF);
+
+/** Represents fully opaque yellow.
+*/
+constexpr SkColor SK_ColorYELLOW = SkColorSetARGB(0xFF, 0xFF, 0xFF, 0x00);
+
+/** Represents fully opaque cyan. HTML aqua is equivalent.
+*/
+constexpr SkColor SK_ColorCYAN = SkColorSetARGB(0xFF, 0x00, 0xFF, 0xFF);
+
+/** Represents fully opaque magenta. HTML fuchsia is equivalent.
+*/
+constexpr SkColor SK_ColorMAGENTA = SkColorSetARGB(0xFF, 0xFF, 0x00, 0xFF);
+
+/** Converts RGB to its HSV components.
+ hsv[0] contains hsv hue, a value from zero to less than 360.
+ hsv[1] contains hsv saturation, a value from zero to one.
+ hsv[2] contains hsv value, a value from zero to one.
+
+ @param red red component value from zero to 255
+ @param green green component value from zero to 255
+ @param blue blue component value from zero to 255
+ @param hsv three element array which holds the resulting HSV components
+*/
+SK_API void SkRGBToHSV(U8CPU red, U8CPU green, U8CPU blue, SkScalar hsv[3]);
+
+/** Converts ARGB to its HSV components. Alpha in ARGB is ignored.
+ hsv[0] contains hsv hue, and is assigned a value from zero to less than 360.
+ hsv[1] contains hsv saturation, a value from zero to one.
+ hsv[2] contains hsv value, a value from zero to one.
+
+ @param color ARGB color to convert
+ @param hsv three element array which holds the resulting HSV components
+*/
+static inline void SkColorToHSV(SkColor color, SkScalar hsv[3]) {
+ SkRGBToHSV(SkColorGetR(color), SkColorGetG(color), SkColorGetB(color), hsv);
+}
+
+/** Converts HSV components to an ARGB color. Alpha is passed through unchanged.
+ hsv[0] represents hsv hue, an angle from zero to less than 360.
+ hsv[1] represents hsv saturation, and varies from zero to one.
+ hsv[2] represents hsv value, and varies from zero to one.
+
+ Out of range hsv values are pinned.
+
+ @param alpha alpha component of the returned ARGB color
+ @param hsv three element array which holds the input HSV components
+ @return ARGB equivalent to HSV
+*/
+SK_API SkColor SkHSVToColor(U8CPU alpha, const SkScalar hsv[3]);
+
+/** Converts HSV components to an ARGB color. Alpha is set to 255.
+ hsv[0] represents hsv hue, an angle from zero to less than 360.
+ hsv[1] represents hsv saturation, and varies from zero to one.
+ hsv[2] represents hsv value, and varies from zero to one.
+
+ Out of range hsv values are pinned.
+
+ @param hsv three element array which holds the input HSV components
+ @return RGB equivalent to HSV
+*/
+static inline SkColor SkHSVToColor(const SkScalar hsv[3]) {
+ return SkHSVToColor(0xFF, hsv);
+}
+
+/** 32-bit ARGB color value, premultiplied. The byte order for this value is
+ configuration dependent, matching the format of kBGRA_8888_SkColorType bitmaps.
+ This is different from SkColor, which is unpremultiplied, and is always in the
+ same byte order.
+*/
+typedef uint32_t SkPMColor;
+
+/** Returns a SkPMColor value from unpremultiplied 8-bit component values.
+
+ @param a amount of alpha, from fully transparent (0) to fully opaque (255)
+ @param r amount of red, from no red (0) to full red (255)
+ @param g amount of green, from no green (0) to full green (255)
+ @param b amount of blue, from no blue (0) to full blue (255)
+ @return premultiplied color
+*/
+SK_API SkPMColor SkPreMultiplyARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+
+/** Returns pmcolor closest to color c. Multiplies c RGB components by the c alpha,
+ and arranges the bytes to match the format of kN32_SkColorType.
+
+ @param c unpremultiplied ARGB color
+ @return premultiplied color
+*/
+SK_API SkPMColor SkPreMultiplyColor(SkColor c);
+
+/** \enum SkColorChannel
+ Describes different color channels one can manipulate
+*/
+enum class SkColorChannel {
+ kR, // the red channel
+ kG, // the green channel
+ kB, // the blue channel
+ kA, // the alpha channel
+
+ kLastEnum = kA,
+};
+
+/** Used to represent the channels available in a color type or texture format as a mask. */
+enum SkColorChannelFlag : uint32_t {
+ kRed_SkColorChannelFlag = 1 << static_cast<uint32_t>(SkColorChannel::kR),
+ kGreen_SkColorChannelFlag = 1 << static_cast<uint32_t>(SkColorChannel::kG),
+ kBlue_SkColorChannelFlag = 1 << static_cast<uint32_t>(SkColorChannel::kB),
+ kAlpha_SkColorChannelFlag = 1 << static_cast<uint32_t>(SkColorChannel::kA),
+ kGray_SkColorChannelFlag = 0x10,
+ // Convenience values
+ kGrayAlpha_SkColorChannelFlags = kGray_SkColorChannelFlag | kAlpha_SkColorChannelFlag,
+ kRG_SkColorChannelFlags = kRed_SkColorChannelFlag | kGreen_SkColorChannelFlag,
+ kRGB_SkColorChannelFlags = kRG_SkColorChannelFlags | kBlue_SkColorChannelFlag,
+ kRGBA_SkColorChannelFlags = kRGB_SkColorChannelFlags | kAlpha_SkColorChannelFlag,
+};
+static_assert(0 == (kGray_SkColorChannelFlag & kRGBA_SkColorChannelFlags), "bitfield conflict");
+
+/** \struct SkRGBA4f
+ RGBA color value, holding four floating point components. Color components are always in
+ a known order. kAT determines if the SkRGBA4f's R, G, and B components are premultiplied
+ by alpha or not.
+
+ Skia's public API always uses unpremultiplied colors, which can be stored as
+ SkRGBA4f<kUnpremul_SkAlphaType>. For convenience, this type can also be referred to
+ as SkColor4f.
+*/
+template <SkAlphaType kAT>
+struct SkRGBA4f {
+ float fR; //!< red component
+ float fG; //!< green component
+ float fB; //!< blue component
+ float fA; //!< alpha component
+
+ /** Compares SkRGBA4f with other, and returns true if all components are equal.
+
+ @param other SkRGBA4f to compare
+ @return true if SkRGBA4f equals other
+ */
+ bool operator==(const SkRGBA4f& other) const {
+ return fA == other.fA && fR == other.fR && fG == other.fG && fB == other.fB;
+ }
+
+ /** Compares SkRGBA4f with other, and returns true if not all components are equal.
+
+ @param other SkRGBA4f to compare
+ @return true if SkRGBA4f is not equal to other
+ */
+ bool operator!=(const SkRGBA4f& other) const {
+ return !(*this == other);
+ }
+
+ /** Returns SkRGBA4f multiplied by scale.
+
+ @param scale value to multiply by
+ @return SkRGBA4f as (fR * scale, fG * scale, fB * scale, fA * scale)
+ */
+ SkRGBA4f operator*(float scale) const {
+ return { fR * scale, fG * scale, fB * scale, fA * scale };
+ }
+
+ /** Returns SkRGBA4f multiplied component-wise by scale.
+
+ @param scale SkRGBA4f to multiply by
+ @return SkRGBA4f as (fR * scale.fR, fG * scale.fG, fB * scale.fB, fA * scale.fA)
+ */
+ SkRGBA4f operator*(const SkRGBA4f& scale) const {
+ return { fR * scale.fR, fG * scale.fG, fB * scale.fB, fA * scale.fA };
+ }
+
+ /** Returns a pointer to components of SkRGBA4f, for array access.
+
+ @return pointer to array [fR, fG, fB, fA]
+ */
+ const float* vec() const { return &fR; }
+
+ /** Returns a pointer to components of SkRGBA4f, for array access.
+
+ @return pointer to array [fR, fG, fB, fA]
+ */
+ float* vec() { return &fR; }
+
+ /** As a std::array<float, 4> */
+ std::array<float, 4> array() const { return {fR, fG, fB, fA}; }
+
+ /** Returns one component. Asserts if index is out of range and SK_DEBUG is defined.
+
+ @param index one of: 0 (fR), 1 (fG), 2 (fB), 3 (fA)
+ @return value corresponding to index
+ */
+ float operator[](int index) const {
+ SkASSERT(index >= 0 && index < 4);
+ return this->vec()[index];
+ }
+
+ /** Returns one component. Asserts if index is out of range and SK_DEBUG is defined.
+
+ @param index one of: 0 (fR), 1 (fG), 2 (fB), 3 (fA)
+ @return value corresponding to index
+ */
+ float& operator[](int index) {
+ SkASSERT(index >= 0 && index < 4);
+ return this->vec()[index];
+ }
+
+ /** Returns true if SkRGBA4f is an opaque color. Asserts if fA is out of range and
+ SK_DEBUG is defined.
+
+ @return true if SkRGBA4f is opaque
+ */
+ bool isOpaque() const {
+ SkASSERT(fA <= 1.0f && fA >= 0.0f);
+ return fA == 1.0f;
+ }
+
+ /** Returns true if all channels are in [0, 1]. */
+ bool fitsInBytes() const {
+ SkASSERT(fA >= 0.0f && fA <= 1.0f);
+ return fR >= 0.0f && fR <= 1.0f &&
+ fG >= 0.0f && fG <= 1.0f &&
+ fB >= 0.0f && fB <= 1.0f;
+ }
+
+ /** Returns closest SkRGBA4f to SkColor. Only allowed if SkRGBA4f is unpremultiplied.
+
+ @param color Color with Alpha, red, blue, and green components
+ @return SkColor as SkRGBA4f
+
+ example: https://fiddle.skia.org/c/@RGBA4f_FromColor
+ */
+ static SkRGBA4f FromColor(SkColor color); // impl. depends on kAT
+
+ /** Returns closest SkColor to SkRGBA4f. Only allowed if SkRGBA4f is unpremultiplied.
+
+ @return color as SkColor
+
+ example: https://fiddle.skia.org/c/@RGBA4f_toSkColor
+ */
+ SkColor toSkColor() const; // impl. depends on kAT
+
+ /** Returns closest SkRGBA4f to SkPMColor. Only allowed if SkRGBA4f is premultiplied.
+
+ @return SkPMColor as SkRGBA4f
+ */
+ static SkRGBA4f FromPMColor(SkPMColor); // impl. depends on kAT
+
+ /** Returns SkRGBA4f premultiplied by alpha. Asserts at compile time if SkRGBA4f is
+ already premultiplied.
+
+ @return premultiplied color
+ */
+ SkRGBA4f<kPremul_SkAlphaType> premul() const {
+ static_assert(kAT == kUnpremul_SkAlphaType, "");
+ return { fR * fA, fG * fA, fB * fA, fA };
+ }
+
+ /** Returns SkRGBA4f unpremultiplied by alpha. Asserts at compile time if SkRGBA4f is
+ already unpremultiplied.
+
+ @return unpremultiplied color
+ */
+ SkRGBA4f<kUnpremul_SkAlphaType> unpremul() const {
+ static_assert(kAT == kPremul_SkAlphaType, "");
+
+ if (fA == 0.0f) {
+ return { 0, 0, 0, 0 };
+ } else {
+ float invAlpha = 1 / fA;
+ return { fR * invAlpha, fG * invAlpha, fB * invAlpha, fA };
+ }
+ }
+
+ // This produces bytes in RGBA order (eg GrColor). Impl. is the same, regardless of kAT
+ uint32_t toBytes_RGBA() const;
+ static SkRGBA4f FromBytes_RGBA(uint32_t color);
+
+ /**
+ Returns a copy of the SkRGBA4f but with alpha component set to 1.0f.
+
+ @return opaque color
+ */
+ SkRGBA4f makeOpaque() const {
+ return { fR, fG, fB, 1.0f };
+ }
+};
+
+/** \struct SkColor4f
+ RGBA color value, holding four floating point components. Color components are always in
+ a known order, and are unpremultiplied.
+
+ This is a specialization of SkRGBA4f. For details, @see SkRGBA4f.
+*/
+using SkColor4f = SkRGBA4f<kUnpremul_SkAlphaType>;
+
+template <> SK_API SkColor4f SkColor4f::FromColor(SkColor);
+template <> SK_API SkColor SkColor4f::toSkColor() const;
+template <> SK_API uint32_t SkColor4f::toBytes_RGBA() const;
+template <> SK_API SkColor4f SkColor4f::FromBytes_RGBA(uint32_t color);
+
+namespace SkColors {
+constexpr SkColor4f kTransparent = {0, 0, 0, 0};
+constexpr SkColor4f kBlack = {0, 0, 0, 1};
+constexpr SkColor4f kDkGray = {0.25f, 0.25f, 0.25f, 1};
+constexpr SkColor4f kGray = {0.50f, 0.50f, 0.50f, 1};
+constexpr SkColor4f kLtGray = {0.75f, 0.75f, 0.75f, 1};
+constexpr SkColor4f kWhite = {1, 1, 1, 1};
+constexpr SkColor4f kRed = {1, 0, 0, 1};
+constexpr SkColor4f kGreen = {0, 1, 0, 1};
+constexpr SkColor4f kBlue = {0, 0, 1, 1};
+constexpr SkColor4f kYellow = {1, 1, 0, 1};
+constexpr SkColor4f kCyan = {0, 1, 1, 1};
+constexpr SkColor4f kMagenta = {1, 0, 1, 1};
+} // namespace SkColors
+#endif
diff --git a/gfx/skia/skia/include/core/SkColorFilter.h b/gfx/skia/skia/include/core/SkColorFilter.h
new file mode 100644
index 0000000000..1e0f6ea6f5
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorFilter.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilter_DEFINED
+#define SkColorFilter_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFlattenable.h"
+
+class SkColorMatrix;
+class SkColorSpace;
+
+/**
+* ColorFilters are optional objects in the drawing pipeline. When present in
+* a paint, they are called with the "src" colors, and return new colors, which
+* are then passed onto the next stage (either ImageFilter or Xfermode).
+*
+* All subclasses are required to be reentrant-safe : it must be legal to share
+* the same instance between several threads.
+*/
+class SK_API SkColorFilter : public SkFlattenable {
+public:
+ /** If the filter can be represented by a source color plus Mode, this
+ * returns true, and sets (if not NULL) the color and mode appropriately.
+ * If not, this returns false and ignores the parameters.
+ */
+ bool asAColorMode(SkColor* color, SkBlendMode* mode) const;
+
+ /** If the filter can be represented by a 5x4 matrix, this
+ * returns true, and sets the matrix appropriately.
+ * If not, this returns false and ignores the parameter.
+ */
+ bool asAColorMatrix(float matrix[20]) const;
+
+ // Returns true if the filter is guaranteed to never change the alpha of a color it filters.
+ bool isAlphaUnchanged() const;
+
+ SkColor filterColor(SkColor) const;
+
+ /**
+ * Converts the src color (in src colorspace), into the dst colorspace,
+ * then applies this filter to it, returning the filtered color in the dst colorspace.
+ */
+ SkColor4f filterColor4f(const SkColor4f& srcColor, SkColorSpace* srcCS,
+ SkColorSpace* dstCS) const;
+
+ /** Construct a colorfilter whose effect is to first apply the inner filter and then apply
+ * this filter, applied to the output of the inner filter.
+ *
+ * result = this(inner(...))
+ */
+ sk_sp<SkColorFilter> makeComposed(sk_sp<SkColorFilter> inner) const;
+
+ static sk_sp<SkColorFilter> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr);
+
+private:
+ SkColorFilter() = default;
+ friend class SkColorFilterBase;
+
+ using INHERITED = SkFlattenable;
+};
+
+class SK_API SkColorFilters {
+public:
+ static sk_sp<SkColorFilter> Compose(sk_sp<SkColorFilter> outer, sk_sp<SkColorFilter> inner) {
+ return outer ? outer->makeComposed(inner) : inner;
+ }
+
+ // Blends between the constant color (src) and input color (dst) based on the SkBlendMode.
+ // If the color space is null, the constant color is assumed to be defined in sRGB.
+ static sk_sp<SkColorFilter> Blend(const SkColor4f& c, sk_sp<SkColorSpace>, SkBlendMode mode);
+ static sk_sp<SkColorFilter> Blend(SkColor c, SkBlendMode mode);
+
+ static sk_sp<SkColorFilter> Matrix(const SkColorMatrix&);
+ static sk_sp<SkColorFilter> Matrix(const float rowMajor[20]);
+
+ // A version of Matrix which operates in HSLA space instead of RGBA.
+ // I.e. HSLA-to-RGBA(Matrix(RGBA-to-HSLA(input))).
+ static sk_sp<SkColorFilter> HSLAMatrix(const SkColorMatrix&);
+ static sk_sp<SkColorFilter> HSLAMatrix(const float rowMajor[20]);
+
+ static sk_sp<SkColorFilter> LinearToSRGBGamma();
+ static sk_sp<SkColorFilter> SRGBToLinearGamma();
+ static sk_sp<SkColorFilter> Lerp(float t, sk_sp<SkColorFilter> dst, sk_sp<SkColorFilter> src);
+
+ /**
+ * Create a table colorfilter, copying the table into the filter, and
+ * applying it to all 4 components.
+ * a' = table[a];
+ * r' = table[r];
+ * g' = table[g];
+ * b' = table[b];
+ * Components are operated on in unpremultiplied space. If the incoming
+ * colors are premultiplied, they are temporarily unpremultiplied, then
+ * the table is applied, and then the result is remultiplied.
+ */
+ static sk_sp<SkColorFilter> Table(const uint8_t table[256]);
+
+ /**
+ * Create a table colorfilter, with a different table for each
+ * component [A, R, G, B]. If a given table is NULL, then it is
+ * treated as identity, with the component left unchanged. If a table
+ * is not null, then its contents are copied into the filter.
+ */
+ static sk_sp<SkColorFilter> TableARGB(const uint8_t tableA[256],
+ const uint8_t tableR[256],
+ const uint8_t tableG[256],
+ const uint8_t tableB[256]);
+
+ /**
+ * Create a colorfilter that multiplies the RGB channels by one color, and
+ * then adds a second color, pinning the result for each component to
+ * [0..255]. The alpha components of the mul and add arguments
+ * are ignored.
+ */
+ static sk_sp<SkColorFilter> Lighting(SkColor mul, SkColor add);
+
+private:
+ SkColorFilters() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkColorPriv.h b/gfx/skia/skia/include/core/SkColorPriv.h
new file mode 100644
index 0000000000..f89de9db72
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorPriv.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorPriv_DEFINED
+#define SkColorPriv_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+
+#include <algorithm>
+
+/** Turn 0..255 into 0..256 by adding 1 at the half-way point. Used to turn a
+ byte into a scale value, so that we can say scale * value >> 8 instead of
+ alpha * value / 255.
+
+ In debugging, asserts that alpha is 0..255
+*/
+static inline unsigned SkAlpha255To256(U8CPU alpha) {
+ SkASSERT(SkToU8(alpha) == alpha);
+ // this one assumes that blending on top of an opaque dst keeps it that way
+ // even though it is less accurate than a+(a>>7) for non-opaque dsts
+ return alpha + 1;
+}
+
+/** Multiply value by 0..256, and shift the result down 8
+ (i.e. return (value * alpha256) >> 8)
+ */
+#define SkAlphaMul(value, alpha256) (((value) * (alpha256)) >> 8)
+
+static inline U8CPU SkUnitScalarClampToByte(SkScalar x) {
+ return static_cast<U8CPU>(SkTPin(x, 0.0f, 1.0f) * 255 + 0.5);
+}
+
+#define SK_A32_BITS 8
+#define SK_R32_BITS 8
+#define SK_G32_BITS 8
+#define SK_B32_BITS 8
+
+#define SK_A32_MASK ((1 << SK_A32_BITS) - 1)
+#define SK_R32_MASK ((1 << SK_R32_BITS) - 1)
+#define SK_G32_MASK ((1 << SK_G32_BITS) - 1)
+#define SK_B32_MASK ((1 << SK_B32_BITS) - 1)
+
+/*
+ * Skia's 32bit backend only supports 1 swizzle order at a time (compile-time).
+ * This is specified by SK_R32_SHIFT=0 or SK_R32_SHIFT=16.
+ *
+ * For easier compatibility with Skia's GPU backend, we further restrict these
+ * to either (in memory-byte-order) RGBA or BGRA. Note that this "order" does
+ * not directly correspond to the same shift-order, since we have to take endianness
+ * into account.
+ *
+ * Here we enforce this constraint.
+ */
+
+#define SK_RGBA_R32_SHIFT 0
+#define SK_RGBA_G32_SHIFT 8
+#define SK_RGBA_B32_SHIFT 16
+#define SK_RGBA_A32_SHIFT 24
+
+#define SK_BGRA_B32_SHIFT 0
+#define SK_BGRA_G32_SHIFT 8
+#define SK_BGRA_R32_SHIFT 16
+#define SK_BGRA_A32_SHIFT 24
+
+#if defined(SK_PMCOLOR_IS_RGBA) || defined(SK_PMCOLOR_IS_BGRA)
+ #error "Configure PMCOLOR by setting SK_R32_SHIFT."
+#endif
+
+// Deduce which SK_PMCOLOR_IS_ to define from the _SHIFT defines
+
+#if (SK_A32_SHIFT == SK_RGBA_A32_SHIFT && \
+ SK_R32_SHIFT == SK_RGBA_R32_SHIFT && \
+ SK_G32_SHIFT == SK_RGBA_G32_SHIFT && \
+ SK_B32_SHIFT == SK_RGBA_B32_SHIFT)
+ #define SK_PMCOLOR_IS_RGBA
+#elif (SK_A32_SHIFT == SK_BGRA_A32_SHIFT && \
+ SK_R32_SHIFT == SK_BGRA_R32_SHIFT && \
+ SK_G32_SHIFT == SK_BGRA_G32_SHIFT && \
+ SK_B32_SHIFT == SK_BGRA_B32_SHIFT)
+ #define SK_PMCOLOR_IS_BGRA
+#else
+ #error "need 32bit packing to be either RGBA or BGRA"
+#endif
+
+#define SkGetPackedA32(packed) ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
+#define SkGetPackedR32(packed) ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
+#define SkGetPackedG32(packed) ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
+#define SkGetPackedB32(packed) ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)
+
+#define SkA32Assert(a) SkASSERT((unsigned)(a) <= SK_A32_MASK)
+#define SkR32Assert(r) SkASSERT((unsigned)(r) <= SK_R32_MASK)
+#define SkG32Assert(g) SkASSERT((unsigned)(g) <= SK_G32_MASK)
+#define SkB32Assert(b) SkASSERT((unsigned)(b) <= SK_B32_MASK)
+
+/**
+ * Pack the components into a SkPMColor, checking (in the debug version) that
+ * the components are 0..255, and are already premultiplied (i.e. alpha >= color)
+ */
+static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkA32Assert(a);
+ SkASSERT(r <= a);
+ SkASSERT(g <= a);
+ SkASSERT(b <= a);
+
+ return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
+ (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
+}
+
+/**
+ * Same as SkPackARGB32, but this version guarantees to not check that the
+ * values are premultiplied in the debug version.
+ */
+static inline SkPMColor SkPackARGB32NoCheck(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
+ (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
+}
+
+static inline
+SkPMColor SkPremultiplyARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkA32Assert(a);
+ SkR32Assert(r);
+ SkG32Assert(g);
+ SkB32Assert(b);
+
+ if (a != 255) {
+ r = SkMulDiv255Round(r, a);
+ g = SkMulDiv255Round(g, a);
+ b = SkMulDiv255Round(b, a);
+ }
+ return SkPackARGB32(a, r, g, b);
+}
+
+// When Android is compiled optimizing for size, SkAlphaMulQ doesn't get
+// inlined; forcing inlining significantly improves performance.
+static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) {
+ uint32_t mask = 0xFF00FF;
+
+ uint32_t rb = ((c & mask) * scale) >> 8;
+ uint32_t ag = ((c >> 8) & mask) * scale;
+ return (rb & mask) | (ag & ~mask);
+}
+
+static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) {
+ uint32_t scale = SkAlpha255To256(255 - SkGetPackedA32(src));
+
+ uint32_t mask = 0xFF00FF;
+ uint32_t rb = (((dst & mask) * scale) >> 8) & mask;
+ uint32_t ag = (((dst >> 8) & mask) * scale) & ~mask;
+
+ rb += (src & mask);
+ ag += (src & ~mask);
+
+ // Color channels (but not alpha) can overflow, so we have to saturate to 0xFF in each lane.
+ return std::min(rb & 0x000001FF, 0x000000FFU) |
+ std::min(ag & 0x0001FF00, 0x0000FF00U) |
+ std::min(rb & 0x01FF0000, 0x00FF0000U) |
+ (ag & 0xFF000000);
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkColorSpace.h b/gfx/skia/skia/include/core/SkColorSpace.h
new file mode 100644
index 0000000000..57c29e222a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorSpace.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorSpace_DEFINED
+#define SkColorSpace_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkOnce.h"
+#include "modules/skcms/skcms.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SkData;
+
+/**
+ * Describes a color gamut with primaries and a white point.
+ */
+struct SK_API SkColorSpacePrimaries {
+ float fRX;
+ float fRY;
+ float fGX;
+ float fGY;
+ float fBX;
+ float fBY;
+ float fWX;
+ float fWY;
+
+ /**
+ * Convert primaries and a white point to a toXYZD50 matrix, the preferred color gamut
+ * representation of SkColorSpace.
+ */
+ bool toXYZD50(skcms_Matrix3x3* toXYZD50) const;
+};
+
+namespace SkNamedTransferFn {
+
+// Like SkNamedGamut::kSRGB, keeping this bitwise exactly the same as skcms makes things fastest.
+static constexpr skcms_TransferFunction kSRGB =
+ { 2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0.0f, 0.0f };
+
+static constexpr skcms_TransferFunction k2Dot2 =
+ { 2.2f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
+
+static constexpr skcms_TransferFunction kLinear =
+ { 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
+
+static constexpr skcms_TransferFunction kRec2020 =
+ {2.22222f, 0.909672f, 0.0903276f, 0.222222f, 0.0812429f, 0, 0};
+
+static constexpr skcms_TransferFunction kPQ =
+ {-2.0f, -107/128.0f, 1.0f, 32/2523.0f, 2413/128.0f, -2392/128.0f, 8192/1305.0f };
+
+static constexpr skcms_TransferFunction kHLG =
+ {-3.0f, 2.0f, 2.0f, 1/0.17883277f, 0.28466892f, 0.55991073f, 0.0f };
+
+} // namespace SkNamedTransferFn
+
+namespace SkNamedGamut {
+
+static constexpr skcms_Matrix3x3 kSRGB = {{
+ // ICC fixed-point (16.16) representation, taken from skcms. Please keep them exactly in sync.
+ // 0.436065674f, 0.385147095f, 0.143066406f,
+ // 0.222488403f, 0.716873169f, 0.060607910f,
+ // 0.013916016f, 0.097076416f, 0.714096069f,
+ { SkFixedToFloat(0x6FA2), SkFixedToFloat(0x6299), SkFixedToFloat(0x24A0) },
+ { SkFixedToFloat(0x38F5), SkFixedToFloat(0xB785), SkFixedToFloat(0x0F84) },
+ { SkFixedToFloat(0x0390), SkFixedToFloat(0x18DA), SkFixedToFloat(0xB6CF) },
+}};
+
+static constexpr skcms_Matrix3x3 kAdobeRGB = {{
+ // ICC fixed-point (16.16) representation of:
+ // 0.60974, 0.20528, 0.14919,
+ // 0.31111, 0.62567, 0.06322,
+ // 0.01947, 0.06087, 0.74457,
+ { SkFixedToFloat(0x9c18), SkFixedToFloat(0x348d), SkFixedToFloat(0x2631) },
+ { SkFixedToFloat(0x4fa5), SkFixedToFloat(0xa02c), SkFixedToFloat(0x102f) },
+ { SkFixedToFloat(0x04fc), SkFixedToFloat(0x0f95), SkFixedToFloat(0xbe9c) },
+}};
+
+static constexpr skcms_Matrix3x3 kDisplayP3 = {{
+ { 0.515102f, 0.291965f, 0.157153f },
+ { 0.241182f, 0.692236f, 0.0665819f },
+ { -0.00104941f, 0.0418818f, 0.784378f },
+}};
+
+static constexpr skcms_Matrix3x3 kRec2020 = {{
+ { 0.673459f, 0.165661f, 0.125100f },
+ { 0.279033f, 0.675338f, 0.0456288f },
+ { -0.00193139f, 0.0299794f, 0.797162f },
+}};
+
+static constexpr skcms_Matrix3x3 kXYZ = {{
+ { 1.0f, 0.0f, 0.0f },
+ { 0.0f, 1.0f, 0.0f },
+ { 0.0f, 0.0f, 1.0f },
+}};
+
+} // namespace SkNamedGamut
+
+class SK_API SkColorSpace : public SkNVRefCnt<SkColorSpace> {
+public:
+ /**
+ * Create the sRGB color space.
+ */
+ static sk_sp<SkColorSpace> MakeSRGB();
+
+ /**
+ * Colorspace with the sRGB primaries, but a linear (1.0) gamma.
+ */
+ static sk_sp<SkColorSpace> MakeSRGBLinear();
+
+ /**
+ * Create an SkColorSpace from a transfer function and a row-major 3x3 transformation to XYZ.
+ */
+ static sk_sp<SkColorSpace> MakeRGB(const skcms_TransferFunction& transferFn,
+ const skcms_Matrix3x3& toXYZ);
+
+ /**
+ * Create an SkColorSpace from a parsed (skcms) ICC profile.
+ */
+ static sk_sp<SkColorSpace> Make(const skcms_ICCProfile&);
+
+ /**
+ * Convert this color space to an skcms ICC profile struct.
+ */
+ void toProfile(skcms_ICCProfile*) const;
+
+ /**
+ * Returns true if the color space gamma is near enough to be approximated as sRGB.
+ */
+ bool gammaCloseToSRGB() const;
+
+ /**
+ * Returns true if the color space gamma is linear.
+ */
+ bool gammaIsLinear() const;
+
+ /**
+ * Sets |fn| to the transfer function from this color space. Returns true if the transfer
+ * function can be represented as coefficients to the standard ICC 7-parameter equation.
+ * Returns false otherwise (eg, PQ, HLG).
+ */
+ bool isNumericalTransferFn(skcms_TransferFunction* fn) const;
+
+ /**
+ * Returns true and sets |toXYZD50|.
+ */
+ bool toXYZD50(skcms_Matrix3x3* toXYZD50) const;
+
+ /**
+ * Returns a hash of the gamut transformation to XYZ D50. Allows for fast equality checking
+ * of gamuts, at the (very small) risk of collision.
+ */
+ uint32_t toXYZD50Hash() const { return fToXYZD50Hash; }
+
+ /**
+ * Returns a color space with the same gamut as this one, but with a linear gamma.
+ */
+ sk_sp<SkColorSpace> makeLinearGamma() const;
+
+ /**
+ * Returns a color space with the same gamut as this one, but with the sRGB transfer
+ * function.
+ */
+ sk_sp<SkColorSpace> makeSRGBGamma() const;
+
+ /**
+ * Returns a color space with the same transfer function as this one, but with the primary
+ * colors rotated. In other words, this produces a new color space that maps RGB to GBR
+ * (when applied to a source), and maps RGB to BRG (when applied to a destination).
+ *
+ * This is used for testing, to construct color spaces that have severe and testable behavior.
+ */
+ sk_sp<SkColorSpace> makeColorSpin() const;
+
+ /**
+ * Returns true if the color space is sRGB.
+ * Returns false otherwise.
+ *
+ * This allows a little bit of tolerance, given that we might see small numerical error
+ * in some cases: converting ICC fixed point to float, converting white point to D50,
+ * rounding decisions on transfer function and matrix.
+ *
+ * This does not consider a 2.2f exponential transfer function to be sRGB. While these
+ * functions are similar (and it is sometimes useful to consider them together), this
+ * function checks for logical equality.
+ */
+ bool isSRGB() const;
+
+ /**
+ * Returns a serialized representation of this color space.
+ */
+ sk_sp<SkData> serialize() const;
+
+ /**
+ * If |memory| is nullptr, returns the size required to serialize.
+ * Otherwise, serializes into |memory| and returns the size.
+ */
+ size_t writeToMemory(void* memory) const;
+
+ static sk_sp<SkColorSpace> Deserialize(const void* data, size_t length);
+
+ /**
+ * If both are null, we return true. If one is null and the other is not, we return false.
+ * If both are non-null, we do a deeper compare.
+ */
+ static bool Equals(const SkColorSpace*, const SkColorSpace*);
+
+ void transferFn(float gabcdef[7]) const; // DEPRECATED: Remove when webview usage is gone
+ void transferFn(skcms_TransferFunction* fn) const;
+ void invTransferFn(skcms_TransferFunction* fn) const;
+ void gamutTransformTo(const SkColorSpace* dst, skcms_Matrix3x3* src_to_dst) const;
+
+ uint32_t transferFnHash() const { return fTransferFnHash; }
+ uint64_t hash() const { return (uint64_t)fTransferFnHash << 32 | fToXYZD50Hash; }
+
+private:
+ friend class SkColorSpaceSingletonFactory;
+
+ SkColorSpace(const skcms_TransferFunction& transferFn, const skcms_Matrix3x3& toXYZ);
+
+ void computeLazyDstFields() const;
+
+ uint32_t fTransferFnHash;
+ uint32_t fToXYZD50Hash;
+
+ skcms_TransferFunction fTransferFn;
+ skcms_Matrix3x3 fToXYZD50;
+
+ mutable skcms_TransferFunction fInvTransferFn;
+ mutable skcms_Matrix3x3 fFromXYZD50;
+ mutable SkOnce fLazyDstFieldsOnce;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkColorType.h b/gfx/skia/skia/include/core/SkColorType.h
new file mode 100644
index 0000000000..789c4ad019
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkColorType.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorType_DEFINED
+#define SkColorType_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/** \enum SkColorType
+ Describes how pixel bits encode color. A pixel may be an alpha mask, a grayscale, RGB, or ARGB.
+
+ kN32_SkColorType selects the native 32-bit ARGB format for the current configuration. This can
+ lead to inconsistent results across platforms, so use with caution.
+*/
+enum SkColorType : int {
+ kUnknown_SkColorType, //!< uninitialized
+ kAlpha_8_SkColorType, //!< pixel with alpha in 8-bit byte
+ kRGB_565_SkColorType, //!< pixel with 5 bits red, 6 bits green, 5 bits blue, in 16-bit word
+ kARGB_4444_SkColorType, //!< pixel with 4 bits for alpha, red, green, blue; in 16-bit word
+ kRGBA_8888_SkColorType, //!< pixel with 8 bits for red, green, blue, alpha; in 32-bit word
+ kRGB_888x_SkColorType, //!< pixel with 8 bits each for red, green, blue; in 32-bit word
+ kBGRA_8888_SkColorType, //!< pixel with 8 bits for blue, green, red, alpha; in 32-bit word
+ kRGBA_1010102_SkColorType, //!< 10 bits for red, green, blue; 2 bits for alpha; in 32-bit word
+ kBGRA_1010102_SkColorType, //!< 10 bits for blue, green, red; 2 bits for alpha; in 32-bit word
+ kRGB_101010x_SkColorType, //!< pixel with 10 bits each for red, green, blue; in 32-bit word
+ kBGR_101010x_SkColorType, //!< pixel with 10 bits each for blue, green, red; in 32-bit word
+ kBGR_101010x_XR_SkColorType, //!< pixel with 10 bits each for blue, green, red; in 32-bit word, extended range
+ kGray_8_SkColorType, //!< pixel with grayscale level in 8-bit byte
+ kRGBA_F16Norm_SkColorType, //!< pixel with half floats in [0,1] for red, green, blue, alpha;
+ // in 64-bit word
+ kRGBA_F16_SkColorType, //!< pixel with half floats for red, green, blue, alpha;
+ // in 64-bit word
+ kRGBA_F32_SkColorType, //!< pixel using C float for red, green, blue, alpha; in 128-bit word
+
+ // The following 6 colortypes are just for reading from - not for rendering to
+ kR8G8_unorm_SkColorType, //!< pixel with a uint8_t for red and green
+
+ kA16_float_SkColorType, //!< pixel with a half float for alpha
+ kR16G16_float_SkColorType, //!< pixel with a half float for red and green
+
+ kA16_unorm_SkColorType, //!< pixel with a little endian uint16_t for alpha
+ kR16G16_unorm_SkColorType, //!< pixel with a little endian uint16_t for red and green
+ kR16G16B16A16_unorm_SkColorType, //!< pixel with a little endian uint16_t for red, green, blue
+ // and alpha
+
+ kSRGBA_8888_SkColorType,
+ kR8_unorm_SkColorType,
+
+ kLastEnum_SkColorType = kR8_unorm_SkColorType, //!< last valid value
+
+#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
+ kN32_SkColorType = kBGRA_8888_SkColorType,//!< native 32-bit BGRA encoding
+
+#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
+ kN32_SkColorType = kRGBA_8888_SkColorType,//!< native 32-bit RGBA encoding
+
+#else
+ kN32_SkColorType = kBGRA_8888_SkColorType,
+#endif
+};
+static constexpr int kSkColorTypeCnt = static_cast<int>(kLastEnum_SkColorType) + 1;
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkContourMeasure.h b/gfx/skia/skia/include/core/SkContourMeasure.h
new file mode 100644
index 0000000000..7090deaaed
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkContourMeasure.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkContourMeasure_DEFINED
+#define SkContourMeasure_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkTDArray.h"
+
+struct SkConic;
+
+class SK_API SkContourMeasure : public SkRefCnt {
+public:
+ /** Return the length of the contour.
+ */
+ SkScalar length() const { return fLength; }
+
+ /** Pins distance to 0 <= distance <= length(), and then computes the corresponding
+ * position and tangent.
+ */
+ bool SK_WARN_UNUSED_RESULT getPosTan(SkScalar distance, SkPoint* position,
+ SkVector* tangent) const;
+
+ enum MatrixFlags {
+ kGetPosition_MatrixFlag = 0x01,
+ kGetTangent_MatrixFlag = 0x02,
+ kGetPosAndTan_MatrixFlag = kGetPosition_MatrixFlag | kGetTangent_MatrixFlag
+ };
+
+ /** Pins distance to 0 <= distance <= getLength(), and then computes
+ the corresponding matrix (by calling getPosTan).
+ Returns false if there is no path, or a zero-length path was specified, in which case
+ matrix is unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT getMatrix(SkScalar distance, SkMatrix* matrix,
+ MatrixFlags flags = kGetPosAndTan_MatrixFlag) const;
+
+ /** Given a start and stop distance, return in dst the intervening segment(s).
+ If the segment is zero-length, return false, else return true.
+ startD and stopD are pinned to legal values (0..getLength()). If startD > stopD
+ then return false (and leave dst untouched).
+ Begin the segment with a moveTo if startWithMoveTo is true
+ */
+ bool SK_WARN_UNUSED_RESULT getSegment(SkScalar startD, SkScalar stopD, SkPath* dst,
+ bool startWithMoveTo) const;
+
+ /** Return true if the contour is closed.
+ */
+ bool isClosed() const { return fIsClosed; }
+
+private:
+ struct Segment {
+ SkScalar fDistance; // total distance up to this point
+ unsigned fPtIndex; // index into the fPts array
+ unsigned fTValue : 30;
+ unsigned fType : 2; // actually the enum SkSegType
+ // See SkPathMeasurePriv.h
+
+ SkScalar getScalarT() const;
+
+ static const Segment* Next(const Segment* seg) {
+ unsigned ptIndex = seg->fPtIndex;
+ do {
+ ++seg;
+ } while (seg->fPtIndex == ptIndex);
+ return seg;
+ }
+
+ };
+
+ const SkTDArray<Segment> fSegments;
+ const SkTDArray<SkPoint> fPts; // Points used to define the segments
+
+ const SkScalar fLength;
+ const bool fIsClosed;
+
+ SkContourMeasure(SkTDArray<Segment>&& segs, SkTDArray<SkPoint>&& pts,
+ SkScalar length, bool isClosed);
+ ~SkContourMeasure() override {}
+
+ const Segment* distanceToSegment(SkScalar distance, SkScalar* t) const;
+
+ friend class SkContourMeasureIter;
+};
+
+class SK_API SkContourMeasureIter {
+public:
+ SkContourMeasureIter();
+ /**
+ * Initialize the Iter with a path.
+ * The parts of the path that are needed are copied, so the client is free to modify/delete
+ * the path after this call.
+ *
+ * resScale controls the precision of the measure. values > 1 increase the
+ * precision (and possibly slow down the computation).
+ */
+ SkContourMeasureIter(const SkPath& path, bool forceClosed, SkScalar resScale = 1);
+ ~SkContourMeasureIter();
+
+ /**
+ * Reset the Iter with a path.
+ * The parts of the path that are needed are copied, so the client is free to modify/delete
+ * the path after this call.
+ */
+ void reset(const SkPath& path, bool forceClosed, SkScalar resScale = 1);
+
+ /**
+ * Iterates through contours in path, returning a contour-measure object for each contour
+ * in the path. Returns null when it is done.
+ *
+ * This only returns non-zero length contours, where a contour is the segments between
+ * a kMove_Verb and either ...
+ * - the next kMove_Verb
+ * - kClose_Verb (1 or more)
+ * - kDone_Verb
+ * If it encounters a zero-length contour, it is skipped.
+ */
+ sk_sp<SkContourMeasure> next();
+
+private:
+ class Impl;
+
+ std::unique_ptr<Impl> fImpl;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkCoverageMode.h b/gfx/skia/skia/include/core/SkCoverageMode.h
new file mode 100644
index 0000000000..aaae60c419
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCoverageMode.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCoverageMode_DEFINED
+#define SkCoverageMode_DEFINED
+
+/**
+ * Describes geometric operations (ala SkRegion::Op) that can be applied to coverage bytes.
+ * These can be thought of as variants of porter-duff (SkBlendMode) modes, but only applied
+ * to the alpha channel.
+ *
+ * See SkMaskFilter for ways to use these when combining two different masks.
+ */
+enum class SkCoverageMode {
+ kUnion, // A ∪ B A+B-A*B
+ kIntersect, // A ∩ B A*B
+ kDifference, // A - B A*(1-B)
+ kReverseDifference, // B - A B*(1-A)
+ kXor, // A ⊕ B A+B-2*A*B
+
+ kLast = kXor,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkCubicMap.h b/gfx/skia/skia/include/core/SkCubicMap.h
new file mode 100644
index 0000000000..863c9333f6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkCubicMap.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCubicMap_DEFINED
+#define SkCubicMap_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+/**
+ * Fast evaluation of a cubic ease-in / ease-out curve. This is defined as a parametric cubic
+ * curve inside the unit square.
+ *
+ * pt[0] is implicitly { 0, 0 }
+ * pt[3] is implicitly { 1, 1 }
+ * pt[1].x and pt[2].x are inside the unit interval [0..1]
+ */
+class SK_API SkCubicMap {
+public:
+ SkCubicMap(SkPoint p1, SkPoint p2);
+
+ static bool IsLinear(SkPoint p1, SkPoint p2) {
+ return SkScalarNearlyEqual(p1.fX, p1.fY) && SkScalarNearlyEqual(p2.fX, p2.fY);
+ }
+
+ float computeYFromX(float x) const;
+
+ SkPoint computeFromT(float t) const;
+
+private:
+ enum Type {
+ kLine_Type, // x == y
+ kCubeRoot_Type, // At^3 == x
+ kSolver_Type, // general monotonic cubic solver
+ };
+
+ SkPoint fCoeff[3];
+ Type fType;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/include/core/SkData.h b/gfx/skia/skia/include/core/SkData.h
new file mode 100644
index 0000000000..2b50cebc81
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkData.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkData_DEFINED
+#define SkData_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkAssert.h"
+
+#include <cstdint>
+#include <cstdio>
+
+class SkStream;
+
+/**
+ * SkData holds an immutable data buffer. Not only is the data immutable,
+ * but the actual ptr that is returned (by data() or bytes()) is guaranteed
+ * to always be the same for the life of this instance.
+ */
+class SK_API SkData final : public SkNVRefCnt<SkData> {
+public:
+ /**
+ * Returns the number of bytes stored.
+ */
+ size_t size() const { return fSize; }
+
+ bool isEmpty() const { return 0 == fSize; }
+
+ /**
+ * Returns the ptr to the data.
+ */
+ const void* data() const { return fPtr; }
+
+ /**
+ * Like data(), returns a read-only ptr into the data, but in this case
+ * it is cast to uint8_t*, to make it easy to add an offset to it.
+ */
+ const uint8_t* bytes() const {
+ return reinterpret_cast<const uint8_t*>(fPtr);
+ }
+
+ /**
+ * USE WITH CAUTION.
+ * This call will assert that the refcnt is 1, as a precaution against modifying the
+ * contents when another client/thread has access to the data.
+ */
+ void* writable_data() {
+ if (fSize) {
+ // only assert we're unique if we're not empty
+ SkASSERT(this->unique());
+ }
+ return const_cast<void*>(fPtr);
+ }
+
+ /**
+ * Helper to copy a range of the data into a caller-provided buffer.
+ * Returns the actual number of bytes copied, after clamping offset and
+ * length to the size of the data. If buffer is NULL, it is ignored, and
+ * only the computed number of bytes is returned.
+ */
+ size_t copyRange(size_t offset, size_t length, void* buffer) const;
+
+ /**
+ * Returns true if these two objects have the same length and contents,
+ * effectively returning 0 == memcmp(...)
+ */
+ bool equals(const SkData* other) const;
+
+ /**
+ * Function that, if provided, will be called when the SkData goes out
+ * of scope, allowing for custom allocation/freeing of the data's contents.
+ */
+ typedef void (*ReleaseProc)(const void* ptr, void* context);
+
+ /**
+ * Create a new dataref by copying the specified data
+ */
+ static sk_sp<SkData> MakeWithCopy(const void* data, size_t length);
+
+
+ /**
+ * Create a new data with uninitialized contents. The caller should call writable_data()
+ * to write into the buffer, but this must be done before another ref() is made.
+ */
+ static sk_sp<SkData> MakeUninitialized(size_t length);
+
+ /**
+ * Create a new data with zero-initialized contents. The caller should call writable_data()
+ * to write into the buffer, but this must be done before another ref() is made.
+ */
+ static sk_sp<SkData> MakeZeroInitialized(size_t length);
+
+ /**
+ * Create a new dataref by copying the specified c-string
+ * (a null-terminated array of bytes). The returned SkData will have size()
+ * equal to strlen(cstr) + 1. If cstr is NULL, it will be treated the same
+ * as "".
+ */
+ static sk_sp<SkData> MakeWithCString(const char cstr[]);
+
+ /**
+ * Create a new dataref, taking the ptr as is, and using the
+ * releaseproc to free it. The proc may be NULL.
+ */
+ static sk_sp<SkData> MakeWithProc(const void* ptr, size_t length, ReleaseProc proc, void* ctx);
+
+ /**
+ * Call this when the data parameter is already const and will outlive the lifetime of the
+ * SkData. Suitable for use with const globals.
+ */
+ static sk_sp<SkData> MakeWithoutCopy(const void* data, size_t length) {
+ return MakeWithProc(data, length, NoopReleaseProc, nullptr);
+ }
+
+ /**
+ * Create a new dataref from a pointer allocated by malloc. The Data object
+ * takes ownership of that allocation, and will handle calling sk_free.
+ */
+ static sk_sp<SkData> MakeFromMalloc(const void* data, size_t length);
+
+ /**
+ * Create a new dataref the file with the specified path.
+ * If the file cannot be opened, this returns NULL.
+ */
+ static sk_sp<SkData> MakeFromFileName(const char path[]);
+
+ /**
+ * Create a new dataref from a stdio FILE.
+ * This does not take ownership of the FILE, nor close it.
+ * The caller is free to close the FILE at its convenience.
+ * The FILE must be open for reading only.
+ * Returns NULL on failure.
+ */
+ static sk_sp<SkData> MakeFromFILE(FILE* f);
+
+ /**
+ * Create a new dataref from a file descriptor.
+ * This does not take ownership of the file descriptor, nor close it.
+ * The caller is free to close the file descriptor at its convenience.
+ * The file descriptor must be open for reading only.
+ * Returns NULL on failure.
+ */
+ static sk_sp<SkData> MakeFromFD(int fd);
+
+ /**
+ * Attempt to read size bytes into a SkData. If the read succeeds, return the data,
+ * else return NULL. Either way the stream's cursor may have been changed as a result
+ * of calling read().
+ */
+ static sk_sp<SkData> MakeFromStream(SkStream*, size_t size);
+
+ /**
+ * Create a new dataref using a subset of the data in the specified
+ * src dataref.
+ */
+ static sk_sp<SkData> MakeSubset(const SkData* src, size_t offset, size_t length);
+
+ /**
+ * Returns a new empty dataref (or a reference to a shared empty dataref).
+ * New or shared, the caller must see that unref() is eventually called.
+ */
+ static sk_sp<SkData> MakeEmpty();
+
+private:
+ friend class SkNVRefCnt<SkData>;
+ ReleaseProc fReleaseProc;
+ void* fReleaseProcContext;
+ const void* fPtr;
+ size_t fSize;
+
+ SkData(const void* ptr, size_t size, ReleaseProc, void* context);
+ explicit SkData(size_t size); // inplace new/delete
+ ~SkData();
+
+ // Ensure the unsized delete is called.
+ void operator delete(void* p);
+
+ // shared internal factory
+ static sk_sp<SkData> PrivateNewWithCopy(const void* srcOrNull, size_t length);
+
+ static void NoopReleaseProc(const void*, void*); // {}
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkDataTable.h b/gfx/skia/skia/include/core/SkDataTable.h
new file mode 100644
index 0000000000..3aa48d5f33
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDataTable.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDataTable_DEFINED
+#define SkDataTable_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkAssert.h"
+
+#include <cstdint>
+#include <cstring>
+
+/**
+ * Like SkData, SkDataTable holds an immutable data buffer. The data buffer is
+ * organized into a table of entries, each with a length, so the entries are
+ * not required to all be the same size.
+ */
+class SK_API SkDataTable : public SkRefCnt {
+public:
+ /**
+ * Returns true if the table is empty (i.e. has no entries).
+ */
+ bool isEmpty() const { return 0 == fCount; }
+
+ /**
+ * Return the number of entries in the table. 0 for an empty table
+ */
+ int count() const { return fCount; }
+
+ /**
+ * Return the size of the index'th entry in the table. The caller must
+ * ensure that index is valid for this table.
+ */
+ size_t atSize(int index) const;
+
+ /**
+ * Return a pointer to the data of the index'th entry in the table.
+ * The caller must ensure that index is valid for this table.
+ *
+ * @param size If non-null, this returns the byte size of this entry. This
+ * will be the same value that atSize(index) would return.
+ */
+ const void* at(int index, size_t* size = nullptr) const;
+
+ template <typename T>
+ const T* atT(int index, size_t* size = nullptr) const {
+ return reinterpret_cast<const T*>(this->at(index, size));
+ }
+
+ /**
+ * Returns the index'th entry as a c-string, and assumes that the trailing
+ * null byte had been copied into the table as well.
+ */
+ const char* atStr(int index) const {
+ size_t size;
+ const char* str = this->atT<const char>(index, &size);
+ SkASSERT(strlen(str) + 1 == size);
+ return str;
+ }
+
+ typedef void (*FreeProc)(void* context);
+
+ static sk_sp<SkDataTable> MakeEmpty();
+
+ /**
+ * Return a new DataTable that contains a copy of the data stored in each
+ * "array".
+ *
+ * @param ptrs array of pointers to each element to be copied into the table.
+ * @param sizes array of byte-lengths for each entry in the corresponding
+ * ptrs[] array.
+ * @param count the number of array elements in ptrs[] and sizes[] to copy.
+ */
+ static sk_sp<SkDataTable> MakeCopyArrays(const void * const * ptrs,
+ const size_t sizes[], int count);
+
+ /**
+ * Return a new table that contains a copy of the data in array.
+ *
+ * @param array contiguous array of data for all elements to be copied.
+ * @param elemSize byte-length for a given element.
+ * @param count the number of entries to be copied out of array. The number
+ * of bytes that will be copied is count * elemSize.
+ */
+ static sk_sp<SkDataTable> MakeCopyArray(const void* array, size_t elemSize, int count);
+
+ static sk_sp<SkDataTable> MakeArrayProc(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* context);
+
+private:
+ struct Dir {
+ const void* fPtr;
+ uintptr_t fSize;
+ };
+
+ int fCount;
+ size_t fElemSize;
+ union {
+ const Dir* fDir;
+ const char* fElems;
+ } fU;
+
+ FreeProc fFreeProc;
+ void* fFreeProcContext;
+
+ SkDataTable();
+ SkDataTable(const void* array, size_t elemSize, int count,
+ FreeProc, void* context);
+ SkDataTable(const Dir*, int count, FreeProc, void* context);
+ ~SkDataTable() override;
+
+ friend class SkDataTableBuilder; // access to Dir
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkDeferredDisplayList.h b/gfx/skia/skia/include/core/SkDeferredDisplayList.h
new file mode 100644
index 0000000000..07296360ba
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDeferredDisplayList.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeferredDisplayList_DEFINED
+#define SkDeferredDisplayList_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/core/SkTypes.h"
+
+class SkDeferredDisplayListPriv;
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "include/private/base/SkTArray.h"
+#include <map>
+class GrRenderTask;
+class GrRenderTargetProxy;
+#else
+using GrRenderTargetProxy = SkRefCnt;
+#endif
+
+/*
+ * This class contains pre-processed gpu operations that can be replayed into
+ * an SkSurface via SkSurface::draw(SkDeferredDisplayList*).
+ */
+class SkDeferredDisplayList : public SkNVRefCnt<SkDeferredDisplayList> {
+public:
+ SK_API ~SkDeferredDisplayList();
+
+ SK_API const SkSurfaceCharacterization& characterization() const {
+ return fCharacterization;
+ }
+
+#if defined(SK_GANESH)
+ /**
+ * Iterate through the programs required by the DDL.
+ */
+ class SK_API ProgramIterator {
+ public:
+ ProgramIterator(GrDirectContext*, SkDeferredDisplayList*);
+ ~ProgramIterator();
+
+ // This returns true if any work was done. Getting a cache hit does not count as work.
+ bool compile();
+ bool done() const;
+ void next();
+
+ private:
+ GrDirectContext* fDContext;
+ const SkTArray<GrRecordingContext::ProgramData>& fProgramData;
+ int fIndex;
+ };
+#endif
+
+ // Provides access to functions that aren't part of the public API.
+ SkDeferredDisplayListPriv priv();
+ const SkDeferredDisplayListPriv priv() const; // NOLINT(readability-const-return-type)
+
+private:
+ friend class GrDrawingManager; // for access to 'fRenderTasks', 'fLazyProxyData', 'fArenas'
+ friend class SkDeferredDisplayListRecorder; // for access to 'fLazyProxyData'
+ friend class SkDeferredDisplayListPriv;
+
+ // This object is the source from which the lazy proxy backing the DDL will pull its backing
+ // texture when the DDL is replayed. It has to be separately ref counted because the lazy proxy
+ // can outlive the DDL.
+ class LazyProxyData : public SkRefCnt {
+#if defined(SK_GANESH)
+ public:
+ // Upon being replayed - this field will be filled in (by the DrawingManager) with the
+ // proxy backing the destination SkSurface. Note that, since there is no good place to
+ // clear it, it can become a dangling pointer. Additionally, since the renderTargetProxy
+ // doesn't get a ref here, the SkSurface that owns it must remain alive until the DDL
+ // is flushed.
+ // TODO: the drawing manager could ref the renderTargetProxy for the DDL and then add
+ // a renderingTask to unref it after the DDL's ops have been executed.
+ GrRenderTargetProxy* fReplayDest = nullptr;
+#endif
+ };
+
+ SK_API SkDeferredDisplayList(const SkSurfaceCharacterization& characterization,
+ sk_sp<GrRenderTargetProxy> fTargetProxy,
+ sk_sp<LazyProxyData>);
+
+#if defined(SK_GANESH)
+ const SkTArray<GrRecordingContext::ProgramData>& programData() const {
+ return fProgramData;
+ }
+#endif
+
+ const SkSurfaceCharacterization fCharacterization;
+
+#if defined(SK_GANESH)
+ // These are ordered such that the destructor cleans op tasks up first (which may refer back
+ // to the arena and memory pool in their destructors).
+ GrRecordingContext::OwnedArenas fArenas;
+ SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
+
+ SkTArray<GrRecordingContext::ProgramData> fProgramData;
+ sk_sp<GrRenderTargetProxy> fTargetProxy;
+ sk_sp<LazyProxyData> fLazyProxyData;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkDeferredDisplayListRecorder.h b/gfx/skia/skia/include/core/SkDeferredDisplayListRecorder.h
new file mode 100644
index 0000000000..67ee03fd6c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDeferredDisplayListRecorder.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeferredDisplayListRecorder_DEFINED
+#define SkDeferredDisplayListRecorder_DEFINED
+
+#include "include/core/SkDeferredDisplayList.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/core/SkTypes.h"
+
+class GrBackendFormat;
+class GrBackendTexture;
+class GrRecordingContext;
+class GrYUVABackendTextureInfo;
+class SkCanvas;
+class SkSurface;
+
+/*
+ * This class is intended to be used as:
+ * Get an SkSurfaceCharacterization representing the intended gpu-backed destination SkSurface
+ * Create one of these (an SkDeferredDisplayListRecorder) on the stack
+ * Get the canvas and render into it
+ * Snap off and hold on to an SkDeferredDisplayList
+ * Once your app actually needs the pixels, call SkSurface::draw(SkDeferredDisplayList*)
+ *
+ * This class never accesses the GPU but performs all the cpu work it can. It
+ * is thread-safe (i.e., one can break a scene into tiles and perform their cpu-side
+ * work in parallel ahead of time).
+ */
+class SK_API SkDeferredDisplayListRecorder {
+public:
+ SkDeferredDisplayListRecorder(const SkSurfaceCharacterization&);
+ ~SkDeferredDisplayListRecorder();
+
+ const SkSurfaceCharacterization& characterization() const {
+ return fCharacterization;
+ }
+
+ // The backing canvas will become invalid (and this entry point will return
+ // null) once 'detach' is called.
+ // Note: ownership of the SkCanvas is not transferred via this call.
+ SkCanvas* getCanvas();
+
+ sk_sp<SkDeferredDisplayList> detach();
+
+#if defined(SK_GANESH)
+ using PromiseImageTextureContext = SkImage::PromiseImageTextureContext;
+ using PromiseImageTextureFulfillProc = SkImage::PromiseImageTextureFulfillProc;
+ using PromiseImageTextureReleaseProc = SkImage::PromiseImageTextureReleaseProc;
+
+#ifndef SK_MAKE_PROMISE_TEXTURE_DISABLE_LEGACY_API
+ /** Deprecated: Use SkImage::MakePromiseTexture instead. */
+ sk_sp<SkImage> makePromiseTexture(const GrBackendFormat& backendFormat,
+ int width,
+ int height,
+ GrMipmapped mipmapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureContext textureContext);
+
+ /** Deprecated: Use SkImage::MakePromiseYUVATexture instead. */
+ sk_sp<SkImage> makeYUVAPromiseTexture(const GrYUVABackendTextureInfo& yuvaBackendTextureInfo,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureContext textureContexts[]);
+#endif // SK_MAKE_PROMISE_TEXTURE_DISABLE_LEGACY_API
+#endif // defined(SK_GANESH)
+
+private:
+ SkDeferredDisplayListRecorder(const SkDeferredDisplayListRecorder&) = delete;
+ SkDeferredDisplayListRecorder& operator=(const SkDeferredDisplayListRecorder&) = delete;
+
+ bool init();
+
+ const SkSurfaceCharacterization fCharacterization;
+
+#if defined(SK_GANESH)
+ sk_sp<GrRecordingContext> fContext;
+ sk_sp<GrRenderTargetProxy> fTargetProxy;
+ sk_sp<SkDeferredDisplayList::LazyProxyData> fLazyProxyData;
+ sk_sp<SkSurface> fSurface;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkDocument.h b/gfx/skia/skia/include/core/SkDocument.h
new file mode 100644
index 0000000000..eacfb2c040
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDocument.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDocument_DEFINED
+#define SkDocument_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+
+class SkCanvas;
+class SkWStream;
+struct SkRect;
+
+/** SK_ScalarDefaultRasterDPI is 72 dots per inch. */
+static constexpr SkScalar SK_ScalarDefaultRasterDPI = 72.0f;
+
+/**
+ * High-level API for creating a document-based canvas. To use:
+ *
+ * 1. Create a document, specifying a stream to store the output.
+ * 2. For each "page" of content:
+ * a. canvas = doc->beginPage(...)
+ * b. draw_my_content(canvas);
+ * c. doc->endPage();
+ * 3. Close the document with doc->close().
+ */
+class SK_API SkDocument : public SkRefCnt {
+public:
+
+ /**
+ * Begin a new page for the document, returning the canvas that will draw
+ * into the page. The document owns this canvas, and it will go out of
+ * scope when endPage() or close() is called, or the document is deleted.
+ */
+ SkCanvas* beginPage(SkScalar width, SkScalar height, const SkRect* content = nullptr);
+
+ /**
+ * Call endPage() when the content for the current page has been drawn
+ * (into the canvas returned by beginPage()). After this call the canvas
+ * returned by beginPage() will be out-of-scope.
+ */
+ void endPage();
+
+ /**
+ * Call close() when all pages have been drawn. This will close the file
+ * or stream holding the document's contents. After close() the document
+ * can no longer add new pages. Deleting the document will automatically
+ * call close() if need be.
+ */
+ void close();
+
+ /**
+ * Call abort() to stop producing the document immediately.
+ * The stream output must be ignored, and should not be trusted.
+ */
+ void abort();
+
+protected:
+ SkDocument(SkWStream*);
+
+ // note: subclasses must call close() in their destructor, as the base class
+ // cannot do this for them.
+ ~SkDocument() override;
+
+ virtual SkCanvas* onBeginPage(SkScalar width, SkScalar height) = 0;
+ virtual void onEndPage() = 0;
+ virtual void onClose(SkWStream*) = 0;
+ virtual void onAbort() = 0;
+
+ // Allows subclasses to write to the stream as pages are written.
+ SkWStream* getStream() { return fStream; }
+
+ enum State {
+ kBetweenPages_State,
+ kInPage_State,
+ kClosed_State
+ };
+ State getState() const { return fState; }
+
+private:
+ SkWStream* fStream;
+ State fState;
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkDrawLooper.h b/gfx/skia/skia/include/core/SkDrawLooper.h
new file mode 100644
index 0000000000..69d341c25f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDrawLooper.h
@@ -0,0 +1,135 @@
+
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDrawLooper_DEFINED
+#define SkDrawLooper_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPoint.h"
+#include <functional> // std::function
+
+#ifndef SK_SUPPORT_LEGACY_DRAWLOOPER
+#error "SkDrawLooper is unsupported"
+#endif
+
+class SkArenaAlloc;
+class SkCanvas;
+class SkMatrix;
+class SkPaint;
+struct SkRect;
+
+/** \class SkDrawLooper
+ DEPRECATED: No longer supported in Skia.
+*/
+class SK_API SkDrawLooper : public SkFlattenable {
+public:
+ /**
+ * Holds state during a draw. Users call next() until it returns false.
+ *
+ * Subclasses of SkDrawLooper should create a subclass of this object to
+ * hold state specific to their subclass.
+ */
+ class SK_API Context {
+ public:
+ Context() {}
+ virtual ~Context() {}
+
+ struct Info {
+ SkVector fTranslate;
+ bool fApplyPostCTM;
+
+ void applyToCTM(SkMatrix* ctm) const;
+ void applyToCanvas(SkCanvas*) const;
+ };
+
+ /**
+ * Called in a loop on objects returned by SkDrawLooper::createContext().
+ * Each time true is returned, the object is drawn (possibly with a modified
+ * canvas and/or paint). When false is finally returned, drawing for the object
+ * stops.
+ *
+ * On each call, the paint will be in its original state, but the
+ * canvas will be as it was following the previous call to next() or
+ * createContext().
+ *
+ * The implementation must ensure that, when next() finally returns
+ * false, the canvas has been restored to the state it was
+ * initially, before createContext() was first called.
+ */
+ virtual bool next(Info*, SkPaint*) = 0;
+
+ private:
+ Context(const Context&) = delete;
+ Context& operator=(const Context&) = delete;
+ };
+
+ /**
+ * Called right before something is being drawn. Returns a Context
+ * whose next() method should be called until it returns false.
+ */
+ virtual Context* makeContext(SkArenaAlloc*) const = 0;
+
+ /**
+ * The fast bounds functions are used to enable the paint to be culled early
+ * in the drawing pipeline. If a subclass can support this feature it must
+ * return true for the canComputeFastBounds() function. If that function
+ * returns false then computeFastBounds behavior is undefined otherwise it
+ * is expected to have the following behavior. Given the parent paint and
+ * the parent's bounding rect the subclass must fill in and return the
+ * storage rect, where the storage rect is with the union of the src rect
+ * and the looper's bounding rect.
+ */
+ bool canComputeFastBounds(const SkPaint& paint) const;
+ void computeFastBounds(const SkPaint& paint, const SkRect& src, SkRect* dst) const;
+
+ struct BlurShadowRec {
+ SkScalar fSigma;
+ SkVector fOffset;
+ SkColor fColor;
+ SkBlurStyle fStyle;
+ };
+ /**
+ * If this looper can be interpreted as having two layers, such that
+ * 1. The first layer (bottom most) just has a blur and translate
+ * 2. The second layer has no modifications to either paint or canvas
+ * 3. No other layers.
+ * then return true, and if not null, fill out the BlurShadowRec).
+ *
+ * If any of the above are not met, return false and ignore the BlurShadowRec parameter.
+ */
+ virtual bool asABlurShadow(BlurShadowRec*) const;
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkDrawLooper_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkDrawLooper_Type;
+ }
+
+ static sk_sp<SkDrawLooper> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkDrawLooper>(static_cast<SkDrawLooper*>(
+ SkFlattenable::Deserialize(
+ kSkDrawLooper_Type, data, size, procs).release()));
+ }
+
+ void apply(SkCanvas* canvas, const SkPaint& paint,
+ std::function<void(SkCanvas*, const SkPaint&)>);
+
+protected:
+ SkDrawLooper() {}
+
+private:
+ using INHERITED = SkFlattenable;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkDrawable.h b/gfx/skia/skia/include/core/SkDrawable.h
new file mode 100644
index 0000000000..316bf058bc
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkDrawable.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawable_DEFINED
+#define SkDrawable_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkAPI.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+class GrBackendDrawableInfo;
+class SkCanvas;
+class SkMatrix;
+class SkPicture;
+enum class GrBackendApi : unsigned int;
+struct SkDeserialProcs;
+struct SkIRect;
+struct SkImageInfo;
+struct SkRect;
+
+/**
+ * Base-class for objects that draw into SkCanvas.
+ *
+ * The object has a generation ID, which is guaranteed to be unique across all drawables. To
+ * allow for clients of the drawable that may want to cache the results, the drawable must
+ * change its generation ID whenever its internal state changes such that it will draw differently.
+ */
+class SK_API SkDrawable : public SkFlattenable {
+public:
+ /**
+ * Draws into the specified content. The drawing sequence will be balanced upon return
+ * (i.e. the saveLevel() on the canvas will match what it was when draw() was called,
+ * and the current matrix and clip settings will not be changed).
+ */
+ void draw(SkCanvas*, const SkMatrix* = nullptr);
+ void draw(SkCanvas*, SkScalar x, SkScalar y);
+
+ /**
+ * When using the GPU backend it is possible for a drawable to execute using the underlying 3D
+ * API rather than the SkCanvas API. It does so by creating a GpuDrawHandler. The GPU backend
+ * is deferred so the handler will be given access to the 3D API at the correct point in the
+ * drawing stream as the GPU backend flushes. Since the drawable may mutate, each time it is
+ * drawn to a GPU-backed canvas a new handler is snapped, representing the drawable's state at
+ * the time of the snap.
+ *
+ * When the GPU backend flushes to the 3D API it will call the draw method on the
+ * GpuDrawHandler. At this time the drawable may add commands to the stream of GPU commands for
+ * the underlying 3D API. The draw function takes a GrBackendDrawableInfo which contains
+ * information about the current state of 3D API which the caller must respect. See
+ * GrBackendDrawableInfo for more specific details on what information is sent and the
+ * requirements for different 3D APIs.
+ *
+ * Additionally there may be a slight delay from when the drawable adds its commands to when
+ * those commands are actually submitted to the GPU. Thus the drawable or GpuDrawHandler is
+ * required to keep any resources that are used by its added commands alive and valid until
+ * those commands are submitted to the GPU. The GpuDrawHandler will be kept alive and then
+ * deleted once the commands are submitted to the GPU. The dtor of the GpuDrawHandler is the
+ * signal to the drawable that the commands have all been submitted. Different 3D APIs may have
+ * additional requirements for certain resources which require waiting for the GPU to finish
+ * all work on those resources before reusing or deleting them. In this case, the drawable can
+ * use the dtor call of the GpuDrawHandler to add a fence to the GPU to track when the GPU work
+ * has completed.
+ *
+ * Currently this is only supported for the GPU Vulkan backend.
+ */
+
+ class GpuDrawHandler {
+ public:
+ virtual ~GpuDrawHandler() {}
+
+ virtual void draw(const GrBackendDrawableInfo&) {}
+ };
+
+ /**
+ * Snaps off a GpuDrawHandler to represent the state of the SkDrawable at the time the snap is
+ * called. This is used for executing GPU backend specific draws intermixed with normal Skia GPU
+ * draws. The GPU API, which will be used for the draw, as well as the full matrix, device clip
+ * bounds and imageInfo of the target buffer are passed in as inputs.
+ */
+ std::unique_ptr<GpuDrawHandler> snapGpuDrawHandler(GrBackendApi backendApi,
+ const SkMatrix& matrix,
+ const SkIRect& clipBounds,
+ const SkImageInfo& bufferInfo) {
+ return this->onSnapGpuDrawHandler(backendApi, matrix, clipBounds, bufferInfo);
+ }
+
+ SkPicture* newPictureSnapshot();
+
+ /**
+ * Return a unique value for this instance. If two calls to this return the same value,
+ * it is presumed that calling the draw() method will render the same thing as well.
+ *
+ * Subclasses that change their state should call notifyDrawingChanged() to ensure that
+ * a new value will be returned the next time it is called.
+ */
+ uint32_t getGenerationID();
+
+ /**
+ * Return the (conservative) bounds of what the drawable will draw. If the drawable can
+ * change what it draws (e.g. animation or in response to some external change), then this
+ * must return a bounds that is always valid for all possible states.
+ */
+ SkRect getBounds();
+
+ /**
+ * Return approximately how many bytes would be freed if this drawable is destroyed.
+ * The base implementation returns 0 to indicate that this is unknown.
+ */
+ size_t approximateBytesUsed();
+
+ /**
+ * Calling this invalidates the previous generation ID, and causes a new one to be computed
+ * the next time getGenerationID() is called. Typically this is called by the object itself,
+ * in response to its internal state changing.
+ */
+ void notifyDrawingChanged();
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkDrawable_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkDrawable_Type;
+ }
+
+ static sk_sp<SkDrawable> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkDrawable>(static_cast<SkDrawable*>(
+ SkFlattenable::Deserialize(
+ kSkDrawable_Type, data, size, procs).release()));
+ }
+
+ Factory getFactory() const override { return nullptr; }
+ const char* getTypeName() const override { return nullptr; }
+
+protected:
+ SkDrawable();
+
+ virtual SkRect onGetBounds() = 0;
+ virtual size_t onApproximateBytesUsed();
+ virtual void onDraw(SkCanvas*) = 0;
+
+ virtual std::unique_ptr<GpuDrawHandler> onSnapGpuDrawHandler(GrBackendApi, const SkMatrix&,
+ const SkIRect& /*clipBounds*/,
+ const SkImageInfo&) {
+ return nullptr;
+ }
+
+ // TODO: Delete this once Android gets updated to take the clipBounds version above.
+ virtual std::unique_ptr<GpuDrawHandler> onSnapGpuDrawHandler(GrBackendApi, const SkMatrix&) {
+ return nullptr;
+ }
+
+ /**
+ * Default implementation calls onDraw() with a canvas that records into a picture. Subclasses
+ * may override if they have a more efficient way to return a picture for the current state
+ * of their drawable. Note: this picture must draw the same as what would be drawn from
+ * onDraw().
+ */
+ virtual SkPicture* onNewPictureSnapshot();
+
+private:
+ int32_t fGenerationID;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkEncodedImageFormat.h b/gfx/skia/skia/include/core/SkEncodedImageFormat.h
new file mode 100644
index 0000000000..0db3830b9a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkEncodedImageFormat.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// TODO(kjlubick) remove this shim after clients have been moved to the new location
+#include "include/codec/SkEncodedImageFormat.h" // IWYU pragma: export
diff --git a/gfx/skia/skia/include/core/SkExecutor.h b/gfx/skia/skia/include/core/SkExecutor.h
new file mode 100644
index 0000000000..88e2ca6e52
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkExecutor.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkExecutor_DEFINED
+#define SkExecutor_DEFINED
+
+#include <functional>
+#include <memory>
+#include "include/core/SkTypes.h"
+
+class SK_API SkExecutor {
+public:
+ virtual ~SkExecutor();
+
+ // Create a thread pool SkExecutor with a fixed thread count, by default the number of cores.
+ static std::unique_ptr<SkExecutor> MakeFIFOThreadPool(int threads = 0,
+ bool allowBorrowing = true);
+ static std::unique_ptr<SkExecutor> MakeLIFOThreadPool(int threads = 0,
+ bool allowBorrowing = true);
+
+ // There is always a default SkExecutor available by calling SkExecutor::GetDefault().
+ static SkExecutor& GetDefault();
+ static void SetDefault(SkExecutor*); // Does not take ownership. Not thread safe.
+
+ // Add work to execute.
+ virtual void add(std::function<void(void)>) = 0;
+
+ // If it makes sense for this executor, use this thread to execute work for a little while.
+ virtual void borrow() {}
+
+protected:
+ SkExecutor() = default;
+ SkExecutor(const SkExecutor&) = delete;
+ SkExecutor& operator=(const SkExecutor&) = delete;
+};
+
+#endif//SkExecutor_DEFINED
diff --git a/gfx/skia/skia/include/core/SkFlattenable.h b/gfx/skia/skia/include/core/SkFlattenable.h
new file mode 100644
index 0000000000..3585e845b5
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFlattenable.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFlattenable_DEFINED
+#define SkFlattenable_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#include <cstddef>
+
+class SkData;
+class SkReadBuffer;
+class SkWriteBuffer;
+struct SkDeserialProcs;
+struct SkSerialProcs;
+
+/** \class SkFlattenable
+
+ SkFlattenable is the base class for objects that need to be flattened
+ into a data stream for either transport or as part of the key to the
+ font cache.
+ */
+class SK_API SkFlattenable : public SkRefCnt {
+public:
+ enum Type {
+ kSkColorFilter_Type,
+ kSkBlender_Type,
+ kSkDrawable_Type,
+ kSkDrawLooper_Type, // no longer used internally by Skia
+ kSkImageFilter_Type,
+ kSkMaskFilter_Type,
+ kSkPathEffect_Type,
+ kSkShader_Type,
+ };
+
+ typedef sk_sp<SkFlattenable> (*Factory)(SkReadBuffer&);
+
+ SkFlattenable() {}
+
+ /** Implement this to return a factory function pointer that can be called
+ to recreate your class given a buffer (previously written to by your
+ override of flatten()).
+ */
+ virtual Factory getFactory() const = 0;
+
+ /**
+ * Returns the name of the object's class.
+ */
+ virtual const char* getTypeName() const = 0;
+
+ static Factory NameToFactory(const char name[]);
+ static const char* FactoryToName(Factory);
+
+ static void Register(const char name[], Factory);
+
+ /**
+ * Override this if your subclass needs to record data that it will need to recreate itself
+ * from its CreateProc (returned by getFactory()).
+ *
+ * DEPRECATED public : will move to protected ... use serialize() instead
+ */
+ virtual void flatten(SkWriteBuffer&) const {}
+
+ virtual Type getFlattenableType() const = 0;
+
+ //
+ // public ways to serialize / deserialize
+ //
+ sk_sp<SkData> serialize(const SkSerialProcs* = nullptr) const;
+ size_t serialize(void* memory, size_t memory_size,
+ const SkSerialProcs* = nullptr) const;
+ static sk_sp<SkFlattenable> Deserialize(Type, const void* data, size_t length,
+ const SkDeserialProcs* procs = nullptr);
+
+protected:
+ class PrivateInitializer {
+ public:
+ static void InitEffects();
+ static void InitImageFilters();
+ };
+
+private:
+ static void RegisterFlattenablesIfNeeded();
+ static void Finalize();
+
+ friend class SkGraphics;
+
+ using INHERITED = SkRefCnt;
+};
+
+#if defined(SK_DISABLE_EFFECT_DESERIALIZATION)
+ #define SK_REGISTER_FLATTENABLE(type) do{}while(false)
+
+ #define SK_FLATTENABLE_HOOKS(type) \
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer&); \
+ friend class SkFlattenable::PrivateInitializer; \
+ Factory getFactory() const override { return nullptr; } \
+ const char* getTypeName() const override { return #type; }
+#else
+ #define SK_REGISTER_FLATTENABLE(type) \
+ SkFlattenable::Register(#type, type::CreateProc)
+
+ #define SK_FLATTENABLE_HOOKS(type) \
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer&); \
+ friend class SkFlattenable::PrivateInitializer; \
+ Factory getFactory() const override { return type::CreateProc; } \
+ const char* getTypeName() const override { return #type; }
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFont.h b/gfx/skia/skia/include/core/SkFont.h
new file mode 100644
index 0000000000..88e92694bd
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFont.h
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFont_DEFINED
+#define SkFont_DEFINED
+
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <vector>
+
+class SkMatrix;
+class SkPaint;
+class SkPath;
+struct SkFontMetrics;
+
+/** \class SkFont
+ SkFont controls options applied when drawing and measuring text.
+*/
+class SK_API SkFont {
+public:
+ /** Whether edge pixels draw opaque or with partial transparency.
+ */
+ enum class Edging {
+ kAlias, //!< no transparent pixels on glyph edges
+ kAntiAlias, //!< may have transparent pixels on glyph edges
+ kSubpixelAntiAlias, //!< glyph positioned in pixel using transparency
+ };
+
+ /** Constructs SkFont with default values.
+
+ @return default initialized SkFont
+ */
+ SkFont();
+
+ /** Constructs SkFont with default values with SkTypeface and size in points.
+
+ @param typeface font and style used to draw and measure text
+ @param size typographic height of text
+ @return initialized SkFont
+ */
+ SkFont(sk_sp<SkTypeface> typeface, SkScalar size);
+
+ /** Constructs SkFont with default values with SkTypeface.
+
+ @param typeface font and style used to draw and measure text
+ @return initialized SkFont
+ */
+ explicit SkFont(sk_sp<SkTypeface> typeface);
+
+
+ /** Constructs SkFont with default values with SkTypeface and size in points,
+ horizontal scale, and horizontal skew. Horizontal scale emulates condensed
+ and expanded fonts. Horizontal skew emulates oblique fonts.
+
+ @param typeface font and style used to draw and measure text
+ @param size typographic height of text
+ @param scaleX text horizontal scale
+ @param skewX additional shear on x-axis relative to y-axis
+ @return initialized SkFont
+ */
+ SkFont(sk_sp<SkTypeface> typeface, SkScalar size, SkScalar scaleX, SkScalar skewX);
+
+
+ /** Compares SkFont and font, and returns true if they are equivalent.
+ May return false if SkTypeface has identical contents but different pointers.
+
+ @param font font to compare
+ @return true if SkFont pair are equivalent
+ */
+ bool operator==(const SkFont& font) const;
+
+ /** Compares SkFont and font, and returns true if they are not equivalent.
+ May return true if SkTypeface has identical contents but different pointers.
+
+ @param font font to compare
+ @return true if SkFont pair are not equivalent
+ */
+ bool operator!=(const SkFont& font) const { return !(*this == font); }
+
+ /** If true, instructs the font manager to always hint glyphs.
+ Returned value is only meaningful if platform uses FreeType as the font manager.
+
+ @return true if all glyphs are hinted
+ */
+ bool isForceAutoHinting() const { return SkToBool(fFlags & kForceAutoHinting_PrivFlag); }
+
+ /** Returns true if font engine may return glyphs from font bitmaps instead of from outlines.
+
+ @return true if glyphs may be font bitmaps
+ */
+ bool isEmbeddedBitmaps() const { return SkToBool(fFlags & kEmbeddedBitmaps_PrivFlag); }
+
+ /** Returns true if glyphs may be drawn at sub-pixel offsets.
+
+ @return true if glyphs may be drawn at sub-pixel offsets.
+ */
+ bool isSubpixel() const { return SkToBool(fFlags & kSubpixel_PrivFlag); }
+
+ /** Returns true if font and glyph metrics are requested to be linearly scalable.
+
+ @return true if font and glyph metrics are requested to be linearly scalable.
+ */
+ bool isLinearMetrics() const { return SkToBool(fFlags & kLinearMetrics_PrivFlag); }
+
+ /** Returns true if bold is approximated by increasing the stroke width when creating glyph
+ bitmaps from outlines.
+
+ @return bold is approximated through stroke width
+ */
+ bool isEmbolden() const { return SkToBool(fFlags & kEmbolden_PrivFlag); }
+
+ /** Returns true if baselines will be snapped to pixel positions when the current transformation
+ matrix is axis aligned.
+
+ @return baselines may be snapped to pixels
+ */
+ bool isBaselineSnap() const { return SkToBool(fFlags & kBaselineSnap_PrivFlag); }
+
+ /** Sets whether to always hint glyphs.
+ If forceAutoHinting is set, instructs the font manager to always hint glyphs.
+
+ Only affects platforms that use FreeType as the font manager.
+
+ @param forceAutoHinting setting to always hint glyphs
+ */
+ void setForceAutoHinting(bool forceAutoHinting);
+
+ /** Requests, but does not require, to use bitmaps in fonts instead of outlines.
+
+ @param embeddedBitmaps setting to use bitmaps in fonts
+ */
+ void setEmbeddedBitmaps(bool embeddedBitmaps);
+
+ /** Requests, but does not require, that glyphs respect sub-pixel positioning.
+
+ @param subpixel setting for sub-pixel positioning
+ */
+ void setSubpixel(bool subpixel);
+
+ /** Requests, but does not require, linearly scalable font and glyph metrics.
+
+ For outline fonts 'true' means font and glyph metrics should ignore hinting and rounding.
+ Note that some bitmap formats may not be able to scale linearly and will ignore this flag.
+
+ @param linearMetrics setting for linearly scalable font and glyph metrics.
+ */
+ void setLinearMetrics(bool linearMetrics);
+
+ /** Increases stroke width when creating glyph bitmaps to approximate a bold typeface.
+
+ @param embolden setting for bold approximation
+ */
+ void setEmbolden(bool embolden);
+
+ /** Requests that baselines be snapped to pixels when the current transformation matrix is axis
+ aligned.
+
+ @param baselineSnap setting for baseline snapping to pixels
+ */
+ void setBaselineSnap(bool baselineSnap);
+
+ /** Whether edge pixels draw opaque or with partial transparency.
+ */
+ Edging getEdging() const { return (Edging)fEdging; }
+
+ /** Requests, but does not require, that edge pixels draw opaque or with
+ partial transparency.
+ */
+ void setEdging(Edging edging);
+
+ /** Sets level of glyph outline adjustment.
+ Does not check for valid values of hintingLevel.
+ */
+ void setHinting(SkFontHinting hintingLevel);
+
+ /** Returns level of glyph outline adjustment.
+ */
+ SkFontHinting getHinting() const { return (SkFontHinting)fHinting; }
+
+ /** Returns a font with the same attributes of this font, but with the specified size.
+ Returns nullptr if size is less than zero, infinite, or NaN.
+
+ @param size typographic height of text
+ @return initialized SkFont
+ */
+ SkFont makeWithSize(SkScalar size) const;
+
+ /** Returns SkTypeface if set, or nullptr.
+ Does not alter SkTypeface SkRefCnt.
+
+ @return SkTypeface if previously set, nullptr otherwise
+ */
+ SkTypeface* getTypeface() const {return fTypeface.get(); }
+
+ /** Returns SkTypeface if set, or the default typeface.
+ Does not alter SkTypeface SkRefCnt.
+
+ @return SkTypeface if previously set or, a pointer to the default typeface if not
+ previously set.
+ */
+ SkTypeface* getTypefaceOrDefault() const;
+
+ /** Returns text size in points.
+
+ @return typographic height of text
+ */
+ SkScalar getSize() const { return fSize; }
+
+ /** Returns text scale on x-axis.
+ Default value is 1.
+
+ @return text horizontal scale
+ */
+ SkScalar getScaleX() const { return fScaleX; }
+
+ /** Returns text skew on x-axis.
+ Default value is zero.
+
+ @return additional shear on x-axis relative to y-axis
+ */
+ SkScalar getSkewX() const { return fSkewX; }
+
+ /** Increases SkTypeface SkRefCnt by one.
+
+ @return SkTypeface if previously set, nullptr otherwise
+ */
+ sk_sp<SkTypeface> refTypeface() const { return fTypeface; }
+
+ /** Increases SkTypeface SkRefCnt by one.
+
+ @return SkTypeface if previously set or, a pointer to the default typeface if not
+ previously set.
+ */
+ sk_sp<SkTypeface> refTypefaceOrDefault() const;
+
+ /** Sets SkTypeface to typeface, decreasing SkRefCnt of the previous SkTypeface.
+ Pass nullptr to clear SkTypeface and use the default typeface. Increments
+ tf SkRefCnt by one.
+
+ @param tf font and style used to draw text
+ */
+ void setTypeface(sk_sp<SkTypeface> tf) { fTypeface = tf; }
+
+ /** Sets text size in points.
+ Has no effect if textSize is not greater than or equal to zero.
+
+ @param textSize typographic height of text
+ */
+ void setSize(SkScalar textSize);
+
+ /** Sets text scale on x-axis.
+ Default value is 1.
+
+ @param scaleX text horizontal scale
+ */
+ void setScaleX(SkScalar scaleX);
+
+ /** Sets text skew on x-axis.
+ Default value is zero.
+
+ @param skewX additional shear on x-axis relative to y-axis
+ */
+ void setSkewX(SkScalar skewX);
+
+ /** Converts text into glyph indices.
+ Returns the number of glyph indices represented by text.
+ SkTextEncoding specifies how text represents characters or glyphs.
+ glyphs may be nullptr, to compute the glyph count.
+
+ Does not check text for valid character codes or valid glyph indices.
+
+ If byteLength equals zero, returns zero.
+ If byteLength includes a partial character, the partial character is ignored.
+
+ If encoding is SkTextEncoding::kUTF8 and text contains an invalid UTF-8 sequence,
+ zero is returned.
+
+ When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32; then each Unicode codepoint is mapped to a
+ single glyph. This function uses the default character-to-glyph
+ mapping from the SkTypeface and maps characters not found in the
+ SkTypeface to zero.
+
+ If maxGlyphCount is not sufficient to store all the glyphs, no glyphs are copied.
+ The total glyph count is returned for subsequent buffer reallocation.
+
+ @param text character storage encoded with SkTextEncoding
+ @param byteLength length of character storage in bytes
+ @param glyphs storage for glyph indices; may be nullptr
+ @param maxGlyphCount storage capacity
+ @return number of glyphs represented by text of length byteLength
+ */
+ int textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkGlyphID glyphs[], int maxGlyphCount) const;
+
+ /** Returns glyph index for Unicode character.
+
+ If the character is not supported by the SkTypeface, returns 0.
+
+ @param uni Unicode character
+ @return glyph index
+ */
+ SkGlyphID unicharToGlyph(SkUnichar uni) const;
+
+ void unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const;
+
+ /** Returns number of glyphs represented by text.
+
+ If encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32; then each Unicode codepoint is mapped to a
+ single glyph.
+
+ @param text character storage encoded with SkTextEncoding
+ @param byteLength length of character storage in bytes
+ @return number of glyphs represented by text of length byteLength
+ */
+ int countText(const void* text, size_t byteLength, SkTextEncoding encoding) const {
+ return this->textToGlyphs(text, byteLength, encoding, nullptr, 0);
+ }
+
+ /** Returns the advance width of text.
+ The advance is the normal distance to move before drawing additional text.
+ Returns the bounding box of text if bounds is not nullptr.
+
+ @param text character storage encoded with SkTextEncoding
+ @param byteLength length of character storage in bytes
+ @param bounds returns bounding box relative to (0, 0) if not nullptr
+ @return the sum of the default advance widths
+ */
+ SkScalar measureText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkRect* bounds = nullptr) const {
+ return this->measureText(text, byteLength, encoding, bounds, nullptr);
+ }
+
+ /** Returns the advance width of text.
+ The advance is the normal distance to move before drawing additional text.
+ Returns the bounding box of text if bounds is not nullptr. The paint
+ stroke settings, mask filter, or path effect may modify the bounds.
+
+ @param text character storage encoded with SkTextEncoding
+ @param byteLength length of character storage in bytes
+ @param bounds returns bounding box relative to (0, 0) if not nullptr
+ @param paint optional; may be nullptr
+ @return the sum of the default advance widths
+ */
+ SkScalar measureText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkRect* bounds, const SkPaint* paint) const;
+
+ /** DEPRECATED
+ Retrieves the advance and bounds for each glyph in glyphs.
+ Both widths and bounds may be nullptr.
+ If widths is not nullptr, widths must be an array of count entries.
+ If bounds is not nullptr, bounds must be an array of count entries.
+
+ @param glyphs array of glyph indices to be measured
+ @param count number of glyphs
+ @param widths returns text advances for each glyph; may be nullptr
+ @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr
+ */
+ void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[], SkRect bounds[]) const {
+ this->getWidthsBounds(glyphs, count, widths, bounds, nullptr);
+ }
+
+ // DEPRECATED
+ void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[], std::nullptr_t) const {
+ this->getWidths(glyphs, count, widths);
+ }
+
+ /** Retrieves the advance and bounds for each glyph in glyphs.
+ Both widths and bounds may be nullptr.
+ If widths is not nullptr, widths must be an array of count entries.
+ If bounds is not nullptr, bounds must be an array of count entries.
+
+ @param glyphs array of glyph indices to be measured
+ @param count number of glyphs
+ @param widths returns text advances for each glyph
+ */
+ void getWidths(const SkGlyphID glyphs[], int count, SkScalar widths[]) const {
+ this->getWidthsBounds(glyphs, count, widths, nullptr, nullptr);
+ }
+
+ /** Retrieves the advance and bounds for each glyph in glyphs.
+ Both widths and bounds may be nullptr.
+ If widths is not nullptr, widths must be an array of count entries.
+ If bounds is not nullptr, bounds must be an array of count entries.
+
+ @param glyphs array of glyph indices to be measured
+ @param count number of glyphs
+ @param widths returns text advances for each glyph; may be nullptr
+ @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr
+ @param paint optional, specifies stroking, SkPathEffect and SkMaskFilter
+ */
+ void getWidthsBounds(const SkGlyphID glyphs[], int count, SkScalar widths[], SkRect bounds[],
+ const SkPaint* paint) const;
+
+
+ /** Retrieves the bounds for each glyph in glyphs.
+ bounds must be an array of count entries.
+ If paint is not nullptr, its stroking, SkPathEffect, and SkMaskFilter fields are respected.
+
+ @param glyphs array of glyph indices to be measured
+ @param count number of glyphs
+ @param bounds returns bounds for each glyph relative to (0, 0); may be nullptr
+ @param paint optional, specifies stroking, SkPathEffect, and SkMaskFilter
+ */
+ void getBounds(const SkGlyphID glyphs[], int count, SkRect bounds[],
+ const SkPaint* paint) const {
+ this->getWidthsBounds(glyphs, count, nullptr, bounds, paint);
+ }
+
+ /** Retrieves the positions for each glyph, beginning at the specified origin. The caller
+ must allocate at least count number of elements in the pos[] array.
+
+ @param glyphs array of glyph indices to be positioned
+ @param count number of glyphs
+ @param pos returns glyphs positions
+ @param origin location of the first glyph. Defaults to {0, 0}.
+ */
+ void getPos(const SkGlyphID glyphs[], int count, SkPoint pos[], SkPoint origin = {0, 0}) const;
+
+ /** Retrieves the x-positions for each glyph, beginning at the specified origin. The caller
+ must allocate at least count number of elements in the xpos[] array.
+
+ @param glyphs array of glyph indices to be positioned
+ @param count number of glyphs
+ @param xpos returns glyphs x-positions
+ @param origin x-position of the first glyph. Defaults to 0.
+ */
+ void getXPos(const SkGlyphID glyphs[], int count, SkScalar xpos[], SkScalar origin = 0) const;
+
+ /** Returns intervals [start, end] describing lines parallel to the advance that intersect
+ * with the glyphs.
+ *
+ * @param glyphs the glyphs to intersect
+ * @param count the number of glyphs and positions
+ * @param pos the position of each glyph
+ * @param top the top of the line intersecting
+ * @param bottom the bottom of the line intersecting
+ @return array of pairs of x values [start, end]. May be empty.
+ */
+ std::vector<SkScalar> getIntercepts(const SkGlyphID glyphs[], int count, const SkPoint pos[],
+ SkScalar top, SkScalar bottom,
+ const SkPaint* = nullptr) const;
+
+ /** Modifies path to be the outline of the glyph.
+ If the glyph has an outline, modifies path to be the glyph's outline and returns true.
+ The glyph outline may be empty. Degenerate contours in the glyph outline will be skipped.
+ If glyph is described by a bitmap, returns false and ignores path parameter.
+
+ @param glyphID index of glyph
+ @param path pointer to existing SkPath
+ @return true if glyphID is described by path
+ */
+ bool getPath(SkGlyphID glyphID, SkPath* path) const;
+
+ /** Returns path corresponding to glyph array.
+
+ @param glyphIDs array of glyph indices
+ @param count number of glyphs
+ @param glyphPathProc function returning one glyph description as path
+ @param ctx function context
+ */
+ void getPaths(const SkGlyphID glyphIDs[], int count,
+ void (*glyphPathProc)(const SkPath* pathOrNull, const SkMatrix& mx, void* ctx),
+ void* ctx) const;
+
+ /** Returns SkFontMetrics associated with SkTypeface.
+ The return value is the recommended spacing between lines: the sum of metrics
+ descent, ascent, and leading.
+ If metrics is not nullptr, SkFontMetrics is copied to metrics.
+ Results are scaled by text size but does not take into account
+ dimensions required by text scale, text skew, fake bold,
+ style stroke, and SkPathEffect.
+
+ @param metrics storage for SkFontMetrics; may be nullptr
+ @return recommended spacing between lines
+ */
+ SkScalar getMetrics(SkFontMetrics* metrics) const;
+
+ /** Returns the recommended spacing between lines: the sum of metrics
+ descent, ascent, and leading.
+ Result is scaled by text size but does not take into account
+ dimensions required by stroking and SkPathEffect.
+ Returns the same result as getMetrics().
+
+ @return recommended spacing between lines
+ */
+ SkScalar getSpacing() const { return this->getMetrics(nullptr); }
+
+ /** Dumps fields of the font to SkDebugf. May change its output over time, so clients should
+ * not rely on this for anything specific. Used to aid in debugging.
+ */
+ void dump() const;
+
+ using sk_is_trivially_relocatable = std::true_type;
+
+private:
+ enum PrivFlags {
+ kForceAutoHinting_PrivFlag = 1 << 0,
+ kEmbeddedBitmaps_PrivFlag = 1 << 1,
+ kSubpixel_PrivFlag = 1 << 2,
+ kLinearMetrics_PrivFlag = 1 << 3,
+ kEmbolden_PrivFlag = 1 << 4,
+ kBaselineSnap_PrivFlag = 1 << 5,
+ };
+
+ static constexpr unsigned kAllFlags = kForceAutoHinting_PrivFlag
+ | kEmbeddedBitmaps_PrivFlag
+ | kSubpixel_PrivFlag
+ | kLinearMetrics_PrivFlag
+ | kEmbolden_PrivFlag
+ | kBaselineSnap_PrivFlag;
+
+ sk_sp<SkTypeface> fTypeface;
+ SkScalar fSize;
+ SkScalar fScaleX;
+ SkScalar fSkewX;
+ uint8_t fFlags;
+ uint8_t fEdging;
+ uint8_t fHinting;
+
+ static_assert(::sk_is_trivially_relocatable<decltype(fTypeface)>::value);
+
+ SkScalar setupForAsPaths(SkPaint*);
+ bool hasSomeAntiAliasing() const;
+
+ friend class SkFontPriv;
+ friend class SkGlyphRunListPainterCPU;
+ friend class SkStrikeSpec;
+ friend class SkRemoteGlyphCacheTest;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontArguments.h b/gfx/skia/skia/include/core/SkFontArguments.h
new file mode 100644
index 0000000000..a5139bb21b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontArguments.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontArguments_DEFINED
+#define SkFontArguments_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+/** Represents a set of actual arguments for a font. */
+struct SkFontArguments {
+ struct VariationPosition {
+ struct Coordinate {
+ SkFourByteTag axis;
+ float value;
+ };
+ const Coordinate* coordinates;
+ int coordinateCount;
+ };
+
+ /** Specify a palette to use and overrides for palette entries.
+ *
+ * `overrides` is a list of pairs of palette entry index and color.
+ * The overridden palette entries will use the associated color.
+ * Override pairs with palette entry indices out of range will not be applied.
+ * Later override entries override earlier ones.
+ */
+ struct Palette {
+ struct Override {
+ int index;
+ SkColor color;
+ };
+ int index;
+ const Override* overrides;
+ int overrideCount;
+ };
+
+ SkFontArguments()
+ : fCollectionIndex(0)
+ , fVariationDesignPosition{nullptr, 0}
+ , fPalette{0, nullptr, 0} {}
+
+ /** Specify the index of the desired font.
+ *
+ * Font formats like ttc, dfont, cff, cid, pfr, t42, t1, and fon may actually be indexed
+ * collections of fonts.
+ */
+ SkFontArguments& setCollectionIndex(int collectionIndex) {
+ fCollectionIndex = collectionIndex;
+ return *this;
+ }
+
+ /** Specify a position in the variation design space.
+ *
+ * Any axis not specified will use the default value.
+ * Any specified axis not actually present in the font will be ignored.
+ *
+ * @param position not copied. The value must remain valid for life of SkFontArguments.
+ */
+ SkFontArguments& setVariationDesignPosition(VariationPosition position) {
+ fVariationDesignPosition.coordinates = position.coordinates;
+ fVariationDesignPosition.coordinateCount = position.coordinateCount;
+ return *this;
+ }
+
+ int getCollectionIndex() const {
+ return fCollectionIndex;
+ }
+
+ VariationPosition getVariationDesignPosition() const {
+ return fVariationDesignPosition;
+ }
+
+ SkFontArguments& setPalette(Palette palette) {
+ fPalette.index = palette.index;
+ fPalette.overrides = palette.overrides;
+ fPalette.overrideCount = palette.overrideCount;
+ return *this;
+ }
+
+ Palette getPalette() const { return fPalette; }
+
+private:
+ int fCollectionIndex;
+ VariationPosition fVariationDesignPosition;
+ Palette fPalette;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontMetrics.h b/gfx/skia/skia/include/core/SkFontMetrics.h
new file mode 100644
index 0000000000..f496039311
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontMetrics.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMetrics_DEFINED
+#define SkFontMetrics_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkTo.h"
+
+/** \class SkFontMetrics
+ The metrics of an SkFont.
+ The metric values are consistent with the Skia y-down coordinate system.
+ */
+struct SK_API SkFontMetrics {
+ bool operator==(const SkFontMetrics& that) {
+ return
+ this->fFlags == that.fFlags &&
+ this->fTop == that.fTop &&
+ this->fAscent == that.fAscent &&
+ this->fDescent == that.fDescent &&
+ this->fBottom == that.fBottom &&
+ this->fLeading == that.fLeading &&
+ this->fAvgCharWidth == that.fAvgCharWidth &&
+ this->fMaxCharWidth == that.fMaxCharWidth &&
+ this->fXMin == that.fXMin &&
+ this->fXMax == that.fXMax &&
+ this->fXHeight == that.fXHeight &&
+ this->fCapHeight == that.fCapHeight &&
+ this->fUnderlineThickness == that.fUnderlineThickness &&
+ this->fUnderlinePosition == that.fUnderlinePosition &&
+ this->fStrikeoutThickness == that.fStrikeoutThickness &&
+ this->fStrikeoutPosition == that.fStrikeoutPosition;
+ }
+
+ /** \enum FontMetricsFlags
+ FontMetricsFlags indicate when certain metrics are valid;
+ the underline or strikeout metrics may be valid and zero.
+ Fonts with embedded bitmaps may not have valid underline or strikeout metrics.
+ */
+ enum FontMetricsFlags {
+ kUnderlineThicknessIsValid_Flag = 1 << 0, //!< set if fUnderlineThickness is valid
+ kUnderlinePositionIsValid_Flag = 1 << 1, //!< set if fUnderlinePosition is valid
+ kStrikeoutThicknessIsValid_Flag = 1 << 2, //!< set if fStrikeoutThickness is valid
+ kStrikeoutPositionIsValid_Flag = 1 << 3, //!< set if fStrikeoutPosition is valid
+ kBoundsInvalid_Flag = 1 << 4, //!< set if fTop, fBottom, fXMin, fXMax invalid
+ };
+
+ uint32_t fFlags; //!< FontMetricsFlags indicating which metrics are valid
+ SkScalar fTop; //!< greatest extent above origin of any glyph bounding box, typically negative; deprecated with variable fonts
+ SkScalar fAscent; //!< distance to reserve above baseline, typically negative
+ SkScalar fDescent; //!< distance to reserve below baseline, typically positive
+ SkScalar fBottom; //!< greatest extent below origin of any glyph bounding box, typically positive; deprecated with variable fonts
+ SkScalar fLeading; //!< distance to add between lines, typically positive or zero
+ SkScalar fAvgCharWidth; //!< average character width, zero if unknown
+ SkScalar fMaxCharWidth; //!< maximum character width, zero if unknown
+ SkScalar fXMin; //!< greatest extent to left of origin of any glyph bounding box, typically negative; deprecated with variable fonts
+ SkScalar fXMax; //!< greatest extent to right of origin of any glyph bounding box, typically positive; deprecated with variable fonts
+ SkScalar fXHeight; //!< height of lower-case 'x', zero if unknown, typically negative
+ SkScalar fCapHeight; //!< height of an upper-case letter, zero if unknown, typically negative
+ SkScalar fUnderlineThickness; //!< underline thickness
+ SkScalar fUnderlinePosition; //!< distance from baseline to top of stroke, typically positive
+ SkScalar fStrikeoutThickness; //!< strikeout thickness
+ SkScalar fStrikeoutPosition; //!< distance from baseline to bottom of stroke, typically negative
+
+ /** Returns true if SkFontMetrics has a valid underline thickness, and sets
+ thickness to that value. If the underline thickness is not valid,
+ return false, and ignore thickness.
+
+ @param thickness storage for underline width
+ @return true if font specifies underline width
+ */
+ bool hasUnderlineThickness(SkScalar* thickness) const {
+ if (SkToBool(fFlags & kUnderlineThicknessIsValid_Flag)) {
+ *thickness = fUnderlineThickness;
+ return true;
+ }
+ return false;
+ }
+
+ /** Returns true if SkFontMetrics has a valid underline position, and sets
+ position to that value. If the underline position is not valid,
+ return false, and ignore position.
+
+ @param position storage for underline position
+ @return true if font specifies underline position
+ */
+ bool hasUnderlinePosition(SkScalar* position) const {
+ if (SkToBool(fFlags & kUnderlinePositionIsValid_Flag)) {
+ *position = fUnderlinePosition;
+ return true;
+ }
+ return false;
+ }
+
+ /** Returns true if SkFontMetrics has a valid strikeout thickness, and sets
+ thickness to that value. If the strikeout thickness is not valid,
+ return false, and ignore thickness.
+
+ @param thickness storage for strikeout width
+ @return true if font specifies strikeout width
+ */
+ bool hasStrikeoutThickness(SkScalar* thickness) const {
+ if (SkToBool(fFlags & kStrikeoutThicknessIsValid_Flag)) {
+ *thickness = fStrikeoutThickness;
+ return true;
+ }
+ return false;
+ }
+
+ /** Returns true if SkFontMetrics has a valid strikeout position, and sets
+ position to that value. If the strikeout position is not valid,
+ return false, and ignore position.
+
+ @param position storage for strikeout position
+ @return true if font specifies strikeout position
+ */
+ bool hasStrikeoutPosition(SkScalar* position) const {
+ if (SkToBool(fFlags & kStrikeoutPositionIsValid_Flag)) {
+ *position = fStrikeoutPosition;
+ return true;
+ }
+ return false;
+ }
+
+ /** Returns true if SkFontMetrics has a valid fTop, fBottom, fXMin, and fXMax.
+ If the bounds are not valid, return false.
+
+ @return true if font specifies maximum glyph bounds
+ */
+ bool hasBounds() const {
+ return !SkToBool(fFlags & kBoundsInvalid_Flag);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontMgr.h b/gfx/skia/skia/include/core/SkFontMgr.h
new file mode 100644
index 0000000000..5b988c0074
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontMgr.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_DEFINED
+#define SkFontMgr_DEFINED
+
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#include <memory>
+
+class SkData;
+class SkFontData;
+class SkStreamAsset;
+class SkString;
+class SkTypeface;
+
+class SK_API SkFontStyleSet : public SkRefCnt {
+public:
+ virtual int count() = 0;
+ virtual void getStyle(int index, SkFontStyle*, SkString* style) = 0;
+ virtual SkTypeface* createTypeface(int index) = 0;
+ virtual SkTypeface* matchStyle(const SkFontStyle& pattern) = 0;
+
+ static SkFontStyleSet* CreateEmpty();
+
+protected:
+ SkTypeface* matchStyleCSS3(const SkFontStyle& pattern);
+
+private:
+ using INHERITED = SkRefCnt;
+};
+
+class SK_API SkFontMgr : public SkRefCnt {
+public:
+ int countFamilies() const;
+ void getFamilyName(int index, SkString* familyName) const;
+ SkFontStyleSet* createStyleSet(int index) const;
+
+ /**
+ * The caller must call unref() on the returned object.
+ * Never returns NULL; will return an empty set if the name is not found.
+ *
+ * Passing nullptr as the parameter will return the default system family.
+ * Note that most systems don't have a default system family, so passing nullptr will often
+ * result in the empty set.
+ *
+ * It is possible that this will return a style set not accessible from
+ * createStyleSet(int) due to hidden or auto-activated fonts.
+ */
+ SkFontStyleSet* matchFamily(const char familyName[]) const;
+
+ /**
+ * Find the closest matching typeface to the specified familyName and style
+ * and return a ref to it. The caller must call unref() on the returned
+ * object. Will return nullptr if no 'good' match is found.
+ *
+ * Passing |nullptr| as the parameter for |familyName| will return the
+ * default system font.
+ *
+ * It is possible that this will return a style set not accessible from
+ * createStyleSet(int) or matchFamily(const char[]) due to hidden or
+ * auto-activated fonts.
+ */
+ SkTypeface* matchFamilyStyle(const char familyName[], const SkFontStyle&) const;
+
+ /**
+ * Use the system fallback to find a typeface for the given character.
+ * Note that bcp47 is a combination of ISO 639, 15924, and 3166-1 codes,
+ * so it is fine to just pass a ISO 639 here.
+ *
+ * Will return NULL if no family can be found for the character
+ * in the system fallback.
+ *
+ * Passing |nullptr| as the parameter for |familyName| will return the
+ * default system font.
+ *
+ * bcp47[0] is the least significant fallback, bcp47[bcp47Count-1] is the
+ * most significant. If no specified bcp47 codes match, any font with the
+ * requested character will be matched.
+ */
+ SkTypeface* matchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const;
+
+ /**
+ * Create a typeface for the specified data and TTC index (pass 0 for none)
+ * or NULL if the data is not recognized. The caller must call unref() on
+ * the returned object if it is not null.
+ */
+ sk_sp<SkTypeface> makeFromData(sk_sp<SkData>, int ttcIndex = 0) const;
+
+ /**
+ * Create a typeface for the specified stream and TTC index
+ * (pass 0 for none) or NULL if the stream is not recognized. The caller
+ * must call unref() on the returned object if it is not null.
+ */
+ sk_sp<SkTypeface> makeFromStream(std::unique_ptr<SkStreamAsset>, int ttcIndex = 0) const;
+
+ /* Experimental, API subject to change. */
+ sk_sp<SkTypeface> makeFromStream(std::unique_ptr<SkStreamAsset>, const SkFontArguments&) const;
+
+ /**
+ * Create a typeface for the specified fileName and TTC index
+ * (pass 0 for none) or NULL if the file is not found, or its contents are
+ * not recognized. The caller must call unref() on the returned object
+ * if it is not null.
+ */
+ sk_sp<SkTypeface> makeFromFile(const char path[], int ttcIndex = 0) const;
+
+ sk_sp<SkTypeface> legacyMakeTypeface(const char familyName[], SkFontStyle style) const;
+
+ /** Return the default fontmgr. */
+ static sk_sp<SkFontMgr> RefDefault();
+
+ /* Returns an empty font manager without any typeface dependencies */
+ static sk_sp<SkFontMgr> RefEmpty();
+
+protected:
+ virtual int onCountFamilies() const = 0;
+ virtual void onGetFamilyName(int index, SkString* familyName) const = 0;
+ virtual SkFontStyleSet* onCreateStyleSet(int index)const = 0;
+
+ /** May return NULL if the name is not found. */
+ virtual SkFontStyleSet* onMatchFamily(const char familyName[]) const = 0;
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle&) const = 0;
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const = 0;
+
+ virtual sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const = 0;
+ virtual sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>,
+ int ttcIndex) const = 0;
+ virtual sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>,
+ const SkFontArguments&) const = 0;
+ virtual sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const = 0;
+
+ virtual sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const = 0;
+
+ // this method is never called -- will be removed
+ virtual SkTypeface* onMatchFaceStyle(const SkTypeface*,
+ const SkFontStyle&) const {
+ return nullptr;
+ }
+
+private:
+
+ /** Implemented by porting layer to return the default factory. */
+ static sk_sp<SkFontMgr> Factory();
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontParameters.h b/gfx/skia/skia/include/core/SkFontParameters.h
new file mode 100644
index 0000000000..ae4f1d68b6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontParameters.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontParameters_DEFINED
+#define SkFontParameters_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+struct SkFontParameters {
+ struct Variation {
+ // Parameters in a variation font axis.
+ struct Axis {
+ constexpr Axis() : tag(0), min(0), def(0), max(0), flags(0) {}
+ constexpr Axis(SkFourByteTag tag, float min, float def, float max, bool hidden) :
+ tag(tag), min(min), def(def), max(max), flags(hidden ? HIDDEN : 0) {}
+
+ // Four character identifier of the font axis (weight, width, slant, italic...).
+ SkFourByteTag tag;
+ // Minimum value supported by this axis.
+ float min;
+ // Default value set by this axis.
+ float def;
+ // Maximum value supported by this axis. The maximum can equal the minimum.
+ float max;
+ // Return whether this axis is recommended to remain hidden in user interfaces.
+ bool isHidden() const { return flags & HIDDEN; }
+ // Set whether this axis should remain hidden in user interfaces.
+ void setHidden(bool hidden) { flags = hidden ? (flags | HIDDEN) : (flags & ~HIDDEN); }
+ private:
+ static constexpr uint16_t HIDDEN = 0x0001;
+ // Attributes for a font axis.
+ uint16_t flags;
+ };
+ };
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontStyle.h b/gfx/skia/skia/include/core/SkFontStyle.h
new file mode 100644
index 0000000000..be46b53bb2
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontStyle.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontStyle_DEFINED
+#define SkFontStyle_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTPin.h"
+
+#include <cstdint>
+
+class SK_API SkFontStyle {
+public:
+ enum Weight {
+ kInvisible_Weight = 0,
+ kThin_Weight = 100,
+ kExtraLight_Weight = 200,
+ kLight_Weight = 300,
+ kNormal_Weight = 400,
+ kMedium_Weight = 500,
+ kSemiBold_Weight = 600,
+ kBold_Weight = 700,
+ kExtraBold_Weight = 800,
+ kBlack_Weight = 900,
+ kExtraBlack_Weight = 1000,
+ };
+
+ enum Width {
+ kUltraCondensed_Width = 1,
+ kExtraCondensed_Width = 2,
+ kCondensed_Width = 3,
+ kSemiCondensed_Width = 4,
+ kNormal_Width = 5,
+ kSemiExpanded_Width = 6,
+ kExpanded_Width = 7,
+ kExtraExpanded_Width = 8,
+ kUltraExpanded_Width = 9,
+ };
+
+ enum Slant {
+ kUpright_Slant,
+ kItalic_Slant,
+ kOblique_Slant,
+ };
+
+ constexpr SkFontStyle(int weight, int width, Slant slant) : fValue(
+ (SkTPin<int>(weight, kInvisible_Weight, kExtraBlack_Weight)) +
+ (SkTPin<int>(width, kUltraCondensed_Width, kUltraExpanded_Width) << 16) +
+ (SkTPin<int>(slant, kUpright_Slant, kOblique_Slant) << 24)
+ ) { }
+
+ constexpr SkFontStyle() : SkFontStyle{kNormal_Weight, kNormal_Width, kUpright_Slant} { }
+
+ bool operator==(const SkFontStyle& rhs) const {
+ return fValue == rhs.fValue;
+ }
+
+ int weight() const { return fValue & 0xFFFF; }
+ int width() const { return (fValue >> 16) & 0xFF; }
+ Slant slant() const { return (Slant)((fValue >> 24) & 0xFF); }
+
+ static constexpr SkFontStyle Normal() {
+ return SkFontStyle(kNormal_Weight, kNormal_Width, kUpright_Slant);
+ }
+ static constexpr SkFontStyle Bold() {
+ return SkFontStyle(kBold_Weight, kNormal_Width, kUpright_Slant);
+ }
+ static constexpr SkFontStyle Italic() {
+ return SkFontStyle(kNormal_Weight, kNormal_Width, kItalic_Slant );
+ }
+ static constexpr SkFontStyle BoldItalic() {
+ return SkFontStyle(kBold_Weight, kNormal_Width, kItalic_Slant );
+ }
+
+private:
+ friend class SkTypefaceProxyPrototype; // To serialize fValue
+ int32_t fValue;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkFontTypes.h b/gfx/skia/skia/include/core/SkFontTypes.h
new file mode 100644
index 0000000000..76f5dde67f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkFontTypes.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontTypes_DEFINED
+#define SkFontTypes_DEFINED
+
+enum class SkTextEncoding {
+ kUTF8, //!< uses bytes to represent UTF-8 or ASCII
+ kUTF16, //!< uses two byte words to represent most of Unicode
+ kUTF32, //!< uses four byte words to represent all of Unicode
+ kGlyphID, //!< uses two byte words to represent glyph indices
+};
+
+enum class SkFontHinting {
+ kNone, //!< glyph outlines unchanged
+ kSlight, //!< minimal modification to improve contrast
+ kNormal, //!< glyph outlines modified to improve contrast
+ kFull, //!< modifies glyph outlines for maximum contrast
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkGraphics.h b/gfx/skia/skia/include/core/SkGraphics.h
new file mode 100644
index 0000000000..2a20825046
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkGraphics.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGraphics_DEFINED
+#define SkGraphics_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+#include <memory>
+
+class SkData;
+class SkImageGenerator;
+class SkOpenTypeSVGDecoder;
+class SkPath;
+class SkTraceMemoryDump;
+
+class SK_API SkGraphics {
+public:
+ /**
+ * Call this at process initialization time if your environment does not
+ * permit static global initializers that execute code.
+ * Init() is thread-safe and idempotent.
+ */
+ static void Init();
+
+ /**
+ * Return the max number of bytes that should be used by the font cache.
+ * If the cache needs to allocate more, it will purge previous entries.
+ * This max can be changed by calling SetFontCacheLimit().
+ */
+ static size_t GetFontCacheLimit();
+
+ /**
+ * Specify the max number of bytes that should be used by the font cache.
+ * If the cache needs to allocate more, it will purge previous entries.
+ *
+ * This function returns the previous setting, as if GetFontCacheLimit()
+ * had been called before the new limit was set.
+ */
+ static size_t SetFontCacheLimit(size_t bytes);
+
+ /**
+ * Return the number of bytes currently used by the font cache.
+ */
+ static size_t GetFontCacheUsed();
+
+ /**
+ * Return the number of entries in the font cache.
+ * A cache "entry" is associated with each typeface + pointSize + matrix.
+ */
+ static int GetFontCacheCountUsed();
+
+ /**
+ * Return the current limit to the number of entries in the font cache.
+ * A cache "entry" is associated with each typeface + pointSize + matrix.
+ */
+ static int GetFontCacheCountLimit();
+
+ /**
+ * Set the limit to the number of entries in the font cache, and return
+ * the previous value. If this new value is lower than the previous,
+ * it will automatically try to purge entries to meet the new limit.
+ */
+ static int SetFontCacheCountLimit(int count);
+
+ /**
+ * For debugging purposes, this will attempt to purge the font cache. It
+ * does not change the limit, but will cause subsequent font measures and
+ * draws to be recreated, since they will no longer be in the cache.
+ */
+ static void PurgeFontCache();
+
+ /**
+ * This function returns the memory used for temporary images and other resources.
+ */
+ static size_t GetResourceCacheTotalBytesUsed();
+
+ /**
+ * These functions get/set the memory usage limit for the resource cache, used for temporary
+ * bitmaps and other resources. Entries are purged from the cache when the memory usage
+ * exceeds this limit.
+ */
+ static size_t GetResourceCacheTotalByteLimit();
+ static size_t SetResourceCacheTotalByteLimit(size_t newLimit);
+
+ /**
+ * For debugging purposes, this will attempt to purge the resource cache. It
+ * does not change the limit.
+ */
+ static void PurgeResourceCache();
+
+ /**
+ * When the cacheable entry is very large (e.g. a large scaled bitmap), adding it to the cache
+ * can cause most/all of the existing entries to be purged. To avoid this, the client can set
+ * a limit for a single allocation. If a cacheable entry would have been cached, but its size
+ * exceeds this limit, then we do not attempt to cache it at all.
+ *
+ * Zero is the default value, meaning we always attempt to cache entries.
+ */
+ static size_t GetResourceCacheSingleAllocationByteLimit();
+ static size_t SetResourceCacheSingleAllocationByteLimit(size_t newLimit);
+
+ /**
+ * Dumps memory usage of caches using the SkTraceMemoryDump interface. See SkTraceMemoryDump
+ * for usage of this method.
+ */
+ static void DumpMemoryStatistics(SkTraceMemoryDump* dump);
+
+ /**
+ * Free as much globally cached memory as possible. This will purge all private caches in Skia,
+ * including font and image caches.
+ *
+ * If there are caches associated with GPU context, those will not be affected by this call.
+ */
+ static void PurgeAllCaches();
+
+ typedef std::unique_ptr<SkImageGenerator>
+ (*ImageGeneratorFromEncodedDataFactory)(sk_sp<SkData>);
+
+ /**
+ * To instantiate images from encoded data, first looks at this runtime function-ptr. If it
+ * exists, it is called to create an SkImageGenerator from SkData. If there is no function-ptr
+ * or there is, but it returns NULL, then skia will call its internal default implementation.
+ *
+ * Returns the previous factory (which could be NULL).
+ */
+ static ImageGeneratorFromEncodedDataFactory
+ SetImageGeneratorFromEncodedDataFactory(ImageGeneratorFromEncodedDataFactory);
+
+ /**
+ * To draw OpenType SVG data, Skia will look at this runtime function pointer. If this function
+ * pointer is set, the SkTypeface implementations which support OpenType SVG will call this
+ * function to create an SkOpenTypeSVGDecoder to decode the OpenType SVG and draw it as needed.
+ * If this function is not set, the SkTypeface implementations will generally not support
+ * OpenType SVG and attempt to use other glyph representations if available.
+ */
+ using OpenTypeSVGDecoderFactory =
+ std::unique_ptr<SkOpenTypeSVGDecoder> (*)(const uint8_t* svg, size_t length);
+ static OpenTypeSVGDecoderFactory SetOpenTypeSVGDecoderFactory(OpenTypeSVGDecoderFactory);
+ static OpenTypeSVGDecoderFactory GetOpenTypeSVGDecoderFactory();
+
+ /**
+ * Call early in main() to allow Skia to use a JIT to accelerate CPU-bound operations.
+ */
+ static void AllowJIT();
+
+ /**
+ * To override the default AA algorithm choice in the CPU backend, provide a function that
+ * returns whether to use analytic (true) or supersampled (false) for a given path.
+ *
+ * NOTE: This is a temporary API, intended for migration of all clients to one algorithm,
+ * and should not be used.
+ */
+ typedef bool (*PathAnalyticAADeciderProc)(const SkPath&);
+ static void SetPathAnalyticAADecider(PathAnalyticAADeciderProc);
+};
+
+class SkAutoGraphics {
+public:
+ SkAutoGraphics() {
+ SkGraphics::Init();
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkICC.h b/gfx/skia/skia/include/core/SkICC.h
new file mode 100644
index 0000000000..c0b458100c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkICC.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// TODO(kjlubick) remove this shim after clients have been moved to the new location
+#include "include/encode/SkICC.h" // IWYU pragma: export
diff --git a/gfx/skia/skia/include/core/SkImage.h b/gfx/skia/skia/include/core/SkImage.h
new file mode 100644
index 0000000000..c7311ae1b6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImage.h
@@ -0,0 +1,1575 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_DEFINED
+#define SkImage_DEFINED
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrTypes.h"
+#endif
+#if defined(SK_GRAPHITE)
+#include "include/gpu/graphite/GraphiteTypes.h"
+#endif
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <optional>
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+#include "include/gpu/GrTypes.h"
+#include <android/hardware_buffer.h>
+#endif
+
+// TODO(kjlubick) remove when Chrome has been migrated
+#include "include/core/SkTextureCompressionType.h"
+
+class GrBackendFormat;
+class GrBackendTexture;
+class GrContextThreadSafeProxy;
+class GrDirectContext;
+class GrRecordingContext;
+class GrYUVABackendTextureInfo;
+class GrYUVABackendTextures;
+class SkBitmap;
+class SkColorSpace;
+class SkData;
+class SkImageFilter;
+class SkImageGenerator;
+class SkMatrix;
+class SkMipmap;
+class SkPaint;
+class SkPicture;
+class SkPixmap;
+class SkPromiseImageTexture;
+class SkShader;
+class SkSurfaceProps;
+class SkYUVAPixmaps;
+enum SkColorType : int;
+enum class SkEncodedImageFormat;
+enum class SkTileMode;
+struct SkIPoint;
+struct SkSamplingOptions;
+
+#if defined(SK_GRAPHITE)
+namespace skgpu::graphite {
+class BackendTexture;
+class Recorder;
+class TextureInfo;
+enum class Volatile : bool;
+class YUVABackendTextures;
+}
+#endif
+
+/** \class SkImage
+ SkImage describes a two dimensional array of pixels to draw. The pixels may be
+ decoded in a raster bitmap, encoded in a SkPicture or compressed data stream,
+ or located in GPU memory as a GPU texture.
+
+ SkImage cannot be modified after it is created. SkImage may allocate additional
+ storage as needed; for instance, an encoded SkImage may decode when drawn.
+
+ SkImage width and height are greater than zero. Creating an SkImage with zero width
+ or height returns SkImage equal to nullptr.
+
+ SkImage may be created from SkBitmap, SkPixmap, SkSurface, SkPicture, encoded streams,
+ GPU texture, YUV_ColorSpace data, or hardware buffer. Encoded streams supported
+ include BMP, GIF, HEIF, ICO, JPEG, PNG, WBMP, WebP. Supported encoding details
+ vary with platform.
+*/
+class SK_API SkImage : public SkRefCnt {
+public:
+
+ /** Caller data passed to RasterReleaseProc; may be nullptr.
+ */
+ typedef void* ReleaseContext;
+
+ /** Creates SkImage from SkPixmap and copy of pixels. Since pixels are copied, SkPixmap
+ pixels may be modified or deleted without affecting SkImage.
+
+ SkImage is returned if SkPixmap is valid. Valid SkPixmap parameters include:
+ dimensions are greater than zero;
+ each dimension fits in 29 bits;
+ SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType;
+ row bytes are large enough to hold one row of pixels;
+ pixel address is not nullptr.
+
+ @param pixmap SkImageInfo, pixel address, and row bytes
+ @return copy of SkPixmap pixels, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_MakeRasterCopy
+ */
+ static sk_sp<SkImage> MakeRasterCopy(const SkPixmap& pixmap);
+
+ /** Creates SkImage from SkImageInfo, sharing pixels.
+
+ SkImage is returned if SkImageInfo is valid. Valid SkImageInfo parameters include:
+ dimensions are greater than zero;
+ each dimension fits in 29 bits;
+ SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType;
+ rowBytes are large enough to hold one row of pixels;
+ pixels is not nullptr, and contains enough data for SkImage.
+
+ @param info contains width, height, SkAlphaType, SkColorType, SkColorSpace
+ @param pixels address or pixel storage
+ @param rowBytes size of pixel row or larger
+ @return SkImage sharing pixels, or nullptr
+ */
+ static sk_sp<SkImage> MakeRasterData(const SkImageInfo& info, sk_sp<SkData> pixels,
+ size_t rowBytes);
+
+ /** Function called when SkImage no longer shares pixels. ReleaseContext is
+ provided by caller when SkImage is created, and may be nullptr.
+ */
+ typedef void (*RasterReleaseProc)(const void* pixels, ReleaseContext);
+
+ /** Creates SkImage from pixmap, sharing SkPixmap pixels. Pixels must remain valid and
+ unchanged until rasterReleaseProc is called. rasterReleaseProc is passed
+ releaseContext when SkImage is deleted or no longer refers to pixmap pixels.
+
+ Pass nullptr for rasterReleaseProc to share SkPixmap without requiring a callback
+ when SkImage is released. Pass nullptr for releaseContext if rasterReleaseProc
+ does not require state.
+
+ SkImage is returned if pixmap is valid. Valid SkPixmap parameters include:
+ dimensions are greater than zero;
+ each dimension fits in 29 bits;
+ SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType;
+ row bytes are large enough to hold one row of pixels;
+ pixel address is not nullptr.
+
+ @param pixmap SkImageInfo, pixel address, and row bytes
+ @param rasterReleaseProc function called when pixels can be released; or nullptr
+ @param releaseContext state passed to rasterReleaseProc; or nullptr
+ @return SkImage sharing pixmap
+ */
+ static sk_sp<SkImage> MakeFromRaster(const SkPixmap& pixmap,
+ RasterReleaseProc rasterReleaseProc,
+ ReleaseContext releaseContext);
+
+ /** Creates SkImage from bitmap, sharing or copying bitmap pixels. If the bitmap
+ is marked immutable, and its pixel memory is shareable, it may be shared
+ instead of copied.
+
+ SkImage is returned if bitmap is valid. Valid SkBitmap parameters include:
+ dimensions are greater than zero;
+ each dimension fits in 29 bits;
+ SkColorType and SkAlphaType are valid, and SkColorType is not kUnknown_SkColorType;
+ row bytes are large enough to hold one row of pixels;
+ pixel address is not nullptr.
+
+ @param bitmap SkImageInfo, row bytes, and pixels
+ @return created SkImage, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_MakeFromBitmap
+ */
+ static sk_sp<SkImage> MakeFromBitmap(const SkBitmap& bitmap);
+
+ /** Creates SkImage from data returned by imageGenerator. Generated data is owned by SkImage and
+ may not be shared or accessed.
+
+ SkImage is returned if generator data is valid. Valid data parameters vary by type of data
+ and platform.
+
+ imageGenerator may wrap SkPicture data, codec data, or custom data.
+
+ @param imageGenerator stock or custom routines to retrieve SkImage
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromGenerator(std::unique_ptr<SkImageGenerator> imageGenerator);
+
+ /**
+ * Return an image backed by the encoded data, but attempt to defer decoding until the image
+ * is actually used/drawn. This deferral allows the system to cache the result, either on the
+ * CPU or on the GPU, depending on where the image is drawn. If memory is low, the cache may
+ * be purged, causing the next draw of the image to have to re-decode.
+ *
+ * If alphaType is nullopt, the image's alpha type will be chosen automatically based on the
+ * image format. Transparent images will default to kPremul_SkAlphaType. If alphaType contains
+ * kPremul_SkAlphaType or kUnpremul_SkAlphaType, that alpha type will be used. Forcing opaque
+ * (passing kOpaque_SkAlphaType) is not allowed, and will return nullptr.
+ *
+ * This is similar to DecodeTo[Raster,Texture], but this method will attempt to defer the
+ * actual decode, while the DecodeTo... method explicitly decode and allocate the backend
+ * when the call is made.
+ *
+ * If the encoded format is not supported, nullptr is returned.
+ *
+ * @param encoded the encoded data
+ * @return created SkImage, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_MakeFromEncoded
+ */
+ static sk_sp<SkImage> MakeFromEncoded(sk_sp<SkData> encoded,
+ std::optional<SkAlphaType> alphaType = std::nullopt);
+
+ // TODO(kjlubick) remove this once Chrome has been migrated to new type
+ static const SkTextureCompressionType kETC1_CompressionType =
+ SkTextureCompressionType::kETC1_RGB8;
+
+ /** Creates a CPU-backed SkImage from compressed data.
+
+ This method will decompress the compressed data and create an image wrapping
+ it. Any mipmap levels present in the compressed data are discarded.
+
+ @param data compressed data to store in SkImage
+ @param width width of full SkImage
+ @param height height of full SkImage
+ @param type type of compression used
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeRasterFromCompressed(sk_sp<SkData> data,
+ int width, int height,
+ SkTextureCompressionType type);
+
+ enum class BitDepth {
+ kU8, //!< uses 8-bit unsigned int per color component
+ kF16, //!< uses 16-bit float per color component
+ };
+
+ /** Creates SkImage from picture. Returned SkImage width and height are set by dimensions.
+ SkImage draws picture with matrix and paint, set to bitDepth and colorSpace.
+
+ If matrix is nullptr, draws with identity SkMatrix. If paint is nullptr, draws
+ with default SkPaint. colorSpace may be nullptr.
+
+ @param picture stream of drawing commands
+ @param dimensions width and height
+ @param matrix SkMatrix to rotate, scale, translate, and so on; may be nullptr
+ @param paint SkPaint to apply transparency, filtering, and so on; may be nullptr
+ @param bitDepth 8-bit integer or 16-bit float: per component
+ @param colorSpace range of colors; may be nullptr
+ @param props props to use when rasterizing the picture
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromPicture(sk_sp<SkPicture> picture, const SkISize& dimensions,
+ const SkMatrix* matrix, const SkPaint* paint,
+ BitDepth bitDepth, sk_sp<SkColorSpace> colorSpace,
+ SkSurfaceProps props);
+ static sk_sp<SkImage> MakeFromPicture(sk_sp<SkPicture> picture, const SkISize& dimensions,
+ const SkMatrix* matrix, const SkPaint* paint,
+ BitDepth bitDepth, sk_sp<SkColorSpace> colorSpace);
+
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)
+ /** User function called when supplied texture may be deleted.
+ */
+ typedef void (*TextureReleaseProc)(ReleaseContext releaseContext);
+#endif
+
+#if defined(SK_GANESH)
+ /** Creates a GPU-backed SkImage from compressed data.
+
+ This method will return an SkImage representing the compressed data.
+ If the GPU doesn't support the specified compression method, the data
+ will be decompressed and then wrapped in a GPU-backed image.
+
+ Note: one can query the supported compression formats via
+ GrRecordingContext::compressedBackendFormat.
+
+ @param context GPU context
+ @param data compressed data to store in SkImage
+ @param width width of full SkImage
+ @param height height of full SkImage
+ @param type type of compression used
+ @param mipmapped does 'data' contain data for all the mipmap levels?
+ @param isProtected do the contents of 'data' require DRM protection (on Vulkan)?
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeTextureFromCompressed(GrDirectContext* direct,
+ sk_sp<SkData> data,
+ int width, int height,
+ SkTextureCompressionType type,
+ GrMipmapped mipmapped = GrMipmapped::kNo,
+ GrProtected isProtected = GrProtected::kNo);
+
+ /** Creates SkImage from GPU texture associated with context. GPU texture must stay
+ valid and unchanged until textureReleaseProc is called. textureReleaseProc is
+ passed releaseContext when SkImage is deleted or no longer refers to texture.
+
+ SkImage is returned if format of backendTexture is recognized and supported.
+ Recognized formats vary by GPU back-end.
+
+ @note When using a DDL recording context, textureReleaseProc will be called on the
+ GPU thread after the DDL is played back on the direct context.
+
+ @param context GPU context
+ @param backendTexture Texture residing on GPU
+ @param origin Origin of backendTexture
+ @param colorType Color type of the resulting image
+ @param alphaType Alpha type of the resulting image
+ @param colorSpace This describes the color space of this image's contents, as
+ seen after sampling. In general, if the format of the backend
+ texture is SRGB, some linear colorSpace should be supplied
+ (e.g., SkColorSpace::MakeSRGBLinear()). If the format of the
+ backend texture is linear, then the colorSpace should include
+ a description of the transfer function as
+ well (e.g., SkColorSpace::MakeSRGB()).
+ @param textureReleaseProc Function called when texture can be released
+ @param releaseContext State passed to textureReleaseProc
+ @return Created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromTexture(GrRecordingContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Creates an SkImage from a GPU backend texture. The backend texture must stay
+ valid and unchanged until textureReleaseProc is called. The textureReleaseProc is
+ called when the SkImage is deleted or no longer refers to the texture and will be
+ passed the releaseContext.
+
+ An SkImage is returned if the format of backendTexture is recognized and supported.
+ Recognized formats vary by GPU back-end.
+
+ @note When using a DDL recording context, textureReleaseProc will be called on the
+ GPU thread after the DDL is played back on the direct context.
+
+ @param context The GPU context
+ @param backendTexture A texture already allocated by the GPU
+ @param origin Origin of backendTexture
+ @param alphaType This characterizes the nature of the alpha values in the
+ backend texture. For opaque compressed formats (e.g., ETC1)
+ this should usually be set to kOpaque_SkAlphaType.
+ @param colorSpace This describes the color space of this image's contents, as
+ seen after sampling. In general, if the format of the backend
+ texture is SRGB, some linear colorSpace should be supplied
+ (e.g., SkColorSpace::MakeSRGBLinear()). If the format of the
+ backend texture is linear, then the colorSpace should include
+ a description of the transfer function as
+ well (e.g., SkColorSpace::MakeSRGB()).
+ @param textureReleaseProc Function called when the backend texture can be released
+ @param releaseContext State passed to textureReleaseProc
+ @return Created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromCompressedTexture(GrRecordingContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Creates SkImage from pixmap. SkImage is uploaded to GPU back-end using context.
+
+ Created SkImage is available to other GPU contexts, and is available across thread
+ boundaries. All contexts must be in the same GPU share group, or otherwise
+ share resources.
+
+ When SkImage is no longer referenced, context releases texture memory
+ asynchronously.
+
+ SkColorSpace of SkImage is determined by pixmap.colorSpace().
+
+ SkImage is returned referring to GPU back-end if context is not nullptr,
+ format of data is recognized and supported, and if context supports moving
+ resources between contexts. Otherwise, pixmap pixel data is copied and SkImage
+ as returned in raster format if possible; nullptr may be returned.
+ Recognized GPU formats vary by platform and GPU back-end.
+
+ @param context GPU context
+ @param pixmap SkImageInfo, pixel address, and row bytes
+ @param buildMips create SkImage as mip map if true
+ @param limitToMaxTextureSize downscale image to GPU maximum texture size, if necessary
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeCrossContextFromPixmap(GrDirectContext* context,
+ const SkPixmap& pixmap,
+ bool buildMips,
+ bool limitToMaxTextureSize = false);
+
+ /** Creates SkImage from backendTexture associated with context. backendTexture and
+ returned SkImage are managed internally, and are released when no longer needed.
+
+ SkImage is returned if format of backendTexture is recognized and supported.
+ Recognized formats vary by GPU back-end.
+
+ @param context GPU context
+ @param backendTexture texture residing on GPU
+ @param textureOrigin origin of backendTexture
+ @param colorType color type of the resulting image
+ @param alphaType alpha type of the resulting image
+ @param colorSpace range of colors; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromAdoptedTexture(GrRecordingContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin textureOrigin,
+ SkColorType colorType);
+ static sk_sp<SkImage> MakeFromAdoptedTexture(GrRecordingContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin textureOrigin,
+ SkColorType colorType,
+ SkAlphaType alphaType);
+ static sk_sp<SkImage> MakeFromAdoptedTexture(GrRecordingContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin textureOrigin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace);
+
+ /** Creates an SkImage from YUV[A] planar textures. This requires that the textures stay valid
+ for the lifetime of the image. The ReleaseContext can be used to know when it is safe to
+ either delete or overwrite the textures. If ReleaseProc is provided it is also called before
+ return on failure.
+
+ @param context GPU context
+ @param yuvaTextures A set of textures containing YUVA data and a description of the
+ data and transformation to RGBA.
+ @param imageColorSpace range of colors of the resulting image after conversion to RGB;
+ may be nullptr
+ @param textureReleaseProc called when the backend textures can be released
+ @param releaseContext state passed to textureReleaseProc
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromYUVATextures(GrRecordingContext* context,
+ const GrYUVABackendTextures& yuvaTextures,
+ sk_sp<SkColorSpace> imageColorSpace,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+ static sk_sp<SkImage> MakeFromYUVATextures(GrRecordingContext* context,
+ const GrYUVABackendTextures& yuvaTextures);
+
+ /** Creates SkImage from SkYUVAPixmaps.
+
+ The image will remain planar with each plane converted to a texture using the passed
+ GrRecordingContext.
+
+ SkYUVAPixmaps has a SkYUVAInfo which specifies the transformation from YUV to RGB.
+ The SkColorSpace of the resulting RGB values is specified by imageColorSpace. This will
+ be the SkColorSpace reported by the image and when drawn the RGB values will be converted
+ from this space into the destination space (if the destination is tagged).
+
+ Currently, this is only supported using the GPU backend and will fail if context is nullptr.
+
+ SkYUVAPixmaps does not need to remain valid after this returns.
+
+ @param context GPU context
+ @param pixmaps The planes as pixmaps with supported SkYUVAInfo that
+ specifies conversion to RGB.
+ @param buildMips create internal YUVA textures as mip map if kYes. This is
+ silently ignored if the context does not support mip maps.
+ @param limitToMaxTextureSize downscale image to GPU maximum texture size, if necessary
+ @param imageColorSpace range of colors of the resulting image; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromYUVAPixmaps(GrRecordingContext* context,
+ const SkYUVAPixmaps& pixmaps,
+ GrMipmapped buildMips,
+ bool limitToMaxTextureSize,
+ sk_sp<SkColorSpace> imageColorSpace);
+ static sk_sp<SkImage> MakeFromYUVAPixmaps(GrRecordingContext* context,
+ const SkYUVAPixmaps& pixmaps,
+ GrMipmapped buildMips = GrMipmapped::kNo,
+ bool limitToMaxTextureSize = false);
+
+ using PromiseImageTextureContext = void*;
+ using PromiseImageTextureFulfillProc =
+ sk_sp<SkPromiseImageTexture> (*)(PromiseImageTextureContext);
+ using PromiseImageTextureReleaseProc = void (*)(PromiseImageTextureContext);
+
+ /** Create a new SkImage that is very similar to an SkImage created by MakeFromTexture. The
+ difference is that the caller need not have created the texture nor populated it with the
+ image pixel data. Moreover, the SkImage may be created on a thread as the creation of the
+ image does not require access to the backend API or GrDirectContext. Instead of passing a
+ GrBackendTexture the client supplies a description of the texture consisting of
+ GrBackendFormat, width, height, and GrMipmapped state. The resulting SkImage can be drawn
+ to a SkDeferredDisplayListRecorder or directly to a GPU-backed SkSurface.
+
+ When the actual texture is required to perform a backend API draw, textureFulfillProc will
+ be called to receive a GrBackendTexture. The properties of the GrBackendTexture must match
+ those set during the SkImage creation, and it must refer to a valid existing texture in the
+ backend API context/device, and be populated with the image pixel data. The texture cannot
+ be deleted until textureReleaseProc is called.
+
+ There is at most one call to each of textureFulfillProc and textureReleaseProc.
+ textureReleaseProc is always called even if image creation fails or if the
+ image is never fulfilled (e.g. it is never drawn or all draws are clipped out)
+
+ @param gpuContextProxy the thread-safe proxy of the gpu context. required.
+ @param backendFormat format of promised gpu texture
+ @param dimensions width & height of promised gpu texture
+ @param mipmapped mip mapped state of promised gpu texture
+ @param origin surface origin of promised gpu texture
+ @param colorType color type of promised gpu texture
+ @param alphaType alpha type of promised gpu texture
+ @param colorSpace range of colors; may be nullptr
+ @param textureFulfillProc function called to get actual gpu texture
+ @param textureReleaseProc function called when texture can be deleted
+ @param textureContext state passed to textureFulfillProc and textureReleaseProc
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakePromiseTexture(sk_sp<GrContextThreadSafeProxy> gpuContextProxy,
+ const GrBackendFormat& backendFormat,
+ SkISize dimensions,
+ GrMipmapped mipmapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureContext textureContext);
+
+ /** This entry point operates like 'MakePromiseTexture' but it is used to construct a SkImage
+ from YUV[A] data. The source data may be planar (i.e. spread across multiple textures). In
+ the extreme Y, U, V, and A are all in different planes and thus the image is specified by
+ four textures. 'backendTextureInfo' describes the planar arrangement, texture formats,
+ conversion to RGB, and origin of the textures. Separate 'textureFulfillProc' and
+ 'textureReleaseProc' calls are made for each texture. Each texture has its own
+ PromiseImageTextureContext. If 'backendTextureInfo' is not valid then no release proc
+ calls are made. Otherwise, the calls will be made even on failure. 'textureContexts' has one
+ entry for each of the up to four textures, as indicated by 'backendTextureInfo'.
+
+ Currently the mip mapped property of 'backendTextureInfo' is ignored. However, in the
+ near future it will be required that if it is kYes then textureFulfillProc must return
+ a mip mapped texture for each plane in order to successfully draw the image.
+
+ @param gpuContextProxy the thread-safe proxy of the gpu context. required.
+ @param backendTextureInfo info about the promised yuva gpu texture
+ @param imageColorSpace range of colors; may be nullptr
+ @param textureFulfillProc function called to get actual gpu texture
+ @param textureReleaseProc function called when texture can be deleted
+ @param textureContexts state passed to textureFulfillProc and textureReleaseProc
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakePromiseYUVATexture(sk_sp<GrContextThreadSafeProxy> gpuContextProxy,
+ const GrYUVABackendTextureInfo& backendTextureInfo,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureContext textureContexts[]);
+
+#endif // defined(SK_GANESH)
+
+ /** Returns a SkImageInfo describing the width, height, color type, alpha type, and color space
+ of the SkImage.
+
+ @return image info of SkImage.
+ */
+ const SkImageInfo& imageInfo() const { return fInfo; }
+
+ /** Returns pixel count in each row.
+
+ @return pixel width in SkImage
+ */
+ int width() const { return fInfo.width(); }
+
+ /** Returns pixel row count.
+
+ @return pixel height in SkImage
+ */
+ int height() const { return fInfo.height(); }
+
+ /** Returns SkISize { width(), height() }.
+
+ @return integral size of width() and height()
+ */
+ SkISize dimensions() const { return SkISize::Make(fInfo.width(), fInfo.height()); }
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @return integral rectangle from origin to width() and height()
+ */
+ SkIRect bounds() const { return SkIRect::MakeWH(fInfo.width(), fInfo.height()); }
+
+ /** Returns value unique to image. SkImage contents cannot change after SkImage is
+        created. Any operation to create a new SkImage will generate a new
+ unique number.
+
+ @return unique identifier
+ */
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /** Returns SkAlphaType.
+
+ SkAlphaType returned was a parameter to an SkImage constructor,
+ or was parsed from encoded data.
+
+ @return SkAlphaType in SkImage
+
+ example: https://fiddle.skia.org/c/@Image_alphaType
+ */
+ SkAlphaType alphaType() const;
+
+ /** Returns SkColorType if known; otherwise, returns kUnknown_SkColorType.
+
+ @return SkColorType of SkImage
+
+ example: https://fiddle.skia.org/c/@Image_colorType
+ */
+ SkColorType colorType() const;
+
+ /** Returns SkColorSpace, the range of colors, associated with SkImage. The
+ reference count of SkColorSpace is unchanged. The returned SkColorSpace is
+ immutable.
+
+ SkColorSpace returned was passed to an SkImage constructor,
+ or was parsed from encoded data. SkColorSpace returned may be ignored when SkImage
+ is drawn, depending on the capabilities of the SkSurface receiving the drawing.
+
+ @return SkColorSpace in SkImage, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_colorSpace
+ */
+ SkColorSpace* colorSpace() const;
+
+ /** Returns a smart pointer to SkColorSpace, the range of colors, associated with
+ SkImage. The smart pointer tracks the number of objects sharing this
+ SkColorSpace reference so the memory is released when the owners destruct.
+
+ The returned SkColorSpace is immutable.
+
+ SkColorSpace returned was passed to an SkImage constructor,
+ or was parsed from encoded data. SkColorSpace returned may be ignored when SkImage
+ is drawn, depending on the capabilities of the SkSurface receiving the drawing.
+
+ @return SkColorSpace in SkImage, or nullptr, wrapped in a smart pointer
+
+ example: https://fiddle.skia.org/c/@Image_refColorSpace
+ */
+ sk_sp<SkColorSpace> refColorSpace() const;
+
+ /** Returns true if SkImage pixels represent transparency only. If true, each pixel
+ is packed in 8 bits as defined by kAlpha_8_SkColorType.
+
+ @return true if pixels represent a transparency mask
+
+ example: https://fiddle.skia.org/c/@Image_isAlphaOnly
+ */
+ bool isAlphaOnly() const;
+
+ /** Returns true if pixels ignore their alpha value and are treated as fully opaque.
+
+ @return true if SkAlphaType is kOpaque_SkAlphaType
+ */
+ bool isOpaque() const { return SkAlphaTypeIsOpaque(this->alphaType()); }
+
+ /**
+ * Make a shader with the specified tiling and mipmap sampling.
+ */
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions&,
+ const SkMatrix* localMatrix = nullptr) const;
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const;
+ /** Defaults to clamp in both X and Y. */
+ sk_sp<SkShader> makeShader(const SkSamplingOptions& sampling, const SkMatrix& lm) const;
+ sk_sp<SkShader> makeShader(const SkSamplingOptions& sampling,
+ const SkMatrix* lm = nullptr) const;
+
+ /**
+ * makeRawShader functions like makeShader, but for images that contain non-color data.
+ * This includes images encoding things like normals, material properties (eg, roughness),
+ * heightmaps, or any other purely mathematical data that happens to be stored in an image.
+ * These types of images are useful with some programmable shaders (see: SkRuntimeEffect).
+ *
+ * Raw image shaders work like regular image shaders (including filtering and tiling), with
+ * a few major differences:
+ * - No color space transformation is ever applied (the color space of the image is ignored).
+ * - Images with an alpha type of kUnpremul are *not* automatically premultiplied.
+ * - Bicubic filtering is not supported. If SkSamplingOptions::useCubic is true, these
+ * factories will return nullptr.
+ */
+ sk_sp<SkShader> makeRawShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions&,
+ const SkMatrix* localMatrix = nullptr) const;
+ sk_sp<SkShader> makeRawShader(SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const;
+ /** Defaults to clamp in both X and Y. */
+ sk_sp<SkShader> makeRawShader(const SkSamplingOptions& sampling, const SkMatrix& lm) const;
+ sk_sp<SkShader> makeRawShader(const SkSamplingOptions& sampling,
+ const SkMatrix* lm = nullptr) const;
+
+ /** Copies SkImage pixel address, row bytes, and SkImageInfo to pixmap, if address
+ is available, and returns true. If pixel address is not available, return
+ false and leave pixmap unchanged.
+
+ @param pixmap storage for pixel state if pixels are readable; otherwise, ignored
+ @return true if SkImage has direct access to pixels
+
+ example: https://fiddle.skia.org/c/@Image_peekPixels
+ */
+ bool peekPixels(SkPixmap* pixmap) const;
+
+ /** Returns true if the contents of SkImage was created on or uploaded to GPU memory,
+ and is available as a GPU texture.
+
+ @return true if SkImage is a GPU texture
+
+ example: https://fiddle.skia.org/c/@Image_isTextureBacked
+ */
+ bool isTextureBacked() const;
+
+ /** Returns an approximation of the amount of texture memory used by the image. Returns
+ zero if the image is not texture backed or if the texture has an external format.
+ */
+ size_t textureSize() const;
+
+ /** Returns true if SkImage can be drawn on either raster surface or GPU surface.
+ If context is nullptr, tests if SkImage draws on raster surface;
+ otherwise, tests if SkImage draws on GPU surface associated with context.
+
+ SkImage backed by GPU texture may become invalid if associated context is
+ invalid. lazy image may be invalid and may not draw to raster surface or
+ GPU surface or both.
+
+ @param context GPU context
+ @return true if SkImage can be drawn
+
+ example: https://fiddle.skia.org/c/@Image_isValid
+ */
+ bool isValid(GrRecordingContext* context) const;
+
+#if defined(SK_GANESH)
+ /** Flushes any pending uses of texture-backed images in the GPU backend. If the image is not
+ texture-backed (including promise texture images) or if the GrDirectContext does not
+ have the same context ID as the context backing the image then this is a no-op.
+
+ If the image was not used in any non-culled draws in the current queue of work for the
+ passed GrDirectContext then this is a no-op unless the GrFlushInfo contains semaphores or
+ a finish proc. Those are respected even when the image has not been used.
+
+ @param context the context on which to flush pending usages of the image.
+ @param info flush options
+ */
+ GrSemaphoresSubmitted flush(GrDirectContext* context, const GrFlushInfo& flushInfo) const;
+
+ void flush(GrDirectContext* context) const { this->flush(context, {}); }
+
+ /** Version of flush() that uses a default GrFlushInfo. Also submits the flushed work to the
+ GPU.
+ */
+ void flushAndSubmit(GrDirectContext*) const;
+
+ /** Retrieves the back-end texture. If SkImage has no back-end texture, an invalid
+ object is returned. Call GrBackendTexture::isValid to determine if the result
+ is valid.
+
+ If flushPendingGrContextIO is true, completes deferred I/O operations.
+
+        If origin is not nullptr, copies location of content drawn into SkImage.
+
+ @param flushPendingGrContextIO flag to flush outstanding requests
+ @return back-end API texture handle; invalid on failure
+ */
+ GrBackendTexture getBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin = nullptr) const;
+#endif // defined(SK_GANESH)
+
+ /** \enum SkImage::CachingHint
+ CachingHint selects whether Skia may internally cache SkBitmap generated by
+ decoding SkImage, or by copying SkImage from GPU to CPU. The default behavior
+ allows caching SkBitmap.
+
+ Choose kDisallow_CachingHint if SkImage pixels are to be used only once, or
+ if SkImage pixels reside in a cache outside of Skia, or to reduce memory pressure.
+
+ Choosing kAllow_CachingHint does not ensure that pixels will be cached.
+ SkImage pixels may not be cached if memory requirements are too large or
+ pixels are not accessible.
+ */
+ enum CachingHint {
+ kAllow_CachingHint, //!< allows internally caching decoded and copied pixels
+ kDisallow_CachingHint, //!< disallows internally caching decoded and copied pixels
+ };
+
+ /** Copies SkRect of pixels from SkImage to dstPixels. Copy starts at offset (srcX, srcY),
+ and does not exceed SkImage (width(), height()).
+
+ dstInfo specifies width, height, SkColorType, SkAlphaType, and SkColorSpace of
+ destination. dstRowBytes specifies the gap from one destination row to the next.
+ Returns true if pixels are copied. Returns false if:
+ - dstInfo.addr() equals nullptr
+ - dstRowBytes is less than dstInfo.minRowBytes()
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match.
+ If SkImage SkColorType is kGray_8_SkColorType, dstInfo.colorSpace() must match.
+ If SkImage SkAlphaType is kOpaque_SkAlphaType, dstInfo.alphaType() must
+ match. If SkImage SkColorSpace is nullptr, dstInfo.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(srcX) >= Image width(), or if abs(srcY) >= Image height().
+
+ If cachingHint is kAllow_CachingHint, pixels may be retained locally.
+ If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache.
+
+ @param context the GrDirectContext in play, if it exists
+ @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace
+ @param dstPixels destination pixel storage
+ @param dstRowBytes destination row length
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @param cachingHint whether the pixels should be cached locally
+ @return true if pixels are copied to dstPixels
+ */
+ bool readPixels(GrDirectContext* context,
+ const SkImageInfo& dstInfo,
+ void* dstPixels,
+ size_t dstRowBytes,
+ int srcX, int srcY,
+ CachingHint cachingHint = kAllow_CachingHint) const;
+
+ /** Copies a SkRect of pixels from SkImage to dst. Copy starts at (srcX, srcY), and
+ does not exceed SkImage (width(), height()).
+
+ dst specifies width, height, SkColorType, SkAlphaType, SkColorSpace, pixel storage,
+        and row bytes of destination. dst.rowBytes() specifies the gap from one destination
+ row to the next. Returns true if pixels are copied. Returns false if:
+ - dst pixel storage equals nullptr
+ - dst.rowBytes is less than SkImageInfo::minRowBytes
+ - SkPixelRef is nullptr
+
+ Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst.colorType() must match.
+ If SkImage SkColorType is kGray_8_SkColorType, dst.colorSpace() must match.
+ If SkImage SkAlphaType is kOpaque_SkAlphaType, dst.alphaType() must
+ match. If SkImage SkColorSpace is nullptr, dst.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if width() or height() is zero or negative.
+ Returns false if abs(srcX) >= Image width(), or if abs(srcY) >= Image height().
+
+ If cachingHint is kAllow_CachingHint, pixels may be retained locally.
+ If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache.
+
+ @param context the GrDirectContext in play, if it exists
+ @param dst destination SkPixmap: SkImageInfo, pixels, row bytes
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+        @param cachingHint  whether the pixels should be cached locally
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(GrDirectContext* context,
+ const SkPixmap& dst,
+ int srcX,
+ int srcY,
+ CachingHint cachingHint = kAllow_CachingHint) const;
+
+#ifndef SK_IMAGE_READ_PIXELS_DISABLE_LEGACY_API
+ /** Deprecated. Use the variants that accept a GrDirectContext. */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY, CachingHint cachingHint = kAllow_CachingHint) const;
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY,
+ CachingHint cachingHint = kAllow_CachingHint) const;
+#endif
+
+ /** The result from asyncRescaleAndReadPixels() or asyncRescaleAndReadPixelsYUV420(). */
+ class AsyncReadResult {
+ public:
+ AsyncReadResult(const AsyncReadResult&) = delete;
+ AsyncReadResult(AsyncReadResult&&) = delete;
+ AsyncReadResult& operator=(const AsyncReadResult&) = delete;
+ AsyncReadResult& operator=(AsyncReadResult&&) = delete;
+
+ virtual ~AsyncReadResult() = default;
+ virtual int count() const = 0;
+ virtual const void* data(int i) const = 0;
+ virtual size_t rowBytes(int i) const = 0;
+
+ protected:
+ AsyncReadResult() = default;
+ };
+
+ /** Client-provided context that is passed to client-provided ReadPixelsContext. */
+ using ReadPixelsContext = void*;
+
+ /** Client-provided callback to asyncRescaleAndReadPixels() or
+ asyncRescaleAndReadPixelsYUV420() that is called when read result is ready or on failure.
+ */
+ using ReadPixelsCallback = void(ReadPixelsContext, std::unique_ptr<const AsyncReadResult>);
+
+ enum class RescaleGamma : bool { kSrc, kLinear };
+
+ enum class RescaleMode {
+ kNearest,
+ kLinear,
+ kRepeatedLinear,
+ kRepeatedCubic,
+ };
+
+ /** Makes image pixel data available to caller, possibly asynchronously. It can also rescale
+ the image pixels.
+
+ Currently asynchronous reads are only supported on the GPU backend and only when the
+ underlying 3D API supports transfer buffers and CPU/GPU synchronization primitives. In all
+ other cases this operates synchronously.
+
+ Data is read from the source sub-rectangle, is optionally converted to a linear gamma, is
+ rescaled to the size indicated by 'info', is then converted to the color space, color type,
+ and alpha type of 'info'. A 'srcRect' that is not contained by the bounds of the image
+ causes failure.
+
+ When the pixel data is ready the caller's ReadPixelsCallback is called with a
+ AsyncReadResult containing pixel data in the requested color type, alpha type, and color
+ space. The AsyncReadResult will have count() == 1. Upon failure the callback is called with
+ nullptr for AsyncReadResult. For a GPU image this flushes work but a submit must occur to
+ guarantee a finite time before the callback is called.
+
+ The data is valid for the lifetime of AsyncReadResult with the exception that if the SkImage
+ is GPU-backed the data is immediately invalidated if the context is abandoned or
+ destroyed.
+
+ @param info info of the requested pixels
+ @param srcRect subrectangle of image to read
+ @param rescaleGamma controls whether rescaling is done in the image's gamma or whether
+ the source data is transformed to a linear gamma before rescaling.
+ @param rescaleMode controls the technique (and cost) of the rescaling
+ @param callback function to call with result of the read
+ @param context passed to callback
+ */
+ void asyncRescaleAndReadPixels(const SkImageInfo& info,
+ const SkIRect& srcRect,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) const;
+
+ /**
+ Similar to asyncRescaleAndReadPixels but performs an additional conversion to YUV. The
+ RGB->YUV conversion is controlled by 'yuvColorSpace'. The YUV data is returned as three
+ planes ordered y, u, v. The u and v planes are half the width and height of the resized
+ rectangle. The y, u, and v values are single bytes. Currently this fails if 'dstSize'
+ width and height are not even. A 'srcRect' that is not contained by the bounds of the
+ image causes failure.
+
+ When the pixel data is ready the caller's ReadPixelsCallback is called with a
+ AsyncReadResult containing the planar data. The AsyncReadResult will have count() == 3.
+ Upon failure the callback is called with nullptr for AsyncReadResult. For a GPU image this
+ flushes work but a submit must occur to guarantee a finite time before the callback is
+ called.
+
+ The data is valid for the lifetime of AsyncReadResult with the exception that if the SkImage
+ is GPU-backed the data is immediately invalidated if the context is abandoned or
+ destroyed.
+
+ @param yuvColorSpace The transformation from RGB to YUV. Applied to the resized image
+ after it is converted to dstColorSpace.
+ @param dstColorSpace The color space to convert the resized image to, after rescaling.
+ @param srcRect The portion of the image to rescale and convert to YUV planes.
+ @param dstSize The size to rescale srcRect to
+ @param rescaleGamma controls whether rescaling is done in the image's gamma or whether
+ the source data is transformed to a linear gamma before rescaling.
+ @param rescaleMode controls the technique (and cost) of the rescaling
+ @param callback function to call with the planar read result
+ @param context passed to callback
+ */
+ void asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) const;
+
+ /** Copies SkImage to dst, scaling pixels to fit dst.width() and dst.height(), and
+ converting pixels to match dst.colorType() and dst.alphaType(). Returns true if
+ pixels are copied. Returns false if dst.addr() is nullptr, or dst.rowBytes() is
+ less than dst SkImageInfo::minRowBytes.
+
+ Pixels are copied only if pixel conversion is possible. If SkImage SkColorType is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst.colorType() must match.
+ If SkImage SkColorType is kGray_8_SkColorType, dst.colorSpace() must match.
+ If SkImage SkAlphaType is kOpaque_SkAlphaType, dst.alphaType() must
+ match. If SkImage SkColorSpace is nullptr, dst.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ If cachingHint is kAllow_CachingHint, pixels may be retained locally.
+ If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache.
+
+ @param dst destination SkPixmap: SkImageInfo, pixels, row bytes
+ @return true if pixels are scaled to fit dst
+ */
+ bool scalePixels(const SkPixmap& dst, const SkSamplingOptions&,
+ CachingHint cachingHint = kAllow_CachingHint) const;
+
+ /** Encodes SkImage pixels, returning result as SkData.
+
+ Returns nullptr if encoding fails, or if encodedImageFormat is not supported.
+
+ SkImage encoding in a format requires both building with one or more of:
+ SK_ENCODE_JPEG, SK_ENCODE_PNG, SK_ENCODE_WEBP; and platform support
+ for the encoded format.
+
+ If SK_BUILD_FOR_MAC or SK_BUILD_FOR_IOS is defined, encodedImageFormat can
+ additionally be one of: SkEncodedImageFormat::kICO, SkEncodedImageFormat::kBMP,
+ SkEncodedImageFormat::kGIF.
+
+ quality is a platform and format specific metric trading off size and encoding
+ error. When used, quality equaling 100 encodes with the least error. quality may
+ be ignored by the encoder.
+
+ @param context the GrDirectContext in play, if it exists; can be nullptr
+ @param encodedImageFormat one of: SkEncodedImageFormat::kJPEG, SkEncodedImageFormat::kPNG,
+ SkEncodedImageFormat::kWEBP
+ @param quality encoder specific metric with 100 equaling best
+ @return encoded SkImage, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_encodeToData
+ */
+ sk_sp<SkData> encodeToData(GrDirectContext* context,
+ SkEncodedImageFormat encodedImageFormat,
+ int quality) const;
+#ifndef SK_IMAGE_READ_PIXELS_DISABLE_LEGACY_API
+ // Deprecated, use above version instead
+ sk_sp<SkData> encodeToData(SkEncodedImageFormat encodedImageFormat, int quality) const;
+#endif
+
+ /** Encodes SkImage pixels, returning result as SkData. Returns existing encoded data
+ if present; otherwise, SkImage is encoded with SkEncodedImageFormat::kPNG. Skia
+ must be built with SK_ENCODE_PNG to encode SkImage.
+
+ Returns nullptr if existing encoded data is missing or invalid, and
+ encoding fails.
+
+ @return encoded SkImage, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_encodeToData_2
+ */
+ sk_sp<SkData> encodeToData(GrDirectContext* context) const;
+#ifndef SK_IMAGE_READ_PIXELS_DISABLE_LEGACY_API
+ // Deprecated, use above version instead
+ sk_sp<SkData> encodeToData() const;
+#endif
+
+ /** Returns encoded SkImage pixels as SkData, if SkImage was created from supported
+ encoded stream format. Platform support for formats vary and may require building
+ with one or more of: SK_ENCODE_JPEG, SK_ENCODE_PNG, SK_ENCODE_WEBP.
+
+ Returns nullptr if SkImage contents are not encoded.
+
+ @return encoded SkImage, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_refEncodedData
+ */
+ sk_sp<SkData> refEncodedData() const;
+
+ /** Returns subset of this image.
+
+ Returns nullptr if any of the following are true:
+ - Subset is empty
+ - Subset is not contained inside the image's bounds
+ - Pixels in the image could not be read or copied
+
+ If this image is texture-backed, the context parameter is required and must match the
+ context of the source image. If the context parameter is provided, and the image is
+ raster-backed, the subset will be converted to texture-backed.
+
+ @param subset bounds of returned SkImage
+ @param context the GrDirectContext in play, if it exists
+ @return the subsetted image, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_makeSubset
+ */
+ sk_sp<SkImage> makeSubset(const SkIRect& subset, GrDirectContext* direct = nullptr) const;
+
+ /**
+ * Returns true if the image has mipmap levels.
+ */
+ bool hasMipmaps() const;
+
+ /**
+     * Returns an image with the same "base" pixels as this image, but with mipmap levels
+ * automatically generated and attached.
+ */
+ sk_sp<SkImage> withDefaultMipmaps() const;
+
+#if defined(SK_GANESH)
+ /** Returns SkImage backed by GPU texture associated with context. Returned SkImage is
+ compatible with SkSurface created with dstColorSpace. The returned SkImage respects
+ mipmapped setting; if mipmapped equals GrMipmapped::kYes, the backing texture
+ allocates mip map levels.
+
+ The mipmapped parameter is effectively treated as kNo if MIP maps are not supported by the
+ GPU.
+
+ Returns original SkImage if the image is already texture-backed, the context matches, and
+ mipmapped is compatible with the backing GPU texture. skgpu::Budgeted is ignored in this
+ case.
+
+ Returns nullptr if context is nullptr, or if SkImage was created with another
+ GrDirectContext.
+
+ @param GrDirectContext the GrDirectContext in play, if it exists
+ @param GrMipmapped whether created SkImage texture must allocate mip map levels
+ @param skgpu::Budgeted whether to count a newly created texture for the returned image
+ counts against the context's budget.
+ @return created SkImage, or nullptr
+ */
+ sk_sp<SkImage> makeTextureImage(GrDirectContext*,
+ GrMipmapped = GrMipmapped::kNo,
+ skgpu::Budgeted = skgpu::Budgeted::kYes) const;
+#endif // defined(SK_GANESH)
+
+#if defined(SK_GRAPHITE)
+ /** Creates an SkImage from a GPU texture associated with the recorder.
+
+ SkImage is returned if the format of backendTexture is recognized and supported.
+ Recognized formats vary by GPU back-end.
+
+ @param recorder The recorder
+ @param backendTexture Texture residing on GPU
+ @param colorType Color type of the resulting image
+ @param alphaType Alpha type of the resulting image
+ @param colorSpace This describes the color space of this image's contents, as
+ seen after sampling. In general, if the format of the backend
+ texture is SRGB, some linear colorSpace should be supplied
+ (e.g., SkColorSpace::MakeSRGBLinear()). If the format of the
+ backend texture is linear, then the colorSpace should include
+ a description of the transfer function as
+ well (e.g., SkColorSpace::MakeSRGB()).
+ @param TextureReleaseProc Function called when the backend texture can be released
+ @param ReleaseContext State passed to textureReleaseProc
+ @return Created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeGraphiteFromBackendTexture(skgpu::graphite::Recorder*,
+ const skgpu::graphite::BackendTexture&,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ TextureReleaseProc = nullptr,
+ ReleaseContext = nullptr);
+
+ // Passed to both fulfill and imageRelease
+ using GraphitePromiseImageContext = void*;
+ // Returned from fulfill and passed into textureRelease
+ using GraphitePromiseTextureReleaseContext = void*;
+
+ using GraphitePromiseImageFulfillProc =
+ std::tuple<skgpu::graphite::BackendTexture, GraphitePromiseTextureReleaseContext>
+ (*)(GraphitePromiseImageContext);
+ using GraphitePromiseImageReleaseProc = void (*)(GraphitePromiseImageContext);
+ using GraphitePromiseTextureReleaseProc = void (*)(GraphitePromiseTextureReleaseContext);
+
+ /** Create a new SkImage that is very similar to an SkImage created by
+ MakeGraphiteFromBackendTexture. The difference is that the caller need not have created the
+ backend texture nor populated it with data when creating the image. Instead of passing a
+ BackendTexture to the factory the client supplies a description of the texture consisting
+ of dimensions, TextureInfo, SkColorInfo and Volatility.
+
+ In general, 'fulfill' must return a BackendTexture that matches the properties
+ provided at SkImage creation time. The BackendTexture must refer to a valid existing
+ texture in the backend API context/device, and already be populated with data.
+ The texture cannot be deleted until 'textureRelease' is called. 'textureRelease' will
+ be called with the textureReleaseContext returned by 'fulfill'.
+
+ Wrt when and how often the fulfill, imageRelease, and textureRelease callbacks will
+ be called:
+
+ For non-volatile promise images, 'fulfill' will be called at Context::insertRecording
+ time. Regardless of whether 'fulfill' succeeded or failed, 'imageRelease' will always be
+ called only once - when Skia will no longer try calling 'fulfill' to get a backend
+ texture. If 'fulfill' failed (i.e., it didn't return a valid backend texture) then
+ 'textureRelease' will never be called. If 'fulfill' was successful then
+ 'textureRelease' will be called only once when the GPU is done with the contents of the
+ promise image. This will usually occur during a Context::submit call but it could occur
+ earlier due to error conditions. 'fulfill' can be called multiple times if the promise
+ image is used in multiple recordings. If 'fulfill' fails, the insertRecording itself will
+ fail. Subsequent insertRecording calls (with Recordings that use the promise image) will
+ keep calling 'fulfill' until it succeeds.
+
+ For volatile promise images, 'fulfill' will be called each time the Recording is inserted
+ into a Context. Regardless of whether 'fulfill' succeeded or failed, 'imageRelease'
+ will always be called only once just like the non-volatile case. If 'fulfill' fails at
+ insertRecording-time, 'textureRelease' will never be called. If 'fulfill' was successful
+ then a 'textureRelease' matching that 'fulfill' will be called when the GPU is done with
+ the contents of the promise image. This will usually occur during a Context::submit call
+ but it could occur earlier due to error conditions.
+
+ @param recorder the recorder that will capture the commands creating the image
+ @param dimensions width & height of promised gpu texture
+ @param textureInfo structural information for the promised gpu texture
+ @param colorInfo color type, alpha type and colorSpace information for the image
+ @param isVolatile volatility of the promise image
+ @param fulfill function called to get the actual backend texture
+ @param imageRelease function called when any image-centric data can be deleted
+ @param textureRelease function called when the backend texture can be deleted
+ @param imageContext state passed to fulfill and imageRelease
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeGraphitePromiseTexture(skgpu::graphite::Recorder*,
+ SkISize dimensions,
+ const skgpu::graphite::TextureInfo&,
+ const SkColorInfo&,
+ skgpu::graphite::Volatile,
+ GraphitePromiseImageFulfillProc,
+ GraphitePromiseImageReleaseProc,
+ GraphitePromiseTextureReleaseProc,
+ GraphitePromiseImageContext);
+
+ /** Creates an SkImage from YUV[A] planar textures associated with the recorder.
+
+ @param recorder The recorder.
+ @param yuvaBackendTextures A set of textures containing YUVA data and a description of the
+ data and transformation to RGBA.
+ @param imageColorSpace range of colors of the resulting image after conversion to RGB;
+ may be nullptr
+ @param TextureReleaseProc called when the backend textures can be released
+ @param ReleaseContext state passed to TextureReleaseProc
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeGraphiteFromYUVABackendTextures(
+ skgpu::graphite::Recorder* recorder,
+ const skgpu::graphite::YUVABackendTextures& yuvaBackendTextures,
+ sk_sp<SkColorSpace> imageColorSpace,
+ TextureReleaseProc = nullptr,
+ ReleaseContext = nullptr);
+
+ struct RequiredImageProperties {
+ skgpu::Mipmapped fMipmapped;
+ };
+
+ /** Creates SkImage from SkYUVAPixmaps.
+
+ The image will remain planar with each plane converted to a texture using the passed
+ Recorder.
+
+ SkYUVAPixmaps has a SkYUVAInfo which specifies the transformation from YUV to RGB.
+ The SkColorSpace of the resulting RGB values is specified by imgColorSpace. This will
+ be the SkColorSpace reported by the image and when drawn the RGB values will be converted
+ from this space into the destination space (if the destination is tagged).
+
+ This is only supported using the GPU backend and will fail if recorder is nullptr.
+
+ SkYUVAPixmaps does not need to remain valid after this returns.
+
+ @param Recorder The Recorder to use for storing commands
+ @param pixmaps The planes as pixmaps with supported SkYUVAInfo that
+ specifies conversion to RGB.
+ @param RequiredImageProperties Properties the returned SkImage must possess (e.g.,
+ mipmaps)
+ @param limitToMaxTextureSize Downscale image to GPU maximum texture size, if necessary
+ @param imgColorSpace Range of colors of the resulting image; may be nullptr
+ @return Created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeGraphiteFromYUVAPixmaps(skgpu::graphite::Recorder*,
+ const SkYUVAPixmaps& pixmaps,
+ RequiredImageProperties = {},
+ bool limitToMaxTextureSize = false,
+ sk_sp<SkColorSpace> imgColorSpace = nullptr);
+
+ /** Graphite version of makeTextureImage.
+
+ Returns an SkImage backed by a Graphite texture, using the provided Recorder for creation
+ and uploads if necessary. The returned SkImage respects the required image properties'
+ mipmap setting for non-Graphite SkImages; i.e., if mipmapping is required, the backing
+ Graphite texture will have allocated mip map levels.
+
+ It is assumed that MIP maps are always supported by the GPU.
+
+ Returns original SkImage if the image is already Graphite-backed and the required mipmapping
+ is compatible with the backing Graphite texture. If the required mipmapping is not
+ compatible, nullptr will be returned.
+
+ Returns nullptr if no Recorder is provided, or if SkImage was created with another
+ Recorder and work on that Recorder has not been submitted.
+
+ @param Recorder the Recorder to use for storing commands
+ @param RequiredImageProperties properties the returned SkImage must possess (e.g.,
+ mipmaps)
+ @return created SkImage, or nullptr
+ */
+ sk_sp<SkImage> makeTextureImage(skgpu::graphite::Recorder*,
+ RequiredImageProperties = {}) const;
+
+ /** Returns subset of this image.
+
+ Returns nullptr if any of the following are true:
+ - Subset is empty
+ - Subset is not contained inside the image's bounds
+ - Pixels in the image could not be read or copied
+
+ If this image is texture-backed, the recorder parameter is required.
+ If the recorder parameter is provided, and the image is raster-backed, the subset will
+ be converted to texture-backed.
+
+ @param subset bounds of returned SkImage
+ @param recorder the recorder in which to create the new image
+ @param RequiredImageProperties properties the returned SkImage must possess (e.g.,
+ mipmaps)
+ @return the subsetted image, or nullptr
+ */
+ sk_sp<SkImage> makeSubset(const SkIRect& subset,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties = {}) const;
+
+ /** Creates SkImage in target SkColorSpace.
+ Returns nullptr if SkImage could not be created.
+
+ Returns original SkImage if it is in target SkColorSpace.
+ Otherwise, converts pixels from SkImage SkColorSpace to target SkColorSpace.
+ If SkImage colorSpace() returns nullptr, SkImage SkColorSpace is assumed to be sRGB.
+
+ If this image is graphite-backed, the recorder parameter is required.
+
+ @param targetColorSpace SkColorSpace describing color range of returned SkImage
+ @param recorder The Recorder in which to create the new image
+ @param RequiredImageProperties properties the returned SkImage must possess (e.g.,
+ mipmaps)
+ @return created SkImage in target SkColorSpace
+ */
+ sk_sp<SkImage> makeColorSpace(sk_sp<SkColorSpace> targetColorSpace,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties = {}) const;
+
+ /** Experimental.
+ Creates SkImage in target SkColorType and SkColorSpace.
+ Returns nullptr if SkImage could not be created.
+
+ Returns original SkImage if it is in target SkColorType and SkColorSpace.
+
+ If this image is graphite-backed, the recorder parameter is required.
+
+ @param targetColorType SkColorType of returned SkImage
+ @param targetColorSpace SkColorSpace of returned SkImage
+ @param recorder The Recorder in which to create the new image
+ @param RequiredImageProperties properties the returned SkImage must possess (e.g.,
+ mipmaps)
+ @return created SkImage in target SkColorType and SkColorSpace
+ */
+ sk_sp<SkImage> makeColorTypeAndColorSpace(SkColorType targetColorType,
+ sk_sp<SkColorSpace> targetColorSpace,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties = {}) const;
+
+#endif // SK_GRAPHITE
+
+ /** Returns raster image or lazy image. Copies SkImage backed by GPU texture into
+ CPU memory if needed. Returns original SkImage if decoded in raster bitmap,
+ or if encoded in a stream.
+
+ Returns nullptr if backed by GPU texture and copy fails.
+
+ @return raster image, lazy image, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_makeNonTextureImage
+ */
+ sk_sp<SkImage> makeNonTextureImage() const;
+
+ /** Returns raster image. Copies SkImage backed by GPU texture into CPU memory,
+ or decodes SkImage from lazy image. Returns original SkImage if decoded in
+ raster bitmap.
+
+ Returns nullptr if copy, decode, or pixel read fails.
+
+ If cachingHint is kAllow_CachingHint, pixels may be retained locally.
+ If cachingHint is kDisallow_CachingHint, pixels are not added to the local cache.
+
+ @return raster image, or nullptr
+
+ example: https://fiddle.skia.org/c/@Image_makeRasterImage
+ */
+ sk_sp<SkImage> makeRasterImage(CachingHint cachingHint = kDisallow_CachingHint) const;
+
+ /** Creates filtered SkImage. filter processes original SkImage, potentially changing
+ color, position, and size. subset is the bounds of original SkImage processed
+ by filter. clipBounds is the expected bounds of the filtered SkImage. outSubset
+ is required storage for the actual bounds of the filtered SkImage. offset is
+ required storage for translation of returned SkImage.
+
+ Returns nullptr if SkImage could not be created or if the recording context provided doesn't
+ match the GPU context in which the image was created. If nullptr is returned, outSubset
+ and offset are undefined.
+
+ Useful for animation of SkImageFilter that varies size from frame to frame.
+ Returned SkImage is created larger than required by filter so that GPU texture
+ can be reused with different sized effects. outSubset describes the valid bounds
+ of GPU texture returned. offset translates the returned SkImage to keep subsequent
+ animation frames aligned with respect to each other.
+
+ @param context the GrRecordingContext in play - if it exists
+ @param filter how SkImage is sampled when transformed
+ @param subset bounds of SkImage processed by filter
+ @param clipBounds expected bounds of filtered SkImage
+ @param outSubset storage for returned SkImage bounds
+ @param offset storage for returned SkImage translation
+ @return filtered SkImage, or nullptr
+ */
+ sk_sp<SkImage> makeWithFilter(GrRecordingContext* context,
+ const SkImageFilter* filter, const SkIRect& subset,
+ const SkIRect& clipBounds, SkIRect* outSubset,
+ SkIPoint* offset) const;
+
+ /** Defines a callback function, taking one parameter of type GrBackendTexture with
+ no return value. Function is called when back-end texture is to be released.
+ */
+ typedef std::function<void(GrBackendTexture)> BackendTextureReleaseProc;
+
+#if defined(SK_GANESH)
+ /** Creates a GrBackendTexture from the provided SkImage. Returns true and
+ stores result in backendTexture and backendTextureReleaseProc if
+ texture is created; otherwise, returns false and leaves
+ backendTexture and backendTextureReleaseProc unmodified.
+
+ Call backendTextureReleaseProc after deleting backendTexture.
+ backendTextureReleaseProc cleans up auxiliary data related to returned
+ backendTexture. The caller must delete returned backendTexture after use.
+
+ If SkImage is both texture backed and singly referenced, image is returned in
+ backendTexture without conversion or making a copy. SkImage is singly referenced
+         if it was transferred solely using std::move().
+
+ If SkImage is not texture backed, returns texture with SkImage contents.
+
+ @param context GPU context
+ @param image SkImage used for texture
+ @param backendTexture storage for back-end texture
+ @param backendTextureReleaseProc storage for clean up function
+ @return true if back-end texture was created
+ */
+ static bool MakeBackendTextureFromSkImage(GrDirectContext* context,
+ sk_sp<SkImage> image,
+ GrBackendTexture* backendTexture,
+ BackendTextureReleaseProc* backendTextureReleaseProc);
+#endif
+ /** Deprecated.
+ */
+ enum LegacyBitmapMode {
+ kRO_LegacyBitmapMode, //!< returned bitmap is read-only and immutable
+ };
+
+ /** Deprecated.
+ Creates raster SkBitmap with same pixels as SkImage. If legacyBitmapMode is
+ kRO_LegacyBitmapMode, returned bitmap is read-only and immutable.
+ Returns true if SkBitmap is stored in bitmap. Returns false and resets bitmap if
+ SkBitmap write did not succeed.
+
+ @param bitmap storage for legacy SkBitmap
+ @param legacyBitmapMode bitmap is read-only and immutable
+ @return true if SkBitmap was created
+ */
+ bool asLegacyBitmap(SkBitmap* bitmap,
+ LegacyBitmapMode legacyBitmapMode = kRO_LegacyBitmapMode) const;
+
+ /** Returns true if SkImage is backed by an image-generator or other service that creates
+ and caches its pixels or texture on-demand.
+
+ @return true if SkImage is created as needed
+
+ example: https://fiddle.skia.org/c/@Image_isLazyGenerated_a
+ example: https://fiddle.skia.org/c/@Image_isLazyGenerated_b
+ */
+ bool isLazyGenerated() const;
+
+ /** Creates SkImage in target SkColorSpace.
+ Returns nullptr if SkImage could not be created.
+
+ Returns original SkImage if it is in target SkColorSpace.
+ Otherwise, converts pixels from SkImage SkColorSpace to target SkColorSpace.
+ If SkImage colorSpace() returns nullptr, SkImage SkColorSpace is assumed to be sRGB.
+
+ If this image is texture-backed, the context parameter is required and must match the
+ context of the source image.
+
+ @param target SkColorSpace describing color range of returned SkImage
+ @param direct The GrDirectContext in play, if it exists
+ @return created SkImage in target SkColorSpace
+
+ example: https://fiddle.skia.org/c/@Image_makeColorSpace
+ */
+ sk_sp<SkImage> makeColorSpace(sk_sp<SkColorSpace> target,
+ GrDirectContext* direct = nullptr) const;
+
+ /** Experimental.
+ Creates SkImage in target SkColorType and SkColorSpace.
+ Returns nullptr if SkImage could not be created.
+
+ Returns original SkImage if it is in target SkColorType and SkColorSpace.
+
+ If this image is texture-backed, the context parameter is required and must match the
+ context of the source image.
+
+ @param targetColorType SkColorType of returned SkImage
+ @param targetColorSpace SkColorSpace of returned SkImage
+ @param direct The GrDirectContext in play, if it exists
+ @return created SkImage in target SkColorType and SkColorSpace
+ */
+ sk_sp<SkImage> makeColorTypeAndColorSpace(SkColorType targetColorType,
+ sk_sp<SkColorSpace> targetColorSpace,
+ GrDirectContext* direct = nullptr) const;
+
+ /** Creates a new SkImage identical to this one, but with a different SkColorSpace.
+ This does not convert the underlying pixel data, so the resulting image will draw
+ differently.
+ */
+ sk_sp<SkImage> reinterpretColorSpace(sk_sp<SkColorSpace> newColorSpace) const;
+
+private:
+ SkImage(const SkImageInfo& info, uint32_t uniqueID);
+
+ friend class SkBitmap;
+ friend class SkImage_Base; // for private ctor
+ friend class SkImage_Raster; // for withMipmaps
+ friend class SkMipmapBuilder;
+
+ SkImageInfo fInfo;
+ const uint32_t fUniqueID;
+
+ sk_sp<SkImage> withMipmaps(sk_sp<SkMipmap>) const;
+
+ using INHERITED = SkRefCnt;
+
+public:
+#if !defined(SK_DISABLE_LEGACY_IMAGE_FACTORIES)
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+ /** (See Skia bug 7447)
+ Creates SkImage from Android hardware buffer.
+ Returned SkImage takes a reference on the buffer.
+
+ Only available on Android, when __ANDROID_API__ is defined to be 26 or greater.
+
+ @param hardwareBuffer AHardwareBuffer Android hardware buffer
+ @param colorSpace range of colors; may be nullptr
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromAHardwareBuffer(
+ AHardwareBuffer* hardwareBuffer,
+ SkAlphaType alphaType = kPremul_SkAlphaType);
+ static sk_sp<SkImage> MakeFromAHardwareBuffer(
+ AHardwareBuffer* hardwareBuffer,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ GrSurfaceOrigin surfaceOrigin = kTopLeft_GrSurfaceOrigin);
+
+ /** Creates SkImage from Android hardware buffer and uploads the data from the SkPixmap to it.
+ Returned SkImage takes a reference on the buffer.
+
+ Only available on Android, when __ANDROID_API__ is defined to be 26 or greater.
+
+ @param context GPU context
+ @param pixmap SkPixmap that contains data to be uploaded to the AHardwareBuffer
+ @param hardwareBuffer AHardwareBuffer Android hardware buffer
+ @param surfaceOrigin surface origin for resulting image
+ @return created SkImage, or nullptr
+ */
+ static sk_sp<SkImage> MakeFromAHardwareBufferWithData(
+ GrDirectContext* context,
+ const SkPixmap& pixmap,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin surfaceOrigin = kTopLeft_GrSurfaceOrigin);
+#endif // SK_BUILD_FOR_ANDROID && __ANDROID_API__ >= 26
+
+#endif // !SK_DISABLE_LEGACY_IMAGE_FACTORIES
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkImageEncoder.h b/gfx/skia/skia/include/core/SkImageEncoder.h
new file mode 100644
index 0000000000..ca2406b4d1
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageEncoder.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageEncoder_DEFINED
+#define SkImageEncoder_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkBitmap;
+class SkData;
+class SkPixmap;
+class SkWStream;
+enum class SkEncodedImageFormat;
+
+/**
+ * Encode SkPixmap in the given binary image format.
+ *
+ * @param dst results are written to this stream.
+ * @param src source pixels.
+ * @param format image format, not all formats are supported.
+ * @param quality range from 0-100, this is supported by jpeg and webp.
+ * higher values correspond to improved visual quality, but less compression.
+ *
+ * @return false iff input is bad or format is unsupported.
+ *
+ * Will always return false if Skia is compiled without image
+ * encoders.
+ *
+ * For SkEncodedImageFormat::kWEBP, if quality is 100, it will use lossless compression. Otherwise
+ * it will use lossy.
+ *
+ * For examples of encoding an image to a file or to a block of memory,
+ * see tools/ToolUtils.h.
+ */
+SK_API bool SkEncodeImage(SkWStream* dst, const SkPixmap& src,
+ SkEncodedImageFormat format, int quality);
+
+/**
+ * The following helper function wraps SkEncodeImage().
+ */
+SK_API bool SkEncodeImage(SkWStream* dst, const SkBitmap& src, SkEncodedImageFormat f, int q);
+
+/**
+ * Encode SkPixmap in the given binary image format.
+ *
+ * @param src source pixels.
+ * @param format image format, not all formats are supported.
+ * @param quality range from 0-100, this is supported by jpeg and webp.
+ * higher values correspond to improved visual quality, but less compression.
+ *
+ * @return encoded data or nullptr if input is bad or format is unsupported.
+ *
+ * Will always return nullptr if Skia is compiled without image
+ * encoders.
+ *
+ * For SkEncodedImageFormat::kWEBP, if quality is 100, it will use lossless compression. Otherwise
+ * it will use lossy.
+ */
+SK_API sk_sp<SkData> SkEncodePixmap(const SkPixmap& src, SkEncodedImageFormat format, int quality);
+
+/**
+ * Helper that extracts the pixmap from the bitmap, and then calls SkEncodePixmap()
+ */
+SK_API sk_sp<SkData> SkEncodeBitmap(const SkBitmap& src, SkEncodedImageFormat format, int quality);
+
+#endif // SkImageEncoder_DEFINED
diff --git a/gfx/skia/skia/include/core/SkImageFilter.h b/gfx/skia/skia/include/core/SkImageFilter.h
new file mode 100644
index 0000000000..e2240916d4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageFilter.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilter_DEFINED
+#define SkImageFilter_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRect.h"
+
+class SkColorFilter;
+
+/**
+ * Base class for image filters. If one is installed in the paint, then all drawing occurs as
+ * usual, but it is as if the drawing happened into an offscreen (before the xfermode is applied).
+ *  This offscreen bitmap will then be handed to the imagefilter, which in turn creates a new bitmap
+ * which is what will finally be drawn to the device (using the original xfermode).
+ *
+ * The local space of image filters matches the local space of the drawn geometry. For instance if
+ * there is rotation on the canvas, the blur will be computed along those rotated axes and not in
+ * the device space. In order to achieve this result, the actual drawing of the geometry may happen
+ * in an unrotated coordinate system so that the filtered image can be computed more easily, and
+ * then it will be post transformed to match what would have been produced if the geometry were
+ * drawn with the total canvas matrix to begin with.
+ */
+class SK_API SkImageFilter : public SkFlattenable {
+public:
+ enum MapDirection {
+ kForward_MapDirection,
+ kReverse_MapDirection,
+ };
+ /**
+ * Map a device-space rect recursively forward or backward through the filter DAG.
+ * kForward_MapDirection is used to determine which pixels of the destination canvas a source
+ * image rect would touch after filtering. kReverse_MapDirection is used to determine which rect
+ * of the source image would be required to fill the given rect (typically, clip bounds). Used
+ * for clipping and temp-buffer allocations, so the result need not be exact, but should never
+ * be smaller than the real answer. The default implementation recursively unions all input
+ * bounds, or returns the source rect if no inputs.
+ *
+ * In kReverse mode, 'inputRect' is the device-space bounds of the input pixels. In kForward
+ * mode it should always be null. If 'inputRect' is null in kReverse mode the resulting answer
+ * may be incorrect.
+ */
+ SkIRect filterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect = nullptr) const;
+
+ /**
+ * Returns whether this image filter is a color filter and puts the color filter into the
+ * "filterPtr" parameter if it can. Does nothing otherwise.
+ * If this returns false, then the filterPtr is unchanged.
+     *  If this returns true, then if filterPtr is not null, it must be set to a ref'd colorfilter
+ * (i.e. it may not be set to NULL).
+ */
+ bool isColorFilterNode(SkColorFilter** filterPtr) const;
+
+ // DEPRECATED : use isColorFilterNode() instead
+ bool asColorFilter(SkColorFilter** filterPtr) const {
+ return this->isColorFilterNode(filterPtr);
+ }
+
+ /**
+ * Returns true (and optionally returns a ref'd filter) if this imagefilter can be completely
+ * replaced by the returned colorfilter. i.e. the two effects will affect drawing in the same
+ * way.
+ */
+ bool asAColorFilter(SkColorFilter** filterPtr) const;
+
+ /**
+ * Returns the number of inputs this filter will accept (some inputs can be NULL).
+ */
+ int countInputs() const;
+
+ /**
+ * Returns the input filter at a given index, or NULL if no input is connected. The indices
+ * used are filter-specific.
+ */
+ const SkImageFilter* getInput(int i) const;
+
+ // Default impl returns union of all input bounds.
+ virtual SkRect computeFastBounds(const SkRect& bounds) const;
+
+ // Can this filter DAG compute the resulting bounds of an object-space rectangle?
+ bool canComputeFastBounds() const;
+
+ /**
+ * If this filter can be represented by another filter + a localMatrix, return that filter,
+ * else return null.
+ */
+ sk_sp<SkImageFilter> makeWithLocalMatrix(const SkMatrix& matrix) const;
+
+ static sk_sp<SkImageFilter> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkImageFilter>(static_cast<SkImageFilter*>(
+ SkFlattenable::Deserialize(kSkImageFilter_Type, data, size, procs).release()));
+ }
+
+protected:
+
+ sk_sp<SkImageFilter> refMe() const {
+ return sk_ref_sp(const_cast<SkImageFilter*>(this));
+ }
+
+private:
+ friend class SkImageFilter_Base;
+
+ using INHERITED = SkFlattenable;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkImageGenerator.h b/gfx/skia/skia/include/core/SkImageGenerator.h
new file mode 100644
index 0000000000..438739ec69
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageGenerator.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageGenerator_DEFINED
+#define SkImageGenerator_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkYUVAPixmaps.h"
+#include "include/private/base/SkAPI.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrTypes.h"
+#endif
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <optional>
+
+class GrRecordingContext;
+class GrSurfaceProxyView;
+class SkColorSpace;
+class SkMatrix;
+class SkPaint;
+class SkPicture;
+class SkSurfaceProps;
+enum SkAlphaType : int;
+enum class GrImageTexGenPolicy : int;
+namespace skgpu { enum class Mipmapped : bool; }
+struct SkISize;
+
+class SK_API SkImageGenerator {
+public:
+ /**
+ * The PixelRef which takes ownership of this SkImageGenerator
+ * will call the image generator's destructor.
+ */
+ virtual ~SkImageGenerator() { }
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /**
+ * Return a ref to the encoded (i.e. compressed) representation
+ * of this data.
+ *
+ * If non-NULL is returned, the caller is responsible for calling
+ * unref() on the data when it is finished.
+ */
+ sk_sp<SkData> refEncodedData() {
+ return this->onRefEncodedData();
+ }
+
+ /**
+ * Return the ImageInfo associated with this generator.
+ */
+ const SkImageInfo& getInfo() const { return fInfo; }
+
+ /**
+ * Can this generator be used to produce images that will be drawable to the specified context
+ * (or to CPU, if context is nullptr)?
+ */
+ bool isValid(GrRecordingContext* context) const {
+ return this->onIsValid(context);
+ }
+
+ /**
+ * Decode into the given pixels, a block of memory of size at
+ * least (info.fHeight - 1) * rowBytes + (info.fWidth *
+ * bytesPerPixel)
+ *
+ * Repeated calls to this function should give the same results,
+ * allowing the PixelRef to be immutable.
+ *
+ * @param info A description of the format
+ * expected by the caller. This can simply be identical
+ * to the info returned by getInfo().
+ *
+ * This contract also allows the caller to specify
+ * different output-configs, which the implementation can
+ * decide to support or not.
+ *
+ * A size that does not match getInfo() implies a request
+ * to scale. If the generator cannot perform this scale,
+ * it will return false.
+ *
+ * @return true on success.
+ */
+ bool getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes);
+
+ bool getPixels(const SkPixmap& pm) {
+ return this->getPixels(pm.info(), pm.writable_addr(), pm.rowBytes());
+ }
+
+ /**
+ * If decoding to YUV is supported, this returns true. Otherwise, this
+ * returns false and the caller will ignore output parameter yuvaPixmapInfo.
+ *
+ * @param supportedDataTypes Indicates the data type/planar config combinations that are
+ * supported by the caller. If the generator supports decoding to
+ * YUV(A), but not as a type in supportedDataTypes, this method
+ * returns false.
+ * @param yuvaPixmapInfo Output parameter that specifies the planar configuration, subsampling,
+ * orientation, chroma siting, plane color types, and row bytes.
+ */
+ bool queryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes,
+ SkYUVAPixmapInfo* yuvaPixmapInfo) const;
+
+ /**
+ * Returns true on success and false on failure.
+ * This always attempts to perform a full decode. To get the planar
+ * configuration without decoding use queryYUVAInfo().
+ *
+ * @param yuvaPixmaps Contains preallocated pixmaps configured according to a successful call
+ * to queryYUVAInfo().
+ */
+ bool getYUVAPlanes(const SkYUVAPixmaps& yuvaPixmaps);
+
+#if defined(SK_GANESH)
+ /**
+ * If the generator can natively/efficiently return its pixels as a GPU image (backed by a
+ * texture) this will return that image. If not, this will return NULL.
+ *
+ * Regarding the GrRecordingContext parameter:
+ *
+ * It must be non-NULL. The generator should only succeed if:
+ * - its internal context is the same
+ * - it can somehow convert its texture into one that is valid for the provided context.
+ *
+ * If the mipmapped parameter is kYes, the generator should try to create a TextureProxy that
+ * at least has the mip levels allocated and the base layer filled in. If this is not possible,
+ * the generator is allowed to return a non mipped proxy, but this will have some additional
+ * overhead in later allocating mips and copying of the base layer.
+ *
+ * GrImageTexGenPolicy determines whether or not a new texture must be created (and its budget
+ * status) or whether this may (but is not required to) return a pre-existing texture that is
+ * retained by the generator (kDraw).
+ */
+ GrSurfaceProxyView generateTexture(GrRecordingContext*,
+ const SkImageInfo& info,
+ skgpu::Mipmapped mipmapped,
+ GrImageTexGenPolicy);
+#endif
+
+#if defined(SK_GRAPHITE)
+ sk_sp<SkImage> makeTextureImage(skgpu::graphite::Recorder*,
+ const SkImageInfo&,
+ skgpu::Mipmapped);
+#endif
+
+ /**
+ * If the default image decoder system can interpret the specified (encoded) data, then
+ * this returns a new ImageGenerator for it. Otherwise this returns NULL. Either way
+ * the caller is still responsible for managing their ownership of the data.
+ * By default, images will be converted to premultiplied pixels. The alpha type can be
+ * overridden by specifying kPremul_SkAlphaType or kUnpremul_SkAlphaType. Specifying
+ * kOpaque_SkAlphaType is not supported, and will return NULL.
+ */
+ static std::unique_ptr<SkImageGenerator> MakeFromEncoded(
+ sk_sp<SkData>, std::optional<SkAlphaType> = std::nullopt);
+
+ /** Return a new image generator backed by the specified picture. If the size is empty or
+ * the picture is NULL, this returns NULL.
+ * The optional matrix and paint arguments are passed to drawPicture() at rasterization
+ * time.
+ */
+ static std::unique_ptr<SkImageGenerator> MakeFromPicture(const SkISize&, sk_sp<SkPicture>,
+ const SkMatrix*, const SkPaint*,
+ SkImage::BitDepth,
+ sk_sp<SkColorSpace>,
+ SkSurfaceProps props);
+ static std::unique_ptr<SkImageGenerator> MakeFromPicture(const SkISize&, sk_sp<SkPicture>,
+ const SkMatrix*, const SkPaint*,
+ SkImage::BitDepth,
+ sk_sp<SkColorSpace>);
+protected:
+ static constexpr int kNeedNewImageUniqueID = 0;
+
+ SkImageGenerator(const SkImageInfo& info, uint32_t uniqueId = kNeedNewImageUniqueID);
+
+ virtual sk_sp<SkData> onRefEncodedData() { return nullptr; }
+ struct Options {};
+ virtual bool onGetPixels(const SkImageInfo&, void*, size_t, const Options&) { return false; }
+ virtual bool onIsValid(GrRecordingContext*) const { return true; }
+ virtual bool onQueryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes&,
+ SkYUVAPixmapInfo*) const { return false; }
+ virtual bool onGetYUVAPlanes(const SkYUVAPixmaps&) { return false; }
+#if defined(SK_GANESH)
+ // returns nullptr
+ virtual GrSurfaceProxyView onGenerateTexture(GrRecordingContext*, const SkImageInfo&,
+ GrMipmapped, GrImageTexGenPolicy);
+
+ // Most internal SkImageGenerators produce textures and views that use kTopLeft_GrSurfaceOrigin.
+ // If the generator may produce textures with different origins (e.g.
+ // GrAHardwareBufferImageGenerator) it should override this function to return the correct
+ // origin.
+ virtual GrSurfaceOrigin origin() const { return kTopLeft_GrSurfaceOrigin; }
+#endif
+
+#if defined(SK_GRAPHITE)
+ virtual sk_sp<SkImage> onMakeTextureImage(skgpu::graphite::Recorder*,
+ const SkImageInfo&,
+ skgpu::Mipmapped);
+#endif
+
+private:
+ const SkImageInfo fInfo;
+ const uint32_t fUniqueID;
+
+ friend class SkImage_Lazy;
+
+ // This is our default impl, which may be different on different platforms.
+ // It is called from NewFromEncoded() after it has checked for any runtime factory.
+ // The SkData will never be NULL, as that will have been checked by NewFromEncoded.
+ static std::unique_ptr<SkImageGenerator> MakeFromEncodedImpl(sk_sp<SkData>,
+ std::optional<SkAlphaType>);
+
+ SkImageGenerator(SkImageGenerator&&) = delete;
+ SkImageGenerator(const SkImageGenerator&) = delete;
+ SkImageGenerator& operator=(SkImageGenerator&&) = delete;
+ SkImageGenerator& operator=(const SkImageGenerator&) = delete;
+};
+
+#endif // SkImageGenerator_DEFINED
diff --git a/gfx/skia/skia/include/core/SkImageInfo.h b/gfx/skia/skia/include/core/SkImageInfo.h
new file mode 100644
index 0000000000..b566171900
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkImageInfo.h
@@ -0,0 +1,616 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageInfo_DEFINED
+#define SkImageInfo_DEFINED
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTFitsIn.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+class SkColorSpace;
+
+/** Returns the number of bytes required to store a pixel, including unused padding.
+ Returns zero if ct is kUnknown_SkColorType or invalid.
+
+ @return bytes per pixel
+*/
+SK_API int SkColorTypeBytesPerPixel(SkColorType ct);
+
+/** Returns true if SkColorType always decodes alpha to 1.0, making the pixel
+ fully opaque. If true, SkColorType does not reserve bits to encode alpha.
+
+ @return true if alpha is always set to 1.0
+*/
+SK_API bool SkColorTypeIsAlwaysOpaque(SkColorType ct);
+
+/** Returns true if canonical can be set to a valid SkAlphaType for colorType. If
+ there is more than one valid canonical SkAlphaType, set to alphaType, if valid.
+ If true is returned and canonical is not nullptr, store valid SkAlphaType.
+
+ Returns false only if alphaType is kUnknown_SkAlphaType, color type is not
+ kUnknown_SkColorType, and SkColorType is not always opaque. If false is returned,
+ canonical is ignored.
+
+ @param canonical storage for SkAlphaType
+ @return true if valid SkAlphaType can be associated with colorType
+*/
+SK_API bool SkColorTypeValidateAlphaType(SkColorType colorType, SkAlphaType alphaType,
+ SkAlphaType* canonical = nullptr);
+
+/** \enum SkImageInfo::SkYUVColorSpace
+ Describes color range of YUV pixels. The color mapping from YUV to RGB varies
+ depending on the source. YUV pixels may be generated by JPEG images, standard
+ video streams, or high definition video streams. Each has its own mapping from
+ YUV to RGB.
+
+ JPEG YUV values encode the full range of 0 to 255 for all three components.
+ Video YUV values often range from 16 to 235 for Y and from 16 to 240 for U and V (limited).
+ Details of encoding and conversion to RGB are described in YCbCr color space.
+
+ The identity colorspace exists to provide a utility mapping from Y to R, U to G and V to B.
+ It can be used to visualize the YUV planes or to explicitly post process the YUV channels.
+*/
+enum SkYUVColorSpace : int {
+ kJPEG_Full_SkYUVColorSpace, //!< describes full range
+ kRec601_Limited_SkYUVColorSpace, //!< describes SDTV range
+ kRec709_Full_SkYUVColorSpace, //!< describes HDTV range
+ kRec709_Limited_SkYUVColorSpace,
+ kBT2020_8bit_Full_SkYUVColorSpace, //!< describes UHDTV range, non-constant-luminance
+ kBT2020_8bit_Limited_SkYUVColorSpace,
+ kBT2020_10bit_Full_SkYUVColorSpace,
+ kBT2020_10bit_Limited_SkYUVColorSpace,
+ kBT2020_12bit_Full_SkYUVColorSpace,
+ kBT2020_12bit_Limited_SkYUVColorSpace,
+ kIdentity_SkYUVColorSpace, //!< maps Y->R, U->G, V->B
+
+ kLastEnum_SkYUVColorSpace = kIdentity_SkYUVColorSpace, //!< last valid value
+
+ // Legacy (deprecated) names:
+ kJPEG_SkYUVColorSpace = kJPEG_Full_SkYUVColorSpace,
+ kRec601_SkYUVColorSpace = kRec601_Limited_SkYUVColorSpace,
+ kRec709_SkYUVColorSpace = kRec709_Limited_SkYUVColorSpace,
+ kBT2020_SkYUVColorSpace = kBT2020_8bit_Limited_SkYUVColorSpace,
+};
+
+/** \struct SkColorInfo
+ Describes pixel and encoding. SkImageInfo can be created from SkColorInfo by
+ providing dimensions.
+
+ It encodes how pixel bits describe alpha, transparency; color components red, blue,
+ and green; and SkColorSpace, the range and linearity of colors.
+*/
+class SK_API SkColorInfo {
+public:
+ /** Creates an SkColorInfo with kUnknown_SkColorType, kUnknown_SkAlphaType,
+ and no SkColorSpace.
+
+ @return empty SkImageInfo
+ */
+ SkColorInfo();
+ ~SkColorInfo();
+
+ /** Creates SkColorInfo from SkColorType ct, SkAlphaType at, and optionally SkColorSpace cs.
+
+ If SkColorSpace cs is nullptr and SkColorInfo is part of drawing source: SkColorSpace
+ defaults to sRGB, mapping into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+ @return created SkColorInfo
+ */
+ SkColorInfo(SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs);
+
+ SkColorInfo(const SkColorInfo&);
+ SkColorInfo(SkColorInfo&&);
+
+ SkColorInfo& operator=(const SkColorInfo&);
+ SkColorInfo& operator=(SkColorInfo&&);
+
+ SkColorSpace* colorSpace() const;
+ sk_sp<SkColorSpace> refColorSpace() const;
+ SkColorType colorType() const { return fColorType; }
+ SkAlphaType alphaType() const { return fAlphaType; }
+
+ bool isOpaque() const {
+ return SkAlphaTypeIsOpaque(fAlphaType)
+ || SkColorTypeIsAlwaysOpaque(fColorType);
+ }
+
+ bool gammaCloseToSRGB() const;
+
+ /** Does other represent the same color type, alpha type, and color space? */
+ bool operator==(const SkColorInfo& other) const;
+
+ /** Does other represent a different color type, alpha type, or color space? */
+ bool operator!=(const SkColorInfo& other) const;
+
+ /** Creates SkColorInfo with same SkColorType, SkColorSpace, with SkAlphaType set
+ to newAlphaType.
+
+ Created SkColorInfo contains newAlphaType even if it is incompatible with
+ SkColorType, in which case SkAlphaType in SkColorInfo is ignored.
+ */
+ SkColorInfo makeAlphaType(SkAlphaType newAlphaType) const;
+
+ /** Creates new SkColorInfo with same SkAlphaType, SkColorSpace, with SkColorType
+ set to newColorType.
+ */
+ SkColorInfo makeColorType(SkColorType newColorType) const;
+
+ /** Creates SkColorInfo with same SkAlphaType, SkColorType, with SkColorSpace
+ set to cs. cs may be nullptr.
+ */
+ SkColorInfo makeColorSpace(sk_sp<SkColorSpace> cs) const;
+
+ /** Returns number of bytes per pixel required by SkColorType.
+ Returns zero if colorType() is kUnknown_SkColorType.
+
+ @return bytes in pixel
+
+ example: https://fiddle.skia.org/c/@ImageInfo_bytesPerPixel
+ */
+ int bytesPerPixel() const;
+
+ /** Returns bit shift converting row bytes to row pixels.
+ Returns zero for kUnknown_SkColorType.
+
+ @return one of: 0, 1, 2, 3, 4; left shift to convert pixels to bytes
+
+ example: https://fiddle.skia.org/c/@ImageInfo_shiftPerPixel
+ */
+ int shiftPerPixel() const;
+
+private:
+ sk_sp<SkColorSpace> fColorSpace;
+ SkColorType fColorType = kUnknown_SkColorType;
+ SkAlphaType fAlphaType = kUnknown_SkAlphaType;
+};
+
+/** \struct SkImageInfo
+    Describes pixel dimensions and encoding. SkBitmap, SkImage, SkPixmap, and SkSurface
+ can be created from SkImageInfo. SkImageInfo can be retrieved from SkBitmap and
+ SkPixmap, but not from SkImage and SkSurface. For example, SkImage and SkSurface
+ implementations may defer pixel depth, so may not completely specify SkImageInfo.
+
+ SkImageInfo contains dimensions, the pixel integral width and height. It encodes
+ how pixel bits describe alpha, transparency; color components red, blue,
+ and green; and SkColorSpace, the range and linearity of colors.
+*/
+struct SK_API SkImageInfo {
+public:
+
+ /** Creates an empty SkImageInfo with kUnknown_SkColorType, kUnknown_SkAlphaType,
+ a width and height of zero, and no SkColorSpace.
+
+ @return empty SkImageInfo
+ */
+ SkImageInfo() = default;
+
+ /** Creates SkImageInfo from integral dimensions width and height, SkColorType ct,
+ SkAlphaType at, and optionally SkColorSpace cs.
+
+ If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace
+ defaults to sRGB, mapping into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ static SkImageInfo Make(int width, int height, SkColorType ct, SkAlphaType at);
+ static SkImageInfo Make(int width, int height, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs);
+ static SkImageInfo Make(SkISize dimensions, SkColorType ct, SkAlphaType at);
+ static SkImageInfo Make(SkISize dimensions, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs);
+
+ /** Creates SkImageInfo from integral dimensions and SkColorInfo colorInfo,
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param dimensions pixel column and row count; must be zeros or greater
+        @param colorInfo   the pixel encoding consisting of SkColorType, SkAlphaType, and
+ SkColorSpace (which may be nullptr)
+ @return created SkImageInfo
+ */
+ static SkImageInfo Make(SkISize dimensions, const SkColorInfo& colorInfo) {
+ return SkImageInfo(dimensions, colorInfo);
+ }
+ static SkImageInfo Make(SkISize dimensions, SkColorInfo&& colorInfo) {
+ return SkImageInfo(dimensions, std::move(colorInfo));
+ }
+
+ /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType,
+ SkAlphaType at, and optionally SkColorSpace cs. kN32_SkColorType will equal either
+ kBGRA_8888_SkColorType or kRGBA_8888_SkColorType, whichever is optimal.
+
+ If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace
+ defaults to sRGB, mapping into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeN32(int width, int height, SkAlphaType at);
+ static SkImageInfo MakeN32(int width, int height, SkAlphaType at, sk_sp<SkColorSpace> cs);
+
+ /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType,
+ SkAlphaType at, with sRGB SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @return created SkImageInfo
+
+ example: https://fiddle.skia.org/c/@ImageInfo_MakeS32
+ */
+ static SkImageInfo MakeS32(int width, int height, SkAlphaType at);
+
+ /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType,
+ kPremul_SkAlphaType, with optional SkColorSpace.
+
+ If SkColorSpace cs is nullptr and SkImageInfo is part of drawing source: SkColorSpace
+ defaults to sRGB, mapping into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeN32Premul(int width, int height);
+ static SkImageInfo MakeN32Premul(int width, int height, sk_sp<SkColorSpace> cs);
+
+ /** Creates SkImageInfo from integral dimensions width and height, kN32_SkColorType,
+ kPremul_SkAlphaType, with SkColorSpace set to nullptr.
+
+ If SkImageInfo is part of drawing source: SkColorSpace defaults to sRGB, mapping
+ into SkSurface SkColorSpace.
+
+ Parameters are not validated to see if their values are legal, or that the
+ combination is supported.
+
+ @param dimensions width and height, each must be zero or greater
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeN32Premul(SkISize dimensions);
+ static SkImageInfo MakeN32Premul(SkISize dimensions, sk_sp<SkColorSpace> cs);
+
+ /** Creates SkImageInfo from integral dimensions width and height, kAlpha_8_SkColorType,
+ kPremul_SkAlphaType, with SkColorSpace set to nullptr.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeA8(int width, int height);
+ /** Creates SkImageInfo from integral dimensions, kAlpha_8_SkColorType,
+ kPremul_SkAlphaType, with SkColorSpace set to nullptr.
+
+ @param dimensions pixel row and column count; must be zero or greater
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeA8(SkISize dimensions);
+
+ /** Creates SkImageInfo from integral dimensions width and height, kUnknown_SkColorType,
+ kUnknown_SkAlphaType, with SkColorSpace set to nullptr.
+
+ Returned SkImageInfo as part of source does not draw, and as part of destination
+ can not be drawn to.
+
+ @param width pixel column count; must be zero or greater
+ @param height pixel row count; must be zero or greater
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeUnknown(int width, int height);
+
+ /** Creates SkImageInfo from integral dimensions width and height set to zero,
+ kUnknown_SkColorType, kUnknown_SkAlphaType, with SkColorSpace set to nullptr.
+
+ Returned SkImageInfo as part of source does not draw, and as part of destination
+ can not be drawn to.
+
+ @return created SkImageInfo
+ */
+ static SkImageInfo MakeUnknown() {
+ return MakeUnknown(0, 0);
+ }
+
+ /** Returns pixel count in each row.
+
+ @return pixel width
+ */
+ int width() const { return fDimensions.width(); }
+
+ /** Returns pixel row count.
+
+ @return pixel height
+ */
+ int height() const { return fDimensions.height(); }
+
+ SkColorType colorType() const { return fColorInfo.colorType(); }
+
+ SkAlphaType alphaType() const { return fColorInfo.alphaType(); }
+
+ /** Returns SkColorSpace, the range of colors. The reference count of
+ SkColorSpace is unchanged. The returned SkColorSpace is immutable.
+
+ @return SkColorSpace, or nullptr
+ */
+ SkColorSpace* colorSpace() const;
+
+ /** Returns smart pointer to SkColorSpace, the range of colors. The smart pointer
+ tracks the number of objects sharing this SkColorSpace reference so the memory
+ is released when the owners destruct.
+
+ The returned SkColorSpace is immutable.
+
+ @return SkColorSpace wrapped in a smart pointer
+ */
+ sk_sp<SkColorSpace> refColorSpace() const;
+
+ /** Returns if SkImageInfo describes an empty area of pixels by checking if either
+ width or height is zero or smaller.
+
+ @return true if either dimension is zero or smaller
+ */
+ bool isEmpty() const { return fDimensions.isEmpty(); }
+
+ /** Returns the dimensionless SkColorInfo that represents the same color type,
+ alpha type, and color space as this SkImageInfo.
+ */
+ const SkColorInfo& colorInfo() const { return fColorInfo; }
+
+ /** Returns true if SkAlphaType is set to hint that all pixels are opaque; their
+ alpha value is implicitly or explicitly 1.0. If true, and all pixels are
+ not opaque, Skia may draw incorrectly.
+
+ Does not check if SkColorType allows alpha, or if any pixel value has
+ transparency.
+
+ @return true if SkAlphaType is kOpaque_SkAlphaType
+ */
+ bool isOpaque() const { return fColorInfo.isOpaque(); }
+
+ /** Returns SkISize { width(), height() }.
+
+ @return integral size of width() and height()
+ */
+ SkISize dimensions() const { return fDimensions; }
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @return integral rectangle from origin to width() and height()
+ */
+ SkIRect bounds() const { return SkIRect::MakeSize(fDimensions); }
+
+ /** Returns true if associated SkColorSpace is not nullptr, and SkColorSpace gamma
+ is approximately the same as sRGB.
+        This includes color spaces whose transfer function closely matches sRGB.
+
+ @return true if SkColorSpace gamma is approximately the same as sRGB
+ */
+ bool gammaCloseToSRGB() const { return fColorInfo.gammaCloseToSRGB(); }
+
+ /** Creates SkImageInfo with the same SkColorType, SkColorSpace, and SkAlphaType,
+ with dimensions set to width and height.
+
+ @param newWidth pixel column count; must be zero or greater
+ @param newHeight pixel row count; must be zero or greater
+ @return created SkImageInfo
+ */
+ SkImageInfo makeWH(int newWidth, int newHeight) const {
+ return Make({newWidth, newHeight}, fColorInfo);
+ }
+
+ /** Creates SkImageInfo with the same SkColorType, SkColorSpace, and SkAlphaType,
+ with dimensions set to newDimensions.
+
+ @param newSize pixel column and row count; must be zero or greater
+ @return created SkImageInfo
+ */
+ SkImageInfo makeDimensions(SkISize newSize) const {
+ return Make(newSize, fColorInfo);
+ }
+
+ /** Creates SkImageInfo with same SkColorType, SkColorSpace, width, and height,
+ with SkAlphaType set to newAlphaType.
+
+ Created SkImageInfo contains newAlphaType even if it is incompatible with
+ SkColorType, in which case SkAlphaType in SkImageInfo is ignored.
+
+ @return created SkImageInfo
+ */
+ SkImageInfo makeAlphaType(SkAlphaType newAlphaType) const {
+ return Make(fDimensions, fColorInfo.makeAlphaType(newAlphaType));
+ }
+
+ /** Creates SkImageInfo with same SkAlphaType, SkColorSpace, width, and height,
+ with SkColorType set to newColorType.
+
+ @return created SkImageInfo
+ */
+ SkImageInfo makeColorType(SkColorType newColorType) const {
+ return Make(fDimensions, fColorInfo.makeColorType(newColorType));
+ }
+
+ /** Creates SkImageInfo with same SkAlphaType, SkColorType, width, and height,
+ with SkColorSpace set to cs.
+
+ @param cs range of colors; may be nullptr
+ @return created SkImageInfo
+ */
+ SkImageInfo makeColorSpace(sk_sp<SkColorSpace> cs) const;
+
+ /** Returns number of bytes per pixel required by SkColorType.
+        Returns zero if colorType() is kUnknown_SkColorType.
+
+ @return bytes in pixel
+ */
+ int bytesPerPixel() const { return fColorInfo.bytesPerPixel(); }
+
+ /** Returns bit shift converting row bytes to row pixels.
+ Returns zero for kUnknown_SkColorType.
+
+        @return one of: 0, 1, 2, 3, 4; left shift to convert pixels to bytes
+ */
+ int shiftPerPixel() const { return fColorInfo.shiftPerPixel(); }
+
+ /** Returns minimum bytes per row, computed from pixel width() and SkColorType, which
+ specifies bytesPerPixel(). SkBitmap maximum value for row bytes must fit
+ in 31 bits.
+
+ @return width() times bytesPerPixel() as unsigned 64-bit integer
+ */
+ uint64_t minRowBytes64() const {
+ return (uint64_t)sk_64_mul(this->width(), this->bytesPerPixel());
+ }
+
+ /** Returns minimum bytes per row, computed from pixel width() and SkColorType, which
+ specifies bytesPerPixel(). SkBitmap maximum value for row bytes must fit
+ in 31 bits.
+
+ @return width() times bytesPerPixel() as size_t
+ */
+ size_t minRowBytes() const {
+ uint64_t minRowBytes = this->minRowBytes64();
+ if (!SkTFitsIn<int32_t>(minRowBytes)) {
+ return 0;
+ }
+ return (size_t)minRowBytes;
+ }
+
+ /** Returns byte offset of pixel from pixel base address.
+
+ Asserts in debug build if x or y is outside of bounds. Does not assert if
+ rowBytes is smaller than minRowBytes(), even though result may be incorrect.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @param rowBytes size of pixel row or larger
+ @return offset within pixel array
+
+ example: https://fiddle.skia.org/c/@ImageInfo_computeOffset
+ */
+ size_t computeOffset(int x, int y, size_t rowBytes) const;
+
+ /** Compares SkImageInfo with other, and returns true if width, height, SkColorType,
+ SkAlphaType, and SkColorSpace are equivalent.
+
+ @param other SkImageInfo to compare
+ @return true if SkImageInfo equals other
+ */
+ bool operator==(const SkImageInfo& other) const {
+ return fDimensions == other.fDimensions && fColorInfo == other.fColorInfo;
+ }
+
+ /** Compares SkImageInfo with other, and returns true if width, height, SkColorType,
+ SkAlphaType, and SkColorSpace are not equivalent.
+
+ @param other SkImageInfo to compare
+ @return true if SkImageInfo is not equal to other
+ */
+ bool operator!=(const SkImageInfo& other) const {
+ return !(*this == other);
+ }
+
+ /** Returns storage required by pixel array, given SkImageInfo dimensions, SkColorType,
+ and rowBytes. rowBytes is assumed to be at least as large as minRowBytes().
+
+ Returns zero if height is zero.
+ Returns SIZE_MAX if answer exceeds the range of size_t.
+
+ @param rowBytes size of pixel row or larger
+ @return memory required by pixel buffer
+
+ example: https://fiddle.skia.org/c/@ImageInfo_computeByteSize
+ */
+ size_t computeByteSize(size_t rowBytes) const;
+
+ /** Returns storage required by pixel array, given SkImageInfo dimensions, and
+ SkColorType. Uses minRowBytes() to compute bytes for pixel row.
+
+ Returns zero if height is zero.
+ Returns SIZE_MAX if answer exceeds the range of size_t.
+
+ @return least memory required by pixel buffer
+ */
+ size_t computeMinByteSize() const {
+ return this->computeByteSize(this->minRowBytes());
+ }
+
+ /** Returns true if byteSize equals SIZE_MAX. computeByteSize() and
+ computeMinByteSize() return SIZE_MAX if size_t can not hold buffer size.
+
+ @param byteSize result of computeByteSize() or computeMinByteSize()
+ @return true if computeByteSize() or computeMinByteSize() result exceeds size_t
+ */
+ static bool ByteSizeOverflowed(size_t byteSize) {
+ return SIZE_MAX == byteSize;
+ }
+
+ /** Returns true if rowBytes is valid for this SkImageInfo.
+
+ @param rowBytes size of pixel row including padding
+ @return true if rowBytes is large enough to contain pixel row and is properly
+ aligned
+ */
+ bool validRowBytes(size_t rowBytes) const {
+ if (rowBytes < this->minRowBytes64()) {
+ return false;
+ }
+ int shift = this->shiftPerPixel();
+ size_t alignedRowBytes = rowBytes >> shift << shift;
+ return alignedRowBytes == rowBytes;
+ }
+
+ /** Creates an empty SkImageInfo with kUnknown_SkColorType, kUnknown_SkAlphaType,
+ a width and height of zero, and no SkColorSpace.
+ */
+ void reset() { *this = {}; }
+
+ /** Asserts if internal values are illegal or inconsistent. Only available if
+ SK_DEBUG is defined at compile time.
+ */
+ SkDEBUGCODE(void validate() const;)
+
+private:
+ SkColorInfo fColorInfo;
+ SkISize fDimensions = {0, 0};
+
+ SkImageInfo(SkISize dimensions, const SkColorInfo& colorInfo)
+ : fColorInfo(colorInfo), fDimensions(dimensions) {}
+
+ SkImageInfo(SkISize dimensions, SkColorInfo&& colorInfo)
+ : fColorInfo(std::move(colorInfo)), fDimensions(dimensions) {}
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkM44.h b/gfx/skia/skia/include/core/SkM44.h
new file mode 100644
index 0000000000..11a06a15b1
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkM44.h
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkM44_DEFINED
+#define SkM44_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+
+struct SK_API SkV2 {
+ float x, y;
+
+ bool operator==(const SkV2 v) const { return x == v.x && y == v.y; }
+ bool operator!=(const SkV2 v) const { return !(*this == v); }
+
+ static SkScalar Dot(SkV2 a, SkV2 b) { return a.x * b.x + a.y * b.y; }
+ static SkScalar Cross(SkV2 a, SkV2 b) { return a.x * b.y - a.y * b.x; }
+ static SkV2 Normalize(SkV2 v) { return v * (1.0f / v.length()); }
+
+ SkV2 operator-() const { return {-x, -y}; }
+ SkV2 operator+(SkV2 v) const { return {x+v.x, y+v.y}; }
+ SkV2 operator-(SkV2 v) const { return {x-v.x, y-v.y}; }
+
+ SkV2 operator*(SkV2 v) const { return {x*v.x, y*v.y}; }
+ friend SkV2 operator*(SkV2 v, SkScalar s) { return {v.x*s, v.y*s}; }
+ friend SkV2 operator*(SkScalar s, SkV2 v) { return {v.x*s, v.y*s}; }
+ friend SkV2 operator/(SkV2 v, SkScalar s) { return {v.x/s, v.y/s}; }
+ friend SkV2 operator/(SkScalar s, SkV2 v) { return {s/v.x, s/v.y}; }
+
+ void operator+=(SkV2 v) { *this = *this + v; }
+ void operator-=(SkV2 v) { *this = *this - v; }
+ void operator*=(SkV2 v) { *this = *this * v; }
+ void operator*=(SkScalar s) { *this = *this * s; }
+ void operator/=(SkScalar s) { *this = *this / s; }
+
+ SkScalar lengthSquared() const { return Dot(*this, *this); }
+ SkScalar length() const { return SkScalarSqrt(this->lengthSquared()); }
+
+ SkScalar dot(SkV2 v) const { return Dot(*this, v); }
+ SkScalar cross(SkV2 v) const { return Cross(*this, v); }
+ SkV2 normalize() const { return Normalize(*this); }
+
+ const float* ptr() const { return &x; }
+ float* ptr() { return &x; }
+};
+
+struct SK_API SkV3 {
+ float x, y, z;
+
+ bool operator==(const SkV3& v) const {
+ return x == v.x && y == v.y && z == v.z;
+ }
+ bool operator!=(const SkV3& v) const { return !(*this == v); }
+
+ static SkScalar Dot(const SkV3& a, const SkV3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
+ static SkV3 Cross(const SkV3& a, const SkV3& b) {
+ return { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x };
+ }
+ static SkV3 Normalize(const SkV3& v) { return v * (1.0f / v.length()); }
+
+ SkV3 operator-() const { return {-x, -y, -z}; }
+ SkV3 operator+(const SkV3& v) const { return { x + v.x, y + v.y, z + v.z }; }
+ SkV3 operator-(const SkV3& v) const { return { x - v.x, y - v.y, z - v.z }; }
+
+ SkV3 operator*(const SkV3& v) const {
+ return { x*v.x, y*v.y, z*v.z };
+ }
+ friend SkV3 operator*(const SkV3& v, SkScalar s) {
+ return { v.x*s, v.y*s, v.z*s };
+ }
+ friend SkV3 operator*(SkScalar s, const SkV3& v) { return v*s; }
+
+ void operator+=(SkV3 v) { *this = *this + v; }
+ void operator-=(SkV3 v) { *this = *this - v; }
+ void operator*=(SkV3 v) { *this = *this * v; }
+ void operator*=(SkScalar s) { *this = *this * s; }
+
+ SkScalar lengthSquared() const { return Dot(*this, *this); }
+ SkScalar length() const { return SkScalarSqrt(Dot(*this, *this)); }
+
+ SkScalar dot(const SkV3& v) const { return Dot(*this, v); }
+ SkV3 cross(const SkV3& v) const { return Cross(*this, v); }
+ SkV3 normalize() const { return Normalize(*this); }
+
+ const float* ptr() const { return &x; }
+ float* ptr() { return &x; }
+};
+
+struct SK_API SkV4 {
+ float x, y, z, w;
+
+ bool operator==(const SkV4& v) const {
+ return x == v.x && y == v.y && z == v.z && w == v.w;
+ }
+ bool operator!=(const SkV4& v) const { return !(*this == v); }
+
+ static SkScalar Dot(const SkV4& a, const SkV4& b) {
+ return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;
+ }
+ static SkV4 Normalize(const SkV4& v) { return v * (1.0f / v.length()); }
+
+ SkV4 operator-() const { return {-x, -y, -z, -w}; }
+ SkV4 operator+(const SkV4& v) const { return { x + v.x, y + v.y, z + v.z, w + v.w }; }
+ SkV4 operator-(const SkV4& v) const { return { x - v.x, y - v.y, z - v.z, w - v.w }; }
+
+ SkV4 operator*(const SkV4& v) const {
+ return { x*v.x, y*v.y, z*v.z, w*v.w };
+ }
+ friend SkV4 operator*(const SkV4& v, SkScalar s) {
+ return { v.x*s, v.y*s, v.z*s, v.w*s };
+ }
+ friend SkV4 operator*(SkScalar s, const SkV4& v) { return v*s; }
+
+ SkScalar lengthSquared() const { return Dot(*this, *this); }
+ SkScalar length() const { return SkScalarSqrt(Dot(*this, *this)); }
+
+ SkScalar dot(const SkV4& v) const { return Dot(*this, v); }
+ SkV4 normalize() const { return Normalize(*this); }
+
+ const float* ptr() const { return &x; }
+ float* ptr() { return &x; }
+
+ float operator[](int i) const {
+ SkASSERT(i >= 0 && i < 4);
+ return this->ptr()[i];
+ }
+ float& operator[](int i) {
+ SkASSERT(i >= 0 && i < 4);
+ return this->ptr()[i];
+ }
+};
+
+/**
+ * 4x4 matrix used by SkCanvas and other parts of Skia.
+ *
+ * Skia assumes a right-handed coordinate system:
+ * +X goes to the right
+ * +Y goes down
+ * +Z goes into the screen (away from the viewer)
+ */
+class SK_API SkM44 {
+public:
+ SkM44(const SkM44& src) = default;
+ SkM44& operator=(const SkM44& src) = default;
+
+ constexpr SkM44()
+ : fMat{1, 0, 0, 0,
+ 0, 1, 0, 0,
+ 0, 0, 1, 0,
+ 0, 0, 0, 1}
+ {}
+
+ SkM44(const SkM44& a, const SkM44& b) {
+ this->setConcat(a, b);
+ }
+
+ enum Uninitialized_Constructor {
+ kUninitialized_Constructor
+ };
+ SkM44(Uninitialized_Constructor) {}
+
+ enum NaN_Constructor {
+ kNaN_Constructor
+ };
+ constexpr SkM44(NaN_Constructor)
+ : fMat{SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN,
+ SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN,
+ SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN,
+ SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN}
+ {}
+
+ /**
+ * The constructor parameters are in row-major order.
+ */
+ constexpr SkM44(SkScalar m0, SkScalar m4, SkScalar m8, SkScalar m12,
+ SkScalar m1, SkScalar m5, SkScalar m9, SkScalar m13,
+ SkScalar m2, SkScalar m6, SkScalar m10, SkScalar m14,
+ SkScalar m3, SkScalar m7, SkScalar m11, SkScalar m15)
+ // fMat is column-major order in memory.
+ : fMat{m0, m1, m2, m3,
+ m4, m5, m6, m7,
+ m8, m9, m10, m11,
+ m12, m13, m14, m15}
+ {}
+
+ static SkM44 Rows(const SkV4& r0, const SkV4& r1, const SkV4& r2, const SkV4& r3) {
+ SkM44 m(kUninitialized_Constructor);
+ m.setRow(0, r0);
+ m.setRow(1, r1);
+ m.setRow(2, r2);
+ m.setRow(3, r3);
+ return m;
+ }
+ static SkM44 Cols(const SkV4& c0, const SkV4& c1, const SkV4& c2, const SkV4& c3) {
+ SkM44 m(kUninitialized_Constructor);
+ m.setCol(0, c0);
+ m.setCol(1, c1);
+ m.setCol(2, c2);
+ m.setCol(3, c3);
+ return m;
+ }
+
+ static SkM44 RowMajor(const SkScalar r[16]) {
+ return SkM44(r[ 0], r[ 1], r[ 2], r[ 3],
+ r[ 4], r[ 5], r[ 6], r[ 7],
+ r[ 8], r[ 9], r[10], r[11],
+ r[12], r[13], r[14], r[15]);
+ }
+ static SkM44 ColMajor(const SkScalar c[16]) {
+ return SkM44(c[0], c[4], c[ 8], c[12],
+ c[1], c[5], c[ 9], c[13],
+ c[2], c[6], c[10], c[14],
+ c[3], c[7], c[11], c[15]);
+ }
+
+ static SkM44 Translate(SkScalar x, SkScalar y, SkScalar z = 0) {
+ return SkM44(1, 0, 0, x,
+ 0, 1, 0, y,
+ 0, 0, 1, z,
+ 0, 0, 0, 1);
+ }
+
+ static SkM44 Scale(SkScalar x, SkScalar y, SkScalar z = 1) {
+ return SkM44(x, 0, 0, 0,
+ 0, y, 0, 0,
+ 0, 0, z, 0,
+ 0, 0, 0, 1);
+ }
+
+ static SkM44 Rotate(SkV3 axis, SkScalar radians) {
+ SkM44 m(kUninitialized_Constructor);
+ m.setRotate(axis, radians);
+ return m;
+ }
+
+ // Scales and translates 'src' to fill 'dst' exactly.
+ static SkM44 RectToRect(const SkRect& src, const SkRect& dst);
+
+ static SkM44 LookAt(const SkV3& eye, const SkV3& center, const SkV3& up);
+ static SkM44 Perspective(float near, float far, float angle);
+
+ bool operator==(const SkM44& other) const;
+ bool operator!=(const SkM44& other) const {
+ return !(other == *this);
+ }
+
+ void getColMajor(SkScalar v[]) const {
+ memcpy(v, fMat, sizeof(fMat));
+ }
+ void getRowMajor(SkScalar v[]) const;
+
+ SkScalar rc(int r, int c) const {
+ SkASSERT(r >= 0 && r <= 3);
+ SkASSERT(c >= 0 && c <= 3);
+ return fMat[c*4 + r];
+ }
+ void setRC(int r, int c, SkScalar value) {
+ SkASSERT(r >= 0 && r <= 3);
+ SkASSERT(c >= 0 && c <= 3);
+ fMat[c*4 + r] = value;
+ }
+
+ SkV4 row(int i) const {
+ SkASSERT(i >= 0 && i <= 3);
+ return {fMat[i + 0], fMat[i + 4], fMat[i + 8], fMat[i + 12]};
+ }
+ SkV4 col(int i) const {
+ SkASSERT(i >= 0 && i <= 3);
+ return {fMat[i*4 + 0], fMat[i*4 + 1], fMat[i*4 + 2], fMat[i*4 + 3]};
+ }
+
+ void setRow(int i, const SkV4& v) {
+ SkASSERT(i >= 0 && i <= 3);
+ fMat[i + 0] = v.x;
+ fMat[i + 4] = v.y;
+ fMat[i + 8] = v.z;
+ fMat[i + 12] = v.w;
+ }
+ void setCol(int i, const SkV4& v) {
+ SkASSERT(i >= 0 && i <= 3);
+ memcpy(&fMat[i*4], v.ptr(), sizeof(v));
+ }
+
+ SkM44& setIdentity() {
+ *this = { 1, 0, 0, 0,
+ 0, 1, 0, 0,
+ 0, 0, 1, 0,
+ 0, 0, 0, 1 };
+ return *this;
+ }
+
+ SkM44& setTranslate(SkScalar x, SkScalar y, SkScalar z = 0) {
+ *this = { 1, 0, 0, x,
+ 0, 1, 0, y,
+ 0, 0, 1, z,
+ 0, 0, 0, 1 };
+ return *this;
+ }
+
+ SkM44& setScale(SkScalar x, SkScalar y, SkScalar z = 1) {
+ *this = { x, 0, 0, 0,
+ 0, y, 0, 0,
+ 0, 0, z, 0,
+ 0, 0, 0, 1 };
+ return *this;
+ }
+
+ /**
+ * Set this matrix to rotate about the specified unit-length axis vector,
+ * by an angle specified by its sin() and cos().
+ *
+ * This does not attempt to verify that axis.length() == 1 or that the sin,cos values
+ * are correct.
+ */
+ SkM44& setRotateUnitSinCos(SkV3 axis, SkScalar sinAngle, SkScalar cosAngle);
+
+ /**
+ * Set this matrix to rotate about the specified unit-length axis vector,
+ * by an angle specified in radians.
+ *
+ * This does not attempt to verify that axis.length() == 1.
+ */
+ SkM44& setRotateUnit(SkV3 axis, SkScalar radians) {
+ return this->setRotateUnitSinCos(axis, SkScalarSin(radians), SkScalarCos(radians));
+ }
+
+ /**
+ * Set this matrix to rotate about the specified axis vector,
+ * by an angle specified in radians.
+ *
+ * Note: axis is not assumed to be unit-length, so it will be normalized internally.
+     *  If axis is already unit-length, call setRotateUnit() instead.
+ */
+ SkM44& setRotate(SkV3 axis, SkScalar radians);
+
+ SkM44& setConcat(const SkM44& a, const SkM44& b);
+
+ friend SkM44 operator*(const SkM44& a, const SkM44& b) {
+ return SkM44(a, b);
+ }
+
+ SkM44& preConcat(const SkM44& m) {
+ return this->setConcat(*this, m);
+ }
+
+ SkM44& postConcat(const SkM44& m) {
+ return this->setConcat(m, *this);
+ }
+
+ /**
+ * A matrix is categorized as 'perspective' if the bottom row is not [0, 0, 0, 1].
+ * For most uses, a bottom row of [0, 0, 0, X] behaves like a non-perspective matrix, though
+ * it will be categorized as perspective. Calling normalizePerspective() will change the
+ * matrix such that, if its bottom row was [0, 0, 0, X], it will be changed to [0, 0, 0, 1]
+ * by scaling the rest of the matrix by 1/X.
+ *
+ * | A B C D | | A/X B/X C/X D/X |
+ * | E F G H | -> | E/X F/X G/X H/X | for X != 0
+ * | I J K L | | I/X J/X K/X L/X |
+ * | 0 0 0 X | | 0 0 0 1 |
+ */
+ void normalizePerspective();
+
+ /** Returns true if all elements of the matrix are finite. Returns false if any
+ element is infinity, or NaN.
+
+ @return true if matrix has only finite elements
+ */
+ bool isFinite() const { return SkScalarsAreFinite(fMat, 16); }
+
+ /** If this is invertible, return that in inverse and return true. If it is
+ * not invertible, return false and leave the inverse parameter unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT invert(SkM44* inverse) const;
+
+ SkM44 SK_WARN_UNUSED_RESULT transpose() const;
+
+ void dump() const;
+
+ ////////////
+
+ SkV4 map(float x, float y, float z, float w) const;
+ SkV4 operator*(const SkV4& v) const {
+ return this->map(v.x, v.y, v.z, v.w);
+ }
+ SkV3 operator*(SkV3 v) const {
+ auto v4 = this->map(v.x, v.y, v.z, 0);
+ return {v4.x, v4.y, v4.z};
+ }
+ ////////////////////// Converting to/from SkMatrix
+
+ /* When converting from SkM44 to SkMatrix, the third row and
+ * column is dropped. When converting from SkMatrix to SkM44
+ * the third row and column remain as identity:
+ * [ a b c ] [ a b 0 c ]
+ * [ d e f ] -> [ d e 0 f ]
+ * [ g h i ] [ 0 0 1 0 ]
+ * [ g h 0 i ]
+ */
+ SkMatrix asM33() const {
+ return SkMatrix::MakeAll(fMat[0], fMat[4], fMat[12],
+ fMat[1], fMat[5], fMat[13],
+ fMat[3], fMat[7], fMat[15]);
+ }
+
+ explicit SkM44(const SkMatrix& src)
+ : SkM44(src[SkMatrix::kMScaleX], src[SkMatrix::kMSkewX], 0, src[SkMatrix::kMTransX],
+ src[SkMatrix::kMSkewY], src[SkMatrix::kMScaleY], 0, src[SkMatrix::kMTransY],
+ 0, 0, 1, 0,
+ src[SkMatrix::kMPersp0], src[SkMatrix::kMPersp1], 0, src[SkMatrix::kMPersp2])
+ {}
+
+ SkM44& preTranslate(SkScalar x, SkScalar y, SkScalar z = 0);
+ SkM44& postTranslate(SkScalar x, SkScalar y, SkScalar z = 0);
+
+ SkM44& preScale(SkScalar x, SkScalar y);
+ SkM44& preScale(SkScalar x, SkScalar y, SkScalar z);
+ SkM44& preConcat(const SkMatrix&);
+
+private:
+ /* Stored in column-major.
+ * Indices
+ * 0 4 8 12 1 0 0 trans_x
+ * 1 5 9 13 e.g. 0 1 0 trans_y
+ * 2 6 10 14 0 0 1 trans_z
+ * 3 7 11 15 0 0 0 1
+ */
+ SkScalar fMat[16];
+
+ friend class SkMatrixPriv;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMallocPixelRef.h b/gfx/skia/skia/include/core/SkMallocPixelRef.h
new file mode 100644
index 0000000000..cce54b50f4
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMallocPixelRef.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMallocPixelRef_DEFINED
+#define SkMallocPixelRef_DEFINED
+
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+class SkData;
+struct SkImageInfo;
+
+/** We explicitly use the same allocator for our pixels that SkMask does,
+ so that we can freely assign memory allocated by one class to the other.
+*/
+namespace SkMallocPixelRef {
+ /**
+ * Return a new SkMallocPixelRef, automatically allocating storage for the
+ * pixels. If rowBytes is 0, an optimal value will be chosen automatically.
+ * If rowBytes is > 0, then it will be respected, or NULL will be returned
+ * if rowBytes is invalid for the specified info.
+ *
+ * All pixel bytes are zeroed.
+ *
+ * Returns NULL on failure.
+ */
+ SK_API sk_sp<SkPixelRef> MakeAllocate(const SkImageInfo&, size_t rowBytes);
+
+ /**
+ * Return a new SkMallocPixelRef that will use the provided SkData and
+ * rowBytes as pixel storage. The SkData will be ref()ed and on
+ * destruction of the PixelRef, the SkData will be unref()ed.
+ *
+ * Returns NULL on failure.
+ */
+ SK_API sk_sp<SkPixelRef> MakeWithData(const SkImageInfo&, size_t rowBytes, sk_sp<SkData> data);
+} // namespace SkMallocPixelRef
+#endif
diff --git a/gfx/skia/skia/include/core/SkMaskFilter.h b/gfx/skia/skia/include/core/SkMaskFilter.h
new file mode 100644
index 0000000000..9d03e98c0c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMaskFilter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskFilter_DEFINED
+#define SkMaskFilter_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+#include <cstddef>
+
+enum SkBlurStyle : int;
+struct SkDeserialProcs;
+struct SkRect;
+
+/** \class SkMaskFilter
+
+ SkMaskFilter is the base class for objects that perform transformations on
+ the mask before drawing it. An example subclass is Blur.
+*/
+class SK_API SkMaskFilter : public SkFlattenable {
+public:
+ /** Create a blur maskfilter.
+ * @param style The SkBlurStyle to use
+ * @param sigma Standard deviation of the Gaussian blur to apply. Must be > 0.
+ * @param respectCTM if true the blur's sigma is modified by the CTM.
+ * @return The new blur maskfilter
+ */
+ static sk_sp<SkMaskFilter> MakeBlur(SkBlurStyle style, SkScalar sigma,
+ bool respectCTM = true);
+
+ /**
+ * Returns the approximate bounds that would result from filtering the src rect.
+ * The actual result may be different, but it should be contained within the
+ * returned bounds.
+ */
+ SkRect approximateFilteredBounds(const SkRect& src) const;
+
+ static sk_sp<SkMaskFilter> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr);
+
+private:
+ static void RegisterFlattenables();
+ friend class SkFlattenable;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMatrix.h b/gfx/skia/skia/include/core/SkMatrix.h
new file mode 100644
index 0000000000..cf84d26228
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMatrix.h
@@ -0,0 +1,1996 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrix_DEFINED
+#define SkMatrix_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTo.h"
+
+#include <cstdint>
+#include <cstring>
+
+struct SkPoint3;
+struct SkRSXform;
+struct SkSize;
+
+// Remove when clients are updated to live without this
+#define SK_SUPPORT_LEGACY_MATRIX_RECTTORECT
+
+/**
+ * When we transform points through a matrix containing perspective (the bottom row is something
+ * other than 0,0,1), the bruteforce math can produce confusing results (since we might divide
+ * by 0, or a negative w value). By default, methods that map rects and paths will apply
+ * perspective clipping, but this can be changed by specifying kYes to those methods.
+ */
+enum class SkApplyPerspectiveClip {
+ kNo, //!< Don't pre-clip the geometry before applying the (perspective) matrix
+ kYes, //!< Do pre-clip the geometry before applying the (perspective) matrix
+};
+
+/** \class SkMatrix
+ SkMatrix holds a 3x3 matrix for transforming coordinates. This allows mapping
+ SkPoint and vectors with translation, scaling, skewing, rotation, and
+ perspective.
+
+ SkMatrix elements are in row major order.
+ SkMatrix constexpr default constructs to identity.
+
+ SkMatrix includes a hidden variable that classifies the type of matrix to
+ improve performance. SkMatrix is not thread safe unless getType() is called first.
+
+ example: https://fiddle.skia.org/c/@Matrix_063
+*/
+SK_BEGIN_REQUIRE_DENSE
+class SK_API SkMatrix {
+public:
+
+ /** Creates an identity SkMatrix:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+ */
+ constexpr SkMatrix() : SkMatrix(1,0,0, 0,1,0, 0,0,1, kIdentity_Mask | kRectStaysRect_Mask) {}
+
+ /** Sets SkMatrix to scale by (sx, sy). Returned matrix is:
+
+ | sx 0 0 |
+ | 0 sy 0 |
+ | 0 0 1 |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ @return SkMatrix with scale
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT Scale(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setScale(sx, sy);
+ return m;
+ }
+
+ /** Sets SkMatrix to translate by (dx, dy). Returned matrix is:
+
+ | 1 0 dx |
+ | 0 1 dy |
+ | 0 0 1 |
+
+ @param dx horizontal translation
+ @param dy vertical translation
+ @return SkMatrix with translation
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT Translate(SkScalar dx, SkScalar dy) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ return m;
+ }
+ static SkMatrix SK_WARN_UNUSED_RESULT Translate(SkVector t) { return Translate(t.x(), t.y()); }
+ static SkMatrix SK_WARN_UNUSED_RESULT Translate(SkIVector t) { return Translate(t.x(), t.y()); }
+
+ /** Sets SkMatrix to rotate by |deg| about a pivot point at (0, 0).
+
+ @param deg rotation angle in degrees (positive rotates clockwise)
+ @return SkMatrix with rotation
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT RotateDeg(SkScalar deg) {
+ SkMatrix m;
+ m.setRotate(deg);
+ return m;
+ }
+ static SkMatrix SK_WARN_UNUSED_RESULT RotateDeg(SkScalar deg, SkPoint pt) {
+ SkMatrix m;
+ m.setRotate(deg, pt.x(), pt.y());
+ return m;
+ }
+ static SkMatrix SK_WARN_UNUSED_RESULT RotateRad(SkScalar rad) {
+ return RotateDeg(SkRadiansToDegrees(rad));
+ }
+
+ /** Sets SkMatrix to skew by (kx, ky) about pivot point (0, 0).
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ @return SkMatrix with skew
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT Skew(SkScalar kx, SkScalar ky) {
+ SkMatrix m;
+ m.setSkew(kx, ky);
+ return m;
+ }
+
+ /** \enum SkMatrix::ScaleToFit
+ ScaleToFit describes how SkMatrix is constructed to map one SkRect to another.
+ ScaleToFit may allow SkMatrix to have unequal horizontal and vertical scaling,
+ or may restrict SkMatrix to square scaling. If restricted, ScaleToFit specifies
+ how SkMatrix maps to the side or center of the destination SkRect.
+ */
+ enum ScaleToFit {
+ kFill_ScaleToFit, //!< scales in x and y to fill destination SkRect
+ kStart_ScaleToFit, //!< scales and aligns to left and top
+ kCenter_ScaleToFit, //!< scales and aligns to center
+ kEnd_ScaleToFit, //!< scales and aligns to right and bottom
+ };
+
+ /** Returns SkMatrix set to scale and translate src to dst. ScaleToFit selects
+ whether mapping completely fills dst or preserves the aspect ratio, and how to
+ align src within dst. Returns the identity SkMatrix if src is empty. If dst is
+ empty, returns SkMatrix set to:
+
+ | 0 0 0 |
+ | 0 0 0 |
+ | 0 0 1 |
+
+ @param src SkRect to map from
+ @param dst SkRect to map to
+ @param mode How to handle the mapping
+ @return SkMatrix mapping src to dst
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT RectToRect(const SkRect& src, const SkRect& dst,
+ ScaleToFit mode = kFill_ScaleToFit) {
+ return MakeRectToRect(src, dst, mode);
+ }
+
+ /** Sets SkMatrix to:
+
+ | scaleX skewX transX |
+ | skewY scaleY transY |
+ | pers0 pers1 pers2 |
+
+ @param scaleX horizontal scale factor
+ @param skewX horizontal skew factor
+ @param transX horizontal translation
+ @param skewY vertical skew factor
+ @param scaleY vertical scale factor
+ @param transY vertical translation
+ @param pers0 input x-axis perspective factor
+ @param pers1 input y-axis perspective factor
+ @param pers2 perspective scale factor
+ @return SkMatrix constructed from parameters
+ */
+ static SkMatrix SK_WARN_UNUSED_RESULT MakeAll(SkScalar scaleX, SkScalar skewX, SkScalar transX,
+ SkScalar skewY, SkScalar scaleY, SkScalar transY,
+ SkScalar pers0, SkScalar pers1, SkScalar pers2) {
+ SkMatrix m;
+ m.setAll(scaleX, skewX, transX, skewY, scaleY, transY, pers0, pers1, pers2);
+ return m;
+ }
+
+ /** \enum SkMatrix::TypeMask
+ Enum of bit fields for mask returned by getType().
+ Used to identify the complexity of SkMatrix, to optimize performance.
+ */
+ enum TypeMask {
+ kIdentity_Mask = 0, //!< identity SkMatrix; all bits clear
+ kTranslate_Mask = 0x01, //!< translation SkMatrix
+ kScale_Mask = 0x02, //!< scale SkMatrix
+ kAffine_Mask = 0x04, //!< skew or rotate SkMatrix
+ kPerspective_Mask = 0x08, //!< perspective SkMatrix
+ };
+
+ /** Returns a bit field describing the transformations the matrix may
+ perform. The bit field is computed conservatively, so it may include
+ false positives. For example, when kPerspective_Mask is set, all
+ other bits are set.
+
+ @return kIdentity_Mask, or combinations of: kTranslate_Mask, kScale_Mask,
+ kAffine_Mask, kPerspective_Mask
+ */
+ TypeMask getType() const {
+ if (fTypeMask & kUnknown_Mask) {
+ fTypeMask = this->computeTypeMask();
+ }
+ // only return the public masks
+ return (TypeMask)(fTypeMask & 0xF);
+ }
+
+ /** Returns true if SkMatrix is identity. Identity matrix is:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+
+ @return true if SkMatrix has no effect
+ */
+ bool isIdentity() const {
+ return this->getType() == 0;
+ }
+
+ /** Returns true if SkMatrix at most scales and translates. SkMatrix may be identity,
+ contain only scale elements, only translate elements, or both. SkMatrix form is:
+
+ | scale-x 0 translate-x |
+ | 0 scale-y translate-y |
+ | 0 0 1 |
+
+ @return true if SkMatrix is identity; or scales, translates, or both
+ */
+ bool isScaleTranslate() const {
+ return !(this->getType() & ~(kScale_Mask | kTranslate_Mask));
+ }
+
+ /** Returns true if SkMatrix is identity, or translates. SkMatrix form is:
+
+ | 1 0 translate-x |
+ | 0 1 translate-y |
+ | 0 0 1 |
+
+ @return true if SkMatrix is identity, or translates
+ */
+ bool isTranslate() const { return !(this->getType() & ~(kTranslate_Mask)); }
+
+ /** Returns true if SkMatrix maps SkRect to another SkRect. If true, SkMatrix is identity,
+ or scales, or rotates a multiple of 90 degrees, or mirrors on axes. In all
+ cases, SkMatrix may also have translation. SkMatrix form is either:
+
+ | scale-x 0 translate-x |
+ | 0 scale-y translate-y |
+ | 0 0 1 |
+
+ or
+
+ | 0 rotate-x translate-x |
+ | rotate-y 0 translate-y |
+ | 0 0 1 |
+
+ for non-zero values of scale-x, scale-y, rotate-x, and rotate-y.
+
+ Also called preservesAxisAlignment(); use the one that provides better inline
+ documentation.
+
+ @return true if SkMatrix maps one SkRect into another
+ */
+ bool rectStaysRect() const {
+ if (fTypeMask & kUnknown_Mask) {
+ fTypeMask = this->computeTypeMask();
+ }
+ return (fTypeMask & kRectStaysRect_Mask) != 0;
+ }
+
+ /** Returns true if SkMatrix maps SkRect to another SkRect. If true, SkMatrix is identity,
+ or scales, or rotates a multiple of 90 degrees, or mirrors on axes. In all
+ cases, SkMatrix may also have translation. SkMatrix form is either:
+
+ | scale-x 0 translate-x |
+ | 0 scale-y translate-y |
+ | 0 0 1 |
+
+ or
+
+ | 0 rotate-x translate-x |
+ | rotate-y 0 translate-y |
+ | 0 0 1 |
+
+ for non-zero values of scale-x, scale-y, rotate-x, and rotate-y.
+
+ Also called rectStaysRect(); use the one that provides better inline
+ documentation.
+
+ @return true if SkMatrix maps one SkRect into another
+ */
+ bool preservesAxisAlignment() const { return this->rectStaysRect(); }
+
+ /** Returns true if the matrix contains perspective elements. SkMatrix form is:
+
+ | -- -- -- |
+ | -- -- -- |
+ | perspective-x perspective-y perspective-scale |
+
+ where perspective-x or perspective-y is non-zero, or perspective-scale is
+ not one. All other elements may have any value.
+
+ @return true if SkMatrix is in most general form
+ */
+ bool hasPerspective() const {
+ return SkToBool(this->getPerspectiveTypeMaskOnly() &
+ kPerspective_Mask);
+ }
+
+ /** Returns true if SkMatrix contains only translation, rotation, reflection, and
+ uniform scale.
+ Returns false if SkMatrix contains different scales, skewing, perspective, or
+ degenerate forms that collapse to a line or point.
+
+ Describes that the SkMatrix makes rendering with and without the matrix are
+ visually alike; a transformed circle remains a circle. Mathematically, this is
+ referred to as similarity of a Euclidean space, or a similarity transformation.
+
+ Preserves right angles, keeping the arms of the angle equal lengths.
+
+ @param tol to be deprecated
+ @return true if SkMatrix only rotates, uniformly scales, translates
+
+ example: https://fiddle.skia.org/c/@Matrix_isSimilarity
+ */
+ bool isSimilarity(SkScalar tol = SK_ScalarNearlyZero) const;
+
+ /** Returns true if SkMatrix contains only translation, rotation, reflection, and
+ scale. Scale may differ along rotated axes.
+ Returns false if SkMatrix contains skewing, perspective, or degenerate forms that collapse
+ to a line or point.
+
+ Preserves right angles, but not requiring that the arms of the angle
+ retain equal lengths.
+
+ @param tol to be deprecated
+ @return true if SkMatrix only rotates, scales, translates
+
+ example: https://fiddle.skia.org/c/@Matrix_preservesRightAngles
+ */
+ bool preservesRightAngles(SkScalar tol = SK_ScalarNearlyZero) const;
+
+ /** SkMatrix organizes its values in row-major order. These members correspond to
+ each value in SkMatrix.
+ */
+ static constexpr int kMScaleX = 0; //!< horizontal scale factor
+ static constexpr int kMSkewX = 1; //!< horizontal skew factor
+ static constexpr int kMTransX = 2; //!< horizontal translation
+ static constexpr int kMSkewY = 3; //!< vertical skew factor
+ static constexpr int kMScaleY = 4; //!< vertical scale factor
+ static constexpr int kMTransY = 5; //!< vertical translation
+ static constexpr int kMPersp0 = 6; //!< input x perspective factor
+ static constexpr int kMPersp1 = 7; //!< input y perspective factor
+ static constexpr int kMPersp2 = 8; //!< perspective bias
+
+ /** Affine arrays are in column-major order to match the matrix used by
+ PDF and XPS.
+ */
+ static constexpr int kAScaleX = 0; //!< horizontal scale factor
+ static constexpr int kASkewY = 1; //!< vertical skew factor
+ static constexpr int kASkewX = 2; //!< horizontal skew factor
+ static constexpr int kAScaleY = 3; //!< vertical scale factor
+ static constexpr int kATransX = 4; //!< horizontal translation
+ static constexpr int kATransY = 5; //!< vertical translation
+
+ /** Returns one matrix value. Asserts if index is out of range and SK_DEBUG is
+ defined.
+
+ @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2
+ @return value corresponding to index
+ */
+ SkScalar operator[](int index) const {
+ SkASSERT((unsigned)index < 9);
+ return fMat[index];
+ }
+
+ /** Returns one matrix value. Asserts if index is out of range and SK_DEBUG is
+ defined.
+
+ @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2
+ @return value corresponding to index
+ */
+ SkScalar get(int index) const {
+ SkASSERT((unsigned)index < 9);
+ return fMat[index];
+ }
+
+ /** Returns one matrix value from a particular row/column. Asserts if index is out
+ of range and SK_DEBUG is defined.
+
+ @param r matrix row to fetch
+ @param c matrix column to fetch
+ @return value at the given matrix position
+ */
+ SkScalar rc(int r, int c) const {
+ SkASSERT(r >= 0 && r <= 2);
+ SkASSERT(c >= 0 && c <= 2);
+ return fMat[r*3 + c];
+ }
+
+ /** Returns scale factor multiplied by x-axis input, contributing to x-axis output.
+ With mapPoints(), scales SkPoint along the x-axis.
+
+ @return horizontal scale factor
+ */
+ SkScalar getScaleX() const { return fMat[kMScaleX]; }
+
+ /** Returns scale factor multiplied by y-axis input, contributing to y-axis output.
+ With mapPoints(), scales SkPoint along the y-axis.
+
+ @return vertical scale factor
+ */
+ SkScalar getScaleY() const { return fMat[kMScaleY]; }
+
+ /** Returns scale factor multiplied by x-axis input, contributing to y-axis output.
+ With mapPoints(), skews SkPoint along the y-axis.
+ Skewing both axes can rotate SkPoint.
+
+ @return vertical skew factor
+ */
+ SkScalar getSkewY() const { return fMat[kMSkewY]; }
+
+ /** Returns scale factor multiplied by y-axis input, contributing to x-axis output.
+ With mapPoints(), skews SkPoint along the x-axis.
+ Skewing both axes can rotate SkPoint.
+
+ @return horizontal skew factor
+ */
+ SkScalar getSkewX() const { return fMat[kMSkewX]; }
+
+ /** Returns translation contributing to x-axis output.
+ With mapPoints(), moves SkPoint along the x-axis.
+
+ @return horizontal translation factor
+ */
+ SkScalar getTranslateX() const { return fMat[kMTransX]; }
+
+ /** Returns translation contributing to y-axis output.
+ With mapPoints(), moves SkPoint along the y-axis.
+
+ @return vertical translation factor
+ */
+ SkScalar getTranslateY() const { return fMat[kMTransY]; }
+
+ /** Returns factor scaling input x-axis relative to input y-axis.
+
+ @return input x-axis perspective factor
+ */
+ SkScalar getPerspX() const { return fMat[kMPersp0]; }
+
+ /** Returns factor scaling input y-axis relative to input x-axis.
+
+ @return input y-axis perspective factor
+ */
+ SkScalar getPerspY() const { return fMat[kMPersp1]; }
+
+ /** Returns writable SkMatrix value. Asserts if index is out of range and SK_DEBUG is
+ defined. Clears internal cache anticipating that caller will change SkMatrix value.
+
+ Next call to read SkMatrix state may recompute cache; subsequent writes to SkMatrix
+ value must be followed by dirtyMatrixTypeCache().
+
+ @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2
+ @return writable value corresponding to index
+ */
+ SkScalar& operator[](int index) {
+ SkASSERT((unsigned)index < 9);
+ this->setTypeMask(kUnknown_Mask);
+ return fMat[index];
+ }
+
+ /** Sets SkMatrix value. Asserts if index is out of range and SK_DEBUG is
+ defined. Safer than operator[]; internal cache is always maintained.
+
+ @param index one of: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2
+ @param value scalar to store in SkMatrix
+ */
+ SkMatrix& set(int index, SkScalar value) {
+ SkASSERT((unsigned)index < 9);
+ fMat[index] = value;
+ this->setTypeMask(kUnknown_Mask);
+ return *this;
+ }
+
+ /** Sets horizontal scale factor.
+
+ @param v horizontal scale factor to store
+ */
+ SkMatrix& setScaleX(SkScalar v) { return this->set(kMScaleX, v); }
+
+ /** Sets vertical scale factor.
+
+ @param v vertical scale factor to store
+ */
+ SkMatrix& setScaleY(SkScalar v) { return this->set(kMScaleY, v); }
+
+ /** Sets vertical skew factor.
+
+ @param v vertical skew factor to store
+ */
+ SkMatrix& setSkewY(SkScalar v) { return this->set(kMSkewY, v); }
+
+ /** Sets horizontal skew factor.
+
+ @param v horizontal skew factor to store
+ */
+ SkMatrix& setSkewX(SkScalar v) { return this->set(kMSkewX, v); }
+
+ /** Sets horizontal translation.
+
+ @param v horizontal translation to store
+ */
+ SkMatrix& setTranslateX(SkScalar v) { return this->set(kMTransX, v); }
+
+ /** Sets vertical translation.
+
+ @param v vertical translation to store
+ */
+ SkMatrix& setTranslateY(SkScalar v) { return this->set(kMTransY, v); }
+
+ /** Sets input x-axis perspective factor, which causes mapXY() to vary input x-axis values
+ inversely proportional to input y-axis values.
+
+ @param v perspective factor
+ */
+ SkMatrix& setPerspX(SkScalar v) { return this->set(kMPersp0, v); }
+
+ /** Sets input y-axis perspective factor, which causes mapXY() to vary input y-axis values
+ inversely proportional to input x-axis values.
+
+ @param v perspective factor
+ */
+ SkMatrix& setPerspY(SkScalar v) { return this->set(kMPersp1, v); }
+
+ /** Sets all values from parameters. Sets matrix to:
+
+ | scaleX skewX transX |
+ | skewY scaleY transY |
+ | persp0 persp1 persp2 |
+
+ @param scaleX horizontal scale factor to store
+ @param skewX horizontal skew factor to store
+ @param transX horizontal translation to store
+ @param skewY vertical skew factor to store
+ @param scaleY vertical scale factor to store
+ @param transY vertical translation to store
+ @param persp0 input x-axis values perspective factor to store
+ @param persp1 input y-axis values perspective factor to store
+ @param persp2 perspective scale factor to store
+ */
+ SkMatrix& setAll(SkScalar scaleX, SkScalar skewX, SkScalar transX,
+ SkScalar skewY, SkScalar scaleY, SkScalar transY,
+ SkScalar persp0, SkScalar persp1, SkScalar persp2) {
+ fMat[kMScaleX] = scaleX;
+ fMat[kMSkewX] = skewX;
+ fMat[kMTransX] = transX;
+ fMat[kMSkewY] = skewY;
+ fMat[kMScaleY] = scaleY;
+ fMat[kMTransY] = transY;
+ fMat[kMPersp0] = persp0;
+ fMat[kMPersp1] = persp1;
+ fMat[kMPersp2] = persp2;
+ this->setTypeMask(kUnknown_Mask);
+ return *this;
+ }
+
+ /** Copies nine scalar values contained by SkMatrix into buffer, in member value
+ ascending order: kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY,
+ kMPersp0, kMPersp1, kMPersp2.
+
+ @param buffer storage for nine scalar values
+ */
+ void get9(SkScalar buffer[9]) const {
+ memcpy(buffer, fMat, 9 * sizeof(SkScalar));
+ }
+
+ /** Sets SkMatrix to nine scalar values in buffer, in member value ascending order:
+ kMScaleX, kMSkewX, kMTransX, kMSkewY, kMScaleY, kMTransY, kMPersp0, kMPersp1,
+ kMPersp2.
+
+ Sets matrix to:
+
+ | buffer[0] buffer[1] buffer[2] |
+ | buffer[3] buffer[4] buffer[5] |
+ | buffer[6] buffer[7] buffer[8] |
+
+ In the future, set9 followed by get9 may not return the same values. Since SkMatrix
+ maps non-homogeneous coordinates, scaling all nine values produces an equivalent
+ transformation, possibly improving precision.
+
+ @param buffer nine scalar values
+ */
+ SkMatrix& set9(const SkScalar buffer[9]);
+
+ /** Sets SkMatrix to identity; which has no effect on mapped SkPoint. Sets SkMatrix to:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+
+ Also called setIdentity(); use the one that provides better inline
+ documentation.
+ */
+ SkMatrix& reset();
+
+ /** Sets SkMatrix to identity; which has no effect on mapped SkPoint. Sets SkMatrix to:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+
+ Also called reset(); use the one that provides better inline
+ documentation.
+ */
+ SkMatrix& setIdentity() { return this->reset(); }
+
+ /** Sets SkMatrix to translate by (dx, dy).
+
+ @param dx horizontal translation
+ @param dy vertical translation
+ */
+ SkMatrix& setTranslate(SkScalar dx, SkScalar dy);
+
+ /** Sets SkMatrix to translate by (v.fX, v.fY).
+
+ @param v vector containing horizontal and vertical translation
+ */
+ SkMatrix& setTranslate(const SkVector& v) { return this->setTranslate(v.fX, v.fY); }
+
+ /** Sets SkMatrix to scale by sx and sy, about a pivot point at (px, py).
+ The pivot point is unchanged when mapped with SkMatrix.
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& setScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to scale by sx and sy about at pivot point at (0, 0).
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ */
+ SkMatrix& setScale(SkScalar sx, SkScalar sy);
+
+ /** Sets SkMatrix to rotate by degrees about a pivot point at (px, py).
+ The pivot point is unchanged when mapped with SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ @param degrees angle of axes relative to upright axes
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& setRotate(SkScalar degrees, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to rotate by degrees about a pivot point at (0, 0).
+ Positive degrees rotates clockwise.
+
+ @param degrees angle of axes relative to upright axes
+ */
+ SkMatrix& setRotate(SkScalar degrees);
+
+ /** Sets SkMatrix to rotate by sinValue and cosValue, about a pivot point at (px, py).
+ The pivot point is unchanged when mapped with SkMatrix.
+
+ Vector (sinValue, cosValue) describes the angle of rotation relative to (0, 1).
+ Vector length specifies scale.
+
+ @param sinValue rotation vector x-axis component
+ @param cosValue rotation vector y-axis component
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& setSinCos(SkScalar sinValue, SkScalar cosValue,
+ SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to rotate by sinValue and cosValue, about a pivot point at (0, 0).
+
+ Vector (sinValue, cosValue) describes the angle of rotation relative to (0, 1).
+ Vector length specifies scale.
+
+ @param sinValue rotation vector x-axis component
+ @param cosValue rotation vector y-axis component
+ */
+ SkMatrix& setSinCos(SkScalar sinValue, SkScalar cosValue);
+
+ /** Sets SkMatrix to rotate, scale, and translate using a compressed matrix form.
+
+ Vector (rsxForm.fSSin, rsxForm.fSCos) describes the angle of rotation relative
+ to (0, 1). Vector length specifies scale. Mapped point is rotated and scaled
+ by vector, then translated by (rsxForm.fTx, rsxForm.fTy).
+
+ @param rsxForm compressed SkRSXform matrix
+ @return reference to SkMatrix
+
+ example: https://fiddle.skia.org/c/@Matrix_setRSXform
+ */
+ SkMatrix& setRSXform(const SkRSXform& rsxForm);
+
+ /** Sets SkMatrix to skew by kx and ky, about a pivot point at (px, py).
+ The pivot point is unchanged when mapped with SkMatrix.
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& setSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to skew by kx and ky, about a pivot point at (0, 0).
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ */
+ SkMatrix& setSkew(SkScalar kx, SkScalar ky);
+
+ /** Sets SkMatrix to SkMatrix a multiplied by SkMatrix b. Either a or b may be this.
+
+ Given:
+
+ | A B C | | J K L |
+ a = | D E F |, b = | M N O |
+ | G H I | | P Q R |
+
+ sets SkMatrix to:
+
+ | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR |
+ a * b = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR |
+ | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR |
+
+ @param a SkMatrix on left side of multiply expression
+ @param b SkMatrix on right side of multiply expression
+ */
+ SkMatrix& setConcat(const SkMatrix& a, const SkMatrix& b);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from translation (dx, dy).
+ This can be thought of as moving the point to be mapped before applying SkMatrix.
+
+ Given:
+
+ | A B C | | 1 0 dx |
+ Matrix = | D E F |, T(dx, dy) = | 0 1 dy |
+ | G H I | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | A B C | | 1 0 dx | | A B A*dx+B*dy+C |
+ Matrix * T(dx, dy) = | D E F | | 0 1 dy | = | D E D*dx+E*dy+F |
+ | G H I | | 0 0 1 | | G H G*dx+H*dy+I |
+
+ @param dx x-axis translation before applying SkMatrix
+ @param dy y-axis translation before applying SkMatrix
+ */
+ SkMatrix& preTranslate(SkScalar dx, SkScalar dy);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from scaling by (sx, sy)
+ about pivot point (px, py).
+ This can be thought of as scaling about a pivot point before applying SkMatrix.
+
+ Given:
+
+ | A B C | | sx 0 dx |
+ Matrix = | D E F |, S(sx, sy, px, py) = | 0 sy dy |
+ | G H I | | 0 0 1 |
+
+ where
+
+ dx = px - sx * px
+ dy = py - sy * py
+
+ sets SkMatrix to:
+
+ | A B C | | sx 0 dx | | A*sx B*sy A*dx+B*dy+C |
+ Matrix * S(sx, sy, px, py) = | D E F | | 0 sy dy | = | D*sx E*sy D*dx+E*dy+F |
+ | G H I | | 0 0 1 | | G*sx H*sy G*dx+H*dy+I |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& preScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from scaling by (sx, sy)
+ about pivot point (0, 0).
+ This can be thought of as scaling about the origin before applying SkMatrix.
+
+ Given:
+
+ | A B C | | sx 0 0 |
+ Matrix = | D E F |, S(sx, sy) = | 0 sy 0 |
+ | G H I | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | A B C | | sx 0 0 | | A*sx B*sy C |
+ Matrix * S(sx, sy) = | D E F | | 0 sy 0 | = | D*sx E*sy F |
+ | G H I | | 0 0 1 | | G*sx H*sy I |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ */
+ SkMatrix& preScale(SkScalar sx, SkScalar sy);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from rotating by degrees
+ about pivot point (px, py).
+ This can be thought of as rotating about a pivot point before applying SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ Given:
+
+ | A B C | | c -s dx |
+ Matrix = | D E F |, R(degrees, px, py) = | s c dy |
+ | G H I | | 0 0 1 |
+
+ where
+
+ c = cos(degrees)
+ s = sin(degrees)
+ dx = s * py + (1 - c) * px
+ dy = -s * px + (1 - c) * py
+
+ sets SkMatrix to:
+
+ | A B C | | c -s dx | | Ac+Bs -As+Bc A*dx+B*dy+C |
+ Matrix * R(degrees, px, py) = | D E F | | s c dy | = | Dc+Es -Ds+Ec D*dx+E*dy+F |
+ | G H I | | 0 0 1 | | Gc+Hs -Gs+Hc G*dx+H*dy+I |
+
+ @param degrees angle of axes relative to upright axes
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& preRotate(SkScalar degrees, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from rotating by degrees
+ about pivot point (0, 0).
+ This can be thought of as rotating about the origin before applying SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ Given:
+
+ | A B C | | c -s 0 |
+ Matrix = | D E F |, R(degrees, px, py) = | s c 0 |
+ | G H I | | 0 0 1 |
+
+ where
+
+ c = cos(degrees)
+ s = sin(degrees)
+
+ sets SkMatrix to:
+
+ | A B C | | c -s 0 | | Ac+Bs -As+Bc C |
+ Matrix * R(degrees, px, py) = | D E F | | s c 0 | = | Dc+Es -Ds+Ec F |
+ | G H I | | 0 0 1 | | Gc+Hs -Gs+Hc I |
+
+ @param degrees angle of axes relative to upright axes
+ */
+ SkMatrix& preRotate(SkScalar degrees);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from skewing by (kx, ky)
+ about pivot point (px, py).
+ This can be thought of as skewing about a pivot point before applying SkMatrix.
+
+ Given:
+
+ | A B C | | 1 kx dx |
+ Matrix = | D E F |, K(kx, ky, px, py) = | ky 1 dy |
+ | G H I | | 0 0 1 |
+
+ where
+
+ dx = -kx * py
+ dy = -ky * px
+
+ sets SkMatrix to:
+
+ | A B C | | 1 kx dx | | A+B*ky A*kx+B A*dx+B*dy+C |
+ Matrix * K(kx, ky, px, py) = | D E F | | ky 1 dy | = | D+E*ky D*kx+E D*dx+E*dy+F |
+ | G H I | | 0 0 1 | | G+H*ky G*kx+H G*dx+H*dy+I |
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& preSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix constructed from skewing by (kx, ky)
+ about pivot point (0, 0).
+ This can be thought of as skewing about the origin before applying SkMatrix.
+
+ Given:
+
+ | A B C | | 1 kx 0 |
+ Matrix = | D E F |, K(kx, ky) = | ky 1 0 |
+ | G H I | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | A B C | | 1 kx 0 | | A+B*ky A*kx+B C |
+ Matrix * K(kx, ky) = | D E F | | ky 1 0 | = | D+E*ky D*kx+E F |
+ | G H I | | 0 0 1 | | G+H*ky G*kx+H I |
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ */
+ SkMatrix& preSkew(SkScalar kx, SkScalar ky);
+
+ /** Sets SkMatrix to SkMatrix multiplied by SkMatrix other.
+ This can be thought of mapping by other before applying SkMatrix.
+
+ Given:
+
+ | A B C | | J K L |
+ Matrix = | D E F |, other = | M N O |
+ | G H I | | P Q R |
+
+ sets SkMatrix to:
+
+ | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR |
+ Matrix * other = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR |
+ | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR |
+
+ @param other SkMatrix on right side of multiply expression
+ */
+ SkMatrix& preConcat(const SkMatrix& other);
+
+ /** Sets SkMatrix to SkMatrix constructed from translation (dx, dy) multiplied by SkMatrix.
+ This can be thought of as moving the point to be mapped after applying SkMatrix.
+
+ Given:
+
+ | J K L | | 1 0 dx |
+ Matrix = | M N O |, T(dx, dy) = | 0 1 dy |
+ | P Q R | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | 1 0 dx | | J K L | | J+dx*P K+dx*Q L+dx*R |
+ T(dx, dy) * Matrix = | 0 1 dy | | M N O | = | M+dy*P N+dy*Q O+dy*R |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param dx x-axis translation after applying SkMatrix
+ @param dy y-axis translation after applying SkMatrix
+ */
+ SkMatrix& postTranslate(SkScalar dx, SkScalar dy);
+
+ /** Sets SkMatrix to SkMatrix constructed from scaling by (sx, sy) about pivot point
+ (px, py), multiplied by SkMatrix.
+ This can be thought of as scaling about a pivot point after applying SkMatrix.
+
+ Given:
+
+ | J K L | | sx 0 dx |
+ Matrix = | M N O |, S(sx, sy, px, py) = | 0 sy dy |
+ | P Q R | | 0 0 1 |
+
+ where
+
+ dx = px - sx * px
+ dy = py - sy * py
+
+ sets SkMatrix to:
+
+                                      | sx  0 dx | | J K L |   | sx*J+dx*P sx*K+dx*Q sx*L+dx*R |
+        S(sx, sy, px, py) * Matrix =  | 0  sy dy | | M N O | = | sy*M+dy*P sy*N+dy*Q sy*O+dy*R |
+                                      | 0   0  1 | | P Q R |   |     P         Q         R     |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& postScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix constructed from scaling by (sx, sy) about pivot point
+ (0, 0), multiplied by SkMatrix.
+ This can be thought of as scaling about the origin after applying SkMatrix.
+
+ Given:
+
+ | J K L | | sx 0 0 |
+ Matrix = | M N O |, S(sx, sy) = | 0 sy 0 |
+ | P Q R | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | sx 0 0 | | J K L | | sx*J sx*K sx*L |
+ S(sx, sy) * Matrix = | 0 sy 0 | | M N O | = | sy*M sy*N sy*O |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param sx horizontal scale factor
+ @param sy vertical scale factor
+ */
+ SkMatrix& postScale(SkScalar sx, SkScalar sy);
+
+ /** Sets SkMatrix to SkMatrix constructed from rotating by degrees about pivot point
+ (px, py), multiplied by SkMatrix.
+ This can be thought of as rotating about a pivot point after applying SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ Given:
+
+ | J K L | | c -s dx |
+ Matrix = | M N O |, R(degrees, px, py) = | s c dy |
+ | P Q R | | 0 0 1 |
+
+ where
+
+ c = cos(degrees)
+ s = sin(degrees)
+ dx = s * py + (1 - c) * px
+ dy = -s * px + (1 - c) * py
+
+ sets SkMatrix to:
+
+                                       |c -s dx| |J K L|   |cJ-sM+dx*P cK-sN+dx*Q cL-sO+dx*R|
+        R(degrees, px, py) * Matrix =  |s  c dy| |M N O| = |sJ+cM+dy*P sK+cN+dy*Q sL+cO+dy*R|
+                                       |0  0  1| |P Q R|   |         P          Q          R|
+
+ @param degrees angle of axes relative to upright axes
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& postRotate(SkScalar degrees, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix constructed from rotating by degrees about pivot point
+ (0, 0), multiplied by SkMatrix.
+ This can be thought of as rotating about the origin after applying SkMatrix.
+
+ Positive degrees rotates clockwise.
+
+ Given:
+
+                 | J K L |               | c -s 0 |
+        Matrix = | M N O |, R(degrees) = | s  c 0 |
+                 | P Q R |               | 0  0 1 |
+
+ where
+
+ c = cos(degrees)
+ s = sin(degrees)
+
+ sets SkMatrix to:
+
+                              | c -s 0 | | J K L |   | cJ-sM cK-sN cL-sO |
+        R(degrees) * Matrix = | s  c 0 | | M N O | = | sJ+cM sK+cN sL+cO |
+                              | 0  0 1 | | P Q R |   |   P     Q     R   |
+
+ @param degrees angle of axes relative to upright axes
+ */
+ SkMatrix& postRotate(SkScalar degrees);
+
+ /** Sets SkMatrix to SkMatrix constructed from skewing by (kx, ky) about pivot point
+ (px, py), multiplied by SkMatrix.
+ This can be thought of as skewing about a pivot point after applying SkMatrix.
+
+ Given:
+
+ | J K L | | 1 kx dx |
+ Matrix = | M N O |, K(kx, ky, px, py) = | ky 1 dy |
+ | P Q R | | 0 0 1 |
+
+ where
+
+ dx = -kx * py
+ dy = -ky * px
+
+ sets SkMatrix to:
+
+                                      | 1 kx dx| |J K L|   |J+kx*M+dx*P K+kx*N+dx*Q L+kx*O+dx*R|
+        K(kx, ky, px, py) * Matrix =  |ky  1 dy| |M N O| = |ky*J+M+dy*P ky*K+N+dy*Q ky*L+O+dy*R|
+                                      | 0  0  1| |P Q R|   |          P           Q           R|
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ @param px pivot on x-axis
+ @param py pivot on y-axis
+ */
+ SkMatrix& postSkew(SkScalar kx, SkScalar ky, SkScalar px, SkScalar py);
+
+ /** Sets SkMatrix to SkMatrix constructed from skewing by (kx, ky) about pivot point
+ (0, 0), multiplied by SkMatrix.
+ This can be thought of as skewing about the origin after applying SkMatrix.
+
+ Given:
+
+ | J K L | | 1 kx 0 |
+ Matrix = | M N O |, K(kx, ky) = | ky 1 0 |
+ | P Q R | | 0 0 1 |
+
+ sets SkMatrix to:
+
+ | 1 kx 0 | | J K L | | J+kx*M K+kx*N L+kx*O |
+ K(kx, ky) * Matrix = | ky 1 0 | | M N O | = | ky*J+M ky*K+N ky*L+O |
+ | 0 0 1 | | P Q R | | P Q R |
+
+ @param kx horizontal skew factor
+ @param ky vertical skew factor
+ */
+ SkMatrix& postSkew(SkScalar kx, SkScalar ky);
+
+ /** Sets SkMatrix to SkMatrix other multiplied by SkMatrix.
+ This can be thought of mapping by other after applying SkMatrix.
+
+ Given:
+
+ | J K L | | A B C |
+ Matrix = | M N O |, other = | D E F |
+ | P Q R | | G H I |
+
+ sets SkMatrix to:
+
+ | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR |
+ other * Matrix = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR |
+ | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR |
+
+ @param other SkMatrix on left side of multiply expression
+ */
+ SkMatrix& postConcat(const SkMatrix& other);
+
+#ifndef SK_SUPPORT_LEGACY_MATRIX_RECTTORECT
+private:
+#endif
+ /** Sets SkMatrix to scale and translate src SkRect to dst SkRect. stf selects whether
+ mapping completely fills dst or preserves the aspect ratio, and how to align
+ src within dst. Returns false if src is empty, and sets SkMatrix to identity.
+ Returns true if dst is empty, and sets SkMatrix to:
+
+ | 0 0 0 |
+ | 0 0 0 |
+ | 0 0 1 |
+
+ @param src SkRect to map from
+ @param dst SkRect to map to
+ @return true if SkMatrix can represent SkRect mapping
+
+ example: https://fiddle.skia.org/c/@Matrix_setRectToRect
+ */
+ bool setRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit stf);
+
+ /** Returns SkMatrix set to scale and translate src SkRect to dst SkRect. stf selects
+ whether mapping completely fills dst or preserves the aspect ratio, and how to
+ align src within dst. Returns the identity SkMatrix if src is empty. If dst is
+ empty, returns SkMatrix set to:
+
+ | 0 0 0 |
+ | 0 0 0 |
+ | 0 0 1 |
+
+ @param src SkRect to map from
+ @param dst SkRect to map to
+ @return SkMatrix mapping src to dst
+ */
+ static SkMatrix MakeRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit stf) {
+ SkMatrix m;
+ m.setRectToRect(src, dst, stf);
+ return m;
+ }
+#ifndef SK_SUPPORT_LEGACY_MATRIX_RECTTORECT
+public:
+#endif
+
+ /** Sets SkMatrix to map src to dst. count must be zero or greater, and four or less.
+
+ If count is zero, sets SkMatrix to identity and returns true.
+ If count is one, sets SkMatrix to translate and returns true.
+ If count is two or more, sets SkMatrix to map SkPoint if possible; returns false
+ if SkMatrix cannot be constructed. If count is four, SkMatrix may include
+ perspective.
+
+ @param src SkPoint to map from
+ @param dst SkPoint to map to
+ @param count number of SkPoint in src and dst
+ @return true if SkMatrix was constructed successfully
+
+ example: https://fiddle.skia.org/c/@Matrix_setPolyToPoly
+ */
+ bool setPolyToPoly(const SkPoint src[], const SkPoint dst[], int count);
+
+ /** Sets inverse to reciprocal matrix, returning true if SkMatrix can be inverted.
+ Geometrically, if SkMatrix maps from source to destination, inverse SkMatrix
+ maps from destination to source. If SkMatrix can not be inverted, inverse is
+ unchanged.
+
+ @param inverse storage for inverted SkMatrix; may be nullptr
+ @return true if SkMatrix can be inverted
+ */
+ bool SK_WARN_UNUSED_RESULT invert(SkMatrix* inverse) const {
+ // Allow the trivial case to be inlined.
+ if (this->isIdentity()) {
+ if (inverse) {
+ inverse->reset();
+ }
+ return true;
+ }
+ return this->invertNonIdentity(inverse);
+ }
+
+ /** Fills affine with identity values in column major order.
+ Sets affine to:
+
+ | 1 0 0 |
+ | 0 1 0 |
+
+ Affine 3 by 2 matrices in column major order are used by OpenGL and XPS.
+
+ @param affine storage for 3 by 2 affine matrix
+
+ example: https://fiddle.skia.org/c/@Matrix_SetAffineIdentity
+ */
+ static void SetAffineIdentity(SkScalar affine[6]);
+
+ /** Fills affine in column major order. Sets affine to:
+
+ | scale-x skew-x translate-x |
+ | skew-y scale-y translate-y |
+
+ If SkMatrix contains perspective, returns false and leaves affine unchanged.
+
+ @param affine storage for 3 by 2 affine matrix; may be nullptr
+ @return true if SkMatrix does not contain perspective
+ */
+ bool SK_WARN_UNUSED_RESULT asAffine(SkScalar affine[6]) const;
+
+ /** Sets SkMatrix to affine values, passed in column major order. Given affine,
+ column, then row, as:
+
+ | scale-x skew-x translate-x |
+ | skew-y scale-y translate-y |
+
+ SkMatrix is set, row, then column, to:
+
+ | scale-x skew-x translate-x |
+ | skew-y scale-y translate-y |
+ | 0 0 1 |
+
+ @param affine 3 by 2 affine matrix
+ */
+ SkMatrix& setAffine(const SkScalar affine[6]);
+
+ /**
+ * A matrix is categorized as 'perspective' if the bottom row is not [0, 0, 1].
+ * However, for most uses (e.g. mapPoints) a bottom row of [0, 0, X] behaves like a
+ * non-perspective matrix, though it will be categorized as perspective. Calling
+ * normalizePerspective() will change the matrix such that, if its bottom row was [0, 0, X],
+ * it will be changed to [0, 0, 1] by scaling the rest of the matrix by 1/X.
+ *
+ * | A B C | | A/X B/X C/X |
+ * | D E F | -> | D/X E/X F/X | for X != 0
+ * | 0 0 X | | 0 0 1 |
+ */
+ void normalizePerspective() {
+ if (fMat[8] != 1) {
+ this->doNormalizePerspective();
+ }
+ }
+
+ /** Maps src SkPoint array of length count to dst SkPoint array of equal or greater
+ length. SkPoint are mapped by multiplying each SkPoint by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ where
+
+ for (i = 0; i < count; ++i) {
+ x = src[i].fX
+ y = src[i].fY
+ }
+
+ each dst SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ src and dst may point to the same storage.
+
+ @param dst storage for mapped SkPoint
+ @param src SkPoint to transform
+ @param count number of SkPoint to transform
+
+ example: https://fiddle.skia.org/c/@Matrix_mapPoints
+ */
+ void mapPoints(SkPoint dst[], const SkPoint src[], int count) const;
+
+ /** Maps pts SkPoint array of length count in place. SkPoint are mapped by multiplying
+ each SkPoint by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ where
+
+ for (i = 0; i < count; ++i) {
+ x = pts[i].fX
+ y = pts[i].fY
+ }
+
+ each resulting pts SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param pts storage for mapped SkPoint
+ @param count number of SkPoint to transform
+ */
+ void mapPoints(SkPoint pts[], int count) const {
+ this->mapPoints(pts, pts, count);
+ }
+
+ /** Maps src SkPoint3 array of length count to dst SkPoint3 array, which must of length count or
+ greater. SkPoint3 array is mapped by multiplying each SkPoint3 by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, src = | y |
+ | G H I | | z |
+
+ each resulting dst SkPoint is computed as:
+
+ |A B C| |x|
+ Matrix * src = |D E F| |y| = |Ax+By+Cz Dx+Ey+Fz Gx+Hy+Iz|
+ |G H I| |z|
+
+ @param dst storage for mapped SkPoint3 array
+ @param src SkPoint3 array to transform
+ @param count items in SkPoint3 array to transform
+
+ example: https://fiddle.skia.org/c/@Matrix_mapHomogeneousPoints
+ */
+ void mapHomogeneousPoints(SkPoint3 dst[], const SkPoint3 src[], int count) const;
+
+ /**
+ * Returns homogeneous points, starting with 2D src points (with implied w = 1).
+ */
+ void mapHomogeneousPoints(SkPoint3 dst[], const SkPoint src[], int count) const;
+
+ /** Returns SkPoint pt multiplied by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ result is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param p SkPoint to map
+ @return mapped SkPoint
+ */
+ SkPoint mapPoint(SkPoint pt) const {
+ SkPoint result;
+ this->mapXY(pt.x(), pt.y(), &result);
+ return result;
+ }
+
+ /** Maps SkPoint (x, y) to result. SkPoint is mapped by multiplying by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ result is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param x x-axis value of SkPoint to map
+ @param y y-axis value of SkPoint to map
+ @param result storage for mapped SkPoint
+
+ example: https://fiddle.skia.org/c/@Matrix_mapXY
+ */
+ void mapXY(SkScalar x, SkScalar y, SkPoint* result) const;
+
+ /** Returns SkPoint (x, y) multiplied by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ result is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param x x-axis value of SkPoint to map
+ @param y y-axis value of SkPoint to map
+ @return mapped SkPoint
+ */
+ SkPoint mapXY(SkScalar x, SkScalar y) const {
+ SkPoint result;
+ this->mapXY(x,y, &result);
+ return result;
+ }
+
+
+ /** Returns (0, 0) multiplied by SkMatrix. Given:
+
+ | A B C | | 0 |
+ Matrix = | D E F |, pt = | 0 |
+ | G H I | | 1 |
+
+ result is computed as:
+
+ |A B C| |0| C F
+ Matrix * pt = |D E F| |0| = |C F I| = - , -
+ |G H I| |1| I I
+
+ @return mapped (0, 0)
+ */
+ SkPoint mapOrigin() const {
+ SkScalar x = this->getTranslateX(),
+ y = this->getTranslateY();
+ if (this->hasPerspective()) {
+ SkScalar w = fMat[kMPersp2];
+ if (w) { w = 1 / w; }
+ x *= w;
+ y *= w;
+ }
+ return {x, y};
+ }
+
+ /** Maps src vector array of length count to vector SkPoint array of equal or greater
+ length. Vectors are mapped by multiplying each vector by SkMatrix, treating
+ SkMatrix translation as zero. Given:
+
+ | A B 0 | | x |
+ Matrix = | D E 0 |, src = | y |
+ | G H I | | 1 |
+
+ where
+
+ for (i = 0; i < count; ++i) {
+ x = src[i].fX
+ y = src[i].fY
+ }
+
+ each dst vector is computed as:
+
+ |A B 0| |x| Ax+By Dx+Ey
+ Matrix * src = |D E 0| |y| = |Ax+By Dx+Ey Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ src and dst may point to the same storage.
+
+ @param dst storage for mapped vectors
+ @param src vectors to transform
+ @param count number of vectors to transform
+
+ example: https://fiddle.skia.org/c/@Matrix_mapVectors
+ */
+ void mapVectors(SkVector dst[], const SkVector src[], int count) const;
+
+ /** Maps vecs vector array of length count in place, multiplying each vector by
+ SkMatrix, treating SkMatrix translation as zero. Given:
+
+ | A B 0 | | x |
+ Matrix = | D E 0 |, vec = | y |
+ | G H I | | 1 |
+
+ where
+
+ for (i = 0; i < count; ++i) {
+ x = vecs[i].fX
+ y = vecs[i].fY
+ }
+
+ each result vector is computed as:
+
+ |A B 0| |x| Ax+By Dx+Ey
+ Matrix * vec = |D E 0| |y| = |Ax+By Dx+Ey Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param vecs vectors to transform, and storage for mapped vectors
+ @param count number of vectors to transform
+ */
+ void mapVectors(SkVector vecs[], int count) const {
+ this->mapVectors(vecs, vecs, count);
+ }
+
+ /** Maps vector (dx, dy) to result. Vector is mapped by multiplying by SkMatrix,
+ treating SkMatrix translation as zero. Given:
+
+ | A B 0 | | dx |
+ Matrix = | D E 0 |, vec = | dy |
+ | G H I | | 1 |
+
+ each result vector is computed as:
+
+                       |A B 0| |dx|                                        A*dx+B*dy     D*dx+E*dy
+        Matrix * vec = |D E 0| |dy| = |A*dx+B*dy D*dx+E*dy G*dx+H*dy+I| = ----------- , -----------
+                       |G H I| | 1|                                       G*dx+H*dy+I   G*dx+H*dy+I
+
+ @param dx x-axis value of vector to map
+ @param dy y-axis value of vector to map
+ @param result storage for mapped vector
+ */
+ void mapVector(SkScalar dx, SkScalar dy, SkVector* result) const {
+ SkVector vec = { dx, dy };
+ this->mapVectors(result, &vec, 1);
+ }
+
+ /** Returns vector (dx, dy) multiplied by SkMatrix, treating SkMatrix translation as zero.
+ Given:
+
+ | A B 0 | | dx |
+ Matrix = | D E 0 |, vec = | dy |
+ | G H I | | 1 |
+
+ each result vector is computed as:
+
+                       |A B 0| |dx|                                        A*dx+B*dy     D*dx+E*dy
+        Matrix * vec = |D E 0| |dy| = |A*dx+B*dy D*dx+E*dy G*dx+H*dy+I| = ----------- , -----------
+                       |G H I| | 1|                                       G*dx+H*dy+I   G*dx+H*dy+I
+
+ @param dx x-axis value of vector to map
+ @param dy y-axis value of vector to map
+ @return mapped vector
+ */
+ SkVector mapVector(SkScalar dx, SkScalar dy) const {
+ SkVector vec = { dx, dy };
+ this->mapVectors(&vec, &vec, 1);
+ return vec;
+ }
+
+ /** Sets dst to bounds of src corners mapped by SkMatrix.
+ Returns true if mapped corners are dst corners.
+
+ Returned value is the same as calling rectStaysRect().
+
+ @param dst storage for bounds of mapped SkPoint
+ @param src SkRect to map
+ @param pc whether to apply perspective clipping
+ @return true if dst is equivalent to mapped src
+
+ example: https://fiddle.skia.org/c/@Matrix_mapRect
+ */
+ bool mapRect(SkRect* dst, const SkRect& src,
+ SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const;
+
+ /** Sets rect to bounds of rect corners mapped by SkMatrix.
+ Returns true if mapped corners are computed rect corners.
+
+ Returned value is the same as calling rectStaysRect().
+
+ @param rect rectangle to map, and storage for bounds of mapped corners
+ @param pc whether to apply perspective clipping
+ @return true if result is equivalent to mapped rect
+ */
+ bool mapRect(SkRect* rect, SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const {
+ return this->mapRect(rect, *rect, pc);
+ }
+
+ /** Returns bounds of src corners mapped by SkMatrix.
+
+ @param src rectangle to map
+ @return mapped bounds
+ */
+ SkRect mapRect(const SkRect& src,
+ SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const {
+ SkRect dst;
+ (void)this->mapRect(&dst, src, pc);
+ return dst;
+ }
+
+ /** Maps four corners of rect to dst. SkPoint are mapped by multiplying each
+ rect corner by SkMatrix. rect corner is processed in this order:
+ (rect.fLeft, rect.fTop), (rect.fRight, rect.fTop), (rect.fRight, rect.fBottom),
+ (rect.fLeft, rect.fBottom).
+
+ rect may be empty: rect.fLeft may be greater than or equal to rect.fRight;
+ rect.fTop may be greater than or equal to rect.fBottom.
+
+ Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ where pt is initialized from each of (rect.fLeft, rect.fTop),
+ (rect.fRight, rect.fTop), (rect.fRight, rect.fBottom), (rect.fLeft, rect.fBottom),
+ each dst SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param dst storage for mapped corner SkPoint
+ @param rect SkRect to map
+
+        Note: this does not perform perspective clipping (as that might result in more than
+        4 points), so results are suspect if the matrix contains perspective.
+ */
+ void mapRectToQuad(SkPoint dst[4], const SkRect& rect) const {
+ // This could potentially be faster if we only transformed each x and y of the rect once.
+ rect.toQuad(dst);
+ this->mapPoints(dst, 4);
+ }
+
+ /** Sets dst to bounds of src corners mapped by SkMatrix. If matrix contains
+ elements other than scale or translate: asserts if SK_DEBUG is defined;
+ otherwise, results are undefined.
+
+ @param dst storage for bounds of mapped SkPoint
+ @param src SkRect to map
+
+ example: https://fiddle.skia.org/c/@Matrix_mapRectScaleTranslate
+ */
+ void mapRectScaleTranslate(SkRect* dst, const SkRect& src) const;
+
+ /** Returns geometric mean radius of ellipse formed by constructing circle of
+ size radius, and mapping constructed circle with SkMatrix. The result squared is
+ equal to the major axis length times the minor axis length.
+ Result is not meaningful if SkMatrix contains perspective elements.
+
+ @param radius circle size to map
+ @return average mapped radius
+
+ example: https://fiddle.skia.org/c/@Matrix_mapRadius
+ */
+ SkScalar mapRadius(SkScalar radius) const;
+
+ /** Compares a and b; returns true if a and b are numerically equal. Returns true
+ even if sign of zero values are different. Returns false if either SkMatrix
+ contains NaN, even if the other SkMatrix also contains NaN.
+
+ @param a SkMatrix to compare
+ @param b SkMatrix to compare
+ @return true if SkMatrix a and SkMatrix b are numerically equal
+ */
+ friend SK_API bool operator==(const SkMatrix& a, const SkMatrix& b);
+
+ /** Compares a and b; returns true if a and b are not numerically equal. Returns false
+ even if sign of zero values are different. Returns true if either SkMatrix
+ contains NaN, even if the other SkMatrix also contains NaN.
+
+ @param a SkMatrix to compare
+ @param b SkMatrix to compare
+ @return true if SkMatrix a and SkMatrix b are numerically not equal
+ */
+ friend SK_API bool operator!=(const SkMatrix& a, const SkMatrix& b) {
+ return !(a == b);
+ }
+
+ /** Writes text representation of SkMatrix to standard output. Floating point values
+ are written with limited precision; it may not be possible to reconstruct
+ original SkMatrix from output.
+
+ example: https://fiddle.skia.org/c/@Matrix_dump
+ */
+ void dump() const;
+
+ /** Returns the minimum scaling factor of SkMatrix by decomposing the scaling and
+ skewing elements.
+ Returns -1 if scale factor overflows or SkMatrix contains perspective.
+
+ @return minimum scale factor
+
+ example: https://fiddle.skia.org/c/@Matrix_getMinScale
+ */
+ SkScalar getMinScale() const;
+
+ /** Returns the maximum scaling factor of SkMatrix by decomposing the scaling and
+ skewing elements.
+ Returns -1 if scale factor overflows or SkMatrix contains perspective.
+
+ @return maximum scale factor
+
+ example: https://fiddle.skia.org/c/@Matrix_getMaxScale
+ */
+ SkScalar getMaxScale() const;
+
+ /** Sets scaleFactors[0] to the minimum scaling factor, and scaleFactors[1] to the
+ maximum scaling factor. Scaling factors are computed by decomposing
+ the SkMatrix scaling and skewing elements.
+
+ Returns true if scaleFactors are found; otherwise, returns false and sets
+ scaleFactors to undefined values.
+
+ @param scaleFactors storage for minimum and maximum scale factors
+ @return true if scale factors were computed correctly
+ */
+ bool SK_WARN_UNUSED_RESULT getMinMaxScales(SkScalar scaleFactors[2]) const;
+
+ /** Decomposes SkMatrix into scale components and whatever remains. Returns false if
+ SkMatrix could not be decomposed.
+
+ Sets scale to portion of SkMatrix that scale axes. Sets remaining to SkMatrix
+ with scaling factored out. remaining may be passed as nullptr
+ to determine if SkMatrix can be decomposed without computing remainder.
+
+        Returns true if scale components are found. scale and remaining are
+        unchanged if SkMatrix contains perspective, scale factors are not finite, or
+        are nearly zero.
+
+ On success: Matrix = Remaining * scale.
+
+ @param scale axes scaling factors; may be nullptr
+ @param remaining SkMatrix without scaling; may be nullptr
+ @return true if scale can be computed
+
+ example: https://fiddle.skia.org/c/@Matrix_decomposeScale
+ */
+ bool decomposeScale(SkSize* scale, SkMatrix* remaining = nullptr) const;
+
+ /** Returns reference to const identity SkMatrix. Returned SkMatrix is set to:
+
+ | 1 0 0 |
+ | 0 1 0 |
+ | 0 0 1 |
+
+ @return const identity SkMatrix
+
+ example: https://fiddle.skia.org/c/@Matrix_I
+ */
+ static const SkMatrix& I();
+
+ /** Returns reference to a const SkMatrix with invalid values. Returned SkMatrix is set
+ to:
+
+ | SK_ScalarMax SK_ScalarMax SK_ScalarMax |
+ | SK_ScalarMax SK_ScalarMax SK_ScalarMax |
+ | SK_ScalarMax SK_ScalarMax SK_ScalarMax |
+
+ @return const invalid SkMatrix
+
+ example: https://fiddle.skia.org/c/@Matrix_InvalidMatrix
+ */
+ static const SkMatrix& InvalidMatrix();
+
+ /** Returns SkMatrix a multiplied by SkMatrix b.
+
+ Given:
+
+ | A B C | | J K L |
+ a = | D E F |, b = | M N O |
+ | G H I | | P Q R |
+
+ sets SkMatrix to:
+
+ | A B C | | J K L | | AJ+BM+CP AK+BN+CQ AL+BO+CR |
+ a * b = | D E F | * | M N O | = | DJ+EM+FP DK+EN+FQ DL+EO+FR |
+ | G H I | | P Q R | | GJ+HM+IP GK+HN+IQ GL+HO+IR |
+
+ @param a SkMatrix on left side of multiply expression
+ @param b SkMatrix on right side of multiply expression
+ @return SkMatrix computed from a times b
+ */
+ static SkMatrix Concat(const SkMatrix& a, const SkMatrix& b) {
+ SkMatrix result;
+ result.setConcat(a, b);
+ return result;
+ }
+
+ friend SkMatrix operator*(const SkMatrix& a, const SkMatrix& b) {
+ return Concat(a, b);
+ }
+
+ /** Sets internal cache to unknown state. Use to force update after repeated
+ modifications to SkMatrix element reference returned by operator[](int index).
+ */
+ void dirtyMatrixTypeCache() {
+ this->setTypeMask(kUnknown_Mask);
+ }
+
+ /** Initializes SkMatrix with scale and translate elements.
+
+ | sx 0 tx |
+ | 0 sy ty |
+ | 0 0 1 |
+
+ @param sx horizontal scale factor to store
+ @param sy vertical scale factor to store
+ @param tx horizontal translation to store
+ @param ty vertical translation to store
+ */
+ void setScaleTranslate(SkScalar sx, SkScalar sy, SkScalar tx, SkScalar ty) {
+ fMat[kMScaleX] = sx;
+ fMat[kMSkewX] = 0;
+ fMat[kMTransX] = tx;
+
+ fMat[kMSkewY] = 0;
+ fMat[kMScaleY] = sy;
+ fMat[kMTransY] = ty;
+
+ fMat[kMPersp0] = 0;
+ fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ int mask = 0;
+ if (sx != 1 || sy != 1) {
+ mask |= kScale_Mask;
+ }
+ if (tx != 0.0f || ty != 0.0f) {
+ mask |= kTranslate_Mask;
+ }
+ if (sx != 0 && sy != 0) {
+ mask |= kRectStaysRect_Mask;
+ }
+ this->setTypeMask(mask);
+ }
+
+ /** Returns true if all elements of the matrix are finite. Returns false if any
+ element is infinity, or NaN.
+
+ @return true if matrix has only finite elements
+ */
+ bool isFinite() const { return SkScalarsAreFinite(fMat, 9); }
+
+private:
+ /** Set if the matrix will map a rectangle to another rectangle. This
+ can be true if the matrix is scale-only, or rotates a multiple of
+ 90 degrees.
+
+ This bit will be set on identity matrices
+ */
+ static constexpr int kRectStaysRect_Mask = 0x10;
+
+ /** Set if the perspective bit is valid even though the rest of
+ the matrix is Unknown.
+ */
+ static constexpr int kOnlyPerspectiveValid_Mask = 0x40;
+
+ static constexpr int kUnknown_Mask = 0x80;
+
+ static constexpr int kORableMasks = kTranslate_Mask |
+ kScale_Mask |
+ kAffine_Mask |
+ kPerspective_Mask;
+
+ static constexpr int kAllMasks = kTranslate_Mask |
+ kScale_Mask |
+ kAffine_Mask |
+ kPerspective_Mask |
+ kRectStaysRect_Mask;
+
+ SkScalar fMat[9];
+ mutable int32_t fTypeMask;
+
+ constexpr SkMatrix(SkScalar sx, SkScalar kx, SkScalar tx,
+ SkScalar ky, SkScalar sy, SkScalar ty,
+ SkScalar p0, SkScalar p1, SkScalar p2, int typeMask)
+ : fMat{sx, kx, tx,
+ ky, sy, ty,
+ p0, p1, p2}
+ , fTypeMask(typeMask) {}
+
+ static void ComputeInv(SkScalar dst[9], const SkScalar src[9], double invDet, bool isPersp);
+
+ uint8_t computeTypeMask() const;
+ uint8_t computePerspectiveTypeMask() const;
+
+ void setTypeMask(int mask) {
+ // allow kUnknown or a valid mask
+ SkASSERT(kUnknown_Mask == mask || (mask & kAllMasks) == mask ||
+ ((kUnknown_Mask | kOnlyPerspectiveValid_Mask) & mask)
+ == (kUnknown_Mask | kOnlyPerspectiveValid_Mask));
+ fTypeMask = mask;
+ }
+
+ void orTypeMask(int mask) {
+ SkASSERT((mask & kORableMasks) == mask);
+ fTypeMask |= mask;
+ }
+
+ void clearTypeMask(int mask) {
+ // only allow a valid mask
+ SkASSERT((mask & kAllMasks) == mask);
+ fTypeMask &= ~mask;
+ }
+
+ TypeMask getPerspectiveTypeMaskOnly() const {
+ if ((fTypeMask & kUnknown_Mask) &&
+ !(fTypeMask & kOnlyPerspectiveValid_Mask)) {
+ fTypeMask = this->computePerspectiveTypeMask();
+ }
+ return (TypeMask)(fTypeMask & 0xF);
+ }
+
+ /** Returns true if we already know that the matrix is identity;
+ false otherwise.
+ */
+ bool isTriviallyIdentity() const {
+ if (fTypeMask & kUnknown_Mask) {
+ return false;
+ }
+ return ((fTypeMask & 0xF) == 0);
+ }
+
+ inline void updateTranslateMask() {
+ if ((fMat[kMTransX] != 0) | (fMat[kMTransY] != 0)) {
+ fTypeMask |= kTranslate_Mask;
+ } else {
+ fTypeMask &= ~kTranslate_Mask;
+ }
+ }
+
+ typedef void (*MapXYProc)(const SkMatrix& mat, SkScalar x, SkScalar y,
+ SkPoint* result);
+
+ static MapXYProc GetMapXYProc(TypeMask mask) {
+ SkASSERT((mask & ~kAllMasks) == 0);
+ return gMapXYProcs[mask & kAllMasks];
+ }
+
+ MapXYProc getMapXYProc() const {
+ return GetMapXYProc(this->getType());
+ }
+
+ typedef void (*MapPtsProc)(const SkMatrix& mat, SkPoint dst[],
+ const SkPoint src[], int count);
+
+ static MapPtsProc GetMapPtsProc(TypeMask mask) {
+ SkASSERT((mask & ~kAllMasks) == 0);
+ return gMapPtsProcs[mask & kAllMasks];
+ }
+
+ MapPtsProc getMapPtsProc() const {
+ return GetMapPtsProc(this->getType());
+ }
+
+ bool SK_WARN_UNUSED_RESULT invertNonIdentity(SkMatrix* inverse) const;
+
+ static bool Poly2Proc(const SkPoint[], SkMatrix*);
+ static bool Poly3Proc(const SkPoint[], SkMatrix*);
+ static bool Poly4Proc(const SkPoint[], SkMatrix*);
+
+ static void Identity_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Trans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Scale_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void ScaleTrans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Rot_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void RotTrans_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+ static void Persp_xy(const SkMatrix&, SkScalar, SkScalar, SkPoint*);
+
+ static const MapXYProc gMapXYProcs[];
+
+ static void Identity_pts(const SkMatrix&, SkPoint[], const SkPoint[], int);
+ static void Trans_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+ static void Scale_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+ static void ScaleTrans_pts(const SkMatrix&, SkPoint dst[], const SkPoint[],
+ int count);
+ static void Persp_pts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+
+ static void Affine_vpts(const SkMatrix&, SkPoint dst[], const SkPoint[], int);
+
+ static const MapPtsProc gMapPtsProcs[];
+
+ // return the number of bytes written, whether or not buffer is null
+ size_t writeToMemory(void* buffer) const;
+ /**
+ * Reads data from the buffer parameter
+ *
+ * @param buffer Memory to read from
+ * @param length Amount of memory available in the buffer
+ * @return number of bytes read (must be a multiple of 4) or
+ * 0 if there was not enough memory available
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+ // legacy method -- still needed? why not just postScale(1/divx, ...)?
+ bool postIDiv(int divx, int divy);
+ void doNormalizePerspective();
+
+ friend class SkPerspIter;
+ friend class SkMatrixPriv;
+ friend class SerializationTest;
+};
+SK_END_REQUIRE_DENSE
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMesh.h b/gfx/skia/skia/include/core/SkMesh.h
new file mode 100644
index 0000000000..360a039e7f
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMesh.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMesh_DEFINED
+#define SkMesh_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_ENABLE_SKSL
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkString.h"
+#include "include/effects/SkRuntimeEffect.h"
+
+#include <memory>
+#include <tuple>
+#include <vector>
+
+class GrDirectContext;
+class SkColorSpace;
+class SkData;
+
+namespace SkSL { struct Program; }
+
+/**
+ * A specification for custom meshes. Specifies the vertex buffer attributes and stride, the
+ * vertex program that produces a user-defined set of varyings, and a fragment program that ingests
+ * the interpolated varyings and produces local coordinates for shading and optionally a color.
+ *
+ * The varyings must include a float2 named "position". If the passed varyings do not
+ * contain such a varying then one is implicitly added to the final specification and the SkSL
+ * Varyings struct described below. It is an error to have a varying named "position" that has a
+ * type other than float2.
+ *
+ * The provided attributes and varyings are used to create Attributes and Varyings structs in SkSL
+ * that are used by the shaders. Each attribute from the Attribute span becomes a member of the
+ * SkSL Attributes struct and likewise for the varyings.
+ *
+ * The signature of the vertex program must be:
+ * Varyings main(const Attributes).
+ *
+ * The signature of the fragment program must be either:
+ * float2 main(const Varyings)
+ * or
+ * float2 main(const Varyings, out (half4|float4) color)
+ *
+ * where the return value is the local coordinates that will be used to access SkShader. If the
+ * color variant is used, the returned color will be blended with SkPaint's SkShader (or SkPaint
+ * color in absence of a SkShader) using the SkBlender passed to SkCanvas drawMesh(). To use
+ * interpolated local space positions as the shader coordinates, equivalent to how SkPaths are
+ * shaded, return the position field from the Varying struct as the coordinates.
+ *
+ * The vertex and fragment programs may both contain uniforms. Uniforms with the same name are
+ * assumed to be shared between stages. It is an error to specify uniforms in the vertex and
+ * fragment program with the same name but different types, dimensionality, or layouts.
+ */
+class SkMeshSpecification : public SkNVRefCnt<SkMeshSpecification> {
+public:
+ /** These values are enforced when creating a specification. */
+ static constexpr size_t kMaxStride = 1024;
+ static constexpr size_t kMaxAttributes = 8;
+ static constexpr size_t kStrideAlignment = 4;
+ static constexpr size_t kOffsetAlignment = 4;
+ static constexpr size_t kMaxVaryings = 6;
+
+ struct Attribute {
+ enum class Type : uint32_t { // CPU representation Shader Type
+ kFloat, // float float
+ kFloat2, // two floats float2
+ kFloat3, // three floats float3
+ kFloat4, // four floats float4
+ kUByte4_unorm, // four bytes half4
+
+ kLast = kUByte4_unorm
+ };
+ Type type;
+ size_t offset;
+ SkString name;
+ };
+
+ struct Varying {
+ enum class Type : uint32_t {
+ kFloat, // "float"
+ kFloat2, // "float2"
+ kFloat3, // "float3"
+ kFloat4, // "float4"
+ kHalf, // "half"
+ kHalf2, // "half2"
+ kHalf3, // "half3"
+ kHalf4, // "half4"
+
+ kLast = kHalf4
+ };
+ Type type;
+ SkString name;
+ };
+
+ using Uniform = SkRuntimeEffect::Uniform;
+
+ ~SkMeshSpecification();
+
+ struct Result {
+ sk_sp<SkMeshSpecification> specification;
+ SkString error;
+ };
+
+ /**
+ * If successful the return is a specification and an empty error string. Otherwise, it is a
+ * null specification and a non-empty error string.
+ *
+ * @param attributes The vertex attributes that will be consumed by 'vs'. Attributes need
+ * not be tightly packed but attribute offsets must be aligned to
+ * kOffsetAlignment and offset + size may not be greater than
+ * 'vertexStride'. At least one attribute is required.
+ * @param vertexStride The offset between successive attribute values. This must be aligned to
+ * kStrideAlignment.
+ * @param varyings The varyings that will be written by 'vs' and read by 'fs'. This may
+ * be empty.
+ * @param vs The vertex shader code that computes a vertex position and the varyings
+ * from the attributes.
+ * @param fs The fragment code that computes a local coordinate and optionally a
+ * color from the varyings. The local coordinate is used to sample
+ * SkShader.
+ * @param cs The colorspace of the color produced by 'fs'. Ignored if 'fs's main()
+ * function does not have a color out param.
+ * @param at The alpha type of the color produced by 'fs'. Ignored if 'fs's main()
+ * function does not have a color out param. Cannot be kUnknown.
+ */
+ static Result Make(SkSpan<const Attribute> attributes,
+ size_t vertexStride,
+ SkSpan<const Varying> varyings,
+ const SkString& vs,
+ const SkString& fs);
+ static Result Make(SkSpan<const Attribute> attributes,
+ size_t vertexStride,
+ SkSpan<const Varying> varyings,
+ const SkString& vs,
+ const SkString& fs,
+ sk_sp<SkColorSpace> cs);
+ static Result Make(SkSpan<const Attribute> attributes,
+ size_t vertexStride,
+ SkSpan<const Varying> varyings,
+ const SkString& vs,
+ const SkString& fs,
+ sk_sp<SkColorSpace> cs,
+ SkAlphaType at);
+
+ SkSpan<const Attribute> attributes() const { return SkSpan(fAttributes); }
+
+ /**
+ * Combined size of all 'uniform' variables. When creating a SkMesh with this specification
+ * provide an SkData of this size, containing values for all of those variables. Use uniforms()
+ * to get the offset of each uniform within the SkData.
+ */
+ size_t uniformSize() const;
+
+ /**
+ * Provides info about individual uniforms including the offset into an SkData where each
+ * uniform value should be placed.
+ */
+ SkSpan<const Uniform> uniforms() const { return SkSpan(fUniforms); }
+
+ /** Returns pointer to the named uniform variable's description, or nullptr if not found. */
+ const Uniform* findUniform(std::string_view name) const;
+
+ /** Returns pointer to the named attribute, or nullptr if not found. */
+ const Attribute* findAttribute(std::string_view name) const;
+
+ /** Returns pointer to the named varying, or nullptr if not found. */
+ const Varying* findVarying(std::string_view name) const;
+
+ size_t stride() const { return fStride; }
+
+private:
+ friend struct SkMeshSpecificationPriv;
+
+ enum class ColorType {
+ kNone,
+ kHalf4,
+ kFloat4,
+ };
+
+ static Result MakeFromSourceWithStructs(SkSpan<const Attribute> attributes,
+ size_t stride,
+ SkSpan<const Varying> varyings,
+ const SkString& vs,
+ const SkString& fs,
+ sk_sp<SkColorSpace> cs,
+ SkAlphaType at);
+
+ SkMeshSpecification(SkSpan<const Attribute>,
+ size_t,
+ SkSpan<const Varying>,
+ int passthroughLocalCoordsVaryingIndex,
+ uint32_t deadVaryingMask,
+ std::vector<Uniform> uniforms,
+ std::unique_ptr<const SkSL::Program>,
+ std::unique_ptr<const SkSL::Program>,
+ ColorType,
+ sk_sp<SkColorSpace>,
+ SkAlphaType);
+
+ SkMeshSpecification(const SkMeshSpecification&) = delete;
+ SkMeshSpecification(SkMeshSpecification&&) = delete;
+
+ SkMeshSpecification& operator=(const SkMeshSpecification&) = delete;
+ SkMeshSpecification& operator=(SkMeshSpecification&&) = delete;
+
+ const std::vector<Attribute> fAttributes;
+ const std::vector<Varying> fVaryings;
+ const std::vector<Uniform> fUniforms;
+ const std::unique_ptr<const SkSL::Program> fVS;
+ const std::unique_ptr<const SkSL::Program> fFS;
+ const size_t fStride;
+ uint32_t fHash;
+ const int fPassthroughLocalCoordsVaryingIndex;
+ const uint32_t fDeadVaryingMask;
+ const ColorType fColorType;
+ const sk_sp<SkColorSpace> fColorSpace;
+ const SkAlphaType fAlphaType;
+};
+
+/**
+ * A vertex buffer, a topology, optionally an index buffer, and a compatible SkMeshSpecification.
+ *
+ * The data in the vertex buffer is expected to contain the attributes described by the spec
+ * for vertexCount vertices beginning at vertexOffset. vertexOffset must be aligned to the
+ * SkMeshSpecification's vertex stride. The size of the buffer must be at least vertexOffset +
+ * spec->stride()*vertexCount (even if the vertex attributes contain padding at the end of the stride). If
+ * the specified bounds does not contain all the points output by the spec's vertex program when
+ * applied to the vertices in the custom mesh then the result is undefined.
+ *
+ * MakeIndexed may be used to create an indexed mesh. indexCount indices are read from the index
+ * buffer at the specified offset which must be aligned to 2. The indices are always unsigned 16bit
+ * integers. The index count must be at least 3.
+ *
+ * If Make() is used the implicit index sequence is 0, 1, 2, 3, ... and vertexCount must be at least
+ * 3.
+ *
+ * Both Make() and MakeIndexed() take a SkData with the uniform values. See
+ * SkMeshSpecification::uniformSize() and SkMeshSpecification::uniforms() for sizing and packing
+ * uniforms into the SkData.
+ */
+class SkMesh {
+public:
+ class IndexBuffer : public SkRefCnt {
+ public:
+ virtual size_t size() const = 0;
+
+ /**
+ * Modifies the data in the IndexBuffer by copying size bytes from data into the buffer
+ * at offset. Fails if offset + size > this->size() or if either offset or size is not
+ * aligned to 4 bytes. The GrDirectContext* must match that used to create the buffer. We
+ * take it as a parameter to emphasize that the context must be used to update the data and
+ * thus the context must be valid for the current thread.
+ */
+ bool update(GrDirectContext*, const void* data, size_t offset, size_t size);
+
+ private:
+ virtual bool onUpdate(GrDirectContext*, const void* data, size_t offset, size_t size) = 0;
+ };
+
+ class VertexBuffer : public SkRefCnt {
+ public:
+ virtual size_t size() const = 0;
+
+ /**
+ * Modifies the data in the VertexBuffer by copying size bytes from data into the buffer
+ * at offset. Fails if offset + size > this->size() or if either offset or size is not
+ * aligned to 4 bytes. The GrDirectContext* must match that used to create the buffer. We
+ * take it as a parameter to emphasize that the context must be used to update the data and
+ * thus the context must be valid for the current thread.
+ */
+ bool update(GrDirectContext*, const void* data, size_t offset, size_t size);
+
+ private:
+ virtual bool onUpdate(GrDirectContext*, const void* data, size_t offset, size_t size) = 0;
+ };
+
+ SkMesh();
+ ~SkMesh();
+
+ SkMesh(const SkMesh&);
+ SkMesh(SkMesh&&);
+
+ SkMesh& operator=(const SkMesh&);
+ SkMesh& operator=(SkMesh&&);
+
+ /**
+ * Makes an index buffer to be used with SkMeshes. The buffer may be CPU- or GPU-backed
+ * depending on whether GrDirectContext* is nullptr.
+ *
+ * @param GrDirectContext* If nullptr a CPU-backed object is returned. Otherwise, the data is
+ * uploaded to the GPU and a GPU-backed buffer is returned. It may
+ * only be used to draw into SkSurfaces that are backed by the passed
+ * GrDirectContext.
+ * @param data The data used to populate the buffer, or nullptr to create a zero-
+ * initialized buffer.
+ * @param size Both the size of the data in 'data' and the size of the resulting
+ * buffer.
+ */
+ static sk_sp<IndexBuffer> MakeIndexBuffer(GrDirectContext*, const void* data, size_t size);
+
+ /**
+ * Makes a copy of an index buffer. The implementation currently only supports a CPU-backed
+ * source buffer.
+ */
+ static sk_sp<IndexBuffer> CopyIndexBuffer(GrDirectContext*, sk_sp<IndexBuffer>);
+
+ /**
+ * Makes a vertex buffer to be used with SkMeshes. The buffer may be CPU- or GPU-backed
+ * depending on whether GrDirectContext* is nullptr.
+ *
+ * @param GrDirectContext* If nullptr a CPU-backed object is returned. Otherwise, the data is
+ * uploaded to the GPU and a GPU-backed buffer is returned. It may
+ * only be used to draw into SkSurfaces that are backed by the passed
+ * GrDirectContext.
+ * @param data The data used to populate the buffer, or nullptr to create a zero-
+ * initialized buffer.
+ * @param size Both the size of the data in 'data' and the size of the resulting
+ * buffer.
+ */
+ static sk_sp<VertexBuffer> MakeVertexBuffer(GrDirectContext*, const void*, size_t size);
+
+ /**
+ * Makes a copy of a vertex buffer. The implementation currently only supports a CPU-backed
+ * source buffer.
+ */
+ static sk_sp<VertexBuffer> CopyVertexBuffer(GrDirectContext*, sk_sp<VertexBuffer>);
+
+ enum class Mode { kTriangles, kTriangleStrip };
+
+ struct Result;
+
+ /**
+ * Creates a non-indexed SkMesh. The returned SkMesh can be tested for validity using
+ * SkMesh::isValid(). An invalid mesh simply fails to draw if passed to SkCanvas::drawMesh().
+ * If the mesh is invalid the returned string will contain the reason for the failure (e.g. the
+ * vertex buffer was null or uniform data too small).
+ */
+ static Result Make(sk_sp<SkMeshSpecification>,
+ Mode,
+ sk_sp<VertexBuffer>,
+ size_t vertexCount,
+ size_t vertexOffset,
+ sk_sp<const SkData> uniforms,
+ const SkRect& bounds);
+
+ /**
+ * Creates an indexed SkMesh. The returned SkMesh can be tested for validity using
+ * SkMesh::isValid(). An invalid mesh simply fails to draw if passed to SkCanvas::drawMesh().
+ * If the mesh is invalid the returned string will contain the reason for the failure (e.g. the
+ * index buffer was null or uniform data too small).
+ */
+ static Result MakeIndexed(sk_sp<SkMeshSpecification>,
+ Mode,
+ sk_sp<VertexBuffer>,
+ size_t vertexCount,
+ size_t vertexOffset,
+ sk_sp<IndexBuffer>,
+ size_t indexCount,
+ size_t indexOffset,
+ sk_sp<const SkData> uniforms,
+ const SkRect& bounds);
+
+ sk_sp<SkMeshSpecification> refSpec() const { return fSpec; }
+ SkMeshSpecification* spec() const { return fSpec.get(); }
+
+ Mode mode() const { return fMode; }
+
+ sk_sp<VertexBuffer> refVertexBuffer() const { return fVB; }
+ VertexBuffer* vertexBuffer() const { return fVB.get(); }
+
+ size_t vertexOffset() const { return fVOffset; }
+ size_t vertexCount() const { return fVCount; }
+
+ sk_sp<IndexBuffer> refIndexBuffer() const { return fIB; }
+ IndexBuffer* indexBuffer() const { return fIB.get(); }
+
+ size_t indexOffset() const { return fIOffset; }
+ size_t indexCount() const { return fICount; }
+
+ sk_sp<const SkData> refUniforms() const { return fUniforms; }
+ const SkData* uniforms() const { return fUniforms.get(); }
+
+ SkRect bounds() const { return fBounds; }
+
+ bool isValid() const;
+
+private:
+ friend struct SkMeshPriv;
+
+ std::tuple<bool, SkString> validate() const;
+
+ sk_sp<SkMeshSpecification> fSpec;
+
+ sk_sp<VertexBuffer> fVB;
+ sk_sp<IndexBuffer> fIB;
+
+ sk_sp<const SkData> fUniforms;
+
+ size_t fVOffset = 0; // Must be a multiple of spec->stride()
+ size_t fVCount = 0;
+
+ size_t fIOffset = 0; // Must be a multiple of sizeof(uint16_t)
+ size_t fICount = 0;
+
+ Mode fMode = Mode::kTriangles;
+
+ SkRect fBounds = SkRect::MakeEmpty();
+};
+
+struct SkMesh::Result { SkMesh mesh; SkString error; };
+
+#endif // SK_ENABLE_SKSL
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkMilestone.h b/gfx/skia/skia/include/core/SkMilestone.h
new file mode 100644
index 0000000000..dd0eebe924
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkMilestone.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SK_MILESTONE
+#define SK_MILESTONE 113
+#endif
diff --git a/gfx/skia/skia/include/core/SkOpenTypeSVGDecoder.h b/gfx/skia/skia/include/core/SkOpenTypeSVGDecoder.h
new file mode 100644
index 0000000000..5a2e48a9df
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkOpenTypeSVGDecoder.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpenTypeSVGDecoder_DEFINED
+#define SkOpenTypeSVGDecoder_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+
+#include <memory>
+
+class SkCanvas;
+
+class SkOpenTypeSVGDecoder {
+public:
+ /** Each instance probably owns an SVG DOM.
+ * The instance may be cached so needs to report how much memory it retains.
+ */
+ virtual size_t approximateSize() = 0;
+ virtual bool render(SkCanvas&, int upem, SkGlyphID glyphId,
+ SkColor foregroundColor, SkSpan<SkColor> palette) = 0;
+ virtual ~SkOpenTypeSVGDecoder() = default;
+};
+
+#endif // SkOpenTypeSVGDecoder_DEFINED
diff --git a/gfx/skia/skia/include/core/SkOverdrawCanvas.h b/gfx/skia/skia/include/core/SkOverdrawCanvas.h
new file mode 100644
index 0000000000..f3ffc06556
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkOverdrawCanvas.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOverdrawCanvas_DEFINED
+#define SkOverdrawCanvas_DEFINED
+
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/utils/SkNWayCanvas.h"
+
+/**
+ * Captures all drawing commands. Rather than draw the actual content, this device
+ * increments the alpha channel of each pixel every time it would have been touched
+ * by a draw call. This is useful for detecting overdraw.
+ */
+class SK_API SkOverdrawCanvas : public SkCanvasVirtualEnforcer<SkNWayCanvas> {
+public:
+ /* Does not take ownership of canvas */
+ SkOverdrawCanvas(SkCanvas*);
+
+ void onDrawTextBlob(const SkTextBlob*, SkScalar, SkScalar, const SkPaint&) override;
+ void onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) override;
+ void onDrawPatch(const SkPoint[12], const SkColor[4], const SkPoint[4], SkBlendMode,
+ const SkPaint&) override;
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint& paint) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t, const SkPoint[], const SkPaint&) override;
+ void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+
+ void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&,
+ const SkPaint*) override;
+ void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode,
+ const SkPaint*) override;
+ void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override;
+
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+ void onDrawAnnotation(const SkRect&, const char key[], SkData* value) override;
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], SkCanvas::QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet2(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkSamplingOptions&,const SkPaint*, SrcRectConstraint) override;
+
+private:
+ inline SkPaint overdrawPaint(const SkPaint& paint);
+
+ SkPaint fPaint;
+
+ using INHERITED = SkCanvasVirtualEnforcer<SkNWayCanvas>;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPaint.h b/gfx/skia/skia/include/core/SkPaint.h
new file mode 100644
index 0000000000..157dbb59d8
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPaint.h
@@ -0,0 +1,695 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaint_DEFINED
+#define SkPaint_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkCPUTypes.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/base/SkTypeTraits.h"
+
+#include <cstdint>
+#include <optional>
+#include <type_traits>
+
+class SkBlender;
+class SkColorFilter;
+class SkColorSpace;
+class SkImageFilter;
+class SkMaskFilter;
+class SkPathEffect;
+class SkShader;
+enum class SkBlendMode;
+struct SkRect;
+
+/** \class SkPaint
+ SkPaint controls options applied when drawing. SkPaint collects all
+ options outside of the SkCanvas clip and SkCanvas matrix.
+
+ Various options apply to strokes and fills, and images.
+
+ SkPaint collects effects and filters that describe single-pass and multiple-pass
+ algorithms that alter the drawing geometry, color, and transparency. For instance,
+ SkPaint does not directly implement dashing or blur, but contains the objects that do so.
+*/
+class SK_API SkPaint {
+public:
+
+ /** Constructs SkPaint with default values.
+
+ @return default initialized SkPaint
+
+ example: https://fiddle.skia.org/c/@Paint_empty_constructor
+ */
+ SkPaint();
+
+ /** Constructs SkPaint with default values and the given color.
+
+ Sets alpha and RGB used when stroking and filling. The color is four floating
+ point values, unpremultiplied. The color values are interpreted as being in
+ the colorSpace. If colorSpace is nullptr, then color is assumed to be in the
+ sRGB color space.
+
+ @param color unpremultiplied RGBA
+ @param colorSpace SkColorSpace describing the encoding of color
+ @return SkPaint with the given color
+ */
+ explicit SkPaint(const SkColor4f& color, SkColorSpace* colorSpace = nullptr);
+
+ /** Makes a shallow copy of SkPaint. SkPathEffect, SkShader,
+ SkMaskFilter, SkColorFilter, and SkImageFilter are shared
+ between the original paint and the copy. Objects containing SkRefCnt increment
+ their references by one.
+
+ The referenced objects SkPathEffect, SkShader, SkMaskFilter, SkColorFilter,
+ and SkImageFilter cannot be modified after they are created.
+ This prevents objects with SkRefCnt from being modified once SkPaint refers to them.
+
+ @param paint original to copy
+ @return shallow copy of paint
+
+ example: https://fiddle.skia.org/c/@Paint_copy_const_SkPaint
+ */
+ SkPaint(const SkPaint& paint);
+
+ /** Implements a move constructor to avoid increasing the reference counts
+ of objects referenced by the paint.
+
+ After the call, paint is undefined, and can be safely destructed.
+
+ @param paint original to move
+ @return content of paint
+
+ example: https://fiddle.skia.org/c/@Paint_move_SkPaint
+ */
+ SkPaint(SkPaint&& paint);
+
+ /** Decreases SkPaint SkRefCnt of owned objects: SkPathEffect, SkShader,
+ SkMaskFilter, SkColorFilter, and SkImageFilter. If the
+ objects containing SkRefCnt go to zero, they are deleted.
+ */
+ ~SkPaint();
+
+ /** Makes a shallow copy of SkPaint. SkPathEffect, SkShader,
+ SkMaskFilter, SkColorFilter, and SkImageFilter are shared
+ between the original paint and the copy. Objects containing SkRefCnt in the
+ prior destination are decreased by one, and the referenced objects are deleted if the
+ resulting count is zero. Objects containing SkRefCnt in the parameter paint
+ are increased by one. paint is unmodified.
+
+ @param paint original to copy
+ @return content of paint
+
+ example: https://fiddle.skia.org/c/@Paint_copy_operator
+ */
+ SkPaint& operator=(const SkPaint& paint);
+
+ /** Moves the paint to avoid increasing the reference counts
+ of objects referenced by the paint parameter. Objects containing SkRefCnt in the
+ prior destination are decreased by one; those objects are deleted if the resulting count
+ is zero.
+
+ After the call, paint is undefined, and can be safely destructed.
+
+ @param paint original to move
+ @return content of paint
+
+ example: https://fiddle.skia.org/c/@Paint_move_operator
+ */
+ SkPaint& operator=(SkPaint&& paint);
+
+ /** Compares a and b, and returns true if a and b are equivalent. May return false
+ if SkPathEffect, SkShader, SkMaskFilter, SkColorFilter,
+ or SkImageFilter have identical contents but different pointers.
+
+ @param a SkPaint to compare
+ @param b SkPaint to compare
+ @return true if SkPaint pair are equivalent
+ */
+ SK_API friend bool operator==(const SkPaint& a, const SkPaint& b);
+
+ /** Compares a and b, and returns true if a and b are not equivalent. May return true
+ if SkPathEffect, SkShader, SkMaskFilter, SkColorFilter,
+ or SkImageFilter have identical contents but different pointers.
+
+ @param a SkPaint to compare
+ @param b SkPaint to compare
+ @return true if SkPaint pair are not equivalent
+ */
+ friend bool operator!=(const SkPaint& a, const SkPaint& b) {
+ return !(a == b);
+ }
+
+ /** Sets all SkPaint contents to their initial values. This is equivalent to replacing
+ SkPaint with the result of SkPaint().
+
+ example: https://fiddle.skia.org/c/@Paint_reset
+ */
+ void reset();
+
+ /** Returns true if pixels on the active edges of SkPath may be drawn with partial transparency.
+ @return antialiasing state
+ */
+ bool isAntiAlias() const {
+ return SkToBool(fBitfields.fAntiAlias);
+ }
+
+ /** Requests, but does not require, that edge pixels draw opaque or with
+ partial transparency.
+ @param aa setting for antialiasing
+ */
+ void setAntiAlias(bool aa) { fBitfields.fAntiAlias = static_cast<unsigned>(aa); }
+
+ /** Returns true if color error may be distributed to smooth color transition.
+ @return dithering state
+ */
+ bool isDither() const {
+ return SkToBool(fBitfields.fDither);
+ }
+
+ /** Requests, but does not require, to distribute color error.
+ @param dither setting for dithering
+ */
+ void setDither(bool dither) { fBitfields.fDither = static_cast<unsigned>(dither); }
+
+ /** \enum SkPaint::Style
+ Set Style to fill, stroke, or both fill and stroke geometry.
+ The stroke and fill
+ share all paint attributes; for instance, they are drawn with the same color.
+
+ Use kStrokeAndFill_Style to avoid hitting the same pixels twice with a stroke draw and
+ a fill draw.
+ */
+ enum Style : uint8_t {
+ kFill_Style, //!< set to fill geometry
+ kStroke_Style, //!< set to stroke geometry
+ kStrokeAndFill_Style, //!< sets to stroke and fill geometry
+ };
+
+ /** May be used to verify that SkPaint::Style is a legal value.
+ */
+ static constexpr int kStyleCount = kStrokeAndFill_Style + 1;
+
+ /** Returns whether the geometry is filled, stroked, or filled and stroked.
+ */
+ Style getStyle() const { return (Style)fBitfields.fStyle; }
+
+ /** Sets whether the geometry is filled, stroked, or filled and stroked.
+ Has no effect if style is not a legal SkPaint::Style value.
+
+ example: https://fiddle.skia.org/c/@Paint_setStyle
+ example: https://fiddle.skia.org/c/@Stroke_Width
+ */
+ void setStyle(Style style);
+
+ /**
+ * Set paint's style to kStroke if true, or kFill if false.
+ */
+ void setStroke(bool);
+
+ /** Retrieves alpha and RGB, unpremultiplied, packed into 32 bits.
+ Use helpers SkColorGetA(), SkColorGetR(), SkColorGetG(), and SkColorGetB() to extract
+ a color component.
+
+ @return unpremultiplied ARGB
+ */
+ SkColor getColor() const { return fColor4f.toSkColor(); }
+
+ /** Retrieves alpha and RGB, unpremultiplied, as four floating point values. RGB are
+ extended sRGB values (sRGB gamut, and encoded with the sRGB transfer function).
+
+ @return unpremultiplied RGBA
+ */
+ SkColor4f getColor4f() const { return fColor4f; }
+
+ /** Sets alpha and RGB used when stroking and filling. The color is a 32-bit value,
+ unpremultiplied, packing 8-bit components for alpha, red, blue, and green.
+
+ @param color unpremultiplied ARGB
+
+ example: https://fiddle.skia.org/c/@Paint_setColor
+ */
+ void setColor(SkColor color);
+
+ /** Sets alpha and RGB used when stroking and filling. The color is four floating
+ point values, unpremultiplied. The color values are interpreted as being in
+ the colorSpace. If colorSpace is nullptr, then color is assumed to be in the
+ sRGB color space.
+
+ @param color unpremultiplied RGBA
+ @param colorSpace SkColorSpace describing the encoding of color
+ */
+ void setColor(const SkColor4f& color, SkColorSpace* colorSpace = nullptr);
+
+ void setColor4f(const SkColor4f& color, SkColorSpace* colorSpace = nullptr) {
+ this->setColor(color, colorSpace);
+ }
+
+ /** Retrieves alpha from the color used when stroking and filling.
+
+ @return alpha ranging from zero, fully transparent, to one, fully opaque
+ */
+ float getAlphaf() const { return fColor4f.fA; }
+
+ // Helper that scales the alpha by 255.
+ uint8_t getAlpha() const {
+ return static_cast<uint8_t>(sk_float_round2int(this->getAlphaf() * 255));
+ }
+
+ /** Replaces alpha, leaving RGB
+ unchanged. An out of range value triggers an assert in the debug
+ build. a is a value from 0.0 to 1.0.
+ a set to zero makes color fully transparent; a set to 1.0 makes color
+ fully opaque.
+
+ @param a alpha component of color
+ */
+ void setAlphaf(float a);
+
+ // Helper that accepts an int between 0 and 255, and divides it by 255.0
+ void setAlpha(U8CPU a) {
+ this->setAlphaf(a * (1.0f / 255));
+ }
+
+ /** Sets color used when drawing solid fills. The color components range from 0 to 255.
+ The color is unpremultiplied; alpha sets the transparency independent of RGB.
+
+ @param a amount of alpha, from fully transparent (0) to fully opaque (255)
+ @param r amount of red, from no red (0) to full red (255)
+ @param g amount of green, from no green (0) to full green (255)
+ @param b amount of blue, from no blue (0) to full blue (255)
+
+ example: https://fiddle.skia.org/c/@Paint_setARGB
+ */
+ void setARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+
+ /** Returns the thickness of the pen used by SkPaint to
+ outline the shape.
+
+ @return zero for hairline, greater than zero for pen thickness
+ */
+ SkScalar getStrokeWidth() const { return fWidth; }
+
+ /** Sets the thickness of the pen used by the paint to outline the shape.
+ A stroke-width of zero is treated as "hairline" width. Hairlines are always exactly one
+ pixel wide in device space (their thickness does not change as the canvas is scaled).
+ Negative stroke-widths are invalid; setting a negative width will have no effect.
+
+ @param width zero thickness for hairline; greater than zero for pen thickness
+
+ example: https://fiddle.skia.org/c/@Miter_Limit
+ example: https://fiddle.skia.org/c/@Paint_setStrokeWidth
+ */
+ void setStrokeWidth(SkScalar width);
+
+ /** Returns the limit at which a sharp corner is drawn beveled.
+
+ @return zero and greater miter limit
+ */
+ SkScalar getStrokeMiter() const { return fMiterLimit; }
+
+ /** Sets the limit at which a sharp corner is drawn beveled.
+ Valid values are zero and greater.
+ Has no effect if miter is less than zero.
+
+ @param miter zero and greater miter limit
+
+ example: https://fiddle.skia.org/c/@Paint_setStrokeMiter
+ */
+ void setStrokeMiter(SkScalar miter);
+
+ /** \enum SkPaint::Cap
+ Cap draws at the beginning and end of an open path contour.
+ */
+ enum Cap {
+ kButt_Cap, //!< no stroke extension
+ kRound_Cap, //!< adds circle
+ kSquare_Cap, //!< adds square
+ kLast_Cap = kSquare_Cap, //!< largest Cap value
+ kDefault_Cap = kButt_Cap, //!< equivalent to kButt_Cap
+ };
+
+ /** May be used to verify that SkPaint::Cap is a legal value.
+ */
+ static constexpr int kCapCount = kLast_Cap + 1;
+
+ /** \enum SkPaint::Join
+ Join specifies how corners are drawn when a shape is stroked. Join
+ affects the four corners of a stroked rectangle, and the connected segments in a
+ stroked path.
+
+ Choose miter join to draw sharp corners. Choose round join to draw a circle with a
+ radius equal to the stroke width on top of the corner. Choose bevel join to minimally
+ connect the thick strokes.
+
+ The fill path constructed to describe the stroked path respects the join setting but may
+ not contain the actual join. For instance, a fill path constructed with round joins does
+ not necessarily include circles at each connected segment.
+ */
+ enum Join : uint8_t {
+ kMiter_Join, //!< extends to miter limit
+ kRound_Join, //!< adds circle
+ kBevel_Join, //!< connects outside edges
+ kLast_Join = kBevel_Join, //!< equivalent to the largest value for Join
+ kDefault_Join = kMiter_Join, //!< equivalent to kMiter_Join
+ };
+
+ /** May be used to verify that SkPaint::Join is a legal value.
+ */
+ static constexpr int kJoinCount = kLast_Join + 1;
+
+ /** Returns the geometry drawn at the beginning and end of strokes.
+ */
+ Cap getStrokeCap() const { return (Cap)fBitfields.fCapType; }
+
+ /** Sets the geometry drawn at the beginning and end of strokes.
+
+ example: https://fiddle.skia.org/c/@Paint_setStrokeCap_a
+ example: https://fiddle.skia.org/c/@Paint_setStrokeCap_b
+ */
+ void setStrokeCap(Cap cap);
+
+ /** Returns the geometry drawn at the corners of strokes.
+ */
+ Join getStrokeJoin() const { return (Join)fBitfields.fJoinType; }
+
+ /** Sets the geometry drawn at the corners of strokes.
+
+ example: https://fiddle.skia.org/c/@Paint_setStrokeJoin
+ */
+ void setStrokeJoin(Join join);
+
+ /** Returns optional colors used when filling a path, such as a gradient.
+
+ Does not alter SkShader SkRefCnt.
+
+ @return SkShader if previously set, nullptr otherwise
+ */
+ SkShader* getShader() const { return fShader.get(); }
+
+ /** Returns optional colors used when filling a path, such as a gradient.
+
+ Increases SkShader SkRefCnt by one.
+
+ @return SkShader if previously set, nullptr otherwise
+
+ example: https://fiddle.skia.org/c/@Paint_refShader
+ */
+ sk_sp<SkShader> refShader() const;
+
+ /** Sets optional colors used when filling a path, such as a gradient.
+
+ Sets SkShader to shader, decreasing SkRefCnt of the previous SkShader.
+ Increments shader SkRefCnt by one.
+
+ @param shader how geometry is filled with color; if nullptr, color is used instead
+
+ example: https://fiddle.skia.org/c/@Color_Filter_Methods
+ example: https://fiddle.skia.org/c/@Paint_setShader
+ */
+ void setShader(sk_sp<SkShader> shader);
+
+ /** Returns SkColorFilter if set, or nullptr.
+ Does not alter SkColorFilter SkRefCnt.
+
+ @return SkColorFilter if previously set, nullptr otherwise
+ */
+ SkColorFilter* getColorFilter() const { return fColorFilter.get(); }
+
+ /** Returns SkColorFilter if set, or nullptr.
+ Increases SkColorFilter SkRefCnt by one.
+
+ @return SkColorFilter if set, or nullptr
+
+ example: https://fiddle.skia.org/c/@Paint_refColorFilter
+ */
+ sk_sp<SkColorFilter> refColorFilter() const;
+
+ /** Sets SkColorFilter to filter, decreasing SkRefCnt of the previous
+ SkColorFilter. Pass nullptr to clear SkColorFilter.
+
+ Increments filter SkRefCnt by one.
+
+ @param colorFilter SkColorFilter to apply to subsequent draw
+
+ example: https://fiddle.skia.org/c/@Blend_Mode_Methods
+ example: https://fiddle.skia.org/c/@Paint_setColorFilter
+ */
+ void setColorFilter(sk_sp<SkColorFilter> colorFilter);
+
+ /** If the current blender can be represented as a SkBlendMode enum, this returns that
+ * enum in the optional's value(). If it cannot, then the returned optional does not
+ * contain a value.
+ */
+ std::optional<SkBlendMode> asBlendMode() const;
+
+ /**
+ * Queries the blender, and if it can be represented as a SkBlendMode, return that mode,
+ * else return the defaultMode provided.
+ */
+ SkBlendMode getBlendMode_or(SkBlendMode defaultMode) const;
+
+ /** Returns true iff the current blender claims to be equivalent to SkBlendMode::kSrcOver.
+ *
+     *  Also returns true if the current blender is nullptr.
+ */
+ bool isSrcOver() const;
+
+ /** Helper method for calling setBlender().
+ *
+ * This sets a blender that implements the specified blendmode enum.
+ */
+ void setBlendMode(SkBlendMode mode);
+
+ /** Returns the user-supplied blend function, if one has been set.
+ * Does not alter SkBlender's SkRefCnt.
+ *
+ * A nullptr blender signifies the default SrcOver behavior.
+ *
+ * @return the SkBlender assigned to this paint, otherwise nullptr
+ */
+ SkBlender* getBlender() const { return fBlender.get(); }
+
+ /** Returns the user-supplied blend function, if one has been set.
+ * Increments the SkBlender's SkRefCnt by one.
+ *
+ * A nullptr blender signifies the default SrcOver behavior.
+ *
+ * @return the SkBlender assigned to this paint, otherwise nullptr
+ */
+ sk_sp<SkBlender> refBlender() const;
+
+ /** Sets the current blender, increasing its refcnt, and if a blender is already
+ * present, decreasing that object's refcnt.
+ *
+ * A nullptr blender signifies the default SrcOver behavior.
+ *
+ * For convenience, you can call setBlendMode() if the blend effect can be expressed
+ * as one of those values.
+ */
+ void setBlender(sk_sp<SkBlender> blender);
+
+ /** Returns SkPathEffect if set, or nullptr.
+ Does not alter SkPathEffect SkRefCnt.
+
+ @return SkPathEffect if previously set, nullptr otherwise
+ */
+ SkPathEffect* getPathEffect() const { return fPathEffect.get(); }
+
+ /** Returns SkPathEffect if set, or nullptr.
+ Increases SkPathEffect SkRefCnt by one.
+
+ @return SkPathEffect if previously set, nullptr otherwise
+
+ example: https://fiddle.skia.org/c/@Paint_refPathEffect
+ */
+ sk_sp<SkPathEffect> refPathEffect() const;
+
+ /** Sets SkPathEffect to pathEffect, decreasing SkRefCnt of the previous
+ SkPathEffect. Pass nullptr to leave the path geometry unaltered.
+
+ Increments pathEffect SkRefCnt by one.
+
+ @param pathEffect replace SkPath with a modification when drawn
+
+ example: https://fiddle.skia.org/c/@Mask_Filter_Methods
+ example: https://fiddle.skia.org/c/@Paint_setPathEffect
+ */
+ void setPathEffect(sk_sp<SkPathEffect> pathEffect);
+
+ /** Returns SkMaskFilter if set, or nullptr.
+ Does not alter SkMaskFilter SkRefCnt.
+
+ @return SkMaskFilter if previously set, nullptr otherwise
+ */
+ SkMaskFilter* getMaskFilter() const { return fMaskFilter.get(); }
+
+ /** Returns SkMaskFilter if set, or nullptr.
+
+ Increases SkMaskFilter SkRefCnt by one.
+
+ @return SkMaskFilter if previously set, nullptr otherwise
+
+ example: https://fiddle.skia.org/c/@Paint_refMaskFilter
+ */
+ sk_sp<SkMaskFilter> refMaskFilter() const;
+
+ /** Sets SkMaskFilter to maskFilter, decreasing SkRefCnt of the previous
+ SkMaskFilter. Pass nullptr to clear SkMaskFilter and leave SkMaskFilter effect on
+ mask alpha unaltered.
+
+ Increments maskFilter SkRefCnt by one.
+
+ @param maskFilter modifies clipping mask generated from drawn geometry
+
+ example: https://fiddle.skia.org/c/@Paint_setMaskFilter
+ example: https://fiddle.skia.org/c/@Typeface_Methods
+ */
+ void setMaskFilter(sk_sp<SkMaskFilter> maskFilter);
+
+ /** Returns SkImageFilter if set, or nullptr.
+ Does not alter SkImageFilter SkRefCnt.
+
+ @return SkImageFilter if previously set, nullptr otherwise
+ */
+ SkImageFilter* getImageFilter() const { return fImageFilter.get(); }
+
+ /** Returns SkImageFilter if set, or nullptr.
+ Increases SkImageFilter SkRefCnt by one.
+
+ @return SkImageFilter if previously set, nullptr otherwise
+
+ example: https://fiddle.skia.org/c/@Paint_refImageFilter
+ */
+ sk_sp<SkImageFilter> refImageFilter() const;
+
+ /** Sets SkImageFilter to imageFilter, decreasing SkRefCnt of the previous
+ SkImageFilter. Pass nullptr to clear SkImageFilter, and remove SkImageFilter effect
+ on drawing.
+
+ Increments imageFilter SkRefCnt by one.
+
+ @param imageFilter how SkImage is sampled when transformed
+
+ example: https://fiddle.skia.org/c/@Paint_setImageFilter
+ */
+ void setImageFilter(sk_sp<SkImageFilter> imageFilter);
+
+ /** Returns true if SkPaint prevents all drawing;
+ otherwise, the SkPaint may or may not allow drawing.
+
+ Returns true if, for example, SkBlendMode combined with alpha computes a
+ new alpha of zero.
+
+ @return true if SkPaint prevents all drawing
+
+ example: https://fiddle.skia.org/c/@Paint_nothingToDraw
+ */
+ bool nothingToDraw() const;
+
+ /** (to be made private)
+ Returns true if SkPaint does not include elements requiring extensive computation
+ to compute SkBaseDevice bounds of drawn geometry. For instance, SkPaint with SkPathEffect
+ always returns false.
+
+ @return true if SkPaint allows for fast computation of bounds
+ */
+ bool canComputeFastBounds() const;
+
+ /** (to be made private)
+ Only call this if canComputeFastBounds() returned true. This takes a
+ raw rectangle (the raw bounds of a shape), and adjusts it for stylistic
+ effects in the paint (e.g. stroking). If needed, it uses the storage
+ parameter. It returns the adjusted bounds that can then be used
+ for SkCanvas::quickReject tests.
+
+ The returned SkRect will either be orig or storage, thus the caller
+ should not rely on storage being set to the result, but should always
+ use the returned value. It is legal for orig and storage to be the same
+ SkRect.
+ For example:
+ if (!path.isInverseFillType() && paint.canComputeFastBounds()) {
+ SkRect storage;
+ if (canvas->quickReject(paint.computeFastBounds(path.getBounds(), &storage))) {
+ return; // do not draw the path
+ }
+ }
+ // draw the path
+
+ @param orig geometry modified by SkPaint when drawn
+ @param storage computed bounds of geometry; may not be nullptr
+ @return fast computed bounds
+ */
+ const SkRect& computeFastBounds(const SkRect& orig, SkRect* storage) const;
+
+ /** (to be made private)
+
+ @param orig geometry modified by SkPaint when drawn
+ @param storage computed bounds of geometry
+ @return fast computed bounds
+ */
+ const SkRect& computeFastStrokeBounds(const SkRect& orig,
+ SkRect* storage) const {
+ return this->doComputeFastBounds(orig, storage, kStroke_Style);
+ }
+
+ /** (to be made private)
+ Computes the bounds, overriding the SkPaint SkPaint::Style. This can be used to
+ account for additional width required by stroking orig, without
+ altering SkPaint::Style set to fill.
+
+ @param orig geometry modified by SkPaint when drawn
+ @param storage computed bounds of geometry
+ @param style overrides SkPaint::Style
+ @return fast computed bounds
+ */
+ const SkRect& doComputeFastBounds(const SkRect& orig, SkRect* storage,
+ Style style) const;
+
+ using sk_is_trivially_relocatable = std::true_type;
+
+private:
+ sk_sp<SkPathEffect> fPathEffect;
+ sk_sp<SkShader> fShader;
+ sk_sp<SkMaskFilter> fMaskFilter;
+ sk_sp<SkColorFilter> fColorFilter;
+ sk_sp<SkImageFilter> fImageFilter;
+ sk_sp<SkBlender> fBlender;
+
+ SkColor4f fColor4f;
+ SkScalar fWidth;
+ SkScalar fMiterLimit;
+ union {
+ struct {
+ unsigned fAntiAlias : 1;
+ unsigned fDither : 1;
+ unsigned fCapType : 2;
+ unsigned fJoinType : 2;
+ unsigned fStyle : 2;
+ unsigned fPadding : 24; // 24 == 32 -1-1-2-2-2
+ } fBitfields;
+ uint32_t fBitfieldsUInt;
+ };
+
+ static_assert(::sk_is_trivially_relocatable<decltype(fPathEffect)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fShader)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fMaskFilter)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fColorFilter)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fImageFilter)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fBlender)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fColor4f)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fBitfields)>::value);
+
+ friend class SkPaintPriv;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPath.h b/gfx/skia/skia/include/core/SkPath.h
new file mode 100644
index 0000000000..8858e7d3c8
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPath.h
@@ -0,0 +1,1890 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPath_DEFINED
+#define SkPath_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/base/SkTypeTraits.h"
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+#include <tuple>
+#include <type_traits>
+
+class SkData;
+class SkPathRef;
+class SkRRect;
+class SkWStream;
+enum class SkPathConvexity;
+enum class SkPathFirstDirection;
+
+// WIP -- define this locally, and fix call-sites to use SkPathBuilder (skbug.com/9000)
+//#define SK_HIDE_PATH_EDIT_METHODS
+
+/** \class SkPath
+ SkPath contain geometry. SkPath may be empty, or contain one or more verbs that
+ outline a figure. SkPath always starts with a move verb to a Cartesian coordinate,
+ and may be followed by additional verbs that add lines or curves.
+ Adding a close verb makes the geometry into a continuous loop, a closed contour.
+ SkPath may contain any number of contours, each beginning with a move verb.
+
+ SkPath contours may contain only a move verb, or may also contain lines,
+ quadratic beziers, conics, and cubic beziers. SkPath contours may be open or
+ closed.
+
+ When used to draw a filled area, SkPath describes whether the fill is inside or
+ outside the geometry. SkPath also describes the winding rule used to fill
+ overlapping contours.
+
+    Internally, SkPath lazily computes metrics like bounds and convexity. Call
+ SkPath::updateBoundsCache to make SkPath thread safe.
+*/
+class SK_API SkPath {
+public:
+ /**
+ * Create a new path with the specified segments.
+ *
+ * The points and weights arrays are read in order, based on the sequence of verbs.
+ *
+ * Move 1 point
+ * Line 1 point
+ * Quad 2 points
+ * Conic 2 points and 1 weight
+ * Cubic 3 points
+ * Close 0 points
+ *
+ * If an illegal sequence of verbs is encountered, or the specified number of points
+ * or weights is not sufficient given the verbs, an empty Path is returned.
+ *
+ * A legal sequence of verbs consists of any number of Contours. A contour always begins
+ * with a Move verb, followed by 0 or more segments: Line, Quad, Conic, Cubic, followed
+ * by an optional Close.
+ */
+ static SkPath Make(const SkPoint[], int pointCount,
+ const uint8_t[], int verbCount,
+ const SkScalar[], int conicWeightCount,
+ SkPathFillType, bool isVolatile = false);
+
+ static SkPath Rect(const SkRect&, SkPathDirection = SkPathDirection::kCW,
+ unsigned startIndex = 0);
+ static SkPath Oval(const SkRect&, SkPathDirection = SkPathDirection::kCW);
+ static SkPath Oval(const SkRect&, SkPathDirection, unsigned startIndex);
+ static SkPath Circle(SkScalar center_x, SkScalar center_y, SkScalar radius,
+ SkPathDirection dir = SkPathDirection::kCW);
+ static SkPath RRect(const SkRRect&, SkPathDirection dir = SkPathDirection::kCW);
+ static SkPath RRect(const SkRRect&, SkPathDirection, unsigned startIndex);
+ static SkPath RRect(const SkRect& bounds, SkScalar rx, SkScalar ry,
+ SkPathDirection dir = SkPathDirection::kCW);
+
+ static SkPath Polygon(const SkPoint pts[], int count, bool isClosed,
+ SkPathFillType = SkPathFillType::kWinding,
+ bool isVolatile = false);
+
+ static SkPath Polygon(const std::initializer_list<SkPoint>& list, bool isClosed,
+ SkPathFillType fillType = SkPathFillType::kWinding,
+ bool isVolatile = false) {
+ return Polygon(list.begin(), SkToInt(list.size()), isClosed, fillType, isVolatile);
+ }
+
+ static SkPath Line(const SkPoint a, const SkPoint b) {
+ return Polygon({a, b}, false);
+ }
+
+ /** Constructs an empty SkPath. By default, SkPath has no verbs, no SkPoint, and no weights.
+ FillType is set to kWinding.
+
+ @return empty SkPath
+
+ example: https://fiddle.skia.org/c/@Path_empty_constructor
+ */
+ SkPath();
+
+ /** Constructs a copy of an existing path.
+ Copy constructor makes two paths identical by value. Internally, path and
+ the returned result share pointer values. The underlying verb array, SkPoint array
+ and weights are copied when modified.
+
+ Creating a SkPath copy is very efficient and never allocates memory.
+ SkPath are always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param path SkPath to copy by value
+ @return copy of SkPath
+
+ example: https://fiddle.skia.org/c/@Path_copy_const_SkPath
+ */
+ SkPath(const SkPath& path);
+
+ /** Releases ownership of any shared data and deletes data if SkPath is sole owner.
+
+ example: https://fiddle.skia.org/c/@Path_destructor
+ */
+ ~SkPath();
+
+ /** Constructs a copy of an existing path.
+ SkPath assignment makes two paths identical by value. Internally, assignment
+ shares pointer values. The underlying verb array, SkPoint array and weights
+ are copied when modified.
+
+ Copying SkPath by assignment is very efficient and never allocates memory.
+ SkPath are always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param path verb array, SkPoint array, weights, and SkPath::FillType to copy
+ @return SkPath copied by value
+
+ example: https://fiddle.skia.org/c/@Path_copy_operator
+ */
+ SkPath& operator=(const SkPath& path);
+
+ /** Compares a and b; returns true if SkPath::FillType, verb array, SkPoint array, and weights
+ are equivalent.
+
+ @param a SkPath to compare
+ @param b SkPath to compare
+ @return true if SkPath pair are equivalent
+ */
+ friend SK_API bool operator==(const SkPath& a, const SkPath& b);
+
+ /** Compares a and b; returns true if SkPath::FillType, verb array, SkPoint array, and weights
+ are not equivalent.
+
+ @param a SkPath to compare
+ @param b SkPath to compare
+ @return true if SkPath pair are not equivalent
+ */
+ friend bool operator!=(const SkPath& a, const SkPath& b) {
+ return !(a == b);
+ }
+
+ /** Returns true if SkPath contain equal verbs and equal weights.
+ If SkPath contain one or more conics, the weights must match.
+
+ conicTo() may add different verbs depending on conic weight, so it is not
+ trivial to interpolate a pair of SkPath containing conics with different
+ conic weight values.
+
+ @param compare SkPath to compare
+ @return true if SkPath verb array and weights are equivalent
+
+ example: https://fiddle.skia.org/c/@Path_isInterpolatable
+ */
+ bool isInterpolatable(const SkPath& compare) const;
+
+ /** Interpolates between SkPath with SkPoint array of equal size.
+ Copy verb array and weights to out, and set out SkPoint array to a weighted
+ average of this SkPoint array and ending SkPoint array, using the formula:
+ (Path Point * weight) + ending Point * (1 - weight).
+
+ weight is most useful when between zero (ending SkPoint array) and
+ one (this Point_Array); will work with values outside of this
+ range.
+
+ interpolate() returns false and leaves out unchanged if SkPoint array is not
+ the same size as ending SkPoint array. Call isInterpolatable() to check SkPath
+ compatibility prior to calling interpolate().
+
+ @param ending SkPoint array averaged with this SkPoint array
+ @param weight contribution of this SkPoint array, and
+ one minus contribution of ending SkPoint array
+ @param out SkPath replaced by interpolated averages
+ @return true if SkPath contain same number of SkPoint
+
+ example: https://fiddle.skia.org/c/@Path_interpolate
+ */
+ bool interpolate(const SkPath& ending, SkScalar weight, SkPath* out) const;
+
+ /** Returns SkPathFillType, the rule used to fill SkPath.
+
+ @return current SkPathFillType setting
+ */
+ SkPathFillType getFillType() const { return (SkPathFillType)fFillType; }
+
+ /** Sets FillType, the rule used to fill SkPath. While there is no check
+ that ft is legal, values outside of FillType are not supported.
+ */
+ void setFillType(SkPathFillType ft) {
+ fFillType = SkToU8(ft);
+ }
+
+ /** Returns if FillType describes area outside SkPath geometry. The inverse fill area
+ extends indefinitely.
+
+ @return true if FillType is kInverseWinding or kInverseEvenOdd
+ */
+ bool isInverseFillType() const { return SkPathFillType_IsInverse(this->getFillType()); }
+
+ /** Replaces FillType with its inverse. The inverse of FillType describes the area
+ unmodified by the original FillType.
+ */
+ void toggleInverseFillType() {
+ fFillType ^= 2;
+ }
+
+ /** Returns true if the path is convex. If necessary, it will first compute the convexity.
+ */
+ bool isConvex() const;
+
+ /** Returns true if this path is recognized as an oval or circle.
+
+ bounds receives bounds of oval.
+
+ bounds is unmodified if oval is not found.
+
+ @param bounds storage for bounding SkRect of oval; may be nullptr
+ @return true if SkPath is recognized as an oval or circle
+
+ example: https://fiddle.skia.org/c/@Path_isOval
+ */
+ bool isOval(SkRect* bounds) const;
+
+ /** Returns true if path is representable as SkRRect.
+ Returns false if path is representable as oval, circle, or SkRect.
+
+ rrect receives bounds of SkRRect.
+
+ rrect is unmodified if SkRRect is not found.
+
+ @param rrect storage for bounding SkRect of SkRRect; may be nullptr
+ @return true if SkPath contains only SkRRect
+
+ example: https://fiddle.skia.org/c/@Path_isRRect
+ */
+ bool isRRect(SkRRect* rrect) const;
+
+ /** Sets SkPath to its initial state.
+ Removes verb array, SkPoint array, and weights, and sets FillType to kWinding.
+ Internal storage associated with SkPath is released.
+
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_reset
+ */
+ SkPath& reset();
+
+ /** Sets SkPath to its initial state, preserving internal storage.
+ Removes verb array, SkPoint array, and weights, and sets FillType to kWinding.
+ Internal storage associated with SkPath is retained.
+
+ Use rewind() instead of reset() if SkPath storage will be reused and performance
+ is critical.
+
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_rewind
+ */
+ SkPath& rewind();
+
+ /** Returns if SkPath is empty.
+ Empty SkPath may have FillType but has no SkPoint, SkPath::Verb, or conic weight.
+ SkPath() constructs empty SkPath; reset() and rewind() make SkPath empty.
+
+ @return true if the path contains no SkPath::Verb array
+ */
+ bool isEmpty() const;
+
+ /** Returns if contour is closed.
+ Contour is closed if SkPath SkPath::Verb array was last modified by close(). When stroked,
+ closed contour draws SkPaint::Join instead of SkPaint::Cap at first and last SkPoint.
+
+ @return true if the last contour ends with a kClose_Verb
+
+ example: https://fiddle.skia.org/c/@Path_isLastContourClosed
+ */
+ bool isLastContourClosed() const;
+
+ /** Returns true for finite SkPoint array values between negative SK_ScalarMax and
+ positive SK_ScalarMax. Returns false for any SkPoint array value of
+ SK_ScalarInfinity, SK_ScalarNegativeInfinity, or SK_ScalarNaN.
+
+ @return true if all SkPoint values are finite
+ */
+ bool isFinite() const;
+
+ /** Returns true if the path is volatile; it will not be altered or discarded
+ by the caller after it is drawn. SkPath by default have volatile set false, allowing
+ SkSurface to attach a cache of data which speeds repeated drawing. If true, SkSurface
+ may not speed repeated drawing.
+
+ @return true if caller will alter SkPath after drawing
+ */
+ bool isVolatile() const {
+ return SkToBool(fIsVolatile);
+ }
+
+ /** Specifies whether SkPath is volatile; whether it will be altered or discarded
+ by the caller after it is drawn. SkPath by default have volatile set false, allowing
+ SkBaseDevice to attach a cache of data which speeds repeated drawing.
+
+ Mark temporary paths, discarded or modified after use, as volatile
+ to inform SkBaseDevice that the path need not be cached.
+
+ Mark animating SkPath volatile to improve performance.
+ Mark unchanging SkPath non-volatile to improve repeated rendering.
+
+ raster surface SkPath draws are affected by volatile for some shadows.
+ GPU surface SkPath draws are affected by volatile for some shadows and concave geometries.
+
+ @param isVolatile true if caller will alter SkPath after drawing
+ @return reference to SkPath
+ */
+ SkPath& setIsVolatile(bool isVolatile) {
+ fIsVolatile = isVolatile;
+ return *this;
+ }
+
+ /** Tests if line between SkPoint pair is degenerate.
+ Line with no length or that moves a very short distance is degenerate; it is
+ treated as a point.
+
+ exact changes the equality test. If true, returns true only if p1 equals p2.
+ If false, returns true if p1 equals or nearly equals p2.
+
+ @param p1 line start point
+ @param p2 line end point
+ @param exact if false, allow nearly equals
+ @return true if line is degenerate; its length is effectively zero
+
+ example: https://fiddle.skia.org/c/@Path_IsLineDegenerate
+ */
+ static bool IsLineDegenerate(const SkPoint& p1, const SkPoint& p2, bool exact);
+
+ /** Tests if quad is degenerate.
+ Quad with no length or that moves a very short distance is degenerate; it is
+ treated as a point.
+
+ @param p1 quad start point
+ @param p2 quad control point
+ @param p3 quad end point
+ @param exact if true, returns true only if p1, p2, and p3 are equal;
+ if false, returns true if p1, p2, and p3 are equal or nearly equal
+ @return true if quad is degenerate; its length is effectively zero
+ */
+ static bool IsQuadDegenerate(const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p3, bool exact);
+
+ /** Tests if cubic is degenerate.
+ Cubic with no length or that moves a very short distance is degenerate; it is
+ treated as a point.
+
+ @param p1 cubic start point
+ @param p2 cubic control point 1
+ @param p3 cubic control point 2
+ @param p4 cubic end point
+ @param exact if true, returns true only if p1, p2, p3, and p4 are equal;
+ if false, returns true if p1, p2, p3, and p4 are equal or nearly equal
+ @return true if cubic is degenerate; its length is effectively zero
+ */
+ static bool IsCubicDegenerate(const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p3, const SkPoint& p4, bool exact);
+
+ /** Returns true if SkPath contains only one line;
+ SkPath::Verb array has two entries: kMove_Verb, kLine_Verb.
+ If SkPath contains one line and line is not nullptr, line is set to
+ line start point and line end point.
+ Returns false if SkPath is not one line; line is unaltered.
+
+ @param line storage for line. May be nullptr
+ @return true if SkPath contains exactly one line
+
+ example: https://fiddle.skia.org/c/@Path_isLine
+ */
+ bool isLine(SkPoint line[2]) const;
+
+ /** Returns the number of points in SkPath.
+ SkPoint count is initially zero.
+
+ @return SkPath SkPoint array length
+
+ example: https://fiddle.skia.org/c/@Path_countPoints
+ */
+ int countPoints() const;
+
+ /** Returns SkPoint at index in SkPoint array. Valid range for index is
+ 0 to countPoints() - 1.
+ Returns (0, 0) if index is out of range.
+
+ @param index SkPoint array element selector
+ @return SkPoint array value or (0, 0)
+
+ example: https://fiddle.skia.org/c/@Path_getPoint
+ */
+ SkPoint getPoint(int index) const;
+
+ /** Returns number of points in SkPath. Up to max points are copied.
+ points may be nullptr; then, max must be zero.
+ If max is greater than number of points, excess points storage is unaltered.
+
+ @param points storage for SkPath SkPoint array. May be nullptr
+ @param max maximum to copy; must be greater than or equal to zero
+ @return SkPath SkPoint array length
+
+ example: https://fiddle.skia.org/c/@Path_getPoints
+ */
+ int getPoints(SkPoint points[], int max) const;
+
+ /** Returns the number of verbs: kMove_Verb, kLine_Verb, kQuad_Verb, kConic_Verb,
+ kCubic_Verb, and kClose_Verb; added to SkPath.
+
+ @return length of verb array
+
+ example: https://fiddle.skia.org/c/@Path_countVerbs
+ */
+ int countVerbs() const;
+
+ /** Returns the number of verbs in the path. Up to max verbs are copied. The
+ verbs are copied as one byte per verb.
+
+ @param verbs storage for verbs, may be nullptr
+ @param max maximum number to copy into verbs
+ @return the actual number of verbs in the path
+
+ example: https://fiddle.skia.org/c/@Path_getVerbs
+ */
+ int getVerbs(uint8_t verbs[], int max) const;
+
+ /** Returns the approximate byte size of the SkPath in memory.
+
+ @return approximate size
+ */
+ size_t approximateBytesUsed() const;
+
+ /** Exchanges the verb array, SkPoint array, weights, and SkPath::FillType with other.
+ Cached state is also exchanged. swap() internally exchanges pointers, so
+ it is lightweight and does not allocate memory.
+
+ swap() usage has largely been replaced by operator=(const SkPath& path).
+ SkPath do not copy their content on assignment until they are written to,
+ making assignment as efficient as swap().
+
+ @param other SkPath exchanged by value
+
+ example: https://fiddle.skia.org/c/@Path_swap
+ */
+ void swap(SkPath& other);
+
+ /** Returns minimum and maximum axes values of SkPoint array.
+ Returns (0, 0, 0, 0) if SkPath contains no points. Returned bounds width and height may
+ be larger or smaller than area affected when SkPath is drawn.
+
+ SkRect returned includes all SkPoint added to SkPath, including SkPoint associated with
+ kMove_Verb that define empty contours.
+
+ @return bounds of all SkPoint in SkPoint array
+ */
+ const SkRect& getBounds() const;
+
+ /** Updates internal bounds so that subsequent calls to getBounds() are instantaneous.
+ Unaltered copies of SkPath may also access cached bounds through getBounds().
+
+ For now, identical to calling getBounds() and ignoring the returned value.
+
+ Call to prepare SkPath subsequently drawn from multiple threads,
+ to avoid a race condition where each draw separately computes the bounds.
+ */
+ void updateBoundsCache() const {
+ // for now, just calling getBounds() is sufficient
+ this->getBounds();
+ }
+
+ /** Returns minimum and maximum axes values of the lines and curves in SkPath.
+ Returns (0, 0, 0, 0) if SkPath contains no points.
+ Returned bounds width and height may be larger or smaller than area affected
+ when SkPath is drawn.
+
+ Includes SkPoint associated with kMove_Verb that define empty
+ contours.
+
+ Behaves identically to getBounds() when SkPath contains
+ only lines. If SkPath contains curves, computed bounds includes
+ the maximum extent of the quad, conic, or cubic; is slower than getBounds();
+ and unlike getBounds(), does not cache the result.
+
+ @return tight bounds of curves in SkPath
+
+ example: https://fiddle.skia.org/c/@Path_computeTightBounds
+ */
+ SkRect computeTightBounds() const;
+
+ /** Returns true if rect is contained by SkPath.
+ May return false when rect is contained by SkPath.
+
+ For now, only returns true if SkPath has one contour and is convex.
+ rect may share points and edges with SkPath and be contained.
+ Returns true if rect is empty, that is, it has zero width or height; and
+ the SkPoint or line described by rect is contained by SkPath.
+
+ @param rect SkRect, line, or SkPoint checked for containment
+ @return true if rect is contained
+
+ example: https://fiddle.skia.org/c/@Path_conservativelyContainsRect
+ */
+ bool conservativelyContainsRect(const SkRect& rect) const;
+
+ /** Grows SkPath verb array and SkPoint array to contain extraPtCount additional SkPoint.
+ May improve performance and use less memory by
+ reducing the number and size of allocations when creating SkPath.
+
+ @param extraPtCount number of additional SkPoint to allocate
+
+ example: https://fiddle.skia.org/c/@Path_incReserve
+ */
+ void incReserve(int extraPtCount);
+
+#ifdef SK_HIDE_PATH_EDIT_METHODS
+private:
+#endif
+
+ /** Adds beginning of contour at SkPoint (x, y).
+
+ @param x x-axis value of contour start
+ @param y y-axis value of contour start
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_moveTo
+ */
+ SkPath& moveTo(SkScalar x, SkScalar y);
+
+ /** Adds beginning of contour at SkPoint p.
+
+ @param p contour start
+ @return reference to SkPath
+ */
+ SkPath& moveTo(const SkPoint& p) {
+ return this->moveTo(p.fX, p.fY);
+ }
+
+ /** Adds beginning of contour relative to last point.
+ If SkPath is empty, starts contour at (dx, dy).
+ Otherwise, start contour at last point offset by (dx, dy).
+ Function name stands for "relative move to".
+
+ @param dx offset from last point to contour start on x-axis
+ @param dy offset from last point to contour start on y-axis
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_rMoveTo
+ */
+ SkPath& rMoveTo(SkScalar dx, SkScalar dy);
+
+ /** Adds line from last point to (x, y). If SkPath is empty, or last SkPath::Verb is
+ kClose_Verb, last point is set to (0, 0) before adding line.
+
+ lineTo() appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed.
+ lineTo() then appends kLine_Verb to verb array and (x, y) to SkPoint array.
+
+ @param x end of added line on x-axis
+ @param y end of added line on y-axis
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_lineTo
+ */
+ SkPath& lineTo(SkScalar x, SkScalar y);
+
+ /** Adds line from last point to SkPoint p. If SkPath is empty, or last SkPath::Verb is
+ kClose_Verb, last point is set to (0, 0) before adding line.
+
+ lineTo() first appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed.
+ lineTo() then appends kLine_Verb to verb array and SkPoint p to SkPoint array.
+
+ @param p end SkPoint of added line
+ @return reference to SkPath
+ */
+ SkPath& lineTo(const SkPoint& p) {
+ return this->lineTo(p.fX, p.fY);
+ }
+
+ /** Adds line from last point to vector (dx, dy). If SkPath is empty, or last SkPath::Verb is
+ kClose_Verb, last point is set to (0, 0) before adding line.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kLine_Verb to verb array and line end to SkPoint array.
+ Line end is last point plus vector (dx, dy).
+ Function name stands for "relative line to".
+
+ @param dx offset from last point to line end on x-axis
+ @param dy offset from last point to line end on y-axis
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_rLineTo
+ example: https://fiddle.skia.org/c/@Quad_a
+ example: https://fiddle.skia.org/c/@Quad_b
+ */
+ SkPath& rLineTo(SkScalar dx, SkScalar dy);
+
+ /** Adds quad from last point towards (x1, y1), to (x2, y2).
+ If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0)
+ before adding quad.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kQuad_Verb to verb array; and (x1, y1), (x2, y2)
+ to SkPoint array.
+
+ @param x1 control SkPoint of quad on x-axis
+ @param y1 control SkPoint of quad on y-axis
+ @param x2 end SkPoint of quad on x-axis
+ @param y2 end SkPoint of quad on y-axis
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_quadTo
+ */
+ SkPath& quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2);
+
+ /** Adds quad from last point towards SkPoint p1, to SkPoint p2.
+ If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0)
+ before adding quad.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kQuad_Verb to verb array; and SkPoint p1, p2
+ to SkPoint array.
+
+ @param p1 control SkPoint of added quad
+ @param p2 end SkPoint of added quad
+ @return reference to SkPath
+ */
+ SkPath& quadTo(const SkPoint& p1, const SkPoint& p2) {
+ return this->quadTo(p1.fX, p1.fY, p2.fX, p2.fY);
+ }
+
+ /** Adds quad from last point towards vector (dx1, dy1), to vector (dx2, dy2).
+ If SkPath is empty, or last SkPath::Verb
+ is kClose_Verb, last point is set to (0, 0) before adding quad.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array,
+ if needed; then appends kQuad_Verb to verb array; and appends quad
+ control and quad end to SkPoint array.
+ Quad control is last point plus vector (dx1, dy1).
+ Quad end is last point plus vector (dx2, dy2).
+ Function name stands for "relative quad to".
+
+ @param dx1 offset from last point to quad control on x-axis
+ @param dy1 offset from last point to quad control on y-axis
+ @param dx2 offset from last point to quad end on x-axis
+ @param dy2 offset from last point to quad end on y-axis
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Conic_Weight_a
+ example: https://fiddle.skia.org/c/@Conic_Weight_b
+ example: https://fiddle.skia.org/c/@Conic_Weight_c
+ example: https://fiddle.skia.org/c/@Path_rQuadTo
+ */
+ SkPath& rQuadTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2);
+
+ /** Adds conic from last point towards (x1, y1), to (x2, y2), weighted by w.
+ If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0)
+ before adding conic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed.
+
+ If w is finite and not one, appends kConic_Verb to verb array;
+ and (x1, y1), (x2, y2) to SkPoint array; and w to conic weights.
+
+ If w is one, appends kQuad_Verb to verb array, and
+ (x1, y1), (x2, y2) to SkPoint array.
+
+ If w is not finite, appends kLine_Verb twice to verb array, and
+ (x1, y1), (x2, y2) to SkPoint array.
+
+ @param x1 control SkPoint of conic on x-axis
+ @param y1 control SkPoint of conic on y-axis
+ @param x2 end SkPoint of conic on x-axis
+ @param y2 end SkPoint of conic on y-axis
+ @param w weight of added conic
+ @return reference to SkPath
+ */
+ SkPath& conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar w);
+
+ /** Adds conic from last point towards SkPoint p1, to SkPoint p2, weighted by w.
+ If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to (0, 0)
+ before adding conic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed.
+
+ If w is finite and not one, appends kConic_Verb to verb array;
+ and SkPoint p1, p2 to SkPoint array; and w to conic weights.
+
+ If w is one, appends kQuad_Verb to verb array, and SkPoint p1, p2
+ to SkPoint array.
+
+ If w is not finite, appends kLine_Verb twice to verb array, and
+ SkPoint p1, p2 to SkPoint array.
+
+ @param p1 control SkPoint of added conic
+ @param p2 end SkPoint of added conic
+ @param w weight of added conic
+ @return reference to SkPath
+ */
+ SkPath& conicTo(const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ return this->conicTo(p1.fX, p1.fY, p2.fX, p2.fY, w);
+ }
+
+ /** Adds conic from last point towards vector (dx1, dy1), to vector (dx2, dy2),
+ weighted by w. If SkPath is empty, or last SkPath::Verb
+ is kClose_Verb, last point is set to (0, 0) before adding conic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array,
+ if needed.
+
+ If w is finite and not one, next appends kConic_Verb to verb array,
+ and w is recorded as conic weight; otherwise, if w is one, appends
+ kQuad_Verb to verb array; or if w is not finite, appends kLine_Verb
+ twice to verb array.
+
+ In all cases appends SkPoint control and end to SkPoint array.
+ control is last point plus vector (dx1, dy1).
+ end is last point plus vector (dx2, dy2).
+
+ Function name stands for "relative conic to".
+
+ @param dx1 offset from last point to conic control on x-axis
+ @param dy1 offset from last point to conic control on y-axis
+ @param dx2 offset from last point to conic end on x-axis
+ @param dy2 offset from last point to conic end on y-axis
+ @param w weight of added conic
+ @return reference to SkPath
+ */
+ SkPath& rConicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2,
+ SkScalar w);
+
+ /** Adds cubic from last point towards (x1, y1), then towards (x2, y2), ending at
+ (x3, y3). If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to
+ (0, 0) before adding cubic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kCubic_Verb to verb array; and (x1, y1), (x2, y2), (x3, y3)
+ to SkPoint array.
+
+ @param x1 first control SkPoint of cubic on x-axis
+ @param y1 first control SkPoint of cubic on y-axis
+ @param x2 second control SkPoint of cubic on x-axis
+ @param y2 second control SkPoint of cubic on y-axis
+ @param x3 end SkPoint of cubic on x-axis
+ @param y3 end SkPoint of cubic on y-axis
+ @return reference to SkPath
+ */
+ SkPath& cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3);
+
+ /** Adds cubic from last point towards SkPoint p1, then towards SkPoint p2, ending at
+ SkPoint p3. If SkPath is empty, or last SkPath::Verb is kClose_Verb, last point is set to
+ (0, 0) before adding cubic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array, if needed;
+ then appends kCubic_Verb to verb array; and SkPoint p1, p2, p3
+ to SkPoint array.
+
+ @param p1 first control SkPoint of cubic
+ @param p2 second control SkPoint of cubic
+ @param p3 end SkPoint of cubic
+ @return reference to SkPath
+ */
+ SkPath& cubicTo(const SkPoint& p1, const SkPoint& p2, const SkPoint& p3) {
+ return this->cubicTo(p1.fX, p1.fY, p2.fX, p2.fY, p3.fX, p3.fY);
+ }
+
+ /** Adds cubic from last point towards vector (dx1, dy1), then towards
+ vector (dx2, dy2), to vector (dx3, dy3).
+ If SkPath is empty, or last SkPath::Verb
+ is kClose_Verb, last point is set to (0, 0) before adding cubic.
+
+ Appends kMove_Verb to verb array and (0, 0) to SkPoint array,
+ if needed; then appends kCubic_Verb to verb array; and appends cubic
+ control and cubic end to SkPoint array.
+        Cubic controls are last point plus vectors (dx1, dy1) and (dx2, dy2).
+        Cubic end is last point plus vector (dx3, dy3).
+ Function name stands for "relative cubic to".
+
+ @param dx1 offset from last point to first cubic control on x-axis
+ @param dy1 offset from last point to first cubic control on y-axis
+ @param dx2 offset from last point to second cubic control on x-axis
+ @param dy2 offset from last point to second cubic control on y-axis
+ @param dx3 offset from last point to cubic end on x-axis
+ @param dy3 offset from last point to cubic end on y-axis
+ @return reference to SkPath
+ */
+ SkPath& rCubicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2,
+ SkScalar dx3, SkScalar dy3);
+
+ /** Appends arc to SkPath. Arc added is part of ellipse
+ bounded by oval, from startAngle through sweepAngle. Both startAngle and
+ sweepAngle are measured in degrees, where zero degrees is aligned with the
+        positive x-axis, and a positive sweep extends the arc clockwise.
+
+ arcTo() adds line connecting SkPath last SkPoint to initial arc SkPoint if forceMoveTo
+ is false and SkPath is not empty. Otherwise, added contour begins with first point
+ of arc. Angles greater than -360 and less than 360 are treated modulo 360.
+
+ @param oval bounds of ellipse containing arc
+ @param startAngle starting angle of arc in degrees
+ @param sweepAngle sweep, in degrees. Positive is clockwise; treated modulo 360
+ @param forceMoveTo true to start a new contour with arc
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_arcTo
+ */
+ SkPath& arcTo(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle, bool forceMoveTo);
+
+ /** Appends arc to SkPath, after appending line if needed. Arc is implemented by conic
+ weighted to describe part of circle. Arc is contained by tangent from
+ last SkPath point to (x1, y1), and tangent from (x1, y1) to (x2, y2). Arc
+ is part of circle sized to radius, positioned so it touches both tangent lines.
+
+ If last Path Point does not start Arc, arcTo appends connecting Line to Path.
+ The length of Vector from (x1, y1) to (x2, y2) does not affect Arc.
+
+ Arc sweep is always less than 180 degrees. If radius is zero, or if
+ tangents are nearly parallel, arcTo appends Line from last Path Point to (x1, y1).
+
+ arcTo appends at most one Line and one conic.
+ arcTo implements the functionality of PostScript arct and HTML Canvas arcTo.
+
+ @param x1 x-axis value common to pair of tangents
+ @param y1 y-axis value common to pair of tangents
+ @param x2 x-axis value end of second tangent
+ @param y2 y-axis value end of second tangent
+ @param radius distance from arc to circle center
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_arcTo_2_a
+ example: https://fiddle.skia.org/c/@Path_arcTo_2_b
+ example: https://fiddle.skia.org/c/@Path_arcTo_2_c
+ */
+ SkPath& arcTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar radius);
+
+ /** Appends arc to SkPath, after appending line if needed. Arc is implemented by conic
+ weighted to describe part of circle. Arc is contained by tangent from
+ last SkPath point to p1, and tangent from p1 to p2. Arc
+ is part of circle sized to radius, positioned so it touches both tangent lines.
+
+ If last SkPath SkPoint does not start arc, arcTo() appends connecting line to SkPath.
+ The length of vector from p1 to p2 does not affect arc.
+
+ Arc sweep is always less than 180 degrees. If radius is zero, or if
+ tangents are nearly parallel, arcTo() appends line from last SkPath SkPoint to p1.
+
+ arcTo() appends at most one line and one conic.
+ arcTo() implements the functionality of PostScript arct and HTML Canvas arcTo.
+
+ @param p1 SkPoint common to pair of tangents
+ @param p2 end of second tangent
+ @param radius distance from arc to circle center
+ @return reference to SkPath
+ */
+ SkPath& arcTo(const SkPoint p1, const SkPoint p2, SkScalar radius) {
+ return this->arcTo(p1.fX, p1.fY, p2.fX, p2.fY, radius);
+ }
+
+ /** \enum SkPath::ArcSize
+ Four oval parts with radii (rx, ry) start at last SkPath SkPoint and ends at (x, y).
+ ArcSize and Direction select one of the four oval parts.
+ */
+ enum ArcSize {
+ kSmall_ArcSize, //!< smaller of arc pair
+ kLarge_ArcSize, //!< larger of arc pair
+ };
+
+ /** Appends arc to SkPath. Arc is implemented by one or more conics weighted to
+ describe part of oval with radii (rx, ry) rotated by xAxisRotate degrees. Arc
+ curves from last SkPath SkPoint to (x, y), choosing one of four possible routes:
+ clockwise or counterclockwise, and smaller or larger.
+
+ Arc sweep is always less than 360 degrees. arcTo() appends line to (x, y) if
+ either radii are zero, or if last SkPath SkPoint equals (x, y). arcTo() scales radii
+ (rx, ry) to fit last SkPath SkPoint and (x, y) if both are greater than zero but
+ too small.
+
+ arcTo() appends up to four conic curves.
+ arcTo() implements the functionality of SVG arc, although SVG sweep-flag value
+ is opposite the integer value of sweep; SVG sweep-flag uses 1 for clockwise,
+ while kCW_Direction cast to int is zero.
+
+ @param rx radius on x-axis before x-axis rotation
+ @param ry radius on y-axis before x-axis rotation
+ @param xAxisRotate x-axis rotation in degrees; positive values are clockwise
+ @param largeArc chooses smaller or larger arc
+ @param sweep chooses clockwise or counterclockwise arc
+ @param x end of arc
+ @param y end of arc
+ @return reference to SkPath
+ */
+ SkPath& arcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, ArcSize largeArc,
+ SkPathDirection sweep, SkScalar x, SkScalar y);
+
+ /** Appends arc to SkPath. Arc is implemented by one or more conic weighted to describe
+ part of oval with radii (r.fX, r.fY) rotated by xAxisRotate degrees. Arc curves
+ from last SkPath SkPoint to (xy.fX, xy.fY), choosing one of four possible routes:
+ clockwise or counterclockwise,
+ and smaller or larger.
+
+ Arc sweep is always less than 360 degrees. arcTo() appends line to xy if either
+ radii are zero, or if last SkPath SkPoint equals (xy.fX, xy.fY). arcTo() scales radii r to
+ fit last SkPath SkPoint and xy if both are greater than zero but too small to describe
+ an arc.
+
+ arcTo() appends up to four conic curves.
+ arcTo() implements the functionality of SVG arc, although SVG sweep-flag value is
+ opposite the integer value of sweep; SVG sweep-flag uses 1 for clockwise, while
+ kCW_Direction cast to int is zero.
+
+ @param r radii on axes before x-axis rotation
+ @param xAxisRotate x-axis rotation in degrees; positive values are clockwise
+ @param largeArc chooses smaller or larger arc
+ @param sweep chooses clockwise or counterclockwise arc
+ @param xy end of arc
+ @return reference to SkPath
+ */
+ SkPath& arcTo(const SkPoint r, SkScalar xAxisRotate, ArcSize largeArc, SkPathDirection sweep,
+ const SkPoint xy) {
+ return this->arcTo(r.fX, r.fY, xAxisRotate, largeArc, sweep, xy.fX, xy.fY);
+ }
+
+ /** Appends arc to SkPath, relative to last SkPath SkPoint. Arc is implemented by one or
+ more conic, weighted to describe part of oval with radii (rx, ry) rotated by
+ xAxisRotate degrees. Arc curves from last SkPath SkPoint to relative end SkPoint:
+ (dx, dy), choosing one of four possible routes: clockwise or
+ counterclockwise, and smaller or larger. If SkPath is empty, the start arc SkPoint
+ is (0, 0).
+
+ Arc sweep is always less than 360 degrees. arcTo() appends line to end SkPoint
+ if either radii are zero, or if last SkPath SkPoint equals end SkPoint.
+ arcTo() scales radii (rx, ry) to fit last SkPath SkPoint and end SkPoint if both are
+ greater than zero but too small to describe an arc.
+
+ arcTo() appends up to four conic curves.
+ arcTo() implements the functionality of svg arc, although SVG "sweep-flag" value is
+ opposite the integer value of sweep; SVG "sweep-flag" uses 1 for clockwise, while
+ kCW_Direction cast to int is zero.
+
+        @param rx           radius on x-axis before x-axis rotation
+        @param ry           radius on y-axis before x-axis rotation
+ @param xAxisRotate x-axis rotation in degrees; positive values are clockwise
+ @param largeArc chooses smaller or larger arc
+ @param sweep chooses clockwise or counterclockwise arc
+ @param dx x-axis offset end of arc from last SkPath SkPoint
+ @param dy y-axis offset end of arc from last SkPath SkPoint
+ @return reference to SkPath
+ */
+ SkPath& rArcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, ArcSize largeArc,
+ SkPathDirection sweep, SkScalar dx, SkScalar dy);
+
+ /** Appends kClose_Verb to SkPath. A closed contour connects the first and last SkPoint
+ with line, forming a continuous loop. Open and closed contour draw the same
+ with SkPaint::kFill_Style. With SkPaint::kStroke_Style, open contour draws
+ SkPaint::Cap at contour start and end; closed contour draws
+ SkPaint::Join at contour start and end.
+
+ close() has no effect if SkPath is empty or last SkPath SkPath::Verb is kClose_Verb.
+
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_close
+ */
+ SkPath& close();
+
+#ifdef SK_HIDE_PATH_EDIT_METHODS
+public:
+#endif
+
+ /** Approximates conic with quad array. Conic is constructed from start SkPoint p0,
+ control SkPoint p1, end SkPoint p2, and weight w.
+ Quad array is stored in pts; this storage is supplied by caller.
+        Maximum quad count is 2 raised to the power pow2.
+ Every third point in array shares last SkPoint of previous quad and first SkPoint of
+ next quad. Maximum pts storage size is given by:
+ (1 + 2 * (1 << pow2)) * sizeof(SkPoint).
+
+        Returns quad count used by the approximation, which may be smaller
+ than the number requested.
+
+ conic weight determines the amount of influence conic control point has on the curve.
+ w less than one represents an elliptical section. w greater than one represents
+ a hyperbolic section. w equal to one represents a parabolic section.
+
+ Two quad curves are sufficient to approximate an elliptical conic with a sweep
+ of up to 90 degrees; in this case, set pow2 to one.
+
+ @param p0 conic start SkPoint
+ @param p1 conic control SkPoint
+ @param p2 conic end SkPoint
+ @param w conic weight
+ @param pts storage for quad array
+ @param pow2 quad count, as power of two, normally 0 to 5 (1 to 32 quad curves)
+ @return number of quad curves written to pts
+ */
+ static int ConvertConicToQuads(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ SkScalar w, SkPoint pts[], int pow2);
+
+ /** Returns true if SkPath is equivalent to SkRect when filled.
+ If false: rect, isClosed, and direction are unchanged.
+ If true: rect, isClosed, and direction are written to if not nullptr.
+
+ rect may be smaller than the SkPath bounds. SkPath bounds may include kMove_Verb points
+ that do not alter the area drawn by the returned rect.
+
+ @param rect storage for bounds of SkRect; may be nullptr
+ @param isClosed storage set to true if SkPath is closed; may be nullptr
+ @param direction storage set to SkRect direction; may be nullptr
+ @return true if SkPath contains SkRect
+
+ example: https://fiddle.skia.org/c/@Path_isRect
+ */
+ bool isRect(SkRect* rect, bool* isClosed = nullptr, SkPathDirection* direction = nullptr) const;
+
+#ifdef SK_HIDE_PATH_EDIT_METHODS
+private:
+#endif
+
+ /** Adds a new contour to the path, defined by the rect, and wound in the
+ specified direction. The verbs added to the path will be:
+
+ kMove, kLine, kLine, kLine, kClose
+
+ start specifies which corner to begin the contour:
+ 0: upper-left corner
+ 1: upper-right corner
+ 2: lower-right corner
+ 3: lower-left corner
+
+        This start point also acts as the implied beginning of the subsequent
+        contour, if it does not have an explicit moveTo(). e.g.
+
+ path.addRect(...)
+ // if we don't say moveTo() here, we will use the rect's start point
+ path.lineTo(...)
+
+ @param rect SkRect to add as a closed contour
+ @param dir SkPath::Direction to orient the new contour
+ @param start initial corner of SkRect to add
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_addRect_2
+ */
+ SkPath& addRect(const SkRect& rect, SkPathDirection dir, unsigned start);
+
+ SkPath& addRect(const SkRect& rect, SkPathDirection dir = SkPathDirection::kCW) {
+ return this->addRect(rect, dir, 0);
+ }
+
+ SkPath& addRect(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom,
+ SkPathDirection dir = SkPathDirection::kCW) {
+ return this->addRect({left, top, right, bottom}, dir, 0);
+ }
+
+ /** Adds oval to path, appending kMove_Verb, four kConic_Verb, and kClose_Verb.
+ Oval is upright ellipse bounded by SkRect oval with radii equal to half oval width
+ and half oval height. Oval begins at (oval.fRight, oval.centerY()) and continues
+ clockwise if dir is kCW_Direction, counterclockwise if dir is kCCW_Direction.
+
+ @param oval bounds of ellipse added
+ @param dir SkPath::Direction to wind ellipse
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_addOval
+ */
+ SkPath& addOval(const SkRect& oval, SkPathDirection dir = SkPathDirection::kCW);
+
+ /** Adds oval to SkPath, appending kMove_Verb, four kConic_Verb, and kClose_Verb.
+ Oval is upright ellipse bounded by SkRect oval with radii equal to half oval width
+ and half oval height. Oval begins at start and continues
+ clockwise if dir is kCW_Direction, counterclockwise if dir is kCCW_Direction.
+
+ @param oval bounds of ellipse added
+ @param dir SkPath::Direction to wind ellipse
+ @param start index of initial point of ellipse
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_addOval_2
+ */
+ SkPath& addOval(const SkRect& oval, SkPathDirection dir, unsigned start);
+
+ /** Adds circle centered at (x, y) of size radius to SkPath, appending kMove_Verb,
+ four kConic_Verb, and kClose_Verb. Circle begins at: (x + radius, y), continuing
+ clockwise if dir is kCW_Direction, and counterclockwise if dir is kCCW_Direction.
+
+ Has no effect if radius is zero or negative.
+
+ @param x center of circle
+ @param y center of circle
+ @param radius distance from center to edge
+ @param dir SkPath::Direction to wind circle
+ @return reference to SkPath
+ */
+ SkPath& addCircle(SkScalar x, SkScalar y, SkScalar radius,
+ SkPathDirection dir = SkPathDirection::kCW);
+
+ /** Appends arc to SkPath, as the start of new contour. Arc added is part of ellipse
+ bounded by oval, from startAngle through sweepAngle. Both startAngle and
+ sweepAngle are measured in degrees, where zero degrees is aligned with the
+        positive x-axis, and a positive sweep extends the arc clockwise.
+
+ If sweepAngle <= -360, or sweepAngle >= 360; and startAngle modulo 90 is nearly
+ zero, append oval instead of arc. Otherwise, sweepAngle values are treated
+ modulo 360, and arc may or may not draw depending on numeric rounding.
+
+ @param oval bounds of ellipse containing arc
+ @param startAngle starting angle of arc in degrees
+ @param sweepAngle sweep, in degrees. Positive is clockwise; treated modulo 360
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_addArc
+ */
+ SkPath& addArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle);
+
+ /** Appends SkRRect to SkPath, creating a new closed contour. SkRRect has bounds
+ equal to rect; each corner is 90 degrees of an ellipse with radii (rx, ry). If
+ dir is kCW_Direction, SkRRect starts at top-left of the lower-left corner and
+ winds clockwise. If dir is kCCW_Direction, SkRRect starts at the bottom-left
+ of the upper-left corner and winds counterclockwise.
+
+ If either rx or ry is too large, rx and ry are scaled uniformly until the
+ corners fit. If rx or ry is less than or equal to zero, addRoundRect() appends
+ SkRect rect to SkPath.
+
+ After appending, SkPath may be empty, or may contain: SkRect, oval, or SkRRect.
+
+ @param rect bounds of SkRRect
+ @param rx x-axis radius of rounded corners on the SkRRect
+ @param ry y-axis radius of rounded corners on the SkRRect
+ @param dir SkPath::Direction to wind SkRRect
+ @return reference to SkPath
+ */
+ SkPath& addRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry,
+ SkPathDirection dir = SkPathDirection::kCW);
+
+ /** Appends SkRRect to SkPath, creating a new closed contour. SkRRect has bounds
+ equal to rect; each corner is 90 degrees of an ellipse with radii from the
+ array.
+
+ @param rect bounds of SkRRect
+ @param radii array of 8 SkScalar values, a radius pair for each corner
+ @param dir SkPath::Direction to wind SkRRect
+ @return reference to SkPath
+ */
+ SkPath& addRoundRect(const SkRect& rect, const SkScalar radii[],
+ SkPathDirection dir = SkPathDirection::kCW);
+
+ /** Adds rrect to SkPath, creating a new closed contour. If
+ dir is kCW_Direction, rrect starts at top-left of the lower-left corner and
+ winds clockwise. If dir is kCCW_Direction, rrect starts at the bottom-left
+ of the upper-left corner and winds counterclockwise.
+
+ After appending, SkPath may be empty, or may contain: SkRect, oval, or SkRRect.
+
+ @param rrect bounds and radii of rounded rectangle
+ @param dir SkPath::Direction to wind SkRRect
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_addRRect
+ */
+ SkPath& addRRect(const SkRRect& rrect, SkPathDirection dir = SkPathDirection::kCW);
+
+ /** Adds rrect to SkPath, creating a new closed contour. If dir is kCW_Direction, rrect
+ winds clockwise; if dir is kCCW_Direction, rrect winds counterclockwise.
+ start determines the first point of rrect to add.
+
+ @param rrect bounds and radii of rounded rectangle
+ @param dir SkPath::Direction to wind SkRRect
+ @param start index of initial point of SkRRect
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_addRRect_2
+ */
+ SkPath& addRRect(const SkRRect& rrect, SkPathDirection dir, unsigned start);
+
+ /** Adds contour created from line array, adding (count - 1) line segments.
+ Contour added starts at pts[0], then adds a line for every additional SkPoint
+ in pts array. If close is true, appends kClose_Verb to SkPath, connecting
+ pts[count - 1] and pts[0].
+
+ If count is zero, append kMove_Verb to path.
+ Has no effect if count is less than one.
+
+ @param pts array of line sharing end and start SkPoint
+ @param count length of SkPoint array
+ @param close true to add line connecting contour end and start
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_addPoly
+ */
+ SkPath& addPoly(const SkPoint pts[], int count, bool close);
+
+ /** Adds contour created from list. Contour added starts at list[0], then adds a line
+ for every additional SkPoint in list. If close is true, appends kClose_Verb to SkPath,
+ connecting last and first SkPoint in list.
+
+ If list is empty, append kMove_Verb to path.
+
+ @param list array of SkPoint
+ @param close true to add line connecting contour end and start
+ @return reference to SkPath
+ */
+ SkPath& addPoly(const std::initializer_list<SkPoint>& list, bool close) {
+ return this->addPoly(list.begin(), SkToInt(list.size()), close);
+ }
+
+#ifdef SK_HIDE_PATH_EDIT_METHODS
+public:
+#endif
+
+ /** \enum SkPath::AddPathMode
+ AddPathMode chooses how addPath() appends. Adding one SkPath to another can extend
+ the last contour or start a new contour.
+ */
+ enum AddPathMode {
+ kAppend_AddPathMode, //!< appended to destination unaltered
+ kExtend_AddPathMode, //!< add line if prior contour is not closed
+ };
+
+ /** Appends src to SkPath, offset by (dx, dy).
+
+ If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are
+ added unaltered. If mode is kExtend_AddPathMode, add line before appending
+ verbs, SkPoint, and conic weights.
+
+ @param src SkPath verbs, SkPoint, and conic weights to add
+ @param dx offset added to src SkPoint array x-axis coordinates
+ @param dy offset added to src SkPoint array y-axis coordinates
+ @param mode kAppend_AddPathMode or kExtend_AddPathMode
+ @return reference to SkPath
+ */
+ SkPath& addPath(const SkPath& src, SkScalar dx, SkScalar dy,
+ AddPathMode mode = kAppend_AddPathMode);
+
+ /** Appends src to SkPath.
+
+ If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are
+ added unaltered. If mode is kExtend_AddPathMode, add line before appending
+ verbs, SkPoint, and conic weights.
+
+ @param src SkPath verbs, SkPoint, and conic weights to add
+ @param mode kAppend_AddPathMode or kExtend_AddPathMode
+ @return reference to SkPath
+ */
+ SkPath& addPath(const SkPath& src, AddPathMode mode = kAppend_AddPathMode) {
+ SkMatrix m;
+ m.reset();
+ return this->addPath(src, m, mode);
+ }
+
+ /** Appends src to SkPath, transformed by matrix. Transformed curves may have different
+ verbs, SkPoint, and conic weights.
+
+ If mode is kAppend_AddPathMode, src verb array, SkPoint array, and conic weights are
+ added unaltered. If mode is kExtend_AddPathMode, add line before appending
+ verbs, SkPoint, and conic weights.
+
+ @param src SkPath verbs, SkPoint, and conic weights to add
+ @param matrix transform applied to src
+ @param mode kAppend_AddPathMode or kExtend_AddPathMode
+ @return reference to SkPath
+ */
+ SkPath& addPath(const SkPath& src, const SkMatrix& matrix,
+ AddPathMode mode = kAppend_AddPathMode);
+
+ /** Appends src to SkPath, from back to front.
+ Reversed src always appends a new contour to SkPath.
+
+ @param src SkPath verbs, SkPoint, and conic weights to add
+ @return reference to SkPath
+
+ example: https://fiddle.skia.org/c/@Path_reverseAddPath
+ */
+ SkPath& reverseAddPath(const SkPath& src);
+
+ /** Offsets SkPoint array by (dx, dy). Offset SkPath replaces dst.
+ If dst is nullptr, SkPath is replaced by offset data.
+
+ @param dx offset added to SkPoint array x-axis coordinates
+ @param dy offset added to SkPoint array y-axis coordinates
+ @param dst overwritten, translated copy of SkPath; may be nullptr
+
+ example: https://fiddle.skia.org/c/@Path_offset
+ */
+ void offset(SkScalar dx, SkScalar dy, SkPath* dst) const;
+
+ /** Offsets SkPoint array by (dx, dy). SkPath is replaced by offset data.
+
+ @param dx offset added to SkPoint array x-axis coordinates
+ @param dy offset added to SkPoint array y-axis coordinates
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ this->offset(dx, dy, this);
+ }
+
+ /** Transforms verb array, SkPoint array, and weight by matrix.
+ transform may change verbs and increase their number.
+ Transformed SkPath replaces dst; if dst is nullptr, original data
+ is replaced.
+
+ @param matrix SkMatrix to apply to SkPath
+ @param dst overwritten, transformed copy of SkPath; may be nullptr
+ @param pc whether to apply perspective clipping
+
+ example: https://fiddle.skia.org/c/@Path_transform
+ */
+ void transform(const SkMatrix& matrix, SkPath* dst,
+ SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const;
+
+ /** Transforms verb array, SkPoint array, and weight by matrix.
+ transform may change verbs and increase their number.
+ SkPath is replaced by transformed data.
+
+ @param matrix SkMatrix to apply to SkPath
+ @param pc whether to apply perspective clipping
+ */
+ void transform(const SkMatrix& matrix,
+ SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) {
+ this->transform(matrix, this, pc);
+ }
+
+ SkPath makeTransform(const SkMatrix& m,
+ SkApplyPerspectiveClip pc = SkApplyPerspectiveClip::kYes) const {
+ SkPath dst;
+ this->transform(m, &dst, pc);
+ return dst;
+ }
+
+ SkPath makeScale(SkScalar sx, SkScalar sy) {
+ return this->makeTransform(SkMatrix::Scale(sx, sy), SkApplyPerspectiveClip::kNo);
+ }
+
+ /** Returns last point on SkPath in lastPt. Returns false if SkPoint array is empty,
+ storing (0, 0) if lastPt is not nullptr.
+
+ @param lastPt storage for final SkPoint in SkPoint array; may be nullptr
+ @return true if SkPoint array contains one or more SkPoint
+
+ example: https://fiddle.skia.org/c/@Path_getLastPt
+ */
+ bool getLastPt(SkPoint* lastPt) const;
+
+ /** Sets last point to (x, y). If SkPoint array is empty, append kMove_Verb to
+ verb array and append (x, y) to SkPoint array.
+
+ @param x set x-axis value of last point
+ @param y set y-axis value of last point
+
+ example: https://fiddle.skia.org/c/@Path_setLastPt
+ */
+ void setLastPt(SkScalar x, SkScalar y);
+
+ /** Sets the last point on the path. If SkPoint array is empty, append kMove_Verb to
+ verb array and append p to SkPoint array.
+
+ @param p set value of last point
+ */
+ void setLastPt(const SkPoint& p) {
+ this->setLastPt(p.fX, p.fY);
+ }
+
+ /** \enum SkPath::SegmentMask
+ SegmentMask constants correspond to each drawing Verb type in SkPath; for
+ instance, if SkPath only contains lines, only the kLine_SegmentMask bit is set.
+ */
+ enum SegmentMask {
+ kLine_SegmentMask = kLine_SkPathSegmentMask,
+ kQuad_SegmentMask = kQuad_SkPathSegmentMask,
+ kConic_SegmentMask = kConic_SkPathSegmentMask,
+ kCubic_SegmentMask = kCubic_SkPathSegmentMask,
+ };
+
+ /** Returns a mask, where each set bit corresponds to a SegmentMask constant
+ if SkPath contains one or more verbs of that type.
+ Returns zero if SkPath contains no lines, or curves: quads, conics, or cubics.
+
+ getSegmentMasks() returns a cached result; it is very fast.
+
+ @return SegmentMask bits or zero
+ */
+ uint32_t getSegmentMasks() const;
+
+ /** \enum SkPath::Verb
+ Verb instructs SkPath how to interpret one or more SkPoint and optional conic weight;
+ manage contour, and terminate SkPath.
+ */
+ enum Verb {
+ kMove_Verb = static_cast<int>(SkPathVerb::kMove),
+ kLine_Verb = static_cast<int>(SkPathVerb::kLine),
+ kQuad_Verb = static_cast<int>(SkPathVerb::kQuad),
+ kConic_Verb = static_cast<int>(SkPathVerb::kConic),
+ kCubic_Verb = static_cast<int>(SkPathVerb::kCubic),
+ kClose_Verb = static_cast<int>(SkPathVerb::kClose),
+ kDone_Verb = kClose_Verb + 1
+ };
+
+ /** \class SkPath::Iter
+ Iterates through verb array, and associated SkPoint array and conic weight.
+ Provides options to treat open contours as closed, and to ignore
+ degenerate data.
+ */
+ class SK_API Iter {
+ public:
+
+ /** Initializes SkPath::Iter with an empty SkPath. next() on SkPath::Iter returns
+ kDone_Verb.
+ Call setPath to initialize SkPath::Iter at a later time.
+
+ @return SkPath::Iter of empty SkPath
+
+ example: https://fiddle.skia.org/c/@Path_Iter_Iter
+ */
+ Iter();
+
+ /** Sets SkPath::Iter to return elements of verb array, SkPoint array, and conic weight in
+ path. If forceClose is true, SkPath::Iter will add kLine_Verb and kClose_Verb after each
+ open contour. path is not altered.
+
+ @param path SkPath to iterate
+ @param forceClose true if open contours generate kClose_Verb
+ @return SkPath::Iter of path
+
+ example: https://fiddle.skia.org/c/@Path_Iter_const_SkPath
+ */
+ Iter(const SkPath& path, bool forceClose);
+
+ /** Sets SkPath::Iter to return elements of verb array, SkPoint array, and conic weight in
+ path. If forceClose is true, SkPath::Iter will add kLine_Verb and kClose_Verb after each
+ open contour. path is not altered.
+
+ @param path SkPath to iterate
+ @param forceClose true if open contours generate kClose_Verb
+
+ example: https://fiddle.skia.org/c/@Path_Iter_setPath
+ */
+ void setPath(const SkPath& path, bool forceClose);
+
+ /** Returns next SkPath::Verb in verb array, and advances SkPath::Iter.
+ When verb array is exhausted, returns kDone_Verb.
+
+ Zero to four SkPoint are stored in pts, depending on the returned SkPath::Verb.
+
+ @param pts storage for SkPoint data describing returned SkPath::Verb
+ @return next SkPath::Verb from verb array
+
+ example: https://fiddle.skia.org/c/@Path_RawIter_next
+ */
+ Verb next(SkPoint pts[4]);
+
+ /** Returns conic weight if next() returned kConic_Verb.
+
+ If next() has not been called, or next() did not return kConic_Verb,
+ result is undefined.
+
+ @return conic weight for conic SkPoint returned by next()
+ */
+ SkScalar conicWeight() const { return *fConicWeights; }
+
+ /** Returns true if last kLine_Verb returned by next() was generated
+ by kClose_Verb. When true, the end point returned by next() is
+ also the start point of contour.
+
+ If next() has not been called, or next() did not return kLine_Verb,
+ result is undefined.
+
+ @return true if last kLine_Verb was generated by kClose_Verb
+ */
+ bool isCloseLine() const { return SkToBool(fCloseLine); }
+
+ /** Returns true if subsequent calls to next() return kClose_Verb before returning
+ kMove_Verb. if true, contour SkPath::Iter is processing may end with kClose_Verb, or
+ SkPath::Iter may have been initialized with force close set to true.
+
+ @return true if contour is closed
+
+ example: https://fiddle.skia.org/c/@Path_Iter_isClosedContour
+ */
+ bool isClosedContour() const;
+
+ private:
+ const SkPoint* fPts;
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbStop;
+ const SkScalar* fConicWeights;
+ SkPoint fMoveTo;
+ SkPoint fLastPt;
+ bool fForceClose;
+ bool fNeedClose;
+ bool fCloseLine;
+
+ Verb autoClose(SkPoint pts[2]);
+ };
+
+private:
+ /** \class SkPath::RangeIter
+ Iterates through a raw range of path verbs, points, and conics. All values are returned
+ unaltered.
+
+ NOTE: This class will be moved into SkPathPriv once RawIter is removed.
+ */
+ class RangeIter {
+ public:
+ RangeIter() = default;
+ RangeIter(const uint8_t* verbs, const SkPoint* points, const SkScalar* weights)
+ : fVerb(verbs), fPoints(points), fWeights(weights) {
+ SkDEBUGCODE(fInitialPoints = fPoints;)
+ }
+ bool operator!=(const RangeIter& that) const {
+ return fVerb != that.fVerb;
+ }
+ bool operator==(const RangeIter& that) const {
+ return fVerb == that.fVerb;
+ }
+ RangeIter& operator++() {
+ auto verb = static_cast<SkPathVerb>(*fVerb++);
+ fPoints += pts_advance_after_verb(verb);
+ if (verb == SkPathVerb::kConic) {
+ ++fWeights;
+ }
+ return *this;
+ }
+ RangeIter operator++(int) {
+ RangeIter copy = *this;
+ this->operator++();
+ return copy;
+ }
+ SkPathVerb peekVerb() const {
+ return static_cast<SkPathVerb>(*fVerb);
+ }
+ std::tuple<SkPathVerb, const SkPoint*, const SkScalar*> operator*() const {
+ SkPathVerb verb = this->peekVerb();
+ // We provide the starting point for beziers by peeking backwards from the current
+ // point, which works fine as long as there is always a kMove before any geometry.
+ // (SkPath::injectMoveToIfNeeded should have guaranteed this to be the case.)
+ int backset = pts_backset_for_verb(verb);
+ SkASSERT(fPoints + backset >= fInitialPoints);
+ return {verb, fPoints + backset, fWeights};
+ }
+ private:
+ constexpr static int pts_advance_after_verb(SkPathVerb verb) {
+ switch (verb) {
+ case SkPathVerb::kMove: return 1;
+ case SkPathVerb::kLine: return 1;
+ case SkPathVerb::kQuad: return 2;
+ case SkPathVerb::kConic: return 2;
+ case SkPathVerb::kCubic: return 3;
+ case SkPathVerb::kClose: return 0;
+ }
+ SkUNREACHABLE;
+ }
+ constexpr static int pts_backset_for_verb(SkPathVerb verb) {
+ switch (verb) {
+ case SkPathVerb::kMove: return 0;
+ case SkPathVerb::kLine: return -1;
+ case SkPathVerb::kQuad: return -1;
+ case SkPathVerb::kConic: return -1;
+ case SkPathVerb::kCubic: return -1;
+ case SkPathVerb::kClose: return -1;
+ }
+ SkUNREACHABLE;
+ }
+ const uint8_t* fVerb = nullptr;
+ const SkPoint* fPoints = nullptr;
+ const SkScalar* fWeights = nullptr;
+ SkDEBUGCODE(const SkPoint* fInitialPoints = nullptr;)
+ };
+public:
+
+ /** \class SkPath::RawIter
+ Use Iter instead. This class will soon be removed and RangeIter will be made private.
+ */
+ class SK_API RawIter {
+ public:
+
+ /** Initializes RawIter with an empty SkPath. next() on RawIter returns kDone_Verb.
+ Call setPath to initialize RawIter at a later time.
+
+ @return RawIter of empty SkPath
+ */
+ RawIter() {}
+
+ /** Sets RawIter to return elements of verb array, SkPoint array, and conic weight in path.
+
+ @param path SkPath to iterate
+ @return RawIter of path
+ */
+ RawIter(const SkPath& path) {
+ setPath(path);
+ }
+
+ /** Sets RawIter to return elements of verb array, SkPoint array, and conic weight in
+ path.
+
+ @param path SkPath to iterate
+ */
+ void setPath(const SkPath&);
+
+ /** Returns next SkPath::Verb in verb array, and advances RawIter.
+ When verb array is exhausted, returns kDone_Verb.
+ Zero to four SkPoint are stored in pts, depending on the returned SkPath::Verb.
+
+ @param pts storage for SkPoint data describing returned SkPath::Verb
+ @return next SkPath::Verb from verb array
+ */
+ Verb next(SkPoint[4]);
+
+ /** Returns next SkPath::Verb, but does not advance RawIter.
+
+ @return next SkPath::Verb from verb array
+ */
+ Verb peek() const {
+ return (fIter != fEnd) ? static_cast<Verb>(std::get<0>(*fIter)) : kDone_Verb;
+ }
+
+ /** Returns conic weight if next() returned kConic_Verb.
+
+ If next() has not been called, or next() did not return kConic_Verb,
+ result is undefined.
+
+ @return conic weight for conic SkPoint returned by next()
+ */
+ SkScalar conicWeight() const {
+ return fConicWeight;
+ }
+
+ private:
+ RangeIter fIter;
+ RangeIter fEnd;
+ SkScalar fConicWeight = 0;
+ friend class SkPath;
+
+ };
+
+ /** Returns true if the point (x, y) is contained by SkPath, taking into
+ account FillType.
+
+ @param x x-axis value of containment test
+ @param y y-axis value of containment test
+ @return true if SkPoint is in SkPath
+
+ example: https://fiddle.skia.org/c/@Path_contains
+ */
+ bool contains(SkScalar x, SkScalar y) const;
+
+ /** Writes text representation of SkPath to stream. If stream is nullptr, writes to
+ standard output. Set dumpAsHex true to generate exact binary representations
+ of floating point numbers used in SkPoint array and conic weights.
+
+ @param stream writable SkWStream receiving SkPath text representation; may be nullptr
+ @param dumpAsHex true if SkScalar values are written as hexadecimal
+
+ example: https://fiddle.skia.org/c/@Path_dump
+ */
+ void dump(SkWStream* stream, bool dumpAsHex) const;
+
+ void dump() const { this->dump(nullptr, false); }
+ void dumpHex() const { this->dump(nullptr, true); }
+
+ // Like dump(), but outputs for the SkPath::Make() factory
+ void dumpArrays(SkWStream* stream, bool dumpAsHex) const;
+ void dumpArrays() const { this->dumpArrays(nullptr, false); }
+
+ /** Writes SkPath to buffer, returning the number of bytes written.
+ Pass nullptr to obtain the storage size.
+
+ Writes SkPath::FillType, verb array, SkPoint array, conic weight, and
+ additionally writes computed information like SkPath::Convexity and bounds.
+
+ May only be used in concert with readFromMemory();
+ the format used for SkPath in memory is not guaranteed.
+
+ @param buffer storage for SkPath; may be nullptr
+ @return size of storage required for SkPath; always a multiple of 4
+
+ example: https://fiddle.skia.org/c/@Path_writeToMemory
+ */
+ size_t writeToMemory(void* buffer) const;
+
+ /** Writes SkPath to buffer, returning the buffer written to, wrapped in SkData.
+
+ serialize() writes SkPath::FillType, verb array, SkPoint array, conic weight, and
+ additionally writes computed information like SkPath::Convexity and bounds.
+
+ serialize() should only be used in concert with readFromMemory().
+ The format used for SkPath in memory is not guaranteed.
+
+ @return SkPath data wrapped in SkData buffer
+
+ example: https://fiddle.skia.org/c/@Path_serialize
+ */
+ sk_sp<SkData> serialize() const;
+
+ /** Initializes SkPath from buffer of size length. Returns zero if the buffer
+ data is inconsistent, or the length is too small.
+
+ Reads SkPath::FillType, verb array, SkPoint array, conic weight, and
+ additionally reads computed information like SkPath::Convexity and bounds.
+
+ Used only in concert with writeToMemory();
+ the format used for SkPath in memory is not guaranteed.
+
+ @param buffer storage for SkPath
+ @param length buffer size in bytes; must be multiple of 4
+ @return number of bytes read, or zero on failure
+
+ example: https://fiddle.skia.org/c/@Path_readFromMemory
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+ /** (See Skia bug 1762.)
+ Returns a non-zero, globally unique value. A different value is returned
+ if verb array, SkPoint array, or conic weight changes.
+
+ Setting SkPath::FillType does not change generation identifier.
+
+ Each time the path is modified, a different generation identifier will be returned.
+ SkPath::FillType does affect generation identifier on Android framework.
+
+ @return non-zero, globally unique value
+
+ example: https://fiddle.skia.org/c/@Path_getGenerationID
+ */
+ uint32_t getGenerationID() const;
+
+ /** Returns if SkPath data is consistent. Corrupt SkPath data is detected if
+ internal values are out of range or internal storage does not match
+ array dimensions.
+
+ @return true if SkPath data is consistent
+ */
+ bool isValid() const;
+
+ using sk_is_trivially_relocatable = std::true_type;
+
+private:
+ SkPath(sk_sp<SkPathRef>, SkPathFillType, bool isVolatile, SkPathConvexity,
+ SkPathFirstDirection firstDirection);
+
+ sk_sp<SkPathRef> fPathRef;
+ int fLastMoveToIndex;
+ mutable std::atomic<uint8_t> fConvexity; // SkPathConvexity
+ mutable std::atomic<uint8_t> fFirstDirection; // SkPathFirstDirection
+ uint8_t fFillType : 2;
+ uint8_t fIsVolatile : 1;
+
+ static_assert(::sk_is_trivially_relocatable<decltype(fPathRef)>::value);
+
+ /** Resets all fields other than fPathRef to their initial 'empty' values.
+ * Assumes the caller has already emptied fPathRef.
+ * On Android increments fGenerationID without resetting it.
+ */
+ void resetFields();
+
+ /** Sets all fields other than fPathRef to the values in 'that'.
+ * Assumes the caller has already set fPathRef.
+ * Doesn't change fGenerationID or fSourcePath on Android.
+ */
+ void copyFields(const SkPath& that);
+
+ size_t writeToMemoryAsRRect(void* buffer) const;
+ size_t readAsRRect(const void*, size_t);
+ size_t readFromMemory_EQ4Or5(const void*, size_t);
+
+ friend class Iter;
+ friend class SkPathPriv;
+ friend class SkPathStroker;
+
+ /* Append, in reverse order, the first contour of path, ignoring path's
+ last point. If no moveTo() call has been made for this contour, the
+ first point is automatically set to (0,0).
+ */
+ SkPath& reversePathTo(const SkPath&);
+
+ // called before we add points for lineTo, quadTo, cubicTo, checking to see
+ // if we need to inject a leading moveTo first
+ //
+ // SkPath path; path.lineTo(...); <--- need a leading moveTo(0, 0)
+ // SkPath path; ... path.close(); path.lineTo(...) <-- need a moveTo(previous moveTo)
+ //
+ inline void injectMoveToIfNeeded();
+
+ inline bool hasOnlyMoveTos() const;
+
+ SkPathConvexity computeConvexity() const;
+
+ bool isValidImpl() const;
+ /** Asserts if SkPath data is inconsistent.
+ Debugging check intended for internal use only.
+ */
+#ifdef SK_DEBUG
+ void validate() const;
+ void validateRef() const;
+#endif
+
+ // called by stroker to see if all points (in the last contour) are equal and worthy of a cap
+ bool isZeroLengthSincePoint(int startPtIndex) const;
+
+ /** Returns if the path can return a bound at no cost (true) or will have to
+ perform some computation (false).
+ */
+ bool hasComputedBounds() const;
+
+ // 'rect' needs to be sorted
+ void setBounds(const SkRect& rect);
+
+ void setPt(int index, SkScalar x, SkScalar y);
+
+ SkPath& dirtyAfterEdit();
+
+ // Bottlenecks for working with fConvexity and fFirstDirection.
+ // Notice the setters are const... these are mutable atomic fields.
+ void setConvexity(SkPathConvexity) const;
+
+ void setFirstDirection(SkPathFirstDirection) const;
+ SkPathFirstDirection getFirstDirection() const;
+
+ /** Returns the convexity type, computing if needed. Never returns kUnknown.
+ @return path's convexity type (convex or concave)
+ */
+ SkPathConvexity getConvexity() const;
+
+ SkPathConvexity getConvexityOrUnknown() const;
+
+ // Compares the cached value with a freshly computed one (computeConvexity())
+ bool isConvexityAccurate() const;
+
+ /** Stores a convexity type for this path. This is what will be returned if
+ * getConvexityOrUnknown() is called. If you pass kUnknown, then if getConvexity()
+ * is called, the real convexity will be computed.
+ *
+ * example: https://fiddle.skia.org/c/@Path_setConvexity
+ */
+ void setConvexity(SkPathConvexity convexity);
+
+ /** Shrinks SkPath verb array and SkPoint array storage to discard unused capacity.
+ * May reduce the heap overhead for SkPath known to be fully constructed.
+ *
+ * NOTE: This may relocate the underlying buffers, and thus any Iterators referencing
+ * this path should be discarded after calling shrinkToFit().
+ */
+ void shrinkToFit();
+
+ friend class SkAutoPathBoundsUpdate;
+ friend class SkAutoDisableOvalCheck;
+ friend class SkAutoDisableDirectionCheck;
+ friend class SkPathBuilder;
+ friend class SkPathEdgeIter;
+ friend class SkPathWriter;
+ friend class SkOpBuilder;
+ friend class SkBench_AddPathTest; // perf test reversePathTo
+ friend class PathTest_Private; // unit test reversePathTo
+ friend class ForceIsRRect_Private; // unit test isRRect
+ friend class FuzzPath; // for legacy access to validateRef
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathBuilder.h b/gfx/skia/skia/include/core/SkPathBuilder.h
new file mode 100644
index 0000000000..247c08624c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathBuilder.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathBuilder_DEFINED
+#define SkPathBuilder_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkPathRef.h"
+#include "include/private/base/SkTo.h"
+
+#include <initializer_list>
+
+class SkRRect;
+
+class SK_API SkPathBuilder {
+public:
+ SkPathBuilder();
+ SkPathBuilder(SkPathFillType);
+ SkPathBuilder(const SkPath&);
+ SkPathBuilder(const SkPathBuilder&) = default;
+ ~SkPathBuilder();
+
+ SkPathBuilder& operator=(const SkPath&);
+ SkPathBuilder& operator=(const SkPathBuilder&) = default;
+
+ SkPathFillType fillType() const { return fFillType; }
+ SkRect computeBounds() const;
+
+ SkPath snapshot() const; // the builder is unchanged after returning this path
+ SkPath detach(); // the builder is reset to empty after returning this path
+
+ SkPathBuilder& setFillType(SkPathFillType ft) { fFillType = ft; return *this; }
+ SkPathBuilder& setIsVolatile(bool isVolatile) { fIsVolatile = isVolatile; return *this; }
+
+ SkPathBuilder& reset();
+
+ SkPathBuilder& moveTo(SkPoint pt);
+ SkPathBuilder& moveTo(SkScalar x, SkScalar y) { return this->moveTo(SkPoint::Make(x, y)); }
+
+ SkPathBuilder& lineTo(SkPoint pt);
+ SkPathBuilder& lineTo(SkScalar x, SkScalar y) { return this->lineTo(SkPoint::Make(x, y)); }
+
+ SkPathBuilder& quadTo(SkPoint pt1, SkPoint pt2);
+ SkPathBuilder& quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) {
+ return this->quadTo(SkPoint::Make(x1, y1), SkPoint::Make(x2, y2));
+ }
+ SkPathBuilder& quadTo(const SkPoint pts[2]) { return this->quadTo(pts[0], pts[1]); }
+
+ SkPathBuilder& conicTo(SkPoint pt1, SkPoint pt2, SkScalar w);
+ SkPathBuilder& conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar w) {
+ return this->conicTo(SkPoint::Make(x1, y1), SkPoint::Make(x2, y2), w);
+ }
+ SkPathBuilder& conicTo(const SkPoint pts[2], SkScalar w) {
+ return this->conicTo(pts[0], pts[1], w);
+ }
+
+ SkPathBuilder& cubicTo(SkPoint pt1, SkPoint pt2, SkPoint pt3);
+ SkPathBuilder& cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar x3, SkScalar y3) {
+ return this->cubicTo(SkPoint::Make(x1, y1), SkPoint::Make(x2, y2), SkPoint::Make(x3, y3));
+ }
+ SkPathBuilder& cubicTo(const SkPoint pts[3]) {
+ return this->cubicTo(pts[0], pts[1], pts[2]);
+ }
+
+ SkPathBuilder& close();
+
+ // Append a series of lineTo(...)
+ SkPathBuilder& polylineTo(const SkPoint pts[], int count);
+ SkPathBuilder& polylineTo(const std::initializer_list<SkPoint>& list) {
+ return this->polylineTo(list.begin(), SkToInt(list.size()));
+ }
+
+ // Relative versions of segments, relative to the previous position.
+
+ SkPathBuilder& rLineTo(SkPoint pt);
+ SkPathBuilder& rLineTo(SkScalar x, SkScalar y) { return this->rLineTo({x, y}); }
+ SkPathBuilder& rQuadTo(SkPoint pt1, SkPoint pt2);
+ SkPathBuilder& rQuadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) {
+ return this->rQuadTo({x1, y1}, {x2, y2});
+ }
+ SkPathBuilder& rConicTo(SkPoint p1, SkPoint p2, SkScalar w);
+ SkPathBuilder& rConicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar w) {
+ return this->rConicTo({x1, y1}, {x2, y2}, w);
+ }
+ SkPathBuilder& rCubicTo(SkPoint pt1, SkPoint pt2, SkPoint pt3);
+ SkPathBuilder& rCubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar x3, SkScalar y3) {
+ return this->rCubicTo({x1, y1}, {x2, y2}, {x3, y3});
+ }
+
+ // Arcs
+
+ /** Appends arc to the builder. Arc added is part of ellipse
+ bounded by oval, from startAngle through sweepAngle. Both startAngle and
+ sweepAngle are measured in degrees, where zero degrees is aligned with the
+ positive x-axis, and positive sweeps extends arc clockwise.
+
+ arcTo() adds line connecting the builder's last point to initial arc point if forceMoveTo
+ is false and the builder is not empty. Otherwise, added contour begins with first point
+ of arc. Angles greater than -360 and less than 360 are treated modulo 360.
+
+ @param oval bounds of ellipse containing arc
+ @param startAngleDeg starting angle of arc in degrees
+ @param sweepAngleDeg sweep, in degrees. Positive is clockwise; treated modulo 360
+ @param forceMoveTo true to start a new contour with arc
+ @return reference to the builder
+ */
+ SkPathBuilder& arcTo(const SkRect& oval, SkScalar startAngleDeg, SkScalar sweepAngleDeg,
+ bool forceMoveTo);
+
+ /** Appends arc to SkPath, after appending line if needed. Arc is implemented by conic
+ weighted to describe part of circle. Arc is contained by tangent from
+ last SkPath point to p1, and tangent from p1 to p2. Arc
+ is part of circle sized to radius, positioned so it touches both tangent lines.
+
+ If last SkPath SkPoint does not start arc, arcTo() appends connecting line to SkPath.
+ The length of vector from p1 to p2 does not affect arc.
+
+ Arc sweep is always less than 180 degrees. If radius is zero, or if
+ tangents are nearly parallel, arcTo() appends line from last SkPath SkPoint to p1.
+
+ arcTo() appends at most one line and one conic.
+ arcTo() implements the functionality of PostScript arct and HTML Canvas arcTo.
+
+ @param p1 SkPoint common to pair of tangents
+ @param p2 end of second tangent
+ @param radius distance from arc to circle center
+ @return reference to SkPath
+ */
+ SkPathBuilder& arcTo(SkPoint p1, SkPoint p2, SkScalar radius);
+
+ enum ArcSize {
+ kSmall_ArcSize, //!< smaller of arc pair
+ kLarge_ArcSize, //!< larger of arc pair
+ };
+
+ /** Appends arc to SkPath. Arc is implemented by one or more conic weighted to describe
+ part of oval with radii (r.fX, r.fY) rotated by xAxisRotate degrees. Arc curves
+ from last SkPath SkPoint to (xy.fX, xy.fY), choosing one of four possible routes:
+ clockwise or counterclockwise,
+ and smaller or larger.
+
+ Arc sweep is always less than 360 degrees. arcTo() appends line to xy if either
+ radii are zero, or if last SkPath SkPoint equals (xy.fX, xy.fY). arcTo() scales radii r to
+ fit last SkPath SkPoint and xy if both are greater than zero but too small to describe
+ an arc.
+
+ arcTo() appends up to four conic curves.
+ arcTo() implements the functionality of SVG arc, although SVG sweep-flag value is
+ opposite the integer value of sweep; SVG sweep-flag uses 1 for clockwise, while
+ kCW_Direction cast to int is zero.
+
+ @param r radii on axes before x-axis rotation
+ @param xAxisRotate x-axis rotation in degrees; positive values are clockwise
+ @param largeArc chooses smaller or larger arc
+ @param sweep chooses clockwise or counterclockwise arc
+ @param xy end of arc
+ @return reference to SkPath
+ */
+ SkPathBuilder& arcTo(SkPoint r, SkScalar xAxisRotate, ArcSize largeArc, SkPathDirection sweep,
+ SkPoint xy);
+
+ /** Appends arc to the builder, as the start of new contour. Arc added is part of ellipse
+ bounded by oval, from startAngle through sweepAngle. Both startAngle and
+ sweepAngle are measured in degrees, where zero degrees is aligned with the
+ positive x-axis, and positive sweeps extends arc clockwise.
+
+ If sweepAngle <= -360, or sweepAngle >= 360; and startAngle modulo 90 is nearly
+ zero, append oval instead of arc. Otherwise, sweepAngle values are treated
+ modulo 360, and arc may or may not draw depending on numeric rounding.
+
+ @param oval bounds of ellipse containing arc
+ @param startAngleDeg starting angle of arc in degrees
+ @param sweepAngleDeg sweep, in degrees. Positive is clockwise; treated modulo 360
+ @return reference to this builder
+ */
+ SkPathBuilder& addArc(const SkRect& oval, SkScalar startAngleDeg, SkScalar sweepAngleDeg);
+
+ // Add a new contour
+
+ SkPathBuilder& addRect(const SkRect&, SkPathDirection, unsigned startIndex);
+ SkPathBuilder& addOval(const SkRect&, SkPathDirection, unsigned startIndex);
+ SkPathBuilder& addRRect(const SkRRect&, SkPathDirection, unsigned startIndex);
+
+ SkPathBuilder& addRect(const SkRect& rect, SkPathDirection dir = SkPathDirection::kCW) {
+ return this->addRect(rect, dir, 0);
+ }
+ SkPathBuilder& addOval(const SkRect& rect, SkPathDirection dir = SkPathDirection::kCW) {
+ // legacy start index: 1
+ return this->addOval(rect, dir, 1);
+ }
+ SkPathBuilder& addRRect(const SkRRect& rrect, SkPathDirection dir = SkPathDirection::kCW) {
+ // legacy start indices: 6 (CW) and 7 (CCW)
+ return this->addRRect(rrect, dir, dir == SkPathDirection::kCW ? 6 : 7);
+ }
+
+ SkPathBuilder& addCircle(SkScalar center_x, SkScalar center_y, SkScalar radius,
+ SkPathDirection dir = SkPathDirection::kCW);
+
+ SkPathBuilder& addPolygon(const SkPoint pts[], int count, bool isClosed);
+ SkPathBuilder& addPolygon(const std::initializer_list<SkPoint>& list, bool isClosed) {
+ return this->addPolygon(list.begin(), SkToInt(list.size()), isClosed);
+ }
+
+ SkPathBuilder& addPath(const SkPath&);
+
+ // Performance hint, to reserve extra storage for subsequent calls to lineTo, quadTo, etc.
+
+ void incReserve(int extraPtCount, int extraVerbCount);
+ void incReserve(int extraPtCount) {
+ this->incReserve(extraPtCount, extraPtCount);
+ }
+
+ SkPathBuilder& offset(SkScalar dx, SkScalar dy);
+
+ SkPathBuilder& toggleInverseFillType() {
+ fFillType = (SkPathFillType)((unsigned)fFillType ^ 2);
+ return *this;
+ }
+
+private:
+ SkPathRef::PointsArray fPts;
+ SkPathRef::VerbsArray fVerbs;
+ SkPathRef::ConicWeightsArray fConicWeights;
+
+ SkPathFillType fFillType;
+ bool fIsVolatile;
+
+ unsigned fSegmentMask;
+ SkPoint fLastMovePoint;
+ int fLastMoveIndex; // only needed until SkPath is immutable
+ bool fNeedsMoveVerb;
+
+ enum IsA {
+ kIsA_JustMoves, // we only have 0 or more moves
+ kIsA_MoreThanMoves, // we have verbs other than just move
+ kIsA_Oval, // we are 0 or more moves followed by an oval
+ kIsA_RRect, // we are 0 or more moves followed by a rrect
+ };
+ IsA fIsA = kIsA_JustMoves;
+ int fIsAStart = -1; // tracks direction iff fIsA is not unknown
+ bool fIsACCW = false; // tracks direction iff fIsA is not unknown
+
+ int countVerbs() const { return fVerbs.size(); }
+
+ // called right before we add a (non-move) verb
+ void ensureMove() {
+ fIsA = kIsA_MoreThanMoves;
+ if (fNeedsMoveVerb) {
+ this->moveTo(fLastMovePoint);
+ }
+ }
+
+ SkPath make(sk_sp<SkPathRef>) const;
+
+ SkPathBuilder& privateReverseAddPath(const SkPath&);
+
+ friend class SkPathPriv;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/include/core/SkPathEffect.h b/gfx/skia/skia/include/core/SkPathEffect.h
new file mode 100644
index 0000000000..abb370c52a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathEffect.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathEffect_DEFINED
+#define SkPathEffect_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkScalar.h"
+// not needed, but some of our clients need it (they don't IWYU)
+#include "include/core/SkPath.h"
+
+class SkPath;
+struct SkRect;
+class SkStrokeRec;
+
+/** \class SkPathEffect
+
+ SkPathEffect is the base class for objects in the SkPaint that affect
+ the geometry of a drawing primitive before it is transformed by the
+ canvas' matrix and drawn.
+
+ Dashing is implemented as a subclass of SkPathEffect.
+*/
+class SK_API SkPathEffect : public SkFlattenable {
+public:
+ /**
+ * Returns a patheffect that applies each effect (first and second) to the original path,
+ * and returns a path with the sum of these.
+ *
+ * result = first(path) + second(path)
+ *
+ */
+ static sk_sp<SkPathEffect> MakeSum(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second);
+
+ /**
+ * Returns a patheffect that applies the inner effect to the path, and then applies the
+ * outer effect to the result of the inner's.
+ *
+ * result = outer(inner(path))
+ */
+ static sk_sp<SkPathEffect> MakeCompose(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner);
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkPathEffect_Type;
+ }
+
+ // move to base?
+
+ enum DashType {
+ kNone_DashType, //!< ignores the info parameter
+ kDash_DashType, //!< fills in all of the info parameter
+ };
+
+ struct DashInfo {
+ DashInfo() : fIntervals(nullptr), fCount(0), fPhase(0) {}
+ DashInfo(SkScalar* intervals, int32_t count, SkScalar phase)
+ : fIntervals(intervals), fCount(count), fPhase(phase) {}
+
+ SkScalar* fIntervals; //!< Length of on/off intervals for dashed lines
+ // Even values represent ons, and odds offs
+ int32_t fCount; //!< Number of intervals in the dash. Should be even number
+ SkScalar fPhase; //!< Offset into the dashed interval pattern
+ // mod the sum of all intervals
+ };
+
+ DashType asADash(DashInfo* info) const;
+
+ /**
+ * Given a src path (input) and a stroke-rec (input and output), apply
+ * this effect to the src path, returning the new path in dst, and return
+ * true. If this effect cannot be applied, return false and ignore dst
+ * and stroke-rec.
+ *
+ * The stroke-rec specifies the initial request for stroking (if any).
+ * The effect can treat this as input only, or it can choose to change
+ * the rec as well. For example, the effect can decide to change the
+ * stroke's width or join, or the effect can change the rec from stroke
+ * to fill (or fill to stroke) in addition to returning a new (dst) path.
+ *
+ * If this method returns true, the caller will apply (as needed) the
+ * resulting stroke-rec to dst and then draw.
+ */
+ bool filterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect* cullR) const;
+
+ /** Version of filterPath that can be called when the CTM is known. */
+ bool filterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect* cullR,
+ const SkMatrix& ctm) const;
+
+ /** True if this path effect requires a valid CTM */
+ bool needsCTM() const;
+
+ static sk_sp<SkPathEffect> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr);
+
+private:
+ SkPathEffect() = default;
+ friend class SkPathEffectBase;
+
+ using INHERITED = SkFlattenable;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathMeasure.h b/gfx/skia/skia/include/core/SkPathMeasure.h
new file mode 100644
index 0000000000..167b18278d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathMeasure.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathMeasure_DEFINED
+#define SkPathMeasure_DEFINED
+
+#include "include/core/SkContourMeasure.h"
+#include "include/core/SkPath.h"
+#include "include/private/base/SkTDArray.h"
+
+class SK_API SkPathMeasure {
+public:
+ SkPathMeasure();
+ /** Initialize the pathmeasure with the specified path. The parts of the path that are needed
+ * are copied, so the client is free to modify/delete the path after this call.
+ *
+ * resScale controls the precision of the measure. values > 1 increase the
+ * precision (and possibly slow down the computation).
+ */
+ SkPathMeasure(const SkPath& path, bool forceClosed, SkScalar resScale = 1);
+ ~SkPathMeasure();
+
+ /** Reset the pathmeasure with the specified path. The parts of the path that are needed
+ * are copied, so the client is free to modify/delete the path after this call.
+ */
+ void setPath(const SkPath*, bool forceClosed);
+
+ /** Return the total length of the current contour, or 0 if no path
+ is associated (e.g. setPath(nullptr))
+ */
+ SkScalar getLength();
+
+ /** Pins distance to 0 <= distance <= getLength(), and then computes
+ the corresponding position and tangent.
+ Returns false if there is no path, or a zero-length path was specified, in which case
+ position and tangent are unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT getPosTan(SkScalar distance, SkPoint* position,
+ SkVector* tangent);
+
+ enum MatrixFlags {
+ kGetPosition_MatrixFlag = 0x01,
+ kGetTangent_MatrixFlag = 0x02,
+ kGetPosAndTan_MatrixFlag = kGetPosition_MatrixFlag | kGetTangent_MatrixFlag
+ };
+
+ /** Pins distance to 0 <= distance <= getLength(), and then computes
+ the corresponding matrix (by calling getPosTan).
+ Returns false if there is no path, or a zero-length path was specified, in which case
+ matrix is unchanged.
+ */
+ bool SK_WARN_UNUSED_RESULT getMatrix(SkScalar distance, SkMatrix* matrix,
+ MatrixFlags flags = kGetPosAndTan_MatrixFlag);
+
+ /** Given a start and stop distance, return in dst the intervening segment(s).
+ If the segment is zero-length, return false, else return true.
+ startD and stopD are pinned to legal values (0..getLength()). If startD > stopD
+ then return false (and leave dst untouched).
+ Begin the segment with a moveTo if startWithMoveTo is true
+ */
+ bool getSegment(SkScalar startD, SkScalar stopD, SkPath* dst, bool startWithMoveTo);
+
+ /** Return true if the current contour is closed
+ */
+ bool isClosed();
+
+ /** Move to the next contour in the path. Return true if one exists, or false if
+ we're done with the path.
+ */
+ bool nextContour();
+
+#ifdef SK_DEBUG
+ void dump();
+#endif
+
+private:
+ SkContourMeasureIter fIter;
+ sk_sp<SkContourMeasure> fContour;
+
+ SkPathMeasure(const SkPathMeasure&) = delete;
+ SkPathMeasure& operator=(const SkPathMeasure&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathTypes.h b/gfx/skia/skia/include/core/SkPathTypes.h
new file mode 100644
index 0000000000..963a6bda00
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathTypes.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathTypes_DEFINED
+#define SkPathTypes_DEFINED
+
+enum class SkPathFillType {
+ /** Specifies that "inside" is computed by a non-zero sum of signed edge crossings */
+ kWinding,
+ /** Specifies that "inside" is computed by an odd number of edge crossings */
+ kEvenOdd,
+ /** Same as Winding, but draws outside of the path, rather than inside */
+ kInverseWinding,
+ /** Same as EvenOdd, but draws outside of the path, rather than inside */
+ kInverseEvenOdd
+};
+
+static inline bool SkPathFillType_IsEvenOdd(SkPathFillType ft) {
+ return (static_cast<int>(ft) & 1) != 0;
+}
+
+static inline bool SkPathFillType_IsInverse(SkPathFillType ft) {
+ return (static_cast<int>(ft) & 2) != 0;
+}
+
+static inline SkPathFillType SkPathFillType_ConvertToNonInverse(SkPathFillType ft) {
+ return static_cast<SkPathFillType>(static_cast<int>(ft) & 1);
+}
+
+enum class SkPathDirection {
+ /** clockwise direction for adding closed contours */
+ kCW,
+ /** counter-clockwise direction for adding closed contours */
+ kCCW,
+};
+
+enum SkPathSegmentMask {
+ kLine_SkPathSegmentMask = 1 << 0,
+ kQuad_SkPathSegmentMask = 1 << 1,
+ kConic_SkPathSegmentMask = 1 << 2,
+ kCubic_SkPathSegmentMask = 1 << 3,
+};
+
+enum class SkPathVerb {
+ kMove, //!< SkPath::RawIter returns 1 point
+ kLine, //!< SkPath::RawIter returns 2 points
+ kQuad, //!< SkPath::RawIter returns 3 points
+ kConic, //!< SkPath::RawIter returns 3 points + 1 weight
+ kCubic, //!< SkPath::RawIter returns 4 points
+ kClose //!< SkPath::RawIter returns 0 points
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPathUtils.h b/gfx/skia/skia/include/core/SkPathUtils.h
new file mode 100644
index 0000000000..6285da7996
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPathUtils.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathUtils_DEFINED
+#define SkPathUtils_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+class SkMatrix;
+class SkPaint;
+class SkPath;
+struct SkRect;
+
+namespace skpathutils {
+
+/** Returns the filled equivalent of the stroked path.
+
+ @param src SkPath read to create a filled version
+ @param paint SkPaint, from which attributes such as stroke cap, width, miter, and join,
+ as well as pathEffect will be used.
+ @param dst resulting SkPath; may be the same as src, but may not be nullptr
+ @param cullRect optional limit passed to SkPathEffect
+ @param resScale if > 1, increase precision, else if (0 < resScale < 1) reduce precision
+ to favor speed and size
+ @return true if the dst path was updated, false if it was not (e.g. if the path
+ represents hairline and cannot be filled).
+*/
+SK_API bool FillPathWithPaint(const SkPath &src, const SkPaint &paint, SkPath *dst,
+ const SkRect *cullRect, SkScalar resScale = 1);
+
+SK_API bool FillPathWithPaint(const SkPath &src, const SkPaint &paint, SkPath *dst,
+ const SkRect *cullRect, const SkMatrix &ctm);
+
+SK_API bool FillPathWithPaint(const SkPath &src, const SkPaint &paint, SkPath *dst);
+
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPicture.h b/gfx/skia/skia/include/core/SkPicture.h
new file mode 100644
index 0000000000..bb384dfab1
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPicture.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPicture_DEFINED
+#define SkPicture_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+
+class SkCanvas;
+class SkData;
+struct SkDeserialProcs;
+class SkImage;
+class SkMatrix;
+struct SkSerialProcs;
+class SkStream;
+class SkWStream;
+
+/** \class SkPicture
+ SkPicture records drawing commands made to SkCanvas. The command stream may be
+ played in whole or in part at a later time.
+
+ SkPicture is an abstract class. SkPicture may be generated by SkPictureRecorder
+ or SkDrawable, or from SkPicture previously saved to SkData or SkStream.
+
+ SkPicture may contain any SkCanvas drawing command, as well as one or more
+ SkCanvas matrix or SkCanvas clip. SkPicture has a cull SkRect, which is used as
+ a bounding box hint. To limit SkPicture bounds, use SkCanvas clip when
+ recording or drawing SkPicture.
+*/
+class SK_API SkPicture : public SkRefCnt {
+public:
+ ~SkPicture() override;
+
+ /** Recreates SkPicture that was serialized into a stream. Returns constructed SkPicture
+ if successful; otherwise, returns nullptr. Fails if data does not permit
+ constructing valid SkPicture.
+
+ procs->fPictureProc permits supplying a custom function to decode SkPicture.
+ If procs->fPictureProc is nullptr, default decoding is used. procs->fPictureCtx
+ may be used to provide user context to procs->fPictureProc; procs->fPictureProc
+ is called with a pointer to data, data byte length, and user context.
+
+ @param stream container for serial data
+ @param procs custom serial data decoders; may be nullptr
+ @return SkPicture constructed from stream data
+ */
+ static sk_sp<SkPicture> MakeFromStream(SkStream* stream,
+ const SkDeserialProcs* procs = nullptr);
+
+ /** Recreates SkPicture that was serialized into data. Returns constructed SkPicture
+ if successful; otherwise, returns nullptr. Fails if data does not permit
+ constructing valid SkPicture.
+
+ procs->fPictureProc permits supplying a custom function to decode SkPicture.
+ If procs->fPictureProc is nullptr, default decoding is used. procs->fPictureCtx
+ may be used to provide user context to procs->fPictureProc; procs->fPictureProc
+ is called with a pointer to data, data byte length, and user context.
+
+ @param data container for serial data
+ @param procs custom serial data decoders; may be nullptr
+ @return SkPicture constructed from data
+ */
+ static sk_sp<SkPicture> MakeFromData(const SkData* data,
+ const SkDeserialProcs* procs = nullptr);
+
+ /**
+
+ @param data pointer to serial data
+ @param size size of data
+ @param procs custom serial data decoders; may be nullptr
+ @return SkPicture constructed from data
+ */
+ static sk_sp<SkPicture> MakeFromData(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr);
+
+ /** \class SkPicture::AbortCallback
+ AbortCallback is an abstract class. An implementation of AbortCallback may
+ passed as a parameter to SkPicture::playback, to stop it before all drawing
+ commands have been processed.
+
+ If AbortCallback::abort returns true, SkPicture::playback is interrupted.
+ */
+ class SK_API AbortCallback {
+ public:
+ /** Has no effect.
+ */
+ virtual ~AbortCallback() = default;
+
+ /** Stops SkPicture playback when some condition is met. A subclass of
+ AbortCallback provides an override for abort() that can stop SkPicture::playback.
+
+ The part of SkPicture drawn when aborted is undefined. SkPicture instantiations are
+ free to stop drawing at different points during playback.
+
+ If the abort happens inside one or more calls to SkCanvas::save(), stack
+ of SkCanvas matrix and SkCanvas clip values is restored to its state before
+ SkPicture::playback was called.
+
+ @return true to stop playback
+
+ example: https://fiddle.skia.org/c/@Picture_AbortCallback_abort
+ */
+ virtual bool abort() = 0;
+
+ protected:
+ AbortCallback() = default;
+ AbortCallback(const AbortCallback&) = delete;
+ AbortCallback& operator=(const AbortCallback&) = delete;
+ };
+
+ /** Replays the drawing commands on the specified canvas. In the case that the
+ commands are recorded, each command in the SkPicture is sent separately to canvas.
+
+ To add a single command to draw SkPicture to recording canvas, call
+ SkCanvas::drawPicture instead.
+
+ @param canvas receiver of drawing commands
+ @param callback allows interruption of playback
+
+ example: https://fiddle.skia.org/c/@Picture_playback
+ */
+ virtual void playback(SkCanvas* canvas, AbortCallback* callback = nullptr) const = 0;
+
+ /** Returns cull SkRect for this picture, passed in when SkPicture was created.
+ Returned SkRect does not specify clipping SkRect for SkPicture; cull is hint
+ of SkPicture bounds.
+
+ SkPicture is free to discard recorded drawing commands that fall outside
+ cull.
+
+ @return bounds passed when SkPicture was created
+
+ example: https://fiddle.skia.org/c/@Picture_cullRect
+ */
+ virtual SkRect cullRect() const = 0;
+
+ /** Returns a non-zero value unique among SkPicture in Skia process.
+
+ @return identifier for SkPicture
+ */
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /** Returns storage containing SkData describing SkPicture, using optional custom
+ encoders.
+
+ procs->fPictureProc permits supplying a custom function to encode SkPicture.
+ If procs->fPictureProc is nullptr, default encoding is used. procs->fPictureCtx
+ may be used to provide user context to procs->fPictureProc; procs->fPictureProc
+ is called with a pointer to SkPicture and user context.
+
+ @param procs custom serial data encoders; may be nullptr
+ @return storage containing serialized SkPicture
+
+ example: https://fiddle.skia.org/c/@Picture_serialize
+ */
+ sk_sp<SkData> serialize(const SkSerialProcs* procs = nullptr) const;
+
+ /** Writes picture to stream, using optional custom encoders.
+
+ procs->fPictureProc permits supplying a custom function to encode SkPicture.
+ If procs->fPictureProc is nullptr, default encoding is used. procs->fPictureCtx
+ may be used to provide user context to procs->fPictureProc; procs->fPictureProc
+ is called with a pointer to SkPicture and user context.
+
+ @param stream writable serial data stream
+ @param procs custom serial data encoders; may be nullptr
+
+ example: https://fiddle.skia.org/c/@Picture_serialize_2
+ */
+ void serialize(SkWStream* stream, const SkSerialProcs* procs = nullptr) const;
+
+ /** Returns a placeholder SkPicture. Result does not draw, and contains only
+ cull SkRect, a hint of its bounds. Result is immutable; it cannot be changed
+ later. Result identifier is unique.
+
+ Returned placeholder can be intercepted during playback to insert other
+ commands into SkCanvas draw stream.
+
+ @param cull placeholder dimensions
+ @return placeholder with unique identifier
+
+ example: https://fiddle.skia.org/c/@Picture_MakePlaceholder
+ */
+ static sk_sp<SkPicture> MakePlaceholder(SkRect cull);
+
+ /** Returns the approximate number of operations in SkPicture. Returned value
+ may be greater or less than the number of SkCanvas calls
+ recorded: some calls may be recorded as more than one operation, other
+ calls may be optimized away.
+
+ @param nested if true, include the op-counts of nested pictures as well, else
+ just return the count of the ops in the top-level picture.
+ @return approximate operation count
+
+ example: https://fiddle.skia.org/c/@Picture_approximateOpCount
+ */
+ virtual int approximateOpCount(bool nested = false) const = 0;
+
+ /** Returns the approximate byte size of SkPicture. Does not include large objects
+ referenced by SkPicture.
+
+ @return approximate size
+
+ example: https://fiddle.skia.org/c/@Picture_approximateBytesUsed
+ */
+ virtual size_t approximateBytesUsed() const = 0;
+
+ /** Return a new shader that will draw with this picture.
+ *
+ * @param tmx The tiling mode to use when sampling in the x-direction.
+ * @param tmy The tiling mode to use when sampling in the y-direction.
+ * @param mode How to filter the tiles
+ * @param localMatrix Optional matrix used when sampling
+ * @param tile The tile rectangle in picture coordinates: this represents the subset
+ * (or superset) of the picture used when building a tile. It is not
+ * affected by localMatrix and does not imply scaling (only translation
+ * and cropping). If null, the tile rect is considered equal to the picture
+ * bounds.
+ * @return Returns a new shader object. Note: this function never returns null.
+ */
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, SkFilterMode mode,
+ const SkMatrix* localMatrix, const SkRect* tileRect) const;
+
+ sk_sp<SkShader> makeShader(SkTileMode tmx, SkTileMode tmy, SkFilterMode mode) const {
+ return this->makeShader(tmx, tmy, mode, nullptr, nullptr);
+ }
+
+private:
+ // Allowed subclasses.
+ SkPicture();
+ friend class SkBigPicture;
+ friend class SkEmptyPicture;
+ friend class SkPicturePriv;
+
+ void serialize(SkWStream*, const SkSerialProcs*, class SkRefCntSet* typefaces,
+ bool textBlobsOnly=false) const;
+ static sk_sp<SkPicture> MakeFromStreamPriv(SkStream*, const SkDeserialProcs*,
+ class SkTypefacePlayback*,
+ int recursionLimit);
+ friend class SkPictureData;
+
+ /** Return true if the SkStream/Buffer represents a serialized picture, and
+ fills out SkPictInfo. After this function returns, the data source is not
+ rewound so it will have to be manually reset before passing to
+ MakeFromStream or MakeFromBuffer. Note, MakeFromStream and
+ MakeFromBuffer perform this check internally so these entry points are
+ intended for stand alone tools.
+ If false is returned, SkPictInfo is unmodified.
+ */
+ static bool StreamIsSKP(SkStream*, struct SkPictInfo*);
+ static bool BufferIsSKP(class SkReadBuffer*, struct SkPictInfo*);
+ friend bool SkPicture_StreamIsSKP(SkStream*, struct SkPictInfo*);
+
+ // Returns NULL if this is not an SkBigPicture.
+ virtual const class SkBigPicture* asSkBigPicture() const { return nullptr; }
+
+ static bool IsValidPictInfo(const struct SkPictInfo& info);
+ static sk_sp<SkPicture> Forwardport(const struct SkPictInfo&,
+ const class SkPictureData*,
+ class SkReadBuffer* buffer);
+
+ struct SkPictInfo createHeader() const;
+ class SkPictureData* backport() const;
+
+ uint32_t fUniqueID;
+ mutable std::atomic<bool> fAddedToCache{false};
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPictureRecorder.h b/gfx/skia/skia/include/core/SkPictureRecorder.h
new file mode 100644
index 0000000000..d91d105000
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPictureRecorder.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureRecorder_DEFINED
+#define SkPictureRecorder_DEFINED
+
+#include "include/core/SkBBHFactory.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRefCnt.h"
+
+#include <memory>
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+namespace android {
+ class Picture;
+};
+#endif
+
+class SkCanvas;
+class SkDrawable;
+class SkPictureRecord;
+class SkRecord;
+class SkRecorder;
+
+class SK_API SkPictureRecorder {
+public:
+ SkPictureRecorder();
+ ~SkPictureRecorder();
+
+ enum FinishFlags {
+ };
+
+ /** Returns the canvas that records the drawing commands.
+ @param bounds the cull rect used when recording this picture. Any drawing that falls outside
+ of this rect is undefined, and may be drawn or it may not.
+ @param bbh optional acceleration structure
+ @param recordFlags optional flags that control recording.
+ @return the canvas.
+ */
+ SkCanvas* beginRecording(const SkRect& bounds, sk_sp<SkBBoxHierarchy> bbh);
+
+ SkCanvas* beginRecording(const SkRect& bounds, SkBBHFactory* bbhFactory = nullptr);
+
+ SkCanvas* beginRecording(SkScalar width, SkScalar height,
+ SkBBHFactory* bbhFactory = nullptr) {
+ return this->beginRecording(SkRect::MakeWH(width, height), bbhFactory);
+ }
+
+ /** Returns the recording canvas if one is active, or NULL if recording is
+ not active. This does not alter the refcnt on the canvas (if present).
+ */
+ SkCanvas* getRecordingCanvas();
+
+ /**
+ * Signal that the caller is done recording. This invalidates the canvas returned by
+ * beginRecording/getRecordingCanvas. Ownership of the object is passed to the caller, who
+ * must call unref() when they are done using it.
+ *
+ * The returned picture is immutable. If during recording drawables were added to the canvas,
+ * these will have been "drawn" into a recording canvas, so that this resulting picture will
+ * reflect their current state, but will not contain a live reference to the drawables
+ * themselves.
+ */
+ sk_sp<SkPicture> finishRecordingAsPicture();
+
+ /**
+ * Signal that the caller is done recording, and update the cull rect to use for bounding
+ * box hierarchy (BBH) generation. The behavior is the same as calling
+ * finishRecordingAsPicture(), except that this method updates the cull rect initially passed
+ * into beginRecording.
+ * @param cullRect the new culling rectangle to use as the overall bound for BBH generation
+ * and subsequent culling operations.
+ * @return the picture containing the recorded content.
+ */
+ sk_sp<SkPicture> finishRecordingAsPictureWithCull(const SkRect& cullRect);
+
+ /**
+ * Signal that the caller is done recording. This invalidates the canvas returned by
+ * beginRecording/getRecordingCanvas. Ownership of the object is passed to the caller, who
+ * must call unref() when they are done using it.
+ *
+ * Unlike finishRecordingAsPicture(), which returns an immutable picture, the returned drawable
+ * may contain live references to other drawables (if they were added to the recording canvas)
+ * and therefore this drawable will reflect the current state of those nested drawables anytime
+ * it is drawn or a new picture is snapped from it (by calling drawable->newPictureSnapshot()).
+ */
+ sk_sp<SkDrawable> finishRecordingAsDrawable();
+
+private:
+ void reset();
+
+ /** Replay the current (partially recorded) operation stream into
+ canvas. This call doesn't close the current recording.
+ */
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ friend class android::Picture;
+#endif
+ friend class SkPictureRecorderReplayTester; // for unit testing
+ void partialReplay(SkCanvas* canvas) const;
+
+ bool fActivelyRecording;
+ SkRect fCullRect;
+ sk_sp<SkBBoxHierarchy> fBBH;
+ std::unique_ptr<SkRecorder> fRecorder;
+ sk_sp<SkRecord> fRecord;
+
+ SkPictureRecorder(SkPictureRecorder&&) = delete;
+ SkPictureRecorder& operator=(SkPictureRecorder&&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPixelRef.h b/gfx/skia/skia/include/core/SkPixelRef.h
new file mode 100644
index 0000000000..5d99821d72
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPixelRef.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixelRef_DEFINED
+#define SkPixelRef_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/private/SkIDChangeListener.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTDArray.h"
+
+#include <atomic>
+
+struct SkIRect;
+
+class GrTexture;
+class SkDiscardableMemory;
+
+/** \class SkPixelRef
+
+ This class is the smart container for pixel memory, and is used with SkBitmap.
+ This class can be shared/accessed between multiple threads.
+*/
+class SK_API SkPixelRef : public SkRefCnt {
+public:
+ SkPixelRef(int width, int height, void* addr, size_t rowBytes);
+ ~SkPixelRef() override;
+
+ SkISize dimensions() const { return {fWidth, fHeight}; }
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ void* pixels() const { return fPixels; }
+ size_t rowBytes() const { return fRowBytes; }
+
+ /** Returns a non-zero, unique value corresponding to the pixels in this
+ pixelref. Each time the pixels are changed (and notifyPixelsChanged is
+ called), a different generation ID will be returned.
+ */
+ uint32_t getGenerationID() const;
+
+ /**
+ * Call this if you have changed the contents of the pixels. This will in-
+ * turn cause a different generation ID value to be returned from
+ * getGenerationID().
+ */
+ void notifyPixelsChanged();
+
+ /** Returns true if this pixelref is marked as immutable, meaning that the
+ contents of its pixels will not change for the lifetime of the pixelref.
+ */
+ bool isImmutable() const { return fMutability != kMutable; }
+
+ /** Marks this pixelref is immutable, meaning that the contents of its
+ pixels will not change for the lifetime of the pixelref. This state can
+ be set on a pixelref, but it cannot be cleared once it is set.
+ */
+ void setImmutable();
+
+ // Register a listener that may be called the next time our generation ID changes.
+ //
+ // We'll only call the listener if we're confident that we are the only SkPixelRef with this
+ // generation ID. If our generation ID changes and we decide not to call the listener, we'll
+ // never call it: you must add a new listener for each generation ID change. We also won't call
+ // the listener when we're certain no one knows what our generation ID is.
+ //
+ // This can be used to invalidate caches keyed by SkPixelRef generation ID.
+ // Takes ownership of listener. Threadsafe.
+ void addGenIDChangeListener(sk_sp<SkIDChangeListener> listener);
+
+ // Call when this pixelref is part of the key to a resourcecache entry. This allows the cache
+ // to know automatically those entries can be purged when this pixelref is changed or deleted.
+ void notifyAddedToCache() {
+ fAddedToCache.store(true);
+ }
+
+ virtual SkDiscardableMemory* diagnostic_only_getDiscardable() const { return nullptr; }
+
+protected:
+ void android_only_reset(int width, int height, size_t rowBytes);
+
+private:
+ int fWidth;
+ int fHeight;
+ void* fPixels;
+ size_t fRowBytes;
+
+ // Bottom bit indicates the Gen ID is unique.
+ bool genIDIsUnique() const { return SkToBool(fTaggedGenID.load() & 1); }
+ mutable std::atomic<uint32_t> fTaggedGenID;
+
+ SkIDChangeListener::List fGenIDChangeListeners;
+
+ // Set true by caches when they cache content that's derived from the current pixels.
+ std::atomic<bool> fAddedToCache;
+
+ enum Mutability {
+ kMutable, // PixelRefs begin mutable.
+ kTemporarilyImmutable, // Considered immutable, but can revert to mutable.
+ kImmutable, // Once set to this state, it never leaves.
+ } fMutability : 8; // easily fits inside a byte
+
+ void needsNewGenID();
+ void callGenIDChangeListeners();
+
+ void setTemporarilyImmutable();
+ void restoreMutability();
+ friend class SkSurface_Raster; // For temporary immutable methods above.
+
+ void setImmutableWithID(uint32_t genID);
+ friend void SkBitmapCache_setImmutableWithID(SkPixelRef*, uint32_t);
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPixmap.h b/gfx/skia/skia/include/core/SkPixmap.h
new file mode 100644
index 0000000000..e9e6de9ae6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPixmap.h
@@ -0,0 +1,748 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixmap_DEFINED
+#define SkPixmap_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkSize.h"
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkAttributes.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SkColorSpace;
+enum SkAlphaType : int;
+struct SkMask;
+
+/** \class SkPixmap
+ SkPixmap provides a utility to pair SkImageInfo with pixels and row bytes.
+ SkPixmap is a low level class which provides convenience functions to access
+ raster destinations. SkCanvas can not draw SkPixmap, nor does SkPixmap provide
+ a direct drawing destination.
+
+ Use SkBitmap to draw pixels referenced by SkPixmap; use SkSurface to draw into
+ pixels referenced by SkPixmap.
+
+ SkPixmap does not try to manage the lifetime of the pixel memory. Use SkPixelRef
+ to manage pixel memory; SkPixelRef is safe across threads.
+*/
+class SK_API SkPixmap {
+public:
+
+ /** Creates an empty SkPixmap without pixels, with kUnknown_SkColorType, with
+ kUnknown_SkAlphaType, and with a width and height of zero. Use
+ reset() to associate pixels, SkColorType, SkAlphaType, width, and height
+ after SkPixmap has been created.
+
+ @return empty SkPixmap
+ */
+ SkPixmap()
+ : fPixels(nullptr), fRowBytes(0), fInfo(SkImageInfo::MakeUnknown(0, 0))
+ {}
+
+ /** Creates SkPixmap from info width, height, SkAlphaType, and SkColorType.
+ addr points to pixels, or nullptr. rowBytes should be info.width() times
+ info.bytesPerPixel(), or larger.
+
+ No parameter checking is performed; it is up to the caller to ensure that
+ addr and rowBytes agree with info.
+
+ The memory lifetime of pixels is managed by the caller. When SkPixmap goes
+ out of scope, addr is unaffected.
+
+ SkPixmap may be later modified by reset() to change its size, pixel type, or
+ storage.
+
+ @param info width, height, SkAlphaType, SkColorType of SkImageInfo
+ @param addr pointer to pixels allocated by caller; may be nullptr
+ @param rowBytes size of one row of addr; width times pixel size, or larger
+ @return initialized SkPixmap
+ */
+ SkPixmap(const SkImageInfo& info, const void* addr, size_t rowBytes)
+ : fPixels(addr), fRowBytes(rowBytes), fInfo(info)
+ {}
+
+ /** Sets width, height, row bytes to zero; pixel address to nullptr; SkColorType to
+ kUnknown_SkColorType; and SkAlphaType to kUnknown_SkAlphaType.
+
+ The prior pixels are unaffected; it is up to the caller to release pixels
+ memory if desired.
+
+ example: https://fiddle.skia.org/c/@Pixmap_reset
+ */
+ void reset();
+
+ /** Sets width, height, SkAlphaType, and SkColorType from info.
+ Sets pixel address from addr, which may be nullptr.
+ Sets row bytes from rowBytes, which should be info.width() times
+ info.bytesPerPixel(), or larger.
+
+ Does not check addr. Asserts if built with SK_DEBUG defined and if rowBytes is
+ too small to hold one row of pixels.
+
+        The memory lifetime of pixels is managed by the caller. When SkPixmap goes
+ out of scope, addr is unaffected.
+
+ @param info width, height, SkAlphaType, SkColorType of SkImageInfo
+ @param addr pointer to pixels allocated by caller; may be nullptr
+ @param rowBytes size of one row of addr; width times pixel size, or larger
+
+ example: https://fiddle.skia.org/c/@Pixmap_reset_2
+ */
+ void reset(const SkImageInfo& info, const void* addr, size_t rowBytes);
+
+ /** Changes SkColorSpace in SkImageInfo; preserves width, height, SkAlphaType, and
+ SkColorType in SkImage, and leaves pixel address and row bytes unchanged.
+ SkColorSpace reference count is incremented.
+
+ @param colorSpace SkColorSpace moved to SkImageInfo
+
+ example: https://fiddle.skia.org/c/@Pixmap_setColorSpace
+ */
+ void setColorSpace(sk_sp<SkColorSpace> colorSpace);
+
+ /** Deprecated.
+ */
+ bool SK_WARN_UNUSED_RESULT reset(const SkMask& mask);
+
+ /** Sets subset width, height, pixel address to intersection of SkPixmap with area,
+ if intersection is not empty; and return true. Otherwise, leave subset unchanged
+ and return false.
+
+ Failing to read the return value generates a compile time warning.
+
+ @param subset storage for width, height, pixel address of intersection
+ @param area bounds to intersect with SkPixmap
+ @return true if intersection of SkPixmap and area is not empty
+ */
+ bool SK_WARN_UNUSED_RESULT extractSubset(SkPixmap* subset, const SkIRect& area) const;
+
+ /** Returns width, height, SkAlphaType, SkColorType, and SkColorSpace.
+
+ @return reference to SkImageInfo
+ */
+ const SkImageInfo& info() const { return fInfo; }
+
+ /** Returns row bytes, the interval from one pixel row to the next. Row bytes
+ is at least as large as: width() * info().bytesPerPixel().
+
+ Returns zero if colorType() is kUnknown_SkColorType.
+ It is up to the SkBitmap creator to ensure that row bytes is a useful value.
+
+ @return byte length of pixel row
+ */
+ size_t rowBytes() const { return fRowBytes; }
+
+ /** Returns pixel address, the base address corresponding to the pixel origin.
+
+ It is up to the SkPixmap creator to ensure that pixel address is a useful value.
+
+ @return pixel address
+ */
+ const void* addr() const { return fPixels; }
+
+ /** Returns pixel count in each pixel row. Should be equal or less than:
+ rowBytes() / info().bytesPerPixel().
+
+ @return pixel width in SkImageInfo
+ */
+ int width() const { return fInfo.width(); }
+
+ /** Returns pixel row count.
+
+ @return pixel height in SkImageInfo
+ */
+ int height() const { return fInfo.height(); }
+
+ /**
+ * Return the dimensions of the pixmap (from its ImageInfo)
+ */
+ SkISize dimensions() const { return fInfo.dimensions(); }
+
+ SkColorType colorType() const { return fInfo.colorType(); }
+
+ SkAlphaType alphaType() const { return fInfo.alphaType(); }
+
+ /** Returns SkColorSpace, the range of colors, associated with SkImageInfo. The
+ reference count of SkColorSpace is unchanged. The returned SkColorSpace is
+ immutable.
+
+ @return SkColorSpace in SkImageInfo, or nullptr
+ */
+ SkColorSpace* colorSpace() const;
+
+ /** Returns smart pointer to SkColorSpace, the range of colors, associated with
+ SkImageInfo. The smart pointer tracks the number of objects sharing this
+ SkColorSpace reference so the memory is released when the owners destruct.
+
+ The returned SkColorSpace is immutable.
+
+ @return SkColorSpace in SkImageInfo wrapped in a smart pointer
+ */
+ sk_sp<SkColorSpace> refColorSpace() const;
+
+ /** Returns true if SkAlphaType is kOpaque_SkAlphaType.
+ Does not check if SkColorType allows alpha, or if any pixel value has
+ transparency.
+
+ @return true if SkImageInfo has opaque SkAlphaType
+ */
+ bool isOpaque() const { return fInfo.isOpaque(); }
+
+ /** Returns SkIRect { 0, 0, width(), height() }.
+
+ @return integral rectangle from origin to width() and height()
+ */
+ SkIRect bounds() const { return SkIRect::MakeWH(this->width(), this->height()); }
+
+ /** Returns number of pixels that fit on row. Should be greater than or equal to
+ width().
+
+ @return maximum pixels per row
+ */
+ int rowBytesAsPixels() const { return int(fRowBytes >> this->shiftPerPixel()); }
+
+ /** Returns bit shift converting row bytes to row pixels.
+ Returns zero for kUnknown_SkColorType.
+
+ @return one of: 0, 1, 2, 3; left shift to convert pixels to bytes
+ */
+ int shiftPerPixel() const { return fInfo.shiftPerPixel(); }
+
+ /** Returns minimum memory required for pixel storage.
+ Does not include unused memory on last row when rowBytesAsPixels() exceeds width().
+ Returns SIZE_MAX if result does not fit in size_t.
+ Returns zero if height() or width() is 0.
+ Returns height() times rowBytes() if colorType() is kUnknown_SkColorType.
+
+ @return size in bytes of image buffer
+ */
+ size_t computeByteSize() const { return fInfo.computeByteSize(fRowBytes); }
+
+ /** Returns true if all pixels are opaque. SkColorType determines how pixels
+ are encoded, and whether pixel describes alpha. Returns true for SkColorType
+ without alpha in each pixel; for other SkColorType, returns true if all
+ pixels have alpha values equivalent to 1.0 or greater.
+
+ For SkColorType kRGB_565_SkColorType or kGray_8_SkColorType: always
+ returns true. For SkColorType kAlpha_8_SkColorType, kBGRA_8888_SkColorType,
+ kRGBA_8888_SkColorType: returns true if all pixel alpha values are 255.
+ For SkColorType kARGB_4444_SkColorType: returns true if all pixel alpha values are 15.
+ For kRGBA_F16_SkColorType: returns true if all pixel alpha values are 1.0 or
+ greater.
+
+ Returns false for kUnknown_SkColorType.
+
+ @return true if all pixels have opaque values or SkColorType is opaque
+
+ example: https://fiddle.skia.org/c/@Pixmap_computeIsOpaque
+ */
+ bool computeIsOpaque() const;
+
+ /** Returns pixel at (x, y) as unpremultiplied color.
+ Returns black with alpha if SkColorType is kAlpha_8_SkColorType.
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined; and returns undefined values or may crash if
+ SK_RELEASE is defined. Fails if SkColorType is kUnknown_SkColorType or
+ pixel address is nullptr.
+
+ SkColorSpace in SkImageInfo is ignored. Some color precision may be lost in the
+ conversion to unpremultiplied color; original pixel data may have additional
+ precision.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return pixel converted to unpremultiplied color
+
+ example: https://fiddle.skia.org/c/@Pixmap_getColor
+ */
+ SkColor getColor(int x, int y) const;
+
+ /** Returns pixel at (x, y) as unpremultiplied color as an SkColor4f.
+ Returns black with alpha if SkColorType is kAlpha_8_SkColorType.
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined; and returns undefined values or may crash if
+ SK_RELEASE is defined. Fails if SkColorType is kUnknown_SkColorType or
+ pixel address is nullptr.
+
+ SkColorSpace in SkImageInfo is ignored. Some color precision may be lost in the
+ conversion to unpremultiplied color; original pixel data may have additional
+ precision, though this is less likely than for getColor(). Rounding errors may
+ occur if the underlying type has lower precision.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return pixel converted to unpremultiplied float color
+ */
+ SkColor4f getColor4f(int x, int y) const;
+
+ /** Look up the pixel at (x,y) and return its alpha component, normalized to [0..1].
+        This is roughly equivalent to SkGetColorA(getColor()), but can be more efficient
+ (and more precise if the pixels store more than 8 bits per component).
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return alpha converted to normalized float
+ */
+ float getAlphaf(int x, int y) const;
+
+ /** Returns readable pixel address at (x, y). Returns nullptr if SkPixelRef is nullptr.
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined. Returns nullptr if SkColorType is kUnknown_SkColorType.
+
+ Performs a lookup of pixel size; for better performance, call
+ one of: addr8, addr16, addr32, addr64, or addrF16().
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable generic pointer to pixel
+ */
+ const void* addr(int x, int y) const {
+ return (const char*)fPixels + fInfo.computeOffset(x, y, fRowBytes);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 8-bit bytes.
+ Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType or
+ kGray_8_SkColorType, and is built with SK_DEBUG defined.
+
+ One byte corresponds to one pixel.
+
+ @return readable unsigned 8-bit pointer to pixels
+ */
+ const uint8_t* addr8() const {
+ SkASSERT(1 == fInfo.bytesPerPixel());
+ return reinterpret_cast<const uint8_t*>(fPixels);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 16-bit words.
+ Will trigger an assert() if SkColorType is not kRGB_565_SkColorType or
+ kARGB_4444_SkColorType, and is built with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @return readable unsigned 16-bit pointer to pixels
+ */
+ const uint16_t* addr16() const {
+ SkASSERT(2 == fInfo.bytesPerPixel());
+ return reinterpret_cast<const uint16_t*>(fPixels);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 32-bit words.
+ Will trigger an assert() if SkColorType is not kRGBA_8888_SkColorType or
+ kBGRA_8888_SkColorType, and is built with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @return readable unsigned 32-bit pointer to pixels
+ */
+ const uint32_t* addr32() const {
+ SkASSERT(4 == fInfo.bytesPerPixel());
+ return reinterpret_cast<const uint32_t*>(fPixels);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 64-bit words.
+ Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built
+ with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @return readable unsigned 64-bit pointer to pixels
+ */
+ const uint64_t* addr64() const {
+ SkASSERT(8 == fInfo.bytesPerPixel());
+ return reinterpret_cast<const uint64_t*>(fPixels);
+ }
+
+ /** Returns readable base pixel address. Result is addressable as unsigned 16-bit words.
+ Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built
+ with SK_DEBUG defined.
+
+ Each word represents one color component encoded as a half float.
+ Four words correspond to one pixel.
+
+ @return readable unsigned 16-bit pointer to first component of pixels
+ */
+ const uint16_t* addrF16() const {
+ SkASSERT(8 == fInfo.bytesPerPixel());
+ SkASSERT(kRGBA_F16_SkColorType == fInfo.colorType() ||
+ kRGBA_F16Norm_SkColorType == fInfo.colorType());
+ return reinterpret_cast<const uint16_t*>(fPixels);
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType or
+ kGray_8_SkColorType, and is built with SK_DEBUG defined.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 8-bit pointer to pixel at (x, y)
+ */
+ const uint8_t* addr8(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint8_t*)((const char*)this->addr8() + (size_t)y * fRowBytes + (x << 0));
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kRGB_565_SkColorType or
+ kARGB_4444_SkColorType, and is built with SK_DEBUG defined.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 16-bit pointer to pixel at (x, y)
+ */
+ const uint16_t* addr16(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint16_t*)((const char*)this->addr16() + (size_t)y * fRowBytes + (x << 1));
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kRGBA_8888_SkColorType or
+ kBGRA_8888_SkColorType, and is built with SK_DEBUG defined.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 32-bit pointer to pixel at (x, y)
+ */
+ const uint32_t* addr32(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint32_t*)((const char*)this->addr32() + (size_t)y * fRowBytes + (x << 2));
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built
+ with SK_DEBUG defined.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 64-bit pointer to pixel at (x, y)
+ */
+ const uint64_t* addr64(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)fInfo.width());
+ SkASSERT((unsigned)y < (unsigned)fInfo.height());
+ return (const uint64_t*)((const char*)this->addr64() + (size_t)y * fRowBytes + (x << 3));
+ }
+
+ /** Returns readable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined.
+
+ Will trigger an assert() if SkColorType is not kRGBA_F16_SkColorType and is built
+ with SK_DEBUG defined.
+
+ Each unsigned 16-bit word represents one color component encoded as a half float.
+ Four words correspond to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return readable unsigned 16-bit pointer to pixel component at (x, y)
+ */
+ const uint16_t* addrF16(int x, int y) const {
+ SkASSERT(kRGBA_F16_SkColorType == fInfo.colorType() ||
+ kRGBA_F16Norm_SkColorType == fInfo.colorType());
+ return reinterpret_cast<const uint16_t*>(this->addr64(x, y));
+ }
+
+ /** Returns writable base pixel address.
+
+ @return writable generic base pointer to pixels
+ */
+ void* writable_addr() const { return const_cast<void*>(fPixels); }
+
+ /** Returns writable pixel address at (x, y).
+
+ Input is not validated: out of bounds values of x or y trigger an assert() if
+ built with SK_DEBUG defined. Returns zero if SkColorType is kUnknown_SkColorType.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable generic pointer to pixel
+ */
+ void* writable_addr(int x, int y) const {
+ return const_cast<void*>(this->addr(x, y));
+ }
+
+ /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 8-bit bytes. Will trigger an assert() if SkColorType is not kAlpha_8_SkColorType
+ or kGray_8_SkColorType, and is built with SK_DEBUG defined.
+
+ One byte corresponds to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 8-bit pointer to pixels
+ */
+ uint8_t* writable_addr8(int x, int y) const {
+ return const_cast<uint8_t*>(this->addr8(x, y));
+ }
+
+    /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 16-bit words. Will trigger an assert() if SkColorType is not kRGB_565_SkColorType
+ or kARGB_4444_SkColorType, and is built with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 16-bit pointer to pixel
+ */
+ uint16_t* writable_addr16(int x, int y) const {
+ return const_cast<uint16_t*>(this->addr16(x, y));
+ }
+
+ /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 32-bit words. Will trigger an assert() if SkColorType is not
+ kRGBA_8888_SkColorType or kBGRA_8888_SkColorType, and is built with SK_DEBUG
+ defined.
+
+ One word corresponds to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 32-bit pointer to pixel
+ */
+ uint32_t* writable_addr32(int x, int y) const {
+ return const_cast<uint32_t*>(this->addr32(x, y));
+ }
+
+ /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 64-bit words. Will trigger an assert() if SkColorType is not
+ kRGBA_F16_SkColorType and is built with SK_DEBUG defined.
+
+ One word corresponds to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 64-bit pointer to pixel
+ */
+ uint64_t* writable_addr64(int x, int y) const {
+ return const_cast<uint64_t*>(this->addr64(x, y));
+ }
+
+ /** Returns writable pixel address at (x, y). Result is addressable as unsigned
+ 16-bit words. Will trigger an assert() if SkColorType is not
+ kRGBA_F16_SkColorType and is built with SK_DEBUG defined.
+
+ Each word represents one color component encoded as a half float.
+ Four words correspond to one pixel.
+
+ @param x column index, zero or greater, and less than width()
+ @param y row index, zero or greater, and less than height()
+ @return writable unsigned 16-bit pointer to first component of pixel
+ */
+ uint16_t* writable_addrF16(int x, int y) const {
+ return reinterpret_cast<uint16_t*>(writable_addr64(x, y));
+ }
+
+ /** Copies a SkRect of pixels to dstPixels. Copy starts at (0, 0), and does not
+ exceed SkPixmap (width(), height()).
+
+ dstInfo specifies width, height, SkColorType, SkAlphaType, and
+ SkColorSpace of destination. dstRowBytes specifics the gap from one destination
+ row to the next. Returns true if pixels are copied. Returns false if
+ dstInfo address equals nullptr, or dstRowBytes is less than dstInfo.minRowBytes().
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must
+ match. If SkPixmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ Returns false if SkPixmap width() or height() is zero or negative.
+
+ @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace
+ @param dstPixels destination pixel storage
+ @param dstRowBytes destination row length
+ @return true if pixels are copied to dstPixels
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes) const {
+ return this->readPixels(dstInfo, dstPixels, dstRowBytes, 0, 0);
+ }
+
+ /** Copies a SkRect of pixels to dstPixels. Copy starts at (srcX, srcY), and does not
+ exceed SkPixmap (width(), height()).
+
+ dstInfo specifies width, height, SkColorType, SkAlphaType, and
+ SkColorSpace of destination. dstRowBytes specifics the gap from one destination
+ row to the next. Returns true if pixels are copied. Returns false if
+ dstInfo address equals nullptr, or dstRowBytes is less than dstInfo.minRowBytes().
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dstInfo.colorType() must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dstInfo.colorSpace() must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dstInfo.alphaType() must
+ match. If SkPixmap colorSpace() is nullptr, dstInfo.colorSpace() must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+ false if SkPixmap width() or height() is zero or negative. Returns false if:
+ abs(srcX) >= Pixmap width(), or if abs(srcY) >= Pixmap height().
+
+ @param dstInfo destination width, height, SkColorType, SkAlphaType, SkColorSpace
+ @param dstPixels destination pixel storage
+ @param dstRowBytes destination row length
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @return true if pixels are copied to dstPixels
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes, int srcX,
+ int srcY) const;
+
+ /** Copies a SkRect of pixels to dst. Copy starts at (srcX, srcY), and does not
+ exceed SkPixmap (width(), height()). dst specifies width, height, SkColorType,
+ SkAlphaType, and SkColorSpace of destination. Returns true if pixels are copied.
+ Returns false if dst address equals nullptr, or dst.rowBytes() is less than
+ dst SkImageInfo::minRowBytes.
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst.info().colorType must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dst.info().colorSpace must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dst.info().alphaType must
+ match. If SkPixmap colorSpace() is nullptr, dst.info().colorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ srcX and srcY may be negative to copy only top or left of source. Returns
+        false if SkPixmap width() or height() is zero or negative. Returns false if:
+ abs(srcX) >= Pixmap width(), or if abs(srcY) >= Pixmap height().
+
+ @param dst SkImageInfo and pixel address to write to
+ @param srcX column index whose absolute value is less than width()
+ @param srcY row index whose absolute value is less than height()
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY) const {
+ return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), srcX, srcY);
+ }
+
+ /** Copies pixels inside bounds() to dst. dst specifies width, height, SkColorType,
+ SkAlphaType, and SkColorSpace of destination. Returns true if pixels are copied.
+ Returns false if dst address equals nullptr, or dst.rowBytes() is less than
+ dst SkImageInfo::minRowBytes.
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must
+ match. If SkPixmap colorSpace() is nullptr, dst SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ Returns false if SkPixmap width() or height() is zero or negative.
+
+ @param dst SkImageInfo and pixel address to write to
+ @return true if pixels are copied to dst
+ */
+ bool readPixels(const SkPixmap& dst) const {
+ return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), 0, 0);
+ }
+
+ /** Copies SkBitmap to dst, scaling pixels to fit dst.width() and dst.height(), and
+ converting pixels to match dst.colorType() and dst.alphaType(). Returns true if
+ pixels are copied. Returns false if dst address is nullptr, or dst.rowBytes() is
+ less than dst SkImageInfo::minRowBytes.
+
+ Pixels are copied only if pixel conversion is possible. If SkPixmap colorType() is
+ kGray_8_SkColorType, or kAlpha_8_SkColorType; dst SkColorType must match.
+ If SkPixmap colorType() is kGray_8_SkColorType, dst SkColorSpace must match.
+ If SkPixmap alphaType() is kOpaque_SkAlphaType, dst SkAlphaType must
+ match. If SkPixmap colorSpace() is nullptr, dst SkColorSpace must match. Returns
+ false if pixel conversion is not possible.
+
+ Returns false if SkBitmap width() or height() is zero or negative.
+
+ @param dst SkImageInfo and pixel address to write to
+ @return true if pixels are scaled to fit dst
+
+ example: https://fiddle.skia.org/c/@Pixmap_scalePixels
+ */
+ bool scalePixels(const SkPixmap& dst, const SkSamplingOptions&) const;
+
+ /** Writes color to pixels bounded by subset; returns true on success.
+ Returns false if colorType() is kUnknown_SkColorType, or if subset does
+ not intersect bounds().
+
+ @param color sRGB unpremultiplied color to write
+ @param subset bounding integer SkRect of written pixels
+ @return true if pixels are changed
+
+ example: https://fiddle.skia.org/c/@Pixmap_erase
+ */
+ bool erase(SkColor color, const SkIRect& subset) const;
+
+ /** Writes color to pixels inside bounds(); returns true on success.
+ Returns false if colorType() is kUnknown_SkColorType, or if bounds()
+ is empty.
+
+ @param color sRGB unpremultiplied color to write
+ @return true if pixels are changed
+ */
+ bool erase(SkColor color) const { return this->erase(color, this->bounds()); }
+
+ /** Writes color to pixels bounded by subset; returns true on success.
+        if subset is nullptr, writes color to pixels inside bounds(). Returns false if
+ colorType() is kUnknown_SkColorType, if subset is not nullptr and does
+ not intersect bounds(), or if subset is nullptr and bounds() is empty.
+
+ @param color sRGB unpremultiplied color to write
+ @param subset bounding integer SkRect of pixels to write; may be nullptr
+ @return true if pixels are changed
+
+ example: https://fiddle.skia.org/c/@Pixmap_erase_3
+ */
+ bool erase(const SkColor4f& color, const SkIRect* subset = nullptr) const {
+ return this->erase(color, nullptr, subset);
+ }
+
+ /** Writes color to pixels bounded by subset; returns true on success.
+        if subset is nullptr, writes color to pixels inside bounds(). Returns false if
+ colorType() is kUnknown_SkColorType, if subset is not nullptr and does
+ not intersect bounds(), or if subset is nullptr and bounds() is empty.
+
+ @param color unpremultiplied color to write
+ @param cs SkColorSpace of color
+ @param subset bounding integer SkRect of pixels to write; may be nullptr
+ @return true if pixels are changed
+ */
+ bool erase(const SkColor4f& color, SkColorSpace* cs, const SkIRect* subset = nullptr) const;
+
+private:
+ const void* fPixels;
+ size_t fRowBytes;
+ SkImageInfo fInfo;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPoint.h b/gfx/skia/skia/include/core/SkPoint.h
new file mode 100644
index 0000000000..a5e7fa09fb
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPoint.h
@@ -0,0 +1,568 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPoint_DEFINED
+#define SkPoint_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkSafe32.h"
+
+#include <cstdint>
+
+struct SkIPoint;
+
+/** SkIVector provides an alternative name for SkIPoint. SkIVector and SkIPoint
+ can be used interchangeably for all purposes.
+*/
+typedef SkIPoint SkIVector;
+
+/** \struct SkIPoint
+ SkIPoint holds two 32-bit integer coordinates.
+*/
+struct SkIPoint {
+ int32_t fX; //!< x-axis value
+ int32_t fY; //!< y-axis value
+
+ /** Sets fX to x, fY to y.
+
+ @param x integer x-axis value of constructed SkIPoint
+ @param y integer y-axis value of constructed SkIPoint
+ @return SkIPoint (x, y)
+ */
+ static constexpr SkIPoint Make(int32_t x, int32_t y) {
+ return {x, y};
+ }
+
+ /** Returns x-axis value of SkIPoint.
+
+ @return fX
+ */
+ constexpr int32_t x() const { return fX; }
+
+ /** Returns y-axis value of SkIPoint.
+
+ @return fY
+ */
+ constexpr int32_t y() const { return fY; }
+
+ /** Returns true if fX and fY are both zero.
+
+ @return true if fX is zero and fY is zero
+ */
+ bool isZero() const { return (fX | fY) == 0; }
+
+ /** Sets fX to x and fY to y.
+
+ @param x new value for fX
+ @param y new value for fY
+ */
+ void set(int32_t x, int32_t y) {
+ fX = x;
+ fY = y;
+ }
+
+ /** Returns SkIPoint changing the signs of fX and fY.
+
+ @return SkIPoint as (-fX, -fY)
+ */
+ SkIPoint operator-() const {
+ return {-fX, -fY};
+ }
+
+ /** Offsets SkIPoint by ivector v. Sets SkIPoint to (fX + v.fX, fY + v.fY).
+
+ @param v ivector to add
+ */
+ void operator+=(const SkIVector& v) {
+ fX = Sk32_sat_add(fX, v.fX);
+ fY = Sk32_sat_add(fY, v.fY);
+ }
+
+ /** Subtracts ivector v from SkIPoint. Sets SkIPoint to: (fX - v.fX, fY - v.fY).
+
+ @param v ivector to subtract
+ */
+ void operator-=(const SkIVector& v) {
+ fX = Sk32_sat_sub(fX, v.fX);
+ fY = Sk32_sat_sub(fY, v.fY);
+ }
+
+ /** Returns true if SkIPoint is equivalent to SkIPoint constructed from (x, y).
+
+ @param x value compared with fX
+ @param y value compared with fY
+ @return true if SkIPoint equals (x, y)
+ */
+ bool equals(int32_t x, int32_t y) const {
+ return fX == x && fY == y;
+ }
+
+ /** Returns true if a is equivalent to b.
+
+ @param a SkIPoint to compare
+ @param b SkIPoint to compare
+ @return true if a.fX == b.fX and a.fY == b.fY
+ */
+ friend bool operator==(const SkIPoint& a, const SkIPoint& b) {
+ return a.fX == b.fX && a.fY == b.fY;
+ }
+
+ /** Returns true if a is not equivalent to b.
+
+ @param a SkIPoint to compare
+ @param b SkIPoint to compare
+ @return true if a.fX != b.fX or a.fY != b.fY
+ */
+ friend bool operator!=(const SkIPoint& a, const SkIPoint& b) {
+ return a.fX != b.fX || a.fY != b.fY;
+ }
+
+ /** Returns ivector from b to a; computed as (a.fX - b.fX, a.fY - b.fY).
+
+ Can also be used to subtract ivector from ivector, returning ivector.
+
+ @param a SkIPoint or ivector to subtract from
+ @param b ivector to subtract
+ @return ivector from b to a
+ */
+ friend SkIVector operator-(const SkIPoint& a, const SkIPoint& b) {
+ return { Sk32_sat_sub(a.fX, b.fX), Sk32_sat_sub(a.fY, b.fY) };
+ }
+
+ /** Returns SkIPoint resulting from SkIPoint a offset by ivector b, computed as:
+ (a.fX + b.fX, a.fY + b.fY).
+
+ Can also be used to offset SkIPoint b by ivector a, returning SkIPoint.
+ Can also be used to add ivector to ivector, returning ivector.
+
+ @param a SkIPoint or ivector to add to
+ @param b SkIPoint or ivector to add
+ @return SkIPoint equal to a offset by b
+ */
+ friend SkIPoint operator+(const SkIPoint& a, const SkIVector& b) {
+ return { Sk32_sat_add(a.fX, b.fX), Sk32_sat_add(a.fY, b.fY) };
+ }
+};
+
+struct SkPoint;
+
+/** SkVector provides an alternative name for SkPoint. SkVector and SkPoint can
+ be used interchangeably for all purposes.
+*/
+typedef SkPoint SkVector;
+
+/** \struct SkPoint
+ SkPoint holds two 32-bit floating point coordinates.
+*/
+struct SK_API SkPoint {
+ SkScalar fX; //!< x-axis value
+ SkScalar fY; //!< y-axis value
+
+ /** Sets fX to x, fY to y. Used both to set SkPoint and vector.
+
+ @param x SkScalar x-axis value of constructed SkPoint or vector
+ @param y SkScalar y-axis value of constructed SkPoint or vector
+ @return SkPoint (x, y)
+ */
+ static constexpr SkPoint Make(SkScalar x, SkScalar y) {
+ return {x, y};
+ }
+
+ /** Returns x-axis value of SkPoint or vector.
+
+ @return fX
+ */
+ constexpr SkScalar x() const { return fX; }
+
+ /** Returns y-axis value of SkPoint or vector.
+
+ @return fY
+ */
+ constexpr SkScalar y() const { return fY; }
+
+ /** Returns true if fX and fY are both zero.
+
+ @return true if fX is zero and fY is zero
+ */
+ bool isZero() const { return (0 == fX) & (0 == fY); }
+
+ /** Sets fX to x and fY to y.
+
+ @param x new value for fX
+ @param y new value for fY
+ */
+ void set(SkScalar x, SkScalar y) {
+ fX = x;
+ fY = y;
+ }
+
+ /** Sets fX to x and fY to y, promoting integers to SkScalar values.
+
+ Assigning a large integer value directly to fX or fY may cause a compiler
+ error, triggered by narrowing conversion of int to SkScalar. This safely
+ casts x and y to avoid the error.
+
+ @param x new value for fX
+ @param y new value for fY
+ */
+ void iset(int32_t x, int32_t y) {
+ fX = SkIntToScalar(x);
+ fY = SkIntToScalar(y);
+ }
+
+ /** Sets fX to p.fX and fY to p.fY, promoting integers to SkScalar values.
+
+ Assigning an SkIPoint containing a large integer value directly to fX or fY may
+ cause a compiler error, triggered by narrowing conversion of int to SkScalar.
+ This safely casts p.fX and p.fY to avoid the error.
+
+ @param p SkIPoint members promoted to SkScalar
+ */
+ void iset(const SkIPoint& p) {
+ fX = SkIntToScalar(p.fX);
+ fY = SkIntToScalar(p.fY);
+ }
+
+ /** Sets fX to absolute value of pt.fX; and fY to absolute value of pt.fY.
+
+ @param pt members providing magnitude for fX and fY
+ */
+ void setAbs(const SkPoint& pt) {
+ fX = SkScalarAbs(pt.fX);
+ fY = SkScalarAbs(pt.fY);
+ }
+
+ /** Adds offset to each SkPoint in points array with count entries.
+
+ @param points SkPoint array
+ @param count entries in array
+ @param offset vector added to points
+ */
+ static void Offset(SkPoint points[], int count, const SkVector& offset) {
+ Offset(points, count, offset.fX, offset.fY);
+ }
+
+ /** Adds offset (dx, dy) to each SkPoint in points array of length count.
+
+ @param points SkPoint array
+ @param count entries in array
+ @param dx added to fX in points
+ @param dy added to fY in points
+ */
+ static void Offset(SkPoint points[], int count, SkScalar dx, SkScalar dy) {
+ for (int i = 0; i < count; ++i) {
+ points[i].offset(dx, dy);
+ }
+ }
+
+ /** Adds offset (dx, dy) to SkPoint.
+
+ @param dx added to fX
+ @param dy added to fY
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ fX += dx;
+ fY += dy;
+ }
+
+ /** Returns the Euclidean distance from origin, computed as:
+
+ sqrt(fX * fX + fY * fY)
+
+ .
+
+ @return straight-line distance to origin
+ */
+ SkScalar length() const { return SkPoint::Length(fX, fY); }
+
+ /** Returns the Euclidean distance from origin, computed as:
+
+ sqrt(fX * fX + fY * fY)
+
+ .
+
+ @return straight-line distance to origin
+ */
+ SkScalar distanceToOrigin() const { return this->length(); }
+
+ /** Scales (fX, fY) so that length() returns one, while preserving ratio of fX to fY,
+ if possible. If prior length is nearly zero, sets vector to (0, 0) and returns
+ false; otherwise returns true.
+
+ @return true if former length is not zero or nearly zero
+
+ example: https://fiddle.skia.org/c/@Point_normalize_2
+ */
+ bool normalize();
+
+ /** Sets vector to (x, y) scaled so length() returns one, and so that
+ (fX, fY) is proportional to (x, y). If (x, y) length is nearly zero,
+ sets vector to (0, 0) and returns false; otherwise returns true.
+
+ @param x proportional value for fX
+ @param y proportional value for fY
+ @return true if (x, y) length is not zero or nearly zero
+
+ example: https://fiddle.skia.org/c/@Point_setNormalize
+ */
+ bool setNormalize(SkScalar x, SkScalar y);
+
+ /** Scales vector so that distanceToOrigin() returns length, if possible. If former
+ length is nearly zero, sets vector to (0, 0) and return false; otherwise returns
+ true.
+
+ @param length straight-line distance to origin
+ @return true if former length is not zero or nearly zero
+
+ example: https://fiddle.skia.org/c/@Point_setLength
+ */
+ bool setLength(SkScalar length);
+
+ /** Sets vector to (x, y) scaled to length, if possible. If former
+ length is nearly zero, sets vector to (0, 0) and return false; otherwise returns
+ true.
+
+ @param x proportional value for fX
+ @param y proportional value for fY
+ @param length straight-line distance to origin
+ @return true if (x, y) length is not zero or nearly zero
+
+ example: https://fiddle.skia.org/c/@Point_setLength_2
+ */
+ bool setLength(SkScalar x, SkScalar y, SkScalar length);
+
+ /** Sets dst to SkPoint times scale. dst may be SkPoint to modify SkPoint in place.
+
+ @param scale factor to multiply SkPoint by
+ @param dst storage for scaled SkPoint
+
+ example: https://fiddle.skia.org/c/@Point_scale
+ */
+ void scale(SkScalar scale, SkPoint* dst) const;
+
+ /** Scales SkPoint in place by scale.
+
+ @param value factor to multiply SkPoint by
+ */
+ void scale(SkScalar value) { this->scale(value, this); }
+
+ /** Changes the sign of fX and fY.
+ */
+ void negate() {
+ fX = -fX;
+ fY = -fY;
+ }
+
+ /** Returns SkPoint changing the signs of fX and fY.
+
+ @return SkPoint as (-fX, -fY)
+ */
+ SkPoint operator-() const {
+ return {-fX, -fY};
+ }
+
+ /** Adds vector v to SkPoint. Sets SkPoint to: (fX + v.fX, fY + v.fY).
+
+ @param v vector to add
+ */
+ void operator+=(const SkVector& v) {
+ fX += v.fX;
+ fY += v.fY;
+ }
+
+ /** Subtracts vector v from SkPoint. Sets SkPoint to: (fX - v.fX, fY - v.fY).
+
+ @param v vector to subtract
+ */
+ void operator-=(const SkVector& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ }
+
+ /** Returns SkPoint multiplied by scale.
+
+ @param scale scalar to multiply by
+ @return SkPoint as (fX * scale, fY * scale)
+ */
+ SkPoint operator*(SkScalar scale) const {
+ return {fX * scale, fY * scale};
+ }
+
+ /** Multiplies SkPoint by scale. Sets SkPoint to: (fX * scale, fY * scale).
+
+ @param scale scalar to multiply by
+ @return reference to SkPoint
+ */
+ SkPoint& operator*=(SkScalar scale) {
+ fX *= scale;
+ fY *= scale;
+ return *this;
+ }
+
+ /** Returns true if both fX and fY are measurable values.
+
+ @return true for values other than infinities and NaN
+ */
+ bool isFinite() const {
+ SkScalar accum = 0;
+ accum *= fX;
+ accum *= fY;
+
+ // accum is either NaN or it is finite (zero).
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+
+ // value==value will be true iff value is not NaN
+ // TODO: is it faster to say !accum or accum==accum?
+ return !SkScalarIsNaN(accum);
+ }
+
+ /** Returns true if SkPoint is equivalent to SkPoint constructed from (x, y).
+
+ @param x value compared with fX
+ @param y value compared with fY
+ @return true if SkPoint equals (x, y)
+ */
+ bool equals(SkScalar x, SkScalar y) const {
+ return fX == x && fY == y;
+ }
+
+ /** Returns true if a is equivalent to b.
+
+ @param a SkPoint to compare
+ @param b SkPoint to compare
+ @return true if a.fX == b.fX and a.fY == b.fY
+ */
+ friend bool operator==(const SkPoint& a, const SkPoint& b) {
+ return a.fX == b.fX && a.fY == b.fY;
+ }
+
+ /** Returns true if a is not equivalent to b.
+
+ @param a SkPoint to compare
+ @param b SkPoint to compare
+ @return true if a.fX != b.fX or a.fY != b.fY
+ */
+ friend bool operator!=(const SkPoint& a, const SkPoint& b) {
+ return a.fX != b.fX || a.fY != b.fY;
+ }
+
+ /** Returns vector from b to a, computed as (a.fX - b.fX, a.fY - b.fY).
+
+ Can also be used to subtract vector from SkPoint, returning SkPoint.
+ Can also be used to subtract vector from vector, returning vector.
+
+ @param a SkPoint to subtract from
+ @param b SkPoint to subtract
+ @return vector from b to a
+ */
+ friend SkVector operator-(const SkPoint& a, const SkPoint& b) {
+ return {a.fX - b.fX, a.fY - b.fY};
+ }
+
+ /** Returns SkPoint resulting from SkPoint a offset by vector b, computed as:
+ (a.fX + b.fX, a.fY + b.fY).
+
+ Can also be used to offset SkPoint b by vector a, returning SkPoint.
+ Can also be used to add vector to vector, returning vector.
+
+ @param a SkPoint or vector to add to
+ @param b SkPoint or vector to add
+ @return SkPoint equal to a offset by b
+ */
+ friend SkPoint operator+(const SkPoint& a, const SkVector& b) {
+ return {a.fX + b.fX, a.fY + b.fY};
+ }
+
+ /** Returns the Euclidean distance from origin, computed as:
+
+ sqrt(x * x + y * y)
+
+ .
+
+ @param x component of length
+ @param y component of length
+ @return straight-line distance to origin
+
+ example: https://fiddle.skia.org/c/@Point_Length
+ */
+ static SkScalar Length(SkScalar x, SkScalar y);
+
+ /** Scales (vec->fX, vec->fY) so that length() returns one, while preserving ratio of vec->fX
+ to vec->fY, if possible. If original length is nearly zero, sets vec to (0, 0) and returns
+ zero; otherwise, returns length of vec before vec is scaled.
+
+ Returned prior length may be SK_ScalarInfinity if it can not be represented by SkScalar.
+
+ Note that normalize() is faster if prior length is not required.
+
+ @param vec normalized to unit length
+ @return original vec length
+
+ example: https://fiddle.skia.org/c/@Point_Normalize
+ */
+ static SkScalar Normalize(SkVector* vec);
+
+ /** Returns the Euclidean distance between a and b.
+
+ @param a line end point
+ @param b line end point
+ @return straight-line distance from a to b
+ */
+ static SkScalar Distance(const SkPoint& a, const SkPoint& b) {
+ return Length(a.fX - b.fX, a.fY - b.fY);
+ }
+
+ /** Returns the dot product of vector a and vector b.
+
+ @param a left side of dot product
+ @param b right side of dot product
+ @return product of input magnitudes and cosine of the angle between them
+ */
+ static SkScalar DotProduct(const SkVector& a, const SkVector& b) {
+ return a.fX * b.fX + a.fY * b.fY;
+ }
+
+ /** Returns the cross product of vector a and vector b.
+
+ a and b form three-dimensional vectors with z-axis value equal to zero. The
+ cross product is a three-dimensional vector with x-axis and y-axis values equal
+ to zero. The cross product z-axis component is returned.
+
+ @param a left side of cross product
+ @param b right side of cross product
+ @return area spanned by vectors signed by angle direction
+ */
+ static SkScalar CrossProduct(const SkVector& a, const SkVector& b) {
+ return a.fX * b.fY - a.fY * b.fX;
+ }
+
+ /** Returns the cross product of vector and vec.
+
+ Vector and vec form three-dimensional vectors with z-axis value equal to zero.
+ The cross product is a three-dimensional vector with x-axis and y-axis values
+ equal to zero. The cross product z-axis component is returned.
+
+ @param vec right side of cross product
+ @return area spanned by vectors signed by angle direction
+ */
+ SkScalar cross(const SkVector& vec) const {
+ return CrossProduct(*this, vec);
+ }
+
+ /** Returns the dot product of vector and vector vec.
+
+ @param vec right side of dot product
+ @return product of input magnitudes and cosine of the angle between them
+ */
+ SkScalar dot(const SkVector& vec) const {
+ return DotProduct(*this, vec);
+ }
+
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPoint3.h b/gfx/skia/skia/include/core/SkPoint3.h
new file mode 100644
index 0000000000..e372f82791
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPoint3.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPoint3_DEFINED
+#define SkPoint3_DEFINED
+
+#include "include/core/SkPoint.h"
+
+struct SK_API SkPoint3 {
+ SkScalar fX, fY, fZ;
+
+ static SkPoint3 Make(SkScalar x, SkScalar y, SkScalar z) {
+ SkPoint3 pt;
+ pt.set(x, y, z);
+ return pt;
+ }
+
+ SkScalar x() const { return fX; }
+ SkScalar y() const { return fY; }
+ SkScalar z() const { return fZ; }
+
+ void set(SkScalar x, SkScalar y, SkScalar z) { fX = x; fY = y; fZ = z; }
+
+ friend bool operator==(const SkPoint3& a, const SkPoint3& b) {
+ return a.fX == b.fX && a.fY == b.fY && a.fZ == b.fZ;
+ }
+
+ friend bool operator!=(const SkPoint3& a, const SkPoint3& b) {
+ return !(a == b);
+ }
+
+ /** Returns the Euclidean distance from (0,0,0) to (x,y,z)
+ */
+ static SkScalar Length(SkScalar x, SkScalar y, SkScalar z);
+
+ /** Return the Euclidean distance from (0,0,0) to the point
+ */
+ SkScalar length() const { return SkPoint3::Length(fX, fY, fZ); }
+
+ /** Set the point (vector) to be unit-length in the same direction as it
+ already points. If the point has a degenerate length (i.e., nearly 0)
+ then set it to (0,0,0) and return false; otherwise return true.
+ */
+ bool normalize();
+
+ /** Return a new point whose X, Y and Z coordinates are scaled.
+ */
+ SkPoint3 makeScale(SkScalar scale) const {
+ SkPoint3 p;
+ p.set(scale * fX, scale * fY, scale * fZ);
+ return p;
+ }
+
+ /** Scale the point's coordinates by scale.
+ */
+ void scale(SkScalar value) {
+ fX *= value;
+ fY *= value;
+ fZ *= value;
+ }
+
+ /** Return a new point whose X, Y and Z coordinates are the negative of the
+ original point's
+ */
+ SkPoint3 operator-() const {
+ SkPoint3 neg;
+ neg.fX = -fX;
+ neg.fY = -fY;
+ neg.fZ = -fZ;
+ return neg;
+ }
+
+ /** Returns a new point whose coordinates are the difference between
+ a and b (i.e., a - b)
+ */
+ friend SkPoint3 operator-(const SkPoint3& a, const SkPoint3& b) {
+ return { a.fX - b.fX, a.fY - b.fY, a.fZ - b.fZ };
+ }
+
+ /** Returns a new point whose coordinates are the sum of a and b (a + b)
+ */
+ friend SkPoint3 operator+(const SkPoint3& a, const SkPoint3& b) {
+ return { a.fX + b.fX, a.fY + b.fY, a.fZ + b.fZ };
+ }
+
+ /** Add v's coordinates to the point's
+ */
+ void operator+=(const SkPoint3& v) {
+ fX += v.fX;
+ fY += v.fY;
+ fZ += v.fZ;
+ }
+
+ /** Subtract v's coordinates from the point's
+ */
+ void operator-=(const SkPoint3& v) {
+ fX -= v.fX;
+ fY -= v.fY;
+ fZ -= v.fZ;
+ }
+
+ friend SkPoint3 operator*(SkScalar t, SkPoint3 p) {
+ return { t * p.fX, t * p.fY, t * p.fZ };
+ }
+
+ /** Returns true if fX, fY, and fZ are measurable values.
+
+ @return true for values other than infinities and NaN
+ */
+ bool isFinite() const {
+ SkScalar accum = 0;
+ accum *= fX;
+ accum *= fY;
+ accum *= fZ;
+
+ // accum is either NaN or it is finite (zero).
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+
+ // value==value will be true iff value is not NaN
+ // TODO: is it faster to say !accum or accum==accum?
+ return !SkScalarIsNaN(accum);
+ }
+
+ /** Returns the dot product of a and b, treating them as 3D vectors
+ */
+ static SkScalar DotProduct(const SkPoint3& a, const SkPoint3& b) {
+ return a.fX * b.fX + a.fY * b.fY + a.fZ * b.fZ;
+ }
+
+ SkScalar dot(const SkPoint3& vec) const {
+ return DotProduct(*this, vec);
+ }
+
+ /** Returns the cross product of a and b, treating them as 3D vectors
+ */
+ static SkPoint3 CrossProduct(const SkPoint3& a, const SkPoint3& b) {
+ SkPoint3 result;
+ result.fX = a.fY*b.fZ - a.fZ*b.fY;
+ result.fY = a.fZ*b.fX - a.fX*b.fZ;
+ result.fZ = a.fX*b.fY - a.fY*b.fX;
+
+ return result;
+ }
+
+ SkPoint3 cross(const SkPoint3& vec) const {
+ return CrossProduct(*this, vec);
+ }
+};
+
+typedef SkPoint3 SkVector3;
+typedef SkPoint3 SkColor3f;
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkPromiseImageTexture.h b/gfx/skia/skia/include/core/SkPromiseImageTexture.h
new file mode 100644
index 0000000000..0bd4034fdc
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkPromiseImageTexture.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPromiseImageTexture_DEFINED
+#define SkPromiseImageTexture_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_GANESH)
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrBackendSurface.h"
+/**
+ * This type is used to fulfill textures for PromiseImages. Once an instance is returned from a
+ * PromiseImageTextureFulfillProc the GrBackendTexture it wraps must remain valid until the
+ * corresponding PromiseImageTextureReleaseProc is called.
+ */
+class SK_API SkPromiseImageTexture : public SkNVRefCnt<SkPromiseImageTexture> {
+public:
+ SkPromiseImageTexture() = delete;
+ SkPromiseImageTexture(const SkPromiseImageTexture&) = delete;
+ SkPromiseImageTexture(SkPromiseImageTexture&&) = delete;
+ ~SkPromiseImageTexture();
+ SkPromiseImageTexture& operator=(const SkPromiseImageTexture&) = delete;
+ SkPromiseImageTexture& operator=(SkPromiseImageTexture&&) = delete;
+
+ static sk_sp<SkPromiseImageTexture> Make(const GrBackendTexture& backendTexture) {
+ if (!backendTexture.isValid()) {
+ return nullptr;
+ }
+ return sk_sp<SkPromiseImageTexture>(new SkPromiseImageTexture(backendTexture));
+ }
+
+ GrBackendTexture backendTexture() const { return fBackendTexture; }
+
+private:
+ explicit SkPromiseImageTexture(const GrBackendTexture& backendTexture);
+
+ GrBackendTexture fBackendTexture;
+};
+#endif // defined(SK_GANESH)
+
+#endif // SkPromiseImageTexture_DEFINED
diff --git a/gfx/skia/skia/include/core/SkRRect.h b/gfx/skia/skia/include/core/SkRRect.h
new file mode 100644
index 0000000000..73bc4a95b9
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRRect.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRRect_DEFINED
+#define SkRRect_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+#include <cstdint>
+#include <cstring>
+
+class SkMatrix;
+class SkString;
+
+/** \class SkRRect
+ SkRRect describes a rounded rectangle with a bounds and a pair of radii for each corner.
+ The bounds and radii can be set so that SkRRect describes: a rectangle with sharp corners;
+ a circle; an oval; or a rectangle with one or more rounded corners.
+
+ SkRRect allows implementing CSS properties that describe rounded corners.
+ SkRRect may have up to eight different radii, one for each axis on each of its four
+ corners.
+
+ SkRRect may modify the provided parameters when initializing bounds and radii.
+ If either axis radii is zero or less: radii are stored as zero; corner is square.
+ If corner curves overlap, radii are proportionally reduced to fit within bounds.
+*/
+class SK_API SkRRect {
+public:
+
+ /** Initializes bounds at (0, 0), the origin, with zero width and height.
+ Initializes corner radii to (0, 0), and sets type of kEmpty_Type.
+
+ @return empty SkRRect
+ */
+ SkRRect() = default;
+
+ /** Initializes to copy of rrect bounds and corner radii.
+
+ @param rrect bounds and corner to copy
+ @return copy of rrect
+ */
+ SkRRect(const SkRRect& rrect) = default;
+
+ /** Copies rrect bounds and corner radii.
+
+ @param rrect bounds and corner to copy
+ @return copy of rrect
+ */
+ SkRRect& operator=(const SkRRect& rrect) = default;
+
+ /** \enum SkRRect::Type
+ Type describes possible specializations of SkRRect. Each Type is
+ exclusive; a SkRRect may only have one type.
+
+ Type members become progressively less restrictive; larger values of
+ Type have more degrees of freedom than smaller values.
+ */
+ enum Type {
+ kEmpty_Type, //!< zero width or height
+ kRect_Type, //!< non-zero width and height, and zeroed radii
+ kOval_Type, //!< non-zero width and height filled with radii
+ kSimple_Type, //!< non-zero width and height with equal radii
+ kNinePatch_Type, //!< non-zero width and height with axis-aligned radii
+ kComplex_Type, //!< non-zero width and height with arbitrary radii
+ kLastType = kComplex_Type, //!< largest Type value
+ };
+
+ Type getType() const {
+ SkASSERT(this->isValid());
+ return static_cast<Type>(fType);
+ }
+
+ Type type() const { return this->getType(); }
+
+ inline bool isEmpty() const { return kEmpty_Type == this->getType(); }
+ inline bool isRect() const { return kRect_Type == this->getType(); }
+ inline bool isOval() const { return kOval_Type == this->getType(); }
+ inline bool isSimple() const { return kSimple_Type == this->getType(); }
+ inline bool isNinePatch() const { return kNinePatch_Type == this->getType(); }
+ inline bool isComplex() const { return kComplex_Type == this->getType(); }
+
+ /** Returns span on the x-axis. This does not check if result fits in 32-bit float;
+ result may be infinity.
+
+ @return rect().fRight minus rect().fLeft
+ */
+ SkScalar width() const { return fRect.width(); }
+
+ /** Returns span on the y-axis. This does not check if result fits in 32-bit float;
+ result may be infinity.
+
+ @return rect().fBottom minus rect().fTop
+ */
+ SkScalar height() const { return fRect.height(); }
+
+ /** Returns top-left corner radii. If type() returns kEmpty_Type, kRect_Type,
+ kOval_Type, or kSimple_Type, returns a value representative of all corner radii.
+ If type() returns kNinePatch_Type or kComplex_Type, at least one of the
+ remaining three corners has a different value.
+
+ @return corner radii for simple types
+ */
+ SkVector getSimpleRadii() const {
+ return fRadii[0];
+ }
+
+ /** Sets bounds to zero width and height at (0, 0), the origin. Sets
+ corner radii to zero and sets type to kEmpty_Type.
+ */
+ void setEmpty() { *this = SkRRect(); }
+
+ /** Sets bounds to sorted rect, and sets corner radii to zero.
+ If the set bounds has width and height, sets type to kRect_Type;
+ otherwise, sets type to kEmpty_Type.
+
+ @param rect bounds to set
+ */
+ void setRect(const SkRect& rect) {
+ if (!this->initializeRect(rect)) {
+ return;
+ }
+
+ memset(fRadii, 0, sizeof(fRadii));
+ fType = kRect_Type;
+
+ SkASSERT(this->isValid());
+ }
+
+ /** Initializes bounds at (0, 0), the origin, with zero width and height.
+ Initializes corner radii to (0, 0), and sets type of kEmpty_Type.
+
+ @return empty SkRRect
+ */
+ static SkRRect MakeEmpty() { return SkRRect(); }
+
+ /** Initializes to copy of r bounds and zeroes corner radii.
+
+ @param r bounds to copy
+ @return copy of r
+ */
+ static SkRRect MakeRect(const SkRect& r) {
+ SkRRect rr;
+ rr.setRect(r);
+ return rr;
+ }
+
+ /** Sets bounds to oval, x-axis radii to half oval.width(), and all y-axis radii
+ to half oval.height(). If oval bounds is empty, sets to kEmpty_Type.
+ Otherwise, sets to kOval_Type.
+
+ @param oval bounds of oval
+ @return oval
+ */
+ static SkRRect MakeOval(const SkRect& oval) {
+ SkRRect rr;
+ rr.setOval(oval);
+ return rr;
+ }
+
+ /** Sets to rounded rectangle with the same radii for all four corners.
+ If rect is empty, sets to kEmpty_Type.
+ Otherwise, if xRad and yRad are zero, sets to kRect_Type.
+ Otherwise, if xRad is at least half rect.width() and yRad is at least half
+ rect.height(), sets to kOval_Type.
+ Otherwise, sets to kSimple_Type.
+
+ @param rect bounds of rounded rectangle
+ @param xRad x-axis radius of corners
+ @param yRad y-axis radius of corners
+ @return rounded rectangle
+ */
+ static SkRRect MakeRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad) {
+ SkRRect rr;
+ rr.setRectXY(rect, xRad, yRad);
+ return rr;
+ }
+
+ /** Sets bounds to oval, x-axis radii to half oval.width(), and all y-axis radii
+ to half oval.height(). If oval bounds is empty, sets to kEmpty_Type.
+ Otherwise, sets to kOval_Type.
+
+ @param oval bounds of oval
+ */
+ void setOval(const SkRect& oval);
+
+ /** Sets to rounded rectangle with the same radii for all four corners.
+ If rect is empty, sets to kEmpty_Type.
+ Otherwise, if xRad or yRad is zero, sets to kRect_Type.
+ Otherwise, if xRad is at least half rect.width() and yRad is at least half
+ rect.height(), sets to kOval_Type.
+ Otherwise, sets to kSimple_Type.
+
+ @param rect bounds of rounded rectangle
+ @param xRad x-axis radius of corners
+ @param yRad y-axis radius of corners
+
+ example: https://fiddle.skia.org/c/@RRect_setRectXY
+ */
+ void setRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad);
+
+ /** Sets bounds to rect. Sets radii to (leftRad, topRad), (rightRad, topRad),
+ (rightRad, bottomRad), (leftRad, bottomRad).
+
+ If rect is empty, sets to kEmpty_Type.
+ Otherwise, if leftRad and rightRad are zero, sets to kRect_Type.
+ Otherwise, if topRad and bottomRad are zero, sets to kRect_Type.
+ Otherwise, if leftRad and rightRad are equal and at least half rect.width(), and
+ topRad and bottomRad are equal to at least half rect.height(), sets to kOval_Type.
+ Otherwise, if leftRad and rightRad are equal, and topRad and bottomRad are equal,
+ sets to kSimple_Type. Otherwise, sets to kNinePatch_Type.
+
+ Nine patch refers to the nine parts defined by the radii: one center rectangle,
+ four edge patches, and four corner patches.
+
+ @param rect bounds of rounded rectangle
+ @param leftRad left-top and left-bottom x-axis radius
+ @param topRad left-top and right-top y-axis radius
+ @param rightRad right-top and right-bottom x-axis radius
+ @param bottomRad left-bottom and right-bottom y-axis radius
+ */
+ void setNinePatch(const SkRect& rect, SkScalar leftRad, SkScalar topRad,
+ SkScalar rightRad, SkScalar bottomRad);
+
+ /** Sets bounds to rect. Sets radii array for individual control of all four corners.
+
+ If rect is empty, sets to kEmpty_Type.
+ Otherwise, if one of each corner radii are zero, sets to kRect_Type.
+ Otherwise, if all x-axis radii are equal and at least half rect.width(), and
+ all y-axis radii are equal to at least half rect.height(), sets to kOval_Type.
+ Otherwise, if all x-axis radii are equal, and all y-axis radii are equal,
+ sets to kSimple_Type. Otherwise, sets to kNinePatch_Type.
+
+ @param rect bounds of rounded rectangle
+ @param radii corner x-axis and y-axis radii
+
+ example: https://fiddle.skia.org/c/@RRect_setRectRadii
+ */
+ void setRectRadii(const SkRect& rect, const SkVector radii[4]);
+
+ /** \enum SkRRect::Corner
+ The radii are stored: top-left, top-right, bottom-right, bottom-left.
+ */
+ enum Corner {
+ kUpperLeft_Corner, //!< index of top-left corner radii
+ kUpperRight_Corner, //!< index of top-right corner radii
+ kLowerRight_Corner, //!< index of bottom-right corner radii
+ kLowerLeft_Corner, //!< index of bottom-left corner radii
+ };
+
+ /** Returns bounds. Bounds may have zero width or zero height. Bounds right is
+ greater than or equal to left; bounds bottom is greater than or equal to top.
+ Result is identical to getBounds().
+
+ @return bounding box
+ */
+ const SkRect& rect() const { return fRect; }
+
+ /** Returns scalar pair for radius of curve on x-axis and y-axis for one corner.
+ Both radii may be zero. If not zero, both are positive and finite.
+
+ @return x-axis and y-axis radii for one corner
+ */
+ SkVector radii(Corner corner) const { return fRadii[corner]; }
+
+ /** Returns bounds. Bounds may have zero width or zero height. Bounds right is
+ greater than or equal to left; bounds bottom is greater than or equal to top.
+ Result is identical to rect().
+
+ @return bounding box
+ */
+ const SkRect& getBounds() const { return fRect; }
+
+ /** Returns true if bounds and radii in a are equal to bounds and radii in b.
+
+ a and b are not equal if either contain NaN. a and b are equal if members
+ contain zeroes with different signs.
+
+ @param a SkRect bounds and radii to compare
+ @param b SkRect bounds and radii to compare
+ @return true if members are equal
+ */
+ friend bool operator==(const SkRRect& a, const SkRRect& b) {
+ return a.fRect == b.fRect && SkScalarsEqual(&a.fRadii[0].fX, &b.fRadii[0].fX, 8);
+ }
+
+ /** Returns true if bounds and radii in a are not equal to bounds and radii in b.
+
+ a and b are not equal if either contain NaN. a and b are equal if members
+ contain zeroes with different signs.
+
+ @param a SkRect bounds and radii to compare
+ @param b SkRect bounds and radii to compare
+ @return true if members are not equal
+ */
+ friend bool operator!=(const SkRRect& a, const SkRRect& b) {
+ return a.fRect != b.fRect || !SkScalarsEqual(&a.fRadii[0].fX, &b.fRadii[0].fX, 8);
+ }
+
+ /** Copies SkRRect to dst, then insets dst bounds by dx and dy, and adjusts dst
+ radii by dx and dy. dx and dy may be positive, negative, or zero. dst may be
+ SkRRect.
+
+ If either corner radius is zero, the corner has no curvature and is unchanged.
+ Otherwise, if adjusted radius becomes negative, pins radius to zero.
+ If dx exceeds half dst bounds width, dst bounds left and right are set to
+ bounds x-axis center. If dy exceeds half dst bounds height, dst bounds top and
+ bottom are set to bounds y-axis center.
+
+ If dx or dy cause the bounds to become infinite, dst bounds is zeroed.
+
+ @param dx added to rect().fLeft, and subtracted from rect().fRight
+ @param dy added to rect().fTop, and subtracted from rect().fBottom
+ @param dst insets bounds and radii
+
+ example: https://fiddle.skia.org/c/@RRect_inset
+ */
+ void inset(SkScalar dx, SkScalar dy, SkRRect* dst) const;
+
+ /** Insets bounds by dx and dy, and adjusts radii by dx and dy. dx and dy may be
+ positive, negative, or zero.
+
+ If either corner radius is zero, the corner has no curvature and is unchanged.
+ Otherwise, if adjusted radius becomes negative, pins radius to zero.
+ If dx exceeds half bounds width, bounds left and right are set to
+ bounds x-axis center. If dy exceeds half bounds height, bounds top and
+ bottom are set to bounds y-axis center.
+
+ If dx or dy cause the bounds to become infinite, bounds is zeroed.
+
+ @param dx added to rect().fLeft, and subtracted from rect().fRight
+ @param dy added to rect().fTop, and subtracted from rect().fBottom
+ */
+ void inset(SkScalar dx, SkScalar dy) {
+ this->inset(dx, dy, this);
+ }
+
+ /** Outsets dst bounds by dx and dy, and adjusts radii by dx and dy. dx and dy may be
+ positive, negative, or zero.
+
+ If either corner radius is zero, the corner has no curvature and is unchanged.
+ Otherwise, if adjusted radius becomes negative, pins radius to zero.
+ If dx exceeds half dst bounds width, dst bounds left and right are set to
+ bounds x-axis center. If dy exceeds half dst bounds height, dst bounds top and
+ bottom are set to bounds y-axis center.
+
+ If dx or dy cause the bounds to become infinite, dst bounds is zeroed.
+
+ @param dx subtracted from rect().fLeft, and added to rect().fRight
+ @param dy subtracted from rect().fTop, and added to rect().fBottom
+ @param dst outset bounds and radii
+ */
+ void outset(SkScalar dx, SkScalar dy, SkRRect* dst) const {
+ this->inset(-dx, -dy, dst);
+ }
+
+ /** Outsets bounds by dx and dy, and adjusts radii by dx and dy. dx and dy may be
+ positive, negative, or zero.
+
+ If either corner radius is zero, the corner has no curvature and is unchanged.
+ Otherwise, if adjusted radius becomes negative, pins radius to zero.
+ If dx exceeds half bounds width, bounds left and right are set to
+ bounds x-axis center. If dy exceeds half bounds height, bounds top and
+ bottom are set to bounds y-axis center.
+
+ If dx or dy cause the bounds to become infinite, bounds is zeroed.
+
+ @param dx subtracted from rect().fLeft, and added to rect().fRight
+ @param dy subtracted from rect().fTop, and added to rect().fBottom
+ */
+ void outset(SkScalar dx, SkScalar dy) {
+ this->inset(-dx, -dy, this);
+ }
+
+ /** Translates SkRRect by (dx, dy).
+
+ @param dx offset added to rect().fLeft and rect().fRight
+ @param dy offset added to rect().fTop and rect().fBottom
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ fRect.offset(dx, dy);
+ }
+
+ /** Returns SkRRect translated by (dx, dy).
+
+ @param dx offset added to rect().fLeft and rect().fRight
+ @param dy offset added to rect().fTop and rect().fBottom
+ @return SkRRect bounds offset by (dx, dy), with unchanged corner radii
+ */
+ SkRRect SK_WARN_UNUSED_RESULT makeOffset(SkScalar dx, SkScalar dy) const {
+ return SkRRect(fRect.makeOffset(dx, dy), fRadii, fType);
+ }
+
+ /** Returns true if rect is inside the bounds and corner radii, and if
+ SkRRect and rect are not empty.
+
+ @param rect area tested for containment
+ @return true if SkRRect contains rect
+
+ example: https://fiddle.skia.org/c/@RRect_contains
+ */
+ bool contains(const SkRect& rect) const;
+
+ /** Returns true if bounds and radii values are finite and describe a SkRRect
+ SkRRect::Type that matches getType(). All SkRRect methods construct valid types,
+ even if the input values are not valid. Invalid SkRRect data can only
+ be generated by corrupting memory.
+
+ @return true if bounds and radii match type()
+
+ example: https://fiddle.skia.org/c/@RRect_isValid
+ */
+ bool isValid() const;
+
+ static constexpr size_t kSizeInMemory = 12 * sizeof(SkScalar);
+
+ /** Writes SkRRect to buffer. Writes kSizeInMemory bytes, and returns
+ kSizeInMemory, the number of bytes written.
+
+ @param buffer storage for SkRRect
+ @return bytes written, kSizeInMemory
+
+ example: https://fiddle.skia.org/c/@RRect_writeToMemory
+ */
+ size_t writeToMemory(void* buffer) const;
+
+ /** Reads SkRRect from buffer, reading kSizeInMemory bytes.
+ Returns kSizeInMemory, bytes read if length is at least kSizeInMemory.
+ Otherwise, returns zero.
+
+ @param buffer memory to read from
+ @param length size of buffer
+ @return bytes read, or 0 if length is less than kSizeInMemory
+
+ example: https://fiddle.skia.org/c/@RRect_readFromMemory
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+ /** Transforms by SkRRect by matrix, storing result in dst.
+ Returns true if SkRRect transformed can be represented by another SkRRect.
+ Returns false if matrix contains transformations that are not axis aligned.
+
+ Asserts in debug builds if SkRRect equals dst.
+
+ @param matrix SkMatrix specifying the transform
+ @param dst SkRRect to store the result
+ @return true if transformation succeeded.
+
+ example: https://fiddle.skia.org/c/@RRect_transform
+ */
+ bool transform(const SkMatrix& matrix, SkRRect* dst) const;
+
+ /** Writes text representation of SkRRect to standard output.
+ Set asHex true to generate exact binary representations
+ of floating point numbers.
+
+ @param asHex true if SkScalar values are written as hexadecimal
+
+ example: https://fiddle.skia.org/c/@RRect_dump
+ */
+ void dump(bool asHex) const;
+ SkString dumpToString(bool asHex) const;
+
+ /** Writes text representation of SkRRect to standard output. The representation
+ may be directly compiled as C++ code. Floating point values are written
+ with limited precision; it may not be possible to reconstruct original
+ SkRRect from output.
+ */
+ void dump() const { this->dump(false); }
+
+ /** Writes text representation of SkRRect to standard output. The representation
+ may be directly compiled as C++ code. Floating point values are written
+ in hexadecimal to preserve their exact bit pattern. The output reconstructs the
+ original SkRRect.
+ */
+ void dumpHex() const { this->dump(true); }
+
+private:
+ static bool AreRectAndRadiiValid(const SkRect&, const SkVector[4]);
+
+ SkRRect(const SkRect& rect, const SkVector radii[4], int32_t type)
+ : fRect(rect)
+ , fRadii{radii[0], radii[1], radii[2], radii[3]}
+ , fType(type) {}
+
+ /**
+ * Initializes fRect. If the passed in rect is not finite or empty the rrect will be fully
+ * initialized and false is returned. Otherwise, just fRect is initialized and true is returned.
+ */
+ bool initializeRect(const SkRect&);
+
+ void computeType();
+ bool checkCornerContainment(SkScalar x, SkScalar y) const;
+ // Returns true if the radii had to be scaled to fit rect
+ bool scaleRadii();
+
+ SkRect fRect = SkRect::MakeEmpty();
+ // Radii order is UL, UR, LR, LL. Use Corner enum to index into fRadii[]
+ SkVector fRadii[4] = {{0, 0}, {0, 0}, {0,0}, {0,0}};
+ // use an explicitly sized type so we're sure the class is dense (no uninitialized bytes)
+ int32_t fType = kEmpty_Type;
+ // TODO: add padding so we can use memcpy for flattening and not copy uninitialized data
+
+ // to access fRadii directly
+ friend class SkPath;
+ friend class SkRRectPriv;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRSXform.h b/gfx/skia/skia/include/core/SkRSXform.h
new file mode 100644
index 0000000000..5fcfff2922
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRSXform.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRSXform_DEFINED
+#define SkRSXform_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkSize.h"
+
+/**
+ * A compressed form of a rotation+scale matrix.
+ *
+ * [ fSCos -fSSin fTx ]
+ * [ fSSin fSCos fTy ]
+ * [ 0 0 1 ]
+ */
+struct SK_API SkRSXform {
+ static SkRSXform Make(SkScalar scos, SkScalar ssin, SkScalar tx, SkScalar ty) {
+ SkRSXform xform = { scos, ssin, tx, ty };
+ return xform;
+ }
+
+ /*
+ * Initialize a new xform based on the scale, rotation (in radians), final tx,ty location
+ * and anchor-point ax,ay within the src quad.
+ *
+ * Note: the anchor point is not normalized (e.g. 0...1) but is in pixels of the src image.
+ */
+ static SkRSXform MakeFromRadians(SkScalar scale, SkScalar radians, SkScalar tx, SkScalar ty,
+ SkScalar ax, SkScalar ay) {
+ const SkScalar s = SkScalarSin(radians) * scale;
+ const SkScalar c = SkScalarCos(radians) * scale;
+ return Make(c, s, tx + -c * ax + s * ay, ty + -s * ax - c * ay);
+ }
+
+ SkScalar fSCos;
+ SkScalar fSSin;
+ SkScalar fTx;
+ SkScalar fTy;
+
+ bool rectStaysRect() const {
+ return 0 == fSCos || 0 == fSSin;
+ }
+
+ void setIdentity() {
+ fSCos = 1;
+ fSSin = fTx = fTy = 0;
+ }
+
+ void set(SkScalar scos, SkScalar ssin, SkScalar tx, SkScalar ty) {
+ fSCos = scos;
+ fSSin = ssin;
+ fTx = tx;
+ fTy = ty;
+ }
+
+ void toQuad(SkScalar width, SkScalar height, SkPoint quad[4]) const;
+ void toQuad(const SkSize& size, SkPoint quad[4]) const {
+ this->toQuad(size.width(), size.height(), quad);
+ }
+ void toTriStrip(SkScalar width, SkScalar height, SkPoint strip[4]) const;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/include/core/SkRasterHandleAllocator.h b/gfx/skia/skia/include/core/SkRasterHandleAllocator.h
new file mode 100644
index 0000000000..6fe121a6de
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRasterHandleAllocator.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterHandleAllocator_DEFINED
+#define SkRasterHandleAllocator_DEFINED
+
+#include "include/core/SkImageInfo.h"
+
+class SkBitmap;
+class SkCanvas;
+class SkMatrix;
+class SkSurfaceProps;
+
+/**
+ * If a client wants to control the allocation of raster layers in a canvas, it should subclass
+ * SkRasterHandleAllocator. This allocator performs two tasks:
+ * 1. controls how the memory for the pixels is allocated
+ * 2. associates a "handle" to a private object that can track the matrix/clip of the SkCanvas
+ *
+ * This example allocates a canvas, and defers to the allocator to create the base layer.
+ *
+ * std::unique_ptr<SkCanvas> canvas = SkRasterHandleAllocator::MakeCanvas(
+ * SkImageInfo::Make(...),
+ * std::make_unique<MySubclassRasterHandleAllocator>(...),
+ * nullptr);
+ *
+ * If you have already allocated the base layer (and its handle, release-proc etc.) then you
+ * can pass those in using the last parameter to MakeCanvas().
+ *
+ * Regardless of how the base layer is allocated, each time canvas->saveLayer() is called,
+ * your allocator's allocHandle() will be called.
+ */
+class SK_API SkRasterHandleAllocator {
+public:
+ virtual ~SkRasterHandleAllocator() = default;
+
+ // The value that is returned to clients of the canvas that has this allocator installed.
+ typedef void* Handle;
+
+ struct Rec {
+ // When the allocation goes out of scope, this proc is called to free everything associated
+ // with it: the pixels, the "handle", etc. This is passed the pixel address and fReleaseCtx.
+ void (*fReleaseProc)(void* pixels, void* ctx);
+ void* fReleaseCtx; // context passed to fReleaseProc
+ void* fPixels; // pixels for this allocation
+ size_t fRowBytes; // rowbytes for these pixels
+ Handle fHandle; // public handle returned by SkCanvas::accessTopRasterHandle()
+ };
+
+ /**
+ * Given a requested info, allocate the corresponding pixels/rowbytes, and whatever handle
+ * is desired to give clients access to those pixels. The rec also contains a proc and context
+ * which will be called when this allocation goes out of scope.
+ *
+ * e.g.
+ * when canvas->saveLayer() is called, the allocator will be called to allocate the pixels
+ * for the layer. When canvas->restore() is called, the fReleaseProc will be called.
+ */
+ virtual bool allocHandle(const SkImageInfo&, Rec*) = 0;
+
+ /**
+ * Clients access the handle for a given layer by calling SkCanvas::accessTopRasterHandle().
+ * To allow the handle to reflect the current matrix/clip in the canvs, updateHandle() is
+ * is called. The subclass is responsible to update the handle as it sees fit.
+ */
+ virtual void updateHandle(Handle, const SkMatrix&, const SkIRect&) = 0;
+
+ /**
+ * This creates a canvas which will use the allocator to manage pixel allocations, including
+ * all calls to saveLayer().
+ *
+ * If rec is non-null, then it will be used as the base-layer of pixels/handle.
+ * If rec is null, then the allocator will be called for the base-layer as well.
+ */
+ static std::unique_ptr<SkCanvas> MakeCanvas(std::unique_ptr<SkRasterHandleAllocator>,
+ const SkImageInfo&, const Rec* rec = nullptr,
+ const SkSurfaceProps* props = nullptr);
+
+protected:
+ SkRasterHandleAllocator() = default;
+ SkRasterHandleAllocator(const SkRasterHandleAllocator&) = delete;
+ SkRasterHandleAllocator& operator=(const SkRasterHandleAllocator&) = delete;
+
+private:
+ friend class SkBitmapDevice;
+
+ Handle allocBitmap(const SkImageInfo&, SkBitmap*);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRect.h b/gfx/skia/skia/include/core/SkRect.h
new file mode 100644
index 0000000000..1ed7823c23
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRect.h
@@ -0,0 +1,1388 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRect_DEFINED
+#define SkRect_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkSafe32.h"
+#include "include/private/base/SkTFitsIn.h"
+
+#include <string>
+#include <algorithm>
+#include <cstdint>
+
+struct SkRect;
+
+/** \struct SkIRect
+ SkIRect holds four 32-bit integer coordinates describing the upper and
+ lower bounds of a rectangle. SkIRect may be created from outer bounds or
+ from position, width, and height. SkIRect describes an area; if its right
+ is less than or equal to its left, or if its bottom is less than or equal to
+ its top, it is considered empty.
+*/
+struct SK_API SkIRect {
+ int32_t fLeft; //!< smaller x-axis bounds
+ int32_t fTop; //!< smaller y-axis bounds
+ int32_t fRight; //!< larger x-axis bounds
+ int32_t fBottom; //!< larger y-axis bounds
+
+ /** Returns constructed SkIRect set to (0, 0, 0, 0).
+ Many other rectangles are empty; if left is equal to or greater than right,
+ or if top is equal to or greater than bottom. Setting all members to zero
+ is a convenience, but does not designate a special empty rectangle.
+
+ @return bounds (0, 0, 0, 0)
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeEmpty() {
+ return SkIRect{0, 0, 0, 0};
+ }
+
+ /** Returns constructed SkIRect set to (0, 0, w, h). Does not validate input; w or h
+ may be negative.
+
+ @param w width of constructed SkIRect
+ @param h height of constructed SkIRect
+ @return bounds (0, 0, w, h)
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeWH(int32_t w, int32_t h) {
+ return SkIRect{0, 0, w, h};
+ }
+
+ /** Returns constructed SkIRect set to (0, 0, size.width(), size.height()).
+ Does not validate input; size.width() or size.height() may be negative.
+
+ @param size values for SkIRect width and height
+ @return bounds (0, 0, size.width(), size.height())
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeSize(const SkISize& size) {
+ return SkIRect{0, 0, size.fWidth, size.fHeight};
+ }
+
+ /** Returns constructed SkIRect set to (pt.x(), pt.y(), pt.x() + size.width(),
+ pt.y() + size.height()). Does not validate input; size.width() or size.height() may be
+ negative.
+
+ @param pt values for SkIRect fLeft and fTop
+ @param size values for SkIRect width and height
+ @return bounds at pt with width and height of size
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakePtSize(SkIPoint pt, SkISize size) {
+ return MakeXYWH(pt.x(), pt.y(), size.width(), size.height());
+ }
+
+ /** Returns constructed SkIRect set to (l, t, r, b). Does not sort input; SkIRect may
+ result in fLeft greater than fRight, or fTop greater than fBottom.
+
+ @param l integer stored in fLeft
+ @param t integer stored in fTop
+ @param r integer stored in fRight
+ @param b integer stored in fBottom
+ @return bounds (l, t, r, b)
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeLTRB(int32_t l, int32_t t,
+ int32_t r, int32_t b) {
+ return SkIRect{l, t, r, b};
+ }
+
+ /** Returns constructed SkIRect set to: (x, y, x + w, y + h).
+ Does not validate input; w or h may be negative.
+
+ @param x stored in fLeft
+ @param y stored in fTop
+ @param w added to x and stored in fRight
+ @param h added to y and stored in fBottom
+ @return bounds at (x, y) with width w and height h
+ */
+ static constexpr SkIRect SK_WARN_UNUSED_RESULT MakeXYWH(int32_t x, int32_t y,
+ int32_t w, int32_t h) {
+ return { x, y, Sk32_sat_add(x, w), Sk32_sat_add(y, h) };
+ }
+
+ /** Returns left edge of SkIRect, if sorted.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fLeft
+ */
+ constexpr int32_t left() const { return fLeft; }
+
+ /** Returns top edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fTop
+ */
+ constexpr int32_t top() const { return fTop; }
+
+ /** Returns right edge of SkIRect, if sorted.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fRight
+ */
+ constexpr int32_t right() const { return fRight; }
+
+ /** Returns bottom edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fBottom
+ */
+ constexpr int32_t bottom() const { return fBottom; }
+
+ /** Returns left edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid,
+ and sort() to reverse fLeft and fRight if needed.
+
+ @return fLeft
+ */
+ constexpr int32_t x() const { return fLeft; }
+
+ /** Returns top edge of SkIRect, if sorted. Call isEmpty() to see if SkIRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fTop
+ */
+ constexpr int32_t y() const { return fTop; }
+
+ // Experimental
+ constexpr SkIPoint topLeft() const { return {fLeft, fTop}; }
+
+ /** Returns span on the x-axis. This does not check if SkIRect is sorted, or if
+ result fits in 32-bit signed integer; result may be negative.
+
+ @return fRight minus fLeft
+ */
+ constexpr int32_t width() const { return Sk32_can_overflow_sub(fRight, fLeft); }
+
+ /** Returns span on the y-axis. This does not check if SkIRect is sorted, or if
+ result fits in 32-bit signed integer; result may be negative.
+
+ @return fBottom minus fTop
+ */
+ constexpr int32_t height() const { return Sk32_can_overflow_sub(fBottom, fTop); }
+
+ /** Returns spans on the x-axis and y-axis. This does not check if SkIRect is sorted,
+ or if result fits in 32-bit signed integer; result may be negative.
+
+ @return SkISize (width, height)
+ */
+ constexpr SkISize size() const { return SkISize::Make(this->width(), this->height()); }
+
+ /** Returns span on the x-axis. This does not check if SkIRect is sorted, so the
+ result may be negative. This is safer than calling width() since width() might
+ overflow in its calculation.
+
+ @return fRight minus fLeft cast to int64_t
+ */
+ constexpr int64_t width64() const { return (int64_t)fRight - (int64_t)fLeft; }
+
+ /** Returns span on the y-axis. This does not check if SkIRect is sorted, so the
+ result may be negative. This is safer than calling height() since height() might
+ overflow in its calculation.
+
+ @return fBottom minus fTop cast to int64_t
+ */
+ constexpr int64_t height64() const { return (int64_t)fBottom - (int64_t)fTop; }
+
+ /** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal
+ to or greater than fBottom. Call sort() to reverse rectangles with negative
+ width64() or height64().
+
+ @return true if width64() or height64() are zero or negative
+ */
+ bool isEmpty64() const { return fRight <= fLeft || fBottom <= fTop; }
+
+ /** Returns true if width() or height() are zero or negative.
+
+ @return true if width() or height() are zero or negative
+ */
+ bool isEmpty() const {
+ int64_t w = this->width64();
+ int64_t h = this->height64();
+ if (w <= 0 || h <= 0) {
+ return true;
+ }
+ // Return true if either exceeds int32_t
+ return !SkTFitsIn<int32_t>(w | h);
+ }
+
+ /** Returns true if all members in a: fLeft, fTop, fRight, and fBottom; are
+ identical to corresponding members in b.
+
+ @param a SkIRect to compare
+ @param b SkIRect to compare
+ @return true if members are equal
+ */
+ friend bool operator==(const SkIRect& a, const SkIRect& b) {
+ return a.fLeft == b.fLeft && a.fTop == b.fTop &&
+ a.fRight == b.fRight && a.fBottom == b.fBottom;
+ }
+
+ /** Returns true if any member in a: fLeft, fTop, fRight, and fBottom; is not
+ identical to the corresponding member in b.
+
+ @param a SkIRect to compare
+ @param b SkIRect to compare
+ @return true if members are not equal
+ */
+ friend bool operator!=(const SkIRect& a, const SkIRect& b) {
+ return a.fLeft != b.fLeft || a.fTop != b.fTop ||
+ a.fRight != b.fRight || a.fBottom != b.fBottom;
+ }
+
+ /** Sets SkIRect to (0, 0, 0, 0).
+
+ Many other rectangles are empty; if left is equal to or greater than right,
+ or if top is equal to or greater than bottom. Setting all members to zero
+ is a convenience, but does not designate a special empty rectangle.
+ */
+ void setEmpty() { memset(this, 0, sizeof(*this)); }
+
+ /** Sets SkIRect to (left, top, right, bottom).
+ left and right are not sorted; left is not necessarily less than right.
+ top and bottom are not sorted; top is not necessarily less than bottom.
+
+ @param left stored in fLeft
+ @param top stored in fTop
+ @param right stored in fRight
+ @param bottom stored in fBottom
+ */
+ void setLTRB(int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ /** Sets SkIRect to: (x, y, x + width, y + height).
+ Does not validate input; width or height may be negative.
+
+ @param x stored in fLeft
+ @param y stored in fTop
+ @param width added to x and stored in fRight
+ @param height added to y and stored in fBottom
+ */
+ void setXYWH(int32_t x, int32_t y, int32_t width, int32_t height) {
+ fLeft = x;
+ fTop = y;
+ fRight = Sk32_sat_add(x, width);
+ fBottom = Sk32_sat_add(y, height);
+ }
+
+ void setWH(int32_t width, int32_t height) {
+ fLeft = 0;
+ fTop = 0;
+ fRight = width;
+ fBottom = height;
+ }
+
+ void setSize(SkISize size) {
+ fLeft = 0;
+ fTop = 0;
+ fRight = size.width();
+ fBottom = size.height();
+ }
+
+ /** Returns SkIRect offset by (dx, dy).
+
+ If dx is negative, SkIRect returned is moved to the left.
+ If dx is positive, SkIRect returned is moved to the right.
+ If dy is negative, SkIRect returned is moved upward.
+ If dy is positive, SkIRect returned is moved downward.
+
+ @param dx offset added to fLeft and fRight
+ @param dy offset added to fTop and fBottom
+ @return SkIRect offset by dx and dy, with original width and height
+ */
+ constexpr SkIRect makeOffset(int32_t dx, int32_t dy) const {
+ return {
+ Sk32_sat_add(fLeft, dx), Sk32_sat_add(fTop, dy),
+ Sk32_sat_add(fRight, dx), Sk32_sat_add(fBottom, dy),
+ };
+ }
+
+ /** Returns SkIRect offset by (offset.x(), offset.y()).
+
+ If offset.x() is negative, SkIRect returned is moved to the left.
+ If offset.x() is positive, SkIRect returned is moved to the right.
+ If offset.y() is negative, SkIRect returned is moved upward.
+ If offset.y() is positive, SkIRect returned is moved downward.
+
+ @param offset translation vector
+ @return SkIRect translated by offset, with original width and height
+ */
+ constexpr SkIRect makeOffset(SkIVector offset) const {
+ return this->makeOffset(offset.x(), offset.y());
+ }
+
+ /** Returns SkIRect, inset by (dx, dy).
+
+ If dx is negative, SkIRect returned is wider.
+ If dx is positive, SkIRect returned is narrower.
+ If dy is negative, SkIRect returned is taller.
+ If dy is positive, SkIRect returned is shorter.
+
+ @param dx offset added to fLeft and subtracted from fRight
+ @param dy offset added to fTop and subtracted from fBottom
+ @return SkIRect inset symmetrically left and right, top and bottom
+ */
+ SkIRect makeInset(int32_t dx, int32_t dy) const {
+ return {
+ Sk32_sat_add(fLeft, dx), Sk32_sat_add(fTop, dy),
+ Sk32_sat_sub(fRight, dx), Sk32_sat_sub(fBottom, dy),
+ };
+ }
+
+ /** Returns SkIRect, outset by (dx, dy).
+
+ If dx is negative, SkIRect returned is narrower.
+ If dx is positive, SkIRect returned is wider.
+ If dy is negative, SkIRect returned is shorter.
+ If dy is positive, SkIRect returned is taller.
+
+ @param dx offset subtracted to fLeft and added from fRight
+ @param dy offset subtracted to fTop and added from fBottom
+ @return SkIRect outset symmetrically left and right, top and bottom
+ */
+ SkIRect makeOutset(int32_t dx, int32_t dy) const {
+ return {
+ Sk32_sat_sub(fLeft, dx), Sk32_sat_sub(fTop, dy),
+ Sk32_sat_add(fRight, dx), Sk32_sat_add(fBottom, dy),
+ };
+ }
+
+ /** Offsets SkIRect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom.
+
+ If dx is negative, moves SkIRect returned to the left.
+ If dx is positive, moves SkIRect returned to the right.
+ If dy is negative, moves SkIRect returned upward.
+ If dy is positive, moves SkIRect returned downward.
+
+ @param dx offset added to fLeft and fRight
+ @param dy offset added to fTop and fBottom
+ */
+ void offset(int32_t dx, int32_t dy) {
+ fLeft = Sk32_sat_add(fLeft, dx);
+ fTop = Sk32_sat_add(fTop, dy);
+ fRight = Sk32_sat_add(fRight, dx);
+ fBottom = Sk32_sat_add(fBottom, dy);
+ }
+
+ /** Offsets SkIRect by adding delta.fX to fLeft, fRight; and by adding delta.fY to
+ fTop, fBottom.
+
+ If delta.fX is negative, moves SkIRect returned to the left.
+ If delta.fX is positive, moves SkIRect returned to the right.
+ If delta.fY is negative, moves SkIRect returned upward.
+ If delta.fY is positive, moves SkIRect returned downward.
+
+ @param delta offset added to SkIRect
+ */
+ void offset(const SkIPoint& delta) {
+ this->offset(delta.fX, delta.fY);
+ }
+
+ /** Offsets SkIRect so that fLeft equals newX, and fTop equals newY. width and height
+ are unchanged.
+
+ @param newX stored in fLeft, preserving width()
+ @param newY stored in fTop, preserving height()
+ */
+ void offsetTo(int32_t newX, int32_t newY) {
+ fRight = Sk64_pin_to_s32((int64_t)fRight + newX - fLeft);
+ fBottom = Sk64_pin_to_s32((int64_t)fBottom + newY - fTop);
+ fLeft = newX;
+ fTop = newY;
+ }
+
+ /** Insets SkIRect by (dx,dy).
+
+ If dx is positive, makes SkIRect narrower.
+ If dx is negative, makes SkIRect wider.
+ If dy is positive, makes SkIRect shorter.
+ If dy is negative, makes SkIRect taller.
+
+ @param dx offset added to fLeft and subtracted from fRight
+ @param dy offset added to fTop and subtracted from fBottom
+ */
+ void inset(int32_t dx, int32_t dy) {
+ fLeft = Sk32_sat_add(fLeft, dx);
+ fTop = Sk32_sat_add(fTop, dy);
+ fRight = Sk32_sat_sub(fRight, dx);
+ fBottom = Sk32_sat_sub(fBottom, dy);
+ }
+
+ /** Outsets SkIRect by (dx, dy).
+
+ If dx is positive, makes SkIRect wider.
+ If dx is negative, makes SkIRect narrower.
+ If dy is positive, makes SkIRect taller.
+ If dy is negative, makes SkIRect shorter.
+
+ @param dx subtracted to fLeft and added from fRight
+ @param dy subtracted to fTop and added from fBottom
+ */
+ void outset(int32_t dx, int32_t dy) { this->inset(-dx, -dy); }
+
+ /** Adjusts SkIRect by adding dL to fLeft, dT to fTop, dR to fRight, and dB to fBottom.
+
+ If dL is positive, narrows SkIRect on the left. If negative, widens it on the left.
+ If dT is positive, shrinks SkIRect on the top. If negative, lengthens it on the top.
+ If dR is positive, narrows SkIRect on the right. If negative, widens it on the right.
+ If dB is positive, shrinks SkIRect on the bottom. If negative, lengthens it on the bottom.
+
+ The resulting SkIRect is not checked for validity. Thus, if the resulting SkIRect left is
+ greater than right, the SkIRect will be considered empty. Call sort() after this call
+ if that is not the desired behavior.
+
+ @param dL offset added to fLeft
+ @param dT offset added to fTop
+ @param dR offset added to fRight
+ @param dB offset added to fBottom
+ */
+ void adjust(int32_t dL, int32_t dT, int32_t dR, int32_t dB) {
+ fLeft = Sk32_sat_add(fLeft, dL);
+ fTop = Sk32_sat_add(fTop, dT);
+ fRight = Sk32_sat_add(fRight, dR);
+ fBottom = Sk32_sat_add(fBottom, dB);
+ }
+
+ /** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom.
+ Returns false if SkIRect is empty.
+
+ Considers input to describe constructed SkIRect: (x, y, x + 1, y + 1) and
+ returns true if constructed area is completely enclosed by SkIRect area.
+
+ @param x test SkIPoint x-coordinate
+ @param y test SkIPoint y-coordinate
+ @return true if (x, y) is inside SkIRect
+ */
+ bool contains(int32_t x, int32_t y) const {
+ return x >= fLeft && x < fRight && y >= fTop && y < fBottom;
+ }
+
+ /** Returns true if SkIRect contains r.
+ Returns false if SkIRect is empty or r is empty.
+
+ SkIRect contains r when SkIRect area completely includes r area.
+
+ @param r SkIRect contained
+ @return true if all sides of SkIRect are outside r
+ */
+ bool contains(const SkIRect& r) const {
+ return !r.isEmpty() && !this->isEmpty() && // check for empties
+ fLeft <= r.fLeft && fTop <= r.fTop &&
+ fRight >= r.fRight && fBottom >= r.fBottom;
+ }
+
+ /** Returns true if SkIRect contains r.
+ Returns false if SkIRect is empty or r is empty.
+
+ SkIRect contains r when SkIRect area completely includes r area.
+
+ @param r SkRect contained
+ @return true if all sides of SkIRect are outside r
+ */
+ inline bool contains(const SkRect& r) const;
+
+ /** Returns true if SkIRect contains construction.
+ Asserts if SkIRect is empty or construction is empty, and if SK_DEBUG is defined.
+
+ Return is undefined if SkIRect is empty or construction is empty.
+
+ @param r SkIRect contained
+ @return true if all sides of SkIRect are outside r
+ */
+ bool containsNoEmptyCheck(const SkIRect& r) const {
+ SkASSERT(fLeft < fRight && fTop < fBottom);
+ SkASSERT(r.fLeft < r.fRight && r.fTop < r.fBottom);
+ return fLeft <= r.fLeft && fTop <= r.fTop && fRight >= r.fRight && fBottom >= r.fBottom;
+ }
+
+ /** Returns true if SkIRect intersects r, and sets SkIRect to intersection.
+ Returns false if SkIRect does not intersect r, and leaves SkIRect unchanged.
+
+ Returns false if either r or SkIRect is empty, leaving SkIRect unchanged.
+
+ @param r limit of result
+ @return true if r and SkIRect have area in common
+ */
+ bool intersect(const SkIRect& r) {
+ return this->intersect(*this, r);
+ }
+
+ /** Returns true if a intersects b, and sets SkIRect to intersection.
+ Returns false if a does not intersect b, and leaves SkIRect unchanged.
+
+ Returns false if either a or b is empty, leaving SkIRect unchanged.
+
+ @param a SkIRect to intersect
+ @param b SkIRect to intersect
+ @return true if a and b have area in common
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(const SkIRect& a, const SkIRect& b);
+
+ /** Returns true if a intersects b.
+ Returns false if either a or b is empty, or do not intersect.
+
+ @param a SkIRect to intersect
+ @param b SkIRect to intersect
+ @return true if a and b have area in common
+ */
+ static bool Intersects(const SkIRect& a, const SkIRect& b) {
+ return SkIRect{}.intersect(a, b); // scratch rect absorbs the intersection; only the bool matters
+ }
+
+ /** Sets SkIRect to the union of itself and r.
+
+ Has no effect if r is empty. Otherwise, if SkIRect is empty, sets SkIRect to r.
+
+ @param r expansion SkIRect
+
+ example: https://fiddle.skia.org/c/@IRect_join_2
+ */
+ void join(const SkIRect& r);
+
+ /** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps
+ fTop and fBottom if fTop is greater than fBottom. Result may be empty,
+ and width() and height() will be zero or positive.
+ */
+ void sort() {
+ using std::swap;
+ if (fLeft > fRight) {
+ swap(fLeft, fRight);
+ }
+ if (fTop > fBottom) {
+ swap(fTop, fBottom);
+ }
+ }
+
+ /** Returns SkIRect with fLeft and fRight swapped if fLeft is greater than fRight; and
+ with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty;
+ and width() and height() will be zero or positive.
+
+ @return sorted SkIRect
+ */
+ SkIRect makeSorted() const {
+ return MakeLTRB(std::min(fLeft, fRight), std::min(fTop, fBottom),
+ std::max(fLeft, fRight), std::max(fTop, fBottom));
+ }
+};
+
+/** \struct SkRect
+ SkRect holds four SkScalar coordinates describing the upper and
+ lower bounds of a rectangle. SkRect may be created from outer bounds or
+ from position, width, and height. SkRect describes an area; if its right
+ is less than or equal to its left, or if its bottom is less than or equal to
+ its top, it is considered empty.
+*/
+struct SK_API SkRect {
+ SkScalar fLeft; //!< smaller x-axis bounds
+ SkScalar fTop; //!< smaller y-axis bounds
+ SkScalar fRight; //!< larger x-axis bounds
+ SkScalar fBottom; //!< larger y-axis bounds
+
+ /** Returns constructed SkRect set to (0, 0, 0, 0).
+ Many other rectangles are empty; if left is equal to or greater than right,
+ or if top is equal to or greater than bottom. Setting all members to zero
+ is a convenience, but does not designate a special empty rectangle.
+
+ @return bounds (0, 0, 0, 0)
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeEmpty() {
+ return SkRect{0, 0, 0, 0};
+ }
+
+ /** Returns constructed SkRect set to SkScalar values (0, 0, w, h). Does not
+ validate input; w or h may be negative.
+
+ Passing integer values may generate a compiler warning since SkRect cannot
+ represent 32-bit integers exactly. Use SkIRect for an exact integer rectangle.
+
+ @param w SkScalar width of constructed SkRect
+ @param h SkScalar height of constructed SkRect
+ @return bounds (0, 0, w, h)
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeWH(SkScalar w, SkScalar h) {
+ return SkRect{0, 0, w, h};
+ }
+
+ /** Returns constructed SkRect set to integer values (0, 0, w, h). Does not validate
+ input; w or h may be negative.
+
+ Use to avoid a compiler warning that input may lose precision when stored.
+ Use SkIRect for an exact integer rectangle.
+
+ @param w integer width of constructed SkRect
+ @param h integer height of constructed SkRect
+ @return bounds (0, 0, w, h)
+ */
+ static SkRect SK_WARN_UNUSED_RESULT MakeIWH(int w, int h) {
+ return {0, 0, SkIntToScalar(w), SkIntToScalar(h)};
+ }
+
+ /** Returns constructed SkRect set to (0, 0, size.width(), size.height()). Does not
+ validate input; size.width() or size.height() may be negative.
+
+ @param size SkScalar values for SkRect width and height
+ @return bounds (0, 0, size.width(), size.height())
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeSize(const SkSize& size) {
+ return SkRect{0, 0, size.fWidth, size.fHeight};
+ }
+
+ /** Returns constructed SkRect set to (l, t, r, b). Does not sort input; SkRect may
+ result in fLeft greater than fRight, or fTop greater than fBottom.
+
+ @param l SkScalar stored in fLeft
+ @param t SkScalar stored in fTop
+ @param r SkScalar stored in fRight
+ @param b SkScalar stored in fBottom
+ @return bounds (l, t, r, b)
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeLTRB(SkScalar l, SkScalar t, SkScalar r,
+ SkScalar b) {
+ return SkRect {l, t, r, b};
+ }
+
+ /** Returns constructed SkRect set to (x, y, x + w, y + h).
+ Does not validate input; w or h may be negative.
+
+ @param x stored in fLeft
+ @param y stored in fTop
+ @param w added to x and stored in fRight
+ @param h added to y and stored in fBottom
+ @return bounds at (x, y) with width w and height h
+ */
+ static constexpr SkRect SK_WARN_UNUSED_RESULT MakeXYWH(SkScalar x, SkScalar y, SkScalar w,
+ SkScalar h) {
+ return SkRect {x, y, x + w, y + h};
+ }
+
+ /** Returns constructed SkRect set to (0, 0, size.width(), size.height()).
+ Does not validate input; size.width() or size.height() may be negative.
+
+ @param size integer values for SkRect width and height
+ @return bounds (0, 0, size.width(), size.height())
+ */
+ static SkRect Make(const SkISize& size) {
+ return MakeIWH(size.width(), size.height());
+ }
+
+ /** Returns constructed SkRect set to irect, promoting integers to scalar.
+ Does not validate input; fLeft may be greater than fRight, fTop may be greater
+ than fBottom.
+
+ @param irect integer unsorted bounds
+ @return irect members converted to SkScalar
+ */
+ static SkRect SK_WARN_UNUSED_RESULT Make(const SkIRect& irect) {
+ return {
+ SkIntToScalar(irect.fLeft), SkIntToScalar(irect.fTop),
+ SkIntToScalar(irect.fRight), SkIntToScalar(irect.fBottom)
+ };
+ }
+
+ /** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal
+ to or greater than fBottom. Call sort() to reverse rectangles with negative
+ width() or height().
+
+ @return true if width() or height() are zero or negative
+ */
+ bool isEmpty() const {
+ // We write it as the NOT of a non-empty rect, so we will return true if any values
+ // are NaN.
+ return !(fLeft < fRight && fTop < fBottom);
+ }
+
+ /** Returns true if fLeft is equal to or less than fRight, and fTop is equal
+ to or less than fBottom. Call sort() to reverse rectangles with negative
+ width() or height().
+
+ @return true if width() and height() are zero or positive
+ */
+ bool isSorted() const { return fLeft <= fRight && fTop <= fBottom; }
+
+ /** Returns true if all values in the rectangle are finite: SK_ScalarMin or larger,
+ and SK_ScalarMax or smaller.
+
+ @return true if no member is infinite or NaN
+ */
+ bool isFinite() const {
+ float accum = 0;
+ accum *= fLeft;
+ accum *= fTop;
+ accum *= fRight;
+ accum *= fBottom;
+
+ // accum is either NaN or it is finite (zero).
+ SkASSERT(0 == accum || SkScalarIsNaN(accum));
+
+ // value==value will be true iff value is not NaN
+ // TODO: is it faster to say !accum or accum==accum?
+ return !SkScalarIsNaN(accum);
+ }
+
+ /** Returns left edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fLeft
+ */
+ constexpr SkScalar x() const { return fLeft; }
+
+ /** Returns top edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fTop
+ */
+ constexpr SkScalar y() const { return fTop; }
+
+ /** Returns left edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fLeft
+ */
+ constexpr SkScalar left() const { return fLeft; }
+
+ /** Returns top edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fTop
+ */
+ constexpr SkScalar top() const { return fTop; }
+
+ /** Returns right edge of SkRect, if sorted. Call isSorted() to see if SkRect is valid.
+ Call sort() to reverse fLeft and fRight if needed.
+
+ @return fRight
+ */
+ constexpr SkScalar right() const { return fRight; }
+
+ /** Returns bottom edge of SkRect, if sorted. Call isEmpty() to see if SkRect may be invalid,
+ and sort() to reverse fTop and fBottom if needed.
+
+ @return fBottom
+ */
+ constexpr SkScalar bottom() const { return fBottom; }
+
+ /** Returns span on the x-axis. This does not check if SkRect is sorted, or if
+ result fits in 32-bit float; result may be negative or infinity.
+
+ @return fRight minus fLeft
+ */
+ constexpr SkScalar width() const { return fRight - fLeft; }
+
+ /** Returns span on the y-axis. This does not check if SkRect is sorted, or if
+ result fits in 32-bit float; result may be negative or infinity.
+
+ @return fBottom minus fTop
+ */
+ constexpr SkScalar height() const { return fBottom - fTop; }
+
+ /** Returns average of left edge and right edge. Result does not change if SkRect
+ is sorted. Result may overflow to infinity if SkRect is far from the origin.
+
+ @return midpoint on x-axis
+ */
+ constexpr SkScalar centerX() const {
+ // don't use SkScalarHalf(fLeft + fRight) as that might overflow before the 0.5
+ return SkScalarHalf(fLeft) + SkScalarHalf(fRight);
+ }
+
+ /** Returns average of top edge and bottom edge. Result does not change if SkRect
+ is sorted.
+
+ @return midpoint on y-axis
+ */
+ constexpr SkScalar centerY() const {
+ // don't use SkScalarHalf(fTop + fBottom) as that might overflow before the 0.5
+ return SkScalarHalf(fTop) + SkScalarHalf(fBottom);
+ }
+
+ /** Returns the point this->centerX(), this->centerY().
+ @return rectangle center
+ */
+ constexpr SkPoint center() const { return {this->centerX(), this->centerY()}; }
+
+ /** Returns true if all members in a: fLeft, fTop, fRight, and fBottom; are
+ equal to the corresponding members in b.
+
+ a and b are not equal if either contain NaN. a and b are equal if members
+ contain zeroes with different signs.
+
+ @param a SkRect to compare
+ @param b SkRect to compare
+ @return true if members are equal
+ */
+ friend bool operator==(const SkRect& a, const SkRect& b) {
+ return SkScalarsEqual((const SkScalar*)&a, (const SkScalar*)&b, 4);
+ }
+
+ /** Returns true if any in a: fLeft, fTop, fRight, and fBottom; does not
+ equal the corresponding members in b.
+
+ a and b are not equal if either contain NaN. a and b are equal if members
+ contain zeroes with different signs.
+
+ @param a SkRect to compare
+ @param b SkRect to compare
+ @return true if members are not equal
+ */
+ friend bool operator!=(const SkRect& a, const SkRect& b) {
+ return !SkScalarsEqual((const SkScalar*)&a, (const SkScalar*)&b, 4);
+ }
+
+ /** Returns four points in quad that enclose SkRect ordered as: top-left, top-right,
+ bottom-right, bottom-left.
+
+ TODO: Consider adding parameter to control whether quad is clockwise or counterclockwise.
+
+ @param quad storage for corners of SkRect
+
+ example: https://fiddle.skia.org/c/@Rect_toQuad
+ */
+ void toQuad(SkPoint quad[4]) const;
+
+ /** Sets SkRect to (0, 0, 0, 0).
+
+ Many other rectangles are empty; if left is equal to or greater than right,
+ or if top is equal to or greater than bottom. Setting all members to zero
+ is a convenience, but does not designate a special empty rectangle.
+ */
+ void setEmpty() { *this = MakeEmpty(); }
+
+ /** Sets SkRect to src, promoting src members from integer to scalar.
+ Very large values in src may lose precision.
+
+ @param src integer SkRect
+ */
+ void set(const SkIRect& src) {
+ fLeft = SkIntToScalar(src.fLeft);
+ fTop = SkIntToScalar(src.fTop);
+ fRight = SkIntToScalar(src.fRight);
+ fBottom = SkIntToScalar(src.fBottom);
+ }
+
+ /** Sets SkRect to (left, top, right, bottom).
+ left and right are not sorted; left is not necessarily less than right.
+ top and bottom are not sorted; top is not necessarily less than bottom.
+
+ @param left stored in fLeft
+ @param top stored in fTop
+ @param right stored in fRight
+ @param bottom stored in fBottom
+ */
+ void setLTRB(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ /** Sets to bounds of SkPoint array with count entries. If count is zero or smaller,
+ or if SkPoint array contains an infinity or NaN, sets to (0, 0, 0, 0).
+
+ Result is either empty or sorted: fLeft is less than or equal to fRight, and
+ fTop is less than or equal to fBottom.
+
+ @param pts SkPoint array
+ @param count entries in array
+ */
+ void setBounds(const SkPoint pts[], int count) {
+ (void)this->setBoundsCheck(pts, count);
+ }
+
+ /** Sets to bounds of SkPoint array with count entries. Returns false if count is
+ zero or smaller, or if SkPoint array contains an infinity or NaN; in these cases
+ sets SkRect to (0, 0, 0, 0).
+
+ Result is either empty or sorted: fLeft is less than or equal to fRight, and
+ fTop is less than or equal to fBottom.
+
+ @param pts SkPoint array
+ @param count entries in array
+ @return true if all SkPoint values are finite
+
+ example: https://fiddle.skia.org/c/@Rect_setBoundsCheck
+ */
+ bool setBoundsCheck(const SkPoint pts[], int count);
+
+ /** Sets to bounds of SkPoint pts array with count entries. If any SkPoint in pts
+ contains infinity or NaN, all SkRect dimensions are set to NaN.
+
+ @param pts SkPoint array
+ @param count entries in array
+
+ example: https://fiddle.skia.org/c/@Rect_setBoundsNoCheck
+ */
+ void setBoundsNoCheck(const SkPoint pts[], int count);
+
+ /** Sets bounds to the smallest SkRect enclosing SkPoint p0 and p1. The result is
+ sorted and may be empty. Does not check to see if values are finite.
+
+ @param p0 corner to include
+ @param p1 corner to include
+ */
+ void set(const SkPoint& p0, const SkPoint& p1) {
+ fLeft = std::min(p0.fX, p1.fX);
+ fRight = std::max(p0.fX, p1.fX);
+ fTop = std::min(p0.fY, p1.fY);
+ fBottom = std::max(p0.fY, p1.fY);
+ }
+
+ /** Sets SkRect to (x, y, x + width, y + height).
+ Does not validate input; width or height may be negative.
+
+ @param x stored in fLeft
+ @param y stored in fTop
+ @param width added to x and stored in fRight
+ @param height added to y and stored in fBottom
+ */
+ void setXYWH(SkScalar x, SkScalar y, SkScalar width, SkScalar height) {
+ fLeft = x;
+ fTop = y;
+ fRight = x + width;
+ fBottom = y + height;
+ }
+
+ /** Sets SkRect to (0, 0, width, height). Does not validate input;
+ width or height may be negative.
+
+ @param width stored in fRight
+ @param height stored in fBottom
+ */
+ void setWH(SkScalar width, SkScalar height) {
+ fLeft = 0;
+ fTop = 0;
+ fRight = width;
+ fBottom = height;
+ }
+ /** Sets SkRect to (0, 0, width, height), promoting integer inputs to SkScalar.
+ Does not validate input; width or height may be negative.
+
+ @param width converted and stored in fRight
+ @param height converted and stored in fBottom
+ */
+ void setIWH(int32_t width, int32_t height) {
+ this->setWH(SkIntToScalar(width), SkIntToScalar(height));
+ }
+
+ /** Returns SkRect offset by (dx, dy).
+
+ If dx is negative, SkRect returned is moved to the left.
+ If dx is positive, SkRect returned is moved to the right.
+ If dy is negative, SkRect returned is moved upward.
+ If dy is positive, SkRect returned is moved downward.
+
+ @param dx added to fLeft and fRight
+ @param dy added to fTop and fBottom
+ @return SkRect offset on axes, with original width and height
+ */
+ constexpr SkRect makeOffset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft + dx, fTop + dy, fRight + dx, fBottom + dy);
+ }
+
+ /** Returns SkRect offset by v.
+
+ @param v added to rect
+ @return SkRect offset on axes, with original width and height
+ */
+ constexpr SkRect makeOffset(SkVector v) const { return this->makeOffset(v.x(), v.y()); }
+
+ /** Returns SkRect, inset by (dx, dy).
+
+ If dx is negative, SkRect returned is wider.
+ If dx is positive, SkRect returned is narrower.
+ If dy is negative, SkRect returned is taller.
+ If dy is positive, SkRect returned is shorter.
+
+ @param dx added to fLeft and subtracted from fRight
+ @param dy added to fTop and subtracted from fBottom
+ @return SkRect inset symmetrically left and right, top and bottom
+ */
+ SkRect makeInset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft + dx, fTop + dy, fRight - dx, fBottom - dy);
+ }
+
+ /** Returns SkRect, outset by (dx, dy).
+
+ If dx is negative, SkRect returned is narrower.
+ If dx is positive, SkRect returned is wider.
+ If dy is negative, SkRect returned is shorter.
+ If dy is positive, SkRect returned is taller.
+
+ @param dx subtracted from fLeft and added to fRight
+ @param dy subtracted from fTop and added to fBottom
+ @return SkRect outset symmetrically left and right, top and bottom
+ */
+ SkRect makeOutset(SkScalar dx, SkScalar dy) const {
+ return MakeLTRB(fLeft - dx, fTop - dy, fRight + dx, fBottom + dy);
+ }
+
+ /** Offsets SkRect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom.
+
+ If dx is negative, moves SkRect to the left.
+ If dx is positive, moves SkRect to the right.
+ If dy is negative, moves SkRect upward.
+ If dy is positive, moves SkRect downward.
+
+ @param dx offset added to fLeft and fRight
+ @param dy offset added to fTop and fBottom
+ */
+ void offset(SkScalar dx, SkScalar dy) {
+ fLeft += dx;
+ fTop += dy;
+ fRight += dx;
+ fBottom += dy;
+ }
+
+ /** Offsets SkRect by adding delta.fX to fLeft, fRight; and by adding delta.fY to
+ fTop, fBottom.
+
+ If delta.fX is negative, moves SkRect to the left.
+ If delta.fX is positive, moves SkRect to the right.
+ If delta.fY is negative, moves SkRect upward.
+ If delta.fY is positive, moves SkRect downward.
+
+ @param delta added to SkRect
+ */
+ void offset(const SkPoint& delta) {
+ this->offset(delta.fX, delta.fY);
+ }
+
+ /** Offsets SkRect so that fLeft equals newX, and fTop equals newY. width and height
+ are unchanged.
+
+ @param newX stored in fLeft, preserving width()
+ @param newY stored in fTop, preserving height()
+ */
+ void offsetTo(SkScalar newX, SkScalar newY) {
+ fRight += newX - fLeft;
+ fBottom += newY - fTop;
+ fLeft = newX;
+ fTop = newY;
+ }
+
+ /** Insets SkRect by (dx, dy).
+
+ If dx is positive, makes SkRect narrower.
+ If dx is negative, makes SkRect wider.
+ If dy is positive, makes SkRect shorter.
+ If dy is negative, makes SkRect taller.
+
+ @param dx added to fLeft and subtracted from fRight
+ @param dy added to fTop and subtracted from fBottom
+ */
+ void inset(SkScalar dx, SkScalar dy) {
+ fLeft += dx;
+ fTop += dy;
+ fRight -= dx;
+ fBottom -= dy;
+ }
+
+ /** Outsets SkRect by (dx, dy).
+
+ If dx is positive, makes SkRect wider.
+ If dx is negative, makes SkRect narrower.
+ If dy is positive, makes SkRect taller.
+ If dy is negative, makes SkRect shorter.
+
+ @param dx subtracted from fLeft and added to fRight
+ @param dy subtracted from fTop and added to fBottom
+ */
+ void outset(SkScalar dx, SkScalar dy) { this->inset(-dx, -dy); }
+
+ /** Returns true if SkRect intersects r, and sets SkRect to intersection.
+ Returns false if SkRect does not intersect r, and leaves SkRect unchanged.
+
+ Returns false if either r or SkRect is empty, leaving SkRect unchanged.
+
+ @param r limit of result
+ @return true if r and SkRect have area in common
+
+ example: https://fiddle.skia.org/c/@Rect_intersect
+ */
+ bool intersect(const SkRect& r);
+
+ /** Returns true if a intersects b, and sets SkRect to intersection.
+ Returns false if a does not intersect b, and leaves SkRect unchanged.
+
+ Returns false if either a or b is empty, leaving SkRect unchanged.
+
+ @param a SkRect to intersect
+ @param b SkRect to intersect
+ @return true if a and b have area in common
+ */
+ bool SK_WARN_UNUSED_RESULT intersect(const SkRect& a, const SkRect& b);
+
+
+private:
+ static bool Intersects(SkScalar al, SkScalar at, SkScalar ar, SkScalar ab,
+ SkScalar bl, SkScalar bt, SkScalar br, SkScalar bb) {
+ SkScalar L = std::max(al, bl);
+ SkScalar R = std::min(ar, br);
+ SkScalar T = std::max(at, bt);
+ SkScalar B = std::min(ab, bb);
+ return L < R && T < B;
+ }
+
+public:
+
+ /** Returns true if SkRect intersects r.
+ Returns false if either r or SkRect is empty, or do not intersect.
+
+ @param r SkRect to intersect
+ @return true if r and SkRect have area in common
+ */
+ bool intersects(const SkRect& r) const {
+ return Intersects(fLeft, fTop, fRight, fBottom,
+ r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+ /** Returns true if a intersects b.
+ Returns false if either a or b is empty, or do not intersect.
+
+ @param a SkRect to intersect
+ @param b SkRect to intersect
+ @return true if a and b have area in common
+ */
+ static bool Intersects(const SkRect& a, const SkRect& b) {
+ return Intersects(a.fLeft, a.fTop, a.fRight, a.fBottom,
+ b.fLeft, b.fTop, b.fRight, b.fBottom);
+ }
+
+ /** Sets SkRect to the union of itself and r.
+
+ Has no effect if r is empty. Otherwise, if SkRect is empty, sets
+ SkRect to r.
+
+ @param r expansion SkRect
+
+ example: https://fiddle.skia.org/c/@Rect_join_2
+ */
+ void join(const SkRect& r);
+
+ /** Sets SkRect to the union of itself and r.
+
+ Asserts if r is empty and SK_DEBUG is defined.
+ If SkRect is empty, sets SkRect to r.
+
+ May produce incorrect results if r is empty.
+
+ @param r expansion SkRect
+ */
+ void joinNonEmptyArg(const SkRect& r) {
+ SkASSERT(!r.isEmpty());
+ // if we are empty, just assign
+ if (fLeft >= fRight || fTop >= fBottom) {
+ *this = r;
+ } else {
+ this->joinPossiblyEmptyRect(r);
+ }
+ }
+
+ /** Sets SkRect to the union of itself and the construction.
+
+ May produce incorrect results if SkRect or r is empty.
+
+ @param r expansion SkRect
+ */
+ void joinPossiblyEmptyRect(const SkRect& r) {
+ fLeft = std::min(fLeft, r.left());
+ fTop = std::min(fTop, r.top());
+ fRight = std::max(fRight, r.right());
+ fBottom = std::max(fBottom, r.bottom());
+ }
+
+ /** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom.
+ Returns false if SkRect is empty.
+
+ @param x test SkPoint x-coordinate
+ @param y test SkPoint y-coordinate
+ @return true if (x, y) is inside SkRect
+ */
+ bool contains(SkScalar x, SkScalar y) const {
+ return x >= fLeft && x < fRight && y >= fTop && y < fBottom;
+ }
+
+ /** Returns true if SkRect contains r.
+ Returns false if SkRect is empty or r is empty.
+
+ SkRect contains r when SkRect area completely includes r area.
+
+ @param r SkRect contained
+ @return true if SkRect completely encloses r
+ */
+ bool contains(const SkRect& r) const {
+ // todo: can we eliminate the this->isEmpty check?
+ return !r.isEmpty() && !this->isEmpty() &&
+ fLeft <= r.fLeft && fTop <= r.fTop &&
+ fRight >= r.fRight && fBottom >= r.fBottom;
+ }
+
+ /** Returns true if SkRect contains r.
+ Returns false if SkRect is empty or r is empty.
+
+ SkRect contains r when SkRect area completely includes r area.
+
+ @param r SkIRect contained
+ @return true if SkRect completely encloses r
+ */
+ bool contains(const SkIRect& r) const {
+ // todo: can we eliminate the this->isEmpty check?
+ return !r.isEmpty() && !this->isEmpty() &&
+ fLeft <= SkIntToScalar(r.fLeft) && fTop <= SkIntToScalar(r.fTop) &&
+ fRight >= SkIntToScalar(r.fRight) && fBottom >= SkIntToScalar(r.fBottom);
+ }
+
+ /** Sets SkIRect by adding 0.5 and discarding the fractional portion of SkRect
+ members, using (SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop),
+ SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom)).
+
+ @param dst storage for SkIRect
+ */
+ void round(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->setLTRB(SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop),
+ SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom));
+ }
+
+ /** Sets SkIRect by discarding the fractional portion of fLeft and fTop; and rounding
+ up fRight and fBottom, using
+ (SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop),
+ SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)).
+
+ @param dst storage for SkIRect
+ */
+ void roundOut(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->setLTRB(SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop),
+ SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom));
+ }
+
+ /** Sets SkRect dst by discarding the fractional portion of fLeft and fTop; and
+ rounding up fRight and fBottom, using
+ (SkScalarFloorToScalar(fLeft), SkScalarFloorToScalar(fTop),
+ SkScalarCeilToScalar(fRight), SkScalarCeilToScalar(fBottom)).
+
+ @param dst storage for SkRect; must not be nullptr
+ */
+ void roundOut(SkRect* dst) const {
+ SkASSERT(dst); // match the SkIRect* overloads, which also reject nullptr in debug builds
+ dst->setLTRB(SkScalarFloorToScalar(fLeft), SkScalarFloorToScalar(fTop),
+ SkScalarCeilToScalar(fRight), SkScalarCeilToScalar(fBottom));
+ }
+
+ /** Sets SkRect by rounding up fLeft and fTop; and discarding the fractional portion
+ of fRight and fBottom, using
+ (SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop),
+ SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom)).
+
+ @param dst storage for SkIRect
+ */
+ void roundIn(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->setLTRB(SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop),
+ SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom));
+ }
+
+ /** Returns SkIRect by adding 0.5 and discarding the fractional portion of SkRect
+ members, using (SkScalarRoundToInt(fLeft), SkScalarRoundToInt(fTop),
+ SkScalarRoundToInt(fRight), SkScalarRoundToInt(fBottom)).
+
+ @return rounded SkIRect
+ */
+ SkIRect round() const {
+ SkIRect ir;
+ this->round(&ir);
+ return ir;
+ }
+
+ /** Returns SkIRect by discarding the fractional portion of fLeft and fTop; and
+ rounding up fRight and fBottom, using
+ (SkScalarFloorToInt(fLeft), SkScalarFloorToInt(fTop),
+ SkScalarCeilToInt(fRight), SkScalarCeilToInt(fBottom)).
+
+ @return rounded SkIRect
+ */
+ SkIRect roundOut() const {
+ SkIRect ir;
+ this->roundOut(&ir);
+ return ir;
+ }
+ /** Returns SkIRect by rounding up fLeft and fTop; and discarding the fractional
+ portion of fRight and fBottom, using
+ (SkScalarCeilToInt(fLeft), SkScalarCeilToInt(fTop),
+ SkScalarFloorToInt(fRight), SkScalarFloorToInt(fBottom)).
+
+ @return rounded SkIRect
+ */
+ SkIRect roundIn() const {
+ SkIRect ir;
+ this->roundIn(&ir);
+ return ir;
+ }
+
+ /** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps
+ fTop and fBottom if fTop is greater than fBottom. Result may be empty;
+ and width() and height() will be zero or positive.
+ */
+ void sort() {
+ using std::swap;
+ if (fLeft > fRight) {
+ swap(fLeft, fRight);
+ }
+
+ if (fTop > fBottom) {
+ swap(fTop, fBottom);
+ }
+ }
+
+ /** Returns SkRect with fLeft and fRight swapped if fLeft is greater than fRight; and
+ with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty;
+ and width() and height() will be zero or positive.
+
+ @return sorted SkRect
+ */
+ SkRect makeSorted() const {
+ return MakeLTRB(std::min(fLeft, fRight), std::min(fTop, fBottom),
+ std::max(fLeft, fRight), std::max(fTop, fBottom));
+ }
+
+ /** Returns pointer to first scalar in SkRect, to treat it as an array with four
+ entries.
+
+ @return pointer to fLeft
+ */
+ const SkScalar* asScalars() const { return &fLeft; }
+
+ /** Writes text representation of SkRect to standard output. Set asHex to true to
+ generate exact binary representations of floating point numbers.
+
+ @param asHex true if SkScalar values are written as hexadecimal
+
+ example: https://fiddle.skia.org/c/@Rect_dump
+ */
+ void dump(bool asHex) const;
+
+ /** Writes text representation of SkRect to standard output. The representation may be
+ directly compiled as C++ code. Floating point values are written
+ with limited precision; it may not be possible to reconstruct original SkRect
+ from output.
+ */
+ void dump() const { this->dump(false); }
+
+ /** Writes text representation of SkRect to standard output. The representation may be
+ directly compiled as C++ code. Floating point values are written
+ in hexadecimal to preserve their exact bit pattern. The output reconstructs the
+ original SkRect.
+
+ Use instead of dump() when submitting bug reports against Skia.
+ */
+ void dumpHex() const { this->dump(true); }
+};
+
+inline bool SkIRect::contains(const SkRect& r) const {
+ return !r.isEmpty() && !this->isEmpty() && // check for empties
+ (SkScalar)fLeft <= r.fLeft && (SkScalar)fTop <= r.fTop &&
+ (SkScalar)fRight >= r.fRight && (SkScalar)fBottom >= r.fBottom;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRefCnt.h b/gfx/skia/skia/include/core/SkRefCnt.h
new file mode 100644
index 0000000000..668de14e1d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRefCnt.h
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRefCnt_DEFINED
+#define SkRefCnt_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+/** \class SkRefCntBase
+
+ SkRefCntBase is the base class for objects that may be shared by multiple
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
+*/
+class SK_API SkRefCntBase {
+public:
+ /** Default construct, initializing the reference count to 1.
+ */
+ SkRefCntBase() : fRefCnt(1) {}
+
+ /** Destruct, asserting that the reference count is 1.
+ */
+ virtual ~SkRefCntBase() {
+ #ifdef SK_DEBUG
+ SkASSERTF(this->getRefCnt() == 1, "fRefCnt was %d", this->getRefCnt());
+ // illegal value, to catch us if we reuse after delete
+ fRefCnt.store(0, std::memory_order_relaxed);
+ #endif
+ }
+
+ /** May return true if the caller is the only owner.
+ * Ensures that all previous owner's actions are complete.
+ */
+ bool unique() const {
+ if (1 == fRefCnt.load(std::memory_order_acquire)) {
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the reference count. Must be balanced by a call to unref().
+ */
+ void ref() const {
+ SkASSERT(this->getRefCnt() > 0);
+ // No barrier required.
+ (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+ /** Decrement the reference count. If the reference count is 1 before the
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
+ */
+ void unref() const {
+ SkASSERT(this->getRefCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose();
+ }
+ }
+
+private:
+
+#ifdef SK_DEBUG
+ /** Return the reference count. Use only for debugging. */
+ int32_t getRefCnt() const {
+ return fRefCnt.load(std::memory_order_relaxed);
+ }
+#endif
+
+ /**
+ * Called when the ref count goes to 0.
+ */
+ virtual void internal_dispose() const {
+ #ifdef SK_DEBUG
+ SkASSERT(0 == this->getRefCnt());
+ fRefCnt.store(1, std::memory_order_relaxed);
+ #endif
+ delete this;
+ }
+
+ // The following friends are those which override internal_dispose()
+ // and conditionally call SkRefCnt::internal_dispose().
+ friend class SkWeakRefCnt;
+
+ mutable std::atomic<int32_t> fRefCnt;
+
+ SkRefCntBase(SkRefCntBase&&) = delete;
+ SkRefCntBase(const SkRefCntBase&) = delete;
+ SkRefCntBase& operator=(SkRefCntBase&&) = delete;
+ SkRefCntBase& operator=(const SkRefCntBase&) = delete;
+};
+
+#ifdef SK_REF_CNT_MIXIN_INCLUDE
+// It is the responsibility of the following include to define the type SkRefCnt.
+// This SkRefCnt should normally derive from SkRefCntBase.
+#include SK_REF_CNT_MIXIN_INCLUDE
+#else
+class SK_API SkRefCnt : public SkRefCntBase {
+ // "#include SK_REF_CNT_MIXIN_INCLUDE" doesn't work with this build system.
+ #if defined(SK_BUILD_FOR_GOOGLE3)
+ public:
+ void deref() const { this->unref(); }
+ #endif
+};
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Call obj->ref() and return obj. The obj must not be nullptr.
+ */
+template <typename T> static inline T* SkRef(T* obj) {
+ SkASSERT(obj);
+ obj->ref();
+ return obj;
+}
+
+/** Check if the argument is non-null, and if so, call obj->ref() and return obj.
+ */
+template <typename T> static inline T* SkSafeRef(T* obj) {
+ if (obj) {
+ obj->ref();
+ }
+ return obj;
+}
+
+/** Check if the argument is non-null, and if so, call obj->unref()
+ */
+template <typename T> static inline void SkSafeUnref(T* obj) {
+ if (obj) {
+ obj->unref();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// This is a variant of SkRefCnt that's Not Virtual, so weighs 4 bytes instead of 8 or 16.
+// There's only benefit to using this if the deriving class does not otherwise need a vtable.
+template <typename Derived>
+class SkNVRefCnt {
+public:
+ SkNVRefCnt() : fRefCnt(1) {}
+ ~SkNVRefCnt() {
+ #ifdef SK_DEBUG
+ int rc = fRefCnt.load(std::memory_order_relaxed);
+ SkASSERTF(rc == 1, "NVRefCnt was %d", rc);
+ #endif
+ }
+
+ // Implementation is pretty much the same as SkRefCntBase. All required barriers are the same:
+ // - unique() needs acquire when it returns true, and no barrier if it returns false;
+ // - ref() doesn't need any barrier;
+ // - unref() needs a release barrier, and an acquire if it's going to call delete.
+
+ bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); }
+ void ref() const { (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); }
+ void unref() const {
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // restore the 1 for our destructor's assert
+ SkDEBUGCODE(fRefCnt.store(1, std::memory_order_relaxed));
+ delete (const Derived*)this;
+ }
+ }
+ void deref() const { this->unref(); }
+
+ // This must be used with caution. It is only valid to call this when 'threadIsolatedTestCnt'
+ // refs are known to be isolated to the current thread. That is, it is known that there are at
+ // least 'threadIsolatedTestCnt' refs for which no other thread may make a balancing unref()
+ // call. Assuming the contract is followed, if this returns false then no other thread has
+ // ownership of this. If it returns true then another thread *may* have ownership.
+ bool refCntGreaterThan(int32_t threadIsolatedTestCnt) const {
+ int cnt = fRefCnt.load(std::memory_order_acquire);
+ // If this fails then the above contract has been violated.
+ SkASSERT(cnt >= threadIsolatedTestCnt);
+ return cnt > threadIsolatedTestCnt;
+ }
+
+private:
+ mutable std::atomic<int32_t> fRefCnt;
+
+ SkNVRefCnt(SkNVRefCnt&&) = delete;
+ SkNVRefCnt(const SkNVRefCnt&) = delete;
+ SkNVRefCnt& operator=(SkNVRefCnt&&) = delete;
+ SkNVRefCnt& operator=(const SkNVRefCnt&) = delete;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Shared pointer class to wrap classes that support a ref()/unref() interface.
+ *
+ * This can be used for classes inheriting from SkRefCnt, but it also works for other
+ * classes that match the interface, but have different internal choices: e.g. the hosted class
+ * may have its ref/unref be thread-safe, but that is not assumed/imposed by sk_sp.
+ *
+ * Declared with the trivial_abi attribute where supported so that sk_sp and types containing it
+ * may be considered as trivially relocatable by the compiler so that destroying-move operations
+ * i.e. move constructor followed by destructor can be optimized to memcpy.
+ */
+template <typename T> class SK_TRIVIAL_ABI sk_sp {
+public:
+ using element_type = T;
+
+ constexpr sk_sp() : fPtr(nullptr) {}
+ constexpr sk_sp(std::nullptr_t) : fPtr(nullptr) {}
+
+ /**
+ * Shares the underlying object by calling ref(), so that both the argument and the newly
+ * created sk_sp both have a reference to it.
+ */
+ sk_sp(const sk_sp<T>& that) : fPtr(SkSafeRef(that.get())) {}
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
+ sk_sp(const sk_sp<U>& that) : fPtr(SkSafeRef(that.get())) {}
+
+ /**
+ * Move the underlying object from the argument to the newly created sk_sp. Afterwards only
+ * the new sk_sp will have a reference to the object, and the argument will point to null.
+ * No call to ref() or unref() will be made.
+ */
+ sk_sp(sk_sp<T>&& that) : fPtr(that.release()) {}
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
+ sk_sp(sk_sp<U>&& that) : fPtr(that.release()) {}
+
+ /**
+ * Adopt the bare pointer into the newly created sk_sp.
+ * No call to ref() or unref() will be made.
+ */
+ explicit sk_sp(T* obj) : fPtr(obj) {}
+
+ /**
+ * Calls unref() on the underlying object pointer.
+ */
+ ~sk_sp() {
+ SkSafeUnref(fPtr);
+ SkDEBUGCODE(fPtr = nullptr);
+ }
+
+ sk_sp<T>& operator=(std::nullptr_t) { this->reset(); return *this; }
+
+ /**
+ * Shares the underlying object referenced by the argument by calling ref() on it. If this
+ * sk_sp previously had a reference to an object (i.e. not null) it will call unref() on that
+ * object.
+ */
+ sk_sp<T>& operator=(const sk_sp<T>& that) {
+ if (this != &that) {
+ this->reset(SkSafeRef(that.get()));
+ }
+ return *this;
+ }
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
+ sk_sp<T>& operator=(const sk_sp<U>& that) {
+ this->reset(SkSafeRef(that.get()));
+ return *this;
+ }
+
+ /**
+ * Move the underlying object from the argument to the sk_sp. If the sk_sp previously held
+ * a reference to another object, unref() will be called on that object. No call to ref()
+ * will be made.
+ */
+ sk_sp<T>& operator=(sk_sp<T>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+ template <typename U,
+ typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
+ sk_sp<T>& operator=(sk_sp<U>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+
+ T& operator*() const {
+ SkASSERT(this->get() != nullptr);
+ return *this->get();
+ }
+
+ explicit operator bool() const { return this->get() != nullptr; }
+
+ T* get() const { return fPtr; }
+ T* operator->() const { return fPtr; }
+
+ /**
+ * Adopt the new bare pointer, and call unref() on any previously held object (if not null).
+ * No call to ref() will be made.
+ */
+ void reset(T* ptr = nullptr) {
+ // Calling fPtr->unref() may call this->~() or this->reset(T*).
+ // http://wg21.cmeerw.net/lwg/issue998
+ // http://wg21.cmeerw.net/lwg/issue2262
+ T* oldPtr = fPtr;
+ fPtr = ptr;
+ SkSafeUnref(oldPtr);
+ }
+
+ /**
+ * Return the bare pointer, and set the internal object pointer to nullptr.
+ * The caller must assume ownership of the object, and manage its reference count directly.
+ * No call to unref() will be made.
+ */
+ T* SK_WARN_UNUSED_RESULT release() {
+ T* ptr = fPtr;
+ fPtr = nullptr;
+ return ptr;
+ }
+
+ void swap(sk_sp<T>& that) /*noexcept*/ {
+ using std::swap;
+ swap(fPtr, that.fPtr);
+ }
+
+ using sk_is_trivially_relocatable = std::true_type;
+
+private:
+ T* fPtr;
+};
+
+template <typename T> inline void swap(sk_sp<T>& a, sk_sp<T>& b) /*noexcept*/ {
+ a.swap(b);
+}
+
+template <typename T, typename U> inline bool operator==(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return a.get() == b.get();
+}
+template <typename T> inline bool operator==(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
+ return !a;
+}
+template <typename T> inline bool operator==(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
+ return !b;
+}
+
+template <typename T, typename U> inline bool operator!=(const sk_sp<T>& a, const sk_sp<U>& b) {
+ return a.get() != b.get();
+}
+template <typename T> inline bool operator!=(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
+ return static_cast<bool>(a);
+}
+template <typename T> inline bool operator!=(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
+ return static_cast<bool>(b);
+}
+
+template <typename C, typename CT, typename T>
+auto operator<<(std::basic_ostream<C, CT>& os, const sk_sp<T>& sp) -> decltype(os << sp.get()) {
+ return os << sp.get();
+}
+
+template <typename T, typename... Args>
+sk_sp<T> sk_make_sp(Args&&... args) {
+ return sk_sp<T>(new T(std::forward<Args>(args)...));
+}
+
+/*
+ * Returns a sk_sp wrapping the provided ptr AND calls ref on it (if not null).
+ *
+ * This is different than the semantics of the constructor for sk_sp, which just wraps the ptr,
+ * effectively "adopting" it.
+ */
+template <typename T> sk_sp<T> sk_ref_sp(T* obj) {
+ return sk_sp<T>(SkSafeRef(obj));
+}
+
+template <typename T> sk_sp<T> sk_ref_sp(const T* obj) {
+ return sk_sp<T>(const_cast<T*>(SkSafeRef(obj)));
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkRegion.h b/gfx/skia/skia/include/core/SkRegion.h
new file mode 100644
index 0000000000..6f8aa25d54
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkRegion.h
@@ -0,0 +1,678 @@
+/*
+ * Copyright 2005 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRegion_DEFINED
+#define SkRegion_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/base/SkTypeTraits.h"
+
+class SkPath;
+class SkRgnBuilder;
+
+/** \class SkRegion
+ SkRegion describes the set of pixels used to clip SkCanvas. SkRegion is compact,
+ efficiently storing a single integer rectangle, or a run length encoded array
+ of rectangles. SkRegion may reduce the current SkCanvas clip, or may be drawn as
+ one or more integer rectangles. SkRegion iterator returns the scan lines or
+ rectangles contained by it, optionally intersecting a bounding rectangle.
+*/
+class SK_API SkRegion {
+ typedef int32_t RunType;
+public:
+
+ /** Constructs an empty SkRegion. SkRegion is set to empty bounds
+ at (0, 0) with zero width and height.
+
+ @return empty SkRegion
+
+ example: https://fiddle.skia.org/c/@Region_empty_constructor
+ */
+ SkRegion();
+
+ /** Constructs a copy of an existing region.
+ Copy constructor makes two regions identical by value. Internally, region and
+ the returned result share pointer values. The underlying SkRect array is
+ copied when modified.
+
+ Creating a SkRegion copy is very efficient and never allocates memory.
+ SkRegion are always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param region SkRegion to copy by value
+ @return copy of SkRegion
+
+ example: https://fiddle.skia.org/c/@Region_copy_const_SkRegion
+ */
+ SkRegion(const SkRegion& region);
+
+ /** Constructs a rectangular SkRegion matching the bounds of rect.
+
+ @param rect bounds of constructed SkRegion
+ @return rectangular SkRegion
+
+ example: https://fiddle.skia.org/c/@Region_copy_const_SkIRect
+ */
+ explicit SkRegion(const SkIRect& rect);
+
+ /** Releases ownership of any shared data and deletes data if SkRegion is sole owner.
+
+ example: https://fiddle.skia.org/c/@Region_destructor
+ */
+ ~SkRegion();
+
+ /** Constructs a copy of an existing region.
+ Makes two regions identical by value. Internally, region and
+ the returned result share pointer values. The underlying SkRect array is
+ copied when modified.
+
+ Creating a SkRegion copy is very efficient and never allocates memory.
+ SkRegion are always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param region SkRegion to copy by value
+ @return SkRegion to copy by value
+
+ example: https://fiddle.skia.org/c/@Region_copy_operator
+ */
+ SkRegion& operator=(const SkRegion& region);
+
+ /** Compares SkRegion and other; returns true if they enclose exactly
+ the same area.
+
+ @param other SkRegion to compare
+ @return true if SkRegion pair are equivalent
+
+ example: https://fiddle.skia.org/c/@Region_equal1_operator
+ */
+ bool operator==(const SkRegion& other) const;
+
+ /** Compares SkRegion and other; returns true if they do not enclose the same area.
+
+ @param other SkRegion to compare
+ @return true if SkRegion pair are not equivalent
+ */
+ bool operator!=(const SkRegion& other) const {
+ return !(*this == other);
+ }
+
+ /** Sets SkRegion to src, and returns true if src bounds is not empty.
+ This makes SkRegion and src identical by value. Internally,
+ SkRegion and src share pointer values. The underlying SkRect array is
+ copied when modified.
+
+ Creating a SkRegion copy is very efficient and never allocates memory.
+ SkRegion are always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param src SkRegion to copy
+ @return copy of src
+ */
+ bool set(const SkRegion& src) {
+ *this = src;
+ return !this->isEmpty();
+ }
+
+ /** Exchanges SkIRect array of SkRegion and other. swap() internally exchanges pointers,
+ so it is lightweight and does not allocate memory.
+
+ swap() usage has largely been replaced by operator=(const SkRegion& region).
+ SkPath do not copy their content on assignment until they are written to,
+ making assignment as efficient as swap().
+
+ @param other operator=(const SkRegion& region) set
+
+ example: https://fiddle.skia.org/c/@Region_swap
+ */
+ void swap(SkRegion& other);
+
+ /** Returns true if SkRegion is empty.
+ Empty SkRegion has bounds width or height less than or equal to zero.
+ SkRegion() constructs empty SkRegion; setEmpty()
+ and setRect() with dimensionless data make SkRegion empty.
+
+ @return true if bounds has no width or height
+ */
+ bool isEmpty() const { return fRunHead == emptyRunHeadPtr(); }
+
+ /** Returns true if SkRegion is one SkIRect with positive dimensions.
+
+ @return true if SkRegion contains one SkIRect
+ */
+ bool isRect() const { return fRunHead == kRectRunHeadPtr; }
+
+ /** Returns true if SkRegion is described by more than one rectangle.
+
+ @return true if SkRegion contains more than one SkIRect
+ */
+ bool isComplex() const { return !this->isEmpty() && !this->isRect(); }
+
+ /** Returns minimum and maximum axes values of SkIRect array.
+ Returns (0, 0, 0, 0) if SkRegion is empty.
+
+ @return combined bounds of all SkIRect elements
+ */
+ const SkIRect& getBounds() const { return fBounds; }
+
+ /** Returns a value that increases with the number of
+ elements in SkRegion. Returns zero if SkRegion is empty.
+ Returns one if SkRegion equals SkIRect; otherwise, returns
+ value greater than one indicating that SkRegion is complex.
+
+ Call to compare SkRegion for relative complexity.
+
+ @return relative complexity
+
+ example: https://fiddle.skia.org/c/@Region_computeRegionComplexity
+ */
+ int computeRegionComplexity() const;
+
+ /** Appends outline of SkRegion to path.
+ Returns true if SkRegion is not empty; otherwise, returns false, and leaves path
+ unmodified.
+
+ @param path SkPath to append to
+ @return true if path changed
+
+ example: https://fiddle.skia.org/c/@Region_getBoundaryPath
+ */
+ bool getBoundaryPath(SkPath* path) const;
+
+ /** Constructs an empty SkRegion. SkRegion is set to empty bounds
+ at (0, 0) with zero width and height. Always returns false.
+
+ @return false
+
+ example: https://fiddle.skia.org/c/@Region_setEmpty
+ */
+ bool setEmpty();
+
+ /** Constructs a rectangular SkRegion matching the bounds of rect.
+ If rect is empty, constructs empty and returns false.
+
+ @param rect bounds of constructed SkRegion
+ @return true if rect is not empty
+
+ example: https://fiddle.skia.org/c/@Region_setRect
+ */
+ bool setRect(const SkIRect& rect);
+
+ /** Constructs SkRegion as the union of SkIRect in rects array. If count is
+ zero, constructs empty SkRegion. Returns false if constructed SkRegion is empty.
+
+ May be faster than repeated calls to op().
+
+ @param rects array of SkIRect
+ @param count array size
+ @return true if constructed SkRegion is not empty
+
+ example: https://fiddle.skia.org/c/@Region_setRects
+ */
+ bool setRects(const SkIRect rects[], int count);
+
+ /** Constructs a copy of an existing region.
+ Makes two regions identical by value. Internally, region and
+ the returned result share pointer values. The underlying SkRect array is
+ copied when modified.
+
+ Creating a SkRegion copy is very efficient and never allocates memory.
+ SkRegion are always copied by value from the interface; the underlying shared
+ pointers are not exposed.
+
+ @param region SkRegion to copy by value
+ @return SkRegion to copy by value
+
+ example: https://fiddle.skia.org/c/@Region_setRegion
+ */
+ bool setRegion(const SkRegion& region);
+
+ /** Constructs SkRegion to match outline of path within clip.
+ Returns false if constructed SkRegion is empty.
+
+ Constructed SkRegion draws the same pixels as path through clip when
+ anti-aliasing is disabled.
+
+ @param path SkPath providing outline
+ @param clip SkRegion containing path
+ @return true if constructed SkRegion is not empty
+
+ example: https://fiddle.skia.org/c/@Region_setPath
+ */
+ bool setPath(const SkPath& path, const SkRegion& clip);
+
+ /** Returns true if SkRegion intersects rect.
+ Returns false if either rect or SkRegion is empty, or do not intersect.
+
+ @param rect SkIRect to intersect
+ @return true if rect and SkRegion have area in common
+
+ example: https://fiddle.skia.org/c/@Region_intersects
+ */
+ bool intersects(const SkIRect& rect) const;
+
+ /** Returns true if SkRegion intersects other.
+ Returns false if either other or SkRegion is empty, or do not intersect.
+
+ @param other SkRegion to intersect
+ @return true if other and SkRegion have area in common
+
+ example: https://fiddle.skia.org/c/@Region_intersects_2
+ */
+ bool intersects(const SkRegion& other) const;
+
+ /** Returns true if SkIPoint (x, y) is inside SkRegion.
+ Returns false if SkRegion is empty.
+
+ @param x test SkIPoint x-coordinate
+ @param y test SkIPoint y-coordinate
+ @return true if (x, y) is inside SkRegion
+
+ example: https://fiddle.skia.org/c/@Region_contains
+ */
+ bool contains(int32_t x, int32_t y) const;
+
+ /** Returns true if other is completely inside SkRegion.
+ Returns false if SkRegion or other is empty.
+
+ @param other SkIRect to contain
+ @return true if other is inside SkRegion
+
+ example: https://fiddle.skia.org/c/@Region_contains_2
+ */
+ bool contains(const SkIRect& other) const;
+
+ /** Returns true if other is completely inside SkRegion.
+ Returns false if SkRegion or other is empty.
+
+ @param other SkRegion to contain
+ @return true if other is inside SkRegion
+
+ example: https://fiddle.skia.org/c/@Region_contains_3
+ */
+ bool contains(const SkRegion& other) const;
+
+ /** Returns true if SkRegion is a single rectangle and contains r.
+ May return false even though SkRegion contains r.
+
+ @param r SkIRect to contain
+ @return true quickly if r points are equal or inside
+ */
+ bool quickContains(const SkIRect& r) const {
+ SkASSERT(this->isEmpty() == fBounds.isEmpty()); // valid region
+
+ return r.fLeft < r.fRight && r.fTop < r.fBottom &&
+ fRunHead == kRectRunHeadPtr && // this->isRect()
+ /* fBounds.contains(left, top, right, bottom); */
+ fBounds.fLeft <= r.fLeft && fBounds.fTop <= r.fTop &&
+ fBounds.fRight >= r.fRight && fBounds.fBottom >= r.fBottom;
+ }
+
+ /** Returns true if SkRegion does not intersect rect.
+ Returns true if rect is empty or SkRegion is empty.
+ May return false even though SkRegion does not intersect rect.
+
+ @param rect SkIRect to intersect
+ @return true if rect does not intersect
+ */
+ bool quickReject(const SkIRect& rect) const {
+ return this->isEmpty() || rect.isEmpty() ||
+ !SkIRect::Intersects(fBounds, rect);
+ }
+
+ /** Returns true if SkRegion does not intersect rgn.
+ Returns true if rgn is empty or SkRegion is empty.
+ May return false even though SkRegion does not intersect rgn.
+
+ @param rgn SkRegion to intersect
+ @return true if rgn does not intersect
+ */
+ bool quickReject(const SkRegion& rgn) const {
+ return this->isEmpty() || rgn.isEmpty() ||
+ !SkIRect::Intersects(fBounds, rgn.fBounds);
+ }
+
+ /** Offsets SkRegion by ivector (dx, dy). Has no effect if SkRegion is empty.
+
+ @param dx x-axis offset
+ @param dy y-axis offset
+ */
+ void translate(int dx, int dy) { this->translate(dx, dy, this); }
+
+ /** Offsets SkRegion by ivector (dx, dy), writing result to dst. SkRegion may be passed
+ as dst parameter, translating SkRegion in place. Has no effect if dst is nullptr.
+ If SkRegion is empty, sets dst to empty.
+
+ @param dx x-axis offset
+ @param dy y-axis offset
+ @param dst translated result
+
+ example: https://fiddle.skia.org/c/@Region_translate_2
+ */
+ void translate(int dx, int dy, SkRegion* dst) const;
+
+ /** \enum SkRegion::Op
+ The logical operations that can be performed when combining two SkRegion.
+ */
+ enum Op {
+ kDifference_Op, //!< target minus operand
+ kIntersect_Op, //!< target intersected with operand
+ kUnion_Op, //!< target unioned with operand
+ kXOR_Op, //!< target exclusive or with operand
+ kReverseDifference_Op, //!< operand minus target
+ kReplace_Op, //!< replace target with operand
+ kLastOp = kReplace_Op, //!< last operator
+ };
+
+ static const int kOpCnt = kLastOp + 1;
+
+ /** Replaces SkRegion with the result of SkRegion op rect.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rect SkIRect operand
+ @return false if result is empty
+ */
+ bool op(const SkIRect& rect, Op op) {
+ if (this->isRect() && kIntersect_Op == op) {
+ if (!fBounds.intersect(rect)) {
+ return this->setEmpty();
+ }
+ return true;
+ }
+ return this->op(*this, rect, op);
+ }
+
+ /** Replaces SkRegion with the result of SkRegion op rgn.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rgn SkRegion operand
+ @return false if result is empty
+ */
+ bool op(const SkRegion& rgn, Op op) { return this->op(*this, rgn, op); }
+
+ /** Replaces SkRegion with the result of rect op rgn.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rect SkIRect operand
+ @param rgn SkRegion operand
+ @return false if result is empty
+
+ example: https://fiddle.skia.org/c/@Region_op_4
+ */
+ bool op(const SkIRect& rect, const SkRegion& rgn, Op op);
+
+ /** Replaces SkRegion with the result of rgn op rect.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rgn SkRegion operand
+ @param rect SkIRect operand
+ @return false if result is empty
+
+ example: https://fiddle.skia.org/c/@Region_op_5
+ */
+ bool op(const SkRegion& rgn, const SkIRect& rect, Op op);
+
+ /** Replaces SkRegion with the result of rgna op rgnb.
+ Returns true if replaced SkRegion is not empty.
+
+ @param rgna SkRegion operand
+ @param rgnb SkRegion operand
+ @return false if result is empty
+
+ example: https://fiddle.skia.org/c/@Region_op_6
+ */
+ bool op(const SkRegion& rgna, const SkRegion& rgnb, Op op);
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ /** Private. Android framework only.
+
+ @return string representation of SkRegion
+ */
+ char* toString();
+#endif
+
+ /** \class SkRegion::Iterator
+ Returns sequence of rectangles, sorted along y-axis, then x-axis, that make
+ up SkRegion.
+ */
+ class SK_API Iterator {
+ public:
+
+ /** Initializes SkRegion::Iterator with an empty SkRegion. done() on SkRegion::Iterator
+ returns true.
+            Call reset() to initialize SkRegion::Iterator at a later time.
+
+ @return empty SkRegion iterator
+ */
+ Iterator() : fRgn(nullptr), fDone(true) {}
+
+ /** Sets SkRegion::Iterator to return elements of SkIRect array in region.
+
+ @param region SkRegion to iterate
+ @return SkRegion iterator
+
+ example: https://fiddle.skia.org/c/@Region_Iterator_copy_const_SkRegion
+ */
+ Iterator(const SkRegion& region);
+
+        /** Points SkRegion::Iterator to start of SkRegion.
+ Returns true if SkRegion was set; otherwise, returns false.
+
+ @return true if SkRegion was set
+
+ example: https://fiddle.skia.org/c/@Region_Iterator_rewind
+ */
+ bool rewind();
+
+ /** Resets iterator, using the new SkRegion.
+
+ @param region SkRegion to iterate
+
+ example: https://fiddle.skia.org/c/@Region_Iterator_reset
+ */
+ void reset(const SkRegion& region);
+
+ /** Returns true if SkRegion::Iterator is pointing to final SkIRect in SkRegion.
+
+ @return true if data parsing is complete
+ */
+ bool done() const { return fDone; }
+
+ /** Advances SkRegion::Iterator to next SkIRect in SkRegion if it is not done.
+
+ example: https://fiddle.skia.org/c/@Region_Iterator_next
+ */
+ void next();
+
+ /** Returns SkIRect element in SkRegion. Does not return predictable results if SkRegion
+ is empty.
+
+ @return part of SkRegion as SkIRect
+ */
+ const SkIRect& rect() const { return fRect; }
+
+ /** Returns SkRegion if set; otherwise, returns nullptr.
+
+ @return iterated SkRegion
+ */
+ const SkRegion* rgn() const { return fRgn; }
+
+ private:
+ const SkRegion* fRgn;
+ const SkRegion::RunType* fRuns;
+ SkIRect fRect = {0, 0, 0, 0};
+ bool fDone;
+ };
+
+ /** \class SkRegion::Cliperator
+ Returns the sequence of rectangles, sorted along y-axis, then x-axis, that make
+ up SkRegion intersected with the specified clip rectangle.
+ */
+ class SK_API Cliperator {
+ public:
+
+ /** Sets SkRegion::Cliperator to return elements of SkIRect array in SkRegion within clip.
+
+ @param region SkRegion to iterate
+ @param clip bounds of iteration
+ @return SkRegion iterator
+
+ example: https://fiddle.skia.org/c/@Region_Cliperator_const_SkRegion_const_SkIRect
+ */
+ Cliperator(const SkRegion& region, const SkIRect& clip);
+
+ /** Returns true if SkRegion::Cliperator is pointing to final SkIRect in SkRegion.
+
+ @return true if data parsing is complete
+ */
+ bool done() { return fDone; }
+
+ /** Advances iterator to next SkIRect in SkRegion contained by clip.
+
+ example: https://fiddle.skia.org/c/@Region_Cliperator_next
+ */
+ void next();
+
+ /** Returns SkIRect element in SkRegion, intersected with clip passed to
+ SkRegion::Cliperator constructor. Does not return predictable results if SkRegion
+ is empty.
+
+ @return part of SkRegion inside clip as SkIRect
+ */
+ const SkIRect& rect() const { return fRect; }
+
+ private:
+ Iterator fIter;
+ SkIRect fClip;
+ SkIRect fRect = {0, 0, 0, 0};
+ bool fDone;
+ };
+
+ /** \class SkRegion::Spanerator
+ Returns the line segment ends within SkRegion that intersect a horizontal line.
+ */
+ class Spanerator {
+ public:
+
+ /** Sets SkRegion::Spanerator to return line segments in SkRegion on scan line.
+
+ @param region SkRegion to iterate
+ @param y horizontal line to intersect
+ @param left bounds of iteration
+ @param right bounds of iteration
+ @return SkRegion iterator
+
+ example: https://fiddle.skia.org/c/@Region_Spanerator_const_SkRegion_int_int_int
+ */
+ Spanerator(const SkRegion& region, int y, int left, int right);
+
+ /** Advances iterator to next span intersecting SkRegion within line segment provided
+ in constructor. Returns true if interval was found.
+
+ @param left pointer to span start; may be nullptr
+ @param right pointer to span end; may be nullptr
+ @return true if interval was found
+
+ example: https://fiddle.skia.org/c/@Region_Spanerator_next
+ */
+ bool next(int* left, int* right);
+
+ private:
+ const SkRegion::RunType* fRuns;
+ int fLeft, fRight;
+ bool fDone;
+ };
+
+ /** Writes SkRegion to buffer, and returns number of bytes written.
+        If buffer is nullptr, returns number of bytes that would be written.
+
+ @param buffer storage for binary data
+ @return size of SkRegion
+
+ example: https://fiddle.skia.org/c/@Region_writeToMemory
+ */
+ size_t writeToMemory(void* buffer) const;
+
+ /** Constructs SkRegion from buffer of size length. Returns bytes read.
+ Returned value will be multiple of four or zero if length was too small.
+
+ @param buffer storage for binary data
+ @param length size of buffer
+ @return bytes read
+
+ example: https://fiddle.skia.org/c/@Region_readFromMemory
+ */
+ size_t readFromMemory(const void* buffer, size_t length);
+
+ using sk_is_trivially_relocatable = std::true_type;
+
+private:
+ static constexpr int kOpCount = kReplace_Op + 1;
+
+ // T
+ // [B N L R S]
+ // S
+ static constexpr int kRectRegionRuns = 7;
+
+ struct RunHead;
+
+ static RunHead* emptyRunHeadPtr() { return (SkRegion::RunHead*) -1; }
+ static constexpr RunHead* kRectRunHeadPtr = nullptr;
+
+ // allocate space for count runs
+ void allocateRuns(int count);
+ void allocateRuns(int count, int ySpanCount, int intervalCount);
+ void allocateRuns(const RunHead& src);
+
+ SkDEBUGCODE(void dump() const;)
+
+ SkIRect fBounds;
+ RunHead* fRunHead;
+
+ static_assert(::sk_is_trivially_relocatable<decltype(fBounds)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fRunHead)>::value);
+
+ void freeRuns();
+
+ /**
+ * Return the runs from this region, consing up fake runs if the region
+ * is empty or a rect. In those 2 cases, we use tmpStorage to hold the
+ * run data.
+ */
+ const RunType* getRuns(RunType tmpStorage[], int* intervals) const;
+
+ // This is called with runs[] that do not yet have their interval-count
+ // field set on each scanline. That is computed as part of this call
+ // (inside ComputeRunBounds).
+ bool setRuns(RunType runs[], int count);
+
+ int count_runtype_values(int* itop, int* ibot) const;
+
+ bool isValid() const;
+
+ static void BuildRectRuns(const SkIRect& bounds,
+ RunType runs[kRectRegionRuns]);
+
+ // If the runs define a simple rect, return true and set bounds to that
+ // rect. If not, return false and ignore bounds.
+ static bool RunsAreARect(const SkRegion::RunType runs[], int count,
+ SkIRect* bounds);
+
+ /**
+ * If the last arg is null, just return if the result is non-empty,
+ * else store the result in the last arg.
+ */
+ static bool Oper(const SkRegion&, const SkRegion&, SkRegion::Op, SkRegion*);
+
+ friend struct RunHead;
+ friend class Iterator;
+ friend class Spanerator;
+ friend class SkRegionPriv;
+ friend class SkRgnBuilder;
+ friend class SkFlatRegion;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSamplingOptions.h b/gfx/skia/skia/include/core/SkSamplingOptions.h
new file mode 100644
index 0000000000..24b6d51659
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSamplingOptions.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageSampling_DEFINED
+#define SkImageSampling_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <algorithm>
+#include <new>
+
+enum class SkFilterMode {
+ kNearest, // single sample point (nearest neighbor)
+  kLinear,    // interpolate between 2x2 sample points (bilinear interpolation)
+
+ kLast = kLinear,
+};
+static constexpr int kSkFilterModeCount = static_cast<int>(SkFilterMode::kLast) + 1;
+
+enum class SkMipmapMode {
+ kNone, // ignore mipmap levels, sample from the "base"
+ kNearest, // sample from the nearest level
+ kLinear, // interpolate between the two nearest levels
+
+ kLast = kLinear,
+};
+static constexpr int kSkMipmapModeCount = static_cast<int>(SkMipmapMode::kLast) + 1;
+
+/*
+ * Specify B and C (each between 0...1) to create a shader that applies the corresponding
+ * cubic reconstruction filter to the image.
+ *
+ * Example values:
+ * B = 1/3, C = 1/3 "Mitchell" filter
+ * B = 0, C = 1/2 "Catmull-Rom" filter
+ *
+ * See "Reconstruction Filters in Computer Graphics"
+ * Don P. Mitchell
+ * Arun N. Netravali
+ * 1988
+ * https://www.cs.utexas.edu/~fussell/courses/cs384g-fall2013/lectures/mitchell/Mitchell.pdf
+ *
+ * Desmos worksheet https://www.desmos.com/calculator/aghdpicrvr
+ * Nice overview https://entropymine.com/imageworsener/bicubic/
+ */
+struct SkCubicResampler {
+ float B, C;
+
+ // Historic default for kHigh_SkFilterQuality
+ static constexpr SkCubicResampler Mitchell() { return {1/3.0f, 1/3.0f}; }
+ static constexpr SkCubicResampler CatmullRom() { return {0.0f, 1/2.0f}; }
+};
+
+struct SK_API SkSamplingOptions {
+ const int maxAniso = 0;
+ const bool useCubic = false;
+ const SkCubicResampler cubic = {0, 0};
+ const SkFilterMode filter = SkFilterMode::kNearest;
+ const SkMipmapMode mipmap = SkMipmapMode::kNone;
+
+ constexpr SkSamplingOptions() = default;
+ SkSamplingOptions(const SkSamplingOptions&) = default;
+ SkSamplingOptions& operator=(const SkSamplingOptions& that) {
+ this->~SkSamplingOptions(); // A pedantic no-op.
+ new (this) SkSamplingOptions(that);
+ return *this;
+ }
+
+ constexpr SkSamplingOptions(SkFilterMode fm, SkMipmapMode mm)
+ : filter(fm)
+ , mipmap(mm) {}
+
+ explicit constexpr SkSamplingOptions(SkFilterMode fm)
+ : filter(fm)
+ , mipmap(SkMipmapMode::kNone) {}
+
+ explicit constexpr SkSamplingOptions(const SkCubicResampler& c)
+ : useCubic(true)
+ , cubic(c) {}
+
+ static constexpr SkSamplingOptions Aniso(int maxAniso) {
+ return SkSamplingOptions{std::max(maxAniso, 1)};
+ }
+
+ bool operator==(const SkSamplingOptions& other) const {
+ return maxAniso == other.maxAniso
+ && useCubic == other.useCubic
+ && cubic.B == other.cubic.B
+ && cubic.C == other.cubic.C
+ && filter == other.filter
+ && mipmap == other.mipmap;
+ }
+ bool operator!=(const SkSamplingOptions& other) const { return !(*this == other); }
+
+ bool isAniso() const { return maxAniso != 0; }
+
+private:
+ constexpr SkSamplingOptions(int maxAniso) : maxAniso(maxAniso) {}
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkScalar.h b/gfx/skia/skia/include/core/SkScalar.h
new file mode 100644
index 0000000000..f3e11b34c2
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkScalar.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalar_DEFINED
+#define SkScalar_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkFloatingPoint.h"
+
+typedef float SkScalar;
+
+#define SK_Scalar1 1.0f
+#define SK_ScalarHalf 0.5f
+#define SK_ScalarSqrt2 SK_FloatSqrt2
+#define SK_ScalarPI SK_FloatPI
+#define SK_ScalarTanPIOver8 0.414213562f
+#define SK_ScalarRoot2Over2 0.707106781f
+#define SK_ScalarMax 3.402823466e+38f
+#define SK_ScalarMin (-SK_ScalarMax)
+#define SK_ScalarInfinity SK_FloatInfinity
+#define SK_ScalarNegativeInfinity SK_FloatNegativeInfinity
+#define SK_ScalarNaN SK_FloatNaN
+
+#define SkScalarFloorToScalar(x) sk_float_floor(x)
+#define SkScalarCeilToScalar(x) sk_float_ceil(x)
+#define SkScalarRoundToScalar(x) sk_float_round(x)
+#define SkScalarTruncToScalar(x) sk_float_trunc(x)
+
+#define SkScalarFloorToInt(x) sk_float_floor2int(x)
+#define SkScalarCeilToInt(x) sk_float_ceil2int(x)
+#define SkScalarRoundToInt(x) sk_float_round2int(x)
+
+#define SkScalarAbs(x) sk_float_abs(x)
+#define SkScalarCopySign(x, y) sk_float_copysign(x, y)
+#define SkScalarMod(x, y) sk_float_mod(x,y)
+#define SkScalarSqrt(x) sk_float_sqrt(x)
+#define SkScalarPow(b, e) sk_float_pow(b, e)
+
+#define SkScalarSin(radians) (float)sk_float_sin(radians)
+#define SkScalarCos(radians) (float)sk_float_cos(radians)
+#define SkScalarTan(radians) (float)sk_float_tan(radians)
+#define SkScalarASin(val) (float)sk_float_asin(val)
+#define SkScalarACos(val) (float)sk_float_acos(val)
+#define SkScalarATan2(y, x) (float)sk_float_atan2(y,x)
+#define SkScalarExp(x) (float)sk_float_exp(x)
+#define SkScalarLog(x) (float)sk_float_log(x)
+#define SkScalarLog2(x) (float)sk_float_log2(x)
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define SkIntToScalar(x) static_cast<SkScalar>(x)
+#define SkIntToFloat(x) static_cast<float>(x)
+#define SkScalarTruncToInt(x) sk_float_saturate2int(x)
+
+#define SkScalarToFloat(x) static_cast<float>(x)
+#define SkFloatToScalar(x) static_cast<SkScalar>(x)
+#define SkScalarToDouble(x) static_cast<double>(x)
+#define SkDoubleToScalar(x) sk_double_to_float(x)
+
+static inline bool SkScalarIsNaN(SkScalar x) { return x != x; }
+
+/** Returns true if x is not NaN and not infinite
+ */
+static inline bool SkScalarIsFinite(SkScalar x) { return sk_float_isfinite(x); }
+
+static inline bool SkScalarsAreFinite(SkScalar a, SkScalar b) {
+ return sk_floats_are_finite(a, b);
+}
+
+static inline bool SkScalarsAreFinite(const SkScalar array[], int count) {
+ return sk_floats_are_finite(array, count);
+}
+
+/** Returns the fractional part of the scalar. */
+static inline SkScalar SkScalarFraction(SkScalar x) {
+ return x - SkScalarTruncToScalar(x);
+}
+
+static inline SkScalar SkScalarSquare(SkScalar x) { return x * x; }
+
+#define SkScalarInvert(x) sk_ieee_float_divide_TODO_IS_DIVIDE_BY_ZERO_SAFE_HERE(SK_Scalar1, (x))
+#define SkScalarAve(a, b) (((a) + (b)) * SK_ScalarHalf)
+#define SkScalarHalf(a) ((a) * SK_ScalarHalf)
+
+#define SkDegreesToRadians(degrees) ((degrees) * (SK_ScalarPI / 180))
+#define SkRadiansToDegrees(radians) ((radians) * (180 / SK_ScalarPI))
+
+static inline bool SkScalarIsInt(SkScalar x) {
+ return x == SkScalarFloorToScalar(x);
+}
+
+/**
+ * Returns -1 || 0 || 1 depending on the sign of value:
+ * -1 if x < 0
+ * 0 if x == 0
+ * 1 if x > 0
+ */
+static inline int SkScalarSignAsInt(SkScalar x) {
+ return x < 0 ? -1 : (x > 0);
+}
+
+// Scalar result version of above
+static inline SkScalar SkScalarSignAsScalar(SkScalar x) {
+ return x < 0 ? -SK_Scalar1 : ((x > 0) ? SK_Scalar1 : 0);
+}
+
+#define SK_ScalarNearlyZero (SK_Scalar1 / (1 << 12))
+
+static inline bool SkScalarNearlyZero(SkScalar x,
+ SkScalar tolerance = SK_ScalarNearlyZero) {
+ SkASSERT(tolerance >= 0);
+ return SkScalarAbs(x) <= tolerance;
+}
+
+static inline bool SkScalarNearlyEqual(SkScalar x, SkScalar y,
+ SkScalar tolerance = SK_ScalarNearlyZero) {
+ SkASSERT(tolerance >= 0);
+ return SkScalarAbs(x-y) <= tolerance;
+}
+
+#define SK_ScalarSinCosNearlyZero (SK_Scalar1 / (1 << 16))
+
+static inline float SkScalarSinSnapToZero(SkScalar radians) {
+ float v = SkScalarSin(radians);
+ return SkScalarNearlyZero(v, SK_ScalarSinCosNearlyZero) ? 0.0f : v;
+}
+
+static inline float SkScalarCosSnapToZero(SkScalar radians) {
+ float v = SkScalarCos(radians);
+ return SkScalarNearlyZero(v, SK_ScalarSinCosNearlyZero) ? 0.0f : v;
+}
+
+/** Linearly interpolate between A and B, based on t.
+ If t is 0, return A
+ If t is 1, return B
+ else interpolate.
+ t must be [0..SK_Scalar1]
+*/
+static inline SkScalar SkScalarInterp(SkScalar A, SkScalar B, SkScalar t) {
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+ return A + (B - A) * t;
+}
+
+/** Interpolate along the function described by (keys[length], values[length])
+ for the passed searchKey. SearchKeys outside the range keys[0]-keys[Length]
+ clamp to the min or max value. This function assumes the number of pairs
+ (length) will be small and a linear search is used.
+
+ Repeated keys are allowed for discontinuous functions (so long as keys is
+ monotonically increasing). If key is the value of a repeated scalar in
+ keys the first one will be used.
+*/
+SkScalar SkScalarInterpFunc(SkScalar searchKey, const SkScalar keys[],
+ const SkScalar values[], int length);
+
+/*
+ * Helper to compare an array of scalars.
+ */
+static inline bool SkScalarsEqual(const SkScalar a[], const SkScalar b[], int n) {
+ SkASSERT(n >= 0);
+ for (int i = 0; i < n; ++i) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSerialProcs.h b/gfx/skia/skia/include/core/SkSerialProcs.h
new file mode 100644
index 0000000000..87e10d847c
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSerialProcs.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSerialProcs_DEFINED
+#define SkSerialProcs_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkTypeface.h"
+
+/**
+ * A serial-proc is asked to serialize the specified object (e.g. picture or image).
+ * If a data object is returned, it will be used (even if it is zero-length).
+ * If null is returned, then Skia will take its default action.
+ *
+ * The default action for pictures is to use Skia's internal format.
+ * The default action for images is to encode either in its native format or PNG.
+ * The default action for typefaces is to use Skia's internal format.
+ */
+
+typedef sk_sp<SkData> (*SkSerialPictureProc)(SkPicture*, void* ctx);
+typedef sk_sp<SkData> (*SkSerialImageProc)(SkImage*, void* ctx);
+typedef sk_sp<SkData> (*SkSerialTypefaceProc)(SkTypeface*, void* ctx);
+
+/**
+ * Called with the encoded form of a picture (previously written with a custom
+ * SkSerialPictureProc proc). Return a picture object, or nullptr indicating failure.
+ */
+typedef sk_sp<SkPicture> (*SkDeserialPictureProc)(const void* data, size_t length, void* ctx);
+
+/**
+ * Called with the encoded from of an image. The proc can return an image object, or if it
+ * returns nullptr, then Skia will take its default action to try to create an image from the data.
+ *
+ * Note that unlike SkDeserialPictureProc and SkDeserialTypefaceProc, return nullptr from this
+ * does not indicate failure, but is a signal for Skia to take its default action.
+ */
+typedef sk_sp<SkImage> (*SkDeserialImageProc)(const void* data, size_t length, void* ctx);
+
+/**
+ * Called with the encoded form of a typeface (previously written with a custom
+ * SkSerialTypefaceProc proc). Return a typeface object, or nullptr indicating failure.
+ */
+typedef sk_sp<SkTypeface> (*SkDeserialTypefaceProc)(const void* data, size_t length, void* ctx);
+
+struct SK_API SkSerialProcs {
+ SkSerialPictureProc fPictureProc = nullptr;
+ void* fPictureCtx = nullptr;
+
+ SkSerialImageProc fImageProc = nullptr;
+ void* fImageCtx = nullptr;
+
+ SkSerialTypefaceProc fTypefaceProc = nullptr;
+ void* fTypefaceCtx = nullptr;
+};
+
+struct SK_API SkDeserialProcs {
+ SkDeserialPictureProc fPictureProc = nullptr;
+ void* fPictureCtx = nullptr;
+
+ SkDeserialImageProc fImageProc = nullptr;
+ void* fImageCtx = nullptr;
+
+ SkDeserialTypefaceProc fTypefaceProc = nullptr;
+ void* fTypefaceCtx = nullptr;
+};
+
+#endif
+
diff --git a/gfx/skia/skia/include/core/SkShader.h b/gfx/skia/skia/include/core/SkShader.h
new file mode 100644
index 0000000000..be42a87b9a
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkShader.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShader_DEFINED
+#define SkShader_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkTileMode.h"
+
+class SkArenaAlloc;
+class SkBitmap;
+class SkBlender;
+class SkColorFilter;
+class SkColorSpace;
+class SkImage;
+class SkPath;
+class SkPicture;
+class SkRasterPipeline;
+class GrFragmentProcessor;
+
+/** \class SkShader
+ *
+ * Shaders specify the source color(s) for what is being drawn. If a paint
+ * has no shader, then the paint's color is used. If the paint has a
+ * shader, then the shader's color(s) are use instead, but they are
+ * modulated by the paint's alpha. This makes it easy to create a shader
+ * once (e.g. bitmap tiling or gradient) and then change its transparency
+ * w/o having to modify the original shader... only the paint's alpha needs
+ * to be modified.
+ */
+class SK_API SkShader : public SkFlattenable {
+public:
+ /**
+ * Returns true if the shader is guaranteed to produce only opaque
+ * colors, subject to the SkPaint using the shader to apply an opaque
+ * alpha value. Subclasses should override this to allow some
+ * optimizations.
+ */
+ virtual bool isOpaque() const { return false; }
+
+ /**
+ * Iff this shader is backed by a single SkImage, return its ptr (the caller must ref this
+ * if they want to keep it longer than the lifetime of the shader). If not, return nullptr.
+ */
+ SkImage* isAImage(SkMatrix* localMatrix, SkTileMode xy[2]) const;
+
+ bool isAImage() const {
+ return this->isAImage(nullptr, (SkTileMode*)nullptr) != nullptr;
+ }
+
+ //////////////////////////////////////////////////////////////////////////
+ // Methods to create combinations or variants of shaders
+
+ /**
+ * Return a shader that will apply the specified localMatrix to this shader.
+ * The specified matrix will be applied before any matrix associated with this shader.
+ */
+ sk_sp<SkShader> makeWithLocalMatrix(const SkMatrix&) const;
+
+ /**
+ * Create a new shader that produces the same colors as invoking this shader and then applying
+ * the colorfilter.
+ */
+ sk_sp<SkShader> makeWithColorFilter(sk_sp<SkColorFilter>) const;
+
+private:
+ SkShader() = default;
+ friend class SkShaderBase;
+
+ using INHERITED = SkFlattenable;
+};
+
+class SK_API SkShaders {
+public:
+ static sk_sp<SkShader> Empty();
+ static sk_sp<SkShader> Color(SkColor);
+ static sk_sp<SkShader> Color(const SkColor4f&, sk_sp<SkColorSpace>);
+ static sk_sp<SkShader> Blend(SkBlendMode mode, sk_sp<SkShader> dst, sk_sp<SkShader> src);
+ static sk_sp<SkShader> Blend(sk_sp<SkBlender>, sk_sp<SkShader> dst, sk_sp<SkShader> src);
+ static sk_sp<SkShader> CoordClamp(sk_sp<SkShader>, const SkRect& subset);
+private:
+ SkShaders() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSize.h b/gfx/skia/skia/include/core/SkSize.h
new file mode 100644
index 0000000000..867f4eeb97
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSize.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSize_DEFINED
+#define SkSize_DEFINED
+
+#include "include/core/SkScalar.h"
+
+#include <cstdint>
+
+struct SkISize {
+ int32_t fWidth;
+ int32_t fHeight;
+
+ static constexpr SkISize Make(int32_t w, int32_t h) { return {w, h}; }
+
+ static constexpr SkISize MakeEmpty() { return {0, 0}; }
+
+ void set(int32_t w, int32_t h) { *this = SkISize{w, h}; }
+
+ /** Returns true iff fWidth == 0 && fHeight == 0
+ */
+ bool isZero() const { return 0 == fWidth && 0 == fHeight; }
+
+ /** Returns true if either width or height are <= 0 */
+ bool isEmpty() const { return fWidth <= 0 || fHeight <= 0; }
+
+ /** Set the width and height to 0 */
+ void setEmpty() { fWidth = fHeight = 0; }
+
+ constexpr int32_t width() const { return fWidth; }
+ constexpr int32_t height() const { return fHeight; }
+
+ constexpr int64_t area() const { return fWidth * fHeight; }
+
+ bool equals(int32_t w, int32_t h) const { return fWidth == w && fHeight == h; }
+};
+
+static inline bool operator==(const SkISize& a, const SkISize& b) {
+ return a.fWidth == b.fWidth && a.fHeight == b.fHeight;
+}
+
+static inline bool operator!=(const SkISize& a, const SkISize& b) { return !(a == b); }
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkSize {
+ SkScalar fWidth;
+ SkScalar fHeight;
+
+ static SkSize Make(SkScalar w, SkScalar h) { return {w, h}; }
+
+ static SkSize Make(const SkISize& src) {
+ return {SkIntToScalar(src.width()), SkIntToScalar(src.height())};
+ }
+
+ static SkSize MakeEmpty() { return {0, 0}; }
+
+ void set(SkScalar w, SkScalar h) { *this = SkSize{w, h}; }
+
+ /** Returns true iff fWidth == 0 && fHeight == 0
+ */
+ bool isZero() const { return 0 == fWidth && 0 == fHeight; }
+
+ /** Returns true if either width or height are <= 0 */
+ bool isEmpty() const { return fWidth <= 0 || fHeight <= 0; }
+
+ /** Set the width and height to 0 */
+ void setEmpty() { *this = SkSize{0, 0}; }
+
+ SkScalar width() const { return fWidth; }
+ SkScalar height() const { return fHeight; }
+
+ bool equals(SkScalar w, SkScalar h) const { return fWidth == w && fHeight == h; }
+
+ SkISize toRound() const { return {SkScalarRoundToInt(fWidth), SkScalarRoundToInt(fHeight)}; }
+
+ SkISize toCeil() const { return {SkScalarCeilToInt(fWidth), SkScalarCeilToInt(fHeight)}; }
+
+ SkISize toFloor() const { return {SkScalarFloorToInt(fWidth), SkScalarFloorToInt(fHeight)}; }
+};
+
+static inline bool operator==(const SkSize& a, const SkSize& b) {
+ return a.fWidth == b.fWidth && a.fHeight == b.fHeight;
+}
+
+static inline bool operator!=(const SkSize& a, const SkSize& b) { return !(a == b); }
+#endif
diff --git a/gfx/skia/skia/include/core/SkSpan.h b/gfx/skia/skia/include/core/SkSpan.h
new file mode 100644
index 0000000000..37cac632b1
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSpan.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// We want SkSpan to be a public API, but it is also fundamental to many of our internal types.
+// Thus, we have a public file that clients can include. This file defers to the private copy
+// so we do not have a dependency cycle from our "base" files to our "core" files.
+
+#include "include/private/base/SkSpan_impl.h" // IWYU pragma: export
+
diff --git a/gfx/skia/skia/include/core/SkStream.h b/gfx/skia/skia/include/core/SkStream.h
new file mode 100644
index 0000000000..c582c80a05
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkStream.h
@@ -0,0 +1,523 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStream_DEFINED
+#define SkStream_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkCPUTypes.h"
+#include "include/private/base/SkTo.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <memory>
+#include <utility>
+class SkStreamAsset;
+
+/**
+ * SkStream -- abstraction for a source of bytes. Subclasses can be backed by
+ * memory, or a file, or something else.
+ *
+ * NOTE:
+ *
+ * Classic "streams" APIs are sort of async, in that on a request for N
+ * bytes, they may return fewer than N bytes on a given call, in which case
+ * the caller can "try again" to get more bytes, eventually (modulo an error)
+ * receiving their total N bytes.
+ *
+ * Skia streams behave differently. They are effectively synchronous, and will
+ * always return all N bytes of the request if possible. If they return fewer
+ * (the read() call returns the number of bytes read) then that means there is
+ * no more data (at EOF or hit an error). The caller should *not* call again
+ * in hopes of fulfilling more of the request.
+ */
+class SK_API SkStream {
+public:
+ virtual ~SkStream() {}
+ SkStream() {}
+
+ /**
+ * Attempts to open the specified file as a stream, returns nullptr on failure.
+ */
+ static std::unique_ptr<SkStreamAsset> MakeFromFile(const char path[]);
+
+ /** Reads or skips size number of bytes.
+ * If buffer == NULL, skip size bytes, return how many were skipped.
+ * If buffer != NULL, copy size bytes into buffer, return how many were copied.
+ * @param buffer when NULL skip size bytes, otherwise copy size bytes into buffer
+ * @param size the number of bytes to skip or copy
+ * @return the number of bytes actually read.
+ */
+ virtual size_t read(void* buffer, size_t size) = 0;
+
+ /** Skip size number of bytes.
+ * @return the actual number bytes that could be skipped.
+ */
+ size_t skip(size_t size) {
+ return this->read(nullptr, size);
+ }
+
+ /**
+ * Attempt to peek at size bytes.
+ * If this stream supports peeking, copy min(size, peekable bytes) into
+ * buffer, and return the number of bytes copied.
+ * If the stream does not support peeking, or cannot peek any bytes,
+ * return 0 and leave buffer unchanged.
+ * The stream is guaranteed to be in the same visible state after this
+ * call, regardless of success or failure.
+ * @param buffer Must not be NULL, and must be at least size bytes. Destination
+ * to copy bytes.
+ * @param size Number of bytes to copy.
+ * @return The number of bytes peeked/copied.
+ */
+ virtual size_t peek(void* /*buffer*/, size_t /*size*/) const { return 0; }
+
+ /** Returns true when all the bytes in the stream have been read.
+ * This may return true early (when there are no more bytes to be read)
+ * or late (after the first unsuccessful read).
+ */
+ virtual bool isAtEnd() const = 0;
+
+ bool SK_WARN_UNUSED_RESULT readS8(int8_t*);
+ bool SK_WARN_UNUSED_RESULT readS16(int16_t*);
+ bool SK_WARN_UNUSED_RESULT readS32(int32_t*);
+
+ bool SK_WARN_UNUSED_RESULT readU8(uint8_t* i) { return this->readS8((int8_t*)i); }
+ bool SK_WARN_UNUSED_RESULT readU16(uint16_t* i) { return this->readS16((int16_t*)i); }
+ bool SK_WARN_UNUSED_RESULT readU32(uint32_t* i) { return this->readS32((int32_t*)i); }
+
+ bool SK_WARN_UNUSED_RESULT readBool(bool* b) {
+ uint8_t i;
+ if (!this->readU8(&i)) { return false; }
+ *b = (i != 0);
+ return true;
+ }
+ bool SK_WARN_UNUSED_RESULT readScalar(SkScalar*);
+ bool SK_WARN_UNUSED_RESULT readPackedUInt(size_t*);
+
+//SkStreamRewindable
+ /** Rewinds to the beginning of the stream. Returns true if the stream is known
+ * to be at the beginning after this call returns.
+ */
+ virtual bool rewind() { return false; }
+
+ /** Duplicates this stream. If this cannot be done, returns NULL.
+ * The returned stream will be positioned at the beginning of its data.
+ */
+ std::unique_ptr<SkStream> duplicate() const {
+ return std::unique_ptr<SkStream>(this->onDuplicate());
+ }
+ /** Duplicates this stream. If this cannot be done, returns NULL.
+ * The returned stream will be positioned the same as this stream.
+ */
+ std::unique_ptr<SkStream> fork() const {
+ return std::unique_ptr<SkStream>(this->onFork());
+ }
+
+//SkStreamSeekable
+ /** Returns true if this stream can report its current position. */
+ virtual bool hasPosition() const { return false; }
+ /** Returns the current position in the stream. If this cannot be done, returns 0. */
+ virtual size_t getPosition() const { return 0; }
+
+ /** Seeks to an absolute position in the stream. If this cannot be done, returns false.
+ * If an attempt is made to seek past the end of the stream, the position will be set
+ * to the end of the stream.
+ */
+ virtual bool seek(size_t /*position*/) { return false; }
+
+    /** Seeks to a relative offset in the stream. If this cannot be done, returns false.
+ * If an attempt is made to move to a position outside the stream, the position will be set
+ * to the closest point within the stream (beginning or end).
+ */
+ virtual bool move(long /*offset*/) { return false; }
+
+//SkStreamAsset
+ /** Returns true if this stream can report its total length. */
+ virtual bool hasLength() const { return false; }
+ /** Returns the total length of the stream. If this cannot be done, returns 0. */
+ virtual size_t getLength() const { return 0; }
+
+//SkStreamMemory
+ /** Returns the starting address for the data. If this cannot be done, returns NULL. */
+ //TODO: replace with virtual const SkData* getData()
+ virtual const void* getMemoryBase() { return nullptr; }
+
+private:
+ virtual SkStream* onDuplicate() const { return nullptr; }
+ virtual SkStream* onFork() const { return nullptr; }
+
+ SkStream(SkStream&&) = delete;
+ SkStream(const SkStream&) = delete;
+ SkStream& operator=(SkStream&&) = delete;
+ SkStream& operator=(const SkStream&) = delete;
+};
+
+/** SkStreamRewindable is a SkStream for which rewind and duplicate are required. */
+class SK_API SkStreamRewindable : public SkStream {
+public:
+ bool rewind() override = 0;
+ std::unique_ptr<SkStreamRewindable> duplicate() const {
+ return std::unique_ptr<SkStreamRewindable>(this->onDuplicate());
+ }
+private:
+ SkStreamRewindable* onDuplicate() const override = 0;
+};
+
+/** SkStreamSeekable is a SkStreamRewindable for which position, seek, move, and fork are required. */
+class SK_API SkStreamSeekable : public SkStreamRewindable {
+public:
+ std::unique_ptr<SkStreamSeekable> duplicate() const {
+ return std::unique_ptr<SkStreamSeekable>(this->onDuplicate());
+ }
+
+ bool hasPosition() const override { return true; }
+ size_t getPosition() const override = 0;
+ bool seek(size_t position) override = 0;
+ bool move(long offset) override = 0;
+
+ std::unique_ptr<SkStreamSeekable> fork() const {
+ return std::unique_ptr<SkStreamSeekable>(this->onFork());
+ }
+private:
+ SkStreamSeekable* onDuplicate() const override = 0;
+ SkStreamSeekable* onFork() const override = 0;
+};
+
+/** SkStreamAsset is a SkStreamSeekable for which getLength is required. */
+class SK_API SkStreamAsset : public SkStreamSeekable {
+public:
+ bool hasLength() const override { return true; }
+ size_t getLength() const override = 0;
+
+ std::unique_ptr<SkStreamAsset> duplicate() const {
+ return std::unique_ptr<SkStreamAsset>(this->onDuplicate());
+ }
+ std::unique_ptr<SkStreamAsset> fork() const {
+ return std::unique_ptr<SkStreamAsset>(this->onFork());
+ }
+private:
+ SkStreamAsset* onDuplicate() const override = 0;
+ SkStreamAsset* onFork() const override = 0;
+};
+
+/** SkStreamMemory is a SkStreamAsset for which getMemoryBase is required. */
+class SK_API SkStreamMemory : public SkStreamAsset {
+public:
+ const void* getMemoryBase() override = 0;
+
+ std::unique_ptr<SkStreamMemory> duplicate() const {
+ return std::unique_ptr<SkStreamMemory>(this->onDuplicate());
+ }
+ std::unique_ptr<SkStreamMemory> fork() const {
+ return std::unique_ptr<SkStreamMemory>(this->onFork());
+ }
+private:
+ SkStreamMemory* onDuplicate() const override = 0;
+ SkStreamMemory* onFork() const override = 0;
+};
+
+class SK_API SkWStream {
+public:
+ virtual ~SkWStream();
+ SkWStream() {}
+
+ /** Called to write bytes to a SkWStream. Returns true on success
+ @param buffer the address of at least size bytes to be written to the stream
+ @param size The number of bytes in buffer to write to the stream
+ @return true on success
+ */
+ virtual bool write(const void* buffer, size_t size) = 0;
+ virtual void flush();
+
+ virtual size_t bytesWritten() const = 0;
+
+ // helpers
+
+ bool write8(U8CPU value) {
+ uint8_t v = SkToU8(value);
+ return this->write(&v, 1);
+ }
+ bool write16(U16CPU value) {
+ uint16_t v = SkToU16(value);
+ return this->write(&v, 2);
+ }
+ bool write32(uint32_t v) {
+ return this->write(&v, 4);
+ }
+
+ bool writeText(const char text[]) {
+ SkASSERT(text);
+ return this->write(text, std::strlen(text));
+ }
+
+ bool newline() { return this->write("\n", std::strlen("\n")); }
+
+ bool writeDecAsText(int32_t);
+ bool writeBigDecAsText(int64_t, int minDigits = 0);
+ bool writeHexAsText(uint32_t, int minDigits = 0);
+ bool writeScalarAsText(SkScalar);
+
+ bool writeBool(bool v) { return this->write8(v); }
+ bool writeScalar(SkScalar);
+ bool writePackedUInt(size_t);
+
+ bool writeStream(SkStream* input, size_t length);
+
+ /**
+ * This returns the number of bytes in the stream required to store
+ * 'value'.
+ */
+ static int SizeOfPackedUInt(size_t value);
+
+private:
+ SkWStream(const SkWStream&) = delete;
+ SkWStream& operator=(const SkWStream&) = delete;
+};
+
+class SK_API SkNullWStream : public SkWStream {
+public:
+ SkNullWStream() : fBytesWritten(0) {}
+
+ bool write(const void* , size_t n) override { fBytesWritten += n; return true; }
+ void flush() override {}
+ size_t bytesWritten() const override { return fBytesWritten; }
+
+private:
+ size_t fBytesWritten;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+/** A stream that wraps a C FILE* file stream. */
+class SK_API SkFILEStream : public SkStreamAsset {
+public:
+ /** Initialize the stream by calling sk_fopen on the specified path.
+ * This internal stream will be closed in the destructor.
+ */
+ explicit SkFILEStream(const char path[] = nullptr);
+
+ /** Initialize the stream with an existing C FILE stream.
+ * The current position of the C FILE stream will be considered the
+ * beginning of the SkFILEStream and the current seek end of the FILE will be the end.
+ * The C FILE stream will be closed in the destructor.
+ */
+ explicit SkFILEStream(FILE* file);
+
+ /** Initialize the stream with an existing C FILE stream.
+ * The current position of the C FILE stream will be considered the
+ * beginning of the SkFILEStream and size bytes later will be the end.
+ * The C FILE stream will be closed in the destructor.
+ */
+ explicit SkFILEStream(FILE* file, size_t size);
+
+ ~SkFILEStream() override;
+
+ static std::unique_ptr<SkFILEStream> Make(const char path[]) {
+ std::unique_ptr<SkFILEStream> stream(new SkFILEStream(path));
+ return stream->isValid() ? std::move(stream) : nullptr;
+ }
+
+ /** Returns true if the current path could be opened. */
+ bool isValid() const { return fFILE != nullptr; }
+
+ /** Close this SkFILEStream. */
+ void close();
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+
+ bool rewind() override;
+ std::unique_ptr<SkStreamAsset> duplicate() const {
+ return std::unique_ptr<SkStreamAsset>(this->onDuplicate());
+ }
+
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+
+ std::unique_ptr<SkStreamAsset> fork() const {
+ return std::unique_ptr<SkStreamAsset>(this->onFork());
+ }
+
+ size_t getLength() const override;
+
+private:
+ explicit SkFILEStream(FILE*, size_t size, size_t start);
+ explicit SkFILEStream(std::shared_ptr<FILE>, size_t end, size_t start);
+ explicit SkFILEStream(std::shared_ptr<FILE>, size_t end, size_t start, size_t current);
+
+ SkStreamAsset* onDuplicate() const override;
+ SkStreamAsset* onFork() const override;
+
+ std::shared_ptr<FILE> fFILE;
+ // My own council will I keep on sizes and offsets.
+    // These are seek positions in the underlying FILE, not offsets into the stream.
+ size_t fEnd;
+ size_t fStart;
+ size_t fCurrent;
+
+ using INHERITED = SkStreamAsset;
+};
+
+class SK_API SkMemoryStream : public SkStreamMemory {
+public:
+ SkMemoryStream();
+
+ /** We allocate (and free) the memory. Write to it via getMemoryBase() */
+ SkMemoryStream(size_t length);
+
+ /** If copyData is true, the stream makes a private copy of the data. */
+ SkMemoryStream(const void* data, size_t length, bool copyData = false);
+
+ /** Creates the stream to read from the specified data */
+ SkMemoryStream(sk_sp<SkData> data);
+
+ /** Returns a stream with a copy of the input data. */
+ static std::unique_ptr<SkMemoryStream> MakeCopy(const void* data, size_t length);
+
+ /** Returns a stream with a bare pointer reference to the input data. */
+ static std::unique_ptr<SkMemoryStream> MakeDirect(const void* data, size_t length);
+
+ /** Returns a stream with a shared reference to the input data. */
+ static std::unique_ptr<SkMemoryStream> Make(sk_sp<SkData> data);
+
+ /** Resets the stream to the specified data and length,
+ just like the constructor.
+ if copyData is true, the stream makes a private copy of the data
+ */
+ virtual void setMemory(const void* data, size_t length,
+ bool copyData = false);
+ /** Replace any memory buffer with the specified buffer. The caller
+ must have allocated data with sk_malloc or sk_realloc, since it
+ will be freed with sk_free.
+ */
+ void setMemoryOwned(const void* data, size_t length);
+
+ sk_sp<SkData> asData() const { return fData; }
+ void setData(sk_sp<SkData> data);
+
+ void skipToAlign4();
+ const void* getAtPos();
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+
+ size_t peek(void* buffer, size_t size) const override;
+
+ bool rewind() override;
+
+ std::unique_ptr<SkMemoryStream> duplicate() const {
+ return std::unique_ptr<SkMemoryStream>(this->onDuplicate());
+ }
+
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+
+ std::unique_ptr<SkMemoryStream> fork() const {
+ return std::unique_ptr<SkMemoryStream>(this->onFork());
+ }
+
+ size_t getLength() const override;
+
+ const void* getMemoryBase() override;
+
+private:
+ SkMemoryStream* onDuplicate() const override;
+ SkMemoryStream* onFork() const override;
+
+ sk_sp<SkData> fData;
+ size_t fOffset;
+
+ using INHERITED = SkStreamMemory;
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+class SK_API SkFILEWStream : public SkWStream {
+public:
+ SkFILEWStream(const char path[]);
+ ~SkFILEWStream() override;
+
+ /** Returns true if the current path could be opened.
+ */
+ bool isValid() const { return fFILE != nullptr; }
+
+ bool write(const void* buffer, size_t size) override;
+ void flush() override;
+ void fsync();
+ size_t bytesWritten() const override;
+
+private:
+ FILE* fFILE;
+
+ using INHERITED = SkWStream;
+};
+
+class SK_API SkDynamicMemoryWStream : public SkWStream {
+public:
+ SkDynamicMemoryWStream() = default;
+ SkDynamicMemoryWStream(SkDynamicMemoryWStream&&);
+ SkDynamicMemoryWStream& operator=(SkDynamicMemoryWStream&&);
+ ~SkDynamicMemoryWStream() override;
+
+ bool write(const void* buffer, size_t size) override;
+ size_t bytesWritten() const override;
+
+ bool read(void* buffer, size_t offset, size_t size);
+
+ /** More efficient version of read(dst, 0, bytesWritten()). */
+ void copyTo(void* dst) const;
+ bool writeToStream(SkWStream* dst) const;
+
+ /** Equivalent to copyTo() followed by reset(), but may save memory use. */
+ void copyToAndReset(void* dst);
+
+ /** Equivalent to writeToStream() followed by reset(), but may save memory use. */
+ bool writeToAndReset(SkWStream* dst);
+
+ /** Equivalent to writeToStream() followed by reset(), but may save memory use.
+ When the dst is also a SkDynamicMemoryWStream, the implementation is constant time. */
+ bool writeToAndReset(SkDynamicMemoryWStream* dst);
+
+ /** Prepend this stream to dst, resetting this. */
+ void prependToAndReset(SkDynamicMemoryWStream* dst);
+
+ /** Return the contents as SkData, and then reset the stream. */
+ sk_sp<SkData> detachAsData();
+
+ /** Reset, returning a reader stream with the current content. */
+ std::unique_ptr<SkStreamAsset> detachAsStream();
+
+ /** Reset the stream to its original, empty, state. */
+ void reset();
+ void padToAlign4();
+private:
+ struct Block;
+ Block* fHead = nullptr;
+ Block* fTail = nullptr;
+ size_t fBytesWrittenBeforeTail = 0;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ // For access to the Block type.
+ friend class SkBlockMemoryStream;
+ friend class SkBlockMemoryRefCnt;
+
+ using INHERITED = SkWStream;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkString.h b/gfx/skia/skia/include/core/SkString.h
new file mode 100644
index 0000000000..1b27fbf44b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkString.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkString_DEFINED
+#define SkString_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/base/SkTypeTraits.h"
+
+#include <atomic>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <string_view>
+#include <type_traits>
+
+/* Some helper functions for C strings */
+static inline bool SkStrStartsWith(const char string[], const char prefixStr[]) {
+ SkASSERT(string);
+ SkASSERT(prefixStr);
+ return !strncmp(string, prefixStr, strlen(prefixStr));
+}
+static inline bool SkStrStartsWith(const char string[], const char prefixChar) {
+ SkASSERT(string);
+ return (prefixChar == *string);
+}
+
+bool SkStrEndsWith(const char string[], const char suffixStr[]);
+bool SkStrEndsWith(const char string[], const char suffixChar);
+
+int SkStrStartsWithOneOf(const char string[], const char prefixes[]);
+
+static inline int SkStrFind(const char string[], const char substring[]) {
+ const char *first = strstr(string, substring);
+ if (nullptr == first) return -1;
+ return SkToInt(first - &string[0]);
+}
+
+static inline int SkStrFindLastOf(const char string[], const char subchar) {
+ const char* last = strrchr(string, subchar);
+ if (nullptr == last) return -1;
+ return SkToInt(last - &string[0]);
+}
+
+static inline bool SkStrContains(const char string[], const char substring[]) {
+ SkASSERT(string);
+ SkASSERT(substring);
+ return (-1 != SkStrFind(string, substring));
+}
+static inline bool SkStrContains(const char string[], const char subchar) {
+ SkASSERT(string);
+ char tmp[2];
+ tmp[0] = subchar;
+ tmp[1] = '\0';
+ return (-1 != SkStrFind(string, tmp));
+}
+
+/*
+ * The SkStrAppend... methods will write into the provided buffer, assuming it is large enough.
+ * Each method has an associated const (e.g. kSkStrAppendU32_MaxSize) which will be the largest
+ * value needed for that method's buffer.
+ *
+ * char storage[kSkStrAppendU32_MaxSize];
+ * SkStrAppendU32(storage, value);
+ *
+ * Note : none of the SkStrAppend... methods write a terminating 0 to their buffers. Instead,
+ * the methods return the ptr to the end of the written part of the buffer. This can be used
+ * to compute the length, and/or know where to write a 0 if that is desired.
+ *
+ * char storage[kSkStrAppendU32_MaxSize + 1];
+ * char* stop = SkStrAppendU32(storage, value);
+ * size_t len = stop - storage;
+ * *stop = 0; // valid, since storage was 1 byte larger than the max.
+ */
+
+static constexpr int kSkStrAppendU32_MaxSize = 10;
+char* SkStrAppendU32(char buffer[], uint32_t);
+static constexpr int kSkStrAppendU64_MaxSize = 20;
+char* SkStrAppendU64(char buffer[], uint64_t, int minDigits);
+
+static constexpr int kSkStrAppendS32_MaxSize = kSkStrAppendU32_MaxSize + 1;
+char* SkStrAppendS32(char buffer[], int32_t);
+static constexpr int kSkStrAppendS64_MaxSize = kSkStrAppendU64_MaxSize + 1;
+char* SkStrAppendS64(char buffer[], int64_t, int minDigits);
+
+/**
+ * Floats have at most 8 significant digits, so we limit our %g to that.
+ * However, the total string could be 15 characters: -1.2345678e-005
+ *
+ * In theory we should only expect up to 2 digits for the exponent, but on
+ * some platforms we have seen 3 (as in the example above).
+ */
+static constexpr int kSkStrAppendScalar_MaxSize = 15;
+
+/**
+ * Write the scalar in decimal format into buffer, and return a pointer to
+ * the next char after the last one written. Note: a terminating 0 is not
+ * written into buffer, which must be at least kSkStrAppendScalar_MaxSize.
+ * Thus if the caller wants to add a 0 at the end, buffer must be at least
+ * kSkStrAppendScalar_MaxSize + 1 bytes large.
+ */
+char* SkStrAppendScalar(char buffer[], SkScalar);
+
+/** \class SkString
+
+    Lightweight class for managing strings. Uses reference
+ counting to make string assignments and copies very fast
+ with no extra RAM cost. Assumes UTF8 encoding.
+*/
+class SK_API SkString {
+public:
+ SkString();
+ explicit SkString(size_t len);
+ explicit SkString(const char text[]);
+ SkString(const char text[], size_t len);
+ SkString(const SkString&);
+ SkString(SkString&&);
+ explicit SkString(const std::string&);
+ explicit SkString(std::string_view);
+ ~SkString();
+
+ bool isEmpty() const { return 0 == fRec->fLength; }
+ size_t size() const { return (size_t) fRec->fLength; }
+ const char* data() const { return fRec->data(); }
+ const char* c_str() const { return fRec->data(); }
+ char operator[](size_t n) const { return this->c_str()[n]; }
+
+ bool equals(const SkString&) const;
+ bool equals(const char text[]) const;
+ bool equals(const char text[], size_t len) const;
+
+ bool startsWith(const char prefixStr[]) const {
+ return SkStrStartsWith(fRec->data(), prefixStr);
+ }
+ bool startsWith(const char prefixChar) const {
+ return SkStrStartsWith(fRec->data(), prefixChar);
+ }
+ bool endsWith(const char suffixStr[]) const {
+ return SkStrEndsWith(fRec->data(), suffixStr);
+ }
+ bool endsWith(const char suffixChar) const {
+ return SkStrEndsWith(fRec->data(), suffixChar);
+ }
+ bool contains(const char substring[]) const {
+ return SkStrContains(fRec->data(), substring);
+ }
+ bool contains(const char subchar) const {
+ return SkStrContains(fRec->data(), subchar);
+ }
+ int find(const char substring[]) const {
+ return SkStrFind(fRec->data(), substring);
+ }
+ int findLastOf(const char subchar) const {
+ return SkStrFindLastOf(fRec->data(), subchar);
+ }
+
+ friend bool operator==(const SkString& a, const SkString& b) {
+ return a.equals(b);
+ }
+ friend bool operator!=(const SkString& a, const SkString& b) {
+ return !a.equals(b);
+ }
+
+ // these methods edit the string
+
+ SkString& operator=(const SkString&);
+ SkString& operator=(SkString&&);
+ SkString& operator=(const char text[]);
+
+ char* data();
+ char& operator[](size_t n) { return this->data()[n]; }
+
+ void reset();
+ /** String contents are preserved on resize. (For destructive resize, `set(nullptr, length)`.)
+ * `resize` automatically reserves an extra byte at the end of the buffer for a null terminator.
+ */
+ void resize(size_t len);
+ void set(const SkString& src) { *this = src; }
+ void set(const char text[]);
+ void set(const char text[], size_t len);
+ void set(std::string_view str) { this->set(str.data(), str.size()); }
+
+ void insert(size_t offset, const char text[]);
+ void insert(size_t offset, const char text[], size_t len);
+ void insert(size_t offset, const SkString& str) { this->insert(offset, str.c_str(), str.size()); }
+ void insert(size_t offset, std::string_view str) { this->insert(offset, str.data(), str.size()); }
+ void insertUnichar(size_t offset, SkUnichar);
+ void insertS32(size_t offset, int32_t value);
+ void insertS64(size_t offset, int64_t value, int minDigits = 0);
+ void insertU32(size_t offset, uint32_t value);
+ void insertU64(size_t offset, uint64_t value, int minDigits = 0);
+ void insertHex(size_t offset, uint32_t value, int minDigits = 0);
+ void insertScalar(size_t offset, SkScalar);
+
+ void append(const char text[]) { this->insert((size_t)-1, text); }
+ void append(const char text[], size_t len) { this->insert((size_t)-1, text, len); }
+ void append(const SkString& str) { this->insert((size_t)-1, str.c_str(), str.size()); }
+ void append(std::string_view str) { this->insert((size_t)-1, str.data(), str.size()); }
+ void appendUnichar(SkUnichar uni) { this->insertUnichar((size_t)-1, uni); }
+ void appendS32(int32_t value) { this->insertS32((size_t)-1, value); }
+ void appendS64(int64_t value, int minDigits = 0) { this->insertS64((size_t)-1, value, minDigits); }
+ void appendU32(uint32_t value) { this->insertU32((size_t)-1, value); }
+ void appendU64(uint64_t value, int minDigits = 0) { this->insertU64((size_t)-1, value, minDigits); }
+ void appendHex(uint32_t value, int minDigits = 0) { this->insertHex((size_t)-1, value, minDigits); }
+ void appendScalar(SkScalar value) { this->insertScalar((size_t)-1, value); }
+
+ void prepend(const char text[]) { this->insert(0, text); }
+ void prepend(const char text[], size_t len) { this->insert(0, text, len); }
+ void prepend(const SkString& str) { this->insert(0, str.c_str(), str.size()); }
+ void prepend(std::string_view str) { this->insert(0, str.data(), str.size()); }
+ void prependUnichar(SkUnichar uni) { this->insertUnichar(0, uni); }
+ void prependS32(int32_t value) { this->insertS32(0, value); }
+ void prependS64(int32_t value, int minDigits = 0) { this->insertS64(0, value, minDigits); }
+ void prependHex(uint32_t value, int minDigits = 0) { this->insertHex(0, value, minDigits); }
+ void prependScalar(SkScalar value) { this->insertScalar((size_t)-1, value); }
+
+ void printf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void printVAList(const char format[], va_list) SK_PRINTF_LIKE(2, 0);
+ void appendf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void appendVAList(const char format[], va_list) SK_PRINTF_LIKE(2, 0);
+ void prependf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+ void prependVAList(const char format[], va_list) SK_PRINTF_LIKE(2, 0);
+
+ void remove(size_t offset, size_t length);
+
+ SkString& operator+=(const SkString& s) { this->append(s); return *this; }
+ SkString& operator+=(const char text[]) { this->append(text); return *this; }
+ SkString& operator+=(const char c) { this->append(&c, 1); return *this; }
+
+ /**
+ * Swap contents between this and other. This function is guaranteed
+ * to never fail or throw.
+ */
+ void swap(SkString& other);
+
+ using sk_is_trivially_relocatable = std::true_type;
+
+private:
+ struct Rec {
+ public:
+ constexpr Rec(uint32_t len, int32_t refCnt) : fLength(len), fRefCnt(refCnt) {}
+ static sk_sp<Rec> Make(const char text[], size_t len);
+ char* data() { return fBeginningOfData; }
+ const char* data() const { return fBeginningOfData; }
+ void ref() const;
+ void unref() const;
+ bool unique() const;
+#ifdef SK_DEBUG
+ int32_t getRefCnt() const;
+#endif
+ uint32_t fLength; // logically size_t, but we want it to stay 32 bits
+
+ private:
+ mutable std::atomic<int32_t> fRefCnt;
+ char fBeginningOfData[1] = {'\0'};
+
+ // Ensure the unsized delete is called.
+ void operator delete(void* p) { ::operator delete(p); }
+ };
+ sk_sp<Rec> fRec;
+
+ static_assert(::sk_is_trivially_relocatable<decltype(fRec)>::value);
+
+#ifdef SK_DEBUG
+ const SkString& validate() const;
+#else
+ const SkString& validate() const { return *this; }
+#endif
+
+ static const Rec gEmptyRec;
+};
+
+/// Creates a new string and writes into it using a printf()-style format.
+SkString SkStringPrintf(const char* format, ...) SK_PRINTF_LIKE(1, 2);
+/// This makes it easier to write a caller as a VAR_ARGS function where the format string is
+/// optional.
+static inline SkString SkStringPrintf() { return SkString(); }
+
+static inline void swap(SkString& a, SkString& b) {
+ a.swap(b);
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkStrokeRec.h b/gfx/skia/skia/include/core/SkStrokeRec.h
new file mode 100644
index 0000000000..1257d04a84
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkStrokeRec.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrokeRec_DEFINED
+#define SkStrokeRec_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/private/base/SkMacros.h"
+
+class SkPath;
+
+SK_BEGIN_REQUIRE_DENSE
+class SK_API SkStrokeRec {
+public:
+ enum InitStyle {
+ kHairline_InitStyle,
+ kFill_InitStyle
+ };
+ SkStrokeRec(InitStyle style);
+ SkStrokeRec(const SkPaint&, SkPaint::Style, SkScalar resScale = 1);
+ explicit SkStrokeRec(const SkPaint&, SkScalar resScale = 1);
+
+ enum Style {
+ kHairline_Style,
+ kFill_Style,
+ kStroke_Style,
+ kStrokeAndFill_Style
+ };
+
+ static constexpr int kStyleCount = kStrokeAndFill_Style + 1;
+
+ Style getStyle() const;
+ SkScalar getWidth() const { return fWidth; }
+ SkScalar getMiter() const { return fMiterLimit; }
+ SkPaint::Cap getCap() const { return (SkPaint::Cap)fCap; }
+ SkPaint::Join getJoin() const { return (SkPaint::Join)fJoin; }
+
+ bool isHairlineStyle() const {
+ return kHairline_Style == this->getStyle();
+ }
+
+ bool isFillStyle() const {
+ return kFill_Style == this->getStyle();
+ }
+
+ void setFillStyle();
+ void setHairlineStyle();
+ /**
+     *  Specify the stroke width, and optionally if you want stroke + fill.
+ * Note, if width==0, then this request is taken to mean:
+ * strokeAndFill==true -> new style will be Fill
+ * strokeAndFill==false -> new style will be Hairline
+ */
+ void setStrokeStyle(SkScalar width, bool strokeAndFill = false);
+
+ void setStrokeParams(SkPaint::Cap cap, SkPaint::Join join, SkScalar miterLimit) {
+ fCap = cap;
+ fJoin = join;
+ fMiterLimit = miterLimit;
+ }
+
+ SkScalar getResScale() const {
+ return fResScale;
+ }
+
+ void setResScale(SkScalar rs) {
+ SkASSERT(rs > 0 && SkScalarIsFinite(rs));
+ fResScale = rs;
+ }
+
+ /**
+     * Returns true if this specifies any thick stroking, i.e. applyToPath()
+ * will return true.
+ */
+ bool needToApply() const {
+ Style style = this->getStyle();
+ return (kStroke_Style == style) || (kStrokeAndFill_Style == style);
+ }
+
+ /**
+ * Apply these stroke parameters to the src path, returning the result
+ * in dst.
+ *
+ * If there was no change (i.e. style == hairline or fill) this returns
+ * false and dst is unchanged. Otherwise returns true and the result is
+ * stored in dst.
+ *
+ * src and dst may be the same path.
+ */
+ bool applyToPath(SkPath* dst, const SkPath& src) const;
+
+ /**
+ * Apply these stroke parameters to a paint.
+ */
+ void applyToPaint(SkPaint* paint) const;
+
+ /**
+     * Gives a conservative value for the outset that should be applied to a
+     * geometry's bounds to account for any inflation due to applying this
+     * strokeRec to the geometry.
+ */
+ SkScalar getInflationRadius() const;
+
+ /**
+ * Equivalent to:
+ * SkStrokeRec rec(paint, style);
+ * rec.getInflationRadius();
+ * This does not account for other effects on the paint (i.e. path
+ * effect).
+ */
+ static SkScalar GetInflationRadius(const SkPaint&, SkPaint::Style);
+
+ static SkScalar GetInflationRadius(SkPaint::Join, SkScalar miterLimit, SkPaint::Cap,
+ SkScalar strokeWidth);
+
+ /**
+ * Compare if two SkStrokeRecs have an equal effect on a path.
+ * Equal SkStrokeRecs produce equal paths. Equality of produced
+ * paths does not take the ResScale parameter into account.
+ */
+ bool hasEqualEffect(const SkStrokeRec& other) const {
+ if (!this->needToApply()) {
+ return this->getStyle() == other.getStyle();
+ }
+ return fWidth == other.fWidth &&
+ (fJoin != SkPaint::kMiter_Join || fMiterLimit == other.fMiterLimit) &&
+ fCap == other.fCap &&
+ fJoin == other.fJoin &&
+ fStrokeAndFill == other.fStrokeAndFill;
+ }
+
+private:
+ void init(const SkPaint&, SkPaint::Style, SkScalar resScale);
+
+ SkScalar fResScale;
+ SkScalar fWidth;
+ SkScalar fMiterLimit;
+ // The following three members are packed together into a single u32.
+ // This is to avoid unnecessary padding and ensure binary equality for
+ // hashing (because the padded areas might contain garbage values).
+ //
+ // fCap and fJoin are larger than needed to avoid having to initialize
+ // any pad values
+ uint32_t fCap : 16; // SkPaint::Cap
+ uint32_t fJoin : 15; // SkPaint::Join
+ uint32_t fStrokeAndFill : 1; // bool
+};
+SK_END_REQUIRE_DENSE
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSurface.h b/gfx/skia/skia/include/core/SkSurface.h
new file mode 100644
index 0000000000..3673d172b6
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSurface.h
@@ -0,0 +1,1199 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_DEFINED
+#define SkSurface_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrTypes.h"
+#else
+enum GrSurfaceOrigin: int;
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "include/gpu/GpuTypes.h"
+namespace skgpu::graphite {
+class BackendTexture;
+}
+#endif
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+#include <android/hardware_buffer.h>
+class GrDirectContext;
+#endif
+
+#if defined(SK_GANESH) && defined(SK_METAL)
+#include "include/gpu/mtl/GrMtlTypes.h"
+#endif
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+class GrBackendRenderTarget;
+class GrBackendSemaphore;
+class GrBackendTexture;
+class GrRecordingContext;
+class SkBitmap;
+class SkCanvas;
+class SkCapabilities;
+class SkColorSpace;
+class SkDeferredDisplayList;
+class SkPaint;
+class SkSurfaceCharacterization;
+enum SkColorType : int;
+struct SkIRect;
+struct SkISize;
+
+namespace skgpu {
+class MutableTextureState;
+enum class Budgeted : bool;
+}
+
+namespace skgpu::graphite {
+class Recorder;
+}
+
+/** \class SkSurface
+ SkSurface is responsible for managing the pixels that a canvas draws into. The pixels can be
+ allocated either in CPU memory (a raster surface) or on the GPU (a GrRenderTarget surface).
+ SkSurface takes care of allocating a SkCanvas that will draw into the surface. Call
+ surface->getCanvas() to use that canvas (but don't delete it, it is owned by the surface).
+ SkSurface always has non-zero dimensions. If there is a request for a new surface, and either
+ of the requested dimensions are zero, then nullptr will be returned.
+*/
+class SK_API SkSurface : public SkRefCnt {
+public:
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+
+ SkSurface is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are greater than zero;
+ info contains SkColorType and SkAlphaType supported by raster surface;
+ pixels is not nullptr;
+ rowBytes is large enough to contain info width pixels of SkColorType.
+
+ Pixel buffer size should be info height times computed rowBytes.
+ Pixels are not initialized.
+ To access pixels after drawing, peekPixels() or readPixels().
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width and height must be greater than zero
+ @param pixels pointer to destination pixels buffer
+ @param rowBytes interval from one SkSurface row to the next
+ @param surfaceProps LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRasterDirect(const SkImageInfo& imageInfo, void* pixels,
+ size_t rowBytes,
+ const SkSurfaceProps* surfaceProps = nullptr);
+
+ static sk_sp<SkSurface> MakeRasterDirect(const SkPixmap& pm,
+ const SkSurfaceProps* props = nullptr) {
+ return MakeRasterDirect(pm.info(), pm.writable_addr(), pm.rowBytes(), props);
+ }
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+ releaseProc is called with pixels and context when SkSurface is deleted.
+
+ SkSurface is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are greater than zero;
+ info contains SkColorType and SkAlphaType supported by raster surface;
+ pixels is not nullptr;
+ rowBytes is large enough to contain info width pixels of SkColorType.
+
+ Pixel buffer size should be info height times computed rowBytes.
+ Pixels are not initialized.
+ To access pixels after drawing, call flush() or peekPixels().
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width and height must be greater than zero
+ @param pixels pointer to destination pixels buffer
+ @param rowBytes interval from one SkSurface row to the next
+ @param releaseProc called when SkSurface is deleted; may be nullptr
+ @param context passed to releaseProc; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRasterDirectReleaseProc(const SkImageInfo& imageInfo, void* pixels,
+ size_t rowBytes,
+ void (*releaseProc)(void* pixels, void* context),
+ void* context, const SkSurfaceProps* surfaceProps = nullptr);
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+ Allocates and zeroes pixel memory. Pixel memory size is imageInfo.height() times
+ rowBytes, or times imageInfo.minRowBytes() if rowBytes is zero.
+ Pixel memory is deleted when SkSurface is deleted.
+
+ SkSurface is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are greater than zero;
+ info contains SkColorType and SkAlphaType supported by raster surface;
+ rowBytes is large enough to contain info width pixels of SkColorType, or is zero.
+
+ If rowBytes is zero, a suitable value will be chosen internally.
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width and height must be greater than zero
+ @param rowBytes interval from one SkSurface row to the next; may be zero
+ @param surfaceProps LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRaster(const SkImageInfo& imageInfo, size_t rowBytes,
+ const SkSurfaceProps* surfaceProps);
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+ Allocates and zeroes pixel memory. Pixel memory size is imageInfo.height() times
+ imageInfo.minRowBytes().
+ Pixel memory is deleted when SkSurface is deleted.
+
+ SkSurface is returned if all parameters are valid.
+ Valid parameters include:
+ info dimensions are greater than zero;
+ info contains SkColorType and SkAlphaType supported by raster surface.
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width and height must be greater than zero
+ @param props LCD striping orientation and setting for device independent fonts;
+ may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRaster(const SkImageInfo& imageInfo,
+ const SkSurfaceProps* props = nullptr) {
+ return MakeRaster(imageInfo, 0, props);
+ }
+
+ /** Allocates raster SkSurface. SkCanvas returned by SkSurface draws directly into pixels.
+ Allocates and zeroes pixel memory. Pixel memory size is height times width times
+ four. Pixel memory is deleted when SkSurface is deleted.
+
+ Internally, sets SkImageInfo to width, height, native color type, and
+ kPremul_SkAlphaType.
+
+ SkSurface is returned if width and height are greater than zero.
+
+ Use to create SkSurface that matches SkPMColor, the native pixel arrangement on
+ the platform. SkSurface drawn to output device skips converting its pixel format.
+
+ @param width pixel column count; must be greater than zero
+ @param height pixel row count; must be greater than zero
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRasterN32Premul(int width, int height,
+ const SkSurfaceProps* surfaceProps = nullptr);
+
+ /** Caller data passed to RenderTarget/TextureReleaseProc; may be nullptr. */
+ typedef void* ReleaseContext;
+
+ /** User function called when supplied render target may be deleted. */
+ typedef void (*RenderTargetReleaseProc)(ReleaseContext releaseContext);
+
+ /** User function called when supplied texture may be deleted. */
+ typedef void (*TextureReleaseProc)(ReleaseContext releaseContext);
+
+ /** Wraps a GPU-backed texture into SkSurface. Caller must ensure the texture is
+ valid for the lifetime of returned SkSurface. If sampleCnt greater than zero,
+ creates an intermediate MSAA SkSurface which is used for drawing backendTexture.
+
+ SkSurface is returned if all parameters are valid. backendTexture is valid if
+ its pixel configuration agrees with colorSpace and context; for instance, if
+ backendTexture has an sRGB configuration, then context must support sRGB,
+ and colorSpace must be present. Further, backendTexture width and height must
+ not exceed context capabilities, and the context must be able to support
+ back-end textures.
+
+ Upon success textureReleaseProc is called when it is safe to delete the texture in the
+ backend API (accounting only for use of the texture by this surface). If SkSurface creation
+ fails textureReleaseProc is called before this function returns.
+
+        If SK_GANESH is not defined, has no effect and returns nullptr.
+
+ @param context GPU context
+ @param backendTexture texture residing on GPU
+ @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @param textureReleaseProc function called when texture can be released
+ @param releaseContext state passed to textureReleaseProc
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeFromBackendTexture(GrRecordingContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin, int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Wraps a GPU-backed buffer into SkSurface. Caller must ensure backendRenderTarget
+ is valid for the lifetime of returned SkSurface.
+
+ SkSurface is returned if all parameters are valid. backendRenderTarget is valid if
+ its pixel configuration agrees with colorSpace and context; for instance, if
+ backendRenderTarget has an sRGB configuration, then context must support sRGB,
+ and colorSpace must be present. Further, backendRenderTarget width and height must
+ not exceed context capabilities, and the context must be able to support
+ back-end render targets.
+
+ Upon success releaseProc is called when it is safe to delete the render target in the
+ backend API (accounting only for use of the render target by this surface). If SkSurface
+ creation fails releaseProc is called before this function returns.
+
+        If SK_GANESH is not defined, has no effect and returns nullptr.
+
+ @param context GPU context
+ @param backendRenderTarget GPU intermediate memory buffer
+ @param colorSpace range of colors
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @param releaseProc function called when backendRenderTarget can be released
+ @param releaseContext state passed to releaseProc
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeFromBackendRenderTarget(GrRecordingContext* context,
+ const GrBackendRenderTarget& backendRenderTarget,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ RenderTargetReleaseProc releaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+
+ /** Returns SkSurface on GPU indicated by context. Allocates memory for
+ pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted
+ selects whether allocation for pixels is tracked by context. imageInfo
+ describes the pixel format in SkColorType, and transparency in
+ SkAlphaType, and color matching in SkColorSpace.
+
+ sampleCount requests the number of samples per pixel.
+ Pass zero to disable multi-sample anti-aliasing. The request is rounded
+ up to the next supported count, or rounded down if it is larger than the
+ maximum supported count.
+
+ surfaceOrigin pins either the top-left or the bottom-left corner to the origin.
+
+ shouldCreateWithMips hints that SkImage returned by makeImageSnapshot() is mip map.
+
+ If SK_GANESH is not defined, this has no effect and returns nullptr.
+
+ @param context GPU context
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace;
+ width, or height, or both, may be zero
+ @param sampleCount samples per pixel, or 0 to disable full scene anti-aliasing
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @param shouldCreateWithMips hint that SkSurface will host mip map images
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context,
+ skgpu::Budgeted budgeted,
+ const SkImageInfo& imageInfo,
+ int sampleCount,
+ GrSurfaceOrigin surfaceOrigin,
+ const SkSurfaceProps* surfaceProps,
+ bool shouldCreateWithMips = false);
+
+ /** Returns SkSurface on GPU indicated by context. Allocates memory for
+ pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted
+ selects whether allocation for pixels is tracked by context. imageInfo
+ describes the pixel format in SkColorType, and transparency in
+ SkAlphaType, and color matching in SkColorSpace.
+
+ sampleCount requests the number of samples per pixel.
+ Pass zero to disable multi-sample anti-aliasing. The request is rounded
+ up to the next supported count, or rounded down if it is larger than the
+ maximum supported count.
+
+ SkSurface bottom-left corner is pinned to the origin.
+
+ @param context GPU context
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width, or height, or both, may be zero
+ @param sampleCount samples per pixel, or 0 to disable multi-sample anti-aliasing
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context,
+ skgpu::Budgeted budgeted,
+ const SkImageInfo& imageInfo,
+ int sampleCount,
+ const SkSurfaceProps* surfaceProps) {
+#if defined(SK_GANESH)
+ return MakeRenderTarget(context, budgeted, imageInfo, sampleCount,
+ kBottomLeft_GrSurfaceOrigin, surfaceProps);
+#else
+ // TODO(kjlubick, scroggo) Remove this once Android is updated.
+ return nullptr;
+#endif
+ }
+
+ /** Returns SkSurface on GPU indicated by context. Allocates memory for
+ pixels, based on the width, height, and SkColorType in SkImageInfo. budgeted
+ selects whether allocation for pixels is tracked by context. imageInfo
+ describes the pixel format in SkColorType, and transparency in
+ SkAlphaType, and color matching in SkColorSpace.
+
+ SkSurface bottom-left corner is pinned to the origin.
+
+ @param context GPU context
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of raster surface; width, or height, or both, may be zero
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context,
+ skgpu::Budgeted budgeted,
+ const SkImageInfo& imageInfo) {
+#if defined(SK_GANESH)
+ if (!imageInfo.width() || !imageInfo.height()) {
+ return nullptr;
+ }
+ return MakeRenderTarget(context, budgeted, imageInfo, 0, kBottomLeft_GrSurfaceOrigin,
+ nullptr);
+#else
+ // TODO(kjlubick, scroggo) Remove this once Android is updated.
+ return nullptr;
+#endif
+ }
+
+ /** Returns SkSurface on GPU indicated by context that is compatible with the provided
+ characterization. budgeted selects whether allocation for pixels is tracked by context.
+
+ @param context GPU context
+ @param characterization description of the desired SkSurface
+ @return SkSurface if all parameters are valid; otherwise, nullptr
+ */
+ static sk_sp<SkSurface> MakeRenderTarget(GrRecordingContext* context,
+ const SkSurfaceCharacterization& characterization,
+ skgpu::Budgeted budgeted);
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+ /** Private.
+ Creates SkSurface from Android hardware buffer.
+ Returned SkSurface takes a reference on the buffer. The ref on the buffer will be released
+ when the SkSurface is destroyed and there is no pending work on the GPU involving the
+ buffer.
+
+ Only available on Android, when __ANDROID_API__ is defined to be 26 or greater.
+
+ Currently this is only supported for buffers that can be textured as well as rendered to.
+ In other words that must have both AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT and
+ AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE usage bits.
+
+ @param context GPU context
+ @param hardwareBuffer AHardwareBuffer Android hardware buffer
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @param fromWindow Whether or not the AHardwareBuffer is part of an Android Window.
+ Currently only used with Vulkan backend.
+ @return created SkSurface, or nullptr
+ */
+ static sk_sp<SkSurface> MakeFromAHardwareBuffer(GrDirectContext* context,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ , bool fromWindow = false
+#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK
+ );
+#endif
+
+#if defined(SK_GRAPHITE)
+ /**
+ * In Graphite, while clients hold a ref on an SkSurface, the backing gpu object does _not_
+ * count against the budget. Once an SkSurface is freed, the backing gpu object may or may
+ * not become a scratch (i.e., reusable) resource but, if it does, it will be counted against
+ * the budget.
+ */
+ static sk_sp<SkSurface> MakeGraphite(
+ skgpu::graphite::Recorder*,
+ const SkImageInfo& imageInfo,
+ skgpu::Mipmapped = skgpu::Mipmapped::kNo,
+ const SkSurfaceProps* surfaceProps = nullptr);
+
+ /**
+ * Wraps a GPU-backed texture in an SkSurface. Depending on the backend gpu API, the caller may
+ * be required to ensure the texture is valid for the lifetime of the returned SkSurface. The
+ * required lifetimes for the specific apis are:
+ * Metal: Skia will call retain on the underlying MTLTexture so the caller can drop it once
+ * this call returns.
+ *
+ * SkSurface is returned if all the parameters are valid. The backendTexture is valid if its
+ * format agrees with colorSpace and recorder; for instance, if backendTexture has an sRGB
+ * configuration, then the recorder must support sRGB, and colorSpace must be present. Further,
+ * backendTexture's width and height must not exceed the recorder's capabilities, and the
+ * recorder must be able to support the back-end texture.
+ */
+ static sk_sp<SkSurface> MakeGraphiteFromBackendTexture(skgpu::graphite::Recorder*,
+ const skgpu::graphite::BackendTexture&,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props);
+
+#endif // SK_GRAPHITE
+
+#if defined(SK_GANESH) && defined(SK_METAL)
+ /** Creates SkSurface from CAMetalLayer.
+ Returned SkSurface takes a reference on the CAMetalLayer. The ref on the layer will be
+ released when the SkSurface is destroyed.
+
+ Only available when Metal API is enabled.
+
+ Will grab the current drawable from the layer and use its texture as a backendRT to
+ create a renderable surface.
+
+ @param context GPU context
+ @param layer GrMTLHandle (expected to be a CAMetalLayer*)
+ @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @param drawable Pointer to drawable to be filled in when this surface is
+ instantiated; may not be nullptr
+ @return created SkSurface, or nullptr
+ */
+ static sk_sp<SkSurface> MakeFromCAMetalLayer(GrRecordingContext* context,
+ GrMTLHandle layer,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ GrMTLHandle* drawable)
+ SK_API_AVAILABLE_CA_METAL_LAYER;
+
+ /** Creates SkSurface from MTKView.
+ Returned SkSurface takes a reference on the MTKView. The ref on the layer will be
+ released when the SkSurface is destroyed.
+
+ Only available when Metal API is enabled.
+
+ Will grab the current drawable from the layer and use its texture as a backendRT to
+ create a renderable surface.
+
+ @param context GPU context
+ @param layer GrMTLHandle (expected to be a MTKView*)
+ @param sampleCnt samples per pixel, or 0 to disable full scene anti-aliasing
+ @param colorSpace range of colors; may be nullptr
+ @param surfaceProps LCD striping orientation and setting for device independent
+ fonts; may be nullptr
+ @return created SkSurface, or nullptr
+ */
+ static sk_sp<SkSurface> MakeFromMTKView(GrRecordingContext* context,
+ GrMTLHandle mtkView,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps)
+ SK_API_AVAILABLE(macos(10.11), ios(9.0));
+#endif
+
+ /** Is this surface compatible with the provided characterization?
+
+ This method can be used to determine if an existing SkSurface is a viable destination
+ for an SkDeferredDisplayList.
+
+ @param characterization The characterization for which a compatibility check is desired
+ @return true if this surface is compatible with the characterization;
+ false otherwise
+ */
+ bool isCompatible(const SkSurfaceCharacterization& characterization) const;
+
+ /** Returns SkSurface without backing pixels. Drawing to SkCanvas returned from SkSurface
+ has no effect. Calling makeImageSnapshot() on returned SkSurface returns nullptr.
+
+ @param width one or greater
+ @param height one or greater
+ @return SkSurface if width and height are positive; otherwise, nullptr
+
+ example: https://fiddle.skia.org/c/@Surface_MakeNull
+ */
+ static sk_sp<SkSurface> MakeNull(int width, int height);
+
+ /** Returns pixel count in each row; may be zero or greater.
+
+ @return number of pixel columns
+ */
+ int width() const { return fWidth; }
+
+ /** Returns pixel row count; may be zero or greater.
+
+ @return number of pixel rows
+ */
+ int height() const { return fHeight; }
+
+ /** Returns an ImageInfo describing the surface.
+ */
+ virtual SkImageInfo imageInfo() const { return SkImageInfo::MakeUnknown(fWidth, fHeight); }
+
+ /** Returns unique value identifying the content of SkSurface. Returned value changes
+ each time the content changes. Content is changed by drawing, or by calling
+ notifyContentWillChange().
+
+ @return unique content identifier
+
+ example: https://fiddle.skia.org/c/@Surface_notifyContentWillChange
+ */
+ uint32_t generationID();
+
+ /** \enum SkSurface::ContentChangeMode
+ ContentChangeMode members are parameters to notifyContentWillChange().
+ */
+ enum ContentChangeMode {
+ kDiscard_ContentChangeMode, //!< discards surface on change
+ kRetain_ContentChangeMode, //!< preserves surface on change
+ };
+
+ /** Notifies that SkSurface contents will be changed by code outside of Skia.
+ Subsequent calls to generationID() return a different value.
+
+ TODO: Can kRetain_ContentChangeMode be deprecated?
+
+ example: https://fiddle.skia.org/c/@Surface_notifyContentWillChange
+ */
+ void notifyContentWillChange(ContentChangeMode mode);
+
+ /** Returns the recording context being used by the SkSurface.
+
+ @return the recording context, if available; nullptr otherwise
+ */
+ GrRecordingContext* recordingContext();
+
+ /** Returns the recorder being used by the SkSurface.
+
+ @return the recorder, if available; nullptr otherwise
+ */
+ skgpu::graphite::Recorder* recorder();
+
+#if defined(SK_GANESH)
+ enum BackendHandleAccess {
+ kFlushRead_BackendHandleAccess, //!< back-end object is readable
+ kFlushWrite_BackendHandleAccess, //!< back-end object is writable
+ kDiscardWrite_BackendHandleAccess, //!< back-end object must be overwritten
+ };
+
+ /** Deprecated.
+ */
+ static const BackendHandleAccess kFlushRead_TextureHandleAccess =
+ kFlushRead_BackendHandleAccess;
+
+ /** Deprecated.
+ */
+ static const BackendHandleAccess kFlushWrite_TextureHandleAccess =
+ kFlushWrite_BackendHandleAccess;
+
+ /** Deprecated.
+ */
+ static const BackendHandleAccess kDiscardWrite_TextureHandleAccess =
+ kDiscardWrite_BackendHandleAccess;
+
+ /** Retrieves the back-end texture. If SkSurface has no back-end texture, an invalid
+ object is returned. Call GrBackendTexture::isValid to determine if the result
+ is valid.
+
+ The returned GrBackendTexture should be discarded if the SkSurface is drawn to or deleted.
+
+ @return GPU texture reference; invalid on failure
+ */
+ GrBackendTexture getBackendTexture(BackendHandleAccess backendHandleAccess);
+
+ /** Retrieves the back-end render target. If SkSurface has no back-end render target, an invalid
+ object is returned. Call GrBackendRenderTarget::isValid to determine if the result
+ is valid.
+
+ The returned GrBackendRenderTarget should be discarded if the SkSurface is drawn to
+ or deleted.
+
+ @return GPU render target reference; invalid on failure
+ */
+ GrBackendRenderTarget getBackendRenderTarget(BackendHandleAccess backendHandleAccess);
+
+ /** If the surface was made via MakeFromBackendTexture then its backing texture may be
+ substituted with a different texture. The contents of the previous backing texture are
+ copied into the new texture. SkCanvas state is preserved. The original sample count is
+ used. The GrBackendFormat and dimensions of replacement texture must match that of
+ the original.
+
+ Upon success textureReleaseProc is called when it is safe to delete the texture in the
+ backend API (accounting only for use of the texture by this surface). If SkSurface creation
+ fails textureReleaseProc is called before this function returns.
+
+ @param backendTexture the new backing texture for the surface
+ @param mode Retain or discard current Content
+ @param textureReleaseProc function called when texture can be released
+ @param releaseContext state passed to textureReleaseProc
+ */
+ bool replaceBackendTexture(const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ ContentChangeMode mode = kRetain_ContentChangeMode,
+ TextureReleaseProc textureReleaseProc = nullptr,
+ ReleaseContext releaseContext = nullptr);
+#endif
+
+ /** Returns SkCanvas that draws into SkSurface. Subsequent calls return the same SkCanvas.
+ SkCanvas returned is managed and owned by SkSurface, and is deleted when SkSurface
+ is deleted.
+
+ @return drawing SkCanvas for SkSurface
+
+ example: https://fiddle.skia.org/c/@Surface_getCanvas
+ */
+ SkCanvas* getCanvas();
+
+ /** Returns SkCapabilities that describes the capabilities of the SkSurface's device.
+
+ @return SkCapabilities of SkSurface's device.
+ */
+ sk_sp<const SkCapabilities> capabilities();
+
+ /** Returns a compatible SkSurface, or nullptr. Returned SkSurface contains
+ the same raster, GPU, or null properties as the original. Returned SkSurface
+ does not share the same pixels.
+
+ Returns nullptr if imageInfo width or height are zero, or if imageInfo
+ is incompatible with SkSurface.
+
+ @param imageInfo width, height, SkColorType, SkAlphaType, SkColorSpace,
+ of SkSurface; width and height must be greater than zero
+ @return compatible SkSurface or nullptr
+
+ example: https://fiddle.skia.org/c/@Surface_makeSurface
+ */
+ sk_sp<SkSurface> makeSurface(const SkImageInfo& imageInfo);
+
+ /** Calls makeSurface(ImageInfo) with the same ImageInfo as this surface, but with the
+ * specified width and height.
+ */
+ sk_sp<SkSurface> makeSurface(int width, int height);
+
+ /** Returns SkImage capturing SkSurface contents. Subsequent drawing to SkSurface contents
+ are not captured. SkImage allocation is accounted for if SkSurface was created with
+ skgpu::Budgeted::kYes.
+
+ @return SkImage initialized with SkSurface contents
+
+ example: https://fiddle.skia.org/c/@Surface_makeImageSnapshot
+ */
+ sk_sp<SkImage> makeImageSnapshot();
+
+ /**
+ * Like the no-parameter version, this returns an image of the current surface contents.
+ * This variant takes a rectangle specifying the subset of the surface that is of interest.
+ * These bounds will be sanitized before being used.
+ * - If bounds extends beyond the surface, it will be trimmed to just the intersection of
+ * it and the surface.
+ * - If bounds does not intersect the surface, then this returns nullptr.
+ * - If bounds == the surface, then this is the same as calling the no-parameter variant.
+
+ example: https://fiddle.skia.org/c/@Surface_makeImageSnapshot_2
+ */
+ sk_sp<SkImage> makeImageSnapshot(const SkIRect& bounds);
+
+#if defined(SK_GRAPHITE)
+ /**
+ * The 'asImage' and 'makeImageCopy' API/entry points are currently only available for
+ * Graphite.
+ *
+ * In this API, SkSurface no longer supports copy-on-write behavior. Instead, when creating
+ * an image for a surface, the client must explicitly indicate if a copy should be made.
+ * In both of the below calls the resource backing the surface will never change.
+ *
+ * The 'asImage' entry point has some major ramifications for the mutability of the
+ * returned SkImage. Since the originating surface and the returned image share the
+ * same backing, care must be taken by the client to ensure that the contents of the image
+ * reflect the desired contents when it is consumed by the gpu.
+ * Note: if the backing GPU buffer isn't textureable this method will return null. Graphite
+ * will not attempt to make a copy.
+ * Note: For 'asImage', the mipmapping of the image will match that of the source surface.
+ *
+ * The 'makeImageCopy' entry point allows subsetting and the addition of mipmaps (since
+ * a copy is already being made).
+ *
+ * In Graphite, the legacy API call (i.e., makeImageSnapshot) will just always make a copy.
+ */
+ sk_sp<SkImage> asImage();
+
+ sk_sp<SkImage> makeImageCopy(const SkIRect* subset = nullptr,
+ skgpu::Mipmapped mipmapped = skgpu::Mipmapped::kNo);
+#endif
+
+ /** Draws SkSurface contents to canvas, with its top-left corner at (x, y).
+
+ If SkPaint paint is not nullptr, apply SkColorFilter, alpha, SkImageFilter, and SkBlendMode.
+
+ @param canvas SkCanvas drawn into
+ @param x horizontal offset in SkCanvas
+ @param y vertical offset in SkCanvas
+ @param sampling what technique to use when sampling the surface pixels
+ @param paint SkPaint containing SkBlendMode, SkColorFilter, SkImageFilter,
+ and so on; or nullptr
+
+ example: https://fiddle.skia.org/c/@Surface_draw
+ */
+ void draw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkSamplingOptions& sampling,
+ const SkPaint* paint);
+
+ void draw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkPaint* paint = nullptr) {
+ this->draw(canvas, x, y, SkSamplingOptions(), paint);
+ }
+
+ /** Copies SkSurface pixel address, row bytes, and SkImageInfo to SkPixmap, if address
+ is available, and returns true. If pixel address is not available, return
+ false and leave SkPixmap unchanged.
+
+ pixmap contents become invalid on any future change to SkSurface.
+
+ @param pixmap storage for pixel state if pixels are readable; otherwise, ignored
+ @return true if SkSurface has direct access to pixels
+
+ example: https://fiddle.skia.org/c/@Surface_peekPixels
+ */
+ bool peekPixels(SkPixmap* pixmap);
+
+ /** Copies SkRect of pixels to dst.
+
+ Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()).
+ Destination SkRect corners are (0, 0) and (dst.width(), dst.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to dst.colorType() and dst.alphaType() if required.
+
+ Pixels are readable when SkSurface is raster, or backed by a GPU.
+
+ The destination pixel storage must be allocated by the caller.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. dst contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down destination.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkPixmap pixels could not be allocated.
+ - dst.rowBytes() is too small to contain one row of pixels.
+
+ @param dst storage for pixels copied from SkSurface
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+
+ example: https://fiddle.skia.org/c/@Surface_readPixels
+ */
+ bool readPixels(const SkPixmap& dst, int srcX, int srcY);
+
+ /** Copies SkRect of pixels from SkCanvas into dstPixels.
+
+ Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()).
+ Destination SkRect corners are (0, 0) and (dstInfo.width(), dstInfo.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to dstInfo.colorType() and dstInfo.alphaType() if required.
+
+ Pixels are readable when SkSurface is raster, or backed by a GPU.
+
+ The destination pixel storage must be allocated by the caller.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. dstPixels contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down destination.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkSurface pixels could not be converted to dstInfo.colorType() or dstInfo.alphaType().
+ - dstRowBytes is too small to contain one row of pixels.
+
+ @param dstInfo width, height, SkColorType, and SkAlphaType of dstPixels
+ @param dstPixels storage for pixels; dstInfo.height() times dstRowBytes, or larger
+ @param dstRowBytes size of one destination row; dstInfo.width() times pixel size, or larger
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+ */
+ bool readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY);
+
+ /** Copies SkRect of pixels from SkSurface into bitmap.
+
+ Source SkRect corners are (srcX, srcY) and SkSurface (width(), height()).
+ Destination SkRect corners are (0, 0) and (bitmap.width(), bitmap.height()).
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to bitmap.colorType() and bitmap.alphaType() if required.
+
+ Pixels are readable when SkSurface is raster, or backed by a GPU.
+
+ The destination pixel storage must be allocated by the caller.
+
+ Pixel values are converted only if SkColorType and SkAlphaType
+ do not match. Only pixels within both source and destination rectangles
+ are copied. dst contents outside SkRect intersection are unchanged.
+
+ Pass negative values for srcX or srcY to offset pixels across or down destination.
+
+ Does not copy, and returns false if:
+ - Source and destination rectangles do not intersect.
+ - SkSurface pixels could not be converted to dst.colorType() or dst.alphaType().
+ - dst pixels could not be allocated.
+ - dst.rowBytes() is too small to contain one row of pixels.
+
+ @param dst storage for pixels copied from SkSurface
+ @param srcX offset into readable pixels on x-axis; may be negative
+ @param srcY offset into readable pixels on y-axis; may be negative
+ @return true if pixels were copied
+
+ example: https://fiddle.skia.org/c/@Surface_readPixels_3
+ */
+ bool readPixels(const SkBitmap& dst, int srcX, int srcY);
+
+ using AsyncReadResult = SkImage::AsyncReadResult;
+
+ /** Client-provided context that is passed to client-provided ReadPixelsContext. */
+ using ReadPixelsContext = void*;
+
+ /** Client-provided callback to asyncRescaleAndReadPixels() or
+ asyncRescaleAndReadPixelsYUV420() that is called when read result is ready or on failure.
+ */
+ using ReadPixelsCallback = void(ReadPixelsContext, std::unique_ptr<const AsyncReadResult>);
+
+ /** Controls the gamma that rescaling occurs in for asyncRescaleAndReadPixels() and
+ asyncRescaleAndReadPixelsYUV420().
+ */
+ using RescaleGamma = SkImage::RescaleGamma;
+ using RescaleMode = SkImage::RescaleMode;
+
+ /** Makes surface pixel data available to caller, possibly asynchronously. It can also rescale
+ the surface pixels.
+
+ Currently asynchronous reads are only supported on the GPU backend and only when the
+ underlying 3D API supports transfer buffers and CPU/GPU synchronization primitives. In all
+ other cases this operates synchronously.
+
+ Data is read from the source sub-rectangle, is optionally converted to a linear gamma, is
+ rescaled to the size indicated by 'info', is then converted to the color space, color type,
+ and alpha type of 'info'. A 'srcRect' that is not contained by the bounds of the surface
+ causes failure.
+
+ When the pixel data is ready the caller's ReadPixelsCallback is called with a
+ AsyncReadResult containing pixel data in the requested color type, alpha type, and color
+ space. The AsyncReadResult will have count() == 1. Upon failure the callback is called
+ with nullptr for AsyncReadResult. For a GPU surface this flushes work but a submit must
+ occur to guarantee a finite time before the callback is called.
+
+ The data is valid for the lifetime of AsyncReadResult with the exception that if the
+ SkSurface is GPU-backed the data is immediately invalidated if the context is abandoned
+ or destroyed.
+
+ @param info info of the requested pixels
+ @param srcRect subrectangle of surface to read
+ @param rescaleGamma controls whether rescaling is done in the surface's gamma or whether
+ the source data is transformed to a linear gamma before rescaling.
+ @param rescaleMode controls the technique of the rescaling
+ @param callback function to call with result of the read
+ @param context passed to callback
+ */
+ void asyncRescaleAndReadPixels(const SkImageInfo& info,
+ const SkIRect& srcRect,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context);
+
+ /**
+ Similar to asyncRescaleAndReadPixels but performs an additional conversion to YUV. The
+ RGB->YUV conversion is controlled by 'yuvColorSpace'. The YUV data is returned as three
+ planes ordered y, u, v. The u and v planes are half the width and height of the resized
+ rectangle. The y, u, and v values are single bytes. Currently this fails if 'dstSize'
+ width and height are not even. A 'srcRect' that is not contained by the bounds of the
+ surface causes failure.
+
+ When the pixel data is ready the caller's ReadPixelsCallback is called with a
+ AsyncReadResult containing the planar data. The AsyncReadResult will have count() == 3.
+ Upon failure the callback is called with nullptr for AsyncReadResult. For a GPU surface this
+ flushes work but a submit must occur to guarantee a finite time before the callback is
+ called.
+
+ The data is valid for the lifetime of AsyncReadResult with the exception that if the
+ SkSurface is GPU-backed the data is immediately invalidated if the context is abandoned
+ or destroyed.
+
+ @param yuvColorSpace The transformation from RGB to YUV. Applied to the resized image
+ after it is converted to dstColorSpace.
+ @param dstColorSpace The color space to convert the resized image to, after rescaling.
+ @param srcRect The portion of the surface to rescale and convert to YUV planes.
+ @param dstSize The size to rescale srcRect to
+ @param rescaleGamma controls whether rescaling is done in the surface's gamma or whether
+ the source data is transformed to a linear gamma before rescaling.
+ @param rescaleMode controls the sampling technique of the rescaling
+ @param callback function to call with the planar read result
+ @param context passed to callback
+ */
+ void asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context);
+
+ /** Copies SkRect of pixels from the src SkPixmap to the SkSurface.
+
+ Source SkRect corners are (0, 0) and (src.width(), src.height()).
+ Destination SkRect corners are (dstX, dstY) and
+ (dstX + Surface width(), dstY + Surface height()).
+
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to SkSurface colorType() and SkSurface alphaType() if required.
+
+ @param src storage for pixels to copy to SkSurface
+ @param dstX x-axis position relative to SkSurface to begin copy; may be negative
+ @param dstY y-axis position relative to SkSurface to begin copy; may be negative
+
+ example: https://fiddle.skia.org/c/@Surface_writePixels
+ */
+ void writePixels(const SkPixmap& src, int dstX, int dstY);
+
+ /** Copies SkRect of pixels from the src SkBitmap to the SkSurface.
+
+ Source SkRect corners are (0, 0) and (src.width(), src.height()).
+ Destination SkRect corners are (dstX, dstY) and
+ (dstX + Surface width(), dstY + Surface height()).
+
+ Copies each readable pixel intersecting both rectangles, without scaling,
+ converting to SkSurface colorType() and SkSurface alphaType() if required.
+
+ @param src storage for pixels to copy to SkSurface
+ @param dstX x-axis position relative to SkSurface to begin copy; may be negative
+ @param dstY y-axis position relative to SkSurface to begin copy; may be negative
+
+ example: https://fiddle.skia.org/c/@Surface_writePixels_2
+ */
+ void writePixels(const SkBitmap& src, int dstX, int dstY);
+
+ /** Returns SkSurfaceProps for surface.
+
+ @return LCD striping orientation and setting for device independent fonts
+ */
+ const SkSurfaceProps& props() const { return fProps; }
+
+ /** Call to ensure all reads/writes of the surface have been issued to the underlying 3D API.
+ Skia will correctly order its own draws and pixel operations. This must be used to ensure
+ correct ordering when the surface backing store is accessed outside Skia (e.g. direct use of
+ the 3D API or a windowing system). GrDirectContext has additional flush and submit methods
+ that apply to all surfaces and images created from a GrDirectContext. This is equivalent to
+ calling SkSurface::flush with a default GrFlushInfo followed by
+ GrDirectContext::submit(syncCpu).
+ */
+ void flushAndSubmit(bool syncCpu = false);
+
+ enum class BackendSurfaceAccess {
+ kNoAccess, //!< back-end object will not be used by client
+ kPresent, //!< back-end surface will be used for presenting to screen
+ };
+
+#if defined(SK_GANESH)
+ /** If a surface is GPU texture backed, is being drawn with MSAA, and there is a resolve
+ texture, this call will insert a resolve command into the stream of gpu commands. In order
+ for the resolve to actually have an effect, the work still needs to be flushed and submitted
+ to the GPU after recording the resolve command. If a resolve is not supported or the
+ SkSurface has no dirty work to resolve, then this call is a no-op.
+
+ This call is most useful when the SkSurface is created by wrapping a single sampled gpu
+ texture, but asking Skia to render with MSAA. If the client wants to use the wrapped texture
+ outside of Skia, the only way to trigger a resolve is either to call this command or use
+ SkSurface::flush.
+ */
+ void resolveMSAA();
+
+ /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface
+ MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent
+ to the gpu. Some specific API details:
+ GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
+ sync objects from the flush will not be valid until a submission occurs.
+
+ Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
+ buffer or encoder objects. However, these objects are not sent to the gpu until a
+ submission occurs.
+
+ The work that is submitted to the GPU will be dependent on the BackendSurfaceAccess that is
+ passed in.
+
+ If BackendSurfaceAccess::kNoAccess is passed in all commands will be issued to the GPU.
+
+ If BackendSurfaceAccess::kPresent is passed in and the backend API is not Vulkan, it is
+ treated the same as kNoAccess. If the backend API is Vulkan, the VkImage that backs the
+ SkSurface will be transferred back to its original queue. If the SkSurface was created by
+ wrapping a VkImage, the queue will be set to the queue which was originally passed in on
+ the GrVkImageInfo. Additionally, if the original queue was not external or foreign the
+ layout of the VkImage will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.
+
+ The GrFlushInfo describes additional options to flush. Please see documentation at
+ GrFlushInfo for more info.
+
+ If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
+ submitted to the gpu during the next submit call (it is possible Skia failed to create a
+ subset of the semaphores). The client should not wait on these semaphores until after submit
+ has been called, but must keep them alive until then. If a submit flag was passed in with
+ the flush these valid semaphores can be waited on immediately. If this call returns
+ GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
+ the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
+ with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
+ client is still responsible for deleting any initialized semaphores.
+ Regardless of semaphore submission the context will still be flushed. It should be
+ emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
+ happen. It simply means there were no semaphores submitted to the GPU. A caller should only
+ take this as a failure if they passed in semaphores to be submitted.
+
+ Pending surface commands are flushed regardless of the return result.
+
+ @param access type of access the call will do on the backend object after flush
+ @param info flush options
+ */
+ GrSemaphoresSubmitted flush(BackendSurfaceAccess access, const GrFlushInfo& info);
+
+ /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface
+ MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent
+ to the gpu. Some specific API details:
+ GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
+ sync objects from the flush will not be valid until a submission occurs.
+
+ Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
+ buffer or encoder objects. However, these objects are not sent to the gpu until a
+ submission occurs.
+
+ The GrFlushInfo describes additional options to flush. Please see documentation at
+ GrFlushInfo for more info.
+
+ If a skgpu::MutableTextureState is passed in, at the end of the flush we will transition
+ the surface to be in the state requested by the skgpu::MutableTextureState. If the surface
+ (or SkImage or GrBackendSurface wrapping the same backend object) is used again after this
+ flush the state may be changed and no longer match what is requested here. This is often
+ used if the surface will be used for presenting or external use and the client wants backend
+ object to be prepped for that use. A finishedProc or semaphore on the GrFlushInfo will also
+ include the work for any requested state change.
+
+ If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
+ VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
+ tell Skia to not change those respective states.
+
+ If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
+ submitted to the gpu during the next submit call (it is possible Skia failed to create a
+ subset of the semaphores). The client should not wait on these semaphores until after submit
+ has been called, but must keep them alive until then. If a submit flag was passed in with
+ the flush these valid semaphores can be waited on immediately. If this call returns
+ GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
+ the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
+ with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
+ client is still responsible for deleting any initialized semaphores.
+ Regardless of semaphore submission the context will still be flushed. It should be
+ emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
+ happen. It simply means there were no semaphores submitted to the GPU. A caller should only
+ take this as a failure if they passed in semaphores to be submitted.
+
+ Pending surface commands are flushed regardless of the return result.
+
+ @param info flush options
+ @param access optional state change request after flush
+ */
+ GrSemaphoresSubmitted flush(const GrFlushInfo& info,
+ const skgpu::MutableTextureState* newState = nullptr);
+#endif // defined(SK_GANESH)
+
+ void flush();
+
+ /** Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
+ executing any more commands on the GPU for this surface. If this call returns false, then
+ the GPU back-end will not wait on any passed in semaphores, and the client will still own
+ the semaphores, regardless of the value of deleteSemaphoresAfterWait.
+
+ If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
+ it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
+ knows that Skia has finished waiting on them. This can be done by using finishedProcs
+ on flush calls.
+
+ @param numSemaphores size of waitSemaphores array
+ @param waitSemaphores array of semaphore containers
+ @param deleteSemaphoresAfterWait who owns and should delete the semaphores
+ @return true if GPU is waiting on semaphores
+ */
+ bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
+ bool deleteSemaphoresAfterWait = true);
+
+ /** Initializes SkSurfaceCharacterization that can be used to perform GPU back-end
+ processing in a separate thread. Typically this is used to divide drawing
+ into multiple tiles. SkDeferredDisplayListRecorder records the drawing commands
+ for each tile.
+
+ Return true if SkSurface supports characterization. raster surface returns false.
+
+ @param characterization properties for parallel drawing
+ @return true if supported
+
+ example: https://fiddle.skia.org/c/@Surface_characterize
+ */
+ bool characterize(SkSurfaceCharacterization* characterization) const;
+
+ /** Draws the deferred display list created via a SkDeferredDisplayListRecorder.
+ If the deferred display list is not compatible with this SkSurface, the draw is skipped
+ and false is return.
+
+ The xOffset and yOffset parameters are experimental and, if not both zero, will cause
+ the draw to be ignored.
+ When implemented, if xOffset or yOffset are non-zero, the DDL will be drawn offset by that
+ amount into the surface.
+
+ @param deferredDisplayList drawing commands
+ @param xOffset x-offset at which to draw the DDL
+ @param yOffset y-offset at which to draw the DDL
+ @return false if deferredDisplayList is not compatible
+
+ example: https://fiddle.skia.org/c/@Surface_draw_2
+ */
+ bool draw(sk_sp<const SkDeferredDisplayList> deferredDisplayList,
+ int xOffset = 0,
+ int yOffset = 0);
+
+protected:
+ SkSurface(int width, int height, const SkSurfaceProps* surfaceProps);
+ SkSurface(const SkImageInfo& imageInfo, const SkSurfaceProps* surfaceProps);
+
+ // called by subclass if their contents have changed
+ void dirtyGenerationID() {
+ fGenerationID = 0;
+ }
+
+private:
+ const SkSurfaceProps fProps;
+ const int fWidth;
+ const int fHeight;
+ uint32_t fGenerationID;
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSurfaceCharacterization.h b/gfx/skia/skia/include/core/SkSurfaceCharacterization.h
new file mode 100644
index 0000000000..075f601a27
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSurfaceCharacterization.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfaceCharacterization_DEFINED
+#define SkSurfaceCharacterization_DEFINED
+
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+
+class SkColorSpace;
+
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "include/gpu/GrTypes.h"
+
+/** \class SkSurfaceCharacterization
+ A surface characterization contains all the information Ganesh requires to makes its internal
+ rendering decisions. When passed into a SkDeferredDisplayListRecorder it will copy the
+ data and pass it on to the SkDeferredDisplayList if/when it is created. Note that both of
+ those objects (the Recorder and the DisplayList) will take a ref on the
+ GrContextThreadSafeProxy and SkColorSpace objects.
+*/
+class SK_API SkSurfaceCharacterization {
+public:
+ enum class Textureable : bool { kNo = false, kYes = true };
+ enum class MipMapped : bool { kNo = false, kYes = true };
+ enum class UsesGLFBO0 : bool { kNo = false, kYes = true };
+ // This flag indicates that the backing VkImage for this Vulkan surface will have the
+ // VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT set. This bit allows skia to handle advanced blends
+ // more optimally in a shader by being able to directly read the dst values.
+ enum class VkRTSupportsInputAttachment : bool { kNo = false, kYes = true };
+ // This flag indicates if the surface is wrapping a raw Vulkan secondary command buffer.
+ enum class VulkanSecondaryCBCompatible : bool { kNo = false, kYes = true };
+
+ SkSurfaceCharacterization()
+ : fCacheMaxResourceBytes(0)
+ , fOrigin(kBottomLeft_GrSurfaceOrigin)
+ , fSampleCnt(0)
+ , fIsTextureable(Textureable::kYes)
+ , fIsMipMapped(MipMapped::kYes)
+ , fUsesGLFBO0(UsesGLFBO0::kNo)
+ , fVulkanSecondaryCBCompatible(VulkanSecondaryCBCompatible::kNo)
+ , fIsProtected(GrProtected::kNo)
+ , fSurfaceProps(0, kUnknown_SkPixelGeometry) {
+ }
+
+ SkSurfaceCharacterization(SkSurfaceCharacterization&&) = default;
+ SkSurfaceCharacterization& operator=(SkSurfaceCharacterization&&) = default;
+
+ SkSurfaceCharacterization(const SkSurfaceCharacterization&) = default;
+ SkSurfaceCharacterization& operator=(const SkSurfaceCharacterization& other) = default;
+ bool operator==(const SkSurfaceCharacterization& other) const;
+ bool operator!=(const SkSurfaceCharacterization& other) const {
+ return !(*this == other);
+ }
+
+ /*
+ * Return a new surface characterization with the only difference being a different width
+ * and height
+ */
+ SkSurfaceCharacterization createResized(int width, int height) const;
+
+ /*
+ * Return a new surface characterization with only a replaced color space
+ */
+ SkSurfaceCharacterization createColorSpace(sk_sp<SkColorSpace>) const;
+
+ /*
+ * Return a new surface characterization with the backend format replaced. A colorType
+ * must also be supplied to indicate the interpretation of the new format.
+ */
+ SkSurfaceCharacterization createBackendFormat(SkColorType colorType,
+ const GrBackendFormat& backendFormat) const;
+
+ /*
+ * Return a new surface characterization with just a different use of FBO0 (in GL)
+ */
+ SkSurfaceCharacterization createFBO0(bool usesGLFBO0) const;
+
+ GrContextThreadSafeProxy* contextInfo() const { return fContextInfo.get(); }
+ sk_sp<GrContextThreadSafeProxy> refContextInfo() const { return fContextInfo; }
+ size_t cacheMaxResourceBytes() const { return fCacheMaxResourceBytes; }
+
+ bool isValid() const { return kUnknown_SkColorType != fImageInfo.colorType(); }
+
+ const SkImageInfo& imageInfo() const { return fImageInfo; }
+ const GrBackendFormat& backendFormat() const { return fBackendFormat; }
+ GrSurfaceOrigin origin() const { return fOrigin; }
+ SkISize dimensions() const { return fImageInfo.dimensions(); }
+ int width() const { return fImageInfo.width(); }
+ int height() const { return fImageInfo.height(); }
+ SkColorType colorType() const { return fImageInfo.colorType(); }
+ int sampleCount() const { return fSampleCnt; }
+ bool isTextureable() const { return Textureable::kYes == fIsTextureable; }
+ bool isMipMapped() const { return MipMapped::kYes == fIsMipMapped; }
+ bool usesGLFBO0() const { return UsesGLFBO0::kYes == fUsesGLFBO0; }
+ bool vkRTSupportsInputAttachment() const {
+ return VkRTSupportsInputAttachment::kYes == fVkRTSupportsInputAttachment;
+ }
+ bool vulkanSecondaryCBCompatible() const {
+ return VulkanSecondaryCBCompatible::kYes == fVulkanSecondaryCBCompatible;
+ }
+ GrProtected isProtected() const { return fIsProtected; }
+ SkColorSpace* colorSpace() const { return fImageInfo.colorSpace(); }
+ sk_sp<SkColorSpace> refColorSpace() const { return fImageInfo.refColorSpace(); }
+ const SkSurfaceProps& surfaceProps()const { return fSurfaceProps; }
+
+ // Is the provided backend texture compatible with this surface characterization?
+ bool isCompatible(const GrBackendTexture&) const;
+
+private:
+ friend class SkSurface_Gpu; // for 'set' & 'config'
+ friend class GrVkSecondaryCBDrawContext; // for 'set' & 'config'
+ friend class GrContextThreadSafeProxy; // for private ctor
+ friend class SkDeferredDisplayListRecorder; // for 'config'
+ friend class SkSurface; // for 'config'
+
+ SkDEBUGCODE(void validate() const;)
+
+ SkSurfaceCharacterization(sk_sp<GrContextThreadSafeProxy> contextInfo,
+ size_t cacheMaxResourceBytes,
+ const SkImageInfo& ii,
+ const GrBackendFormat& backendFormat,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ Textureable isTextureable,
+ MipMapped isMipMapped,
+ UsesGLFBO0 usesGLFBO0,
+ VkRTSupportsInputAttachment vkRTSupportsInputAttachment,
+ VulkanSecondaryCBCompatible vulkanSecondaryCBCompatible,
+ GrProtected isProtected,
+ const SkSurfaceProps& surfaceProps)
+ : fContextInfo(std::move(contextInfo))
+ , fCacheMaxResourceBytes(cacheMaxResourceBytes)
+ , fImageInfo(ii)
+ , fBackendFormat(backendFormat)
+ , fOrigin(origin)
+ , fSampleCnt(sampleCnt)
+ , fIsTextureable(isTextureable)
+ , fIsMipMapped(isMipMapped)
+ , fUsesGLFBO0(usesGLFBO0)
+ , fVkRTSupportsInputAttachment(vkRTSupportsInputAttachment)
+ , fVulkanSecondaryCBCompatible(vulkanSecondaryCBCompatible)
+ , fIsProtected(isProtected)
+ , fSurfaceProps(surfaceProps) {
+ if (fSurfaceProps.flags() & SkSurfaceProps::kDynamicMSAA_Flag) {
+ // Dynamic MSAA is not currently supported with DDL.
+ *this = {};
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ void set(sk_sp<GrContextThreadSafeProxy> contextInfo,
+ size_t cacheMaxResourceBytes,
+ const SkImageInfo& ii,
+ const GrBackendFormat& backendFormat,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ Textureable isTextureable,
+ MipMapped isMipMapped,
+ UsesGLFBO0 usesGLFBO0,
+ VkRTSupportsInputAttachment vkRTSupportsInputAttachment,
+ VulkanSecondaryCBCompatible vulkanSecondaryCBCompatible,
+ GrProtected isProtected,
+ const SkSurfaceProps& surfaceProps) {
+ if (surfaceProps.flags() & SkSurfaceProps::kDynamicMSAA_Flag) {
+ // Dynamic MSAA is not currently supported with DDL.
+ *this = {};
+ } else {
+ fContextInfo = contextInfo;
+ fCacheMaxResourceBytes = cacheMaxResourceBytes;
+
+ fImageInfo = ii;
+ fBackendFormat = backendFormat;
+ fOrigin = origin;
+ fSampleCnt = sampleCnt;
+ fIsTextureable = isTextureable;
+ fIsMipMapped = isMipMapped;
+ fUsesGLFBO0 = usesGLFBO0;
+ fVkRTSupportsInputAttachment = vkRTSupportsInputAttachment;
+ fVulkanSecondaryCBCompatible = vulkanSecondaryCBCompatible;
+ fIsProtected = isProtected;
+ fSurfaceProps = surfaceProps;
+ }
+ SkDEBUGCODE(this->validate());
+ }
+
+ sk_sp<GrContextThreadSafeProxy> fContextInfo;
+ size_t fCacheMaxResourceBytes;
+
+ SkImageInfo fImageInfo;
+ GrBackendFormat fBackendFormat;
+ GrSurfaceOrigin fOrigin;
+ int fSampleCnt;
+ Textureable fIsTextureable;
+ MipMapped fIsMipMapped;
+ UsesGLFBO0 fUsesGLFBO0;
+ VkRTSupportsInputAttachment fVkRTSupportsInputAttachment;
+ VulkanSecondaryCBCompatible fVulkanSecondaryCBCompatible;
+ GrProtected fIsProtected;
+ SkSurfaceProps fSurfaceProps;
+};
+
+#else// !defined(SK_GANESH)
+class GrBackendFormat;
+
+class SK_API SkSurfaceCharacterization {
+public:
+ SkSurfaceCharacterization() : fSurfaceProps(0, kUnknown_SkPixelGeometry) { }
+
+ SkSurfaceCharacterization createResized(int width, int height) const {
+ return *this;
+ }
+
+ SkSurfaceCharacterization createColorSpace(sk_sp<SkColorSpace>) const {
+ return *this;
+ }
+
+ SkSurfaceCharacterization createBackendFormat(SkColorType, const GrBackendFormat&) const {
+ return *this;
+ }
+
+ SkSurfaceCharacterization createFBO0(bool usesGLFBO0) const {
+ return *this;
+ }
+
+ bool operator==(const SkSurfaceCharacterization& other) const { return false; }
+ bool operator!=(const SkSurfaceCharacterization& other) const {
+ return !(*this == other);
+ }
+
+ size_t cacheMaxResourceBytes() const { return 0; }
+
+ bool isValid() const { return false; }
+
+ int width() const { return 0; }
+ int height() const { return 0; }
+ int stencilCount() const { return 0; }
+ bool isTextureable() const { return false; }
+ bool isMipMapped() const { return false; }
+ bool usesGLFBO0() const { return false; }
+ bool vkRTSupportsAttachmentInput() const { return false; }
+ bool vulkanSecondaryCBCompatible() const { return false; }
+ SkColorSpace* colorSpace() const { return nullptr; }
+ sk_sp<SkColorSpace> refColorSpace() const { return nullptr; }
+ const SkSurfaceProps& surfaceProps()const { return fSurfaceProps; }
+
+private:
+ SkSurfaceProps fSurfaceProps;
+};
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSurfaceProps.h b/gfx/skia/skia/include/core/SkSurfaceProps.h
new file mode 100644
index 0000000000..357af25dec
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSurfaceProps.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfaceProps_DEFINED
+#define SkSurfaceProps_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+
+/**
+ * Description of how the LCD strips are arranged for each pixel. If this is unknown, or the
+ * pixels are meant to be "portable" and/or transformed before showing (e.g. rotated, scaled)
+ * then use kUnknown_SkPixelGeometry.
+ */
+enum SkPixelGeometry {
+ kUnknown_SkPixelGeometry,
+ kRGB_H_SkPixelGeometry,
+ kBGR_H_SkPixelGeometry,
+ kRGB_V_SkPixelGeometry,
+ kBGR_V_SkPixelGeometry,
+};
+
+// Returns true iff geo is a known geometry and is RGB.
+static inline bool SkPixelGeometryIsRGB(SkPixelGeometry geo) {
+ return kRGB_H_SkPixelGeometry == geo || kRGB_V_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is BGR.
+static inline bool SkPixelGeometryIsBGR(SkPixelGeometry geo) {
+ return kBGR_H_SkPixelGeometry == geo || kBGR_V_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is horizontal.
+static inline bool SkPixelGeometryIsH(SkPixelGeometry geo) {
+ return kRGB_H_SkPixelGeometry == geo || kBGR_H_SkPixelGeometry == geo;
+}
+
+// Returns true iff geo is a known geometry and is vertical.
+static inline bool SkPixelGeometryIsV(SkPixelGeometry geo) {
+ return kRGB_V_SkPixelGeometry == geo || kBGR_V_SkPixelGeometry == geo;
+}
+
+/**
+ * Describes properties and constraints of a given SkSurface. The rendering engine can parse these
+ * during drawing, and can sometimes optimize its performance (e.g. disabling an expensive
+ * feature).
+ */
+class SK_API SkSurfaceProps {
+public:
+ enum Flags {
+ kUseDeviceIndependentFonts_Flag = 1 << 0,
+ // Use internal MSAA to render to non-MSAA GPU surfaces.
+ kDynamicMSAA_Flag = 1 << 1
+ };
+ /** Deprecated alias used by Chromium. Will be removed. */
+ static const Flags kUseDistanceFieldFonts_Flag = kUseDeviceIndependentFonts_Flag;
+
+ /** No flags, unknown pixel geometry. */
+ SkSurfaceProps();
+ SkSurfaceProps(uint32_t flags, SkPixelGeometry);
+
+ SkSurfaceProps(const SkSurfaceProps&);
+ SkSurfaceProps& operator=(const SkSurfaceProps&);
+
+ SkSurfaceProps cloneWithPixelGeometry(SkPixelGeometry newPixelGeometry) const {
+ return SkSurfaceProps(fFlags, newPixelGeometry);
+ }
+
+ uint32_t flags() const { return fFlags; }
+ SkPixelGeometry pixelGeometry() const { return fPixelGeometry; }
+
+ bool isUseDeviceIndependentFonts() const {
+ return SkToBool(fFlags & kUseDeviceIndependentFonts_Flag);
+ }
+
+ bool operator==(const SkSurfaceProps& that) const {
+ return fFlags == that.fFlags && fPixelGeometry == that.fPixelGeometry;
+ }
+
+ bool operator!=(const SkSurfaceProps& that) const {
+ return !(*this == that);
+ }
+
+private:
+ uint32_t fFlags;
+ SkPixelGeometry fPixelGeometry;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkSwizzle.h b/gfx/skia/skia/include/core/SkSwizzle.h
new file mode 100644
index 0000000000..61e93b2da7
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkSwizzle.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzle_DEFINED
+#define SkSwizzle_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ Swizzles byte order of |count| 32-bit pixels, swapping R and B.
+ (RGBA <-> BGRA)
+*/
+SK_API void SkSwapRB(uint32_t* dest, const uint32_t* src, int count);
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTextBlob.h b/gfx/skia/skia/include/core/SkTextBlob.h
new file mode 100644
index 0000000000..8f6cb01c04
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTextBlob.h
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextBlob_DEFINED
+#define SkTextBlob_DEFINED
+
+#include "include/core/SkFont.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <atomic>
+
+struct SkRSXform;
+struct SkSerialProcs;
+struct SkDeserialProcs;
+
+namespace sktext {
+class GlyphRunList;
+}
+
+/** \class SkTextBlob
+ SkTextBlob combines multiple text runs into an immutable container. Each text
+ run consists of glyphs, SkPaint, and position. Only parts of SkPaint related to
+ fonts and text rendering are used by run.
+*/
+class SK_API SkTextBlob final : public SkNVRefCnt<SkTextBlob> {
+private:
+ class RunRecord;
+
+public:
+
+ /** Returns conservative bounding box. Uses SkPaint associated with each glyph to
+ determine glyph bounds, and unions all bounds. Returned bounds may be
+ larger than the bounds of all glyphs in runs.
+
+ @return conservative bounding box
+ */
+ const SkRect& bounds() const { return fBounds; }
+
+ /** Returns a non-zero value unique among all text blobs.
+
+ @return identifier for SkTextBlob
+ */
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ /** Returns the number of intervals that intersect bounds.
+ bounds describes a pair of lines parallel to the text advance.
+ The return count is zero or a multiple of two, and is at most twice the number of glyphs in
+ the blob.
+
+ Pass nullptr for intervals to determine the size of the interval array.
+
+ Runs within the blob that contain SkRSXform are ignored when computing intercepts.
+
+ @param bounds lower and upper line parallel to the advance
+ @param intervals returned intersections; may be nullptr
+ @param paint specifies stroking, SkPathEffect that affects the result; may be nullptr
+ @return number of intersections; may be zero
+ */
+ int getIntercepts(const SkScalar bounds[2], SkScalar intervals[],
+ const SkPaint* paint = nullptr) const;
+
+ /** Creates SkTextBlob with a single run.
+
+ font contains attributes used to define the run text.
+
+ When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32, this function uses the default
+ character-to-glyph mapping from the SkTypeface in font. It does not
+ perform typeface fallback for characters not found in the SkTypeface.
+ It does not perform kerning or other complex shaping; glyphs are
+ positioned based on their default advances.
+
+ @param text character code points or glyphs drawn
+ @param byteLength byte length of text array
+ @param font text size, typeface, text scale, and so on, used to draw
+ @param encoding text encoding used in the text array
+ @return SkTextBlob constructed from one run
+ */
+ static sk_sp<SkTextBlob> MakeFromText(const void* text, size_t byteLength, const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+
+ /** Creates SkTextBlob with a single run. string meaning depends on SkTextEncoding;
+ by default, string is encoded as UTF-8.
+
+ font contains attributes used to define the run text.
+
+ When encoding is SkTextEncoding::kUTF8, SkTextEncoding::kUTF16, or
+ SkTextEncoding::kUTF32, this function uses the default
+ character-to-glyph mapping from the SkTypeface in font. It does not
+ perform typeface fallback for characters not found in the SkTypeface.
+ It does not perform kerning or other complex shaping; glyphs are
+ positioned based on their default advances.
+
+ @param string character code points or glyphs drawn
+ @param font text size, typeface, text scale, and so on, used to draw
+ @param encoding text encoding used in the text array
+ @return SkTextBlob constructed from one run
+ */
+ static sk_sp<SkTextBlob> MakeFromString(const char* string, const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8) {
+ if (!string) {
+ return nullptr;
+ }
+ return MakeFromText(string, strlen(string), font, encoding);
+ }
+
+ /** Returns a textblob built from a single run of text with x-positions and a single y value.
+ This is equivalent to using SkTextBlobBuilder and calling allocRunPosH().
+ Returns nullptr if byteLength is zero.
+
+ @param text character code points or glyphs drawn (based on encoding)
+ @param byteLength byte length of text array
+ @param xpos array of x-positions, must contain values for all of the character points.
+ @param constY shared y-position for each character point, to be paired with each xpos.
+ @param font SkFont used for this run
+ @param encoding specifies the encoding of the text array.
+ @return new textblob or nullptr
+ */
+ static sk_sp<SkTextBlob> MakeFromPosTextH(const void* text, size_t byteLength,
+ const SkScalar xpos[], SkScalar constY, const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+
+ /** Returns a textblob built from a single run of text with positions.
+ This is equivalent to using SkTextBlobBuilder and calling allocRunPos().
+ Returns nullptr if byteLength is zero.
+
+ @param text character code points or glyphs drawn (based on encoding)
+ @param byteLength byte length of text array
+ @param pos array of positions, must contain values for all of the character points.
+ @param font SkFont used for this run
+ @param encoding specifies the encoding of the text array.
+ @return new textblob or nullptr
+ */
+ static sk_sp<SkTextBlob> MakeFromPosText(const void* text, size_t byteLength,
+ const SkPoint pos[], const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+
+ static sk_sp<SkTextBlob> MakeFromRSXform(const void* text, size_t byteLength,
+ const SkRSXform xform[], const SkFont& font,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+
+ /** Writes data to allow later reconstruction of SkTextBlob. memory points to storage
+ to receive the encoded data, and memory_size describes the size of storage.
+ Returns bytes used if provided storage is large enough to hold all data;
+ otherwise, returns zero.
+
+ procs.fTypefaceProc permits supplying a custom function to encode SkTypeface.
+ If procs.fTypefaceProc is nullptr, default encoding is used. procs.fTypefaceCtx
+ may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc
+ is called with a pointer to SkTypeface and user context.
+
+ @param procs custom serial data encoders; may be nullptr
+ @param memory storage for data
+ @param memory_size size of storage
+ @return bytes written, or zero if required storage is larger than memory_size
+
+ example: https://fiddle.skia.org/c/@TextBlob_serialize
+ */
+ size_t serialize(const SkSerialProcs& procs, void* memory, size_t memory_size) const;
+
+ /** Returns storage containing SkData describing SkTextBlob, using optional custom
+ encoders.
+
+ procs.fTypefaceProc permits supplying a custom function to encode SkTypeface.
+ If procs.fTypefaceProc is nullptr, default encoding is used. procs.fTypefaceCtx
+ may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc
+ is called with a pointer to SkTypeface and user context.
+
+ @param procs custom serial data encoders; may be nullptr
+ @return storage containing serialized SkTextBlob
+
+ example: https://fiddle.skia.org/c/@TextBlob_serialize_2
+ */
+ sk_sp<SkData> serialize(const SkSerialProcs& procs) const;
+
+ /** Recreates SkTextBlob that was serialized into data. Returns constructed SkTextBlob
+ if successful; otherwise, returns nullptr. Fails if size is smaller than
+ required data length, or if data does not permit constructing valid SkTextBlob.
+
+ procs.fTypefaceProc permits supplying a custom function to decode SkTypeface.
+ If procs.fTypefaceProc is nullptr, default decoding is used. procs.fTypefaceCtx
+ may be used to provide user context to procs.fTypefaceProc; procs.fTypefaceProc
+ is called with a pointer to SkTypeface data, data byte length, and user context.
+
+ @param data pointer for serial data
+ @param size size of data
+ @param procs custom serial data decoders; may be nullptr
+ @return SkTextBlob constructed from data in memory
+ */
+ static sk_sp<SkTextBlob> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs& procs);
+
+ class SK_API Iter {
+ public:
+ struct Run {
+ SkTypeface* fTypeface;
+ int fGlyphCount;
+ const uint16_t* fGlyphIndices;
+#ifdef SK_UNTIL_CRBUG_1187654_IS_FIXED
+ const uint32_t* fClusterIndex_forTest;
+ int fUtf8Size_forTest;
+ const char* fUtf8_forTest;
+#endif
+ };
+
+ Iter(const SkTextBlob&);
+
+ /**
+ * Returns true for each "run" inside the textblob, setting the Run fields (if not null).
+ * If this returns false, there are no more runs, and the Run parameter will be ignored.
+ */
+ bool next(Run*);
+
+ // Experimental, DO NOT USE, will change/go-away
+ struct ExperimentalRun {
+ SkFont font;
+ int count;
+ const uint16_t* glyphs;
+ const SkPoint* positions;
+ };
+ bool experimentalNext(ExperimentalRun*);
+
+ private:
+ const RunRecord* fRunRecord;
+ };
+
+private:
+ friend class SkNVRefCnt<SkTextBlob>;
+
+ enum GlyphPositioning : uint8_t;
+
+ explicit SkTextBlob(const SkRect& bounds);
+
+ ~SkTextBlob();
+
+ // Memory for objects of this class is created with sk_malloc rather than operator new and must
+ // be freed with sk_free.
+ void operator delete(void* p);
+ void* operator new(size_t);
+ void* operator new(size_t, void* p);
+
+ static unsigned ScalarsPerGlyph(GlyphPositioning pos);
+
+ // Call when this blob is part of the key to a cache entry. This allows the cache
+ // to know automatically those entries can be purged when this SkTextBlob is deleted.
+ void notifyAddedToCache(uint32_t cacheID) const {
+ fCacheID.store(cacheID);
+ }
+
+ friend class sktext::GlyphRunList;
+ friend class SkTextBlobBuilder;
+ friend class SkTextBlobPriv;
+ friend class SkTextBlobRunIterator;
+
+ const SkRect fBounds;
+ const uint32_t fUniqueID;
+ mutable std::atomic<uint32_t> fCacheID;
+
+ SkDEBUGCODE(size_t fStorageSize;)
+
+ // The actual payload resides in externally-managed storage, following the object.
+ // (see the .cpp for more details)
+
+ using INHERITED = SkRefCnt;
+};
+
+/** \class SkTextBlobBuilder
+ Helper class for constructing SkTextBlob.
+*/
+class SK_API SkTextBlobBuilder {
+public:
+
+ /** Constructs empty SkTextBlobBuilder. By default, SkTextBlobBuilder has no runs.
+
+ @return empty SkTextBlobBuilder
+
+ example: https://fiddle.skia.org/c/@TextBlobBuilder_empty_constructor
+ */
+ SkTextBlobBuilder();
+
+ /** Deletes data allocated internally by SkTextBlobBuilder.
+ */
+ ~SkTextBlobBuilder();
+
+ /** Returns SkTextBlob built from runs of glyphs added by builder. Returned
+ SkTextBlob is immutable; it may be copied, but its contents may not be altered.
+ Returns nullptr if no runs of glyphs were added by builder.
+
+ Resets SkTextBlobBuilder to its initial empty state, allowing it to be
+ reused to build a new set of runs.
+
+ @return SkTextBlob or nullptr
+
+ example: https://fiddle.skia.org/c/@TextBlobBuilder_make
+ */
+ sk_sp<SkTextBlob> make();
+
+ /** \struct SkTextBlobBuilder::RunBuffer
+ RunBuffer supplies storage for glyphs and positions within a run.
+
+ A run is a sequence of glyphs sharing font metrics and positioning.
+ Each run may position its glyphs in one of three ways:
+ by specifying where the first glyph is drawn, and allowing font metrics to
+ determine the advance to subsequent glyphs; by specifying a baseline, and
+ the position on that baseline for each glyph in run; or by providing SkPoint
+ array, one per glyph.
+ */
+ struct RunBuffer {
+ SkGlyphID* glyphs; //!< storage for glyph indexes in run
+ SkScalar* pos; //!< storage for glyph positions in run
+ char* utf8text; //!< storage for text UTF-8 code units in run
+ uint32_t* clusters; //!< storage for glyph clusters (index of UTF-8 code unit)
+
+ // Helpers, since the "pos" field can be different types (always some number of floats).
+ SkPoint* points() const { return reinterpret_cast<SkPoint*>(pos); }
+ SkRSXform* xforms() const { return reinterpret_cast<SkRSXform*>(pos); }
+ };
+
+ /** Returns run with storage for glyphs. Caller must write count glyphs to
+ RunBuffer::glyphs before next call to SkTextBlobBuilder.
+
+ RunBuffer::pos, RunBuffer::utf8text, and RunBuffer::clusters should be ignored.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned on a baseline at (x, y), using font metrics to
+ determine their relative placement.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from (x, y) and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param x horizontal offset within the blob
+ @param y vertical offset within the blob
+ @param bounds optional run bounding box
+ @return writable glyph buffer
+ */
+ const RunBuffer& allocRun(const SkFont& font, int count, SkScalar x, SkScalar y,
+ const SkRect* bounds = nullptr);
+
+ /** Returns run with storage for glyphs and positions along baseline. Caller must
+ write count glyphs to RunBuffer::glyphs and count scalars to RunBuffer::pos
+ before next call to SkTextBlobBuilder.
+
+ RunBuffer::utf8text and RunBuffer::clusters should be ignored.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned on a baseline at y, using x-axis positions written by
+ caller to RunBuffer::pos.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from y, RunBuffer::pos, and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param y vertical offset within the blob
+ @param bounds optional run bounding box
+ @return writable glyph buffer and x-axis position buffer
+ */
+ const RunBuffer& allocRunPosH(const SkFont& font, int count, SkScalar y,
+ const SkRect* bounds = nullptr);
+
+ /** Returns run with storage for glyphs and SkPoint positions. Caller must
+ write count glyphs to RunBuffer::glyphs and count SkPoint to RunBuffer::pos
+ before next call to SkTextBlobBuilder.
+
+ RunBuffer::utf8text and RunBuffer::clusters should be ignored.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned using SkPoint written by caller to RunBuffer::pos, using
+ two scalar values for each SkPoint.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from RunBuffer::pos, and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param bounds optional run bounding box
+ @return writable glyph buffer and SkPoint buffer
+ */
+ const RunBuffer& allocRunPos(const SkFont& font, int count,
+ const SkRect* bounds = nullptr);
+
+ // RunBuffer.pos points to SkRSXform array
+ const RunBuffer& allocRunRSXform(const SkFont& font, int count);
+
+ /** Returns run with storage for glyphs, text, and clusters. Caller must
+ write count glyphs to RunBuffer::glyphs, textByteCount UTF-8 code units
+ into RunBuffer::utf8text, and count monotonic indexes into utf8text
+ into RunBuffer::clusters before next call to SkTextBlobBuilder.
+
+ RunBuffer::pos should be ignored.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned on a baseline at (x, y), using font metrics to
+ determine their relative placement.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from (x, y) and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param x horizontal offset within the blob
+ @param y vertical offset within the blob
+ @param textByteCount number of UTF-8 code units
+ @param bounds optional run bounding box
+ @return writable glyph buffer, text buffer, and cluster buffer
+ */
+ const RunBuffer& allocRunText(const SkFont& font, int count, SkScalar x, SkScalar y,
+ int textByteCount, const SkRect* bounds = nullptr);
+
+ /** Returns run with storage for glyphs, positions along baseline, text,
+ and clusters. Caller must write count glyphs to RunBuffer::glyphs,
+ count scalars to RunBuffer::pos, textByteCount UTF-8 code units into
+ RunBuffer::utf8text, and count monotonic indexes into utf8text into
+ RunBuffer::clusters before next call to SkTextBlobBuilder.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned on a baseline at y, using x-axis positions written by
+ caller to RunBuffer::pos.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from y, RunBuffer::pos, and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param y vertical offset within the blob
+ @param textByteCount number of UTF-8 code units
+ @param bounds optional run bounding box
+ @return writable glyph buffer, x-axis position buffer, text buffer, and cluster buffer
+ */
+ const RunBuffer& allocRunTextPosH(const SkFont& font, int count, SkScalar y, int textByteCount,
+ const SkRect* bounds = nullptr);
+
+ /** Returns run with storage for glyphs, SkPoint positions, text, and
+ clusters. Caller must write count glyphs to RunBuffer::glyphs, count
+ SkPoint to RunBuffer::pos, textByteCount UTF-8 code units into
+ RunBuffer::utf8text, and count monotonic indexes into utf8text into
+ RunBuffer::clusters before next call to SkTextBlobBuilder.
+
+ Glyphs share metrics in font.
+
+ Glyphs are positioned using SkPoint written by caller to RunBuffer::pos, using
+ two scalar values for each SkPoint.
+
+ bounds defines an optional bounding box, used to suppress drawing when SkTextBlob
+ bounds does not intersect SkSurface bounds. If bounds is nullptr, SkTextBlob bounds
+ is computed from RunBuffer::pos, and RunBuffer::glyphs metrics.
+
+ @param font SkFont used for this run
+ @param count number of glyphs
+ @param textByteCount number of UTF-8 code units
+ @param bounds optional run bounding box
+ @return writable glyph buffer, SkPoint buffer, text buffer, and cluster buffer
+ */
+ const RunBuffer& allocRunTextPos(const SkFont& font, int count, int textByteCount,
+ const SkRect* bounds = nullptr);
+
+ // RunBuffer.pos points to SkRSXform array
+ const RunBuffer& allocRunTextRSXform(const SkFont& font, int count, int textByteCount,
+ const SkRect* bounds = nullptr);
+
+private:
+ void reserve(size_t size);
+ void allocInternal(const SkFont& font, SkTextBlob::GlyphPositioning positioning,
+ int count, int textBytes, SkPoint offset, const SkRect* bounds);
+ bool mergeRun(const SkFont& font, SkTextBlob::GlyphPositioning positioning,
+ uint32_t count, SkPoint offset);
+ void updateDeferredBounds();
+
+ static SkRect ConservativeRunBounds(const SkTextBlob::RunRecord&);
+ static SkRect TightRunBounds(const SkTextBlob::RunRecord&);
+
+ friend class SkTextBlobPriv;
+ friend class SkTextBlobBuilderPriv;
+
+ skia_private::AutoTMalloc<uint8_t> fStorage;
+ size_t fStorageSize;
+ size_t fStorageUsed;
+
+ SkRect fBounds;
+ int fRunCount;
+ bool fDeferredBounds;
+ size_t fLastRun; // index into fStorage
+
+ RunBuffer fCurrentRunBuffer;
+};
+
+#endif // SkTextBlob_DEFINED
diff --git a/gfx/skia/skia/include/core/SkTextureCompressionType.h b/gfx/skia/skia/include/core/SkTextureCompressionType.h
new file mode 100644
index 0000000000..e9b441378d
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTextureCompressionType.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextureCompressionType_DEFINED
+#define SkTextureCompressionType_DEFINED
+/*
+ * Skia | GL_COMPRESSED_* | MTLPixelFormat* | VK_FORMAT_*_BLOCK
+ * --------------------------------------------------------------------------------------
+ * kETC2_RGB8_UNORM | ETC1_RGB8 | ETC2_RGB8 (iOS-only) | ETC2_R8G8B8_UNORM
+ * | RGB8_ETC2 | |
+ * --------------------------------------------------------------------------------------
+ * kBC1_RGB8_UNORM | RGB_S3TC_DXT1_EXT | N/A | BC1_RGB_UNORM
+ * --------------------------------------------------------------------------------------
+ * kBC1_RGBA8_UNORM | RGBA_S3TC_DXT1_EXT | BC1_RGBA (macOS-only)| BC1_RGBA_UNORM
+ */
+enum class SkTextureCompressionType {
+ kNone,
+ kETC2_RGB8_UNORM,
+
+ kBC1_RGB8_UNORM,
+ kBC1_RGBA8_UNORM,
+ kLast = kBC1_RGBA8_UNORM,
+ kETC1_RGB8 = kETC2_RGB8_UNORM,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTileMode.h b/gfx/skia/skia/include/core/SkTileMode.h
new file mode 100644
index 0000000000..8a9d020958
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTileMode.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTileModes_DEFINED
+#define SkTileModes_DEFINED
+
+#include "include/core/SkTypes.h"
+
+enum class SkTileMode {
+ /**
+ * Replicate the edge color if the shader draws outside of its
+ * original bounds.
+ */
+ kClamp,
+
+ /**
+ * Repeat the shader's image horizontally and vertically.
+ */
+ kRepeat,
+
+ /**
+ * Repeat the shader's image horizontally and vertically, alternating
+ * mirror images so that adjacent images always seam.
+ */
+ kMirror,
+
+ /**
+ * Only draw within the original domain, return transparent-black everywhere else.
+ */
+ kDecal,
+
+ kLastTileMode = kDecal,
+};
+
+static constexpr int kSkTileModeCount = static_cast<int>(SkTileMode::kLastTileMode) + 1;
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTime.h b/gfx/skia/skia/include/core/SkTime.h
new file mode 100644
index 0000000000..9135c7e113
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTime.h
@@ -0,0 +1,63 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTime_DEFINED
+#define SkTime_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMacros.h"
+
+#include <cinttypes>
+
+class SkString;
+
+/** \class SkTime
+ Platform-implemented utilities to return time of day, and millisecond counter.
+*/
+class SK_API SkTime {
+public:
+ struct DateTime {
+ int16_t fTimeZoneMinutes; // The number of minutes that GetDateTime()
+ // is ahead of or behind UTC.
+ uint16_t fYear; //!< e.g. 2005
+ uint8_t fMonth; //!< 1..12
+ uint8_t fDayOfWeek; //!< 0..6, 0==Sunday
+ uint8_t fDay; //!< 1..31
+ uint8_t fHour; //!< 0..23
+ uint8_t fMinute; //!< 0..59
+ uint8_t fSecond; //!< 0..59
+
+ void toISO8601(SkString* dst) const;
+ };
+ static void GetDateTime(DateTime*);
+
+ static double GetSecs() { return GetNSecs() * 1e-9; }
+ static double GetMSecs() { return GetNSecs() * 1e-6; }
+ static double GetNSecs();
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAutoTime {
+public:
+ // The label is not deep-copied, so its address must remain valid for the
+ // lifetime of this object
+ SkAutoTime(const char* label = nullptr)
+ : fLabel(label)
+ , fNow(SkTime::GetMSecs()) {}
+ ~SkAutoTime() {
+ uint64_t dur = static_cast<uint64_t>(SkTime::GetMSecs() - fNow);
+ SkDebugf("%s %" PRIu64 "\n", fLabel ? fLabel : "", dur);
+ }
+private:
+ const char* fLabel;
+ double fNow;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTraceMemoryDump.h b/gfx/skia/skia/include/core/SkTraceMemoryDump.h
new file mode 100644
index 0000000000..7837bfbd89
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTraceMemoryDump.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTraceMemoryDump_DEFINED
+#define SkTraceMemoryDump_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkDiscardableMemory;
+
+/**
+ * Interface for memory tracing.
+ * This interface is meant to be passed as argument to the memory dump methods of Skia objects.
+ * The implementation of this interface is provided by the embedder.
+ */
+class SK_API SkTraceMemoryDump {
+public:
+ /**
+ * Enum to specify the level of the requested details for the dump from the Skia objects.
+ */
+ enum LevelOfDetail {
+ // Dump only the minimal details to get the total memory usage (Usually just the totals).
+ kLight_LevelOfDetail,
+
+ // Dump the detailed breakdown of the objects in the caches.
+ kObjectsBreakdowns_LevelOfDetail
+ };
+
+ /**
+ * Appends a new memory dump (i.e. a row) to the trace memory infrastructure.
+ * If dumpName does not exist yet, a new one is created. Otherwise, a new column is appended to
+ * the previously created dump.
+ * Arguments:
+ * dumpName: an absolute, slash-separated, name for the item being dumped
+ * e.g., "skia/CacheX/EntryY".
+ * valueName: a string indicating the name of the column.
+ * e.g., "size", "active_size", "number_of_objects".
+ * This string is supposed to be long lived and is NOT copied.
+ * units: a string indicating the units for the value.
+ * e.g., "bytes", "objects".
+ * This string is supposed to be long lived and is NOT copied.
+ * value: the actual value being dumped.
+ */
+ virtual void dumpNumericValue(const char* dumpName,
+ const char* valueName,
+ const char* units,
+ uint64_t value) = 0;
+
+ virtual void dumpStringValue(const char* /*dumpName*/,
+ const char* /*valueName*/,
+ const char* /*value*/) { }
+
+ /**
+ * Sets the memory backing for an existing dump.
+ * backingType and backingObjectId are used by the embedder to associate the memory dumped via
+ * dumpNumericValue with the corresponding dump that backs the memory.
+ */
+ virtual void setMemoryBacking(const char* dumpName,
+ const char* backingType,
+ const char* backingObjectId) = 0;
+
+ /**
+ * Specialization for memory backed by discardable memory.
+ */
+ virtual void setDiscardableMemoryBacking(
+ const char* dumpName,
+ const SkDiscardableMemory& discardableMemoryObject) = 0;
+
+ /**
+ * Returns the type of details requested in the dump. The granularity of the dump is supposed to
+ * match the LevelOfDetail argument. The level of detail must not affect the total size
+ * reported, but only granularity of the child entries.
+ */
+ virtual LevelOfDetail getRequestedDetails() const = 0;
+
+ /**
+ * Returns true if we should dump wrapped objects. Wrapped objects come from outside Skia, and
+ * may be independently tracked there.
+ */
+ virtual bool shouldDumpWrappedObjects() const { return true; }
+
+ /**
+ * If shouldDumpWrappedObjects() returns true then this function will be called to populate
+ * the output with information on whether the item being dumped is a wrapped object.
+ */
+ virtual void dumpWrappedState(const char* /*dumpName*/, bool /*isWrappedObject*/) {}
+
+protected:
+ virtual ~SkTraceMemoryDump() = default;
+ SkTraceMemoryDump() = default;
+ SkTraceMemoryDump(const SkTraceMemoryDump&) = delete;
+ SkTraceMemoryDump& operator=(const SkTraceMemoryDump&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkTypeface.h b/gfx/skia/skia/include/core/SkTypeface.h
new file mode 100644
index 0000000000..e06b9bfa8b
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTypeface.h
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_DEFINED
+#define SkTypeface_DEFINED
+
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontParameters.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkString.h"
+#include "include/private/SkWeakRefCnt.h"
+#include "include/private/base/SkOnce.h"
+
+class SkData;
+class SkDescriptor;
+class SkFontData;
+class SkFontDescriptor;
+class SkScalerContext;
+class SkStream;
+class SkStreamAsset;
+class SkWStream;
+struct SkAdvancedTypefaceMetrics;
+struct SkScalerContextEffects;
+struct SkScalerContextRec;
+
+using SkTypefaceID = uint32_t;
+
+// SkFontID is deprecated, please use SkTypefaceID.
+using SkFontID = SkTypefaceID;
+
+
+/** Machine endian. */
+typedef uint32_t SkFontTableTag;
+
+/** \class SkTypeface
+
+ The SkTypeface class specifies the typeface and intrinsic style of a font.
+ This is used in the paint, along with optionally algorithmic settings like
+ textSize, textSkewX, textScaleX, kFakeBoldText_Mask, to specify
+ how text appears when drawn (and measured).
+
+ Typeface objects are immutable, and so they can be shared between threads.
+*/
+class SK_API SkTypeface : public SkWeakRefCnt {
+public:
+ /** Returns the typeface's intrinsic style attributes. */
+ SkFontStyle fontStyle() const {
+ return fStyle;
+ }
+
+ /** Returns true if style() has the kBold bit set. */
+ bool isBold() const { return fStyle.weight() >= SkFontStyle::kSemiBold_Weight; }
+
+ /** Returns true if style() has the kItalic bit set. */
+ bool isItalic() const { return fStyle.slant() != SkFontStyle::kUpright_Slant; }
+
+ /** Returns true if the typeface claims to be fixed-pitch.
+ * This is a style bit, advance widths may vary even if this returns true.
+ */
+ bool isFixedPitch() const { return fIsFixedPitch; }
+
+ /** Copy into 'coordinates' (allocated by the caller) the design variation coordinates.
+ *
+ * @param coordinates the buffer into which to write the design variation coordinates.
+ * @param coordinateCount the number of entries available through 'coordinates'.
+ *
+ * @return The number of axes, or -1 if there is an error.
+ * If 'coordinates != nullptr' and 'coordinateCount >= numAxes' then 'coordinates' will be
+ * filled with the variation coordinates describing the position of this typeface in design
+ * variation space. It is possible the number of axes can be retrieved but actual position
+ * cannot.
+ */
+ int getVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const;
+
+ /** Copy into 'parameters' (allocated by the caller) the design variation parameters.
+ *
+ * @param parameters the buffer into which to write the design variation parameters.
+     *  @param parameterCount the number of entries available through 'parameters'.
+ *
+ * @return The number of axes, or -1 if there is an error.
+ * If 'parameters != nullptr' and 'parameterCount >= numAxes' then 'parameters' will be
+ * filled with the variation parameters describing the position of this typeface in design
+ * variation space. It is possible the number of axes can be retrieved but actual parameters
+ * cannot.
+ */
+ int getVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const;
+
+ /** Return a 32bit value for this typeface, unique for the underlying font
+ data. Will never return 0.
+ */
+ SkTypefaceID uniqueID() const { return fUniqueID; }
+
+ /** Return the uniqueID for the specified typeface. If the face is null,
+ resolve it to the default font and return its uniqueID. Will never
+ return 0.
+ */
+ static SkTypefaceID UniqueID(const SkTypeface* face);
+
+ /** Returns true if the two typefaces reference the same underlying font,
+ handling either being null (treating null as the default font)
+ */
+ static bool Equal(const SkTypeface* facea, const SkTypeface* faceb);
+
+ /** Returns the default normal typeface, which is never nullptr. */
+ static sk_sp<SkTypeface> MakeDefault();
+
+ /** Creates a new reference to the typeface that most closely matches the
+ requested familyName and fontStyle. This method allows extended font
+ face specifiers as in the SkFontStyle type. Will never return null.
+
+ @param familyName May be NULL. The name of the font family.
+ @param fontStyle The style of the typeface.
+ @return reference to the closest-matching typeface. Call must call
+ unref() when they are done.
+ */
+ static sk_sp<SkTypeface> MakeFromName(const char familyName[], SkFontStyle fontStyle);
+
+ /** Return a new typeface given a file. If the file does not exist, or is
+ not a valid font file, returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromFile(const char path[], int index = 0);
+
+ /** Return a new typeface given a stream. If the stream is
+ not a valid font file, returns nullptr. Ownership of the stream is
+ transferred, so the caller must not reference it again.
+ */
+ static sk_sp<SkTypeface> MakeFromStream(std::unique_ptr<SkStreamAsset> stream, int index = 0);
+
+ /** Return a new typeface given a SkData. If the data is null, or is not a valid font file,
+ * returns nullptr.
+ */
+ static sk_sp<SkTypeface> MakeFromData(sk_sp<SkData>, int index = 0);
+
+ /** Return a new typeface based on this typeface but parameterized as specified in the
+ SkFontArguments. If the SkFontArguments does not supply an argument for a parameter
+ in the font then the value from this typeface will be used as the value for that
+        argument. If the cloned typeface would be exactly the same as this typeface then
+ this typeface may be ref'ed and returned. May return nullptr on failure.
+ */
+ sk_sp<SkTypeface> makeClone(const SkFontArguments&) const;
+
+ /**
+ * A typeface can serialize just a descriptor (names, etc.), or it can also include the
+ * actual font data (which can be large). This enum controls how serialize() decides what
+ * to serialize.
+ */
+ enum class SerializeBehavior {
+ kDoIncludeData,
+ kDontIncludeData,
+ kIncludeDataIfLocal,
+ };
+
+ /** Write a unique signature to a stream, sufficient to reconstruct a
+ typeface referencing the same font when Deserialize is called.
+ */
+ void serialize(SkWStream*, SerializeBehavior = SerializeBehavior::kIncludeDataIfLocal) const;
+
+ /**
+ * Same as serialize(SkWStream*, ...) but returns the serialized data in SkData, instead of
+ * writing it to a stream.
+ */
+ sk_sp<SkData> serialize(SerializeBehavior = SerializeBehavior::kIncludeDataIfLocal) const;
+
+ /** Given the data previously written by serialize(), return a new instance
+ of a typeface referring to the same font. If that font is not available,
+ return nullptr.
+ Does not affect ownership of SkStream.
+ */
+ static sk_sp<SkTypeface> MakeDeserialize(SkStream*);
+
+ /**
+ * Given an array of UTF32 character codes, return their corresponding glyph IDs.
+ *
+ * @param chars pointer to the array of UTF32 chars
+     *  @param count the number of chars and glyphs
+ * @param glyphs returns the corresponding glyph IDs for each character.
+ */
+ void unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const;
+
+ int textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkGlyphID glyphs[], int maxGlyphCount) const;
+
+ /**
+ * Return the glyphID that corresponds to the specified unicode code-point
+ * (in UTF32 encoding). If the unichar is not supported, returns 0.
+ *
+ * This is a short-cut for calling unicharsToGlyphs().
+ */
+ SkGlyphID unicharToGlyph(SkUnichar unichar) const;
+
+ /**
+ * Return the number of glyphs in the typeface.
+ */
+ int countGlyphs() const;
+
+ // Table getters -- may fail if the underlying font format is not organized
+ // as 4-byte tables.
+
+ /** Return the number of tables in the font. */
+ int countTables() const;
+
+ /** Copy into tags[] (allocated by the caller) the list of table tags in
+ * the font, and return the number. This will be the same as CountTables()
+     *  or 0 if an error occurred. If tags == NULL, this only returns the count
+ * (the same as calling countTables()).
+ */
+ int getTableTags(SkFontTableTag tags[]) const;
+
+ /** Given a table tag, return the size of its contents, or 0 if not present
+ */
+ size_t getTableSize(SkFontTableTag) const;
+
+ /** Copy the contents of a table into data (allocated by the caller). Note
+ * that the contents of the table will be in their native endian order
+ * (which for most truetype tables is big endian). If the table tag is
+ * not found, or there is an error copying the data, then 0 is returned.
+ * If this happens, it is possible that some or all of the memory pointed
+     *  to by data may have been written to, even though an error has occurred.
+ *
+ * @param tag The table tag whose contents are to be copied
+ * @param offset The offset in bytes into the table's contents where the
+ * copy should start from.
+ * @param length The number of bytes, starting at offset, of table data
+ * to copy.
+ * @param data storage address where the table contents are copied to
+ * @return the number of bytes actually copied into data. If offset+length
+ * exceeds the table's size, then only the bytes up to the table's
+ * size are actually copied, and this is the value returned. If
+ * offset > the table's size, or tag is not a valid table,
+ * then 0 is returned.
+ */
+ size_t getTableData(SkFontTableTag tag, size_t offset, size_t length,
+ void* data) const;
+
+ /**
+ * Return an immutable copy of the requested font table, or nullptr if that table was
+ * not found. This can sometimes be faster than calling getTableData() twice: once to find
+ * the length, and then again to copy the data.
+ *
+ * @param tag The table tag whose contents are to be copied
+ * @return an immutable copy of the table's data, or nullptr.
+ */
+ sk_sp<SkData> copyTableData(SkFontTableTag tag) const;
+
+ /**
+ * Return the units-per-em value for this typeface, or zero if there is an
+ * error.
+ */
+ int getUnitsPerEm() const;
+
+ /**
+ * Given a run of glyphs, return the associated horizontal adjustments.
+ * Adjustments are in "design units", which are integers relative to the
+ * typeface's units per em (see getUnitsPerEm).
+ *
+ * Some typefaces are known to never support kerning. Calling this method
+     *  with all zeros (e.g. getKerningPairAdjustments(NULL, 0, NULL)) returns
+ * a boolean indicating if the typeface might support kerning. If it
+ * returns false, then it will always return false (no kerning) for all
+ * possible glyph runs. If it returns true, then it *may* return true for
+     *  some glyph runs.
+ *
+ * If count is non-zero, then the glyphs parameter must point to at least
+ * [count] valid glyph IDs, and the adjustments parameter must be
+ * sized to at least [count - 1] entries. If the method returns true, then
+ * [count-1] entries in the adjustments array will be set. If the method
+ * returns false, then no kerning should be applied, and the adjustments
+ * array will be in an undefined state (possibly some values may have been
+ * written, but none of them should be interpreted as valid values).
+ */
+ bool getKerningPairAdjustments(const SkGlyphID glyphs[], int count,
+ int32_t adjustments[]) const;
+
+ struct LocalizedString {
+ SkString fString;
+ SkString fLanguage;
+ };
+ class LocalizedStrings {
+ public:
+ LocalizedStrings() = default;
+ virtual ~LocalizedStrings() { }
+ virtual bool next(LocalizedString* localizedString) = 0;
+ void unref() { delete this; }
+
+ private:
+ LocalizedStrings(const LocalizedStrings&) = delete;
+ LocalizedStrings& operator=(const LocalizedStrings&) = delete;
+ };
+ /**
+ * Returns an iterator which will attempt to enumerate all of the
+ * family names specified by the font.
+ * It is the caller's responsibility to unref() the returned pointer.
+ */
+ LocalizedStrings* createFamilyNameIterator() const;
+
+ /**
+ * Return the family name for this typeface. It will always be returned
+ * encoded as UTF8, but the language of the name is whatever the host
+ * platform chooses.
+ */
+ void getFamilyName(SkString* name) const;
+
+ /**
+ * Return the PostScript name for this typeface.
+ * Value may change based on variation parameters.
+ * Returns false if no PostScript name is available.
+ */
+ bool getPostScriptName(SkString* name) const;
+
+ /**
+ * Return a stream for the contents of the font data, or NULL on failure.
+ * If ttcIndex is not null, it is set to the TrueTypeCollection index
+ * of this typeface within the stream, or 0 if the stream is not a
+ * collection.
+ * The caller is responsible for deleting the stream.
+ */
+ std::unique_ptr<SkStreamAsset> openStream(int* ttcIndex) const;
+
+ /**
+ * Return a stream for the contents of the font data.
+ * Returns nullptr on failure or if the font data isn't already available in stream form.
+ * Use when the stream can be used opportunistically but the calling code would prefer
+ * to fall back to table access if creating the stream would be expensive.
+ * Otherwise acts the same as openStream.
+ */
+ std::unique_ptr<SkStreamAsset> openExistingStream(int* ttcIndex) const;
+
+ /**
+ * Return a scalercontext for the given descriptor. It may return a
+ * stub scalercontext that will not crash, but will draw nothing.
+ */
+ std::unique_ptr<SkScalerContext> createScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const;
+
+ /**
+ * Return a rectangle (scaled to 1-pt) that represents the union of the bounds of all
+     *  of the glyphs, but each one positioned at (0, 0). This may be conservatively large, and
+ * will not take into account any hinting or other size-specific adjustments.
+ */
+ SkRect getBounds() const;
+
+ /***
+ * Returns whether this typeface has color glyphs and therefore cannot be
+ * rendered as a path. e.g. Emojis.
+ */
+ virtual bool hasColorGlyphs() const { return false; }
+
+ // PRIVATE / EXPERIMENTAL -- do not call
+ void filterRec(SkScalerContextRec* rec) const {
+ this->onFilterRec(rec);
+ }
+ // PRIVATE / EXPERIMENTAL -- do not call
+ void getFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const {
+ this->onGetFontDescriptor(desc, isLocal);
+ }
+ // PRIVATE / EXPERIMENTAL -- do not call
+ void* internal_private_getCTFontRef() const {
+ return this->onGetCTFontRef();
+ }
+
+ /* Skia reserves all tags that begin with a lower case letter and 0 */
+ using FactoryId = SkFourByteTag;
+ static void Register(
+ FactoryId id,
+ sk_sp<SkTypeface> (*make)(std::unique_ptr<SkStreamAsset>, const SkFontArguments&));
+
+protected:
+ explicit SkTypeface(const SkFontStyle& style, bool isFixedPitch = false);
+ ~SkTypeface() override;
+
+ virtual sk_sp<SkTypeface> onMakeClone(const SkFontArguments&) const = 0;
+
+ /** Sets the fixedPitch bit. If used, must be called in the constructor. */
+ void setIsFixedPitch(bool isFixedPitch) { fIsFixedPitch = isFixedPitch; }
+ /** Sets the font style. If used, must be called in the constructor. */
+ void setFontStyle(SkFontStyle style) { fStyle = style; }
+
+ // Must return a valid scaler context. It can not return nullptr.
+ virtual std::unique_ptr<SkScalerContext> onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const = 0;
+ virtual void onFilterRec(SkScalerContextRec*) const = 0;
+ friend class SkScalerContext; // onFilterRec
+
+ // Subclasses *must* override this method to work with the PDF backend.
+ virtual std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const = 0;
+ // For type1 postscript fonts only, set the glyph names for each glyph.
+ // destination array is non-null, and points to an array of size this->countGlyphs().
+    // Backends that do not support type1 fonts should not override.
+ virtual void getPostScriptGlyphNames(SkString*) const = 0;
+
+ // The mapping from glyph to Unicode; array indices are glyph ids.
+ // For each glyph, give the default Unicode value, if it exists.
+ // dstArray is non-null, and points to an array of size this->countGlyphs().
+ virtual void getGlyphToUnicodeMap(SkUnichar* dstArray) const = 0;
+
+ virtual std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const = 0;
+
+ virtual std::unique_ptr<SkStreamAsset> onOpenExistingStream(int* ttcIndex) const;
+
+ virtual bool onGlyphMaskNeedsCurrentColor() const = 0;
+
+ virtual int onGetVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const = 0;
+
+ virtual int onGetVariationDesignParameters(
+ SkFontParameters::Variation::Axis parameters[], int parameterCount) const = 0;
+
+ virtual void onGetFontDescriptor(SkFontDescriptor*, bool* isLocal) const = 0;
+
+ virtual void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const = 0;
+ virtual int onCountGlyphs() const = 0;
+
+ virtual int onGetUPEM() const = 0;
+ virtual bool onGetKerningPairAdjustments(const SkGlyphID glyphs[], int count,
+ int32_t adjustments[]) const;
+
+ /** Returns the family name of the typeface as known by its font manager.
+ * This name may or may not be produced by the family name iterator.
+ */
+ virtual void onGetFamilyName(SkString* familyName) const = 0;
+ virtual bool onGetPostScriptName(SkString*) const = 0;
+
+ /** Returns an iterator over the family names in the font. */
+ virtual LocalizedStrings* onCreateFamilyNameIterator() const = 0;
+
+ virtual int onGetTableTags(SkFontTableTag tags[]) const = 0;
+ virtual size_t onGetTableData(SkFontTableTag, size_t offset,
+ size_t length, void* data) const = 0;
+ virtual sk_sp<SkData> onCopyTableData(SkFontTableTag) const;
+
+ virtual bool onComputeBounds(SkRect*) const;
+
+ virtual void* onGetCTFontRef() const { return nullptr; }
+
+private:
+ /** Returns true if the typeface's glyph masks may refer to the foreground
+ * paint foreground color. This is needed to determine caching requirements. Usually true for
+ * typefaces that contain a COLR table.
+ */
+ bool glyphMaskNeedsCurrentColor() const;
+ friend class SkStrikeServerImpl; // glyphMaskNeedsCurrentColor
+ friend class SkTypefaceProxyPrototype; // glyphMaskNeedsCurrentColor
+
+ /** Retrieve detailed typeface metrics. Used by the PDF backend. */
+ std::unique_ptr<SkAdvancedTypefaceMetrics> getAdvancedMetrics() const;
+ friend class SkRandomTypeface; // getAdvancedMetrics
+ friend class SkPDFFont; // getAdvancedMetrics
+
+ /** Style specifies the intrinsic style attributes of a given typeface */
+ enum Style {
+ kNormal = 0,
+ kBold = 0x01,
+ kItalic = 0x02,
+
+ // helpers
+ kBoldItalic = 0x03
+ };
+ static SkFontStyle FromOldStyle(Style oldStyle);
+ static SkTypeface* GetDefaultTypeface(Style style = SkTypeface::kNormal);
+
+ friend class SkFontPriv; // GetDefaultTypeface
+ friend class SkPaintPriv; // GetDefaultTypeface
+ friend class SkFont; // getGlyphToUnicodeMap
+
+private:
+ SkTypefaceID fUniqueID;
+ SkFontStyle fStyle;
+ mutable SkRect fBounds;
+ mutable SkOnce fBoundsOnce;
+ bool fIsFixedPitch;
+
+ using INHERITED = SkWeakRefCnt;
+};
+#endif
diff --git a/gfx/skia/skia/include/core/SkTypes.h b/gfx/skia/skia/include/core/SkTypes.h
new file mode 100644
index 0000000000..5530cc4463
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkTypes.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypes_DEFINED
+#define SkTypes_DEFINED
+
+// All of these files should be independent of things users can set via the user config file.
+// They should also be able to be included in any order.
+// IWYU pragma: begin_exports
+#include "include/private/base/SkFeatures.h"
+
+// Load and verify defines from the user config file.
+#include "include/private/base/SkLoadUserConfig.h"
+
+// Any includes or defines below can be configured by the user config file.
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkAttributes.h"
+#include "include/private/base/SkDebug.h"
+// IWYU pragma: end_exports
+
+#include <climits>
+#include <cstdint>
+
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)  // either GPU backend requires SkSL
+#  if !defined(SK_ENABLE_SKSL)
+#    define SK_ENABLE_SKSL
+#  endif
+#else  // no GPU backend configured: strip every GPU API define
+#  undef SK_GL
+#  undef SK_VULKAN
+#  undef SK_METAL
+#  undef SK_DAWN
+#  undef SK_DIRECT3D
+#endif
+
+// If SK_R32_SHIFT is set, we'll use that to choose RGBA or BGRA.
+// If not, we'll default to RGBA everywhere except BGRA on Windows.
+#if defined(SK_R32_SHIFT)
+    static_assert(SK_R32_SHIFT == 0 || SK_R32_SHIFT == 16, "");  // only RGBA (0) or BGRA (16) layouts are legal
+#elif defined(SK_BUILD_FOR_WIN)
+    #define SK_R32_SHIFT 16
+#else
+    #define SK_R32_SHIFT 0
+#endif
+
+#if defined(SK_B32_SHIFT)
+    static_assert(SK_B32_SHIFT == (16-SK_R32_SHIFT), "");  // B must occupy the slot opposite R
+#else
+    #define SK_B32_SHIFT (16-SK_R32_SHIFT)
+#endif
+
+#define SK_G32_SHIFT 8   // G and A positions are fixed regardless of RGBA/BGRA order
+#define SK_A32_SHIFT 24
+
+/**
+ *  SK_PMCOLOR_BYTE_ORDER can be used to query the byte order of SkPMColor at compile time.
+ */
+#ifdef SK_CPU_BENDIAN
+#  define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3)     \
+        (SK_ ## C3 ## 32_SHIFT == 0  &&             \
+         SK_ ## C2 ## 32_SHIFT == 8  &&             \
+         SK_ ## C1 ## 32_SHIFT == 16 &&             \
+         SK_ ## C0 ## 32_SHIFT == 24)
+#else
+#  define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3)     \
+        (SK_ ## C0 ## 32_SHIFT == 0  &&             \
+         SK_ ## C1 ## 32_SHIFT == 8  &&             \
+         SK_ ## C2 ## 32_SHIFT == 16 &&             \
+         SK_ ## C3 ## 32_SHIFT == 24)
+#endif
+
+#if defined SK_DEBUG && defined SK_BUILD_FOR_WIN
+    #ifdef free
+        #undef free
+    #endif
+    #include <crtdbg.h>
+    #undef free  // crtdbg.h's debug heap may redefine free as a macro; keep the plain symbol
+#endif
+
+#ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
+    #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 0
+#endif
+
+#if !defined(SK_GAMMA_EXPONENT)
+    #define SK_GAMMA_EXPONENT (0.0f)  // SRGB
+#endif
+
+#ifndef GR_TEST_UTILS
+#  define GR_TEST_UTILS 0
+#endif
+
+
+#if defined(SK_HISTOGRAM_ENUMERATION) || \
+    defined(SK_HISTOGRAM_BOOLEAN) || \
+    defined(SK_HISTOGRAM_EXACT_LINEAR) || \
+    defined(SK_HISTOGRAM_MEMORY_KB)
+#  define SK_HISTOGRAMS_ENABLED 1  // any embedder-provided histogram macro enables reporting
+#else
+#  define SK_HISTOGRAMS_ENABLED 0
+#endif
+
+#ifndef SK_HISTOGRAM_BOOLEAN
+#  define SK_HISTOGRAM_BOOLEAN(name, sample)  // no-op default
+#endif
+
+#ifndef SK_HISTOGRAM_ENUMERATION
+#  define SK_HISTOGRAM_ENUMERATION(name, sample, enum_size)  // no-op default
+#endif
+
+#ifndef SK_HISTOGRAM_EXACT_LINEAR
+#  define SK_HISTOGRAM_EXACT_LINEAR(name, sample, value_max)  // no-op default
+#endif
+
+#ifndef SK_HISTOGRAM_MEMORY_KB
+#  define SK_HISTOGRAM_MEMORY_KB(name, sample)  // no-op default
+#endif
+
+#define SK_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
+    SK_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
+
+// The top-level define SK_ENABLE_OPTIMIZE_SIZE can be used to remove several large features at once
+#if defined(SK_ENABLE_OPTIMIZE_SIZE)
+#  define SK_FORCE_RASTER_PIPELINE_BLITTER
+#  define SK_DISABLE_SDF_TEXT
+#endif
+
+#ifndef SK_DISABLE_LEGACY_SHADERCONTEXT
+#  define SK_ENABLE_LEGACY_SHADERCONTEXT
+#endif
+
+#if defined(SK_BUILD_FOR_LIBFUZZER) || defined(SK_BUILD_FOR_AFL_FUZZ)
+#if !defined(SK_BUILD_FOR_FUZZER)
+    #define SK_BUILD_FOR_FUZZER
+#endif
+#endif
+
+/**
+ *  Gr defines are set to 0 or 1, rather than being undefined or defined
+ */
+
+#if !defined(GR_CACHE_STATS)
+  #if defined(SK_DEBUG) || defined(SK_DUMP_STATS)
+      #define GR_CACHE_STATS 1
+  #else
+      #define GR_CACHE_STATS 0
+  #endif
+#endif
+
+#if !defined(GR_GPU_STATS)
+  #if defined(SK_DEBUG) || defined(SK_DUMP_STATS) || GR_TEST_UTILS
+      #define GR_GPU_STATS 1
+  #else
+      #define GR_GPU_STATS 0
+  #endif
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+typedef uint32_t SkFourByteTag;
+static inline constexpr SkFourByteTag SkSetFourByteTag(char a, char b, char c, char d) {
+    return (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d);  // 'a' lands in the most significant byte
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** 32 bit integer to hold a unicode value
+*/
+typedef int32_t SkUnichar;
+
+/** 16 bit unsigned integer to hold a glyph index
+*/
+typedef uint16_t SkGlyphID;
+
+/** 32 bit value to hold a millisecond duration
+    Note that SK_MSecMax is about 25 days.
+*/
+typedef uint32_t SkMSec;
+
+/** Maximum representable milliseconds; 24d 20h 31m 23.647s.
+*/
+static constexpr SkMSec SK_MSecMax = INT32_MAX;
+
+/** The generation IDs in Skia reserve 0 as an invalid marker.
+*/
+static constexpr uint32_t SK_InvalidGenID = 0;
+
+/** The unique IDs in Skia reserve 0 as an invalid marker.
+*/
+static constexpr uint32_t SK_InvalidUniqueID = 0;
+
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkUnPreMultiply.h b/gfx/skia/skia/include/core/SkUnPreMultiply.h
new file mode 100644
index 0000000000..b492619d07
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkUnPreMultiply.h
@@ -0,0 +1,56 @@
+
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+
+
+#ifndef SkUnPreMultiply_DEFINED
+#define SkUnPreMultiply_DEFINED
+
+#include "include/core/SkColor.h"
+
+class SK_API SkUnPreMultiply {
+public:
+    typedef uint32_t Scale;  // 8.24 fixed-point multiplier consumed by ApplyScale()
+
+    // index this table with alpha [0..255]
+    static const Scale* GetScaleTable() {
+        return gTable;
+    }
+
+    static Scale GetScale(U8CPU alpha) {
+        SkASSERT(alpha <= 255);
+        return gTable[alpha];
+    }
+
+    /** Usage:
+
+        const Scale* table = SkUnPreMultiply::GetScaleTable();
+
+        for (...) {
+            unsigned a = ...
+            SkUnPreMultiply::Scale scale = table[a];
+
+            red = SkUnPreMultiply::ApplyScale(scale, red);
+            ...
+            // now red is unpremultiplied
+        }
+    */
+    static U8CPU ApplyScale(Scale scale, U8CPU component) {
+        SkASSERT(component <= 255);
+        return (scale * component + (1 << 23)) >> 24;  // round to nearest: add half before shifting out the 24 fraction bits
+    }
+
+    static SkColor PMColorToColor(SkPMColor c);
+
+private:
+    static const uint32_t gTable[256];  // per-alpha scale factors; defined out of line
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkVertices.h b/gfx/skia/skia/include/core/SkVertices.h
new file mode 100644
index 0000000000..2c3f784a42
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkVertices.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVertices_DEFINED
+#define SkVertices_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+
+#include <memory>
+
+class SkData;
+struct SkPoint;
+class SkVerticesPriv;
+
+/**
+ * An immutable set of vertex data that can be used with SkCanvas::drawVertices.
+ */
+class SK_API SkVertices : public SkNVRefCnt<SkVertices> {
+    struct Desc;
+    struct Sizes;
+public:
+    enum VertexMode {
+        kTriangles_VertexMode,
+        kTriangleStrip_VertexMode,
+        kTriangleFan_VertexMode,
+
+        kLast_VertexMode = kTriangleFan_VertexMode,
+    };
+
+    /**
+     *  Create a vertices by copying the specified arrays. texs, colors may be nullptr,
+     *  and indices is ignored if indexCount == 0.
+     */
+    static sk_sp<SkVertices> MakeCopy(VertexMode mode, int vertexCount,
+                                      const SkPoint positions[],
+                                      const SkPoint texs[],
+                                      const SkColor colors[],
+                                      int indexCount,
+                                      const uint16_t indices[]);
+
+    static sk_sp<SkVertices> MakeCopy(VertexMode mode, int vertexCount,
+                                      const SkPoint positions[],
+                                      const SkPoint texs[],
+                                      const SkColor colors[]) {
+        return MakeCopy(mode,
+                        vertexCount,
+                        positions,
+                        texs,
+                        colors,
+                        0,
+                        nullptr);
+    }
+
+    enum BuilderFlags {
+        kHasTexCoords_BuilderFlag   = 1 << 0,
+        kHasColors_BuilderFlag      = 1 << 1,
+    };
+    class Builder {
+    public:
+        Builder(VertexMode mode, int vertexCount, int indexCount, uint32_t flags);
+
+        bool isValid() const { return fVertices != nullptr; }
+
+        SkPoint* positions();
+        uint16_t* indices();        // returns null if there are no indices
+
+        // If we have custom attributes, these will always be null
+        SkPoint* texCoords();       // returns null if there are no texCoords
+        SkColor* colors();          // returns null if there are no colors
+
+        // Detach the built vertices object. After the first call, this will always return null.
+        sk_sp<SkVertices> detach();
+
+    private:
+        Builder(const Desc&);
+
+        void init(const Desc&);
+
+        // holds a partially complete object. only completed in detach()
+        sk_sp<SkVertices> fVertices;
+        // Extra storage for intermediate vertices in the case where the client specifies indexed
+        // triangle fans. These get converted to indexed triangles when the Builder is finalized.
+        std::unique_ptr<uint8_t[]> fIntermediateFanIndices;
+
+        friend class SkVertices;
+        friend class SkVerticesPriv;
+    };
+
+    uint32_t uniqueID() const { return fUniqueID; }
+    const SkRect& bounds() const { return fBounds; }
+
+    // returns approximate byte size of the vertices object
+    size_t approximateSize() const;
+
+    // Provides access to functions that aren't part of the public API.
+    SkVerticesPriv priv();
+    const SkVerticesPriv priv() const;  // NOLINT(readability-const-return-type)
+
+private:
+    SkVertices() {}  // constructed empty; fields are filled in via Builder (see detach())
+
+    friend class SkVerticesPriv;
+
+    // these are needed since we've manually sized our allocation (see Builder::init)
+    friend class SkNVRefCnt<SkVertices>;
+    void operator delete(void* p);
+
+    Sizes getSizes() const;
+
+    // we store this first, to pair with the refcnt in our base-class, so we don't have an
+    // unnecessary pad between it and the (possibly 8-byte aligned) ptrs.
+    uint32_t fUniqueID;
+
+    // these point inside our allocation, so none of these can be "freed"
+    SkPoint*     fPositions;      // [vertexCount]
+    uint16_t*    fIndices;        // [indexCount] or null
+    SkPoint*     fTexs;           // [vertexCount] or null
+    SkColor*     fColors;         // [vertexCount] or null
+
+    SkRect  fBounds;    // computed to be the union of the fPositions[]
+    int     fVertexCount;
+    int     fIndexCount;
+
+    VertexMode fMode;
+    // below here is where the actual array data is stored.
+};
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkYUVAInfo.h b/gfx/skia/skia/include/core/SkYUVAInfo.h
new file mode 100644
index 0000000000..a3cf210f37
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkYUVAInfo.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVAInfo_DEFINED
+#define SkYUVAInfo_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkSize.h"
+
+#include <array>
+#include <tuple>
+
+/**
+ * Specifies the structure of planes for a YUV image with optional alpha. The actual planar data
+ * is not part of this structure and depending on usage is in external textures or pixmaps.
+ */
+class SK_API SkYUVAInfo {
+public:
+    enum YUVAChannels { kY, kU, kV, kA, kLast = kA };
+    static constexpr int kYUVAChannelCount = static_cast<int>(YUVAChannels::kLast + 1);  // == 4
+
+    struct YUVALocation;  // For internal use.
+    using YUVALocations = std::array<YUVALocation, kYUVAChannelCount>;
+
+    /**
+     * Specifies how YUV (and optionally A) are divided among planes. Planes are separated by
+     * underscores in the enum value names. Within each plane the pixmap/texture channels are
+     * mapped to the YUVA channels in the order specified, e.g. for kY_UV Y is in channel 0 of plane
+     * 0, U is in channel 0 of plane 1, and V is in channel 1 of plane 1. Channel ordering
+     * within a pixmap/texture given the channels it contains:
+     * A:                       0:A
+     * Luminance/Gray:          0:Gray
+     * Luminance/Gray + Alpha:  0:Gray, 1:A
+     * RG                       0:R,    1:G
+     * RGB                      0:R,    1:G, 2:B
+     * RGBA                     0:R,    1:G, 2:B, 3:A
+     */
+    enum class PlaneConfig {
+        kUnknown,
+
+        kY_U_V,    ///< Plane 0: Y,   Plane 1: U,  Plane 2: V
+        kY_V_U,    ///< Plane 0: Y,   Plane 1: V,  Plane 2: U
+        kY_UV,     ///< Plane 0: Y,   Plane 1: UV
+        kY_VU,     ///< Plane 0: Y,   Plane 1: VU
+        kYUV,      ///< Plane 0: YUV
+        kUYV,      ///< Plane 0: UYV
+
+        kY_U_V_A,  ///< Plane 0: Y,   Plane 1: U,  Plane 2: V, Plane 3: A
+        kY_V_U_A,  ///< Plane 0: Y,   Plane 1: V,  Plane 2: U, Plane 3: A
+        kY_UV_A,   ///< Plane 0: Y,   Plane 1: UV, Plane 2: A
+        kY_VU_A,   ///< Plane 0: Y,   Plane 1: VU, Plane 2: A
+        kYUVA,     ///< Plane 0: YUVA
+        kUYVA,     ///< Plane 0: UYVA
+
+        kLast = kUYVA
+    };
+
+    /**
+     * UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is
+     * 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub-
+     * sampled. Note that Subsampling values other than k444 are only valid with PlaneConfig values
+     * that have U and V in different planes than Y (and A, if present).
+     */
+    enum class Subsampling {
+        kUnknown,
+
+        k444,    ///< No subsampling. UV values for each Y.
+        k422,    ///< 1 set of UV values for each 2x1 block of Y values.
+        k420,    ///< 1 set of UV values for each 2x2 block of Y values.
+        k440,    ///< 1 set of UV values for each 1x2 block of Y values.
+        k411,    ///< 1 set of UV values for each 4x1 block of Y values.
+        k410,    ///< 1 set of UV values for each 4x2 block of Y values.
+
+        kLast = k410
+    };
+
+    /**
+     * Describes how subsampled chroma values are sited relative to luma values.
+     *
+     * Currently only centered siting is supported but will expand to support additional sitings.
+     */
+    enum class Siting {
+        /**
+         * Subsampled chroma value is sited at the center of the block of corresponding luma values.
+         */
+        kCentered,
+    };
+
+    static constexpr int kMaxPlanes = 4;
+
+    /** ratio of Y/A values to U/V values in x and y. */
+    static std::tuple<int, int> SubsamplingFactors(Subsampling);
+
+    /**
+     * SubsamplingFactors(Subsampling) if planedIdx refers to a U/V plane and otherwise {1, 1} if
+     * inputs are valid. Invalid inputs consist of incompatible PlaneConfig/Subsampling/planeIdx
+     * combinations. {0, 0} is returned for invalid inputs.
+     */
+    static std::tuple<int, int> PlaneSubsamplingFactors(PlaneConfig, Subsampling, int planeIdx);
+
+    /**
+     * Given image dimensions, a planar configuration, subsampling, and origin, determine the
+     * expected size of each plane. Returns the number of expected planes. planeDimensions[0]
+     * through planeDimensions[<ret>] are written. The input image dimensions are as displayed
+     * (after the planes have been transformed to the intended display orientation). The plane
+     * dimensions are output as the planes are stored in memory (may be rotated from image
+     * dimensions).
+     */
+    static int PlaneDimensions(SkISize imageDimensions,
+                               PlaneConfig,
+                               Subsampling,
+                               SkEncodedOrigin,
+                               SkISize planeDimensions[kMaxPlanes]);
+
+    /** Number of planes for a given PlaneConfig. */
+    static constexpr int NumPlanes(PlaneConfig);
+
+    /**
+     * Number of Y, U, V, A channels in the ith plane for a given PlaneConfig (or 0 if i is
+     * invalid).
+     */
+    static constexpr int NumChannelsInPlane(PlaneConfig, int i);
+
+    /**
+     * Given a PlaneConfig and a set of channel flags for each plane, convert to YUVALocations
+     * representation. Fails if channel flags aren't valid for the PlaneConfig (i.e. don't have
+     * enough channels in a plane) by returning an invalid set of locations (plane indices are -1).
+     */
+    static YUVALocations GetYUVALocations(PlaneConfig, const uint32_t* planeChannelFlags);
+
+    /** Does the PlaneConfig have alpha values? */
+    static bool HasAlpha(PlaneConfig);
+
+    SkYUVAInfo() = default;
+    SkYUVAInfo(const SkYUVAInfo&) = default;
+
+    /**
+     * 'dimensions' should specify the size of the full resolution image (after planes have been
+     * oriented to how the image is displayed as indicated by 'origin').
+     */
+    SkYUVAInfo(SkISize dimensions,
+               PlaneConfig,
+               Subsampling,
+               SkYUVColorSpace,
+               SkEncodedOrigin origin = kTopLeft_SkEncodedOrigin,
+               Siting sitingX = Siting::kCentered,
+               Siting sitingY = Siting::kCentered);
+
+    SkYUVAInfo& operator=(const SkYUVAInfo& that) = default;
+
+    PlaneConfig planeConfig() const { return fPlaneConfig; }
+    Subsampling subsampling() const { return fSubsampling; }
+
+    std::tuple<int, int> planeSubsamplingFactors(int planeIdx) const {
+        return PlaneSubsamplingFactors(fPlaneConfig, fSubsampling, planeIdx);
+    }
+
+    /**
+     * Dimensions of the full resolution image (after planes have been oriented to how the image
+     * is displayed as indicated by fOrigin).
+     */
+    SkISize dimensions() const { return fDimensions; }
+    int width() const { return fDimensions.width(); }
+    int height() const { return fDimensions.height(); }
+
+    SkYUVColorSpace yuvColorSpace() const { return fYUVColorSpace; }
+    Siting sitingX() const { return fSitingX; }
+    Siting sitingY() const { return fSitingY; }
+
+    SkEncodedOrigin origin() const { return fOrigin; }
+
+    SkMatrix originMatrix() const {
+        return SkEncodedOriginToMatrix(fOrigin, this->width(), this->height());
+    }
+
+    bool hasAlpha() const { return HasAlpha(fPlaneConfig); }
+
+    /**
+     * Returns the number of planes and initializes planeDimensions[0]..planeDimensions[<ret>] to
+     * the expected dimensions for each plane. Dimensions are as stored in memory, before
+     * transformation to image display space as indicated by origin().
+     */
+    int planeDimensions(SkISize planeDimensions[kMaxPlanes]) const {
+        return PlaneDimensions(fDimensions, fPlaneConfig, fSubsampling, fOrigin, planeDimensions);
+    }
+
+    /**
+     * Given a per-plane row bytes, determine size to allocate for all planes. Optionally retrieves
+     * the per-plane byte sizes in planeSizes if not null. If total size overflows will return
+     * SIZE_MAX and set all planeSizes to SIZE_MAX.
+     */
+    size_t computeTotalBytes(const size_t rowBytes[kMaxPlanes],
+                             size_t planeSizes[kMaxPlanes] = nullptr) const;
+
+    int numPlanes() const { return NumPlanes(fPlaneConfig); }
+
+    int numChannelsInPlane(int i) const { return NumChannelsInPlane(fPlaneConfig, i); }
+
+    /**
+     * Given a set of channel flags for each plane, converts this->planeConfig() to YUVALocations
+     * representation. Fails if the channel flags aren't valid for the PlaneConfig (i.e. don't have
+     * enough channels in a plane) by returning default initialized locations (all plane indices are
+     * -1).
+     */
+    YUVALocations toYUVALocations(const uint32_t* channelFlags) const;
+
+    /**
+     * Makes a SkYUVAInfo that is identical to this one but with the passed Subsampling. If the
+     * passed Subsampling is not k444 and this info's PlaneConfig is not compatible with chroma
+     * subsampling (because Y is in the same plane as UV) then the result will be an invalid
+     * SkYUVAInfo.
+     */
+    SkYUVAInfo makeSubsampling(SkYUVAInfo::Subsampling) const;
+
+    /**
+     * Makes a SkYUVAInfo that is identical to this one but with the passed dimensions. If the
+     * passed dimensions is empty then the result will be an invalid SkYUVAInfo.
+     */
+    SkYUVAInfo makeDimensions(SkISize) const;
+
+    bool operator==(const SkYUVAInfo& that) const;
+    bool operator!=(const SkYUVAInfo& that) const { return !(*this == that); }
+
+    bool isValid() const { return fPlaneConfig != PlaneConfig::kUnknown; }  // default-constructed info is invalid
+
+private:
+    SkISize fDimensions = {0, 0};
+
+    PlaneConfig fPlaneConfig = PlaneConfig::kUnknown;
+    Subsampling fSubsampling = Subsampling::kUnknown;
+
+    SkYUVColorSpace fYUVColorSpace = SkYUVColorSpace::kIdentity_SkYUVColorSpace;
+
+    /**
+     * YUVA data often comes from formats like JPEG that support EXIF orientation.
+     * Code that operates on the raw YUV data often needs to know that orientation.
+     */
+    SkEncodedOrigin fOrigin = kTopLeft_SkEncodedOrigin;
+
+    Siting fSitingX = Siting::kCentered;
+    Siting fSitingY = Siting::kCentered;
+};
+
+constexpr int SkYUVAInfo::NumPlanes(PlaneConfig planeConfig) {
+    switch (planeConfig) {
+        case PlaneConfig::kUnknown:  return 0;
+        case PlaneConfig::kY_U_V:    return 3;
+        case PlaneConfig::kY_V_U:    return 3;
+        case PlaneConfig::kY_UV:     return 2;
+        case PlaneConfig::kY_VU:     return 2;
+        case PlaneConfig::kYUV:      return 1;
+        case PlaneConfig::kUYV:      return 1;
+        case PlaneConfig::kY_U_V_A:  return 4;
+        case PlaneConfig::kY_V_U_A:  return 4;
+        case PlaneConfig::kY_UV_A:   return 3;
+        case PlaneConfig::kY_VU_A:   return 3;
+        case PlaneConfig::kYUVA:     return 1;
+        case PlaneConfig::kUYVA:     return 1;
+    }
+    SkUNREACHABLE;  // switch above is exhaustive over PlaneConfig
+}
+
+constexpr int SkYUVAInfo::NumChannelsInPlane(PlaneConfig config, int i) {
+    switch (config) {
+        case PlaneConfig::kUnknown:
+            return 0;
+
+        case SkYUVAInfo::PlaneConfig::kY_U_V:
+        case SkYUVAInfo::PlaneConfig::kY_V_U:
+            return i >= 0 && i < 3 ? 1 : 0;  // three single-channel planes
+        case SkYUVAInfo::PlaneConfig::kY_UV:
+        case SkYUVAInfo::PlaneConfig::kY_VU:
+            switch (i) {
+                case 0:  return 1;
+                case 1:  return 2;
+                default: return 0;
+            }
+        case SkYUVAInfo::PlaneConfig::kYUV:
+        case SkYUVAInfo::PlaneConfig::kUYV:
+            return i == 0 ? 3 : 0;  // single interleaved three-channel plane
+        case SkYUVAInfo::PlaneConfig::kY_U_V_A:
+        case SkYUVAInfo::PlaneConfig::kY_V_U_A:
+            return i >= 0 && i < 4 ? 1 : 0;  // four single-channel planes
+        case SkYUVAInfo::PlaneConfig::kY_UV_A:
+        case SkYUVAInfo::PlaneConfig::kY_VU_A:
+            switch (i) {
+                case 0:  return 1;
+                case 1:  return 2;
+                case 2:  return 1;
+                default: return 0;
+            }
+        case SkYUVAInfo::PlaneConfig::kYUVA:
+        case SkYUVAInfo::PlaneConfig::kUYVA:
+            return i == 0 ? 4 : 0;  // single interleaved four-channel plane
+    }
+    return 0;  // for compilers that cannot prove the switch above is exhaustive
+}
+
+#endif
diff --git a/gfx/skia/skia/include/core/SkYUVAPixmaps.h b/gfx/skia/skia/include/core/SkYUVAPixmaps.h
new file mode 100644
index 0000000000..f75b314c00
--- /dev/null
+++ b/gfx/skia/skia/include/core/SkYUVAPixmaps.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVAPixmaps_DEFINED
+#define SkYUVAPixmaps_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkYUVAInfo.h"
+#include "include/private/base/SkTo.h"
+
+#include <array>
+#include <bitset>
+
+class GrImageContext;
+
+/**
+ * SkYUVAInfo combined with per-plane SkColorTypes and row bytes. Fully specifies the SkPixmaps
+ * for a YUVA image without the actual pixel memory and data.
+ */
+class SK_API SkYUVAPixmapInfo {
+public:
+ static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes;
+
+ using PlaneConfig = SkYUVAInfo::PlaneConfig;
+ using Subsampling = SkYUVAInfo::Subsampling;
+
+ /**
+ * Data type for Y, U, V, and possibly A channels independent of how values are packed into
+ * planes.
+ **/
+ enum class DataType {
+ kUnorm8, ///< 8 bit unsigned normalized
+ kUnorm16, ///< 16 bit unsigned normalized
+ kFloat16, ///< 16 bit (half) floating point
+ kUnorm10_Unorm2, ///< 10 bit unorm for Y, U, and V. 2 bit unorm for alpha (if present).
+
+ kLast = kUnorm10_Unorm2
+ };
+ static constexpr int kDataTypeCnt = static_cast<int>(DataType::kLast) + 1;
+
+ class SK_API SupportedDataTypes {
+ public:
+ /** Defaults to nothing supported. */
+ constexpr SupportedDataTypes() = default;
+
+ /** Init based on texture formats supported by the context. */
+ SupportedDataTypes(const GrImageContext&);
+
+ /** All legal combinations of PlaneConfig and DataType are supported. */
+ static constexpr SupportedDataTypes All();
+
+ /**
+ * Checks whether there is a supported combination of color types for planes structured
+ * as indicated by PlaneConfig with channel data types as indicated by DataType.
+ */
+ constexpr bool supported(PlaneConfig, DataType) const;
+
+ /**
+ * Update to add support for pixmaps with numChannel channels where each channel is
+ * represented as DataType.
+ */
+ void enableDataType(DataType, int numChannels);
+
+ private:
+ // The bit for DataType dt with n channels is at index kDataTypeCnt*(n-1) + dt.
+ std::bitset<kDataTypeCnt*4> fDataTypeSupport = {};
+ };
+
+ /**
+ * Gets the default SkColorType to use with numChannels channels, each represented as DataType.
+ * Returns kUnknown_SkColorType if no such color type.
+ */
+ static constexpr SkColorType DefaultColorTypeForDataType(DataType dataType, int numChannels);
+
+ /**
+ * If the SkColorType is supported for YUVA pixmaps this will return the number of YUVA channels
+ * that can be stored in a plane of this color type and what the DataType is of those channels.
+ * If the SkColorType is not supported as a YUVA plane the number of channels is reported as 0
+ * and the DataType returned should be ignored.
+ */
+ static std::tuple<int, DataType> NumChannelsAndDataType(SkColorType);
+
+ /** Default SkYUVAPixmapInfo is invalid. */
+ SkYUVAPixmapInfo() = default;
+
+ /**
+ * Initializes the SkYUVAPixmapInfo from a SkYUVAInfo with per-plane color types and row bytes.
+ * This will be invalid if the colorTypes aren't compatible with the SkYUVAInfo or if a
+ * rowBytes entry is not valid for the plane dimensions and color type. Color type and
+ * row byte values beyond the number of planes in SkYUVAInfo are ignored. All SkColorTypes
+ * must have the same DataType or this will be invalid.
+ *
+ * If rowBytes is nullptr then bpp*width is assumed for each plane.
+ */
+ SkYUVAPixmapInfo(const SkYUVAInfo&,
+ const SkColorType[kMaxPlanes],
+ const size_t rowBytes[kMaxPlanes]);
+ /**
+ * Like above but uses DefaultColorTypeForDataType to determine each plane's SkColorType. If
+ * rowBytes is nullptr then bpp*width is assumed for each plane.
+ */
+ SkYUVAPixmapInfo(const SkYUVAInfo&, DataType, const size_t rowBytes[kMaxPlanes]);
+
+ SkYUVAPixmapInfo(const SkYUVAPixmapInfo&) = default;
+
+ SkYUVAPixmapInfo& operator=(const SkYUVAPixmapInfo&) = default;
+
+ bool operator==(const SkYUVAPixmapInfo&) const;
+ bool operator!=(const SkYUVAPixmapInfo& that) const { return !(*this == that); }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); }
+
+ /** The number of SkPixmap planes, 0 if this SkYUVAPixmapInfo is invalid. */
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ /** The per-YUV[A] channel data type. */
+ DataType dataType() const { return fDataType; }
+
+ /**
+ * Row bytes for the ith plane. Returns zero if i >= numPlanes() or this SkYUVAPixmapInfo is
+ * invalid.
+ */
+ size_t rowBytes(int i) const { return fRowBytes[static_cast<size_t>(i)]; }
+
+ /** Image info for the ith plane, or default SkImageInfo if i >= numPlanes() */
+ const SkImageInfo& planeInfo(int i) const { return fPlaneInfos[static_cast<size_t>(i)]; }
+
+ /**
+ * Determine size to allocate for all planes. Optionally retrieves the per-plane sizes in
+ * planeSizes if not null. If total size overflows will return SIZE_MAX and set all planeSizes
+ * to SIZE_MAX. Returns 0 and fills planesSizes with 0 if this SkYUVAPixmapInfo is not valid.
+ */
+ size_t computeTotalBytes(size_t planeSizes[kMaxPlanes] = nullptr) const;
+
+ /**
+ * Takes an allocation that is assumed to be at least computeTotalBytes() in size and configures
+ * the first numPlanes() entries in pixmaps array to point into that memory. The remaining
+ * entries of pixmaps are default initialized. Fails if this SkYUVAPixmapInfo not valid.
+ */
+ bool initPixmapsFromSingleAllocation(void* memory, SkPixmap pixmaps[kMaxPlanes]) const;
+
+ /**
+ * Returns true if this has been configured with a non-empty dimensioned SkYUVAInfo with
+ * compatible color types and row bytes.
+ */
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /** Is this valid and does it use color types allowed by the passed SupportedDataTypes? */
+ bool isSupported(const SupportedDataTypes&) const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ std::array<SkImageInfo, kMaxPlanes> fPlaneInfos = {};
+ std::array<size_t, kMaxPlanes> fRowBytes = {};
+ DataType fDataType = DataType::kUnorm8;
+ static_assert(kUnknown_SkColorType == 0, "default init isn't kUnknown");
+};
+
+/**
+ * Helper to store SkPixmap planes as described by a SkYUVAPixmapInfo. Can be responsible for
+ * allocating/freeing memory for pixmaps or use external memory.
+ */
+class SK_API SkYUVAPixmaps {
+public:
+ using DataType = SkYUVAPixmapInfo::DataType;
+ static constexpr auto kMaxPlanes = SkYUVAPixmapInfo::kMaxPlanes;
+
+ static SkColorType RecommendedRGBAColorType(DataType);
+
+ /** Allocate space for pixmaps' pixels in the SkYUVAPixmaps. */
+ static SkYUVAPixmaps Allocate(const SkYUVAPixmapInfo& yuvaPixmapInfo);
+
+ /**
+ * Use storage in SkData as backing store for pixmaps' pixels. SkData is retained by the
+ * SkYUVAPixmaps.
+ */
+ static SkYUVAPixmaps FromData(const SkYUVAPixmapInfo&, sk_sp<SkData>);
+
+ /**
+ * Makes a deep copy of the src SkYUVAPixmaps. The returned SkYUVAPixmaps owns its planes'
+ * backing stores.
+ */
+ static SkYUVAPixmaps MakeCopy(const SkYUVAPixmaps& src);
+
+ /**
+ * Use passed in memory as backing store for pixmaps' pixels. Caller must ensure memory remains
+ * allocated while pixmaps are in use. There must be at least
+ * SkYUVAPixmapInfo::computeTotalBytes() allocated starting at memory.
+ */
+ static SkYUVAPixmaps FromExternalMemory(const SkYUVAPixmapInfo&, void* memory);
+
+ /**
+ * Wraps existing SkPixmaps. The SkYUVAPixmaps will have no ownership of the SkPixmaps' pixel
+ * memory so the caller must ensure it remains valid. Will return an invalid SkYUVAPixmaps if
+ * the SkYUVAInfo isn't compatible with the SkPixmap array (number of planes, plane dimensions,
+ * sufficient color channels in planes, ...).
+ */
+ static SkYUVAPixmaps FromExternalPixmaps(const SkYUVAInfo&, const SkPixmap[kMaxPlanes]);
+
+ /** Default SkYUVAPixmaps is invalid. */
+ SkYUVAPixmaps() = default;
+ ~SkYUVAPixmaps() = default;
+
+ SkYUVAPixmaps(SkYUVAPixmaps&& that) = default;
+ SkYUVAPixmaps& operator=(SkYUVAPixmaps&& that) = default;
+ SkYUVAPixmaps(const SkYUVAPixmaps&) = default;
+ SkYUVAPixmaps& operator=(const SkYUVAPixmaps& that) = default;
+
+ /** Does have initialized pixmaps compatible with its SkYUVAInfo. */
+ bool isValid() const { return !fYUVAInfo.dimensions().isEmpty(); }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ DataType dataType() const { return fDataType; }
+
+ SkYUVAPixmapInfo pixmapsInfo() const;
+
+ /** Number of pixmap planes or 0 if this SkYUVAPixmaps is invalid. */
+ int numPlanes() const { return this->isValid() ? fYUVAInfo.numPlanes() : 0; }
+
+ /**
+ * Access the SkPixmap planes. They are default initialized if this is not a valid
+ * SkYUVAPixmaps.
+ */
+ const std::array<SkPixmap, kMaxPlanes>& planes() const { return fPlanes; }
+
+ /**
+ * Get the ith SkPixmap plane. SkPixmap will be default initialized if i >= numPlanes or this
+ * SkYUVAPixmaps is invalid.
+ */
+ const SkPixmap& plane(int i) const { return fPlanes[SkToSizeT(i)]; }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+ /** Does this SkPixmaps own the backing store of the planes? */
+ bool ownsStorage() const { return SkToBool(fData); }
+
+private:
+ SkYUVAPixmaps(const SkYUVAPixmapInfo&, sk_sp<SkData>);
+ SkYUVAPixmaps(const SkYUVAInfo&, DataType, const SkPixmap[kMaxPlanes]);
+
+ std::array<SkPixmap, kMaxPlanes> fPlanes = {};
+ sk_sp<SkData> fData;
+ SkYUVAInfo fYUVAInfo;
+ DataType fDataType;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+constexpr SkYUVAPixmapInfo::SupportedDataTypes SkYUVAPixmapInfo::SupportedDataTypes::All() {
+ using ULL = unsigned long long; // bitset cons. takes this.
+ ULL bits = 0;
+ for (ULL c = 1; c <= 4; ++c) {
+ for (ULL dt = 0; dt <= ULL(kDataTypeCnt); ++dt) {
+ if (DefaultColorTypeForDataType(static_cast<DataType>(dt),
+ static_cast<int>(c)) != kUnknown_SkColorType) {
+ bits |= ULL(1) << (dt + static_cast<ULL>(kDataTypeCnt)*(c - 1));
+ }
+ }
+ }
+ SupportedDataTypes combinations;
+ combinations.fDataTypeSupport = bits;
+ return combinations;
+}
+
+constexpr bool SkYUVAPixmapInfo::SupportedDataTypes::supported(PlaneConfig config,
+ DataType type) const {
+ int n = SkYUVAInfo::NumPlanes(config);
+ for (int i = 0; i < n; ++i) {
+ auto c = static_cast<size_t>(SkYUVAInfo::NumChannelsInPlane(config, i));
+ SkASSERT(c >= 1 && c <= 4);
+ if (!fDataTypeSupport[static_cast<size_t>(type) +
+ (c - 1)*static_cast<size_t>(kDataTypeCnt)]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+constexpr SkColorType SkYUVAPixmapInfo::DefaultColorTypeForDataType(DataType dataType,
+ int numChannels) {
+ switch (numChannels) {
+ case 1:
+ switch (dataType) {
+ case DataType::kUnorm8: return kGray_8_SkColorType;
+ case DataType::kUnorm16: return kA16_unorm_SkColorType;
+ case DataType::kFloat16: return kA16_float_SkColorType;
+ case DataType::kUnorm10_Unorm2: return kUnknown_SkColorType;
+ }
+ break;
+ case 2:
+ switch (dataType) {
+ case DataType::kUnorm8: return kR8G8_unorm_SkColorType;
+ case DataType::kUnorm16: return kR16G16_unorm_SkColorType;
+ case DataType::kFloat16: return kR16G16_float_SkColorType;
+ case DataType::kUnorm10_Unorm2: return kUnknown_SkColorType;
+ }
+ break;
+ case 3:
+ // None of these are tightly packed. The intended use case is for interleaved YUVA
+ // planes where we're forcing opaqueness by ignoring the alpha values.
+ // There are "x" rather than "A" variants for Unorm8 and Unorm10_Unorm2 but we don't
+ // choose them because 1) there is no inherent advantage and 2) there is better support
+ // in the GPU backend for the "A" versions.
+ switch (dataType) {
+ case DataType::kUnorm8: return kRGBA_8888_SkColorType;
+ case DataType::kUnorm16: return kR16G16B16A16_unorm_SkColorType;
+ case DataType::kFloat16: return kRGBA_F16_SkColorType;
+ case DataType::kUnorm10_Unorm2: return kRGBA_1010102_SkColorType;
+ }
+ break;
+ case 4:
+ switch (dataType) {
+ case DataType::kUnorm8: return kRGBA_8888_SkColorType;
+ case DataType::kUnorm16: return kR16G16B16A16_unorm_SkColorType;
+ case DataType::kFloat16: return kRGBA_F16_SkColorType;
+ case DataType::kUnorm10_Unorm2: return kRGBA_1010102_SkColorType;
+ }
+ break;
+ }
+ return kUnknown_SkColorType;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/docs/SkPDFDocument.h b/gfx/skia/skia/include/docs/SkPDFDocument.h
new file mode 100644
index 0000000000..16e953be5e
--- /dev/null
+++ b/gfx/skia/skia/include/docs/SkPDFDocument.h
@@ -0,0 +1,202 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFDocument_DEFINED
+#define SkPDFDocument_DEFINED
+
+#include "include/core/SkDocument.h"
+
+#include <vector>
+
+#include "include/core/SkColor.h"
+#include "include/core/SkMilestone.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTime.h"
+#include "include/private/base/SkNoncopyable.h"
+
+#define SKPDF_STRING(X) SKPDF_STRING_IMPL(X)
+#define SKPDF_STRING_IMPL(X) #X
+
+class SkExecutor;
+class SkPDFArray;
+class SkPDFTagTree;
+
+namespace SkPDF {
+
+/** Attributes for nodes in the PDF tree. */
+class SK_API AttributeList : SkNoncopyable {
+public:
+ AttributeList();
+ ~AttributeList();
+
+ // Each attribute must have an owner (e.g. "Layout", "List", "Table", etc)
+ // and an attribute name (e.g. "BBox", "RowSpan", etc.) from PDF32000_2008 14.8.5,
+ // and then a value of the proper type according to the spec.
+ void appendInt(const char* owner, const char* name, int value);
+ void appendFloat(const char* owner, const char* name, float value);
+ void appendName(const char* owner, const char* attrName, const char* value);
+ void appendFloatArray(const char* owner,
+ const char* name,
+ const std::vector<float>& value);
+ void appendNodeIdArray(const char* owner,
+ const char* attrName,
+ const std::vector<int>& nodeIds);
+
+private:
+ friend class ::SkPDFTagTree;
+
+ std::unique_ptr<SkPDFArray> fAttrs;
+};
+
+/** A node in a PDF structure tree, giving a semantic representation
+ of the content. Each node ID is associated with content
+ by passing the SkCanvas and node ID to SkPDF::SetNodeId() when drawing.
+ NodeIDs should be unique within each tree.
+*/
+struct StructureElementNode {
+ SkString fTypeString;
+ std::vector<std::unique_ptr<StructureElementNode>> fChildVector;
+ int fNodeId = 0;
+ std::vector<int> fAdditionalNodeIds;
+ AttributeList fAttributes;
+ SkString fAlt;
+ SkString fLang;
+};
+
+/** Optional metadata to be passed into the PDF factory function.
+*/
+struct Metadata {
+ /** The document's title.
+ */
+ SkString fTitle;
+
+ /** The name of the person who created the document.
+ */
+ SkString fAuthor;
+
+ /** The subject of the document.
+ */
+ SkString fSubject;
+
+ /** Keywords associated with the document. Commas may be used to delineate
+ keywords within the string.
+ */
+ SkString fKeywords;
+
+ /** If the document was converted to PDF from another format,
+ the name of the conforming product that created the
+ original document from which it was converted.
+ */
+ SkString fCreator;
+
+ /** The product that is converting this document to PDF.
+ */
+ SkString fProducer = SkString("Skia/PDF m" SKPDF_STRING(SK_MILESTONE));
+
+ /** The date and time the document was created.
+ The zero default value represents an unknown/unset time.
+ */
+ SkTime::DateTime fCreation = {0, 0, 0, 0, 0, 0, 0, 0};
+
+ /** The date and time the document was most recently modified.
+ The zero default value represents an unknown/unset time.
+ */
+ SkTime::DateTime fModified = {0, 0, 0, 0, 0, 0, 0, 0};
+
+ /** The DPI (pixels-per-inch) at which features without native PDF support
+ will be rasterized (e.g. draw image with perspective, draw text with
+ perspective, ...) A larger DPI would create a PDF that reflects the
+ original intent with better fidelity, but it can make for larger PDF
+ files too, which would use more memory while rendering, and it would be
+ slower to be processed or sent online or to printer.
+ */
+ SkScalar fRasterDPI = SK_ScalarDefaultRasterDPI;
+
+ /** If true, include XMP metadata, a document UUID, and sRGB output intent
+ information. This adds length to the document and makes it
+ non-reproducable, but are necessary features for PDF/A-2b conformance
+ */
+ bool fPDFA = false;
+
+ /** Encoding quality controls the trade-off between size and quality. By
+ default this is set to 101 percent, which corresponds to lossless
+ encoding. If this value is set to a value <= 100, and the image is
+ opaque, it will be encoded (using JPEG) with that quality setting.
+ */
+ int fEncodingQuality = 101;
+
+ /** An optional tree of structured document tags that provide
+ a semantic representation of the content. The caller
+ should retain ownership.
+ */
+ StructureElementNode* fStructureElementTreeRoot = nullptr;
+
+ /** Executor to handle threaded work within PDF Backend. If this is nullptr,
+ then all work will be done serially on the main thread. To have worker
+ threads assist with various tasks, set this to a valid SkExecutor
+ instance. Currently used for executing Deflate algorithm in parallel.
+
+ If set, the PDF output will be non-reproducible in the order and
+ internal numbering of objects, but should render the same.
+
+ Experimental.
+ */
+ SkExecutor* fExecutor = nullptr;
+
+ /** PDF streams may be compressed to save space.
+ Use this to specify the desired compression vs time tradeoff.
+ */
+ enum class CompressionLevel : int {
+ Default = -1,
+ None = 0,
+ LowButFast = 1,
+ Average = 6,
+ HighButSlow = 9,
+ } fCompressionLevel = CompressionLevel::Default;
+
+ /** Preferred Subsetter. Only respected if both are compiled in.
+
+ The Sfntly subsetter is deprecated.
+
+ Experimental.
+ */
+ enum Subsetter {
+ kHarfbuzz_Subsetter,
+ kSfntly_Subsetter,
+ } fSubsetter = kHarfbuzz_Subsetter;
+};
+
+/** Associate a node ID with subsequent drawing commands in an
+ SkCanvas. The same node ID can appear in a StructureElementNode
+ in order to associate a document's structure element tree with
+ its content.
+
+ A node ID of zero indicates no node ID.
+
+ @param canvas The canvas used to draw to the PDF.
+ @param nodeId The node ID for subsequent drawing commands.
+*/
+SK_API void SetNodeId(SkCanvas* dst, int nodeID);
+
+/** Create a PDF-backed document, writing the results into a SkWStream.
+
+ PDF pages are sized in point units. 1 pt == 1/72 inch == 127/360 mm.
+
+ @param stream A PDF document will be written to this stream. The document may write
+ to the stream at anytime during its lifetime, until either close() is
+ called or the document is deleted.
+ @param metadata a PDFmetadata object. Any fields may be left empty.
+
+ @returns NULL if there is an error, otherwise a newly created PDF-backed SkDocument.
+*/
+SK_API sk_sp<SkDocument> MakeDocument(SkWStream* stream, const Metadata& metadata);
+
+static inline sk_sp<SkDocument> MakeDocument(SkWStream* stream) {
+ return MakeDocument(stream, Metadata());
+}
+
+} // namespace SkPDF
+
+#undef SKPDF_STRING
+#undef SKPDF_STRING_IMPL
+#endif // SkPDFDocument_DEFINED
diff --git a/gfx/skia/skia/include/docs/SkXPSDocument.h b/gfx/skia/skia/include/docs/SkXPSDocument.h
new file mode 100644
index 0000000000..5cd0777c9b
--- /dev/null
+++ b/gfx/skia/skia/include/docs/SkXPSDocument.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXPSDocument_DEFINED
+#define SkXPSDocument_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include "include/core/SkDocument.h"
+
+struct IXpsOMObjectFactory;
+
+namespace SkXPS {
+
+SK_API sk_sp<SkDocument> MakeDocument(SkWStream* stream,
+ IXpsOMObjectFactory* xpsFactory,
+ SkScalar dpi = SK_ScalarDefaultRasterDPI);
+
+} // namespace SkXPS
+#endif // SK_BUILD_FOR_WIN
+#endif // SkXPSDocument_DEFINED
diff --git a/gfx/skia/skia/include/effects/Sk1DPathEffect.h b/gfx/skia/skia/include/effects/Sk1DPathEffect.h
new file mode 100644
index 0000000000..fd05c52df7
--- /dev/null
+++ b/gfx/skia/skia/include/effects/Sk1DPathEffect.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk1DPathEffect_DEFINED
+#define Sk1DPathEffect_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+class SkPath;
+class SkPathEffect;
+
+class SK_API SkPath1DPathEffect {
+public:
+ enum Style {
+ kTranslate_Style, // translate the shape to each position
+ kRotate_Style, // rotate the shape about its center
+ kMorph_Style, // transform each point, and turn lines into curves
+
+ kLastEnum_Style = kMorph_Style,
+ };
+
+ /** Dash by replicating the specified path.
+ @param path The path to replicate (dash)
+ @param advance The space between instances of path
+ @param phase distance (mod advance) along path for its initial position
+ @param style how to transform path at each point (based on the current
+ position and tangent)
+ */
+ static sk_sp<SkPathEffect> Make(const SkPath& path, SkScalar advance, SkScalar phase, Style);
+
+ static void RegisterFlattenables();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/Sk2DPathEffect.h b/gfx/skia/skia/include/effects/Sk2DPathEffect.h
new file mode 100644
index 0000000000..b8b3ba3981
--- /dev/null
+++ b/gfx/skia/skia/include/effects/Sk2DPathEffect.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk2DPathEffect_DEFINED
+#define Sk2DPathEffect_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+class SkMatrix;
+class SkPath;
+class SkPathEffect;
+
+class SK_API SkLine2DPathEffect {
+public:
+ static sk_sp<SkPathEffect> Make(SkScalar width, const SkMatrix& matrix);
+
+ static void RegisterFlattenables();
+};
+
+class SK_API SkPath2DPathEffect {
+public:
+ static sk_sp<SkPathEffect> Make(const SkMatrix& matrix, const SkPath& path);
+
+ static void RegisterFlattenables();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkBlenders.h b/gfx/skia/skia/include/effects/SkBlenders.h
new file mode 100644
index 0000000000..7507071b05
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlenders.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlenders_DEFINED
+#define SkBlenders_DEFINED
+
+#include "include/core/SkBlender.h"
+
+class SK_API SkBlenders {
+public:
+ /**
+ * Create a blender that implements the following:
+ * k1 * src * dst + k2 * src + k3 * dst + k4
+ * @param k1, k2, k3, k4 The four coefficients.
+ * @param enforcePMColor If true, the RGB channels will be clamped to the calculated alpha.
+ */
+ static sk_sp<SkBlender> Arithmetic(float k1, float k2, float k3, float k4, bool enforcePremul);
+
+private:
+ SkBlenders() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkBlurDrawLooper.h b/gfx/skia/skia/include/effects/SkBlurDrawLooper.h
new file mode 100644
index 0000000000..fc766f807a
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlurDrawLooper.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurDrawLooper_DEFINED
+#define SkBlurDrawLooper_DEFINED
+
+#include "include/core/SkDrawLooper.h"
+
+#ifndef SK_SUPPORT_LEGACY_DRAWLOOPER
+#error "SkDrawLooper is unsupported"
+#endif
+
+/**
+ * DEPRECATED: No longer supported in Skia.
+ */
+namespace SkBlurDrawLooper {
+ sk_sp<SkDrawLooper> SK_API Make(SkColor4f color, SkColorSpace* cs,
+ SkScalar sigma, SkScalar dx, SkScalar dy);
+ sk_sp<SkDrawLooper> SK_API Make(SkColor color, SkScalar sigma, SkScalar dx, SkScalar dy);
+} // namespace SkBlurDrawLooper
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkBlurMaskFilter.h b/gfx/skia/skia/include/effects/SkBlurMaskFilter.h
new file mode 100644
index 0000000000..1b9319869e
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkBlurMaskFilter.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurMaskFilter_DEFINED
+#define SkBlurMaskFilter_DEFINED
+
+// we include this since our callers will need to at least be able to ref/unref
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+
+class SkRRect;
+
+class SK_API SkBlurMaskFilter {
+public:
+#ifdef SK_SUPPORT_LEGACY_EMBOSSMASKFILTER
+ /** Create an emboss maskfilter
+ @param blurSigma standard deviation of the Gaussian blur to apply
+ before applying lighting (e.g. 3)
+ @param direction array of 3 scalars [x, y, z] specifying the direction of the light source
+ @param ambient 0...1 amount of ambient light
+ @param specular coefficient for specular highlights (e.g. 8)
+ @return the emboss maskfilter
+ */
+ static sk_sp<SkMaskFilter> MakeEmboss(SkScalar blurSigma, const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular);
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkColorMatrix.h b/gfx/skia/skia/include/effects/SkColorMatrix.h
new file mode 100644
index 0000000000..5092278f0d
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorMatrix.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorMatrix_DEFINED
+#define SkColorMatrix_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <algorithm>
+#include <array>
+
+enum SkYUVColorSpace : int;
+
+class SK_API SkColorMatrix {
+public:
+ constexpr SkColorMatrix() : SkColorMatrix(1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0,
+ 0, 0, 1, 0, 0,
+ 0, 0, 0, 1, 0) {}
+
+ constexpr SkColorMatrix(float m00, float m01, float m02, float m03, float m04,
+ float m10, float m11, float m12, float m13, float m14,
+ float m20, float m21, float m22, float m23, float m24,
+ float m30, float m31, float m32, float m33, float m34)
+ : fMat { m00, m01, m02, m03, m04,
+ m10, m11, m12, m13, m14,
+ m20, m21, m22, m23, m24,
+ m30, m31, m32, m33, m34 } {}
+
+ static SkColorMatrix RGBtoYUV(SkYUVColorSpace);
+ static SkColorMatrix YUVtoRGB(SkYUVColorSpace);
+
+ void setIdentity();
+ void setScale(float rScale, float gScale, float bScale, float aScale = 1.0f);
+
+ void postTranslate(float dr, float dg, float db, float da);
+
+ void setConcat(const SkColorMatrix& a, const SkColorMatrix& b);
+ void preConcat(const SkColorMatrix& mat) { this->setConcat(*this, mat); }
+ void postConcat(const SkColorMatrix& mat) { this->setConcat(mat, *this); }
+
+ void setSaturation(float sat);
+
+ void setRowMajor(const float src[20]) { std::copy_n(src, 20, fMat.begin()); }
+ void getRowMajor(float dst[20]) const { std::copy_n(fMat.begin(), 20, dst); }
+
+private:
+ std::array<float, 20> fMat;
+
+ friend class SkColorFilters;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkColorMatrixFilter.h b/gfx/skia/skia/include/effects/SkColorMatrixFilter.h
new file mode 100644
index 0000000000..3e5337b0cf
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkColorMatrixFilter.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorMatrixFilter_DEFINED
+#define SkColorMatrixFilter_DEFINED
+
+#include "include/core/SkColorFilter.h"
+
+// (DEPRECATED) This factory function is deprecated. Please use the one in
+// SkColorFilters (i.e., Lighting).
+class SK_API SkColorMatrixFilter : public SkColorFilter {
+public:
+ static sk_sp<SkColorFilter> MakeLightingFilter(SkColor mul, SkColor add) {
+ return SkColorFilters::Lighting(mul, add);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkCornerPathEffect.h b/gfx/skia/skia/include/effects/SkCornerPathEffect.h
new file mode 100644
index 0000000000..7f7e7159f3
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkCornerPathEffect.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCornerPathEffect_DEFINED
+#define SkCornerPathEffect_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+class SkPathEffect;
+
+/** \class SkCornerPathEffect
+
+ SkCornerPathEffect is a subclass of SkPathEffect that can turn sharp corners
+ into various treatments (e.g. rounded corners)
+*/
+class SK_API SkCornerPathEffect {
+public:
+ /** radius must be > 0 to have an effect. It specifies the distance from each corner
+ that should be "rounded".
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar radius);
+
+ static void RegisterFlattenables();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkDashPathEffect.h b/gfx/skia/skia/include/effects/SkDashPathEffect.h
new file mode 100644
index 0000000000..f30064aa94
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDashPathEffect.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDashPathEffect_DEFINED
+#define SkDashPathEffect_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+class SkPathEffect;
+
+class SK_API SkDashPathEffect {
+public:
+ /** intervals: array containing an even number of entries (>=2), with
+ the even indices specifying the length of "on" intervals, and the odd
+ indices specifying the length of "off" intervals. This array will be
+ copied in Make, and can be disposed of freely after.
+ count: number of elements in the intervals array
+ phase: offset into the intervals array (mod the sum of all of the
+ intervals).
+
+ For example: if intervals[] = {10, 20}, count = 2, and phase = 25,
+ this will set up a dashed path like so:
+ 5 pixels off
+ 10 pixels on
+ 20 pixels off
+ 10 pixels on
+ 20 pixels off
+ ...
+ A phase of -5, 25, 55, 85, etc. would all result in the same path,
+ because the sum of all the intervals is 30.
+
+ Note: only affects stroked paths.
+ */
+ static sk_sp<SkPathEffect> Make(const SkScalar intervals[], int count, SkScalar phase);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkDiscretePathEffect.h b/gfx/skia/skia/include/effects/SkDiscretePathEffect.h
new file mode 100644
index 0000000000..6054cbdc99
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkDiscretePathEffect.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscretePathEffect_DEFINED
+#define SkDiscretePathEffect_DEFINED
+
+#include "include/core/SkPathEffect.h"
+
+/** \class SkDiscretePathEffect
+
+ This path effect chops a path into discrete segments, and randomly displaces them.
+*/
+class SK_API SkDiscretePathEffect {
+public:
+ /** Break the path into segments of segLength length, and randomly move the endpoints
+ away from the original path by a maximum of deviation.
+ Note: works on filled or framed paths
+
+ @param seedAssist This is a caller-supplied seedAssist that modifies
+ the seed value that is used to randomize the path
+ segments' endpoints. If not supplied it defaults to 0,
+ in which case filtering a path multiple times will
+ result in the same set of segments (this is useful for
+ testing). If a caller does not want this behaviour
+ they can pass in a different seedAssist to get a
+ different set of path segments.
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar segLength, SkScalar dev, uint32_t seedAssist = 0);
+
+ static void RegisterFlattenables();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkGradientShader.h b/gfx/skia/skia/include/effects/SkGradientShader.h
new file mode 100644
index 0000000000..4541118230
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkGradientShader.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGradientShader_DEFINED
+#define SkGradientShader_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTileMode.h"
+
+/** \class SkGradientShader
+
+ SkGradientShader hosts factories for creating subclasses of SkShader that
+ render linear and radial gradients. In general, degenerate cases should not
+ produce surprising results, but there are several types of degeneracies:
+
+ * A linear gradient made from the same two points.
+ * A radial gradient with a radius of zero.
+ * A sweep gradient where the start and end angle are the same.
+ * A two point conical gradient where the two centers and the two radii are
+ the same.
+
+ For any degenerate gradient with a decal tile mode, it will draw empty since the interpolating
+ region is zero area and the outer region is discarded by the decal mode.
+
+ For any degenerate gradient with a repeat or mirror tile mode, it will draw a solid color that
+ is the average gradient color, since infinitely many repetitions of the gradients will fill the
+ shape.
+
+ For a clamped gradient, every type is well-defined at the limit except for linear gradients. The
+ radial gradient with zero radius becomes the last color. The sweep gradient draws the sector
+ from 0 to the provided angle with the first color, with a hardstop switching to the last color.
+ When the provided angle is 0, this is just the solid last color again. Similarly, the two point
+ conical gradient becomes a circle filled with the first color, sized to the provided radius,
+ with a hardstop switching to the last color. When the two radii are both zero, this is just the
+ solid last color.
+
+ As a linear gradient approaches the degenerate case, its shader will approach the appearance of
+ two half planes, each filled by the first and last colors of the gradient. The planes will be
+ oriented perpendicular to the vector between the two defining points of the gradient. However,
+ once they become the same point, Skia cannot reconstruct what that expected orientation is. To
+ provide a stable and predictable color in this case, Skia just uses the last color as a solid
+ fill to be similar to many of the other degenerate gradients' behaviors in clamp mode.
+*/
+class SK_API SkGradientShader {
+public:
+ enum Flags {
+ /** By default gradients will interpolate their colors in unpremul space
+ * and then premultiply each of the results. By setting this flag, the
+ * gradients will premultiply their colors first, and then interpolate
+ * between them.
+ * example: https://fiddle.skia.org/c/@GradientShader_MakeLinear
+ */
+ kInterpolateColorsInPremul_Flag = 1 << 0,
+ };
+
+ struct Interpolation {
+ enum class InPremul : bool { kNo = false, kYes = true };
+
+ enum class ColorSpace : uint8_t {
+ // Default Skia behavior: interpolate in the color space of the destination surface
+ kDestination,
+
+ // https://www.w3.org/TR/css-color-4/#interpolation-space
+ kSRGBLinear,
+ kLab,
+ kOKLab,
+ kLCH,
+ kOKLCH,
+ kSRGB,
+ kHSL,
+ kHWB,
+
+ kLastColorSpace = kHWB,
+ };
+ static constexpr int kColorSpaceCount = static_cast<int>(ColorSpace::kLastColorSpace) + 1;
+
+ enum class HueMethod : uint8_t {
+ // https://www.w3.org/TR/css-color-4/#hue-interpolation
+ kShorter,
+ kLonger,
+ kIncreasing,
+ kDecreasing,
+
+ kLastHueMethod = kDecreasing,
+ };
+ static constexpr int kHueMethodCount = static_cast<int>(HueMethod::kLastHueMethod) + 1;
+
+ InPremul fInPremul = InPremul::kNo;
+
+ /*
+ * NOTE: Do not use fColorSpace or fHueMethod (yet). These features are in development and
+ * incomplete. This comment (and RELEASE_NOTES.txt) will be updated once the features are
+ * ready to be used.
+ */
+ ColorSpace fColorSpace = ColorSpace::kDestination;
+ HueMethod fHueMethod = HueMethod::kShorter; // Only relevant for LCH, OKLCH, HSL, or HWB
+
+ static Interpolation FromFlags(uint32_t flags) {
+ return {flags & kInterpolateColorsInPremul_Flag ? InPremul::kYes : InPremul::kNo,
+ ColorSpace::kDestination,
+ HueMethod::kShorter};
+ }
+ };
+
+ /** Returns a shader that generates a linear gradient between the two specified points.
+ <p />
+ @param pts The start and end points for the gradient.
+ @param colors The array[count] of colors, to be distributed between the two points
+ @param pos May be NULL. array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+ the colors are distributed evenly between the start and end point.
+ If this is not null, the values must lie between 0.0 and 1.0, and be
+ strictly increasing. If the first value is not 0.0, then an additional
+ color stop is added at position 0.0, with the same color as colors[0].
+ If the last value is not 1.0, then an additional color stop is added
+ at position 1.0, with the same color as colors[count - 1].
+ @param count Must be >=2. The number of colors (and pos if not NULL) entries.
+ @param mode The tiling mode
+
+ example: https://fiddle.skia.org/c/@GradientShader_MakeLinear
+ */
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkTileMode mode,
+ uint32_t flags = 0, const SkMatrix* localMatrix = nullptr);
+
+ /** Returns a shader that generates a linear gradient between the two specified points.
+ <p />
+ @param pts The start and end points for the gradient.
+ @param colors The array[count] of colors, to be distributed between the two points
+ @param pos May be NULL. array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+ the colors are distributed evenly between the start and end point.
+ If this is not null, the values must lie between 0.0 and 1.0, and be
+ strictly increasing. If the first value is not 0.0, then an additional
+ color stop is added at position 0.0, with the same color as colors[0].
+ If the last value is not 1.0, then an additional color stop is added
+ at position 1.0, with the same color as colors[count - 1].
+ @param count Must be >=2. The number of colors (and pos if not NULL) entries.
+ @param mode The tiling mode
+
+ example: https://fiddle.skia.org/c/@GradientShader_MakeLinear
+ */
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkTileMode mode,
+ const Interpolation& interpolation,
+ const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkTileMode mode,
+ uint32_t flags = 0, const SkMatrix* localMatrix = nullptr) {
+ return MakeLinear(pts, colors, std::move(colorSpace), pos, count, mode,
+ Interpolation::FromFlags(flags), localMatrix);
+ }
+
+ /** Returns a shader that generates a radial gradient given the center and radius.
+ <p />
+ @param center The center of the circle for this gradient
+ @param radius Must be positive. The radius of the circle for this gradient
+ @param colors The array[count] of colors, to be distributed between the center and edge of the circle
+ @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+ the colors are distributed evenly between the center and edge of the circle.
+ If this is not null, the values must lie between 0.0 and 1.0, and be
+ strictly increasing. If the first value is not 0.0, then an additional
+ color stop is added at position 0.0, with the same color as colors[0].
+ If the last value is not 1.0, then an additional color stop is added
+ at position 1.0, with the same color as colors[count - 1].
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkTileMode mode,
+ uint32_t flags = 0, const SkMatrix* localMatrix = nullptr);
+
+ /** Returns a shader that generates a radial gradient given the center and radius.
+ <p />
+ @param center The center of the circle for this gradient
+ @param radius Must be positive. The radius of the circle for this gradient
+ @param colors The array[count] of colors, to be distributed between the center and edge of the circle
+ @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative position of
+ each corresponding color in the colors array. If this is NULL,
+ the colors are distributed evenly between the center and edge of the circle.
+ If this is not null, the values must lie between 0.0 and 1.0, and be
+ strictly increasing. If the first value is not 0.0, then an additional
+ color stop is added at position 0.0, with the same color as colors[0].
+ If the last value is not 1.0, then an additional color stop is added
+ at position 1.0, with the same color as colors[count - 1].
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode The tiling mode
+ */
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkTileMode mode,
+ const Interpolation& interpolation,
+ const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count, SkTileMode mode,
+ uint32_t flags = 0, const SkMatrix* localMatrix = nullptr) {
+ return MakeRadial(center, radius, colors, std::move(colorSpace), pos, count, mode,
+ Interpolation::FromFlags(flags), localMatrix);
+ }
+
+ /**
+ * Returns a shader that generates a conical gradient given two circles, or
+ * returns NULL if the inputs are invalid. The gradient interprets the
+ * two circles according to the following HTML spec.
+ * http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient
+ */
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor colors[], const SkScalar pos[],
+ int count, SkTileMode mode,
+ uint32_t flags = 0,
+ const SkMatrix* localMatrix = nullptr);
+
+ /**
+ * Returns a shader that generates a conical gradient given two circles, or
+ * returns NULL if the inputs are invalid. The gradient interprets the
+ * two circles according to the following HTML spec.
+ * http://dev.w3.org/html5/2dcontext/#dom-context-2d-createradialgradient
+ */
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace, const SkScalar pos[],
+ int count, SkTileMode mode,
+ const Interpolation& interpolation,
+ const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeTwoPointConical(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace, const SkScalar pos[],
+ int count, SkTileMode mode,
+ uint32_t flags = 0,
+ const SkMatrix* localMatrix = nullptr) {
+ return MakeTwoPointConical(start, startRadius, end, endRadius, colors,
+ std::move(colorSpace), pos, count, mode,
+ Interpolation::FromFlags(flags), localMatrix);
+ }
+
+ /** Returns a shader that generates a sweep gradient given a center.
+
+ The shader accepts negative angles and angles larger than 360, draws
+ between 0 and 360 degrees, similar to the CSS conic-gradient
+ semantics. 0 degrees means horizontal positive x axis. The start angle
+ must be less than the end angle, otherwise a null pointer is
+ returned. If color stops do not contain 0 and 1 but are within this
+ range, the respective outer color stop is repeated for 0 and 1. Color
+ stops less than 0 are clamped to 0, and greater than 1 are clamped to 1.
+ <p />
+ @param cx The X coordinate of the center of the sweep
+ @param cy The Y coordinate of the center of the sweep
+ @param colors The array[count] of colors, to be distributed around the center, within
+ the gradient angle range.
+ @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative
+ position of each corresponding color in the colors array. If this is
+ NULL, then the colors are distributed evenly within the angular range.
+ If this is not null, the values must lie between 0.0 and 1.0, and be
+ strictly increasing. If the first value is not 0.0, then an additional
+ color stop is added at position 0.0, with the same color as colors[0].
+ If the last value is not 1.0, then an additional color stop is added
+ at position 1.0, with the same color as colors[count - 1].
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode Tiling mode: controls drawing outside of the gradient angular range.
+ @param startAngle Start of the angular range, corresponding to pos == 0.
+ @param endAngle End of the angular range, corresponding to pos == 1.
+ */
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count,
+ SkTileMode mode,
+ SkScalar startAngle, SkScalar endAngle,
+ uint32_t flags, const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[], const SkScalar pos[], int count,
+ uint32_t flags = 0, const SkMatrix* localMatrix = nullptr) {
+ return MakeSweep(cx, cy, colors, pos, count, SkTileMode::kClamp, 0, 360, flags,
+ localMatrix);
+ }
+
+ /** Returns a shader that generates a sweep gradient given a center.
+
+ The shader accepts negative angles and angles larger than 360, draws
+ between 0 and 360 degrees, similar to the CSS conic-gradient
+ semantics. 0 degrees means horizontal positive x axis. The start angle
+ must be less than the end angle, otherwise a null pointer is
+ returned. If color stops do not contain 0 and 1 but are within this
+ range, the respective outer color stop is repeated for 0 and 1. Color
+ stops less than 0 are clamped to 0, and greater than 1 are clamped to 1.
+ <p />
+ @param cx The X coordinate of the center of the sweep
+ @param cy The Y coordinate of the center of the sweep
+ @param colors The array[count] of colors, to be distributed around the center, within
+ the gradient angle range.
+ @param pos May be NULL. The array[count] of SkScalars, or NULL, of the relative
+ position of each corresponding color in the colors array. If this is
+ NULL, then the colors are distributed evenly within the angular range.
+ If this is not null, the values must lie between 0.0 and 1.0, and be
+ strictly increasing. If the first value is not 0.0, then an additional
+ color stop is added at position 0.0, with the same color as colors[0].
+ If the last value is not 1.0, then an additional color stop is added
+ at position 1.0, with the same color as colors[count - 1].
+ @param count Must be >= 2. The number of colors (and pos if not NULL) entries
+ @param mode Tiling mode: controls drawing outside of the gradient angular range.
+ @param startAngle Start of the angular range, corresponding to pos == 0.
+ @param endAngle End of the angular range, corresponding to pos == 1.
+ */
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count,
+ SkTileMode mode,
+ SkScalar startAngle, SkScalar endAngle,
+ const Interpolation& interpolation,
+ const SkMatrix* localMatrix);
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count,
+ SkTileMode mode,
+ SkScalar startAngle, SkScalar endAngle,
+ uint32_t flags, const SkMatrix* localMatrix) {
+ return MakeSweep(cx, cy, colors, std::move(colorSpace), pos, count, mode, startAngle,
+ endAngle, Interpolation::FromFlags(flags), localMatrix);
+ }
+ static sk_sp<SkShader> MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[], sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[], int count,
+ uint32_t flags = 0, const SkMatrix* localMatrix = nullptr) {
+ return MakeSweep(cx, cy, colors, std::move(colorSpace), pos, count, SkTileMode::kClamp,
+ 0, 360, flags, localMatrix);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkHighContrastFilter.h b/gfx/skia/skia/include/effects/SkHighContrastFilter.h
new file mode 100644
index 0000000000..1224ade5e4
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkHighContrastFilter.h
@@ -0,0 +1,84 @@
+/*
+* Copyright 2017 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkHighContrastFilter_DEFINED
+#define SkHighContrastFilter_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+class SkColorFilter;
+
+/**
+ * Configuration struct for SkHighContrastFilter.
+ *
+ * Provides transformations to improve contrast for users with low vision.
+ */
+struct SkHighContrastConfig {
+ enum class InvertStyle {
+ kNoInvert,
+ kInvertBrightness,
+ kInvertLightness,
+
+ kLast = kInvertLightness
+ };
+
+ SkHighContrastConfig() {
+ fGrayscale = false;
+ fInvertStyle = InvertStyle::kNoInvert;
+ fContrast = 0.0f;
+ }
+
+ SkHighContrastConfig(bool grayscale,
+ InvertStyle invertStyle,
+ SkScalar contrast)
+ : fGrayscale(grayscale),
+ fInvertStyle(invertStyle),
+ fContrast(contrast) {}
+
+ // Returns true if all of the fields are set within the valid range.
+ bool isValid() const {
+ return fInvertStyle >= InvertStyle::kNoInvert &&
+ fInvertStyle <= InvertStyle::kInvertLightness &&
+ fContrast >= -1.0 &&
+ fContrast <= 1.0;
+ }
+
+ // If true, the color will be converted to grayscale.
+ bool fGrayscale;
+
+ // Whether to invert brightness, lightness, or neither.
+ InvertStyle fInvertStyle;
+
+ // After grayscale and inverting, the contrast can be adjusted linearly.
+ // The valid range is -1.0 through 1.0, where 0.0 is no adjustment.
+ SkScalar fContrast;
+};
+
+/**
+ * Color filter that provides transformations to improve contrast
+ * for users with low vision.
+ *
+ * Applies the following transformations in this order. Each of these
+ * can be configured using SkHighContrastConfig.
+ *
+ * - Conversion to grayscale
+ * - Color inversion (either in RGB or HSL space)
+ * - Increasing the resulting contrast.
+ *
+ * Calling SkHighContrastFilter::Make will return nullptr if the config is
+ * not valid, e.g. if you try to call it with a contrast outside the range of
+ * -1.0 to 1.0.
+ */
+
+struct SK_API SkHighContrastFilter {
+ // Returns the filter, or nullptr if the config is invalid.
+ static sk_sp<SkColorFilter> Make(const SkHighContrastConfig& config);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkImageFilters.h b/gfx/skia/skia/include/effects/SkImageFilters.h
new file mode 100644
index 0000000000..75d2cb0bcc
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkImageFilters.h
@@ -0,0 +1,541 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilters_DEFINED
+#define SkImageFilters_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkRuntimeEffect.h"
+
+#include <cstddef>
+
+class SkBlender;
+class SkColorFilter;
+class SkPaint;
+class SkRegion;
+
+namespace skif {
+ static constexpr SkRect kNoCropRect = {SK_ScalarNegativeInfinity, SK_ScalarNegativeInfinity,
+ SK_ScalarInfinity, SK_ScalarInfinity};
+}
+
+// A set of factory functions providing useful SkImageFilter effects. For image filters that take an
+// input filter, providing nullptr means it will automatically use the dynamic source image. This
+// source depends on how the filter is applied, but is either the contents of a saved layer when
+// drawing with SkCanvas, or an explicit SkImage if using SkImage::makeWithFilter.
+class SK_API SkImageFilters {
+public:
+ // This is just a convenience type to allow passing SkIRects, SkRects, and optional pointers
+ // to those types as a crop rect for the image filter factories. It's not intended to be used
+ // directly.
+ struct CropRect {
+ CropRect() : fCropRect(skif::kNoCropRect) {}
+ // Intentionally not explicit so callers don't have to use this type but can use SkIRect or
+ // SkRect as desired.
+ CropRect(std::nullptr_t) : fCropRect(skif::kNoCropRect) {}
+ CropRect(const SkIRect& crop) : fCropRect(SkRect::Make(crop)) {}
+ CropRect(const SkRect& crop) : fCropRect(crop) {}
+ CropRect(const SkIRect* optionalCrop) : fCropRect(optionalCrop ? SkRect::Make(*optionalCrop)
+ : skif::kNoCropRect) {}
+ CropRect(const SkRect* optionalCrop) : fCropRect(optionalCrop ? *optionalCrop
+ : skif::kNoCropRect) {}
+
+ operator const SkRect*() const { return fCropRect == skif::kNoCropRect ? nullptr : &fCropRect; }
+
+ SkRect fCropRect;
+ };
+
+ /**
+ * Create a filter that updates the alpha of the image based on 'region'. Pixels inside the
+ * region are made more opaque and pixels outside are made more transparent.
+ *
+ * Specifically, if a pixel is inside the region, its alpha will be set to
+ * max(innerMin, pixel's alpha). If a pixel is outside the region, its alpha will be updated to
+ * min(outerMax, pixel's alpha).
+ * @param region The geometric region controlling the inner and outer alpha thresholds.
+ * @param innerMin The minimum alpha value for pixels inside 'region'.
+ * @param outerMax The maximum alpha value for pixels outside of 'region'.
+ * @param input The input filter, or uses the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> AlphaThreshold(const SkRegion& region, SkScalar innerMin,
+ SkScalar outerMax, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that implements a custom blend mode. Each output pixel is the result of
+ * combining the corresponding background and foreground pixels using the 4 coefficients:
+ * k1 * foreground * background + k2 * foreground + k3 * background + k4
+ * @param k1, k2, k3, k4 The four coefficients used to combine the foreground and background.
+ * @param enforcePMColor If true, the RGB channels will be clamped to the calculated alpha.
+ * @param background The background content, using the source bitmap when this is null.
+ * @param foreground The foreground content, using the source bitmap when this is null.
+ * @param cropRect Optional rectangle that crops the inputs and output.
+ */
+ static sk_sp<SkImageFilter> Arithmetic(SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4,
+ bool enforcePMColor, sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const CropRect& cropRect = {});
+
+ /**
+ * This filter takes an SkBlendMode and uses it to composite the two filters together.
+ * @param mode The blend mode that defines the compositing operation
+ * @param background The Dst pixels used in blending, if null the source bitmap is used.
+ * @param foreground The Src pixels used in blending, if null the source bitmap is used.
+ * @param cropRect Optional rectangle to crop input and output.
+ */
+ static sk_sp<SkImageFilter> Blend(SkBlendMode mode, sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground = nullptr,
+ const CropRect& cropRect = {});
+
+ /**
+ * This filter takes an SkBlendMode and uses it to composite the two filters together.
+ * @param blender The blender that defines the compositing operation
+ * @param background The Dst pixels used in blending, if null the source bitmap is used.
+ * @param foreground The Src pixels used in blending, if null the source bitmap is used.
+ * @param cropRect Optional rectangle to crop input and output.
+ */
+ static sk_sp<SkImageFilter> Blend(sk_sp<SkBlender> blender, sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground = nullptr,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that blurs its input by the separate X and Y sigmas. The provided tile mode
+ * is used when the blur kernel goes outside the input image.
+ * @param sigmaX The Gaussian sigma value for blurring along the X axis.
+ * @param sigmaY The Gaussian sigma value for blurring along the Y axis.
+ * @param tileMode The tile mode applied at edges.
+ * TODO (michaelludwig) - kMirror is not supported yet
+ * @param input The input filter that is blurred, uses source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> Blur(SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode,
+ sk_sp<SkImageFilter> input, const CropRect& cropRect = {});
+ // As above, but defaults to the decal tile mode.
+ static sk_sp<SkImageFilter> Blur(SkScalar sigmaX, SkScalar sigmaY, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {}) {
+ return Blur(sigmaX, sigmaY, SkTileMode::kDecal, std::move(input), cropRect);
+ }
+
+ /**
+ * Create a filter that applies the color filter to the input filter results.
+ * @param cf The color filter that transforms the input image.
+ * @param input The input filter, or uses the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> ColorFilter(sk_sp<SkColorFilter> cf, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that composes 'inner' with 'outer', such that the results of 'inner' are
+ * treated as the source bitmap passed to 'outer', i.e. result = outer(inner(source)).
+ * @param outer The outer filter that evaluates the results of inner.
+ * @param inner The inner filter that produces the input to outer.
+ */
+ static sk_sp<SkImageFilter> Compose(sk_sp<SkImageFilter> outer, sk_sp<SkImageFilter> inner);
+
+ /**
+ * Create a filter that moves each pixel in its color input based on an (x,y) vector encoded
+ * in its displacement input filter. Two color components of the displacement image are
+ * mapped into a vector as scale * (color[xChannel], color[yChannel]), where the channel
+ * selectors are one of R, G, B, or A.
+ * @param xChannelSelector RGBA channel that encodes the x displacement per pixel.
+ * @param yChannelSelector RGBA channel that encodes the y displacement per pixel.
+ * @param scale Scale applied to displacement extracted from image.
+ * @param displacement The filter defining the displacement image, or null to use source.
+ * @param color The filter providing the color pixels to be displaced. If null,
+ * it will use the source.
+ * @param cropRect Optional rectangle that crops the color input and output.
+ */
+ static sk_sp<SkImageFilter> DisplacementMap(SkColorChannel xChannelSelector,
+ SkColorChannel yChannelSelector,
+ SkScalar scale, sk_sp<SkImageFilter> displacement,
+ sk_sp<SkImageFilter> color,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that draws a drop shadow under the input content. This filter produces an
+ * image that includes the inputs' content.
+ * @param dx The X offset of the shadow.
+ * @param dy The Y offset of the shadow.
+ * @param sigmaX The blur radius for the shadow, along the X axis.
+ * @param sigmaY The blur radius for the shadow, along the Y axis.
+ * @param color The color of the drop shadow.
+ * @param input The input filter, or will use the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> DropShadow(SkScalar dx, SkScalar dy,
+ SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+ /**
+ * Create a filter that renders a drop shadow, in exactly the same manner as ::DropShadow,
+ * except that the resulting image does not include the input content. This allows the shadow
+ * and input to be composed by a filter DAG in a more flexible manner.
+ * @param dx The X offset of the shadow.
+ * @param dy The Y offset of the shadow.
+ * @param sigmaX The blur radius for the shadow, along the X axis.
+ * @param sigmaY The blur radius for the shadow, along the Y axis.
+ * @param color The color of the drop shadow.
+ * @param input The input filter, or will use the source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> DropShadowOnly(SkScalar dx, SkScalar dy,
+ SkScalar sigmaX, SkScalar sigmaY,
+ SkColor color, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that draws the 'srcRect' portion of image into 'dstRect' using the given
+ * filter quality. Similar to SkCanvas::drawImageRect. Returns null if 'image' is null.
+ * @param image The image that is output by the filter, subset by 'srcRect'.
+ * @param srcRect The source pixels sampled into 'dstRect'
+ * @param dstRect The local rectangle to draw the image into.
+ * @param sampling The sampling to use when drawing the image.
+ */
+ static sk_sp<SkImageFilter> Image(sk_sp<SkImage> image, const SkRect& srcRect,
+ const SkRect& dstRect, const SkSamplingOptions& sampling);
+
+ /**
+ * Create a filter that draws the image using the given sampling.
+ * Similar to SkCanvas::drawImage. Returns null if 'image' is null.
+ * @param image The image that is output by the filter.
+ * @param sampling The sampling to use when drawing the image.
+ */
+ static sk_sp<SkImageFilter> Image(sk_sp<SkImage> image, const SkSamplingOptions& sampling) {
+ if (image) {
+ SkRect r = SkRect::Make(image->bounds());
+ return Image(std::move(image), r, r, sampling);
+ } else {
+ return nullptr;
+ }
+ }
+
+ /**
+ * Create a filter that draws the image using Mitchell cubic resampling.
+ * @param image The image that is output by the filter.
+ */
+ static sk_sp<SkImageFilter> Image(sk_sp<SkImage> image) {
+ return Image(std::move(image), SkSamplingOptions({1/3.0f, 1/3.0f}));
+ }
+
+ /**
+ * Create a filter that mimics a zoom/magnifying lens effect.
+ * @param srcRect
+ * @param inset
+ * @param input The input filter that is magnified, if null the source bitmap is used.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> Magnifier(const SkRect& srcRect, SkScalar inset,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that applies an NxM image processing kernel to the input image. This can be
+ * used to produce effects such as sharpening, blurring, edge detection, etc.
+ * @param kernelSize The kernel size in pixels, in each dimension (N by M).
+ * @param kernel The image processing kernel. Must contain N * M elements, in row order.
+ * @param gain A scale factor applied to each pixel after convolution. This can be
+ * used to normalize the kernel, if it does not already sum to 1.
+ * @param bias A bias factor added to each pixel after convolution.
+ * @param kernelOffset An offset applied to each pixel coordinate before convolution.
+ * This can be used to center the kernel over the image
+ * (e.g., a 3x3 kernel should have an offset of {1, 1}).
+ * @param tileMode How accesses outside the image are treated.
+ * TODO (michaelludwig) - kMirror is not supported yet
+ * @param convolveAlpha If true, all channels are convolved. If false, only the RGB channels
+ * are convolved, and alpha is copied from the source image.
+ * @param input The input image filter, if null the source bitmap is used instead.
+ * @param cropRect Optional rectangle to which the output processing will be limited.
+ */
+ static sk_sp<SkImageFilter> MatrixConvolution(const SkISize& kernelSize,
+ const SkScalar kernel[], SkScalar gain,
+ SkScalar bias, const SkIPoint& kernelOffset,
+ SkTileMode tileMode, bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that transforms the input image by 'matrix'. This matrix transforms the
+ * local space, which means it effectively happens prior to any transformation coming from the
+ * SkCanvas initiating the filtering.
+ * @param matrix The matrix to apply to the original content.
+ * @param sampling How the image will be sampled when it is transformed
+ * @param input The image filter to transform, or null to use the source image.
+ */
+ static sk_sp<SkImageFilter> MatrixTransform(const SkMatrix& matrix,
+ const SkSamplingOptions& sampling,
+ sk_sp<SkImageFilter> input);
+
+ /**
+ * Create a filter that merges the 'count' filters together by drawing their results in order
+ * with src-over blending.
+ * @param filters The input filter array to merge, which must have 'count' elements. Any null
+ * filter pointers will use the source bitmap instead.
+ * @param count The number of input filters to be merged.
+ * @param cropRect Optional rectangle that crops all input filters and the output.
+ */
+ static sk_sp<SkImageFilter> Merge(sk_sp<SkImageFilter>* const filters, int count,
+ const CropRect& cropRect = {});
+ /**
+ * Create a filter that merges the results of the two filters together with src-over blending.
+ * @param first The first input filter, or the source bitmap if this is null.
+ * @param second The second input filter, or the source bitmap if this null.
+ * @param cropRect Optional rectangle that crops the inputs and output.
+ */
+ static sk_sp<SkImageFilter> Merge(sk_sp<SkImageFilter> first, sk_sp<SkImageFilter> second,
+ const CropRect& cropRect = {}) {
+ sk_sp<SkImageFilter> array[] = { std::move(first), std::move(second) };
+ return Merge(array, 2, cropRect);
+ }
+
+ /**
+ * Create a filter that offsets the input filter by the given vector.
+ * @param dx The x offset in local space that the image is shifted.
+ * @param dy The y offset in local space that the image is shifted.
+ * @param input The input that will be moved, if null the source bitmap is used instead.
+ * @param cropRect Optional rectangle to crop the input and output.
+ */
+ static sk_sp<SkImageFilter> Offset(SkScalar dx, SkScalar dy, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that produces the SkPicture as its output, drawn into targetRect. Note that
+ * the targetRect is not the same as the SkIRect cropRect that many filters accept. Returns
+ * null if 'pic' is null.
+ * @param pic The picture that is drawn for the filter output.
+ * @param targetRect The drawing region for the picture.
+ */
+ static sk_sp<SkImageFilter> Picture(sk_sp<SkPicture> pic, const SkRect& targetRect);
+ // As above, but uses SkPicture::cullRect for the drawing region.
+ static sk_sp<SkImageFilter> Picture(sk_sp<SkPicture> pic) {
+ SkRect target = pic ? pic->cullRect() : SkRect::MakeEmpty();
+ return Picture(std::move(pic), target);
+ }
+
+#ifdef SK_ENABLE_SKSL
+ /**
+ * Create a filter that fills the output with the per-pixel evaluation of the SkShader produced
+ * by the SkRuntimeShaderBuilder. The shader is defined in the image filter's local coordinate
+ * system, so it will automatically be affected by SkCanvas' transform.
+ *
+ * @param builder The builder used to produce the runtime shader, that will in turn
+ * fill the result image
+ * @param childShaderName The name of the child shader defined in the builder that will be
+ * bound to the input param (or the source image if the input param
+ * is null). If empty, the builder can have exactly one child shader,
+ * which automatically binds the input param.
+ * @param input The image filter that will be provided as input to the runtime
+ * shader. If null the implicit source image is used instead
+ */
+ static sk_sp<SkImageFilter> RuntimeShader(const SkRuntimeShaderBuilder& builder,
+ std::string_view childShaderName,
+ sk_sp<SkImageFilter> input);
+
+ /**
+ * Create a filter that fills the output with the per-pixel evaluation of the SkShader produced
+ * by the SkRuntimeShaderBuilder. The shader is defined in the image filter's local coordinate
+ * system, so it will automatically be affected by SkCanvas' transform.
+ *
+ * @param builder The builder used to produce the runtime shader, that will in turn
+ * fill the result image
+ * @param childShaderNames The names of the child shaders defined in the builder that will be
+ * bound to the input params (or the source image if the input param
+ * is null). If any name is null, or appears more than once, factory
+ * fails and returns nullptr.
+ * @param inputs The image filters that will be provided as input to the runtime
+ * shader. If any are null, the implicit source image is used instead.
+ * @param inputCount How many entries are present in 'childShaderNames' and 'inputs'.
+ */
+ static sk_sp<SkImageFilter> RuntimeShader(const SkRuntimeShaderBuilder& builder,
+ std::string_view childShaderNames[],
+ const sk_sp<SkImageFilter> inputs[],
+ int inputCount);
+#endif // SK_ENABLE_SKSL
+
+ enum class Dither : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ /**
+ * Create a filter that fills the output with the per-pixel evaluation of the SkShader. The
+ * shader is defined in the image filter's local coordinate system, so will automatically
+ * be affected by SkCanvas' transform.
+ *
+ * Like Image() and Picture(), this is a leaf filter that can be used to introduce inputs to
+ * a complex filter graph, but should generally be combined with a filter that as at least
+ * one null input to use the implicit source image.
+ * @param shader The shader that fills the result image
+ */
+ static sk_sp<SkImageFilter> Shader(sk_sp<SkShader> shader, const CropRect& cropRect = {}) {
+ return Shader(std::move(shader), Dither::kNo, cropRect);
+ }
+ static sk_sp<SkImageFilter> Shader(sk_sp<SkShader> shader, Dither dither,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a tile image filter.
+ * @param src Defines the pixels to tile
+ * @param dst Defines the pixel region that the tiles will be drawn to
+ * @param input The input that will be tiled, if null the source bitmap is used instead.
+ */
+ static sk_sp<SkImageFilter> Tile(const SkRect& src, const SkRect& dst,
+ sk_sp<SkImageFilter> input);
+
+ // Morphology filter effects
+
+ /**
+ * Create a filter that dilates each input pixel's channel values to the max value within the
+ * given radii along the x and y axes.
+ * @param radiusX The distance to dilate along the x axis to either side of each pixel.
+ * @param radiusY The distance to dilate along the y axis to either side of each pixel.
+ * @param input The image filter that is dilated, using source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> Dilate(SkScalar radiusX, SkScalar radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that erodes each input pixel's channel values to the minimum channel value
+ * within the given radii along the x and y axes.
+ * @param radiusX The distance to erode along the x axis to either side of each pixel.
+ * @param radiusY The distance to erode along the y axis to either side of each pixel.
+ * @param input The image filter that is eroded, using source bitmap if this is null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> Erode(SkScalar radiusX, SkScalar radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ // Lighting filter effects
+
+ /**
+ * Create a filter that calculates the diffuse illumination from a distant light source,
+ * interpreting the alpha channel of the input as the height profile of the surface (to
+ * approximate normal vectors).
+ * @param direction The direction to the distance light.
+ * @param lightColor The color of the diffuse light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param kd Diffuse reflectance coefficient.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> DistantLitDiffuse(const SkPoint3& direction, SkColor lightColor,
+ SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+ /**
+ * Create a filter that calculates the diffuse illumination from a point light source, using
+ * alpha channel of the input as the height profile of the surface (to approximate normal
+ * vectors).
+ * @param location The location of the point light.
+ * @param lightColor The color of the diffuse light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param kd Diffuse reflectance coefficient.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> PointLitDiffuse(const SkPoint3& location, SkColor lightColor,
+ SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+ /**
+ * Create a filter that calculates the diffuse illumination from a spot light source, using
+ * alpha channel of the input as the height profile of the surface (to approximate normal
+ * vectors). The spot light is restricted to be within 'cutoffAngle' of the vector between
+ * the location and target.
+ * @param location The location of the spot light.
+ * @param target The location that the spot light is point towards
+ * @param falloffExponent Exponential falloff parameter for illumination outside of cutoffAngle
+ * @param cutoffAngle Maximum angle from lighting direction that receives full light
+ * @param lightColor The color of the diffuse light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param kd Diffuse reflectance coefficient.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> SpotLitDiffuse(const SkPoint3& location, const SkPoint3& target,
+ SkScalar falloffExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale,
+ SkScalar kd, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+ /**
+ * Create a filter that calculates the specular illumination from a distant light source,
+ * interpreting the alpha channel of the input as the height profile of the surface (to
+ * approximate normal vectors).
+ * @param direction The direction to the distance light.
+ * @param lightColor The color of the specular light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param ks Specular reflectance coefficient.
+ * @param shininess The specular exponent determining how shiny the surface is.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> DistantLitSpecular(const SkPoint3& direction, SkColor lightColor,
+ SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+ /**
+ * Create a filter that calculates the specular illumination from a point light source, using
+ * alpha channel of the input as the height profile of the surface (to approximate normal
+ * vectors).
+ * @param location The location of the point light.
+ * @param lightColor The color of the specular light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param ks Specular reflectance coefficient.
+ * @param shininess The specular exponent determining how shiny the surface is.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> PointLitSpecular(const SkPoint3& location, SkColor lightColor,
+ SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+ /**
+ * Create a filter that calculates the specular illumination from a spot light source, using
+ * alpha channel of the input as the height profile of the surface (to approximate normal
+ * vectors). The spot light is restricted to be within 'cutoffAngle' of the vector between
+ * the location and target.
+ * @param location The location of the spot light.
+ * @param target The location that the spot light is point towards
+ * @param falloffExponent Exponential falloff parameter for illumination outside of cutoffAngle
+ * @param cutoffAngle Maximum angle from lighting direction that receives full light
+ * @param lightColor The color of the specular light source.
+ * @param surfaceScale Scale factor to transform from alpha values to physical height.
+ * @param ks Specular reflectance coefficient.
+ * @param shininess The specular exponent determining how shiny the surface is.
+ * @param input The input filter that defines surface normals (as alpha), or uses the
+ * source bitmap when null.
+ * @param cropRect Optional rectangle that crops the input and output.
+ */
+ static sk_sp<SkImageFilter> SpotLitSpecular(const SkPoint3& location, const SkPoint3& target,
+ SkScalar falloffExponent, SkScalar cutoffAngle,
+ SkColor lightColor, SkScalar surfaceScale,
+ SkScalar ks, SkScalar shininess,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect = {});
+
+private:
+ SkImageFilters() = delete;
+};
+
+#endif // SkImageFilters_DEFINED
diff --git a/gfx/skia/skia/include/effects/SkLayerDrawLooper.h b/gfx/skia/skia/include/effects/SkLayerDrawLooper.h
new file mode 100644
index 0000000000..1e875b58cc
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLayerDrawLooper.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLayerDrawLooper_DEFINED
+#define SkLayerDrawLooper_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkDrawLooper.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+
+#ifndef SK_SUPPORT_LEGACY_DRAWLOOPER
+#error "SkDrawLooper is unsupported"
+#endif
+
+/**
+ * DEPRECATED: No longer supported by Skia.
+ */
+class SK_API SkLayerDrawLooper : public SkDrawLooper {
+public:
+ ~SkLayerDrawLooper() override;
+
+ /**
+ * Bits specifies which aspects of the layer's paint should replace the
+ * corresponding aspects on the draw's paint.
+ * kEntirePaint_Bits means use the layer's paint completely.
+ * 0 means ignore the layer's paint... except for fColorMode, which is
+ * always applied.
+ */
+ enum Bits {
+ kStyle_Bit = 1 << 0, //!< use this layer's Style/stroke settings
+ kPathEffect_Bit = 1 << 2, //!< use this layer's patheffect
+ kMaskFilter_Bit = 1 << 3, //!< use this layer's maskfilter
+ kShader_Bit = 1 << 4, //!< use this layer's shader
+ kColorFilter_Bit = 1 << 5, //!< use this layer's colorfilter
+ kXfermode_Bit = 1 << 6, //!< use this layer's xfermode
+
+ // unsupported kTextSkewX_Bit = 1 << 1,
+
+ /**
+ * Use the layer's paint entirely, with these exceptions:
+ * - We never override the draw's paint's text_encoding, since that is
+ * used to interpret the text/len parameters in draw[Pos]Text.
+ * - Color is always computed using the LayerInfo's fColorMode.
+ */
+ kEntirePaint_Bits = -1
+
+ };
+ typedef int32_t BitFlags;
+
+ /**
+ * Info for how to apply the layer's paint and offset.
+ *
+ * fColorMode controls how we compute the final color for the layer:
+ * The layer's paint's color is treated as the SRC
+ * The draw's paint's color is treated as the DST
+ * final-color = Mode(layers-color, draws-color);
+ * Any SkBlendMode will work. Two common choices are:
+ * kSrc: to use the layer's color, ignoring the draw's
+ * kDst: to just keep the draw's color, ignoring the layer's
+ */
+ struct SK_API LayerInfo {
+ BitFlags fPaintBits;
+ SkBlendMode fColorMode;
+ SkVector fOffset;
+ bool fPostTranslate; //!< applies to fOffset
+
+ /**
+ * Initial the LayerInfo. Defaults to settings that will draw the
+ * layer with no changes: e.g.
+ * fPaintBits == 0
+ * fColorMode == kDst_Mode
+ * fOffset == (0, 0)
+ */
+ LayerInfo();
+ };
+
+ SkDrawLooper::Context* makeContext(SkArenaAlloc*) const override;
+
+ bool asABlurShadow(BlurShadowRec* rec) const override;
+
+protected:
+ SkLayerDrawLooper();
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLayerDrawLooper)
+
+ struct Rec {
+ Rec* fNext;
+ SkPaint fPaint;
+ LayerInfo fInfo;
+ };
+ Rec* fRecs;
+ int fCount;
+
+ // state-machine during the init/next cycle
+ class LayerDrawLooperContext : public SkDrawLooper::Context {
+ public:
+ explicit LayerDrawLooperContext(const SkLayerDrawLooper* looper);
+
+ protected:
+ bool next(Info*, SkPaint* paint) override;
+
+ private:
+ Rec* fCurrRec;
+
+ static void ApplyInfo(SkPaint* dst, const SkPaint& src, const LayerInfo&);
+ };
+
+ using INHERITED = SkDrawLooper;
+
+public:
+ class SK_API Builder {
+ public:
+ Builder();
+
+ ~Builder();
+
+ /**
+ * Call for each layer you want to add (from top to bottom).
+ * This returns a paint you can modify, but that ptr is only valid until
+ * the next call made to addLayer().
+ */
+ SkPaint* addLayer(const LayerInfo&);
+
+ /**
+ * This layer will draw with the original paint, at the specified offset
+ */
+ void addLayer(SkScalar dx, SkScalar dy);
+
+ /**
+ * This layer will with the original paint and no offset.
+ */
+ void addLayer() { this->addLayer(0, 0); }
+
+ /// Similar to addLayer, but adds a layer to the top.
+ SkPaint* addLayerOnTop(const LayerInfo&);
+
+ /**
+ * Pass list of layers on to newly built looper and return it. This will
+ * also reset the builder, so it can be used to build another looper.
+ */
+ sk_sp<SkDrawLooper> detach();
+
+ private:
+ Builder(const Builder&) = delete;
+ Builder& operator=(const Builder&) = delete;
+
+ Rec* fRecs;
+ Rec* fTopRec;
+ int fCount;
+ };
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkLumaColorFilter.h b/gfx/skia/skia/include/effects/SkLumaColorFilter.h
new file mode 100644
index 0000000000..41a9a45f3f
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkLumaColorFilter.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLumaColorFilter_DEFINED
+#define SkLumaColorFilter_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkColorFilter;
+
+/**
+ * SkLumaColorFilter multiplies the luma of its input into the alpha channel,
+ * and sets the red, green, and blue channels to zero.
+ *
+ * SkLumaColorFilter(r,g,b,a) = {0,0,0, a * luma(r,g,b)}
+ *
+ * This is similar to a luminanceToAlpha feColorMatrix,
+ * but note how this filter folds in the previous alpha,
+ * something an feColorMatrix cannot do.
+ *
+ * feColorMatrix(luminanceToAlpha; r,g,b,a) = {0,0,0, luma(r,g,b)}
+ *
+ * (Despite its name, an feColorMatrix using luminanceToAlpha does
+ * actually compute luma, a dot-product of gamma-encoded color channels,
+ * not luminance, a dot-product of linear color channels. So at least
+ * SkLumaColorFilter and feColorMatrix+luminanceToAlpha agree there.)
+ */
+struct SK_API SkLumaColorFilter {
+ static sk_sp<SkColorFilter> Make();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkOpPathEffect.h b/gfx/skia/skia/include/effects/SkOpPathEffect.h
new file mode 100644
index 0000000000..3c9110f0cc
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkOpPathEffect.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpPathEffect_DEFINED
+#define SkOpPathEffect_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+
+class SkMatrix;
+class SkPathEffect;
+
+class SK_API SkMergePathEffect {
+public:
+ /* Defers to two other patheffects, and then combines their outputs using the specified op.
+ * e.g.
+ * result = output_one op output_two
+ *
+ * If either one or two is nullptr, then the original path is passed through to the op.
+ */
+ static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two, SkPathOp op);
+};
+
+class SK_API SkMatrixPathEffect {
+public:
+ static sk_sp<SkPathEffect> MakeTranslate(SkScalar dx, SkScalar dy);
+ static sk_sp<SkPathEffect> Make(const SkMatrix&);
+};
+
+class SK_API SkStrokePathEffect {
+public:
+ static sk_sp<SkPathEffect> Make(SkScalar width, SkPaint::Join, SkPaint::Cap,
+ SkScalar miter = 4);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkOverdrawColorFilter.h b/gfx/skia/skia/include/effects/SkOverdrawColorFilter.h
new file mode 100644
index 0000000000..5f1642483a
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkOverdrawColorFilter.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkColorFilter;
+
+#ifndef SkOverdrawColorFilter_DEFINED
+#define SkOverdrawColorFilter_DEFINED
+
+/**
+ * Uses the value in the src alpha channel to set the dst pixel.
+ * 0 -> colors[0]
+ * 1 -> colors[1]
+ * ...
+ * 5 (or larger) -> colors[5]
+ *
+ */
+class SK_API SkOverdrawColorFilter {
+public:
+ static constexpr int kNumColors = 6;
+
+ static sk_sp<SkColorFilter> MakeWithSkColors(const SkColor[kNumColors]);
+};
+
+#endif // SkOverdrawColorFilter_DEFINED
diff --git a/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h b/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h
new file mode 100644
index 0000000000..f94b3420fc
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkPerlinNoiseShader.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPerlinNoiseShader_DEFINED
+#define SkPerlinNoiseShader_DEFINED
+
+#include "include/core/SkShader.h"
+
+/** \class SkPerlinNoiseShader
+
+ SkPerlinNoiseShader creates an image using the Perlin turbulence function.
+
+ It can produce tileable noise if asked to stitch tiles and provided a tile size.
+ In order to fill a large area with repeating noise, set the stitchTiles flag to
+ true, and render exactly a single tile of noise. Without this flag, the result
+ will contain visible seams between tiles.
+
+ The algorithm used is described here :
+ http://www.w3.org/TR/SVG/filters.html#feTurbulenceElement
+*/
+class SK_API SkPerlinNoiseShader {
+public:
+ /**
+ * This will construct Perlin noise of the given type (Fractal Noise or Turbulence).
+ *
+ * Both base frequencies (X and Y) have a usual range of (0..1) and must be non-negative.
+ *
+ * The number of octaves provided should be fairly small, with a limit of 255 enforced.
+ * Each octave doubles the frequency, so 10 octaves would produce noise from
+ * baseFrequency * 1, * 2, * 4, ..., * 512, which quickly yields insignificantly small
+ * periods and resembles regular unstructured noise rather than Perlin noise.
+ *
+ * If tileSize isn't NULL or an empty size, the tileSize parameter will be used to modify
+ * the frequencies so that the noise will be tileable for the given tile size. If tileSize
+ * is NULL or an empty size, the frequencies will be used as is without modification.
+ */
+ static sk_sp<SkShader> MakeFractalNoise(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = nullptr);
+ static sk_sp<SkShader> MakeTurbulence(SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ int numOctaves, SkScalar seed,
+ const SkISize* tileSize = nullptr);
+
+ static void RegisterFlattenables();
+
+private:
+ SkPerlinNoiseShader() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkRuntimeEffect.h b/gfx/skia/skia/include/effects/SkRuntimeEffect.h
new file mode 100644
index 0000000000..81246d4020
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkRuntimeEffect.h
@@ -0,0 +1,541 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRuntimeEffect_DEFINED
+#define SkRuntimeEffect_DEFINED
+
+#include "include/core/SkBlender.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLSampleUsage.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <string>
+#include <optional>
+#include <vector>
+
+#ifdef SK_ENABLE_SKSL
+
+#include "include/sksl/SkSLVersion.h"
+
+class GrRecordingContext;
+class SkFilterColorProgram;
+class SkImage;
+class SkRuntimeImageFilter;
+
+namespace SkSL {
+class DebugTrace;
+class ErrorReporter;
+class FunctionDefinition;
+struct Program;
+enum class ProgramKind : int8_t;
+struct ProgramSettings;
+} // namespace SkSL
+
+namespace skvm {
+class Program;
+}
+
+namespace SkSL::RP {
+class Program;
+}
+
+/*
+ * SkRuntimeEffect supports creating custom SkShader and SkColorFilter objects using Skia's SkSL
+ * shading language.
+ *
+ * NOTE: This API is experimental and subject to change.
+ */
+class SK_API SkRuntimeEffect : public SkRefCnt {
+public:
+ // Reflected description of a uniform variable in the effect's SkSL
+ struct Uniform {
+ enum class Type {
+ kFloat,
+ kFloat2,
+ kFloat3,
+ kFloat4,
+ kFloat2x2,
+ kFloat3x3,
+ kFloat4x4,
+ kInt,
+ kInt2,
+ kInt3,
+ kInt4,
+ };
+
+ enum Flags {
+ // Uniform is declared as an array. 'count' contains array length.
+ kArray_Flag = 0x1,
+
+ // Uniform is declared with layout(color). Colors should be supplied as unpremultiplied,
+ // extended-range (unclamped) sRGB (ie SkColor4f). The uniform will be automatically
+ // transformed to unpremultiplied extended-range working-space colors.
+ kColor_Flag = 0x2,
+
+ // When used with SkMeshSpecification, indicates that the uniform is present in the
+ // vertex shader. Not used with SkRuntimeEffect.
+ kVertex_Flag = 0x4,
+
+ // When used with SkMeshSpecification, indicates that the uniform is present in the
+ // fragment shader. Not used with SkRuntimeEffect.
+ kFragment_Flag = 0x8,
+
+ // This flag indicates that the SkSL uniform uses a medium-precision type
+ // (i.e., `half` instead of `float`).
+ kHalfPrecision_Flag = 0x10,
+ };
+
+ std::string_view name;
+ size_t offset;
+ Type type;
+ int count;
+ uint32_t flags;
+
+ bool isArray() const { return SkToBool(this->flags & kArray_Flag); }
+ bool isColor() const { return SkToBool(this->flags & kColor_Flag); }
+ size_t sizeInBytes() const;
+ };
+
+ // Reflected description of a uniform child (shader or colorFilter) in the effect's SkSL
+ enum class ChildType {
+ kShader,
+ kColorFilter,
+ kBlender,
+ };
+
+ struct Child {
+ std::string_view name;
+ ChildType type;
+ int index;
+ };
+
+ class Options {
+ public:
+ // For testing purposes, disables optimization and inlining. (Normally, Runtime Effects
+ // don't run the inliner directly, but they still get an inlining pass once they are
+ // painted.)
+ bool forceUnoptimized = false;
+
+ private:
+ friend class SkRuntimeEffect;
+ friend class SkRuntimeEffectPriv;
+
+ // This flag allows Runtime Effects to access Skia implementation details like sk_FragCoord
+ // and functions with private identifiers (e.g. $rgb_to_hsl).
+ bool allowPrivateAccess = false;
+
+ // TODO(skia:11209) - Replace this with a promised SkCapabilities?
+ // This flag lifts the ES2 restrictions on Runtime Effects that are gated by the
+ // `strictES2Mode` check. Be aware that the software renderer and pipeline-stage effect are
+ // still largely ES3-unaware and can still fail or crash if post-ES2 features are used.
+ // This is only intended for use by tests and certain internally created effects.
+ SkSL::Version maxVersionAllowed = SkSL::Version::k100;
+ };
+
+ // If the effect is compiled successfully, `effect` will be non-null.
+ // Otherwise, `errorText` will contain the reason for failure.
+ struct Result {
+ sk_sp<SkRuntimeEffect> effect;
+ SkString errorText;
+ };
+
+ // MakeForColorFilter and MakeForShader verify that the SkSL code is valid for those stages of
+ // the Skia pipeline. In all of the signatures described below, color parameters and return
+ // values are flexible. They are listed as being 'vec4', but they can also be 'half4' or
+ // 'float4'. ('vec4' is an alias for 'float4').
+
+ // We can't use a default argument for `options` due to a bug in Clang.
+ // https://bugs.llvm.org/show_bug.cgi?id=36684
+
+ // Color filter SkSL requires an entry point that looks like:
+ // vec4 main(vec4 inColor) { ... }
+ static Result MakeForColorFilter(SkString sksl, const Options&);
+ static Result MakeForColorFilter(SkString sksl) {
+ return MakeForColorFilter(std::move(sksl), Options{});
+ }
+
+ // Shader SkSL requires an entry point that looks like:
+ // vec4 main(vec2 inCoords) { ... }
+ static Result MakeForShader(SkString sksl, const Options&);
+ static Result MakeForShader(SkString sksl) {
+ return MakeForShader(std::move(sksl), Options{});
+ }
+
+ // Blend SkSL requires an entry point that looks like:
+ // vec4 main(vec4 srcColor, vec4 dstColor) { ... }
+ static Result MakeForBlender(SkString sksl, const Options&);
+ static Result MakeForBlender(SkString sksl) {
+ return MakeForBlender(std::move(sksl), Options{});
+ }
+
+ // Object that allows passing a SkShader, SkColorFilter or SkBlender as a child
+ class ChildPtr {
+ public:
+ ChildPtr() = default;
+ ChildPtr(sk_sp<SkShader> s) : fChild(std::move(s)) {}
+ ChildPtr(sk_sp<SkColorFilter> cf) : fChild(std::move(cf)) {}
+ ChildPtr(sk_sp<SkBlender> b) : fChild(std::move(b)) {}
+
+ // Asserts that the flattenable is either null, or one of the legal derived types
+ ChildPtr(sk_sp<SkFlattenable> f);
+
+ std::optional<ChildType> type() const;
+
+ SkShader* shader() const;
+ SkColorFilter* colorFilter() const;
+ SkBlender* blender() const;
+ SkFlattenable* flattenable() const { return fChild.get(); }
+
+ using sk_is_trivially_relocatable = std::true_type;
+
+ private:
+ sk_sp<SkFlattenable> fChild;
+
+ static_assert(::sk_is_trivially_relocatable<decltype(fChild)>::value);
+ };
+
+ sk_sp<SkShader> makeShader(sk_sp<const SkData> uniforms,
+ sk_sp<SkShader> children[],
+ size_t childCount,
+ const SkMatrix* localMatrix = nullptr) const;
+ sk_sp<SkShader> makeShader(sk_sp<const SkData> uniforms,
+ SkSpan<ChildPtr> children,
+ const SkMatrix* localMatrix = nullptr) const;
+
+ sk_sp<SkImage> makeImage(GrRecordingContext*,
+ sk_sp<const SkData> uniforms,
+ SkSpan<ChildPtr> children,
+ const SkMatrix* localMatrix,
+ SkImageInfo resultInfo,
+ bool mipmapped) const;
+
+ sk_sp<SkColorFilter> makeColorFilter(sk_sp<const SkData> uniforms) const;
+ sk_sp<SkColorFilter> makeColorFilter(sk_sp<const SkData> uniforms,
+ sk_sp<SkColorFilter> children[],
+ size_t childCount) const;
+ sk_sp<SkColorFilter> makeColorFilter(sk_sp<const SkData> uniforms,
+ SkSpan<ChildPtr> children) const;
+
+ sk_sp<SkBlender> makeBlender(sk_sp<const SkData> uniforms,
+ SkSpan<ChildPtr> children = {}) const;
+
+ /**
+ * Creates a new Runtime Effect patterned after an already-existing one. The new shader behaves
+ * like the original, but also creates a debug trace of its execution at the requested
+ * coordinate. After painting with this shader, the associated DebugTrace object will contain a
+ * shader execution trace. Call `writeTrace` on the debug trace object to generate a full trace
+ * suitable for a debugger, or call `dump` to emit a human-readable trace.
+ *
+ * Debug traces are only supported on a raster (non-GPU) canvas.
+ *
+ * Debug traces are currently only supported on shaders. Color filter and blender tracing is a
+ * work-in-progress.
+ */
+ struct TracedShader {
+ sk_sp<SkShader> shader;
+ sk_sp<SkSL::DebugTrace> debugTrace;
+ };
+ static TracedShader MakeTraced(sk_sp<SkShader> shader, const SkIPoint& traceCoord);
+
+ // Returns the SkSL source of the runtime effect shader.
+ const std::string& source() const;
+
+ // Combined size of all 'uniform' variables. When calling makeColorFilter or makeShader,
+ // provide an SkData of this size, containing values for all of those variables.
+ size_t uniformSize() const;
+
+ SkSpan<const Uniform> uniforms() const { return SkSpan(fUniforms); }
+ SkSpan<const Child> children() const { return SkSpan(fChildren); }
+
+ // Returns pointer to the named uniform variable's description, or nullptr if not found
+ const Uniform* findUniform(std::string_view name) const;
+
+ // Returns pointer to the named child's description, or nullptr if not found
+ const Child* findChild(std::string_view name) const;
+
+ // Allows the runtime effect type to be identified.
+ bool allowShader() const { return (fFlags & kAllowShader_Flag); }
+ bool allowColorFilter() const { return (fFlags & kAllowColorFilter_Flag); }
+ bool allowBlender() const { return (fFlags & kAllowBlender_Flag); }
+
+ static void RegisterFlattenables();
+ ~SkRuntimeEffect() override;
+
+private:
+ enum Flags {
+ kUsesSampleCoords_Flag = 0x01,
+ kAllowColorFilter_Flag = 0x02,
+ kAllowShader_Flag = 0x04,
+ kAllowBlender_Flag = 0x08,
+ kSamplesOutsideMain_Flag = 0x10,
+ kUsesColorTransform_Flag = 0x20,
+ kAlwaysOpaque_Flag = 0x40,
+ };
+
+ SkRuntimeEffect(std::unique_ptr<SkSL::Program> baseProgram,
+ const Options& options,
+ const SkSL::FunctionDefinition& main,
+ std::vector<Uniform>&& uniforms,
+ std::vector<Child>&& children,
+ std::vector<SkSL::SampleUsage>&& sampleUsages,
+ uint32_t flags);
+
+ sk_sp<SkRuntimeEffect> makeUnoptimizedClone();
+
+ static Result MakeFromSource(SkString sksl, const Options& options, SkSL::ProgramKind kind);
+
+ static Result MakeInternal(std::unique_ptr<SkSL::Program> program,
+ const Options& options,
+ SkSL::ProgramKind kind);
+
+ static SkSL::ProgramSettings MakeSettings(const Options& options);
+
+ uint32_t hash() const { return fHash; }
+ bool usesSampleCoords() const { return (fFlags & kUsesSampleCoords_Flag); }
+ bool samplesOutsideMain() const { return (fFlags & kSamplesOutsideMain_Flag); }
+ bool usesColorTransform() const { return (fFlags & kUsesColorTransform_Flag); }
+ bool alwaysOpaque() const { return (fFlags & kAlwaysOpaque_Flag); }
+
+ const SkFilterColorProgram* getFilterColorProgram() const;
+ const SkSL::RP::Program* getRPProgram() const;
+
+#if defined(SK_GANESH)
+ friend class GrSkSLFP; // fBaseProgram, fSampleUsages
+ friend class GrGLSLSkSLFP; //
+#endif
+
+ friend class SkRTShader; // fBaseProgram, fMain, fSampleUsages, getRPProgram()
+ friend class SkRuntimeBlender; //
+ friend class SkRuntimeColorFilter; //
+
+ friend class SkFilterColorProgram;
+ friend class SkRuntimeEffectPriv;
+
+ uint32_t fHash;
+
+ std::unique_ptr<SkSL::Program> fBaseProgram;
+ std::unique_ptr<SkSL::RP::Program> fRPProgram;
+ mutable SkOnce fCompileRPProgramOnce;
+ const SkSL::FunctionDefinition& fMain;
+ std::vector<Uniform> fUniforms;
+ std::vector<Child> fChildren;
+ std::vector<SkSL::SampleUsage> fSampleUsages;
+
+ std::unique_ptr<SkFilterColorProgram> fFilterColorProgram;
+
+ uint32_t fFlags; // Flags
+};
+
+/** Base class for SkRuntimeShaderBuilder, defined below. */
+class SkRuntimeEffectBuilder {
+public:
+ struct BuilderUniform {
+ // Copy 'val' to this variable. No type conversion is performed - 'val' must be same
+ // size as expected by the effect. Information about the variable can be queried by
+ // looking at fVar. If the size is incorrect, no copy will be performed, and debug
+ // builds will abort. If this is the result of querying a missing variable, fVar will
+ // be nullptr, and assigning will also do nothing (and abort in debug builds).
+ template <typename T>
+ std::enable_if_t<std::is_trivially_copyable<T>::value, BuilderUniform&> operator=(
+ const T& val) {
+ if (!fVar) {
+ SkDEBUGFAIL("Assigning to missing variable");
+ } else if (sizeof(val) != fVar->sizeInBytes()) {
+ SkDEBUGFAIL("Incorrect value size");
+ } else {
+ memcpy(SkTAddOffset<void>(fOwner->writableUniformData(), fVar->offset),
+ &val, sizeof(val));
+ }
+ return *this;
+ }
+
+ BuilderUniform& operator=(const SkMatrix& val) {
+ if (!fVar) {
+ SkDEBUGFAIL("Assigning to missing variable");
+ } else if (fVar->sizeInBytes() != 9 * sizeof(float)) {
+ SkDEBUGFAIL("Incorrect value size");
+ } else {
+ float* data = SkTAddOffset<float>(fOwner->writableUniformData(),
+ (ptrdiff_t)fVar->offset);
+ data[0] = val.get(0); data[1] = val.get(3); data[2] = val.get(6);
+ data[3] = val.get(1); data[4] = val.get(4); data[5] = val.get(7);
+ data[6] = val.get(2); data[7] = val.get(5); data[8] = val.get(8);
+ }
+ return *this;
+ }
+
+ template <typename T>
+ bool set(const T val[], const int count) {
+ static_assert(std::is_trivially_copyable<T>::value, "Value must be trivial copyable");
+ if (!fVar) {
+ SkDEBUGFAIL("Assigning to missing variable");
+ return false;
+ } else if (sizeof(T) * count != fVar->sizeInBytes()) {
+ SkDEBUGFAIL("Incorrect value size");
+ return false;
+ } else {
+ memcpy(SkTAddOffset<void>(fOwner->writableUniformData(), fVar->offset),
+ val, sizeof(T) * count);
+ }
+ return true;
+ }
+
+ SkRuntimeEffectBuilder* fOwner;
+ const SkRuntimeEffect::Uniform* fVar; // nullptr if the variable was not found
+ };
+
+ struct BuilderChild {
+ template <typename T> BuilderChild& operator=(sk_sp<T> val) {
+ if (!fChild) {
+ SkDEBUGFAIL("Assigning to missing child");
+ } else {
+ fOwner->fChildren[(size_t)fChild->index] = std::move(val);
+ }
+ return *this;
+ }
+
+ BuilderChild& operator=(std::nullptr_t) {
+ if (!fChild) {
+ SkDEBUGFAIL("Assigning to missing child");
+ } else {
+ fOwner->fChildren[(size_t)fChild->index] = SkRuntimeEffect::ChildPtr{};
+ }
+ return *this;
+ }
+
+ SkRuntimeEffectBuilder* fOwner;
+ const SkRuntimeEffect::Child* fChild; // nullptr if the child was not found
+ };
+
+ const SkRuntimeEffect* effect() const { return fEffect.get(); }
+
+ BuilderUniform uniform(std::string_view name) { return { this, fEffect->findUniform(name) }; }
+ BuilderChild child(std::string_view name) { return { this, fEffect->findChild(name) }; }
+
+ // Get access to the collated uniforms and children (in the order expected by APIs like
+ // makeShader on the effect):
+ sk_sp<const SkData> uniforms() { return fUniforms; }
+ SkSpan<SkRuntimeEffect::ChildPtr> children() { return fChildren; }
+
+protected:
+ SkRuntimeEffectBuilder() = delete;
+ explicit SkRuntimeEffectBuilder(sk_sp<SkRuntimeEffect> effect)
+ : fEffect(std::move(effect))
+ , fUniforms(SkData::MakeZeroInitialized(fEffect->uniformSize()))
+ , fChildren(fEffect->children().size()) {}
+ explicit SkRuntimeEffectBuilder(sk_sp<SkRuntimeEffect> effect, sk_sp<SkData> uniforms)
+ : fEffect(std::move(effect))
+ , fUniforms(std::move(uniforms))
+ , fChildren(fEffect->children().size()) {}
+
+ SkRuntimeEffectBuilder(SkRuntimeEffectBuilder&&) = default;
+ SkRuntimeEffectBuilder(const SkRuntimeEffectBuilder&) = default;
+
+ SkRuntimeEffectBuilder& operator=(SkRuntimeEffectBuilder&&) = delete;
+ SkRuntimeEffectBuilder& operator=(const SkRuntimeEffectBuilder&) = delete;
+
+private:
+ void* writableUniformData() {
+ if (!fUniforms->unique()) {
+ fUniforms = SkData::MakeWithCopy(fUniforms->data(), fUniforms->size());
+ }
+ return fUniforms->writable_data();
+ }
+
+ sk_sp<SkRuntimeEffect> fEffect;
+ sk_sp<SkData> fUniforms;
+ std::vector<SkRuntimeEffect::ChildPtr> fChildren;
+};
+
+/**
+ * SkRuntimeShaderBuilder is a utility to simplify creating SkShader objects from SkRuntimeEffects.
+ *
+ * NOTE: Like SkRuntimeEffect, this API is experimental and subject to change!
+ *
+ * Given an SkRuntimeEffect, the SkRuntimeShaderBuilder manages creating an input data block and
+ * provides named access to the 'uniform' variables in that block, as well as named access
+ * to a list of child shader slots. Usage:
+ *
+ * sk_sp<SkRuntimeEffect> effect = ...;
+ * SkRuntimeShaderBuilder builder(effect);
+ * builder.uniform("some_uniform_float") = 3.14f;
+ * builder.uniform("some_uniform_matrix") = SkM44::Rotate(...);
+ * builder.child("some_child_effect") = mySkImage->makeShader(...);
+ * ...
+ * sk_sp<SkShader> shader = builder.makeShader(nullptr, false);
+ *
+ * Note that SkRuntimeShaderBuilder is built entirely on the public API of SkRuntimeEffect,
+ * so can be used as-is or serve as inspiration for other interfaces or binding techniques.
+ */
+class SK_API SkRuntimeShaderBuilder : public SkRuntimeEffectBuilder {
+public:
+ explicit SkRuntimeShaderBuilder(sk_sp<SkRuntimeEffect>);
+ // This is currently required by Android Framework but may go away if that dependency
+ // can be removed.
+ SkRuntimeShaderBuilder(const SkRuntimeShaderBuilder&) = default;
+ ~SkRuntimeShaderBuilder();
+
+ sk_sp<SkShader> makeShader(const SkMatrix* localMatrix = nullptr);
+ sk_sp<SkImage> makeImage(GrRecordingContext*,
+ const SkMatrix* localMatrix,
+ SkImageInfo resultInfo,
+ bool mipmapped);
+
+private:
+ using INHERITED = SkRuntimeEffectBuilder;
+
+ explicit SkRuntimeShaderBuilder(sk_sp<SkRuntimeEffect> effect, sk_sp<SkData> uniforms)
+ : INHERITED(std::move(effect), std::move(uniforms)) {}
+
+ friend class SkRuntimeImageFilter;
+};
+
+/**
+ * SkRuntimeColorFilterBuilder makes it easy to setup and assign uniforms to runtime color filters.
+ */
+class SK_API SkRuntimeColorFilterBuilder : public SkRuntimeEffectBuilder {
+public:
+ explicit SkRuntimeColorFilterBuilder(sk_sp<SkRuntimeEffect>);
+ ~SkRuntimeColorFilterBuilder();
+
+ SkRuntimeColorFilterBuilder(const SkRuntimeColorFilterBuilder&) = delete;
+ SkRuntimeColorFilterBuilder& operator=(const SkRuntimeColorFilterBuilder&) = delete;
+
+ sk_sp<SkColorFilter> makeColorFilter();
+
+private:
+ using INHERITED = SkRuntimeEffectBuilder;
+};
+
+/**
+ * SkRuntimeBlendBuilder is a utility to simplify creation and uniform setup of runtime blenders.
+ */
+class SK_API SkRuntimeBlendBuilder : public SkRuntimeEffectBuilder {
+public:
+ explicit SkRuntimeBlendBuilder(sk_sp<SkRuntimeEffect>);
+ ~SkRuntimeBlendBuilder();
+
+ SkRuntimeBlendBuilder(const SkRuntimeBlendBuilder&) = delete;
+ SkRuntimeBlendBuilder& operator=(const SkRuntimeBlendBuilder&) = delete;
+
+ sk_sp<SkBlender> makeBlender();
+
+private:
+ using INHERITED = SkRuntimeEffectBuilder;
+};
+
+#endif // SK_ENABLE_SKSL
+
+#endif // SkRuntimeEffect_DEFINED
diff --git a/gfx/skia/skia/include/effects/SkShaderMaskFilter.h b/gfx/skia/skia/include/effects/SkShaderMaskFilter.h
new file mode 100644
index 0000000000..84937967bf
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkShaderMaskFilter.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShaderMaskFilter_DEFINED
+#define SkShaderMaskFilter_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkMaskFilter;
+class SkShader;
+
+class SK_API SkShaderMaskFilter {
+public:
+ static sk_sp<SkMaskFilter> Make(sk_sp<SkShader> shader);
+
+private:
+ static void RegisterFlattenables();
+ friend class SkFlattenable;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkStrokeAndFillPathEffect.h b/gfx/skia/skia/include/effects/SkStrokeAndFillPathEffect.h
new file mode 100644
index 0000000000..fbde649334
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkStrokeAndFillPathEffect.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrokeAndFillPathEffect_DEFINED
+#define SkStrokeAndFillPathEffect_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPathEffect.h"
+#include "include/pathops/SkPathOps.h"
+
+class SK_API SkStrokeAndFillPathEffect {
+public:
+ /* If the paint is set to stroke, this will add the stroke and fill geometries
+ * together (hoping that the winding-direction works out).
+ *
+ * If the paint is set to fill, this effect is ignored.
+ *
+ * Note that if the paint is set to stroke and the stroke-width is 0, then
+ * this will turn the geometry into just a fill.
+ */
+ static sk_sp<SkPathEffect> Make();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkTableColorFilter.h b/gfx/skia/skia/include/effects/SkTableColorFilter.h
new file mode 100644
index 0000000000..9a6ce3253e
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTableColorFilter.h
@@ -0,0 +1,29 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkTableColorFilter_DEFINED
+#define SkTableColorFilter_DEFINED
+
+#include "include/core/SkColorFilter.h"
+
+// (DEPRECATED) These factory functions are deprecated. Please use the ones in
+// SkColorFilters (i.e., Table and TableARGB).
+class SK_API SkTableColorFilter {
+public:
+ static sk_sp<SkColorFilter> Make(const uint8_t table[256]) {
+ return SkColorFilters::Table(table);
+ }
+
+ static sk_sp<SkColorFilter> MakeARGB(const uint8_t tableA[256],
+ const uint8_t tableR[256],
+ const uint8_t tableG[256],
+ const uint8_t tableB[256]) {
+ return SkColorFilters::TableARGB(tableA, tableR, tableG, tableB);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkTableMaskFilter.h b/gfx/skia/skia/include/effects/SkTableMaskFilter.h
new file mode 100644
index 0000000000..412f138353
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTableMaskFilter.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTableMaskFilter_DEFINED
+#define SkTableMaskFilter_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+#include <cstdint>
+
+class SkMaskFilter;
+
+/** \class SkTableMaskFilter
+
+ Applies a table lookup on each of the alpha values in the mask.
+ Helper methods create some common tables (e.g. gamma, clipping)
+ */
+class SK_API SkTableMaskFilter {
+public:
+ /** Utility that sets the gamma table
+ */
+ static void MakeGammaTable(uint8_t table[256], SkScalar gamma);
+
+ /** Utility that creates a clipping table: clamps values below min to 0
+ and above max to 255, and rescales the remaining into 0..255
+ */
+ static void MakeClipTable(uint8_t table[256], uint8_t min, uint8_t max);
+
+ static SkMaskFilter* Create(const uint8_t table[256]);
+ static SkMaskFilter* CreateGamma(SkScalar gamma);
+ static SkMaskFilter* CreateClip(uint8_t min, uint8_t max);
+
+ SkTableMaskFilter() = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/effects/SkTrimPathEffect.h b/gfx/skia/skia/include/effects/SkTrimPathEffect.h
new file mode 100644
index 0000000000..3e6fb7c342
--- /dev/null
+++ b/gfx/skia/skia/include/effects/SkTrimPathEffect.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTrimPathEffect_DEFINED
+#define SkTrimPathEffect_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+class SkPathEffect;
+
+class SK_API SkTrimPathEffect {
+public:
+ enum class Mode {
+ kNormal, // return the subset path [start,stop]
+ kInverted, // return the complement/subset paths [0,start] + [stop,1]
+ };
+
+ /**
+ * Take start and stop "t" values (values between 0...1), and return a path that is that
+ * subset of the original path.
+ *
+ * e.g.
+ * Make(0.5, 1.0) --> return the 2nd half of the path
+ * Make(0.33333, 0.66667) --> return the middle third of the path
+ *
+ * The trim values apply to the entire path, so if it contains several contours, all of them
+ * are included in the calculation.
+ *
+ * startT and stopT must be 0..1 inclusive. If they are outside of that interval, they will
+ * be pinned to the nearest legal value. If either is NaN, null will be returned.
+ *
+ * Note: for Mode::kNormal, this will return one (logical) segment (even if it is spread
+ * across multiple contours). For Mode::kInverted, this will return 2 logical
+ * segments: stopT..1 and 0...startT, in this order.
+ */
+ static sk_sp<SkPathEffect> Make(SkScalar startT, SkScalar stopT, Mode = Mode::kNormal);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/encode/SkEncoder.h b/gfx/skia/skia/include/encode/SkEncoder.h
new file mode 100644
index 0000000000..8f76e8016c
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkEncoder.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncoder_DEFINED
+#define SkEncoder_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SK_API SkEncoder : SkNoncopyable {
+public:
+ /**
+ * A single frame to be encoded into an animated image.
+ *
+ * If a frame does not fit in the canvas size, this is an error.
+ * TODO(skia:13705): Add offsets when we have support for an encoder that supports using
+ * offsets.
+ */
+ struct SK_API Frame {
+ /**
+ * Pixmap of the frame.
+ */
+ SkPixmap pixmap;
+ /**
+ * Duration of the frame in milliseconds.
+ */
+ int duration;
+ };
+
+ /**
+ * Encode |numRows| rows of input. If the caller requests more rows than are remaining
+ * in the src, this will encode all of the remaining rows. |numRows| must be greater
+ * than zero.
+ */
+ bool encodeRows(int numRows);
+
+ virtual ~SkEncoder() {}
+
+protected:
+
+ virtual bool onEncodeRows(int numRows) = 0;
+
+ SkEncoder(const SkPixmap& src, size_t storageBytes)
+ : fSrc(src)
+ , fCurrRow(0)
+ , fStorage(storageBytes)
+ {}
+
+ const SkPixmap& fSrc;
+ int fCurrRow;
+ skia_private::AutoTMalloc<uint8_t> fStorage;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/encode/SkICC.h b/gfx/skia/skia/include/encode/SkICC.h
new file mode 100644
index 0000000000..b14836b2ab
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkICC.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkICC_DEFINED
+#define SkICC_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkAPI.h"
+
+#include <cstdint>
+
+class SkData;
+struct skcms_ICCProfile;
+struct skcms_Matrix3x3;
+struct skcms_TransferFunction;
+
+SK_API sk_sp<SkData> SkWriteICCProfile(const skcms_TransferFunction&,
+ const skcms_Matrix3x3& toXYZD50);
+
+SK_API sk_sp<SkData> SkWriteICCProfile(const skcms_ICCProfile*, const char* description);
+
+// Utility function for populating the grid_16 member of skcms_A2B and skcms_B2A
+// structures. This converts a point in XYZD50 to its representation in grid_16_lab.
+// It will write 6 bytes. The behavior of this function matches how skcms will decode
+// values, but might not match the specification, see https://crbug.com/skia/13807.
+SK_API void SkICCFloatXYZD50ToGrid16Lab(const float* float_xyz, uint8_t* grid16_lab);
+
+// Utility function for populating the table_16 member of skcms_Curve structure.
+// This converts a float to its representation in table_16. It will write 2 bytes.
+SK_API void SkICCFloatToTable16(const float f, uint8_t* table_16);
+
+#endif//SkICC_DEFINED
diff --git a/gfx/skia/skia/include/encode/SkJpegEncoder.h b/gfx/skia/skia/include/encode/SkJpegEncoder.h
new file mode 100644
index 0000000000..0e036501d2
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkJpegEncoder.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJpegEncoder_DEFINED
+#define SkJpegEncoder_DEFINED
+
+#include "include/encode/SkEncoder.h"
+#include "include/private/base/SkAPI.h"
+
+#include <memory>
+
+class SkColorSpace;
+class SkData;
+class SkJpegEncoderMgr;
+class SkPixmap;
+class SkWStream;
+class SkYUVAPixmaps;
+struct skcms_ICCProfile;
+
+class SK_API SkJpegEncoder : public SkEncoder {
+public:
+
+ enum class AlphaOption {
+ kIgnore,
+ kBlendOnBlack,
+ };
+
+ enum class Downsample {
+ /**
+ * Reduction by a factor of two in both the horizontal and vertical directions.
+ */
+ k420,
+
+ /**
+ * Reduction by a factor of two in the horizontal direction.
+ */
+ k422,
+
+ /**
+ * No downsampling.
+ */
+ k444,
+ };
+
+ struct Options {
+ /**
+ * |fQuality| must be in [0, 100] where 0 corresponds to the lowest quality.
+ */
+ int fQuality = 100;
+
+ /**
+ * Choose the downsampling factor for the U and V components. This is only
+ * meaningful if the |src| is not kGray, since kGray will not be encoded as YUV.
+ * This is ignored in favor of |src|'s subsampling when |src| is an SkYUVAPixmaps.
+ *
+ * Our default value matches the libjpeg-turbo default.
+ */
+ Downsample fDownsample = Downsample::k420;
+
+ /**
+ * Jpegs must be opaque. This instructs the encoder on how to handle input
+ * images with alpha.
+ *
+ * The default is to ignore the alpha channel and treat the image as opaque.
+ * Another option is to blend the pixels onto a black background before encoding.
+ * In the second case, the encoder supports linear or legacy blending.
+ */
+ AlphaOption fAlphaOption = AlphaOption::kIgnore;
+
+ /**
+ * Optional XMP metadata.
+ */
+ const SkData* xmpMetadata = nullptr;
+
+ /**
+ * An optional ICC profile to override the default behavior.
+ *
+ * The default behavior is to generate an ICC profile using a primary matrix and
+ * analytic transfer function. If the color space of |src| cannot be represented
+ * in this way (e.g., it is HLG or PQ), then no profile will be embedded.
+ */
+ const skcms_ICCProfile* fICCProfile = nullptr;
+ const char* fICCProfileDescription = nullptr;
+ };
+
+ /**
+ * Encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ static bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options);
+ static bool Encode(SkWStream* dst,
+ const SkYUVAPixmaps& src,
+ const SkColorSpace* srcColorSpace,
+ const Options& options);
+
+ /**
+ * Create a jpeg encoder that will encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * |dst| is unowned but must remain valid for the lifetime of the object.
+ *
+ * This returns nullptr on an invalid or unsupported |src|.
+ */
+ static std::unique_ptr<SkEncoder> Make(SkWStream* dst, const SkPixmap& src,
+ const Options& options);
+ static std::unique_ptr<SkEncoder> Make(SkWStream* dst,
+ const SkYUVAPixmaps& src,
+ const SkColorSpace* srcColorSpace,
+ const Options& options);
+
+ ~SkJpegEncoder() override;
+
+protected:
+ bool onEncodeRows(int numRows) override;
+
+private:
+ SkJpegEncoder(std::unique_ptr<SkJpegEncoderMgr>, const SkPixmap& src);
+ SkJpegEncoder(std::unique_ptr<SkJpegEncoderMgr>, const SkYUVAPixmaps* srcYUVA);
+
+ static std::unique_ptr<SkEncoder> Make(SkWStream* dst,
+ const SkPixmap* src,
+ const SkYUVAPixmaps* srcYUVA,
+ const SkColorSpace* srcYUVAColorSpace,
+ const Options& options);
+
+ std::unique_ptr<SkJpegEncoderMgr> fEncoderMgr;
+ const SkYUVAPixmaps* fSrcYUVA = nullptr;
+ using INHERITED = SkEncoder;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/encode/SkPngEncoder.h b/gfx/skia/skia/include/encode/SkPngEncoder.h
new file mode 100644
index 0000000000..cc7aa50b81
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkPngEncoder.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPngEncoder_DEFINED
+#define SkPngEncoder_DEFINED
+
+#include "include/core/SkDataTable.h"
+#include "include/core/SkRefCnt.h"
+#include "include/encode/SkEncoder.h"
+#include "include/private/base/SkAPI.h"
+
+#include <memory>
+
+class SkPixmap;
+class SkPngEncoderMgr;
+class SkWStream;
+struct skcms_ICCProfile;
+
+class SK_API SkPngEncoder : public SkEncoder {
+public:
+
+ enum class FilterFlag : int {
+ kZero = 0x00,
+ kNone = 0x08,
+ kSub = 0x10,
+ kUp = 0x20,
+ kAvg = 0x40,
+ kPaeth = 0x80,
+ kAll = kNone | kSub | kUp | kAvg | kPaeth,
+ };
+
+ struct Options {
+ /**
+ * Selects which filtering strategies to use.
+ *
+ * If a single filter is chosen, libpng will use that filter for every row.
+ *
+ * If multiple filters are chosen, libpng will use a heuristic to guess which filter
+ * will encode smallest, then apply that filter. This happens on a per row basis,
+ * different rows can use different filters.
+ *
+ * Using a single filter (or less filters) is typically faster. Trying all of the
+ * filters may help minimize the output file size.
+ *
+ * Our default value matches libpng's default.
+ */
+ FilterFlag fFilterFlags = FilterFlag::kAll;
+
+ /**
+ * Must be in [0, 9] where 9 corresponds to maximal compression. This value is passed
+ * directly to zlib. 0 is a special case to skip zlib entirely, creating dramatically
+ * larger pngs.
+ *
+ * Our default value matches libpng's default.
+ */
+ int fZLibLevel = 6;
+
+ /**
+ * Represents comments in the tEXt ancillary chunk of the png.
+ * The 2i-th entry is the keyword for the i-th comment,
+ * and the (2i + 1)-th entry is the text for the i-th comment.
+ */
+ sk_sp<SkDataTable> fComments;
+
+ /**
+ * An optional ICC profile to override the default behavior.
+ *
+ * The default behavior is to generate an ICC profile using a primary matrix and
+ * analytic transfer function. If the color space of |src| cannot be represented
+ * in this way (e.g, it is HLG or PQ), then no profile will be embedded.
+ */
+ const skcms_ICCProfile* fICCProfile = nullptr;
+ const char* fICCProfileDescription = nullptr;
+ };
+
+ /**
+ * Encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ static bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options);
+
+ /**
+ * Create a png encoder that will encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * |dst| is unowned but must remain valid for the lifetime of the object.
+ *
+ * This returns nullptr on an invalid or unsupported |src|.
+ */
+ static std::unique_ptr<SkEncoder> Make(SkWStream* dst, const SkPixmap& src,
+ const Options& options);
+
+ ~SkPngEncoder() override;
+
+protected:
+ bool onEncodeRows(int numRows) override;
+
+ SkPngEncoder(std::unique_ptr<SkPngEncoderMgr>, const SkPixmap& src);
+
+ std::unique_ptr<SkPngEncoderMgr> fEncoderMgr;
+ using INHERITED = SkEncoder;
+};
+
+static inline SkPngEncoder::FilterFlag operator|(SkPngEncoder::FilterFlag x,
+ SkPngEncoder::FilterFlag y) {
+ return (SkPngEncoder::FilterFlag)((int)x | (int)y);
+}
+
+#endif
diff --git a/gfx/skia/skia/include/encode/SkWebpEncoder.h b/gfx/skia/skia/include/encode/SkWebpEncoder.h
new file mode 100644
index 0000000000..fe55a607dd
--- /dev/null
+++ b/gfx/skia/skia/include/encode/SkWebpEncoder.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWebpEncoder_DEFINED
+#define SkWebpEncoder_DEFINED
+
+#include "include/core/SkSpan.h" // IWYU pragma: keep
+#include "include/encode/SkEncoder.h"
+#include "include/private/base/SkAPI.h"
+
+class SkPixmap;
+class SkWStream;
+struct skcms_ICCProfile;
+
+namespace SkWebpEncoder {
+
+ enum class Compression {
+ kLossy,
+ kLossless,
+ };
+
+ struct SK_API Options {
+ /**
+ * |fCompression| determines whether we will use webp lossy or lossless compression.
+ *
+ * |fQuality| must be in [0.0f, 100.0f].
+ * If |fCompression| is kLossy, |fQuality| corresponds to the visual quality of the
+ * encoding. Decreasing the quality will result in a smaller encoded image.
+ * If |fCompression| is kLossless, |fQuality| corresponds to the amount of effort
+ * put into the encoding. Lower values will compress faster into larger files,
+ * while larger values will compress slower into smaller files.
+ *
+ * This scheme is designed to match the libwebp API.
+ */
+ Compression fCompression = Compression::kLossy;
+ float fQuality = 100.0f;
+
+ /**
+ * An optional ICC profile to override the default behavior.
+ *
+ * The default behavior is to generate an ICC profile using a primary matrix and
+ * analytic transfer function. If the color space of |src| cannot be represented
+ * in this way (e.g, it is HLG or PQ), then no profile will be embedded.
+ */
+ const skcms_ICCProfile* fICCProfile = nullptr;
+ const char* fICCProfileDescription = nullptr;
+ };
+
+ /**
+ * Encode the |src| pixels to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ SK_API bool Encode(SkWStream* dst, const SkPixmap& src, const Options& options);
+
+ /**
+ * Encode the |src| frames to the |dst| stream.
+ * |options| may be used to control the encoding behavior.
+ *
+ * The size of the first frame will be used as the canvas size. If any other frame does
+ * not match the canvas size, this is an error.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ *
+ * Note: libwebp API also supports set background color, loop limit and customize
+ * lossy/lossless for each frame. These could be added later as needed.
+ */
+ SK_API bool EncodeAnimated(SkWStream* dst,
+ SkSpan<const SkEncoder::Frame> src,
+ const Options& options);
+} // namespace SkWebpEncoder
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GpuTypes.h b/gfx/skia/skia/include/gpu/GpuTypes.h
new file mode 100644
index 0000000000..e2e3961f8b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GpuTypes.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_GpuTypes_DEFINED
+#define skgpu_GpuTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * This file includes numerous public types that are used by all of our gpu backends.
+ */
+
+namespace skgpu {
+
+/**
+ * Possible 3D APIs that may be used by Graphite.
+ */
+enum class BackendApi : unsigned {
+ kDawn,
+ kMetal,
+ kVulkan,
+ kMock,
+};
+
+/** Indicates whether an allocation should count against a cache budget. */
+enum class Budgeted : bool {
+ kNo = false,
+ kYes = true,
+};
+
+/**
+ * Value passed into various callbacks to tell the client the result of operations connected to a
+ * specific callback. The actual interpretation of kFailed and kSuccess are dependent on the
+ * specific callbacks and are documented with the callback itself.
+ */
+enum class CallbackResult : bool {
+ kFailed = false,
+ kSuccess = true,
+};
+
+/**
+ * Is the texture mipmapped or not
+ */
+enum class Mipmapped : bool {
+ kNo = false,
+ kYes = true,
+};
+
+/**
+ * Is the data protected on the GPU or not.
+ */
+enum class Protected : bool {
+ kNo = false,
+ kYes = true,
+};
+
+/**
+ * Is a texture renderable or not
+ */
+enum class Renderable : bool {
+ kNo = false,
+ kYes = true,
+};
+
+} // namespace skgpu
+
+
+#endif // skgpu_GpuTypes_DEFINED
diff --git a/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h b/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h
new file mode 100644
index 0000000000..bda1e769fd
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendDrawableInfo.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendDrawableInfo_DEFINED
+#define GrBackendDrawableInfo_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/gpu/vk/GrVkTypes.h"
+
+class SK_API GrBackendDrawableInfo {
+public:
+ // Creates an invalid backend drawable info.
+ GrBackendDrawableInfo() : fIsValid(false) {}
+
+ GrBackendDrawableInfo(const GrVkDrawableInfo& info)
+ : fIsValid(true)
+ , fBackend(GrBackendApi::kVulkan)
+ , fVkInfo(info) {}
+
+ // Returns true if the backend texture has been initialized.
+ bool isValid() const { return fIsValid; }
+
+ GrBackendApi backend() const { return fBackend; }
+
+ bool getVkDrawableInfo(GrVkDrawableInfo* outInfo) const {
+ if (this->isValid() && GrBackendApi::kVulkan == fBackend) {
+ *outInfo = fVkInfo;
+ return true;
+ }
+ return false;
+ }
+
+private:
+ bool fIsValid;
+ GrBackendApi fBackend;
+ GrVkDrawableInfo fVkInfo;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrBackendSemaphore.h b/gfx/skia/skia/include/gpu/GrBackendSemaphore.h
new file mode 100644
index 0000000000..13d07928e7
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendSemaphore.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendSemaphore_DEFINED
+#define GrBackendSemaphore_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/gpu/gl/GrGLTypes.h"
+
+#ifdef SK_METAL
+#include "include/gpu/mtl/GrMtlTypes.h"
+#endif
+
+#ifdef SK_VULKAN
+#include "include/gpu/vk/GrVkTypes.h"
+#endif
+
+#ifdef SK_DIRECT3D
+#include "include/private/gpu/ganesh/GrD3DTypesMinimal.h"
+#endif
+
+/**
+ * Wrapper class for passing into and receiving data from Ganesh about a backend semaphore object.
+ */
+class GrBackendSemaphore {
+public:
+ // For convenience we just set the backend here to OpenGL. The GrBackendSemaphore cannot be used
+ // until either init* is called, which will set the appropriate GrBackend.
+ GrBackendSemaphore()
+ : fBackend(GrBackendApi::kOpenGL), fGLSync(nullptr), fIsInitialized(false) {}
+
+#ifdef SK_DIRECT3D
+ // We only need to specify these if Direct3D is enabled, because it requires special copy
+ // characteristics.
+ ~GrBackendSemaphore();
+ GrBackendSemaphore(const GrBackendSemaphore&);
+ GrBackendSemaphore& operator=(const GrBackendSemaphore&);
+#endif
+
+ void initGL(GrGLsync sync) {
+ fBackend = GrBackendApi::kOpenGL;
+ fGLSync = sync;
+ fIsInitialized = true;
+ }
+
+#ifdef SK_VULKAN
+ void initVulkan(VkSemaphore semaphore) {
+ fBackend = GrBackendApi::kVulkan;
+ fVkSemaphore = semaphore;
+
+ fIsInitialized = true;
+ }
+
+ VkSemaphore vkSemaphore() const {
+ if (!fIsInitialized || GrBackendApi::kVulkan != fBackend) {
+ return VK_NULL_HANDLE;
+ }
+ return fVkSemaphore;
+ }
+#endif
+
+#ifdef SK_METAL
+ // It is the creator's responsibility to ref the MTLEvent passed in here, via __bridge_retained.
+ // The other end will wrap this BackendSemaphore and take the ref, via __bridge_transfer.
+ void initMetal(GrMTLHandle event, uint64_t value) {
+ fBackend = GrBackendApi::kMetal;
+ fMtlEvent = event;
+ fMtlValue = value;
+
+ fIsInitialized = true;
+ }
+
+ GrMTLHandle mtlSemaphore() const {
+ if (!fIsInitialized || GrBackendApi::kMetal != fBackend) {
+ return nullptr;
+ }
+ return fMtlEvent;
+ }
+
+ uint64_t mtlValue() const {
+ if (!fIsInitialized || GrBackendApi::kMetal != fBackend) {
+ return 0;
+ }
+ return fMtlValue;
+ }
+
+#endif
+
+#ifdef SK_DIRECT3D
+ void initDirect3D(const GrD3DFenceInfo& info) {
+ fBackend = GrBackendApi::kDirect3D;
+ this->assignD3DFenceInfo(info);
+ fIsInitialized = true;
+ }
+#endif
+
+ bool isInitialized() const { return fIsInitialized; }
+
+ GrGLsync glSync() const {
+ if (!fIsInitialized || GrBackendApi::kOpenGL != fBackend) {
+ return nullptr;
+ }
+ return fGLSync;
+ }
+
+
+#ifdef SK_DIRECT3D
+ bool getD3DFenceInfo(GrD3DFenceInfo* outInfo) const;
+#endif
+
+private:
+#ifdef SK_DIRECT3D
+ void assignD3DFenceInfo(const GrD3DFenceInfo& info);
+#endif
+
+ GrBackendApi fBackend;
+ union {
+ GrGLsync fGLSync;
+#ifdef SK_VULKAN
+ VkSemaphore fVkSemaphore;
+#endif
+#ifdef SK_METAL
+ GrMTLHandle fMtlEvent; // Expected to be an id<MTLEvent>
+#endif
+#ifdef SK_DIRECT3D
+ GrD3DFenceInfo* fD3DFenceInfo;
+#endif
+ };
+#ifdef SK_METAL
+ uint64_t fMtlValue;
+#endif
+ bool fIsInitialized;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrBackendSurface.h b/gfx/skia/skia/include/gpu/GrBackendSurface.h
new file mode 100644
index 0000000000..e196cb9272
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendSurface.h
@@ -0,0 +1,666 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendSurface_DEFINED
+#define GrBackendSurface_DEFINED
+
+// This include of GrBackendSurfaceMutableState is not needed here, but some clients were depending
+// on the include here instead of including it themselves. Adding this back here until we can fix
+// up clients so it can be removed.
+#include "include/gpu/GrBackendSurfaceMutableState.h"
+
+#include "include/gpu/GrSurfaceInfo.h"
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/MutableTextureState.h"
+#ifdef SK_GL
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/private/gpu/ganesh/GrGLTypesPriv.h"
+#endif
+#include "include/gpu/mock/GrMockTypes.h"
+#ifdef SK_VULKAN
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/private/gpu/ganesh/GrVkTypesPriv.h"
+#endif
+
+#ifdef SK_DAWN
+#include "include/gpu/dawn/GrDawnTypes.h"
+#endif
+
+#include <string>
+
+class GrVkImageLayout;
+class GrGLTextureParameters;
+class GrColorFormatDesc;
+enum class SkTextureCompressionType;
+
+namespace skgpu {
+class MutableTextureStateRef;
+}
+
+#ifdef SK_DAWN
+#include "webgpu/webgpu_cpp.h"
+#endif
+
+#ifdef SK_METAL
+#include "include/gpu/mtl/GrMtlTypes.h"
+#endif
+
+#ifdef SK_DIRECT3D
+#include "include/private/gpu/ganesh/GrD3DTypesMinimal.h"
+class GrD3DResourceState;
+#endif
+
+#if defined(SK_DEBUG) || GR_TEST_UTILS
+class SkString;
+#endif
+
+#if !defined(SK_GANESH)
+
+// SkSurfaceCharacterization always needs a minimal version of this
+class SK_API GrBackendFormat {
+public:
+ bool isValid() const { return false; }
+};
+
+// SkSurface and SkImage rely on a minimal version of these always being available
+class SK_API GrBackendTexture {
+public:
+ GrBackendTexture() {}
+
+ bool isValid() const { return false; }
+};
+
+class SK_API GrBackendRenderTarget {
+public:
+ GrBackendRenderTarget() {}
+
+ bool isValid() const { return false; }
+ bool isFramebufferOnly() const { return false; }
+};
+#else
+
+enum class GrGLFormat;
+
+class SK_API GrBackendFormat {
+public:
+ // Creates an invalid backend format.
+ GrBackendFormat() {}
+ GrBackendFormat(const GrBackendFormat&);
+ GrBackendFormat& operator=(const GrBackendFormat&);
+
+#ifdef SK_GL
+ static GrBackendFormat MakeGL(GrGLenum format, GrGLenum target) {
+ return GrBackendFormat(format, target);
+ }
+#endif
+
+#ifdef SK_VULKAN
+ static GrBackendFormat MakeVk(VkFormat format, bool willUseDRMFormatModifiers = false) {
+ return GrBackendFormat(format, GrVkYcbcrConversionInfo(), willUseDRMFormatModifiers);
+ }
+
+ static GrBackendFormat MakeVk(const GrVkYcbcrConversionInfo& ycbcrInfo,
+ bool willUseDRMFormatModifiers = false);
+#endif
+
+#ifdef SK_DAWN
+ static GrBackendFormat MakeDawn(wgpu::TextureFormat format) {
+ return GrBackendFormat(format);
+ }
+#endif
+
+#ifdef SK_METAL
+ static GrBackendFormat MakeMtl(GrMTLPixelFormat format) {
+ return GrBackendFormat(format);
+ }
+#endif
+
+#ifdef SK_DIRECT3D
+ static GrBackendFormat MakeDxgi(DXGI_FORMAT format) {
+ return GrBackendFormat(format);
+ }
+#endif
+
+ static GrBackendFormat MakeMock(GrColorType colorType, SkTextureCompressionType compression,
+ bool isStencilFormat = false);
+
+ bool operator==(const GrBackendFormat& that) const;
+ bool operator!=(const GrBackendFormat& that) const { return !(*this == that); }
+
+ GrBackendApi backend() const { return fBackend; }
+ GrTextureType textureType() const { return fTextureType; }
+
+ /**
+ * Gets the channels present in the format as a bitfield of SkColorChannelFlag values.
+ * Luminance channels are reported as kGray_SkColorChannelFlag.
+ */
+ uint32_t channelMask() const;
+
+ GrColorFormatDesc desc() const;
+
+#ifdef SK_GL
+ /**
+ * If the backend API is GL this gets the format as a GrGLFormat. Otherwise, returns
+ * GrGLFormat::kUnknown.
+ */
+ GrGLFormat asGLFormat() const;
+
+ GrGLenum asGLFormatEnum() const;
+#endif
+
+#ifdef SK_VULKAN
+ /**
+ * If the backend API is Vulkan this gets the format as a VkFormat and returns true. Otherwise,
+ * returns false.
+ */
+ bool asVkFormat(VkFormat*) const;
+
+ const GrVkYcbcrConversionInfo* getVkYcbcrConversionInfo() const;
+#endif
+
+#ifdef SK_DAWN
+ /**
+ * If the backend API is Dawn this gets the format as a wgpu::TextureFormat and returns true.
+ * Otherwise, returns false.
+ */
+ bool asDawnFormat(wgpu::TextureFormat*) const;
+#endif
+
+#ifdef SK_METAL
+ /**
+ * If the backend API is Metal this gets the format as a GrMtlPixelFormat. Otherwise,
+ * Otherwise, returns MTLPixelFormatInvalid.
+ */
+ GrMTLPixelFormat asMtlFormat() const;
+#endif
+
+#ifdef SK_DIRECT3D
+ /**
+ * If the backend API is Direct3D this gets the format as a DXGI_FORMAT and returns true.
+ * Otherwise, returns false.
+ */
+ bool asDxgiFormat(DXGI_FORMAT*) const;
+#endif
+
+ /**
+ * If the backend API is not Mock these three calls will return kUnknown, kNone or false,
+ * respectively. Otherwise, only one of the following can be true. The GrColorType is not
+ * kUnknown, the compression type is not kNone, or this is a mock stencil format.
+ */
+ GrColorType asMockColorType() const;
+ SkTextureCompressionType asMockCompressionType() const;
+ bool isMockStencilFormat() const;
+
+ // If possible, copies the GrBackendFormat and forces the texture type to be Texture2D. If the
+ // GrBackendFormat was for Vulkan and it originally had a GrVkYcbcrConversionInfo, we will
+ // remove the conversion and set the format to be VK_FORMAT_R8G8B8A8_UNORM.
+ GrBackendFormat makeTexture2D() const;
+
+ // Returns true if the backend format has been initialized.
+ bool isValid() const { return fValid; }
+
+#if defined(SK_DEBUG) || GR_TEST_UTILS
+ SkString toStr() const;
+#endif
+
+private:
+#ifdef SK_GL
+ GrBackendFormat(GrGLenum format, GrGLenum target);
+#endif
+
+#ifdef SK_VULKAN
+ GrBackendFormat(const VkFormat vkFormat, const GrVkYcbcrConversionInfo&,
+ bool willUseDRMFormatModifiers);
+#endif
+
+#ifdef SK_DAWN
+ GrBackendFormat(wgpu::TextureFormat format);
+#endif
+
+#ifdef SK_METAL
+ GrBackendFormat(const GrMTLPixelFormat mtlFormat);
+#endif
+
+#ifdef SK_DIRECT3D
+ GrBackendFormat(DXGI_FORMAT dxgiFormat);
+#endif
+
+ GrBackendFormat(GrColorType, SkTextureCompressionType, bool isStencilFormat);
+
+#ifdef SK_DEBUG
+ bool validateMock() const;
+#endif
+
+ GrBackendApi fBackend = GrBackendApi::kMock;
+ bool fValid = false;
+
+ union {
+#ifdef SK_GL
+ GrGLenum fGLFormat; // the sized, internal format of the GL resource
+#endif
+#ifdef SK_VULKAN
+ struct {
+ VkFormat fFormat;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ } fVk;
+#endif
+#ifdef SK_DAWN
+ wgpu::TextureFormat fDawnFormat;
+#endif
+
+#ifdef SK_METAL
+ GrMTLPixelFormat fMtlFormat;
+#endif
+
+#ifdef SK_DIRECT3D
+ DXGI_FORMAT fDxgiFormat;
+#endif
+ struct {
+ GrColorType fColorType;
+ SkTextureCompressionType fCompressionType;
+ bool fIsStencilFormat;
+ } fMock;
+ };
+ GrTextureType fTextureType = GrTextureType::kNone;
+};
+
+class SK_API GrBackendTexture {
+public:
+ // Creates an invalid backend texture.
+ GrBackendTexture();
+
+#ifdef SK_GL
+ // The GrGLTextureInfo must have a valid fFormat.
+ GrBackendTexture(int width,
+ int height,
+ GrMipmapped,
+ const GrGLTextureInfo& glInfo,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_VULKAN
+ GrBackendTexture(int width,
+ int height,
+ const GrVkImageInfo& vkInfo,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_METAL
+ GrBackendTexture(int width,
+ int height,
+ GrMipmapped,
+ const GrMtlTextureInfo& mtlInfo,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_DIRECT3D
+ GrBackendTexture(int width,
+ int height,
+ const GrD3DTextureResourceInfo& d3dInfo,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_DAWN
+ GrBackendTexture(int width,
+ int height,
+ const GrDawnTextureInfo& dawnInfo,
+ std::string_view label = {});
+#endif
+
+ GrBackendTexture(int width,
+ int height,
+ GrMipmapped,
+ const GrMockTextureInfo& mockInfo,
+ std::string_view label = {});
+
+ GrBackendTexture(const GrBackendTexture& that);
+
+ ~GrBackendTexture();
+
+ GrBackendTexture& operator=(const GrBackendTexture& that);
+
+ SkISize dimensions() const { return {fWidth, fHeight}; }
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ std::string_view getLabel() const { return fLabel; }
+ GrMipmapped mipmapped() const { return fMipmapped; }
+ bool hasMipmaps() const { return fMipmapped == GrMipmapped::kYes; }
+ /** deprecated alias of hasMipmaps(). */
+ bool hasMipMaps() const { return this->hasMipmaps(); }
+ GrBackendApi backend() const {return fBackend; }
+ GrTextureType textureType() const { return fTextureType; }
+
+#ifdef SK_GL
+ // If the backend API is GL, copies a snapshot of the GrGLTextureInfo struct into the passed in
+ // pointer and returns true. Otherwise returns false if the backend API is not GL.
+ bool getGLTextureInfo(GrGLTextureInfo*) const;
+
+ // Call this to indicate that the texture parameters have been modified in the GL context
+ // externally to GrContext.
+ void glTextureParametersModified();
+#endif
+
+#ifdef SK_DAWN
+ // If the backend API is Dawn, copies a snapshot of the GrDawnTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Dawn.
+ bool getDawnTextureInfo(GrDawnTextureInfo*) const;
+#endif
+
+#ifdef SK_VULKAN
+ // If the backend API is Vulkan, copies a snapshot of the GrVkImageInfo struct into the passed
+ // in pointer and returns true. This snapshot will set the fImageLayout to the current layout
+ // state. Otherwise returns false if the backend API is not Vulkan.
+ bool getVkImageInfo(GrVkImageInfo*) const;
+
+ // Anytime the client changes the VkImageLayout of the VkImage captured by this
+ // GrBackendTexture, they must call this function to notify Skia of the changed layout.
+ void setVkImageLayout(VkImageLayout);
+#endif
+
+#ifdef SK_METAL
+ // If the backend API is Metal, copies a snapshot of the GrMtlTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Metal.
+ bool getMtlTextureInfo(GrMtlTextureInfo*) const;
+#endif
+
+#ifdef SK_DIRECT3D
+ // If the backend API is Direct3D, copies a snapshot of the GrD3DTextureResourceInfo struct into
+ // the passed in pointer and returns true. This snapshot will set the fResourceState to the
+ // current resource state. Otherwise returns false if the backend API is not D3D.
+ bool getD3DTextureResourceInfo(GrD3DTextureResourceInfo*) const;
+
+ // Anytime the client changes the D3D12_RESOURCE_STATES of the D3D12_RESOURCE captured by this
+ // GrBackendTexture, they must call this function to notify Skia of the changed layout.
+ void setD3DResourceState(GrD3DResourceStateEnum);
+#endif
+
+ // Get the GrBackendFormat for this texture (or an invalid format if this is not valid).
+ GrBackendFormat getBackendFormat() const;
+
+ // If the backend API is Mock, copies a snapshot of the GrMockTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Mock.
+ bool getMockTextureInfo(GrMockTextureInfo*) const;
+
+ // If the client changes any of the mutable backend of the GrBackendTexture they should call
+ // this function to inform Skia that those values have changed. The backend API specific state
+ // that can be set from this function are:
+ //
+ // Vulkan: VkImageLayout and QueueFamilyIndex
+ void setMutableState(const skgpu::MutableTextureState&);
+
+ // Returns true if we are working with protected content.
+ bool isProtected() const;
+
+ // Returns true if the backend texture has been initialized.
+ bool isValid() const { return fIsValid; }
+
+ // Returns true if both textures are valid and refer to the same API texture.
+ bool isSameTexture(const GrBackendTexture&);
+
+#if GR_TEST_UTILS
+ static bool TestingOnly_Equals(const GrBackendTexture& , const GrBackendTexture&);
+#endif
+
+private:
+ friend class GrVkGpu; // for getMutableState
+ sk_sp<skgpu::MutableTextureStateRef> getMutableState() const;
+
+#ifdef SK_GL
+ friend class GrGLTexture;
+ friend class GrGLGpu; // for getGLTextureParams
+ GrBackendTexture(int width,
+ int height,
+ GrMipmapped,
+ const GrGLTextureInfo,
+ sk_sp<GrGLTextureParameters>,
+ std::string_view label = {});
+ sk_sp<GrGLTextureParameters> getGLTextureParams() const;
+#endif
+
+#ifdef SK_VULKAN
+ friend class GrVkTexture;
+ GrBackendTexture(int width,
+ int height,
+ const GrVkImageInfo& vkInfo,
+ sk_sp<skgpu::MutableTextureStateRef> mutableState,
+ std::string_view label = {});
+#endif
+
+#ifdef SK_DIRECT3D
+ friend class GrD3DTexture;
+ friend class GrD3DGpu; // for getGrD3DResourceState
+ GrBackendTexture(int width,
+ int height,
+ const GrD3DTextureResourceInfo& vkInfo,
+ sk_sp<GrD3DResourceState> state,
+ std::string_view label = {});
+ sk_sp<GrD3DResourceState> getGrD3DResourceState() const;
+#endif
+
+ // Free and release and resources being held by the GrBackendTexture.
+ void cleanup();
+
+ bool fIsValid;
+ int fWidth; //<! width in pixels
+ int fHeight; //<! height in pixels
+ const std::string fLabel;
+ GrMipmapped fMipmapped;
+ GrBackendApi fBackend;
+ GrTextureType fTextureType;
+
+ union {
+#ifdef SK_GL
+ GrGLBackendTextureInfo fGLInfo;
+#endif
+#ifdef SK_VULKAN
+ GrVkBackendSurfaceInfo fVkInfo;
+#endif
+ GrMockTextureInfo fMockInfo;
+#ifdef SK_DIRECT3D
+ GrD3DBackendSurfaceInfo fD3DInfo;
+#endif
+ };
+#ifdef SK_METAL
+ GrMtlTextureInfo fMtlInfo;
+#endif
+#ifdef SK_DAWN
+ GrDawnTextureInfo fDawnInfo;
+#endif
+
+ sk_sp<skgpu::MutableTextureStateRef> fMutableState;
+};
+
+class SK_API GrBackendRenderTarget {
+public:
+ // Creates an invalid backend texture.
+ GrBackendRenderTarget();
+
+#ifdef SK_GL
+ // The GrGLTextureInfo must have a valid fFormat. If wrapping in an SkSurface we require the
+ // stencil bits to be either 0, 8 or 16.
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrGLFramebufferInfo& glInfo);
+#endif
+
+#ifdef SK_DAWN
+ // If wrapping in an SkSurface we require the stencil bits to be either 0, 8 or 16.
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrDawnRenderTargetInfo& dawnInfo);
+#endif
+
+#ifdef SK_VULKAN
+ /** Deprecated. Sample count is now part of GrVkImageInfo. */
+ GrBackendRenderTarget(int width, int height, int sampleCnt, const GrVkImageInfo& vkInfo);
+
+ GrBackendRenderTarget(int width, int height, const GrVkImageInfo& vkInfo);
+#endif
+
+#ifdef SK_METAL
+ GrBackendRenderTarget(int width,
+ int height,
+ const GrMtlTextureInfo& mtlInfo);
+ /** Deprecated. Sample count is ignored and is instead retrieved from the MtlTexture. */
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ const GrMtlTextureInfo& mtlInfo);
+#endif
+
+#ifdef SK_DIRECT3D
+ GrBackendRenderTarget(int width,
+ int height,
+ const GrD3DTextureResourceInfo& d3dInfo);
+#endif
+
+ GrBackendRenderTarget(int width,
+ int height,
+ int sampleCnt,
+ int stencilBits,
+ const GrMockRenderTargetInfo& mockInfo);
+
+ ~GrBackendRenderTarget();
+
+ GrBackendRenderTarget(const GrBackendRenderTarget& that);
+ GrBackendRenderTarget& operator=(const GrBackendRenderTarget&);
+
+ SkISize dimensions() const { return {fWidth, fHeight}; }
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ int sampleCnt() const { return fSampleCnt; }
+ int stencilBits() const { return fStencilBits; }
+ GrBackendApi backend() const {return fBackend; }
+ bool isFramebufferOnly() const { return fFramebufferOnly; }
+
+#ifdef SK_GL
+ // If the backend API is GL, copies a snapshot of the GrGLFramebufferInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not GL.
+ bool getGLFramebufferInfo(GrGLFramebufferInfo*) const;
+#endif
+
+#ifdef SK_DAWN
+ // If the backend API is Dawn, copies a snapshot of the GrDawnRenderTargetInfo struct into the
+ // passed-in pointer and returns true. Otherwise returns false if the backend API is not Dawn.
+ bool getDawnRenderTargetInfo(GrDawnRenderTargetInfo*) const;
+#endif
+
+#ifdef SK_VULKAN
+ // If the backend API is Vulkan, copies a snapshot of the GrVkImageInfo struct into the passed
+ // in pointer and returns true. This snapshot will set the fImageLayout to the current layout
+ // state. Otherwise returns false if the backend API is not Vulkan.
+ bool getVkImageInfo(GrVkImageInfo*) const;
+
+ // Anytime the client changes the VkImageLayout of the VkImage captured by this
+ // GrBackendRenderTarget, they must call this function to notify Skia of the changed layout.
+ void setVkImageLayout(VkImageLayout);
+#endif
+
+#ifdef SK_METAL
+ // If the backend API is Metal, copies a snapshot of the GrMtlTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Metal.
+ bool getMtlTextureInfo(GrMtlTextureInfo*) const;
+#endif
+
+#ifdef SK_DIRECT3D
+ // If the backend API is Direct3D, copies a snapshot of the GrMtlTextureInfo struct into the
+ // passed in pointer and returns true. Otherwise returns false if the backend API is not D3D.
+ bool getD3DTextureResourceInfo(GrD3DTextureResourceInfo*) const;
+
+ // Anytime the client changes the D3D12_RESOURCE_STATES of the D3D12_RESOURCE captured by this
+ // GrBackendTexture, they must call this function to notify Skia of the changed layout.
+ void setD3DResourceState(GrD3DResourceStateEnum);
+#endif
+
+ // Get the GrBackendFormat for this render target (or an invalid format if this is not valid).
+ GrBackendFormat getBackendFormat() const;
+
+ // If the backend API is Mock, copies a snapshot of the GrMockTextureInfo struct into the passed
+ // in pointer and returns true. Otherwise returns false if the backend API is not Mock.
+ bool getMockRenderTargetInfo(GrMockRenderTargetInfo*) const;
+
+ // If the client changes any of the mutable backend of the GrBackendTexture they should call
+ // this function to inform Skia that those values have changed. The backend API specific state
+ // that can be set from this function are:
+ //
+ // Vulkan: VkImageLayout and QueueFamilyIndex
+ void setMutableState(const skgpu::MutableTextureState&);
+
+ // Returns true if we are working with protected content.
+ bool isProtected() const;
+
+ // Returns true if the backend texture has been initialized.
+ bool isValid() const { return fIsValid; }
+
+
+#if GR_TEST_UTILS
+ static bool TestingOnly_Equals(const GrBackendRenderTarget&, const GrBackendRenderTarget&);
+#endif
+
+private:
+ friend class GrVkGpu; // for getMutableState
+ sk_sp<skgpu::MutableTextureStateRef> getMutableState() const;
+
+#ifdef SK_VULKAN
+ friend class GrVkRenderTarget;
+ GrBackendRenderTarget(int width,
+ int height,
+ const GrVkImageInfo& vkInfo,
+ sk_sp<skgpu::MutableTextureStateRef> mutableState);
+#endif
+
+#ifdef SK_DIRECT3D
+ friend class GrD3DGpu;
+ friend class GrD3DRenderTarget;
+ GrBackendRenderTarget(int width,
+ int height,
+ const GrD3DTextureResourceInfo& d3dInfo,
+ sk_sp<GrD3DResourceState> state);
+ sk_sp<GrD3DResourceState> getGrD3DResourceState() const;
+#endif
+
+ // Free and release and resources being held by the GrBackendTexture.
+ void cleanup();
+
+ bool fIsValid;
+ bool fFramebufferOnly = false;
+ int fWidth; //<! width in pixels
+ int fHeight; //<! height in pixels
+
+ int fSampleCnt;
+ int fStencilBits;
+
+ GrBackendApi fBackend;
+
+ union {
+#ifdef SK_GL
+ GrGLFramebufferInfo fGLInfo;
+#endif
+#ifdef SK_VULKAN
+ GrVkBackendSurfaceInfo fVkInfo;
+#endif
+ GrMockRenderTargetInfo fMockInfo;
+#ifdef SK_DIRECT3D
+ GrD3DBackendSurfaceInfo fD3DInfo;
+#endif
+ };
+#ifdef SK_METAL
+ GrMtlTextureInfo fMtlInfo;
+#endif
+#ifdef SK_DAWN
+ GrDawnRenderTargetInfo fDawnInfo;
+#endif
+ sk_sp<skgpu::MutableTextureStateRef> fMutableState;
+};
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrBackendSurfaceMutableState.h b/gfx/skia/skia/include/gpu/GrBackendSurfaceMutableState.h
new file mode 100644
index 0000000000..cbf27bf7e5
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrBackendSurfaceMutableState.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBackendSurfaceMutableState_DEFINED
+#define GrBackendSurfaceMutableState_DEFINED
+
+#include "include/gpu/MutableTextureState.h"
+
+class GrBackendSurfaceMutableState : public skgpu::MutableTextureState {
+public:
+ GrBackendSurfaceMutableState() = default;
+
+#ifdef SK_VULKAN
+ GrBackendSurfaceMutableState(VkImageLayout layout, uint32_t queueFamilyIndex)
+ : skgpu::MutableTextureState(layout, queueFamilyIndex) {}
+#endif
+
+ GrBackendSurfaceMutableState(const GrBackendSurfaceMutableState& that)
+ : skgpu::MutableTextureState(that) {}
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrContextOptions.h b/gfx/skia/skia/include/gpu/GrContextOptions.h
new file mode 100644
index 0000000000..bf4ca409a8
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContextOptions.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextOptions_DEFINED
+#define GrContextOptions_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GrDriverBugWorkarounds.h"
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/ShaderErrorHandler.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+
+#include <vector>
+
+class SkExecutor;
+
+#if defined(SK_GANESH)
+struct SK_API GrContextOptions {
+ enum class Enable {
+ /** Forces an option to be disabled. */
+ kNo,
+ /** Forces an option to be enabled. */
+ kYes,
+ /**
+ * Uses Skia's default behavior, which may use runtime properties (e.g. driver version).
+ */
+ kDefault
+ };
+
+ enum class ShaderCacheStrategy {
+ kSkSL,
+ kBackendSource,
+ kBackendBinary,
+ };
+
+ /**
+ * Abstract class which stores Skia data in a cache that persists between sessions. Currently,
+ * Skia stores compiled shader binaries (only when glProgramBinary / glGetProgramBinary are
+ * supported) when provided a persistent cache, but this may extend to other data in the future.
+ */
+ class SK_API PersistentCache {
+ public:
+ virtual ~PersistentCache() = default;
+
+ /**
+ * Returns the data for the key if it exists in the cache, otherwise returns null.
+ */
+ virtual sk_sp<SkData> load(const SkData& key) = 0;
+
+ // Placeholder until all clients override the 3-parameter store(), then remove this, and
+ // make that version pure virtual.
+ virtual void store(const SkData& /*key*/, const SkData& /*data*/) { SkASSERT(false); }
+
+ /**
+ * Stores data in the cache, indexed by key. description provides a human-readable
+ * version of the key.
+ */
+ virtual void store(const SkData& key, const SkData& data, const SkString& /*description*/) {
+ this->store(key, data);
+ }
+
+ protected:
+ PersistentCache() = default;
+ PersistentCache(const PersistentCache&) = delete;
+ PersistentCache& operator=(const PersistentCache&) = delete;
+ };
+
+ using ShaderErrorHandler = skgpu::ShaderErrorHandler;
+
+ GrContextOptions() {}
+
+ // Suppress prints for the GrContext.
+ bool fSuppressPrints = false;
+
+ /**
+ * Controls whether we check for GL errors after functions that allocate resources (e.g.
+ * glTexImage2D), at the end of a GPU submission, or checking framebuffer completeness. The
+ * results of shader compilation and program linking are always checked, regardless of this
+ * option. Ignored on backends other than GL.
+ */
+ Enable fSkipGLErrorChecks = Enable::kDefault;
+
+ /** Overrides: These options override feature detection using backend API queries. These
+ overrides can only reduce the feature set or limits, never increase them beyond the
+ detected values. */
+
+ int fMaxTextureSizeOverride = SK_MaxS32;
+
+ /** the threshold in bytes above which we will use a buffer mapping API to map vertex and index
+ buffers to CPU memory in order to update them. A value of -1 means the GrContext should
+ deduce the optimal value for this platform. */
+ int fBufferMapThreshold = -1;
+
+ /**
+ * Executor to handle threaded work within Ganesh. If this is nullptr, then all work will be
+ * done serially on the main thread. To have worker threads assist with various tasks, set this
+ * to a valid SkExecutor instance. Currently, used for software path rendering, but may be used
+ * for other tasks.
+ */
+ SkExecutor* fExecutor = nullptr;
+
+ /** Construct mipmaps manually, via repeated downsampling draw-calls. This is used when
+ the driver's implementation (glGenerateMipmap) contains bugs. This requires mipmap
+ level control (ie desktop or ES3). */
+ bool fDoManualMipmapping = false;
+
+ /**
+ * Disables the use of coverage counting shortcuts to render paths. Coverage counting can cause
+ * artifacts along shared edges if care isn't taken to ensure both contours wind in the same
+ * direction.
+ */
+ // FIXME: Once this is removed from Chrome and Android, rename to fEnable"".
+ bool fDisableCoverageCountingPaths = true;
+
+ /**
+ * Disables distance field rendering for paths. Distance field computation can be expensive,
+ * and yields no benefit if a path is not rendered multiple times with different transforms.
+ */
+ bool fDisableDistanceFieldPaths = false;
+
+ /**
+ * If true this allows path mask textures to be cached. This is only really useful if paths
+ * are commonly rendered at the same scale and fractional translation.
+ */
+ bool fAllowPathMaskCaching = true;
+
+ /**
+ * If true, the GPU will not be used to perform YUV -> RGB conversion when generating
+ * textures from codec-backed images.
+ */
+ bool fDisableGpuYUVConversion = false;
+
+ /**
+ * The maximum size of cache textures used for Skia's Glyph cache.
+ */
+ size_t fGlyphCacheTextureMaximumBytes = 2048 * 1024 * 4;
+
+ /**
+ * Below this threshold size in device space distance field fonts won't be used. Distance field
+ * fonts don't support hinting which is more important at smaller sizes.
+ */
+ float fMinDistanceFieldFontSize = 18;
+
+ /**
+ * Above this threshold size in device space glyphs are drawn as individual paths.
+ */
+#if defined(SK_BUILD_FOR_ANDROID)
+ float fGlyphsAsPathsFontSize = 384;
+#elif defined(SK_BUILD_FOR_MAC)
+ float fGlyphsAsPathsFontSize = 256;
+#else
+ float fGlyphsAsPathsFontSize = 324;
+#endif
+
+ /**
+     * Can the glyph atlas use multiple textures. If allowed, each texture's size is bound by
+     * fGlyphCacheTextureMaximumBytes.
+ */
+ Enable fAllowMultipleGlyphCacheTextures = Enable::kDefault;
+
+ /**
+ * Bugs on certain drivers cause stencil buffers to leak. This flag causes Skia to avoid
+ * allocating stencil buffers and use alternate rasterization paths, avoiding the leak.
+ */
+ bool fAvoidStencilBuffers = false;
+
+ /**
+ * Enables driver workaround to use draws instead of HW clears, e.g. glClear on the GL backend.
+ */
+ Enable fUseDrawInsteadOfClear = Enable::kDefault;
+
+ /**
+ * Allow Ganesh to more aggressively reorder operations to reduce the number of render passes.
+ * Offscreen draws will be done upfront instead of interrupting the main render pass when
+ * possible. May increase VRAM usage, but still observes the resource cache limit.
+ * Enabled by default.
+ */
+ Enable fReduceOpsTaskSplitting = Enable::kDefault;
+
+ /**
+ * Some ES3 contexts report the ES2 external image extension, but not the ES3 version.
+ * If support for external images is critical, enabling this option will cause Ganesh to limit
+ * shaders to the ES2 shading language in that situation.
+ */
+ bool fPreferExternalImagesOverES3 = false;
+
+ /**
+ * Disables correctness workarounds that are enabled for particular GPUs, OSes, or drivers.
+     * This does not affect code path choices that are made for performance reasons nor does it
+ * override other GrContextOption settings.
+ */
+ bool fDisableDriverCorrectnessWorkarounds = false;
+
+ /**
+ * Maximum number of GPU programs or pipelines to keep active in the runtime cache.
+ */
+ int fRuntimeProgramCacheSize = 256;
+
+ /**
+ * Cache in which to store compiled shader binaries between runs.
+ */
+ PersistentCache* fPersistentCache = nullptr;
+
+ /**
+ * This affects the usage of the PersistentCache. We can cache SkSL, backend source (GLSL), or
+ * backend binaries (GL program binaries). By default we cache binaries, but if the driver's
+ * binary loading/storing is believed to have bugs, this can be limited to caching GLSL.
+ * Caching GLSL strings still saves CPU work when a GL program is created.
+ */
+ ShaderCacheStrategy fShaderCacheStrategy = ShaderCacheStrategy::kBackendBinary;
+
+ /**
+ * If present, use this object to report shader compilation failures. If not, report failures
+ * via SkDebugf and assert.
+ */
+ ShaderErrorHandler* fShaderErrorHandler = nullptr;
+
+ /**
+ * Specifies the number of samples Ganesh should use when performing internal draws with MSAA
+ * (hardware capabilities permitting).
+ *
+ * If 0, Ganesh will disable internal code paths that use multisampling.
+ */
+ int fInternalMultisampleCount = 4;
+
+ /**
+ * In Skia's vulkan backend a single GrContext submit equates to the submission of a single
+ * primary command buffer to the VkQueue. This value specifies how many vulkan secondary command
+ * buffers we will cache for reuse on a given primary command buffer. A single submit may use
+ * more than this many secondary command buffers, but after the primary command buffer is
+ * finished on the GPU it will only hold on to this many secondary command buffers for reuse.
+ *
+ * A value of -1 means we will pick a limit value internally.
+ */
+ int fMaxCachedVulkanSecondaryCommandBuffers = -1;
+
+ /**
+ * If true, the caps will never support mipmaps.
+ */
+ bool fSuppressMipmapSupport = false;
+
+ /**
+ * If true, the TessellationPathRenderer will not be used for path rendering.
+ * If false, will fallback to any driver workarounds, if set.
+ */
+ bool fDisableTessellationPathRenderer = false;
+
+ /**
+ * If true, and if supported, enables hardware tessellation in the caps.
+ * DEPRECATED: This value is ignored; experimental hardware tessellation is always disabled.
+ */
+ bool fEnableExperimentalHardwareTessellation = false;
+
+ /**
+ * If true, then add 1 pixel padding to all glyph masks in the atlas to support bi-lerp
+ * rendering of all glyphs. This must be set to true to use Slugs.
+ */
+ bool fSupportBilerpFromGlyphAtlas = false;
+
+ /**
+ * Uses a reduced variety of shaders. May perform less optimally in steady state but can reduce
+ * jank due to shader compilations.
+ */
+ bool fReducedShaderVariations = false;
+
+ /**
+ * If true, then allow to enable MSAA on new Intel GPUs.
+ */
+ bool fAllowMSAAOnNewIntel = false;
+
+ /**
+ * Currently on ARM Android we disable the use of GL TexStorage because of memory regressions.
+ * However, some clients may still want to use TexStorage. For example, TexStorage support is
+ * required for creating protected textures.
+ *
+ * This flag has no impact on non GL backends.
+ */
+ bool fAlwaysUseTexStorageWhenAvailable = false;
+
+ /**
+ * Optional callback that can be passed into the GrDirectContext which will be called when the
+ * GrDirectContext is about to be destroyed. When this call is made, it will be safe for the
+ * client to delete the GPU backend context that is backing the GrDirectContext. The
+ * GrDirectContextDestroyedContext will be passed back to the client in the callback.
+ */
+ GrDirectContextDestroyedContext fContextDeleteContext = nullptr;
+ GrDirectContextDestroyedProc fContextDeleteProc = nullptr;
+
+#if GR_TEST_UTILS
+ /**
+ * Private options that are only meant for testing within Skia's tools.
+ */
+
+ /**
+ * Testing-only mode to exercise allocation failures in the flush-time callback objects.
+ * For now it only simulates allocation failure during the preFlush callback.
+ */
+ bool fFailFlushTimeCallbacks = false;
+
+ /**
+ * Prevents use of dual source blending, to test that all xfer modes work correctly without it.
+ */
+ bool fSuppressDualSourceBlending = false;
+
+ /**
+ * Prevents the use of non-coefficient-based blend equations, for testing dst reads, barriers,
+ * and in-shader blending.
+ */
+ bool fSuppressAdvancedBlendEquations = false;
+
+ /**
+ * Prevents the use of framebuffer fetches, for testing dst reads and texture barriers.
+ */
+ bool fSuppressFramebufferFetch = false;
+
+ /**
+ * If true, then all paths are processed as if "setIsVolatile" had been called.
+ */
+ bool fAllPathsVolatile = false;
+
+ /**
+ * Render everything in wireframe
+ */
+ bool fWireframeMode = false;
+
+ /**
+ * Enforces clearing of all textures when they're created.
+ */
+ bool fClearAllTextures = false;
+
+ /**
+ * Randomly generate a (false) GL_OUT_OF_MEMORY error
+ */
+ bool fRandomGLOOM = false;
+
+ /**
+ * Force off support for write/transfer pixels row bytes in caps.
+ */
+ bool fDisallowWriteAndTransferPixelRowBytes = false;
+
+ /**
+ * Include or exclude specific GPU path renderers.
+ */
+ GpuPathRenderers fGpuPathRenderers = GpuPathRenderers::kDefault;
+
+ /**
+ * Specify the GPU resource cache limit. Equivalent to calling `setResourceCacheLimit` on the
+ * context at construction time.
+ *
+ * A value of -1 means use the default limit value.
+ */
+ int fResourceCacheLimitOverride = -1;
+
+ /**
+ * Maximum width and height of internal texture atlases.
+ */
+ int fMaxTextureAtlasSize = 2048;
+#endif
+
+ GrDriverBugWorkarounds fDriverBugWorkarounds;
+};
+#else
+struct GrContextOptions {
+ struct PersistentCache {};
+};
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h b/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h
new file mode 100644
index 0000000000..eb75555364
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrContextThreadSafeProxy.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContextThreadSafeProxy_DEFINED
+#define GrContextThreadSafeProxy_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+#if defined(SK_GANESH)
+
+#include "include/core/SkImageInfo.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrTypes.h"
+
+#include <atomic>
+
+class GrBackendFormat;
+class GrCaps;
+class GrContextThreadSafeProxyPriv;
+class GrThreadSafeCache;
+class GrThreadSafePipelineBuilder;
+class SkSurfaceCharacterization;
+class SkSurfaceProps;
+enum class SkTextureCompressionType;
+
+namespace sktext::gpu { class TextBlobRedrawCoordinator; }
+
+/**
+ * Can be used to perform actions related to the generating GrContext in a thread safe manner. The
+ * proxy does not access the 3D API (e.g. OpenGL) that backs the generating GrContext.
+ */
+class SK_API GrContextThreadSafeProxy final : public SkNVRefCnt<GrContextThreadSafeProxy> {
+public:
+ ~GrContextThreadSafeProxy();
+
+ /**
+ * Create a surface characterization for a DDL that will be replayed into the GrContext
+ * that created this proxy. On failure the resulting characterization will be invalid (i.e.,
+ * "!c.isValid()").
+ *
+ * @param cacheMaxResourceBytes The max resource bytes limit that will be in effect
+ * when the DDL created with this characterization is
+ * replayed.
+ * Note: the contract here is that the DDL will be
+ * created as if it had a full 'cacheMaxResourceBytes'
+ * to use. If replayed into a GrContext that already has
+ * locked GPU memory, the replay can exceed the budget.
+ * To rephrase, all resource allocation decisions are
+ * made at record time and at playback time the budget
+ * limits will be ignored.
+ * @param ii The image info specifying properties of the SkSurface
+ * that the DDL created with this characterization will
+ * be replayed into.
+ * Note: Ganesh doesn't make use of the SkImageInfo's
+ * alphaType
+ * @param backendFormat Information about the format of the GPU surface that
+ * will back the SkSurface upon replay
+ * @param sampleCount The sample count of the SkSurface that the DDL
+ * created with this characterization will be replayed
+ * into
+ * @param origin The origin of the SkSurface that the DDL created with
+ * this characterization will be replayed into
+ * @param surfaceProps The surface properties of the SkSurface that the DDL
+ * created with this characterization will be replayed
+ * into
+ * @param isMipMapped Will the surface the DDL will be replayed into have
+ * space allocated for mipmaps?
+ * @param willUseGLFBO0 Will the surface the DDL will be replayed into be
+ * backed by GL FBO 0. This flag is only valid if using
+ * an GL backend.
+ * @param isTextureable Will the surface be able to act as a texture?
+ * @param isProtected Will the (Vulkan) surface be DRM protected?
+     * @param vkRTSupportsInputAttachment Can the vulkan surface be used as an input
+ attachment?
+ * @param forVulkanSecondaryCommandBuffer Will the surface be wrapping a vulkan secondary
+ * command buffer via a GrVkSecondaryCBDrawContext? If
+ * this is true then the following is required:
+     *                                         isTextureable = false
+ * isMipMapped = false
+ * willUseGLFBO0 = false
+ * vkRTSupportsInputAttachment = false
+ */
+ SkSurfaceCharacterization createCharacterization(
+ size_t cacheMaxResourceBytes,
+ const SkImageInfo& ii,
+ const GrBackendFormat& backendFormat,
+ int sampleCount,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps& surfaceProps,
+ bool isMipMapped,
+ bool willUseGLFBO0 = false,
+ bool isTextureable = true,
+ GrProtected isProtected = GrProtected::kNo,
+ bool vkRTSupportsInputAttachment = false,
+ bool forVulkanSecondaryCommandBuffer = false);
+
+ /*
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+ * It is guaranteed that this backend format will be the one used by the following
+ * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const;
+
+ /**
+ * Retrieve the GrBackendFormat for a given SkTextureCompressionType. This is
+ * guaranteed to match the backend format used by the following
+ * createCompressedBackendTexture methods that take a CompressionType.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ GrBackendFormat compressedBackendFormat(SkTextureCompressionType c) const;
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ int maxSurfaceSampleCountForColorType(SkColorType colorType) const;
+
+ bool isValid() const { return nullptr != fCaps; }
+
+ bool operator==(const GrContextThreadSafeProxy& that) const {
+ // Each GrContext should only ever have a single thread-safe proxy.
+ SkASSERT((this == &that) == (this->fContextID == that.fContextID));
+ return this == &that;
+ }
+
+ bool operator!=(const GrContextThreadSafeProxy& that) const { return !(*this == that); }
+
+ // Provides access to functions that aren't part of the public API.
+ GrContextThreadSafeProxyPriv priv();
+ const GrContextThreadSafeProxyPriv priv() const; // NOLINT(readability-const-return-type)
+
+private:
+ friend class GrContextThreadSafeProxyPriv; // for ctor and hidden methods
+
+ // DDL TODO: need to add unit tests for backend & maybe options
+ GrContextThreadSafeProxy(GrBackendApi, const GrContextOptions&);
+
+ void abandonContext();
+ bool abandoned() const;
+
+ // TODO: This should be part of the constructor but right now we have a chicken-and-egg problem
+ // with GrContext where we get the caps by creating a GPU which requires a context (see the
+ // `init` method on GrContext_Base).
+ void init(sk_sp<const GrCaps>, sk_sp<GrThreadSafePipelineBuilder>);
+
+ const GrBackendApi fBackend;
+ const GrContextOptions fOptions;
+ const uint32_t fContextID;
+ sk_sp<const GrCaps> fCaps;
+ std::unique_ptr<sktext::gpu::TextBlobRedrawCoordinator> fTextBlobRedrawCoordinator;
+ std::unique_ptr<GrThreadSafeCache> fThreadSafeCache;
+ sk_sp<GrThreadSafePipelineBuilder> fPipelineBuilder;
+ std::atomic<bool> fAbandoned{false};
+};
+
+#else // !defined(SK_GANESH)
+class SK_API GrContextThreadSafeProxy final : public SkNVRefCnt<GrContextThreadSafeProxy> {};
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrDirectContext.h b/gfx/skia/skia/include/gpu/GrDirectContext.h
new file mode 100644
index 0000000000..05c8099d3d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDirectContext.h
@@ -0,0 +1,908 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDirectContext_DEFINED
+#define GrDirectContext_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+
+#include <chrono>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string_view>
+
+class GrAtlasManager;
+class GrBackendSemaphore;
+class GrBackendFormat;
+class GrBackendTexture;
+class GrBackendRenderTarget;
+class GrClientMappedBufferManager;
+class GrContextThreadSafeProxy;
+class GrDirectContextPriv;
+class GrGpu;
+class GrResourceCache;
+class GrResourceProvider;
+class SkData;
+class SkPixmap;
+class SkTaskGroup;
+class SkTraceMemoryDump;
+enum SkColorType : int;
+enum class SkTextureCompressionType;
+struct GrGLInterface;
+struct GrMockOptions;
+struct GrVkBackendContext; // IWYU pragma: keep
+struct GrD3DBackendContext; // IWYU pragma: keep
+struct GrMtlBackendContext; // IWYU pragma: keep
+
+namespace skgpu {
+ class MutableTextureState;
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ namespace ganesh { class SmallPathAtlasMgr; }
+#endif
+}
+namespace sktext { namespace gpu { class StrikeCache; } }
+namespace wgpu { class Device; } // IWYU pragma: keep
+
+class SK_API GrDirectContext : public GrRecordingContext {
+public:
+#ifdef SK_GL
+ /**
+ * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the
+ * result of GrGLMakeNativeInterface() is used if it succeeds.
+ */
+ static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
+ static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeGL();
+#endif
+
+#ifdef SK_VULKAN
+ /**
+ * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
+ * GrDirectContext is destroyed. This also means that any objects created with this
+ * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released as they may hold
+ * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released,
+ * then it is safe to delete the vulkan objects.
+ */
+ static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
+#endif
+
+#ifdef SK_METAL
+ /**
+ * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a
+ * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must
+ * have their own ref which will be released when the GrMtlBackendContext is destroyed.
+ * Ganesh will take its own ref on the objects which will be released when the GrDirectContext
+ * is destroyed.
+ */
+ static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&);
+ /**
+ * Deprecated.
+ *
+ * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
+ * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects
+ * must have a ref on them that can be transferred to Ganesh, which will release the ref
+ * when the GrDirectContext is destroyed.
+ */
+ static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
+#endif
+
+#ifdef SK_DIRECT3D
+ /**
+ * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
+ * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
+ */
+ static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
+#endif
+
+#ifdef SK_DAWN
+ static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
+ const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
+#endif
+
+ static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
+ static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);
+
+ ~GrDirectContext() override;
+
+ /**
+ * The context normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the context that the state was modified and it should resend. Shouldn't
+ * be called frequently for good performance.
+ * The flag bits, state, is dependent on which backend is used by the
+ * context, either GL or D3D (possible in future).
+ */
+ void resetContext(uint32_t state = kAll_GrBackendState);
+
+ /**
+ * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
+ * the context has modified the bound texture will have texture id 0 bound. This does not
+ * flush the context. Calling resetContext() does not change the set that will be bound
+ * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
+ * all unit/target combinations are considered to have unmodified bindings until the context
+ * subsequently modifies them (meaning if this is called twice in a row with no intervening
+ * context usage then the second call is a no-op.)
+ */
+ void resetGLTextureBindings();
+
+ /**
+ * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
+ * usable. Call this if you have lost the associated GPU context, and thus internal texture,
+ * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
+ * context and any of its created resource objects will not make backend 3D API calls. Content
+ * rendered but not previously flushed may be lost. After this function is called all subsequent
+ * calls on the context will fail or be no-ops.
+ *
+ * The typical use case for this function is that the underlying 3D context was lost and further
+ * API calls may crash.
+ *
+ * This call is not valid to be made inside ReleaseProcs passed into SkSurface or SkImages. The
+ * call will simply fail (and assert in debug) if it is called while inside a ReleaseProc.
+ *
+ * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
+ * create the context must be kept alive even after abandoning the context. Those objects must
+ * live for the lifetime of the context object itself. The reason for this is so that
+ * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
+ * cleaned up even in a device lost state.
+ */
+ void abandonContext() override;
+
+ /**
+     * Returns true if the context was abandoned or if the backend specific context has
+     * gotten into an unrecoverable, lost state (e.g. in Vulkan backend if we've gotten a
+ * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
+ * context.
+ */
+ bool abandoned() override;
+
+ // TODO: Remove this from public after migrating Chrome.
+ sk_sp<GrContextThreadSafeProxy> threadSafeProxy();
+
+ /**
+ * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
+ * reset and will return false until another out-of-memory error is reported by the 3D API. If
+ * the context is abandoned then this will report false.
+ *
+ * Currently this is implemented for:
+ *
+ * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
+ * therefore hide the error from Skia. Also, it is not advised to use this in combination with
+ * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
+ * checking the GL context for OOM.
+ *
+ * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
+ * occurred.
+ */
+ bool oomed();
+
+ /**
+ * This is similar to abandonContext() however the underlying 3D context is not yet lost and
+ * the context will cleanup all allocated resources before returning. After returning it will
+ * assume that the underlying context may no longer be valid.
+ *
+ * The typical use case for this function is that the client is going to destroy the 3D context
+ * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed
+ * elsewhere by either the client or Skia objects).
+ *
+ * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
+ * create the context must be alive before calling releaseResourcesAndAbandonContext.
+ */
+ void releaseResourcesAndAbandonContext();
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Resource Cache
+
+ /** DEPRECATED
+ * Return the current GPU resource cache limits.
+ *
+ * @param maxResources If non-null, will be set to -1.
+ * @param maxResourceBytes If non-null, returns maximum number of bytes of
+ * video memory that can be held in the cache.
+ */
+ void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
+
+ /**
+ * Return the current GPU resource cache limit in bytes.
+ */
+ size_t getResourceCacheLimit() const;
+
+ /**
+ * Gets the current GPU resource cache usage.
+ *
+ * @param resourceCount If non-null, returns the number of resources that are held in the
+ * cache.
+ * @param maxResourceBytes If non-null, returns the total number of bytes of video memory held
+ * in the cache.
+ */
+ void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
+
+ /**
+ * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
+ */
+ size_t getResourceCachePurgeableBytes() const;
+
+ /** DEPRECATED
+ * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
+ * limit, it will be purged (LRU) to keep the cache within the limit.
+ *
+ * @param maxResources Unused.
+ * @param maxResourceBytes The maximum number of bytes of video memory
+ * that can be held in the cache.
+ */
+ void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
+
+ /**
+ * Specify the GPU resource cache limit. If the cache currently exceeds this limit,
+ * it will be purged (LRU) to keep the cache within the limit.
+ *
+ * @param maxResourceBytes The maximum number of bytes of video memory
+ * that can be held in the cache.
+ */
+ void setResourceCacheLimit(size_t maxResourceBytes);
+
+ /**
+ * Frees GPU created by the context. Can be called to reduce GPU memory
+ * pressure.
+ */
+ void freeGpuResources();
+
+ /**
+ * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
+ * otherwise marked for deletion, regardless of whether the context is under budget.
+ *
+ * If 'scratchResourcesOnly' is true all unlocked scratch resources older than 'msNotUsed' will
+ * be purged but the unlocked resources with persistent data will remain. If
+ * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be
+ * purged.
+ *
+ * @param msNotUsed Only unlocked resources not used in these last milliseconds
+ * will be cleaned up.
+ * @param scratchResourcesOnly If true only unlocked scratch resources will be purged.
+ */
+ void performDeferredCleanup(std::chrono::milliseconds msNotUsed,
+ bool scratchResourcesOnly=false);
+
+ // Temporary compatibility API for Android.
+ void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
+ this->performDeferredCleanup(msNotUsed);
+ }
+
+ /**
+ * Purge unlocked resources from the cache until the provided byte count has been reached
+ * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
+ * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
+ * resource types.
+ *
+ * @param bytesToPurge the desired number of bytes to be purged.
+ * @param preferScratchResources If true scratch resources will be purged prior to other
+ * resource types.
+ */
+ void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
+
+ /**
+ * This entry point is intended for instances where an app has been backgrounded or
+ * suspended.
+ * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
+ * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
+ * then all unlocked resources will be purged.
+ * In either case, after the unlocked resources are purged a separate pass will be made to
+ * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
+ * some resources with persistent data may be purged to be under budget).
+ *
+ * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior
+ * to enforcing the budget requirements.
+ */
+ void purgeUnlockedResources(bool scratchResourcesOnly);
+
+ /**
+ * Gets the maximum supported texture size.
+ */
+ using GrRecordingContext::maxTextureSize;
+
+ /**
+ * Gets the maximum supported render target size.
+ */
+ using GrRecordingContext::maxRenderTargetSize;
+
+ /**
+ * Can a SkImage be created with the given color type.
+ */
+ using GrRecordingContext::colorTypeSupportedAsImage;
+
+ /**
+ * Can a SkSurface be created with the given color type. To check whether MSAA is supported
+ * use maxSurfaceSampleCountForColorType().
+ */
+ using GrRecordingContext::colorTypeSupportedAsSurface;
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ using GrRecordingContext::maxSurfaceSampleCountForColorType;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Misc.
+
+ /**
+ * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
+ * executing any more commands on the GPU. If this call returns false, then the GPU back-end
+ * will not wait on any passed in semaphores, and the client will still own the semaphores,
+ * regardless of the value of deleteSemaphoresAfterWait.
+ *
+ * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
+ * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
+ * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
+ * flush calls.
+ */
+ bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
+ bool deleteSemaphoresAfterWait = true);
+
+ /**
+ * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D
+ * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by
+ * GrContext::submit(syncCpu).
+ */
+ void flushAndSubmit(bool syncCpu = false) {
+ this->flush(GrFlushInfo());
+ this->submit(syncCpu);
+ }
+
+ /**
+ * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
+ * objects. A call to `submit` is always required to ensure work is actually sent to
+ * the gpu. Some specific API details:
+ * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
+ * sync objects from the flush will not be valid until a submission occurs.
+ *
+ * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
+ * buffer or encoder objects. However, these objects are not sent to the gpu until a
+ * submission occurs.
+ *
+ * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
+ * submitted to the gpu during the next submit call (it is possible Skia failed to create a
+ * subset of the semaphores). The client should not wait on these semaphores until after submit
+ * has been called, and must keep them alive until then. If this call returns
+ * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
+ * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with
+ * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
+ * client is still responsible for deleting any initialized semaphores.
+ * Regardless of semaphore submission the context will still be flushed. It should be
+ * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
+ * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
+ * take this as a failure if they passed in semaphores to be submitted.
+ */
+ GrSemaphoresSubmitted flush(const GrFlushInfo& info);
+
+ void flush() { this->flush({}); }
+
+ /**
+ * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
+ * value of the submit will indicate whether or not the submission to the GPU was successful.
+ *
+ * If the call returns true, all previously passed in semaphores in flush calls will have been
+ * submitted to the GPU and they can safely be waited on. The caller should wait on those
+ * semaphores or perform some other global synchronization before deleting the semaphores.
+ *
+ * If it returns false, then those same semaphores will not have been submitted and we will not
+ * try to submit them again. The caller is free to delete the semaphores at any time.
+ *
+ * If the syncCpu flag is true this function will return once the gpu has finished with all
+ * submitted work.
+ */
+ bool submit(bool syncCpu = false);
+
+ /**
+ * Checks whether any asynchronous work is complete and if so calls related callbacks.
+ */
+ void checkAsyncWorkCompletion();
+
+ /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
+ // Chrome is using this!
+ void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
+
+ bool supportsDistanceFieldText() const;
+
+ void storeVkPipelineCacheData();
+
+ /**
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+ * It is guaranteed that this backend format will be the one used by the following
+ * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ using GrRecordingContext::defaultBackendFormat;
+
+ /**
+ * The explicitly allocated backend texture API allows clients to use Skia to create backend
+ * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
+ *
+ * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
+ * before deleting the context used to create them. If the backend is Vulkan, the textures must
+ * be deleted before abandoning the context as well. Additionally, clients should only delete
+ * these objects on the thread for which that context is active.
+ *
+ * The client is responsible for ensuring synchronization between different uses
+ * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
+ * surface, rewrapping it in a image and drawing the image will require explicit
+ * synchronization on the client's part).
+ */
+
+ /**
+ * If possible, create an uninitialized backend texture. The client should ensure that the
+ * returned backend texture is valid.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_UNDEFINED.
+ */
+ GrBackendTexture createBackendTexture(int width,
+ int height,
+ const GrBackendFormat&,
+ GrMipmapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo,
+ std::string_view label = {});
+
+ /**
+ * If possible, create an uninitialized backend texture. The client should ensure that the
+ * returned backend texture is valid.
+ * If successful, the created backend texture will be compatible with the provided
+ * SkColorType.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_UNDEFINED.
+ */
+ GrBackendTexture createBackendTexture(int width, int height,
+ SkColorType,
+ GrMipmapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo,
+ std::string_view label = {});
+
+ /**
+ * If possible, create a backend texture initialized to a particular color. The client should
+ * ensure that the returned backend texture is valid. The client can pass in a finishedProc
+ * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
+ * client is required to call `submit` to send the upload work to the gpu. The
+ * finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createBackendTexture(int width, int height,
+ const GrBackendFormat&,
+ const SkColor4f& color,
+ GrMipmapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ /**
+ * If possible, create a backend texture initialized to a particular color. The client should
+ * ensure that the returned backend texture is valid. The client can pass in a finishedProc
+ * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
+ * client is required to call `submit` to send the upload work to the gpu. The
+ * finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * If successful, the created backend texture will be compatible with the provided
+ * SkColorType.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createBackendTexture(int width, int height,
+ SkColorType,
+ const SkColor4f& color,
+ GrMipmapped,
+ GrRenderable,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ /**
+ * If possible, create a backend texture initialized with the provided pixmap data. The client
+ * should ensure that the returned backend texture is valid. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * If successful, the created backend texture will be compatible with the provided
+ * pixmap(s). Compatible, in this case, means that the backend format will be the result
+ * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
+ * when this call returns.
+ * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
+ * the data for all the mipmap levels must be provided. In the mipmapped case all the
+ * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
+ * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
+ * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
+ * Note: the pixmap's alphatypes and colorspaces are ignored.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createBackendTexture(const SkPixmap srcData[],
+ int numLevels,
+ GrSurfaceOrigin,
+ GrRenderable,
+ GrProtected,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ /**
+ * Convenience version of createBackendTexture() that takes just a base level pixmap.
+ */
+ GrBackendTexture createBackendTexture(const SkPixmap& srcData,
+ GrSurfaceOrigin textureOrigin,
+ GrRenderable renderable,
+ GrProtected isProtected,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ // Deprecated versions that do not take origin and assume top-left.
+ GrBackendTexture createBackendTexture(const SkPixmap srcData[],
+ int numLevels,
+ GrRenderable renderable,
+ GrProtected isProtected,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ GrBackendTexture createBackendTexture(const SkPixmap& srcData,
+ GrRenderable renderable,
+ GrProtected isProtected,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr,
+ std::string_view label = {});
+
+ /**
+ * If possible, updates a backend texture to be filled to a particular color. The client should
+ * check the return value to see if the update was successful. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to update the GrBackendTexture.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateBackendTexture(const GrBackendTexture&,
+ const SkColor4f& color,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * If possible, updates a backend texture to be filled to a particular color. The data in
+ * GrBackendTexture and passed in color is interpreted with respect to the passed in
+ * SkColorType. The client should check the return value to see if the update was successful.
+ * The client can pass in a finishedProc to be notified when the data has been uploaded by the
+ * gpu and the texture can be deleted. The client is required to call `submit` to send
+ * the upload work to the gpu. The finishedProc will always get called even if we failed to
+ * update the GrBackendTexture.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateBackendTexture(const GrBackendTexture&,
+ SkColorType skColorType,
+ const SkColor4f& color,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * If possible, updates a backend texture filled with the provided pixmap data. The client
+ * should check the return value to see if the update was successful. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
+ * means that the backend format is compatible with the base pixmap's colortype. The src data
+ * can be deleted when this call returns.
+ * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
+ * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
+ * Additionally, all the miplevels must be sized correctly (please see
+ * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
+ * pixmap data is vertically flipped in the texture.
+ * Note: the pixmap's alphatypes and colorspaces are ignored.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateBackendTexture(const GrBackendTexture&,
+ const SkPixmap srcData[],
+ int numLevels,
+ GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ /**
+ * Convenience version of updateBackendTexture that takes just a base level pixmap.
+ */
+ bool updateBackendTexture(const GrBackendTexture& texture,
+ const SkPixmap& srcData,
+ GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr) {
+ return this->updateBackendTexture(texture,
+ &srcData,
+ 1,
+ textureOrigin,
+ finishedProc,
+ finishedContext);
+ }
+
+ // Deprecated version that does not take origin and assumes top-left.
+ bool updateBackendTexture(const GrBackendTexture& texture,
+ const SkPixmap srcData[],
+ int numLevels,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * Retrieve the GrBackendFormat for a given SkTextureCompressionType. This is
+ * guaranteed to match the backend format used by the following
+ * createCompressedBackendTexture methods that take a CompressionType.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ using GrRecordingContext::compressedBackendFormat;
+
+ /**
+ * If possible, create a compressed backend texture initialized to a particular color. The
+ * client should ensure that the returned backend texture is valid. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createCompressedBackendTexture(int width, int height,
+ const GrBackendFormat&,
+ const SkColor4f& color,
+ GrMipmapped,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ GrBackendTexture createCompressedBackendTexture(int width, int height,
+ SkTextureCompressionType,
+ const SkColor4f& color,
+ GrMipmapped,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ /**
+ * If possible, create a backend texture initialized with the provided raw data. The client
+ * should ensure that the returned backend texture is valid. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture
+ * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
+ * the data for all the mipmap levels must be provided. Additionally, all the miplevels
+ * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
+ * For the Vulkan backend the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ GrBackendTexture createCompressedBackendTexture(int width, int height,
+ const GrBackendFormat&,
+ const void* data, size_t dataSize,
+ GrMipmapped,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ GrBackendTexture createCompressedBackendTexture(int width, int height,
+ SkTextureCompressionType,
+ const void* data, size_t dataSize,
+ GrMipmapped,
+ GrProtected = GrProtected::kNo,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ /**
+ * If possible, updates a backend texture filled with the provided color. If the texture is
+ * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
+ * should check the return value to see if the update was successful. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateCompressedBackendTexture(const GrBackendTexture&,
+ const SkColor4f& color,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * If possible, updates a backend texture filled with the provided raw data. The client
+ * should check the return value to see if the update was successful. The client can pass in a
+ * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
+ * deleted. The client is required to call `submit` to send the upload work to the gpu.
+ * The finishedProc will always get called even if we failed to create the GrBackendTexture.
+ * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided.
+ * Additionally, all the miplevels must be sized correctly (please see
+ * SkMipmap::ComputeLevelSize and ComputeLevelCount).
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateCompressedBackendTexture(const GrBackendTexture&,
+ const void* data,
+ size_t dataSize,
+ GrGpuFinishedProc finishedProc,
+ GrGpuFinishedContext finishedContext);
+
+ /**
+ * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
+ * skgpu::MutableTextureState. All objects that wrap the backend surface (i.e. SkSurfaces and
+ * SkImages) will also be aware of this state change. This call does not submit the state change
+ * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
+ * for this call is ordered linearly with all other calls that require GrContext::submit to be
+ * called (e.g. updateBackendTexture and flush). If finishedProc is not null then it will be
+ * called with finishedContext after the state transition is known to have occurred on the GPU.
+ *
+ * See skgpu::MutableTextureState to see what state can be set via this call.
+ *
+ * If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
+ * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
+ * tell Skia to not change those respective states.
+ *
+ * If previousState is not null and this returns true, then Skia will have filled in
+ * previousState to have the values of the state before this call.
+ */
+ bool setBackendTextureState(const GrBackendTexture&,
+ const skgpu::MutableTextureState&,
+ skgpu::MutableTextureState* previousState = nullptr,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+ bool setBackendRenderTargetState(const GrBackendRenderTarget&,
+ const skgpu::MutableTextureState&,
+ skgpu::MutableTextureState* previousState = nullptr,
+ GrGpuFinishedProc finishedProc = nullptr,
+ GrGpuFinishedContext finishedContext = nullptr);
+
+ void deleteBackendTexture(GrBackendTexture);
+
+ // This interface allows clients to pre-compile shaders and populate the runtime program cache.
+ // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
+ //
+ // Steps to use this API:
+ //
+ // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
+ // something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
+ // will ensure that the blobs are SkSL, and are suitable for pre-compilation.
+ // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
+ //
+ // 3) Switch over to shipping your application. Include the key/data pairs from above.
+ // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
+ // This will compile the SkSL to create a GL program, and populate the runtime cache.
+ //
+ // This is only guaranteed to work if the context/device used in step #2 are created in the
+ // same way as the one used in step #4, and the same GrContextOptions are specified.
+ // Using cached shader blobs on a different device or driver is undefined.
+ bool precompileShader(const SkData& key, const SkData& data);
+
+#ifdef SK_ENABLE_DUMP_GPU
+ /** Returns a string with detailed information about the context & GPU, in JSON format. */
+ SkString dump() const;
+#endif
+
+ class DirectContextID {
+ public:
+ static GrDirectContext::DirectContextID Next();
+
+ DirectContextID() : fID(SK_InvalidUniqueID) {}
+
+ bool operator==(const DirectContextID& that) const { return fID == that.fID; }
+ bool operator!=(const DirectContextID& that) const { return !(*this == that); }
+
+ void makeInvalid() { fID = SK_InvalidUniqueID; }
+ bool isValid() const { return fID != SK_InvalidUniqueID; }
+
+ private:
+ constexpr DirectContextID(uint32_t id) : fID(id) {}
+ uint32_t fID;
+ };
+
+ DirectContextID directContextID() const { return fDirectContextID; }
+
+ // Provides access to functions that aren't part of the public API.
+ GrDirectContextPriv priv();
+ const GrDirectContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+protected:
+ GrDirectContext(GrBackendApi backend, const GrContextOptions& options);
+
+ bool init() override;
+
+ GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ skgpu::ganesh::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();
+#endif
+
+ GrDirectContext* asDirectContext() override { return this; }
+
+private:
+ // This call will make sure our work on the GPU is finished and will execute any outstanding
+ // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
+ // outstanding work on the gpu. The main use currently for this function is when tearing down or
+ // abandoning the context.
+ //
+ // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
+ // are abandoning the context we don't want the client to be able to use the GrDirectContext to
+ // issue more commands during the callback. Thus before calling this function we set the
+ // GrDirectContext's state to be abandoned. However, we need to be able to get by the abandoned
+ // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned
+ // bool is used for this signal.
+ void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);
+
+ // This delete callback needs to be the first thing on the GrDirectContext so that it is the
+ // last thing destroyed. The callback may signal the client to clean up things that may need
+ // to survive the lifetime of some of the other objects on the GrDirectContext. So make sure
+ // we don't call it until all else has been destroyed.
+ class DeleteCallbackHelper {
+ public:
+ DeleteCallbackHelper(GrDirectContextDestroyedContext context,
+ GrDirectContextDestroyedProc proc)
+ : fContext(context), fProc(proc) {}
+
+ ~DeleteCallbackHelper() {
+ if (fProc) {
+ fProc(fContext);
+ }
+ }
+
+ private:
+ GrDirectContextDestroyedContext fContext;
+ GrDirectContextDestroyedProc fProc;
+ };
+ std::unique_ptr<DeleteCallbackHelper> fDeleteCallbackHelper;
+
+ const DirectContextID fDirectContextID;
+ // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
+ // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
+ // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
+ // invoked after objects they depend upon have already been destroyed.
+ std::unique_ptr<SkTaskGroup> fTaskGroup;
+ std::unique_ptr<sktext::gpu::StrikeCache> fStrikeCache;
+ sk_sp<GrGpu> fGpu;
+ std::unique_ptr<GrResourceCache> fResourceCache;
+ std::unique_ptr<GrResourceProvider> fResourceProvider;
+
+ // This is incremented before we start calling ReleaseProcs from GrSurfaces and decremented
+ // after. A ReleaseProc may trigger code causing another resource to get freed so we track
+ // the count to know if we are in a ReleaseProc at any level. When this is set to a value greater
+ // than zero we will not allow abandonContext calls to be made on the context.
+ int fInsideReleaseProcCnt = 0;
+
+ bool fDidTestPMConversions;
+ // true if the PM/UPM conversion succeeded; false otherwise
+ bool fPMUPMConversionsRoundTrip;
+
+ GrContextOptions::PersistentCache* fPersistentCache;
+
+ std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
+ std::unique_ptr<GrAtlasManager> fAtlasManager;
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ std::unique_ptr<skgpu::ganesh::SmallPathAtlasMgr> fSmallPathAtlasMgr;
+#endif
+
+ friend class GrDirectContextPriv;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h b/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h
new file mode 100644
index 0000000000..1aa995c791
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDriverBugWorkarounds.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDriverBugWorkarounds_DEFINED
+#define GrDriverBugWorkarounds_DEFINED
+
+// External embedders of Skia can override this to use their own list
+// of workaround names.
+#ifdef SK_GPU_WORKAROUNDS_HEADER
+#include SK_GPU_WORKAROUNDS_HEADER
+#else
+// To regenerate this file, set gn arg "skia_generate_workarounds = true"
+// or invoke `bazel run //tools:generate_workarounds`
+// This is not rebuilt by default to avoid embedders having to have extra
+// build steps.
+#include "include/gpu/GrDriverBugWorkaroundsAutogen.h"
+#endif
+
+#include "include/core/SkTypes.h"
+
+#include <stdint.h>
+#include <vector>
+
+enum GrDriverBugWorkaroundType {
+#define GPU_OP(type, name) type,
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+ NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES
+};
+
+class SK_API GrDriverBugWorkarounds {
+ public:
+ GrDriverBugWorkarounds();
+ GrDriverBugWorkarounds(const GrDriverBugWorkarounds&) = default;
+ explicit GrDriverBugWorkarounds(const std::vector<int32_t>& workarounds);
+
+ GrDriverBugWorkarounds& operator=(const GrDriverBugWorkarounds&) = default;
+
+ // Turn on any workarounds listed in |workarounds| (but don't turn any off).
+ void applyOverrides(const GrDriverBugWorkarounds& workarounds);
+
+ ~GrDriverBugWorkarounds();
+
+#define GPU_OP(type, name) bool name = false;
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h b/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h
new file mode 100644
index 0000000000..d0b96ca80a
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrDriverBugWorkaroundsAutogen.h
@@ -0,0 +1,43 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from build_workaround_header.py
+// DO NOT EDIT!
+
+#define GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP) \
+ GPU_OP(ADD_AND_TRUE_TO_LOOP_CONDITION, \
+ add_and_true_to_loop_condition) \
+ GPU_OP(DISABLE_BLEND_EQUATION_ADVANCED, \
+ disable_blend_equation_advanced) \
+ GPU_OP(DISABLE_DISCARD_FRAMEBUFFER, \
+ disable_discard_framebuffer) \
+ GPU_OP(DISABLE_DUAL_SOURCE_BLENDING_SUPPORT, \
+ disable_dual_source_blending_support) \
+ GPU_OP(DISABLE_TEXTURE_STORAGE, \
+ disable_texture_storage) \
+ GPU_OP(DISALLOW_LARGE_INSTANCED_DRAW, \
+ disallow_large_instanced_draw) \
+ GPU_OP(EMULATE_ABS_INT_FUNCTION, \
+ emulate_abs_int_function) \
+ GPU_OP(FLUSH_ON_FRAMEBUFFER_CHANGE, \
+ flush_on_framebuffer_change) \
+ GPU_OP(FORCE_UPDATE_SCISSOR_STATE_WHEN_BINDING_FBO0, \
+ force_update_scissor_state_when_binding_fbo0) \
+ GPU_OP(GL_CLEAR_BROKEN, \
+ gl_clear_broken) \
+ GPU_OP(MAX_FRAGMENT_UNIFORM_VECTORS_32, \
+ max_fragment_uniform_vectors_32) \
+ GPU_OP(MAX_MSAA_SAMPLE_COUNT_4, \
+ max_msaa_sample_count_4) \
+ GPU_OP(PACK_PARAMETERS_WORKAROUND_WITH_PACK_BUFFER, \
+ pack_parameters_workaround_with_pack_buffer) \
+ GPU_OP(REMOVE_POW_WITH_CONSTANT_EXPONENT, \
+ remove_pow_with_constant_exponent) \
+ GPU_OP(REWRITE_DO_WHILE_LOOPS, \
+ rewrite_do_while_loops) \
+ GPU_OP(UNBIND_ATTACHMENTS_ON_BOUND_RENDER_FBO_DELETE, \
+ unbind_attachments_on_bound_render_fbo_delete) \
+ GPU_OP(UNFOLD_SHORT_CIRCUIT_AS_TERNARY_OPERATION, \
+ unfold_short_circuit_as_ternary_operation) \
+// The End
diff --git a/gfx/skia/skia/include/gpu/GrRecordingContext.h b/gfx/skia/skia/include/gpu/GrRecordingContext.h
new file mode 100644
index 0000000000..b7bd6af920
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrRecordingContext.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRecordingContext_DEFINED
+#define GrRecordingContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/gpu/ganesh/GrImageContext.h"
+
+#if GR_GPU_STATS && GR_TEST_UTILS
+#include <map>
+#include <string>
+#endif
+
+class GrAuditTrail;
+class GrBackendFormat;
+class GrDrawingManager;
+class GrOnFlushCallbackObject;
+class GrMemoryPool;
+class GrProgramDesc;
+class GrProgramInfo;
+class GrProxyProvider;
+class GrRecordingContextPriv;
+class GrSurfaceProxy;
+class GrThreadSafeCache;
+class SkArenaAlloc;
+class SkCapabilities;
+class SkJSONWriter;
+
+namespace sktext::gpu {
+class SubRunAllocator;
+class TextBlobRedrawCoordinator;
+}
+
+#if GR_TEST_UTILS
+class SkString;
+#endif
+
+class GrRecordingContext : public GrImageContext {
+public:
+ ~GrRecordingContext() override;
+
+ SK_API GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const {
+ return INHERITED::defaultBackendFormat(ct, renderable);
+ }
+
+ /**
+ * Reports whether the GrDirectContext associated with this GrRecordingContext is abandoned.
+ * When called on a GrDirectContext it may actively check whether the underlying 3D API
+ * device/context has been disconnected before reporting the status. If so, calling this
+ * method will transition the GrDirectContext to the abandoned state.
+ */
+ bool abandoned() override { return INHERITED::abandoned(); }
+
+ /*
+ * Can a SkSurface be created with the given color type. To check whether MSAA is supported
+ * use maxSurfaceSampleCountForColorType().
+ */
+ SK_API bool colorTypeSupportedAsSurface(SkColorType colorType) const {
+ if (kR16G16_unorm_SkColorType == colorType ||
+ kA16_unorm_SkColorType == colorType ||
+ kA16_float_SkColorType == colorType ||
+ kR16G16_float_SkColorType == colorType ||
+ kR16G16B16A16_unorm_SkColorType == colorType ||
+ kGray_8_SkColorType == colorType) {
+ return false;
+ }
+
+ return this->maxSurfaceSampleCountForColorType(colorType) > 0;
+ }
+
+ /**
+ * Gets the maximum supported texture size.
+ */
+ SK_API int maxTextureSize() const;
+
+ /**
+ * Gets the maximum supported render target size.
+ */
+ SK_API int maxRenderTargetSize() const;
+
+ /**
+ * Can a SkImage be created with the given color type.
+ */
+ SK_API bool colorTypeSupportedAsImage(SkColorType) const;
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ SK_API int maxSurfaceSampleCountForColorType(SkColorType colorType) const {
+ return INHERITED::maxSurfaceSampleCountForColorType(colorType);
+ }
+
+ SK_API sk_sp<const SkCapabilities> skCapabilities() const;
+
+ // Provides access to functions that aren't part of the public API.
+ GrRecordingContextPriv priv();
+ const GrRecordingContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+ // The collection of specialized memory arenas for different types of data recorded by a
+ // GrRecordingContext. Arenas does not maintain ownership of the pools it groups together.
+ class Arenas {
+ public:
+ Arenas(SkArenaAlloc*, sktext::gpu::SubRunAllocator*);
+
+ // For storing pipelines and other complex data as-needed by ops
+ SkArenaAlloc* recordTimeAllocator() { return fRecordTimeAllocator; }
+
+ // For storing GrTextBlob SubRuns
+ sktext::gpu::SubRunAllocator* recordTimeSubRunAllocator() {
+ return fRecordTimeSubRunAllocator;
+ }
+
+ private:
+ SkArenaAlloc* fRecordTimeAllocator;
+ sktext::gpu::SubRunAllocator* fRecordTimeSubRunAllocator;
+ };
+
+protected:
+ friend class GrRecordingContextPriv; // for hidden functions
+ friend class SkDeferredDisplayList; // for OwnedArenas
+ friend class SkDeferredDisplayListPriv; // for ProgramData
+
+ // Like Arenas, but preserves ownership of the underlying pools.
+ class OwnedArenas {
+ public:
+ OwnedArenas(bool ddlRecording);
+ ~OwnedArenas();
+
+ Arenas get();
+
+ OwnedArenas& operator=(OwnedArenas&&);
+
+ private:
+ bool fDDLRecording;
+ std::unique_ptr<SkArenaAlloc> fRecordTimeAllocator;
+ std::unique_ptr<sktext::gpu::SubRunAllocator> fRecordTimeSubRunAllocator;
+ };
+
+ GrRecordingContext(sk_sp<GrContextThreadSafeProxy>, bool ddlRecording);
+
+ bool init() override;
+
+ void abandonContext() override;
+
+ GrDrawingManager* drawingManager();
+
+ // There is no going back from this method. It should only be called to control the timing
+ // during abandon or destruction of the context.
+ void destroyDrawingManager();
+
+ Arenas arenas() { return fArenas.get(); }
+ // This entry point should only be used for DDL creation where we want the ops' lifetime to
+ // match that of the DDL.
+ OwnedArenas&& detachArenas();
+
+ GrProxyProvider* proxyProvider() { return fProxyProvider.get(); }
+ const GrProxyProvider* proxyProvider() const { return fProxyProvider.get(); }
+
+ struct ProgramData {
+ ProgramData(std::unique_ptr<const GrProgramDesc>, const GrProgramInfo*);
+ ProgramData(ProgramData&&); // for SkTArray
+ ProgramData(const ProgramData&) = delete;
+ ~ProgramData();
+
+ const GrProgramDesc& desc() const { return *fDesc; }
+ const GrProgramInfo& info() const { return *fInfo; }
+
+ private:
+ // TODO: store the GrProgramDescs in the 'fRecordTimeData' arena
+ std::unique_ptr<const GrProgramDesc> fDesc;
+ // The program infos should be stored in 'fRecordTimeData' so do not need to be ref
+ // counted or deleted in the destructor.
+ const GrProgramInfo* fInfo = nullptr;
+ };
+
+ // This entry point gives the recording context a chance to cache the provided
+ // programInfo. The DDL context takes this opportunity to store programInfos as a sidecar
+ // to the DDL.
+ virtual void recordProgramInfo(const GrProgramInfo*) {}
+ // This asks the recording context to return any programInfos it may have collected
+ // via the 'recordProgramInfo' call. It is up to the caller to ensure that the lifetime
+ // of the programInfos matches the intended use. For example, in DDL-record mode it
+ // is known that all the programInfos will have been allocated in an arena with the
+ // same lifetime at the DDL itself.
+ virtual void detachProgramData(SkTArray<ProgramData>*) {}
+
+ sktext::gpu::TextBlobRedrawCoordinator* getTextBlobRedrawCoordinator();
+ const sktext::gpu::TextBlobRedrawCoordinator* getTextBlobRedrawCoordinator() const;
+
+ GrThreadSafeCache* threadSafeCache();
+ const GrThreadSafeCache* threadSafeCache() const;
+
+ /**
+ * Registers an object for flush-related callbacks. (See GrOnFlushCallbackObject.)
+ *
+ * NOTE: the drawing manager tracks this object as a raw pointer; it is up to the caller to
+ * ensure its lifetime is tied to that of the context.
+ */
+ void addOnFlushCallbackObject(GrOnFlushCallbackObject*);
+
+ GrRecordingContext* asRecordingContext() override { return this; }
+
+ class Stats {
+ public:
+ Stats() = default;
+
+#if GR_GPU_STATS
+ void reset() { *this = {}; }
+
+ int numPathMasksGenerated() const { return fNumPathMasksGenerated; }
+ void incNumPathMasksGenerated() { fNumPathMasksGenerated++; }
+
+ int numPathMaskCacheHits() const { return fNumPathMaskCacheHits; }
+ void incNumPathMasksCacheHits() { fNumPathMaskCacheHits++; }
+
+#if GR_TEST_UTILS
+ void dump(SkString* out) const;
+ void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+#endif
+
+ private:
+ int fNumPathMasksGenerated{0};
+ int fNumPathMaskCacheHits{0};
+
+#else // GR_GPU_STATS
+ void incNumPathMasksGenerated() {}
+ void incNumPathMasksCacheHits() {}
+
+#if GR_TEST_UTILS
+ void dump(SkString*) const {}
+ void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const {}
+#endif
+#endif // GR_GPU_STATS
+ } fStats;
+
+#if GR_GPU_STATS && GR_TEST_UTILS
+ struct DMSAAStats {
+ void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
+ void dump() const;
+ void merge(const DMSAAStats&);
+ int fNumRenderPasses = 0;
+ int fNumMultisampleRenderPasses = 0;
+ std::map<std::string, int> fTriggerCounts;
+ };
+
+ DMSAAStats fDMSAAStats;
+#endif
+
+ Stats* stats() { return &fStats; }
+ const Stats* stats() const { return &fStats; }
+ void dumpJSON(SkJSONWriter*) const;
+
+protected:
+ // Delete last in case other objects call it during destruction.
+ std::unique_ptr<GrAuditTrail> fAuditTrail;
+
+private:
+ OwnedArenas fArenas;
+
+ std::unique_ptr<GrDrawingManager> fDrawingManager;
+ std::unique_ptr<GrProxyProvider> fProxyProvider;
+
+#if GR_TEST_UTILS
+ int fSuppressWarningMessages = 0;
+#endif
+
+ using INHERITED = GrImageContext;
+};
+
+/**
+ * Safely cast a possibly-null base context to direct context.
+ */
+static inline GrDirectContext* GrAsDirectContext(GrContext_Base* base) {
+ return base ? base->asDirectContext() : nullptr;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrSurfaceInfo.h b/gfx/skia/skia/include/gpu/GrSurfaceInfo.h
new file mode 100644
index 0000000000..e037fb4957
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrSurfaceInfo.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSurfaceInfo_DEFINED
+#define GrSurfaceInfo_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+#ifdef SK_GL
+#include "include/private/gpu/ganesh/GrGLTypesPriv.h"
+#endif
+#ifdef SK_VULKAN
+#include "include/private/gpu/ganesh/GrVkTypesPriv.h"
+#endif
+#ifdef SK_DIRECT3D
+#include "include/private/gpu/ganesh/GrD3DTypesMinimal.h"
+struct GrD3DSurfaceInfo;
+#endif
+#ifdef SK_METAL
+#include "include/private/gpu/ganesh/GrMtlTypesPriv.h"
+#endif
+#ifdef SK_DAWN
+#include "include/private/gpu/ganesh/GrDawnTypesPriv.h"
+#endif
+#include "include/private/gpu/ganesh/GrMockTypesPriv.h"
+
+class GrSurfaceInfo {
+public:
+ GrSurfaceInfo() {}
+#ifdef SK_GL
+ GrSurfaceInfo(const GrGLSurfaceInfo& glInfo)
+ : fBackend(GrBackendApi::kOpenGL)
+ , fValid(true)
+ , fSampleCount(glInfo.fSampleCount)
+ , fLevelCount(glInfo.fLevelCount)
+ , fProtected(glInfo.fProtected)
+ , fGLSpec(glInfo) {}
+#endif
+#ifdef SK_VULKAN
+ GrSurfaceInfo(const GrVkSurfaceInfo& vkInfo)
+ : fBackend(GrBackendApi::kVulkan)
+ , fValid(true)
+ , fSampleCount(vkInfo.fSampleCount)
+ , fLevelCount(vkInfo.fLevelCount)
+ , fProtected(vkInfo.fProtected)
+ , fVkSpec(vkInfo) {}
+#endif
+#ifdef SK_DIRECT3D
+ GrSurfaceInfo(const GrD3DSurfaceInfo& d3dInfo);
+#endif
+#ifdef SK_METAL
+ GrSurfaceInfo(const GrMtlSurfaceInfo& mtlInfo)
+ : fBackend(GrBackendApi::kMetal)
+ , fValid(true)
+ , fSampleCount(mtlInfo.fSampleCount)
+ , fLevelCount(mtlInfo.fLevelCount)
+ , fProtected(mtlInfo.fProtected)
+ , fMtlSpec(mtlInfo) {}
+#endif
+#ifdef SK_DAWN
+ GrSurfaceInfo(const GrDawnSurfaceInfo& dawnInfo)
+ : fBackend(GrBackendApi::kDawn)
+ , fValid(true)
+ , fSampleCount(dawnInfo.fSampleCount)
+ , fLevelCount(dawnInfo.fLevelCount)
+ , fProtected(dawnInfo.fProtected)
+ , fDawnSpec(dawnInfo) {}
+#endif
+ GrSurfaceInfo(const GrMockSurfaceInfo& mockInfo)
+ : fBackend(GrBackendApi::kMock)
+ , fValid(true)
+ , fSampleCount(mockInfo.fSampleCount)
+ , fLevelCount(mockInfo.fLevelCount)
+ , fProtected(mockInfo.fProtected)
+ , fMockSpec(mockInfo) {}
+
+ ~GrSurfaceInfo();
+ GrSurfaceInfo(const GrSurfaceInfo&) = default;
+
+ bool isValid() const { return fValid; }
+ GrBackendApi backend() const { return fBackend; }
+
+ uint32_t numSamples() const { return fSampleCount; }
+ uint32_t numMipLevels() const { return fLevelCount; }
+ GrProtected isProtected() const { return fProtected; }
+
+#ifdef SK_GL
+ bool getGLSurfaceInfo(GrGLSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kOpenGL) {
+ return false;
+ }
+ *info = GrGLTextureSpecToSurfaceInfo(fGLSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+#endif
+#ifdef SK_VULKAN
+ bool getVkSurfaceInfo(GrVkSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kVulkan) {
+ return false;
+ }
+ *info = GrVkImageSpecToSurfaceInfo(fVkSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+#endif
+#ifdef SK_DIRECT3D
+ bool getD3DSurfaceInfo(GrD3DSurfaceInfo*) const;
+#endif
+#ifdef SK_METAL
+ bool getMtlSurfaceInfo(GrMtlSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kMetal) {
+ return false;
+ }
+ *info = GrMtlTextureSpecToSurfaceInfo(fMtlSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+#endif
+#ifdef SK_DAWN
+ bool getDawnSurfaceInfo(GrDawnSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kDawn) {
+ return false;
+ }
+ *info = GrDawnTextureSpecToSurfaceInfo(fDawnSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+#endif
+ bool getMockSurfaceInfo(GrMockSurfaceInfo* info) const {
+ if (!this->isValid() || fBackend != GrBackendApi::kMock) {
+ return false;
+ }
+ *info = GrMockTextureSpecToSurfaceInfo(fMockSpec, fSampleCount, fLevelCount, fProtected);
+ return true;
+ }
+
+private:
+ GrBackendApi fBackend = GrBackendApi::kMock;
+ bool fValid = false;
+
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ GrProtected fProtected = GrProtected::kNo;
+
+ union {
+#ifdef SK_GL
+ GrGLTextureSpec fGLSpec;
+#endif
+#ifdef SK_VULKAN
+ GrVkImageSpec fVkSpec;
+#endif
+#ifdef SK_DIRECT3D
+ GrD3DTextureResourceSpecHolder fD3DSpec;
+#endif
+#ifdef SK_METAL
+ GrMtlTextureSpec fMtlSpec;
+#endif
+#ifdef SK_DAWN
+ GrDawnTextureSpec fDawnSpec;
+#endif
+ GrMockTextureSpec fMockSpec;
+ };
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrTypes.h b/gfx/skia/skia/include/gpu/GrTypes.h
new file mode 100644
index 0000000000..177a35a943
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrTypes.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTypes_DEFINED
+#define GrTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h" // IWYU pragma: keep
+
+#include <cstddef>
+#include <cstdint>
+class GrBackendSemaphore;
+
+namespace skgpu {
+enum class Mipmapped : bool;
+enum class Protected : bool;
+enum class Renderable : bool;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
+ * masking with type safety. Instantiated with the ~ operator.
+ */
+template<typename TFlags> class GrTFlagsMask {
+public:
+ constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
+ constexpr explicit GrTFlagsMask(int value) : fValue(value) {}
+ constexpr int value() const { return fValue; }
+private:
+ const int fValue;
+};
+
+/**
+ * Defines bitwise operators that make it possible to use an enum class as a
+ * basic bitfield.
+ */
+#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
+ [[maybe_unused]] constexpr GrTFlagsMask<X> operator~(X a) { \
+ return GrTFlagsMask<X>(~static_cast<int>(a)); \
+ } \
+ [[maybe_unused]] constexpr X operator|(X a, X b) { \
+ return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
+ } \
+ [[maybe_unused]] inline X& operator|=(X& a, X b) { \
+ return (a = a | b); \
+ } \
+ [[maybe_unused]] constexpr bool operator&(X a, X b) { \
+ return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
+ } \
+ [[maybe_unused]] constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X> a, GrTFlagsMask<X> b) { \
+ return GrTFlagsMask<X>(a.value() | b.value()); \
+ } \
+ [[maybe_unused]] constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X> a, X b) { \
+ return GrTFlagsMask<X>(a.value() | static_cast<int>(b)); \
+ } \
+ [[maybe_unused]] constexpr GrTFlagsMask<X> operator|(X a, GrTFlagsMask<X> b) { \
+ return GrTFlagsMask<X>(static_cast<int>(a) | b.value()); \
+ } \
+ [[maybe_unused]] constexpr X operator&(GrTFlagsMask<X> a, GrTFlagsMask<X> b) { \
+ return static_cast<X>(a.value() & b.value()); \
+ } \
+ [[maybe_unused]] constexpr X operator&(GrTFlagsMask<X> a, X b) { \
+ return static_cast<X>(a.value() & static_cast<int>(b)); \
+ } \
+ [[maybe_unused]] constexpr X operator&(X a, GrTFlagsMask<X> b) { \
+ return static_cast<X>(static_cast<int>(a) & b.value()); \
+ } \
+ [[maybe_unused]] inline X& operator&=(X& a, GrTFlagsMask<X> b) { \
+ return (a = a & b); \
+ } \
+
+#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
+ friend constexpr GrTFlagsMask<X> operator ~(X); \
+ friend constexpr X operator |(X, X); \
+ friend X& operator |=(X&, X); \
+ friend constexpr bool operator &(X, X); \
+ friend constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X>, GrTFlagsMask<X>); \
+ friend constexpr GrTFlagsMask<X> operator|(GrTFlagsMask<X>, X); \
+ friend constexpr GrTFlagsMask<X> operator|(X, GrTFlagsMask<X>); \
+ friend constexpr X operator&(GrTFlagsMask<X>, GrTFlagsMask<X>); \
+ friend constexpr X operator&(GrTFlagsMask<X>, X); \
+ friend constexpr X operator&(X, GrTFlagsMask<X>); \
+ friend X& operator &=(X&, GrTFlagsMask<X>)
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Possible 3D APIs that may be used by Ganesh.
+ */
+enum class GrBackendApi : unsigned {
+ kOpenGL,
+ kVulkan,
+ kMetal,
+ kDirect3D,
+ kDawn,
+ /**
+ * Mock is a backend that does not draw anything. It is used for unit tests
+ * and to measure CPU overhead.
+ */
+ kMock,
+
+ /**
+ * Added here to support the legacy GrBackend enum value and clients who referenced it using
+ * GrBackend::kOpenGL_GrBackend.
+ */
+ kOpenGL_GrBackend = kOpenGL,
+};
+
+/**
+ * Previously the above enum was not an enum class but a normal enum. To support the legacy use of
+ * the enum values we define them below so that no clients break.
+ */
+typedef GrBackendApi GrBackend;
+
+static constexpr GrBackendApi kMetal_GrBackend = GrBackendApi::kMetal;
+static constexpr GrBackendApi kVulkan_GrBackend = GrBackendApi::kVulkan;
+static constexpr GrBackendApi kMock_GrBackend = GrBackendApi::kMock;
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Used to say whether a texture has mip levels allocated or not.
+ */
+/** Deprecated legacy alias of skgpu::Mipmapped. */
+using GrMipmapped = skgpu::Mipmapped;
+/** Deprecated legacy alias of skgpu::Mipmapped. */
+using GrMipMapped = skgpu::Mipmapped;
+
+/*
+ * Can a GrBackendObject be rendered to?
+ */
+using GrRenderable = skgpu::Renderable;
+
+/*
+ * Used to say whether texture is backed by protected memory.
+ */
+using GrProtected = skgpu::Protected;
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * GPU SkImage and SkSurfaces can be stored such that (0, 0) in texture space may correspond to
+ * either the top-left or bottom-left content pixel.
+ */
+enum GrSurfaceOrigin : int {
+ kTopLeft_GrSurfaceOrigin,
+ kBottomLeft_GrSurfaceOrigin,
+};
+
+/**
+ * A GrContext's cache of backend context state can be partially invalidated.
+ * These enums are specific to the GL backend and we'd add a new set for an alternative backend.
+ */
+enum GrGLBackendState {
+ kRenderTarget_GrGLBackendState = 1 << 0,
+ // Also includes samplers bound to texture units.
+ kTextureBinding_GrGLBackendState = 1 << 1,
+ // View state stands for scissor and viewport
+ kView_GrGLBackendState = 1 << 2,
+ kBlend_GrGLBackendState = 1 << 3,
+ kMSAAEnable_GrGLBackendState = 1 << 4,
+ kVertex_GrGLBackendState = 1 << 5,
+ kStencil_GrGLBackendState = 1 << 6,
+ kPixelStore_GrGLBackendState = 1 << 7,
+ kProgram_GrGLBackendState = 1 << 8,
+ kFixedFunction_GrGLBackendState = 1 << 9,
+ kMisc_GrGLBackendState = 1 << 10,
+ kALL_GrGLBackendState = 0xffff
+};
+
+/**
+ * This value translates to resetting all the context state for any backend.
+ */
+static const uint32_t kAll_GrBackendState = 0xffffffff;
+
+typedef void* GrGpuFinishedContext;
+typedef void (*GrGpuFinishedProc)(GrGpuFinishedContext finishedContext);
+
+typedef void* GrGpuSubmittedContext;
+typedef void (*GrGpuSubmittedProc)(GrGpuSubmittedContext submittedContext, bool success);
+
+typedef void* GrDirectContextDestroyedContext;
+typedef void (*GrDirectContextDestroyedProc)(GrDirectContextDestroyedContext destroyedContext);
+
+/**
+ * Struct to supply options to flush calls.
+ *
+ * After issuing all commands, fNumSemaphore semaphores will be signaled by the gpu. The client
+ * passes in an array of fNumSemaphores GrBackendSemaphores. In general these GrBackendSemaphore's
+ * can be either initialized or not. If they are initialized, the backend uses the passed in
+ * semaphore. If it is not initialized, a new semaphore is created and the GrBackendSemaphore
+ * object is initialized with that semaphore. The semaphores are not sent to the GPU until the next
+ * GrContext::submit call is made. See the GrContext::submit for more information.
+ *
+ * The client will own and be responsible for deleting the underlying semaphores that are stored
+ * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
+ * themselves can be deleted as soon as this function returns.
+ *
+ * If a finishedProc is provided, the finishedProc will be called when all work submitted to the gpu
+ * from this flush call and all previous flush calls has finished on the GPU. If the flush call
+ * fails due to an error and nothing ends up getting sent to the GPU, the finished proc is called
+ * immediately.
+ *
+ * If a submittedProc is provided, the submittedProc will be called when all work from this flush
+ * call is submitted to the GPU. If the flush call fails due to an error and nothing will get sent
+ * to the GPU, the submitted proc is called immediately. It is possible that when work is finally
+ * submitted, the submission actually fails. In this case we will not reattempt to do the
+ * submission. Skia notifies the client of these via the success bool passed into the submittedProc.
+ * The submittedProc is useful to the client to know when semaphores that were sent with the flush
+ * have actually been submitted to the GPU so that they can be waited on (or deleted if the submit
+ * fails).
+ * Note about GL: In GL work gets sent to the driver immediately during the flush call, but we don't
+ * really know when the driver sends the work to the GPU. Therefore, we treat the submitted proc as
+ * we do in other backends. It will be called when the next GrContext::submit is called after the
+ * flush (or possibly during the flush if there is no work to be done for the flush). The main use
+ * case for the submittedProc is to know when semaphores have been sent to the GPU and even in GL
+ * it is required to call GrContext::submit to flush them. So a client should be able to treat all
+ * backend APIs the same in terms of how the submitted procs are treated.
+ */
+struct GrFlushInfo {
+ size_t fNumSemaphores = 0;
+ GrBackendSemaphore* fSignalSemaphores = nullptr;
+ GrGpuFinishedProc fFinishedProc = nullptr;
+ GrGpuFinishedContext fFinishedContext = nullptr;
+ GrGpuSubmittedProc fSubmittedProc = nullptr;
+ GrGpuSubmittedContext fSubmittedContext = nullptr;
+};
+
+/**
+ * Enum used as return value when flush with semaphores so the client knows whether the valid
+ * semaphores will be submitted on the next GrContext::submit call.
+ */
+enum class GrSemaphoresSubmitted : bool {
+ kNo = false,
+ kYes = true
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/GrYUVABackendTextures.h b/gfx/skia/skia/include/gpu/GrYUVABackendTextures.h
new file mode 100644
index 0000000000..edcde7e533
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/GrYUVABackendTextures.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrYUVABackendTextures_DEFINED
+#define GrYUVABackendTextures_DEFINED
+
+#include "include/core/SkYUVAInfo.h"
+#include "include/gpu/GrBackendSurface.h"
+
+#include <tuple>
+
+/**
+ * A description of a set of GrBackendTextures that hold the planar data described by a SkYUVAInfo.
+ */
+class SK_API GrYUVABackendTextureInfo {
+public:
+ static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes;
+
+ /** Default GrYUVABackendTextureInfo is invalid. */
+ GrYUVABackendTextureInfo() = default;
+
+ /**
+ * Initializes a GrYUVABackendTextureInfo to describe a set of textures that can store the
+ * planes indicated by the SkYUVAInfo. The texture dimensions are taken from the SkYUVAInfo's
+ * plane dimensions. All the described textures share a common origin. The planar image this
+ * describes will be mip mapped if all the textures are individually mip mapped as indicated
+ * by GrMipmapped. This will produce an invalid result (return false from isValid()) if the
+ * passed formats' channels don't agree with SkYUVAInfo.
+ */
+ GrYUVABackendTextureInfo(const SkYUVAInfo&,
+ const GrBackendFormat[kMaxPlanes],
+ GrMipmapped,
+ GrSurfaceOrigin);
+
+ GrYUVABackendTextureInfo(const GrYUVABackendTextureInfo&) = default;
+
+ GrYUVABackendTextureInfo& operator=(const GrYUVABackendTextureInfo&) = default;
+
+ bool operator==(const GrYUVABackendTextureInfo&) const;
+ bool operator!=(const GrYUVABackendTextureInfo& that) const { return !(*this == that); }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); }
+
+ GrMipmapped mipmapped() const { return fMipmapped; }
+
+ GrSurfaceOrigin textureOrigin() const { return fTextureOrigin; }
+
+ /** The number of SkPixmap planes, 0 if this GrYUVABackendTextureInfo is invalid. */
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ /** Format of the ith plane, or invalid format if i >= numPlanes() */
+ const GrBackendFormat& planeFormat(int i) const { return fPlaneFormats[i]; }
+
+ /**
+ * Returns true if this has been configured with a valid SkYUVAInfo with compatible texture
+ * formats.
+ */
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ GrBackendFormat fPlaneFormats[kMaxPlanes];
+ GrMipmapped fMipmapped = GrMipmapped::kNo;
+ GrSurfaceOrigin fTextureOrigin = kTopLeft_GrSurfaceOrigin;
+};
+
+/**
+ * A set of GrBackendTextures that hold the planar data for an image described by a SkYUVAInfo.
+ */
+class SK_API GrYUVABackendTextures {
+public:
+ GrYUVABackendTextures() = default;
+ GrYUVABackendTextures(const GrYUVABackendTextures&) = delete;
+ GrYUVABackendTextures(GrYUVABackendTextures&&) = default;
+
+ GrYUVABackendTextures& operator=(const GrYUVABackendTextures&) = delete;
+ GrYUVABackendTextures& operator=(GrYUVABackendTextures&&) = default;
+
+ GrYUVABackendTextures(const SkYUVAInfo&,
+ const GrBackendTexture[SkYUVAInfo::kMaxPlanes],
+ GrSurfaceOrigin textureOrigin);
+
+ const std::array<GrBackendTexture, SkYUVAInfo::kMaxPlanes>& textures() const {
+ return fTextures;
+ }
+
+ GrBackendTexture texture(int i) const {
+ SkASSERT(i >= 0 && i < SkYUVAInfo::kMaxPlanes);
+ return fTextures[static_cast<size_t>(i)];
+ }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ GrSurfaceOrigin textureOrigin() const { return fTextureOrigin; }
+
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ std::array<GrBackendTexture, SkYUVAInfo::kMaxPlanes> fTextures;
+ GrSurfaceOrigin fTextureOrigin = kTopLeft_GrSurfaceOrigin;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/MutableTextureState.h b/gfx/skia/skia/include/gpu/MutableTextureState.h
new file mode 100644
index 0000000000..19b7cd54c6
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/MutableTextureState.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_MutableTextureState_DEFINED
+#define skgpu_MutableTextureState_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+
+#ifdef SK_VULKAN
+#include "include/private/gpu/vk/VulkanTypesPriv.h"
+#endif
+
+#include <new>
+
+class GrVkGpu;
+
+namespace skgpu {
+
+/**
+ * Since Skia and clients can both modify gpu textures and their connected state, Skia needs a way
+ * for clients to inform us if they have modified any of this state. In order to not need setters
+ * for every single API and state, we use this class to be a generic wrapper around all the mutable
+ * state. This class is used for calls that inform Skia of these texture/image state changes by the
+ * client as well as for requesting state changes to be done by Skia. The backend specific state
+ * that is wrapped by this class are:
+ *
+ * Vulkan: VkImageLayout and QueueFamilyIndex
+ */
+class SK_API MutableTextureState {
+public:
+ MutableTextureState() {}
+
+#ifdef SK_VULKAN
+ MutableTextureState(VkImageLayout layout, uint32_t queueFamilyIndex)
+ : fVkState(layout, queueFamilyIndex)
+ , fBackend(BackendApi::kVulkan)
+ , fIsValid(true) {}
+#endif
+
+ MutableTextureState(const MutableTextureState& that)
+ : fBackend(that.fBackend), fIsValid(that.fIsValid) {
+ if (!fIsValid) {
+ return;
+ }
+ switch (fBackend) {
+ case BackendApi::kVulkan:
+ #ifdef SK_VULKAN
+ SkASSERT(that.fBackend == BackendApi::kVulkan);
+ fVkState = that.fVkState;
+ #endif
+ break;
+ default:
+ (void)that;
+ SkUNREACHABLE;
+ }
+ }
+
+ MutableTextureState& operator=(const MutableTextureState& that) {
+ if (this != &that) {
+ this->~MutableTextureState();
+ new (this) MutableTextureState(that);
+ }
+ return *this;
+ }
+
+#ifdef SK_VULKAN
+    // If this class is not Vulkan backed it will return the value VK_IMAGE_LAYOUT_UNDEFINED.
+    // Otherwise it will return the VkImageLayout.
+ VkImageLayout getVkImageLayout() const {
+ if (this->isValid() && fBackend != BackendApi::kVulkan) {
+ return VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+ return fVkState.getImageLayout();
+ }
+
+    // If this class is not Vulkan backed it will return the value VK_QUEUE_FAMILY_IGNORED.
+    // Otherwise it will return the queue family index.
+ uint32_t getQueueFamilyIndex() const {
+ if (this->isValid() && fBackend != BackendApi::kVulkan) {
+ return VK_QUEUE_FAMILY_IGNORED;
+ }
+ return fVkState.getQueueFamilyIndex();
+ }
+#endif
+
+ BackendApi backend() const { return fBackend; }
+
+ // Returns true if the backend mutable state has been initialized.
+ bool isValid() const { return fIsValid; }
+
+private:
+ friend class MutableTextureStateRef;
+ friend class ::GrVkGpu;
+
+#ifdef SK_VULKAN
+ void setVulkanState(VkImageLayout layout, uint32_t queueFamilyIndex) {
+ SkASSERT(!this->isValid() || fBackend == BackendApi::kVulkan);
+ fVkState.setImageLayout(layout);
+ fVkState.setQueueFamilyIndex(queueFamilyIndex);
+ fBackend = BackendApi::kVulkan;
+ fIsValid = true;
+ }
+#endif
+
+ union {
+ char fPlaceholder;
+#ifdef SK_VULKAN
+ VulkanMutableTextureState fVkState;
+#endif
+ };
+
+ BackendApi fBackend = BackendApi::kMock;
+ bool fIsValid = false;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_MutableTextureState_DEFINED
diff --git a/gfx/skia/skia/include/gpu/ShaderErrorHandler.h b/gfx/skia/skia/include/gpu/ShaderErrorHandler.h
new file mode 100644
index 0000000000..8960da5c5a
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/ShaderErrorHandler.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_ShaderErrorHandler_DEFINED
+#define skgpu_ShaderErrorHandler_DEFINED
+
+#include "include/core/SkTypes.h"
+
+namespace skgpu {
+/**
+ * Abstract class to report errors when compiling shaders.
+ */
+class SK_API ShaderErrorHandler {
+public:
+ virtual ~ShaderErrorHandler() = default;
+
+ virtual void compileError(const char* shader, const char* errors) = 0;
+
+protected:
+ ShaderErrorHandler() = default;
+ ShaderErrorHandler(const ShaderErrorHandler&) = delete;
+ ShaderErrorHandler& operator=(const ShaderErrorHandler&) = delete;
+};
+
+/**
+ * Used when no error handler is set. Will report failures via SkDebugf and asserts.
+ */
+ShaderErrorHandler* DefaultShaderErrorHandler();
+
+} // namespace skgpu
+
+#endif // skgpu_ShaderErrorHandler_DEFINED
diff --git a/gfx/skia/skia/include/gpu/d3d/GrD3DBackendContext.h b/gfx/skia/skia/include/gpu/d3d/GrD3DBackendContext.h
new file mode 100644
index 0000000000..bb85e52e5c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/d3d/GrD3DBackendContext.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrD3DBackendContext_DEFINED
+#define GrD3DBackendContext_DEFINED
+
+// GrD3DTypes.h includes d3d12.h, which in turn includes windows.h, which redefines many
+// common identifiers such as:
+// * interface
+// * small
+// * near
+// * far
+// * CreateSemaphore
+// * MemoryBarrier
+//
+// You should only include GrD3DBackendContext.h if you are prepared to rename those identifiers.
+#include "include/gpu/d3d/GrD3DTypes.h"
+
+#include "include/gpu/GrTypes.h"
+
+// The BackendContext contains all of the base D3D objects needed by the GrD3DGpu. The assumption
+// is that the client will set these up and pass them to the GrD3DGpu constructor.
+struct SK_API GrD3DBackendContext {
+ gr_cp<IDXGIAdapter1> fAdapter;
+ gr_cp<ID3D12Device> fDevice;
+ gr_cp<ID3D12CommandQueue> fQueue;
+ sk_sp<GrD3DMemoryAllocator> fMemoryAllocator;
+ GrProtected fProtectedContext = GrProtected::kNo;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/d3d/GrD3DTypes.h b/gfx/skia/skia/include/gpu/d3d/GrD3DTypes.h
new file mode 100644
index 0000000000..b595422e86
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/d3d/GrD3DTypes.h
@@ -0,0 +1,248 @@
+
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrD3DTypes_DEFINED
+#define GrD3DTypes_DEFINED
+
+// This file includes d3d12.h, which in turn includes windows.h, which redefines many
+// common identifiers such as:
+// * interface
+// * small
+// * near
+// * far
+// * CreateSemaphore
+// * MemoryBarrier
+//
+// You should only include this header if you need the Direct3D definitions and are
+// prepared to rename those identifiers.
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GpuTypes.h"
+#include <d3d12.h>
+#include <dxgi1_4.h>
+
+class GrD3DGpu;
+
+ /** Check if the argument is non-null, and if so, call obj->AddRef() and return obj.
+ */
+template <typename T> static inline T* GrSafeComAddRef(T* obj) {
+ if (obj) {
+ obj->AddRef();
+ }
+ return obj;
+}
+
+/** Check if the argument is non-null, and if so, call obj->Release()
+ */
+template <typename T> static inline void GrSafeComRelease(T* obj) {
+ if (obj) {
+ obj->Release();
+ }
+}
+
+template <typename T> class gr_cp {
+public:
+ using element_type = T;
+
+ constexpr gr_cp() : fObject(nullptr) {}
+ constexpr gr_cp(std::nullptr_t) : fObject(nullptr) {}
+
+ /**
+ * Shares the underlying object by calling AddRef(), so that both the argument and the newly
+ * created gr_cp both have a reference to it.
+ */
+ gr_cp(const gr_cp<T>& that) : fObject(GrSafeComAddRef(that.get())) {}
+
+ /**
+ * Move the underlying object from the argument to the newly created gr_cp. Afterwards only
+ * the new gr_cp will have a reference to the object, and the argument will point to null.
+ * No call to AddRef() or Release() will be made.
+ */
+ gr_cp(gr_cp<T>&& that) : fObject(that.release()) {}
+
+ /**
+ * Adopt the bare object into the newly created gr_cp.
+ * No call to AddRef() or Release() will be made.
+ */
+ explicit gr_cp(T* obj) {
+ fObject = obj;
+ }
+
+ /**
+ * Calls Release() on the underlying object pointer.
+ */
+ ~gr_cp() {
+ GrSafeComRelease(fObject);
+ SkDEBUGCODE(fObject = nullptr);
+ }
+
+ /**
+ * Shares the underlying object referenced by the argument by calling AddRef() on it. If this
+ * gr_cp previously had a reference to an object (i.e. not null) it will call Release()
+ * on that object.
+ */
+ gr_cp<T>& operator=(const gr_cp<T>& that) {
+ if (this != &that) {
+ this->reset(GrSafeComAddRef(that.get()));
+ }
+ return *this;
+ }
+
+ /**
+ * Move the underlying object from the argument to the gr_cp. If the gr_cp
+ * previously held a reference to another object, Release() will be called on that object.
+ * No call to AddRef() will be made.
+ */
+ gr_cp<T>& operator=(gr_cp<T>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+
+ explicit operator bool() const { return this->get() != nullptr; }
+
+ T* get() const { return fObject; }
+ T* operator->() const { return fObject; }
+ T** operator&() { return &fObject; }
+
+ /**
+ * Adopt the new object, and call Release() on any previously held object (if not null).
+ * No call to AddRef() will be made.
+ */
+ void reset(T* object = nullptr) {
+ T* oldObject = fObject;
+ fObject = object;
+ GrSafeComRelease(oldObject);
+ }
+
+ /**
+ * Shares the new object by calling AddRef() on it. If this gr_cp previously had a
+ * reference to an object (i.e. not null) it will call Release() on that object.
+ */
+ void retain(T* object) {
+ if (this->fObject != object) {
+ this->reset(GrSafeComAddRef(object));
+ }
+ }
+
+ /**
+ * Return the original object, and set the internal object to nullptr.
+ * The caller must assume ownership of the object, and manage its reference count directly.
+ * No call to Release() will be made.
+ */
+ T* SK_WARN_UNUSED_RESULT release() {
+ T* obj = fObject;
+ fObject = nullptr;
+ return obj;
+ }
+
+private:
+ T* fObject;
+};
+
+template <typename T> inline bool operator==(const gr_cp<T>& a,
+ const gr_cp<T>& b) {
+ return a.get() == b.get();
+}
+
+template <typename T> inline bool operator!=(const gr_cp<T>& a,
+ const gr_cp<T>& b) {
+ return a.get() != b.get();
+}
+
+// interface classes for the GPU memory allocator
+class GrD3DAlloc : public SkRefCnt {
+public:
+ ~GrD3DAlloc() override = default;
+};
+
+class GrD3DMemoryAllocator : public SkRefCnt {
+public:
+ virtual gr_cp<ID3D12Resource> createResource(D3D12_HEAP_TYPE, const D3D12_RESOURCE_DESC*,
+ D3D12_RESOURCE_STATES initialResourceState,
+ sk_sp<GrD3DAlloc>* allocation,
+ const D3D12_CLEAR_VALUE*) = 0;
+ virtual gr_cp<ID3D12Resource> createAliasingResource(sk_sp<GrD3DAlloc>& allocation,
+ uint64_t localOffset,
+ const D3D12_RESOURCE_DESC*,
+ D3D12_RESOURCE_STATES initialResourceState,
+ const D3D12_CLEAR_VALUE*) = 0;
+};
+
+// Note: there is no notion of Borrowed or Adopted resources in the D3D backend,
+// so Ganesh will ref fResource once it's asked to wrap it.
+// Clients are responsible for releasing their own ref to avoid memory leaks.
+struct GrD3DTextureResourceInfo {
+ gr_cp<ID3D12Resource> fResource = nullptr;
+ sk_sp<GrD3DAlloc> fAlloc = nullptr;
+ D3D12_RESOURCE_STATES fResourceState = D3D12_RESOURCE_STATE_COMMON;
+ DXGI_FORMAT fFormat = DXGI_FORMAT_UNKNOWN;
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ unsigned int fSampleQualityPattern = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ GrD3DTextureResourceInfo() = default;
+
+ GrD3DTextureResourceInfo(ID3D12Resource* resource,
+ const sk_sp<GrD3DAlloc> alloc,
+ D3D12_RESOURCE_STATES resourceState,
+ DXGI_FORMAT format,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ unsigned int sampleQualityLevel,
+ skgpu::Protected isProtected = skgpu::Protected::kNo)
+ : fResource(resource)
+ , fAlloc(alloc)
+ , fResourceState(resourceState)
+ , fFormat(format)
+ , fSampleCount(sampleCount)
+ , fLevelCount(levelCount)
+ , fSampleQualityPattern(sampleQualityLevel)
+ , fProtected(isProtected) {}
+
+ GrD3DTextureResourceInfo(const GrD3DTextureResourceInfo& info,
+ D3D12_RESOURCE_STATES resourceState)
+ : fResource(info.fResource)
+ , fAlloc(info.fAlloc)
+ , fResourceState(resourceState)
+ , fFormat(info.fFormat)
+ , fSampleCount(info.fSampleCount)
+ , fLevelCount(info.fLevelCount)
+ , fSampleQualityPattern(info.fSampleQualityPattern)
+ , fProtected(info.fProtected) {}
+
+#if GR_TEST_UTILS
+ bool operator==(const GrD3DTextureResourceInfo& that) const {
+ return fResource == that.fResource && fResourceState == that.fResourceState &&
+ fFormat == that.fFormat && fSampleCount == that.fSampleCount &&
+ fLevelCount == that.fLevelCount &&
+ fSampleQualityPattern == that.fSampleQualityPattern && fProtected == that.fProtected;
+ }
+#endif
+};
+
+struct GrD3DFenceInfo {
+ GrD3DFenceInfo()
+ : fFence(nullptr)
+ , fValue(0) {
+ }
+
+ gr_cp<ID3D12Fence> fFence;
+ uint64_t fValue; // signal value for the fence
+};
+
+struct GrD3DSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ DXGI_FORMAT fFormat = DXGI_FORMAT_UNKNOWN;
+ unsigned int fSampleQualityPattern = DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h b/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h
new file mode 100644
index 0000000000..fbd3dbaf55
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/dawn/GrDawnTypes.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnTypes_DEFINED
+#define GrDawnTypes_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+
+#ifdef Always
+#undef Always
+static constexpr int Always = 2;
+#endif
+#ifdef Success
+#undef Success
+static constexpr int Success = 0;
+#endif
+#ifdef None
+#undef None
+static constexpr int None = 0L;
+#endif
+#include "webgpu/webgpu_cpp.h" // IWYU pragma: export
+
+struct GrDawnTextureInfo {
+ wgpu::Texture fTexture;
+ wgpu::TextureFormat fFormat;
+ uint32_t fLevelCount;
+ GrDawnTextureInfo() : fTexture(nullptr), fFormat(), fLevelCount(0) {
+ }
+ GrDawnTextureInfo(const GrDawnTextureInfo& other)
+ : fTexture(other.fTexture)
+ , fFormat(other.fFormat)
+ , fLevelCount(other.fLevelCount) {
+ }
+ GrDawnTextureInfo& operator=(const GrDawnTextureInfo& other) {
+ fTexture = other.fTexture;
+ fFormat = other.fFormat;
+ fLevelCount = other.fLevelCount;
+ return *this;
+ }
+ bool operator==(const GrDawnTextureInfo& other) const {
+ return fTexture.Get() == other.fTexture.Get() &&
+ fFormat == other.fFormat &&
+ fLevelCount == other.fLevelCount;
+ }
+};
+
+// GrDawnRenderTargetInfo holds a reference to a (1-mip) TextureView. This means that, for now,
+// GrDawnRenderTarget is suitable for rendering, but not readPixels() or writePixels(). Also,
+// backdrop filters and certain blend modes requiring copying the destination framebuffer
+// will not work.
+struct GrDawnRenderTargetInfo {
+ wgpu::TextureView fTextureView;
+ wgpu::TextureFormat fFormat;
+ uint32_t fLevelCount;
+ GrDawnRenderTargetInfo() : fTextureView(nullptr), fFormat(), fLevelCount(0) {
+ }
+ GrDawnRenderTargetInfo(const GrDawnRenderTargetInfo& other)
+ : fTextureView(other.fTextureView)
+ , fFormat(other.fFormat)
+ , fLevelCount(other.fLevelCount) {
+ }
+ explicit GrDawnRenderTargetInfo(const GrDawnTextureInfo& texInfo)
+ : fFormat(texInfo.fFormat)
+ , fLevelCount(1) {
+ wgpu::TextureViewDescriptor desc;
+ desc.format = texInfo.fFormat;
+ desc.mipLevelCount = 1;
+ fTextureView = texInfo.fTexture.CreateView(&desc);
+ }
+ GrDawnRenderTargetInfo& operator=(const GrDawnRenderTargetInfo& other) {
+ fTextureView = other.fTextureView;
+ fFormat = other.fFormat;
+ fLevelCount = other.fLevelCount;
+ return *this;
+ }
+ bool operator==(const GrDawnRenderTargetInfo& other) const {
+ return fTextureView.Get() == other.fTextureView.Get() &&
+ fFormat == other.fFormat &&
+ fLevelCount == other.fLevelCount;
+ }
+};
+
+struct GrDawnSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ wgpu::TextureFormat fFormat;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h b/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h
new file mode 100644
index 0000000000..bfa2aea376
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLAssembleHelpers.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLAssembleInterface.h"
+
+void GrGetEGLQueryAndDisplay(GrEGLQueryStringFn** queryString, GrEGLDisplay* display,
+ void* ctx, GrGLGetProc get);
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h b/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h
new file mode 100644
index 0000000000..4f9f9f9ee0
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLAssembleInterface.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/gl/GrGLInterface.h"
+
+typedef GrGLFuncPtr (*GrGLGetProc)(void* ctx, const char name[]);
+
+/**
+ * Generic function for creating a GrGLInterface for either an OpenGL or GLES context. It calls
+ * get() to get each function address. ctx is a generic ptr passed to and interpreted by get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for an OpenGL (but not GLES) context. It calls
+ * get() to get each function address. ctx is a generic ptr passed to and interpreted by get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledGLInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for an OpenGL ES (but not OpenGL) context. It
+ * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by
+ * get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledGLESInterface(void *ctx, GrGLGetProc get);
+
+/**
+ * Generic function for creating a GrGLInterface for a WebGL (similar to OpenGL ES) context. It
+ * calls get() to get each function address. ctx is a generic ptr passed to and interpreted by
+ * get().
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeAssembledWebGLInterface(void *ctx, GrGLGetProc get);
+
+/** Deprecated version of GrGLMakeAssembledInterface() that returns a bare pointer. */
+SK_API const GrGLInterface* GrGLAssembleInterface(void *ctx, GrGLGetProc get);
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLConfig.h b/gfx/skia/skia/include/gpu/gl/GrGLConfig.h
new file mode 100644
index 0000000000..e3573486ca
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLConfig.h
@@ -0,0 +1,79 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLConfig_DEFINED
+#define GrGLConfig_DEFINED
+
+#include "include/gpu/GrTypes.h"
+
+/**
+ * Optional GL config file.
+ */
+#ifdef GR_GL_CUSTOM_SETUP_HEADER
+ #include GR_GL_CUSTOM_SETUP_HEADER
+#endif
+
+#if !defined(GR_GL_FUNCTION_TYPE)
+ #if defined(SK_BUILD_FOR_WIN)
+ #define GR_GL_FUNCTION_TYPE __stdcall
+ #else
+ #define GR_GL_FUNCTION_TYPE
+ #endif
+#endif
+
+/**
+ * The following are optional defines that can be enabled at the compiler
+ * command line, in an IDE project, in a GrUserConfig.h file, or in a GL custom
+ * file (if one is in use). If a GR_GL_CUSTOM_SETUP_HEADER is used they can
+ * also be placed there.
+ *
+ * GR_GL_LOG_CALLS: if 1 Gr can print every GL call using SkDebugf. Defaults to
+ * 0. Logging can be enabled and disabled at runtime using a debugger via the
+ * global gLogCallsGL. The initial value of gLogCallsGL is controlled by
+ * GR_GL_LOG_CALLS_START.
+ *
+ * GR_GL_LOG_CALLS_START: controls the initial value of gLogCallsGL when
+ * GR_GL_LOG_CALLS is 1. Defaults to 0.
+ *
+ * GR_GL_CHECK_ERROR: if enabled Gr can do a glGetError() after every GL call.
+ * Defaults to 1 if SK_DEBUG is set, otherwise 0. When GR_GL_CHECK_ERROR is 1
+ * this can be toggled in a debugger using the gCheckErrorGL global. The initial
+ * value of gCheckErrorGL is controlled by GR_GL_CHECK_ERROR_START.
+ *
+ * GR_GL_CHECK_ERROR_START: controls the initial value of gCheckErrorGL
+ * when GR_GL_CHECK_ERROR is 1. Defaults to 1.
+ *
+ */
+
+#if !defined(GR_GL_LOG_CALLS)
+ #ifdef SK_DEBUG
+ #define GR_GL_LOG_CALLS 1
+ #else
+ #define GR_GL_LOG_CALLS 0
+ #endif
+#endif
+
+#if !defined(GR_GL_LOG_CALLS_START)
+ #define GR_GL_LOG_CALLS_START 0
+#endif
+
+#if !defined(GR_GL_CHECK_ERROR)
+ #ifdef SK_DEBUG
+ #define GR_GL_CHECK_ERROR 1
+ #else
+ #define GR_GL_CHECK_ERROR 0
+ #endif
+#endif
+
+#if !defined(GR_GL_CHECK_ERROR_START)
+ #define GR_GL_CHECK_ERROR_START 1
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h b/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h
new file mode 100644
index 0000000000..40127d1704
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLConfig_chrome.h
@@ -0,0 +1,14 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrGLConfig_chrome_DEFINED
+#define GrGLConfig_chrome_DEFINED
+
+// glGetError() forces a sync with gpu process on chrome
+#define GR_GL_CHECK_ERROR_START 0
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h b/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h
new file mode 100644
index 0000000000..dfa83e1962
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLExtensions.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLExtensions_DEFINED
+#define GrGLExtensions_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/gpu/gl/GrGLFunctions.h"
+#include "include/private/base/SkTArray.h"
+
+#include <utility>
+
+struct GrGLInterface;
+class SkJSONWriter;
+
+/**
+ * This helper queries the current GL context for its extensions, remembers them, and can be
+ * queried. It supports both glGetString- and glGetStringi-style extension string APIs and will
+ * use the latter if it is available. It also will query for EGL extensions if an eglQueryString
+ * implementation is provided.
+ */
+class SK_API GrGLExtensions {
+public:
+ GrGLExtensions() {}
+
+ GrGLExtensions(const GrGLExtensions&);
+
+ GrGLExtensions& operator=(const GrGLExtensions&);
+
+ void swap(GrGLExtensions* that) {
+ using std::swap;
+ swap(fStrings, that->fStrings);
+ swap(fInitialized, that->fInitialized);
+ }
+
+ /**
+ * We sometimes need to use this class without having yet created a GrGLInterface. This version
+ * of init expects that getString is always non-NULL while getIntegerv and getStringi are non-
+ * NULL if on desktop GL with version 3.0 or higher. Otherwise it will fail.
+ */
+ bool init(GrGLStandard standard,
+ GrGLFunction<GrGLGetStringFn> getString,
+ GrGLFunction<GrGLGetStringiFn> getStringi,
+ GrGLFunction<GrGLGetIntegervFn> getIntegerv,
+ GrGLFunction<GrEGLQueryStringFn> queryString = nullptr,
+ GrEGLDisplay eglDisplay = nullptr);
+
+ bool isInitialized() const { return fInitialized; }
+
+ /**
+ * Queries whether an extension is present. This will fail if init() has not been called.
+ */
+ bool has(const char[]) const;
+
+ /**
+ * Removes an extension if present. Returns true if the extension was present before the call.
+ */
+ bool remove(const char[]);
+
+ /**
+ * Adds an extension to list
+ */
+ void add(const char[]);
+
+ void reset() { fStrings.clear(); }
+
+ void dumpJSON(SkJSONWriter*) const;
+
+private:
+ bool fInitialized = false;
+ SkTArray<SkString> fStrings;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h b/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h
new file mode 100644
index 0000000000..4e488abcad
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLFunctions.h
@@ -0,0 +1,307 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLFunctions_DEFINED
+#define GrGLFunctions_DEFINED
+
+#include <cstring>
+#include "include/gpu/gl/GrGLTypes.h"
+#include "include/private/base/SkTLogic.h"
+
+
+extern "C" {
+
+///////////////////////////////////////////////////////////////////////////////
+
+using GrGLActiveTextureFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum texture);
+using GrGLAttachShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint shader);
+using GrGLBeginQueryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint id);
+using GrGLBindAttribLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint index, const char* name);
+using GrGLBindBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint buffer);
+using GrGLBindFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint framebuffer);
+using GrGLBindRenderbufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint renderbuffer);
+using GrGLBindTextureFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLuint texture);
+using GrGLBindFragDataLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint colorNumber, const GrGLchar* name);
+using GrGLBindFragDataLocationIndexedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLuint colorNumber, GrGLuint index, const GrGLchar* name);
+using GrGLBindSamplerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint unit, GrGLuint sampler);
+using GrGLBindVertexArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint array);
+using GrGLBlendBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLBlendColorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha);
+using GrGLBlendEquationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLBlendFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum sfactor, GrGLenum dfactor);
+using GrGLBlitFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint srcX0, GrGLint srcY0, GrGLint srcX1, GrGLint srcY1, GrGLint dstX0, GrGLint dstY0, GrGLint dstX1, GrGLint dstY1, GrGLbitfield mask, GrGLenum filter);
+using GrGLBufferDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizeiptr size, const GrGLvoid* data, GrGLenum usage);
+using GrGLBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr size, const GrGLvoid* data);
+using GrGLCheckFramebufferStatusFn = GrGLenum GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLClearFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield mask);
+using GrGLClearColorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLclampf red, GrGLclampf green, GrGLclampf blue, GrGLclampf alpha);
+using GrGLClearStencilFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint s);
+using GrGLClearTexImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLenum format, GrGLenum type, const GrGLvoid* data);
+using GrGLClearTexSubImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth, GrGLenum format, GrGLenum type, const GrGLvoid* data);
+using GrGLColorMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLboolean red, GrGLboolean green, GrGLboolean blue, GrGLboolean alpha);
+using GrGLCompileShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader);
+using GrGLCompressedTexImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLenum internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLsizei imageSize, const GrGLvoid* data);
+using GrGLCompressedTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLsizei imageSize, const GrGLvoid* data);
+using GrGLCopyBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum readTargt, GrGLenum writeTarget, GrGLintptr readOffset, GrGLintptr writeOffset, GrGLsizeiptr size);
+using GrGLCopyTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+using GrGLCreateProgramFn = GrGLuint GR_GL_FUNCTION_TYPE();
+using GrGLCreateShaderFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLenum type);
+using GrGLCullFaceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLDeleteBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* buffers);
+using GrGLDeleteFencesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* fences);
+using GrGLDeleteFramebuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* framebuffers);
+using GrGLDeleteProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLDeleteQueriesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* ids);
+using GrGLDeleteRenderbuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* renderbuffers);
+using GrGLDeleteSamplersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei count, const GrGLuint* samplers);
+using GrGLDeleteShaderFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader);
+using GrGLDeleteTexturesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* textures);
+using GrGLDeleteVertexArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLuint* arrays);
+using GrGLDepthMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLboolean flag);
+using GrGLDisableFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum cap);
+using GrGLDisableVertexAttribArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index);
+using GrGLDrawArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count);
+using GrGLDrawArraysInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei primcount);
+using GrGLDrawArraysIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLvoid* indirect);
+using GrGLDrawBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLDrawBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, const GrGLenum* bufs);
+using GrGLDrawElementsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices);
+using GrGLDrawElementsInstancedFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const GrGLvoid* indices, GrGLsizei primcount);
+using GrGLDrawElementsIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect);
+using GrGLDrawRangeElementsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLuint start, GrGLuint end, GrGLsizei count, GrGLenum type, const GrGLvoid* indices);
+using GrGLEnableFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum cap);
+using GrGLEnableVertexAttribArrayFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index);
+using GrGLEndQueryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLFinishFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLFinishFenceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint fence);
+using GrGLFlushFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLFlushMappedBufferRangeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length);
+using GrGLFramebufferRenderbufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer);
+using GrGLFramebufferTexture2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level);
+using GrGLFramebufferTexture2DMultisampleFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLsizei samples);
+using GrGLFrontFaceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode);
+using GrGLGenBuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* buffers);
+using GrGLGenFencesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* fences);
+using GrGLGenFramebuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* framebuffers);
+using GrGLGenerateMipmapFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLGenQueriesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* ids);
+using GrGLGenRenderbuffersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* renderbuffers);
+using GrGLGenSamplersFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei count, GrGLuint* samplers);
+using GrGLGenTexturesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* textures);
+using GrGLGenVertexArraysFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei n, GrGLuint* arrays);
+using GrGLGetBufferParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint* params);
+using GrGLGetErrorFn = GrGLenum GR_GL_FUNCTION_TYPE();
+using GrGLGetFramebufferAttachmentParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum attachment, GrGLenum pname, GrGLint* params);
+using GrGLGetFloatvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLfloat* params);
+using GrGLGetIntegervFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint* params);
+using GrGLGetMultisamplefvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLuint index, GrGLfloat* val);
+using GrGLGetProgramBinaryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, GrGLenum* binaryFormat, void* binary);
+using GrGLGetProgramInfoLogFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLsizei bufsize, GrGLsizei* length, char* infolog);
+using GrGLGetProgramivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum GLtarget, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryObjecti64vFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLint64* params);
+using GrGLGetQueryObjectivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLint* params);
+using GrGLGetQueryObjectui64vFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLuint64* params);
+using GrGLGetQueryObjectuivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum pname, GrGLuint* params);
+using GrGLGetRenderbufferParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint* params);
+using GrGLGetShaderInfoLogFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLsizei bufsize, GrGLsizei* length, char* infolog);
+using GrGLGetShaderivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLenum pname, GrGLint* params);
+using GrGLGetShaderPrecisionFormatFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum shadertype, GrGLenum precisiontype, GrGLint* range, GrGLint* precision);
+using GrGLGetStringFn = const GrGLubyte* GR_GL_FUNCTION_TYPE(GrGLenum name);
+using GrGLGetStringiFn = const GrGLubyte* GR_GL_FUNCTION_TYPE(GrGLenum name, GrGLuint index);
+using GrGLGetTexLevelParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLenum pname, GrGLint* params);
+using GrGLGetUniformLocationFn = GrGLint GR_GL_FUNCTION_TYPE(GrGLuint program, const char* name);
+using GrGLInsertEventMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei length, const char* marker);
+using GrGLInvalidateBufferDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint buffer);
+using GrGLInvalidateBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr length);
+using GrGLInvalidateFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments);
+using GrGLInvalidateSubFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments, GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+using GrGLInvalidateTexImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level);
+using GrGLInvalidateTexSubImageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint texture, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLint zoffset, GrGLsizei width, GrGLsizei height, GrGLsizei depth);
+using GrGLIsTextureFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLuint texture);
+using GrGLLineWidthFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLfloat width);
+using GrGLLinkProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLMapBufferFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum access);
+using GrGLMapBufferRangeFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access);
+using GrGLMapBufferSubDataFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLuint target, GrGLintptr offset, GrGLsizeiptr size, GrGLenum access);
+using GrGLMapTexSubImage2DFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLenum access);
+using GrGLMemoryBarrierFn = GrGLvoid* GR_GL_FUNCTION_TYPE(GrGLbitfield barriers);
+using GrGLPatchParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint value);
+using GrGLPixelStoreiFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum pname, GrGLint param);
+using GrGLPolygonModeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum mode);
+using GrGLPopGroupMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLProgramBinaryFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum binaryFormat, void* binary, GrGLsizei length);
+using GrGLProgramParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLenum pname, GrGLint value);
+using GrGLPushGroupMarkerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsizei length, const char* marker);
+using GrGLQueryCounterFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint id, GrGLenum target);
+using GrGLReadBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum src);
+using GrGLReadPixelsFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLvoid* pixels);
+using GrGLRenderbufferStorageFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLRenderbufferStorageMultisampleFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei samples, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLResolveMultisampleFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLSamplerParameterfFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, GrGLfloat param);
+using GrGLSamplerParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, GrGLint param);
+using GrGLSamplerParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint sampler, GrGLenum pname, const GrGLint* params);
+using GrGLScissorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+// GL_CHROMIUM_bind_uniform_location
+using GrGLBindUniformLocationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program, GrGLint location, const char* name);
+using GrGLSetFenceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint fence, GrGLenum condition);
+using GrGLShaderSourceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint shader, GrGLsizei count, const char* const* str, const GrGLint* length);
+using GrGLStencilFuncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum func, GrGLint ref, GrGLuint mask);
+using GrGLStencilFuncSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum func, GrGLint ref, GrGLuint mask);
+using GrGLStencilMaskFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint mask);
+using GrGLStencilMaskSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLuint mask);
+using GrGLStencilOpFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum fail, GrGLenum zfail, GrGLenum zpass);
+using GrGLStencilOpSeparateFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum face, GrGLenum fail, GrGLenum zfail, GrGLenum zpass);
+using GrGLTexBufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLuint buffer);
+using GrGLTexBufferRangeFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLuint buffer, GrGLintptr offset, GrGLsizeiptr size);
+using GrGLTexImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint internalformat, GrGLsizei width, GrGLsizei height, GrGLint border, GrGLenum format, GrGLenum type, const GrGLvoid* pixels);
+using GrGLTexParameterfFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLfloat param);
+using GrGLTexParameterfvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, const GrGLfloat* params);
+using GrGLTexParameteriFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, GrGLint param);
+using GrGLTexParameterivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum pname, const GrGLint* params);
+using GrGLTexStorage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei levels, GrGLenum internalformat, GrGLsizei width, GrGLsizei height);
+using GrGLDiscardFramebufferFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLsizei numAttachments, const GrGLenum* attachments);
+using GrGLTestFenceFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLuint fence);
+using GrGLTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, const GrGLvoid* pixels);
+using GrGLTextureBarrierFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLUniform1fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0);
+using GrGLUniform1iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0);
+using GrGLUniform1fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform1ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform2fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1);
+using GrGLUniform2iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1);
+using GrGLUniform2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform2ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform3fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2);
+using GrGLUniform3iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2);
+using GrGLUniform3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform3ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniform4fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLfloat v0, GrGLfloat v1, GrGLfloat v2, GrGLfloat v3);
+using GrGLUniform4iFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLint v0, GrGLint v1, GrGLint v2, GrGLint v3);
+using GrGLUniform4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLfloat* v);
+using GrGLUniform4ivFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, const GrGLint* v);
+using GrGLUniformMatrix2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUniformMatrix3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUniformMatrix4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint location, GrGLsizei count, GrGLboolean transpose, const GrGLfloat* value);
+using GrGLUnmapBufferFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLenum target);
+using GrGLUnmapBufferSubDataFn = GrGLvoid GR_GL_FUNCTION_TYPE(const GrGLvoid* mem);
+using GrGLUnmapTexSubImage2DFn = GrGLvoid GR_GL_FUNCTION_TYPE(const GrGLvoid* mem);
+using GrGLUseProgramFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint program);
+using GrGLVertexAttrib1fFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat value);
+using GrGLVertexAttrib2fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttrib3fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttrib4fvFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, const GrGLfloat* values);
+using GrGLVertexAttribDivisorFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint index, GrGLuint divisor);
+using GrGLVertexAttribIPointerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, GrGLint size, GrGLenum type, GrGLsizei stride, const GrGLvoid* ptr);
+using GrGLVertexAttribPointerFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint indx, GrGLint size, GrGLenum type, GrGLboolean normalized, GrGLsizei stride, const GrGLvoid* ptr);
+using GrGLViewportFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLint x, GrGLint y, GrGLsizei width, GrGLsizei height);
+
+/* GL_NV_framebuffer_mixed_samples */
+using GrGLCoverageModulationFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum components);
+
+/* EXT_base_instance */
+using GrGLDrawArraysInstancedBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLint first, GrGLsizei count, GrGLsizei instancecount, GrGLuint baseinstance);
+using GrGLDrawElementsInstancedBaseVertexBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, GrGLenum type, const void *indices, GrGLsizei instancecount, GrGLint basevertex, GrGLuint baseinstance);
+
+/* EXT_multi_draw_indirect */
+using GrGLMultiDrawArraysIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLvoid* indirect, GrGLsizei drawcount, GrGLsizei stride);
+using GrGLMultiDrawElementsIndirectFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLenum type, const GrGLvoid* indirect, GrGLsizei drawcount, GrGLsizei stride);
+
+/* ANGLE_base_vertex_base_instance */
+using GrGLMultiDrawArraysInstancedBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLint* firsts, const GrGLsizei* counts, const GrGLsizei* instanceCounts, const GrGLuint* baseInstances, const GrGLsizei drawcount);
+using GrGLMultiDrawElementsInstancedBaseVertexBaseInstanceFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, const GrGLint* counts, GrGLenum type, const GrGLvoid* const* indices, const GrGLsizei* instanceCounts, const GrGLint* baseVertices, const GrGLuint* baseInstances, const GrGLsizei drawcount);
+
+/* ARB_sync */
+using GrGLFenceSyncFn = GrGLsync GR_GL_FUNCTION_TYPE(GrGLenum condition, GrGLbitfield flags);
+using GrGLIsSyncFn = GrGLboolean GR_GL_FUNCTION_TYPE(GrGLsync sync);
+using GrGLClientWaitSyncFn = GrGLenum GR_GL_FUNCTION_TYPE(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout);
+using GrGLWaitSyncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsync sync, GrGLbitfield flags, GrGLuint64 timeout);
+using GrGLDeleteSyncFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLsync sync);
+
+/* ARB_internalformat_query */
+using GrGLGetInternalformativFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum target, GrGLenum internalformat, GrGLenum pname, GrGLsizei bufSize, GrGLint* params);
+
+/* KHR_debug */
+using GrGLDebugMessageControlFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLenum type, GrGLenum severity, GrGLsizei count, const GrGLuint* ids, GrGLboolean enabled);
+using GrGLDebugMessageInsertFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLenum type, GrGLuint id, GrGLenum severity, GrGLsizei length, const GrGLchar* buf);
+using GrGLDebugMessageCallbackFn = GrGLvoid GR_GL_FUNCTION_TYPE(GRGLDEBUGPROC callback, const GrGLvoid* userParam);
+using GrGLGetDebugMessageLogFn = GrGLuint GR_GL_FUNCTION_TYPE(GrGLuint count, GrGLsizei bufSize, GrGLenum* sources, GrGLenum* types, GrGLuint* ids, GrGLenum* severities, GrGLsizei* lengths, GrGLchar* messageLog);
+using GrGLPushDebugGroupFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum source, GrGLuint id, GrGLsizei length, const GrGLchar* message);
+using GrGLPopDebugGroupFn = GrGLvoid GR_GL_FUNCTION_TYPE();
+using GrGLObjectLabelFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum identifier, GrGLuint name, GrGLsizei length, const GrGLchar* label);
+
+/** EXT_window_rectangles */
+using GrGLWindowRectanglesFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLenum mode, GrGLsizei count, const GrGLint box[]);
+
+/** GL_QCOM_tiled_rendering */
+using GrGLStartTilingFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLuint x, GrGLuint y, GrGLuint width, GrGLuint height, GrGLbitfield preserveMask);
+using GrGLEndTilingFn = GrGLvoid GR_GL_FUNCTION_TYPE(GrGLbitfield preserveMask);
+
+/** EGL functions */
+using GrEGLQueryStringFn = const char* GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLint name);
+using GrEGLGetCurrentDisplayFn = GrEGLDisplay GR_GL_FUNCTION_TYPE();
+using GrEGLCreateImageFn = GrEGLImage GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLContext ctx, GrEGLenum target, GrEGLClientBuffer buffer, const GrEGLint* attrib_list);
+using GrEGLDestroyImageFn = GrEGLBoolean GR_GL_FUNCTION_TYPE(GrEGLDisplay dpy, GrEGLImage image);
+} // extern "C"
+
+// This is a lighter-weight std::function, trying to reduce code size and compile time
+// by only supporting the exact use cases we require.
+template <typename T> class GrGLFunction;
+
+template <typename R, typename... Args>
+class GrGLFunction<R GR_GL_FUNCTION_TYPE(Args...)> {
+public:
+ using Fn = R GR_GL_FUNCTION_TYPE(Args...);
+ // Construct empty.
+ GrGLFunction() = default;
+ GrGLFunction(std::nullptr_t) {}
+
+ // Construct from a simple function pointer.
+ GrGLFunction(Fn* fn_ptr) {
+ static_assert(sizeof(fn_ptr) <= sizeof(fBuf), "fBuf is too small");
+ if (fn_ptr) {
+ memcpy(fBuf, &fn_ptr, sizeof(fn_ptr));
+ fCall = [](const void* buf, Args... args) {
+ return (*(Fn**)buf)(std::forward<Args>(args)...);
+ };
+ }
+ }
+
+ // Construct from a small closure.
+ template <typename Closure>
+ GrGLFunction(Closure closure) : GrGLFunction() {
+ static_assert(sizeof(Closure) <= sizeof(fBuf), "fBuf is too small");
+#if defined(__APPLE__) // I am having serious trouble getting these to work with all STLs...
+ static_assert(std::is_trivially_copyable<Closure>::value, "");
+ static_assert(std::is_trivially_destructible<Closure>::value, "");
+#endif
+
+ memcpy(fBuf, &closure, sizeof(closure));
+ fCall = [](const void* buf, Args... args) {
+ auto closure = (const Closure*)buf;
+ return (*closure)(args...);
+ };
+ }
+
+ R operator()(Args... args) const {
+ SkASSERT(fCall);
+ return fCall(fBuf, std::forward<Args>(args)...);
+ }
+
+ explicit operator bool() const { return fCall != nullptr; }
+
+ void reset() { fCall = nullptr; }
+
+private:
+ using Call = R(const void* buf, Args...);
+ Call* fCall = nullptr;
+ size_t fBuf[4];
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLInterface.h b/gfx/skia/skia/include/gpu/gl/GrGLInterface.h
new file mode 100644
index 0000000000..64ca419b9b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLInterface.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLInterface_DEFINED
+#define GrGLInterface_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/gl/GrGLExtensions.h"
+#include "include/gpu/gl/GrGLFunctions.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+typedef void(*GrGLFuncPtr)();
+struct GrGLInterface;
+
+
+/**
+ * Rather than depend on platform-specific GL headers and libraries, we require
+ * the client to provide a struct of GL function pointers. This struct can be
+ * specified per-GrContext as a parameter to GrContext::MakeGL. If no interface is
+ * passed to MakeGL then a default GL interface is created using GrGLMakeNativeInterface().
+ * If this returns nullptr then GrContext::MakeGL() will fail.
+ *
+ * The implementation of GrGLMakeNativeInterface is platform-specific. Several
+ * implementations have been provided (for GLX, WGL, EGL, etc), along with an
+ * implementation that simply returns nullptr. Clients should select the most
+ * appropriate one to build.
+ */
+SK_API sk_sp<const GrGLInterface> GrGLMakeNativeInterface();
+
+/**
+ * GrContext uses the following interface to make all calls into OpenGL. When a
+ * GrContext is created it is given a GrGLInterface. The interface's function
+ * pointers must be valid for the OpenGL context associated with the GrContext.
+ * On some platforms, such as Windows, function pointers for OpenGL extensions
+ * may vary between OpenGL contexts. So the caller must be careful to use a
+ * GrGLInterface initialized for the correct context. All functions that should
+ * be available based on the OpenGL's version and extension string must be
+ * non-NULL or GrContext creation will fail. This can be tested with the
+ * validate() method when the OpenGL context has been made current.
+ */
+struct SK_API GrGLInterface : public SkRefCnt {
+private:
+ using INHERITED = SkRefCnt;
+
+#if GR_GL_CHECK_ERROR
+ // This is here to avoid having our debug code that checks for a GL error after most GL calls
+ // accidentally swallow an OOM that should be reported.
+ mutable bool fOOMed = false;
+ bool fSuppressErrorLogging = false;
+#endif
+
+public:
+ GrGLInterface();
+
+ // Validates that the GrGLInterface supports its advertised standard. This means the necessary
+ // function pointers have been initialized for both the GL version and any advertised
+ // extensions.
+ bool validate() const;
+
+#if GR_GL_CHECK_ERROR
+ GrGLenum checkError(const char* location, const char* call) const;
+ bool checkAndResetOOMed() const;
+ void suppressErrorLogging();
+#endif
+
+#if GR_TEST_UTILS
+ GrGLInterface(const GrGLInterface& that)
+ : fStandard(that.fStandard)
+ , fExtensions(that.fExtensions)
+ , fFunctions(that.fFunctions) {}
+#endif
+
+ // Indicates the type of GL implementation
+ union {
+ GrGLStandard fStandard;
+ GrGLStandard fBindingsExported; // Legacy name, will be removed when Chromium is updated.
+ };
+
+ GrGLExtensions fExtensions;
+
+ bool hasExtension(const char ext[]) const { return fExtensions.has(ext); }
+
+ /**
+ * The function pointers are in a struct so that we can have a compiler generated assignment
+ * operator.
+ */
+ struct Functions {
+ GrGLFunction<GrGLActiveTextureFn> fActiveTexture;
+ GrGLFunction<GrGLAttachShaderFn> fAttachShader;
+ GrGLFunction<GrGLBeginQueryFn> fBeginQuery;
+ GrGLFunction<GrGLBindAttribLocationFn> fBindAttribLocation;
+ GrGLFunction<GrGLBindBufferFn> fBindBuffer;
+ GrGLFunction<GrGLBindFragDataLocationFn> fBindFragDataLocation;
+ GrGLFunction<GrGLBindFragDataLocationIndexedFn> fBindFragDataLocationIndexed;
+ GrGLFunction<GrGLBindFramebufferFn> fBindFramebuffer;
+ GrGLFunction<GrGLBindRenderbufferFn> fBindRenderbuffer;
+ GrGLFunction<GrGLBindSamplerFn> fBindSampler;
+ GrGLFunction<GrGLBindTextureFn> fBindTexture;
+ GrGLFunction<GrGLBindVertexArrayFn> fBindVertexArray;
+ GrGLFunction<GrGLBlendBarrierFn> fBlendBarrier;
+ GrGLFunction<GrGLBlendColorFn> fBlendColor;
+ GrGLFunction<GrGLBlendEquationFn> fBlendEquation;
+ GrGLFunction<GrGLBlendFuncFn> fBlendFunc;
+ GrGLFunction<GrGLBlitFramebufferFn> fBlitFramebuffer;
+ GrGLFunction<GrGLBufferDataFn> fBufferData;
+ GrGLFunction<GrGLBufferSubDataFn> fBufferSubData;
+ GrGLFunction<GrGLCheckFramebufferStatusFn> fCheckFramebufferStatus;
+ GrGLFunction<GrGLClearFn> fClear;
+ GrGLFunction<GrGLClearColorFn> fClearColor;
+ GrGLFunction<GrGLClearStencilFn> fClearStencil;
+ GrGLFunction<GrGLClearTexImageFn> fClearTexImage;
+ GrGLFunction<GrGLClearTexSubImageFn> fClearTexSubImage;
+ GrGLFunction<GrGLColorMaskFn> fColorMask;
+ GrGLFunction<GrGLCompileShaderFn> fCompileShader;
+ GrGLFunction<GrGLCompressedTexImage2DFn> fCompressedTexImage2D;
+ GrGLFunction<GrGLCompressedTexSubImage2DFn> fCompressedTexSubImage2D;
+ GrGLFunction<GrGLCopyBufferSubDataFn> fCopyBufferSubData;
+ GrGLFunction<GrGLCopyTexSubImage2DFn> fCopyTexSubImage2D;
+ GrGLFunction<GrGLCreateProgramFn> fCreateProgram;
+ GrGLFunction<GrGLCreateShaderFn> fCreateShader;
+ GrGLFunction<GrGLCullFaceFn> fCullFace;
+ GrGLFunction<GrGLDeleteBuffersFn> fDeleteBuffers;
+ GrGLFunction<GrGLDeleteFencesFn> fDeleteFences;
+ GrGLFunction<GrGLDeleteFramebuffersFn> fDeleteFramebuffers;
+ GrGLFunction<GrGLDeleteProgramFn> fDeleteProgram;
+ GrGLFunction<GrGLDeleteQueriesFn> fDeleteQueries;
+ GrGLFunction<GrGLDeleteRenderbuffersFn> fDeleteRenderbuffers;
+ GrGLFunction<GrGLDeleteSamplersFn> fDeleteSamplers;
+ GrGLFunction<GrGLDeleteShaderFn> fDeleteShader;
+ GrGLFunction<GrGLDeleteTexturesFn> fDeleteTextures;
+ GrGLFunction<GrGLDeleteVertexArraysFn> fDeleteVertexArrays;
+ GrGLFunction<GrGLDepthMaskFn> fDepthMask;
+ GrGLFunction<GrGLDisableFn> fDisable;
+ GrGLFunction<GrGLDisableVertexAttribArrayFn> fDisableVertexAttribArray;
+ GrGLFunction<GrGLDrawArraysFn> fDrawArrays;
+ GrGLFunction<GrGLDrawArraysIndirectFn> fDrawArraysIndirect;
+ GrGLFunction<GrGLDrawArraysInstancedFn> fDrawArraysInstanced;
+ GrGLFunction<GrGLDrawBufferFn> fDrawBuffer;
+ GrGLFunction<GrGLDrawBuffersFn> fDrawBuffers;
+ GrGLFunction<GrGLDrawElementsFn> fDrawElements;
+ GrGLFunction<GrGLDrawElementsIndirectFn> fDrawElementsIndirect;
+ GrGLFunction<GrGLDrawElementsInstancedFn> fDrawElementsInstanced;
+ GrGLFunction<GrGLDrawRangeElementsFn> fDrawRangeElements;
+ GrGLFunction<GrGLEnableFn> fEnable;
+ GrGLFunction<GrGLEnableVertexAttribArrayFn> fEnableVertexAttribArray;
+ GrGLFunction<GrGLEndQueryFn> fEndQuery;
+ GrGLFunction<GrGLFinishFn> fFinish;
+ GrGLFunction<GrGLFinishFenceFn> fFinishFence;
+ GrGLFunction<GrGLFlushFn> fFlush;
+ GrGLFunction<GrGLFlushMappedBufferRangeFn> fFlushMappedBufferRange;
+ GrGLFunction<GrGLFramebufferRenderbufferFn> fFramebufferRenderbuffer;
+ GrGLFunction<GrGLFramebufferTexture2DFn> fFramebufferTexture2D;
+ GrGLFunction<GrGLFramebufferTexture2DMultisampleFn> fFramebufferTexture2DMultisample;
+ GrGLFunction<GrGLFrontFaceFn> fFrontFace;
+ GrGLFunction<GrGLGenBuffersFn> fGenBuffers;
+ GrGLFunction<GrGLGenFencesFn> fGenFences;
+ GrGLFunction<GrGLGenFramebuffersFn> fGenFramebuffers;
+ GrGLFunction<GrGLGenerateMipmapFn> fGenerateMipmap;
+ GrGLFunction<GrGLGenQueriesFn> fGenQueries;
+ GrGLFunction<GrGLGenRenderbuffersFn> fGenRenderbuffers;
+ GrGLFunction<GrGLGenSamplersFn> fGenSamplers;
+ GrGLFunction<GrGLGenTexturesFn> fGenTextures;
+ GrGLFunction<GrGLGenVertexArraysFn> fGenVertexArrays;
+ GrGLFunction<GrGLGetBufferParameterivFn> fGetBufferParameteriv;
+ GrGLFunction<GrGLGetErrorFn> fGetError;
+ GrGLFunction<GrGLGetFramebufferAttachmentParameterivFn> fGetFramebufferAttachmentParameteriv;
+ GrGLFunction<GrGLGetFloatvFn> fGetFloatv;
+ GrGLFunction<GrGLGetIntegervFn> fGetIntegerv;
+ GrGLFunction<GrGLGetMultisamplefvFn> fGetMultisamplefv;
+ GrGLFunction<GrGLGetProgramBinaryFn> fGetProgramBinary;
+ GrGLFunction<GrGLGetProgramInfoLogFn> fGetProgramInfoLog;
+ GrGLFunction<GrGLGetProgramivFn> fGetProgramiv;
+ GrGLFunction<GrGLGetQueryObjecti64vFn> fGetQueryObjecti64v;
+ GrGLFunction<GrGLGetQueryObjectivFn> fGetQueryObjectiv;
+ GrGLFunction<GrGLGetQueryObjectui64vFn> fGetQueryObjectui64v;
+ GrGLFunction<GrGLGetQueryObjectuivFn> fGetQueryObjectuiv;
+ GrGLFunction<GrGLGetQueryivFn> fGetQueryiv;
+ GrGLFunction<GrGLGetRenderbufferParameterivFn> fGetRenderbufferParameteriv;
+ GrGLFunction<GrGLGetShaderInfoLogFn> fGetShaderInfoLog;
+ GrGLFunction<GrGLGetShaderivFn> fGetShaderiv;
+ GrGLFunction<GrGLGetShaderPrecisionFormatFn> fGetShaderPrecisionFormat;
+ GrGLFunction<GrGLGetStringFn> fGetString;
+ GrGLFunction<GrGLGetStringiFn> fGetStringi;
+ GrGLFunction<GrGLGetTexLevelParameterivFn> fGetTexLevelParameteriv;
+ GrGLFunction<GrGLGetUniformLocationFn> fGetUniformLocation;
+ GrGLFunction<GrGLInsertEventMarkerFn> fInsertEventMarker;
+ GrGLFunction<GrGLInvalidateBufferDataFn> fInvalidateBufferData;
+ GrGLFunction<GrGLInvalidateBufferSubDataFn> fInvalidateBufferSubData;
+ GrGLFunction<GrGLInvalidateFramebufferFn> fInvalidateFramebuffer;
+ GrGLFunction<GrGLInvalidateSubFramebufferFn> fInvalidateSubFramebuffer;
+ GrGLFunction<GrGLInvalidateTexImageFn> fInvalidateTexImage;
+ GrGLFunction<GrGLInvalidateTexSubImageFn> fInvalidateTexSubImage;
+ GrGLFunction<GrGLIsTextureFn> fIsTexture;
+ GrGLFunction<GrGLLineWidthFn> fLineWidth;
+ GrGLFunction<GrGLLinkProgramFn> fLinkProgram;
+ GrGLFunction<GrGLProgramBinaryFn> fProgramBinary;
+ GrGLFunction<GrGLProgramParameteriFn> fProgramParameteri;
+ GrGLFunction<GrGLMapBufferFn> fMapBuffer;
+ GrGLFunction<GrGLMapBufferRangeFn> fMapBufferRange;
+ GrGLFunction<GrGLMapBufferSubDataFn> fMapBufferSubData;
+ GrGLFunction<GrGLMapTexSubImage2DFn> fMapTexSubImage2D;
+ GrGLFunction<GrGLMemoryBarrierFn> fMemoryBarrier;
+ GrGLFunction<GrGLDrawArraysInstancedBaseInstanceFn> fDrawArraysInstancedBaseInstance;
+ GrGLFunction<GrGLDrawElementsInstancedBaseVertexBaseInstanceFn> fDrawElementsInstancedBaseVertexBaseInstance;
+ GrGLFunction<GrGLMultiDrawArraysIndirectFn> fMultiDrawArraysIndirect;
+ GrGLFunction<GrGLMultiDrawElementsIndirectFn> fMultiDrawElementsIndirect;
+ GrGLFunction<GrGLMultiDrawArraysInstancedBaseInstanceFn> fMultiDrawArraysInstancedBaseInstance;
+ GrGLFunction<GrGLMultiDrawElementsInstancedBaseVertexBaseInstanceFn> fMultiDrawElementsInstancedBaseVertexBaseInstance;
+ GrGLFunction<GrGLPatchParameteriFn> fPatchParameteri;
+ GrGLFunction<GrGLPixelStoreiFn> fPixelStorei;
+ GrGLFunction<GrGLPolygonModeFn> fPolygonMode;
+ GrGLFunction<GrGLPopGroupMarkerFn> fPopGroupMarker;
+ GrGLFunction<GrGLPushGroupMarkerFn> fPushGroupMarker;
+ GrGLFunction<GrGLQueryCounterFn> fQueryCounter;
+ GrGLFunction<GrGLReadBufferFn> fReadBuffer;
+ GrGLFunction<GrGLReadPixelsFn> fReadPixels;
+ GrGLFunction<GrGLRenderbufferStorageFn> fRenderbufferStorage;
+
+ // On OpenGL ES there are multiple incompatible extensions that add support for MSAA
+ // and ES3 adds MSAA support to the standard. On an ES3 driver we may still use the
+ // older extensions for performance reasons or due to ES3 driver bugs. We want the function
+ // that creates the GrGLInterface to provide all available functions and internally
+ // we will select among them. They all have a method called glRenderbufferStorageMultisample*.
+ // So we have separate function pointers for GL_IMG/EXT_multisampled_to_texture,
+ // GL_CHROMIUM/ANGLE_framebuffer_multisample/ES3, and GL_APPLE_framebuffer_multisample
+ // variations.
+ //
+ // If a driver supports multiple GL_ARB_framebuffer_multisample-style extensions then we will
+ // assume the function pointers for the standard (or equivalent GL_ARB) version have
+ // been preferred over GL_EXT, GL_CHROMIUM, or GL_ANGLE variations that have reduced
+ // functionality.
+
+ // GL_EXT_multisampled_render_to_texture (preferred) or GL_IMG_multisampled_render_to_texture
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisampleES2EXT;
+ // GL_APPLE_framebuffer_multisample
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisampleES2APPLE;
+
+ // This is used to store the pointer for GL_ARB/EXT/ANGLE/CHROMIUM_framebuffer_multisample or
+ // the standard function in ES3+ or GL 3.0+.
+ GrGLFunction<GrGLRenderbufferStorageMultisampleFn> fRenderbufferStorageMultisample;
+
+ // Pointer to BindUniformLocationCHROMIUM from the GL_CHROMIUM_bind_uniform_location extension.
+ GrGLFunction<GrGLBindUniformLocationFn> fBindUniformLocation;
+
+ GrGLFunction<GrGLResolveMultisampleFramebufferFn> fResolveMultisampleFramebuffer;
+ GrGLFunction<GrGLSamplerParameterfFn> fSamplerParameterf;
+ GrGLFunction<GrGLSamplerParameteriFn> fSamplerParameteri;
+ GrGLFunction<GrGLSamplerParameterivFn> fSamplerParameteriv;
+ GrGLFunction<GrGLScissorFn> fScissor;
+ GrGLFunction<GrGLSetFenceFn> fSetFence;
+ GrGLFunction<GrGLShaderSourceFn> fShaderSource;
+ GrGLFunction<GrGLStencilFuncFn> fStencilFunc;
+ GrGLFunction<GrGLStencilFuncSeparateFn> fStencilFuncSeparate;
+ GrGLFunction<GrGLStencilMaskFn> fStencilMask;
+ GrGLFunction<GrGLStencilMaskSeparateFn> fStencilMaskSeparate;
+ GrGLFunction<GrGLStencilOpFn> fStencilOp;
+ GrGLFunction<GrGLStencilOpSeparateFn> fStencilOpSeparate;
+ GrGLFunction<GrGLTestFenceFn> fTestFence;
+ GrGLFunction<GrGLTexBufferFn> fTexBuffer;
+ GrGLFunction<GrGLTexBufferRangeFn> fTexBufferRange;
+ GrGLFunction<GrGLTexImage2DFn> fTexImage2D;
+ GrGLFunction<GrGLTexParameterfFn> fTexParameterf;
+ GrGLFunction<GrGLTexParameterfvFn> fTexParameterfv;
+ GrGLFunction<GrGLTexParameteriFn> fTexParameteri;
+ GrGLFunction<GrGLTexParameterivFn> fTexParameteriv;
+ GrGLFunction<GrGLTexSubImage2DFn> fTexSubImage2D;
+ GrGLFunction<GrGLTexStorage2DFn> fTexStorage2D;
+ GrGLFunction<GrGLTextureBarrierFn> fTextureBarrier;
+ GrGLFunction<GrGLDiscardFramebufferFn> fDiscardFramebuffer;
+ GrGLFunction<GrGLUniform1fFn> fUniform1f;
+ GrGLFunction<GrGLUniform1iFn> fUniform1i;
+ GrGLFunction<GrGLUniform1fvFn> fUniform1fv;
+ GrGLFunction<GrGLUniform1ivFn> fUniform1iv;
+ GrGLFunction<GrGLUniform2fFn> fUniform2f;
+ GrGLFunction<GrGLUniform2iFn> fUniform2i;
+ GrGLFunction<GrGLUniform2fvFn> fUniform2fv;
+ GrGLFunction<GrGLUniform2ivFn> fUniform2iv;
+ GrGLFunction<GrGLUniform3fFn> fUniform3f;
+ GrGLFunction<GrGLUniform3iFn> fUniform3i;
+ GrGLFunction<GrGLUniform3fvFn> fUniform3fv;
+ GrGLFunction<GrGLUniform3ivFn> fUniform3iv;
+ GrGLFunction<GrGLUniform4fFn> fUniform4f;
+ GrGLFunction<GrGLUniform4iFn> fUniform4i;
+ GrGLFunction<GrGLUniform4fvFn> fUniform4fv;
+ GrGLFunction<GrGLUniform4ivFn> fUniform4iv;
+ GrGLFunction<GrGLUniformMatrix2fvFn> fUniformMatrix2fv;
+ GrGLFunction<GrGLUniformMatrix3fvFn> fUniformMatrix3fv;
+ GrGLFunction<GrGLUniformMatrix4fvFn> fUniformMatrix4fv;
+ GrGLFunction<GrGLUnmapBufferFn> fUnmapBuffer;
+ GrGLFunction<GrGLUnmapBufferSubDataFn> fUnmapBufferSubData;
+ GrGLFunction<GrGLUnmapTexSubImage2DFn> fUnmapTexSubImage2D;
+ GrGLFunction<GrGLUseProgramFn> fUseProgram;
+ GrGLFunction<GrGLVertexAttrib1fFn> fVertexAttrib1f;
+ GrGLFunction<GrGLVertexAttrib2fvFn> fVertexAttrib2fv;
+ GrGLFunction<GrGLVertexAttrib3fvFn> fVertexAttrib3fv;
+ GrGLFunction<GrGLVertexAttrib4fvFn> fVertexAttrib4fv;
+ GrGLFunction<GrGLVertexAttribDivisorFn> fVertexAttribDivisor;
+ GrGLFunction<GrGLVertexAttribIPointerFn> fVertexAttribIPointer;
+ GrGLFunction<GrGLVertexAttribPointerFn> fVertexAttribPointer;
+ GrGLFunction<GrGLViewportFn> fViewport;
+
+ /* ARB_sync */
+ GrGLFunction<GrGLFenceSyncFn> fFenceSync;
+ GrGLFunction<GrGLIsSyncFn> fIsSync;
+ GrGLFunction<GrGLClientWaitSyncFn> fClientWaitSync;
+ GrGLFunction<GrGLWaitSyncFn> fWaitSync;
+ GrGLFunction<GrGLDeleteSyncFn> fDeleteSync;
+
+ /* ARB_internalformat_query */
+ GrGLFunction<GrGLGetInternalformativFn> fGetInternalformativ;
+
+ /* KHR_debug */
+ GrGLFunction<GrGLDebugMessageControlFn> fDebugMessageControl;
+ GrGLFunction<GrGLDebugMessageInsertFn> fDebugMessageInsert;
+ GrGLFunction<GrGLDebugMessageCallbackFn> fDebugMessageCallback;
+ GrGLFunction<GrGLGetDebugMessageLogFn> fGetDebugMessageLog;
+ GrGLFunction<GrGLPushDebugGroupFn> fPushDebugGroup;
+ GrGLFunction<GrGLPopDebugGroupFn> fPopDebugGroup;
+ GrGLFunction<GrGLObjectLabelFn> fObjectLabel;
+
+ /* EXT_window_rectangles */
+ GrGLFunction<GrGLWindowRectanglesFn> fWindowRectangles;
+
+ /* GL_QCOM_tiled_rendering */
+ GrGLFunction<GrGLStartTilingFn> fStartTiling;
+ GrGLFunction<GrGLEndTilingFn> fEndTiling;
+ } fFunctions;
+
+#if GR_TEST_UTILS
+ // This exists for internal testing.
+ virtual void abandon() const;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/GrGLTypes.h b/gfx/skia/skia/include/gpu/gl/GrGLTypes.h
new file mode 100644
index 0000000000..3af4802eaa
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/GrGLTypes.h
@@ -0,0 +1,208 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLTypes_DEFINED
+#define GrGLTypes_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/gl/GrGLConfig.h"
+
+/**
+ * Classifies GL contexts by which standard they implement (currently as OpenGL vs. OpenGL ES).
+ */
+enum GrGLStandard {
+ kNone_GrGLStandard,
+ kGL_GrGLStandard,
+ kGLES_GrGLStandard,
+ kWebGL_GrGLStandard,
+};
+static const int kGrGLStandardCnt = 4;
+
+// The following allow certain interfaces to be turned off at compile time
+// (for example, to lower code size).
+#if SK_ASSUME_GL_ES
+ #define GR_IS_GR_GL(standard) false
+ #define GR_IS_GR_GL_ES(standard) true
+ #define GR_IS_GR_WEBGL(standard) false
+ #define SK_DISABLE_GL_INTERFACE 1
+ #define SK_DISABLE_WEBGL_INTERFACE 1
+#elif SK_ASSUME_GL
+ #define GR_IS_GR_GL(standard) true
+ #define GR_IS_GR_GL_ES(standard) false
+ #define GR_IS_GR_WEBGL(standard) false
+ #define SK_DISABLE_GL_ES_INTERFACE 1
+ #define SK_DISABLE_WEBGL_INTERFACE 1
+#elif SK_ASSUME_WEBGL
+ #define GR_IS_GR_GL(standard) false
+ #define GR_IS_GR_GL_ES(standard) false
+ #define GR_IS_GR_WEBGL(standard) true
+ #define SK_DISABLE_GL_ES_INTERFACE 1
+ #define SK_DISABLE_GL_INTERFACE 1
+#else
+ #define GR_IS_GR_GL(standard) (kGL_GrGLStandard == standard)
+ #define GR_IS_GR_GL_ES(standard) (kGLES_GrGLStandard == standard)
+ #define GR_IS_GR_WEBGL(standard) (kWebGL_GrGLStandard == standard)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * The supported GL formats represented as an enum. Actual support by GrContext depends on GL
+ * context version and extensions.
+ */
+enum class GrGLFormat {
+ kUnknown,
+
+ kRGBA8,
+ kR8,
+ kALPHA8,
+ kLUMINANCE8,
+ kLUMINANCE8_ALPHA8,
+ kBGRA8,
+ kRGB565,
+ kRGBA16F,
+ kR16F,
+ kRGB8,
+ kRGBX8,
+ kRG8,
+ kRGB10_A2,
+ kRGBA4,
+ kSRGB8_ALPHA8,
+ kCOMPRESSED_ETC1_RGB8,
+ kCOMPRESSED_RGB8_ETC2,
+ kCOMPRESSED_RGB8_BC1,
+ kCOMPRESSED_RGBA8_BC1,
+ kR16,
+ kRG16,
+ kRGBA16,
+ kRG16F,
+ kLUMINANCE16F,
+
+ kLastColorFormat = kLUMINANCE16F,
+
+ // Depth/Stencil formats
+ kSTENCIL_INDEX8,
+ kSTENCIL_INDEX16,
+ kDEPTH24_STENCIL8,
+
+ kLast = kDEPTH24_STENCIL8
+};
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Declares typedefs for all the GL functions used in GrGLInterface
+ */
+
+typedef unsigned int GrGLenum;
+typedef unsigned char GrGLboolean;
+typedef unsigned int GrGLbitfield;
+typedef signed char GrGLbyte;
+typedef char GrGLchar;
+typedef short GrGLshort;
+typedef int GrGLint;
+typedef int GrGLsizei;
+typedef int64_t GrGLint64;
+typedef unsigned char GrGLubyte;
+typedef unsigned short GrGLushort;
+typedef unsigned int GrGLuint;
+typedef uint64_t GrGLuint64;
+typedef unsigned short int GrGLhalf;
+typedef float GrGLfloat;
+typedef float GrGLclampf;
+typedef double GrGLdouble;
+typedef double GrGLclampd;
+typedef void GrGLvoid;
+#ifdef _WIN64
+typedef signed long long int GrGLintptr;
+typedef signed long long int GrGLsizeiptr;
+#else
+typedef signed long int GrGLintptr;
+typedef signed long int GrGLsizeiptr;
+#endif
+typedef void* GrGLeglImage;
+typedef struct __GLsync* GrGLsync;
+
+struct GrGLDrawArraysIndirectCommand {
+ GrGLuint fCount;
+ GrGLuint fInstanceCount;
+ GrGLuint fFirst;
+ GrGLuint fBaseInstance; // Requires EXT_base_instance on ES.
+};
+
+// static_asserts must have messages in this file because its included in C++14 client code.
+static_assert(16 == sizeof(GrGLDrawArraysIndirectCommand), "");
+
+struct GrGLDrawElementsIndirectCommand {
+ GrGLuint fCount;
+ GrGLuint fInstanceCount;
+ GrGLuint fFirstIndex;
+ GrGLuint fBaseVertex;
+ GrGLuint fBaseInstance; // Requires EXT_base_instance on ES.
+};
+
+static_assert(20 == sizeof(GrGLDrawElementsIndirectCommand), "");
+
+/**
+ * KHR_debug
+ */
+typedef void (GR_GL_FUNCTION_TYPE* GRGLDEBUGPROC)(GrGLenum source,
+ GrGLenum type,
+ GrGLuint id,
+ GrGLenum severity,
+ GrGLsizei length,
+ const GrGLchar* message,
+ const void* userParam);
+
+/**
+ * EGL types.
+ */
+typedef void* GrEGLImage;
+typedef void* GrEGLDisplay;
+typedef void* GrEGLContext;
+typedef void* GrEGLClientBuffer;
+typedef unsigned int GrEGLenum;
+typedef int32_t GrEGLint;
+typedef unsigned int GrEGLBoolean;
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Types for interacting with GL resources created externally to Skia. GrBackendObjects for GL
+ * textures are really const GrGLTexture*. The fFormat here should be a sized, internal format
+ * for the texture. We will try to use the sized format if the GL Context supports it, otherwise
+ * we will internally fall back to using the base internal formats.
+ */
+struct GrGLTextureInfo {
+ GrGLenum fTarget;
+ GrGLuint fID;
+ GrGLenum fFormat = 0;
+
+ bool operator==(const GrGLTextureInfo& that) const {
+ return fTarget == that.fTarget && fID == that.fID && fFormat == that.fFormat;
+ }
+};
+
+struct GrGLFramebufferInfo {
+ GrGLuint fFBOID;
+ GrGLenum fFormat = 0;
+
+ bool operator==(const GrGLFramebufferInfo& that) const {
+ return fFBOID == that.fFBOID && fFormat == that.fFormat;
+ }
+};
+
+struct GrGLSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ GrGLenum fTarget = 0;
+ GrGLenum fFormat = 0;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h b/gfx/skia/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h
new file mode 100644
index 0000000000..a3eb420b04
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/egl/GrGLMakeEGLInterface.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/core/SkRefCnt.h"
+
+struct GrGLInterface;
+
+sk_sp<const GrGLInterface> GrGLMakeEGLInterface();
diff --git a/gfx/skia/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h b/gfx/skia/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h
new file mode 100644
index 0000000000..b49cde4589
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/gl/glx/GrGLMakeGLXInterface.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/gpu/GrTypes.h"
+
+#include "include/core/SkRefCnt.h"
+
+struct GrGLInterface;
+
+sk_sp<const GrGLInterface> GrGLMakeGLXInterface();
diff --git a/gfx/skia/skia/include/gpu/graphite/BackendTexture.h b/gfx/skia/skia/include/gpu/graphite/BackendTexture.h
new file mode 100644
index 0000000000..2502b819a2
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/BackendTexture.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_BackendTexture_DEFINED
+#define skgpu_graphite_BackendTexture_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/TextureInfo.h"
+
+#ifdef SK_DAWN
+#include "include/gpu/graphite/dawn/DawnTypes.h"
+#endif
+
+#ifdef SK_METAL
+#include "include/gpu/graphite/mtl/MtlGraphiteTypes.h"
+#endif
+
+#ifdef SK_VULKAN
+#include "include/private/gpu/vk/SkiaVulkan.h"
+#endif
+
+namespace skgpu {
+class MutableTextureState;
+class MutableTextureStateRef;
+}
+
+namespace skgpu::graphite {
+
+class BackendTexture {
+public:
+ BackendTexture();
+#ifdef SK_DAWN
+ // Create a BackendTexture from a wgpu::Texture. Texture info will be
+ // queried from the texture. Compared to wgpu::TextureView,
+ // SkImage::readPixels(), SkSurface::readPixels() and
+ // SkSurface::writePixels() are implemented by direct buffer copy. They
+ // should be more efficient. For wgpu::TextureView, those methods will use
+ // create an intermediate wgpu::Texture, and use it to transfer pixels.
+ // Note: for better performance, using wgpu::Texture IS RECOMMENDED.
+ BackendTexture(wgpu::Texture texture);
+ // Create a BackendTexture from a wgpu::TextureView. Texture dimensions and
+ // info have to be provided.
+ // Note: this method is for importing wgpu::TextureView from wgpu::SwapChain
+ // only.
+ BackendTexture(SkISize dimensions,
+ const DawnTextureInfo& info,
+ wgpu::TextureView textureView);
+#endif
+#ifdef SK_METAL
+ // The BackendTexture will not call retain or release on the passed in MtlHandle. Thus the
+ // client must keep the MtlHandle valid until they are no longer using the BackendTexture.
+ BackendTexture(SkISize dimensions, MtlHandle mtlTexture);
+#endif
+
+#ifdef SK_VULKAN
+ BackendTexture(SkISize dimensions,
+ const VulkanTextureInfo&,
+ VkImageLayout,
+ uint32_t queueFamilyIndex,
+ VkImage);
+#endif
+
+ BackendTexture(const BackendTexture&);
+
+ ~BackendTexture();
+
+ BackendTexture& operator=(const BackendTexture&);
+
+ bool operator==(const BackendTexture&) const;
+ bool operator!=(const BackendTexture& that) const { return !(*this == that); }
+
+ bool isValid() const { return fInfo.isValid(); }
+ BackendApi backend() const { return fInfo.backend(); }
+
+ SkISize dimensions() const { return fDimensions; }
+
+ const TextureInfo& info() const { return fInfo; }
+
+ // If the client changes any of the mutable backend state of the GrBackendTexture they should call
+ // this function to inform Skia that those values have changed. The backend API specific state
+ // that can be set from this function are:
+ //
+ // Vulkan: VkImageLayout and QueueFamilyIndex
+ void setMutableState(const skgpu::MutableTextureState&);
+
+#ifdef SK_DAWN
+ wgpu::Texture getDawnTexture() const;
+ wgpu::TextureView getDawnTextureView() const;
+#endif
+#ifdef SK_METAL
+ MtlHandle getMtlTexture() const;
+#endif
+
+#ifdef SK_VULKAN
+ VkImage getVkImage() const;
+ VkImageLayout getVkImageLayout() const;
+ uint32_t getVkQueueFamilyIndex() const;
+#endif
+
+private:
+ sk_sp<MutableTextureStateRef> mutableState() const;
+
+ SkISize fDimensions;
+ TextureInfo fInfo;
+
+ sk_sp<MutableTextureStateRef> fMutableState;
+
+#ifdef SK_DAWN
+ struct Dawn {
+ Dawn(wgpu::Texture texture) : fTexture(std::move(texture)) {}
+ Dawn(wgpu::TextureView textureView) : fTextureView(std::move(textureView)) {}
+
+ bool operator==(const Dawn& that) const {
+ return fTexture.Get() == that.fTexture.Get() &&
+ fTextureView.Get() == that.fTextureView.Get();
+ }
+ bool operator!=(const Dawn& that) const {
+ return !this->operator==(that);
+ }
+ Dawn& operator=(const Dawn& that) {
+ fTexture = that.fTexture;
+ fTextureView = that.fTextureView;
+ return *this;
+ }
+
+ wgpu::Texture fTexture;
+ wgpu::TextureView fTextureView;
+ };
+#endif
+
+ union {
+#ifdef SK_DAWN
+ Dawn fDawn;
+#endif
+#ifdef SK_METAL
+ MtlHandle fMtlTexture;
+#endif
+#ifdef SK_VULKAN
+ VkImage fVkImage;
+#endif
+ };
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_BackendTexture_DEFINED
+
diff --git a/gfx/skia/skia/include/gpu/graphite/Context.h b/gfx/skia/skia/include/gpu/graphite/Context.h
new file mode 100644
index 0000000000..d6da45ad4c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/Context.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_Context_DEFINED
+#define skgpu_graphite_Context_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkShader.h"
+#include "include/gpu/graphite/ContextOptions.h"
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/Recorder.h"
+#include "include/private/base/SingleOwner.h"
+
+#include <memory>
+
+class SkRuntimeEffect;
+
+namespace skgpu::graphite {
+
+class BackendTexture;
+class Buffer;
+class ClientMappedBufferManager;
+class Context;
+class ContextPriv;
+class GlobalCache;
+class PaintOptions;
+class PlotUploadTracker;
+class QueueManager;
+class Recording;
+class ResourceProvider;
+class SharedContext;
+class TextureProxy;
+
+class SK_API Context final {
+public:
+ Context(const Context&) = delete;
+ Context(Context&&) = delete;
+ Context& operator=(const Context&) = delete;
+ Context& operator=(Context&&) = delete;
+
+ ~Context();
+
+ BackendApi backend() const;
+
+ std::unique_ptr<Recorder> makeRecorder(const RecorderOptions& = {});
+
+ bool insertRecording(const InsertRecordingInfo&);
+ bool submit(SyncToCpu = SyncToCpu::kNo);
+
+ void asyncReadPixels(const SkImage* image,
+ const SkColorInfo& dstColorInfo,
+ const SkIRect& srcRect,
+ SkImage::ReadPixelsCallback callback,
+ SkImage::ReadPixelsContext context);
+
+ void asyncReadPixels(const SkSurface* surface,
+ const SkColorInfo& dstColorInfo,
+ const SkIRect& srcRect,
+ SkImage::ReadPixelsCallback callback,
+ SkImage::ReadPixelsContext context);
+
+ /**
+ * Checks whether any asynchronous work is complete and if so calls related callbacks.
+ */
+ void checkAsyncWorkCompletion();
+
+ /**
+ * Called to delete the passed in BackendTexture. This should only be called if the
+ * BackendTexture was created by calling Recorder::createBackendTexture on a Recorder created
+ * from this Context. If the BackendTexture is not valid or does not match the BackendApi of the
+ * Context then nothing happens.
+ *
+ * Otherwise this will delete/release the backend object that is wrapped in the BackendTexture.
+ * The BackendTexture will be reset to an invalid state and should not be used again.
+ */
+ void deleteBackendTexture(BackendTexture&);
+
+ // Provides access to functions that aren't part of the public API.
+ ContextPriv priv();
+ const ContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+ class ContextID {
+ public:
+ static Context::ContextID Next();
+
+ ContextID() : fID(SK_InvalidUniqueID) {}
+
+ bool operator==(const ContextID& that) const { return fID == that.fID; }
+ bool operator!=(const ContextID& that) const { return !(*this == that); }
+
+ void makeInvalid() { fID = SK_InvalidUniqueID; }
+ bool isValid() const { return fID != SK_InvalidUniqueID; }
+
+ private:
+ constexpr ContextID(uint32_t id) : fID(id) {}
+ uint32_t fID;
+ };
+
+ ContextID contextID() const { return fContextID; }
+
+protected:
+ Context(sk_sp<SharedContext>, std::unique_ptr<QueueManager>, const ContextOptions&);
+
+private:
+ friend class ContextPriv;
+ friend class ContextCtorAccessor;
+
+ SingleOwner* singleOwner() const { return &fSingleOwner; }
+
+ // Must be called in Make() to handle one-time GPU setup operations that can possibly fail and
+ // require Context::Make() to return a nullptr.
+ bool finishInitialization();
+
+ void asyncReadPixels(const TextureProxy* textureProxy,
+ const SkImageInfo& srcImageInfo,
+ const SkColorInfo& dstColorInfo,
+ const SkIRect& srcRect,
+ SkImage::ReadPixelsCallback callback,
+ SkImage::ReadPixelsContext context);
+
+ // Inserts a texture to buffer transfer task, used by asyncReadPixels methods
+ struct PixelTransferResult {
+ using ConversionFn = void(void* dst, const void* mappedBuffer);
+ // If null then the transfer could not be performed. Otherwise this buffer will contain
+ // the pixel data when the transfer is complete.
+ sk_sp<Buffer> fTransferBuffer;
+ // If this is null then the transfer buffer will contain the data in the requested
+ // color type. Otherwise, when the transfer is done this must be called to convert
+ // from the transfer buffer's color type to the requested color type.
+ std::function<ConversionFn> fPixelConverter;
+ };
+ PixelTransferResult transferPixels(const TextureProxy*,
+ const SkImageInfo& srcImageInfo,
+ const SkColorInfo& dstColorInfo,
+ const SkIRect& srcRect);
+
+ sk_sp<SharedContext> fSharedContext;
+ std::unique_ptr<ResourceProvider> fResourceProvider;
+ std::unique_ptr<QueueManager> fQueueManager;
+ std::unique_ptr<ClientMappedBufferManager> fMappedBufferManager;
+ std::unique_ptr<PlotUploadTracker> fPlotUploadTracker;
+
+ // In debug builds we guard against improper thread handling. This guard is passed to the
+ // ResourceCache for the Context.
+ mutable SingleOwner fSingleOwner;
+
+#if GRAPHITE_TEST_UTILS
+ // In test builds a Recorder may track the Context that was used to create it.
+ bool fStoreContextRefInRecorder = false;
+ // If this tracking is on, to allow the client to safely delete this Context or its Recorders
+ // in any order we must also track the Recorders created here.
+ std::vector<Recorder*> fTrackedRecorders;
+#endif
+
+ // Needed for MessageBox handling
+ const ContextID fContextID;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_Context_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/ContextOptions.h b/gfx/skia/skia/include/gpu/graphite/ContextOptions.h
new file mode 100644
index 0000000000..2838f10b0d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/ContextOptions.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_ContextOptions_DEFINED
+#define skgpu_graphite_ContextOptions_DEFINED
+
+namespace skgpu { class ShaderErrorHandler; }
+
+namespace skgpu::graphite {
+
+struct SK_API ContextOptions {
+ ContextOptions() {}
+
+ /**
+ * Disables correctness workarounds that are enabled for particular GPUs, OSes, or drivers.
+ * This does not affect code path choices that are made for performance reasons nor does it
+ * override other ContextOption settings.
+ */
+ bool fDisableDriverCorrectnessWorkarounds = false;
+
+ /**
+ * If present, use this object to report shader compilation failures. If not, report failures
+ * via SkDebugf and assert.
+ */
+ skgpu::ShaderErrorHandler* fShaderErrorHandler = nullptr;
+
+ /**
+ * Will the client make sure to only ever be executing one thread that uses the Context and all
+ * derived classes (e.g. Recorders, Recordings, etc.) at a time. If so we can possibly make some
+ * objects (e.g. VulkanMemoryAllocator) not thread safe to improve single thread performance.
+ */
+ bool fClientWillExternallySynchronizeAllThreads = false;
+
+ /**
+ * The maximum size of cache textures used for Skia's Glyph cache.
+ */
+ size_t fGlyphCacheTextureMaximumBytes = 2048 * 1024 * 4;
+
+ /**
+ * Below this threshold size in device space distance field fonts won't be used. Distance field
+ * fonts don't support hinting which is more important at smaller sizes.
+ */
+ float fMinDistanceFieldFontSize = 18;
+
+ /**
+ * Above this threshold size in device space glyphs are drawn as individual paths.
+ */
+#if defined(SK_BUILD_FOR_ANDROID)
+ float fGlyphsAsPathsFontSize = 384;
+#elif defined(SK_BUILD_FOR_MAC)
+ float fGlyphsAsPathsFontSize = 256;
+#else
+ float fGlyphsAsPathsFontSize = 324;
+#endif
+
+ /**
+ * Can the glyph atlas use multiple textures. If allowed, each texture's size is bound by
+ * fGlyphCacheTextureMaximumBytes.
+ */
+ bool fAllowMultipleGlyphCacheTextures = true;
+ bool fSupportBilerpFromGlyphAtlas = false;
+
+#if GRAPHITE_TEST_UTILS
+ /**
+ * Private options that are only meant for testing within Skia's tools.
+ */
+
+ /**
+ * Maximum width and height of internal texture atlases.
+ */
+ int fMaxTextureAtlasSize = 2048;
+
+ /**
+ * If true, will store a pointer in Recorder that points back to the Context
+ * that created it. Used by readPixels() and other methods that normally require a Context.
+ */
+ bool fStoreContextRefInRecorder = false;
+#endif
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_ContextOptions_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/GraphiteTypes.h b/gfx/skia/skia/include/gpu/graphite/GraphiteTypes.h
new file mode 100644
index 0000000000..231f2a5e14
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/GraphiteTypes.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_GraphiteTypes_DEFINED
+#define skgpu_graphite_GraphiteTypes_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/gpu/GpuTypes.h"
+
+#include <memory>
+
+class SkSurface;
+
+namespace skgpu::graphite {
+
+class Recording;
+class Task;
+
+using GpuFinishedContext = void*;
+using GpuFinishedProc = void (*)(GpuFinishedContext finishedContext, CallbackResult);
+
+/**
+ * The fFinishedProc is called when the Recording has been submitted and finished on the GPU, or
+ * when there is a failure that caused it not to be submitted. The callback will always be called
+ * and the caller can use the callback to know it is safe to free any resources associated with
+ * the Recording that they may be holding onto. If the Recording is successfully submitted to the
+ * GPU the callback will be called with CallbackResult::kSuccess once the GPU has finished. All
+ * other cases where some failure occurred it will be called with CallbackResult::kFailed.
+ *
+ * The fTargetSurface, if provided, is used as a target for any draws recorded onto a deferred
+ * canvas returned from Recorder::makeDeferredCanvas. This target surface must be provided iff
+ * the Recording contains any such draws. It must be Graphite-backed and its backing texture's
+ * TextureInfo must match the info provided to the Recorder when making the deferred canvas.
+ *
+ * fTargetTranslation is an additional translation applied to draws targeting fTargetSurface.
+ */
+struct InsertRecordingInfo {
+ Recording* fRecording = nullptr;
+
+ SkSurface* fTargetSurface = nullptr;
+ SkIVector fTargetTranslation = {0, 0};
+
+ GpuFinishedContext fFinishedContext = nullptr;
+ GpuFinishedProc fFinishedProc = nullptr;
+};
+
+/**
+ * The fFinishedProc is called when the Recording has been submitted and finished on the GPU, or
+ * when there is a failure that caused it not to be submitted. The callback will always be called
+ * and the caller can use the callback to know it is safe to free any resources associated with
+ * the Recording that they may be holding onto. If the Recording is successfully submitted to the
+ * GPU the callback will be called with CallbackResult::kSuccess once the GPU has finished. All
+ * other cases where some failure occurred it will be called with CallbackResult::kFailed.
+ */
+struct InsertFinishInfo {
+ GpuFinishedContext fFinishedContext = nullptr;
+ GpuFinishedProc fFinishedProc = nullptr;
+};
+
+/**
+ * Actually submit work to the GPU and track its completion
+ */
+enum class SyncToCpu : bool {
+ kYes = true,
+ kNo = false
+};
+
+/*
+ * For Promise Images - should the Promise Image be fulfilled every time a Recording that references
+ * it is inserted into the Context.
+ */
+enum class Volatile : bool {
+ kNo = false, // only fulfilled once
+ kYes = true // fulfilled on every insertion call
+};
+
+/*
+ * Graphite's different rendering methods each only apply to certain types of draws. This
+ * enum supports decision-making regarding the different renderers and what is being drawn.
+ */
+enum DrawTypeFlags : uint8_t {
+
+ kNone = 0b000,
+
+ // SkCanvas:: drawSimpleText, drawString, drawGlyphs, drawTextBlob, drawSlug
+ kText = 0b001,
+
+ // SkCanvas::drawVertices
+ kDrawVertices = 0b010,
+
+ // All other canvas draw calls
+ kShape = 0b100,
+
+ kMostCommon = kText | kShape,
+ kAll = kText | kDrawVertices | kShape
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_GraphiteTypes_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/ImageProvider.h b/gfx/skia/skia/include/gpu/graphite/ImageProvider.h
new file mode 100644
index 0000000000..2773f03b1d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/ImageProvider.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_ImageProvider_DEFINED
+#define skgpu_graphite_ImageProvider_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkRefCnt.h"
+
+namespace skgpu::graphite {
+
+class Recorder;
+
+/*
+ * This class provides a centralized location for clients to perform any caching of images
+ * they desire. Whenever Graphite encounters an SkImage which is not Graphite-backed
+ * it will call ImageProvider::findOrCreate. The client's derived version of this class should
+ * return a Graphite-backed version of the provided SkImage that meets the specified
+ * requirements.
+ *
+ * Skia requires that 'findOrCreate' return a Graphite-backed image that preserves the dimensions,
+ * number of channels and alpha type of the original image. The bit depth of the
+ * individual channels can change (e.g., 4444 -> 8888 is allowed).
+ * Wrt mipmapping, the returned image can have different mipmap settings than requested. If
+ * mipmapping was requested but not returned, the sampling level will be reduced to linear.
+ * If the requirements are not met by the returned image (modulo the flexibility wrt mipmapping)
+ * Graphite will drop the draw.
+ *
+ * Note: by default, Graphite will not perform any caching of images
+ *
+ * Threading concerns:
+ * If the same ImageProvider is given to multiple Recorders it is up to the
+ * client to handle any required thread synchronization. This is not limited to just
+ * restricting access to whatever map a derived class may have but extends to ensuring
+ * that an image created on one Recorder has had its creation work submitted before it
+ * is used by any work submitted by another Recording. Please note, this requirement
+ * (re the submission of creation work and image usage on different threads) is common to all
+ * graphite SkImages and isn't unique to SkImages returned by the ImageProvider.
+ *
+ * TODO(b/240996632): add documentation re shutdown order.
+ * TODO(b/240997067): add unit tests
+ */
+class SK_API ImageProvider : public SkRefCnt {
+public:
+ // If the client's derived class already has a Graphite-backed image that has the same
+ // contents as 'image' and meets the requirements, then it can be returned.
+ // makeTextureImage can always be called to create an acceptable Graphite-backed image
+ // which could then be cached.
+ virtual sk_sp<SkImage> findOrCreate(Recorder* recorder,
+ const SkImage* image,
+ SkImage::RequiredImageProperties) = 0;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_ImageProvider_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/Recorder.h b/gfx/skia/skia/include/gpu/graphite/Recorder.h
new file mode 100644
index 0000000000..b27f682d2d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/Recorder.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_Recorder_DEFINED
+#define skgpu_graphite_Recorder_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/Recording.h"
+#include "include/private/base/SingleOwner.h"
+#include "include/private/base/SkTArray.h"
+
+#include <vector>
+
+class SkCanvas;
+struct SkImageInfo;
+class SkPixmap;
+
+namespace skgpu {
+class RefCntedCallback;
+class TokenTracker;
+}
+
+namespace sktext::gpu {
+class StrikeCache;
+class TextBlobRedrawCoordinator;
+}
+
+namespace skgpu::graphite {
+
+class AtlasManager;
+class BackendTexture;
+class Caps;
+class Context;
+class Device;
+class DrawBufferManager;
+class GlobalCache;
+class ImageProvider;
+class RecorderPriv;
+class ResourceProvider;
+class RuntimeEffectDictionary;
+class SharedContext;
+class Task;
+class TaskGraph;
+class TextureDataBlock;
+class TextureInfo;
+class UniformDataBlock;
+class UploadBufferManager;
+
+template<typename T> class PipelineDataCache;
+using UniformDataCache = PipelineDataCache<UniformDataBlock>;
+using TextureDataCache = PipelineDataCache<TextureDataBlock>;
+
+struct SK_API RecorderOptions final {
+ RecorderOptions();
+ RecorderOptions(const RecorderOptions&);
+ ~RecorderOptions();
+
+ sk_sp<ImageProvider> fImageProvider;
+};
+
+class SK_API Recorder final {
+public:
+ Recorder(const Recorder&) = delete;
+ Recorder(Recorder&&) = delete;
+ Recorder& operator=(const Recorder&) = delete;
+ Recorder& operator=(Recorder&&) = delete;
+
+ ~Recorder();
+
+ std::unique_ptr<Recording> snap();
+
+ ImageProvider* clientImageProvider() { return fClientImageProvider.get(); }
+ const ImageProvider* clientImageProvider() const { return fClientImageProvider.get(); }
+
+ /**
+ * Creates a new backend gpu texture matching the dimensions and TextureInfo. If an invalid
+ * TextureInfo or a TextureInfo Skia can't support is passed in, this will return an invalid
+ * BackendTexture. Thus the client should check isValid on the returned BackendTexture to know
+ * if it succeeded or not.
+ *
+ * If this does return a valid BackendTexture, the caller is required to use
+ * Recorder::deleteBackendTexture or Context::deleteBackendTexture to delete the texture. It is
+ * safe to use the Context that created this Recorder or any other Recorder created from the
+ * same Context to call deleteBackendTexture.
+ */
+ BackendTexture createBackendTexture(SkISize dimensions, const TextureInfo&);
+
+ /**
+ * If possible, updates a backend texture with the provided pixmap data. The client
+ * should check the return value to see if the update was successful. The client is required
+ * to insert a Recording into the Context and call `submit` to send the upload work to the gpu.
+ * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
+ * means that the backend format is compatible with the base pixmap's colortype. The src data
+ * can be deleted when this call returns.
+ * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
+ * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
+ * Additionally, all the miplevels must be sized correctly (please see
+ * SkMipmap::ComputeLevelSize and ComputeLevelCount).
+ * Note: the pixmap's alphatypes and colorspaces are ignored.
+ * For the Vulkan backend after a successful update the layout of the created VkImage will be:
+ * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ */
+ bool updateBackendTexture(const BackendTexture&,
+ const SkPixmap srcData[],
+ int numLevels);
+
+ /**
+ * Called to delete the passed in BackendTexture. This should only be called if the
+ * BackendTexture was created by calling Recorder::createBackendTexture on a Recorder that is
+ * associated with the same Context. If the BackendTexture is not valid or does not match the
+ * BackendApi of the Recorder then nothing happens.
+ *
+ * Otherwise this will delete/release the backend object that is wrapped in the BackendTexture.
+ * The BackendTexture will be reset to an invalid state and should not be used again.
+ */
+ void deleteBackendTexture(BackendTexture&);
+
+ // Adds a proc that will be moved to the Recording upon snap, subsequently attached to the
+ // CommandBuffer when the Recording is added, and called when that CommandBuffer is submitted
+ // and finishes. If the Recorder or Recording is deleted before the proc is added to the
+ // CommandBuffer, it will be called with result Failure.
+ void addFinishInfo(const InsertFinishInfo&);
+
+ // Returns a canvas that will record to a proxy surface, which must be instantiated on replay.
+ // This can only be called once per Recording; subsequent calls will return null until a
+ // Recording is snapped. Additionally, the returned SkCanvas is only valid until the next
+ // Recording snap, at which point it is deleted.
+ SkCanvas* makeDeferredCanvas(const SkImageInfo&, const TextureInfo&);
+
+ // Provides access to functions that aren't part of the public API.
+ RecorderPriv priv();
+ const RecorderPriv priv() const; // NOLINT(readability-const-return-type)
+
+#if GR_TEST_UTILS
+ bool deviceIsRegistered(Device*);
+#endif
+
+private:
+ friend class Context; // For ctor
+ friend class Device; // For registering and deregistering Devices;
+ friend class RecorderPriv; // for ctor and hidden methods
+
+ Recorder(sk_sp<SharedContext>, const RecorderOptions&);
+
+ SingleOwner* singleOwner() const { return &fSingleOwner; }
+
+ BackendApi backend() const;
+
+ // We keep track of all Devices that are connected to a Recorder. This allows the client to
+ // safely delete an SkSurface or a Recorder in any order. If the client deletes the Recorder
+ // we need to notify all Devices that the Recorder is no longer valid. If we delete the
+ // SkSurface/Device first we will flush all the Device's into the Recorder before deregistering
+ // it from the Recorder.
+ //
+ // We do not need to take a ref on the Device since the Device will flush and deregister itself
+ // in its dtor. There is no other need for the Recorder to know about the Device after this
+ // point.
+ //
+ // Note: We could probably get by with only registering Devices directly connected to
+ // SkSurfaces. All other one off Devices will be created in a controlled scope where the
+ // Recorder should still be valid by the time they need to flush their work when the Device is
+ // deleted. We would have to make sure we safely handle cases where a client calls saveLayer
+ // then either deletes the SkSurface or Recorder before calling restore. For simplicity we just
+ // register every device for now, but if we see extra overhead in pushing back the extra
+ // pointers, we can look into only registering SkSurface Devices.
+ void registerDevice(Device*);
+ void deregisterDevice(const Device*);
+
+ sk_sp<SharedContext> fSharedContext;
+ std::unique_ptr<ResourceProvider> fResourceProvider;
+ std::unique_ptr<RuntimeEffectDictionary> fRuntimeEffectDict;
+
+ std::unique_ptr<TaskGraph> fGraph;
+ std::unique_ptr<UniformDataCache> fUniformDataCache;
+ std::unique_ptr<TextureDataCache> fTextureDataCache;
+ std::unique_ptr<DrawBufferManager> fDrawBufferManager;
+ std::unique_ptr<UploadBufferManager> fUploadBufferManager;
+ std::vector<Device*> fTrackedDevices;
+
+ uint32_t fRecorderID; // Needed for MessageBox handling for text
+ std::unique_ptr<AtlasManager> fAtlasManager;
+ std::unique_ptr<TokenTracker> fTokenTracker;
+ std::unique_ptr<sktext::gpu::StrikeCache> fStrikeCache;
+ std::unique_ptr<sktext::gpu::TextBlobRedrawCoordinator> fTextBlobCache;
+ sk_sp<ImageProvider> fClientImageProvider;
+
+ // In debug builds we guard against improper thread handling
+ // This guard is passed to the ResourceCache.
+ // TODO: Should we also pass this to Device, DrawContext, and similar classes?
+ mutable SingleOwner fSingleOwner;
+
+ sk_sp<Device> fTargetProxyDevice;
+ std::unique_ptr<SkCanvas> fTargetProxyCanvas;
+ std::unique_ptr<Recording::LazyProxyData> fTargetProxyData;
+
+ SkTArray<sk_sp<RefCntedCallback>> fFinishedProcs;
+
+#if GRAPHITE_TEST_UTILS
+ // For testing use only -- the Context used to create this Recorder
+ Context* fContext = nullptr;
+#endif
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_Recorder_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/Recording.h b/gfx/skia/skia/include/gpu/graphite/Recording.h
new file mode 100644
index 0000000000..6a94ab84b8
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/Recording.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_Recording_DEFINED
+#define skgpu_graphite_Recording_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/base/SkTArray.h"
+
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+namespace skgpu {
+class RefCntedCallback;
+}
+
+namespace skgpu::graphite {
+
+class CommandBuffer;
+class RecordingPriv;
+class Resource;
+class ResourceProvider;
+class TaskGraph;
+class Texture;
+class TextureInfo;
+class TextureProxy;
+
+class Recording final {
+public:
+ ~Recording();
+
+ RecordingPriv priv();
+
+#if GRAPHITE_TEST_UTILS
+ bool isTargetProxyInstantiated() const;
+#endif
+
+private:
+ friend class Recorder; // for ctor and LazyProxyData
+ friend class RecordingPriv;
+
+ // LazyProxyData is used if this recording should be replayed to a target that is provided on
+ // replay, and it handles the target proxy's instantiation with the provided target.
+ class LazyProxyData {
+ public:
+ LazyProxyData(const TextureInfo&);
+
+ TextureProxy* lazyProxy();
+ sk_sp<TextureProxy> refLazyProxy();
+
+ bool lazyInstantiate(ResourceProvider*, sk_sp<Texture>);
+
+ private:
+ sk_sp<Texture> fTarget;
+ sk_sp<TextureProxy> fTargetProxy;
+ };
+
+ struct ProxyHash {
+ std::size_t operator()(const sk_sp<TextureProxy>& proxy) const {
+ return SkGoodHash()(proxy.get());
+ }
+ };
+
+ Recording(std::unique_ptr<TaskGraph>,
+ std::unordered_set<sk_sp<TextureProxy>, ProxyHash>&& nonVolatileLazyProxies,
+ std::unordered_set<sk_sp<TextureProxy>, ProxyHash>&& volatileLazyProxies,
+ std::unique_ptr<LazyProxyData> targetProxyData,
+ SkTArray<sk_sp<RefCntedCallback>>&& finishedProcs);
+
+ bool addCommands(CommandBuffer*, ResourceProvider*);
+ void addResourceRef(sk_sp<Resource>);
+
+ std::unique_ptr<TaskGraph> fGraph;
+ // We don't always take refs to all resources used by specific Tasks (e.g. a common buffer used
+ // for uploads). Instead we'll just hold onto one ref for those Resources outside the Tasks.
+ // Those refs are stored in the array here and will eventually be passed onto a CommandBuffer
+ // when the Recording adds its commands.
+ std::vector<sk_sp<Resource>> fExtraResourceRefs;
+
+ std::unordered_set<sk_sp<TextureProxy>, ProxyHash> fNonVolatileLazyProxies;
+ std::unordered_set<sk_sp<TextureProxy>, ProxyHash> fVolatileLazyProxies;
+
+ std::unique_ptr<LazyProxyData> fTargetProxyData;
+
+ SkTArray<sk_sp<RefCntedCallback>> fFinishedProcs;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_Recording_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/TextureInfo.h b/gfx/skia/skia/include/gpu/graphite/TextureInfo.h
new file mode 100644
index 0000000000..dd4e6698c3
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/TextureInfo.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_TextureInfo_DEFINED
+#define skgpu_graphite_TextureInfo_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+
+#ifdef SK_DAWN
+#include "include/private/gpu/graphite/DawnTypesPriv.h"
+#endif
+
+#ifdef SK_METAL
+#include "include/private/gpu/graphite/MtlGraphiteTypesPriv.h"
+#endif
+
+#ifdef SK_VULKAN
+#include "include/private/gpu/graphite/VulkanGraphiteTypesPriv.h"
+#endif
+
+namespace skgpu::graphite {
+
+class TextureInfo {
+public:
+ TextureInfo() {}
+#ifdef SK_DAWN
+ TextureInfo(const DawnTextureInfo& dawnInfo)
+ : fBackend(BackendApi::kDawn)
+ , fValid(true)
+ , fSampleCount(dawnInfo.fSampleCount)
+ , fMipmapped(dawnInfo.fMipmapped)
+ , fProtected(Protected::kNo)
+ , fDawnSpec(dawnInfo) {}
+#endif
+
+#ifdef SK_METAL
+ TextureInfo(const MtlTextureInfo& mtlInfo)
+ : fBackend(BackendApi::kMetal)
+ , fValid(true)
+ , fSampleCount(mtlInfo.fSampleCount)
+ , fMipmapped(mtlInfo.fMipmapped)
+ , fProtected(Protected::kNo)
+ , fMtlSpec(mtlInfo) {}
+#endif
+
+#ifdef SK_VULKAN
+ TextureInfo(const VulkanTextureInfo& vkInfo)
+ : fBackend(BackendApi::kVulkan)
+ , fValid(true)
+ , fSampleCount(vkInfo.fSampleCount)
+ , fMipmapped(vkInfo.fMipmapped)
+ , fProtected(Protected::kNo)
+ , fVkSpec(vkInfo) {
+ if (vkInfo.fFlags & VK_IMAGE_CREATE_PROTECTED_BIT) {
+ fProtected = Protected::kYes;
+ }
+ }
+#endif
+
+ ~TextureInfo() {}
+ TextureInfo(const TextureInfo&) = default;
+ TextureInfo& operator=(const TextureInfo&);
+
+ bool operator==(const TextureInfo&) const;
+ bool operator!=(const TextureInfo& that) const { return !(*this == that); }
+
+ bool isValid() const { return fValid; }
+ BackendApi backend() const { return fBackend; }
+
+ uint32_t numSamples() const { return fSampleCount; }
+ Mipmapped mipmapped() const { return fMipmapped; }
+ Protected isProtected() const { return fProtected; }
+
+#ifdef SK_DAWN
+ bool getDawnTextureInfo(DawnTextureInfo* info) const {
+ if (!this->isValid() || fBackend != BackendApi::kDawn) {
+ return false;
+ }
+ *info = DawnTextureSpecToTextureInfo(fDawnSpec, fSampleCount, fMipmapped);
+ return true;
+ }
+#endif
+
+#ifdef SK_METAL
+ bool getMtlTextureInfo(MtlTextureInfo* info) const {
+ if (!this->isValid() || fBackend != BackendApi::kMetal) {
+ return false;
+ }
+ *info = MtlTextureSpecToTextureInfo(fMtlSpec, fSampleCount, fMipmapped);
+ return true;
+ }
+#endif
+
+#ifdef SK_VULKAN
+ bool getVulkanTextureInfo(VulkanTextureInfo* info) const {
+ if (!this->isValid() || fBackend != BackendApi::kVulkan) {
+ return false;
+ }
+ *info = VulkanTextureSpecToTextureInfo(fVkSpec, fSampleCount, fMipmapped);
+ return true;
+ }
+#endif
+
+private:
+#ifdef SK_DAWN
+ friend class DawnCaps;
+ friend class DawnCommandBuffer;
+ friend class DawnGraphicsPipeline;
+ friend class DawnResourceProvider;
+ friend class DawnTexture;
+ const DawnTextureSpec& dawnTextureSpec() const {
+ SkASSERT(fValid && fBackend == BackendApi::kDawn);
+ return fDawnSpec;
+ }
+#endif
+
+#ifdef SK_METAL
+ friend class MtlCaps;
+ friend class MtlGraphicsPipeline;
+ friend class MtlTexture;
+ const MtlTextureSpec& mtlTextureSpec() const {
+ SkASSERT(fValid && fBackend == BackendApi::kMetal);
+ return fMtlSpec;
+ }
+#endif
+
+#ifdef SK_VULKAN
+ friend class VulkanCaps;
+ friend class VulkanTexture;
+ const VulkanTextureSpec& vulkanTextureSpec() const {
+ SkASSERT(fValid && fBackend == BackendApi::kVulkan);
+ return fVkSpec;
+ }
+#endif
+
+ BackendApi fBackend = BackendApi::kMock;
+ bool fValid = false;
+
+ uint32_t fSampleCount = 1;
+ Mipmapped fMipmapped = Mipmapped::kNo;
+ Protected fProtected = Protected::kNo;
+
+ union {
+#ifdef SK_DAWN
+ DawnTextureSpec fDawnSpec;
+#endif
+#ifdef SK_METAL
+ MtlTextureSpec fMtlSpec;
+#endif
+#ifdef SK_VULKAN
+ VulkanTextureSpec fVkSpec;
+#endif
+ };
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_TextureInfo_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/YUVABackendTextures.h b/gfx/skia/skia/include/gpu/graphite/YUVABackendTextures.h
new file mode 100644
index 0000000000..c3b80ae196
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/YUVABackendTextures.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_YUVABackendTextures_DEFINED
+#define skgpu_graphite_YUVABackendTextures_DEFINED
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkYUVAInfo.h"
+#include "include/gpu/graphite/BackendTexture.h"
+
+#include <tuple>
+
+namespace skgpu::graphite {
+class Recorder;
+
+/**
+ * A description of a set of BackendTextures that hold the planar data described by a SkYUVAInfo.
+ */
+class SK_API YUVABackendTextureInfo {
+public:
+ static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes;
+
+ /** Default YUVABackendTextureInfo is invalid. */
+ YUVABackendTextureInfo() = default;
+ YUVABackendTextureInfo(const YUVABackendTextureInfo&) = default;
+ YUVABackendTextureInfo& operator=(const YUVABackendTextureInfo&) = default;
+
+ /**
+ * Initializes a YUVABackendTextureInfo to describe a set of textures that can store the
+ * planes indicated by the SkYUVAInfo. The texture dimensions are taken from the SkYUVAInfo's
+ * plane dimensions. All the described textures share a common origin. The planar image this
+ * describes will be mip mapped if all the textures are individually mip mapped as indicated
+ * by Mipmapped. This will produce an invalid result (return false from isValid()) if the
+ * passed formats' channels don't agree with SkYUVAInfo.
+ */
+ YUVABackendTextureInfo(const Recorder*,
+ const SkYUVAInfo&,
+ const TextureInfo[kMaxPlanes],
+ Mipmapped);
+
+ bool operator==(const YUVABackendTextureInfo&) const;
+ bool operator!=(const YUVABackendTextureInfo& that) const { return !(*this == that); }
+
+ /** TextureInfo for the ith plane, or invalid if i >= numPlanes() */
+ const TextureInfo& planeTextureInfo(int i) const {
+ SkASSERT(i >= 0);
+ return fPlaneTextureInfos[static_cast<size_t>(i)];
+ }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); }
+
+ Mipmapped mipmapped() const { return fMipmapped; }
+
+ /** The number of planes, 0 if this YUVABackendTextureInfo is invalid. */
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ /**
+ * Returns true if this has been configured with a valid SkYUVAInfo with compatible texture
+ * formats.
+ */
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ std::array<TextureInfo, kMaxPlanes> fPlaneTextureInfos;
+ std::array<uint32_t, kMaxPlanes> fPlaneChannelMasks;
+ Mipmapped fMipmapped = Mipmapped::kNo;
+};
+
+/**
+ * A set of BackendTextures that hold the planar data for an image described by a SkYUVAInfo.
+ */
+class SK_API YUVABackendTextures {
+public:
+ static constexpr auto kMaxPlanes = SkYUVAInfo::kMaxPlanes;
+
+ YUVABackendTextures() = default;
+ YUVABackendTextures(const YUVABackendTextures&) = delete;
+ YUVABackendTextures& operator=(const YUVABackendTextures&) = delete;
+
+ /**
+ * Initializes a YUVABackendTextures object from a set of textures that store the planes
+ * indicated by the SkYUVAInfo. This will produce an invalid result (return false from
+ * isValid()) if the passed texture formats' channels don't agree with SkYUVAInfo.
+ */
+ YUVABackendTextures(const Recorder*,
+ const SkYUVAInfo&,
+ const BackendTexture[kMaxPlanes]);
+
+ SkSpan<const BackendTexture> planeTextures() const {
+ return SkSpan<const BackendTexture>(fPlaneTextures);
+ }
+
+ /** BackendTexture for the ith plane, or invalid if i >= numPlanes() */
+ BackendTexture planeTexture(int i) const {
+ SkASSERT(i >= 0);
+ return fPlaneTextures[static_cast<size_t>(i)];
+ }
+
+ const SkYUVAInfo& yuvaInfo() const { return fYUVAInfo; }
+
+ SkYUVColorSpace yuvColorSpace() const { return fYUVAInfo.yuvColorSpace(); }
+
+    /** The number of planes, 0 if this YUVABackendTextures is invalid. */
+ int numPlanes() const { return fYUVAInfo.numPlanes(); }
+
+ /**
+ * Returns true if this has been configured with a valid SkYUVAInfo with compatible texture
+ * formats.
+ */
+ bool isValid() const { return fYUVAInfo.isValid(); }
+
+ /**
+ * Computes a YUVALocations representation of the planar layout. The result is guaranteed to be
+ * valid if this->isValid().
+ */
+ SkYUVAInfo::YUVALocations toYUVALocations() const;
+
+private:
+ SkYUVAInfo fYUVAInfo;
+ std::array<BackendTexture, kMaxPlanes> fPlaneTextures;
+ std::array<uint32_t, kMaxPlanes> fPlaneChannelMasks;
+};
+
+} // End of namespace skgpu::graphite
+
+#endif // skgpu_graphite_YUVABackendTextures_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/dawn/DawnBackendContext.h b/gfx/skia/skia/include/gpu/graphite/dawn/DawnBackendContext.h
new file mode 100644
index 0000000000..99282c4d76
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/dawn/DawnBackendContext.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_DawnBackendContext_DEFINED
+#define skgpu_graphite_DawnBackendContext_DEFINED
+
+#include "webgpu/webgpu_cpp.h"
+
+namespace skgpu::graphite {
+
+// The DawnBackendContext contains all of the base Dawn objects needed by the graphite Dawn
+// backend. The client will create this object and pass it into the Context::MakeDawn factory call
+// when setting up Skia.
+struct SK_API DawnBackendContext {
+ wgpu::Device fDevice;
+ wgpu::Queue fQueue;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_DawnBackendContext_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/dawn/DawnTypes.h b/gfx/skia/skia/include/gpu/graphite/dawn/DawnTypes.h
new file mode 100644
index 0000000000..291be75630
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/dawn/DawnTypes.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_DawnTypes_DEFINED
+#define skgpu_graphite_DawnTypes_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "webgpu/webgpu_cpp.h"
+
+namespace skgpu::graphite {
+
+struct DawnTextureInfo {
+ uint32_t fSampleCount = 1;
+ Mipmapped fMipmapped = Mipmapped::kNo;
+
+ // wgpu::TextureDescriptor properties
+ wgpu::TextureFormat fFormat = wgpu::TextureFormat::Undefined;
+ wgpu::TextureUsage fUsage = wgpu::TextureUsage::None;
+
+ DawnTextureInfo() = default;
+ DawnTextureInfo(const wgpu::Texture& texture);
+ DawnTextureInfo(uint32_t sampleCount,
+ Mipmapped mipmapped,
+ wgpu::TextureFormat format,
+ wgpu::TextureUsage usage)
+ : fSampleCount(sampleCount)
+ , fMipmapped(mipmapped)
+ , fFormat(format)
+ , fUsage(usage) {}
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_DawnTypes_DEFINED
+
+
diff --git a/gfx/skia/skia/include/gpu/graphite/dawn/DawnUtils.h b/gfx/skia/skia/include/gpu/graphite/dawn/DawnUtils.h
new file mode 100644
index 0000000000..ef1b57c9e0
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/dawn/DawnUtils.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_DawnUtils_DEFINED
+#define skgpu_graphite_DawnUtils_DEFINED
+
+#include <memory>
+
+namespace skgpu::graphite {
+
+class Context;
+struct ContextOptions;
+struct DawnBackendContext;
+
+namespace ContextFactory {
+std::unique_ptr<Context> MakeDawn(const DawnBackendContext&, const ContextOptions&);
+} // namespace ContextFactory
+
+} // namespace skgpu::graphite
+
+
+#endif // skgpu_graphite_DawnUtils_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/mtl/MtlBackendContext.h b/gfx/skia/skia/include/gpu/graphite/mtl/MtlBackendContext.h
new file mode 100644
index 0000000000..9d6d0192d1
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/mtl/MtlBackendContext.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_MtlBackendContext_DEFINED
+#define skgpu_graphite_MtlBackendContext_DEFINED
+
+#include "include/gpu/graphite/mtl/MtlGraphiteTypes.h"
+
+namespace skgpu::graphite {
+
+// The MtlBackendContext contains all of the base Metal objects needed by the graphite Metal
+// backend. The client will create this object and pass it into the Context::MakeMetal factory call
+// when setting up Skia.
+struct SK_API MtlBackendContext {
+ sk_cfp<CFTypeRef> fDevice;
+ sk_cfp<CFTypeRef> fQueue;
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_MtlBackendContext_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteTypes.h b/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteTypes.h
new file mode 100644
index 0000000000..bc04421643
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteTypes.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_MtlGraphiteTypes_DEFINED
+#define skgpu_graphite_MtlGraphiteTypes_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/ports/SkCFObject.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef __APPLE__
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <TargetConditionals.h>
+
+#if TARGET_OS_SIMULATOR
+#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(13.0))
+#else // TARGET_OS_SIMULATOR
+#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(8.0))
+#endif // TARGET_OS_SIMULATOR
+
+#endif // __APPLE__
+
+
+namespace skgpu::graphite {
+
+/**
+ * Declares typedefs for Metal types used in Graphite cpp code
+ */
+using MtlPixelFormat = unsigned int;
+using MtlTextureUsage = unsigned int;
+using MtlStorageMode = unsigned int;
+using MtlHandle = const void*;
+
+struct MtlTextureInfo {
+ uint32_t fSampleCount = 1;
+ skgpu::Mipmapped fMipmapped = skgpu::Mipmapped::kNo;
+
+ // Since we aren't in an Obj-C header we can't directly use Mtl types here. Each of these can
+ // cast to their mapped Mtl types list below.
+ MtlPixelFormat fFormat = 0; // MTLPixelFormat fFormat = MTLPixelFormatInvalid;
+ MtlTextureUsage fUsage = 0; // MTLTextureUsage fUsage = MTLTextureUsageUnknown;
+ MtlStorageMode fStorageMode = 0; // MTLStorageMode fStorageMode = MTLStorageModeShared;
+ bool fFramebufferOnly = false;
+
+ MtlTextureInfo() = default;
+ MtlTextureInfo(MtlHandle mtlTexture);
+ MtlTextureInfo(uint32_t sampleCount,
+ skgpu::Mipmapped mipmapped,
+ MtlPixelFormat format,
+ MtlTextureUsage usage,
+ MtlStorageMode storageMode,
+ bool framebufferOnly)
+ : fSampleCount(sampleCount)
+ , fMipmapped(mipmapped)
+ , fFormat(format)
+ , fUsage(usage)
+ , fStorageMode(storageMode)
+ , fFramebufferOnly(framebufferOnly) {}
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_MtlGraphiteTypes_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h b/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h
new file mode 100644
index 0000000000..681f0867ae
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/mtl/MtlGraphiteUtils.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_MtlGraphiteUtils_DEFINED
+#define skgpu_graphite_MtlGraphiteUtils_DEFINED
+
+#include <memory>
+
+namespace skgpu::graphite {
+
+class Context;
+struct ContextOptions;
+struct MtlBackendContext;
+
+namespace ContextFactory {
+std::unique_ptr<Context> MakeMetal(const MtlBackendContext&, const ContextOptions&);
+} // namespace ContextFactory
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_MtlGraphiteUtils_DEFINED
diff --git a/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h b/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h
new file mode 100644
index 0000000000..bd448d2ca6
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteTypes.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_VulkanGraphiteTypes_DEFINED
+#define skgpu_graphite_VulkanGraphiteTypes_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
+namespace skgpu::graphite {
+
+struct VulkanTextureInfo {
+ uint32_t fSampleCount = 1;
+ Mipmapped fMipmapped = Mipmapped::kNo;
+
+ // VkImageCreateInfo properties
+ // Currently the only supported flag is VK_IMAGE_CREATE_PROTECTED_BIT. Any other flag will not
+ // be accepted
+ VkImageCreateFlags fFlags = 0;
+ VkFormat fFormat = VK_FORMAT_UNDEFINED;
+ VkImageTiling fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ VkImageUsageFlags fImageUsageFlags = 0;
+ VkSharingMode fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+
+ // Properties related to the image view and sampling. These are less inherent properties of the
+ // VkImage but describe how the VkImage should be used within Skia.
+
+ // What aspect to use for the VkImageView. The normal, default is VK_IMAGE_ASPECT_COLOR_BIT.
+ // However, if the VkImage is a Ycbcr format, the client can pass a specific plane here to have
+ // Skia directly sample a plane. In that case the client should also pass in a VkFormat that is
+ // compatible with the plane as described by the Vulkan spec.
+ VkImageAspectFlags fAspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ // TODO: Either Make the ycbcr conversion info shareable with Ganesh or add a version for
+ // Graphite.
+ // GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+
+ VulkanTextureInfo() = default;
+ VulkanTextureInfo(uint32_t sampleCount,
+ Mipmapped mipmapped,
+ VkImageCreateFlags flags,
+ VkFormat format,
+ VkImageTiling imageTiling,
+ VkImageUsageFlags imageUsageFlags,
+ VkSharingMode sharingMode,
+ VkImageAspectFlags aspectMask)
+ : fSampleCount(sampleCount)
+ , fMipmapped(mipmapped)
+ , fFlags(flags)
+ , fFormat(format)
+ , fImageTiling(imageTiling)
+ , fImageUsageFlags(imageUsageFlags)
+ , fSharingMode(sharingMode)
+ , fAspectMask(aspectMask) {}
+};
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_VulkanGraphiteTypes_DEFINED
+
+
diff --git a/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h b/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h
new file mode 100644
index 0000000000..07c76a332d
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/graphite/vk/VulkanGraphiteUtils.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_VulkanGraphiteUtils_DEFINED
+#define skgpu_graphite_VulkanGraphiteUtils_DEFINED
+
+#include <memory>
+
+namespace skgpu { struct VulkanBackendContext; }
+
+namespace skgpu::graphite {
+
+class Context;
+struct ContextOptions;
+
+namespace ContextFactory {
+std::unique_ptr<Context> MakeVulkan(const VulkanBackendContext&, const ContextOptions&);
+} // namespace ContextFactory
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_VulkanGraphiteUtils_DEFINED
diff --git a/gfx/skia/skia/include/gpu/mock/GrMockTypes.h b/gfx/skia/skia/include/gpu/mock/GrMockTypes.h
new file mode 100644
index 0000000000..dfa648086c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mock/GrMockTypes.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockOptions_DEFINED
+#define GrMockOptions_DEFINED
+
+#include "include/core/SkTextureCompressionType.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+
+class GrBackendFormat;
+
+struct GrMockTextureInfo {
+ GrMockTextureInfo()
+ : fColorType(GrColorType::kUnknown)
+ , fCompressionType(SkTextureCompressionType::kNone)
+ , fID(0) {}
+
+ GrMockTextureInfo(GrColorType colorType,
+ SkTextureCompressionType compressionType,
+ int id)
+ : fColorType(colorType)
+ , fCompressionType(compressionType)
+ , fID(id) {
+ SkASSERT(fID);
+ if (fCompressionType != SkTextureCompressionType::kNone) {
+ SkASSERT(colorType == GrColorType::kUnknown);
+ }
+ }
+
+ bool operator==(const GrMockTextureInfo& that) const {
+ return fColorType == that.fColorType &&
+ fCompressionType == that.fCompressionType &&
+ fID == that.fID;
+ }
+
+ GrBackendFormat getBackendFormat() const;
+
+ SkTextureCompressionType compressionType() const { return fCompressionType; }
+
+ GrColorType colorType() const {
+ SkASSERT(fCompressionType == SkTextureCompressionType::kNone);
+ return fColorType;
+ }
+
+ int id() const { return fID; }
+
+private:
+ GrColorType fColorType;
+ SkTextureCompressionType fCompressionType;
+ int fID;
+};
+
+struct GrMockRenderTargetInfo {
+ GrMockRenderTargetInfo()
+ : fColorType(GrColorType::kUnknown)
+ , fID(0) {}
+
+ GrMockRenderTargetInfo(GrColorType colorType, int id)
+ : fColorType(colorType)
+ , fID(id) {
+ SkASSERT(fID);
+ }
+
+ bool operator==(const GrMockRenderTargetInfo& that) const {
+ return fColorType == that.fColorType &&
+ fID == that.fID;
+ }
+
+ GrBackendFormat getBackendFormat() const;
+
+ GrColorType colorType() const { return fColorType; }
+
+private:
+ GrColorType fColorType;
+ int fID;
+};
+
+struct GrMockSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ GrColorType fColorType = GrColorType::kUnknown;
+ SkTextureCompressionType fCompressionType = SkTextureCompressionType::kNone;
+};
+
+static constexpr int kSkTextureCompressionTypeCount = static_cast<int>(SkTextureCompressionType::kLast) + 1;
+
+/**
+ * A pointer to this type is used as the GrBackendContext when creating a Mock GrContext. It can be
+ * used to specify capability options for the mock context. If nullptr is used a default constructed
+ * GrMockOptions is used.
+ */
+struct GrMockOptions {
+ GrMockOptions() {
+ using Renderability = ConfigOptions::Renderability;
+ // By default RGBA_8888 and BGRA_8888 are texturable and renderable and
+ // A8 and RGB565 are texturable.
+ fConfigOptions[(int)GrColorType::kRGBA_8888].fRenderability = Renderability::kNonMSAA;
+ fConfigOptions[(int)GrColorType::kRGBA_8888].fTexturable = true;
+ fConfigOptions[(int)GrColorType::kAlpha_8].fTexturable = true;
+ fConfigOptions[(int)GrColorType::kBGR_565].fTexturable = true;
+
+ fConfigOptions[(int)GrColorType::kBGRA_8888] = fConfigOptions[(int)GrColorType::kRGBA_8888];
+
+ fCompressedOptions[(int)SkTextureCompressionType::kETC2_RGB8_UNORM].fTexturable = true;
+ fCompressedOptions[(int)SkTextureCompressionType::kBC1_RGB8_UNORM].fTexturable = true;
+ fCompressedOptions[(int)SkTextureCompressionType::kBC1_RGBA8_UNORM].fTexturable = true;
+ }
+
+ struct ConfigOptions {
+ enum Renderability { kNo, kNonMSAA, kMSAA };
+ Renderability fRenderability = kNo;
+ bool fTexturable = false;
+ };
+
+ // GrCaps options.
+ bool fMipmapSupport = false;
+ bool fDrawInstancedSupport = false;
+ bool fHalfFloatVertexAttributeSupport = false;
+ uint32_t fMapBufferFlags = 0;
+ int fMaxTextureSize = 2048;
+ int fMaxRenderTargetSize = 2048;
+ int fMaxWindowRectangles = 0;
+ int fMaxVertexAttributes = 16;
+ ConfigOptions fConfigOptions[kGrColorTypeCnt];
+ ConfigOptions fCompressedOptions[kSkTextureCompressionTypeCount];
+
+ // GrShaderCaps options.
+ bool fIntegerSupport = false;
+ bool fFlatInterpolationSupport = false;
+ int fMaxVertexSamplers = 0;
+ int fMaxFragmentSamplers = 8;
+ bool fShaderDerivativeSupport = true;
+ bool fDualSourceBlendingSupport = false;
+
+ // GrMockGpu options.
+ bool fFailTextureAllocations = false;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/mtl/GrMtlBackendContext.h b/gfx/skia/skia/include/gpu/mtl/GrMtlBackendContext.h
new file mode 100644
index 0000000000..0d88f479ac
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mtl/GrMtlBackendContext.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlBackendContext_DEFINED
+#define GrMtlBackendContext_DEFINED
+
+#include "include/gpu/mtl/GrMtlTypes.h"
+
+// The BackendContext contains all of the base Metal objects needed by the GrMtlGpu. The assumption
+// is that the client will set these up and pass them to the GrMtlGpu constructor.
+struct SK_API GrMtlBackendContext {
+ sk_cfp<GrMTLHandle> fDevice;
+ sk_cfp<GrMTLHandle> fQueue;
+ sk_cfp<GrMTLHandle> fBinaryArchive;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h b/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h
new file mode 100644
index 0000000000..7c0d620e06
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mtl/GrMtlTypes.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlTypes_DEFINED
+#define GrMtlTypes_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+#include "include/ports/SkCFObject.h"
+
+/**
+ * Declares typedefs for Metal types used in Ganesh cpp code
+ */
+using GrMTLPixelFormat = unsigned int;
+using GrMTLTextureUsage = unsigned int;
+using GrMTLStorageMode = unsigned int;
+using GrMTLHandle = const void*;
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef __APPLE__
+
+#include <TargetConditionals.h>
+
+#if TARGET_OS_SIMULATOR
+#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(13.0))
+#else // TARGET_OS_SIMULATOR
+#define SK_API_AVAILABLE_CA_METAL_LAYER SK_API_AVAILABLE(macos(10.11), ios(8.0))
+#endif // TARGET_OS_SIMULATOR
+
+/**
+ * Types for interacting with Metal resources created externally to Skia.
+ * This is used by GrBackendObjects.
+ */
+struct GrMtlTextureInfo {
+public:
+ GrMtlTextureInfo() {}
+
+ sk_cfp<GrMTLHandle> fTexture;
+
+ bool operator==(const GrMtlTextureInfo& that) const {
+ return fTexture == that.fTexture;
+ }
+};
+
+struct GrMtlSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ // Since we aren't in an Obj-C header we can't directly use Mtl types here. Each of these can
+ // cast to their mapped Mtl types list below.
+ GrMTLPixelFormat fFormat = 0; // MTLPixelFormat fFormat = MTLPixelFormatInvalid;
+ GrMTLTextureUsage fUsage = 0; // MTLTextureUsage fUsage = MTLTextureUsageUnknown;
+ GrMTLStorageMode fStorageMode = 0; // MTLStorageMode fStorageMode = MTLStorageModeShared;
+};
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/mtl/MtlMemoryAllocator.h b/gfx/skia/skia/include/gpu/mtl/MtlMemoryAllocator.h
new file mode 100644
index 0000000000..425c461791
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/mtl/MtlMemoryAllocator.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_MtlMemoryAllocator_DEFINED
+#define skgpu_MtlMemoryAllocator_DEFINED
+
+#ifdef __APPLE__
+
+#ifdef __OBJC__
+#import <Metal/Metal.h>
+#endif
+
+namespace skgpu {
+
+// interface classes for the GPU memory allocator
+class MtlAlloc : public SkRefCnt {
+public:
+ ~MtlAlloc() override = default;
+};
+
+#ifdef __OBJC__
+class MtlMemoryAllocator : public SkRefCnt {
+public:
+ virtual id<MTLBuffer> newBufferWithLength(NSUInteger length, MTLResourceOptions options,
+ sk_sp<MtlAlloc>* allocation) = 0;
+ virtual id<MTLTexture> newTextureWithDescriptor(MTLTextureDescriptor* texDesc,
+ sk_sp<MtlAlloc>* allocation) = 0;
+};
+#endif
+
+} // namespace skgpu
+
+#endif // __APPLE__
+
+#endif // skgpu_MtlMemoryAllocator_DEFINED
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h b/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h
new file mode 100644
index 0000000000..23c1b0deaf
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkBackendContext.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBackendContext_DEFINED
+#define GrVkBackendContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkTypes.h"
+#include "include/gpu/vk/VulkanMemoryAllocator.h"
+
+namespace skgpu { class VulkanExtensions; }
+
+enum GrVkExtensionFlags {
+ kEXT_debug_report_GrVkExtensionFlag = 0x0001,
+ kNV_glsl_shader_GrVkExtensionFlag = 0x0002,
+ kKHR_surface_GrVkExtensionFlag = 0x0004,
+ kKHR_swapchain_GrVkExtensionFlag = 0x0008,
+ kKHR_win32_surface_GrVkExtensionFlag = 0x0010,
+ kKHR_android_surface_GrVkExtensionFlag = 0x0020,
+ kKHR_xcb_surface_GrVkExtensionFlag = 0x0040,
+};
+
+enum GrVkFeatureFlags {
+ kGeometryShader_GrVkFeatureFlag = 0x0001,
+ kDualSrcBlend_GrVkFeatureFlag = 0x0002,
+ kSampleRateShading_GrVkFeatureFlag = 0x0004,
+};
+
+// It is not guaranteed VkPhysicalDeviceProperties2 will be in the client's header so we forward
+// declare it here to be safe.
+struct VkPhysicalDeviceFeatures2;
+
+// The BackendContext contains all of the base Vulkan objects needed by the GrVkGpu. The assumption
+// is that the client will set these up and pass them to the GrVkGpu constructor. The VkDevice
+// created must support at least one graphics queue, which is passed in as well.
+// The QueueFamilyIndex must match the family of the given queue. It is needed for CommandPool
+// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) needs to be created
+// in or transitioned to that family. The refs held by members of this struct must be released
+// (either by deleting the struct or manually releasing the refs) before the underlying vulkan
+// device and instance are destroyed.
+struct SK_API GrVkBackendContext {
+ VkInstance fInstance = VK_NULL_HANDLE;
+ VkPhysicalDevice fPhysicalDevice = VK_NULL_HANDLE;
+ VkDevice fDevice = VK_NULL_HANDLE;
+ VkQueue fQueue = VK_NULL_HANDLE;
+ uint32_t fGraphicsQueueIndex = 0;
+ uint32_t fMinAPIVersion = 0; // Deprecated. Use fInstanceVersion
+ // instead.
+ uint32_t fInstanceVersion = 0; // Deprecated. Use fMaxApiVersion
+ // The max api version set here should match the value set in VkApplicationInfo::apiVersion when
+ // the VkInstance was created.
+ uint32_t fMaxAPIVersion = 0;
+ uint32_t fExtensions = 0; // Deprecated. Use fVkExtensions instead.
+ const skgpu::VulkanExtensions* fVkExtensions = nullptr;
+ uint32_t fFeatures = 0; // Deprecated. Use fDeviceFeatures[2]
+ // instead.
+ // The client can create their VkDevice with either a VkPhysicalDeviceFeatures or
+ // VkPhysicalDeviceFeatures2 struct, thus we have to support taking both. The
+ // VkPhysicalDeviceFeatures2 struct is needed so we know if the client enabled any extension
+ // specific features. If fDeviceFeatures2 is not null then we ignore fDeviceFeatures. If both
+ // fDeviceFeatures and fDeviceFeatures2 are null we will assume no features are enabled.
+ const VkPhysicalDeviceFeatures* fDeviceFeatures = nullptr;
+ const VkPhysicalDeviceFeatures2* fDeviceFeatures2 = nullptr;
+ sk_sp<skgpu::VulkanMemoryAllocator> fMemoryAllocator;
+ skgpu::VulkanGetProc fGetProc = nullptr;
+ // This is deprecated and should be set to false. The client is responsible for managing the
+ // lifetime of the VkInstance and VkDevice objects.
+ bool fOwnsInstanceAndDevice = false;
+ // Indicates that we are working with protected content and all CommandPool and Queue operations
+ // should be done in a protected context.
+ skgpu::Protected fProtectedContext = skgpu::Protected::kNo;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h b/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h
new file mode 100644
index 0000000000..b32cc16eb5
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkExtensions.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkExtensions_DEFINED
+#define GrVkExtensions_DEFINED
+
+#include "include/gpu/vk/VulkanExtensions.h"
+
+using GrVkExtensions = skgpu::VulkanExtensions;
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h b/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h
new file mode 100644
index 0000000000..034e1f506c
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkMemoryAllocator.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkMemoryAllocator_DEFINED
+#define GrVkMemoryAllocator_DEFINED
+
+#include "include/gpu/vk/VulkanMemoryAllocator.h"
+
+using GrVkMemoryAllocator = skgpu::VulkanMemoryAllocator;
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/GrVkTypes.h b/gfx/skia/skia/include/gpu/vk/GrVkTypes.h
new file mode 100644
index 0000000000..ae680a8af5
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/GrVkTypes.h
@@ -0,0 +1,149 @@
+
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTypes_DEFINED
+#define GrVkTypes_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
+using GrVkBackendMemory = skgpu::VulkanBackendMemory;
+using GrVkAlloc = skgpu::VulkanAlloc;
+
+// This struct is used to pass in the necessary information to create a VkSamplerYcbcrConversion
+// object for an VkExternalFormatANDROID.
+struct GrVkYcbcrConversionInfo {
+ bool operator==(const GrVkYcbcrConversionInfo& that) const {
+ // Invalid objects are not required to have all other fields initialized or matching.
+ if (!this->isValid() && !that.isValid()) {
+ return true;
+ }
+ return this->fFormat == that.fFormat &&
+ this->fExternalFormat == that.fExternalFormat &&
+ this->fYcbcrModel == that.fYcbcrModel &&
+ this->fYcbcrRange == that.fYcbcrRange &&
+ this->fXChromaOffset == that.fXChromaOffset &&
+ this->fYChromaOffset == that.fYChromaOffset &&
+ this->fChromaFilter == that.fChromaFilter &&
+ this->fForceExplicitReconstruction == that.fForceExplicitReconstruction;
+ }
+ bool operator!=(const GrVkYcbcrConversionInfo& that) const { return !(*this == that); }
+
+ bool isValid() const { return fYcbcrModel != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; }
+
+ // Format of the source image. Must be set to VK_FORMAT_UNDEFINED for external images or
+ // a valid image format otherwise.
+ VkFormat fFormat = VK_FORMAT_UNDEFINED;
+
+ // The external format. Must be non-zero for external images, zero otherwise.
+ // Should be compatible to be used in a VkExternalFormatANDROID struct.
+ uint64_t fExternalFormat = 0;
+
+ VkSamplerYcbcrModelConversion fYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
+ VkSamplerYcbcrRange fYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
+ VkChromaLocation fXChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
+ VkChromaLocation fYChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
+ VkFilter fChromaFilter = VK_FILTER_NEAREST;
+ VkBool32 fForceExplicitReconstruction = false;
+
+ // For external images format features here should be those returned by a call to
+ // vkAndroidHardwareBufferFormatPropertiesANDROID
+ VkFormatFeatureFlags fFormatFeatures = 0;
+};
+
+/*
+ * When wrapping a GrBackendTexture or GrBackendRenderTarget, the fCurrentQueueFamily should
+ * either be VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL, or VK_QUEUE_FAMILY_FOREIGN_EXT. If
+ * fSharingMode is VK_SHARING_MODE_EXCLUSIVE then fCurrentQueueFamily can also be the graphics
+ * queue index passed into Skia.
+ */
+struct GrVkImageInfo {
+ VkImage fImage = VK_NULL_HANDLE;
+ skgpu::VulkanAlloc fAlloc;
+ VkImageTiling fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ VkImageLayout fImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ VkFormat fFormat = VK_FORMAT_UNDEFINED;
+ VkImageUsageFlags fImageUsageFlags = 0;
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ uint32_t fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ VkSharingMode fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool fPartOfSwapchainOrAndroidWindow = false;
+#endif
+
+#if GR_TEST_UTILS
+ bool operator==(const GrVkImageInfo& that) const {
+ bool equal = fImage == that.fImage && fAlloc == that.fAlloc &&
+ fImageTiling == that.fImageTiling &&
+ fImageLayout == that.fImageLayout &&
+ fFormat == that.fFormat &&
+ fImageUsageFlags == that.fImageUsageFlags &&
+ fSampleCount == that.fSampleCount &&
+ fLevelCount == that.fLevelCount &&
+ fCurrentQueueFamily == that.fCurrentQueueFamily &&
+ fProtected == that.fProtected &&
+ fYcbcrConversionInfo == that.fYcbcrConversionInfo &&
+ fSharingMode == that.fSharingMode;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ equal = equal && (fPartOfSwapchainOrAndroidWindow == that.fPartOfSwapchainOrAndroidWindow);
+#endif
+ return equal;
+ }
+#endif
+};
+
+using GrVkGetProc = skgpu::VulkanGetProc;
+
+/**
+ * This object is wrapped in a GrBackendDrawableInfo and passed in as an argument to
+ * drawBackendGpu() calls on an SkDrawable. The drawable will use this info to inject direct
+ * Vulkan calls into our stream of GPU draws.
+ *
+ * The SkDrawable is given a secondary VkCommandBuffer in which to record draws. The GPU backend
+ * will then execute that command buffer within a render pass it is using for its own draws. The
+ * drawable is also given the attachment of the color index, a compatible VkRenderPass, and the
+ * VkFormat of the color attachment so that it can make VkPipeline objects for the draws. The
+ * SkDrawable must not alter the state of the VkRenderpass or sub pass.
+ *
+ * Additionally, the SkDrawable may fill in the passed in fDrawBounds with the bounds of the draws
+ * that it submits to the command buffer. This will be used by the GPU backend for setting the
+ * bounds in vkCmdBeginRenderPass. If fDrawBounds is not updated, we will assume that the entire
+ * attachment may have been written to.
+ *
+ * The SkDrawable is always allowed to create its own command buffers and submit them to the queue
+ * to render offscreen textures which will be sampled in draws added to the passed in
+ * VkCommandBuffer. If this is done the SkDrawable is in charge of adding the required memory
+ * barriers to the queue for the sampled images since the Skia backend will not do this.
+ */
+struct GrVkDrawableInfo {
+ VkCommandBuffer fSecondaryCommandBuffer;
+ uint32_t fColorAttachmentIndex;
+ VkRenderPass fCompatibleRenderPass;
+ VkFormat fFormat;
+ VkRect2D* fDrawBounds;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ bool fFromSwapchainOrAndroidWindow;
+#endif
+};
+
+struct GrVkSurfaceInfo {
+ uint32_t fSampleCount = 1;
+ uint32_t fLevelCount = 0;
+ skgpu::Protected fProtected = skgpu::Protected::kNo;
+
+ VkImageTiling fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ VkFormat fFormat = VK_FORMAT_UNDEFINED;
+ VkImageUsageFlags fImageUsageFlags = 0;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ VkSharingMode fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/gpu/vk/VulkanBackendContext.h b/gfx/skia/skia/include/gpu/vk/VulkanBackendContext.h
new file mode 100644
index 0000000000..c78e2de0c9
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/VulkanBackendContext.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanBackendContext_DEFINED
+#define skgpu_VulkanBackendContext_DEFINED
+
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/vk/VulkanMemoryAllocator.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
+namespace skgpu {
+
+class VulkanExtensions;
+
+// The VkBackendContext contains all of the base Vk objects needed by the skia Vulkan context.
+struct SK_API VulkanBackendContext {
+ VkInstance fInstance;
+ VkPhysicalDevice fPhysicalDevice;
+ VkDevice fDevice;
+ VkQueue fQueue;
+ uint32_t fGraphicsQueueIndex;
+ // The max api version set here should match the value set in VkApplicationInfo::apiVersion when
+ // the VkInstance was created.
+ uint32_t fMaxAPIVersion;
+ const skgpu::VulkanExtensions* fVkExtensions = nullptr;
+ // The client can create their VkDevice with either a VkPhysicalDeviceFeatures or
+ // VkPhysicalDeviceFeatures2 struct, thus we have to support taking both. The
+ // VkPhysicalDeviceFeatures2 struct is needed so we know if the client enabled any extension
+ // specific features. If fDeviceFeatures2 is not null then we ignore fDeviceFeatures. If both
+ // fDeviceFeatures and fDeviceFeatures2 are null we will assume no features are enabled.
+ const VkPhysicalDeviceFeatures* fDeviceFeatures = nullptr;
+ const VkPhysicalDeviceFeatures2* fDeviceFeatures2 = nullptr;
+ // Optional. The client may provide an implementation of a VulkanMemoryAllocator for Skia to use
+ // for allocating Vulkan resources that use VkDeviceMemory.
+ sk_sp<VulkanMemoryAllocator> fMemoryAllocator;
+ skgpu::VulkanGetProc fGetProc;
+ Protected fProtectedContext;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanBackendContext_DEFINED
diff --git a/gfx/skia/skia/include/gpu/vk/VulkanExtensions.h b/gfx/skia/skia/include/gpu/vk/VulkanExtensions.h
new file mode 100644
index 0000000000..aea442e491
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/VulkanExtensions.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanExtensions_DEFINED
+#define skgpu_VulkanExtensions_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/gpu/vk/VulkanTypes.h"
+#include "include/private/base/SkTArray.h"
+
+namespace skgpu {
+
+/**
+ * Helper class that takes in an array of extension strings for instance and device and allows for
+ * quicker querying if an extension is present.
+ */
+class SK_API VulkanExtensions {
+public:
+ VulkanExtensions() {}
+
+ void init(VulkanGetProc, VkInstance, VkPhysicalDevice,
+ uint32_t instanceExtensionCount, const char* const* instanceExtensions,
+ uint32_t deviceExtensionCount, const char* const* deviceExtensions);
+
+ bool hasExtension(const char[], uint32_t minVersion) const;
+
+ struct Info {
+ Info() {}
+ Info(const char* name) : fName(name), fSpecVersion(0) {}
+
+ SkString fName;
+ uint32_t fSpecVersion;
+
+ struct Less {
+ bool operator()(const Info& a, const SkString& b) const {
+ return strcmp(a.fName.c_str(), b.c_str()) < 0;
+ }
+ bool operator()(const SkString& a, const VulkanExtensions::Info& b) const {
+ return strcmp(a.c_str(), b.fName.c_str()) < 0;
+ }
+ };
+ };
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("**Vulkan Extensions**\n");
+ for (int i = 0; i < fExtensions.size(); ++i) {
+ SkDebugf("%s. Version: %d\n",
+ fExtensions[i].fName.c_str(), fExtensions[i].fSpecVersion);
+ }
+ SkDebugf("**End Vulkan Extensions**\n");
+ }
+#endif
+
+private:
+ void getSpecVersions(VulkanGetProc getProc, VkInstance, VkPhysicalDevice);
+
+ SkTArray<Info> fExtensions;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanExtensions_DEFINED
diff --git a/gfx/skia/skia/include/gpu/vk/VulkanMemoryAllocator.h b/gfx/skia/skia/include/gpu/vk/VulkanMemoryAllocator.h
new file mode 100644
index 0000000000..ebaa28ed1b
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/VulkanMemoryAllocator.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanMemoryAllocator_DEFINED
+#define skgpu_VulkanMemoryAllocator_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/vk/VulkanTypes.h"
+
+namespace skgpu {
+
+class VulkanMemoryAllocator : public SkRefCnt {
+public:
+ enum AllocationPropertyFlags {
+ kNone_AllocationPropertyFlag = 0b0000,
+ // Allocation will be placed in its own VkDeviceMemory and not suballocated from some larger
+ // block.
+ kDedicatedAllocation_AllocationPropertyFlag = 0b0001,
+ // Says that the backing memory can only be accessed by the device. Additionally the device
+ // may lazily allocate the memory. This cannot be used with buffers that will be host
+ // visible. Setting this flag does not guarantee that we will allocate memory that respects
+ // it, but we will try to prefer memory that can respect it.
+ kLazyAllocation_AllocationPropertyFlag = 0b0010,
+ // The allocation will be mapped immediately and stay mapped until it is destroyed. This
+ // flag is only valid for buffers which are host visible (i.e. must have a usage other than
+ // BufferUsage::kGpuOnly).
+ kPersistentlyMapped_AllocationPropertyFlag = 0b0100,
+ // Allocation can only be accessed by the device using a protected context.
+ kProtected_AllocationPropertyFlag = 0b1000,
+ };
+
+ enum class BufferUsage {
+ // Buffers that will only be accessed from the device (large const buffers) will always be
+ // in device local memory.
+ kGpuOnly,
+ // Buffers that typically will be updated multiple times by the host and read on the gpu
+ // (e.g. uniform or vertex buffers). CPU writes will generally be sequential in the buffer
+ // and will try to take advantage of the write-combined nature of the gpu buffers. Thus this
+ // will always be mappable and coherent memory, and it will prefer to be in device local
+ // memory.
+ kCpuWritesGpuReads,
+ // Buffers that will be accessed on the host and copied to another GPU resource (transfer
+ // buffers). Will always be mappable and coherent memory.
+ kTransfersFromCpuToGpu,
+ // Buffers which are typically written to by the GPU and then read on the host. Will always
+ // be mappable memory, and will prefer cached memory.
+ kTransfersFromGpuToCpu,
+ };
+
+ virtual VkResult allocateImageMemory(VkImage image,
+ uint32_t allocationPropertyFlags,
+ skgpu::VulkanBackendMemory* memory) = 0;
+
+ virtual VkResult allocateBufferMemory(VkBuffer buffer,
+ BufferUsage usage,
+ uint32_t allocationPropertyFlags,
+ skgpu::VulkanBackendMemory* memory) = 0;
+
+ // Fills out the passed in skgpu::VulkanAlloc struct for the passed in
+ // skgpu::VulkanBackendMemory.
+ virtual void getAllocInfo(const skgpu::VulkanBackendMemory&, skgpu::VulkanAlloc*) const = 0;
+
+ // Maps the entire allocation and returns a pointer to the start of the allocation. The
+ // implementation may map more memory than just the allocation, but the returned pointer must
+ // point at the start of the memory for the requested allocation.
+ virtual void* mapMemory(const skgpu::VulkanBackendMemory&) { return nullptr; }
+ virtual VkResult mapMemory(const skgpu::VulkanBackendMemory& memory, void** data) {
+ *data = this->mapMemory(memory);
+ // VK_ERROR_INITIALIZATION_FAILED is a bogus result to return from this function, but it is
+ // just something to return that is not VK_SUCCESS and can't be interpreted by a caller to
+ // mean something specific happened like device lost or oom. This will be removed once we
+ // update clients to implement this virtual.
+ return *data ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
+ }
+ virtual void unmapMemory(const skgpu::VulkanBackendMemory&) = 0;
+
+ // The following two calls are used for managing non-coherent memory. The offset is relative to
+ // the start of the allocation and not the underlying VkDeviceMemory. Additionally the client
+ // must make sure that the offset + size passed in is less than or equal to the allocation size.
+ // It is the responsibility of the implementation to make sure all alignment requirements are
+ // followed. The client should not have to deal with any sort of alignment issues.
+ virtual void flushMappedMemory(const skgpu::VulkanBackendMemory&, VkDeviceSize, VkDeviceSize) {}
+ virtual VkResult flushMemory(const skgpu::VulkanBackendMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size) {
+ this->flushMappedMemory(memory, offset, size);
+ return VK_SUCCESS;
+ }
+ virtual void invalidateMappedMemory(const skgpu::VulkanBackendMemory&,
+ VkDeviceSize,
+ VkDeviceSize) {}
+ virtual VkResult invalidateMemory(const skgpu::VulkanBackendMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size) {
+ this->invalidateMappedMemory(memory, offset, size);
+ return VK_SUCCESS;
+ }
+
+ virtual void freeMemory(const skgpu::VulkanBackendMemory&) = 0;
+
+ // Returns the total amount of memory that is allocated as well as total
+ // amount of memory in use by an allocation from this allocator.
+ // Return 1st param is total allocated memory, 2nd is total used memory.
+ virtual std::pair<uint64_t, uint64_t> totalAllocatedAndUsedMemory() const = 0;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanMemoryAllocator_DEFINED
diff --git a/gfx/skia/skia/include/gpu/vk/VulkanTypes.h b/gfx/skia/skia/include/gpu/vk/VulkanTypes.h
new file mode 100644
index 0000000000..5468c59211
--- /dev/null
+++ b/gfx/skia/skia/include/gpu/vk/VulkanTypes.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanTypes_DEFINED
+#define skgpu_VulkanTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/gpu/vk/SkiaVulkan.h"
+
+#include <functional>
+
+#ifndef VK_VERSION_1_1
+#error Skia requires the use of Vulkan 1.1 headers
+#endif
+
+namespace skgpu {
+
+using VulkanGetProc = std::function<PFN_vkVoidFunction(
+ const char*, // function name
+ VkInstance, // instance or VK_NULL_HANDLE
+ VkDevice // device or VK_NULL_HANDLE
+ )>;
+
+typedef intptr_t VulkanBackendMemory;
+
+/**
+ * Types for interacting with Vulkan resources created externally to Skia.
+ */
+struct VulkanAlloc {
+ // can be VK_NULL_HANDLE iff is an RT and is borrowed
+ VkDeviceMemory fMemory = VK_NULL_HANDLE;
+ VkDeviceSize fOffset = 0;
+ VkDeviceSize fSize = 0; // this can be indeterminate iff Tex uses borrow semantics
+ uint32_t fFlags = 0;
+ // handle to memory allocated via skgpu::VulkanMemoryAllocator.
+ VulkanBackendMemory fBackendMemory = 0;
+
+ enum Flag {
+ kNoncoherent_Flag = 0x1, // memory must be flushed to device after mapping
+ kMappable_Flag = 0x2, // memory is able to be mapped.
+ kLazilyAllocated_Flag = 0x4, // memory was created with lazy allocation
+ };
+
+ bool operator==(const VulkanAlloc& that) const {
+ return fMemory == that.fMemory && fOffset == that.fOffset && fSize == that.fSize &&
+ fFlags == that.fFlags && fUsesSystemHeap == that.fUsesSystemHeap;
+ }
+
+private:
+ bool fUsesSystemHeap = false;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanTypes_DEFINED
diff --git a/gfx/skia/skia/include/pathops/SkPathOps.h b/gfx/skia/skia/include/pathops/SkPathOps.h
new file mode 100644
index 0000000000..2d1a911be1
--- /dev/null
+++ b/gfx/skia/skia/include/pathops/SkPathOps.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOps_DEFINED
+#define SkPathOps_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+
+struct SkRect;
+
+
+// FIXME: move everything below into the SkPath class
+/**
+ * The logical operations that can be performed when combining two paths.
+ */
+enum SkPathOp {
+ kDifference_SkPathOp, //!< subtract the op path from the first path
+ kIntersect_SkPathOp, //!< intersect the two paths
+ kUnion_SkPathOp, //!< union (inclusive-or) the two paths
+ kXOR_SkPathOp, //!< exclusive-or the two paths
+ kReverseDifference_SkPathOp, //!< subtract the first path from the op path
+};
+
+/** Set this path to the result of applying the Op to this path and the
+ specified path: this = (this op operand).
+ The resulting path will be constructed from non-overlapping contours.
+ The curve order is reduced where possible so that cubics may be turned
+ into quadratics, and quadratics may be turned into lines.
+
+ Returns true if operation was able to produce a result;
+ otherwise, result is unmodified.
+
+ @param one The first operand (for difference, the minuend)
+ @param two The second operand (for difference, the subtrahend)
+ @param op The operator to apply.
+ @param result The product of the operands. The result may be one of the
+ inputs.
+ @return True if the operation succeeded.
+ */
+bool SK_API Op(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result);
+
+/** Set this path to a set of non-overlapping contours that describe the
+ same area as the original path.
+ The curve order is reduced where possible so that cubics may
+ be turned into quadratics, and quadratics may be turned into lines.
+
+ Returns true if operation was able to produce a result;
+ otherwise, result is unmodified.
+
+ @param path The path to simplify.
+ @param result The simplified path. The result may be the input.
+ @return True if simplification succeeded.
+ */
+bool SK_API Simplify(const SkPath& path, SkPath* result);
+
+/** Set the resulting rectangle to the tight bounds of the path.
+
+ @param path The path measured.
+ @param result The tight bounds of the path.
+ @return True if the bounds could be computed.
+ */
+bool SK_API TightBounds(const SkPath& path, SkRect* result);
+
+/** Set the result with fill type winding to area equivalent to path.
+ Returns true if successful. Does not detect if path contains contours which
+ contain self-crossings or cross other contours; in these cases, may return
+ true even though result does not fill same area as path.
+
+ Returns true if operation was able to produce a result;
+ otherwise, result is unmodified. The result may be the input.
+
+ @param path The path typically with fill type set to even odd.
+ @param result The equivalent path with fill type set to winding.
+ @return True if winding path was set.
+ */
+bool SK_API AsWinding(const SkPath& path, SkPath* result);
+
+/** Perform a series of path operations, optimized for unioning many paths together.
+ */
+class SK_API SkOpBuilder {
+public:
+ /** Add one or more paths and their operand. The builder is empty before the first
+ path is added, so the result of a single add is (emptyPath OP path).
+
+ @param path The second operand.
+ @param _operator The operator to apply to the existing and supplied paths.
+ */
+ void add(const SkPath& path, SkPathOp _operator);
+
+ /** Computes the sum of all paths and operands, and resets the builder to its
+ initial state.
+
+ @param result The product of the operands.
+ @return True if the operation succeeded.
+ */
+ bool resolve(SkPath* result);
+
+private:
+ SkTArray<SkPath> fPathRefs;
+ SkTDArray<SkPathOp> fOps;
+
+ static bool FixWinding(SkPath* path);
+ static void ReversePath(SkPath* path);
+ void reset();
+};
+
+#endif
diff --git a/gfx/skia/skia/include/ports/SkCFObject.h b/gfx/skia/skia/include/ports/SkCFObject.h
new file mode 100644
index 0000000000..20e86671b7
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkCFObject.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCFObject_DEFINED
+#define SkCFObject_DEFINED
+
+#ifdef __APPLE__
+
+#include "include/core/SkTypes.h"
+
+#include <cstddef> // std::nullptr_t
+
+#import <CoreFoundation/CoreFoundation.h>
+
+/**
+ * Wrapper class for managing lifetime of CoreFoundation objects. It will call
+ * CFRetain and CFRelease appropriately on creation, assignment, and deletion.
+ * Based on sk_sp<>.
+ */
+template <typename T> static inline T SkCFSafeRetain(T obj) {
+ if (obj) {
+ CFRetain(obj);
+ }
+ return obj;
+}
+
+template <typename T> static inline void SkCFSafeRelease(T obj) {
+ if (obj) {
+ CFRelease(obj);
+ }
+}
+
+template <typename T> class sk_cfp {
+public:
+ using element_type = T;
+
+ constexpr sk_cfp() {}
+ constexpr sk_cfp(std::nullptr_t) {}
+
+ /**
+ * Shares the underlying object by calling CFRetain(), so that both the argument and the newly
+ * created sk_cfp both have a reference to it.
+ */
+ sk_cfp(const sk_cfp<T>& that) : fObject(SkCFSafeRetain(that.get())) {}
+
+ /**
+ * Move the underlying object from the argument to the newly created sk_cfp. Afterwards only
+ * the new sk_cfp will have a reference to the object, and the argument will point to null.
+ * No call to CFRetain() or CFRelease() will be made.
+ */
+ sk_cfp(sk_cfp<T>&& that) : fObject(that.release()) {}
+
+ /**
+ * Adopt the bare object into the newly created sk_cfp.
+ * No call to CFRetain() or CFRelease() will be made.
+ */
+ explicit sk_cfp(T obj) {
+ fObject = obj;
+ }
+
+ /**
+ * Calls CFRelease() on the underlying object pointer.
+ */
+ ~sk_cfp() {
+ SkCFSafeRelease(fObject);
+ SkDEBUGCODE(fObject = nil);
+ }
+
+ sk_cfp<T>& operator=(std::nullptr_t) { this->reset(); return *this; }
+
+ /**
+ * Shares the underlying object referenced by the argument by calling CFRetain() on it. If this
+ * sk_cfp previously had a reference to an object (i.e. not null) it will call CFRelease()
+ * on that object.
+ */
+ sk_cfp<T>& operator=(const sk_cfp<T>& that) {
+ if (this != &that) {
+ this->reset(SkCFSafeRetain(that.get()));
+ }
+ return *this;
+ }
+
+ /**
+ * Move the underlying object from the argument to the sk_cfp. If the sk_cfp
+ * previously held a reference to another object, CFRelease() will be called on that object.
+ * No call to CFRetain() will be made.
+ */
+ sk_cfp<T>& operator=(sk_cfp<T>&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+
+ explicit operator bool() const { return this->get() != nil; }
+
+ T get() const { return fObject; }
+ T operator*() const {
+ SkASSERT(fObject);
+ return fObject;
+ }
+
+ /**
+ * Adopt the new object, and call CFRelease() on any previously held object (if not null).
+ * No call to CFRetain() will be made.
+ */
+ void reset(T object = nil) {
+ // Need to unref after assigning, see
+ // http://wg21.cmeerw.net/lwg/issue998
+ // http://wg21.cmeerw.net/lwg/issue2262
+ T oldObject = fObject;
+ fObject = object;
+ SkCFSafeRelease(oldObject);
+ }
+
+ /**
+ * Shares the new object by calling CFRetain() on it. If this sk_cfp previously had a
+ * reference to an object (i.e. not null) it will call CFRelease() on that object.
+ */
+ void retain(T object) {
+ if (fObject != object) {
+ this->reset(SkCFSafeRetain(object));
+ }
+ }
+
+ /**
+ * Return the original object, and set the internal object to nullptr.
+ * The caller must assume ownership of the object, and manage its reference count directly.
+ * No call to CFRelease() will be made.
+ */
+ T SK_WARN_UNUSED_RESULT release() {
+ T obj = fObject;
+ fObject = nil;
+ return obj;
+ }
+
+private:
+ T fObject = nil;
+};
+
+template <typename T> inline bool operator==(const sk_cfp<T>& a,
+ const sk_cfp<T>& b) {
+ return a.get() == b.get();
+}
+template <typename T> inline bool operator==(const sk_cfp<T>& a,
+ std::nullptr_t) {
+ return !a;
+}
+template <typename T> inline bool operator==(std::nullptr_t,
+ const sk_cfp<T>& b) {
+ return !b;
+}
+
+template <typename T> inline bool operator!=(const sk_cfp<T>& a,
+ const sk_cfp<T>& b) {
+ return a.get() != b.get();
+}
+template <typename T> inline bool operator!=(const sk_cfp<T>& a,
+ std::nullptr_t) {
+ return static_cast<bool>(a);
+}
+template <typename T> inline bool operator!=(std::nullptr_t,
+ const sk_cfp<T>& b) {
+ return static_cast<bool>(b);
+}
+
+/*
+ * Returns a sk_cfp wrapping the provided object AND calls retain on it (if not null).
+ *
+ * This is different than the semantics of the constructor for sk_cfp, which just wraps the
+ * object, effectively "adopting" it.
+ */
+template <typename T> sk_cfp<T> sk_ret_cfp(T obj) {
+ return sk_cfp<T>(SkCFSafeRetain(obj));
+}
+
+#endif // __APPLE__
+#endif // SkCFObject_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontConfigInterface.h b/gfx/skia/skia/include/ports/SkFontConfigInterface.h
new file mode 100644
index 0000000000..65fd612593
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontConfigInterface.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontConfigInterface_DEFINED
+#define SkFontConfigInterface_DEFINED
+
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+
+class SkFontMgr;
+
+/**
+ * \class SkFontConfigInterface
+ *
+ * A simple interface for remotable font management.
+ * The global instance can be found with RefGlobal().
+ */
+class SK_API SkFontConfigInterface : public SkRefCnt {
+public:
+
+ /**
+ * Returns the global SkFontConfigInterface instance. If it is not
+ * nullptr, calls ref() on it. The caller must balance this with a call to
+ * unref(). The default SkFontConfigInterface is the result of calling
+ * GetSingletonDirectInterface.
+ */
+ static sk_sp<SkFontConfigInterface> RefGlobal();
+
+ /**
+ * Replace the current global instance with the specified one.
+ */
+ static void SetGlobal(sk_sp<SkFontConfigInterface> fc);
+
+ /**
+ * This should be treated as private to the impl of SkFontConfigInterface.
+ * Callers should not change or expect any particular values. It is meant
+ * to be a union of possible storage types to aid the impl.
+ */
+ struct FontIdentity {
+ FontIdentity() : fID(0), fTTCIndex(0) {}
+
+ bool operator==(const FontIdentity& other) const {
+ return fID == other.fID &&
+ fTTCIndex == other.fTTCIndex &&
+ fString == other.fString;
+ }
+ bool operator!=(const FontIdentity& other) const {
+ return !(*this == other);
+ }
+
+ uint32_t fID;
+ int32_t fTTCIndex;
+ SkString fString;
+ SkFontStyle fStyle;
+
+ // If buffer is NULL, just return the number of bytes that would have
+ // been written. Will pad contents to a multiple of 4.
+ size_t writeToMemory(void* buffer = nullptr) const;
+
+ // Recreate from a flattened buffer, returning the number of bytes read.
+ size_t readFromMemory(const void* buffer, size_t length);
+ };
+
+ /**
+ * Given a familyName and style, find the best match.
+ *
+ * If a match is found, return true and set its outFontIdentifier.
+ * If outFamilyName is not null, assign the found familyName to it
+ * (which may differ from the requested familyName).
+ * If outStyle is not null, assign the found style to it
+ * (which may differ from the requested style).
+ *
+ * If a match is not found, return false, and ignore all out parameters.
+ */
+ virtual bool matchFamilyName(const char familyName[],
+ SkFontStyle requested,
+ FontIdentity* outFontIdentifier,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) = 0;
+
+ /**
+ * Given a FontRef, open a stream to access its data, or return null
+ * if the FontRef's data is not available. The caller is responsible for
+ * deleting the stream when it is done accessing the data.
+ */
+ virtual SkStreamAsset* openStream(const FontIdentity&) = 0;
+
+ /**
+ * Return an SkTypeface for the given FontIdentity.
+ *
+ * The default implementation simply returns a new typeface built using data obtained from
+ * openStream(), but derived classes may implement more complex caching schemes.
+ */
+ virtual sk_sp<SkTypeface> makeTypeface(const FontIdentity& identity) {
+ return SkTypeface::MakeFromStream(std::unique_ptr<SkStreamAsset>(this->openStream(identity)),
+ identity.fTTCIndex);
+
+ }
+
+ /**
+ * Return a singleton instance of a direct subclass that calls into
+ * libfontconfig. This does not affect the refcnt of the returned instance.
+ */
+ static SkFontConfigInterface* GetSingletonDirectInterface();
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h b/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h
new file mode 100644
index 0000000000..05771257d2
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_FontConfigInterface.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_FontConfigInterface_DEFINED
+#define SkFontMgr_FontConfigInterface_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkFontMgr;
+class SkFontConfigInterface;
+
+/** Creates a SkFontMgr which wraps a SkFontConfigInterface. */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_FCI(sk_sp<SkFontConfigInterface> fci);
+
+#endif // #ifndef SkFontMgr_FontConfigInterface_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_android.h b/gfx/skia/skia/include/ports/SkFontMgr_android.h
new file mode 100644
index 0000000000..d68f3ba034
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_android.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_android_DEFINED
+#define SkFontMgr_android_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkFontMgr;
+
+struct SkFontMgr_Android_CustomFonts {
+ /** When specifying custom fonts, indicates how to use system fonts. */
+ enum SystemFontUse {
+ kOnlyCustom, /** Use only custom fonts. NDK compliant. */
+ kPreferCustom, /** Use custom fonts before system fonts. */
+ kPreferSystem /** Use system fonts before custom fonts. */
+ };
+ /** Whether or not to use system fonts. */
+ SystemFontUse fSystemFontUse;
+
+ /** Base path to resolve relative font file names. If a directory, should end with '/'. */
+ const char* fBasePath;
+
+ /** Optional custom configuration file to use. */
+ const char* fFontsXml;
+
+ /** Optional custom configuration file for fonts which provide fallback.
+ * In the new style (version > 21) fontsXml format is used, this should be NULL.
+ */
+ const char* fFallbackFontsXml;
+
+ /** Optional custom flag. If set to true the SkFontMgr will acquire all requisite
+ * system IO resources on initialization.
+ */
+ bool fIsolated;
+};
+
+/** Create a font manager for Android. If 'custom' is NULL, use only system fonts. */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Android(const SkFontMgr_Android_CustomFonts* custom);
+
+#endif // SkFontMgr_android_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_data.h b/gfx/skia/skia/include/ports/SkFontMgr_data.h
new file mode 100644
index 0000000000..6a22365af4
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_data.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkFontMgr_data_DEFINED
+#define SkFontMgr_data_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+
+class SkFontMgr;
+
+/** Create a custom font manager which wraps a collection of SkData-stored fonts.
+ * This font manager uses FreeType for rendering.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Data(SkSpan<sk_sp<SkData>>);
+
+#endif // SkFontMgr_data_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_directory.h b/gfx/skia/skia/include/ports/SkFontMgr_directory.h
new file mode 100644
index 0000000000..b1a60fb4da
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_directory.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_directory_DEFINED
+#define SkFontMgr_directory_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkFontMgr;
+
+/** Create a custom font manager which scans a given directory for font files.
+ * This font manager uses FreeType for rendering.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Directory(const char* dir);
+
+#endif // SkFontMgr_directory_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_empty.h b/gfx/skia/skia/include/ports/SkFontMgr_empty.h
new file mode 100644
index 0000000000..e5756421d0
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_empty.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_empty_DEFINED
+#define SkFontMgr_empty_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkFontMgr;
+
+/** Create a custom font manager that contains no built-in fonts.
+ * This font manager uses FreeType for rendering.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Empty();
+
+#endif // SkFontMgr_empty_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h b/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h
new file mode 100644
index 0000000000..4b2bb2d297
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_fontconfig.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_fontconfig_DEFINED
+#define SkFontMgr_fontconfig_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include <fontconfig/fontconfig.h>
+
+class SkFontMgr;
+
+/** Create a font manager around a FontConfig instance.
+ * If 'fc' is NULL, will use a new default config.
+ * Takes ownership of 'fc' and will call FcConfigDestroy on it.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_FontConfig(FcConfig* fc);
+
+#endif // #ifndef SkFontMgr_fontconfig_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_fuchsia.h b/gfx/skia/skia/include/ports/SkFontMgr_fuchsia.h
new file mode 100644
index 0000000000..d20530af72
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_fuchsia.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_fuchsia_DEFINED
+#define SkFontMgr_fuchsia_DEFINED
+
+#include <fuchsia/fonts/cpp/fidl.h>
+
+#include "include/core/SkRefCnt.h"
+
+class SkFontMgr;
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider);
+
+#endif // SkFontMgr_fuchsia_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_indirect.h b/gfx/skia/skia/include/ports/SkFontMgr_indirect.h
new file mode 100644
index 0000000000..2a499ab676
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_indirect.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_indirect_DEFINED
+#define SkFontMgr_indirect_DEFINED
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/ports/SkRemotableFontMgr.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTArray.h"
+
+class SkData;
+class SkFontStyle;
+class SkStreamAsset;
+class SkString;
+
+class SK_API SkFontMgr_Indirect : public SkFontMgr {
+public:
+ // TODO: The SkFontMgr is only used for createFromStream/File/Data.
+ // In the future these calls should be broken out into their own interface
+ // with a name like SkFontRenderer.
+ SkFontMgr_Indirect(sk_sp<SkFontMgr> impl, sk_sp<SkRemotableFontMgr> proxy)
+ : fImpl(std::move(impl)), fProxy(std::move(proxy))
+ { }
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onCreateStyleSet(int index) const override;
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const override;
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle&,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override;
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ SkTypeface* createTypefaceFromFontId(const SkFontIdentity& fontId) const;
+
+ sk_sp<SkFontMgr> fImpl;
+ sk_sp<SkRemotableFontMgr> fProxy;
+
+ struct DataEntry {
+ uint32_t fDataId; // key1
+ uint32_t fTtcIndex; // key2
+ SkTypeface* fTypeface; // value: weak ref to typeface
+
+ DataEntry() = default;
+
+ DataEntry(DataEntry&& that) { *this = std::move(that); }
+ DataEntry& operator=(DataEntry&& that) {
+ if (this != &that) {
+ fDataId = that.fDataId;
+ fTtcIndex = that.fTtcIndex;
+ fTypeface = that.fTypeface;
+
+ SkDEBUGCODE(that.fDataId = SkFontIdentity::kInvalidDataId;)
+ SkDEBUGCODE(that.fTtcIndex = 0xbbadbeef;)
+ that.fTypeface = nullptr;
+ }
+ return *this;
+ }
+
+ ~DataEntry() {
+ if (fTypeface) {
+ fTypeface->weak_unref();
+ }
+ }
+ };
+ /**
+ * This cache is essentially { dataId: { ttcIndex: typeface } }
+ * For data caching we want a mapping from data id to weak references to
+ * typefaces with that data id. By storing the index next to the typeface,
+ * this data cache also acts as a typeface cache.
+ */
+ mutable SkTArray<DataEntry> fDataCache;
+ mutable SkMutex fDataCacheMutex;
+
+ friend class SkStyleSet_Indirect;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/ports/SkFontMgr_mac_ct.h b/gfx/skia/skia/include/ports/SkFontMgr_mac_ct.h
new file mode 100644
index 0000000000..45cba65b5d
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkFontMgr_mac_ct.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_mac_ct_DEFINED
+#define SkFontMgr_mac_ct_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#endif
+
+class SkFontMgr;
+
+/** Create a font manager for CoreText. If the collection is nullptr the system default will be used. */
+SK_API extern sk_sp<SkFontMgr> SkFontMgr_New_CoreText(CTFontCollectionRef);
+
+#endif // SkFontMgr_mac_ct_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkImageGeneratorCG.h b/gfx/skia/skia/include/ports/SkImageGeneratorCG.h
new file mode 100644
index 0000000000..93592cde4e
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkImageGeneratorCG.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+
+#include <memory>
+
+namespace SkImageGeneratorCG {
+SK_API std::unique_ptr<SkImageGenerator> MakeFromEncodedCG(sk_sp<SkData>);
+} // namespace SkImageGeneratorCG
+
+#endif //defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/include/ports/SkImageGeneratorNDK.h b/gfx/skia/skia/include/ports/SkImageGeneratorNDK.h
new file mode 100644
index 0000000000..739a586f0d
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkImageGeneratorNDK.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageGeneratorNDK_DEFINED
+#define SkImageGeneratorNDK_DEFINED
+
+#include "include/core/SkTypes.h"
+#ifdef SK_ENABLE_NDK_IMAGES
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+
+#include <memory>
+
+namespace SkImageGeneratorNDK {
+/**
+ * Create a generator that uses the Android NDK's APIs for decoding images.
+ *
+ * Only supported on devices where __ANDROID_API__ >= 30.
+ *
+ * As with SkCodec, the SkColorSpace passed to getPixels() determines the
+ * type of color space transformations to apply. A null SkColorSpace means to
+ * apply none.
+ *
+ * A note on scaling: Calling getPixels() on the resulting SkImageGenerator
+ * with dimensions that do not match getInfo() requests a scale. For WebP
+ * files, dimensions smaller than those of getInfo are supported. For Jpeg
+ * files, dimensions of 1/2, 1/4, and 1/8 are supported. TODO: Provide an
+ * API like SkCodecImageGenerator::getScaledDimensions() to report which
+ * dimensions are supported?
+ */
+SK_API std::unique_ptr<SkImageGenerator> MakeFromEncodedNDK(sk_sp<SkData>);
+}
+
+#endif // SK_ENABLE_NDK_IMAGES
+#endif // SkImageGeneratorNDK_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkImageGeneratorWIC.h b/gfx/skia/skia/include/ports/SkImageGeneratorWIC.h
new file mode 100644
index 0000000000..eb57a20956
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkImageGeneratorWIC.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+
+#include <memory>
+
+/*
+ * Any Windows program that uses COM must initialize the COM library by calling
+ * the CoInitializeEx function. In addition, each thread that uses a COM
+ * interface must make a separate call to this function.
+ *
+ * For every successful call to CoInitializeEx, the thread must call
+ * CoUninitialize before it exits.
+ *
+ * SkImageGeneratorWIC requires the COM library and leaves it to the client to
+ * initialize COM for their application.
+ *
+ * For more information on initializing COM, please see:
+ * https://msdn.microsoft.com/en-us/library/windows/desktop/ff485844.aspx
+ */
+namespace SkImageGeneratorWIC {
+SK_API std::unique_ptr<SkImageGenerator> MakeFromEncodedWIC(sk_sp<SkData>);
+}
+
+#endif // SK_BUILD_FOR_WIN
diff --git a/gfx/skia/skia/include/ports/SkRemotableFontMgr.h b/gfx/skia/skia/include/ports/SkRemotableFontMgr.h
new file mode 100644
index 0000000000..eacb6bde9c
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkRemotableFontMgr.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRemotableFontMgr_DEFINED
+#define SkRemotableFontMgr_DEFINED
+
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTemplates.h"
+
+class SkDataTable;
+class SkStreamAsset;
+
+struct SK_API SkFontIdentity {
+ static const uint32_t kInvalidDataId = 0xFFFFFFFF;
+
+ // Note that fDataId is a data identifier, not a font identifier.
+ // (fDataID, fTtcIndex) can be seen as a font identifier.
+ uint32_t fDataId;
+ uint32_t fTtcIndex;
+
+ // On Linux/FontConfig there is also the ability to specify preferences for rendering
+ // antialias, embedded bitmaps, autohint, hinting, hintstyle, lcd rendering
+ // may all be set or set to no-preference
+ // (No-preference is resolved against globals set by the platform)
+ // Since they may be selected against, these are really 'extensions' to SkFontStyle.
+ // SkFontStyle should pick these up.
+ SkFontStyle fFontStyle;
+};
+
+class SK_API SkRemotableFontIdentitySet : public SkRefCnt {
+public:
+ SkRemotableFontIdentitySet(int count, SkFontIdentity** data);
+
+ int count() const { return fCount; }
+ const SkFontIdentity& at(int index) const { return fData[index]; }
+
+ static SkRemotableFontIdentitySet* NewEmpty();
+
+private:
+ SkRemotableFontIdentitySet() : fCount(0), fData() { }
+
+ friend SkRemotableFontIdentitySet* sk_remotable_font_identity_set_new();
+
+ int fCount;
+ skia_private::AutoTArray<SkFontIdentity> fData;
+
+ using INHERITED = SkRefCnt;
+};
+
+class SK_API SkRemotableFontMgr : public SkRefCnt {
+public:
+ /**
+ * Returns all of the fonts with the given familyIndex.
+ * Returns NULL if the index is out of bounds.
+ * Returns empty if there are no fonts at the given index.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkRemotableFontIdentitySet* getIndex(int familyIndex) const = 0;
+
+ /**
+ * Returns the closest match to the given style in the given index.
+ * If there are no available fonts at the given index, the return value's
+ * data id will be kInvalidDataId.
+ */
+ virtual SkFontIdentity matchIndexStyle(int familyIndex, const SkFontStyle&) const = 0;
+
+ /**
+ * Returns all the fonts on the system with the given name.
+ * If the given name is NULL, will return the default font family.
+ * Never returns NULL; will return an empty set if the name is not found.
+ *
+ * It is possible that this will return fonts not accessible from
+ * getIndex(int) or matchIndexStyle(int, SkFontStyle) due to
+ * hidden or auto-activated fonts.
+ *
+ * The matching may be done in a system dependent way. The name may be
+ * matched case-insensitive, there may be system aliases which resolve,
+ * and names outside the current locale may be considered. However, this
+ * should only return fonts which are somehow associated with the requested
+ * name.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkRemotableFontIdentitySet* matchName(const char familyName[]) const = 0;
+
+ /**
+ * Returns the closest matching font to the specified name and style.
+ * If there are no available fonts which match the name, the return value's
+ * data id will be kInvalidDataId.
+ * If the given name is NULL, the match will be against any default fonts.
+ *
+ * It is possible that this will return a font identity not accessible from
+ * methods returning sets due to hidden or auto-activated fonts.
+ *
+ * The matching may be done in a system dependent way. The name may be
+ * matched case-insensitive, there may be system aliases which resolve,
+ * and names outside the current locale may be considered. However, this
+ * should only return a font which is somehow associated with the requested
+ * name.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkFontIdentity matchNameStyle(const char familyName[], const SkFontStyle&) const = 0;
+
+ /**
+ * Use the system fall-back to find a font for the given character.
+ * If no font can be found for the character, the return value's data id
+ * will be kInvalidDataId.
+ * If the name is NULL, the match will start against any default fonts.
+ * If the bcp47 is NULL, a default locale will be assumed.
+ *
+ * Note that bcp47 is a combination of ISO 639, 15924, and 3166-1 codes,
+ * so it is fine to just pass a ISO 639 here.
+ */
+ virtual SkFontIdentity matchNameStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const=0;
+
+ /**
+ * Returns the data for the given data id.
+ * Will return NULL if the data id is invalid.
+ * Note that this is a data id, not a font id.
+ *
+ * The caller must unref() the returned object.
+ */
+ virtual SkStreamAsset* getData(int dataId) const = 0;
+
+private:
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/ports/SkTypeface_cairo.h b/gfx/skia/skia/include/ports/SkTypeface_cairo.h
new file mode 100644
index 0000000000..7cf1aec3b1
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_cairo.h
@@ -0,0 +1,17 @@
+#ifndef SkTypeface_cairo_DEFINED
+#define SkTypeface_cairo_DEFINED
+
+#include "include/core/SkTypeface.h"
+#include "include/core/SkSurfaceProps.h"
+
+struct FT_FaceRec_;
+typedef FT_FaceRec_* FT_Face;
+
+SK_API extern void SkInitCairoFT(bool fontHintingEnabled);
+
+SK_API extern SkTypeface* SkCreateTypefaceFromCairoFTFont(
+ FT_Face face = nullptr, void* faceContext = nullptr,
+ SkPixelGeometry pixelGeometry = kUnknown_SkPixelGeometry,
+ uint8_t lcdFilter = 0);
+
+#endif
diff --git a/gfx/skia/skia/include/ports/SkTypeface_mac.h b/gfx/skia/skia/include/ports/SkTypeface_mac.h
new file mode 100644
index 0000000000..ec68e05492
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_mac.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_mac_DEFINED
+#define SkTypeface_mac_DEFINED
+
+#include "include/core/SkTypeface.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#endif
+
+/**
+ * Like the other Typeface make methods, this returns a new reference to the
+ * corresponding typeface for the specified CTFontRef.
+ */
+SK_API extern sk_sp<SkTypeface> SkMakeTypefaceFromCTFont(CTFontRef);
+
+/**
+ * Returns the platform-specific CTFontRef handle for a
+ * given SkTypeface. Note that the returned CTFontRef gets
+ * released when the source SkTypeface is destroyed.
+ *
+ * This method is deprecated. It may only be used by Blink Mac
+ * legacy code in special cases related to text-shaping
+ * with AAT fonts, clipboard handling and font fallback.
+ * See https://code.google.com/p/skia/issues/detail?id=3408
+ */
+SK_API extern CTFontRef SkTypeface_GetCTFontRef(const SkTypeface* face);
+
+#endif // defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#endif // SkTypeface_mac_DEFINED
diff --git a/gfx/skia/skia/include/ports/SkTypeface_win.h b/gfx/skia/skia/include/ports/SkTypeface_win.h
new file mode 100644
index 0000000000..22a930e319
--- /dev/null
+++ b/gfx/skia/skia/include/ports/SkTypeface_win.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_win_DEFINED
+#define SkTypeface_win_DEFINED
+
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#ifdef UNICODE
+typedef struct tagLOGFONTW LOGFONTW;
+typedef LOGFONTW LOGFONT;
+#else
+typedef struct tagLOGFONTA LOGFONTA;
+typedef LOGFONTA LOGFONT;
+#endif // UNICODE
+
+/**
+ * Like the other Typeface create methods, this returns a new reference to the
+ * corresponding typeface for the specified logfont. The caller is responsible
+ * for calling unref() when it is finished.
+ */
+SK_API SkTypeface* SkCreateTypefaceFromLOGFONT(const LOGFONT&);
+
+/**
+ * Copy the LOGFONT associated with this typeface into the lf parameter. Note
+ * that the lfHeight will need to be set afterwards, since the typeface does
+ * not track this (the paint does).
+ * typeface may be NULL, in which case we return the logfont for the default font.
+ */
+SK_API void SkLOGFONTFromTypeface(const SkTypeface* typeface, LOGFONT* lf);
+
+/**
+ * Set an optional callback to ensure that the data behind a LOGFONT is loaded.
+ * This will get called if Skia tries to access the data but hits a failure.
+ * Normally this is null, and is only required if the font data needs to be
+ * remotely (re)loaded.
+ */
+SK_API void SkTypeface_SetEnsureLOGFONTAccessibleProc(void (*)(const LOGFONT&));
+
+// Experimental!
+//
+class SkFontMgr;
+class SkRemotableFontMgr;
+struct IDWriteFactory;
+struct IDWriteFontCollection;
+struct IDWriteFontFallback;
+struct IDWriteFontFace;
+
+/**
+ * Like the other Typeface create methods, this returns a new reference to the
+ * corresponding typeface for the specified dwrite font. The caller is responsible
+ * for calling unref() when it is finished.
+ */
+SK_API SkTypeface* SkCreateTypefaceFromDWriteFont(IDWriteFactory* aFactory,
+ IDWriteFontFace* aFontFace,
+ SkFontStyle aStyle,
+ int aRenderingMode,
+ float aGamma,
+ float aContrast,
+ float aClearTypeLevel);
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_GDI();
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory = nullptr,
+ IDWriteFontCollection* collection = nullptr);
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection,
+ IDWriteFontFallback* fallback);
+
+/**
+ * Creates an SkFontMgr which renders using DirectWrite and obtains its data
+ * from the SkRemotableFontMgr.
+ *
+ * If DirectWrite could not be initialized, will return NULL.
+ */
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWriteRenderer(sk_sp<SkRemotableFontMgr>);
+
+/**
+ * Creates an SkRemotableFontMgr backed by DirectWrite using the default
+ * system font collection in the current locale.
+ *
+ * If DirectWrite could not be initialized, will return NULL.
+ */
+SK_API sk_sp<SkRemotableFontMgr> SkRemotableFontMgr_New_DirectWrite();
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkTypeface_win_DEFINED
diff --git a/gfx/skia/skia/include/private/SkBitmaskEnum.h b/gfx/skia/skia/include/private/SkBitmaskEnum.h
new file mode 100644
index 0000000000..b25045359d
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkBitmaskEnum.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkEnumOperators_DEFINED
+#define SkEnumOperators_DEFINED
+
+#include <type_traits>
+
+namespace sknonstd {
+template <typename T> struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, bool> constexpr Any(E e) {
+ return static_cast<std::underlying_type_t<E>>(e) != 0;
+}
+} // namespace sknonstd
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator|(E l, E r) {
+ using U = std::underlying_type_t<E>;
+ return static_cast<E>(static_cast<U>(l) | static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator|=(E& l, E r) {
+ return l = l | r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator&(E l, E r) {
+ using U = std::underlying_type_t<E>;
+ return static_cast<E>(static_cast<U>(l) & static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator&=(E& l, E r) {
+ return l = l & r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator^(E l, E r) {
+ using U = std::underlying_type_t<E>;
+ return static_cast<E>(static_cast<U>(l) ^ static_cast<U>(r));
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E&> constexpr operator^=(E& l, E r) {
+ return l = l ^ r;
+}
+
+template <typename E>
+std::enable_if_t<sknonstd::is_bitmask_enum<E>::value, E> constexpr operator~(E e) {
+ return static_cast<E>(~static_cast<std::underlying_type_t<E>>(e));
+}
+
+#endif // SkEnumOperators_DEFINED
diff --git a/gfx/skia/skia/include/private/SkChecksum.h b/gfx/skia/skia/include/private/SkChecksum.h
new file mode 100644
index 0000000000..d36e726089
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkChecksum.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChecksum_DEFINED
+#define SkChecksum_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkOpts_spi.h"
+#include "include/private/base/SkTLogic.h"
+
+#include <string>
+#include <string_view>
+
+class SkChecksum {
+public:
+ SkChecksum() = default;
+ // Make noncopyable
+ SkChecksum(const SkChecksum&) = delete;
+ SkChecksum& operator=(const SkChecksum&) = delete;
+
+ /**
+ * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you
+ * suspect its low bits aren't well mixed.
+ *
+ * This is the Murmur3 finalizer.
+ */
+ static uint32_t Mix(uint32_t hash) {
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 13;
+ hash *= 0xc2b2ae35;
+ hash ^= hash >> 16;
+ return hash;
+ }
+
+ /**
+ * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you
+ * suspect its low bits aren't well mixed.
+ *
+ * This version is 2-lines cheaper than Mix, but seems to be sufficient for the font cache.
+ */
+ static uint32_t CheapMix(uint32_t hash) {
+ hash ^= hash >> 16;
+ hash *= 0x85ebca6b;
+ hash ^= hash >> 16;
+ return hash;
+ }
+};
+
+// SkGoodHash should usually be your first choice in hashing data.
+// It should be both reasonably fast and high quality.
+struct SkGoodHash {
+ template <typename K>
+ std::enable_if_t<sizeof(K) == 4, uint32_t> operator()(const K& k) const {
+ return SkChecksum::Mix(*(const uint32_t*)&k);
+ }
+
+ template <typename K>
+ std::enable_if_t<sizeof(K) != 4, uint32_t> operator()(const K& k) const {
+ return SkOpts::hash_fn(&k, sizeof(K), 0);
+ }
+
+ uint32_t operator()(const SkString& k) const {
+ return SkOpts::hash_fn(k.c_str(), k.size(), 0);
+ }
+
+ uint32_t operator()(const std::string& k) const {
+ return SkOpts::hash_fn(k.c_str(), k.size(), 0);
+ }
+
+ uint32_t operator()(std::string_view k) const {
+ return SkOpts::hash_fn(k.data(), k.size(), 0);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkColorData.h b/gfx/skia/skia/include/private/SkColorData.h
new file mode 100644
index 0000000000..1bef596a36
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkColorData.h
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorData_DEFINED
+#define SkColorData_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/private/base/SkTo.h"
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 16bit pixel to a 32bit pixel
+
+#define SK_R16_BITS 5
+#define SK_G16_BITS 6
+#define SK_B16_BITS 5
+
+#define SK_R16_SHIFT (SK_B16_BITS + SK_G16_BITS)
+#define SK_G16_SHIFT (SK_B16_BITS)
+#define SK_B16_SHIFT 0
+
+#define SK_R16_MASK ((1 << SK_R16_BITS) - 1)
+#define SK_G16_MASK ((1 << SK_G16_BITS) - 1)
+#define SK_B16_MASK ((1 << SK_B16_BITS) - 1)
+
+#define SkGetPackedR16(color) (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
+#define SkGetPackedG16(color) (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
+#define SkGetPackedB16(color) (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
+
+static inline unsigned SkR16ToR32(unsigned r) {
+ return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
+}
+
+static inline unsigned SkG16ToG32(unsigned g) {
+ return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
+}
+
+static inline unsigned SkB16ToB32(unsigned b) {
+ return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
+}
+
+#define SkPacked16ToR32(c) SkR16ToR32(SkGetPackedR16(c))
+#define SkPacked16ToG32(c) SkG16ToG32(SkGetPackedG16(c))
+#define SkPacked16ToB32(c) SkB16ToB32(SkGetPackedB16(c))
+
+//////////////////////////////////////////////////////////////////////////////
+
+#define SkASSERT_IS_BYTE(x) SkASSERT(0 == ((x) & ~0xFFu))
+
+// Reverse the bytes corresponding to RED and BLUE in a packed pixel. Note the
+// pair of them are in the same 2 slots in both RGBA and BGRA, thus there is
+// no need to pass in the colortype to this function.
+static inline uint32_t SkSwizzle_RB(uint32_t c) {
+ static const uint32_t kRBMask = (0xFF << SK_R32_SHIFT) | (0xFF << SK_B32_SHIFT);
+
+ unsigned c0 = (c >> SK_R32_SHIFT) & 0xFF;
+ unsigned c1 = (c >> SK_B32_SHIFT) & 0xFF;
+ return (c & ~kRBMask) | (c0 << SK_B32_SHIFT) | (c1 << SK_R32_SHIFT);
+}
+
+static inline uint32_t SkPackARGB_as_RGBA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkASSERT_IS_BYTE(a);
+ SkASSERT_IS_BYTE(r);
+ SkASSERT_IS_BYTE(g);
+ SkASSERT_IS_BYTE(b);
+ return (a << SK_RGBA_A32_SHIFT) | (r << SK_RGBA_R32_SHIFT) |
+ (g << SK_RGBA_G32_SHIFT) | (b << SK_RGBA_B32_SHIFT);
+}
+
+static inline uint32_t SkPackARGB_as_BGRA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ SkASSERT_IS_BYTE(a);
+ SkASSERT_IS_BYTE(r);
+ SkASSERT_IS_BYTE(g);
+ SkASSERT_IS_BYTE(b);
+ return (a << SK_BGRA_A32_SHIFT) | (r << SK_BGRA_R32_SHIFT) |
+ (g << SK_BGRA_G32_SHIFT) | (b << SK_BGRA_B32_SHIFT);
+}
+
+static inline SkPMColor SkSwizzle_RGBA_to_PMColor(uint32_t c) {
+#ifdef SK_PMCOLOR_IS_RGBA
+ return c;
+#else
+ return SkSwizzle_RB(c);
+#endif
+}
+
+static inline SkPMColor SkSwizzle_BGRA_to_PMColor(uint32_t c) {
+#ifdef SK_PMCOLOR_IS_BGRA
+ return c;
+#else
+ return SkSwizzle_RB(c);
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+///@{
+/** See ITU-R Recommendation BT.709 at http://www.itu.int/rec/R-REC-BT.709/ .*/
+#define SK_ITU_BT709_LUM_COEFF_R (0.2126f)
+#define SK_ITU_BT709_LUM_COEFF_G (0.7152f)
+#define SK_ITU_BT709_LUM_COEFF_B (0.0722f)
+///@}
+
+///@{
+/** A float value which specifies this channel's contribution to luminance. */
+#define SK_LUM_COEFF_R SK_ITU_BT709_LUM_COEFF_R
+#define SK_LUM_COEFF_G SK_ITU_BT709_LUM_COEFF_G
+#define SK_LUM_COEFF_B SK_ITU_BT709_LUM_COEFF_B
+///@}
+
+/** Computes the luminance from the given r, g, and b in accordance with
+ SK_LUM_COEFF_X. For correct results, r, g, and b should be in linear space.
+*/
+static inline U8CPU SkComputeLuminance(U8CPU r, U8CPU g, U8CPU b) {
+ //The following is
+ //r * SK_LUM_COEFF_R + g * SK_LUM_COEFF_G + b * SK_LUM_COEFF_B
+ //with SK_LUM_COEFF_X in 1.8 fixed point (rounding adjusted to sum to 256).
+ return (r * 54 + g * 183 + b * 19) >> 8;
+}
+
+/** Calculates 256 - (value * alpha256) / 255 in range [0,256],
+ * for [0,255] value and [0,256] alpha256.
+ */
+static inline U16CPU SkAlphaMulInv256(U16CPU value, U16CPU alpha256) {
+ unsigned prod = 0xFFFF - value * alpha256;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+// The caller may want negative values, so keep all params signed (int)
+// so we don't accidentally slip into unsigned math and lose the sign
+// extension when we shift (in SkAlphaMul)
+static inline int SkAlphaBlend(int src, int dst, int scale256) {
+ SkASSERT((unsigned)scale256 <= 256);
+ return dst + SkAlphaMul(src - dst, scale256);
+}
+
+static inline uint16_t SkPackRGB16(unsigned r, unsigned g, unsigned b) {
+ SkASSERT(r <= SK_R16_MASK);
+ SkASSERT(g <= SK_G16_MASK);
+ SkASSERT(b <= SK_B16_MASK);
+
+ return SkToU16((r << SK_R16_SHIFT) | (g << SK_G16_SHIFT) | (b << SK_B16_SHIFT));
+}
+
+#define SK_R16_MASK_IN_PLACE (SK_R16_MASK << SK_R16_SHIFT)
+#define SK_G16_MASK_IN_PLACE (SK_G16_MASK << SK_G16_SHIFT)
+#define SK_B16_MASK_IN_PLACE (SK_B16_MASK << SK_B16_SHIFT)
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Abstract 4-byte interpolation, implemented on top of SkPMColor
+ * utility functions. Third parameter controls blending of the first two:
+ * (src, dst, 0) returns dst
+ * (src, dst, 0xFF) returns src
+ * scale is [0..256], unlike SkFourByteInterp which takes [0..255]
+ */
+static inline SkPMColor SkFourByteInterp256(SkPMColor src, SkPMColor dst, int scale) {
+ unsigned a = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedA32(src), SkGetPackedA32(dst), scale));
+ unsigned r = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedR32(src), SkGetPackedR32(dst), scale));
+ unsigned g = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedG32(src), SkGetPackedG32(dst), scale));
+ unsigned b = SkTo<uint8_t>(SkAlphaBlend(SkGetPackedB32(src), SkGetPackedB32(dst), scale));
+
+ return SkPackARGB32(a, r, g, b);
+}
+
+/**
+ * Abstract 4-byte interpolation, implemented on top of SkPMColor
+ * utility functions. Third parameter controls blending of the first two:
+ * (src, dst, 0) returns dst
+ * (src, dst, 0xFF) returns src
+ */
+static inline SkPMColor SkFourByteInterp(SkPMColor src, SkPMColor dst, U8CPU srcWeight) {
+ int scale = (int)SkAlpha255To256(srcWeight);
+ return SkFourByteInterp256(src, dst, scale);
+}
+
+/**
+ * 0xAARRGGBB -> 0x00AA00GG, 0x00RR00BB
+ */
+static inline void SkSplay(uint32_t color, uint32_t* ag, uint32_t* rb) {
+ const uint32_t mask = 0x00FF00FF;
+ *ag = (color >> 8) & mask;
+ *rb = color & mask;
+}
+
+/**
+ * 0xAARRGGBB -> 0x00AA00GG00RR00BB
+ * (note, ARGB -> AGRB)
+ */
+static inline uint64_t SkSplay(uint32_t color) {
+ const uint32_t mask = 0x00FF00FF;
+ uint64_t agrb = (color >> 8) & mask; // 0x0000000000AA00GG
+ agrb <<= 32; // 0x00AA00GG00000000
+ agrb |= color & mask; // 0x00AA00GG00RR00BB
+ return agrb;
+}
+
+/**
+ * 0xAAxxGGxx, 0xRRxxBBxx-> 0xAARRGGBB
+ */
+static inline uint32_t SkUnsplay(uint32_t ag, uint32_t rb) {
+ const uint32_t mask = 0xFF00FF00;
+ return (ag & mask) | ((rb & mask) >> 8);
+}
+
+/**
+ * 0xAAxxGGxxRRxxBBxx -> 0xAARRGGBB
+ * (note, AGRB -> ARGB)
+ */
+static inline uint32_t SkUnsplay(uint64_t agrb) {
+ const uint32_t mask = 0xFF00FF00;
+ return SkPMColor(
+ ((agrb & mask) >> 8) | // 0x00RR00BB
+ ((agrb >> 32) & mask)); // 0xAARRGGBB
+}
+
+static inline SkPMColor SkFastFourByteInterp256_32(SkPMColor src, SkPMColor dst, unsigned scale) {
+ SkASSERT(scale <= 256);
+
+ // Two 8-bit blends per two 32-bit registers, with space to make sure the math doesn't collide.
+ uint32_t src_ag, src_rb, dst_ag, dst_rb;
+ SkSplay(src, &src_ag, &src_rb);
+ SkSplay(dst, &dst_ag, &dst_rb);
+
+ const uint32_t ret_ag = src_ag * scale + (256 - scale) * dst_ag;
+ const uint32_t ret_rb = src_rb * scale + (256 - scale) * dst_rb;
+
+ return SkUnsplay(ret_ag, ret_rb);
+}
+
+static inline SkPMColor SkFastFourByteInterp256_64(SkPMColor src, SkPMColor dst, unsigned scale) {
+ SkASSERT(scale <= 256);
+ // Four 8-bit blends in one 64-bit register, with space to make sure the math doesn't collide.
+ return SkUnsplay(SkSplay(src) * scale + (256-scale) * SkSplay(dst));
+}
+
+// TODO(mtklein): Replace slow versions with fast versions, using scale + (scale>>7) everywhere.
+
+/**
+ * Same as SkFourByteInterp256, but faster.
+ */
+static inline SkPMColor SkFastFourByteInterp256(SkPMColor src, SkPMColor dst, unsigned scale) {
+ // On a 64-bit machine, _64 is about 10% faster than _32, but ~40% slower on a 32-bit machine.
+ if (sizeof(void*) == 4) {
+ return SkFastFourByteInterp256_32(src, dst, scale);
+ } else {
+ return SkFastFourByteInterp256_64(src, dst, scale);
+ }
+}
+
+/**
+ * Nearly the same as SkFourByteInterp, but faster and a touch more accurate, due to better
+ * srcWeight scaling to [0, 256].
+ */
+static inline SkPMColor SkFastFourByteInterp(SkPMColor src, SkPMColor dst, U8CPU srcWeight) {
+ SkASSERT(srcWeight <= 255);
+ // scale = srcWeight + (srcWeight >> 7) is more accurate than
+ // scale = srcWeight + 1, but 7% slower
+ return SkFastFourByteInterp256(src, dst, srcWeight + (srcWeight >> 7));
+}
+
+/**
+ * Interpolates between colors src and dst using [0,256] scale.
+ */
+static inline SkPMColor SkPMLerp(SkPMColor src, SkPMColor dst, unsigned scale) {
+ return SkFastFourByteInterp256(src, dst, scale);
+}
+
+static inline SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa) {
+ SkASSERT((unsigned)aa <= 255);
+
+ unsigned src_scale = SkAlpha255To256(aa);
+ unsigned dst_scale = SkAlphaMulInv256(SkGetPackedA32(src), src_scale);
+
+ const uint32_t mask = 0xFF00FF;
+
+ uint32_t src_rb = (src & mask) * src_scale;
+ uint32_t src_ag = ((src >> 8) & mask) * src_scale;
+
+ uint32_t dst_rb = (dst & mask) * dst_scale;
+ uint32_t dst_ag = ((dst >> 8) & mask) * dst_scale;
+
+ return (((src_rb + dst_rb) >> 8) & mask) | ((src_ag + dst_ag) & ~mask);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Convert a 32bit pixel to a 16bit pixel (no dither)
+
+#define SkR32ToR16_MACRO(r) ((unsigned)(r) >> (SK_R32_BITS - SK_R16_BITS))
+#define SkG32ToG16_MACRO(g) ((unsigned)(g) >> (SK_G32_BITS - SK_G16_BITS))
+#define SkB32ToB16_MACRO(b) ((unsigned)(b) >> (SK_B32_BITS - SK_B16_BITS))
+
+#ifdef SK_DEBUG
+ static inline unsigned SkR32ToR16(unsigned r) {
+ SkR32Assert(r);
+ return SkR32ToR16_MACRO(r);
+ }
+ static inline unsigned SkG32ToG16(unsigned g) {
+ SkG32Assert(g);
+ return SkG32ToG16_MACRO(g);
+ }
+ static inline unsigned SkB32ToB16(unsigned b) {
+ SkB32Assert(b);
+ return SkB32ToB16_MACRO(b);
+ }
+#else
+ #define SkR32ToR16(r) SkR32ToR16_MACRO(r)
+ #define SkG32ToG16(g) SkG32ToG16_MACRO(g)
+ #define SkB32ToB16(b) SkB32ToB16_MACRO(b)
+#endif
+
+static inline U16CPU SkPixel32ToPixel16(SkPMColor c) {
+ unsigned r = ((c >> (SK_R32_SHIFT + (8 - SK_R16_BITS))) & SK_R16_MASK) << SK_R16_SHIFT;
+ unsigned g = ((c >> (SK_G32_SHIFT + (8 - SK_G16_BITS))) & SK_G16_MASK) << SK_G16_SHIFT;
+ unsigned b = ((c >> (SK_B32_SHIFT + (8 - SK_B16_BITS))) & SK_B16_MASK) << SK_B16_SHIFT;
+ return r | g | b;
+}
+
+static inline U16CPU SkPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
+ return (SkR32ToR16(r) << SK_R16_SHIFT) |
+ (SkG32ToG16(g) << SK_G16_SHIFT) |
+ (SkB32ToB16(b) << SK_B16_SHIFT);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+static inline SkColor SkPixel16ToColor(U16CPU src) {
+ SkASSERT(src == SkToU16(src));
+
+ unsigned r = SkPacked16ToR32(src);
+ unsigned g = SkPacked16ToG32(src);
+ unsigned b = SkPacked16ToB32(src);
+
+ SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
+ SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
+ SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));
+
+ return SkColorSetRGB(r, g, b);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef uint16_t SkPMColor16;
+
+// Put in OpenGL order (r g b a)
+#define SK_A4444_SHIFT 0
+#define SK_R4444_SHIFT 12
+#define SK_G4444_SHIFT 8
+#define SK_B4444_SHIFT 4
+
+static inline U8CPU SkReplicateNibble(unsigned nib) {
+ SkASSERT(nib <= 0xF);
+ return (nib << 4) | nib;
+}
+
+#define SkGetPackedA4444(c) (((unsigned)(c) >> SK_A4444_SHIFT) & 0xF)
+#define SkGetPackedR4444(c) (((unsigned)(c) >> SK_R4444_SHIFT) & 0xF)
+#define SkGetPackedG4444(c) (((unsigned)(c) >> SK_G4444_SHIFT) & 0xF)
+#define SkGetPackedB4444(c) (((unsigned)(c) >> SK_B4444_SHIFT) & 0xF)
+
+#define SkPacked4444ToA32(c) SkReplicateNibble(SkGetPackedA4444(c))
+
+static inline SkPMColor SkPixel4444ToPixel32(U16CPU c) {
+ uint32_t d = (SkGetPackedA4444(c) << SK_A32_SHIFT) |
+ (SkGetPackedR4444(c) << SK_R32_SHIFT) |
+ (SkGetPackedG4444(c) << SK_G32_SHIFT) |
+ (SkGetPackedB4444(c) << SK_B32_SHIFT);
+ return d | (d << 4);
+}
+
+using SkPMColor4f = SkRGBA4f<kPremul_SkAlphaType>;
+
+constexpr SkPMColor4f SK_PMColor4fTRANSPARENT = { 0, 0, 0, 0 };
+constexpr SkPMColor4f SK_PMColor4fBLACK = { 0, 0, 0, 1 };
+constexpr SkPMColor4f SK_PMColor4fWHITE = { 1, 1, 1, 1 };
+constexpr SkPMColor4f SK_PMColor4fILLEGAL = { SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity,
+ SK_FloatNegativeInfinity };
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkEncodedInfo.h b/gfx/skia/skia/include/private/SkEncodedInfo.h
new file mode 100644
index 0000000000..74e2ad1480
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkEncodedInfo.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEncodedInfo_DEFINED
+#define SkEncodedInfo_DEFINED
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "modules/skcms/skcms.h"
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+struct SkEncodedInfo {
+public:
+ class ICCProfile {
+ public:
+ static std::unique_ptr<ICCProfile> Make(sk_sp<SkData>);
+ static std::unique_ptr<ICCProfile> Make(const skcms_ICCProfile&);
+
+ const skcms_ICCProfile* profile() const { return &fProfile; }
+ private:
+ ICCProfile(const skcms_ICCProfile&, sk_sp<SkData> = nullptr);
+
+ skcms_ICCProfile fProfile;
+ sk_sp<SkData> fData;
+ };
+
+ enum Alpha {
+ kOpaque_Alpha,
+ kUnpremul_Alpha,
+
+ // Each pixel is either fully opaque or fully transparent.
+ // There is no difference between requesting kPremul or kUnpremul.
+ kBinary_Alpha,
+ };
+
+ /*
+ * We strive to make the number of components per pixel obvious through
+ * our naming conventions.
+ * Ex: kRGB has 3 components. kRGBA has 4 components.
+ *
+ * This sometimes results in redundant Alpha and Color information.
+ * Ex: kRGB images must also be kOpaque.
+ */
+ enum Color {
+ // PNG, WBMP
+ kGray_Color,
+
+ // PNG
+ kGrayAlpha_Color,
+
+ // PNG with Skia-specific sBIT
+ // Like kGrayAlpha, except this expects to be treated as
+ // kAlpha_8_SkColorType, which ignores the gray component. If
+ // decoded to full color (e.g. kN32), the gray component is respected
+ // (so it can share code with kGrayAlpha).
+ kXAlpha_Color,
+
+ // PNG
+ // 565 images may be encoded to PNG by specifying the number of
+ // significant bits for each channel. This is a strange 565
+ // representation because the image is still encoded with 8 bits per
+ // component.
+ k565_Color,
+
+ // PNG, GIF, BMP
+ kPalette_Color,
+
+ // PNG, RAW
+ kRGB_Color,
+ kRGBA_Color,
+
+ // BMP
+ kBGR_Color,
+ kBGRX_Color,
+ kBGRA_Color,
+
+ // JPEG, WEBP
+ kYUV_Color,
+
+ // WEBP
+ kYUVA_Color,
+
+ // JPEG
+ // Photoshop actually writes inverted CMYK data into JPEGs, where zero
+ // represents 100% ink coverage. For this reason, we treat CMYK JPEGs
+ // as having inverted CMYK. libjpeg-turbo warns that this may break
+ // other applications, but the CMYK JPEGs we see on the web expect to
+ // be treated as inverted CMYK.
+ kInvertedCMYK_Color,
+ kYCCK_Color,
+ };
+
+ static SkEncodedInfo Make(int width, int height, Color color, Alpha alpha,
+ int bitsPerComponent) {
+ return Make(width, height, color, alpha, bitsPerComponent, nullptr);
+ }
+
+ static SkEncodedInfo Make(int width, int height, Color color,
+ Alpha alpha, int bitsPerComponent, std::unique_ptr<ICCProfile> profile) {
+ return Make(width, height, color, alpha, /*bitsPerComponent*/ bitsPerComponent,
+ std::move(profile), /*colorDepth*/ bitsPerComponent);
+ }
+
+ static SkEncodedInfo Make(int width, int height, Color color,
+ Alpha alpha, int bitsPerComponent, std::unique_ptr<ICCProfile> profile,
+ int colorDepth) {
+ SkASSERT(1 == bitsPerComponent ||
+ 2 == bitsPerComponent ||
+ 4 == bitsPerComponent ||
+ 8 == bitsPerComponent ||
+ 16 == bitsPerComponent);
+
+ switch (color) {
+ case kGray_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ break;
+ case kGrayAlpha_Color:
+ SkASSERT(kOpaque_Alpha != alpha);
+ break;
+ case kPalette_Color:
+ SkASSERT(16 != bitsPerComponent);
+ break;
+ case kRGB_Color:
+ case kBGR_Color:
+ case kBGRX_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(bitsPerComponent >= 8);
+ break;
+ case kYUV_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case kRGBA_Color:
+ SkASSERT(bitsPerComponent >= 8);
+ break;
+ case kBGRA_Color:
+ case kYUVA_Color:
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case kXAlpha_Color:
+ SkASSERT(kUnpremul_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ case k565_Color:
+ SkASSERT(kOpaque_Alpha == alpha);
+ SkASSERT(8 == bitsPerComponent);
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+
+ return SkEncodedInfo(width, height, color, alpha,
+ bitsPerComponent, colorDepth, std::move(profile));
+ }
+
+ /*
+ * Returns a recommended SkImageInfo.
+ *
+ * TODO: Leave this up to the client.
+ */
+ SkImageInfo makeImageInfo() const {
+ auto ct = kGray_Color == fColor ? kGray_8_SkColorType :
+ kXAlpha_Color == fColor ? kAlpha_8_SkColorType :
+ k565_Color == fColor ? kRGB_565_SkColorType :
+ kN32_SkColorType ;
+ auto alpha = kOpaque_Alpha == fAlpha ? kOpaque_SkAlphaType
+ : kUnpremul_SkAlphaType;
+ sk_sp<SkColorSpace> cs = fProfile ? SkColorSpace::Make(*fProfile->profile())
+ : nullptr;
+ if (!cs) {
+ cs = SkColorSpace::MakeSRGB();
+ }
+ return SkImageInfo::Make(fWidth, fHeight, ct, alpha, std::move(cs));
+ }
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ Color color() const { return fColor; }
+ Alpha alpha() const { return fAlpha; }
+ bool opaque() const { return fAlpha == kOpaque_Alpha; }
+ const skcms_ICCProfile* profile() const {
+ if (!fProfile) return nullptr;
+ return fProfile->profile();
+ }
+
+ uint8_t bitsPerComponent() const { return fBitsPerComponent; }
+
+ uint8_t bitsPerPixel() const {
+ switch (fColor) {
+ case kGray_Color:
+ return fBitsPerComponent;
+ case kXAlpha_Color:
+ case kGrayAlpha_Color:
+ return 2 * fBitsPerComponent;
+ case kPalette_Color:
+ return fBitsPerComponent;
+ case kRGB_Color:
+ case kBGR_Color:
+ case kYUV_Color:
+ case k565_Color:
+ return 3 * fBitsPerComponent;
+ case kRGBA_Color:
+ case kBGRA_Color:
+ case kBGRX_Color:
+ case kYUVA_Color:
+ case kInvertedCMYK_Color:
+ case kYCCK_Color:
+ return 4 * fBitsPerComponent;
+ default:
+ SkASSERT(false);
+ return 0;
+ }
+ }
+
+ SkEncodedInfo(const SkEncodedInfo& orig) = delete;
+ SkEncodedInfo& operator=(const SkEncodedInfo&) = delete;
+
+ SkEncodedInfo(SkEncodedInfo&& orig) = default;
+ SkEncodedInfo& operator=(SkEncodedInfo&&) = default;
+
+ // Explicit copy method, to avoid accidental copying.
+ SkEncodedInfo copy() const {
+ auto copy = SkEncodedInfo::Make(
+ fWidth, fHeight, fColor, fAlpha, fBitsPerComponent, nullptr, fColorDepth);
+ if (fProfile) {
+ copy.fProfile = std::make_unique<ICCProfile>(*fProfile);
+ }
+ return copy;
+ }
+
+ // Return number of bits of R/G/B channel
+ uint8_t getColorDepth() const {
+ return fColorDepth;
+ }
+
+private:
+ SkEncodedInfo(int width, int height, Color color, Alpha alpha,
+ uint8_t bitsPerComponent, uint8_t colorDepth, std::unique_ptr<ICCProfile> profile)
+ : fWidth(width)
+ , fHeight(height)
+ , fColor(color)
+ , fAlpha(alpha)
+ , fBitsPerComponent(bitsPerComponent)
+ , fColorDepth(colorDepth)
+ , fProfile(std::move(profile))
+ {}
+
+ int fWidth;
+ int fHeight;
+ Color fColor;
+ Alpha fAlpha;
+ uint8_t fBitsPerComponent;
+ uint8_t fColorDepth;
+ std::unique_ptr<ICCProfile> fProfile;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkGainmapInfo.h b/gfx/skia/skia/include/private/SkGainmapInfo.h
new file mode 100644
index 0000000000..d477371188
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkGainmapInfo.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGainmapInfo_DEFINED
+#define SkGainmapInfo_DEFINED
+
+#include "include/core/SkColor.h"
+
+/**
+ * Gainmap rendering parameters. Suppose our display has HDR to SDR ratio of H and we wish to
+ * display an image with gainmap on this display. Let B be the pixel value from the base image
+ * in a color space that has the primaries of the base image and a linear transfer function. Let
+ * G be the pixel value from the gainmap. Let D be the output pixel in the same color space as B.
+ * The value of D is computed as follows:
+ *
+ * First, let W be a weight parameter determing how much the gainmap will be applied.
+ * W = clamp((log(H) - log(fDisplayRatioSdr)) /
+ * (log(fDisplayRatioHdr) - log(fDisplayRatioSdr), 0, 1)
+ *
+ * Next, let L be the gainmap value in log space. We compute this from the value G that was
+ * sampled from the texture as follows:
+ * L = mix(log(fGainmapRatioMin), log(fGainmapRatioMax), pow(G, fGainmapGamma))
+ *
+ * Finally, apply the gainmap to compute D, the displayed pixel. If the base image is SDR then
+ * compute:
+ * D = (B + fEpsilonSdr) * exp(L * W) - fEpsilonHdr
+ * If the base image is HDR then compute:
+ * D = (B + fEpsilonHdr) * exp(L * (W - 1)) - fEpsilonSdr
+ *
+ * In the above math, log() is a natural logarithm and exp() is natural exponentiation. Note,
+ * however, that the base used for the log() and exp() functions does not affect the results of
+ * the computation (it cancels out, as long as the same base is used throughout).
+ */
+struct SkGainmapInfo {
+ /**
+ * Parameters for converting the gainmap from its image encoding to log space. These are
+ * specified per color channel. The alpha value is unused.
+ */
+ SkColor4f fGainmapRatioMin = {1.f, 1.f, 1.f, 1.0};
+ SkColor4f fGainmapRatioMax = {2.f, 2.f, 2.f, 1.0};
+ SkColor4f fGainmapGamma = {1.f, 1.f, 1.f, 1.f};
+
+ /**
+ * Parameters sometimes used in gainmap computation to avoid numerical instability.
+ */
+ SkColor4f fEpsilonSdr = {0.f, 0.f, 0.f, 1.0};
+ SkColor4f fEpsilonHdr = {0.f, 0.f, 0.f, 1.0};
+
+ /**
+ * If the output display's HDR to SDR ratio is less or equal than fDisplayRatioSdr then the SDR
+ * rendition is displayed. If the output display's HDR to SDR ratio is greater or equal than
+ * fDisplayRatioHdr then the HDR rendition is displayed. If the output display's HDR to SDR
+ * ratio is between these values then an interpolation between the two is displayed using the
+ * math above.
+ */
+ float fDisplayRatioSdr = 1.f;
+ float fDisplayRatioHdr = 2.f;
+
+ /**
+ * Whether the base image is the SDR image or the HDR image.
+ */
+ enum class BaseImageType {
+ kSDR,
+ kHDR,
+ };
+ BaseImageType fBaseImageType = BaseImageType::kSDR;
+
+ // TODO(ccameron): Remove these parameters after the new parameters roll into Android.
+ SkColor4f fLogRatioMin = {0.f, 0.f, 0.f, 1.0};
+ SkColor4f fLogRatioMax = {1.f, 1.f, 1.f, 1.0};
+ float fHdrRatioMin = 1.f;
+ float fHdrRatioMax = 50.f;
+
+ /**
+ * The type of file that created this gainmap.
+ */
+ enum class Type {
+ kUnknown,
+ kMultiPicture,
+ kJpegR_Linear,
+ kJpegR_HLG,
+ kJpegR_PQ,
+ kHDRGM,
+ };
+ Type fType = Type::kUnknown;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkGainmapShader.h b/gfx/skia/skia/include/private/SkGainmapShader.h
new file mode 100644
index 0000000000..f490ab96a4
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkGainmapShader.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGainmapShader_DEFINED
+#define SkGainmapShader_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkColorSpace;
+class SkShader;
+class SkImage;
+struct SkGainmapInfo;
+struct SkRect;
+struct SkSamplingOptions;
+
+/**
+ * A gainmap shader will apply a gainmap to an base image using the math described alongside the
+ * definition of SkGainmapInfo.
+ */
+class SK_API SkGainmapShader {
+public:
+ /**
+ * Make a gainmap shader.
+ *
+ * When sampling the base image baseImage, the rectangle baseRect will be sampled to map to
+ * the rectangle dstRect. Sampling will be done according to baseSamplingOptions.
+ *
+ * When sampling the gainmap image gainmapImage, the rectangle gainmapRect will be sampled to
+ * map to the rectangle dstRect. Sampling will be done according to gainmapSamplingOptions.
+ *
+ * The gainmap will be applied according to the HDR to SDR ratio specified in dstHdrRatio.
+ *
+ * This shader must know the color space of the canvas that it will be rendered to. This color
+ * space must be specified in dstColorSpace.
+ * TODO(ccameron): Remove the need for dstColorSpace.
+ */
+ static sk_sp<SkShader> Make(const sk_sp<const SkImage>& baseImage,
+ const SkRect& baseRect,
+ const SkSamplingOptions& baseSamplingOptions,
+ const sk_sp<const SkImage>& gainmapImage,
+ const SkRect& gainmapRect,
+ const SkSamplingOptions& gainmapSamplingOptions,
+ const SkGainmapInfo& gainmapInfo,
+ const SkRect& dstRect,
+ float dstHdrRatio,
+ sk_sp<SkColorSpace> dstColorSpace);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkIDChangeListener.h b/gfx/skia/skia/include/private/SkIDChangeListener.h
new file mode 100644
index 0000000000..a32dae1a5a
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkIDChangeListener.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIDChangeListener_DEFINED
+#define SkIDChangeListener_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkThreadAnnotations.h"
+
+#include <atomic>
+
+/**
+ * Used to be notified when a gen/unique ID is invalidated, typically to preemptively purge
+ * associated items from a cache that are no longer reachable. The listener can
+ * be marked for deregistration if the cached item is remove before the listener is
+ * triggered. This prevents unbounded listener growth when cache items are routinely
+ * removed before the gen ID/unique ID is invalidated.
+ */
+class SkIDChangeListener : public SkRefCnt {
+public:
+ SkIDChangeListener();
+
+ ~SkIDChangeListener() override;
+
+ virtual void changed() = 0;
+
+ /**
+ * Mark the listener is no longer needed. It should be removed and changed() should not be
+ * called.
+ */
+ void markShouldDeregister() { fShouldDeregister.store(true, std::memory_order_relaxed); }
+
+ /** Indicates whether markShouldDeregister was called. */
+ bool shouldDeregister() { return fShouldDeregister.load(std::memory_order_acquire); }
+
+ /** Manages a list of SkIDChangeListeners. */
+ class List {
+ public:
+ List();
+
+ ~List();
+
+ /**
+ * Add a new listener to the list. It must not already be deregistered. Also clears out
+ * previously deregistered listeners.
+ */
+ void add(sk_sp<SkIDChangeListener> listener) SK_EXCLUDES(fMutex);
+
+ /**
+ * The number of registered listeners (including deregisterd listeners that are yet-to-be
+ * removed.
+ */
+ int count() const SK_EXCLUDES(fMutex);
+
+ /** Calls changed() on all listeners that haven't been deregistered and resets the list. */
+ void changed() SK_EXCLUDES(fMutex);
+
+ /** Resets without calling changed() on the listeners. */
+ void reset() SK_EXCLUDES(fMutex);
+
+ private:
+ mutable SkMutex fMutex;
+ SkSTArray<1, sk_sp<SkIDChangeListener>> fListeners SK_GUARDED_BY(fMutex);
+ };
+
+private:
+ std::atomic<bool> fShouldDeregister;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkJpegGainmapEncoder.h b/gfx/skia/skia/include/private/SkJpegGainmapEncoder.h
new file mode 100644
index 0000000000..756de78b23
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkJpegGainmapEncoder.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJpegGainmapEncoder_DEFINED
+#define SkJpegGainmapEncoder_DEFINED
+
+#include "include/encode/SkJpegEncoder.h"
+
+class SkPixmap;
+class SkWStream;
+struct SkGainmapInfo;
+
+class SK_API SkJpegGainmapEncoder {
+public:
+ /**
+ * Encode a JpegR image to |dst|.
+ *
+ * The base image is specified by |base|, and |baseOptions| controls the encoding behavior for
+ * the base image.
+ *
+ * The gainmap image is specified by |gainmap|, and |gainmapOptions| controls the encoding
+ * behavior for the gainmap image.
+ *
+ * The rendering behavior of the gainmap image is provided in |gainmapInfo|. Not all gainmap
+ * based images are compatible with JpegR. If the image is not compatible with JpegR, then
+ * convert the gainmap to a format that is capable with JpegR. This conversion may result in
+ * less precise quantization of the gainmap image.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ static bool EncodeJpegR(SkWStream* dst,
+ const SkPixmap& base,
+ const SkJpegEncoder::Options& baseOptions,
+ const SkPixmap& gainmap,
+ const SkJpegEncoder::Options& gainmapOptions,
+ const SkGainmapInfo& gainmapInfo);
+
+ /**
+ * Encode an HDRGM image to |dst|.
+ *
+ * The base image is specified by |base|, and |baseOptions| controls the encoding behavior for
+ * the base image.
+ *
+ * The gainmap image is specified by |gainmap|, and |gainmapOptions| controls the encoding
+ * behavior for the gainmap image.
+ *
+ * The rendering behavior of the gainmap image is provided in |gainmapInfo|.
+ *
+ * If |baseOptions| or |gainmapOptions| specify XMP metadata, then that metadata will be
+ * overwritten.
+ *
+ * Returns true on success. Returns false on an invalid or unsupported |src|.
+ */
+ static bool EncodeHDRGM(SkWStream* dst,
+ const SkPixmap& base,
+ const SkJpegEncoder::Options& baseOptions,
+ const SkPixmap& gainmap,
+ const SkJpegEncoder::Options& gainmapOptions,
+ const SkGainmapInfo& gainmapInfo);
+
+ /**
+ * Write a Multi Picture Format containing the |imageCount| images specified by |images|.
+ */
+ static bool MakeMPF(SkWStream* dst, const SkData** images, size_t imageCount);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkOpts_spi.h b/gfx/skia/skia/include/private/SkOpts_spi.h
new file mode 100644
index 0000000000..6e888b77c8
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkOpts_spi.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpts_spi_DEFINED
+#define SkOpts_spi_DEFINED
+
+#include "include/private/base/SkAPI.h"
+
+#include <cstddef>
+
+// These are exposed as SK_SPI (e.g. SkParagraph), the rest of SkOpts is
+// declared in src/core
+
+namespace SkOpts {
+ // The fastest high quality 32-bit hash we can provide on this platform.
+ extern uint32_t SK_SPI (*hash_fn)(const void* data, size_t bytes, uint32_t seed);
+} // namespace SkOpts
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkPathRef.h b/gfx/skia/skia/include/private/SkPathRef.h
new file mode 100644
index 0000000000..5e48086d35
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkPathRef.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathRef_DEFINED
+#define SkPathRef_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkIDChangeListener.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTo.h"
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <tuple>
+#include <utility>
+
+class SkMatrix;
+class SkRRect;
+
+// These are computed from a stream of verbs
+struct SkPathVerbAnalysis {
+ bool valid;
+ int points, weights;
+ unsigned segmentMask;
+};
+SkPathVerbAnalysis sk_path_analyze_verbs(const uint8_t verbs[], int count);
+
+
+/**
+ * Holds the path verbs and points. It is versioned by a generation ID. None of its public methods
+ * modify the contents. To modify or append to the verbs/points wrap the SkPathRef in an
+ * SkPathRef::Editor object. Installing the editor resets the generation ID. It also performs
+ * copy-on-write if the SkPathRef is shared by multiple SkPaths. The caller passes the Editor's
+ * constructor a pointer to a sk_sp<SkPathRef>, which may be updated to point to a new SkPathRef
+ * after the editor's constructor returns.
+ *
+ * The points and verbs are stored in a single allocation. The points are at the begining of the
+ * allocation while the verbs are stored at end of the allocation, in reverse order. Thus the points
+ * and verbs both grow into the middle of the allocation until the meet. To access verb i in the
+ * verb array use ref.verbs()[~i] (because verbs() returns a pointer just beyond the first
+ * logical verb or the last verb in memory).
+ */
+
+class SK_API SkPathRef final : public SkNVRefCnt<SkPathRef> {
+public:
+ // See https://bugs.chromium.org/p/skia/issues/detail?id=13817 for how these sizes were
+ // determined.
+ using PointsArray = SkSTArray<4, SkPoint>;
+ using VerbsArray = SkSTArray<4, uint8_t>;
+ using ConicWeightsArray = SkSTArray<2, SkScalar>;
+
+ SkPathRef(PointsArray points, VerbsArray verbs, ConicWeightsArray weights,
+ unsigned segmentMask)
+ : fPoints(std::move(points))
+ , fVerbs(std::move(verbs))
+ , fConicWeights(std::move(weights))
+ {
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = 0; // recompute
+ fSegmentMask = segmentMask;
+ fIsOval = false;
+ fIsRRect = false;
+ // The next two values don't matter unless fIsOval or fIsRRect are true.
+ fRRectOrOvalIsCCW = false;
+ fRRectOrOvalStartIdx = 0xAC;
+ SkDEBUGCODE(fEditorsAttached.store(0);)
+
+ this->computeBounds(); // do this now, before we worry about multiple owners/threads
+ SkDEBUGCODE(this->validate();)
+ }
+
+ class Editor {
+ public:
+ Editor(sk_sp<SkPathRef>* pathRef,
+ int incReserveVerbs = 0,
+ int incReservePoints = 0);
+
+ ~Editor() { SkDEBUGCODE(fPathRef->fEditorsAttached--;) }
+
+ /**
+ * Returns the array of points.
+ */
+ SkPoint* writablePoints() { return fPathRef->getWritablePoints(); }
+ const SkPoint* points() const { return fPathRef->points(); }
+
+ /**
+ * Gets the ith point. Shortcut for this->points() + i
+ */
+ SkPoint* atPoint(int i) { return fPathRef->getWritablePoints() + i; }
+ const SkPoint* atPoint(int i) const { return &fPathRef->fPoints[i]; }
+
+ /**
+ * Adds the verb and allocates space for the number of points indicated by the verb. The
+ * return value is a pointer to where the points for the verb should be written.
+ * 'weight' is only used if 'verb' is kConic_Verb
+ */
+ SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight = 0) {
+ SkDEBUGCODE(fPathRef->validate();)
+ return fPathRef->growForVerb(verb, weight);
+ }
+
+ /**
+ * Allocates space for multiple instances of a particular verb and the
+ * requisite points & weights.
+ * The return pointer points at the first new point (indexed normally [<i>]).
+ * If 'verb' is kConic_Verb, 'weights' will return a pointer to the
+ * space for the conic weights (indexed normally).
+ */
+ SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb,
+ int numVbs,
+ SkScalar** weights = nullptr) {
+ return fPathRef->growForRepeatedVerb(verb, numVbs, weights);
+ }
+
+ /**
+ * Concatenates all verbs from 'path' onto the pathRef's verbs array. Increases the point
+ * count by the number of points in 'path', and the conic weight count by the number of
+ * conics in 'path'.
+ *
+ * Returns pointers to the uninitialized points and conic weights data.
+ */
+ std::tuple<SkPoint*, SkScalar*> growForVerbsInPath(const SkPathRef& path) {
+ return fPathRef->growForVerbsInPath(path);
+ }
+
+ /**
+ * Resets the path ref to a new verb and point count. The new verbs and points are
+ * uninitialized.
+ */
+ void resetToSize(int newVerbCnt, int newPointCnt, int newConicCount) {
+ fPathRef->resetToSize(newVerbCnt, newPointCnt, newConicCount);
+ }
+
+ /**
+ * Gets the path ref that is wrapped in the Editor.
+ */
+ SkPathRef* pathRef() { return fPathRef; }
+
+ void setIsOval(bool isOval, bool isCCW, unsigned start) {
+ fPathRef->setIsOval(isOval, isCCW, start);
+ }
+
+ void setIsRRect(bool isRRect, bool isCCW, unsigned start) {
+ fPathRef->setIsRRect(isRRect, isCCW, start);
+ }
+
+ void setBounds(const SkRect& rect) { fPathRef->setBounds(rect); }
+
+ private:
+ SkPathRef* fPathRef;
+ };
+
+ class SK_API Iter {
+ public:
+ Iter();
+ Iter(const SkPathRef&);
+
+ void setPathRef(const SkPathRef&);
+
+ /** Return the next verb in this iteration of the path. When all
+ segments have been visited, return kDone_Verb.
+
+ If any point in the path is non-finite, return kDone_Verb immediately.
+
+ @param pts The points representing the current verb and/or segment
+ This must not be NULL.
+ @return The verb for the current segment
+ */
+ uint8_t next(SkPoint pts[4]);
+ uint8_t peek() const;
+
+ SkScalar conicWeight() const { return *fConicWeights; }
+
+ private:
+ const SkPoint* fPts;
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbStop;
+ const SkScalar* fConicWeights;
+ };
+
+public:
+ /**
+ * Gets a path ref with no verbs or points.
+ */
+ static SkPathRef* CreateEmpty();
+
+ /**
+ * Returns true if all of the points in this path are finite, meaning there
+ * are no infinities and no NaNs.
+ */
+ bool isFinite() const {
+ if (fBoundsIsDirty) {
+ this->computeBounds();
+ }
+ return SkToBool(fIsFinite);
+ }
+
+ /**
+ * Returns a mask, where each bit corresponding to a SegmentMask is
+ * set if the path contains 1 or more segments of that type.
+ * Returns 0 for an empty path (no segments).
+ */
+ uint32_t getSegmentMasks() const { return fSegmentMask; }
+
+ /** Returns true if the path is an oval.
+ *
+ * @param rect returns the bounding rect of this oval. It's a circle
+ * if the height and width are the same.
+ * @param isCCW is the oval CCW (or CW if false).
+ * @param start indicates where the contour starts on the oval (see
+ * SkPath::addOval for intepretation of the index).
+ *
+ * @return true if this path is an oval.
+ * Tracking whether a path is an oval is considered an
+ * optimization for performance and so some paths that are in
+ * fact ovals can report false.
+ */
+ bool isOval(SkRect* rect, bool* isCCW, unsigned* start) const {
+ if (fIsOval) {
+ if (rect) {
+ *rect = this->getBounds();
+ }
+ if (isCCW) {
+ *isCCW = SkToBool(fRRectOrOvalIsCCW);
+ }
+ if (start) {
+ *start = fRRectOrOvalStartIdx;
+ }
+ }
+
+ return SkToBool(fIsOval);
+ }
+
+ bool isRRect(SkRRect* rrect, bool* isCCW, unsigned* start) const;
+
+ bool hasComputedBounds() const {
+ return !fBoundsIsDirty;
+ }
+
+ /** Returns the bounds of the path's points. If the path contains 0 or 1
+ points, the bounds is set to (0,0,0,0), and isEmpty() will return true.
+ Note: this bounds may be larger than the actual shape, since curves
+ do not extend as far as their control points.
+ */
+ const SkRect& getBounds() const {
+ if (fBoundsIsDirty) {
+ this->computeBounds();
+ }
+ return fBounds;
+ }
+
+ SkRRect getRRect() const;
+
+ /**
+ * Transforms a path ref by a matrix, allocating a new one only if necessary.
+ */
+ static void CreateTransformedCopy(sk_sp<SkPathRef>* dst,
+ const SkPathRef& src,
+ const SkMatrix& matrix);
+
+ // static SkPathRef* CreateFromBuffer(SkRBuffer* buffer);
+
+ /**
+ * Rollsback a path ref to zero verbs and points with the assumption that the path ref will be
+ * repopulated with approximately the same number of verbs and points. A new path ref is created
+ * only if necessary.
+ */
+ static void Rewind(sk_sp<SkPathRef>* pathRef);
+
+ ~SkPathRef();
+ int countPoints() const { return fPoints.size(); }
+ int countVerbs() const { return fVerbs.size(); }
+ int countWeights() const { return fConicWeights.size(); }
+
+ size_t approximateBytesUsed() const;
+
+ /**
+ * Returns a pointer one beyond the first logical verb (last verb in memory order).
+ */
+ const uint8_t* verbsBegin() const { return fVerbs.begin(); }
+
+ /**
+ * Returns a const pointer to the first verb in memory (which is the last logical verb).
+ */
+ const uint8_t* verbsEnd() const { return fVerbs.end(); }
+
+ /**
+ * Returns a const pointer to the first point.
+ */
+ const SkPoint* points() const { return fPoints.begin(); }
+
+ /**
+ * Shortcut for this->points() + this->countPoints()
+ */
+ const SkPoint* pointsEnd() const { return this->points() + this->countPoints(); }
+
+ const SkScalar* conicWeights() const { return fConicWeights.begin(); }
+ const SkScalar* conicWeightsEnd() const { return fConicWeights.end(); }
+
+ /**
+ * Convenience methods for getting to a verb or point by index.
+ */
+ uint8_t atVerb(int index) const { return fVerbs[index]; }
+ const SkPoint& atPoint(int index) const { return fPoints[index]; }
+
+ bool operator== (const SkPathRef& ref) const;
+
+ void interpolate(const SkPathRef& ending, SkScalar weight, SkPathRef* out) const;
+
+ /**
+ * Gets an ID that uniquely identifies the contents of the path ref. If two path refs have the
+ * same ID then they have the same verbs and points. However, two path refs may have the same
+ * contents but different genIDs.
+ * skbug.com/1762 for background on why fillType is necessary (for now).
+ */
+ uint32_t genID(uint8_t fillType) const;
+
+ void addGenIDChangeListener(sk_sp<SkIDChangeListener>); // Threadsafe.
+ int genIDChangeListenerCount(); // Threadsafe
+
+ bool dataMatchesVerbs() const;
+ bool isValid() const;
+ SkDEBUGCODE(void validate() const { SkASSERT(this->isValid()); } )
+
+ /**
+ * Resets this SkPathRef to a clean state.
+ */
+ void reset();
+
+ bool isInitialEmptyPathRef() const {
+ return fGenerationID == kEmptyGenID;
+ }
+
+private:
+ enum SerializationOffsets {
+ kLegacyRRectOrOvalStartIdx_SerializationShift = 28, // requires 3 bits, ignored.
+ kLegacyRRectOrOvalIsCCW_SerializationShift = 27, // requires 1 bit, ignored.
+ kLegacyIsRRect_SerializationShift = 26, // requires 1 bit, ignored.
+ kIsFinite_SerializationShift = 25, // requires 1 bit
+ kLegacyIsOval_SerializationShift = 24, // requires 1 bit, ignored.
+ kSegmentMask_SerializationShift = 0 // requires 4 bits (deprecated)
+ };
+
+ SkPathRef(int numVerbs = 0, int numPoints = 0) {
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = kEmptyGenID;
+ fSegmentMask = 0;
+ fIsOval = false;
+ fIsRRect = false;
+ // The next two values don't matter unless fIsOval or fIsRRect are true.
+ fRRectOrOvalIsCCW = false;
+ fRRectOrOvalStartIdx = 0xAC;
+ if (numPoints > 0)
+ fPoints.reserve_back(numPoints);
+ if (numVerbs > 0)
+ fVerbs.reserve_back(numVerbs);
+ SkDEBUGCODE(fEditorsAttached.store(0);)
+ SkDEBUGCODE(this->validate();)
+ }
+
+ void copy(const SkPathRef& ref, int additionalReserveVerbs, int additionalReservePoints);
+
+ // Return true if the computed bounds are finite.
+ static bool ComputePtBounds(SkRect* bounds, const SkPathRef& ref) {
+ return bounds->setBoundsCheck(ref.points(), ref.countPoints());
+ }
+
+ // called, if dirty, by getBounds()
+ void computeBounds() const {
+ SkDEBUGCODE(this->validate();)
+ // TODO(mtklein): remove fBoundsIsDirty and fIsFinite,
+ // using an inverted rect instead of fBoundsIsDirty and always recalculating fIsFinite.
+ SkASSERT(fBoundsIsDirty);
+
+ fIsFinite = ComputePtBounds(&fBounds, *this);
+ fBoundsIsDirty = false;
+ }
+
+ void setBounds(const SkRect& rect) {
+ SkASSERT(rect.fLeft <= rect.fRight && rect.fTop <= rect.fBottom);
+ fBounds = rect;
+ fBoundsIsDirty = false;
+ fIsFinite = fBounds.isFinite();
+ }
+
+ /** Makes additional room but does not change the counts or change the genID */
+ void incReserve(int additionalVerbs, int additionalPoints) {
+ SkDEBUGCODE(this->validate();)
+ // Use reserve() so that if there is not enough space, the array will grow with some
+ // additional space. This ensures repeated calls to grow won't always allocate.
+ if (additionalPoints > 0)
+ fPoints.reserve(fPoints.size() + additionalPoints);
+ if (additionalVerbs > 0)
+ fVerbs.reserve(fVerbs.size() + additionalVerbs);
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /**
+ * Resets all state except that of the verbs, points, and conic-weights.
+ * Intended to be called from other functions that reset state.
+ */
+ void commonReset() {
+ SkDEBUGCODE(this->validate();)
+ this->callGenIDChangeListeners();
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fGenerationID = 0;
+
+ fSegmentMask = 0;
+ fIsOval = false;
+ fIsRRect = false;
+ }
+
+ /** Resets the path ref with verbCount verbs and pointCount points, all uninitialized. Also
+ * allocates space for reserveVerb additional verbs and reservePoints additional points.*/
+ void resetToSize(int verbCount, int pointCount, int conicCount,
+ int reserveVerbs = 0, int reservePoints = 0) {
+ commonReset();
+ // Use reserve_back() so the arrays are sized to exactly fit the data.
+ const int pointDelta = pointCount + reservePoints - fPoints.size();
+ if (pointDelta > 0) {
+ fPoints.reserve_back(pointDelta);
+ }
+ fPoints.resize_back(pointCount);
+ const int verbDelta = verbCount + reserveVerbs - fVerbs.size();
+ if (verbDelta > 0) {
+ fVerbs.reserve_back(verbDelta);
+ }
+ fVerbs.resize_back(verbCount);
+ fConicWeights.resize_back(conicCount);
+ SkDEBUGCODE(this->validate();)
+ }
+
+ /**
+ * Increases the verb count by numVbs and point count by the required amount.
+ * The new points are uninitialized. All the new verbs are set to the specified
+ * verb. If 'verb' is kConic_Verb, 'weights' will return a pointer to the
+ * uninitialized conic weights.
+ */
+ SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb, int numVbs, SkScalar** weights);
+
+ /**
+ * Increases the verb count 1, records the new verb, and creates room for the requisite number
+ * of additional points. A pointer to the first point is returned. Any new points are
+ * uninitialized.
+ */
+ SkPoint* growForVerb(int /*SkPath::Verb*/ verb, SkScalar weight);
+
+ /**
+ * Concatenates all verbs from 'path' onto our own verbs array. Increases the point count by the
+ * number of points in 'path', and the conic weight count by the number of conics in 'path'.
+ *
+ * Returns pointers to the uninitialized points and conic weights data.
+ */
+ std::tuple<SkPoint*, SkScalar*> growForVerbsInPath(const SkPathRef& path);
+
+ /**
+ * Private, non-const-ptr version of the public function verbsMemBegin().
+ */
+ uint8_t* verbsBeginWritable() { return fVerbs.begin(); }
+
+ /**
+ * Called the first time someone calls CreateEmpty to actually create the singleton.
+ */
+ friend SkPathRef* sk_create_empty_pathref();
+
+ void setIsOval(bool isOval, bool isCCW, unsigned start) {
+ fIsOval = isOval;
+ fRRectOrOvalIsCCW = isCCW;
+ fRRectOrOvalStartIdx = SkToU8(start);
+ }
+
+ void setIsRRect(bool isRRect, bool isCCW, unsigned start) {
+ fIsRRect = isRRect;
+ fRRectOrOvalIsCCW = isCCW;
+ fRRectOrOvalStartIdx = SkToU8(start);
+ }
+
+ // called only by the editor. Note that this is not a const function.
+ SkPoint* getWritablePoints() {
+ SkDEBUGCODE(this->validate();)
+ fIsOval = false;
+ fIsRRect = false;
+ return fPoints.begin();
+ }
+
+ const SkPoint* getPoints() const {
+ SkDEBUGCODE(this->validate();)
+ return fPoints.begin();
+ }
+
+ void callGenIDChangeListeners();
+
+ enum {
+ kMinSize = 256,
+ };
+
+ mutable SkRect fBounds;
+
+ PointsArray fPoints;
+ VerbsArray fVerbs;
+ ConicWeightsArray fConicWeights;
+
+ enum {
+ kEmptyGenID = 1, // GenID reserved for path ref with zero points and zero verbs.
+ };
+ mutable uint32_t fGenerationID;
+ SkDEBUGCODE(std::atomic<int> fEditorsAttached;) // assert only one editor in use at any time.
+
+ SkIDChangeListener::List fGenIDChangeListeners;
+
+ mutable uint8_t fBoundsIsDirty;
+ mutable bool fIsFinite; // only meaningful if bounds are valid
+
+ bool fIsOval;
+ bool fIsRRect;
+ // Both the circle and rrect special cases have a notion of direction and starting point
+ // The next two variables store that information for either.
+ bool fRRectOrOvalIsCCW;
+ uint8_t fRRectOrOvalStartIdx;
+ uint8_t fSegmentMask;
+
+ friend class PathRefTest_Private;
+ friend class ForceIsRRect_Private; // unit test isRRect
+ friend class SkPath;
+ friend class SkPathBuilder;
+ friend class SkPathPriv;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLDefines.h b/gfx/skia/skia/include/private/SkSLDefines.h
new file mode 100644
index 0000000000..a258054229
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLDefines.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DEFINES
+#define SKSL_DEFINES
+
+#include <cstdint>
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+
+using SKSL_INT = int64_t;
+using SKSL_FLOAT = float;
+
+namespace SkSL {
+
+class Expression;
+class Statement;
+
+using ComponentArray = SkSTArray<4, int8_t>; // for Swizzles
+
+class ExpressionArray : public SkSTArray<2, std::unique_ptr<Expression>> {
+public:
+ using SkSTArray::SkSTArray;
+
+ /** Returns a new ExpressionArray containing a clone of every element. */
+ ExpressionArray clone() const;
+};
+
+using StatementArray = SkSTArray<2, std::unique_ptr<Statement>>;
+
+// Functions larger than this (measured in IR nodes) will not be inlined. This growth factor
+// accounts for the number of calls being inlined--i.e., a function called five times (that is, with
+// five inlining opportunities) would be considered 5x larger than if it were called once. This
+// default threshold value is arbitrary, but tends to work well in practice.
+static constexpr int kDefaultInlineThreshold = 50;
+
+// A hard upper limit on the number of variable slots allowed in a function/global scope.
+// This is an arbitrary limit, but is needed to prevent code generation from taking unbounded
+// amounts of time or space.
+static constexpr int kVariableSlotLimit = 100000;
+
+// The SwizzleComponent namespace is used both by the SkSL::Swizzle expression, and the DSL swizzle.
+// This namespace is injected into SkSL::dsl so that `using namespace SkSL::dsl` enables DSL code
+// like `Swizzle(var, X, Y, ONE)` to compile without any extra qualifications.
+namespace SwizzleComponent {
+
+enum Type : int8_t {
+ X = 0, Y = 1, Z = 2, W = 3,
+ R = 4, G = 5, B = 6, A = 7,
+ S = 8, T = 9, P = 10, Q = 11,
+ UL = 12, UT = 13, UR = 14, UB = 15,
+ ZERO,
+ ONE
+};
+
+} // namespace SwizzleComponent
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLIRNode.h b/gfx/skia/skia/include/private/SkSLIRNode.h
new file mode 100644
index 0000000000..8fb4279b76
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLIRNode.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IRNODE
+#define SKSL_IRNODE
+
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLPool.h"
+
+#include <string>
+
+namespace SkSL {
+
+// The fKind field of IRNode could contain any of these values.
+enum class ProgramElementKind {
+ kExtension = 0,
+ kFunction,
+ kFunctionPrototype,
+ kGlobalVar,
+ kInterfaceBlock,
+ kModifiers,
+ kStructDefinition,
+
+ kFirst = kExtension,
+ kLast = kStructDefinition
+};
+
+enum class SymbolKind {
+ kExternal = (int) ProgramElementKind::kLast + 1,
+ kField,
+ kFunctionDeclaration,
+ kType,
+ kVariable,
+
+ kFirst = kExternal,
+ kLast = kVariable
+};
+
+enum class StatementKind {
+ kBlock = (int) SymbolKind::kLast + 1,
+ kBreak,
+ kContinue,
+ kDiscard,
+ kDo,
+ kExpression,
+ kFor,
+ kIf,
+ kNop,
+ kReturn,
+ kSwitch,
+ kSwitchCase,
+ kVarDeclaration,
+
+ kFirst = kBlock,
+ kLast = kVarDeclaration,
+};
+
+enum class ExpressionKind {
+ kBinary = (int) StatementKind::kLast + 1,
+ kChildCall,
+ kConstructorArray,
+ kConstructorArrayCast,
+ kConstructorCompound,
+ kConstructorCompoundCast,
+ kConstructorDiagonalMatrix,
+ kConstructorMatrixResize,
+ kConstructorScalarCast,
+ kConstructorSplat,
+ kConstructorStruct,
+ kFieldAccess,
+ kFunctionReference,
+ kFunctionCall,
+ kIndex,
+ kLiteral,
+ kMethodReference,
+ kPoison,
+ kPostfix,
+ kPrefix,
+ kSetting,
+ kSwizzle,
+ kTernary,
+ kTypeReference,
+ kVariableReference,
+
+ kFirst = kBinary,
+ kLast = kVariableReference
+};
+
+/**
+ * Represents a node in the intermediate representation (IR) tree. The IR is a fully-resolved
+ * version of the program (all types determined, everything validated), ready for code generation.
+ */
+class IRNode : public Poolable {
+public:
+ virtual ~IRNode() {}
+
+ virtual std::string description() const = 0;
+
+ // No copy construction or assignment
+ IRNode(const IRNode&) = delete;
+ IRNode& operator=(const IRNode&) = delete;
+
+ // position of this element within the program being compiled, for error reporting purposes
+ Position fPosition;
+
+ /**
+ * Use is<T> to check the type of an IRNode.
+ * e.g. replace `s.kind() == Statement::Kind::kReturn` with `s.is<ReturnStatement>()`.
+ */
+ template <typename T>
+ bool is() const {
+ return this->fKind == (int)T::kIRNodeKind;
+ }
+
+ /**
+ * Use as<T> to downcast IRNodes.
+ * e.g. replace `(ReturnStatement&) s` with `s.as<ReturnStatement>()`.
+ */
+ template <typename T>
+ const T& as() const {
+ SkASSERT(this->is<T>());
+ return static_cast<const T&>(*this);
+ }
+
+ template <typename T>
+ T& as() {
+ SkASSERT(this->is<T>());
+ return static_cast<T&>(*this);
+ }
+
+protected:
+ IRNode(Position position, int kind)
+ : fPosition(position)
+ , fKind(kind) {}
+
+ int fKind;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLLayout.h b/gfx/skia/skia/include/private/SkSLLayout.h
new file mode 100644
index 0000000000..a99f18a477
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLLayout.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_LAYOUT
+#define SKSL_LAYOUT
+
+#include <string>
+
+namespace SkSL {
+
+/**
+ * Represents a layout block appearing before a variable declaration, as in:
+ *
+ * layout (location = 0) int x;
+ */
+struct Layout {
+ enum Flag {
+ kOriginUpperLeft_Flag = 1 << 0,
+ kPushConstant_Flag = 1 << 1,
+ kBlendSupportAllEquations_Flag = 1 << 2,
+ kColor_Flag = 1 << 3,
+
+ // These flags indicate if the qualifier appeared, regardless of the accompanying value.
+ kLocation_Flag = 1 << 4,
+ kOffset_Flag = 1 << 5,
+ kBinding_Flag = 1 << 6,
+ kTexture_Flag = 1 << 7,
+ kSampler_Flag = 1 << 8,
+ kIndex_Flag = 1 << 9,
+ kSet_Flag = 1 << 10,
+ kBuiltin_Flag = 1 << 11,
+ kInputAttachmentIndex_Flag = 1 << 12,
+
+ // These flags indicate the backend type; only one at most can be set.
+ kSPIRV_Flag = 1 << 13,
+ kMetal_Flag = 1 << 14,
+ kGL_Flag = 1 << 15,
+ kWGSL_Flag = 1 << 16,
+ };
+
+ static constexpr int kAllBackendFlagsMask =
+ Layout::kSPIRV_Flag | Layout::kMetal_Flag | Layout::kGL_Flag | Layout::kWGSL_Flag;
+
+ Layout(int flags, int location, int offset, int binding, int index, int set, int builtin,
+ int inputAttachmentIndex)
+ : fFlags(flags)
+ , fLocation(location)
+ , fOffset(offset)
+ , fBinding(binding)
+ , fIndex(index)
+ , fSet(set)
+ , fBuiltin(builtin)
+ , fInputAttachmentIndex(inputAttachmentIndex) {}
+
+ Layout() = default;
+
+ static Layout builtin(int builtin) {
+ Layout result;
+ result.fBuiltin = builtin;
+ return result;
+ }
+
+ std::string description() const;
+
+ bool operator==(const Layout& other) const;
+
+ bool operator!=(const Layout& other) const {
+ return !(*this == other);
+ }
+
+ int fFlags = 0;
+ int fLocation = -1;
+ int fOffset = -1;
+ int fBinding = -1;
+ int fTexture = -1;
+ int fSampler = -1;
+ int fIndex = -1;
+ int fSet = -1;
+ // builtin comes from SPIR-V and identifies which particular builtin value this object
+ // represents.
+ int fBuiltin = -1;
+    // input_attachment_index comes from Vulkan/SPIR-V to connect a shader variable to the
+ // corresponding attachment on the subpass in which the shader is being used.
+ int fInputAttachmentIndex = -1;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLModifiers.h b/gfx/skia/skia/include/private/SkSLModifiers.h
new file mode 100644
index 0000000000..7e8efddf19
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLModifiers.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MODIFIERS
+#define SKSL_MODIFIERS
+
+#include "include/private/SkSLLayout.h"
+
+#include <cstddef>
+#include <memory>
+#include <string>
+
+namespace SkSL {
+
+class Context;
+class Position;
+
+/**
+ * A set of modifier keywords (in, out, uniform, etc.) appearing before a declaration.
+ */
+struct Modifiers {
+ /**
+ * OpenGL requires modifiers to be in a strict order:
+ * - invariant-qualifier: (invariant)
+ * - interpolation-qualifier: flat, noperspective, (smooth)
+ * - storage-qualifier: const, uniform
+ * - parameter-qualifier: in, out, inout
+ * - precision-qualifier: highp, mediump, lowp
+ *
+ * SkSL does not have `invariant` or `smooth`.
+ */
+
+ enum Flag {
+ kNo_Flag = 0,
+ // Real GLSL modifiers
+ kFlat_Flag = 1 << 0,
+ kNoPerspective_Flag = 1 << 1,
+ kConst_Flag = 1 << 2,
+ kUniform_Flag = 1 << 3,
+ kIn_Flag = 1 << 4,
+ kOut_Flag = 1 << 5,
+ kHighp_Flag = 1 << 6,
+ kMediump_Flag = 1 << 7,
+ kLowp_Flag = 1 << 8,
+ kReadOnly_Flag = 1 << 9,
+ kWriteOnly_Flag = 1 << 10,
+ kBuffer_Flag = 1 << 11,
+ // Corresponds to the GLSL 'shared' modifier. Only allowed in a compute program.
+ kWorkgroup_Flag = 1 << 12,
+ // SkSL extensions, not present in GLSL
+ kExport_Flag = 1 << 13,
+ kES3_Flag = 1 << 14,
+ kPure_Flag = 1 << 15,
+ kInline_Flag = 1 << 16,
+ kNoInline_Flag = 1 << 17,
+ };
+
+ Modifiers()
+ : fLayout(Layout())
+ , fFlags(0) {}
+
+ Modifiers(const Layout& layout, int flags)
+ : fLayout(layout)
+ , fFlags(flags) {}
+
+ std::string description() const {
+ return fLayout.description() + DescribeFlags(fFlags) + " ";
+ }
+
+ static std::string DescribeFlags(int flags) {
+ // SkSL extensions
+ std::string result;
+ if (flags & kExport_Flag) {
+ result += "$export ";
+ }
+ if (flags & kES3_Flag) {
+ result += "$es3 ";
+ }
+ if (flags & kPure_Flag) {
+ result += "$pure ";
+ }
+ if (flags & kInline_Flag) {
+ result += "inline ";
+ }
+ if (flags & kNoInline_Flag) {
+ result += "noinline ";
+ }
+
+ // Real GLSL qualifiers (must be specified in order in GLSL 4.1 and below)
+ if (flags & kFlat_Flag) {
+ result += "flat ";
+ }
+ if (flags & kNoPerspective_Flag) {
+ result += "noperspective ";
+ }
+ if (flags & kConst_Flag) {
+ result += "const ";
+ }
+ if (flags & kUniform_Flag) {
+ result += "uniform ";
+ }
+ if ((flags & kIn_Flag) && (flags & kOut_Flag)) {
+ result += "inout ";
+ } else if (flags & kIn_Flag) {
+ result += "in ";
+ } else if (flags & kOut_Flag) {
+ result += "out ";
+ }
+ if (flags & kHighp_Flag) {
+ result += "highp ";
+ }
+ if (flags & kMediump_Flag) {
+ result += "mediump ";
+ }
+ if (flags & kLowp_Flag) {
+ result += "lowp ";
+ }
+ if (flags & kReadOnly_Flag) {
+ result += "readonly ";
+ }
+ if (flags & kWriteOnly_Flag) {
+ result += "writeonly ";
+ }
+ if (flags & kBuffer_Flag) {
+ result += "buffer ";
+ }
+
+ // We're using a non-GLSL name for this one; the GLSL equivalent is "shared"
+ if (flags & kWorkgroup_Flag) {
+ result += "workgroup ";
+ }
+
+ if (!result.empty()) {
+ result.pop_back();
+ }
+ return result;
+ }
+
+ bool operator==(const Modifiers& other) const {
+ return fLayout == other.fLayout && fFlags == other.fFlags;
+ }
+
+ bool operator!=(const Modifiers& other) const {
+ return !(*this == other);
+ }
+
+ /**
+ * Verifies that only permitted modifiers and layout flags are included. Reports errors and
+ * returns false in the event of a violation.
+ */
+ bool checkPermitted(const Context& context,
+ Position pos,
+ int permittedModifierFlags,
+ int permittedLayoutFlags) const;
+
+ Layout fLayout;
+ int fFlags;
+};
+
+} // namespace SkSL
+
+namespace std {
+
+template <>
+struct hash<SkSL::Modifiers> {
+ size_t operator()(const SkSL::Modifiers& key) const {
+ return (size_t) key.fFlags ^ ((size_t) key.fLayout.fFlags << 8) ^
+ ((size_t) key.fLayout.fBuiltin << 16);
+ }
+};
+
+} // namespace std
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLProgramElement.h b/gfx/skia/skia/include/private/SkSLProgramElement.h
new file mode 100644
index 0000000000..34d57bcdf8
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLProgramElement.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAMELEMENT
+#define SKSL_PROGRAMELEMENT
+
+#include "include/private/SkSLIRNode.h"
+
+#include <memory>
+
+namespace SkSL {
+
+/**
+ * Represents a top-level element (e.g. function or global variable) in a program.
+ */
+class ProgramElement : public IRNode {
+public:
+ using Kind = ProgramElementKind;
+
+ ProgramElement(Position pos, Kind kind)
+ : INHERITED(pos, (int) kind) {
+ SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast);
+ }
+
+ Kind kind() const {
+ return (Kind) fKind;
+ }
+
+ virtual std::unique_ptr<ProgramElement> clone() const = 0;
+
+private:
+ using INHERITED = IRNode;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLProgramKind.h b/gfx/skia/skia/include/private/SkSLProgramKind.h
new file mode 100644
index 0000000000..f2355bd7d8
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLProgramKind.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLProgramKind_DEFINED
+#define SkSLProgramKind_DEFINED
+
+#include <cinttypes>
+
+namespace SkSL {
+
+/**
+ * SkSL supports several different program kinds.
+ */
+enum class ProgramKind : int8_t {
+ kFragment,
+ kVertex,
+ kCompute,
+ kGraphiteFragment,
+ kGraphiteVertex,
+ kRuntimeColorFilter, // Runtime effect only suitable as SkColorFilter
+ kRuntimeShader, // " " " " " SkShader
+ kRuntimeBlender, // " " " " " SkBlender
+ kPrivateRuntimeColorFilter, // Runtime color filter with public restrictions lifted
+ kPrivateRuntimeShader, // Runtime shader " " " "
+ kPrivateRuntimeBlender, // Runtime blender " " " "
+ kMeshVertex, // Vertex portion of a custom mesh
+ kMeshFragment, // Fragment " " " " "
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLSampleUsage.h b/gfx/skia/skia/include/private/SkSLSampleUsage.h
new file mode 100644
index 0000000000..39d9e25818
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLSampleUsage.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLSampleUsage_DEFINED
+#define SkSLSampleUsage_DEFINED
+
+#include "include/core/SkTypes.h"
+
+namespace SkSL {
+
+/**
+ * Represents all of the ways that a fragment processor is sampled by its parent.
+ */
+class SampleUsage {
+public:
+ enum class Kind {
+ // Child is never sampled
+ kNone,
+ // Child is only sampled at the same coordinates as the parent
+ kPassThrough,
+ // Child is sampled with a matrix whose value is uniform
+ kUniformMatrix,
+ // Child is sampled with sk_FragCoord.xy
+ kFragCoord,
+ // Child is sampled using explicit coordinates
+ kExplicit,
+ };
+
+ // Make a SampleUsage that corresponds to no sampling of the child at all
+ SampleUsage() = default;
+
+ SampleUsage(Kind kind, bool hasPerspective) : fKind(kind), fHasPerspective(hasPerspective) {
+ if (kind != Kind::kUniformMatrix) {
+ SkASSERT(!fHasPerspective);
+ }
+ }
+
+ // Child is sampled with a matrix whose value is uniform. The name is fixed.
+ static SampleUsage UniformMatrix(bool hasPerspective) {
+ return SampleUsage(Kind::kUniformMatrix, hasPerspective);
+ }
+
+ static SampleUsage Explicit() {
+ return SampleUsage(Kind::kExplicit, false);
+ }
+
+ static SampleUsage PassThrough() {
+ return SampleUsage(Kind::kPassThrough, false);
+ }
+
+ static SampleUsage FragCoord() { return SampleUsage(Kind::kFragCoord, false); }
+
+ bool operator==(const SampleUsage& that) const {
+ return fKind == that.fKind && fHasPerspective == that.fHasPerspective;
+ }
+
+ bool operator!=(const SampleUsage& that) const { return !(*this == that); }
+
+ // Arbitrary name used by all uniform sampling matrices
+ static const char* MatrixUniformName() { return "matrix"; }
+
+ SampleUsage merge(const SampleUsage& other);
+
+ Kind kind() const { return fKind; }
+
+ bool hasPerspective() const { return fHasPerspective; }
+
+ bool isSampled() const { return fKind != Kind::kNone; }
+ bool isPassThrough() const { return fKind == Kind::kPassThrough; }
+ bool isExplicit() const { return fKind == Kind::kExplicit; }
+ bool isUniformMatrix() const { return fKind == Kind::kUniformMatrix; }
+ bool isFragCoord() const { return fKind == Kind::kFragCoord; }
+
+private:
+ Kind fKind = Kind::kNone;
+ bool fHasPerspective = false; // Only valid if fKind is kUniformMatrix
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLStatement.h b/gfx/skia/skia/include/private/SkSLStatement.h
new file mode 100644
index 0000000000..3e5f084c75
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLStatement.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STATEMENT
+#define SKSL_STATEMENT
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLSymbol.h"
+
+namespace SkSL {
+
+/**
+ * Abstract supertype of all statements.
+ */
+class Statement : public IRNode {
+public:
+ using Kind = StatementKind;
+
+ Statement(Position pos, Kind kind)
+ : INHERITED(pos, (int) kind) {
+ SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast);
+ }
+
+ Kind kind() const {
+ return (Kind) fKind;
+ }
+
+ virtual bool isEmpty() const {
+ return false;
+ }
+
+ virtual std::unique_ptr<Statement> clone() const = 0;
+
+private:
+ using INHERITED = IRNode;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLString.h b/gfx/skia/skia/include/private/SkSLString.h
new file mode 100644
index 0000000000..f8f3768ca8
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLString.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STRING
+#define SKSL_STRING
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+
+#include <stdarg.h>
+#include <string>
+#include <string_view>
+
+namespace SkSL {
+
+bool stod(std::string_view s, SKSL_FLOAT* value);
+bool stoi(std::string_view s, SKSL_INT* value);
+
+namespace String {
+
+std::string printf(const char* fmt, ...) SK_PRINTF_LIKE(1, 2);
+void appendf(std::string* str, const char* fmt, ...) SK_PRINTF_LIKE(2, 3);
+void vappendf(std::string* str, const char* fmt, va_list va) SK_PRINTF_LIKE(2, 0);
+
+inline auto Separator() {
+ // This returns a lambda which emits "" the first time it is called, and ", " every subsequent
+ // time it is called.
+ struct Output {
+ const std::string fSpace, fComma;
+ };
+ static const Output* kOutput = new Output{{}, {", "}};
+
+ return [firstSeparator = true]() mutable -> const std::string& {
+ if (firstSeparator) {
+ firstSeparator = false;
+ return kOutput->fSpace;
+ } else {
+ return kOutput->fComma;
+ }
+ };
+}
+
+} // namespace String
+} // namespace SkSL
+
+namespace skstd {
+
+// We use a custom to_string(float|double) which ignores locale settings and writes `1.0` instead
+// of `1.00000`.
+std::string to_string(float value);
+std::string to_string(double value);
+
+} // namespace skstd
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSLSymbol.h b/gfx/skia/skia/include/private/SkSLSymbol.h
new file mode 100644
index 0000000000..a5b563c5c7
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSLSymbol.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SYMBOL
+#define SKSL_SYMBOL
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramElement.h"
+
+namespace SkSL {
+
+class Type;
+
+/**
+ * Represents a symbol table entry.
+ */
+class Symbol : public IRNode {
+public:
+ using Kind = SymbolKind;
+
+ Symbol(Position pos, Kind kind, std::string_view name, const Type* type = nullptr)
+ : INHERITED(pos, (int) kind)
+ , fName(name)
+ , fType(type) {
+ SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast);
+ }
+
+ ~Symbol() override {}
+
+ const Type& type() const {
+ SkASSERT(fType);
+ return *fType;
+ }
+
+ Kind kind() const {
+ return (Kind) fKind;
+ }
+
+ std::string_view name() const {
+ return fName;
+ }
+
+ /**
+ * Don't call this directly--use SymbolTable::renameSymbol instead!
+ */
+ void setName(std::string_view newName) {
+ fName = newName;
+ }
+
+private:
+ std::string_view fName;
+ const Type* fType;
+
+ using INHERITED = IRNode;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkShadowFlags.h b/gfx/skia/skia/include/private/SkShadowFlags.h
new file mode 100644
index 0000000000..99ed6cb8a0
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkShadowFlags.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShadowFlags_DEFINED
+#define SkShadowFlags_DEFINED
+
+// A set of flags shared between the SkAmbientShadowMaskFilter and the SkSpotShadowMaskFilter
+enum SkShadowFlags {
+ kNone_ShadowFlag = 0x00,
+ /** The occluding object is not opaque. Knowing that the occluder is opaque allows
+ * us to cull shadow geometry behind it and improve performance. */
+ kTransparentOccluder_ShadowFlag = 0x01,
+ /** Don't try to use analytic shadows. */
+ kGeometricOnly_ShadowFlag = 0x02,
+ /** Light position represents a direction, light radius is blur radius at elevation 1 */
+ kDirectionalLight_ShadowFlag = 0x04,
+ /** Concave paths will only use blur to generate the shadow */
+ kConcaveBlurOnly_ShadowFlag = 0x08,
+ /** mask for all shadow flags */
+ kAll_ShadowFlag = 0x0F
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/SkSpinlock.h b/gfx/skia/skia/include/private/SkSpinlock.h
new file mode 100644
index 0000000000..3816dc9dff
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkSpinlock.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpinlock_DEFINED
+#define SkSpinlock_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include <atomic>
+
+class SK_CAPABILITY("mutex") SkSpinlock {
+public:
+ constexpr SkSpinlock() = default;
+
+ void acquire() SK_ACQUIRE() {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ if (fLocked.exchange(true, std::memory_order_acquire)) {
+ // Lock was contended. Fall back to an out-of-line spin loop.
+ this->contendedAcquire();
+ }
+ }
+
+ // Acquire the lock or fail (quickly). Lets the caller decide to do something other than wait.
+ bool tryAcquire() SK_TRY_ACQUIRE(true) {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ if (fLocked.exchange(true, std::memory_order_acquire)) {
+ // Lock was contended. Let the caller decide what to do.
+ return false;
+ }
+ return true;
+ }
+
+ void release() SK_RELEASE_CAPABILITY() {
+ // To act as a mutex, we need a release barrier when we release the lock.
+ fLocked.store(false, std::memory_order_release);
+ }
+
+private:
+ SK_API void contendedAcquire();
+
+ std::atomic<bool> fLocked{false};
+};
+
+class SK_SCOPED_CAPABILITY SkAutoSpinlock {
+public:
+ SkAutoSpinlock(SkSpinlock& mutex) SK_ACQUIRE(mutex) : fSpinlock(mutex) { fSpinlock.acquire(); }
+ ~SkAutoSpinlock() SK_RELEASE_CAPABILITY() { fSpinlock.release(); }
+
+private:
+ SkSpinlock& fSpinlock;
+};
+
+#endif//SkSpinlock_DEFINED
diff --git a/gfx/skia/skia/include/private/SkWeakRefCnt.h b/gfx/skia/skia/include/private/SkWeakRefCnt.h
new file mode 100644
index 0000000000..058a18652b
--- /dev/null
+++ b/gfx/skia/skia/include/private/SkWeakRefCnt.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWeakRefCnt_DEFINED
+#define SkWeakRefCnt_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#include <atomic>
+#include <cstdint>
+
+/** \class SkWeakRefCnt
+
+ SkWeakRefCnt is the base class for objects that may be shared by multiple
+ objects. When an existing strong owner wants to share a reference, it calls
+ ref(). When a strong owner wants to release its reference, it calls
+ unref(). When the shared object's strong reference count goes to zero as
+ the result of an unref() call, its (virtual) weak_dispose method is called.
+ It is an error for the destructor to be called explicitly (or via the
+ object going out of scope on the stack or calling delete) if
+ getRefCnt() > 1.
+
+ In addition to strong ownership, an owner may instead obtain a weak
+ reference by calling weak_ref(). A call to weak_ref() must be balanced by a
+ call to weak_unref(). To obtain a strong reference from a weak reference,
+ call try_ref(). If try_ref() returns true, the owner's pointer is now also
+ a strong reference on which unref() must be called. Note that this does not
+ affect the original weak reference, weak_unref() must still be called. When
+ the weak reference count goes to zero, the object is deleted. While the
+ weak reference count is positive and the strong reference count is zero the
+ object still exists, but will be in the disposed state. It is up to the
+ object to define what this means.
+
+ Note that a strong reference implicitly implies a weak reference. As a
+ result, it is allowable for the owner of a strong ref to call try_ref().
+ This will have the same effect as calling ref(), but may be more expensive.
+
+ Example:
+
+ SkWeakRefCnt myRef = strongRef.weak_ref();
+ ... // strongRef.unref() may or may not be called
+ if (myRef.try_ref()) {
+ ... // use myRef
+ myRef.unref();
+ } else {
+ // myRef is in the disposed state
+ }
+ myRef.weak_unref();
+*/
+class SK_API SkWeakRefCnt : public SkRefCnt {
+public:
+ /** Default construct, initializing the reference counts to 1.
+ The strong references collectively hold one weak reference. When the
+ strong reference count goes to zero, the collectively held weak
+ reference is released.
+ */
+ SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}
+
+ /** Destruct, asserting that the weak reference count is 1.
+ */
+ ~SkWeakRefCnt() override {
+#ifdef SK_DEBUG
+ SkASSERT(getWeakCnt() == 1);
+ fWeakCnt.store(0, std::memory_order_relaxed);
+#endif
+ }
+
+#ifdef SK_DEBUG
+ /** Return the weak reference count. */
+ int32_t getWeakCnt() const {
+ return fWeakCnt.load(std::memory_order_relaxed);
+ }
+#endif
+
+private:
+ /** If fRefCnt is 0, returns 0.
+ * Otherwise increments fRefCnt, acquires, and returns the old value.
+ */
+ int32_t atomic_conditional_acquire_strong_ref() const {
+ int32_t prev = fRefCnt.load(std::memory_order_relaxed);
+ do {
+ if (0 == prev) {
+ break;
+ }
+ } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire,
+ std::memory_order_relaxed));
+ return prev;
+ }
+
+public:
+ /** Creates a strong reference from a weak reference, if possible. The
+ caller must already be an owner. If try_ref() returns true the owner
+        is in possession of an additional strong reference. Both the original
+ reference and new reference must be properly unreferenced. If try_ref()
+ returns false, no strong reference could be created and the owner's
+ reference is in the same state as before the call.
+ */
+ bool SK_WARN_UNUSED_RESULT try_ref() const {
+ if (atomic_conditional_acquire_strong_ref() != 0) {
+ // Acquire barrier (L/SL), if not provided above.
+ // Prevents subsequent code from happening before the increment.
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the weak reference count. Must be balanced by a call to
+ weak_unref().
+ */
+ void weak_ref() const {
+ SkASSERT(getRefCnt() > 0);
+ SkASSERT(getWeakCnt() > 0);
+ // No barrier required.
+ (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed);
+ }
+
+ /** Decrement the weak reference count. If the weak reference count is 1
+ before the decrement, then call delete on the object. Note that if this
+ is the case, then the object needs to have been allocated via new, and
+ not on the stack.
+ */
+ void weak_unref() const {
+ SkASSERT(getWeakCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // Like try_ref(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+#ifdef SK_DEBUG
+ // so our destructor won't complain
+ fWeakCnt.store(1, std::memory_order_relaxed);
+#endif
+ this->INHERITED::internal_dispose();
+ }
+ }
+
+ /** Returns true if there are no strong references to the object. When this
+ is the case all future calls to try_ref() will return false.
+ */
+ bool weak_expired() const {
+ return fRefCnt.load(std::memory_order_relaxed) == 0;
+ }
+
+protected:
+ /** Called when the strong reference count goes to zero. This allows the
+ object to free any resources it may be holding. Weak references may
+ still exist and their level of allowed access to the object is defined
+ by the object's class.
+ */
+ virtual void weak_dispose() const {
+ }
+
+private:
+ /** Called when the strong reference count goes to zero. Calls weak_dispose
+ on the object and releases the implicit weak reference held
+ collectively by the strong references.
+ */
+ void internal_dispose() const override {
+ weak_dispose();
+ weak_unref();
+ }
+
+ /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
+ mutable std::atomic<int32_t> fWeakCnt;
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/README.md b/gfx/skia/skia/include/private/base/README.md
new file mode 100644
index 0000000000..7f4f17b228
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/README.md
@@ -0,0 +1,4 @@
+Files in "base" are used by many parts of Skia, but are not part of the public Skia API.
+See also src/base for other files that are part of base, but not needed by the public API.
+
+Files here should not depend on anything other than system headers or other files in base. \ No newline at end of file
diff --git a/gfx/skia/skia/include/private/base/SingleOwner.h b/gfx/skia/skia/include/private/base/SingleOwner.h
new file mode 100644
index 0000000000..473981e1fb
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SingleOwner.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_SingleOwner_DEFINED
+#define skgpu_SingleOwner_DEFINED
+
+#include "include/private/base/SkDebug.h" // IWYU pragma: keep
+
+#if defined(SK_DEBUG)
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include "include/private/base/SkThreadID.h"
+
+#endif
+
+namespace skgpu {
+
+#if defined(SK_DEBUG)
+
+#define SKGPU_ASSERT_SINGLE_OWNER(obj) \
+ skgpu::SingleOwner::AutoEnforce debug_SingleOwner(obj, __FILE__, __LINE__);
+
+// This is a debug tool to verify an object is only being used from one thread at a time.
+class SingleOwner {
+public:
+ SingleOwner() : fOwner(kIllegalThreadID), fReentranceCount(0) {}
+
+ struct AutoEnforce {
+ AutoEnforce(SingleOwner* so, const char* file, int line)
+ : fFile(file), fLine(line), fSO(so) {
+ fSO->enter(file, line);
+ }
+ ~AutoEnforce() { fSO->exit(fFile, fLine); }
+
+ const char* fFile;
+ int fLine;
+ SingleOwner* fSO;
+ };
+
+private:
+ void enter(const char* file, int line) {
+ SkAutoMutexExclusive lock(fMutex);
+ SkThreadID self = SkGetThreadID();
+ SkASSERTF(fOwner == self || fOwner == kIllegalThreadID, "%s:%d Single owner failure.",
+ file, line);
+ fReentranceCount++;
+ fOwner = self;
+ }
+
+ void exit(const char* file, int line) {
+ SkAutoMutexExclusive lock(fMutex);
+ SkASSERTF(fOwner == SkGetThreadID(), "%s:%d Single owner failure.", file, line);
+ fReentranceCount--;
+ if (fReentranceCount == 0) {
+ fOwner = kIllegalThreadID;
+ }
+ }
+
+ SkMutex fMutex;
+ SkThreadID fOwner SK_GUARDED_BY(fMutex);
+ int fReentranceCount SK_GUARDED_BY(fMutex);
+};
+#else
+#define SKGPU_ASSERT_SINGLE_OWNER(obj)
+class SingleOwner {}; // Provide a no-op implementation so we can pass pointers to constructors
+#endif
+
+} // namespace skgpu
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkAPI.h b/gfx/skia/skia/include/private/base/SkAPI.h
new file mode 100644
index 0000000000..4028f95d87
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAPI.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAPI_DEFINED
+#define SkAPI_DEFINED
+
+#include "include/private/base/SkLoadUserConfig.h" // IWYU pragma: keep
+
+// If SKIA_IMPLEMENTATION is defined as 1, that signals we are building Skia and should
+// export our symbols. If it is not set (or set to 0), then Skia is being used by a client
+// and we should not export our symbols.
+#if !defined(SKIA_IMPLEMENTATION)
+ #define SKIA_IMPLEMENTATION 0
+#endif
+
+// If Skia is being compiled as a DLL, we need to be sure to export all of our public
+// APIs to that DLL. If a client is using Skia which was compiled as a DLL, we need to instruct
+// the linker to use the symbols from that DLL. This is the goal of the SK_API define.
+#if !defined(SK_API)
+ #if defined(SKIA_DLL)
+ #if defined(_MSC_VER)
+ #if SKIA_IMPLEMENTATION
+ #define SK_API __declspec(dllexport)
+ #else
+ #define SK_API __declspec(dllimport)
+ #endif
+ #else
+ #define SK_API __attribute__((visibility("default")))
+ #endif
+ #else
+ #define SK_API
+ #endif
+#endif
+
+// SK_SPI is functionally identical to SK_API, but used within src to clarify that it's less stable
+#if !defined(SK_SPI)
+ #define SK_SPI SK_API
+#endif
+
+// See https://clang.llvm.org/docs/AttributeReference.html#availability
+// The API_AVAILABLE macro comes from <os/availability.h> on MacOS
+#if defined(SK_ENABLE_API_AVAILABLE)
+# define SK_API_AVAILABLE API_AVAILABLE
+#else
+# define SK_API_AVAILABLE(...)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkAlign.h b/gfx/skia/skia/include/private/base/SkAlign.h
new file mode 100644
index 0000000000..2b2138ddd4
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAlign.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAlign_DEFINED
+#define SkAlign_DEFINED
+
+#include "include/private/base/SkAssert.h"
+
+#include <cstddef>
+
+template <typename T> static constexpr T SkAlign2(T x) { return (x + 1) >> 1 << 1; }
+template <typename T> static constexpr T SkAlign4(T x) { return (x + 3) >> 2 << 2; }
+template <typename T> static constexpr T SkAlign8(T x) { return (x + 7) >> 3 << 3; }
+
+template <typename T> static constexpr bool SkIsAlign2(T x) { return 0 == (x & 1); }
+template <typename T> static constexpr bool SkIsAlign4(T x) { return 0 == (x & 3); }
+template <typename T> static constexpr bool SkIsAlign8(T x) { return 0 == (x & 7); }
+
+template <typename T> static constexpr T SkAlignPtr(T x) {
+ return sizeof(void*) == 8 ? SkAlign8(x) : SkAlign4(x);
+}
+template <typename T> static constexpr bool SkIsAlignPtr(T x) {
+ return sizeof(void*) == 8 ? SkIsAlign8(x) : SkIsAlign4(x);
+}
+
+/**
+ * align up to a power of 2
+ */
+static inline constexpr size_t SkAlignTo(size_t x, size_t alignment) {
+ // The same as alignment && SkIsPow2(value), w/o a dependency cycle.
+ SkASSERT(alignment && (alignment & (alignment - 1)) == 0);
+ return (x + alignment - 1) & ~(alignment - 1);
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkAlignedStorage.h b/gfx/skia/skia/include/private/base/SkAlignedStorage.h
new file mode 100644
index 0000000000..532ad03978
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAlignedStorage.h
@@ -0,0 +1,32 @@
+// Copyright 2022 Google LLC
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkAlignedStorage_DEFINED
+#define SkAlignedStorage_DEFINED
+
+#include <cstddef>
+#include <iterator>
+
+template <int N, typename T> class SkAlignedSTStorage {
+public:
+ SkAlignedSTStorage() {}
+ SkAlignedSTStorage(SkAlignedSTStorage&&) = delete;
+ SkAlignedSTStorage(const SkAlignedSTStorage&) = delete;
+ SkAlignedSTStorage& operator=(SkAlignedSTStorage&&) = delete;
+ SkAlignedSTStorage& operator=(const SkAlignedSTStorage&) = delete;
+
+ // Returns void* because this object does not initialize the
+ // memory. Use placement new for types that require a constructor.
+ void* get() { return fStorage; }
+ const void* get() const { return fStorage; }
+
+ // Act as a container of bytes because the storage is uninitialized.
+ std::byte* data() { return fStorage; }
+ const std::byte* data() const { return fStorage; }
+ size_t size() const { return std::size(fStorage); }
+
+private:
+ alignas(T) std::byte fStorage[sizeof(T) * N];
+};
+
+#endif // SkAlignedStorage_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkAssert.h b/gfx/skia/skia/include/private/base/SkAssert.h
new file mode 100644
index 0000000000..053e25f22b
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAssert.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAssert_DEFINED
+#define SkAssert_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkDebug.h" // IWYU pragma: keep
+
+/** Called internally if we hit an unrecoverable error.
+ The platform implementation must not return, but should either throw
+ an exception or otherwise exit.
+*/
+[[noreturn]] SK_API extern void sk_abort_no_print(void);
+SK_API extern bool sk_abort_is_enabled();
+
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ void SkDebugfForDumpStackTrace(const char* data, void* unused);
+ namespace base {
+ void DumpStackTrace(int skip_count, void w(const char*, void*), void* arg);
+ }
+# define SK_DUMP_GOOGLE3_STACK() ::base::DumpStackTrace(0, SkDebugfForDumpStackTrace, nullptr)
+#else
+# define SK_DUMP_GOOGLE3_STACK()
+#endif
+
+#if !defined(SK_ABORT)
+# if defined(SK_BUILD_FOR_WIN)
+ // This style lets Visual Studio follow errors back to the source file.
+# define SK_DUMP_LINE_FORMAT "%s(%d)"
+# else
+# define SK_DUMP_LINE_FORMAT "%s:%d"
+# endif
+# define SK_ABORT(message, ...) \
+ do { if (sk_abort_is_enabled()) { \
+ SkDebugf(SK_DUMP_LINE_FORMAT ": fatal error: \"" message "\"\n", \
+ __FILE__, __LINE__, ##__VA_ARGS__); \
+ SK_DUMP_GOOGLE3_STACK(); \
+ sk_abort_no_print(); \
+ } } while (false)
+#endif
+
+// SkASSERT, SkASSERTF and SkASSERT_RELEASE can be used as stand alone assertion expressions, e.g.
+// uint32_t foo(int x) {
+// SkASSERT(x > 4);
+// return x - 4;
+// }
+// and are also written to be compatible with constexpr functions:
+// constexpr uint32_t foo(int x) {
+// return SkASSERT(x > 4),
+// x - 4;
+// }
+#define SkASSERT_RELEASE(cond) \
+ static_cast<void>( (cond) ? (void)0 : []{ SK_ABORT("assert(%s)", #cond); }() )
+
+#if defined(SK_DEBUG)
+ #define SkASSERT(cond) SkASSERT_RELEASE(cond)
+ #define SkASSERTF(cond, fmt, ...) static_cast<void>( (cond) ? (void)0 : [&]{ \
+ SkDebugf(fmt"\n", ##__VA_ARGS__); \
+ SK_ABORT("assert(%s)", #cond); \
+ }() )
+ #define SkDEBUGFAIL(message) SK_ABORT("%s", message)
+ #define SkDEBUGFAILF(fmt, ...) SK_ABORT(fmt, ##__VA_ARGS__)
+ #define SkAssertResult(cond) SkASSERT(cond)
+#else
+ #define SkASSERT(cond) static_cast<void>(0)
+ #define SkASSERTF(cond, fmt, ...) static_cast<void>(0)
+ #define SkDEBUGFAIL(message)
+ #define SkDEBUGFAILF(fmt, ...)
+
+ // unlike SkASSERT, this macro executes its condition in the non-debug build.
+ // The if is present so that this can be used with functions marked SK_WARN_UNUSED_RESULT.
+ #define SkAssertResult(cond) if (cond) {} do {} while(false)
+#endif
+
+#if !defined(SkUNREACHABLE)
+# if defined(_MSC_VER) && !defined(__clang__)
+# include <intrin.h>
+# define FAST_FAIL_INVALID_ARG 5
+// See https://developercommunity.visualstudio.com/content/problem/1128631/code-flow-doesnt-see-noreturn-with-extern-c.html
+// for why this is wrapped. Hopefully removable after msvc++ 19.27 is no longer supported.
+[[noreturn]] static inline void sk_fast_fail() { __fastfail(FAST_FAIL_INVALID_ARG); }
+# define SkUNREACHABLE sk_fast_fail()
+# else
+# define SkUNREACHABLE __builtin_trap()
+# endif
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkAttributes.h b/gfx/skia/skia/include/private/base/SkAttributes.h
new file mode 100644
index 0000000000..038a800e97
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkAttributes.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAttributes_DEFINED
+#define SkAttributes_DEFINED
+
+#include "include/private/base/SkFeatures.h" // IWYU pragma: keep
+#include "include/private/base/SkLoadUserConfig.h" // IWYU pragma: keep
+
+#if defined(__clang__) || defined(__GNUC__)
+# define SK_ATTRIBUTE(attr) __attribute__((attr))
+#else
+# define SK_ATTRIBUTE(attr)
+#endif
+
+#if !defined(SK_UNUSED)
+# if !defined(__clang__) && defined(_MSC_VER)
+# define SK_UNUSED __pragma(warning(suppress:4189))
+# else
+# define SK_UNUSED SK_ATTRIBUTE(unused)
+# endif
+#endif
+
+#if !defined(SK_WARN_UNUSED_RESULT)
+ #define SK_WARN_UNUSED_RESULT SK_ATTRIBUTE(warn_unused_result)
+#endif
+
+/**
+ * If your judgment is better than the compiler's (i.e. you've profiled it),
+ * you can use SK_ALWAYS_INLINE to force inlining. E.g.
+ * inline void someMethod() { ... } // may not be inlined
+ * SK_ALWAYS_INLINE void someMethod() { ... } // should always be inlined
+ */
+#if !defined(SK_ALWAYS_INLINE)
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_ALWAYS_INLINE __forceinline
+# else
+# define SK_ALWAYS_INLINE SK_ATTRIBUTE(always_inline) inline
+# endif
+#endif
+
+/**
+ * If your judgment is better than the compiler's (i.e. you've profiled it),
+ * you can use SK_NEVER_INLINE to prevent inlining.
+ */
+#if !defined(SK_NEVER_INLINE)
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_NEVER_INLINE __declspec(noinline)
+# else
+# define SK_NEVER_INLINE SK_ATTRIBUTE(noinline)
+# endif
+#endif
+
+/**
+ * Used to annotate a function as taking printf style arguments.
+ * `A` is the (1 based) index of the format string argument.
+ * `B` is the (1 based) index of the first argument used by the format string.
+ */
+#if !defined(SK_PRINTF_LIKE)
+# define SK_PRINTF_LIKE(A, B) SK_ATTRIBUTE(format(printf, (A), (B)))
+#endif
+
+/**
+ * Used to ignore sanitizer warnings.
+ */
+#if !defined(SK_NO_SANITIZE)
+# define SK_NO_SANITIZE(A) SK_ATTRIBUTE(no_sanitize(A))
+#endif
+
+/**
+ * Annotates a class' non-trivial special functions as trivial for the purposes of calls.
+ * Allows a class with a non-trivial destructor to be __is_trivially_relocatable.
+ * Use of this attribute on a public API breaks platform ABI.
+ * Annotated classes may not hold pointers derived from `this`.
+ * Annotated classes must implement move+delete as equivalent to memcpy+free.
+ * Use may require more complete types, as callee destroys.
+ *
+ * https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
+ * https://libcxx.llvm.org/DesignDocs/UniquePtrTrivialAbi.html
+ */
+#if !defined(SK_TRIVIAL_ABI)
+# define SK_TRIVIAL_ABI
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkCPUTypes.h b/gfx/skia/skia/include/private/base/SkCPUTypes.h
new file mode 100644
index 0000000000..a5f60fd3ef
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkCPUTypes.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkCPUTypes_DEFINED
+#define SkCPUTypes_DEFINED
+
+// TODO(bungeman,kjlubick) There are a lot of assumptions throughout the codebase that
+// these types are 32 bits, when they could be more or less. Public APIs should stop
+// using these. Internally, we could use uint_fast8_t and uint_fast16_t, but not in
+// public APIs due to ABI incompatibilities.
+
+/** Fast type for unsigned 8 bits. Use for parameter passing and local
+ variables, not for storage
+*/
+typedef unsigned U8CPU;
+
+/** Fast type for unsigned 16 bits. Use for parameter passing and local
+ variables, not for storage
+*/
+typedef unsigned U16CPU;
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkContainers.h b/gfx/skia/skia/include/private/base/SkContainers.h
new file mode 100644
index 0000000000..2ece73e287
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkContainers.h
@@ -0,0 +1,46 @@
+// Copyright 2022 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkContainers_DEFINED
+#define SkContainers_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkSpan_impl.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SK_SPI SkContainerAllocator {
+public:
+ SkContainerAllocator(size_t sizeOfT, int maxCapacity)
+ : fSizeOfT{sizeOfT}
+ , fMaxCapacity{maxCapacity} {}
+
+ // allocate will abort on failure. Given a capacity of 0, it will return the empty span.
+ // The bytes allocated are freed using sk_free().
+ SkSpan<std::byte> allocate(int capacity, double growthFactor = 1.0);
+
+private:
+ friend struct SkContainerAllocatorTestingPeer;
+ // All capacity counts will be rounded up to kCapacityMultiple.
+ // TODO: this is a constant from the original SkTArray code. This should be checked some how.
+ static constexpr int64_t kCapacityMultiple = 8;
+
+ // Rounds up capacity to next multiple of kCapacityMultiple and pin to fMaxCapacity.
+ size_t roundUpCapacity(int64_t capacity) const;
+
+    // Grows the capacity by growthFactor being sure to stay within kMinBytes and fMaxCapacity.
+ size_t growthFactorCapacity(int capacity, double growthFactor) const;
+
+ const size_t fSizeOfT;
+ const int64_t fMaxCapacity;
+};
+
+// sk_allocate_canfail returns the empty span on failure. Parameter size must be > 0.
+SkSpan<std::byte> sk_allocate_canfail(size_t size);
+
+// Returns the empty span if size is 0. sk_allocate_throw aborts on failure.
+SkSpan<std::byte> sk_allocate_throw(size_t size);
+
+SK_SPI void sk_report_container_overflow_and_die();
+#endif // SkContainers_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkDebug.h b/gfx/skia/skia/include/private/base/SkDebug.h
new file mode 100644
index 0000000000..2e4810fc1c
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkDebug.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDebug_DEFINED
+#define SkDebug_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkAttributes.h"
+#include "include/private/base/SkLoadUserConfig.h" // IWYU pragma: keep
+
+#if !defined(SkDebugf)
+ void SK_SPI SkDebugf(const char format[], ...) SK_PRINTF_LIKE(1, 2);
+#endif
+
+#if defined(SK_DEBUG)
+ #define SkDEBUGCODE(...) __VA_ARGS__
+ #define SkDEBUGF(...) SkDebugf(__VA_ARGS__)
+#else
+ #define SkDEBUGCODE(...)
+ #define SkDEBUGF(...)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkDeque.h b/gfx/skia/skia/include/private/base/SkDeque.h
new file mode 100644
index 0000000000..fbc6167313
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkDeque.h
@@ -0,0 +1,143 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDeque_DEFINED
+#define SkDeque_DEFINED
+
+#include "include/private/base/SkAPI.h"
+
+#include <cstddef>
+
+/*
+ * The deque class works by blindly creating memory space of a specified element
+ * size. It manages the memory as a doubly linked list of blocks each of which
+ * can contain multiple elements. Pushes and pops add/remove blocks from the
+ * beginning/end of the list as necessary while each block tracks the used
+ * portion of its memory.
+ * One behavior to be aware of is that the pops do not immediately remove an
+ * empty block from the beginning/end of the list (Presumably so push/pop pairs
+ * on the block boundaries don't cause thrashing). This can result in the first/
+ * last element not residing in the first/last block.
+ */
+class SK_API SkDeque {
+public:
+ /**
+ * elemSize specifies the size of each individual element in the deque
+ * allocCount specifies how many elements are to be allocated as a block
+ */
+ explicit SkDeque(size_t elemSize, int allocCount = 1);
+ SkDeque(size_t elemSize, void* storage, size_t storageSize, int allocCount = 1);
+ ~SkDeque();
+
+ bool empty() const { return 0 == fCount; }
+ int count() const { return fCount; }
+ size_t elemSize() const { return fElemSize; }
+
+ const void* front() const { return fFront; }
+ const void* back() const { return fBack; }
+
+ void* front() {
+ return (void*)((const SkDeque*)this)->front();
+ }
+
+ void* back() {
+ return (void*)((const SkDeque*)this)->back();
+ }
+
+ /**
+ * push_front and push_back return a pointer to the memory space
+ * for the new element
+ */
+ void* push_front();
+ void* push_back();
+
+ void pop_front();
+ void pop_back();
+
+private:
+ struct Block;
+
+public:
+ class Iter {
+ public:
+ enum IterStart {
+ kFront_IterStart,
+ kBack_IterStart,
+ };
+
+ /**
+ * Creates an uninitialized iterator. Must be reset()
+ */
+ Iter();
+
+ Iter(const SkDeque& d, IterStart startLoc);
+ void* next();
+ void* prev();
+
+ void reset(const SkDeque& d, IterStart startLoc);
+
+ private:
+ SkDeque::Block* fCurBlock;
+ char* fPos;
+ size_t fElemSize;
+ };
+
+ // Inherit privately from Iter to prevent access to reverse iteration
+ class F2BIter : private Iter {
+ public:
+ F2BIter() {}
+
+ /**
+ * Wrap Iter's 2 parameter ctor to force initialization to the
+ * beginning of the deque
+ */
+ F2BIter(const SkDeque& d) : INHERITED(d, kFront_IterStart) {}
+
+ using Iter::next;
+
+ /**
+ * Wrap Iter::reset to force initialization to the beginning of the
+ * deque
+ */
+ void reset(const SkDeque& d) {
+ this->INHERITED::reset(d, kFront_IterStart);
+ }
+
+ private:
+ using INHERITED = Iter;
+ };
+
+private:
+ // allow unit test to call numBlocksAllocated
+ friend class DequeUnitTestHelper;
+
+ void* fFront;
+ void* fBack;
+
+ Block* fFrontBlock;
+ Block* fBackBlock;
+ size_t fElemSize;
+ void* fInitialStorage;
+ int fCount; // number of elements in the deque
+ int fAllocCount; // number of elements to allocate per block
+
+ Block* allocateBlock(int allocCount);
+ void freeBlock(Block* block);
+
+ /**
+ * This returns the number of chunk blocks allocated by the deque. It
+ * can be used to gauge the effectiveness of the selected allocCount.
+ */
+ int numBlocksAllocated() const;
+
+ SkDeque(const SkDeque&) = delete;
+ SkDeque& operator=(const SkDeque&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkFeatures.h b/gfx/skia/skia/include/private/base/SkFeatures.h
new file mode 100644
index 0000000000..662bf03211
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkFeatures.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFeatures_DEFINED
+#define SkFeatures_DEFINED
+
+#if !defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_WIN) && \
+ !defined(SK_BUILD_FOR_UNIX) && !defined(SK_BUILD_FOR_MAC)
+
+ #ifdef __APPLE__
+ #include <TargetConditionals.h>
+ #endif
+
+ #if defined(_WIN32) || defined(__SYMBIAN32__)
+ #define SK_BUILD_FOR_WIN
+ #elif defined(ANDROID) || defined(__ANDROID__)
+ #define SK_BUILD_FOR_ANDROID
+ #elif defined(linux) || defined(__linux) || defined(__FreeBSD__) || \
+ defined(__OpenBSD__) || defined(__sun) || defined(__NetBSD__) || \
+ defined(__DragonFly__) || defined(__Fuchsia__) || \
+ defined(__GLIBC__) || defined(__GNU__) || defined(__unix__)
+ #define SK_BUILD_FOR_UNIX
+ #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #define SK_BUILD_FOR_IOS
+ #else
+ #define SK_BUILD_FOR_MAC
+ #endif
+#endif // end SK_BUILD_FOR_*
+
+
+#if defined(SK_BUILD_FOR_WIN) && !defined(__clang__)
+ #if !defined(SK_RESTRICT)
+ #define SK_RESTRICT __restrict
+ #endif
+ #if !defined(SK_WARN_UNUSED_RESULT)
+ #define SK_WARN_UNUSED_RESULT
+ #endif
+#endif
+
+#if !defined(SK_RESTRICT)
+ #define SK_RESTRICT __restrict__
+#endif
+
+#if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
+ #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+ #define SK_CPU_BENDIAN
+ #elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+ #define SK_CPU_LENDIAN
+ #elif defined(__sparc) || defined(__sparc__) || \
+ defined(_POWER) || defined(__powerpc__) || \
+ defined(__ppc__) || defined(__hppa) || \
+ defined(__PPC__) || defined(__PPC64__) || \
+ defined(_MIPSEB) || defined(__ARMEB__) || \
+ defined(__s390__) || \
+ (defined(__sh__) && defined(__BIG_ENDIAN__)) || \
+ (defined(__ia64) && defined(__BIG_ENDIAN__))
+ #define SK_CPU_BENDIAN
+ #else
+ #define SK_CPU_LENDIAN
+ #endif
+#endif
+
+#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+ #define SK_CPU_X86 1
+#endif
+
+/**
+ * SK_CPU_SSE_LEVEL
+ *
+ * If defined, SK_CPU_SSE_LEVEL should be set to the highest supported level.
+ * On non-intel CPU this should be undefined.
+ */
+#define SK_CPU_SSE_LEVEL_SSE1 10
+#define SK_CPU_SSE_LEVEL_SSE2 20
+#define SK_CPU_SSE_LEVEL_SSE3 30
+#define SK_CPU_SSE_LEVEL_SSSE3 31
+#define SK_CPU_SSE_LEVEL_SSE41 41
+#define SK_CPU_SSE_LEVEL_SSE42 42
+#define SK_CPU_SSE_LEVEL_AVX 51
+#define SK_CPU_SSE_LEVEL_AVX2 52
+#define SK_CPU_SSE_LEVEL_SKX 60
+
+// TODO(brianosman,kjlubick) clean up these checks
+
+// Are we in GCC/Clang?
+#ifndef SK_CPU_SSE_LEVEL
+ // These checks must be done in descending order to ensure we set the highest
+ // available SSE level.
+ #if defined(__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512CD__) && \
+ defined(__AVX512BW__) && defined(__AVX512VL__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SKX
+ #elif defined(__AVX2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2
+ #elif defined(__AVX__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX
+ #elif defined(__SSE4_2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE42
+ #elif defined(__SSE4_1__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE41
+ #elif defined(__SSSE3__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSSE3
+ #elif defined(__SSE3__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE3
+ #elif defined(__SSE2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #endif
+#endif
+
+// Are we in VisualStudio?
+#ifndef SK_CPU_SSE_LEVEL
+ // These checks must be done in descending order to ensure we set the highest
+ // available SSE level. 64-bit intel guarantees at least SSE2 support.
+ #if defined(__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512CD__) && \
+ defined(__AVX512BW__) && defined(__AVX512VL__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SKX
+ #elif defined(__AVX2__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2
+ #elif defined(__AVX__)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX
+ #elif defined(_M_X64) || defined(_M_AMD64)
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #elif defined(_M_IX86_FP)
+ #if _M_IX86_FP >= 2
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2
+ #elif _M_IX86_FP == 1
+ #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE1
+ #endif
+ #endif
+#endif
+
+// ARM defines
+#if defined(__arm__) && (!defined(__APPLE__) || !TARGET_IPHONE_SIMULATOR)
+ #define SK_CPU_ARM32
+#elif defined(__aarch64__)
+ #define SK_CPU_ARM64
+#endif
+
+// All 64-bit ARM chips have NEON. Many 32-bit ARM chips do too.
+#if !defined(SK_ARM_HAS_NEON) && defined(__ARM_NEON)
+ #define SK_ARM_HAS_NEON
+#endif
+
+#if defined(__ARM_FEATURE_CRC32)
+ #define SK_ARM_HAS_CRC32
+#endif
+
+#endif // SkFeatures_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkFixed.h b/gfx/skia/skia/include/private/base/SkFixed.h
new file mode 100644
index 0000000000..2c8f2fb56c
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkFixed.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFixed_DEFINED
+#define SkFixed_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMath.h" // IWYU pragma: keep
+#include "include/private/base/SkTPin.h" // IWYU pragma: keep
+
+#include <cstdint>
+
+/** \file SkFixed.h
+
+ Types and macros for 16.16 fixed point
+*/
+
+/** 32 bit signed integer used to represent fractional values with 16 bits to the right of the decimal point
+*/
+typedef int32_t SkFixed;
+#define SK_Fixed1 (1 << 16)
+#define SK_FixedHalf (1 << 15)
+#define SK_FixedQuarter (1 << 14)
+#define SK_FixedMax (0x7FFFFFFF)
+#define SK_FixedMin (-SK_FixedMax)
+#define SK_FixedPI (0x3243F)
+#define SK_FixedSqrt2 (92682)
+#define SK_FixedTanPIOver8 (0x6A0A)
+#define SK_FixedRoot2Over2 (0xB505)
+
+// NOTE: SkFixedToFloat is exact. SkFloatToFixed seems to lack a rounding step. For all fixed-point
+// values, this version is as accurate as possible for (fixed -> float -> fixed). Rounding reduces
+// accuracy if the intermediate floats are in the range that only holds integers (adding 0.5f to an
+// odd integer then snaps to nearest even). Using double for the rounding math gives maximum
+// accuracy for (float -> fixed -> float), but that's usually overkill.
+#define SkFixedToFloat(x) ((x) * 1.52587890625e-5f)
+#define SkFloatToFixed(x) sk_float_saturate2int((x) * SK_Fixed1)
+
+#ifdef SK_DEBUG
+ static inline SkFixed SkFloatToFixed_Check(float x) {
+ int64_t n64 = (int64_t)(x * SK_Fixed1);
+ SkFixed n32 = (SkFixed)n64;
+ SkASSERT(n64 == n32);
+ return n32;
+ }
+#else
+ #define SkFloatToFixed_Check(x) SkFloatToFixed(x)
+#endif
+
+#define SkFixedToDouble(x) ((x) * 1.52587890625e-5)
+#define SkDoubleToFixed(x) ((SkFixed)((x) * SK_Fixed1))
+
+/** Converts an integer to a SkFixed, asserting that the result does not overflow
+ a 32 bit signed integer
+*/
+#ifdef SK_DEBUG
+ inline SkFixed SkIntToFixed(int n)
+ {
+ SkASSERT(n >= -32768 && n <= 32767);
+ // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before
+ // shifting.
+ return (SkFixed)( (unsigned)n << 16 );
+ }
+#else
+ // Left shifting a negative value has undefined behavior in C, so we cast to unsigned before
+ // shifting. Then we force the cast to SkFixed to ensure that the answer is signed (like the
+ // debug version).
+ #define SkIntToFixed(n) (SkFixed)((unsigned)(n) << 16)
+#endif
+
+#define SkFixedRoundToInt(x) (((x) + SK_FixedHalf) >> 16)
+#define SkFixedCeilToInt(x) (((x) + SK_Fixed1 - 1) >> 16)
+#define SkFixedFloorToInt(x) ((x) >> 16)
+
+static inline SkFixed SkFixedRoundToFixed(SkFixed x) {
+ return (SkFixed)( (uint32_t)(x + SK_FixedHalf) & 0xFFFF0000 );
+}
+static inline SkFixed SkFixedCeilToFixed(SkFixed x) {
+ return (SkFixed)( (uint32_t)(x + SK_Fixed1 - 1) & 0xFFFF0000 );
+}
+static inline SkFixed SkFixedFloorToFixed(SkFixed x) {
+ return (SkFixed)( (uint32_t)x & 0xFFFF0000 );
+}
+
+#define SkFixedAve(a, b) (((a) + (b)) >> 1)
+
+// The divide may exceed 32 bits. Clamp to a signed 32 bit result.
+#define SkFixedDiv(numer, denom) \
+ SkToS32(SkTPin<int64_t>((SkLeftShift((int64_t)(numer), 16) / (denom)), SK_MinS32, SK_MaxS32))
+
+static inline SkFixed SkFixedMul(SkFixed a, SkFixed b) {
+ return (SkFixed)((int64_t)a * b >> 16);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Platform-specific alternatives to our portable versions.
+
+// The VCVT float-to-fixed instruction is part of the VFPv3 instruction set.
+#if defined(__ARM_VFPV3__)
+ #include <cstring>
+
+    /* This does not handle NaN or other obscurities, but is faster
+       than (int)(x*65536). When built on Android with -Os, needs forcing
+ to inline or we lose the speed benefit.
+ */
+ SK_ALWAYS_INLINE SkFixed SkFloatToFixed_arm(float x)
+ {
+ int32_t y;
+ asm("vcvt.s32.f32 %0, %0, #16": "+w"(x));
+ std::memcpy(&y, &x, sizeof(y));
+ return y;
+ }
+ #undef SkFloatToFixed
+ #define SkFloatToFixed(x) SkFloatToFixed_arm(x)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SkFixedToScalar(x) SkFixedToFloat(x)
+#define SkScalarToFixed(x) SkFloatToFixed(x)
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int64_t SkFixed3232; // 32.32
+
+#define SkFixed3232Max SK_MaxS64
+#define SkFixed3232Min (-SkFixed3232Max)
+
+#define SkIntToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 32))
+#define SkFixed3232ToInt(x) ((int)((x) >> 32))
+#define SkFixedToFixed3232(x) (SkLeftShift((SkFixed3232)(x), 16))
+#define SkFixed3232ToFixed(x) ((SkFixed)((x) >> 16))
+#define SkFloatToFixed3232(x) sk_float_saturate2int64((x) * (65536.0f * 65536.0f))
+#define SkFixed3232ToFloat(x) (x * (1 / (65536.0f * 65536.0f)))
+
+#define SkScalarToFixed3232(x) SkFloatToFixed3232(x)
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkFloatBits.h b/gfx/skia/skia/include/private/base/SkFloatBits.h
new file mode 100644
index 0000000000..37a7b271ae
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkFloatBits.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatBits_DEFINED
+#define SkFloatBits_DEFINED
+
+#include "include/private/base/SkMath.h"
+
+#include <cstdint>
+
+/** Convert a sign-bit int (i.e. float interpreted as int) into a 2s complement
+ int. This also converts -0 (0x80000000) to 0. Doing this to a float allows
+ it to be compared using normal C operators (<, <=, etc.)
+*/
+static inline int32_t SkSignBitTo2sCompliment(int32_t x) {
+ if (x < 0) {
+ x &= 0x7FFFFFFF;
+ x = -x;
+ }
+ return x;
+}
+
+/** Convert a 2s complement int to a sign-bit (i.e. int interpreted as float).
+ This undoes the result of SkSignBitTo2sCompliment().
+ */
+static inline int32_t Sk2sComplimentToSignBit(int32_t x) {
+ int sign = x >> 31;
+ // make x positive
+ x = (x ^ sign) - sign;
+ // set the sign bit as needed
+ x |= SkLeftShift(sign, 31);
+ return x;
+}
+
+union SkFloatIntUnion {
+ float fFloat;
+ int32_t fSignBitInt;
+};
+
+// Helper to see a float as its bit pattern (w/o aliasing warnings)
+static inline int32_t SkFloat2Bits(float x) {
+ SkFloatIntUnion data;
+ data.fFloat = x;
+ return data.fSignBitInt;
+}
+
+// Helper to see a bit pattern as a float (w/o aliasing warnings)
+static inline float SkBits2Float(int32_t floatAsBits) {
+ SkFloatIntUnion data;
+ data.fSignBitInt = floatAsBits;
+ return data.fFloat;
+}
+
+constexpr int32_t gFloatBits_exponent_mask = 0x7F800000;
+constexpr int32_t gFloatBits_matissa_mask = 0x007FFFFF;
+
+static inline bool SkFloatBits_IsFinite(int32_t bits) {
+ return (bits & gFloatBits_exponent_mask) != gFloatBits_exponent_mask;
+}
+
+static inline bool SkFloatBits_IsInf(int32_t bits) {
+ return ((bits & gFloatBits_exponent_mask) == gFloatBits_exponent_mask) &&
+ (bits & gFloatBits_matissa_mask) == 0;
+}
+
+/** Return the float as a 2s complement int. Just to be used to compare floats
+ to each other or against positive float-bit-constants (like 0). This does
+ not return the int equivalent of the float, just something cheaper for
+ compares-only.
+ */
+static inline int32_t SkFloatAs2sCompliment(float x) {
+ return SkSignBitTo2sCompliment(SkFloat2Bits(x));
+}
+
+/** Return the 2s complement int as a float. This undoes the result of
+ SkFloatAs2sCompliment
+ */
+static inline float Sk2sComplimentAsFloat(int32_t x) {
+ return SkBits2Float(Sk2sComplimentToSignBit(x));
+}
+
+// Scalar wrappers for float-bit routines
+
+#define SkScalarAs2sCompliment(x) SkFloatAs2sCompliment(x)
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkFloatingPoint.h b/gfx/skia/skia/include/private/base/SkFloatingPoint.h
new file mode 100644
index 0000000000..4b2eb4d897
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkFloatingPoint.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatingPoint_DEFINED
+#define SkFloatingPoint_DEFINED
+
+#include "include/private/base/SkAttributes.h"
+#include "include/private/base/SkFloatBits.h"
+#include "include/private/base/SkMath.h"
+
+#include <cfloat>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+
+constexpr float SK_FloatSqrt2 = 1.41421356f;
+constexpr float SK_FloatPI = 3.14159265f;
+constexpr double SK_DoublePI = 3.14159265358979323846264338327950288;
+
+// C++98 cmath std::pow seems to be the earliest portable way to get float pow.
+// However, on Linux including cmath undefines isfinite.
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14608
+static inline float sk_float_pow(float base, float exp) {
+ return powf(base, exp);
+}
+
+#define sk_float_sqrt(x) sqrtf(x)
+#define sk_float_sin(x) sinf(x)
+#define sk_float_cos(x) cosf(x)
+#define sk_float_tan(x) tanf(x)
+#define sk_float_floor(x) floorf(x)
+#define sk_float_ceil(x) ceilf(x)
+#define sk_float_trunc(x) truncf(x)
+#ifdef SK_BUILD_FOR_MAC
+# define sk_float_acos(x) static_cast<float>(acos(x))
+# define sk_float_asin(x) static_cast<float>(asin(x))
+#else
+# define sk_float_acos(x) acosf(x)
+# define sk_float_asin(x) asinf(x)
+#endif
+#define sk_float_atan2(y,x) atan2f(y,x)
+#define sk_float_abs(x) fabsf(x)
+#define sk_float_copysign(x, y) copysignf(x, y)
+#define sk_float_mod(x,y) fmodf(x,y)
+#define sk_float_exp(x) expf(x)
+#define sk_float_log(x) logf(x)
+
+constexpr float sk_float_degrees_to_radians(float degrees) {
+ return degrees * (SK_FloatPI / 180);
+}
+
+constexpr float sk_float_radians_to_degrees(float radians) {
+ return radians * (180 / SK_FloatPI);
+}
+
+// floor(double+0.5) vs. floorf(float+0.5f) give comparable performance, but upcasting to double
+// means tricky values like 0.49999997 and 2^24 get rounded correctly. If these were rounded
+// as floorf(x + .5f), they would be 1 higher than expected.
+#define sk_float_round(x) (float)sk_double_round((double)(x))
+
+// can't find log2f on android, but maybe that's just a tool bug?
+#ifdef SK_BUILD_FOR_ANDROID
+ static inline float sk_float_log2(float x) {
+ const double inv_ln_2 = 1.44269504088896;
+ return (float)(log(x) * inv_ln_2);
+ }
+#else
+ #define sk_float_log2(x) log2f(x)
+#endif
+
+static inline bool sk_float_isfinite(float x) {
+ return SkFloatBits_IsFinite(SkFloat2Bits(x));
+}
+
+static inline bool sk_floats_are_finite(float a, float b) {
+ return sk_float_isfinite(a) && sk_float_isfinite(b);
+}
+
+static inline bool sk_floats_are_finite(const float array[], int count) {
+ float prod = 0;
+ for (int i = 0; i < count; ++i) {
+ prod *= array[i];
+ }
+ // At this point, prod will either be NaN or 0
+ return prod == 0; // if prod is NaN, this check will return false
+}
+
+static inline bool sk_float_isinf(float x) {
+ return SkFloatBits_IsInf(SkFloat2Bits(x));
+}
+
+#ifdef SK_BUILD_FOR_WIN
+ #define sk_float_isnan(x) _isnan(x)
+#elif defined(__clang__) || defined(__GNUC__)
+ #define sk_float_isnan(x) __builtin_isnan(x)
+#else
+ #define sk_float_isnan(x) isnan(x)
+#endif
+
+#define sk_double_isnan(a) sk_float_isnan(a)
+
+#define SK_MaxS32FitsInFloat 2147483520
+#define SK_MinS32FitsInFloat -SK_MaxS32FitsInFloat
+
+#define SK_MaxS64FitsInFloat (SK_MaxS64 >> (63-24) << (63-24)) // 0x7fffff8000000000
+#define SK_MinS64FitsInFloat -SK_MaxS64FitsInFloat
+
+/**
+ * Return the closest int for the given float. Returns SK_MaxS32FitsInFloat for NaN.
+ */
+static inline int sk_float_saturate2int(float x) {
+ x = x < SK_MaxS32FitsInFloat ? x : SK_MaxS32FitsInFloat;
+ x = x > SK_MinS32FitsInFloat ? x : SK_MinS32FitsInFloat;
+ return (int)x;
+}
+
+/**
+ * Return the closest int for the given double. Returns SK_MaxS32 for NaN.
+ */
+static inline int sk_double_saturate2int(double x) {
+ x = x < SK_MaxS32 ? x : SK_MaxS32;
+ x = x > SK_MinS32 ? x : SK_MinS32;
+ return (int)x;
+}
+
+/**
+ * Return the closest int64_t for the given float. Returns SK_MaxS64FitsInFloat for NaN.
+ */
+static inline int64_t sk_float_saturate2int64(float x) {
+ x = x < SK_MaxS64FitsInFloat ? x : SK_MaxS64FitsInFloat;
+ x = x > SK_MinS64FitsInFloat ? x : SK_MinS64FitsInFloat;
+ return (int64_t)x;
+}
+
+#define sk_float_floor2int(x) sk_float_saturate2int(sk_float_floor(x))
+#define sk_float_round2int(x) sk_float_saturate2int(sk_float_round(x))
+#define sk_float_ceil2int(x) sk_float_saturate2int(sk_float_ceil(x))
+
+#define sk_float_floor2int_no_saturate(x) (int)sk_float_floor(x)
+#define sk_float_round2int_no_saturate(x) (int)sk_float_round(x)
+#define sk_float_ceil2int_no_saturate(x) (int)sk_float_ceil(x)
+
+#define sk_double_floor(x) floor(x)
+#define sk_double_round(x) floor((x) + 0.5)
+#define sk_double_ceil(x) ceil(x)
+#define sk_double_floor2int(x) (int)sk_double_floor(x)
+#define sk_double_round2int(x) (int)sk_double_round(x)
+#define sk_double_ceil2int(x) (int)sk_double_ceil(x)
+
+// Cast double to float, ignoring any warning about too-large finite values being cast to float.
+// Clang thinks this is undefined, but it's actually implementation defined to return either
+// the largest float or infinity (one of the two bracketing representable floats). Good enough!
+#ifdef __clang__
+SK_NO_SANITIZE("float-cast-overflow")
+#elif defined(__GNUC__)
+SK_ATTRIBUTE(no_sanitize_undefined)
+#endif
+static inline float sk_double_to_float(double x) {
+ return static_cast<float>(x);
+}
+
+#define SK_FloatNaN std::numeric_limits<float>::quiet_NaN()
+#define SK_FloatInfinity (+std::numeric_limits<float>::infinity())
+#define SK_FloatNegativeInfinity (-std::numeric_limits<float>::infinity())
+
+#define SK_DoubleNaN std::numeric_limits<double>::quiet_NaN()
+
+// Returns false if any of the floats are outside of [0...1]
+// Returns true if count is 0
+bool sk_floats_are_unit(const float array[], size_t count);
+
+static inline float sk_float_rsqrt_portable(float x) { return 1.0f / sk_float_sqrt(x); }
+static inline float sk_float_rsqrt (float x) { return 1.0f / sk_float_sqrt(x); }
+
+// Returns the log2 of the provided value, were that value to be rounded up to the next power of 2.
+// Returns 0 if value <= 0:
+// Never returns a negative number, even if value is NaN.
+//
+// sk_float_nextlog2((-inf..1]) -> 0
+// sk_float_nextlog2((1..2]) -> 1
+// sk_float_nextlog2((2..4]) -> 2
+// sk_float_nextlog2((4..8]) -> 3
+// ...
+static inline int sk_float_nextlog2(float x) {
+ uint32_t bits = (uint32_t)SkFloat2Bits(x);
+ bits += (1u << 23) - 1u; // Increment the exponent for non-powers-of-2.
+ int exp = ((int32_t)bits >> 23) - 127;
+ return exp & ~(exp >> 31); // Return 0 for negative or denormalized floats, and exponents < 0.
+}
+
+// This is the number of significant digits we can print in a string such that when we read that
+// string back we get the floating point number we expect. The minimum value C requires is 6, but
+// most compilers support 9
+#ifdef FLT_DECIMAL_DIG
+#define SK_FLT_DECIMAL_DIG FLT_DECIMAL_DIG
+#else
+#define SK_FLT_DECIMAL_DIG 9
+#endif
+
+// IEEE defines how float divide behaves for non-finite values and zero-denoms, but C does not
+// so we have a helper that suppresses the possible undefined-behavior warnings.
+
+#ifdef __clang__
+SK_NO_SANITIZE("float-divide-by-zero")
+#elif defined(__GNUC__)
+SK_ATTRIBUTE(no_sanitize_undefined)
+#endif
+static inline float sk_ieee_float_divide(float numer, float denom) {
+ return numer / denom;
+}
+
+#ifdef __clang__
+SK_NO_SANITIZE("float-divide-by-zero")
+#elif defined(__GNUC__)
+SK_ATTRIBUTE(no_sanitize_undefined)
+#endif
+static inline double sk_ieee_double_divide(double numer, double denom) {
+ return numer / denom;
+}
+
+// While we clean up divide by zero, we'll replace places that do divide by zero with this TODO.
+static inline float sk_ieee_float_divide_TODO_IS_DIVIDE_BY_ZERO_SAFE_HERE(float n, float d) {
+ return sk_ieee_float_divide(n,d);
+}
+
+static inline float sk_fmaf(float f, float m, float a) {
+#if defined(FP_FAST_FMA)
+ return std::fmaf(f,m,a);
+#else
+ return f*m+a;
+#endif
+}
+
+// Returns true iff the provided number is within a small epsilon of 0.
+bool sk_double_nearly_zero(double a);
+
+// Comparing floating point numbers is complicated. This helper only works if one or none
+// of the two inputs is not very close to zero. It also does not work if both inputs could be NaN.
+// The term "ulps" stands for "units of least precision". Read the following for more nuance:
+// https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+bool sk_doubles_nearly_equal_ulps(double a, double b, uint8_t max_ulps_diff=16);
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkLoadUserConfig.h b/gfx/skia/skia/include/private/base/SkLoadUserConfig.h
new file mode 100644
index 0000000000..397d40bf0c
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkLoadUserConfig.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SK_USER_CONFIG_WAS_LOADED
+
+// Include this to set reasonable defaults (e.g. for SK_CPU_LENDIAN)
+#include "include/private/base/SkFeatures.h"
+
+// Allows embedders that want to disable macros that take arguments to just
+// define that symbol to be one of these
+#define SK_NOTHING_ARG1(arg1)
+#define SK_NOTHING_ARG2(arg1, arg2)
+#define SK_NOTHING_ARG3(arg1, arg2, arg3)
+
+// IWYU pragma: begin_exports
+
+// Note: SK_USER_CONFIG_HEADER will not work with Bazel builds, as that file will not
+// be specified for the Bazel sandbox.
+#if defined (SK_USER_CONFIG_HEADER)
+ #include SK_USER_CONFIG_HEADER
+#else
+ #include "include/config/SkUserConfig.h"
+#endif
+// IWYU pragma: end_exports
+
+// Checks to make sure the SkUserConfig options do not conflict.
+#if !defined(SK_DEBUG) && !defined(SK_RELEASE)
+ #ifdef NDEBUG
+ #define SK_RELEASE
+ #else
+ #define SK_DEBUG
+ #endif
+#endif
+
+#if defined(SK_DEBUG) && defined(SK_RELEASE)
+# error "cannot define both SK_DEBUG and SK_RELEASE"
+#elif !defined(SK_DEBUG) && !defined(SK_RELEASE)
+# error "must define either SK_DEBUG or SK_RELEASE"
+#endif
+
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN)
+# error "cannot define both SK_CPU_LENDIAN and SK_CPU_BENDIAN"
+#elif !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN)
+# error "must define either SK_CPU_LENDIAN or SK_CPU_BENDIAN"
+#endif
+
+#if defined(SK_CPU_BENDIAN) && !defined(I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN)
+ #error "The Skia team is not endian-savvy enough to support big-endian CPUs."
+ #error "If you still want to use Skia,"
+ #error "please define I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN."
+#endif
+
+#define SK_USER_CONFIG_WAS_LOADED
+#endif // SK_USER_CONFIG_WAS_LOADED
diff --git a/gfx/skia/skia/include/private/base/SkMacros.h b/gfx/skia/skia/include/private/base/SkMacros.h
new file mode 100644
index 0000000000..a28602c4fb
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkMacros.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMacros_DEFINED
+#define SkMacros_DEFINED
+
+/*
+ * Usage: SK_MACRO_CONCAT(a, b) to construct the symbol ab
+ *
+ * SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
+ *
+ */
+#define SK_MACRO_CONCAT(X, Y) SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
+#define SK_MACRO_CONCAT_IMPL_PRIV(X, Y) X ## Y
+
+/*
+ * Usage: SK_MACRO_APPEND_LINE(foo) to make foo123, where 123 is the current
+ * line number. Easy way to construct
+ * unique names for local functions or
+ * variables.
+ */
+#define SK_MACRO_APPEND_LINE(name) SK_MACRO_CONCAT(name, __LINE__)
+
+#define SK_MACRO_APPEND_COUNTER(name) SK_MACRO_CONCAT(name, __COUNTER__)
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Can be used to bracket data types that must be dense/packed, e.g. hash keys.
+#if defined(__clang__) // This should work on GCC too, but GCC diagnostic pop didn't seem to work!
+ #define SK_BEGIN_REQUIRE_DENSE _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic error \"-Wpadded\"")
+ #define SK_END_REQUIRE_DENSE _Pragma("GCC diagnostic pop")
+#else
+ #define SK_BEGIN_REQUIRE_DENSE
+ #define SK_END_REQUIRE_DENSE
+#endif
+
+#ifdef MOZ_SKIA
+
+ #ifdef MOZ_ASAN
+ #include "mozilla/MemoryChecking.h"
+ #define SK_INTENTIONALLY_LEAKED(X) MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(X)
+ #else
+ #define SK_INTENTIONALLY_LEAKED(x) ((void)0)
+ #endif
+
+#else // !MOZ_SKIA
+
+#if defined(__clang__) && defined(__has_feature)
+ // Some compilers have a preprocessor that does not appear to do short-circuit
+ // evaluation as expected
+ #if __has_feature(leak_sanitizer) || __has_feature(address_sanitizer)
+ // Chrome had issues if we tried to include lsan_interface.h ourselves.
+ // https://github.com/llvm/llvm-project/blob/10a35632d55bb05004fe3d0c2d4432bb74897ee7/compiler-rt/include/sanitizer/lsan_interface.h#L26
+extern "C" {
+ void __lsan_ignore_object(const void *p);
+}
+ #define SK_INTENTIONALLY_LEAKED(X) __lsan_ignore_object(X)
+ #else
+ #define SK_INTENTIONALLY_LEAKED(X) ((void)0)
+ #endif
+#else
+ #define SK_INTENTIONALLY_LEAKED(X) ((void)0)
+#endif
+
+#endif // MOZ_SKIA
+
+#define SK_INIT_TO_AVOID_WARNING = 0
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Defines overloaded bitwise operators to make it easier to use an enum as a
+ * bitfield.
+ */
+#define SK_MAKE_BITFIELD_OPS(X) \
+ inline X operator ~(X a) { \
+ using U = std::underlying_type_t<X>; \
+ return (X) (~static_cast<U>(a)); \
+ } \
+ inline X operator |(X a, X b) { \
+ using U = std::underlying_type_t<X>; \
+ return (X) (static_cast<U>(a) | static_cast<U>(b)); \
+ } \
+ inline X& operator |=(X& a, X b) { \
+ return (a = a | b); \
+ } \
+ inline X operator &(X a, X b) { \
+ using U = std::underlying_type_t<X>; \
+ return (X) (static_cast<U>(a) & static_cast<U>(b)); \
+ } \
+ inline X& operator &=(X& a, X b) { \
+ return (a = a & b); \
+ }
+
+#define SK_DECL_BITFIELD_OPS_FRIENDS(X) \
+ friend X operator ~(X a); \
+ friend X operator |(X a, X b); \
+ friend X& operator |=(X& a, X b); \
+ \
+ friend X operator &(X a, X b); \
+ friend X& operator &=(X& a, X b);
+
+#endif // SkMacros_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkMalloc.h b/gfx/skia/skia/include/private/base/SkMalloc.h
new file mode 100644
index 0000000000..1c0c2e73da
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkMalloc.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMalloc_DEFINED
+#define SkMalloc_DEFINED
+
+#include <cstring>
+
+#include "include/private/base/SkAPI.h"
+
+/*
+ memory wrappers to be implemented by the porting layer (platform)
+*/
+
+
+/** Free memory returned by sk_malloc(). It is safe to pass null. */
+SK_API extern void sk_free(void*);
+
+/**
+ * Called internally if we run out of memory. The platform implementation must
+ * not return, but should either throw an exception or otherwise exit.
+ */
+SK_API extern void sk_out_of_memory(void);
+
+enum {
+ /**
+ * If this bit is set, the returned buffer must be zero-initialized. If this bit is not set
+ * the buffer can be uninitialized.
+ */
+ SK_MALLOC_ZERO_INITIALIZE = 1 << 0,
+
+ /**
+ * If this bit is set, the implementation must throw/crash/quit if the request cannot
+ * be fulfilled. If this bit is not set, then it should return nullptr on failure.
+ */
+ SK_MALLOC_THROW = 1 << 1,
+};
+/**
+ * Return a block of memory (at least 4-byte aligned) of at least the specified size.
+ * If the requested memory cannot be returned, either return nullptr or throw/exit, depending
+ * on the SK_MALLOC_THROW bit. If the allocation succeeds, the memory will be zero-initialized
+ * if the SK_MALLOC_ZERO_INITIALIZE bit was set.
+ *
+ * To free the memory, call sk_free()
+ */
+SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
+
+/** Same as standard realloc(), but this one never returns null on failure. It will throw
+ * if it fails.
+ * If size is 0, it will call sk_free on buffer and return null. (This behavior is implementation-
+ * defined for normal realloc. We follow what glibc does.)
+ */
+SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
+
+static inline void* sk_malloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW);
+}
+
+static inline void* sk_calloc_throw(size_t size) {
+ return sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_ZERO_INITIALIZE);
+}
+
+static inline void* sk_calloc_canfail(size_t size) {
+#if defined(SK_BUILD_FOR_FUZZER)
+ // To reduce the chance of OOM, pretend we can't allocate more than 200kb.
+ if (size > 200000) {
+ return nullptr;
+ }
+#endif
+ return sk_malloc_flags(size, SK_MALLOC_ZERO_INITIALIZE);
+}
+
+// Performs a safe multiply count * elemSize, checking for overflow
+SK_API extern void* sk_calloc_throw(size_t count, size_t elemSize);
+SK_API extern void* sk_malloc_throw(size_t count, size_t elemSize);
+SK_API extern void* sk_realloc_throw(void* buffer, size_t count, size_t elemSize);
+
+/**
+ * These variants return nullptr on failure
+ */
+static inline void* sk_malloc_canfail(size_t size) {
+#if defined(SK_BUILD_FOR_FUZZER)
+ // To reduce the chance of OOM, pretend we can't allocate more than 200kb.
+ if (size > 200000) {
+ return nullptr;
+ }
+#endif
+ return sk_malloc_flags(size, 0);
+}
+SK_API extern void* sk_malloc_canfail(size_t count, size_t elemSize);
+
+// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
+static inline void sk_bzero(void* buffer, size_t size) {
+ // Please c.f. sk_careful_memcpy. It's undefined behavior to call memset(null, 0, 0).
+ if (size) {
+ memset(buffer, 0, size);
+ }
+}
+
+/**
+ * sk_careful_memcpy() is just like memcpy(), but guards against undefined behavior.
+ *
+ * It is undefined behavior to call memcpy() with null dst or src, even if len is 0.
+ * If an optimizer is "smart" enough, it can exploit this to do unexpected things.
+ * memcpy(dst, src, 0);
+ * if (src) {
+ * printf("%x\n", *src);
+ * }
+ * In this code the compiler can assume src is not null and omit the if (src) {...} check,
+ * unconditionally running the printf, crashing the program if src really is null.
+ * Of the compilers we pay attention to only GCC performs this optimization in practice.
+ */
+static inline void* sk_careful_memcpy(void* dst, const void* src, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+ // So we just need to skip calling memcpy when len == 0.
+ if (len) {
+ memcpy(dst,src,len);
+ }
+ return dst;
+}
+
+static inline void* sk_careful_memmove(void* dst, const void* src, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+    // So we just need to skip calling memmove when len == 0.
+ if (len) {
+ memmove(dst,src,len);
+ }
+ return dst;
+}
+
+static inline int sk_careful_memcmp(const void* a, const void* b, size_t len) {
+ // When we pass >0 len we had better already be passing valid pointers.
+ // So we just need to skip calling memcmp when len == 0.
+ if (len == 0) {
+ return 0; // we treat zero-length buffers as "equal"
+ }
+ return memcmp(a, b, len);
+}
+
+#endif // SkMalloc_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkMath.h b/gfx/skia/skia/include/private/base/SkMath.h
new file mode 100644
index 0000000000..34bfa739f7
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkMath.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMath_DEFINED
+#define SkMath_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkCPUTypes.h"
+
+#include <cstdint>
+#include <climits>
+
+// Max Signed 16 bit value
+static constexpr int16_t SK_MaxS16 = INT16_MAX;
+static constexpr int16_t SK_MinS16 = -SK_MaxS16;
+
+static constexpr int32_t SK_MaxS32 = INT32_MAX;
+static constexpr int32_t SK_MinS32 = -SK_MaxS32;
+static constexpr int32_t SK_NaN32 = INT32_MIN;
+
+static constexpr int64_t SK_MaxS64 = INT64_MAX;
+static constexpr int64_t SK_MinS64 = -SK_MaxS64;
+
+// 64bit -> 32bit utilities
+
+// Handy util that can be passed two ints, and will automatically promote to
+// 64bits before the multiply, so the caller doesn't have to remember to cast
+// e.g. (int64_t)a * b;
+static inline int64_t sk_64_mul(int64_t a, int64_t b) {
+ return a * b;
+}
+
+static inline constexpr int32_t SkLeftShift(int32_t value, int32_t shift) {
+ return (int32_t) ((uint32_t) value << shift);
+}
+
+static inline constexpr int64_t SkLeftShift(int64_t value, int32_t shift) {
+ return (int64_t) ((uint64_t) value << shift);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Returns true if value is a power of 2. Does not explicitly check for
+ * value <= 0.
+ */
+template <typename T> constexpr inline bool SkIsPow2(T value) {
+ return (value & (value - 1)) == 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return a*b/((1 << shift) - 1), rounding any fractional bits.
+ * Only valid if a and b are unsigned and <= 32767 and shift is > 0 and <= 8
+ */
+static inline unsigned SkMul16ShiftRound(U16CPU a, U16CPU b, int shift) {
+ SkASSERT(a <= 32767);
+ SkASSERT(b <= 32767);
+ SkASSERT(shift > 0 && shift <= 8);
+ unsigned prod = a*b + (1 << (shift - 1));
+ return (prod + (prod >> shift)) >> shift;
+}
+
+/**
+ * Return a*b/255, rounding any fractional bits.
+ * Only valid if a and b are unsigned and <= 32767.
+ */
+static inline U8CPU SkMulDiv255Round(U16CPU a, U16CPU b) {
+ return SkMul16ShiftRound(a, b, 8);
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkMutex.h b/gfx/skia/skia/include/private/base/SkMutex.h
new file mode 100644
index 0000000000..4452beb912
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkMutex.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMutex_DEFINED
+#define SkMutex_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkSemaphore.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include "include/private/base/SkThreadID.h"
+
+class SK_CAPABILITY("mutex") SkMutex {
+public:
+ constexpr SkMutex() = default;
+
+ ~SkMutex() {
+ this->assertNotHeld();
+ }
+
+ void acquire() SK_ACQUIRE() {
+ fSemaphore.wait();
+ SkDEBUGCODE(fOwner = SkGetThreadID();)
+ }
+
+ void release() SK_RELEASE_CAPABILITY() {
+ this->assertHeld();
+ SkDEBUGCODE(fOwner = kIllegalThreadID;)
+ fSemaphore.signal();
+ }
+
+ void assertHeld() SK_ASSERT_CAPABILITY(this) {
+ SkASSERT(fOwner == SkGetThreadID());
+ }
+
+ void assertNotHeld() {
+ SkASSERT(fOwner == kIllegalThreadID);
+ }
+
+private:
+ SkSemaphore fSemaphore{1};
+ SkDEBUGCODE(SkThreadID fOwner{kIllegalThreadID};)
+};
+
+class SK_SCOPED_CAPABILITY SkAutoMutexExclusive {
+public:
+ SkAutoMutexExclusive(SkMutex& mutex) SK_ACQUIRE(mutex) : fMutex(mutex) { fMutex.acquire(); }
+ ~SkAutoMutexExclusive() SK_RELEASE_CAPABILITY() { fMutex.release(); }
+
+ SkAutoMutexExclusive(const SkAutoMutexExclusive&) = delete;
+ SkAutoMutexExclusive(SkAutoMutexExclusive&&) = delete;
+
+ SkAutoMutexExclusive& operator=(const SkAutoMutexExclusive&) = delete;
+ SkAutoMutexExclusive& operator=(SkAutoMutexExclusive&&) = delete;
+
+private:
+ SkMutex& fMutex;
+};
+
+#endif // SkMutex_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkNoncopyable.h b/gfx/skia/skia/include/private/base/SkNoncopyable.h
new file mode 100644
index 0000000000..ec4a4e5161
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkNoncopyable.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNoncopyable_DEFINED
+#define SkNoncopyable_DEFINED
+
+#include "include/private/base/SkAPI.h"
+
+/** \class SkNoncopyable (DEPRECATED)
+
+ SkNoncopyable is the base class for objects that do not want to
+ be copied. It hides its copy-constructor and its assignment-operator.
+*/
+class SK_API SkNoncopyable {
+public:
+ SkNoncopyable() = default;
+
+ SkNoncopyable(SkNoncopyable&&) = default;
+ SkNoncopyable& operator =(SkNoncopyable&&) = default;
+
+private:
+ SkNoncopyable(const SkNoncopyable&) = delete;
+ SkNoncopyable& operator=(const SkNoncopyable&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkOnce.h b/gfx/skia/skia/include/private/base/SkOnce.h
new file mode 100644
index 0000000000..97ce6b6311
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkOnce.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOnce_DEFINED
+#define SkOnce_DEFINED
+
+#include "include/private/base/SkThreadAnnotations.h"
+
+#include <atomic>
+#include <cstdint>
+#include <utility>
+
+// SkOnce provides call-once guarantees for Skia, much like std::once_flag/std::call_once().
+//
+// There should be no particularly error-prone gotcha use cases when using SkOnce.
+// It works correctly as a class member, a local, a global, a function-scoped static, whatever.
+
+class SkOnce {
+public:
+ constexpr SkOnce() = default;
+
+ template <typename Fn, typename... Args>
+ void operator()(Fn&& fn, Args&&... args) {
+ auto state = fState.load(std::memory_order_acquire);
+
+ if (state == Done) {
+ return;
+ }
+
+ // If it looks like no one has started calling fn(), try to claim that job.
+ if (state == NotStarted && fState.compare_exchange_strong(state, Claimed,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed)) {
+ // Great! We'll run fn() then notify the other threads by releasing Done into fState.
+ fn(std::forward<Args>(args)...);
+ return fState.store(Done, std::memory_order_release);
+ }
+
+ // Some other thread is calling fn().
+ // We'll just spin here acquiring until it releases Done into fState.
+ SK_POTENTIALLY_BLOCKING_REGION_BEGIN;
+ while (fState.load(std::memory_order_acquire) != Done) { /*spin*/ }
+ SK_POTENTIALLY_BLOCKING_REGION_END;
+ }
+
+private:
+ enum State : uint8_t { NotStarted, Claimed, Done};
+ std::atomic<uint8_t> fState{NotStarted};
+};
+
+#endif // SkOnce_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkPathEnums.h b/gfx/skia/skia/include/private/base/SkPathEnums.h
new file mode 100644
index 0000000000..642bbb3489
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkPathEnums.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * This file contains private enums related to paths. See also skbug.com/10670
+ */
+
+#ifndef SkPathEnums_DEFINED
+#define SkPathEnums_DEFINED
+
+enum class SkPathConvexity {
+ kConvex,
+ kConcave,
+ kUnknown,
+};
+
+enum class SkPathFirstDirection {
+ kCW, // == SkPathDirection::kCW
+ kCCW, // == SkPathDirection::kCCW
+ kUnknown,
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkSafe32.h b/gfx/skia/skia/include/private/base/SkSafe32.h
new file mode 100644
index 0000000000..5ba4c2f9a4
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkSafe32.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafe32_DEFINED
+#define SkSafe32_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkMath.h"
+
+#include <cstdint>
+
+static constexpr int32_t Sk64_pin_to_s32(int64_t x) {
+ return x < SK_MinS32 ? SK_MinS32 : (x > SK_MaxS32 ? SK_MaxS32 : (int32_t)x);
+}
+
+static constexpr int32_t Sk32_sat_add(int32_t a, int32_t b) {
+ return Sk64_pin_to_s32((int64_t)a + (int64_t)b);
+}
+
+static constexpr int32_t Sk32_sat_sub(int32_t a, int32_t b) {
+ return Sk64_pin_to_s32((int64_t)a - (int64_t)b);
+}
+
+// To avoid UBSAN complaints about 2's complement overflows
+//
+static constexpr int32_t Sk32_can_overflow_add(int32_t a, int32_t b) {
+ return (int32_t)((uint32_t)a + (uint32_t)b);
+}
+static constexpr int32_t Sk32_can_overflow_sub(int32_t a, int32_t b) {
+ return (int32_t)((uint32_t)a - (uint32_t)b);
+}
+
+/**
+ * This is a 'safe' abs for 32-bit integers that asserts when undefined behavior would occur.
+ * SkTAbs (in SkTemplates.h) is a general purpose absolute-value function.
+ */
+static inline int32_t SkAbs32(int32_t value) {
+ SkASSERT(value != SK_NaN32); // The most negative int32_t can't be negated.
+ if (value < 0) {
+ value = -value;
+ }
+ return value;
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkSemaphore.h b/gfx/skia/skia/include/private/base/SkSemaphore.h
new file mode 100644
index 0000000000..f78ee86625
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkSemaphore.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSemaphore_DEFINED
+#define SkSemaphore_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkThreadAnnotations.h"
+
+#include <algorithm>
+#include <atomic>
+
+class SkSemaphore {
+public:
+ constexpr SkSemaphore(int count = 0) : fCount(count), fOSSemaphore(nullptr) {}
+
+ // Cleanup the underlying OS semaphore.
+ SK_SPI ~SkSemaphore();
+
+ // Increment the counter n times.
+ // Generally it's better to call signal(n) instead of signal() n times.
+ void signal(int n = 1);
+
+ // Decrement the counter by 1,
+ // then if the counter is < 0, sleep this thread until the counter is >= 0.
+ void wait();
+
+ // If the counter is positive, decrement it by 1 and return true, otherwise return false.
+ SK_SPI bool try_wait();
+
+private:
+ // This implementation follows the general strategy of
+ // 'A Lightweight Semaphore with Partial Spinning'
+ // found here
+ // http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
+ // That article (and entire blog) are very much worth reading.
+ //
+ // We wrap an OS-provided semaphore with a user-space atomic counter that
+ // lets us avoid interacting with the OS semaphore unless strictly required:
+ // moving the count from >=0 to <0 or vice-versa, i.e. sleeping or waking threads.
+ struct OSSemaphore;
+
+ SK_SPI void osSignal(int n);
+ SK_SPI void osWait();
+
+ std::atomic<int> fCount;
+ SkOnce fOSSemaphoreOnce;
+ OSSemaphore* fOSSemaphore;
+};
+
+inline void SkSemaphore::signal(int n) {
+ int prev = fCount.fetch_add(n, std::memory_order_release);
+
+ // We only want to call the OS semaphore when our logical count crosses
+ // from <0 to >=0 (when we need to wake sleeping threads).
+ //
+ // This is easiest to think about with specific examples of prev and n.
+ // If n == 5 and prev == -3, there are 3 threads sleeping and we signal
+ // std::min(-(-3), 5) == 3 times on the OS semaphore, leaving the count at 2.
+ //
+ // If prev >= 0, no threads are waiting, std::min(-prev, n) is always <= 0,
+ // so we don't call the OS semaphore, leaving the count at (prev + n).
+ int toSignal = std::min(-prev, n);
+ if (toSignal > 0) {
+ this->osSignal(toSignal);
+ }
+}
+
+inline void SkSemaphore::wait() {
+ // Since this fetches the value before the subtract, zero and below means that there are no
+ // resources left, so the thread needs to wait.
+ if (fCount.fetch_sub(1, std::memory_order_acquire) <= 0) {
+ SK_POTENTIALLY_BLOCKING_REGION_BEGIN;
+ this->osWait();
+ SK_POTENTIALLY_BLOCKING_REGION_END;
+ }
+}
+
+#endif//SkSemaphore_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkSpan_impl.h b/gfx/skia/skia/include/private/base/SkSpan_impl.h
new file mode 100644
index 0000000000..5f31a651bb
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkSpan_impl.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpan_DEFINED
+#define SkSpan_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTo.h"
+
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <utility>
+
+// Having this be an export works around IWYU churn related to
+// https://github.com/include-what-you-use/include-what-you-use/issues/1121
+#include <type_traits> // IWYU pragma: export
+
+// Add macro to check the lifetime of initializer_list arguments. initializer_list has a very
+// short life span, and can only be used as a parameter, and not as a variable.
+#if defined(__clang__) && defined(__has_cpp_attribute) && __has_cpp_attribute(clang::lifetimebound)
+#define SK_CHECK_IL_LIFETIME [[clang::lifetimebound]]
+#else
+#define SK_CHECK_IL_LIFETIME
+#endif
+
+/**
+ * SkSpan holds a reference to contiguous data of type T along with a count. SkSpan does not own
+ * the data itself but is merely a reference, therefore you must take care with the lifetime of
+ * the underlying data.
+ *
+ * SkSpan is a count and a pointer into existing array or data type that stores its data in
+ * contiguous memory like std::vector. Any container that works with std::size() and std::data()
+ * can be used.
+ *
+ * SkSpan makes a convenient parameter for a routine to accept array like things. This allows you to
+ * write the routine without overloads for all different container types.
+ *
+ * Example:
+ * void routine(SkSpan<const int> a) { ... }
+ *
+ * std::vector v = {1, 2, 3, 4, 5};
+ *
+ * routine(a);
+ *
+ * A word of caution when working with initializer_list, initializer_lists have a lifetime that is
+ * limited to the current statement. The following is correct and safe:
+ *
+ * Example:
+ * routine({1,2,3,4,5});
+ *
+ * The following is undefined, and will result in erratic execution:
+ *
+ * Bad Example:
+ * initializer_list l = {1, 2, 3, 4, 5}; // The data behind l dies at the ;.
+ * routine(l);
+ */
+template <typename T>
+class SkSpan {
+public:
+ constexpr SkSpan() : fPtr{nullptr}, fSize{0} {}
+
+ template <typename Integer, std::enable_if_t<std::is_integral_v<Integer>, bool> = true>
+ constexpr SkSpan(T* ptr, Integer size) : fPtr{ptr}, fSize{SkToSizeT(size)} {
+ SkASSERT(ptr || fSize == 0); // disallow nullptr + a nonzero size
+ SkASSERT(fSize < kMaxSize);
+ }
+ template <typename U, typename = std::enable_if_t<std::is_same_v<const U, T>>>
+ constexpr SkSpan(const SkSpan<U>& that) : fPtr(std::data(that)), fSize(std::size(that)) {}
+ constexpr SkSpan(const SkSpan& o) = default;
+ template<size_t N> constexpr SkSpan(T(&a)[N]) : SkSpan(a, N) { }
+ template<typename Container>
+ constexpr SkSpan(Container& c) : SkSpan(std::data(c), std::size(c)) { }
+ SkSpan(std::initializer_list<T> il SK_CHECK_IL_LIFETIME)
+ : SkSpan(std::data(il), std::size(il)) {}
+
+ constexpr SkSpan& operator=(const SkSpan& that) = default;
+
+ constexpr T& operator [] (size_t i) const {
+ SkASSERT(i < this->size());
+ return fPtr[i];
+ }
+ constexpr T& front() const { return fPtr[0]; }
+ constexpr T& back() const { return fPtr[fSize - 1]; }
+ constexpr T* begin() const { return fPtr; }
+ constexpr T* end() const { return fPtr + fSize; }
+ constexpr auto rbegin() const { return std::make_reverse_iterator(this->end()); }
+ constexpr auto rend() const { return std::make_reverse_iterator(this->begin()); }
+ constexpr T* data() const { return this->begin(); }
+ constexpr size_t size() const { return fSize; }
+ constexpr bool empty() const { return fSize == 0; }
+ constexpr size_t size_bytes() const { return fSize * sizeof(T); }
+ constexpr SkSpan<T> first(size_t prefixLen) const {
+ SkASSERT(prefixLen <= this->size());
+ return SkSpan{fPtr, prefixLen};
+ }
+ constexpr SkSpan<T> last(size_t postfixLen) const {
+ SkASSERT(postfixLen <= this->size());
+ return SkSpan{fPtr + (this->size() - postfixLen), postfixLen};
+ }
+ constexpr SkSpan<T> subspan(size_t offset) const {
+ return this->subspan(offset, this->size() - offset);
+ }
+ constexpr SkSpan<T> subspan(size_t offset, size_t count) const {
+ SkASSERT(offset <= this->size());
+ SkASSERT(count <= this->size() - offset);
+ return SkSpan{fPtr + offset, count};
+ }
+
+private:
+ static const constexpr size_t kMaxSize = std::numeric_limits<size_t>::max() / sizeof(T);
+ T* fPtr;
+ size_t fSize;
+};
+
+template <typename Container>
+SkSpan(Container&) ->
+ SkSpan<std::remove_pointer_t<decltype(std::data(std::declval<Container&>()))>>;
+
+template <typename T>
+SkSpan(std::initializer_list<T>) ->
+ SkSpan<std::remove_pointer_t<decltype(std::data(std::declval<std::initializer_list<T>>()))>>;
+
+#endif // SkSpan_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkTArray.h b/gfx/skia/skia/include/private/base/SkTArray.h
new file mode 100644
index 0000000000..635d04e2a8
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTArray.h
@@ -0,0 +1,696 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTArray_DEFINED
+#define SkTArray_DEFINED
+
+#include "include/private/base/SkAlignedStorage.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkAttributes.h"
+#include "include/private/base/SkContainers.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkSpan_impl.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/base/SkTypeTraits.h" // IWYU pragma: keep
+
+#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+#include <new>
+#include <utility>
+
+namespace skia_private {
+/** TArray<T> implements a typical, mostly std::vector-like array.
+ Each T will be default-initialized on allocation, and ~T will be called on destruction.
+
+ MEM_MOVE controls the behavior when a T needs to be moved (e.g. when the array is resized)
+ - true: T will be bit-copied via memcpy.
+ - false: T will be moved via move-constructors.
+*/
+template <typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>> class TArray {
+public:
+ using value_type = T;
+
+ /**
+ * Creates an empty array with no initial storage
+ */
+ TArray() : fOwnMemory(true), fCapacity{0} {}
+
+ /**
+ * Creates an empty array that will preallocate space for reserveCount
+ * elements.
+ */
+ explicit TArray(int reserveCount) : TArray() { this->reserve_back(reserveCount); }
+
+ /**
+ * Copies one array to another. The new array will be heap allocated.
+ */
+ TArray(const TArray& that) : TArray(that.fData, that.fSize) {}
+
+ TArray(TArray&& that) {
+ if (that.fOwnMemory) {
+ this->setData(that);
+ that.setData({});
+ } else {
+ this->initData(that.fSize);
+ that.move(fData);
+ }
+ fSize = std::exchange(that.fSize, 0);
+ }
+
+ /**
+ * Creates a TArray by copying contents of a standard C array. The new
+ * array will be heap allocated. Be careful not to use this constructor
+ * when you really want the (void*, int) version.
+ */
+ TArray(const T* array, int count) {
+ this->initData(count);
+ this->copy(array);
+ }
+
+ /**
+ * Creates a TArray by copying contents of an initializer list.
+ */
+ TArray(std::initializer_list<T> data) : TArray(data.begin(), data.size()) {}
+
+ TArray& operator=(const TArray& that) {
+ if (this == &that) {
+ return *this;
+ }
+ this->clear();
+ this->checkRealloc(that.size(), kExactFit);
+ fSize = that.fSize;
+ this->copy(that.fData);
+ return *this;
+ }
+ TArray& operator=(TArray&& that) {
+ if (this != &that) {
+ this->clear();
+ if (that.fOwnMemory) {
+ // The storage is on the heap, so move the data pointer.
+ if (fOwnMemory) {
+ sk_free(fData);
+ }
+
+ fData = std::exchange(that.fData, nullptr);
+
+ // Can't use exchange with bitfields.
+ fCapacity = that.fCapacity;
+ that.fCapacity = 0;
+
+ fOwnMemory = true;
+ } else {
+ // The data is stored inline in that, so move it element-by-element.
+ this->checkRealloc(that.size(), kExactFit);
+ that.move(fData);
+ }
+ fSize = std::exchange(that.fSize, 0);
+ }
+ return *this;
+ }
+
+ ~TArray() {
+ this->destroyAll();
+ if (fOwnMemory) {
+ sk_free(fData);
+ }
+ }
+
+ /**
+ * Resets to size() = n newly constructed T objects and resets any reserve count.
+ */
+ void reset(int n) {
+ SkASSERT(n >= 0);
+ this->clear();
+ this->checkRealloc(n, kExactFit);
+ fSize = n;
+ for (int i = 0; i < this->size(); ++i) {
+ new (fData + i) T;
+ }
+ }
+
+ /**
+ * Resets to a copy of a C array and resets any reserve count.
+ */
+ void reset(const T* array, int count) {
+ SkASSERT(count >= 0);
+ this->clear();
+ this->checkRealloc(count, kExactFit);
+ fSize = count;
+ this->copy(array);
+ }
+
+ /**
+ * Ensures there is enough reserved space for n elements.
+ */
+ void reserve(int n) {
+ SkASSERT(n >= 0);
+ if (n > this->size()) {
+ this->checkRealloc(n - this->size(), kGrowing);
+ }
+ }
+
+ /**
+ * Ensures there is enough reserved space for n additional elements. The is guaranteed at least
+ * until the array size grows above n and subsequently shrinks below n, any version of reset()
+ * is called, or reserve_back() is called again.
+ */
+ void reserve_back(int n) {
+ SkASSERT(n >= 0);
+ if (n > 0) {
+ this->checkRealloc(n, kExactFit);
+ }
+ }
+
+ void removeShuffle(int n) {
+ SkASSERT(n < this->size());
+ int newCount = fSize - 1;
+ fSize = newCount;
+ fData[n].~T();
+ if (n != newCount) {
+ this->move(n, newCount);
+ }
+ }
+
+ // Is the array empty.
+ bool empty() const { return fSize == 0; }
+
+ /**
+ * Adds 1 new default-initialized T value and returns it by reference. Note
+ * the reference only remains valid until the next call that adds or removes
+ * elements.
+ */
+ T& push_back() {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T;
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize the new item
+ */
+ T& push_back(const T& t) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(t);
+ }
+
+ /**
+ * Version of above that uses a move constructor to initialize the new item
+ */
+ T& push_back(T&& t) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(std::move(t));
+ }
+
+ /**
+ * Construct a new T at the back of this array.
+ */
+ template<class... Args> T& emplace_back(Args&&... args) {
+ void* newT = this->push_back_raw(1);
+ return *new (newT) T(std::forward<Args>(args)...);
+ }
+
+ /**
+ * Allocates n more default-initialized T values, and returns the address of
+ * the start of that new range. Note: this address is only valid until the
+ * next API call made on the array that might add or remove elements.
+ */
+ T* push_back_n(int n) {
+ SkASSERT(n >= 0);
+ T* newTs = TCast(this->push_back_raw(n));
+ for (int i = 0; i < n; ++i) {
+ new (&newTs[i]) T;
+ }
+ return newTs;
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize all n items
+ * to the same T.
+ */
+ T* push_back_n(int n, const T& t) {
+ SkASSERT(n >= 0);
+ T* newTs = TCast(this->push_back_raw(n));
+ for (int i = 0; i < n; ++i) {
+ new (&newTs[i]) T(t);
+ }
+ return static_cast<T*>(newTs);
+ }
+
+ /**
+ * Version of above that uses a copy constructor to initialize the n items
+ * to separate T values.
+ */
+ T* push_back_n(int n, const T t[]) {
+ SkASSERT(n >= 0);
+ this->checkRealloc(n, kGrowing);
+ T* end = this->end();
+ for (int i = 0; i < n; ++i) {
+ new (end + i) T(t[i]);
+ }
+ fSize += n;
+ return end;
+ }
+
+ /**
+ * Version of above that uses the move constructor to set n items.
+ */
+ T* move_back_n(int n, T* t) {
+ SkASSERT(n >= 0);
+ this->checkRealloc(n, kGrowing);
+ T* end = this->end();
+ for (int i = 0; i < n; ++i) {
+ new (end + i) T(std::move(t[i]));
+ }
+ fSize += n;
+ return end;
+ }
+
+ /**
+ * Removes the last element. Not safe to call when size() == 0.
+ */
+ void pop_back() {
+ SkASSERT(fSize > 0);
+ --fSize;
+ fData[fSize].~T();
+ }
+
+ /**
+ * Removes the last n elements. Not safe to call when size() < n.
+ */
+ void pop_back_n(int n) {
+ SkASSERT(n >= 0);
+ SkASSERT(this->size() >= n);
+ int i = fSize;
+ while (i-- > fSize - n) {
+ (*this)[i].~T();
+ }
+ fSize -= n;
+ }
+
+ /**
+ * Pushes or pops from the back to resize. Pushes will be default
+ * initialized.
+ */
+ void resize_back(int newCount) {
+ SkASSERT(newCount >= 0);
+
+ if (newCount > this->size()) {
+ this->push_back_n(newCount - fSize);
+ } else if (newCount < this->size()) {
+ this->pop_back_n(fSize - newCount);
+ }
+ }
+
+ /** Swaps the contents of this array with that array. Does a pointer swap if possible,
+ otherwise copies the T values. */
+ void swap(TArray& that) {
+ using std::swap;
+ if (this == &that) {
+ return;
+ }
+ if (fOwnMemory && that.fOwnMemory) {
+ swap(fData, that.fData);
+ swap(fSize, that.fSize);
+
+ // Can't use swap because fCapacity is a bit field.
+ auto allocCount = fCapacity;
+ fCapacity = that.fCapacity;
+ that.fCapacity = allocCount;
+ } else {
+ // This could be more optimal...
+ TArray copy(std::move(that));
+ that = std::move(*this);
+ *this = std::move(copy);
+ }
+ }
+
+ T* begin() {
+ return fData;
+ }
+ const T* begin() const {
+ return fData;
+ }
+
+ // It's safe to use fItemArray + fSize because if fItemArray is nullptr then adding 0 is
+ // valid and returns nullptr. See [expr.add] in the C++ standard.
+ T* end() {
+ if (fData == nullptr) {
+ SkASSERT(fSize == 0);
+ }
+ return fData + fSize;
+ }
+ const T* end() const {
+ if (fData == nullptr) {
+ SkASSERT(fSize == 0);
+ }
+ return fData + fSize;
+ }
+ T* data() { return fData; }
+ const T* data() const { return fData; }
+ int size() const { return fSize; }
+ size_t size_bytes() const { return this->bytes(fSize); }
+ void resize(size_t count) { this->resize_back((int)count); }
+
+ void clear() {
+ this->destroyAll();
+ fSize = 0;
+ }
+
+ void shrink_to_fit() {
+ if (!fOwnMemory || fSize == fCapacity) {
+ return;
+ }
+ if (fSize == 0) {
+ sk_free(fData);
+ fData = nullptr;
+ fCapacity = 0;
+ } else {
+ SkSpan<std::byte> allocation = Allocate(fSize);
+ this->move(TCast(allocation.data()));
+ if (fOwnMemory) {
+ sk_free(fData);
+ }
+ this->setDataFromBytes(allocation);
+ }
+ }
+
+ /**
+ * Get the i^th element.
+ */
+ T& operator[] (int i) {
+ SkASSERT(i < this->size());
+ SkASSERT(i >= 0);
+ return fData[i];
+ }
+
+ const T& operator[] (int i) const {
+ SkASSERT(i < this->size());
+ SkASSERT(i >= 0);
+ return fData[i];
+ }
+
+ T& at(int i) { return (*this)[i]; }
+ const T& at(int i) const { return (*this)[i]; }
+
+ /**
+ * equivalent to operator[](0)
+ */
+ T& front() { SkASSERT(fSize > 0); return fData[0];}
+
+ const T& front() const { SkASSERT(fSize > 0); return fData[0];}
+
+ /**
+ * equivalent to operator[](size() - 1)
+ */
+ T& back() { SkASSERT(fSize); return fData[fSize - 1];}
+
+ const T& back() const { SkASSERT(fSize > 0); return fData[fSize - 1];}
+
+ /**
+ * equivalent to operator[](size()-1-i)
+ */
+ T& fromBack(int i) {
+ SkASSERT(i >= 0);
+ SkASSERT(i < this->size());
+ return fData[fSize - i - 1];
+ }
+
+ const T& fromBack(int i) const {
+ SkASSERT(i >= 0);
+ SkASSERT(i < this->size());
+ return fData[fSize - i - 1];
+ }
+
+ bool operator==(const TArray<T, MEM_MOVE>& right) const {
+ int leftCount = this->size();
+ if (leftCount != right.size()) {
+ return false;
+ }
+ for (int index = 0; index < leftCount; ++index) {
+ if (fData[index] != right.fData[index]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const TArray<T, MEM_MOVE>& right) const {
+ return !(*this == right);
+ }
+
+ int capacity() const {
+ return fCapacity;
+ }
+
+protected:
+ // Creates an empty array that will use the passed storage block until it is insufficiently
+ // large to hold the entire array.
+ template <int InitialCapacity>
+ TArray(SkAlignedSTStorage<InitialCapacity, T>* storage, int size = 0) {
+ static_assert(InitialCapacity >= 0);
+ SkASSERT(size >= 0);
+ SkASSERT(storage->get() != nullptr);
+ if (size > InitialCapacity) {
+ this->initData(size);
+ } else {
+ this->setDataFromBytes(*storage);
+ fSize = size;
+
+ // setDataFromBytes always sets fOwnMemory to true, but we are actually using static
+ // storage here, which shouldn't ever be freed.
+ fOwnMemory = false;
+ }
+ }
+
+ // Copy a C array, using pre-allocated storage if preAllocCount >= count. Otherwise, storage
+ // will only be used when array shrinks to fit.
+ template <int InitialCapacity>
+ TArray(const T* array, int size, SkAlignedSTStorage<InitialCapacity, T>* storage)
+ : TArray{storage, size}
+ {
+ this->copy(array);
+ }
+
+private:
+ // Growth factors for checkRealloc.
+ static constexpr double kExactFit = 1.0;
+ static constexpr double kGrowing = 1.5;
+
+ static constexpr int kMinHeapAllocCount = 8;
+ static_assert(SkIsPow2(kMinHeapAllocCount), "min alloc count not power of two.");
+
+ // Note for 32-bit machines kMaxCapacity will be <= SIZE_MAX. For 64-bit machines it will
+ // just be INT_MAX if the sizeof(T) < 2^32.
+ static constexpr int kMaxCapacity = SkToInt(std::min(SIZE_MAX / sizeof(T), (size_t)INT_MAX));
+
+ void setDataFromBytes(SkSpan<std::byte> allocation) {
+ T* data = TCast(allocation.data());
+ // We have gotten extra bytes back from the allocation limit, pin to kMaxCapacity. It
+ // would seem like the SkContainerAllocator should handle the divide, but it would have
+ // to a full divide instruction. If done here the size is known at compile, and usually
+ // can be implemented by a right shift. The full divide takes ~50X longer than the shift.
+ size_t size = std::min(allocation.size() / sizeof(T), SkToSizeT(kMaxCapacity));
+ setData(SkSpan<T>(data, size));
+ }
+
+ void setData(SkSpan<T> array) {
+ fData = array.data();
+ fCapacity = SkToU32(array.size());
+ fOwnMemory = true;
+ }
+
+ // We disable Control-Flow Integrity sanitization (go/cfi) when casting item-array buffers.
+ // CFI flags this code as dangerous because we are casting `buffer` to a T* while the buffer's
+ // contents might still be uninitialized memory. When T has a vtable, this is especially risky
+ // because we could hypothetically access a virtual method on fItemArray and jump to an
+ // unpredictable location in memory. Of course, TArray won't actually use fItemArray in this
+ // way, and we don't want to construct a T before the user requests one. There's no real risk
+ // here, so disable CFI when doing these casts.
+#ifdef __clang__
+ SK_NO_SANITIZE("cfi")
+#elif defined(__GNUC__)
+ SK_ATTRIBUTE(no_sanitize_undefined)
+#endif
+ static T* TCast(void* buffer) {
+ return (T*)buffer;
+ }
+
+ size_t bytes(int n) const {
+ SkASSERT(n <= kMaxCapacity);
+ return SkToSizeT(n) * sizeof(T);
+ }
+
+ static SkSpan<std::byte> Allocate(int capacity, double growthFactor = 1.0) {
+ return SkContainerAllocator{sizeof(T), kMaxCapacity}.allocate(capacity, growthFactor);
+ }
+
+ void initData(int count) {
+ this->setDataFromBytes(Allocate(count));
+ fSize = count;
+ }
+
+ void destroyAll() {
+ if (!this->empty()) {
+ T* cursor = this->begin();
+ T* const end = this->end();
+ do {
+ cursor->~T();
+ cursor++;
+ } while (cursor < end);
+ }
+ }
+
+ /** In the following move and copy methods, 'dst' is assumed to be uninitialized raw storage.
+ * In the following move methods, 'src' is destroyed leaving behind uninitialized raw storage.
+ */
+ void copy(const T* src) {
+ if constexpr (std::is_trivially_copyable_v<T>) {
+ if (!this->empty() && src != nullptr) {
+ sk_careful_memcpy(fData, src, this->size_bytes());
+ }
+ } else {
+ for (int i = 0; i < this->size(); ++i) {
+ new (fData + i) T(src[i]);
+ }
+ }
+ }
+
+ void move(int dst, int src) {
+ if constexpr (MEM_MOVE) {
+ memcpy(static_cast<void*>(&fData[dst]),
+ static_cast<const void*>(&fData[src]),
+ sizeof(T));
+ } else {
+ new (&fData[dst]) T(std::move(fData[src]));
+ fData[src].~T();
+ }
+ }
+
+ void move(void* dst) {
+ if constexpr (MEM_MOVE) {
+ sk_careful_memcpy(dst, fData, this->bytes(fSize));
+ } else {
+ for (int i = 0; i < this->size(); ++i) {
+ new (static_cast<char*>(dst) + this->bytes(i)) T(std::move(fData[i]));
+ fData[i].~T();
+ }
+ }
+ }
+
+ // Helper function that makes space for n objects, adjusts the count, but does not initialize
+ // the new objects.
+ void* push_back_raw(int n) {
+ this->checkRealloc(n, kGrowing);
+ void* ptr = fData + fSize;
+ fSize += n;
+ return ptr;
+ }
+
+ void checkRealloc(int delta, double growthFactor) {
+ // This constant needs to be declared in the function where it is used to work around
+ // MSVC's persnickety nature about template definitions.
+ SkASSERT(delta >= 0);
+ SkASSERT(fSize >= 0);
+ SkASSERT(fCapacity >= 0);
+
+ // Return if there are enough remaining allocated elements to satisfy the request.
+ if (this->capacity() - fSize >= delta) {
+ return;
+ }
+
+ // Don't overflow fSize or size_t later in the memory allocation. Overflowing memory
+ // allocation really only applies to fSizes on 32-bit machines; on 64-bit machines this
+ // will probably never produce a check. Since kMaxCapacity is bounded above by INT_MAX,
+ // this also checks the bounds of fSize.
+ if (delta > kMaxCapacity - fSize) {
+ sk_report_container_overflow_and_die();
+ }
+ const int newCount = fSize + delta;
+
+ SkSpan<std::byte> allocation = Allocate(newCount, growthFactor);
+
+ this->move(TCast(allocation.data()));
+ if (fOwnMemory) {
+ sk_free(fData);
+ }
+ this->setDataFromBytes(allocation);
+ SkASSERT(this->capacity() >= newCount);
+ SkASSERT(fData != nullptr);
+ }
+
+ T* fData{nullptr};
+ int fSize{0};
+ uint32_t fOwnMemory : 1;
+ uint32_t fCapacity : 31;
+};
+
+template <typename T, bool M> static inline void swap(TArray<T, M>& a, TArray<T, M>& b) {
+ a.swap(b);
+}
+
+} // namespace skia_private
+
+/**
+ * Subclass of TArray that contains a preallocated memory block for the array.
+ */
+template <int N, typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>>
+class SkSTArray : private SkAlignedSTStorage<N,T>, public skia_private::TArray<T, MEM_MOVE> {
+private:
+ static_assert(N > 0);
+ using STORAGE = SkAlignedSTStorage<N,T>;
+ using INHERITED = skia_private::TArray<T, MEM_MOVE>;
+
+public:
+ SkSTArray()
+ : STORAGE{}, INHERITED(static_cast<STORAGE*>(this)) {}
+
+ SkSTArray(const T* array, int count)
+ : STORAGE{}, INHERITED(array, count, static_cast<STORAGE*>(this)) {}
+
+ SkSTArray(std::initializer_list<T> data) : SkSTArray(data.begin(), SkToInt(data.size())) {}
+
+ explicit SkSTArray(int reserveCount) : SkSTArray() {
+ this->reserve_back(reserveCount);
+ }
+
+ SkSTArray (const SkSTArray& that) : SkSTArray() { *this = that; }
+ explicit SkSTArray(const INHERITED& that) : SkSTArray() { *this = that; }
+ SkSTArray ( SkSTArray&& that) : SkSTArray() { *this = std::move(that); }
+ explicit SkSTArray( INHERITED&& that) : SkSTArray() { *this = std::move(that); }
+
+ SkSTArray& operator=(const SkSTArray& that) {
+ INHERITED::operator=(that);
+ return *this;
+ }
+ SkSTArray& operator=(const INHERITED& that) {
+ INHERITED::operator=(that);
+ return *this;
+ }
+
+ SkSTArray& operator=(SkSTArray&& that) {
+ INHERITED::operator=(std::move(that));
+ return *this;
+ }
+ SkSTArray& operator=(INHERITED&& that) {
+ INHERITED::operator=(std::move(that));
+ return *this;
+ }
+
+ // Force the use of TArray for data() and size().
+ using INHERITED::data;
+ using INHERITED::size;
+};
+
+// TODO: remove this typedef when all uses have been converted from SkTArray to TArray.
+template <typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>>
+using SkTArray = skia_private::TArray<T, MEM_MOVE>;
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTDArray.h b/gfx/skia/skia/include/private/base/SkTDArray.h
new file mode 100644
index 0000000000..b08d285378
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTDArray.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDArray_DEFINED
+#define SkTDArray_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTo.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+
+class SK_SPI SkTDStorage {
+public:
+ explicit SkTDStorage(int sizeOfT);
+ SkTDStorage(const void* src, int size, int sizeOfT);
+
+ // Copy
+ SkTDStorage(const SkTDStorage& that);
+ SkTDStorage& operator= (const SkTDStorage& that);
+
+ // Move
+ SkTDStorage(SkTDStorage&& that);
+ SkTDStorage& operator= (SkTDStorage&& that);
+
+ ~SkTDStorage();
+
+ void reset();
+ void swap(SkTDStorage& that);
+
+ // Size routines
+ bool empty() const { return fSize == 0; }
+ void clear() { fSize = 0; }
+ int size() const { return fSize; }
+ void resize(int newSize);
+ size_t size_bytes() const { return this->bytes(fSize); }
+
+ // Capacity routines
+ int capacity() const { return fCapacity; }
+ void reserve(int newCapacity);
+ void shrink_to_fit();
+
+ void* data() { return fStorage; }
+ const void* data() const { return fStorage; }
+
+ // Deletion routines
+ void erase(int index, int count);
+ // Removes the entry at 'index' and replaces it with the last array element
+ void removeShuffle(int index);
+
+ // Insertion routines
+ void* prepend();
+
+ void append();
+ void append(int count);
+ void* append(const void* src, int count);
+
+ void* insert(int index);
+ void* insert(int index, int count, const void* src);
+
+ void pop_back() {
+ SkASSERT(fSize > 0);
+ fSize--;
+ }
+
+ friend bool operator==(const SkTDStorage& a, const SkTDStorage& b);
+ friend bool operator!=(const SkTDStorage& a, const SkTDStorage& b) {
+ return !(a == b);
+ }
+
+private:
+ size_t bytes(int n) const { return SkToSizeT(n * fSizeOfT); }
+ void* address(int n) { return fStorage + this->bytes(n); }
+
+ // Adds delta to fSize. Crash if outside [0, INT_MAX]
+ int calculateSizeOrDie(int delta);
+
+ // Move the tail of the array defined by the indexes tailStart and tailEnd to dstIndex. The
+ // elements at dstIndex are overwritten by the tail.
+ void moveTail(int dstIndex, int tailStart, int tailEnd);
+
+ // Copy src into the array at dstIndex.
+ void copySrc(int dstIndex, const void* src, int count);
+
+ const int fSizeOfT;
+ std::byte* fStorage{nullptr};
+ int fCapacity{0}; // size of the allocation in fArray (#elements)
+ int fSize{0}; // logical number of elements (fSize <= fCapacity)
+};
+
+static inline void swap(SkTDStorage& a, SkTDStorage& b) {
+ a.swap(b);
+}
+
+// SkTDArray<T> implements a std::vector-like array for raw data-only objects that do not require
+// construction or destruction. The constructor and destructor for T will not be called; T objects
+// will always be moved via raw memcpy. Newly created T objects will contain uninitialized memory.
+template <typename T> class SkTDArray {
+public:
+ SkTDArray() : fStorage{sizeof(T)} {}
+ SkTDArray(const T src[], int count) : fStorage{src, count, sizeof(T)} { }
+ SkTDArray(const std::initializer_list<T>& list) : SkTDArray(list.begin(), list.size()) {}
+
+ // Copy
+ SkTDArray(const SkTDArray<T>& src) : SkTDArray(src.data(), src.size()) {}
+ SkTDArray<T>& operator=(const SkTDArray<T>& src) {
+ fStorage = src.fStorage;
+ return *this;
+ }
+
+ // Move
+ SkTDArray(SkTDArray<T>&& src) : fStorage{std::move(src.fStorage)} {}
+ SkTDArray<T>& operator=(SkTDArray<T>&& src) {
+ fStorage = std::move(src.fStorage);
+ return *this;
+ }
+
+ friend bool operator==(const SkTDArray<T>& a, const SkTDArray<T>& b) {
+ return a.fStorage == b.fStorage;
+ }
+ friend bool operator!=(const SkTDArray<T>& a, const SkTDArray<T>& b) { return !(a == b); }
+
+ void swap(SkTDArray<T>& that) {
+ using std::swap;
+ swap(fStorage, that.fStorage);
+ }
+
+ bool empty() const { return fStorage.empty(); }
+
+ // Return the number of elements in the array
+ int size() const { return fStorage.size(); }
+
+ // Return the total number of elements allocated.
+ // Note: capacity() - size() gives you the number of elements you can add without causing an
+ // allocation.
+ int capacity() const { return fStorage.capacity(); }
+
+ // return the number of bytes in the array: count * sizeof(T)
+ size_t size_bytes() const { return fStorage.size_bytes(); }
+
+ T* data() { return static_cast<T*>(fStorage.data()); }
+ const T* data() const { return static_cast<const T*>(fStorage.data()); }
+ T* begin() { return this->data(); }
+ const T* begin() const { return this->data(); }
+ T* end() { return this->data() + this->size(); }
+ const T* end() const { return this->data() + this->size(); }
+
+ T& operator[](int index) {
+ SkASSERT(index < this->size());
+ return this->data()[index];
+ }
+ const T& operator[](int index) const {
+ SkASSERT(index < this->size());
+ return this->data()[index];
+ }
+
+ const T& back() const {
+ SkASSERT(this->size() > 0);
+ return this->data()[this->size() - 1];
+ }
+ T& back() {
+ SkASSERT(this->size() > 0);
+ return this->data()[this->size() - 1];
+ }
+
+ void reset() {
+ fStorage.reset();
+ }
+
+ void clear() {
+ fStorage.clear();
+ }
+
+ // Sets the number of elements in the array.
+ // If the array does not have space for count elements, it will increase
+ // the storage allocated to some amount greater than that required.
+ // It will never shrink the storage.
+ void resize(int count) {
+ fStorage.resize(count);
+ }
+
+ void reserve(int n) {
+ fStorage.reserve(n);
+ }
+
+ T* append() {
+ fStorage.append();
+ return this->end() - 1;
+ }
+ T* append(int count) {
+ fStorage.append(count);
+ return this->end() - count;
+ }
+ T* append(int count, const T* src) {
+ return static_cast<T*>(fStorage.append(src, count));
+ }
+
+ T* insert(int index) {
+ return static_cast<T*>(fStorage.insert(index));
+ }
+ T* insert(int index, int count, const T* src = nullptr) {
+ return static_cast<T*>(fStorage.insert(index, count, src));
+ }
+
+ void remove(int index, int count = 1) {
+ fStorage.erase(index, count);
+ }
+
+ void removeShuffle(int index) {
+ fStorage.removeShuffle(index);
+ }
+
+ // routines to treat the array like a stack
+ void push_back(const T& v) {
+ this->append();
+ this->back() = v;
+ }
+ void pop_back() { fStorage.pop_back(); }
+
+ void shrink_to_fit() {
+ fStorage.shrink_to_fit();
+ }
+
+private:
+ SkTDStorage fStorage;
+};
+
+template <typename T> static inline void swap(SkTDArray<T>& a, SkTDArray<T>& b) { a.swap(b); }
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTFitsIn.h b/gfx/skia/skia/include/private/base/SkTFitsIn.h
new file mode 100644
index 0000000000..365748abef
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTFitsIn.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTFitsIn_DEFINED
+#define SkTFitsIn_DEFINED
+
+#include "include/private/base/SkDebug.h"
+
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+/**
+ * std::underlying_type is only defined for enums. For integral types, we just want the type.
+ */
+template <typename T, class Enable = void>
+struct sk_strip_enum {
+ typedef T type;  // non-enum (integral) case: the type passes through unchanged.
+};
+
+template <typename T>
+struct sk_strip_enum<T, typename std::enable_if<std::is_enum<T>::value>::type> {
+ typedef typename std::underlying_type<T>::type type;  // enum case: its underlying integer type.
+};
+
+
+/**
+ * In C++ an unsigned to signed cast where the source value cannot be represented in the destination
+ * type results in an implementation defined destination value. Unlike C, C++ does not allow a trap.
+ * This makes "(S)(D)s == s" a possibly useful test. However, there are two cases where this is
+ * incorrect:
+ *
+ * when testing if a value of a smaller signed type can be represented in a larger unsigned type
+ * (int8_t)(uint16_t)-1 == -1 => (int8_t)0xFFFF == -1 => [implementation defined] == -1
+ *
+ * when testing if a value of a larger unsigned type can be represented in a smaller signed type
+ * (uint16_t)(int8_t)0xFFFF == 0xFFFF => (uint16_t)-1 == 0xFFFF => 0xFFFF == 0xFFFF => true.
+ *
+ * Consider the cases:
+ * u = unsigned, less digits
+ * U = unsigned, more digits
+ * s = signed, less digits
+ * S = signed, more digits
+ * v is the value we're considering.
+ *
+ * u -> U: (u)(U)v == v, trivially true
+ * U -> u: (U)(u)v == v, both casts well defined, test works
+ * s -> S: (s)(S)v == v, trivially true
+ * S -> s: (S)(s)v == v, first cast gives an implementation-defined value, second cast defined, test works
+ * s -> U: (s)(U)v == v, *this is bad*, the second cast results in implementation defined value
+ * S -> u: (S)(u)v == v, the second cast is required to prevent promotion of rhs to unsigned
+ * u -> S: (u)(S)v == v, trivially true
+ * U -> s: (U)(s)v == v, *this is bad*,
+ * first cast results in implementation defined value,
+ * second cast is defined. However, this creates false positives
+ * uint16_t x = 0xFFFF
+ * (uint16_t)(int8_t)x == x
+ * => (uint16_t)-1 == x
+ * => 0xFFFF == x
+ * => true
+ *
+ * So for the eight cases three are trivially true, three more are valid casts, and two are special.
+ * The two 'full' checks which otherwise require two comparisons are valid cast checks.
+ * The two remaining checks s -> U [v >= 0] and U -> s [v <= max(s)] can be done with one op.
+ */
+
+template <typename D, typename S>
+static constexpr inline
+typename std::enable_if<(std::is_integral<S>::value || std::is_enum<S>::value) &&
+ (std::is_integral<D>::value || std::is_enum<D>::value), bool>::type
+/*bool*/ SkTFitsIn(S src) {  // True iff the value of src is exactly representable in D (see analysis above).
+ // Ensure that is_signed and is_unsigned are passed the arithmetic underlying types of enums.
+ using Sa = typename sk_strip_enum<S>::type;
+ using Da = typename sk_strip_enum<D>::type;
+
+ // SkTFitsIn() is used in public headers, so needs to be written targeting at most C++11.
+ return
+
+ // E.g. (int8_t)(uint8_t) int8_t(-1) == -1, but the uint8_t == 255, not -1.
+ (std::is_signed<Sa>::value && std::is_unsigned<Da>::value && sizeof(Sa) <= sizeof(Da)) ?
+ (S)0 <= src :  // signed -> wider-or-equal unsigned: fits iff non-negative.
+
+ // E.g. (uint8_t)(int8_t) uint8_t(255) == 255, but the int8_t == -1.
+ (std::is_signed<Da>::value && std::is_unsigned<Sa>::value && sizeof(Da) <= sizeof(Sa)) ?
+ src <= (S)std::numeric_limits<Da>::max() :  // unsigned -> narrower-or-equal signed: fits iff <= D's max.
+
+#if !defined(SK_DEBUG) && !defined(__MSVC_RUNTIME_CHECKS )
+ // Correct (simple) version. This trips up MSVC's /RTCc run-time checking.
+ (S)(D)src == src;
+#else
+ // More complex version that's safe with /RTCc. Used in all debug builds, for coverage.
+ (std::is_signed<Sa>::value) ?
+ (intmax_t)src >= (intmax_t)std::numeric_limits<Da>::min() &&
+ (intmax_t)src <= (intmax_t)std::numeric_limits<Da>::max() :
+
+ // std::is_unsigned<S> ?
+ (uintmax_t)src <= (uintmax_t)std::numeric_limits<Da>::max();
+#endif
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTLogic.h b/gfx/skia/skia/include/private/base/SkTLogic.h
new file mode 100644
index 0000000000..26f363c946
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTLogic.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ *
+ * This header provides some std:: features early in the skstd namespace
+ * and several Skia-specific additions in the sknonstd namespace.
+ */
+
+#ifndef SkTLogic_DEFINED
+#define SkTLogic_DEFINED
+
+#include <iterator>
+#include <type_traits>
+#include "include/private/base/SkTo.h"
+
+// The sknonstd namespace contains things we would like to be proposed and feel std-ish.
+namespace sknonstd {
+
+// The name 'copy' here is fraught with peril. In this case it means 'append', not 'overwrite'.
+// Alternate proposed names are 'propagate', 'augment', or 'append' (and 'add', but already taken).
+// std::experimental::propagate_const already exists for other purposes in TSv2.
+// These also follow the <dest, source> pattern used by boost.
+template <typename D, typename S> struct copy_const {  // e.g. copy_const_t<int, const float> is const int.
+ using type = std::conditional_t<std::is_const<S>::value, std::add_const_t<D>, D>;
+};
+template <typename D, typename S> using copy_const_t = typename copy_const<D, S>::type;
+
+template <typename D, typename S> struct copy_volatile {
+ using type = std::conditional_t<std::is_volatile<S>::value, std::add_volatile_t<D>, D>;
+};
+template <typename D, typename S> using copy_volatile_t = typename copy_volatile<D, S>::type;
+
+template <typename D, typename S> struct copy_cv {  // applies both of the above.
+ using type = copy_volatile_t<copy_const_t<D, S>, S>;
+};
+template <typename D, typename S> using copy_cv_t = typename copy_cv<D, S>::type;
+
+// The name 'same' here means 'overwrite'.
+// Alternate proposed names are 'replace', 'transfer', or 'qualify_from'.
+// same_xxx<D, S> can be written as copy_xxx<remove_xxx_t<D>, S>
+template <typename D, typename S> using same_const = copy_const<std::remove_const_t<D>, S>;
+template <typename D, typename S> using same_const_t = typename same_const<D, S>::type;
+template <typename D, typename S> using same_volatile =copy_volatile<std::remove_volatile_t<D>,S>;
+template <typename D, typename S> using same_volatile_t = typename same_volatile<D, S>::type;
+template <typename D, typename S> using same_cv = copy_cv<std::remove_cv_t<D>, S>;  // strips D's qualifiers, then copies S's.
+template <typename D, typename S> using same_cv_t = typename same_cv<D, S>::type;
+
+} // namespace sknonstd
+
+template <typename Container>
+constexpr int SkCount(const Container& c) { return SkTo<int>(std::size(c)); }  // container size as int; SkTo debug-asserts it fits.
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTPin.h b/gfx/skia/skia/include/private/base/SkTPin.h
new file mode 100644
index 0000000000..c824c44640
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTPin.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTPin_DEFINED
+#define SkTPin_DEFINED
+
+#include <algorithm>
+
+/** @return x pinned (clamped) between lo and hi, inclusively.
+
+ Unlike std::clamp(), SkTPin() always returns a value between lo and hi.
+ If x is NaN, SkTPin() returns lo but std::clamp() returns NaN.
+*/
+template <typename T>
+static constexpr const T& SkTPin(const T& x, const T& lo, const T& hi) {
+ return std::max(lo, std::min(x, hi));  // a NaN x falls through min() unchanged, then max() selects lo.
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkTemplates.h b/gfx/skia/skia/include/private/base/SkTemplates.h
new file mode 100644
index 0000000000..cbcf36c594
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTemplates.h
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTemplates_DEFINED
+#define SkTemplates_DEFINED
+
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTLogic.h"
+
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+
+/** \file SkTemplates.h
+
+ This file contains light-weight template classes for type-safe and exception-safe
+ resource management.
+*/
+
+/**
+ * Marks a local variable as known to be unused (to avoid warnings).
+ * Note that this does *not* prevent the local variable from being optimized away.
+ */
+template<typename T> inline void sk_ignore_unused_variable(const T&) { }  // intentionally empty; taking the reference is the whole effect.
+
+/**
+ * This is a general purpose absolute-value function.
+ * See SkAbs32 in (SkSafe32.h) for a 32-bit int specific version that asserts.
+ */
+template <typename T> static inline T SkTAbs(T value) {
+ if (value < 0) {
+ value = -value;  // NOTE(review): for signed integral T this overflows (UB) at the minimum value; SkAbs32 is the asserting variant.
+ }
+ return value;
+}
+
+/**
+ * Returns a pointer to a D which comes immediately after S[count].
+ */
+template <typename D, typename S> inline D* SkTAfter(S* ptr, size_t count = 1) {
+ return reinterpret_cast<D*>(ptr + count);  // advance in units of sizeof(S), then reinterpret the address as D*.
+}
+
+/**
+ * Returns a pointer to a D which comes byteOffset bytes after S.
+ */
+template <typename D, typename S> inline D* SkTAddOffset(S* ptr, ptrdiff_t byteOffset) {  // byteOffset is signed and may be negative.
+ // The intermediate char* has the same cv-ness as D as this produces better error messages.
+ // This relies on the fact that reinterpret_cast can add constness, but cannot remove it.
+ return reinterpret_cast<D*>(reinterpret_cast<sknonstd::same_cv_t<char, D>*>(ptr) + byteOffset);
+}
+
+template <typename T, T* P> struct SkOverloadedFunctionObject {  // stateless callable wrapping function P (used below as a unique_ptr deleter).
+ template <typename... Args>
+ auto operator()(Args&&... args) const -> decltype(P(std::forward<Args>(args)...)) {
+ return P(std::forward<Args>(args)...);  // perfect-forward straight through to P.
+ }
+};
+
+template <auto F> using SkFunctionObject =
+ SkOverloadedFunctionObject<std::remove_pointer_t<decltype(F)>, F>;  // deduces the function type from F (C++17 'auto' non-type parameter).
+
+/** \class SkAutoTCallVProc
+
+ Call a function when this goes out of scope. The template uses two
+ parameters, the object, and a function that is to be called in the destructor.
+ If release() is called, the object reference is set to null. If the object
+ reference is null when the destructor is called, we do not call the
+ function.
+*/
+template <typename T, void (*P)(T*)> class SkAutoTCallVProc
+ : public std::unique_ptr<T, SkFunctionObject<P>> {
+ using inherited = std::unique_ptr<T, SkFunctionObject<P>>;
+public:
+ using inherited::inherited;
+ SkAutoTCallVProc(const SkAutoTCallVProc&) = delete;  // move-only, like the unique_ptr base.
+ SkAutoTCallVProc(SkAutoTCallVProc&& that) : inherited(std::move(that)) {}
+
+ operator T*() const { return this->get(); }  // implicit conversion so it can be used where a raw T* is expected.
+};
+
+
+namespace skia_private {
+/** Allocate an array of T elements, and free the array in the destructor
+ */
+template <typename T> class AutoTArray {
+public:
+ AutoTArray() {}
+ /** Allocate count number of T elements
+ */
+ explicit AutoTArray(int count) {
+ SkASSERT(count >= 0);
+ if (count) {
+ fArray.reset(new T[count]);  // default-constructs count Ts; freed by the unique_ptr.
+ }
+ SkDEBUGCODE(fCount = count;)  // fCount exists only in debug builds, for the bounds check below.
+ }
+
+ AutoTArray(AutoTArray&& other) : fArray(std::move(other.fArray)) {
+ SkDEBUGCODE(fCount = other.fCount; other.fCount = 0;)
+ }
+ AutoTArray& operator=(AutoTArray&& other) {
+ if (this != &other) {
+ fArray = std::move(other.fArray);
+ SkDEBUGCODE(fCount = other.fCount; other.fCount = 0;)
+ }
+ return *this;
+ }
+
+ /** Reallocates given a new count. Reallocation occurs even if new count equals old count.
+ */
+ void reset(int count = 0) { *this = AutoTArray(count); }
+
+ /** Return the array of T elements. Will be NULL if count == 0
+ */
+ T* get() const { return fArray.get(); }
+
+ /** Return the nth element in the array
+ */
+ T& operator[](int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);  // unsigned compare also rejects negative indices.
+ return fArray[index];
+ }
+
+ /** Aliases matching other types, like std::vector. */
+ const T* data() const { return fArray.get(); }
+ T* data() { return fArray.get(); }
+
+private:
+ std::unique_ptr<T[]> fArray;
+ SkDEBUGCODE(int fCount = 0;)
+};
+
+/** Wraps AutoTArray, with room for kCountRequested elements preallocated.
+ */
+template <int kCountRequested, typename T> class AutoSTArray {
+public:
+ AutoSTArray(AutoSTArray&&) = delete;
+ AutoSTArray(const AutoSTArray&) = delete;
+ AutoSTArray& operator=(AutoSTArray&&) = delete;
+ AutoSTArray& operator=(const AutoSTArray&) = delete;
+
+ /** Initialize with no objects */
+ AutoSTArray() {
+ fArray = nullptr;
+ fCount = 0;
+ }
+
+ /** Allocate count number of T elements
+ */
+ AutoSTArray(int count) {
+ fArray = nullptr;
+ fCount = 0;
+ this->reset(count);
+ }
+
+ ~AutoSTArray() {
+ this->reset(0);
+ }
+
+ /** Destroys previous objects in the array and default constructs count number of objects */
+ void reset(int count) {
+ T* start = fArray;
+ T* iter = start + fCount;
+ while (iter > start) {
+ (--iter)->~T();  // destroy existing elements in reverse order.
+ }
+
+ SkASSERT(count >= 0);
+ if (count != fCount) {
+ if (fCount > kCount) {
+ // 'fArray' was allocated last time so free it now
+ SkASSERT((T*) fStorage != fArray);
+ sk_free(fArray);
+ }
+
+ if (count > kCount) {
+ fArray = (T*) sk_malloc_throw(count, sizeof(T));  // request exceeds inline storage: go to the heap.
+ } else if (count > 0) {
+ fArray = (T*) fStorage;  // use the preallocated inline storage.
+ } else {
+ fArray = nullptr;
+ }
+
+ fCount = count;
+ }
+
+ iter = fArray;
+ T* stop = fArray + count;
+ while (iter < stop) {
+ new (iter++) T;  // default-construct each element in place.
+ }
+ }
+
+ /** Return the number of T elements in the array
+ */
+ int count() const { return fCount; }
+
+ /** Return the array of T elements. Will be NULL if count == 0
+ */
+ T* get() const { return fArray; }
+
+ T* begin() { return fArray; }
+
+ const T* begin() const { return fArray; }
+
+ T* end() { return fArray + fCount; }
+
+ const T* end() const { return fArray + fCount; }
+
+ /** Return the nth element in the array
+ */
+ T& operator[](int index) const {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+
+ /** Aliases matching other types, like std::vector. */
+ const T* data() const { return fArray; }
+ T* data() { return fArray; }
+ size_t size() const { return fCount; }
+
+private:
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max,
+ // but some functions have multiple large stack allocations.
+ static const int kMaxBytes = 4 * 1024;
+ static const int kCount = kCountRequested * sizeof(T) > kMaxBytes
+ ? kMaxBytes / sizeof(T)
+ : kCountRequested;
+#else
+ static const int kCount = kCountRequested;
+#endif
+
+ int fCount;
+ T* fArray;
+ alignas(T) char fStorage[kCount * sizeof(T)];  // inline buffer used when count <= kCount.
+};
+
+/** Manages an array of T elements, freeing the array in the destructor.
+ * Does NOT call any constructors/destructors on T (T must be POD).
+ */
+template <typename T,
+ typename = std::enable_if_t<std::is_trivially_default_constructible<T>::value &&
+ std::is_trivially_destructible<T>::value>>  // enforced because no T ctors/dtors are ever run.
+class AutoTMalloc {
+public:
+ /** Takes ownership of the ptr. The ptr must be a value which can be passed to sk_free. */
+ explicit AutoTMalloc(T* ptr = nullptr) : fPtr(ptr) {}
+
+ /** Allocates space for 'count' Ts. */
+ explicit AutoTMalloc(size_t count)
+ : fPtr(count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr) {}
+
+ AutoTMalloc(AutoTMalloc&&) = default;
+ AutoTMalloc& operator=(AutoTMalloc&&) = default;
+
+ /** Resize the memory area pointed to by the current ptr preserving contents. */
+ void realloc(size_t count) {
+ fPtr.reset(count ? (T*)sk_realloc_throw(fPtr.release(), count * sizeof(T)) : nullptr);
+ }
+
+ /** Resize the memory area pointed to by the current ptr without preserving contents. */
+ T* reset(size_t count = 0) {
+ fPtr.reset(count ? (T*)sk_malloc_throw(count, sizeof(T)) : nullptr);
+ return this->get();
+ }
+
+ T* get() const { return fPtr.get(); }
+
+ operator T*() { return fPtr.get(); }
+
+ operator const T*() const { return fPtr.get(); }
+
+ T& operator[](int index) { return fPtr.get()[index]; }
+
+ const T& operator[](int index) const { return fPtr.get()[index]; }
+
+ /** Aliases matching other types, like std::vector. */
+ const T* data() const { return fPtr.get(); }
+ T* data() { return fPtr.get(); }
+
+ /**
+ * Transfer ownership of the ptr to the caller, setting the internal
+ * pointer to NULL. Note that this differs from get(), which also returns
+ * the pointer, but it does not transfer ownership.
+ */
+ T* release() { return fPtr.release(); }
+
+private:
+ std::unique_ptr<T, SkOverloadedFunctionObject<void(void*), sk_free>> fPtr;  // sk_free is the deleter.
+};
+
+template <size_t kCountRequested,
+ typename T,
+ typename = std::enable_if_t<std::is_trivially_default_constructible<T>::value &&
+ std::is_trivially_destructible<T>::value>>  // no T ctors/dtors are ever run.
+class AutoSTMalloc {
+public:
+ AutoSTMalloc() : fPtr(fTStorage) {}  // starts out pointing at the inline storage.
+
+ AutoSTMalloc(size_t count) {
+ if (count > kCount) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));  // too big for the inline buffer: heap-allocate.
+ } else if (count) {
+ fPtr = fTStorage;
+ } else {
+ fPtr = nullptr;
+ }
+ }
+
+ AutoSTMalloc(AutoSTMalloc&&) = delete;
+ AutoSTMalloc(const AutoSTMalloc&) = delete;
+ AutoSTMalloc& operator=(AutoSTMalloc&&) = delete;
+ AutoSTMalloc& operator=(const AutoSTMalloc&) = delete;
+
+ ~AutoSTMalloc() {
+ if (fPtr != fTStorage) {
+ sk_free(fPtr);  // only heap allocations are freed; the inline buffer is part of the object.
+ }
+ }
+
+ // doesn't preserve contents
+ T* reset(size_t count) {
+ if (fPtr != fTStorage) {
+ sk_free(fPtr);
+ }
+ if (count > kCount) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
+ } else if (count) {
+ fPtr = fTStorage;
+ } else {
+ fPtr = nullptr;
+ }
+ return fPtr;
+ }
+
+ T* get() const { return fPtr; }
+
+ operator T*() {
+ return fPtr;
+ }
+
+ operator const T*() const {
+ return fPtr;
+ }
+
+ T& operator[](int index) {
+ return fPtr[index];
+ }
+
+ const T& operator[](int index) const {
+ return fPtr[index];
+ }
+
+ /** Aliases matching other types, like std::vector. */
+ const T* data() const { return fPtr; }
+ T* data() { return fPtr; }
+
+ // Reallocs the array, can be used to shrink the allocation. Makes no attempt to be intelligent
+ void realloc(size_t count) {
+ if (count > kCount) {
+ if (fPtr == fTStorage) {
+ fPtr = (T*)sk_malloc_throw(count, sizeof(T));
+ memcpy((void*)fPtr, fTStorage, kCount * sizeof(T));  // first spill from inline storage: copy the old contents over.
+ } else {
+ fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T));
+ }
+ } else if (count) {
+ if (fPtr != fTStorage) {
+ fPtr = (T*)sk_realloc_throw(fPtr, count, sizeof(T));
+ }
+ } else {
+ this->reset(0);
+ }
+ }
+
+private:
+ // Since we use uint32_t storage, we might be able to get more elements for free.
+ static const size_t kCountWithPadding = SkAlign4(kCountRequested*sizeof(T)) / sizeof(T);
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const size_t kMaxBytes = 4 * 1024;
+ static const size_t kCount = kCountRequested * sizeof(T) > kMaxBytes
+ ? kMaxBytes / sizeof(T)
+ : kCountWithPadding;
+#else
+ static const size_t kCount = kCountWithPadding;
+#endif
+
+ T* fPtr;
+ union {
+ uint32_t fStorage32[SkAlign4(kCount*sizeof(T)) >> 2];
+ T fTStorage[1]; // do NOT want to invoke T::T()
+ };
+
+using UniqueVoidPtr = std::unique_ptr<void, SkOverloadedFunctionObject<void(void*), sk_free>>;
+
+} // namespace skia_private
+
+template<typename C, std::size_t... Is>
+constexpr auto SkMakeArrayFromIndexSequence(C c, std::index_sequence<Is...> is)
+-> std::array<decltype(c(std::declval<typename decltype(is)::value_type>())), sizeof...(Is)> {
+ return {{ c(Is)... }};  // expands to { c(0), c(1), ..., c(N-1) }.
+}
+
+template<size_t N, typename C> constexpr auto SkMakeArray(C c)
+-> std::array<decltype(c(std::declval<typename std::index_sequence<N>::value_type>())), N> {
+ return SkMakeArrayFromIndexSequence(c, std::make_index_sequence<N>{});  // build an N-element array by applying c to each index.
+}
+
+#endif
diff --git a/gfx/skia/skia/include/private/base/SkThreadAnnotations.h b/gfx/skia/skia/include/private/base/SkThreadAnnotations.h
new file mode 100644
index 0000000000..fc2a4aacee
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkThreadAnnotations.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadAnnotations_DEFINED
+#define SkThreadAnnotations_DEFINED
+
+// The bulk of this code is cribbed from:
+// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+
+#if defined(__clang__) && (!defined(SWIG))
+#define SK_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x))
+#else
+#define SK_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op
+#endif
+
+#define SK_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(capability(x))
+
+#define SK_SCOPED_CAPABILITY \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable)
+
+#define SK_GUARDED_BY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x))
+
+#define SK_PT_GUARDED_BY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x))
+
+#define SK_ACQUIRED_BEFORE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__))
+
+#define SK_ACQUIRED_AFTER(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__))
+
+#define SK_REQUIRES(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(requires_capability(__VA_ARGS__))
+
+#define SK_REQUIRES_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(requires_shared_capability(__VA_ARGS__))
+
+#define SK_ACQUIRE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquire_capability(__VA_ARGS__))
+
+#define SK_ACQUIRE_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(acquire_shared_capability(__VA_ARGS__))
+
+// Would be SK_RELEASE, but that is already in use as SK_DEBUG vs. SK_RELEASE.
+#define SK_RELEASE_CAPABILITY(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(release_capability(__VA_ARGS__))
+
+// For symmetry with SK_RELEASE_CAPABILITY.
+#define SK_RELEASE_SHARED_CAPABILITY(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(release_shared_capability(__VA_ARGS__))
+
+#define SK_TRY_ACQUIRE(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(try_acquire_capability(__VA_ARGS__))
+
+#define SK_TRY_ACQUIRE_SHARED(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(try_acquire_shared_capability(__VA_ARGS__))
+
+#define SK_EXCLUDES(...) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__))
+
+#define SK_ASSERT_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(assert_capability(x))
+
+#define SK_ASSERT_SHARED_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_capability(x))
+
+#define SK_RETURN_CAPABILITY(x) \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x))
+
+#define SK_NO_THREAD_SAFETY_ANALYSIS \
+ SK_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis)
+
+#if defined(SK_BUILD_FOR_GOOGLE3) && !defined(SK_BUILD_FOR_WASM_IN_GOOGLE3)
+ extern "C" {
+ void __google_cxa_guard_acquire_begin(void);
+ void __google_cxa_guard_acquire_end (void);
+ }
+ #define SK_POTENTIALLY_BLOCKING_REGION_BEGIN __google_cxa_guard_acquire_begin()
+ #define SK_POTENTIALLY_BLOCKING_REGION_END __google_cxa_guard_acquire_end()
+#else
+ #define SK_POTENTIALLY_BLOCKING_REGION_BEGIN
+ #define SK_POTENTIALLY_BLOCKING_REGION_END
+#endif
+
+#endif // SkThreadAnnotations_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkThreadID.h b/gfx/skia/skia/include/private/base/SkThreadID.h
new file mode 100644
index 0000000000..18984884c9
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkThreadID.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkThreadID_DEFINED
+#define SkThreadID_DEFINED
+
+#include "include/private/base/SkAPI.h"
+#include "include/private/base/SkDebug.h"
+
+#include <cstdint>
+
+typedef int64_t SkThreadID;
+
+// SkMutex.h uses SkGetThreadID in debug only code.
+SkDEBUGCODE(SK_SPI) SkThreadID SkGetThreadID();
+
+const SkThreadID kIllegalThreadID = 0;
+
+#endif // SkThreadID_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkTo.h b/gfx/skia/skia/include/private/base/SkTo.h
new file mode 100644
index 0000000000..51ccafeeaf
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkTo_DEFINED
+#define SkTo_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTFitsIn.h"
+
+#include <cstddef>
+#include <cstdint>
+
+template <typename D, typename S> constexpr D SkTo(S s) {
+ return SkASSERT(SkTFitsIn<D>(s)),  // debug-assert the value fits; the comma keeps this a single C++11 constexpr expression.
+ static_cast<D>(s);
+}
+
+template <typename S> constexpr int8_t SkToS8(S x) { return SkTo<int8_t>(x); }
+template <typename S> constexpr uint8_t SkToU8(S x) { return SkTo<uint8_t>(x); }
+template <typename S> constexpr int16_t SkToS16(S x) { return SkTo<int16_t>(x); }
+template <typename S> constexpr uint16_t SkToU16(S x) { return SkTo<uint16_t>(x); }
+template <typename S> constexpr int32_t SkToS32(S x) { return SkTo<int32_t>(x); }
+template <typename S> constexpr uint32_t SkToU32(S x) { return SkTo<uint32_t>(x); }
+template <typename S> constexpr int64_t SkToS64(S x) { return SkTo<int64_t>(x); }
+template <typename S> constexpr uint64_t SkToU64(S x) { return SkTo<uint64_t>(x); }
+template <typename S> constexpr int SkToInt(S x) { return SkTo<int>(x); }
+template <typename S> constexpr unsigned SkToUInt(S x) { return SkTo<unsigned>(x); }
+template <typename S> constexpr size_t SkToSizeT(S x) { return SkTo<size_t>(x); }
+
+/** @return false or true based on the condition
+*/
+template <typename T> static constexpr bool SkToBool(const T& x) {
+ return (bool)x;  // plain truthiness conversion; no range check, unlike SkTo above.
+}
+
+#endif // SkTo_DEFINED
diff --git a/gfx/skia/skia/include/private/base/SkTypeTraits.h b/gfx/skia/skia/include/private/base/SkTypeTraits.h
new file mode 100644
index 0000000000..736f789776
--- /dev/null
+++ b/gfx/skia/skia/include/private/base/SkTypeTraits.h
@@ -0,0 +1,33 @@
+// Copyright 2022 Google LLC
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkTypeTraits_DEFINED
+#define SkTypeTraits_DEFINED
+
+#include <memory>
+#include <type_traits>
+
+// Trait for identifying types which are relocatable via memcpy, for container optimizations.
+template<typename, typename = void>
+struct sk_has_trivially_relocatable_member : std::false_type {};  // primary: type has no opt-in member.
+
+// Types can declare themselves trivially relocatable with a public
+// using sk_is_trivially_relocatable = std::true_type;
+template<typename T>
+struct sk_has_trivially_relocatable_member<T, std::void_t<typename T::sk_is_trivially_relocatable>>
+ : T::sk_is_trivially_relocatable {};  // opt-in detected via void_t SFINAE on the member type.
+
+// By default, all trivially copyable types are trivially relocatable.
+template <typename T>
+struct sk_is_trivially_relocatable
+ : std::disjunction<std::is_trivially_copyable<T>, sk_has_trivially_relocatable_member<T>>{};
+
+// Here be some dragons: while technically not guaranteed, we count on all sane unique_ptr
+// implementations to be trivially relocatable.
+template <typename T>
+struct sk_is_trivially_relocatable<std::unique_ptr<T>> : std::true_type {};
+
+template <typename T>
+inline constexpr bool sk_is_trivially_relocatable_v = sk_is_trivially_relocatable<T>::value;
+
+#endif // SkTypeTraits_DEFINED
diff --git a/gfx/skia/skia/include/private/chromium/GrSlug.h b/gfx/skia/skia/include/private/chromium/GrSlug.h
new file mode 100644
index 0000000000..56841c5b99
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/GrSlug.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrSlug_DEFINED
+#define GrSlug_DEFINED
+
+#include "include/private/chromium/Slug.h"
+
+// TODO: Update Chrome to use sktext::gpu classes and remove these
+using GrSlug = sktext::gpu::Slug;
+
+#endif // GrSlug_DEFINED
diff --git a/gfx/skia/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h b/gfx/skia/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h
new file mode 100644
index 0000000000..51ed8a804d
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/GrVkSecondaryCBDrawContext.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkSecondaryCBDrawContext_DEFINED
+#define GrVkSecondaryCBDrawContext_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+
+#include <memory>
+
+class GrBackendSemaphore;
+class GrRecordingContext;
+struct GrVkDrawableInfo;
+namespace skgpu::ganesh {
+class Device;
+}
+class SkCanvas;
+class SkDeferredDisplayList;
+struct SkImageInfo;
+class SkSurfaceCharacterization;
+class SkSurfaceProps;
+
+/**
+ * This class is a private header that is intended to only be used inside of Chromium. This requires
+ * Chromium to burrow in and include this specifically since it is not part of skia's public include
+ * directory.
+ */
+
+/**
+ * This class is used to draw into an external Vulkan secondary command buffer that is imported
+ * by the client. The secondary command buffer that gets imported must already have had begin called
+ * on it with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT. Thus any draws to the imported
+ * command buffer cannot require changing the render pass. This requirement means that certain types
+ * of draws will not be supported when using a GrVkSecondaryCBDrawContext. This includes:
+ * Draws that require a dst copy for blending will be dropped
+ * Text draws will be dropped (these may require intermediate uploads of text data)
+ * Read and Write pixels will not work
+ * Any other draw that requires a copy will fail (this includes using backdrop filter with save
+ * layer).
+ * Stenciling is also disabled, but that should not restrict any actual draws from working.
+ *
+ * While using a GrVkSecondaryCBDrawContext, the client can also draw into normal SkSurfaces and
+ * then draw those SkSurfaces (as SkImages) into the GrVkSecondaryCBDrawContext. If any of the
+ * previously mentioned unsupported draws are needed by the client, they can draw them into an
+ * offscreen surface, and then draw that into the GrVkSecondaryCBDrawContext.
+ *
+ * After all drawing to the GrVkSecondaryCBDrawContext has been done, the client must call flush()
+ * on the GrVkSecondaryCBDrawContext to actually fill in the secondary VkCommandBuffer with the
+ * draws.
+ *
+ * Additionally, the client must keep the GrVkSecondaryCBDrawContext alive until the secondary
+ * VkCommandBuffer has been submitted and all work finished on the GPU. Before deleting the
+ * GrVkSecondaryCBDrawContext, the client must call releaseResources() so that Skia can cleanup
+ * any internal objects that were created for the draws into the secondary command buffer.
+ */
+class SK_SPI GrVkSecondaryCBDrawContext : public SkRefCnt {
+public:
+ static sk_sp<GrVkSecondaryCBDrawContext> Make(GrRecordingContext*,
+ const SkImageInfo&,
+ const GrVkDrawableInfo&,
+ const SkSurfaceProps* props);
+
+ ~GrVkSecondaryCBDrawContext() override;
+
+ SkCanvas* getCanvas();
+
+ // Records all the draws to the imported secondary command buffer and sets any dependent
+ // offscreen draws to the GPU.
+ void flush();
+
+ /** Inserts a list of GPU semaphores that Skia will have the driver wait on before executing
+ commands for this secondary CB. The wait semaphores will get added to the VkCommandBuffer
+ owned by this GrContext when flush() is called, and not the command buffer which the
+ Secondary CB is from. This will guarantee that the driver waits on the semaphores before
+ the secondary command buffer gets executed. If this call returns false, then the GPU
+ back end will not wait on any passed in semaphores, and the client will still own the
+ semaphores, regardless of the value of deleteSemaphoresAfterWait.
+
+ If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
+ it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
+ knows that Skia has finished waiting on them. This can be done by using finishedProcs
+ on flush calls.
+
+ @param numSemaphores size of waitSemaphores array
+ @param waitSemaphores array of semaphore containers
+ @param deleteSemaphoresAfterWait who owns and should delete the semaphores
+ @return true if GPU is waiting on semaphores
+ */
+ bool wait(int numSemaphores,
+ const GrBackendSemaphore waitSemaphores[],
+ bool deleteSemaphoresAfterWait = true);
+
+ // This call will release all resources held by the draw context. The client must call
+ // releaseResources() before deleting the drawing context. However, the resources also include
+ // any Vulkan resources that were created and used for draws. Therefore the client must only
+ // call releaseResources() after submitting the secondary command buffer, and waiting for it to
+ // finish on the GPU. If it is called earlier then some vulkan objects may be deleted while they
+ // are still in use by the GPU.
+ void releaseResources();
+
+ const SkSurfaceProps& props() const { return fProps; }
+
+ // TODO: Fill out these calls to support DDL
+ bool characterize(SkSurfaceCharacterization* characterization) const;
+
+#ifndef SK_DDL_IS_UNIQUE_POINTER
+ bool draw(sk_sp<const SkDeferredDisplayList> deferredDisplayList);
+#else
+ bool draw(const SkDeferredDisplayList* deferredDisplayList);
+#endif
+
+ bool isCompatible(const SkSurfaceCharacterization& characterization) const;
+
+private:
+ explicit GrVkSecondaryCBDrawContext(sk_sp<skgpu::ganesh::Device>, const SkSurfaceProps*);
+
+ sk_sp<skgpu::ganesh::Device> fDevice;
+ std::unique_ptr<SkCanvas> fCachedCanvas;
+ const SkSurfaceProps fProps;
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/chromium/SkChromeRemoteGlyphCache.h b/gfx/skia/skia/include/private/chromium/SkChromeRemoteGlyphCache.h
new file mode 100644
index 0000000000..962d183b2d
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/SkChromeRemoteGlyphCache.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChromeRemoteGlyphCache_DEFINED
+#define SkChromeRemoteGlyphCache_DEFINED
+
+#include <memory>
+#include <vector>
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/utils/SkNoDrawCanvas.h"
+
+struct SkPackedGlyphID;
+class SkAutoDescriptor;
+class SkStrikeCache;
+class SkStrikeClientImpl;
+class SkStrikeServer;
+class SkStrikeServerImpl;
+namespace sktext::gpu { class Slug; }
+
+using SkDiscardableHandleId = uint32_t;
+// This class is not thread-safe.
+class SkStrikeServer {
+public:
+ // An interface used by the server to create handles for pinning SkStrike
+ // entries on the remote client.
+ class DiscardableHandleManager {
+ public:
+ SK_SPI virtual ~DiscardableHandleManager() = default;
+
+ // Creates a new *locked* handle and returns a unique ID that can be used to identify
+ // it on the remote client.
+ SK_SPI virtual SkDiscardableHandleId createHandle() = 0;
+
+ // Returns true if the handle could be successfully locked. The server can
+ // assume it will remain locked until the next set of serialized entries is
+ // pulled from the SkStrikeServer.
+ // If returns false, the cache entry mapped to the handle has been deleted
+ // on the client. Any subsequent attempts to lock the same handle are not
+ // allowed.
+ SK_SPI virtual bool lockHandle(SkDiscardableHandleId) = 0;
+
+ // Returns true if a handle has been deleted on the remote client. It is
+ // invalid to use a handle id again with this manager once this returns true.
+ SK_SPI virtual bool isHandleDeleted(SkDiscardableHandleId) = 0;
+ };
+
+ SK_SPI explicit SkStrikeServer(DiscardableHandleManager* discardableHandleManager);
+ SK_SPI ~SkStrikeServer();
+
+ // Create an analysis SkCanvas used to populate the SkStrikeServer with ops
+ // which will be serialized and rendered using the SkStrikeClient.
+ SK_API std::unique_ptr<SkCanvas> makeAnalysisCanvas(int width, int height,
+ const SkSurfaceProps& props,
+ sk_sp<SkColorSpace> colorSpace,
+ bool DFTSupport,
+ bool DFTPerspSupport = true);
+
+ // Serializes the strike data captured using a canvas returned by ::makeAnalysisCanvas. Any
+ // handles locked using the DiscardableHandleManager will be assumed to be
+ // unlocked after this call.
+ SK_SPI void writeStrikeData(std::vector<uint8_t>* memory);
+
+ // Testing helpers
+ void setMaxEntriesInDescriptorMapForTesting(size_t count);
+ size_t remoteStrikeMapSizeForTesting() const;
+
+private:
+ SkStrikeServerImpl* impl();
+
+ std::unique_ptr<SkStrikeServerImpl> fImpl;
+};
+
+class SkStrikeClient {
+public:
+ // This enum is used in histogram reporting in chromium. Please don't re-order the list of
+ // entries, and consider it to be append-only.
+ enum CacheMissType : uint32_t {
+ // Hard failures where no fallback could be found.
+ kFontMetrics = 0,
+ kGlyphMetrics = 1,
+ kGlyphImage = 2,
+ kGlyphPath = 3,
+
+ // (DEPRECATED) The original glyph could not be found and a fallback was used.
+ kGlyphMetricsFallback = 4,
+ kGlyphPathFallback = 5,
+
+ kGlyphDrawable = 6,
+ kLast = kGlyphDrawable
+ };
+
+ // An interface to delete handles that may be pinned by the remote server.
+ class DiscardableHandleManager : public SkRefCnt {
+ public:
+ ~DiscardableHandleManager() override = default;
+
+ // Returns true if the handle was unlocked and can be safely deleted. Once
+ // successful, subsequent attempts to delete the same handle are invalid.
+ virtual bool deleteHandle(SkDiscardableHandleId) = 0;
+
+ virtual void assertHandleValid(SkDiscardableHandleId) {}
+
+ virtual void notifyCacheMiss(CacheMissType type, int fontSize) = 0;
+
+ struct ReadFailureData {
+ size_t memorySize;
+ size_t bytesRead;
+ uint64_t typefaceSize;
+ uint64_t strikeCount;
+ uint64_t glyphImagesCount;
+ uint64_t glyphPathsCount;
+ };
+ virtual void notifyReadFailure(const ReadFailureData& data) {}
+ };
+
+ SK_SPI explicit SkStrikeClient(sk_sp<DiscardableHandleManager>,
+ bool isLogging = true,
+ SkStrikeCache* strikeCache = nullptr);
+ SK_SPI ~SkStrikeClient();
+
+ // Deserializes the strike data from a SkStrikeServer. All messages generated
+ // from a server when serializing the ops must be deserialized before the op
+ // is rasterized.
+ // Returns false if the data is invalid.
+ SK_SPI bool readStrikeData(const volatile void* memory, size_t memorySize);
+
+ // Given a descriptor re-write the Rec mapping the typefaceID from the renderer to the
+ // corresponding typefaceID on the GPU.
+ SK_SPI bool translateTypefaceID(SkAutoDescriptor* descriptor) const;
+
+ // Testing helpers
+ sk_sp<SkTypeface> retrieveTypefaceUsingServerIDForTest(SkTypefaceID) const;
+
+ // Given a buffer, unflatten into a slug making sure to do the typefaceID translation from
+ // renderer to GPU. Returns nullptr if there was a problem.
+ sk_sp<sktext::gpu::Slug> deserializeSlugForTest(const void* data, size_t size) const;
+
+private:
+ std::unique_ptr<SkStrikeClientImpl> fImpl;
+};
+#endif // SkChromeRemoteGlyphCache_DEFINED
diff --git a/gfx/skia/skia/include/private/chromium/SkDiscardableMemory.h b/gfx/skia/skia/include/private/chromium/SkDiscardableMemory.h
new file mode 100644
index 0000000000..ade4d71aa7
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/SkDiscardableMemory.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscardableMemory_DEFINED
+#define SkDiscardableMemory_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+/**
+ * Interface for discardable memory. Implementation is provided by the
+ * embedder.
+ */
+class SK_SPI SkDiscardableMemory {
+public:
+ /**
+ * Factory method that creates, initializes and locks an SkDiscardableMemory
+ * object. If either of these steps fails, nullptr will be returned.
+ */
+ static SkDiscardableMemory* Create(size_t bytes);
+
+ /**
+ * Factory class that creates, initializes and locks an SkDiscardableMemory
+ * object. If either of these steps fails, nullptr will be returned.
+ */
+ class Factory : public SkRefCnt {
+ public:
+ virtual SkDiscardableMemory* create(size_t bytes) = 0;
+ private:
+ using INHERITED = SkRefCnt;
+ };
+
+ /** Must not be called while locked.
+ */
+ virtual ~SkDiscardableMemory() {}
+
+ /**
+ * Locks the memory, preventing it from being discarded. Once locked, you may
+ * obtain a pointer to that memory using the data() method.
+ *
+ * lock() may return false, indicating that the underlying memory was
+ * discarded and that the lock failed.
+ *
+ * Nested calls to lock are not allowed.
+ */
+ virtual bool SK_WARN_UNUSED_RESULT lock() = 0;
+
+ /**
+ * Returns the current pointer for the discardable memory. This call is ONLY
+ * valid when the discardable memory object is locked.
+ */
+ virtual void* data() = 0;
+
+ /**
+ * Unlock the memory so that it can be purged by the system. Must be called
+ * after every successful lock call.
+ */
+ virtual void unlock() = 0;
+
+protected:
+ SkDiscardableMemory() = default;
+ SkDiscardableMemory(const SkDiscardableMemory&) = delete;
+ SkDiscardableMemory& operator=(const SkDiscardableMemory&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/chromium/Slug.h b/gfx/skia/skia/include/private/chromium/Slug.h
new file mode 100644
index 0000000000..6775af0fc6
--- /dev/null
+++ b/gfx/skia/skia/include/private/chromium/Slug.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef sktext_gpu_Slug_DEFINED
+#define sktext_gpu_Slug_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+
+class SkCanvas;
+class SkMatrix;
+class SkPaint;
+class SkTextBlob;
+class SkReadBuffer;
+class SkStrikeClient;
+class SkWriteBuffer;
+
+namespace sktext::gpu {
+// Slug encapsulates an SkTextBlob at a specific origin, using a specific paint. It can be
+// manipulated using matrix and clip changes to the canvas. If the canvas is transformed, then
+// the Slug will also transform with smaller glyphs using bi-linear interpolation to render. You
+// can think of a Slug as making a rubber stamp out of a SkTextBlob.
+class SK_API Slug : public SkRefCnt {
+public:
+ // Return nullptr if the blob would not draw. This is not because of clipping, but because of
+ // some paint optimization. The Slug is captured as if drawn using drawTextBlob.
+ static sk_sp<Slug> ConvertBlob(
+ SkCanvas* canvas, const SkTextBlob& blob, SkPoint origin, const SkPaint& paint);
+
+ // Serialize the slug.
+ sk_sp<SkData> serialize() const;
+ size_t serialize(void* buffer, size_t size) const;
+
+ // Set the client parameter to the appropriate SkStrikeClient when typeface ID translation
+ // is needed.
+ static sk_sp<Slug> Deserialize(
+ const void* data, size_t size, const SkStrikeClient* client = nullptr);
+ static sk_sp<Slug> MakeFromBuffer(SkReadBuffer& buffer);
+
+
+ // Draw the Slug obeying the canvas's mapping and clipping.
+ void draw(SkCanvas* canvas) const;
+
+ virtual SkRect sourceBounds() const = 0;
+ virtual SkRect sourceBoundsWithOrigin () const = 0;
+
+ // The paint passed into ConvertBlob; this paint is used instead of the paint resulting from
+ // the call to aboutToDraw because when we call draw(), the initial paint is needed to call
+ // aboutToDraw again to get the layer right.
+ virtual const SkPaint& initialPaint() const = 0;
+
+ virtual void doFlatten(SkWriteBuffer&) const = 0;
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+private:
+ static uint32_t NextUniqueID();
+ const uint32_t fUniqueID{NextUniqueID()};
+};
+} // namespace sktext::gpu
+
+#endif // sktext_gpu_Slug_DEFINED
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrContext_Base.h b/gfx/skia/skia/include/private/gpu/ganesh/GrContext_Base.h
new file mode 100644
index 0000000000..ba7172e005
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrContext_Base.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrContext_Base_DEFINED
+#define GrContext_Base_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrTypes.h"
+
+class GrBaseContextPriv;
+class GrCaps;
+class GrContextThreadSafeProxy;
+class GrDirectContext;
+class GrImageContext;
+class GrRecordingContext;
+enum class SkTextureCompressionType;
+
+class GrContext_Base : public SkRefCnt {
+public:
+ ~GrContext_Base() override;
+
+ /*
+ * Safely downcast to a GrDirectContext.
+ */
+ virtual GrDirectContext* asDirectContext() { return nullptr; }
+
+ /*
+ * The 3D API backing this context
+ */
+ SK_API GrBackendApi backend() const;
+
+ /*
+ * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
+ * It is guaranteed that this backend format will be the one used by the GrContext
+ * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
+ *
+ * The caller should check that the returned format is valid.
+ */
+ SK_API GrBackendFormat defaultBackendFormat(SkColorType, GrRenderable) const;
+
+ SK_API GrBackendFormat compressedBackendFormat(SkTextureCompressionType) const;
+
+ /**
+ * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
+ * rendering is supported for the color type. 0 is returned if rendering to this color type
+ * is not supported at all.
+ */
+ SK_API int maxSurfaceSampleCountForColorType(SkColorType colorType) const;
+
+ // TODO: When the public version is gone, rename to refThreadSafeProxy and add raw ptr ver.
+ sk_sp<GrContextThreadSafeProxy> threadSafeProxy();
+
+ // Provides access to functions that aren't part of the public API.
+ GrBaseContextPriv priv();
+ const GrBaseContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+protected:
+ friend class GrBaseContextPriv; // for hidden functions
+
+ GrContext_Base(sk_sp<GrContextThreadSafeProxy>);
+
+ virtual bool init();
+
+ /**
+ * An identifier for this context. The id is used by all compatible contexts. For example,
+ * if SkImages are created on one thread using an image creation context, then fed into a
+ * DDL Recorder on second thread (which has a recording context) and finally replayed on
+ * a third thread with a direct context, then all three contexts will report the same id.
+ * It is an error for an image to be used with contexts that report different ids.
+ */
+ uint32_t contextID() const;
+
+ bool matches(GrContext_Base* candidate) const {
+ return candidate && candidate->contextID() == this->contextID();
+ }
+
+ /*
+ * The options in effect for this context
+ */
+ const GrContextOptions& options() const;
+
+ const GrCaps* caps() const;
+ sk_sp<const GrCaps> refCaps() const;
+
+ virtual GrImageContext* asImageContext() { return nullptr; }
+ virtual GrRecordingContext* asRecordingContext() { return nullptr; }
+
+ sk_sp<GrContextThreadSafeProxy> fThreadSafeProxy;
+
+private:
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h b/gfx/skia/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h
new file mode 100644
index 0000000000..26b7534476
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrD3DTypesMinimal.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrD3DTypesMinimal_DEFINED
+#define GrD3DTypesMinimal_DEFINED
+
+// Minimal definitions of Direct3D types, without including d3d12.h
+
+#include "include/core/SkRefCnt.h"
+
+#include <dxgiformat.h>
+
+#include "include/gpu/GrTypes.h"
+
+struct ID3D12Resource;
+class GrD3DResourceState;
+typedef int GrD3DResourceStateEnum;
+struct GrD3DSurfaceInfo;
+struct GrD3DTextureResourceInfo;
+struct GrD3DTextureResourceSpec;
+struct GrD3DFenceInfo;
+
+// This struct is used to store the actual information about the Direct3D backend image on
+// GrBackendTexture and GrBackendRenderTarget. When a client calls getD3DTextureInfo on a
+// GrBackendTexture/RenderTarget, we use the GrD3DBackendSurfaceInfo to create a snapshot
+// GrD3DTextureResourceInfo object. Internally, this uses a ref count GrD3DResourceState object to
+// track the current D3D12_RESOURCE_STATES which can be shared with an internal GrD3DTextureResource
+// so that state updates can be seen by all users of the texture.
+struct GrD3DBackendSurfaceInfo {
+ GrD3DBackendSurfaceInfo(const GrD3DTextureResourceInfo& info, GrD3DResourceState* state);
+
+ void cleanup();
+
+ GrD3DBackendSurfaceInfo& operator=(const GrD3DBackendSurfaceInfo&) = delete;
+
+ // Assigns the passed in GrD3DBackendSurfaceInfo to this object. If isValid is true we will also
+ // attempt to unref the old fLayout on this object.
+ void assign(const GrD3DBackendSurfaceInfo&, bool isValid);
+
+ void setResourceState(GrD3DResourceStateEnum state);
+
+ sk_sp<GrD3DResourceState> getGrD3DResourceState() const;
+
+ GrD3DTextureResourceInfo snapTextureResourceInfo() const;
+
+ bool isProtected() const;
+#if GR_TEST_UTILS
+ bool operator==(const GrD3DBackendSurfaceInfo& that) const;
+#endif
+
+private:
+ GrD3DTextureResourceInfo* fTextureResourceInfo;
+ GrD3DResourceState* fResourceState;
+};
+
+struct GrD3DTextureResourceSpecHolder {
+public:
+ GrD3DTextureResourceSpecHolder(const GrD3DSurfaceInfo&);
+
+ void cleanup();
+
+ GrD3DSurfaceInfo getSurfaceInfo(uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected) const;
+
+private:
+ GrD3DTextureResourceSpec* fSpec;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h
new file mode 100644
index 0000000000..ffcdc0eaaf
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrDawnTypesPriv.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDawnTypesPriv_DEFINED
+#define GrDawnTypesPriv_DEFINED
+
+#include "include/gpu/dawn/GrDawnTypes.h"
+
+struct GrDawnTextureSpec {
+ GrDawnTextureSpec() {}
+ GrDawnTextureSpec(const GrDawnSurfaceInfo& info) : fFormat(info.fFormat) {}
+
+ wgpu::TextureFormat fFormat;
+};
+
+GrDawnSurfaceInfo GrDawnTextureSpecToSurfaceInfo(const GrDawnTextureSpec& dawnSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected);
+
+#endif
+
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrGLTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrGLTypesPriv.h
new file mode 100644
index 0000000000..7db777487a
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrGLTypesPriv.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/gl/GrGLTypes.h"
+
+#ifndef GrGLTypesPriv_DEFINED
+#define GrGLTypesPriv_DEFINED
+
+static constexpr int kGrGLColorFormatCount = static_cast<int>(GrGLFormat::kLastColorFormat) + 1;
+
+class GrGLTextureParameters : public SkNVRefCnt<GrGLTextureParameters> {
+public:
+ // We currently consider texture parameters invalid on all textures
+ // GrContext::resetContext(). We use this type to track whether instances of
+ // GrGLTextureParameters were updated before or after the most recent resetContext(). At 10
+ // resets / frame and 60fps a 64bit timestamp will overflow in about a billion years.
+ // TODO: Require clients to use GrBackendTexture::glTextureParametersModified() to invalidate
+ // texture parameters and get rid of timestamp checking.
+ using ResetTimestamp = uint64_t;
+
+ // This initializes the params to have an expired timestamp. They'll be considered invalid the
+ // first time the texture is used unless set() is called.
+ GrGLTextureParameters() = default;
+
+ // This is texture parameter state that is overridden when a non-zero sampler object is bound.
+ struct SamplerOverriddenState {
+ SamplerOverriddenState();
+ void invalidate();
+
+ GrGLenum fMinFilter;
+ GrGLenum fMagFilter;
+ GrGLenum fWrapS;
+ GrGLenum fWrapT;
+ GrGLfloat fMinLOD;
+ GrGLfloat fMaxLOD;
+ GrGLfloat fMaxAniso;
+ // We always want the border color to be transparent black, so no need to store 4 floats.
+ // Just track if it's been invalidated and no longer the default
+ bool fBorderColorInvalid;
+ };
+
+ // Texture parameter state that is not overridden by a bound sampler object.
+ struct NonsamplerState {
+ NonsamplerState();
+ void invalidate();
+
+ GrGLint fBaseMipMapLevel;
+ GrGLint fMaxMipmapLevel;
+ bool fSwizzleIsRGBA;
+ };
+
+ void invalidate();
+
+ ResetTimestamp resetTimestamp() const { return fResetTimestamp; }
+ const SamplerOverriddenState& samplerOverriddenState() const { return fSamplerOverriddenState; }
+ const NonsamplerState& nonsamplerState() const { return fNonsamplerState; }
+
+ // SamplerOverriddenState is optional because we don't track it when we're using sampler
+ // objects.
+ void set(const SamplerOverriddenState* samplerState,
+ const NonsamplerState& nonsamplerState,
+ ResetTimestamp currTimestamp);
+
+private:
+ static constexpr ResetTimestamp kExpiredTimestamp = 0;
+
+ SamplerOverriddenState fSamplerOverriddenState;
+ NonsamplerState fNonsamplerState;
+ ResetTimestamp fResetTimestamp = kExpiredTimestamp;
+};
+
+class GrGLBackendTextureInfo {
+public:
+ GrGLBackendTextureInfo(const GrGLTextureInfo& info, GrGLTextureParameters* params)
+ : fInfo(info), fParams(params) {}
+ GrGLBackendTextureInfo(const GrGLBackendTextureInfo&) = delete;
+ GrGLBackendTextureInfo& operator=(const GrGLBackendTextureInfo&) = delete;
+ const GrGLTextureInfo& info() const { return fInfo; }
+ GrGLTextureParameters* parameters() const { return fParams; }
+ sk_sp<GrGLTextureParameters> refParameters() const { return sk_ref_sp(fParams); }
+
+ void cleanup();
+ void assign(const GrGLBackendTextureInfo&, bool thisIsValid);
+
+private:
+ GrGLTextureInfo fInfo;
+ GrGLTextureParameters* fParams;
+};
+
+struct GrGLTextureSpec {
+ GrGLTextureSpec() : fTarget(0), fFormat(0) {}
+ GrGLTextureSpec(const GrGLSurfaceInfo& info) : fTarget(info.fTarget), fFormat(info.fFormat) {}
+
+ GrGLenum fTarget;
+ GrGLenum fFormat;
+};
+
+GrGLSurfaceInfo GrGLTextureSpecToSurfaceInfo(const GrGLTextureSpec& glSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected);
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrImageContext.h b/gfx/skia/skia/include/private/gpu/ganesh/GrImageContext.h
new file mode 100644
index 0000000000..72fdd4433d
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrImageContext.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrImageContext_DEFINED
+#define GrImageContext_DEFINED
+
+#include "include/private/base/SingleOwner.h"
+#include "include/private/gpu/ganesh/GrContext_Base.h"
+
+class GrImageContextPriv;
+
+// This is now just a view on a ThreadSafeProxy, that SkImages can attempt to
+// downcast to a GrDirectContext as a backdoor to some operations. Once we remove the backdoors,
+// this goes away and SkImages just hold ThreadSafeProxies.
+class GrImageContext : public GrContext_Base {
+public:
+ ~GrImageContext() override;
+
+ // Provides access to functions that aren't part of the public API.
+ GrImageContextPriv priv();
+ const GrImageContextPriv priv() const; // NOLINT(readability-const-return-type)
+
+protected:
+ friend class GrImageContextPriv; // for hidden functions
+
+ GrImageContext(sk_sp<GrContextThreadSafeProxy>);
+
+ SK_API virtual void abandonContext();
+ SK_API virtual bool abandoned();
+
+ /** This is only useful for debug purposes */
+ skgpu::SingleOwner* singleOwner() const { return &fSingleOwner; }
+
+ GrImageContext* asImageContext() override { return this; }
+
+private:
+ // When making promise images, we currently need a placeholder GrImageContext instance to give
+ // to the SkImage that has no real power, just a wrapper around the ThreadSafeProxy.
+ // TODO: De-power SkImage to ThreadSafeProxy or at least figure out a way to share one instance.
+ static sk_sp<GrImageContext> MakeForPromiseImage(sk_sp<GrContextThreadSafeProxy>);
+
+ // In debug builds we guard against improper thread handling
+ // This guard is passed to the GrDrawingManager and, from there to all the
+ // GrSurfaceDrawContexts. It is also passed to the GrResourceProvider and SkGpuDevice.
+ // TODO: Move this down to GrRecordingContext.
+ mutable skgpu::SingleOwner fSingleOwner;
+
+ using INHERITED = GrContext_Base;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrMockTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrMockTypesPriv.h
new file mode 100644
index 0000000000..59a608dcfc
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrMockTypesPriv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockTypesPriv_DEFINED
+#define GrMockTypesPriv_DEFINED
+
+#include "include/core/SkTextureCompressionType.h"
+#include "include/gpu/mock/GrMockTypes.h"
+
+struct GrMockTextureSpec {
+ GrMockTextureSpec()
+ : fColorType(GrColorType::kUnknown)
+ , fCompressionType(SkTextureCompressionType::kNone) {}
+ GrMockTextureSpec(const GrMockSurfaceInfo& info)
+ : fColorType(info.fColorType)
+ , fCompressionType(info.fCompressionType) {}
+
+ GrColorType fColorType = GrColorType::kUnknown;
+ SkTextureCompressionType fCompressionType = SkTextureCompressionType::kNone;
+};
+
+GrMockSurfaceInfo GrMockTextureSpecToSurfaceInfo(const GrMockTextureSpec& mockSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ GrProtected isProtected);
+
+#endif
+
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h
new file mode 100644
index 0000000000..ef65848b5e
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrMtlTypesPriv.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMtlTypesPriv_DEFINED
+#define GrMtlTypesPriv_DEFINED
+
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/mtl/GrMtlTypes.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef __APPLE__
+
+#include <TargetConditionals.h>
+
+#if defined(SK_BUILD_FOR_MAC)
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 110000
+#define GR_METAL_SDK_VERSION 230
+#elif __MAC_OS_X_VERSION_MAX_ALLOWED >= 101500
+#define GR_METAL_SDK_VERSION 220
+#elif __MAC_OS_X_VERSION_MAX_ALLOWED >= 101400
+#define GR_METAL_SDK_VERSION 210
+#else
+#error Must use at least 10.14 SDK to build Metal backend for MacOS
+#endif
+#else
+#if __IPHONE_OS_VERSION_MAX_ALLOWED >= 140000 || __TV_OS_VERSION_MAX_ALLOWED >= 140000
+#define GR_METAL_SDK_VERSION 230
+#elif __IPHONE_OS_VERSION_MAX_ALLOWED >= 130000 || __TV_OS_VERSION_MAX_ALLOWED >= 130000
+#define GR_METAL_SDK_VERSION 220
+#elif __IPHONE_OS_VERSION_MAX_ALLOWED >= 120000 || __TV_OS_VERSION_MAX_ALLOWED >= 120000
+#define GR_METAL_SDK_VERSION 210
+#else
+#error Must use at least 12.00 SDK to build Metal backend for iOS
+#endif
+#endif
+
+#if __has_feature(objc_arc) && __has_attribute(objc_externally_retained)
+#define GR_NORETAIN __attribute__((objc_externally_retained))
+#define GR_NORETAIN_BEGIN \
+ _Pragma("clang attribute push (__attribute__((objc_externally_retained)), apply_to=any(function,objc_method))")
+#define GR_NORETAIN_END _Pragma("clang attribute pop")
+#else
+#define GR_NORETAIN
+#define GR_NORETAIN_BEGIN
+#define GR_NORETAIN_END
+#endif
+
+struct GrMtlTextureSpec {
+ GrMtlTextureSpec()
+ : fFormat(0)
+ , fUsage(0)
+ , fStorageMode(0) {}
+ GrMtlTextureSpec(const GrMtlSurfaceInfo& info)
+ : fFormat(info.fFormat)
+ , fUsage(info.fUsage)
+ , fStorageMode(info.fStorageMode) {}
+
+ GrMTLPixelFormat fFormat;
+ GrMTLTextureUsage fUsage;
+ GrMTLStorageMode fStorageMode;
+};
+
+GrMtlSurfaceInfo GrMtlTextureSpecToSurfaceInfo(const GrMtlTextureSpec& mtlSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected);
+
+#endif // __APPLE__
+
+#endif // GrMtlTypesPriv_DEFINED
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrTypesPriv.h
new file mode 100644
index 0000000000..fb8688de0d
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrTypesPriv.h
@@ -0,0 +1,1042 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTypesPriv_DEFINED
+#define GrTypesPriv_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTextureCompressionType.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTypeTraits.h"
+
+#include <chrono>
+#include <functional>
+
+class GrBackendFormat;
+class GrCaps;
+class GrSurfaceProxy;
+
+#ifdef MOZ_SKIA
+#include "mozilla/TimeStamp.h"
+
+struct GrStdSteadyClock
+{
+ typedef mozilla::TimeStamp time_point;
+
+ static time_point now() {
+ return mozilla::TimeStamp::NowLoRes();
+ }
+};
+
+static inline GrStdSteadyClock::time_point
+operator-(GrStdSteadyClock::time_point t, std::chrono::milliseconds ms) {
+ return t - mozilla::TimeDuration::FromMilliseconds(ms.count());
+}
+
+#else
+
+// The old libstdc++ uses the draft name "monotonic_clock" rather than "steady_clock". This might
+// not actually be monotonic, depending on how libstdc++ was built. However, this is only currently
+// used for idle resource purging so it shouldn't cause a correctness problem.
+#if defined(__GLIBCXX__) && (__GLIBCXX__ < 20130000)
+using GrStdSteadyClock = std::chrono::monotonic_clock;
+#else
+using GrStdSteadyClock = std::chrono::steady_clock;
+#endif
+
+#endif
+
+/**
+ * divide, rounding up
+ */
+
+static inline constexpr size_t GrSizeDivRoundUp(size_t x, size_t y) { return (x + (y - 1)) / y; }
+
+/**
+ * Geometric primitives used for drawing.
+ */
+enum class GrPrimitiveType : uint8_t {
+ kTriangles,
+ kTriangleStrip,
+ kPoints,
+ kLines, // 1 pix wide only
+ kLineStrip, // 1 pix wide only
+};
+static constexpr int kNumGrPrimitiveTypes = (int)GrPrimitiveType::kLineStrip + 1;
+
+static constexpr bool GrIsPrimTypeLines(GrPrimitiveType type) {
+ return GrPrimitiveType::kLines == type || GrPrimitiveType::kLineStrip == type;
+}
+
+enum class GrPrimitiveRestart : bool {
+ kNo = false,
+ kYes = true
+};
+
+/**
+ * Should a created surface be texturable?
+ */
+enum class GrTexturable : bool {
+ kNo = false,
+ kYes = true
+};
+
+// A DDL recorder has its own proxy provider and proxy cache. This enum indicates if
+// a given proxy provider is one of these special ones.
+enum class GrDDLProvider : bool {
+ kNo = false,
+ kYes = true
+};
+
+/** Ownership rules for external GPU resources imported into Skia. */
+enum GrWrapOwnership {
+ /** Skia will assume the client will keep the resource alive and Skia will not free it. */
+ kBorrow_GrWrapOwnership,
+
+ /** Skia will assume ownership of the resource and free it. */
+ kAdopt_GrWrapOwnership,
+};
+
+enum class GrWrapCacheable : bool {
+ /**
+ * The wrapped resource will be removed from the cache as soon as it becomes purgeable. It may
+ * still be assigned and found by a unique key, but the presence of the key will not be used to
+ * keep the resource alive when it has no references.
+ */
+ kNo = false,
+ /**
+ * The wrapped resource is allowed to remain in the GrResourceCache when it has no references
+ * but has a unique key. Such resources should only be given unique keys when it is known that
+ * the key will eventually be removed from the resource or invalidated via the message bus.
+ */
+ kYes = true
+};
+
+enum class GrBudgetedType : uint8_t {
+ /** The resource is budgeted and is subject to purging under budget pressure. */
+ kBudgeted,
+ /**
+ * The resource is unbudgeted and is purged as soon as it has no refs regardless of whether
+ * it has a unique or scratch key.
+ */
+ kUnbudgetedUncacheable,
+ /**
+ * The resource is unbudgeted and is allowed to remain in the cache with no refs if it
+ * has a unique key. Scratch keys are ignored.
+ */
+ kUnbudgetedCacheable,
+};
+
+enum class GrScissorTest : bool {
+ kDisabled = false,
+ kEnabled = true
+};
+
+/*
+ * Used to say whether texture is backed by memory.
+ */
+enum class GrMemoryless : bool {
+ /**
+ * The texture will be allocated normally and will affect memory budgets.
+ */
+ kNo = false,
+ /**
+     * The texture will not use GPU memory and will not affect memory budgets.
+ */
+ kYes = true
+};
+
+struct GrMipLevel {
+ const void* fPixels = nullptr;
+ size_t fRowBytes = 0;
+ // This may be used to keep fPixels from being freed while a GrMipLevel exists.
+ sk_sp<SkData> fOptionalStorage;
+
+ static_assert(::sk_is_trivially_relocatable<decltype(fPixels)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(fOptionalStorage)>::value);
+
+ using sk_is_trivially_relocatable = std::true_type;
+};
+
+enum class GrSemaphoreWrapType {
+ kWillSignal,
+ kWillWait,
+};
+
+/**
+ * This enum is used to specify the load operation to be used when an OpsTask/GrOpsRenderPass
+ * begins execution.
+ */
+enum class GrLoadOp {
+ kLoad,
+ kClear,
+ kDiscard,
+};
+
+/**
+ * This enum is used to specify the store operation to be used when an OpsTask/GrOpsRenderPass
+ * ends execution.
+ */
+enum class GrStoreOp {
+ kStore,
+ kDiscard,
+};
+
+/**
+ * Used to control antialiasing in draw calls.
+ */
+enum class GrAA : bool {
+ kNo = false,
+ kYes = true
+};
+
+enum class GrFillRule : bool {
+ kNonzero,
+ kEvenOdd
+};
+
+inline GrFillRule GrFillRuleForPathFillType(SkPathFillType fillType) {
+ switch (fillType) {
+ case SkPathFillType::kWinding:
+ case SkPathFillType::kInverseWinding:
+ return GrFillRule::kNonzero;
+ case SkPathFillType::kEvenOdd:
+ case SkPathFillType::kInverseEvenOdd:
+ return GrFillRule::kEvenOdd;
+ }
+ SkUNREACHABLE;
+}
+
+inline GrFillRule GrFillRuleForSkPath(const SkPath& path) {
+ return GrFillRuleForPathFillType(path.getFillType());
+}
+
+/** This enum indicates the type of antialiasing to be performed. */
+enum class GrAAType : unsigned {
+ /** No antialiasing */
+ kNone,
+ /** Use fragment shader code to blend with a fractional pixel coverage. */
+ kCoverage,
+ /** Use normal MSAA. */
+ kMSAA,
+
+ kLast = kMSAA
+};
+static const int kGrAATypeCount = static_cast<int>(GrAAType::kLast) + 1;
+
+static constexpr bool GrAATypeIsHW(GrAAType type) {
+ switch (type) {
+ case GrAAType::kNone:
+ return false;
+ case GrAAType::kCoverage:
+ return false;
+ case GrAAType::kMSAA:
+ return true;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Some pixel configs are inherently clamped to [0,1], some are allowed to go outside that range,
+ * and some are FP but manually clamped in the XP.
+ */
+enum class GrClampType {
+ kAuto, // Normalized, fixed-point configs
+ kManual, // Clamped FP configs
+ kNone, // Normal (unclamped) FP configs
+};
+
+/**
+ * A number of rectangle/quadrilateral drawing APIs can control anti-aliasing on a per edge basis.
+ * These masks specify which edges are AA'ed. The intent for this is to support tiling with seamless
+ * boundaries, where the inner edges are non-AA and the outer edges are AA. Regular rectangle draws
+ * simply use kAll or kNone depending on if they want anti-aliasing or not.
+ *
+ * In APIs that support per-edge AA, GrQuadAAFlags is the only AA-control parameter that is
+ * provided (compared to the typical GrAA parameter). kNone is equivalent to GrAA::kNo, and any
+ * other set of edge flags would require GrAA::kYes (with rendering output dependent on how that
+ * maps to GrAAType for a given SurfaceDrawContext).
+ *
+ * These values are identical to SkCanvas::QuadAAFlags.
+ */
+enum class GrQuadAAFlags {
+ kLeft = 0b0001,
+ kTop = 0b0010,
+ kRight = 0b0100,
+ kBottom = 0b1000,
+
+ kNone = 0b0000,
+ kAll = 0b1111,
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrQuadAAFlags)
+
+static inline GrQuadAAFlags SkToGrQuadAAFlags(unsigned flags) {
+ return static_cast<GrQuadAAFlags>(flags);
+}
+
+/**
+ * The type of texture. Backends other than GL currently only use the 2D value but the type must
+ * still be known at the API-neutral layer as it is used to determine whether MIP maps, renderability,
+ * and sampling parameters are legal for proxies that will be instantiated with wrapped textures.
+ */
+enum class GrTextureType {
+ kNone,
+ k2D,
+ /* Rectangle uses unnormalized texture coordinates. */
+ kRectangle,
+ kExternal
+};
+
+enum GrShaderType {
+ kVertex_GrShaderType,
+ kFragment_GrShaderType,
+
+ kLastkFragment_GrShaderType = kFragment_GrShaderType
+};
+static const int kGrShaderTypeCount = kLastkFragment_GrShaderType + 1;
+
+enum GrShaderFlags {
+ kNone_GrShaderFlags = 0,
+ kVertex_GrShaderFlag = 1 << 0,
+ kFragment_GrShaderFlag = 1 << 1
+};
+SK_MAKE_BITFIELD_OPS(GrShaderFlags)
+
+/** Rectangle and external textures only support the clamp wrap mode and do not support
+ * MIP maps.
+ */
+static inline bool GrTextureTypeHasRestrictedSampling(GrTextureType type) {
+ switch (type) {
+ case GrTextureType::k2D:
+ return false;
+ case GrTextureType::kRectangle:
+ return true;
+ case GrTextureType::kExternal:
+ return true;
+ default:
+ SK_ABORT("Unexpected texture type");
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Types used to describe format of vertices in arrays.
+ */
+enum GrVertexAttribType {
+ kFloat_GrVertexAttribType = 0,
+ kFloat2_GrVertexAttribType,
+ kFloat3_GrVertexAttribType,
+ kFloat4_GrVertexAttribType,
+ kHalf_GrVertexAttribType,
+ kHalf2_GrVertexAttribType,
+ kHalf4_GrVertexAttribType,
+
+ kInt2_GrVertexAttribType, // vector of 2 32-bit ints
+ kInt3_GrVertexAttribType, // vector of 3 32-bit ints
+ kInt4_GrVertexAttribType, // vector of 4 32-bit ints
+
+
+ kByte_GrVertexAttribType, // signed byte
+ kByte2_GrVertexAttribType, // vector of 2 8-bit signed bytes
+ kByte4_GrVertexAttribType, // vector of 4 8-bit signed bytes
+ kUByte_GrVertexAttribType, // unsigned byte
+ kUByte2_GrVertexAttribType, // vector of 2 8-bit unsigned bytes
+ kUByte4_GrVertexAttribType, // vector of 4 8-bit unsigned bytes
+
+ kUByte_norm_GrVertexAttribType, // unsigned byte, e.g. coverage, 0 -> 0.0f, 255 -> 1.0f.
+ kUByte4_norm_GrVertexAttribType, // vector of 4 unsigned bytes, e.g. colors, 0 -> 0.0f,
+ // 255 -> 1.0f.
+
+ kShort2_GrVertexAttribType, // vector of 2 16-bit shorts.
+ kShort4_GrVertexAttribType, // vector of 4 16-bit shorts.
+
+ kUShort2_GrVertexAttribType, // vector of 2 unsigned shorts. 0 -> 0, 65535 -> 65535.
+ kUShort2_norm_GrVertexAttribType, // vector of 2 unsigned shorts. 0 -> 0.0f, 65535 -> 1.0f.
+
+ kInt_GrVertexAttribType,
+ kUInt_GrVertexAttribType,
+
+ kUShort_norm_GrVertexAttribType,
+
+ kUShort4_norm_GrVertexAttribType, // vector of 4 unsigned shorts. 0 -> 0.0f, 65535 -> 1.0f.
+
+ kLast_GrVertexAttribType = kUShort4_norm_GrVertexAttribType
+};
+static const int kGrVertexAttribTypeCount = kLast_GrVertexAttribType + 1;
+
+//////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We have coverage effects that clip rendering to the edge of some geometric primitive.
+ * This enum specifies how that clipping is performed. Not all factories that take a
+ * GrClipEdgeType will succeed with all values and it is up to the caller to verify success.
+ */
+enum class GrClipEdgeType {
+ kFillBW,
+ kFillAA,
+ kInverseFillBW,
+ kInverseFillAA,
+
+ kLast = kInverseFillAA
+};
+static const int kGrClipEdgeTypeCnt = (int) GrClipEdgeType::kLast + 1;
+
+static constexpr bool GrClipEdgeTypeIsFill(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kFillAA == edgeType || GrClipEdgeType::kFillBW == edgeType);
+}
+
+static constexpr bool GrClipEdgeTypeIsInverseFill(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kInverseFillAA == edgeType ||
+ GrClipEdgeType::kInverseFillBW == edgeType);
+}
+
+static constexpr bool GrClipEdgeTypeIsAA(const GrClipEdgeType edgeType) {
+ return (GrClipEdgeType::kFillBW != edgeType &&
+ GrClipEdgeType::kInverseFillBW != edgeType);
+}
+
+static inline GrClipEdgeType GrInvertClipEdgeType(const GrClipEdgeType edgeType) {
+ switch (edgeType) {
+ case GrClipEdgeType::kFillBW:
+ return GrClipEdgeType::kInverseFillBW;
+ case GrClipEdgeType::kFillAA:
+ return GrClipEdgeType::kInverseFillAA;
+ case GrClipEdgeType::kInverseFillBW:
+ return GrClipEdgeType::kFillBW;
+ case GrClipEdgeType::kInverseFillAA:
+ return GrClipEdgeType::kFillAA;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Indicates the type of pending IO operations that can be recorded for gpu resources.
+ */
+enum GrIOType {
+ kRead_GrIOType,
+ kWrite_GrIOType,
+ kRW_GrIOType
+};
+
+/**
+ * Indicates the type of data that a GPU buffer will be used for.
+ */
+enum class GrGpuBufferType {
+ kVertex,
+ kIndex,
+ kDrawIndirect,
+ kXferCpuToGpu,
+ kXferGpuToCpu,
+ kUniform,
+};
+static const constexpr int kGrGpuBufferTypeCount = static_cast<int>(GrGpuBufferType::kUniform) + 1;
+
+/**
+ * Provides a performance hint regarding the frequency at which a data store will be accessed.
+ */
+enum GrAccessPattern {
+ /** Data store will be respecified repeatedly and used many times. */
+ kDynamic_GrAccessPattern,
+ /** Data store will be specified once and used many times. (Thus disqualified from caching.) */
+ kStatic_GrAccessPattern,
+ /** Data store will be specified once and used at most a few times. (Also can't be cached.) */
+ kStream_GrAccessPattern,
+
+ kLast_GrAccessPattern = kStream_GrAccessPattern
+};
+
+// Flags shared between the GrSurface & GrSurfaceProxy class hierarchies
+enum class GrInternalSurfaceFlags {
+ kNone = 0,
+
+ // Texture-level
+
+ // Means the pixels in the texture are read-only. Cannot also be a GrRenderTarget[Proxy].
+ kReadOnly = 1 << 0,
+
+ // RT-level
+
+ // This flag is for use with GL only. It tells us that the internal render target wraps FBO 0.
+ kGLRTFBOIDIs0 = 1 << 1,
+
+ // This means the render target is multisampled, and internally holds a non-msaa texture for
+ // resolving into. The render target resolves itself by blitting into this internal texture.
+ // (asTexture() might or might not return the internal texture, but if it does, we always
+ // resolve the render target before accessing this texture's data.)
+ kRequiresManualMSAAResolve = 1 << 2,
+
+ // This means the pixels in the render target are write-only. This is used for Dawn and Metal
+ // swap chain targets which can be rendered to, but not read or copied.
+ kFramebufferOnly = 1 << 3,
+
+ // This is a Vulkan only flag. If set the surface can be used as an input attachment in a
+ // shader. This is used for doing in shader blending where we want to sample from the same
+ // image we are drawing to.
+ kVkRTSupportsInputAttachment = 1 << 4,
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrInternalSurfaceFlags)
+
+// 'GR_MAKE_BITFIELD_CLASS_OPS' defines the & operator on GrInternalSurfaceFlags to return bool.
+// We want to find the bitwise & with these masks, so we declare them as ints.
+constexpr static int kGrInternalTextureFlagsMask = static_cast<int>(
+ GrInternalSurfaceFlags::kReadOnly);
+
+// We don't include kVkRTSupportsInputAttachment in this mask since we check it manually. We don't
+// require that both the surface and proxy have matching values for this flag. Instead we require
+// if the proxy has it set then the surface must also have it set. All other flags listed here must
+// match on the proxy and surface.
+// TODO: Add back kFramebufferOnly flag here once we update SkSurfaceCharacterization to take it
+// as a flag. skbug.com/10672
+constexpr static int kGrInternalRenderTargetFlagsMask = static_cast<int>(
+ GrInternalSurfaceFlags::kGLRTFBOIDIs0 |
+ GrInternalSurfaceFlags::kRequiresManualMSAAResolve/* |
+ GrInternalSurfaceFlags::kFramebufferOnly*/);
+
+constexpr static int kGrInternalTextureRenderTargetFlagsMask =
+ kGrInternalTextureFlagsMask | kGrInternalRenderTargetFlagsMask;
+
+#ifdef SK_DEBUG
+// Takes a pointer to a GrCaps, and will suppress prints if required
+#define GrCapsDebugf(caps, ...) if (!(caps)->suppressPrints()) SkDebugf(__VA_ARGS__)
+#else
+#define GrCapsDebugf(caps, ...) do {} while (0)
+#endif
+
+/**
+ * Specifies if the holder owns the backend, OpenGL or Vulkan, object.
+ */
+enum class GrBackendObjectOwnership : bool {
+ /** Holder does not destroy the backend object. */
+ kBorrowed = false,
+ /** Holder destroys the backend object. */
+ kOwned = true
+};
+
+/*
+ * Object for CPU-GPU synchronization
+ */
+typedef uint64_t GrFence;
+
+/**
+ * Used to include or exclude specific GPU path renderers for testing purposes.
+ */
+enum class GpuPathRenderers {
+ kNone = 0, // Always use software masks and/or DefaultPathRenderer.
+ kDashLine = 1 << 0,
+ kAtlas = 1 << 1,
+ kTessellation = 1 << 2,
+ kCoverageCounting = 1 << 3,
+ kAAHairline = 1 << 4,
+ kAAConvex = 1 << 5,
+ kAALinearizing = 1 << 6,
+ kSmall = 1 << 7,
+ kTriangulating = 1 << 8,
+ kDefault = ((1 << 9) - 1) // All path renderers.
+};
+
+/**
+ * Used to describe the current state of Mips on a GrTexture
+ */
+enum class GrMipmapStatus {
+ kNotAllocated, // Mips have not been allocated
+ kDirty, // Mips are allocated but the full mip tree does not have valid data
+ kValid, // All levels fully allocated and have valid data in them
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GpuPathRenderers)
+
+/**
+ * Like SkColorType this describes a layout of pixel data in CPU memory. It specifies the channels,
+ * their type, and width. This exists so that the GPU backend can have private types that have no
+ * analog in the public facing SkColorType enum and omit types not implemented in the GPU backend.
+ * It does not refer to a texture format and the mapping to texture formats may be many-to-many.
+ * It does not specify the sRGB encoding of the stored values. The components are listed in order of
+ * where they appear in memory. In other words the first component listed is in the low bits and
+ * the last component in the high bits.
+ */
+enum class GrColorType {
+ kUnknown,
+ kAlpha_8,
+ kBGR_565,
+ kABGR_4444, // This name differs from SkColorType. kARGB_4444_SkColorType is misnamed.
+ kRGBA_8888,
+ kRGBA_8888_SRGB,
+ kRGB_888x,
+ kRG_88,
+ kBGRA_8888,
+ kRGBA_1010102,
+ kBGRA_1010102,
+ kGray_8,
+ kGrayAlpha_88,
+ kAlpha_F16,
+ kRGBA_F16,
+ kRGBA_F16_Clamped,
+ kRGBA_F32,
+
+ kAlpha_16,
+ kRG_1616,
+ kRG_F16,
+ kRGBA_16161616,
+
+ // Unusual types that come up after reading back in cases where we are reassigning the meaning
+ // of a texture format's channels to use for a particular color format but have to read back the
+ // data to a full RGBA quadruple. (e.g. using a R8 texture format as A8 color type but the API
+ // only supports reading to RGBA8.) None of these have SkColorType equivalents.
+ kAlpha_8xxx,
+ kAlpha_F32xxx,
+ kGray_8xxx,
+ kR_8xxx,
+
+ // Types used to initialize backend textures.
+ kRGB_888,
+ kR_8,
+ kR_16,
+ kR_F16,
+ kGray_F16,
+ kBGRA_4444,
+ kARGB_4444,
+
+ kLast = kARGB_4444
+};
+
+static const int kGrColorTypeCnt = static_cast<int>(GrColorType::kLast) + 1;
+
+static constexpr SkColorType GrColorTypeToSkColorType(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_8: return kAlpha_8_SkColorType;
+ case GrColorType::kBGR_565: return kRGB_565_SkColorType;
+ case GrColorType::kABGR_4444: return kARGB_4444_SkColorType;
+ case GrColorType::kRGBA_8888: return kRGBA_8888_SkColorType;
+ case GrColorType::kRGBA_8888_SRGB: return kSRGBA_8888_SkColorType;
+ case GrColorType::kRGB_888x: return kRGB_888x_SkColorType;
+ case GrColorType::kRG_88: return kR8G8_unorm_SkColorType;
+ case GrColorType::kBGRA_8888: return kBGRA_8888_SkColorType;
+ case GrColorType::kRGBA_1010102: return kRGBA_1010102_SkColorType;
+ case GrColorType::kBGRA_1010102: return kBGRA_1010102_SkColorType;
+ case GrColorType::kGray_8: return kGray_8_SkColorType;
+ case GrColorType::kGrayAlpha_88: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_F16: return kA16_float_SkColorType;
+ case GrColorType::kRGBA_F16: return kRGBA_F16_SkColorType;
+ case GrColorType::kRGBA_F16_Clamped: return kRGBA_F16Norm_SkColorType;
+ case GrColorType::kRGBA_F32: return kRGBA_F32_SkColorType;
+ case GrColorType::kAlpha_8xxx: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_F32xxx: return kUnknown_SkColorType;
+ case GrColorType::kGray_8xxx: return kUnknown_SkColorType;
+ case GrColorType::kR_8xxx: return kUnknown_SkColorType;
+ case GrColorType::kAlpha_16: return kA16_unorm_SkColorType;
+ case GrColorType::kRG_1616: return kR16G16_unorm_SkColorType;
+ case GrColorType::kRGBA_16161616: return kR16G16B16A16_unorm_SkColorType;
+ case GrColorType::kRG_F16: return kR16G16_float_SkColorType;
+ case GrColorType::kRGB_888: return kUnknown_SkColorType;
+ case GrColorType::kR_8: return kR8_unorm_SkColorType;
+ case GrColorType::kR_16: return kUnknown_SkColorType;
+ case GrColorType::kR_F16: return kUnknown_SkColorType;
+ case GrColorType::kGray_F16: return kUnknown_SkColorType;
+ case GrColorType::kARGB_4444: return kUnknown_SkColorType;
+ case GrColorType::kBGRA_4444: return kUnknown_SkColorType;
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr GrColorType SkColorTypeToGrColorType(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return GrColorType::kUnknown;
+ case kAlpha_8_SkColorType: return GrColorType::kAlpha_8;
+ case kRGB_565_SkColorType: return GrColorType::kBGR_565;
+ case kARGB_4444_SkColorType: return GrColorType::kABGR_4444;
+ case kRGBA_8888_SkColorType: return GrColorType::kRGBA_8888;
+ case kSRGBA_8888_SkColorType: return GrColorType::kRGBA_8888_SRGB;
+ case kRGB_888x_SkColorType: return GrColorType::kRGB_888x;
+ case kBGRA_8888_SkColorType: return GrColorType::kBGRA_8888;
+ case kGray_8_SkColorType: return GrColorType::kGray_8;
+ case kRGBA_F16Norm_SkColorType: return GrColorType::kRGBA_F16_Clamped;
+ case kRGBA_F16_SkColorType: return GrColorType::kRGBA_F16;
+ case kRGBA_1010102_SkColorType: return GrColorType::kRGBA_1010102;
+ case kRGB_101010x_SkColorType: return GrColorType::kUnknown;
+ case kBGRA_1010102_SkColorType: return GrColorType::kBGRA_1010102;
+ case kBGR_101010x_SkColorType: return GrColorType::kUnknown;
+ case kBGR_101010x_XR_SkColorType: return GrColorType::kUnknown;
+ case kRGBA_F32_SkColorType: return GrColorType::kRGBA_F32;
+ case kR8G8_unorm_SkColorType: return GrColorType::kRG_88;
+ case kA16_unorm_SkColorType: return GrColorType::kAlpha_16;
+ case kR16G16_unorm_SkColorType: return GrColorType::kRG_1616;
+ case kA16_float_SkColorType: return GrColorType::kAlpha_F16;
+ case kR16G16_float_SkColorType: return GrColorType::kRG_F16;
+ case kR16G16B16A16_unorm_SkColorType: return GrColorType::kRGBA_16161616;
+ case kR8_unorm_SkColorType: return GrColorType::kR_8;
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr uint32_t GrColorTypeChannelFlags(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return 0;
+ case GrColorType::kAlpha_8: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kBGR_565: return kRGB_SkColorChannelFlags;
+ case GrColorType::kABGR_4444: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_8888: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_8888_SRGB: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGB_888x: return kRGB_SkColorChannelFlags;
+ case GrColorType::kRG_88: return kRG_SkColorChannelFlags;
+ case GrColorType::kBGRA_8888: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_1010102: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kBGRA_1010102: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kGray_8: return kGray_SkColorChannelFlag;
+ case GrColorType::kGrayAlpha_88: return kGrayAlpha_SkColorChannelFlags;
+ case GrColorType::kAlpha_F16: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kRGBA_F16: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_F16_Clamped: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRGBA_F32: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kAlpha_8xxx: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kAlpha_F32xxx: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kGray_8xxx: return kGray_SkColorChannelFlag;
+ case GrColorType::kR_8xxx: return kRed_SkColorChannelFlag;
+ case GrColorType::kAlpha_16: return kAlpha_SkColorChannelFlag;
+ case GrColorType::kRG_1616: return kRG_SkColorChannelFlags;
+ case GrColorType::kRGBA_16161616: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kRG_F16: return kRG_SkColorChannelFlags;
+ case GrColorType::kRGB_888: return kRGB_SkColorChannelFlags;
+ case GrColorType::kR_8: return kRed_SkColorChannelFlag;
+ case GrColorType::kR_16: return kRed_SkColorChannelFlag;
+ case GrColorType::kR_F16: return kRed_SkColorChannelFlag;
+ case GrColorType::kGray_F16: return kGray_SkColorChannelFlag;
+ case GrColorType::kARGB_4444: return kRGBA_SkColorChannelFlags;
+ case GrColorType::kBGRA_4444: return kRGBA_SkColorChannelFlags;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Describes the encoding of channel data in a GrColorType.
+ */
+enum class GrColorTypeEncoding {
+ kUnorm,
+ kSRGBUnorm,
+ // kSnorm,
+ kFloat,
+ // kSint
+ // kUint
+};
+
+/**
+ * Describes a GrColorType by how many bits are used for each color component and how they are
+ * encoded. Currently all the non-zero channels share a single GrColorTypeEncoding. This could be
+ * expanded to store separate encodings and to indicate which bits belong to which components.
+ */
+class GrColorFormatDesc {
+public:
+ static constexpr GrColorFormatDesc MakeRGBA(int rgba, GrColorTypeEncoding e) {
+ return {rgba, rgba, rgba, rgba, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeRGBA(int rgb, int a, GrColorTypeEncoding e) {
+ return {rgb, rgb, rgb, a, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeRGB(int rgb, GrColorTypeEncoding e) {
+ return {rgb, rgb, rgb, 0, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeRGB(int r, int g, int b, GrColorTypeEncoding e) {
+ return {r, g, b, 0, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeAlpha(int a, GrColorTypeEncoding e) {
+ return {0, 0, 0, a, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeR(int r, GrColorTypeEncoding e) {
+ return {r, 0, 0, 0, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeRG(int rg, GrColorTypeEncoding e) {
+ return {rg, rg, 0, 0, 0, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeGray(int grayBits, GrColorTypeEncoding e) {
+ return {0, 0, 0, 0, grayBits, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeGrayAlpha(int grayAlpha, GrColorTypeEncoding e) {
+ return {0, 0, 0, 0, grayAlpha, e};
+ }
+
+ static constexpr GrColorFormatDesc MakeInvalid() { return {}; }
+
+ constexpr int r() const { return fRBits; }
+ constexpr int g() const { return fGBits; }
+ constexpr int b() const { return fBBits; }
+ constexpr int a() const { return fABits; }
+ constexpr int operator[](int c) const {
+ switch (c) {
+ case 0: return this->r();
+ case 1: return this->g();
+ case 2: return this->b();
+ case 3: return this->a();
+ }
+ SkUNREACHABLE;
+ }
+
+ constexpr int gray() const { return fGrayBits; }
+
+ constexpr GrColorTypeEncoding encoding() const { return fEncoding; }
+
+private:
+ int fRBits = 0;
+ int fGBits = 0;
+ int fBBits = 0;
+ int fABits = 0;
+ int fGrayBits = 0;
+ GrColorTypeEncoding fEncoding = GrColorTypeEncoding::kUnorm;
+
+ constexpr GrColorFormatDesc() = default;
+
+ constexpr GrColorFormatDesc(int r, int g, int b, int a, int gray, GrColorTypeEncoding encoding)
+ : fRBits(r), fGBits(g), fBBits(b), fABits(a), fGrayBits(gray), fEncoding(encoding) {
+ SkASSERT(r >= 0 && g >= 0 && b >= 0 && a >= 0 && gray >= 0);
+ SkASSERT(!gray || (!r && !g && !b));
+ SkASSERT(r || g || b || a || gray);
+ }
+};
+
+static constexpr GrColorFormatDesc GrGetColorTypeDesc(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown:
+ return GrColorFormatDesc::MakeInvalid();
+ case GrColorType::kAlpha_8:
+ return GrColorFormatDesc::MakeAlpha(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGR_565:
+ return GrColorFormatDesc::MakeRGB(5, 6, 5, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kABGR_4444:
+ return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_8888:
+ return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_8888_SRGB:
+ return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kSRGBUnorm);
+ case GrColorType::kRGB_888x:
+ return GrColorFormatDesc::MakeRGB(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_88:
+ return GrColorFormatDesc::MakeRG(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGRA_8888:
+ return GrColorFormatDesc::MakeRGBA(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_1010102:
+ return GrColorFormatDesc::MakeRGBA(10, 2, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGRA_1010102:
+ return GrColorFormatDesc::MakeRGBA(10, 2, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kGray_8:
+ return GrColorFormatDesc::MakeGray(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kGrayAlpha_88:
+ return GrColorFormatDesc::MakeGrayAlpha(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_F16:
+ return GrColorFormatDesc::MakeAlpha(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F16:
+ return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F16_Clamped:
+ return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGBA_F32:
+ return GrColorFormatDesc::MakeRGBA(32, GrColorTypeEncoding::kFloat);
+ case GrColorType::kAlpha_8xxx:
+ return GrColorFormatDesc::MakeAlpha(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_F32xxx:
+ return GrColorFormatDesc::MakeAlpha(32, GrColorTypeEncoding::kFloat);
+ case GrColorType::kGray_8xxx:
+ return GrColorFormatDesc::MakeGray(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kR_8xxx:
+ return GrColorFormatDesc::MakeR(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kAlpha_16:
+ return GrColorFormatDesc::MakeAlpha(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_1616:
+ return GrColorFormatDesc::MakeRG(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRGBA_16161616:
+ return GrColorFormatDesc::MakeRGBA(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kRG_F16:
+ return GrColorFormatDesc::MakeRG(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kRGB_888:
+ return GrColorFormatDesc::MakeRGB(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kR_8:
+ return GrColorFormatDesc::MakeR(8, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kR_16:
+ return GrColorFormatDesc::MakeR(16, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kR_F16:
+ return GrColorFormatDesc::MakeR(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kGray_F16:
+ return GrColorFormatDesc::MakeGray(16, GrColorTypeEncoding::kFloat);
+ case GrColorType::kARGB_4444:
+ return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm);
+ case GrColorType::kBGRA_4444:
+ return GrColorFormatDesc::MakeRGBA(4, GrColorTypeEncoding::kUnorm);
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr GrClampType GrColorTypeClampType(GrColorType colorType) {
+ if (GrGetColorTypeDesc(colorType).encoding() == GrColorTypeEncoding::kUnorm ||
+ GrGetColorTypeDesc(colorType).encoding() == GrColorTypeEncoding::kSRGBUnorm) {
+ return GrClampType::kAuto;
+ }
+ return GrColorType::kRGBA_F16_Clamped == colorType ? GrClampType::kManual : GrClampType::kNone;
+}
+
+// Consider a color type "wider" than n if it has more than n bits for any of its representable
+// channels.
+static constexpr bool GrColorTypeIsWiderThan(GrColorType colorType, int n) {
+ SkASSERT(n > 0);
+ auto desc = GrGetColorTypeDesc(colorType);
+ return (desc.r() && desc.r() > n )||
+ (desc.g() && desc.g() > n) ||
+ (desc.b() && desc.b() > n) ||
+ (desc.a() && desc.a() > n) ||
+ (desc.gray() && desc.gray() > n);
+}
+
+static constexpr bool GrColorTypeIsAlphaOnly(GrColorType ct) {
+ return GrColorTypeChannelFlags(ct) == kAlpha_SkColorChannelFlag;
+}
+
+static constexpr bool GrColorTypeHasAlpha(GrColorType ct) {
+ return GrColorTypeChannelFlags(ct) & kAlpha_SkColorChannelFlag;
+}
+
+static constexpr size_t GrColorTypeBytesPerPixel(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return 0;
+ case GrColorType::kAlpha_8: return 1;
+ case GrColorType::kBGR_565: return 2;
+ case GrColorType::kABGR_4444: return 2;
+ case GrColorType::kRGBA_8888: return 4;
+ case GrColorType::kRGBA_8888_SRGB: return 4;
+ case GrColorType::kRGB_888x: return 4;
+ case GrColorType::kRG_88: return 2;
+ case GrColorType::kBGRA_8888: return 4;
+ case GrColorType::kRGBA_1010102: return 4;
+ case GrColorType::kBGRA_1010102: return 4;
+ case GrColorType::kGray_8: return 1;
+ case GrColorType::kGrayAlpha_88: return 2;
+ case GrColorType::kAlpha_F16: return 2;
+ case GrColorType::kRGBA_F16: return 8;
+ case GrColorType::kRGBA_F16_Clamped: return 8;
+ case GrColorType::kRGBA_F32: return 16;
+ case GrColorType::kAlpha_8xxx: return 4;
+ case GrColorType::kAlpha_F32xxx: return 16;
+ case GrColorType::kGray_8xxx: return 4;
+ case GrColorType::kR_8xxx: return 4;
+ case GrColorType::kAlpha_16: return 2;
+ case GrColorType::kRG_1616: return 4;
+ case GrColorType::kRGBA_16161616: return 8;
+ case GrColorType::kRG_F16: return 4;
+ case GrColorType::kRGB_888: return 3;
+ case GrColorType::kR_8: return 1;
+ case GrColorType::kR_16: return 2;
+ case GrColorType::kR_F16: return 2;
+ case GrColorType::kGray_F16: return 2;
+ case GrColorType::kARGB_4444: return 2;
+ case GrColorType::kBGRA_4444: return 2;
+ }
+ SkUNREACHABLE;
+}
+
+// In general we try to not mix CompressionType and ColorType, but currently SkImage still requires
+// an SkColorType even for CompressedTypes so we need some conversion.
+static constexpr SkColorType GrCompressionTypeToSkColorType(SkTextureCompressionType compression) {
+ switch (compression) {
+ case SkTextureCompressionType::kNone: return kUnknown_SkColorType;
+ case SkTextureCompressionType::kETC2_RGB8_UNORM: return kRGB_888x_SkColorType;
+ case SkTextureCompressionType::kBC1_RGB8_UNORM: return kRGB_888x_SkColorType;
+ case SkTextureCompressionType::kBC1_RGBA8_UNORM: return kRGBA_8888_SkColorType;
+ }
+
+ SkUNREACHABLE;
+}
+
+enum class GrDstSampleFlags {
+ kNone = 0,
+ kRequiresTextureBarrier = 1 << 0,
+ kAsInputAttachment = 1 << 1,
+};
+GR_MAKE_BITFIELD_CLASS_OPS(GrDstSampleFlags)
+
+using GrVisitProxyFunc = std::function<void(GrSurfaceProxy*, GrMipmapped)>;
+
+#if defined(SK_DEBUG) || GR_TEST_UTILS || defined(SK_ENABLE_DUMP_GPU)
+static constexpr const char* GrBackendApiToStr(GrBackendApi api) {
+ switch (api) {
+ case GrBackendApi::kOpenGL: return "OpenGL";
+ case GrBackendApi::kVulkan: return "Vulkan";
+ case GrBackendApi::kMetal: return "Metal";
+ case GrBackendApi::kDirect3D: return "Direct3D";
+ case GrBackendApi::kDawn: return "Dawn";
+ case GrBackendApi::kMock: return "Mock";
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr const char* GrColorTypeToStr(GrColorType ct) {
+ switch (ct) {
+ case GrColorType::kUnknown: return "kUnknown";
+ case GrColorType::kAlpha_8: return "kAlpha_8";
+ case GrColorType::kBGR_565: return "kRGB_565";
+ case GrColorType::kABGR_4444: return "kABGR_4444";
+ case GrColorType::kRGBA_8888: return "kRGBA_8888";
+ case GrColorType::kRGBA_8888_SRGB: return "kRGBA_8888_SRGB";
+ case GrColorType::kRGB_888x: return "kRGB_888x";
+ case GrColorType::kRG_88: return "kRG_88";
+ case GrColorType::kBGRA_8888: return "kBGRA_8888";
+ case GrColorType::kRGBA_1010102: return "kRGBA_1010102";
+ case GrColorType::kBGRA_1010102: return "kBGRA_1010102";
+ case GrColorType::kGray_8: return "kGray_8";
+ case GrColorType::kGrayAlpha_88: return "kGrayAlpha_88";
+ case GrColorType::kAlpha_F16: return "kAlpha_F16";
+ case GrColorType::kRGBA_F16: return "kRGBA_F16";
+ case GrColorType::kRGBA_F16_Clamped: return "kRGBA_F16_Clamped";
+ case GrColorType::kRGBA_F32: return "kRGBA_F32";
+ case GrColorType::kAlpha_8xxx: return "kAlpha_8xxx";
+ case GrColorType::kAlpha_F32xxx: return "kAlpha_F32xxx";
+ case GrColorType::kGray_8xxx: return "kGray_8xxx";
+ case GrColorType::kR_8xxx: return "kR_8xxx";
+ case GrColorType::kAlpha_16: return "kAlpha_16";
+ case GrColorType::kRG_1616: return "kRG_1616";
+ case GrColorType::kRGBA_16161616: return "kRGBA_16161616";
+ case GrColorType::kRG_F16: return "kRG_F16";
+ case GrColorType::kRGB_888: return "kRGB_888";
+ case GrColorType::kR_8: return "kR_8";
+ case GrColorType::kR_16: return "kR_16";
+ case GrColorType::kR_F16: return "kR_F16";
+ case GrColorType::kGray_F16: return "kGray_F16";
+ case GrColorType::kARGB_4444: return "kARGB_4444";
+ case GrColorType::kBGRA_4444: return "kBGRA_4444";
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr const char* GrCompressionTypeToStr(SkTextureCompressionType compression) {
+ switch (compression) {
+ case SkTextureCompressionType::kNone: return "kNone";
+ case SkTextureCompressionType::kETC2_RGB8_UNORM: return "kETC2_RGB8_UNORM";
+ case SkTextureCompressionType::kBC1_RGB8_UNORM: return "kBC1_RGB8_UNORM";
+ case SkTextureCompressionType::kBC1_RGBA8_UNORM: return "kBC1_RGBA8_UNORM";
+ }
+ SkUNREACHABLE;
+}
+
+static constexpr const char* GrSurfaceOriginToStr(GrSurfaceOrigin origin) {
+ switch (origin) {
+ case kTopLeft_GrSurfaceOrigin: return "kTopLeft";
+ case kBottomLeft_GrSurfaceOrigin: return "kBottomLeft";
+ }
+ SkUNREACHABLE;
+}
+#endif
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/ganesh/GrVkTypesPriv.h b/gfx/skia/skia/include/private/gpu/ganesh/GrVkTypesPriv.h
new file mode 100644
index 0000000000..f300a71396
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/ganesh/GrVkTypesPriv.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTypesPriv_DEFINED
+#define GrVkTypesPriv_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/gpu/vk/GrVkTypes.h"
+
+namespace skgpu {
+class MutableTextureStateRef;
+}
+
+
+// This struct is used to store the actual information about the Vulkan backend image on the
+// GrBackendTexture and GrBackendRenderTarget. When a client calls getVkImageInfo on a
+// GrBackendTexture/RenderTarget, we use the GrVkBackendSurfaceInfo to create a snapshot
+// GrVkImageInfo object. Internally, this uses a ref-counted GrVkImageLayout object to track the
+// current VkImageLayout which can be shared with an internal GrVkImage so that layout updates can
+// be seen by all users of the image.
+struct GrVkBackendSurfaceInfo {
+ GrVkBackendSurfaceInfo(GrVkImageInfo info) : fImageInfo(info) {}
+
+ void cleanup();
+
+ GrVkBackendSurfaceInfo& operator=(const GrVkBackendSurfaceInfo&) = delete;
+
+ // Assigns the passed in GrVkBackendSurfaceInfo to this object. If isValid is true, we will also
+ // attempt to unref the old fLayout on this object.
+ void assign(const GrVkBackendSurfaceInfo&, bool isValid);
+
+ GrVkImageInfo snapImageInfo(const skgpu::MutableTextureStateRef*) const;
+
+ bool isProtected() const { return fImageInfo.fProtected == skgpu::Protected::kYes; }
+#if GR_TEST_UTILS
+ bool operator==(const GrVkBackendSurfaceInfo& that) const;
+#endif
+
+private:
+ GrVkImageInfo fImageInfo;
+};
+
+struct GrVkImageSpec {
+ GrVkImageSpec()
+ : fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fImageUsageFlags(0)
+ , fSharingMode(VK_SHARING_MODE_EXCLUSIVE) {}
+
+ GrVkImageSpec(const GrVkSurfaceInfo& info)
+ : fImageTiling(info.fImageTiling)
+ , fFormat(info.fFormat)
+ , fImageUsageFlags(info.fImageUsageFlags)
+ , fYcbcrConversionInfo(info.fYcbcrConversionInfo)
+ , fSharingMode(info.fSharingMode) {}
+
+ VkImageTiling fImageTiling;
+ VkFormat fFormat;
+ VkImageUsageFlags fImageUsageFlags;
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+ VkSharingMode fSharingMode;
+};
+
+GrVkSurfaceInfo GrVkImageSpecToSurfaceInfo(const GrVkImageSpec& vkSpec,
+ uint32_t sampleCount,
+ uint32_t levelCount,
+ skgpu::Protected isProtected);
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/graphite/DawnTypesPriv.h b/gfx/skia/skia/include/private/gpu/graphite/DawnTypesPriv.h
new file mode 100644
index 0000000000..bbf401c95e
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/graphite/DawnTypesPriv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_DawnTypesPriv_DEFINED
+#define skgpu_graphite_DawnTypesPriv_DEFINED
+
+#include "include/gpu/graphite/dawn/DawnTypes.h"
+
+namespace skgpu::graphite {
+
+struct DawnTextureSpec {
+ DawnTextureSpec()
+ : fFormat(wgpu::TextureFormat::Undefined)
+ , fUsage(wgpu::TextureUsage::None) {}
+ DawnTextureSpec(const DawnTextureInfo& info)
+ : fFormat(info.fFormat)
+ , fUsage(info.fUsage) {}
+
+ bool operator==(const DawnTextureSpec& that) const {
+ return fUsage == that.fUsage &&
+ fFormat == that.fFormat;
+ }
+
+ wgpu::TextureFormat fFormat;
+ wgpu::TextureUsage fUsage;
+};
+
+DawnTextureInfo DawnTextureSpecToTextureInfo(const DawnTextureSpec& dawnSpec,
+ uint32_t sampleCount,
+ Mipmapped mipmapped);
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_DawnTypesPriv_DEFINED
diff --git a/gfx/skia/skia/include/private/gpu/graphite/MtlGraphiteTypesPriv.h b/gfx/skia/skia/include/private/gpu/graphite/MtlGraphiteTypesPriv.h
new file mode 100644
index 0000000000..bf26aa2a78
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/graphite/MtlGraphiteTypesPriv.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_MtlGraphiteTypesPriv_DEFINED
+#define skgpu_graphite_MtlGraphiteTypesPriv_DEFINED
+
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/mtl/MtlGraphiteTypes.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef __APPLE__
+
+#include <TargetConditionals.h>
+
+// We're using the MSL version as shorthand for the Metal SDK version here
+#if defined(SK_BUILD_FOR_MAC)
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 110000
+#define GR_METAL_SDK_VERSION 230
+#elif __MAC_OS_X_VERSION_MAX_ALLOWED >= 120000
+#define GR_METAL_SDK_VERSION 240
+#else
+#error Must use at least 11.00 SDK to build Metal backend for MacOS
+#endif
+#else
+#if __IPHONE_OS_VERSION_MAX_ALLOWED >= 140000 || __TV_OS_VERSION_MAX_ALLOWED >= 140000
+#define GR_METAL_SDK_VERSION 230
+#elif __IPHONE_OS_VERSION_MAX_ALLOWED >= 150000 || __TV_OS_VERSION_MAX_ALLOWED >= 150000
+#define GR_METAL_SDK_VERSION 240
+#else
+#error Must use at least 14.00 SDK to build Metal backend for iOS
+#endif
+#endif
+
+#endif // __APPLE__
+
+namespace skgpu::graphite {
+
+struct MtlTextureSpec {
+ MtlTextureSpec()
+ : fFormat(0)
+ , fUsage(0)
+ , fStorageMode(0)
+ , fFramebufferOnly(false) {}
+ MtlTextureSpec(const MtlTextureInfo& info)
+ : fFormat(info.fFormat)
+ , fUsage(info.fUsage)
+ , fStorageMode(info.fStorageMode)
+ , fFramebufferOnly(info.fFramebufferOnly) {}
+
+ bool operator==(const MtlTextureSpec& that) const {
+ return fFormat == that.fFormat &&
+ fUsage == that.fUsage &&
+ fStorageMode == that.fStorageMode &&
+ fFramebufferOnly == that.fFramebufferOnly;
+ }
+
+ MtlPixelFormat fFormat;
+ MtlTextureUsage fUsage;
+ MtlStorageMode fStorageMode;
+ bool fFramebufferOnly;
+};
+
+MtlTextureInfo MtlTextureSpecToTextureInfo(const MtlTextureSpec& mtlSpec,
+ uint32_t sampleCount,
+ Mipmapped mipmapped);
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_MtlGraphiteTypesPriv_DEFINED
diff --git a/gfx/skia/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h b/gfx/skia/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h
new file mode 100644
index 0000000000..b4304e3ae8
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/graphite/VulkanGraphiteTypesPriv.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_graphite_VulkanGraphiteTypesPriv_DEFINED
+#define skgpu_graphite_VulkanGraphiteTypesPriv_DEFINED
+
+#include "include/gpu/graphite/vk/VulkanGraphiteTypes.h"
+
+namespace skgpu::graphite {
+
+struct VulkanTextureSpec {
+ VulkanTextureSpec()
+ : fFlags(0)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fImageUsageFlags(0)
+ , fSharingMode(VK_SHARING_MODE_EXCLUSIVE)
+ , fAspectMask(VK_IMAGE_ASPECT_COLOR_BIT) {}
+ VulkanTextureSpec(const VulkanTextureInfo& info)
+ : fFlags(info.fFlags)
+ , fFormat(info.fFormat)
+ , fImageTiling(info.fImageTiling)
+ , fImageUsageFlags(info.fImageUsageFlags)
+ , fSharingMode(info.fSharingMode)
+ , fAspectMask(info.fAspectMask) {}
+
+ bool operator==(const VulkanTextureSpec& that) const {
+ return fFlags == that.fFlags &&
+ fFormat == that.fFormat &&
+ fImageTiling == that.fImageTiling &&
+ fImageUsageFlags == that.fImageUsageFlags &&
+ fSharingMode == that.fSharingMode &&
+ fAspectMask == that.fAspectMask;
+ }
+
+ VkImageCreateFlags fFlags;
+ VkFormat fFormat;
+ VkImageTiling fImageTiling;
+ VkImageUsageFlags fImageUsageFlags;
+ VkSharingMode fSharingMode;
+ VkImageAspectFlags fAspectMask;
+ // GrVkYcbcrConversionInfo fYcbcrConversionInfo;
+};
+
+VulkanTextureInfo VulkanTextureSpecToTextureInfo(const VulkanTextureSpec& vkSpec,
+ uint32_t sampleCount,
+ Mipmapped mipmapped);
+
+} // namespace skgpu::graphite
+
+#endif // skgpu_graphite_VulkanGraphiteTypesPriv_DEFINED
diff --git a/gfx/skia/skia/include/private/gpu/vk/SkiaVulkan.h b/gfx/skia/skia/include/private/gpu/vk/SkiaVulkan.h
new file mode 100644
index 0000000000..ca4bcf108b
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/vk/SkiaVulkan.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkiaVulkan_DEFINED
+#define SkiaVulkan_DEFINED
+
+#include "include/core/SkTypes.h"
+
+// IWYU pragma: begin_exports
+
+#if SKIA_IMPLEMENTATION || !defined(SK_VULKAN)
+#include "include/third_party/vulkan/vulkan/vulkan_core.h"
+#else
+// For google3 builds we don't set SKIA_IMPLEMENTATION so we need to make sure that the vulkan
+// headers stay up to date for our needs
+#include <vulkan/vulkan_core.h>
+#endif
+
+#ifdef SK_BUILD_FOR_ANDROID
+// This is needed to get android extensions for external memory
+#if SKIA_IMPLEMENTATION || !defined(SK_VULKAN)
+#include "include/third_party/vulkan/vulkan/vulkan_android.h"
+#else
+// For google3 builds we don't set SKIA_IMPLEMENTATION so we need to make sure that the vulkan
+// headers stay up to date for our needs
+#include <vulkan/vulkan_android.h>
+#endif
+#endif
+
+// IWYU pragma: end_exports
+
+#endif
diff --git a/gfx/skia/skia/include/private/gpu/vk/VulkanTypesPriv.h b/gfx/skia/skia/include/private/gpu/vk/VulkanTypesPriv.h
new file mode 100644
index 0000000000..e99869ca1a
--- /dev/null
+++ b/gfx/skia/skia/include/private/gpu/vk/VulkanTypesPriv.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef skgpu_VulkanTypesPriv_DEFINED
+#define skgpu_VulkanTypesPriv_DEFINED
+
+#include "include/gpu/vk/VulkanTypes.h"
+
+#include <atomic>
+
+namespace skgpu {
+
+class VulkanMutableTextureState {
+public:
+ VulkanMutableTextureState(VkImageLayout layout, uint32_t queueFamilyIndex)
+ : fLayout(layout)
+ , fQueueFamilyIndex(queueFamilyIndex) {}
+
+ VulkanMutableTextureState& operator=(const VulkanMutableTextureState& that) {
+ fLayout = that.getImageLayout();
+ fQueueFamilyIndex = that.getQueueFamilyIndex();
+ return *this;
+ }
+
+ void setImageLayout(VkImageLayout layout) {
+ // Defaulting to use std::memory_order_seq_cst
+ fLayout.store(layout);
+ }
+
+ VkImageLayout getImageLayout() const {
+ // Defaulting to use std::memory_order_seq_cst
+ return fLayout.load();
+ }
+
+ void setQueueFamilyIndex(uint32_t queueFamilyIndex) {
+ // Defaulting to use std::memory_order_seq_cst
+ fQueueFamilyIndex.store(queueFamilyIndex);
+ }
+
+ uint32_t getQueueFamilyIndex() const {
+ // Defaulting to use std::memory_order_seq_cst
+ return fQueueFamilyIndex.load();
+ }
+
+private:
+ std::atomic<VkImageLayout> fLayout;
+ std::atomic<uint32_t> fQueueFamilyIndex;
+};
+
+} // namespace skgpu
+
+#endif // skgpu_VulkanTypesPriv_DEFINED
+
diff --git a/gfx/skia/skia/include/sksl/DSL.h b/gfx/skia/skia/include/sksl/DSL.h
new file mode 100644
index 0000000000..6b9ebd4727
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSL.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL
+#define SKSL_DSL
+
+#include "include/sksl/DSLBlock.h"
+#include "include/sksl/DSLCore.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLFunction.h"
+#include "include/sksl/DSLType.h"
+
+namespace SkSL {
+
+namespace dsl {
+
+using Block = DSLBlock;
+using Case = DSLCase;
+using Expression = DSLExpression;
+using Field = DSLField;
+using Function = DSLFunction;
+using GlobalVar = DSLGlobalVar;
+using Layout = DSLLayout;
+using Modifiers = DSLModifiers;
+using Parameter = DSLParameter;
+using Statement = DSLStatement;
+using Var = DSLVar;
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLBlock.h b/gfx/skia/skia/include/sksl/DSLBlock.h
new file mode 100644
index 0000000000..233236ae1e
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLBlock.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_BLOCK
+#define SKSL_DSL_BLOCK
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/DSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <memory>
+
+namespace SkSL {
+
+class Block;
+class SymbolTable;
+
+namespace dsl {
+
+class DSLBlock {
+public:
+ template<class... Statements>
+ DSLBlock(Statements... statements) {
+ fStatements.reserve_back(sizeof...(statements));
+ ((void)fStatements.push_back(DSLStatement(statements.release()).release()), ...);
+ }
+
+ DSLBlock(SkSL::StatementArray statements, std::shared_ptr<SymbolTable> symbols = nullptr,
+ Position pos = {});
+
+ DSLBlock(SkTArray<DSLStatement> statements, std::shared_ptr<SymbolTable> symbols = nullptr,
+ Position pos = {});
+
+ DSLBlock(DSLBlock&& other) = default;
+ DSLBlock& operator=(DSLBlock&& other) = default;
+
+ ~DSLBlock() = default;
+
+ void append(DSLStatement stmt);
+
+ std::unique_ptr<SkSL::Block> release();
+
+private:
+ SkSL::StatementArray fStatements;
+ std::shared_ptr<SkSL::SymbolTable> fSymbols;
+ Position fPosition;
+};
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLCase.h b/gfx/skia/skia/include/sksl/DSLCase.h
new file mode 100644
index 0000000000..06e7225f11
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLCase.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_CASE
+#define SKSL_DSL_CASE
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <utility>
+
+namespace SkSL {
+
+namespace dsl {
+
+class DSLCase {
+public:
+ // An empty expression means 'default:'.
+ template<class... Statements>
+ DSLCase(DSLExpression value, Statements... statements)
+ : fValue(std::move(value)) {
+ fStatements.reserve_back(sizeof...(statements));
+ ((void)fStatements.push_back(DSLStatement(std::move(statements)).release()), ...);
+ }
+
+ DSLCase(DSLExpression value, SkTArray<DSLStatement> statements,
+ Position pos = {});
+
+ DSLCase(DSLExpression value, SkSL::StatementArray statements,
+ Position pos = {});
+
+ DSLCase(DSLCase&&);
+
+ ~DSLCase();
+
+ DSLCase& operator=(DSLCase&&);
+
+ void append(DSLStatement stmt);
+
+private:
+ DSLExpression fValue;
+ SkSL::StatementArray fStatements;
+ Position fPosition;
+
+ friend class DSLCore;
+
+ template<class... Cases>
+ friend DSLStatement Switch(DSLExpression value, Cases... cases);
+};
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLCore.h b/gfx/skia/skia/include/sksl/DSLCore.h
new file mode 100644
index 0000000000..3d3408c307
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLCore.h
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_CORE
+#define SKSL_DSL_CORE
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLProgramKind.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/DSLCase.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLStatement.h"
+#include "include/sksl/DSLVar.h" // IWYU pragma: keep
+#include "include/sksl/SkSLPosition.h"
+
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+
+namespace SkSL {
+
+class Compiler;
+class ErrorReporter;
+struct Program;
+struct ProgramSettings;
+
+namespace dsl {
+
+class DSLField;
+class DSLModifiers;
+
+// When users import the DSL namespace via `using namespace SkSL::dsl`, we want the SwizzleComponent
+// Type enum to come into scope as well, so `Swizzle(var, X, Y, ONE)` can work as expected.
+// `namespace SkSL::SwizzleComponent` contains only an `enum Type`; this `using namespace` directive
+// shouldn't pollute the SkSL::dsl namespace with anything else.
+using namespace SkSL::SwizzleComponent;
+
+/**
+ * Starts DSL output on the current thread using the specified compiler. This must be called
+ * prior to any other DSL functions.
+ */
+void Start(SkSL::Compiler* compiler, SkSL::ProgramKind kind = SkSL::ProgramKind::kFragment);
+
+void Start(SkSL::Compiler* compiler, SkSL::ProgramKind kind, const SkSL::ProgramSettings& settings);
+
+/**
+ * Signals the end of DSL output. This must be called sometime between a call to Start() and the
+ * termination of the thread.
+ */
+void End();
+
+/**
+ * Returns all global elements (functions and global variables) as a self-contained Program. The
+ * optional source string is retained as the program's source. DSL programs do not normally have
+ * sources, but when a DSL program is produced from parsed program text (as in Parser), it may be
+ * important to retain it so that any std::string_views derived from it remain valid.
+ */
+std::unique_ptr<SkSL::Program> ReleaseProgram(std::unique_ptr<std::string> source = nullptr);
+
+/**
+ * Returns the ErrorReporter which will be notified of any errors that occur during DSL calls. The
+ * default error reporter aborts on any error.
+ */
+ErrorReporter& GetErrorReporter();
+
+/**
+ * Installs an ErrorReporter which will be notified of any errors that occur during DSL calls.
+ */
+void SetErrorReporter(ErrorReporter* errorReporter);
+
+/**
+ * #extension <name> : enable
+ */
+void AddExtension(std::string_view name, Position pos = {});
+
+/**
+ * break;
+ */
+DSLStatement Break(Position pos = {});
+
+/**
+ * continue;
+ */
+DSLStatement Continue(Position pos = {});
+
+/**
+ * Adds a modifiers declaration to the current program.
+ */
+void Declare(const DSLModifiers& modifiers, Position pos = {});
+
+/**
+ * Creates a local variable declaration statement.
+ */
+DSLStatement Declare(DSLVar& var, Position pos = {});
+
+/**
+ * Creates a local variable declaration statement containing multiple variables.
+ */
+DSLStatement Declare(SkTArray<DSLVar>& vars, Position pos = {});
+
+/**
+ * Declares a global variable.
+ */
+void Declare(DSLGlobalVar& var, Position pos = {});
+
+/**
+ * Declares a set of global variables.
+ */
+void Declare(SkTArray<DSLGlobalVar>& vars, Position pos = {});
+
+/**
+ * default: statements
+ */
+template<class... Statements>
+DSLCase Default(Statements... statements) {
+ return DSLCase(DSLExpression(), std::move(statements)...);
+}
+
+/**
+ * discard;
+ */
+DSLStatement Discard(Position pos = {});
+
+/**
+ * do stmt; while (test);
+ */
+DSLStatement Do(DSLStatement stmt, DSLExpression test, Position pos = {});
+
+/**
+ * for (initializer; test; next) stmt;
+ */
+DSLStatement For(DSLStatement initializer, DSLExpression test, DSLExpression next,
+ DSLStatement stmt, Position pos = {}, ForLoopPositions positions = {});
+
+/**
+ * if (test) ifTrue; [else ifFalse;]
+ */
+DSLStatement If(DSLExpression test, DSLStatement ifTrue, DSLStatement ifFalse = DSLStatement(),
+ Position pos = {});
+
+DSLExpression InterfaceBlock(const DSLModifiers& modifiers, std::string_view typeName,
+ SkTArray<DSLField> fields, std::string_view varName = "",
+ int arraySize = 0, Position pos = {});
+
+/**
+ * return [value];
+ */
+DSLStatement Return(DSLExpression value = DSLExpression(),
+ Position pos = {});
+
+/**
+ * test ? ifTrue : ifFalse
+ */
+DSLExpression Select(DSLExpression test, DSLExpression ifTrue, DSLExpression ifFalse,
+ Position = {});
+
+// Internal use only
+DSLStatement Switch(DSLExpression value, SkTArray<DSLCase> cases, Position pos = {});
+
+/**
+ * switch (value) { cases }
+ */
+template<class... Cases>
+DSLStatement Switch(DSLExpression value, Cases... cases) {
+ SkTArray<DSLCase> caseArray;
+ caseArray.reserve_back(sizeof...(cases));
+ (caseArray.push_back(std::move(cases)), ...);
+ return Switch(std::move(value), std::move(caseArray), Position{});
+}
+
+/**
+ * while (test) stmt;
+ */
+DSLStatement While(DSLExpression test, DSLStatement stmt,
+ Position pos = {});
+
+/**
+ * expression.xyz1
+ */
+DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ Position pos = {},
+ Position maskPos = {});
+
+DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ Position pos = {},
+ Position maskPos = {});
+
+DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ SkSL::SwizzleComponent::Type c,
+ Position pos = {},
+ Position maskPos = {});
+
+DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ SkSL::SwizzleComponent::Type c,
+ SkSL::SwizzleComponent::Type d,
+ Position pos = {},
+ Position maskPos = {});
+
+/**
+ * Returns the absolute value of x. If x is a vector, operates componentwise.
+ */
+DSLExpression Abs(DSLExpression x, Position pos = {});
+
+/**
+ * Returns true if all of the components of boolean vector x are true.
+ */
+DSLExpression All(DSLExpression x, Position pos = {});
+
+/**
+ * Returns true if any of the components of boolean vector x are true.
+ */
+DSLExpression Any(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the arctangent of y over x. Operates componentwise on vectors.
+ */
+DSLExpression Atan(DSLExpression y_over_x, Position pos = {});
+DSLExpression Atan(DSLExpression y, DSLExpression x, Position pos = {});
+
+/**
+ * Returns x rounded towards positive infinity. If x is a vector, operates componentwise.
+ */
+DSLExpression Ceil(DSLExpression x, Position pos = {});
+
+/**
+ * Returns x clamped to between min and max. If x is a vector, operates componentwise.
+ */
+DSLExpression Clamp(DSLExpression x, DSLExpression min, DSLExpression max,
+ Position pos = {});
+
+/**
+ * Returns the cosine of x. If x is a vector, operates componentwise.
+ */
+DSLExpression Cos(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the cross product of x and y.
+ */
+DSLExpression Cross(DSLExpression x, DSLExpression y, Position pos = {});
+
+/**
+ * Returns x converted from radians to degrees. If x is a vector, operates componentwise.
+ */
+DSLExpression Degrees(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the distance between x and y.
+ */
+DSLExpression Distance(DSLExpression x, DSLExpression y,
+ Position pos = {});
+
+/**
+ * Returns the dot product of x and y.
+ */
+DSLExpression Dot(DSLExpression x, DSLExpression y, Position pos = {});
+
+/**
+ * Returns a boolean vector indicating whether components of x are equal to the corresponding
+ * components of y.
+ */
+DSLExpression Equal(DSLExpression x, DSLExpression y, Position pos = {});
+
+/**
+ * Returns e^x. If x is a vector, operates componentwise.
+ */
+DSLExpression Exp(DSLExpression x, Position pos = {});
+
+/**
+ * Returns 2^x. If x is a vector, operates componentwise.
+ */
+DSLExpression Exp2(DSLExpression x, Position pos = {});
+
+/**
+ * If dot(i, nref) >= 0, returns n, otherwise returns -n.
+ */
+DSLExpression Faceforward(DSLExpression n, DSLExpression i, DSLExpression nref,
+ Position pos = {});
+
+/**
+ * Returns x rounded towards negative infinity. If x is a vector, operates componentwise.
+ */
+DSLExpression Floor(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the fractional part of x. If x is a vector, operates componentwise.
+ */
+DSLExpression Fract(DSLExpression x, Position pos = {});
+
+/**
+ * Returns a boolean vector indicating whether components of x are greater than the corresponding
+ * components of y.
+ */
+DSLExpression GreaterThan(DSLExpression x, DSLExpression y,
+ Position pos = {});
+
+/**
+ * Returns a boolean vector indicating whether components of x are greater than or equal to the
+ * corresponding components of y.
+ */
+DSLExpression GreaterThanEqual(DSLExpression x, DSLExpression y,
+ Position pos = {});
+
+/**
+ * Returns the 1/sqrt(x). If x is a vector, operates componentwise.
+ */
+DSLExpression Inversesqrt(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the inverse of the matrix x.
+ */
+DSLExpression Inverse(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the length of the vector x.
+ */
+DSLExpression Length(DSLExpression x, Position pos = {});
+
+/**
+ * Returns a boolean vector indicating whether components of x are less than the corresponding
+ * components of y.
+ */
+DSLExpression LessThan(DSLExpression x, DSLExpression y,
+ Position pos = {});
+
+/**
+ * Returns a boolean vector indicating whether components of x are less than or equal to the
+ * corresponding components of y.
+ */
+DSLExpression LessThanEqual(DSLExpression x, DSLExpression y,
+ Position pos = {});
+
+/**
+ * Returns the log base e of x. If x is a vector, operates componentwise.
+ */
+DSLExpression Log(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the log base 2 of x. If x is a vector, operates componentwise.
+ */
+DSLExpression Log2(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the larger (closer to positive infinity) of x and y. If x is a vector, operates
+ * componentwise. y may be either a vector of the same dimensions as x, or a scalar.
+ */
+DSLExpression Max(DSLExpression x, DSLExpression y, Position pos = {});
+
+/**
+ * Returns the smaller (closer to negative infinity) of x and y. If x is a vector, operates
+ * componentwise. y may be either a vector of the same dimensions as x, or a scalar.
+ */
+DSLExpression Min(DSLExpression x, DSLExpression y, Position pos = {});
+
+/**
+ * Returns a linear interpolation between x and y at position a, where a=0 results in x and a=1
+ * results in y. If x and y are vectors, operates componentwise. a may be either a vector of the
+ * same dimensions as x and y, or a scalar.
+ */
+DSLExpression Mix(DSLExpression x, DSLExpression y, DSLExpression a,
+ Position pos = {});
+
+/**
+ * Returns x modulo y. If x is a vector, operates componentwise. y may be either a vector of the
+ * same dimensions as x, or a scalar.
+ */
+DSLExpression Mod(DSLExpression x, DSLExpression y, Position pos = {});
+
+/**
+ * Returns the vector x normalized to a length of 1.
+ */
+DSLExpression Normalize(DSLExpression x, Position pos = {});
+
+/**
+ * Returns a boolean vector indicating whether components of x are not equal to the corresponding
+ * components of y.
+ */
+DSLExpression NotEqual(DSLExpression x, DSLExpression y,
+ Position pos = {});
+
+/**
+ * Returns x raised to the power y. If x is a vector, operates componentwise. y may be either a
+ * vector of the same dimensions as x, or a scalar.
+ */
+DSLExpression Pow(DSLExpression x, DSLExpression y, Position pos = {});
+
+/**
+ * Returns x converted from degrees to radians. If x is a vector, operates componentwise.
+ */
+DSLExpression Radians(DSLExpression x, Position pos = {});
+
+/**
+ * Returns i reflected from a surface with normal n.
+ */
+DSLExpression Reflect(DSLExpression i, DSLExpression n, Position pos = {});
+
+/**
+ * Returns i refracted across a surface with normal n and ratio of indices of refraction eta.
+ */
+DSLExpression Refract(DSLExpression i, DSLExpression n, DSLExpression eta,
+ Position pos = {});
+
+/**
+ * Returns x, rounded to the nearest integer. If x is a vector, operates componentwise.
+ */
+DSLExpression Round(DSLExpression x, Position pos = {});
+
+/**
+ * Returns x clamped to the range [0, 1]. If x is a vector, operates componentwise.
+ */
+DSLExpression Saturate(DSLExpression x, Position pos = {});
+
+/**
+ * Returns -1, 0, or 1 depending on whether x is negative, zero, or positive, respectively. If x is
+ * a vector, operates componentwise.
+ */
+DSLExpression Sign(DSLExpression x, Position pos = {});
+
+/**
+ * Returns the sine of x. If x is a vector, operates componentwise.
+ */
+DSLExpression Sin(DSLExpression x, Position pos = {});
+
+/**
+ * Returns a smooth interpolation between 0 (at x=edge1) and 1 (at x=edge2). If x is a vector,
+ * operates componentwise. edge1 and edge2 may either be both vectors of the same dimensions as x or
+ * scalars.
+ */
+DSLExpression Smoothstep(DSLExpression edge1, DSLExpression edge2, DSLExpression x,
+ Position pos = {});
+
+/**
+ * Returns the square root of x. If x is a vector, operates componentwise.
+ */
+DSLExpression Sqrt(DSLExpression x, Position pos = {});
+
+/**
+ * Returns 0 if x < edge or 1 if x >= edge. If x is a vector, operates componentwise. edge may be
+ * either a vector of the same dimensions as x, or a scalar.
+ */
+DSLExpression Step(DSLExpression edge, DSLExpression x, Position pos = {});
+
+/**
+ * Returns the tangent of x. If x is a vector, operates componentwise.
+ */
+DSLExpression Tan(DSLExpression x, Position pos = {});
+
+/**
+ * Returns x converted from premultiplied to unpremultiplied alpha.
+ */
+DSLExpression Unpremul(DSLExpression x, Position pos = {});
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLExpression.h b/gfx/skia/skia/include/sksl/DSLExpression.h
new file mode 100644
index 0000000000..46e70fa2be
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLExpression.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_EXPRESSION
+#define SKSL_DSL_EXPRESSION
+
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <type_traits>
+
+#if defined(__has_cpp_attribute) && __has_cpp_attribute(clang::reinitializes)
+#define SK_CLANG_REINITIALIZES [[clang::reinitializes]]
+#else
+#define SK_CLANG_REINITIALIZES
+#endif
+
+namespace SkSL {
+
+class Expression;
+class ExpressionArray;
+
+namespace dsl {
+
+class DSLType;
+class DSLVarBase;
+
+/**
+ * Represents an expression such as 'cos(x)' or 'a + b'.
+ */
+class DSLExpression {
+public:
+ DSLExpression(const DSLExpression&) = delete;
+
+ DSLExpression(DSLExpression&&);
+
+ DSLExpression();
+
+ /**
+ * Creates an expression representing a literal float.
+ */
+ DSLExpression(float value, Position pos = {});
+
+ /**
+ * Creates an expression representing a literal float.
+ */
+ DSLExpression(double value, Position pos = {})
+ : DSLExpression((float) value) {}
+
+ /**
+ * Creates an expression representing a literal int.
+ */
+ DSLExpression(int value, Position pos = {});
+
+ /**
+ * Creates an expression representing a literal int.
+ */
+ DSLExpression(int64_t value, Position pos = {});
+
+ /**
+ * Creates an expression representing a literal uint.
+ */
+ DSLExpression(unsigned int value, Position pos = {});
+
+ /**
+ * Creates an expression representing a literal bool.
+ */
+ DSLExpression(bool value, Position pos = {});
+
+ /**
+ * Creates an expression representing a variable reference.
+ */
+ DSLExpression(DSLVarBase& var, Position pos = {});
+
+ DSLExpression(DSLVarBase&& var, Position pos = {});
+
+ // If expression is null, returns Poison
+ explicit DSLExpression(std::unique_ptr<SkSL::Expression> expression, Position pos = {});
+
+ static DSLExpression Poison(Position pos = {});
+
+ ~DSLExpression();
+
+ DSLType type() const;
+
+ std::string description() const;
+
+ Position position() const;
+
+ void setPosition(Position pos);
+
+ /**
+ * Performs assignment, like the '=' operator.
+ */
+ DSLExpression assign(DSLExpression other);
+
+ DSLExpression x(Position pos = {});
+
+ DSLExpression y(Position pos = {});
+
+ DSLExpression z(Position pos = {});
+
+ DSLExpression w(Position pos = {});
+
+ DSLExpression r(Position pos = {});
+
+ DSLExpression g(Position pos = {});
+
+ DSLExpression b(Position pos = {});
+
+ DSLExpression a(Position pos = {});
+
+ /**
+ * Creates an SkSL struct field access expression.
+ */
+ DSLExpression field(std::string_view name, Position pos = {});
+
+ /**
+ * Creates an SkSL array index expression.
+ */
+ DSLExpression operator[](DSLExpression index);
+
+ DSLExpression operator()(SkTArray<DSLExpression, true> args, Position pos = {});
+
+ DSLExpression operator()(ExpressionArray args, Position pos = {});
+
+ /**
+ * Invokes a prefix operator.
+ */
+ DSLExpression prefix(Operator::Kind op, Position pos);
+
+ /**
+ * Invokes a postfix operator.
+ */
+ DSLExpression postfix(Operator::Kind op, Position pos);
+
+ /**
+ * Invokes a binary operator.
+ */
+ DSLExpression binary(Operator::Kind op, DSLExpression right, Position pos);
+
+ /**
+ * Equivalent to operator[].
+ */
+ DSLExpression index(DSLExpression index, Position pos);
+
+ /**
+ * Returns true if this object contains an expression. DSLExpressions which were created with
+ * the empty constructor or which have already been release()ed do not have a value.
+ * DSLExpressions created with errors are still considered to have a value (but contain poison).
+ */
+ bool hasValue() const {
+ return fExpression != nullptr;
+ }
+
+ /**
+ * Returns true if this object contains an expression which is not poison.
+ */
+ bool isValid() const;
+
+ SK_CLANG_REINITIALIZES void swap(DSLExpression& other);
+
+ /**
+ * Invalidates this object and returns the SkSL expression it represents. It is an error to call
+ * this on an invalid DSLExpression.
+ */
+ std::unique_ptr<SkSL::Expression> release();
+
+private:
+ /**
+ * Calls release if this expression has a value, otherwise returns null.
+ */
+ std::unique_ptr<SkSL::Expression> releaseIfPossible();
+
+ std::unique_ptr<SkSL::Expression> fExpression;
+
+ friend DSLExpression SampleChild(int index, DSLExpression coords);
+
+ friend class DSLCore;
+ friend class DSLVarBase;
+ friend class DSLWriter;
+};
+
+DSLExpression operator+(DSLExpression left, DSLExpression right);
+DSLExpression operator+(DSLExpression expr);
+DSLExpression operator+=(DSLExpression left, DSLExpression right);
+DSLExpression operator-(DSLExpression left, DSLExpression right);
+DSLExpression operator-(DSLExpression expr);
+DSLExpression operator-=(DSLExpression left, DSLExpression right);
+DSLExpression operator*(DSLExpression left, DSLExpression right);
+DSLExpression operator*=(DSLExpression left, DSLExpression right);
+DSLExpression operator/(DSLExpression left, DSLExpression right);
+DSLExpression operator/=(DSLExpression left, DSLExpression right);
+DSLExpression operator%(DSLExpression left, DSLExpression right);
+DSLExpression operator%=(DSLExpression left, DSLExpression right);
+DSLExpression operator<<(DSLExpression left, DSLExpression right);
+DSLExpression operator<<=(DSLExpression left, DSLExpression right);
+DSLExpression operator>>(DSLExpression left, DSLExpression right);
+DSLExpression operator>>=(DSLExpression left, DSLExpression right);
+DSLExpression operator&&(DSLExpression left, DSLExpression right);
+DSLExpression operator||(DSLExpression left, DSLExpression right);
+DSLExpression operator&(DSLExpression left, DSLExpression right);
+DSLExpression operator&=(DSLExpression left, DSLExpression right);
+DSLExpression operator|(DSLExpression left, DSLExpression right);
+DSLExpression operator|=(DSLExpression left, DSLExpression right);
+DSLExpression operator^(DSLExpression left, DSLExpression right);
+DSLExpression operator^=(DSLExpression left, DSLExpression right);
+DSLExpression LogicalXor(DSLExpression left, DSLExpression right);
+DSLExpression operator,(DSLExpression left, DSLExpression right);
+DSLExpression operator==(DSLExpression left, DSLExpression right);
+DSLExpression operator!=(DSLExpression left, DSLExpression right);
+DSLExpression operator>(DSLExpression left, DSLExpression right);
+DSLExpression operator<(DSLExpression left, DSLExpression right);
+DSLExpression operator>=(DSLExpression left, DSLExpression right);
+DSLExpression operator<=(DSLExpression left, DSLExpression right);
+DSLExpression operator!(DSLExpression expr);
+DSLExpression operator~(DSLExpression expr);
+DSLExpression operator++(DSLExpression expr);
+DSLExpression operator++(DSLExpression expr, int);
+DSLExpression operator--(DSLExpression expr);
+DSLExpression operator--(DSLExpression expr, int);
+
+} // namespace dsl
+
+} // namespace SkSL
+
+template <typename T> struct sk_is_trivially_relocatable;
+
+template <>
+struct sk_is_trivially_relocatable<SkSL::dsl::DSLExpression> : std::true_type {};
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLFunction.h b/gfx/skia/skia/include/sksl/DSLFunction.h
new file mode 100644
index 0000000000..a4928b02fe
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLFunction.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_FUNCTION
+#define SKSL_DSL_FUNCTION
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/DSLBlock.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLModifiers.h"
+#include "include/sksl/DSLStatement.h"
+#include "include/sksl/DSLVar.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <string_view>
+#include <utility>
+
+namespace SkSL {
+
+class FunctionDeclaration;
+
+namespace dsl {
+
+class DSLType;
+
+class DSLFunction {
+public:
+ template<class... Parameters>
+ DSLFunction(const DSLType& returnType, std::string_view name, Parameters&... parameters)
+ : DSLFunction(DSLModifiers(), returnType, name, parameters...) {}
+
+ template<class... Parameters>
+ DSLFunction(const DSLModifiers& modifiers, const DSLType& returnType, std::string_view name,
+ Parameters&... parameters) {
+ SkTArray<DSLParameter*> parameterArray;
+ parameterArray.reserve_back(sizeof...(parameters));
+ (parameterArray.push_back(&parameters), ...);
+
+ // We can't have a default parameter and a template parameter pack at the same time, so
+ // unfortunately we can't capture position from this overload.
+ this->init(modifiers, returnType, name, parameterArray, Position());
+ }
+
+ DSLFunction(std::string_view name, const DSLModifiers& modifiers, const DSLType& returnType,
+ SkSpan<DSLParameter*> parameters, Position pos = {}) {
+ this->init(modifiers, returnType, name, parameters, pos);
+ }
+
+ DSLFunction(SkSL::FunctionDeclaration* decl)
+ : fDecl(decl) {}
+
+ virtual ~DSLFunction() = default;
+
+ template<class... Stmt>
+ void define(Stmt... stmts) {
+ DSLBlock block = DSLBlock(DSLStatement(std::move(stmts))...);
+ this->define(std::move(block));
+ }
+
+ void define(DSLBlock block, Position pos = {});
+
+ void prototype();
+
+ /**
+ * Invokes the function with the given arguments.
+ */
+ template<class... Args>
+ DSLExpression operator()(Args&&... args) {
+ ExpressionArray argArray;
+ argArray.reserve_back(sizeof...(args));
+ this->collectArgs(argArray, std::forward<Args>(args)...);
+ return this->call(std::move(argArray));
+ }
+
+ /**
+ * Invokes the function with the given arguments.
+ */
+ DSLExpression call(SkSpan<DSLExpression> args, Position pos = {});
+
+ DSLExpression call(ExpressionArray args, Position pos = {});
+
+private:
+ void collectArgs(ExpressionArray& args) {}
+
+ template<class... RemainingArgs>
+ void collectArgs(ExpressionArray& args, DSLVar& var, RemainingArgs&&... remaining) {
+ args.push_back(DSLExpression(var).release());
+ collectArgs(args, std::forward<RemainingArgs>(remaining)...);
+ }
+
+ template<class... RemainingArgs>
+ void collectArgs(ExpressionArray& args, DSLExpression expr, RemainingArgs&&... remaining) {
+ args.push_back(expr.release());
+ collectArgs(args, std::forward<RemainingArgs>(remaining)...);
+ }
+
+ void init(DSLModifiers modifiers, const DSLType& returnType, std::string_view name,
+ SkSpan<DSLParameter*> params, Position pos);
+
+ SkSL::FunctionDeclaration* fDecl = nullptr;
+ SkSL::Position fPosition;
+};
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLLayout.h b/gfx/skia/skia/include/sksl/DSLLayout.h
new file mode 100644
index 0000000000..6eb8b6257b
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLLayout.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_LAYOUT
+#define SKSL_DSL_LAYOUT
+
+#include "include/private/SkSLLayout.h"
+#include "include/sksl/SkSLPosition.h"
+
+namespace SkSL {
+
+namespace dsl {
+
+class DSLLayout {
+public:
+ DSLLayout() {}
+
+ DSLLayout& originUpperLeft(Position pos = {}) {
+ return this->flag(SkSL::Layout::kOriginUpperLeft_Flag, "origin_upper_left", pos);
+ }
+
+ DSLLayout& pushConstant(Position pos = {}) {
+ return this->flag(SkSL::Layout::kPushConstant_Flag, "push_constant", pos);
+ }
+
+ DSLLayout& blendSupportAllEquations(Position pos = {}) {
+ return this->flag(SkSL::Layout::kBlendSupportAllEquations_Flag,
+ "blend_support_all_equations", pos);
+ }
+
+ DSLLayout& color(Position pos = {}) {
+ return this->flag(SkSL::Layout::kColor_Flag, "color", pos);
+ }
+
+ DSLLayout& location(int location, Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fLocation, location, SkSL::Layout::kLocation_Flag,
+ "location", pos);
+ }
+
+ DSLLayout& offset(int offset, Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fOffset, offset, SkSL::Layout::kOffset_Flag, "offset",
+ pos);
+ }
+
+ DSLLayout& binding(int binding, Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fBinding, binding, SkSL::Layout::kBinding_Flag,
+ "binding", pos);
+ }
+
+ DSLLayout& texture(int texture, Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fTexture, texture, SkSL::Layout::kTexture_Flag,
+ "texture", pos);
+ }
+
+ DSLLayout& sampler(int sampler, Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fSampler, sampler, SkSL::Layout::kSampler_Flag,
+ "sampler", pos);
+ }
+
+ DSLLayout& index(int index, Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fIndex, index, SkSL::Layout::kIndex_Flag, "index", pos);
+ }
+
+ DSLLayout& set(int set, Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fSet, set, SkSL::Layout::kSet_Flag, "set", pos);
+ }
+
+ DSLLayout& builtin(int builtin, Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fBuiltin, builtin, SkSL::Layout::kBuiltin_Flag,
+ "builtin", pos);
+ }
+
+ DSLLayout& inputAttachmentIndex(int inputAttachmentIndex,
+ Position pos = {}) {
+ return this->intValue(&fSkSLLayout.fInputAttachmentIndex, inputAttachmentIndex,
+ SkSL::Layout::kInputAttachmentIndex_Flag, "input_attachment_index",
+ pos);
+ }
+
+ DSLLayout& spirv(Position pos = {}) {
+ return this->flag(SkSL::Layout::kSPIRV_Flag, "spirv", pos);
+ }
+
+ DSLLayout& metal(Position pos = {}) {
+ return this->flag(SkSL::Layout::kMetal_Flag, "metal", pos);
+ }
+
+ DSLLayout& gl(Position pos = {}) {
+ return this->flag(SkSL::Layout::kGL_Flag, "gl", pos);
+ }
+
+ DSLLayout& wgsl(Position pos = {}) {
+ return this->flag(SkSL::Layout::kWGSL_Flag, "wgsl", pos);
+ }
+
+private:
+ explicit DSLLayout(SkSL::Layout skslLayout)
+ : fSkSLLayout(skslLayout) {}
+
+ DSLLayout& flag(SkSL::Layout::Flag mask, const char* name, Position pos);
+
+ DSLLayout& intValue(int* target, int value, SkSL::Layout::Flag flag, const char* name,
+ Position pos);
+
+ SkSL::Layout fSkSLLayout;
+
+ friend class DSLModifiers;
+};
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLModifiers.h b/gfx/skia/skia/include/sksl/DSLModifiers.h
new file mode 100644
index 0000000000..c60b7b2c0c
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLModifiers.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_MODIFIERS
+#define SKSL_DSL_MODIFIERS
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/sksl/DSLLayout.h"
+
+namespace SkSL {
+
+namespace dsl {
+
+class DSLField;
+class DSLType;
+
+enum Modifier {
+ kNo_Modifier = SkSL::Modifiers::kNo_Flag,
+ kConst_Modifier = SkSL::Modifiers::kConst_Flag,
+ kIn_Modifier = SkSL::Modifiers::kIn_Flag,
+ kOut_Modifier = SkSL::Modifiers::kOut_Flag,
+ kInOut_Modifier = SkSL::Modifiers::kIn_Flag | SkSL::Modifiers::kOut_Flag,
+ kUniform_Modifier = SkSL::Modifiers::kUniform_Flag,
+ kFlat_Modifier = SkSL::Modifiers::kFlat_Flag,
+ kNoPerspective_Modifier = SkSL::Modifiers::kNoPerspective_Flag,
+};
+
+class DSLModifiers {
+public:
+ DSLModifiers(int flags = 0, Position pos = {})
+ : DSLModifiers(DSLLayout(), flags, pos) {}
+
+ DSLModifiers(DSLLayout layout, int flags = 0, Position pos = {})
+ : fModifiers(layout.fSkSLLayout, flags)
+ , fPosition(pos) {}
+
+ int& flags() {
+ return fModifiers.fFlags;
+ }
+
+ const int& flags() const {
+ return fModifiers.fFlags;
+ }
+
+ DSLLayout layout() const {
+ return DSLLayout(fModifiers.fLayout);
+ }
+
+private:
+ SkSL::Modifiers fModifiers;
+ Position fPosition;
+
+ friend DSLType StructType(std::string_view name,
+ SkSpan<DSLField> fields,
+ bool interfaceBlock,
+ Position pos);
+ friend class DSLCore;
+ friend class DSLFunction;
+ friend class DSLType;
+ friend class DSLWriter;
+};
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLStatement.h b/gfx/skia/skia/include/sksl/DSLStatement.h
new file mode 100644
index 0000000000..391e911d3a
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLStatement.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_STATEMENT
+#define SKSL_DSL_STATEMENT
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <memory>
+#include <utility>
+
+namespace SkSL {
+
+class Expression;
+
+namespace dsl {
+
+class DSLBlock;
+class DSLExpression;
+
+class DSLStatement {
+public:
+ DSLStatement();
+
+ DSLStatement(DSLExpression expr);
+
+ DSLStatement(DSLBlock block);
+
+ DSLStatement(DSLStatement&&) = default;
+
+ DSLStatement(std::unique_ptr<SkSL::Expression> expr);
+
+ DSLStatement(std::unique_ptr<SkSL::Statement> stmt, Position pos);
+
+ DSLStatement(std::unique_ptr<SkSL::Statement> stmt);
+
+ ~DSLStatement();
+
+ DSLStatement& operator=(DSLStatement&& other) = default;
+
+ Position position() {
+ SkASSERT(this->hasValue());
+ return fStatement->fPosition;
+ }
+
+ void setPosition(Position pos) {
+ SkASSERT(this->hasValue());
+ fStatement->fPosition = pos;
+ }
+
+ bool hasValue() { return fStatement != nullptr; }
+
+ std::unique_ptr<SkSL::Statement> release() {
+ SkASSERT(this->hasValue());
+ return std::move(fStatement);
+ }
+
+private:
+ std::unique_ptr<SkSL::Statement> releaseIfPossible() {
+ return std::move(fStatement);
+ }
+
+ std::unique_ptr<SkSL::Statement> fStatement;
+
+ friend class DSLCore;
+ friend class DSLWriter;
+ friend DSLStatement operator,(DSLStatement left, DSLStatement right);
+};
+
+DSLStatement operator,(DSLStatement left, DSLStatement right);
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLType.h b/gfx/skia/skia/include/sksl/DSLType.h
new file mode 100644
index 0000000000..fe2522e1aa
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLType.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_TYPE
+#define SKSL_DSL_TYPE
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLModifiers.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <cstdint>
+#include <string_view>
+#include <utility>
+
+namespace SkSL {
+
+class Compiler;
+class Type;
+
+namespace dsl {
+
+class DSLField;
+class DSLVarBase;
+
+enum TypeConstant : uint8_t {
+ kBool_Type,
+ kBool2_Type,
+ kBool3_Type,
+ kBool4_Type,
+ kHalf_Type,
+ kHalf2_Type,
+ kHalf3_Type,
+ kHalf4_Type,
+ kHalf2x2_Type,
+ kHalf3x2_Type,
+ kHalf4x2_Type,
+ kHalf2x3_Type,
+ kHalf3x3_Type,
+ kHalf4x3_Type,
+ kHalf2x4_Type,
+ kHalf3x4_Type,
+ kHalf4x4_Type,
+ kFloat_Type,
+ kFloat2_Type,
+ kFloat3_Type,
+ kFloat4_Type,
+ kFragmentProcessor_Type,
+ kFloat2x2_Type,
+ kFloat3x2_Type,
+ kFloat4x2_Type,
+ kFloat2x3_Type,
+ kFloat3x3_Type,
+ kFloat4x3_Type,
+ kFloat2x4_Type,
+ kFloat3x4_Type,
+ kFloat4x4_Type,
+ kInt_Type,
+ kInt2_Type,
+ kInt3_Type,
+ kInt4_Type,
+ kShader_Type,
+ kShort_Type,
+ kShort2_Type,
+ kShort3_Type,
+ kShort4_Type,
+ kUInt_Type,
+ kUInt2_Type,
+ kUInt3_Type,
+ kUInt4_Type,
+ kUShort_Type,
+ kUShort2_Type,
+ kUShort3_Type,
+ kUShort4_Type,
+ kVoid_Type,
+ kPoison_Type,
+};
+
+class DSLType {
+public:
+ DSLType(TypeConstant tc, Position pos = {});
+
+ DSLType(const SkSL::Type* type, Position pos = {});
+
+ DSLType(std::string_view name, Position pos = {});
+
+ DSLType(std::string_view name,
+ DSLModifiers* modifiers,
+ Position pos = {});
+
+ static DSLType Invalid();
+
+ /**
+ * Returns true if the SkSL type is non-null.
+ */
+ bool hasValue() const { return fSkSLType != nullptr; }
+
+ /**
+ * Returns true if this type is a bool.
+ */
+ bool isBoolean() const;
+
+ /**
+ * Returns true if this is a numeric scalar type.
+ */
+ bool isNumber() const;
+
+ /**
+ * Returns true if this is a floating-point scalar type (float or half).
+ */
+ bool isFloat() const;
+
+ /**
+ * Returns true if this is a signed scalar type (int or short).
+ */
+ bool isSigned() const;
+
+ /**
+ * Returns true if this is an unsigned scalar type (uint or ushort).
+ */
+ bool isUnsigned() const;
+
+ /**
+ * Returns true if this is a signed or unsigned integer.
+ */
+ bool isInteger() const;
+
+ /**
+ * Returns true if this is a scalar type.
+ */
+ bool isScalar() const;
+
+ /**
+ * Returns true if this is a vector type.
+ */
+ bool isVector() const;
+
+ /**
+ * Returns true if this is a matrix type.
+ */
+ bool isMatrix() const;
+
+ /**
+ * Returns true if this is an array type.
+ */
+ bool isArray() const;
+
+ /**
+ * Returns true if this is a struct type.
+ */
+ bool isStruct() const;
+
+ /**
+ * Returns true if this is an interface block.
+ */
+ bool isInterfaceBlock() const;
+
+ /**
+ * Returns true if this is a Skia object type (shader, colorFilter, blender).
+ */
+ bool isEffectChild() const;
+
+ template<typename... Args>
+ static DSLExpression Construct(DSLType type, DSLVarBase& var, Args&&... args) {
+ DSLExpression argArray[] = {var, args...};
+ return Construct(type, SkSpan(argArray));
+ }
+
+ template<typename... Args>
+ static DSLExpression Construct(DSLType type, DSLExpression expr, Args&&... args) {
+ DSLExpression argArray[] = {std::move(expr), std::move(args)...};
+ return Construct(type, SkSpan(argArray));
+ }
+
+ static DSLExpression Construct(DSLType type, SkSpan<DSLExpression> argArray);
+
+private:
+ const SkSL::Type& skslType() const {
+ SkASSERT(fSkSLType);
+ return *fSkSLType;
+ }
+
+ const SkSL::Type* fSkSLType = nullptr;
+
+ friend DSLType Array(const DSLType& base, int count, Position pos);
+ friend DSLType Struct(std::string_view name, SkSpan<DSLField> fields, Position pos);
+ friend DSLType StructType(std::string_view name,
+ SkSpan<DSLField> fields,
+ bool interfaceBlock,
+ Position pos);
+ friend DSLType UnsizedArray(const DSLType& base, Position pos);
+ friend class DSLCore;
+ friend class DSLFunction;
+ friend class DSLVarBase;
+ friend class DSLWriter;
+ friend class SkSL::Compiler;
+};
+
+#define TYPE(T) \
+ template<typename... Args> \
+ DSLExpression T(Args&&... args) { \
+ return DSLType::Construct(k ## T ## _Type, std::forward<Args>(args)...); \
+ }
+
+#define VECTOR_TYPE(T) \
+ TYPE(T) \
+ TYPE(T ## 2) \
+ TYPE(T ## 3) \
+ TYPE(T ## 4)
+
+#define MATRIX_TYPE(T) \
+ TYPE(T ## 2x2) \
+ TYPE(T ## 3x2) \
+ TYPE(T ## 4x2) \
+ TYPE(T ## 2x3) \
+ TYPE(T ## 3x3) \
+ TYPE(T ## 4x3) \
+ TYPE(T ## 2x4) \
+ TYPE(T ## 3x4) \
+ TYPE(T ## 4x4)
+
+VECTOR_TYPE(Bool)
+VECTOR_TYPE(Float)
+VECTOR_TYPE(Half)
+VECTOR_TYPE(Int)
+VECTOR_TYPE(UInt)
+VECTOR_TYPE(Short)
+VECTOR_TYPE(UShort)
+
+MATRIX_TYPE(Float)
+MATRIX_TYPE(Half)
+
+#undef TYPE
+#undef VECTOR_TYPE
+#undef MATRIX_TYPE
+
+DSLType Array(const DSLType& base, int count, Position pos = {});
+
+DSLType UnsizedArray(const DSLType& base, Position pos = {});
+
+class DSLField {
+public:
+ DSLField(const DSLType type, std::string_view name,
+ Position pos = {})
+ : DSLField(DSLModifiers(), type, name, pos) {}
+
+ DSLField(const DSLModifiers& modifiers, const DSLType type, std::string_view name,
+ Position pos = {})
+ : fModifiers(modifiers)
+ , fType(type)
+ , fName(name)
+ , fPosition(pos) {}
+
+private:
+ DSLModifiers fModifiers;
+ const DSLType fType;
+ std::string_view fName;
+ Position fPosition;
+
+ friend class DSLCore;
+ friend DSLType StructType(std::string_view name,
+ SkSpan<DSLField> fields,
+ bool interfaceBlock,
+ Position pos);
+};
+
+/**
+ * Creates a StructDefinition at the top level and returns the associated type.
+ */
+DSLType Struct(std::string_view name, SkSpan<DSLField> fields, Position pos = {});
+
+template<typename... Field>
+DSLType Struct(std::string_view name, Field... fields) {
+ DSLField fieldTypes[] = {std::move(fields)...};
+ return Struct(name, SkSpan(fieldTypes), Position());
+}
+
+/**
+ * Creates a struct type and adds it to the current symbol table. Does _not_ create a ProgramElement
+ * at the top level, so the type will exist, but won't be represented anywhere in the output.
+ * (Use Struct or InterfaceBlock to add a top-level program element.)
+ */
+DSLType StructType(std::string_view name,
+ SkSpan<DSLField> fields,
+ bool interfaceBlock,
+ Position pos);
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/DSLVar.h b/gfx/skia/skia/include/sksl/DSLVar.h
new file mode 100644
index 0000000000..f052a525e3
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/DSLVar.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_VAR
+#define SKSL_DSL_VAR
+
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLModifiers.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <cstdint>
+#include <memory>
+#include <string_view>
+#include <utility>
+
+namespace SkSL {
+
+class Expression;
+class ExpressionArray;
+class Variable;
+enum class VariableStorage : int8_t;
+
+namespace dsl {
+
+class DSLVarBase {
+public:
+ /**
+ * Constructs a new variable with the specified type and name.
+ */
+ DSLVarBase(VariableStorage storage, DSLType type, std::string_view name,
+ DSLExpression initialValue, Position pos, Position namePos);
+
+ DSLVarBase(VariableStorage storage, const DSLModifiers& modifiers, DSLType type,
+ std::string_view name, DSLExpression initialValue, Position pos, Position namePos);
+
+ DSLVarBase(DSLVarBase&&) = default;
+
+ std::string_view name() const {
+ return fName;
+ }
+
+ const DSLModifiers& modifiers() const {
+ return fModifiers;
+ }
+
+ VariableStorage storage() const {
+ return fStorage;
+ }
+
+ DSLExpression x() {
+ return DSLExpression(*this).x();
+ }
+
+ DSLExpression y() {
+ return DSLExpression(*this).y();
+ }
+
+ DSLExpression z() {
+ return DSLExpression(*this).z();
+ }
+
+ DSLExpression w() {
+ return DSLExpression(*this).w();
+ }
+
+ DSLExpression r() {
+ return DSLExpression(*this).r();
+ }
+
+ DSLExpression g() {
+ return DSLExpression(*this).g();
+ }
+
+ DSLExpression b() {
+ return DSLExpression(*this).b();
+ }
+
+ DSLExpression a() {
+ return DSLExpression(*this).a();
+ }
+
+ DSLExpression field(std::string_view name) {
+ return DSLExpression(*this).field(name);
+ }
+
+ DSLExpression operator[](DSLExpression&& index);
+
+ DSLExpression operator++() {
+ return ++DSLExpression(*this);
+ }
+
+ DSLExpression operator++(int) {
+ return DSLExpression(*this)++;
+ }
+
+ DSLExpression operator--() {
+ return --DSLExpression(*this);
+ }
+
+ DSLExpression operator--(int) {
+ return DSLExpression(*this)--;
+ }
+
+ template <class T> DSLExpression assign(T&& param) {
+ return this->assignExpression(DSLExpression(std::forward<T>(param)));
+ }
+
+protected:
+ /**
+ * Creates an empty, unpopulated var. Can be replaced with a real var later via `swap`.
+ */
+ DSLVarBase(VariableStorage storage) : fType(kVoid_Type), fStorage(storage) {}
+
+ DSLExpression assignExpression(DSLExpression other);
+
+ void swap(DSLVarBase& other);
+
+ DSLModifiers fModifiers;
+ // We only need to keep track of the type here so that we can create the SkSL::Variable. For
+ // predefined variables this field is unnecessary, so we don't bother tracking it and just set
+ // it to kVoid; in other words, you shouldn't generally be relying on this field to be correct.
+ // If you need to determine the variable's type, look at DSLWriter::Var(...)->type() instead.
+ DSLType fType;
+ std::unique_ptr<SkSL::Statement> fDeclaration;
+ SkSL::Variable* fVar = nullptr;
+ Position fNamePosition;
+ std::string_view fName;
+ DSLExpression fInitialValue;
+ Position fPosition;
+ VariableStorage fStorage;
+ bool fInitialized = false;
+
+ friend class DSLCore;
+ friend class DSLFunction;
+ friend class DSLWriter;
+};
+
+/**
+ * A local variable.
+ */
+class DSLVar : public DSLVarBase {
+public:
+ DSLVar();
+
+ DSLVar(DSLType type, std::string_view name, DSLExpression initialValue = DSLExpression(),
+ Position pos = {}, Position namePos = {});
+
+ DSLVar(const DSLModifiers& modifiers, DSLType type, std::string_view name,
+ DSLExpression initialValue = DSLExpression(), Position pos = {}, Position namePos = {});
+
+ DSLVar(DSLVar&&) = default;
+
+ void swap(DSLVar& other);
+
+private:
+ using INHERITED = DSLVarBase;
+};
+
+/**
+ * A global variable.
+ */
+class DSLGlobalVar : public DSLVarBase {
+public:
+ DSLGlobalVar();
+
+ DSLGlobalVar(DSLType type, std::string_view name, DSLExpression initialValue = DSLExpression(),
+ Position pos = {}, Position namePos = {});
+
+ DSLGlobalVar(const DSLModifiers& modifiers, DSLType type, std::string_view name,
+ DSLExpression initialValue = DSLExpression(),
+ Position pos = {}, Position namePos = {});
+
+ DSLGlobalVar(const char* name);
+
+ DSLGlobalVar(DSLGlobalVar&&) = default;
+
+ void swap(DSLGlobalVar& other);
+
+ /**
+ * Implements the following method calls:
+ * half4 shader::eval(float2 coords);
+ * half4 colorFilter::eval(half4 input);
+ */
+ DSLExpression eval(DSLExpression x, Position pos = {});
+
+ /**
+ * Implements the following method call:
+ * half4 blender::eval(half4 src, half4 dst);
+ */
+ DSLExpression eval(DSLExpression x, DSLExpression y, Position pos = {});
+
+private:
+ DSLExpression eval(ExpressionArray args, Position pos);
+
+ std::unique_ptr<SkSL::Expression> methodCall(std::string_view methodName, Position pos);
+
+ using INHERITED = DSLVarBase;
+};
+
+/**
+ * A function parameter.
+ */
+class DSLParameter : public DSLVarBase {
+public:
+ DSLParameter();
+
+ DSLParameter(DSLType type, std::string_view name, Position pos = {}, Position namePos = {});
+
+ DSLParameter(const DSLModifiers& modifiers, DSLType type, std::string_view name,
+ Position pos = {}, Position namePos = {});
+
+ DSLParameter(DSLParameter&&) = default;
+
+ void swap(DSLParameter& other);
+
+private:
+ using INHERITED = DSLVarBase;
+};
+
+} // namespace dsl
+
+} // namespace SkSL
+
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/SkSLDebugTrace.h b/gfx/skia/skia/include/sksl/SkSLDebugTrace.h
new file mode 100644
index 0000000000..9c5eafbc94
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/SkSLDebugTrace.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DEBUG_TRACE
+#define SKSL_DEBUG_TRACE
+
+#include "include/core/SkRefCnt.h"
+
+class SkWStream;
+
+namespace SkSL {
+
+class DebugTrace : public SkRefCnt {
+public:
+ /** Serializes a debug trace to JSON which can be parsed by our debugger. */
+ virtual void writeTrace(SkWStream* w) const = 0;
+
+ /** Generates a human-readable dump of the debug trace. */
+ virtual void dump(SkWStream* o) const = 0;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/SkSLErrorReporter.h b/gfx/skia/skia/include/sksl/SkSLErrorReporter.h
new file mode 100644
index 0000000000..4abf4631b8
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/SkSLErrorReporter.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_ERROR_REPORTER
+#define SKSL_ERROR_REPORTER
+
+#include "include/core/SkTypes.h"
+
+#include <string_view>
+
+namespace SkSL {
+
+class Position;
+
+/**
+ * Class which is notified in the event of an error.
+ */
+class ErrorReporter {
+public:
+ ErrorReporter() {}
+
+ virtual ~ErrorReporter() {}
+
+ void error(Position position, std::string_view msg);
+
+ std::string_view source() const { return fSource; }
+
+ void setSource(std::string_view source) { fSource = source; }
+
+ int errorCount() const {
+ return fErrorCount;
+ }
+
+ void resetErrorCount() {
+ fErrorCount = 0;
+ }
+
+protected:
+ /**
+ * Called when an error is reported.
+ */
+ virtual void handleError(std::string_view msg, Position position) = 0;
+
+private:
+ Position position(int offset) const;
+
+ std::string_view fSource;
+ int fErrorCount = 0;
+};
+
+/**
+ * Error reporter for tests that need an SkSL context; aborts immediately if an error is reported.
+ */
+class TestingOnly_AbortErrorReporter : public ErrorReporter {
+public:
+ void handleError(std::string_view msg, Position pos) override;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/SkSLOperator.h b/gfx/skia/skia/include/sksl/SkSLOperator.h
new file mode 100644
index 0000000000..1e47dce618
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/SkSLOperator.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_OPERATOR
+#define SKSL_OPERATOR
+
+#include <cstdint>
+#include <string_view>
+
+namespace SkSL {
+
+class Context;
+class Type;
+
+enum class OperatorKind : uint8_t {
+ PLUS,
+ MINUS,
+ STAR,
+ SLASH,
+ PERCENT,
+ SHL,
+ SHR,
+ LOGICALNOT,
+ LOGICALAND,
+ LOGICALOR,
+ LOGICALXOR,
+ BITWISENOT,
+ BITWISEAND,
+ BITWISEOR,
+ BITWISEXOR,
+ EQ,
+ EQEQ,
+ NEQ,
+ LT,
+ GT,
+ LTEQ,
+ GTEQ,
+ PLUSEQ,
+ MINUSEQ,
+ STAREQ,
+ SLASHEQ,
+ PERCENTEQ,
+ SHLEQ,
+ SHREQ,
+ BITWISEANDEQ,
+ BITWISEOREQ,
+ BITWISEXOREQ,
+ PLUSPLUS,
+ MINUSMINUS,
+ COMMA
+};
+
+enum class OperatorPrecedence : uint8_t {
+ kParentheses = 1,
+ kPostfix = 2,
+ kPrefix = 3,
+ kMultiplicative = 4,
+ kAdditive = 5,
+ kShift = 6,
+ kRelational = 7,
+ kEquality = 8,
+ kBitwiseAnd = 9,
+ kBitwiseXor = 10,
+ kBitwiseOr = 11,
+ kLogicalAnd = 12,
+ kLogicalXor = 13,
+ kLogicalOr = 14,
+ kTernary = 15,
+ kAssignment = 16,
+ kSequence = 17,
+ kTopLevel = kSequence
+};
+
+class Operator {
+public:
+ using Kind = OperatorKind;
+
+ Operator(Kind op) : fKind(op) {}
+
+ Kind kind() const { return fKind; }
+
+ bool isEquality() const {
+ return fKind == Kind::EQEQ || fKind == Kind::NEQ;
+ }
+
+ OperatorPrecedence getBinaryPrecedence() const;
+
+ // Returns the operator name surrounded by the expected whitespace for a tidy binary expression.
+ const char* operatorName() const;
+
+ // Returns the operator name without any surrounding whitespace.
+ std::string_view tightOperatorName() const;
+
+ // Returns true if op is '=' or any compound assignment operator ('+=', '-=', etc.)
+ bool isAssignment() const;
+
+ // Given a compound assignment operator, returns the non-assignment version of the operator
+ // (e.g. '+=' becomes '+')
+ Operator removeAssignment() const;
+
+ /**
+ * Defines the set of relational (comparison) operators:
+ * < <= > >=
+ */
+ bool isRelational() const;
+
+ /**
+ * Defines the set of operators which are only valid on integral types:
+ * << <<= >> >>= & &= | |= ^ ^= % %=
+ */
+ bool isOnlyValidForIntegralTypes() const;
+
+ /**
+ * Defines the set of operators which perform vector/matrix math.
+ * + += - -= * *= / /= % %= << <<= >> >>= & &= | |= ^ ^=
+ */
+ bool isValidForMatrixOrVector() const;
+
+ /*
+ * Defines the set of operators allowed by The OpenGL ES Shading Language 1.00, Section 5.1.
+ * The set of illegal (reserved) operators are the ones that only make sense with integral
+ * types. This is not a coincidence: It's because ES2 doesn't require 'int' to be anything but
+ * syntactic sugar for floats with truncation after each operation.
+ */
+ bool isAllowedInStrictES2Mode() const {
+ return !this->isOnlyValidForIntegralTypes();
+ }
+
+ /**
+ * Determines the operand and result types of a binary expression. Returns true if the
+ * expression is legal, false otherwise. If false, the values of the out parameters are
+ * undefined.
+ */
+ bool determineBinaryType(const Context& context,
+ const Type& left,
+ const Type& right,
+ const Type** outLeftType,
+ const Type** outRightType,
+ const Type** outResultType) const;
+
+private:
+ bool isOperator() const;
+ bool isMatrixMultiply(const Type& left, const Type& right) const;
+
+ Kind fKind;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/SkSLPosition.h b/gfx/skia/skia/include/sksl/SkSLPosition.h
new file mode 100644
index 0000000000..5f8e80a607
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/SkSLPosition.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_POSITION
+#define SKSL_POSITION
+
+#include "include/core/SkTypes.h"
+
+#include <cstdint>
+#include <string_view>
+
+namespace SkSL {
+
+class Position {
+public:
+ Position()
+ : fStartOffset(-1)
+ , fLength(0) {}
+
+ static Position Range(int startOffset, int endOffset) {
+ SkASSERT(startOffset <= endOffset);
+ SkASSERT(startOffset <= 0xFFFFFF);
+ int length = endOffset - startOffset;
+ Position result;
+ result.fStartOffset = startOffset;
+ result.fLength = length <= 0xFF ? length : 0xFF;
+ return result;
+ }
+
+ bool valid() const {
+ return fStartOffset != -1;
+ }
+
+ int line(std::string_view source) const;
+
+ int startOffset() const {
+ SkASSERT(this->valid());
+ return fStartOffset;
+ }
+
+ int endOffset() const {
+ SkASSERT(this->valid());
+ return fStartOffset + fLength;
+ }
+
+ // Returns the position from this through, and including the entirety of, end.
+ Position rangeThrough(Position end) const {
+ if (fStartOffset == -1 || end.fStartOffset == -1) {
+ return *this;
+ }
+ SkASSERTF(this->startOffset() <= end.startOffset() && this->endOffset() <= end.endOffset(),
+ "Invalid range: (%d-%d) - (%d-%d)\n", this->startOffset(), this->endOffset(),
+ end.startOffset(), end.endOffset());
+ return Range(this->startOffset(), end.endOffset());
+ }
+
+ // Returns a position representing the character immediately after this position
+ Position after() const {
+ int endOffset = this->endOffset();
+ return Range(endOffset, endOffset + 1);
+ }
+
+ bool operator==(const Position& other) const {
+ return fStartOffset == other.fStartOffset && fLength == other.fLength;
+ }
+
+ bool operator!=(const Position& other) const {
+ return !(*this == other);
+ }
+
+ bool operator>(const Position& other) const {
+ return fStartOffset > other.fStartOffset;
+ }
+
+ bool operator>=(const Position& other) const {
+ return fStartOffset >= other.fStartOffset;
+ }
+
+ bool operator<(const Position& other) const {
+ return fStartOffset < other.fStartOffset;
+ }
+
+ bool operator<=(const Position& other) const {
+ return fStartOffset <= other.fStartOffset;
+ }
+
+private:
+ int32_t fStartOffset : 24;
+ uint32_t fLength : 8;
+};
+
+struct ForLoopPositions {
+ Position initPosition = Position();
+ Position conditionPosition = Position();
+ Position nextPosition = Position();
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/sksl/SkSLVersion.h b/gfx/skia/skia/include/sksl/SkSLVersion.h
new file mode 100644
index 0000000000..ad059d580e
--- /dev/null
+++ b/gfx/skia/skia/include/sksl/SkSLVersion.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLVersion_DEFINED
+#define SkSLVersion_DEFINED
+
+namespace SkSL {
+
+enum class Version {
+ /**
+ * Desktop GLSL 1.10, GLSL ES 1.00, WebGL 1.0
+ */
+ k100,
+
+ /**
+ * Desktop GLSL 3.30, GLSL ES 3.00, WebGL 2.0
+ */
+ k300,
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkAnimCodecPlayer.h b/gfx/skia/skia/include/utils/SkAnimCodecPlayer.h
new file mode 100644
index 0000000000..f4729aa37d
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkAnimCodecPlayer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnimCodecPlayer_DEFINED
+#define SkAnimCodecPlayer_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+class SkImage;
+
+class SkAnimCodecPlayer {
+public:
+ SkAnimCodecPlayer(std::unique_ptr<SkCodec> codec);
+ ~SkAnimCodecPlayer();
+
+ /**
+ * Returns the current frame of the animation. This defaults to the first frame for
+ * animated codecs (i.e. msec = 0). Calling this multiple times (without calling seek())
+ * will always return the same image object (or null if there was an error).
+ */
+ sk_sp<SkImage> getFrame();
+
+ /**
+ * Return the size of the image(s) that will be returned by getFrame().
+ */
+ SkISize dimensions() const;
+
+ /**
+ * Returns the total duration of the animation in milliseconds. Returns 0 for a single-frame
+ * image.
+ */
+ uint32_t duration() const { return fTotalDuration; }
+
+ /**
+ * Finds the closest frame associated with the time code (in milliseconds) and sets that
+ * to be the current frame (call getFrame() to retrieve that image).
+ * Returns true iff this call to seek() changed the "current frame" for the animation.
+ * Thus if seek() returns false, then getFrame() will return the same image as it did
+ * before this call to seek().
+ */
+ bool seek(uint32_t msec);
+
+
+private:
+ std::unique_ptr<SkCodec> fCodec;
+ SkImageInfo fImageInfo;
+ std::vector<SkCodec::FrameInfo> fFrameInfos;
+ std::vector<sk_sp<SkImage> > fImages;
+ int fCurrIndex = 0;
+ uint32_t fTotalDuration;
+
+ sk_sp<SkImage> getFrameAt(int index);
+};
+
+#endif
+
diff --git a/gfx/skia/skia/include/utils/SkBase64.h b/gfx/skia/skia/include/utils/SkBase64.h
new file mode 100644
index 0000000000..e01028543a
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkBase64.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBase64_DEFINED
+#define SkBase64_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <cstddef>
+
+struct SkBase64 {
+public:
+ enum Error {
+ kNoError,
+ kPadError,
+ kBadCharError
+ };
+
+ /**
+ Base64 encodes src into dst.
+
+ Normally this is called once with 'dst' nullptr to get the required size, then again with an
+ allocated 'dst' pointer to do the actual encoding.
+
+ @param dst nullptr or a pointer to a buffer large enough to receive the result
+
+ @param encode nullptr for default encoding or a pointer to at least 65 chars.
+ encode[64] will be used as the pad character.
+ Encodings other than the default encoding cannot be decoded.
+
+ @return the required length of dst for encoding.
+ */
+ static size_t Encode(const void* src, size_t length, void* dst, const char* encode = nullptr);
+
+ /**
+ Base64 decodes src into dst.
+
+ Normally this is called once with 'dst' nullptr to get the required size, then again with an
+ allocated 'dst' pointer to do the actual decoding.
+
+ @param dst nullptr or a pointer to a buffer large enough to receive the result
+
+ @param dstLength assigned the length dst is required to be. Must not be nullptr.
+ */
+ static Error SK_WARN_UNUSED_RESULT Decode(const void* src, size_t srcLength,
+ void* dst, size_t* dstLength);
+};
+
+#endif // SkBase64_DEFINED
diff --git a/gfx/skia/skia/include/utils/SkCamera.h b/gfx/skia/skia/include/utils/SkCamera.h
new file mode 100644
index 0000000000..536691875e
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkCamera.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Inspired by Rob Johnson's most excellent QuickDraw GX sample code
+
+#ifndef SkCamera_DEFINED
+#define SkCamera_DEFINED
+
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkNoncopyable.h"
+
+// NOTE -- This entire header / impl is deprecated, and will be removed from Skia soon.
+//
+// Skia now has support for a 4x matrix (SkM44) in SkCanvas.
+//
+
+class SkCanvas;
+
+// DEPRECATED
+class SkPatch3D {
+public:
+ SkPatch3D();
+
+ void reset();
+ void transform(const SkM44&, SkPatch3D* dst = nullptr) const;
+
+ // dot a unit vector with the patch's normal
+ SkScalar dotWith(SkScalar dx, SkScalar dy, SkScalar dz) const;
+ SkScalar dotWith(const SkV3& v) const {
+ return this->dotWith(v.x, v.y, v.z);
+ }
+
+ // deprecated, but still here for animator (for now)
+ void rotate(SkScalar /*x*/, SkScalar /*y*/, SkScalar /*z*/) {}
+ void rotateDegrees(SkScalar /*x*/, SkScalar /*y*/, SkScalar /*z*/) {}
+
+private:
+public: // make public for SkDraw3D for now
+ SkV3 fU, fV;
+ SkV3 fOrigin;
+
+ friend class SkCamera3D;
+};
+
+// DEPRECATED
+class SkCamera3D {
+public:
+ SkCamera3D();
+
+ void reset();
+ void update();
+ void patchToMatrix(const SkPatch3D&, SkMatrix* matrix) const;
+
+ SkV3 fLocation; // origin of the camera's space
+ SkV3 fAxis; // view direction
+ SkV3 fZenith; // up direction
+ SkV3 fObserver; // eye position (may not be the same as the origin)
+
+private:
+ mutable SkMatrix fOrientation;
+ mutable bool fNeedToUpdate;
+
+ void doUpdate() const;
+};
+
+// DEPRECATED
+class SK_API Sk3DView : SkNoncopyable {
+public:
+ Sk3DView();
+ ~Sk3DView();
+
+ void save();
+ void restore();
+
+ void translate(SkScalar x, SkScalar y, SkScalar z);
+ void rotateX(SkScalar deg);
+ void rotateY(SkScalar deg);
+ void rotateZ(SkScalar deg);
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ void setCameraLocation(SkScalar x, SkScalar y, SkScalar z);
+ SkScalar getCameraLocationX() const;
+ SkScalar getCameraLocationY() const;
+ SkScalar getCameraLocationZ() const;
+#endif
+
+ void getMatrix(SkMatrix*) const;
+ void applyToCanvas(SkCanvas*) const;
+
+ SkScalar dotWithNormal(SkScalar dx, SkScalar dy, SkScalar dz) const;
+
+private:
+ struct Rec {
+ Rec* fNext;
+ SkM44 fMatrix;
+ };
+ Rec* fRec;
+ Rec fInitialRec;
+ SkCamera3D fCamera;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkCanvasStateUtils.h b/gfx/skia/skia/include/utils/SkCanvasStateUtils.h
new file mode 100644
index 0000000000..0172e37931
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkCanvasStateUtils.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasStateUtils_DEFINED
+#define SkCanvasStateUtils_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <memory>
+
+class SkCanvas;
+class SkCanvasState;
+
+/**
+ * A set of functions that are useful for copying the state of an SkCanvas
+ * across a library boundary where the Skia library on the other side of the
+ * boundary may be newer. The expected usage is outlined below...
+ *
+ * Lib Boundary
+ * CaptureCanvasState(...) |||
+ * SkCanvas --> SkCanvasState |||
+ * ||| CreateFromCanvasState(...)
+ * ||| SkCanvasState --> SkCanvas`
+ * ||| Draw into SkCanvas`
+ * ||| Unref SkCanvas`
+ * ReleaseCanvasState(...) |||
+ *
+ */
+class SK_API SkCanvasStateUtils {
+public:
+ /**
+ * Captures the current state of the canvas into an opaque ptr that is safe
+ * to pass to a different instance of Skia (which may be the same version,
+ * or may be newer). The function will return NULL in the event that one of the
+ * following conditions is true.
+ * 1) the canvas device type is not supported (currently only raster is supported)
+ * 2) the canvas clip type is not supported (currently only non-AA clips are supported)
+ *
+ * It is recommended that the original canvas also not be used until all
+ * canvases that have been created using its captured state have been dereferenced.
+ *
+ * Finally, it is important to note that any draw filters attached to the
+ * canvas are NOT currently captured.
+ *
+ * @param canvas The canvas you wish to capture the current state of.
+ * @return NULL or an opaque ptr that can be passed to CreateFromCanvasState
+ * to reconstruct the canvas. The caller is responsible for calling
+ * ReleaseCanvasState to free the memory associated with this state.
+ */
+ static SkCanvasState* CaptureCanvasState(SkCanvas* canvas);
+
+ /**
+ * Create a new SkCanvas from the captured state of another SkCanvas. The
+ * function will return NULL in the event that one of the
+ * following conditions is true.
+ * 1) the captured state is in an unrecognized format
+ * 2) the captured canvas device type is not supported
+ *
+ * @param state Opaque object created by CaptureCanvasState.
+ * @return NULL or an SkCanvas* whose devices and matrix/clip state are
+ * identical to the captured canvas. The caller is responsible for
+ * calling unref on the SkCanvas.
+ */
+ static std::unique_ptr<SkCanvas> MakeFromCanvasState(const SkCanvasState* state);
+
+ /**
+ * Free the memory associated with the captured canvas state. The state
+ * should not be released until all SkCanvas objects created using that
+ * state have been dereferenced. Must be called from the same library
+ * instance that created the state via CaptureCanvasState.
+ *
+ * @param state The captured state you wish to dispose of.
+ */
+ static void ReleaseCanvasState(SkCanvasState* state);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkCustomTypeface.h b/gfx/skia/skia/include/utils/SkCustomTypeface.h
new file mode 100644
index 0000000000..d387fb24ca
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkCustomTypeface.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCustomTypeface_DEFINED
+#define SkCustomTypeface_DEFINED
+
+#include "include/core/SkDrawable.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+
+#include <memory>
+#include <vector>
+
+class SkStream;
+class SkStreamAsset;
+struct SkFontArguments;
+
+class SK_API SkCustomTypefaceBuilder {
+public:
+ SkCustomTypefaceBuilder();
+
+ void setGlyph(SkGlyphID, float advance, const SkPath&);
+ void setGlyph(SkGlyphID, float advance, sk_sp<SkDrawable>, const SkRect& bounds);
+
+ void setMetrics(const SkFontMetrics& fm, float scale = 1);
+ void setFontStyle(SkFontStyle);
+
+ sk_sp<SkTypeface> detach();
+
+ static constexpr SkTypeface::FactoryId FactoryId = SkSetFourByteTag('u','s','e','r');
+ static sk_sp<SkTypeface> MakeFromStream(std::unique_ptr<SkStreamAsset>, const SkFontArguments&);
+
+private:
+ struct GlyphRec {
+ // logical union
+ SkPath fPath;
+ sk_sp<SkDrawable> fDrawable;
+
+ SkRect fBounds = {0,0,0,0}; // only used for drawable glyphs atm
+ float fAdvance = 0;
+
+ bool isDrawable() const {
+ SkASSERT(!fDrawable || fPath.isEmpty());
+ return fDrawable != nullptr;
+ }
+ };
+
+ std::vector<GlyphRec> fGlyphRecs;
+ SkFontMetrics fMetrics;
+ SkFontStyle fStyle;
+
+ GlyphRec& ensureStorage(SkGlyphID);
+
+ static sk_sp<SkTypeface> Deserialize(SkStream*);
+
+ friend class SkTypeface;
+ friend class SkUserTypeface;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkEventTracer.h b/gfx/skia/skia/include/utils/SkEventTracer.h
new file mode 100644
index 0000000000..2ec0a3b355
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkEventTracer.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 Google Inc. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEventTracer_DEFINED
+#define SkEventTracer_DEFINED
+
+// The class in this header defines the interface between Skia's internal
+// tracing macros and an external entity (e.g., Chrome) that will consume them.
+// Such an entity should subclass SkEventTracer and provide an instance of
+// that event to SkEventTracer::SetInstance.
+
+// If you're looking for the tracing macros to instrument Skia itself, those
+// live in src/core/SkTraceEvent.h
+
+#include "include/core/SkTypes.h"
+
+#include <cstdint>
+
+class SK_API SkEventTracer {
+public:
+
+ typedef uint64_t Handle;
+
+ /**
+ * If this is the first call to SetInstance or GetInstance then the passed instance is
+ * installed and true is returned. Otherwise, false is returned. In either case ownership of the
+ * tracer is transferred and it will be deleted when no longer needed.
+ *
+ * Not deleting the tracer on process exit should not cause problems as
+ * the whole heap is about to go away with the process. This can also
+ * improve performance by reducing the amount of work needed.
+ *
+ * @param leakTracer Do not delete tracer on process exit.
+ */
+ static bool SetInstance(SkEventTracer*, bool leakTracer = false);
+
+ /**
+ * Gets the event tracer. If this is the first call to SetInstance or GetInstance then a default
+ * event tracer is installed and returned.
+ */
+ static SkEventTracer* GetInstance();
+
+ virtual ~SkEventTracer() = default;
+
+ // The pointer returned from GetCategoryGroupEnabled() points to a
+ // value with zero or more of the following bits. Used in this class only.
+ // The TRACE_EVENT macros should only use the value as a bool.
+ // These values must be in sync with macro values in trace_event.h in chromium.
+ enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
+ // Category group enabled for the monitoring mode.
+ kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1,
+ // Category group enabled by SetEventCallbackEnabled().
+ kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
+ };
+
+ virtual const uint8_t* getCategoryGroupEnabled(const char* name) = 0;
+ virtual const char* getCategoryGroupName(const uint8_t* categoryEnabledFlag) = 0;
+
+ virtual SkEventTracer::Handle
+ addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int32_t numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) = 0;
+
+ virtual void
+ updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) = 0;
+
+ // Optional method that can be implemented to allow splitting up traces into different sections.
+ virtual void newTracingSection(const char*) {}
+
+protected:
+ SkEventTracer() = default;
+ SkEventTracer(const SkEventTracer&) = delete;
+ SkEventTracer& operator=(const SkEventTracer&) = delete;
+};
+
+#endif // SkEventTracer_DEFINED
diff --git a/gfx/skia/skia/include/utils/SkNWayCanvas.h b/gfx/skia/skia/include/utils/SkNWayCanvas.h
new file mode 100644
index 0000000000..87c6916b39
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNWayCanvas.h
@@ -0,0 +1,133 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNWayCanvas_DEFINED
+#define SkNWayCanvas_DEFINED
+
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkM44.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/utils/SkNoDrawCanvas.h"
+
+#include <cstddef>
+
+namespace sktext {
+class GlyphRunList;
+}
+
+class SkCanvas;
+class SkData;
+class SkDrawable;
+class SkImage;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkPicture;
+class SkRRect;
+class SkRegion;
+class SkShader;
+class SkTextBlob;
+class SkVertices;
+enum class SkBlendMode;
+enum class SkClipOp;
+struct SkDrawShadowRec;
+struct SkPoint;
+struct SkRSXform;
+struct SkRect;
+
+#if defined(SK_GANESH)
+namespace sktext::gpu {
+class Slug;
+}
+#endif
+
+class SK_API SkNWayCanvas : public SkCanvasVirtualEnforcer<SkNoDrawCanvas> {
+public:
+ SkNWayCanvas(int width, int height);
+ ~SkNWayCanvas() override;
+
+ virtual void addCanvas(SkCanvas*);
+ virtual void removeCanvas(SkCanvas*);
+ virtual void removeAll();
+
+protected:
+ SkTDArray<SkCanvas*> fList;
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ bool onDoSaveBehind(const SkRect*) override;
+ void willRestore() override;
+
+ void didConcat44(const SkM44&) override;
+ void didSetM44(const SkM44&) override;
+ void didScale(SkScalar, SkScalar) override;
+ void didTranslate(SkScalar, SkScalar) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawGlyphRunList(const sktext::GlyphRunList&, const SkPaint&) override;
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+#if defined(SK_GANESH)
+ void onDrawSlug(const sktext::gpu::Slug* slug) override;
+#endif
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode, const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+
+ void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&,
+ const SkPaint*) override;
+ void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode,
+ const SkPaint*) override;
+ void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override;
+
+ void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override;
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override;
+
+ void onClipRect(const SkRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, SkClipOp, ClipEdgeStyle) override;
+ void onClipShader(sk_sp<SkShader>, SkClipOp) override;
+ void onClipRegion(const SkRegion&, SkClipOp) override;
+ void onResetClip() override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet2(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkSamplingOptions&,const SkPaint*, SrcRectConstraint) override;
+
+ void onFlush() override;
+
+ class Iter;
+
+private:
+ using INHERITED = SkCanvasVirtualEnforcer<SkNoDrawCanvas>;
+};
+
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkNoDrawCanvas.h b/gfx/skia/skia/include/utils/SkNoDrawCanvas.h
new file mode 100644
index 0000000000..3f25638738
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNoDrawCanvas.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNoDrawCanvas_DEFINED
+#define SkNoDrawCanvas_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkCanvasVirtualEnforcer.h"
+
+struct SkIRect;
+
+// SkNoDrawCanvas is a helper for SkCanvas subclasses which do not need to
+// actually rasterize (e.g., analysis of the draw calls).
+//
+// It provides the following simplifications:
+//
+// * not backed by any device/pixels
+// * conservative clipping (clipping calls only use rectangles)
+//
+class SK_API SkNoDrawCanvas : public SkCanvasVirtualEnforcer<SkCanvas> {
+public:
+ SkNoDrawCanvas(int width, int height);
+ SkNoDrawCanvas(const SkIRect&);
+
+ explicit SkNoDrawCanvas(sk_sp<SkBaseDevice> device);
+
+ // Optimization to reset state to be the same as after construction.
+ void resetCanvas(int w, int h) { this->resetForNextPicture(SkIRect::MakeWH(w, h)); }
+ void resetCanvas(const SkIRect& rect) { this->resetForNextPicture(rect); }
+
+protected:
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec& rec) override;
+ bool onDoSaveBehind(const SkRect*) override;
+
+ // No-op overrides for aborting rasterization earlier than SkNullBlitter.
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override {}
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override {}
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override {}
+ void onDrawTextBlob(const SkTextBlob*, SkScalar, SkScalar, const SkPaint&) override {}
+ void onDrawPatch(const SkPoint[12], const SkColor[4], const SkPoint[4], SkBlendMode,
+ const SkPaint&) override {}
+
+ void onDrawPaint(const SkPaint&) override {}
+ void onDrawBehind(const SkPaint&) override {}
+ void onDrawPoints(PointMode, size_t, const SkPoint[], const SkPaint&) override {}
+ void onDrawRect(const SkRect&, const SkPaint&) override {}
+ void onDrawRegion(const SkRegion&, const SkPaint&) override {}
+ void onDrawOval(const SkRect&, const SkPaint&) override {}
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override {}
+ void onDrawRRect(const SkRRect&, const SkPaint&) override {}
+ void onDrawPath(const SkPath&, const SkPaint&) override {}
+
+ void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&,
+ const SkPaint*) override {}
+ void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&,
+ const SkPaint*, SrcRectConstraint) override {}
+ void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode,
+ const SkPaint*) override {}
+ void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override {}
+
+ void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override {}
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override {}
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override {}
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override {}
+ void onDrawEdgeAAImageSet2(const ImageSetEntry[], int, const SkPoint[], const SkMatrix[],
+ const SkSamplingOptions&, const SkPaint*,
+ SrcRectConstraint) override {}
+
+private:
+ using INHERITED = SkCanvasVirtualEnforcer<SkCanvas>;
+};
+
+#endif // SkNoDrawCanvas_DEFINED
diff --git a/gfx/skia/skia/include/utils/SkNullCanvas.h b/gfx/skia/skia/include/utils/SkNullCanvas.h
new file mode 100644
index 0000000000..a77e3e3de9
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkNullCanvas.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNullCanvas_DEFINED
+#define SkNullCanvas_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <memory>
+
+class SkCanvas;
+
+/**
+ * Creates a canvas that draws nothing. This is useful for performance testing.
+ */
+SK_API std::unique_ptr<SkCanvas> SkMakeNullCanvas();
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkOrderedFontMgr.h b/gfx/skia/skia/include/utils/SkOrderedFontMgr.h
new file mode 100644
index 0000000000..a03bfdb541
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkOrderedFontMgr.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOrderedFontMgr_DEFINED
+#define SkOrderedFontMgr_DEFINED
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#include <memory>
+#include <vector>
+
+class SkData;
+class SkFontStyle;
+class SkStreamAsset;
+class SkString;
+class SkTypeface;
+struct SkFontArguments;
+
+/**
+ * Collects an ordered list of other font managers, and visits them in order
+ * when a request to find or match is issued.
+ *
+ * Note: this explicitly fails on any attempt to Make a typeface: all of
+ * those requests will return null.
+ */
+class SK_API SkOrderedFontMgr : public SkFontMgr {
+public:
+ SkOrderedFontMgr();
+ ~SkOrderedFontMgr() override;
+
+ void append(sk_sp<SkFontMgr>);
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onCreateStyleSet(int index)const override;
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[], const SkFontStyle&) const override;
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override;
+
+ // Note: all of these always return null
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>,
+ int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>,
+ const SkFontArguments&) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ std::vector<sk_sp<SkFontMgr>> fList;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h b/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h
new file mode 100644
index 0000000000..9a836bc7c2
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkPaintFilterCanvas.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintFilterCanvas_DEFINED
+#define SkPaintFilterCanvas_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/utils/SkNWayCanvas.h"
+
+#include <cstddef>
+
+namespace sktext {
+class GlyphRunList;
+}
+
+class GrRecordingContext;
+class SkData;
+class SkDrawable;
+class SkImage;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkPicture;
+class SkPixmap;
+class SkRRect;
+class SkRegion;
+class SkSurface;
+class SkSurfaceProps;
+class SkTextBlob;
+class SkVertices;
+enum class SkBlendMode;
+struct SkDrawShadowRec;
+struct SkPoint;
+struct SkRSXform;
+struct SkRect;
+
+/** \class SkPaintFilterCanvas
+
+ A utility proxy base class for implementing draw/paint filters.
+*/
+class SK_API SkPaintFilterCanvas : public SkCanvasVirtualEnforcer<SkNWayCanvas> {
+public:
+ /**
+ * The new SkPaintFilterCanvas is configured for forwarding to the
+ * specified canvas. Also copies the target canvas matrix and clip bounds.
+ */
+ SkPaintFilterCanvas(SkCanvas* canvas);
+
+ enum Type {
+ kPicture_Type,
+ };
+
+ // Forwarded to the wrapped canvas.
+ SkISize getBaseLayerSize() const override { return proxy()->getBaseLayerSize(); }
+ GrRecordingContext* recordingContext() override { return proxy()->recordingContext(); }
+
+protected:
+ /**
+ * Called with the paint that will be used to draw the specified type.
+ * The implementation may modify the paint as they wish.
+ *
+ * The result bool is used to determine whether the draw op is to be
+ * executed (true) or skipped (false).
+ *
+ * Note: The base implementation calls onFilter() for top-level/explicit paints only.
+ * To also filter encapsulated paints (e.g. SkPicture, SkTextBlob), clients may need to
+ * override the relevant methods (i.e. drawPicture, drawTextBlob).
+ */
+ virtual bool onFilter(SkPaint& paint) const = 0;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+
+ void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&,
+ const SkPaint*) override;
+ void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode,
+ const SkPaint*) override;
+ void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override;
+
+ void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override;
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode,
+ const SkPaint& paint) override;
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+
+ void onDrawGlyphRunList(const sktext::GlyphRunList&, const SkPaint&) override;
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+ void onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) override;
+ void onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet2(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkSamplingOptions&,const SkPaint*, SrcRectConstraint) override;
+
+ // Forwarded to the wrapped canvas.
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+ bool onPeekPixels(SkPixmap* pixmap) override;
+ bool onAccessTopLayerPixels(SkPixmap* pixmap) override;
+ SkImageInfo onImageInfo() const override;
+ bool onGetProps(SkSurfaceProps* props, bool top) const override;
+
+private:
+ class AutoPaintFilter;
+
+ SkCanvas* proxy() const { SkASSERT(fList.size() == 1); return fList[0]; }
+
+ SkPaintFilterCanvas* internal_private_asPaintFilterCanvas() const override {
+ return const_cast<SkPaintFilterCanvas*>(this);
+ }
+
+ friend class SkAndroidFrameworkUtils;
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkParse.h b/gfx/skia/skia/include/utils/SkParse.h
new file mode 100644
index 0000000000..bcabc3c793
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkParse.h
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkParse_DEFINED
+#define SkParse_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SK_API SkParse {
+public:
+ static int Count(const char str[]); // number of scalars or int values
+ static int Count(const char str[], char separator);
+ static const char* FindColor(const char str[], SkColor* value);
+ static const char* FindHex(const char str[], uint32_t* value);
+ static const char* FindMSec(const char str[], SkMSec* value);
+ static const char* FindNamedColor(const char str[], size_t len, SkColor* color);
+ static const char* FindS32(const char str[], int32_t* value);
+ static const char* FindScalar(const char str[], SkScalar* value);
+ static const char* FindScalars(const char str[], SkScalar value[], int count);
+
+ static bool FindBool(const char str[], bool* value);
+ // return the index of str in list[], or -1 if not found
+ static int FindList(const char str[], const char list[]);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkParsePath.h b/gfx/skia/skia/include/utils/SkParsePath.h
new file mode 100644
index 0000000000..acd0ef2305
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkParsePath.h
@@ -0,0 +1,25 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkParsePath_DEFINED
+#define SkParsePath_DEFINED
+
+#include "include/core/SkPath.h"
+
+class SkString;
+
+class SK_API SkParsePath {
+public:
+ static bool FromSVGString(const char str[], SkPath*);
+
+ enum class PathEncoding { Absolute, Relative };
+ static SkString ToSVGString(const SkPath&, PathEncoding = PathEncoding::Absolute);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkShadowUtils.h b/gfx/skia/skia/include/utils/SkShadowUtils.h
new file mode 100644
index 0000000000..b7c43d569f
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkShadowUtils.h
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkShadowUtils_DEFINED
+#define SkShadowUtils_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkShadowFlags.h"
+
+#include <cstdint>
+
+class SkCanvas;
+class SkMatrix;
+class SkPath;
+struct SkPoint3;
+struct SkRect;
+
+class SK_API SkShadowUtils {
+public:
+ /**
+ * Draw an offset spot shadow and outlining ambient shadow for the given path using a disc
+ * light. The shadow may be cached, depending on the path type and canvas matrix. If the
+ * matrix is perspective or the path is volatile, it will not be cached.
+ *
+ * @param canvas The canvas on which to draw the shadows.
+ * @param path The occluder used to generate the shadows.
+ * @param zPlaneParams Values for the plane function which returns the Z offset of the
+ * occluder from the canvas based on local x and y values (the current matrix is not applied).
+ * @param lightPos Generally, the 3D position of the light relative to the canvas plane.
+ * If kDirectionalLight_ShadowFlag is set, this specifies a vector pointing
+ * towards the light.
+ * @param lightRadius Generally, the radius of the disc light.
+ *                     If kDirectionalLight_ShadowFlag is set, this specifies the amount of
+ * blur when the occluder is at Z offset == 1. The blur will grow linearly
+ * as the Z value increases.
+ * @param ambientColor The color of the ambient shadow.
+ * @param spotColor The color of the spot shadow.
+ * @param flags Options controlling opaque occluder optimizations, shadow appearance,
+ * and light position. See SkShadowFlags.
+ */
+ static void DrawShadow(SkCanvas* canvas, const SkPath& path, const SkPoint3& zPlaneParams,
+ const SkPoint3& lightPos, SkScalar lightRadius,
+ SkColor ambientColor, SkColor spotColor,
+ uint32_t flags = SkShadowFlags::kNone_ShadowFlag);
+
+ /**
+ * Generate bounding box for shadows relative to path. Includes both the ambient and spot
+ * shadow bounds.
+ *
+ * @param ctm Current transformation matrix to device space.
+ * @param path The occluder used to generate the shadows.
+ * @param zPlaneParams Values for the plane function which returns the Z offset of the
+ * occluder from the canvas based on local x and y values (the current matrix is not applied).
+ * @param lightPos Generally, the 3D position of the light relative to the canvas plane.
+ * If kDirectionalLight_ShadowFlag is set, this specifies a vector pointing
+ * towards the light.
+ * @param lightRadius Generally, the radius of the disc light.
+ *                     If kDirectionalLight_ShadowFlag is set, this specifies the amount of
+ * blur when the occluder is at Z offset == 1. The blur will grow linearly
+ * as the Z value increases.
+ * @param flags Options controlling opaque occluder optimizations, shadow appearance,
+ * and light position. See SkShadowFlags.
+ * @param bounds Return value for shadow bounding box.
+ * @return Returns true if successful, false otherwise.
+ */
+ static bool GetLocalBounds(const SkMatrix& ctm, const SkPath& path,
+ const SkPoint3& zPlaneParams, const SkPoint3& lightPos,
+ SkScalar lightRadius, uint32_t flags, SkRect* bounds);
+
+ /**
+ * Helper routine to compute color values for one-pass tonal alpha.
+ *
+ * @param inAmbientColor Original ambient color
+ * @param inSpotColor Original spot color
+ * @param outAmbientColor Modified ambient color
+ * @param outSpotColor Modified spot color
+ */
+ static void ComputeTonalColors(SkColor inAmbientColor, SkColor inSpotColor,
+ SkColor* outAmbientColor, SkColor* outSpotColor);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkTextUtils.h b/gfx/skia/skia/include/utils/SkTextUtils.h
new file mode 100644
index 0000000000..b73ab771e8
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkTextUtils.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextUtils_DEFINED
+#define SkTextUtils_DEFINED
+
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+#include <cstddef>
+#include <string>
+
+class SkCanvas;
+class SkFont;
+class SkPaint;
+class SkPath;
+
+class SK_API SkTextUtils {
+public:
+ enum Align {
+ kLeft_Align,
+ kCenter_Align,
+ kRight_Align,
+ };
+
+ static void Draw(SkCanvas*, const void* text, size_t size, SkTextEncoding,
+ SkScalar x, SkScalar y, const SkFont&, const SkPaint&, Align = kLeft_Align);
+
+ static void DrawString(SkCanvas* canvas, const char text[], SkScalar x, SkScalar y,
+ const SkFont& font, const SkPaint& paint, Align align = kLeft_Align) {
+ Draw(canvas, text, strlen(text), SkTextEncoding::kUTF8, x, y, font, paint, align);
+ }
+
+ static void GetPath(const void* text, size_t length, SkTextEncoding, SkScalar x, SkScalar y,
+ const SkFont&, SkPath*);
+};
+
+#endif
diff --git a/gfx/skia/skia/include/utils/SkTraceEventPhase.h b/gfx/skia/skia/include/utils/SkTraceEventPhase.h
new file mode 100644
index 0000000000..38457be24b
--- /dev/null
+++ b/gfx/skia/skia/include/utils/SkTraceEventPhase.h
@@ -0,0 +1,19 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef SkTraceEventPhase_DEFINED
+#define SkTraceEventPhase_DEFINED
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+
+#endif // SkTraceEventPhase_DEFINED
diff --git a/gfx/skia/skia/include/utils/mac/SkCGUtils.h b/gfx/skia/skia/include/utils/mac/SkCGUtils.h
new file mode 100644
index 0000000000..a320dd8d4c
--- /dev/null
+++ b/gfx/skia/skia/include/utils/mac/SkCGUtils.h
@@ -0,0 +1,78 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkCGUtils_DEFINED
+#define SkCGUtils_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkSize.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#endif
+
+class SkBitmap;
+class SkData;
+class SkPixmap;
+class SkStreamRewindable;
+
+SK_API CGContextRef SkCreateCGContext(const SkPixmap&);
+
+/**
+ * Given a CGImage, allocate an SkBitmap and copy the image's pixels into it. If scaleToFit is not
+ * null, use it to determine the size of the bitmap, and scale the image to fill the bitmap.
+ * Otherwise use the image's width/height.
+ *
+ * On failure, return false, and leave bitmap unchanged.
+ */
+SK_API bool SkCreateBitmapFromCGImage(SkBitmap* dst, CGImageRef src);
+
+SK_API sk_sp<SkImage> SkMakeImageFromCGImage(CGImageRef);
+
+/**
+ * Copy the pixels from src into the memory specified by info/rowBytes/dstPixels. On failure,
+ * return false (e.g. ImageInfo incompatible with src).
+ */
+SK_API bool SkCopyPixelsFromCGImage(const SkImageInfo& info, size_t rowBytes, void* dstPixels,
+ CGImageRef src);
+static inline bool SkCopyPixelsFromCGImage(const SkPixmap& dst, CGImageRef src) {
+ return SkCopyPixelsFromCGImage(dst.info(), dst.rowBytes(), dst.writable_addr(), src);
+}
+
+/**
+ * Create an imageref from the specified bitmap using the specified colorspace.
+ * If space is NULL, then CGColorSpaceCreateDeviceRGB() is used.
+ */
+SK_API CGImageRef SkCreateCGImageRefWithColorspace(const SkBitmap& bm,
+ CGColorSpaceRef space);
+
+/**
+ * Create an imageref from the specified bitmap using the colorspace returned
+ * by CGColorSpaceCreateDeviceRGB()
+ */
+static inline CGImageRef SkCreateCGImageRef(const SkBitmap& bm) {
+ return SkCreateCGImageRefWithColorspace(bm, nil);
+}
+
+/**
+ * Draw the bitmap into the specified CG context. The bitmap will be converted
+ * to a CGImage using the generic RGB colorspace. (x,y) specifies the position
+ * of the top-left corner of the bitmap. The bitmap is converted using the
+ * colorspace returned by CGColorSpaceCreateDeviceRGB()
+ */
+void SkCGDrawBitmap(CGContextRef, const SkBitmap&, float x, float y);
+
+#endif // defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#endif // SkCGUtils_DEFINED
diff --git a/gfx/skia/skia/modules/skcms/README.chromium b/gfx/skia/skia/modules/skcms/README.chromium
new file mode 100644
index 0000000000..046f6b1d19
--- /dev/null
+++ b/gfx/skia/skia/modules/skcms/README.chromium
@@ -0,0 +1,5 @@
+Name: skcms
+URL: https://skia.org/
+Version: unknown
+Security Critical: yes
+License: BSD
diff --git a/gfx/skia/skia/modules/skcms/skcms.cc b/gfx/skia/skia/modules/skcms/skcms.cc
new file mode 100644
index 0000000000..246c08af94
--- /dev/null
+++ b/gfx/skia/skia/modules/skcms/skcms.cc
@@ -0,0 +1,3064 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "skcms.h"
+#include "skcms_internal.h"
+#include <assert.h>
+#include <float.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(__ARM_NEON)
+ #include <arm_neon.h>
+#elif defined(__SSE__)
+ #include <immintrin.h>
+
+ #if defined(__clang__)
+ // That #include <immintrin.h> is usually enough, but Clang's headers
+ // "helpfully" skip including the whole kitchen sink when _MSC_VER is
+ // defined, because lots of programs on Windows would include that and
+ // it'd be a lot slower. But we want all those headers included so we
+ // can use their features after runtime checks later.
+ #include <smmintrin.h>
+ #include <avxintrin.h>
+ #include <avx2intrin.h>
+ #include <avx512fintrin.h>
+ #include <avx512dqintrin.h>
+ #endif
+#endif
+
+static bool runtime_cpu_detection = true;
+void skcms_DisableRuntimeCPUDetection() {
+ runtime_cpu_detection = false;
+}
+
+// sizeof(x) will return size_t, which is 32-bit on some machines and 64-bit on others.
+// We have better testing on 64-bit machines, so force 32-bit machines to behave like 64-bit.
+//
+// Please do not use sizeof() directly, and size_t only when required.
+// (We have no way of enforcing these requests...)
+#define SAFE_SIZEOF(x) ((uint64_t)sizeof(x))
+
+// Same sort of thing for _Layout structs with a variable sized array at the end (named "variable").
+#define SAFE_FIXED_SIZE(type) ((uint64_t)offsetof(type, variable))
+
+static const union {
+ uint32_t bits;
+ float f;
+} inf_ = { 0x7f800000 };
+#define INFINITY_ inf_.f
+
+#if defined(__clang__) || defined(__GNUC__)
+ #define small_memcpy __builtin_memcpy
+#else
+ #define small_memcpy memcpy
+#endif
+
+static float log2f_(float x) {
+ // The first approximation of log2(x) is its exponent 'e', minus 127.
+ int32_t bits;
+ small_memcpy(&bits, &x, sizeof(bits));
+
+ float e = (float)bits * (1.0f / (1<<23));
+
+    // If we use the mantissa too we can refine the error significantly.
+ int32_t m_bits = (bits & 0x007fffff) | 0x3f000000;
+ float m;
+ small_memcpy(&m, &m_bits, sizeof(m));
+
+ return (e - 124.225514990f
+ - 1.498030302f*m
+ - 1.725879990f/(0.3520887068f + m));
+}
+static float logf_(float x) {
+ const float ln2 = 0.69314718f;
+ return ln2*log2f_(x);
+}
+
+static float exp2f_(float x) {
+ float fract = x - floorf_(x);
+
+ float fbits = (1.0f * (1<<23)) * (x + 121.274057500f
+ - 1.490129070f*fract
+ + 27.728023300f/(4.84252568f - fract));
+
+ // Before we cast fbits to int32_t, check for out of range values to pacify UBSAN.
+ // INT_MAX is not exactly representable as a float, so exclude it as effectively infinite.
+ // Negative values are effectively underflow - we'll end up returning a (different) negative
+ // value, which makes no sense. So clamp to zero.
+ if (fbits >= (float)INT_MAX) {
+ return INFINITY_;
+ } else if (fbits < 0) {
+ return 0;
+ }
+
+ int32_t bits = (int32_t)fbits;
+ small_memcpy(&x, &bits, sizeof(x));
+ return x;
+}
+
+// Not static, as it's used by some test tools.
+float powf_(float x, float y) {
+ assert (x >= 0);
+ return (x == 0) || (x == 1) ? x
+ : exp2f_(log2f_(x) * y);
+}
+
+static float expf_(float x) {
+ const float log2_e = 1.4426950408889634074f;
+ return exp2f_(log2_e * x);
+}
+
+static float fmaxf_(float x, float y) { return x > y ? x : y; }
+static float fminf_(float x, float y) { return x < y ? x : y; }
+
+static bool isfinitef_(float x) { return 0 == x*0; }
+
+static float minus_1_ulp(float x) {
+ int32_t bits;
+ memcpy(&bits, &x, sizeof(bits));
+ bits = bits - 1;
+ memcpy(&x, &bits, sizeof(bits));
+ return x;
+}
+
+// Most transfer functions we work with are sRGBish.
+// For exotic HDR transfer functions, we encode them using a tf.g that makes no sense,
+// and repurpose the other fields to hold the parameters of the HDR functions.
+struct TF_PQish { float A,B,C,D,E,F; };
+struct TF_HLGish { float R,G,a,b,c,K_minus_1; };
+// We didn't originally support a scale factor K for HLG, and instead just stored 0 in
+// the unused `f` field of skcms_TransferFunction for HLGish and HLGInvish transfer functions.
+// By storing f=K-1, those old unused f=0 values now mean K=1, a noop scale factor.
+
+static float TFKind_marker(skcms_TFType kind) {
+ // We'd use different NaNs, but those aren't guaranteed to be preserved by WASM.
+ return -(float)kind;
+}
+
+static skcms_TFType classify(const skcms_TransferFunction& tf, TF_PQish* pq = nullptr
+ , TF_HLGish* hlg = nullptr) {
+ if (tf.g < 0 && static_cast<float>(static_cast<int>(tf.g)) == tf.g) {
+ // TODO: soundness checks for PQ/HLG like we do for sRGBish?
+ switch ((int)tf.g) {
+ case -skcms_TFType_PQish:
+ if (pq) {
+ memcpy(pq , &tf.a, sizeof(*pq ));
+ }
+ return skcms_TFType_PQish;
+ case -skcms_TFType_HLGish:
+ if (hlg) {
+ memcpy(hlg, &tf.a, sizeof(*hlg));
+ }
+ return skcms_TFType_HLGish;
+ case -skcms_TFType_HLGinvish:
+ if (hlg) {
+ memcpy(hlg, &tf.a, sizeof(*hlg));
+ }
+ return skcms_TFType_HLGinvish;
+ }
+ return skcms_TFType_Invalid;
+ }
+
+ // Basic soundness checks for sRGBish transfer functions.
+ if (isfinitef_(tf.a + tf.b + tf.c + tf.d + tf.e + tf.f + tf.g)
+ // a,c,d,g should be non-negative to make any sense.
+ && tf.a >= 0
+ && tf.c >= 0
+ && tf.d >= 0
+ && tf.g >= 0
+ // Raising a negative value to a fractional tf->g produces complex numbers.
+ && tf.a * tf.d + tf.b >= 0) {
+ return skcms_TFType_sRGBish;
+ }
+
+ return skcms_TFType_Invalid;
+}
+
+skcms_TFType skcms_TransferFunction_getType(const skcms_TransferFunction* tf) {
+ return classify(*tf);
+}
+bool skcms_TransferFunction_isSRGBish(const skcms_TransferFunction* tf) {
+ return classify(*tf) == skcms_TFType_sRGBish;
+}
+bool skcms_TransferFunction_isPQish(const skcms_TransferFunction* tf) {
+ return classify(*tf) == skcms_TFType_PQish;
+}
+bool skcms_TransferFunction_isHLGish(const skcms_TransferFunction* tf) {
+ return classify(*tf) == skcms_TFType_HLGish;
+}
+
+bool skcms_TransferFunction_makePQish(skcms_TransferFunction* tf,
+ float A, float B, float C,
+ float D, float E, float F) {
+ *tf = { TFKind_marker(skcms_TFType_PQish), A,B,C,D,E,F };
+ assert(skcms_TransferFunction_isPQish(tf));
+ return true;
+}
+
+bool skcms_TransferFunction_makeScaledHLGish(skcms_TransferFunction* tf,
+ float K, float R, float G,
+ float a, float b, float c) {
+ *tf = { TFKind_marker(skcms_TFType_HLGish), R,G, a,b,c, K-1.0f };
+ assert(skcms_TransferFunction_isHLGish(tf));
+ return true;
+}
+
+float skcms_TransferFunction_eval(const skcms_TransferFunction* tf, float x) {
+ float sign = x < 0 ? -1.0f : 1.0f;
+ x *= sign;
+
+ TF_PQish pq;
+ TF_HLGish hlg;
+ switch (classify(*tf, &pq, &hlg)) {
+ case skcms_TFType_Invalid: break;
+
+ case skcms_TFType_HLGish: {
+ const float K = hlg.K_minus_1 + 1.0f;
+ return K * sign * (x*hlg.R <= 1 ? powf_(x*hlg.R, hlg.G)
+ : expf_((x-hlg.c)*hlg.a) + hlg.b);
+ }
+
+ // skcms_TransferFunction_invert() inverts R, G, and a for HLGinvish so this math is fast.
+ case skcms_TFType_HLGinvish: {
+ const float K = hlg.K_minus_1 + 1.0f;
+ x /= K;
+ return sign * (x <= 1 ? hlg.R * powf_(x, hlg.G)
+ : hlg.a * logf_(x - hlg.b) + hlg.c);
+ }
+
+ case skcms_TFType_sRGBish:
+ return sign * (x < tf->d ? tf->c * x + tf->f
+ : powf_(tf->a * x + tf->b, tf->g) + tf->e);
+
+ case skcms_TFType_PQish: return sign * powf_(fmaxf_(pq.A + pq.B * powf_(x, pq.C), 0)
+ / (pq.D + pq.E * powf_(x, pq.C)),
+ pq.F);
+ }
+ return 0;
+}
+
+
+static float eval_curve(const skcms_Curve* curve, float x) {
+ if (curve->table_entries == 0) {
+ return skcms_TransferFunction_eval(&curve->parametric, x);
+ }
+
+ float ix = fmaxf_(0, fminf_(x, 1)) * static_cast<float>(curve->table_entries - 1);
+ int lo = (int) ix ,
+ hi = (int)(float)minus_1_ulp(ix + 1.0f);
+ float t = ix - (float)lo;
+
+ float l, h;
+ if (curve->table_8) {
+ l = curve->table_8[lo] * (1/255.0f);
+ h = curve->table_8[hi] * (1/255.0f);
+ } else {
+ uint16_t be_l, be_h;
+ memcpy(&be_l, curve->table_16 + 2*lo, 2);
+ memcpy(&be_h, curve->table_16 + 2*hi, 2);
+ uint16_t le_l = ((be_l << 8) | (be_l >> 8)) & 0xffff;
+ uint16_t le_h = ((be_h << 8) | (be_h >> 8)) & 0xffff;
+ l = le_l * (1/65535.0f);
+ h = le_h * (1/65535.0f);
+ }
+ return l + (h-l)*t;
+}
+
+float skcms_MaxRoundtripError(const skcms_Curve* curve, const skcms_TransferFunction* inv_tf) {
+ uint32_t N = curve->table_entries > 256 ? curve->table_entries : 256;
+ const float dx = 1.0f / static_cast<float>(N - 1);
+ float err = 0;
+ for (uint32_t i = 0; i < N; i++) {
+ float x = static_cast<float>(i) * dx,
+ y = eval_curve(curve, x);
+ err = fmaxf_(err, fabsf_(x - skcms_TransferFunction_eval(inv_tf, y)));
+ }
+ return err;
+}
+
+bool skcms_AreApproximateInverses(const skcms_Curve* curve, const skcms_TransferFunction* inv_tf) {
+ return skcms_MaxRoundtripError(curve, inv_tf) < (1/512.0f);
+}
+
+// Additional ICC signature values that are only used internally
+enum {
+ // File signature
+ skcms_Signature_acsp = 0x61637370,
+
+ // Tag signatures
+ skcms_Signature_rTRC = 0x72545243,
+ skcms_Signature_gTRC = 0x67545243,
+ skcms_Signature_bTRC = 0x62545243,
+ skcms_Signature_kTRC = 0x6B545243,
+
+ skcms_Signature_rXYZ = 0x7258595A,
+ skcms_Signature_gXYZ = 0x6758595A,
+ skcms_Signature_bXYZ = 0x6258595A,
+
+ skcms_Signature_A2B0 = 0x41324230,
+ skcms_Signature_B2A0 = 0x42324130,
+
+ skcms_Signature_CHAD = 0x63686164,
+ skcms_Signature_WTPT = 0x77747074,
+
+ skcms_Signature_CICP = 0x63696370,
+
+ // Type signatures
+ skcms_Signature_curv = 0x63757276,
+ skcms_Signature_mft1 = 0x6D667431,
+ skcms_Signature_mft2 = 0x6D667432,
+ skcms_Signature_mAB = 0x6D414220,
+ skcms_Signature_mBA = 0x6D424120,
+ skcms_Signature_para = 0x70617261,
+ skcms_Signature_sf32 = 0x73663332,
+ // XYZ is also a PCS signature, so it's defined in skcms.h
+ // skcms_Signature_XYZ = 0x58595A20,
+};
+
+static uint16_t read_big_u16(const uint8_t* ptr) {
+ uint16_t be;
+ memcpy(&be, ptr, sizeof(be));
+#if defined(_MSC_VER)
+ return _byteswap_ushort(be);
+#else
+ return __builtin_bswap16(be);
+#endif
+}
+
+static uint32_t read_big_u32(const uint8_t* ptr) {
+ uint32_t be;
+ memcpy(&be, ptr, sizeof(be));
+#if defined(_MSC_VER)
+ return _byteswap_ulong(be);
+#else
+ return __builtin_bswap32(be);
+#endif
+}
+
+static int32_t read_big_i32(const uint8_t* ptr) {
+ return (int32_t)read_big_u32(ptr);
+}
+
+static float read_big_fixed(const uint8_t* ptr) {
+ return static_cast<float>(read_big_i32(ptr)) * (1.0f / 65536.0f);
+}
+
+// Maps to an in-memory profile so that fields line up to the locations specified
+// in ICC.1:2010, section 7.2
+typedef struct {
+ uint8_t size [ 4];
+ uint8_t cmm_type [ 4];
+ uint8_t version [ 4];
+ uint8_t profile_class [ 4];
+ uint8_t data_color_space [ 4];
+ uint8_t pcs [ 4];
+ uint8_t creation_date_time [12];
+ uint8_t signature [ 4];
+ uint8_t platform [ 4];
+ uint8_t flags [ 4];
+ uint8_t device_manufacturer [ 4];
+ uint8_t device_model [ 4];
+ uint8_t device_attributes [ 8];
+ uint8_t rendering_intent [ 4];
+ uint8_t illuminant_X [ 4];
+ uint8_t illuminant_Y [ 4];
+ uint8_t illuminant_Z [ 4];
+ uint8_t creator [ 4];
+ uint8_t profile_id [16];
+ uint8_t reserved [28];
+ uint8_t tag_count [ 4]; // Technically not part of header, but required
+} header_Layout;
+
+typedef struct {
+ uint8_t signature [4];
+ uint8_t offset [4];
+ uint8_t size [4];
+} tag_Layout;
+
+static const tag_Layout* get_tag_table(const skcms_ICCProfile* profile) {
+ return (const tag_Layout*)(profile->buffer + SAFE_SIZEOF(header_Layout));
+}
+
+// s15Fixed16ArrayType is technically variable sized, holding N values. However, the only valid
+// use of the type is for the CHAD tag that stores exactly nine values.
+typedef struct {
+ uint8_t type [ 4];
+ uint8_t reserved [ 4];
+ uint8_t values [36];
+} sf32_Layout;
+
+// Extracts the chromatic adaptation (CHAD) 3x3 matrix from the profile, if present
+// and well-formed. Returns false if the tag is missing, the wrong type, or too small.
+bool skcms_GetCHAD(const skcms_ICCProfile* profile, skcms_Matrix3x3* m) {
+    skcms_ICCTag tag;
+    if (!skcms_GetTagBySignature(profile, skcms_Signature_CHAD, &tag)) {
+        return false;
+    }
+
+    if (tag.type != skcms_Signature_sf32 || tag.size < SAFE_SIZEOF(sf32_Layout)) {
+        return false;
+    }
+
+    const sf32_Layout* sf32Tag = (const sf32_Layout*)tag.buf;
+    const uint8_t* values = sf32Tag->values;
+    // Nine values are stored row-major; each is a 4-byte s15Fixed16.
+    for (int r = 0; r < 3; ++r)
+    for (int c = 0; c < 3; ++c, values += 4) {
+        m->vals[r][c] = read_big_fixed(values);
+    }
+    return true;
+}
+
+// XYZType is technically variable sized, holding N XYZ triples. However, the only valid uses of
+// the type are for tags/data that store exactly one triple.
+typedef struct {
+    uint8_t type     [4];
+    uint8_t reserved [4];
+    uint8_t X        [4]; // s15Fixed16
+    uint8_t Y        [4]; // s15Fixed16
+    uint8_t Z        [4]; // s15Fixed16
+} XYZ_Layout;
+
+// Decodes a single XYZ triple from an XYZType tag. Fails on wrong type or short data.
+static bool read_tag_xyz(const skcms_ICCTag* tag, float* x, float* y, float* z) {
+    if (tag->type != skcms_Signature_XYZ || tag->size < SAFE_SIZEOF(XYZ_Layout)) {
+        return false;
+    }
+
+    const XYZ_Layout* xyzTag = (const XYZ_Layout*)tag->buf;
+
+    *x = read_big_fixed(xyzTag->X);
+    *y = read_big_fixed(xyzTag->Y);
+    *z = read_big_fixed(xyzTag->Z);
+    return true;
+}
+
+// Reads the media white point (WTPT tag) into xyz[0..2]. Returns false if absent/malformed.
+bool skcms_GetWTPT(const skcms_ICCProfile* profile, float xyz[3]) {
+    skcms_ICCTag tag;
+    return skcms_GetTagBySignature(profile, skcms_Signature_WTPT, &tag) &&
+           read_tag_xyz(&tag, &xyz[0], &xyz[1], &xyz[2]);
+}
+
+// Builds the RGB->XYZD50 matrix: each colorant tag supplies one *column* of the matrix.
+static bool read_to_XYZD50(const skcms_ICCTag* rXYZ, const skcms_ICCTag* gXYZ,
+                           const skcms_ICCTag* bXYZ, skcms_Matrix3x3* toXYZ) {
+    return read_tag_xyz(rXYZ, &toXYZ->vals[0][0], &toXYZ->vals[1][0], &toXYZ->vals[2][0]) &&
+           read_tag_xyz(gXYZ, &toXYZ->vals[0][1], &toXYZ->vals[1][1], &toXYZ->vals[2][1]) &&
+           read_tag_xyz(bXYZ, &toXYZ->vals[0][2], &toXYZ->vals[1][2], &toXYZ->vals[2][2]);
+}
+
+// parametricCurveType ("para") layout, ICC.1:2010 section 10.16.
+typedef struct {
+    uint8_t type          [4];
+    uint8_t reserved_a    [4];
+    uint8_t function_type [2];
+    uint8_t reserved_b    [2];
+    uint8_t variable      [1/*variable*/];  // 1, 3, 4, 5, or 7 s15.16, depending on function_type
+} para_Layout;
+
+// Parses a parametricCurveType tag into the 7-parameter skcms_TransferFunction form.
+// On success, optionally reports the number of bytes consumed via *curve_size.
+// Note: the ICC 3- and 4-parameter forms (kGAB, kGABC) are lowered into the general
+// form by deriving d (the linear/power split point) and f from a, b, e.
+static bool read_curve_para(const uint8_t* buf, uint32_t size,
+                            skcms_Curve* curve, uint32_t* curve_size) {
+    if (size < SAFE_FIXED_SIZE(para_Layout)) {
+        return false;
+    }
+
+    const para_Layout* paraTag = (const para_Layout*)buf;
+
+    enum { kG = 0, kGAB = 1, kGABC = 2, kGABCD = 3, kGABCDEF = 4 };
+    uint16_t function_type = read_big_u16(paraTag->function_type);
+    if (function_type > kGABCDEF) {
+        return false;
+    }
+
+    // Bytes of parameter payload for each function type (1, 3, 4, 5, 7 values * 4 bytes).
+    static const uint32_t curve_bytes[] = { 4, 12, 16, 20, 28 };
+    if (size < SAFE_FIXED_SIZE(para_Layout) + curve_bytes[function_type]) {
+        return false;
+    }
+
+    if (curve_size) {
+        *curve_size = SAFE_FIXED_SIZE(para_Layout) + curve_bytes[function_type];
+    }
+
+    // Start from the identity-ish defaults; every form stores g first.
+    curve->table_entries = 0;
+    curve->parametric.a  = 1.0f;
+    curve->parametric.b  = 0.0f;
+    curve->parametric.c  = 0.0f;
+    curve->parametric.d  = 0.0f;
+    curve->parametric.e  = 0.0f;
+    curve->parametric.f  = 0.0f;
+    curve->parametric.g  = read_big_fixed(paraTag->variable);
+
+    switch (function_type) {
+        case kGAB:
+            curve->parametric.a = read_big_fixed(paraTag->variable + 4);
+            curve->parametric.b = read_big_fixed(paraTag->variable + 8);
+            if (curve->parametric.a == 0) {
+                return false;  // d = -b/a would divide by zero
+            }
+            curve->parametric.d = -curve->parametric.b / curve->parametric.a;
+            break;
+        case kGABC:
+            curve->parametric.a = read_big_fixed(paraTag->variable + 4);
+            curve->parametric.b = read_big_fixed(paraTag->variable + 8);
+            curve->parametric.e = read_big_fixed(paraTag->variable + 12);
+            if (curve->parametric.a == 0) {
+                return false;  // d = -b/a would divide by zero
+            }
+            curve->parametric.d = -curve->parametric.b / curve->parametric.a;
+            curve->parametric.f = curve->parametric.e;
+            break;
+        case kGABCD:
+            curve->parametric.a = read_big_fixed(paraTag->variable + 4);
+            curve->parametric.b = read_big_fixed(paraTag->variable + 8);
+            curve->parametric.c = read_big_fixed(paraTag->variable + 12);
+            curve->parametric.d = read_big_fixed(paraTag->variable + 16);
+            break;
+        case kGABCDEF:
+            curve->parametric.a = read_big_fixed(paraTag->variable + 4);
+            curve->parametric.b = read_big_fixed(paraTag->variable + 8);
+            curve->parametric.c = read_big_fixed(paraTag->variable + 12);
+            curve->parametric.d = read_big_fixed(paraTag->variable + 16);
+            curve->parametric.e = read_big_fixed(paraTag->variable + 20);
+            curve->parametric.f = read_big_fixed(paraTag->variable + 24);
+            break;
+    }
+    // Reject parameter combinations that don't describe an sRGB-shaped curve.
+    return skcms_TransferFunction_isSRGBish(&curve->parametric);
+}
+
+// curveType ("curv") layout, ICC.1:2010 section 10.6.
+typedef struct {
+    uint8_t type        [4];
+    uint8_t reserved    [4];
+    uint8_t value_count [4];
+    uint8_t variable    [1/*variable*/];  // value_count, 8.8 if 1, uint16 (n*65535) if > 1
+} curv_Layout;
+
+// Parses a curveType tag. Zero entries means identity, one entry is an 8.8 fixed-point
+// gamma exponent, and two or more entries form a 16-bit lookup table (stored by pointer
+// into the profile buffer, not copied). Optionally reports bytes consumed.
+static bool read_curve_curv(const uint8_t* buf, uint32_t size,
+                            skcms_Curve* curve, uint32_t* curve_size) {
+    if (size < SAFE_FIXED_SIZE(curv_Layout)) {
+        return false;
+    }
+
+    const curv_Layout* curvTag = (const curv_Layout*)buf;
+
+    uint32_t value_count = read_big_u32(curvTag->value_count);
+    if (size < SAFE_FIXED_SIZE(curv_Layout) + value_count * SAFE_SIZEOF(uint16_t)) {
+        return false;
+    }
+
+    if (curve_size) {
+        *curve_size = SAFE_FIXED_SIZE(curv_Layout) + value_count * SAFE_SIZEOF(uint16_t);
+    }
+
+    if (value_count < 2) {
+        // Represent the curve parametrically rather than as a table.
+        curve->table_entries = 0;
+        curve->parametric.a  = 1.0f;
+        curve->parametric.b  = 0.0f;
+        curve->parametric.c  = 0.0f;
+        curve->parametric.d  = 0.0f;
+        curve->parametric.e  = 0.0f;
+        curve->parametric.f  = 0.0f;
+        if (value_count == 0) {
+            // Empty tables are a shorthand for an identity curve
+            curve->parametric.g = 1.0f;
+        } else {
+            // Single entry tables are a shorthand for simple gamma
+            curve->parametric.g = read_big_u16(curvTag->variable) * (1.0f / 256.0f);
+        }
+    } else {
+        // Keep a non-owning pointer to the big-endian 16-bit table in the profile buffer.
+        curve->table_8       = nullptr;
+        curve->table_16      = curvTag->variable;
+        curve->table_entries = value_count;
+    }
+
+    return true;
+}
+
+// Parses both curveType and parametricCurveType data. Ensures that at most 'size' bytes are read.
+// If curve_size is not nullptr, writes the number of bytes used by the curve in (*curve_size).
+// Parses both curveType and parametricCurveType data. Ensures that at most 'size' bytes are read.
+// If curve_size is not nullptr, writes the number of bytes used by the curve in (*curve_size).
+// 'size >= 4' guarantees the type signature itself can be read.
+static bool read_curve(const uint8_t* buf, uint32_t size,
+                       skcms_Curve* curve, uint32_t* curve_size) {
+    if (!buf || size < 4 || !curve) {
+        return false;
+    }
+
+    uint32_t type = read_big_u32(buf);
+    if (type == skcms_Signature_para) {
+        return read_curve_para(buf, size, curve, curve_size);
+    } else if (type == skcms_Signature_curv) {
+        return read_curve_curv(buf, size, curve, curve_size);
+    }
+
+    return false;
+}
+
+// mft1 and mft2 share a large chunk of data
+// mft1 and mft2 share a large chunk of data
+typedef struct {
+    uint8_t type            [ 4];
+    uint8_t reserved_a      [ 4];
+    uint8_t input_channels  [ 1];
+    uint8_t output_channels [ 1];
+    uint8_t grid_points     [ 1]; // same grid size along every input axis
+    uint8_t reserved_b      [ 1];
+    uint8_t matrix          [36]; // 3x3 s15Fixed16, ignored here (see read_mft_common)
+} mft_CommonLayout;
+
+// lut8Type: 8-bit tables with an implicit 256 entries per curve.
+typedef struct {
+    mft_CommonLayout common [1];
+
+    uint8_t variable [1/*variable*/];
+} mft1_Layout;
+
+// lut16Type: 16-bit tables with explicit entry counts.
+typedef struct {
+    mft_CommonLayout common [1];
+
+    uint8_t input_table_entries  [2];
+    uint8_t output_table_entries [2];
+    uint8_t variable             [1/*variable*/];
+} mft2_Layout;
+
+// Validates the shared mft header fields and fills in the channel/grid shape of an A2B.
+static bool read_mft_common(const mft_CommonLayout* mftTag, skcms_A2B* a2b) {
+    // MFT matrices are applied before the first set of curves, but must be identity unless the
+    // input is PCSXYZ. We don't support PCSXYZ profiles, so we ignore this matrix. Note that the
+    // matrix in skcms_A2B is applied later in the pipe, so supporting this would require another
+    // field/flag.
+    a2b->matrix_channels = 0;
+    a2b-> input_channels = mftTag-> input_channels[0];
+    a2b->output_channels = mftTag->output_channels[0];
+
+    // We require exactly three (ie XYZ/Lab/RGB) output channels
+    if (a2b->output_channels != ARRAY_COUNT(a2b->output_curves)) {
+        return false;
+    }
+    // We require at least one, and no more than four (ie CMYK) input channels
+    if (a2b->input_channels < 1 || a2b->input_channels > ARRAY_COUNT(a2b->input_curves)) {
+        return false;
+    }
+
+    // mft tags use a single grid size for every input axis.
+    for (uint32_t i = 0; i < a2b->input_channels; ++i) {
+        a2b->grid_points[i] = mftTag->grid_points[0];
+    }
+    // The grid only makes sense with at least two points along each axis
+    if (a2b->grid_points[0] < 2) {
+        return false;
+    }
+    return true;
+}
+
+// All as the A2B version above, except where noted.
+// All as the A2B version above, except where noted.
+static bool read_mft_common(const mft_CommonLayout* mftTag, skcms_B2A* b2a) {
+    // Same as A2B.
+    b2a->matrix_channels = 0;
+    b2a-> input_channels = mftTag-> input_channels[0];
+    b2a->output_channels = mftTag->output_channels[0];
+
+
+    // For B2A, exactly 3 input channels (XYZ) and 3 (RGB) or 4 (CMYK) output channels.
+    if (b2a->input_channels != ARRAY_COUNT(b2a->input_curves)) {
+        return false;
+    }
+    if (b2a->output_channels < 3 || b2a->output_channels > ARRAY_COUNT(b2a->output_curves)) {
+        return false;
+    }
+
+    // Same as A2B.
+    for (uint32_t i = 0; i < b2a->input_channels; ++i) {
+        b2a->grid_points[i] = mftTag->grid_points[0];
+    }
+    if (b2a->grid_points[0] < 2) {
+        return false;
+    }
+    return true;
+}
+
+// Wires up the input curves, CLUT grid, and output curves of an mft1/mft2 tag as
+// non-owning pointers into [table_base, table_base + max_tables_len). byte_width is
+// 1 (mft1) or 2 (mft2) and selects the table_8/table_16 and grid_8/grid_16 variants.
+// Returns false if the declared shapes don't fit in max_tables_len.
+// Precondition: out->input_channels/output_channels/grid_points already validated by
+// read_mft_common.
+template <typename A2B_or_B2A>
+static bool init_tables(const uint8_t* table_base, uint64_t max_tables_len, uint32_t byte_width,
+                        uint32_t input_table_entries, uint32_t output_table_entries,
+                        A2B_or_B2A* out) {
+    // byte_width is 1 or 2, [input|output]_table_entries are in [2, 4096], so no overflow
+    uint32_t byte_len_per_input_table  = input_table_entries * byte_width;
+    uint32_t byte_len_per_output_table = output_table_entries * byte_width;
+
+    // [input|output]_channels are <= 4, so still no overflow
+    uint32_t byte_len_all_input_tables  = out->input_channels * byte_len_per_input_table;
+    uint32_t byte_len_all_output_tables = out->output_channels * byte_len_per_output_table;
+
+    // Grid payload size is computed in 64 bits: up to 4 axes of up to 255 points each.
+    uint64_t grid_size = out->output_channels * byte_width;
+    for (uint32_t axis = 0; axis < out->input_channels; ++axis) {
+        grid_size *= out->grid_points[axis];
+    }
+
+    if (max_tables_len < byte_len_all_input_tables + grid_size + byte_len_all_output_tables) {
+        return false;
+    }
+
+    // Data layout: [input tables][grid][output tables], all contiguous.
+    for (uint32_t i = 0; i < out->input_channels; ++i) {
+        out->input_curves[i].table_entries = input_table_entries;
+        if (byte_width == 1) {
+            out->input_curves[i].table_8  = table_base + i * byte_len_per_input_table;
+            out->input_curves[i].table_16 = nullptr;
+        } else {
+            out->input_curves[i].table_8  = nullptr;
+            out->input_curves[i].table_16 = table_base + i * byte_len_per_input_table;
+        }
+    }
+
+    if (byte_width == 1) {
+        out->grid_8  = table_base + byte_len_all_input_tables;
+        out->grid_16 = nullptr;
+    } else {
+        out->grid_8  = nullptr;
+        out->grid_16 = table_base + byte_len_all_input_tables;
+    }
+
+    const uint8_t* output_table_base = table_base + byte_len_all_input_tables + grid_size;
+    for (uint32_t i = 0; i < out->output_channels; ++i) {
+        out->output_curves[i].table_entries = output_table_entries;
+        if (byte_width == 1) {
+            out->output_curves[i].table_8  = output_table_base + i * byte_len_per_output_table;
+            out->output_curves[i].table_16 = nullptr;
+        } else {
+            out->output_curves[i].table_8  = nullptr;
+            out->output_curves[i].table_16 = output_table_base + i * byte_len_per_output_table;
+        }
+    }
+
+    return true;
+}
+
+// Parses a lut8Type (mft1) tag. 8-bit tables always have exactly 256 entries.
+template <typename A2B_or_B2A>
+static bool read_tag_mft1(const skcms_ICCTag* tag, A2B_or_B2A* out) {
+    if (tag->size < SAFE_FIXED_SIZE(mft1_Layout)) {
+        return false;
+    }
+
+    const mft1_Layout* mftTag = (const mft1_Layout*)tag->buf;
+    if (!read_mft_common(mftTag->common, out)) {
+        return false;
+    }
+
+    // mft1 table sizes are fixed by the spec.
+    uint32_t input_table_entries  = 256;
+    uint32_t output_table_entries = 256;
+
+    return init_tables(mftTag->variable, tag->size - SAFE_FIXED_SIZE(mft1_Layout), 1,
+                       input_table_entries, output_table_entries, out);
+}
+
+// Parses a lut16Type (mft2) tag, whose table entry counts are explicit in the data.
+template <typename A2B_or_B2A>
+static bool read_tag_mft2(const skcms_ICCTag* tag, A2B_or_B2A* out) {
+    if (tag->size < SAFE_FIXED_SIZE(mft2_Layout)) {
+        return false;
+    }
+
+    const mft2_Layout* mftTag = (const mft2_Layout*)tag->buf;
+    if (!read_mft_common(mftTag->common, out)) {
+        return false;
+    }
+
+    uint32_t input_table_entries  = read_big_u16(mftTag->input_table_entries);
+    uint32_t output_table_entries = read_big_u16(mftTag->output_table_entries);
+
+    // ICC spec mandates that 2 <= table_entries <= 4096
+    if (input_table_entries  < 2 || input_table_entries  > 4096 ||
+        output_table_entries < 2 || output_table_entries > 4096) {
+        return false;
+    }
+
+    return init_tables(mftTag->variable, tag->size - SAFE_FIXED_SIZE(mft2_Layout), 2,
+                       input_table_entries, output_table_entries, out);
+}
+
+// Parses num_curves consecutive curves starting at buf+curve_offset, writing them into
+// curves[]. Each curve's start is advanced by its size rounded up to a 4-byte boundary,
+// per the ICC alignment rule. All offset arithmetic is overflow-checked.
+static bool read_curves(const uint8_t* buf, uint32_t size, uint32_t curve_offset,
+                        uint32_t num_curves, skcms_Curve* curves) {
+    for (uint32_t i = 0; i < num_curves; ++i) {
+        if (curve_offset > size) {
+            return false;
+        }
+
+        uint32_t curve_bytes;
+        if (!read_curve(buf + curve_offset, size - curve_offset, &curves[i], &curve_bytes)) {
+            return false;
+        }
+
+        // Guard the +3 below against uint32 wraparound.
+        if (curve_bytes > UINT32_MAX - 3) {
+            return false;
+        }
+        curve_bytes = (curve_bytes + 3) & ~3U;  // round up to 4-byte alignment
+
+        // Detect overflow when advancing to the next curve.
+        uint64_t new_offset_64 = (uint64_t)curve_offset + curve_bytes;
+        curve_offset = (uint32_t)new_offset_64;
+        if (new_offset_64 != curve_offset) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+// mAB and mBA tags use the same encoding, including color lookup tables.
+// mAB and mBA tags use the same encoding, including color lookup tables.
+// All element offsets below are relative to the start of the tag; 0 means "absent".
+typedef struct {
+    uint8_t type            [ 4];
+    uint8_t reserved_a      [ 4];
+    uint8_t input_channels  [ 1];
+    uint8_t output_channels [ 1];
+    uint8_t reserved_b      [ 2];
+    uint8_t b_curve_offset  [ 4];
+    uint8_t matrix_offset   [ 4];
+    uint8_t m_curve_offset  [ 4];
+    uint8_t clut_offset     [ 4];
+    uint8_t a_curve_offset  [ 4];
+} mAB_or_mBA_Layout;
+
+// Color lookup table header inside an mAB/mBA tag; grid data follows in 'variable'.
+typedef struct {
+    uint8_t grid_points     [16]; // one grid size per input axis (up to 16 declared)
+    uint8_t grid_byte_width [ 1]; // 1 or 2 bytes per sample
+    uint8_t reserved        [ 3];
+    uint8_t variable        [1/*variable*/];
+} CLUT_Layout;
+
+// Parses an mAB (lutAtoBType) tag into an skcms_A2B. Pipeline order is
+// A-curves -> CLUT -> M-curves -> matrix -> B-curves; the B curves are mandatory,
+// while {M curves, matrix} and {A curves, CLUT} must each be present or absent together.
+// All pointers stored in a2b are non-owning views into tag->buf.
+static bool read_tag_mab(const skcms_ICCTag* tag, skcms_A2B* a2b, bool pcs_is_xyz) {
+    if (tag->size < SAFE_SIZEOF(mAB_or_mBA_Layout)) {
+        return false;
+    }
+
+    const mAB_or_mBA_Layout* mABTag = (const mAB_or_mBA_Layout*)tag->buf;
+
+    a2b->input_channels  = mABTag->input_channels[0];
+    a2b->output_channels = mABTag->output_channels[0];
+
+    // We require exactly three (ie XYZ/Lab/RGB) output channels
+    if (a2b->output_channels != ARRAY_COUNT(a2b->output_curves)) {
+        return false;
+    }
+    // We require no more than four (ie CMYK) input channels
+    if (a2b->input_channels > ARRAY_COUNT(a2b->input_curves)) {
+        return false;
+    }
+
+    uint32_t b_curve_offset = read_big_u32(mABTag->b_curve_offset);
+    uint32_t matrix_offset  = read_big_u32(mABTag->matrix_offset);
+    uint32_t m_curve_offset = read_big_u32(mABTag->m_curve_offset);
+    uint32_t clut_offset    = read_big_u32(mABTag->clut_offset);
+    uint32_t a_curve_offset = read_big_u32(mABTag->a_curve_offset);
+
+    // "B" curves must be present
+    if (0 == b_curve_offset) {
+        return false;
+    }
+
+    if (!read_curves(tag->buf, tag->size, b_curve_offset, a2b->output_channels,
+                     a2b->output_curves)) {
+        return false;
+    }
+
+    // "M" curves and Matrix must be used together
+    if (0 != m_curve_offset) {
+        if (0 == matrix_offset) {
+            return false;
+        }
+        a2b->matrix_channels = a2b->output_channels;
+        if (!read_curves(tag->buf, tag->size, m_curve_offset, a2b->matrix_channels,
+                         a2b->matrix_curves)) {
+            return false;
+        }
+
+        // Read matrix, which is stored as a row-major 3x3, followed by the fourth column
+        if (tag->size < matrix_offset + 12 * SAFE_SIZEOF(uint32_t)) {
+            return false;
+        }
+        // When the PCS is XYZ, rescale from the ICC 1.15 encoding to skcms' range.
+        float encoding_factor = pcs_is_xyz ? (65535 / 32768.0f) : 1.0f;
+        const uint8_t* mtx_buf = tag->buf + matrix_offset;
+        a2b->matrix.vals[0][0] = encoding_factor * read_big_fixed(mtx_buf +  0);
+        a2b->matrix.vals[0][1] = encoding_factor * read_big_fixed(mtx_buf +  4);
+        a2b->matrix.vals[0][2] = encoding_factor * read_big_fixed(mtx_buf +  8);
+        a2b->matrix.vals[1][0] = encoding_factor * read_big_fixed(mtx_buf + 12);
+        a2b->matrix.vals[1][1] = encoding_factor * read_big_fixed(mtx_buf + 16);
+        a2b->matrix.vals[1][2] = encoding_factor * read_big_fixed(mtx_buf + 20);
+        a2b->matrix.vals[2][0] = encoding_factor * read_big_fixed(mtx_buf + 24);
+        a2b->matrix.vals[2][1] = encoding_factor * read_big_fixed(mtx_buf + 28);
+        a2b->matrix.vals[2][2] = encoding_factor * read_big_fixed(mtx_buf + 32);
+        a2b->matrix.vals[0][3] = encoding_factor * read_big_fixed(mtx_buf + 36);
+        a2b->matrix.vals[1][3] = encoding_factor * read_big_fixed(mtx_buf + 40);
+        a2b->matrix.vals[2][3] = encoding_factor * read_big_fixed(mtx_buf + 44);
+    } else {
+        if (0 != matrix_offset) {
+            return false;
+        }
+        a2b->matrix_channels = 0;
+    }
+
+    // "A" curves and CLUT must be used together
+    if (0 != a_curve_offset) {
+        if (0 == clut_offset) {
+            return false;
+        }
+        if (!read_curves(tag->buf, tag->size, a_curve_offset, a2b->input_channels,
+                         a2b->input_curves)) {
+            return false;
+        }
+
+        if (tag->size < clut_offset + SAFE_FIXED_SIZE(CLUT_Layout)) {
+            return false;
+        }
+        const CLUT_Layout* clut = (const CLUT_Layout*)(tag->buf + clut_offset);
+
+        if (clut->grid_byte_width[0] == 1) {
+            a2b->grid_8  = clut->variable;
+            a2b->grid_16 = nullptr;
+        } else if (clut->grid_byte_width[0] == 2) {
+            a2b->grid_8  = nullptr;
+            a2b->grid_16 = clut->variable;
+        } else {
+            return false;
+        }
+
+        // Bounds-check the grid: 64-bit math since the product can exceed uint32.
+        uint64_t grid_size = a2b->output_channels * clut->grid_byte_width[0];  // the payload
+        for (uint32_t i = 0; i < a2b->input_channels; ++i) {
+            a2b->grid_points[i] = clut->grid_points[i];
+            // The grid only makes sense with at least two points along each axis
+            if (a2b->grid_points[i] < 2) {
+                return false;
+            }
+            grid_size *= a2b->grid_points[i];
+        }
+        if (tag->size < clut_offset + SAFE_FIXED_SIZE(CLUT_Layout) + grid_size) {
+            return false;
+        }
+    } else {
+        if (0 != clut_offset) {
+            return false;
+        }
+
+        // If there is no CLUT, the number of input and output channels must match
+        if (a2b->input_channels != a2b->output_channels) {
+            return false;
+        }
+
+        // Zero out the number of input channels to signal that we're skipping this stage
+        a2b->input_channels = 0;
+    }
+
+    return true;
+}
+
+// Exactly the same as read_tag_mab(), except where there are comments.
+// TODO: refactor the two to eliminate common code?
+// Exactly the same as read_tag_mab(), except where there are comments.
+// For mBA the pipeline runs the other way: B-curves are the *inputs*, A-curves the
+// *outputs*, and matrix_channels follows input_channels (always 3, XYZ/Lab).
+// TODO: refactor the two to eliminate common code?
+static bool read_tag_mba(const skcms_ICCTag* tag, skcms_B2A* b2a, bool pcs_is_xyz) {
+    if (tag->size < SAFE_SIZEOF(mAB_or_mBA_Layout)) {
+        return false;
+    }
+
+    const mAB_or_mBA_Layout* mBATag = (const mAB_or_mBA_Layout*)tag->buf;
+
+    b2a->input_channels  = mBATag->input_channels[0];
+    b2a->output_channels = mBATag->output_channels[0];
+
+    // Require exactly 3 inputs (XYZ) and 3 (RGB) or 4 (CMYK) outputs.
+    if (b2a->input_channels != ARRAY_COUNT(b2a->input_curves)) {
+        return false;
+    }
+    if (b2a->output_channels < 3 || b2a->output_channels > ARRAY_COUNT(b2a->output_curves)) {
+        return false;
+    }
+
+    uint32_t b_curve_offset = read_big_u32(mBATag->b_curve_offset);
+    uint32_t matrix_offset  = read_big_u32(mBATag->matrix_offset);
+    uint32_t m_curve_offset = read_big_u32(mBATag->m_curve_offset);
+    uint32_t clut_offset    = read_big_u32(mBATag->clut_offset);
+    uint32_t a_curve_offset = read_big_u32(mBATag->a_curve_offset);
+
+    if (0 == b_curve_offset) {
+        return false;
+    }
+
+    // "B" curves are our inputs, not outputs.
+    if (!read_curves(tag->buf, tag->size, b_curve_offset, b2a->input_channels,
+                     b2a->input_curves)) {
+        return false;
+    }
+
+    if (0 != m_curve_offset) {
+        if (0 == matrix_offset) {
+            return false;
+        }
+        // Matrix channels is tied to input_channels (3), not output_channels.
+        b2a->matrix_channels = b2a->input_channels;
+
+        if (!read_curves(tag->buf, tag->size, m_curve_offset, b2a->matrix_channels,
+                         b2a->matrix_curves)) {
+            return false;
+        }
+
+        if (tag->size < matrix_offset + 12 * SAFE_SIZEOF(uint32_t)) {
+            return false;
+        }
+        // Inverse of the mAB factor: rescale back into the ICC XYZ encoding.
+        float encoding_factor = pcs_is_xyz ? (32768 / 65535.0f) : 1.0f;  // TODO: understand
+        const uint8_t* mtx_buf = tag->buf + matrix_offset;
+        b2a->matrix.vals[0][0] = encoding_factor * read_big_fixed(mtx_buf +  0);
+        b2a->matrix.vals[0][1] = encoding_factor * read_big_fixed(mtx_buf +  4);
+        b2a->matrix.vals[0][2] = encoding_factor * read_big_fixed(mtx_buf +  8);
+        b2a->matrix.vals[1][0] = encoding_factor * read_big_fixed(mtx_buf + 12);
+        b2a->matrix.vals[1][1] = encoding_factor * read_big_fixed(mtx_buf + 16);
+        b2a->matrix.vals[1][2] = encoding_factor * read_big_fixed(mtx_buf + 20);
+        b2a->matrix.vals[2][0] = encoding_factor * read_big_fixed(mtx_buf + 24);
+        b2a->matrix.vals[2][1] = encoding_factor * read_big_fixed(mtx_buf + 28);
+        b2a->matrix.vals[2][2] = encoding_factor * read_big_fixed(mtx_buf + 32);
+        b2a->matrix.vals[0][3] = encoding_factor * read_big_fixed(mtx_buf + 36);
+        b2a->matrix.vals[1][3] = encoding_factor * read_big_fixed(mtx_buf + 40);
+        b2a->matrix.vals[2][3] = encoding_factor * read_big_fixed(mtx_buf + 44);
+    } else {
+        if (0 != matrix_offset) {
+            return false;
+        }
+        b2a->matrix_channels = 0;
+    }
+
+    if (0 != a_curve_offset) {
+        if (0 == clut_offset) {
+            return false;
+        }
+
+        // "A" curves are our output, not input.
+        if (!read_curves(tag->buf, tag->size, a_curve_offset, b2a->output_channels,
+                         b2a->output_curves)) {
+            return false;
+        }
+
+        if (tag->size < clut_offset + SAFE_FIXED_SIZE(CLUT_Layout)) {
+            return false;
+        }
+        const CLUT_Layout* clut = (const CLUT_Layout*)(tag->buf + clut_offset);
+
+        if (clut->grid_byte_width[0] == 1) {
+            b2a->grid_8  = clut->variable;
+            b2a->grid_16 = nullptr;
+        } else if (clut->grid_byte_width[0] == 2) {
+            b2a->grid_8  = nullptr;
+            b2a->grid_16 = clut->variable;
+        } else {
+            return false;
+        }
+
+        // Bounds-check the grid payload in 64-bit math.
+        uint64_t grid_size = b2a->output_channels * clut->grid_byte_width[0];
+        for (uint32_t i = 0; i < b2a->input_channels; ++i) {
+            b2a->grid_points[i] = clut->grid_points[i];
+            if (b2a->grid_points[i] < 2) {
+                return false;
+            }
+            grid_size *= b2a->grid_points[i];
+        }
+        if (tag->size < clut_offset + SAFE_FIXED_SIZE(CLUT_Layout) + grid_size) {
+            return false;
+        }
+    } else {
+        if (0 != clut_offset) {
+            return false;
+        }
+
+        if (b2a->input_channels != b2a->output_channels) {
+            return false;
+        }
+
+        // Zero out *output* channels to skip this stage.
+        b2a->output_channels = 0;
+    }
+    return true;
+}
+
+// If you pass f, we'll fit a possibly-non-zero value for *f.
+// If you pass nullptr, we'll assume you want *f to be treated as zero.
+// If you pass f, we'll fit a possibly-non-zero value for *f.
+// If you pass nullptr, we'll assume you want *f to be treated as zero.
+// Returns the number of leading points (>= 1) of the sampled curve that are well
+// approximated by the line c*x + f within tolerance tol, writing the fitted slope to
+// *c and the x position of the last fitted point to *d.
+static int fit_linear(const skcms_Curve* curve, int N, float tol,
+                      float* c, float* d, float* f = nullptr) {
+    assert(N > 1);
+    // We iteratively fit the first points to the TF's linear piece.
+    // We want the cx + f line to pass through the first and last points we fit exactly.
+    //
+    // As we walk along the points we find the minimum and maximum slope of the line before the
+    // error would exceed our tolerance.  We stop when the range [slope_min, slope_max] becomes
+    // empty, when we definitely can't add any more points.
+    //
+    // Some points' error intervals may intersect the running interval but not lie fully
+    // within it.  So we keep track of the last point we saw that is a valid end point candidate,
+    // and once the search is done, back up to build the line through *that* point.
+    const float dx = 1.0f / static_cast<float>(N - 1);
+
+    int lin_points = 1;
+
+    float f_zero = 0.0f;
+    if (f) {
+        *f = eval_curve(curve, 0);
+    } else {
+        f = &f_zero;
+    }
+
+
+    float slope_min = -INFINITY_;
+    float slope_max = +INFINITY_;
+    for (int i = 1; i < N; ++i) {
+        float x = static_cast<float>(i) * dx;
+        float y = eval_curve(curve, x);
+
+        // The interval of slopes keeping this point within +-tol of the line.
+        float slope_max_i = (y + tol - *f) / x,
+              slope_min_i = (y - tol - *f) / x;
+        if (slope_max_i < slope_min || slope_max < slope_min_i) {
+            // Slope intervals would no longer overlap.
+            break;
+        }
+        slope_max = fminf_(slope_max, slope_max_i);
+        slope_min = fmaxf_(slope_min, slope_min_i);
+
+        // Only accept this point as the line's end if its exact slope is still feasible.
+        float cur_slope = (y - *f) / x;
+        if (slope_min <= cur_slope && cur_slope <= slope_max) {
+            lin_points = i + 1;
+            *c = cur_slope;
+        }
+    }
+
+    // Set D to the last point that met our tolerance.
+    *d = static_cast<float>(lin_points - 1) * dx;
+    return lin_points;
+}
+
+// If this skcms_Curve holds an identity table, rewrite it as an identity skcms_TransferFunction.
+static void canonicalize_identity(skcms_Curve* curve) {
+ if (curve->table_entries && curve->table_entries <= (uint32_t)INT_MAX) {
+ int N = (int)curve->table_entries;
+
+ float c = 0.0f, d = 0.0f, f = 0.0f;
+ if (N == fit_linear(curve, N, 1.0f/static_cast<float>(2*N), &c,&d,&f)
+ && c == 1.0f
+ && f == 0.0f) {
+ curve->table_entries = 0;
+ curve->table_8 = nullptr;
+ curve->table_16 = nullptr;
+ curve->parametric = skcms_TransferFunction{1,1,0,0,0,0,0};
+ }
+ }
+}
+
+// Dispatches an A2B tag to the correct parser by type (mft1/mft2/mAB), then rewrites
+// any identity lookup-table curves as parametric identities to simplify later stages.
+static bool read_a2b(const skcms_ICCTag* tag, skcms_A2B* a2b, bool pcs_is_xyz) {
+    bool ok = false;
+    if (tag->type == skcms_Signature_mft1) { ok = read_tag_mft1(tag, a2b); }
+    if (tag->type == skcms_Signature_mft2) { ok = read_tag_mft2(tag, a2b); }
+    if (tag->type == skcms_Signature_mAB ) { ok = read_tag_mab(tag, a2b, pcs_is_xyz); }
+    if (!ok) {
+        return false;
+    }
+
+    // Unrolled over the maximum channel counts (4 in, 3 matrix, 3 out).
+    if (a2b->input_channels > 0) { canonicalize_identity(a2b->input_curves + 0); }
+    if (a2b->input_channels > 1) { canonicalize_identity(a2b->input_curves + 1); }
+    if (a2b->input_channels > 2) { canonicalize_identity(a2b->input_curves + 2); }
+    if (a2b->input_channels > 3) { canonicalize_identity(a2b->input_curves + 3); }
+
+    if (a2b->matrix_channels > 0) { canonicalize_identity(a2b->matrix_curves + 0); }
+    if (a2b->matrix_channels > 1) { canonicalize_identity(a2b->matrix_curves + 1); }
+    if (a2b->matrix_channels > 2) { canonicalize_identity(a2b->matrix_curves + 2); }
+
+    if (a2b->output_channels > 0) { canonicalize_identity(a2b->output_curves + 0); }
+    if (a2b->output_channels > 1) { canonicalize_identity(a2b->output_curves + 1); }
+    if (a2b->output_channels > 2) { canonicalize_identity(a2b->output_curves + 2); }
+
+    return true;
+}
+
+// B2A counterpart of read_a2b(): dispatch by type (mft1/mft2/mBA), then canonicalize
+// identity table curves. B2A allows up to 4 output channels (CMYK), hence the extra line.
+static bool read_b2a(const skcms_ICCTag* tag, skcms_B2A* b2a, bool pcs_is_xyz) {
+    bool ok = false;
+    if (tag->type == skcms_Signature_mft1) { ok = read_tag_mft1(tag, b2a); }
+    if (tag->type == skcms_Signature_mft2) { ok = read_tag_mft2(tag, b2a); }
+    if (tag->type == skcms_Signature_mBA ) { ok = read_tag_mba(tag, b2a, pcs_is_xyz); }
+    if (!ok) {
+        return false;
+    }
+
+    if (b2a->input_channels > 0) { canonicalize_identity(b2a->input_curves + 0); }
+    if (b2a->input_channels > 1) { canonicalize_identity(b2a->input_curves + 1); }
+    if (b2a->input_channels > 2) { canonicalize_identity(b2a->input_curves + 2); }
+
+    if (b2a->matrix_channels > 0) { canonicalize_identity(b2a->matrix_curves + 0); }
+    if (b2a->matrix_channels > 1) { canonicalize_identity(b2a->matrix_curves + 1); }
+    if (b2a->matrix_channels > 2) { canonicalize_identity(b2a->matrix_curves + 2); }
+
+    if (b2a->output_channels > 0) { canonicalize_identity(b2a->output_curves + 0); }
+    if (b2a->output_channels > 1) { canonicalize_identity(b2a->output_curves + 1); }
+    if (b2a->output_channels > 2) { canonicalize_identity(b2a->output_curves + 2); }
+    if (b2a->output_channels > 3) { canonicalize_identity(b2a->output_curves + 3); }
+
+    return true;
+}
+
+// Coding-independent code points (CICP) tag layout: four single-byte code points.
+typedef struct {
+    uint8_t type                     [4];
+    uint8_t reserved                 [4];
+    uint8_t color_primaries          [1];
+    uint8_t transfer_characteristics [1];
+    uint8_t matrix_coefficients      [1];
+    uint8_t video_full_range_flag    [1];
+} CICP_Layout;
+
+// Copies the four CICP code points out of a CICP tag. Fails on wrong type or short data.
+static bool read_cicp(const skcms_ICCTag* tag, skcms_CICP* cicp) {
+    if (tag->type != skcms_Signature_CICP || tag->size < SAFE_SIZEOF(CICP_Layout)) {
+        return false;
+    }
+
+    const CICP_Layout* cicpTag = (const CICP_Layout*)tag->buf;
+
+    cicp->color_primaries          = cicpTag->color_primaries[0];
+    cicp->transfer_characteristics = cicpTag->transfer_characteristics[0];
+    cicp->matrix_coefficients      = cicpTag->matrix_coefficients[0];
+    cicp->video_full_range_flag    = cicpTag->video_full_range_flag[0];
+    return true;
+}
+
+// Fills *tag with the idx'th entry of the profile's tag table.
+// Silently returns (leaving *tag untouched) on null arguments or an out-of-range index.
+void skcms_GetTagByIndex(const skcms_ICCProfile* profile, uint32_t idx, skcms_ICCTag* tag) {
+    if (!profile || !profile->buffer || !tag) { return; }
+    // Valid indices are [0, tag_count). The previous '>' test let idx == tag_count
+    // through, reading one 12-byte entry past the validated tag table.
+    if (idx >= profile->tag_count) { return; }
+    const tag_Layout* tags = get_tag_table(profile);
+    tag->signature = read_big_u32(tags[idx].signature);
+    tag->size      = read_big_u32(tags[idx].size);
+    tag->buf       = read_big_u32(tags[idx].offset) + profile->buffer;
+    tag->type      = read_big_u32(tag->buf);
+}
+
+// Linear search of the tag table for the first entry matching sig.
+// Returns true and fills *tag on a match, false otherwise.
+bool skcms_GetTagBySignature(const skcms_ICCProfile* profile, uint32_t sig, skcms_ICCTag* tag) {
+    if (!profile || !profile->buffer || !tag) { return false; }
+    const tag_Layout* tags = get_tag_table(profile);
+    for (uint32_t i = 0; i < profile->tag_count; ++i) {
+        if (read_big_u32(tags[i].signature) == sig) {
+            tag->signature = sig;
+            tag->size      = read_big_u32(tags[i].size);
+            tag->buf       = read_big_u32(tags[i].offset) + profile->buffer;
+            tag->type      = read_big_u32(tag->buf);
+            return true;
+        }
+    }
+    return false;
+}
+
+// A profile is usable as a transform source if it has an A2B pipeline, or both
+// transfer curves and a toXYZD50 matrix.
+static bool usable_as_src(const skcms_ICCProfile* profile) {
+    return profile->has_A2B
+       || (profile->has_trc && profile->has_toXYZD50);
+}
+
+// Parses an ICC profile from buf/len into *profile, selecting among the A2B0..A2B2 and
+// B2A0..B2A2 tags by rendering-intent preference order given in priority[0..priorities-1]
+// (0=perceptual, 1=relative colorimetric, 2=saturation). All parsed pointers alias buf,
+// which must outlive *profile. Returns true only if the result is usable as a source.
+bool skcms_ParseWithA2BPriority(const void* buf, size_t len,
+                                const int priority[], const int priorities,
+                                skcms_ICCProfile* profile) {
+    assert(SAFE_SIZEOF(header_Layout) == 132);
+
+    if (!profile) {
+        return false;
+    }
+    memset(profile, 0, SAFE_SIZEOF(*profile));
+
+    if (len < SAFE_SIZEOF(header_Layout)) {
+        return false;
+    }
+
+    // Byte-swap all header fields
+    const header_Layout* header  = (const header_Layout*)buf;
+    profile->buffer              = (const uint8_t*)buf;
+    profile->size                = read_big_u32(header->size);
+    uint32_t version             = read_big_u32(header->version);
+    profile->data_color_space    = read_big_u32(header->data_color_space);
+    profile->pcs                 = read_big_u32(header->pcs);
+    uint32_t signature           = read_big_u32(header->signature);
+    float illuminant_X           = read_big_fixed(header->illuminant_X);
+    float illuminant_Y           = read_big_fixed(header->illuminant_Y);
+    float illuminant_Z           = read_big_fixed(header->illuminant_Z);
+    profile->tag_count           = read_big_u32(header->tag_count);
+
+    // Validate signature, size (smaller than buffer, large enough to hold tag table),
+    // and major version
+    uint64_t tag_table_size = profile->tag_count * SAFE_SIZEOF(tag_Layout);
+    if (signature != skcms_Signature_acsp ||
+        profile->size > len ||
+        profile->size < SAFE_SIZEOF(header_Layout) + tag_table_size ||
+        (version >> 24) > 4) {
+        return false;
+    }
+
+    // Validate that illuminant is D50 white
+    if (fabsf_(illuminant_X - 0.9642f) > 0.0100f ||
+        fabsf_(illuminant_Y - 1.0000f) > 0.0100f ||
+        fabsf_(illuminant_Z - 0.8249f) > 0.0100f) {
+        return false;
+    }
+
+    // Validate that all tag entries have sane offset + size
+    const tag_Layout* tags = get_tag_table(profile);
+    for (uint32_t i = 0; i < profile->tag_count; ++i) {
+        uint32_t tag_offset = read_big_u32(tags[i].offset);
+        uint32_t tag_size   = read_big_u32(tags[i].size);
+        // 64-bit end so offset+size can't wrap past profile->size.
+        uint64_t tag_end    = (uint64_t)tag_offset + (uint64_t)tag_size;
+        if (tag_size < 4 || tag_end > profile->size) {
+            return false;
+        }
+    }
+
+    if (profile->pcs != skcms_Signature_XYZ && profile->pcs != skcms_Signature_Lab) {
+        return false;
+    }
+
+    bool pcs_is_xyz = profile->pcs == skcms_Signature_XYZ;
+
+    // Pre-parse commonly used tags.
+    skcms_ICCTag kTRC;
+    if (profile->data_color_space == skcms_Signature_Gray &&
+        skcms_GetTagBySignature(profile, skcms_Signature_kTRC, &kTRC)) {
+        if (!read_curve(kTRC.buf, kTRC.size, &profile->trc[0], nullptr)) {
+            // Malformed tag
+            return false;
+        }
+        // Gray profiles share one curve across all three channels.
+        profile->trc[1] = profile->trc[0];
+        profile->trc[2] = profile->trc[0];
+        profile->has_trc = true;
+
+        if (pcs_is_xyz) {
+            // Diagonal-only matrix; off-diagonal entries are already zero from memset.
+            profile->toXYZD50.vals[0][0] = illuminant_X;
+            profile->toXYZD50.vals[1][1] = illuminant_Y;
+            profile->toXYZD50.vals[2][2] = illuminant_Z;
+            profile->has_toXYZD50 = true;
+        }
+    } else {
+        skcms_ICCTag rTRC, gTRC, bTRC;
+        if (skcms_GetTagBySignature(profile, skcms_Signature_rTRC, &rTRC) &&
+            skcms_GetTagBySignature(profile, skcms_Signature_gTRC, &gTRC) &&
+            skcms_GetTagBySignature(profile, skcms_Signature_bTRC, &bTRC)) {
+            if (!read_curve(rTRC.buf, rTRC.size, &profile->trc[0], nullptr) ||
+                !read_curve(gTRC.buf, gTRC.size, &profile->trc[1], nullptr) ||
+                !read_curve(bTRC.buf, bTRC.size, &profile->trc[2], nullptr)) {
+                // Malformed TRC tags
+                return false;
+            }
+            profile->has_trc = true;
+        }
+
+        skcms_ICCTag rXYZ, gXYZ, bXYZ;
+        if (skcms_GetTagBySignature(profile, skcms_Signature_rXYZ, &rXYZ) &&
+            skcms_GetTagBySignature(profile, skcms_Signature_gXYZ, &gXYZ) &&
+            skcms_GetTagBySignature(profile, skcms_Signature_bXYZ, &bXYZ)) {
+            if (!read_to_XYZD50(&rXYZ, &gXYZ, &bXYZ, &profile->toXYZD50)) {
+                // Malformed XYZ tags
+                return false;
+            }
+            profile->has_toXYZD50 = true;
+        }
+    }
+
+    // First A2B tag found in priority order wins.
+    for (int i = 0; i < priorities; i++) {
+        // enum { perceptual, relative_colormetric, saturation }
+        if (priority[i] < 0 || priority[i] > 2) {
+            return false;
+        }
+        uint32_t sig = skcms_Signature_A2B0 + static_cast<uint32_t>(priority[i]);
+        skcms_ICCTag tag;
+        if (skcms_GetTagBySignature(profile, sig, &tag)) {
+            if (!read_a2b(&tag, &profile->A2B, pcs_is_xyz)) {
+                // Malformed A2B tag
+                return false;
+            }
+            profile->has_A2B = true;
+            break;
+        }
+    }
+
+    // Likewise for B2A.
+    for (int i = 0; i < priorities; i++) {
+        // enum { perceptual, relative_colormetric, saturation }
+        if (priority[i] < 0 || priority[i] > 2) {
+            return false;
+        }
+        uint32_t sig = skcms_Signature_B2A0 + static_cast<uint32_t>(priority[i]);
+        skcms_ICCTag tag;
+        if (skcms_GetTagBySignature(profile, sig, &tag)) {
+            if (!read_b2a(&tag, &profile->B2A, pcs_is_xyz)) {
+                // Malformed B2A tag
+                return false;
+            }
+            profile->has_B2A = true;
+            break;
+        }
+    }
+
+    skcms_ICCTag cicp_tag;
+    if (skcms_GetTagBySignature(profile, skcms_Signature_CICP, &cicp_tag)) {
+        if (!read_cicp(&cicp_tag, &profile->CICP)) {
+            // Malformed CICP tag
+            return false;
+        }
+        profile->has_CICP = true;
+    }
+
+    return usable_as_src(profile);
+}
+
+
+// Returns the canonical sRGB profile: the parametric sRGB transfer function on
+// all three channels and the canonical sRGB-to-XYZD50 gamut matrix. The A2B,
+// B2A, and CICP fields are zeroed and flagged absent. The pointer refers to an
+// immutable function-local static, so it is valid for the life of the program.
+const skcms_ICCProfile* skcms_sRGB_profile() {
+ static const skcms_ICCProfile sRGB_profile = {
+ nullptr, // buffer, moot here
+
+ 0, // size, moot here
+ skcms_Signature_RGB, // data_color_space
+ skcms_Signature_XYZ, // pcs
+ 0, // tag count, moot here
+
+ // We choose to represent sRGB with its canonical transfer function,
+ // and with its canonical XYZD50 gamut matrix.
+ true, // has_trc, followed by the 3 trc curves
+ {
+ {{0, {2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0, 0}}},
+ {{0, {2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0, 0}}},
+ {{0, {2.4f, (float)(1/1.055), (float)(0.055/1.055), (float)(1/12.92), 0.04045f, 0, 0}}},
+ },
+
+ true, // has_toXYZD50, followed by 3x3 toXYZD50 matrix
+ {{
+ { 0.436065674f, 0.385147095f, 0.143066406f },
+ { 0.222488403f, 0.716873169f, 0.060607910f },
+ { 0.013916016f, 0.097076416f, 0.714096069f },
+ }},
+
+ false, // has_A2B, followed by A2B itself, which we don't care about.
+ {
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ {0,0,0,0},
+ nullptr,
+ nullptr,
+
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ {{
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ }},
+
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ },
+
+ false, // has_B2A, followed by B2A itself, which we also don't care about.
+ {
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+
+ 0,
+ {{
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ }},
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+
+ 0,
+ {0,0,0,0},
+ nullptr,
+ nullptr,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ },
+
+ false, // has_CICP, followed by cicp itself which we don't care about.
+ { 0, 0, 0, 0 },
+ };
+ return &sRGB_profile;
+}
+
+// Returns a profile whose transfer functions are identity (g=1) and whose
+// toXYZD50 matrix is the 3x3 identity, i.e. a profile already in XYZD50.
+// Like skcms_sRGB_profile(), the pointer is to an immutable static.
+const skcms_ICCProfile* skcms_XYZD50_profile() {
+ // Just like sRGB above, but with identity transfer functions and toXYZD50 matrix.
+ static const skcms_ICCProfile XYZD50_profile = {
+ nullptr, // buffer, moot here
+
+ 0, // size, moot here
+ skcms_Signature_RGB, // data_color_space
+ skcms_Signature_XYZ, // pcs
+ 0, // tag count, moot here
+
+ true, // has_trc, followed by the 3 trc curves
+ {
+ {{0, {1,1, 0,0,0,0,0}}},
+ {{0, {1,1, 0,0,0,0,0}}},
+ {{0, {1,1, 0,0,0,0,0}}},
+ },
+
+ true, // has_toXYZD50, followed by 3x3 toXYZD50 matrix
+ {{
+ { 1,0,0 },
+ { 0,1,0 },
+ { 0,0,1 },
+ }},
+
+ false, // has_A2B, followed by A2B itself, which we don't care about.
+ {
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ {0,0,0,0},
+ nullptr,
+ nullptr,
+
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ {{
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ }},
+
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ },
+
+ false, // has_B2A, followed by B2A itself, which we also don't care about.
+ {
+ 0,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+
+ 0,
+ {{
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ { 0,0,0,0 },
+ }},
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+
+ 0,
+ {0,0,0,0},
+ nullptr,
+ nullptr,
+ {
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ {{0, {0,0, 0,0,0,0,0}}},
+ },
+ },
+
+ false, // has_CICP, followed by cicp itself which we don't care about.
+ { 0, 0, 0, 0 },
+ };
+
+ return &XYZD50_profile;
+}
+
+// Convenience accessor: the parametric sRGB transfer function, taken from the
+// red channel of the canned sRGB profile (all three channels are identical).
+const skcms_TransferFunction* skcms_sRGB_TransferFunction() {
+ return &skcms_sRGB_profile()->trc[0].parametric;
+}
+
+// The inverse of the sRGB transfer function, as precomputed parametric
+// coefficients (note g = 1/2.4 ≈ 0.41667 and c = 12.92).
+const skcms_TransferFunction* skcms_sRGB_Inverse_TransferFunction() {
+ static const skcms_TransferFunction sRGB_inv =
+ {0.416666657f, 1.137283325f, -0.0f, 12.920000076f, 0.003130805f, -0.054969788f, -0.0f};
+ return &sRGB_inv;
+}
+
+// The identity transfer function f(x) = x, expressed as g=1, a=1, rest zero.
+const skcms_TransferFunction* skcms_Identity_TransferFunction() {
+ static const skcms_TransferFunction identity = {1,1,0,0,0,0,0};
+ return &identity;
+}
+
+// 252 fixed pseudo-random byte values used as test pixels by
+// skcms_ApproximatelyEqualProfiles() below. 252 is divisible by both 3 and 4,
+// so the buffer packs evenly as RGB_888 (84 px) or RGBA_8888 (63 px).
+const uint8_t skcms_252_random_bytes[] = {
+ 8, 179, 128, 204, 253, 38, 134, 184, 68, 102, 32, 138, 99, 39, 169, 215,
+ 119, 26, 3, 223, 95, 239, 52, 132, 114, 74, 81, 234, 97, 116, 244, 205, 30,
+ 154, 173, 12, 51, 159, 122, 153, 61, 226, 236, 178, 229, 55, 181, 220, 191,
+ 194, 160, 126, 168, 82, 131, 18, 180, 245, 163, 22, 246, 69, 235, 252, 57,
+ 108, 14, 6, 152, 240, 255, 171, 242, 20, 227, 177, 238, 96, 85, 16, 211,
+ 70, 200, 149, 155, 146, 127, 145, 100, 151, 109, 19, 165, 208, 195, 164,
+ 137, 254, 182, 248, 64, 201, 45, 209, 5, 147, 207, 210, 113, 162, 83, 225,
+ 9, 31, 15, 231, 115, 37, 58, 53, 24, 49, 197, 56, 120, 172, 48, 21, 214,
+ 129, 111, 11, 50, 187, 196, 34, 60, 103, 71, 144, 47, 203, 77, 80, 232,
+ 140, 222, 250, 206, 166, 247, 139, 249, 221, 72, 106, 27, 199, 117, 54,
+ 219, 135, 118, 40, 79, 41, 251, 46, 93, 212, 92, 233, 148, 28, 121, 63,
+ 123, 158, 105, 59, 29, 42, 143, 23, 0, 107, 176, 87, 104, 183, 156, 193,
+ 189, 90, 188, 65, 190, 17, 198, 7, 186, 161, 1, 124, 78, 125, 170, 133,
+ 174, 218, 67, 157, 75, 101, 89, 217, 62, 33, 141, 228, 25, 35, 91, 230, 4,
+ 2, 13, 73, 86, 167, 237, 84, 243, 44, 185, 66, 130, 110, 150, 142, 216, 88,
+ 112, 36, 224, 136, 202, 76, 94, 98, 175, 213
+};
+
+// Returns true when profiles A and B are either bitwise identical or produce
+// near-identical XYZD50 results: each transforms the shared 252-byte test
+// pattern to XYZD50 and the outputs must differ by at most 1 per byte.
+bool skcms_ApproximatelyEqualProfiles(const skcms_ICCProfile* A, const skcms_ICCProfile* B) {
+ // Test for exactly equal profiles first.
+ if (A == B || 0 == memcmp(A,B, sizeof(skcms_ICCProfile))) {
+ return true;
+ }
+
+ // For now this is essentially the same strategy we use in test_only.c
+ // for our skcms_Transform() smoke tests:
+ // 1) transform A to XYZD50
+ // 2) transform B to XYZD50
+ // 3) return true if they're similar enough
+ // Our current criterion in 3) is maximum 1 bit error per XYZD50 byte.
+
+ // skcms_252_random_bytes are 252 of a random shuffle of all possible bytes.
+ // 252 is evenly divisible by 3 and 4. Only 192, 10, 241, and 43 are missing.
+
+ // We want to allow otherwise equivalent profiles tagged as grayscale and RGB
+ // to be treated as equal. But CMYK profiles are a totally different ballgame.
+ const auto CMYK = skcms_Signature_CMYK;
+ if ((A->data_color_space == CMYK) != (B->data_color_space == CMYK)) {
+ return false;
+ }
+
+ // Interpret as RGB_888 if data color space is RGB or GRAY, RGBA_8888 if CMYK.
+ // TODO: working with RGBA_8888 either way is probably fastest.
+ skcms_PixelFormat fmt = skcms_PixelFormat_RGB_888;
+ size_t npixels = 84;
+ if (A->data_color_space == skcms_Signature_CMYK) {
+ fmt = skcms_PixelFormat_RGBA_8888;
+ npixels = 63;
+ }
+
+ // TODO: if A or B is a known profile (skcms_sRGB_profile, skcms_XYZD50_profile),
+ // use pre-canned results and skip that skcms_Transform() call?
+ uint8_t dstA[252],
+ dstB[252];
+ if (!skcms_Transform(
+ skcms_252_random_bytes, fmt, skcms_AlphaFormat_Unpremul, A,
+ dstA, skcms_PixelFormat_RGB_888, skcms_AlphaFormat_Unpremul, skcms_XYZD50_profile(),
+ npixels)) {
+ return false;
+ }
+ if (!skcms_Transform(
+ skcms_252_random_bytes, fmt, skcms_AlphaFormat_Unpremul, B,
+ dstB, skcms_PixelFormat_RGB_888, skcms_AlphaFormat_Unpremul, skcms_XYZD50_profile(),
+ npixels)) {
+ return false;
+ }
+
+ // TODO: make sure this final check has reasonable codegen.
+ for (size_t i = 0; i < 252; i++) {
+ if (abs((int)dstA[i] - (int)dstB[i]) > 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// True iff the profile has TRC curves and all three are approximate inverses
+// of inv_tf (delegates the per-curve check to skcms_AreApproximateInverses).
+bool skcms_TRCs_AreApproximateInverse(const skcms_ICCProfile* profile,
+ const skcms_TransferFunction* inv_tf) {
+ if (!profile || !profile->has_trc) {
+ return false;
+ }
+
+ return skcms_AreApproximateInverses(&profile->trc[0], inv_tf) &&
+ skcms_AreApproximateInverses(&profile->trc[1], inv_tf) &&
+ skcms_AreApproximateInverses(&profile->trc[2], inv_tf);
+}
+
+// True iff x lies in the closed interval [0, 1]. (NaN compares false, so NaN
+// inputs are rejected too.)
+static bool is_zero_to_one(float x) {
+ return 0 <= x && x <= 1;
+}
+
+// A simple 3-component float vector, used internally for matrix math.
+typedef struct { float vals[3]; } skcms_Vector3;
+
+// Returns the matrix-vector product m * v.
+static skcms_Vector3 mv_mul(const skcms_Matrix3x3* m, const skcms_Vector3* v) {
+ skcms_Vector3 dst = {{0,0,0}};
+ for (int row = 0; row < 3; ++row) {
+ dst.vals[row] = m->vals[row][0] * v->vals[0]
+ + m->vals[row][1] * v->vals[1]
+ + m->vals[row][2] * v->vals[2];
+ }
+ return dst;
+}
+
+// Computes the Bradford chromatic-adaptation matrix from a white point given
+// as xy chromaticity (wx, wy) to the D50 white point, writing it to
+// *toXYZD50. Returns false on out-of-range inputs or null output pointer.
+bool skcms_AdaptToXYZD50(float wx, float wy,
+ skcms_Matrix3x3* toXYZD50) {
+ if (!is_zero_to_one(wx) || !is_zero_to_one(wy) ||
+ !toXYZD50) {
+ return false;
+ }
+
+ // Assumes that Y is 1.0f.
+ skcms_Vector3 wXYZ = { { wx / wy, 1, (1 - wx - wy) / wy } };
+
+ // Now convert toXYZ matrix to toXYZD50.
+ skcms_Vector3 wXYZD50 = { { 0.96422f, 1.0f, 0.82521f } };
+
+ // Calculate the chromatic adaptation matrix. We will use the Bradford method, thus
+ // the matrices below. The Bradford method is used by Adobe and is widely considered
+ // to be the best.
+ skcms_Matrix3x3 xyz_to_lms = {{
+ { 0.8951f, 0.2664f, -0.1614f },
+ { -0.7502f, 1.7135f, 0.0367f },
+ { 0.0389f, -0.0685f, 1.0296f },
+ }};
+ skcms_Matrix3x3 lms_to_xyz = {{
+ { 0.9869929f, -0.1470543f, 0.1599627f },
+ { 0.4323053f, 0.5183603f, 0.0492912f },
+ { -0.0085287f, 0.0400428f, 0.9684867f },
+ }};
+
+ skcms_Vector3 srcCone = mv_mul(&xyz_to_lms, &wXYZ);
+ skcms_Vector3 dstCone = mv_mul(&xyz_to_lms, &wXYZD50);
+
+ // Scale each cone response to the D50 response, then sandwich that diagonal
+ // scale between the LMS<->XYZ basis changes: lms_to_xyz * scale * xyz_to_lms.
+ *toXYZD50 = {{
+ { dstCone.vals[0] / srcCone.vals[0], 0, 0 },
+ { 0, dstCone.vals[1] / srcCone.vals[1], 0 },
+ { 0, 0, dstCone.vals[2] / srcCone.vals[2] },
+ }};
+ *toXYZD50 = skcms_Matrix3x3_concat(toXYZD50, &xyz_to_lms);
+ *toXYZD50 = skcms_Matrix3x3_concat(&lms_to_xyz, toXYZD50);
+
+ return true;
+}
+
+// Builds a toXYZD50 matrix from RGB primaries and white point, all given as
+// xy chromaticities. Solves for per-primary luminance scales from the white
+// point, then chromatically adapts the result to D50 via skcms_AdaptToXYZD50.
+// Returns false on out-of-range inputs, a singular primaries matrix, or a
+// null output pointer.
+bool skcms_PrimariesToXYZD50(float rx, float ry,
+ float gx, float gy,
+ float bx, float by,
+ float wx, float wy,
+ skcms_Matrix3x3* toXYZD50) {
+ if (!is_zero_to_one(rx) || !is_zero_to_one(ry) ||
+ !is_zero_to_one(gx) || !is_zero_to_one(gy) ||
+ !is_zero_to_one(bx) || !is_zero_to_one(by) ||
+ !is_zero_to_one(wx) || !is_zero_to_one(wy) ||
+ !toXYZD50) {
+ return false;
+ }
+
+ // First, we need to convert xy values (primaries) to XYZ.
+ skcms_Matrix3x3 primaries = {{
+ { rx, gx, bx },
+ { ry, gy, by },
+ { 1 - rx - ry, 1 - gx - gy, 1 - bx - by },
+ }};
+ skcms_Matrix3x3 primaries_inv;
+ if (!skcms_Matrix3x3_invert(&primaries, &primaries_inv)) {
+ return false;
+ }
+
+ // Assumes that Y is 1.0f.
+ skcms_Vector3 wXYZ = { { wx / wy, 1, (1 - wx - wy) / wy } };
+ skcms_Vector3 XYZ = mv_mul(&primaries_inv, &wXYZ);
+
+ skcms_Matrix3x3 toXYZ = {{
+ { XYZ.vals[0], 0, 0 },
+ { 0, XYZ.vals[1], 0 },
+ { 0, 0, XYZ.vals[2] },
+ }};
+ toXYZ = skcms_Matrix3x3_concat(&primaries, &toXYZ);
+
+ skcms_Matrix3x3 DXtoD50;
+ if (!skcms_AdaptToXYZD50(wx, wy, &DXtoD50)) {
+ return false;
+ }
+
+ *toXYZD50 = skcms_Matrix3x3_concat(&DXtoD50, &toXYZ);
+ return true;
+}
+
+
+// Inverts *src into *dst using cofactor expansion, accumulating in double for
+// precision. Returns false when the matrix is singular, when 1/det overflows
+// float range, or when any resulting entry is non-finite.
+bool skcms_Matrix3x3_invert(const skcms_Matrix3x3* src, skcms_Matrix3x3* dst) {
+ // NOTE: a_rc below is loaded from vals[c][r], i.e. the math here works on
+ // the transpose of src's storage order.
+ double a00 = src->vals[0][0],
+ a01 = src->vals[1][0],
+ a02 = src->vals[2][0],
+ a10 = src->vals[0][1],
+ a11 = src->vals[1][1],
+ a12 = src->vals[2][1],
+ a20 = src->vals[0][2],
+ a21 = src->vals[1][2],
+ a22 = src->vals[2][2];
+
+ double b0 = a00*a11 - a01*a10,
+ b1 = a00*a12 - a02*a10,
+ b2 = a01*a12 - a02*a11,
+ b3 = a20,
+ b4 = a21,
+ b5 = a22;
+
+ double determinant = b0*b5
+ - b1*b4
+ + b2*b3;
+
+ if (determinant == 0) {
+ return false;
+ }
+
+ double invdet = 1.0 / determinant;
+ if (invdet > +FLT_MAX || invdet < -FLT_MAX || !isfinitef_((float)invdet)) {
+ return false;
+ }
+
+ b0 *= invdet;
+ b1 *= invdet;
+ b2 *= invdet;
+ b3 *= invdet;
+ b4 *= invdet;
+ b5 *= invdet;
+
+ dst->vals[0][0] = (float)( a11*b5 - a12*b4 );
+ dst->vals[1][0] = (float)( a02*b4 - a01*b5 );
+ dst->vals[2][0] = (float)( + b2 );
+ dst->vals[0][1] = (float)( a12*b3 - a10*b5 );
+ dst->vals[1][1] = (float)( a00*b5 - a02*b3 );
+ dst->vals[2][1] = (float)( - b1 );
+ dst->vals[0][2] = (float)( a10*b4 - a11*b3 );
+ dst->vals[1][2] = (float)( a01*b3 - a00*b4 );
+ dst->vals[2][2] = (float)( + b0 );
+
+ // Reject any inverse containing Inf/NaN entries.
+ for (int r = 0; r < 3; ++r)
+ for (int c = 0; c < 3; c++) {
+ if (!isfinitef_(dst->vals[r][c])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns the matrix product A * B (apply B first, then A).
+skcms_Matrix3x3 skcms_Matrix3x3_concat(const skcms_Matrix3x3* A, const skcms_Matrix3x3* B) {
+ skcms_Matrix3x3 m = { { { 0,0,0 },{ 0,0,0 },{ 0,0,0 } } };
+ for (int r = 0; r < 3; r++)
+ for (int c = 0; c < 3; c++) {
+ m.vals[r][c] = A->vals[r][0] * B->vals[0][c]
+ + A->vals[r][1] * B->vals[1][c]
+ + A->vals[r][2] * B->vals[2][c];
+ }
+ return m;
+}
+
+#if defined(__clang__)
+ [[clang::no_sanitize("float-divide-by-zero")]] // Checked for by classify() on the way out.
+#endif
+// Writes the inverse of *src into *dst. PQ-ish and HLG-ish functions have
+// closed-form inverses handled up front; the sRGB-ish piecewise form is
+// inverted algebraically below. Returns false for invalid/uninvertible input.
+bool skcms_TransferFunction_invert(const skcms_TransferFunction* src, skcms_TransferFunction* dst) {
+ TF_PQish pq;
+ TF_HLGish hlg;
+ switch (classify(*src, &pq, &hlg)) {
+ case skcms_TFType_Invalid: return false;
+ case skcms_TFType_sRGBish: break; // handled below
+
+ case skcms_TFType_PQish:
+ *dst = { TFKind_marker(skcms_TFType_PQish), -pq.A, pq.D, 1.0f/pq.F
+ , pq.B, -pq.E, 1.0f/pq.C};
+ return true;
+
+ case skcms_TFType_HLGish:
+ *dst = { TFKind_marker(skcms_TFType_HLGinvish), 1.0f/hlg.R, 1.0f/hlg.G
+ , 1.0f/hlg.a, hlg.b, hlg.c
+ , hlg.K_minus_1 };
+ return true;
+
+ case skcms_TFType_HLGinvish:
+ *dst = { TFKind_marker(skcms_TFType_HLGish), 1.0f/hlg.R, 1.0f/hlg.G
+ , 1.0f/hlg.a, hlg.b, hlg.c
+ , hlg.K_minus_1 };
+ return true;
+ }
+
+ assert (classify(*src) == skcms_TFType_sRGBish);
+
+ // We're inverting this function, solving for x in terms of y.
+ // y = (cx + f) x < d
+ // (ax + b)^g + e x ≥ d
+ // The inverse of this function can be expressed in the same piecewise form.
+ skcms_TransferFunction inv = {0,0,0,0,0,0,0};
+
+ // We'll start by finding the new threshold inv.d.
+ // In principle we should be able to find that by solving for y at x=d from either side.
+ // (If those two d values aren't the same, it's a discontinuous transfer function.)
+ float d_l = src->c * src->d + src->f,
+ d_r = powf_(src->a * src->d + src->b, src->g) + src->e;
+ if (fabsf_(d_l - d_r) > 1/512.0f) {
+ return false;
+ }
+ inv.d = d_l; // TODO(mtklein): better in practice to choose d_r?
+
+ // When d=0, the linear section collapses to a point. We leave c,d,f all zero in that case.
+ if (inv.d > 0) {
+ // Inverting the linear section is pretty straightforward:
+ // y = cx + f
+ // y - f = cx
+ // (1/c)y - f/c = x
+ inv.c = 1.0f/src->c;
+ inv.f = -src->f/src->c;
+ }
+
+ // The interesting part is inverting the nonlinear section:
+ // y = (ax + b)^g + e.
+ // y - e = (ax + b)^g
+ // (y - e)^1/g = ax + b
+ // (y - e)^1/g - b = ax
+ // (1/a)(y - e)^1/g - b/a = x
+ //
+ // To make that fit our form, we need to move the (1/a) term inside the exponentiation:
+ // let k = (1/a)^g
+ // (1/a)( y - e)^1/g - b/a = x
+ // (ky - ke)^1/g - b/a = x
+
+ float k = powf_(src->a, -src->g); // (1/a)^g == a^-g
+ inv.g = 1.0f / src->g;
+ inv.a = k;
+ inv.b = -k * src->e;
+ inv.e = -src->b / src->a;
+
+ // We need to enforce the same constraints here that we do when fitting a curve,
+ // a >= 0 and ad+b >= 0. These constraints are checked by classify(), so they're true
+ // of the source function if we're here.
+
+ // Just like when fitting the curve, there's really no way to rescue a < 0.
+ if (inv.a < 0) {
+ return false;
+ }
+ // On the other hand we can rescue an ad+b that's gone slightly negative here.
+ if (inv.a * inv.d + inv.b < 0) {
+ inv.b = -inv.a * inv.d;
+ }
+
+ // That should usually make classify(inv) == sRGBish true, but there are a couple situations
+ // where we might still fail here, like non-finite parameter values.
+ if (classify(inv) != skcms_TFType_sRGBish) {
+ return false;
+ }
+
+ assert (inv.a >= 0);
+ assert (inv.a * inv.d + inv.b >= 0);
+
+ // Now in principle we're done.
+ // But to preserve the valuable invariant inv(src(1.0f)) == 1.0f, we'll tweak
+ // e or f of the inverse, depending on which segment contains src(1.0f).
+ float s = skcms_TransferFunction_eval(src, 1.0f);
+ if (!isfinitef_(s)) {
+ return false;
+ }
+
+ float sign = s < 0 ? -1.0f : 1.0f;
+ s *= sign;
+ if (s < inv.d) {
+ inv.f = 1.0f - sign * inv.c * s;
+ } else {
+ inv.e = 1.0f - sign * powf_(inv.a * s + inv.b, inv.g);
+ }
+
+ *dst = inv;
+ return classify(*dst) == skcms_TFType_sRGBish;
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+
+// From here below we're approximating an skcms_Curve with an skcms_TransferFunction{g,a,b,c,d,e,f}:
+//
+// tf(x) = cx + f x < d
+// tf(x) = (ax + b)^g + e x ≥ d
+//
+// When fitting, we add the additional constraint that both pieces meet at d:
+//
+// cd + f = (ad + b)^g + e
+//
+// Solving for e and folding it through gives an alternate formulation of the non-linear piece:
+//
+// tf(x) = cx + f x < d
+// tf(x) = (ax + b)^g - (ad + b)^g + cd + f x ≥ d
+//
+// Our overall strategy is then:
+// For a couple tolerances,
+// - fit_linear(): fit c,d,f iteratively to as many points as our tolerance allows
+// - invert c,d,f
+// - fit_nonlinear(): fit g,a,b using Gauss-Newton given those inverted c,d,f
+// (and by constraint, inverted e) to the inverse of the table.
+// Return the parameters with least maximum error.
+//
+// To run Gauss-Newton to find g,a,b, we'll also need the gradient of the residuals
+// of round-trip f_inv(x), the inverse of the non-linear piece of f(x).
+//
+// let y = Table(x)
+// r(x) = x - f_inv(y)
+//
+// ∂r/∂g = ln(ay + b)*(ay + b)^g
+// - ln(ad + b)*(ad + b)^g
+// ∂r/∂a = yg(ay + b)^(g-1)
+// - dg(ad + b)^(g-1)
+// ∂r/∂b = g(ay + b)^(g-1)
+// - g(ad + b)^(g-1)
+
+// Return the residual of roundtripping skcms_Curve(x) through f_inv(y) with parameters P,
+// and fill out the gradient of the residual into dfdP.
+// Return the residual of roundtripping skcms_Curve(x) through f_inv(y) with parameters P,
+// and fill out the gradient of the residual into dfdP.
+// (P = {g,a,b} of tf; c,d,f are held fixed. See the derivation comment above.)
+static float rg_nonlinear(float x,
+ const skcms_Curve* curve,
+ const skcms_TransferFunction* tf,
+ float dfdP[3]) {
+ const float y = eval_curve(curve, x);
+
+ const float g = tf->g, a = tf->a, b = tf->b,
+ c = tf->c, d = tf->d, f = tf->f;
+
+ // Clamp ay+b to 0 so powf_ never sees a negative base.
+ const float Y = fmaxf_(a*y + b, 0.0f),
+ D = a*d + b;
+ assert (D >= 0);
+
+ // The gradient.
+ dfdP[0] = logf_(Y)*powf_(Y, g)
+ - logf_(D)*powf_(D, g);
+ dfdP[1] = y*g*powf_(Y, g-1)
+ - d*g*powf_(D, g-1);
+ dfdP[2] = g*powf_(Y, g-1)
+ - g*powf_(D, g-1);
+
+ // The residual.
+ const float f_inv = powf_(Y, g)
+ - powf_(D, g)
+ + c*d + f;
+ return x - f_inv;
+}
+
+// One Gauss-Newton iteration adjusting tf's {g,a,b} to better fit curve over
+// N evenly spaced samples starting at x0. Returns false if the normal-equation
+// matrix can't be inverted or the updated parameters go non-finite.
+static bool gauss_newton_step(const skcms_Curve* curve,
+ skcms_TransferFunction* tf,
+ float x0, float dx, int N) {
+ // We'll sample x from the range [x0,x1] (both inclusive) N times with even spacing.
+ //
+ // Let P = [ tf->g, tf->a, tf->b ] (the three terms that we're adjusting).
+ //
+ // We want to do P' = P + (Jf^T Jf)^-1 Jf^T r(P),
+ // where r(P) is the residual vector
+ // and Jf is the Jacobian matrix of f(), ∂r/∂P.
+ //
+ // Let's review the shape of each of these expressions:
+ // r(P) is [N x 1], a column vector with one entry per value of x tested
+ // Jf is [N x 3], a matrix with an entry for each (x,P) pair
+ // Jf^T is [3 x N], the transpose of Jf
+ //
+ // Jf^T Jf is [3 x N] * [N x 3] == [3 x 3], a 3x3 matrix,
+ // and so is its inverse (Jf^T Jf)^-1
+ // Jf^T r(P) is [3 x N] * [N x 1] == [3 x 1], a column vector with the same shape as P
+ //
+ // Our implementation strategy to get to the final ∆P is
+ // 1) evaluate Jf^T Jf, call that lhs
+ // 2) evaluate Jf^T r(P), call that rhs
+ // 3) invert lhs
+ // 4) multiply inverse lhs by rhs
+ //
+ // This is a friendly implementation strategy because we don't have to have any
+ // buffers that scale with N, and equally nice don't have to perform any matrix
+ // operations that are variable size.
+ //
+ // Other implementation strategies could trade this off, e.g. evaluating the
+ // pseudoinverse of Jf ( (Jf^T Jf)^-1 Jf^T ) directly, then multiplying that by
+ // the residuals. That would probably require implementing singular value
+ // decomposition, and would create a [3 x N] matrix to be multiplied by the
+ // [N x 1] residual vector, but on the upside I think that'd eliminate the
+ // possibility of this gauss_newton_step() function ever failing.
+
+ // 0) start off with lhs and rhs safely zeroed.
+ skcms_Matrix3x3 lhs = {{ {0,0,0}, {0,0,0}, {0,0,0} }};
+ skcms_Vector3 rhs = { {0,0,0} };
+
+ // 1,2) evaluate lhs and evaluate rhs
+ // We want to evaluate Jf only once, but both lhs and rhs involve Jf^T,
+ // so we'll have to update lhs and rhs at the same time.
+ for (int i = 0; i < N; i++) {
+ float x = x0 + static_cast<float>(i)*dx;
+
+ float dfdP[3] = {0,0,0};
+ float resid = rg_nonlinear(x,curve,tf, dfdP);
+
+ for (int r = 0; r < 3; r++) {
+ for (int c = 0; c < 3; c++) {
+ lhs.vals[r][c] += dfdP[r] * dfdP[c];
+ }
+ rhs.vals[r] += dfdP[r] * resid;
+ }
+ }
+
+ // If any of the 3 P parameters are unused, this matrix will be singular.
+ // Detect those cases and fix them up to identity instead, so we can invert.
+ for (int k = 0; k < 3; k++) {
+ if (lhs.vals[0][k]==0 && lhs.vals[1][k]==0 && lhs.vals[2][k]==0 &&
+ lhs.vals[k][0]==0 && lhs.vals[k][1]==0 && lhs.vals[k][2]==0) {
+ lhs.vals[k][k] = 1;
+ }
+ }
+
+ // 3) invert lhs
+ skcms_Matrix3x3 lhs_inv;
+ if (!skcms_Matrix3x3_invert(&lhs, &lhs_inv)) {
+ return false;
+ }
+
+ // 4) multiply inverse lhs by rhs
+ skcms_Vector3 dP = mv_mul(&lhs_inv, &rhs);
+ tf->g += dP.vals[0];
+ tf->a += dP.vals[1];
+ tf->b += dP.vals[2];
+ return isfinitef_(tf->g) && isfinitef_(tf->a) && isfinitef_(tf->b);
+}
+
+// Like skcms_MaxRoundtripError(), but first verifies tf_inv is invertible and
+// sRGB-ish (and that its inverse is invertible again). Returns +inf when any
+// of those checks fail, so callers can simply minimize the result.
+static float max_roundtrip_error_checked(const skcms_Curve* curve,
+ const skcms_TransferFunction* tf_inv) {
+ skcms_TransferFunction tf;
+ if (!skcms_TransferFunction_invert(tf_inv, &tf) || skcms_TFType_sRGBish != classify(tf)) {
+ return INFINITY_;
+ }
+
+ skcms_TransferFunction tf_inv_again;
+ if (!skcms_TransferFunction_invert(&tf, &tf_inv_again)) {
+ return INFINITY_;
+ }
+
+ return skcms_MaxRoundtripError(curve, &tf_inv_again);
+}
+
+// Fit the points in [L,N) to the non-linear piece of tf, or return false if we can't.
+// Runs up to 8 Gauss-Newton steps, keeping the parameters with the lowest
+// checked roundtrip error seen so far.
+static bool fit_nonlinear(const skcms_Curve* curve, int L, int N, skcms_TransferFunction* tf) {
+ // This enforces a few constraints that are not modeled in gauss_newton_step()'s optimization.
+ auto fixup_tf = [tf]() {
+ // a must be non-negative. That ensures the function is monotonically increasing.
+ // We don't really know how to fix up a if it goes negative.
+ if (tf->a < 0) {
+ return false;
+ }
+ // ad+b must be non-negative. That ensures we don't end up with complex numbers in powf.
+ // We feel just barely not uneasy enough to tweak b so ad+b is zero in this case.
+ if (tf->a * tf->d + tf->b < 0) {
+ tf->b = -tf->a * tf->d;
+ }
+ assert (tf->a >= 0 &&
+ tf->a * tf->d + tf->b >= 0);
+
+ // cd+f must be ~= (ad+b)^g+e. That ensures the function is continuous. We keep e as a free
+ // parameter so we can guarantee this.
+ tf->e = tf->c*tf->d + tf->f
+ - powf_(tf->a*tf->d + tf->b, tf->g);
+
+ return true;
+ };
+
+ if (!fixup_tf()) {
+ return false;
+ }
+
+ // No matter where we start, dx should always represent N even steps from 0 to 1.
+ const float dx = 1.0f / static_cast<float>(N-1);
+
+ skcms_TransferFunction best_tf = *tf;
+ float best_max_error = INFINITY_;
+
+ // Need this or several curves get worse... *sigh*
+ float init_error = max_roundtrip_error_checked(curve, tf);
+ if (init_error < best_max_error) {
+ best_max_error = init_error;
+ best_tf = *tf;
+ }
+
+ // As far as we can tell, 1 Gauss-Newton step won't converge, and 3 steps is no better than 2.
+ for (int j = 0; j < 8; j++) {
+ if (!gauss_newton_step(curve, tf, static_cast<float>(L)*dx, dx, N-L) || !fixup_tf()) {
+ *tf = best_tf;
+ return isfinitef_(best_max_error);
+ }
+
+ float max_error = max_roundtrip_error_checked(curve, tf);
+ if (max_error < best_max_error) {
+ best_max_error = max_error;
+ best_tf = *tf;
+ }
+ }
+
+ *tf = best_tf;
+ return isfinitef_(best_max_error);
+}
+
+// Approximates a table-based curve with a parametric skcms_TransferFunction.
+// Tries two tolerances for the linear-segment fit, solves degenerate cases
+// directly, and otherwise fits the non-linear piece with Gauss-Newton.
+// On success writes the best fit to *approx and its roundtrip error to
+// *max_error; returns false for null args, non-table curves, or no finite fit.
+bool skcms_ApproximateCurve(const skcms_Curve* curve,
+ skcms_TransferFunction* approx,
+ float* max_error) {
+ if (!curve || !approx || !max_error) {
+ return false;
+ }
+
+ if (curve->table_entries == 0) {
+ // No point approximating an skcms_TransferFunction with an skcms_TransferFunction!
+ return false;
+ }
+
+ if (curve->table_entries == 1 || curve->table_entries > (uint32_t)INT_MAX) {
+ // We need at least two points, and must put some reasonable cap on the maximum number.
+ return false;
+ }
+
+ int N = (int)curve->table_entries;
+ const float dx = 1.0f / static_cast<float>(N - 1);
+
+ *max_error = INFINITY_;
+ const float kTolerances[] = { 1.5f / 65535.0f, 1.0f / 512.0f };
+ for (int t = 0; t < ARRAY_COUNT(kTolerances); t++) {
+ skcms_TransferFunction tf,
+ tf_inv;
+
+ // It's problematic to fit curves with non-zero f, so always force it to zero explicitly.
+ tf.f = 0.0f;
+ int L = fit_linear(curve, N, kTolerances[t], &tf.c, &tf.d);
+
+ if (L == N) {
+ // If the entire data set was linear, move the coefficients to the nonlinear portion
+ // with G == 1. This lets us use a canonical representation with d == 0.
+ tf.g = 1;
+ tf.a = tf.c;
+ tf.b = tf.f;
+ tf.c = tf.d = tf.e = tf.f = 0;
+ } else if (L == N - 1) {
+ // Degenerate case with only two points in the nonlinear segment. Solve directly.
+ tf.g = 1;
+ tf.a = (eval_curve(curve, static_cast<float>(N-1)*dx) -
+ eval_curve(curve, static_cast<float>(N-2)*dx))
+ / dx;
+ tf.b = eval_curve(curve, static_cast<float>(N-2)*dx)
+ - tf.a * static_cast<float>(N-2)*dx;
+ tf.e = 0;
+ } else {
+ // Start by guessing a gamma-only curve through the midpoint.
+ int mid = (L + N) / 2;
+ float mid_x = static_cast<float>(mid) / static_cast<float>(N - 1);
+ float mid_y = eval_curve(curve, mid_x);
+ tf.g = log2f_(mid_y) / log2f_(mid_x);
+ tf.a = 1;
+ tf.b = 0;
+ tf.e = tf.c*tf.d + tf.f
+ - powf_(tf.a*tf.d + tf.b, tf.g);
+
+
+ if (!skcms_TransferFunction_invert(&tf, &tf_inv) ||
+ !fit_nonlinear(curve, L,N, &tf_inv)) {
+ continue;
+ }
+
+ // We fit tf_inv, so calculate tf to keep in sync.
+ // fit_nonlinear() should guarantee invertibility.
+ if (!skcms_TransferFunction_invert(&tf_inv, &tf)) {
+ assert(false);
+ continue;
+ }
+ }
+
+ // We'd better have a sane, sRGB-ish TF by now.
+ // Other non-Bad TFs would be fine, but we know we've only ever tried to fit sRGBish;
+ // anything else is just some accident of math and the way we pun tf.g as a type flag.
+ // fit_nonlinear() should guarantee this, but the special cases may fail this test.
+ if (skcms_TFType_sRGBish != classify(tf)) {
+ continue;
+ }
+
+ // We find our error by roundtripping the table through tf_inv.
+ //
+ // (The most likely use case for this approximation is to be inverted and
+ // used as the transfer function for a destination color space.)
+ //
+ // We've kept tf and tf_inv in sync above, but we can't guarantee that tf is
+ // invertible, so re-verify that here (and use the new inverse for testing).
+ // fit_nonlinear() should guarantee this, but the special cases that don't use
+ // it may fail this test.
+ if (!skcms_TransferFunction_invert(&tf, &tf_inv)) {
+ continue;
+ }
+
+ float err = skcms_MaxRoundtripError(curve, &tf_inv);
+ if (*max_error > err) {
+ *max_error = err;
+ *approx = tf;
+ }
+ }
+ return isfinitef_(*max_error);
+}
+
+// ~~~~ Impl. of skcms_Transform() ~~~~
+
+// The per-pixel pipeline opcodes executed by skcms_Transform(): loads of each
+// supported pixel format, channel/alpha fixups, color math (matrix, Lab/XYZ,
+// transfer functions, tables, CLUTs), then stores of each format.
+typedef enum {
+ Op_load_a8,
+ Op_load_g8,
+ Op_load_8888_palette8,
+ Op_load_4444,
+ Op_load_565,
+ Op_load_888,
+ Op_load_8888,
+ Op_load_1010102,
+ Op_load_101010x_XR,
+ Op_load_161616LE,
+ Op_load_16161616LE,
+ Op_load_161616BE,
+ Op_load_16161616BE,
+ Op_load_hhh,
+ Op_load_hhhh,
+ Op_load_fff,
+ Op_load_ffff,
+
+ Op_swap_rb,
+ Op_clamp,
+ Op_invert,
+ Op_force_opaque,
+ Op_premul,
+ Op_unpremul,
+ Op_matrix_3x3,
+ Op_matrix_3x4,
+
+ Op_lab_to_xyz,
+ Op_xyz_to_lab,
+
+ Op_tf_r,
+ Op_tf_g,
+ Op_tf_b,
+ Op_tf_a,
+
+ Op_pq_r,
+ Op_pq_g,
+ Op_pq_b,
+ Op_pq_a,
+
+ Op_hlg_r,
+ Op_hlg_g,
+ Op_hlg_b,
+ Op_hlg_a,
+
+ Op_hlginv_r,
+ Op_hlginv_g,
+ Op_hlginv_b,
+ Op_hlginv_a,
+
+ Op_table_r,
+ Op_table_g,
+ Op_table_b,
+ Op_table_a,
+
+ Op_clut_A2B,
+ Op_clut_B2A,
+
+ Op_store_a8,
+ Op_store_g8,
+ Op_store_4444,
+ Op_store_565,
+ Op_store_888,
+ Op_store_8888,
+ Op_store_1010102,
+ Op_store_161616LE,
+ Op_store_16161616LE,
+ Op_store_161616BE,
+ Op_store_16161616BE,
+ Op_store_101010x_XR,
+ Op_store_hhh,
+ Op_store_hhhh,
+ Op_store_fff,
+ Op_store_ffff,
+} Op;
+
+// Vec<N,T>: a SIMD vector of N lanes of T, spelled differently per compiler
+// (clang's ext_vector_type vs. GCC's vector_size via a helper struct).
+#if defined(__clang__)
+ template <int N, typename T> using Vec = T __attribute__((ext_vector_type(N)));
+#elif defined(__GNUC__)
+ // For some reason GCC accepts this nonsense, but not the more straightforward version,
+ // template <int N, typename T> using Vec = T __attribute__((vector_size(N*sizeof(T))));
+ template <int N, typename T>
+ struct VecHelper { typedef T __attribute__((vector_size(N*sizeof(T)))) V; };
+
+ template <int N, typename T> using Vec = typename VecHelper<N,T>::V;
+#endif
+
+// First, instantiate our default exec_ops() implementation using the default compilation target.
+
+// The "baseline" instantiation of the transform kernels: picks a lane count N
+// and Color type from the compile-time target (portable scalar, AVX-512, AVX,
+// NEON fp16, or 4-wide default), then includes Transform_inl.h to stamp out
+// the pipeline stages for that configuration.
+namespace baseline {
+#if defined(SKCMS_PORTABLE) || !(defined(__clang__) || defined(__GNUC__)) \
+ || (defined(__EMSCRIPTEN_major__) && !defined(__wasm_simd128__))
+ #define N 1
+ template <typename T> using V = T;
+ using Color = float;
+#elif defined(__AVX512F__) && defined(__AVX512DQ__)
+ #define N 16
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+#elif defined(__AVX__)
+ #define N 8
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+#elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(SKCMS_OPT_INTO_NEON_FP16)
+ #define N 8
+ template <typename T> using V = Vec<N,T>;
+ using Color = _Float16;
+#else
+ #define N 4
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+#endif
+
+ #include "src/Transform_inl.h"
+ #undef N
+}
+
+// Now, instantiate any other versions of run_program() we may want for runtime detection.
+#if !defined(SKCMS_PORTABLE) && \
+ !defined(SKCMS_NO_RUNTIME_CPU_DETECTION) && \
+ (( defined(__clang__) && __clang_major__ >= 5) || \
+ (!defined(__clang__) && defined(__GNUC__))) \
+ && defined(__x86_64__)
+
+ // If the baseline build isn't already AVX2, also compile an 8-wide
+ // AVX2+F16C ("Haswell") instantiation of the kernels, force-enabling the
+ // target features for just these functions via pragmas.
+ #if !defined(__AVX2__)
+ #if defined(__clang__)
+ #pragma clang attribute push(__attribute__((target("avx2,f16c"))), apply_to=function)
+ #elif defined(__GNUC__)
+ #pragma GCC push_options
+ #pragma GCC target("avx2,f16c")
+ #endif
+
+ namespace hsw {
+ #define USING_AVX
+ #define USING_AVX_F16C
+ #define USING_AVX2
+ #define N 8
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+
+ #include "src/Transform_inl.h"
+
+ // src/Transform_inl.h will undefine USING_* for us.
+ #undef N
+ }
+
+ #if defined(__clang__)
+ #pragma clang attribute pop
+ #elif defined(__GNUC__)
+ #pragma GCC pop_options
+ #endif
+
+ #define TEST_FOR_HSW
+ #endif
+
+ // Likewise, if the baseline isn't AVX-512, compile a 16-wide AVX-512
+ // ("Skylake-X") instantiation with the needed AVX-512 feature set enabled.
+ #if !defined(__AVX512F__) || !defined(__AVX512DQ__)
+ #if defined(__clang__)
+ #pragma clang attribute push(__attribute__((target("avx512f,avx512dq,avx512cd,avx512bw,avx512vl"))), apply_to=function)
+ #elif defined(__GNUC__)
+ #pragma GCC push_options
+ #pragma GCC target("avx512f,avx512dq,avx512cd,avx512bw,avx512vl")
+ #endif
+
+ namespace skx {
+ #define USING_AVX512F
+ #define N 16
+ template <typename T> using V = Vec<N,T>;
+ using Color = float;
+
+ #include "src/Transform_inl.h"
+
+ // src/Transform_inl.h will undefine USING_* for us.
+ #undef N
+ }
+
+ #if defined(__clang__)
+ #pragma clang attribute pop
+ #elif defined(__GNUC__)
+ #pragma GCC pop_options
+ #endif
+
+ #define TEST_FOR_SKX
+ #endif
+
+ #if defined(TEST_FOR_HSW) || defined(TEST_FOR_SKX)
+ enum class CpuType { None, HSW, SKX };
+ static CpuType cpu_type() {
+ static const CpuType type = []{
+ if (!runtime_cpu_detection) {
+ return CpuType::None;
+ }
+ // See http://www.sandpile.org/x86/cpuid.htm
+
+ // First, a basic cpuid(1) lets us check prerequisites for HSW, SKX.
+ uint32_t eax, ebx, ecx, edx;
+ __asm__ __volatile__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ : "0"(1), "2"(0));
+ if ((edx & (1u<<25)) && // SSE
+ (edx & (1u<<26)) && // SSE2
+ (ecx & (1u<< 0)) && // SSE3
+ (ecx & (1u<< 9)) && // SSSE3
+ (ecx & (1u<<12)) && // FMA (N.B. not used, avoided even)
+ (ecx & (1u<<19)) && // SSE4.1
+ (ecx & (1u<<20)) && // SSE4.2
+ (ecx & (1u<<26)) && // XSAVE
+ (ecx & (1u<<27)) && // OSXSAVE
+ (ecx & (1u<<28)) && // AVX
+ (ecx & (1u<<29))) { // F16C
+
+ // Call cpuid(7) to check for AVX2 and AVX-512 bits.
+ __asm__ __volatile__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ : "0"(7), "2"(0));
+ // eax from xgetbv(0) will tell us whether XMM, YMM, and ZMM state is saved.
+ uint32_t xcr0, dont_need_edx;
+ __asm__ __volatile__("xgetbv" : "=a"(xcr0), "=d"(dont_need_edx) : "c"(0));
+
+ if ((xcr0 & (1u<<1)) && // XMM register state saved?
+ (xcr0 & (1u<<2)) && // YMM register state saved?
+ (ebx & (1u<<5))) { // AVX2
+ // At this point we're at least HSW. Continue checking for SKX.
+ if ((xcr0 & (1u<< 5)) && // Opmasks state saved?
+ (xcr0 & (1u<< 6)) && // First 16 ZMM registers saved?
+ (xcr0 & (1u<< 7)) && // High 16 ZMM registers saved?
+ (ebx & (1u<<16)) && // AVX512F
+ (ebx & (1u<<17)) && // AVX512DQ
+ (ebx & (1u<<28)) && // AVX512CD
+ (ebx & (1u<<30)) && // AVX512BW
+ (ebx & (1u<<31))) { // AVX512VL
+ return CpuType::SKX;
+ }
+ return CpuType::HSW;
+ }
+ }
+ return CpuType::None;
+ }();
+ return type;
+ }
+ #endif
+
+#endif
+
+typedef struct {
+ Op op;
+ const void* arg;
+} OpAndArg;
+
+static OpAndArg select_curve_op(const skcms_Curve* curve, int channel) {
+ static const struct { Op sRGBish, PQish, HLGish, HLGinvish, table; } ops[] = {
+ { Op_tf_r, Op_pq_r, Op_hlg_r, Op_hlginv_r, Op_table_r },
+ { Op_tf_g, Op_pq_g, Op_hlg_g, Op_hlginv_g, Op_table_g },
+ { Op_tf_b, Op_pq_b, Op_hlg_b, Op_hlginv_b, Op_table_b },
+ { Op_tf_a, Op_pq_a, Op_hlg_a, Op_hlginv_a, Op_table_a },
+ };
+ const auto& op = ops[channel];
+
+ if (curve->table_entries == 0) {
+ const OpAndArg noop = { Op_load_a8/*doesn't matter*/, nullptr };
+
+ const skcms_TransferFunction& tf = curve->parametric;
+
+ if (tf.g == 1 && tf.a == 1 &&
+ tf.b == 0 && tf.c == 0 && tf.d == 0 && tf.e == 0 && tf.f == 0) {
+ return noop;
+ }
+
+ switch (classify(tf)) {
+ case skcms_TFType_Invalid: return noop;
+ case skcms_TFType_sRGBish: return OpAndArg{op.sRGBish, &tf};
+ case skcms_TFType_PQish: return OpAndArg{op.PQish, &tf};
+ case skcms_TFType_HLGish: return OpAndArg{op.HLGish, &tf};
+ case skcms_TFType_HLGinvish: return OpAndArg{op.HLGinvish, &tf};
+ }
+ }
+ return OpAndArg{op.table, curve};
+}
+
+static size_t bytes_per_pixel(skcms_PixelFormat fmt) {
+ switch (fmt >> 1) { // ignore rgb/bgr
+ case skcms_PixelFormat_A_8 >> 1: return 1;
+ case skcms_PixelFormat_G_8 >> 1: return 1;
+ case skcms_PixelFormat_RGBA_8888_Palette8 >> 1: return 1;
+ case skcms_PixelFormat_ABGR_4444 >> 1: return 2;
+ case skcms_PixelFormat_RGB_565 >> 1: return 2;
+ case skcms_PixelFormat_RGB_888 >> 1: return 3;
+ case skcms_PixelFormat_RGBA_8888 >> 1: return 4;
+ case skcms_PixelFormat_RGBA_8888_sRGB >> 1: return 4;
+ case skcms_PixelFormat_RGBA_1010102 >> 1: return 4;
+ case skcms_PixelFormat_RGB_101010x_XR >> 1: return 4;
+ case skcms_PixelFormat_RGB_161616LE >> 1: return 6;
+ case skcms_PixelFormat_RGBA_16161616LE >> 1: return 8;
+ case skcms_PixelFormat_RGB_161616BE >> 1: return 6;
+ case skcms_PixelFormat_RGBA_16161616BE >> 1: return 8;
+ case skcms_PixelFormat_RGB_hhh_Norm >> 1: return 6;
+ case skcms_PixelFormat_RGBA_hhhh_Norm >> 1: return 8;
+ case skcms_PixelFormat_RGB_hhh >> 1: return 6;
+ case skcms_PixelFormat_RGBA_hhhh >> 1: return 8;
+ case skcms_PixelFormat_RGB_fff >> 1: return 12;
+ case skcms_PixelFormat_RGBA_ffff >> 1: return 16;
+ }
+ assert(false);
+ return 0;
+}
+
+static bool prep_for_destination(const skcms_ICCProfile* profile,
+ skcms_Matrix3x3* fromXYZD50,
+ skcms_TransferFunction* invR,
+ skcms_TransferFunction* invG,
+ skcms_TransferFunction* invB) {
+ // skcms_Transform() supports B2A destinations...
+ if (profile->has_B2A) { return true; }
+ // ...and destinations with parametric transfer functions and an XYZD50 gamut matrix.
+ return profile->has_trc
+ && profile->has_toXYZD50
+ && profile->trc[0].table_entries == 0
+ && profile->trc[1].table_entries == 0
+ && profile->trc[2].table_entries == 0
+ && skcms_TransferFunction_invert(&profile->trc[0].parametric, invR)
+ && skcms_TransferFunction_invert(&profile->trc[1].parametric, invG)
+ && skcms_TransferFunction_invert(&profile->trc[2].parametric, invB)
+ && skcms_Matrix3x3_invert(&profile->toXYZD50, fromXYZD50);
+}
+
+bool skcms_Transform(const void* src,
+ skcms_PixelFormat srcFmt,
+ skcms_AlphaFormat srcAlpha,
+ const skcms_ICCProfile* srcProfile,
+ void* dst,
+ skcms_PixelFormat dstFmt,
+ skcms_AlphaFormat dstAlpha,
+ const skcms_ICCProfile* dstProfile,
+ size_t npixels) {
+ return skcms_TransformWithPalette(src, srcFmt, srcAlpha, srcProfile,
+ dst, dstFmt, dstAlpha, dstProfile,
+ npixels, nullptr);
+}
+
+bool skcms_TransformWithPalette(const void* src,
+ skcms_PixelFormat srcFmt,
+ skcms_AlphaFormat srcAlpha,
+ const skcms_ICCProfile* srcProfile,
+ void* dst,
+ skcms_PixelFormat dstFmt,
+ skcms_AlphaFormat dstAlpha,
+ const skcms_ICCProfile* dstProfile,
+ size_t nz,
+ const void* palette) {
+ const size_t dst_bpp = bytes_per_pixel(dstFmt),
+ src_bpp = bytes_per_pixel(srcFmt);
+ // Let's just refuse if the request is absurdly big.
+ if (nz * dst_bpp > INT_MAX || nz * src_bpp > INT_MAX) {
+ return false;
+ }
+ int n = (int)nz;
+
+ // Null profiles default to sRGB. Passing null for both is handy when doing format conversion.
+ if (!srcProfile) {
+ srcProfile = skcms_sRGB_profile();
+ }
+ if (!dstProfile) {
+ dstProfile = skcms_sRGB_profile();
+ }
+
+ // We can't transform in place unless the PixelFormats are the same size.
+ if (dst == src && dst_bpp != src_bpp) {
+ return false;
+ }
+ // TODO: more careful alias rejection (like, dst == src + 1)?
+
+ if (needs_palette(srcFmt) && !palette) {
+ return false;
+ }
+
+ Op program [32];
+ const void* arguments[32];
+
+ Op* ops = program;
+ const void** args = arguments;
+
+ // These are always parametric curves of some sort.
+ skcms_Curve dst_curves[3];
+ dst_curves[0].table_entries =
+ dst_curves[1].table_entries =
+ dst_curves[2].table_entries = 0;
+
+ skcms_Matrix3x3 from_xyz;
+
+ switch (srcFmt >> 1) {
+ default: return false;
+ case skcms_PixelFormat_A_8 >> 1: *ops++ = Op_load_a8; break;
+ case skcms_PixelFormat_G_8 >> 1: *ops++ = Op_load_g8; break;
+ case skcms_PixelFormat_ABGR_4444 >> 1: *ops++ = Op_load_4444; break;
+ case skcms_PixelFormat_RGB_565 >> 1: *ops++ = Op_load_565; break;
+ case skcms_PixelFormat_RGB_888 >> 1: *ops++ = Op_load_888; break;
+ case skcms_PixelFormat_RGBA_8888 >> 1: *ops++ = Op_load_8888; break;
+ case skcms_PixelFormat_RGBA_1010102 >> 1: *ops++ = Op_load_1010102; break;
+ case skcms_PixelFormat_RGB_101010x_XR >> 1: *ops++ = Op_load_101010x_XR; break;
+ case skcms_PixelFormat_RGB_161616LE >> 1: *ops++ = Op_load_161616LE; break;
+ case skcms_PixelFormat_RGBA_16161616LE >> 1: *ops++ = Op_load_16161616LE; break;
+ case skcms_PixelFormat_RGB_161616BE >> 1: *ops++ = Op_load_161616BE; break;
+ case skcms_PixelFormat_RGBA_16161616BE >> 1: *ops++ = Op_load_16161616BE; break;
+ case skcms_PixelFormat_RGB_hhh_Norm >> 1: *ops++ = Op_load_hhh; break;
+ case skcms_PixelFormat_RGBA_hhhh_Norm >> 1: *ops++ = Op_load_hhhh; break;
+ case skcms_PixelFormat_RGB_hhh >> 1: *ops++ = Op_load_hhh; break;
+ case skcms_PixelFormat_RGBA_hhhh >> 1: *ops++ = Op_load_hhhh; break;
+ case skcms_PixelFormat_RGB_fff >> 1: *ops++ = Op_load_fff; break;
+ case skcms_PixelFormat_RGBA_ffff >> 1: *ops++ = Op_load_ffff; break;
+
+ case skcms_PixelFormat_RGBA_8888_Palette8 >> 1: *ops++ = Op_load_8888_palette8;
+ *args++ = palette;
+ break;
+ case skcms_PixelFormat_RGBA_8888_sRGB >> 1:
+ *ops++ = Op_load_8888;
+ *ops++ = Op_tf_r; *args++ = skcms_sRGB_TransferFunction();
+ *ops++ = Op_tf_g; *args++ = skcms_sRGB_TransferFunction();
+ *ops++ = Op_tf_b; *args++ = skcms_sRGB_TransferFunction();
+ break;
+ }
+ if (srcFmt == skcms_PixelFormat_RGB_hhh_Norm ||
+ srcFmt == skcms_PixelFormat_RGBA_hhhh_Norm) {
+ *ops++ = Op_clamp;
+ }
+ if (srcFmt & 1) {
+ *ops++ = Op_swap_rb;
+ }
+ skcms_ICCProfile gray_dst_profile;
+ if ((dstFmt >> 1) == (skcms_PixelFormat_G_8 >> 1)) {
+ // When transforming to gray, stop at XYZ (by setting toXYZ to identity), then transform
+ // luminance (Y) by the destination transfer function.
+ gray_dst_profile = *dstProfile;
+ skcms_SetXYZD50(&gray_dst_profile, &skcms_XYZD50_profile()->toXYZD50);
+ dstProfile = &gray_dst_profile;
+ }
+
+ if (srcProfile->data_color_space == skcms_Signature_CMYK) {
+ // Photoshop creates CMYK images as inverse CMYK.
+ // These happen to be the only ones we've _ever_ seen.
+ *ops++ = Op_invert;
+ // With CMYK, ignore the alpha type, to avoid changing K or conflating CMY with K.
+ srcAlpha = skcms_AlphaFormat_Unpremul;
+ }
+
+ if (srcAlpha == skcms_AlphaFormat_Opaque) {
+ *ops++ = Op_force_opaque;
+ } else if (srcAlpha == skcms_AlphaFormat_PremulAsEncoded) {
+ *ops++ = Op_unpremul;
+ }
+
+ if (dstProfile != srcProfile) {
+
+ if (!prep_for_destination(dstProfile,
+ &from_xyz,
+ &dst_curves[0].parametric,
+ &dst_curves[1].parametric,
+ &dst_curves[2].parametric)) {
+ return false;
+ }
+
+ if (srcProfile->has_A2B) {
+ if (srcProfile->A2B.input_channels) {
+ for (int i = 0; i < (int)srcProfile->A2B.input_channels; i++) {
+ OpAndArg oa = select_curve_op(&srcProfile->A2B.input_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ *ops++ = Op_clamp;
+ *ops++ = Op_clut_A2B;
+ *args++ = &srcProfile->A2B;
+ }
+
+ if (srcProfile->A2B.matrix_channels == 3) {
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(&srcProfile->A2B.matrix_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+
+ static const skcms_Matrix3x4 I = {{
+ {1,0,0,0},
+ {0,1,0,0},
+ {0,0,1,0},
+ }};
+ if (0 != memcmp(&I, &srcProfile->A2B.matrix, sizeof(I))) {
+ *ops++ = Op_matrix_3x4;
+ *args++ = &srcProfile->A2B.matrix;
+ }
+ }
+
+ if (srcProfile->A2B.output_channels == 3) {
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(&srcProfile->A2B.output_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ }
+
+ if (srcProfile->pcs == skcms_Signature_Lab) {
+ *ops++ = Op_lab_to_xyz;
+ }
+
+ } else if (srcProfile->has_trc && srcProfile->has_toXYZD50) {
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(&srcProfile->trc[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ } else {
+ return false;
+ }
+
+ // A2B sources are in XYZD50 by now, but TRC sources are still in their original gamut.
+ assert (srcProfile->has_A2B || srcProfile->has_toXYZD50);
+
+ if (dstProfile->has_B2A) {
+ // B2A needs its input in XYZD50, so transform TRC sources now.
+ if (!srcProfile->has_A2B) {
+ *ops++ = Op_matrix_3x3;
+ *args++ = &srcProfile->toXYZD50;
+ }
+
+ if (dstProfile->pcs == skcms_Signature_Lab) {
+ *ops++ = Op_xyz_to_lab;
+ }
+
+ if (dstProfile->B2A.input_channels == 3) {
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(&dstProfile->B2A.input_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ }
+
+ if (dstProfile->B2A.matrix_channels == 3) {
+ static const skcms_Matrix3x4 I = {{
+ {1,0,0,0},
+ {0,1,0,0},
+ {0,0,1,0},
+ }};
+ if (0 != memcmp(&I, &dstProfile->B2A.matrix, sizeof(I))) {
+ *ops++ = Op_matrix_3x4;
+ *args++ = &dstProfile->B2A.matrix;
+ }
+
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(&dstProfile->B2A.matrix_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ }
+
+ if (dstProfile->B2A.output_channels) {
+ *ops++ = Op_clamp;
+ *ops++ = Op_clut_B2A;
+ *args++ = &dstProfile->B2A;
+ for (int i = 0; i < (int)dstProfile->B2A.output_channels; i++) {
+ OpAndArg oa = select_curve_op(&dstProfile->B2A.output_curves[i], i);
+ if (oa.arg) {
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ }
+ } else {
+ // This is a TRC destination.
+ // We'll concat any src->xyz matrix with our xyz->dst matrix into one src->dst matrix.
+ // (A2B sources are already in XYZD50, making that src->xyz matrix I.)
+ static const skcms_Matrix3x3 I = {{
+ { 1.0f, 0.0f, 0.0f },
+ { 0.0f, 1.0f, 0.0f },
+ { 0.0f, 0.0f, 1.0f },
+ }};
+ const skcms_Matrix3x3* to_xyz = srcProfile->has_A2B ? &I : &srcProfile->toXYZD50;
+
+ // There's a chance the source and destination gamuts are identical,
+ // in which case we can skip the gamut transform.
+ if (0 != memcmp(&dstProfile->toXYZD50, to_xyz, sizeof(skcms_Matrix3x3))) {
+ // Concat the entire gamut transform into from_xyz,
+ // now slightly misnamed but it's a handy spot to stash the result.
+ from_xyz = skcms_Matrix3x3_concat(&from_xyz, to_xyz);
+ *ops++ = Op_matrix_3x3;
+ *args++ = &from_xyz;
+ }
+
+ // Encode back to dst RGB using its parametric transfer functions.
+ for (int i = 0; i < 3; i++) {
+ OpAndArg oa = select_curve_op(dst_curves+i, i);
+ if (oa.arg) {
+ assert (oa.op != Op_table_r &&
+ oa.op != Op_table_g &&
+ oa.op != Op_table_b &&
+ oa.op != Op_table_a);
+ *ops++ = oa.op;
+ *args++ = oa.arg;
+ }
+ }
+ }
+ }
+
+ // Clamp here before premul to make sure we're clamping to normalized values _and_ gamut,
+ // not just to values that fit in [0,1].
+ //
+ // E.g. r = 1.1, a = 0.5 would fit fine in fixed point after premul (ra=0.55,a=0.5),
+ // but would be carrying r > 1, which is really unexpected for downstream consumers.
+ if (dstFmt < skcms_PixelFormat_RGB_hhh) {
+ *ops++ = Op_clamp;
+ }
+
+ if (dstProfile->data_color_space == skcms_Signature_CMYK) {
+ // Photoshop creates CMYK images as inverse CMYK.
+ // These happen to be the only ones we've _ever_ seen.
+ *ops++ = Op_invert;
+
+ // CMYK has no alpha channel, so make sure dstAlpha is a no-op.
+ dstAlpha = skcms_AlphaFormat_Unpremul;
+ }
+
+ if (dstAlpha == skcms_AlphaFormat_Opaque) {
+ *ops++ = Op_force_opaque;
+ } else if (dstAlpha == skcms_AlphaFormat_PremulAsEncoded) {
+ *ops++ = Op_premul;
+ }
+ if (dstFmt & 1) {
+ *ops++ = Op_swap_rb;
+ }
+ switch (dstFmt >> 1) {
+ default: return false;
+ case skcms_PixelFormat_A_8 >> 1: *ops++ = Op_store_a8; break;
+ case skcms_PixelFormat_G_8 >> 1: *ops++ = Op_store_g8; break;
+ case skcms_PixelFormat_ABGR_4444 >> 1: *ops++ = Op_store_4444; break;
+ case skcms_PixelFormat_RGB_565 >> 1: *ops++ = Op_store_565; break;
+ case skcms_PixelFormat_RGB_888 >> 1: *ops++ = Op_store_888; break;
+ case skcms_PixelFormat_RGBA_8888 >> 1: *ops++ = Op_store_8888; break;
+ case skcms_PixelFormat_RGBA_1010102 >> 1: *ops++ = Op_store_1010102; break;
+ case skcms_PixelFormat_RGB_161616LE >> 1: *ops++ = Op_store_161616LE; break;
+ case skcms_PixelFormat_RGBA_16161616LE >> 1: *ops++ = Op_store_16161616LE; break;
+ case skcms_PixelFormat_RGB_161616BE >> 1: *ops++ = Op_store_161616BE; break;
+ case skcms_PixelFormat_RGBA_16161616BE >> 1: *ops++ = Op_store_16161616BE; break;
+ case skcms_PixelFormat_RGB_hhh_Norm >> 1: *ops++ = Op_store_hhh; break;
+ case skcms_PixelFormat_RGBA_hhhh_Norm >> 1: *ops++ = Op_store_hhhh; break;
+ case skcms_PixelFormat_RGB_101010x_XR >> 1: *ops++ = Op_store_101010x_XR; break;
+ case skcms_PixelFormat_RGB_hhh >> 1: *ops++ = Op_store_hhh; break;
+ case skcms_PixelFormat_RGBA_hhhh >> 1: *ops++ = Op_store_hhhh; break;
+ case skcms_PixelFormat_RGB_fff >> 1: *ops++ = Op_store_fff; break;
+ case skcms_PixelFormat_RGBA_ffff >> 1: *ops++ = Op_store_ffff; break;
+
+ case skcms_PixelFormat_RGBA_8888_sRGB >> 1:
+ *ops++ = Op_tf_r; *args++ = skcms_sRGB_Inverse_TransferFunction();
+ *ops++ = Op_tf_g; *args++ = skcms_sRGB_Inverse_TransferFunction();
+ *ops++ = Op_tf_b; *args++ = skcms_sRGB_Inverse_TransferFunction();
+ *ops++ = Op_store_8888;
+ break;
+ }
+
+ auto run = baseline::run_program;
+#if defined(TEST_FOR_HSW)
+ switch (cpu_type()) {
+ case CpuType::None: break;
+ case CpuType::HSW: run = hsw::run_program; break;
+ case CpuType::SKX: run = hsw::run_program; break;
+ }
+#endif
+#if defined(TEST_FOR_SKX)
+ switch (cpu_type()) {
+ case CpuType::None: break;
+ case CpuType::HSW: break;
+ case CpuType::SKX: run = skx::run_program; break;
+ }
+#endif
+ run(program, arguments, (const char*)src, (char*)dst, n, src_bpp,dst_bpp);
+ return true;
+}
+
+static void assert_usable_as_destination(const skcms_ICCProfile* profile) {
+#if defined(NDEBUG)
+ (void)profile;
+#else
+ skcms_Matrix3x3 fromXYZD50;
+ skcms_TransferFunction invR, invG, invB;
+ assert(prep_for_destination(profile, &fromXYZD50, &invR, &invG, &invB));
+#endif
+}
+
+bool skcms_MakeUsableAsDestination(skcms_ICCProfile* profile) {
+ if (!profile->has_B2A) {
+ skcms_Matrix3x3 fromXYZD50;
+ if (!profile->has_trc || !profile->has_toXYZD50
+ || !skcms_Matrix3x3_invert(&profile->toXYZD50, &fromXYZD50)) {
+ return false;
+ }
+
+ skcms_TransferFunction tf[3];
+ for (int i = 0; i < 3; i++) {
+ skcms_TransferFunction inv;
+ if (profile->trc[i].table_entries == 0
+ && skcms_TransferFunction_invert(&profile->trc[i].parametric, &inv)) {
+ tf[i] = profile->trc[i].parametric;
+ continue;
+ }
+
+ float max_error;
+ // Parametric curves from skcms_ApproximateCurve() are guaranteed to be invertible.
+ if (!skcms_ApproximateCurve(&profile->trc[i], &tf[i], &max_error)) {
+ return false;
+ }
+ }
+
+ for (int i = 0; i < 3; ++i) {
+ profile->trc[i].table_entries = 0;
+ profile->trc[i].parametric = tf[i];
+ }
+ }
+ assert_usable_as_destination(profile);
+ return true;
+}
+
+bool skcms_MakeUsableAsDestinationWithSingleCurve(skcms_ICCProfile* profile) {
+ // Call skcms_MakeUsableAsDestination() with B2A disabled;
+ // on success that'll return a TRC/XYZ profile with three skcms_TransferFunctions.
+ skcms_ICCProfile result = *profile;
+ result.has_B2A = false;
+ if (!skcms_MakeUsableAsDestination(&result)) {
+ return false;
+ }
+
+ // Of the three, pick the transfer function that best fits the other two.
+ int best_tf = 0;
+ float min_max_error = INFINITY_;
+ for (int i = 0; i < 3; i++) {
+ skcms_TransferFunction inv;
+ if (!skcms_TransferFunction_invert(&result.trc[i].parametric, &inv)) {
+ return false;
+ }
+
+ float err = 0;
+ for (int j = 0; j < 3; ++j) {
+ err = fmaxf_(err, skcms_MaxRoundtripError(&profile->trc[j], &inv));
+ }
+ if (min_max_error > err) {
+ min_max_error = err;
+ best_tf = i;
+ }
+ }
+
+ for (int i = 0; i < 3; i++) {
+ result.trc[i].parametric = result.trc[best_tf].parametric;
+ }
+
+ *profile = result;
+ assert_usable_as_destination(profile);
+ return true;
+}
diff --git a/gfx/skia/skia/modules/skcms/skcms.gni b/gfx/skia/skia/modules/skcms/skcms.gni
new file mode 100644
index 0000000000..aa7daa2cf4
--- /dev/null
+++ b/gfx/skia/skia/modules/skcms/skcms.gni
@@ -0,0 +1,20 @@
+# DO NOT EDIT: This is a generated file.
+# See //bazel/exporter_tool/README.md for more information.
+#
+# The source of truth is //modules/skcms/BUILD.bazel
+
+# To update this file, run make -C bazel generate_gni
+
+_modules = get_path_info("../../modules", "abspath")
+
+# Generated by Bazel rule //modules/skcms:public_hdrs
+skcms_public_headers = [ "$_modules/skcms/skcms.h" ]
+
+# List generated by Bazel rules:
+# //modules/skcms:srcs
+# //modules/skcms:textual_hdrs
+skcms_sources = [
+ "$_modules/skcms/skcms.cc",
+ "$_modules/skcms/skcms_internal.h",
+ "$_modules/skcms/src/Transform_inl.h",
+]
diff --git a/gfx/skia/skia/modules/skcms/skcms.h b/gfx/skia/skia/modules/skcms/skcms.h
new file mode 100644
index 0000000000..322549b38f
--- /dev/null
+++ b/gfx/skia/skia/modules/skcms/skcms.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#pragma once
+
+// skcms.h contains the entire public API for skcms.
+
+#ifndef SKCMS_API
+ #define SKCMS_API
+#endif
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// A row-major 3x3 matrix (ie vals[row][col])
+typedef struct skcms_Matrix3x3 {
+ float vals[3][3];
+} skcms_Matrix3x3;
+
+// It is _not_ safe to alias the pointers to invert in-place.
+SKCMS_API bool skcms_Matrix3x3_invert(const skcms_Matrix3x3*, skcms_Matrix3x3*);
+SKCMS_API skcms_Matrix3x3 skcms_Matrix3x3_concat(const skcms_Matrix3x3*, const skcms_Matrix3x3*);
+
+// A row-major 3x4 matrix (ie vals[row][col])
+typedef struct skcms_Matrix3x4 {
+ float vals[3][4];
+} skcms_Matrix3x4;
+
+// A transfer function mapping encoded values to linear values,
+// represented by this 7-parameter piecewise function:
+//
+// linear = sign(encoded) * (c*|encoded| + f) , 0 <= |encoded| < d
+// = sign(encoded) * ((a*|encoded| + b)^g + e), d <= |encoded|
+//
+// (A simple gamma transfer function sets g to gamma and a to 1.)
+typedef struct skcms_TransferFunction {
+ float g, a,b,c,d,e,f;
+} skcms_TransferFunction;
+
+SKCMS_API float skcms_TransferFunction_eval (const skcms_TransferFunction*, float);
+SKCMS_API bool skcms_TransferFunction_invert(const skcms_TransferFunction*,
+ skcms_TransferFunction*);
+
+typedef enum skcms_TFType {
+ skcms_TFType_Invalid,
+ skcms_TFType_sRGBish,
+ skcms_TFType_PQish,
+ skcms_TFType_HLGish,
+ skcms_TFType_HLGinvish,
+} skcms_TFType;
+
+// Identify which kind of transfer function is encoded in an skcms_TransferFunction
+SKCMS_API skcms_TFType skcms_TransferFunction_getType(const skcms_TransferFunction*);
+
+// We can jam a couple alternate transfer function forms into skcms_TransferFunction,
+// including those matching the general forms of the SMPTE ST 2084 PQ function or HLG.
+//
+// PQish:
+// max(A + B|encoded|^C, 0)
+// linear = sign(encoded) * (------------------------) ^ F
+// D + E|encoded|^C
+SKCMS_API bool skcms_TransferFunction_makePQish(skcms_TransferFunction*,
+ float A, float B, float C,
+ float D, float E, float F);
+// HLGish:
+// { K * sign(encoded) * ( (R|encoded|)^G ) when 0 <= |encoded| <= 1/R
+// linear = { K * sign(encoded) * ( e^(a(|encoded|-c)) + b ) when 1/R < |encoded|
+SKCMS_API bool skcms_TransferFunction_makeScaledHLGish(skcms_TransferFunction*,
+ float K, float R, float G,
+ float a, float b, float c);
+
+// Compatibility shim with K=1 for old callers.
+static inline bool skcms_TransferFunction_makeHLGish(skcms_TransferFunction* fn,
+ float R, float G,
+ float a, float b, float c) {
+ return skcms_TransferFunction_makeScaledHLGish(fn, 1.0f, R,G, a,b,c);
+}
+
+// PQ mapping encoded [0,1] to linear [0,1].
+static inline bool skcms_TransferFunction_makePQ(skcms_TransferFunction* tf) {
+ return skcms_TransferFunction_makePQish(tf, -107/128.0f, 1.0f, 32/2523.0f
+ , 2413/128.0f, -2392/128.0f, 8192/1305.0f);
+}
+// HLG mapping encoded [0,1] to linear [0,12].
+static inline bool skcms_TransferFunction_makeHLG(skcms_TransferFunction* tf) {
+ return skcms_TransferFunction_makeHLGish(tf, 2.0f, 2.0f
+ , 1/0.17883277f, 0.28466892f, 0.55991073f);
+}
+
+// Is this an ordinary sRGB-ish transfer function, or one of the HDR forms we support?
+SKCMS_API bool skcms_TransferFunction_isSRGBish(const skcms_TransferFunction*);
+SKCMS_API bool skcms_TransferFunction_isPQish (const skcms_TransferFunction*);
+SKCMS_API bool skcms_TransferFunction_isHLGish (const skcms_TransferFunction*);
+
+// Unified representation of 'curv' or 'para' tag data, or a 1D table from 'mft1' or 'mft2'
+typedef union skcms_Curve {
+ struct {
+ uint32_t alias_of_table_entries;
+ skcms_TransferFunction parametric;
+ };
+ struct {
+ uint32_t table_entries;
+ const uint8_t* table_8;
+ const uint8_t* table_16;
+ };
+} skcms_Curve;
+
+// Complex transforms between device space (A) and profile connection space (B):
+// A2B: device -> [ "A" curves -> CLUT ] -> [ "M" curves -> matrix ] -> "B" curves -> PCS
+// B2A: device <- [ "A" curves <- CLUT ] <- [ "M" curves <- matrix ] <- "B" curves <- PCS
+
+typedef struct skcms_A2B {
+ // Optional: N 1D "A" curves, followed by an N-dimensional CLUT.
+    // If input_channels == 0, these curves and CLUT are skipped.
+ // Otherwise, input_channels must be in [1, 4].
+ uint32_t input_channels;
+ skcms_Curve input_curves[4];
+ uint8_t grid_points[4];
+ const uint8_t* grid_8;
+ const uint8_t* grid_16;
+
+ // Optional: 3 1D "M" curves, followed by a color matrix.
+    // If matrix_channels == 0, these curves and matrix are skipped.
+ // Otherwise, matrix_channels must be 3.
+ uint32_t matrix_channels;
+ skcms_Curve matrix_curves[3];
+ skcms_Matrix3x4 matrix;
+
+ // Required: 3 1D "B" curves. Always present, and output_channels must be 3.
+ uint32_t output_channels;
+ skcms_Curve output_curves[3];
+} skcms_A2B;
+
+typedef struct skcms_B2A {
+ // Required: 3 1D "B" curves. Always present, and input_channels must be 3.
+ uint32_t input_channels;
+ skcms_Curve input_curves[3];
+
+ // Optional: a color matrix, followed by 3 1D "M" curves.
+ // If matrix_channels == 0, this matrix and these curves are skipped,
+ // Otherwise, matrix_channels must be 3.
+ uint32_t matrix_channels;
+ skcms_Matrix3x4 matrix;
+ skcms_Curve matrix_curves[3];
+
+ // Optional: an N-dimensional CLUT, followed by N 1D "A" curves.
+ // If output_channels == 0, this CLUT and these curves are skipped,
+ // Otherwise, output_channels must be in [1, 4].
+ uint32_t output_channels;
+ uint8_t grid_points[4];
+ const uint8_t* grid_8;
+ const uint8_t* grid_16;
+ skcms_Curve output_curves[4];
+} skcms_B2A;
+
+typedef struct skcms_CICP {
+ uint8_t color_primaries;
+ uint8_t transfer_characteristics;
+ uint8_t matrix_coefficients;
+ uint8_t video_full_range_flag;
+} skcms_CICP;
+
+typedef struct skcms_ICCProfile {
+ const uint8_t* buffer;
+
+ uint32_t size;
+ uint32_t data_color_space;
+ uint32_t pcs;
+ uint32_t tag_count;
+
+ // skcms_Parse() will set commonly-used fields for you when possible:
+
+ // If we can parse red, green and blue transfer curves from the profile,
+ // trc will be set to those three curves, and has_trc will be true.
+ bool has_trc;
+ skcms_Curve trc[3];
+
+ // If this profile's gamut can be represented by a 3x3 transform to XYZD50,
+ // skcms_Parse() sets toXYZD50 to that transform and has_toXYZD50 to true.
+ bool has_toXYZD50;
+ skcms_Matrix3x3 toXYZD50;
+
+ // If the profile has a valid A2B0 or A2B1 tag, skcms_Parse() sets A2B to
+ // that data, and has_A2B to true. skcms_ParseWithA2BPriority() does the
+ // same following any user-provided prioritization of A2B0, A2B1, or A2B2.
+ bool has_A2B;
+ skcms_A2B A2B;
+
+ // If the profile has a valid B2A0 or B2A1 tag, skcms_Parse() sets B2A to
+ // that data, and has_B2A to true. skcms_ParseWithA2BPriority() does the
+ // same following any user-provided prioritization of B2A0, B2A1, or B2A2.
+ bool has_B2A;
+ skcms_B2A B2A;
+
+ // If the profile has a valid CICP tag, skcms_Parse() sets CICP to that data,
+ // and has_CICP to true.
+ bool has_CICP;
+ skcms_CICP CICP;
+} skcms_ICCProfile;
+
+// The sRGB color profile is so commonly used that we offer a canonical skcms_ICCProfile for it.
+SKCMS_API const skcms_ICCProfile* skcms_sRGB_profile(void);
+// Ditto for XYZD50, the most common profile connection space.
+SKCMS_API const skcms_ICCProfile* skcms_XYZD50_profile(void);
+
+SKCMS_API const skcms_TransferFunction* skcms_sRGB_TransferFunction(void);
+SKCMS_API const skcms_TransferFunction* skcms_sRGB_Inverse_TransferFunction(void);
+SKCMS_API const skcms_TransferFunction* skcms_Identity_TransferFunction(void);
+
+// Practical equality test for two skcms_ICCProfiles.
+// The implementation is subject to change, but it will always try to answer
+// "can I substitute A for B?" and "can I skip transforming from A to B?".
+SKCMS_API bool skcms_ApproximatelyEqualProfiles(const skcms_ICCProfile* A,
+ const skcms_ICCProfile* B);
+
+// Practical test that answers: Is curve roughly the inverse of inv_tf? Typically used by passing
+// the inverse of a known parametric transfer function (like sRGB), to determine if a particular
+// curve is very close to sRGB.
+SKCMS_API bool skcms_AreApproximateInverses(const skcms_Curve* curve,
+ const skcms_TransferFunction* inv_tf);
+
+// Similar to above, answering the question for all three TRC curves of the given profile. Again,
+// passing skcms_sRGB_InverseTransferFunction as inv_tf will answer the question:
+// "Does this profile have a transfer function that is very close to sRGB?"
+SKCMS_API bool skcms_TRCs_AreApproximateInverse(const skcms_ICCProfile* profile,
+ const skcms_TransferFunction* inv_tf);
+
+// Parse an ICC profile and return true if possible, otherwise return false.
+// Selects an A2B profile (if present) according to priority list (each entry 0-2).
+// The buffer is not copied; it must remain valid as long as the skcms_ICCProfile will be used.
+SKCMS_API bool skcms_ParseWithA2BPriority(const void*, size_t,
+ const int priority[], int priorities,
+ skcms_ICCProfile*);
+
+static inline bool skcms_Parse(const void* buf, size_t len, skcms_ICCProfile* profile) {
+ // For continuity of existing user expectations,
+    // prefer A2B0 (perceptual) over A2B1 (relative colorimetric), and ignore A2B2 (saturation).
+ const int priority[] = {0,1};
+ return skcms_ParseWithA2BPriority(buf, len,
+ priority, sizeof(priority)/sizeof(*priority),
+ profile);
+}
+
+SKCMS_API bool skcms_ApproximateCurve(const skcms_Curve* curve,
+ skcms_TransferFunction* approx,
+ float* max_error);
+
+SKCMS_API bool skcms_GetCHAD(const skcms_ICCProfile*, skcms_Matrix3x3*);
+SKCMS_API bool skcms_GetWTPT(const skcms_ICCProfile*, float xyz[3]);
+
+// These are common ICC signature values
+enum {
+ // data_color_space
+ skcms_Signature_CMYK = 0x434D594B,
+ skcms_Signature_Gray = 0x47524159,
+ skcms_Signature_RGB = 0x52474220,
+
+ // pcs
+ skcms_Signature_Lab = 0x4C616220,
+ skcms_Signature_XYZ = 0x58595A20,
+};
+
+typedef enum skcms_PixelFormat {
+ skcms_PixelFormat_A_8,
+ skcms_PixelFormat_A_8_,
+ skcms_PixelFormat_G_8,
+ skcms_PixelFormat_G_8_,
+ skcms_PixelFormat_RGBA_8888_Palette8,
+ skcms_PixelFormat_BGRA_8888_Palette8,
+
+ skcms_PixelFormat_RGB_565,
+ skcms_PixelFormat_BGR_565,
+
+ skcms_PixelFormat_ABGR_4444,
+ skcms_PixelFormat_ARGB_4444,
+
+ skcms_PixelFormat_RGB_888,
+ skcms_PixelFormat_BGR_888,
+ skcms_PixelFormat_RGBA_8888,
+ skcms_PixelFormat_BGRA_8888,
+ skcms_PixelFormat_RGBA_8888_sRGB, // Automatic sRGB encoding / decoding.
+ skcms_PixelFormat_BGRA_8888_sRGB, // (Generally used with linear transfer functions.)
+
+ skcms_PixelFormat_RGBA_1010102,
+ skcms_PixelFormat_BGRA_1010102,
+
+ skcms_PixelFormat_RGB_161616LE, // Little-endian. Pointers must be 16-bit aligned.
+ skcms_PixelFormat_BGR_161616LE,
+ skcms_PixelFormat_RGBA_16161616LE,
+ skcms_PixelFormat_BGRA_16161616LE,
+
+ skcms_PixelFormat_RGB_161616BE, // Big-endian. Pointers must be 16-bit aligned.
+ skcms_PixelFormat_BGR_161616BE,
+ skcms_PixelFormat_RGBA_16161616BE,
+ skcms_PixelFormat_BGRA_16161616BE,
+
+ skcms_PixelFormat_RGB_hhh_Norm, // 1-5-10 half-precision float in [0,1]
+ skcms_PixelFormat_BGR_hhh_Norm, // Pointers must be 16-bit aligned.
+ skcms_PixelFormat_RGBA_hhhh_Norm,
+ skcms_PixelFormat_BGRA_hhhh_Norm,
+
+ skcms_PixelFormat_RGB_hhh, // 1-5-10 half-precision float.
+ skcms_PixelFormat_BGR_hhh, // Pointers must be 16-bit aligned.
+ skcms_PixelFormat_RGBA_hhhh,
+ skcms_PixelFormat_BGRA_hhhh,
+
+ skcms_PixelFormat_RGB_fff, // 1-8-23 single-precision float (the normal kind).
+ skcms_PixelFormat_BGR_fff, // Pointers must be 32-bit aligned.
+ skcms_PixelFormat_RGBA_ffff,
+ skcms_PixelFormat_BGRA_ffff,
+
+ skcms_PixelFormat_RGB_101010x_XR, // Note: This is located here to signal no clamping.
+ skcms_PixelFormat_BGR_101010x_XR, // Compatible with MTLPixelFormatBGR10_XR.
+} skcms_PixelFormat;
+
+// We always store any alpha channel linearly. In the chart below, tf-1() is the inverse
+// transfer function for the given color profile (applying the transfer function linearizes).
+
+// We treat opaque as a strong requirement, not just a performance hint: we will ignore
+// any source alpha and treat it as 1.0, and will make sure that any destination alpha
+// channel is filled with the equivalent of 1.0.
+
+// We used to offer multiple types of premultiplication, but now just one, PremulAsEncoded.
+// This is the premul you're probably used to working with.
+
+typedef enum skcms_AlphaFormat {
+ skcms_AlphaFormat_Opaque, // alpha is always opaque
+ // tf-1(r), tf-1(g), tf-1(b), 1.0
+ skcms_AlphaFormat_Unpremul, // alpha and color are unassociated
+ // tf-1(r), tf-1(g), tf-1(b), a
+ skcms_AlphaFormat_PremulAsEncoded, // premultiplied while encoded
+ // tf-1(r)*a, tf-1(g)*a, tf-1(b)*a, a
+} skcms_AlphaFormat;
+
+// Convert npixels pixels from src format and color profile to dst format and color profile
+// and return true, otherwise return false. It is safe to alias dst == src if dstFmt == srcFmt.
+SKCMS_API bool skcms_Transform(const void* src,
+ skcms_PixelFormat srcFmt,
+ skcms_AlphaFormat srcAlpha,
+ const skcms_ICCProfile* srcProfile,
+ void* dst,
+ skcms_PixelFormat dstFmt,
+ skcms_AlphaFormat dstAlpha,
+ const skcms_ICCProfile* dstProfile,
+ size_t npixels);
+
+// As skcms_Transform(), supporting srcFmts with a palette.
+SKCMS_API bool skcms_TransformWithPalette(const void* src,
+ skcms_PixelFormat srcFmt,
+ skcms_AlphaFormat srcAlpha,
+ const skcms_ICCProfile* srcProfile,
+ void* dst,
+ skcms_PixelFormat dstFmt,
+ skcms_AlphaFormat dstAlpha,
+ const skcms_ICCProfile* dstProfile,
+ size_t npixels,
+ const void* palette);
+
+// If profile can be used as a destination in skcms_Transform, return true. Otherwise, attempt to
+// rewrite it with approximations where reasonable. If successful, return true. If no reasonable
+// approximation exists, leave the profile unchanged and return false.
+SKCMS_API bool skcms_MakeUsableAsDestination(skcms_ICCProfile* profile);
+
+// If profile can be used as a destination with a single parametric transfer function (ie for
+// rasterization), return true. Otherwise, attempt to rewrite it with approximations where
+// reasonable. If successful, return true. If no reasonable approximation exists, leave the
+// profile unchanged and return false.
+SKCMS_API bool skcms_MakeUsableAsDestinationWithSingleCurve(skcms_ICCProfile* profile);
+
+// Returns a matrix to adapt XYZ color from given the whitepoint to D50.
+SKCMS_API bool skcms_AdaptToXYZD50(float wx, float wy,
+ skcms_Matrix3x3* toXYZD50);
+
+// Returns a matrix to convert RGB color into XYZ adapted to D50, given the
+// primaries and whitepoint of the RGB model.
+SKCMS_API bool skcms_PrimariesToXYZD50(float rx, float ry,
+ float gx, float gy,
+ float bx, float by,
+ float wx, float wy,
+ skcms_Matrix3x3* toXYZD50);
+
+// Call before your first call to skcms_Transform() to skip runtime CPU detection.
+SKCMS_API void skcms_DisableRuntimeCPUDetection(void);
+
+// Utilities for programmatically constructing profiles
+static inline void skcms_Init(skcms_ICCProfile* p) {
+ memset(p, 0, sizeof(*p));
+ p->data_color_space = skcms_Signature_RGB;
+ p->pcs = skcms_Signature_XYZ;
+}
+
+static inline void skcms_SetTransferFunction(skcms_ICCProfile* p,
+ const skcms_TransferFunction* tf) {
+ p->has_trc = true;
+ for (int i = 0; i < 3; ++i) {
+ p->trc[i].table_entries = 0;
+ p->trc[i].parametric = *tf;
+ }
+}
+
+static inline void skcms_SetXYZD50(skcms_ICCProfile* p, const skcms_Matrix3x3* m) {
+ p->has_toXYZD50 = true;
+ p->toXYZD50 = *m;
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/gfx/skia/skia/modules/skcms/skcms_internal.h b/gfx/skia/skia/modules/skcms/skcms_internal.h
new file mode 100644
index 0000000000..cc6d578ba0
--- /dev/null
+++ b/gfx/skia/skia/modules/skcms/skcms_internal.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#pragma once
+
+// skcms_internal.h contains APIs shared by skcms' internals and its test tools.
+// Please don't use this header from outside the skcms repo.
+
+#include "skcms.h"
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// ~~~~ General Helper Macros ~~~~
+ #define ARRAY_COUNT(arr) (int)(sizeof((arr)) / sizeof(*(arr)))
+
+ typedef struct skcms_ICCTag {
+ uint32_t signature;
+ uint32_t type;
+ uint32_t size;
+ const uint8_t* buf;
+ } skcms_ICCTag;
+
+ void skcms_GetTagByIndex (const skcms_ICCProfile*, uint32_t idx, skcms_ICCTag*);
+ bool skcms_GetTagBySignature(const skcms_ICCProfile*, uint32_t sig, skcms_ICCTag*);
+
+ float skcms_MaxRoundtripError(const skcms_Curve* curve, const skcms_TransferFunction* inv_tf);
+
+ // 252 of a random shuffle of all possible bytes.
+ // 252 is evenly divisible by 3 and 4. Only 192, 10, 241, and 43 are missing.
+ // Used for ICC profile equivalence testing.
+ extern const uint8_t skcms_252_random_bytes[252];
+
+// ~~~~ Portable Math ~~~~
+ static inline float floorf_(float x) {
+ float roundtrip = (float)((int)x);
+ return roundtrip > x ? roundtrip - 1 : roundtrip;
+ }
+ static inline float fabsf_(float x) { return x < 0 ? -x : x; }
+ float powf_(float, float);
+
+// ~~~~ Does this pixel format need a palette pointer to be usable? ~~~~
+ static inline bool needs_palette(skcms_PixelFormat fmt) {
+ return (fmt >> 1) == (skcms_PixelFormat_RGBA_8888_Palette8 >> 1);
+ }
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/gfx/skia/skia/modules/skcms/src/Transform_inl.h b/gfx/skia/skia/modules/skcms/src/Transform_inl.h
new file mode 100644
index 0000000000..350f6a20a6
--- /dev/null
+++ b/gfx/skia/skia/modules/skcms/src/Transform_inl.h
@@ -0,0 +1,1628 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Intentionally NO #pragma once... included multiple times.
+
+// This file is included from skcms.cc in a namespace with some pre-defines:
+// - N: depth of all vectors, 1,4,8, or 16 (preprocessor define)
+// - V<T>: a template to create a vector of N T's.
+
+using F = V<Color>; // Called F for historic reasons... maybe rename C?
+using I32 = V<int32_t>;
+using U64 = V<uint64_t>;
+using U32 = V<uint32_t>;
+using U16 = V<uint16_t>;
+using U8 = V<uint8_t>;
+
+
+#if defined(__GNUC__) && !defined(__clang__)
+ // Once again, GCC is kind of weird, not allowing vector = scalar directly.
+ static constexpr F F0 = F() + 0.0f,
+ F1 = F() + 1.0f,
+ FInfBits = F() + 0x7f800000; // equals 2139095040, the bit pattern of +Inf
+#else
+ static constexpr F F0 = 0.0f,
+ F1 = 1.0f,
+ FInfBits = 0x7f800000; // equals 2139095040, the bit pattern of +Inf
+#endif
+
+// Instead of checking __AVX__ below, we'll check USING_AVX.
+// This lets skcms.cc set USING_AVX to force us in even if the compiler's not set that way.
+// Same deal for __F16C__ and __AVX2__ ~~~> USING_AVX_F16C, USING_AVX2.
+
+#if !defined(USING_AVX) && N == 8 && defined(__AVX__)
+ #define USING_AVX
+#endif
+#if !defined(USING_AVX_F16C) && defined(USING_AVX) && defined(__F16C__)
+    #define USING_AVX_F16C
+#endif
+#if !defined(USING_AVX2) && defined(USING_AVX) && defined(__AVX2__)
+ #define USING_AVX2
+#endif
+#if !defined(USING_AVX512F) && N == 16 && defined(__AVX512F__) && defined(__AVX512DQ__)
+ #define USING_AVX512F
+#endif
+
+// Similar to the AVX+ features, we define USING_NEON and USING_NEON_F16C.
+// This is more for organizational clarity... skcms.cc doesn't force these.
+#if N > 1 && defined(__ARM_NEON)
+ #define USING_NEON
+ #if __ARM_FP & 2
+ #define USING_NEON_F16C
+ #endif
+ #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(SKCMS_OPT_INTO_NEON_FP16)
+ #define USING_NEON_FP16
+ #endif
+#endif
+
+// These -Wvector-conversion warnings seem to trigger in very bogus situations,
+// like vst3q_f32() expecting a 16x char rather than a 4x float vector. :/
+#if defined(USING_NEON) && defined(__clang__)
+ #pragma clang diagnostic ignored "-Wvector-conversion"
+#endif
+
+// GCC & Clang (but not clang-cl) warn returning U64 on x86 is larger than a register.
+// You'd see warnings like, "using AVX even though AVX is not enabled".
+// We stifle these warnings; our helpers that return U64 are always inlined.
+#if defined(__SSE__) && defined(__GNUC__)
+ #if !defined(__has_warning)
+ #pragma GCC diagnostic ignored "-Wpsabi"
+ #elif __has_warning("-Wpsabi")
+ #pragma GCC diagnostic ignored "-Wpsabi"
+ #endif
+#endif
+
+#if defined(__clang__)
+ #define FALLTHROUGH [[clang::fallthrough]]
+#else
+ #define FALLTHROUGH
+#endif
+
+// We tag most helper functions as SI, to enforce good code generation
+// but also work around what we think is a bug in GCC: when targeting 32-bit
+// x86, GCC tends to pass U16 (4x uint16_t vector) function arguments in the
+// MMX mm0 register, which seems to mess with unrelated code that later uses
+// x87 FP instructions (MMX's mm0 is an alias for x87's st0 register).
+//
+// It helps codegen to call __builtin_memcpy() when we know the byte count at compile time.
+#if defined(__clang__) || defined(__GNUC__)
+ #define SI static inline __attribute__((always_inline))
+#else
+ #define SI static inline
+#endif
+
+template <typename T, typename P>
+SI T load(const P* ptr) {
+ T val;
+ small_memcpy(&val, ptr, sizeof(val));
+ return val;
+}
+template <typename T, typename P>
+SI void store(P* ptr, const T& val) {
+ small_memcpy(ptr, &val, sizeof(val));
+}
+
+// (T)v is a cast when N == 1 and a bit-pun when N>1,
+// so we use cast<T>(v) to actually cast or bit_pun<T>(v) to bit-pun.
+template <typename D, typename S>
+SI D cast(const S& v) {
+#if N == 1
+ return (D)v;
+#elif defined(__clang__)
+ return __builtin_convertvector(v, D);
+#else
+ D d;
+ for (int i = 0; i < N; i++) {
+ d[i] = v[i];
+ }
+ return d;
+#endif
+}
+
+template <typename D, typename S>
+SI D bit_pun(const S& v) {
+ static_assert(sizeof(D) == sizeof(v), "");
+ return load<D>(&v);
+}
+
+// When we convert from float to fixed point, it's very common to want to round,
+// and for some reason compilers generate better code when converting to int32_t.
+// To serve both those ends, we use this function to_fixed() instead of direct cast().
+#if defined(USING_NEON_FP16)
+ // NEON's got a F16 -> U16 instruction, so this should be fine without going via I16.
+ SI U16 to_fixed(F f) { return cast<U16>(f + 0.5f); }
+#else
+ SI U32 to_fixed(F f) { return (U32)cast<I32>(f + 0.5f); }
+#endif
+
+
+// Sometimes we do something crazy on one branch of a conditional,
+// like divide by zero or convert a huge float to an integer,
+// but then harmlessly select the other side. That trips up N==1
+// sanitizer builds, so we make if_then_else() a macro to avoid
+// evaluating the unused side.
+
+#if N == 1
+ #define if_then_else(cond, t, e) ((cond) ? (t) : (e))
+#else
+ template <typename C, typename T>
+ SI T if_then_else(C cond, T t, T e) {
+ return bit_pun<T>( ( cond & bit_pun<C>(t)) |
+ (~cond & bit_pun<C>(e)) );
+ }
+#endif
+
+
+SI F F_from_Half(U16 half) {
+#if defined(USING_NEON_FP16)
+ return bit_pun<F>(half);
+#elif defined(USING_NEON_F16C)
+ return vcvt_f32_f16((float16x4_t)half);
+#elif defined(USING_AVX512F)
+ return (F)_mm512_cvtph_ps((__m256i)half);
+#elif defined(USING_AVX_F16C)
+ typedef int16_t __attribute__((vector_size(16))) I16;
+ return __builtin_ia32_vcvtph2ps256((I16)half);
+#else
+ U32 wide = cast<U32>(half);
+ // A half is 1-5-10 sign-exponent-mantissa, with 15 exponent bias.
+ U32 s = wide & 0x8000,
+ em = wide ^ s;
+
+ // Constructing the float is easy if the half is not denormalized.
+ F norm = bit_pun<F>( (s<<16) + (em<<13) + ((127-15)<<23) );
+
+ // Simply flush all denorm half floats to zero.
+ return if_then_else(em < 0x0400, F0, norm);
+#endif
+}
+
+#if defined(__clang__)
+ // The -((127-15)<<10) underflows that side of the math when
+ // we pass a denorm half float. It's harmless... we'll take the 0 side anyway.
+ __attribute__((no_sanitize("unsigned-integer-overflow")))
+#endif
+SI U16 Half_from_F(F f) {
+#if defined(USING_NEON_FP16)
+ return bit_pun<U16>(f);
+#elif defined(USING_NEON_F16C)
+ return (U16)vcvt_f16_f32(f);
+#elif defined(USING_AVX512F)
+ return (U16)_mm512_cvtps_ph((__m512 )f, _MM_FROUND_CUR_DIRECTION );
+#elif defined(USING_AVX_F16C)
+ return (U16)__builtin_ia32_vcvtps2ph256(f, 0x04/*_MM_FROUND_CUR_DIRECTION*/);
+#else
+ // A float is 1-8-23 sign-exponent-mantissa, with 127 exponent bias.
+ U32 sem = bit_pun<U32>(f),
+ s = sem & 0x80000000,
+ em = sem ^ s;
+
+ // For simplicity we flush denorm half floats (including all denorm floats) to zero.
+ return cast<U16>(if_then_else(em < 0x38800000, (U32)F0
+ , (s>>16) + (em>>13) - ((127-15)<<10)));
+#endif
+}
+
+// Swap high and low bytes of 16-bit lanes, converting between big-endian and little-endian.
+#if defined(USING_NEON_FP16)
+ SI U16 swap_endian_16(U16 v) {
+ return (U16)vrev16q_u8((uint8x16_t) v);
+ }
+#elif defined(USING_NEON)
+ SI U16 swap_endian_16(U16 v) {
+ return (U16)vrev16_u8((uint8x8_t) v);
+ }
+#endif
+
+SI U64 swap_endian_16x4(const U64& rgba) {
+ return (rgba & 0x00ff00ff00ff00ff) << 8
+ | (rgba & 0xff00ff00ff00ff00) >> 8;
+}
+
+#if defined(USING_NEON_FP16)
+ SI F min_(F x, F y) { return (F)vminq_f16((float16x8_t)x, (float16x8_t)y); }
+ SI F max_(F x, F y) { return (F)vmaxq_f16((float16x8_t)x, (float16x8_t)y); }
+#elif defined(USING_NEON)
+ SI F min_(F x, F y) { return (F)vminq_f32((float32x4_t)x, (float32x4_t)y); }
+ SI F max_(F x, F y) { return (F)vmaxq_f32((float32x4_t)x, (float32x4_t)y); }
+#else
+ SI F min_(F x, F y) { return if_then_else(x > y, y, x); }
+ SI F max_(F x, F y) { return if_then_else(x < y, y, x); }
+#endif
+
+SI F floor_(F x) {
+#if N == 1
+ return floorf_(x);
+#elif defined(USING_NEON_FP16)
+ return vrndmq_f16(x);
+#elif defined(__aarch64__)
+ return vrndmq_f32(x);
+#elif defined(USING_AVX512F)
+ // Clang's _mm512_floor_ps() passes its mask as -1, not (__mmask16)-1,
+    // and integer sanitizer catches that this implicit cast changes the
+ // value from -1 to 65535. We'll cast manually to work around it.
+ // Read this as `return _mm512_floor_ps(x)`.
+ return _mm512_mask_floor_ps(x, (__mmask16)-1, x);
+#elif defined(USING_AVX)
+ return __builtin_ia32_roundps256(x, 0x01/*_MM_FROUND_FLOOR*/);
+#elif defined(__SSE4_1__)
+ return _mm_floor_ps(x);
+#else
+ // Round trip through integers with a truncating cast.
+ F roundtrip = cast<F>(cast<I32>(x));
+ // If x is negative, truncating gives the ceiling instead of the floor.
+ return roundtrip - if_then_else(roundtrip > x, F1, F0);
+
+ // This implementation fails for values of x that are outside
+ // the range an integer can represent. We expect most x to be small.
+#endif
+}
+
+SI F approx_log2(F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ return x;
+#else
+ // The first approximation of log2(x) is its exponent 'e', minus 127.
+ I32 bits = bit_pun<I32>(x);
+
+ F e = cast<F>(bits) * (1.0f / (1<<23));
+
+    // If we use the mantissa too we can refine the error significantly.
+ F m = bit_pun<F>( (bits & 0x007fffff) | 0x3f000000 );
+
+ return e - 124.225514990f
+ - 1.498030302f*m
+ - 1.725879990f/(0.3520887068f + m);
+#endif
+}
+
+SI F approx_log(F x) {
+ const float ln2 = 0.69314718f;
+ return ln2 * approx_log2(x);
+}
+
+SI F approx_exp2(F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ return x;
+#else
+ F fract = x - floor_(x);
+
+ F fbits = (1.0f * (1<<23)) * (x + 121.274057500f
+ - 1.490129070f*fract
+ + 27.728023300f/(4.84252568f - fract));
+ I32 bits = cast<I32>(min_(max_(fbits, F0), FInfBits));
+
+ return bit_pun<F>(bits);
+#endif
+}
+
+SI F approx_pow(F x, float y) {
+ return if_then_else((x == F0) | (x == F1), x
+ , approx_exp2(approx_log2(x) * y));
+}
+
+SI F approx_exp(F x) {
+ const float log2_e = 1.4426950408889634074f;
+ return approx_exp2(log2_e * x);
+}
+
+// Return tf(x).
+SI F apply_tf(const skcms_TransferFunction* tf, F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ (void)tf;
+ return x;
+#else
+ // Peel off the sign bit and set x = |x|.
+ U32 bits = bit_pun<U32>(x),
+ sign = bits & 0x80000000;
+ x = bit_pun<F>(bits ^ sign);
+
+ // The transfer function has a linear part up to d, exponential at d and after.
+ F v = if_then_else(x < tf->d, tf->c*x + tf->f
+ , approx_pow(tf->a*x + tf->b, tf->g) + tf->e);
+
+ // Tack the sign bit back on.
+ return bit_pun<F>(sign | bit_pun<U32>(v));
+#endif
+}
+
+SI F apply_pq(const skcms_TransferFunction* tf, F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ (void)tf;
+ return x;
+#else
+ U32 bits = bit_pun<U32>(x),
+ sign = bits & 0x80000000;
+ x = bit_pun<F>(bits ^ sign);
+
+ F v = approx_pow(max_(tf->a + tf->b * approx_pow(x, tf->c), F0)
+ / (tf->d + tf->e * approx_pow(x, tf->c)),
+ tf->f);
+
+ return bit_pun<F>(sign | bit_pun<U32>(v));
+#endif
+}
+
+SI F apply_hlg(const skcms_TransferFunction* tf, F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ (void)tf;
+ return x;
+#else
+ const float R = tf->a, G = tf->b,
+ a = tf->c, b = tf->d, c = tf->e,
+ K = tf->f + 1;
+ U32 bits = bit_pun<U32>(x),
+ sign = bits & 0x80000000;
+ x = bit_pun<F>(bits ^ sign);
+
+ F v = if_then_else(x*R <= 1, approx_pow(x*R, G)
+ , approx_exp((x-c)*a) + b);
+
+ return K*bit_pun<F>(sign | bit_pun<U32>(v));
+#endif
+}
+
+SI F apply_hlginv(const skcms_TransferFunction* tf, F x) {
+#if defined(USING_NEON_FP16)
+ // TODO(mtklein)
+ (void)tf;
+ return x;
+#else
+ const float R = tf->a, G = tf->b,
+ a = tf->c, b = tf->d, c = tf->e,
+ K = tf->f + 1;
+ U32 bits = bit_pun<U32>(x),
+ sign = bits & 0x80000000;
+ x = bit_pun<F>(bits ^ sign);
+ x /= K;
+
+ F v = if_then_else(x <= 1, R * approx_pow(x, G)
+ , a * approx_log(x - b) + c);
+
+ return bit_pun<F>(sign | bit_pun<U32>(v));
+#endif
+}
+
+
+// Strided loads and stores of N values, starting from p.
+template <typename T, typename P>
+SI T load_3(const P* p) {
+#if N == 1
+ return (T)p[0];
+#elif N == 4
+ return T{p[ 0],p[ 3],p[ 6],p[ 9]};
+#elif N == 8
+ return T{p[ 0],p[ 3],p[ 6],p[ 9], p[12],p[15],p[18],p[21]};
+#elif N == 16
+ return T{p[ 0],p[ 3],p[ 6],p[ 9], p[12],p[15],p[18],p[21],
+ p[24],p[27],p[30],p[33], p[36],p[39],p[42],p[45]};
+#endif
+}
+
+template <typename T, typename P>
+SI T load_4(const P* p) {
+#if N == 1
+ return (T)p[0];
+#elif N == 4
+ return T{p[ 0],p[ 4],p[ 8],p[12]};
+#elif N == 8
+ return T{p[ 0],p[ 4],p[ 8],p[12], p[16],p[20],p[24],p[28]};
+#elif N == 16
+ return T{p[ 0],p[ 4],p[ 8],p[12], p[16],p[20],p[24],p[28],
+ p[32],p[36],p[40],p[44], p[48],p[52],p[56],p[60]};
+#endif
+}
+
+template <typename T, typename P>
+SI void store_3(P* p, const T& v) {
+#if N == 1
+ p[0] = v;
+#elif N == 4
+ p[ 0] = v[ 0]; p[ 3] = v[ 1]; p[ 6] = v[ 2]; p[ 9] = v[ 3];
+#elif N == 8
+ p[ 0] = v[ 0]; p[ 3] = v[ 1]; p[ 6] = v[ 2]; p[ 9] = v[ 3];
+ p[12] = v[ 4]; p[15] = v[ 5]; p[18] = v[ 6]; p[21] = v[ 7];
+#elif N == 16
+ p[ 0] = v[ 0]; p[ 3] = v[ 1]; p[ 6] = v[ 2]; p[ 9] = v[ 3];
+ p[12] = v[ 4]; p[15] = v[ 5]; p[18] = v[ 6]; p[21] = v[ 7];
+ p[24] = v[ 8]; p[27] = v[ 9]; p[30] = v[10]; p[33] = v[11];
+ p[36] = v[12]; p[39] = v[13]; p[42] = v[14]; p[45] = v[15];
+#endif
+}
+
+template <typename T, typename P>
+SI void store_4(P* p, const T& v) {
+#if N == 1
+ p[0] = v;
+#elif N == 4
+ p[ 0] = v[ 0]; p[ 4] = v[ 1]; p[ 8] = v[ 2]; p[12] = v[ 3];
+#elif N == 8
+ p[ 0] = v[ 0]; p[ 4] = v[ 1]; p[ 8] = v[ 2]; p[12] = v[ 3];
+ p[16] = v[ 4]; p[20] = v[ 5]; p[24] = v[ 6]; p[28] = v[ 7];
+#elif N == 16
+ p[ 0] = v[ 0]; p[ 4] = v[ 1]; p[ 8] = v[ 2]; p[12] = v[ 3];
+ p[16] = v[ 4]; p[20] = v[ 5]; p[24] = v[ 6]; p[28] = v[ 7];
+ p[32] = v[ 8]; p[36] = v[ 9]; p[40] = v[10]; p[44] = v[11];
+ p[48] = v[12]; p[52] = v[13]; p[56] = v[14]; p[60] = v[15];
+#endif
+}
+
+
+SI U8 gather_8(const uint8_t* p, I32 ix) {
+#if N == 1
+ U8 v = p[ix];
+#elif N == 4
+ U8 v = { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]] };
+#elif N == 8
+ U8 v = { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
+ p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]] };
+#elif N == 16
+ U8 v = { p[ix[ 0]], p[ix[ 1]], p[ix[ 2]], p[ix[ 3]],
+ p[ix[ 4]], p[ix[ 5]], p[ix[ 6]], p[ix[ 7]],
+ p[ix[ 8]], p[ix[ 9]], p[ix[10]], p[ix[11]],
+ p[ix[12]], p[ix[13]], p[ix[14]], p[ix[15]] };
+#endif
+ return v;
+}
+
+SI U16 gather_16(const uint8_t* p, I32 ix) {
+ // Load the i'th 16-bit value from p.
+ auto load_16 = [p](int i) {
+ return load<uint16_t>(p + 2*i);
+ };
+#if N == 1
+ U16 v = load_16(ix);
+#elif N == 4
+ U16 v = { load_16(ix[0]), load_16(ix[1]), load_16(ix[2]), load_16(ix[3]) };
+#elif N == 8
+ U16 v = { load_16(ix[0]), load_16(ix[1]), load_16(ix[2]), load_16(ix[3]),
+ load_16(ix[4]), load_16(ix[5]), load_16(ix[6]), load_16(ix[7]) };
+#elif N == 16
+ U16 v = { load_16(ix[ 0]), load_16(ix[ 1]), load_16(ix[ 2]), load_16(ix[ 3]),
+ load_16(ix[ 4]), load_16(ix[ 5]), load_16(ix[ 6]), load_16(ix[ 7]),
+ load_16(ix[ 8]), load_16(ix[ 9]), load_16(ix[10]), load_16(ix[11]),
+ load_16(ix[12]), load_16(ix[13]), load_16(ix[14]), load_16(ix[15]) };
+#endif
+ return v;
+}
+
+SI U32 gather_32(const uint8_t* p, I32 ix) {
+ // Load the i'th 32-bit value from p.
+ auto load_32 = [p](int i) {
+ return load<uint32_t>(p + 4*i);
+ };
+#if N == 1
+ U32 v = load_32(ix);
+#elif N == 4
+ U32 v = { load_32(ix[0]), load_32(ix[1]), load_32(ix[2]), load_32(ix[3]) };
+#elif N == 8
+ U32 v = { load_32(ix[0]), load_32(ix[1]), load_32(ix[2]), load_32(ix[3]),
+ load_32(ix[4]), load_32(ix[5]), load_32(ix[6]), load_32(ix[7]) };
+#elif N == 16
+ U32 v = { load_32(ix[ 0]), load_32(ix[ 1]), load_32(ix[ 2]), load_32(ix[ 3]),
+ load_32(ix[ 4]), load_32(ix[ 5]), load_32(ix[ 6]), load_32(ix[ 7]),
+ load_32(ix[ 8]), load_32(ix[ 9]), load_32(ix[10]), load_32(ix[11]),
+ load_32(ix[12]), load_32(ix[13]), load_32(ix[14]), load_32(ix[15]) };
+#endif
+ // TODO: AVX2 and AVX-512 gathers (c.f. gather_24).
+ return v;
+}
+
+SI U32 gather_24(const uint8_t* p, I32 ix) {
+ // First, back up a byte. Any place we're gathering from has a safe junk byte to read
+ // in front of it, either a previous table value, or some tag metadata.
+ p -= 1;
+
+ // Load the i'th 24-bit value from p, and 1 extra byte.
+ auto load_24_32 = [p](int i) {
+ return load<uint32_t>(p + 3*i);
+ };
+
+ // Now load multiples of 4 bytes (a junk byte, then r,g,b).
+#if N == 1
+ U32 v = load_24_32(ix);
+#elif N == 4
+ U32 v = { load_24_32(ix[0]), load_24_32(ix[1]), load_24_32(ix[2]), load_24_32(ix[3]) };
+#elif N == 8 && !defined(USING_AVX2)
+ U32 v = { load_24_32(ix[0]), load_24_32(ix[1]), load_24_32(ix[2]), load_24_32(ix[3]),
+ load_24_32(ix[4]), load_24_32(ix[5]), load_24_32(ix[6]), load_24_32(ix[7]) };
+#elif N == 8
+ (void)load_24_32;
+ // The gather instruction here doesn't need any particular alignment,
+ // but the intrinsic takes a const int*.
+ const int* p4 = bit_pun<const int*>(p);
+ I32 zero = { 0, 0, 0, 0, 0, 0, 0, 0},
+ mask = {-1,-1,-1,-1, -1,-1,-1,-1};
+ #if defined(__clang__)
+ U32 v = (U32)__builtin_ia32_gatherd_d256(zero, p4, 3*ix, mask, 1);
+ #elif defined(__GNUC__)
+ U32 v = (U32)__builtin_ia32_gathersiv8si(zero, p4, 3*ix, mask, 1);
+ #endif
+#elif N == 16
+ (void)load_24_32;
+ // The intrinsic is supposed to take const void* now, but it takes const int*, just like AVX2.
+ // And AVX-512 swapped the order of arguments. :/
+ const int* p4 = bit_pun<const int*>(p);
+ U32 v = (U32)_mm512_i32gather_epi32((__m512i)(3*ix), p4, 1);
+#endif
+
+ // Shift off the junk byte, leaving r,g,b in low 24 bits (and zero in the top 8).
+ return v >> 8;
+}
+
+#if !defined(__arm__)
+ SI void gather_48(const uint8_t* p, I32 ix, U64* v) {
+ // As in gather_24(), with everything doubled.
+ p -= 2;
+
+ // Load the i'th 48-bit value from p, and 2 extra bytes.
+ auto load_48_64 = [p](int i) {
+ return load<uint64_t>(p + 6*i);
+ };
+
+ #if N == 1
+ *v = load_48_64(ix);
+ #elif N == 4
+ *v = U64{
+ load_48_64(ix[0]), load_48_64(ix[1]), load_48_64(ix[2]), load_48_64(ix[3]),
+ };
+ #elif N == 8 && !defined(USING_AVX2)
+ *v = U64{
+ load_48_64(ix[0]), load_48_64(ix[1]), load_48_64(ix[2]), load_48_64(ix[3]),
+ load_48_64(ix[4]), load_48_64(ix[5]), load_48_64(ix[6]), load_48_64(ix[7]),
+ };
+ #elif N == 8
+ (void)load_48_64;
+ typedef int32_t __attribute__((vector_size(16))) Half_I32;
+ typedef long long __attribute__((vector_size(32))) Half_I64;
+
+ // The gather instruction here doesn't need any particular alignment,
+ // but the intrinsic takes a const long long*.
+ const long long int* p8 = bit_pun<const long long int*>(p);
+
+ Half_I64 zero = { 0, 0, 0, 0},
+ mask = {-1,-1,-1,-1};
+
+ ix *= 6;
+ Half_I32 ix_lo = { ix[0], ix[1], ix[2], ix[3] },
+ ix_hi = { ix[4], ix[5], ix[6], ix[7] };
+
+ #if defined(__clang__)
+ Half_I64 lo = (Half_I64)__builtin_ia32_gatherd_q256(zero, p8, ix_lo, mask, 1),
+ hi = (Half_I64)__builtin_ia32_gatherd_q256(zero, p8, ix_hi, mask, 1);
+ #elif defined(__GNUC__)
+ Half_I64 lo = (Half_I64)__builtin_ia32_gathersiv4di(zero, p8, ix_lo, mask, 1),
+ hi = (Half_I64)__builtin_ia32_gathersiv4di(zero, p8, ix_hi, mask, 1);
+ #endif
+ store((char*)v + 0, lo);
+ store((char*)v + 32, hi);
+ #elif N == 16
+ (void)load_48_64;
+ const long long int* p8 = bit_pun<const long long int*>(p);
+ __m512i lo = _mm512_i32gather_epi64(_mm512_extracti32x8_epi32((__m512i)(6*ix), 0), p8, 1),
+ hi = _mm512_i32gather_epi64(_mm512_extracti32x8_epi32((__m512i)(6*ix), 1), p8, 1);
+ store((char*)v + 0, lo);
+ store((char*)v + 64, hi);
+ #endif
+
+ *v >>= 16;
+ }
+#endif
+
+SI F F_from_U8(U8 v) {
+ return cast<F>(v) * (1/255.0f);
+}
+
+SI F F_from_U16_BE(U16 v) {
+ // All 16-bit ICC values are big-endian, so we byte swap before converting to float.
+ // MSVC catches the "loss" of data here in the portable path, so we also make sure to mask.
+ U16 lo = (v >> 8),
+ hi = (v << 8) & 0xffff;
+ return cast<F>(lo|hi) * (1/65535.0f);
+}
+
+SI U16 U16_from_F(F v) {
+ // 65535 == inf in FP16, so promote to FP32 before converting.
+ return cast<U16>(cast<V<float>>(v) * 65535 + 0.5f);
+}
+
+SI F minus_1_ulp(F v) {
+#if defined(USING_NEON_FP16)
+ return bit_pun<F>( bit_pun<U16>(v) - 1 );
+#else
+ return bit_pun<F>( bit_pun<U32>(v) - 1 );
+#endif
+}
+
+SI F table(const skcms_Curve* curve, F v) {
+ // Clamp the input to [0,1], then scale to a table index.
+ F ix = max_(F0, min_(v, F1)) * (float)(curve->table_entries - 1);
+
+ // We'll look up (equal or adjacent) entries at lo and hi, then lerp by t between the two.
+ I32 lo = cast<I32>( ix ),
+ hi = cast<I32>(minus_1_ulp(ix+1.0f));
+ F t = ix - cast<F>(lo); // i.e. the fractional part of ix.
+
+ // TODO: can we load l and h simultaneously? Each entry in 'h' is either
+ // the same as in 'l' or adjacent. We have a rough idea that's it'd always be safe
+ // to read adjacent entries and perhaps underflow the table by a byte or two
+ // (it'd be junk, but always safe to read). Not sure how to lerp yet.
+ F l,h;
+ if (curve->table_8) {
+ l = F_from_U8(gather_8(curve->table_8, lo));
+ h = F_from_U8(gather_8(curve->table_8, hi));
+ } else {
+ l = F_from_U16_BE(gather_16(curve->table_16, lo));
+ h = F_from_U16_BE(gather_16(curve->table_16, hi));
+ }
+ return l + (h-l)*t;
+}
+
+SI void sample_clut_8(const uint8_t* grid_8, I32 ix, F* r, F* g, F* b) {
+ U32 rgb = gather_24(grid_8, ix);
+
+ *r = cast<F>((rgb >> 0) & 0xff) * (1/255.0f);
+ *g = cast<F>((rgb >> 8) & 0xff) * (1/255.0f);
+ *b = cast<F>((rgb >> 16) & 0xff) * (1/255.0f);
+}
+
+SI void sample_clut_8(const uint8_t* grid_8, I32 ix, F* r, F* g, F* b, F* a) {
+ // TODO: don't forget to optimize gather_32().
+ U32 rgba = gather_32(grid_8, ix);
+
+ *r = cast<F>((rgba >> 0) & 0xff) * (1/255.0f);
+ *g = cast<F>((rgba >> 8) & 0xff) * (1/255.0f);
+ *b = cast<F>((rgba >> 16) & 0xff) * (1/255.0f);
+ *a = cast<F>((rgba >> 24) & 0xff) * (1/255.0f);
+}
+
+SI void sample_clut_16(const uint8_t* grid_16, I32 ix, F* r, F* g, F* b) {
+#if defined(__arm__)
+ // This is up to 2x faster on 32-bit ARM than the #else-case fast path.
+ *r = F_from_U16_BE(gather_16(grid_16, 3*ix+0));
+ *g = F_from_U16_BE(gather_16(grid_16, 3*ix+1));
+ *b = F_from_U16_BE(gather_16(grid_16, 3*ix+2));
+#else
+ // This strategy is much faster for 64-bit builds, and fine for 32-bit x86 too.
+ U64 rgb;
+ gather_48(grid_16, ix, &rgb);
+ rgb = swap_endian_16x4(rgb);
+
+ *r = cast<F>((rgb >> 0) & 0xffff) * (1/65535.0f);
+ *g = cast<F>((rgb >> 16) & 0xffff) * (1/65535.0f);
+ *b = cast<F>((rgb >> 32) & 0xffff) * (1/65535.0f);
+#endif
+}
+
+SI void sample_clut_16(const uint8_t* grid_16, I32 ix, F* r, F* g, F* b, F* a) {
+ // TODO: gather_64()-based fast path?
+ *r = F_from_U16_BE(gather_16(grid_16, 4*ix+0));
+ *g = F_from_U16_BE(gather_16(grid_16, 4*ix+1));
+ *b = F_from_U16_BE(gather_16(grid_16, 4*ix+2));
+ *a = F_from_U16_BE(gather_16(grid_16, 4*ix+3));
+}
+
+static void clut(uint32_t input_channels, uint32_t output_channels,
+ const uint8_t grid_points[4], const uint8_t* grid_8, const uint8_t* grid_16,
+ F* r, F* g, F* b, F* a) {
+
+ const int dim = (int)input_channels;
+ assert (0 < dim && dim <= 4);
+ assert (output_channels == 3 ||
+ output_channels == 4);
+
+ // For each of these arrays, think foo[2*dim], but we use foo[8] since we know dim <= 4.
+ I32 index [8]; // Index contribution by dimension, first low from 0, then high from 4.
+ F weight[8]; // Weight for each contribution, again first low, then high.
+
+ // O(dim) work first: calculate index,weight from r,g,b,a.
+ const F inputs[] = { *r,*g,*b,*a };
+ for (int i = dim-1, stride = 1; i >= 0; i--) {
+ // x is where we logically want to sample the grid in the i-th dimension.
+ F x = inputs[i] * (float)(grid_points[i] - 1);
+
+ // But we can't index at floats. lo and hi are the two integer grid points surrounding x.
+ I32 lo = cast<I32>( x ), // i.e. trunc(x) == floor(x) here.
+ hi = cast<I32>(minus_1_ulp(x+1.0f));
+ // Notice how we fold in the accumulated stride across previous dimensions here.
+ index[i+0] = lo * stride;
+ index[i+4] = hi * stride;
+ stride *= grid_points[i];
+
+ // We'll interpolate between those two integer grid points by t.
+ F t = x - cast<F>(lo); // i.e. fract(x)
+ weight[i+0] = 1-t;
+ weight[i+4] = t;
+ }
+
+ *r = *g = *b = F0;
+ if (output_channels == 4) {
+ *a = F0;
+ }
+
+ // We'll sample 2^dim == 1<<dim table entries per pixel,
+ // in all combinations of low and high in each dimension.
+ for (int combo = 0; combo < (1<<dim); combo++) { // This loop can be done in any order.
+
+ // Each of these upcoming (combo&N)*K expressions here evaluates to 0 or 4,
+ // where 0 selects the low index contribution and its weight 1-t,
+ // or 4 the high index contribution and its weight t.
+
+ // Since 0<dim≤4, we can always just start off with the 0-th channel,
+ // then handle the others conditionally.
+ I32 ix = index [0 + (combo&1)*4];
+ F w = weight[0 + (combo&1)*4];
+
+ switch ((dim-1)&3) { // This lets the compiler know there are no other cases to handle.
+ case 3: ix += index [3 + (combo&8)/2];
+ w *= weight[3 + (combo&8)/2];
+ FALLTHROUGH;
+ // fall through
+
+ case 2: ix += index [2 + (combo&4)*1];
+ w *= weight[2 + (combo&4)*1];
+ FALLTHROUGH;
+ // fall through
+
+ case 1: ix += index [1 + (combo&2)*2];
+ w *= weight[1 + (combo&2)*2];
+ }
+
+ F R,G,B,A=F0;
+ if (output_channels == 3) {
+ if (grid_8) { sample_clut_8 (grid_8 ,ix, &R,&G,&B); }
+ else { sample_clut_16(grid_16,ix, &R,&G,&B); }
+ } else {
+ if (grid_8) { sample_clut_8 (grid_8 ,ix, &R,&G,&B,&A); }
+ else { sample_clut_16(grid_16,ix, &R,&G,&B,&A); }
+ }
+ *r += w*R;
+ *g += w*G;
+ *b += w*B;
+ *a += w*A;
+ }
+}
+
+static void clut(const skcms_A2B* a2b, F* r, F* g, F* b, F a) {
+ clut(a2b->input_channels, a2b->output_channels,
+ a2b->grid_points, a2b->grid_8, a2b->grid_16,
+ r,g,b,&a);
+}
+static void clut(const skcms_B2A* b2a, F* r, F* g, F* b, F* a) {
+ clut(b2a->input_channels, b2a->output_channels,
+ b2a->grid_points, b2a->grid_8, b2a->grid_16,
+ r,g,b,a);
+}
+
+static void exec_ops(const Op* ops, const void** args,
+ const char* src, char* dst, int i) {
+ F r = F0, g = F0, b = F0, a = F1;
+ while (true) {
+ switch (*ops++) {
+ case Op_load_a8:{
+ a = F_from_U8(load<U8>(src + 1*i));
+ } break;
+
+ case Op_load_g8:{
+ r = g = b = F_from_U8(load<U8>(src + 1*i));
+ } break;
+
+ case Op_load_4444:{
+ U16 abgr = load<U16>(src + 2*i);
+
+ r = cast<F>((abgr >> 12) & 0xf) * (1/15.0f);
+ g = cast<F>((abgr >> 8) & 0xf) * (1/15.0f);
+ b = cast<F>((abgr >> 4) & 0xf) * (1/15.0f);
+ a = cast<F>((abgr >> 0) & 0xf) * (1/15.0f);
+ } break;
+
+ case Op_load_565:{
+ U16 rgb = load<U16>(src + 2*i);
+
+ r = cast<F>(rgb & (uint16_t)(31<< 0)) * (1.0f / (31<< 0));
+ g = cast<F>(rgb & (uint16_t)(63<< 5)) * (1.0f / (63<< 5));
+ b = cast<F>(rgb & (uint16_t)(31<<11)) * (1.0f / (31<<11));
+ } break;
+
+ case Op_load_888:{
+ const uint8_t* rgb = (const uint8_t*)(src + 3*i);
+ #if defined(USING_NEON_FP16)
+ // See the explanation under USING_NEON below. This is that doubled up.
+ uint8x16x3_t v = {{ vdupq_n_u8(0), vdupq_n_u8(0), vdupq_n_u8(0) }};
+ v = vld3q_lane_u8(rgb+ 0, v, 0);
+ v = vld3q_lane_u8(rgb+ 3, v, 2);
+ v = vld3q_lane_u8(rgb+ 6, v, 4);
+ v = vld3q_lane_u8(rgb+ 9, v, 6);
+
+ v = vld3q_lane_u8(rgb+12, v, 8);
+ v = vld3q_lane_u8(rgb+15, v, 10);
+ v = vld3q_lane_u8(rgb+18, v, 12);
+ v = vld3q_lane_u8(rgb+21, v, 14);
+
+ r = cast<F>((U16)v.val[0]) * (1/255.0f);
+ g = cast<F>((U16)v.val[1]) * (1/255.0f);
+ b = cast<F>((U16)v.val[2]) * (1/255.0f);
+ #elif defined(USING_NEON)
+ // There's no uint8x4x3_t or vld3 load for it, so we'll load each rgb pixel one at
+ // a time. Since we're doing that, we might as well load them into 16-bit lanes.
+ // (We'd even load into 32-bit lanes, but that's not possible on ARMv7.)
+ uint8x8x3_t v = {{ vdup_n_u8(0), vdup_n_u8(0), vdup_n_u8(0) }};
+ v = vld3_lane_u8(rgb+0, v, 0);
+ v = vld3_lane_u8(rgb+3, v, 2);
+ v = vld3_lane_u8(rgb+6, v, 4);
+ v = vld3_lane_u8(rgb+9, v, 6);
+
+ // Now if we squint, those 3 uint8x8_t we constructed are really U16s, easy to
+ // convert to F. (Again, U32 would be even better here if drop ARMv7 or split
+ // ARMv7 and ARMv8 impls.)
+ r = cast<F>((U16)v.val[0]) * (1/255.0f);
+ g = cast<F>((U16)v.val[1]) * (1/255.0f);
+ b = cast<F>((U16)v.val[2]) * (1/255.0f);
+ #else
+ r = cast<F>(load_3<U32>(rgb+0) ) * (1/255.0f);
+ g = cast<F>(load_3<U32>(rgb+1) ) * (1/255.0f);
+ b = cast<F>(load_3<U32>(rgb+2) ) * (1/255.0f);
+ #endif
+ } break;
+
+ case Op_load_8888:{
+ U32 rgba = load<U32>(src + 4*i);
+
+ r = cast<F>((rgba >> 0) & 0xff) * (1/255.0f);
+ g = cast<F>((rgba >> 8) & 0xff) * (1/255.0f);
+ b = cast<F>((rgba >> 16) & 0xff) * (1/255.0f);
+ a = cast<F>((rgba >> 24) & 0xff) * (1/255.0f);
+ } break;
+
+ case Op_load_8888_palette8:{
+ const uint8_t* palette = (const uint8_t*) *args++;
+ I32 ix = cast<I32>(load<U8>(src + 1*i));
+ U32 rgba = gather_32(palette, ix);
+
+ r = cast<F>((rgba >> 0) & 0xff) * (1/255.0f);
+ g = cast<F>((rgba >> 8) & 0xff) * (1/255.0f);
+ b = cast<F>((rgba >> 16) & 0xff) * (1/255.0f);
+ a = cast<F>((rgba >> 24) & 0xff) * (1/255.0f);
+ } break;
+
+ case Op_load_1010102:{
+ U32 rgba = load<U32>(src + 4*i);
+
+ r = cast<F>((rgba >> 0) & 0x3ff) * (1/1023.0f);
+ g = cast<F>((rgba >> 10) & 0x3ff) * (1/1023.0f);
+ b = cast<F>((rgba >> 20) & 0x3ff) * (1/1023.0f);
+ a = cast<F>((rgba >> 30) & 0x3 ) * (1/ 3.0f);
+ } break;
+
+ case Op_load_101010x_XR:{
+ static constexpr float min = -0.752941f;
+ static constexpr float max = 1.25098f;
+ static constexpr float range = max - min;
+ U32 rgba = load<U32>(src + 4*i);
+ r = cast<F>((rgba >> 0) & 0x3ff) * (1/1023.0f) * range + min;
+ g = cast<F>((rgba >> 10) & 0x3ff) * (1/1023.0f) * range + min;
+ b = cast<F>((rgba >> 20) & 0x3ff) * (1/1023.0f) * range + min;
+ } break;
+
+ case Op_load_161616LE:{
+ uintptr_t ptr = (uintptr_t)(src + 6*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgb = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = vld3q_u16(rgb);
+ r = cast<F>((U16)v.val[0]) * (1/65535.0f);
+ g = cast<F>((U16)v.val[1]) * (1/65535.0f);
+ b = cast<F>((U16)v.val[2]) * (1/65535.0f);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = vld3_u16(rgb);
+ r = cast<F>((U16)v.val[0]) * (1/65535.0f);
+ g = cast<F>((U16)v.val[1]) * (1/65535.0f);
+ b = cast<F>((U16)v.val[2]) * (1/65535.0f);
+ #else
+ r = cast<F>(load_3<U32>(rgb+0)) * (1/65535.0f);
+ g = cast<F>(load_3<U32>(rgb+1)) * (1/65535.0f);
+ b = cast<F>(load_3<U32>(rgb+2)) * (1/65535.0f);
+ #endif
+ } break;
+
+ case Op_load_16161616LE:{
+ uintptr_t ptr = (uintptr_t)(src + 8*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgba = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = vld4q_u16(rgba);
+ r = cast<F>((U16)v.val[0]) * (1/65535.0f);
+ g = cast<F>((U16)v.val[1]) * (1/65535.0f);
+ b = cast<F>((U16)v.val[2]) * (1/65535.0f);
+ a = cast<F>((U16)v.val[3]) * (1/65535.0f);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = vld4_u16(rgba);
+ r = cast<F>((U16)v.val[0]) * (1/65535.0f);
+ g = cast<F>((U16)v.val[1]) * (1/65535.0f);
+ b = cast<F>((U16)v.val[2]) * (1/65535.0f);
+ a = cast<F>((U16)v.val[3]) * (1/65535.0f);
+ #else
+ U64 px = load<U64>(rgba);
+
+ r = cast<F>((px >> 0) & 0xffff) * (1/65535.0f);
+ g = cast<F>((px >> 16) & 0xffff) * (1/65535.0f);
+ b = cast<F>((px >> 32) & 0xffff) * (1/65535.0f);
+ a = cast<F>((px >> 48) & 0xffff) * (1/65535.0f);
+ #endif
+ } break;
+
+ case Op_load_161616BE:{
+ uintptr_t ptr = (uintptr_t)(src + 6*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgb = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = vld3q_u16(rgb);
+ r = cast<F>(swap_endian_16((U16)v.val[0])) * (1/65535.0f);
+ g = cast<F>(swap_endian_16((U16)v.val[1])) * (1/65535.0f);
+ b = cast<F>(swap_endian_16((U16)v.val[2])) * (1/65535.0f);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = vld3_u16(rgb);
+ r = cast<F>(swap_endian_16((U16)v.val[0])) * (1/65535.0f);
+ g = cast<F>(swap_endian_16((U16)v.val[1])) * (1/65535.0f);
+ b = cast<F>(swap_endian_16((U16)v.val[2])) * (1/65535.0f);
+ #else
+ U32 R = load_3<U32>(rgb+0),
+ G = load_3<U32>(rgb+1),
+ B = load_3<U32>(rgb+2);
+ // R,G,B are big-endian 16-bit, so byte swap them before converting to float.
+ r = cast<F>((R & 0x00ff)<<8 | (R & 0xff00)>>8) * (1/65535.0f);
+ g = cast<F>((G & 0x00ff)<<8 | (G & 0xff00)>>8) * (1/65535.0f);
+ b = cast<F>((B & 0x00ff)<<8 | (B & 0xff00)>>8) * (1/65535.0f);
+ #endif
+ } break;
+
+ case Op_load_16161616BE:{
+ uintptr_t ptr = (uintptr_t)(src + 8*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgba = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = vld4q_u16(rgba);
+ r = cast<F>(swap_endian_16((U16)v.val[0])) * (1/65535.0f);
+ g = cast<F>(swap_endian_16((U16)v.val[1])) * (1/65535.0f);
+ b = cast<F>(swap_endian_16((U16)v.val[2])) * (1/65535.0f);
+ a = cast<F>(swap_endian_16((U16)v.val[3])) * (1/65535.0f);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = vld4_u16(rgba);
+ r = cast<F>(swap_endian_16((U16)v.val[0])) * (1/65535.0f);
+ g = cast<F>(swap_endian_16((U16)v.val[1])) * (1/65535.0f);
+ b = cast<F>(swap_endian_16((U16)v.val[2])) * (1/65535.0f);
+ a = cast<F>(swap_endian_16((U16)v.val[3])) * (1/65535.0f);
+ #else
+ U64 px = swap_endian_16x4(load<U64>(rgba));
+
+ r = cast<F>((px >> 0) & 0xffff) * (1/65535.0f);
+ g = cast<F>((px >> 16) & 0xffff) * (1/65535.0f);
+ b = cast<F>((px >> 32) & 0xffff) * (1/65535.0f);
+ a = cast<F>((px >> 48) & 0xffff) * (1/65535.0f);
+ #endif
+ } break;
+
+ case Op_load_hhh:{
+ uintptr_t ptr = (uintptr_t)(src + 6*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgb = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = vld3q_u16(rgb);
+ U16 R = (U16)v.val[0],
+ G = (U16)v.val[1],
+ B = (U16)v.val[2];
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = vld3_u16(rgb);
+ U16 R = (U16)v.val[0],
+ G = (U16)v.val[1],
+ B = (U16)v.val[2];
+ #else
+ U16 R = load_3<U16>(rgb+0),
+ G = load_3<U16>(rgb+1),
+ B = load_3<U16>(rgb+2);
+ #endif
+ r = F_from_Half(R);
+ g = F_from_Half(G);
+ b = F_from_Half(B);
+ } break;
+
+ case Op_load_hhhh:{
+ uintptr_t ptr = (uintptr_t)(src + 8*i);
+ assert( (ptr & 1) == 0 ); // src must be 2-byte aligned for this
+ const uint16_t* rgba = (const uint16_t*)ptr; // cast to const uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = vld4q_u16(rgba);
+ U16 R = (U16)v.val[0],
+ G = (U16)v.val[1],
+ B = (U16)v.val[2],
+ A = (U16)v.val[3];
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = vld4_u16(rgba);
+ U16 R = (U16)v.val[0],
+ G = (U16)v.val[1],
+ B = (U16)v.val[2],
+ A = (U16)v.val[3];
+ #else
+ U64 px = load<U64>(rgba);
+ U16 R = cast<U16>((px >> 0) & 0xffff),
+ G = cast<U16>((px >> 16) & 0xffff),
+ B = cast<U16>((px >> 32) & 0xffff),
+ A = cast<U16>((px >> 48) & 0xffff);
+ #endif
+ r = F_from_Half(R);
+ g = F_from_Half(G);
+ b = F_from_Half(B);
+ a = F_from_Half(A);
+ } break;
+
+ case Op_load_fff:{
+ uintptr_t ptr = (uintptr_t)(src + 12*i);
+ assert( (ptr & 3) == 0 ); // src must be 4-byte aligned for this
+ const float* rgb = (const float*)ptr; // cast to const float* to be safe.
+ #if defined(USING_NEON_FP16)
+ float32x4x3_t lo = vld3q_f32(rgb + 0),
+ hi = vld3q_f32(rgb + 12);
+ r = (F)vcombine_f16(vcvt_f16_f32(lo.val[0]), vcvt_f16_f32(hi.val[0]));
+ g = (F)vcombine_f16(vcvt_f16_f32(lo.val[1]), vcvt_f16_f32(hi.val[1]));
+ b = (F)vcombine_f16(vcvt_f16_f32(lo.val[2]), vcvt_f16_f32(hi.val[2]));
+ #elif defined(USING_NEON)
+ float32x4x3_t v = vld3q_f32(rgb);
+ r = (F)v.val[0];
+ g = (F)v.val[1];
+ b = (F)v.val[2];
+ #else
+ r = load_3<F>(rgb+0);
+ g = load_3<F>(rgb+1);
+ b = load_3<F>(rgb+2);
+ #endif
+ } break;
+
+ case Op_load_ffff:{
+ uintptr_t ptr = (uintptr_t)(src + 16*i);
+ assert( (ptr & 3) == 0 ); // src must be 4-byte aligned for this
+ const float* rgba = (const float*)ptr; // cast to const float* to be safe.
+ #if defined(USING_NEON_FP16)
+ float32x4x4_t lo = vld4q_f32(rgba + 0),
+ hi = vld4q_f32(rgba + 16);
+ r = (F)vcombine_f16(vcvt_f16_f32(lo.val[0]), vcvt_f16_f32(hi.val[0]));
+ g = (F)vcombine_f16(vcvt_f16_f32(lo.val[1]), vcvt_f16_f32(hi.val[1]));
+ b = (F)vcombine_f16(vcvt_f16_f32(lo.val[2]), vcvt_f16_f32(hi.val[2]));
+ a = (F)vcombine_f16(vcvt_f16_f32(lo.val[3]), vcvt_f16_f32(hi.val[3]));
+ #elif defined(USING_NEON)
+ float32x4x4_t v = vld4q_f32(rgba);
+ r = (F)v.val[0];
+ g = (F)v.val[1];
+ b = (F)v.val[2];
+ a = (F)v.val[3];
+ #else
+ r = load_4<F>(rgba+0);
+ g = load_4<F>(rgba+1);
+ b = load_4<F>(rgba+2);
+ a = load_4<F>(rgba+3);
+ #endif
+ } break;
+
+ case Op_swap_rb:{
+ F t = r;
+ r = b;
+ b = t;
+ } break;
+
+ case Op_clamp:{
+ r = max_(F0, min_(r, F1));
+ g = max_(F0, min_(g, F1));
+ b = max_(F0, min_(b, F1));
+ a = max_(F0, min_(a, F1));
+ } break;
+
+ case Op_invert:{
+ r = F1 - r;
+ g = F1 - g;
+ b = F1 - b;
+ a = F1 - a;
+ } break;
+
+ case Op_force_opaque:{
+ a = F1;
+ } break;
+
+ case Op_premul:{
+ r *= a;
+ g *= a;
+ b *= a;
+ } break;
+
+ case Op_unpremul:{
+ F scale = if_then_else(F1 / a < INFINITY_, F1 / a, F0);
+ r *= scale;
+ g *= scale;
+ b *= scale;
+ } break;
+
+ case Op_matrix_3x3:{
+ const skcms_Matrix3x3* matrix = (const skcms_Matrix3x3*) *args++;
+ const float* m = &matrix->vals[0][0];
+
+ F R = m[0]*r + m[1]*g + m[2]*b,
+ G = m[3]*r + m[4]*g + m[5]*b,
+ B = m[6]*r + m[7]*g + m[8]*b;
+
+ r = R;
+ g = G;
+ b = B;
+ } break;
+
+ case Op_matrix_3x4:{
+ const skcms_Matrix3x4* matrix = (const skcms_Matrix3x4*) *args++;
+ const float* m = &matrix->vals[0][0];
+
+ F R = m[0]*r + m[1]*g + m[ 2]*b + m[ 3],
+ G = m[4]*r + m[5]*g + m[ 6]*b + m[ 7],
+ B = m[8]*r + m[9]*g + m[10]*b + m[11];
+
+ r = R;
+ g = G;
+ b = B;
+ } break;
+
+ case Op_lab_to_xyz:{
+ // The L*a*b values are in r,g,b, but normalized to [0,1]. Reconstruct them:
+ F L = r * 100.0f,
+ A = g * 255.0f - 128.0f,
+ B = b * 255.0f - 128.0f;
+
+ // Convert to CIE XYZ.
+ F Y = (L + 16.0f) * (1/116.0f),
+ X = Y + A*(1/500.0f),
+ Z = Y - B*(1/200.0f);
+
+ X = if_then_else(X*X*X > 0.008856f, X*X*X, (X - (16/116.0f)) * (1/7.787f));
+ Y = if_then_else(Y*Y*Y > 0.008856f, Y*Y*Y, (Y - (16/116.0f)) * (1/7.787f));
+ Z = if_then_else(Z*Z*Z > 0.008856f, Z*Z*Z, (Z - (16/116.0f)) * (1/7.787f));
+
+ // Adjust to XYZD50 illuminant, and stuff back into r,g,b for the next op.
+ r = X * 0.9642f;
+ g = Y ;
+ b = Z * 0.8249f;
+ } break;
+
+ // As above, in reverse.
+ case Op_xyz_to_lab:{
+ F X = r * (1/0.9642f),
+ Y = g,
+ Z = b * (1/0.8249f);
+
+ X = if_then_else(X > 0.008856f, approx_pow(X, 1/3.0f), X*7.787f + (16/116.0f));
+ Y = if_then_else(Y > 0.008856f, approx_pow(Y, 1/3.0f), Y*7.787f + (16/116.0f));
+ Z = if_then_else(Z > 0.008856f, approx_pow(Z, 1/3.0f), Z*7.787f + (16/116.0f));
+
+ F L = Y*116.0f - 16.0f,
+ A = (X-Y)*500.0f,
+ B = (Y-Z)*200.0f;
+
+ r = L * (1/100.f);
+ g = (A + 128.0f) * (1/255.0f);
+ b = (B + 128.0f) * (1/255.0f);
+ } break;
+
+ case Op_tf_r:{ r = apply_tf((const skcms_TransferFunction*)*args++, r); } break;
+ case Op_tf_g:{ g = apply_tf((const skcms_TransferFunction*)*args++, g); } break;
+ case Op_tf_b:{ b = apply_tf((const skcms_TransferFunction*)*args++, b); } break;
+ case Op_tf_a:{ a = apply_tf((const skcms_TransferFunction*)*args++, a); } break;
+
+ case Op_pq_r:{ r = apply_pq((const skcms_TransferFunction*)*args++, r); } break;
+ case Op_pq_g:{ g = apply_pq((const skcms_TransferFunction*)*args++, g); } break;
+ case Op_pq_b:{ b = apply_pq((const skcms_TransferFunction*)*args++, b); } break;
+ case Op_pq_a:{ a = apply_pq((const skcms_TransferFunction*)*args++, a); } break;
+
+ case Op_hlg_r:{ r = apply_hlg((const skcms_TransferFunction*)*args++, r); } break;
+ case Op_hlg_g:{ g = apply_hlg((const skcms_TransferFunction*)*args++, g); } break;
+ case Op_hlg_b:{ b = apply_hlg((const skcms_TransferFunction*)*args++, b); } break;
+ case Op_hlg_a:{ a = apply_hlg((const skcms_TransferFunction*)*args++, a); } break;
+
+ case Op_hlginv_r:{ r = apply_hlginv((const skcms_TransferFunction*)*args++, r); } break;
+ case Op_hlginv_g:{ g = apply_hlginv((const skcms_TransferFunction*)*args++, g); } break;
+ case Op_hlginv_b:{ b = apply_hlginv((const skcms_TransferFunction*)*args++, b); } break;
+ case Op_hlginv_a:{ a = apply_hlginv((const skcms_TransferFunction*)*args++, a); } break;
+
+ case Op_table_r: { r = table((const skcms_Curve*)*args++, r); } break;
+ case Op_table_g: { g = table((const skcms_Curve*)*args++, g); } break;
+ case Op_table_b: { b = table((const skcms_Curve*)*args++, b); } break;
+ case Op_table_a: { a = table((const skcms_Curve*)*args++, a); } break;
+
+ case Op_clut_A2B: {
+ const skcms_A2B* a2b = (const skcms_A2B*) *args++;
+ clut(a2b, &r,&g,&b,a);
+
+ if (a2b->input_channels == 4) {
+ // CMYK is opaque.
+ a = F1;
+ }
+ } break;
+
+ case Op_clut_B2A: {
+ const skcms_B2A* b2a = (const skcms_B2A*) *args++;
+ clut(b2a, &r,&g,&b,&a);
+ } break;
+
+ // Notice, from here on down the store_ ops all return, ending the loop.
+
+ case Op_store_a8: {
+ store(dst + 1*i, cast<U8>(to_fixed(a * 255)));
+ } return;
+
+ case Op_store_g8: {
+ // g should be holding luminance (Y) (r,g,b ~~~> X,Y,Z)
+ store(dst + 1*i, cast<U8>(to_fixed(g * 255)));
+ } return;
+
+ case Op_store_4444: {
+ store<U16>(dst + 2*i, cast<U16>(to_fixed(r * 15) << 12)
+ | cast<U16>(to_fixed(g * 15) << 8)
+ | cast<U16>(to_fixed(b * 15) << 4)
+ | cast<U16>(to_fixed(a * 15) << 0));
+ } return;
+
+ case Op_store_565: {
+ store<U16>(dst + 2*i, cast<U16>(to_fixed(r * 31) << 0 )
+ | cast<U16>(to_fixed(g * 63) << 5 )
+ | cast<U16>(to_fixed(b * 31) << 11 ));
+ } return;
+
+ case Op_store_888: {
+ uint8_t* rgb = (uint8_t*)dst + 3*i;
+ #if defined(USING_NEON_FP16)
+ // See the explanation under USING_NEON below. This is that doubled up.
+ U16 R = to_fixed(r * 255),
+ G = to_fixed(g * 255),
+ B = to_fixed(b * 255);
+
+ uint8x16x3_t v = {{ (uint8x16_t)R, (uint8x16_t)G, (uint8x16_t)B }};
+ vst3q_lane_u8(rgb+ 0, v, 0);
+ vst3q_lane_u8(rgb+ 3, v, 2);
+ vst3q_lane_u8(rgb+ 6, v, 4);
+ vst3q_lane_u8(rgb+ 9, v, 6);
+
+ vst3q_lane_u8(rgb+12, v, 8);
+ vst3q_lane_u8(rgb+15, v, 10);
+ vst3q_lane_u8(rgb+18, v, 12);
+ vst3q_lane_u8(rgb+21, v, 14);
+ #elif defined(USING_NEON)
+ // Same deal as load_888 but in reverse... we'll store using uint8x8x3_t, but
+ // get there via U16 to save some instructions converting to float. And just
+ // like load_888, we'd prefer to go via U32 but for ARMv7 support.
+ U16 R = cast<U16>(to_fixed(r * 255)),
+ G = cast<U16>(to_fixed(g * 255)),
+ B = cast<U16>(to_fixed(b * 255));
+
+ uint8x8x3_t v = {{ (uint8x8_t)R, (uint8x8_t)G, (uint8x8_t)B }};
+ vst3_lane_u8(rgb+0, v, 0);
+ vst3_lane_u8(rgb+3, v, 2);
+ vst3_lane_u8(rgb+6, v, 4);
+ vst3_lane_u8(rgb+9, v, 6);
+ #else
+ store_3(rgb+0, cast<U8>(to_fixed(r * 255)) );
+ store_3(rgb+1, cast<U8>(to_fixed(g * 255)) );
+ store_3(rgb+2, cast<U8>(to_fixed(b * 255)) );
+ #endif
+ } return;
+
+ case Op_store_8888: {
+ store(dst + 4*i, cast<U32>(to_fixed(r * 255)) << 0
+ | cast<U32>(to_fixed(g * 255)) << 8
+ | cast<U32>(to_fixed(b * 255)) << 16
+ | cast<U32>(to_fixed(a * 255)) << 24);
+ } return;
+
+ case Op_store_101010x_XR: {
+ static constexpr float min = -0.752941f;
+ static constexpr float max = 1.25098f;
+ static constexpr float range = max - min;
+ store(dst + 4*i, cast<U32>(to_fixed(((r - min) / range) * 1023)) << 0
+ | cast<U32>(to_fixed(((g - min) / range) * 1023)) << 10
+ | cast<U32>(to_fixed(((b - min) / range) * 1023)) << 20);
+ return;
+ }
+ case Op_store_1010102: {
+ store(dst + 4*i, cast<U32>(to_fixed(r * 1023)) << 0
+ | cast<U32>(to_fixed(g * 1023)) << 10
+ | cast<U32>(to_fixed(b * 1023)) << 20
+ | cast<U32>(to_fixed(a * 3)) << 30);
+ } return;
+
+ case Op_store_161616LE: {
+ uintptr_t ptr = (uintptr_t)(dst + 6*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgb = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = {{
+ (uint16x8_t)U16_from_F(r),
+ (uint16x8_t)U16_from_F(g),
+ (uint16x8_t)U16_from_F(b),
+ }};
+ vst3q_u16(rgb, v);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = {{
+ (uint16x4_t)U16_from_F(r),
+ (uint16x4_t)U16_from_F(g),
+ (uint16x4_t)U16_from_F(b),
+ }};
+ vst3_u16(rgb, v);
+ #else
+ store_3(rgb+0, U16_from_F(r));
+ store_3(rgb+1, U16_from_F(g));
+ store_3(rgb+2, U16_from_F(b));
+ #endif
+
+ } return;
+
+ case Op_store_16161616LE: {
+ uintptr_t ptr = (uintptr_t)(dst + 8*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgba = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = {{
+ (uint16x8_t)U16_from_F(r),
+ (uint16x8_t)U16_from_F(g),
+ (uint16x8_t)U16_from_F(b),
+ (uint16x8_t)U16_from_F(a),
+ }};
+ vst4q_u16(rgba, v);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = {{
+ (uint16x4_t)U16_from_F(r),
+ (uint16x4_t)U16_from_F(g),
+ (uint16x4_t)U16_from_F(b),
+ (uint16x4_t)U16_from_F(a),
+ }};
+ vst4_u16(rgba, v);
+ #else
+ U64 px = cast<U64>(to_fixed(r * 65535)) << 0
+ | cast<U64>(to_fixed(g * 65535)) << 16
+ | cast<U64>(to_fixed(b * 65535)) << 32
+ | cast<U64>(to_fixed(a * 65535)) << 48;
+ store(rgba, px);
+ #endif
+ } return;
+
+ case Op_store_161616BE: {
+ uintptr_t ptr = (uintptr_t)(dst + 6*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgb = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = {{
+ (uint16x8_t)swap_endian_16(U16_from_F(r)),
+ (uint16x8_t)swap_endian_16(U16_from_F(g)),
+ (uint16x8_t)swap_endian_16(U16_from_F(b)),
+ }};
+ vst3q_u16(rgb, v);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = {{
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(r))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(g))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(b))),
+ }};
+ vst3_u16(rgb, v);
+ #else
+ U32 R = to_fixed(r * 65535),
+ G = to_fixed(g * 65535),
+ B = to_fixed(b * 65535);
+ store_3(rgb+0, cast<U16>((R & 0x00ff) << 8 | (R & 0xff00) >> 8) );
+ store_3(rgb+1, cast<U16>((G & 0x00ff) << 8 | (G & 0xff00) >> 8) );
+ store_3(rgb+2, cast<U16>((B & 0x00ff) << 8 | (B & 0xff00) >> 8) );
+ #endif
+
+ } return;
+
+ case Op_store_16161616BE: {
+ uintptr_t ptr = (uintptr_t)(dst + 8*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgba = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = {{
+ (uint16x8_t)swap_endian_16(U16_from_F(r)),
+ (uint16x8_t)swap_endian_16(U16_from_F(g)),
+ (uint16x8_t)swap_endian_16(U16_from_F(b)),
+ (uint16x8_t)swap_endian_16(U16_from_F(a)),
+ }};
+ vst4q_u16(rgba, v);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = {{
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(r))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(g))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(b))),
+ (uint16x4_t)swap_endian_16(cast<U16>(U16_from_F(a))),
+ }};
+ vst4_u16(rgba, v);
+ #else
+ U64 px = cast<U64>(to_fixed(r * 65535)) << 0
+ | cast<U64>(to_fixed(g * 65535)) << 16
+ | cast<U64>(to_fixed(b * 65535)) << 32
+ | cast<U64>(to_fixed(a * 65535)) << 48;
+ store(rgba, swap_endian_16x4(px));
+ #endif
+ } return;
+
+ case Op_store_hhh: {
+ uintptr_t ptr = (uintptr_t)(dst + 6*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgb = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+
+ U16 R = Half_from_F(r),
+ G = Half_from_F(g),
+ B = Half_from_F(b);
+ #if defined(USING_NEON_FP16)
+ uint16x8x3_t v = {{
+ (uint16x8_t)R,
+ (uint16x8_t)G,
+ (uint16x8_t)B,
+ }};
+ vst3q_u16(rgb, v);
+ #elif defined(USING_NEON)
+ uint16x4x3_t v = {{
+ (uint16x4_t)R,
+ (uint16x4_t)G,
+ (uint16x4_t)B,
+ }};
+ vst3_u16(rgb, v);
+ #else
+ store_3(rgb+0, R);
+ store_3(rgb+1, G);
+ store_3(rgb+2, B);
+ #endif
+ } return;
+
+ case Op_store_hhhh: {
+ uintptr_t ptr = (uintptr_t)(dst + 8*i);
+ assert( (ptr & 1) == 0 ); // The dst pointer must be 2-byte aligned
+ uint16_t* rgba = (uint16_t*)ptr; // for this cast to uint16_t* to be safe.
+
+ U16 R = Half_from_F(r),
+ G = Half_from_F(g),
+ B = Half_from_F(b),
+ A = Half_from_F(a);
+ #if defined(USING_NEON_FP16)
+ uint16x8x4_t v = {{
+ (uint16x8_t)R,
+ (uint16x8_t)G,
+ (uint16x8_t)B,
+ (uint16x8_t)A,
+ }};
+ vst4q_u16(rgba, v);
+ #elif defined(USING_NEON)
+ uint16x4x4_t v = {{
+ (uint16x4_t)R,
+ (uint16x4_t)G,
+ (uint16x4_t)B,
+ (uint16x4_t)A,
+ }};
+ vst4_u16(rgba, v);
+ #else
+ store(rgba, cast<U64>(R) << 0
+ | cast<U64>(G) << 16
+ | cast<U64>(B) << 32
+ | cast<U64>(A) << 48);
+ #endif
+
+ } return;
+
+ case Op_store_fff: {
+ uintptr_t ptr = (uintptr_t)(dst + 12*i);
+ assert( (ptr & 3) == 0 ); // The dst pointer must be 4-byte aligned
+ float* rgb = (float*)ptr; // for this cast to float* to be safe.
+ #if defined(USING_NEON_FP16)
+ float32x4x3_t lo = {{
+ vcvt_f32_f16(vget_low_f16(r)),
+ vcvt_f32_f16(vget_low_f16(g)),
+ vcvt_f32_f16(vget_low_f16(b)),
+ }}, hi = {{
+ vcvt_f32_f16(vget_high_f16(r)),
+ vcvt_f32_f16(vget_high_f16(g)),
+ vcvt_f32_f16(vget_high_f16(b)),
+ }};
+ vst3q_f32(rgb + 0, lo);
+ vst3q_f32(rgb + 12, hi);
+ #elif defined(USING_NEON)
+ float32x4x3_t v = {{
+ (float32x4_t)r,
+ (float32x4_t)g,
+ (float32x4_t)b,
+ }};
+ vst3q_f32(rgb, v);
+ #else
+ store_3(rgb+0, r);
+ store_3(rgb+1, g);
+ store_3(rgb+2, b);
+ #endif
+ } return;
+
+ case Op_store_ffff: {
+ uintptr_t ptr = (uintptr_t)(dst + 16*i);
+ assert( (ptr & 3) == 0 ); // The dst pointer must be 4-byte aligned
+ float* rgba = (float*)ptr; // for this cast to float* to be safe.
+ #if defined(USING_NEON_FP16)
+ float32x4x4_t lo = {{
+ vcvt_f32_f16(vget_low_f16(r)),
+ vcvt_f32_f16(vget_low_f16(g)),
+ vcvt_f32_f16(vget_low_f16(b)),
+ vcvt_f32_f16(vget_low_f16(a)),
+ }}, hi = {{
+ vcvt_f32_f16(vget_high_f16(r)),
+ vcvt_f32_f16(vget_high_f16(g)),
+ vcvt_f32_f16(vget_high_f16(b)),
+ vcvt_f32_f16(vget_high_f16(a)),
+ }};
+ vst4q_f32(rgba + 0, lo);
+ vst4q_f32(rgba + 16, hi);
+ #elif defined(USING_NEON)
+ float32x4x4_t v = {{
+ (float32x4_t)r,
+ (float32x4_t)g,
+ (float32x4_t)b,
+ (float32x4_t)a,
+ }};
+ vst4q_f32(rgba, v);
+ #else
+ store_4(rgba+0, r);
+ store_4(rgba+1, g);
+ store_4(rgba+2, b);
+ store_4(rgba+3, a);
+ #endif
+ } return;
+ }
+ }
+}
+
+
+static void run_program(const Op* program, const void** arguments,
+ const char* src, char* dst, int n,
+ const size_t src_bpp, const size_t dst_bpp) {
+ int i = 0;
+ while (n >= N) {
+ exec_ops(program, arguments, src, dst, i);
+ i += N;
+ n -= N;
+ }
+ if (n > 0) {
+ char tmp[4*4*N] = {0};
+
+ memcpy(tmp, (const char*)src + (size_t)i*src_bpp, (size_t)n*src_bpp);
+ exec_ops(program, arguments, tmp, tmp, 0);
+ memcpy((char*)dst + (size_t)i*dst_bpp, tmp, (size_t)n*dst_bpp);
+ }
+}
+
+// Clean up any #defines we may have set so that we can be #included again.
+#if defined(USING_AVX)
+ #undef USING_AVX
+#endif
+#if defined(USING_AVX_F16C)
+ #undef USING_AVX_F16C
+#endif
+#if defined(USING_AVX2)
+ #undef USING_AVX2
+#endif
+#if defined(USING_AVX512F)
+ #undef USING_AVX512F
+#endif
+
+#if defined(USING_NEON)
+ #undef USING_NEON
+#endif
+#if defined(USING_NEON_F16C)
+ #undef USING_NEON_F16C
+#endif
+#if defined(USING_NEON_FP16)
+ #undef USING_NEON_FP16
+#endif
+
+#undef FALLTHROUGH
diff --git a/gfx/skia/skia/modules/skcms/version.sha1 b/gfx/skia/skia/modules/skcms/version.sha1
new file mode 100755
index 0000000000..ae57df7d06
--- /dev/null
+++ b/gfx/skia/skia/modules/skcms/version.sha1
@@ -0,0 +1 @@
+ba39d81f9797aa973bdf01aa6b0363b280352fba
diff --git a/gfx/skia/skia/src/base/README.md b/gfx/skia/skia/src/base/README.md
new file mode 100644
index 0000000000..322c671436
--- /dev/null
+++ b/gfx/skia/skia/src/base/README.md
@@ -0,0 +1,4 @@
+The files here are part of the base package (see also include/private/base). The distinction
+is that the files here are not needed by anything in the public API.
+
+Files here should not depend on anything other than system headers or other files in base. \ No newline at end of file
diff --git a/gfx/skia/skia/src/base/SkASAN.h b/gfx/skia/skia/src/base/SkASAN.h
new file mode 100644
index 0000000000..8da93daaa0
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkASAN.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkASAN_DEFINED
+#define SkASAN_DEFINED
+
+#include <cstddef>
+
+#ifdef MOZ_SKIA
+
+#include "mozilla/MemoryChecking.h"
+
+#ifdef MOZ_HAVE_MEM_CHECKS
+#define SK_SANITIZE_ADDRESS MOZ_HAVE_MEM_CHECKS
+#endif
+
+static inline void sk_asan_poison_memory_region(void const volatile *addr, size_t size) {
+ MOZ_MAKE_MEM_NOACCESS(addr, size);
+}
+
+static inline void sk_asan_unpoison_memory_region(void const volatile *addr, size_t size) {
+ MOZ_MAKE_MEM_DEFINED(addr, size);
+}
+
+#else // !MOZ_SKIA
+
+#ifdef __SANITIZE_ADDRESS__
+ #define SK_SANITIZE_ADDRESS 1
+#endif
+#if !defined(SK_SANITIZE_ADDRESS) && defined(__has_feature)
+ #if __has_feature(address_sanitizer)
+ #define SK_SANITIZE_ADDRESS 1
+ #endif
+#endif
+
+// Typically declared in LLVM's asan_interface.h.
+#ifdef SK_SANITIZE_ADDRESS
+extern "C" {
+ void __asan_poison_memory_region(void const volatile *addr, size_t size);
+ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+}
+#endif
+
+// Code that implements bespoke allocation arenas can poison the entire arena on creation, then
+// unpoison chunks of arena memory as they are parceled out. Consider leaving gaps between blocks
+// to detect buffer overrun.
+static inline void sk_asan_poison_memory_region(void const volatile *addr, size_t size) {
+#ifdef SK_SANITIZE_ADDRESS
+ __asan_poison_memory_region(addr, size);
+#endif
+}
+
+static inline void sk_asan_unpoison_memory_region(void const volatile *addr, size_t size) {
+#ifdef SK_SANITIZE_ADDRESS
+ __asan_unpoison_memory_region(addr, size);
+#endif
+}
+
+#endif // !MOZ_SKIA
+
+#endif // SkASAN_DEFINED
diff --git a/gfx/skia/skia/src/base/SkArenaAlloc.cpp b/gfx/skia/skia/src/base/SkArenaAlloc.cpp
new file mode 100644
index 0000000000..2dc1c00226
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkArenaAlloc.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkArenaAlloc.h"
+
+#include "include/private/base/SkMalloc.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+
+static char* end_chain(char*) { return nullptr; }
+
+SkArenaAlloc::SkArenaAlloc(char* block, size_t size, size_t firstHeapAllocation)
+ : fDtorCursor {block}
+ , fCursor {block}
+ , fEnd {block + SkToU32(size)}
+ , fFibonacciProgression{SkToU32(size), SkToU32(firstHeapAllocation)}
+{
+ if (size < sizeof(Footer)) {
+ fEnd = fCursor = fDtorCursor = nullptr;
+ }
+
+ if (fCursor != nullptr) {
+ this->installFooter(end_chain, 0);
+ sk_asan_poison_memory_region(fCursor, fEnd - fCursor);
+ }
+}
+
+SkArenaAlloc::~SkArenaAlloc() {
+ RunDtorsOnBlock(fDtorCursor);
+}
+
+void SkArenaAlloc::installFooter(FooterAction* action, uint32_t padding) {
+ assert(SkTFitsIn<uint8_t>(padding));
+ this->installRaw(action);
+ this->installRaw((uint8_t)padding);
+ fDtorCursor = fCursor;
+}
+
+char* SkArenaAlloc::SkipPod(char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
+ uint32_t skip;
+ memmove(&skip, objEnd, sizeof(uint32_t));
+ return objEnd - (ptrdiff_t) skip;
+}
+
+void SkArenaAlloc::RunDtorsOnBlock(char* footerEnd) {
+ while (footerEnd != nullptr) {
+ FooterAction* action;
+ uint8_t padding;
+
+ memcpy(&action, footerEnd - sizeof( Footer), sizeof( action));
+ memcpy(&padding, footerEnd - sizeof(padding), sizeof(padding));
+
+ footerEnd = action(footerEnd) - (ptrdiff_t)padding;
+ }
+}
+
+char* SkArenaAlloc::NextBlock(char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(char*) + sizeof(Footer));
+ char* next;
+ memmove(&next, objEnd, sizeof(char*));
+ RunDtorsOnBlock(next);
+ sk_free(objEnd);
+ return nullptr;
+}
+
+void SkArenaAlloc::ensureSpace(uint32_t size, uint32_t alignment) {
+ constexpr uint32_t headerSize = sizeof(Footer) + sizeof(ptrdiff_t);
+ constexpr uint32_t maxSize = std::numeric_limits<uint32_t>::max();
+ constexpr uint32_t overhead = headerSize + sizeof(Footer);
+ AssertRelease(size <= maxSize - overhead);
+ uint32_t objSizeAndOverhead = size + overhead;
+
+ const uint32_t alignmentOverhead = alignment - 1;
+ AssertRelease(objSizeAndOverhead <= maxSize - alignmentOverhead);
+ objSizeAndOverhead += alignmentOverhead;
+
+ uint32_t minAllocationSize = fFibonacciProgression.nextBlockSize();
+ uint32_t allocationSize = std::max(objSizeAndOverhead, minAllocationSize);
+
+ // Round up to a nice size. If > 32K align to 4K boundary else up to max_align_t. The > 32K
+ // heuristic is from the JEMalloc behavior.
+ {
+ uint32_t mask = allocationSize > (1 << 15) ? (1 << 12) - 1 : 16 - 1;
+ AssertRelease(allocationSize <= maxSize - mask);
+ allocationSize = (allocationSize + mask) & ~mask;
+ }
+
+ char* newBlock = static_cast<char*>(sk_malloc_throw(allocationSize));
+
+ auto previousDtor = fDtorCursor;
+ fCursor = newBlock;
+ fDtorCursor = newBlock;
+ fEnd = fCursor + allocationSize;
+
+ // poison the unused bytes in the block.
+ sk_asan_poison_memory_region(fCursor, fEnd - fCursor);
+
+ this->installRaw(previousDtor);
+ this->installFooter(NextBlock, 0);
+}
+
+char* SkArenaAlloc::allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment) {
+ uintptr_t mask = alignment - 1;
+
+restart:
+ uint32_t skipOverhead = 0;
+ const bool needsSkipFooter = fCursor != fDtorCursor;
+ if (needsSkipFooter) {
+ skipOverhead = sizeof(Footer) + sizeof(uint32_t);
+ }
+ const uint32_t totalSize = sizeIncludingFooter + skipOverhead;
+
+ // Math on null fCursor/fEnd is undefined behavior, so explicitly check for first alloc.
+ if (!fCursor) {
+ this->ensureSpace(totalSize, alignment);
+ goto restart;
+ }
+
+ assert(fEnd);
+ // This test alone would be enough nullptr were defined to be 0, but it's not.
+ char* objStart = (char*)((uintptr_t)(fCursor + skipOverhead + mask) & ~mask);
+ if ((ptrdiff_t)totalSize > fEnd - objStart) {
+ this->ensureSpace(totalSize, alignment);
+ goto restart;
+ }
+
+ AssertRelease((ptrdiff_t)totalSize <= fEnd - objStart);
+
+ // Install a skip footer if needed, thus terminating a run of POD data. The calling code is
+ // responsible for installing the footer after the object.
+ if (needsSkipFooter) {
+ this->installRaw(SkToU32(fCursor - fDtorCursor));
+ this->installFooter(SkipPod, 0);
+ }
+
+ return objStart;
+}
+
+SkArenaAllocWithReset::SkArenaAllocWithReset(char* block,
+ size_t size,
+ size_t firstHeapAllocation)
+ : SkArenaAlloc(block, size, firstHeapAllocation)
+ , fFirstBlock{block}
+ , fFirstSize{SkToU32(size)}
+ , fFirstHeapAllocationSize{SkToU32(firstHeapAllocation)} {}
+
+void SkArenaAllocWithReset::reset() {
+ char* const firstBlock = fFirstBlock;
+ const uint32_t firstSize = fFirstSize;
+ const uint32_t firstHeapAllocationSize = fFirstHeapAllocationSize;
+ this->~SkArenaAllocWithReset();
+ new (this) SkArenaAllocWithReset{firstBlock, firstSize, firstHeapAllocationSize};
+}
+
+// SkFibonacci47 is the first 47 Fibonacci numbers. Fib(47) is the largest value less than 2 ^ 32.
+// Used by SkFibBlockSizes.
+std::array<const uint32_t, 47> SkFibonacci47 {
+ 1, 1, 2, 3, 5, 8,
+ 13, 21, 34, 55, 89, 144,
+ 233, 377, 610, 987, 1597, 2584,
+ 4181, 6765, 10946, 17711, 28657, 46368,
+ 75025, 121393, 196418, 317811, 514229, 832040,
+ 1346269, 2178309, 3524578, 5702887, 9227465, 14930352,
+ 24157817, 39088169, 63245986, 102334155, 165580141, 267914296,
+ 433494437, 701408733, 1134903170, 1836311903, 2971215073,
+};
diff --git a/gfx/skia/skia/src/base/SkArenaAlloc.h b/gfx/skia/skia/src/base/SkArenaAlloc.h
new file mode 100644
index 0000000000..547f2c5910
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkArenaAlloc.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArenaAlloc_DEFINED
+#define SkArenaAlloc_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkASAN.h"
+
+#include <algorithm>
+#include <array>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+// We found allocating strictly doubling amounts of memory from the heap left too
+// much unused slop, particularly on Android. Instead we'll follow a Fibonacci-like
+// progression.
+
+// SkFibonacci47 is the first 47 Fibonacci numbers. Fib(47) is the largest value less than 2 ^ 32.
+extern std::array<const uint32_t, 47> SkFibonacci47;
+template<uint32_t kMaxSize>
+class SkFibBlockSizes {
+public:
+ // staticBlockSize, and firstAllocationSize are parameters describing the initial memory
+ // layout. staticBlockSize describes the size of the inlined memory, and firstAllocationSize
+ // describes the size of the first block to be allocated if the static block is exhausted. By
+ // convention, firstAllocationSize is the first choice for the block unit size followed by
+ // staticBlockSize followed by the default of 1024 bytes.
+ SkFibBlockSizes(uint32_t staticBlockSize, uint32_t firstAllocationSize) : fIndex{0} {
+ fBlockUnitSize = firstAllocationSize > 0 ? firstAllocationSize :
+ staticBlockSize > 0 ? staticBlockSize : 1024;
+
+ SkASSERT_RELEASE(0 < fBlockUnitSize);
+ SkASSERT_RELEASE(fBlockUnitSize < std::min(kMaxSize, (1u << 26) - 1));
+ }
+
+ uint32_t nextBlockSize() {
+ uint32_t result = SkFibonacci47[fIndex] * fBlockUnitSize;
+
+ if (SkTo<size_t>(fIndex + 1) < SkFibonacci47.size() &&
+ SkFibonacci47[fIndex + 1] < kMaxSize / fBlockUnitSize)
+ {
+ fIndex += 1;
+ }
+
+ return result;
+ }
+
+private:
+ uint32_t fIndex : 6;
+ uint32_t fBlockUnitSize : 26;
+};
+
+// SkArenaAlloc allocates object and destroys the allocated objects when destroyed. It's designed
+// to minimize the number of underlying block allocations. SkArenaAlloc allocates first out of an
+// (optional) user-provided block of memory, and when that's exhausted it allocates on the heap,
+// starting with an allocation of firstHeapAllocation bytes. If your data (plus a small overhead)
+// fits in the user-provided block, SkArenaAlloc never uses the heap, and if it fits in
+// firstHeapAllocation bytes, it'll use the heap only once. If 0 is specified for
+// firstHeapAllocation, then blockSize is used unless that too is 0, then 1024 is used.
+//
+// Examples:
+//
+// char block[mostCasesSize];
+// SkArenaAlloc arena(block, mostCasesSize);
+//
+// If mostCasesSize is too large for the stack, you can use the following pattern.
+//
+// std::unique_ptr<char[]> block{new char[mostCasesSize]};
+// SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
+//
+// If the program only sometimes allocates memory, use the following pattern.
+//
+// SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
+//
+// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
+// works.
+//
+// class Foo {
+// char storage[mostCasesSize];
+// SkArenaAlloc arena (storage, mostCasesSize);
+// };
+//
+// In addition, the system is optimized to handle POD data including arrays of PODs (where
+// POD is really data with no destructors). For POD data it has zero overhead per item, and a
+// typical per block overhead of 8 bytes. For non-POD objects there is a per item overhead of 4
+// bytes. For arrays of non-POD objects there is a per array overhead of typically 8 bytes. There
+// is an additional overhead when switching from POD data to non-POD data of typically 8 bytes.
+//
+// If additional blocks are needed they are increased exponentially. This strategy bounds the
+// recursion of the RunDtorsOnBlock to be limited to O(log size-of-memory). Block sizes grow using
+// the Fibonacci sequence which means that for 2^32 memory there are 48 allocations, and for 2^48
+// there are 71 allocations.
+class SkArenaAlloc {
+public:
+ SkArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);
+
+ explicit SkArenaAlloc(size_t firstHeapAllocation)
+ : SkArenaAlloc(nullptr, 0, firstHeapAllocation) {}
+
+ SkArenaAlloc(const SkArenaAlloc&) = delete;
+ SkArenaAlloc& operator=(const SkArenaAlloc&) = delete;
+ SkArenaAlloc(SkArenaAlloc&&) = delete;
+ SkArenaAlloc& operator=(SkArenaAlloc&&) = delete;
+
+ ~SkArenaAlloc();
+
+ template <typename Ctor>
+ auto make(Ctor&& ctor) -> decltype(ctor(nullptr)) {
+ using T = std::remove_pointer_t<decltype(ctor(nullptr))>;
+
+ uint32_t size = SkToU32(sizeof(T));
+ uint32_t alignment = SkToU32(alignof(T));
+ char* objStart;
+ if (std::is_trivially_destructible<T>::value) {
+ objStart = this->allocObject(size, alignment);
+ fCursor = objStart + size;
+ sk_asan_unpoison_memory_region(objStart, size);
+ } else {
+ objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
+ // Can never be UB because max value is alignof(T).
+ uint32_t padding = SkToU32(objStart - fCursor);
+
+ // Advance to end of object to install footer.
+ fCursor = objStart + size;
+ sk_asan_unpoison_memory_region(objStart, size);
+ FooterAction* releaser = [](char* objEnd) {
+ char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
+ ((T*)objStart)->~T();
+ return objStart;
+ };
+ this->installFooter(releaser, padding);
+ }
+
+ // This must be last to make objects with nested use of this allocator work.
+ return ctor(objStart);
+ }
+
+ template <typename T, typename... Args>
+ T* make(Args&&... args) {
+ return this->make([&](void* objStart) {
+ return new(objStart) T(std::forward<Args>(args)...);
+ });
+ }
+
+ template <typename T>
+ T* makeArrayDefault(size_t count) {
+ T* array = this->allocUninitializedArray<T>(count);
+ for (size_t i = 0; i < count; i++) {
+ // Default initialization: if T is primitive then the value is left uninitialized.
+ new (&array[i]) T;
+ }
+ return array;
+ }
+
+ template <typename T>
+ T* makeArray(size_t count) {
+ T* array = this->allocUninitializedArray<T>(count);
+ for (size_t i = 0; i < count; i++) {
+ // Value initialization: if T is primitive then the value is zero-initialized.
+ new (&array[i]) T();
+ }
+ return array;
+ }
+
+ template <typename T, typename Initializer>
+ T* makeInitializedArray(size_t count, Initializer initializer) {
+ T* array = this->allocUninitializedArray<T>(count);
+ for (size_t i = 0; i < count; i++) {
+ new (&array[i]) T(initializer(i));
+ }
+ return array;
+ }
+
+ // Only use makeBytesAlignedTo if none of the typed variants are impractical to use.
+ void* makeBytesAlignedTo(size_t size, size_t align) {
+ AssertRelease(SkTFitsIn<uint32_t>(size));
+ auto objStart = this->allocObject(SkToU32(size), SkToU32(align));
+ fCursor = objStart + size;
+ sk_asan_unpoison_memory_region(objStart, size);
+ return objStart;
+ }
+
+private:
+ static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }
+
+ using FooterAction = char* (char*);
+ struct Footer {
+ uint8_t unaligned_action[sizeof(FooterAction*)];
+ uint8_t padding;
+ };
+
+ static char* SkipPod(char* footerEnd);
+ static void RunDtorsOnBlock(char* footerEnd);
+ static char* NextBlock(char* footerEnd);
+
+ template <typename T>
+ void installRaw(const T& val) {
+ sk_asan_unpoison_memory_region(fCursor, sizeof(val));
+ memcpy(fCursor, &val, sizeof(val));
+ fCursor += sizeof(val);
+ }
+ void installFooter(FooterAction* releaser, uint32_t padding);
+
+ void ensureSpace(uint32_t size, uint32_t alignment);
+
+ char* allocObject(uint32_t size, uint32_t alignment) {
+ uintptr_t mask = alignment - 1;
+ uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+ uintptr_t totalSize = size + alignedOffset;
+ AssertRelease(totalSize >= size);
+ if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
+ this->ensureSpace(size, alignment);
+ alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+ }
+
+ char* object = fCursor + alignedOffset;
+
+ SkASSERT((reinterpret_cast<uintptr_t>(object) & (alignment - 1)) == 0);
+ SkASSERT(object + size <= fEnd);
+
+ return object;
+ }
+
+ char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);
+
+ template <typename T>
+ T* allocUninitializedArray(size_t countZ) {
+ AssertRelease(SkTFitsIn<uint32_t>(countZ));
+ uint32_t count = SkToU32(countZ);
+
+ char* objStart;
+ AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
+ uint32_t arraySize = SkToU32(count * sizeof(T));
+ uint32_t alignment = SkToU32(alignof(T));
+
+ if (std::is_trivially_destructible<T>::value) {
+ objStart = this->allocObject(arraySize, alignment);
+ fCursor = objStart + arraySize;
+ sk_asan_unpoison_memory_region(objStart, arraySize);
+ } else {
+ constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
+ AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
+ uint32_t totalSize = arraySize + overhead;
+ objStart = this->allocObjectWithFooter(totalSize, alignment);
+
+ // Can never be UB because max value is alignof(T).
+ uint32_t padding = SkToU32(objStart - fCursor);
+
+ // Advance to end of array to install footer.
+ fCursor = objStart + arraySize;
+ sk_asan_unpoison_memory_region(objStart, arraySize);
+ this->installRaw(SkToU32(count));
+ this->installFooter(
+ [](char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
+ uint32_t count;
+ memmove(&count, objEnd, sizeof(uint32_t));
+ char* objStart = objEnd - count * sizeof(T);
+ T* array = (T*) objStart;
+ for (uint32_t i = 0; i < count; i++) {
+ array[i].~T();
+ }
+ return objStart;
+ },
+ padding);
+ }
+
+ return (T*)objStart;
+ }
+
+ char* fDtorCursor;
+ char* fCursor;
+ char* fEnd;
+
+ SkFibBlockSizes<std::numeric_limits<uint32_t>::max()> fFibonacciProgression;
+};
+
+class SkArenaAllocWithReset : public SkArenaAlloc {
+public:
+ SkArenaAllocWithReset(char* block, size_t blockSize, size_t firstHeapAllocation);
+
+ explicit SkArenaAllocWithReset(size_t firstHeapAllocation)
+ : SkArenaAllocWithReset(nullptr, 0, firstHeapAllocation) {}
+
+ // Destroy all allocated objects, free any heap allocations.
+ void reset();
+
+private:
+ char* const fFirstBlock;
+ const uint32_t fFirstSize;
+ const uint32_t fFirstHeapAllocationSize;
+};
+
+// Helper for defining allocators with inline/reserved storage.
+// For argument declarations, stick to the base type (SkArenaAlloc).
+// Note: Inheriting from the storage first means the storage will outlive the
+// SkArenaAlloc, letting ~SkArenaAlloc read it as it calls destructors.
+// (This is mostly only relevant for strict tools like MSAN.)
+template <size_t InlineStorageSize>
+class SkSTArenaAlloc : private std::array<char, InlineStorageSize>, public SkArenaAlloc {
+public:
+ explicit SkSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize)
+ : SkArenaAlloc{this->data(), this->size(), firstHeapAllocation} {}
+
+ ~SkSTArenaAlloc() {
+ // Be sure to unpoison the memory that is probably on the stack.
+ sk_asan_unpoison_memory_region(this->data(), this->size());
+ }
+};
+
+template <size_t InlineStorageSize>
+class SkSTArenaAllocWithReset
+ : private std::array<char, InlineStorageSize>, public SkArenaAllocWithReset {
+public:
+ explicit SkSTArenaAllocWithReset(size_t firstHeapAllocation = InlineStorageSize)
+ : SkArenaAllocWithReset{this->data(), this->size(), firstHeapAllocation} {}
+
+ ~SkSTArenaAllocWithReset() {
+ // Be sure to unpoison the memory that is probably on the stack.
+ sk_asan_unpoison_memory_region(this->data(), this->size());
+ }
+};
+
+#endif // SkArenaAlloc_DEFINED
diff --git a/gfx/skia/skia/src/base/SkArenaAllocList.h b/gfx/skia/skia/src/base/SkArenaAllocList.h
new file mode 100644
index 0000000000..57bce52023
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkArenaAllocList.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArenaAllocList_DEFINED
+#define SkArenaAllocList_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "src/base/SkArenaAlloc.h" // IWYU pragma: keep
+
+#include <utility>
+
+/**
+ * A singly linked list of Ts stored in a SkArenaAlloc. The arena rather than the list owns
+ * the elements. This supports forward iteration and range based for loops.
+ */
+template <typename T>
+class SkArenaAllocList {
+private:
+ struct Node;
+
+public:
+ SkArenaAllocList() = default;
+
+ void reset() { fHead = fTail = nullptr; }
+
+ template <typename... Args>
+ inline T& append(SkArenaAlloc* arena, Args... args);
+
+ class Iter {
+ public:
+ Iter() = default;
+ inline Iter& operator++();
+ T& operator*() const { return fCurr->fT; }
+ T* operator->() const { return &fCurr->fT; }
+ bool operator==(const Iter& that) const { return fCurr == that.fCurr; }
+ bool operator!=(const Iter& that) const { return !(*this == that); }
+
+ private:
+ friend class SkArenaAllocList;
+ explicit Iter(Node* node) : fCurr(node) {}
+ Node* fCurr = nullptr;
+ };
+
+ Iter begin() { return Iter(fHead); }
+ Iter end() { return Iter(); }
+ Iter tail() { return Iter(fTail); }
+
+private:
+ struct Node {
+ template <typename... Args>
+ Node(Args... args) : fT(std::forward<Args>(args)...) {}
+ T fT;
+ Node* fNext = nullptr;
+ };
+ Node* fHead = nullptr;
+ Node* fTail = nullptr;
+};
+
+template <typename T>
+template <typename... Args>
+T& SkArenaAllocList<T>::append(SkArenaAlloc* arena, Args... args) {
+ SkASSERT(!fHead == !fTail);
+ auto* n = arena->make<Node>(std::forward<Args>(args)...);
+ if (!fTail) {
+ fHead = fTail = n;
+ } else {
+ fTail = fTail->fNext = n;
+ }
+ return fTail->fT;
+}
+
+template <typename T>
+typename SkArenaAllocList<T>::Iter& SkArenaAllocList<T>::Iter::operator++() {
+ fCurr = fCurr->fNext;
+ return *this;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkAutoMalloc.h b/gfx/skia/skia/src/base/SkAutoMalloc.h
new file mode 100644
index 0000000000..6520cc0582
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkAutoMalloc.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoMalloc_DEFINED
+#define SkAutoMalloc_DEFINED
+
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkNoncopyable.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+/**
+ * Manage an allocated block of heap memory. This object is the sole manager of
+ * the lifetime of the block, so the caller must not call sk_free() or delete
+ * on the block, unless release() was called.
+ */
+class SkAutoMalloc : SkNoncopyable {
+public:
+ explicit SkAutoMalloc(size_t size = 0)
+ : fPtr(size ? sk_malloc_throw(size) : nullptr), fSize(size) {}
+
+ /**
+ * Passed to reset to specify what happens if the requested size is smaller
+ * than the current size (and the current block was dynamically allocated).
+ */
+ enum OnShrink {
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, free the old block and
+ * malloc a new block of the smaller size.
+ */
+ kAlloc_OnShrink,
+
+ /**
+ * If the requested size is smaller than the current size, and the
+ * current block is dynamically allocated, just return the old
+ * block.
+ */
+ kReuse_OnShrink
+ };
+
+ /**
+ * Reallocates the block to a new size. The ptr may or may not change.
+ */
+ void* reset(size_t size = 0, OnShrink shrink = kAlloc_OnShrink) {
+ if (size != fSize && (size > fSize || kReuse_OnShrink != shrink)) {
+ fPtr.reset(size ? sk_malloc_throw(size) : nullptr);
+ fSize = size;
+ }
+ return fPtr.get();
+ }
+
+ /**
+ * Return the allocated block.
+ */
+ void* get() { return fPtr.get(); }
+ const void* get() const { return fPtr.get(); }
+
+ /** Transfer ownership of the current ptr to the caller, setting the
+        internal reference to null. Note the caller is responsible for calling
+ sk_free on the returned address.
+ */
+ void* release() {
+ fSize = 0;
+ return fPtr.release();
+ }
+
+private:
+ struct WrapFree {
+ void operator()(void* p) { sk_free(p); }
+ };
+ std::unique_ptr<void, WrapFree> fPtr;
+ size_t fSize; // can be larger than the requested size (see kReuse)
+};
+
+/**
+ * Manage an allocated block of memory. If the requested size is <= kSizeRequested (or slightly
+ * more), then the allocation will come from the stack rather than the heap. This object is the
+ * sole manager of the lifetime of the block, so the caller must not call sk_free() or delete on
+ * the block.
+ */
+template <size_t kSizeRequested> class SkAutoSMalloc : SkNoncopyable {
+public:
+ /**
+ * Creates initially empty storage. get() returns a ptr, but it is to a zero-byte allocation.
+ * Must call reset(size) to return an allocated block.
+ */
+ SkAutoSMalloc() {
+ fPtr = fStorage;
+ fSize = kSize;
+ }
+
+ /**
+ * Allocate a block of the specified size. If size <= kSizeRequested (or slightly more), then
+ * the allocation will come from the stack, otherwise it will be dynamically allocated.
+ */
+ explicit SkAutoSMalloc(size_t size) {
+ fPtr = fStorage;
+ fSize = kSize;
+ this->reset(size);
+ }
+
+ /**
+ * Free the allocated block (if any). If the block was small enough to have been allocated on
+ * the stack, then this does nothing.
+ */
+ ~SkAutoSMalloc() {
+ if (fPtr != (void*)fStorage) {
+ sk_free(fPtr);
+ }
+ }
+
+ /**
+ * Return the allocated block. May return non-null even if the block is of zero size. Since
+ * this may be on the stack or dynamically allocated, the caller must not call sk_free() on it,
+ * but must rely on SkAutoSMalloc to manage it.
+ */
+ void* get() const { return fPtr; }
+
+ /**
+ * Return a new block of the requested size, freeing (as necessary) any previously allocated
+ * block. As with the constructor, if size <= kSizeRequested (or slightly more) then the return
+ * block may be allocated locally, rather than from the heap.
+ */
+ void* reset(size_t size,
+ SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink,
+ bool* didChangeAlloc = nullptr) {
+ size = (size < kSize) ? kSize : size;
+ bool alloc = size != fSize && (SkAutoMalloc::kAlloc_OnShrink == shrink || size > fSize);
+ if (didChangeAlloc) {
+ *didChangeAlloc = alloc;
+ }
+ if (alloc) {
+ if (fPtr != (void*)fStorage) {
+ sk_free(fPtr);
+ }
+
+ if (size == kSize) {
+ SkASSERT(fPtr != fStorage); // otherwise we lied when setting didChangeAlloc.
+ fPtr = fStorage;
+ } else {
+ fPtr = sk_malloc_throw(size);
+ }
+
+ fSize = size;
+ }
+ SkASSERT(fSize >= size && fSize >= kSize);
+ SkASSERT((fPtr == fStorage) || fSize > kSize);
+ return fPtr;
+ }
+
+private:
+ // Align up to 32 bits.
+ static const size_t kSizeAlign4 = SkAlign4(kSizeRequested);
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // Stack frame size is limited for SK_BUILD_FOR_GOOGLE3. 4k is less than the actual max, but some functions
+ // have multiple large stack allocations.
+ static const size_t kMaxBytes = 4 * 1024;
+ static const size_t kSize = kSizeRequested > kMaxBytes ? kMaxBytes : kSizeAlign4;
+#else
+ static const size_t kSize = kSizeAlign4;
+#endif
+
+ void* fPtr;
+ size_t fSize; // can be larger than the requested size (see kReuse)
+ uint32_t fStorage[kSize >> 2];
+};
+// Can't guard the constructor because it's a template class.
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkBezierCurves.cpp b/gfx/skia/skia/src/base/SkBezierCurves.cpp
new file mode 100644
index 0000000000..a79129ff7d
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkBezierCurves.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkBezierCurves.h"
+
+#include "include/private/base/SkAssert.h"
+
+#include <cstddef>
+
+static inline double interpolate(double A, double B, double t) {
+ return A + (B - A) * t;
+}
+
+std::array<double, 2> SkBezierCubic::EvalAt(const double curve[8], double t) {
+ const auto in_X = [&curve](size_t n) { return curve[2*n]; };
+ const auto in_Y = [&curve](size_t n) { return curve[2*n + 1]; };
+
+ // Two semi-common fast paths
+ if (t == 0) {
+ return {in_X(0), in_Y(0)};
+ }
+ if (t == 1) {
+ return {in_X(3), in_Y(3)};
+ }
+ // X(t) = X_0*(1-t)^3 + 3*X_1*t(1-t)^2 + 3*X_2*t^2(1-t) + X_3*t^3
+ // Y(t) = Y_0*(1-t)^3 + 3*Y_1*t(1-t)^2 + 3*Y_2*t^2(1-t) + Y_3*t^3
+ // Some compilers are smart enough and have sufficient registers/intrinsics to write optimal
+ // code from
+ // double one_minus_t = 1 - t;
+ // double a = one_minus_t * one_minus_t * one_minus_t;
+ // double b = 3 * one_minus_t * one_minus_t * t;
+ // double c = 3 * one_minus_t * t * t;
+ // double d = t * t * t;
+ // However, some (e.g. when compiling for ARM) fail to do so, so we use this form
+ // to help more compilers generate smaller/faster ASM. https://godbolt.org/z/M6jG9x45c
+ double one_minus_t = 1 - t;
+ double one_minus_t_squared = one_minus_t * one_minus_t;
+ double a = (one_minus_t_squared * one_minus_t);
+ double b = 3 * one_minus_t_squared * t;
+ double t_squared = t * t;
+ double c = 3 * one_minus_t * t_squared;
+ double d = t_squared * t;
+
+ return {a * in_X(0) + b * in_X(1) + c * in_X(2) + d * in_X(3),
+ a * in_Y(0) + b * in_Y(1) + c * in_Y(2) + d * in_Y(3)};
+}
+
+// Perform subdivision using De Casteljau's algorithm, that is, repeated linear
+// interpolation between adjacent points.
+void SkBezierCubic::Subdivide(const double curve[8], double t,
+ double twoCurves[14]) {
+ SkASSERT(0.0 <= t && t <= 1.0);
+ // We split the curve "in" into two curves "alpha" and "beta"
+ const auto in_X = [&curve](size_t n) { return curve[2*n]; };
+ const auto in_Y = [&curve](size_t n) { return curve[2*n + 1]; };
+ const auto alpha_X = [&twoCurves](size_t n) -> double& { return twoCurves[2*n]; };
+ const auto alpha_Y = [&twoCurves](size_t n) -> double& { return twoCurves[2*n + 1]; };
+ const auto beta_X = [&twoCurves](size_t n) -> double& { return twoCurves[2*n + 6]; };
+ const auto beta_Y = [&twoCurves](size_t n) -> double& { return twoCurves[2*n + 7]; };
+
+ alpha_X(0) = in_X(0);
+ alpha_Y(0) = in_Y(0);
+
+ beta_X(3) = in_X(3);
+ beta_Y(3) = in_Y(3);
+
+ double x01 = interpolate(in_X(0), in_X(1), t);
+ double y01 = interpolate(in_Y(0), in_Y(1), t);
+ double x12 = interpolate(in_X(1), in_X(2), t);
+ double y12 = interpolate(in_Y(1), in_Y(2), t);
+ double x23 = interpolate(in_X(2), in_X(3), t);
+ double y23 = interpolate(in_Y(2), in_Y(3), t);
+
+ alpha_X(1) = x01;
+ alpha_Y(1) = y01;
+
+ beta_X(2) = x23;
+ beta_Y(2) = y23;
+
+ alpha_X(2) = interpolate(x01, x12, t);
+ alpha_Y(2) = interpolate(y01, y12, t);
+
+ beta_X(1) = interpolate(x12, x23, t);
+ beta_Y(1) = interpolate(y12, y23, t);
+
+ alpha_X(3) /*= beta_X(0) */ = interpolate(alpha_X(2), beta_X(1), t);
+ alpha_Y(3) /*= beta_Y(0) */ = interpolate(alpha_Y(2), beta_Y(1), t);
+}
+
+std::array<double, 4> SkBezierCubic::ConvertToPolynomial(const double curve[8], bool yValues) {
+ const double* offset_curve = yValues ? curve + 1 : curve;
+ const auto P = [&offset_curve](size_t n) { return offset_curve[2*n]; };
+ // A cubic Bézier curve is interpolated as follows:
+ // c(t) = (1 - t)^3 P_0 + 3t(1 - t)^2 P_1 + 3t^2 (1 - t) P_2 + t^3 P_3
+ // = (-P_0 + 3P_1 + -3P_2 + P_3) t^3 + (3P_0 - 6P_1 + 3P_2) t^2 +
+ // (-3P_0 + 3P_1) t + P_0
+ // Where P_N is the Nth point. The second step expands the polynomial and groups
+ // by powers of t. The desired output is a cubic formula, so we just need to
+ // combine the appropriate points to make the coefficients.
+ std::array<double, 4> results;
+ results[0] = -P(0) + 3*P(1) - 3*P(2) + P(3);
+ results[1] = 3*P(0) - 6*P(1) + 3*P(2);
+ results[2] = -3*P(0) + 3*P(1);
+ results[3] = P(0);
+ return results;
+}
+
diff --git a/gfx/skia/skia/src/base/SkBezierCurves.h b/gfx/skia/skia/src/base/SkBezierCurves.h
new file mode 100644
index 0000000000..772fee4bf7
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkBezierCurves.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBezierCurves_DEFINED
+#define SkBezierCurves_DEFINED
+
+#include <array>
+
+/**
+ * Utilities for dealing with cubic Bézier curves. These have a start XY
+ * point, an end XY point, and two control XY points in between. They take
+ * a parameter t which is between 0 and 1 (inclusive) which is used to
+ * interpolate between the start and end points, via a route dictated by
+ * the control points, and return a new XY point.
+ *
+ * We store a Bézier curve as an array of 8 floats or doubles, where
+ * the even indices are the X coordinates, and the odd indices are the Y
+ * coordinates.
+ */
+class SkBezierCubic {
+public:
+
+ /**
+ * Evaluates the cubic Bézier curve for a given t. It returns an X and Y coordinate
+ * following the formula, which does the interpolation mentioned above.
+ * X(t) = X_0*(1-t)^3 + 3*X_1*t(1-t)^2 + 3*X_2*t^2(1-t) + X_3*t^3
+ * Y(t) = Y_0*(1-t)^3 + 3*Y_1*t(1-t)^2 + 3*Y_2*t^2(1-t) + Y_3*t^3
+ *
+ * t is typically in the range [0, 1], but this function will not assert that,
+ * as Bézier curves are well-defined for any real number input.
+ */
+ static std::array<double, 2> EvalAt(const double curve[8], double t);
+
+ /**
+ * Splits the provided Bézier curve at the location t, resulting in two
+ * Bézier curves that share a point (the end point from curve 1
+ * and the start point from curve 2 are the same).
+ *
+ * t must be in the interval [0, 1].
+ *
+ * The provided twoCurves array will be filled such that indices
+ * 0-7 are the first curve (representing the interval [0, t]), and
+ * indices 6-13 are the second curve (representing [t, 1]).
+ */
+ static void Subdivide(const double curve[8], double t,
+ double twoCurves[14]);
+
+ /**
+     * Converts the provided Bézier curve into the equivalent cubic
+ * f(t) = A*t^3 + B*t^2 + C*t + D
+ * where f(t) will represent Y coordinates over time if yValues is
+ * true and the X coordinates if yValues is false.
+ *
+ * In effect, this turns the control points into an actual line, representing
+ * the x or y values.
+ */
+ static std::array<double, 4> ConvertToPolynomial(const double curve[8], bool yValues);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkBlockAllocator.cpp b/gfx/skia/skia/src/base/SkBlockAllocator.cpp
new file mode 100644
index 0000000000..e62fc2078d
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkBlockAllocator.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkBlockAllocator.h"
+
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTo.h"
+
+#ifdef SK_DEBUG
+#include <vector>
+#endif
+
+SkBlockAllocator::SkBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
+ size_t additionalPreallocBytes)
+ : fTail(&fHead)
+ // Round up to the nearest max-aligned value, and then divide so that fBlockSizeIncrement
+ // can effectively fit higher byte counts in its 16 bits of storage
+ , fBlockIncrement(SkTo<uint16_t>(
+ std::min(SkAlignTo(blockIncrementBytes, kAddressAlign) / kAddressAlign,
+ (size_t) std::numeric_limits<uint16_t>::max())))
+ , fGrowthPolicy(static_cast<uint64_t>(policy))
+ , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
+ , fN1(1)
+ // The head block always fills remaining space from SkBlockAllocator's size, because it's
+ // inline, but can take over the specified number of bytes immediately after it.
+ , fHead(/*prev=*/nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
+ SkASSERT(fBlockIncrement >= 1);
+ SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
+}
+
+SkBlockAllocator::Block::Block(Block* prev, int allocationSize)
+ : fNext(nullptr)
+ , fPrev(prev)
+ , fSize(allocationSize)
+ , fCursor(kDataStart)
+ , fMetadata(0)
+ , fAllocatorMetadata(0) {
+ SkASSERT(allocationSize >= (int) sizeof(Block));
+ SkDEBUGCODE(fSentinel = kAssignedMarker;)
+
+ this->poisonRange(kDataStart, fSize);
+}
+
+SkBlockAllocator::Block::~Block() {
+ this->unpoisonRange(kDataStart, fSize);
+
+ SkASSERT(fSentinel == kAssignedMarker);
+ SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
+}
+
+size_t SkBlockAllocator::totalSize() const {
+ // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
+ size_t size = offsetof(SkBlockAllocator, fHead) + this->scratchBlockSize();
+ for (const Block* b : this->blocks()) {
+ size += b->fSize;
+ }
+ SkASSERT(size >= this->preallocSize());
+ return size;
+}
+
+size_t SkBlockAllocator::totalUsableSpace() const {
+ size_t size = this->scratchBlockSize();
+ if (size > 0) {
+ size -= kDataStart; // scratchBlockSize reports total block size, not usable size
+ }
+ for (const Block* b : this->blocks()) {
+ size += (b->fSize - kDataStart);
+ }
+ SkASSERT(size >= this->preallocUsableSpace());
+ return size;
+}
+
+size_t SkBlockAllocator::totalSpaceInUse() const {
+ size_t size = 0;
+ for (const Block* b : this->blocks()) {
+ size += (b->fCursor - kDataStart);
+ }
+ SkASSERT(size <= this->totalUsableSpace());
+ return size;
+}
+
+SkBlockAllocator::Block* SkBlockAllocator::findOwningBlock(const void* p) {
+ // When in doubt, search in reverse to find an overlapping block.
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
+ for (Block* b : this->rblocks()) {
+ uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
+ uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
+ if (lowerBound <= ptr && ptr < upperBound) {
+ SkASSERT(b->fSentinel == kAssignedMarker);
+ return b;
+ }
+ }
+ return nullptr;
+}
+
+void SkBlockAllocator::releaseBlock(Block* block) {
+ if (block == &fHead) {
+ // Reset the cursor of the head block so that it can be reused if it becomes the new tail
+ block->fCursor = kDataStart;
+ block->fMetadata = 0;
+ block->poisonRange(kDataStart, block->fSize);
+ // Unlike in reset(), we don't set the head's next block to null because there are
+ // potentially heap-allocated blocks that are still connected to it.
+ } else {
+ SkASSERT(block->fPrev);
+ block->fPrev->fNext = block->fNext;
+ if (block->fNext) {
+ SkASSERT(fTail != block);
+ block->fNext->fPrev = block->fPrev;
+ } else {
+ SkASSERT(fTail == block);
+ fTail = block->fPrev;
+ }
+
+ // The released block becomes the new scratch block (if it's bigger), or delete it
+ if (this->scratchBlockSize() < block->fSize) {
+ SkASSERT(block != fHead.fPrev); // shouldn't already be the scratch block
+ if (fHead.fPrev) {
+ delete fHead.fPrev;
+ }
+ block->markAsScratch();
+ fHead.fPrev = block;
+ } else {
+ delete block;
+ }
+ }
+
+ // Decrement growth policy (opposite of addBlock()'s increment operations)
+ GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
+ if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
+ SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing, fN0 always is 0
+ if (gp == GrowthPolicy::kLinear) {
+ fN1 = fN1 - fN0;
+ } else if (gp == GrowthPolicy::kFibonacci) {
+ // Subtract n0 from n1 to get the prior 2 terms in the fibonacci sequence
+ int temp = fN1 - fN0; // yields prior fN0
+ fN1 = fN1 - temp; // yields prior fN1
+ fN0 = temp;
+ } else {
+ SkASSERT(gp == GrowthPolicy::kExponential);
+ // Divide by 2 to undo the 2N update from addBlock
+ fN1 = fN1 >> 1;
+ fN0 = fN1;
+ }
+ }
+
+ SkASSERT(fN1 >= 1 && fN0 >= 0);
+}
+
+void SkBlockAllocator::stealHeapBlocks(SkBlockAllocator* other) {
+ Block* toSteal = other->fHead.fNext;
+ if (toSteal) {
+ // The other's next block connects back to this allocator's current tail, and its new tail
+ // becomes the end of other's block linked list.
+ SkASSERT(other->fTail != &other->fHead);
+ toSteal->fPrev = fTail;
+ fTail->fNext = toSteal;
+ fTail = other->fTail;
+ // The other allocator becomes just its inline head block
+ other->fTail = &other->fHead;
+ other->fHead.fNext = nullptr;
+ } // else no block to steal
+}
+
+void SkBlockAllocator::reset() {
+ for (Block* b : this->rblocks()) {
+ if (b == &fHead) {
+ // Reset metadata and cursor, tail points to the head block again
+ fTail = b;
+ b->fNext = nullptr;
+ b->fCursor = kDataStart;
+ b->fMetadata = 0;
+ // For reset(), but NOT releaseBlock(), the head allocatorMetadata and scratch block
+ // are reset/destroyed.
+ b->fAllocatorMetadata = 0;
+ b->poisonRange(kDataStart, b->fSize);
+ this->resetScratchSpace();
+ } else {
+ delete b;
+ }
+ }
+ SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
+ fHead.metadata() == 0 && fHead.fCursor == kDataStart);
+
+ GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
+ fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
+ fN1 = 1;
+}
+
+void SkBlockAllocator::resetScratchSpace() {
+ if (fHead.fPrev) {
+ delete fHead.fPrev;
+ fHead.fPrev = nullptr;
+ }
+}
+
+void SkBlockAllocator::addBlock(int minSize, int maxSize) {
+ SkASSERT(minSize > (int) sizeof(Block) && minSize <= maxSize);
+
+ // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
+ static constexpr int kMaxN = (1 << 23) - 1;
+ static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow
+
+ auto alignAllocSize = [](int size) {
+ // Round to a nice boundary since the block isn't maxing out:
+ // if allocSize > 32K, aligns on 4K boundary otherwise aligns on max_align_t, to play
+ // nicely with jeMalloc (from SkArenaAlloc).
+ int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
+ return (size + mask) & ~mask;
+ };
+
+ int allocSize;
+ void* mem = nullptr;
+ if (this->scratchBlockSize() >= minSize) {
+ // Activate the scratch block instead of making a new block
+ SkASSERT(fHead.fPrev->isScratch());
+ allocSize = fHead.fPrev->fSize;
+ mem = fHead.fPrev;
+ fHead.fPrev = nullptr;
+ } else if (minSize < maxSize) {
+ // Calculate the 'next' size per growth policy sequence
+ GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
+ int nextN1 = fN0 + fN1;
+ int nextN0;
+ if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
+ nextN0 = fN0;
+ } else if (gp == GrowthPolicy::kFibonacci) {
+ nextN0 = fN1;
+ } else {
+ SkASSERT(gp == GrowthPolicy::kExponential);
+ nextN0 = nextN1;
+ }
+ fN0 = std::min(kMaxN, nextN0);
+ fN1 = std::min(kMaxN, nextN1);
+
+ // However, must guard against overflow here, since all the size-based asserts prevented
+ // alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
+ int sizeIncrement = fBlockIncrement * kAddressAlign;
+ if (maxSize / sizeIncrement < nextN1) {
+ // The growth policy would overflow, so use the max. We've already confirmed that
+ // maxSize will be sufficient for the requested minimumSize
+ allocSize = maxSize;
+ } else {
+ allocSize = std::min(alignAllocSize(std::max(minSize, sizeIncrement * nextN1)),
+ maxSize);
+ }
+ } else {
+ SkASSERT(minSize == maxSize);
+ // Still align on a nice boundary, no max clamping since that would just undo the alignment
+ allocSize = alignAllocSize(minSize);
+ }
+
+ // Create new block and append to the linked list of blocks in this allocator
+ if (!mem) {
+ mem = operator new(allocSize);
+ }
+ fTail->fNext = new (mem) Block(fTail, allocSize);
+ fTail = fTail->fNext;
+}
+
+#ifdef SK_DEBUG
+void SkBlockAllocator::validate() const {
+ std::vector<const Block*> blocks;
+ const Block* prev = nullptr;
+ for (const Block* block : this->blocks()) {
+ blocks.push_back(block);
+
+ SkASSERT(kAssignedMarker == block->fSentinel);
+ if (block == &fHead) {
+ // The head blocks' fPrev may be non-null if it holds a scratch block, but that's not
+ // considered part of the linked list
+ SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
+ } else {
+ SkASSERT(prev == block->fPrev);
+ }
+ if (prev) {
+ SkASSERT(prev->fNext == block);
+ }
+
+ SkASSERT(block->fSize >= (int) sizeof(Block));
+ SkASSERT(block->fCursor >= kDataStart);
+ SkASSERT(block->fCursor <= block->fSize);
+
+ prev = block;
+ }
+ SkASSERT(prev == fTail);
+ SkASSERT(!blocks.empty());
+ SkASSERT(blocks[0] == &fHead);
+
+ // Confirm reverse iteration matches forward iteration
+ size_t j = blocks.size();
+ for (const Block* b : this->rblocks()) {
+ SkASSERT(b == blocks[j - 1]);
+ j--;
+ }
+ SkASSERT(j == 0);
+}
+#endif
diff --git a/gfx/skia/skia/src/base/SkBlockAllocator.h b/gfx/skia/skia/src/base/SkBlockAllocator.h
new file mode 100644
index 0000000000..02201c17d4
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkBlockAllocator.h
@@ -0,0 +1,754 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlockAllocator_DEFINED
+#define SkBlockAllocator_DEFINED
+
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "src/base/SkASAN.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <new>
+#include <type_traits>
+
+/**
+ * SkBlockAllocator provides low-level support for a block allocated arena with a dynamic tail that
+ * tracks space reservations within each block. Its APIs provide the ability to reserve space,
+ * resize reservations, and release reservations. It will automatically create new blocks if needed
+ * and destroy all remaining blocks when it is destructed. It assumes that anything allocated within
+ * its blocks has its destructors called externally. It is recommended that SkBlockAllocator is
+ * wrapped by a higher-level allocator that uses the low-level APIs to implement a simpler,
+ * purpose-focused API w/o having to worry as much about byte-level concerns.
+ *
+ * SkBlockAllocator has no limit to its total size, but each allocation is limited to 512MB (which
+ * should be sufficient for Skia's use cases). This upper allocation limit allows all internal
+ * operations to be performed using 'int' and avoid many overflow checks. Static asserts are used
+ * to ensure that those operations would not overflow when using the largest possible values.
+ *
+ * Possible use modes:
+ * 1. No upfront allocation, either on the stack or as a field
+ * SkBlockAllocator allocator(policy, heapAllocSize);
+ *
+ * 2. In-place new'd
+ * void* mem = operator new(totalSize);
+ * SkBlockAllocator* allocator = new (mem) SkBlockAllocator(policy, heapAllocSize,
+ * totalSize- sizeof(SkBlockAllocator));
+ * delete allocator;
+ *
+ * 3. Use SkSBlockAllocator to increase the preallocation size
+ * SkSBlockAllocator<1024> allocator(policy, heapAllocSize);
+ * sizeof(allocator) == 1024;
+ */
+// TODO(michaelludwig) - While API is different, this shares similarities to SkArenaAlloc and
+// SkFibBlockSizes, so we should work to integrate them.
+class SkBlockAllocator final : SkNoncopyable {
+public:
+ // Largest size that can be requested from allocate(), chosen because it's the largest pow-2
+ // that is less than int32_t::max()/2.
+ inline static constexpr int kMaxAllocationSize = 1 << 29;
+
+ enum class GrowthPolicy : int {
+ kFixed, // Next block size = N
+ kLinear, // = #blocks * N
+ kFibonacci, // = fibonacci(#blocks) * N
+ kExponential, // = 2^#blocks * N
+ kLast = kExponential
+ };
+ inline static constexpr int kGrowthPolicyCount = static_cast<int>(GrowthPolicy::kLast) + 1;
+
+ class Block final {
+ public:
+ ~Block();
+ void operator delete(void* p) { ::operator delete(p); }
+
+ // Return the maximum allocation size with the given alignment that can fit in this block.
+ template <size_t Align = 1, size_t Padding = 0>
+ int avail() const { return std::max(0, fSize - this->cursor<Align, Padding>()); }
+
+ // Return the aligned offset of the first allocation, assuming it was made with the
+ // specified Align, and Padding. The returned offset does not mean a valid allocation
+ // starts at that offset, this is a utility function for classes built on top to manage
+ // indexing into a block effectively.
+ template <size_t Align = 1, size_t Padding = 0>
+ int firstAlignedOffset() const { return this->alignedOffset<Align, Padding>(kDataStart); }
+
+ // Convert an offset into this block's storage into a usable pointer.
+ void* ptr(int offset) {
+ SkASSERT(offset >= kDataStart && offset < fSize);
+ return reinterpret_cast<char*>(this) + offset;
+ }
+ const void* ptr(int offset) const { return const_cast<Block*>(this)->ptr(offset); }
+
+ // Every block has an extra 'int' for clients to use however they want. It will start
+ // at 0 when a new block is made, or when the head block is reset.
+ int metadata() const { return fMetadata; }
+ void setMetadata(int value) { fMetadata = value; }
+
+ /**
+ * Release the byte range between offset 'start' (inclusive) and 'end' (exclusive). This
+ * will return true if those bytes were successfully reclaimed, i.e. a subsequent allocation
+ * request could occupy the space. Regardless of return value, the provided byte range that
+ * [start, end) represents should not be used until it's re-allocated with allocate<...>().
+ */
+ inline bool release(int start, int end);
+
+ /**
+ * Resize a previously reserved byte range of offset 'start' (inclusive) to 'end'
+ * (exclusive). 'deltaBytes' is the SIGNED change to length of the reservation.
+ *
+ * When negative this means the reservation is shrunk and the new length is (end - start -
+ * |deltaBytes|). If this new length would be 0, the byte range can no longer be used (as if
+ * it were released instead). Asserts that it would not shrink the reservation below 0.
+ *
+ * If 'deltaBytes' is positive, the allocator attempts to increase the length of the
+ * reservation. If 'deltaBytes' is less than or equal to avail() and it was the last
+ * allocation in the block, it can be resized. If there is not enough available bytes to
+ * accommodate the increase in size, or another allocation is blocking the increase in size,
+ * then false will be returned and the reserved byte range is unmodified.
+ */
+ inline bool resize(int start, int end, int deltaBytes);
+
+ private:
+ friend class SkBlockAllocator;
+
+ Block(Block* prev, int allocationSize);
+
+ // We poison the unallocated space in a Block to allow ASAN to catch invalid writes.
+ void poisonRange(int start, int end) {
+ sk_asan_poison_memory_region(reinterpret_cast<char*>(this) + start, end - start);
+ }
+ void unpoisonRange(int start, int end) {
+ sk_asan_unpoison_memory_region(reinterpret_cast<char*>(this) + start, end - start);
+ }
+
+ // Get fCursor, but aligned such that ptr(rval) satisfies Align.
+ template <size_t Align, size_t Padding>
+ int cursor() const { return this->alignedOffset<Align, Padding>(fCursor); }
+
+ template <size_t Align, size_t Padding>
+ int alignedOffset(int offset) const;
+
+ bool isScratch() const { return fCursor < 0; }
+ void markAsScratch() {
+ fCursor = -1;
+ this->poisonRange(kDataStart, fSize);
+ }
+
+ SkDEBUGCODE(uint32_t fSentinel;) // known value to check for bad back pointers to blocks
+
+ Block* fNext; // doubly-linked list of blocks
+ Block* fPrev;
+
+ // Each block tracks its own cursor because as later blocks are released, an older block
+ // may become the active tail again.
+ int fSize; // includes the size of the BlockHeader and requested metadata
+ int fCursor; // (this + fCursor) points to next available allocation
+ int fMetadata;
+
+ // On release builds, a Block's other 2 pointers and 3 int fields leaves 4 bytes of padding
+ // for 8 and 16 aligned systems. Currently this is only manipulated in the head block for
+ // an allocator-level metadata and is explicitly not reset when the head block is "released"
+ // Down the road we could instead choose to offer multiple metadata slots per block.
+ int fAllocatorMetadata;
+ };
+
+ // Tuple representing a range of bytes, marking the unaligned start, the first aligned point
+ // after any padding, and the upper limit depending on requested size.
+ struct ByteRange {
+ Block* fBlock; // Owning block
+ int fStart; // Inclusive byte lower limit of byte range
+ int fAlignedOffset; // >= start, matching alignment requirement (i.e. first real byte)
+ int fEnd; // Exclusive upper limit of byte range
+ };
+
+ // The size of the head block is determined by 'additionalPreallocBytes'. Subsequent heap blocks
+ // are determined by 'policy' and 'blockIncrementBytes', although 'blockIncrementBytes' will be
+ // aligned to std::max_align_t.
+ //
+ // When 'additionalPreallocBytes' > 0, the allocator assumes that many extra bytes immediately
+ // after the allocator can be used by its inline head block. This is useful when the allocator
+ // is in-place new'ed into a larger block of memory, but it should remain set to 0 if stack
+ // allocated or if the class layout does not guarantee that space is present.
+ SkBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
+ size_t additionalPreallocBytes = 0);
+
+ ~SkBlockAllocator() { this->reset(); }
+ void operator delete(void* p) { ::operator delete(p); }
+
+ /**
+ * Helper to calculate the minimum number of bytes needed for heap block size, under the
+ * assumption that Align will be the requested alignment of the first call to allocate().
+ * Ex. To store N instances of T in a heap block, the 'blockIncrementBytes' should be set to
+ * BlockOverhead<alignof(T)>() + N * sizeof(T) when making the SkBlockAllocator.
+ */
+ template<size_t Align = 1, size_t Padding = 0>
+ static constexpr size_t BlockOverhead();
+
+ /**
+ * Helper to calculate the minimum number of bytes needed for a preallocation, under the
+ * assumption that Align will be the requested alignment of the first call to allocate().
+ * Ex. To preallocate a SkSBlockAllocator to hold N instances of T, its arge should be
+ * Overhead<alignof(T)>() + N * sizeof(T)
+ */
+ template<size_t Align = 1, size_t Padding = 0>
+ static constexpr size_t Overhead();
+
+ /**
+ * Return the total number of bytes of the allocator, including its instance overhead, per-block
+ * overhead and space used for allocations.
+ */
+ size_t totalSize() const;
+ /**
+ * Return the total number of bytes usable for allocations. This includes bytes that have
+ * been reserved already by a call to allocate() and bytes that are still available. It is
+ * totalSize() minus all allocator and block-level overhead.
+ */
+ size_t totalUsableSpace() const;
+ /**
+ * Return the total number of usable bytes that have been reserved by allocations. This will
+ * be less than or equal to totalUsableSpace().
+ */
+ size_t totalSpaceInUse() const;
+
+ /**
+ * Return the total number of bytes that were pre-allocated for the SkBlockAllocator. This will
+ * include 'additionalPreallocBytes' passed to the constructor, and represents what the total
+ * size would become after a call to reset().
+ */
+ size_t preallocSize() const {
+ // Don't double count fHead's Block overhead in both sizeof(SkBlockAllocator) and fSize.
+ return sizeof(SkBlockAllocator) + fHead.fSize - BaseHeadBlockSize();
+ }
+ /**
+ * Return the usable size of the inline head block; this will be equal to
+ * 'additionalPreallocBytes' plus any alignment padding that the system had to add to Block.
+ * The returned value represents what could be allocated before a heap block is be created.
+ */
+ size_t preallocUsableSpace() const {
+ return fHead.fSize - kDataStart;
+ }
+
+ /**
+ * Get the current value of the allocator-level metadata (a user-oriented slot). This is
+ * separate from any block-level metadata, but can serve a similar purpose to compactly support
+ * data collections on top of SkBlockAllocator.
+ */
+ int metadata() const { return fHead.fAllocatorMetadata; }
+
+ /**
+ * Set the current value of the allocator-level metadata.
+ */
+ void setMetadata(int value) { fHead.fAllocatorMetadata = value; }
+
+ /**
+ * Reserve space that will hold 'size' bytes. This will automatically allocate a new block if
+ * there is not enough available space in the current block to provide 'size' bytes. The
+ * returned ByteRange tuple specifies the Block owning the reserved memory, the full byte range,
+ * and the aligned offset within that range to use for the user-facing pointer. The following
+ * invariants hold:
+ *
+ * 1. block->ptr(alignedOffset) is aligned to Align
+ * 2. end - alignedOffset == size
+ * 3. Padding <= alignedOffset - start <= Padding + Align - 1
+ *
+ * Invariant #3, when Padding > 0, allows intermediate allocators to embed metadata along with
+ * the allocations. If the Padding bytes are used for some 'struct Meta', then
+ * ptr(alignedOffset - sizeof(Meta)) can be safely used as a Meta* if Meta's alignment
+ * requirements are less than or equal to the alignment specified in allocate<>. This can be
+ * easily guaranteed by using the pattern:
+ *
+ * allocate<max(UserAlign, alignof(Meta)), sizeof(Meta)>(userSize);
+ *
+ * This ensures that ptr(alignedOffset) will always satisfy UserAlign and
+ * ptr(alignedOffset - sizeof(Meta)) will always satisfy alignof(Meta). Alternatively, memcpy
+ * can be used to read and write values between start and alignedOffset without worrying about
+ * alignment requirements of the metadata.
+ *
+ * For over-aligned allocations, the alignedOffset (as an int) may not be a multiple of Align,
+ * but the result of ptr(alignedOffset) will be a multiple of Align.
+ */
+ template <size_t Align, size_t Padding = 0>
+ ByteRange allocate(size_t size);
+
+ enum ReserveFlags : unsigned {
+ // If provided to reserve(), the input 'size' will be rounded up to the next size determined
+ // by the growth policy of the SkBlockAllocator. If not, 'size' will be aligned to max_align
+ kIgnoreGrowthPolicy_Flag = 0b01,
+ // If provided to reserve(), the number of available bytes of the current block will not
+ // be used to satisfy the reservation (assuming the contiguous range was long enough to
+ // begin with).
+ kIgnoreExistingBytes_Flag = 0b10,
+
+ kNo_ReserveFlags = 0b00
+ };
+
+ /**
+ * Ensure the block allocator has 'size' contiguous available bytes. After calling this
+ * function, currentBlock()->avail<Align, Padding>() may still report less than 'size' if the
+ * reserved space was added as a scratch block. This is done so that anything remaining in
+ * the current block can still be used if a smaller-than-size allocation is requested. If 'size'
+ * is requested by a subsequent allocation, the scratch block will automatically be activated
+ * and the request will not itself trigger any malloc.
+ *
+ * The optional 'flags' controls how the input size is allocated; by default it will attempt
+ * to use available contiguous bytes in the current block and will respect the growth policy
+ * of the allocator.
+ */
+ template <size_t Align = 1, size_t Padding = 0>
+ void reserve(size_t size, ReserveFlags flags = kNo_ReserveFlags);
+
+ /**
+ * Return a pointer to the start of the current block. This will never be null.
+ */
+ const Block* currentBlock() const { return fTail; }
+ Block* currentBlock() { return fTail; }
+
+ const Block* headBlock() const { return &fHead; }
+ Block* headBlock() { return &fHead; }
+
+ /**
+ * Return the block that owns the allocated 'ptr'. Assuming that earlier, an allocation was
+ * returned as {b, start, alignedOffset, end}, and 'p = b->ptr(alignedOffset)', then a call
+ * to 'owningBlock<Align, Padding>(p, start) == b'.
+ *
+ * If calling code has already made a pointer to their metadata, i.e. 'm = p - Padding', then
+ * 'owningBlock<Align, 0>(m, start)' will also return b, allowing you to recover the block from
+ * the metadata pointer.
+ *
+ * If calling code has access to the original alignedOffset, this function should not be used
+ * since the owning block is just 'p - alignedOffset', regardless of original Align or Padding.
+ */
+ template <size_t Align, size_t Padding = 0>
+ Block* owningBlock(const void* ptr, int start);
+
+ template <size_t Align, size_t Padding = 0>
+ const Block* owningBlock(const void* ptr, int start) const {
+ return const_cast<SkBlockAllocator*>(this)->owningBlock<Align, Padding>(ptr, start);
+ }
+
+ /**
+ * Find the owning block of the allocated pointer, 'p'. Without any additional information this
+ * is O(N) on the number of allocated blocks.
+ */
+ Block* findOwningBlock(const void* ptr);
+ const Block* findOwningBlock(const void* ptr) const {
+ return const_cast<SkBlockAllocator*>(this)->findOwningBlock(ptr);
+ }
+
+ /**
+ * Explicitly free an entire block, invalidating any remaining allocations from the block.
+ * SkBlockAllocator will release all alive blocks automatically when it is destroyed, but this
+ * function can be used to reclaim memory over the lifetime of the allocator. The provided
+ * 'block' pointer must have previously come from a call to currentBlock() or allocate().
+ *
+ * If 'block' represents the inline-allocated head block, its cursor and metadata are instead
+ * reset to their defaults.
+ *
+ * If the block is not the head block, it may be kept as a scratch block to be reused for
+ * subsequent allocation requests, instead of making an entirely new block. A scratch block is
+ * not visible when iterating over blocks but is reported in the total size of the allocator.
+ */
+ void releaseBlock(Block* block);
+
+ /**
+ * Detach every heap-allocated block owned by 'other' and concatenate them to this allocator's
+ * list of blocks. This memory is now managed by this allocator. Since this only transfers
+ * ownership of a Block, and a Block itself does not move, any previous allocations remain
+ * valid and associated with their original Block instances. SkBlockAllocator-level functions
+ * that accept allocated pointers (e.g. findOwningBlock), must now use this allocator and not
+ * 'other' for these allocations.
+ *
+ * The head block of 'other' cannot be stolen, so higher-level allocators and memory structures
+ * must handle that data differently.
+ */
+ void stealHeapBlocks(SkBlockAllocator* other);
+
+ /**
+ * Explicitly free all blocks (invalidating all allocations), and resets the head block to its
+ * default state. The allocator-level metadata is reset to 0 as well.
+ */
+ void reset();
+
+ /**
+ * Remove any reserved scratch space, either from calling reserve() or releaseBlock().
+ */
+ void resetScratchSpace();
+
+ template <bool Forward, bool Const> class BlockIter;
+
+ /**
+ * Clients can iterate over all active Blocks in the SkBlockAllocator using for loops:
+ *
+ * Forward iteration from head to tail block (or non-const variant):
+ * for (const Block* b : this->blocks()) { }
+ * Reverse iteration from tail to head block:
+ * for (const Block* b : this->rblocks()) { }
+ *
+ * It is safe to call releaseBlock() on the active block while looping.
+ */
+ inline BlockIter<true, false> blocks();
+ inline BlockIter<true, true> blocks() const;
+ inline BlockIter<false, false> rblocks();
+ inline BlockIter<false, true> rblocks() const;
+
+#ifdef SK_DEBUG
+ inline static constexpr uint32_t kAssignedMarker = 0xBEEFFACE;
+ inline static constexpr uint32_t kFreedMarker = 0xCAFEBABE;
+
+ void validate() const;
+#endif
+
+private:
+ friend class BlockAllocatorTestAccess;
+ friend class TBlockListTestAccess;
+
+ inline static constexpr int kDataStart = sizeof(Block);
+ #ifdef SK_FORCE_8_BYTE_ALIGNMENT
+ // This is an issue for WASM builds using emscripten, which had std::max_align_t = 16, but
+ // was returning pointers only aligned to 8 bytes.
+ // https://github.com/emscripten-core/emscripten/issues/10072
+ //
+ // Setting this to 8 will let SkBlockAllocator properly correct for the pointer address if
+ // a 16-byte aligned allocation is requested in wasm (unlikely since we don't use long
+ // doubles).
+ inline static constexpr size_t kAddressAlign = 8;
+ #else
+ // The alignment Block addresses will be at when created using operator new
+ // (spec-compliant is pointers are aligned to max_align_t).
+ inline static constexpr size_t kAddressAlign = alignof(std::max_align_t);
+ #endif
+
+ // Calculates the size of a new Block required to store a kMaxAllocationSize request for the
+ // given alignment and padding bytes. Also represents maximum valid fCursor value in a Block.
+ template<size_t Align, size_t Padding>
+ static constexpr size_t MaxBlockSize();
+
+ static constexpr int BaseHeadBlockSize() {
+ return sizeof(SkBlockAllocator) - offsetof(SkBlockAllocator, fHead);
+ }
+
+ // Append a new block to the end of the block linked list, updating fTail. 'minSize' must
+ // have enough room for sizeof(Block). 'maxSize' is the upper limit of fSize for the new block
+ // that will preserve the static guarantees SkBlockAllocator makes.
+ void addBlock(int minSize, int maxSize);
+
+ int scratchBlockSize() const { return fHead.fPrev ? fHead.fPrev->fSize : 0; }
+
+ Block* fTail; // All non-head blocks are heap allocated; tail will never be null.
+
+ // All remaining state is packed into 64 bits to keep SkBlockAllocator at 16 bytes + head block
+ // (on a 64-bit system).
+
+ // Growth of the block size is controlled by four factors: BlockIncrement, N0 and N1, and a
+ // policy defining how N0 is updated. When a new block is needed, we calculate N1' = N0 + N1.
+ // Depending on the policy, N0' = N0 (no growth or linear growth), or N0' = N1 (Fibonacci), or
+ // N0' = N1' (exponential). The size of the new block is N1' * BlockIncrement * MaxAlign,
+ // after which fN0 and fN1 store N0' and N1' clamped into 23 bits. With current bit allocations,
+ // N1' is limited to 2^24, and assuming MaxAlign=16, then BlockIncrement must be '2' in order to
+ // eventually reach the hard 2^29 size limit of SkBlockAllocator.
+
+ // Next heap block size = (fBlockIncrement * alignof(std::max_align_t) * (fN0 + fN1))
+ uint64_t fBlockIncrement : 16;
+ uint64_t fGrowthPolicy : 2; // GrowthPolicy
+ uint64_t fN0 : 23; // = 1 for linear/exp.; = 0 for fixed/fibonacci, initially
+ uint64_t fN1 : 23; // = 1 initially
+
+ // Inline head block, must be at the end so that it can utilize any additional reserved space
+ // from the initial allocation.
+ // The head block's prev pointer may be non-null, which signifies a scratch block that may be
+ // reused instead of allocating an entirely new block (this helps when allocate+release calls
+ // bounce back and forth across the capacity of a block).
+ alignas(kAddressAlign) Block fHead;
+
+ static_assert(kGrowthPolicyCount <= 4);
+};
+
+// A wrapper around SkBlockAllocator that includes preallocated storage for the head block.
+// N will be the preallocSize() reported by the allocator.
+template<size_t N>
+class SkSBlockAllocator : SkNoncopyable {
+public:
+ using GrowthPolicy = SkBlockAllocator::GrowthPolicy;
+
+ SkSBlockAllocator() {
+ new (fStorage) SkBlockAllocator(GrowthPolicy::kFixed, N, N - sizeof(SkBlockAllocator));
+ }
+ explicit SkSBlockAllocator(GrowthPolicy policy) {
+ new (fStorage) SkBlockAllocator(policy, N, N - sizeof(SkBlockAllocator));
+ }
+
+ SkSBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes) {
+ new (fStorage) SkBlockAllocator(policy, blockIncrementBytes, N - sizeof(SkBlockAllocator));
+ }
+
+ ~SkSBlockAllocator() {
+ this->allocator()->~SkBlockAllocator();
+ }
+
+ SkBlockAllocator* operator->() { return this->allocator(); }
+ const SkBlockAllocator* operator->() const { return this->allocator(); }
+
+ SkBlockAllocator* allocator() { return reinterpret_cast<SkBlockAllocator*>(fStorage); }
+ const SkBlockAllocator* allocator() const {
+ return reinterpret_cast<const SkBlockAllocator*>(fStorage);
+ }
+
+private:
+ static_assert(N >= sizeof(SkBlockAllocator));
+
+ // Will be used to placement new the allocator
+ alignas(SkBlockAllocator) char fStorage[N];
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Template and inline implementations
+
+SK_MAKE_BITFIELD_OPS(SkBlockAllocator::ReserveFlags)
+
+template<size_t Align, size_t Padding>
+constexpr size_t SkBlockAllocator::BlockOverhead() {
+ static_assert(SkAlignTo(kDataStart + Padding, Align) >= sizeof(Block));
+ return SkAlignTo(kDataStart + Padding, Align);
+}
+
+template<size_t Align, size_t Padding>
+constexpr size_t SkBlockAllocator::Overhead() {
+ // NOTE: On most platforms, SkBlockAllocator is packed; this is not the case on debug builds
+ // due to extra fields, or on WASM due to 4byte pointers but 16byte max align.
+ return std::max(sizeof(SkBlockAllocator),
+ offsetof(SkBlockAllocator, fHead) + BlockOverhead<Align, Padding>());
+}
+
+template<size_t Align, size_t Padding>
+constexpr size_t SkBlockAllocator::MaxBlockSize() {
+ // Without loss of generality, assumes 'align' will be the largest encountered alignment for the
+ // allocator (if it's not, the largest align will be encountered by the compiler and pass/fail
+ // the same set of static asserts).
+ return BlockOverhead<Align, Padding>() + kMaxAllocationSize;
+}
+
+template<size_t Align, size_t Padding>
+void SkBlockAllocator::reserve(size_t size, ReserveFlags flags) {
+ if (size > kMaxAllocationSize) {
+ SK_ABORT("Allocation too large (%zu bytes requested)", size);
+ }
+ int iSize = (int) size;
+ if ((flags & kIgnoreExistingBytes_Flag) ||
+ this->currentBlock()->avail<Align, Padding>() < iSize) {
+
+ int blockSize = BlockOverhead<Align, Padding>() + iSize;
+ int maxSize = (flags & kIgnoreGrowthPolicy_Flag) ? blockSize
+ : MaxBlockSize<Align, Padding>();
+ SkASSERT((size_t) maxSize <= (MaxBlockSize<Align, Padding>()));
+
+ SkDEBUGCODE(auto oldTail = fTail;)
+ this->addBlock(blockSize, maxSize);
+ SkASSERT(fTail != oldTail);
+ // Releasing the just added block will move it into scratch space, allowing the original
+ // tail's bytes to be used first before the scratch block is activated.
+ this->releaseBlock(fTail);
+ }
+}
+
+template <size_t Align, size_t Padding>
+SkBlockAllocator::ByteRange SkBlockAllocator::allocate(size_t size) {
+ // Amount of extra space for a new block to make sure the allocation can succeed.
+ static constexpr int kBlockOverhead = (int) BlockOverhead<Align, Padding>();
+
+ // Ensures 'offset' and 'end' calculations will be valid
+ static_assert((kMaxAllocationSize + SkAlignTo(MaxBlockSize<Align, Padding>(), Align))
+ <= (size_t) std::numeric_limits<int32_t>::max());
+ // Ensures size + blockOverhead + addBlock's alignment operations will be valid
+ static_assert(kMaxAllocationSize + kBlockOverhead + ((1 << 12) - 1) // 4K align for large blocks
+ <= std::numeric_limits<int32_t>::max());
+
+ if (size > kMaxAllocationSize) {
+ SK_ABORT("Allocation too large (%zu bytes requested)", size);
+ }
+
+ int iSize = (int) size;
+ int offset = fTail->cursor<Align, Padding>();
+ int end = offset + iSize;
+ if (end > fTail->fSize) {
+ this->addBlock(iSize + kBlockOverhead, MaxBlockSize<Align, Padding>());
+ offset = fTail->cursor<Align, Padding>();
+ end = offset + iSize;
+ }
+
+ // Check invariants
+ SkASSERT(end <= fTail->fSize);
+ SkASSERT(end - offset == iSize);
+ SkASSERT(offset - fTail->fCursor >= (int) Padding &&
+ offset - fTail->fCursor <= (int) (Padding + Align - 1));
+ SkASSERT(reinterpret_cast<uintptr_t>(fTail->ptr(offset)) % Align == 0);
+
+ int start = fTail->fCursor;
+ fTail->fCursor = end;
+
+ fTail->unpoisonRange(offset - Padding, end);
+
+ return {fTail, start, offset, end};
+}
+
+template <size_t Align, size_t Padding>
+SkBlockAllocator::Block* SkBlockAllocator::owningBlock(const void* p, int start) {
+ // 'p' was originally formed by aligning 'block + start + Padding', producing the inequality:
+ // block + start + Padding <= p <= block + start + Padding + Align-1
+ // Rearranging this yields:
+ // block <= p - start - Padding <= block + Align-1
+ // Masking these terms by ~(Align-1) reconstructs 'block' if the alignment of the block is
+ // greater than or equal to Align (since block & ~(Align-1) == (block + Align-1) & ~(Align-1)
+ // in that case). Overalignment does not reduce to inequality unfortunately.
+ if /* constexpr */ (Align <= kAddressAlign) {
+ Block* block = reinterpret_cast<Block*>(
+ (reinterpret_cast<uintptr_t>(p) - start - Padding) & ~(Align - 1));
+ SkASSERT(block->fSentinel == kAssignedMarker);
+ return block;
+ } else {
+ // There's not a constant-time expression available to reconstruct the block from 'p',
+ // but this is unlikely to happen frequently.
+ return this->findOwningBlock(p);
+ }
+}
+
+template <size_t Align, size_t Padding>
+int SkBlockAllocator::Block::alignedOffset(int offset) const {
+ static_assert(SkIsPow2(Align));
+ // Aligning adds (Padding + Align - 1) as an intermediate step, so ensure that can't overflow
+ static_assert(MaxBlockSize<Align, Padding>() + Padding + Align - 1
+ <= (size_t) std::numeric_limits<int32_t>::max());
+
+ if /* constexpr */ (Align <= kAddressAlign) {
+ // Same as SkAlignTo, but operates on ints instead of size_t
+ return (offset + Padding + Align - 1) & ~(Align - 1);
+ } else {
+ // Must take into account that 'this' may be starting at a pointer that doesn't satisfy the
+ // larger alignment request, so must align the entire pointer, not just offset
+ uintptr_t blockPtr = reinterpret_cast<uintptr_t>(this);
+ uintptr_t alignedPtr = (blockPtr + offset + Padding + Align - 1) & ~(Align - 1);
+ SkASSERT(alignedPtr - blockPtr <= (uintptr_t) std::numeric_limits<int32_t>::max());
+ return (int) (alignedPtr - blockPtr);
+ }
+}
+
+bool SkBlockAllocator::Block::resize(int start, int end, int deltaBytes) {
+ SkASSERT(fSentinel == kAssignedMarker);
+ SkASSERT(start >= kDataStart && end <= fSize && start < end);
+
+ if (deltaBytes > kMaxAllocationSize || deltaBytes < -kMaxAllocationSize) {
+ // Cannot possibly satisfy the resize and could overflow subsequent math
+ return false;
+ }
+ if (fCursor == end) {
+ int nextCursor = end + deltaBytes;
+ SkASSERT(nextCursor >= start);
+ // We still check nextCursor >= start for release builds that wouldn't assert.
+ if (nextCursor <= fSize && nextCursor >= start) {
+ if (nextCursor < fCursor) {
+ // The allocation got smaller; poison the space that can no longer be used.
+ this->poisonRange(nextCursor + 1, end);
+ } else {
+ // The allocation got larger; unpoison the space that can now be used.
+ this->unpoisonRange(end, nextCursor);
+ }
+
+ fCursor = nextCursor;
+ return true;
+ }
+ }
+ return false;
+}
+
+// NOTE: release is equivalent to resize(start, end, start - end), and the compiler can optimize
+// most of the operations away, but it wasn't able to remove the unnecessary branch comparing the
+// new cursor to the block size or old start, so release() gets a specialization.
+bool SkBlockAllocator::Block::release(int start, int end) {
+ SkASSERT(fSentinel == kAssignedMarker);
+ SkASSERT(start >= kDataStart && end <= fSize && start < end);
+
+ this->poisonRange(start, end);
+
+ if (fCursor == end) {
+ fCursor = start;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+///////// Block iteration
+template <bool Forward, bool Const>
+class SkBlockAllocator::BlockIter {
+private:
+ using BlockT = typename std::conditional<Const, const Block, Block>::type;
+ using AllocatorT =
+ typename std::conditional<Const, const SkBlockAllocator, SkBlockAllocator>::type;
+
+public:
+ BlockIter(AllocatorT* allocator) : fAllocator(allocator) {}
+
+ class Item {
+ public:
+ bool operator!=(const Item& other) const { return fBlock != other.fBlock; }
+
+ BlockT* operator*() const { return fBlock; }
+
+ Item& operator++() {
+ this->advance(fNext);
+ return *this;
+ }
+
+ private:
+ friend BlockIter;
+
+ Item(BlockT* block) { this->advance(block); }
+
+ void advance(BlockT* block) {
+ fBlock = block;
+ fNext = block ? (Forward ? block->fNext : block->fPrev) : nullptr;
+ if (!Forward && fNext && fNext->isScratch()) {
+ // For reverse-iteration only, we need to stop at the head, not the scratch block
+ // possibly stashed in head->prev.
+ fNext = nullptr;
+ }
+ SkASSERT(!fNext || !fNext->isScratch());
+ }
+
+ BlockT* fBlock;
+ // Cache this before operator++ so that fBlock can be released during iteration
+ BlockT* fNext;
+ };
+
+ Item begin() const { return Item(Forward ? &fAllocator->fHead : fAllocator->fTail); }
+ Item end() const { return Item(nullptr); }
+
+private:
+ AllocatorT* fAllocator;
+};
+
+SkBlockAllocator::BlockIter<true, false> SkBlockAllocator::blocks() {
+ return BlockIter<true, false>(this);
+}
+SkBlockAllocator::BlockIter<true, true> SkBlockAllocator::blocks() const {
+ return BlockIter<true, true>(this);
+}
+SkBlockAllocator::BlockIter<false, false> SkBlockAllocator::rblocks() {
+ return BlockIter<false, false>(this);
+}
+SkBlockAllocator::BlockIter<false, true> SkBlockAllocator::rblocks() const {
+ return BlockIter<false, true>(this);
+}
+
+#endif // SkBlockAllocator_DEFINED
diff --git a/gfx/skia/skia/src/base/SkBuffer.cpp b/gfx/skia/skia/src/base/SkBuffer.cpp
new file mode 100644
index 0000000000..bb39782215
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkBuffer.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkBuffer.h"
+
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkMalloc.h"
+
+#include <cstdint>
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+const void* SkRBuffer::skip(size_t size) {
+ if (fValid && size <= this->available()) {
+ const void* pos = fPos;
+ fPos += size;
+ return pos;
+ }
+ fValid = false;
+ return nullptr;
+}
+
+bool SkRBuffer::read(void* buffer, size_t size) {
+ if (const void* src = this->skip(size)) {
+ sk_careful_memcpy(buffer, src, size);
+ return true;
+ }
+ return false;
+}
+
+bool SkRBuffer::skipToAlign4() {
+ intptr_t pos = reinterpret_cast<intptr_t>(fPos);
+ size_t n = SkAlign4(pos) - pos;
+ if (fValid && n <= this->available()) {
+ fPos += n;
+ return true;
+ } else {
+ fValid = false;
+ return false;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void* SkWBuffer::skip(size_t size) {
+ void* result = fPos;
+ writeNoSizeCheck(nullptr, size);
+ return fData == nullptr ? nullptr : result;
+}
+
+void SkWBuffer::writeNoSizeCheck(const void* buffer, size_t size) {
+ SkASSERT(fData == nullptr || fStop == nullptr || fPos + size <= fStop);
+ if (fData && buffer) {
+ sk_careful_memcpy(fPos, buffer, size);
+ }
+ fPos += size;
+}
+
+size_t SkWBuffer::padToAlign4() {
+ size_t pos = this->pos();
+ size_t n = SkAlign4(pos) - pos;
+
+ if (n && fData)
+ {
+ char* p = fPos;
+ char* stop = p + n;
+ do {
+ *p++ = 0;
+ } while (p < stop);
+ }
+ fPos += n;
+ return n;
+}
+
+#if 0
+#ifdef SK_DEBUG
+ static void AssertBuffer32(const void* buffer)
+ {
+ SkASSERT(buffer);
+ SkASSERT(((size_t)buffer & 3) == 0);
+ }
+#else
+ #define AssertBuffer32(buffer)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkBuffer.h b/gfx/skia/skia/src/base/SkBuffer.h
new file mode 100644
index 0000000000..b30fda499d
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkBuffer.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBuffer_DEFINED
+#define SkBuffer_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "src/base/SkSafeMath.h"
+
+#include <cstddef>
+#include <cstdint>
+
+typedef float SkScalar;
+
+/** \class SkRBuffer
+
+ Light weight class for reading data from a memory block.
+ The RBuffer is given the buffer to read from, with either a specified size
+ or no size (in which case no range checking is performed). It is illegal
+ to attempt to read a value from an empty RBuffer (data == null).
+*/
+class SkRBuffer : SkNoncopyable {
+public:
+ SkRBuffer() : fData(nullptr), fPos(nullptr), fStop(nullptr) {}
+
+ /** Initialize RBuffer with a data pointer and length.
+ */
+ SkRBuffer(const void* data, size_t size) {
+ SkASSERT(data != nullptr || size == 0);
+ fData = (const char*)data;
+ fPos = (const char*)data;
+ fStop = (const char*)data + size;
+ }
+
+ /** Return the number of bytes that have been read from the beginning
+ of the data pointer.
+ */
+ size_t pos() const { return fPos - fData; }
+ /** Return the total size of the data pointer. Only defined if the length was
+ specified in the constructor or in a call to reset().
+ */
+ size_t size() const { return fStop - fData; }
+ /** Return true if the buffer has read to the end of the data pointer.
+ Only defined if the length was specified in the constructor or in a call
+ to reset(). Always returns true if the length was not specified.
+ */
+ bool eof() const { return fPos >= fStop; }
+
+ size_t available() const { return fStop - fPos; }
+
+ bool isValid() const { return fValid; }
+
+ /** Read the specified number of bytes from the data pointer. If buffer is not
+ null, copy those bytes into buffer.
+ */
+ bool read(void* buffer, size_t size);
+ bool skipToAlign4();
+
+ bool readU8(uint8_t* x) { return this->read(x, 1); }
+ bool readS32(int32_t* x) { return this->read(x, 4); }
+ bool readU32(uint32_t* x) { return this->read(x, 4); }
+
+ // returns nullptr on failure
+ const void* skip(size_t bytes);
+ template <typename T> const T* skipCount(size_t count) {
+ return static_cast<const T*>(this->skip(SkSafeMath::Mul(count, sizeof(T))));
+ }
+
+private:
+ const char* fData;
+ const char* fPos;
+ const char* fStop;
+ bool fValid = true;
+};
+
+/** \class SkWBuffer
+
+ Light weight class for writing data to a memory block.
+ The WBuffer is given the buffer to write into, with either a specified size
+ or no size, in which case no range checking is performed. An empty WBuffer
+ is legal, in which case no data is ever written, but the relative pos()
+ is updated.
+*/
+class SkWBuffer : SkNoncopyable {
+public:
+ SkWBuffer() : fData(nullptr), fPos(nullptr), fStop(nullptr) {}
+ SkWBuffer(void* data) { reset(data); }
+ SkWBuffer(void* data, size_t size) { reset(data, size); }
+
+ void reset(void* data) {
+ fData = (char*)data;
+ fPos = (char*)data;
+ fStop = nullptr; // no bounds checking
+ }
+
+ void reset(void* data, size_t size) {
+ SkASSERT(data != nullptr || size == 0);
+ fData = (char*)data;
+ fPos = (char*)data;
+ fStop = (char*)data + size;
+ }
+
+ size_t pos() const { return fPos - fData; }
+ void* skip(size_t size); // return start of skipped data
+
+ void write(const void* buffer, size_t size) {
+ if (size) {
+ this->writeNoSizeCheck(buffer, size);
+ }
+ }
+
+ size_t padToAlign4();
+
+ void writePtr(const void* x) { this->writeNoSizeCheck(&x, sizeof(x)); }
+ void writeScalar(SkScalar x) { this->writeNoSizeCheck(&x, 4); }
+ void write32(int32_t x) { this->writeNoSizeCheck(&x, 4); }
+ void write16(int16_t x) { this->writeNoSizeCheck(&x, 2); }
+ void write8(int8_t x) { this->writeNoSizeCheck(&x, 1); }
+ void writeBool(bool x) { this->write8(x); }
+
+private:
+ void writeNoSizeCheck(const void* buffer, size_t size);
+
+ char* fData;
+ char* fPos;
+ char* fStop;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkContainers.cpp b/gfx/skia/skia/src/base/SkContainers.cpp
new file mode 100644
index 0000000000..1e36a76ec4
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkContainers.cpp
@@ -0,0 +1,107 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "include/private/base/SkContainers.h"
+
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkFeatures.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTo.h"
+
+#include <algorithm>
+#include <cstddef>
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+#include <malloc/malloc.h>
+#elif defined(SK_BUILD_FOR_ANDROID) || (defined(SK_BUILD_FOR_UNIX) && !defined(__OpenBSD__))
+#include <malloc.h>
+#elif defined(SK_BUILD_FOR_WIN)
+#include <malloc.h>
+#endif
+
+namespace {
+// Return at least as many bytes to keep malloc aligned.
+constexpr size_t kMinBytes = alignof(max_align_t);
+
+SkSpan<std::byte> complete_size(void* ptr, size_t size) {
+ if (ptr == nullptr) {
+ return {};
+ }
+
+ size_t completeSize = size;
+
+ // Use the OS specific calls to find the actual capacity.
+ #if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+ // TODO: remove the max, when the chrome implementation of malloc_size doesn't return 0.
+ completeSize = std::max(malloc_size(ptr), size);
+ #elif defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 17
+ completeSize = malloc_usable_size(ptr);
+ SkASSERT(completeSize >= size);
+ #elif defined(SK_BUILD_FOR_UNIX) && !defined(__OpenBSD__)
+ completeSize = malloc_usable_size(ptr);
+ SkASSERT(completeSize >= size);
+ #elif defined(SK_BUILD_FOR_WIN)
+ completeSize = _msize(ptr);
+ SkASSERT(completeSize >= size);
+ #endif
+
+ return {static_cast<std::byte*>(ptr), completeSize};
+}
+} // namespace
+
+SkSpan<std::byte> SkContainerAllocator::allocate(int capacity, double growthFactor) {
+ SkASSERT(capacity >= 0);
+ SkASSERT(growthFactor >= 1.0);
+ SkASSERT_RELEASE(capacity <= fMaxCapacity);
+
+ if (growthFactor > 1.0 && capacity > 0) {
+ capacity = this->growthFactorCapacity(capacity, growthFactor);
+ }
+
+ return sk_allocate_throw(capacity * fSizeOfT);
+}
+
+size_t SkContainerAllocator::roundUpCapacity(int64_t capacity) const {
+ SkASSERT(capacity >= 0);
+
+ // If round will not go above fMaxCapacity return rounded capacity.
+ if (capacity < fMaxCapacity - kCapacityMultiple) {
+ return SkAlignTo(capacity, kCapacityMultiple);
+ }
+
+ return SkToSizeT(fMaxCapacity);
+}
+
+size_t SkContainerAllocator::growthFactorCapacity(int capacity, double growthFactor) const {
+ SkASSERT(capacity >= 0);
+ SkASSERT(growthFactor >= 1.0);
+ // Multiply by the growthFactor. Remember this must be done in 64-bit ints and not
+ // size_t because the width of size_t varies across platforms.
+ const int64_t capacityGrowth = static_cast<int64_t>(capacity * growthFactor);
+
+ // Notice that for small values of capacity, rounding up will provide most of the growth.
+ return this->roundUpCapacity(capacityGrowth);
+}
+
+
+SkSpan<std::byte> sk_allocate_canfail(size_t size) {
+ // Make sure to ask for at least the minimum number of bytes.
+ const size_t adjustedSize = std::max(size, kMinBytes);
+ void* ptr = sk_malloc_canfail(adjustedSize);
+ return complete_size(ptr, adjustedSize);
+}
+
+SkSpan<std::byte> sk_allocate_throw(size_t size) {
+ if (size == 0) {
+ return {};
+ }
+ // Make sure to ask for at least the minimum number of bytes.
+ const size_t adjustedSize = std::max(size, kMinBytes);
+ void* ptr = sk_malloc_throw(adjustedSize);
+ return complete_size(ptr, adjustedSize);
+}
+
+void sk_report_container_overflow_and_die() {
+ SK_ABORT("Requested capacity is too large.");
+}
diff --git a/gfx/skia/skia/src/base/SkCubics.cpp b/gfx/skia/skia/src/base/SkCubics.cpp
new file mode 100644
index 0000000000..64a4beb007
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkCubics.cpp
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkCubics.h"
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTPin.h"
+#include "src/base/SkQuads.h"
+
+#include <algorithm>
+#include <cmath>
+
+static constexpr double PI = 3.141592653589793;
+
+static bool nearly_equal(double x, double y) {
+ if (sk_double_nearly_zero(x)) {
+ return sk_double_nearly_zero(y);
+ }
+ return sk_doubles_nearly_equal_ulps(x, y);
+}
+
+// When the A coefficient of a cubic is close to 0, there can be floating point error
+// that arises from computing a very large root. In those cases, we would rather be
+// precise about the smaller 2 roots, so we have this arbitrary cutoff for when A is
+// really small or small compared to B.
+static bool close_to_a_quadratic(double A, double B) {
+ if (sk_double_nearly_zero(B)) {
+ return sk_double_nearly_zero(A);
+ }
+ return std::abs(A / B) < 1.0e-7;
+}
+
+int SkCubics::RootsReal(double A, double B, double C, double D, double solution[3]) {
+ if (close_to_a_quadratic(A, B)) {
+ return SkQuads::RootsReal(B, C, D, solution);
+ }
+ if (sk_double_nearly_zero(D)) { // 0 is one root
+ int num = SkQuads::RootsReal(A, B, C, solution);
+ for (int i = 0; i < num; ++i) {
+ if (sk_double_nearly_zero(solution[i])) {
+ return num;
+ }
+ }
+ solution[num++] = 0;
+ return num;
+ }
+ if (sk_double_nearly_zero(A + B + C + D)) { // 1 is one root
+ int num = SkQuads::RootsReal(A, A + B, -D, solution);
+ for (int i = 0; i < num; ++i) {
+ if (sk_doubles_nearly_equal_ulps(solution[i], 1)) {
+ return num;
+ }
+ }
+ solution[num++] = 1;
+ return num;
+ }
+ double a, b, c;
+ {
+ // If A is zero (e.g. B was nan and thus close_to_a_quadratic was false), we will
+ // temporarily have infinities rolling about, but will catch that when checking
+ // R2MinusQ3.
+ double invA = sk_ieee_double_divide(1, A);
+ a = B * invA;
+ b = C * invA;
+ c = D * invA;
+ }
+ double a2 = a * a;
+ double Q = (a2 - b * 3) / 9;
+ double R = (2 * a2 * a - 9 * a * b + 27 * c) / 54;
+ double R2 = R * R;
+ double Q3 = Q * Q * Q;
+ double R2MinusQ3 = R2 - Q3;
+ // If one of R2 Q3 is infinite or nan, subtracting them will also be infinite/nan.
+ // If both are infinite or nan, the subtraction will be nan.
+ // In either case, we have no finite roots.
+ if (!std::isfinite(R2MinusQ3)) {
+ return 0;
+ }
+ double adiv3 = a / 3;
+ double r;
+ double* roots = solution;
+ if (R2MinusQ3 < 0) { // we have 3 real roots
+ // the divide/root can, due to finite precisions, be slightly outside of -1...1
+ const double theta = acos(SkTPin(R / std::sqrt(Q3), -1., 1.));
+ const double neg2RootQ = -2 * std::sqrt(Q);
+
+ r = neg2RootQ * cos(theta / 3) - adiv3;
+ *roots++ = r;
+
+ r = neg2RootQ * cos((theta + 2 * PI) / 3) - adiv3;
+ if (!nearly_equal(solution[0], r)) {
+ *roots++ = r;
+ }
+ r = neg2RootQ * cos((theta - 2 * PI) / 3) - adiv3;
+ if (!nearly_equal(solution[0], r) &&
+ (roots - solution == 1 || !nearly_equal(solution[1], r))) {
+ *roots++ = r;
+ }
+ } else { // we have 1 real root
+ const double sqrtR2MinusQ3 = std::sqrt(R2MinusQ3);
+ A = fabs(R) + sqrtR2MinusQ3;
+ A = std::cbrt(A); // cube root
+ if (R > 0) {
+ A = -A;
+ }
+ if (!sk_double_nearly_zero(A)) {
+ A += Q / A;
+ }
+ r = A - adiv3;
+ *roots++ = r;
+ if (!sk_double_nearly_zero(R2) &&
+ sk_doubles_nearly_equal_ulps(R2, Q3)) {
+ r = -A / 2 - adiv3;
+ if (!nearly_equal(solution[0], r)) {
+ *roots++ = r;
+ }
+ }
+ }
+ return static_cast<int>(roots - solution);
+}
+
+int SkCubics::RootsValidT(double A, double B, double C, double D,
+ double solution[3]) {
+ double allRoots[3] = {0, 0, 0};
+ int realRoots = SkCubics::RootsReal(A, B, C, D, allRoots);
+ int foundRoots = 0;
+ for (int index = 0; index < realRoots; ++index) {
+ double tValue = allRoots[index];
+ if (tValue >= 1.0 && tValue <= 1.00005) {
+ // Make sure we do not already have 1 (or something very close) in the list of roots.
+ if ((foundRoots < 1 || !sk_doubles_nearly_equal_ulps(solution[0], 1)) &&
+ (foundRoots < 2 || !sk_doubles_nearly_equal_ulps(solution[1], 1))) {
+ solution[foundRoots++] = 1;
+ }
+ } else if (tValue >= -0.00005 && (tValue <= 0.0 || sk_double_nearly_zero(tValue))) {
+ // Make sure we do not already have 0 (or something very close) in the list of roots.
+ if ((foundRoots < 1 || !sk_double_nearly_zero(solution[0])) &&
+ (foundRoots < 2 || !sk_double_nearly_zero(solution[1]))) {
+ solution[foundRoots++] = 0;
+ }
+ } else if (tValue > 0.0 && tValue < 1.0) {
+ solution[foundRoots++] = tValue;
+ }
+ }
+ return foundRoots;
+}
+
+static bool approximately_zero(double x) {
+ // This cutoff for our binary search hopefully strikes a good balance between
+ // performance and accuracy.
+ return std::abs(x) < 0.00000001;
+}
+
+static int find_extrema_valid_t(double A, double B, double C,
+ double t[2]) {
+ // To find the local min and max of a cubic, we take the derivative and
+ // solve when that is equal to 0.
+ // d/dt (A*t^3 + B*t^2 + C*t + D) = 3A*t^2 + 2B*t + C
+ double roots[2] = {0, 0};
+ int numRoots = SkQuads::RootsReal(3*A, 2*B, C, roots);
+ int validRoots = 0;
+ for (int i = 0; i < numRoots; i++) {
+ double tValue = roots[i];
+ if (tValue >= 0 && tValue <= 1.0) {
+ t[validRoots++] = tValue;
+ }
+ }
+ return validRoots;
+}
+
+static double binary_search(double A, double B, double C, double D, double start, double stop) {
+ SkASSERT(start <= stop);
+ double left = SkCubics::EvalAt(A, B, C, D, start);
+ if (approximately_zero(left)) {
+ return start;
+ }
+ double right = SkCubics::EvalAt(A, B, C, D, stop);
+ if (!std::isfinite(left) || !std::isfinite(right)) {
+ return -1; // Not going to deal with one or more endpoints being non-finite.
+ }
+ if ((left > 0 && right > 0) || (left < 0 && right < 0)) {
+ return -1; // We can only have a root if one is above 0 and the other is below 0.
+ }
+
+ constexpr int maxIterations = 1000; // prevent infinite loop
+ for (int i = 0; i < maxIterations; i++) {
+ double step = (start + stop) / 2;
+ double curr = SkCubics::EvalAt(A, B, C, D, step);
+ if (approximately_zero(curr)) {
+ return step;
+ }
+ if ((curr < 0 && left < 0) || (curr > 0 && left > 0)) {
+ // go right
+ start = step;
+ } else {
+ // go left
+ stop = step;
+ }
+ }
+ return -1;
+}
+
+int SkCubics::BinarySearchRootsValidT(double A, double B, double C, double D,
+ double solution[3]) {
+ if (!std::isfinite(A) || !std::isfinite(B) || !std::isfinite(C) || !std::isfinite(D)) {
+ return 0;
+ }
+ double regions[4] = {0, 0, 0, 1};
+ // Find local minima and maxima
+ double minMax[2] = {0, 0};
+ int extremaCount = find_extrema_valid_t(A, B, C, minMax);
+ int startIndex = 2 - extremaCount;
+ if (extremaCount == 1) {
+ regions[startIndex + 1] = minMax[0];
+ }
+ if (extremaCount == 2) {
+ // While the roots will be in the range 0 to 1 inclusive, they might not be sorted.
+ regions[startIndex + 1] = std::min(minMax[0], minMax[1]);
+ regions[startIndex + 2] = std::max(minMax[0], minMax[1]);
+ }
+ // Starting at regions[startIndex] and going up through regions[3], we have
+ // an ascending list of numbers in the range 0 to 1.0, between which are the possible
+ // locations of a root.
+ int foundRoots = 0;
+ for (;startIndex < 3; startIndex++) {
+ double root = binary_search(A, B, C, D, regions[startIndex], regions[startIndex + 1]);
+ if (root >= 0) {
+ // Check for duplicates
+ if ((foundRoots < 1 || !approximately_zero(solution[0] - root)) &&
+ (foundRoots < 2 || !approximately_zero(solution[1] - root))) {
+ solution[foundRoots++] = root;
+ }
+ }
+ }
+ return foundRoots;
+}
diff --git a/gfx/skia/skia/src/base/SkCubics.h b/gfx/skia/skia/src/base/SkCubics.h
new file mode 100644
index 0000000000..7e3cbbb567
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkCubics.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkCubics_DEFINED
+#define SkCubics_DEFINED
+
+/**
+ * Utilities for dealing with cubic formulas with one variable:
+ * f(t) = A*t^3 + B*t^2 + C*t + D
+ */
+class SkCubics {
+public:
+ /**
+ * Puts up to 3 real solutions to the equation
+ * A*t^3 + B*t^2 + C*t + D = 0
+ * in the provided array and returns how many roots that was.
+ */
+ static int RootsReal(double A, double B, double C, double D,
+ double solution[3]);
+
+ /**
+ * Puts up to 3 real solutions to the equation
+ * A*t^3 + B*t^2 + C*t + D = 0
+ * in the provided array, with the constraint that t is in the range [0.0, 1.0],
+ * and returns how many roots that was.
+ */
+ static int RootsValidT(double A, double B, double C, double D,
+ double solution[3]);
+
+
+ /**
+ * Puts up to 3 real solutions to the equation
+ * A*t^3 + B*t^2 + C*t + D = 0
+ * in the provided array, with the constraint that t is in the range [0.0, 1.0],
+ * and returns how many roots that was.
+ * This is a slower method than RootsValidT, but more accurate in circumstances
+ * where floating point error gets too big.
+ */
+ static int BinarySearchRootsValidT(double A, double B, double C, double D,
+ double solution[3]);
+
+ /**
+ * Evaluates the cubic function with the 4 provided coefficients and the
+ * provided variable.
+ */
+ static double EvalAt(double A, double B, double C, double D, double t) {
+ return A * t * t * t +
+ B * t * t +
+ C * t +
+ D;
+ }
+
+ static double EvalAt(double coefficients[4], double t) {
+ return EvalAt(coefficients[0], coefficients[1], coefficients[2], coefficients[3], t);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkDeque.cpp b/gfx/skia/skia/src/base/SkDeque.cpp
new file mode 100644
index 0000000000..ffff336f90
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkDeque.cpp
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDeque.h"
+#include "include/private/base/SkMalloc.h"
+
+#include <cstddef>
+
+struct SkDeque::Block {
+ Block* fNext;
+ Block* fPrev;
+ char* fBegin; // start of used section in this chunk
+ char* fEnd; // end of used section in this chunk
+ char* fStop; // end of the allocated chunk
+
+ char* start() { return (char*)(this + 1); }
+ const char* start() const { return (const char*)(this + 1); }
+
+ void init(size_t size) {
+ fNext = fPrev = nullptr;
+ fBegin = fEnd = nullptr;
+ fStop = (char*)this + size;
+ }
+};
+
+SkDeque::SkDeque(size_t elemSize, int allocCount)
+ : fElemSize(elemSize)
+ , fInitialStorage(nullptr)
+ , fCount(0)
+ , fAllocCount(allocCount) {
+ SkASSERT(allocCount >= 1);
+ fFrontBlock = fBackBlock = nullptr;
+ fFront = fBack = nullptr;
+}
+
+SkDeque::SkDeque(size_t elemSize, void* storage, size_t storageSize, int allocCount)
+ : fElemSize(elemSize)
+ , fInitialStorage(storage)
+ , fCount(0)
+ , fAllocCount(allocCount) {
+ SkASSERT(storageSize == 0 || storage != nullptr);
+ SkASSERT(allocCount >= 1);
+
+ if (storageSize >= sizeof(Block) + elemSize) {
+ fFrontBlock = (Block*)storage;
+ fFrontBlock->init(storageSize);
+ } else {
+ fFrontBlock = nullptr;
+ }
+ fBackBlock = fFrontBlock;
+ fFront = fBack = nullptr;
+}
+
+SkDeque::~SkDeque() {
+ Block* head = fFrontBlock;
+ Block* initialHead = (Block*)fInitialStorage;
+
+ while (head) {
+ Block* next = head->fNext;
+ if (head != initialHead) {
+ this->freeBlock(head);
+ }
+ head = next;
+ }
+}
+
+void* SkDeque::push_front() {
+ fCount += 1;
+
+ if (nullptr == fFrontBlock) {
+ fFrontBlock = this->allocateBlock(fAllocCount);
+ fBackBlock = fFrontBlock; // update our linklist
+ }
+
+ Block* first = fFrontBlock;
+ char* begin;
+
+ if (nullptr == first->fBegin) {
+ INIT_CHUNK:
+ first->fEnd = first->fStop;
+ begin = first->fStop - fElemSize;
+ } else {
+ begin = first->fBegin - fElemSize;
+ if (begin < first->start()) { // no more room in this chunk
+ // should we alloc more as we accumulate more elements?
+ first = this->allocateBlock(fAllocCount);
+ first->fNext = fFrontBlock;
+ fFrontBlock->fPrev = first;
+ fFrontBlock = first;
+ goto INIT_CHUNK;
+ }
+ }
+
+ first->fBegin = begin;
+
+ if (nullptr == fFront) {
+ SkASSERT(nullptr == fBack);
+ fFront = fBack = begin;
+ } else {
+ SkASSERT(fBack);
+ fFront = begin;
+ }
+
+ return begin;
+}
+
+void* SkDeque::push_back() {
+ fCount += 1;
+
+ if (nullptr == fBackBlock) {
+ fBackBlock = this->allocateBlock(fAllocCount);
+ fFrontBlock = fBackBlock; // update our linklist
+ }
+
+ Block* last = fBackBlock;
+ char* end;
+
+ if (nullptr == last->fBegin) {
+ INIT_CHUNK:
+ last->fBegin = last->start();
+ end = last->fBegin + fElemSize;
+ } else {
+ end = last->fEnd + fElemSize;
+ if (end > last->fStop) { // no more room in this chunk
+ // should we alloc more as we accumulate more elements?
+ last = this->allocateBlock(fAllocCount);
+ last->fPrev = fBackBlock;
+ fBackBlock->fNext = last;
+ fBackBlock = last;
+ goto INIT_CHUNK;
+ }
+ }
+
+ last->fEnd = end;
+ end -= fElemSize;
+
+ if (nullptr == fBack) {
+ SkASSERT(nullptr == fFront);
+ fFront = fBack = end;
+ } else {
+ SkASSERT(fFront);
+ fBack = end;
+ }
+
+ return end;
+}
+
+void SkDeque::pop_front() {
+ SkASSERT(fCount > 0);
+ fCount -= 1;
+
+ Block* first = fFrontBlock;
+
+ SkASSERT(first != nullptr);
+
+ if (first->fBegin == nullptr) { // we were marked empty from before
+ first = first->fNext;
+ SkASSERT(first != nullptr); // else we popped too far
+ first->fPrev = nullptr;
+ this->freeBlock(fFrontBlock);
+ fFrontBlock = first;
+ }
+
+ char* begin = first->fBegin + fElemSize;
+ SkASSERT(begin <= first->fEnd);
+
+ if (begin < fFrontBlock->fEnd) {
+ first->fBegin = begin;
+ SkASSERT(first->fBegin);
+ fFront = first->fBegin;
+ } else {
+ first->fBegin = first->fEnd = nullptr; // mark as empty
+ if (nullptr == first->fNext) {
+ fFront = fBack = nullptr;
+ } else {
+ SkASSERT(first->fNext->fBegin);
+ fFront = first->fNext->fBegin;
+ }
+ }
+}
+
+void SkDeque::pop_back() {
+ SkASSERT(fCount > 0);
+ fCount -= 1;
+
+ Block* last = fBackBlock;
+
+ SkASSERT(last != nullptr);
+
+ if (last->fEnd == nullptr) { // we were marked empty from before
+ last = last->fPrev;
+ SkASSERT(last != nullptr); // else we popped too far
+ last->fNext = nullptr;
+ this->freeBlock(fBackBlock);
+ fBackBlock = last;
+ }
+
+ char* end = last->fEnd - fElemSize;
+ SkASSERT(end >= last->fBegin);
+
+ if (end > last->fBegin) {
+ last->fEnd = end;
+ SkASSERT(last->fEnd);
+ fBack = last->fEnd - fElemSize;
+ } else {
+ last->fBegin = last->fEnd = nullptr; // mark as empty
+ if (nullptr == last->fPrev) {
+ fFront = fBack = nullptr;
+ } else {
+ SkASSERT(last->fPrev->fEnd);
+ fBack = last->fPrev->fEnd - fElemSize;
+ }
+ }
+}
+
+int SkDeque::numBlocksAllocated() const {
+ int numBlocks = 0;
+
+ for (const Block* temp = fFrontBlock; temp; temp = temp->fNext) {
+ ++numBlocks;
+ }
+
+ return numBlocks;
+}
+
+SkDeque::Block* SkDeque::allocateBlock(int allocCount) {
+ Block* newBlock = (Block*)sk_malloc_throw(sizeof(Block) + allocCount * fElemSize);
+ newBlock->init(sizeof(Block) + allocCount * fElemSize);
+ return newBlock;
+}
+
+void SkDeque::freeBlock(Block* block) {
+ sk_free(block);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkDeque::Iter::Iter() : fCurBlock(nullptr), fPos(nullptr), fElemSize(0) {}
+
+SkDeque::Iter::Iter(const SkDeque& d, IterStart startLoc) {
+ this->reset(d, startLoc);
+}
+
+// Due to how reset and next work, next actually returns the current element
+// pointed to by fPos and then updates fPos to point to the next one.
+void* SkDeque::Iter::next() {
+ char* pos = fPos;
+
+ if (pos) { // if we were valid, try to move to the next setting
+ char* next = pos + fElemSize;
+ SkASSERT(next <= fCurBlock->fEnd);
+ if (next == fCurBlock->fEnd) { // exhausted this chunk, move to next
+ do {
+ fCurBlock = fCurBlock->fNext;
+ } while (fCurBlock != nullptr && fCurBlock->fBegin == nullptr);
+ next = fCurBlock ? fCurBlock->fBegin : nullptr;
+ }
+ fPos = next;
+ }
+ return pos;
+}
+
+// Like next, prev actually returns the current element pointed to by fPos and
+// then makes fPos point to the previous element.
+void* SkDeque::Iter::prev() {
+ char* pos = fPos;
+
+ if (pos) { // if we were valid, try to move to the prior setting
+ char* prev = pos - fElemSize;
+ SkASSERT(prev >= fCurBlock->fBegin - fElemSize);
+ if (prev < fCurBlock->fBegin) { // exhausted this chunk, move to prior
+ do {
+ fCurBlock = fCurBlock->fPrev;
+ } while (fCurBlock != nullptr && fCurBlock->fEnd == nullptr);
+ prev = fCurBlock ? fCurBlock->fEnd - fElemSize : nullptr;
+ }
+ fPos = prev;
+ }
+ return pos;
+}
+
+// reset works by skipping through the spare blocks at the start (or end)
+// of the doubly linked list until a non-empty one is found. The fPos
+// member is then set to the first (or last) element in the block. If
+// there are no elements in the deque both fCurBlock and fPos will come
+// out of this routine nullptr.
+void SkDeque::Iter::reset(const SkDeque& d, IterStart startLoc) {
+ fElemSize = d.fElemSize;
+
+ if (kFront_IterStart == startLoc) {
+ // initialize the iterator to start at the front
+ fCurBlock = d.fFrontBlock;
+ while (fCurBlock && nullptr == fCurBlock->fBegin) {
+ fCurBlock = fCurBlock->fNext;
+ }
+ fPos = fCurBlock ? fCurBlock->fBegin : nullptr;
+ } else {
+ // initialize the iterator to start at the back
+ fCurBlock = d.fBackBlock;
+ while (fCurBlock && nullptr == fCurBlock->fEnd) {
+ fCurBlock = fCurBlock->fPrev;
+ }
+ fPos = fCurBlock ? fCurBlock->fEnd - fElemSize : nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/base/SkEndian.h b/gfx/skia/skia/src/base/SkEndian.h
new file mode 100644
index 0000000000..732c248802
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkEndian.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEndian_DEFINED
+#define SkEndian_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkFeatures.h"
+
+#include <cstdint>
+
+/** \file SkEndian.h
+
+ Macros and helper functions for handling 16 and 32 bit values in
+ big and little endian formats.
+*/
+
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN)
+ #error "can't have both LENDIAN and BENDIAN defined"
+#endif
+
+#if !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN)
+ #error "need either LENDIAN or BENDIAN defined"
+#endif
+
+/** Swap the two bytes in the low 16bits of the parameters.
+ e.g. 0x1234 -> 0x3412
+*/
+static inline uint16_t SkEndianSwap16(uint16_t value) {
+ return static_cast<uint16_t>((value >> 8) | ((value & 0xFF) << 8));
+}
+
+template<uint16_t N> struct SkTEndianSwap16 {
+ static const uint16_t value = static_cast<uint16_t>((N >> 8) | ((N & 0xFF) << 8));
+};
+
+/** Vector version of SkEndianSwap16(), which swaps the
+ low two bytes of each value in the array.
+*/
+static inline void SkEndianSwap16s(uint16_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap16(*array);
+ array += 1;
+ }
+}
+
+/** Reverse all 4 bytes in a 32bit value.
+ e.g. 0x12345678 -> 0x78563412
+*/
+static constexpr uint32_t SkEndianSwap32(uint32_t value) {
+ return ((value & 0xFF) << 24) |
+ ((value & 0xFF00) << 8) |
+ ((value & 0xFF0000) >> 8) |
+ (value >> 24);
+}
+
+template<uint32_t N> struct SkTEndianSwap32 {
+ static const uint32_t value = ((N & 0xFF) << 24) |
+ ((N & 0xFF00) << 8) |
+ ((N & 0xFF0000) >> 8) |
+ (N >> 24);
+};
+
+/** Vector version of SkEndianSwap32(), which swaps the
+ bytes of each value in the array.
+*/
+static inline void SkEndianSwap32s(uint32_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap32(*array);
+ array += 1;
+ }
+}
+
+/** Reverse all 8 bytes in a 64bit value.
+ e.g. 0x1122334455667788 -> 0x8877665544332211
+*/
+static inline uint64_t SkEndianSwap64(uint64_t value) {
+ return (((value & 0x00000000000000FFULL) << (8*7)) |
+ ((value & 0x000000000000FF00ULL) << (8*5)) |
+ ((value & 0x0000000000FF0000ULL) << (8*3)) |
+ ((value & 0x00000000FF000000ULL) << (8*1)) |
+ ((value & 0x000000FF00000000ULL) >> (8*1)) |
+ ((value & 0x0000FF0000000000ULL) >> (8*3)) |
+ ((value & 0x00FF000000000000ULL) >> (8*5)) |
+ ((value) >> (8*7)));
+}
+template<uint64_t N> struct SkTEndianSwap64 {
+ static const uint64_t value = (((N & 0x00000000000000FFULL) << (8*7)) |
+ ((N & 0x000000000000FF00ULL) << (8*5)) |
+ ((N & 0x0000000000FF0000ULL) << (8*3)) |
+ ((N & 0x00000000FF000000ULL) << (8*1)) |
+ ((N & 0x000000FF00000000ULL) >> (8*1)) |
+ ((N & 0x0000FF0000000000ULL) >> (8*3)) |
+ ((N & 0x00FF000000000000ULL) >> (8*5)) |
+ ((N) >> (8*7)));
+};
+
+/** Vector version of SkEndianSwap64(), which swaps the
+ bytes of each value in the array.
+*/
+static inline void SkEndianSwap64s(uint64_t array[], int count) {
+ SkASSERT(count == 0 || array != nullptr);
+
+ while (--count >= 0) {
+ *array = SkEndianSwap64(*array);
+ array += 1;
+ }
+}
+
+#ifdef SK_CPU_LENDIAN
+ #define SkEndian_SwapBE16(n) SkEndianSwap16(n)
+ #define SkEndian_SwapBE32(n) SkEndianSwap32(n)
+ #define SkEndian_SwapBE64(n) SkEndianSwap64(n)
+ #define SkEndian_SwapLE16(n) (n)
+ #define SkEndian_SwapLE32(n) (n)
+ #define SkEndian_SwapLE64(n) (n)
+
+ #define SkTEndian_SwapBE16(n) SkTEndianSwap16<n>::value
+ #define SkTEndian_SwapBE32(n) SkTEndianSwap32<n>::value
+ #define SkTEndian_SwapBE64(n) SkTEndianSwap64<n>::value
+ #define SkTEndian_SwapLE16(n) (n)
+ #define SkTEndian_SwapLE32(n) (n)
+ #define SkTEndian_SwapLE64(n) (n)
+#else // SK_CPU_BENDIAN
+ #define SkEndian_SwapBE16(n) (n)
+ #define SkEndian_SwapBE32(n) (n)
+ #define SkEndian_SwapBE64(n) (n)
+ #define SkEndian_SwapLE16(n) SkEndianSwap16(n)
+ #define SkEndian_SwapLE32(n) SkEndianSwap32(n)
+ #define SkEndian_SwapLE64(n) SkEndianSwap64(n)
+
+ #define SkTEndian_SwapBE16(n) (n)
+ #define SkTEndian_SwapBE32(n) (n)
+ #define SkTEndian_SwapBE64(n) (n)
+ #define SkTEndian_SwapLE16(n) SkTEndianSwap16<n>::value
+ #define SkTEndian_SwapLE32(n) SkTEndianSwap32<n>::value
+ #define SkTEndian_SwapLE64(n) SkTEndianSwap64<n>::value
+#endif
+
+// When a bytestream is embedded in a 32-bit word, these are the shifts that
+// bring each byte into the low 8 bits, where ANDing with 0xff extracts it.
+#ifdef SK_CPU_LENDIAN
+ #define SkEndian_Byte0Shift 0
+ #define SkEndian_Byte1Shift 8
+ #define SkEndian_Byte2Shift 16
+ #define SkEndian_Byte3Shift 24
+#else // SK_CPU_BENDIAN
+ #define SkEndian_Byte0Shift 24
+ #define SkEndian_Byte1Shift 16
+ #define SkEndian_Byte2Shift 8
+ #define SkEndian_Byte3Shift 0
+#endif
+
+
+#if defined(SK_UINT8_BITFIELD_LENDIAN) && defined(SK_UINT8_BITFIELD_BENDIAN)
+ #error "can't have both bitfield LENDIAN and BENDIAN defined"
+#endif
+
+#if !defined(SK_UINT8_BITFIELD_LENDIAN) && !defined(SK_UINT8_BITFIELD_BENDIAN)
+ #ifdef SK_CPU_LENDIAN
+ #define SK_UINT8_BITFIELD_LENDIAN
+ #else
+ #define SK_UINT8_BITFIELD_BENDIAN
+ #endif
+#endif
+
+#ifdef SK_UINT8_BITFIELD_LENDIAN
+ #define SK_UINT8_BITFIELD(f0, f1, f2, f3, f4, f5, f6, f7) \
+ SK_OT_BYTE f0 : 1; \
+ SK_OT_BYTE f1 : 1; \
+ SK_OT_BYTE f2 : 1; \
+ SK_OT_BYTE f3 : 1; \
+ SK_OT_BYTE f4 : 1; \
+ SK_OT_BYTE f5 : 1; \
+ SK_OT_BYTE f6 : 1; \
+ SK_OT_BYTE f7 : 1;
+#else
+ #define SK_UINT8_BITFIELD(f0, f1, f2, f3, f4, f5, f6, f7) \
+ SK_OT_BYTE f7 : 1; \
+ SK_OT_BYTE f6 : 1; \
+ SK_OT_BYTE f5 : 1; \
+ SK_OT_BYTE f4 : 1; \
+ SK_OT_BYTE f3 : 1; \
+ SK_OT_BYTE f2 : 1; \
+ SK_OT_BYTE f1 : 1; \
+ SK_OT_BYTE f0 : 1;
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkFloatingPoint.cpp b/gfx/skia/skia/src/base/SkFloatingPoint.cpp
new file mode 100644
index 0000000000..3e3d91d6e5
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkFloatingPoint.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkFloatingPoint.h"
+
+#include "include/private/base/SkAssert.h"
+
+#include <cmath>
+
+static inline int64_t double_to_twos_complement_bits(double x) {
+ // Convert a double to its bit pattern
+ int64_t bits = 0;
+ static_assert(sizeof(x) == sizeof(bits));
+ std::memcpy(&bits, &x, sizeof(bits));
+ // Convert a sign-bit int (i.e. double interpreted as int) into a 2s complement
+ // int. This also converts -0 (0x8000000000000000) to 0. Doing this to a double allows
+ // it to be compared using normal C operators (<, <=, etc.)
+ if (bits < 0) {
+ bits &= 0x7FFFFFFFFFFFFFFF;
+ bits = -bits;
+ }
+ return bits;
+}
+
+// Arbitrarily chosen.
+constexpr static double sk_double_epsilon = 0.0000000001;
+
+bool sk_doubles_nearly_equal_ulps(double a, double b, uint8_t max_ulps_diff) {
+ // If both of these are zero (or very close), then using Units of Least Precision
+ // will not be accurate and we should use sk_double_nearly_zero instead.
+ SkASSERT(!(fabs(a) < sk_double_epsilon && fabs(b) < sk_double_epsilon));
+ // This algorithm does not work if both inputs are NaN.
+ SkASSERT(!(std::isnan(a) && std::isnan(b)));
+ // If both inputs are infinity (or actually equal), this catches it.
+ if (a == b) {
+ return true;
+ }
+ int64_t aBits = double_to_twos_complement_bits(a);
+ int64_t bBits = double_to_twos_complement_bits(b);
+
+ // Find the difference in Units of Least Precision (ULPs).
+ return aBits < bBits + max_ulps_diff && bBits < aBits + max_ulps_diff;
+}
+
+bool sk_double_nearly_zero(double a) {
+ return a == 0 || fabs(a) < sk_double_epsilon;
+}
diff --git a/gfx/skia/skia/src/base/SkHalf.cpp b/gfx/skia/skia/src/base/SkHalf.cpp
new file mode 100644
index 0000000000..024daa29b8
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkHalf.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkFloatBits.h"
+#include "src/base/SkHalf.h"
+
+uint16_t halfMantissa(SkHalf h) {
+ return h & 0x03ff;
+}
+
+uint16_t halfExponent(SkHalf h) {
+ return (h >> 10) & 0x001f;
+}
+
+uint16_t halfSign(SkHalf h) {
+ return h >> 15;
+}
+
+union FloatUIntUnion {
+ uint32_t fUInt; // this must come first for the initializations below to work
+ float fFloat;
+};
+
+// based on Fabien Giesen's float_to_half_fast3()
+// see https://gist.github.com/rygorous/2156668
+SkHalf SkFloatToHalf(float f) {
+ static const uint32_t f32infty = { 255 << 23 };
+ static const uint32_t f16infty = { 31 << 23 };
+ static const FloatUIntUnion magic = { 15 << 23 };
+ static const uint32_t sign_mask = 0x80000000u;
+ static const uint32_t round_mask = ~0xfffu;
+ SkHalf o = 0;
+
+ FloatUIntUnion floatUnion;
+ floatUnion.fFloat = f;
+
+ uint32_t sign = floatUnion.fUInt & sign_mask;
+ floatUnion.fUInt ^= sign;
+
+ // NOTE all the integer compares in this function can be safely
+ // compiled into signed compares since all operands are below
+ // 0x80000000. Important if you want fast straight SSE2 code
+ // (since there's no unsigned PCMPGTD).
+
+ // Inf or NaN (all exponent bits set)
+ if (floatUnion.fUInt >= f32infty)
+ // NaN->qNaN and Inf->Inf
+ o = (floatUnion.fUInt > f32infty) ? 0x7e00 : 0x7c00;
+ // (De)normalized number or zero
+ else {
+ floatUnion.fUInt &= round_mask;
+ floatUnion.fFloat *= magic.fFloat;
+ floatUnion.fUInt -= round_mask;
+ // Clamp to signed infinity if overflowed
+ if (floatUnion.fUInt > f16infty) {
+ floatUnion.fUInt = f16infty;
+ }
+
+ o = floatUnion.fUInt >> 13; // Take the bits!
+ }
+
+ o |= sign >> 16;
+ return o;
+}
+
+// based on Fabien Giesen's half_to_float_fast2()
+// see https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
+float SkHalfToFloat(SkHalf h) {
+ static const FloatUIntUnion magic = { 126 << 23 };
+ FloatUIntUnion o;
+
+ if (halfExponent(h) == 0)
+ {
+ // Zero / Denormal
+ o.fUInt = magic.fUInt + halfMantissa(h);
+ o.fFloat -= magic.fFloat;
+ }
+ else
+ {
+ // Set mantissa
+ o.fUInt = halfMantissa(h) << 13;
+ // Set exponent
+ if (halfExponent(h) == 0x1f)
+ // Inf/NaN
+ o.fUInt |= (255 << 23);
+ else
+ o.fUInt |= ((127 - 15 + halfExponent(h)) << 23);
+ }
+
+ // Set sign
+ o.fUInt |= (halfSign(h) << 31);
+ return o.fFloat;
+}
diff --git a/gfx/skia/skia/src/base/SkHalf.h b/gfx/skia/skia/src/base/SkHalf.h
new file mode 100644
index 0000000000..d88c80d9db
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkHalf.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkHalf_DEFINED
+#define SkHalf_DEFINED
+
+#include "src/base/SkVx.h"
+
+// 16-bit floating point value
+// format is 1 bit sign, 5 bits exponent, 10 bits mantissa
+// only used for storage
+typedef uint16_t SkHalf;
+
+static constexpr uint16_t SK_HalfMin = 0x0400; // 2^-14 (minimum positive normal value)
+static constexpr uint16_t SK_HalfMax = 0x7bff; // 65504
+static constexpr uint16_t SK_HalfEpsilon = 0x1400; // 2^-10
+static constexpr uint16_t SK_Half1 = 0x3C00; // 1
+
+// convert between half and single precision floating point
+float SkHalfToFloat(SkHalf h);
+SkHalf SkFloatToHalf(float f);
+
+// Convert between half and single precision floating point,
+// assuming inputs and outputs are both finite, and may
+// flush values which would be denormal half floats to zero.
+static inline skvx::float4 SkHalfToFloat_finite_ftz(uint64_t rgba) {
+ return skvx::from_half(skvx::half4::Load(&rgba));
+}
+static inline skvx::half4 SkFloatToHalf_finite_ftz(const skvx::float4& c) {
+ return skvx::to_half(c);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkLeanWindows.h b/gfx/skia/skia/src/base/SkLeanWindows.h
new file mode 100644
index 0000000000..d43150db76
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkLeanWindows.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkLeanWindows_DEFINED
+#define SkLeanWindows_DEFINED
+
+#include "include/private/base/SkFeatures.h" // IWYU pragma: keep
+
+#ifdef SK_BUILD_FOR_WIN
+// https://devblogs.microsoft.com/oldnewthing/20091130-00/?p=15863
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# define WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# endif
+# ifndef NOMINMAX
+# define NOMINMAX
+# define NOMINMAX_WAS_LOCALLY_DEFINED
+# endif
+#
+# include <windows.h>
+#
+# ifdef WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# undef WIN32_IS_MEAN_WAS_LOCALLY_DEFINED
+# undef WIN32_LEAN_AND_MEAN
+# endif
+# ifdef NOMINMAX_WAS_LOCALLY_DEFINED
+# undef NOMINMAX_WAS_LOCALLY_DEFINED
+# undef NOMINMAX
+# endif
+#endif
+
+#endif // SkLeanWindows_DEFINED
diff --git a/gfx/skia/skia/src/base/SkMSAN.h b/gfx/skia/skia/src/base/SkMSAN.h
new file mode 100644
index 0000000000..85fa2fce4b
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkMSAN.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMSAN_DEFINED
+#define SkMSAN_DEFINED
+
+#include "include/private/base/SkAssert.h"
+
+#include <cstddef>
+#include <string.h>
+
+// Typically declared in LLVM's msan_interface.h. Easier for us to just re-declare.
+extern "C" {
+ void __msan_check_mem_is_initialized(const volatile void*, size_t);
+ void __msan_unpoison (const volatile void*, size_t);
+}
+
+// Code that requires initialized inputs can call this to make it clear that
+// the blame for use of uninitialized data belongs further up the call stack.
+static inline void sk_msan_assert_initialized(const void* begin, const void* end) {
+#if defined(__has_feature)
+ #if __has_feature(memory_sanitizer)
+ __msan_check_mem_is_initialized(begin, (const char*)end - (const char*)begin);
+ #endif
+#endif
+}
+
+// Lie to MSAN that this range of memory is initialized.
+// This can hide serious problems if overused. Every use of this should refer to a bug.
+static inline void sk_msan_mark_initialized(const void* begin, const void* end, const char* skbug) {
+ SkASSERT(skbug && 0 != strcmp(skbug, ""));
+#if defined(__has_feature)
+ #if __has_feature(memory_sanitizer)
+ __msan_unpoison(begin, (const char*)end - (const char*)begin);
+ #endif
+#endif
+}
+
+#endif//SkMSAN_DEFINED
diff --git a/gfx/skia/skia/src/base/SkMalloc.cpp b/gfx/skia/skia/src/base/SkMalloc.cpp
new file mode 100644
index 0000000000..944b4847b7
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkMalloc.cpp
@@ -0,0 +1,22 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "include/private/base/SkMalloc.h"
+
+#include "src/base/SkSafeMath.h"
+
+void* sk_calloc_throw(size_t count, size_t elemSize) {
+ return sk_calloc_throw(SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_malloc_throw(size_t count, size_t elemSize) {
+ return sk_malloc_throw(SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_realloc_throw(void* buffer, size_t count, size_t elemSize) {
+ return sk_realloc_throw(buffer, SkSafeMath::Mul(count, elemSize));
+}
+
+void* sk_malloc_canfail(size_t count, size_t elemSize) {
+ return sk_malloc_canfail(SkSafeMath::Mul(count, elemSize));
+}
diff --git a/gfx/skia/skia/src/base/SkMathPriv.cpp b/gfx/skia/skia/src/base/SkMathPriv.cpp
new file mode 100644
index 0000000000..2674e69886
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkMathPriv.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkMathPriv.h"
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkFloatingPoint.h"
+
+#include <cstddef>
+#include <cstdint>
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* www.worldserver.com/turk/computergraphics/FixedSqrt.pdf
+*/
+int32_t SkSqrtBits(int32_t x, int count) {
+ SkASSERT(x >= 0 && count > 0 && (unsigned)count <= 30);
+
+ uint32_t root = 0;
+ uint32_t remHi = 0;
+ uint32_t remLo = x;
+
+ do {
+ root <<= 1;
+
+ remHi = (remHi<<2) | (remLo>>30);
+ remLo <<= 2;
+
+ uint32_t testDiv = (root << 1) + 1;
+ if (remHi >= testDiv) {
+ remHi -= testDiv;
+ root++;
+ }
+ } while (--count >= 0);
+
+ return root;
+}
+
+// Kernighan's method
+int SkPopCount_portable(uint32_t n) {
+ int count = 0;
+
+ while (n) {
+ n &= (n - 1); // Remove the lowest bit in the integer.
+ count++;
+ }
+ return count;
+}
+
+// Here we strip off the unwanted bits and then return the number of trailing zero bits
+int SkNthSet(uint32_t target, int n) {
+ SkASSERT(n < SkPopCount(target));
+
+ for (int i = 0; i < n; ++i) {
+ target &= (target - 1); // Remove the lowest bit in the integer.
+ }
+
+ return SkCTZ(target);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool sk_floats_are_unit(const float array[], size_t count) {
+ bool is_unit = true;
+ for (size_t i = 0; i < count; ++i) {
+ is_unit &= (array[i] >= 0) & (array[i] <= 1);
+ }
+ return is_unit;
+}
diff --git a/gfx/skia/skia/src/base/SkMathPriv.h b/gfx/skia/skia/src/base/SkMathPriv.h
new file mode 100644
index 0000000000..0bcb113b6d
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkMathPriv.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMathPriv_DEFINED
+#define SkMathPriv_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkCPUTypes.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <cstddef>
+#include <cstdint>
+
+/**
+ * Return the integer square root of value, with a bias of bitBias
+ */
+int32_t SkSqrtBits(int32_t value, int bitBias);
+
+/** Return the integer square root of n, treated as a SkFixed (16.16)
+ */
+static inline int32_t SkSqrt32(int32_t n) { return SkSqrtBits(n, 15); }
+
+/**
+ * Returns (value < 0 ? 0 : value) efficiently (i.e. no compares or branches)
+ */
+static inline int SkClampPos(int value) {
+ return value & ~(value >> 31);
+}
+
+/**
+ * Stores numer/denom and numer%denom into div and mod respectively.
+ */
+template <typename In, typename Out>
+inline void SkTDivMod(In numer, In denom, Out* div, Out* mod) {
+#ifdef SK_CPU_ARM32
+ // If we wrote this as in the else branch, GCC won't fuse the two into one
+ // divmod call, but rather a div call followed by a divmod. Silly! This
+ // version is just as fast as calling __aeabi_[u]idivmod manually, but with
+ // prettier code.
+ //
+ // This benches as around 2x faster than the code in the else branch.
+ const In d = numer/denom;
+ *div = static_cast<Out>(d);
+ *mod = static_cast<Out>(numer-d*denom);
+#else
+ // On x86 this will just be a single idiv.
+ *div = static_cast<Out>(numer/denom);
+ *mod = static_cast<Out>(numer%denom);
+#endif
+}
+
+/** Returns -1 if n < 0, else returns 0
+ */
+#define SkExtractSign(n) ((int32_t)(n) >> 31)
+
+/** If sign == -1, returns -n, else sign must be 0, and returns n.
+ Typically used in conjunction with SkExtractSign().
+ */
+static inline int32_t SkApplySign(int32_t n, int32_t sign) {
+ SkASSERT(sign == 0 || sign == -1);
+ return (n ^ sign) - sign;
+}
+
+/** Return x with the sign of y */
+static inline int32_t SkCopySign32(int32_t x, int32_t y) {
+ return SkApplySign(x, SkExtractSign(x ^ y));
+}
+
+/** Given a positive value and a positive max, return the value
+ pinned against max.
+ Note: only works as long as max - value doesn't wrap around
+ @return max if value >= max, else value
+ */
+static inline unsigned SkClampUMax(unsigned value, unsigned max) {
+ if (value > max) {
+ value = max;
+ }
+ return value;
+}
+
+// If a signed int holds min_int (e.g. 0x80000000) it is undefined what happens when
+// we negate it (even though we *know* we're 2's complement and we'll get the same
+// value back). So we create this helper function that casts to size_t (unsigned) first,
+// to avoid the complaint.
+static inline size_t sk_negate_to_size_t(int32_t value) {
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4146) // Thanks MSVC, we know what we're negating an unsigned
+#endif
+ return -static_cast<size_t>(value);
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Return a*b/255, truncating away any fractional bits. Only valid if both
+ a and b are 0..255
+ */
+static inline U8CPU SkMulDiv255Trunc(U8CPU a, U8CPU b) {
+ SkASSERT((uint8_t)a == a);
+ SkASSERT((uint8_t)b == b);
+ unsigned prod = a*b + 1;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+/** Return (a*b)/255, taking the ceiling of any fractional bits. Only valid if
+ both a and b are 0..255. The expected result equals (a * b + 254) / 255.
+ */
+static inline U8CPU SkMulDiv255Ceiling(U8CPU a, U8CPU b) {
+ SkASSERT((uint8_t)a == a);
+ SkASSERT((uint8_t)b == b);
+ unsigned prod = a*b + 255;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+/** Just the rounding step in SkDiv255Round: round(value / 255)
+ */
+static inline unsigned SkDiv255Round(unsigned prod) {
+ prod += 128;
+ return (prod + (prod >> 8)) >> 8;
+}
+
+/**
+ * Swap byte order of a 4-byte value, e.g. 0xaarrggbb -> 0xbbggrraa.
+ */
+#if defined(_MSC_VER)
+ #include <stdlib.h>
+ static inline uint32_t SkBSwap32(uint32_t v) { return _byteswap_ulong(v); }
+#else
+ static inline uint32_t SkBSwap32(uint32_t v) { return __builtin_bswap32(v); }
+#endif
+
+/*
+ * Return the number of set bits (i.e., the population count) in the provided uint32_t.
+ */
+int SkPopCount_portable(uint32_t n);
+
+#if defined(__GNUC__) || defined(__clang__)
+ static inline int SkPopCount(uint32_t n) {
+ return __builtin_popcount(n);
+ }
+#else
+ static inline int SkPopCount(uint32_t n) {
+ return SkPopCount_portable(n);
+ }
+#endif
+
+/*
+ * Return the 0-based index of the nth bit set in target
+ * Returns 32 if there is no nth bit set.
+ */
+int SkNthSet(uint32_t target, int n);
+
+//! Returns the number of leading zero bits (0...32)
+// From Hacker's Delight 2nd Edition
+constexpr int SkCLZ_portable(uint32_t x) {
+ int n = 32;
+ uint32_t y = x >> 16; if (y != 0) {n -= 16; x = y;}
+ y = x >> 8; if (y != 0) {n -= 8; x = y;}
+ y = x >> 4; if (y != 0) {n -= 4; x = y;}
+ y = x >> 2; if (y != 0) {n -= 2; x = y;}
+ y = x >> 1; if (y != 0) {return n - 2;}
+ return n - static_cast<int>(x);
+}
+
+static_assert(32 == SkCLZ_portable(0));
+static_assert(31 == SkCLZ_portable(1));
+static_assert( 1 == SkCLZ_portable(1 << 30));
+static_assert( 1 == SkCLZ_portable((1 << 30) | (1 << 24) | 1));
+static_assert( 0 == SkCLZ_portable(~0U));
+
+#if defined(SK_BUILD_FOR_WIN)
+ #include <intrin.h>
+
+ static inline int SkCLZ(uint32_t mask) {
+ if (mask) {
+ unsigned long index = 0;
+ _BitScanReverse(&index, mask);
+ // Suppress this bogus /analyze warning. The check for non-zero
+ // guarantees that _BitScanReverse will succeed.
+ #pragma warning(suppress : 6102) // Using 'index' from failed function call
+ return index ^ 0x1F;
+ } else {
+ return 32;
+ }
+ }
+#elif defined(SK_CPU_ARM32) || defined(__GNUC__) || defined(__clang__)
+ static inline int SkCLZ(uint32_t mask) {
+ // __builtin_clz(0) is undefined, so we have to detect that case.
+ return mask ? __builtin_clz(mask) : 32;
+ }
+#else
+ static inline int SkCLZ(uint32_t mask) {
+ return SkCLZ_portable(mask);
+ }
+#endif
+
+//! Returns the number of trailing zero bits (0...32)
+// From Hacker's Delight 2nd Edition
+constexpr int SkCTZ_portable(uint32_t x) {
+ return 32 - SkCLZ_portable(~x & (x - 1));
+}
+
+static_assert(32 == SkCTZ_portable(0));
+static_assert( 0 == SkCTZ_portable(1));
+static_assert(30 == SkCTZ_portable(1 << 30));
+static_assert( 2 == SkCTZ_portable((1 << 30) | (1 << 24) | (1 << 2)));
+static_assert( 0 == SkCTZ_portable(~0U));
+
+#if defined(SK_BUILD_FOR_WIN)
+ #include <intrin.h>
+
+ static inline int SkCTZ(uint32_t mask) {
+ if (mask) {
+ unsigned long index = 0;
+ _BitScanForward(&index, mask);
+            // Suppress this bogus /analyze warning. The check for non-zero
+            // guarantees that _BitScanForward will succeed.
+ #pragma warning(suppress : 6102) // Using 'index' from failed function call
+ return index;
+ } else {
+ return 32;
+ }
+ }
+#elif defined(SK_CPU_ARM32) || defined(__GNUC__) || defined(__clang__)
+ static inline int SkCTZ(uint32_t mask) {
+ // __builtin_ctz(0) is undefined, so we have to detect that case.
+ return mask ? __builtin_ctz(mask) : 32;
+ }
+#else
+ static inline int SkCTZ(uint32_t mask) {
+ return SkCTZ_portable(mask);
+ }
+#endif
+
+/**
+ * Returns the log2 of the specified value, were that value to be rounded up
+ * to the next power of 2. It is undefined to pass 0. Examples:
+ * SkNextLog2(1) -> 0
+ * SkNextLog2(2) -> 1
+ * SkNextLog2(3) -> 2
+ * SkNextLog2(4) -> 2
+ * SkNextLog2(5) -> 3
+ */
+static inline int SkNextLog2(uint32_t value) {
+ SkASSERT(value != 0);
+ return 32 - SkCLZ(value - 1);
+}
+
+constexpr int SkNextLog2_portable(uint32_t value) {
+ SkASSERT(value != 0);
+ return 32 - SkCLZ_portable(value - 1);
+}
+
+/**
+* Returns the log2 of the specified value, were that value to be rounded down
+* to the previous power of 2. It is undefined to pass 0. Examples:
+* SkPrevLog2(1) -> 0
+* SkPrevLog2(2) -> 1
+* SkPrevLog2(3) -> 1
+* SkPrevLog2(4) -> 2
+* SkPrevLog2(5) -> 2
+*/
+static inline int SkPrevLog2(uint32_t value) {
+ SkASSERT(value != 0);
+ return 32 - SkCLZ(value >> 1);
+}
+
+constexpr int SkPrevLog2_portable(uint32_t value) {
+ SkASSERT(value != 0);
+ return 32 - SkCLZ_portable(value >> 1);
+}
+
+/**
+ * Returns the smallest power-of-2 that is >= the specified value. If value
+ * is already a power of 2, then it is returned unchanged. It is undefined
+ * if value is <= 0.
+ */
+static inline int SkNextPow2(int value) {
+ SkASSERT(value > 0);
+ return 1 << SkNextLog2(static_cast<uint32_t>(value));
+}
+
+constexpr int SkNextPow2_portable(int value) {
+ SkASSERT(value > 0);
+ return 1 << SkNextLog2_portable(static_cast<uint32_t>(value));
+}
+
+/**
+* Returns the largest power-of-2 that is <= the specified value. If value
+* is already a power of 2, then it is returned unchanged. It is undefined
+* if value is <= 0.
+*/
+static inline int SkPrevPow2(int value) {
+ SkASSERT(value > 0);
+ return 1 << SkPrevLog2(static_cast<uint32_t>(value));
+}
+
+constexpr int SkPrevPow2_portable(int value) {
+ SkASSERT(value > 0);
+ return 1 << SkPrevLog2_portable(static_cast<uint32_t>(value));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return the smallest power-of-2 >= n.
+ */
+static inline uint32_t GrNextPow2(uint32_t n) {
+ return n ? (1 << (32 - SkCLZ(n - 1))) : 1;
+}
+
+/**
+ * Returns the next power of 2 >= n or n if the next power of 2 can't be represented by size_t.
+ */
+static inline size_t GrNextSizePow2(size_t n) {
+ constexpr int kNumSizeTBits = 8 * sizeof(size_t);
+ constexpr size_t kHighBitSet = size_t(1) << (kNumSizeTBits - 1);
+
+ if (!n) {
+ return 1;
+ } else if (n >= kHighBitSet) {
+ return n;
+ }
+
+ n--;
+ uint32_t shift = 1;
+ while (shift < kNumSizeTBits) {
+ n |= n >> shift;
+ shift <<= 1;
+ }
+ return n + 1;
+}
+
+// conservative check. will return false for very large values that "could" fit
+template <typename T> static inline bool SkFitsInFixed(T x) {
+ return SkTAbs(x) <= 32767.0f;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkQuads.cpp b/gfx/skia/skia/src/base/SkQuads.cpp
new file mode 100644
index 0000000000..a77837932c
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkQuads.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/base/SkQuads.h"
+
+#include "include/private/base/SkFloatingPoint.h"
+
+#include <cmath>
+
+// Solve 0 = M * x + B. If M is 0, there are no solutions, unless B is also 0,
+// in which case there are infinite solutions, so we just return 1 of them.
+static int solve_linear(const double M, const double B, double solution[2]) {
+ if (sk_double_nearly_zero(M)) {
+ solution[0] = 0;
+ if (sk_double_nearly_zero(B)) {
+ return 1;
+ }
+ return 0;
+ }
+ solution[0] = -B / M;
+ if (!std::isfinite(solution[0])) {
+ return 0;
+ }
+ return 1;
+}
+
+// When the A coefficient of a quadratic is close to 0, there can be floating point error
+// that arises from computing a very large root. In those cases, we would rather be
+// precise about the one smaller root, so we have this arbitrary cutoff for when A is
+// really small or small compared to B.
+static bool close_to_linear(double A, double B) {
+ if (sk_double_nearly_zero(B)) {
+ return sk_double_nearly_zero(A);
+ }
+ // This is a different threshold (tighter) than the close_to_a_quadratic in SkCubics.cpp
+ // because the SkQuads::RootsReal gives better answers for longer as A/B -> 0.
+ return std::abs(A / B) < 1.0e-16;
+}
+
+int SkQuads::RootsReal(const double A, const double B, const double C, double solution[2]) {
+ if (close_to_linear(A, B)) {
+ return solve_linear(B, C, solution);
+ }
+ // If A is zero (e.g. B was nan and thus close_to_linear was false), we will
+ // temporarily have infinities rolling about, but will catch that when checking
+ // p2 - q.
+ const double p = sk_ieee_double_divide(B, 2 * A);
+ const double q = sk_ieee_double_divide(C, A);
+ /* normal form: x^2 + px + q = 0 */
+ const double p2 = p * p;
+ if (!std::isfinite(p2 - q) ||
+ (!sk_double_nearly_zero(p2 - q) && p2 < q)) {
+ return 0;
+ }
+ double sqrt_D = 0;
+ if (p2 > q) {
+ sqrt_D = sqrt(p2 - q);
+ }
+ solution[0] = sqrt_D - p;
+ solution[1] = -sqrt_D - p;
+ if (sk_double_nearly_zero(sqrt_D) ||
+ sk_doubles_nearly_equal_ulps(solution[0], solution[1])) {
+ return 1;
+ }
+ return 2;
+}
diff --git a/gfx/skia/skia/src/base/SkQuads.h b/gfx/skia/skia/src/base/SkQuads.h
new file mode 100644
index 0000000000..645d43bcd4
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkQuads.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkQuads_DEFINED
+#define SkQuads_DEFINED
+
+/**
+ * Utilities for dealing with quadratic formulas with one variable:
+ * f(t) = A*t^2 + B*t + C
+ */
+class SkQuads {
+public:
+ /**
+ * Puts up to 2 real solutions to the equation
+ * A*t^2 + B*t + C = 0
+ * in the provided array.
+ */
+ static int RootsReal(double A, double B, double C,
+ double solution[2]);
+
+ /**
+ * Evaluates the quadratic function with the 3 provided coefficients and the
+ * provided variable.
+ */
+ static double EvalAt(double A, double B, double C, double t) {
+ return A * t * t +
+ B * t +
+ C;
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkRandom.h b/gfx/skia/skia/src/base/SkRandom.h
new file mode 100644
index 0000000000..96b3824896
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkRandom.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRandom_DEFINED
+#define SkRandom_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkFloatBits.h"
+
+#include <cstdint>
+
+typedef float SkScalar;
+
+/** \class SkRandom
+
+ Utility class that implements pseudo random 32bit numbers using Marsaglia's
+ multiply-with-carry "mother of all" algorithm. Unlike rand(), this class holds
+ its own state, so that multiple instances can be used with no side-effects.
+
+ Has a large period and all bits are well-randomized.
+ */
+class SkRandom {
+public:
+ SkRandom() { init(0); }
+ SkRandom(uint32_t seed) { init(seed); }
+ SkRandom(const SkRandom& rand) : fK(rand.fK), fJ(rand.fJ) {}
+
+ SkRandom& operator=(const SkRandom& rand) {
+ fK = rand.fK;
+ fJ = rand.fJ;
+
+ return *this;
+ }
+
+ /** Return the next pseudo random number as an unsigned 32bit value.
+ */
+ uint32_t nextU() {
+ fK = kKMul*(fK & 0xffff) + (fK >> 16);
+ fJ = kJMul*(fJ & 0xffff) + (fJ >> 16);
+ return (((fK << 16) | (fK >> 16)) + fJ);
+ }
+
+ /** Return the next pseudo random number as a signed 32bit value.
+ */
+ int32_t nextS() { return (int32_t)this->nextU(); }
+
+ /**
+ * Returns value [0...1) as an IEEE float
+ */
+ float nextF() {
+ int floatint = 0x3f800000 | (int)(this->nextU() >> 9);
+ float f = SkBits2Float(floatint) - 1.0f;
+ return f;
+ }
+
+ /**
+ * Returns value [min...max) as a float
+ */
+ float nextRangeF(float min, float max) {
+ return min + this->nextF() * (max - min);
+ }
+
+ /** Return the next pseudo random number, as an unsigned value of
+ at most bitCount bits.
+ @param bitCount The maximum number of bits to be returned
+ */
+ uint32_t nextBits(unsigned bitCount) {
+ SkASSERT(bitCount > 0 && bitCount <= 32);
+ return this->nextU() >> (32 - bitCount);
+ }
+
+ /** Return the next pseudo random unsigned number, mapped to lie within
+ [min, max] inclusive.
+ */
+ uint32_t nextRangeU(uint32_t min, uint32_t max) {
+ SkASSERT(min <= max);
+ uint32_t range = max - min + 1;
+ if (0 == range) {
+ return this->nextU();
+ } else {
+ return min + this->nextU() % range;
+ }
+ }
+
+ /** Return the next pseudo random unsigned number, mapped to lie within
+ [0, count).
+ */
+ uint32_t nextULessThan(uint32_t count) {
+ SkASSERT(count > 0);
+ return this->nextRangeU(0, count - 1);
+ }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [0..SK_Scalar1).
+ */
+ SkScalar nextUScalar1() { return SkFixedToScalar(this->nextUFixed1()); }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [min..max).
+ */
+ SkScalar nextRangeScalar(SkScalar min, SkScalar max) {
+ return this->nextUScalar1() * (max - min) + min;
+ }
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [-SK_Scalar1..SK_Scalar1).
+ */
+ SkScalar nextSScalar1() { return SkFixedToScalar(this->nextSFixed1()); }
+
+ /** Return the next pseudo random number as a bool.
+ */
+ bool nextBool() { return this->nextU() >= 0x80000000; }
+
+ /** A biased version of nextBool().
+ */
+ bool nextBiasedBool(SkScalar fractionTrue) {
+ SkASSERT(fractionTrue >= 0 && fractionTrue <= 1);
+ return this->nextUScalar1() <= fractionTrue;
+ }
+
+ /** Reset the random object.
+ */
+ void setSeed(uint32_t seed) { init(seed); }
+
+private:
+ // Initialize state variables with LCG.
+ // We must ensure that both J and K are non-zero, otherwise the
+ // multiply-with-carry step will forevermore return zero.
+ void init(uint32_t seed) {
+ fK = NextLCG(seed);
+ if (0 == fK) {
+ fK = NextLCG(fK);
+ }
+ fJ = NextLCG(fK);
+ if (0 == fJ) {
+ fJ = NextLCG(fJ);
+ }
+ SkASSERT(0 != fK && 0 != fJ);
+ }
+ static uint32_t NextLCG(uint32_t seed) { return kMul*seed + kAdd; }
+
+ /** Return the next pseudo random number expressed as an unsigned SkFixed
+ in the range [0..SK_Fixed1).
+ */
+ SkFixed nextUFixed1() { return this->nextU() >> 16; }
+
+ /** Return the next pseudo random number expressed as a signed SkFixed
+ in the range [-SK_Fixed1..SK_Fixed1).
+ */
+ SkFixed nextSFixed1() { return this->nextS() >> 15; }
+
+ // See "Numerical Recipes in C", 1992 page 284 for these constants
+ // For the LCG that sets the initial state from a seed
+ enum {
+ kMul = 1664525,
+ kAdd = 1013904223
+ };
+ // Constants for the multiply-with-carry steps
+ enum {
+ kKMul = 30345,
+ kJMul = 18000,
+ };
+
+ uint32_t fK;
+ uint32_t fJ;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkRectMemcpy.h b/gfx/skia/skia/src/base/SkRectMemcpy.h
new file mode 100644
index 0000000000..07ba0f0c65
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkRectMemcpy.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRectMemcpy_DEFINED
+#define SkRectMemcpy_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <cstring>
+
+static inline void SkRectMemcpy(void* dst, size_t dstRB, const void* src, size_t srcRB,
+ size_t trimRowBytes, int rowCount) {
+ SkASSERT(trimRowBytes <= dstRB);
+ SkASSERT(trimRowBytes <= srcRB);
+ if (trimRowBytes == dstRB && trimRowBytes == srcRB) {
+ memcpy(dst, src, trimRowBytes * rowCount);
+ return;
+ }
+
+ for (int i = 0; i < rowCount; ++i) {
+ memcpy(dst, src, trimRowBytes);
+ dst = SkTAddOffset<void>(dst, dstRB);
+ src = SkTAddOffset<const void>(src, srcRB);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkSafeMath.cpp b/gfx/skia/skia/src/base/SkSafeMath.cpp
new file mode 100644
index 0000000000..cb69125edb
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkSafeMath.cpp
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkSafeMath.h"
+
+size_t SkSafeMath::Add(size_t x, size_t y) {
+ SkSafeMath tmp;
+ size_t sum = tmp.add(x, y);
+ return tmp.ok() ? sum : SIZE_MAX;
+}
+
+size_t SkSafeMath::Mul(size_t x, size_t y) {
+ SkSafeMath tmp;
+ size_t prod = tmp.mul(x, y);
+ return tmp.ok() ? prod : SIZE_MAX;
+}
diff --git a/gfx/skia/skia/src/base/SkSafeMath.h b/gfx/skia/skia/src/base/SkSafeMath.h
new file mode 100644
index 0000000000..8ca44749f4
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkSafeMath.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafeMath_DEFINED
+#define SkSafeMath_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h" // IWYU pragma: keep
+#include "include/private/base/SkTFitsIn.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+// SkSafeMath always checks that a series of operations do not overflow.
+// This must be correct for all platforms, because this is a check for safety at runtime.
+
+class SkSafeMath {
+public:
+ SkSafeMath() = default;
+
+ bool ok() const { return fOK; }
+ explicit operator bool() const { return fOK; }
+
+ size_t mul(size_t x, size_t y) {
+ return sizeof(size_t) == sizeof(uint64_t) ? mul64(x, y) : mul32(x, y);
+ }
+
+ size_t add(size_t x, size_t y) {
+ size_t result = x + y;
+ fOK &= result >= x;
+ return result;
+ }
+
+ /**
+ * Return a + b, unless this result is an overflow/underflow. In those cases, fOK will
+ * be set to false, and it is undefined what this returns.
+ */
+ int addInt(int a, int b) {
+ if (b < 0 && a < std::numeric_limits<int>::min() - b) {
+ fOK = false;
+ return a;
+ } else if (b > 0 && a > std::numeric_limits<int>::max() - b) {
+ fOK = false;
+ return a;
+ }
+ return a + b;
+ }
+
+ size_t alignUp(size_t x, size_t alignment) {
+ SkASSERT(alignment && !(alignment & (alignment - 1)));
+ return add(x, alignment - 1) & ~(alignment - 1);
+ }
+
+ template <typename T> T castTo(size_t value) {
+ if (!SkTFitsIn<T>(value)) {
+ fOK = false;
+ }
+ return static_cast<T>(value);
+ }
+
+ // These saturate to their results
+ static size_t Add(size_t x, size_t y);
+ static size_t Mul(size_t x, size_t y);
+ static size_t Align4(size_t x) {
+ SkSafeMath safe;
+ return safe.alignUp(x, 4);
+ }
+
+private:
+ uint32_t mul32(uint32_t x, uint32_t y) {
+ uint64_t bx = x;
+ uint64_t by = y;
+ uint64_t result = bx * by;
+ fOK &= result >> 32 == 0;
+        // Overflow information is captured in fOK. Return the result modulo 2^32.
+ return (uint32_t)result;
+ }
+
+ uint64_t mul64(uint64_t x, uint64_t y) {
+ if (x <= std::numeric_limits<uint64_t>::max() >> 32
+ && y <= std::numeric_limits<uint64_t>::max() >> 32) {
+ return x * y;
+ } else {
+ auto hi = [](uint64_t x) { return x >> 32; };
+ auto lo = [](uint64_t x) { return x & 0xFFFFFFFF; };
+
+ uint64_t lx_ly = lo(x) * lo(y);
+ uint64_t hx_ly = hi(x) * lo(y);
+ uint64_t lx_hy = lo(x) * hi(y);
+ uint64_t hx_hy = hi(x) * hi(y);
+ uint64_t result = 0;
+ result = this->add(lx_ly, (hx_ly << 32));
+ result = this->add(result, (lx_hy << 32));
+ fOK &= (hx_hy + (hx_ly >> 32) + (lx_hy >> 32)) == 0;
+
+ #if defined(SK_DEBUG) && defined(__clang__) && defined(__x86_64__)
+ auto double_check = (unsigned __int128)x * y;
+ SkASSERT(result == (double_check & 0xFFFFFFFFFFFFFFFF));
+ SkASSERT(!fOK || (double_check >> 64 == 0));
+ #endif
+
+ return result;
+ }
+ }
+ bool fOK = true;
+};
+
+#endif//SkSafeMath_DEFINED
diff --git a/gfx/skia/skia/src/base/SkScopeExit.h b/gfx/skia/skia/src/base/SkScopeExit.h
new file mode 100644
index 0000000000..9c3581b464
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkScopeExit.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScopeExit_DEFINED
+#define SkScopeExit_DEFINED
+
+#include "include/private/base/SkMacros.h"
+
+#include <functional>
+#include <utility>
+
+/** SkScopeExit calls a std::function<void()> in its destructor. */
+class SkScopeExit {
+public:
+ SkScopeExit() = default;
+ SkScopeExit(std::function<void()> f) : fFn(std::move(f)) {}
+ SkScopeExit(SkScopeExit&& that) : fFn(std::move(that.fFn)) {}
+
+ ~SkScopeExit() {
+ if (fFn) {
+ fFn();
+ }
+ }
+
+ void clear() { fFn = {}; }
+
+ SkScopeExit& operator=(SkScopeExit&& that) {
+ fFn = std::move(that.fFn);
+ return *this;
+ }
+
+private:
+ std::function<void()> fFn;
+
+ SkScopeExit( const SkScopeExit& ) = delete;
+ SkScopeExit& operator=(const SkScopeExit& ) = delete;
+};
+
+/**
+ * SK_AT_SCOPE_EXIT(stmt) evaluates stmt when the current scope ends.
+ *
+ * E.g.
+ * {
+ * int x = 5;
+ * {
+ * SK_AT_SCOPE_EXIT(x--);
+ * SkASSERT(x == 5);
+ * }
+ * SkASSERT(x == 4);
+ * }
+ */
+#define SK_AT_SCOPE_EXIT(stmt) \
+ SkScopeExit SK_MACRO_APPEND_LINE(at_scope_exit_)([&]() { stmt; })
+
+#endif // SkScopeExit_DEFINED
diff --git a/gfx/skia/skia/src/base/SkSemaphore.cpp b/gfx/skia/skia/src/base/SkSemaphore.cpp
new file mode 100644
index 0000000000..cb85fa9745
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkSemaphore.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkSemaphore.h"
+
+#include "include/private/base/SkFeatures.h" // IWYU pragma: keep
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+ #include <dispatch/dispatch.h>
+
+ struct SkSemaphore::OSSemaphore {
+ dispatch_semaphore_t fSemaphore;
+
+ OSSemaphore() { fSemaphore = dispatch_semaphore_create(0/*initial count*/); }
+ ~OSSemaphore() { dispatch_release(fSemaphore); }
+
+ void signal(int n) { while (n --> 0) { dispatch_semaphore_signal(fSemaphore); } }
+ void wait() { dispatch_semaphore_wait(fSemaphore, DISPATCH_TIME_FOREVER); }
+ };
+#elif defined(SK_BUILD_FOR_WIN)
+#include "src/base/SkLeanWindows.h"
+
+ struct SkSemaphore::OSSemaphore {
+ HANDLE fSemaphore;
+
+ OSSemaphore() {
+ fSemaphore = CreateSemaphore(nullptr /*security attributes, optional*/,
+ 0 /*initial count*/,
+ MAXLONG /*max count*/,
+ nullptr /*name, optional*/);
+ }
+ ~OSSemaphore() { CloseHandle(fSemaphore); }
+
+ void signal(int n) {
+ ReleaseSemaphore(fSemaphore, n, nullptr/*returns previous count, optional*/);
+ }
+ void wait() { WaitForSingleObject(fSemaphore, INFINITE/*timeout in ms*/); }
+ };
+#else
+ // It's important we test for Mach before this. This code will compile but not work there.
+ #include <errno.h>
+ #include <semaphore.h>
+ struct SkSemaphore::OSSemaphore {
+ sem_t fSemaphore;
+
+ OSSemaphore() { sem_init(&fSemaphore, 0/*cross process?*/, 0/*initial count*/); }
+ ~OSSemaphore() { sem_destroy(&fSemaphore); }
+
+ void signal(int n) { while (n --> 0) { sem_post(&fSemaphore); } }
+ void wait() {
+ // Try until we're not interrupted.
+ while(sem_wait(&fSemaphore) == -1 && errno == EINTR);
+ }
+ };
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSemaphore::~SkSemaphore() {
+ delete fOSSemaphore;
+}
+
+void SkSemaphore::osSignal(int n) {
+ fOSSemaphoreOnce([this] { fOSSemaphore = new OSSemaphore; });
+ fOSSemaphore->signal(n);
+}
+
+void SkSemaphore::osWait() {
+ fOSSemaphoreOnce([this] { fOSSemaphore = new OSSemaphore; });
+ fOSSemaphore->wait();
+}
+
+bool SkSemaphore::try_wait() {
+ int count = fCount.load(std::memory_order_relaxed);
+ if (count > 0) {
+ return fCount.compare_exchange_weak(count, count-1, std::memory_order_acquire);
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/base/SkStringView.h b/gfx/skia/skia/src/base/SkStringView.h
new file mode 100644
index 0000000000..f8f83ae77e
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkStringView.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStringView_DEFINED
+#define SkStringView_DEFINED
+
+#include <cstring>
+#include <string_view>
+
+namespace skstd {
+
+// C++20 additions
+inline constexpr bool starts_with(std::string_view str, std::string_view prefix) {
+ if (prefix.length() > str.length()) {
+ return false;
+ }
+ return prefix.length() == 0 || !memcmp(str.data(), prefix.data(), prefix.length());
+}
+
+inline constexpr bool starts_with(std::string_view str, std::string_view::value_type c) {
+ return !str.empty() && str.front() == c;
+}
+
+inline constexpr bool ends_with(std::string_view str, std::string_view suffix) {
+ if (suffix.length() > str.length()) {
+ return false;
+ }
+ return suffix.length() == 0 || !memcmp(str.data() + str.length() - suffix.length(),
+ suffix.data(), suffix.length());
+}
+
+inline constexpr bool ends_with(std::string_view str, std::string_view::value_type c) {
+ return !str.empty() && str.back() == c;
+}
+
+// C++23 additions
+inline constexpr bool contains(std::string_view str, std::string_view needle) {
+ return str.find(needle) != std::string_view::npos;
+}
+
+inline constexpr bool contains(std::string_view str, std::string_view::value_type c) {
+ return str.find(c) != std::string_view::npos;
+}
+
+} // namespace skstd
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkTBlockList.h b/gfx/skia/skia/src/base/SkTBlockList.h
new file mode 100644
index 0000000000..88e91a92bb
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkTBlockList.h
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTBlockList_DEFINED
+#define SkTBlockList_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkBlockAllocator.h"
+
+#include <algorithm>
+#include <cstring>
+#include <type_traits>
+#include <utility>
+
+// Forward declarations for the iterators used by SkTBlockList
+using IndexFn = int (*)(const SkBlockAllocator::Block*);
+using NextFn = int (*)(const SkBlockAllocator::Block*, int);
+template<typename T, typename B> using ItemFn = T (*)(B*, int);
+template <typename T, bool Forward, bool Const, IndexFn Start, IndexFn End, NextFn Next,
+ ItemFn<T, typename std::conditional<Const, const SkBlockAllocator::Block,
+ SkBlockAllocator::Block>::type> Resolve>
+class BlockIndexIterator;
+
+/**
+ * SkTBlockList manages dynamic storage for instances of T, reserving fixed blocks such that
+ * allocation is amortized across every N instances. In this way it is a hybrid of an array-based
+ * vector and a linked-list. T can be any type and non-trivial destructors are automatically
+ * invoked when the SkTBlockList is destructed. The addresses of instances are guaranteed
+ * not to move except when a list is concatenated to another.
+ *
+ * The collection supports storing a templated number of elements inline before heap-allocated
+ * blocks are made to hold additional instances. By default, the heap blocks are sized to hold the
+ * same number of items as the inline block. A common pattern is to have the inline size hold only
+ * a small number of items for the common case and then allocate larger blocks when needed.
+ *
+ * If the size of a collection is N, and its block size is B, the complexity of the common
+ * operations are:
+ * - push_back()/emplace_back(): O(1), with malloc O(B)
+ * - pop_back(): O(1), with free O(B)
+ * - front()/back(): O(1)
+ * - reset(): O(N) for non-trivial types, O(N/B) for trivial types
+ * - concat(): O(B)
+ * - random access: O(N/B)
+ * - iteration: O(1) at each step
+ *
+ * These characteristics make it well suited for allocating items in a LIFO ordering, or otherwise
+ * acting as a stack, or simply using it as a typed allocator.
+ */
+template <typename T, int StartingItems = 1>
+class SkTBlockList {
+public:
+ /**
+ * Create an allocator that defaults to using StartingItems as heap increment.
+ */
+ SkTBlockList() : SkTBlockList(StartingItems) {}
+
+ /**
+ * Create an allocator
+ *
+ * @param itemsPerBlock the number of items to allocate at once
+ */
+ explicit SkTBlockList(int itemsPerBlock,
+ SkBlockAllocator::GrowthPolicy policy =
+ SkBlockAllocator::GrowthPolicy::kFixed)
+ : fAllocator(policy,
+ SkBlockAllocator::BlockOverhead<alignof(T)>() + sizeof(T)*itemsPerBlock) {}
+
+ ~SkTBlockList() { this->reset(); }
+
+ /**
+ * Adds an item and returns it.
+ *
+ * @return the added item.
+ */
+ T& push_back() {
+ return *new (this->pushItem()) T;
+ }
+ T& push_back(const T& t) {
+ return *new (this->pushItem()) T(t);
+ }
+ T& push_back(T&& t) {
+ return *new (this->pushItem()) T(std::move(t));
+ }
+
+ template <typename... Args>
+ T& emplace_back(Args&&... args) {
+ return *new (this->pushItem()) T(std::forward<Args>(args)...);
+ }
+
+ /**
+ * Move all items from 'other' to the end of this collection. When this returns, 'other' will
+ * be empty. Items in 'other' may be moved as part of compacting the pre-allocated start of
+ * 'other' into this list (using T's move constructor or memcpy if T is trivially copyable), but
+ * this is O(StartingItems) and not O(N). All other items are concatenated in O(1).
+ */
+ template <int SI>
+ void concat(SkTBlockList<T, SI>&& other);
+
+ /**
+ * Allocate, if needed, space to hold N more Ts before another malloc will occur.
+ */
+ void reserve(int n) {
+ int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
+ if (n > avail) {
+ int reserved = n - avail;
+ // Don't consider existing bytes since we've already determined how to split the N items
+ fAllocator->template reserve<alignof(T)>(
+ reserved * sizeof(T), SkBlockAllocator::kIgnoreExistingBytes_Flag);
+ }
+ }
+
+ /**
+ * Remove the last item, only call if count() != 0
+ */
+ void pop_back() {
+ SkASSERT(this->count() > 0);
+
+ SkBlockAllocator::Block* block = fAllocator->currentBlock();
+
+ // Run dtor for the popped item
+ int releaseIndex = Last(block);
+ GetItem(block, releaseIndex).~T();
+
+ if (releaseIndex == First(block)) {
+ fAllocator->releaseBlock(block);
+ } else {
+ // Since this always follows LIFO, the block should always be able to release the memory
+ SkAssertResult(block->release(releaseIndex, releaseIndex + sizeof(T)));
+ block->setMetadata(Decrement(block, releaseIndex));
+ }
+
+ fAllocator->setMetadata(fAllocator->metadata() - 1);
+ }
+
+ /**
+ * Removes all added items.
+ */
+ void reset() {
+ // Invoke destructors in reverse order if not trivially destructible
+ if constexpr (!std::is_trivially_destructible<T>::value) {
+ for (T& t : this->ritems()) {
+ t.~T();
+ }
+ }
+
+ fAllocator->reset();
+ }
+
+ /**
+ * Returns the item count.
+ */
+ int count() const {
+#ifdef SK_DEBUG
+ // Confirm total count matches sum of block counts
+ int count = 0;
+ for (const auto* b :fAllocator->blocks()) {
+ if (b->metadata() == 0) {
+ continue; // skip empty
+ }
+ count += (sizeof(T) + Last(b) - First(b)) / sizeof(T);
+ }
+ SkASSERT(count == fAllocator->metadata());
+#endif
+ return fAllocator->metadata();
+ }
+
+ /**
+ * Is the count 0?
+ */
+ bool empty() const { return this->count() == 0; }
+
+ /**
+ * Access first item, only call if count() != 0
+ */
+ T& front() {
+        // This assumes that the head block actually has room to store the first item.
+ static_assert(StartingItems >= 1);
+ SkASSERT(this->count() > 0 && fAllocator->headBlock()->metadata() > 0);
+ return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
+ }
+ const T& front() const {
+ SkASSERT(this->count() > 0 && fAllocator->headBlock()->metadata() > 0);
+ return GetItem(fAllocator->headBlock(), First(fAllocator->headBlock()));
+ }
+
+ /**
+ * Access last item, only call if count() != 0
+ */
+ T& back() {
+ SkASSERT(this->count() > 0 && fAllocator->currentBlock()->metadata() > 0);
+ return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
+ }
+ const T& back() const {
+ SkASSERT(this->count() > 0 && fAllocator->currentBlock()->metadata() > 0);
+ return GetItem(fAllocator->currentBlock(), Last(fAllocator->currentBlock()));
+ }
+
+ /**
+ * Access item by index. Not an operator[] since it should not be considered constant time.
+ * Use for-range loops by calling items() or ritems() instead to access all added items in order
+ */
+ T& item(int i) {
+ SkASSERT(i >= 0 && i < this->count());
+
+ // Iterate over blocks until we find the one that contains i.
+ for (auto* b : fAllocator->blocks()) {
+ if (b->metadata() == 0) {
+ continue; // skip empty
+ }
+
+ int start = First(b);
+ int end = Last(b) + sizeof(T); // exclusive
+ int index = start + i * sizeof(T);
+ if (index < end) {
+ return GetItem(b, index);
+ } else {
+ i -= (end - start) / sizeof(T);
+ }
+ }
+ SkUNREACHABLE;
+ }
+ const T& item(int i) const {
+ return const_cast<SkTBlockList*>(this)->item(i);
+ }
+
+private:
+ // Let other SkTBlockLists have access (only ever used when T and S are the same but you
+ // cannot have partial specializations declared as a friend...)
+ template<typename S, int N> friend class SkTBlockList;
+ friend class TBlockListTestAccess; // for fAllocator
+
+ inline static constexpr size_t StartingSize =
+ SkBlockAllocator::Overhead<alignof(T)>() + StartingItems * sizeof(T);
+
+ static T& GetItem(SkBlockAllocator::Block* block, int index) {
+ return *static_cast<T*>(block->ptr(index));
+ }
+ static const T& GetItem(const SkBlockAllocator::Block* block, int index) {
+ return *static_cast<const T*>(block->ptr(index));
+ }
+ static int First(const SkBlockAllocator::Block* b) {
+ return b->firstAlignedOffset<alignof(T)>();
+ }
+ static int Last(const SkBlockAllocator::Block* b) {
+ return b->metadata();
+ }
+ static int Increment(const SkBlockAllocator::Block* b, int index) {
+ return index + sizeof(T);
+ }
+ static int Decrement(const SkBlockAllocator::Block* b, int index) {
+ return index - sizeof(T);
+ }
+
+ void* pushItem() {
+ // 'template' required because fAllocator is a template, calling a template member
+ auto br = fAllocator->template allocate<alignof(T)>(sizeof(T));
+ SkASSERT(br.fStart == br.fAlignedOffset ||
+ br.fAlignedOffset == First(fAllocator->currentBlock()));
+ br.fBlock->setMetadata(br.fAlignedOffset);
+ fAllocator->setMetadata(fAllocator->metadata() + 1);
+ return br.fBlock->ptr(br.fAlignedOffset);
+ }
+
+ // N represents the number of items, whereas SkSBlockAllocator takes total bytes, so must
+ // account for the block allocator's size too.
+ //
+ // This class uses the SkBlockAllocator's metadata to track total count of items, and per-block
+ // metadata to track the index of the last allocated item within each block.
+ SkSBlockAllocator<StartingSize> fAllocator;
+
+public:
+ using Iter = BlockIndexIterator<T&, true, false, &First, &Last, &Increment, &GetItem>;
+ using CIter = BlockIndexIterator<const T&, true, true, &First, &Last, &Increment, &GetItem>;
+ using RIter = BlockIndexIterator<T&, false, false, &Last, &First, &Decrement, &GetItem>;
+ using CRIter = BlockIndexIterator<const T&, false, true, &Last, &First, &Decrement, &GetItem>;
+
+ /**
+ * Iterate over all items in allocation order (oldest to newest) using a for-range loop:
+ *
+ * for (auto&& T : this->items()) {}
+ */
+ Iter items() { return Iter(fAllocator.allocator()); }
+ CIter items() const { return CIter(fAllocator.allocator()); }
+
+ // Iterate from newest to oldest using a for-range loop.
+ RIter ritems() { return RIter(fAllocator.allocator()); }
+ CRIter ritems() const { return CRIter(fAllocator.allocator()); }
+};
+
+template <typename T, int SI1>
+template <int SI2>
+void SkTBlockList<T, SI1>::concat(SkTBlockList<T, SI2>&& other) {
+ // Optimize the common case where the list to append only has a single item
+ if (other.empty()) {
+ return;
+ } else if (other.count() == 1) {
+ this->push_back(other.back());
+ other.pop_back();
+ return;
+ }
+
+ // Manually move all items in other's head block into this list; all heap blocks from 'other'
+ // will be appended to the block linked list (no per-item moves needed then).
+ int headItemCount = 0;
+ SkBlockAllocator::Block* headBlock = other.fAllocator->headBlock();
+ SkDEBUGCODE(int oldCount = this->count();)
+ if (headBlock->metadata() > 0) {
+ int headStart = First(headBlock);
+ int headEnd = Last(headBlock) + sizeof(T); // exclusive
+ headItemCount = (headEnd - headStart) / sizeof(T);
+ int avail = fAllocator->currentBlock()->template avail<alignof(T)>() / sizeof(T);
+ if (headItemCount > avail) {
+ // Make sure there is extra room for the items beyond what's already avail. Use the
+ // kIgnoreGrowthPolicy_Flag to make this reservation as tight as possible since
+ // 'other's heap blocks will be appended after it and any extra space is wasted.
+ fAllocator->template reserve<alignof(T)>((headItemCount - avail) * sizeof(T),
+ SkBlockAllocator::kIgnoreExistingBytes_Flag |
+ SkBlockAllocator::kIgnoreGrowthPolicy_Flag);
+ }
+
+ if constexpr (std::is_trivially_copy_constructible<T>::value) {
+ // memcpy all items at once (or twice between current and reserved space).
+ SkASSERT(std::is_trivially_destructible<T>::value);
+ auto copy = [](SkBlockAllocator::Block* src, int start, SkBlockAllocator* dst, int n) {
+ auto target = dst->template allocate<alignof(T)>(n * sizeof(T));
+ memcpy(target.fBlock->ptr(target.fAlignedOffset), src->ptr(start), n * sizeof(T));
+ target.fBlock->setMetadata(target.fAlignedOffset + (n - 1) * sizeof(T));
+ };
+
+ if (avail > 0) {
+ // Copy 0 to avail items into existing tail block
+ copy(headBlock, headStart, fAllocator.allocator(), std::min(headItemCount, avail));
+ }
+ if (headItemCount > avail) {
+ // Copy (head count - avail) into the extra reserved space
+ copy(headBlock, headStart + avail * sizeof(T),
+ fAllocator.allocator(), headItemCount - avail);
+ }
+ fAllocator->setMetadata(fAllocator->metadata() + headItemCount);
+ } else {
+ // Move every item over one at a time
+ for (int i = headStart; i < headEnd; i += sizeof(T)) {
+ T& toMove = GetItem(headBlock, i);
+ this->push_back(std::move(toMove));
+ // Anything of interest should have been moved, but run this since T isn't
+ // a trusted type.
+ toMove.~T(); // NOLINT(bugprone-use-after-move): calling dtor always allowed
+ }
+ }
+
+ other.fAllocator->releaseBlock(headBlock);
+ }
+
+ // other's head block must have been fully copied since it cannot be stolen
+ SkASSERT(other.fAllocator->headBlock()->metadata() == 0 &&
+ fAllocator->metadata() == oldCount + headItemCount);
+ fAllocator->stealHeapBlocks(other.fAllocator.allocator());
+ fAllocator->setMetadata(fAllocator->metadata() +
+ (other.fAllocator->metadata() - headItemCount));
+ other.fAllocator->setMetadata(0);
+}
+
+/**
+ * BlockIndexIterator provides a reusable iterator template for collections built on top of a
+ * SkBlockAllocator, where each item is of the same type, and the index to an item can be iterated
+ * over in a known manner. It supports const and non-const, and forward and reverse, assuming it's
+ * provided with proper functions for starting, ending, and advancing.
+ */
+template <typename T, // The element type (including any modifiers)
+ bool Forward, // Are indices within a block increasing or decreasing with iteration?
+ bool Const, // Whether or not T is const
+ IndexFn Start, // Returns the index of the first valid item in a block
+ IndexFn End, // Returns the index of the last valid item (so it is inclusive)
+ NextFn Next, // Returns the next index given the current index
+ ItemFn<T, typename std::conditional<Const, const SkBlockAllocator::Block,
+ SkBlockAllocator::Block>::type> Resolve>
+class BlockIndexIterator {
+ using BlockIter = typename SkBlockAllocator::BlockIter<Forward, Const>;
+public:
+ BlockIndexIterator(BlockIter iter) : fBlockIter(iter) {}
+
+ class Item {
+ public:
+ bool operator!=(const Item& other) const {
+ return other.fBlock != fBlock || (SkToBool(*fBlock) && other.fIndex != fIndex);
+ }
+
+ T operator*() const {
+ SkASSERT(*fBlock);
+ return Resolve(*fBlock, fIndex);
+ }
+
+ Item& operator++() {
+ const auto* block = *fBlock;
+ SkASSERT(block && block->metadata() > 0);
+ SkASSERT((Forward && Next(block, fIndex) > fIndex) ||
+ (!Forward && Next(block, fIndex) < fIndex));
+ fIndex = Next(block, fIndex);
+ if ((Forward && fIndex > fEndIndex) || (!Forward && fIndex < fEndIndex)) {
+ ++fBlock;
+ this->setIndices();
+ }
+ return *this;
+ }
+
+ private:
+ friend BlockIndexIterator;
+ using BlockItem = typename BlockIter::Item;
+
+ Item(BlockItem block) : fBlock(block) {
+ this->setIndices();
+ }
+
+ void setIndices() {
+ // Skip empty blocks
+ while(*fBlock && (*fBlock)->metadata() == 0) {
+ ++fBlock;
+ }
+ if (*fBlock) {
+ fIndex = Start(*fBlock);
+ fEndIndex = End(*fBlock);
+ } else {
+ fIndex = 0;
+ fEndIndex = 0;
+ }
+
+ SkASSERT((Forward && fIndex <= fEndIndex) || (!Forward && fIndex >= fEndIndex));
+ }
+
+ BlockItem fBlock;
+ int fIndex;
+ int fEndIndex;
+ };
+
+ Item begin() const { return Item(fBlockIter.begin()); }
+ Item end() const { return Item(fBlockIter.end()); }
+
+private:
+ BlockIter fBlockIter;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkTDArray.cpp b/gfx/skia/skia/src/base/SkTDArray.cpp
new file mode 100644
index 0000000000..2cf7780f95
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkTDArray.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkTDArray.h"
+
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTo.h"
+
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <new>
+#include <utility>
+
+SkTDStorage::SkTDStorage(int sizeOfT) : fSizeOfT{sizeOfT} {}
+
// Constructs storage holding a copy of 'size' items of 'sizeOfT' bytes each,
// copied from 'src'. Capacity is set exactly to 'size'; when size == 0 no
// allocation is made and fStorage stays nullptr.
SkTDStorage::SkTDStorage(const void* src, int size, int sizeOfT)
    : fSizeOfT{sizeOfT}
    , fCapacity{size}
    , fSize{size} {
    if (size > 0) {
        SkASSERT(src != nullptr);
        size_t storageSize = this->bytes(size);
        fStorage = static_cast<std::byte*>(sk_malloc_throw(storageSize));
        memcpy(fStorage, src, storageSize);
    }
}
+
+SkTDStorage::SkTDStorage(const SkTDStorage& that)
+ : SkTDStorage{that.fStorage, that.fSize, that.fSizeOfT} {}
+
// Copy assignment. When the existing allocation is large enough it is reused;
// note fCapacity is deliberately left as-is in that case, so assignment never
// shrinks capacity. Otherwise a fresh copy is built and move-assigned in.
SkTDStorage& SkTDStorage::operator=(const SkTDStorage& that) {
    if (this != &that) {
        if (that.fSize <= fCapacity) {
            // Reuse the current buffer; only the size changes.
            fSize = that.fSize;
            if (fSize > 0) {
                memcpy(fStorage, that.data(), that.size_bytes());
            }
        } else {
            // Not enough room: build a temporary copy and steal its buffer.
            *this = SkTDStorage{that.data(), that.size(), that.fSizeOfT};
        }
    }
    return *this;
}
+
+SkTDStorage::SkTDStorage(SkTDStorage&& that)
+ : fSizeOfT{that.fSizeOfT}
+ , fStorage(std::exchange(that.fStorage, nullptr))
+ , fCapacity{that.fCapacity}
+ , fSize{that.fSize} {}
+
+SkTDStorage& SkTDStorage::operator=(SkTDStorage&& that) {
+ if (this != &that) {
+ this->~SkTDStorage();
+ new (this) SkTDStorage{std::move(that)};
+ }
+ return *this;
+}
+
+SkTDStorage::~SkTDStorage() {
+ sk_free(fStorage);
+}
+
+void SkTDStorage::reset() {
+ const int sizeOfT = fSizeOfT;
+ this->~SkTDStorage();
+ new (this) SkTDStorage{sizeOfT};
+}
+
+void SkTDStorage::swap(SkTDStorage& that) {
+ SkASSERT(fSizeOfT == that.fSizeOfT);
+ using std::swap;
+ swap(fStorage, that.fStorage);
+ swap(fCapacity, that.fCapacity);
+ swap(fSize, that.fSize);
+}
+
+void SkTDStorage::resize(int newSize) {
+ SkASSERT(newSize >= 0);
+ if (newSize > fCapacity) {
+ this->reserve(newSize);
+ }
+ fSize = newSize;
+}
+
// Grows the backing allocation to hold at least 'newCapacity' items; never
// shrinks. Growth is amortized (~25% extra plus a small constant), pinned so
// that the item count can never exceed INT_MAX, and byte-sized elements are
// rounded up to 16-byte allocations to avoid tiny reallocs.
void SkTDStorage::reserve(int newCapacity) {
    SkASSERT(newCapacity >= 0);
    if (newCapacity > fCapacity) {
        // Establish the maximum number of elements that includes a valid count for end. In the
        // largest case end() = &fArray[INT_MAX] which is 1 after the last indexable element.
        static constexpr int kMaxCount = INT_MAX;

        // Assume that the array will max out.
        int expandedReserve = kMaxCount;
        if (kMaxCount - newCapacity > 4) {
            // Add 1/4 more than we need. Add 4 to ensure this grows by at least 1. Pin to
            // kMaxCount if no room for 1/4 growth.
            int growth = 4 + ((newCapacity + 4) >> 2);
            // Read this line as: if (count + growth < kMaxCount) { ... }
            // It's rewritten to avoid signed integer overflow.
            if (kMaxCount - newCapacity > growth) {
                expandedReserve = newCapacity + growth;
            }
        }


        // With a T size of 1, the above allocator produces the progression of 7, 15, ... Since,
        // the sizeof max_align_t is often 16, there is no reason to allocate anything less than
        // 16 bytes. This eliminates a realloc when pushing back bytes to an SkTDArray.
        if (fSizeOfT == 1) {
            // Round up to the multiple of 16.
            expandedReserve = (expandedReserve + 15) & ~15;
        }

        fCapacity = expandedReserve;
        size_t newStorageSize = this->bytes(fCapacity);
        fStorage = static_cast<std::byte*>(sk_realloc_throw(fStorage, newStorageSize));
    }
}
+
+void SkTDStorage::shrink_to_fit() {
+ if (fCapacity != fSize) {
+ fCapacity = fSize;
+ // Because calling realloc with size of 0 is implementation defined, force to a good state
+ // by freeing fStorage.
+ if (fCapacity > 0) {
+ fStorage = static_cast<std::byte*>(sk_realloc_throw(fStorage, this->bytes(fCapacity)));
+ } else {
+ sk_free(fStorage);
+ fStorage = nullptr;
+ }
+ }
+}
+
// Removes 'count' items starting at 'index' by sliding the tail down over
// them. Items are moved as raw bytes (see moveTail/copySrc), so element
// destructors are never invoked. Relative order of remaining items is kept.
void SkTDStorage::erase(int index, int count) {
    SkASSERT(count >= 0);
    SkASSERT(fSize >= count);
    SkASSERT(0 <= index && index <= fSize);

    if (count > 0) {
        // Check that the resulting size fits in an int. This will abort if not.
        const int newCount = this->calculateSizeOrDie(-count);
        this->moveTail(index, index + count, fSize);
        this->resize(newCount);
    }
}
+
// O(1) removal: overwrites the item at 'index' with the last item and shrinks
// by one. The relative order of the remaining items is NOT preserved — hence
// "shuffle". Use erase() when order matters.
void SkTDStorage::removeShuffle(int index) {
    SkASSERT(fSize > 0);
    SkASSERT(0 <= index && index < fSize);
    // Check that the new count is valid.
    const int newCount = this->calculateSizeOrDie(-1);
    this->moveTail(index, fSize - 1, fSize);
    this->resize(newCount);
}
+
+void* SkTDStorage::prepend() {
+ return this->insert(/*index=*/0);
+}
+
+void SkTDStorage::append() {
+ if (fSize < fCapacity) {
+ fSize++;
+ } else {
+ this->insert(fSize);
+ }
+}
+
+void SkTDStorage::append(int count) {
+ SkASSERT(count >= 0);
+ // Read as: if (fSize + count <= fCapacity) {...}. This is a UB safe way to avoid the add.
+ if (fCapacity - fSize >= count) {
+ fSize += count;
+ } else {
+ this->insert(fSize, count, nullptr);
+ }
+}
+
+void* SkTDStorage::append(const void* src, int count) {
+ return this->insert(fSize, count, src);
+}
+
+void* SkTDStorage::insert(int index) {
+ return this->insert(index, /*count=*/1, nullptr);
+}
+
// Opens a gap of 'count' items at 'index'; when 'src' is non-null the gap is
// filled by copying 'count' items from it. Returns the address of item slot
// 'index' (the start of the gap), where callers write the new items.
// NOTE(review): resize() may reallocate, so 'src' is presumably external to
// this storage — a pointer into fStorage would be invalidated; confirm at
// call sites.
void* SkTDStorage::insert(int index, int count, const void* src) {
    SkASSERT(0 <= index && index <= fSize);
    SkASSERT(count >= 0);

    if (count > 0) {
        const int oldCount = fSize;
        // Check that the resulting size fits in an int. This will abort if not.
        const int newCount = this->calculateSizeOrDie(count);
        this->resize(newCount);
        this->moveTail(index + count, index, oldCount);

        if (src != nullptr) {
            this->copySrc(index, src, count);
        }
    }

    return this->address(index);
}
+
+bool operator==(const SkTDStorage& a, const SkTDStorage& b) {
+ return a.size() == b.size() &&
+ (a.size() == 0 || !memcmp(a.data(), b.data(), a.bytes(a.size())));
+}
+
+int SkTDStorage::calculateSizeOrDie(int delta) {
+ // Check that count will not go negative.
+ SkASSERT_RELEASE(-fSize <= delta);
+
+ // We take care to avoid overflow here.
+ // Because count and delta are both signed 32-bit ints, the sum of count and delta is at
+ // most 4294967294, which fits fine in uint32_t. Proof follows in assert.
+ static_assert(UINT32_MAX >= (uint32_t)INT_MAX + (uint32_t)INT_MAX);
+ uint32_t testCount = (uint32_t)fSize + (uint32_t)delta;
+ SkASSERT_RELEASE(SkTFitsIn<int>(testCount));
+ return SkToInt(testCount);
+}
+
+void SkTDStorage::moveTail(int to, int tailStart, int tailEnd) {
+ SkASSERT(0 <= to && to <= fSize);
+ SkASSERT(0 <= tailStart && tailStart <= tailEnd && tailEnd <= fSize);
+ if (to != tailStart && tailStart != tailEnd) {
+ this->copySrc(to, this->address(tailStart), tailEnd - tailStart);
+ }
+}
+
+void SkTDStorage::copySrc(int dstIndex, const void* src, int count) {
+ SkASSERT(count > 0);
+ memmove(this->address(dstIndex), src, this->bytes(count));
+}
diff --git a/gfx/skia/skia/src/base/SkTDPQueue.h b/gfx/skia/skia/src/base/SkTDPQueue.h
new file mode 100644
index 0000000000..3a897130f2
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkTDPQueue.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDPQueue_DEFINED
+#define SkTDPQueue_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkTSort.h"
+
+#include <utility>
+
+/**
+ * This class implements a priority queue. T is the type of the elements in the queue. LESS is a
+ * function that compares two Ts and returns true if the first is higher priority than the second.
+ *
+ * Optionally objects may know their index into the priority queue. The queue will update the index
+ * as the objects move through the queue. This is enabled by using a non-nullptr function for INDEX.
+ * When an INDEX function is provided random deletes from the queue are allowed using remove().
+ * Additionally, the priority is allowed to change as long as priorityDidChange() is called
+ * afterwards. In debug builds the index will be set to -1 before an element is removed from the
+ * queue.
+ */
+template <typename T,
+ bool (*LESS)(const T&, const T&),
+ int* (*INDEX)(const T&) = (int* (*)(const T&))nullptr>
+class SkTDPQueue {
+public:
+ SkTDPQueue() {}
+ SkTDPQueue(int reserve) { fArray.reserve(reserve); }
+
+ SkTDPQueue(SkTDPQueue&&) = default;
+ SkTDPQueue& operator =(SkTDPQueue&&) = default;
+
+ SkTDPQueue(const SkTDPQueue&) = delete;
+ SkTDPQueue& operator=(const SkTDPQueue&) = delete;
+
+ /** Number of items in the queue. */
+ int count() const { return fArray.size(); }
+
+ /** Gets the next item in the queue without popping it. */
+ const T& peek() const { return fArray[0]; }
+ T& peek() { return fArray[0]; }
+
+ /** Removes the next item. */
+ void pop() {
+ this->validate();
+ SkDEBUGCODE(if (SkToBool(INDEX)) { *INDEX(fArray[0]) = -1; })
+ if (1 == fArray.size()) {
+ fArray.pop_back();
+ return;
+ }
+
+ fArray[0] = fArray[fArray.size() - 1];
+ this->setIndex(0);
+ fArray.pop_back();
+ this->percolateDownIfNecessary(0);
+
+ this->validate();
+ }
+
+ /** Inserts a new item in the queue based on its priority. */
+ void insert(T entry) {
+ this->validate();
+ int index = fArray.size();
+ *fArray.append() = entry;
+ this->setIndex(fArray.size() - 1);
+ this->percolateUpIfNecessary(index);
+ this->validate();
+ }
+
+ /** Random access removal. This requires that the INDEX function is non-nullptr. */
+ void remove(T entry) {
+ SkASSERT(nullptr != INDEX);
+ int index = *INDEX(entry);
+ SkASSERT(index >= 0 && index < fArray.size());
+ this->validate();
+ SkDEBUGCODE(*INDEX(fArray[index]) = -1;)
+ if (index == fArray.size() - 1) {
+ fArray.pop_back();
+ return;
+ }
+ fArray[index] = fArray[fArray.size() - 1];
+ fArray.pop_back();
+ this->setIndex(index);
+ this->percolateUpOrDown(index);
+ this->validate();
+ }
+
+ /** Notification that the priority of an entry has changed. This must be called after an
+ item's priority is changed to maintain correct ordering. Changing the priority is only
+ allowed if an INDEX function is provided. */
+ void priorityDidChange(T entry) {
+ SkASSERT(nullptr != INDEX);
+ int index = *INDEX(entry);
+ SkASSERT(index >= 0 && index < fArray.size());
+ this->validate(index);
+ this->percolateUpOrDown(index);
+ this->validate();
+ }
+
+ /** Gets the item at index i in the priority queue (for i < this->count()). at(0) is equivalent
+ to peek(). Otherwise, there is no guarantee about ordering of elements in the queue. */
+ T at(int i) const { return fArray[i]; }
+
+    /** Sorts the queue into priority order. The queue is only guaranteed to remain in sorted order
+ * until any other operation, other than at(), is performed.
+ */
+ void sort() {
+ if (fArray.size() > 1) {
+ SkTQSort<T>(fArray.begin(), fArray.end(), LESS);
+ for (int i = 0; i < fArray.size(); i++) {
+ this->setIndex(i);
+ }
+ this->validate();
+ }
+ }
+
+private:
+ static int LeftOf(int x) { SkASSERT(x >= 0); return 2 * x + 1; }
+ static int ParentOf(int x) { SkASSERT(x > 0); return (x - 1) >> 1; }
+
+ void percolateUpOrDown(int index) {
+ SkASSERT(index >= 0);
+ if (!percolateUpIfNecessary(index)) {
+ this->validate(index);
+ this->percolateDownIfNecessary(index);
+ }
+ }
+
+ bool percolateUpIfNecessary(int index) {
+ SkASSERT(index >= 0);
+ bool percolated = false;
+ do {
+ if (0 == index) {
+ this->setIndex(index);
+ return percolated;
+ }
+ int p = ParentOf(index);
+ if (LESS(fArray[index], fArray[p])) {
+ using std::swap;
+ swap(fArray[index], fArray[p]);
+ this->setIndex(index);
+ index = p;
+ percolated = true;
+ } else {
+ this->setIndex(index);
+ return percolated;
+ }
+ this->validate(index);
+ } while (true);
+ }
+
+ void percolateDownIfNecessary(int index) {
+ SkASSERT(index >= 0);
+ do {
+ int child = LeftOf(index);
+
+ if (child >= fArray.size()) {
+ // We're a leaf.
+ this->setIndex(index);
+ return;
+ }
+
+ if (child + 1 >= fArray.size()) {
+ // We only have a left child.
+ if (LESS(fArray[child], fArray[index])) {
+ using std::swap;
+ swap(fArray[child], fArray[index]);
+ this->setIndex(child);
+ this->setIndex(index);
+ return;
+ }
+ } else if (LESS(fArray[child + 1], fArray[child])) {
+ // The right child is the one we should swap with, if we swap.
+ child++;
+ }
+
+ // Check if we need to swap.
+ if (LESS(fArray[child], fArray[index])) {
+ using std::swap;
+ swap(fArray[child], fArray[index]);
+ this->setIndex(index);
+ index = child;
+ } else {
+ // We're less than both our children.
+ this->setIndex(index);
+ return;
+ }
+ this->validate(index);
+ } while (true);
+ }
+
+ void setIndex(int index) {
+ SkASSERT(index < fArray.size());
+ if (SkToBool(INDEX)) {
+ *INDEX(fArray[index]) = index;
+ }
+ }
+
+ void validate(int excludedIndex = -1) const {
+#ifdef SK_DEBUG
+ for (int i = 1; i < fArray.size(); ++i) {
+ int p = ParentOf(i);
+ if (excludedIndex != p && excludedIndex != i) {
+ SkASSERT(!(LESS(fArray[i], fArray[p])));
+ SkASSERT(!SkToBool(INDEX) || *INDEX(fArray[i]) == i);
+ }
+ }
+#endif
+ }
+
+ SkTDArray<T> fArray;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkTInternalLList.h b/gfx/skia/skia/src/base/SkTInternalLList.h
new file mode 100644
index 0000000000..5b655a35eb
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkTInternalLList.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTInternalLList_DEFINED
+#define SkTInternalLList_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTo.h"
+
+/**
+ * This macro creates the member variables required by the SkTInternalLList class. It should be
+ * placed in the private section of any class that will be stored in a double linked list.
+ */
+#define SK_DECLARE_INTERNAL_LLIST_INTERFACE(ClassName) \
+ friend class SkTInternalLList<ClassName>; \
+ /* back pointer to the owning list - for debugging */ \
+ SkDEBUGCODE(SkTInternalLList<ClassName>* fList = nullptr;) \
+ ClassName* fPrev = nullptr; \
+ ClassName* fNext = nullptr
+
+/**
+ * This class implements a templated internal doubly linked list data structure.
+ */
+template <class T> class SkTInternalLList {
+public:
+ SkTInternalLList() {}
+
+ void reset() {
+ fHead = nullptr;
+ fTail = nullptr;
+ }
+
+ void remove(T* entry) {
+ SkASSERT(fHead && fTail);
+ SkASSERT(this->isInList(entry));
+
+ T* prev = entry->fPrev;
+ T* next = entry->fNext;
+
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ fHead = next;
+ }
+ if (next) {
+ next->fPrev = prev;
+ } else {
+ fTail = prev;
+ }
+
+ entry->fPrev = nullptr;
+ entry->fNext = nullptr;
+
+#ifdef SK_DEBUG
+ entry->fList = nullptr;
+#endif
+ }
+
+ void addToHead(T* entry) {
+ SkASSERT(nullptr == entry->fPrev && nullptr == entry->fNext);
+ SkASSERT(nullptr == entry->fList);
+
+ entry->fPrev = nullptr;
+ entry->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = entry;
+ }
+ fHead = entry;
+ if (nullptr == fTail) {
+ fTail = entry;
+ }
+
+#ifdef SK_DEBUG
+ entry->fList = this;
+#endif
+ }
+
+ void addToTail(T* entry) {
+ SkASSERT(nullptr == entry->fPrev && nullptr == entry->fNext);
+ SkASSERT(nullptr == entry->fList);
+
+ entry->fPrev = fTail;
+ entry->fNext = nullptr;
+ if (fTail) {
+ fTail->fNext = entry;
+ }
+ fTail = entry;
+ if (nullptr == fHead) {
+ fHead = entry;
+ }
+
+#ifdef SK_DEBUG
+ entry->fList = this;
+#endif
+ }
+
+ /**
+ * Inserts a new list entry before an existing list entry. The new entry must not already be
+ * a member of this or any other list. If existingEntry is NULL then the new entry is added
+ * at the tail.
+ */
+ void addBefore(T* newEntry, T* existingEntry) {
+ SkASSERT(newEntry);
+
+ if (nullptr == existingEntry) {
+ this->addToTail(newEntry);
+ return;
+ }
+
+ SkASSERT(this->isInList(existingEntry));
+ newEntry->fNext = existingEntry;
+ T* prev = existingEntry->fPrev;
+ existingEntry->fPrev = newEntry;
+ newEntry->fPrev = prev;
+ if (nullptr == prev) {
+ SkASSERT(fHead == existingEntry);
+ fHead = newEntry;
+ } else {
+ prev->fNext = newEntry;
+ }
+#ifdef SK_DEBUG
+ newEntry->fList = this;
+#endif
+ }
+
+ /**
+ * Inserts a new list entry after an existing list entry. The new entry must not already be
+ * a member of this or any other list. If existingEntry is NULL then the new entry is added
+ * at the head.
+ */
+ void addAfter(T* newEntry, T* existingEntry) {
+ SkASSERT(newEntry);
+
+ if (nullptr == existingEntry) {
+ this->addToHead(newEntry);
+ return;
+ }
+
+ SkASSERT(this->isInList(existingEntry));
+ newEntry->fPrev = existingEntry;
+ T* next = existingEntry->fNext;
+ existingEntry->fNext = newEntry;
+ newEntry->fNext = next;
+ if (nullptr == next) {
+ SkASSERT(fTail == existingEntry);
+ fTail = newEntry;
+ } else {
+ next->fPrev = newEntry;
+ }
+#ifdef SK_DEBUG
+ newEntry->fList = this;
+#endif
+ }
+
+ void concat(SkTInternalLList&& list) {
+ if (list.isEmpty()) {
+ return;
+ }
+
+ list.fHead->fPrev = fTail;
+ if (!fHead) {
+ SkASSERT(!list.fHead->fPrev);
+ fHead = list.fHead;
+ } else {
+ SkASSERT(fTail);
+ fTail->fNext = list.fHead;
+ }
+ fTail = list.fTail;
+
+#ifdef SK_DEBUG
+ for (T* node = list.fHead; node; node = node->fNext) {
+ SkASSERT(node->fList == &list);
+ node->fList = this;
+ }
+#endif
+
+ list.fHead = list.fTail = nullptr;
+ }
+
+ bool isEmpty() const {
+ SkASSERT(SkToBool(fHead) == SkToBool(fTail));
+ return !fHead;
+ }
+
+ T* head() const { return fHead; }
+ T* tail() const { return fTail; }
+
+ class Iter {
+ public:
+ enum IterStart {
+ kHead_IterStart,
+ kTail_IterStart
+ };
+
+ Iter() : fCurr(nullptr) {}
+ Iter(const Iter& iter) : fCurr(iter.fCurr) {}
+ Iter& operator= (const Iter& iter) { fCurr = iter.fCurr; return *this; }
+
+ T* init(const SkTInternalLList& list, IterStart startLoc) {
+ if (kHead_IterStart == startLoc) {
+ fCurr = list.fHead;
+ } else {
+ SkASSERT(kTail_IterStart == startLoc);
+ fCurr = list.fTail;
+ }
+
+ return fCurr;
+ }
+
+ T* get() { return fCurr; }
+
+ /**
+ * Return the next/previous element in the list or NULL if at the end.
+ */
+ T* next() {
+ if (nullptr == fCurr) {
+ return nullptr;
+ }
+
+ fCurr = fCurr->fNext;
+ return fCurr;
+ }
+
+ T* prev() {
+ if (nullptr == fCurr) {
+ return nullptr;
+ }
+
+ fCurr = fCurr->fPrev;
+ return fCurr;
+ }
+
+ /**
+ * C++11 range-for interface.
+ */
+ bool operator!=(const Iter& that) { return fCurr != that.fCurr; }
+ T* operator*() { return this->get(); }
+ void operator++() { this->next(); }
+
+ private:
+ T* fCurr;
+ };
+
+ Iter begin() const {
+ Iter iter;
+ iter.init(*this, Iter::kHead_IterStart);
+ return iter;
+ }
+
+ Iter end() const { return Iter(); }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(!fHead == !fTail);
+ Iter iter;
+ for (T* item = iter.init(*this, Iter::kHead_IterStart); item; item = iter.next()) {
+ SkASSERT(this->isInList(item));
+ if (nullptr == item->fPrev) {
+ SkASSERT(fHead == item);
+ } else {
+ SkASSERT(item->fPrev->fNext == item);
+ }
+ if (nullptr == item->fNext) {
+ SkASSERT(fTail == item);
+ } else {
+ SkASSERT(item->fNext->fPrev == item);
+ }
+ }
+ }
+
+ /**
+ * Debugging-only method that uses the list back pointer to check if 'entry' is indeed in 'this'
+ * list.
+ */
+ bool isInList(const T* entry) const {
+ return entry->fList == this;
+ }
+
+ /**
+ * Debugging-only method that laboriously counts the list entries.
+ */
+ int countEntries() const {
+ int count = 0;
+ for (T* entry = fHead; entry; entry = entry->fNext) {
+ ++count;
+ }
+ return count;
+ }
+#endif // SK_DEBUG
+
+private:
+ T* fHead = nullptr;
+ T* fTail = nullptr;
+
+ SkTInternalLList(const SkTInternalLList&) = delete;
+ SkTInternalLList& operator=(const SkTInternalLList&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkTLazy.h b/gfx/skia/skia/src/base/SkTLazy.h
new file mode 100644
index 0000000000..38b3b373db
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkTLazy.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTLazy_DEFINED
+#define SkTLazy_DEFINED
+
+#include "include/private/base/SkAssert.h"
+
+#include <optional>
+#include <utility>
+
+/**
+ * Efficient way to defer allocating/initializing a class until it is needed
+ * (if ever).
+ */
+template <typename T> class SkTLazy {
+public:
+ SkTLazy() = default;
+ explicit SkTLazy(const T* src) : fValue(src ? std::optional<T>(*src) : std::nullopt) {}
+ SkTLazy(const SkTLazy& that) : fValue(that.fValue) {}
+ SkTLazy(SkTLazy&& that) : fValue(std::move(that.fValue)) {}
+
+ ~SkTLazy() = default;
+
+ SkTLazy& operator=(const SkTLazy& that) {
+ fValue = that.fValue;
+ return *this;
+ }
+
+ SkTLazy& operator=(SkTLazy&& that) {
+ fValue = std::move(that.fValue);
+ return *this;
+ }
+
+ /**
+ * Return a pointer to an instance of the class initialized with 'args'.
+ * If a previous instance had been initialized (either from init() or
+ * set()) it will first be destroyed, so that a freshly initialized
+ * instance is always returned.
+ */
+ template <typename... Args> T* init(Args&&... args) {
+ fValue.emplace(std::forward<Args>(args)...);
+ return this->get();
+ }
+
+ /**
+ * Copy src into this, and return a pointer to a copy of it. Note this
+ * will always return the same pointer, so if it is called on a lazy that
+ * has already been initialized, then this will copy over the previous
+ * contents.
+ */
+ T* set(const T& src) {
+ fValue = src;
+ return this->get();
+ }
+
+ T* set(T&& src) {
+ fValue = std::move(src);
+ return this->get();
+ }
+
+ /**
+ * Destroy the lazy object (if it was created via init() or set())
+ */
+ void reset() {
+ fValue.reset();
+ }
+
+ /**
+ * Returns true if a valid object has been initialized in the SkTLazy,
+ * false otherwise.
+ */
+ bool isValid() const { return fValue.has_value(); }
+
+ /**
+ * Returns the object. This version should only be called when the caller
+ * knows that the object has been initialized.
+ */
+ T* get() {
+ SkASSERT(fValue.has_value());
+ return &fValue.value();
+ }
+ const T* get() const {
+ SkASSERT(fValue.has_value());
+ return &fValue.value();
+ }
+
+ T* operator->() { return this->get(); }
+ const T* operator->() const { return this->get(); }
+
+ T& operator*() {
+ SkASSERT(fValue.has_value());
+ return *fValue;
+ }
+ const T& operator*() const {
+ SkASSERT(fValue.has_value());
+ return *fValue;
+ }
+
+ /**
+ * Like above but doesn't assert if object isn't initialized (in which case
+ * nullptr is returned).
+ */
+ const T* getMaybeNull() const { return fValue.has_value() ? this->get() : nullptr; }
+ T* getMaybeNull() { return fValue.has_value() ? this->get() : nullptr; }
+
+private:
+ std::optional<T> fValue;
+};
+
+/**
+ * A helper built on top of std::optional to do copy-on-first-write. The object is initialized
+ * with a const pointer but provides a non-const pointer accessor. The first time the
+ * accessor is called (if ever) the object is cloned.
+ *
+ * In the following example at most one copy of constThing is made:
+ *
+ * SkTCopyOnFirstWrite<Thing> thing(&constThing);
+ * ...
+ * function_that_takes_a_const_thing_ptr(thing); // constThing is passed
+ * ...
+ * if (need_to_modify_thing()) {
+ * thing.writable()->modifyMe(); // makes a copy of constThing
+ * }
+ * ...
+ * x = thing->readSomething();
+ * ...
+ * if (need_to_modify_thing_now()) {
+ * thing.writable()->changeMe(); // makes a copy of constThing if we didn't call modifyMe()
+ * }
+ *
+ * consume_a_thing(thing); // could be constThing or a modified copy.
+ */
template <typename T>
class SkTCopyOnFirstWrite {
public:
    explicit SkTCopyOnFirstWrite(const T& initial) : fObj(&initial) {}

    explicit SkTCopyOnFirstWrite(const T* initial) : fObj(initial) {}

    // Constructor for delayed initialization.
    SkTCopyOnFirstWrite() : fObj(nullptr) {}

    SkTCopyOnFirstWrite(const SkTCopyOnFirstWrite& that) { *this = that; }
    SkTCopyOnFirstWrite( SkTCopyOnFirstWrite&& that) { *this = std::move(that); }

    SkTCopyOnFirstWrite& operator=(const SkTCopyOnFirstWrite& that) {
        fLazy = that.fLazy;
        // If 'that' had already made its private copy, fObj must point at OUR
        // copy of it, not into 'that' (whose buffer may go away).
        fObj = fLazy.has_value() ? &fLazy.value() : that.fObj;
        return *this;
    }

    SkTCopyOnFirstWrite& operator=(SkTCopyOnFirstWrite&& that) {
        fLazy = std::move(that.fLazy);
        // Same re-pointing as copy assignment: never observe the moved-from
        // object's internal buffer.
        fObj = fLazy.has_value() ? &fLazy.value() : that.fObj;
        return *this;
    }

    // Should only be called once, and only if the default constructor was used.
    void init(const T& initial) {
        SkASSERT(!fObj);
        SkASSERT(!fLazy.has_value());
        fObj = &initial;
    }

    // If not already initialized, in-place instantiates the writable object
    template <typename... Args>
    void initIfNeeded(Args&&... args) {
        if (!fObj) {
            SkASSERT(!fLazy.has_value());
            fObj = &fLazy.emplace(std::forward<Args>(args)...);
        }
    }

    /**
     *  Returns a writable T*. The first time this is called the initial object is cloned.
     *  Subsequent calls return the same private copy.
     */
    T* writable() {
        SkASSERT(fObj);
        if (!fLazy.has_value()) {
            fLazy = *fObj;
            fObj = &fLazy.value();
        }
        return &fLazy.value();
    }

    const T* get() const { return fObj; }

    /**
     *  Operators for treating this as though it were a const pointer.
     */

    const T *operator->() const { return fObj; }

    operator const T*() const { return fObj; }

    const T& operator *() const { return *fObj; }

private:
    const T* fObj;              // points at the initial object, or at fLazy once cloned
    std::optional<T> fLazy;     // holds the private copy after the first write
};
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkTSearch.cpp b/gfx/skia/skia/src/base/SkTSearch.cpp
new file mode 100644
index 0000000000..d91772e03b
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkTSearch.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/base/SkTSearch.h"
+
+#include "include/private/base/SkMalloc.h"
+
+#include <cstring>
+#include <ctype.h>
+
+static inline const char* index_into_base(const char*const* base, int index,
+ size_t elemSize)
+{
+ return *(const char*const*)((const char*)base + index * elemSize);
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize)
+{
+ if (count <= 0)
+ return ~0;
+
+ SkASSERT(base != nullptr);
+
+ int lo = 0;
+ int hi = count - 1;
+
+ while (lo < hi)
+ {
+ int mid = (hi + lo) >> 1;
+ const char* elem = index_into_base(base, mid, elemSize);
+
+ int cmp = strncmp(elem, target, target_len);
+ if (cmp < 0)
+ lo = mid + 1;
+ else if (cmp > 0 || strlen(elem) > target_len)
+ hi = mid;
+ else
+ return mid;
+ }
+
+ const char* elem = index_into_base(base, hi, elemSize);
+ int cmp = strncmp(elem, target, target_len);
+ if (cmp || strlen(elem) > target_len)
+ {
+ if (cmp < 0)
+ hi += 1;
+ hi = ~hi;
+ }
+ return hi;
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t elemSize)
+{
+ return SkStrSearch(base, count, target, strlen(target), elemSize);
+}
+
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t len, size_t elemSize)
+{
+ SkASSERT(target);
+
+ SkAutoAsciiToLC tolc(target, len);
+
+ return SkStrSearch(base, count, tolc.lc(), len, elemSize);
+}
+
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t elemSize)
+{
+ return SkStrLCSearch(base, count, target, strlen(target), elemSize);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
// Makes a lower-cased copy of 'str'. A 'len' of (size_t)-1 (the header's
// default) means "compute it with strlen". Short strings use the inline
// fStorage buffer; longer ones are heap-allocated and freed in the dtor.
SkAutoAsciiToLC::SkAutoAsciiToLC(const char str[], size_t len)
{
    // see if we need to compute the length
    // NOTE(review): the sentinel check casts to long, and the loop below
    // indexes with int, so lengths above INT_MAX are not supported here —
    // presumably fine for the string-table keys this serves; confirm.
    if ((long)len < 0) {
        len = strlen(str);
    }
    fLength = len;

    // assign lc to our preallocated storage if len is small enough, or allocate
    // it on the heap
    char* lc;
    if (len <= STORAGE) {
        lc = fStorage;
    } else {
        lc = (char*)sk_malloc_throw(len + 1);
    }
    fLC = lc;

    // convert any ascii to lower-case. we let non-ascii (utf8) chars pass
    // through unchanged
    for (int i = (int)(len - 1); i >= 0; --i) {
        int c = str[i];
        if ((c & 0x80) == 0) { // is just ascii
            c = tolower(c);
        }
        lc[i] = c;
    }
    lc[len] = 0;
}
+
+SkAutoAsciiToLC::~SkAutoAsciiToLC()
+{
+ if (fLC != fStorage) {
+ sk_free(fLC);
+ }
+}
diff --git a/gfx/skia/skia/src/base/SkTSearch.h b/gfx/skia/skia/src/base/SkTSearch.h
new file mode 100644
index 0000000000..6ebd304029
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkTSearch.h
@@ -0,0 +1,132 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTSearch_DEFINED
+#define SkTSearch_DEFINED
+
+#include "include/private/base/SkAssert.h"
+
+#include <cstddef>
+
+/**
+ * All of the SkTSearch variants want to return the index (0...N-1) of the
+ * found element, or the bit-not of where to insert the element.
+ *
+ * At a simple level, if the return value is negative, it was not found.
+ *
+ * For clients that want to insert the new element if it was not found, use
+ * the following logic:
+ *
+ * int index = SkTSearch(...);
+ * if (index >= 0) {
+ * // found at index
+ * } else {
+ * index = ~index; // now we are positive
+ * // insert at index
+ * }
+ */
+
+
// The most general form of SkTSearch takes an array of T and a key of type K. A functor, less, is
// used to perform comparisons. It has two function operators:
//      bool operator() (const T& t, const K& k)
//      bool operator() (const K& t, const T& k)
// 'elemSize' is the byte stride between consecutive Ts, which lets callers
// search a member column inside an array of larger structs.
// Returns the index of the match, or ~(insertion index) when absent.
template <typename T, typename K, typename LESS>
int SkTSearch(const T base[], int count, const K& key, size_t elemSize, const LESS& less)
{
    SkASSERT(count >= 0);
    if (count <= 0) {
        return ~0;
    }

    SkASSERT(base != nullptr); // base may be nullptr if count is zero

    int lo = 0;
    int hi = count - 1;

    // Invariant: if key is present it lies in [lo, hi].
    while (lo < hi) {
        int mid = lo + ((hi - lo) >> 1);   // overflow-safe midpoint
        const T* elem = (const T*)((const char*)base + mid * elemSize);

        if (less(*elem, key))
            lo = mid + 1;
        else
            hi = mid;
    }

    // lo == hi: 'elem' is the only remaining candidate.
    const T* elem = (const T*)((const char*)base + hi * elemSize);
    if (less(*elem, key)) {
        hi += 1;        // key sorts after every element
        hi = ~hi;
    } else if (less(key, *elem)) {
        hi = ~hi;       // not equal: key would be inserted at hi
    }
    return hi;
}
+
+// Specialization for case when T==K and the caller wants to use a function rather than functor.
+template <typename T, bool (LESS)(const T&, const T&)>
+int SkTSearch(const T base[], int count, const T& target, size_t elemSize) {
+ return SkTSearch(base, count, target, elemSize,
+ [](const T& a, const T& b) { return LESS(a, b); });
+}
+
+// Specialization for T==K, compare using op <.
+template <typename T>
+int SkTSearch(const T base[], int count, const T& target, size_t elemSize) {
+ return SkTSearch(base, count, target, elemSize, [](const T& a, const T& b) { return a < b; });
+}
+
+// Specialization for case where domain is an array of T* and the key value is a T*, and you want
+// to compare the T objects, not the pointers.
+template <typename T, bool (LESS)(const T&, const T&)>
+int SkTSearch(T* base[], int count, T* target, size_t elemSize) {
+ return SkTSearch(base, count, target, elemSize,
+ [](const T* t, const T* k) { return LESS(*t, *k); });
+}
+
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize);
+int SkStrSearch(const char*const* base, int count, const char target[],
+ size_t elemSize);
+
+/** Like SkStrSearch, but treats target as if it were all lower-case. Assumes that
+ base points to a table of lower-case strings.
+*/
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t target_len, size_t elemSize);
+int SkStrLCSearch(const char*const* base, int count, const char target[],
+ size_t elemSize);
+
+/** Helper class to convert a string to lower-case, but only modifying the ascii
+ characters. This makes the routine very fast and never changes the string
+ length, but it is not suitable for linguistic purposes. Normally this is
+    used for building and searching string tables.
+*/
+class SkAutoAsciiToLC {
+public:
+ SkAutoAsciiToLC(const char str[], size_t len = (size_t)-1);
+ ~SkAutoAsciiToLC();
+
+ const char* lc() const { return fLC; }
+ size_t length() const { return fLength; }
+
+private:
+ char* fLC; // points to either the heap or fStorage
+ size_t fLength;
+ enum {
+ STORAGE = 64
+ };
+ char fStorage[STORAGE+1];
+};
+
+// Helper when calling qsort with a compare proc that has typed its arguments
+#define SkCastForQSort(compare) reinterpret_cast<int (*)(const void*, const void*)>(compare)
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkTSort.h b/gfx/skia/skia/src/base/SkTSort.h
new file mode 100644
index 0000000000..a1d35cc158
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkTSort.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTSort_DEFINED
+#define SkTSort_DEFINED
+
+#include "include/private/base/SkTo.h"
+#include "src/base/SkMathPriv.h"
+
+#include <cstddef>
+#include <utility>
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Sifts a broken heap. The input array is a heap from root to bottom
+ * except that the root entry may be out of place.
+ *
+ * Sinks a hole from array[root] to leaf and then sifts the original array[root] element
+ * from the leaf level up.
+ *
+ * This version does extra work, in that it copies child to parent on the way down,
+ * then copies parent to child on the way back up. When copies are inexpensive,
+ * this is an optimization as this sift variant should only be used when
+ * the potentially out of place root entry value is expected to be small.
+ *
+ * @param root the one based index into array of the out-of-place root of the heap.
+ * @param bottom the one based index in the array of the last entry in the heap.
+ */
+template <typename T, typename C>
+void SkTHeapSort_SiftUp(T array[], size_t root, size_t bottom, const C& lessThan) {
+ T x = array[root-1];
+ size_t start = root;
+ size_t j = root << 1;
+ while (j <= bottom) {
+ if (j < bottom && lessThan(array[j-1], array[j])) {
+ ++j;
+ }
+ array[root-1] = array[j-1];
+ root = j;
+ j = root << 1;
+ }
+ j = root >> 1;
+ while (j >= start) {
+ if (lessThan(array[j-1], x)) {
+ array[root-1] = array[j-1];
+ root = j;
+ j = root >> 1;
+ } else {
+ break;
+ }
+ }
+ array[root-1] = x;
+}
+
+/* Sifts a broken heap. The input array is a heap from root to bottom
+ * except that the root entry may be out of place.
+ *
+ * Sifts the array[root] element from the root down.
+ *
+ * @param root the one based index into array of the out-of-place root of the heap.
+ * @param bottom the one based index in the array of the last entry in the heap.
+ */
+template <typename T, typename C>
+void SkTHeapSort_SiftDown(T array[], size_t root, size_t bottom, const C& lessThan) {
+ T x = array[root-1];
+ size_t child = root << 1;
+ while (child <= bottom) {
+ if (child < bottom && lessThan(array[child-1], array[child])) {
+ ++child;
+ }
+ if (lessThan(x, array[child-1])) {
+ array[root-1] = array[child-1];
+ root = child;
+ child = root << 1;
+ } else {
+ break;
+ }
+ }
+ array[root-1] = x;
+}
+
+/** Sorts the array of size count using comparator lessThan using a Heap Sort algorithm. Be sure to
+ * specialize swap if T has an efficient swap operation.
+ *
+ * @param array the array to be sorted.
+ * @param count the number of elements in the array.
+ * @param lessThan a functor with bool operator()(T a, T b) which returns true if a comes before b.
+ */
+template <typename T, typename C> void SkTHeapSort(T array[], size_t count, const C& lessThan) {
+ for (size_t i = count >> 1; i > 0; --i) {
+ SkTHeapSort_SiftDown(array, i, count, lessThan);
+ }
+
+ for (size_t i = count - 1; i > 0; --i) {
+ using std::swap;
+ swap(array[0], array[i]);
+ SkTHeapSort_SiftUp(array, 1, i, lessThan);
+ }
+}
+
+/** Sorts the array of size count using comparator '<' using a Heap Sort algorithm. */
+template <typename T> void SkTHeapSort(T array[], size_t count) {
+ SkTHeapSort(array, count, [](const T& a, const T& b) { return a < b; });
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Sorts the array of size count using comparator lessThan using an Insertion Sort algorithm. */
+template <typename T, typename C>
+void SkTInsertionSort(T* left, int count, const C& lessThan) {
+ T* right = left + count - 1;
+ for (T* next = left + 1; next <= right; ++next) {
+ if (!lessThan(*next, *(next - 1))) {
+ continue;
+ }
+ T insert = std::move(*next);
+ T* hole = next;
+ do {
+ *hole = std::move(*(hole - 1));
+ --hole;
+ } while (left < hole && lessThan(insert, *(hole - 1)));
+ *hole = std::move(insert);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+template <typename T, typename C>
+T* SkTQSort_Partition(T* left, int count, T* pivot, const C& lessThan) {
+ T* right = left + count - 1;
+ using std::swap;
+ T pivotValue = *pivot;
+ swap(*pivot, *right);
+ T* newPivot = left;
+ while (left < right) {
+ if (lessThan(*left, pivotValue)) {
+ swap(*left, *newPivot);
+ newPivot += 1;
+ }
+ left += 1;
+ }
+ swap(*newPivot, *right);
+ return newPivot;
+}
+
+/* Introsort is a modified Quicksort.
+ * When the region to be sorted is a small constant size, it uses Insertion Sort.
+ * When depth becomes zero, it switches over to Heap Sort.
+ * This implementation recurses on the left region after pivoting and loops on the right,
+ * we already limit the stack depth by switching to heap sort,
+ * and cache locality on the data appears more important than saving a few stack frames.
+ *
+ * @param depth at this recursion depth, switch to Heap Sort.
+ * @param left points to the beginning of the region to be sorted
+ * @param count number of items to be sorted
+ * @param lessThan a functor/lambda which returns true if a comes before b.
+ */
+template <typename T, typename C>
+void SkTIntroSort(int depth, T* left, int count, const C& lessThan) {
+ for (;;) {
+ if (count <= 32) {
+ SkTInsertionSort(left, count, lessThan);
+ return;
+ }
+
+ if (depth == 0) {
+ SkTHeapSort<T>(left, count, lessThan);
+ return;
+ }
+ --depth;
+
+ T* middle = left + ((count - 1) >> 1);
+ T* pivot = SkTQSort_Partition(left, count, middle, lessThan);
+ int pivotCount = pivot - left;
+
+ SkTIntroSort(depth, left, pivotCount, lessThan);
+ left += pivotCount + 1;
+ count -= pivotCount + 1;
+ }
+}
+
+/** Sorts the region from left to right using comparator lessThan using Introsort.
+ * Be sure to specialize `swap` if T has an efficient swap operation.
+ *
+ * @param begin points to the beginning of the region to be sorted
+ * @param end points past the end of the region to be sorted
+ * @param lessThan a functor/lambda which returns true if a comes before b.
+ */
+template <typename T, typename C>
+void SkTQSort(T* begin, T* end, const C& lessThan) {
+ int n = SkToInt(end - begin);
+ if (n <= 1) {
+ return;
+ }
+ // Limit Introsort recursion depth to no more than 2 * ceil(log2(n-1)).
+ int depth = 2 * SkNextLog2(n - 1);
+ SkTIntroSort(depth, begin, n, lessThan);
+}
+
+/** Sorts the region from left to right using comparator 'a < b' using Introsort. */
+template <typename T> void SkTQSort(T* begin, T* end) {
+ SkTQSort(begin, end, [](const T& a, const T& b) { return a < b; });
+}
+
+/** Sorts the region from left to right using comparator '*a < *b' using Introsort. */
+template <typename T> void SkTQSort(T** begin, T** end) {
+ SkTQSort(begin, end, [](const T* a, const T* b) { return *a < *b; });
+}
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkThreadID.cpp b/gfx/skia/skia/src/base/SkThreadID.cpp
new file mode 100644
index 0000000000..e5b7a06c7c
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkThreadID.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkThreadID.h"
+
+#ifdef SK_BUILD_FOR_WIN
+ #include "src/base/SkLeanWindows.h"
+ SkThreadID SkGetThreadID() { return GetCurrentThreadId(); }
+#else
+ #include <pthread.h>
+ SkThreadID SkGetThreadID() { return (int64_t)pthread_self(); }
+#endif
diff --git a/gfx/skia/skia/src/base/SkUTF.cpp b/gfx/skia/skia/src/base/SkUTF.cpp
new file mode 100644
index 0000000000..20325fb2b6
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkUTF.cpp
@@ -0,0 +1,316 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/base/SkUTF.h"
+
+#include "include/private/base/SkTFitsIn.h"
+
+static constexpr inline int32_t left_shift(int32_t value, int32_t shift) {
+ return (int32_t) ((uint32_t) value << shift);
+}
+
+template <typename T> static constexpr bool is_align2(T x) { return 0 == (x & 1); }
+
+template <typename T> static constexpr bool is_align4(T x) { return 0 == (x & 3); }
+
+static constexpr inline bool utf16_is_high_surrogate(uint16_t c) { return (c & 0xFC00) == 0xD800; }
+
+static constexpr inline bool utf16_is_low_surrogate(uint16_t c) { return (c & 0xFC00) == 0xDC00; }
+
+/** @returns -1 iff invalid UTF8 byte,
+ 0 iff UTF8 continuation byte,
+ 1 iff ASCII byte,
+ 2 iff leading byte of 2-byte sequence,
+ 3 iff leading byte of 3-byte sequence, and
+ 4 iff leading byte of 4-byte sequence.
+ I.e.: if return value > 0, then gives length of sequence.
+*/
+static int utf8_byte_type(uint8_t c) {
+ if (c < 0x80) {
+ return 1;
+ } else if (c < 0xC0) {
+ return 0;
+ } else if (c >= 0xF5 || (c & 0xFE) == 0xC0) { // "octet values c0, c1, f5 to ff never appear"
+ return -1;
+ } else {
+ int value = (((0xe5 << 24) >> ((unsigned)c >> 4 << 1)) & 3) + 1;
+ // assert(value >= 2 && value <=4);
+ return value;
+ }
+}
+static bool utf8_type_is_valid_leading_byte(int type) { return type > 0; }
+
+static bool utf8_byte_is_continuation(uint8_t c) { return utf8_byte_type(c) == 0; }
+
+////////////////////////////////////////////////////////////////////////////////
+
+int SkUTF::CountUTF8(const char* utf8, size_t byteLength) {
+ if (!utf8 && byteLength) {
+ return -1;
+ }
+ int count = 0;
+ const char* stop = utf8 + byteLength;
+ while (utf8 < stop) {
+ int type = utf8_byte_type(*(const uint8_t*)utf8);
+ if (!utf8_type_is_valid_leading_byte(type) || utf8 + type > stop) {
+ return -1; // Sequence extends beyond end.
+ }
+ while(type-- > 1) {
+ ++utf8;
+ if (!utf8_byte_is_continuation(*(const uint8_t*)utf8)) {
+ return -1;
+ }
+ }
+ ++utf8;
+ ++count;
+ }
+ return count;
+}
+
+int SkUTF::CountUTF16(const uint16_t* utf16, size_t byteLength) {
+ if (!utf16 || !is_align2(intptr_t(utf16)) || !is_align2(byteLength)) {
+ return -1;
+ }
+ const uint16_t* src = (const uint16_t*)utf16;
+ const uint16_t* stop = src + (byteLength >> 1);
+ int count = 0;
+ while (src < stop) {
+ unsigned c = *src++;
+ if (utf16_is_low_surrogate(c)) {
+ return -1;
+ }
+ if (utf16_is_high_surrogate(c)) {
+ if (src >= stop) {
+ return -1;
+ }
+ c = *src++;
+ if (!utf16_is_low_surrogate(c)) {
+ return -1;
+ }
+ }
+ count += 1;
+ }
+ return count;
+}
+
+int SkUTF::CountUTF32(const int32_t* utf32, size_t byteLength) {
+ if (!is_align4(intptr_t(utf32)) || !is_align4(byteLength) || !SkTFitsIn<int>(byteLength >> 2)) {
+ return -1;
+ }
+ const uint32_t kInvalidUnicharMask = 0xFF000000; // unichar fits in 24 bits
+ const uint32_t* ptr = (const uint32_t*)utf32;
+ const uint32_t* stop = ptr + (byteLength >> 2);
+ while (ptr < stop) {
+ if (*ptr & kInvalidUnicharMask) {
+ return -1;
+ }
+ ptr += 1;
+ }
+ return (int)(byteLength >> 2);
+}
+
+template <typename T>
+static SkUnichar next_fail(const T** ptr, const T* end) {
+ *ptr = end;
+ return -1;
+}
+
+SkUnichar SkUTF::NextUTF8(const char** ptr, const char* end) {
+ if (!ptr || !end ) {
+ return -1;
+ }
+ const uint8_t* p = (const uint8_t*)*ptr;
+ if (!p || p >= (const uint8_t*)end) {
+ return next_fail(ptr, end);
+ }
+ int c = *p;
+ int hic = c << 24;
+
+ if (!utf8_type_is_valid_leading_byte(utf8_byte_type(c))) {
+ return next_fail(ptr, end);
+ }
+ if (hic < 0) {
+ uint32_t mask = (uint32_t)~0x3F;
+ hic = left_shift(hic, 1);
+ do {
+ ++p;
+ if (p >= (const uint8_t*)end) {
+ return next_fail(ptr, end);
+ }
+ // check before reading off end of array.
+ uint8_t nextByte = *p;
+ if (!utf8_byte_is_continuation(nextByte)) {
+ return next_fail(ptr, end);
+ }
+ c = (c << 6) | (nextByte & 0x3F);
+ mask <<= 5;
+ } while ((hic = left_shift(hic, 1)) < 0);
+ c &= ~mask;
+ }
+ *ptr = (char*)p + 1;
+ return c;
+}
+
+SkUnichar SkUTF::NextUTF16(const uint16_t** ptr, const uint16_t* end) {
+ if (!ptr || !end ) {
+ return -1;
+ }
+ const uint16_t* src = *ptr;
+ if (!src || src + 1 > end || !is_align2(intptr_t(src))) {
+ return next_fail(ptr, end);
+ }
+ uint16_t c = *src++;
+ SkUnichar result = c;
+ if (utf16_is_low_surrogate(c)) {
+ return next_fail(ptr, end); // srcPtr should never point at low surrogate.
+ }
+ if (utf16_is_high_surrogate(c)) {
+ if (src + 1 > end) {
+ return next_fail(ptr, end); // Truncated string.
+ }
+ uint16_t low = *src++;
+ if (!utf16_is_low_surrogate(low)) {
+ return next_fail(ptr, end);
+ }
+ /*
+ [paraphrased from wikipedia]
+ Take the high surrogate and subtract 0xD800, then multiply by 0x400.
+ Take the low surrogate and subtract 0xDC00. Add these two results
+ together, and finally add 0x10000 to get the final decoded codepoint.
+
+ unicode = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000
+ unicode = (high * 0x400) - (0xD800 * 0x400) + low - 0xDC00 + 0x10000
+ unicode = (high << 10) - (0xD800 << 10) + low - 0xDC00 + 0x10000
+ unicode = (high << 10) + low - ((0xD800 << 10) + 0xDC00 - 0x10000)
+ */
+ result = (result << 10) + (SkUnichar)low - ((0xD800 << 10) + 0xDC00 - 0x10000);
+ }
+ *ptr = src;
+ return result;
+}
+
+SkUnichar SkUTF::NextUTF32(const int32_t** ptr, const int32_t* end) {
+ if (!ptr || !end ) {
+ return -1;
+ }
+ const int32_t* s = *ptr;
+ if (!s || s + 1 > end || !is_align4(intptr_t(s))) {
+ return next_fail(ptr, end);
+ }
+ int32_t value = *s;
+ const uint32_t kInvalidUnicharMask = 0xFF000000; // unichar fits in 24 bits
+ if (value & kInvalidUnicharMask) {
+ return next_fail(ptr, end);
+ }
+ *ptr = s + 1;
+ return value;
+}
+
+size_t SkUTF::ToUTF8(SkUnichar uni, char utf8[SkUTF::kMaxBytesInUTF8Sequence]) {
+ if ((uint32_t)uni > 0x10FFFF) {
+ return 0;
+ }
+ if (uni <= 127) {
+ if (utf8) {
+ *utf8 = (char)uni;
+ }
+ return 1;
+ }
+ char tmp[4];
+ char* p = tmp;
+ size_t count = 1;
+ while (uni > 0x7F >> count) {
+ *p++ = (char)(0x80 | (uni & 0x3F));
+ uni >>= 6;
+ count += 1;
+ }
+ if (utf8) {
+ p = tmp;
+ utf8 += count;
+ while (p < tmp + count - 1) {
+ *--utf8 = *p++;
+ }
+ *--utf8 = (char)(~(0xFF >> count) | uni);
+ }
+ return count;
+}
+
+size_t SkUTF::ToUTF16(SkUnichar uni, uint16_t utf16[2]) {
+ if ((uint32_t)uni > 0x10FFFF) {
+ return 0;
+ }
+ int extra = (uni > 0xFFFF);
+ if (utf16) {
+ if (extra) {
+ utf16[0] = (uint16_t)((0xD800 - 64) + (uni >> 10));
+ utf16[1] = (uint16_t)(0xDC00 | (uni & 0x3FF));
+ } else {
+ utf16[0] = (uint16_t)uni;
+ }
+ }
+ return 1 + extra;
+}
+
+int SkUTF::UTF8ToUTF16(uint16_t dst[], int dstCapacity, const char src[], size_t srcByteLength) {
+ if (!dst) {
+ dstCapacity = 0;
+ }
+
+ int dstLength = 0;
+ uint16_t* endDst = dst + dstCapacity;
+ const char* endSrc = src + srcByteLength;
+ while (src < endSrc) {
+ SkUnichar uni = NextUTF8(&src, endSrc);
+ if (uni < 0) {
+ return -1;
+ }
+
+ uint16_t utf16[2];
+ size_t count = ToUTF16(uni, utf16);
+ if (count == 0) {
+ return -1;
+ }
+ dstLength += count;
+
+ if (dst) {
+ uint16_t* elems = utf16;
+ while (dst < endDst && count > 0) {
+ *dst++ = *elems++;
+ count -= 1;
+ }
+ }
+ }
+ return dstLength;
+}
+
+int SkUTF::UTF16ToUTF8(char dst[], int dstCapacity, const uint16_t src[], size_t srcLength) {
+ if (!dst) {
+ dstCapacity = 0;
+ }
+
+ int dstLength = 0;
+ const char* endDst = dst + dstCapacity;
+ const uint16_t* endSrc = src + srcLength;
+ while (src < endSrc) {
+ SkUnichar uni = NextUTF16(&src, endSrc);
+ if (uni < 0) {
+ return -1;
+ }
+
+ char utf8[SkUTF::kMaxBytesInUTF8Sequence];
+ size_t count = ToUTF8(uni, utf8);
+ if (count == 0) {
+ return -1;
+ }
+ dstLength += count;
+
+ if (dst) {
+ const char* elems = utf8;
+ while (dst < endDst && count > 0) {
+ *dst++ = *elems++;
+ count -= 1;
+ }
+ }
+ }
+ return dstLength;
+}
diff --git a/gfx/skia/skia/src/base/SkUTF.h b/gfx/skia/skia/src/base/SkUTF.h
new file mode 100644
index 0000000000..e50804da98
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkUTF.h
@@ -0,0 +1,95 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkUTF_DEFINED
+#define SkUTF_DEFINED
+
+#include "include/private/base/SkAPI.h"
+
+#include <cstddef>
+#include <cstdint>
+
+typedef int32_t SkUnichar;
+
+namespace SkUTF {
+
+/** Given a sequence of UTF-8 bytes, return the number of unicode codepoints.
+ If the sequence is invalid UTF-8, return -1.
+*/
+SK_SPI int CountUTF8(const char* utf8, size_t byteLength);
+
+/** Given a sequence of aligned UTF-16 characters in machine-endian form,
+ return the number of unicode codepoints. If the sequence is invalid
+ UTF-16, return -1.
+*/
+SK_SPI int CountUTF16(const uint16_t* utf16, size_t byteLength);
+
+/** Given a sequence of aligned UTF-32 characters in machine-endian form,
+ return the number of unicode codepoints. If the sequence is invalid
+ UTF-32, return -1.
+*/
+SK_SPI int CountUTF32(const int32_t* utf32, size_t byteLength);
+
+/** Given a sequence of UTF-8 bytes, return the first unicode codepoint.
+ The pointer will be incremented to point at the next codepoint's start. If
+ invalid UTF-8 is encountered, set *ptr to end and return -1.
+*/
+SK_SPI SkUnichar NextUTF8(const char** ptr, const char* end);
+
+/** Given a sequence of aligned UTF-16 characters in machine-endian form,
+ return the first unicode codepoint. The pointer will be incremented to
+ point at the next codepoint's start. If invalid UTF-16 is encountered,
+ set *ptr to end and return -1.
+*/
+SK_SPI SkUnichar NextUTF16(const uint16_t** ptr, const uint16_t* end);
+
+/** Given a sequence of aligned UTF-32 characters in machine-endian form,
+ return the first unicode codepoint. The pointer will be incremented to
+ point at the next codepoint's start. If invalid UTF-32 is encountered,
+ set *ptr to end and return -1.
+*/
+SK_SPI SkUnichar NextUTF32(const int32_t** ptr, const int32_t* end);
+
+constexpr unsigned kMaxBytesInUTF8Sequence = 4;
+
+/** Convert the unicode codepoint into UTF-8. If `utf8` is non-null, place the
+ result in that array. Return the number of bytes in the result. If `utf8`
+ is null, simply return the number of bytes that would be used. For invalid
+ unicode codepoints, return 0.
+*/
+SK_SPI size_t ToUTF8(SkUnichar uni, char utf8[kMaxBytesInUTF8Sequence] = nullptr);
+
+/** Convert the unicode codepoint into UTF-16. If `utf16` is non-null, place
+ the result in that array. Return the number of UTF-16 code units in the
+ result (1 or 2). If `utf16` is null, simply return the number of code
+ units that would be used. For invalid unicode codepoints, return 0.
+*/
+SK_SPI size_t ToUTF16(SkUnichar uni, uint16_t utf16[2] = nullptr);
+
+/** Returns the number of resulting UTF16 values needed to convert the src utf8 sequence.
+ * If dst is not null, it is filled with the corresponding values up to its capacity.
+ * If there is an error, -1 is returned and the dst[] buffer is undefined.
+ */
+SK_SPI int UTF8ToUTF16(uint16_t dst[], int dstCapacity, const char src[], size_t srcByteLength);
+
+/** Returns the number of resulting UTF8 values needed to convert the src utf16 sequence.
+ * If dst is not null, it is filled with the corresponding values up to its capacity.
+ * If there is an error, -1 is returned and the dst[] buffer is undefined.
+ */
+SK_SPI int UTF16ToUTF8(char dst[], int dstCapacity, const uint16_t src[], size_t srcLength);
+
+/**
+ * Given a UTF-16 code point, returns true iff it is a leading surrogate.
+ * https://unicode.org/faq/utf_bom.html#utf16-2
+ */
+static inline bool IsLeadingSurrogateUTF16(uint16_t c) { return ((c) & 0xFC00) == 0xD800; }
+
+/**
+ * Given a UTF-16 code point, returns true iff it is a trailing surrogate.
+ * https://unicode.org/faq/utf_bom.html#utf16-2
+ */
+static inline bool IsTrailingSurrogateUTF16(uint16_t c) { return ((c) & 0xFC00) == 0xDC00; }
+
+
+} // namespace SkUTF
+
+#endif // SkUTF_DEFINED
diff --git a/gfx/skia/skia/src/base/SkUtils.cpp b/gfx/skia/skia/src/base/SkUtils.cpp
new file mode 100644
index 0000000000..b9852e9389
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkUtils.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkUtils.h"
+
+const char SkHexadecimalDigits::gUpper[16] =
+ { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
+const char SkHexadecimalDigits::gLower[16] =
+ { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
diff --git a/gfx/skia/skia/src/base/SkUtils.h b/gfx/skia/skia/src/base/SkUtils.h
new file mode 100644
index 0000000000..ae2331dfca
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkUtils.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUtils_DEFINED
+#define SkUtils_DEFINED
+
+#include "include/private/base/SkAttributes.h"
+
+#include <cstring>
+#include <type_traits> // is_trivially_copyable
+
+namespace SkHexadecimalDigits {
+ extern const char gUpper[16]; // 0-9A-F
+ extern const char gLower[16]; // 0-9a-f
+} // namespace SkHexadecimalDigits
+
+///////////////////////////////////////////////////////////////////////////////
+
+// If T is an 8-byte GCC or Clang vector extension type, it would naturally
+// pass or return in the MMX mm0 register on 32-bit x86 builds. This has the
+// fun side effect of clobbering any state in the x87 st0 register. (There is
+// no ABI governing who should preserve mm?/st? registers, so no one does!)
+//
+// We force-inline sk_unaligned_load() and sk_unaligned_store() to avoid that,
+// making them safe to use for all types on all platforms, thus solving the
+// problem once and for all!
+
+template <typename T, typename P>
+static SK_ALWAYS_INLINE T sk_unaligned_load(const P* ptr) {
+ static_assert(std::is_trivially_copyable<T>::value);
+ static_assert(std::is_trivially_copyable<P>::value);
+ T val;
+ memcpy(&val, ptr, sizeof(val));
+ return val;
+}
+
+template <typename T, typename P>
+static SK_ALWAYS_INLINE void sk_unaligned_store(P* ptr, T val) {
+ static_assert(std::is_trivially_copyable<T>::value);
+ static_assert(std::is_trivially_copyable<P>::value);
+ memcpy(ptr, &val, sizeof(val));
+}
+
+// Copy the bytes from src into an instance of type Dst and return it.
+template <typename Dst, typename Src>
+static SK_ALWAYS_INLINE Dst sk_bit_cast(const Src& src) {
+ static_assert(sizeof(Dst) == sizeof(Src));
+ return sk_unaligned_load<Dst>(&src);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/base/SkVx.h b/gfx/skia/skia/src/base/SkVx.h
new file mode 100644
index 0000000000..a1731ad0c4
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkVx.h
@@ -0,0 +1,1183 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKVX_DEFINED
+#define SKVX_DEFINED
+
+// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
+//
+// This time we're leaning a bit less on platform-specific intrinsics and a bit
+// more on Clang/GCC vector extensions, but still keeping the option open to
+// drop in platform-specific intrinsics, actually more easily than before.
+//
+// We've also fixed a few of the caveats that used to make SkNx awkward to work
+// with across translation units. skvx::Vec<N,T> always has N*sizeof(T) size
+// and alignment and is safe to use across translation units freely.
+// (Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.)
+
+// Please try to keep this file independent of Skia headers.
+#include <algorithm> // std::min, std::max
+#include <cassert> // assert()
+#include <cmath> // ceilf, floorf, truncf, roundf, sqrtf, etc.
+#include <cstdint> // intXX_t
+#include <cstring> // memcpy()
+#include <initializer_list> // std::initializer_list
+#include <type_traits>
+#include <utility> // std::index_sequence
+
+// Users may disable SIMD with SKNX_NO_SIMD, which may be set via compiler flags.
+// The gn build has no option which sets SKNX_NO_SIMD.
+// Use SKVX_USE_SIMD internally to avoid confusing double negation.
+// Do not use 'defined' in a macro expansion.
+#if !defined(SKNX_NO_SIMD)
+ #define SKVX_USE_SIMD 1
+#else
+ #define SKVX_USE_SIMD 0
+#endif
+
+#if SKVX_USE_SIMD
+ #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__)
+ #include <immintrin.h>
+ #elif defined(__ARM_NEON)
+ #include <arm_neon.h>
+ #elif defined(__wasm_simd128__)
+ #include <wasm_simd128.h>
+ #endif
+#endif
+
+// To avoid ODR violations, all methods must be force-inlined...
+#if defined(_MSC_VER)
+ #define SKVX_ALWAYS_INLINE __forceinline
+#else
+ #define SKVX_ALWAYS_INLINE __attribute__((always_inline))
+#endif
+
+// ... and all standalone functions must be static. Please use these helpers:
+#define SI static inline
+#define SIT template < typename T> SI
+#define SIN template <int N > SI
+#define SINT template <int N, typename T> SI
+#define SINTU template <int N, typename T, typename U, \
+ typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI
+
+namespace skvx {
+
+template <int N, typename T>
+struct alignas(N*sizeof(T)) Vec;
+
+template <int... Ix, int N, typename T>
+SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>&);
+
+template <typename D, typename S>
+SI D bit_pun(const S& s) {
+ static_assert(sizeof(D) == sizeof(S));
+ D d;
+ memcpy(&d, &s, sizeof(D));
+ return d;
+}
+
+// All Vec have the same simple memory layout, the same as `T vec[N]`.
+template <int N, typename T>
+struct alignas(N*sizeof(T)) VecStorage {
+ SKVX_ALWAYS_INLINE VecStorage() = default;
+ SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
+
+ Vec<N/2,T> lo, hi;
+};
+
+template <typename T>
+struct VecStorage<4,T> {
+ SKVX_ALWAYS_INLINE VecStorage() = default;
+ SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
+ SKVX_ALWAYS_INLINE VecStorage(T x, T y, T z, T w) : lo(x,y), hi(z, w) {}
+ SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, T z, T w) : lo(xy), hi(z,w) {}
+ SKVX_ALWAYS_INLINE VecStorage(T x, T y, Vec<2,T> zw) : lo(x,y), hi(zw) {}
+ SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, Vec<2,T> zw) : lo(xy), hi(zw) {}
+
+ SKVX_ALWAYS_INLINE Vec<2,T>& xy() { return lo; }
+ SKVX_ALWAYS_INLINE Vec<2,T>& zw() { return hi; }
+ SKVX_ALWAYS_INLINE T& x() { return lo.lo.val; }
+ SKVX_ALWAYS_INLINE T& y() { return lo.hi.val; }
+ SKVX_ALWAYS_INLINE T& z() { return hi.lo.val; }
+ SKVX_ALWAYS_INLINE T& w() { return hi.hi.val; }
+
+ SKVX_ALWAYS_INLINE Vec<2,T> xy() const { return lo; }
+ SKVX_ALWAYS_INLINE Vec<2,T> zw() const { return hi; }
+ SKVX_ALWAYS_INLINE T x() const { return lo.lo.val; }
+ SKVX_ALWAYS_INLINE T y() const { return lo.hi.val; }
+ SKVX_ALWAYS_INLINE T z() const { return hi.lo.val; }
+ SKVX_ALWAYS_INLINE T w() const { return hi.hi.val; }
+
+ // Exchange-based swizzles. These should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
+ SKVX_ALWAYS_INLINE Vec<4,T> yxwz() const { return shuffle<1,0,3,2>(bit_pun<Vec<4,T>>(*this)); }
+ SKVX_ALWAYS_INLINE Vec<4,T> zwxy() const { return shuffle<2,3,0,1>(bit_pun<Vec<4,T>>(*this)); }
+
+ Vec<2,T> lo, hi;
+};
+
+template <typename T>
+struct VecStorage<2,T> {
+ SKVX_ALWAYS_INLINE VecStorage() = default;
+ SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
+ SKVX_ALWAYS_INLINE VecStorage(T x, T y) : lo(x), hi(y) {}
+
+ SKVX_ALWAYS_INLINE T& x() { return lo.val; }
+ SKVX_ALWAYS_INLINE T& y() { return hi.val; }
+
+ SKVX_ALWAYS_INLINE T x() const { return lo.val; }
+ SKVX_ALWAYS_INLINE T y() const { return hi.val; }
+
+ // This exchange-based swizzle should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
+ SKVX_ALWAYS_INLINE Vec<2,T> yx() const { return shuffle<1,0>(bit_pun<Vec<2,T>>(*this)); }
+
+ SKVX_ALWAYS_INLINE Vec<4,T> xyxy() const {
+ return Vec<4,T>(bit_pun<Vec<2,T>>(*this), bit_pun<Vec<2,T>>(*this));
+ }
+
+ Vec<1,T> lo, hi;
+};
+
+// Translate from a value type T to its corresponding Mask, the result of a comparison.
+template <typename T> struct Mask { using type = T; };
+template <> struct Mask<float > { using type = int32_t; };
+template <> struct Mask<double> { using type = int64_t; };
+template <typename T> using M = typename Mask<T>::type;
+
+template <int N, typename T>
+struct NoConversion { T vals[N]; };
+
+template <int N, typename T>
+struct ConvertNative {
+ typedef NoConversion<N, T> type;
+};
+
+#if SKVX_USE_SIMD && defined(__SSE__)
+template<>
+struct ConvertNative<4, float> {
+ typedef __m128 type;
+};
+
+template<>
+struct ConvertNative<4, int32_t> {
+ typedef __m128i type;
+};
+
+template <>
+struct ConvertNative<4, uint32_t> {
+ typedef __m128i type;
+};
+
+template<>
+struct ConvertNative<8, int16_t> {
+ typedef __m128i type;
+};
+
+template <>
+struct ConvertNative<8, uint16_t> {
+ typedef __m128i type;
+};
+
+template <>
+struct ConvertNative<16, uint8_t> {
+ typedef __m128i type;
+};
+#endif
+
+#if SKVX_USE_SIMD && defined(__AVX__)
+template<>
+struct ConvertNative<8, float> {
+ typedef __m256 type;
+};
+
+template<>
+struct ConvertNative<8, int32_t> {
+ typedef __m256i type;
+};
+
+template <>
+struct ConvertNative<8, uint32_t> {
+ typedef __m256i type;
+};
+
+template<>
+struct ConvertNative<16, int16_t> {
+ typedef __m256i type;
+};
+
+template <>
+struct ConvertNative<16, uint16_t> {
+ typedef __m256i type;
+};
+#endif
+
+#if SKVX_USE_SIMD && defined(__ARM_NEON)
+template<>
+struct ConvertNative<4, float> {
+ typedef float32x4_t type;
+};
+
+template<>
+struct ConvertNative<4, int32_t> {
+ typedef int32x4_t type;
+};
+
+template <>
+struct ConvertNative<4, uint32_t> {
+ typedef uint32x4_t type;
+};
+
+template<>
+struct ConvertNative<4, int16_t> {
+ typedef int16x4_t type;
+};
+
+template <>
+struct ConvertNative<4, uint16_t> {
+ typedef uint16x4_t type;
+};
+
+template<>
+struct ConvertNative<8, int16_t> {
+ typedef int16x8_t type;
+};
+
+template <>
+struct ConvertNative<8, uint16_t> {
+ typedef uint16x8_t type;
+};
+
+template <>
+struct ConvertNative<8, uint8_t> {
+ typedef uint8x8_t type;
+};
+#endif
+
+template <int N, typename T>
+struct alignas(N*sizeof(T)) Vec : public VecStorage<N,T> {
+ typedef T elem_type;
+
+ static_assert((N & (N-1)) == 0, "N must be a power of 2.");
+ static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?");
+
+ // Methods belong here in the class declaration of Vec only if:
+ // - they must be here, like constructors or operator[];
+ // - they'll definitely never want a specialized implementation.
+ // Other operations on Vec should be defined outside the type.
+
+ SKVX_ALWAYS_INLINE Vec() = default;
+ SKVX_ALWAYS_INLINE Vec(typename ConvertNative<N, T>::type native) : Vec(bit_pun<Vec>(native)) {}
+
+ using VecStorage<N,T>::VecStorage;
+
+ // NOTE: Vec{x} produces x000..., whereas Vec(x) produces xxxx.... since this constructor fills
+ // unspecified lanes with 0s, whereas the single T constructor fills all lanes with the value.
+ SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
+ T vals[N] = {0};
+ memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));
+
+ this->lo = Vec<N/2,T>::Load(vals + 0);
+ this->hi = Vec<N/2,T>::Load(vals + N/2);
+ }
+
+ operator typename ConvertNative<N, T>::type() const { return bit_pun<typename ConvertNative<N, T>::type>(*this); }
+
+ SKVX_ALWAYS_INLINE T operator[](int i) const { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
+ SKVX_ALWAYS_INLINE T& operator[](int i) { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
+
+ SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
+ Vec v;
+ memcpy(&v, ptr, sizeof(Vec));
+ return v;
+ }
+ SKVX_ALWAYS_INLINE void store(void* ptr) const {
+ memcpy(ptr, this, sizeof(Vec));
+ }
+};
+
+// Recursion base case: a single-lane vector holding one T.
+template <typename T>
+struct Vec<1,T> {
+ typedef T elem_type;
+
+ T val;
+
+ SKVX_ALWAYS_INLINE Vec() = default;
+
+ Vec(T s) : val(s) {}
+
+ // An empty initializer list yields a zero lane, matching the N-lane constructor's fill.
+ SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {}
+
+ // The index is ignored; there is only one lane.
+ SKVX_ALWAYS_INLINE T operator[](int) const { return val; }
+ SKVX_ALWAYS_INLINE T& operator[](int) { return val; }
+
+ // Load/store go through memcpy, so 'ptr' does not need to be aligned.
+ SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
+ Vec v;
+ memcpy(&v, ptr, sizeof(Vec));
+ return v;
+ }
+ SKVX_ALWAYS_INLINE void store(void* ptr) const {
+ memcpy(ptr, this, sizeof(Vec));
+ }
+};
+
+// Concatenate two half-width vectors into one double-width Vec<2N,T>:
+// the result's low lanes come from 'lo', its high lanes from 'hi'.
+SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
+ Vec<2*N,T> joined;
+ joined.lo = lo;
+ joined.hi = hi;
+ return joined;
+}
+
+// We have three strategies for implementing Vec operations:
+// 1) lean on Clang/GCC vector extensions when available;
+// 2) use map() to apply a scalar function lane-wise;
+// 3) recurse on lo/hi to scalar portable implementations.
+// We can slot in platform-specific implementations as overloads for particular Vec<N,T>,
+// or often integrate them directly into the recursion of style 3), allowing fine control.
+
+#if SKVX_USE_SIMD && (defined(__clang__) || defined(__GNUC__))
+
+ // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
+ #if defined(__clang__)
+ template <int N, typename T>
+ using VExt = T __attribute__((ext_vector_type(N)));
+
+ #elif defined(__GNUC__)
+ template <int N, typename T>
+ struct VExtHelper {
+ typedef T __attribute__((vector_size(N*sizeof(T)))) type;
+ };
+
+ template <int N, typename T>
+ using VExt = typename VExtHelper<N,T>::type;
+
+ // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
+ // to_vec<N,T>() below for N=4 and T=float. This workaround seems to help...
+ SI Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); }
+ #endif
+
+ // Round-trip between Vec and the compiler's native vector-extension type; same size, so
+ // these are pure bit reinterpretations.
+ SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); }
+ SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); }
+
+ SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) + to_vext(y));
+ }
+ SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) - to_vext(y));
+ }
+ SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) * to_vext(y));
+ }
+ SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) / to_vext(y));
+ }
+
+ SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) ^ to_vext(y));
+ }
+ SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) & to_vext(y));
+ }
+ SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) | to_vext(y));
+ }
+ // N.B. vector && and || are lane-wise bitwise ops on masks -- there is no short-circuiting.
+ SINT Vec<N,T> operator&&(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) & to_vext(y));
+ }
+ SINT Vec<N,T> operator||(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return to_vec<N,T>(to_vext(x) | to_vext(y));
+ }
+
+ SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
+ SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
+ SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }
+
+ // Shifts use a single scalar count applied to every lane.
+ SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); }
+ SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); }
+
+ // Comparisons return a lane mask of type M<T> (all bits set for true, 0 for false).
+ SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y));
+ }
+ SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y));
+ }
+ SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y));
+ }
+ SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y));
+ }
+ SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
+ return bit_pun<Vec<N,M<T>>>(to_vext(x) < to_vext(y));
+ }
+ SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
+ return bit_pun<Vec<N,M<T>>>(to_vext(x) > to_vext(y));
+ }
+
+#else
+
+ // Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
+ // We'll implement things portably with N==1 scalar implementations and recursion onto them.
+
+ // N == 1 scalar implementations.
+ SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
+ SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
+ SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
+ SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }
+
+ SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
+ SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
+ SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }
+ // N.B. && and || are implemented bitwise to match the vector-extension path; no short-circuit.
+ SIT Vec<1,T> operator&&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
+ SIT Vec<1,T> operator||(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }
+
+ SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
+ SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
+ SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }
+
+ SIT Vec<1,T> operator<<(const Vec<1,T>& x, int k) { return x.val << k; }
+ SIT Vec<1,T> operator>>(const Vec<1,T>& x, int k) { return x.val >> k; }
+
+ // Comparisons yield all-bits-set (~0) for true, 0 for false, matching SIMD mask semantics.
+ SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) {
+ return x.val == y.val ? ~0 : 0;
+ }
+ SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) {
+ return x.val != y.val ? ~0 : 0;
+ }
+ SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) {
+ return x.val <= y.val ? ~0 : 0;
+ }
+ SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) {
+ return x.val >= y.val ? ~0 : 0;
+ }
+ SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) {
+ return x.val < y.val ? ~0 : 0;
+ }
+ SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) {
+ return x.val > y.val ? ~0 : 0;
+ }
+
+ // Recurse on lo/hi down to N==1 scalar implementations.
+ SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo + y.lo, x.hi + y.hi);
+ }
+ SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo - y.lo, x.hi - y.hi);
+ }
+ SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo * y.lo, x.hi * y.hi);
+ }
+ SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo / y.lo, x.hi / y.hi);
+ }
+
+ SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo ^ y.lo, x.hi ^ y.hi);
+ }
+ SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo & y.lo, x.hi & y.hi);
+ }
+ SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo | y.lo, x.hi | y.hi);
+ }
+ SINT Vec<N,T> operator&&(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo & y.lo, x.hi & y.hi);
+ }
+ SINT Vec<N,T> operator||(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo | y.lo, x.hi | y.hi);
+ }
+
+ SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
+ SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
+ SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }
+
+ SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return join(x.lo << k, x.hi << k); }
+ SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return join(x.lo >> k, x.hi >> k); }
+
+ SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo == y.lo, x.hi == y.hi);
+ }
+ SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo != y.lo, x.hi != y.hi);
+ }
+ SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo <= y.lo, x.hi <= y.hi);
+ }
+ SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo >= y.lo, x.hi >= y.hi);
+ }
+ SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo < y.lo, x.hi < y.hi);
+ }
+ SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
+ return join(x.lo > y.lo, x.hi > y.hi);
+ }
+#endif
+
+// Scalar/vector operations splat the scalar to a vector.
+// SINTU constrains U to types convertible to T, so these forward to the vector/vector forms.
+SINTU Vec<N,T> operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) + y; }
+SINTU Vec<N,T> operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) - y; }
+SINTU Vec<N,T> operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) * y; }
+SINTU Vec<N,T> operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) / y; }
+SINTU Vec<N,T> operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^ y; }
+SINTU Vec<N,T> operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) & y; }
+SINTU Vec<N,T> operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) | y; }
+SINTU Vec<N,T> operator&&(U x, const Vec<N,T>& y) { return Vec<N,T>(x) && y; }
+SINTU Vec<N,T> operator||(U x, const Vec<N,T>& y) { return Vec<N,T>(x) || y; }
+SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; }
+SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; }
+SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; }
+SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; }
+SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) < y; }
+SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) > y; }
+
+SINTU Vec<N,T> operator+ (const Vec<N,T>& x, U y) { return x + Vec<N,T>(y); }
+SINTU Vec<N,T> operator- (const Vec<N,T>& x, U y) { return x - Vec<N,T>(y); }
+SINTU Vec<N,T> operator* (const Vec<N,T>& x, U y) { return x * Vec<N,T>(y); }
+SINTU Vec<N,T> operator/ (const Vec<N,T>& x, U y) { return x / Vec<N,T>(y); }
+SINTU Vec<N,T> operator^ (const Vec<N,T>& x, U y) { return x ^ Vec<N,T>(y); }
+SINTU Vec<N,T> operator& (const Vec<N,T>& x, U y) { return x & Vec<N,T>(y); }
+SINTU Vec<N,T> operator| (const Vec<N,T>& x, U y) { return x | Vec<N,T>(y); }
+SINTU Vec<N,T> operator&&(const Vec<N,T>& x, U y) { return x && Vec<N,T>(y); }
+SINTU Vec<N,T> operator||(const Vec<N,T>& x, U y) { return x || Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x < Vec<N,T>(y); }
+SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x > Vec<N,T>(y); }
+
+// Compound assignment is defined in terms of the binary operators above.
+SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
+SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
+SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
+SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
+SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
+SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
+SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }
+
+SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
+SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }
+
+SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
+SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }
+
+// Some operations we want are not expressible with Clang/GCC vector extensions.
+
+// Clang can reason about naive_if_then_else() and optimize through it better
+// than if_then_else(), so it's sometimes useful to call it directly when we
+// think an entire expression should optimize away, e.g. min()/max().
+// Selects lanes bitwise, so 'cond' lanes must be all-1s (true) or all-0s (false),
+// as produced by the comparison operators.
+SINT Vec<N,T> naive_if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
+ return bit_pun<Vec<N,T>>(( cond & bit_pun<Vec<N, M<T>>>(t)) |
+ (~cond & bit_pun<Vec<N, M<T>>>(e)) );
+}
+
+SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
+ // In practice this scalar implementation is unlikely to be used. See next if_then_else().
+ return bit_pun<Vec<1,T>>(( cond & bit_pun<Vec<1, M<T>>>(t)) |
+ (~cond & bit_pun<Vec<1, M<T>>>(e)) );
+}
+// Lane-wise select: result lane i is t[i] where cond[i] is true (all bits set), else e[i].
+SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
+ // Specializations inline here so they can generalize what types they apply to.
+#if SKVX_USE_SIMD && defined(__AVX2__)
+ if constexpr (N*sizeof(T) == 32) {
+ return bit_pun<Vec<N,T>>(_mm256_blendv_epi8(bit_pun<__m256i>(e),
+ bit_pun<__m256i>(t),
+ bit_pun<__m256i>(cond)));
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__SSE4_1__)
+ if constexpr (N*sizeof(T) == 16) {
+ return bit_pun<Vec<N,T>>(_mm_blendv_epi8(bit_pun<__m128i>(e),
+ bit_pun<__m128i>(t),
+ bit_pun<__m128i>(cond)));
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__ARM_NEON)
+ if constexpr (N*sizeof(T) == 16) {
+ return bit_pun<Vec<N,T>>(vbslq_u8(bit_pun<uint8x16_t>(cond),
+ bit_pun<uint8x16_t>(t),
+ bit_pun<uint8x16_t>(e)));
+ }
+#endif
+ // Recurse for large vectors to try to hit the specializations above.
+ if constexpr (N*sizeof(T) > 16) {
+ return join(if_then_else(cond.lo, t.lo, e.lo),
+ if_then_else(cond.hi, t.hi, e.hi));
+ }
+ // This default can lead to better code than the recursing onto scalars.
+ return naive_if_then_else(cond, t, e);
+}
+
+SIT bool any(const Vec<1,T>& x) { return x.val != 0; }
+
+// Returns true if any lane of x is non-zero (skvx treats any non-zero value as true).
+SINT bool any(const Vec<N,T>& x) {
+ // For any(), the _mm_testz intrinsics are correct and don't require comparing 'x' to 0, so it's
+ // lower latency compared to _mm_movemask + _mm_compneq on plain SSE.
+#if SKVX_USE_SIMD && defined(__AVX2__)
+ if constexpr (N*sizeof(T) == 32) {
+ return !_mm256_testz_si256(bit_pun<__m256i>(x), _mm256_set1_epi32(-1));
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__SSE4_1__)
+ // BUGFIX: this previously tested defined(__SSE_4_1__); the compiler-predefined macro is
+ // __SSE4_1__, so the _mm_testz_si128 fast path was dead code and control always fell
+ // through to the slower plain-SSE movemask path below.
+ if constexpr (N*sizeof(T) == 16) {
+ return !_mm_testz_si128(bit_pun<__m128i>(x), _mm_set1_epi32(-1));
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__SSE__)
+ if constexpr (N*sizeof(T) == 16) {
+ // On SSE, movemask checks only the MSB in each lane, which is fine if the lanes were set
+ // directly from a comparison op (which sets all bits to 1 when true), but skvx::Vec<>
+ // treats any non-zero value as true, so we have to compare 'x' to 0 before calling movemask
+ return _mm_movemask_ps(_mm_cmpneq_ps(bit_pun<__m128>(x), _mm_set1_ps(0))) != 0b0000;
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__aarch64__)
+ // On 64-bit NEON, take the max across lanes, which will be non-zero if any lane was true.
+ // The specific lane-size doesn't really matter in this case since it's really any set bit
+ // that we're looking for.
+ if constexpr (N*sizeof(T) == 8 ) { return vmaxv_u8 (bit_pun<uint8x8_t> (x)) > 0; }
+ if constexpr (N*sizeof(T) == 16) { return vmaxvq_u8(bit_pun<uint8x16_t>(x)) > 0; }
+#endif
+#if SKVX_USE_SIMD && defined(__wasm_simd128__)
+ if constexpr (N == 4 && sizeof(T) == 4) {
+ return wasm_i32x4_any_true(bit_pun<VExt<4,int>>(x));
+ }
+#endif
+ // Portable fallback: recurse on halves down to the scalar base case.
+ return any(x.lo)
+ || any(x.hi);
+}
+
+SIT bool all(const Vec<1,T>& x) { return x.val != 0; }
+
+// Returns true only if every lane of x is non-zero.
+SINT bool all(const Vec<N,T>& x) {
+// Unlike any(), we have to respect the lane layout, or we'll miss cases where a
+// true lane has a mix of 0 and 1 bits.
+#if SKVX_USE_SIMD && defined(__SSE__)
+ // Unfortunately, the _mm_testc intrinsics don't let us avoid the comparison to 0 for all()'s
+ // correctness, so always just use the plain SSE version.
+ if constexpr (N == 4 && sizeof(T) == 4) {
+ return _mm_movemask_ps(_mm_cmpneq_ps(bit_pun<__m128>(x), _mm_set1_ps(0))) == 0b1111;
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__aarch64__)
+ // On 64-bit NEON, take the min across the lanes, which will be non-zero if all lanes are != 0.
+ if constexpr (sizeof(T)==1 && N==8) {return vminv_u8 (bit_pun<uint8x8_t> (x)) > 0;}
+ if constexpr (sizeof(T)==1 && N==16) {return vminvq_u8 (bit_pun<uint8x16_t>(x)) > 0;}
+ if constexpr (sizeof(T)==2 && N==4) {return vminv_u16 (bit_pun<uint16x4_t>(x)) > 0;}
+ if constexpr (sizeof(T)==2 && N==8) {return vminvq_u16(bit_pun<uint16x8_t>(x)) > 0;}
+ if constexpr (sizeof(T)==4 && N==2) {return vminv_u32 (bit_pun<uint32x2_t>(x)) > 0;}
+ if constexpr (sizeof(T)==4 && N==4) {return vminvq_u32(bit_pun<uint32x4_t>(x)) > 0;}
+#endif
+#if SKVX_USE_SIMD && defined(__wasm_simd128__)
+ if constexpr (N == 4 && sizeof(T) == 4) {
+ return wasm_i32x4_all_true(bit_pun<VExt<4,int>>(x));
+ }
+#endif
+ // Portable fallback: recurse on halves down to the scalar base case.
+ return all(x.lo)
+ && all(x.hi);
+}
+
+// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
+// TODO: implement with map()?
+template <typename D, typename S>
+SI Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }
+
+template <typename D, int N, typename S>
+SI Vec<N,D> cast(const Vec<N,S>& src) {
+#if SKVX_USE_SIMD && defined(__clang__)
+ // Clang converts all lanes in one shot via __builtin_convertvector.
+ return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
+#else
+ return join(cast<D>(src.lo), cast<D>(src.hi));
+#endif
+}
+
+// min/max match logic of std::min/std::max, which is important when NaN is involved.
+// Horizontal reductions: the single smallest/largest lane of x.
+SIT T min(const Vec<1,T>& x) { return x.val; }
+SIT T max(const Vec<1,T>& x) { return x.val; }
+SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
+SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }
+
+// Lane-wise min/max of two vectors (naive_if_then_else lets Clang optimize through these).
+SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(y < x, y, x); }
+SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(x < y, y, x); }
+
+SINTU Vec<N,T> min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
+SINTU Vec<N,T> max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
+SINTU Vec<N,T> min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
+SINTU Vec<N,T> max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }
+
+// pin matches the logic of SkTPin, which is important when NaN is involved. The result
+// always lies in the range lo..hi, and a NaN x pins to lo.
+SINT Vec<N,T> pin(const Vec<N,T>& x, const Vec<N,T>& lo, const Vec<N,T>& hi) {
+ const Vec<N,T> capped = min(x, hi);
+ return max(lo, capped);
+}
+
+// Shuffle values from a vector pretty arbitrarily:
+// skvx::Vec<4,float> rgba = {R,G,B,A};
+// shuffle<2,1,0,3> (rgba) ~> {B,G,R,A}
+// shuffle<2,1> (rgba) ~> {B,G}
+// shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
+// shuffle<3,3,3,3> (rgba) ~> {A,A,A,A}
+// The only real restriction is that the output also be a legal N=power-of-two skvx::Vec.
+template <int... Ix, int N, typename T>
+SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
+#if SKVX_USE_SIMD && defined(__clang__)
+ // TODO: can we just always use { x[Ix]... }?
+ return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
+#else
+ return { x[Ix]... };
+#endif
+}
+
+// Call map(fn, x) for a vector with fn() applied to each lane of x, { fn(x[0]), fn(x[1]), ... },
+// or map(fn, x,y) for a vector of fn(x[i], y[i]), etc.
+
+// Implementation detail: expands the index pack I... to build the result lane by lane.
+template <typename Fn, typename... Args, size_t... I>
+SI auto map(std::index_sequence<I...>,
+ Fn&& fn, const Args&... args) -> skvx::Vec<sizeof...(I), decltype(fn(args[0]...))> {
+ auto lane = [&](size_t i)
+#if defined(__clang__)
+ // CFI, specifically -fsanitize=cfi-icall, seems to give a false positive here,
+ // with errors like "control flow integrity check for type 'float (float)
+ // noexcept' failed during indirect function call... note: sqrtf.cfi_jt defined
+ // here". But we can be quite sure fn is the right type: it's all inferred!
+ // So, stifle CFI in this function.
+ __attribute__((no_sanitize("cfi")))
+#endif
+ { return fn(args[static_cast<int>(i)]...); };
+
+ return { lane(I)... };
+}
+
+// Public entry point: applies fn lane-wise across one or more same-length vectors.
+template <typename Fn, int N, typename T, typename... Rest>
+auto map(Fn&& fn, const Vec<N,T>& first, const Rest&... rest) {
+ // Derive an {0...N-1} index_sequence from the size of the first arg: N lanes in, N lanes out.
+ return map(std::make_index_sequence<N>{}, fn, first,rest...);
+}
+
+// Lane-wise wrappers over the C math library, implemented with map().
+SIN Vec<N,float> ceil(const Vec<N,float>& x) { return map( ceilf, x); }
+SIN Vec<N,float> floor(const Vec<N,float>& x) { return map(floorf, x); }
+SIN Vec<N,float> trunc(const Vec<N,float>& x) { return map(truncf, x); }
+SIN Vec<N,float> round(const Vec<N,float>& x) { return map(roundf, x); }
+SIN Vec<N,float> sqrt(const Vec<N,float>& x) { return map( sqrtf, x); }
+SIN Vec<N,float> abs(const Vec<N,float>& x) { return map( fabsf, x); }
+// Lane-wise fused multiply-add: x*y + z.
+SIN Vec<N,float> fma(const Vec<N,float>& x,
+ const Vec<N,float>& y,
+ const Vec<N,float>& z) {
+ // I don't understand why Clang's codegen is terrible if we write map(fmaf, x,y,z) directly.
+ auto fn = [](float x, float y, float z) { return fmaf(x,y,z); };
+ return map(fn, x,y,z);
+}
+
+// Lane-wise round-to-nearest-int (current rounding mode), like lrintf().
+SI Vec<1,int> lrint(const Vec<1,float>& x) {
+ return (int)lrintf(x.val);
+}
+SIN Vec<N,int> lrint(const Vec<N,float>& x) {
+#if SKVX_USE_SIMD && defined(__AVX__)
+ if constexpr (N == 8) {
+ return bit_pun<Vec<N,int>>(_mm256_cvtps_epi32(bit_pun<__m256>(x)));
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__SSE__)
+ if constexpr (N == 4) {
+ return bit_pun<Vec<N,int>>(_mm_cvtps_epi32(bit_pun<__m128>(x)));
+ }
+#endif
+ // Recurse on halves to hit the SIMD specializations above where possible.
+ return join(lrint(x.lo),
+ lrint(x.hi));
+}
+
+// Fractional part of each lane: fract(x) = x - floor(x).
+SIN Vec<N,float> fract(const Vec<N,float>& x) {
+ const Vec<N,float> whole = floor(x);
+ return x - whole;
+}
+
+// Assumes inputs are finite and treat/flush denorm half floats as/to zero.
+// Key constants to watch for:
+// - a float is 32-bit, 1-8-23 sign-exponent-mantissa, with 127 exponent bias;
+// - a half is 16-bit, 1-5-10 sign-exponent-mantissa, with 15 exponent bias.
+SIN Vec<N,uint16_t> to_half_finite_ftz(const Vec<N,float>& x) {
+ Vec<N,uint32_t> sem = bit_pun<Vec<N,uint32_t>>(x),
+ s = sem & 0x8000'0000, // sign bit
+ em = sem ^ s, // exponent+mantissa, sign stripped
+ is_norm = em > 0x387f'd000, // halfway between largest f16 denorm and smallest norm
+ norm = (em>>13) - ((127-15)<<10); // rebias exponent, truncate mantissa to 10 bits
+ return cast<uint16_t>((s>>16) | (is_norm & norm));
+}
+SIN Vec<N,float> from_half_finite_ftz(const Vec<N,uint16_t>& x) {
+ Vec<N,uint32_t> wide = cast<uint32_t>(x),
+ s = wide & 0x8000, // sign bit (in half position)
+ em = wide ^ s, // exponent+mantissa, sign stripped
+ is_norm = em > 0x3ff, // any exponent bit set => normal half
+ norm = (em<<13) + ((127-15)<<23); // widen mantissa, rebias exponent
+ return bit_pun<Vec<N,float>>((s<<16) | (is_norm & norm));
+}
+
+// Like if_then_else(), these N=1 base cases won't actually be used unless explicitly called.
+SI Vec<1,uint16_t> to_half(const Vec<1,float>& x) { return to_half_finite_ftz(x); }
+SI Vec<1,float> from_half(const Vec<1,uint16_t>& x) { return from_half_finite_ftz(x); }
+
+// float -> half conversion, using hardware conversions where available.
+SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) {
+#if SKVX_USE_SIMD && defined(__F16C__)
+ if constexpr (N == 8) {
+ return bit_pun<Vec<N,uint16_t>>(_mm256_cvtps_ph(bit_pun<__m256>(x),
+ _MM_FROUND_TO_NEAREST_INT));
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__aarch64__)
+ if constexpr (N == 4) {
+ return bit_pun<Vec<N,uint16_t>>(vcvt_f16_f32(bit_pun<float32x4_t>(x)));
+
+ }
+#endif
+ // Split large vectors so halves can hit the specializations above.
+ if constexpr (N > 4) {
+ return join(to_half(x.lo),
+ to_half(x.hi));
+ }
+ return to_half_finite_ftz(x);
+}
+
+// half -> float conversion, using hardware conversions where available.
+SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) {
+#if SKVX_USE_SIMD && defined(__F16C__)
+ if constexpr (N == 8) {
+ return bit_pun<Vec<N,float>>(_mm256_cvtph_ps(bit_pun<__m128i>(x)));
+ }
+#endif
+#if SKVX_USE_SIMD && defined(__aarch64__)
+ if constexpr (N == 4) {
+ return bit_pun<Vec<N,float>>(vcvt_f32_f16(bit_pun<float16x4_t>(x)));
+ }
+#endif
+ // Split large vectors so halves can hit the specializations above.
+ if constexpr (N > 4) {
+ return join(from_half(x.lo),
+ from_half(x.hi));
+ }
+ return from_half_finite_ftz(x);
+}
+
+// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
+SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
+ return cast<uint8_t>( (x+127)/255 );
+}
+
+// approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
+// and is always perfect when x or y is 0 or 255.
+SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
+ // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above.
+ // We happen to have historically picked (x*y+x)/256.
+ auto X = cast<uint16_t>(x),
+ Y = cast<uint16_t>(y);
+ return cast<uint8_t>( (X*Y+X)/256 );
+}
+
+// saturated_add(x,y) sums values and clamps to the maximum value instead of overflowing.
+// Only defined for unsigned T (enable_if), since the overflow check relies on wraparound.
+SINT std::enable_if_t<std::is_unsigned_v<T>, Vec<N,T>> saturated_add(const Vec<N,T>& x,
+ const Vec<N,T>& y) {
+#if SKVX_USE_SIMD && (defined(__SSE__) || defined(__ARM_NEON))
+ // Both SSE and ARM have 16-lane saturated adds, so use intrinsics for those and recurse down
+ // or join up to take advantage.
+ if constexpr (N == 16 && sizeof(T) == 1) {
+ #if defined(__SSE__)
+ return bit_pun<Vec<N,T>>(_mm_adds_epu8(bit_pun<__m128i>(x), bit_pun<__m128i>(y)));
+ #else // __ARM_NEON
+ return bit_pun<Vec<N,T>>(vqaddq_u8(bit_pun<uint8x16_t>(x), bit_pun<uint8x16_t>(y)));
+ #endif
+ } else if constexpr (N < 16 && sizeof(T) == 1) {
+ // Widen to 16 lanes to reach the intrinsic, then keep only the low half.
+ return saturated_add(join(x,x), join(y,y)).lo;
+ } else if constexpr (sizeof(T) == 1) {
+ return join(saturated_add(x.lo, y.lo), saturated_add(x.hi, y.hi));
+ }
+#endif
+ // Otherwise saturate manually
+ // (unsigned wraparound means sum < x exactly when x + y overflowed).
+ auto sum = x + y;
+ return if_then_else(sum < x, Vec<N,T>(std::numeric_limits<T>::max()), sum);
+}
+
+// The ScaledDividerU32 takes a divisor > 1, and creates a function divide(numerator) that
+// calculates a numerator / denominator. For this to be rounded properly, numerator should have
+// half added in:
+// divide(numerator + half) == floor(numerator/denominator + 1/2).
+//
+// This gives an answer within +/- 1 from the true value.
+//
+// Derivation of half:
+// numerator/denominator + 1/2 = (numerator + half) / d
+// numerator + denominator / 2 = numerator + half
+// half = denominator / 2.
+//
+// Because half is divided by 2, that division must also be rounded.
+// half == denominator / 2 = (denominator + 1) / 2.
+//
+// The divisorFactor is just a scaled value:
+// divisorFactor = (1 / divisor) * 2 ^ 32.
+// The maximum that can be divided and rounded is UINT_MAX - half.
+class ScaledDividerU32 {
+public:
+ // divisor must be > 1 (asserted); precomputes the fixed-point reciprocal and rounding half.
+ explicit ScaledDividerU32(uint32_t divisor)
+ : fDivisorFactor{(uint32_t)(std::round((1.0 / divisor) * (1ull << 32)))}
+ , fHalf{(divisor + 1) >> 1} {
+ assert(divisor > 1);
+ }
+
+ // Divides each lane by the configured divisor via a 32x32->64 multiply and shift.
+ Vec<4, uint32_t> divide(const Vec<4, uint32_t>& numerator) const {
+#if SKVX_USE_SIMD && defined(__ARM_NEON)
+ uint64x2_t hi = vmull_n_u32(vget_high_u32(to_vext(numerator)), fDivisorFactor);
+ uint64x2_t lo = vmull_n_u32(vget_low_u32(to_vext(numerator)), fDivisorFactor);
+
+ return to_vec<4, uint32_t>(vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)));
+#else
+ return cast<uint32_t>((cast<uint64_t>(numerator) * fDivisorFactor) >> 32);
+#endif
+ }
+
+ // The rounding bias callers should add to the numerator before divide().
+ uint32_t half() const { return fHalf; }
+
+private:
+ const uint32_t fDivisorFactor; // (1/divisor) in 0.32 fixed point
+ const uint32_t fHalf; // (divisor + 1) / 2
+};
+
+
+// Widening multiply: u8*u8 -> u16 per lane, never overflowing.
+SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
+ const Vec<N,uint8_t>& y) {
+#if SKVX_USE_SIMD && defined(__ARM_NEON)
+ // With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long).
+ if constexpr (N == 8) {
+ return to_vec<8,uint16_t>(vmull_u8(to_vext(x), to_vext(y)));
+ } else if constexpr (N < 8) {
+ // Widen to 8 lanes to reach the intrinsic, then keep only the low half.
+ return mull(join(x,x), join(y,y)).lo;
+ } else { // N > 8
+ return join(mull(x.lo, y.lo), mull(x.hi, y.hi));
+ }
+#else
+ return cast<uint16_t>(x) * cast<uint16_t>(y);
+#endif
+}
+
+// Widening multiply: u16*u16 -> u32 per lane, never overflowing.
+SIN Vec<N,uint32_t> mull(const Vec<N,uint16_t>& x,
+ const Vec<N,uint16_t>& y) {
+#if SKVX_USE_SIMD && defined(__ARM_NEON)
+ // NEON can do four u16*u16 -> u32 in one instruction, vmull_u16
+ if constexpr (N == 4) {
+ return to_vec<4,uint32_t>(vmull_u16(to_vext(x), to_vext(y)));
+ } else if constexpr (N < 4) {
+ return mull(join(x,x), join(y,y)).lo;
+ } else { // N > 4
+ return join(mull(x.lo, y.lo), mull(x.hi, y.hi));
+ }
+#else
+ return cast<uint32_t>(x) * cast<uint32_t>(y);
+#endif
+}
+
+// High half of the u16*u16 product per lane: (x*y) >> 16.
+SIN Vec<N,uint16_t> mulhi(const Vec<N,uint16_t>& x,
+ const Vec<N,uint16_t>& y) {
+#if SKVX_USE_SIMD && defined(__SSE__)
+ // Use _mm_mulhi_epu16 for 8xuint16_t and join or split to get there.
+ if constexpr (N == 8) {
+ return bit_pun<Vec<8,uint16_t>>(_mm_mulhi_epu16(bit_pun<__m128i>(x), bit_pun<__m128i>(y)));
+ } else if constexpr (N < 8) {
+ return mulhi(join(x,x), join(y,y)).lo;
+ } else { // N > 8
+ return join(mulhi(x.lo, y.lo), mulhi(x.hi, y.hi));
+ }
+#else
+ return skvx::cast<uint16_t>(mull(x, y) >> 16);
+#endif
+}
+
+// Horizontal dot product: sum over lanes of a[i]*b[i].
+SINT T dot(const Vec<N, T>& a, const Vec<N, T>& b) {
+ // While dot is a "horizontal" operation like any or all, it needs to remain
+ // in floating point and there aren't really any good SIMD instructions that make it faster.
+ // The constexpr cases remove the for loop in the only cases we realistically call.
+ auto ab = a*b;
+ if constexpr (N == 2) {
+ return ab[0] + ab[1];
+ } else if constexpr (N == 4) {
+ return ab[0] + ab[1] + ab[2] + ab[3];
+ } else {
+ T sum = ab[0];
+ for (int i = 1; i < N; ++i) {
+ sum += ab[i];
+ }
+ return sum;
+ }
+}
+
+// 2D cross product (z-component): a.x*b.y - a.y*b.x.
+SIT T cross(const Vec<2, T>& a, const Vec<2, T>& b) {
+ auto x = a * shuffle<1,0>(b);
+ return x[0] - x[1];
+}
+
+// Euclidean length: sqrt(dot(v, v)).
+SIN float length(const Vec<N, float>& v) {
+ return std::sqrt(dot(v, v));
+}
+
+SIN double length(const Vec<N, double>& v) {
+ return std::sqrt(dot(v, v));
+}
+
+// Unit-length copy of v. NOTE: divides by length(v), so a zero vector yields non-finite lanes.
+SIN Vec<N, float> normalize(const Vec<N, float>& v) {
+ return v / length(v);
+}
+
+SIN Vec<N, double> normalize(const Vec<N, double>& v) {
+ return v / length(v);
+}
+
+// True if every lane of v is finite (no inf/NaN).
+SINT bool isfinite(const Vec<N, T>& v) {
+ // Multiply all values together with 0. If they were all finite, the output is
+ // 0 (also finite). If any were not, we'll get nan.
+ return std::isfinite(dot(v, Vec<N, T>(0)));
+}
+
+// De-interleaving load of 4 vectors.
+// Lane i of a,b,c,d comes from v[4*i+0..3] respectively (interleaved AoS -> SoA).
+//
+// WARNING: These are really only supported well on NEON. Consider restructuring your data before
+// resorting to these methods.
+SIT void strided_load4(const T* v,
+ Vec<1,T>& a,
+ Vec<1,T>& b,
+ Vec<1,T>& c,
+ Vec<1,T>& d) {
+ a.val = v[0];
+ b.val = v[1];
+ c.val = v[2];
+ d.val = v[3];
+}
+SINT void strided_load4(const T* v,
+ Vec<N,T>& a,
+ Vec<N,T>& b,
+ Vec<N,T>& c,
+ Vec<N,T>& d) {
+ strided_load4(v, a.lo, b.lo, c.lo, d.lo);
+ strided_load4(v + 4*(N/2), a.hi, b.hi, c.hi, d.hi);
+}
+#if SKVX_USE_SIMD && defined(__ARM_NEON)
+// NEON's vld4 family loads and de-interleaves in a single instruction.
+#define IMPL_LOAD4_TRANSPOSED(N, T, VLD) \
+SI void strided_load4(const T* v, \
+ Vec<N,T>& a, \
+ Vec<N,T>& b, \
+ Vec<N,T>& c, \
+ Vec<N,T>& d) { \
+ auto mat = VLD(v); \
+ a = bit_pun<Vec<N,T>>(mat.val[0]); \
+ b = bit_pun<Vec<N,T>>(mat.val[1]); \
+ c = bit_pun<Vec<N,T>>(mat.val[2]); \
+ d = bit_pun<Vec<N,T>>(mat.val[3]); \
+}
+IMPL_LOAD4_TRANSPOSED(2, uint32_t, vld4_u32)
+IMPL_LOAD4_TRANSPOSED(4, uint16_t, vld4_u16)
+IMPL_LOAD4_TRANSPOSED(8, uint8_t, vld4_u8)
+IMPL_LOAD4_TRANSPOSED(2, int32_t, vld4_s32)
+IMPL_LOAD4_TRANSPOSED(4, int16_t, vld4_s16)
+IMPL_LOAD4_TRANSPOSED(8, int8_t, vld4_s8)
+IMPL_LOAD4_TRANSPOSED(2, float, vld4_f32)
+IMPL_LOAD4_TRANSPOSED(4, uint32_t, vld4q_u32)
+IMPL_LOAD4_TRANSPOSED(8, uint16_t, vld4q_u16)
+IMPL_LOAD4_TRANSPOSED(16, uint8_t, vld4q_u8)
+IMPL_LOAD4_TRANSPOSED(4, int32_t, vld4q_s32)
+IMPL_LOAD4_TRANSPOSED(8, int16_t, vld4q_s16)
+IMPL_LOAD4_TRANSPOSED(16, int8_t, vld4q_s8)
+IMPL_LOAD4_TRANSPOSED(4, float, vld4q_f32)
+#undef IMPL_LOAD4_TRANSPOSED
+
+#elif SKVX_USE_SIMD && defined(__SSE__)
+
+// On SSE, load four rows and transpose 4x4 in registers.
+SI void strided_load4(const float* v,
+ Vec<4,float>& a,
+ Vec<4,float>& b,
+ Vec<4,float>& c,
+ Vec<4,float>& d) {
+ __m128 a_ = _mm_loadu_ps(v);
+ __m128 b_ = _mm_loadu_ps(v+4);
+ __m128 c_ = _mm_loadu_ps(v+8);
+ __m128 d_ = _mm_loadu_ps(v+12);
+ _MM_TRANSPOSE4_PS(a_, b_, c_, d_);
+ a = bit_pun<Vec<4,float>>(a_);
+ b = bit_pun<Vec<4,float>>(b_);
+ c = bit_pun<Vec<4,float>>(c_);
+ d = bit_pun<Vec<4,float>>(d_);
+}
+#endif
+
+// De-interleaving load of 2 vectors.
+// Lane i of a,b comes from v[2*i+0..1] respectively (interleaved AoS -> SoA).
+//
+// WARNING: These are really only supported well on NEON. Consider restructuring your data before
+// resorting to these methods.
+SIT void strided_load2(const T* v, Vec<1,T>& a, Vec<1,T>& b) {
+ a.val = v[0];
+ b.val = v[1];
+}
+SINT void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) {
+ strided_load2(v, a.lo, b.lo);
+ strided_load2(v + 2*(N/2), a.hi, b.hi);
+}
+#if SKVX_USE_SIMD && defined(__ARM_NEON)
+// NEON's vld2 family loads and de-interleaves in a single instruction.
+#define IMPL_LOAD2_TRANSPOSED(N, T, VLD) \
+SI void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) { \
+ auto mat = VLD(v); \
+ a = bit_pun<Vec<N,T>>(mat.val[0]); \
+ b = bit_pun<Vec<N,T>>(mat.val[1]); \
+}
+IMPL_LOAD2_TRANSPOSED(2, uint32_t, vld2_u32)
+IMPL_LOAD2_TRANSPOSED(4, uint16_t, vld2_u16)
+IMPL_LOAD2_TRANSPOSED(8, uint8_t, vld2_u8)
+IMPL_LOAD2_TRANSPOSED(2, int32_t, vld2_s32)
+IMPL_LOAD2_TRANSPOSED(4, int16_t, vld2_s16)
+IMPL_LOAD2_TRANSPOSED(8, int8_t, vld2_s8)
+IMPL_LOAD2_TRANSPOSED(2, float, vld2_f32)
+IMPL_LOAD2_TRANSPOSED(4, uint32_t, vld2q_u32)
+IMPL_LOAD2_TRANSPOSED(8, uint16_t, vld2q_u16)
+IMPL_LOAD2_TRANSPOSED(16, uint8_t, vld2q_u8)
+IMPL_LOAD2_TRANSPOSED(4, int32_t, vld2q_s32)
+IMPL_LOAD2_TRANSPOSED(8, int16_t, vld2q_s16)
+IMPL_LOAD2_TRANSPOSED(16, int8_t, vld2q_s8)
+IMPL_LOAD2_TRANSPOSED(4, float, vld2q_f32)
+#undef IMPL_LOAD2_TRANSPOSED
+#endif
+
+// Define commonly used aliases, named <base type><lane count>.
+using float2 = Vec< 2, float>;
+using float4 = Vec< 4, float>;
+using float8 = Vec< 8, float>;
+
+using double2 = Vec< 2, double>;
+using double4 = Vec< 4, double>;
+using double8 = Vec< 8, double>;
+
+using byte2 = Vec< 2, uint8_t>;
+using byte4 = Vec< 4, uint8_t>;
+using byte8 = Vec< 8, uint8_t>;
+using byte16 = Vec<16, uint8_t>;
+
+using int2 = Vec< 2, int32_t>;
+using int4 = Vec< 4, int32_t>;
+using int8 = Vec< 8, int32_t>;
+
+using uint2 = Vec< 2, uint32_t>;
+using uint4 = Vec< 4, uint32_t>;
+using uint8 = Vec< 8, uint32_t>;
+
+using long2 = Vec< 2, int64_t>;
+using long4 = Vec< 4, int64_t>;
+using long8 = Vec< 8, int64_t>;
+
+// Use with from_half and to_half to convert between floatX, and use these for storage.
+using half2 = Vec< 2, uint16_t>;
+using half4 = Vec< 4, uint16_t>;
+using half8 = Vec< 8, uint16_t>;
+
+} // namespace skvx
+
+#undef SINTU
+#undef SINT
+#undef SIN
+#undef SIT
+#undef SI
+#undef SKVX_ALWAYS_INLINE
+#undef SKVX_USE_SIMD
+
+#endif//SKVX_DEFINED
diff --git a/gfx/skia/skia/src/base/SkZip.h b/gfx/skia/skia/src/base/SkZip.h
new file mode 100644
index 0000000000..884aa11d8d
--- /dev/null
+++ b/gfx/skia/skia/src/base/SkZip.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkZip_DEFINED
+#define SkZip_DEFINED
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkSpan_impl.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <tuple>
+#include <utility>
+
+// Take a list of things that can be pointers, and use them all in parallel. The iterators and
+// accessor operator[] for the class produce a tuple of the items.
+template<typename... Ts>
+class SkZip {
+    // Dereferencing/indexing yields a tuple of references, one per zipped array.
+    using ReturnTuple = std::tuple<Ts&...>;
+
+    // Input iterator over the zipped arrays. operator* materializes the
+    // reference tuple on the fly, which is why `reference` is a value type and
+    // the category is only input_iterator_tag.
+    class Iterator {
+    public:
+        using value_type = ReturnTuple;
+        using difference_type = ptrdiff_t;
+        using pointer = value_type*;
+        using reference = value_type;
+        using iterator_category = std::input_iterator_tag;
+        constexpr Iterator(const SkZip* zip, size_t index) : fZip{zip}, fIndex{index} { }
+        constexpr Iterator(const Iterator& that) : Iterator{ that.fZip, that.fIndex } { }
+        constexpr Iterator& operator++() { ++fIndex; return *this; }
+        constexpr Iterator operator++(int) { Iterator tmp(*this); operator++(); return tmp; }
+        // Comparison looks at the index only; both sides are assumed to come
+        // from the same SkZip.
+        constexpr bool operator==(const Iterator& rhs) const { return fIndex == rhs.fIndex; }
+        constexpr bool operator!=(const Iterator& rhs) const { return fIndex != rhs.fIndex; }
+        constexpr reference operator*() { return (*fZip)[fIndex]; }
+        friend constexpr difference_type operator-(Iterator lhs, Iterator rhs) {
+            return lhs.fIndex - rhs.fIndex;
+        }
+
+    private:
+        const SkZip* const fZip = nullptr;
+        size_t fIndex = 0;
+    };
+
+    // A typed null pointer, used to default-construct each slot of fPointers.
+    template<typename T>
+    inline static constexpr T* nullify = nullptr;
+
+public:
+    constexpr SkZip() : fPointers{nullify<Ts>...}, fSize{0} {}
+    // Deleted so a lone size cannot be mistaken for a valid zip.
+    constexpr SkZip(size_t) = delete;
+    constexpr SkZip(size_t size, Ts*... ts)
+            : fPointers{ts...}
+            , fSize{size} {}
+    constexpr SkZip(const SkZip& that) = default;
+    constexpr SkZip& operator=(const SkZip &that) = default;
+
+    // Check to see if U can be used for const T or is the same as T
+    template <typename U, typename T>
+    using CanConvertToConst = typename std::integral_constant<bool,
+            std::is_convertible<U*, T*>::value && sizeof(U) == sizeof(T)>::type;
+
+    // Allow SkZip<const T> to be constructed from SkZip<T>.
+    template<typename... Us,
+             typename = std::enable_if<std::conjunction<CanConvertToConst<Us, Ts>...>::value>>
+    constexpr SkZip(const SkZip<Us...>& that)
+            : fPointers(that.data())
+            , fSize{that.size()} { }
+
+    constexpr ReturnTuple operator[](size_t i) const { return this->index(i);}
+    constexpr size_t size() const { return fSize; }
+    constexpr bool empty() const { return this->size() == 0; }
+    constexpr ReturnTuple front() const { return this->index(0); }
+    constexpr ReturnTuple back() const { return this->index(this->size() - 1); }
+    constexpr Iterator begin() const { return Iterator{this, 0}; }
+    constexpr Iterator end() const { return Iterator{this, this->size()}; }
+    // The I-th zipped array, viewed as an SkSpan of length size().
+    template<size_t I> constexpr auto get() const {
+        return SkSpan(std::get<I>(fPointers), fSize);
+    }
+    constexpr std::tuple<Ts*...> data() const { return fPointers; }
+    // View of the first n elements; n must be <= size().
+    constexpr SkZip first(size_t n) const {
+        SkASSERT(n <= this->size());
+        if (n == 0) { return SkZip(); }
+        return SkZip{n, fPointers};
+    }
+    // View of the last n elements; n must be <= size().
+    constexpr SkZip last(size_t n) const {
+        SkASSERT(n <= this->size());
+        if (n == 0) { return SkZip(); }
+        return SkZip{n, this->pointersAt(fSize - n)};
+    }
+    // View of elements [offset, offset + count); must lie within size().
+    constexpr SkZip subspan(size_t offset, size_t count) const {
+        SkASSERT(offset < this->size());
+        SkASSERT(count <= this->size() - offset);
+        if (count == 0) { return SkZip(); }
+        return SkZip(count, pointersAt(offset));
+    }
+
+private:
+    constexpr SkZip(size_t n, const std::tuple<Ts*...>& pointers)
+            : fPointers{pointers}
+            , fSize{n} {}
+
+    // Tuple of references to element i of every array.
+    constexpr ReturnTuple index(size_t i) const {
+        SkASSERT(this->size() > 0);
+        SkASSERT(i < this->size());
+        return indexDetail(i, std::make_index_sequence<sizeof...(Ts)>{});
+    }
+
+    template<std::size_t... Is>
+    constexpr ReturnTuple indexDetail(size_t i, std::index_sequence<Is...>) const {
+        return ReturnTuple((std::get<Is>(fPointers))[i]...);
+    }
+
+    // Every base pointer advanced by i; used to build the sub-view ctors above.
+    std::tuple<Ts*...> pointersAt(size_t i) const {
+        SkASSERT(this->size() > 0);
+        SkASSERT(i < this->size());
+        return pointersAtDetail(i, std::make_index_sequence<sizeof...(Ts)>{});
+    }
+
+    template<std::size_t... Is>
+    constexpr std::tuple<Ts*...> pointersAtDetail(size_t i, std::index_sequence<Is...>) const {
+        return std::tuple<Ts*...>{&(std::get<Is>(fPointers))[i]...};
+    }
+
+    std::tuple<Ts*...> fPointers;  // parallel base pointers, one per Ts
+    size_t fSize;                  // shared logical length of all the arrays
+};
+
+// Implementation machinery for SkMakeZip: maps each argument (pointer, C
+// array, SkSpan, or l-value container) to a base pointer and a size, then
+// builds the SkZip.
+class SkMakeZipDetail {
+    // Decays an argument type to a plain pointer when it is pointer-like;
+    // otherwise leaves the (reference) type alone so containers bind by ref.
+    template<typename T> struct DecayPointer{
+        using U = typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+        using type = typename std::conditional<std::is_pointer<U>::value, U, T>::type;
+    };
+    template<typename T> using DecayPointerT = typename DecayPointer<T>::type;
+
+    // Adapters exposing Data()/Size() for each accepted argument kind.
+    template<typename C> struct ContiguousMemory { };
+    // Bare pointer: no intrinsic size, so report SIZE_MAX as "unknown".
+    template<typename T> struct ContiguousMemory<T*> {
+        using value_type = T;
+        static constexpr value_type* Data(T* t) { return t; }
+        static constexpr size_t Size(T* s) { return SIZE_MAX; }
+    };
+    template<typename T, size_t N> struct ContiguousMemory<T(&)[N]> {
+        using value_type = T;
+        static constexpr value_type* Data(T(&t)[N]) { return t; }
+        static constexpr size_t Size(T(&)[N]) { return N; }
+    };
+    // In general, we don't want r-value collections, but SkSpans are ok, because they are a view
+    // onto an actual container.
+    template<typename T> struct ContiguousMemory<SkSpan<T>> {
+        using value_type = T;
+        static constexpr value_type* Data(SkSpan<T> s) { return s.data(); }
+        static constexpr size_t Size(SkSpan<T> s) { return s.size(); }
+    };
+    // Only accept l-value references to collections.
+    template<typename C> struct ContiguousMemory<C&> {
+        using value_type = typename std::remove_pointer<decltype(std::declval<C>().data())>::type;
+        static constexpr value_type* Data(C& c) { return c.data(); }
+        static constexpr size_t Size(C& c) { return c.size(); }
+    };
+    template<typename C> using Span = ContiguousMemory<DecayPointerT<C>>;
+    template<typename C> using ValueType = typename Span<C>::value_type;
+
+    // Walks the argument list and returns the first size it can determine;
+    // bare pointers defer to the remaining arguments.
+    template<typename C, typename... Ts> struct PickOneSize { };
+    template <typename T, typename... Ts> struct PickOneSize<T*, Ts...> {
+        static constexpr size_t Size(T* t, Ts... ts) {
+            return PickOneSize<Ts...>::Size(std::forward<Ts>(ts)...);
+        }
+    };
+    template <typename T, typename... Ts, size_t N> struct PickOneSize<T(&)[N], Ts...> {
+        static constexpr size_t Size(T(&)[N], Ts...) { return N; }
+    };
+    template<typename T, typename... Ts> struct PickOneSize<SkSpan<T>, Ts...> {
+        static constexpr size_t Size(SkSpan<T> s, Ts...) { return s.size(); }
+    };
+    template<typename C, typename... Ts> struct PickOneSize<C&, Ts...> {
+        static constexpr size_t Size(C& c, Ts...) { return c.size(); }
+    };
+
+public:
+    template<typename... Ts>
+    static constexpr auto MakeZip(Ts&& ... ts) {
+
+        // Pick the first collection that has a size, and use that for the size.
+        size_t size = PickOneSize<DecayPointerT<Ts>...>::Size(std::forward<Ts>(ts)...);
+
+#ifdef SK_DEBUG
+        // Check that all sizes are the same.
+        size_t minSize = SIZE_MAX;
+        size_t maxSize = 0;
+        for (size_t s : {Span<Ts>::Size(std::forward<Ts>(ts))...}) {
+            if (s != SIZE_MAX) {
+                minSize = std::min(minSize, s);
+                maxSize = std::max(maxSize, s);
+            }
+        }
+        SkASSERT(minSize == maxSize);
+#endif
+
+        return SkZip<ValueType<Ts>...>{size, Span<Ts>::Data(std::forward<Ts>(ts))...};
+    }
+};
+
+// Deduction guide so SkZip{size, ptrs...} deduces the element types.
+template<typename... Ts>
+SkZip(size_t size, Ts*... ts) -> SkZip<Ts...>;
+
+// Public entry point: zip any mix of pointers, arrays, SkSpans, and l-value
+// containers into one SkZip.
+template<typename... Ts>
+inline constexpr auto SkMakeZip(Ts&& ... ts) {
+    return SkMakeZipDetail::MakeZip(std::forward<Ts>(ts)...);
+}
+#endif //SkZip_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkCodec.cpp b/gfx/skia/skia/src/codec/SkCodec.cpp
new file mode 100644
index 0000000000..d49857a130
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodec.cpp
@@ -0,0 +1,972 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkCodec.h"
+
+#include "include/codec/SkCodecAnimation.h"
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h" // IWYU pragma: keep
+#include "include/core/SkMatrix.h"
+#include "include/core/SkStream.h"
+#include "include/private/base/SkTemplates.h"
+#include "modules/skcms/skcms.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkFrameHolder.h"
+#include "src/codec/SkSampler.h"
+
+// We always include and compile in these BMP codecs
+#include "src/codec/SkBmpCodec.h"
+#include "src/codec/SkWbmpCodec.h"
+
+#include <utility>
+
+#ifdef SK_CODEC_DECODES_AVIF
+#include "src/codec/SkAvifCodec.h"
+#endif
+
+#ifdef SK_HAS_HEIF_LIBRARY
+#include "src/codec/SkHeifCodec.h"
+#endif
+
+#ifdef SK_CODEC_DECODES_JPEG
+#include "src/codec/SkJpegCodec.h"
+#endif
+
+#ifdef SK_CODEC_DECODES_JPEGXL
+#include "src/codec/SkJpegxlCodec.h"
+#endif
+
+#ifdef SK_CODEC_DECODES_PNG
+#include "src/codec/SkIcoCodec.h"
+#include "src/codec/SkPngCodec.h"
+#endif
+
+#ifdef SK_CODEC_DECODES_RAW
+#include "src/codec/SkRawCodec.h"
+#endif
+
+#ifdef SK_CODEC_DECODES_WEBP
+#include "src/codec/SkWebpCodec.h"
+#endif
+
+#ifdef SK_HAS_WUFFS_LIBRARY
+#include "src/codec/SkWuffsCodec.h"
+#endif
+
+namespace {
+
+// One entry in the decoder registry: a sniffing predicate plus a factory.
+struct DecoderProc {
+    bool (*IsFormat)(const void*, size_t);
+    std::unique_ptr<SkCodec> (*MakeFromStream)(std::unique_ptr<SkStream>, SkCodec::Result*);
+};
+
+// The global decoder registry, seeded with the formats compiled in. The vector
+// is heap-allocated and never freed — presumably to sidestep static-destruction
+// order issues. NOTE(review): no synchronization is visible here; confirm that
+// Register() is only called before concurrent decoding starts.
+std::vector<DecoderProc>* decoders() {
+    static auto* decoders = new std::vector<DecoderProc> {
+    #ifdef SK_CODEC_DECODES_JPEG
+        { SkJpegCodec::IsJpeg, SkJpegCodec::MakeFromStream },
+    #endif
+    #ifdef SK_CODEC_DECODES_WEBP
+        { SkWebpCodec::IsWebp, SkWebpCodec::MakeFromStream },
+    #endif
+    #ifdef SK_HAS_WUFFS_LIBRARY
+        { SkWuffsCodec_IsFormat, SkWuffsCodec_MakeFromStream },
+    #endif
+    #ifdef SK_CODEC_DECODES_PNG
+        { SkIcoCodec::IsIco, SkIcoCodec::MakeFromStream },
+    #endif
+        { SkBmpCodec::IsBmp, SkBmpCodec::MakeFromStream },
+        { SkWbmpCodec::IsWbmp, SkWbmpCodec::MakeFromStream },
+    #ifdef SK_CODEC_DECODES_AVIF
+        { SkAvifCodec::IsAvif, SkAvifCodec::MakeFromStream },
+    #endif
+    #ifdef SK_CODEC_DECODES_JPEGXL
+        { SkJpegxlCodec::IsJpegxl, SkJpegxlCodec::MakeFromStream },
+    #endif
+    };
+    return decoders;
+}
+
+} // namespace
+
+// Appends a (sniffer, factory) pair to the global decoder registry so
+// MakeFromStream can recognize additional formats at runtime.
+void SkCodec::Register(
+            bool (*peek)(const void*, size_t),
+            std::unique_ptr<SkCodec> (*make)(std::unique_ptr<SkStream>, SkCodec::Result*)) {
+    decoders()->push_back(DecoderProc{peek, make});
+}
+
+// Sniffs the stream's leading bytes and dispatches to the matching codec
+// factory. On failure returns nullptr and reports the reason via *outResult.
+// PNG is handled before the registry so chunkReader can be forwarded; HEIF and
+// RAW are tried last under their own #ifdefs.
+std::unique_ptr<SkCodec> SkCodec::MakeFromStream(
+        std::unique_ptr<SkStream> stream, Result* outResult,
+        SkPngChunkReader* chunkReader, SelectionPolicy selectionPolicy) {
+    // Callers may pass nullptr for outResult; report into local storage then.
+    Result resultStorage;
+    if (!outResult) {
+        outResult = &resultStorage;
+    }
+
+    if (!stream) {
+        *outResult = kInvalidInput;
+        return nullptr;
+    }
+
+    if (selectionPolicy != SelectionPolicy::kPreferStillImage
+            && selectionPolicy != SelectionPolicy::kPreferAnimation) {
+        *outResult = kInvalidParameters;
+        return nullptr;
+    }
+
+    constexpr size_t bytesToRead = MinBufferedBytesNeeded();
+
+    char buffer[bytesToRead];
+    size_t bytesRead = stream->peek(buffer, bytesToRead);
+
+    // It is also possible to have a complete image less than bytesToRead bytes
+    // (e.g. a 1 x 1 wbmp), meaning peek() would return less than bytesToRead.
+    // Assume that if bytesRead < bytesToRead, but > 0, the stream is shorter
+    // than bytesToRead, so pass that directly to the decoder.
+    // It also is possible the stream uses too small a buffer for peeking, but
+    // we trust the caller to use a large enough buffer.
+
+    if (0 == bytesRead) {
+        // TODO: After implementing peek in CreateJavaOutputStreamAdaptor.cpp, this
+        // printf could be useful to notice failures.
+        // SkCodecPrintf("Encoded image data failed to peek!\n");
+
+        // It is possible the stream does not support peeking, but does support
+        // rewinding.
+        // Attempt to read() and pass the actual amount read to the decoder.
+        bytesRead = stream->read(buffer, bytesToRead);
+        if (!stream->rewind()) {
+            SkCodecPrintf("Encoded image data could not peek or rewind to determine format!\n");
+            *outResult = kCouldNotRewind;
+            return nullptr;
+        }
+    }
+
+    // PNG is special, since we want to be able to supply an SkPngChunkReader.
+    // But this code follows the same pattern as the loop.
+#ifdef SK_CODEC_DECODES_PNG
+    if (SkPngCodec::IsPng(buffer, bytesRead)) {
+        return SkPngCodec::MakeFromStream(std::move(stream), outResult, chunkReader);
+    }
+#endif
+
+    // First sniffer that recognizes the bytes wins; registry order matters.
+    for (DecoderProc proc : *decoders()) {
+        if (proc.IsFormat(buffer, bytesRead)) {
+            return proc.MakeFromStream(std::move(stream), outResult);
+        }
+    }
+
+#ifdef SK_HAS_HEIF_LIBRARY
+    SkEncodedImageFormat format;
+    if (SkHeifCodec::IsSupported(buffer, bytesRead, &format)) {
+        return SkHeifCodec::MakeFromStream(std::move(stream), selectionPolicy,
+                format, outResult);
+    }
+#endif
+
+#ifdef SK_CODEC_DECODES_RAW
+    // Try to treat the input as RAW if all the other checks failed.
+    return SkRawCodec::MakeFromStream(std::move(stream), outResult);
+#else
+    // Nothing matched: distinguish "stream too short" from "unknown format".
+    if (bytesRead < bytesToRead) {
+        *outResult = kIncompleteInput;
+    } else {
+        *outResult = kUnimplemented;
+    }
+
+    return nullptr;
+#endif
+}
+
+// Convenience wrapper: wraps the data in a memory stream and delegates to
+// MakeFromStream. The Result out-param is not surfaced here.
+std::unique_ptr<SkCodec> SkCodec::MakeFromData(sk_sp<SkData> data, SkPngChunkReader* reader) {
+    if (!data) {
+        return nullptr;
+    }
+    return MakeFromStream(SkMemoryStream::Make(std::move(data)), nullptr, reader);
+}
+
+// Takes ownership of the stream (may be null for codecs that hold their own
+// data — see rewindIfNeeded). srcFormat is the skcms input format later used
+// by applyColorXform.
+SkCodec::SkCodec(SkEncodedInfo&& info,
+                 XformFormat srcFormat,
+                 std::unique_ptr<SkStream> stream,
+                 SkEncodedOrigin origin)
+        : fEncodedInfo(std::move(info))
+        , fSrcXformFormat(srcFormat)
+        , fStream(std::move(stream))
+        , fOrigin(origin)
+        , fDstInfo()
+        , fOptions() {}
+
+SkCodec::~SkCodec() {}
+
+// Allows subclasses to change the reported source pixel format after
+// construction (e.g. once the actual encoded layout is known).
+void SkCodec::setSrcXformFormat(XformFormat pixelFormat) {
+    fSrcXformFormat = pixelFormat;
+}
+
+// Asks the subclass whether it can produce YUVA planes, and double-checks that
+// the plan it filled in is actually one of the caller-supported data types.
+bool SkCodec::queryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes,
+                            SkYUVAPixmapInfo* yuvaPixmapInfo) const {
+    if (!yuvaPixmapInfo) {
+        return false;
+    }
+    return this->onQueryYUVAInfo(supportedDataTypes, yuvaPixmapInfo) &&
+           yuvaPixmapInfo->isSupported(supportedDataTypes);
+}
+
+// Decodes into caller-provided YUVA planes after validating them and rewinding
+// the stream if a previous decode consumed it.
+SkCodec::Result SkCodec::getYUVAPlanes(const SkYUVAPixmaps& yuvaPixmaps) {
+    if (!yuvaPixmaps.isValid()) {
+        return kInvalidInput;
+    }
+    if (!this->rewindIfNeeded()) {
+        return kCouldNotRewind;
+    }
+    return this->onGetYUVAPlanes(yuvaPixmaps);
+}
+
+// Base-class policy for which destination color types a decode may target.
+// Subclasses can override; needsColorXform is unused here.
+bool SkCodec::conversionSupported(const SkImageInfo& dst, bool srcIsOpaque, bool needsColorXform) {
+    if (!valid_alpha(dst.alphaType(), srcIsOpaque)) {
+        return false;
+    }
+
+    switch (dst.colorType()) {
+        case kRGBA_8888_SkColorType:
+        case kBGRA_8888_SkColorType:
+        case kRGBA_F16_SkColorType:
+            return true;
+        // Formats with no alpha channel require an opaque source.
+        case kBGR_101010x_XR_SkColorType:
+        case kRGB_565_SkColorType:
+            return srcIsOpaque;
+        // Gray output only from an opaque grayscale source.
+        case kGray_8_SkColorType:
+            return SkEncodedInfo::kGray_Color == fEncodedInfo.color() && srcIsOpaque;
+        case kAlpha_8_SkColorType:
+            // conceptually we can convert anything into alpha_8, but we haven't actually coded
+            // all of those other conversions yet.
+            return SkEncodedInfo::kXAlpha_Color == fEncodedInfo.color();
+        default:
+            return false;
+    }
+}
+
+// Rewinds the stream (and per-decode state) if a prior decode consumed it.
+// Returns false only when a needed rewind fails. Always re-arms fNeedsRewind
+// so the *next* decode will rewind again.
+bool SkCodec::rewindIfNeeded() {
+    // Store the value of fNeedsRewind so we can update it. Next read will
+    // require a rewind.
+    const bool needsRewind = fNeedsRewind;
+    fNeedsRewind = true;
+    if (!needsRewind) {
+        return true;
+    }
+
+    // startScanlineDecode will need to be called before decoding scanlines.
+    fCurrScanline = -1;
+    // startIncrementalDecode will need to be called before incrementalDecode.
+    fStartedIncrementalDecode = false;
+
+    // Some codecs do not have a stream. They may hold onto their own data or another codec.
+    // They must handle rewinding themselves.
+    if (fStream && !fStream->rewind()) {
+        return false;
+    }
+
+    return this->onRewind();
+}
+
+// Clips frameRect to screenRect; returns an empty rect when they do not
+// intersect (frameRect is taken by value and mutated in place).
+static SkIRect frame_rect_on_screen(SkIRect frameRect,
+                                    const SkIRect& screenRect) {
+    if (!frameRect.intersect(screenRect)) {
+        return SkIRect::MakeEmpty();
+    }
+
+    return frameRect;
+}
+
+// Fills the destination pixels covered by prevRect (a frame rect in the
+// original image's coordinates) via SkSampler::Fill. When the decode is scaled
+// (dstInfo dimensions != srcDimensions) prevRect is first mapped into dst
+// space. Returns false only if that mapping fails.
+// NOTE(review): not file-static — presumably referenced from another
+// translation unit; confirm before changing linkage.
+bool zero_rect(const SkImageInfo& dstInfo, void* pixels, size_t rowBytes,
+               SkISize srcDimensions, SkIRect prevRect) {
+    const auto dimensions = dstInfo.dimensions();
+    if (dimensions != srcDimensions) {
+        SkRect src = SkRect::Make(srcDimensions);
+        SkRect dst = SkRect::Make(dimensions);
+        SkMatrix map = SkMatrix::RectToRect(src, dst);
+        SkRect asRect = SkRect::Make(prevRect);
+        if (!map.mapRect(&asRect)) {
+            return false;
+        }
+        asRect.roundOut(&prevRect);
+    }
+
+    if (!prevRect.intersect(SkIRect::MakeSize(dimensions))) {
+        // Nothing to zero, due to scaling or bad frame rect.
+        return true;
+    }
+
+    const SkImageInfo info = dstInfo.makeDimensions(prevRect.size());
+    const size_t bpp = dstInfo.bytesPerPixel();
+    const size_t offset = prevRect.x() * bpp + prevRect.y() * rowBytes;
+    void* eraseDst = SkTAddOffset<void>(pixels, offset);
+    SkSampler::Fill(info, eraseDst, rowBytes, SkCodec::kNo_ZeroInitialized);
+    return true;
+}
+
+// Prepares state for decoding frame options.fFrameIndex: rewinds if needed,
+// validates the index/subset, recursively decodes the required prior frame (or
+// validates a caller-supplied fPriorFrame), clears kRestoreBGColor regions,
+// and finally initializes the color xform. getPixelsFn, when provided (e.g. by
+// SkAndroidCodec), replaces the recursive getPixels call and marks this codec
+// callback-driven so nested calls short-circuit.
+SkCodec::Result SkCodec::handleFrameIndex(const SkImageInfo& info, void* pixels, size_t rowBytes,
+                                          const Options& options, GetPixelsCallback getPixelsFn) {
+    if (getPixelsFn) {
+        // If a callback is used, it handles the frame index, so calls from this SkCodec
+        // should always short-circuit in the else case below.
+        fUsingCallbackForHandleFrameIndex = true;
+    } else if (fUsingCallbackForHandleFrameIndex) {
+        return kSuccess;
+    }
+
+    if (!this->rewindIfNeeded()) {
+        return kCouldNotRewind;
+    }
+
+    const int index = options.fFrameIndex;
+    if (0 == index) {
+        // Frame 0 never depends on a prior frame; just set up the color xform.
+        return this->initializeColorXform(info, fEncodedInfo.alpha(), fEncodedInfo.opaque())
+            ? kSuccess : kInvalidConversion;
+    }
+
+    if (index < 0) {
+        return kInvalidParameters;
+    }
+
+    if (options.fSubset) {
+        // If we add support for this, we need to update the code that zeroes
+        // a kRestoreBGColor frame.
+        return kInvalidParameters;
+    }
+
+    if (index >= this->onGetFrameCount()) {
+        return kIncompleteInput;
+    }
+
+    const auto* frameHolder = this->getFrameHolder();
+    SkASSERT(frameHolder);
+
+    const auto* frame = frameHolder->getFrame(index);
+    SkASSERT(frame);
+
+    const int requiredFrame = frame->getRequiredFrame();
+    if (requiredFrame != kNoFrame) {
+        // Decode earlier frame if necessary
+        const SkFrame* preppedFrame = nullptr;
+        if (options.fPriorFrame == kNoFrame) {
+            Result result = kInternalError;
+            // getPixelsFn will be set when things like SkAndroidCodec are calling this function.
+            // Thus, we call the provided function when recursively decoding previous frames,
+            // but only when necessary (i.e. there is a required frame).
+            if (getPixelsFn) {
+                result = getPixelsFn(info, pixels, rowBytes, options, requiredFrame);
+            } else {
+                Options prevFrameOptions(options);
+                prevFrameOptions.fFrameIndex = requiredFrame;
+                result = this->getPixels(info, pixels, rowBytes, &prevFrameOptions);
+            }
+            if (result != kSuccess) {
+                return result;
+            }
+            preppedFrame = frameHolder->getFrame(requiredFrame);
+        } else {
+            // Check for a valid frame as a starting point. Alternatively, we could
+            // treat an invalid frame as not providing one, but rejecting it will
+            // make it easier to catch the mistake.
+            if (options.fPriorFrame < requiredFrame || options.fPriorFrame >= index) {
+                return kInvalidParameters;
+            }
+            preppedFrame = frameHolder->getFrame(options.fPriorFrame);
+        }
+
+        SkASSERT(preppedFrame);
+        switch (preppedFrame->getDisposalMethod()) {
+            case SkCodecAnimation::DisposalMethod::kRestorePrevious:
+                // A kRestorePrevious frame cannot serve as the starting state.
+                SkASSERT(options.fPriorFrame != kNoFrame);
+                return kInvalidParameters;
+            case SkCodecAnimation::DisposalMethod::kRestoreBGColor:
+                // If a frame after the required frame is provided, there is no
+                // need to clear, since it must be covered by the desired frame.
+                // FIXME: If the required frame is kRestoreBGColor, we don't actually need to decode
+                // it, since we'll just clear it to transparent. Instead, we could decode *its*
+                // required frame and then clear.
+                if (preppedFrame->frameId() == requiredFrame) {
+                    SkIRect preppedRect = preppedFrame->frameRect();
+                    if (!zero_rect(info, pixels, rowBytes, this->dimensions(), preppedRect)) {
+                        return kInternalError;
+                    }
+                }
+                break;
+            default:
+                break;
+        }
+    }
+
+    return this->initializeColorXform(info, frame->reportedAlpha(), !frame->hasAlpha())
+        ? kSuccess : kInvalidConversion;
+}
+
+// One-shot full-image decode into caller memory. Validates arguments, resolves
+// frame dependencies via handleFrameIndex, runs the subclass decode, and fills
+// any undecoded rows on truncated input.
+SkCodec::Result SkCodec::getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+                                   const Options* options) {
+    if (kUnknown_SkColorType == info.colorType()) {
+        return kInvalidConversion;
+    }
+    if (nullptr == pixels) {
+        return kInvalidParameters;
+    }
+    if (rowBytes < info.minRowBytes()) {
+        return kInvalidParameters;
+    }
+
+    // Default options.
+    Options optsStorage;
+    if (nullptr == options) {
+        options = &optsStorage;
+    } else {
+        if (options->fSubset) {
+            SkIRect subset(*options->fSubset);
+            if (!this->onGetValidSubset(&subset) || subset != *options->fSubset) {
+                // FIXME: How to differentiate between not supporting subset at all
+                // and not supporting this particular subset?
+                return kUnimplemented;
+            }
+        }
+    }
+
+    const Result frameIndexResult = this->handleFrameIndex(info, pixels, rowBytes,
+                                                           *options);
+    if (frameIndexResult != kSuccess) {
+        return frameIndexResult;
+    }
+
+    // FIXME: Support subsets somehow? Note that this works for SkWebpCodec
+    // because it supports arbitrary scaling/subset combinations.
+    if (!this->dimensionsSupported(info.dimensions())) {
+        return kInvalidScale;
+    }
+
+    // Cache the decode parameters for subclasses and fillIncompleteImage.
+    fDstInfo = info;
+    fOptions = *options;
+
+    // On an incomplete decode, the subclass will specify the number of scanlines that it decoded
+    // successfully.
+    int rowsDecoded = 0;
+    const Result result = this->onGetPixels(info, pixels, rowBytes, *options, &rowsDecoded);
+
+    // A return value of kIncompleteInput indicates a truncated image stream.
+    // In this case, we will fill any uninitialized memory with a default value.
+    // Some subclasses will take care of filling any uninitialized memory on
+    // their own. They indicate that all of the memory has been filled by
+    // setting rowsDecoded equal to the height.
+    if ((kIncompleteInput == result || kErrorInInput == result) && rowsDecoded != info.height()) {
+        // FIXME: (skbug.com/5772) fillIncompleteImage will fill using the swizzler's width, unless
+        // there is a subset. In that case, it will use the width of the subset. From here, the
+        // subset will only be non-null in the case of SkWebpCodec, but it treats the subset
+        // differenty from the other codecs, and it needs to use the width specified by the info.
+        // Set the subset to null so SkWebpCodec uses the correct width.
+        fOptions.fSubset = nullptr;
+        this->fillIncompleteImage(info, pixels, rowBytes, options->fZeroInitialized, info.height(),
+                rowsDecoded);
+    }
+
+    return result;
+}
+
+// Decodes into a freshly allocated bitmap and returns it as an immutable
+// SkImage. Partially-decoded results (kIncompleteInput / kErrorInInput) still
+// yield an image, paired with the non-success result.
+std::tuple<sk_sp<SkImage>, SkCodec::Result> SkCodec::getImage(const SkImageInfo& info,
+                                                              const Options* options) {
+    SkBitmap bm;
+    if (!bm.tryAllocPixels(info)) {
+        return {nullptr, kInternalError};
+    }
+
+    Result result = this->getPixels(info, bm.getPixels(), bm.rowBytes(), options);
+    switch (result) {
+        case kSuccess:
+        case kIncompleteInput:
+        case kErrorInInput:
+            bm.setImmutable();
+            return {bm.asImage(), result};
+
+        default: break;
+    }
+    return {nullptr, result};
+}
+
+// Convenience overload: decode at the codec's native info with default options.
+std::tuple<sk_sp<SkImage>, SkCodec::Result> SkCodec::getImage() {
+    return this->getImage(this->getInfo(), nullptr);
+}
+
+// Begins an incremental decode targeting caller memory. On kSuccess the codec
+// is armed for incrementalDecode(); on kUnimplemented the caller may fall back
+// to the scanline API (see the fNeedsRewind note below).
+SkCodec::Result SkCodec::startIncrementalDecode(const SkImageInfo& info, void* pixels,
+        size_t rowBytes, const SkCodec::Options* options) {
+    fStartedIncrementalDecode = false;
+
+    if (kUnknown_SkColorType == info.colorType()) {
+        return kInvalidConversion;
+    }
+    if (nullptr == pixels) {
+        return kInvalidParameters;
+    }
+
+    // Set options.
+    Options optsStorage;
+    if (nullptr == options) {
+        options = &optsStorage;
+    } else {
+        if (options->fSubset) {
+            SkIRect size = SkIRect::MakeSize(info.dimensions());
+            if (!size.contains(*options->fSubset)) {
+                return kInvalidParameters;
+            }
+
+            const int top = options->fSubset->top();
+            const int bottom = options->fSubset->bottom();
+            if (top < 0 || top >= info.height() || top >= bottom || bottom > info.height()) {
+                return kInvalidParameters;
+            }
+        }
+    }
+
+    const Result frameIndexResult = this->handleFrameIndex(info, pixels, rowBytes,
+                                                           *options);
+    if (frameIndexResult != kSuccess) {
+        return frameIndexResult;
+    }
+
+    if (!this->dimensionsSupported(info.dimensions())) {
+        return kInvalidScale;
+    }
+
+    // Cache the decode parameters for subclasses and fillIncompleteImage.
+    fDstInfo = info;
+    fOptions = *options;
+
+    const Result result = this->onStartIncrementalDecode(info, pixels, rowBytes, fOptions);
+    if (kSuccess == result) {
+        fStartedIncrementalDecode = true;
+    } else if (kUnimplemented == result) {
+        // FIXME: This is temporarily necessary, until we transition SkCodec
+        // implementations from scanline decoding to incremental decoding.
+        // SkAndroidCodec will first attempt to use incremental decoding, but
+        // will fall back to scanline decoding if incremental returns
+        // kUnimplemented. rewindIfNeeded(), above, set fNeedsRewind to true
+        // (after potentially rewinding), but we do not want the next call to
+        // startScanlineDecode() to do a rewind.
+        fNeedsRewind = false;
+    }
+    return result;
+}
+
+
+// Begins a scanline decode. Only frame 0 and x-only subsets are supported;
+// vertical subsetting is done by the caller via skipScanlines(). On success
+// fCurrScanline becomes 0 and getScanlines()/skipScanlines() may be used.
+// NOTE(review): subset failures return kInvalidInput here but
+// kInvalidParameters in startIncrementalDecode — confirm whether that
+// asymmetry is intentional.
+SkCodec::Result SkCodec::startScanlineDecode(const SkImageInfo& info,
+        const SkCodec::Options* options) {
+    // Reset fCurrScanline in case of failure.
+    fCurrScanline = -1;
+
+    // Set options.
+    Options optsStorage;
+    if (nullptr == options) {
+        options = &optsStorage;
+    } else if (options->fSubset) {
+        SkIRect size = SkIRect::MakeSize(info.dimensions());
+        if (!size.contains(*options->fSubset)) {
+            return kInvalidInput;
+        }
+
+        // We only support subsetting in the x-dimension for scanline decoder.
+        // Subsetting in the y-dimension can be accomplished using skipScanlines().
+        if (options->fSubset->top() != 0 || options->fSubset->height() != info.height()) {
+            return kInvalidInput;
+        }
+    }
+
+    // Scanline decoding only supports decoding the first frame.
+    if (options->fFrameIndex != 0) {
+        return kUnimplemented;
+    }
+
+    // The void* dst and rowbytes in handleFrameIndex or only used for decoding prior
+    // frames, which is not supported here anyway, so it is safe to pass nullptr/0.
+    const Result frameIndexResult = this->handleFrameIndex(info, nullptr, 0, *options);
+    if (frameIndexResult != kSuccess) {
+        return frameIndexResult;
+    }
+
+    // FIXME: Support subsets somehow?
+    if (!this->dimensionsSupported(info.dimensions())) {
+        return kInvalidScale;
+    }
+
+    const Result result = this->onStartScanlineDecode(info, *options);
+    if (result != SkCodec::kSuccess) {
+        return result;
+    }
+
+    // FIXME: See startIncrementalDecode. That method set fNeedsRewind to false
+    // so that when onStartScanlineDecode calls rewindIfNeeded it would not
+    // rewind. But it also relies on that call to rewindIfNeeded to set
+    // fNeedsRewind to true for future decodes. When
+    // fUsingCallbackForHandleFrameIndex is true, that call to rewindIfNeeded is
+    // skipped, so this method sets it back to true.
+    SkASSERT(fUsingCallbackForHandleFrameIndex || fNeedsRewind);
+    fNeedsRewind = true;
+
+    fCurrScanline = 0;
+    fDstInfo = info;
+    fOptions = *options;
+    return kSuccess;
+}
+
+// Decodes up to countLines scanlines into dst, filling any rows the subclass
+// could not produce. Returns the number of rows actually decoded, or 0 when no
+// scanline decode is active or the request is out of range.
+int SkCodec::getScanlines(void* dst, int countLines, size_t rowBytes) {
+    if (fCurrScanline < 0) {
+        return 0;
+    }
+
+    SkASSERT(!fDstInfo.isEmpty());
+    if (countLines <= 0 || fCurrScanline + countLines > fDstInfo.height()) {
+        return 0;
+    }
+
+    const int linesDecoded = this->onGetScanlines(dst, countLines, rowBytes);
+    if (linesDecoded < countLines) {
+        this->fillIncompleteImage(this->dstInfo(), dst, rowBytes, this->options().fZeroInitialized,
+                countLines, linesDecoded);
+    }
+    // Advance by the requested count, not linesDecoded: the fill above has
+    // covered the shortfall, so the cursor reflects rows consumed from dst.
+    fCurrScanline += countLines;
+    return linesDecoded;
+}
+
+// Advances the scanline cursor without producing output. Returns false when no
+// scanline decode is active, the request overruns the image, or the subclass
+// skip fails.
+bool SkCodec::skipScanlines(int countLines) {
+    if (fCurrScanline < 0) {
+        return false;
+    }
+
+    SkASSERT(!fDstInfo.isEmpty());
+    if (countLines < 0 || fCurrScanline + countLines > fDstInfo.height()) {
+        // Arguably, we could just skip the scanlines which are remaining,
+        // and return true. We choose to return false so the client
+        // can catch their bug.
+        return false;
+    }
+
+    bool result = this->onSkipScanlines(countLines);
+    fCurrScanline += countLines;
+    return result;
+}
+
+// Maps an input (encoded-order) scanline to its output row; see
+// onOutputScanline for the ordering rules.
+int SkCodec::outputScanline(int inputScanline) const {
+    SkASSERT(0 <= inputScanline && inputScanline < fEncodedInfo.height());
+    return this->onOutputScanline(inputScanline);
+}
+
+// Default mapping: identity for top-down images, mirrored (height - 1 - row)
+// for bottom-up ones. Other orders must be handled by subclass overrides.
+int SkCodec::onOutputScanline(int inputScanline) const {
+    switch (this->getScanlineOrder()) {
+        case kTopDown_SkScanlineOrder:
+            return inputScanline;
+        case kBottomUp_SkScanlineOrder:
+            return fEncodedInfo.height() - inputScanline - 1;
+        default:
+            // This case indicates an interlaced gif and is implemented by SkGifCodec.
+            SkASSERT(false);
+            return 0;
+    }
+}
+
+// Fills the rows that were requested but not decoded, so truncated decodes
+// never leave uninitialized memory. No-op when the destination was already
+// zero-initialized by the caller.
+void SkCodec::fillIncompleteImage(const SkImageInfo& info, void* dst, size_t rowBytes,
+        ZeroInitialized zeroInit, int linesRequested, int linesDecoded) {
+    if (kYes_ZeroInitialized == zeroInit) {
+        return;
+    }
+
+    const int linesRemaining = linesRequested - linesDecoded;
+    SkSampler* sampler = this->getSampler(false);
+
+    // Width to fill: the sampler's output width when sampling, else the subset
+    // width, else the full row.
+    const int fillWidth = sampler          ? sampler->fillWidth()      :
+                          fOptions.fSubset ? fOptions.fSubset->width() :
+                                             info.width()              ;
+    // Bottom-up images were decoded from the end of the buffer, so the missing
+    // rows are at the start of dst; top-down images fill after the decoded rows.
+    void* fillDst = this->getScanlineOrder() == kBottomUp_SkScanlineOrder ? dst :
+                        SkTAddOffset<void>(dst, linesDecoded * rowBytes);
+    const auto fillInfo = info.makeWH(fillWidth, linesRemaining);
+    SkSampler::Fill(fillInfo, fillDst, rowBytes, kNo_ZeroInitialized);
+}
+
+// Maps an SkColorType to the skcms pixel format used for color transforms.
+// Returns false for color types the xform path does not support.
+bool sk_select_xform_format(SkColorType colorType, bool forColorTable,
+                            skcms_PixelFormat* outFormat) {
+    SkASSERT(outFormat);
+
+    switch (colorType) {
+        case kRGBA_8888_SkColorType:
+            *outFormat = skcms_PixelFormat_RGBA_8888;
+            break;
+        case kBGRA_8888_SkColorType:
+            *outFormat = skcms_PixelFormat_BGRA_8888;
+            break;
+        case kRGB_565_SkColorType:
+            // When transforming a palette (color table) the entries are stored
+            // as premul colors in SkPMColor channel order, not 565.
+            if (forColorTable) {
+#ifdef SK_PMCOLOR_IS_RGBA
+                *outFormat = skcms_PixelFormat_RGBA_8888;
+#else
+                *outFormat = skcms_PixelFormat_BGRA_8888;
+#endif
+                break;
+            }
+            *outFormat = skcms_PixelFormat_BGR_565;
+            break;
+        case kRGBA_F16_SkColorType:
+            *outFormat = skcms_PixelFormat_RGBA_hhhh;
+            break;
+        case kBGR_101010x_XR_SkColorType:
+            *outFormat = skcms_PixelFormat_BGR_101010x_XR;
+            break;
+        case kGray_8_SkColorType:
+            *outFormat = skcms_PixelFormat_G_8;
+            break;
+        default:
+            return false;
+    }
+    return true;
+}
+
+// Decides whether this decode needs a color transform, and if so configures
+// fDstProfile, fDstXformFormat, fDstXformAlphaFormat, and when the transform
+// runs (per-row vs once on the palette). Returns false when the requested
+// conversion is unsupported.
+bool SkCodec::initializeColorXform(const SkImageInfo& dstInfo, SkEncodedInfo::Alpha encodedAlpha,
+                                   bool srcIsOpaque) {
+    fXformTime = kNo_XformTime;
+    bool needsColorXform = false;
+    if (this->usesColorXform()) {
+        // F16 and 101010x_XR destinations always go through the xform, even
+        // with no dst color space (then the src profile is used, so no
+        // conversion occurs — only the format change).
+        if (kRGBA_F16_SkColorType == dstInfo.colorType() ||
+                kBGR_101010x_XR_SkColorType == dstInfo.colorType()) {
+            needsColorXform = true;
+            if (dstInfo.colorSpace()) {
+                dstInfo.colorSpace()->toProfile(&fDstProfile);
+            } else {
+                // Use the srcProfile to avoid conversion.
+                const auto* srcProfile = fEncodedInfo.profile();
+                fDstProfile = srcProfile ? *srcProfile : *skcms_sRGB_profile();
+            }
+        } else if (dstInfo.colorSpace()) {
+            // Otherwise only transform when src and dst profiles differ.
+            dstInfo.colorSpace()->toProfile(&fDstProfile);
+            const auto* srcProfile = fEncodedInfo.profile();
+            if (!srcProfile) {
+                srcProfile = skcms_sRGB_profile();
+            }
+            if (!skcms_ApproximatelyEqualProfiles(srcProfile, &fDstProfile) ) {
+                needsColorXform = true;
+            }
+        }
+    }
+
+    if (!this->conversionSupported(dstInfo, srcIsOpaque, needsColorXform)) {
+        return false;
+    }
+
+    if (needsColorXform) {
+        // Paletted images can transform the palette once instead of every row,
+        // except when decoding to F16 (palette entries are 8888).
+        fXformTime = SkEncodedInfo::kPalette_Color != fEncodedInfo.color()
+                          || kRGBA_F16_SkColorType == dstInfo.colorType()
+                ? kDecodeRow_XformTime : kPalette_XformTime;
+        if (!sk_select_xform_format(dstInfo.colorType(), fXformTime == kPalette_XformTime,
+                                    &fDstXformFormat)) {
+            return false;
+        }
+        if (encodedAlpha == SkEncodedInfo::kUnpremul_Alpha
+                && dstInfo.alphaType() == kPremul_SkAlphaType) {
+            fDstXformAlphaFormat = skcms_AlphaFormat_PremulAsEncoded;
+        } else {
+            fDstXformAlphaFormat = skcms_AlphaFormat_Unpremul;
+        }
+    }
+    return true;
+}
+
// Transform |count| pixels from the encoded profile/format into the
// destination profile/format selected by initializeColorXform().
void SkCodec::applyColorXform(void* dst, const void* src, int count) const {
    // It is okay for srcProfile to be null. This will use sRGB.
    const auto* srcProfile = fEncodedInfo.profile();
    // The source is always presented as unpremul; any premultiplication is
    // applied on the destination side via fDstXformAlphaFormat.
    SkAssertResult(skcms_Transform(src, fSrcXformFormat, skcms_AlphaFormat_Unpremul, srcProfile,
                                   dst, fDstXformFormat, fDstXformAlphaFormat, &fDstProfile,
                                   count));
}
+
+std::vector<SkCodec::FrameInfo> SkCodec::getFrameInfo() {
+ const int frameCount = this->getFrameCount();
+ SkASSERT(frameCount >= 0);
+ if (frameCount <= 0) {
+ return std::vector<FrameInfo>{};
+ }
+
+ if (frameCount == 1 && !this->onGetFrameInfo(0, nullptr)) {
+ // Not animated.
+ return std::vector<FrameInfo>{};
+ }
+
+ std::vector<FrameInfo> result(frameCount);
+ for (int i = 0; i < frameCount; ++i) {
+ SkAssertResult(this->onGetFrameInfo(i, &result[i]));
+ }
+ return result;
+}
+
+const char* SkCodec::ResultToString(Result result) {
+ switch (result) {
+ case kSuccess:
+ return "success";
+ case kIncompleteInput:
+ return "incomplete input";
+ case kErrorInInput:
+ return "error in input";
+ case kInvalidConversion:
+ return "invalid conversion";
+ case kInvalidScale:
+ return "invalid scale";
+ case kInvalidParameters:
+ return "invalid parameters";
+ case kInvalidInput:
+ return "invalid input";
+ case kCouldNotRewind:
+ return "could not rewind";
+ case kInternalError:
+ return "internal error";
+ case kUnimplemented:
+ return "unimplemented";
+ default:
+ SkASSERT(false);
+ return "bogus result value";
+ }
+}
+
+void SkFrame::fillIn(SkCodec::FrameInfo* frameInfo, bool fullyReceived) const {
+ SkASSERT(frameInfo);
+
+ frameInfo->fRequiredFrame = fRequiredFrame;
+ frameInfo->fDuration = fDuration;
+ frameInfo->fFullyReceived = fullyReceived;
+ frameInfo->fAlphaType = fHasAlpha ? kUnpremul_SkAlphaType
+ : kOpaque_SkAlphaType;
+ frameInfo->fHasAlphaWithinBounds = this->reportedAlpha() != SkEncodedInfo::kOpaque_Alpha;
+ frameInfo->fDisposalMethod = fDisposalMethod;
+ frameInfo->fBlend = fBlend;
+ frameInfo->fFrameRect = fRect;
+}
+
+static bool independent(const SkFrame& frame) {
+ return frame.getRequiredFrame() == SkCodec::kNoFrame;
+}
+
+static bool restore_bg(const SkFrame& frame) {
+ return frame.getDisposalMethod() == SkCodecAnimation::DisposalMethod::kRestoreBGColor;
+}
+
+// As its name suggests, this method computes a frame's alpha (e.g. completely
+// opaque, unpremul, binary) and its required frame (a preceding frame that
+// this frame depends on, to draw the complete image at this frame's point in
+// the animation stream), and calls this frame's setter methods with that
+// computed information.
+//
+// A required frame of kNoFrame means that this frame is independent: drawing
+// the complete image at this frame's point in the animation stream does not
+// require first preparing the pixel buffer based on another frame. Instead,
+// drawing can start from an uninitialized pixel buffer.
+//
+// "Uninitialized" is from the SkCodec's caller's point of view. In the SkCodec
+// implementation, for independent frames, first party Skia code (in src/codec)
+// will typically fill the buffer with a uniform background color (e.g.
+// transparent black) before calling into third party codec-specific code (e.g.
+// libjpeg or libpng). Pixels outside of the frame's rect will remain this
+// background color after drawing this frame. For incomplete decodes, pixels
+// inside that rect may be (at least temporarily) set to that background color.
+// In an incremental decode, later passes may then overwrite that background
+// color.
+//
+// Determining kNoFrame or otherwise involves testing a number of conditions
+// sequentially. The first satisfied condition results in setting the required
+// frame to kNoFrame (an "INDx" condition) or to a non-negative frame number (a
+// "DEPx" condition), and the function returning early. Those "INDx" and "DEPx"
+// labels also map to comments in the function body.
+//
+// - IND1: this frame is the first frame.
+// - IND2: this frame fills out the whole image, and it is completely opaque
+// or it overwrites (not blends with) the previous frame.
+// - IND3: all preceding frames' disposals are kRestorePrevious.
+// - IND4: the prevFrame's disposal is kRestoreBGColor, and it fills out the
+// whole image or it is itself otherwise independent.
+// - DEP5: this frame reports alpha (it is not completely opaque) and it
+// blends with (not overwrites) the previous frame.
+// - IND6: this frame's rect covers the rects of all preceding frames back to
+// and including the most recent independent frame before this frame.
+// - DEP7: unconditional.
+//
+// The "prevFrame" variable initially points to the previous frame (also known
+// as the prior frame), but that variable may iterate further backwards over
+// the course of this computation.
void SkFrameHolder::setAlphaAndRequiredFrame(SkFrame* frame) {
    const bool reportsAlpha = frame->reportedAlpha() != SkEncodedInfo::kOpaque_Alpha;
    const auto screenRect = SkIRect::MakeWH(fScreenWidth, fScreenHeight);
    const auto frameRect = frame_rect_on_screen(frame->frameRect(), screenRect);

    const int i = frame->frameId();
    if (0 == i) {
        // A first frame that does not cover the whole screen leaves uncovered
        // pixels, so it has alpha even if its own pixels are opaque.
        frame->setHasAlpha(reportsAlpha || frameRect != screenRect);
        frame->setRequiredFrame(SkCodec::kNoFrame); // IND1
        return;
    }


    const bool blendWithPrevFrame = frame->getBlend() == SkCodecAnimation::Blend::kSrcOver;
    if ((!reportsAlpha || !blendWithPrevFrame) && frameRect == screenRect) {
        // This frame covers the screen and fully determines every pixel.
        frame->setHasAlpha(reportsAlpha);
        frame->setRequiredFrame(SkCodec::kNoFrame); // IND2
        return;
    }

    // Step backwards past frames whose disposal is kRestorePrevious; they do
    // not contribute to the state this frame composites on top of.
    const SkFrame* prevFrame = this->getFrame(i-1);
    while (prevFrame->getDisposalMethod() == SkCodecAnimation::DisposalMethod::kRestorePrevious) {
        const int prevId = prevFrame->frameId();
        if (0 == prevId) {
            frame->setHasAlpha(true);
            frame->setRequiredFrame(SkCodec::kNoFrame); // IND3
            return;
        }

        prevFrame = this->getFrame(prevId - 1);
    }

    const bool clearPrevFrame = restore_bg(*prevFrame);
    auto prevFrameRect = frame_rect_on_screen(prevFrame->frameRect(), screenRect);

    if (clearPrevFrame) {
        if (prevFrameRect == screenRect || independent(*prevFrame)) {
            frame->setHasAlpha(true);
            frame->setRequiredFrame(SkCodec::kNoFrame); // IND4
            return;
        }
    }

    if (reportsAlpha && blendWithPrevFrame) {
        // Note: We could be more aggressive here. If prevFrame clears
        // to background color and covers its required frame (and that
        // frame is independent), prevFrame could be marked independent.
        // Would this extra complexity be worth it?
        frame->setRequiredFrame(prevFrame->frameId()); // DEP5
        frame->setHasAlpha(prevFrame->hasAlpha() || clearPrevFrame);
        return;
    }

    // Walk the dependency chain backwards while this frame's rect covers each
    // candidate; reaching an independent frame makes this one independent too.
    while (frameRect.contains(prevFrameRect)) {
        const int prevRequiredFrame = prevFrame->getRequiredFrame();
        if (prevRequiredFrame == SkCodec::kNoFrame) {
            frame->setRequiredFrame(SkCodec::kNoFrame); // IND6
            frame->setHasAlpha(true);
            return;
        }

        prevFrame = this->getFrame(prevRequiredFrame);
        prevFrameRect = frame_rect_on_screen(prevFrame->frameRect(), screenRect);
    }

    frame->setRequiredFrame(prevFrame->frameId()); // DEP7
    if (restore_bg(*prevFrame)) {
        frame->setHasAlpha(true);
        return;
    }
    SkASSERT(prevFrame->getDisposalMethod() == SkCodecAnimation::DisposalMethod::kKeep);
    frame->setHasAlpha(prevFrame->hasAlpha() || (reportsAlpha && !blendWithPrevFrame));
}
+
diff --git a/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp b/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp
new file mode 100644
index 0000000000..5df8729148
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecImageGenerator.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkCodecImageGenerator.h"
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkPixmapUtils.h"
+
+#include <utility>
+
+
+std::unique_ptr<SkImageGenerator> SkCodecImageGenerator::MakeFromEncodedCodec(
+ sk_sp<SkData> data, std::optional<SkAlphaType> at) {
+ auto codec = SkCodec::MakeFromData(data);
+ if (nullptr == codec) {
+ return nullptr;
+ }
+
+ return std::unique_ptr<SkImageGenerator>(new SkCodecImageGenerator(std::move(codec), data, at));
+}
+
+std::unique_ptr<SkImageGenerator> SkCodecImageGenerator::MakeFromCodec(
+ std::unique_ptr<SkCodec> codec) {
+ return codec ? std::unique_ptr<SkImageGenerator>(
+ new SkCodecImageGenerator(std::move(codec), nullptr, std::nullopt))
+ : nullptr;
+}
+
+static SkImageInfo adjust_info(SkCodec* codec, std::optional<SkAlphaType> at) {
+ SkASSERT(at != kOpaque_SkAlphaType);
+ SkImageInfo info = codec->getInfo();
+ if (at.has_value()) {
+ // If a specific alpha type was requested, use that.
+ info = info.makeAlphaType(*at);
+ } else if (kUnpremul_SkAlphaType == info.alphaType()) {
+ // Otherwise, prefer premul over unpremul (this produces better filtering in general)
+ info = info.makeAlphaType(kPremul_SkAlphaType);
+ }
+ if (SkEncodedOriginSwapsWidthHeight(codec->getOrigin())) {
+ info = SkPixmapUtils::SwapWidthHeight(info);
+ }
+ return info;
+}
+
// Takes ownership of |codec|. |data| (possibly null) is the original encoded
// bytes, later reported via onRefEncodedData(). The generator's image info is
// derived from the codec by adjust_info().
SkCodecImageGenerator::SkCodecImageGenerator(std::unique_ptr<SkCodec> codec,
                                             sk_sp<SkData> data,
                                             std::optional<SkAlphaType> at)
        : INHERITED(adjust_info(codec.get(), at))
        , fCodec(std::move(codec))
        , fData(std::move(data)) {}
+
// Return the original encoded bytes, or null if this generator was built
// from a codec without data (see MakeFromCodec).
sk_sp<SkData> SkCodecImageGenerator::onRefEncodedData() {
    return fData;
}
+
+bool SkCodecImageGenerator::getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const SkCodec::Options* options) {
+ SkPixmap dst(info, pixels, rowBytes);
+
+ auto decode = [this, options](const SkPixmap& pm) {
+ SkCodec::Result result = fCodec->getPixels(pm, options);
+ switch (result) {
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ case SkCodec::kErrorInInput:
+ return true;
+ default:
+ return false;
+ }
+ };
+
+ return SkPixmapUtils::Orient(dst, fCodec->getOrigin(), decode);
+}
+
// SkImageGenerator entry point. Decodes with SkCodec's default options.
// NOTE(review): the generator-level |options| parameter is intentionally not
// forwarded (nullptr is passed instead) -- confirm no generator option needs
// translating into SkCodec::Options.
bool SkCodecImageGenerator::onGetPixels(const SkImageInfo& requestInfo, void* requestPixels,
                                        size_t requestRowBytes, const Options& options) {
    return this->getPixels(requestInfo, requestPixels, requestRowBytes, nullptr);
}
+
// Forward YUVA capability queries straight to the underlying codec.
bool SkCodecImageGenerator::onQueryYUVAInfo(
        const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes,
        SkYUVAPixmapInfo* yuvaPixmapInfo) const {
    return fCodec->queryYUVAInfo(supportedDataTypes, yuvaPixmapInfo);
}
+
+bool SkCodecImageGenerator::onGetYUVAPlanes(const SkYUVAPixmaps& yuvaPixmaps) {
+ switch (fCodec->getYUVAPlanes(yuvaPixmaps)) {
+ case SkCodec::kSuccess:
+ case SkCodec::kIncompleteInput:
+ case SkCodec::kErrorInInput:
+ return true;
+ default:
+ return false;
+ }
+}
+
+SkISize SkCodecImageGenerator::getScaledDimensions(float desiredScale) const {
+ SkISize size = fCodec->getScaledDimensions(desiredScale);
+ if (SkEncodedOriginSwapsWidthHeight(fCodec->getOrigin())) {
+ std::swap(size.fWidth, size.fHeight);
+ }
+ return size;
+}
diff --git a/gfx/skia/skia/src/codec/SkCodecImageGenerator.h b/gfx/skia/skia/src/codec/SkCodecImageGenerator.h
new file mode 100644
index 0000000000..5823ebd9cd
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecImageGenerator.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkCodecImageGenerator_DEFINED
+#define SkCodecImageGenerator_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkYUVAPixmaps.h"
+
+#include <cstddef>
+#include <memory>
+#include <optional>
+
+enum SkAlphaType : int;
+struct SkImageInfo;
+
// An SkImageGenerator backed by an SkCodec. Adds animation accessors
// (frame count/info, repetition count) and orientation-aware scaling on top
// of the base generator interface.
class SkCodecImageGenerator : public SkImageGenerator {
public:
    /*
     * If this data represents an encoded image that we know how to decode,
     * return an SkCodecImageGenerator. Otherwise return nullptr.
     */
    static std::unique_ptr<SkImageGenerator> MakeFromEncodedCodec(
            sk_sp<SkData>, std::optional<SkAlphaType> = std::nullopt);

    // Wrap an existing codec; no encoded data will be reported by
    // onRefEncodedData() in this case.
    static std::unique_ptr<SkImageGenerator> MakeFromCodec(std::unique_ptr<SkCodec>);

    /**
     * Return a size that approximately supports the desired scale factor. The codec may not be able
     * to scale efficiently to the exact scale factor requested, so return a size that approximates
     * that scale. The returned value is the codec's suggestion for the closest valid scale that it
     * can natively support.
     *
     * This is similar to SkCodec::getScaledDimensions, but adjusts the returned dimensions based
     * on the image's EXIF orientation.
     */
    SkISize getScaledDimensions(float desiredScale) const;

    /**
     * Decode into the given pixels, a block of memory of size at
     * least (info.fHeight - 1) * rowBytes + (info.fWidth *
     * bytesPerPixel)
     *
     * Repeated calls to this function should give the same results,
     * allowing the PixelRef to be immutable.
     *
     * @param info A description of the format
     *             expected by the caller. This can simply be identical
     *             to the info returned by getInfo().
     *
     *             This contract also allows the caller to specify
     *             different output-configs, which the implementation can
     *             decide to support or not.
     *
     *             A size that does not match getInfo() implies a request
     *             to scale. If the generator cannot perform this scale,
     *             it will return false.
     *
     * @return true on success.
     */
    bool getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const SkCodec::Options* options = nullptr);

    /**
     * Return the number of frames in the image.
     *
     * May require reading through the stream.
     */
    int getFrameCount() { return fCodec->getFrameCount(); }

    /**
     * Return info about a single frame.
     *
     * Only supported by multi-frame images. Does not read through the stream,
     * so it should be called after getFrameCount() to parse any frames that
     * have not already been parsed.
     */
    bool getFrameInfo(int index, SkCodec::FrameInfo* info) const {
        return fCodec->getFrameInfo(index, info);
    }

    /**
     * Return the number of times to repeat, if this image is animated. This number does not
     * include the first play through of each frame. For example, a repetition count of 4 means
     * that each frame is played 5 times and then the animation stops.
     *
     * It can return kRepetitionCountInfinite, a negative number, meaning that the animation
     * should loop forever.
     *
     * May require reading the stream to find the repetition count.
     *
     * As such, future decoding calls may require a rewind.
     *
     * For still (non-animated) image codecs, this will return 0.
     */
    int getRepetitionCount() { return fCodec->getRepetitionCount(); }

protected:
    // SkImageGenerator overrides; all delegate to fCodec.
    sk_sp<SkData> onRefEncodedData() override;

    bool onGetPixels(const SkImageInfo& info,
                     void* pixels,
                     size_t rowBytes,
                     const Options& opts) override;

    bool onQueryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes&,
                         SkYUVAPixmapInfo*) const override;

    bool onGetYUVAPlanes(const SkYUVAPixmaps& yuvaPixmaps) override;

private:
    /*
     * Takes ownership of codec
     */
    SkCodecImageGenerator(std::unique_ptr<SkCodec>, sk_sp<SkData>, std::optional<SkAlphaType>);

    // Decoder that backs all pixel and metadata access.
    std::unique_ptr<SkCodec> fCodec;
    // Original encoded bytes; may be null (see MakeFromCodec).
    sk_sp<SkData> fData;

    using INHERITED = SkImageGenerator;
};
+#endif // SkCodecImageGenerator_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkCodecPriv.h b/gfx/skia/skia/src/codec/SkCodecPriv.h
new file mode 100644
index 0000000000..8d05f11e91
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkCodecPriv.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCodecPriv_DEFINED
+#define SkCodecPriv_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkEncodedInfo.h"
+#include "src/codec/SkColorTable.h"
+#include "src/base/SkEndian.h"
+
+#ifdef SK_PRINT_CODEC_MESSAGES
+ #define SkCodecPrintf SkDebugf
+#else
+ #define SkCodecPrintf(...)
+#endif
+
+// Defined in SkCodec.cpp
+bool sk_select_xform_format(SkColorType colorType, bool forColorTable,
+ skcms_PixelFormat* outFormat);
+
+// FIXME: Consider sharing with dm, nanbench, and tools.
// A sample size of N keeps one pixel out of every N, i.e. a 1/N scale.
static inline float get_scale_from_sample_size(int sampleSize) {
    const float n = static_cast<float>(sampleSize);
    return 1.0f / n;
}
+
// A subset is valid only if it lies fully inside the image bounds.
static inline bool is_valid_subset(const SkIRect& subset, const SkISize& imageDims) {
    return SkIRect::MakeSize(imageDims).contains(subset);
}
+
+/*
+ * returns a scaled dimension based on the original dimension and the sampleSize
+ * NOTE: we round down here for scaled dimension to match the behavior of SkImageDecoder
+ * FIXME: I think we should call this get_sampled_dimension().
+ */
static inline int get_scaled_dimension(int srcDimension, int sampleSize) {
    // Clamp to 1 so an oversized sampleSize never yields a zero dimension;
    // otherwise truncate (round down), matching SkImageDecoder.
    return (sampleSize > srcDimension) ? 1 : srcDimension / sampleSize;
}
+
/*
 * Returns the first coordinate that we will keep during a scaled decode.
 * The output can be interpreted as an x-coordinate or a y-coordinate.
 *
 * This does not need to be called and is not called when sampleFactor == 1.
 */
// Midpoint sampling: the first retained coordinate is half the sample factor.
static inline int get_start_coord(int sampleFactor) { return sampleFactor / 2; }
+
/*
 * Given a coordinate in the original image, this returns the corresponding
 * coordinate in the scaled image. This function is meaningless if
 * IsCoordNecessary returns false.
 * The output can be interpreted as an x-coordinate or a y-coordinate.
 *
 * This does not need to be called and is not called when sampleFactor == 1.
 */
// Integer division maps each kept source coordinate onto the scaled grid.
static inline int get_dst_coord(int srcCoord, int sampleFactor) { return srcCoord / sampleFactor; }
+
+/*
+ * When scaling, we will discard certain y-coordinates (rows) and
+ * x-coordinates (columns). This function returns true if we should keep the
+ * coordinate and false otherwise.
+ * The inputs may be x-coordinates or y-coordinates.
+ *
+ * This does not need to be called and is not called when sampleFactor == 1.
+ */
static inline bool is_coord_necessary(int srcCoord, int sampleFactor, int scaledDim) {
    // First coordinate retained by the sampler (same as get_start_coord()).
    const int start = sampleFactor / 2;

    // Reject coordinates before the first kept one, or ones that would map
    // past the end of the scaled output (same as get_dst_coord()).
    if (srcCoord < start || (srcCoord / sampleFactor) >= scaledDim) {
        return false;
    }

    // Exactly one coordinate out of every sampleFactor is kept.
    return 0 == ((srcCoord - start) % sampleFactor);
}
+
+static inline bool valid_alpha(SkAlphaType dstAlpha, bool srcIsOpaque) {
+ if (kUnknown_SkAlphaType == dstAlpha) {
+ return false;
+ }
+
+ if (srcIsOpaque) {
+ if (kOpaque_SkAlphaType != dstAlpha) {
+ SkCodecPrintf("Warning: an opaque image should be decoded as opaque "
+ "- it is being decoded as non-opaque, which will draw slower\n");
+ }
+ return true;
+ }
+
+ return dstAlpha != kOpaque_SkAlphaType;
+}
+
/*
 * If there is a color table, get a pointer to the colors, otherwise return nullptr.
 * The pointer is borrowed from the table and is only valid while colorTable
 * is alive.
 */
static inline const SkPMColor* get_color_ptr(SkColorTable* colorTable) {
    return nullptr != colorTable ? colorTable->readColors() : nullptr;
}
+
+/*
+ * Compute row bytes for an image using pixels per byte
+ */
static inline size_t compute_row_bytes_ppb(int width, uint32_t pixelsPerByte) {
    // Round up: a partially-filled trailing byte still occupies a full byte.
    const uint32_t rounded = width + pixelsPerByte - 1;
    return rounded / pixelsPerByte;
}
+
+/*
+ * Compute row bytes for an image using bytes per pixel
+ */
static inline size_t compute_row_bytes_bpp(int width, uint32_t bytesPerPixel) {
    // Row bytes for whole-byte pixels is a simple product.
    return width * bytesPerPixel;
}
+
+/*
+ * Compute row bytes for an image
+ */
+static inline size_t compute_row_bytes(int width, uint32_t bitsPerPixel) {
+ if (bitsPerPixel < 16) {
+ SkASSERT(0 == 8 % bitsPerPixel);
+ const uint32_t pixelsPerByte = 8 / bitsPerPixel;
+ return compute_row_bytes_ppb(width, pixelsPerByte);
+ } else {
+ SkASSERT(0 == bitsPerPixel % 8);
+ const uint32_t bytesPerPixel = bitsPerPixel / 8;
+ return compute_row_bytes_bpp(width, bytesPerPixel);
+ }
+}
+
/*
 * Get a byte from a buffer
 * This method is unsafe, the caller is responsible for performing a check
 * (no bounds checking is done on i).
 */
static inline uint8_t get_byte(const uint8_t* buffer, uint32_t i) {
    return buffer[i];
}
+
/*
 * Get a short from a buffer
 * This method is unsafe, the caller is responsible for performing a check
 * The two bytes at offset i are interpreted as little-endian: the native load
 * is byte-swapped on big-endian hosts.
 */
static inline uint16_t get_short(const uint8_t* buffer, uint32_t i) {
    uint16_t result;
    memcpy(&result, &(buffer[i]), 2);
#ifdef SK_CPU_BENDIAN
    return SkEndianSwap16(result);
#else
    return result;
#endif
}
+
/*
 * Get an int from a buffer
 * This method is unsafe, the caller is responsible for performing a check
 * The four bytes at offset i are interpreted as little-endian: the native
 * load is byte-swapped on big-endian hosts.
 */
static inline uint32_t get_int(const uint8_t* buffer, uint32_t i) {
    uint32_t result;
    memcpy(&result, &(buffer[i]), 4);
#ifdef SK_CPU_BENDIAN
    return SkEndianSwap32(result);
#else
    return result;
#endif
}
+
+/*
+ * @param data Buffer to read bytes from
+ * @param isLittleEndian Output parameter
+ * Indicates if the data is little endian
+ * Is unaffected on false returns
+ */
static inline bool is_valid_endian_marker(const uint8_t* data, bool* isLittleEndian) {
    // TIFF/EXIF-style marker: "II" = Intel (little endian),
    // "MM" = Motorola (big endian). Anything else is invalid.
    const bool intel    = ('I' == data[0] && 'I' == data[1]);
    const bool motorola = ('M' == data[0] && 'M' == data[1]);
    if (!intel && !motorola) {
        // *isLittleEndian is left untouched on failure.
        return false;
    }

    *isLittleEndian = intel;
    return true;
}
+
// Assemble a 16-bit value from two bytes, honoring the requested byte order.
static inline uint16_t get_endian_short(const uint8_t* data, bool littleEndian) {
    const uint16_t lo = littleEndian ? data[0] : data[1];
    const uint16_t hi = littleEndian ? data[1] : data[0];
    return (uint16_t)((hi << 8) | lo);
}
+
// Assemble a 32-bit value from four bytes, honoring the requested byte order.
// The bytes are widened to uint32_t before shifting: shifting a promoted
// (signed) int left by 24 is undefined behavior when the top byte is >= 0x80.
static inline uint32_t get_endian_int(const uint8_t* data, bool littleEndian) {
    if (littleEndian) {
        return ((uint32_t)data[3] << 24) | ((uint32_t)data[2] << 16) |
               ((uint32_t)data[1] << 8)  | ((uint32_t)data[0]);
    }

    return ((uint32_t)data[0] << 24) | ((uint32_t)data[1] << 16) |
           ((uint32_t)data[2] << 8)  | ((uint32_t)data[3]);
}
+
// Premultiply the color channels by alpha (with rounding), then pack in RGBA
// order. Fully opaque pixels skip the multiplies.
static inline SkPMColor premultiply_argb_as_rgba(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    if (a != 255) {
        r = SkMulDiv255Round(r, a);
        g = SkMulDiv255Round(g, a);
        b = SkMulDiv255Round(b, a);
    }

    return SkPackARGB_as_RGBA(a, r, g, b);
}
+
// Premultiply the color channels by alpha (with rounding), then pack in BGRA
// order. Fully opaque pixels skip the multiplies.
static inline SkPMColor premultiply_argb_as_bgra(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    if (a != 255) {
        r = SkMulDiv255Round(r, a);
        g = SkMulDiv255Round(g, a);
        b = SkMulDiv255Round(b, a);
    }

    return SkPackARGB_as_BGRA(a, r, g, b);
}
+
// True when the color type should be packed with channels in RGBA order.
// With SK_PMCOLOR_IS_RGBA defined, everything except BGRA_8888 is treated as
// RGBA; otherwise only RGBA_8888 is.
static inline bool is_rgba(SkColorType colorType) {
#ifdef SK_PMCOLOR_IS_RGBA
    return (kBGRA_8888_SkColorType != colorType);
#else
    return (kRGBA_8888_SkColorType == colorType);
#endif
}
+
+// Method for coverting to a 32 bit pixel.
+typedef uint32_t (*PackColorProc)(U8CPU a, U8CPU r, U8CPU g, U8CPU b);
+
+static inline PackColorProc choose_pack_color_proc(bool isPremul, SkColorType colorType) {
+ bool isRGBA = is_rgba(colorType);
+ if (isPremul) {
+ if (isRGBA) {
+ return &premultiply_argb_as_rgba;
+ } else {
+ return &premultiply_argb_as_bgra;
+ }
+ } else {
+ if (isRGBA) {
+ return &SkPackARGB_as_RGBA;
+ } else {
+ return &SkPackARGB_as_BGRA;
+ }
+ }
+}
+
+bool is_orientation_marker(const uint8_t* data, size_t data_length, SkEncodedOrigin* orientation);
+
+#endif // SkCodecPriv_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkColorTable.cpp b/gfx/skia/skia/src/codec/SkColorTable.cpp
new file mode 100644
index 0000000000..5c700689e5
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkColorTable.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/codec/SkColorTable.h"
+
+#include "include/private/base/SkMalloc.h"
+
+#include <cstring>
+
+SkColorTable::SkColorTable(const SkPMColor colors[], int count) {
+ SkASSERT(0 == count || colors);
+ SkASSERT(count >= 0 && count <= 256);
+
+ fCount = count;
+ fColors = reinterpret_cast<SkPMColor*>(sk_malloc_throw(count * sizeof(SkPMColor)));
+
+ memcpy(fColors, colors, count * sizeof(SkPMColor));
+}
+
// Release the color storage allocated in the constructor.
SkColorTable::~SkColorTable() {
    sk_free(fColors);
}
diff --git a/gfx/skia/skia/src/codec/SkColorTable.h b/gfx/skia/skia/src/codec/SkColorTable.h
new file mode 100644
index 0000000000..b2f4f3f66d
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkColorTable.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorTable_DEFINED
+#define SkColorTable_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+/** \class SkColorTable
+
+ SkColorTable holds an array SkPMColors (premultiplied 32-bit colors) used by
+ 8-bit bitmaps, where the bitmap bytes are interpreted as indices into the colortable.
+
+ SkColorTable is thread-safe.
+*/
class SkColorTable : public SkRefCnt {
public:
    /** Copy up to 256 colors into a new SkColorTable.
     */
    SkColorTable(const SkPMColor colors[], int count);
    ~SkColorTable() override;

    /** Returns the number of colors in the table.
     */
    int count() const { return fCount; }

    /** Returns the specified color from the table. In the debug build, this asserts that
     *  the index is in range (0 <= index < count).
     */
    SkPMColor operator[](int index) const {
        SkASSERT(fColors != nullptr && (unsigned)index < (unsigned)fCount);
        return fColors[index];
    }

    /** Return the array of colors for reading. */
    const SkPMColor* readColors() const { return fColors; }

private:
    // Heap-allocated copy of the caller's colors (owned; freed in the dtor).
    SkPMColor* fColors;
    // Number of entries in fColors (0..256).
    int fCount;

    using INHERITED = SkRefCnt;
};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkEncodedInfo.cpp b/gfx/skia/skia/src/codec/SkEncodedInfo.cpp
new file mode 100644
index 0000000000..56f1a0259d
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkEncodedInfo.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkEncodedInfo.h"
+
+#include "modules/skcms/skcms.h"
+
+std::unique_ptr<SkEncodedInfo::ICCProfile> SkEncodedInfo::ICCProfile::Make(sk_sp<SkData> data) {
+ if (data) {
+ skcms_ICCProfile profile;
+ if (skcms_Parse(data->data(), data->size(), &profile)) {
+ return std::unique_ptr<ICCProfile>(new ICCProfile(profile, std::move(data)));
+ }
+ }
+ return nullptr;
+}
+
// Wrap an already-parsed skcms profile; no backing data is retained.
std::unique_ptr<SkEncodedInfo::ICCProfile> SkEncodedInfo::ICCProfile::Make(
        const skcms_ICCProfile& profile) {
    return std::unique_ptr<ICCProfile>(new ICCProfile(profile));
}
+
// Store the parsed profile together with (optionally) the encoded bytes it
// was parsed from.
SkEncodedInfo::ICCProfile::ICCProfile(const skcms_ICCProfile& profile, sk_sp<SkData> data)
    : fProfile(profile)
    , fData(std::move(data))
{}
diff --git a/gfx/skia/skia/src/codec/SkFrameHolder.h b/gfx/skia/skia/src/codec/SkFrameHolder.h
new file mode 100644
index 0000000000..5facbd872a
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkFrameHolder.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFrameHolder_DEFINED
+#define SkFrameHolder_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/codec/SkCodecAnimation.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkEncodedInfo.h"
+#include "include/private/base/SkNoncopyable.h"
+
+/**
+ * Base class for a single frame of an animated image.
+ *
+ * Separate from SkCodec::FrameInfo, which is a pared down
+ * interface that only contains the info the client needs.
+ */
class SkFrame : public SkNoncopyable {
public:
    SkFrame(int id)
        : fId(id)
        , fHasAlpha(false)
        , fRequiredFrame(kUninitialized)
        , fDisposalMethod(SkCodecAnimation::DisposalMethod::kKeep)
        , fDuration(0)
        , fBlend(SkCodecAnimation::Blend::kSrcOver)
    {
        fRect.setEmpty();
    }

    virtual ~SkFrame() {}

    /**
     *  An explicit move constructor, as
     *  https://en.cppreference.com/w/cpp/language/move_constructor says that
     *  there is no implicit move constructor if there are user-declared
     *  destructors, and we have one, immediately above.
     *
     *  Without a move constructor, it is harder to use an SkFrame, or an
     *  SkFrame subclass, inside a std::vector.
     */
    SkFrame(SkFrame&&) = default;

    /**
     *  0-based index of the frame in the image sequence.
     */
    int frameId() const { return fId; }

    /**
     *  How this frame reports its alpha.
     *
     *  This only considers the rectangle of this frame, and
     *  considers it to have alpha even if it is opaque once
     *  blended with the frame behind it.
     */
    SkEncodedInfo::Alpha reportedAlpha() const {
        return this->onReportedAlpha();
    }

    /**
     *  Cached value representing whether the frame has alpha,
     *  after compositing with the prior frame.
     */
    bool hasAlpha() const { return fHasAlpha; }

    /**
     *  Cache whether the finished frame has alpha.
     */
    void setHasAlpha(bool alpha) { fHasAlpha = alpha; }

    /**
     *  Whether enough of the frame has been read to determine
     *  fRequiredFrame and fHasAlpha.
     */
    bool reachedStartOfData() const { return fRequiredFrame != kUninitialized; }

    /**
     *  The frame this one depends on.
     *
     *  Must not be called until fRequiredFrame has been set properly.
     */
    int getRequiredFrame() const {
        SkASSERT(this->reachedStartOfData());
        return fRequiredFrame;
    }

    /**
     *  Set the frame that this frame depends on.
     */
    void setRequiredFrame(int req) { fRequiredFrame = req; }

    /**
     *  Set the rectangle that is updated by this frame.
     */
    void setXYWH(int x, int y, int width, int height) {
        fRect.setXYWH(x, y, width, height);
    }

    /**
     *  The rectangle that is updated by this frame.
     */
    SkIRect frameRect() const { return fRect; }

    int xOffset() const { return fRect.x(); }
    int yOffset() const { return fRect.y(); }
    int width() const { return fRect.width(); }
    int height() const { return fRect.height(); }

    SkCodecAnimation::DisposalMethod getDisposalMethod() const {
        return fDisposalMethod;
    }

    void setDisposalMethod(SkCodecAnimation::DisposalMethod disposalMethod) {
        fDisposalMethod = disposalMethod;
    }

    /**
     *  Set the duration (in ms) to show this frame.
     */
    void setDuration(int duration) {
        fDuration = duration;
    }

    /**
     *  Duration in ms to show this frame.
     */
    int getDuration() const {
        return fDuration;
    }

    void setBlend(SkCodecAnimation::Blend blend) {
        fBlend = blend;
    }

    SkCodecAnimation::Blend getBlend() const {
        return fBlend;
    }

    /**
     *  Fill in the FrameInfo with details from this object.
     */
    void fillIn(SkCodec::FrameInfo*, bool fullyReceived) const;

protected:
    virtual SkEncodedInfo::Alpha onReportedAlpha() const = 0;

private:
    // Sentinel for fRequiredFrame meaning setRequiredFrame() has not been
    // called yet; distinct from SkCodec::kNoFrame (which means "independent").
    inline static constexpr int kUninitialized = -2;

    const int fId;                                   // 0-based frame index
    bool fHasAlpha;                                  // alpha after compositing
    int fRequiredFrame;                              // dependency, kNoFrame, or kUninitialized
    SkIRect fRect;                                   // region updated by this frame
    SkCodecAnimation::DisposalMethod fDisposalMethod;
    int fDuration;                                   // display time in ms
    SkCodecAnimation::Blend fBlend;
};
+
+/**
+ * Base class for an object which holds the SkFrames of an
+ * image sequence.
+ */
class SkFrameHolder : public SkNoncopyable {
public:
    SkFrameHolder()
        : fScreenWidth(0)
        , fScreenHeight(0)
    {}

    virtual ~SkFrameHolder() {}

    /**
     *  Size of the image. Each frame will be contained in
     *  these dimensions (possibly after clipping).
     */
    int screenWidth() const { return fScreenWidth; }
    int screenHeight() const { return fScreenHeight; }

    /**
     *  Compute the opacity and required frame, based on
     *  the frame's reportedAlpha and how it blends
     *  with prior frames.
     */
    void setAlphaAndRequiredFrame(SkFrame*);

    /**
     *  Return the frame with frameId i.
     */
    const SkFrame* getFrame(int i) const {
        return this->onGetFrame(i);
    }

protected:
    // Full image dimensions; subclasses set these while parsing.
    int fScreenWidth;
    int fScreenHeight;

    // Subclass hook to look up a parsed frame by id.
    virtual const SkFrame* onGetFrame(int i) const = 0;
};
+
+#endif // SkFrameHolder_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp b/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp
new file mode 100644
index 0000000000..4866e876a0
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMaskSwizzler.cpp
@@ -0,0 +1,575 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkMaskSwizzler.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRect.h"
+#include "include/private/SkColorData.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkMasks.h"
+
// --- 16 bits-per-pixel masked sources ---
// For all procs in this family: startX is the first source pixel to read and
// sampleX is the step (in source pixels) between samples.

// Alpha mask ignored; every destination pixel is written fully opaque (0xFF).
static void swizzle_mask16_to_rgba_opaque(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint16_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha mask ignored; every destination pixel is written fully opaque (0xFF).
static void swizzle_mask16_to_bgra_opaque(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint16_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha taken from the alpha mask; components stored unpremultiplied.
static void swizzle_mask16_to_rgba_unpremul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint16_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha taken from the alpha mask; components stored unpremultiplied.
static void swizzle_mask16_to_bgra_unpremul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint16_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha taken from the alpha mask; components premultiplied before packing.
static void swizzle_mask16_to_rgba_premul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint16_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha taken from the alpha mask; components premultiplied before packing.
static void swizzle_mask16_to_bgra_premul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint16_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
        srcPtr += sampleX;
    }
}

// TODO (msarett): We have promoted a two byte per pixel image to 8888, only to
// convert it back to 565. Instead, we should swizzle to 565 directly.
// Any alpha in the source mask is dropped when packing to 565.
static void swizzle_mask16_to_565(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint16_t* srcPtr = ((uint16_t*) srcRow) + startX;
    uint16_t* dstPtr = (uint16_t*) dstRow;
    for (int i = 0; i < width; i++) {
        uint16_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPack888ToRGB16(red, green, blue);
        srcPtr += sampleX;
    }
}
+
// --- 24 bits-per-pixel masked sources ---
// Pixels are three bytes, assembled little-endian into a 32-bit value; the row
// pointer advances 3 bytes per source pixel.

// Alpha mask ignored; every destination pixel is written fully opaque (0xFF).
static void swizzle_mask24_to_rgba_opaque(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    srcRow += 3 * startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
        srcRow += 3 * sampleX;
    }
}

// Alpha mask ignored; every destination pixel is written fully opaque (0xFF).
static void swizzle_mask24_to_bgra_opaque(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    srcRow += 3 * startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
        srcRow += 3 * sampleX;
    }
}

// Alpha taken from the alpha mask; components stored unpremultiplied.
static void swizzle_mask24_to_rgba_unpremul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    srcRow += 3 * startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
        srcRow += 3 * sampleX;
    }
}

// Alpha taken from the alpha mask; components stored unpremultiplied.
static void swizzle_mask24_to_bgra_unpremul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    srcRow += 3 * startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
        srcRow += 3 * sampleX;
    }
}

// Alpha taken from the alpha mask; components premultiplied before packing.
static void swizzle_mask24_to_rgba_premul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    srcRow += 3 * startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
        srcRow += 3 * sampleX;
    }
}

// Alpha taken from the alpha mask; components premultiplied before packing.
static void swizzle_mask24_to_bgra_premul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    srcRow += 3 * startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
        srcRow += 3 * sampleX;
    }
}

// Any alpha in the source mask is dropped when packing to 565.
static void swizzle_mask24_to_565(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    srcRow += 3 * startX;
    uint16_t* dstPtr = (uint16_t*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcRow[0] | (srcRow[1] << 8) | srcRow[2] << 16;
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPack888ToRGB16(red, green, blue);
        srcRow += 3 * sampleX;
    }
}
+
// --- 32 bits-per-pixel masked sources ---
// Pixels are read as native 32-bit words; startX/sampleX are in source pixels.

// Alpha mask ignored; every destination pixel is written fully opaque (0xFF).
static void swizzle_mask32_to_rgba_opaque(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPackARGB_as_RGBA(0xFF, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha mask ignored; every destination pixel is written fully opaque (0xFF).
static void swizzle_mask32_to_bgra_opaque(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPackARGB_as_BGRA(0xFF, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha taken from the alpha mask; components stored unpremultiplied.
static void swizzle_mask32_to_rgba_unpremul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = SkPackARGB_as_RGBA(alpha, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha taken from the alpha mask; components stored unpremultiplied.
static void swizzle_mask32_to_bgra_unpremul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = SkPackARGB_as_BGRA(alpha, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha taken from the alpha mask; components premultiplied before packing.
static void swizzle_mask32_to_rgba_premul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = premultiply_argb_as_rgba(alpha, red, green, blue);
        srcPtr += sampleX;
    }
}

// Alpha taken from the alpha mask; components premultiplied before packing.
static void swizzle_mask32_to_bgra_premul(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {

    // Use the masks to decode to the destination
    uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
    SkPMColor* dstPtr = (SkPMColor*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        uint8_t alpha = masks->getAlpha(p);
        dstPtr[i] = premultiply_argb_as_bgra(alpha, red, green, blue);
        srcPtr += sampleX;
    }
}

// Any alpha in the source mask is dropped when packing to 565.
static void swizzle_mask32_to_565(
        void* dstRow, const uint8_t* srcRow, int width, SkMasks* masks,
        uint32_t startX, uint32_t sampleX) {
    // Use the masks to decode to the destination
    uint32_t* srcPtr = ((uint32_t*) srcRow) + startX;
    uint16_t* dstPtr = (uint16_t*) dstRow;
    for (int i = 0; i < width; i++) {
        uint32_t p = srcPtr[0];
        uint8_t red = masks->getRed(p);
        uint8_t green = masks->getGreen(p);
        uint8_t blue = masks->getBlue(p);
        dstPtr[i] = SkPack888ToRGB16(red, green, blue);
        srcPtr += sampleX;
    }
}
+
+/*
+ *
+ * Create a new mask swizzler
+ *
+ */
SkMaskSwizzler* SkMaskSwizzler::CreateMaskSwizzler(const SkImageInfo& dstInfo,
        bool srcIsOpaque, SkMasks* masks, uint32_t bitsPerPixel,
        const SkCodec::Options& options) {

    // Choose the appropriate row procedure
    // NOTE(review): proc can remain nullptr for unsupported
    // colorType/alphaType combinations, and swizzle() invokes fRowProc
    // unconditionally -- callers must have validated the conversion first.
    RowProc proc = nullptr;
    switch (bitsPerPixel) {
        case 16:
            switch (dstInfo.colorType()) {
                case kRGBA_8888_SkColorType:
                    if (srcIsOpaque) {
                        // Opaque source: skip reading the alpha mask entirely.
                        proc = &swizzle_mask16_to_rgba_opaque;
                    } else {
                        switch (dstInfo.alphaType()) {
                            case kUnpremul_SkAlphaType:
                                proc = &swizzle_mask16_to_rgba_unpremul;
                                break;
                            case kPremul_SkAlphaType:
                                proc = &swizzle_mask16_to_rgba_premul;
                                break;
                            default:
                                break;
                        }
                    }
                    break;
                case kBGRA_8888_SkColorType:
                    if (srcIsOpaque) {
                        proc = &swizzle_mask16_to_bgra_opaque;
                    } else {
                        switch (dstInfo.alphaType()) {
                            case kUnpremul_SkAlphaType:
                                proc = &swizzle_mask16_to_bgra_unpremul;
                                break;
                            case kPremul_SkAlphaType:
                                proc = &swizzle_mask16_to_bgra_premul;
                                break;
                            default:
                                break;
                        }
                    }
                    break;
                case kRGB_565_SkColorType:
                    // 565 ignores alpha, so one proc serves all alpha types.
                    proc = &swizzle_mask16_to_565;
                    break;
                default:
                    break;
            }
            break;
        case 24:
            switch (dstInfo.colorType()) {
                case kRGBA_8888_SkColorType:
                    if (srcIsOpaque) {
                        proc = &swizzle_mask24_to_rgba_opaque;
                    } else {
                        switch (dstInfo.alphaType()) {
                            case kUnpremul_SkAlphaType:
                                proc = &swizzle_mask24_to_rgba_unpremul;
                                break;
                            case kPremul_SkAlphaType:
                                proc = &swizzle_mask24_to_rgba_premul;
                                break;
                            default:
                                break;
                        }
                    }
                    break;
                case kBGRA_8888_SkColorType:
                    if (srcIsOpaque) {
                        proc = &swizzle_mask24_to_bgra_opaque;
                    } else {
                        switch (dstInfo.alphaType()) {
                            case kUnpremul_SkAlphaType:
                                proc = &swizzle_mask24_to_bgra_unpremul;
                                break;
                            case kPremul_SkAlphaType:
                                proc = &swizzle_mask24_to_bgra_premul;
                                break;
                            default:
                                break;
                        }
                    }
                    break;
                case kRGB_565_SkColorType:
                    proc = &swizzle_mask24_to_565;
                    break;
                default:
                    break;
            }
            break;
        case 32:
            switch (dstInfo.colorType()) {
                case kRGBA_8888_SkColorType:
                    if (srcIsOpaque) {
                        proc = &swizzle_mask32_to_rgba_opaque;
                    } else {
                        switch (dstInfo.alphaType()) {
                            case kUnpremul_SkAlphaType:
                                proc = &swizzle_mask32_to_rgba_unpremul;
                                break;
                            case kPremul_SkAlphaType:
                                proc = &swizzle_mask32_to_rgba_premul;
                                break;
                            default:
                                break;
                        }
                    }
                    break;
                case kBGRA_8888_SkColorType:
                    if (srcIsOpaque) {
                        proc = &swizzle_mask32_to_bgra_opaque;
                    } else {
                        switch (dstInfo.alphaType()) {
                            case kUnpremul_SkAlphaType:
                                proc = &swizzle_mask32_to_bgra_unpremul;
                                break;
                            case kPremul_SkAlphaType:
                                proc = &swizzle_mask32_to_bgra_premul;
                                break;
                            default:
                                break;
                        }
                    }
                    break;
                case kRGB_565_SkColorType:
                    proc = &swizzle_mask32_to_565;
                    break;
                default:
                    break;
            }
            break;
        default:
            // Only 16/24/32 bpp masked sources are supported.
            SkASSERT(false);
            return nullptr;
    }

    // Default to decoding the full width; a subset decode reads only the
    // columns covered by options.fSubset.
    int srcOffset = 0;
    int srcWidth = dstInfo.width();
    if (options.fSubset) {
        srcOffset = options.fSubset->left();
        srcWidth = options.fSubset->width();
    }

    return new SkMaskSwizzler(masks, proc, srcOffset, srcWidth);
}
+
+/*
+ *
+ * Constructor for mask swizzler
+ *
+ */
SkMaskSwizzler::SkMaskSwizzler(SkMasks* masks, RowProc proc, int srcOffset, int subsetWidth)
    : fMasks(masks)            // unowned
    , fRowProc(proc)
    , fSubsetWidth(subsetWidth)
    , fDstWidth(subsetWidth)   // until onSetSampleX() scales it
    , fSampleX(1)              // default: sample every source pixel
    , fSrcOffset(srcOffset)
    , fX0(srcOffset)           // recomputed by onSetSampleX()
{}
+
// Configure horizontal subsampling; returns the resulting destination width.
int SkMaskSwizzler::onSetSampleX(int sampleX) {
    // FIXME: Share this function with SkSwizzler?
    SkASSERT(sampleX > 0); // Surely there is an upper limit? Should there be
                           // way to report failure?
    fSampleX = sampleX;
    // get_start_coord() picks the first sampled x within a sample period;
    // offset by the subset's left edge (see SkCodecPriv).
    fX0 = get_start_coord(sampleX) + fSrcOffset;
    fDstWidth = get_scaled_dimension(fSubsetWidth, sampleX);

    // check that fX0 is valid
    SkASSERT(fX0 >= 0);
    return fDstWidth;
}
+
+/*
+ *
+ * Swizzle the specified row
+ *
+ */
void SkMaskSwizzler::swizzle(void* dst, const uint8_t* SK_RESTRICT src) {
    SkASSERT(nullptr != dst && nullptr != src);
    // Delegate to the row procedure chosen in CreateMaskSwizzler(), starting
    // at fX0 and stepping fSampleX source pixels per output pixel.
    fRowProc(dst, src, fDstWidth, fMasks, fX0, fSampleX);
}
diff --git a/gfx/skia/skia/src/codec/SkMaskSwizzler.h b/gfx/skia/skia/src/codec/SkMaskSwizzler.h
new file mode 100644
index 0000000000..4cac41905c
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMaskSwizzler.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMaskSwizzler_DEFINED
+#define SkMaskSwizzler_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkSampler.h"
+
+#include <cstdint>
+
+class SkMasks;
+struct SkImageInfo;
+
+/*
+ *
+ * Used to swizzle images whose pixel components are extracted by bit masks
+ * Currently only used by bmp
+ *
+ */
+class SkMaskSwizzler : public SkSampler {
+public:
+
+ /*
+ * @param masks Unowned pointer to helper class
+ */
+ static SkMaskSwizzler* CreateMaskSwizzler(const SkImageInfo& dstInfo,
+ bool srcIsOpaque,
+ SkMasks* masks,
+ uint32_t bitsPerPixel,
+ const SkCodec::Options& options);
+
+ /*
+ * Swizzle a row
+ */
+ void swizzle(void* dst, const uint8_t* SK_RESTRICT src);
+
+ int fillWidth() const override {
+ return fDstWidth;
+ }
+
+ /**
+ * Returns the byte offset at which we write to destination memory, taking
+ * scaling, subsetting, and partial frames into account.
+ * A similar function exists on SkSwizzler.
+ */
+ int swizzleWidth() const { return fDstWidth; }
+
+private:
+
+ /*
+ * Row procedure used for swizzle
+ */
+ typedef void (*RowProc)(void* dstRow, const uint8_t* srcRow, int width,
+ SkMasks* masks, uint32_t startX, uint32_t sampleX);
+
+ SkMaskSwizzler(SkMasks* masks, RowProc proc, int subsetWidth, int srcOffset);
+
+ int onSetSampleX(int) override;
+
+ SkMasks* fMasks; // unowned
+ const RowProc fRowProc;
+
+ // FIXME: Can this class share more with SkSwizzler? These variables are all the same.
+ const int fSubsetWidth; // Width of the subset of source before any sampling.
+ int fDstWidth; // Width of dst, which may differ with sampling.
+ int fSampleX;
+ int fSrcOffset;
+ int fX0;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkMasks.cpp b/gfx/skia/skia/src/codec/SkMasks.cpp
new file mode 100644
index 0000000000..c116167174
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMasks.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/codec/SkMasks.h"
+
+#include "src/codec/SkCodecPriv.h"
+
+/*
+ *
+ * Used to convert 1-7 bit color components into 8-bit color components
+ *
+ */
// Lookup table expanding a 1-7 bit color component to 8 bits. The section for
// an n-bit component begins at index (1 << n) - 2 and holds 1 << n entries.
static constexpr uint8_t n_bit_to_8_bit_lookup_table[] = {
    // 1 bit
    0, 255,
    // 2 bits
    0, 85, 170, 255,
    // 3 bits
    0, 36, 73, 109, 146, 182, 219, 255,
    // 4 bits
    0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255,
    // 5 bits
    0, 8, 16, 25, 33, 41, 49, 58, 66, 74, 82, 90, 99, 107, 115, 123, 132, 140,
    148, 156, 165, 173, 181, 189, 197, 206, 214, 222, 230, 239, 247, 255,
    // 6 bits
    0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 45, 49, 53, 57, 61, 65, 69, 73,
    77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138,
    142, 146, 150, 154, 158, 162, 166, 170, 174, 178, 182, 186, 190, 194, 198,
    202, 206, 210, 215, 219, 223, 227, 231, 235, 239, 243, 247, 251, 255,
    // 7 bits
    0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38,
    40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76,
    78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110,
    112, 114, 116, 118, 120, 122, 124, 126, 129, 131, 133, 135, 137, 139, 141,
    143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171,
    173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201,
    203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231,
    233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255
};

/*
 * Expand an n-bit component (n in [0, 8]) to a full 8-bit value.
 * n == 0 always yields 0; n == 8 passes the component through unchanged.
 */
static uint8_t convert_to_8(uint8_t component, uint32_t n) {
    if (0 == n) {
        return 0;
    }
    if (n < 8) {
        return n_bit_to_8_bit_lookup_table[(1 << n) - 2 + component];
    }
    // n must be 8 here (upstream asserts SkASSERT(8 == n) in debug builds).
    return component;
}

/*
 * Isolate one component of a pixel with (mask, shift) and widen it to 8 bits.
 */
static uint8_t get_comp(uint32_t pixel, uint32_t mask, uint32_t shift,
                        uint32_t size) {
    const uint32_t raw = (pixel & mask) >> shift;
    return convert_to_8((uint8_t) raw, size);
}
+
+/*
+ *
+ * Get a color component
+ *
+ */
// Extract the red component of an encoded pixel and widen it to 8 bits.
uint8_t SkMasks::getRed(uint32_t pixel) const {
    return get_comp(pixel, fRed.mask, fRed.shift, fRed.size);
}
// Extract the green component of an encoded pixel and widen it to 8 bits.
uint8_t SkMasks::getGreen(uint32_t pixel) const {
    return get_comp(pixel, fGreen.mask, fGreen.shift, fGreen.size);
}
// Extract the blue component of an encoded pixel and widen it to 8 bits.
uint8_t SkMasks::getBlue(uint32_t pixel) const {
    return get_comp(pixel, fBlue.mask, fBlue.shift, fBlue.size);
}
// Extract the alpha component of an encoded pixel and widen it to 8 bits.
// A zero alpha mask yields 0 (see convert_to_8 for the n == 0 case).
uint8_t SkMasks::getAlpha(uint32_t pixel) const {
    return get_comp(pixel, fAlpha.mask, fAlpha.shift, fAlpha.size);
}
+
+/*
+ *
+ * Process an input mask to obtain the necessary information
+ *
+ */
static SkMasks::MaskInfo process_mask(uint32_t mask) {
    // Determine properties of the mask
    uint32_t tempMask = mask;
    uint32_t shift = 0;  // Position of the mask's lowest set bit.
    uint32_t size = 0;   // Bit width of the mask's component.
    if (tempMask != 0) {
        // Count trailing zeros on masks
        for (; (tempMask & 1) == 0; tempMask >>= 1) {
            shift++;
        }
        // Count the size of the mask
        for (; tempMask & 1; tempMask >>= 1) {
            size++;
        }
        // Verify that the mask is continuous
        if (tempMask) {
            SkCodecPrintf("Warning: Bit mask is not continuous.\n");
            // Finish processing the mask
            // A non-contiguous mask contributes its full remaining bit width
            // (zero gaps included) to the component size.
            for (; tempMask; tempMask >>= 1) {
                size++;
            }
        }
        // Truncate masks greater than 8 bits
        // Keep only the most significant 8 bits of the component; mask and
        // shift are adjusted so get_comp() reads exactly those bits.
        if (size > 8) {
            shift += size - 8;
            size = 8;
            mask &= 0xFF << shift;
        }
    }

    return { mask, shift, size };
}
+
+/*
+ *
+ * Create the masks object
+ *
+ */
SkMasks* SkMasks::CreateMasks(InputMasks masks, int bytesPerPixel) {
    SkASSERT(0 < bytesPerPixel && bytesPerPixel <= 4);

    // Trim the input masks to match bytesPerPixel.
    // Bits above the pixel's actual width are cleared before validation.
    if (bytesPerPixel < 4) {
        int bitsPerPixel = 8*bytesPerPixel;
        masks.red   &= (1 << bitsPerPixel) - 1;
        masks.green &= (1 << bitsPerPixel) - 1;
        masks.blue  &= (1 << bitsPerPixel) - 1;
        masks.alpha &= (1 << bitsPerPixel) - 1;
    }

    // Check that masks do not overlap.
    // Overlapping component masks are invalid input; reject with nullptr.
    if (((masks.red & masks.green) |
            (masks.red & masks.blue ) |
            (masks.red & masks.alpha) |
            (masks.green & masks.blue ) |
            (masks.green & masks.alpha) |
            (masks.blue & masks.alpha) ) != 0) {
        return nullptr;
    }

    // Caller takes ownership of the returned object.
    return new SkMasks(process_mask(masks.red  ),
                       process_mask(masks.green),
                       process_mask(masks.blue ),
                       process_mask(masks.alpha));
}
+
diff --git a/gfx/skia/skia/src/codec/SkMasks.h b/gfx/skia/skia/src/codec/SkMasks.h
new file mode 100644
index 0000000000..99d1a9bed7
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkMasks.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkMasks_DEFINED
+#define SkMasks_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <cstdint>
+
+// Contains useful mask routines for SkMaskSwizzler
// Contains useful mask routines for SkMaskSwizzler
class SkMasks {
public:
    //Contains all of the information for a single mask
    struct MaskInfo {
        uint32_t mask;
        uint32_t shift;  // To the left
        uint32_t size;   // Of mask width
    };

    // Typically constructed via CreateMasks(), which validates and normalizes
    // the raw input masks first.
    constexpr SkMasks(const MaskInfo red, const MaskInfo green, const MaskInfo blue,
                      const MaskInfo alpha)
        : fRed(red)
        , fGreen(green)
        , fBlue(blue)
        , fAlpha(alpha) { }

    //Input bit masks format
    struct InputMasks {
        uint32_t red;
        uint32_t green;
        uint32_t blue;
        uint32_t alpha;
    };

    // Create the masks object
    // Returns nullptr if the component masks overlap; caller owns the result.
    static SkMasks* CreateMasks(InputMasks masks, int bytesPerPixel);

    // Get a color component
    uint8_t getRed(uint32_t pixel) const;
    uint8_t getGreen(uint32_t pixel) const;
    uint8_t getBlue(uint32_t pixel) const;
    uint8_t getAlpha(uint32_t pixel) const;

    // Getter for the alpha mask
    // The alpha mask may be used in other decoding modes
    uint32_t getAlphaMask() const {
        return fAlpha.mask;
    }

private:
    const MaskInfo fRed;
    const MaskInfo fGreen;
    const MaskInfo fBlue;
    const MaskInfo fAlpha;
};
+
+#endif
diff --git a/gfx/skia/skia/src/codec/SkParseEncodedOrigin.cpp b/gfx/skia/skia/src/codec/SkParseEncodedOrigin.cpp
new file mode 100644
index 0000000000..8d3d76a69f
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkParseEncodedOrigin.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkEncodedOrigin.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+#include "src/codec/SkCodecPriv.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
// Scan one EXIF IFD (at byte `offset` within exifData) for the Orientation
// tag; recurses once into a sub-IFD when called on the root IFD. Returns true
// and sets *orientation if a valid origin value (1..kLast) is found.
static bool parse_encoded_origin(const uint8_t* exifData, size_t data_length, uint64_t offset,
                                 bool littleEndian, bool is_root, SkEncodedOrigin* orientation) {
    // Require that the marker is at least large enough to contain the number of entries.
    if (data_length < offset + 2) {
        return false;
    }
    uint32_t numEntries = get_endian_short(exifData + offset, littleEndian);

    // Tag (2 bytes), Datatype (2 bytes), Number of elements (4 bytes), Data (4 bytes)
    const uint32_t kEntrySize = 12;
    // Clamp to the entries that actually fit in the buffer, so the loop below
    // never reads past data_length.
    const auto max = SkTo<uint32_t>((data_length - offset - 2) / kEntrySize);
    numEntries = std::min(numEntries, max);

    // Advance the data to the start of the entries.
    auto data = exifData + offset + 2;

    const uint16_t kOriginTag = 0x112;        // EXIF/TIFF Orientation tag.
    const uint16_t kOriginType = 3;           // SHORT (16-bit).
    const uint16_t kSubIFDOffsetTag = 0x8769; // EXIF sub-IFD pointer tag.
    const uint16_t kSubIFDOffsetType = 4;     // LONG (32-bit).

    for (uint32_t i = 0; i < numEntries; i++, data += kEntrySize) {
        uint16_t tag = get_endian_short(data, littleEndian);
        uint16_t type = get_endian_short(data + 2, littleEndian);
        uint32_t count = get_endian_int(data + 4, littleEndian);

        if (kOriginTag == tag && kOriginType == type && 1 == count) {
            uint16_t val = get_endian_short(data + 8, littleEndian);
            if (0 < val && val <= kLast_SkEncodedOrigin) {
                *orientation = (SkEncodedOrigin)val;
                return true;
            }
        } else if (kSubIFDOffsetTag == tag && kSubIFDOffsetType == type && 1 == count && is_root) {
            // Only recurse from the root IFD (is_root guards against
            // unbounded recursion through nested sub-IFD pointers).
            uint32_t subifd = get_endian_int(data + 8, littleEndian);
            if (0 < subifd && subifd < data_length) {
                if (parse_encoded_origin(exifData, data_length, subifd, littleEndian, false,
                                         orientation)) {
                    return true;
                }
            }
        }
    }

    return false;
}
+
// Parse an EXIF blob for the Orientation tag. Returns true and sets
// *orientation on success; false if the data is too short, the endian marker
// is invalid, or no valid Orientation entry is present.
bool SkParseEncodedOrigin(const uint8_t* data, size_t data_length, SkEncodedOrigin* orientation) {
    SkASSERT(orientation);
    bool littleEndian;
    // We need eight bytes to read the endian marker and the offset, below.
    if (data_length < 8 || !is_valid_endian_marker(data, &littleEndian)) {
        return false;
    }

    // Get the offset from the start of the marker.
    // Though this only reads four bytes, use a larger int in case it overflows.
    uint64_t offset = get_endian_int(data + 4, littleEndian);

    return parse_encoded_origin(data, data_length, offset, littleEndian, true, orientation);
}
diff --git a/gfx/skia/skia/src/codec/SkParseEncodedOrigin.h b/gfx/skia/skia/src/codec/SkParseEncodedOrigin.h
new file mode 100644
index 0000000000..4891557a19
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkParseEncodedOrigin.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkParseEncodedOrigin_DEFINED
+#define SkParseEncodedOrigin_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+
+/**
+ * If |data| is an EXIF tag representing an SkEncodedOrigin, return true and set |out|
+ * appropriately. Otherwise return false.
+ */
+bool SkParseEncodedOrigin(const uint8_t* data, size_t data_length, SkEncodedOrigin* out);
+
+#endif // SkParseEncodedOrigin_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkPixmapUtils.cpp b/gfx/skia/skia/src/codec/SkPixmapUtils.cpp
new file mode 100644
index 0000000000..9364ed4d7c
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkPixmapUtils.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkPixmapUtils.h"
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkSurface.h"
+
+#include <utility>
+
// Draw src into dst with the transform implied by the EXIF origin.
// Returns false only if dst's pixels cannot back a raster surface.
static bool draw_orientation(const SkPixmap& dst, const SkPixmap& src, SkEncodedOrigin origin) {
    // Wrap dst's pixel memory in a surface so we can draw into it directly.
    auto surf = SkSurface::MakeRasterDirect(dst.info(), dst.writable_addr(), dst.rowBytes());
    if (!surf) {
        return false;
    }

    // Wrap src without copying its pixels.
    SkBitmap bm;
    bm.installPixels(src);

    // Matrix mapping src through `origin` into dst's coordinate space.
    SkMatrix m = SkEncodedOriginToMatrix(origin, dst.width(), dst.height());

    SkPaint p;
    p.setBlendMode(SkBlendMode::kSrc);  // overwrite dst; no alpha blending
    surf->getCanvas()->concat(m);
    surf->getCanvas()->drawImage(SkImage::MakeFromBitmap(bm), 0, 0, SkSamplingOptions(), &p);
    return true;
}
+
// Copy src into dst applying the EXIF origin transform. Fails (returns false)
// on colorType mismatch, wrong dst dimensions, or aliasing with a non-trivial
// origin.
bool SkPixmapUtils::Orient(const SkPixmap& dst, const SkPixmap& src, SkEncodedOrigin origin) {
    if (src.colorType() != dst.colorType()) {
        return false;
    }
    // note: we just ignore alphaType and colorSpace for this transformation

    // dst must have src's dimensions, swapped if the origin rotates by 90/270.
    int w = src.width();
    int h = src.height();
    if (SkEncodedOriginSwapsWidthHeight(origin)) {
        using std::swap;
        swap(w, h);
    }
    if (dst.width() != w || dst.height() != h) {
        return false;
    }
    // Empty images trivially succeed.
    if (w == 0 || h == 0) {
        return true;
    }

    // check for aliasing to self
    // In-place transformation is only possible for the identity origin.
    if (src.addr() == dst.addr()) {
        return kTopLeft_SkEncodedOrigin == origin;
    }
    return draw_orientation(dst, src, origin);
}
+
// Return `info` with its width and height exchanged; used when the encoded
// origin swaps dimensions (see SkEncodedOriginSwapsWidthHeight).
SkImageInfo SkPixmapUtils::SwapWidthHeight(const SkImageInfo& info) {
    return info.makeWH(info.height(), info.width());
}
diff --git a/gfx/skia/skia/src/codec/SkPixmapUtils.h b/gfx/skia/skia/src/codec/SkPixmapUtils.h
new file mode 100644
index 0000000000..e438fbbe83
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkPixmapUtils.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPixmapUtils_DEFINED
+#define SkPixmapUtils_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "src/core/SkAutoPixmapStorage.h"
+
+class SkPixmapUtils {
+public:
+ /**
+ * Copy the pixels in this pixmap into dst, applying the orientation transformations specified
+ * by the flags. If the inputs are invalid, this returns false and no copy is made.
+ */
+ static bool Orient(const SkPixmap& dst, const SkPixmap& src, SkEncodedOrigin);
+
+ static SkImageInfo SwapWidthHeight(const SkImageInfo& info);
+
+ /**
+ * Decode an image and then copy into dst, applying origin.
+ *
+ * @param dst SkPixmap to write the final image, after
+ * applying the origin.
+ * @param origin SkEncodedOrigin to apply to the raw pixels.
+ * @param decode Function for decoding into a pixmap without
+ * applying the origin.
+ */
+
+ template <typename Fn>
+ static bool Orient(const SkPixmap& dst, SkEncodedOrigin origin, Fn&& decode) {
+ SkAutoPixmapStorage storage;
+ const SkPixmap* tmp = &dst;
+ if (origin != kTopLeft_SkEncodedOrigin) {
+ auto info = dst.info();
+ if (SkEncodedOriginSwapsWidthHeight(origin)) {
+ info = SwapWidthHeight(info);
+ }
+ if (!storage.tryAlloc(info)) {
+ return false;
+ }
+ tmp = &storage;
+ }
+ if (!decode(*tmp)) {
+ return false;
+ }
+ if (tmp != &dst) {
+ return Orient(dst, *tmp, origin);
+ }
+ return true;
+ }
+
+};
+
+#endif // SkPixmapUtils_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkSampler.cpp b/gfx/skia/skia/src/codec/SkSampler.cpp
new file mode 100644
index 0000000000..3a6832c183
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampler.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkSampler.h"
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/core/SkOpts.h"
+
+#include <cstdint>
+#include <cstring>
+
+void SkSampler::Fill(const SkImageInfo& info, void* dst, size_t rowBytes,
+ SkCodec::ZeroInitialized zeroInit) {
+ SkASSERT(dst != nullptr);
+
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ return;
+ }
+
+ const int width = info.width();
+ const int numRows = info.height();
+
+ // Use the proper memset routine to fill the remaining bytes
+ switch (info.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType: {
+ uint32_t* dstRow = (uint32_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ SkOpts::memset32(dstRow, 0, width);
+ dstRow = SkTAddOffset<uint32_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ case kRGB_565_SkColorType: {
+ uint16_t* dstRow = (uint16_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ SkOpts::memset16(dstRow, 0, width);
+ dstRow = SkTAddOffset<uint16_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ case kGray_8_SkColorType: {
+ uint8_t* dstRow = (uint8_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ memset(dstRow, 0, width);
+ dstRow = SkTAddOffset<uint8_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ case kRGBA_F16_SkColorType: {
+ uint64_t* dstRow = (uint64_t*) dst;
+ for (int row = 0; row < numRows; row++) {
+ SkOpts::memset64(dstRow, 0, width);
+ dstRow = SkTAddOffset<uint64_t>(dstRow, rowBytes);
+ }
+ break;
+ }
+ default:
+ SkCodecPrintf("Error: Unsupported dst color type for fill(). Doing nothing.\n");
+ SkASSERT(false);
+ break;
+ }
+}
diff --git a/gfx/skia/skia/src/codec/SkSampler.h b/gfx/skia/skia/src/codec/SkSampler.h
new file mode 100644
index 0000000000..ac3b27c441
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSampler.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkSampler_DEFINED
+#define SkSampler_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "src/codec/SkCodecPriv.h"
+
+#include <cstddef>
+
+struct SkImageInfo;
+
+class SkSampler : public SkNoncopyable {
+public:
+ /**
+ * Update the sampler to sample every sampleX'th pixel. Returns the
+ * width after sampling.
+ */
+ int setSampleX(int sampleX) {
+ return this->onSetSampleX(sampleX);
+ }
+
+ /**
+ * Update the sampler to sample every sampleY'th row.
+ */
+ void setSampleY(int sampleY) {
+ fSampleY = sampleY;
+ }
+
+ /**
+ * Retrieve the value set for sampleY.
+ */
+ int sampleY() const {
+ return fSampleY;
+ }
+
+ /**
+ * Based on fSampleY, return whether this row belongs in the output.
+ *
+ * @param row Row of the image, starting with the first row in the subset.
+ */
+ bool rowNeeded(int row) const {
+ return (row - get_start_coord(fSampleY)) % fSampleY == 0;
+ }
+
+ /**
+ * Fill the remainder of the destination with 0.
+ *
+ * 0 has a different meaning depending on the SkColorType. For color types
+ * with transparency, this means transparent. For k565 and kGray, 0 is
+ * black.
+ *
+ * @param info
+ * Contains the color type of the rows to fill.
+ * Contains the pixel width of the destination rows to fill
+ * Contains the number of rows that we need to fill.
+ *
+ * @param dst
+ * The destination row to fill.
+ *
+ * @param rowBytes
+ * Stride in bytes of the destination.
+ *
+ * @param zeroInit
+ * Indicates whether memory is already zero initialized.
+ */
+ static void Fill(const SkImageInfo& info, void* dst, size_t rowBytes,
+ SkCodec::ZeroInitialized zeroInit);
+
+ virtual int fillWidth() const = 0;
+
+ SkSampler()
+ : fSampleY(1)
+ {}
+
+ virtual ~SkSampler() {}
+private:
+ int fSampleY;
+
+ virtual int onSetSampleX(int) = 0;
+};
+
+#endif // SkSampler_DEFINED
diff --git a/gfx/skia/skia/src/codec/SkSwizzler.cpp b/gfx/skia/skia/src/codec/SkSwizzler.cpp
new file mode 100644
index 0000000000..d5a1ddbc89
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSwizzler.cpp
@@ -0,0 +1,1250 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/codec/SkSwizzler.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRect.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkEncodedInfo.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkCPUTypes.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkHalf.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/core/SkOpts.h"
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #include "include/android/SkAndroidFrameworkUtils.h"
+#endif
+
+#include <cstring>
+
+static void copy(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ memcpy(dst, src + offset, width * bpp);
+}
+
+static void sample1(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint8_t* dst8 = (uint8_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst8[x] = *src;
+ src += deltaSrc;
+ }
+}
+
+static void sample2(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint16_t* dst16 = (uint16_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst16[x] = *((const uint16_t*) src);
+ src += deltaSrc;
+ }
+}
+
+static void sample4(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = *((const uint32_t*) src);
+ src += deltaSrc;
+ }
+}
+
+static void sample6(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint8_t* dst8 = (uint8_t*) dst;
+ for (int x = 0; x < width; x++) {
+ memcpy(dst8, src, 6);
+ dst8 += 6;
+ src += deltaSrc;
+ }
+}
+
+static void sample8(void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ src += offset;
+ uint64_t* dst64 = (uint64_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst64[x] = *((const uint64_t*) src);
+ src += deltaSrc;
+ }
+}
+
+// kBit
+// These routines exclusively choose between white and black
+
+#define GRAYSCALE_BLACK 0
+#define GRAYSCALE_WHITE 0xFF
+
+
+// same as swizzle_bit_to_index and swizzle_bit_to_n32 except for value assigned to dst[x]
+static void swizzle_bit_to_grayscale(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+
+ uint8_t* SK_RESTRICT dst = (uint8_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7-bitIndex)) & 1) ? GRAYSCALE_WHITE : GRAYSCALE_BLACK;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7-bitIndex)) & 1) ? GRAYSCALE_WHITE : GRAYSCALE_BLACK;
+ }
+}
+
+#undef GRAYSCALE_BLACK
+#undef GRAYSCALE_WHITE
+
+// same as swizzle_bit_to_grayscale and swizzle_bit_to_index except for value assigned to dst[x]
+static void swizzle_bit_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? SK_ColorWHITE : SK_ColorBLACK;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? SK_ColorWHITE : SK_ColorBLACK;
+ }
+}
+
+#define RGB565_BLACK 0
+#define RGB565_WHITE 0xFFFF
+
+static void swizzle_bit_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ uint16_t* SK_RESTRICT dst = (uint16_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? RGB565_WHITE : RGB565_BLACK;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? RGB565_WHITE : RGB565_BLACK;
+ }
+}
+
+#undef RGB565_BLACK
+#undef RGB565_WHITE
+
+static void swizzle_bit_to_f16(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor* /*ctable*/) {
+ constexpr uint64_t kWhite = (((uint64_t) SK_Half1) << 0) |
+ (((uint64_t) SK_Half1) << 16) |
+ (((uint64_t) SK_Half1) << 32) |
+ (((uint64_t) SK_Half1) << 48);
+ constexpr uint64_t kBlack = (((uint64_t) 0) << 0) |
+ (((uint64_t) 0) << 16) |
+ (((uint64_t) 0) << 32) |
+ (((uint64_t) SK_Half1) << 48);
+
+ uint64_t* SK_RESTRICT dst = (uint64_t*) dstRow;
+
+ // increment src by byte offset and bitIndex by bit offset
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+
+ dst[0] = ((currByte >> (7 - bitIndex)) & 1) ? kWhite : kBlack;
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ dst[x] = ((currByte >> (7 - bitIndex)) & 1) ? kWhite : kBlack;
+ }
+}
+
+// kIndex1, kIndex2, kIndex4
+
+static void swizzle_small_index_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ uint16_t* dst = (uint16_t*) dstRow;
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+ const uint8_t mask = (1 << bpp) - 1;
+ uint8_t index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[0] = SkPixel32ToPixel16(ctable[index]);
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[x] = SkPixel32ToPixel16(ctable[index]);
+ }
+}
+
+static void swizzle_small_index_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ SkPMColor* dst = (SkPMColor*) dstRow;
+ src += offset / 8;
+ int bitIndex = offset % 8;
+ uint8_t currByte = *src;
+ const uint8_t mask = (1 << bpp) - 1;
+ uint8_t index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[0] = ctable[index];
+
+ for (int x = 1; x < dstWidth; x++) {
+ int bitOffset = bitIndex + deltaSrc;
+ bitIndex = bitOffset % 8;
+ currByte = *(src += bitOffset / 8);
+ index = (currByte >> (8 - bpp - bitIndex)) & mask;
+ dst[x] = ctable[index];
+ }
+}
+
+// kIndex
+
+static void swizzle_index_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ SkPMColor c = ctable[*src];
+ dst[x] = c;
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_index_to_n32_skipZ(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ SkPMColor c = ctable[*src];
+ if (c != 0) {
+ dst[x] = c;
+ }
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_index_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPixel32ToPixel16(ctable[*src]);
+ src += deltaSrc;
+ }
+}
+
+// kGray
+
+static void swizzle_gray_to_n32(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB32NoCheck(0xFF, *src, *src, *src);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_gray_to_n32(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between RGB and BGR.
+ // Each color channel will get the same value.
+ SkOpts::gray_to_RGB1((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_gray_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[0], src[0], src[0]);
+ src += deltaSrc;
+ }
+}
+
+// kGrayAlpha
+
+static void swizzle_grayalpha_to_n32_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* dst32 = (SkPMColor*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = SkPackARGB32NoCheck(src[1], src[0], src[0], src[0]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_grayalpha_to_n32_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between RGB and BGR.
+ // Each color channel will get the same value.
+ SkOpts::grayA_to_RGBA((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_grayalpha_to_n32_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* dst32 = (SkPMColor*) dst;
+ for (int x = 0; x < width; x++) {
+ uint8_t pmgray = SkMulDiv255Round(src[1], src[0]);
+ dst32[x] = SkPackARGB32NoCheck(src[1], pmgray, pmgray, pmgray);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_grayalpha_to_n32_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ // Note that there is no need to distinguish between rgb and bgr.
+ // Each color channel will get the same value.
+ SkOpts::grayA_to_rgbA((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_grayalpha_to_a8(void* dst, const uint8_t* src, int width, int bpp,
+ int deltaSrc, int offset, const SkPMColor[]) {
+ src += offset;
+ uint8_t* dst8 = (uint8_t*)dst;
+ for (int x = 0; x < width; ++x) {
+ dst8[x] = src[1]; // src[0] is gray, ignored
+ src += deltaSrc;
+ }
+}
+
+// kBGR
+
+static void swizzle_bgr_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[2], src[1], src[0]);
+ src += deltaSrc;
+ }
+}
+
+// kRGB
+
+static void swizzle_rgb_to_rgba(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB_as_RGBA(0xFF, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgb_to_bgra(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPackARGB_as_BGRA(0xFF, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgb_to_rgba(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGB_to_RGB1((uint32_t*) dst, src + offset, width);
+}
+
+static void fast_swizzle_rgb_to_bgra(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGB_to_BGR1((uint32_t*) dst, src + offset, width);
+}
+
+static void swizzle_rgb_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bytesPerPixel, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = SkPack888ToRGB16(src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+// kRGBA
+
+static void swizzle_rgba_to_rgba_premul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = premultiply_argb_as_rgba(src[3], src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba_to_bgra_premul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ dst[x] = premultiply_argb_as_bgra(src[3], src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgba_to_rgba_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_rgbA((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+static void fast_swizzle_rgba_to_bgra_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_bgrA((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+static void swizzle_rgba_to_bgra_unpremul(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint32_t* SK_RESTRICT dst = reinterpret_cast<uint32_t*>(dstRow);
+ for (int x = 0; x < dstWidth; x++) {
+ unsigned alpha = src[3];
+ dst[x] = SkPackARGB_as_BGRA(alpha, src[0], src[1], src[2]);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_rgba_to_bgra_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::RGBA_to_BGRA((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+// 16-bits per component kRGB and kRGBA
+
+static void swizzle_rgb16_to_rgba(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to8 = [](const uint8_t* ptr) {
+ return 0xFF000000 | (ptr[4] << 16) | (ptr[2] << 8) | ptr[0];
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = strip16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgb16_to_bgra(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to8 = [](const uint8_t* ptr) {
+ return 0xFF000000 | (ptr[0] << 16) | (ptr[2] << 8) | ptr[4];
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = strip16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgb16_to_565(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to565 = [](const uint8_t* ptr) {
+ return SkPack888ToRGB16(ptr[0], ptr[2], ptr[4]);
+ };
+
+ src += offset;
+ uint16_t* dst16 = (uint16_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst16[x] = strip16to565(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba16_to_rgba_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to8 = [](const uint8_t* ptr) {
+ return (ptr[6] << 24) | (ptr[4] << 16) | (ptr[2] << 8) | ptr[0];
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = strip16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba16_to_rgba_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto stripAndPremul16to8 = [](const uint8_t* ptr) {
+ return premultiply_argb_as_rgba(ptr[6], ptr[0], ptr[2], ptr[4]);
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = stripAndPremul16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba16_to_bgra_unpremul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto strip16to8 = [](const uint8_t* ptr) {
+ return (ptr[6] << 24) | (ptr[0] << 16) | (ptr[2] << 8) | ptr[4];
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = strip16to8(src);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_rgba16_to_bgra_premul(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+ auto stripAndPremul16to8 = [](const uint8_t* ptr) {
+ return premultiply_argb_as_bgra(ptr[6], ptr[0], ptr[2], ptr[4]);
+ };
+
+ src += offset;
+ uint32_t* dst32 = (uint32_t*) dst;
+ for (int x = 0; x < width; x++) {
+ dst32[x] = stripAndPremul16to8(src);
+ src += deltaSrc;
+ }
+}
+
+// kCMYK
+//
+// CMYK is stored as four bytes per pixel.
+//
+// We will implement a crude conversion from CMYK -> RGB using formulas
+// from easyrgb.com.
+//
+// CMYK -> CMY
+// C = C * (1 - K) + K
+// M = M * (1 - K) + K
+// Y = Y * (1 - K) + K
+//
+// libjpeg actually gives us inverted CMYK, so we must subtract the
+// original terms from 1.
+// CMYK -> CMY
+// C = (1 - C) * (1 - (1 - K)) + (1 - K)
+// M = (1 - M) * (1 - (1 - K)) + (1 - K)
+// Y = (1 - Y) * (1 - (1 - K)) + (1 - K)
+//
+// Simplifying the above expression.
+// CMYK -> CMY
+// C = 1 - CK
+// M = 1 - MK
+// Y = 1 - YK
+//
+// CMY -> RGB
+// R = (1 - C) * 255
+// G = (1 - M) * 255
+// B = (1 - Y) * 255
+//
+// Therefore the full conversion is below. This can be verified at
+// www.rapidtables.com (assuming inverted CMYK).
+// CMYK -> RGB
+// R = C * K * 255
+// G = M * K * 255
+// B = Y * K * 255
+//
+// As a final note, we have treated the CMYK values as if they were on
+// a scale from 0-1, when in fact they are 8-bit ints scaling from 0-255.
+// We must divide each CMYK component by 255 to obtain the true conversion
+// we should perform.
+// CMYK -> RGB
+// R = C * K / 255
+// G = M * K / 255
+// B = Y * K / 255
+static void swizzle_cmyk_to_rgba(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPackARGB_as_RGBA(0xFF, r, g, b);
+ src += deltaSrc;
+ }
+}
+
+static void swizzle_cmyk_to_bgra(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPackARGB_as_BGRA(0xFF, r, g, b);
+ src += deltaSrc;
+ }
+}
+
+static void fast_swizzle_cmyk_to_rgba(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::inverted_CMYK_to_RGB1((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+static void fast_swizzle_cmyk_to_bgra(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
+
+ SkOpts::inverted_CMYK_to_BGR1((uint32_t*) dst, (const uint32_t*)(src + offset), width);
+}
+
+static void swizzle_cmyk_to_565(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+
+ src += offset;
+ uint16_t* SK_RESTRICT dst = (uint16_t*)dstRow;
+ for (int x = 0; x < dstWidth; x++) {
+ const uint8_t r = SkMulDiv255Round(src[0], src[3]);
+ const uint8_t g = SkMulDiv255Round(src[1], src[3]);
+ const uint8_t b = SkMulDiv255Round(src[2], src[3]);
+
+ dst[x] = SkPack888ToRGB16(r, g, b);
+ src += deltaSrc;
+ }
+}
+
+template <SkSwizzler::RowProc proc>
+void SkSwizzler::SkipLeadingGrayAlphaZerosThen(
+ void* dst, const uint8_t* src, int width,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ SkASSERT(!ctable);
+
+ const uint16_t* src16 = (const uint16_t*) (src + offset);
+ uint32_t* dst32 = (uint32_t*) dst;
+
+ // This may miss opportunities to skip when the output is premultiplied,
+ // e.g. for a src pixel 0x00FF which is not zero but becomes zero after premultiplication.
+ while (width > 0 && *src16 == 0x0000) {
+ width--;
+ dst32++;
+ src16 += deltaSrc / 2;
+ }
+ proc(dst32, (const uint8_t*)src16, width, bpp, deltaSrc, 0, ctable);
+}
+
+template <SkSwizzler::RowProc proc>
+void SkSwizzler::SkipLeading8888ZerosThen(
+ void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
+ int bpp, int deltaSrc, int offset, const SkPMColor ctable[]) {
+ SkASSERT(!ctable);
+
+ auto src32 = (const uint32_t*)(src+offset);
+ auto dst32 = (uint32_t*)dstRow;
+
+ // This may miss opportunities to skip when the output is premultiplied,
+ // e.g. for a src pixel 0x00FFFFFF which is not zero but becomes zero after premultiplication.
+ while (dstWidth > 0 && *src32 == 0x00000000) {
+ dstWidth--;
+ dst32++;
+ src32 += deltaSrc/4;
+ }
+ proc(dst32, (const uint8_t*)src32, dstWidth, bpp, deltaSrc, 0, ctable);
+}
+
+std::unique_ptr<SkSwizzler> SkSwizzler::MakeSimple(int srcBPP, const SkImageInfo& dstInfo,
+ const SkCodec::Options& options) {
+ RowProc proc = nullptr;
+ switch (srcBPP) {
+ case 1: // kGray_8_SkColorType
+ proc = &sample1;
+ break;
+ case 2: // kRGB_565_SkColorType
+ proc = &sample2;
+ break;
+ case 4: // kRGBA_8888_SkColorType
+ // kBGRA_8888_SkColorType
+ // kRGBA_1010102_SkColorType
+ proc = &sample4;
+ break;
+ case 6: // 16 bit PNG no alpha
+ proc = &sample6;
+ break;
+ case 8: // 16 bit PNG with alpha
+ proc = &sample8;
+ break;
+ default:
+ return nullptr;
+ }
+
+ return Make(dstInfo, &copy, proc, nullptr /*ctable*/, srcBPP,
+ dstInfo.bytesPerPixel(), options, nullptr /*frame*/);
+}
+
+std::unique_ptr<SkSwizzler> SkSwizzler::Make(const SkEncodedInfo& encodedInfo,
+ const SkPMColor* ctable,
+ const SkImageInfo& dstInfo,
+ const SkCodec::Options& options,
+ const SkIRect* frame) {
+ if (SkEncodedInfo::kPalette_Color == encodedInfo.color() && nullptr == ctable) {
+ return nullptr;
+ }
+
+ RowProc fastProc = nullptr;
+ RowProc proc = nullptr;
+ SkCodec::ZeroInitialized zeroInit = options.fZeroInitialized;
+ const bool premultiply = (SkEncodedInfo::kOpaque_Alpha != encodedInfo.alpha()) &&
+ (kPremul_SkAlphaType == dstInfo.alphaType());
+
+ switch (encodedInfo.color()) {
+ case SkEncodedInfo::kGray_Color:
+ switch (encodedInfo.bitsPerComponent()) {
+ case 1:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_bit_to_n32;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bit_to_565;
+ break;
+ case kGray_8_SkColorType:
+ proc = &swizzle_bit_to_grayscale;
+ break;
+ case kRGBA_F16_SkColorType:
+ proc = &swizzle_bit_to_f16;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case 8:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_gray_to_n32;
+ fastProc = &fast_swizzle_gray_to_n32;
+ break;
+ case kGray_8_SkColorType:
+ proc = &sample1;
+ fastProc = &copy;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_gray_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kXAlpha_Color:
+ case SkEncodedInfo::kGrayAlpha_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeadingGrayAlphaZerosThen
+ <swizzle_grayalpha_to_n32_premul>;
+ fastProc = &SkipLeadingGrayAlphaZerosThen
+ <fast_swizzle_grayalpha_to_n32_premul>;
+ } else {
+ proc = &swizzle_grayalpha_to_n32_premul;
+ fastProc = &fast_swizzle_grayalpha_to_n32_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeadingGrayAlphaZerosThen
+ <swizzle_grayalpha_to_n32_unpremul>;
+ fastProc = &SkipLeadingGrayAlphaZerosThen
+ <fast_swizzle_grayalpha_to_n32_unpremul>;
+ } else {
+ proc = &swizzle_grayalpha_to_n32_unpremul;
+ fastProc = &fast_swizzle_grayalpha_to_n32_unpremul;
+ }
+ }
+ break;
+ case kAlpha_8_SkColorType:
+ proc = &swizzle_grayalpha_to_a8;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kPalette_Color:
+ // We assume that the color table is premultiplied and swizzled
+ // as desired.
+ switch (encodedInfo.bitsPerComponent()) {
+ case 1:
+ case 2:
+ case 4:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_small_index_to_n32;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_small_index_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case 8:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &swizzle_index_to_n32_skipZ;
+ } else {
+ proc = &swizzle_index_to_n32;
+ }
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_index_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::k565_Color:
+ // Treat 565 exactly like RGB (since it's still encoded as 8 bits per component).
+ // We just mark as 565 when we have a hint that there are only 5/6/5 "significant"
+ // bits in each channel.
+ case SkEncodedInfo::kRGB_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = &swizzle_rgb16_to_rgba;
+ break;
+ }
+
+ SkASSERT(8 == encodedInfo.bitsPerComponent());
+ proc = &swizzle_rgb_to_rgba;
+ fastProc = &fast_swizzle_rgb_to_rgba;
+ break;
+ case kBGRA_8888_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = &swizzle_rgb16_to_bgra;
+ break;
+ }
+
+ SkASSERT(8 == encodedInfo.bitsPerComponent());
+ proc = &swizzle_rgb_to_bgra;
+ fastProc = &fast_swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = &swizzle_rgb16_to_565;
+ break;
+ }
+
+ proc = &swizzle_rgb_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kRGBA_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = premultiply ? &swizzle_rgba16_to_rgba_premul :
+ &swizzle_rgba16_to_rgba_unpremul;
+ break;
+ }
+
+ SkASSERT(8 == encodedInfo.bitsPerComponent());
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_rgba_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_rgba_premul>;
+ } else {
+ proc = &swizzle_rgba_to_rgba_premul;
+ fastProc = &fast_swizzle_rgba_to_rgba_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<sample4>;
+ fastProc = &SkipLeading8888ZerosThen<copy>;
+ } else {
+ proc = &sample4;
+ fastProc = &copy;
+ }
+ }
+ break;
+ case kBGRA_8888_SkColorType:
+ if (16 == encodedInfo.bitsPerComponent()) {
+ proc = premultiply ? &swizzle_rgba16_to_bgra_premul :
+ &swizzle_rgba16_to_bgra_unpremul;
+ break;
+ }
+
+ SkASSERT(8 == encodedInfo.bitsPerComponent());
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_premul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_premul;
+ fastProc = &fast_swizzle_rgba_to_bgra_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_unpremul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_unpremul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_unpremul;
+ fastProc = &fast_swizzle_rgba_to_bgra_unpremul;
+ }
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGR_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_rgb_to_rgba;
+ fastProc = &fast_swizzle_rgb_to_rgba;
+ break;
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_rgb_to_bgra;
+ fastProc = &fast_swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bgr_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGRX_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_rgb_to_rgba;
+ break;
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_rgb_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_bgr_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kBGRA_Color:
+ switch (dstInfo.colorType()) {
+ case kBGRA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_rgba_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_rgba_premul>;
+ } else {
+ proc = &swizzle_rgba_to_rgba_premul;
+ fastProc = &fast_swizzle_rgba_to_rgba_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<sample4>;
+ fastProc = &SkipLeading8888ZerosThen<copy>;
+ } else {
+ proc = &sample4;
+ fastProc = &copy;
+ }
+ }
+ break;
+ case kRGBA_8888_SkColorType:
+ if (premultiply) {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_premul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_premul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_premul;
+ fastProc = &fast_swizzle_rgba_to_bgra_premul;
+ }
+ } else {
+ if (SkCodec::kYes_ZeroInitialized == zeroInit) {
+ proc = &SkipLeading8888ZerosThen<swizzle_rgba_to_bgra_unpremul>;
+ fastProc = &SkipLeading8888ZerosThen
+ <fast_swizzle_rgba_to_bgra_unpremul>;
+ } else {
+ proc = &swizzle_rgba_to_bgra_unpremul;
+ fastProc = &fast_swizzle_rgba_to_bgra_unpremul;
+ }
+ }
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ case SkEncodedInfo::kInvertedCMYK_Color:
+ switch (dstInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ proc = &swizzle_cmyk_to_rgba;
+ fastProc = &fast_swizzle_cmyk_to_rgba;
+ break;
+ case kBGRA_8888_SkColorType:
+ proc = &swizzle_cmyk_to_bgra;
+ fastProc = &fast_swizzle_cmyk_to_bgra;
+ break;
+ case kRGB_565_SkColorType:
+ proc = &swizzle_cmyk_to_565;
+ break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+
+ // Store bpp in bytes if it is an even multiple, otherwise use bits
+ uint8_t bitsPerPixel = encodedInfo.bitsPerPixel();
+ int srcBPP = SkIsAlign8(bitsPerPixel) ? bitsPerPixel / 8 : bitsPerPixel;
+ int dstBPP = dstInfo.bytesPerPixel();
+ return Make(dstInfo, fastProc, proc, ctable, srcBPP, dstBPP, options, frame);
+}
+
+std::unique_ptr<SkSwizzler> SkSwizzler::Make(const SkImageInfo& dstInfo,
+ RowProc fastProc, RowProc proc, const SkPMColor* ctable, int srcBPP,
+ int dstBPP, const SkCodec::Options& options, const SkIRect* frame) {
+ int srcOffset = 0;
+ int srcWidth = dstInfo.width();
+ int dstOffset = 0;
+ int dstWidth = srcWidth;
+ if (options.fSubset) {
+ // We do not currently support subset decodes for image types that may have
+ // frames (gif).
+ SkASSERT(!frame);
+ srcOffset = options.fSubset->left();
+ srcWidth = options.fSubset->width();
+ dstWidth = srcWidth;
+ } else if (frame) {
+ dstOffset = frame->left();
+ srcWidth = frame->width();
+ }
+
+ return std::unique_ptr<SkSwizzler>(new SkSwizzler(fastProc, proc, ctable, srcOffset, srcWidth,
+ dstOffset, dstWidth, srcBPP, dstBPP));
+}
+
+SkSwizzler::SkSwizzler(RowProc fastProc, RowProc proc, const SkPMColor* ctable, int srcOffset,
+ int srcWidth, int dstOffset, int dstWidth, int srcBPP, int dstBPP)
+ : fFastProc(fastProc)
+ , fSlowProc(proc)
+ , fActualProc(fFastProc ? fFastProc : fSlowProc)
+ , fColorTable(ctable)
+ , fSrcOffset(srcOffset)
+ , fDstOffset(dstOffset)
+ , fSrcOffsetUnits(srcOffset * srcBPP)
+ , fDstOffsetBytes(dstOffset * dstBPP)
+ , fSrcWidth(srcWidth)
+ , fDstWidth(dstWidth)
+ , fSwizzleWidth(srcWidth)
+ , fAllocatedWidth(dstWidth)
+ , fSampleX(1)
+ , fSrcBPP(srcBPP)
+ , fDstBPP(dstBPP)
+{}
+
+int SkSwizzler::onSetSampleX(int sampleX) {
+ SkASSERT(sampleX > 0);
+
+ fSampleX = sampleX;
+ fDstOffsetBytes = (fDstOffset / sampleX) * fDstBPP;
+ fSwizzleWidth = get_scaled_dimension(fSrcWidth, sampleX);
+ fAllocatedWidth = get_scaled_dimension(fDstWidth, sampleX);
+
+ int frameSampleX = sampleX;
+ if (fSrcWidth < fDstWidth) {
+ // Although SkSampledCodec adjusted sampleX so that it will never be
+ // larger than the width of the image (or subset, if applicable), it
+ // doesn't account for the width of a subset frame (i.e. gif). As a
+ // result, get_start_coord(sampleX) could result in fSrcOffsetUnits
+ // being wider than fSrcWidth. Compute a sampling rate based on the
+ // frame width to ensure that fSrcOffsetUnits is sensible.
+ frameSampleX = fSrcWidth / fSwizzleWidth;
+ }
+ fSrcOffsetUnits = (get_start_coord(frameSampleX) + fSrcOffset) * fSrcBPP;
+
+ if (fDstOffsetBytes > 0) {
+ const size_t dstSwizzleBytes = fSwizzleWidth * fDstBPP;
+ const size_t dstAllocatedBytes = fAllocatedWidth * fDstBPP;
+ if (fDstOffsetBytes + dstSwizzleBytes > dstAllocatedBytes) {
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkAndroidFrameworkUtils::SafetyNetLog("118143775");
+#endif
+ SkASSERT(dstSwizzleBytes <= dstAllocatedBytes);
+ fDstOffsetBytes = dstAllocatedBytes - dstSwizzleBytes;
+ }
+ }
+
+ // The optimized swizzler functions do not support sampling. Sampled swizzles
+ // are already fast because they skip pixels. We haven't seen a situation
+ // where speeding up sampling has a significant impact on total decode time.
+ if (1 == fSampleX && fFastProc) {
+ fActualProc = fFastProc;
+ } else {
+ fActualProc = fSlowProc;
+ }
+
+ return fAllocatedWidth;
+}
+
+void SkSwizzler::swizzle(void* dst, const uint8_t* SK_RESTRICT src) {
+ SkASSERT(nullptr != dst && nullptr != src);
+ fActualProc(SkTAddOffset<void>(dst, fDstOffsetBytes), src, fSwizzleWidth, fSrcBPP,
+ fSampleX * fSrcBPP, fSrcOffsetUnits, fColorTable);
+}
diff --git a/gfx/skia/skia/src/codec/SkSwizzler.h b/gfx/skia/skia/src/codec/SkSwizzler.h
new file mode 100644
index 0000000000..e048bee3c8
--- /dev/null
+++ b/gfx/skia/skia/src/codec/SkSwizzler.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzler_DEFINED
+#define SkSwizzler_DEFINED
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkSampler.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+struct SkEncodedInfo;
+struct SkIRect;
+struct SkImageInfo;
+
+class SkSwizzler : public SkSampler {
+public:
+ /**
+ * Create a new SkSwizzler.
+ * @param encodedInfo Description of the format of the encoded data.
+ * @param ctable Unowned pointer to an array of up to 256 colors for an
+ * index source.
+ * @param dstInfo Describes the destination.
+ * @param options Contains partial scanline information and whether the dst is zero-
+ * initialized.
+ * @param frame Is non-NULL if the source pixels are part of an image
+ * frame that is a subset of the full image.
+ *
+ * Note that a deeper discussion of partial scanline subsets and image frame
+ * subsets is below. Currently, we do not support both simultaneously. If
+ * options->fSubset is non-NULL, frame must be NULL.
+ *
+ * @return A new SkSwizzler or nullptr on failure.
+ */
+ static std::unique_ptr<SkSwizzler> Make(const SkEncodedInfo& encodedInfo,
+ const SkPMColor* ctable, const SkImageInfo& dstInfo, const SkCodec::Options&,
+ const SkIRect* frame = nullptr);
+
+ /**
+ * Create a simplified swizzler that does not need to do format conversion. The swizzler
+ * only needs to sample and/or subset.
+ *
+ * @param srcBPP Bytes per pixel of the source.
+ * @param dstInfo Describes the destination.
+ * @param options Contains partial scanline information and whether the dst is zero-
+ * initialized.
+ * @return A new SkSwizzler or nullptr on failure.
+ */
+ static std::unique_ptr<SkSwizzler> MakeSimple(int srcBPP, const SkImageInfo& dstInfo,
+ const SkCodec::Options&);
+
+ /**
+ * Swizzle a line. Generally this will be called height times, once
+ * for each row of source.
+ * By allowing the caller to pass in the dst pointer, we give the caller
+ * flexibility to use the swizzler even when the encoded data does not
+ * store the rows in order. This also improves usability for scaled and
+ * subset decodes.
+ * @param dst Where we write the output.
+ * @param src The next row of the source data.
+ */
+ void swizzle(void* dst, const uint8_t* SK_RESTRICT src);
+
+ int fillWidth() const override {
+ return fAllocatedWidth;
+ }
+
+ /**
+ * If fSampleX > 1, the swizzler is sampling every fSampleX'th pixel and
+ * discarding the rest.
+ *
+ * This getter is currently used by SkBmpStandardCodec for Bmp-in-Ico decodes.
+ * Ideally, the subclasses of SkCodec would have no knowledge of sampling, but
+ * this allows us to apply a transparency mask to pixels after swizzling.
+ */
+ int sampleX() const { return fSampleX; }
+
+ /**
+ * Returns the actual number of pixels written to destination memory, taking
+ * scaling, subsetting, and partial frames into account.
+ */
+ int swizzleWidth() const { return fSwizzleWidth; }
+
+ /**
+ * Returns the byte offset at which we write to destination memory, taking
+ * scaling, subsetting, and partial frames into account.
+ */
+ size_t swizzleOffsetBytes() const { return fDstOffsetBytes; }
+
+private:
+
+ /**
+ * Method for converting raw data to Skia pixels.
+ * @param dstRow Row in which to write the resulting pixels.
+ * @param src Row of src data, in format specified by SrcConfig
+ * @param dstWidth Width in pixels of the destination
+ * @param bpp if bitsPerPixel % 8 == 0, deltaSrc is bytesPerPixel
+ * else, deltaSrc is bitsPerPixel
+ * @param deltaSrc bpp * sampleX
+ * @param ctable Colors (used for kIndex source).
+ * @param offset The offset before the first pixel to sample.
+ Is in bytes or bits based on what deltaSrc is in.
+ */
+ typedef void (*RowProc)(void* SK_RESTRICT dstRow,
+ const uint8_t* SK_RESTRICT src,
+ int dstWidth, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]);
+
+ template <RowProc Proc>
+ static void SkipLeading8888ZerosThen(void* SK_RESTRICT dstRow,
+ const uint8_t* SK_RESTRICT src,
+ int dstWidth, int bpp, int deltaSrc, int offset,
+ const SkPMColor ctable[]);
+
+ template <RowProc Proc>
+ static void SkipLeadingGrayAlphaZerosThen(void* dst, const uint8_t* src, int width, int bpp,
+ int deltaSrc, int offset, const SkPMColor ctable[]);
+
+ // May be NULL. We have not implemented optimized functions for all supported transforms.
+ const RowProc fFastProc;
+ // Always non-NULL. Supports sampling.
+ const RowProc fSlowProc;
+ // The actual RowProc we are using. This depends on if fFastProc is non-NULL and
+ // whether or not we are sampling.
+ RowProc fActualProc;
+
+ const SkPMColor* fColorTable; // Unowned pointer
+
+ // Subset Swizzles
+ // There are two types of subset swizzles that we support. We do not
+ // support both at the same time.
+ // TODO: If we want to support partial scanlines for gifs (which may
+ // use frame subsets), we will need to support both subsetting
+ // modes at the same time.
+ // (1) Partial Scanlines
+ // The client only wants to write a subset of the source pixels
+ // to the destination. This subset is specified to CreateSwizzler
+ // using options->fSubset. We will store subset information in
+ // the following fields.
+ //
+ // fSrcOffset: The starting pixel of the source.
+ // fSrcOffsetUnits: Derived from fSrcOffset with two key
+ // differences:
+ // (1) This takes the size of source pixels into
+ // account by multiplying by fSrcBPP. This may
+ // be measured in bits or bytes depending on
+ // which is natural for the SrcConfig.
+ // (2) If we are sampling, this will be larger
+ // than fSrcOffset * fSrcBPP, since sampling
+ // implies that we will skip some pixels.
+ // fDstOffset: Will be zero. There is no destination offset
+ // for this type of subset.
+ // fDstOffsetBytes: Will be zero.
+ // fSrcWidth: The width of the desired subset of source
+ // pixels, before any sampling is performed.
+ // fDstWidth: Will be equal to fSrcWidth, since this is also
+ // calculated before any sampling is performed.
+ // For this type of subset, the destination width
+ // matches the desired subset of the source.
+ // fSwizzleWidth: The actual number of pixels that will be
+ // written by the RowProc. This is a scaled
+ // version of fSrcWidth/fDstWidth.
+ // fAllocatedWidth: Will be equal to fSwizzleWidth. For this type
+ // of subset, the number of pixels written is the
+ // same as the actual width of the destination.
+ // (2) Frame Subset
+ // The client will decode the entire width of the source into a
+ // subset of destination memory. This subset is specified to
+ // CreateSwizzler in the "frame" parameter. We store subset
+ // information in the following fields.
+ //
+ // fSrcOffset: Will be zero. The starting pixel of the source.
+ // fSrcOffsetUnits: Will only be non-zero if we are sampling,
+ // since sampling implies that we will skip some
+ // pixels. Note that this is measured in bits
+ // or bytes depending on which is natural for
+ // SrcConfig.
+ // fDstOffset: First pixel to write in destination.
+ // fDstOffsetBytes: fDstOffset * fDstBPP.
+ // fSrcWidth: The entire width of the source pixels, before
+ // any sampling is performed.
+ // fDstWidth: The entire width of the destination memory,
+ // before any sampling is performed.
+ // fSwizzleWidth: The actual number of pixels that will be
+ // written by the RowProc. This is a scaled
+ // version of fSrcWidth.
+ // fAllocatedWidth: The actual number of pixels in destination
+ // memory. This is a scaled version of
+ // fDstWidth.
+ //
+ // If we are not subsetting, these fields are more straightforward.
+ // fSrcOffset = fDstOffet = fDstOffsetBytes = 0
+ // fSrcOffsetUnits may be non-zero (we will skip the first few pixels when sampling)
+ // fSrcWidth = fDstWidth = Full original width
+ // fSwizzleWidth = fAllcoatedWidth = Scaled width (if we are sampling)
+ const int fSrcOffset;
+ const int fDstOffset;
+ int fSrcOffsetUnits;
+ int fDstOffsetBytes;
+ const int fSrcWidth;
+ const int fDstWidth;
+ int fSwizzleWidth;
+ int fAllocatedWidth;
+
+ int fSampleX; // Step between X samples
+ const int fSrcBPP; // Bits/bytes per pixel for the SrcConfig
+ // if bitsPerPixel % 8 == 0
+ // fBPP is bytesPerPixel
+ // else
+ // fBPP is bitsPerPixel
+ const int fDstBPP; // Bytes per pixel for the destination color type
+
+ SkSwizzler(RowProc fastProc, RowProc proc, const SkPMColor* ctable, int srcOffset,
+ int srcWidth, int dstOffset, int dstWidth, int srcBPP, int dstBPP);
+ static std::unique_ptr<SkSwizzler> Make(const SkImageInfo& dstInfo, RowProc fastProc,
+ RowProc proc, const SkPMColor* ctable, int srcBPP, int dstBPP,
+ const SkCodec::Options& options, const SkIRect* frame);
+
+ int onSetSampleX(int) override;
+
+};
+#endif // SkSwizzler_DEFINED
diff --git a/gfx/skia/skia/src/core/Sk4px.h b/gfx/skia/skia/src/core/Sk4px.h
new file mode 100644
index 0000000000..ec7653f34c
--- /dev/null
+++ b/gfx/skia/skia/src/core/Sk4px.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4px_DEFINED
+#define Sk4px_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/private/SkColorData.h"
+#include "src/base/SkVx.h"
+
+// 1, 2 or 4 SkPMColors, generally vectorized.
+class Sk4px {
+public:
+ Sk4px(const skvx::byte16& v) : fV(v) {}
+
+ static Sk4px DupPMColor(SkPMColor c) {
+ skvx::uint4 splat(c);
+
+ Sk4px v;
+ memcpy((void*)&v, &splat, 16);
+ return v;
+ }
+
+ // RGBA rgba XYZW xyzw -> AAAA aaaa WWWW wwww
+ Sk4px alphas() const {
+ static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
+ return Sk4px(skvx::shuffle<3,3,3,3, 7,7,7,7, 11,11,11,11, 15,15,15,15>(fV));
+ }
+ Sk4px inv() const { return Sk4px(skvx::byte16(255) - fV); }
+
+ // When loading or storing fewer than 4 SkPMColors, we use the low lanes.
+ static Sk4px Load4(const SkPMColor px[4]) {
+ Sk4px v;
+ memcpy((void*)&v, px, 16);
+ return v;
+ }
+ static Sk4px Load2(const SkPMColor px[2]) {
+ Sk4px v;
+ memcpy((void*)&v, px, 8);
+ return v;
+ }
+ static Sk4px Load1(const SkPMColor px[1]) {
+ Sk4px v;
+ memcpy((void*)&v, px, 4);
+ return v;
+ }
+
+ // Ditto for Alphas... Load2Alphas fills the low two lanes of Sk4px.
+ // AaXx -> AAAA aaaa XXXX xxxx
+ static Sk4px Load4Alphas(const SkAlpha alphas[4]) {
+ skvx::byte4 a = skvx::byte4::Load(alphas);
+ return Sk4px(skvx::shuffle<0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3>(a));
+ }
+ // Aa -> AAAA aaaa ???? ????
+ static Sk4px Load2Alphas(const SkAlpha alphas[2]) {
+ skvx::byte2 a = skvx::byte2::Load(alphas);
+ return Sk4px(join(skvx::shuffle<0,0,0,0, 1,1,1,1>(a), skvx::byte8()));
+ }
+
+ void store4(SkPMColor px[4]) const { memcpy(px, this, 16); }
+ void store2(SkPMColor px[2]) const { memcpy(px, this, 8); }
+ void store1(SkPMColor px[1]) const { memcpy(px, this, 4); }
+
+ // 1, 2, or 4 SkPMColors with 16-bit components.
+ // This is most useful as the result of a multiply, e.g. from mulWiden().
+ class Wide {
+ public:
+ Wide(const skvx::Vec<16, uint16_t>& v) : fV(v) {}
+
+ // Rounds, i.e. (x+127) / 255.
+ Sk4px div255() const { return Sk4px(skvx::div255(fV)); }
+
+ Wide operator * (const Wide& o) const { return Wide(fV * o.fV); }
+ Wide operator + (const Wide& o) const { return Wide(fV + o.fV); }
+ Wide operator - (const Wide& o) const { return Wide(fV - o.fV); }
+ Wide operator >> (int bits) const { return Wide(fV >> bits); }
+ Wide operator << (int bits) const { return Wide(fV << bits); }
+
+ private:
+ skvx::Vec<16, uint16_t> fV;
+ };
+
+ // Widen 8-bit values to low 8-bits of 16-bit lanes.
+ Wide widen() const { return Wide(skvx::cast<uint16_t>(fV)); }
+ // 8-bit x 8-bit -> 16-bit components.
+ Wide mulWiden(const skvx::byte16& o) const { return Wide(mull(fV, o)); }
+
+ // The only 8-bit multiply we use is 8-bit x 8-bit -> 16-bit. Might as well make it pithy.
+ Wide operator * (const Sk4px& o) const { return this->mulWiden(o.fV); }
+
+ Sk4px operator + (const Sk4px& o) const { return Sk4px(fV + o.fV); }
+ Sk4px operator - (const Sk4px& o) const { return Sk4px(fV - o.fV); }
+ Sk4px operator < (const Sk4px& o) const { return Sk4px(fV < o.fV); }
+ Sk4px operator & (const Sk4px& o) const { return Sk4px(fV & o.fV); }
+ Sk4px thenElse(const Sk4px& t, const Sk4px& e) const {
+ return Sk4px(if_then_else(fV, t.fV, e.fV));
+ }
+
+ // Generally faster than (*this * o).div255().
+ // May be incorrect by +-1, but is always exactly correct when *this or o is 0 or 255.
+ Sk4px approxMulDiv255(const Sk4px& o) const {
+ return Sk4px(approx_scale(fV, o.fV));
+ }
+
+ Sk4px saturatedAdd(const Sk4px& o) const {
+ return Sk4px(saturated_add(fV, o.fV));
+ }
+
+ // A generic driver that maps fn over a src array into a dst array.
+ // fn should take an Sk4px (4 src pixels) and return an Sk4px (4 dst pixels).
+ template <typename Fn>
+ [[maybe_unused]] static void MapSrc(int n, SkPMColor* dst, const SkPMColor* src, const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ // This looks a bit odd, but it helps loop-invariant hoisting across different calls to fn.
+ // Basically, we need to make sure we keep things inside a single loop.
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(src+0)),
+ dst4 = fn(Load4(src+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(src)).store4(dst);
+ dst += 4; src += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(src)).store2(dst);
+ dst += 2; src += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(src)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, src4).
+ template <typename Fn>
+ [[maybe_unused]] static void MapDstSrc(int n, SkPMColor* dst, const SkPMColor* src,
+ const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4(src+0)),
+ dst4 = fn(Load4(dst+4), Load4(src+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4(src)).store4(dst);
+ dst += 4; src += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2(src)).store2(dst);
+ dst += 2; src += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), Load1(src)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, alpha4).
+ template <typename Fn>
+ [[maybe_unused]] static void MapDstAlpha(int n, SkPMColor* dst, const SkAlpha* a,
+ const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(a);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4Alphas(a+0)),
+ dst4 = fn(Load4(dst+4), Load4Alphas(a+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; a += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4Alphas(a)).store4(dst);
+ dst += 4; a += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2Alphas(a)).store2(dst);
+ dst += 2; a += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), skvx::byte16(*a)).store1(dst);
+ }
+ break;
+ }
+ }
+
+ // As above, but with dst4' = fn(dst4, src4, alpha4).
+ template <typename Fn>
+ [[maybe_unused]] static void MapDstSrcAlpha(int n, SkPMColor* dst, const SkPMColor* src,
+ const SkAlpha* a, const Fn& fn) {
+ SkASSERT(dst);
+ SkASSERT(src);
+ SkASSERT(a);
+ while (n > 0) {
+ if (n >= 8) {
+ Sk4px dst0 = fn(Load4(dst+0), Load4(src+0), Load4Alphas(a+0)),
+ dst4 = fn(Load4(dst+4), Load4(src+4), Load4Alphas(a+4));
+ dst0.store4(dst+0);
+ dst4.store4(dst+4);
+ dst += 8; src += 8; a += 8; n -= 8;
+ continue; // Keep our stride at 8 pixels as long as possible.
+ }
+ SkASSERT(n <= 7);
+ if (n >= 4) {
+ fn(Load4(dst), Load4(src), Load4Alphas(a)).store4(dst);
+ dst += 4; src += 4; a += 4; n -= 4;
+ }
+ if (n >= 2) {
+ fn(Load2(dst), Load2(src), Load2Alphas(a)).store2(dst);
+ dst += 2; src += 2; a += 2; n -= 2;
+ }
+ if (n >= 1) {
+ fn(Load1(dst), Load1(src), skvx::byte16(*a)).store1(dst);
+ }
+ break;
+ }
+ }
+
+private:
+ Sk4px() = default;
+
+ skvx::byte16 fV;
+};
+
+static_assert(sizeof(Sk4px) == sizeof(skvx::byte16));
+static_assert(alignof(Sk4px) == alignof(skvx::byte16));
+
+#endif // Sk4px_DEFINED
diff --git a/gfx/skia/skia/src/core/SkAAClip.cpp b/gfx/skia/skia/src/core/SkAAClip.cpp
new file mode 100644
index 0000000000..2506bbd46a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAAClip.cpp
@@ -0,0 +1,1968 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkAAClip.h"
+
+#include "include/core/SkPath.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkScan.h"
+#include <atomic>
+#include <utility>
+
+namespace {
+
+class AutoAAClipValidate {
+public:
+ AutoAAClipValidate(const SkAAClip& clip) : fClip(clip) {
+ fClip.validate();
+ }
+ ~AutoAAClipValidate() {
+ fClip.validate();
+ }
+private:
+ const SkAAClip& fClip;
+};
+
+#ifdef SK_DEBUG
+ #define AUTO_AACLIP_VALIDATE(clip) AutoAAClipValidate acv(clip)
+#else
+ #define AUTO_AACLIP_VALIDATE(clip)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static constexpr int32_t kMaxInt32 = 0x7FFFFFFF;
+
+#ifdef SK_DEBUG
+// assert we're exactly width-wide, and then return the number of bytes used
+static size_t compute_row_length(const uint8_t row[], int width) {
+ const uint8_t* origRow = row;
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ row += 2;
+ width -= n;
+ }
+ SkASSERT(0 == width);
+ return row - origRow;
+}
+#endif
+
+/*
+ * Data runs are packed [count, alpha]
+ */
+struct YOffset {
+ int32_t fY;
+ uint32_t fOffset;
+};
+
+class RowIter {
+public:
+ RowIter(const uint8_t* row, const SkIRect& bounds) {
+ fRow = row;
+ fLeft = bounds.fLeft;
+ fBoundsRight = bounds.fRight;
+ if (row) {
+ fRight = bounds.fLeft + row[0];
+ SkASSERT(fRight <= fBoundsRight);
+ fAlpha = row[1];
+ fDone = false;
+ } else {
+ fDone = true;
+ fRight = kMaxInt32;
+ fAlpha = 0;
+ }
+ }
+
+ bool done() const { return fDone; }
+ int left() const { return fLeft; }
+ int right() const { return fRight; }
+ U8CPU alpha() const { return fAlpha; }
+ void next() {
+ if (!fDone) {
+ fLeft = fRight;
+ if (fRight == fBoundsRight) {
+ fDone = true;
+ fRight = kMaxInt32;
+ fAlpha = 0;
+ } else {
+ fRow += 2;
+ fRight += fRow[0];
+ fAlpha = fRow[1];
+ SkASSERT(fRight <= fBoundsRight);
+ }
+ }
+ }
+
+private:
+ const uint8_t* fRow;
+ int fLeft;
+ int fRight;
+ int fBoundsRight;
+ bool fDone;
+ uint8_t fAlpha;
+};
+
+class Iter {
+public:
+ Iter() = default;
+
+ Iter(int y, const uint8_t* data, const YOffset* start, const YOffset* end)
+ : fCurrYOff(start)
+ , fStopYOff(end)
+ , fData(data + start->fOffset)
+ , fTop(y)
+ , fBottom(y + start->fY + 1)
+ , fDone(false) {}
+
+ bool done() const { return fDone; }
+ int top() const { return fTop; }
+ int bottom() const { return fBottom; }
+ const uint8_t* data() const { return fData; }
+
+ void next() {
+ if (!fDone) {
+ const YOffset* prev = fCurrYOff;
+ const YOffset* curr = prev + 1;
+ SkASSERT(curr <= fStopYOff);
+
+ fTop = fBottom;
+ if (curr >= fStopYOff) {
+ fDone = true;
+ fBottom = kMaxInt32;
+ fData = nullptr;
+ } else {
+ fBottom += curr->fY - prev->fY;
+ fData += curr->fOffset - prev->fOffset;
+ fCurrYOff = curr;
+ }
+ }
+ }
+
+private:
+ const YOffset* fCurrYOff = nullptr;
+ const YOffset* fStopYOff = nullptr;
+ const uint8_t* fData = nullptr;
+
+ int fTop = kMaxInt32;
+ int fBottom = kMaxInt32;
+ bool fDone = true;
+};
+
+} // namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkAAClip::RunHead {
+ std::atomic<int32_t> fRefCnt;
+ int32_t fRowCount;
+ size_t fDataSize;
+
+ YOffset* yoffsets() {
+ return (YOffset*)((char*)this + sizeof(RunHead));
+ }
+ const YOffset* yoffsets() const {
+ return (const YOffset*)((const char*)this + sizeof(RunHead));
+ }
+ uint8_t* data() {
+ return (uint8_t*)(this->yoffsets() + fRowCount);
+ }
+ const uint8_t* data() const {
+ return (const uint8_t*)(this->yoffsets() + fRowCount);
+ }
+
+ static RunHead* Alloc(int rowCount, size_t dataSize) {
+ size_t size = sizeof(RunHead) + rowCount * sizeof(YOffset) + dataSize;
+ RunHead* head = (RunHead*)sk_malloc_throw(size);
+ head->fRefCnt.store(1);
+ head->fRowCount = rowCount;
+ head->fDataSize = dataSize;
+ return head;
+ }
+
+ static int ComputeRowSizeForWidth(int width) {
+ // 2 bytes per segment, where each segment can store up to 255 for count
+ int segments = 0;
+ while (width > 0) {
+ segments += 1;
+ int n = std::min(width, 255);
+ width -= n;
+ }
+ return segments * 2; // each segment is row[0] + row[1] (n + alpha)
+ }
+
+ static RunHead* AllocRect(const SkIRect& bounds) {
+ SkASSERT(!bounds.isEmpty());
+ int width = bounds.width();
+ size_t rowSize = ComputeRowSizeForWidth(width);
+ RunHead* head = RunHead::Alloc(1, rowSize);
+ YOffset* yoff = head->yoffsets();
+ yoff->fY = bounds.height() - 1;
+ yoff->fOffset = 0;
+ uint8_t* row = head->data();
+ while (width > 0) {
+ int n = std::min(width, 255);
+ row[0] = n;
+ row[1] = 0xFF;
+ width -= n;
+ row += 2;
+ }
+ return head;
+ }
+
+ static Iter Iterate(const SkAAClip& clip) {
+ const RunHead* head = clip.fRunHead;
+ if (!clip.fRunHead) {
+ // A null run head is an empty clip, so return aan already finished iterator.
+ return Iter();
+ }
+
+ return Iter(clip.getBounds().fTop, head->data(), head->yoffsets(),
+ head->yoffsets() + head->fRowCount);
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAAClip::Builder {
+ class Blitter;
+
+ SkIRect fBounds;
+ struct Row {
+ int fY;
+ int fWidth;
+ SkTDArray<uint8_t>* fData;
+ };
+ SkTDArray<Row> fRows;
+ Row* fCurrRow;
+ int fPrevY;
+ int fWidth;
+ int fMinY;
+
+public:
+ Builder(const SkIRect& bounds) : fBounds(bounds) {
+ fPrevY = -1;
+ fWidth = bounds.width();
+ fCurrRow = nullptr;
+ fMinY = bounds.fTop;
+ }
+
+ ~Builder() {
+ Row* row = fRows.begin();
+ Row* stop = fRows.end();
+ while (row < stop) {
+ delete row->fData;
+ row += 1;
+ }
+ }
+
+ bool applyClipOp(SkAAClip* target, const SkAAClip& other, SkClipOp op);
+ bool blitPath(SkAAClip* target, const SkPath& path, bool doAA);
+
+private:
+ using AlphaProc = U8CPU (*)(U8CPU alphaA, U8CPU alphaB);
+ void operateX(int lastY, RowIter& iterA, RowIter& iterB, AlphaProc proc);
+ void operateY(const SkAAClip& A, const SkAAClip& B, SkClipOp op);
+
+ void addRun(int x, int y, U8CPU alpha, int count) {
+ SkASSERT(count > 0);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fBounds.contains(x + count - 1, y));
+
+ x -= fBounds.left();
+ y -= fBounds.top();
+
+ Row* row = fCurrRow;
+ if (y != fPrevY) {
+ SkASSERT(y > fPrevY);
+ fPrevY = y;
+ row = this->flushRow(true);
+ row->fY = y;
+ row->fWidth = 0;
+ SkASSERT(row->fData);
+ SkASSERT(row->fData->empty());
+ fCurrRow = row;
+ }
+
+ SkASSERT(row->fWidth <= x);
+ SkASSERT(row->fWidth < fBounds.width());
+
+ SkTDArray<uint8_t>& data = *row->fData;
+
+ int gap = x - row->fWidth;
+ if (gap) {
+ AppendRun(data, 0, gap);
+ row->fWidth += gap;
+ SkASSERT(row->fWidth < fBounds.width());
+ }
+
+ AppendRun(data, alpha, count);
+ row->fWidth += count;
+ SkASSERT(row->fWidth <= fBounds.width());
+ }
+
+ void addColumn(int x, int y, U8CPU alpha, int height) {
+ SkASSERT(fBounds.contains(x, y + height - 1));
+
+ this->addRun(x, y, alpha, 1);
+ this->flushRowH(fCurrRow);
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+
+ void addRectRun(int x, int y, int width, int height) {
+ SkASSERT(fBounds.contains(x + width - 1, y + height - 1));
+ this->addRun(x, y, 0xFF, width);
+
+ // we assum the rect must be all we'll see for these scanlines
+ // so we ensure our row goes all the way to our right
+ this->flushRowH(fCurrRow);
+
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+
+ void addAntiRectRun(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ // According to SkBlitter.cpp, no matter whether leftAlpha is 0 or positive,
+ // we should always consider [x, x+1] as the left-most column and [x+1, x+1+width]
+ // as the rect with full alpha.
+ SkASSERT(fBounds.contains(x + width + (rightAlpha > 0 ? 1 : 0),
+ y + height - 1));
+ SkASSERT(width >= 0);
+
+ // Conceptually we're always adding 3 runs, but we should
+ // merge or omit them if possible.
+ if (leftAlpha == 0xFF) {
+ width++;
+ } else if (leftAlpha > 0) {
+ this->addRun(x++, y, leftAlpha, 1);
+ } else {
+ // leftAlpha is 0, ignore the left column
+ x++;
+ }
+ if (rightAlpha == 0xFF) {
+ width++;
+ }
+ if (width > 0) {
+ this->addRun(x, y, 0xFF, width);
+ }
+ if (rightAlpha > 0 && rightAlpha < 255) {
+ this->addRun(x + width, y, rightAlpha, 1);
+ }
+
+ // if we never called addRun, we might not have a fCurrRow yet
+ if (fCurrRow) {
+ // we assume the rect must be all we'll see for these scanlines
+ // so we ensure our row goes all the way to our right
+ this->flushRowH(fCurrRow);
+
+ y -= fBounds.fTop;
+ SkASSERT(y == fCurrRow->fY);
+ fCurrRow->fY = y + height - 1;
+ }
+ }
+
+ bool finish(SkAAClip* target) {
+ this->flushRow(false);
+
+ const Row* row = fRows.begin();
+ const Row* stop = fRows.end();
+
+ size_t dataSize = 0;
+ while (row < stop) {
+ dataSize += row->fData->size();
+ row += 1;
+ }
+
+ if (0 == dataSize) {
+ return target->setEmpty();
+ }
+
+ SkASSERT(fMinY >= fBounds.fTop);
+ SkASSERT(fMinY < fBounds.fBottom);
+ int adjustY = fMinY - fBounds.fTop;
+ fBounds.fTop = fMinY;
+
+ RunHead* head = RunHead::Alloc(fRows.size(), dataSize);
+ YOffset* yoffset = head->yoffsets();
+ uint8_t* data = head->data();
+ uint8_t* baseData = data;
+
+ row = fRows.begin();
+ SkDEBUGCODE(int prevY = row->fY - 1;)
+ while (row < stop) {
+ SkASSERT(prevY < row->fY); // must be monotonic
+ SkDEBUGCODE(prevY = row->fY);
+
+ yoffset->fY = row->fY - adjustY;
+ yoffset->fOffset = SkToU32(data - baseData);
+ yoffset += 1;
+
+ size_t n = row->fData->size();
+ memcpy(data, row->fData->begin(), n);
+ SkASSERT(compute_row_length(data, fBounds.width()) == n);
+ data += n;
+
+ row += 1;
+ }
+
+ target->freeRuns();
+ target->fBounds = fBounds;
+ target->fRunHead = head;
+ return target->trimBounds();
+ }
+
+ void dump() {
+ this->validate();
+ int y;
+ for (y = 0; y < fRows.size(); ++y) {
+ const Row& row = fRows[y];
+ SkDebugf("Y:%3d W:%3d", row.fY, row.fWidth);
+ const SkTDArray<uint8_t>& data = *row.fData;
+ int count = data.size();
+ SkASSERT(!(count & 1));
+ const uint8_t* ptr = data.begin();
+ for (int x = 0; x < count; x += 2) {
+ SkDebugf(" [%3d:%02X]", ptr[0], ptr[1]);
+ ptr += 2;
+ }
+ SkDebugf("\n");
+ }
+ }
+
+ void validate() {
+#ifdef SK_DEBUG
+ int prevY = -1;
+ for (int i = 0; i < fRows.size(); ++i) {
+ const Row& row = fRows[i];
+ SkASSERT(prevY < row.fY);
+ SkASSERT(fWidth == row.fWidth);
+ int count = row.fData->size();
+ const uint8_t* ptr = row.fData->begin();
+ SkASSERT(!(count & 1));
+ int w = 0;
+ for (int x = 0; x < count; x += 2) {
+ int n = ptr[0];
+ SkASSERT(n > 0);
+ w += n;
+ SkASSERT(w <= fWidth);
+ ptr += 2;
+ }
+ SkASSERT(w == fWidth);
+ prevY = row.fY;
+ }
+#endif
+ }
+
+ void flushRowH(Row* row) {
+ // flush current row if needed
+ if (row->fWidth < fWidth) {
+ AppendRun(*row->fData, 0, fWidth - row->fWidth);
+ row->fWidth = fWidth;
+ }
+ }
+
+ Row* flushRow(bool readyForAnother) {
+ Row* next = nullptr;
+ int count = fRows.size();
+ if (count > 0) {
+ this->flushRowH(&fRows[count - 1]);
+ }
+ if (count > 1) {
+ // are our last two runs the same?
+ Row* prev = &fRows[count - 2];
+ Row* curr = &fRows[count - 1];
+ SkASSERT(prev->fWidth == fWidth);
+ SkASSERT(curr->fWidth == fWidth);
+ if (*prev->fData == *curr->fData) {
+ prev->fY = curr->fY;
+ if (readyForAnother) {
+ curr->fData->clear();
+ next = curr;
+ } else {
+ delete curr->fData;
+ fRows.removeShuffle(count - 1);
+ }
+ } else {
+ if (readyForAnother) {
+ next = fRows.append();
+ next->fData = new SkTDArray<uint8_t>;
+ }
+ }
+ } else {
+ if (readyForAnother) {
+ next = fRows.append();
+ next->fData = new SkTDArray<uint8_t>;
+ }
+ }
+ return next;
+ }
+
+ static void AppendRun(SkTDArray<uint8_t>& data, U8CPU alpha, int count) {
+ do {
+ int n = count;
+ if (n > 255) {
+ n = 255;
+ }
+ uint8_t* ptr = data.append(2);
+ ptr[0] = n;
+ ptr[1] = alpha;
+ count -= n;
+ } while (count > 0);
+ }
+};
+
+void SkAAClip::Builder::operateX(int lastY, RowIter& iterA, RowIter& iterB, AlphaProc proc) {
+ auto advanceRowIter = [](RowIter& iter, int& iterLeft, int& iterRite, int rite) {
+ if (rite == iterRite) {
+ iter.next();
+ iterLeft = iter.left();
+ iterRite = iter.right();
+ }
+ };
+
+ int leftA = iterA.left();
+ int riteA = iterA.right();
+ int leftB = iterB.left();
+ int riteB = iterB.right();
+
+ int prevRite = fBounds.fLeft;
+
+ do {
+ U8CPU alphaA = 0;
+ U8CPU alphaB = 0;
+ int left, rite;
+
+ if (leftA < leftB) {
+ left = leftA;
+ alphaA = iterA.alpha();
+ if (riteA <= leftB) {
+ rite = riteA;
+ } else {
+ rite = leftA = leftB;
+ }
+ } else if (leftB < leftA) {
+ left = leftB;
+ alphaB = iterB.alpha();
+ if (riteB <= leftA) {
+ rite = riteB;
+ } else {
+ rite = leftB = leftA;
+ }
+ } else {
+ left = leftA; // or leftB, since leftA == leftB
+ rite = leftA = leftB = std::min(riteA, riteB);
+ alphaA = iterA.alpha();
+ alphaB = iterB.alpha();
+ }
+
+ if (left >= fBounds.fRight) {
+ break;
+ }
+ if (rite > fBounds.fRight) {
+ rite = fBounds.fRight;
+ }
+
+ if (left >= fBounds.fLeft) {
+ SkASSERT(rite > left);
+ this->addRun(left, lastY, proc(alphaA, alphaB), rite - left);
+ prevRite = rite;
+ }
+
+ advanceRowIter(iterA, leftA, riteA, rite);
+ advanceRowIter(iterB, leftB, riteB, rite);
+ } while (!iterA.done() || !iterB.done());
+
+ if (prevRite < fBounds.fRight) {
+ this->addRun(prevRite, lastY, 0, fBounds.fRight - prevRite);
+ }
+}
+
+void SkAAClip::Builder::operateY(const SkAAClip& A, const SkAAClip& B, SkClipOp op) {
+ static const AlphaProc kDiff = [](U8CPU a, U8CPU b) { return SkMulDiv255Round(a, 0xFF - b); };
+ static const AlphaProc kIntersect = [](U8CPU a, U8CPU b) { return SkMulDiv255Round(a, b); };
+ AlphaProc proc = (op == SkClipOp::kDifference) ? kDiff : kIntersect;
+
+ Iter iterA = RunHead::Iterate(A);
+ Iter iterB = RunHead::Iterate(B);
+
+ SkASSERT(!iterA.done());
+ int topA = iterA.top();
+ int botA = iterA.bottom();
+ SkASSERT(!iterB.done());
+ int topB = iterB.top();
+ int botB = iterB.bottom();
+
+ auto advanceIter = [](Iter& iter, int& iterTop, int& iterBot, int bot) {
+ if (bot == iterBot) {
+ iter.next();
+ iterTop = iterBot;
+ SkASSERT(iterBot == iter.top());
+ iterBot = iter.bottom();
+ }
+ };
+
+#if defined(SK_BUILD_FOR_FUZZER)
+ if ((botA - topA) > 100000 || (botB - topB) > 100000) {
+ return;
+ }
+#endif
+
+ do {
+ const uint8_t* rowA = nullptr;
+ const uint8_t* rowB = nullptr;
+ int top, bot;
+
+ if (topA < topB) {
+ top = topA;
+ rowA = iterA.data();
+ if (botA <= topB) {
+ bot = botA;
+ } else {
+ bot = topA = topB;
+ }
+
+ } else if (topB < topA) {
+ top = topB;
+ rowB = iterB.data();
+ if (botB <= topA) {
+ bot = botB;
+ } else {
+ bot = topB = topA;
+ }
+ } else {
+ top = topA; // or topB, since topA == topB
+ bot = topA = topB = std::min(botA, botB);
+ rowA = iterA.data();
+ rowB = iterB.data();
+ }
+
+ if (top >= fBounds.fBottom) {
+ break;
+ }
+
+ if (bot > fBounds.fBottom) {
+ bot = fBounds.fBottom;
+ }
+ SkASSERT(top < bot);
+
+ if (!rowA && !rowB) {
+ this->addRun(fBounds.fLeft, bot - 1, 0, fBounds.width());
+ } else if (top >= fBounds.fTop) {
+ SkASSERT(bot <= fBounds.fBottom);
+ RowIter rowIterA(rowA, rowA ? A.getBounds() : fBounds);
+ RowIter rowIterB(rowB, rowB ? B.getBounds() : fBounds);
+ this->operateX(bot - 1, rowIterA, rowIterB, proc);
+ }
+
+ advanceIter(iterA, topA, botA, bot);
+ advanceIter(iterB, topB, botB, bot);
+ } while (!iterA.done() || !iterB.done());
+}
+
+class SkAAClip::Builder::Blitter final : public SkBlitter {
+ int fLastY;
+
+ /*
+ If we see a gap of 1 or more empty scanlines while building in Y-order,
+ we inject an explicit empty scanline (alpha==0)
+
+ See AAClipTest.cpp : test_path_with_hole()
+ */
+ void checkForYGap(int y) {
+ SkASSERT(y >= fLastY);
+ if (fLastY > -SK_MaxS32) {
+ int gap = y - fLastY;
+ if (gap > 1) {
+ fBuilder->addRun(fLeft, y - 1, 0, fRight - fLeft);
+ }
+ }
+ fLastY = y;
+ }
+
+public:
+ Blitter(Builder* builder) {
+ fBuilder = builder;
+ fLeft = builder->fBounds.fLeft;
+ fRight = builder->fBounds.fRight;
+ fMinY = SK_MaxS32;
+ fLastY = -SK_MaxS32; // sentinel
+ }
+
+ void finish() {
+ if (fMinY < SK_MaxS32) {
+ fBuilder->fMinY = fMinY;
+ }
+ }
+
+ /**
+ Must evaluate clips in scan-line order, so don't want to allow blitV(),
+ but an AAClip can be clipped down to a single pixel wide, so we
+ must support it (given AntiRect semantics: minimum width is 2).
+ Instead we'll rely on the runtime asserts to guarantee Y monotonicity;
+ any failure cases that misses may have minor artifacts.
+ */
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ if (height == 1) {
+ // We're still in scan-line order if height is 1
+ // This is useful for Analytic AA
+ const SkAlpha alphas[2] = {alpha, 0};
+ const int16_t runs[2] = {1, 0};
+ this->blitAntiH(x, y, alphas, runs);
+ } else {
+ this->recordMinY(y);
+ fBuilder->addColumn(x, y, alpha, height);
+ fLastY = y + height - 1;
+ }
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addRectRun(x, y, width, height);
+ fLastY = y + height - 1;
+ }
+
+ void blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addAntiRectRun(x, y, width, height, leftAlpha, rightAlpha);
+ fLastY = y + height - 1;
+ }
+
+ void blitMask(const SkMask&, const SkIRect& clip) override
+ { unexpected(); }
+
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override {
+ return nullptr;
+ }
+
+ void blitH(int x, int y, int width) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ fBuilder->addRun(x, y, 0xFF, width);
+ }
+
+ void blitAntiH(int x, int y, const SkAlpha alpha[], const int16_t runs[]) override {
+ this->recordMinY(y);
+ this->checkForYGap(y);
+ for (;;) {
+ int count = *runs;
+ if (count <= 0) {
+ return;
+ }
+
+ // The supersampler's buffer can be the width of the device, so
+ // we may have to trim the run to our bounds. Previously, we assert that
+ // the extra spans are always alpha==0.
+ // However, the analytic AA is too sensitive to precision errors
+ // so it may have extra spans with very tiny alpha because after several
+ // arithmatic operations, the edge may bleed the path boundary a little bit.
+ // Therefore, instead of always asserting alpha==0, we assert alpha < 0x10.
+ int localX = x;
+ int localCount = count;
+ if (x < fLeft) {
+ SkASSERT(0x10 > *alpha);
+ int gap = fLeft - x;
+ SkASSERT(gap <= count);
+ localX += gap;
+ localCount -= gap;
+ }
+ int right = x + count;
+ if (right > fRight) {
+ SkASSERT(0x10 > *alpha);
+ localCount -= right - fRight;
+ SkASSERT(localCount >= 0);
+ }
+
+ if (localCount) {
+ fBuilder->addRun(localX, y, *alpha, localCount);
+ }
+ // Next run
+ runs += count;
+ alpha += count;
+ x += count;
+ }
+ }
+
+private:
+ Builder* fBuilder;
+ int fLeft; // cache of builder's bounds' left edge
+ int fRight;
+ int fMinY;
+
+ /*
+ * We track this, in case the scan converter skipped some number of
+ * scanlines at the (relative to the bounds it was given). This allows
+ * the builder, during its finish, to trip its bounds down to the "real"
+ * top.
+ */
+ void recordMinY(int y) {
+ if (y < fMinY) {
+ fMinY = y;
+ }
+ }
+
+ void unexpected() {
+ SK_ABORT("---- did not expect to get called here");
+ }
+};
+
+bool SkAAClip::Builder::applyClipOp(SkAAClip* target, const SkAAClip& other, SkClipOp op) {
+ this->operateY(*target, other, op);
+ return this->finish(target);
+}
+
+bool SkAAClip::Builder::blitPath(SkAAClip* target, const SkPath& path, bool doAA) {
+ Blitter blitter(this);
+ SkRegion clip(fBounds);
+
+ if (doAA) {
+ SkScan::AntiFillPath(path, clip, &blitter, true);
+ } else {
+ SkScan::FillPath(path, clip, &blitter);
+ }
+
+ blitter.finish();
+ return this->finish(target);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkAAClip::copyToMask(SkMask* mask) const {
+ auto expandRowToMask = [](uint8_t* dst, const uint8_t* row, int width) {
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(width >= n);
+ memset(dst, row[1], n);
+ dst += n;
+ row += 2;
+ width -= n;
+ }
+ SkASSERT(0 == width);
+ };
+
+ mask->fFormat = SkMask::kA8_Format;
+ if (this->isEmpty()) {
+ mask->fBounds.setEmpty();
+ mask->fImage = nullptr;
+ mask->fRowBytes = 0;
+ return;
+ }
+
+ mask->fBounds = fBounds;
+ mask->fRowBytes = fBounds.width();
+ size_t size = mask->computeImageSize();
+ mask->fImage = SkMask::AllocImage(size);
+
+ Iter iter = RunHead::Iterate(*this);
+ uint8_t* dst = mask->fImage;
+ const int width = fBounds.width();
+
+ int y = fBounds.fTop;
+ while (!iter.done()) {
+ do {
+ expandRowToMask(dst, iter.data(), width);
+ dst += mask->fRowBytes;
+ } while (++y < iter.bottom());
+ iter.next();
+ }
+}
+
+#ifdef SK_DEBUG
+
+void SkAAClip::validate() const {
+ if (nullptr == fRunHead) {
+ SkASSERT(fBounds.isEmpty());
+ return;
+ }
+ SkASSERT(!fBounds.isEmpty());
+
+ const RunHead* head = fRunHead;
+ SkASSERT(head->fRefCnt.load() > 0);
+ SkASSERT(head->fRowCount > 0);
+
+ const YOffset* yoff = head->yoffsets();
+ const YOffset* ystop = yoff + head->fRowCount;
+ const int lastY = fBounds.height() - 1;
+
+ // Y and offset must be monotonic
+ int prevY = -1;
+ int32_t prevOffset = -1;
+ while (yoff < ystop) {
+ SkASSERT(prevY < yoff->fY);
+ SkASSERT(yoff->fY <= lastY);
+ prevY = yoff->fY;
+ SkASSERT(prevOffset < (int32_t)yoff->fOffset);
+ prevOffset = yoff->fOffset;
+ const uint8_t* row = head->data() + yoff->fOffset;
+ size_t rowLength = compute_row_length(row, fBounds.width());
+ SkASSERT(yoff->fOffset + rowLength <= head->fDataSize);
+ yoff += 1;
+ }
+ // check the last entry;
+ --yoff;
+ SkASSERT(yoff->fY == lastY);
+}
+
+static void dump_one_row(const uint8_t* SK_RESTRICT row,
+ int width, int leading_num) {
+ if (leading_num) {
+ SkDebugf( "%03d ", leading_num );
+ }
+ while (width > 0) {
+ int n = row[0];
+ int val = row[1];
+ char out = '.';
+ if (val == 0xff) {
+ out = '*';
+ } else if (val > 0) {
+ out = '+';
+ }
+ for (int i = 0 ; i < n ; i++) {
+ SkDebugf( "%c", out );
+ }
+ row += 2;
+ width -= n;
+ }
+ SkDebugf( "\n" );
+}
+
+void SkAAClip::debug(bool compress_y) const {
+ Iter iter = RunHead::Iterate(*this);
+ const int width = fBounds.width();
+
+ int y = fBounds.fTop;
+ while (!iter.done()) {
+ if (compress_y) {
+ dump_one_row(iter.data(), width, iter.bottom() - iter.top() + 1);
+ } else {
+ do {
+ dump_one_row(iter.data(), width, 0);
+ } while (++y < iter.bottom());
+ }
+ iter.next();
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Count the number of zeros on the left and right edges of the passed in
+// RLE row. If 'row' is all zeros return 'width' in both variables.
+static void count_left_right_zeros(const uint8_t* row, int width,
+ int* leftZ, int* riteZ) {
+ int zeros = 0;
+ do {
+ if (row[1]) {
+ break;
+ }
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ zeros += n;
+ row += 2;
+ width -= n;
+ } while (width > 0);
+ *leftZ = zeros;
+
+ if (0 == width) {
+ // this line is completely empty return 'width' in both variables
+ *riteZ = *leftZ;
+ return;
+ }
+
+ zeros = 0;
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n > 0);
+ if (0 == row[1]) {
+ zeros += n;
+ } else {
+ zeros = 0;
+ }
+ row += 2;
+ width -= n;
+ }
+ *riteZ = zeros;
+}
+
+// modify row in place, trimming off (zeros) from the left and right sides.
+// return the number of bytes that were completely eliminated from the left
+static int trim_row_left_right(uint8_t* row, int width, int leftZ, int riteZ) {
+ int trim = 0;
+ while (leftZ > 0) {
+ SkASSERT(0 == row[1]);
+ int n = row[0];
+ SkASSERT(n > 0);
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ if (n > leftZ) {
+ row[-2] = n - leftZ;
+ break;
+ }
+ trim += 2;
+ leftZ -= n;
+ SkASSERT(leftZ >= 0);
+ }
+
+ if (riteZ) {
+ // walk row to the end, and then we'll back up to trim riteZ
+ while (width > 0) {
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ }
+ // now skip whole runs of zeros
+ do {
+ row -= 2;
+ SkASSERT(0 == row[1]);
+ int n = row[0];
+ SkASSERT(n > 0);
+ if (n > riteZ) {
+ row[0] = n - riteZ;
+ break;
+ }
+ riteZ -= n;
+ SkASSERT(riteZ >= 0);
+ } while (riteZ > 0);
+ }
+
+ return trim;
+}
+
+bool SkAAClip::trimLeftRight() {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ AUTO_AACLIP_VALIDATE(*this);
+
+ const int width = fBounds.width();
+ RunHead* head = fRunHead;
+ YOffset* yoff = head->yoffsets();
+ YOffset* stop = yoff + head->fRowCount;
+ uint8_t* base = head->data();
+
+ // After this loop, 'leftZeros' & 'rightZeros' will contain the minimum
+ // number of zeros on the left and right of the clip. This information
+ // can be used to shrink the bounding box.
+ int leftZeros = width;
+ int riteZeros = width;
+ while (yoff < stop) {
+ int L, R;
+ count_left_right_zeros(base + yoff->fOffset, width, &L, &R);
+ SkASSERT(L + R < width || (L == width && R == width));
+ if (L < leftZeros) {
+ leftZeros = L;
+ }
+ if (R < riteZeros) {
+ riteZeros = R;
+ }
+ if (0 == (leftZeros | riteZeros)) {
+ // no trimming to do
+ return true;
+ }
+ yoff += 1;
+ }
+
+ SkASSERT(leftZeros || riteZeros);
+ if (width == leftZeros) {
+ SkASSERT(width == riteZeros);
+ return this->setEmpty();
+ }
+
+ this->validate();
+
+ fBounds.fLeft += leftZeros;
+ fBounds.fRight -= riteZeros;
+ SkASSERT(!fBounds.isEmpty());
+
+ // For now we don't realloc the storage (for time), we just shrink in place
+ // This means we don't have to do any memmoves either, since we can just
+ // play tricks with the yoff->fOffset for each row
+ yoff = head->yoffsets();
+ while (yoff < stop) {
+ uint8_t* row = base + yoff->fOffset;
+ SkDEBUGCODE((void)compute_row_length(row, width);)
+ yoff->fOffset += trim_row_left_right(row, width, leftZeros, riteZeros);
+ SkDEBUGCODE((void)compute_row_length(base + yoff->fOffset, width - leftZeros - riteZeros);)
+ yoff += 1;
+ }
+ return true;
+}
+
+static bool row_is_all_zeros(const uint8_t* row, int width) {
+ SkASSERT(width > 0);
+ do {
+ if (row[1]) {
+ return false;
+ }
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ } while (width > 0);
+ SkASSERT(0 == width);
+ return true;
+}
+
+bool SkAAClip::trimTopBottom() {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ this->validate();
+
+ const int width = fBounds.width();
+ RunHead* head = fRunHead;
+ YOffset* yoff = head->yoffsets();
+ YOffset* stop = yoff + head->fRowCount;
+ const uint8_t* base = head->data();
+
+ // Look to trim away empty rows from the top.
+ //
+ int skip = 0;
+ while (yoff < stop) {
+ const uint8_t* data = base + yoff->fOffset;
+ if (!row_is_all_zeros(data, width)) {
+ break;
+ }
+ skip += 1;
+ yoff += 1;
+ }
+ SkASSERT(skip <= head->fRowCount);
+ if (skip == head->fRowCount) {
+ return this->setEmpty();
+ }
+ if (skip > 0) {
+ // adjust fRowCount and fBounds.fTop, and slide all the data up
+ // as we remove [skip] number of YOffset entries
+ yoff = head->yoffsets();
+ int dy = yoff[skip - 1].fY + 1;
+ for (int i = skip; i < head->fRowCount; ++i) {
+ SkASSERT(yoff[i].fY >= dy);
+ yoff[i].fY -= dy;
+ }
+ YOffset* dst = head->yoffsets();
+ size_t size = head->fRowCount * sizeof(YOffset) + head->fDataSize;
+ memmove(dst, dst + skip, size - skip * sizeof(YOffset));
+
+ fBounds.fTop += dy;
+ SkASSERT(!fBounds.isEmpty());
+ head->fRowCount -= skip;
+ SkASSERT(head->fRowCount > 0);
+
+ this->validate();
+ // need to reset this after the memmove
+ base = head->data();
+ }
+
+ // Look to trim away empty rows from the bottom.
+ // We know that we have at least one non-zero row, so we can just walk
+ // backwards without checking for running past the start.
+ //
+ stop = yoff = head->yoffsets() + head->fRowCount;
+ do {
+ yoff -= 1;
+ } while (row_is_all_zeros(base + yoff->fOffset, width));
+ skip = SkToInt(stop - yoff - 1);
+ SkASSERT(skip >= 0 && skip < head->fRowCount);
+ if (skip > 0) {
+ // removing from the bottom is easier than from the top, as we don't
+ // have to adjust any of the Y values, we just have to trim the array
+ memmove(stop - skip, stop, head->fDataSize);
+
+ fBounds.fBottom = fBounds.fTop + yoff->fY + 1;
+ SkASSERT(!fBounds.isEmpty());
+ head->fRowCount -= skip;
+ SkASSERT(head->fRowCount > 0);
+ }
+ this->validate();
+
+ return true;
+}
+
+// can't validate before we're done, since trimming is part of the process of
+// making us valid after the Builder. Since we build from top to bottom, its
+// possible our fBounds.fBottom is bigger than our last scanline of data, so
+// we trim fBounds.fBottom back up.
+//
+// TODO: check for duplicates in X and Y to further compress our data
+//
+bool SkAAClip::trimBounds() {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ const RunHead* head = fRunHead;
+ const YOffset* yoff = head->yoffsets();
+
+ SkASSERT(head->fRowCount > 0);
+ const YOffset& lastY = yoff[head->fRowCount - 1];
+ SkASSERT(lastY.fY + 1 <= fBounds.height());
+ fBounds.fBottom = fBounds.fTop + lastY.fY + 1;
+ SkASSERT(lastY.fY + 1 == fBounds.height());
+ SkASSERT(!fBounds.isEmpty());
+
+ return this->trimTopBottom() && this->trimLeftRight();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkAAClip::SkAAClip() {
+ fBounds.setEmpty();
+ fRunHead = nullptr;
+}
+
+SkAAClip::SkAAClip(const SkAAClip& src) {
+ SkDEBUGCODE(fBounds.setEmpty();) // need this for validate
+ fRunHead = nullptr;
+ *this = src;
+}
+
+SkAAClip::~SkAAClip() {
+ this->freeRuns();
+}
+
+SkAAClip& SkAAClip::operator=(const SkAAClip& src) {
+ AUTO_AACLIP_VALIDATE(*this);
+ src.validate();
+
+ if (this != &src) {
+ this->freeRuns();
+ fBounds = src.fBounds;
+ fRunHead = src.fRunHead;
+ if (fRunHead) {
+ fRunHead->fRefCnt++;
+ }
+ }
+ return *this;
+}
+
+bool SkAAClip::setEmpty() {
+ this->freeRuns();
+ fBounds.setEmpty();
+ fRunHead = nullptr;
+ return false;
+}
+
+bool SkAAClip::setRect(const SkIRect& bounds) {
+ if (bounds.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ AUTO_AACLIP_VALIDATE(*this);
+
+ this->freeRuns();
+ fBounds = bounds;
+ fRunHead = RunHead::AllocRect(bounds);
+ SkASSERT(!this->isEmpty());
+ return true;
+}
+
+bool SkAAClip::isRect() const {
+ if (this->isEmpty()) {
+ return false;
+ }
+
+ const RunHead* head = fRunHead;
+ if (head->fRowCount != 1) {
+ return false;
+ }
+ const YOffset* yoff = head->yoffsets();
+ if (yoff->fY != fBounds.fBottom - 1) {
+ return false;
+ }
+
+ const uint8_t* row = head->data() + yoff->fOffset;
+ int width = fBounds.width();
+ do {
+ if (row[1] != 0xFF) {
+ return false;
+ }
+ int n = row[0];
+ SkASSERT(n <= width);
+ width -= n;
+ row += 2;
+ } while (width > 0);
+ return true;
+}
+
+bool SkAAClip::setRegion(const SkRegion& rgn) {
+ if (rgn.isEmpty()) {
+ return this->setEmpty();
+ }
+ if (rgn.isRect()) {
+ return this->setRect(rgn.getBounds());
+ }
+
+
+ const SkIRect& bounds = rgn.getBounds();
+ const int offsetX = bounds.fLeft;
+ const int offsetY = bounds.fTop;
+
+ SkTDArray<YOffset> yArray;
+ SkTDArray<uint8_t> xArray;
+
+ yArray.reserve(std::min(bounds.height(), 1024));
+ xArray.reserve(std::min(bounds.width(), 512) * 128);
+
+ auto appendXRun = [&xArray](uint8_t value, int count) {
+ SkASSERT(count >= 0);
+ while (count > 0) {
+ int n = count;
+ if (n > 255) {
+ n = 255;
+ }
+ uint8_t* data = xArray.append(2);
+ data[0] = n;
+ data[1] = value;
+ count -= n;
+ }
+ };
+
+ SkRegion::Iterator iter(rgn);
+ int prevRight = 0;
+ int prevBot = 0;
+ YOffset* currY = nullptr;
+
+ for (; !iter.done(); iter.next()) {
+ const SkIRect& r = iter.rect();
+ SkASSERT(bounds.contains(r));
+
+ int bot = r.fBottom - offsetY;
+ SkASSERT(bot >= prevBot);
+ if (bot > prevBot) {
+ if (currY) {
+ // flush current row
+ appendXRun(0, bounds.width() - prevRight);
+ }
+ // did we introduce an empty-gap from the prev row?
+ int top = r.fTop - offsetY;
+ if (top > prevBot) {
+ currY = yArray.append();
+ currY->fY = top - 1;
+ currY->fOffset = xArray.size();
+ appendXRun(0, bounds.width());
+ }
+ // create a new record for this Y value
+ currY = yArray.append();
+ currY->fY = bot - 1;
+ currY->fOffset = xArray.size();
+ prevRight = 0;
+ prevBot = bot;
+ }
+
+ int x = r.fLeft - offsetX;
+ appendXRun(0, x - prevRight);
+
+ int w = r.fRight - r.fLeft;
+ appendXRun(0xFF, w);
+ prevRight = x + w;
+ SkASSERT(prevRight <= bounds.width());
+ }
+ // flush last row
+ appendXRun(0, bounds.width() - prevRight);
+
+ // now pack everything into a RunHead
+ RunHead* head = RunHead::Alloc(yArray.size(), xArray.size_bytes());
+ memcpy(head->yoffsets(), yArray.begin(), yArray.size_bytes());
+ memcpy(head->data(), xArray.begin(), xArray.size_bytes());
+
+ this->setEmpty();
+ fBounds = bounds;
+ fRunHead = head;
+ this->validate();
+ return true;
+}
+
+bool SkAAClip::setPath(const SkPath& path, const SkIRect& clip, bool doAA) {
+ AUTO_AACLIP_VALIDATE(*this);
+
+ if (clip.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ SkIRect ibounds;
+ // Since we assert that the BuilderBlitter will never blit outside the intersection
+ // of clip and ibounds, we create the builder with the snug bounds.
+ if (path.isInverseFillType()) {
+ ibounds = clip;
+ } else {
+ path.getBounds().roundOut(&ibounds);
+ if (ibounds.isEmpty() || !ibounds.intersect(clip)) {
+ return this->setEmpty();
+ }
+ }
+
+ Builder builder(ibounds);
+ return builder.blitPath(this, path, doAA);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkAAClip::op(const SkAAClip& other, SkClipOp op) {
+ AUTO_AACLIP_VALIDATE(*this);
+
+ if (this->isEmpty()) {
+ // Once the clip is empty, it cannot become un-empty.
+ return false;
+ }
+
+ SkIRect bounds = fBounds;
+ switch(op) {
+ case SkClipOp::kDifference:
+ if (other.isEmpty() || !SkIRect::Intersects(fBounds, other.fBounds)) {
+ // this remains unmodified and isn't empty
+ return true;
+ }
+ break;
+
+ case SkClipOp::kIntersect:
+ if (other.isEmpty() || !bounds.intersect(other.fBounds)) {
+ // the intersected clip becomes empty
+ return this->setEmpty();
+ }
+ break;
+ }
+
+
+ SkASSERT(SkIRect::Intersects(bounds, fBounds));
+ SkASSERT(SkIRect::Intersects(bounds, other.fBounds));
+
+ Builder builder(bounds);
+ return builder.applyClipOp(this, other, op);
+}
+
+bool SkAAClip::op(const SkIRect& rect, SkClipOp op) {
+ // It can be expensive to build a local aaclip before applying the op, so
+ // we first see if we can restrict the bounds of new rect to our current
+ // bounds, or note that the new rect subsumes our current clip.
+ SkIRect pixelBounds = fBounds;
+ if (!pixelBounds.intersect(rect)) {
+ // No change or clip becomes empty depending on 'op'
+ switch(op) {
+ case SkClipOp::kDifference: return !this->isEmpty();
+ case SkClipOp::kIntersect: return this->setEmpty();
+ }
+ SkUNREACHABLE;
+ } else if (pixelBounds == fBounds) {
+ // Wholly inside 'rect', so clip becomes empty or remains unchanged
+ switch(op) {
+ case SkClipOp::kDifference: return this->setEmpty();
+ case SkClipOp::kIntersect: return !this->isEmpty();
+ }
+ SkUNREACHABLE;
+ } else if (op == SkClipOp::kIntersect && this->quickContains(pixelBounds)) {
+ // We become just the remaining rectangle
+ return this->setRect(pixelBounds);
+ } else {
+ SkAAClip clip;
+ clip.setRect(rect);
+ return this->op(clip, op);
+ }
+}
+
+bool SkAAClip::op(const SkRect& rect, SkClipOp op, bool doAA) {
+ if (!doAA) {
+ return this->op(rect.round(), op);
+ } else {
+ // Tighten bounds for "path" aaclip of the rect
+ SkIRect pixelBounds = fBounds;
+ if (!pixelBounds.intersect(rect.roundOut())) {
+ // No change or clip becomes empty depending on 'op'
+ switch(op) {
+ case SkClipOp::kDifference: return !this->isEmpty();
+ case SkClipOp::kIntersect: return this->setEmpty();
+ }
+ SkUNREACHABLE;
+ } else if (rect.contains(SkRect::Make(fBounds))) {
+ // Wholly inside 'rect', so clip becomes empty or remains unchanged
+ switch(op) {
+ case SkClipOp::kDifference: return this->setEmpty();
+ case SkClipOp::kIntersect: return !this->isEmpty();
+ }
+ SkUNREACHABLE;
+ } else if (op == SkClipOp::kIntersect && this->quickContains(pixelBounds)) {
+ // We become just the rect intersected with pixel bounds (preserving fractional coords
+ // for AA edges).
+ return this->setPath(SkPath::Rect(rect), pixelBounds, /*doAA=*/true);
+ } else {
+ SkAAClip rectClip;
+ rectClip.setPath(SkPath::Rect(rect),
+ op == SkClipOp::kDifference ? fBounds : pixelBounds,
+ /*doAA=*/true);
+ return this->op(rectClip, op);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkAAClip::translate(int dx, int dy, SkAAClip* dst) const {
+ if (nullptr == dst) {
+ return !this->isEmpty();
+ }
+
+ if (this->isEmpty()) {
+ return dst->setEmpty();
+ }
+
+ if (this != dst) {
+ fRunHead->fRefCnt++;
+ dst->freeRuns();
+ dst->fRunHead = fRunHead;
+ dst->fBounds = fBounds;
+ }
+ dst->fBounds.offset(dx, dy);
+ return true;
+}
+
+void SkAAClip::freeRuns() {
+ if (fRunHead) {
+ SkASSERT(fRunHead->fRefCnt.load() >= 1);
+ if (1 == fRunHead->fRefCnt--) {
+ sk_free(fRunHead);
+ }
+ }
+}
+
+const uint8_t* SkAAClip::findRow(int y, int* lastYForRow) const {
+ SkASSERT(fRunHead);
+
+ if (y < fBounds.fTop || y >= fBounds.fBottom) {
+ return nullptr;
+ }
+ y -= fBounds.y(); // our yoffs values are relative to the top
+
+ const YOffset* yoff = fRunHead->yoffsets();
+ while (yoff->fY < y) {
+ yoff += 1;
+ SkASSERT(yoff - fRunHead->yoffsets() < fRunHead->fRowCount);
+ }
+
+ if (lastYForRow) {
+ *lastYForRow = fBounds.y() + yoff->fY;
+ }
+ return fRunHead->data() + yoff->fOffset;
+}
+
+const uint8_t* SkAAClip::findX(const uint8_t data[], int x, int* initialCount) const {
+ SkASSERT(x >= fBounds.fLeft && x < fBounds.fRight);
+ x -= fBounds.x();
+
+ // first skip up to X
+ for (;;) {
+ int n = data[0];
+ if (x < n) {
+ if (initialCount) {
+ *initialCount = n - x;
+ }
+ break;
+ }
+ data += 2;
+ x -= n;
+ }
+ return data;
+}
+
+bool SkAAClip::quickContains(int left, int top, int right, int bottom) const {
+ if (this->isEmpty()) {
+ return false;
+ }
+ if (!fBounds.contains(SkIRect{left, top, right, bottom})) {
+ return false;
+ }
+
+ int lastY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = this->findRow(top, &lastY);
+ if (lastY < bottom) {
+ return false;
+ }
+ // now just need to check in X
+ int count;
+ row = this->findX(row, left, &count);
+
+ int rectWidth = right - left;
+ while (0xFF == row[1]) {
+ if (count >= rectWidth) {
+ return true;
+ }
+ rectWidth -= count;
+ row += 2;
+ count = row[0];
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void expandToRuns(const uint8_t* SK_RESTRICT data, int initialCount, int width,
+ int16_t* SK_RESTRICT runs, SkAlpha* SK_RESTRICT aa) {
+ // we don't read our initial n from data, since the caller may have had to
+ // clip it, hence the initialCount parameter.
+ int n = initialCount;
+ for (;;) {
+ if (n > width) {
+ n = width;
+ }
+ SkASSERT(n > 0);
+ runs[0] = n;
+ runs += n;
+
+ aa[0] = data[1];
+ aa += n;
+
+ data += 2;
+ width -= n;
+ if (0 == width) {
+ break;
+ }
+ // load the next count
+ n = data[0];
+ }
+ runs[0] = 0; // sentinel
+}
+
+SkAAClipBlitter::~SkAAClipBlitter() {
+ sk_free(fScanlineScratch);
+}
+
+void SkAAClipBlitter::ensureRunsAndAA() {
+ if (nullptr == fScanlineScratch) {
+ // add 1 so we can store the terminating run count of 0
+ int count = fAAClipBounds.width() + 1;
+ // we use this either for fRuns + fAA, or a scanline of a mask
+ // which may be as deep as 32bits
+ fScanlineScratch = sk_malloc_throw(count * sizeof(SkPMColor));
+ fRuns = (int16_t*)fScanlineScratch;
+ fAA = (SkAlpha*)(fRuns + count);
+ }
+}
+
+void SkAAClipBlitter::blitH(int x, int y, int width) {
+ SkASSERT(width > 0);
+ SkASSERT(fAAClipBounds.contains(x, y));
+ SkASSERT(fAAClipBounds.contains(x + width - 1, y));
+
+ const uint8_t* row = fAAClip->findRow(y);
+ int initialCount;
+ row = fAAClip->findX(row, x, &initialCount);
+
+ if (initialCount >= width) {
+ SkAlpha alpha = row[1];
+ if (0 == alpha) {
+ return;
+ }
+ if (0xFF == alpha) {
+ fBlitter->blitH(x, y, width);
+ return;
+ }
+ }
+
+ this->ensureRunsAndAA();
+ expandToRuns(row, initialCount, width, fRuns, fAA);
+
+ fBlitter->blitAntiH(x, y, fAA, fRuns);
+}
+
+static void merge(const uint8_t* SK_RESTRICT row, int rowN,
+ const SkAlpha* SK_RESTRICT srcAA,
+ const int16_t* SK_RESTRICT srcRuns,
+ SkAlpha* SK_RESTRICT dstAA,
+ int16_t* SK_RESTRICT dstRuns,
+ int width) {
+ SkDEBUGCODE(int accumulated = 0;)
+ int srcN = srcRuns[0];
+ // do we need this check?
+ if (0 == srcN) {
+ return;
+ }
+
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ unsigned newAlpha = SkMulDiv255Round(srcAA[0], row[1]);
+ int minN = std::min(srcN, rowN);
+ dstRuns[0] = minN;
+ dstRuns += minN;
+ dstAA[0] = newAlpha;
+ dstAA += minN;
+
+ if (0 == (srcN -= minN)) {
+ srcN = srcRuns[0]; // refresh
+ srcRuns += srcN;
+ srcAA += srcN;
+ srcN = srcRuns[0]; // reload
+ if (0 == srcN) {
+ break;
+ }
+ }
+ if (0 == (rowN -= minN)) {
+ row += 2;
+ rowN = row[0]; // reload
+ }
+
+ SkDEBUGCODE(accumulated += minN;)
+ SkASSERT(accumulated <= width);
+ }
+ dstRuns[0] = 0;
+}
+
+void SkAAClipBlitter::blitAntiH(int x, int y, const SkAlpha aa[],
+ const int16_t runs[]) {
+
+ const uint8_t* row = fAAClip->findRow(y);
+ int initialCount;
+ row = fAAClip->findX(row, x, &initialCount);
+
+ this->ensureRunsAndAA();
+
+ merge(row, initialCount, aa, runs, fAA, fRuns, fAAClipBounds.width());
+ fBlitter->blitAntiH(x, y, fAA, fRuns);
+}
+
+void SkAAClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (fAAClip->quickContains(x, y, x + 1, y + height)) {
+ fBlitter->blitV(x, y, height, alpha);
+ return;
+ }
+
+ for (;;) {
+ int lastY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = fAAClip->findRow(y, &lastY);
+ int dy = lastY - y + 1;
+ if (dy > height) {
+ dy = height;
+ }
+ height -= dy;
+
+ row = fAAClip->findX(row, x);
+ SkAlpha newAlpha = SkMulDiv255Round(alpha, row[1]);
+ if (newAlpha) {
+ fBlitter->blitV(x, y, dy, newAlpha);
+ }
+ SkASSERT(height >= 0);
+ if (height <= 0) {
+ break;
+ }
+ y = lastY + 1;
+ }
+}
+
+void SkAAClipBlitter::blitRect(int x, int y, int width, int height) {
+ if (fAAClip->quickContains(x, y, x + width, y + height)) {
+ fBlitter->blitRect(x, y, width, height);
+ return;
+ }
+
+ while (--height >= 0) {
+ this->blitH(x, y, width);
+ y += 1;
+ }
+}
+
+typedef void (*MergeAAProc)(const void* src, int width, const uint8_t* row,
+ int initialRowCount, void* dst);
+
+static void small_memcpy(void* dst, const void* src, size_t n) {
+ memcpy(dst, src, n);
+}
+
+static void small_bzero(void* dst, size_t n) {
+ sk_bzero(dst, n);
+}
+
+static inline uint8_t mergeOne(uint8_t value, unsigned alpha) {
+ return SkMulDiv255Round(value, alpha);
+}
+
+static inline uint16_t mergeOne(uint16_t value, unsigned alpha) {
+ unsigned r = SkGetPackedR16(value);
+ unsigned g = SkGetPackedG16(value);
+ unsigned b = SkGetPackedB16(value);
+ return SkPackRGB16(SkMulDiv255Round(r, alpha),
+ SkMulDiv255Round(g, alpha),
+ SkMulDiv255Round(b, alpha));
+}
+
+template <typename T>
+void mergeT(const void* inSrc, int srcN, const uint8_t* SK_RESTRICT row, int rowN, void* inDst) {
+ const T* SK_RESTRICT src = static_cast<const T*>(inSrc);
+ T* SK_RESTRICT dst = static_cast<T*>(inDst);
+ for (;;) {
+ SkASSERT(rowN > 0);
+ SkASSERT(srcN > 0);
+
+ int n = std::min(rowN, srcN);
+ unsigned rowA = row[1];
+ if (0xFF == rowA) {
+ small_memcpy(dst, src, n * sizeof(T));
+ } else if (0 == rowA) {
+ small_bzero(dst, n * sizeof(T));
+ } else {
+ for (int i = 0; i < n; ++i) {
+ dst[i] = mergeOne(src[i], rowA);
+ }
+ }
+
+ if (0 == (srcN -= n)) {
+ break;
+ }
+
+ src += n;
+ dst += n;
+
+ SkASSERT(rowN == n);
+ row += 2;
+ rowN = row[0];
+ }
+}
+
+static MergeAAProc find_merge_aa_proc(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ SkDEBUGFAIL("unsupported");
+ return nullptr;
+ case SkMask::kA8_Format:
+ case SkMask::k3D_Format:
+ return mergeT<uint8_t> ;
+ case SkMask::kLCD16_Format:
+ return mergeT<uint16_t>;
+ default:
+ SkDEBUGFAIL("unsupported");
+ return nullptr;
+ }
+}
+
+static U8CPU bit2byte(int bitInAByte) {
+ SkASSERT(bitInAByte <= 0xFF);
+ // negation turns any non-zero into 0xFFFFFF??, so we just shift down
+ // some value >= 8 to get a full FF value
+ return -bitInAByte >> 8;
+}
+
+static void upscaleBW2A8(SkMask* dstMask, const SkMask& srcMask) {
+ SkASSERT(SkMask::kBW_Format == srcMask.fFormat);
+ SkASSERT(SkMask::kA8_Format == dstMask->fFormat);
+
+ const int width = srcMask.fBounds.width();
+ const int height = srcMask.fBounds.height();
+
+ const uint8_t* SK_RESTRICT src = (const uint8_t*)srcMask.fImage;
+ const size_t srcRB = srcMask.fRowBytes;
+ uint8_t* SK_RESTRICT dst = (uint8_t*)dstMask->fImage;
+ const size_t dstRB = dstMask->fRowBytes;
+
+ const int wholeBytes = width >> 3;
+ const int leftOverBits = width & 7;
+
+ for (int y = 0; y < height; ++y) {
+ uint8_t* SK_RESTRICT d = dst;
+ for (int i = 0; i < wholeBytes; ++i) {
+ int srcByte = src[i];
+ d[0] = bit2byte(srcByte & (1 << 7));
+ d[1] = bit2byte(srcByte & (1 << 6));
+ d[2] = bit2byte(srcByte & (1 << 5));
+ d[3] = bit2byte(srcByte & (1 << 4));
+ d[4] = bit2byte(srcByte & (1 << 3));
+ d[5] = bit2byte(srcByte & (1 << 2));
+ d[6] = bit2byte(srcByte & (1 << 1));
+ d[7] = bit2byte(srcByte & (1 << 0));
+ d += 8;
+ }
+ if (leftOverBits) {
+ int srcByte = src[wholeBytes];
+ for (int x = 0; x < leftOverBits; ++x) {
+ *d++ = bit2byte(srcByte & 0x80);
+ srcByte <<= 1;
+ }
+ }
+ src += srcRB;
+ dst += dstRB;
+ }
+}
+
+void SkAAClipBlitter::blitMask(const SkMask& origMask, const SkIRect& clip) {
+ SkASSERT(fAAClip->getBounds().contains(clip));
+
+ if (fAAClip->quickContains(clip)) {
+ fBlitter->blitMask(origMask, clip);
+ return;
+ }
+
+ const SkMask* mask = &origMask;
+
+ // if we're BW, we need to upscale to A8 (ugh)
+ SkMask grayMask;
+ if (SkMask::kBW_Format == origMask.fFormat) {
+ grayMask.fFormat = SkMask::kA8_Format;
+ grayMask.fBounds = origMask.fBounds;
+ grayMask.fRowBytes = origMask.fBounds.width();
+ size_t size = grayMask.computeImageSize();
+ grayMask.fImage = (uint8_t*)fGrayMaskScratch.reset(size,
+ SkAutoMalloc::kReuse_OnShrink);
+
+ upscaleBW2A8(&grayMask, origMask);
+ mask = &grayMask;
+ }
+
+ this->ensureRunsAndAA();
+
+ // HACK -- we are devolving 3D into A8, need to copy the rest of the 3D
+ // data into a temp block to support it better (ugh)
+
+ const void* src = mask->getAddr(clip.fLeft, clip.fTop);
+ const size_t srcRB = mask->fRowBytes;
+ const int width = clip.width();
+ MergeAAProc mergeProc = find_merge_aa_proc(mask->fFormat);
+
+ SkMask rowMask;
+ rowMask.fFormat = SkMask::k3D_Format == mask->fFormat ? SkMask::kA8_Format : mask->fFormat;
+ rowMask.fBounds.fLeft = clip.fLeft;
+ rowMask.fBounds.fRight = clip.fRight;
+ rowMask.fRowBytes = mask->fRowBytes; // doesn't matter, since our height==1
+ rowMask.fImage = (uint8_t*)fScanlineScratch;
+
+ int y = clip.fTop;
+ const int stopY = y + clip.height();
+
+ do {
+ int localStopY SK_INIT_TO_AVOID_WARNING;
+ const uint8_t* row = fAAClip->findRow(y, &localStopY);
+ // findRow returns last Y, not stop, so we add 1
+ localStopY = std::min(localStopY + 1, stopY);
+
+ int initialCount;
+ row = fAAClip->findX(row, clip.fLeft, &initialCount);
+ do {
+ mergeProc(src, width, row, initialCount, rowMask.fImage);
+ rowMask.fBounds.fTop = y;
+ rowMask.fBounds.fBottom = y + 1;
+ fBlitter->blitMask(rowMask, rowMask.fBounds);
+ src = (const void*)((const char*)src + srcRB);
+ } while (++y < localStopY);
+ } while (y < stopY);
+}
+
+const SkPixmap* SkAAClipBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkAAClip.h b/gfx/skia/skia/src/core/SkAAClip.h
new file mode 100644
index 0000000000..bb79fc275f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAAClip.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAAClip_DEFINED
+#define SkAAClip_DEFINED
+
+#include "include/core/SkClipOp.h"
+#include "include/core/SkRect.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/core/SkBlitter.h"
+
+class SkPath;
+class SkRegion;
+
+class SkAAClip {
+public:
+ SkAAClip();
+ SkAAClip(const SkAAClip&);
+ ~SkAAClip();
+
+ SkAAClip& operator=(const SkAAClip&);
+
+ bool isEmpty() const { return nullptr == fRunHead; }
+ const SkIRect& getBounds() const { return fBounds; }
+
+ // Returns true iff the clip is not empty, and is just a hard-edged rect (no partial alpha).
+ // If true, getBounds() can be used in place of this clip.
+ bool isRect() const;
+
+ bool setEmpty();
+ bool setRect(const SkIRect&);
+ bool setPath(const SkPath&, const SkIRect& bounds, bool doAA = true);
+ bool setRegion(const SkRegion&);
+
+ bool op(const SkIRect&, SkClipOp);
+ bool op(const SkRect&, SkClipOp, bool doAA);
+ bool op(const SkAAClip&, SkClipOp);
+
+ bool translate(int dx, int dy, SkAAClip* dst) const;
+
+ /**
+ * Allocates a mask the size of the aaclip, and expands its data into
+ * the mask, using kA8_Format. Used for tests and visualization purposes.
+ */
+ void copyToMask(SkMask*) const;
+
+ bool quickContains(const SkIRect& r) const {
+ return this->quickContains(r.fLeft, r.fTop, r.fRight, r.fBottom);
+ }
+
+#ifdef SK_DEBUG
+ void validate() const;
+ void debug(bool compress_y=false) const;
+#else
+ void validate() const {}
+ void debug(bool compress_y=false) const {}
+#endif
+
+private:
+ class Builder;
+ struct RunHead;
+ friend class SkAAClipBlitter;
+
+ SkIRect fBounds;
+ RunHead* fRunHead;
+
+ void freeRuns();
+
+ bool quickContains(int left, int top, int right, int bottom) const;
+
+ bool trimBounds();
+ bool trimTopBottom();
+ bool trimLeftRight();
+
+ // For SkAAClipBlitter and quickContains
+ const uint8_t* findRow(int y, int* lastYForRow = nullptr) const;
+ const uint8_t* findX(const uint8_t data[], int x, int* initialCount = nullptr) const;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkAAClipBlitter : public SkBlitter {
+public:
+ SkAAClipBlitter() : fScanlineScratch(nullptr) {}
+ ~SkAAClipBlitter() override;
+
+ void init(SkBlitter* blitter, const SkAAClip* aaclip) {
+ SkASSERT(aaclip && !aaclip->isEmpty());
+ fBlitter = blitter;
+ fAAClip = aaclip;
+ fAAClipBounds = aaclip->getBounds();
+ }
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+private:
+ SkBlitter* fBlitter;
+ const SkAAClip* fAAClip;
+ SkIRect fAAClipBounds;
+
+ // point into fScanlineScratch
+ int16_t* fRuns;
+ SkAlpha* fAA;
+
+ enum {
+ kSize = 32 * 32
+ };
+ SkAutoSMalloc<kSize> fGrayMaskScratch; // used for blitMask
+ void* fScanlineScratch; // enough for a mask at 32bit, or runs+aa
+
+ void ensureRunsAndAA();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkATrace.cpp b/gfx/skia/skia/src/core/SkATrace.cpp
new file mode 100644
index 0000000000..f5c56be7b4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkATrace.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkATrace.h"
+
+#include "src/core/SkTraceEvent.h"
+#include "src/core/SkTraceEventCommon.h"
+
+#ifdef SK_BUILD_FOR_ANDROID
+ #include <dlfcn.h>
+#endif
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #include <cutils/trace.h>
+#endif
+
+SkATrace::SkATrace() : fBeginSection(nullptr), fEndSection(nullptr), fIsEnabled(nullptr) {
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ fIsEnabled = []{ return static_cast<bool>(CC_UNLIKELY(ATRACE_ENABLED())); };
+ fBeginSection = [](const char* name){ ATRACE_BEGIN(name); };
+ fEndSection = []{ ATRACE_END(); };
+#elif defined(SK_BUILD_FOR_ANDROID)
+ if (void* lib = dlopen("libandroid.so", RTLD_NOW | RTLD_LOCAL)) {
+ fBeginSection = (decltype(fBeginSection))dlsym(lib, "ATrace_beginSection");
+ fEndSection = (decltype(fEndSection))dlsym(lib, "ATrace_endSection");
+ fIsEnabled = (decltype(fIsEnabled))dlsym(lib, "ATrace_isEnabled");
+ }
+#endif
+
+ if (!fIsEnabled) {
+ fIsEnabled = []{ return false; };
+ }
+}
+
+SkEventTracer::Handle SkATrace::addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) {
+ if (fIsEnabled()) {
+ if (TRACE_EVENT_PHASE_COMPLETE == phase ||
+ TRACE_EVENT_PHASE_INSTANT == phase) {
+ fBeginSection(name);
+ }
+
+ if (TRACE_EVENT_PHASE_INSTANT == phase) {
+ fEndSection();
+ }
+ }
+ return 0;
+}
+
+void SkATrace::updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) {
+ // This is only ever called from a scoped trace event so we will just end the ATrace section.
+ if (fIsEnabled()) {
+ fEndSection();
+ }
+}
+
+const uint8_t* SkATrace::getCategoryGroupEnabled(const char* name) {
+ // Chrome tracing is set up to not repeatedly call this function once it has been initialized. So
+ // we can't use this to do a check for ATrace isEnabled(). Thus we will always return yes here
+ // and then check to see if ATrace is enabled when beginning and ending a section.
+ static uint8_t yes = SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags;
+ return &yes;
+}
+
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+bool SkAndroidFrameworkTraceUtil::gEnableAndroidTracing = false;
+bool SkAndroidFrameworkTraceUtil::gUsePerfettoTrackEvents = false;
+
+#endif //SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+
+
diff --git a/gfx/skia/skia/src/core/SkATrace.h b/gfx/skia/skia/src/core/SkATrace.h
new file mode 100644
index 0000000000..e0e642aa10
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkATrace.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkATrace_DEFINED
+#define SkATrace_DEFINED
+
+#include "include/utils/SkEventTracer.h"
+
+/**
+ * This class is used to support ATrace in android apps. It hooks into the SkEventTracer system. It
+ * currently supports the macros TRACE_EVENT*, TRACE_EVENT_INSTANT*, and TRACE_EVENT_BEGIN/END*.
+ * For versions of these calls that take additoinal args and value pairs we currently just drop them
+ * and report only the name. Since ATrace is a simple push and pop system (all traces are fully
+ * nested), if using BEGIN and END you should also make sure your calls are properly nested (i.e. if
+ * startA is before startB, then endB is before endA).
+ */
+class SkATrace : public SkEventTracer {
+public:
+ SkATrace();
+
+ SkEventTracer::Handle addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) override;
+
+
+ void updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) override;
+
+ const uint8_t* getCategoryGroupEnabled(const char* name) override;
+
+ const char* getCategoryGroupName(const uint8_t* categoryEnabledFlag) override {
+ static const char* category = "skiaATrace";
+ return category;
+ }
+
+ // Atrace does not yet support splitting up trace output into sections.
+ void newTracingSection(const char* name) override {}
+
+private:
+ SkATrace(const SkATrace&) = delete;
+ SkATrace& operator=(const SkATrace&) = delete;
+
+ void (*fBeginSection)(const char*);
+ void (*fEndSection)(void);
+ bool (*fIsEnabled)(void);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h b/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h
new file mode 100644
index 0000000000..4c05ce2184
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAdvancedTypefaceMetrics.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAdvancedTypefaceMetrics_DEFINED
+#define SkAdvancedTypefaceMetrics_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkString.h"
+#include "include/private/SkBitmaskEnum.h"
+
+/** \class SkAdvancedTypefaceMetrics
+
+ The SkAdvancedTypefaceMetrics class is used by the PDF backend to correctly
+ embed typefaces. This class is created and filled in with information by
+ SkTypeface::getAdvancedMetrics.
+*/
+struct SkAdvancedTypefaceMetrics {
+ // The PostScript name of the font. See `FontName` and `BaseFont` in PDF standard.
+ SkString fPostScriptName;
+ SkString fFontName;
+
+ // These enum values match the values used in the PDF file format.
+ enum StyleFlags : uint32_t {
+ kFixedPitch_Style = 0x00000001,
+ kSerif_Style = 0x00000002,
+ kScript_Style = 0x00000008,
+ kItalic_Style = 0x00000040,
+ kAllCaps_Style = 0x00010000,
+ kSmallCaps_Style = 0x00020000,
+ kForceBold_Style = 0x00040000
+ };
+ StyleFlags fStyle = (StyleFlags)0; // Font style characteristics.
+
+ enum FontType : uint8_t {
+ kType1_Font,
+ kType1CID_Font,
+ kCFF_Font,
+ kTrueType_Font,
+ kOther_Font,
+ };
+ // The type of the underlying font program. This field determines which
+ // of the following fields are valid. If it is kOther_Font the per glyph
+ // information will never be populated.
+ FontType fType = kOther_Font;
+
+ enum FontFlags : uint8_t {
+ kVariable_FontFlag = 1 << 0, //!<May be true for Type1, CFF, or TrueType fonts.
+ kNotEmbeddable_FontFlag = 1 << 1, //!<May not be embedded.
+ kNotSubsettable_FontFlag = 1 << 2, //!<May not be subset.
+ kAltDataFormat_FontFlag = 1 << 3, //!<Data compressed. Table access may still work.
+ };
+ FontFlags fFlags = (FontFlags)0; // Global font flags.
+
+ int16_t fItalicAngle = 0; // Counterclockwise degrees from vertical of the
+ // dominant vertical stroke for an Italic face.
+ // The following fields are all in font units.
+ int16_t fAscent = 0; // Max height above baseline, not including accents.
+ int16_t fDescent = 0; // Max depth below baseline (negative).
+ int16_t fStemV = 0; // Thickness of dominant vertical stem.
+ int16_t fCapHeight = 0; // Height (from baseline) of top of flat capitals.
+
+ SkIRect fBBox = {0, 0, 0, 0}; // The bounding box of all glyphs (in font units).
+};
+
+namespace sknonstd {
+template <> struct is_bitmask_enum<SkAdvancedTypefaceMetrics::FontFlags> : std::true_type {};
+template <> struct is_bitmask_enum<SkAdvancedTypefaceMetrics::StyleFlags> : std::true_type {};
+} // namespace sknonstd
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAlphaRuns.cpp b/gfx/skia/skia/src/core/SkAlphaRuns.cpp
new file mode 100644
index 0000000000..ce1e42192d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAlphaRuns.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkTo.h"
+#include "src/core/SkAntiRun.h"
+#include "src/core/SkOpts.h"
+
+void SkAlphaRuns::reset(int width) {
+ SkASSERT(width > 0);
+
+#ifdef SK_DEBUG
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ SkOpts::memset16((uint16_t*)fRuns, (uint16_t)(-42), width);
+#endif
+#endif
+ fRuns[0] = SkToS16(width);
+ fRuns[width] = 0;
+ fAlpha[0] = 0;
+
+ SkDEBUGCODE(fWidth = width;)
+ SkDEBUGCODE(this->validate();)
+}
+
+#ifdef SK_DEBUG
+ void SkAlphaRuns::assertValid(int y, int maxStep) const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ int max = (y + 1) * maxStep - (y == maxStep - 1);
+
+ const int16_t* runs = fRuns;
+ const uint8_t* alpha = fAlpha;
+
+ while (*runs) {
+ SkASSERT(*alpha <= max);
+ alpha += *runs;
+ runs += *runs;
+ }
+#endif
+ }
+
+ void SkAlphaRuns::dump() const {
+ const int16_t* runs = fRuns;
+ const uint8_t* alpha = fAlpha;
+
+ SkDebugf("Runs");
+ while (*runs) {
+ int n = *runs;
+
+ SkDebugf(" %02x", *alpha);
+ if (n > 1) {
+ SkDebugf(",%d", n);
+ }
+ alpha += n;
+ runs += n;
+ }
+ SkDebugf("\n");
+ }
+
+ void SkAlphaRuns::validate() const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ SkASSERT(fWidth > 0);
+
+ int count = 0;
+ const int16_t* runs = fRuns;
+
+ while (*runs) {
+ SkASSERT(*runs > 0);
+ count += *runs;
+ SkASSERT(count <= fWidth);
+ runs += *runs;
+ }
+ SkASSERT(count == fWidth);
+#endif
+ }
+#endif
diff --git a/gfx/skia/skia/src/core/SkAnalyticEdge.cpp b/gfx/skia/skia/src/core/SkAnalyticEdge.cpp
new file mode 100644
index 0000000000..16b31bf356
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnalyticEdge.cpp
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkTo.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkAnalyticEdge.h"
+#include "src/core/SkFDot6.h"
+#include <utility>
+
static const int kInverseTableSize = 1024; // SK_FDot6One * 16

// Table-based reciprocal: returns the SkFixed value of 1 / (x as SkFDot6),
// i.e. (1 << 22) / x, without performing a runtime division.
// Requires |x| <= kInverseTableSize (asserted below). The table stores the
// values for negative arguments (table[kLastEntry + x] for x in [-1024, 0]);
// positive x is handled by mirroring. The trailing 0 entry is the x == 0 slot.
static inline SkFixed quick_inverse(SkFDot6 x) {
    static const int32_t table[] = {
        -4096, -4100, -4104, -4108, -4112, -4116, -4120, -4124, -4128, -4132, -4136,
        -4140, -4144, -4148, -4152, -4156, -4161, -4165, -4169, -4173, -4177, -4181,
        -4185, -4190, -4194, -4198, -4202, -4206, -4211, -4215, -4219, -4223, -4228,
        -4232, -4236, -4240, -4245, -4249, -4253, -4258, -4262, -4266, -4271, -4275,
        -4279, -4284, -4288, -4293, -4297, -4301, -4306, -4310, -4315, -4319, -4324,
        -4328, -4332, -4337, -4341, -4346, -4350, -4355, -4359, -4364, -4369, -4373,
        -4378, -4382, -4387, -4391, -4396, -4401, -4405, -4410, -4415, -4419, -4424,
        -4429, -4433, -4438, -4443, -4447, -4452, -4457, -4462, -4466, -4471, -4476,
        -4481, -4485, -4490, -4495, -4500, -4505, -4510, -4514, -4519, -4524, -4529,
        -4534, -4539, -4544, -4549, -4554, -4559, -4563, -4568, -4573, -4578, -4583,
        -4588, -4593, -4599, -4604, -4609, -4614, -4619, -4624, -4629, -4634, -4639,
        -4644, -4650, -4655, -4660, -4665, -4670, -4675, -4681, -4686, -4691, -4696,
        -4702, -4707, -4712, -4718, -4723, -4728, -4733, -4739, -4744, -4750, -4755,
        -4760, -4766, -4771, -4777, -4782, -4788, -4793, -4798, -4804, -4809, -4815,
        -4821, -4826, -4832, -4837, -4843, -4848, -4854, -4860, -4865, -4871, -4877,
        -4882, -4888, -4894, -4899, -4905, -4911, -4917, -4922, -4928, -4934, -4940,
        -4946, -4951, -4957, -4963, -4969, -4975, -4981, -4987, -4993, -4999, -5005,
        -5011, -5017, -5023, -5029, -5035, -5041, -5047, -5053, -5059, -5065, -5071,
        -5077, -5084, -5090, -5096, -5102, -5108, -5115, -5121, -5127, -5133, -5140,
        -5146, -5152, -5159, -5165, -5171, -5178, -5184, -5190, -5197, -5203, -5210,
        -5216, -5223, -5229, -5236, -5242, -5249, -5256, -5262, -5269, -5275, -5282,
        -5289, -5295, -5302, -5309, -5315, -5322, -5329, -5336, -5343, -5349, -5356,
        -5363, -5370, -5377, -5384, -5391, -5398, -5405, -5412, -5418, -5426, -5433,
        -5440, -5447, -5454, -5461, -5468, -5475, -5482, -5489, -5497, -5504, -5511,
        -5518, -5526, -5533, -5540, -5548, -5555, -5562, -5570, -5577, -5584, -5592,
        -5599, -5607, -5614, -5622, -5629, -5637, -5645, -5652, -5660, -5667, -5675,
        -5683, -5691, -5698, -5706, -5714, -5722, -5729, -5737, -5745, -5753, -5761,
        -5769, -5777, -5785, -5793, -5801, -5809, -5817, -5825, -5833, -5841, -5849,
        -5857, -5866, -5874, -5882, -5890, -5899, -5907, -5915, -5924, -5932, -5940,
        -5949, -5957, -5966, -5974, -5983, -5991, -6000, -6009, -6017, -6026, -6034,
        -6043, -6052, -6061, -6069, -6078, -6087, -6096, -6105, -6114, -6123, -6132,
        -6141, -6150, -6159, -6168, -6177, -6186, -6195, -6204, -6213, -6223, -6232,
        -6241, -6250, -6260, -6269, -6278, -6288, -6297, -6307, -6316, -6326, -6335,
        -6345, -6355, -6364, -6374, -6384, -6393, -6403, -6413, -6423, -6432, -6442,
        -6452, -6462, -6472, -6482, -6492, -6502, -6512, -6523, -6533, -6543, -6553,
        -6563, -6574, -6584, -6594, -6605, -6615, -6626, -6636, -6647, -6657, -6668,
        -6678, -6689, -6700, -6710, -6721, -6732, -6743, -6754, -6765, -6775, -6786,
        -6797, -6808, -6820, -6831, -6842, -6853, -6864, -6875, -6887, -6898, -6909,
        -6921, -6932, -6944, -6955, -6967, -6978, -6990, -7002, -7013, -7025, -7037,
        -7049, -7061, -7073, -7084, -7096, -7108, -7121, -7133, -7145, -7157, -7169,
        -7182, -7194, -7206, -7219, -7231, -7244, -7256, -7269, -7281, -7294, -7307,
        -7319, -7332, -7345, -7358, -7371, -7384, -7397, -7410, -7423, -7436, -7449,
        -7463, -7476, -7489, -7503, -7516, -7530, -7543, -7557, -7570, -7584, -7598,
        -7612, -7626, -7639, -7653, -7667, -7681, -7695, -7710, -7724, -7738, -7752,
        -7767, -7781, -7796, -7810, -7825, -7839, -7854, -7869, -7884, -7898, -7913,
        -7928, -7943, -7958, -7973, -7989, -8004, -8019, -8035, -8050, -8065, -8081,
        -8097, -8112, -8128, -8144, -8160, -8176, -8192, -8208, -8224, -8240, -8256,
        -8272, -8289, -8305, -8322, -8338, -8355, -8371, -8388, -8405, -8422, -8439,
        -8456, -8473, -8490, -8507, -8525, -8542, -8559, -8577, -8594, -8612, -8630,
        -8648, -8665, -8683, -8701, -8719, -8738, -8756, -8774, -8793, -8811, -8830,
        -8848, -8867, -8886, -8905, -8924, -8943, -8962, -8981, -9000, -9020, -9039,
        -9058, -9078, -9098, -9118, -9137, -9157, -9177, -9198, -9218, -9238, -9258,
        -9279, -9300, -9320, -9341, -9362, -9383, -9404, -9425, -9446, -9467, -9489,
        -9510, -9532, -9554, -9576, -9597, -9619, -9642, -9664, -9686, -9709, -9731,
        -9754, -9776, -9799, -9822, -9845, -9868, -9892, -9915, -9939, -9962, -9986,
        -10010, -10034, -10058, -10082, -10106, -10131, -10155, -10180, -10205, -10230,
        -10255, -10280, -10305, -10330, -10356, -10381, -10407, -10433, -10459, -10485,
        -10512, -10538, -10564, -10591, -10618, -10645, -10672, -10699, -10727, -10754,
        -10782, -10810, -10837, -10866, -10894, -10922, -10951, -10979, -11008, -11037,
        -11066, -11096, -11125, -11155, -11184, -11214, -11244, -11275, -11305, -11335,
        -11366, -11397, -11428, -11459, -11491, -11522, -11554, -11586, -11618, -11650,
        -11683, -11715, -11748, -11781, -11814, -11848, -11881, -11915, -11949, -11983,
        -12018, -12052, -12087, -12122, -12157, -12192, -12228, -12264, -12300, -12336,
        -12372, -12409, -12446, -12483, -12520, -12557, -12595, -12633, -12671, -12710,
        -12748, -12787, -12826, -12865, -12905, -12945, -12985, -13025, -13066, -13107,
        -13148, -13189, -13231, -13273, -13315, -13357, -13400, -13443, -13486, -13530,
        -13573, -13617, -13662, -13706, -13751, -13797, -13842, -13888, -13934, -13981,
        -14027, -14074, -14122, -14169, -14217, -14266, -14315, -14364, -14413, -14463,
        -14513, -14563, -14614, -14665, -14716, -14768, -14820, -14873, -14926, -14979,
        -15033, -15087, -15141, -15196, -15252, -15307, -15363, -15420, -15477, -15534,
        -15592, -15650, -15709, -15768, -15827, -15887, -15947, -16008, -16070, -16131,
        -16194, -16256, -16320, -16384, -16448, -16513, -16578, -16644, -16710, -16777,
        -16844, -16912, -16980, -17050, -17119, -17189, -17260, -17331, -17403, -17476,
        -17549, -17623, -17697, -17772, -17848, -17924, -18001, -18078, -18157, -18236,
        -18315, -18396, -18477, -18558, -18641, -18724, -18808, -18893, -18978, -19065,
        -19152, -19239, -19328, -19418, -19508, -19599, -19691, -19784, -19878, -19972,
        -20068, -20164, -20262, -20360, -20460, -20560, -20661, -20763, -20867, -20971,
        -21076, -21183, -21290, -21399, -21509, -21620, -21732, -21845, -21959, -22075,
        -22192, -22310, -22429, -22550, -22671, -22795, -22919, -23045, -23172, -23301,
        -23431, -23563, -23696, -23831, -23967, -24105, -24244, -24385, -24528, -24672,
        -24818, -24966, -25115, -25266, -25420, -25575, -25731, -25890, -26051, -26214,
        -26379, -26546, -26715, -26886, -27060, -27235, -27413, -27594, -27776, -27962,
        -28149, -28339, -28532, -28728, -28926, -29127, -29330, -29537, -29746, -29959,
        -30174, -30393, -30615, -30840, -31068, -31300, -31536, -31775, -32017, -32263,
        -32513, -32768, -33026, -33288, -33554, -33825, -34100, -34379, -34663, -34952,
        -35246, -35544, -35848, -36157, -36472, -36792, -37117, -37449, -37786, -38130,
        -38479, -38836, -39199, -39568, -39945, -40329, -40721, -41120, -41527, -41943,
        -42366, -42799, -43240, -43690, -44150, -44620, -45100, -45590, -46091, -46603,
        -47127, -47662, -48210, -48770, -49344, -49932, -50533, -51150, -51781, -52428,
        -53092, -53773, -54471, -55188, -55924, -56679, -57456, -58254, -59074, -59918,
        -60787, -61680, -62601, -63550, -64527, -65536, -66576, -67650, -68759, -69905,
        -71089, -72315, -73584, -74898, -76260, -77672, -79137, -80659, -82241, -83886,
        -85598, -87381, -89240, -91180, -93206, -95325, -97541, -99864, -102300,
        -104857, -107546, -110376, -113359, -116508, -119837, -123361, -127100, -131072,
        -135300, -139810, -144631, -149796, -155344, -161319, -167772, -174762, -182361,
        -190650, -199728, -209715, -220752, -233016, -246723, -262144, -279620, -299593,
        -322638, -349525, -381300, -419430, -466033, -524288, -599186, -699050, -838860,
        -1048576, -1398101, -2097152, -4194304, 0
    };

    // The table has kInverseTableSize + 1 entries (index 0..1024).
    static constexpr size_t kLastEntry = std::size(table) - 1;
    SkASSERT(SkAbs32(x) <= static_cast<int32_t>(kLastEntry));
    static_assert(kLastEntry == kInverseTableSize);

    // Entries are stored negated for negative x; mirror them for positive x.
    if (x > 0) {
        return -table[kLastEntry - x];
    } else {
        return table[kLastEntry + x];
    }
}
+
+static inline SkFixed quick_div(SkFDot6 a, SkFDot6 b) {
+ const int kMinBits = 3; // abs(b) should be at least (1 << kMinBits) for quick division
+ const int kMaxBits = 31; // Number of bits available in signed int
+ // Given abs(b) <= (1 << kMinBits), the inverse of abs(b) is at most 1 << (22 - kMinBits) in
+ // SkFixed format. Hence abs(a) should be less than kMaxAbsA
+ const int kMaxAbsA = 1 << (kMaxBits - (22 - kMinBits));
+ SkFDot6 abs_a = SkAbs32(a);
+ SkFDot6 abs_b = SkAbs32(b);
+ if (abs_b >= (1 << kMinBits) && abs_b < kInverseTableSize && abs_a < kMaxAbsA) {
+ SkASSERT((int64_t)a * quick_inverse(b) <= SK_MaxS32
+ && (int64_t)a * quick_inverse(b) >= SK_MinS32);
+ SkFixed ourAnswer = (a * quick_inverse(b)) >> 6;
+ SkASSERT(
+ (SkFDot6Div(a,b) == 0 && ourAnswer == 0) ||
+ SkFixedDiv(SkAbs32(SkFDot6Div(a,b) - ourAnswer), SkAbs32(SkFDot6Div(a,b))) <= 1 << 10
+ );
+ return ourAnswer;
+ }
+ return SkFDot6Div(a, b);
+}
+
// Initialize this edge from the segment p0 -> p1.
// Returns false when the segment has zero height after snapping (nothing to fill).
bool SkAnalyticEdge::setLine(const SkPoint& p0, const SkPoint& p1) {
    fRiteE = nullptr;

    // We must set X/Y using the same way (e.g., times 4, to FDot6, then to Fixed) as Quads/Cubics.
    // Otherwise the order of the edge might be wrong due to precision limit.
    const int accuracy = kDefaultAccuracy;
#ifdef SK_RASTERIZE_EVEN_ROUNDING
    SkFixed x0 = SkFDot6ToFixed(SkScalarRoundToFDot6(p0.fX, accuracy)) >> accuracy;
    SkFixed y0 = SnapY(SkFDot6ToFixed(SkScalarRoundToFDot6(p0.fY, accuracy)) >> accuracy);
    SkFixed x1 = SkFDot6ToFixed(SkScalarRoundToFDot6(p1.fX, accuracy)) >> accuracy;
    SkFixed y1 = SnapY(SkFDot6ToFixed(SkScalarRoundToFDot6(p1.fY, accuracy)) >> accuracy);
#else
    const int multiplier = (1 << kDefaultAccuracy);
    SkFixed x0 = SkFDot6ToFixed(SkScalarToFDot6(p0.fX * multiplier)) >> accuracy;
    SkFixed y0 = SnapY(SkFDot6ToFixed(SkScalarToFDot6(p0.fY * multiplier)) >> accuracy);
    SkFixed x1 = SkFDot6ToFixed(SkScalarToFDot6(p1.fX * multiplier)) >> accuracy;
    SkFixed y1 = SnapY(SkFDot6ToFixed(SkScalarToFDot6(p1.fY * multiplier)) >> accuracy);
#endif

    // Edges always run top-down; a flipped segment is recorded via the winding.
    int winding = 1;

    if (y0 > y1) {
        using std::swap;
        swap(x0, x1);
        swap(y0, y1);
        winding = -1;
    }

    // are we a zero-height line?
    SkFDot6 dy = SkFixedToFDot6(y1 - y0);
    if (dy == 0) {
        return false;
    }
    SkFDot6 dx = SkFixedToFDot6(x1 - x0);
    SkFixed slope = quick_div(dx, dy);
    SkFixed absSlope = SkAbs32(slope);

    fX = x0;
    fDX = slope;
    fUpperX = x0;
    fY = y0;
    fUpperY = y0;
    fLowerY = y1;
    // fDY is abs(1/fDX); SK_MaxS32 when the slope is (near) zero. Prefer the
    // reciprocal table, falling back to a division for steep slopes.
    fDY = dx == 0 || slope == 0 ? SK_MaxS32 : absSlope < kInverseTableSize
                                            ? quick_inverse(absSlope)
                                            : SkAbs32(quick_div(dy, dx));
    fEdgeType = kLine_Type;
    fCurveCount = 0;
    fWinding = SkToS8(winding);
    fCurveShift = 0;

    return true;
}
+
// This will become a bottleneck for small ovals rendering if we call SkFixedDiv twice here.
// Therefore, we'll let the outer function compute the slope once and send in the value.
// Moreover, we'll compute fDY by quickly looking up the inverse table (if possible).
bool SkAnalyticEdge::updateLine(SkFixed x0, SkFixed y0, SkFixed x1, SkFixed y1, SkFixed slope) {
    // Since we send in the slope, we can no longer snap y inside this function.
    // If we don't send in the slope, or we do some more sophisticated snapping, this function
    // could be a performance bottleneck.
    SkASSERT(fWinding == 1 || fWinding == -1);
    SkASSERT(fCurveCount != 0);

    // We don't chop at y extrema for cubics so the y is not guaranteed to be increasing for them.
    // In that case, we have to swap x/y and negate the winding.
    if (y0 > y1) {
        using std::swap;
        swap(x0, x1);
        swap(y0, y1);
        fWinding = -fWinding;
    }

    SkASSERT(y0 <= y1);

    SkFDot6 dx = SkFixedToFDot6(x1 - x0);
    SkFDot6 dy = SkFixedToFDot6(y1 - y0);

    // are we a zero-height line?
    if (dy == 0) {
        return false;
    }

    SkASSERT(slope < SK_MaxS32);

    SkFDot6 absSlope = SkAbs32(SkFixedToFDot6(slope));
    fX = x0;
    fDX = slope;
    fUpperX = x0;
    fY = y0;
    fUpperY = y0;
    fLowerY = y1;
    // fDY = abs(1/fDX): use the reciprocal table when the slope is small
    // enough, otherwise fall back to a division.
    fDY = (dx == 0 || slope == 0)
            ? SK_MaxS32
            : absSlope < kInverseTableSize
                    ? quick_inverse(absSlope)
                    : SkAbs32(quick_div(dy, dx));

    return true;
}
+
+bool SkAnalyticEdge::update(SkFixed last_y, bool sortY) {
+ SkASSERT(last_y >= fLowerY); // we shouldn't update edge if last_y < fLowerY
+ if (fCurveCount < 0) {
+ return static_cast<SkAnalyticCubicEdge*>(this)->updateCubic(sortY);
+ } else if (fCurveCount > 0) {
+ return static_cast<SkAnalyticQuadraticEdge*>(this)->updateQuadratic();
+ }
+ return false;
+}
+
// Initialize from a quadratic's control points. Returns false if the curve is
// degenerate (setQuadraticWithoutUpdate rejects it).
bool SkAnalyticQuadraticEdge::setQuadratic(const SkPoint pts[3]) {
    fRiteE = nullptr;

    // Compute the forward differences at the higher (kDefaultAccuracy) resolution...
    if (!fQEdge.setQuadraticWithoutUpdate(pts, kDefaultAccuracy)) {
        return false;
    }
    // ...then scale everything back down to plain SkFixed.
    fQEdge.fQx >>= kDefaultAccuracy;
    fQEdge.fQy >>= kDefaultAccuracy;
    fQEdge.fQDx >>= kDefaultAccuracy;
    fQEdge.fQDy >>= kDefaultAccuracy;
    fQEdge.fQDDx >>= kDefaultAccuracy;
    fQEdge.fQDDy >>= kDefaultAccuracy;
    fQEdge.fQLastX >>= kDefaultAccuracy;
    fQEdge.fQLastY >>= kDefaultAccuracy;
    // Snap the curve's start/end y so segment endpoints land on snapped rows.
    fQEdge.fQy = SnapY(fQEdge.fQy);
    fQEdge.fQLastY = SnapY(fQEdge.fQLastY);

    fWinding = fQEdge.fWinding;
    fEdgeType = kQuad_Type;
    fCurveCount = fQEdge.fCurveCount;
    fCurveShift = fQEdge.fCurveShift;

    fSnappedX = fQEdge.fQx;
    fSnappedY = fQEdge.fQy;

    // Set up the first line segment of the quad.
    return this->updateQuadratic();
}
+
// Advance to the next non-degenerate line segment of the quadratic, updating
// this edge (via updateLine) to cover it. Returns true on success; false when
// the curve is exhausted without producing a usable segment.
bool SkAnalyticQuadraticEdge::updateQuadratic() {
    int success = 0; // initialize to fail!
    int count = fCurveCount;
    SkFixed oldx = fQEdge.fQx;
    SkFixed oldy = fQEdge.fQy;
    SkFixed dx = fQEdge.fQDx;
    SkFixed dy = fQEdge.fQDy;
    SkFixed newx, newy, newSnappedX, newSnappedY;
    int shift = fCurveShift;

    SkASSERT(count > 0);

    do {
        SkFixed slope;
        if (--count > 0)
        {
            // Advance one forward-difference step.
            newx = oldx + (dx >> shift);
            newy = oldy + (dy >> shift);
            if (SkAbs32(dy >> shift) >= SK_Fixed1 * 2) { // only snap when dy is large enough
                SkFDot6 diffY = SkFixedToFDot6(newy - fSnappedY);
                slope = diffY ? quick_div(SkFixedToFDot6(newx - fSnappedX), diffY)
                              : SK_MaxS32;
                // Round y to a snapped value (clamped to the curve's last y) and
                // slide x back along the segment so the snapped point stays on it.
                newSnappedY = std::min<SkFixed>(fQEdge.fQLastY, SkFixedRoundToFixed(newy));
                newSnappedX = newx - SkFixedMul(slope, newy - newSnappedY);
            } else {
                newSnappedY = std::min(fQEdge.fQLastY, SnapY(newy));
                newSnappedX = newx;
                SkFDot6 diffY = SkFixedToFDot6(newSnappedY - fSnappedY);
                slope = diffY ? quick_div(SkFixedToFDot6(newx - fSnappedX), diffY)
                              : SK_MaxS32;
            }
            dx += fQEdge.fQDDx;
            dy += fQEdge.fQDDy;
        }
        else // last segment
        {
            newx = fQEdge.fQLastX;
            newy = fQEdge.fQLastY;
            newSnappedY = newy;
            newSnappedX = newx;
            // Shift both deltas by 10 (slope is unchanged), presumably to keep
            // long segments inside SkFDot6 range — TODO confirm.
            SkFDot6 diffY = (newy - fSnappedY) >> 10;
            slope = diffY ? quick_div((newx - fSnappedX) >> 10, diffY) : SK_MaxS32;
        }
        if (slope < SK_MaxS32) {
            success = this->updateLine(fSnappedX, fSnappedY, newSnappedX, newSnappedY, slope);
        }
        oldx = newx;
        oldy = newy;
    } while (count > 0 && !success);

    SkASSERT(newSnappedY <= fQEdge.fQLastY);

    // Persist the stepping state for the next call.
    fQEdge.fQx = newx;
    fQEdge.fQy = newy;
    fQEdge.fQDx = dx;
    fQEdge.fQDy = dy;
    fSnappedX = newSnappedX;
    fSnappedY = newSnappedY;
    fCurveCount = SkToS8(count);
    return success;
}
+
// Initialize from a cubic's control points. Returns false if the curve is
// degenerate (setCubicWithoutUpdate rejects it).
bool SkAnalyticCubicEdge::setCubic(const SkPoint pts[4], bool sortY) {
    fRiteE = nullptr;

    // Compute the forward differences at the higher (kDefaultAccuracy) resolution...
    if (!fCEdge.setCubicWithoutUpdate(pts, kDefaultAccuracy, sortY)) {
        return false;
    }

    // ...then scale everything back down to plain SkFixed.
    fCEdge.fCx >>= kDefaultAccuracy;
    fCEdge.fCy >>= kDefaultAccuracy;
    fCEdge.fCDx >>= kDefaultAccuracy;
    fCEdge.fCDy >>= kDefaultAccuracy;
    fCEdge.fCDDx >>= kDefaultAccuracy;
    fCEdge.fCDDy >>= kDefaultAccuracy;
    fCEdge.fCDDDx >>= kDefaultAccuracy;
    fCEdge.fCDDDy >>= kDefaultAccuracy;
    fCEdge.fCLastX >>= kDefaultAccuracy;
    fCEdge.fCLastY >>= kDefaultAccuracy;
    // Snap the curve's start/end y so segment endpoints land on snapped rows.
    fCEdge.fCy = SnapY(fCEdge.fCy);
    fCEdge.fCLastY = SnapY(fCEdge.fCLastY);

    fWinding = fCEdge.fWinding;
    fEdgeType = kCubic_Type;
    fCurveCount = fCEdge.fCurveCount;
    fCurveShift = fCEdge.fCurveShift;
    fCubicDShift = fCEdge.fCubicDShift;

    fSnappedY = fCEdge.fCy;

    // Set up the first line segment of the cubic.
    return this->updateCubic(sortY);
}
+
// Advance to the next non-degenerate line segment of the cubic, updating this
// edge (via updateLine) to cover it. When sortY is true, y values are pinned
// so they never decrease. Returns true on success; false when the curve is
// exhausted without producing a usable segment.
bool SkAnalyticCubicEdge::updateCubic(bool sortY) {
    int success;
    int count = fCurveCount; // negative: counts up toward 0
    SkFixed oldx = fCEdge.fCx;
    SkFixed oldy = fCEdge.fCy;
    SkFixed newx, newy;
    const int ddshift = fCurveShift;
    const int dshift = fCubicDShift;

    SkASSERT(count < 0);

    do {
        if (++count < 0) {
            // Advance one forward-difference step (first and second differences
            // use different shifts for cubics).
            newx = oldx + (fCEdge.fCDx >> dshift);
            fCEdge.fCDx += fCEdge.fCDDx >> ddshift;
            fCEdge.fCDDx += fCEdge.fCDDDx;

            newy = oldy + (fCEdge.fCDy >> dshift);
            fCEdge.fCDy += fCEdge.fCDDy >> ddshift;
            fCEdge.fCDDy += fCEdge.fCDDDy;
        }
        else { // last segment
            newx = fCEdge.fCLastX;
            newy = fCEdge.fCLastY;
        }

        // we want to say SkASSERT(oldy <= newy), but our finite fixedpoint
        // doesn't always achieve that, so we have to explicitly pin it here.
        if (sortY && newy < oldy) {
            newy = oldy;
        }

        SkFixed newSnappedY = SnapY(newy);
        // we want to SkASSERT(snappedNewY <= fCEdge.fCLastY), but our finite fixedpoint
        // doesn't always achieve that, so we have to explicitly pin it here.
        if (sortY && fCEdge.fCLastY < newSnappedY) {
            newSnappedY = fCEdge.fCLastY;
            count = 0; // stop: we've reached the curve's end
        }

        SkFixed slope = SkFixedToFDot6(newSnappedY - fSnappedY) == 0
                                ? SK_MaxS32
                                : SkFDot6Div(SkFixedToFDot6(newx - oldx),
                                             SkFixedToFDot6(newSnappedY - fSnappedY));

        success = this->updateLine(oldx, fSnappedY, newx, newSnappedY, slope);

        oldx = newx;
        oldy = newy;
        fSnappedY = newSnappedY;
    } while (count < 0 && !success);

    // Persist the stepping state for the next call.
    fCEdge.fCx = newx;
    fCEdge.fCy = newy;
    fCurveCount = SkToS8(count);
    return success;
}
diff --git a/gfx/skia/skia/src/core/SkAnalyticEdge.h b/gfx/skia/skia/src/core/SkAnalyticEdge.h
new file mode 100644
index 0000000000..eb2fa56755
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnalyticEdge.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnalyticEdge_DEFINED
+#define SkAnalyticEdge_DEFINED
+
+#include "include/private/base/SkTo.h"
+#include "src/core/SkEdge.h"
+
+#include <utility>
+
// An edge in SkFixed coordinates used by the analytic AA scan converter
// (see the fRiteE comment's reference to aaa_walk_edges).
struct SkAnalyticEdge {
    // Similar to SkEdge, the conic edges will be converted to quadratic edges
    enum Type {
        kLine_Type,
        kQuad_Type,
        kCubic_Type
    };

    // Doubly-linked list of edges (validate() checks fPrev/fNext consistency).
    SkAnalyticEdge* fNext;
    SkAnalyticEdge* fPrev;

    // During aaa_walk_edges, if this edge is a left edge,
    // then fRiteE is its corresponding right edge. Otherwise it's nullptr.
    SkAnalyticEdge* fRiteE;

    SkFixed fX;
    SkFixed fDX;
    SkFixed fUpperX;        // The x value when y = fUpperY
    SkFixed fY;             // The current y
    SkFixed fUpperY;        // The upper bound of y (our edge is from y = fUpperY to y = fLowerY)
    SkFixed fLowerY;        // The lower bound of y (our edge is from y = fUpperY to y = fLowerY)
    SkFixed fDY;            // abs(1/fDX); may be SK_MaxS32 when fDX is close to 0.
                            // fDY is only used for blitting trapezoids.

    SkFixed fSavedX;        // For deferred blitting
    SkFixed fSavedY;        // For deferred blitting
    SkFixed fSavedDY;       // For deferred blitting

    Type    fEdgeType;      // Remembers the *initial* edge type

    int8_t  fCurveCount;    // only used by kQuad(+) and kCubic(-)
    uint8_t fCurveShift;    // applied to all Dx/DDx/DDDx except for fCubicDShift exception
    uint8_t fCubicDShift;   // applied to fCDx and fCDy only in cubic
    int8_t  fWinding;       // 1 or -1

    static const int kDefaultAccuracy = 2; // default accuracy for snapping

    // Round y to the nearest multiple of (SK_Fixed1 >> kDefaultAccuracy).
    static inline SkFixed SnapY(SkFixed y) {
        const int accuracy = kDefaultAccuracy;
        // This approach is safer than left shift, round, then right shift
        return ((unsigned)y + (SK_Fixed1 >> (accuracy + 1))) >> (16 - accuracy) << (16 - accuracy);
    }

    // Update fX, fY of this edge so fY = y
    inline void goY(SkFixed y) {
        if (y == fY + SK_Fixed1) {
            // Fast path: stepping exactly one pixel row just adds the slope.
            fX = fX + fDX;
            fY = y;
        } else if (y != fY) {
            // Drop lower digits as our alpha only has 8 bits
            // (fDX and y - fUpperY may be greater than SK_Fixed1)
            fX = fUpperX + SkFixedMul(fDX, y - fUpperY);
            fY = y;
        }
    }

    // Step by exactly (SK_Fixed1 >> yShift), asserted by the caller's contract.
    inline void goY(SkFixed y, int yShift) {
        SkASSERT(yShift >= 0 && yShift <= kDefaultAccuracy);
        SkASSERT(fDX == 0 || y - fY == SK_Fixed1 >> yShift);
        fY = y;
        fX += fDX >> yShift;
    }

    // Stash the current position/inverse-slope for deferred blitting.
    inline void saveXY(SkFixed x, SkFixed y, SkFixed dY) {
        fSavedX = x;
        fSavedY = y;
        fSavedDY = dY;
    }

    bool setLine(const SkPoint& p0, const SkPoint& p1);
    bool updateLine(SkFixed ax, SkFixed ay, SkFixed bx, SkFixed by, SkFixed slope);

    // return true if we're NOT done with this edge
    bool update(SkFixed last_y, bool sortY = true);

#ifdef SK_DEBUG
    void dump() const {
        SkDebugf("edge: upperY:%d lowerY:%d y:%g x:%g dx:%g w:%d\n",
                 fUpperY, fLowerY, SkFixedToFloat(fY), SkFixedToFloat(fX),
                 SkFixedToFloat(fDX), fWinding);
    }

    void validate() const {
         SkASSERT(fPrev && fNext);
         SkASSERT(fPrev->fNext == this);
         SkASSERT(fNext->fPrev == this);

         SkASSERT(fUpperY < fLowerY);
         SkASSERT(SkAbs32(fWinding) == 1);
    }
#endif
};
+
// Analytic edge backed by a quadratic curve, stepped segment by segment.
struct SkAnalyticQuadraticEdge : public SkAnalyticEdge {
    SkQuadraticEdge fQEdge;

    // snap y to integer points in the middle of the curve to accelerate AAA path filling
    SkFixed fSnappedX, fSnappedY;

    bool setQuadratic(const SkPoint pts[3]);
    bool updateQuadratic();
    inline void keepContinuous() {
        // We use fX as the starting x to ensure the continuity.
        // Without it, we may break the sorted edge list.
        SkASSERT(SkAbs32(fX - SkFixedMul(fY - fSnappedY, fDX) - fSnappedX) < SK_Fixed1);
        SkASSERT(SkAbs32(fY - fSnappedY) < SK_Fixed1); // This may differ due to smooth jump
        fSnappedX = fX;
        fSnappedY = fY;
    }
};
+
// Analytic edge backed by a cubic curve, stepped segment by segment.
struct SkAnalyticCubicEdge : public SkAnalyticEdge {
    SkCubicEdge fCEdge;

    SkFixed fSnappedY; // to make sure that y is increasing with smooth jump and snapping

    bool setCubic(const SkPoint pts[4], bool sortY = true);
    bool updateCubic(bool sortY = true);
    inline void keepContinuous() {
        // Keep the current x so consecutive segments stay continuous.
        SkASSERT(SkAbs32(fX - SkFixedMul(fDX, fY - SnapY(fCEdge.fCy)) - fCEdge.fCx) < SK_Fixed1);
        fCEdge.fCx = fX;
        fSnappedY = fY;
    }
};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAnnotation.cpp b/gfx/skia/skia/src/core/SkAnnotation.cpp
new file mode 100644
index 0000000000..9af344871d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnnotation.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAnnotation.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkAnnotationKeys.h"
+
// Canonical key strings used to tag drawAnnotation() payloads (see the
// SkAnnotate* helpers below).
const char* SkAnnotationKeys::URL_Key() {
    return "SkAnnotationKey_URL";
}

const char* SkAnnotationKeys::Define_Named_Dest_Key() {
    return "SkAnnotationKey_Define_Named_Dest";
}

const char* SkAnnotationKeys::Link_Named_Dest_Key() {
    return "SkAnnotationKey_Link_Named_Dest";
}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkAnnotateRectWithURL(SkCanvas* canvas, const SkRect& rect, SkData* value) {
+ if (nullptr == value) {
+ return;
+ }
+ canvas->drawAnnotation(rect, SkAnnotationKeys::URL_Key(), value);
+}
+
+void SkAnnotateNamedDestination(SkCanvas* canvas, const SkPoint& point, SkData* name) {
+ if (nullptr == name) {
+ return;
+ }
+ const SkRect rect = SkRect::MakeXYWH(point.x(), point.y(), 0, 0);
+ canvas->drawAnnotation(rect, SkAnnotationKeys::Define_Named_Dest_Key(), name);
+}
+
+void SkAnnotateLinkToDestination(SkCanvas* canvas, const SkRect& rect, SkData* name) {
+ if (nullptr == name) {
+ return;
+ }
+ canvas->drawAnnotation(rect, SkAnnotationKeys::Link_Named_Dest_Key(), name);
+}
diff --git a/gfx/skia/skia/src/core/SkAnnotationKeys.h b/gfx/skia/skia/src/core/SkAnnotationKeys.h
new file mode 100644
index 0000000000..90fdc6d30a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAnnotationKeys.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAnnotationKeys_DEFINED
+#define SkAnnotationKeys_DEFINED
+
+#include "include/core/SkTypes.h"
+
/**
 *  Canonical key strings for canvas annotations (URL, named destinations).
 *  The string values are defined in SkAnnotation.cpp.
 */
class SkAnnotationKeys {
public:
    /**
     *  Returns the canonical key whose payload is a URL
     */
    static const char* URL_Key();

    /**
     *  Returns the canonical key whose payload is the name of a destination to
     *  be defined.
     */
    static const char* Define_Named_Dest_Key();

    /**
     *  Returns the canonical key whose payload is the name of a destination to
     *  be linked to.
     */
    static const char* Link_Named_Dest_Key();
};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAntiRun.h b/gfx/skia/skia/src/core/SkAntiRun.h
new file mode 100644
index 0000000000..5a9800b796
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAntiRun.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAntiRun_DEFINED
+#define SkAntiRun_DEFINED
+
+#include "include/private/base/SkTo.h"
+#include "src/core/SkBlitter.h"
+
+/** Sparse array of run-length-encoded alpha (supersampling coverage) values.
+ Sparseness allows us to independently compose several paths into the
+ same SkAlphaRuns buffer.
+*/
+
class SkAlphaRuns {
public:
    // Parallel arrays: fRuns[i] is the pixel length of the run whose coverage
    // is fAlpha[i]; a zero-length run terminates the scanline.
    // NOTE(review): both appear to point into caller-provided storage (this
    // class never allocates) — confirm with the SkScan users.
    int16_t*    fRuns;
    uint8_t*    fAlpha;

    // Return 0-255 given 0-256
    static inline SkAlpha CatchOverflow(int alpha) {
        SkASSERT(alpha >= 0 && alpha <= 256);
        // alpha - (alpha >> 8) maps 256 -> 255 and leaves 0..255 unchanged.
        return alpha - (alpha >> 8);
    }

    /// Returns true if the scanline contains only a single run,
    /// of alpha value 0.
    bool empty() const {
        SkASSERT(fRuns[0] > 0);
        return fAlpha[0] == 0 && fRuns[fRuns[0]] == 0;
    }

    /// Reinitialize for a new scanline.
    void    reset(int width);

    /**
     *  Insert into the buffer a run starting at (x-offsetX):
     *      if startAlpha > 0
     *          one pixel with value += startAlpha,
     *              max 255
     *      if middleCount > 0
     *          middleCount pixels with value += maxValue
     *      if stopAlpha > 0
     *          one pixel with value += stopAlpha
     *  Returns the offsetX value that should be passed on the next call,
     *  assuming we're on the same scanline. If the caller is switching
     *  scanlines, then offsetX should be 0 when this is called.
     */
    SK_ALWAYS_INLINE int add(int x, U8CPU startAlpha, int middleCount, U8CPU stopAlpha,
                             U8CPU maxValue, int offsetX) {
        SkASSERT(middleCount >= 0);
        SkASSERT(x >= 0 && x + (startAlpha != 0) + middleCount + (stopAlpha != 0) <= fWidth);

        SkASSERT(fRuns[offsetX] >= 0);

        // offsetX lets us resume scanning where the previous add() stopped.
        int16_t*    runs = fRuns + offsetX;
        uint8_t*    alpha = fAlpha + offsetX;
        uint8_t*    lastAlpha = alpha;
        x -= offsetX;

        if (startAlpha) {
            SkAlphaRuns::Break(runs, alpha, x, 1);
            /*  I should be able to just add alpha[x] + startAlpha.
                However, if the trailing edge of the previous span and the leading
                edge of the current span round to the same super-sampled x value,
                I might overflow to 256 with this add, hence the funny subtract (crud).
            */
            unsigned tmp = alpha[x] + startAlpha;
            SkASSERT(tmp <= 256);
            alpha[x] = SkToU8(tmp - (tmp >> 8));    // was (tmp >> 7), but that seems wrong if we're trying to catch 256

            runs += x + 1;
            alpha += x + 1;
            x = 0;
            SkDEBUGCODE(this->validate();)
        }

        if (middleCount) {
            SkAlphaRuns::Break(runs, alpha, x, middleCount);
            alpha += x;
            runs += x;
            x = 0;
            // Add maxValue to every run that falls inside the middle span.
            do {
                alpha[0] = SkToU8(CatchOverflow(alpha[0] + maxValue));
                int n = runs[0];
                SkASSERT(n <= middleCount);
                alpha += n;
                runs += n;
                middleCount -= n;
            } while (middleCount > 0);
            SkDEBUGCODE(this->validate();)
            lastAlpha = alpha;
        }

        if (stopAlpha) {
            SkAlphaRuns::Break(runs, alpha, x, 1);
            alpha += x;
            alpha[0] = SkToU8(alpha[0] + stopAlpha);
            SkDEBUGCODE(this->validate();)
            lastAlpha = alpha;
        }

        return SkToS32(lastAlpha - fAlpha);  // new offsetX
    }

    SkDEBUGCODE(void assertValid(int y, int maxStep) const;)
    SkDEBUGCODE(void dump() const;)

    /**
     *  Break the runs in the buffer at offsets x and x+count, properly
     *  updating the runs to the right and left.
     *  i.e. from the state AAAABBBB, run-length encoded as A4B4,
     *  Break(..., 2, 5) would produce AAAABBBB rle as A2A2B3B1.
     *  Allows add() to sum another run to some of the new sub-runs.
     *  i.e. adding ..CCCCC. would produce AADDEEEB, rle as A2D2E3B1.
     */
    static void Break(int16_t runs[], uint8_t alpha[], int x, int count) {
        SkASSERT(count > 0 && x >= 0);

        //  SkAlphaRuns::BreakAt(runs, alpha, x);
        //  SkAlphaRuns::BreakAt(&runs[x], &alpha[x], count);

        int16_t* next_runs = runs + x;
        uint8_t*  next_alpha = alpha + x;

        // First pass: split the run containing offset x (same logic as BreakAt).
        while (x > 0) {
            int n = runs[0];
            SkASSERT(n > 0);

            if (x < n) {
                alpha[x] = alpha[0];
                runs[0] = SkToS16(x);
                runs[x] = SkToS16(n - x);
                break;
            }
            runs += n;
            alpha += n;
            x -= n;
        }

        // Second pass: split again at x + count, relative to the first split.
        runs = next_runs;
        alpha = next_alpha;
        x = count;

        for (;;) {
            int n = runs[0];
            SkASSERT(n > 0);

            if (x < n) {
                alpha[x] = alpha[0];
                runs[0] = SkToS16(x);
                runs[x] = SkToS16(n - x);
                break;
            }
            x -= n;
            if (x <= 0) {
                break;
            }
            runs += n;
            alpha += n;
        }
    }

    /**
     *  Cut (at offset x in the buffer) a run into two shorter runs with
     *  matching alpha values.
     *  Used by the RectClipBlitter to trim a RLE encoding to match the
     *  clipping rectangle.
     */
    static void BreakAt(int16_t runs[], uint8_t alpha[], int x) {
        while (x > 0) {
            int n = runs[0];
            SkASSERT(n > 0);

            if (x < n) {
                // Split run n into x and n - x, duplicating its alpha.
                alpha[x] = alpha[0];
                runs[0] = SkToS16(x);
                runs[x] = SkToS16(n - x);
                break;
            }
            runs += n;
            alpha += n;
            x -= n;
        }
    }

private:
    SkDEBUGCODE(int fWidth;)
    SkDEBUGCODE(void validate() const;)
};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAutoBlitterChoose.h b/gfx/skia/skia/src/core/SkAutoBlitterChoose.h
new file mode 100644
index 0000000000..34f4272e35
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoBlitterChoose.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoBlitterChoose_DEFINED
+#define SkAutoBlitterChoose_DEFINED
+
+#include "include/private/base/SkMacros.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkDrawBase.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkSurfacePriv.h"
+
+class SkMatrix;
+class SkPaint;
+class SkPixmap;
+
// Picks the blitter for a draw via draw.fBlitterChooser and owns it for the
// duration of the draw: the blitter is allocated from (and destroyed with)
// the member arena, so this object must outlive any use of the blitter.
class SkAutoBlitterChoose : SkNoncopyable {
public:
    SkAutoBlitterChoose() {}
    SkAutoBlitterChoose(const SkDrawBase& draw, const SkMatrixProvider* matrixProvider,
                        const SkPaint& paint, bool drawCoverage = false) {
        this->choose(draw, matrixProvider, paint, drawCoverage);
    }

    SkBlitter*  operator->() { return fBlitter; }
    SkBlitter*  get() const { return fBlitter; }

    // May be called at most once (asserted); falls back to the draw's own
    // matrix provider when none is supplied.
    SkBlitter* choose(const SkDrawBase& draw, const SkMatrixProvider* matrixProvider,
                      const SkPaint& paint, bool drawCoverage = false) {
        SkASSERT(!fBlitter);
        if (!matrixProvider) {
            matrixProvider = draw.fMatrixProvider;
        }
        fBlitter = draw.fBlitterChooser(draw.fDst,
                                        matrixProvider->localToDevice(),
                                        paint,
                                        &fAlloc,
                                        drawCoverage,
                                        draw.fRC->clipShader(),
                                        SkSurfacePropsCopyOrDefault(draw.fProps));
        return fBlitter;
    }

private:
    // Owned by fAlloc, which will handle the delete.
    SkBlitter* fBlitter = nullptr;

    SkSTArenaAlloc<kSkBlitterContextSize> fAlloc;
};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp b/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp
new file mode 100644
index 0000000000..9b7a886d8b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoPixmapStorage.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "src/core/SkAutoPixmapStorage.h"
+
+SkAutoPixmapStorage::SkAutoPixmapStorage() : fStorage(nullptr) {}
+
+SkAutoPixmapStorage::~SkAutoPixmapStorage() {
+ this->freeStorage();
+}
+
+SkAutoPixmapStorage::SkAutoPixmapStorage(SkAutoPixmapStorage&& other) : fStorage(nullptr) {
+ *this = std::move(other);
+}
+
+SkAutoPixmapStorage& SkAutoPixmapStorage::operator=(SkAutoPixmapStorage&& other) {
+ this->fStorage = other.fStorage;
+ this->INHERITED::reset(other.info(), this->fStorage, other.rowBytes());
+
+ other.fStorage = nullptr;
+ other.INHERITED::reset();
+
+ return *this;
+}
+
+size_t SkAutoPixmapStorage::AllocSize(const SkImageInfo& info, size_t* rowBytes) {
+ size_t rb = info.minRowBytes();
+ if (rowBytes) {
+ *rowBytes = rb;
+ }
+ return info.computeByteSize(rb);
+}
+
+bool SkAutoPixmapStorage::tryAlloc(const SkImageInfo& info) {
+ this->freeStorage();
+
+ size_t rb;
+ size_t size = AllocSize(info, &rb);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return false;
+ }
+ void* pixels = sk_malloc_canfail(size);
+ if (nullptr == pixels) {
+ return false;
+ }
+ this->reset(info, pixels, rb);
+ fStorage = pixels;
+ return true;
+}
+
+void SkAutoPixmapStorage::alloc(const SkImageInfo& info) {
+ SkASSERT_RELEASE(this->tryAlloc(info));
+}
+
+void* SkAutoPixmapStorage::detachPixels() {
+ if (!fStorage) {
+ return nullptr;
+ }
+
+ void* data = fStorage;
+ fStorage = nullptr;
+ this->INHERITED::reset();
+
+ return data;
+}
+
+sk_sp<SkData> SkAutoPixmapStorage::detachPixelsAsData() {
+ if (!fStorage) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data = SkData::MakeFromMalloc(fStorage, this->computeByteSize());
+ fStorage = nullptr;
+ this->INHERITED::reset();
+
+ return data;
+}
diff --git a/gfx/skia/skia/src/core/SkAutoPixmapStorage.h b/gfx/skia/skia/src/core/SkAutoPixmapStorage.h
new file mode 100644
index 0000000000..5d651f141c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkAutoPixmapStorage.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoPixmapStorage_DEFINED
+#define SkAutoPixmapStorage_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/private/base/SkMalloc.h"
+
+class SkData;
+
+class SkAutoPixmapStorage : public SkPixmap {
+public:
+ SkAutoPixmapStorage();
+ ~SkAutoPixmapStorage();
+
+ SkAutoPixmapStorage(SkAutoPixmapStorage&& other);
+
+ /**
+ * Leave the moved-from object in a free-but-valid state.
+ */
+ SkAutoPixmapStorage& operator=(SkAutoPixmapStorage&& other);
+
+ /**
+ * Try to allocate memory for the pixels needed to match the specified Info. On success
+ * return true and fill out the pixmap to point to that memory. The storage will be freed
+ * when this object is destroyed, or if another call to tryAlloc() or alloc() is made.
+ *
+ * On failure, return false and reset() the pixmap to empty.
+ */
+ bool tryAlloc(const SkImageInfo&);
+
+ /**
+ * Allocate memory for the pixels needed to match the specified Info and fill out the pixmap
+ * to point to that memory. The storage will be freed when this object is destroyed,
+ * or if another call to tryAlloc() or alloc() is made.
+ *
+ * If the memory cannot be allocated, calls SK_ABORT().
+ */
+ void alloc(const SkImageInfo&);
+
+ /**
+ * Gets the size and optionally the rowBytes that would be allocated by SkAutoPixmapStorage if
+ * alloc/tryAlloc was called.
+ */
+ static size_t AllocSize(const SkImageInfo& info, size_t* rowBytes);
+
+ /**
+ * Returns a void* of the allocated pixel memory and resets the pixmap. If the storage hasn't
+ * been allocated, the result is NULL. The caller is responsible for calling sk_free to free
+ * the returned memory.
+ */
+ void* SK_WARN_UNUSED_RESULT detachPixels();
+
+ /**
+ * Returns an SkData object wrapping the allocated pixels memory, and resets the pixmap.
+ * If the storage hasn't been allocated, the result is NULL.
+ */
+ sk_sp<SkData> SK_WARN_UNUSED_RESULT detachPixelsAsData();
+
+ // We wrap these so we can clear our internal storage
+
+ void reset() {
+ this->freeStorage();
+ this->INHERITED::reset();
+ }
+ void reset(const SkImageInfo& info, const void* addr, size_t rb) {
+ this->freeStorage();
+ this->INHERITED::reset(info, addr, rb);
+ }
+
+ bool SK_WARN_UNUSED_RESULT reset(const SkMask& mask) {
+ this->freeStorage();
+ return this->INHERITED::reset(mask);
+ }
+
+private:
+ void* fStorage;
+
+ void freeStorage() {
+ sk_free(fStorage);
+ fStorage = nullptr;
+ }
+
+ using INHERITED = SkPixmap;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBBHFactory.cpp b/gfx/skia/skia/src/core/SkBBHFactory.cpp
new file mode 100644
index 0000000000..18853a0052
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBBHFactory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBBHFactory.h"
+#include "src/core/SkRTree.h"
+
+sk_sp<SkBBoxHierarchy> SkRTreeFactory::operator()() const {
+ return sk_make_sp<SkRTree>();
+}
+
+void SkBBoxHierarchy::insert(const SkRect rects[], const Metadata[], int N) {
+ // Ignore Metadata.
+ this->insert(rects, N);
+}
diff --git a/gfx/skia/skia/src/core/SkBigPicture.cpp b/gfx/skia/skia/src/core/SkBigPicture.cpp
new file mode 100644
index 0000000000..887b9df034
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBigPicture.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBBHFactory.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/core/SkTraceEvent.h"
+
+SkBigPicture::SkBigPicture(const SkRect& cull,
+ sk_sp<SkRecord> record,
+ std::unique_ptr<SnapshotArray> drawablePicts,
+ sk_sp<SkBBoxHierarchy> bbh,
+ size_t approxBytesUsedBySubPictures)
+ : fCullRect(cull)
+ , fApproxBytesUsedBySubPictures(approxBytesUsedBySubPictures)
+ , fRecord(std::move(record))
+ , fDrawablePicts(std::move(drawablePicts))
+ , fBBH(std::move(bbh))
+{}
+
+void SkBigPicture::playback(SkCanvas* canvas, AbortCallback* callback) const {
+ SkASSERT(canvas);
+
+ // If the query contains the whole picture, don't bother with the BBH.
+ const bool useBBH = !canvas->getLocalClipBounds().contains(this->cullRect());
+
+ SkRecordDraw(*fRecord,
+ canvas,
+ this->drawablePicts(),
+ nullptr,
+ this->drawableCount(),
+ useBBH ? fBBH.get() : nullptr,
+ callback);
+}
+
+void SkBigPicture::partialPlayback(SkCanvas* canvas,
+ int start,
+ int stop,
+ const SkM44& initialCTM) const {
+ SkASSERT(canvas);
+ SkRecordPartialDraw(*fRecord,
+ canvas,
+ this->drawablePicts(),
+ this->drawableCount(),
+ start,
+ stop,
+ initialCTM);
+}
+
+struct NestedApproxOpCounter {
+ int fCount = 0;
+
+ template <typename T> void operator()(const T& op) {
+ fCount += 1;
+ }
+ void operator()(const SkRecords::DrawPicture& op) {
+ fCount += op.picture->approximateOpCount(true);
+ }
+};
+
+SkRect SkBigPicture::cullRect() const { return fCullRect; }
+int SkBigPicture::approximateOpCount(bool nested) const {
+ if (nested) {
+ NestedApproxOpCounter visitor;
+ for (int i = 0; i < fRecord->count(); i++) {
+ fRecord->visit(i, visitor);
+ }
+ return visitor.fCount;
+ } else {
+ return fRecord->count();
+ }
+}
+size_t SkBigPicture::approximateBytesUsed() const {
+ size_t bytes = sizeof(*this) + fRecord->bytesUsed() + fApproxBytesUsedBySubPictures;
+ if (fBBH) { bytes += fBBH->bytesUsed(); }
+ return bytes;
+}
+
+int SkBigPicture::drawableCount() const {
+ return fDrawablePicts ? fDrawablePicts->count() : 0;
+}
+
+SkPicture const* const* SkBigPicture::drawablePicts() const {
+ return fDrawablePicts ? fDrawablePicts->begin() : nullptr;
+}
+
diff --git a/gfx/skia/skia/src/core/SkBigPicture.h b/gfx/skia/skia/src/core/SkBigPicture.h
new file mode 100644
index 0000000000..09ae1e244f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBigPicture.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBigPicture_DEFINED
+#define SkBigPicture_DEFINED
+
+#include "include/core/SkM44.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRect.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTemplates.h"
+
+class SkBBoxHierarchy;
+class SkMatrix;
+class SkRecord;
+
+// An implementation of SkPicture supporting an arbitrary number of drawing commands.
+// This is called "big" because there used to be a "mini" that only supported a subset of the
+// calls as an optimization.
+class SkBigPicture final : public SkPicture {
+public:
+ // An array of refcounted const SkPicture pointers.
+ class SnapshotArray : ::SkNoncopyable {
+ public:
+ SnapshotArray(const SkPicture* pics[], int count) : fPics(pics), fCount(count) {}
+ ~SnapshotArray() { for (int i = 0; i < fCount; i++) { fPics[i]->unref(); } }
+
+ const SkPicture* const* begin() const { return fPics; }
+ int count() const { return fCount; }
+ private:
+ skia_private::AutoTMalloc<const SkPicture*> fPics;
+ int fCount;
+ };
+
+ SkBigPicture(const SkRect& cull,
+ sk_sp<SkRecord>,
+ std::unique_ptr<SnapshotArray>,
+ sk_sp<SkBBoxHierarchy>,
+ size_t approxBytesUsedBySubPictures);
+
+
+// SkPicture overrides
+ void playback(SkCanvas*, AbortCallback*) const override;
+ SkRect cullRect() const override;
+ int approximateOpCount(bool nested) const override;
+ size_t approximateBytesUsed() const override;
+ const SkBigPicture* asSkBigPicture() const override { return this; }
+
+// Used by GrLayerHoister
+ void partialPlayback(SkCanvas*,
+ int start,
+ int stop,
+ const SkM44& initialCTM) const;
+// Used by GrRecordReplaceDraw
+ const SkBBoxHierarchy* bbh() const { return fBBH.get(); }
+ const SkRecord* record() const { return fRecord.get(); }
+
+private:
+ int drawableCount() const;
+ SkPicture const* const* drawablePicts() const;
+
+ const SkRect fCullRect;
+ const size_t fApproxBytesUsedBySubPictures;
+ sk_sp<const SkRecord> fRecord;
+ std::unique_ptr<const SnapshotArray> fDrawablePicts;
+ sk_sp<const SkBBoxHierarchy> fBBH;
+};
+
+#endif//SkBigPicture_DEFINED
diff --git a/gfx/skia/skia/src/core/SkBitmap.cpp b/gfx/skia/skia/src/core/SkBitmap.cpp
new file mode 100644
index 0000000000..c89b53001c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmap.cpp
@@ -0,0 +1,671 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+
+#include "include/core/SkColorSpace.h" // IWYU pragma: keep
+#include "include/core/SkColorType.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkMallocPixelRef.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTileMode.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkPixelRefPriv.h"
+#include "src/core/SkWritePixelsRec.h"
+#include "src/shaders/SkImageShader.h"
+
+#include <cstring>
+#include <utility>
+class SkMaskFilter;
+
+static bool reset_return_false(SkBitmap* bm) {
+ bm->reset();
+ return false;
+}
+
+SkBitmap::SkBitmap() {}
+
+SkBitmap::SkBitmap(const SkBitmap& src)
+ : fPixelRef (src.fPixelRef)
+ , fPixmap (src.fPixmap)
+ , fMips (src.fMips)
+{
+ SkDEBUGCODE(src.validate();)
+ SkDEBUGCODE(this->validate();)
+}
+
+SkBitmap::SkBitmap(SkBitmap&& other)
+ : fPixelRef (std::move(other.fPixelRef))
+ , fPixmap (std::move(other.fPixmap))
+ , fMips (std::move(other.fMips))
+{
+ SkASSERT(!other.fPixelRef);
+ other.fPixmap.reset();
+}
+
+SkBitmap::~SkBitmap() {}
+
+SkBitmap& SkBitmap::operator=(const SkBitmap& src) {
+ if (this != &src) {
+ fPixelRef = src.fPixelRef;
+ fPixmap = src.fPixmap;
+ fMips = src.fMips;
+ }
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+SkBitmap& SkBitmap::operator=(SkBitmap&& other) {
+ if (this != &other) {
+ fPixelRef = std::move(other.fPixelRef);
+ fPixmap = std::move(other.fPixmap);
+ fMips = std::move(other.fMips);
+ SkASSERT(!other.fPixelRef);
+ other.fPixmap.reset();
+ }
+ return *this;
+}
+
+void SkBitmap::swap(SkBitmap& other) {
+ using std::swap;
+ swap(*this, other);
+ SkDEBUGCODE(this->validate();)
+}
+
+void SkBitmap::reset() {
+ fPixelRef = nullptr; // Free pixels.
+ fPixmap.reset();
+ fMips.reset();
+}
+
+void SkBitmap::getBounds(SkRect* bounds) const {
+ SkASSERT(bounds);
+ *bounds = SkRect::Make(this->dimensions());
+}
+
+void SkBitmap::getBounds(SkIRect* bounds) const {
+ SkASSERT(bounds);
+ *bounds = fPixmap.bounds();
+}
+
+SkColorSpace* SkBitmap::colorSpace() const { return fPixmap.colorSpace(); }
+
+sk_sp<SkColorSpace> SkBitmap::refColorSpace() const { return fPixmap.info().refColorSpace(); }
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::setInfo(const SkImageInfo& info, size_t rowBytes) {
+ SkAlphaType newAT = info.alphaType();
+ if (!SkColorTypeValidateAlphaType(info.colorType(), info.alphaType(), &newAT)) {
+ return reset_return_false(this);
+ }
+ // don't look at info.alphaType(), since newAT is the real value...
+
+ // require that rowBytes fit in 31bits
+ int64_t mrb = info.minRowBytes64();
+ if (!SkTFitsIn<int32_t>(mrb)) {
+ return reset_return_false(this);
+ }
+ if (!SkTFitsIn<int32_t>(rowBytes)) {
+ return reset_return_false(this);
+ }
+
+ if (info.width() < 0 || info.height() < 0) {
+ return reset_return_false(this);
+ }
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ rowBytes = 0;
+ } else if (0 == rowBytes) {
+ rowBytes = (size_t)mrb;
+ } else if (!info.validRowBytes(rowBytes)) {
+ return reset_return_false(this);
+ }
+
+ fPixelRef = nullptr; // Free pixels.
+ fPixmap.reset(info.makeAlphaType(newAT), nullptr, SkToU32(rowBytes));
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+
+
+bool SkBitmap::setAlphaType(SkAlphaType newAlphaType) {
+ if (!SkColorTypeValidateAlphaType(this->colorType(), newAlphaType, &newAlphaType)) {
+ return false;
+ }
+ if (this->alphaType() != newAlphaType) {
+ auto newInfo = fPixmap.info().makeAlphaType(newAlphaType);
+ fPixmap.reset(std::move(newInfo), fPixmap.addr(), fPixmap.rowBytes());
+ }
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+SkIPoint SkBitmap::pixelRefOrigin() const {
+ const char* addr = (const char*)fPixmap.addr();
+ const char* pix = (const char*)(fPixelRef ? fPixelRef->pixels() : nullptr);
+ size_t rb = this->rowBytes();
+ if (!pix || 0 == rb) {
+ return {0, 0};
+ }
+ SkASSERT(this->bytesPerPixel() > 0);
+ SkASSERT(this->bytesPerPixel() == (1 << this->shiftPerPixel()));
+ SkASSERT(addr >= pix);
+ size_t off = addr - pix;
+ return {SkToS32((off % rb) >> this->shiftPerPixel()), SkToS32(off / rb)};
+}
+
+void SkBitmap::setPixelRef(sk_sp<SkPixelRef> pr, int dx, int dy) {
+#ifdef SK_DEBUG
+ if (pr) {
+ if (kUnknown_SkColorType != this->colorType()) {
+ SkASSERT(dx >= 0 && this->width() + dx <= pr->width());
+ SkASSERT(dy >= 0 && this->height() + dy <= pr->height());
+ }
+ }
+#endif
+ fPixelRef = kUnknown_SkColorType != this->colorType() ? std::move(pr) : nullptr;
+ void* p = nullptr;
+ size_t rowBytes = this->rowBytes();
+ // ignore dx,dy if there is no pixelref
+ if (fPixelRef) {
+ rowBytes = fPixelRef->rowBytes();
+ // TODO(reed): Enforce that PixelRefs must have non-null pixels.
+ p = fPixelRef->pixels();
+ if (p) {
+ p = (char*)p + dy * rowBytes + dx * this->bytesPerPixel();
+ }
+ }
+ fPixmap.reset(fPixmap.info(), p, rowBytes);
+ SkDEBUGCODE(this->validate();)
+}
+
+void SkBitmap::setPixels(void* p) {
+ if (kUnknown_SkColorType == this->colorType()) {
+ p = nullptr;
+ }
+ size_t rb = this->rowBytes();
+ fPixmap.reset(fPixmap.info(), p, rb);
+ fPixelRef = p ? sk_make_sp<SkPixelRef>(this->width(), this->height(), p, rb) : nullptr;
+ SkDEBUGCODE(this->validate();)
+}
+
+bool SkBitmap::tryAllocPixels(Allocator* allocator) {
+ HeapAllocator stdalloc;
+
+ if (nullptr == allocator) {
+ allocator = &stdalloc;
+ }
+ return allocator->allocPixelRef(this);
+}
+
+bool SkBitmap::tryAllocN32Pixels(int width, int height, bool isOpaque) {
+ SkImageInfo info = SkImageInfo::MakeN32(width, height,
+ isOpaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType);
+ return this->tryAllocPixels(info);
+}
+
+void SkBitmap::allocN32Pixels(int width, int height, bool isOpaque) {
+ SkImageInfo info = SkImageInfo::MakeN32(width, height,
+ isOpaque ? kOpaque_SkAlphaType : kPremul_SkAlphaType);
+ this->allocPixels(info);
+}
+
+void SkBitmap::allocPixels() {
+ this->allocPixels((Allocator*)nullptr);
+}
+
+void SkBitmap::allocPixels(Allocator* allocator) {
+ if (!this->tryAllocPixels(allocator)) {
+ const SkImageInfo& info = this->info();
+ SK_ABORT("SkBitmap::tryAllocPixels failed "
+ "ColorType:%d AlphaType:%d [w:%d h:%d] rb:%zu",
+ info.colorType(), info.alphaType(), info.width(), info.height(), this->rowBytes());
+ }
+}
+
+void SkBitmap::allocPixelsFlags(const SkImageInfo& info, uint32_t flags) {
+ SkASSERT_RELEASE(this->tryAllocPixelsFlags(info, flags));
+}
+
+void SkBitmap::allocPixels(const SkImageInfo& info, size_t rowBytes) {
+ SkASSERT_RELEASE(this->tryAllocPixels(info, rowBytes));
+}
+
+void SkBitmap::allocPixels(const SkImageInfo& info) {
+ this->allocPixels(info, info.minRowBytes());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::tryAllocPixels(const SkImageInfo& requestedInfo, size_t rowBytes) {
+ if (!this->setInfo(requestedInfo, rowBytes)) {
+ return reset_return_false(this);
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+ if (kUnknown_SkColorType == correctedInfo.colorType()) {
+ return true;
+ }
+ // setInfo may have computed a valid rowbytes if 0 were passed in
+ rowBytes = this->rowBytes();
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(correctedInfo, rowBytes);
+ if (!pr) {
+ return reset_return_false(this);
+ }
+ this->setPixelRef(std::move(pr), 0, 0);
+ if (nullptr == this->getPixels()) {
+ return reset_return_false(this);
+ }
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+bool SkBitmap::tryAllocPixelsFlags(const SkImageInfo& requestedInfo, uint32_t allocFlags) {
+ if (!this->setInfo(requestedInfo)) {
+ return reset_return_false(this);
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(correctedInfo,
+ correctedInfo.minRowBytes());
+ if (!pr) {
+ return reset_return_false(this);
+ }
+ this->setPixelRef(std::move(pr), 0, 0);
+ if (nullptr == this->getPixels()) {
+ return reset_return_false(this);
+ }
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+static void invoke_release_proc(void (*proc)(void* pixels, void* ctx), void* pixels, void* ctx) {
+ if (proc) {
+ proc(pixels, ctx);
+ }
+}
+
+bool SkBitmap::installPixels(const SkImageInfo& requestedInfo, void* pixels, size_t rb,
+ void (*releaseProc)(void* addr, void* context), void* context) {
+ if (!this->setInfo(requestedInfo, rb)) {
+ invoke_release_proc(releaseProc, pixels, context);
+ this->reset();
+ return false;
+ }
+ if (nullptr == pixels) {
+ invoke_release_proc(releaseProc, pixels, context);
+ return true; // we behaved as if they called setInfo()
+ }
+
+ // setInfo may have corrected info (e.g. 565 is always opaque).
+ const SkImageInfo& correctedInfo = this->info();
+ this->setPixelRef(
+ SkMakePixelRefWithProc(correctedInfo.width(), correctedInfo.height(),
+ rb, pixels, releaseProc, context), 0, 0);
+ SkDEBUGCODE(this->validate();)
+ return true;
+}
+
+bool SkBitmap::installPixels(const SkPixmap& pixmap) {
+ return this->installPixels(pixmap.info(), pixmap.writable_addr(), pixmap.rowBytes(),
+ nullptr, nullptr);
+}
+
+bool SkBitmap::installMaskPixels(const SkMask& mask) {
+ if (SkMask::kA8_Format != mask.fFormat) {
+ this->reset();
+ return false;
+ }
+ return this->installPixels(SkImageInfo::MakeA8(mask.fBounds.width(),
+ mask.fBounds.height()),
+ mask.fImage, mask.fRowBytes);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+uint32_t SkBitmap::getGenerationID() const {
+ return fPixelRef ? fPixelRef->getGenerationID() : 0;
+}
+
+void SkBitmap::notifyPixelsChanged() const {
+ SkASSERT(!this->isImmutable());
+ if (fPixelRef) {
+ fPixelRef->notifyPixelsChanged();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** We explicitly use the same allocator for our pixels that SkMask does,
+ so that we can freely assign memory allocated by one class to the other.
+ */
+bool SkBitmap::HeapAllocator::allocPixelRef(SkBitmap* dst) {
+ const SkImageInfo& info = dst->info();
+ if (kUnknown_SkColorType == info.colorType()) {
+// SkDebugf("unsupported config for info %d\n", dst->config());
+ return false;
+ }
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(info, dst->rowBytes());
+ if (!pr) {
+ return false;
+ }
+
+ dst->setPixelRef(std::move(pr), 0, 0);
+ SkDEBUGCODE(dst->validate();)
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::isImmutable() const {
+ return fPixelRef ? fPixelRef->isImmutable() : false;
+}
+
+void SkBitmap::setImmutable() {
+ if (fPixelRef) {
+ fPixelRef->setImmutable();
+ }
+}
+
+void* SkBitmap::getAddr(int x, int y) const {
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ char* base = (char*)this->getPixels();
+ if (base) {
+ base += (y * this->rowBytes()) + (x << this->shiftPerPixel());
+ }
+ return base;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmap::erase(SkColor4f c, SkColorSpace* colorSpace, const SkIRect& area) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (kUnknown_SkColorType == this->colorType()) {
+ // TODO: can we ASSERT that we never get here?
+ return; // can't erase. Should we bzero so the memory is not uninitialized?
+ }
+
+ SkPixmap result;
+ if (!this->peekPixels(&result)) {
+ return;
+ }
+
+ if (result.erase(c, colorSpace, &area)) {
+ this->notifyPixelsChanged();
+ }
+}
+
+void SkBitmap::erase(SkColor c, const SkIRect& area) const {
+ this->erase(SkColor4f::FromColor(c), nullptr, area);
+}
+
+void SkBitmap::erase(SkColor4f c, const SkIRect& area) const {
+ this->erase(c, nullptr, area);
+}
+
+void SkBitmap::eraseColor(SkColor4f c, SkColorSpace* colorSpace) const {
+ this->erase(c, colorSpace, SkIRect::MakeWH(this->width(), this->height()));
+}
+
+void SkBitmap::eraseColor(SkColor c) const {
+ this->erase(SkColor4f::FromColor(c), nullptr, SkIRect::MakeWH(this->width(), this->height()));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::extractSubset(SkBitmap* result, const SkIRect& subset) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (nullptr == result || !fPixelRef) {
+ return false; // no src pixels
+ }
+
+ SkIRect srcRect, r;
+ srcRect.setWH(this->width(), this->height());
+ if (!r.intersect(srcRect, subset)) {
+ return false; // r is empty (i.e. no intersection)
+ }
+
+ // If the upper left of the rectangle was outside the bounds of this SkBitmap, we should have
+ // exited above.
+ SkASSERT(static_cast<unsigned>(r.fLeft) < static_cast<unsigned>(this->width()));
+ SkASSERT(static_cast<unsigned>(r.fTop) < static_cast<unsigned>(this->height()));
+
+ SkBitmap dst;
+ dst.setInfo(this->info().makeDimensions(r.size()), this->rowBytes());
+
+ if (fPixelRef) {
+ SkIPoint origin = this->pixelRefOrigin();
+ // share the pixelref with a custom offset
+ dst.setPixelRef(fPixelRef, origin.x() + r.fLeft, origin.y() + r.fTop);
+ }
+ SkDEBUGCODE(dst.validate();)
+
+ // we know we're good, so commit to result
+ result->swap(dst);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::readPixels(const SkImageInfo& requestedDstInfo, void* dstPixels, size_t dstRB,
+ int x, int y) const {
+ SkPixmap src;
+ if (!this->peekPixels(&src)) {
+ return false;
+ }
+ return src.readPixels(requestedDstInfo, dstPixels, dstRB, x, y);
+}
+
+bool SkBitmap::readPixels(const SkPixmap& dst, int srcX, int srcY) const {
+ return this->readPixels(dst.info(), dst.writable_addr(), dst.rowBytes(), srcX, srcY);
+}
+
+bool SkBitmap::writePixels(const SkPixmap& src, int dstX, int dstY) {
+ if (!SkImageInfoValidConversion(this->info(), src.info())) {
+ return false;
+ }
+
+ SkWritePixelsRec rec(src.info(), src.addr(), src.rowBytes(), dstX, dstY);
+ if (!rec.trim(this->width(), this->height())) {
+ return false;
+ }
+
+ void* dstPixels = this->getAddr(rec.fX, rec.fY);
+ const SkImageInfo dstInfo = this->info().makeDimensions(rec.fInfo.dimensions());
+ if (!SkConvertPixels(dstInfo, dstPixels, this->rowBytes(),
+ rec.fInfo, rec.fPixels, rec.fRowBytes)) {
+ return false;
+ }
+ this->notifyPixelsChanged();
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool GetBitmapAlpha(const SkBitmap& src, uint8_t* SK_RESTRICT alpha, int alphaRowBytes) {
+ SkASSERT(alpha != nullptr);
+ SkASSERT(alphaRowBytes >= src.width());
+
+ SkPixmap pmap;
+ if (!src.peekPixels(&pmap)) {
+ for (int y = 0; y < src.height(); ++y) {
+ memset(alpha, 0, src.width());
+ alpha += alphaRowBytes;
+ }
+ return false;
+ }
+ return SkConvertPixels(SkImageInfo::MakeA8(pmap.width(), pmap.height()), alpha, alphaRowBytes,
+ pmap.info(), pmap.addr(), pmap.rowBytes());
+}
+
+bool SkBitmap::extractAlpha(SkBitmap* dst, const SkPaint* paint,
+ Allocator *allocator, SkIPoint* offset) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkBitmap tmpBitmap;
+ SkMatrix identity;
+ SkMask srcM, dstM;
+
+ if (this->width() == 0 || this->height() == 0) {
+ return false;
+ }
+ srcM.fBounds.setWH(this->width(), this->height());
+ srcM.fRowBytes = SkAlign4(this->width());
+ srcM.fFormat = SkMask::kA8_Format;
+
+ SkMaskFilter* filter = paint ? paint->getMaskFilter() : nullptr;
+
+ // compute our (larger?) dst bounds if we have a filter
+ if (filter) {
+ identity.reset();
+ if (!as_MFB(filter)->filterMask(&dstM, srcM, identity, nullptr)) {
+ goto NO_FILTER_CASE;
+ }
+ dstM.fRowBytes = SkAlign4(dstM.fBounds.width());
+ } else {
+ NO_FILTER_CASE:
+ tmpBitmap.setInfo(SkImageInfo::MakeA8(this->width(), this->height()), srcM.fRowBytes);
+ if (!tmpBitmap.tryAllocPixels(allocator)) {
+ // Allocation of pixels for alpha bitmap failed.
+ SkDebugf("extractAlpha failed to allocate (%d,%d) alpha bitmap\n",
+ tmpBitmap.width(), tmpBitmap.height());
+ return false;
+ }
+ GetBitmapAlpha(*this, tmpBitmap.getAddr8(0, 0), srcM.fRowBytes);
+ if (offset) {
+ offset->set(0, 0);
+ }
+ tmpBitmap.swap(*dst);
+ return true;
+ }
+ srcM.fImage = SkMask::AllocImage(srcM.computeImageSize());
+ SkAutoMaskFreeImage srcCleanup(srcM.fImage);
+
+ GetBitmapAlpha(*this, srcM.fImage, srcM.fRowBytes);
+ if (!as_MFB(filter)->filterMask(&dstM, srcM, identity, nullptr)) {
+ goto NO_FILTER_CASE;
+ }
+ SkAutoMaskFreeImage dstCleanup(dstM.fImage);
+
+ tmpBitmap.setInfo(SkImageInfo::MakeA8(dstM.fBounds.width(), dstM.fBounds.height()),
+ dstM.fRowBytes);
+ if (!tmpBitmap.tryAllocPixels(allocator)) {
+ // Allocation of pixels for alpha bitmap failed.
+ SkDebugf("extractAlpha failed to allocate (%d,%d) alpha bitmap\n",
+ tmpBitmap.width(), tmpBitmap.height());
+ return false;
+ }
+ memcpy(tmpBitmap.getPixels(), dstM.fImage, dstM.computeImageSize());
+ if (offset) {
+ offset->set(dstM.fBounds.fLeft, dstM.fBounds.fTop);
+ }
+ SkDEBUGCODE(tmpBitmap.validate();)
+
+ tmpBitmap.swap(*dst);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkBitmap::validate() const {
+ this->info().validate();
+
+ SkASSERT(this->info().validRowBytes(this->rowBytes()));
+
+ if (fPixelRef && fPixelRef->pixels()) {
+ SkASSERT(this->getPixels());
+ } else {
+ SkASSERT(!this->getPixels());
+ }
+
+ if (this->getPixels()) {
+ SkASSERT(fPixelRef);
+ SkASSERT(fPixelRef->rowBytes() == this->rowBytes());
+ SkIPoint origin = this->pixelRefOrigin();
+ SkASSERT(origin.fX >= 0);
+ SkASSERT(origin.fY >= 0);
+ SkASSERT(fPixelRef->width() >= (int)this->width() + origin.fX);
+ SkASSERT(fPixelRef->height() >= (int)this->height() + origin.fY);
+ SkASSERT(fPixelRef->rowBytes() >= this->info().minRowBytes());
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBitmap::peekPixels(SkPixmap* pmap) const {
+ if (this->getPixels()) {
+ if (pmap) {
+ *pmap = fPixmap;
+ }
+ return true;
+ }
+ return false;
+}
+
+sk_sp<SkImage> SkBitmap::asImage() const {
+ return SkImage::MakeFromBitmap(*this);
+}
+
+sk_sp<SkShader> SkBitmap::makeShader(const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const {
+ return this->makeShader(SkTileMode::kClamp, SkTileMode::kClamp,
+ sampling, &lm);
+}
+
+sk_sp<SkShader> SkBitmap::makeShader(const SkSamplingOptions& sampling,
+ const SkMatrix* lm) const {
+ return this->makeShader(SkTileMode::kClamp, SkTileMode::kClamp,
+ sampling, lm);
+}
+
+sk_sp<SkShader> SkBitmap::makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const {
+ if (!lm.invert(nullptr)) {
+ return nullptr;
+ }
+ return SkImageShader::Make(SkMakeImageFromRasterBitmap(*this, kIfMutable_SkCopyPixelsMode),
+ tmx, tmy, sampling, &lm);
+}
+
+sk_sp<SkShader> SkBitmap::makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& sampling,
+ const SkMatrix* lm) const {
+ if (lm && !lm->invert(nullptr)) {
+ return nullptr;
+ }
+ return SkImageShader::Make(SkMakeImageFromRasterBitmap(*this, kIfMutable_SkCopyPixelsMode),
+ tmx, tmy, sampling, lm);
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapCache.cpp b/gfx/skia/skia/src/core/SkBitmapCache.cpp
new file mode 100644
index 0000000000..5b53273d65
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapCache.cpp
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImage.h"
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkResourceCache.h"
+#include "src/image/SkImage_Base.h"
+
+/**
+ * Use this for bitmapcache and mipmapcache entries.
+ */
+uint64_t SkMakeResourceCacheSharedIDForBitmap(uint32_t bitmapGenID) {
+ uint64_t sharedID = SkSetFourByteTag('b', 'm', 'a', 'p');
+ return (sharedID << 32) | bitmapGenID;
+}
+
+void SkNotifyBitmapGenIDIsStale(uint32_t bitmapGenID) {
+ SkResourceCache::PostPurgeSharedID(SkMakeResourceCacheSharedIDForBitmap(bitmapGenID));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBitmapCacheDesc SkBitmapCacheDesc::Make(uint32_t imageID, const SkIRect& subset) {
+ SkASSERT(imageID);
+ SkASSERT(subset.width() > 0 && subset.height() > 0);
+ return { imageID, subset };
+}
+
+SkBitmapCacheDesc SkBitmapCacheDesc::Make(const SkImage* image) {
+ SkIRect bounds = SkIRect::MakeWH(image->width(), image->height());
+ return Make(image->uniqueID(), bounds);
+}
+
+namespace {
+static unsigned gBitmapKeyNamespaceLabel;
+
+struct BitmapKey : public SkResourceCache::Key {
+public:
+ BitmapKey(const SkBitmapCacheDesc& desc) : fDesc(desc) {
+ this->init(&gBitmapKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(fDesc.fImageID),
+ sizeof(fDesc));
+ }
+
+ const SkBitmapCacheDesc fDesc;
+};
+} // namespace
+
+//////////////////////
+#include "include/private/chromium/SkDiscardableMemory.h"
+#include "src/core/SkNextID.h"
+
+void SkBitmapCache_setImmutableWithID(SkPixelRef* pr, uint32_t id) {
+ pr->setImmutableWithID(id);
+}
+
+class SkBitmapCache::Rec : public SkResourceCache::Rec {
+public:
+ Rec(const SkBitmapCacheDesc& desc, const SkImageInfo& info, size_t rowBytes,
+ std::unique_ptr<SkDiscardableMemory> dm, void* block)
+ : fKey(desc)
+ , fDM(std::move(dm))
+ , fMalloc(block)
+ , fInfo(info)
+ , fRowBytes(rowBytes)
+ {
+ SkASSERT(!(fDM && fMalloc)); // can't have both
+
+ // We need an ID to return with the bitmap/pixelref. We can't necessarily use the key/desc
+ // ID - lazy images cache the same ID with multiple keys (in different color types).
+ fPrUniqueID = SkNextID::ImageID();
+ }
+
+ ~Rec() override {
+ SkASSERT(0 == fExternalCounter);
+ if (fDM && fDiscardableIsLocked) {
+ SkASSERT(fDM->data());
+ fDM->unlock();
+ }
+ sk_free(fMalloc); // may be null
+ }
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override {
+ return sizeof(fKey) + fInfo.computeByteSize(fRowBytes);
+ }
+ bool canBePurged() override {
+ SkAutoMutexExclusive ama(fMutex);
+ return fExternalCounter == 0;
+ }
+ void postAddInstall(void* payload) override {
+ SkAssertResult(this->install(static_cast<SkBitmap*>(payload)));
+ }
+
+ const char* getCategory() const override { return "bitmap"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fDM.get();
+ }
+
+ static void ReleaseProc(void* addr, void* ctx) {
+ Rec* rec = static_cast<Rec*>(ctx);
+ SkAutoMutexExclusive ama(rec->fMutex);
+
+ SkASSERT(rec->fExternalCounter > 0);
+ rec->fExternalCounter -= 1;
+ if (rec->fDM) {
+ SkASSERT(rec->fMalloc == nullptr);
+ if (rec->fExternalCounter == 0) {
+ rec->fDM->unlock();
+ rec->fDiscardableIsLocked = false;
+ }
+ } else {
+ SkASSERT(rec->fMalloc != nullptr);
+ }
+ }
+
+ bool install(SkBitmap* bitmap) {
+ SkAutoMutexExclusive ama(fMutex);
+
+ if (!fDM && !fMalloc) {
+ return false;
+ }
+
+ if (fDM) {
+ if (!fDiscardableIsLocked) {
+ SkASSERT(fExternalCounter == 0);
+ if (!fDM->lock()) {
+ fDM.reset(nullptr);
+ return false;
+ }
+ fDiscardableIsLocked = true;
+ }
+ SkASSERT(fDM->data());
+ }
+
+ bitmap->installPixels(fInfo, fDM ? fDM->data() : fMalloc, fRowBytes, ReleaseProc, this);
+ SkBitmapCache_setImmutableWithID(bitmap->pixelRef(), fPrUniqueID);
+ fExternalCounter++;
+
+ return true;
+ }
+
+ static bool Finder(const SkResourceCache::Rec& baseRec, void* contextBitmap) {
+ Rec* rec = (Rec*)&baseRec;
+ SkBitmap* result = (SkBitmap*)contextBitmap;
+ return rec->install(result);
+ }
+
+private:
+ BitmapKey fKey;
+
+ SkMutex fMutex;
+
+ // either fDM or fMalloc can be non-null, but not both
+ std::unique_ptr<SkDiscardableMemory> fDM;
+ void* fMalloc;
+
+ SkImageInfo fInfo;
+ size_t fRowBytes;
+ uint32_t fPrUniqueID;
+
+ // This field counts the number of external pixelrefs we have created.
+ // They notify us when they are destroyed so we can decrement this.
+ int fExternalCounter = 0;
+ bool fDiscardableIsLocked = true;
+};
+
+void SkBitmapCache::PrivateDeleteRec(Rec* rec) { delete rec; }
+
+SkBitmapCache::RecPtr SkBitmapCache::Alloc(const SkBitmapCacheDesc& desc, const SkImageInfo& info,
+ SkPixmap* pmap) {
+ // Ensure that the info matches the subset (i.e. the subset is the entire image)
+ SkASSERT(info.width() == desc.fSubset.width());
+ SkASSERT(info.height() == desc.fSubset.height());
+
+ const size_t rb = info.minRowBytes();
+ size_t size = info.computeByteSize(rb);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkDiscardableMemory> dm;
+ void* block = nullptr;
+
+ auto factory = SkResourceCache::GetDiscardableFactory();
+ if (factory) {
+ dm.reset(factory(size));
+ } else {
+ block = sk_malloc_canfail(size);
+ }
+ if (!dm && !block) {
+ return nullptr;
+ }
+ *pmap = SkPixmap(info, dm ? dm->data() : block, rb);
+ return RecPtr(new Rec(desc, info, rb, std::move(dm), block));
+}
+
+void SkBitmapCache::Add(RecPtr rec, SkBitmap* bitmap) {
+ SkResourceCache::Add(rec.release(), bitmap);
+}
+
+bool SkBitmapCache::Find(const SkBitmapCacheDesc& desc, SkBitmap* result) {
+ desc.validate();
+ return SkResourceCache::Find(BitmapKey(desc), SkBitmapCache::Rec::Finder, result);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
+
+namespace {
+static unsigned gMipMapKeyNamespaceLabel;
+
+struct MipMapKey : public SkResourceCache::Key {
+public:
+ MipMapKey(const SkBitmapCacheDesc& desc) : fDesc(desc) {
+ this->init(&gMipMapKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(fDesc.fImageID),
+ sizeof(fDesc));
+ }
+
+ const SkBitmapCacheDesc fDesc;
+};
+
+struct MipMapRec : public SkResourceCache::Rec {
+ MipMapRec(const SkBitmapCacheDesc& desc, const SkMipmap* result)
+ : fKey(desc)
+ , fMipMap(result)
+ {
+ fMipMap->attachToCacheAndRef();
+ }
+
+ ~MipMapRec() override {
+ fMipMap->detachFromCacheAndUnref();
+ }
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(fKey) + fMipMap->size(); }
+ const char* getCategory() const override { return "mipmap"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fMipMap->diagnostic_only_getDiscardable();
+ }
+
+ static bool Finder(const SkResourceCache::Rec& baseRec, void* contextMip) {
+ const MipMapRec& rec = static_cast<const MipMapRec&>(baseRec);
+ const SkMipmap* mm = SkRef(rec.fMipMap);
+ // the call to ref() above triggers a "lock" in the case of discardable memory,
+ // which means we can now check for null (in case the lock failed).
+ if (nullptr == mm->data()) {
+ mm->unref(); // balance our call to ref()
+ return false;
+ }
+ // the call must call unref() when they are done.
+ *(const SkMipmap**)contextMip = mm;
+ return true;
+ }
+
+private:
+ MipMapKey fKey;
+ const SkMipmap* fMipMap;
+};
+} // namespace
+
+const SkMipmap* SkMipmapCache::FindAndRef(const SkBitmapCacheDesc& desc,
+ SkResourceCache* localCache) {
+ MipMapKey key(desc);
+ const SkMipmap* result;
+
+ if (!CHECK_LOCAL(localCache, find, Find, key, MipMapRec::Finder, &result)) {
+ result = nullptr;
+ }
+ return result;
+}
+
+static SkResourceCache::DiscardableFactory get_fact(SkResourceCache* localCache) {
+ return localCache ? localCache->discardableFactory()
+ : SkResourceCache::GetDiscardableFactory();
+}
+
+const SkMipmap* SkMipmapCache::AddAndRef(const SkImage_Base* image, SkResourceCache* localCache) {
+ SkBitmap src;
+ if (!image->getROPixels(nullptr, &src)) {
+ return nullptr;
+ }
+
+ SkMipmap* mipmap = SkMipmap::Build(src, get_fact(localCache));
+ if (mipmap) {
+ MipMapRec* rec = new MipMapRec(SkBitmapCacheDesc::Make(image), mipmap);
+ CHECK_LOCAL(localCache, add, Add, rec);
+ image->notifyAddedToRasterCache();
+ }
+ return mipmap;
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapCache.h b/gfx/skia/skia/src/core/SkBitmapCache.h
new file mode 100644
index 0000000000..18ebcb2bd5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapCache.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapCache_DEFINED
+#define SkBitmapCache_DEFINED
+
+#include "include/core/SkRect.h"
+#include <memory>
+
+class SkBitmap;
+class SkImage;
+class SkImage_Base;
+struct SkImageInfo;
+class SkMipmap;
+class SkPixmap;
+class SkResourceCache;
+
+uint64_t SkMakeResourceCacheSharedIDForBitmap(uint32_t bitmapGenID);
+
+void SkNotifyBitmapGenIDIsStale(uint32_t bitmapGenID);
+
+struct SkBitmapCacheDesc {
+ uint32_t fImageID; // != 0
+ SkIRect fSubset; // always set to a valid rect (entire or subset)
+
+ void validate() const {
+ SkASSERT(fImageID);
+ SkASSERT(fSubset.fLeft >= 0 && fSubset.fTop >= 0);
+ SkASSERT(fSubset.width() > 0 && fSubset.height() > 0);
+ }
+
+ static SkBitmapCacheDesc Make(const SkImage*);
+ static SkBitmapCacheDesc Make(uint32_t genID, const SkIRect& subset);
+};
+
+class SkBitmapCache {
+public:
+ /**
+ * Search based on the desc. If found, returns true and
+ * result will be set to the matching bitmap with its pixels already locked.
+ */
+ static bool Find(const SkBitmapCacheDesc&, SkBitmap* result);
+
+ class Rec;
+ struct RecDeleter { void operator()(Rec* r) { PrivateDeleteRec(r); } };
+ typedef std::unique_ptr<Rec, RecDeleter> RecPtr;
+
+ static RecPtr Alloc(const SkBitmapCacheDesc&, const SkImageInfo&, SkPixmap*);
+ static void Add(RecPtr, SkBitmap*);
+
+private:
+ static void PrivateDeleteRec(Rec*);
+};
+
+class SkMipmapCache {
+public:
+ static const SkMipmap* FindAndRef(const SkBitmapCacheDesc&,
+ SkResourceCache* localCache = nullptr);
+ static const SkMipmap* AddAndRef(const SkImage_Base*,
+ SkResourceCache* localCache = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapDevice.cpp b/gfx/skia/skia/src/core/SkBitmapDevice.cpp
new file mode 100644
index 0000000000..f00b7f6072
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapDevice.cpp
@@ -0,0 +1,705 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBitmapDevice.h"
+
+#include "include/core/SkBlender.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRasterHandleAllocator.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkVertices.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/image/SkImage_Base.h"
+#include "src/text/GlyphRun.h"
+
+struct Bounder {
+ SkRect fBounds;
+ bool fHasBounds;
+
+ Bounder(const SkRect& r, const SkPaint& paint) {
+ if ((fHasBounds = paint.canComputeFastBounds())) {
+ fBounds = paint.computeFastBounds(r, &fBounds);
+ }
+ }
+
+ bool hasBounds() const { return fHasBounds; }
+ const SkRect* bounds() const { return fHasBounds ? &fBounds : nullptr; }
+ operator const SkRect* () const { return this->bounds(); }
+};
+
+class SkDrawTiler {
+ enum {
+ // 8K is 1 too big, since 8K << supersample == 32768 which is too big for SkFixed
+ kMaxDim = 8192 - 1
+ };
+
+ SkBitmapDevice* fDevice;
+ SkPixmap fRootPixmap;
+ SkIRect fSrcBounds;
+
+ // Used for tiling and non-tiling
+ SkDraw fDraw;
+
+ // fCurr... are only used if fNeedTiling
+ SkTLazy<SkPostTranslateMatrixProvider> fTileMatrixProvider;
+ SkRasterClip fTileRC;
+ SkIPoint fOrigin;
+
+ bool fDone, fNeedsTiling;
+
+public:
+ static bool NeedsTiling(SkBitmapDevice* dev) {
+ return dev->width() > kMaxDim || dev->height() > kMaxDim;
+ }
+
+ SkDrawTiler(SkBitmapDevice* dev, const SkRect* bounds) : fDevice(dev) {
+ fDone = false;
+
+ // we need fDst to be set, and if we're actually drawing, to dirty the genID
+ if (!dev->accessPixels(&fRootPixmap)) {
+ // NoDrawDevice uses us (why?) so we have to catch this case w/ no pixels
+ fRootPixmap.reset(dev->imageInfo(), nullptr, 0);
+ }
+
+ // do a quick check, so we don't even have to process "bounds" if there is no need
+ const SkIRect clipR = dev->fRCStack.rc().getBounds();
+ fNeedsTiling = clipR.right() > kMaxDim || clipR.bottom() > kMaxDim;
+ if (fNeedsTiling) {
+ if (bounds) {
+ // Make sure we round first, and then intersect. We can't rely on promoting the
+ // clipR to floats (and then intersecting with devBounds) since promoting
+ // int --> float can make the float larger than the int.
+ // rounding(out) first runs the risk of clamping if the float is larger an intmax
+ // but our roundOut() is saturating, which is fine for this use case
+ //
+ // e.g. the older version of this code did this:
+ // devBounds = mapRect(bounds);
+ // if (devBounds.intersect(SkRect::Make(clipR))) {
+ // fSrcBounds = devBounds.roundOut();
+ // The problem being that the promotion of clipR to SkRect was unreliable
+ //
+ fSrcBounds = dev->localToDevice().mapRect(*bounds).roundOut();
+ if (fSrcBounds.intersect(clipR)) {
+ // Check again, now that we have computed srcbounds.
+ fNeedsTiling = fSrcBounds.right() > kMaxDim || fSrcBounds.bottom() > kMaxDim;
+ } else {
+ fNeedsTiling = false;
+ fDone = true;
+ }
+ } else {
+ fSrcBounds = clipR;
+ }
+ }
+
+ if (fNeedsTiling) {
+ // fDraw.fDst and fMatrixProvider are reset each time in setupTileDraw()
+ fDraw.fRC = &fTileRC;
+ // we'll step/increase it before using it
+ fOrigin.set(fSrcBounds.fLeft - kMaxDim, fSrcBounds.fTop);
+ } else {
+ // don't reference fSrcBounds, as it may not have been set
+ fDraw.fDst = fRootPixmap;
+ fDraw.fMatrixProvider = dev;
+ fDraw.fRC = &dev->fRCStack.rc();
+ fOrigin.set(0, 0);
+ }
+
+ fDraw.fProps = &fDevice->surfaceProps();
+ }
+
+ bool needsTiling() const { return fNeedsTiling; }
+
+ const SkDraw* next() {
+ if (fDone) {
+ return nullptr;
+ }
+ if (fNeedsTiling) {
+ do {
+ this->stepAndSetupTileDraw(); // might set the clip to empty and fDone to true
+ } while (!fDone && fTileRC.isEmpty());
+ // if we exit the loop and we're still empty, we're (past) done
+ if (fTileRC.isEmpty()) {
+ SkASSERT(fDone);
+ return nullptr;
+ }
+ SkASSERT(!fTileRC.isEmpty());
+ } else {
+ fDone = true; // only draw untiled once
+ }
+ return &fDraw;
+ }
+
+private:
+ void stepAndSetupTileDraw() {
+ SkASSERT(!fDone);
+ SkASSERT(fNeedsTiling);
+
+ // We do fRootPixmap.width() - kMaxDim instead of fOrigin.fX + kMaxDim to avoid overflow.
+ if (fOrigin.fX >= fSrcBounds.fRight - kMaxDim) { // too far
+ fOrigin.fX = fSrcBounds.fLeft;
+ fOrigin.fY += kMaxDim;
+ } else {
+ fOrigin.fX += kMaxDim;
+ }
+ // fDone = next origin will be invalid.
+ fDone = fOrigin.fX >= fSrcBounds.fRight - kMaxDim &&
+ fOrigin.fY >= fSrcBounds.fBottom - kMaxDim;
+
+ SkIRect bounds = SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), kMaxDim, kMaxDim);
+ SkASSERT(!bounds.isEmpty());
+ bool success = fRootPixmap.extractSubset(&fDraw.fDst, bounds);
+ SkASSERT_RELEASE(success);
+ // now don't use bounds, since fDst has the clipped dimensions.
+
+ fDraw.fMatrixProvider = fTileMatrixProvider.init(fDevice->asMatrixProvider(),
+ SkIntToScalar(-fOrigin.x()),
+ SkIntToScalar(-fOrigin.y()));
+ fDevice->fRCStack.rc().translate(-fOrigin.x(), -fOrigin.y(), &fTileRC);
+ fTileRC.op(SkIRect::MakeWH(fDraw.fDst.width(), fDraw.fDst.height()),
+ SkClipOp::kIntersect);
+ }
+};
+
+// Passing a bounds allows the tiler to only visit the dst-tiles that might intersect the
+// drawing. If null is passed, the tiler has to visit everywhere. The bounds is expected to be
+// in local coordinates, as the tiler itself will transform that into device coordinates.
+//
+#define LOOP_TILER(code, boundsPtr) \
+ SkDrawTiler priv_tiler(this, boundsPtr); \
+ while (const SkDraw* priv_draw = priv_tiler.next()) { \
+ priv_draw->code; \
+ }
+
+// Helper to create an SkDraw from a device
+class SkBitmapDevice::BDDraw : public SkDraw {
+public:
+ BDDraw(SkBitmapDevice* dev) {
+ // we need fDst to be set, and if we're actually drawing, to dirty the genID
+ if (!dev->accessPixels(&fDst)) {
+ // NoDrawDevice uses us (why?) so we have to catch this case w/ no pixels
+ fDst.reset(dev->imageInfo(), nullptr, 0);
+ }
+ fMatrixProvider = dev;
+ fRC = &dev->fRCStack.rc();
+ }
+};
+
+static bool valid_for_bitmap_device(const SkImageInfo& info,
+ SkAlphaType* newAlphaType) {
+ if (info.width() < 0 || info.height() < 0 || kUnknown_SkColorType == info.colorType()) {
+ return false;
+ }
+
+ if (newAlphaType) {
+ *newAlphaType = SkColorTypeIsAlwaysOpaque(info.colorType()) ? kOpaque_SkAlphaType
+ : info.alphaType();
+ }
+
+ return true;
+}
+
+SkBitmapDevice::SkBitmapDevice(const SkBitmap& bitmap)
+ : INHERITED(bitmap.info(), SkSurfaceProps())
+ , fBitmap(bitmap)
+ , fRCStack(bitmap.width(), bitmap.height())
+ , fGlyphPainter(this->surfaceProps(), bitmap.colorType(), bitmap.colorSpace()) {
+ SkASSERT(valid_for_bitmap_device(bitmap.info(), nullptr));
+}
+
+SkBitmapDevice* SkBitmapDevice::Create(const SkImageInfo& info) {
+ return Create(info, SkSurfaceProps());
+}
+
+SkBitmapDevice::SkBitmapDevice(const SkBitmap& bitmap, const SkSurfaceProps& surfaceProps,
+ SkRasterHandleAllocator::Handle hndl)
+ : INHERITED(bitmap.info(), surfaceProps)
+ , fBitmap(bitmap)
+ , fRasterHandle(hndl)
+ , fRCStack(bitmap.width(), bitmap.height())
+ , fGlyphPainter(this->surfaceProps(), bitmap.colorType(), bitmap.colorSpace()) {
+ SkASSERT(valid_for_bitmap_device(bitmap.info(), nullptr));
+}
+
+SkBitmapDevice* SkBitmapDevice::Create(const SkImageInfo& origInfo,
+ const SkSurfaceProps& surfaceProps,
+ SkRasterHandleAllocator* allocator) {
+ SkAlphaType newAT = origInfo.alphaType();
+ if (!valid_for_bitmap_device(origInfo, &newAT)) {
+ return nullptr;
+ }
+
+ SkRasterHandleAllocator::Handle hndl = nullptr;
+ const SkImageInfo info = origInfo.makeAlphaType(newAT);
+ SkBitmap bitmap;
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ if (!bitmap.setInfo(info)) {
+ return nullptr;
+ }
+ } else if (allocator) {
+ hndl = allocator->allocBitmap(info, &bitmap);
+ if (!hndl) {
+ return nullptr;
+ }
+ } else if (info.isOpaque()) {
+ // If this bitmap is opaque, we don't have any sensible default color,
+ // so we just return uninitialized pixels.
+ if (!bitmap.tryAllocPixels(info)) {
+ return nullptr;
+ }
+ } else {
+ // This bitmap has transparency, so we'll zero the pixels (to transparent).
+ // We use the flag as a faster alloc-then-eraseColor(SK_ColorTRANSPARENT).
+ if (!bitmap.tryAllocPixelsFlags(info, SkBitmap::kZeroPixels_AllocFlag)) {
+ return nullptr;
+ }
+ }
+
+ return new SkBitmapDevice(bitmap, surfaceProps, hndl);
+}
+
+void SkBitmapDevice::replaceBitmapBackendForRasterSurface(const SkBitmap& bm) {
+ SkASSERT(bm.width() == fBitmap.width());
+ SkASSERT(bm.height() == fBitmap.height());
+ fBitmap = bm; // intent is to use bm's pixelRef (and rowbytes/config)
+ this->privateResize(fBitmap.info().width(), fBitmap.info().height());
+}
+
+SkBaseDevice* SkBitmapDevice::onCreateDevice(const CreateInfo& cinfo, const SkPaint* layerPaint) {
+ const SkSurfaceProps surfaceProps(this->surfaceProps().flags(), cinfo.fPixelGeometry);
+
+ // Need to force L32 for now if we have an image filter.
+ // If filters ever support other colortypes, e.g. F16, we can modify this check.
+ SkImageInfo info = cinfo.fInfo;
+ if (layerPaint && layerPaint->getImageFilter()) {
+ // TODO: can we query the imagefilter, to see if it can handle floats (so we don't always
+ // use N32 when the layer itself was float)?
+ info = info.makeColorType(kN32_SkColorType);
+ }
+
+ return SkBitmapDevice::Create(info, surfaceProps, cinfo.fAllocator);
+}
+
+bool SkBitmapDevice::onAccessPixels(SkPixmap* pmap) {
+ if (this->onPeekPixels(pmap)) {
+ fBitmap.notifyPixelsChanged();
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onPeekPixels(SkPixmap* pmap) {
+ const SkImageInfo info = fBitmap.info();
+ if (fBitmap.getPixels() && (kUnknown_SkColorType != info.colorType())) {
+ pmap->reset(fBitmap.info(), fBitmap.getPixels(), fBitmap.rowBytes());
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onWritePixels(const SkPixmap& pm, int x, int y) {
+ // since we don't stop creating un-pixeled devices yet, check for no pixels here
+ if (nullptr == fBitmap.getPixels()) {
+ return false;
+ }
+
+ if (fBitmap.writePixels(pm, x, y)) {
+ fBitmap.notifyPixelsChanged();
+ return true;
+ }
+ return false;
+}
+
+bool SkBitmapDevice::onReadPixels(const SkPixmap& pm, int x, int y) {
+ return fBitmap.readPixels(pm, x, y);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapDevice::drawPaint(const SkPaint& paint) {
+ BDDraw(this).drawPaint(paint);
+}
+
+void SkBitmapDevice::drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint) {
+ LOOP_TILER( drawPoints(mode, count, pts, paint, nullptr), nullptr)
+}
+
+void SkBitmapDevice::drawRect(const SkRect& r, const SkPaint& paint) {
+ LOOP_TILER( drawRect(r, paint), Bounder(r, paint))
+}
+
+void SkBitmapDevice::drawOval(const SkRect& oval, const SkPaint& paint) {
+ // call the VIRTUAL version, so any subclasses who do handle drawPath aren't
+ // required to override drawOval.
+ this->drawPath(SkPath::Oval(oval), paint, true);
+}
+
+void SkBitmapDevice::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
+#ifdef SK_IGNORE_BLURRED_RRECT_OPT
+ // call the VIRTUAL version, so any subclasses who do handle drawPath aren't
+ // required to override drawRRect.
+ this->drawPath(SkPath::RRect(rrect), paint, true);
+#else
+ LOOP_TILER( drawRRect(rrect, paint), Bounder(rrect.getBounds(), paint))
+#endif
+}
+
+void SkBitmapDevice::drawPath(const SkPath& path,
+ const SkPaint& paint,
+ bool pathIsMutable) {
+ const SkRect* bounds = nullptr;
+ if (SkDrawTiler::NeedsTiling(this) && !path.isInverseFillType()) {
+ bounds = &path.getBounds();
+ }
+ SkDrawTiler tiler(this, bounds ? Bounder(*bounds, paint).bounds() : nullptr);
+ if (tiler.needsTiling()) {
+ pathIsMutable = false;
+ }
+ while (const SkDraw* draw = tiler.next()) {
+ draw->drawPath(path, paint, nullptr, pathIsMutable);
+ }
+}
+
+void SkBitmapDevice::drawBitmap(const SkBitmap& bitmap, const SkMatrix& matrix,
+ const SkRect* dstOrNull, const SkSamplingOptions& sampling,
+ const SkPaint& paint) {
+ const SkRect* bounds = dstOrNull;
+ SkRect storage;
+ if (!bounds && SkDrawTiler::NeedsTiling(this)) {
+ matrix.mapRect(&storage, SkRect::MakeIWH(bitmap.width(), bitmap.height()));
+ Bounder b(storage, paint);
+ if (b.hasBounds()) {
+ storage = *b.bounds();
+ bounds = &storage;
+ }
+ }
+ LOOP_TILER(drawBitmap(bitmap, matrix, dstOrNull, sampling, paint), bounds)
+}
+
+static inline bool CanApplyDstMatrixAsCTM(const SkMatrix& m, const SkPaint& paint) {
+ if (!paint.getMaskFilter()) {
+ return true;
+ }
+
+ // Some mask filters parameters (sigma) depend on the CTM/scale.
+ return m.getType() <= SkMatrix::kTranslate_Mask;
+}
+
+void SkBitmapDevice::drawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint& paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ SkASSERT(dst.isFinite());
+ SkASSERT(dst.isSorted());
+
+ SkBitmap bitmap;
+ // TODO: Elevate direct context requirement to public API and remove cheat.
+ auto dContext = as_IB(image)->directContext();
+ if (!as_IB(image)->getROPixels(dContext, &bitmap)) {
+ return;
+ }
+
+ SkRect bitmapBounds, tmpSrc, tmpDst;
+ SkBitmap tmpBitmap;
+
+ bitmapBounds.setIWH(bitmap.width(), bitmap.height());
+
+ // Compute matrix from the two rectangles
+ if (src) {
+ tmpSrc = *src;
+ } else {
+ tmpSrc = bitmapBounds;
+ }
+ SkMatrix matrix = SkMatrix::RectToRect(tmpSrc, dst);
+
+ const SkRect* dstPtr = &dst;
+ const SkBitmap* bitmapPtr = &bitmap;
+
+ // clip the tmpSrc to the bounds of the bitmap, and recompute dstRect if
+ // needed (if the src was clipped). No check needed if src==null.
+ if (src) {
+ if (!bitmapBounds.contains(*src)) {
+ if (!tmpSrc.intersect(bitmapBounds)) {
+ return; // nothing to draw
+ }
+ // recompute dst, based on the smaller tmpSrc
+ matrix.mapRect(&tmpDst, tmpSrc);
+ if (!tmpDst.isFinite()) {
+ return;
+ }
+ dstPtr = &tmpDst;
+ }
+ }
+
+ if (src && !src->contains(bitmapBounds) &&
+ SkCanvas::kFast_SrcRectConstraint == constraint &&
+ sampling != SkSamplingOptions()) {
+ // src is smaller than the bounds of the bitmap, and we are filtering, so we don't know
+ // how much more of the bitmap we need, so we can't use extractSubset or drawBitmap,
+ // but we must use a shader w/ dst bounds (which can access all of the bitmap needed).
+ goto USE_SHADER;
+ }
+
+ if (src) {
+ // since we may need to clamp to the borders of the src rect within
+ // the bitmap, we extract a subset.
+ const SkIRect srcIR = tmpSrc.roundOut();
+ if (!bitmap.extractSubset(&tmpBitmap, srcIR)) {
+ return;
+ }
+ bitmapPtr = &tmpBitmap;
+
+ // Since we did an extract, we need to adjust the matrix accordingly
+ SkScalar dx = 0, dy = 0;
+ if (srcIR.fLeft > 0) {
+ dx = SkIntToScalar(srcIR.fLeft);
+ }
+ if (srcIR.fTop > 0) {
+ dy = SkIntToScalar(srcIR.fTop);
+ }
+ if (dx || dy) {
+ matrix.preTranslate(dx, dy);
+ }
+
+#ifdef SK_DRAWBITMAPRECT_FAST_OFFSET
+ SkRect extractedBitmapBounds = SkRect::MakeXYWH(dx, dy,
+ SkIntToScalar(bitmapPtr->width()),
+ SkIntToScalar(bitmapPtr->height()));
+#else
+ SkRect extractedBitmapBounds;
+ extractedBitmapBounds.setIWH(bitmapPtr->width(), bitmapPtr->height());
+#endif
+ if (extractedBitmapBounds == tmpSrc) {
+ // no fractional part in src, we can just call drawBitmap
+ goto USE_DRAWBITMAP;
+ }
+ } else {
+ USE_DRAWBITMAP:
+ // We can go faster by just calling drawBitmap, which will concat the
+ // matrix with the CTM, and try to call drawSprite if it can. If not,
+ // it will make a shader and call drawRect, as we do below.
+ if (CanApplyDstMatrixAsCTM(matrix, paint)) {
+ this->drawBitmap(*bitmapPtr, matrix, dstPtr, sampling, paint);
+ return;
+ }
+ }
+
+ USE_SHADER:
+
+ // construct a shader, so we can call drawRect with the dst
+ auto s = SkMakeBitmapShaderForPaint(paint, *bitmapPtr, SkTileMode::kClamp, SkTileMode::kClamp,
+ sampling, &matrix, kNever_SkCopyPixelsMode);
+ if (!s) {
+ return;
+ }
+
+ SkPaint paintWithShader(paint);
+ paintWithShader.setStyle(SkPaint::kFill_Style);
+ paintWithShader.setShader(std::move(s));
+
+ // Call ourself, in case the subclass wanted to share this setup code
+ // but handle the drawRect code themselves.
+ this->drawRect(*dstPtr, paintWithShader);
+}
+
+void SkBitmapDevice::onDrawGlyphRunList(SkCanvas* canvas,
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint) {
+ SkASSERT(!glyphRunList.hasRSXForm());
+ LOOP_TILER( drawGlyphRunList(canvas, &fGlyphPainter, glyphRunList, drawingPaint), nullptr )
+}
+
+void SkBitmapDevice::drawVertices(const SkVertices* vertices,
+ sk_sp<SkBlender> blender,
+ const SkPaint& paint,
+ bool skipColorXform) {
+#ifdef SK_LEGACY_IGNORE_DRAW_VERTICES_BLEND_WITH_NO_SHADER
+ if (!paint.getShader()) {
+ blender = SkBlender::Mode(SkBlendMode::kDst);
+ }
+#endif
+ BDDraw(this).drawVertices(vertices, std::move(blender), paint, skipColorXform);
+}
+
+#ifdef SK_ENABLE_SKSL
+void SkBitmapDevice::drawMesh(const SkMesh&, sk_sp<SkBlender>, const SkPaint&) {
+ // TODO: Implement
+}
+#endif
+
+void SkBitmapDevice::drawAtlas(const SkRSXform xform[],
+ const SkRect tex[],
+ const SkColor colors[],
+ int count,
+ sk_sp<SkBlender> blender,
+ const SkPaint& paint) {
+ // set this to true for performance comparisons with the old drawVertices way
+ if ((false)) {
+ this->INHERITED::drawAtlas(xform, tex, colors, count, std::move(blender), paint);
+ return;
+ }
+ BDDraw(this).drawAtlas(xform, tex, colors, count, std::move(blender), paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapDevice::drawDevice(SkBaseDevice* device, const SkSamplingOptions& sampling,
+ const SkPaint& paint) {
+ SkASSERT(!paint.getImageFilter());
+ SkASSERT(!paint.getMaskFilter());
+
+ this->INHERITED::drawDevice(device, sampling, paint);
+}
+
+void SkBitmapDevice::drawSpecial(SkSpecialImage* src,
+ const SkMatrix& localToDevice,
+ const SkSamplingOptions& sampling,
+ const SkPaint& paint) {
+ SkASSERT(!paint.getImageFilter());
+ SkASSERT(!paint.getMaskFilter());
+ SkASSERT(!src->isTextureBacked());
+
+ SkBitmap resultBM;
+ if (src->getROPixels(&resultBM)) {
+ SkDraw draw;
+ SkMatrixProvider matrixProvider(localToDevice);
+ if (!this->accessPixels(&draw.fDst)) {
+ return; // no pixels to draw to so skip it
+ }
+ draw.fMatrixProvider = &matrixProvider;
+ draw.fRC = &fRCStack.rc();
+ draw.drawBitmap(resultBM, SkMatrix::I(), nullptr, sampling, paint);
+ }
+}
+sk_sp<SkSpecialImage> SkBitmapDevice::makeSpecial(const SkBitmap& bitmap) {
+ return SkSpecialImage::MakeFromRaster(bitmap.bounds(), bitmap, this->surfaceProps());
+}
+
+sk_sp<SkSpecialImage> SkBitmapDevice::makeSpecial(const SkImage* image) {
+ return SkSpecialImage::MakeFromImage(nullptr, SkIRect::MakeWH(image->width(), image->height()),
+ image->makeNonTextureImage(), this->surfaceProps());
+}
+
+sk_sp<SkSpecialImage> SkBitmapDevice::snapSpecial(const SkIRect& bounds, bool forceCopy) {
+ if (forceCopy) {
+ return SkSpecialImage::CopyFromRaster(bounds, fBitmap, this->surfaceProps());
+ } else {
+ return SkSpecialImage::MakeFromRaster(bounds, fBitmap, this->surfaceProps());
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkBitmapDevice::makeSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ return SkSurface::MakeRaster(info, &props);
+}
+
+SkImageFilterCache* SkBitmapDevice::getImageFilterCache() {
+ SkImageFilterCache* cache = SkImageFilterCache::Get();
+ cache->ref();
+ return cache;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkBitmapDevice::onSave() {
+ fRCStack.save();
+}
+
+void SkBitmapDevice::onRestore() {
+ fRCStack.restore();
+}
+
+void SkBitmapDevice::onClipRect(const SkRect& rect, SkClipOp op, bool aa) {
+ fRCStack.clipRect(this->localToDevice(), rect, op, aa);
+}
+
+void SkBitmapDevice::onClipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
+ fRCStack.clipRRect(this->localToDevice(), rrect, op, aa);
+}
+
+void SkBitmapDevice::onClipPath(const SkPath& path, SkClipOp op, bool aa) {
+ fRCStack.clipPath(this->localToDevice(), path, op, aa);
+}
+
+void SkBitmapDevice::onClipShader(sk_sp<SkShader> sh) {
+ fRCStack.clipShader(std::move(sh));
+}
+
+void SkBitmapDevice::onClipRegion(const SkRegion& rgn, SkClipOp op) {
+ SkIPoint origin = this->getOrigin();
+ SkRegion tmp;
+ const SkRegion* ptr = &rgn;
+ if (origin.fX | origin.fY) {
+ // translate from "global/canvas" coordinates to relative to this device
+ rgn.translate(-origin.fX, -origin.fY, &tmp);
+ ptr = &tmp;
+ }
+ fRCStack.clipRegion(*ptr, op);
+}
+
+void SkBitmapDevice::onReplaceClip(const SkIRect& rect) {
+ // Transform from "global/canvas" coordinates to relative to this device
+ SkRect deviceRect = SkMatrixPriv::MapRect(this->globalToDevice(), SkRect::Make(rect));
+ fRCStack.replaceClip(deviceRect.round());
+}
+
+bool SkBitmapDevice::onClipIsWideOpen() const {
+ const SkRasterClip& rc = fRCStack.rc();
+ // If we're AA, we can't be wide-open (we would represent that as BW)
+ return rc.isBW() && rc.bwRgn().isRect() &&
+ rc.bwRgn().getBounds() == SkIRect{0, 0, this->width(), this->height()};
+}
+
+bool SkBitmapDevice::onClipIsAA() const {
+ const SkRasterClip& rc = fRCStack.rc();
+ return !rc.isEmpty() && rc.isAA();
+}
+
+void SkBitmapDevice::onAsRgnClip(SkRegion* rgn) const {
+ const SkRasterClip& rc = fRCStack.rc();
+ if (rc.isAA()) {
+ rgn->setRect(rc.getBounds());
+ } else {
+ *rgn = rc.bwRgn();
+ }
+}
+
+void SkBitmapDevice::validateDevBounds(const SkIRect& drawClipBounds) {
+#ifdef SK_DEBUG
+ const SkIRect& stackBounds = fRCStack.rc().getBounds();
+ SkASSERT(drawClipBounds == stackBounds);
+#endif
+}
+
+SkBaseDevice::ClipType SkBitmapDevice::onGetClipType() const {
+ const SkRasterClip& rc = fRCStack.rc();
+ if (rc.isEmpty()) {
+ return ClipType::kEmpty;
+ } else if (rc.isRect() && !SkToBool(rc.clipShader())) {
+ return ClipType::kRect;
+ } else {
+ return ClipType::kComplex;
+ }
+}
+
+SkIRect SkBitmapDevice::onDevClipBounds() const {
+ return fRCStack.rc().getBounds();
+}
diff --git a/gfx/skia/skia/src/core/SkBitmapDevice.h b/gfx/skia/skia/src/core/SkBitmapDevice.h
new file mode 100644
index 0000000000..d5985b8577
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapDevice.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapDevice_DEFINED
+#define SkBitmapDevice_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRasterClipStack.h"
+
+class SkImageFilterCache;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkPixmap;
+class SkRasterHandleAllocator;
+class SkRRect;
+class SkSurface;
+class SkSurfaceProps;
+struct SkPoint;
+#ifdef SK_ENABLE_SKSL
+class SkMesh;
+#endif
+///////////////////////////////////////////////////////////////////////////////
+class SkBitmapDevice : public SkBaseDevice {
+public:
+ /**
+ * Construct a new device with the specified bitmap as its backend. It is
+ * valid for the bitmap to have no pixels associated with it. In that case,
+ * any drawing to this device will have no effect.
+ */
+ SkBitmapDevice(const SkBitmap& bitmap);
+
+ /**
+ * Create a new device along with its requisite pixel memory using
+ * default SkSurfaceProps (i.e., kLegacyFontHost_InitType-style).
+ * Note: this entry point is slated for removal - no one should call it.
+ */
+ static SkBitmapDevice* Create(const SkImageInfo& info);
+
+ /**
+ * Construct a new device with the specified bitmap as its backend. It is
+ * valid for the bitmap to have no pixels associated with it. In that case,
+ * any drawing to this device will have no effect.
+ */
+ SkBitmapDevice(const SkBitmap& bitmap, const SkSurfaceProps& surfaceProps,
+ void* externalHandle = nullptr);
+
+ static SkBitmapDevice* Create(const SkImageInfo&, const SkSurfaceProps&,
+ SkRasterHandleAllocator* = nullptr);
+
+protected:
+ void* getRasterHandle() const override { return fRasterHandle; }
+
+ /** These are called inside the per-device-layer loop for each draw call.
+ When these are called, we have already applied any saveLayer operations,
+ and are handling any looping from the paint.
+ */
+ void drawPaint(const SkPaint& paint) override;
+ void drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) override;
+ void drawRect(const SkRect& r, const SkPaint& paint) override;
+ void drawOval(const SkRect& oval, const SkPaint& paint) override;
+ void drawRRect(const SkRRect& rr, const SkPaint& paint) override;
+
+ /**
+ * If pathIsMutable, then the implementation is allowed to cast path to a
+ * non-const pointer and modify it in place (as an optimization). Canvas
+ * may do this to implement helpers such as drawOval, by placing a temp
+ * path on the stack to hold the representation of the oval.
+ */
+ void drawPath(const SkPath&, const SkPaint&, bool pathIsMutable) override;
+
+ void drawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkSamplingOptions&, const SkPaint&,
+ SkCanvas::SrcRectConstraint) override;
+
+ void drawVertices(const SkVertices*, sk_sp<SkBlender>, const SkPaint&, bool) override;
+#ifdef SK_ENABLE_SKSL
+ void drawMesh(const SkMesh&, sk_sp<SkBlender>, const SkPaint&) override;
+#endif
+
+ void drawAtlas(const SkRSXform[], const SkRect[], const SkColor[], int count, sk_sp<SkBlender>,
+ const SkPaint&) override;
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ void drawDevice(SkBaseDevice*, const SkSamplingOptions&, const SkPaint&) override;
+ void drawSpecial(SkSpecialImage*, const SkMatrix&, const SkSamplingOptions&,
+ const SkPaint&) override;
+
+ sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkImage*) override;
+ sk_sp<SkSpecialImage> snapSpecial(const SkIRect&, bool forceCopy = false) override;
+ void setImmutable() override { fBitmap.setImmutable(); }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ void onDrawGlyphRunList(SkCanvas*,
+ const sktext::GlyphRunList&,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint) override;
+ bool onReadPixels(const SkPixmap&, int x, int y) override;
+ bool onWritePixels(const SkPixmap&, int, int) override;
+ bool onPeekPixels(SkPixmap*) override;
+ bool onAccessPixels(SkPixmap*) override;
+
+ void onSave() override;
+ void onRestore() override;
+ void onClipRect(const SkRect& rect, SkClipOp, bool aa) override;
+ void onClipRRect(const SkRRect& rrect, SkClipOp, bool aa) override;
+ void onClipPath(const SkPath& path, SkClipOp, bool aa) override;
+ void onClipShader(sk_sp<SkShader>) override;
+ void onClipRegion(const SkRegion& deviceRgn, SkClipOp) override;
+ void onReplaceClip(const SkIRect& rect) override;
+ bool onClipIsAA() const override;
+ bool onClipIsWideOpen() const override;
+ void onAsRgnClip(SkRegion*) const override;
+ void validateDevBounds(const SkIRect& r) override;
+ ClipType onGetClipType() const override;
+ SkIRect onDevClipBounds() const override;
+
+ void drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect* dstOrNull,
+ const SkSamplingOptions&, const SkPaint&);
+
+private:
+ friend class SkCanvas;
+ friend class SkDraw;
+ friend class SkDrawBase;
+ friend class SkDrawTiler;
+ friend class SkSurface_Raster;
+
+ class BDDraw;
+
+ // used to change the backend's pixels (and possibly config/rowbytes)
+ // but cannot change the width/height, so there should be no change to
+ // any clip information.
+ void replaceBitmapBackendForRasterSurface(const SkBitmap&) override;
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ SkImageFilterCache* getImageFilterCache() override;
+
+ SkBitmap fBitmap;
+ void* fRasterHandle = nullptr;
+ SkRasterClipStack fRCStack;
+ SkGlyphRunListPainterCPU fGlyphPainter;
+
+
+ using INHERITED = SkBaseDevice;
+};
+
+#endif // SkBitmapDevice_DEFINED
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState.cpp b/gfx/skia/skia/src/core/SkBitmapProcState.cpp
new file mode 100644
index 0000000000..7f7dfd6db4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState.cpp
@@ -0,0 +1,694 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkShader.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTPin.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkBitmapProcState.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkMipmapAccessor.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkResourceCache.h"
+
+// One-stop-shop shader for,
+// - nearest-neighbor sampling (_nofilter_),
+// - clamp tiling in X and Y both (Clamp_),
+// - with at most a scale and translate matrix (_DX_),
+// - and no extra alpha applied (_opaque_),
+// - sampling from 8888 (_S32_) and drawing to 8888 (_S32_).
+static void Clamp_S32_opaque_D32_nofilter_DX_shaderproc(const void* sIn, int x, int y,
+ SkPMColor* dst, int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT(s.fInvMatrix.isScaleTranslate());
+ SkASSERT(s.fAlphaScale == 256);
+
+ const unsigned maxX = s.fPixmap.width() - 1;
+ SkFractionalInt fx;
+ int dstY;
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ const unsigned maxY = s.fPixmap.height() - 1;
+ dstY = SkTPin<int>(mapper.intY(), 0, maxY);
+ fx = mapper.fractionalIntX();
+ }
+
+ const SkPMColor* src = s.fPixmap.addr32(0, dstY);
+ const SkFractionalInt dx = s.fInvSxFractionalInt;
+
+ // Check if we're safely inside [0...maxX] so no need to clamp each computed index.
+ //
+ if ((uint64_t)SkFractionalIntToInt(fx) <= maxX &&
+ (uint64_t)SkFractionalIntToInt(fx + dx * (count - 1)) <= maxX)
+ {
+ int count4 = count >> 2;
+ for (int i = 0; i < count4; ++i) {
+ SkPMColor src0 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src1 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src2 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ SkPMColor src3 = src[SkFractionalIntToInt(fx)]; fx += dx;
+ dst[0] = src0;
+ dst[1] = src1;
+ dst[2] = src2;
+ dst[3] = src3;
+ dst += 4;
+ }
+ for (int i = (count4 << 2); i < count; ++i) {
+ unsigned index = SkFractionalIntToInt(fx);
+ SkASSERT(index <= maxX);
+ *dst++ = src[index];
+ fx += dx;
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ dst[i] = src[SkTPin<int>(SkFractionalIntToInt(fx), 0, maxX)];
+ fx += dx;
+ }
+ }
+}
+
+static void S32_alpha_D32_nofilter_DX(const SkBitmapProcState& s,
+ const uint32_t* xy, int count, SkPMColor* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fInvMatrix.isScaleTranslate());
+ SkASSERT(!s.fBilerp);
+ SkASSERT(4 == s.fPixmap.info().bytesPerPixel());
+ SkASSERT(s.fAlphaScale <= 256);
+
+ // xy is a 32-bit y-coordinate, followed by 16-bit x-coordinates.
+ unsigned y = *xy++;
+ SkASSERT(y < (unsigned)s.fPixmap.height());
+
+ auto row = (const SkPMColor*)( (const char*)s.fPixmap.addr() + y * s.fPixmap.rowBytes() );
+
+ if (1 == s.fPixmap.width()) {
+ SkOpts::memset32(colors, SkAlphaMulQ(row[0], s.fAlphaScale), count);
+ return;
+ }
+
+ // Step 4 xs == 2 uint32_t at a time.
+ while (count >= 4) {
+ uint32_t x01 = *xy++,
+ x23 = *xy++;
+
+ SkPMColor p0 = row[UNPACK_PRIMARY_SHORT (x01)];
+ SkPMColor p1 = row[UNPACK_SECONDARY_SHORT(x01)];
+ SkPMColor p2 = row[UNPACK_PRIMARY_SHORT (x23)];
+ SkPMColor p3 = row[UNPACK_SECONDARY_SHORT(x23)];
+
+ *colors++ = SkAlphaMulQ(p0, s.fAlphaScale);
+ *colors++ = SkAlphaMulQ(p1, s.fAlphaScale);
+ *colors++ = SkAlphaMulQ(p2, s.fAlphaScale);
+ *colors++ = SkAlphaMulQ(p3, s.fAlphaScale);
+
+ count -= 4;
+ }
+
+ // Step 1 x == 1 uint16_t at a time.
+ auto x = (const uint16_t*)xy;
+ while (count --> 0) {
+ *colors++ = SkAlphaMulQ(row[*x++], s.fAlphaScale);
+ }
+}
+
+static void S32_alpha_D32_nofilter_DXDY(const SkBitmapProcState& s,
+ const uint32_t* xy, int count, SkPMColor* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(!s.fBilerp);
+ SkASSERT(4 == s.fPixmap.info().bytesPerPixel());
+ SkASSERT(s.fAlphaScale <= 256);
+
+ auto src = (const char*)s.fPixmap.addr();
+ size_t rb = s.fPixmap.rowBytes();
+
+ while (count --> 0) {
+ uint32_t XY = *xy++,
+ x = XY & 0xffff,
+ y = XY >> 16;
+ SkASSERT(x < (unsigned)s.fPixmap.width ());
+ SkASSERT(y < (unsigned)s.fPixmap.height());
+ *colors++ = ((const SkPMColor*)(src + y*rb))[x];
+ }
+}
+
+SkBitmapProcState::SkBitmapProcState(const SkImage_Base* image, SkTileMode tmx, SkTileMode tmy)
+ : fImage(image)
+ , fTileModeX(tmx)
+ , fTileModeY(tmy)
+{}
+
+// true iff the matrix has a scale and no more than an optional translate.
+static bool matrix_only_scale_translate(const SkMatrix& m) {
+ return (m.getType() & ~SkMatrix::kTranslate_Mask) == SkMatrix::kScale_Mask;
+}
+
+/**
+ * For the purposes of drawing bitmaps, if a matrix is "almost" translate
+ * go ahead and treat it as if it were, so that subsequent code can go fast.
+ */
+static bool just_trans_general(const SkMatrix& matrix) {
+ SkASSERT(matrix_only_scale_translate(matrix));
+
+ const SkScalar tol = SK_Scalar1 / 32768;
+
+ return SkScalarNearlyZero(matrix[SkMatrix::kMScaleX] - SK_Scalar1, tol)
+ && SkScalarNearlyZero(matrix[SkMatrix::kMScaleY] - SK_Scalar1, tol);
+}
+
+/**
+ * Determine if the matrix can be treated as integral-only-translate,
+ * for the purpose of filtering.
+ */
+static bool just_trans_integral(const SkMatrix& m) {
+ static constexpr SkScalar tol = SK_Scalar1 / 256;
+
+ return m.getType() <= SkMatrix::kTranslate_Mask
+ && SkScalarNearlyEqual(m.getTranslateX(), SkScalarRoundToScalar(m.getTranslateX()), tol)
+ && SkScalarNearlyEqual(m.getTranslateY(), SkScalarRoundToScalar(m.getTranslateY()), tol);
+}
+
+static bool valid_for_filtering(unsigned dimension) {
+    // for filtering, width and height must fit in 14 bits, since we steal
+ // 2 bits from each to store our 4bit subpixel data
+ return (dimension & ~0x3FFF) == 0;
+}
+
+bool SkBitmapProcState::init(const SkMatrix& inv, SkAlpha paintAlpha,
+ const SkSamplingOptions& sampling) {
+ SkASSERT(!inv.hasPerspective());
+ SkASSERT(SkOpts::S32_alpha_D32_filter_DXDY || inv.isScaleTranslate());
+ SkASSERT(!sampling.isAniso());
+ SkASSERT(!sampling.useCubic);
+ SkASSERT(sampling.mipmap != SkMipmapMode::kLinear);
+
+ fPixmap.reset();
+ fBilerp = false;
+
+ auto* access = SkMipmapAccessor::Make(&fAlloc, (const SkImage*)fImage, inv, sampling.mipmap);
+ if (!access) {
+ return false;
+ }
+ std::tie(fPixmap, fInvMatrix) = access->level();
+ fInvMatrix.preConcat(inv);
+
+ fPaintAlpha = paintAlpha;
+ fBilerp = sampling.filter == SkFilterMode::kLinear;
+ SkASSERT(fPixmap.addr());
+
+ bool integral_translate_only = just_trans_integral(fInvMatrix);
+ if (!integral_translate_only) {
+ // Most of the scanline procs deal with "unit" texture coordinates, as this
+ // makes it easy to perform tiling modes (repeat = (x & 0xFFFF)). To generate
+ // those, we divide the matrix by its dimensions here.
+ //
+ // We don't do this if we're either trivial (can ignore the matrix) or clamping
+ // in both X and Y since clamping to width,height is just as easy as to 0xFFFF.
+
+ if (fTileModeX != SkTileMode::kClamp || fTileModeY != SkTileMode::kClamp) {
+ SkMatrixPriv::PostIDiv(&fInvMatrix, fPixmap.width(), fPixmap.height());
+ }
+
+ // Now that all possible changes to the matrix have taken place, check
+ // to see if we're really close to a no-scale matrix. If so, explicitly
+ // set it to be so. Subsequent code may inspect this matrix to choose
+ // a faster path in this case.
+
+ // This code will only execute if the matrix has some scale component;
+ // if it's already pure translate then we won't do this inversion.
+
+ if (matrix_only_scale_translate(fInvMatrix)) {
+ SkMatrix forward;
+ if (fInvMatrix.invert(&forward) && just_trans_general(forward)) {
+ fInvMatrix.setTranslate(-forward.getTranslateX(), -forward.getTranslateY());
+ }
+ }
+
+ // Recompute the flag after matrix adjustments.
+ integral_translate_only = just_trans_integral(fInvMatrix);
+ }
+
+ if (fBilerp &&
+ (!valid_for_filtering(fPixmap.width() | fPixmap.height()) || integral_translate_only)) {
+ fBilerp = false;
+ }
+
+ return true;
+}
+
+/*
+ * Analyze filter-quality and matrix, and decide how to implement that.
+ *
+ * In general, we cascade down the request level [ High ... None ]
+ * - for a given level, if we can fulfill it, fine, else
+ * - else we downgrade to the next lower level and try again.
+ * We can always fulfill requests for Low and None
+ * - sometimes we will "ignore" Low and give None, but this is likely a legacy perf hack
+ * and may be removed.
+ */
+bool SkBitmapProcState::chooseProcs() {
+ SkASSERT(!fInvMatrix.hasPerspective());
+ SkASSERT(SkOpts::S32_alpha_D32_filter_DXDY || fInvMatrix.isScaleTranslate());
+ SkASSERT(fPixmap.colorType() == kN32_SkColorType);
+ SkASSERT(fPixmap.alphaType() == kPremul_SkAlphaType ||
+ fPixmap.alphaType() == kOpaque_SkAlphaType);
+
+ SkASSERT(fTileModeX != SkTileMode::kDecal);
+
+ fInvProc = SkMatrixPriv::GetMapXYProc(fInvMatrix);
+ fInvSxFractionalInt = SkScalarToFractionalInt(fInvMatrix.getScaleX());
+ fInvKyFractionalInt = SkScalarToFractionalInt(fInvMatrix.getSkewY ());
+
+ fAlphaScale = SkAlpha255To256(fPaintAlpha);
+
+ bool translate_only = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0;
+ fMatrixProc = this->chooseMatrixProc(translate_only);
+ SkASSERT(fMatrixProc);
+
+ if (fInvMatrix.isScaleTranslate()) {
+ fSampleProc32 = fBilerp ? SkOpts::S32_alpha_D32_filter_DX : S32_alpha_D32_nofilter_DX ;
+ } else {
+ fSampleProc32 = fBilerp ? SkOpts::S32_alpha_D32_filter_DXDY : S32_alpha_D32_nofilter_DXDY;
+ }
+ SkASSERT(fSampleProc32);
+
+ fShaderProc32 = this->chooseShaderProc32();
+
+ // our special-case shaderprocs
+ // TODO: move this one into chooseShaderProc32() or pull all that in here.
+ if (nullptr == fShaderProc32
+ && fAlphaScale == 256
+ && !fBilerp
+ && SkTileMode::kClamp == fTileModeX
+ && SkTileMode::kClamp == fTileModeY
+ && fInvMatrix.isScaleTranslate()) {
+ fShaderProc32 = Clamp_S32_opaque_D32_nofilter_DX_shaderproc;
+ }
+
+ return true;
+}
+
+static void Clamp_S32_D32_nofilter_trans_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT(s.fInvMatrix.isTranslate());
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(!s.fBilerp);
+
+ const int maxX = s.fPixmap.width() - 1;
+ const int maxY = s.fPixmap.height() - 1;
+ int ix = s.fFilterOneX + x;
+ int iy = SkTPin(s.fFilterOneY + y, 0, maxY);
+ const SkPMColor* row = s.fPixmap.addr32(0, iy);
+
+ // clamp to the left
+ if (ix < 0) {
+ int n = std::min(-ix, count);
+ SkOpts::memset32(colors, row[0], n);
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ SkASSERT(-ix == n);
+ ix = 0;
+ }
+ // copy the middle
+ if (ix <= maxX) {
+ int n = std::min(maxX - ix + 1, count);
+ memcpy(colors, row + ix, n * sizeof(SkPMColor));
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ }
+ SkASSERT(count > 0);
+ // clamp to the right
+ SkOpts::memset32(colors, row[maxX], count);
+}
+
+static inline int sk_int_mod(int x, int n) {
+ SkASSERT(n > 0);
+ if ((unsigned)x >= (unsigned)n) {
+ if (x < 0) {
+ x = n + ~(~x % n);
+ } else {
+ x = x % n;
+ }
+ }
+ return x;
+}
+
+static inline int sk_int_mirror(int x, int n) {
+ x = sk_int_mod(x, 2 * n);
+ if (x >= n) {
+ x = n + ~(x - n);
+ }
+ return x;
+}
+
+static void Repeat_S32_D32_nofilter_trans_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT(s.fInvMatrix.isTranslate());
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(!s.fBilerp);
+
+ const int stopX = s.fPixmap.width();
+ const int stopY = s.fPixmap.height();
+ int ix = s.fFilterOneX + x;
+ int iy = sk_int_mod(s.fFilterOneY + y, stopY);
+ const SkPMColor* row = s.fPixmap.addr32(0, iy);
+
+ ix = sk_int_mod(ix, stopX);
+ for (;;) {
+ int n = std::min(stopX - ix, count);
+ memcpy(colors, row + ix, n * sizeof(SkPMColor));
+ count -= n;
+ if (0 == count) {
+ return;
+ }
+ colors += n;
+ ix = 0;
+ }
+}
+
+static inline void filter_32_alpha(unsigned t,
+ SkPMColor color0,
+ SkPMColor color1,
+ SkPMColor* dstColor,
+ unsigned alphaScale) {
+ SkASSERT((unsigned)t <= 0xF);
+ SkASSERT(alphaScale <= 256);
+
+ const uint32_t mask = 0xFF00FF;
+
+ int scale = 256 - 16*t;
+ uint32_t lo = (color0 & mask) * scale;
+ uint32_t hi = ((color0 >> 8) & mask) * scale;
+
+ scale = 16*t;
+ lo += (color1 & mask) * scale;
+ hi += ((color1 >> 8) & mask) * scale;
+
+ // TODO: if (alphaScale < 256) ...
+ lo = ((lo >> 8) & mask) * alphaScale;
+ hi = ((hi >> 8) & mask) * alphaScale;
+
+ *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
+}
+
+static void S32_D32_constX_shaderproc(const void* sIn,
+ int x, int y,
+ SkPMColor* colors,
+ int count) {
+ const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn);
+ SkASSERT(s.fInvMatrix.isScaleTranslate());
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(1 == s.fPixmap.width());
+
+ int iY0;
+ int iY1 SK_INIT_TO_AVOID_WARNING;
+ int iSubY SK_INIT_TO_AVOID_WARNING;
+
+ if (s.fBilerp) {
+ SkBitmapProcState::MatrixProc mproc = s.getMatrixProc();
+ uint32_t xy[2];
+
+ mproc(s, xy, 1, x, y);
+
+ iY0 = xy[0] >> 18;
+ iY1 = xy[0] & 0x3FFF;
+ iSubY = (xy[0] >> 14) & 0xF;
+ } else {
+ int yTemp;
+
+ if (s.fInvMatrix.isTranslate()) {
+ yTemp = s.fFilterOneY + y;
+ } else{
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+ // When the matrix has a scale component the setup code in
+            // chooseProcs multiplies the inverse matrix by the inverse of the
+ // bitmap's width and height. Since this method is going to do
+ // its own tiling and sampling we need to undo that here.
+ if (SkTileMode::kClamp != s.fTileModeX || SkTileMode::kClamp != s.fTileModeY) {
+ yTemp = SkFractionalIntToInt(mapper.fractionalIntY() * s.fPixmap.height());
+ } else {
+ yTemp = mapper.intY();
+ }
+ }
+
+ const int stopY = s.fPixmap.height();
+ switch (s.fTileModeY) {
+ case SkTileMode::kClamp:
+ iY0 = SkTPin(yTemp, 0, stopY-1);
+ break;
+ case SkTileMode::kRepeat:
+ iY0 = sk_int_mod(yTemp, stopY);
+ break;
+ case SkTileMode::kMirror:
+ default:
+ iY0 = sk_int_mirror(yTemp, stopY);
+ break;
+ }
+
+#ifdef SK_DEBUG
+ {
+ const SkBitmapProcStateAutoMapper mapper(s, x, y);
+ int iY2;
+
+ if (!s.fInvMatrix.isTranslate() &&
+ (SkTileMode::kClamp != s.fTileModeX || SkTileMode::kClamp != s.fTileModeY)) {
+ iY2 = SkFractionalIntToInt(mapper.fractionalIntY() * s.fPixmap.height());
+ } else {
+ iY2 = mapper.intY();
+ }
+
+ switch (s.fTileModeY) {
+ case SkTileMode::kClamp:
+ iY2 = SkTPin(iY2, 0, stopY-1);
+ break;
+ case SkTileMode::kRepeat:
+ iY2 = sk_int_mod(iY2, stopY);
+ break;
+ case SkTileMode::kMirror:
+ default:
+ iY2 = sk_int_mirror(iY2, stopY);
+ break;
+ }
+
+ SkASSERT(iY0 == iY2);
+ }
+#endif
+ }
+
+ const SkPMColor* row0 = s.fPixmap.addr32(0, iY0);
+ SkPMColor color;
+
+ if (s.fBilerp) {
+ const SkPMColor* row1 = s.fPixmap.addr32(0, iY1);
+ filter_32_alpha(iSubY, *row0, *row1, &color, s.fAlphaScale);
+ } else {
+ if (s.fAlphaScale < 256) {
+ color = SkAlphaMulQ(*row0, s.fAlphaScale);
+ } else {
+ color = *row0;
+ }
+ }
+
+ SkOpts::memset32(colors, color, count);
+}
+
+static void DoNothing_shaderproc(const void*, int x, int y,
+ SkPMColor* colors, int count) {
+ // if we get called, the matrix is too tricky, so we just draw nothing
+ SkOpts::memset32(colors, 0, count);
+}
+
+bool SkBitmapProcState::setupForTranslate() {
+ SkPoint pt;
+ const SkBitmapProcStateAutoMapper mapper(*this, 0, 0, &pt);
+
+ /*
+ * if the translate is larger than our ints, we can get random results, or
+ * worse, we might get 0x80000000, which wreaks havoc on us, since we can't
+ * negate it.
+ */
+ const SkScalar too_big = SkIntToScalar(1 << 30);
+ if (SkScalarAbs(pt.fX) > too_big || SkScalarAbs(pt.fY) > too_big) {
+ return false;
+ }
+
+    // Since we know we're not filtered, we re-purpose these fields to allow
+ // us to go from device -> src coordinates w/ just an integer add,
+ // rather than running through the inverse-matrix
+ fFilterOneX = mapper.intX();
+ fFilterOneY = mapper.intY();
+
+ return true;
+}
+
+SkBitmapProcState::ShaderProc32 SkBitmapProcState::chooseShaderProc32() {
+
+ if (kN32_SkColorType != fPixmap.colorType()) {
+ return nullptr;
+ }
+
+ if (1 == fPixmap.width() && fInvMatrix.isScaleTranslate()) {
+ if (!fBilerp && fInvMatrix.isTranslate() && !this->setupForTranslate()) {
+ return DoNothing_shaderproc;
+ }
+ return S32_D32_constX_shaderproc;
+ }
+
+ if (fAlphaScale < 256) {
+ return nullptr;
+ }
+ if (!fInvMatrix.isTranslate()) {
+ return nullptr;
+ }
+ if (fBilerp) {
+ return nullptr;
+ }
+
+ SkTileMode tx = fTileModeX;
+ SkTileMode ty = fTileModeY;
+
+ if (SkTileMode::kClamp == tx && SkTileMode::kClamp == ty) {
+ if (this->setupForTranslate()) {
+ return Clamp_S32_D32_nofilter_trans_shaderproc;
+ }
+ return DoNothing_shaderproc;
+ }
+ if (SkTileMode::kRepeat == tx && SkTileMode::kRepeat == ty) {
+ if (this->setupForTranslate()) {
+ return Repeat_S32_D32_nofilter_trans_shaderproc;
+ }
+ return DoNothing_shaderproc;
+ }
+ return nullptr;
+}
+
+#ifdef SK_DEBUG
+
+static void check_scale_nofilter(uint32_t bitmapXY[], int count,
+ unsigned mx, unsigned my) {
+ unsigned y = *bitmapXY++;
+ SkASSERT(y < my);
+
+ const uint16_t* xptr = reinterpret_cast<const uint16_t*>(bitmapXY);
+ for (int i = 0; i < count; ++i) {
+ SkASSERT(xptr[i] < mx);
+ }
+}
+
+static void check_scale_filter(uint32_t bitmapXY[], int count,
+ unsigned mx, unsigned my) {
+ uint32_t YY = *bitmapXY++;
+ unsigned y0 = YY >> 18;
+ unsigned y1 = YY & 0x3FFF;
+ SkASSERT(y0 < my);
+ SkASSERT(y1 < my);
+
+ for (int i = 0; i < count; ++i) {
+ uint32_t XX = bitmapXY[i];
+ unsigned x0 = XX >> 18;
+ unsigned x1 = XX & 0x3FFF;
+ SkASSERT(x0 < mx);
+ SkASSERT(x1 < mx);
+ }
+}
+
+static void check_affine_nofilter(uint32_t bitmapXY[], int count, unsigned mx, unsigned my) {
+ for (int i = 0; i < count; ++i) {
+ uint32_t XY = bitmapXY[i];
+ unsigned x = XY & 0xFFFF;
+ unsigned y = XY >> 16;
+ SkASSERT(x < mx);
+ SkASSERT(y < my);
+ }
+}
+
+static void check_affine_filter(uint32_t bitmapXY[], int count, unsigned mx, unsigned my) {
+ for (int i = 0; i < count; ++i) {
+ uint32_t YY = *bitmapXY++;
+ unsigned y0 = YY >> 18;
+ unsigned y1 = YY & 0x3FFF;
+ SkASSERT(y0 < my);
+ SkASSERT(y1 < my);
+
+ uint32_t XX = *bitmapXY++;
+ unsigned x0 = XX >> 18;
+ unsigned x1 = XX & 0x3FFF;
+ SkASSERT(x0 < mx);
+ SkASSERT(x1 < mx);
+ }
+}
+
+void SkBitmapProcState::DebugMatrixProc(const SkBitmapProcState& state,
+ uint32_t bitmapXY[], int count,
+ int x, int y) {
+ SkASSERT(bitmapXY);
+ SkASSERT(count > 0);
+
+ state.fMatrixProc(state, bitmapXY, count, x, y);
+
+ void (*proc)(uint32_t bitmapXY[], int count, unsigned mx, unsigned my);
+
+ if (state.fInvMatrix.isScaleTranslate()) {
+ proc = state.fBilerp ? check_scale_filter : check_scale_nofilter;
+ } else {
+ proc = state.fBilerp ? check_affine_filter : check_affine_nofilter;
+ }
+
+ proc(bitmapXY, count, state.fPixmap.width(), state.fPixmap.height());
+}
+
+SkBitmapProcState::MatrixProc SkBitmapProcState::getMatrixProc() const {
+ return DebugMatrixProc;
+}
+
+#endif
+
+/*
+ The storage requirements for the different matrix procs are as follows,
+ where each X or Y is 2 bytes, and N is the number of pixels/elements:
+
+ scale/translate nofilter Y(4bytes) + N * X
+ affine/perspective nofilter N * (X Y)
+ scale/translate filter Y Y + N * (X X)
+ affine filter N * (Y Y X X)
+ */
+int SkBitmapProcState::maxCountForBufferSize(size_t bufferSize) const {
+ int32_t size = static_cast<int32_t>(bufferSize);
+
+ size &= ~3; // only care about 4-byte aligned chunks
+ if (fInvMatrix.isScaleTranslate()) {
+ size -= 4; // the shared Y (or YY) coordinate
+ if (size < 0) {
+ size = 0;
+ }
+ size >>= 1;
+ } else {
+ size >>= 2;
+ }
+
+ if (fBilerp) {
+ size >>= 1;
+ }
+
+ return size;
+}
+
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState.h b/gfx/skia/skia/src/core/SkBitmapProcState.h
new file mode 100644
index 0000000000..c6ab4c4bc6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProcState_DEFINED
+#define SkBitmapProcState_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkShader.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkFloatBits.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMipmapAccessor.h"
+
+typedef SkFixed3232 SkFractionalInt;
+#define SkScalarToFractionalInt(x) SkScalarToFixed3232(x)
+#define SkFractionalIntToFixed(x) SkFixed3232ToFixed(x)
+#define SkFixedToFractionalInt(x) SkFixedToFixed3232(x)
+#define SkFractionalIntToInt(x) SkFixed3232ToInt(x)
+
+class SkPaint;
+
+struct SkBitmapProcState {
+ SkBitmapProcState(const SkImage_Base* image, SkTileMode tmx, SkTileMode tmy);
+
+ bool setup(const SkMatrix& inv, SkColor color, const SkSamplingOptions& sampling) {
+ return this->init(inv, color, sampling)
+ && this->chooseProcs();
+ }
+
+ typedef void (*ShaderProc32)(const void* ctx, int x, int y, SkPMColor[], int count);
+
+ typedef void (*MatrixProc)(const SkBitmapProcState&,
+ uint32_t bitmapXY[],
+ int count,
+ int x, int y);
+
+ typedef void (*SampleProc32)(const SkBitmapProcState&,
+ const uint32_t[],
+ int count,
+ SkPMColor colors[]);
+
+ const SkImage_Base* fImage;
+
+ SkPixmap fPixmap;
+ SkMatrix fInvMatrix; // This changes based on tile mode.
+ SkAlpha fPaintAlpha;
+ SkTileMode fTileModeX;
+ SkTileMode fTileModeY;
+ bool fBilerp;
+
+ SkMatrixPriv::MapXYProc fInvProc; // chooseProcs
+ SkFractionalInt fInvSxFractionalInt;
+ SkFractionalInt fInvKyFractionalInt;
+
+ SkFixed fFilterOneX;
+ SkFixed fFilterOneY;
+
+ uint16_t fAlphaScale; // chooseProcs
+
+ /** Given the byte size of the index buffer to be passed to the matrix proc,
+ return the maximum number of resulting pixels that can be computed
+ (i.e. the number of SkPMColor values to be written by the sample proc).
+ This routine takes into account that filtering and scale-vs-affine
+ affect the amount of buffer space needed.
+
+ Only valid to call after chooseProcs (setContext) has been called. It is
+ safe to call this inside the shader's shadeSpan() method.
+ */
+ int maxCountForBufferSize(size_t bufferSize) const;
+
+ // If a shader proc is present, then the corresponding matrix/sample procs
+ // are ignored
+ ShaderProc32 getShaderProc32() const { return fShaderProc32; }
+
+#ifdef SK_DEBUG
+ MatrixProc getMatrixProc() const;
+#else
+ MatrixProc getMatrixProc() const { return fMatrixProc; }
+#endif
+ SampleProc32 getSampleProc32() const { return fSampleProc32; }
+
+private:
+ enum {
+ kBMStateSize = 136 // found by inspection. if too small, we will call new/delete
+ };
+ SkSTArenaAlloc<kBMStateSize> fAlloc;
+
+ ShaderProc32 fShaderProc32; // chooseProcs
+ // These are used if the shaderproc is nullptr
+ MatrixProc fMatrixProc; // chooseProcs
+ SampleProc32 fSampleProc32; // chooseProcs
+
+ bool init(const SkMatrix& inverse, SkAlpha, const SkSamplingOptions&);
+ bool chooseProcs();
+ MatrixProc chooseMatrixProc(bool trivial_matrix);
+ ShaderProc32 chooseShaderProc32();
+
+ // Return false if we failed to setup for fast translate (e.g. overflow)
+ bool setupForTranslate();
+
+#ifdef SK_DEBUG
+ static void DebugMatrixProc(const SkBitmapProcState&,
+ uint32_t[], int count, int x, int y);
+#endif
+};
+
+/* Macros for packing and unpacking pairs of 16bit values in a 32bit uint.
+ Used to allow access to a stream of uint16_t either one at a time, or
+ 2 at a time by unpacking a uint32_t
+ */
+#ifdef SK_CPU_BENDIAN
+ #define PACK_TWO_SHORTS(pri, sec) ((pri) << 16 | (sec))
+ #define UNPACK_PRIMARY_SHORT(packed) ((uint32_t)(packed) >> 16)
+ #define UNPACK_SECONDARY_SHORT(packed) ((packed) & 0xFFFF)
+#else
+ #define PACK_TWO_SHORTS(pri, sec) ((pri) | ((sec) << 16))
+ #define UNPACK_PRIMARY_SHORT(packed) ((packed) & 0xFFFF)
+ #define UNPACK_SECONDARY_SHORT(packed) ((uint32_t)(packed) >> 16)
+#endif
+
+#ifdef SK_DEBUG
+ static inline uint32_t pack_two_shorts(U16CPU pri, U16CPU sec) {
+ SkASSERT((uint16_t)pri == pri);
+ SkASSERT((uint16_t)sec == sec);
+ return PACK_TWO_SHORTS(pri, sec);
+ }
+#else
+ #define pack_two_shorts(pri, sec) PACK_TWO_SHORTS(pri, sec)
+#endif
+
+// Helper class for mapping the middle of pixel (x, y) into SkFractionalInt bitmap space.
+// Discussion:
+// Overall, this code takes a point in destination space, and uses the center of the pixel
+// at (x, y) to determine the sample point in source space. It then adjusts the pixel by different
+// amounts based in filtering and tiling.
+// This code can be broken into two main cases based on filtering:
+// * no filtering (nearest neighbor) - when using nearest neighbor filtering all tile modes reduce
+//     the sampled value by one ulp. If a simple point pt lies precisely on XXX.1/2 then it is forced down
+// when positive making 1/2 + 1/2 = .999999 instead of 1.0.
+// * filtering - in the filtering case, the code calculates the -1/2 shift for starting the
+// bilerp kernel. There is a twist; there is a big difference between clamp and the other tile
+// modes. In tile and repeat the matrix has been reduced by an additional 1/width and 1/height
+// factor. This maps from destination space to [0, 1) (instead of source space) to allow easy
+// modulo arithmetic. This means that the -1/2 needed by bilerp is actually 1/2 * 1/width for x
+// and 1/2 * 1/height for y. This is what happens when the poorly named fFilterOne{X|Y} is
+// divided by two.
+class SkBitmapProcStateAutoMapper {
+public:
+ SkBitmapProcStateAutoMapper(const SkBitmapProcState& s, int x, int y,
+ SkPoint* scalarPoint = nullptr) {
+ SkPoint pt;
+ s.fInvProc(s.fInvMatrix,
+ SkIntToScalar(x) + SK_ScalarHalf,
+ SkIntToScalar(y) + SK_ScalarHalf, &pt);
+
+ SkFixed biasX = 0, biasY = 0;
+ if (s.fBilerp) {
+ biasX = s.fFilterOneX >> 1;
+ biasY = s.fFilterOneY >> 1;
+ } else {
+ // Our rasterizer biases upward. That is a rect from 0.5...1.5 fills pixel 1 and not
+ // pixel 0. To make an image that is mapped 1:1 with device pixels but at a half pixel
+ // offset select every pixel from the src image once we make exact integer pixel sample
+ // values round down not up. Note that a mirror mapping will not have this property.
+ biasX = 1;
+ biasY = 1;
+ }
+
+ // punt to unsigned for defined underflow behavior
+ fX = (SkFractionalInt)((uint64_t)SkScalarToFractionalInt(pt.x()) -
+ (uint64_t)SkFixedToFractionalInt(biasX));
+ fY = (SkFractionalInt)((uint64_t)SkScalarToFractionalInt(pt.y()) -
+ (uint64_t)SkFixedToFractionalInt(biasY));
+
+ if (scalarPoint) {
+ scalarPoint->set(pt.x() - SkFixedToScalar(biasX),
+ pt.y() - SkFixedToScalar(biasY));
+ }
+ }
+
+ SkFractionalInt fractionalIntX() const { return fX; }
+ SkFractionalInt fractionalIntY() const { return fY; }
+
+ SkFixed fixedX() const { return SkFractionalIntToFixed(fX); }
+ SkFixed fixedY() const { return SkFractionalIntToFixed(fY); }
+
+ int intX() const { return SkFractionalIntToInt(fX); }
+ int intY() const { return SkFractionalIntToInt(fY); }
+
+private:
+ SkFractionalInt fX, fY;
+};
+
+namespace sktests {
+ // f is the value to pack, max is the largest the value can be.
+ uint32_t pack_clamp(SkFixed f, unsigned max);
+ // As above, but width is the width of the pretend bitmap.
+ uint32_t pack_repeat(SkFixed f, unsigned max, size_t width);
+ uint32_t pack_mirror(SkFixed f, unsigned max, size_t width);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp b/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp
new file mode 100644
index 0000000000..184c63ea78
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBitmapProcState_matrixProcs.cpp
@@ -0,0 +1,541 @@
+/*
+ * Copyright 2008 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkShader.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkBitmapProcState.h"
+#include "src/core/SkOpts.h"
+
+/*
+ * The decal_ functions require that
+ * 1. dx > 0
+ * 2. [fx, fx+dx, fx+2dx, fx+3dx, ... fx+(count-1)dx] are all <= maxX
+ *
+ * In addition, we use SkFractionalInt to keep more fractional precision than
+ * just SkFixed, so we will abort the decal_ call if dx is very small, since
+ * the decal_ function just operates on SkFixed. If that were changed, we could
+ * skip the very_small test here.
+ */
+// Returns true when every sample [fx, fx+dx, ..., fx+(count-1)dx] floors into
+// [0, max), i.e. the unclamped decal_ fast path may be taken safely in SkFixed.
+static inline bool can_truncate_to_fixed_for_decal(SkFixed fx,
+                                                   SkFixed dx,
+                                                   int count, unsigned max) {
+    SkASSERT(count > 0);
+
+    // if decal_ kept SkFractionalInt precision, this would just be dx <= 0
+    // I just made up the 1/256. Just don't want to perceive accumulated error
+    // if we truncate frDx and lose its low bits.
+    if (dx <= SK_Fixed1 / 256) {
+        return false;
+    }
+
+    // Note: it seems the test should be (fx <= max && lastFx <= max); but
+    // historically it's been a strict inequality check, and changing produces
+    // unexpected diffs. Further investigation is needed.
+
+    // We cast to unsigned so we don't have to check for negative values, which
+    // will now appear as very large positive values, and thus fail our test!
+    if ((unsigned)SkFixedFloorToInt(fx) >= max) {
+        return false;
+    }
+
+    // Promote to 64bit (48.16) to avoid overflow.
+    const uint64_t lastFx = fx + sk_64_mul(dx, count - 1);
+
+    return SkTFitsIn<int32_t>(lastFx) && (unsigned)SkFixedFloorToInt(SkTo<int32_t>(lastFx)) < max;
+}
+
+// When not filtering, we store 32-bit y, 16-bit x, 16-bit x, 16-bit x, ...
+// When filtering we write out 32-bit encodings, pairing 14.4 x0 with 14-bit x1.
+
+// The clamp routines may try to fall into one of these unclamped decal fast-paths.
+// (Only clamp works in the right coordinate space to check for decal.)
+
+// Writes count untiled 16-bit x-coordinates (fx >> 16, stepping by dx) into
+// dst, two packed per 32-bit word; the caller has already proven decal-safety.
+static void decal_nofilter_scale(uint32_t dst[], SkFixed fx, SkFixed dx, int count) {
+    // can_truncate_to_fixed_for_decal() checked only that stepping fx+=dx count-1
+    // times doesn't overflow fx, so we take unusual care not to step count times.
+    for (; count > 2; count -= 2) {
+        *dst++ = pack_two_shorts( (fx + 0) >> 16,
+                                  (fx + dx) >> 16);
+        fx += dx+dx;
+    }
+
+    SkASSERT(count <= 2);
+    switch (count) {
+        case 2: ((uint16_t*)dst)[1] = SkToU16((fx + dx) >> 16); [[fallthrough]];
+        case 1: ((uint16_t*)dst)[0] = SkToU16((fx + 0) >> 16);
+    }
+}
+
+// A generic implementation for unfiltered scale+translate, templated on tiling method.
+// A generic implementation for unfiltered scale+translate, templated on tiling method.
+// Output layout: one 32-bit tiled y, then count 16-bit tiled x-coordinates.
+template <unsigned (*tilex)(SkFixed, int), unsigned (*tiley)(SkFixed, int), bool tryDecal>
+static void nofilter_scale(const SkBitmapProcState& s,
+                           uint32_t xy[], int count, int x, int y) {
+    SkASSERT(s.fInvMatrix.isScaleTranslate());
+
+    // Write out our 32-bit y, and get our initial fx.
+    SkFractionalInt fx;
+    {
+        const SkBitmapProcStateAutoMapper mapper(s, x, y);
+        *xy++ = tiley(mapper.fixedY(), s.fPixmap.height() - 1);
+        fx = mapper.fractionalIntX();
+    }
+
+    const unsigned maxX = s.fPixmap.width() - 1;
+    if (0 == maxX) {
+        // If width == 1, all the x-values must refer to that pixel, and must be zero.
+        memset(xy, 0, count * sizeof(uint16_t));
+        return;
+    }
+
+    const SkFractionalInt dx = s.fInvSxFractionalInt;
+
+    if (tryDecal) {
+        // Decal fast path (clamp only — see comment above decal_nofilter_scale):
+        // truncate to SkFixed and skip the per-pixel tile calls entirely.
+        const SkFixed fixedFx = SkFractionalIntToFixed(fx);
+        const SkFixed fixedDx = SkFractionalIntToFixed(dx);
+
+        if (can_truncate_to_fixed_for_decal(fixedFx, fixedDx, count, maxX)) {
+            decal_nofilter_scale(xy, fixedFx, fixedDx, count);
+            return;
+        }
+    }
+
+    // Remember, each x-coordinate is 16-bit.
+    for (; count >= 2; count -= 2) {
+        *xy++ = pack_two_shorts(tilex(SkFractionalIntToFixed(fx ), maxX),
+                                tilex(SkFractionalIntToFixed(fx + dx), maxX));
+        fx += dx+dx;
+    }
+
+    // Odd trailing coordinate (or count < 2): write 16-bit values one at a time.
+    auto xx = (uint16_t*)xy;
+    while (count --> 0) {
+        *xx++ = tilex(SkFractionalIntToFixed(fx), maxX);
+        fx += dx;
+    }
+}
+
+// Unfiltered affine (no perspective): emits one packed word per pixel,
+// tiled y in the high 16 bits and tiled x in the low 16, stepping fx/fy
+// by the inverse-matrix deltas each iteration.
+template <unsigned (*tilex)(SkFixed, int), unsigned (*tiley)(SkFixed, int)>
+static void nofilter_affine(const SkBitmapProcState& s,
+                            uint32_t xy[], int count, int x, int y) {
+    SkASSERT(!s.fInvMatrix.hasPerspective());
+
+    const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+    SkFractionalInt fx = mapper.fractionalIntX(),
+                    fy = mapper.fractionalIntY(),
+                    dx = s.fInvSxFractionalInt,
+                    dy = s.fInvKyFractionalInt;
+    int maxX = s.fPixmap.width () - 1,
+        maxY = s.fPixmap.height() - 1;
+
+    while (count --> 0) {
+        *xy++ = (tiley(SkFractionalIntToFixed(fy), maxY) << 16)
+              | (tilex(SkFractionalIntToFixed(fx), maxX) );
+        fx += dx;
+        fy += dy;
+    }
+}
+
+// used when both tilex and tiley are clamp
+// Extract the high four fractional bits from fx, the lerp parameter when filtering.
+static unsigned extract_low_bits_clamp_clamp(SkFixed fx, int /*max*/) {
+    // If we're already scaled up to by max like clamp/decal,
+    // just grab the high four fractional bits.
+    return (fx >> 12) & 0xf;
+}
+
+// used when one of tilex and tiley is not clamp
+static unsigned extract_low_bits_general(SkFixed fx, int max) {
+    // In repeat or mirror fx is in [0,1], so scale up by max first.
+    // TODO: remove the +1 here and the -1 at the call sites...
+    return extract_low_bits_clamp_clamp((fx & 0xffff) * (max+1), max);
+}
+
+// Takes a SkFixed number and packs it into a 32bit integer in the following schema:
+// 14 bits to represent the low integer value (n)
+// 4 bits to represent a linear distance between low and high (floored to nearest 1/16)
+// 14 bits to represent the high integer value (n+1)
+// If f is less than 0, then both integers will be 0. If f is greater than or equal to max, both
+// integers will be that max value. In all cases, the middle 4 bits will represent the fractional
+// part (to a resolution of 1/16). If the two integers are equal, doing any linear interpolation
+// will result in the same integer, so the fractional part does not matter.
+//
+// The "one" parameter corresponds to the maximum distance between the high and low coordinate.
+// For the clamp operation, this is just SkFixed1, but for others it is 1 / pixmap width because the
+// distances are already normalized to between 0 and 1.0.
+//
+// See also SK_OPTS_NS::decode_packed_coordinates_and_weight for unpacking this value.
+//
+// NOTE(review): (f + one) can wrap SkFixed near the top of its range; the tile
+// function then clamps the wrapped value, which presumably is why the signed
+// overflow is suppressed for UBSan rather than avoided — confirm with upstream.
+template <unsigned (*tile)(SkFixed, int), unsigned (*extract_low_bits)(SkFixed, int)>
+SK_NO_SANITIZE("signed-integer-overflow")
+static uint32_t pack(SkFixed f, unsigned max, SkFixed one) {
+    uint32_t packed = tile(f, max); // low coordinate in high bits
+    packed = (packed << 4) | extract_low_bits(f, max); // (lerp weight _is_ coord fractional part)
+    packed = (packed << 14) | tile((f + one), max); // high coordinate in low bits
+    return packed;
+}
+
+// Filtered (bilerp) scale+translate: writes one packed y word up front, then
+// count packed x words, each encoding (lo coord | 4-bit weight | hi coord).
+template <unsigned (*tilex)(SkFixed, int), unsigned (*tiley)(SkFixed, int), unsigned (*extract_low_bits)(SkFixed, int), bool tryDecal>
+static void filter_scale(const SkBitmapProcState& s,
+                         uint32_t xy[], int count, int x, int y) {
+    SkASSERT(s.fInvMatrix.isScaleTranslate());
+
+    const unsigned maxX = s.fPixmap.width() - 1;
+    const SkFractionalInt dx = s.fInvSxFractionalInt;
+    SkFractionalInt fx;
+    {
+        const SkBitmapProcStateAutoMapper mapper(s, x, y);
+        const unsigned maxY = s.fPixmap.height() - 1;
+        // compute our two Y values up front
+        *xy++ = pack<tiley, extract_low_bits>(mapper.fixedY(), maxY, s.fFilterOneY);
+        // now initialize fx
+        fx = mapper.fractionalIntX();
+    }
+
+    // For historical reasons we check both ends are < maxX rather than <= maxX.
+    // TODO: try changing this? See also can_truncate_to_fixed_for_decal().
+    if (tryDecal &&
+        (unsigned)SkFractionalIntToInt(fx ) < maxX &&
+        (unsigned)SkFractionalIntToInt(fx + dx*(count-1)) < maxX) {
+        // Decal fast path: every sample pair (n, n+1) is in range, so pack
+        // n, the 4-bit weight, and n+1 directly without tile calls.
+        while (count --> 0) {
+            SkFixed fixedFx = SkFractionalIntToFixed(fx);
+            SkASSERT((fixedFx >> (16 + 14)) == 0);
+            *xy++ = (fixedFx >> 12 << 14) | ((fixedFx >> 16) + 1);
+            fx += dx;
+        }
+        return;
+    }
+
+    while (count --> 0) {
+        *xy++ = pack<tilex, extract_low_bits>(SkFractionalIntToFixed(fx), maxX, s.fFilterOneX);
+        fx += dx;
+    }
+}
+
+// Filtered affine: per pixel, emits a packed y word followed by a packed x word.
+template <unsigned (*tilex)(SkFixed, int), unsigned (*tiley)(SkFixed, int), unsigned (*extract_low_bits)(SkFixed, int)>
+static void filter_affine(const SkBitmapProcState& s,
+                          uint32_t xy[], int count, int x, int y) {
+    SkASSERT(!s.fInvMatrix.hasPerspective());
+
+    const SkBitmapProcStateAutoMapper mapper(s, x, y);
+
+    SkFixed oneX = s.fFilterOneX,
+            oneY = s.fFilterOneY;
+
+    SkFractionalInt fx = mapper.fractionalIntX(),
+                    fy = mapper.fractionalIntY(),
+                    dx = s.fInvSxFractionalInt,
+                    dy = s.fInvKyFractionalInt;
+    unsigned maxX = s.fPixmap.width () - 1,
+             maxY = s.fPixmap.height() - 1;
+    while (count --> 0) {
+        *xy++ = pack<tiley, extract_low_bits>(SkFractionalIntToFixed(fy), maxY, oneY);
+        *xy++ = pack<tilex, extract_low_bits>(SkFractionalIntToFixed(fx), maxX, oneX);
+
+        fy += dy;
+        fx += dx;
+    }
+}
+
+// Helper to ensure that when we shift down, we do it w/o sign-extension
+// so the caller doesn't have to manually mask off the top 16 bits.
+// Helper to ensure that when we shift down, we do it w/o sign-extension
+// so the caller doesn't have to manually mask off the top 16 bits.
+static inline unsigned SK_USHIFT16(unsigned x) {
+    return x >> 16;
+}
+
+// Tile functions mapping a SkFixed coordinate to an integer in [0, max].
+// repeat/mirror use only the fractional bits of fx (normalized space, scaled
+// by max+1); clamp works directly in pixel space on the integer part.
+static unsigned repeat(SkFixed fx, int max) {
+    SkASSERT(max < 65535);
+    return SK_USHIFT16((unsigned)(fx & 0xFFFF) * (max + 1));
+}
+static unsigned mirror(SkFixed fx, int max) {
+    SkASSERT(max < 65535);
+    // s is 0xFFFFFFFF if we're on an odd interval, or 0 if an even interval
+    SkFixed s = SkLeftShift(fx, 15) >> 31;
+
+    // This should be exactly the same as repeat(fx ^ s, max) from here on.
+    return SK_USHIFT16( ((fx ^ s) & 0xFFFF) * (max + 1) );
+}
+
+static unsigned clamp(SkFixed fx, int max) {
+    return SkTPin(fx >> 16, 0, max);
+}
+
+// Proc tables, indexed by chooseMatrixProc() as (affine ? 2 : 0) | (filter ? 1 : 0):
+//   [0] nofilter scale, [1] filter scale, [2] nofilter affine, [3] filter affine.
+// Only the clamp table enables the decal fast path (tryDecal == true).
+static const SkBitmapProcState::MatrixProc ClampX_ClampY_Procs[] = {
+    nofilter_scale <clamp, clamp, true>, filter_scale <clamp, clamp, extract_low_bits_clamp_clamp, true>,
+    nofilter_affine<clamp, clamp>, filter_affine<clamp, clamp, extract_low_bits_clamp_clamp>,
+};
+static const SkBitmapProcState::MatrixProc RepeatX_RepeatY_Procs[] = {
+    nofilter_scale <repeat, repeat, false>, filter_scale <repeat, repeat, extract_low_bits_general, false>,
+    nofilter_affine<repeat, repeat>, filter_affine<repeat, repeat, extract_low_bits_general>
+};
+static const SkBitmapProcState::MatrixProc MirrorX_MirrorY_Procs[] = {
+    nofilter_scale <mirror, mirror, false>, filter_scale <mirror, mirror, extract_low_bits_general, false>,
+    nofilter_affine<mirror, mirror>, filter_affine<mirror, mirror, extract_low_bits_general>,
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// This next chunk has some specializations for unfiltered translate-only matrices.
+
+// Integer clamp: pins x into [0, n-1].
+static inline U16CPU int_clamp(int x, int n) {
+    if (x < 0) { x = 0; }
+    if (x >= n) { x = n - 1; }
+    return x;
+}
+
+/* returns 0...(n-1) given any x (positive or negative).
+
+   As an example, if n (which is always positive) is 5...
+
+          x: -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8
+    returns:  2  3  4  0  1  2  3  4 0 1 2 3 4 0 1 2 3
+ */
+static inline int sk_int_mod(int x, int n) {
+    SkASSERT(n > 0);
+    if ((unsigned)x >= (unsigned)n) {
+        if (x < 0) {
+            // floor-mod for negatives: n + ~(~x % n) == n - 1 - ((-x - 1) % n)
+            x = n + ~(~x % n);
+        } else {
+            x = x % n;
+        }
+    }
+    return x;
+}
+
+// Integer repeat: wrap x into [0, n-1].
+static inline U16CPU int_repeat(int x, int n) {
+    return sk_int_mod(x, n);
+}
+
+// Integer mirror: wrap x into [0, 2n-1], then reflect the second half back.
+static inline U16CPU int_mirror(int x, int n) {
+    x = sk_int_mod(x, 2 * n);
+    if (x >= n) {
+        x = n + ~(x - n);  // == 2n - 1 - x
+    }
+    return x;
+}
+
+// Writes pos, pos+1, pos+2, ... into xptr (count values).
+static void fill_sequential(uint16_t xptr[], int pos, int count) {
+    while (count --> 0) {
+        *xptr++ = pos++;
+    }
+}
+
+// Writes pos, pos-1, pos-2, ... into xptr (count values); pos must not go negative.
+static void fill_backwards(uint16_t xptr[], int pos, int count) {
+    while (count --> 0) {
+        SkASSERT(pos >= 0);
+        *xptr++ = pos--;
+    }
+}
+
+// Unfiltered translate-only, clamp in x (tiley handles y): emits one tiled y,
+// then count 16-bit x values — zeros before the bitmap, a sequential run
+// across it, and width-1 repeated past the right edge.
+template< U16CPU (tiley)(int x, int n) >
+static void clampx_nofilter_trans(const SkBitmapProcState& s,
+                                  uint32_t xy[], int count, int x, int y) {
+    SkASSERT(s.fInvMatrix.isTranslate());
+
+    const SkBitmapProcStateAutoMapper mapper(s, x, y);
+    *xy++ = tiley(mapper.intY(), s.fPixmap.height());
+    int xpos = mapper.intX();
+
+    const int width = s.fPixmap.width();
+    if (1 == width) {
+        // all of the following X values must be 0
+        memset(xy, 0, count * sizeof(uint16_t));
+        return;
+    }
+
+    uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+    int n;
+
+    // fill before 0 as needed
+    if (xpos < 0) {
+        n = -xpos;
+        if (n > count) {
+            n = count;
+        }
+        memset(xptr, 0, n * sizeof(uint16_t));
+        count -= n;
+        if (0 == count) {
+            return;
+        }
+        xptr += n;
+        xpos = 0;
+    }
+
+    // fill in 0..width-1 if needed
+    if (xpos < width) {
+        n = width - xpos;
+        if (n > count) {
+            n = count;
+        }
+        fill_sequential(xptr, xpos, n);
+        count -= n;
+        if (0 == count) {
+            return;
+        }
+        xptr += n;
+    }
+
+    // fill the remaining with the max value
+    SkOpts::memset16(xptr, width - 1, count);
+}
+
+// Unfiltered translate-only, repeat in x: a partial first period starting at
+// xpos mod width, then whole 0..width-1 periods, then a partial tail.
+template< U16CPU (tiley)(int x, int n) >
+static void repeatx_nofilter_trans(const SkBitmapProcState& s,
+                                   uint32_t xy[], int count, int x, int y) {
+    SkASSERT(s.fInvMatrix.isTranslate());
+
+    const SkBitmapProcStateAutoMapper mapper(s, x, y);
+    *xy++ = tiley(mapper.intY(), s.fPixmap.height());
+    int xpos = mapper.intX();
+
+    const int width = s.fPixmap.width();
+    if (1 == width) {
+        // all of the following X values must be 0
+        memset(xy, 0, count * sizeof(uint16_t));
+        return;
+    }
+
+    uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+    int start = sk_int_mod(xpos, width);
+    int n = width - start;
+    if (n > count) {
+        n = count;
+    }
+    fill_sequential(xptr, start, n);
+    xptr += n;
+    count -= n;
+
+    while (count >= width) {
+        fill_sequential(xptr, 0, width);
+        xptr += width;
+        count -= width;
+    }
+
+    if (count > 0) {
+        fill_sequential(xptr, 0, count);
+    }
+}
+
+// Unfiltered translate-only, mirror in x: alternating forward (0..width-1)
+// and backward (width-1..0) runs, with a partial run at each end.
+template< U16CPU (tiley)(int x, int n) >
+static void mirrorx_nofilter_trans(const SkBitmapProcState& s,
+                                   uint32_t xy[], int count, int x, int y) {
+    SkASSERT(s.fInvMatrix.isTranslate());
+
+    const SkBitmapProcStateAutoMapper mapper(s, x, y);
+    *xy++ = tiley(mapper.intY(), s.fPixmap.height());
+    int xpos = mapper.intX();
+
+    const int width = s.fPixmap.width();
+    if (1 == width) {
+        // all of the following X values must be 0
+        memset(xy, 0, count * sizeof(uint16_t));
+        return;
+    }
+
+    uint16_t* xptr = reinterpret_cast<uint16_t*>(xy);
+    // need to know our start, and our initial phase (forward or backward)
+    bool forward;
+    int n;
+    int start = sk_int_mod(xpos, 2 * width);
+    if (start >= width) {
+        start = width + ~(start - width);  // reflect into [0, width)
+        forward = false;
+        n = start + 1; // [start .. 0]
+    } else {
+        forward = true;
+        n = width - start; // [start .. width)
+    }
+    if (n > count) {
+        n = count;
+    }
+    if (forward) {
+        fill_sequential(xptr, start, n);
+    } else {
+        fill_backwards(xptr, start, n);
+    }
+    forward = !forward;
+    xptr += n;
+    count -= n;
+
+    while (count >= width) {
+        if (forward) {
+            fill_sequential(xptr, 0, width);
+        } else {
+            fill_backwards(xptr, width - 1, width);
+        }
+        forward = !forward;
+        xptr += width;
+        count -= width;
+    }
+
+    if (count > 0) {
+        if (forward) {
+            fill_sequential(xptr, 0, count);
+        } else {
+            fill_backwards(xptr, width - 1, count);
+        }
+    }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// The main entry point to the file, choosing between everything above.
+
+// Selects the MatrixProc for the current tile modes, matrix class, and filter
+// setting, and sets fFilterOneX/Y to match the chosen proc family.
+// Mixed tile modes (fTileModeX != fTileModeY) are not handled here: that case
+// debug-asserts and returns nullptr.
+SkBitmapProcState::MatrixProc SkBitmapProcState::chooseMatrixProc(bool translate_only_matrix) {
+    SkASSERT(!fInvMatrix.hasPerspective());
+    SkASSERT(fTileModeX != SkTileMode::kDecal);
+
+    if( fTileModeX == fTileModeY ) {
+        // Check for our special case translate methods when there is no scale/affine/perspective.
+        if (translate_only_matrix && !fBilerp) {
+            switch (fTileModeX) {
+                default: SkASSERT(false); [[fallthrough]];
+                case SkTileMode::kClamp: return clampx_nofilter_trans<int_clamp>;
+                case SkTileMode::kRepeat: return repeatx_nofilter_trans<int_repeat>;
+                case SkTileMode::kMirror: return mirrorx_nofilter_trans<int_mirror>;
+            }
+        }
+
+        // The arrays are all [ nofilter, filter ].
+        int index = fBilerp ? 1 : 0;
+        if (!fInvMatrix.isScaleTranslate()) {
+            index |= 2;
+        }
+
+        if (fTileModeX == SkTileMode::kClamp) {
+            // clamp gets special version of filterOne, working in non-normalized space (allowing decal)
+            fFilterOneX = SK_Fixed1;
+            fFilterOneY = SK_Fixed1;
+            return ClampX_ClampY_Procs[index];
+        }
+
+        // all remaining procs use this form for filterOne, putting them into normalized space.
+        fFilterOneX = SK_Fixed1 / fPixmap.width();
+        fFilterOneY = SK_Fixed1 / fPixmap.height();
+
+        if (fTileModeX == SkTileMode::kRepeat) {
+            return RepeatX_RepeatY_Procs[index];
+        }
+        return MirrorX_MirrorY_Procs[index];
+    }
+
+    // Unreached for supported inputs: asserts in debug, nullptr in release.
+    SkASSERT(fTileModeX == fTileModeY);
+    return nullptr;
+}
+
+// Test hooks: forward to the file-static pack<>() with each tile flavor.
+uint32_t sktests::pack_clamp(SkFixed f, unsigned max) {
+    // Based on ClampX_ClampY_Procs[1] (filter_scale)
+    return ::pack<clamp, extract_low_bits_clamp_clamp>(f, max, SK_Fixed1);
+}
+
+uint32_t sktests::pack_repeat(SkFixed f, unsigned max, size_t width) {
+    // Based on RepeatX_RepeatY_Procs[1] (filter_scale)
+    return ::pack<repeat, extract_low_bits_general>(f, max, SK_Fixed1 / width);
+}
+
+uint32_t sktests::pack_mirror(SkFixed f, unsigned max, size_t width) {
+    // Based on MirrorX_MirrorY_Procs[1] (filter_scale)
+    return ::pack<mirror, extract_low_bits_general>(f, max, SK_Fixed1 / width);
+}
diff --git a/gfx/skia/skia/src/core/SkBlendMode.cpp b/gfx/skia/skia/src/core/SkBlendMode.cpp
new file mode 100644
index 0000000000..1d3bfd7bd1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlendMode.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlendModePriv.h"
+
+#include "src/base/SkVx.h"
+#include "src/core/SkRasterPipeline.h"
+
+// Returns true when coverage may be pre-multiplied into the source color
+// before blending (see breakdown below); rgb_coverage means per-channel
+// (LCD-style) coverage rather than a single alpha value.
+bool SkBlendMode_ShouldPreScaleCoverage(SkBlendMode mode, bool rgb_coverage) {
+    // The most important things we do here are:
+    //   1) never pre-scale with rgb coverage if the blend mode involves a source-alpha term;
+    //   2) always pre-scale Plus.
+    //
+    // When we pre-scale with rgb coverage, we scale each of source r,g,b, with a distinct value,
+    // and source alpha with one of those three values. This process destructively updates the
+    // source-alpha term, so we can't evaluate blend modes that need its original value.
+    //
+    // Plus always requires pre-scaling as a specific quirk of its implementation in
+    // SkRasterPipeline. This lets us put the clamp inside the blend mode itself rather
+    // than as a separate stage that'd come after the lerp.
+    //
+    // This function is a finer-grained breakdown of SkBlendMode_SupportsCoverageAsAlpha().
+    switch (mode) {
+        case SkBlendMode::kDst: // d --> no sa term, ok!
+        case SkBlendMode::kDstOver: // d + s*inv(da) --> no sa term, ok!
+        case SkBlendMode::kPlus: // clamp(s+d) --> no sa term, ok!
+            return true;
+
+        case SkBlendMode::kDstOut: // d * inv(sa)
+        case SkBlendMode::kSrcATop: // s*da + d*inv(sa)
+        case SkBlendMode::kSrcOver: // s + d*inv(sa)
+        case SkBlendMode::kXor: // s*inv(da) + d*inv(sa)
+            // These all read source alpha, so per-channel coverage would corrupt them.
+            return !rgb_coverage;
+
+        default: break;
+    }
+    return false;
+}
+
+// Users of this function may want to switch to the rgb-coverage aware version above.
+bool SkBlendMode_SupportsCoverageAsAlpha(SkBlendMode mode) {
+    return SkBlendMode_ShouldPreScaleCoverage(mode, false);
+}
+
+// If `mode` is a Porter-Duff (coefficient-based) mode — i.e. <= kScreen —
+// writes its source/dest coefficients into *src/*dst (either may be null)
+// and returns true; returns false for the advanced modes.
+bool SkBlendMode_AsCoeff(SkBlendMode mode, SkBlendModeCoeff* src, SkBlendModeCoeff* dst) {
+    struct CoeffRec {
+        SkBlendModeCoeff fSrc;
+        SkBlendModeCoeff fDst;
+    };
+
+    // IMPORTANT: this table is indexed by static_cast<int>(mode), so its row
+    // order must match the SkBlendMode enum order up through kScreen.
+    static constexpr CoeffRec kCoeffs[] = {
+        // For Porter-Duff blend functions, color = src * src coeff + dst * dst coeff
+        // src coeff dst coeff blend func
+        // ---------------------- ----------------------- ----------
+        { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kZero }, // clear
+        { SkBlendModeCoeff::kOne, SkBlendModeCoeff::kZero }, // src
+        { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kOne }, // dst
+        { SkBlendModeCoeff::kOne, SkBlendModeCoeff::kISA }, // src-over
+        { SkBlendModeCoeff::kIDA, SkBlendModeCoeff::kOne }, // dst-over
+        { SkBlendModeCoeff::kDA, SkBlendModeCoeff::kZero }, // src-in
+        { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kSA }, // dst-in
+        { SkBlendModeCoeff::kIDA, SkBlendModeCoeff::kZero }, // src-out
+        { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kISA }, // dst-out
+        { SkBlendModeCoeff::kDA, SkBlendModeCoeff::kISA }, // src-atop
+        { SkBlendModeCoeff::kIDA, SkBlendModeCoeff::kSA }, // dst-atop
+        { SkBlendModeCoeff::kIDA, SkBlendModeCoeff::kISA }, // xor
+
+        { SkBlendModeCoeff::kOne, SkBlendModeCoeff::kOne }, // plus
+        { SkBlendModeCoeff::kZero, SkBlendModeCoeff::kSC }, // modulate
+        { SkBlendModeCoeff::kOne, SkBlendModeCoeff::kISC }, // screen
+    };
+
+    if (mode > SkBlendMode::kScreen) {
+        return false;
+    }
+    if (src) {
+        *src = kCoeffs[static_cast<int>(mode)].fSrc;
+    }
+    if (dst) {
+        *dst = kCoeffs[static_cast<int>(mode)].fDst;
+    }
+    return true;
+}
+
+// Appends the single SkRasterPipeline stage that implements `mode`.
+// kSrc needs no stage at all (the source already is the result), so it
+// returns early without appending anything.
+void SkBlendMode_AppendStages(SkBlendMode mode, SkRasterPipeline* p) {
+    auto stage = SkRasterPipelineOp::srcover;
+    switch (mode) {
+        case SkBlendMode::kClear: stage = SkRasterPipelineOp::clear; break;
+        case SkBlendMode::kSrc: return; // This stage is a no-op.
+        case SkBlendMode::kDst: stage = SkRasterPipelineOp::move_dst_src; break;
+        case SkBlendMode::kSrcOver: stage = SkRasterPipelineOp::srcover; break;
+        case SkBlendMode::kDstOver: stage = SkRasterPipelineOp::dstover; break;
+        case SkBlendMode::kSrcIn: stage = SkRasterPipelineOp::srcin; break;
+        case SkBlendMode::kDstIn: stage = SkRasterPipelineOp::dstin; break;
+        case SkBlendMode::kSrcOut: stage = SkRasterPipelineOp::srcout; break;
+        case SkBlendMode::kDstOut: stage = SkRasterPipelineOp::dstout; break;
+        case SkBlendMode::kSrcATop: stage = SkRasterPipelineOp::srcatop; break;
+        case SkBlendMode::kDstATop: stage = SkRasterPipelineOp::dstatop; break;
+        case SkBlendMode::kXor: stage = SkRasterPipelineOp::xor_; break;
+        case SkBlendMode::kPlus: stage = SkRasterPipelineOp::plus_; break;
+        case SkBlendMode::kModulate: stage = SkRasterPipelineOp::modulate; break;
+
+        case SkBlendMode::kScreen: stage = SkRasterPipelineOp::screen; break;
+        case SkBlendMode::kOverlay: stage = SkRasterPipelineOp::overlay; break;
+        case SkBlendMode::kDarken: stage = SkRasterPipelineOp::darken; break;
+        case SkBlendMode::kLighten: stage = SkRasterPipelineOp::lighten; break;
+        case SkBlendMode::kColorDodge: stage = SkRasterPipelineOp::colordodge; break;
+        case SkBlendMode::kColorBurn: stage = SkRasterPipelineOp::colorburn; break;
+        case SkBlendMode::kHardLight: stage = SkRasterPipelineOp::hardlight; break;
+        case SkBlendMode::kSoftLight: stage = SkRasterPipelineOp::softlight; break;
+        case SkBlendMode::kDifference: stage = SkRasterPipelineOp::difference; break;
+        case SkBlendMode::kExclusion: stage = SkRasterPipelineOp::exclusion; break;
+        case SkBlendMode::kMultiply: stage = SkRasterPipelineOp::multiply; break;
+
+        case SkBlendMode::kHue: stage = SkRasterPipelineOp::hue; break;
+        case SkBlendMode::kSaturation: stage = SkRasterPipelineOp::saturation; break;
+        case SkBlendMode::kColor: stage = SkRasterPipelineOp::color; break;
+        case SkBlendMode::kLuminosity: stage = SkRasterPipelineOp::luminosity; break;
+    }
+    p->append(stage);
+}
+
+// Evaluates `mode` on a single premultiplied src/dst color pair on the CPU.
+SkPMColor4f SkBlendMode_Apply(SkBlendMode mode, const SkPMColor4f& src, const SkPMColor4f& dst) {
+    // special-case simple/common modes...
+    switch (mode) {
+        case SkBlendMode::kClear: return SK_PMColor4fTRANSPARENT;
+        case SkBlendMode::kSrc: return src;
+        case SkBlendMode::kDst: return dst;
+        case SkBlendMode::kSrcOver: {
+            // s + d*(1-sa), computed with 4-wide SIMD.
+            SkPMColor4f r;
+            (skvx::float4::Load(src.vec()) + skvx::float4::Load(dst.vec()) * (1-src.fA)).store(&r);
+            return r;
+        }
+        default:
+            break;
+    }
+
+    // General case: run a tiny 1x1 raster pipeline — load dst, move it into
+    // the dst slot, load src, append the blend stage, and store the result.
+    SkRasterPipeline_<256> p;
+    SkPMColor4f src_storage = src,
+                dst_storage = dst,
+                res_storage;
+    SkRasterPipeline_MemoryCtx src_ctx = { &src_storage, 0 },
+                               dst_ctx = { &dst_storage, 0 },
+                               res_ctx = { &res_storage, 0 };
+
+    p.append(SkRasterPipelineOp::load_f32, &dst_ctx);
+    p.append(SkRasterPipelineOp::move_src_dst);
+    p.append(SkRasterPipelineOp::load_f32, &src_ctx);
+    SkBlendMode_AppendStages(mode, &p);
+    p.append(SkRasterPipelineOp::store_f32, &res_ctx);
+    p.run(0,0, 1,1);
+    return res_storage;
+}
diff --git a/gfx/skia/skia/src/core/SkBlendModeBlender.cpp b/gfx/skia/skia/src/core/SkBlendModeBlender.cpp
new file mode 100644
index 0000000000..fab4359b0f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlendModeBlender.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlendModeBlender.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/effects/GrBlendFragmentProcessor.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+// Returns the process-wide singleton SkBlendModeBlender for `mode`, ref'ed.
+// Each singleton is created lazily on first use and intentionally never
+// destroyed (leaked heap object with a static pointer).
+sk_sp<SkBlender> SkBlender::Mode(SkBlendMode mode) {
+#define RETURN_SINGLETON_BLENDER(m) \
+ case m: { \
+ static auto* sBlender = new SkBlendModeBlender{m}; \
+ return sk_ref_sp(sBlender); \
+ }
+
+    // The switch enumerates every SkBlendMode value; falling out of it means
+    // the caller passed an out-of-range enum.
+    switch (mode) {
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kClear)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kSrc)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kDst)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kSrcOver)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kDstOver)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kSrcIn)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kDstIn)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kSrcOut)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kDstOut)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kSrcATop)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kDstATop)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kXor)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kPlus)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kModulate)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kScreen)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kOverlay)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kDarken)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kLighten)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kColorDodge)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kColorBurn)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kHardLight)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kSoftLight)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kDifference)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kExclusion)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kMultiply)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kHue)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kSaturation)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kColor)
+        RETURN_SINGLETON_BLENDER(SkBlendMode::kLuminosity)
+    }
+
+    SkDEBUGFAILF("invalid blend mode %d", (int)mode);
+    return nullptr;
+
+#undef RETURN_SINGLETON_BLENDER
+}
+
+#if defined(SK_GRAPHITE)
+// Adds this blender to a Graphite paint key. Primitive-color blends only emit
+// a block when the blender maps to a concrete SkBlendMode; surface blends
+// presumably default to kSrcOver when no mode is available — TODO confirm
+// against Graphite key-building conventions.
+void SkBlenderBase::addToKey(const skgpu::graphite::KeyContext& keyContext,
+                             skgpu::graphite::PaintParamsKeyBuilder* builder,
+                             skgpu::graphite::PipelineDataGatherer* gatherer,
+                             skgpu::graphite::DstColorType dstColorType) const {
+    using namespace skgpu::graphite;
+    SkASSERT(dstColorType == DstColorType::kSurface || dstColorType == DstColorType::kPrimitive);
+
+    const bool primitiveColorBlender = dstColorType == DstColorType::kPrimitive;
+    std::optional<SkBlendMode> bm = as_BB(this)->asBlendMode();
+    if (primitiveColorBlender && bm.has_value()) {
+        PrimitiveBlendModeBlock::BeginBlock(keyContext, builder, gatherer, bm.value());
+        builder->endBlock();
+    } else if (!primitiveColorBlender) {
+        BlendModeBlock::BeginBlock(keyContext, builder, gatherer,
+                                   bm.value_or(SkBlendMode::kSrcOver));
+        builder->endBlock();
+    }
+}
+#endif
+
+// Deserialization: reads a mode (validated to <= kLastMode by read32LE) and
+// returns the shared singleton blender for it.
+sk_sp<SkFlattenable> SkBlendModeBlender::CreateProc(SkReadBuffer& buffer) {
+    SkBlendMode mode = buffer.read32LE(SkBlendMode::kLastMode);
+    return SkBlender::Mode(mode);
+}
+
+// Serialization: the blend mode is the blender's entire state.
+void SkBlendModeBlender::flatten(SkWriteBuffer& buffer) const {
+    buffer.writeInt((int)fMode);
+}
+
+#if defined(SK_GANESH)
+// Ganesh backend: wrap the mode in a blend fragment processor.
+std::unique_ptr<GrFragmentProcessor> SkBlendModeBlender::asFragmentProcessor(
+        std::unique_ptr<GrFragmentProcessor> srcFP,
+        std::unique_ptr<GrFragmentProcessor> dstFP,
+        const GrFPArgs& fpArgs) const {
+    return GrBlendFragmentProcessor::Make(std::move(srcFP), std::move(dstFP), fMode);
+}
+#endif
+
+// Raster pipeline backend: append the mode's blend stage; always succeeds.
+bool SkBlendModeBlender::onAppendStages(const SkStageRec& rec) const {
+    SkBlendMode_AppendStages(fMode, rec.fPipeline);
+    return true;
+}
+
+// SkVM backend: delegate to the builder's built-in blend-mode lowering.
+skvm::Color SkBlendModeBlender::onProgram(skvm::Builder* p, skvm::Color src, skvm::Color dst,
+                                          const SkColorInfo& colorInfo, skvm::Uniforms* uniforms,
+                                          SkArenaAlloc* alloc) const {
+    return p->blend(fMode, src, dst);
+}
diff --git a/gfx/skia/skia/src/core/SkBlendModeBlender.h b/gfx/skia/skia/src/core/SkBlendModeBlender.h
new file mode 100644
index 0000000000..5e0ab291e0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlendModeBlender.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlendModeBlender_DEFINED
+#define SkBlendModeBlender_DEFINED
+
+#include "src/core/SkBlenderBase.h"
+
+/**
+ * SkBlender implementation that wraps a single fixed SkBlendMode.
+ * Instances are handed out as per-mode singletons by SkBlender::Mode().
+ */
+class SkBlendModeBlender : public SkBlenderBase {
+public:
+    SkBlendModeBlender(SkBlendMode mode) : fMode(mode) {}
+
+    SK_FLATTENABLE_HOOKS(SkBlendModeBlender)
+
+private:
+    using INHERITED = SkBlenderBase;
+
+    // Always reports the wrapped mode (this is the one SkBlender with a mode).
+    std::optional<SkBlendMode> asBlendMode() const final { return fMode; }
+
+#if defined(SK_GANESH)
+    std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(
+            std::unique_ptr<GrFragmentProcessor> srcFP,
+            std::unique_ptr<GrFragmentProcessor> dstFP,
+            const GrFPArgs& fpArgs) const override;
+#endif
+
+    void flatten(SkWriteBuffer& buffer) const override;
+
+    bool onAppendStages(const SkStageRec& rec) const override;
+
+    skvm::Color onProgram(skvm::Builder* p, skvm::Color src, skvm::Color dst,
+                          const SkColorInfo& colorInfo, skvm::Uniforms* uniforms,
+                          SkArenaAlloc* alloc) const override;
+
+    // The wrapped blend mode; the blender's entire state.
+    SkBlendMode fMode;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlendModePriv.h b/gfx/skia/skia/src/core/SkBlendModePriv.h
new file mode 100644
index 0000000000..1b1a592e36
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlendModePriv.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlendModePriv_DEFINED
+#define SkBlendModePriv_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/private/SkColorData.h"
+
+class SkRasterPipeline;
+
+/**
+ * Sentinel value for SkBlendMode enum.
+ *
+ * Will never be a valid enum value, but will be storable in a byte.
+ */
+constexpr uint8_t kCustom_SkBlendMode = 0xFF;
+
+// True if coverage may be folded into source alpha for this mode
+// (defined in SkBlendMode.cpp; equivalent to ShouldPreScaleCoverage(mode, false)).
+bool SkBlendMode_SupportsCoverageAsAlpha(SkBlendMode);
+
+// Modes past the last separable mode (the HSL modes) operate on all three
+// color channels together, so swapping R and B changes the result.
+static inline bool SkBlendMode_CaresAboutRBOrder(SkBlendMode mode) {
+    return (mode > SkBlendMode::kLastSeparableMode);
+}
+
+// Finer-grained variant of SupportsCoverageAsAlpha; see SkBlendMode.cpp.
+bool SkBlendMode_ShouldPreScaleCoverage(SkBlendMode, bool rgb_coverage);
+// Appends the raster-pipeline stage implementing the mode.
+void SkBlendMode_AppendStages(SkBlendMode, SkRasterPipeline*);
+
+// CPU evaluation of the mode on one premultiplied src/dst pair.
+SkPMColor4f SkBlendMode_Apply(SkBlendMode, const SkPMColor4f& src, const SkPMColor4f& dst);
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrXferProcessor.h"
+const GrXPFactory* SkBlendMode_AsXPFactory(SkBlendMode);
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlenderBase.h b/gfx/skia/skia/src/core/SkBlenderBase.h
new file mode 100644
index 0000000000..5456be9973
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlenderBase.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlenderBase_DEFINED
+#define SkBlenderBase_DEFINED
+
+#include "include/core/SkBlender.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkVM.h"
+
+#include <optional>
+
+struct GrFPArgs;
+class GrFragmentProcessor;
+class SkColorInfo;
+class SkRuntimeEffect;
+struct SkStageRec;
+
+namespace skgpu::graphite {
+enum class DstColorType;
+class KeyContext;
+class PaintParamsKeyBuilder;
+class PipelineDataGatherer;
+}
+
+/**
+ * Encapsulates a blend function, including non-public APIs.
+ * Blends combine a source color (the result of our paint) and destination color (from the canvas)
+ * into a final color.
+ */
+class SkBlenderBase : public SkBlender {
+public:
+ /**
+ * Returns true if this SkBlender represents any SkBlendMode, and returns the blender's
+ * SkBlendMode in `mode`. Returns false for other types of blends.
+ */
+ virtual std::optional<SkBlendMode> asBlendMode() const { return {}; }
+
+ SK_WARN_UNUSED_RESULT bool appendStages(const SkStageRec& rec) const {
+ return this->onAppendStages(rec);
+ }
+
+ SK_WARN_UNUSED_RESULT
+ virtual bool onAppendStages(const SkStageRec& rec) const = 0;
+
+ /** Creates the blend program in SkVM. */
+ SK_WARN_UNUSED_RESULT
+ skvm::Color program(skvm::Builder* p, skvm::Color src, skvm::Color dst,
+ const SkColorInfo& colorInfo, skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ return this->onProgram(p, src, dst, colorInfo, uniforms, alloc);
+ }
+
+#if defined(SK_GANESH)
+ /**
+ * Returns a GrFragmentProcessor that implements this blend for the GPU backend.
+ * The GrFragmentProcessor expects premultiplied inputs and returns a premultiplied output.
+ */
+ virtual std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(
+ std::unique_ptr<GrFragmentProcessor> srcFP,
+ std::unique_ptr<GrFragmentProcessor> dstFP,
+ const GrFPArgs& fpArgs) const = 0;
+#endif
+
+ virtual SkRuntimeEffect* asRuntimeEffect() const { return nullptr; }
+
+#if defined(SK_GRAPHITE)
+ /**
+ * TODO: Make pure virtual.
+ * dstColorType = kPrimitive when blending the result of the paint evaluation with a primitive
+ * color (which is supplied by certain geometries). dstColorType = kSurface when blending the
+ * result of the paint evaluation with the back buffer.
+ */
+ virtual void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*,
+ skgpu::graphite::DstColorType dstColorType) const;
+#endif
+
+ static SkFlattenable::Type GetFlattenableType() { return kSkBlender_Type; }
+ Type getFlattenableType() const override { return GetFlattenableType(); }
+
+private:
+ virtual skvm::Color onProgram(skvm::Builder* p, skvm::Color src, skvm::Color dst,
+ const SkColorInfo& colorInfo, skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const = 0;
+
+ using INHERITED = SkFlattenable;
+};
+
+inline SkBlenderBase* as_BB(SkBlender* blend) {
+ return static_cast<SkBlenderBase*>(blend);
+}
+
+inline const SkBlenderBase* as_BB(const SkBlender* blend) {
+ return static_cast<const SkBlenderBase*>(blend);
+}
+
+inline const SkBlenderBase* as_BB(const sk_sp<SkBlender>& blend) {
+ return static_cast<SkBlenderBase*>(blend.get());
+}
+
+#endif // SkBlenderBase_DEFINED
diff --git a/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h b/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h
new file mode 100644
index 0000000000..7f0203671f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitBWMaskTemplate.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkBitmap.h"
+#include "src/core/SkMask.h"
+
+#ifndef ClearLow3Bits_DEFINED
+#define ClearLow3Bits_DEFINED
+ #define ClearLow3Bits(x) ((unsigned)(x) >> 3 << 3)
+#endif
+
+/*
+ SK_BLITBWMASK_NAME name of function(const SkBitmap& bitmap, const SkMask& mask, const SkIRect& clip, SK_BLITBWMASK_ARGS)
+ SK_BLITBWMASK_ARGS list of additional arguments to SK_BLITBWMASK_NAME, beginning with a comma
+ SK_BLITBWMASK_BLIT8 name of function(U8CPU byteMask, SK_BLITBWMASK_DEVTYPE* dst, int x, int y)
+ SK_BLITBWMASK_GETADDR either writable_addr[8,16,32]
+ SK_BLITBWMASK_DEVTYPE either U32 or U16 or U8
+*/
+
+static void SK_BLITBWMASK_NAME(const SkPixmap& dstPixmap, const SkMask& srcMask,
+ const SkIRect& clip SK_BLITBWMASK_ARGS) {
+ SkASSERT(clip.fRight <= srcMask.fBounds.fRight);
+
+ int cx = clip.fLeft;
+ int cy = clip.fTop;
+ int maskLeft = srcMask.fBounds.fLeft;
+ unsigned mask_rowBytes = srcMask.fRowBytes;
+ size_t bitmap_rowBytes = dstPixmap.rowBytes();
+ unsigned height = clip.height();
+
+ SkASSERT(mask_rowBytes != 0);
+ SkASSERT(bitmap_rowBytes != 0);
+ SkASSERT(height != 0);
+
+ const uint8_t* bits = srcMask.getAddr1(cx, cy);
+ SK_BLITBWMASK_DEVTYPE* device = dstPixmap.SK_BLITBWMASK_GETADDR(cx, cy);
+
+ if (cx == maskLeft && clip.fRight == srcMask.fBounds.fRight)
+ {
+ do {
+ SK_BLITBWMASK_DEVTYPE* dst = device;
+ unsigned rb = mask_rowBytes;
+ do {
+ U8CPU mask = *bits++;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+ } while (--rb != 0);
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ else
+ {
+ int left_edge = cx - maskLeft;
+ SkASSERT(left_edge >= 0);
+ int rite_edge = clip.fRight - maskLeft;
+ SkASSERT(rite_edge > left_edge);
+
+ int left_mask = 0xFF >> (left_edge & 7);
+ int rite_mask = 0xFF << (8 - (rite_edge & 7));
+ rite_mask &= 0xFF; // only want low-8 bits of mask
+ int full_runs = (rite_edge >> 3) - ((left_edge + 7) >> 3);
+
+ // check for empty right mask, so we don't read off the end (or go slower than we need to)
+ if (rite_mask == 0)
+ {
+ SkASSERT(full_runs >= 0);
+ full_runs -= 1;
+ rite_mask = 0xFF;
+ }
+ if (left_mask == 0xFF)
+ full_runs -= 1;
+
+ // back up manually so we can keep in sync with our byte-aligned src
+ // and not trigger an assert from the getAddr## function
+ device -= left_edge & 7;
+
+ if (full_runs < 0)
+ {
+ left_mask &= rite_mask;
+ SkASSERT(left_mask != 0);
+ do {
+ U8CPU mask = *bits & left_mask;
+ SK_BLITBWMASK_BLIT8(mask, device);
+ bits += mask_rowBytes;
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ else
+ {
+ do {
+ int runs = full_runs;
+ SK_BLITBWMASK_DEVTYPE* dst = device;
+ const uint8_t* b = bits;
+ U8CPU mask;
+
+ mask = *b++ & left_mask;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+
+ while (--runs >= 0)
+ {
+ mask = *b++;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+ dst += 8;
+ }
+
+ mask = *b & rite_mask;
+ SK_BLITBWMASK_BLIT8(mask, dst);
+
+ bits += mask_rowBytes;
+ device = (SK_BLITBWMASK_DEVTYPE*)((char*)device + bitmap_rowBytes);
+ } while (--height != 0);
+ }
+ }
+}
+
+#undef SK_BLITBWMASK_NAME
+#undef SK_BLITBWMASK_ARGS
+#undef SK_BLITBWMASK_BLIT8
+#undef SK_BLITBWMASK_GETADDR
+#undef SK_BLITBWMASK_DEVTYPE
+#undef SK_BLITBWMASK_DOROWSETUP
diff --git a/gfx/skia/skia/src/core/SkBlitRow.h b/gfx/skia/skia/src/core/SkBlitRow.h
new file mode 100644
index 0000000000..cc4ba86407
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitRow.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitRow_DEFINED
+#define SkBlitRow_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+
+class SkBlitRow {
+public:
+ enum Flags32 {
+ kGlobalAlpha_Flag32 = 1 << 0,
+ kSrcPixelAlpha_Flag32 = 1 << 1
+ };
+
+ /** Function pointer that blends 32bit colors onto a 32bit destination.
+ @param dst array of dst 32bit colors
+ @param src array of src 32bit colors (w/ or w/o alpha)
+ @param count number of colors to blend
+ @param alpha global alpha to be applied to all src colors
+ */
+ typedef void (*Proc32)(uint32_t dst[], const SkPMColor src[], int count, U8CPU alpha);
+
+ static Proc32 Factory32(unsigned flags32);
+
+ /** Blend a single color onto a row of S32 pixels, writing the result
+ into a row of D32 pixels. src and dst may be the same memory, but
+ if they are not, they may not overlap.
+ */
+ static void Color32(SkPMColor dst[], const SkPMColor src[], int count, SkPMColor color);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitRow_D32.cpp b/gfx/skia/skia/src/core/SkBlitRow_D32.cpp
new file mode 100644
index 0000000000..6959979c22
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitRow_D32.cpp
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkOpts.h"
+
+// Everyone agrees memcpy() is the best way to do this.
+static void blit_row_s32_opaque(SkPMColor* dst,
+ const SkPMColor* src,
+ int count,
+ U8CPU alpha) {
+ SkASSERT(255 == alpha);
+ memcpy(dst, src, count * sizeof(SkPMColor));
+}
+
+// We have SSE2, NEON, and portable implementations of
+// blit_row_s32_blend() and blit_row_s32a_blend().
+
+// TODO(mtklein): can we do better in NEON than 2 pixels at a time?
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <emmintrin.h>
+
+ static inline __m128i SkPMLerp_SSE2(const __m128i& src,
+ const __m128i& dst,
+ const unsigned src_scale) {
+ // Computes dst + (((src - dst)*src_scale)>>8)
+ const __m128i mask = _mm_set1_epi32(0x00FF00FF);
+
+ // Unpack the 16x8-bit source into 2 8x16-bit splayed halves.
+ __m128i src_rb = _mm_and_si128(mask, src);
+ __m128i src_ag = _mm_srli_epi16(src, 8);
+ __m128i dst_rb = _mm_and_si128(mask, dst);
+ __m128i dst_ag = _mm_srli_epi16(dst, 8);
+
+ // Compute scaled differences.
+ __m128i diff_rb = _mm_sub_epi16(src_rb, dst_rb);
+ __m128i diff_ag = _mm_sub_epi16(src_ag, dst_ag);
+ __m128i s = _mm_set1_epi16(src_scale);
+ diff_rb = _mm_mullo_epi16(diff_rb, s);
+ diff_ag = _mm_mullo_epi16(diff_ag, s);
+
+ // Pack the differences back together.
+ diff_rb = _mm_srli_epi16(diff_rb, 8);
+ diff_ag = _mm_andnot_si128(mask, diff_ag);
+ __m128i diff = _mm_or_si128(diff_rb, diff_ag);
+
+ // Add difference to destination.
+ return _mm_add_epi8(dst, diff);
+ }
+
+
+ static void blit_row_s32_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+
+ auto src4 = (const __m128i*)src;
+ auto dst4 = ( __m128i*)dst;
+
+ while (count >= 4) {
+ _mm_storeu_si128(dst4, SkPMLerp_SSE2(_mm_loadu_si128(src4),
+ _mm_loadu_si128(dst4),
+ SkAlpha255To256(alpha)));
+ src4++;
+ dst4++;
+ count -= 4;
+ }
+
+ src = (const SkPMColor*)src4;
+ dst = ( SkPMColor*)dst4;
+
+ while (count --> 0) {
+ *dst = SkPMLerp(*src, *dst, SkAlpha255To256(alpha));
+ src++;
+ dst++;
+ }
+ }
+
+ static inline __m128i SkBlendARGB32_SSE2(const __m128i& src,
+ const __m128i& dst,
+ const unsigned aa) {
+ unsigned alpha = SkAlpha255To256(aa);
+ __m128i src_scale = _mm_set1_epi16(alpha);
+ // SkAlphaMulInv256(SkGetPackedA32(src), src_scale)
+ __m128i dst_scale = _mm_srli_epi32(src, 24);
+ // High words in dst_scale are 0, so it's safe to multiply with 16-bit src_scale.
+ dst_scale = _mm_mullo_epi16(dst_scale, src_scale);
+ dst_scale = _mm_sub_epi32(_mm_set1_epi32(0xFFFF), dst_scale);
+ dst_scale = _mm_add_epi32(dst_scale, _mm_srli_epi32(dst_scale, 8));
+ dst_scale = _mm_srli_epi32(dst_scale, 8);
+ // Duplicate scales into 2x16-bit pattern per pixel.
+ dst_scale = _mm_shufflelo_epi16(dst_scale, _MM_SHUFFLE(2, 2, 0, 0));
+ dst_scale = _mm_shufflehi_epi16(dst_scale, _MM_SHUFFLE(2, 2, 0, 0));
+
+ const __m128i mask = _mm_set1_epi32(0x00FF00FF);
+
+ // Unpack the 16x8-bit source/destination into 2 8x16-bit splayed halves.
+ __m128i src_rb = _mm_and_si128(mask, src);
+ __m128i src_ag = _mm_srli_epi16(src, 8);
+ __m128i dst_rb = _mm_and_si128(mask, dst);
+ __m128i dst_ag = _mm_srli_epi16(dst, 8);
+
+ // Scale them.
+ src_rb = _mm_mullo_epi16(src_rb, src_scale);
+ src_ag = _mm_mullo_epi16(src_ag, src_scale);
+ dst_rb = _mm_mullo_epi16(dst_rb, dst_scale);
+ dst_ag = _mm_mullo_epi16(dst_ag, dst_scale);
+
+ // Add the scaled source and destination.
+ dst_rb = _mm_add_epi16(src_rb, dst_rb);
+ dst_ag = _mm_add_epi16(src_ag, dst_ag);
+
+ // Unsplay the halves back together.
+ dst_rb = _mm_srli_epi16(dst_rb, 8);
+ dst_ag = _mm_andnot_si128(mask, dst_ag);
+ return _mm_or_si128(dst_rb, dst_ag);
+ }
+
+ static void blit_row_s32a_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+
+ auto src4 = (const __m128i*)src;
+ auto dst4 = ( __m128i*)dst;
+
+ while (count >= 4) {
+ _mm_storeu_si128(dst4, SkBlendARGB32_SSE2(_mm_loadu_si128(src4),
+ _mm_loadu_si128(dst4),
+ alpha));
+ src4++;
+ dst4++;
+ count -= 4;
+ }
+
+ src = (const SkPMColor*)src4;
+ dst = ( SkPMColor*)dst4;
+
+ while (count --> 0) {
+ *dst = SkBlendARGB32(*src, *dst, alpha);
+ src++;
+ dst++;
+ }
+ }
+
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+
+ static void blit_row_s32_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+
+ uint16_t src_scale = SkAlpha255To256(alpha);
+ uint16_t dst_scale = 256 - src_scale;
+
+ while (count >= 2) {
+ uint8x8_t vsrc, vdst, vres;
+ uint16x8_t vsrc_wide, vdst_wide;
+
+ vsrc = vreinterpret_u8_u32(vld1_u32(src));
+ vdst = vreinterpret_u8_u32(vld1_u32(dst));
+
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale));
+
+ vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale));
+
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+
+ vst1_u32(dst, vreinterpret_u32_u8(vres));
+
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+
+ if (count == 1) {
+ uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres;
+ uint16x8_t vsrc_wide, vdst_wide;
+
+ vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0));
+ vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0));
+
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_u16(vsrc_wide, vdupq_n_u16(src_scale));
+ vdst_wide = vmull_u8(vdst, vdup_n_u8(dst_scale));
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);
+ }
+ }
+
+ static void blit_row_s32a_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha < 255);
+
+ unsigned alpha256 = SkAlpha255To256(alpha);
+
+ if (count & 1) {
+ uint8x8_t vsrc = vdup_n_u8(0), vdst = vdup_n_u8(0), vres;
+ uint16x8_t vdst_wide, vsrc_wide;
+ unsigned dst_scale;
+
+ vsrc = vreinterpret_u8_u32(vld1_lane_u32(src, vreinterpret_u32_u8(vsrc), 0));
+ vdst = vreinterpret_u8_u32(vld1_lane_u32(dst, vreinterpret_u32_u8(vdst), 0));
+
+ dst_scale = vget_lane_u8(vsrc, 3);
+ dst_scale = SkAlphaMulInv256(dst_scale, alpha256);
+
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide = vmulq_n_u16(vsrc_wide, alpha256);
+
+ vdst_wide = vmovl_u8(vdst);
+ vdst_wide = vmulq_n_u16(vdst_wide, dst_scale);
+
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+
+ vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);
+ dst++;
+ src++;
+ count--;
+ }
+
+ uint8x8_t alpha_mask;
+ static const uint8_t alpha_mask_setup[] = {3,3,3,3,7,7,7,7};
+ alpha_mask = vld1_u8(alpha_mask_setup);
+
+ while (count) {
+
+ uint8x8_t vsrc, vdst, vres, vsrc_alphas;
+ uint16x8_t vdst_wide, vsrc_wide, vsrc_scale, vdst_scale;
+
+ __builtin_prefetch(src+32);
+ __builtin_prefetch(dst+32);
+
+ vsrc = vreinterpret_u8_u32(vld1_u32(src));
+ vdst = vreinterpret_u8_u32(vld1_u32(dst));
+
+ vsrc_scale = vdupq_n_u16(alpha256);
+
+ vsrc_alphas = vtbl1_u8(vsrc, alpha_mask);
+ vdst_scale = vmovl_u8(vsrc_alphas);
+ // Calculate SkAlphaMulInv256(vdst_scale, vsrc_scale).
+ // A 16-bit lane would overflow if we used 0xFFFF here,
+ // so use an approximation with 0xFF00 that is off by 1,
+ // and add back 1 after to get the correct value.
+ // This is valid if alpha256 <= 255.
+ vdst_scale = vmlsq_u16(vdupq_n_u16(0xFF00), vdst_scale, vsrc_scale);
+ vdst_scale = vsraq_n_u16(vdst_scale, vdst_scale, 8);
+ vdst_scale = vsraq_n_u16(vdupq_n_u16(1), vdst_scale, 8);
+
+ vsrc_wide = vmovl_u8(vsrc);
+ vsrc_wide *= vsrc_scale;
+
+ vdst_wide = vmovl_u8(vdst);
+ vdst_wide *= vdst_scale;
+
+ vdst_wide += vsrc_wide;
+ vres = vshrn_n_u16(vdst_wide, 8);
+
+ vst1_u32(dst, vreinterpret_u32_u8(vres));
+
+ src += 2;
+ dst += 2;
+ count -= 2;
+ }
+ }
+
+#else
+ static void blit_row_s32_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ while (count --> 0) {
+ *dst = SkPMLerp(*src, *dst, SkAlpha255To256(alpha));
+ src++;
+ dst++;
+ }
+ }
+
+ static void blit_row_s32a_blend(SkPMColor* dst, const SkPMColor* src, int count, U8CPU alpha) {
+ SkASSERT(alpha <= 255);
+ while (count --> 0) {
+ *dst = SkBlendARGB32(*src, *dst, alpha);
+ src++;
+ dst++;
+ }
+ }
+#endif
+
+SkBlitRow::Proc32 SkBlitRow::Factory32(unsigned flags) {
+ static const SkBlitRow::Proc32 kProcs[] = {
+ blit_row_s32_opaque,
+ blit_row_s32_blend,
+ nullptr, // blit_row_s32a_opaque is in SkOpts
+ blit_row_s32a_blend
+ };
+
+ SkASSERT(flags < std::size(kProcs));
+ flags &= std::size(kProcs) - 1; // just to be safe
+
+ return flags == 2 ? SkOpts::blit_row_s32a_opaque
+ : kProcs[flags];
+}
+
+void SkBlitRow::Color32(SkPMColor dst[], const SkPMColor src[], int count, SkPMColor color) {
+ switch (SkGetPackedA32(color)) {
+ case 0: memmove(dst, src, count * sizeof(SkPMColor)); return;
+ case 255: SkOpts::memset32(dst, color, count); return;
+ }
+ return SkOpts::blit_row_color32(dst, src, count, color);
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter.cpp b/gfx/skia/skia/src/core/SkBlitter.cpp
new file mode 100644
index 0000000000..e09886fcc9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter.cpp
@@ -0,0 +1,898 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlitter.h"
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkString.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkAntiRun.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRegionPriv.h"
+#include "src/core/SkVMBlitter.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/core/SkXfermodeInterpretation.h"
+#include "src/shaders/SkShaderBase.h"
+
+using namespace skia_private;
+
+// Hacks for testing.
+bool gUseSkVMBlitter{false};
+bool gSkForceRasterPipelineBlitter{false};
+
+SkBlitter::~SkBlitter() {}
+
+bool SkBlitter::isNullBlitter() const { return false; }
+
+const SkPixmap* SkBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
+
+/*
+void SkBlitter::blitH(int x, int y, int width) {
+ SkDEBUGFAIL("unimplemented");
+}
+
+
+void SkBlitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ SkDEBUGFAIL("unimplemented");
+}
+ */
+
+inline static SkAlpha ScalarToAlpha(SkScalar a) {
+ SkAlpha alpha = (SkAlpha)(a * 255);
+ return alpha > 247 ? 0xFF : alpha < 8 ? 0 : alpha;
+}
+
+void SkBlitter::blitFatAntiRect(const SkRect& rect) {
+ SkIRect bounds = rect.roundOut();
+ SkASSERT(bounds.width() >= 3);
+
+ // skbug.com/7813
+ // To ensure consistency of the threaded backend (a rect that's considered fat in the init-once
+ // phase must also be considered fat in the draw phase), we have to deal with rects with small
+ // heights because the horizontal tiling in the threaded backend may change the height.
+ //
+ // This also implies that we cannot do vertical tiling unless we can blit any rect (not just the
+ // fat one.)
+ if (bounds.height() == 0) {
+ return;
+ }
+
+ int runSize = bounds.width() + 1; // +1 so we can set runs[bounds.width()] = 0
+ void* storage = this->allocBlitMemory(runSize * (sizeof(int16_t) + sizeof(SkAlpha)));
+ int16_t* runs = reinterpret_cast<int16_t*>(storage);
+ SkAlpha* alphas = reinterpret_cast<SkAlpha*>(runs + runSize);
+
+ runs[0] = 1;
+ runs[1] = bounds.width() - 2;
+ runs[bounds.width() - 1] = 1;
+ runs[bounds.width()] = 0;
+
+ SkScalar partialL = bounds.fLeft + 1 - rect.fLeft;
+ SkScalar partialR = rect.fRight - (bounds.fRight - 1);
+ SkScalar partialT = bounds.fTop + 1 - rect.fTop;
+ SkScalar partialB = rect.fBottom - (bounds.fBottom - 1);
+
+ if (bounds.height() == 1) {
+ partialT = rect.fBottom - rect.fTop;
+ }
+
+ alphas[0] = ScalarToAlpha(partialL * partialT);
+ alphas[1] = ScalarToAlpha(partialT);
+ alphas[bounds.width() - 1] = ScalarToAlpha(partialR * partialT);
+ this->blitAntiH(bounds.fLeft, bounds.fTop, alphas, runs);
+
+ if (bounds.height() > 2) {
+ this->blitAntiRect(bounds.fLeft, bounds.fTop + 1, bounds.width() - 2, bounds.height() - 2,
+ ScalarToAlpha(partialL), ScalarToAlpha(partialR));
+ }
+
+ if (bounds.height() > 1) {
+ alphas[0] = ScalarToAlpha(partialL * partialB);
+ alphas[1] = ScalarToAlpha(partialB);
+ alphas[bounds.width() - 1] = ScalarToAlpha(partialR * partialB);
+ this->blitAntiH(bounds.fLeft, bounds.fBottom - 1, alphas, runs);
+ }
+}
+
+void SkBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (alpha == 255) {
+ this->blitRect(x, y, 1, height);
+ } else {
+ int16_t runs[2];
+ runs[0] = 1;
+ runs[1] = 0;
+
+ while (--height >= 0) {
+ this->blitAntiH(x, y++, &alpha, runs);
+ }
+ }
+}
+
+void SkBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ while (--height >= 0) {
+ this->blitH(x, y++, width);
+ }
+}
+
+/// Default implementation doesn't check for easy optimizations
+/// such as alpha == 255; also uses blitV(), which some subclasses
+/// may not support.
+void SkBlitter::blitAntiRect(int x, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ if (leftAlpha > 0) { // we may send in x = -1 with leftAlpha = 0
+ this->blitV(x, y, height, leftAlpha);
+ }
+ x++;
+ if (width > 0) {
+ this->blitRect(x, y, width, height);
+ x += width;
+ }
+ if (rightAlpha > 0) {
+ this->blitV(x, y, height, rightAlpha);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+static inline void bits_to_runs(SkBlitter* blitter, int x, int y,
+ const uint8_t bits[],
+ uint8_t left_mask, ptrdiff_t rowBytes,
+ uint8_t right_mask) {
+ int inFill = 0;
+ int pos = 0;
+
+ while (--rowBytes >= 0) {
+ uint8_t b = *bits++ & left_mask;
+ if (rowBytes == 0) {
+ b &= right_mask;
+ }
+
+ for (uint8_t test = 0x80U; test != 0; test >>= 1) {
+ if (b & test) {
+ if (!inFill) {
+ pos = x;
+ inFill = true;
+ }
+ } else {
+ if (inFill) {
+ blitter->blitH(pos, y, x - pos);
+ inFill = false;
+ }
+ }
+ x += 1;
+ }
+ left_mask = 0xFFU;
+ }
+
+ // final cleanup
+ if (inFill) {
+ blitter->blitH(pos, y, x - pos);
+ }
+}
+
+// maskBitCount is the number of 1's to place in the mask. It must be in the range between 1 and 8.
+static uint8_t generate_right_mask(int maskBitCount) {
+ return static_cast<uint8_t>((0xFF00U >> maskBitCount) & 0xFF);
+}
+
+void SkBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ if (mask.fFormat == SkMask::kLCD16_Format) {
+ return; // needs to be handled by subclass
+ }
+
+ if (mask.fFormat == SkMask::kBW_Format) {
+ int cx = clip.fLeft;
+ int cy = clip.fTop;
+ int maskLeft = mask.fBounds.fLeft;
+ int maskRowBytes = mask.fRowBytes;
+ int height = clip.height();
+
+ const uint8_t* bits = mask.getAddr1(cx, cy);
+
+ SkDEBUGCODE(const uint8_t* endOfImage =
+ mask.fImage + (mask.fBounds.height() - 1) * maskRowBytes
+ + ((mask.fBounds.width() + 7) >> 3));
+
+ if (cx == maskLeft && clip.fRight == mask.fBounds.fRight) {
+ while (--height >= 0) {
+ int affectedRightBit = mask.fBounds.width() - 1;
+ ptrdiff_t rowBytes = (affectedRightBit >> 3) + 1;
+ SkASSERT(bits + rowBytes <= endOfImage);
+ U8CPU rightMask = generate_right_mask((affectedRightBit & 7) + 1);
+ bits_to_runs(this, cx, cy, bits, 0xFF, rowBytes, rightMask);
+ bits += maskRowBytes;
+ cy += 1;
+ }
+ } else {
+ // Bits is calculated as the offset into the mask at the point {cx, cy} therefore, all
+ // addressing into the bit mask is relative to that point. Since this is an address
+ // calculated from a arbitrary bit in that byte, calculate the left most bit.
+ int bitsLeft = cx - ((cx - maskLeft) & 7);
+
+ // Everything is relative to the bitsLeft.
+ int leftEdge = cx - bitsLeft;
+ SkASSERT(leftEdge >= 0);
+ int rightEdge = clip.fRight - bitsLeft;
+ SkASSERT(rightEdge > leftEdge);
+
+ // Calculate left byte and mask
+ const uint8_t* leftByte = bits;
+ U8CPU leftMask = 0xFFU >> (leftEdge & 7);
+
+ // Calculate right byte and mask
+ int affectedRightBit = rightEdge - 1;
+ const uint8_t* rightByte = bits + (affectedRightBit >> 3);
+ U8CPU rightMask = generate_right_mask((affectedRightBit & 7) + 1);
+
+ // leftByte and rightByte are byte locations therefore, to get a count of bytes the
+ // code must add one.
+ ptrdiff_t rowBytes = rightByte - leftByte + 1;
+
+ while (--height >= 0) {
+ SkASSERT(bits + rowBytes <= endOfImage);
+ bits_to_runs(this, bitsLeft, cy, bits, leftMask, rowBytes, rightMask);
+ bits += maskRowBytes;
+ cy += 1;
+ }
+ }
+ } else {
+ int width = clip.width();
+ AutoSTMalloc<64, int16_t> runStorage(width + 1);
+ int16_t* runs = runStorage.get();
+ const uint8_t* aa = mask.getAddr8(clip.fLeft, clip.fTop);
+
+ SkOpts::memset16((uint16_t*)runs, 1, width);
+ runs[width] = 0;
+
+ int height = clip.height();
+ int y = clip.fTop;
+ while (--height >= 0) {
+ this->blitAntiH(clip.fLeft, y, aa, runs);
+ aa += mask.fRowBytes;
+ y += 1;
+ }
+ }
+}
+
+/////////////////////// these are not virtual, just helpers
+
+#if defined(SK_SUPPORT_LEGACY_ALPHA_BITMAP_AS_COVERAGE)
+void SkBlitter::blitMaskRegion(const SkMask& mask, const SkRegion& clip) {
+ if (clip.quickReject(mask.fBounds)) {
+ return;
+ }
+
+ SkRegion::Cliperator clipper(clip, mask.fBounds);
+
+ while (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ this->blitMask(mask, cr);
+ clipper.next();
+ }
+}
+#endif
+
+void SkBlitter::blitRectRegion(const SkIRect& rect, const SkRegion& clip) {
+ SkRegion::Cliperator clipper(clip, rect);
+
+ while (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ this->blitRect(cr.fLeft, cr.fTop, cr.width(), cr.height());
+ clipper.next();
+ }
+}
+
+void SkBlitter::blitRegion(const SkRegion& clip) {
+ SkRegionPriv::VisitSpans(clip, [this](const SkIRect& r) {
+ this->blitRect(r.left(), r.top(), r.width(), r.height());
+ });
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkNullBlitter::blitH(int x, int y, int width) {}
+
+void SkNullBlitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {}
+
+void SkNullBlitter::blitV(int x, int y, int height, SkAlpha alpha) {}
+
+void SkNullBlitter::blitRect(int x, int y, int width, int height) {}
+
+void SkNullBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {}
+
+const SkPixmap* SkNullBlitter::justAnOpaqueColor(uint32_t* value) {
+ return nullptr;
+}
+
+bool SkNullBlitter::isNullBlitter() const { return true; }
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int compute_anti_width(const int16_t runs[]) {
+ int width = 0;
+
+ for (;;) {
+ int count = runs[0];
+
+ SkASSERT(count >= 0);
+ if (count == 0) {
+ break;
+ }
+ width += count;
+ runs += count;
+ }
+ return width;
+}
+
+static inline bool y_in_rect(int y, const SkIRect& rect) {
+ return (unsigned)(y - rect.fTop) < (unsigned)rect.height();
+}
+
+static inline bool x_in_rect(int x, const SkIRect& rect) {
+ return (unsigned)(x - rect.fLeft) < (unsigned)rect.width();
+}
+
+void SkRectClipBlitter::blitH(int left, int y, int width) {
+ SkASSERT(width > 0);
+
+ if (!y_in_rect(y, fClipRect)) {
+ return;
+ }
+
+ int right = left + width;
+
+ if (left < fClipRect.fLeft) {
+ left = fClipRect.fLeft;
+ }
+ if (right > fClipRect.fRight) {
+ right = fClipRect.fRight;
+ }
+
+ width = right - left;
+ if (width > 0) {
+ fBlitter->blitH(left, y, width);
+ }
+}
+
+void SkRectClipBlitter::blitAntiH(int left, int y, const SkAlpha aa[],
+ const int16_t runs[]) {
+ if (!y_in_rect(y, fClipRect) || left >= fClipRect.fRight) {
+ return;
+ }
+
+ int x0 = left;
+ int x1 = left + compute_anti_width(runs);
+
+ if (x1 <= fClipRect.fLeft) {
+ return;
+ }
+
+ SkASSERT(x0 < x1);
+ if (x0 < fClipRect.fLeft) {
+ int dx = fClipRect.fLeft - x0;
+ SkAlphaRuns::BreakAt((int16_t*)runs, (uint8_t*)aa, dx);
+ runs += dx;
+ aa += dx;
+ x0 = fClipRect.fLeft;
+ }
+
+ SkASSERT(x0 < x1 && runs[x1 - x0] == 0);
+ if (x1 > fClipRect.fRight) {
+ x1 = fClipRect.fRight;
+ SkAlphaRuns::BreakAt((int16_t*)runs, (uint8_t*)aa, x1 - x0);
+ ((int16_t*)runs)[x1 - x0] = 0;
+ }
+
+ SkASSERT(x0 < x1 && runs[x1 - x0] == 0);
+ SkASSERT(compute_anti_width(runs) == x1 - x0);
+
+ fBlitter->blitAntiH(x0, y, aa, runs);
+}
+
+void SkRectClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkASSERT(height > 0);
+
+ if (!x_in_rect(x, fClipRect)) {
+ return;
+ }
+
+ int y0 = y;
+ int y1 = y + height;
+
+ if (y0 < fClipRect.fTop) {
+ y0 = fClipRect.fTop;
+ }
+ if (y1 > fClipRect.fBottom) {
+ y1 = fClipRect.fBottom;
+ }
+
+ if (y0 < y1) {
+ fBlitter->blitV(x, y0, y1 - y0, alpha);
+ }
+}
+
+void SkRectClipBlitter::blitRect(int left, int y, int width, int height) {
+ SkIRect r;
+
+ r.setLTRB(left, y, left + width, y + height);
+ if (r.intersect(fClipRect)) {
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ }
+}
+
+void SkRectClipBlitter::blitAntiRect(int left, int y, int width, int height,
+ SkAlpha leftAlpha, SkAlpha rightAlpha) {
+ SkIRect r;
+
+ // The *true* width of the rectangle blitted is width+2:
+ r.setLTRB(left, y, left + width + 2, y + height);
+ if (r.intersect(fClipRect)) {
+ if (r.fLeft != left) {
+ SkASSERT(r.fLeft > left);
+ leftAlpha = 255;
+ }
+ if (r.fRight != left + width + 2) {
+ SkASSERT(r.fRight < left + width + 2);
+ rightAlpha = 255;
+ }
+ if (255 == leftAlpha && 255 == rightAlpha) {
+ fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+ } else if (1 == r.width()) {
+ if (r.fLeft == left) {
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(), leftAlpha);
+ } else {
+ SkASSERT(r.fLeft == left + width + 1);
+ fBlitter->blitV(r.fLeft, r.fTop, r.height(), rightAlpha);
+ }
+ } else {
+ fBlitter->blitAntiRect(r.fLeft, r.fTop, r.width() - 2, r.height(),
+ leftAlpha, rightAlpha);
+ }
+ }
+}
+
+void SkRectClipBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ SkIRect r = clip;
+
+ if (r.intersect(fClipRect)) {
+ fBlitter->blitMask(mask, r);
+ }
+}
+
+const SkPixmap* SkRectClipBlitter::justAnOpaqueColor(uint32_t* value) {
+ return fBlitter->justAnOpaqueColor(value);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Walk the region's spans on row y and emit one blitH per span that
+// intersects [x, x + width).
+void SkRgnClipBlitter::blitH(int x, int y, int width) {
+    SkRegion::Spanerator span(*fRgn, y, x, x + width);
+    int left, right;
+
+    while (span.next(&left, &right)) {
+        SkASSERT(left < right);
+        fBlitter->blitH(left, y, right - left);
+    }
+}
+
+// Clip an antialiased run against the region. The aa[]/runs[] arrays are
+// rewritten IN PLACE (const is cast away) so that pixels outside the region
+// get zero coverage; the whole edited run is then forwarded once per call.
+void SkRgnClipBlitter::blitAntiH(int x, int y, const SkAlpha aa[],
+                                 const int16_t runs[]) {
+    int width = compute_anti_width(runs);
+    SkRegion::Spanerator span(*fRgn, y, x, x + width);
+    int left, right;
+    SkDEBUGCODE(const SkIRect& bounds = fRgn->getBounds();)
+
+    int prevRite = x;
+    while (span.next(&left, &right)) {
+        SkASSERT(x <= left);
+        SkASSERT(left < right);
+        SkASSERT(left >= bounds.fLeft && right <= bounds.fRight);
+
+        // Split the run so [left, right) falls on run boundaries.
+        SkAlphaRuns::Break((int16_t*)runs, (uint8_t*)aa, left - x, right - left);
+
+        // now zero before left
+        if (left > prevRite) {
+            int index = prevRite - x;
+            ((uint8_t*)aa)[index] = 0; // skip runs after right
+            ((int16_t*)runs)[index] = SkToS16(left - prevRite);
+        }
+
+        prevRite = right;
+    }
+
+    if (prevRite > x) {
+        // Terminate the edited run list at the last covered pixel.
+        ((int16_t*)runs)[prevRite - x] = 0;
+
+        if (x < 0) {
+            // Skip the leading (off-canvas) run so we hand the blitter x >= 0.
+            int skip = runs[0];
+            SkASSERT(skip >= -x);
+            aa += skip;
+            runs += skip;
+            x += skip;
+        }
+        fBlitter->blitAntiH(x, y, aa, runs);
+    }
+}
+
+// Clip a 1-pixel-wide vertical run against the region: iterate the region
+// pieces inside the column and blit each as its own vertical segment.
+void SkRgnClipBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+    SkIRect bounds;
+    bounds.setXYWH(x, y, 1, height);
+
+    SkRegion::Cliperator iter(*fRgn, bounds);
+
+    while (!iter.done()) {
+        const SkIRect& r = iter.rect();
+        SkASSERT(bounds.contains(r));
+
+        fBlitter->blitV(x, r.fTop, r.height(), alpha);
+        iter.next();
+    }
+}
+
+// Clip a solid rectangle against the region and blit each resulting piece.
+void SkRgnClipBlitter::blitRect(int x, int y, int width, int height) {
+    SkIRect bounds;
+    bounds.setXYWH(x, y, width, height);
+
+    SkRegion::Cliperator iter(*fRgn, bounds);
+
+    while (!iter.done()) {
+        const SkIRect& r = iter.rect();
+        SkASSERT(bounds.contains(r));
+
+        fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+        iter.next();
+    }
+}
+
+// Clip an anti-aliased rectangle (1px AA column on each side, so true width
+// is width + 2) against the region. Pieces that do not touch an original AA
+// edge are blitted fully opaque.
+void SkRgnClipBlitter::blitAntiRect(int x, int y, int width, int height,
+                                    SkAlpha leftAlpha, SkAlpha rightAlpha) {
+    // The *true* width of the rectangle to blit is width + 2
+    SkIRect bounds;
+    bounds.setXYWH(x, y, width + 2, height);
+
+    SkRegion::Cliperator iter(*fRgn, bounds);
+
+    while (!iter.done()) {
+        const SkIRect& r = iter.rect();
+        SkASSERT(bounds.contains(r));
+        SkASSERT(r.fLeft >= x);
+        SkASSERT(r.fRight <= x + width + 2);
+
+        // AA only applies where the piece still touches the original edges.
+        SkAlpha effectiveLeftAlpha = (r.fLeft == x) ? leftAlpha : 255;
+        SkAlpha effectiveRightAlpha = (r.fRight == x + width + 2) ?
+                                      rightAlpha : 255;
+
+        if (255 == effectiveLeftAlpha && 255 == effectiveRightAlpha) {
+            fBlitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+        } else if (1 == r.width()) {
+            // Single-column piece: it is either the left or the right AA edge.
+            if (r.fLeft == x) {
+                fBlitter->blitV(r.fLeft, r.fTop, r.height(),
+                                effectiveLeftAlpha);
+            } else {
+                SkASSERT(r.fLeft == x + width + 1);
+                fBlitter->blitV(r.fLeft, r.fTop, r.height(),
+                                effectiveRightAlpha);
+            }
+        } else {
+            fBlitter->blitAntiRect(r.fLeft, r.fTop, r.width() - 2, r.height(),
+                                   effectiveLeftAlpha, effectiveRightAlpha);
+        }
+        iter.next();
+    }
+}
+
+
+// Clip a mask blit against the region, forwarding one blitMask per piece.
+void SkRgnClipBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+    SkASSERT(mask.fBounds.contains(clip));
+
+    SkRegion::Cliperator iter(*fRgn, clip);
+    // NOTE(review): 'r' binds to the iterator's internal rect, which is
+    // presumed to be updated in place by iter.next() — confirm against
+    // SkRegion::Cliperator's contract.
+    const SkIRect& r = iter.rect();
+    SkBlitter* blitter = fBlitter;
+
+    while (!iter.done()) {
+        blitter->blitMask(mask, r);
+        iter.next();
+    }
+}
+
+// Pass-through: region clipping does not affect the single-opaque-color query.
+const SkPixmap* SkRgnClipBlitter::justAnOpaqueColor(uint32_t* value) {
+    return fBlitter->justAnOpaqueColor(value);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Wrap 'blitter' in the cheapest clipping wrapper that enforces 'clip':
+// the null blitter when nothing can draw, the rect wrapper for rectangular
+// clips that don't already contain 'ir', the region wrapper otherwise.
+// Returns a pointer to one of this object's members, so the returned blitter
+// must not outlive this SkBlitterClipper.
+SkBlitter* SkBlitterClipper::apply(SkBlitter* blitter, const SkRegion* clip,
+                                   const SkIRect* ir) {
+    if (clip) {
+        const SkIRect& clipR = clip->getBounds();
+
+        if (clip->isEmpty() || (ir && !SkIRect::Intersects(clipR, *ir))) {
+            blitter = &fNullBlitter;
+        } else if (clip->isRect()) {
+            // If the clip fully contains the draw bounds, no wrapping needed.
+            if (ir == nullptr || !clipR.contains(*ir)) {
+                fRectBlitter.init(blitter, clipR);
+                blitter = &fRectBlitter;
+            }
+        } else {
+            fRgnBlitter.init(blitter, clip);
+            blitter = &fRgnBlitter;
+        }
+    }
+    return blitter;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkCoreBlitters.h"
+
+// Decide whether the legacy (non-raster-pipeline, non-SkVM) blitters may be
+// used for this device/paint combination. Returns false whenever any feature
+// the legacy blitters cannot handle is requested.
+bool SkBlitter::UseLegacyBlitter(const SkPixmap& device,
+                                 const SkPaint& paint,
+                                 const SkMatrix& matrix) {
+    // Runtime switches force the pipeline/SkVM paths.
+    if (gSkForceRasterPipelineBlitter || gUseSkVMBlitter) {
+        return false;
+    }
+#if defined(SK_FORCE_RASTER_PIPELINE_BLITTER)
+    return false;
+#else
+
+    if (paint.isDither()) {
+        return false;
+    }
+
+    const SkMaskFilterBase* mf = as_MFB(paint.getMaskFilter());
+    const auto mode = paint.asBlendMode();
+
+    // The legacy blitters cannot handle any of these complex features (anymore).
+    if (device.alphaType() == kUnpremul_SkAlphaType ||
+        !mode ||
+        mode.value() > SkBlendMode::kLastCoeffMode ||
+        (mf && mf->getFormat() == SkMask::k3D_Format)) {
+        return false;
+    }
+
+    // All the real legacy fast paths are for shaders and SrcOver.
+    // Choosing SkRasterPipelineBlitter will also let us to hit its single-color memset path.
+    if (!paint.getShader() && mode != SkBlendMode::kSrcOver) {
+        return false;
+    }
+
+    auto cs = device.colorSpace();
+    // We check (indirectly via makeContext()) later on if the shader can handle the colorspace
+    // in legacy mode, so here we just focus on if a single color needs raster-pipeline.
+    if (cs && !paint.getShader()) {
+        if (!paint.getColor4f().fitsInBytes() || !cs->isSRGB()) {
+            return false;
+        }
+    }
+
+    // Only kN32 is handled by legacy blitters now
+    return device.colorType() == kN32_SkColorType;
+#endif
+}
+
+// Central blitter factory: normalizes the paint (blend mode simplification,
+// color-filter folding, dither suppression), then returns either a legacy
+// kN32 blitter or a raster-pipeline/SkVM blitter, all arena-allocated.
+// Never returns null — unsupported cases get an SkNullBlitter.
+SkBlitter* SkBlitter::Choose(const SkPixmap& device,
+                             const SkMatrix& ctm,
+                             const SkPaint& origPaint,
+                             SkArenaAlloc* alloc,
+                             bool drawCoverage,
+                             sk_sp<SkShader> clipShader,
+                             const SkSurfaceProps& props) {
+    SkASSERT(alloc);
+
+    if (kUnknown_SkColorType == device.colorType()) {
+        return alloc->make<SkNullBlitter>();
+    }
+
+    // We may tweak the original paint as we go.
+    SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+    if (auto mode = paint->asBlendMode()) {
+        // We have the most fast-paths for SrcOver, so see if we can act like SrcOver.
+        if (mode.value() != SkBlendMode::kSrcOver) {
+            switch (SkInterpretXfermode(*paint, SkColorTypeIsAlwaysOpaque(device.colorType()))) {
+                case kSrcOver_SkXfermodeInterpretation:
+                    paint.writable()->setBlendMode(SkBlendMode::kSrcOver);
+                    break;
+                case kSkipDrawing_SkXfermodeInterpretation:
+                    return alloc->make<SkNullBlitter>();
+                default:
+                    break;
+            }
+        }
+
+        // A Clear blend mode will ignore the entire color pipeline, as if Src mode with 0x00000000.
+        if (mode.value() == SkBlendMode::kClear) {
+            SkPaint* p = paint.writable();
+            p->setShader(nullptr);
+            p->setColorFilter(nullptr);
+            p->setBlendMode(SkBlendMode::kSrc);
+            p->setColor(0x00000000);
+        }
+    }
+
+    // Fold any color filter into the paint color/shader so blitters below
+    // never have to apply one themselves.
+    if (paint->getColorFilter()) {
+        SkPaintPriv::RemoveColorFilter(paint.writable(), device.colorSpace());
+    }
+    SkASSERT(!paint->getColorFilter());
+
+    if (drawCoverage) {
+        // Coverage drawing writes raw coverage into an A8 target only.
+        if (device.colorType() == kAlpha_8_SkColorType) {
+            SkASSERT(!paint->getShader());
+            SkASSERT(paint->isSrcOver());
+            return alloc->make<SkA8_Coverage_Blitter>(device, *paint);
+        }
+        return alloc->make<SkNullBlitter>();
+    }
+
+    if (paint->isDither() && !SkPaintPriv::ShouldDither(*paint, device.colorType())) {
+        paint.writable()->setDither(false);
+    }
+
+    // Same basic idea used a few times: try SkRP, then try SkVM, then give up with a null-blitter.
+    // (Setting gUseSkVMBlitter is the only way we prefer SkVM over SkRP at the moment.)
+    auto create_SkRP_or_SkVMBlitter = [&]() -> SkBlitter* {
+
+        // We need to make sure that in case RP blitter cannot be created we use VM and
+        // when VM blitter cannot be created we use RP
+        if (gUseSkVMBlitter) {
+            if (auto blitter = SkVMBlitter::Make(device, *paint, ctm, alloc, clipShader)) {
+                return blitter;
+            }
+        }
+        if (auto blitter = SkCreateRasterPipelineBlitter(device,
+                                                         *paint,
+                                                         ctm,
+                                                         alloc,
+                                                         clipShader,
+                                                         props)) {
+            return blitter;
+        }
+        if (!gUseSkVMBlitter) {
+            if (auto blitter = SkVMBlitter::Make(device, *paint, ctm, alloc, clipShader)) {
+                return blitter;
+            }
+        }
+        return alloc->make<SkNullBlitter>();
+    };
+
+    // We'll end here for many interesting cases: color spaces, color filters, most color types.
+    if (clipShader || !UseLegacyBlitter(device, *paint, ctm)) {
+        return create_SkRP_or_SkVMBlitter();
+    }
+
+    // Everything but legacy kN32_SkColorType should already be handled.
+    SkASSERT(device.colorType() == kN32_SkColorType);
+
+    // And we should either have a shader, be blending with SrcOver, or both.
+    SkASSERT(paint->getShader() || paint->asBlendMode() == SkBlendMode::kSrcOver);
+
+    // Legacy blitters keep their shader state on a shader context.
+    SkShaderBase::Context* shaderContext = nullptr;
+    if (paint->getShader()) {
+        shaderContext = as_SB(paint->getShader())->makeContext(
+                {paint->getColor4f(), ctm, nullptr, device.colorType(), device.colorSpace(), props},
+                alloc);
+
+        // Creating the context isn't always possible... try fallbacks before giving up.
+        if (!shaderContext) {
+            return create_SkRP_or_SkVMBlitter();
+        }
+    }
+
+    switch (device.colorType()) {
+        case kN32_SkColorType:
+            // Pick the most specialized ARGB32 blitter available.
+            if (shaderContext) {
+                return alloc->make<SkARGB32_Shader_Blitter>(device, *paint, shaderContext);
+            } else if (paint->getColor() == SK_ColorBLACK) {
+                return alloc->make<SkARGB32_Black_Blitter>(device, *paint);
+            } else if (paint->getAlpha() == 0xFF) {
+                return alloc->make<SkARGB32_Opaque_Blitter>(device, *paint);
+            } else {
+                return alloc->make<SkARGB32_Blitter>(device, *paint);
+            }
+
+        default:
+            SkASSERT(false);
+            return alloc->make<SkNullBlitter>();
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Base class for blitters that source colors from a shader. Takes a manual
+// ref on the paint's shader (released in the destructor) and caches the
+// shader-context flags for subclasses.
+SkShaderBlitter::SkShaderBlitter(const SkPixmap& device, const SkPaint& paint,
+                                 SkShaderBase::Context* shaderContext)
+        : INHERITED(device)
+        , fShader(paint.getShader())
+        , fShaderContext(shaderContext) {
+    SkASSERT(fShader);
+    SkASSERT(fShaderContext);
+
+    fShader->ref();
+    fShaderFlags = fShaderContext->getFlags();
+    fConstInY = SkToBool(fShaderFlags & SkShaderBase::kConstInY32_Flag);
+}
+
+SkShaderBlitter::~SkShaderBlitter() {
+    // Balance the explicit ref() taken in the constructor.
+    fShader->unref();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+// Debug-only wrapper: asserts that every blit call stays inside fClipRect,
+// then forwards unchanged to the wrapped blitter.
+
+void SkRectClipCheckBlitter::blitH(int x, int y, int width) {
+    SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, width, 1)));
+    fBlitter->blitH(x, y, width);
+}
+
+void SkRectClipCheckBlitter::blitAntiH(int x, int y, const SkAlpha aa[], const int16_t runs[]) {
+    // Walk the zero-terminated run list to find the total pixel width.
+    const int16_t* iter = runs;
+    for (; *iter; iter += *iter)
+        ;
+    int width = iter - runs;
+    SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, width, 1)));
+    fBlitter->blitAntiH(x, y, aa, runs);
+}
+
+void SkRectClipCheckBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+    SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, 1, height)));
+    fBlitter->blitV(x, y, height, alpha);
+}
+
+void SkRectClipCheckBlitter::blitRect(int x, int y, int width, int height) {
+    SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, width, height)));
+    fBlitter->blitRect(x, y, width, height);
+}
+
+void SkRectClipCheckBlitter::blitAntiRect(int x, int y, int width, int height,
+                                          SkAlpha leftAlpha, SkAlpha rightAlpha) {
+    // A zero-alpha edge column draws nothing, so it is excluded from the
+    // rect that must fit inside the clip (true width is width + 2).
+    bool skipLeft = !leftAlpha;
+    bool skipRight = !rightAlpha;
+    SkIRect r = SkIRect::MakeXYWH(x + skipLeft, y, width + 2 - skipRight - skipLeft, height);
+    SkASSERT(r.isEmpty() || fClipRect.contains(r));
+    fBlitter->blitAntiRect(x, y, width, height, leftAlpha, rightAlpha);
+}
+
+void SkRectClipCheckBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+    SkASSERT(mask.fBounds.contains(clip));
+    SkASSERT(fClipRect.contains(clip));
+    fBlitter->blitMask(mask, clip);
+}
+
+const SkPixmap* SkRectClipCheckBlitter::justAnOpaqueColor(uint32_t* value) {
+    return fBlitter->justAnOpaqueColor(value);
+}
+
+void SkRectClipCheckBlitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+    SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, 2, 1)));
+    fBlitter->blitAntiH2(x, y, a0, a1);
+}
+
+void SkRectClipCheckBlitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+    SkASSERT(fClipRect.contains(SkIRect::MakeXYWH(x, y, 1, 2)));
+    fBlitter->blitAntiV2(x, y, a0, a1);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitter.h b/gfx/skia/skia/src/core/SkBlitter.h
new file mode 100644
index 0000000000..03329da6ee
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitter_DEFINED
+#define SkBlitter_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkArenaAlloc;
+class SkMatrix;
+class SkMatrixProvider;
+class SkPaint;
+class SkPixmap;
+class SkSurfaceProps;
+struct SkMask;
+
+/** SkBlitter and its subclasses are responsible for actually writing pixels
+    into memory. Besides efficiency, they handle clipping and antialiasing.
+    A SkBlitter subclass contains all the context needed to generate pixels
+    for the destination and how src/generated pixels map to the destination.
+    The coordinates passed to the blitX calls are in destination pixel space.
+*/
+class SkBlitter {
+public:
+    virtual ~SkBlitter();
+
+    /// Blit a horizontal run of one or more pixels.
+    virtual void blitH(int x, int y, int width) = 0;
+
+    /// Blit a horizontal run of antialiased pixels; runs[] is a *sparse*
+    /// zero-terminated run-length encoding of spans of constant alpha values.
+    /// The runs[] and antialias[] work together to represent long runs of pixels with the same
+    /// alphas. The runs[] contains the number of pixels with the same alpha, and antialias[]
+    /// contain the coverage value for that number of pixels. The runs[] (and antialias[]) are
+    /// encoded in a clever way. The runs array is zero terminated, and has enough entries for
+    /// each pixel plus one, in most cases some of the entries will not contain valid data. An entry
+    /// in the runs array contains the number of pixels (np) that have the same alpha value. The
+    /// next np value is found np entries away. For example, if runs[0] = 7, then the next valid
+    /// entry will by at runs[7]. The runs array and antialias[] are coupled by index. So, if the
+    /// np entry is at runs[45] = 12 then the alpha value can be found at antialias[45] = 0x88.
+    /// This would mean to use an alpha value of 0x88 for the next 12 pixels starting at pixel 45.
+    virtual void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) = 0;
+
+    /// Blit a vertical run of pixels with a constant alpha value.
+    virtual void blitV(int x, int y, int height, SkAlpha alpha);
+
+    /// Blit a solid rectangle one or more pixels wide.
+    virtual void blitRect(int x, int y, int width, int height);
+
+    /** Blit a rectangle with one alpha-blended column on the left,
+        width (zero or more) opaque pixels, and one alpha-blended column
+        on the right.
+        The result will always be at least two pixels wide.
+    */
+    virtual void blitAntiRect(int x, int y, int width, int height,
+                              SkAlpha leftAlpha, SkAlpha rightAlpha);
+
+    // Blit a rect in AA with size at least 3 x 3 (small rect has too many edge cases...)
+    void blitFatAntiRect(const SkRect& rect);
+
+    /// Blit a pattern of pixels defined by a rectangle-clipped mask;
+    /// typically used for text.
+    virtual void blitMask(const SkMask&, const SkIRect& clip);
+
+    /** If the blitter just sets a single value for each pixel, return the
+        bitmap it draws into, and assign value. If not, return nullptr and ignore
+        the value parameter.
+    */
+    virtual const SkPixmap* justAnOpaqueColor(uint32_t* value);
+
+    // (x, y), (x + 1, y)
+    // Convenience for two adjacent AA pixels, expressed as a 2-pixel blitAntiH run.
+    virtual void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+        int16_t runs[3];
+        uint8_t aa[2];
+
+        runs[0] = 1;
+        runs[1] = 1;
+        runs[2] = 0;
+        aa[0] = SkToU8(a0);
+        aa[1] = SkToU8(a1);
+        this->blitAntiH(x, y, aa, runs);
+    }
+
+    // (x, y), (x, y + 1)
+    // Convenience for two vertically adjacent AA pixels: two 1-pixel blitAntiH calls.
+    virtual void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+        int16_t runs[2];
+        uint8_t aa[1];
+
+        runs[0] = 1;
+        runs[1] = 0;
+        aa[0] = SkToU8(a0);
+        this->blitAntiH(x, y, aa, runs);
+        // reset in case the clipping blitter modified runs
+        runs[0] = 1;
+        runs[1] = 0;
+        aa[0] = SkToU8(a1);
+        this->blitAntiH(x, y + 1, aa, runs);
+    }
+
+    /**
+     *  Special method just to identify the null blitter, which is returned
+     *  from Choose() if the request cannot be fulfilled. Default impl
+     *  returns false.
+     */
+    virtual bool isNullBlitter() const;
+
+    /**
+     *  Special methods for blitters that can blit more than one row at a time.
+     *  This function returns the number of rows that this blitter could optimally
+     *  process at a time. It is still required to support blitting one scanline
+     *  at a time.
+     */
+    virtual int requestRowsPreserved() const { return 1; }
+
+    /**
+     *  This function allocates memory for the blitter that the blitter then owns.
+     *  The memory can be used by the calling function at will, but it will be
+     *  released when the blitter's destructor is called. This function returns
+     *  nullptr if no persistent memory is needed by the blitter.
+     */
+    virtual void* allocBlitMemory(size_t sz) {
+        return fBlitMemory.reset(sz, SkAutoMalloc::kReuse_OnShrink);
+    }
+
+    ///@name non-virtual helpers
+#if defined(SK_SUPPORT_LEGACY_ALPHA_BITMAP_AS_COVERAGE)
+    void blitMaskRegion(const SkMask& mask, const SkRegion& clip);
+#endif
+    void blitRectRegion(const SkIRect& rect, const SkRegion& clip);
+    void blitRegion(const SkRegion& clip);
+    ///@}
+
+    /** @name Factories
+        Return the correct blitter to use given the specified context.
+     */
+    static SkBlitter* Choose(const SkPixmap& dst,
+                             const SkMatrix& ctm,
+                             const SkPaint& paint,
+                             SkArenaAlloc*,
+                             bool drawCoverage,
+                             sk_sp<SkShader> clipShader,
+                             const SkSurfaceProps& props);
+
+    static SkBlitter* ChooseSprite(const SkPixmap& dst,
+                                   const SkPaint&,
+                                   const SkPixmap& src,
+                                   int left, int top,
+                                   SkArenaAlloc*, sk_sp<SkShader> clipShader);
+    ///@}
+
+    static bool UseLegacyBlitter(const SkPixmap&, const SkPaint&, const SkMatrix&);
+
+protected:
+    // Scratch storage handed out by allocBlitMemory(); freed with the blitter.
+    SkAutoMalloc fBlitMemory;
+};
+
+/** This blitter silently never draws anything.
+*/
+class SkNullBlitter : public SkBlitter {
+public:
+    // All blit entry points are no-ops; see SkBlitter.cpp for the definitions.
+    void blitH(int x, int y, int width) override;
+    void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+    void blitV(int x, int y, int height, SkAlpha alpha) override;
+    void blitRect(int x, int y, int width, int height) override;
+    void blitMask(const SkMask&, const SkIRect& clip) override;
+    const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+    bool isNullBlitter() const override;
+};
+
+/** Wraps another (real) blitter, and ensures that the real blitter is only
+    called with coordinates that have been clipped by the specified clipRect.
+    This means the caller need not perform the clipping ahead of time.
+*/
+class SkRectClipBlitter : public SkBlitter {
+public:
+    // Re-initializable so one instance can be reused across draws.
+    void init(SkBlitter* blitter, const SkIRect& clipRect) {
+        SkASSERT(!clipRect.isEmpty());
+        fBlitter = blitter;
+        fClipRect = clipRect;
+    }
+
+    void blitH(int x, int y, int width) override;
+    void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+    void blitV(int x, int y, int height, SkAlpha alpha) override;
+    void blitRect(int x, int y, int width, int height) override;
+    void blitAntiRect(int x, int y, int width, int height,
+                      SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+    void blitMask(const SkMask&, const SkIRect& clip) override;
+    const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+    int requestRowsPreserved() const override {
+        return fBlitter->requestRowsPreserved();
+    }
+
+    void* allocBlitMemory(size_t sz) override {
+        return fBlitter->allocBlitMemory(sz);
+    }
+
+private:
+    SkBlitter* fBlitter;   // not owned
+    SkIRect fClipRect;
+};
+
+/** Wraps another (real) blitter, and ensures that the real blitter is only
+    called with coordinates that have been clipped by the specified clipRgn.
+    This means the caller need not perform the clipping ahead of time.
+*/
+class SkRgnClipBlitter : public SkBlitter {
+public:
+    // Re-initializable so one instance can be reused across draws.
+    void init(SkBlitter* blitter, const SkRegion* clipRgn) {
+        SkASSERT(clipRgn && !clipRgn->isEmpty());
+        fBlitter = blitter;
+        fRgn = clipRgn;
+    }
+
+    void blitH(int x, int y, int width) override;
+    void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+    void blitV(int x, int y, int height, SkAlpha alpha) override;
+    void blitRect(int x, int y, int width, int height) override;
+    void blitAntiRect(int x, int y, int width, int height,
+                      SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+    void blitMask(const SkMask&, const SkIRect& clip) override;
+    const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+
+    int requestRowsPreserved() const override {
+        return fBlitter->requestRowsPreserved();
+    }
+
+    void* allocBlitMemory(size_t sz) override {
+        return fBlitter->allocBlitMemory(sz);
+    }
+
+private:
+    SkBlitter* fBlitter;     // not owned
+    const SkRegion* fRgn;    // not owned; must outlive this blitter
+};
+
+#ifdef SK_DEBUG
+// Debug-only wrapper that asserts every blit stays within fClipRect before
+// forwarding; it performs no clipping itself.
+class SkRectClipCheckBlitter : public SkBlitter {
+public:
+    void init(SkBlitter* blitter, const SkIRect& clipRect) {
+        SkASSERT(blitter);
+        SkASSERT(!clipRect.isEmpty());
+        fBlitter = blitter;
+        fClipRect = clipRect;
+    }
+
+    void blitH(int x, int y, int width) override;
+    void blitAntiH(int x, int y, const SkAlpha[], const int16_t runs[]) override;
+    void blitV(int x, int y, int height, SkAlpha alpha) override;
+    void blitRect(int x, int y, int width, int height) override;
+    void blitAntiRect(int x, int y, int width, int height,
+                      SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+    void blitMask(const SkMask&, const SkIRect& clip) override;
+    const SkPixmap* justAnOpaqueColor(uint32_t* value) override;
+    void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+    void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+    int requestRowsPreserved() const override {
+        return fBlitter->requestRowsPreserved();
+    }
+
+    void* allocBlitMemory(size_t sz) override {
+        return fBlitter->allocBlitMemory(sz);
+    }
+
+private:
+    SkBlitter* fBlitter;   // not owned
+    SkIRect fClipRect;
+};
+#endif
+
+/** Factory to set up the appropriate most-efficient wrapper blitter
+    to apply a clip. Returns a pointer to a member, so lifetime must
+    be managed carefully.
+*/
+class SkBlitterClipper {
+public:
+    SkBlitter* apply(SkBlitter* blitter, const SkRegion* clip,
+                     const SkIRect* bounds = nullptr);
+
+private:
+    // One instance of each wrapper; apply() returns whichever fits the clip.
+    SkNullBlitter fNullBlitter;
+    SkRectClipBlitter fRectBlitter;
+    SkRgnClipBlitter fRgnBlitter;
+};
+
+// A good size for creating shader contexts on the stack.
+enum {kSkBlitterContextSize = 3332};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkBlitter_A8.cpp b/gfx/skia/skia/src/core/SkBlitter_A8.cpp
new file mode 100644
index 0000000000..ea01296c99
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_A8.cpp
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkTypes.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBlitter_A8.h"
+
+// Writes raw coverage values directly into an A8 destination; requires a
+// paint with no shader and no color filter.
+SkA8_Coverage_Blitter::SkA8_Coverage_Blitter(const SkPixmap& device, const SkPaint& paint)
+    : fDevice(device)
+{
+    SkASSERT(nullptr == paint.getShader());
+    SkASSERT(nullptr == paint.getColorFilter());
+}
+
+void SkA8_Coverage_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+                                      const int16_t runs[]) {
+    uint8_t* device = fDevice.writable_addr8(x, y);
+    SkDEBUGCODE(int totalCount = 0;)
+
+    for (;;) {
+        int count = runs[0];
+        SkASSERT(count >= 0);
+        if (count == 0) {
+            return;
+        }
+        // Zero coverage leaves the destination untouched.
+        if (antialias[0]) {
+            memset(device, antialias[0], count);
+        }
+        runs += count;
+        antialias += count;
+        device += count;
+
+        SkDEBUGCODE(totalCount += count;)
+    }
+    // NOTE(review): unreachable — the loop above only exits via 'return'.
+    SkASSERT(fDevice.width() == totalCount);
+}
+
+void SkA8_Coverage_Blitter::blitH(int x, int y, int width) {
+    // Full coverage: write 0xFF across the run.
+    memset(fDevice.writable_addr8(x, y), 0xFF, width);
+}
+
+void SkA8_Coverage_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+    if (0 == alpha) {
+        return;
+    }
+
+    uint8_t* dst = fDevice.writable_addr8(x, y);
+    const size_t dstRB = fDevice.rowBytes();
+    while (--height >= 0) {
+        *dst = alpha;
+        dst += dstRB;
+    }
+}
+
+void SkA8_Coverage_Blitter::blitRect(int x, int y, int width, int height) {
+    uint8_t* dst = fDevice.writable_addr8(x, y);
+    const size_t dstRB = fDevice.rowBytes();
+    while (--height >= 0) {
+        memset(dst, 0xFF, width);
+        dst += dstRB;
+    }
+}
+
+void SkA8_Coverage_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+    // Only A8 masks are copied directly; other formats fall back to the base
+    // implementation.
+    if (SkMask::kA8_Format != mask.fFormat) {
+        this->SkBlitter::blitMask(mask, clip);
+        return;
+    }
+
+    int x = clip.fLeft;
+    int y = clip.fTop;
+    int width = clip.width();
+    int height = clip.height();
+
+    uint8_t* dst = fDevice.writable_addr8(x, y);
+    const uint8_t* src = mask.getAddr8(x, y);
+    const size_t srcRB = mask.fRowBytes;
+    const size_t dstRB = fDevice.rowBytes();
+
+    while (--height >= 0) {
+        memcpy(dst, src, width);
+        dst += dstRB;
+        src += srcRB;
+    }
+}
+
+// Coverage output is not a single color, so there is no opaque-color fast path.
+const SkPixmap* SkA8_Coverage_Blitter::justAnOpaqueColor(uint32_t*) {
+    return nullptr;
+}
+
+//////////////
+
+// Fast (x + 128) * 257 >> 16 approximation of x / 255 for x in [0, 255*255].
+static inline uint8_t div255(unsigned prod) {
+    SkASSERT(prod <= 255*255);
+    return (prod + 128) * 257 >> 16;
+}
+
+// Linear interpolation between a and b by t (0..255), rounded via div255.
+static inline unsigned u8_lerp(uint8_t a, uint8_t b, uint8_t t) {
+    return div255((255 - t) * a + t * b);
+}
+
+// Per-pixel blend: combine one source alpha with one destination alpha.
+using AlphaProc = uint8_t(*)(uint8_t src, uint8_t dst);
+
+static uint8_t srcover_p (uint8_t src, uint8_t dst) { return src + div255((255 - src) * dst); }
+static uint8_t src_p     (uint8_t src, uint8_t dst) { return src; }
+
+// Apply 'proc' to N destination pixels with full (non-AA) coverage.
+template <typename Mode> void A8_row_bw(uint8_t dst[], uint8_t src, int N, Mode proc) {
+    for (int i = 0; i < N; ++i) {
+        dst[i] = proc(src, dst[i]);
+    }
+}
+using A8_RowBlitBW = void(*)(uint8_t[], uint8_t, int);
+
+// Apply 'proc' to N pixels with partial coverage 'aa'. When the mode allows
+// it (canFoldAA), the coverage is folded into src up front; otherwise each
+// result is lerped toward the blended value by aa.
+template <typename Mode>
+void A8_row_aa(uint8_t dst[], uint8_t src, int N, uint8_t aa, Mode proc, const bool canFoldAA) {
+    if (canFoldAA) {
+        src = div255(src * aa);
+        for (int i = 0; i < N; ++i) {
+            dst[i] = proc(src, dst[i]);
+        }
+    } else {
+        for (int i = 0; i < N; ++i) {
+            dst[i] = u8_lerp(dst[i], proc(src, dst[i]), aa);
+        }
+    }
+}
+using A8_RowBlitAA = void(*)(uint8_t[], uint8_t, int, uint8_t aa);
+
+// Expands to the {oneProc, bwProc, aaProc} triple for one blend proc.
+#define WRAP_BLIT(proc, canFoldAA) \
+    proc, \
+    [](uint8_t dst[], uint8_t src, int N) \
+        { A8_row_bw(dst, src, N, proc); }, \
+    [](uint8_t dst[], uint8_t src, int N, uint8_t aa) \
+        { A8_row_aa(dst, src, N, aa, proc, canFoldAA); }
+
+struct A8_RowBlitBWPair {
+    SkBlendMode mode;
+    AlphaProc oneProc;
+    A8_RowBlitBW bwProc;
+    A8_RowBlitAA aaProc;
+};
+// The only blend modes SkA8_Blitter supports.
+constexpr A8_RowBlitBWPair gA8_RowBlitPairs[] = {
+    {SkBlendMode::kSrcOver, WRAP_BLIT(srcover_p, true)},
+    {SkBlendMode::kSrc,     WRAP_BLIT(src_p,     false)},
+};
+#undef WRAP_BLIT
+
+// Linear lookup; returns nullptr for unsupported blend modes.
+static const A8_RowBlitBWPair* find_a8_rowproc_pair(SkBlendMode bm) {
+    for (auto& pair : gA8_RowBlitPairs) {
+        if (pair.mode == bm) {
+            return &pair;
+        }
+    }
+    return nullptr;
+}
+
+// Blends the paint's alpha into an A8 destination using the row procs
+// selected for the paint's blend mode (SrcOver or Src only).
+class SkA8_Blitter : public SkBlitter {
+public:
+    SkA8_Blitter(const SkPixmap& device, const SkPaint& paint);
+    void blitH(int x, int y, int width) override;
+    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+    void blitV(int x, int y, int height, SkAlpha alpha) override;
+    void blitRect(int x, int y, int width, int height) override;
+    void blitMask(const SkMask&, const SkIRect&) override;
+    const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+
+private:
+    const SkPixmap fDevice;
+    AlphaProc      fOneProc;   // single-pixel blend
+    A8_RowBlitBW   fBWProc;    // full-coverage row blend
+    A8_RowBlitAA   fAAProc;    // partial-coverage row blend
+    SkAlpha        fSrc;       // paint alpha used as the source value
+
+    using INHERITED = SkBlitter;
+};
+
+// Caller (SkA8Blitter_Choose) guarantees a supported blend mode and a paint
+// with no shader or color filter.
+SkA8_Blitter::SkA8_Blitter(const SkPixmap& device,
+                           const SkPaint& paint) : fDevice(device) {
+    SkASSERT(nullptr == paint.getShader());
+    SkASSERT(nullptr == paint.getColorFilter());
+    auto mode = paint.asBlendMode();
+    SkASSERT(mode);
+    auto pair = find_a8_rowproc_pair(*mode);
+    SkASSERT(pair);
+
+    fOneProc = pair->oneProc;
+    fBWProc  = pair->bwProc;
+    fAAProc  = pair->aaProc;
+    fSrc     = paint.getAlpha();
+}
+
+void SkA8_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) {
+    uint8_t* device = fDevice.writable_addr8(x, y);
+    SkDEBUGCODE(int totalCount = 0;)
+
+    for (;;) {
+        int count = runs[0];
+        SkASSERT(count >= 0);
+        if (count == 0) {
+            return;
+        }
+
+        // Full coverage takes the BW fast path; zero coverage is skipped.
+        if (antialias[0] == 0xFF) {
+            fBWProc(device, fSrc, count);
+        } else if (antialias[0] != 0) {
+            fAAProc(device, fSrc, count, antialias[0]);
+        }
+
+        runs += count;
+        antialias += count;
+        device += count;
+
+        SkDEBUGCODE(totalCount += count;)
+    }
+    // NOTE(review): unreachable — the loop above only exits via 'return'.
+    SkASSERT(fDevice.width() == totalCount);
+}
+
+void SkA8_Blitter::blitH(int x, int y, int width) {
+    fBWProc(fDevice.writable_addr8(x, y), fSrc, width);
+}
+
+void SkA8_Blitter::blitV(int x, int y, int height, SkAlpha aa) {
+    uint8_t* device = fDevice.writable_addr8(x, y);
+    const size_t dstRB = fDevice.rowBytes();
+
+    if (aa == 0xFF) {
+        while (--height >= 0) {
+            *device = fOneProc(fSrc, *device);
+            device += dstRB;
+        }
+    } else if (aa != 0) {
+        // One pixel per row, each blended with the column's coverage.
+        while (--height >= 0) {
+            fAAProc(device, fSrc, 1, aa);
+            device += dstRB;
+        }
+    }
+}
+
+void SkA8_Blitter::blitRect(int x, int y, int width, int height) {
+    uint8_t* device = fDevice.writable_addr8(x, y);
+    const size_t dstRB = fDevice.rowBytes();
+
+    while (--height >= 0) {
+        fBWProc(device, fSrc, width);
+        device += dstRB;
+    }
+}
+
+void SkA8_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+    // Only A8 masks are handled here; other formats use the base fallback.
+    if (SkMask::kA8_Format != mask.fFormat) {
+        this->INHERITED::blitMask(mask, clip);
+        return;
+    }
+
+    int x = clip.fLeft;
+    int y = clip.fTop;
+    int width = clip.width();
+    int height = clip.height();
+
+    uint8_t* dst = fDevice.writable_addr8(x, y);
+    const uint8_t* src = mask.getAddr8(x, y);
+    const size_t srcRB = mask.fRowBytes;
+    const size_t dstRB = fDevice.rowBytes();
+
+    while (--height >= 0) {
+        // Lerp each pixel toward the blended value by the mask's coverage.
+        for (int i = 0; i < width; ++i) {
+            dst[i] = u8_lerp(dst[i], fOneProc(fSrc, dst[i]), src[i]);
+        }
+        dst += dstRB;
+        src += srcRB;
+    }
+}
+
+// Blended A8 output is not a single flat color.
+const SkPixmap* SkA8_Blitter::justAnOpaqueColor(uint32_t*) {
+    return nullptr;
+}
+
+//////////////////
+
+// Factory for A8 destinations. Returns nullptr when this family of blitters
+// cannot handle the request (non-A8 target, shader/color-filter/clip-shader
+// present, or unsupported blend mode), letting the caller fall back.
+SkBlitter* SkA8Blitter_Choose(const SkPixmap& dst,
+                              const SkMatrix& ctm,
+                              const SkPaint& paint,
+                              SkArenaAlloc* alloc,
+                              bool drawCoverage,
+                              sk_sp<SkShader> clipShader,
+                              const SkSurfaceProps&) {
+    if (dst.colorType() != SkColorType::kAlpha_8_SkColorType) {
+        return nullptr;
+    }
+    if (paint.getShader() || paint.getColorFilter()) {
+        return nullptr;
+    }
+    if (clipShader) {
+        return nullptr; // would not be hard to support ...?
+    }
+
+    if (drawCoverage) {
+        return alloc->make<SkA8_Coverage_Blitter>(dst, paint);
+    } else {
+        // we only support certain blendmodes...
+        auto mode = paint.asBlendMode();
+        if (mode && find_a8_rowproc_pair(*mode)) {
+            return alloc->make<SkA8_Blitter>(dst, paint);
+        }
+    }
+    return nullptr;
+}
+
diff --git a/gfx/skia/skia/src/core/SkBlitter_A8.h b/gfx/skia/skia/src/core/SkBlitter_A8.h
new file mode 100644
index 0000000000..cd91d5fdd7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_A8.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitter_A8_DEFINED
+#define SkBlitter_A8_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkBlitter.h"
+
+class SkPaint;
+class SkMatrix;
+class SkArenaAlloc;
+class SkShader;
+class SkSurfaceProps;
+
+// Writes raw coverage (not blended color) into an A8 destination;
+// definitions live in SkBlitter_A8.cpp.
+class SkA8_Coverage_Blitter : public SkBlitter {
+public:
+    SkA8_Coverage_Blitter(const SkPixmap& device, const SkPaint& paint);
+    void blitH(int x, int y, int width) override;
+    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+    void blitV(int x, int y, int height, SkAlpha alpha) override;
+    void blitRect(int x, int y, int width, int height) override;
+    void blitMask(const SkMask&, const SkIRect&) override;
+    const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+
+private:
+    const SkPixmap fDevice;
+};
+
+// Factory: returns an A8 blitter, or nullptr if this request must be handled
+// by another blitter family.
+SkBlitter* SkA8Blitter_Choose(const SkPixmap& dst,
+                              const SkMatrix& ctm,
+                              const SkPaint& paint,
+                              SkArenaAlloc*,
+                              bool drawCoverage,
+                              sk_sp<SkShader> clipShader,
+                              const SkSurfaceProps&);
+
+#endif // SkBlitter_A8_DEFINED
diff --git a/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp b/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp
new file mode 100644
index 0000000000..fbb5322080
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_ARGB32.cpp
@@ -0,0 +1,1420 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkShader.h"
+#include "include/private/SkColorData.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkXfermodePriv.h"
+
+static inline int upscale_31_to_32(int value) {
+ SkASSERT((unsigned)value <= 31);
+ return value + (value >> 4);
+}
+
+static inline int blend_32(int src, int dst, int scale) {
+ SkASSERT((unsigned)src <= 0xFF);
+ SkASSERT((unsigned)dst <= 0xFF);
+ SkASSERT((unsigned)scale <= 32);
+ return dst + ((src - dst) * scale >> 5);
+}
+
+static inline SkPMColor blend_lcd16(int srcA, int srcR, int srcG, int srcB,
+ SkPMColor dst, uint16_t mask) {
+ if (mask == 0) {
+ return dst;
+ }
+
+ /* We want all of these in 5bits, hence the shifts in case one of them
+ * (green) is 6bits.
+ */
+ int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
+
+ // Now upscale them to 0..32, so we can use blend32
+ maskR = upscale_31_to_32(maskR);
+ maskG = upscale_31_to_32(maskG);
+ maskB = upscale_31_to_32(maskB);
+
+ // srcA has been upscaled to 256 before passed into this function
+ maskR = maskR * srcA >> 8;
+ maskG = maskG * srcA >> 8;
+ maskB = maskB * srcA >> 8;
+
+ int dstR = SkGetPackedR32(dst);
+ int dstG = SkGetPackedG32(dst);
+ int dstB = SkGetPackedB32(dst);
+
+ // LCD blitting is only supported if the dst is known/required
+ // to be opaque
+ return SkPackARGB32(0xFF,
+ blend_32(srcR, dstR, maskR),
+ blend_32(srcG, dstG, maskG),
+ blend_32(srcB, dstB, maskB));
+}
+
+static inline SkPMColor blend_lcd16_opaque(int srcR, int srcG, int srcB,
+ SkPMColor dst, uint16_t mask,
+ SkPMColor opaqueDst) {
+ if (mask == 0) {
+ return dst;
+ }
+
+ if (0xFFFF == mask) {
+ return opaqueDst;
+ }
+
+ /* We want all of these in 5bits, hence the shifts in case one of them
+ * (green) is 6bits.
+ */
+ int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
+ int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
+ int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
+
+ // Now upscale them to 0..32, so we can use blend32
+ maskR = upscale_31_to_32(maskR);
+ maskG = upscale_31_to_32(maskG);
+ maskB = upscale_31_to_32(maskB);
+
+ int dstR = SkGetPackedR32(dst);
+ int dstG = SkGetPackedG32(dst);
+ int dstB = SkGetPackedB32(dst);
+
+ // LCD blitting is only supported if the dst is known/required
+ // to be opaque
+ return SkPackARGB32(0xFF,
+ blend_32(srcR, dstR, maskR),
+ blend_32(srcG, dstG, maskG),
+ blend_32(srcB, dstB, maskB));
+}
+
+
+// TODO: rewrite at least the SSE code here. It's miserable.
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <emmintrin.h>
+
+ // The following (left) shifts cause the top 5 bits of the mask components to
+ // line up with the corresponding components in an SkPMColor.
+ // Note that the mask's RGB16 order may differ from the SkPMColor order.
+ #define SK_R16x5_R32x5_SHIFT (SK_R32_SHIFT - SK_R16_SHIFT - SK_R16_BITS + 5)
+ #define SK_G16x5_G32x5_SHIFT (SK_G32_SHIFT - SK_G16_SHIFT - SK_G16_BITS + 5)
+ #define SK_B16x5_B32x5_SHIFT (SK_B32_SHIFT - SK_B16_SHIFT - SK_B16_BITS + 5)
+
+ #if SK_R16x5_R32x5_SHIFT == 0
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (x)
+ #elif SK_R16x5_R32x5_SHIFT > 0
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (_mm_slli_epi32(x, SK_R16x5_R32x5_SHIFT))
+ #else
+ #define SkPackedR16x5ToUnmaskedR32x5_SSE2(x) (_mm_srli_epi32(x, -SK_R16x5_R32x5_SHIFT))
+ #endif
+
+ #if SK_G16x5_G32x5_SHIFT == 0
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (x)
+ #elif SK_G16x5_G32x5_SHIFT > 0
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (_mm_slli_epi32(x, SK_G16x5_G32x5_SHIFT))
+ #else
+ #define SkPackedG16x5ToUnmaskedG32x5_SSE2(x) (_mm_srli_epi32(x, -SK_G16x5_G32x5_SHIFT))
+ #endif
+
+ #if SK_B16x5_B32x5_SHIFT == 0
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (x)
+ #elif SK_B16x5_B32x5_SHIFT > 0
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (_mm_slli_epi32(x, SK_B16x5_B32x5_SHIFT))
+ #else
+ #define SkPackedB16x5ToUnmaskedB32x5_SSE2(x) (_mm_srli_epi32(x, -SK_B16x5_B32x5_SHIFT))
+ #endif
+
+ static __m128i blend_lcd16_sse2(__m128i &src, __m128i &dst, __m128i &mask, __m128i &srcA) {
+ // In the following comments, the components of src, dst and mask are
+ // abbreviated as (s)rc, (d)st, and (m)ask. Color components are marked
+ // by an R, G, B, or A suffix. Components of one of the four pixels that
+ // are processed in parallel are marked with 0, 1, 2, and 3. "d1B", for
+ // example is the blue channel of the second destination pixel. Memory
+ // layout is shown for an ARGB byte order in a color value.
+
+ // src and srcA store 8-bit values interleaved with zeros.
+ // src = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ // srcA = (srcA, 0, srcA, 0, srcA, 0, srcA, 0,
+ // srcA, 0, srcA, 0, srcA, 0, srcA, 0)
+ // mask stores 16-bit values (compressed three channels) interleaved with zeros.
+ // Lo and Hi denote the low and high bytes of a 16-bit value, respectively.
+ // mask = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+
+ // Get the R,G,B of each 16bit mask pixel, we want all of them in 5 bits.
+ // r = (0, m0R, 0, 0, 0, m1R, 0, 0, 0, m2R, 0, 0, 0, m3R, 0, 0)
+ __m128i r = _mm_and_si128(SkPackedR16x5ToUnmaskedR32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_R32_SHIFT));
+
+ // g = (0, 0, m0G, 0, 0, 0, m1G, 0, 0, 0, m2G, 0, 0, 0, m3G, 0)
+ __m128i g = _mm_and_si128(SkPackedG16x5ToUnmaskedG32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_G32_SHIFT));
+
+ // b = (0, 0, 0, m0B, 0, 0, 0, m1B, 0, 0, 0, m2B, 0, 0, 0, m3B)
+ __m128i b = _mm_and_si128(SkPackedB16x5ToUnmaskedB32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_B32_SHIFT));
+
+ // Pack the 4 16bit mask pixels into 4 32bit pixels, (p0, p1, p2, p3)
+ // Each component (m0R, m0G, etc.) is then a 5-bit value aligned to an
+ // 8-bit position
+ // mask = (0, m0R, m0G, m0B, 0, m1R, m1G, m1B,
+ // 0, m2R, m2G, m2B, 0, m3R, m3G, m3B)
+ mask = _mm_or_si128(_mm_or_si128(r, g), b);
+
+ // Interleave R,G,B into the lower byte of word.
+ // i.e. split the sixteen 8-bit values from mask into two sets of eight
+ // 16-bit values, padded by zero.
+ __m128i maskLo, maskHi;
+ // maskLo = (0, 0, m0R, 0, m0G, 0, m0B, 0, 0, 0, m1R, 0, m1G, 0, m1B, 0)
+ maskLo = _mm_unpacklo_epi8(mask, _mm_setzero_si128());
+ // maskHi = (0, 0, m2R, 0, m2G, 0, m2B, 0, 0, 0, m3R, 0, m3G, 0, m3B, 0)
+ maskHi = _mm_unpackhi_epi8(mask, _mm_setzero_si128());
+
+ // Upscale from 0..31 to 0..32
+ // (allows to replace division by left-shift further down)
+ // Left-shift each component by 4 and add the result back to that component,
+ // mapping numbers in the range 0..15 to 0..15, and 16..31 to 17..32
+ maskLo = _mm_add_epi16(maskLo, _mm_srli_epi16(maskLo, 4));
+ maskHi = _mm_add_epi16(maskHi, _mm_srli_epi16(maskHi, 4));
+
+ // Multiply each component of maskLo and maskHi by srcA
+ maskLo = _mm_mullo_epi16(maskLo, srcA);
+ maskHi = _mm_mullo_epi16(maskHi, srcA);
+
+ // Right shift mask components by 8 (divide by 256)
+ maskLo = _mm_srli_epi16(maskLo, 8);
+ maskHi = _mm_srli_epi16(maskHi, 8);
+
+ // Interleave R,G,B into the lower byte of the word
+ // dstLo = (0, 0, d0R, 0, d0G, 0, d0B, 0, 0, 0, d1R, 0, d1G, 0, d1B, 0)
+ __m128i dstLo = _mm_unpacklo_epi8(dst, _mm_setzero_si128());
+ // dstHi = (0, 0, d2R, 0, d2G, 0, d2B, 0, 0, 0, d3R, 0, d3G, 0, d3B, 0)
+ __m128i dstHi = _mm_unpackhi_epi8(dst, _mm_setzero_si128());
+
+ // mask = (src - dst) * mask
+ maskLo = _mm_mullo_epi16(maskLo, _mm_sub_epi16(src, dstLo));
+ maskHi = _mm_mullo_epi16(maskHi, _mm_sub_epi16(src, dstHi));
+
+ // mask = (src - dst) * mask >> 5
+ maskLo = _mm_srai_epi16(maskLo, 5);
+ maskHi = _mm_srai_epi16(maskHi, 5);
+
+ // Add two pixels into result.
+ // result = dst + ((src - dst) * mask >> 5)
+ __m128i resultLo = _mm_add_epi16(dstLo, maskLo);
+ __m128i resultHi = _mm_add_epi16(dstHi, maskHi);
+
+ // Pack into 4 32bit dst pixels.
+ // resultLo and resultHi contain eight 16-bit components (two pixels) each.
+ // Merge into one SSE register with sixteen 8-bit values (four pixels),
+ // clamping to 255 if necessary.
+ return _mm_packus_epi16(resultLo, resultHi);
+ }
+
+ static __m128i blend_lcd16_opaque_sse2(__m128i &src, __m128i &dst, __m128i &mask) {
+ // In the following comments, the components of src, dst and mask are
+ // abbreviated as (s)rc, (d)st, and (m)ask. Color components are marked
+ // by an R, G, B, or A suffix. Components of one of the four pixels that
+ // are processed in parallel are marked with 0, 1, 2, and 3. "d1B", for
+ // example is the blue channel of the second destination pixel. Memory
+ // layout is shown for an ARGB byte order in a color value.
+
+ // src and srcA store 8-bit values interleaved with zeros.
+ // src = (0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ // mask stores 16-bit values (shown as high and low bytes) interleaved with
+ // zeros
+ // mask = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+
+ // Get the R,G,B of each 16bit mask pixel, we want all of them in 5 bits.
+ // r = (0, m0R, 0, 0, 0, m1R, 0, 0, 0, m2R, 0, 0, 0, m3R, 0, 0)
+ __m128i r = _mm_and_si128(SkPackedR16x5ToUnmaskedR32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_R32_SHIFT));
+
+ // g = (0, 0, m0G, 0, 0, 0, m1G, 0, 0, 0, m2G, 0, 0, 0, m3G, 0)
+ __m128i g = _mm_and_si128(SkPackedG16x5ToUnmaskedG32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_G32_SHIFT));
+
+ // b = (0, 0, 0, m0B, 0, 0, 0, m1B, 0, 0, 0, m2B, 0, 0, 0, m3B)
+ __m128i b = _mm_and_si128(SkPackedB16x5ToUnmaskedB32x5_SSE2(mask),
+ _mm_set1_epi32(0x1F << SK_B32_SHIFT));
+
+ // Pack the 4 16bit mask pixels into 4 32bit pixels, (p0, p1, p2, p3)
+ // Each component (m0R, m0G, etc.) is then a 5-bit value aligned to an
+ // 8-bit position
+ // mask = (0, m0R, m0G, m0B, 0, m1R, m1G, m1B,
+ // 0, m2R, m2G, m2B, 0, m3R, m3G, m3B)
+ mask = _mm_or_si128(_mm_or_si128(r, g), b);
+
+ // Interleave R,G,B into the lower byte of word.
+ // i.e. split the sixteen 8-bit values from mask into two sets of eight
+ // 16-bit values, padded by zero.
+ __m128i maskLo, maskHi;
+ // maskLo = (0, 0, m0R, 0, m0G, 0, m0B, 0, 0, 0, m1R, 0, m1G, 0, m1B, 0)
+ maskLo = _mm_unpacklo_epi8(mask, _mm_setzero_si128());
+ // maskHi = (0, 0, m2R, 0, m2G, 0, m2B, 0, 0, 0, m3R, 0, m3G, 0, m3B, 0)
+ maskHi = _mm_unpackhi_epi8(mask, _mm_setzero_si128());
+
+ // Upscale from 0..31 to 0..32
+ // (allows to replace division by left-shift further down)
+ // Left-shift each component by 4 and add the result back to that component,
+ // mapping numbers in the range 0..15 to 0..15, and 16..31 to 17..32
+ maskLo = _mm_add_epi16(maskLo, _mm_srli_epi16(maskLo, 4));
+ maskHi = _mm_add_epi16(maskHi, _mm_srli_epi16(maskHi, 4));
+
+ // Interleave R,G,B into the lower byte of the word
+ // dstLo = (0, 0, d0R, 0, d0G, 0, d0B, 0, 0, 0, d1R, 0, d1G, 0, d1B, 0)
+ __m128i dstLo = _mm_unpacklo_epi8(dst, _mm_setzero_si128());
+ // dstHi = (0, 0, d2R, 0, d2G, 0, d2B, 0, 0, 0, d3R, 0, d3G, 0, d3B, 0)
+ __m128i dstHi = _mm_unpackhi_epi8(dst, _mm_setzero_si128());
+
+ // mask = (src - dst) * mask
+ maskLo = _mm_mullo_epi16(maskLo, _mm_sub_epi16(src, dstLo));
+ maskHi = _mm_mullo_epi16(maskHi, _mm_sub_epi16(src, dstHi));
+
+ // mask = (src - dst) * mask >> 5
+ maskLo = _mm_srai_epi16(maskLo, 5);
+ maskHi = _mm_srai_epi16(maskHi, 5);
+
+ // Add two pixels into result.
+ // result = dst + ((src - dst) * mask >> 5)
+ __m128i resultLo = _mm_add_epi16(dstLo, maskLo);
+ __m128i resultHi = _mm_add_epi16(dstHi, maskHi);
+
+ // Pack into 4 32bit dst pixels and force opaque.
+ // resultLo and resultHi contain eight 16-bit components (two pixels) each.
+ // Merge into one SSE register with sixteen 8-bit values (four pixels),
+ // clamping to 255 if necessary. Set alpha components to 0xFF.
+ return _mm_or_si128(_mm_packus_epi16(resultLo, resultHi),
+ _mm_set1_epi32(SK_A32_MASK << SK_A32_SHIFT));
+ }
+
+ void blit_row_lcd16(SkPMColor dst[], const uint16_t mask[], SkColor src, int width, SkPMColor) {
+ if (width <= 0) {
+ return;
+ }
+
+ int srcA = SkColorGetA(src);
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ srcA = SkAlpha255To256(srcA);
+
+ if (width >= 4) {
+ SkASSERT(((size_t)dst & 0x03) == 0);
+ while (((size_t)dst & 0x0F) != 0) {
+ *dst = blend_lcd16(srcA, srcR, srcG, srcB, *dst, *mask);
+ mask++;
+ dst++;
+ width--;
+ }
+
+ __m128i *d = reinterpret_cast<__m128i*>(dst);
+ // Set alpha to 0xFF and replicate source four times in SSE register.
+ __m128i src_sse = _mm_set1_epi32(SkPackARGB32(0xFF, srcR, srcG, srcB));
+ // Interleave with zeros to get two sets of four 16-bit values.
+ src_sse = _mm_unpacklo_epi8(src_sse, _mm_setzero_si128());
+ // Set srcA_sse to contain eight copies of srcA, padded with zero.
+ // src_sse=(0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ __m128i srcA_sse = _mm_set1_epi16(srcA);
+ while (width >= 4) {
+ // Load four destination pixels into dst_sse.
+ __m128i dst_sse = _mm_load_si128(d);
+ // Load four 16-bit masks into lower half of mask_sse.
+ __m128i mask_sse = _mm_loadl_epi64(
+ reinterpret_cast<const __m128i*>(mask));
+
+ // Check whether masks are equal to 0 and get the highest bit
+ // of each byte of result, if masks are all zero, we will get
+ // pack_cmp to 0xFFFF
+ int pack_cmp = _mm_movemask_epi8(_mm_cmpeq_epi16(mask_sse,
+ _mm_setzero_si128()));
+
+ // if mask pixels are not all zero, we will blend the dst pixels
+ if (pack_cmp != 0xFFFF) {
+ // Unpack 4 16bit mask pixels to
+ // mask_sse = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+ mask_sse = _mm_unpacklo_epi16(mask_sse,
+ _mm_setzero_si128());
+
+ // Process 4 32bit dst pixels
+ __m128i result = blend_lcd16_sse2(src_sse, dst_sse, mask_sse, srcA_sse);
+ _mm_store_si128(d, result);
+ }
+
+ d++;
+ mask += 4;
+ width -= 4;
+ }
+
+ dst = reinterpret_cast<SkPMColor*>(d);
+ }
+
+ while (width > 0) {
+ *dst = blend_lcd16(srcA, srcR, srcG, srcB, *dst, *mask);
+ mask++;
+ dst++;
+ width--;
+ }
+ }
+
+ void blit_row_lcd16_opaque(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width, SkPMColor opaqueDst) {
+ if (width <= 0) {
+ return;
+ }
+
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ if (width >= 4) {
+ SkASSERT(((size_t)dst & 0x03) == 0);
+ while (((size_t)dst & 0x0F) != 0) {
+ *dst = blend_lcd16_opaque(srcR, srcG, srcB, *dst, *mask, opaqueDst);
+ mask++;
+ dst++;
+ width--;
+ }
+
+ __m128i *d = reinterpret_cast<__m128i*>(dst);
+ // Set alpha to 0xFF and replicate source four times in SSE register.
+ __m128i src_sse = _mm_set1_epi32(SkPackARGB32(0xFF, srcR, srcG, srcB));
+ // Interleave with zeros to get two sets of four 16-bit values.
+ // src_sse=(0xFF, 0, sR, 0, sG, 0, sB, 0, 0xFF, 0, sR, 0, sG, 0, sB, 0)
+ src_sse = _mm_unpacklo_epi8(src_sse, _mm_setzero_si128());
+ while (width >= 4) {
+ // Load four destination pixels into dst_sse.
+ __m128i dst_sse = _mm_load_si128(d);
+ // Load four 16-bit masks into lower half of mask_sse.
+ __m128i mask_sse = _mm_loadl_epi64(
+ reinterpret_cast<const __m128i*>(mask));
+
+ // Check whether masks are equal to 0 and get the highest bit
+ // of each byte of result, if masks are all zero, we will get
+ // pack_cmp to 0xFFFF
+ int pack_cmp = _mm_movemask_epi8(_mm_cmpeq_epi16(mask_sse,
+ _mm_setzero_si128()));
+
+ // if mask pixels are not all zero, we will blend the dst pixels
+ if (pack_cmp != 0xFFFF) {
+ // Unpack 4 16bit mask pixels to
+ // mask_sse = (m0RGBLo, m0RGBHi, 0, 0, m1RGBLo, m1RGBHi, 0, 0,
+ // m2RGBLo, m2RGBHi, 0, 0, m3RGBLo, m3RGBHi, 0, 0)
+ mask_sse = _mm_unpacklo_epi16(mask_sse,
+ _mm_setzero_si128());
+
+ // Process 4 32bit dst pixels
+ __m128i result = blend_lcd16_opaque_sse2(src_sse, dst_sse, mask_sse);
+ _mm_store_si128(d, result);
+ }
+
+ d++;
+ mask += 4;
+ width -= 4;
+ }
+
+ dst = reinterpret_cast<SkPMColor*>(d);
+ }
+
+ while (width > 0) {
+ *dst = blend_lcd16_opaque(srcR, srcG, srcB, *dst, *mask, opaqueDst);
+ mask++;
+ dst++;
+ width--;
+ }
+ }
+
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+
+ #define NEON_A (SK_A32_SHIFT / 8)
+ #define NEON_R (SK_R32_SHIFT / 8)
+ #define NEON_G (SK_G32_SHIFT / 8)
+ #define NEON_B (SK_B32_SHIFT / 8)
+
+ static inline uint8x8_t blend_32_neon(uint8x8_t src, uint8x8_t dst, uint16x8_t scale) {
+ int16x8_t src_wide, dst_wide;
+
+ src_wide = vreinterpretq_s16_u16(vmovl_u8(src));
+ dst_wide = vreinterpretq_s16_u16(vmovl_u8(dst));
+
+ src_wide = (src_wide - dst_wide) * vreinterpretq_s16_u16(scale);
+
+ dst_wide += vshrq_n_s16(src_wide, 5);
+
+ return vmovn_u16(vreinterpretq_u16_s16(dst_wide));
+ }
+
+ void blit_row_lcd16_opaque(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width,
+ SkPMColor opaqueDst) {
+ int colR = SkColorGetR(color);
+ int colG = SkColorGetG(color);
+ int colB = SkColorGetB(color);
+
+ uint8x8_t vcolR = vdup_n_u8(colR);
+ uint8x8_t vcolG = vdup_n_u8(colG);
+ uint8x8_t vcolB = vdup_n_u8(colB);
+ uint8x8_t vopqDstA = vdup_n_u8(SkGetPackedA32(opaqueDst));
+ uint8x8_t vopqDstR = vdup_n_u8(SkGetPackedR32(opaqueDst));
+ uint8x8_t vopqDstG = vdup_n_u8(SkGetPackedG32(opaqueDst));
+ uint8x8_t vopqDstB = vdup_n_u8(SkGetPackedB32(opaqueDst));
+
+ while (width >= 8) {
+ uint8x8x4_t vdst;
+ uint16x8_t vmask;
+ uint16x8_t vmaskR, vmaskG, vmaskB;
+ uint8x8_t vsel_trans, vsel_opq;
+
+ vdst = vld4_u8((uint8_t*)dst);
+ vmask = vld1q_u16(src);
+
+ // Prepare compare masks
+ vsel_trans = vmovn_u16(vceqq_u16(vmask, vdupq_n_u16(0)));
+ vsel_opq = vmovn_u16(vceqq_u16(vmask, vdupq_n_u16(0xFFFF)));
+
+ // Get all the color masks on 5 bits
+ vmaskR = vshrq_n_u16(vmask, SK_R16_SHIFT);
+ vmaskG = vshrq_n_u16(vshlq_n_u16(vmask, SK_R16_BITS),
+ SK_B16_BITS + SK_R16_BITS + 1);
+ vmaskB = vmask & vdupq_n_u16(SK_B16_MASK);
+
+ // Upscale to 0..32
+ vmaskR = vmaskR + vshrq_n_u16(vmaskR, 4);
+ vmaskG = vmaskG + vshrq_n_u16(vmaskG, 4);
+ vmaskB = vmaskB + vshrq_n_u16(vmaskB, 4);
+
+ vdst.val[NEON_A] = vbsl_u8(vsel_trans, vdst.val[NEON_A], vdup_n_u8(0xFF));
+ vdst.val[NEON_A] = vbsl_u8(vsel_opq, vopqDstA, vdst.val[NEON_A]);
+
+ vdst.val[NEON_R] = blend_32_neon(vcolR, vdst.val[NEON_R], vmaskR);
+ vdst.val[NEON_G] = blend_32_neon(vcolG, vdst.val[NEON_G], vmaskG);
+ vdst.val[NEON_B] = blend_32_neon(vcolB, vdst.val[NEON_B], vmaskB);
+
+ vdst.val[NEON_R] = vbsl_u8(vsel_opq, vopqDstR, vdst.val[NEON_R]);
+ vdst.val[NEON_G] = vbsl_u8(vsel_opq, vopqDstG, vdst.val[NEON_G]);
+ vdst.val[NEON_B] = vbsl_u8(vsel_opq, vopqDstB, vdst.val[NEON_B]);
+
+ vst4_u8((uint8_t*)dst, vdst);
+
+ dst += 8;
+ src += 8;
+ width -= 8;
+ }
+
+ // Leftovers
+ for (int i = 0; i < width; i++) {
+ dst[i] = blend_lcd16_opaque(colR, colG, colB, dst[i], src[i], opaqueDst);
+ }
+ }
+
+ void blit_row_lcd16(SkPMColor dst[], const uint16_t src[],
+ SkColor color, int width, SkPMColor) {
+ int colA = SkColorGetA(color);
+ int colR = SkColorGetR(color);
+ int colG = SkColorGetG(color);
+ int colB = SkColorGetB(color);
+
+ colA = SkAlpha255To256(colA);
+
+ uint16x8_t vcolA = vdupq_n_u16(colA);
+ uint8x8_t vcolR = vdup_n_u8(colR);
+ uint8x8_t vcolG = vdup_n_u8(colG);
+ uint8x8_t vcolB = vdup_n_u8(colB);
+
+ while (width >= 8) {
+ uint8x8x4_t vdst;
+ uint16x8_t vmask;
+ uint16x8_t vmaskR, vmaskG, vmaskB;
+
+ vdst = vld4_u8((uint8_t*)dst);
+ vmask = vld1q_u16(src);
+
+ // Get all the color masks on 5 bits
+ vmaskR = vshrq_n_u16(vmask, SK_R16_SHIFT);
+ vmaskG = vshrq_n_u16(vshlq_n_u16(vmask, SK_R16_BITS),
+ SK_B16_BITS + SK_R16_BITS + 1);
+ vmaskB = vmask & vdupq_n_u16(SK_B16_MASK);
+
+ // Upscale to 0..32
+ vmaskR = vmaskR + vshrq_n_u16(vmaskR, 4);
+ vmaskG = vmaskG + vshrq_n_u16(vmaskG, 4);
+ vmaskB = vmaskB + vshrq_n_u16(vmaskB, 4);
+
+ vmaskR = vshrq_n_u16(vmaskR * vcolA, 8);
+ vmaskG = vshrq_n_u16(vmaskG * vcolA, 8);
+ vmaskB = vshrq_n_u16(vmaskB * vcolA, 8);
+
+ vdst.val[NEON_A] = vdup_n_u8(0xFF);
+ vdst.val[NEON_R] = blend_32_neon(vcolR, vdst.val[NEON_R], vmaskR);
+ vdst.val[NEON_G] = blend_32_neon(vcolG, vdst.val[NEON_G], vmaskG);
+ vdst.val[NEON_B] = blend_32_neon(vcolB, vdst.val[NEON_B], vmaskB);
+
+ vst4_u8((uint8_t*)dst, vdst);
+
+ dst += 8;
+ src += 8;
+ width -= 8;
+ }
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = blend_lcd16(colA, colR, colG, colB, dst[i], src[i]);
+ }
+ }
+
+#else
+
+ static inline void blit_row_lcd16(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width, SkPMColor) {
+ int srcA = SkColorGetA(src);
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ srcA = SkAlpha255To256(srcA);
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = blend_lcd16(srcA, srcR, srcG, srcB, dst[i], mask[i]);
+ }
+ }
+
+ static inline void blit_row_lcd16_opaque(SkPMColor dst[], const uint16_t mask[],
+ SkColor src, int width,
+ SkPMColor opaqueDst) {
+ int srcR = SkColorGetR(src);
+ int srcG = SkColorGetG(src);
+ int srcB = SkColorGetB(src);
+
+ for (int i = 0; i < width; i++) {
+ dst[i] = blend_lcd16_opaque(srcR, srcG, srcB, dst[i], mask[i], opaqueDst);
+ }
+ }
+
+#endif
+
+static bool blit_color(const SkPixmap& device,
+ const SkMask& mask,
+ const SkIRect& clip,
+ SkColor color) {
+ int x = clip.fLeft,
+ y = clip.fTop;
+
+ if (device.colorType() == kN32_SkColorType && mask.fFormat == SkMask::kA8_Format) {
+ SkOpts::blit_mask_d32_a8(device.writable_addr32(x,y), device.rowBytes(),
+ (const SkAlpha*)mask.getAddr(x,y), mask.fRowBytes,
+ color, clip.width(), clip.height());
+ return true;
+ }
+
+ if (device.colorType() == kN32_SkColorType && mask.fFormat == SkMask::kLCD16_Format) {
+ auto dstRow = device.writable_addr32(x,y);
+ auto maskRow = (const uint16_t*)mask.getAddr(x,y);
+
+ auto blit_row = blit_row_lcd16;
+ SkPMColor opaqueDst = 0; // ignored unless opaque
+
+ if (0xff == SkColorGetA(color)) {
+ blit_row = blit_row_lcd16_opaque;
+ opaqueDst = SkPreMultiplyColor(color);
+ }
+
+ for (int height = clip.height(); height --> 0; ) {
+ blit_row(dstRow, maskRow, color, clip.width(), opaqueDst);
+
+ dstRow = (SkPMColor*) (( char*) dstRow + device.rowBytes());
+ maskRow = (const uint16_t*)((const char*)maskRow + mask.fRowBytes);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void SkARGB32_Blit32(const SkPixmap& device, const SkMask& mask,
+ const SkIRect& clip, SkPMColor srcColor) {
+ U8CPU alpha = SkGetPackedA32(srcColor);
+ unsigned flags = SkBlitRow::kSrcPixelAlpha_Flag32;
+ if (alpha != 255) {
+ flags |= SkBlitRow::kGlobalAlpha_Flag32;
+ }
+ SkBlitRow::Proc32 proc = SkBlitRow::Factory32(flags);
+
+ int x = clip.fLeft;
+ int y = clip.fTop;
+ int width = clip.width();
+ int height = clip.height();
+
+ SkPMColor* dstRow = device.writable_addr32(x, y);
+ const SkPMColor* srcRow = reinterpret_cast<const SkPMColor*>(mask.getAddr8(x, y));
+
+ do {
+ proc(dstRow, srcRow, width, alpha);
+ dstRow = (SkPMColor*)((char*)dstRow + device.rowBytes());
+ srcRow = (const SkPMColor*)((const char*)srcRow + mask.fRowBytes);
+ } while (--height != 0);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+SkARGB32_Blitter::SkARGB32_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device) {
+ SkColor color = paint.getColor();
+ fColor = color;
+
+ fSrcA = SkColorGetA(color);
+ unsigned scale = SkAlpha255To256(fSrcA);
+ fSrcR = SkAlphaMul(SkColorGetR(color), scale);
+ fSrcG = SkAlphaMul(SkColorGetG(color), scale);
+ fSrcB = SkAlphaMul(SkColorGetB(color), scale);
+
+ fPMColor = SkPackARGB32(fSrcA, fSrcR, fSrcG, fSrcB);
+}
+
+const SkPixmap* SkARGB32_Blitter::justAnOpaqueColor(uint32_t* value) {
+ if (255 == fSrcA) {
+ *value = fPMColor;
+ return &fDevice;
+ }
+ return nullptr;
+}
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+void SkARGB32_Blitter::blitH(int x, int y, int width) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkBlitRow::Color32(device, device, width, fPMColor);
+}
+
+void SkARGB32_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint32_t color = fPMColor;
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ unsigned opaqueMask = fSrcA; // if fSrcA is 0xFF, then we will catch the fast opaque case
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ unsigned aa = antialias[0];
+ if (aa) {
+ if ((opaqueMask & aa) == 255) {
+ SkOpts::memset32(device, color, count);
+ } else {
+ uint32_t sc = SkAlphaMulQ(color, SkAlpha255To256(aa));
+ SkBlitRow::Color32(device, device, count, sc);
+ }
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+ }
+}
+
+void SkARGB32_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = SkBlendARGB32(fPMColor, device[0], a0);
+ device[1] = SkBlendARGB32(fPMColor, device[1], a1);
+}
+
+void SkARGB32_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = SkBlendARGB32(fPMColor, device[0], a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = SkBlendARGB32(fPMColor, device[0], a1);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+#define solid_8_pixels(mask, dst, color) \
+ do { \
+ if (mask & 0x80) dst[0] = color; \
+ if (mask & 0x40) dst[1] = color; \
+ if (mask & 0x20) dst[2] = color; \
+ if (mask & 0x10) dst[3] = color; \
+ if (mask & 0x08) dst[4] = color; \
+ if (mask & 0x04) dst[5] = color; \
+ if (mask & 0x02) dst[6] = color; \
+ if (mask & 0x01) dst[7] = color; \
+ } while (0)
+
+#define SK_BLITBWMASK_NAME SkARGB32_BlitBW
+#define SK_BLITBWMASK_ARGS , SkPMColor color
+#define SK_BLITBWMASK_BLIT8(mask, dst) solid_8_pixels(mask, dst, color)
+#define SK_BLITBWMASK_GETADDR writable_addr32
+#define SK_BLITBWMASK_DEVTYPE uint32_t
+#include "src/core/SkBlitBWMaskTemplate.h"
+
+#define blend_8_pixels(mask, dst, sc, dst_scale) \
+ do { \
+ if (mask & 0x80) { dst[0] = sc + SkAlphaMulQ(dst[0], dst_scale); } \
+ if (mask & 0x40) { dst[1] = sc + SkAlphaMulQ(dst[1], dst_scale); } \
+ if (mask & 0x20) { dst[2] = sc + SkAlphaMulQ(dst[2], dst_scale); } \
+ if (mask & 0x10) { dst[3] = sc + SkAlphaMulQ(dst[3], dst_scale); } \
+ if (mask & 0x08) { dst[4] = sc + SkAlphaMulQ(dst[4], dst_scale); } \
+ if (mask & 0x04) { dst[5] = sc + SkAlphaMulQ(dst[5], dst_scale); } \
+ if (mask & 0x02) { dst[6] = sc + SkAlphaMulQ(dst[6], dst_scale); } \
+ if (mask & 0x01) { dst[7] = sc + SkAlphaMulQ(dst[7], dst_scale); } \
+ } while (0)
+
+#define SK_BLITBWMASK_NAME SkARGB32_BlendBW
+#define SK_BLITBWMASK_ARGS , uint32_t sc, unsigned dst_scale
+#define SK_BLITBWMASK_BLIT8(mask, dst) blend_8_pixels(mask, dst, sc, dst_scale)
+#define SK_BLITBWMASK_GETADDR writable_addr32
+#define SK_BLITBWMASK_DEVTYPE uint32_t
+#include "src/core/SkBlitBWMaskTemplate.h"
+
+void SkARGB32_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+ SkASSERT(fSrcA != 0xFF);
+
+ if (fSrcA == 0) {
+ return;
+ }
+
+ if (blit_color(fDevice, mask, clip, fColor)) {
+ return;
+ }
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ SkARGB32_BlendBW(fDevice, mask, clip, fPMColor, SkAlpha255To256(255 - fSrcA));
+ break;
+ case SkMask::kARGB32_Format:
+ SkARGB32_Blit32(fDevice, mask, clip, fPMColor);
+ break;
+ default:
+ SK_ABORT("Mask format not handled.");
+ }
+}
+
+void SkARGB32_Opaque_Blitter::blitMask(const SkMask& mask,
+ const SkIRect& clip) {
+ SkASSERT(mask.fBounds.contains(clip));
+
+ if (blit_color(fDevice, mask, clip, fColor)) {
+ return;
+ }
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ SkARGB32_BlitBW(fDevice, mask, clip, fPMColor);
+ break;
+ case SkMask::kARGB32_Format:
+ SkARGB32_Blit32(fDevice, mask, clip, fPMColor);
+ break;
+ default:
+ SK_ABORT("Mask format not handled.");
+ }
+}
+
+void SkARGB32_Opaque_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a0);
+ device[1] = SkFastFourByteInterp(fPMColor, device[1], a1);
+}
+
+void SkARGB32_Opaque_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = SkFastFourByteInterp(fPMColor, device[0], a1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkARGB32_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (alpha == 0 || fSrcA == 0) {
+ return;
+ }
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ uint32_t color = fPMColor;
+
+ if (alpha != 255) {
+ color = SkAlphaMulQ(color, SkAlpha255To256(alpha));
+ }
+
+ unsigned dst_scale = SkAlpha255To256(255 - SkGetPackedA32(color));
+ size_t rowBytes = fDevice.rowBytes();
+ while (--height >= 0) {
+ device[0] = color + SkAlphaMulQ(device[0], dst_scale);
+ device = (uint32_t*)((char*)device + rowBytes);
+ }
+}
+
+void SkARGB32_Blitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width() && y + height <= fDevice.height());
+
+ if (fSrcA == 0) {
+ return;
+ }
+
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ uint32_t color = fPMColor;
+ size_t rowBytes = fDevice.rowBytes();
+
+ if (SkGetPackedA32(fPMColor) == 0xFF) {
+ SkOpts::rect_memset32(device, color, width, rowBytes, height);
+ } else {
+ while (height --> 0) {
+ SkBlitRow::Color32(device, device, width, color);
+ device = (uint32_t*)((char*)device + rowBytes);
+ }
+ }
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+///////////////////////////////////////////////////////////////////////
+
+void SkARGB32_Black_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+ const int16_t runs[]) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkPMColor black = (SkPMColor)(SK_A32_MASK << SK_A32_SHIFT);
+
+ for (;;) {
+ int count = runs[0];
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return;
+ }
+ unsigned aa = antialias[0];
+ if (aa) {
+ if (aa == 255) {
+ SkOpts::memset32(device, black, count);
+ } else {
+ SkPMColor src = aa << SK_A32_SHIFT;
+ unsigned dst_scale = 256 - aa;
+ int n = count;
+ do {
+ --n;
+ device[n] = src + SkAlphaMulQ(device[n], dst_scale);
+ } while (n > 0);
+ }
+ }
+ runs += count;
+ antialias += count;
+ device += count;
+ }
+}
+
+void SkARGB32_Black_Blitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x + 1, y);)
+
+ device[0] = (a0 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a0);
+ device[1] = (a1 << SK_A32_SHIFT) + SkAlphaMulQ(device[1], 256 - a1);
+}
+
+void SkARGB32_Black_Blitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ uint32_t* device = fDevice.writable_addr32(x, y);
+ SkDEBUGCODE((void)fDevice.writable_addr32(x, y + 1);)
+
+ device[0] = (a0 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a0);
+ device = (uint32_t*)((char*)device + fDevice.rowBytes());
+ device[0] = (a1 << SK_A32_SHIFT) + SkAlphaMulQ(device[0], 256 - a1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Special version of SkBlitRow::Factory32 that knows we're in kSrc_Mode,
+// instead of kSrcOver_Mode.
+// Lerps each span pixel toward the device pixel by the global coverage `aa`,
+// so full coverage replaces the pixel outright (kSrc semantics).
+static void blend_srcmode(SkPMColor* SK_RESTRICT device,
+                          const SkPMColor* SK_RESTRICT span,
+                          int count, U8CPU aa) {
+    int aa256 = SkAlpha255To256(aa);
+    for (int i = 0; i < count; ++i) {
+        device[i] = SkFourByteInterp256(span[i], device[i], aa256);
+    }
+}
+
+// Shader-driven blitter: shades each span with the paint's shader, then
+// composites the shaded colors into the device via blit-row procs or an
+// SkXfermode.
+SkARGB32_Shader_Blitter::SkARGB32_Shader_Blitter(const SkPixmap& device,
+        const SkPaint& paint, SkShaderBase::Context* shaderContext)
+    : INHERITED(device, paint, shaderContext)
+{
+    // Scratch row for shader output when we can't shade directly into device.
+    fBuffer = (SkPMColor*)sk_malloc_throw(device.width() * (sizeof(SkPMColor)));
+
+    fXfermode = SkXfermode::Peek(paint.getBlendMode_or(SkBlendMode::kSrcOver));
+
+    int flags = 0;
+    if (!(shaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag)) {
+        flags |= SkBlitRow::kSrcPixelAlpha_Flag32;
+    }
+    // we call this on the output from the shader
+    fProc32 = SkBlitRow::Factory32(flags);
+    // we call this on the output from the shader + alpha from the aa buffer
+    fProc32Blend = SkBlitRow::Factory32(flags | SkBlitRow::kGlobalAlpha_Flag32);
+
+    // The shader may write straight into the device when its output is the
+    // final pixel value: opaque output with default blending, or kSrc mode.
+    fShadeDirectlyIntoDevice = false;
+    if (fXfermode == nullptr) {
+        if (shaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag) {
+            fShadeDirectlyIntoDevice = true;
+        }
+    } else {
+        if (SkBlendMode::kSrc == paint.asBlendMode()) {
+            fShadeDirectlyIntoDevice = true;
+            // kSrc still needs coverage-aware blending on AA edges.
+            fProc32Blend = blend_srcmode;
+        }
+    }
+
+    // When set, shader output does not vary with y, so one shaded row can be
+    // reused for every row of a rect (see blitRect/blitV).
+    fConstInY = SkToBool(shaderContext->getFlags() & SkShaderBase::kConstInY32_Flag);
+}
+
+// Releases the scratch span buffer allocated in the constructor.
+SkARGB32_Shader_Blitter::~SkARGB32_Shader_Blitter() {
+    sk_free(fBuffer);
+}
+
+// Fully covered horizontal run: shade it, then either leave the shader output
+// in place or composite the scratch span into the device.
+void SkARGB32_Shader_Blitter::blitH(int x, int y, int width) {
+    SkASSERT(x >= 0 && y >= 0 && x + width <= fDevice.width());
+
+    uint32_t* device = fDevice.writable_addr32(x, y);
+
+    if (fShadeDirectlyIntoDevice) {
+        fShaderContext->shadeSpan(x, y, device, width);
+    } else {
+        SkPMColor* span = fBuffer;
+        fShaderContext->shadeSpan(x, y, span, width);
+        if (fXfermode) {
+            fXfermode->xfer32(device, span, width, nullptr);
+        } else {
+            fProc32(device, span, width, 255);
+        }
+    }
+}
+
+// Fully covered rectangle.  Two axes of specialization:
+//  - fConstInY: shade one row and reuse it (memcpy or re-blend) for the rest;
+//  - fShadeDirectlyIntoDevice vs. scratch-span + xfermode/blit-row proc.
+void SkARGB32_Shader_Blitter::blitRect(int x, int y, int width, int height) {
+    SkASSERT(x >= 0 && y >= 0 &&
+             x + width <= fDevice.width() && y + height <= fDevice.height());
+
+    uint32_t* device = fDevice.writable_addr32(x, y);
+    size_t deviceRB = fDevice.rowBytes();
+    auto* shaderContext = fShaderContext;
+    SkPMColor* span = fBuffer;
+
+    if (fConstInY) {
+        if (fShadeDirectlyIntoDevice) {
+            // shade the first row directly into the device
+            shaderContext->shadeSpan(x, y, device, width);
+            span = device;
+            // Remaining rows are byte copies of the first (width * 4 bytes).
+            while (--height > 0) {
+                device = (uint32_t*)((char*)device + deviceRB);
+                memcpy(device, span, width << 2);
+            }
+        } else {
+            // Shade once into the scratch span, then re-blend it per row.
+            shaderContext->shadeSpan(x, y, span, width);
+            SkXfermode* xfer = fXfermode;
+            if (xfer) {
+                do {
+                    xfer->xfer32(device, span, width, nullptr);
+                    y += 1;
+                    device = (uint32_t*)((char*)device + deviceRB);
+                } while (--height > 0);
+            } else {
+                SkBlitRow::Proc32 proc = fProc32;
+                do {
+                    proc(device, span, width, 255);
+                    y += 1;
+                    device = (uint32_t*)((char*)device + deviceRB);
+                } while (--height > 0);
+            }
+        }
+        return;
+    }
+
+    // General case: shade every row.
+    if (fShadeDirectlyIntoDevice) {
+        do {
+            shaderContext->shadeSpan(x, y, device, width);
+            y += 1;
+            device = (uint32_t*)((char*)device + deviceRB);
+        } while (--height > 0);
+    } else {
+        SkXfermode* xfer = fXfermode;
+        if (xfer) {
+            do {
+                shaderContext->shadeSpan(x, y, span, width);
+                xfer->xfer32(device, span, width, nullptr);
+                y += 1;
+                device = (uint32_t*)((char*)device + deviceRB);
+            } while (--height > 0);
+        } else {
+            SkBlitRow::Proc32 proc = fProc32;
+            do {
+                shaderContext->shadeSpan(x, y, span, width);
+                proc(device, span, width, 255);
+                y += 1;
+                device = (uint32_t*)((char*)device + deviceRB);
+            } while (--height > 0);
+        }
+    }
+}
+
+// Antialiased horizontal runs (RLE-encoded as in the Black blitter above).
+// Three strategies, chosen once per call:
+//  1. custom xfermode      -> shade into scratch, xfer32 (per-pixel when aa<255);
+//  2. direct/opaque shader -> shade full-coverage spans straight into device,
+//                             blend partial-coverage spans via fProc32Blend;
+//  3. general              -> shade into scratch, fProc32 / fProc32Blend.
+void SkARGB32_Shader_Blitter::blitAntiH(int x, int y, const SkAlpha antialias[],
+                                        const int16_t runs[]) {
+    SkPMColor* span = fBuffer;
+    uint32_t* device = fDevice.writable_addr32(x, y);
+    auto* shaderContext = fShaderContext;
+
+    if (fXfermode && !fShadeDirectlyIntoDevice) {
+        for (;;) {
+            SkXfermode* xfer = fXfermode;
+
+            int count = *runs;
+            if (count <= 0)
+                break;
+            int aa = *antialias;
+            if (aa) {
+                shaderContext->shadeSpan(x, y, span, count);
+                if (aa == 255) {
+                    xfer->xfer32(device, span, count, nullptr);
+                } else {
+                    // count is almost always 1
+                    for (int i = count - 1; i >= 0; --i) {
+                        xfer->xfer32(&device[i], &span[i], 1, antialias);
+                    }
+                }
+            }
+            device += count;
+            runs += count;
+            antialias += count;
+            x += count;
+        }
+    } else if (fShadeDirectlyIntoDevice ||
+               (shaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag)) {
+        for (;;) {
+            int count = *runs;
+            if (count <= 0) {
+                break;
+            }
+            int aa = *antialias;
+            if (aa) {
+                if (aa == 255) {
+                    // cool, have the shader draw right into the device
+                    shaderContext->shadeSpan(x, y, device, count);
+                } else {
+                    shaderContext->shadeSpan(x, y, span, count);
+                    fProc32Blend(device, span, count, aa);
+                }
+            }
+            device += count;
+            runs += count;
+            antialias += count;
+            x += count;
+        }
+    } else {
+        for (;;) {
+            int count = *runs;
+            if (count <= 0) {
+                break;
+            }
+            int aa = *antialias;
+            if (aa) {
+                shaderContext->shadeSpan(x, y, span, count);
+                if (aa == 255) {
+                    fProc32(device, span, count, 255);
+                } else {
+                    fProc32Blend(device, span, count, aa);
+                }
+            }
+            device += count;
+            runs += count;
+            antialias += count;
+            x += count;
+        }
+    }
+}
+
+// SIMD lane types for the mask blend rows below: 4 pixels at a time, viewed
+// either as 4 x 32-bit pixels or 16 x 8-bit channels.
+using U32 = skvx::Vec< 4, uint32_t>;
+using U8x4 = skvx::Vec<16, uint8_t>;
+using U8 = skvx::Vec< 4, uint8_t>;
+
+// Runs `kernel` (dst, src, coverage -> dst, all as 16 channel bytes) over n
+// pixels: a 4-wide vector main loop plus a scalar tail.  Each pixel's
+// coverage byte is splatted across its 4 channels before the kernel runs.
+static void drive(SkPMColor* dst, const SkPMColor* src, const uint8_t* cov, int n,
+                  U8x4 (*kernel)(U8x4,U8x4,U8x4)) {
+
+    auto apply = [kernel](U32 dst, U32 src, U8 cov) -> U32 {
+        U8x4 cov_splat = skvx::shuffle<0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3>(cov);
+        return skvx::bit_pun<U32>(kernel(skvx::bit_pun<U8x4>(dst),
+                                         skvx::bit_pun<U8x4>(src),
+                                         cov_splat));
+    };
+    while (n >= 4) {
+        apply(U32::Load(dst), U32::Load(src), U8::Load(cov)).store(dst);
+        dst += 4;
+        src += 4;
+        cov += 4;
+        n -= 4;
+    }
+    // Scalar tail: widen a single pixel to a vector, apply, take lane 0.
+    while (n --> 0) {
+        *dst = apply(U32{*dst}, U32{*src}, U8{*cov})[0];
+        dst++;
+        src++;
+        cov++;
+    }
+}
+
+// A8 mask blend, general (non-opaque) source: scale src by coverage, then
+// src-over using the scaled source's own alpha.
+static void blend_row_A8(SkPMColor* dst, const void* mask, const SkPMColor* src, int n) {
+    auto cov = (const uint8_t*)mask;
+    drive(dst, src, cov, n, [](U8x4 d, U8x4 s, U8x4 c) {
+        U8x4 s_aa = skvx::approx_scale(s, c),
+             alpha = skvx::shuffle<3,3,3,3, 7,7,7,7, 11,11,11,11, 15,15,15,15>(s_aa);
+        return s_aa + skvx::approx_scale(d, 255 - alpha);
+    });
+}
+
+// A8 mask blend, opaque source: plain lerp between src and dst by coverage,
+// done in 16-bit to avoid overflow, then divided by 255.
+static void blend_row_A8_opaque(SkPMColor* dst, const void* mask, const SkPMColor* src, int n) {
+    auto cov = (const uint8_t*)mask;
+    drive(dst, src, cov, n, [](U8x4 d, U8x4 s, U8x4 c) {
+        return skvx::div255( skvx::cast<uint16_t>(s) * skvx::cast<uint16_t>( c )
+                           + skvx::cast<uint16_t>(d) * skvx::cast<uint16_t>(255-c));
+    });
+}
+
+// LCD16 (subpixel) mask blend, general source.  The 565 mask supplies an
+// independent coverage value per R/G/B channel; each channel is blended with
+// its own coverage, taking the source alpha into account.
+static void blend_row_lcd16(SkPMColor* dst, const void* vmask, const SkPMColor* src, int n) {
+    // d + m * (s - sa * d), i.e. src-over with per-channel coverage m.
+    auto src_alpha_blend = [](int s, int d, int sa, int m) {
+        return d + SkAlphaMul(s - SkAlphaMul(sa, d), m);
+    };
+
+    // Replicate the top bits so 31 maps to exactly 255.
+    auto upscale_31_to_255 = [](int v) {
+        return (v << 3) | (v >> 2);
+    };
+
+    auto mask = (const uint16_t*)vmask;
+    for (int i = 0; i < n; ++i) {
+        uint16_t m = mask[i];
+        if (0 == m) {
+            continue;  // zero coverage on all three channels: dst unchanged
+        }
+
+        SkPMColor s = src[i];
+        SkPMColor d = dst[i];
+
+        int srcA = SkGetPackedA32(s);
+        int srcR = SkGetPackedR32(s);
+        int srcG = SkGetPackedG32(s);
+        int srcB = SkGetPackedB32(s);
+
+        // Bias alpha from 0..255 to 0..256 for SkAlphaMul.
+        srcA += srcA >> 7;
+
+        // We're ignoring the least significant bit of the green coverage channel here.
+        int maskR = SkGetPackedR16(m) >> (SK_R16_BITS - 5);
+        int maskG = SkGetPackedG16(m) >> (SK_G16_BITS - 5);
+        int maskB = SkGetPackedB16(m) >> (SK_B16_BITS - 5);
+
+        // Scale up to 8-bit coverage to work with SkAlphaMul() in src_alpha_blend().
+        maskR = upscale_31_to_255(maskR);
+        maskG = upscale_31_to_255(maskG);
+        maskB = upscale_31_to_255(maskB);
+
+        // This LCD blit routine only works if the destination is opaque.
+        dst[i] = SkPackARGB32(0xFF,
+                              src_alpha_blend(srcR, SkGetPackedR32(d), srcA, maskR),
+                              src_alpha_blend(srcG, SkGetPackedG32(d), srcA, maskG),
+                              src_alpha_blend(srcB, SkGetPackedB32(d), srcA, maskB));
+    }
+}
+
+// LCD16 (subpixel) mask blend, opaque source: per-channel lerp with the 565
+// mask coverage.  upscale_31_to_32() and blend_32() are helpers defined
+// earlier in this file (above this chunk).
+static void blend_row_LCD16_opaque(SkPMColor* dst, const void* vmask, const SkPMColor* src, int n) {
+    auto mask = (const uint16_t*)vmask;
+
+    for (int i = 0; i < n; ++i) {
+        uint16_t m = mask[i];
+        if (0 == m) {
+            continue;  // zero coverage: dst unchanged
+        }
+
+        SkPMColor s = src[i];
+        SkPMColor d = dst[i];
+
+        int srcR = SkGetPackedR32(s);
+        int srcG = SkGetPackedG32(s);
+        int srcB = SkGetPackedB32(s);
+
+        // We're ignoring the least significant bit of the green coverage channel here.
+        int maskR = SkGetPackedR16(m) >> (SK_R16_BITS - 5);
+        int maskG = SkGetPackedG16(m) >> (SK_G16_BITS - 5);
+        int maskB = SkGetPackedB16(m) >> (SK_B16_BITS - 5);
+
+        // Now upscale them to 0..32, so we can use blend_32.
+        maskR = upscale_31_to_32(maskR);
+        maskG = upscale_31_to_32(maskG);
+        maskB = upscale_31_to_32(maskB);
+
+        // This LCD blit routine only works if the destination is opaque.
+        dst[i] = SkPackARGB32(0xFF,
+                              blend_32(srcR, SkGetPackedR32(d), maskR),
+                              blend_32(srcG, SkGetPackedG32(d), maskG),
+                              blend_32(srcB, SkGetPackedB32(d), maskB));
+    }
+}
+
+// Blit through an A8 or LCD16 mask clipped to `clip`.  Picks a blend_row
+// function based on mask format and shader opacity; any unsupported
+// combination falls back to the base-class implementation.
+void SkARGB32_Shader_Blitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+    // we only handle kA8 with an xfermode
+    if (fXfermode && (SkMask::kA8_Format != mask.fFormat)) {
+        this->INHERITED::blitMask(mask, clip);
+        return;
+    }
+
+    SkASSERT(mask.fBounds.contains(clip));
+
+    void (*blend_row)(SkPMColor*, const void* mask, const SkPMColor*, int) = nullptr;
+
+    if (!fXfermode) {
+        bool opaque = (fShaderContext->getFlags() & SkShaderBase::kOpaqueAlpha_Flag);
+
+        if (mask.fFormat == SkMask::kA8_Format && opaque) {
+            blend_row = blend_row_A8_opaque;
+        } else if (mask.fFormat == SkMask::kA8_Format) {
+            blend_row = blend_row_A8;
+        } else if (mask.fFormat == SkMask::kLCD16_Format && opaque) {
+            blend_row = blend_row_LCD16_opaque;
+        } else if (mask.fFormat == SkMask::kLCD16_Format) {
+            blend_row = blend_row_lcd16;
+        } else {
+            // Unsupported mask format: let the generic blitter handle it.
+            this->INHERITED::blitMask(mask, clip);
+            return;
+        }
+    }
+
+    const int x = clip.fLeft;
+    const int width = clip.width();
+    int y = clip.fTop;
+    int height = clip.height();
+
+    char* dstRow = (char*)fDevice.writable_addr32(x, y);
+    const size_t dstRB = fDevice.rowBytes();
+    const uint8_t* maskRow = (const uint8_t*)mask.getAddr(x, y);
+    const size_t maskRB = mask.fRowBytes;
+
+    SkPMColor* span = fBuffer;
+
+    // Row loop: shade into the scratch span, then blend it through the mask.
+    if (fXfermode) {
+        SkASSERT(SkMask::kA8_Format == mask.fFormat);
+        SkXfermode* xfer = fXfermode;
+        do {
+            fShaderContext->shadeSpan(x, y, span, width);
+            xfer->xfer32(reinterpret_cast<SkPMColor*>(dstRow), span, width, maskRow);
+            dstRow += dstRB;
+            maskRow += maskRB;
+            y += 1;
+        } while (--height > 0);
+    } else {
+        SkASSERT(blend_row);
+        do {
+            fShaderContext->shadeSpan(x, y, span, width);
+            blend_row(reinterpret_cast<SkPMColor*>(dstRow), maskRow, span, width);
+            dstRow += dstRB;
+            maskRow += maskRB;
+            y += 1;
+        } while (--height > 0);
+    }
+}
+
+// Vertical one-pixel-wide column with uniform alpha.  Mirrors blitRect's
+// structure: fConstInY shades a single pixel and reuses it; otherwise the
+// shader runs once per row, directly or through xfermode/blit-row procs.
+void SkARGB32_Shader_Blitter::blitV(int x, int y, int height, SkAlpha alpha) {
+    SkASSERT(x >= 0 && y >= 0 && y + height <= fDevice.height());
+
+    uint32_t* device = fDevice.writable_addr32(x, y);
+    size_t deviceRB = fDevice.rowBytes();
+
+    if (fConstInY) {
+        // Shade one pixel; it is valid for the whole column.
+        SkPMColor c;
+        fShaderContext->shadeSpan(x, y, &c, 1);
+
+        if (fShadeDirectlyIntoDevice) {
+            if (255 == alpha) {
+                do {
+                    *device = c;
+                    device = (uint32_t*)((char*)device + deviceRB);
+                } while (--height > 0);
+            } else {
+                do {
+                    *device = SkFourByteInterp(c, *device, alpha);
+                    device = (uint32_t*)((char*)device + deviceRB);
+                } while (--height > 0);
+            }
+        } else {
+            SkXfermode* xfer = fXfermode;
+            if (xfer) {
+                do {
+                    xfer->xfer32(device, &c, 1, &alpha);
+                    device = (uint32_t*)((char*)device + deviceRB);
+                } while (--height > 0);
+            } else {
+                SkBlitRow::Proc32 proc = (255 == alpha) ? fProc32 : fProc32Blend;
+                do {
+                    proc(device, &c, 1, alpha);
+                    device = (uint32_t*)((char*)device + deviceRB);
+                } while (--height > 0);
+            }
+        }
+        return;
+    }
+
+    // General case: shade each row's single pixel.
+    if (fShadeDirectlyIntoDevice) {
+        if (255 == alpha) {
+            do {
+                fShaderContext->shadeSpan(x, y, device, 1);
+                y += 1;
+                device = (uint32_t*)((char*)device + deviceRB);
+            } while (--height > 0);
+        } else {
+            do {
+                SkPMColor c;
+                fShaderContext->shadeSpan(x, y, &c, 1);
+                *device = SkFourByteInterp(c, *device, alpha);
+                y += 1;
+                device = (uint32_t*)((char*)device + deviceRB);
+            } while (--height > 0);
+        }
+    } else {
+        SkPMColor* span = fBuffer;
+        SkXfermode* xfer = fXfermode;
+        if (xfer) {
+            do {
+                fShaderContext->shadeSpan(x, y, span, 1);
+                xfer->xfer32(device, span, 1, &alpha);
+                y += 1;
+                device = (uint32_t*)((char*)device + deviceRB);
+            } while (--height > 0);
+        } else {
+            SkBlitRow::Proc32 proc = (255 == alpha) ? fProc32 : fProc32Blend;
+            do {
+                fShaderContext->shadeSpan(x, y, span, 1);
+                proc(device, span, 1, alpha);
+                y += 1;
+                device = (uint32_t*)((char*)device + deviceRB);
+            } while (--height > 0);
+        }
+    }
+}
diff --git a/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp b/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp
new file mode 100644
index 0000000000..ac38d1bbc9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlitter_Sprite.cpp
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorSpace.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkSpriteBlitter.h"
+#include "src/core/SkVMBlitter.h"
+
+extern bool gUseSkVMBlitter;
+extern bool gSkForceRasterPipelineBlitter;
+
+// A sprite blitter copies/blends a source pixmap at a fixed offset into the
+// destination; only the source is bound at construction time.
+SkSpriteBlitter::SkSpriteBlitter(const SkPixmap& source)
+    : fSource(source) {}
+
+// Bind the destination, the sprite's top-left position, and the paint.
+// Returns true; subclasses may override and report failure.
+bool SkSpriteBlitter::setup(const SkPixmap& dst, int left, int top, const SkPaint& paint) {
+    fDst = dst;
+    fLeft = left;
+    fTop = top;
+    fPaint = &paint;
+    return true;
+}
+
+// Sprite blitters are only expected to receive blitRect(); a blitH call is a
+// bug in debug builds, but release builds degrade to a 1-row blitRect.
+void SkSpriteBlitter::blitH(int x, int y, int width) {
+    SkDEBUGFAIL("how did we get here?");
+
+    // Fallback to blitRect.
+    this->blitRect(x, y, width, 1);
+}
+
+// Antialiased runs are never expected for sprites; debug-fail and do nothing.
+void SkSpriteBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) {
+    SkDEBUGFAIL("how did we get here?");
+
+    // No fallback strategy.
+}
+
+// Unexpected for sprites; debug-fail, then defer to the generic blitter.
+void SkSpriteBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+    SkDEBUGFAIL("how did we get here?");
+
+    // Fall back to superclass if the code gets here in release mode.
+    INHERITED::blitV(x, y, height, alpha);
+}
+
+// Unexpected for sprites; debug-fail, then defer to the generic blitter.
+void SkSpriteBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+    SkDEBUGFAIL("how did we get here?");
+
+    // Fall back to superclass if the code gets here in release mode.
+    INHERITED::blitMask(mask, clip);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Fastest sprite path: when src and dst have identical color types, no
+// color-space conversion is needed, and the blend reduces to a plain copy
+// (kSrc, or kSrcOver with an opaque source), each row is a single memcpy.
+class SkSpriteBlitter_Memcpy final : public SkSpriteBlitter {
+public:
+    // Preconditions checked here; color-space equality is the caller's job.
+    static bool Supports(const SkPixmap& dst, const SkPixmap& src, const SkPaint& paint) {
+        // the caller has already inspected the colorspace on src and dst
+        SkASSERT(0 == SkColorSpaceXformSteps(src,dst).flags.mask());
+
+        if (dst.colorType() != src.colorType()) {
+            return false;
+        }
+        if (paint.getMaskFilter() || paint.getColorFilter() || paint.getImageFilter()) {
+            return false;
+        }
+        if (0xFF != paint.getAlpha()) {
+            return false;
+        }
+        const auto mode = paint.asBlendMode();
+        return mode == SkBlendMode::kSrc || (mode == SkBlendMode::kSrcOver && src.isOpaque());
+    }
+
+    SkSpriteBlitter_Memcpy(const SkPixmap& src)
+        : INHERITED(src) {}
+
+    // Copy the (x, y, width, height) sub-rect row by row from the source,
+    // which is offset by (fLeft, fTop) in device space.
+    void blitRect(int x, int y, int width, int height) override {
+        SkASSERT(fDst.colorType() == fSource.colorType());
+        SkASSERT(width > 0 && height > 0);
+
+        char* dst = (char*)fDst.writable_addr(x, y);
+        const char* src = (const char*)fSource.addr(x - fLeft, y - fTop);
+        const size_t dstRB = fDst.rowBytes();
+        const size_t srcRB = fSource.rowBytes();
+        const size_t bytesToCopy = width << fSource.shiftPerPixel();
+
+        while (height --> 0) {
+            memcpy(dst, src, bytesToCopy);
+            dst += dstRB;
+            src += srcRB;
+        }
+    }
+
+private:
+    using INHERITED = SkSpriteBlitter;
+};
+
+// General sprite path: builds an SkRasterPipeline that loads source pixels,
+// applies paint color (for alpha-only sources), color-space conversion, and
+// paint alpha, then delegates blending to a raster-pipeline blitter.
+class SkRasterPipelineSpriteBlitter : public SkSpriteBlitter {
+public:
+    SkRasterPipelineSpriteBlitter(const SkPixmap& src, SkArenaAlloc* alloc,
+                                  sk_sp<SkShader> clipShader)
+        : INHERITED(src)
+        , fAlloc(alloc)
+        , fBlitter(nullptr)
+        , fSrcPtr{nullptr, 0}
+        , fClipShader(std::move(clipShader))
+    {}
+
+    // Assembles the pipeline; returns false if the blitter can't be created.
+    bool setup(const SkPixmap& dst, int left, int top, const SkPaint& paint) override {
+        fDst = dst;
+        fLeft = left;
+        fTop = top;
+        fPaintColor = paint.getColor4f();
+
+        SkRasterPipeline p(fAlloc);
+        p.append_load(fSource.colorType(), &fSrcPtr);
+
+        if (SkColorTypeIsAlphaOnly(fSource.colorType())) {
+            // The color for A8 images comes from the (sRGB) paint color.
+            p.append_set_rgb(fAlloc, fPaintColor);
+            p.append(SkRasterPipelineOp::premul);
+        }
+        if (auto dstCS = fDst.colorSpace()) {
+            auto srcCS = fSource.colorSpace();
+            if (!srcCS || SkColorTypeIsAlphaOnly(fSource.colorType())) {
+                // We treat untagged images as sRGB.
+                // Alpha-only images get their r,g,b from the paint color, so they're also sRGB.
+                srcCS = sk_srgb_singleton();
+            }
+            auto srcAT = fSource.isOpaque() ? kOpaque_SkAlphaType
+                                            : kPremul_SkAlphaType;
+            fAlloc->make<SkColorSpaceXformSteps>(srcCS, srcAT,
+                                                 dstCS, kPremul_SkAlphaType)
+                ->apply(&p);
+        }
+        if (fPaintColor.fA != 1.0f) {
+            // Apply global paint alpha to the loaded source pixels.
+            p.append(SkRasterPipelineOp::scale_1_float, &fPaintColor.fA);
+        }
+
+        bool is_opaque = fSource.isOpaque() && fPaintColor.fA == 1.0f;
+        fBlitter = SkCreateRasterPipelineBlitter(fDst, paint, p, is_opaque, fAlloc, fClipShader);
+        return fBlitter != nullptr;
+    }
+
+    void blitRect(int x, int y, int width, int height) override {
+        fSrcPtr.stride = fSource.rowBytesAsPixels();
+
+        // We really want fSrcPtr.pixels = fSource.addr(-fLeft, -fTop) here, but that asserts.
+        // Instead we ask for addr(-fLeft+x, -fTop+y), then back up (x,y) manually.
+        // Representing bpp as a size_t keeps all this math in size_t instead of int,
+        // which could wrap around with large enough fSrcPtr.stride and y.
+        size_t bpp = fSource.info().bytesPerPixel();
+        fSrcPtr.pixels = (char*)fSource.addr(-fLeft+x, -fTop+y) - bpp * x
+                                                                - bpp * y * fSrcPtr.stride;
+
+        fBlitter->blitRect(x,y,width,height);
+    }
+
+private:
+    SkArenaAlloc*              fAlloc;
+    SkBlitter*                 fBlitter;
+    SkRasterPipeline_MemoryCtx fSrcPtr;
+    SkColor4f                  fPaintColor;
+    sk_sp<SkShader>            fClipShader;
+
+    using INHERITED = SkSpriteBlitter;
+};
+
+// returning null means the caller will call SkBlitter::Choose() and
+// have wrapped the source bitmap inside a shader
+//
+// Selection order: SkVM blitter (if globally forced) -> memcpy fast path ->
+// per-colortype legacy blitters -> raster-pipeline blitter -> SkVM fallback.
+SkBlitter* SkBlitter::ChooseSprite(const SkPixmap& dst, const SkPaint& paint,
+                                   const SkPixmap& source, int left, int top,
+                                   SkArenaAlloc* alloc, sk_sp<SkShader> clipShader) {
+    /*  We currently ignore antialiasing and filtertype, meaning we will take our
+        special blitters regardless of these settings. Ignoring filtertype seems fine
+        since by definition there is no scale in the matrix. Ignoring antialiasing is
+        a bit of a hack, since we "could" pass in the fractional left/top for the bitmap,
+        and respect that by blending the edges of the bitmap against the device. To support
+        this we could either add more special blitters here, or detect antialiasing in the
+        paint and return null if it is set, forcing the client to take the slow shader case
+        (which does respect soft edges).
+    */
+    SkASSERT(alloc != nullptr);
+
+    if (gUseSkVMBlitter) {
+        return SkVMBlitter::Make(dst, paint, source, left, top, alloc, std::move(clipShader));
+    }
+
+    // TODO: in principle SkRasterPipelineSpriteBlitter could be made to handle this.
+    if (source.alphaType() == kUnpremul_SkAlphaType) {
+        return nullptr;
+    }
+
+    SkSpriteBlitter* blitter = nullptr;
+
+    if (gSkForceRasterPipelineBlitter) {
+        // Do not use any of these optimized memory blitters
+    } else if (0 == SkColorSpaceXformSteps(source, dst).flags.mask() && !clipShader) {
+        if (!blitter && SkSpriteBlitter_Memcpy::Supports(dst, source, paint)) {
+            blitter = alloc->make<SkSpriteBlitter_Memcpy>(source);
+        }
+        if (!blitter) {
+            switch (dst.colorType()) {
+                case kN32_SkColorType:
+                    blitter = SkSpriteBlitter::ChooseL32(source, paint, alloc);
+                    break;
+                default:
+                    break;
+            }
+        }
+    }
+    if (!blitter && !paint.getMaskFilter()) {
+        blitter = alloc->make<SkRasterPipelineSpriteBlitter>(source, alloc, clipShader);
+    }
+
+    if (blitter && blitter->setup(dst, left, top, paint)) {
+        return blitter;
+    }
+
+    // Last resort: the SkVM blitter handles whatever remains.
+    return SkVMBlitter::Make(dst, paint, source, left, top, alloc, std::move(clipShader));
+}
diff --git a/gfx/skia/skia/src/core/SkBlurMF.cpp b/gfx/skia/skia/src/core/SkBlurMF.cpp
new file mode 100644
index 0000000000..c315d7e047
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlurMF.cpp
@@ -0,0 +1,1680 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathBuilder.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkVertices.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkBlitter_A8.h"
+#include "src/core/SkBlurMask.h"
+#include "src/core/SkDrawBase.h"
+#include "src/core/SkGpuBlurUtils.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkStringUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrResourceProvider.h"
+#include "src/gpu/ganesh/GrShaderCaps.h"
+#include "src/gpu/ganesh/GrStyle.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/gpu/ganesh/GrThreadSafeCache.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/SurfaceDrawContext.h"
+#include "src/gpu/ganesh/effects/GrBlendFragmentProcessor.h"
+#include "src/gpu/ganesh/effects/GrMatrixEffect.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/gpu/ganesh/geometry/GrStyledShape.h"
+#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
+#endif // defined(SK_GANESH)
+
+using namespace skia_private;
+
+// Mask-filter implementation backing SkBlurMaskFilter: blurs an A8 coverage
+// mask with the given sigma and style.  When fRespectCTM is false the sigma
+// is used as-is (device space); otherwise it is mapped through the CTM.
+class SkBlurMaskFilterImpl : public SkMaskFilterBase {
+public:
+    SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle, bool respectCTM);
+
+    // overrides from SkMaskFilter
+    SkMask::Format getFormat() const override;
+    bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+                    SkIPoint* margin) const override;
+
+#if defined(SK_GANESH)
+    // GPU (Ganesh) blur entry points; compiled only with GPU support.
+    bool canFilterMaskGPU(const GrStyledShape& shape,
+                          const SkIRect& devSpaceShapeBounds,
+                          const SkIRect& clipBounds,
+                          const SkMatrix& ctm,
+                          SkIRect* maskRect) const override;
+    bool directFilterMaskGPU(GrRecordingContext*,
+                             skgpu::ganesh::SurfaceDrawContext*,
+                             GrPaint&&,
+                             const GrClip*,
+                             const SkMatrix& viewMatrix,
+                             const GrStyledShape&) const override;
+    GrSurfaceProxyView filterMaskGPU(GrRecordingContext*,
+                                     GrSurfaceProxyView srcView,
+                                     GrColorType srcColorType,
+                                     SkAlphaType srcAlphaType,
+                                     const SkMatrix& ctm,
+                                     const SkIRect& maskRect) const override;
+#endif
+
+    void computeFastBounds(const SkRect&, SkRect*) const override;
+    bool asABlur(BlurRec*) const override;
+
+
+protected:
+    // Nine-patch fast paths for rect(s) and round-rect sources.
+    FilterReturn filterRectsToNine(const SkRect[], int count, const SkMatrix&,
+                                   const SkIRect& clipBounds,
+                                   NinePatch*) const override;
+
+    FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
+                                   const SkIRect& clipBounds,
+                                   NinePatch*) const override;
+
+    // Analytic (non-rasterizing) blur of a rect / round rect into dstM.
+    bool filterRectMask(SkMask* dstM, const SkRect& r, const SkMatrix& matrix,
+                        SkIPoint* margin, SkMask::CreateMode createMode) const;
+    bool filterRRectMask(SkMask* dstM, const SkRRect& r, const SkMatrix& matrix,
+                         SkIPoint* margin, SkMask::CreateMode createMode) const;
+
+    bool ignoreXform() const { return !fRespectCTM; }
+
+private:
+    SK_FLATTENABLE_HOOKS(SkBlurMaskFilterImpl)
+    // To avoid unseemly allocation requests (esp. for finite platforms like
+    // handset) we limit the radius so something manageable. (as opposed to
+    // a request like 10,000)
+    static const SkScalar kMAX_BLUR_SIGMA;
+
+    SkScalar    fSigma;
+    SkBlurStyle fBlurStyle;
+    bool        fRespectCTM;
+
+    SkBlurMaskFilterImpl(SkReadBuffer&);
+    void flatten(SkWriteBuffer&) const override;
+
+    // Map sigma through the CTM (unless ignoring it) and clamp to the cap.
+    SkScalar computeXformedSigma(const SkMatrix& ctm) const {
+        SkScalar xformedSigma = this->ignoreXform() ? fSigma : ctm.mapRadius(fSigma);
+        return std::min(xformedSigma, kMAX_BLUR_SIGMA);
+    }
+
+    friend class SkBlurMaskFilter;
+
+    using INHERITED = SkMaskFilter;
+    friend void sk_register_blur_maskfilter_createproc();
+};
+
+// Sigma cap; rationale is on the declaration in the class above.
+const SkScalar SkBlurMaskFilterImpl::kMAX_BLUR_SIGMA = SkIntToScalar(128);
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkBlurMaskFilterImpl::SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle style, bool respectCTM)
+    : fSigma(sigma)
+    , fBlurStyle(style)
+    , fRespectCTM(respectCTM) {
+    // Debug-only sanity checks on the inputs.
+    SkASSERT(fSigma > 0);
+    SkASSERT((unsigned)style <= kLastEnum_SkBlurStyle);
+}
+
+// Blur masks are always 8-bit alpha.
+SkMask::Format SkBlurMaskFilterImpl::getFormat() const {
+    return SkMask::kA8_Format;
+}
+
+// Report this filter's parameters as a plain blur.  Returns false when the
+// sigma ignores the CTM, since then the blur isn't expressible in local space.
+bool SkBlurMaskFilterImpl::asABlur(BlurRec* rec) const {
+    if (this->ignoreXform()) {
+        return false;
+    }
+
+    if (rec) {
+        rec->fSigma = fSigma;
+        rec->fStyle = fBlurStyle;
+    }
+    return true;
+}
+
+// General mask blur: box-blur approximation at the CTM-adjusted sigma.
+bool SkBlurMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
+                                      const SkMatrix& matrix,
+                                      SkIPoint* margin) const {
+    SkScalar sigma = this->computeXformedSigma(matrix);
+    return SkBlurMask::BoxBlur(dst, src, sigma, fBlurStyle, margin);
+}
+
+// Analytic blur of a rectangle (no rasterization of the source shape).
+bool SkBlurMaskFilterImpl::filterRectMask(SkMask* dst, const SkRect& r,
+                                          const SkMatrix& matrix,
+                                          SkIPoint* margin, SkMask::CreateMode createMode) const {
+    SkScalar sigma = computeXformedSigma(matrix);
+
+    return SkBlurMask::BlurRect(sigma, dst, r, fBlurStyle, margin, createMode);
+}
+
+// Analytic blur of a round rect (no rasterization of the source shape).
+bool SkBlurMaskFilterImpl::filterRRectMask(SkMask* dst, const SkRRect& r,
+                                           const SkMatrix& matrix,
+                                           SkIPoint* margin, SkMask::CreateMode createMode) const {
+    SkScalar sigma = computeXformedSigma(matrix);
+
+    return SkBlurMask::BlurRRect(sigma, dst, r, fBlurStyle, margin, createMode);
+}
+
+// Initialize `mask` to a zeroed A8 image covering `bounds` (rounded out).
+// Returns false if the image allocation fails.
+static bool prepare_to_draw_into_mask(const SkRect& bounds, SkMask* mask) {
+    SkASSERT(mask != nullptr);
+
+    mask->fBounds = bounds.roundOut();
+    // Rows padded to a 4-byte multiple.
+    mask->fRowBytes = SkAlign4(mask->fBounds.width());
+    mask->fFormat = SkMask::kA8_Format;
+    const size_t size = mask->computeImageSize();
+    mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
+    if (nullptr == mask->fImage) {
+        return false;
+    }
+    return true;
+}
+
+// Allocate an A8 mask for `bounds`, then rasterize into it via `proc`, which
+// receives a draw object (translated so the bounds' top-left is at origin)
+// and an antialiased paint.  Returns false if mask allocation fails.
+template <typename Proc> bool draw_into_mask(SkMask* mask, const SkRect& bounds, Proc proc) {
+    if (!prepare_to_draw_into_mask(bounds, mask)) {
+        return false;
+    }
+
+    // Shift the clip/matrix so drawing lands at the mask's (0, 0).
+    const int dx = mask->fBounds.fLeft;
+    const int dy = mask->fBounds.fTop;
+    SkRasterClip rclip(mask->fBounds);
+    rclip.setRect(mask->fBounds.makeOffset(-dx, -dy));
+
+    SkASSERT(mask->fFormat == SkMask::kA8_Format);
+    auto info = SkImageInfo::MakeA8(mask->fBounds.width(), mask->fBounds.height());
+    auto pm = SkPixmap(info, mask->fImage, mask->fRowBytes);
+
+    SkMatrix ctm = SkMatrix::Translate(-SkIntToScalar(dx), -SkIntToScalar(dy));
+
+    SkMatrixProvider matrixProvider(ctm);
+
+    SkDrawBase draw;
+    draw.fBlitterChooser = SkA8Blitter_Choose;
+    draw.fMatrixProvider = &matrixProvider;
+    draw.fDst            = pm;
+    draw.fRC             = &rclip;
+
+    SkPaint paint;
+    paint.setAntiAlias(true);
+
+    proc(draw, paint);
+    return true;
+}
+
+// Rasterize one rect, or two rects as an even-odd "frame", into an A8 mask.
+// The mask bounds come from rects[0]; with two rects, rects[1] is the hole.
+static bool draw_rects_into_mask(const SkRect rects[], int count, SkMask* mask) {
+    return draw_into_mask(mask, rects[0], [&](SkDrawBase& draw, const SkPaint& paint) {
+        if (1 == count) {
+            draw.drawRect(rects[0], paint);
+        } else {
+            // todo: do I need a fast way to do this?
+            SkPath path = SkPathBuilder().addRect(rects[0])
+                                         .addRect(rects[1])
+                                         .setFillType(SkPathFillType::kEvenOdd)
+                                         .detach();
+            draw.drawPath(path, paint);
+        }
+    });
+}
+
+// Rasterize a round rect into an A8 mask sized to its bounds.
+static bool draw_rrect_into_mask(const SkRRect rrect, SkMask* mask) {
+    return draw_into_mask(mask, rrect.rect(), [&](SkDrawBase& draw, const SkPaint& paint) {
+        draw.drawRRect(rrect, paint);
+    });
+}
+
+// True if any edge or dimension of `r` falls outside [-v, v] — used to reject
+// geometry too large for the integer-coordinate fast paths.
+static bool rect_exceeds(const SkRect& r, SkScalar v) {
+    return r.fLeft < -v || r.fTop < -v || r.fRight > v || r.fBottom > v ||
+           r.width() > v || r.height() > v;
+}
+
+#include "src/core/SkMaskCache.h"
+
+// Move a mask's pixels into resource-cache storage: copy the image into a new
+// SkCachedData, free the original allocation, and point the mask at the
+// cached copy.  Returns nullptr (mask untouched) on allocation failure.
+static SkCachedData* copy_mask_to_cacheddata(SkMask* mask) {
+    const size_t size = mask->computeTotalImageSize();
+    SkCachedData* data = SkResourceCache::NewCachedData(size);
+    if (data) {
+        memcpy(data->writable_data(), mask->fImage, size);
+        SkMask::FreeImage(mask->fImage);
+        mask->fImage = (uint8_t*)data->data();
+    }
+    return data;
+}
+
+// Look up a previously blurred round rect in the mask cache (adds a ref).
+static SkCachedData* find_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+                                       const SkRRect& rrect) {
+    return SkMaskCache::FindAndRef(sigma, style, rrect, mask);
+}
+
+// Store a blurred round-rect mask in the mask cache, re-homing the mask's
+// pixels into cache-owned storage first.  Returns the cache entry or nullptr.
+static SkCachedData* add_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+                                      const SkRRect& rrect) {
+    SkCachedData* cache = copy_mask_to_cacheddata(mask);
+    if (cache) {
+        SkMaskCache::Add(sigma, style, rrect, *mask, cache);
+    }
+    return cache;
+}
+
+// Look up a previously blurred rect set in the mask cache (adds a ref).
+static SkCachedData* find_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+                                       const SkRect rects[], int count) {
+    return SkMaskCache::FindAndRef(sigma, style, rects, count, mask);
+}
+
+// Store a blurred rect-set mask in the mask cache, re-homing the mask's
+// pixels into cache-owned storage first.  Returns the cache entry or nullptr.
+static SkCachedData* add_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
+                                      const SkRect rects[], int count) {
+    SkCachedData* cache = copy_mask_to_cacheddata(mask);
+    if (cache) {
+        SkMaskCache::Add(sigma, style, rects, count, *mask, cache);
+    }
+    return cache;
+}
+
+// Compile-time switch: try the analytic round-rect blur first, before falling
+// back to rasterize-then-blur (see filterRRectToNine below).
+static const bool c_analyticBlurRRect{true};
+
+// Try to express a blurred round rect as a nine-patch: blur a much smaller
+// round rect that keeps every corner's radii (plus the blur margin) and a
+// small stretchable middle strip, then let the caller tile/stretch it.
+// Returns kUnimplemented_FilterReturn when this fast path does not apply
+// (so the normal filterMask() path runs), kFalse_FilterReturn on failure,
+// and kTrue_FilterReturn with 'patch' filled in on success.
+SkMaskFilterBase::FilterReturn
+SkBlurMaskFilterImpl::filterRRectToNine(const SkRRect& rrect, const SkMatrix& matrix,
+                                        const SkIRect& clipBounds,
+                                        NinePatch* patch) const {
+    SkASSERT(patch != nullptr);
+    switch (rrect.getType()) {
+        case SkRRect::kEmpty_Type:
+            // Nothing to draw.
+            return kFalse_FilterReturn;
+
+        case SkRRect::kRect_Type:
+            // We should have caught this earlier.
+            SkASSERT(false);
+            [[fallthrough]];
+        case SkRRect::kOval_Type:
+            // The nine patch special case does not handle ovals, and we
+            // already have code for rectangles.
+            return kUnimplemented_FilterReturn;
+
+        // These three can take advantage of this fast path.
+        case SkRRect::kSimple_Type:
+        case SkRRect::kNinePatch_Type:
+        case SkRRect::kComplex_Type:
+            break;
+    }
+
+    // TODO: report correct metrics for innerstyle, where we do not grow the
+    // total bounds, but we do need an inset the size of our blur-radius
+    if (kInner_SkBlurStyle == fBlurStyle) {
+        return kUnimplemented_FilterReturn;
+    }
+
+    // TODO: take clipBounds into account to limit our coordinates up front
+    // for now, just skip too-large src rects (to take the old code path).
+    if (rect_exceeds(rrect.rect(), SkIntToScalar(32767))) {
+        return kUnimplemented_FilterReturn;
+    }
+
+    SkIPoint margin;
+    SkMask srcM, dstM;
+    srcM.fBounds = rrect.rect().roundOut();
+    srcM.fFormat = SkMask::kA8_Format;
+    srcM.fRowBytes = 0;
+
+    // First pass: only compute the blurred bounds (dstM.fBounds) and the
+    // per-side blur margin; no pixels are rendered yet.
+    bool filterResult = false;
+    if (c_analyticBlurRRect) {
+        // special case for fast round rect blur
+        // don't actually do the blur the first time, just compute the correct size
+        filterResult = this->filterRRectMask(&dstM, rrect, matrix, &margin,
+                                             SkMask::kJustComputeBounds_CreateMode);
+    }
+
+    if (!filterResult) {
+        filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
+    }
+
+    if (!filterResult) {
+        return kFalse_FilterReturn;
+    }
+
+    // Now figure out the appropriate width and height of the smaller round rectangle
+    // to stretch. It will take into account the larger radius per side as well as double
+    // the margin, to account for inner and outer blur.
+    const SkVector& UL = rrect.radii(SkRRect::kUpperLeft_Corner);
+    const SkVector& UR = rrect.radii(SkRRect::kUpperRight_Corner);
+    const SkVector& LR = rrect.radii(SkRRect::kLowerRight_Corner);
+    const SkVector& LL = rrect.radii(SkRRect::kLowerLeft_Corner);
+
+    const SkScalar leftUnstretched = std::max(UL.fX, LL.fX) + SkIntToScalar(2 * margin.fX);
+    const SkScalar rightUnstretched = std::max(UR.fX, LR.fX) + SkIntToScalar(2 * margin.fX);
+
+    // Extra space in the middle to ensure an unchanging piece for stretching. Use 3 to cover
+    // any fractional space on either side plus 1 for the part to stretch.
+    const SkScalar stretchSize = SkIntToScalar(3);
+
+    const SkScalar totalSmallWidth = leftUnstretched + rightUnstretched + stretchSize;
+    if (totalSmallWidth >= rrect.rect().width()) {
+        // There is no valid piece to stretch.
+        return kUnimplemented_FilterReturn;
+    }
+
+    const SkScalar topUnstretched = std::max(UL.fY, UR.fY) + SkIntToScalar(2 * margin.fY);
+    const SkScalar bottomUnstretched = std::max(LL.fY, LR.fY) + SkIntToScalar(2 * margin.fY);
+
+    const SkScalar totalSmallHeight = topUnstretched + bottomUnstretched + stretchSize;
+    if (totalSmallHeight >= rrect.rect().height()) {
+        // There is no valid piece to stretch.
+        return kUnimplemented_FilterReturn;
+    }
+
+    // Build the shrunken round rect with the original corner radii preserved.
+    SkRect smallR = SkRect::MakeWH(totalSmallWidth, totalSmallHeight);
+
+    SkRRect smallRR;
+    SkVector radii[4];
+    radii[SkRRect::kUpperLeft_Corner] = UL;
+    radii[SkRRect::kUpperRight_Corner] = UR;
+    radii[SkRRect::kLowerRight_Corner] = LR;
+    radii[SkRRect::kLowerLeft_Corner] = LL;
+    smallRR.setRectRadii(smallR, radii);
+
+    // Check the mask cache first; on a miss, render the small-rrect blur
+    // (analytically if possible, otherwise rasterize + generic blur) and
+    // then insert it into the cache.
+    const SkScalar sigma = this->computeXformedSigma(matrix);
+    SkCachedData* cache = find_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
+    if (!cache) {
+        bool analyticBlurWorked = false;
+        if (c_analyticBlurRRect) {
+            analyticBlurWorked =
+                this->filterRRectMask(&patch->fMask, smallRR, matrix, &margin,
+                                      SkMask::kComputeBoundsAndRenderImage_CreateMode);
+        }
+
+        if (!analyticBlurWorked) {
+            // Fallback: rasterize the small rrect into srcM (reusing the
+            // SkMask declared above) and run the generic mask blur.
+            if (!draw_rrect_into_mask(smallRR, &srcM)) {
+                return kFalse_FilterReturn;
+            }
+
+            SkAutoMaskFreeImage amf(srcM.fImage);
+
+            if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
+                return kFalse_FilterReturn;
+            }
+        }
+        cache = add_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
+    }
+
+    patch->fMask.fBounds.offsetTo(0, 0);
+    patch->fOuterRect = dstM.fBounds;
+    patch->fCenter.fX = SkScalarCeilToInt(leftUnstretched) + 1;
+    patch->fCenter.fY = SkScalarCeilToInt(topUnstretched) + 1;
+    SkASSERT(nullptr == patch->fCache);
+    patch->fCache = cache;  // transfer ownership to patch
+    return kTrue_FilterReturn;
+}
+
+// Use the faster analytic blur approach for ninepatch rects
+static const bool c_analyticBlurNinepatch{true};
+
+// Try to express one blurred rect (count == 1), or a blurred "frame" made of
+// an outer and inner rect (count == 2), as a nine-patch mask. The mask for a
+// shrunken version of the rect(s) is produced (and cached) and 'patch'
+// describes how to stretch it back out. Returns kUnimplemented_FilterReturn
+// when the caller should fall back to the normal filterMask() path.
+SkMaskFilterBase::FilterReturn
+SkBlurMaskFilterImpl::filterRectsToNine(const SkRect rects[], int count,
+                                        const SkMatrix& matrix,
+                                        const SkIRect& clipBounds,
+                                        NinePatch* patch) const {
+    if (count < 1 || count > 2) {
+        return kUnimplemented_FilterReturn;
+    }
+
+    // TODO: report correct metrics for innerstyle, where we do not grow the
+    // total bounds, but we do need an inset the size of our blur-radius
+    if (kInner_SkBlurStyle == fBlurStyle || kOuter_SkBlurStyle == fBlurStyle) {
+        return kUnimplemented_FilterReturn;
+    }
+
+    // TODO: take clipBounds into account to limit our coordinates up front
+    // for now, just skip too-large src rects (to take the old code path).
+    if (rect_exceeds(rects[0], SkIntToScalar(32767))) {
+        return kUnimplemented_FilterReturn;
+    }
+
+    SkIPoint margin;
+    SkMask srcM, dstM;
+    srcM.fBounds = rects[0].roundOut();
+    srcM.fFormat = SkMask::kA8_Format;
+    srcM.fRowBytes = 0;
+
+    // First pass only computes dstM.fBounds / margin; no pixels rendered yet.
+    bool filterResult = false;
+    if (count == 1 && c_analyticBlurNinepatch) {
+        // special case for fast rect blur
+        // don't actually do the blur the first time, just compute the correct size
+        filterResult = this->filterRectMask(&dstM, rects[0], matrix, &margin,
+                                            SkMask::kJustComputeBounds_CreateMode);
+    } else {
+        filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
+    }
+
+    if (!filterResult) {
+        return kFalse_FilterReturn;
+    }
+
+    /*
+     *  smallR is the smallest version of 'rect' that will still guarantee that
+     *  we get the same blur results on all edges, plus 1 center row/col that is
+     *  representative of the extendible/stretchable edges of the ninepatch.
+     *  Since our actual edge may be fractional we inset 1 more to be sure we
+     *  don't miss any interior blur.
+     *  x is an added pixel of blur, and { and } are the (fractional) edge
+     *  pixels from the original rect.
+     *
+     *   x x { x x .... x x } x x
+     *
+     *  Thus, in this case, we inset by a total of 5 (on each side) beginning
+     *  with our outer-rect (dstM.fBounds)
+     */
+    SkRect smallR[2];
+    SkIPoint center;
+
+    // +2 is from +1 for each edge (to account for possible fractional edges)
+    int smallW = dstM.fBounds.width() - srcM.fBounds.width() + 2;
+    int smallH = dstM.fBounds.height() - srcM.fBounds.height() + 2;
+    SkIRect innerIR;
+
+    if (1 == count) {
+        innerIR = srcM.fBounds;
+        center.set(smallW, smallH);
+    } else {
+        SkASSERT(2 == count);
+        rects[1].roundIn(&innerIR);
+        center.set(smallW + (innerIR.left() - srcM.fBounds.left()),
+                   smallH + (innerIR.top() - srcM.fBounds.top()));
+    }
+
+    // +1 so we get a clean, stretchable, center row/col
+    smallW += 1;
+    smallH += 1;
+
+    // we want the inset amounts to be integral, so we don't change any
+    // fractional phase on the fRight or fBottom of our smallR.
+    const SkScalar dx = SkIntToScalar(innerIR.width() - smallW);
+    const SkScalar dy = SkIntToScalar(innerIR.height() - smallH);
+    if (dx < 0 || dy < 0) {
+        // we're too small, relative to our blur, to break into nine-patch,
+        // so we ask to have our normal filterMask() be called.
+        return kUnimplemented_FilterReturn;
+    }
+
+    smallR[0].setLTRB(rects[0].left(), rects[0].top(),
+                      rects[0].right() - dx, rects[0].bottom() - dy);
+    if (smallR[0].width() < 2 || smallR[0].height() < 2) {
+        return kUnimplemented_FilterReturn;
+    }
+    if (2 == count) {
+        smallR[1].setLTRB(rects[1].left(), rects[1].top(),
+                          rects[1].right() - dx, rects[1].bottom() - dy);
+        SkASSERT(!smallR[1].isEmpty());
+    }
+
+    // Mask-cache lookup; on a miss render the small rect(s) (analytically for
+    // the single-rect case, otherwise rasterize + generic blur) and cache it.
+    const SkScalar sigma = this->computeXformedSigma(matrix);
+    SkCachedData* cache = find_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
+    if (!cache) {
+        if (count > 1 || !c_analyticBlurNinepatch) {
+            if (!draw_rects_into_mask(smallR, count, &srcM)) {
+                return kFalse_FilterReturn;
+            }
+
+            SkAutoMaskFreeImage amf(srcM.fImage);
+
+            if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
+                return kFalse_FilterReturn;
+            }
+        } else {
+            if (!this->filterRectMask(&patch->fMask, smallR[0], matrix, &margin,
+                                      SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
+                return kFalse_FilterReturn;
+            }
+        }
+        cache = add_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
+    }
+    patch->fMask.fBounds.offsetTo(0, 0);
+    patch->fOuterRect = dstM.fBounds;
+    patch->fCenter = center;
+    SkASSERT(nullptr == patch->fCache);
+    patch->fCache = cache;  // transfer ownership to patch
+    return kTrue_FilterReturn;
+}
+
+// Conservatively outset 'src' by 3 * sigma (the effective extent of the
+// Gaussian kernel) on every side to obtain the blurred bounds in 'dst'.
+void SkBlurMaskFilterImpl::computeFastBounds(const SkRect& src,
+                                             SkRect* dst) const {
+    // TODO: if we're doing kInner blur, should we return a different outset?
+    //       i.e. pad == 0 ?
+
+    SkScalar pad = 3.0f * fSigma;
+
+    dst->setLTRB(src.fLeft - pad, src.fTop - pad,
+                 src.fRight + pad, src.fBottom + pad);
+}
+
+// Deserialize a blur mask filter written by flatten(): sigma, style, then a
+// 2-bit flag word whose low bit historically meant "ignore CTM".
+sk_sp<SkFlattenable> SkBlurMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
+    const SkScalar sigma = buffer.readScalar();
+    SkBlurStyle style = buffer.read32LE(kLastEnum_SkBlurStyle);
+
+    uint32_t flags = buffer.read32LE(0x3);  // historically we only recorded 2 bits
+    bool respectCTM = !(flags & 1);  // historically we stored ignoreCTM in low bit
+
+    return SkMaskFilter::MakeBlur((SkBlurStyle)style, sigma, respectCTM);
+}
+
+// Serialize in the order CreateProc() reads: sigma, style, inverted CTM flag.
+void SkBlurMaskFilterImpl::flatten(SkWriteBuffer& buffer) const {
+    buffer.writeScalar(fSigma);
+    buffer.writeUInt(fBlurStyle);
+    buffer.writeUInt(!fRespectCTM);  // historically we recorded ignoreCTM
+}
+
+
+#if defined(SK_GANESH)
+
+///////////////////////////////////////////////////////////////////////////////
+// Circle Blur
+///////////////////////////////////////////////////////////////////////////////
+
+// Computes an unnormalized half kernel (right side). Returns the summation of all the half
+// kernel values. Samples the Gaussian exp(-t^2 / (2 sigma^2)) at half-pixel
+// offsets t = 0.5, 1.5, ... so callers can normalize by the returned total.
+static float make_unnormalized_half_kernel(float* halfKernel, int halfKernelSize, float sigma) {
+    const float invSigma = 1.f / sigma;
+    const float b = -0.5f * invSigma * invSigma;
+    float tot = 0.0f;
+    // Compute half kernel values at half pixel steps out from the center.
+    float t = 0.5f;
+    for (int i = 0; i < halfKernelSize; ++i) {
+        float value = expf(t * t * b);
+        tot += value;
+        halfKernel[i] = value;
+        t += 1.f;
+    }
+    return tot;
+}
+
+// Create a Gaussian half-kernel (right side) and a summed area table given a sigma and number
+// of discrete steps. The half kernel is normalized to sum to 0.5.
+// summedHalfKernel[i] is the running prefix sum of the normalized kernel.
+static void make_half_kernel_and_summed_table(float* halfKernel,
+                                              float* summedHalfKernel,
+                                              int halfKernelSize,
+                                              float sigma) {
+    // The half kernel should sum to 0.5 not 1.0.
+    const float tot = 2.f * make_unnormalized_half_kernel(halfKernel, halfKernelSize, sigma);
+    float sum = 0.f;
+    for (int i = 0; i < halfKernelSize; ++i) {
+        halfKernel[i] /= tot;
+        sum += halfKernel[i];
+        summedHalfKernel[i] = sum;
+    }
+}
+
+// Applies the 1D half kernel vertically at points along the x axis to a circle centered at the
+// origin with radius circleR. results[i] is the per-column (half) coverage at
+// x = firstX + i; columns entirely outside the circle get 0.
+// NOTE(review): unlike the sibling helpers this one is not marked 'static';
+// consider adding it for internal linkage.
+void apply_kernel_in_y(float* results,
+                       int numSteps,
+                       float firstX,
+                       float circleR,
+                       int halfKernelSize,
+                       const float* summedHalfKernelTable) {
+    float x = firstX;
+    for (int i = 0; i < numSteps; ++i, x += 1.f) {
+        if (x < -circleR || x > circleR) {
+            results[i] = 0;
+            continue;
+        }
+        float y = sqrtf(circleR * circleR - x * x);
+        // In the column at x we exit the circle at +y and -y
+        // The summed table entry j actually reflects an offset of j + 0.5.
+        y -= 0.5f;
+        int yInt = SkScalarFloorToInt(y);
+        SkASSERT(yInt >= -1);
+        if (y < 0) {
+            results[i] = (y + 0.5f) * summedHalfKernelTable[0];
+        } else if (yInt >= halfKernelSize - 1) {
+            results[i] = 0.5f;
+        } else {
+            // Linearly interpolate between adjacent summed-table entries.
+            float yFrac = y - yInt;
+            results[i] = (1.f - yFrac) * summedHalfKernelTable[yInt] +
+                         yFrac * summedHalfKernelTable[yInt + 1];
+        }
+    }
+}
+
+// Apply a Gaussian at point (evalX, 0) to a circle centered at the origin with radius circleR.
+// This relies on having a half kernel computed for the Gaussian and a table of applications of
+// the half kernel in y to columns at (evalX - halfKernel, evalX - halfKernel + 1, ..., evalX +
+// halfKernel) passed in as yKernelEvaluations.
+static uint8_t eval_at(float evalX,
+                       float circleR,
+                       const float* halfKernel,
+                       int halfKernelSize,
+                       const float* yKernelEvaluations) {
+    float acc = 0;
+
+    // Walk left of evalX using the mirrored (right-side) half kernel...
+    float x = evalX - halfKernelSize;
+    for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
+        if (x < -circleR || x > circleR) {
+            continue;
+        }
+        float verticalEval = yKernelEvaluations[i];
+        acc += verticalEval * halfKernel[halfKernelSize - i - 1];
+    }
+    // ...then continue rightward ('x' carries over from the loop above).
+    for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
+        if (x < -circleR || x > circleR) {
+            continue;
+        }
+        float verticalEval = yKernelEvaluations[i + halfKernelSize];
+        acc += verticalEval * halfKernel[i];
+    }
+    // Since we applied a half kernel in y we multiply acc by 2 (the circle is symmetric about
+    // the x axis).
+    return SkUnitScalarClampToByte(2.f * acc);
+}
+
+// This function creates a profile of a blurred circle. It does this by computing a kernel for
+// half the Gaussian and a matching summed area table. The summed area table is used to compute
+// an array of vertical applications of the half kernel to the circle along the x axis. The
+// table of y evaluations has 2 * k + n entries where k is the size of the half kernel and n is
+// the size of the profile being computed. Then for each of the n profile entries we walk out k
+// steps in each horizontal direction multiplying the corresponding y evaluation by the half
+// kernel entry and sum these values to compute the profile entry.
+static void create_circle_profile(uint8_t* weights,
+                                  float sigma,
+                                  float circleR,
+                                  int profileTextureWidth) {
+    const int numSteps = profileTextureWidth;
+
+    // The full kernel is 6 sigmas wide.
+    int halfKernelSize = SkScalarCeilToInt(6.0f * sigma);
+    // round up to next multiple of 2 and then divide by 2
+    halfKernelSize = ((halfKernelSize + 1) & ~1) >> 1;
+
+    // Number of x steps at which to apply kernel in y to cover all the profile samples in x.
+    int numYSteps = numSteps + 2 * halfKernelSize;
+
+    // One allocation sliced into: half kernel | summed table | y evaluations.
+    AutoTArray<float> bulkAlloc(halfKernelSize + halfKernelSize + numYSteps);
+    float* halfKernel = bulkAlloc.get();
+    float* summedKernel = bulkAlloc.get() + halfKernelSize;
+    float* yEvals = bulkAlloc.get() + 2 * halfKernelSize;
+    make_half_kernel_and_summed_table(halfKernel, summedKernel, halfKernelSize, sigma);
+
+    float firstX = -halfKernelSize + 0.5f;
+    apply_kernel_in_y(yEvals, numYSteps, firstX, circleR, halfKernelSize, summedKernel);
+
+    for (int i = 0; i < numSteps - 1; ++i) {
+        float evalX = i + 0.5f;
+        weights[i] = eval_at(evalX, circleR, halfKernel, halfKernelSize, yEvals + i);
+    }
+    // Ensure the tail of the Gaussian goes to zero.
+    weights[numSteps - 1] = 0;
+}
+
+// Builds the blur profile for the half-plane approximation (used when sigma is
+// tiny relative to the circle radius): the integral of a Gaussian whose full
+// kernel spans the profile width, decreasing from left to right.
+static void create_half_plane_profile(uint8_t* profile, int profileWidth) {
+    SkASSERT(!(profileWidth & 0x1));
+    // The full kernel is 6 sigmas wide.
+    float sigma = profileWidth / 6.f;
+    int halfKernelSize = profileWidth / 2;
+
+    AutoTArray<float> halfKernel(halfKernelSize);
+
+    // The half kernel should sum to 0.5.
+    const float tot = 2.f * make_unnormalized_half_kernel(halfKernel.get(), halfKernelSize, sigma);
+    float sum = 0.f;
+    // Populate the profile from the right edge to the middle.
+    for (int i = 0; i < halfKernelSize; ++i) {
+        halfKernel[halfKernelSize - i - 1] /= tot;
+        sum += halfKernel[halfKernelSize - i - 1];
+        profile[profileWidth - i - 1] = SkUnitScalarClampToByte(sum);
+    }
+    // Populate the profile from the middle to the left edge (by flipping the half kernel and
+    // continuing the summation).
+    for (int i = 0; i < halfKernelSize; ++i) {
+        sum += halfKernel[i];
+        profile[halfKernelSize - i - 1] = SkUnitScalarClampToByte(sum);
+    }
+    // Ensure tail goes to 0.
+    profile[profileWidth - 1] = 0;
+}
+
+// Creates (or fetches from the thread-safe cache) the 1D blur-profile texture
+// for a blurred circle and returns a texture effect sampling it. On success
+// also writes the solid interior radius and the profile texture's radius.
+// Returns nullptr for degenerate circles or allocation failure.
+static std::unique_ptr<GrFragmentProcessor> create_profile_effect(GrRecordingContext* rContext,
+                                                                  const SkRect& circle,
+                                                                  float sigma,
+                                                                  float* solidRadius,
+                                                                  float* textureRadius) {
+    float circleR = circle.width() / 2.0f;
+    if (!sk_float_isfinite(circleR) || circleR < SK_ScalarNearlyZero) {
+        return nullptr;
+    }
+
+    auto threadSafeCache = rContext->priv().threadSafeCache();
+
+    // Profile textures are cached by the ratio of sigma to circle radius and by the size of the
+    // profile texture (binned by powers of 2).
+    SkScalar sigmaToCircleRRatio = sigma / circleR;
+    // When sigma is really small this becomes equivalent to convolving a Gaussian with a
+    // half-plane. Similarly, in the extreme high ratio cases the circle becomes a point WRT the
+    // Gaussian and the profile texture is just a Gaussian evaluation. However, we haven't yet
+    // implemented this latter optimization.
+    sigmaToCircleRRatio = std::min(sigmaToCircleRRatio, 8.f);
+    SkFixed sigmaToCircleRRatioFixed;
+    static const SkScalar kHalfPlaneThreshold = 0.1f;
+    bool useHalfPlaneApprox = false;
+    if (sigmaToCircleRRatio <= kHalfPlaneThreshold) {
+        useHalfPlaneApprox = true;
+        sigmaToCircleRRatioFixed = 0;
+        *solidRadius = circleR - 3 * sigma;
+        *textureRadius = 6 * sigma;
+    } else {
+        // Convert to fixed point for the key.
+        sigmaToCircleRRatioFixed = SkScalarToFixed(sigmaToCircleRRatio);
+        // We shave off some bits to reduce the number of unique entries. We could probably
+        // shave off more than we do.
+        sigmaToCircleRRatioFixed &= ~0xff;
+        sigmaToCircleRRatio = SkFixedToScalar(sigmaToCircleRRatioFixed);
+        sigma = circleR * sigmaToCircleRRatio;
+        *solidRadius = 0;
+        *textureRadius = circleR + 3 * sigma;
+    }
+
+    static constexpr int kProfileTextureWidth = 512;
+    // This would be kProfileTextureWidth/textureRadius if it weren't for the fact that we do
+    // the calculation of the profile coord in a coord space that has already been scaled by
+    // 1 / textureRadius. This is done to avoid overflow in length().
+    SkMatrix texM = SkMatrix::Scale(kProfileTextureWidth, 1.f);
+
+    static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
+    skgpu::UniqueKey key;
+    skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "1-D Circular Blur");
+    builder[0] = sigmaToCircleRRatioFixed;
+    builder.finish();
+
+    // Fast path: another thread/frame may already have built this profile.
+    GrSurfaceProxyView profileView = threadSafeCache->find(key);
+    if (profileView) {
+        SkASSERT(profileView.asTextureProxy());
+        SkASSERT(profileView.origin() == kTopLeft_GrSurfaceOrigin);
+        return GrTextureEffect::Make(std::move(profileView), kPremul_SkAlphaType, texM);
+    }
+
+    // Build the A8 profile bitmap on the CPU, upload it, and publish to cache.
+    SkBitmap bm;
+    if (!bm.tryAllocPixels(SkImageInfo::MakeA8(kProfileTextureWidth, 1))) {
+        return nullptr;
+    }
+
+    if (useHalfPlaneApprox) {
+        create_half_plane_profile(bm.getAddr8(0, 0), kProfileTextureWidth);
+    } else {
+        // Rescale params to the size of the texture we're creating.
+        SkScalar scale = kProfileTextureWidth / *textureRadius;
+        create_circle_profile(
+                bm.getAddr8(0, 0), sigma * scale, circleR * scale, kProfileTextureWidth);
+    }
+    bm.setImmutable();
+
+    profileView = std::get<0>(GrMakeUncachedBitmapProxyView(rContext, bm));
+    if (!profileView) {
+        return nullptr;
+    }
+
+    profileView = threadSafeCache->add(key, profileView);
+    return GrTextureEffect::Make(std::move(profileView), kPremul_SkAlphaType, texM);
+}
+
+// Builds a fragment processor that renders a Gaussian-blurred circle by
+// sampling the cached 1D blur-profile texture by distance from the circle
+// center. Returns nullptr when no blur is needed or the profile can't be made.
+static std::unique_ptr<GrFragmentProcessor> make_circle_blur(GrRecordingContext* context,
+                                                             const SkRect& circle,
+                                                             float sigma) {
+    if (SkGpuBlurUtils::IsEffectivelyZeroSigma(sigma)) {
+        return nullptr;
+    }
+
+    float solidRadius;
+    float textureRadius;
+    std::unique_ptr<GrFragmentProcessor> profile =
+            create_profile_effect(context, circle, sigma, &solidRadius, &textureRadius);
+    if (!profile) {
+        return nullptr;
+    }
+
+    static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+        "uniform shader blurProfile;"
+        "uniform half4 circleData;"
+
+        "half4 main(float2 xy) {"
+            // We just want to compute "(length(vec) - circleData.z + 0.5) * circleData.w" but need
+            // to rearrange to avoid passing large values to length() that would overflow.
+            "half2 vec = half2((sk_FragCoord.xy - circleData.xy) * circleData.w);"
+            "half dist = length(vec) + (0.5 - circleData.z) * circleData.w;"
+            "return blurProfile.eval(half2(dist, 0.5)).aaaa;"
+        "}"
+    );
+
+    // circleData = (centerX, centerY, solidRadius, 1 / textureRadius).
+    SkV4 circleData = {circle.centerX(), circle.centerY(), solidRadius, 1.f / textureRadius};
+    auto circleBlurFP = GrSkSLFP::Make(effect, "CircleBlur", /*inputFP=*/nullptr,
+                                       GrSkSLFP::OptFlags::kCompatibleWithCoverageAsAlpha,
+                                       "blurProfile", GrSkSLFP::IgnoreOptFlags(std::move(profile)),
+                                       "circleData", circleData);
+    // Modulate blur with the input color.
+    return GrBlendFragmentProcessor::Make<SkBlendMode::kModulate>(std::move(circleBlurFP),
+                                                                  /*dst=*/nullptr);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Rect Blur
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns a linear-filtered texture effect over the cached 1D Gaussian
+// integral table for a blur whose full kernel width is 'sixSigma'. The table
+// is keyed in the thread-safe cache by its (binned) width.
+static std::unique_ptr<GrFragmentProcessor> make_rect_integral_fp(GrRecordingContext* rContext,
+                                                                  float sixSigma) {
+    SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(sixSigma / 6.f));
+    auto threadSafeCache = rContext->priv().threadSafeCache();
+
+    // First call just computes the table width; no bitmap is allocated yet.
+    int width = SkGpuBlurUtils::CreateIntegralTable(sixSigma, nullptr);
+
+    static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
+    skgpu::UniqueKey key;
+    skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "Rect Blur Mask");
+    builder[0] = width;
+    builder.finish();
+
+    SkMatrix m = SkMatrix::Scale(width / sixSigma, 1.f);
+
+    GrSurfaceProxyView view = threadSafeCache->find(key);
+
+    if (view) {
+        SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);
+        return GrTextureEffect::Make(
+                std::move(view), kPremul_SkAlphaType, m, GrSamplerState::Filter::kLinear);
+    }
+
+    // Cache miss: build the table bitmap on the CPU, upload, and publish.
+    SkBitmap bitmap;
+    if (!SkGpuBlurUtils::CreateIntegralTable(sixSigma, &bitmap)) {
+        return {};
+    }
+
+    view = std::get<0>(GrMakeUncachedBitmapProxyView(rContext, bitmap));
+    if (!view) {
+        return {};
+    }
+
+    view = threadSafeCache->add(key, view);
+
+    SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);
+    return GrTextureEffect::Make(
+            std::move(view), kPremul_SkAlphaType, m, GrSamplerState::Filter::kLinear);
+}
+
+// Builds a fragment processor that renders a Gaussian-blurred (axis-preserving
+// transformed) rectangle by sampling the 1D integral table in x and y and
+// multiplying the two coverages. Returns nullptr when no blur is needed, the
+// view matrix can't be decomposed, or float precision is insufficient.
+static std::unique_ptr<GrFragmentProcessor> make_rect_blur(GrRecordingContext* context,
+                                                           const GrShaderCaps& caps,
+                                                           const SkRect& srcRect,
+                                                           const SkMatrix& viewMatrix,
+                                                           float transformedSigma) {
+    SkASSERT(viewMatrix.preservesRightAngles());
+    SkASSERT(srcRect.isSorted());
+
+    if (SkGpuBlurUtils::IsEffectivelyZeroSigma(transformedSigma)) {
+        // No need to blur the rect
+        return nullptr;
+    }
+
+    SkMatrix invM;
+    SkRect rect;
+    if (viewMatrix.rectStaysRect()) {
+        invM = SkMatrix::I();
+        // We can do everything in device space when the src rect projects to a rect in device space
+        SkAssertResult(viewMatrix.mapRect(&rect, srcRect));
+    } else {
+        // The view matrix may scale, perhaps anisotropically. But we want to apply our device space
+        // "transformedSigma" to the delta of frag coord from the rect edges. Factor out the scaling
+        // to define a space that is purely rotation/translation from device space (and scale from
+        // src space) We'll meet in the middle: pre-scale the src rect to be in this space and then
+        // apply the inverse of the rotation/translation portion to the frag coord.
+        SkMatrix m;
+        SkSize scale;
+        if (!viewMatrix.decomposeScale(&scale, &m)) {
+            return nullptr;
+        }
+        if (!m.invert(&invM)) {
+            return nullptr;
+        }
+        rect = {srcRect.left() * scale.width(),
+                srcRect.top() * scale.height(),
+                srcRect.right() * scale.width(),
+                srcRect.bottom() * scale.height()};
+    }
+
+    if (!caps.fFloatIs32Bits) {
+        // We promote the math that gets us into the Gaussian space to full float when the rect
+        // coords are large. If we don't have full float then fail. We could probably clip the rect
+        // to an outset device bounds instead.
+        if (SkScalarAbs(rect.fLeft) > 16000.f || SkScalarAbs(rect.fTop) > 16000.f ||
+            SkScalarAbs(rect.fRight) > 16000.f || SkScalarAbs(rect.fBottom) > 16000.f) {
+            return nullptr;
+        }
+    }
+
+    const float sixSigma = 6 * transformedSigma;
+    std::unique_ptr<GrFragmentProcessor> integral = make_rect_integral_fp(context, sixSigma);
+    if (!integral) {
+        return nullptr;
+    }
+
+    // In the fast variant we think of the midpoint of the integral texture as aligning with the
+    // closest rect edge both in x and y. To simplify texture coord calculation we inset the rect so
+    // that the edge of the inset rect corresponds to t = 0 in the texture. It actually simplifies
+    // things a bit in the !isFast case, too.
+    float threeSigma = sixSigma / 2;
+    SkRect insetRect = {rect.left() + threeSigma,
+                        rect.top() + threeSigma,
+                        rect.right() - threeSigma,
+                        rect.bottom() - threeSigma};
+
+    // In our fast variant we find the nearest horizontal and vertical edges and for each do a
+    // lookup in the integral texture for each and multiply them. When the rect is less than 6 sigma
+    // wide then things aren't so simple and we have to consider both the left and right edge of the
+    // rectangle (and similar in y).
+    bool isFast = insetRect.isSorted();
+
+    static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+        // Effect that is a LUT for integral of normal distribution. The value at x:[0,6*sigma] is
+        // the integral from -inf to (3*sigma - x). I.e. x is mapped from [0, 6*sigma] to
+        // [3*sigma to -3*sigma]. The flip saves a reversal in the shader.
+        "uniform shader integral;"
+
+        "uniform float4 rect;"
+        "uniform int isFast;"  // specialized
+
+        "half4 main(float2 pos) {"
+        "half xCoverage, yCoverage;"
+        "if (bool(isFast)) {"
+            // Get the smaller of the signed distance from the frag coord to the left and right
+            // edges and similar for y.
+            // The integral texture goes "backwards" (from 3*sigma to -3*sigma), So, the below
+            // computations align the left edge of the integral texture with the inset rect's
+            // edge extending outward 6 * sigma from the inset rect.
+            "half2 xy = max(half2(rect.LT - pos), half2(pos - rect.RB));"
+            "xCoverage = integral.eval(half2(xy.x, 0.5)).a;"
+            "yCoverage = integral.eval(half2(xy.y, 0.5)).a;"
+        "} else {"
+            // We just consider just the x direction here. In practice we compute x and y
+            // separately and multiply them together.
+            // We define our coord system so that the point at which we're evaluating a kernel
+            // defined by the normal distribution (K) at 0. In this coord system let L be left
+            // edge and R be the right edge of the rectangle.
+            // We can calculate C by integrating K with the half infinite ranges outside the
+            // L to R range and subtracting from 1:
+            //   C = 1 - <integral of K from -inf to L> - <integral of K from R to inf>
+            // K is symmetric about x=0 so:
+            //   C = 1 - <integral of K from -inf to L> - <integral of K from -inf to -R>
+
+            // The integral texture goes "backwards" (from 3*sigma to -3*sigma) which is
+            // factored in to the below calculations.
+            // Also, our rect uniform was pre-inset by 3 sigma from the actual rect being
+            // blurred, also factored in.
+            "half4 rect = half4(half2(rect.LT - pos), half2(pos - rect.RB));"
+            "xCoverage = 1 - integral.eval(half2(rect.L, 0.5)).a"
+                          "- integral.eval(half2(rect.R, 0.5)).a;"
+            "yCoverage = 1 - integral.eval(half2(rect.T, 0.5)).a"
+                          "- integral.eval(half2(rect.B, 0.5)).a;"
+        "}"
+        "return half4(xCoverage * yCoverage);"
+        "}"
+    );
+
+    std::unique_ptr<GrFragmentProcessor> fp =
+            GrSkSLFP::Make(effect, "RectBlur", /*inputFP=*/nullptr,
+                           GrSkSLFP::OptFlags::kCompatibleWithCoverageAsAlpha,
+                           "integral", GrSkSLFP::IgnoreOptFlags(std::move(integral)),
+                           "rect", insetRect,
+                           "isFast", GrSkSLFP::Specialize<int>(isFast));
+    // Modulate blur with the input color.
+    fp = GrBlendFragmentProcessor::Make<SkBlendMode::kModulate>(std::move(fp),
+                                                                /*dst=*/nullptr);
+    if (!invM.isIdentity()) {
+        fp = GrMatrixEffect::Make(invM, std::move(fp));
+    }
+    return GrFragmentProcessor::DeviceSpace(std::move(fp));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// RRect Blur
+///////////////////////////////////////////////////////////////////////////////
+
+// All cached blurred-rrect masks are created with this surface origin so that
+// CPU- and GPU-generated versions are interchangeable.
+static constexpr auto kBlurredRRectMaskOrigin = kTopLeft_GrSurfaceOrigin;
+
+// Builds the thread-safe-cache key for a blurred rrect mask: a binned sigma
+// plus the (integral) x/y radii of all four corners of the rrect to draw.
+static void make_blurred_rrect_key(skgpu::UniqueKey* key,
+                                   const SkRRect& rrectToDraw,
+                                   float xformedSigma) {
+    SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
+    static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
+
+    skgpu::UniqueKey::Builder builder(key, kDomain, 9, "RoundRect Blur Mask");
+    // Bin sigma so nearly-equal sigmas share one cached mask.
+    builder[0] = SkScalarCeilToInt(xformedSigma - 1 / 6.0f);
+
+    int index = 1;
+    // TODO: this is overkill for _simple_ circular rrects
+    for (auto c : {SkRRect::kUpperLeft_Corner,
+                   SkRRect::kUpperRight_Corner,
+                   SkRRect::kLowerRight_Corner,
+                   SkRRect::kLowerLeft_Corner}) {
+        SkASSERT(SkScalarIsInt(rrectToDraw.radii(c).fX) && SkScalarIsInt(rrectToDraw.radii(c).fY));
+        builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fX);
+        builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fY);
+    }
+    builder.finish();
+}
+
+// Renders the rrect into a scratch A8 draw context and Gaussian-blurs it on
+// the GPU, then hands the resulting texture proxy to 'trampoline' so the lazy
+// 'lazyView' can resolve to it. Returns false on any allocation/blur failure.
+static bool fillin_view_on_gpu(GrDirectContext* dContext,
+                               const GrSurfaceProxyView& lazyView,
+                               sk_sp<GrThreadSafeCache::Trampoline> trampoline,
+                               const SkRRect& rrectToDraw,
+                               const SkISize& dimensions,
+                               float xformedSigma) {
+#if defined(SK_GANESH)
+    SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
+
+    // We cache blur masks. Use default surface props here so we can use the same cached mask
+    // regardless of the final dst surface.
+    SkSurfaceProps defaultSurfaceProps;
+
+    std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> sdc =
+            skgpu::ganesh::SurfaceDrawContext::MakeWithFallback(dContext,
+                                                                GrColorType::kAlpha_8,
+                                                                nullptr,
+                                                                SkBackingFit::kExact,
+                                                                dimensions,
+                                                                defaultSurfaceProps,
+                                                                1,
+                                                                GrMipmapped::kNo,
+                                                                GrProtected::kNo,
+                                                                kBlurredRRectMaskOrigin);
+    if (!sdc) {
+        return false;
+    }
+
+    GrPaint paint;
+
+    sdc->clear(SK_PMColor4fTRANSPARENT);
+    sdc->drawRRect(nullptr,
+                   std::move(paint),
+                   GrAA::kYes,
+                   SkMatrix::I(),
+                   rrectToDraw,
+                   GrStyle::SimpleFill());
+
+    // Blur the rendered rrect with the same sigma in both directions.
+    GrSurfaceProxyView srcView = sdc->readSurfaceView();
+    SkASSERT(srcView.asTextureProxy());
+    auto rtc2 = SkGpuBlurUtils::GaussianBlur(dContext,
+                                             std::move(srcView),
+                                             sdc->colorInfo().colorType(),
+                                             sdc->colorInfo().alphaType(),
+                                             nullptr,
+                                             SkIRect::MakeSize(dimensions),
+                                             SkIRect::MakeSize(dimensions),
+                                             xformedSigma,
+                                             xformedSigma,
+                                             SkTileMode::kClamp,
+                                             SkBackingFit::kExact);
+    if (!rtc2 || !rtc2->readSurfaceView()) {
+        return false;
+    }
+
+    auto view = rtc2->readSurfaceView();
+    SkASSERT(view.swizzle() == lazyView.swizzle());
+    SkASSERT(view.origin() == lazyView.origin());
+    trampoline->fProxy = view.asTextureProxyRef();
+
+    return true;
+#else
+    return false;
+#endif
+}
+
+// Evaluate the vertical blur at the specified 'y' value given the location of the top of the
+// rrect. Looks up (with linear interpolation) the Gaussian integral table;
+// a negative 'top' marks an empty column and yields 0.
+static uint8_t eval_V(float top, int y, const uint8_t* integral, int integralSize, float sixSigma) {
+    if (top < 0) {
+        return 0;  // an empty column
+    }
+
+    float fT = (top - y - 0.5f) * (integralSize / sixSigma);
+    if (fT < 0) {
+        return 255;
+    } else if (fT >= integralSize - 1) {
+        return 0;
+    }
+
+    int lower = (int)fT;
+    float frac = fT - lower;
+
+    SkASSERT(lower + 1 < integralSize);
+
+    // Linear interpolation between adjacent integral-table entries.
+    return integral[lower] * (1.0f - frac) + integral[lower + 1] * frac;
+}
+
+// Apply a gaussian 'kernel' horizontally at the specified 'x', 'y' location.
+// 'topVec[i]' is the top edge of the rrect in column i (-1 for no coverage);
+// out-of-range columns contribute nothing. Result is rounded to a byte.
+static uint8_t eval_H(int x,
+                      int y,
+                      const std::vector<float>& topVec,
+                      const float* kernel,
+                      int kernelSize,
+                      const uint8_t* integral,
+                      int integralSize,
+                      float sixSigma) {
+    SkASSERT(0 <= x && x < (int)topVec.size());
+    SkASSERT(kernelSize % 2);
+
+    float accum = 0.0f;
+
+    int xSampleLoc = x - (kernelSize / 2);
+    for (int i = 0; i < kernelSize; ++i, ++xSampleLoc) {
+        if (xSampleLoc < 0 || xSampleLoc >= (int)topVec.size()) {
+            continue;
+        }
+
+        accum += kernel[i] * eval_V(topVec[xSampleLoc], y, integral, integralSize, sixSigma);
+    }
+
+    // +0.5f rounds to nearest on the implicit float->uint8_t conversion.
+    return accum + 0.5f;
+}
+
+// Create a cpu-side blurred-rrect mask that is close to the version the gpu would've produced.
+// The match needs to be close bc the cpu- and gpu-generated version must be interchangeable.
+// Exploits the rrect's 4-fold symmetry: only the top-left quadrant is
+// computed; rows and columns are then mirrored into the other quadrants.
+static GrSurfaceProxyView create_mask_on_cpu(GrRecordingContext* rContext,
+                                             const SkRRect& rrectToDraw,
+                                             const SkISize& dimensions,
+                                             float xformedSigma) {
+    SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
+    int radius = SkGpuBlurUtils::SigmaRadius(xformedSigma);
+    int kernelSize = 2 * radius + 1;
+
+    SkASSERT(kernelSize % 2);
+    SkASSERT(dimensions.width() % 2);
+    SkASSERT(dimensions.height() % 2);
+
+    SkVector radii = rrectToDraw.getSimpleRadii();
+    SkASSERT(SkScalarNearlyEqual(radii.fX, radii.fY));
+
+    const int halfWidthPlus1 = (dimensions.width() / 2) + 1;
+    const int halfHeightPlus1 = (dimensions.height() / 2) + 1;
+
+    std::unique_ptr<float[]> kernel(new float[kernelSize]);
+
+    SkGpuBlurUtils::Compute1DGaussianKernel(kernel.get(), xformedSigma, radius);
+
+    SkBitmap integral;
+    if (!SkGpuBlurUtils::CreateIntegralTable(6 * xformedSigma, &integral)) {
+        return {};
+    }
+
+    SkBitmap result;
+    if (!result.tryAllocPixels(SkImageInfo::MakeA8(dimensions.width(), dimensions.height()))) {
+        return {};
+    }
+
+    // topVec[x]: y coordinate where the rrect's top edge sits in column x
+    // (offset by 3 sigma of padding), or -1 if the column is outside the rrect.
+    std::vector<float> topVec;
+    topVec.reserve(dimensions.width());
+    for (int x = 0; x < dimensions.width(); ++x) {
+        if (x < rrectToDraw.rect().fLeft || x > rrectToDraw.rect().fRight) {
+            topVec.push_back(-1);
+        } else {
+            if (x + 0.5f < rrectToDraw.rect().fLeft + radii.fX) {  // in the circular section
+                float xDist = rrectToDraw.rect().fLeft + radii.fX - x - 0.5f;
+                float h = sqrtf(radii.fX * radii.fX - xDist * xDist);
+                SkASSERT(0 <= h && h < radii.fY);
+                // NOTE(review): uses radii.fX for the vertical offset; OK since
+                // fX ~= fY is asserted above, but fY would read more naturally.
+                topVec.push_back(rrectToDraw.rect().fTop + radii.fX - h + 3 * xformedSigma);
+            } else {
+                topVec.push_back(rrectToDraw.rect().fTop + 3 * xformedSigma);
+            }
+        }
+    }
+
+    // Fill the top-left quadrant, mirror horizontally within the row, then
+    // mirror whole rows vertically.
+    for (int y = 0; y < halfHeightPlus1; ++y) {
+        uint8_t* scanline = result.getAddr8(0, y);
+
+        for (int x = 0; x < halfWidthPlus1; ++x) {
+            scanline[x] = eval_H(x,
+                                 y,
+                                 topVec,
+                                 kernel.get(),
+                                 kernelSize,
+                                 integral.getAddr8(0, 0),
+                                 integral.width(),
+                                 6 * xformedSigma);
+            scanline[dimensions.width() - x - 1] = scanline[x];
+        }
+
+        memcpy(result.getAddr8(0, dimensions.height() - y - 1), scanline, result.rowBytes());
+    }
+
+    result.setImmutable();
+
+    auto view = std::get<0>(GrMakeUncachedBitmapProxyView(rContext, result));
+    if (!view) {
+        return {};
+    }
+
+    SkASSERT(view.origin() == kBlurredRRectMaskOrigin);
+    return view;
+}
+
+static std::unique_ptr<GrFragmentProcessor> find_or_create_rrect_blur_mask_fp(
+ GrRecordingContext* rContext,
+ const SkRRect& rrectToDraw,
+ const SkISize& dimensions,
+ float xformedSigma) {
+ SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
+ skgpu::UniqueKey key;
+ make_blurred_rrect_key(&key, rrectToDraw, xformedSigma);
+
+ auto threadSafeCache = rContext->priv().threadSafeCache();
+
+ // It seems like we could omit this matrix and modify the shader code to not normalize
+ // the coords used to sample the texture effect. However, the "proxyDims" value in the
+// shader is not always the actual proxy dimensions. This is because 'dimensions' here
+ // was computed using integer corner radii as determined in
+ // SkComputeBlurredRRectParams whereas the shader code uses the float radius to compute
+ // 'proxyDims'. Why it draws correctly with these unequal values is a mystery for the ages.
+ auto m = SkMatrix::Scale(dimensions.width(), dimensions.height());
+
+ GrSurfaceProxyView view;
+
+ if (GrDirectContext* dContext = rContext->asDirectContext()) {
+ // The gpu thread gets priority over the recording threads. If the gpu thread is first,
+ // it crams a lazy proxy into the cache and then fills it in later.
+ auto [lazyView, trampoline] = GrThreadSafeCache::CreateLazyView(dContext,
+ GrColorType::kAlpha_8,
+ dimensions,
+ kBlurredRRectMaskOrigin,
+ SkBackingFit::kExact);
+ if (!lazyView) {
+ return nullptr;
+ }
+
+ view = threadSafeCache->findOrAdd(key, lazyView);
+ if (view != lazyView) {
+ SkASSERT(view.asTextureProxy());
+ SkASSERT(view.origin() == kBlurredRRectMaskOrigin);
+ return GrTextureEffect::Make(std::move(view), kPremul_SkAlphaType, m);
+ }
+
+ if (!fillin_view_on_gpu(dContext,
+ lazyView,
+ std::move(trampoline),
+ rrectToDraw,
+ dimensions,
+ xformedSigma)) {
+ // In this case something has gone disastrously wrong so set up to drop the draw
+ // that needed this resource and reduce future pollution of the cache.
+ threadSafeCache->remove(key);
+ return nullptr;
+ }
+ } else {
+ view = threadSafeCache->find(key);
+ if (view) {
+ SkASSERT(view.asTextureProxy());
+ SkASSERT(view.origin() == kBlurredRRectMaskOrigin);
+ return GrTextureEffect::Make(std::move(view), kPremul_SkAlphaType, m);
+ }
+
+ view = create_mask_on_cpu(rContext, rrectToDraw, dimensions, xformedSigma);
+ if (!view) {
+ return nullptr;
+ }
+
+ view = threadSafeCache->add(key, view);
+ }
+
+ SkASSERT(view.asTextureProxy());
+ SkASSERT(view.origin() == kBlurredRRectMaskOrigin);
+ return GrTextureEffect::Make(std::move(view), kPremul_SkAlphaType, m);
+}
+
+static std::unique_ptr<GrFragmentProcessor> make_rrect_blur(GrRecordingContext* context,
+ float sigma,
+ float xformedSigma,
+ const SkRRect& srcRRect,
+ const SkRRect& devRRect) {
+    // Should have been caught upstream.
+#ifdef SK_DEBUG
+ SkASSERTF(!SkRRectPriv::IsCircle(devRRect),
+ "Unexpected circle. %d\n\t%s\n\t%s",
+ SkRRectPriv::IsCircle(srcRRect),
+ srcRRect.dumpToString(true).c_str(),
+ devRRect.dumpToString(true).c_str());
+ SkASSERTF(!devRRect.isRect(),
+ "Unexpected rect. %d\n\t%s\n\t%s",
+ srcRRect.isRect(),
+ srcRRect.dumpToString(true).c_str(),
+ devRRect.dumpToString(true).c_str());
+#endif
+
+ // TODO: loosen this up
+ if (!SkRRectPriv::IsSimpleCircular(devRRect)) {
+ return nullptr;
+ }
+
+ if (SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma)) {
+ return nullptr;
+ }
+
+ // Make sure we can successfully ninepatch this rrect -- the blur sigma has to be sufficiently
+ // small relative to both the size of the corner radius and the width (and height) of the rrect.
+ SkRRect rrectToDraw;
+ SkISize dimensions;
+ SkScalar ignored[SkGpuBlurUtils::kBlurRRectMaxDivisions];
+
+ bool ninePatchable = SkGpuBlurUtils::ComputeBlurredRRectParams(srcRRect,
+ devRRect,
+ sigma,
+ xformedSigma,
+ &rrectToDraw,
+ &dimensions,
+ ignored,
+ ignored,
+ ignored,
+ ignored);
+ if (!ninePatchable) {
+ return nullptr;
+ }
+
+ std::unique_ptr<GrFragmentProcessor> maskFP =
+ find_or_create_rrect_blur_mask_fp(context, rrectToDraw, dimensions, xformedSigma);
+ if (!maskFP) {
+ return nullptr;
+ }
+
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform shader ninePatchFP;"
+
+ "uniform half cornerRadius;"
+ "uniform float4 proxyRect;"
+ "uniform half blurRadius;"
+
+ "half4 main(float2 xy) {"
+ // Warp the fragment position to the appropriate part of the 9-patch blur texture by
+ // snipping out the middle section of the proxy rect.
+ "float2 translatedFragPosFloat = sk_FragCoord.xy - proxyRect.LT;"
+ "float2 proxyCenter = (proxyRect.RB - proxyRect.LT) * 0.5;"
+ "half edgeSize = 2.0 * blurRadius + cornerRadius + 0.5;"
+
+ // Position the fragment so that (0, 0) marks the center of the proxy rectangle.
+ // Negative coordinates are on the left/top side and positive numbers are on the
+ // right/bottom.
+ "translatedFragPosFloat -= proxyCenter;"
+
+ // Temporarily strip off the fragment's sign. x/y are now strictly increasing as we
+ // move away from the center.
+ "half2 fragDirection = half2(sign(translatedFragPosFloat));"
+ "translatedFragPosFloat = abs(translatedFragPosFloat);"
+
+ // Our goal is to snip out the "middle section" of the proxy rect (everything but the
+ // edge). We've repositioned our fragment position so that (0, 0) is the centerpoint
+ // and x/y are always positive, so we can subtract here and interpret negative results
+ // as being within the middle section.
+ "half2 translatedFragPosHalf = half2(translatedFragPosFloat - (proxyCenter - edgeSize));"
+
+ // Remove the middle section by clamping to zero.
+ "translatedFragPosHalf = max(translatedFragPosHalf, 0);"
+
+ // Reapply the fragment's sign, so that negative coordinates once again mean left/top
+ // side and positive means bottom/right side.
+ "translatedFragPosHalf *= fragDirection;"
+
+ // Offset the fragment so that (0, 0) marks the upper-left again, instead of the center
+ // point.
+ "translatedFragPosHalf += half2(edgeSize);"
+
+ "half2 proxyDims = half2(2.0 * edgeSize);"
+ "half2 texCoord = translatedFragPosHalf / proxyDims;"
+
+ "return ninePatchFP.eval(texCoord).aaaa;"
+ "}"
+ );
+
+ float cornerRadius = SkRRectPriv::GetSimpleRadii(devRRect).fX;
+ float blurRadius = 3.f * SkScalarCeilToScalar(xformedSigma - 1 / 6.0f);
+ SkRect proxyRect = devRRect.getBounds().makeOutset(blurRadius, blurRadius);
+
+ auto rrectBlurFP = GrSkSLFP::Make(effect, "RRectBlur", /*inputFP=*/nullptr,
+ GrSkSLFP::OptFlags::kCompatibleWithCoverageAsAlpha,
+ "ninePatchFP", GrSkSLFP::IgnoreOptFlags(std::move(maskFP)),
+ "cornerRadius", cornerRadius,
+ "proxyRect", proxyRect,
+ "blurRadius", blurRadius);
+ // Modulate blur with the input color.
+ return GrBlendFragmentProcessor::Make<SkBlendMode::kModulate>(std::move(rrectBlurFP),
+ /*dst=*/nullptr);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkBlurMaskFilterImpl::directFilterMaskGPU(GrRecordingContext* context,
+ skgpu::ganesh::SurfaceDrawContext* sdc,
+ GrPaint&& paint,
+ const GrClip* clip,
+ const SkMatrix& viewMatrix,
+ const GrStyledShape& shape) const {
+ SkASSERT(sdc);
+
+ if (fBlurStyle != kNormal_SkBlurStyle) {
+ return false;
+ }
+
+ // TODO: we could handle blurred stroked circles
+ if (!shape.style().isSimpleFill()) {
+ return false;
+ }
+
+ SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);
+ if (SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma)) {
+ sdc->drawShape(clip, std::move(paint), GrAA::kYes, viewMatrix, GrStyledShape(shape));
+ return true;
+ }
+
+ SkRRect srcRRect;
+ bool inverted;
+ if (!shape.asRRect(&srcRRect, nullptr, nullptr, &inverted) || inverted) {
+ return false;
+ }
+
+ std::unique_ptr<GrFragmentProcessor> fp;
+
+ SkRRect devRRect;
+ bool devRRectIsValid = srcRRect.transform(viewMatrix, &devRRect);
+
+ bool devRRectIsCircle = devRRectIsValid && SkRRectPriv::IsCircle(devRRect);
+
+ bool canBeRect = srcRRect.isRect() && viewMatrix.preservesRightAngles();
+ bool canBeCircle = (SkRRectPriv::IsCircle(srcRRect) && viewMatrix.isSimilarity()) ||
+ devRRectIsCircle;
+
+ if (canBeRect || canBeCircle) {
+ if (canBeRect) {
+ fp = make_rect_blur(context, *context->priv().caps()->shaderCaps(),
+ srcRRect.rect(), viewMatrix, xformedSigma);
+ } else {
+ SkRect devBounds;
+ if (devRRectIsCircle) {
+ devBounds = devRRect.getBounds();
+ } else {
+ SkPoint center = {srcRRect.getBounds().centerX(), srcRRect.getBounds().centerY()};
+ viewMatrix.mapPoints(&center, 1);
+ SkScalar radius = viewMatrix.mapVector(0, srcRRect.width()/2.f).length();
+ devBounds = {center.x() - radius,
+ center.y() - radius,
+ center.x() + radius,
+ center.y() + radius};
+ }
+ fp = make_circle_blur(context, devBounds, xformedSigma);
+ }
+
+ if (!fp) {
+ return false;
+ }
+
+ SkRect srcProxyRect = srcRRect.rect();
+ // Determine how much to outset the src rect to ensure we hit pixels within three sigma.
+ SkScalar outsetX = 3.0f*xformedSigma;
+ SkScalar outsetY = 3.0f*xformedSigma;
+ if (viewMatrix.isScaleTranslate()) {
+ outsetX /= SkScalarAbs(viewMatrix.getScaleX());
+ outsetY /= SkScalarAbs(viewMatrix.getScaleY());
+ } else {
+ SkSize scale;
+ if (!viewMatrix.decomposeScale(&scale, nullptr)) {
+ return false;
+ }
+ outsetX /= scale.width();
+ outsetY /= scale.height();
+ }
+ srcProxyRect.outset(outsetX, outsetY);
+
+ paint.setCoverageFragmentProcessor(std::move(fp));
+ sdc->drawRect(clip, std::move(paint), GrAA::kNo, viewMatrix, srcProxyRect);
+ return true;
+ }
+ if (!viewMatrix.isScaleTranslate()) {
+ return false;
+ }
+ if (!devRRectIsValid || !SkRRectPriv::AllCornersCircular(devRRect)) {
+ return false;
+ }
+
+ fp = make_rrect_blur(context, fSigma, xformedSigma, srcRRect, devRRect);
+ if (!fp) {
+ return false;
+ }
+
+ if (!this->ignoreXform()) {
+ SkRect srcProxyRect = srcRRect.rect();
+ srcProxyRect.outset(3.0f*fSigma, 3.0f*fSigma);
+ paint.setCoverageFragmentProcessor(std::move(fp));
+ sdc->drawRect(clip, std::move(paint), GrAA::kNo, viewMatrix, srcProxyRect);
+ } else {
+ SkMatrix inverse;
+ if (!viewMatrix.invert(&inverse)) {
+ return false;
+ }
+
+ SkIRect proxyBounds;
+ float extra=3.f*SkScalarCeilToScalar(xformedSigma-1/6.0f);
+ devRRect.rect().makeOutset(extra, extra).roundOut(&proxyBounds);
+
+ paint.setCoverageFragmentProcessor(std::move(fp));
+ sdc->fillPixelsWithLocalMatrix(clip, std::move(paint), proxyBounds, inverse);
+ }
+
+ return true;
+}
+
+bool SkBlurMaskFilterImpl::canFilterMaskGPU(const GrStyledShape& shape,
+ const SkIRect& devSpaceShapeBounds,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkIRect* maskRect) const {
+ SkScalar xformedSigma = this->computeXformedSigma(ctm);
+ if (SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma)) {
+ *maskRect = devSpaceShapeBounds;
+ return maskRect->intersect(clipBounds);
+ }
+
+ if (maskRect) {
+ float sigma3 = 3 * SkScalarToFloat(xformedSigma);
+
+ // Outset srcRect and clipRect by 3 * sigma, to compute affected blur area.
+ SkIRect clipRect = clipBounds.makeOutset(sigma3, sigma3);
+ SkIRect srcRect = devSpaceShapeBounds.makeOutset(sigma3, sigma3);
+
+ if (!srcRect.intersect(clipRect)) {
+ srcRect.setEmpty();
+ }
+ *maskRect = srcRect;
+ }
+
+ // We prefer to blur paths with small blur radii on the CPU.
+ static const SkScalar kMIN_GPU_BLUR_SIZE = SkIntToScalar(64);
+ static const SkScalar kMIN_GPU_BLUR_SIGMA = SkIntToScalar(32);
+
+ if (devSpaceShapeBounds.width() <= kMIN_GPU_BLUR_SIZE &&
+ devSpaceShapeBounds.height() <= kMIN_GPU_BLUR_SIZE &&
+ xformedSigma <= kMIN_GPU_BLUR_SIGMA) {
+ return false;
+ }
+
+ return true;
+}
+
+GrSurfaceProxyView SkBlurMaskFilterImpl::filterMaskGPU(GrRecordingContext* context,
+ GrSurfaceProxyView srcView,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect) const {
+ // 'maskRect' isn't snapped to the UL corner but the mask in 'src' is.
+ const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());
+
+ SkScalar xformedSigma = this->computeXformedSigma(ctm);
+
+ // If we're doing a normal blur, we can clobber the pathTexture in the
+ // gaussianBlur. Otherwise, we need to save it for later compositing.
+ bool isNormalBlur = (kNormal_SkBlurStyle == fBlurStyle);
+ auto srcBounds = SkIRect::MakeSize(srcView.proxy()->dimensions());
+ auto surfaceDrawContext = SkGpuBlurUtils::GaussianBlur(context,
+ srcView,
+ srcColorType,
+ srcAlphaType,
+ nullptr,
+ clipRect,
+ srcBounds,
+ xformedSigma,
+ xformedSigma,
+ SkTileMode::kClamp);
+ if (!surfaceDrawContext || !surfaceDrawContext->asTextureProxy()) {
+ return {};
+ }
+
+ if (!isNormalBlur) {
+ GrPaint paint;
+ // Blend pathTexture over blurTexture.
+ paint.setCoverageFragmentProcessor(GrTextureEffect::Make(std::move(srcView), srcAlphaType));
+ if (kInner_SkBlurStyle == fBlurStyle) {
+ // inner: dst = dst * src
+ paint.setCoverageSetOpXPFactory(SkRegion::kIntersect_Op);
+ } else if (kSolid_SkBlurStyle == fBlurStyle) {
+ // solid: dst = src + dst - src * dst
+ // = src + (1 - src) * dst
+ paint.setCoverageSetOpXPFactory(SkRegion::kUnion_Op);
+ } else if (kOuter_SkBlurStyle == fBlurStyle) {
+ // outer: dst = dst * (1 - src)
+ // = 0 * src + (1 - src) * dst
+ paint.setCoverageSetOpXPFactory(SkRegion::kDifference_Op);
+ } else {
+ paint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
+ }
+
+ surfaceDrawContext->fillPixelsWithLocalMatrix(nullptr, std::move(paint), clipRect,
+ SkMatrix::I());
+ }
+
+ return surfaceDrawContext->readSurfaceView();
+}
+
+#endif // defined(SK_GANESH)
+
+void sk_register_blur_maskfilter_createproc() { SK_REGISTER_FLATTENABLE(SkBlurMaskFilterImpl); }
+
+sk_sp<SkMaskFilter> SkMaskFilter::MakeBlur(SkBlurStyle style, SkScalar sigma, bool respectCTM) {
+ if (SkScalarIsFinite(sigma) && sigma > 0) {
+ return sk_sp<SkMaskFilter>(new SkBlurMaskFilterImpl(sigma, style, respectCTM));
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkBlurMask.cpp b/gfx/skia/skia/src/core/SkBlurMask.cpp
new file mode 100644
index 0000000000..faeae36e71
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlurMask.cpp
@@ -0,0 +1,661 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkBlurMask.h"
+
+#include "include/core/SkColorPriv.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkEndian.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkMaskBlurFilter.h"
+
+using namespace skia_private;
+
+// This constant approximates the scaling done in the software path's
+// "high quality" mode, in SkBlurMask::Blur() (1 / sqrt(3)).
+// IMHO, it actually should be 1: we blur "less" than we should do
+// according to the CSS and canvas specs, simply because Safari does the same.
+// Firefox used to do the same too, until 4.0 where they fixed it. So at some
+// point we should probably get rid of these scaling constants and rebaseline
+// all the blur tests.
+static const SkScalar kBLUR_SIGMA_SCALE = 0.57735f;
+
+SkScalar SkBlurMask::ConvertRadiusToSigma(SkScalar radius) {
+ return radius > 0 ? kBLUR_SIGMA_SCALE * radius + 0.5f : 0.0f;
+}
+
+SkScalar SkBlurMask::ConvertSigmaToRadius(SkScalar sigma) {
+ return sigma > 0.5f ? (sigma - 0.5f) / kBLUR_SIGMA_SCALE : 0.0f;
+}
+
+
+template <typename AlphaIter>
+static void merge_src_with_blur(uint8_t dst[], int dstRB,
+ AlphaIter src, int srcRB,
+ const uint8_t blur[], int blurRB,
+ int sw, int sh) {
+ dstRB -= sw;
+ blurRB -= sw;
+ while (--sh >= 0) {
+ AlphaIter rowSrc(src);
+ for (int x = sw - 1; x >= 0; --x) {
+ *dst = SkToU8(SkAlphaMul(*blur, SkAlpha255To256(*rowSrc)));
+ ++dst;
+ ++rowSrc;
+ ++blur;
+ }
+ dst += dstRB;
+ src >>= srcRB;
+ blur += blurRB;
+ }
+}
+
+template <typename AlphaIter>
+static void clamp_solid_with_orig(uint8_t dst[], int dstRowBytes,
+ AlphaIter src, int srcRowBytes,
+ int sw, int sh) {
+ int x;
+ while (--sh >= 0) {
+ AlphaIter rowSrc(src);
+ for (x = sw - 1; x >= 0; --x) {
+ int s = *rowSrc;
+ int d = *dst;
+ *dst = SkToU8(s + d - SkMulDiv255Round(s, d));
+ ++dst;
+ ++rowSrc;
+ }
+ dst += dstRowBytes - sw;
+ src >>= srcRowBytes;
+ }
+}
+
+template <typename AlphaIter>
+static void clamp_outer_with_orig(uint8_t dst[], int dstRowBytes,
+ AlphaIter src, int srcRowBytes,
+ int sw, int sh) {
+ int x;
+ while (--sh >= 0) {
+ AlphaIter rowSrc(src);
+ for (x = sw - 1; x >= 0; --x) {
+ int srcValue = *rowSrc;
+ if (srcValue) {
+ *dst = SkToU8(SkAlphaMul(*dst, SkAlpha255To256(255 - srcValue)));
+ }
+ ++dst;
+ ++rowSrc;
+ }
+ dst += dstRowBytes - sw;
+ src >>= srcRowBytes;
+ }
+}
+///////////////////////////////////////////////////////////////////////////////
+
+// we use a local function to wrap the class static method to work around
+// a bug in gcc98
+void SkMask_FreeImage(uint8_t* image);
+void SkMask_FreeImage(uint8_t* image) {
+ SkMask::FreeImage(image);
+}
+
+bool SkBlurMask::BoxBlur(SkMask* dst, const SkMask& src, SkScalar sigma, SkBlurStyle style,
+ SkIPoint* margin) {
+ if (src.fFormat != SkMask::kBW_Format &&
+ src.fFormat != SkMask::kA8_Format &&
+ src.fFormat != SkMask::kARGB32_Format &&
+ src.fFormat != SkMask::kLCD16_Format)
+ {
+ return false;
+ }
+
+ SkMaskBlurFilter blurFilter{sigma, sigma};
+ if (blurFilter.hasNoBlur()) {
+ // If there is no effective blur most styles will just produce the original mask.
+ // However, kOuter_SkBlurStyle will produce an empty mask.
+ if (style == kOuter_SkBlurStyle) {
+ dst->fImage = nullptr;
+ dst->fBounds = SkIRect::MakeEmpty();
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ if (margin != nullptr) {
+ // This filter will disregard the src.fImage completely.
+ // The margin is actually {-(src.fBounds.width() / 2), -(src.fBounds.height() / 2)}
+ // but it is not clear if callers will fall over with negative margins.
+ *margin = SkIPoint{0,0};
+ }
+ return true;
+ }
+ return false;
+ }
+ const SkIPoint border = blurFilter.blur(src, dst);
+ // If src.fImage is null, then this call is only to calculate the border.
+ if (src.fImage != nullptr && dst->fImage == nullptr) {
+ return false;
+ }
+
+ if (margin != nullptr) {
+ *margin = border;
+ }
+
+ if (src.fImage == nullptr) {
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds = src.fBounds; // restore trimmed bounds
+ dst->fRowBytes = dst->fBounds.width();
+ }
+ return true;
+ }
+
+ switch (style) {
+ case kNormal_SkBlurStyle:
+ break;
+ case kSolid_SkBlurStyle: {
+ auto dstStart = &dst->fImage[border.x() + border.y() * dst->fRowBytes];
+ switch (src.fFormat) {
+ case SkMask::kBW_Format:
+ clamp_solid_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kBW_Format>(src.fImage, 0), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kA8_Format:
+ clamp_solid_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(src.fImage), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kARGB32_Format: {
+ uint32_t* srcARGB = reinterpret_cast<uint32_t*>(src.fImage);
+ clamp_solid_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kARGB32_Format>(srcARGB), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ case SkMask::kLCD16_Format: {
+ uint16_t* srcLCD = reinterpret_cast<uint16_t*>(src.fImage);
+ clamp_solid_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kLCD16_Format>(srcLCD), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+ } break;
+ case kOuter_SkBlurStyle: {
+ auto dstStart = &dst->fImage[border.x() + border.y() * dst->fRowBytes];
+ switch (src.fFormat) {
+ case SkMask::kBW_Format:
+ clamp_outer_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kBW_Format>(src.fImage, 0), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kA8_Format:
+ clamp_outer_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(src.fImage), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kARGB32_Format: {
+ uint32_t* srcARGB = reinterpret_cast<uint32_t*>(src.fImage);
+ clamp_outer_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kARGB32_Format>(srcARGB), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ case SkMask::kLCD16_Format: {
+ uint16_t* srcLCD = reinterpret_cast<uint16_t*>(src.fImage);
+ clamp_outer_with_orig(
+ dstStart, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kLCD16_Format>(srcLCD), src.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+ } break;
+ case kInner_SkBlurStyle: {
+ // now we allocate the "real" dst, mirror the size of src
+ SkMask blur = *dst;
+ SkAutoMaskFreeImage autoFreeBlurMask(blur.fImage);
+ dst->fBounds = src.fBounds;
+ dst->fRowBytes = dst->fBounds.width();
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(dstSize);
+ auto blurStart = &blur.fImage[border.x() + border.y() * blur.fRowBytes];
+ switch (src.fFormat) {
+ case SkMask::kBW_Format:
+ merge_src_with_blur(
+ dst->fImage, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kBW_Format>(src.fImage, 0), src.fRowBytes,
+ blurStart, blur.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kA8_Format:
+ merge_src_with_blur(
+ dst->fImage, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(src.fImage), src.fRowBytes,
+ blurStart, blur.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ break;
+ case SkMask::kARGB32_Format: {
+ uint32_t* srcARGB = reinterpret_cast<uint32_t*>(src.fImage);
+ merge_src_with_blur(
+ dst->fImage, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kARGB32_Format>(srcARGB), src.fRowBytes,
+ blurStart, blur.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ case SkMask::kLCD16_Format: {
+ uint16_t* srcLCD = reinterpret_cast<uint16_t*>(src.fImage);
+ merge_src_with_blur(
+ dst->fImage, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kLCD16_Format>(srcLCD), src.fRowBytes,
+ blurStart, blur.fRowBytes,
+ src.fBounds.width(), src.fBounds.height());
+ } break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+ } break;
+ }
+
+ return true;
+}
+
+/* Convolving a box with itself three times results in a piecewise
+ quadratic function:
+
+ 0 x <= -1.5
+ 9/8 + 3/2 x + 1/2 x^2 -1.5 < x <= -.5
+ 3/4 - x^2 -.5 < x <= .5
+ 9/8 - 3/2 x + 1/2 x^2 0.5 < x <= 1.5
+ 0 1.5 < x
+
+ Mathematica:
+
+ g[x_] := Piecewise [ {
+ {9/8 + 3/2 x + 1/2 x^2 , -1.5 < x <= -.5},
+ {3/4 - x^2 , -.5 < x <= .5},
+ {9/8 - 3/2 x + 1/2 x^2 , 0.5 < x <= 1.5}
+ }, 0]
+
+ To get the profile curve of the blurred step function at the rectangle
+ edge, we evaluate the indefinite integral, which is piecewise cubic:
+
+ 0 x <= -1.5
+ 9/16 + 9/8 x + 3/4 x^2 + 1/6 x^3 -1.5 < x <= -0.5
+ 1/2 + 3/4 x - 1/3 x^3 -.5 < x <= .5
+ 7/16 + 9/8 x - 3/4 x^2 + 1/6 x^3 .5 < x <= 1.5
+ 1 1.5 < x
+
+ in Mathematica code:
+
+ gi[x_] := Piecewise[ {
+ { 0 , x <= -1.5 },
+ { 9/16 + 9/8 x + 3/4 x^2 + 1/6 x^3, -1.5 < x <= -0.5 },
+ { 1/2 + 3/4 x - 1/3 x^3 , -.5 < x <= .5},
+ { 7/16 + 9/8 x - 3/4 x^2 + 1/6 x^3, .5 < x <= 1.5}
+ },1]
+*/
+
+static float gaussianIntegral(float x) {
+ if (x > 1.5f) {
+ return 0.0f;
+ }
+ if (x < -1.5f) {
+ return 1.0f;
+ }
+
+ float x2 = x*x;
+ float x3 = x2*x;
+
+ if ( x > 0.5f ) {
+ return 0.5625f - (x3 / 6.0f - 3.0f * x2 * 0.25f + 1.125f * x);
+ }
+ if ( x > -0.5f ) {
+ return 0.5f - (0.75f * x - x3 / 3.0f);
+ }
+ return 0.4375f + (-x3 / 6.0f - 3.0f * x2 * 0.25f - 1.125f * x);
+}
+
+/* ComputeBlurProfile fills in an array of floating
+ point values between 0 and 255 for the profile signature of
+ a blurred half-plane with the given blur radius. Since we're
+ going to be doing screened multiplications (i.e., 1 - (1-x)(1-y))
+ all the time, we actually fill in the profile pre-inverted
+ (already done 255-x).
+*/
+
+void SkBlurMask::ComputeBlurProfile(uint8_t* profile, int size, SkScalar sigma) {
+ SkASSERT(SkScalarCeilToInt(6*sigma) == size);
+
+ int center = size >> 1;
+
+ float invr = 1.f/(2*sigma);
+
+ profile[0] = 255;
+ for (int x = 1 ; x < size ; ++x) {
+ float scaled_x = (center - x - .5f) * invr;
+ float gi = gaussianIntegral(scaled_x);
+ profile[x] = 255 - (uint8_t) (255.f * gi);
+ }
+}
+
+// TODO MAYBE: Maintain a profile cache to avoid recomputing this for
+// commonly used radii. Consider baking some of the most common blur radii
+// directly in as static data?
+
+// Implementation adapted from Michael Herf's approach:
+// http://stereopsis.com/shadowrect/
+
+uint8_t SkBlurMask::ProfileLookup(const uint8_t *profile, int loc,
+ int blurredWidth, int sharpWidth) {
+ // how far are we from the original edge?
+ int dx = SkAbs32(((loc << 1) + 1) - blurredWidth) - sharpWidth;
+ int ox = dx >> 1;
+ if (ox < 0) {
+ ox = 0;
+ }
+
+ return profile[ox];
+}
+
+void SkBlurMask::ComputeBlurredScanline(uint8_t *pixels, const uint8_t *profile,
+ unsigned int width, SkScalar sigma) {
+
+ unsigned int profile_size = SkScalarCeilToInt(6*sigma);
+ skia_private::AutoTMalloc<uint8_t> horizontalScanline(width);
+
+ unsigned int sw = width - profile_size;
+ // nearest odd number less than the profile size represents the center
+ // of the (2x scaled) profile
+ int center = ( profile_size & ~1 ) - 1;
+
+ int w = sw - center;
+
+ for (unsigned int x = 0 ; x < width ; ++x) {
+ if (profile_size <= sw) {
+ pixels[x] = ProfileLookup(profile, x, width, w);
+ } else {
+ float span = float(sw)/(2*sigma);
+ float giX = 1.5f - (x+.5f)/(2*sigma);
+ pixels[x] = (uint8_t) (255 * (gaussianIntegral(giX) - gaussianIntegral(giX + span)));
+ }
+ }
+}
+
+bool SkBlurMask::BlurRect(SkScalar sigma, SkMask *dst,
+ const SkRect &src, SkBlurStyle style,
+ SkIPoint *margin, SkMask::CreateMode createMode) {
+ int profileSize = SkScalarCeilToInt(6*sigma);
+ if (profileSize <= 0) {
+ return false; // no blur to compute
+ }
+
+ int pad = profileSize/2;
+ if (margin) {
+ margin->set( pad, pad );
+ }
+
+ dst->fBounds.setLTRB(SkScalarRoundToInt(src.fLeft - pad),
+ SkScalarRoundToInt(src.fTop - pad),
+ SkScalarRoundToInt(src.fRight + pad),
+ SkScalarRoundToInt(src.fBottom + pad));
+
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ int sw = SkScalarFloorToInt(src.width());
+ int sh = SkScalarFloorToInt(src.height());
+
+ if (createMode == SkMask::kJustComputeBounds_CreateMode) {
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds = src.round(); // restore trimmed bounds
+ dst->fRowBytes = sw;
+ }
+ return true;
+ }
+
+ AutoTMalloc<uint8_t> profile(profileSize);
+
+ ComputeBlurProfile(profile, profileSize, sigma);
+
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+
+ uint8_t* dp = SkMask::AllocImage(dstSize);
+
+ dst->fImage = dp;
+
+ int dstHeight = dst->fBounds.height();
+ int dstWidth = dst->fBounds.width();
+
+ uint8_t *outptr = dp;
+
+ AutoTMalloc<uint8_t> horizontalScanline(dstWidth);
+ AutoTMalloc<uint8_t> verticalScanline(dstHeight);
+
+ ComputeBlurredScanline(horizontalScanline, profile, dstWidth, sigma);
+ ComputeBlurredScanline(verticalScanline, profile, dstHeight, sigma);
+
+ for (int y = 0 ; y < dstHeight ; ++y) {
+ for (int x = 0 ; x < dstWidth ; x++) {
+ unsigned int maskval = SkMulDiv255Round(horizontalScanline[x], verticalScanline[y]);
+ *(outptr++) = maskval;
+ }
+ }
+
+ if (style == kInner_SkBlurStyle) {
+ // now we allocate the "real" dst, mirror the size of src
+ size_t srcSize = (size_t)(src.width() * src.height());
+ if (0 == srcSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(srcSize);
+ for (int y = 0 ; y < sh ; y++) {
+ uint8_t *blur_scanline = dp + (y+pad)*dstWidth + pad;
+ uint8_t *inner_scanline = dst->fImage + y*sw;
+ memcpy(inner_scanline, blur_scanline, sw);
+ }
+ SkMask::FreeImage(dp);
+
+ dst->fBounds = src.round(); // restore trimmed bounds
+ dst->fRowBytes = sw;
+
+ } else if (style == kOuter_SkBlurStyle) {
+ for (int y = pad ; y < dstHeight-pad ; y++) {
+ uint8_t *dst_scanline = dp + y*dstWidth + pad;
+ memset(dst_scanline, 0, sw);
+ }
+ } else if (style == kSolid_SkBlurStyle) {
+ for (int y = pad ; y < dstHeight-pad ; y++) {
+ uint8_t *dst_scanline = dp + y*dstWidth + pad;
+ memset(dst_scanline, 0xff, sw);
+ }
+ }
+ // normal and solid styles are the same for analytic rect blurs, so don't
+ // need to handle solid specially.
+
+ return true;
+}
+
+bool SkBlurMask::BlurRRect(SkScalar sigma, SkMask *dst,
+ const SkRRect &src, SkBlurStyle style,
+ SkIPoint *margin, SkMask::CreateMode createMode) {
+ // Temporary for now -- always fail, should cause caller to fall back
+ // to old path. Plumbing just to land API and parallelize effort.
+
+ return false;
+}
+
+// The "simple" blur is a direct implementation of separable convolution with a discrete
+// gaussian kernel. It's "ground truth" in a sense; too slow to be used, but very
+// useful for correctness comparisons.
+
+bool SkBlurMask::BlurGroundTruth(SkScalar sigma, SkMask* dst, const SkMask& src,
+ SkBlurStyle style, SkIPoint* margin) {
+
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ float variance = sigma * sigma;
+
+ int windowSize = SkScalarCeilToInt(sigma*6);
+ // round window size up to nearest odd number
+ windowSize |= 1;
+
+ AutoTMalloc<float> gaussWindow(windowSize);
+
+ int halfWindow = windowSize >> 1;
+
+ gaussWindow[halfWindow] = 1;
+
+ float windowSum = 1;
+ for (int x = 1 ; x <= halfWindow ; ++x) {
+ float gaussian = expf(-x*x / (2*variance));
+ gaussWindow[halfWindow + x] = gaussWindow[halfWindow-x] = gaussian;
+ windowSum += 2*gaussian;
+ }
+
+ // leave the filter un-normalized for now; we will divide by the normalization
+    // sum later.
+
+ int pad = halfWindow;
+ if (margin) {
+ margin->set( pad, pad );
+ }
+
+ dst->fBounds = src.fBounds;
+ dst->fBounds.outset(pad, pad);
+
+ dst->fRowBytes = dst->fBounds.width();
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ if (src.fImage) {
+
+ size_t dstSize = dst->computeImageSize();
+ if (0 == dstSize) {
+ return false; // too big to allocate, abort
+ }
+
+ int srcWidth = src.fBounds.width();
+ int srcHeight = src.fBounds.height();
+ int dstWidth = dst->fBounds.width();
+
+ const uint8_t* srcPixels = src.fImage;
+ uint8_t* dstPixels = SkMask::AllocImage(dstSize);
+ SkAutoMaskFreeImage autoFreeDstPixels(dstPixels);
+
+ // do the actual blur. First, make a padded copy of the source.
+ // use double pad so we never have to check if we're outside anything
+
+ int padWidth = srcWidth + 4*pad;
+ int padHeight = srcHeight;
+ int padSize = padWidth * padHeight;
+
+ AutoTMalloc<uint8_t> padPixels(padSize);
+ memset(padPixels, 0, padSize);
+
+ for (int y = 0 ; y < srcHeight; ++y) {
+ uint8_t* padptr = padPixels + y * padWidth + 2*pad;
+ const uint8_t* srcptr = srcPixels + y * srcWidth;
+ memcpy(padptr, srcptr, srcWidth);
+ }
+
+ // blur in X, transposing the result into a temporary floating point buffer.
+ // also double-pad the intermediate result so that the second blur doesn't
+ // have to do extra conditionals.
+
+ int tmpWidth = padHeight + 4*pad;
+ int tmpHeight = padWidth - 2*pad;
+ int tmpSize = tmpWidth * tmpHeight;
+
+ AutoTMalloc<float> tmpImage(tmpSize);
+ memset(tmpImage, 0, tmpSize*sizeof(tmpImage[0]));
+
+ for (int y = 0 ; y < padHeight ; ++y) {
+ uint8_t *srcScanline = padPixels + y*padWidth;
+ for (int x = pad ; x < padWidth - pad ; ++x) {
+ float *outPixel = tmpImage + (x-pad)*tmpWidth + y + 2*pad; // transposed output
+ uint8_t *windowCenter = srcScanline + x;
+ for (int i = -pad ; i <= pad ; ++i) {
+ *outPixel += gaussWindow[pad+i]*windowCenter[i];
+ }
+ *outPixel /= windowSum;
+ }
+ }
+
+ // blur in Y; now filling in the actual desired destination. We have to do
+ // the transpose again; these transposes guarantee that we read memory in
+ // linear order.
+
+ for (int y = 0 ; y < tmpHeight ; ++y) {
+ float *srcScanline = tmpImage + y*tmpWidth;
+ for (int x = pad ; x < tmpWidth - pad ; ++x) {
+ float *windowCenter = srcScanline + x;
+ float finalValue = 0;
+ for (int i = -pad ; i <= pad ; ++i) {
+ finalValue += gaussWindow[pad+i]*windowCenter[i];
+ }
+ finalValue /= windowSum;
+ uint8_t *outPixel = dstPixels + (x-pad)*dstWidth + y; // transposed output
+ int integerPixel = int(finalValue + 0.5f);
+ *outPixel = SkTPin(SkClampPos(integerPixel), 0, 255);
+ }
+ }
+
+ dst->fImage = dstPixels;
+ switch (style) {
+ case kNormal_SkBlurStyle:
+ break;
+ case kSolid_SkBlurStyle: {
+ clamp_solid_with_orig(
+ dstPixels + pad*dst->fRowBytes + pad, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(srcPixels), src.fRowBytes,
+ srcWidth, srcHeight);
+ } break;
+ case kOuter_SkBlurStyle: {
+ clamp_outer_with_orig(
+ dstPixels + pad*dst->fRowBytes + pad, dst->fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(srcPixels), src.fRowBytes,
+ srcWidth, srcHeight);
+ } break;
+ case kInner_SkBlurStyle: {
+ // now we allocate the "real" dst, mirror the size of src
+ size_t srcSize = src.computeImageSize();
+ if (0 == srcSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(srcSize);
+ merge_src_with_blur(dst->fImage, src.fRowBytes,
+ SkMask::AlphaIter<SkMask::kA8_Format>(srcPixels), src.fRowBytes,
+ dstPixels + pad*dst->fRowBytes + pad,
+ dst->fRowBytes, srcWidth, srcHeight);
+ SkMask::FreeImage(dstPixels);
+ } break;
+ }
+ autoFreeDstPixels.release();
+ }
+
+ if (style == kInner_SkBlurStyle) {
+ dst->fBounds = src.fBounds; // restore trimmed bounds
+ dst->fRowBytes = src.fRowBytes;
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkBlurMask.h b/gfx/skia/skia/src/core/SkBlurMask.h
new file mode 100644
index 0000000000..b7f790e962
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkBlurMask.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlurMask_DEFINED
+#define SkBlurMask_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkShader.h"
+#include "src/core/SkMask.h"
+
+class SkBlurMask {
+public:
+ static bool SK_WARN_UNUSED_RESULT BlurRect(SkScalar sigma, SkMask *dst, const SkRect &src,
+ SkBlurStyle, SkIPoint *margin = nullptr,
+ SkMask::CreateMode createMode =
+ SkMask::kComputeBoundsAndRenderImage_CreateMode);
+ static bool SK_WARN_UNUSED_RESULT BlurRRect(SkScalar sigma, SkMask *dst, const SkRRect &src,
+ SkBlurStyle, SkIPoint *margin = nullptr,
+ SkMask::CreateMode createMode =
+ SkMask::kComputeBoundsAndRenderImage_CreateMode);
+
+ // forceQuality will prevent BoxBlur from falling back to the low quality approach when sigma
+ // is very small -- this can be used to predict the margin bump ahead of time without completely
+ // replicating the internal logic. This permits not only simpler caching of blurred results,
+ // but also being able to predict precisely at what pixels the blurred profile of e.g. a
+ // rectangle will lie.
+ //
+ // Calling details:
+ // * calculate margin - if src.fImage is null, then this call only calculates the border.
+ // * failure - if src.fImage is not null, failure is signaled with dst->fImage being
+ // null.
+
+ static bool SK_WARN_UNUSED_RESULT BoxBlur(SkMask* dst, const SkMask& src,
+ SkScalar sigma, SkBlurStyle style,
+ SkIPoint* margin = nullptr);
+
+ // the "ground truth" blur does a gaussian convolution; it's slow
+ // but useful for comparison purposes.
+ static bool SK_WARN_UNUSED_RESULT BlurGroundTruth(SkScalar sigma, SkMask* dst,
+ const SkMask& src,
+ SkBlurStyle, SkIPoint* margin = nullptr);
+
+ // If radius > 0, return the corresponding sigma, else return 0
+ static SkScalar SK_SPI ConvertRadiusToSigma(SkScalar radius);
+ // If sigma > 0.5, return the corresponding radius, else return 0
+ static SkScalar SK_SPI ConvertSigmaToRadius(SkScalar sigma);
+
+ /* Helper functions for analytic rectangle blurs */
+
+ /** Look up the intensity of the (one dimensional) blurred half-plane.
+ @param profile The precomputed 1D blur profile; initialized by ComputeBlurProfile below.
+ @param loc the location to look up; The lookup will clamp invalid inputs, but
+ meaningful data are available between 0 and blurred_width
+ @param blurred_width The width of the final, blurred rectangle
+ @param sharp_width The width of the original, unblurred rectangle.
+ */
+ static uint8_t ProfileLookup(const uint8_t* profile, int loc, int blurredWidth, int sharpWidth);
+
+ /** Populate the profile of a 1D blurred halfplane.
+ @param profile The 1D table to fill in
+ @param size Should be 6*sigma bytes
+ @param sigma The standard deviation of the gaussian blur kernel
+ */
+ static void ComputeBlurProfile(uint8_t* profile, int size, SkScalar sigma);
+
+ /** Compute an entire scanline of a blurred step function. This is a 1D helper that
+ will produce both the horizontal and vertical profiles of the blurry rectangle.
+ @param pixels Location to store the resulting pixel data; allocated and managed by caller
+ @param profile Precomputed blur profile computed by ComputeBlurProfile above.
+ @param width Size of the pixels array.
+ @param sigma Standard deviation of the gaussian blur kernel used to compute the profile;
+ this implicitly gives the size of the pixels array.
+ */
+
+ static void ComputeBlurredScanline(uint8_t* pixels, const uint8_t* profile,
+ unsigned int width, SkScalar sigma);
+
+
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCachedData.cpp b/gfx/skia/skia/src/core/SkCachedData.cpp
new file mode 100644
index 0000000000..7731a6b351
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCachedData.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkMalloc.h"
+#include "include/private/chromium/SkDiscardableMemory.h"
+#include "src/core/SkCachedData.h"
+
+SkCachedData::SkCachedData(void* data, size_t size)
+ : fData(data)
+ , fSize(size)
+ , fRefCnt(1)
+ , fStorageType(kMalloc_StorageType)
+ , fInCache(false)
+ , fIsLocked(true)
+{
+ fStorage.fMalloc = data;
+}
+
+SkCachedData::SkCachedData(size_t size, SkDiscardableMemory* dm)
+ : fData(dm->data())
+ , fSize(size)
+ , fRefCnt(1)
+ , fStorageType(kDiscardableMemory_StorageType)
+ , fInCache(false)
+ , fIsLocked(true)
+{
+ fStorage.fDM = dm;
+}
+
+SkCachedData::~SkCachedData() {
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ sk_free(fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ delete fStorage.fDM;
+ break;
+ }
+}
+
+class SkCachedData::AutoMutexWritable {
+public:
+ AutoMutexWritable(const SkCachedData* cd) : fCD(const_cast<SkCachedData*>(cd)) {
+ fCD->fMutex.acquire();
+ fCD->validate();
+ }
+ ~AutoMutexWritable() {
+ fCD->validate();
+ fCD->fMutex.release();
+ }
+
+ SkCachedData* get() { return fCD; }
+ SkCachedData* operator->() { return fCD; }
+
+private:
+ SkCachedData* fCD;
+};
+
+void SkCachedData::internalRef(bool fromCache) const {
+ AutoMutexWritable(this)->inMutexRef(fromCache);
+}
+
+void SkCachedData::internalUnref(bool fromCache) const {
+ if (AutoMutexWritable(this)->inMutexUnref(fromCache)) {
+ // can't delete inside doInternalUnref, since it is locking a mutex (which we own)
+ delete this;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkCachedData::inMutexRef(bool fromCache) {
+ if ((1 == fRefCnt) && fInCache) {
+ this->inMutexLock();
+ }
+
+ fRefCnt += 1;
+ if (fromCache) {
+ SkASSERT(!fInCache);
+ fInCache = true;
+ }
+}
+
+bool SkCachedData::inMutexUnref(bool fromCache) {
+ switch (--fRefCnt) {
+ case 0:
+ // we're going to be deleted, so we need to be unlocked (for DiscardableMemory)
+ if (fIsLocked) {
+ this->inMutexUnlock();
+ }
+ break;
+ case 1:
+ if (fInCache && !fromCache) {
+ // If we're down to 1 owner, and that owner is the cache, then it is safe
+ // to unlock (and mutate fData) even if the cache is in a different thread,
+ // as the cache is NOT allowed to inspect or use fData.
+ this->inMutexUnlock();
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (fromCache) {
+ SkASSERT(fInCache);
+ fInCache = false;
+ }
+
+ // return true when we need to be deleted
+ return 0 == fRefCnt;
+}
+
+void SkCachedData::inMutexLock() {
+ fMutex.assertHeld();
+
+ SkASSERT(!fIsLocked);
+ fIsLocked = true;
+
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ this->setData(fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ if (fStorage.fDM->lock()) {
+ void* ptr = fStorage.fDM->data();
+ SkASSERT(ptr);
+ this->setData(ptr);
+ } else {
+ this->setData(nullptr); // signal failure to lock, contents are gone
+ }
+ break;
+ }
+}
+
+void SkCachedData::inMutexUnlock() {
+ fMutex.assertHeld();
+
+ SkASSERT(fIsLocked);
+ fIsLocked = false;
+
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ // nothing to do/check
+ break;
+ case kDiscardableMemory_StorageType:
+ if (fData) { // did the previous lock succeed?
+ fStorage.fDM->unlock();
+ }
+ break;
+ }
+ this->setData(nullptr); // signal that we're in an unlocked state
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkCachedData::validate() const {
+ if (fIsLocked) {
+ SkASSERT((fInCache && fRefCnt > 1) || !fInCache);
+ switch (fStorageType) {
+ case kMalloc_StorageType:
+ SkASSERT(fData == fStorage.fMalloc);
+ break;
+ case kDiscardableMemory_StorageType:
+ // fData can be null or the actual value, depending if DM's lock succeeded
+ break;
+ }
+ } else {
+ SkASSERT((fInCache && 1 == fRefCnt) || (0 == fRefCnt));
+ SkASSERT(nullptr == fData);
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkCachedData.h b/gfx/skia/skia/src/core/SkCachedData.h
new file mode 100644
index 0000000000..2573bafbd5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCachedData.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCachedData_DEFINED
+#define SkCachedData_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkNoncopyable.h"
+
+class SkDiscardableMemory;
+
+class SkCachedData : ::SkNoncopyable {
+public:
+ SkCachedData(void* mallocData, size_t size);
+ SkCachedData(size_t size, SkDiscardableMemory*);
+ virtual ~SkCachedData();
+
+ size_t size() const { return fSize; }
+ const void* data() const { return fData; }
+
+ void* writable_data() { return fData; }
+
+ void ref() const { this->internalRef(false); }
+ void unref() const { this->internalUnref(false); }
+
+ int testing_only_getRefCnt() const { return fRefCnt; }
+ bool testing_only_isLocked() const { return fIsLocked; }
+ bool testing_only_isInCache() const { return fInCache; }
+
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const {
+ return kDiscardableMemory_StorageType == fStorageType ? fStorage.fDM : nullptr;
+ }
+
+protected:
+ // called when fData changes. could be nullptr.
+ virtual void onDataChange(void* oldData, void* newData) {}
+
+private:
+ SkMutex fMutex; // could use a pool of these...
+
+ enum StorageType {
+ kDiscardableMemory_StorageType,
+ kMalloc_StorageType
+ };
+
+ union {
+ SkDiscardableMemory* fDM;
+ void* fMalloc;
+ } fStorage;
+ void* fData;
+ size_t fSize;
+ int fRefCnt; // low-bit means we're owned by the cache
+ StorageType fStorageType;
+ bool fInCache;
+ bool fIsLocked;
+
+ void internalRef(bool fromCache) const;
+ void internalUnref(bool fromCache) const;
+
+ void inMutexRef(bool fromCache);
+ bool inMutexUnref(bool fromCache); // returns true if we should delete "this"
+ void inMutexLock();
+ void inMutexUnlock();
+
+ // called whenever our fData might change (lock or unlock)
+ void setData(void* newData) {
+ if (newData != fData) {
+ // notify our subclasses of the change
+ this->onDataChange(fData, newData);
+ fData = newData;
+ }
+ }
+
+ class AutoMutexWritable;
+
+public:
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ /*
+ * Attaching data to a SkResourceCache (only one at a time) enables the data to be
+ * unlocked when the cache is the only owner, thus freeing it to be purged (assuming the
+ * data is backed by a SkDiscardableMemory).
+ *
+ * When attached, it also automatically attempts to "lock" the data when the first client
+ * ref's the data (typically from a find(key, visitor) call).
+ *
+ * Thus the data will always be "locked" when a non-cache has a ref on it (whether or not
+ * the lock succeeded to recover the memory -- check data() to see if it is nullptr).
+ */
+
+ /*
+ * Call when adding this instance to a SkResourceCache::Rec subclass
+ * (typically in the Rec's constructor).
+ */
+ void attachToCacheAndRef() const { this->internalRef(true); }
+
+ /*
+ * Call when removing this instance from a SkResourceCache::Rec subclass
+ * (typically in the Rec's destructor).
+ */
+ void detachFromCacheAndUnref() const { this->internalUnref(true); }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCanvas.cpp b/gfx/skia/skia/src/core/SkCanvas.cpp
new file mode 100644
index 0000000000..c072afe5fb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvas.cpp
@@ -0,0 +1,3087 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkBlender.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkMesh.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkRasterHandleAllocator.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkVertices.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkSafe32.h"
+#include "include/private/base/SkSpan_impl.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "include/utils/SkNoDrawCanvas.h"
+#include "src/base/SkMSAN.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkImageFilterTypes.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMatrixUtils.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/core/SkVerticesPriv.h"
+#include "src/image/SkSurface_Base.h"
+#include "src/text/GlyphRun.h"
+#include "src/utils/SkPatchUtils.h"
+
+#include <algorithm>
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <new>
+#include <optional>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/ganesh/Device_v1.h"
+#include "src/utils/SkTestCanvas.h"
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+# include "src/gpu/ganesh/GrRenderTarget.h"
+# include "src/gpu/ganesh/GrRenderTargetProxy.h"
+#endif
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Device.h"
+#endif
+
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE))
+#include "include/private/chromium/SkChromeRemoteGlyphCache.h"
+#include "include/private/chromium/Slug.h"
+#endif
+
+#define RETURN_ON_NULL(ptr) do { if (nullptr == (ptr)) return; } while (0)
+#define RETURN_ON_FALSE(pred) do { if (!(pred)) return; } while (0)
+
+// This is a test: static_assert with no message is a c++17 feature,
+// and std::max() is constexpr only since the c++14 stdlib.
+static_assert(std::max(3,4) == 4);
+
+using Slug = sktext::gpu::Slug;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Return true if drawing this rect would hit every pixel in the canvas.
+ *
+ * Returns false if
+ * - rect does not contain the canvas' bounds
+ * - paint is not fill
+ * - paint would blur or otherwise change the coverage of the rect
+ */
+bool SkCanvas::wouldOverwriteEntireSurface(const SkRect* rect, const SkPaint* paint,
+ ShaderOverrideOpacity overrideOpacity) const {
+ static_assert((int)SkPaintPriv::kNone_ShaderOverrideOpacity ==
+ (int)kNone_ShaderOverrideOpacity,
+ "need_matching_enums0");
+ static_assert((int)SkPaintPriv::kOpaque_ShaderOverrideOpacity ==
+ (int)kOpaque_ShaderOverrideOpacity,
+ "need_matching_enums1");
+ static_assert((int)SkPaintPriv::kNotOpaque_ShaderOverrideOpacity ==
+ (int)kNotOpaque_ShaderOverrideOpacity,
+ "need_matching_enums2");
+
+ const SkISize size = this->getBaseLayerSize();
+ const SkRect bounds = SkRect::MakeIWH(size.width(), size.height());
+
+ // if we're clipped at all, we can't overwrite the entire surface
+ {
+ const SkBaseDevice* base = this->baseDevice();
+ const SkBaseDevice* top = this->topDevice();
+ if (base != top) {
+ return false; // we're in a saveLayer, so conservatively don't assume we'll overwrite
+ }
+ if (!base->clipIsWideOpen()) {
+ return false;
+ }
+ }
+
+ if (rect) {
+ if (!this->getTotalMatrix().isScaleTranslate()) {
+ return false; // conservative
+ }
+
+ SkRect devRect;
+ this->getTotalMatrix().mapRectScaleTranslate(&devRect, *rect);
+ if (!devRect.contains(bounds)) {
+ return false;
+ }
+ }
+
+ if (paint) {
+ SkPaint::Style paintStyle = paint->getStyle();
+ if (!(paintStyle == SkPaint::kFill_Style ||
+ paintStyle == SkPaint::kStrokeAndFill_Style)) {
+ return false;
+ }
+ if (paint->getMaskFilter() || paint->getPathEffect() || paint->getImageFilter()) {
+ return false; // conservative
+ }
+ }
+ return SkPaintPriv::Overwrites(paint, (SkPaintPriv::ShaderOverrideOpacity)overrideOpacity);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkCanvas::predrawNotify(bool willOverwritesEntireSurface) {
+ if (fSurfaceBase) {
+ if (!fSurfaceBase->aboutToDraw(willOverwritesEntireSurface
+ ? SkSurface::kDiscard_ContentChangeMode
+ : SkSurface::kRetain_ContentChangeMode)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SkCanvas::predrawNotify(const SkRect* rect, const SkPaint* paint,
+ ShaderOverrideOpacity overrideOpacity) {
+ if (fSurfaceBase) {
+ SkSurface::ContentChangeMode mode = SkSurface::kRetain_ContentChangeMode;
+ // Since willOverwriteAllPixels() may not be completely free to call, we only do so if
+ // there is an outstanding snapshot, since w/o that, there will be no copy-on-write
+ // and therefore we don't care which mode we're in.
+ //
+ if (fSurfaceBase->outstandingImageSnapshot()) {
+ if (this->wouldOverwriteEntireSurface(rect, paint, overrideOpacity)) {
+ mode = SkSurface::kDiscard_ContentChangeMode;
+ }
+ }
+ if (!fSurfaceBase->aboutToDraw(mode)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkCanvas::Layer::Layer(sk_sp<SkBaseDevice> device,
+ sk_sp<SkImageFilter> imageFilter,
+ const SkPaint& paint)
+ : fDevice(std::move(device))
+ , fImageFilter(std::move(imageFilter))
+ , fPaint(paint)
+ , fDiscard(false) {
+ SkASSERT(fDevice);
+ // Any image filter should have been pulled out and stored in 'imageFilter' so that 'paint'
+ // can be used as-is to draw the result of the filter to the dst device.
+ SkASSERT(!fPaint.getImageFilter());
+}
+
+SkCanvas::BackImage::BackImage(sk_sp<SkSpecialImage> img, SkIPoint loc)
+ :fImage(img), fLoc(loc) {}
+SkCanvas::BackImage::BackImage(const BackImage&) = default;
+SkCanvas::BackImage::BackImage(BackImage&&) = default;
+SkCanvas::BackImage& SkCanvas::BackImage::operator=(const BackImage&) = default;
+SkCanvas::BackImage::~BackImage() = default;
+
+SkCanvas::MCRec::MCRec(SkBaseDevice* device) : fDevice(device) {
+ SkASSERT(fDevice);
+}
+
+SkCanvas::MCRec::MCRec(const MCRec* prev) : fDevice(prev->fDevice), fMatrix(prev->fMatrix) {
+ SkASSERT(fDevice);
+}
+
+SkCanvas::MCRec::~MCRec() {}
+
+void SkCanvas::MCRec::newLayer(sk_sp<SkBaseDevice> layerDevice,
+ sk_sp<SkImageFilter> filter,
+ const SkPaint& restorePaint) {
+ SkASSERT(!fBackImage);
+ fLayer = std::make_unique<Layer>(std::move(layerDevice), std::move(filter), restorePaint);
+ fDevice = fLayer->fDevice.get();
+}
+
+void SkCanvas::MCRec::reset(SkBaseDevice* device) {
+ SkASSERT(!fLayer);
+ SkASSERT(device);
+ SkASSERT(fDeferredSaveCount == 0);
+ fDevice = device;
+ fMatrix.setIdentity();
+}
+
+class SkCanvas::AutoUpdateQRBounds {
+public:
+ explicit AutoUpdateQRBounds(SkCanvas* canvas) : fCanvas(canvas) {
+ // pre-condition, fQuickRejectBounds and other state should be valid before anything
+ // modifies the device's clip.
+ fCanvas->validateClip();
+ }
+ ~AutoUpdateQRBounds() {
+ fCanvas->fQuickRejectBounds = fCanvas->computeDeviceClipBounds();
+ // post-condition, we should remain valid after re-computing the bounds
+ fCanvas->validateClip();
+ }
+
+private:
+ SkCanvas* fCanvas;
+
+ AutoUpdateQRBounds(AutoUpdateQRBounds&&) = delete;
+ AutoUpdateQRBounds(const AutoUpdateQRBounds&) = delete;
+ AutoUpdateQRBounds& operator=(AutoUpdateQRBounds&&) = delete;
+ AutoUpdateQRBounds& operator=(const AutoUpdateQRBounds&) = delete;
+};
+
+/////////////////////////////////////////////////////////////////////////////
+// Attempts to convert an image filter to its equivalent color filter, which if possible, modifies
+// the paint to compose the image filter's color filter into the paint's color filter slot.
+// Returns true if the paint has been modified.
+// Requires the paint to have an image filter and the copy-on-write be initialized.
+static bool image_to_color_filter(SkPaint* paint) {
+ SkASSERT(SkToBool(paint) && paint->getImageFilter());
+
+ SkColorFilter* imgCFPtr;
+ if (!paint->getImageFilter()->asAColorFilter(&imgCFPtr)) {
+ return false;
+ }
+ sk_sp<SkColorFilter> imgCF(imgCFPtr);
+
+ SkColorFilter* paintCF = paint->getColorFilter();
+ if (paintCF) {
+ // The paint has both a colorfilter(paintCF) and an imagefilter-that-is-a-colorfilter(imgCF)
+ // and we need to combine them into a single colorfilter.
+ imgCF = imgCF->makeComposed(sk_ref_sp(paintCF));
+ }
+
+ paint->setColorFilter(std::move(imgCF));
+ paint->setImageFilter(nullptr);
+ return true;
+}
+
+/**
+ * We implement ImageFilters for a given draw by creating a layer, then applying the
+ * imagefilter to the pixels of that layer (its backing surface/image), and then
+ * we call restore() to xfer that layer to the main canvas.
+ *
+ * 1. SaveLayer (with a paint containing the current imagefilter and xfermode)
+ * 2. Generate the src pixels:
+ * Remove the imagefilter and the xfermode from the paint that we (AutoDrawLooper)
+ * return (fPaint). We then draw the primitive (using srcover) into a cleared
+ * buffer/surface.
+ * 3. Restore the layer created in #1
+ * The imagefilter is passed the buffer/surface from the layer (now filled with the
+ * src pixels of the primitive). It returns a new "filtered" buffer, which we
+ * draw onto the previous layer using the xfermode from the original paint.
+ */
+class AutoLayerForImageFilter {
+public:
+ // "rawBounds" is the original bounds of the primitive about to be drawn, unmodified by the
+ // paint. It's used to determine the size of the offscreen layer for filters.
+ // If null, the clip will be used instead.
+ //
+ // Draw functions should use layer->paint() instead of the passed-in paint.
+ AutoLayerForImageFilter(SkCanvas* canvas,
+ const SkPaint& paint,
+ const SkRect* rawBounds = nullptr)
+ : fPaint(paint)
+ , fCanvas(canvas)
+ , fTempLayerForImageFilter(false) {
+ SkDEBUGCODE(fSaveCount = canvas->getSaveCount();)
+
+ if (fPaint.getImageFilter() && !image_to_color_filter(&fPaint)) {
+ // The draw paint has an image filter that couldn't be simplified to an equivalent
+ // color filter, so we have to inject an automatic saveLayer().
+ SkPaint restorePaint;
+ restorePaint.setImageFilter(fPaint.refImageFilter());
+ restorePaint.setBlender(fPaint.refBlender());
+
+ // Remove the restorePaint fields from our "working" paint
+ fPaint.setImageFilter(nullptr);
+ fPaint.setBlendMode(SkBlendMode::kSrcOver);
+
+ SkRect storage;
+ if (rawBounds && fPaint.canComputeFastBounds()) {
+ // Make rawBounds include all paint outsets except for those due to image filters.
+ // At this point, fPaint's image filter has been moved to 'restorePaint'.
+ SkASSERT(!fPaint.getImageFilter());
+ rawBounds = &fPaint.computeFastBounds(*rawBounds, &storage);
+ }
+
+ canvas->fSaveCount += 1;
+ (void)canvas->internalSaveLayer(SkCanvas::SaveLayerRec(rawBounds, &restorePaint),
+ SkCanvas::kFullLayer_SaveLayerStrategy);
+ fTempLayerForImageFilter = true;
+ }
+ }
+
+ AutoLayerForImageFilter(const AutoLayerForImageFilter&) = delete;
+ AutoLayerForImageFilter& operator=(const AutoLayerForImageFilter&) = delete;
+ AutoLayerForImageFilter(AutoLayerForImageFilter&&) = default;
+ AutoLayerForImageFilter& operator=(AutoLayerForImageFilter&&) = default;
+
+ ~AutoLayerForImageFilter() {
+ if (fTempLayerForImageFilter) {
+ fCanvas->fSaveCount -= 1;
+ fCanvas->internalRestore();
+ }
+ SkASSERT(fCanvas->getSaveCount() == fSaveCount);
+ }
+
+ const SkPaint& paint() const { return fPaint; }
+
+private:
+ SkPaint fPaint;
+ SkCanvas* fCanvas;
+ bool fTempLayerForImageFilter;
+
+ SkDEBUGCODE(int fSaveCount;)
+};
+
+std::optional<AutoLayerForImageFilter> SkCanvas::aboutToDraw(
+ SkCanvas* canvas,
+ const SkPaint& paint,
+ const SkRect* rawBounds,
+ CheckForOverwrite checkOverwrite,
+ ShaderOverrideOpacity overrideOpacity)
+{
+ if (checkOverwrite == CheckForOverwrite::kYes) {
+ if (!this->predrawNotify(rawBounds, &paint, overrideOpacity)) {
+ return std::nullopt;
+ }
+ } else {
+ if (!this->predrawNotify()) {
+ return std::nullopt;
+ }
+ }
+ return std::optional<AutoLayerForImageFilter>(std::in_place, canvas, paint, rawBounds);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::resetForNextPicture(const SkIRect& bounds) {
+ this->restoreToCount(1);
+
+ // We're peering through a lot of structs here. Only at this scope do we
+ // know that the device is a SkNoPixelsDevice.
+ SkASSERT(fBaseDevice->isNoPixelsDevice());
+ static_cast<SkNoPixelsDevice*>(fBaseDevice.get())->resetForNextPicture(bounds);
+ fMCRec->reset(fBaseDevice.get());
+ fQuickRejectBounds = this->computeDeviceClipBounds();
+}
+
+void SkCanvas::init(sk_sp<SkBaseDevice> device) {
+ // SkCanvas.h declares internal storage for the hidden struct MCRec, and this
+ // assert ensures it's sufficient. <= is used because the struct has pointer fields, so the
+ // declared size is an upper bound across architectures. When the size is smaller, more stack
+ static_assert(sizeof(MCRec) <= kMCRecSize);
+
+ if (!device) {
+ device = sk_make_sp<SkNoPixelsDevice>(SkIRect::MakeEmpty(), fProps);
+ }
+
+ // From this point on, SkCanvas will always have a device
+ SkASSERT(device);
+
+ fSaveCount = 1;
+ fMCRec = new (fMCStack.push_back()) MCRec(device.get());
+
+ // The root device and the canvas should always have the same pixel geometry
+ SkASSERT(fProps.pixelGeometry() == device->surfaceProps().pixelGeometry());
+
+ fSurfaceBase = nullptr;
+ fBaseDevice = std::move(device);
+ fScratchGlyphRunBuilder = std::make_unique<sktext::GlyphRunBuilder>();
+ fQuickRejectBounds = this->computeDeviceClipBounds();
+}
+
+SkCanvas::SkCanvas() : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage)) {
+ this->init(nullptr);
+}
+
+SkCanvas::SkCanvas(int width, int height, const SkSurfaceProps* props)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfacePropsCopyOrDefault(props)) {
+ this->init(sk_make_sp<SkNoPixelsDevice>(
+ SkIRect::MakeWH(std::max(width, 0), std::max(height, 0)), fProps));
+}
+
+SkCanvas::SkCanvas(const SkIRect& bounds)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage)) {
+ SkIRect r = bounds.isEmpty() ? SkIRect::MakeEmpty() : bounds;
+ this->init(sk_make_sp<SkNoPixelsDevice>(r, fProps));
+}
+
+SkCanvas::SkCanvas(sk_sp<SkBaseDevice> device)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(device->surfaceProps()) {
+ this->init(std::move(device));
+}
+
+SkCanvas::~SkCanvas() {
+ // Mark all pending layers to be discarded during restore (rather than drawn)
+ SkDeque::Iter iter(fMCStack, SkDeque::Iter::kFront_IterStart);
+ for (;;) {
+ MCRec* rec = (MCRec*)iter.next();
+ if (!rec) {
+ break;
+ }
+ if (rec->fLayer) {
+ rec->fLayer->fDiscard = true;
+ }
+ }
+
+ // free up the contents of our deque
+ this->restoreToCount(1); // restore everything but the last
+ this->internalRestore(); // restore the last, since we're going away
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::flush() {
+ this->onFlush();
+}
+
+void SkCanvas::onFlush() {
+#if defined(SK_GANESH)
+ auto dContext = GrAsDirectContext(this->recordingContext());
+
+ if (dContext) {
+ dContext->flushAndSubmit();
+ }
+#endif
+}
+
+SkSurface* SkCanvas::getSurface() const {
+ return fSurfaceBase;
+}
+
+SkISize SkCanvas::getBaseLayerSize() const {
+ return this->baseDevice()->imageInfo().dimensions();
+}
+
+SkBaseDevice* SkCanvas::topDevice() const {
+ SkASSERT(fMCRec->fDevice);
+ return fMCRec->fDevice;
+}
+
+bool SkCanvas::readPixels(const SkPixmap& pm, int x, int y) {
+ return pm.addr() && this->baseDevice()->readPixels(pm, x, y);
+}
+
+bool SkCanvas::readPixels(const SkImageInfo& dstInfo, void* dstP, size_t rowBytes, int x, int y) {
+ return this->readPixels({ dstInfo, dstP, rowBytes}, x, y);
+}
+
+bool SkCanvas::readPixels(const SkBitmap& bm, int x, int y) {
+ SkPixmap pm;
+ return bm.peekPixels(&pm) && this->readPixels(pm, x, y);
+}
+
+bool SkCanvas::writePixels(const SkBitmap& bitmap, int x, int y) {
+ SkPixmap pm;
+ if (bitmap.peekPixels(&pm)) {
+ return this->writePixels(pm.info(), pm.addr(), pm.rowBytes(), x, y);
+ }
+ return false;
+}
+
+bool SkCanvas::writePixels(const SkImageInfo& srcInfo, const void* pixels, size_t rowBytes,
+ int x, int y) {
+ SkBaseDevice* device = this->baseDevice();
+
+ // This check gives us an early out and prevents generation ID churn on the surface.
+ // This is purely optional: it is a subset of the checks performed by SkWritePixelsRec.
+ SkIRect srcRect = SkIRect::MakeXYWH(x, y, srcInfo.width(), srcInfo.height());
+ if (!srcRect.intersect({0, 0, device->width(), device->height()})) {
+ return false;
+ }
+
+ // Tell our owning surface to bump its generation ID.
+ const bool completeOverwrite = srcRect.size() == device->imageInfo().dimensions();
+ if (!this->predrawNotify(completeOverwrite)) {
+ return false;
+ }
+
+ // This can still fail, most notably in the case of a invalid color type or alpha type
+ // conversion. We could pull those checks into this function and avoid the unnecessary
+ // generation ID bump. But then we would be performing those checks twice, since they
+ // are also necessary at the bitmap/pixmap entry points.
+ return device->writePixels({srcInfo, pixels, rowBytes}, x, y);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::checkForDeferredSave() {
+ if (fMCRec->fDeferredSaveCount > 0) {
+ this->doSave();
+ }
+}
+
+int SkCanvas::getSaveCount() const {
+#ifdef SK_DEBUG
+ int count = 0;
+ SkDeque::Iter iter(fMCStack, SkDeque::Iter::kFront_IterStart);
+ for (;;) {
+ const MCRec* rec = (const MCRec*)iter.next();
+ if (!rec) {
+ break;
+ }
+ count += 1 + rec->fDeferredSaveCount;
+ }
+ SkASSERT(count == fSaveCount);
+#endif
+ return fSaveCount;
+}
+
+int SkCanvas::save() {
+ fSaveCount += 1;
+ fMCRec->fDeferredSaveCount += 1;
+ return this->getSaveCount() - 1; // return our prev value
+}
+
+void SkCanvas::doSave() {
+ this->willSave();
+
+ SkASSERT(fMCRec->fDeferredSaveCount > 0);
+ fMCRec->fDeferredSaveCount -= 1;
+ this->internalSave();
+}
+
+void SkCanvas::restore() {
+ if (fMCRec->fDeferredSaveCount > 0) {
+ SkASSERT(fSaveCount > 1);
+ fSaveCount -= 1;
+ fMCRec->fDeferredSaveCount -= 1;
+ } else {
+ // check for underflow
+ if (fMCStack.count() > 1) {
+ this->willRestore();
+ SkASSERT(fSaveCount > 1);
+ fSaveCount -= 1;
+ this->internalRestore();
+ this->didRestore();
+ }
+ }
+}
+
+void SkCanvas::restoreToCount(int count) {
+ // safety check
+ if (count < 1) {
+ count = 1;
+ }
+
+ int n = this->getSaveCount() - count;
+ for (int i = 0; i < n; ++i) {
+ this->restore();
+ }
+}
+
+void SkCanvas::internalSave() {
+ fMCRec = new (fMCStack.push_back()) MCRec(fMCRec);
+
+ this->topDevice()->save();
+}
+
+int SkCanvas::saveLayer(const SkRect* bounds, const SkPaint* paint) {
+ return this->saveLayer(SaveLayerRec(bounds, paint, 0));
+}
+
+int SkCanvas::saveLayer(const SaveLayerRec& rec) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (rec.fPaint && rec.fPaint->nothingToDraw()) {
+ // no need for the layer (or any of the draws until the matching restore())
+ this->save();
+ this->clipRect({0,0,0,0});
+ } else {
+ SaveLayerStrategy strategy = this->getSaveLayerStrategy(rec);
+ fSaveCount += 1;
+ this->internalSaveLayer(rec, strategy);
+ }
+ return this->getSaveCount() - 1;
+}
+
// saveBehind implementation for axis-aligned bounds: snapshots the covered
// region so it can be restored later. Devolves to a plain save() when the
// requested bounds don't touch the current clip.
int SkCanvas::only_axis_aligned_saveBehind(const SkRect* bounds) {
    if (bounds && !this->getLocalClipBounds().intersects(*bounds)) {
        // Assuming clips never expand, if the request bounds is outside of the current clip
        // there is no need to copy/restore the area, so just devolve back to a regular save.
        this->save();
    } else {
        bool doTheWork = this->onDoSaveBehind(bounds);
        fSaveCount += 1;
        this->internalSave();
        if (doTheWork) {
            this->internalSaveBehind(bounds);
        }
    }
    return this->getSaveCount() - 1;
}
+
+// In our current design/features, we should never have a layer (src) in a different colorspace
+// than its parent (dst), so we assert that here. This is called out from other asserts, in case
+// we add some feature in the future to allow a given layer/imagefilter to operate in a specific
+// colorspace.
+static void check_drawdevice_colorspaces(SkColorSpace* src, SkColorSpace* dst) {
+ SkASSERT(src == dst);
+}
+
+// Helper function to compute the center reference point used for scale decomposition under
+// non-linear transformations.
+static skif::ParameterSpace<SkPoint> compute_decomposition_center(
+ const SkMatrix& dstToLocal,
+ const skif::ParameterSpace<SkRect>* contentBounds,
+ const skif::DeviceSpace<SkIRect>& targetOutput) {
+ // Will use the inverse and center of the device bounds if the content bounds aren't provided.
+ SkRect rect = contentBounds ? SkRect(*contentBounds) : SkRect::Make(SkIRect(targetOutput));
+ SkPoint center = {rect.centerX(), rect.centerY()};
+ if (!contentBounds) {
+ // Theoretically, the inverse transform could put center's homogeneous coord behind W = 0,
+ // but that case is handled automatically in Mapping::decomposeCTM later.
+ dstToLocal.mapPoints(&center, 1);
+ }
+
+ return skif::ParameterSpace<SkPoint>(center);
+}
+
// Compute suitable transformations and layer bounds for a new layer that will be used as the source
// input into 'filter' before being drawn into 'dst' via the returned skif::Mapping.
// Null filters are permitted and act as the identity. The returned mapping will be compatible with
// the image filter.
//
// Returns an empty rect if the layer wouldn't draw anything after filtering.
static std::pair<skif::Mapping, skif::LayerSpace<SkIRect>> get_layer_mapping_and_bounds(
        const SkImageFilter* filter,
        const SkMatrix& localToDst,
        const skif::DeviceSpace<SkIRect>& targetOutput,
        const skif::ParameterSpace<SkRect>* contentBounds = nullptr,
        bool mustCoverDst = true,
        SkScalar scaleFactor = 1.0f) {
    // Shared failure result: identity mapping with empty layer bounds.
    auto failedMapping = []() {
        return std::make_pair<skif::Mapping, skif::LayerSpace<SkIRect>>(
                {}, skif::LayerSpace<SkIRect>::Empty());
    };

    SkMatrix dstToLocal;
    if (!localToDst.isFinite() ||
        !localToDst.invert(&dstToLocal)) {
        return failedMapping();
    }

    skif::ParameterSpace<SkPoint> center =
            compute_decomposition_center(dstToLocal, contentBounds, targetOutput);
    // *after* possibly getting a representative point from the provided content bounds, it might
    // be necessary to discard the bounds for subsequent layer calculations.
    if (mustCoverDst) {
        contentBounds = nullptr;
    }

    // Determine initial mapping and a reasonable maximum dimension to prevent layer-to-device
    // transforms with perspective and skew from triggering excessive buffer allocations.
    skif::Mapping mapping;
    if (!mapping.decomposeCTM(localToDst, filter, center)) {
        return failedMapping();
    }
    // Push scale factor into layer matrix and device matrix (net no change, but the layer will have
    // its resolution adjusted in comparison to the final device).
    if (scaleFactor != 1.0f &&
        !mapping.adjustLayerSpace(SkMatrix::Scale(scaleFactor, scaleFactor))) {
        return failedMapping();
    }

    // Perspective and skew could exceed this since mapping.deviceToLayer(targetOutput) is
    // theoretically unbounded under those conditions. Under a 45 degree rotation, a layer needs to
    // be 2X larger per side of the prior device in order to fully cover it. We use the max of that
    // and 2048 for a reasonable upper limit (this allows small layers under extreme transforms to
    // use more relative resolution than a larger layer).
    static const int kMinDimThreshold = 2048;
    int maxLayerDim = std::max(Sk64_pin_to_s32(2 * std::max(SkIRect(targetOutput).width64(),
                                                            SkIRect(targetOutput).height64())),
                               kMinDimThreshold);

    skif::LayerSpace<SkIRect> layerBounds;
    if (filter) {
        layerBounds = as_IFB(filter)->getInputBounds(mapping, targetOutput, contentBounds);
        // When a filter is involved, the layer size may be larger than the default maxLayerDim due
        // to required inputs for filters (e.g. a displacement map with a large radius).
        if (layerBounds.width() > maxLayerDim || layerBounds.height() > maxLayerDim) {
            // Recompute the ideal input bounds without the scale decomposition to see how large
            // the layer truly needs to be for the filter.
            skif::Mapping idealMapping{mapping.layerMatrix()};
            auto idealLayerBounds = as_IFB(filter)->getInputBounds(idealMapping, targetOutput,
                                                                   contentBounds);
            maxLayerDim = std::max(std::max(idealLayerBounds.width(), idealLayerBounds.height()),
                                   maxLayerDim);
        }
    } else {
        layerBounds = mapping.deviceToLayer(targetOutput);
        if (contentBounds) {
            // For better or for worse, user bounds currently act as a hard clip on the layer's
            // extent (i.e., they implement the CSS filter-effects 'filter region' feature).
            skif::LayerSpace<SkIRect> knownBounds = mapping.paramToLayer(*contentBounds).roundOut();
            if (!layerBounds.intersect(knownBounds)) {
                return failedMapping();
            }
        }
    }

    // If the layer is still too large, shrink it and fold the scale-down into the mapping.
    if (layerBounds.width() > maxLayerDim || layerBounds.height() > maxLayerDim) {
        skif::LayerSpace<SkIRect> newLayerBounds(
                SkIRect::MakeWH(std::min(layerBounds.width(), maxLayerDim),
                                std::min(layerBounds.height(), maxLayerDim)));
        SkMatrix adjust = SkMatrix::MakeRectToRect(SkRect::Make(SkIRect(layerBounds)),
                                                   SkRect::Make(SkIRect(newLayerBounds)),
                                                   SkMatrix::kFill_ScaleToFit);
        if (!mapping.adjustLayerSpace(adjust)) {
            return failedMapping();
        } else {
            layerBounds = newLayerBounds;
        }
    }

    return {mapping, layerBounds};
}
+
+// Ideally image filters operate in the dst color type, but if there is insufficient alpha bits
+// we move some bits from color channels into the alpha channel since that can greatly improve
+// the quality of blurs and other filters.
+static SkColorType image_filter_color_type(SkImageInfo dstInfo) {
+ if (dstInfo.bytesPerPixel() <= 4 &&
+ dstInfo.colorType() != kRGBA_8888_SkColorType &&
+ dstInfo.colorType() != kBGRA_8888_SkColorType) {
+ // "Upgrade" A8, G8, 565, 4444, 1010102, 101010x, and 888x to 8888
+ return kN32_SkColorType;
+ } else {
+ return dstInfo.colorType();
+ }
+}
+
+static bool draw_layer_as_sprite(const SkMatrix& matrix, const SkISize& size) {
+ // Assume anti-aliasing and highest valid filter mode (linear) for drawing layers and image
+ // filters. If the layer can be drawn as a sprite, these can be downgraded.
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ SkSamplingOptions sampling{SkFilterMode::kLinear};
+ return SkTreatAsSprite(matrix, size, sampling, paint.isAntiAlias());
+}
+
// Draws the contents of 'src' into 'dst', evaluating 'filter' (which may be
// null, acting as identity) along the way. Used both for regular saveLayer
// restores (src = layer, dst = parent) and for backdrop filters (src = parent,
// dst = layer). 'compat' indicates whether the two devices are already known
// to share a filter-compatible coordinate system; 'scaleFactor' optionally
// renders the source at reduced resolution (backdrop scaling).
void SkCanvas::internalDrawDeviceWithFilter(SkBaseDevice* src,
                                            SkBaseDevice* dst,
                                            const SkImageFilter* filter,
                                            const SkPaint& paint,
                                            DeviceCompatibleWithFilter compat,
                                            SkScalar scaleFactor) {
    check_drawdevice_colorspaces(dst->imageInfo().colorSpace(),
                                 src->imageInfo().colorSpace());
    sk_sp<SkColorSpace> filterColorSpace = dst->imageInfo().refColorSpace(); // == src.refColorSpace

    // 'filterColorType' ends up being the actual color type of the layer, so image filtering is
    // effectively done in the layer's format. We get there in a roundabout way due to handling both
    // regular and backdrop filters:
    //  - For regular filters, 'src' is the layer and 'dst' is the parent device. But the layer
    //    was constructed with a color type equal to image_filter_color_type(dst), so this matches
    //    the layer.
    //  - For backdrop filters, 'src' is the parent device and 'dst' is the layer, which was already
    //    constructed as image_filter_color_type(src). Calling image_filter_color_type twice does
    //    not change the color type, so it remains the color type of the layer.
    const SkColorType filterColorType = image_filter_color_type(dst->imageInfo());

    // 'filter' sees the src device's buffer as the implicit input image, and processes the image
    // in this device space (referred to as the "layer" space). However, the filter
    // parameters need to respect the current matrix, which is not necessarily the local matrix that
    // was set on 'src' (e.g. because we've popped src off the stack already).
    // TODO (michaelludwig): Stay in SkM44 once skif::Mapping supports SkM44 instead of SkMatrix.
    SkMatrix localToSrc = (src->globalToDevice() * fMCRec->fMatrix).asM33();
    SkISize srcDims = src->imageInfo().dimensions();

    // Whether or not we need to make a transformed tmp image from 'src', and what that transform is
    bool needsIntermediateImage = false;
    SkMatrix srcToIntermediate;

    skif::Mapping mapping;
    skif::LayerSpace<SkIRect> requiredInput;
    if (compat == DeviceCompatibleWithFilter::kYes) {
        // Just use the relative transform from src to dst and the src's whole image, since
        // internalSaveLayer should have already determined what was necessary. We explicitly
        // construct the inverse (dst->src) to avoid the case where src's and dst's coord transforms
        // were individually invertible by SkM44::invert() but their product is considered not
        // invertible by SkMatrix::invert(). When this happens the matrices are already poorly
        // conditioned so getRelativeTransform() gives us something reasonable.
        SkASSERT(scaleFactor == 1.0f);
        mapping = skif::Mapping(src->getRelativeTransform(*dst),
                                dst->getRelativeTransform(*src),
                                localToSrc);
        requiredInput = skif::LayerSpace<SkIRect>(SkIRect::MakeSize(srcDims));
        SkASSERT(!requiredInput.isEmpty());
    } else {
        // Compute the image filter mapping by decomposing the local->device matrix of dst and
        // re-determining the required input.
        std::tie(mapping, requiredInput) = get_layer_mapping_and_bounds(
                filter, dst->localToDevice(), skif::DeviceSpace<SkIRect>(dst->devClipBounds()),
                nullptr, true, SkTPin(scaleFactor, 0.f, 1.f));
        if (requiredInput.isEmpty()) {
            return;
        }

        // The above mapping transforms from local to dst's device space, where the layer space
        // represents the intermediate buffer. Now we need to determine the transform from src to
        // intermediate to prepare the input to the filter.
        if (!localToSrc.invert(&srcToIntermediate)) {
            return;
        }
        srcToIntermediate.postConcat(mapping.layerMatrix());
        if (draw_layer_as_sprite(srcToIntermediate, srcDims)) {
            // src differs from intermediate by just an integer translation, so it can be applied
            // automatically when taking a subset of src if we update the mapping.
            skif::LayerSpace<SkIPoint> srcOrigin({(int) srcToIntermediate.getTranslateX(),
                                                  (int) srcToIntermediate.getTranslateY()});
            mapping.applyOrigin(srcOrigin);
            requiredInput.offset(-srcOrigin);
        } else {
            // The contents of 'src' will be drawn to an intermediate buffer using srcToIntermediate
            // and that buffer will be the input to the image filter.
            needsIntermediateImage = true;
        }
    }

    sk_sp<SkSpecialImage> filterInput;
    if (!needsIntermediateImage) {
        // The src device can be snapped directly
        skif::LayerSpace<SkIRect> srcSubset(SkIRect::MakeSize(srcDims));
        if (srcSubset.intersect(requiredInput)) {
            filterInput = src->snapSpecial(SkIRect(srcSubset));

            // TODO: For now image filter input images need to have a (0,0) origin. The required
            // input's top left has been baked into srcSubset so we use that as the image origin.
            mapping.applyOrigin(srcSubset.topLeft());
        }
    } else {
        // We need to produce a temporary image that is equivalent to 'src' but transformed to
        // a coordinate space compatible with the image filter
        SkASSERT(compat == DeviceCompatibleWithFilter::kUnknown);
        SkRect srcRect;
        if (!SkMatrixPriv::InverseMapRect(srcToIntermediate, &srcRect,
                                          SkRect::Make(SkIRect(requiredInput)))) {
            return;
        }

        if (!srcRect.intersect(SkRect::Make(srcDims))) {
            return;
        }
        SkIRect srcSubset = skif::RoundOut(srcRect);

        if (srcToIntermediate.isScaleTranslate()) {
            // The transform is from srcRect to requiredInput, but srcRect may have been reduced
            // to the src dimensions, so map srcSubset back to the intermediate space to get the
            // appropriate scaled dimensions for snapScaledSpecial.
            skif::LayerSpace<SkIRect> requiredSubset(
                    skif::RoundOut(srcToIntermediate.mapRect(srcRect)));
            filterInput = src->snapSpecialScaled(srcSubset,
                                                 {requiredSubset.width(), requiredSubset.height()});
            if (filterInput) {
                // TODO: Like the non-intermediate case, we need to apply the image origin
                mapping.applyOrigin(requiredSubset.topLeft());
            } // else fall through and apply transform using a draw
        }

        if (!filterInput) {
            // Either a complex transform or the scaled copy failed so do a copy-as-draw fallback.
            sk_sp<SkSpecialImage> srcImage = src->snapSpecial(srcSubset);
            if (!srcImage) {
                return;
            }
            // Make a new surface and draw 'srcImage' into it with the srcToIntermediate transform
            // to produce the final input image for the filter
            SkBaseDevice::CreateInfo info(SkImageInfo::Make(requiredInput.width(),
                                                            requiredInput.height(),
                                                            filterColorType,
                                                            kPremul_SkAlphaType,
                                                            filterColorSpace),
                                          SkPixelGeometry::kUnknown_SkPixelGeometry,
                                          SkBaseDevice::TileUsage::kNever_TileUsage,
                                          fAllocator.get());
            sk_sp<SkBaseDevice> intermediateDevice(src->onCreateDevice(info, &paint));
            if (!intermediateDevice) {
                return;
            }
            intermediateDevice->setOrigin(SkM44(srcToIntermediate),
                                          requiredInput.left(), requiredInput.top());

            // We use drawPaint to fill the entire device with the src input + clamp tiling, which
            // extends the backdrop's edge pixels to the parts of 'requiredInput' that map offscreen
            // Without this, the intermediateDevice would contain transparent pixels that may then
            // infect blurs and other filters with large kernels.
            SkPaint imageFill;
            imageFill.setShader(srcImage->asShader(SkTileMode::kClamp,
                                                   SkSamplingOptions{SkFilterMode::kLinear},
                                                   SkMatrix::Translate(srcSubset.topLeft())));
            intermediateDevice->drawPaint(imageFill);
            filterInput = intermediateDevice->snapSpecial();

            // TODO: Like the non-intermediate case, we need to apply the image origin.
            mapping.applyOrigin(requiredInput.topLeft());
        }
    }

    if (filterInput) {
        // Downgrade to nearest-neighbor sampling when the final draw is pixel-aligned.
        const bool use_nn =
                draw_layer_as_sprite(mapping.layerToDevice(), filterInput->subset().size());
        SkSamplingOptions sampling{use_nn ? SkFilterMode::kNearest : SkFilterMode::kLinear};
        if (filter) {
            dst->drawFilteredImage(mapping, filterInput.get(), filterColorType, filter,
                                   sampling, paint);
        } else {
            dst->drawSpecial(filterInput.get(), mapping.layerToDevice(), sampling, paint);
        }
    }
}
+
// This is similar to image_to_color_filter used by AutoLayerForImageFilter, but with key changes:
//  - image_to_color_filter requires the entire image filter DAG to be represented as a color filter
//    that does not affect transparent black (SkImageFilter::asAColorFilter)
//  - when that is met, the image filter's CF is composed around any CF that was on the draw's paint
//    since for a draw, the color filtering happens before any image filtering
//  - optimize_layer_filter only applies to the last node and does not care about transparent black
//    since a layer is being made regardless (SkImageFilter::isColorFilterNode)
//  - any extracted CF is composed inside the restore paint's CF because image filters are evaluated
//    before the color filter of a restore paint for layers.
//
// Assumes that 'filter', and thus its inputs, will remain owned by the caller. Modifies 'paint'
// to have the updated color filter and returns the image filter to evaluate on restore.
// TODO(michaelludwig): skbug.com/12083, once this guard goes away, the coversDevice arg can go away
static const SkImageFilter* optimize_layer_filter(const SkImageFilter* filter, SkPaint* paint,
                                                  bool* coversDevice=nullptr) {
    SkASSERT(paint);
    SkColorFilter* cf;
    if (filter && filter->isColorFilterNode(&cf)) {
        // 'cf' was ref'd by isColorFilterNode; adopt it.
        sk_sp<SkColorFilter> inner(cf);
        if (paint->getAlphaf() < 1.f) {
            // The paint's alpha is applied after the image filter but before the paint's color
            // filter. If there is transparency, we have to apply it between the two filters.
            // FIXME: The Blend CF should allow composing directly at construction.
            inner = SkColorFilters::Compose(
                    SkColorFilters::Blend(/*src*/paint->getColor4f(), nullptr, SkBlendMode::kDstIn),
                    /*dst*/std::move(inner));
            paint->setAlphaf(1.f);
        }

        // Check if the once-wrapped color filter affects transparent black *before* we combine
        // it with any original color filter on the paint.
        if (coversDevice) {
#if defined(SK_LEGACY_LAYER_BOUNDS_EXPANSION)
            *coversDevice = as_CFB(inner)->affectsTransparentBlack();
#else
            *coversDevice = false;
#endif
        }

        paint->setColorFilter(SkColorFilters::Compose(paint->refColorFilter(), std::move(inner)));
        SkASSERT(filter->countInputs() == 1);
        return filter->getInput(0);
    } else {
        if (coversDevice) {
            *coversDevice = false;
        }
        return filter;
    }
}
+
// If there is a backdrop filter, or if the restore paint has a color filter or blend mode that
// affects transparent black, then the new layer must be sized such that it covers the entire device
// clip bounds of the prior device (otherwise edges of the temporary layer would be visible).
// See skbug.com/8783
static bool must_cover_prior_device(const SkImageFilter* backdrop,
                                    const SkPaint& restorePaint) {
#if defined(SK_LEGACY_LAYER_BOUNDS_EXPANSION)
    // Legacy behavior: only a backdrop filter forces full coverage.
    return SkToBool(backdrop);
#else
    const SkColorFilter* cf = restorePaint.getColorFilter();
    if (backdrop || (cf && as_CFB(cf)->affectsTransparentBlack())) {
        // Backdrop image filters always affect the entire (clip-limited) layer. A color filter
        // affecting transparent black will colorize pixels that are outside the drawn bounds hint.
        return true;
    }
    // A custom blender is assumed to modify transparent black; some fixed blend modes also modify
    // transparent black and the whole layer must be used for the same reason as color filters.
    if (auto blendMode = restorePaint.asBlendMode()) {
        SkBlendModeCoeff src, dst;
        if (SkBlendMode_AsCoeff(*blendMode, &src, &dst)) {
            // If the source is (0,0,0,0), then dst is preserved as long as its coefficient
            // evaluates to 1.0. This is true for kOne, kISA, and kISC. Anything else means the
            // blend mode affects transparent black.
            return dst != SkBlendModeCoeff::kOne &&
                   dst != SkBlendModeCoeff::kISA &&
                   dst != SkBlendModeCoeff::kISC;
        } else {
            // else an advanced blend mode, which preserve transparent black
            return false;
        }
    } else {
        // Blenders that aren't blend modes are assumed to modify transparent black.
        return true;
    }
#endif
}
+
// Core saveLayer implementation: pushes a save record, sizes and allocates the
// layer device (or an SkNoPixelsDevice fallback), wires its coordinate system
// to the parent device, and optionally initializes it from the backdrop.
void SkCanvas::internalSaveLayer(const SaveLayerRec& rec, SaveLayerStrategy strategy) {
    TRACE_EVENT0("skia", TRACE_FUNC);
    // Do this before we create the layer. We don't call the public save() since that would invoke a
    // possibly overridden virtual.
    this->internalSave();

    if (this->isClipEmpty()) {
        // Early out if the layer wouldn't draw anything
        return;
    }

    // Build up the paint for restoring the layer, taking only the pieces of rec.fPaint that are
    // relevant. Filtering is automatically chosen in internalDrawDeviceWithFilter based on the
    // device's coordinate space.
    SkPaint restorePaint(rec.fPaint ? *rec.fPaint : SkPaint());
    restorePaint.setMaskFilter(nullptr);  // mask filters are ignored for saved layers
    restorePaint.setImageFilter(nullptr); // the image filter is held separately
    // Smooth non-axis-aligned layer edges; this automatically downgrades to non-AA for aligned
    // layer restores. This is done to match legacy behavior where the post-applied MatrixTransform
    // bilerp also smoothed cropped edges. See skbug.com/11252
    restorePaint.setAntiAlias(true);

    bool optimizedCFAffectsTransparent;
    const SkImageFilter* filter = optimize_layer_filter(
            rec.fPaint ? rec.fPaint->getImageFilter() : nullptr, &restorePaint,
            &optimizedCFAffectsTransparent);

#if !defined(SK_LEGACY_LAYER_BOUNDS_EXPANSION)
    SkASSERT(!optimizedCFAffectsTransparent); // shouldn't be needed by new code
#endif

    // Size the new layer relative to the prior device, which may already be aligned for filters.
    SkBaseDevice* priorDevice = this->topDevice();
    skif::Mapping newLayerMapping;
    skif::LayerSpace<SkIRect> layerBounds;
    std::tie(newLayerMapping, layerBounds) = get_layer_mapping_and_bounds(
            filter, priorDevice->localToDevice(),
            skif::DeviceSpace<SkIRect>(priorDevice->devClipBounds()),
            skif::ParameterSpace<SkRect>::Optional(rec.fBounds),
            must_cover_prior_device(rec.fBackdrop, restorePaint) || optimizedCFAffectsTransparent);

    auto abortLayer = [this]() {
        // The filtered content would not draw anything, or the new device space has an invalid
        // coordinate system, in which case we mark the current top device as empty so that nothing
        // draws until the canvas is restored past this saveLayer.
        AutoUpdateQRBounds aqr(this);
        this->topDevice()->clipRect(SkRect::MakeEmpty(), SkClipOp::kIntersect, /* aa */ false);
    };

    if (layerBounds.isEmpty()) {
        abortLayer();
        return;
    }

    sk_sp<SkBaseDevice> newDevice;
    if (strategy == kFullLayer_SaveLayerStrategy) {
        SkASSERT(!layerBounds.isEmpty());

        SkColorType layerColorType = SkToBool(rec.fSaveLayerFlags & kF16ColorType)
                                             ? kRGBA_F16_SkColorType
                                             : image_filter_color_type(priorDevice->imageInfo());
        SkImageInfo info = SkImageInfo::Make(layerBounds.width(), layerBounds.height(),
                                             layerColorType, kPremul_SkAlphaType,
                                             priorDevice->imageInfo().refColorSpace());

        SkPixelGeometry geo = rec.fSaveLayerFlags & kPreserveLCDText_SaveLayerFlag
                                      ? fProps.pixelGeometry()
                                      : kUnknown_SkPixelGeometry;
        const auto createInfo = SkBaseDevice::CreateInfo(info, geo, SkBaseDevice::kNever_TileUsage,
                                                         fAllocator.get());
        // Use the original paint as a hint so that it includes the image filter
        newDevice.reset(priorDevice->onCreateDevice(createInfo, rec.fPaint));
    }

    bool initBackdrop = (rec.fSaveLayerFlags & kInitWithPrevious_SaveLayerFlag) || rec.fBackdrop;
    if (!newDevice) {
        // Either we weren't meant to allocate a full layer, or the full layer creation failed.
        // Using an explicit NoPixelsDevice lets us reflect what the layer state would have been
        // on success (or kFull_LayerStrategy) while squashing draw calls that target something that
        // doesn't exist.
        newDevice = sk_make_sp<SkNoPixelsDevice>(SkIRect::MakeWH(layerBounds.width(),
                                                                 layerBounds.height()),
                                                 fProps, this->imageInfo().refColorSpace());
        initBackdrop = false;
    }

    // Configure device to match determined mapping for any image filters.
    // The setDeviceCoordinateSystem applies the prior device's global transform since
    // 'newLayerMapping' only defines the transforms between the two devices and it must be updated
    // to the global coordinate system.
    newDevice->setDeviceCoordinateSystem(
            priorDevice->deviceToGlobal() * SkM44(newLayerMapping.layerToDevice()),
            SkM44(newLayerMapping.deviceToLayer()) * priorDevice->globalToDevice(),
            SkM44(newLayerMapping.layerMatrix()),
            layerBounds.left(),
            layerBounds.top());

    if (initBackdrop) {
        SkPaint backdropPaint;
        const SkImageFilter* backdropFilter = optimize_layer_filter(rec.fBackdrop, &backdropPaint);
        // The new device was constructed to be compatible with 'filter', not necessarily
        // 'rec.fBackdrop', so allow DrawDeviceWithFilter to transform the prior device contents
        // if necessary to evaluate the backdrop filter. If no filters are involved, then the
        // devices differ by integer translations and are always compatible.
        bool scaleBackdrop = rec.fExperimentalBackdropScale != 1.0f;
        auto compat = (filter || backdropFilter || scaleBackdrop)
                ? DeviceCompatibleWithFilter::kUnknown : DeviceCompatibleWithFilter::kYes;
        this->internalDrawDeviceWithFilter(priorDevice,     // src
                                           newDevice.get(), // dst
                                           backdropFilter,
                                           backdropPaint,
                                           compat,
                                           rec.fExperimentalBackdropScale);
    }

    // Record the layer on the save record and refresh the cached quick-reject bounds.
    fMCRec->newLayer(std::move(newDevice), sk_ref_sp(filter), restorePaint);
    fQuickRejectBounds = this->computeDeviceClipBounds();
}
+
+int SkCanvas::saveLayerAlphaf(const SkRect* bounds, float alpha) {
+ if (alpha >= 1.0f) {
+ return this->saveLayer(bounds, nullptr);
+ } else {
+ SkPaint tmpPaint;
+ tmpPaint.setAlphaf(alpha);
+ return this->saveLayer(bounds, &tmpPaint);
+ }
+}
+
// Snapshots the pixels behind 'localBounds' (or the whole clip when null) into
// the current save record so they can be splatted back during restore, then
// clears the covered area.
void SkCanvas::internalSaveBehind(const SkRect* localBounds) {
    SkBaseDevice* device = this->topDevice();

    // Map the local bounds into the top device's coordinate space (this is not
    // necessarily the full global CTM transform).
    SkIRect devBounds;
    if (localBounds) {
        SkRect tmp;
        device->localToDevice().mapRect(&tmp, *localBounds);
        if (!devBounds.intersect(tmp.round(), device->devClipBounds())) {
            devBounds.setEmpty();
        }
    } else {
        devBounds = device->devClipBounds();
    }
    if (devBounds.isEmpty()) {
        return;
    }

    // This is getting the special image from the current device, which is then drawn into (both by
    // a client, and the drawClippedToSaveBehind below). Since this is not saving a layer, with its
    // own device, we need to explicitly copy the back image contents so that its original content
    // is available when we splat it back later during restore.
    auto backImage = device->snapSpecial(devBounds, /* forceCopy= */ true);
    if (!backImage) {
        return;
    }

    // we really need the save, so we can wack the fMCRec
    this->checkForDeferredSave();

    fMCRec->fBackImage =
            std::make_unique<BackImage>(BackImage{std::move(backImage), devBounds.topLeft()});

    // Clear the saved region so subsequent draws start from transparent.
    SkPaint paint;
    paint.setBlendMode(SkBlendMode::kClear);
    this->drawClippedToSaveBehind(paint);
}
+
// Pops one materialized save record: restores the device state, splats back
// any saveBehind image, draws a finished layer into its parent device, and
// refreshes cached clip bookkeeping.
void SkCanvas::internalRestore() {
    SkASSERT(!fMCStack.empty());

    // now detach these from fMCRec so we can pop(). Gets freed after its drawn
    std::unique_ptr<Layer> layer = std::move(fMCRec->fLayer);
    std::unique_ptr<BackImage> backImage = std::move(fMCRec->fBackImage);

    // now do the normal restore()
    fMCRec->~MCRec();       // balanced in save()
    fMCStack.pop_back();
    fMCRec = (MCRec*) fMCStack.back();

    if (!fMCRec) {
        // This was the last record, restored during the destruction of the SkCanvas
        return;
    }

    this->topDevice()->restore(fMCRec->fMatrix);

    if (backImage) {
        // Splat the saveBehind snapshot back underneath whatever was drawn since.
        SkPaint paint;
        paint.setBlendMode(SkBlendMode::kDstOver);
        this->topDevice()->drawSpecial(backImage->fImage.get(),
                                       SkMatrix::Translate(backImage->fLoc),
                                       SkSamplingOptions(),
                                       paint);
    }

    // Draw the layer's device contents into the now-current older device. We can't call public
    // draw functions since we don't want to record them.
    if (layer && !layer->fDevice->isNoPixelsDevice() && !layer->fDiscard) {
        layer->fDevice->setImmutable();

        // Don't go through AutoLayerForImageFilter since device draws are so closely tied to
        // internalSaveLayer and internalRestore.
        if (this->predrawNotify()) {
            SkBaseDevice* dstDev = this->topDevice();
            if (layer->fImageFilter) {
                this->internalDrawDeviceWithFilter(layer->fDevice.get(), // src
                                                   dstDev,               // dst
                                                   layer->fImageFilter.get(),
                                                   layer->fPaint,
                                                   DeviceCompatibleWithFilter::kYes);
            } else {
                // NOTE: We don't just call internalDrawDeviceWithFilter with a null filter
                // because we want to take advantage of overridden drawDevice functions for
                // document-based devices.
                SkSamplingOptions sampling;
                dstDev->drawDevice(layer->fDevice.get(), sampling, layer->fPaint);
            }
        }
    }

    // Reset the clip restriction if the restore went past the save point that had added it.
    if (this->getSaveCount() < fClipRestrictionSaveCount) {
        fClipRestrictionRect.setEmpty();
        fClipRestrictionSaveCount = -1;
    }
    // Update the quick-reject bounds in case the restore changed the top device or the
    // removed save record had included modifications to the clip stack.
    fQuickRejectBounds = this->computeDeviceClipBounds();
    this->validateClip();
}
+
+sk_sp<SkSurface> SkCanvas::makeSurface(const SkImageInfo& info, const SkSurfaceProps* props) {
+ if (nullptr == props) {
+ props = &fProps;
+ }
+ return this->onNewSurface(info, *props);
+}
+
+sk_sp<SkSurface> SkCanvas::onNewSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ return this->baseDevice()->makeSurface(info, props);
+}
+
+SkImageInfo SkCanvas::imageInfo() const {
+ return this->onImageInfo();
+}
+
+SkImageInfo SkCanvas::onImageInfo() const {
+ return this->baseDevice()->imageInfo();
+}
+
+bool SkCanvas::getProps(SkSurfaceProps* props) const {
+ return this->onGetProps(props, /*top=*/false);
+}
+
+SkSurfaceProps SkCanvas::getBaseProps() const {
+ SkSurfaceProps props;
+ this->onGetProps(&props, /*top=*/false);
+ return props;
+}
+
+SkSurfaceProps SkCanvas::getTopProps() const {
+ SkSurfaceProps props;
+ this->onGetProps(&props, /*top=*/true);
+ return props;
+}
+
+bool SkCanvas::onGetProps(SkSurfaceProps* props, bool top) const {
+ if (props) {
+ *props = top ? topDevice()->surfaceProps() : fProps;
+ }
+ return true;
+}
+
+bool SkCanvas::peekPixels(SkPixmap* pmap) {
+ return this->onPeekPixels(pmap);
+}
+
+bool SkCanvas::onPeekPixels(SkPixmap* pmap) {
+ return this->baseDevice()->peekPixels(pmap);
+}
+
// Returns a writable pointer to the top layer's pixels, optionally reporting
// its info, rowBytes, and origin. Returns nullptr when pixels aren't directly
// accessible, or when 'origin' is requested but the top device isn't
// pixel-aligned with the root canvas. Note: 'info' and 'rowBytes' may already
// have been written before the origin check causes a nullptr return.
void* SkCanvas::accessTopLayerPixels(SkImageInfo* info, size_t* rowBytes, SkIPoint* origin) {
    SkPixmap pmap;
    if (!this->onAccessTopLayerPixels(&pmap)) {
        return nullptr;
    }
    if (info) {
        *info = pmap.info();
    }
    if (rowBytes) {
        *rowBytes = pmap.rowBytes();
    }
    if (origin) {
        // If the caller requested the origin, they presumably are expecting the returned pixels to
        // be axis-aligned with the root canvas. If the top level device isn't axis aligned, that's
        // not the case. Until we update accessTopLayerPixels() to accept a coord space matrix
        // instead of an origin, just don't expose the pixels in that case. Note that this means
        // that layers with complex coordinate spaces can still report their pixels if the caller
        // does not ask for the origin (e.g. just to dump its output to a file, etc).
        if (this->topDevice()->isPixelAlignedToGlobal()) {
            *origin = this->topDevice()->getOrigin();
        } else {
            return nullptr;
        }
    }
    return pmap.writable_addr();
}
+
+bool SkCanvas::onAccessTopLayerPixels(SkPixmap* pmap) {
+ return this->topDevice()->accessPixels(pmap);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::translate(SkScalar dx, SkScalar dy) {
+ if (dx || dy) {
+ this->checkForDeferredSave();
+ fMCRec->fMatrix.preTranslate(dx, dy);
+
+ this->topDevice()->setGlobalCTM(fMCRec->fMatrix);
+
+ this->didTranslate(dx,dy);
+ }
+}
+
+void SkCanvas::scale(SkScalar sx, SkScalar sy) {
+ if (sx != 1 || sy != 1) {
+ this->checkForDeferredSave();
+ fMCRec->fMatrix.preScale(sx, sy);
+
+ this->topDevice()->setGlobalCTM(fMCRec->fMatrix);
+
+ this->didScale(sx, sy);
+ }
+}
+
+void SkCanvas::rotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ this->concat(m);
+}
+
+void SkCanvas::rotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ this->concat(m);
+}
+
+void SkCanvas::skew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ this->concat(m);
+}
+
+void SkCanvas::concat(const SkMatrix& matrix) {
+ if (matrix.isIdentity()) {
+ return;
+ }
+ this->concat(SkM44(matrix));
+}
+
+void SkCanvas::internalConcat44(const SkM44& m) {
+ this->checkForDeferredSave();
+
+ fMCRec->fMatrix.preConcat(m);
+
+ this->topDevice()->setGlobalCTM(fMCRec->fMatrix);
+}
+
+void SkCanvas::concat(const SkM44& m) {
+ this->internalConcat44(m);
+ // notify subclasses
+ this->didConcat44(m);
+}
+
+void SkCanvas::internalSetMatrix(const SkM44& m) {
+ fMCRec->fMatrix = m;
+
+ this->topDevice()->setGlobalCTM(fMCRec->fMatrix);
+}
+
+void SkCanvas::setMatrix(const SkMatrix& matrix) {
+ this->setMatrix(SkM44(matrix));
+}
+
+void SkCanvas::setMatrix(const SkM44& m) {
+ this->checkForDeferredSave();
+ this->internalSetMatrix(m);
+ this->didSetM44(m);
+}
+
+void SkCanvas::resetMatrix() {
+ this->setMatrix(SkM44());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::clipRect(const SkRect& rect, SkClipOp op, bool doAA) {
+ if (!rect.isFinite()) {
+ return;
+ }
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+ this->onClipRect(rect.makeSorted(), op, edgeStyle);
+}
+
+void SkCanvas::onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ SkASSERT(rect.isSorted());
+ const bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+
+ AutoUpdateQRBounds aqr(this);
+ this->topDevice()->clipRect(rect, op, isAA);
+}
+
+void SkCanvas::androidFramework_setDeviceClipRestriction(const SkIRect& rect) {
+ // The device clip restriction is a surface-space rectangular intersection that cannot be
+ // drawn outside of. The rectangle is remembered so that subsequent resetClip calls still
+ // respect the restriction. Other than clip resetting, all clip operations restrict the set
+ // of renderable pixels, so once set, the restriction will be respected until the canvas
+ // save stack is restored past the point this function was invoked. Unfortunately, the current
+    // implementation relies on the clip stack of the underlying SkDevices, which leads to some
+ // awkward behavioral interactions (see skbug.com/12252).
+ //
+ // Namely, a canvas restore() could undo the clip restriction's rect, and if
+ // setDeviceClipRestriction were called at a nested save level, there's no way to undo just the
+ // prior restriction and re-apply the new one. It also only makes sense to apply to the base
+ // device; any other device for a saved layer will be clipped back to the base device during its
+ // matched restore. As such, we:
+ // - Remember the save count that added the clip restriction and reset the rect to empty when
+ // we've restored past that point to keep our state in sync with the device's clip stack.
+ // - We assert that we're on the base device when this is invoked.
+ // - We assert that setDeviceClipRestriction() is only called when there was no prior
+ // restriction (cannot re-restrict, and prior state must have been reset by restoring the
+ // canvas state).
+ // - Historically, the empty rect would reset the clip restriction but it only could do so
+    //      partially since the device's clips weren't adjusted. Resetting is now handled
+ // automatically via SkCanvas::restore(), so empty input rects are skipped.
+ SkASSERT(this->topDevice() == this->baseDevice()); // shouldn't be in a nested layer
+ // and shouldn't already have a restriction
+ SkASSERT(fClipRestrictionSaveCount < 0 && fClipRestrictionRect.isEmpty());
+
+ if (fClipRestrictionSaveCount < 0 && !rect.isEmpty()) {
+ fClipRestrictionRect = rect;
+ fClipRestrictionSaveCount = this->getSaveCount();
+
+        // A non-empty clip restriction immediately applies an intersection op (ignoring the ctm),
+        // so we have to resolve the save.
+ this->checkForDeferredSave();
+ AutoUpdateQRBounds aqr(this);
+ // Use clipRegion() since that operates in canvas-space, whereas clipRect() would apply the
+ // device's current transform first.
+ this->topDevice()->clipRegion(SkRegion(rect), SkClipOp::kIntersect);
+ }
+}
+
+void SkCanvas::internal_private_resetClip() {
+ this->checkForDeferredSave();
+ this->onResetClip();
+}
+
+void SkCanvas::onResetClip() {
+ SkIRect deviceRestriction = this->topDevice()->imageInfo().bounds();
+ if (fClipRestrictionSaveCount >= 0 && this->topDevice() == this->baseDevice()) {
+ // Respect the device clip restriction when resetting the clip if we're on the base device.
+ // If we're not on the base device, then the "reset" applies to the top device's clip stack,
+ // and the clip restriction will be respected automatically during a restore of the layer.
+ if (!deviceRestriction.intersect(fClipRestrictionRect)) {
+ deviceRestriction = SkIRect::MakeEmpty();
+ }
+ }
+
+ AutoUpdateQRBounds aqr(this);
+ this->topDevice()->replaceClip(deviceRestriction);
+}
+
+void SkCanvas::clipRRect(const SkRRect& rrect, SkClipOp op, bool doAA) {
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+ if (rrect.isRect()) {
+ this->onClipRect(rrect.getBounds(), op, edgeStyle);
+ } else {
+ this->onClipRRect(rrect, op, edgeStyle);
+ }
+}
+
+void SkCanvas::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+
+ AutoUpdateQRBounds aqr(this);
+ this->topDevice()->clipRRect(rrect, op, isAA);
+}
+
+void SkCanvas::clipPath(const SkPath& path, SkClipOp op, bool doAA) {
+ this->checkForDeferredSave();
+ ClipEdgeStyle edgeStyle = doAA ? kSoft_ClipEdgeStyle : kHard_ClipEdgeStyle;
+
+ if (!path.isInverseFillType() && fMCRec->fMatrix.asM33().rectStaysRect()) {
+ SkRect r;
+ if (path.isRect(&r)) {
+ this->onClipRect(r, op, edgeStyle);
+ return;
+ }
+ SkRRect rrect;
+ if (path.isOval(&r)) {
+ rrect.setOval(r);
+ this->onClipRRect(rrect, op, edgeStyle);
+ return;
+ }
+ if (path.isRRect(&rrect)) {
+ this->onClipRRect(rrect, op, edgeStyle);
+ return;
+ }
+ }
+
+ this->onClipPath(path, op, edgeStyle);
+}
+
+void SkCanvas::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ bool isAA = kSoft_ClipEdgeStyle == edgeStyle;
+
+ AutoUpdateQRBounds aqr(this);
+ this->topDevice()->clipPath(path, op, isAA);
+}
+
+void SkCanvas::clipShader(sk_sp<SkShader> sh, SkClipOp op) {
+ if (sh) {
+ if (sh->isOpaque()) {
+ if (op == SkClipOp::kIntersect) {
+ // we don't occlude anything, so skip this call
+ } else {
+ SkASSERT(op == SkClipOp::kDifference);
+ // we occlude everything, so set the clip to empty
+ this->clipRect({0,0,0,0});
+ }
+ } else {
+ this->checkForDeferredSave();
+ this->onClipShader(std::move(sh), op);
+ }
+ }
+}
+
+void SkCanvas::onClipShader(sk_sp<SkShader> sh, SkClipOp op) {
+ AutoUpdateQRBounds aqr(this);
+ this->topDevice()->clipShader(sh, op);
+}
+
+void SkCanvas::clipRegion(const SkRegion& rgn, SkClipOp op) {
+ this->checkForDeferredSave();
+ this->onClipRegion(rgn, op);
+}
+
+void SkCanvas::onClipRegion(const SkRegion& rgn, SkClipOp op) {
+ AutoUpdateQRBounds aqr(this);
+ this->topDevice()->clipRegion(rgn, op);
+}
+
+void SkCanvas::validateClip() const {
+#ifdef SK_DEBUG
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ SkRect tmp = this->computeDeviceClipBounds();
+ if (this->isClipEmpty()) {
+ SkASSERT(fQuickRejectBounds.isEmpty());
+ } else {
+ SkASSERT(tmp == fQuickRejectBounds);
+ }
+#endif
+#endif
+}
+
+bool SkCanvas::androidFramework_isClipAA() const {
+ return this->topDevice()->onClipIsAA();
+}
+
+void SkCanvas::temporary_internal_getRgnClip(SkRegion* rgn) {
+ rgn->setEmpty();
+ SkBaseDevice* device = this->topDevice();
+ if (device && device->isPixelAlignedToGlobal()) {
+ device->onAsRgnClip(rgn);
+ SkIPoint origin = device->getOrigin();
+ if (origin.x() | origin.y()) {
+ rgn->translate(origin.x(), origin.y());
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkCanvas::isClipEmpty() const {
+ return this->topDevice()->onGetClipType() == SkBaseDevice::ClipType::kEmpty;
+}
+
+bool SkCanvas::isClipRect() const {
+ return this->topDevice()->onGetClipType() == SkBaseDevice::ClipType::kRect;
+}
+
+bool SkCanvas::quickReject(const SkRect& src) const {
+#ifdef SK_DEBUG
+ // Verify that fQuickRejectBounds are set properly.
+ this->validateClip();
+#endif
+
+ SkRect devRect = SkMatrixPriv::MapRect(fMCRec->fMatrix, src);
+ return !devRect.isFinite() || !devRect.intersects(fQuickRejectBounds);
+}
+
+bool SkCanvas::quickReject(const SkPath& path) const {
+ return path.isEmpty() || this->quickReject(path.getBounds());
+}
+
+bool SkCanvas::internalQuickReject(const SkRect& bounds, const SkPaint& paint,
+ const SkMatrix* matrix) {
+ if (!bounds.isFinite() || paint.nothingToDraw()) {
+ return true;
+ }
+
+ if (paint.canComputeFastBounds()) {
+ SkRect tmp = matrix ? matrix->mapRect(bounds) : bounds;
+ return this->quickReject(paint.computeFastBounds(tmp, &tmp));
+ }
+
+ return false;
+}
+
+
+SkRect SkCanvas::getLocalClipBounds() const {
+ SkIRect ibounds = this->getDeviceClipBounds();
+ if (ibounds.isEmpty()) {
+ return SkRect::MakeEmpty();
+ }
+
+ SkMatrix inverse;
+ // if we can't invert the CTM, we can't return local clip bounds
+ if (!fMCRec->fMatrix.asM33().invert(&inverse)) {
+ return SkRect::MakeEmpty();
+ }
+
+ SkRect bounds;
+ // adjust it outwards in case we are antialiasing
+ const int margin = 1;
+
+ SkRect r = SkRect::Make(ibounds.makeOutset(margin, margin));
+ inverse.mapRect(&bounds, r);
+ return bounds;
+}
+
+SkIRect SkCanvas::getDeviceClipBounds() const {
+ return this->computeDeviceClipBounds(/*outsetForAA=*/false).roundOut();
+}
+
+SkRect SkCanvas::computeDeviceClipBounds(bool outsetForAA) const {
+ const SkBaseDevice* dev = this->topDevice();
+ if (dev->onGetClipType() == SkBaseDevice::ClipType::kEmpty) {
+ return SkRect::MakeEmpty();
+ } else {
+ SkRect devClipBounds =
+ SkMatrixPriv::MapRect(dev->deviceToGlobal(), SkRect::Make(dev->devClipBounds()));
+ if (outsetForAA) {
+ // Expand bounds out by 1 in case we are anti-aliasing. We store the
+ // bounds as floats to enable a faster quick reject implementation.
+ devClipBounds.outset(1.f, 1.f);
+ }
+ return devClipBounds;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////
+
+SkMatrix SkCanvas::getTotalMatrix() const {
+ return fMCRec->fMatrix.asM33();
+}
+
+SkM44 SkCanvas::getLocalToDevice() const {
+ return fMCRec->fMatrix;
+}
+
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) && defined(SK_GANESH)
+
+SkIRect SkCanvas::topLayerBounds() const {
+ return this->topDevice()->getGlobalBounds();
+}
+
+GrBackendRenderTarget SkCanvas::topLayerBackendRenderTarget() const {
+ auto proxy = SkCanvasPriv::TopDeviceTargetProxy(const_cast<SkCanvas*>(this));
+ if (!proxy) {
+ return {};
+ }
+ const GrRenderTarget* renderTarget = proxy->peekRenderTarget();
+ return renderTarget ? renderTarget->getBackendRenderTarget() : GrBackendRenderTarget();
+}
+#endif
+
+GrRecordingContext* SkCanvas::recordingContext() {
+#if defined(SK_GANESH)
+ if (auto gpuDevice = this->topDevice()->asGaneshDevice()) {
+ return gpuDevice->recordingContext();
+ }
+#endif
+
+ return nullptr;
+}
+
+skgpu::graphite::Recorder* SkCanvas::recorder() {
+#if defined(SK_GRAPHITE)
+ if (auto graphiteDevice = this->topDevice()->asGraphiteDevice()) {
+ return graphiteDevice->recorder();
+ }
+#endif
+
+ return nullptr;
+}
+
+
+void SkCanvas::drawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (outer.isEmpty()) {
+ return;
+ }
+ if (inner.isEmpty()) {
+ this->drawRRect(outer, paint);
+ return;
+ }
+
+ // We don't have this method (yet), but technically this is what we should
+ // be able to return ...
+    // if (!outer.contains(inner)) {
+ //
+ // For now at least check for containment of bounds
+ if (!outer.getBounds().contains(inner.getBounds())) {
+ return;
+ }
+
+ this->onDrawDRRect(outer, inner, paint);
+}
+
+void SkCanvas::drawPaint(const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawPaint(paint);
+}
+
+void SkCanvas::drawRect(const SkRect& r, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ // To avoid redundant logic in our culling code and various backends, we always sort rects
+ // before passing them along.
+ this->onDrawRect(r.makeSorted(), paint);
+}
+
+void SkCanvas::drawClippedToSaveBehind(const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawBehind(paint);
+}
+
+void SkCanvas::drawRegion(const SkRegion& region, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (region.isEmpty()) {
+ return;
+ }
+
+ if (region.isRect()) {
+ return this->drawIRect(region.getBounds(), paint);
+ }
+
+ this->onDrawRegion(region, paint);
+}
+
+void SkCanvas::drawOval(const SkRect& r, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ // To avoid redundant logic in our culling code and various backends, we always sort rects
+ // before passing them along.
+ this->onDrawOval(r.makeSorted(), paint);
+}
+
+void SkCanvas::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawRRect(rrect, paint);
+}
+
+void SkCanvas::drawPoints(PointMode mode, size_t count, const SkPoint pts[], const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawPoints(mode, count, pts, paint);
+}
+
+void SkCanvas::drawVertices(const sk_sp<SkVertices>& vertices, SkBlendMode mode,
+ const SkPaint& paint) {
+ this->drawVertices(vertices.get(), mode, paint);
+}
+
+void SkCanvas::drawVertices(const SkVertices* vertices, SkBlendMode mode, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(vertices);
+
+ // We expect fans to be converted to triangles when building or deserializing SkVertices.
+ SkASSERT(vertices->priv().mode() != SkVertices::kTriangleFan_VertexMode);
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // Preserve legacy behavior for Android: ignore the SkShader if there are no texCoords present
+ if (paint.getShader() && !vertices->priv().hasTexCoords()) {
+ SkPaint noShaderPaint(paint);
+ noShaderPaint.setShader(nullptr);
+ this->onDrawVerticesObject(vertices, mode, noShaderPaint);
+ return;
+ }
+#endif
+ this->onDrawVerticesObject(vertices, mode, paint);
+}
+
+#ifdef SK_ENABLE_SKSL
+void SkCanvas::drawMesh(const SkMesh& mesh, sk_sp<SkBlender> blender, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_FALSE(mesh.isValid());
+ if (!blender) {
+ blender = SkBlender::Mode(SkBlendMode::kModulate);
+ }
+ this->onDrawMesh(mesh, std::move(blender), paint);
+}
+#endif
+
+void SkCanvas::drawPath(const SkPath& path, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawPath(path, paint);
+}
+
+// Returns true if the rect can be "filled" : non-empty and finite
+static bool fillable(const SkRect& r) {
+ SkScalar w = r.width();
+ SkScalar h = r.height();
+ return SkScalarIsFinite(w) && w > 0 && SkScalarIsFinite(h) && h > 0;
+}
+
+static SkPaint clean_paint_for_lattice(const SkPaint* paint) {
+ SkPaint cleaned;
+ if (paint) {
+ cleaned = *paint;
+ cleaned.setMaskFilter(nullptr);
+ cleaned.setAntiAlias(false);
+ }
+ return cleaned;
+}
+
+void SkCanvas::drawImageNine(const SkImage* image, const SkIRect& center, const SkRect& dst,
+ SkFilterMode filter, const SkPaint* paint) {
+ RETURN_ON_NULL(image);
+
+ const int xdivs[] = {center.fLeft, center.fRight};
+ const int ydivs[] = {center.fTop, center.fBottom};
+
+ Lattice lat;
+ lat.fXDivs = xdivs;
+ lat.fYDivs = ydivs;
+ lat.fRectTypes = nullptr;
+ lat.fXCount = lat.fYCount = 2;
+ lat.fBounds = nullptr;
+ lat.fColors = nullptr;
+ this->drawImageLattice(image, lat, dst, filter, paint);
+}
+
+void SkCanvas::drawImageLattice(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ SkFilterMode filter, const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(image);
+ if (dst.isEmpty()) {
+ return;
+ }
+
+ SkIRect bounds;
+ Lattice latticePlusBounds = lattice;
+ if (!latticePlusBounds.fBounds) {
+ bounds = SkIRect::MakeWH(image->width(), image->height());
+ latticePlusBounds.fBounds = &bounds;
+ }
+
+ SkPaint latticePaint = clean_paint_for_lattice(paint);
+ if (SkLatticeIter::Valid(image->width(), image->height(), latticePlusBounds)) {
+ this->onDrawImageLattice2(image, latticePlusBounds, dst, filter, &latticePaint);
+ } else {
+ this->drawImageRect(image, SkRect::MakeIWH(image->width(), image->height()), dst,
+ SkSamplingOptions(filter), &latticePaint, kStrict_SrcRectConstraint);
+ }
+}
+
+void SkCanvas::drawAtlas(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode,
+ const SkSamplingOptions& sampling, const SkRect* cull,
+ const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(atlas);
+ if (count <= 0) {
+ return;
+ }
+ SkASSERT(atlas);
+ SkASSERT(tex);
+ this->onDrawAtlas2(atlas, xform, tex, colors, count, mode, sampling, cull, paint);
+}
+
+void SkCanvas::drawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (key) {
+ this->onDrawAnnotation(rect, key, value);
+ }
+}
+
+void SkCanvas::private_draw_shadow_rec(const SkPath& path, const SkDrawShadowRec& rec) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawShadowRec(path, rec);
+}
+
+void SkCanvas::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ // We don't test quickReject because the shadow outsets the path's bounds.
+ // TODO(michaelludwig): Is it worth calling SkDrawShadowMetrics::GetLocalBounds here?
+ if (!this->predrawNotify()) {
+ return;
+ }
+ this->topDevice()->drawShadow(path, rec);
+}
+
+void SkCanvas::experimental_DrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aaFlags, const SkColor4f& color,
+ SkBlendMode mode) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ // Make sure the rect is sorted before passing it along
+ this->onDrawEdgeAAQuad(rect.makeSorted(), clip, aaFlags, color, mode);
+}
+
+void SkCanvas::experimental_DrawEdgeAAImageSet(const ImageSetEntry imageSet[], int cnt,
+ const SkPoint dstClips[],
+ const SkMatrix preViewMatrices[],
+ const SkSamplingOptions& sampling,
+ const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ this->onDrawEdgeAAImageSet2(imageSet, cnt, dstClips, preViewMatrices, sampling, paint,
+ constraint);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// These are the virtual drawing methods
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::onDiscard() {
+ if (fSurfaceBase) {
+ sk_ignore_unused_variable(fSurfaceBase->aboutToDraw(SkSurface::kDiscard_ContentChangeMode));
+ }
+}
+
+void SkCanvas::onDrawPaint(const SkPaint& paint) {
+ this->internalDrawPaint(paint);
+}
+
+void SkCanvas::internalDrawPaint(const SkPaint& paint) {
+ // drawPaint does not call internalQuickReject() because computing its geometry is not free
+    // (see getLocalClipBounds()), and the two conditions below are sufficient.
+ if (paint.nothingToDraw() || this->isClipEmpty()) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, paint, nullptr, CheckForOverwrite::kYes);
+ if (layer) {
+ this->topDevice()->drawPaint(layer->paint());
+ }
+}
+
+void SkCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ if ((long)count <= 0 || paint.nothingToDraw()) {
+ return;
+ }
+ SkASSERT(pts != nullptr);
+
+ SkRect bounds;
+ // Compute bounds from points (common for drawing a single line)
+ if (count == 2) {
+ bounds.set(pts[0], pts[1]);
+ } else {
+ bounds.setBounds(pts, SkToInt(count));
+ }
+
+ // Enforce paint style matches implicit behavior of drawPoints
+ SkPaint strokePaint = paint;
+ strokePaint.setStyle(SkPaint::kStroke_Style);
+ if (this->internalQuickReject(bounds, strokePaint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, strokePaint, &bounds);
+ if (layer) {
+ this->topDevice()->drawPoints(mode, count, pts, layer->paint());
+ }
+}
+
+void SkCanvas::onDrawRect(const SkRect& r, const SkPaint& paint) {
+ SkASSERT(r.isSorted());
+ if (this->internalQuickReject(r, paint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, paint, &r, CheckForOverwrite::kYes);
+ if (layer) {
+ this->topDevice()->drawRect(r, layer->paint());
+ }
+}
+
+void SkCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ const SkRect bounds = SkRect::Make(region.getBounds());
+ if (this->internalQuickReject(bounds, paint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, paint, &bounds);
+ if (layer) {
+ this->topDevice()->drawRegion(region, layer->paint());
+ }
+}
+
+void SkCanvas::onDrawBehind(const SkPaint& paint) {
+ SkBaseDevice* dev = this->topDevice();
+ if (!dev) {
+ return;
+ }
+
+ SkIRect bounds;
+ SkDeque::Iter iter(fMCStack, SkDeque::Iter::kBack_IterStart);
+ for (;;) {
+ const MCRec* rec = (const MCRec*)iter.prev();
+ if (!rec) {
+ return; // no backimages, so nothing to draw
+ }
+ if (rec->fBackImage) {
+ // drawBehind should only have been called when the saveBehind record is active;
+ // if this fails, it means a real saveLayer was made w/o being restored first.
+ SkASSERT(dev == rec->fDevice);
+ bounds = SkIRect::MakeXYWH(rec->fBackImage->fLoc.fX, rec->fBackImage->fLoc.fY,
+ rec->fBackImage->fImage->width(),
+ rec->fBackImage->fImage->height());
+ break;
+ }
+ }
+
+ // The backimage location (and thus bounds) were defined in the device's space, so mark it
+ // as a clip. We use a clip instead of just drawing a rect in case the paint has an image
+ // filter on it (which is applied before any auto-layer so the filter is clipped).
+ dev->save();
+ {
+ // We also have to temporarily whack the device matrix since clipRegion is affected by the
+ // global-to-device matrix and clipRect is affected by the local-to-device.
+ SkAutoDeviceTransformRestore adtr(dev, SkMatrix::I());
+ dev->clipRect(SkRect::Make(bounds), SkClipOp::kIntersect, /* aa */ false);
+ // ~adtr will reset the local-to-device matrix so that drawPaint() shades correctly.
+ }
+
+ auto layer = this->aboutToDraw(this, paint);
+ if (layer) {
+ this->topDevice()->drawPaint(layer->paint());
+ }
+
+ dev->restore(fMCRec->fMatrix);
+}
+
+void SkCanvas::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ SkASSERT(oval.isSorted());
+ if (this->internalQuickReject(oval, paint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, paint, &oval);
+ if (layer) {
+ this->topDevice()->drawOval(oval, layer->paint());
+ }
+}
+
+void SkCanvas::onDrawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) {
+ SkASSERT(oval.isSorted());
+ if (this->internalQuickReject(oval, paint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, paint, &oval);
+ if (layer) {
+ this->topDevice()->drawArc(oval, startAngle, sweepAngle, useCenter, layer->paint());
+ }
+}
+
+void SkCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ const SkRect& bounds = rrect.getBounds();
+
+ // Delegating to simpler draw operations
+ if (rrect.isRect()) {
+ // call the non-virtual version
+ this->SkCanvas::drawRect(bounds, paint);
+ return;
+ } else if (rrect.isOval()) {
+ // call the non-virtual version
+ this->SkCanvas::drawOval(bounds, paint);
+ return;
+ }
+
+ if (this->internalQuickReject(bounds, paint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, paint, &bounds);
+ if (layer) {
+ this->topDevice()->drawRRect(rrect, layer->paint());
+ }
+}
+
+void SkCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ const SkRect& bounds = outer.getBounds();
+ if (this->internalQuickReject(bounds, paint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, paint, &bounds);
+ if (layer) {
+ this->topDevice()->drawDRRect(outer, inner, layer->paint());
+ }
+}
+
+void SkCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ if (!path.isFinite()) {
+ return;
+ }
+
+ const SkRect& pathBounds = path.getBounds();
+ if (!path.isInverseFillType() && this->internalQuickReject(pathBounds, paint)) {
+ return;
+ }
+ if (path.isInverseFillType() && pathBounds.width() <= 0 && pathBounds.height() <= 0) {
+ this->internalDrawPaint(paint);
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, paint, path.isInverseFillType() ? nullptr : &pathBounds);
+ if (layer) {
+ this->topDevice()->drawPath(path, layer->paint());
+ }
+}
+
+bool SkCanvas::canDrawBitmapAsSprite(SkScalar x, SkScalar y, int w, int h,
+ const SkSamplingOptions& sampling, const SkPaint& paint) {
+ if (!paint.getImageFilter()) {
+ return false;
+ }
+
+ const SkMatrix& ctm = this->getTotalMatrix();
+ if (!SkTreatAsSprite(ctm, SkISize::Make(w, h), sampling, paint.isAntiAlias())) {
+ return false;
+ }
+
+ // The other paint effects need to be applied before the image filter, but the sprite draw
+ // applies the filter explicitly first.
+ if (paint.getAlphaf() < 1.f || paint.getColorFilter() || paint.getMaskFilter()) {
+ return false;
+ }
+ // Currently we can only use the filterSprite code if we are clipped to the bitmap's bounds.
+ // Once we can filter and the filter will return a result larger than itself, we should be
+ // able to remove this constraint.
+ // skbug.com/4526
+ //
+ SkPoint pt;
+ ctm.mapXY(x, y, &pt);
+ SkIRect ir = SkIRect::MakeXYWH(SkScalarRoundToInt(pt.x()), SkScalarRoundToInt(pt.y()), w, h);
+ // quick bounds have been outset by 1px compared to overall device bounds, so this makes the
+ // contains check equivalent to between ir and device bounds
+ ir.outset(1, 1);
+ return ir.contains(fQuickRejectBounds);
+}
+
+// Clean-up the paint to match the drawing semantics for drawImage et al. (skbug.com/7804).
+static SkPaint clean_paint_for_drawImage(const SkPaint* paint) {
+ SkPaint cleaned;
+ if (paint) {
+ cleaned = *paint;
+ cleaned.setStyle(SkPaint::kFill_Style);
+ cleaned.setPathEffect(nullptr);
+ }
+ return cleaned;
+}
+
+// drawVertices fills triangles and ignores mask filter and path effect,
+// so canonicalize the paint before checking quick reject.
+static SkPaint clean_paint_for_drawVertices(SkPaint paint) {
+ paint.setStyle(SkPaint::kFill_Style);
+ paint.setMaskFilter(nullptr);
+ paint.setPathEffect(nullptr);
+ return paint;
+}
+
+void SkCanvas::onDrawImage2(const SkImage* image, SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ SkPaint realPaint = clean_paint_for_drawImage(paint);
+
+ SkRect bounds = SkRect::MakeXYWH(x, y, image->width(), image->height());
+ if (this->internalQuickReject(bounds, realPaint)) {
+ return;
+ }
+
+ if (realPaint.getImageFilter() &&
+ this->canDrawBitmapAsSprite(x, y, image->width(), image->height(), sampling, realPaint) &&
+ !image_to_color_filter(&realPaint)) {
+ // Evaluate the image filter directly on the input image and then draw the result, instead
+ // of first drawing the image to a temporary layer and filtering.
+ SkBaseDevice* device = this->topDevice();
+ sk_sp<SkSpecialImage> special;
+ if ((special = device->makeSpecial(image))) {
+ sk_sp<SkImageFilter> filter = realPaint.refImageFilter();
+ realPaint.setImageFilter(nullptr);
+
+ // TODO(michaelludwig) - Many filters could probably be evaluated like this even if the
+ // CTM is not translate-only; the post-transformation of the filtered image by the CTM
+ // will probably look just as good and not require an extra layer.
+ // TODO(michaelludwig) - Once image filter implementations can support source images
+ // with non-(0,0) origins, we can just mark the origin as (x,y) instead of doing a
+ // pre-concat here.
+ SkMatrix layerToDevice = device->localToDevice();
+ layerToDevice.preTranslate(x, y);
+
+ SkMatrix deviceToLayer;
+ if (!layerToDevice.invert(&deviceToLayer)) {
+ return; // bad ctm, draw nothing
+ }
+
+ skif::Mapping mapping(layerToDevice, deviceToLayer, SkMatrix::Translate(-x, -y));
+
+ if (this->predrawNotify()) {
+ // While we are skipping an initial layer, evaluate the rest of the image filter
+ // pipeline in the same color format as we would have if there was a layer.
+ const auto filterColorType = image_filter_color_type(device->imageInfo());
+ device->drawFilteredImage(mapping, special.get(), filterColorType, filter.get(),
+ sampling,realPaint);
+ }
+ return;
+ } // else fall through to regular drawing path
+ }
+
+ auto layer = this->aboutToDraw(this, realPaint, &bounds);
+ if (layer) {
+ this->topDevice()->drawImageRect(image, nullptr, bounds, sampling,
+ layer->paint(), kFast_SrcRectConstraint);
+ }
+}
+
+static SkSamplingOptions clean_sampling_for_constraint(
+ const SkSamplingOptions& sampling,
+ SkCanvas::SrcRectConstraint constraint) {
+ if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
+ if (sampling.mipmap != SkMipmapMode::kNone) {
+ return SkSamplingOptions(sampling.filter);
+ }
+ if (sampling.isAniso()) {
+ return SkSamplingOptions(SkFilterMode::kLinear);
+ }
+ }
+ return sampling;
+}
+
+void SkCanvas::onDrawImageRect2(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ SkPaint realPaint = clean_paint_for_drawImage(paint);
+ SkSamplingOptions realSampling = clean_sampling_for_constraint(sampling, constraint);
+
+ if (this->internalQuickReject(dst, realPaint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, realPaint, &dst, CheckForOverwrite::kYes,
+ image->isOpaque() ? kOpaque_ShaderOverrideOpacity
+ : kNotOpaque_ShaderOverrideOpacity);
+ if (layer) {
+ this->topDevice()->drawImageRect(image, &src, dst, realSampling, layer->paint(), constraint);
+ }
+}
+
+void SkCanvas::onDrawImageLattice2(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ SkFilterMode filter, const SkPaint* paint) {
+ SkPaint realPaint = clean_paint_for_drawImage(paint);
+
+ if (this->internalQuickReject(dst, realPaint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, realPaint, &dst);
+ if (layer) {
+ this->topDevice()->drawImageLattice(image, lattice, dst, filter, layer->paint());
+ }
+}
+
+void SkCanvas::drawImage(const SkImage* image, SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(image);
+ this->onDrawImage2(image, x, y, sampling, paint);
+}
+
+void SkCanvas::drawImageRect(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ RETURN_ON_NULL(image);
+ if (!fillable(dst) || !fillable(src)) {
+ return;
+ }
+ this->onDrawImageRect2(image, src, dst, sampling, paint, constraint);
+}
+
+void SkCanvas::drawImageRect(const SkImage* image, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ RETURN_ON_NULL(image);
+ this->drawImageRect(image, SkRect::MakeIWH(image->width(), image->height()), dst, sampling,
+ paint, kFast_SrcRectConstraint);
+}
+
+void SkCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ auto glyphRunList = fScratchGlyphRunBuilder->blobToGlyphRunList(*blob, {x, y});
+ this->onDrawGlyphRunList(glyphRunList, paint);
+}
+
+void SkCanvas::onDrawGlyphRunList(const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) {
+ SkRect bounds = glyphRunList.sourceBoundsWithOrigin();
+ if (this->internalQuickReject(bounds, paint)) {
+ return;
+ }
+ auto layer = this->aboutToDraw(this, paint, &bounds);
+ if (layer) {
+ this->topDevice()->drawGlyphRunList(this, glyphRunList, paint, layer->paint());
+ }
+}
+
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE))
+sk_sp<Slug> SkCanvas::convertBlobToSlug(
+ const SkTextBlob& blob, SkPoint origin, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ auto glyphRunList = fScratchGlyphRunBuilder->blobToGlyphRunList(blob, origin);
+ return this->onConvertGlyphRunListToSlug(glyphRunList, paint);
+}
+
+sk_sp<Slug>
+SkCanvas::onConvertGlyphRunListToSlug(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) {
+ SkRect bounds = glyphRunList.sourceBoundsWithOrigin();
+ if (bounds.isEmpty() || !bounds.isFinite() || paint.nothingToDraw()) {
+ return nullptr;
+ }
+ auto layer = this->aboutToDraw(this, paint, &bounds);
+ if (layer) {
+ return this->topDevice()->convertGlyphRunListToSlug(glyphRunList, paint, layer->paint());
+ }
+ return nullptr;
+}
+
+void SkCanvas::drawSlug(const Slug* slug) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (slug) {
+ this->onDrawSlug(slug);
+ }
+}
+
+void SkCanvas::onDrawSlug(const Slug* slug) {
+ SkRect bounds = slug->sourceBoundsWithOrigin();
+ if (this->internalQuickReject(bounds, slug->initialPaint())) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, slug->initialPaint(), &bounds);
+ if (layer) {
+ this->topDevice()->drawSlug(this, slug, layer->paint());
+ }
+}
+#endif
+
+// These call the (virtual) onDraw... method
+void SkCanvas::drawSimpleText(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkScalar x, SkScalar y, const SkFont& font, const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (byteLength) {
+ sk_msan_assert_initialized(text, SkTAddOffset<const void>(text, byteLength));
+ const sktext::GlyphRunList& glyphRunList =
+ fScratchGlyphRunBuilder->textToGlyphRunList(
+ font, paint, text, byteLength, {x, y}, encoding);
+ if (!glyphRunList.empty()) {
+ this->onDrawGlyphRunList(glyphRunList, paint);
+ }
+ }
+}
+
+void SkCanvas::drawGlyphs(int count, const SkGlyphID* glyphs, const SkPoint* positions,
+ const uint32_t* clusters, int textByteCount, const char* utf8text,
+ SkPoint origin, const SkFont& font, const SkPaint& paint) {
+ if (count <= 0) { return; }
+
+ sktext::GlyphRun glyphRun {
+ font,
+ SkSpan(positions, count),
+ SkSpan(glyphs, count),
+ SkSpan(utf8text, textByteCount),
+ SkSpan(clusters, count),
+ SkSpan<SkVector>()
+ };
+
+ sktext::GlyphRunList glyphRunList = fScratchGlyphRunBuilder->makeGlyphRunList(
+ glyphRun, paint, origin);
+ this->onDrawGlyphRunList(glyphRunList, paint);
+}
+
+void SkCanvas::drawGlyphs(int count, const SkGlyphID glyphs[], const SkPoint positions[],
+ SkPoint origin, const SkFont& font, const SkPaint& paint) {
+ if (count <= 0) { return; }
+
+ sktext::GlyphRun glyphRun {
+ font,
+ SkSpan(positions, count),
+ SkSpan(glyphs, count),
+ SkSpan<const char>(),
+ SkSpan<const uint32_t>(),
+ SkSpan<SkVector>()
+ };
+
+ sktext::GlyphRunList glyphRunList = fScratchGlyphRunBuilder->makeGlyphRunList(
+ glyphRun, paint, origin);
+ this->onDrawGlyphRunList(glyphRunList, paint);
+}
+
+void SkCanvas::drawGlyphs(int count, const SkGlyphID glyphs[], const SkRSXform xforms[],
+ SkPoint origin, const SkFont& font, const SkPaint& paint) {
+ if (count <= 0) { return; }
+
+ auto [positions, rotateScales] =
+ fScratchGlyphRunBuilder->convertRSXForm(SkSpan(xforms, count));
+
+ sktext::GlyphRun glyphRun {
+ font,
+ positions,
+ SkSpan(glyphs, count),
+ SkSpan<const char>(),
+ SkSpan<const uint32_t>(),
+ rotateScales
+ };
+ sktext::GlyphRunList glyphRunList = fScratchGlyphRunBuilder->makeGlyphRunList(
+ glyphRun, paint, origin);
+ this->onDrawGlyphRunList(glyphRunList, paint);
+}
+
+#if defined(SK_GANESH) && GR_TEST_UTILS
+bool gSkBlobAsSlugTesting = false;
+#endif
+
+void SkCanvas::drawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(blob);
+ RETURN_ON_FALSE(blob->bounds().makeOffset(x, y).isFinite());
+
+ // Overflow if more than 2^21 glyphs stopping a buffer overflow latter in the stack.
+ // See chromium:1080481
+ // TODO: can consider unrolling a few at a time if this limit becomes a problem.
+ int totalGlyphCount = 0;
+ constexpr int kMaxGlyphCount = 1 << 21;
+ SkTextBlob::Iter i(*blob);
+ SkTextBlob::Iter::Run r;
+ while (i.next(&r)) {
+ int glyphsLeft = kMaxGlyphCount - totalGlyphCount;
+ RETURN_ON_FALSE(r.fGlyphCount <= glyphsLeft);
+ totalGlyphCount += r.fGlyphCount;
+ }
+
+#if defined(SK_GANESH) && GR_TEST_UTILS
+ // Draw using text blob normally or if the blob has RSX form because slugs can't convert that
+ // form.
+ if (!gSkBlobAsSlugTesting ||
+ this->topDevice()->asGaneshDevice() == nullptr ||
+ SkTextBlobPriv::HasRSXForm(*blob))
+#endif
+ {
+ this->onDrawTextBlob(blob, x, y, paint);
+ }
+#if defined(SK_GANESH) && GR_TEST_UTILS
+ else {
+ auto slug = Slug::ConvertBlob(this, *blob, {x, y}, paint);
+ slug->draw(this);
+ }
+#endif
+}
+
+void SkCanvas::onDrawVerticesObject(const SkVertices* vertices, SkBlendMode bmode,
+ const SkPaint& paint) {
+ SkPaint simplePaint = clean_paint_for_drawVertices(paint);
+
+ const SkRect& bounds = vertices->bounds();
+ if (this->internalQuickReject(bounds, simplePaint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, simplePaint, &bounds);
+ if (layer) {
+ this->topDevice()->drawVertices(vertices, SkBlender::Mode(bmode), layer->paint());
+ }
+}
+
+#ifdef SK_ENABLE_SKSL
+void SkCanvas::onDrawMesh(const SkMesh& mesh, sk_sp<SkBlender> blender, const SkPaint& paint) {
+ SkPaint simplePaint = clean_paint_for_drawVertices(paint);
+
+ if (this->internalQuickReject(mesh.bounds(), simplePaint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, simplePaint, nullptr);
+ if (layer) {
+ this->topDevice()->drawMesh(mesh, std::move(blender), paint);
+ }
+}
+#endif
+
+void SkCanvas::drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (nullptr == cubics) {
+ return;
+ }
+
+ this->onDrawPatch(cubics, colors, texCoords, bmode, paint);
+}
+
+void SkCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ // drawPatch has the same behavior restrictions as drawVertices
+ SkPaint simplePaint = clean_paint_for_drawVertices(paint);
+
+ // Since a patch is always within the convex hull of the control points, we discard it when its
+ // bounding rectangle is completely outside the current clip.
+ SkRect bounds;
+ bounds.setBounds(cubics, SkPatchUtils::kNumCtrlPts);
+ if (this->internalQuickReject(bounds, simplePaint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, simplePaint, &bounds);
+ if (layer) {
+ this->topDevice()->drawPatch(cubics, colors, texCoords, SkBlender::Mode(bmode),
+ layer->paint());
+ }
+}
+
+void SkCanvas::drawDrawable(SkDrawable* dr, SkScalar x, SkScalar y) {
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ TRACE_EVENT0("skia", TRACE_FUNC);
+#endif
+ RETURN_ON_NULL(dr);
+ if (x || y) {
+ SkMatrix matrix = SkMatrix::Translate(x, y);
+ this->onDrawDrawable(dr, &matrix);
+ } else {
+ this->onDrawDrawable(dr, nullptr);
+ }
+}
+
+void SkCanvas::drawDrawable(SkDrawable* dr, const SkMatrix* matrix) {
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ TRACE_EVENT0("skia", TRACE_FUNC);
+#endif
+ RETURN_ON_NULL(dr);
+ if (matrix && matrix->isIdentity()) {
+ matrix = nullptr;
+ }
+ this->onDrawDrawable(dr, matrix);
+}
+
+void SkCanvas::onDrawDrawable(SkDrawable* dr, const SkMatrix* matrix) {
+ // drawable bounds are no longer reliable (e.g. android displaylist)
+ // so don't use them for quick-reject
+ if (this->predrawNotify()) {
+ this->topDevice()->drawDrawable(this, dr, matrix);
+ }
+}
+
+void SkCanvas::onDrawAtlas2(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode bmode,
+ const SkSamplingOptions& sampling, const SkRect* cull,
+ const SkPaint* paint) {
+ // drawAtlas is a combination of drawVertices and drawImage...
+ SkPaint realPaint = clean_paint_for_drawVertices(clean_paint_for_drawImage(paint));
+ realPaint.setShader(atlas->makeShader(sampling));
+
+ if (cull && this->internalQuickReject(*cull, realPaint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, realPaint);
+ if (layer) {
+ this->topDevice()->drawAtlas(xform, tex, colors, count, SkBlender::Mode(bmode),
+ layer->paint());
+ }
+}
+
+void SkCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ SkASSERT(key);
+
+ if (this->predrawNotify()) {
+ this->topDevice()->drawAnnotation(rect, key, value);
+ }
+}
+
+void SkCanvas::onDrawEdgeAAQuad(const SkRect& r, const SkPoint clip[4], QuadAAFlags edgeAA,
+ const SkColor4f& color, SkBlendMode mode) {
+ SkASSERT(r.isSorted());
+
+ SkPaint paint{color};
+ paint.setBlendMode(mode);
+ if (this->internalQuickReject(r, paint)) {
+ return;
+ }
+
+ if (this->predrawNotify()) {
+ this->topDevice()->drawEdgeAAQuad(r, clip, edgeAA, color, mode);
+ }
+}
+
+void SkCanvas::onDrawEdgeAAImageSet2(const ImageSetEntry imageSet[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ if (count <= 0) {
+ // Nothing to draw
+ return;
+ }
+
+ SkPaint realPaint = clean_paint_for_drawImage(paint);
+ SkSamplingOptions realSampling = clean_sampling_for_constraint(sampling, constraint);
+
+ // We could calculate the set's dstRect union to always check quickReject(), but we can't reject
+ // individual entries and Chromium's occlusion culling already makes it likely that at least one
+ // entry will be visible. So, we only calculate the draw bounds when it's trivial (count == 1),
+ // or we need it for the autolooper (since it greatly improves image filter perf).
+ bool needsAutoLayer = SkToBool(realPaint.getImageFilter());
+ bool setBoundsValid = count == 1 || needsAutoLayer;
+ SkRect setBounds = imageSet[0].fDstRect;
+ if (imageSet[0].fMatrixIndex >= 0) {
+ // Account for the per-entry transform that is applied prior to the CTM when drawing
+ preViewMatrices[imageSet[0].fMatrixIndex].mapRect(&setBounds);
+ }
+ if (needsAutoLayer) {
+ for (int i = 1; i < count; ++i) {
+ SkRect entryBounds = imageSet[i].fDstRect;
+ if (imageSet[i].fMatrixIndex >= 0) {
+ preViewMatrices[imageSet[i].fMatrixIndex].mapRect(&entryBounds);
+ }
+ setBounds.joinPossiblyEmptyRect(entryBounds);
+ }
+ }
+
+ // If we happen to have the draw bounds, though, might as well check quickReject().
+ if (setBoundsValid && this->internalQuickReject(setBounds, realPaint)) {
+ return;
+ }
+
+ auto layer = this->aboutToDraw(this, realPaint, setBoundsValid ? &setBounds : nullptr);
+ if (layer) {
+ this->topDevice()->drawEdgeAAImageSet(imageSet, count, dstClips, preViewMatrices,
+ realSampling, layer->paint(), constraint);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// These methods are NOT virtual, and therefore must call back into virtual
+// methods, rather than actually drawing themselves.
+//////////////////////////////////////////////////////////////////////////////
+
+void SkCanvas::drawColor(const SkColor4f& c, SkBlendMode mode) {
+ SkPaint paint;
+ paint.setColor(c);
+ paint.setBlendMode(mode);
+ this->drawPaint(paint);
+}
+
+void SkCanvas::drawPoint(SkScalar x, SkScalar y, const SkPaint& paint) {
+ const SkPoint pt = { x, y };
+ this->drawPoints(kPoints_PointMode, 1, &pt, paint);
+}
+
+void SkCanvas::drawLine(SkScalar x0, SkScalar y0, SkScalar x1, SkScalar y1, const SkPaint& paint) {
+ SkPoint pts[2];
+ pts[0].set(x0, y0);
+ pts[1].set(x1, y1);
+ this->drawPoints(kLines_PointMode, 2, pts, paint);
+}
+
+void SkCanvas::drawCircle(SkScalar cx, SkScalar cy, SkScalar radius, const SkPaint& paint) {
+ if (radius < 0) {
+ radius = 0;
+ }
+
+ SkRect r;
+ r.setLTRB(cx - radius, cy - radius, cx + radius, cy + radius);
+ this->drawOval(r, paint);
+}
+
+void SkCanvas::drawRoundRect(const SkRect& r, SkScalar rx, SkScalar ry,
+ const SkPaint& paint) {
+ if (rx > 0 && ry > 0) {
+ SkRRect rrect;
+ rrect.setRectXY(r, rx, ry);
+ this->drawRRect(rrect, paint);
+ } else {
+ this->drawRect(r, paint);
+ }
+}
+
+void SkCanvas::drawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter,
+ const SkPaint& paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (oval.isEmpty() || !sweepAngle) {
+ return;
+ }
+ this->onDrawArc(oval, startAngle, sweepAngle, useCenter, paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#ifdef SK_DISABLE_SKPICTURE
+void SkCanvas::drawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint) {}
+
+
+void SkCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {}
+#else
+
+void SkCanvas::drawPicture(const SkPicture* picture, const SkMatrix* matrix, const SkPaint* paint) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ RETURN_ON_NULL(picture);
+
+ if (matrix && matrix->isIdentity()) {
+ matrix = nullptr;
+ }
+ if (picture->approximateOpCount() <= kMaxPictureOpsToUnrollInsteadOfRef) {
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
+ picture->playback(this);
+ } else {
+ this->onDrawPicture(picture, matrix, paint);
+ }
+}
+
+void SkCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ if (this->internalQuickReject(picture->cullRect(), paint ? *paint : SkPaint{}, matrix)) {
+ return;
+ }
+
+ SkAutoCanvasMatrixPaint acmp(this, matrix, paint, picture->cullRect());
+ picture->playback(this);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkCanvas::ImageSetEntry::ImageSetEntry() = default;
+SkCanvas::ImageSetEntry::~ImageSetEntry() = default;
+SkCanvas::ImageSetEntry::ImageSetEntry(const ImageSetEntry&) = default;
+SkCanvas::ImageSetEntry& SkCanvas::ImageSetEntry::operator=(const ImageSetEntry&) = default;
+
+SkCanvas::ImageSetEntry::ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect,
+ const SkRect& dstRect, int matrixIndex, float alpha,
+ unsigned aaFlags, bool hasClip)
+ : fImage(std::move(image))
+ , fSrcRect(srcRect)
+ , fDstRect(dstRect)
+ , fMatrixIndex(matrixIndex)
+ , fAlpha(alpha)
+ , fAAFlags(aaFlags)
+ , fHasClip(hasClip) {}
+
+SkCanvas::ImageSetEntry::ImageSetEntry(sk_sp<const SkImage> image, const SkRect& srcRect,
+ const SkRect& dstRect, float alpha, unsigned aaFlags)
+ : fImage(std::move(image))
+ , fSrcRect(srcRect)
+ , fDstRect(dstRect)
+ , fAlpha(alpha)
+ , fAAFlags(aaFlags) {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<SkCanvas> SkCanvas::MakeRasterDirect(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const SkSurfaceProps* props) {
+ if (!SkSurfaceValidateRasterInfo(info, rowBytes)) {
+ return nullptr;
+ }
+
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(info, pixels, rowBytes)) {
+ return nullptr;
+ }
+
+ return props ?
+ std::make_unique<SkCanvas>(bitmap, *props) :
+ std::make_unique<SkCanvas>(bitmap);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkNoDrawCanvas::SkNoDrawCanvas(int width, int height)
+ : INHERITED(SkIRect::MakeWH(width, height)) {}
+
+SkNoDrawCanvas::SkNoDrawCanvas(const SkIRect& bounds)
+ : INHERITED(bounds) {}
+
+SkNoDrawCanvas::SkNoDrawCanvas(sk_sp<SkBaseDevice> device)
+ : INHERITED(device) {}
+
+SkCanvas::SaveLayerStrategy SkNoDrawCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ (void)this->INHERITED::getSaveLayerStrategy(rec);
+ return kNoLayer_SaveLayerStrategy;
+}
+
+bool SkNoDrawCanvas::onDoSaveBehind(const SkRect*) {
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static_assert((int)SkRegion::kDifference_Op == (int)SkClipOp::kDifference, "");
+static_assert((int)SkRegion::kIntersect_Op == (int)SkClipOp::kIntersect, "");
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRasterHandleAllocator::Handle SkCanvas::accessTopRasterHandle() const {
+ const SkBaseDevice* dev = this->topDevice();
+ if (fAllocator) {
+ SkRasterHandleAllocator::Handle handle = dev->getRasterHandle();
+ SkIRect clip = dev->devClipBounds();
+ if (!clip.intersect({0, 0, dev->width(), dev->height()})) {
+ clip.setEmpty();
+ }
+
+ fAllocator->updateHandle(handle, dev->localToDevice(), clip);
+ return handle;
+ }
+ return nullptr;
+}
+
+static bool install(SkBitmap* bm, const SkImageInfo& info,
+ const SkRasterHandleAllocator::Rec& rec) {
+ return bm->installPixels(info, rec.fPixels, rec.fRowBytes, rec.fReleaseProc, rec.fReleaseCtx);
+}
+
+SkRasterHandleAllocator::Handle SkRasterHandleAllocator::allocBitmap(const SkImageInfo& info,
+ SkBitmap* bm) {
+ SkRasterHandleAllocator::Rec rec;
+ if (!this->allocHandle(info, &rec) || !install(bm, info, rec)) {
+ return nullptr;
+ }
+ return rec.fHandle;
+}
+
+std::unique_ptr<SkCanvas>
+SkRasterHandleAllocator::MakeCanvas(std::unique_ptr<SkRasterHandleAllocator> alloc,
+ const SkImageInfo& info, const Rec* rec,
+ const SkSurfaceProps* props) {
+ if (!alloc || !SkSurfaceValidateRasterInfo(info, rec ? rec->fRowBytes : kIgnoreRowBytesValue)) {
+ return nullptr;
+ }
+
+ SkBitmap bm;
+ Handle hndl;
+
+ if (rec) {
+ hndl = install(&bm, info, *rec) ? rec->fHandle : nullptr;
+ } else {
+ hndl = alloc->allocBitmap(info, &bm);
+ }
+ return hndl ? std::unique_ptr<SkCanvas>(new SkCanvas(bm, std::move(alloc), hndl, props))
+ : nullptr;
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+#if defined(SK_GANESH) && GR_TEST_UTILS
+SkTestCanvas<SkSlugTestKey>::SkTestCanvas(SkCanvas* canvas)
+ : SkCanvas(sk_ref_sp(canvas->baseDevice())) {}
+
+void SkTestCanvas<SkSlugTestKey>::onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) {
+ SkRect bounds = glyphRunList.sourceBoundsWithOrigin();
+ if (this->internalQuickReject(bounds, paint)) {
+ return;
+ }
+ auto layer = this->aboutToDraw(this, paint, &bounds);
+ if (layer) {
+ if (glyphRunList.hasRSXForm()) {
+ this->SkCanvas::onDrawGlyphRunList(glyphRunList, layer->paint());
+ } else {
+ auto slug = this->onConvertGlyphRunListToSlug(glyphRunList, layer->paint());
+ this->drawSlug(slug.get());
+ }
+ }
+}
+
+SkTestCanvas<SkSerializeSlugTestKey>::SkTestCanvas(SkCanvas* canvas)
+ : SkCanvas(sk_ref_sp(canvas->baseDevice())) {}
+
+void SkTestCanvas<SkSerializeSlugTestKey>::onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) {
+ SkRect bounds = glyphRunList.sourceBoundsWithOrigin();
+ if (this->internalQuickReject(bounds, paint)) {
+ return;
+ }
+ auto layer = this->aboutToDraw(this, paint, &bounds);
+ if (layer) {
+ if (glyphRunList.hasRSXForm()) {
+ this->SkCanvas::onDrawGlyphRunList(glyphRunList, layer->paint());
+ } else {
+ sk_sp<SkData> bytes;
+ {
+ auto slug = this->onConvertGlyphRunListToSlug(glyphRunList, layer->paint());
+ if (slug != nullptr) {
+ bytes = slug->serialize();
+ }
+ }
+ {
+ if (bytes != nullptr) {
+ auto slug = Slug::Deserialize(bytes->data(), bytes->size());
+ this->drawSlug(slug.get());
+ }
+ }
+ }
+ }
+}
+
+// A do nothing handle manager for the remote strike server.
+class ServerHandleManager : public SkStrikeServer::DiscardableHandleManager {
+public:
+ SkDiscardableHandleId createHandle() override {
+ return 0;
+ }
+
+ bool lockHandle(SkDiscardableHandleId id) override {
+ return true;
+ }
+
+ bool isHandleDeleted(SkDiscardableHandleId id) override {
+ return false;
+ }
+};
+
+// Lock the strikes into the cache for the length of the test. This handler is tied to the lifetime
+// of the canvas used to render the entire test.
+class ClientHandleManager : public SkStrikeClient::DiscardableHandleManager {
+public:
+ bool deleteHandle(SkDiscardableHandleId id) override {
+ return fIsLocked;
+ }
+
+ void assertHandleValid(SkDiscardableHandleId id) override {
+ DiscardableHandleManager::assertHandleValid(id);
+ }
+
+ void notifyCacheMiss(SkStrikeClient::CacheMissType type, int fontSize) override {
+
+ }
+
+ void notifyReadFailure(const ReadFailureData& data) override {
+ DiscardableHandleManager::notifyReadFailure(data);
+ }
+
+ void unlock() {
+ fIsLocked = true;
+ }
+
+private:
+ bool fIsLocked{false};
+};
+
+SkTestCanvas<SkRemoteSlugTestKey>::SkTestCanvas(SkCanvas* canvas)
+ : SkCanvas(sk_ref_sp(canvas->baseDevice()))
+ , fServerHandleManager(new ServerHandleManager{})
+ , fClientHandleManager(new ClientHandleManager{})
+ , fStrikeServer(fServerHandleManager.get())
+ , fStrikeClient(fClientHandleManager) {}
+
+// Allow the strikes to be freed from the strike cache after the test has been drawn.
+SkTestCanvas<SkRemoteSlugTestKey>::~SkTestCanvas() {
+ static_cast<ClientHandleManager*>(fClientHandleManager.get())->unlock();
+}
+
+void SkTestCanvas<SkRemoteSlugTestKey>::onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) {
+ SkRect bounds = glyphRunList.sourceBoundsWithOrigin();
+ if (this->internalQuickReject(bounds, paint)) {
+ return;
+ }
+ auto layer = this->aboutToDraw(this, paint, &bounds);
+ if (layer) {
+ if (glyphRunList.hasRSXForm()) {
+ this->SkCanvas::onDrawGlyphRunList(glyphRunList, layer->paint());
+ } else {
+ sk_sp<SkData> slugBytes;
+ std::vector<uint8_t> glyphBytes;
+ {
+ auto analysisCanvas = fStrikeServer.makeAnalysisCanvas(
+ this->topDevice()->width(),
+ this->topDevice()->height(),
+ this->fProps,
+ this->topDevice()->imageInfo().refColorSpace(),
+ // TODO: Where should we get this value from?
+ /*DFTSupport=*/ true);
+
+ // TODO: Move the analysis canvas processing up to the via to handle a whole
+ // document at a time. This is not the correct way to handle the CTM; it doesn't
+ // work for layers.
+ analysisCanvas->setMatrix(this->getLocalToDevice());
+ auto slug = analysisCanvas->onConvertGlyphRunListToSlug(glyphRunList,
+ layer->paint());
+ if (slug != nullptr) {
+ slugBytes = slug->serialize();
+ }
+ fStrikeServer.writeStrikeData(&glyphBytes);
+ }
+ {
+ if (!glyphBytes.empty()) {
+ fStrikeClient.readStrikeData(glyphBytes.data(), glyphBytes.size());
+ }
+ if (slugBytes != nullptr) {
+ auto slug = Slug::Deserialize(
+ slugBytes->data(), slugBytes->size(), &fStrikeClient);
+ this->drawSlug(slug.get());
+ }
+ }
+ }
+ }
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkCanvasPriv.cpp b/gfx/skia/skia/src/core/SkCanvasPriv.cpp
new file mode 100644
index 0000000000..3733516dbc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvasPriv.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkCanvasPriv.h"
+
+#include "src/base/SkAutoMalloc.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriter32.h"
+
+#include <locale>
+
+SkAutoCanvasMatrixPaint::SkAutoCanvasMatrixPaint(SkCanvas* canvas, const SkMatrix* matrix,
+ const SkPaint* paint, const SkRect& bounds)
+ : fCanvas(canvas)
+ , fSaveCount(canvas->getSaveCount()) {
+ if (paint) {
+ SkRect newBounds = bounds;
+ if (matrix) {
+ matrix->mapRect(&newBounds);
+ }
+ canvas->saveLayer(&newBounds, paint);
+ } else if (matrix) {
+ canvas->save();
+ }
+
+ if (matrix) {
+ canvas->concat(*matrix);
+ }
+}
+
+SkAutoCanvasMatrixPaint::~SkAutoCanvasMatrixPaint() {
+ fCanvas->restoreToCount(fSaveCount);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkCanvasPriv::ReadLattice(SkReadBuffer& buffer, SkCanvas::Lattice* lattice) {
+ lattice->fXCount = buffer.readInt();
+ lattice->fXDivs = buffer.skipT<int32_t>(lattice->fXCount);
+ lattice->fYCount = buffer.readInt();
+ lattice->fYDivs = buffer.skipT<int32_t>(lattice->fYCount);
+ int flagCount = buffer.readInt();
+ lattice->fRectTypes = nullptr;
+ lattice->fColors = nullptr;
+ if (flagCount) {
+ lattice->fRectTypes = buffer.skipT<SkCanvas::Lattice::RectType>(flagCount);
+ lattice->fColors = buffer.skipT<SkColor>(flagCount);
+ }
+ lattice->fBounds = buffer.skipT<SkIRect>();
+ return buffer.isValid();
+}
+
+size_t SkCanvasPriv::WriteLattice(void* buffer, const SkCanvas::Lattice& lattice) {
+ int flagCount = lattice.fRectTypes ? (lattice.fXCount + 1) * (lattice.fYCount + 1) : 0;
+
+ const size_t size = (1 + lattice.fXCount + 1 + lattice.fYCount + 1) * sizeof(int32_t) +
+ SkAlign4(flagCount * sizeof(SkCanvas::Lattice::RectType)) +
+ SkAlign4(flagCount * sizeof(SkColor)) +
+ sizeof(SkIRect);
+
+ if (buffer) {
+ SkWriter32 writer(buffer, size);
+ writer.write32(lattice.fXCount);
+ writer.write(lattice.fXDivs, lattice.fXCount * sizeof(uint32_t));
+ writer.write32(lattice.fYCount);
+ writer.write(lattice.fYDivs, lattice.fYCount * sizeof(uint32_t));
+ writer.write32(flagCount);
+ writer.writePad(lattice.fRectTypes, flagCount * sizeof(uint8_t));
+ writer.write(lattice.fColors, flagCount * sizeof(SkColor));
+ SkASSERT(lattice.fBounds);
+ writer.write(lattice.fBounds, sizeof(SkIRect));
+ SkASSERT(writer.bytesWritten() == size);
+ }
+ return size;
+}
+
+void SkCanvasPriv::WriteLattice(SkWriteBuffer& buffer, const SkCanvas::Lattice& lattice) {
+ const size_t size = WriteLattice(nullptr, lattice);
+ SkAutoSMalloc<1024> storage(size);
+ WriteLattice(storage.get(), lattice);
+ buffer.writePad32(storage.get(), size);
+}
+
+void SkCanvasPriv::GetDstClipAndMatrixCounts(const SkCanvas::ImageSetEntry set[], int count,
+ int* totalDstClipCount, int* totalMatrixCount) {
+ int dstClipCount = 0;
+ int maxMatrixIndex = -1;
+ for (int i = 0; i < count; ++i) {
+ dstClipCount += 4 * set[i].fHasClip;
+ if (set[i].fMatrixIndex > maxMatrixIndex) {
+ maxMatrixIndex = set[i].fMatrixIndex;
+ }
+ }
+
+ *totalDstClipCount = dstClipCount;
+ *totalMatrixCount = maxMatrixIndex + 1;
+}
+
+#if GR_TEST_UTILS && defined(SK_GANESH)
+
+#include "src/gpu/ganesh/Device_v1.h"
+
+skgpu::ganesh::SurfaceDrawContext* SkCanvasPriv::TopDeviceSurfaceDrawContext(SkCanvas* canvas) {
+ if (auto gpuDevice = canvas->topDevice()->asGaneshDevice()) {
+ return gpuDevice->surfaceDrawContext();
+ }
+
+ return nullptr;
+}
+
+skgpu::ganesh::SurfaceFillContext* SkCanvasPriv::TopDeviceSurfaceFillContext(SkCanvas* canvas) {
+ if (auto gpuDevice = canvas->topDevice()->asGaneshDevice()) {
+ return gpuDevice->surfaceFillContext();
+ }
+
+ return nullptr;
+}
+
+#endif // GR_TEST_UTILS && defined(SK_GANESH)
+
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/Device_v1.h"
+
+GrRenderTargetProxy* SkCanvasPriv::TopDeviceTargetProxy(SkCanvas* canvas) {
+ if (auto gpuDevice = canvas->topDevice()->asGaneshDevice()) {
+ return gpuDevice->targetProxy();
+ }
+
+ return nullptr;
+}
+
+#else // defined(SK_GANESH)
+
+GrRenderTargetProxy* SkCanvasPriv::TopDeviceTargetProxy(SkCanvas* canvas) {
+ return nullptr;
+}
+
+#endif // defined(SK_GANESH)
+
+#if GRAPHITE_TEST_UTILS
+#include "src/gpu/graphite/Device.h"
+
+skgpu::graphite::TextureProxy* SkCanvasPriv::TopDeviceGraphiteTargetProxy(SkCanvas* canvas) {
+ if (auto gpuDevice = canvas->topDevice()->asGraphiteDevice()) {
+ return gpuDevice->target();
+ }
+ return nullptr;
+}
+
+#endif // GRAPHITE_TEST_UTILS
diff --git a/gfx/skia/skia/src/core/SkCanvasPriv.h b/gfx/skia/skia/src/core/SkCanvasPriv.h
new file mode 100644
index 0000000000..432127d8a1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvasPriv.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasPriv_DEFINED
+#define SkCanvasPriv_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/private/base/SkNoncopyable.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+#if GR_TEST_UTILS && defined(SK_GANESH)
+namespace skgpu::ganesh {
+class SurfaceDrawContext;
+class SurfaceFillContext;
+} // namespace skgpu::ganesh
+#endif
+
+// This declaration must match the one in SkDeferredDisplayList.h
+#if defined(SK_GANESH)
+class GrRenderTargetProxy;
+#else
+using GrRenderTargetProxy = SkRefCnt;
+#endif // defined(SK_GANESH)
+
+#if GRAPHITE_TEST_UTILS
+namespace skgpu::graphite {
+ class TextureProxy;
+}
+#endif
+
+class SkAutoCanvasMatrixPaint : SkNoncopyable {
+public:
+ SkAutoCanvasMatrixPaint(SkCanvas*, const SkMatrix*, const SkPaint*, const SkRect& bounds);
+ ~SkAutoCanvasMatrixPaint();
+
+private:
+ SkCanvas* fCanvas;
+ int fSaveCount;
+};
+
+class SkCanvasPriv {
+public:
+ // The lattice has pointers directly into the readbuffer
+ static bool ReadLattice(SkReadBuffer&, SkCanvas::Lattice*);
+
+ static void WriteLattice(SkWriteBuffer&, const SkCanvas::Lattice&);
+
+ // return the byte-size of the lattice, even if the buffer is null
+ // storage must be 4-byte aligned
+ static size_t WriteLattice(void* storage, const SkCanvas::Lattice&);
+
+ static int SaveBehind(SkCanvas* canvas, const SkRect* subset) {
+ return canvas->only_axis_aligned_saveBehind(subset);
+ }
+ static void DrawBehind(SkCanvas* canvas, const SkPaint& paint) {
+ canvas->drawClippedToSaveBehind(paint);
+ }
+
+ // Exposed for testing on non-Android framework builds
+ static void ResetClip(SkCanvas* canvas) {
+ canvas->internal_private_resetClip();
+ }
+
+ static SkBaseDevice* TopDevice(SkCanvas* canvas) {
+ return canvas->topDevice();
+ }
+
+#if GR_TEST_UTILS && defined(SK_GANESH)
+ static skgpu::ganesh::SurfaceDrawContext* TopDeviceSurfaceDrawContext(SkCanvas*);
+ static skgpu::ganesh::SurfaceFillContext* TopDeviceSurfaceFillContext(SkCanvas*);
+#endif
+ static GrRenderTargetProxy* TopDeviceTargetProxy(SkCanvas*);
+
+#if GRAPHITE_TEST_UTILS
+ static skgpu::graphite::TextureProxy* TopDeviceGraphiteTargetProxy(SkCanvas*);
+#endif
+
+ // The experimental_DrawEdgeAAImageSet API accepts separate dstClips and preViewMatrices arrays,
+ // where entries refer into them, but no explicit size is provided. Given a set of entries,
+ // computes the minimum length for these arrays that would provide index access errors.
+ static void GetDstClipAndMatrixCounts(const SkCanvas::ImageSetEntry set[], int count,
+ int* totalDstClipCount, int* totalMatrixCount);
+
+ static SkCanvas::SaveLayerRec ScaledBackdropLayer(const SkRect* bounds,
+ const SkPaint* paint,
+ const SkImageFilter* backdrop,
+ SkScalar backdropScale,
+ SkCanvas::SaveLayerFlags saveLayerFlags) {
+ return SkCanvas::SaveLayerRec(bounds, paint, backdrop, backdropScale, saveLayerFlags);
+ }
+
+ static SkScalar GetBackdropScaleFactor(const SkCanvas::SaveLayerRec& rec) {
+ return rec.fExperimentalBackdropScale;
+ }
+
+ static void SetBackdropScaleFactor(SkCanvas::SaveLayerRec* rec, SkScalar scale) {
+ rec->fExperimentalBackdropScale = scale;
+ }
+};
+
+/**
+ * This constant is trying to balance the speed of ref'ing a subpicture into a parent picture,
+ * against the playback cost of recursing into the subpicture to get at its actual ops.
+ *
+ * For now we pick a conservatively small value, though measurement (and other heuristics like
+ * the type of ops contained) may justify changing this value.
+ */
+constexpr int kMaxPictureOpsToUnrollInsteadOfRef = 1;
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCanvas_Raster.cpp b/gfx/skia/skia/src/core/SkCanvas_Raster.cpp
new file mode 100644
index 0000000000..28694ac42a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCanvas_Raster.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkRasterHandleAllocator.h"
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkBitmapDevice.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/text/GlyphRun.h" // IWYU pragma: keep
+
+#include <memory>
+#include <utility>
+
+class SkBitmap;
+class SkSurfaceProps;
+
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+
+#endif
+
+SkCanvas::SkCanvas(const SkBitmap& bitmap, const SkSurfaceProps& props)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage)), fProps(props) {
+ this->init(sk_make_sp<SkBitmapDevice>(bitmap, fProps));
+}
+
+SkCanvas::SkCanvas(const SkBitmap& bitmap,
+ std::unique_ptr<SkRasterHandleAllocator> alloc,
+ SkRasterHandleAllocator::Handle hndl,
+ const SkSurfaceProps* props)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
+ , fProps(SkSurfacePropsCopyOrDefault(props))
+ , fAllocator(std::move(alloc)) {
+ this->init(sk_make_sp<SkBitmapDevice>(bitmap, fProps, hndl));
+}
+
+SkCanvas::SkCanvas(const SkBitmap& bitmap) : SkCanvas(bitmap, nullptr, nullptr, nullptr) {}
+
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+SkCanvas::SkCanvas(const SkBitmap& bitmap, ColorBehavior)
+ : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage)) {
+ SkBitmap tmp(bitmap);
+ *const_cast<SkImageInfo*>(&tmp.info()) = tmp.info().makeColorSpace(nullptr);
+ this->init(sk_make_sp<SkBitmapDevice>(tmp, fProps));
+}
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkCapabilities.cpp b/gfx/skia/skia/src/core/SkCapabilities.cpp
new file mode 100644
index 0000000000..50e7e58715
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCapabilities.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCapabilities.h"
+
+#ifdef SK_ENABLE_SKSL
+#include "src/sksl/SkSLUtil.h"
+#endif
+
+sk_sp<const SkCapabilities> SkCapabilities::RasterBackend() {
+ static SkCapabilities* sCaps = [](){
+ SkCapabilities* caps = new SkCapabilities;
+#ifdef SK_ENABLE_SKSL
+ caps->fSkSLVersion = SkSL::Version::k100;
+#endif
+ return caps;
+ }();
+
+ return sk_ref_sp(sCaps);
+}
+
+#ifdef SK_ENABLE_SKSL
+void SkCapabilities::initSkCaps(const SkSL::ShaderCaps* shaderCaps) {
+ this->fSkSLVersion = shaderCaps->supportedSkSLVerion();
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkChromeRemoteGlyphCache.cpp b/gfx/skia/skia/src/core/SkChromeRemoteGlyphCache.cpp
new file mode 100644
index 0000000000..4802c1e9b8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkChromeRemoteGlyphCache.cpp
@@ -0,0 +1,1271 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/chromium/SkChromeRemoteGlyphCache.h"
+
+#include "include/core/SkDrawable.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/base/SkDebug.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkEnumerate.h"
+#include "src/core/SkFontMetricsPriv.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTHash.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/core/SkTypeface_remote.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/text/GlyphRun.h"
+#include "src/text/StrikeForGPU.h"
+
+#include <algorithm>
+#include <bitset>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <optional>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrContextOptions.h"
+#include "src/gpu/ganesh/GrDrawOpAtlas.h"
+#include "src/text/gpu/SDFTControl.h"
+#include "src/text/gpu/SubRunAllocator.h"
+#include "src/text/gpu/SubRunContainer.h"
+#include "src/text/gpu/TextBlob.h"
+#endif
+
+using namespace sktext;
+using namespace sktext::gpu;
+using namespace skglyph;
+
+// TODO: remove when new serialization code is done.
+//#define SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION
+
+namespace {
+#if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+// -- Serializer -----------------------------------------------------------------------------------
+size_t pad(size_t size, size_t alignment) { return (size + (alignment - 1)) & ~(alignment - 1); }
+
+// Alignment between x86 and x64 differs for some types, in particular
+// int64_t and doubles have 4 and 8-byte alignment, respectively.
+// Be consistent even when writing and reading across different architectures.
+template<typename T>
+size_t serialization_alignment() {
+ return sizeof(T) == 8 ? 8 : alignof(T);
+}
+
+class Serializer {
+public:
+ explicit Serializer(std::vector<uint8_t>* buffer) : fBuffer{buffer} {}
+
+ template <typename T, typename... Args>
+ T* emplace(Args&&... args) {
+ auto result = this->allocate(sizeof(T), serialization_alignment<T>());
+ return new (result) T{std::forward<Args>(args)...};
+ }
+
+ template <typename T>
+ void write(const T& data) {
+ T* result = (T*)this->allocate(sizeof(T), serialization_alignment<T>());
+ memcpy(result, &data, sizeof(T));
+ }
+
+ void writeDescriptor(const SkDescriptor& desc) {
+ write(desc.getLength());
+ auto result = this->allocate(desc.getLength(), alignof(SkDescriptor));
+ memcpy(result, &desc, desc.getLength());
+ }
+
+ void* allocate(size_t size, size_t alignment) {
+ size_t aligned = pad(fBuffer->size(), alignment);
+ fBuffer->resize(aligned + size);
+ return &(*fBuffer)[aligned];
+ }
+
+private:
+ std::vector<uint8_t>* fBuffer;
+};
+
+// -- Deserializer -------------------------------------------------------------------------------
+// Note that the Deserializer is reading untrusted data, we need to guard against invalid data.
+class Deserializer {
+public:
+ Deserializer(const volatile char* memory, size_t memorySize)
+ : fMemory(memory), fMemorySize(memorySize) {}
+
+ template <typename T>
+ bool read(T* val) {
+ auto* result = this->ensureAtLeast(sizeof(T), serialization_alignment<T>());
+ if (!result) return false;
+
+ memcpy(val, const_cast<const char*>(result), sizeof(T));
+ return true;
+ }
+
+ bool readDescriptor(SkAutoDescriptor* ad) {
+ uint32_t descLength = 0u;
+ if (!this->read<uint32_t>(&descLength)) return false;
+
+ auto* underlyingBuffer = this->ensureAtLeast(descLength, alignof(SkDescriptor));
+ if (!underlyingBuffer) return false;
+ SkReadBuffer buffer((void*)underlyingBuffer, descLength);
+ auto autoDescriptor = SkAutoDescriptor::MakeFromBuffer(buffer);
+ if (!autoDescriptor.has_value()) { return false; }
+
+ *ad = std::move(*autoDescriptor);
+ return true;
+ }
+
+ const volatile void* read(size_t size, size_t alignment) {
+ return this->ensureAtLeast(size, alignment);
+ }
+
+ size_t bytesRead() const { return fBytesRead; }
+
+private:
+ const volatile char* ensureAtLeast(size_t size, size_t alignment) {
+ size_t padded = pad(fBytesRead, alignment);
+
+ // Not enough data.
+ if (padded > fMemorySize) return nullptr;
+ if (size > fMemorySize - padded) return nullptr;
+
+ auto* result = fMemory + padded;
+ fBytesRead = padded + size;
+ return result;
+ }
+
+ // Note that we read each piece of memory only once to guard against TOCTOU violations.
+ const volatile char* fMemory;
+ size_t fMemorySize;
+ size_t fBytesRead = 0u;
+};
+
+// Paths use a SkWriter32 which requires 4 byte alignment.
+static const size_t kPathAlignment = 4u;
+static const size_t kDrawableAlignment = 8u;
+#endif
+
+// -- StrikeSpec -----------------------------------------------------------------------------------
+struct StrikeSpec {
+ StrikeSpec() = default;
+ StrikeSpec(SkTypefaceID typefaceID, SkDiscardableHandleId discardableHandleId)
+ : fTypefaceID{typefaceID}, fDiscardableHandleId(discardableHandleId) {}
+ SkTypefaceID fTypefaceID = 0u;
+ SkDiscardableHandleId fDiscardableHandleId = 0u;
+};
+
+// -- RemoteStrike ----------------------------------------------------------------------------
+class RemoteStrike final : public sktext::StrikeForGPU {
+public:
+ // N.B. RemoteStrike is not valid until ensureScalerContext is called.
+ RemoteStrike(const SkStrikeSpec& strikeSpec,
+ std::unique_ptr<SkScalerContext> context,
+ SkDiscardableHandleId discardableHandleId);
+ ~RemoteStrike() override = default;
+
+ void lock() override {}
+ void unlock() override {}
+ SkGlyphDigest digestFor(skglyph::ActionType, SkPackedGlyphID) override;
+ bool prepareForImage(SkGlyph* glyph) override {
+ this->ensureScalerContext();
+ glyph->setImage(&fAlloc, fContext.get());
+ return glyph->image() != nullptr;
+ }
+ bool prepareForPath(SkGlyph* glyph) override {
+ this->ensureScalerContext();
+ glyph->setPath(&fAlloc, fContext.get());
+ return glyph->path() != nullptr;
+ }
+ bool prepareForDrawable(SkGlyph* glyph) override {
+ this->ensureScalerContext();
+ glyph->setDrawable(&fAlloc, fContext.get());
+ return glyph->drawable() != nullptr;
+ }
+
+ #if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+ void writePendingGlyphs(Serializer* serializer);
+ #else
+ void writePendingGlyphs(SkWriteBuffer& buffer);
+ #endif
+
+ SkDiscardableHandleId discardableHandleId() const { return fDiscardableHandleId; }
+
+ const SkDescriptor& getDescriptor() const override {
+ return *fDescriptor.getDesc();
+ }
+
+ void setStrikeSpec(const SkStrikeSpec& strikeSpec);
+
+ const SkGlyphPositionRoundingSpec& roundingSpec() const override {
+ return fRoundingSpec;
+ }
+
+ sktext::SkStrikePromise strikePromise() override;
+
+ bool hasPendingGlyphs() const {
+ return !fMasksToSend.empty() || !fPathsToSend.empty() || !fDrawablesToSend.empty();
+ }
+
+ void resetScalerContext();
+
+private:
+ #if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+ void writeGlyphPath(const SkGlyph& glyph, Serializer* serializer) const;
+ void writeGlyphDrawable(const SkGlyph& glyph, Serializer* serializer) const;
+ #endif
+
+ void ensureScalerContext();
+
+ const SkAutoDescriptor fDescriptor;
+ const SkDiscardableHandleId fDiscardableHandleId;
+
+ const SkGlyphPositionRoundingSpec fRoundingSpec;
+
+ // The context built using fDescriptor
+ std::unique_ptr<SkScalerContext> fContext;
+
+ // fStrikeSpec is set every time getOrCreateCache is called. This allows the code to maintain
+ // the fContext as lazy as possible.
+ const SkStrikeSpec* fStrikeSpec;
+
+ // Have the metrics been sent for this strike. Only send them once.
+ bool fHaveSentFontMetrics{false};
+
+ // The masks and paths that currently reside in the GPU process.
+ SkTHashTable<SkGlyphDigest, SkPackedGlyphID, SkGlyphDigest> fSentGlyphs;
+
+ // The Masks, SDFT Mask, and Paths that need to be sent to the GPU task for the processed
+ // TextBlobs. Cleared after diffs are serialized.
+ std::vector<SkGlyph> fMasksToSend;
+ std::vector<SkGlyph> fPathsToSend;
+ std::vector<SkGlyph> fDrawablesToSend;
+
+ // Alloc for storing bits and pieces of paths and drawables, Cleared after diffs are serialized.
+ SkArenaAllocWithReset fAlloc{256};
+};
+
+RemoteStrike::RemoteStrike(
+ const SkStrikeSpec& strikeSpec,
+ std::unique_ptr<SkScalerContext> context,
+ uint32_t discardableHandleId)
+ : fDescriptor{strikeSpec.descriptor()}
+ , fDiscardableHandleId(discardableHandleId)
+ , fRoundingSpec{context->isSubpixel(), context->computeAxisAlignmentForHText()}
+ // N.B. context must come last because it is used above.
+ , fContext{std::move(context)} {
+ SkASSERT(fDescriptor.getDesc() != nullptr);
+ SkASSERT(fContext != nullptr);
+}
+
+#if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+// No need to write fScalerContextBits because any needed image is already generated.
+void write_glyph(const SkGlyph& glyph, Serializer* serializer) {
+ serializer->write<SkPackedGlyphID>(glyph.getPackedID());
+ serializer->write<float>(glyph.advanceX());
+ serializer->write<float>(glyph.advanceY());
+ serializer->write<uint16_t>(glyph.width());
+ serializer->write<uint16_t>(glyph.height());
+ serializer->write<int16_t>(glyph.top());
+ serializer->write<int16_t>(glyph.left());
+ serializer->write<uint8_t>(glyph.maskFormat());
+}
+
+void RemoteStrike::writePendingGlyphs(Serializer* serializer) {
+ SkASSERT(this->hasPendingGlyphs());
+
+ // Write the desc.
+ serializer->emplace<StrikeSpec>(fContext->getTypeface()->uniqueID(), fDiscardableHandleId);
+ serializer->writeDescriptor(*fDescriptor.getDesc());
+
+ serializer->emplace<bool>(fHaveSentFontMetrics);
+ if (!fHaveSentFontMetrics) {
+ // Write FontMetrics if not sent before.
+ SkFontMetrics fontMetrics;
+ fContext->getFontMetrics(&fontMetrics);
+ serializer->write<SkFontMetrics>(fontMetrics);
+ fHaveSentFontMetrics = true;
+ }
+
+ // Write mask glyphs
+ serializer->emplace<uint64_t>(fMasksToSend.size());
+ for (SkGlyph& glyph : fMasksToSend) {
+ SkASSERT(SkMask::IsValidFormat(glyph.maskFormat()));
+
+ write_glyph(glyph, serializer);
+ auto imageSize = glyph.imageSize();
+ if (imageSize > 0 && SkGlyphDigest::FitsInAtlas(glyph)) {
+ glyph.setImage(serializer->allocate(imageSize, glyph.formatAlignment()));
+ fContext->getImage(glyph);
+ }
+ }
+ fMasksToSend.clear();
+
+ // Write glyphs paths.
+ serializer->emplace<uint64_t>(fPathsToSend.size());
+ for (SkGlyph& glyph : fPathsToSend) {
+ SkASSERT(SkMask::IsValidFormat(glyph.maskFormat()));
+
+ write_glyph(glyph, serializer);
+ this->writeGlyphPath(glyph, serializer);
+ }
+ fPathsToSend.clear();
+
+ // Write glyphs drawables.
+ serializer->emplace<uint64_t>(fDrawablesToSend.size());
+ for (SkGlyph& glyph : fDrawablesToSend) {
+ SkASSERT(SkMask::IsValidFormat(glyph.maskFormat()));
+
+ write_glyph(glyph, serializer);
+ writeGlyphDrawable(glyph, serializer);
+ }
+ fDrawablesToSend.clear();
+ fAlloc.reset();
+}
+#else
+void RemoteStrike::writePendingGlyphs(SkWriteBuffer& buffer) {
+ SkASSERT(this->hasPendingGlyphs());
+
+ buffer.writeUInt(fContext->getTypeface()->uniqueID());
+ buffer.writeUInt(fDiscardableHandleId);
+ fDescriptor.getDesc()->flatten(buffer);
+
+ buffer.writeBool(fHaveSentFontMetrics);
+ if (!fHaveSentFontMetrics) {
+ // Write FontMetrics if not sent before.
+ SkFontMetrics fontMetrics;
+ fContext->getFontMetrics(&fontMetrics);
+ SkFontMetricsPriv::Flatten(buffer, fontMetrics);
+ fHaveSentFontMetrics = true;
+ }
+
+ // Make sure to install all the mask data into the glyphs before sending.
+ for (SkGlyph& glyph: fMasksToSend) {
+ this->prepareForImage(&glyph);
+ }
+
+ // Make sure to install all the path data into the glyphs before sending.
+ for (SkGlyph& glyph: fPathsToSend) {
+ this->prepareForPath(&glyph);
+ }
+
+ // Make sure to install all the drawable data into the glyphs before sending.
+ for (SkGlyph& glyph: fDrawablesToSend) {
+ this->prepareForDrawable(&glyph);
+ }
+
+ // Send all the pending glyph information.
+ SkStrike::FlattenGlyphsByType(buffer, fMasksToSend, fPathsToSend, fDrawablesToSend);
+
+ // Reset all the sending data.
+ fMasksToSend.clear();
+ fPathsToSend.clear();
+ fDrawablesToSend.clear();
+ fAlloc.reset();
+}
+#endif
+
+void RemoteStrike::ensureScalerContext() {
+ if (fContext == nullptr) {
+ fContext = fStrikeSpec->createScalerContext();
+ }
+}
+
+void RemoteStrike::resetScalerContext() {
+ fContext = nullptr;
+ fStrikeSpec = nullptr;
+}
+
+void RemoteStrike::setStrikeSpec(const SkStrikeSpec& strikeSpec) {
+ fStrikeSpec = &strikeSpec;
+}
+
+#if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+void RemoteStrike::writeGlyphPath(const SkGlyph& glyph, Serializer* serializer) const {
+ if (glyph.isEmpty()) {
+ serializer->write<uint64_t>(0u);
+ return;
+ }
+
+ const SkPath* path = glyph.path();
+
+ if (path == nullptr) {
+ serializer->write<uint64_t>(0u);
+ return;
+ }
+
+ size_t pathSize = path->writeToMemory(nullptr);
+ serializer->write<uint64_t>(pathSize);
+ path->writeToMemory(serializer->allocate(pathSize, kPathAlignment));
+
+ serializer->write<bool>(glyph.pathIsHairline());
+}
+
+void RemoteStrike::writeGlyphDrawable(const SkGlyph& glyph, Serializer* serializer) const {
+ if (glyph.isEmpty()) {
+ serializer->write<uint64_t>(0u);
+ return;
+ }
+
+ SkDrawable* drawable = glyph.drawable();
+
+ if (drawable == nullptr) {
+ serializer->write<uint64_t>(0u);
+ return;
+ }
+
+ sk_sp<SkPicture> picture(drawable->newPictureSnapshot());
+ sk_sp<SkData> data = picture->serialize();
+ serializer->write<uint64_t>(data->size());
+ memcpy(serializer->allocate(data->size(), kDrawableAlignment), data->data(), data->size());
+}
+#endif
+
+SkGlyphDigest RemoteStrike::digestFor(ActionType actionType, SkPackedGlyphID packedGlyphID) {
+ SkGlyphDigest* digestPtr = fSentGlyphs.find(packedGlyphID);
+ if (digestPtr != nullptr && digestPtr->actionFor(actionType) != GlyphAction::kUnset) {
+ return *digestPtr;
+ }
+
+ SkGlyph* glyph;
+ this->ensureScalerContext();
+ switch (actionType) {
+ case kPath: {
+ fPathsToSend.emplace_back(fContext->makeGlyph(packedGlyphID, &fAlloc));
+ glyph = &fPathsToSend.back();
+ break;
+ }
+ case kDrawable: {
+ fDrawablesToSend.emplace_back(fContext->makeGlyph(packedGlyphID, &fAlloc));
+ glyph = &fDrawablesToSend.back();
+ break;
+ }
+ default: {
+ fMasksToSend.emplace_back(fContext->makeGlyph(packedGlyphID, &fAlloc));
+ glyph = &fMasksToSend.back();
+ break;
+ }
+ }
+
+ if (digestPtr == nullptr) {
+ digestPtr = fSentGlyphs.set(SkGlyphDigest{0, *glyph});
+ }
+
+ digestPtr->setActionFor(actionType, glyph, this);
+
+ return *digestPtr;
+}
+
+sktext::SkStrikePromise RemoteStrike::strikePromise() {
+ return sktext::SkStrikePromise{*this->fStrikeSpec};
+}
+} // namespace
+
+// -- SkStrikeServerImpl ---------------------------------------------------------------------------
+class SkStrikeServerImpl final : public sktext::StrikeForGPUCacheInterface {
+public:
+ explicit SkStrikeServerImpl(
+ SkStrikeServer::DiscardableHandleManager* discardableHandleManager);
+
+ // SkStrikeServer API methods
+ void writeStrikeData(std::vector<uint8_t>* memory);
+
+ sk_sp<sktext::StrikeForGPU> findOrCreateScopedStrike(const SkStrikeSpec& strikeSpec) override;
+
+ // Methods for testing
+ void setMaxEntriesInDescriptorMapForTesting(size_t count);
+ size_t remoteStrikeMapSizeForTesting() const;
+
+private:
+ inline static constexpr size_t kMaxEntriesInDescriptorMap = 2000u;
+
+ void checkForDeletedEntries();
+
+ sk_sp<RemoteStrike> getOrCreateCache(const SkStrikeSpec& strikeSpec);
+
+ struct MapOps {
+ size_t operator()(const SkDescriptor* key) const {
+ return key->getChecksum();
+ }
+ bool operator()(const SkDescriptor* lhs, const SkDescriptor* rhs) const {
+ return *lhs == *rhs;
+ }
+ };
+
+ using DescToRemoteStrike =
+ std::unordered_map<const SkDescriptor*, sk_sp<RemoteStrike>, MapOps, MapOps>;
+ DescToRemoteStrike fDescToRemoteStrike;
+
+ SkStrikeServer::DiscardableHandleManager* const fDiscardableHandleManager;
+ SkTHashSet<SkTypefaceID> fCachedTypefaces;
+ size_t fMaxEntriesInDescriptorMap = kMaxEntriesInDescriptorMap;
+
+ // State cached until the next serialization.
+ SkTHashSet<RemoteStrike*> fRemoteStrikesToSend;
+ std::vector<SkTypefaceProxyPrototype> fTypefacesToSend;
+};
+
+SkStrikeServerImpl::SkStrikeServerImpl(SkStrikeServer::DiscardableHandleManager* dhm)
+ : fDiscardableHandleManager(dhm) {
+ SkASSERT(fDiscardableHandleManager);
+}
+
+void SkStrikeServerImpl::setMaxEntriesInDescriptorMapForTesting(size_t count) {
+ fMaxEntriesInDescriptorMap = count;
+}
+size_t SkStrikeServerImpl::remoteStrikeMapSizeForTesting() const {
+ return fDescToRemoteStrike.size();
+}
+
+#if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+void SkStrikeServerImpl::writeStrikeData(std::vector<uint8_t>* memory) {
+ #if defined(SK_TRACE_GLYPH_RUN_PROCESS)
+ SkString msg;
+ msg.appendf("\nBegin send strike differences\n");
+ #endif
+
+ size_t strikesToSend = 0;
+ fRemoteStrikesToSend.foreach ([&](RemoteStrike* strike) {
+ if (strike->hasPendingGlyphs()) {
+ strikesToSend++;
+ } else {
+ strike->resetScalerContext();
+ }
+ });
+
+ if (strikesToSend == 0 && fTypefacesToSend.empty()) {
+ fRemoteStrikesToSend.reset();
+ return;
+ }
+
+ Serializer serializer(memory);
+ serializer.emplace<uint64_t>(fTypefacesToSend.size());
+ for (const auto& typefaceProto: fTypefacesToSend) {
+ // Temporary: use inside knowledge of SkBinaryWriteBuffer to set the size and alignment.
+ // This should agree with the alignment used in readStrikeData.
+ alignas(uint32_t) std::uint8_t bufferBytes[24];
+ SkBinaryWriteBuffer buffer{bufferBytes, std::size(bufferBytes)};
+ typefaceProto.flatten(buffer);
+ serializer.write<uint32_t>(buffer.bytesWritten());
+ void* dest = serializer.allocate(buffer.bytesWritten(), alignof(uint32_t));
+ buffer.writeToMemory(dest);
+ }
+ fTypefacesToSend.clear();
+
+ serializer.emplace<uint64_t>(SkTo<uint64_t>(strikesToSend));
+ fRemoteStrikesToSend.foreach (
+ [&](RemoteStrike* strike) {
+ if (strike->hasPendingGlyphs()) {
+ strike->writePendingGlyphs(&serializer);
+ strike->resetScalerContext();
+ }
+ #ifdef SK_DEBUG
+ auto it = fDescToRemoteStrike.find(&strike->getDescriptor());
+ SkASSERT(it != fDescToRemoteStrike.end());
+ SkASSERT(it->second.get() == strike);
+ #endif
+ #if defined(SK_TRACE_GLYPH_RUN_PROCESS)
+ msg.append(strike->getDescriptor().dumpRec());
+ #endif
+ }
+ );
+ fRemoteStrikesToSend.reset();
+ #if defined(SK_TRACE_GLYPH_RUN_PROCESS)
+ msg.appendf("End send strike differences");
+ SkDebugf("%s\n", msg.c_str());
+ #endif
+}
+#else
+void SkStrikeServerImpl::writeStrikeData(std::vector<uint8_t>* memory) {
+ SkBinaryWriteBuffer buffer{nullptr, 0};
+
+ // Gather statistics about what needs to be sent.
+ size_t strikesToSend = 0;
+ fRemoteStrikesToSend.foreach([&](RemoteStrike* strike) {
+ if (strike->hasPendingGlyphs()) {
+ strikesToSend++;
+ } else {
+ // This strike has nothing to send, so drop its scaler context to reduce memory.
+ strike->resetScalerContext();
+ }
+ });
+
+ // If there are no strikes or typefaces to send, then cleanup and return.
+ if (strikesToSend == 0 && fTypefacesToSend.empty()) {
+ fRemoteStrikesToSend.reset();
+ return;
+ }
+
+ // Send newly seen typefaces.
+ SkASSERT_RELEASE(SkTFitsIn<int>(fTypefacesToSend.size()));
+ buffer.writeInt(fTypefacesToSend.size());
+ for (const auto& typeface: fTypefacesToSend) {
+ SkTypefaceProxyPrototype proto{typeface};
+ proto.flatten(buffer);
+ }
+ fTypefacesToSend.clear();
+
+ buffer.writeInt(strikesToSend);
+ fRemoteStrikesToSend.foreach(
+ [&](RemoteStrike* strike) {
+ if (strike->hasPendingGlyphs()) {
+ strike->writePendingGlyphs(buffer);
+ strike->resetScalerContext();
+ }
+ }
+ );
+ fRemoteStrikesToSend.reset();
+
+ // Copy data into the vector.
+ auto data = buffer.snapshotAsData();
+ memory->assign(data->bytes(), data->bytes() + data->size());
+}
+#endif // defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+
+sk_sp<StrikeForGPU> SkStrikeServerImpl::findOrCreateScopedStrike(
+ const SkStrikeSpec& strikeSpec) {
+ return this->getOrCreateCache(strikeSpec);
+}
+
+void SkStrikeServerImpl::checkForDeletedEntries() {
+ auto it = fDescToRemoteStrike.begin();
+ while (fDescToRemoteStrike.size() > fMaxEntriesInDescriptorMap &&
+ it != fDescToRemoteStrike.end()) {
+ RemoteStrike* strike = it->second.get();
+ if (fDiscardableHandleManager->isHandleDeleted(strike->discardableHandleId())) {
+ // If we are trying to send the strike, then do not erase it.
+ if (!fRemoteStrikesToSend.contains(strike)) {
+ // Erase returns the iterator following the removed element.
+ it = fDescToRemoteStrike.erase(it);
+ continue;
+ }
+ }
+ ++it;
+ }
+}
+
+sk_sp<RemoteStrike> SkStrikeServerImpl::getOrCreateCache(const SkStrikeSpec& strikeSpec) {
+ // In cases where tracing is turned off, make sure not to get an unused function warning.
+ // Lambdaize the function.
+ TRACE_EVENT1("skia", "RecForDesc", "rec",
+ TRACE_STR_COPY(
+ [&strikeSpec](){
+ auto ptr =
+ strikeSpec.descriptor().findEntry(kRec_SkDescriptorTag, nullptr);
+ SkScalerContextRec rec;
+ std::memcpy((void*)&rec, ptr, sizeof(rec));
+ return rec.dump();
+ }().c_str()
+ )
+ );
+
+ if (auto it = fDescToRemoteStrike.find(&strikeSpec.descriptor());
+ it != fDescToRemoteStrike.end())
+ {
+ // We have processed the RemoteStrike before. Reuse it.
+ sk_sp<RemoteStrike> strike = it->second;
+ strike->setStrikeSpec(strikeSpec);
+ if (fRemoteStrikesToSend.contains(strike.get())) {
+ // Already tracking
+ return strike;
+ }
+
+ // Strike is in unknown state on GPU. Start tracking strike on GPU by locking it.
+ bool locked = fDiscardableHandleManager->lockHandle(it->second->discardableHandleId());
+ if (locked) {
+ fRemoteStrikesToSend.add(strike.get());
+ return strike;
+ }
+
+ // If it wasn't locked, then forget this strike, and build it anew below.
+ fDescToRemoteStrike.erase(it);
+ }
+
+ const SkTypeface& typeface = strikeSpec.typeface();
+ // Create a new RemoteStrike. Start by processing the typeface.
+ const SkTypefaceID typefaceId = typeface.uniqueID();
+ if (!fCachedTypefaces.contains(typefaceId)) {
+ fCachedTypefaces.add(typefaceId);
+ fTypefacesToSend.emplace_back(typeface);
+ }
+
+ auto context = strikeSpec.createScalerContext();
+ auto newHandle = fDiscardableHandleManager->createHandle(); // Locked on creation
+ auto remoteStrike = sk_make_sp<RemoteStrike>(strikeSpec, std::move(context), newHandle);
+ remoteStrike->setStrikeSpec(strikeSpec);
+ fRemoteStrikesToSend.add(remoteStrike.get());
+ auto d = &remoteStrike->getDescriptor();
+ fDescToRemoteStrike[d] = remoteStrike;
+
+ checkForDeletedEntries();
+
+ return remoteStrike;
+}
+
+// -- GlyphTrackingDevice --------------------------------------------------------------------------
+#if defined(SK_GANESH)
+class GlyphTrackingDevice final : public SkNoPixelsDevice {
+public:
+ GlyphTrackingDevice(
+ const SkISize& dimensions, const SkSurfaceProps& props, SkStrikeServerImpl* server,
+ sk_sp<SkColorSpace> colorSpace, sktext::gpu::SDFTControl SDFTControl)
+ : SkNoPixelsDevice(SkIRect::MakeSize(dimensions), props, std::move(colorSpace))
+ , fStrikeServerImpl(server)
+ , fSDFTControl(SDFTControl) {
+ SkASSERT(fStrikeServerImpl != nullptr);
+ }
+
+ SkBaseDevice* onCreateDevice(const CreateInfo& cinfo, const SkPaint*) override {
+ const SkSurfaceProps surfaceProps(this->surfaceProps().flags(), cinfo.fPixelGeometry);
+ return new GlyphTrackingDevice(cinfo.fInfo.dimensions(), surfaceProps, fStrikeServerImpl,
+ cinfo.fInfo.refColorSpace(), fSDFTControl);
+ }
+
+ SkStrikeDeviceInfo strikeDeviceInfo() const override {
+ return {this->surfaceProps(), this->scalerContextFlags(), &fSDFTControl};
+ }
+
+protected:
+ void onDrawGlyphRunList(SkCanvas*,
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint) override {
+ SkMatrix drawMatrix = this->localToDevice();
+ drawMatrix.preTranslate(glyphRunList.origin().x(), glyphRunList.origin().y());
+
+ // Just ignore the resulting SubRunContainer. Since we're passing in a null SubRunAllocator
+ // no SubRuns will be produced.
+ STSubRunAllocator<sizeof(SubRunContainer), alignof(SubRunContainer)> tempAlloc;
+ auto container = SubRunContainer::MakeInAlloc(glyphRunList,
+ drawMatrix,
+ drawingPaint,
+ this->strikeDeviceInfo(),
+ fStrikeServerImpl,
+ &tempAlloc,
+ SubRunContainer::kStrikeCalculationsOnly,
+ "Cache Diff");
+ // Calculations only. No SubRuns.
+ SkASSERT(container->isEmpty());
+ }
+
+ sk_sp<sktext::gpu::Slug> convertGlyphRunListToSlug(const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint) override {
+ // Full matrix for placing glyphs.
+ SkMatrix positionMatrix = this->localToDevice();
+ positionMatrix.preTranslate(glyphRunList.origin().x(), glyphRunList.origin().y());
+
+ // Use the SkStrikeServer's strike cache to generate the Slug.
+ return skgpu::ganesh::MakeSlug(this->localToDevice(),
+ glyphRunList,
+ initialPaint,
+ drawingPaint,
+ this->strikeDeviceInfo(),
+ fStrikeServerImpl);
+ }
+
+private:
+ SkStrikeServerImpl* const fStrikeServerImpl;
+ const sktext::gpu::SDFTControl fSDFTControl;
+};
+#endif // defined(SK_GANESH)
+
+// -- SkStrikeServer -------------------------------------------------------------------------------
+SkStrikeServer::SkStrikeServer(DiscardableHandleManager* dhm)
+ : fImpl(new SkStrikeServerImpl{dhm}) { }
+
+SkStrikeServer::~SkStrikeServer() = default;
+
+std::unique_ptr<SkCanvas> SkStrikeServer::makeAnalysisCanvas(int width, int height,
+ const SkSurfaceProps& props,
+ sk_sp<SkColorSpace> colorSpace,
+ bool DFTSupport,
+ bool DFTPerspSupport) {
+#if defined(SK_GANESH)
+ GrContextOptions ctxOptions;
+#if !defined(SK_DISABLE_SDF_TEXT)
+ auto control = sktext::gpu::SDFTControl{DFTSupport,
+ props.isUseDeviceIndependentFonts(),
+ DFTPerspSupport,
+ ctxOptions.fMinDistanceFieldFontSize,
+ ctxOptions.fGlyphsAsPathsFontSize};
+#else
+ auto control = sktext::gpu::SDFTControl{};
+#endif
+
+ sk_sp<SkBaseDevice> trackingDevice(new GlyphTrackingDevice(
+ SkISize::Make(width, height),
+ props, this->impl(),
+ std::move(colorSpace),
+ control));
+#else
+ sk_sp<SkBaseDevice> trackingDevice(new SkNoPixelsDevice(
+ SkIRect::MakeWH(width, height), props, std::move(colorSpace)));
+#endif
+ return std::make_unique<SkCanvas>(std::move(trackingDevice));
+}
+
+void SkStrikeServer::writeStrikeData(std::vector<uint8_t>* memory) {
+ fImpl->writeStrikeData(memory);
+}
+
+SkStrikeServerImpl* SkStrikeServer::impl() { return fImpl.get(); }
+
+void SkStrikeServer::setMaxEntriesInDescriptorMapForTesting(size_t count) {
+ fImpl->setMaxEntriesInDescriptorMapForTesting(count);
+}
+size_t SkStrikeServer::remoteStrikeMapSizeForTesting() const {
+ return fImpl->remoteStrikeMapSizeForTesting();
+}
+
+// -- DiscardableStrikePinner ----------------------------------------------------------------------
+class DiscardableStrikePinner : public SkStrikePinner {
+public:
+ DiscardableStrikePinner(SkDiscardableHandleId discardableHandleId,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager)
+ : fDiscardableHandleId(discardableHandleId), fManager(std::move(manager)) {}
+
+ ~DiscardableStrikePinner() override = default;
+ bool canDelete() override { return fManager->deleteHandle(fDiscardableHandleId); }
+ void assertValid() override { fManager->assertHandleValid(fDiscardableHandleId); }
+
+private:
+ const SkDiscardableHandleId fDiscardableHandleId;
+ sk_sp<SkStrikeClient::DiscardableHandleManager> fManager;
+};
+
+// -- SkStrikeClientImpl ---------------------------------------------------------------------------
+class SkStrikeClientImpl {
+public:
+ explicit SkStrikeClientImpl(sk_sp<SkStrikeClient::DiscardableHandleManager>,
+ bool isLogging = true,
+ SkStrikeCache* strikeCache = nullptr);
+
+ bool readStrikeData(const volatile void* memory, size_t memorySize);
+ bool translateTypefaceID(SkAutoDescriptor* descriptor) const;
+ sk_sp<SkTypeface> retrieveTypefaceUsingServerID(SkTypefaceID) const;
+
+private:
+ class PictureBackedGlyphDrawable final : public SkDrawable {
+ public:
+ PictureBackedGlyphDrawable(sk_sp<SkPicture> self) : fSelf(std::move(self)) {}
+ private:
+ sk_sp<SkPicture> fSelf;
+ SkRect onGetBounds() override { return fSelf->cullRect(); }
+ size_t onApproximateBytesUsed() override {
+ return sizeof(PictureBackedGlyphDrawable) + fSelf->approximateBytesUsed();
+ }
+ void onDraw(SkCanvas* canvas) override { canvas->drawPicture(fSelf); }
+ };
+
+ #if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+ static bool ReadGlyph(SkTLazy<SkGlyph>& glyph, Deserializer* deserializer);
+ #endif
+
+ sk_sp<SkTypeface> addTypeface(const SkTypefaceProxyPrototype& typefaceProto);
+
+ SkTHashMap<SkTypefaceID, sk_sp<SkTypeface>> fServerTypefaceIdToTypeface;
+ sk_sp<SkStrikeClient::DiscardableHandleManager> fDiscardableHandleManager;
+ SkStrikeCache* const fStrikeCache;
+ const bool fIsLogging;
+};
+
+SkStrikeClientImpl::SkStrikeClientImpl(
+ sk_sp<SkStrikeClient::DiscardableHandleManager>
+ discardableManager,
+ bool isLogging,
+ SkStrikeCache* strikeCache)
+ : fDiscardableHandleManager(std::move(discardableManager)),
+ fStrikeCache{strikeCache ? strikeCache : SkStrikeCache::GlobalStrikeCache()},
+ fIsLogging{isLogging} {}
+
+#if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+// No need to write fScalerContextBits because any needed image is already generated.
+// Deserializes one glyph header (packed id, advances, bounds, mask format) into `glyph`.
+// Returns false on a short read or an invalid mask format. Image/path/drawable payloads
+// are read separately by the caller.
+bool SkStrikeClientImpl::ReadGlyph(SkTLazy<SkGlyph>& glyph, Deserializer* deserializer) {
+    SkPackedGlyphID glyphID;
+    if (!deserializer->read<SkPackedGlyphID>(&glyphID)) return false;
+    glyph.init(glyphID);
+    if (!deserializer->read<float>(&glyph->fAdvanceX)) return false;
+    if (!deserializer->read<float>(&glyph->fAdvanceY)) return false;
+    if (!deserializer->read<uint16_t>(&glyph->fWidth)) return false;
+    if (!deserializer->read<uint16_t>(&glyph->fHeight)) return false;
+    if (!deserializer->read<int16_t>(&glyph->fTop)) return false;
+    if (!deserializer->read<int16_t>(&glyph->fLeft)) return false;
+    uint8_t maskFormat;
+    if (!deserializer->read<uint8_t>(&maskFormat)) return false;
+    // Validate the raw byte before casting it into the enum.
+    if (!SkMask::IsValidFormat(maskFormat)) return false;
+    glyph->fMaskFormat = static_cast<SkMask::Format>(maskFormat);
+    SkDEBUGCODE(glyph->fAdvancesBoundsFormatAndInitialPathDone = true;)
+
+    return true;
+}
+#endif
+
+// Change the path count to track the line number of the failing read.
+// TODO: change __LINE__ back to glyphPathsCount when bug chromium:1287356 is closed.
+// On any malformed read: log, report progress counters to the discardable handle
+// manager, and return false out of the enclosing readStrikeData.
+#define READ_FAILURE \
+    { \
+        SkDebugf("Bad font data serialization line: %d", __LINE__); \
+        SkStrikeClient::DiscardableHandleManager::ReadFailureData data = { \
+                memorySize, deserializer.bytesRead(), typefaceSize, \
+                strikeCount, glyphImagesCount, __LINE__}; \
+        fDiscardableHandleManager->notifyReadFailure(data); \
+        return false; \
+    }
+
+#if defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+// Legacy wire format: replays the server's strike differences (typeface protos, strikes,
+// then per-strike glyph images, paths, and drawables) into the client strike cache.
+// Any malformed field aborts via READ_FAILURE, which also notifies
+// fDiscardableHandleManager before returning false.
+bool SkStrikeClientImpl::readStrikeData(const volatile void* memory, size_t memorySize) {
+    SkASSERT(memorySize != 0u);
+    Deserializer deserializer(static_cast<const volatile char*>(memory), memorySize);
+
+    uint64_t typefaceSize = 0;
+    uint64_t strikeCount = 0;
+    uint64_t glyphImagesCount = 0;
+    uint64_t glyphPathsCount = 0;
+    uint64_t glyphDrawablesCount = 0;
+
+    if (!deserializer.read<uint64_t>(&typefaceSize)) READ_FAILURE
+    for (size_t i = 0; i < typefaceSize; ++i) {
+        uint32_t typefaceSizeBytes;
+        // Read the size of the buffer generated at flatten time.
+        if (!deserializer.read<uint32_t>(&typefaceSizeBytes)) READ_FAILURE
+        // Temporary: use inside knowledge of SkReadBuffer to set the alignment.
+        // This should agree with the alignment used in writeStrikeData.
+        auto* bytes = deserializer.read(typefaceSizeBytes, alignof(uint32_t));
+        if (bytes == nullptr) READ_FAILURE
+        SkReadBuffer buffer(const_cast<const void*>(bytes), typefaceSizeBytes);
+        auto typefaceProto = SkTypefaceProxyPrototype::MakeFromBuffer(buffer);
+        if (!typefaceProto) READ_FAILURE
+
+        this->addTypeface(typefaceProto.value());
+    }
+
+    #if defined(SK_TRACE_GLYPH_RUN_PROCESS)
+    SkString msg;
+    msg.appendf("\nBegin receive strike differences\n");
+    #endif
+
+    if (!deserializer.read<uint64_t>(&strikeCount)) READ_FAILURE
+
+    for (size_t i = 0; i < strikeCount; ++i) {
+        StrikeSpec spec;
+        if (!deserializer.read<StrikeSpec>(&spec)) READ_FAILURE
+
+        SkAutoDescriptor ad;
+        if (!deserializer.readDescriptor(&ad)) READ_FAILURE
+        #if defined(SK_TRACE_GLYPH_RUN_PROCESS)
+        msg.appendf(" Received descriptor:\n%s", ad.getDesc()->dumpRec().c_str());
+        #endif
+
+        bool fontMetricsInitialized;
+        if (!deserializer.read(&fontMetricsInitialized)) READ_FAILURE
+
+        // Metrics are only on the wire when the server created the strike fresh
+        // (i.e. when the client's copy is not yet initialized).
+        SkFontMetrics fontMetrics{};
+        if (!fontMetricsInitialized) {
+            if (!deserializer.read<SkFontMetrics>(&fontMetrics)) READ_FAILURE
+        }
+
+        // Preflight the TypefaceID before doing the Descriptor translation.
+        auto* tfPtr = fServerTypefaceIdToTypeface.find(spec.fTypefaceID);
+        // Received a TypefaceID for a typeface we don't know about.
+        if (!tfPtr) READ_FAILURE
+
+        // Replace the ContextRec in the desc from the server to create the client
+        // side descriptor.
+        if (!this->translateTypefaceID(&ad)) READ_FAILURE
+        SkDescriptor* clientDesc = ad.getDesc();
+
+        #if defined(SK_TRACE_GLYPH_RUN_PROCESS)
+        msg.appendf(" Mapped descriptor:\n%s", clientDesc->dumpRec().c_str());
+        #endif
+        auto strike = fStrikeCache->findStrike(*clientDesc);
+
+        // Make sure strike is pinned
+        if (strike) {
+            strike->verifyPinnedStrike();
+        }
+
+        // Metrics are only sent the first time. If the metrics are not initialized, there must
+        // be an existing strike.
+        if (fontMetricsInitialized && strike == nullptr) READ_FAILURE
+        if (strike == nullptr) {
+            // Note that we don't need to deserialize the effects since we won't be generating any
+            // glyphs here anyway, and the desc is still correct since it includes the serialized
+            // effects.
+            SkStrikeSpec strikeSpec{*clientDesc, *tfPtr};
+            strike = fStrikeCache->createStrike(
+                    strikeSpec, &fontMetrics,
+                    std::make_unique<DiscardableStrikePinner>(
+                            spec.fDiscardableHandleId, fDiscardableHandleManager));
+        }
+
+        if (!deserializer.read<uint64_t>(&glyphImagesCount)) READ_FAILURE
+        for (size_t j = 0; j < glyphImagesCount; j++) {
+            SkTLazy<SkGlyph> glyph;
+            if (!ReadGlyph(glyph, &deserializer)) READ_FAILURE
+
+            if (!glyph->isEmpty() && SkGlyphDigest::FitsInAtlas(*glyph)) {
+                const volatile void* image =
+                        deserializer.read(glyph->imageSize(), glyph->formatAlignment());
+                if (!image) READ_FAILURE
+                glyph->fImage = (void*)image;
+            }
+
+            strike->mergeGlyphAndImage(glyph->getPackedID(), *glyph);
+        }
+
+        if (!deserializer.read<uint64_t>(&glyphPathsCount)) READ_FAILURE
+        for (size_t j = 0; j < glyphPathsCount; j++) {
+            SkTLazy<SkGlyph> glyph;
+            if (!ReadGlyph(glyph, &deserializer)) READ_FAILURE
+
+            SkGlyph* allocatedGlyph = strike->mergeGlyphAndImage(glyph->getPackedID(), *glyph);
+
+            SkPath* pathPtr = nullptr;
+            SkPath path;
+            uint64_t pathSize = 0u;
+            bool hairline = false;
+            if (!deserializer.read<uint64_t>(&pathSize)) READ_FAILURE
+
+            // A pathSize of 0 means "no path for this glyph"; pathPtr stays null.
+            if (pathSize > 0) {
+                auto* pathData = deserializer.read(pathSize, kPathAlignment);
+                if (!pathData) READ_FAILURE
+                if (!path.readFromMemory(const_cast<const void*>(pathData), pathSize)) READ_FAILURE
+                pathPtr = &path;
+                if (!deserializer.read<bool>(&hairline)) READ_FAILURE
+            }
+
+            strike->mergePath(allocatedGlyph, pathPtr, hairline);
+        }
+
+        if (!deserializer.read<uint64_t>(&glyphDrawablesCount)) READ_FAILURE
+        for (size_t j = 0; j < glyphDrawablesCount; j++) {
+            SkTLazy<SkGlyph> glyph;
+            if (!ReadGlyph(glyph, &deserializer)) READ_FAILURE
+
+            SkGlyph* allocatedGlyph = strike->mergeGlyphAndImage(glyph->getPackedID(), *glyph);
+
+            sk_sp<SkDrawable> drawable;
+            uint64_t drawableSize = 0u;
+            if (!deserializer.read<uint64_t>(&drawableSize)) READ_FAILURE
+
+            // Drawables travel as serialized SkPictures; rewrap them for the strike.
+            if (drawableSize > 0) {
+                auto* drawableData = deserializer.read(drawableSize, kDrawableAlignment);
+                if (!drawableData) READ_FAILURE
+                sk_sp<SkPicture> picture(SkPicture::MakeFromData(
+                        const_cast<const void*>(drawableData), drawableSize));
+                if (!picture) READ_FAILURE
+
+                drawable = sk_make_sp<PictureBackedGlyphDrawable>(std::move(picture));
+            }
+
+            strike->mergeDrawable(allocatedGlyph, std::move(drawable));
+        }
+    }
+
+#if defined(SK_TRACE_GLYPH_RUN_PROCESS)
+    msg.appendf("End receive strike differences");
+    SkDebugf("%s\n", msg.c_str());
+#endif
+
+    return true;
+}
+#else
+// Current wire format: SkReadBuffer-based deserialization. Reads a typeface count and
+// that many SkTypefaceProxyPrototypes, then a strike count; for each strike reads the
+// server typeface id, discardable handle id, a descriptor, optional font metrics, and
+// merges glyph data via the strike's mergeFromBuffer. On any malformed field the
+// discardable handle manager is notified (with progress counters) and false is returned.
+bool SkStrikeClientImpl::readStrikeData(const volatile void* memory, size_t memorySize) {
+    SkASSERT(memorySize != 0);
+    SkASSERT(memory != nullptr);
+
+    SkReadBuffer buffer{const_cast<const void*>(memory), memorySize};
+
+    int curTypeface = 0,
+        curStrike = 0;
+
+    // Report a read failure at the given source line along with how far we got.
+    auto postError = [&](int line) {
+        SkDebugf("Read Error Posted %s : %d", __FILE__, line);
+        SkStrikeClient::DiscardableHandleManager::ReadFailureData data{
+                memorySize,
+                buffer.offset(),
+                SkTo<uint64_t>(curTypeface),
+                SkTo<uint64_t>(curStrike),
+                SkTo<uint64_t>(0),
+                SkTo<uint64_t>(0)};
+        fDiscardableHandleManager->notifyReadFailure(data);
+    };
+
+    // Read the number of typefaces sent.
+    const int typefaceCount = buffer.readInt();
+    for (curTypeface = 0; curTypeface < typefaceCount; ++curTypeface) {
+        auto proto = SkTypefaceProxyPrototype::MakeFromBuffer(buffer);
+        if (proto) {
+            this->addTypeface(proto.value());
+        } else {
+            postError(__LINE__);
+            return false;
+        }
+    }
+
+    // Read the number of strikes sent.
+    const int strikeCount = buffer.readInt();
+    for (curStrike = 0; curStrike < strikeCount; ++curStrike) {
+
+        const SkTypefaceID serverTypefaceID = buffer.readUInt();
+        if (serverTypefaceID == 0 && !buffer.isValid()) {
+            postError(__LINE__);
+            return false;
+        }
+
+        const SkDiscardableHandleId discardableHandleID = buffer.readUInt();
+        if (discardableHandleID == 0 && !buffer.isValid()) {
+            postError(__LINE__);
+            return false;
+        }
+
+        std::optional<SkAutoDescriptor> serverDescriptor = SkAutoDescriptor::MakeFromBuffer(buffer);
+        if (!buffer.validate(serverDescriptor.has_value())) {
+            postError(__LINE__);
+            return false;
+        }
+
+        const bool fontMetricsInitialized = buffer.readBool();
+        if (!fontMetricsInitialized && !buffer.isValid()) {
+            postError(__LINE__);
+            return false;
+        }
+
+        // Metrics travel on the wire only when the client's copy is not yet initialized.
+        std::optional<SkFontMetrics> fontMetrics;
+        if (!fontMetricsInitialized) {
+            fontMetrics = SkFontMetricsPriv::MakeFromBuffer(buffer);
+            if (!fontMetrics || !buffer.isValid()) {
+                postError(__LINE__);
+                return false;
+            }
+        }
+
+        auto* clientTypeface = fServerTypefaceIdToTypeface.find(serverTypefaceID);
+        if (clientTypeface == nullptr) {
+            postError(__LINE__);
+            return false;
+        }
+
+        if (!this->translateTypefaceID(&serverDescriptor.value())) {
+            postError(__LINE__);
+            return false;
+        }
+
+        SkDescriptor* clientDescriptor = serverDescriptor->getDesc();
+        auto strike = fStrikeCache->findStrike(*clientDescriptor);
+
+        if (strike == nullptr) {
+            // Metrics are only sent the first time. If creating a new strike, then the metrics
+            // are not initialized.
+            if (fontMetricsInitialized) {
+                postError(__LINE__);
+                return false;
+            }
+            // fontMetrics has a value here: !fontMetricsInitialized was required above.
+            SkStrikeSpec strikeSpec{*clientDescriptor, *clientTypeface};
+            strike = fStrikeCache->createStrike(
+                    strikeSpec, &fontMetrics.value(),
+                    std::make_unique<DiscardableStrikePinner>(
+                            discardableHandleID, fDiscardableHandleManager));
+        }
+
+        // Make sure this strike is pinned on the GPU side.
+        strike->verifyPinnedStrike();
+
+        if (!strike->mergeFromBuffer(buffer)) {
+            postError(__LINE__);
+            return false;
+        }
+    }
+
+    return true;
+}
+#endif // defined(SK_SUPPORT_LEGACY_STRIKE_SERIALIZATION)
+
+// Rewrites the SkScalerContextRec embedded in `toChange` so its typeface id refers to
+// the client-side proxy typeface instead of the server's id, then re-checksums the
+// descriptor. Returns false if the server typeface id is unknown.
+bool SkStrikeClientImpl::translateTypefaceID(SkAutoDescriptor* toChange) const {
+    SkDescriptor& descriptor = *toChange->getDesc();
+
+    // Rewrite the typefaceID in the rec.
+    {
+        uint32_t size;
+        // findEntry returns a const void*, remove the const in order to update in place.
+        void* ptr = const_cast<void *>(descriptor.findEntry(kRec_SkDescriptorTag, &size));
+        // NOTE(review): assumes the descriptor always contains a kRec entry of exactly
+        // sizeof(SkScalerContextRec); ptr/size are not validated here — confirm callers
+        // only pass descriptors built from a rec.
+        SkScalerContextRec rec;
+        std::memcpy((void*)&rec, ptr, size);
+        // Get the local typeface from remote typefaceID.
+        auto* tfPtr = fServerTypefaceIdToTypeface.find(rec.fTypefaceID);
+        // Received a strike for a typeface which doesn't exist.
+        if (!tfPtr) { return false; }
+        // Update the typeface id to work with the client side.
+        rec.fTypefaceID = tfPtr->get()->uniqueID();
+        std::memcpy(ptr, &rec, size);
+    }
+
+    // The checksum covers the rec bytes we just rewrote, so it must be recomputed.
+    descriptor.computeChecksum();
+
+    return true;
+}
+
+// Looks up the client proxy typeface for a server id; null if never registered.
+sk_sp<SkTypeface> SkStrikeClientImpl::retrieveTypefaceUsingServerID(SkTypefaceID typefaceID) const {
+    auto* tfPtr = fServerTypefaceIdToTypeface.find(typefaceID);
+    return tfPtr != nullptr ? *tfPtr : nullptr;
+}
+
+// Registers (or returns the already-registered) client proxy for a server typeface.
+sk_sp<SkTypeface> SkStrikeClientImpl::addTypeface(const SkTypefaceProxyPrototype& typefaceProto) {
+    sk_sp<SkTypeface>* typeface =
+            fServerTypefaceIdToTypeface.find(typefaceProto.serverTypefaceID());
+
+    // We already have the typeface.
+    if (typeface != nullptr)  {
+        return *typeface;
+    }
+
+    auto newTypeface = sk_make_sp<SkTypefaceProxy>(
+            typefaceProto, fDiscardableHandleManager, fIsLogging);
+    fServerTypefaceIdToTypeface.set(typefaceProto.serverTypefaceID(), newTypeface);
+    // std::move enables the derived-to-base sk_sp conversion without a refcount bump.
+    return std::move(newTypeface);
+}
+
+// SkStrikeClient ----------------------------------------------------------------------------------
+// Public facade: all work is delegated to the pimpl (SkStrikeClientImpl).
+SkStrikeClient::SkStrikeClient(sk_sp<DiscardableHandleManager> discardableManager,
+                               bool isLogging,
+                               SkStrikeCache* strikeCache)
+        : fImpl{new SkStrikeClientImpl{std::move(discardableManager), isLogging, strikeCache}} {}
+
+SkStrikeClient::~SkStrikeClient() = default;
+
+// Replays a serialized blob of strike differences into the client strike cache.
+bool SkStrikeClient::readStrikeData(const volatile void* memory, size_t memorySize) {
+    return fImpl->readStrikeData(memory, memorySize);
+}
+
+sk_sp<SkTypeface> SkStrikeClient::retrieveTypefaceUsingServerIDForTest(
+        SkTypefaceID typefaceID) const {
+    return fImpl->retrieveTypefaceUsingServerID(typefaceID);
+}
+
+bool SkStrikeClient::translateTypefaceID(SkAutoDescriptor* descriptor) const {
+    return fImpl->translateTypefaceID(descriptor);
+}
+
+#if defined(SK_GANESH)
+sk_sp<sktext::gpu::Slug> SkStrikeClient::deserializeSlugForTest(const void* data, size_t size) const {
+    return sktext::gpu::Slug::Deserialize(data, size, this);
+}
+#endif // defined(SK_GANESH)
diff --git a/gfx/skia/skia/src/core/SkClipStack.cpp b/gfx/skia/skia/src/core/SkClipStack.cpp
new file mode 100644
index 0000000000..2e26754e52
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStack.cpp
@@ -0,0 +1,999 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPath.h"
+#include "src/core/SkClipStack.h"
+#include "src/core/SkRectPriv.h"
+#include "src/shaders/SkShaderBase.h"
+
+#include <atomic>
+#include <new>
+
+// Copy constructor: copies only the geometry member that is active for `that`'s
+// device-space type, then the bookkeeping fields (op, AA, bounds, gen id, ...).
+SkClipStack::Element::Element(const Element& that) {
+    switch (that.getDeviceSpaceType()) {
+        case DeviceSpaceType::kEmpty:
+            fDeviceSpaceRRect.setEmpty();
+            fDeviceSpacePath.reset();
+            fShader.reset();
+            break;
+        case DeviceSpaceType::kRect: // Rect uses rrect
+        case DeviceSpaceType::kRRect:
+            fDeviceSpacePath.reset();
+            fShader.reset();
+            fDeviceSpaceRRect = that.fDeviceSpaceRRect;
+            break;
+        case DeviceSpaceType::kPath:
+            fShader.reset();
+            fDeviceSpacePath.set(that.getDeviceSpacePath());
+            break;
+        case DeviceSpaceType::kShader:
+            fDeviceSpacePath.reset();
+            fShader = that.fShader;
+            break;
+    }
+
+    fSaveCount = that.fSaveCount;
+    fOp = that.fOp;
+    fDeviceSpaceType = that.fDeviceSpaceType;
+    fDoAA = that.fDoAA;
+    fIsReplace = that.fIsReplace;
+    fFiniteBoundType = that.fFiniteBoundType;
+    fFiniteBound = that.fFiniteBound;
+    fIsIntersectionOfRects = that.fIsIntersectionOfRects;
+    fGenID = that.fGenID;
+}
+
+SkClipStack::Element::~Element() = default;
+
+// Equality on the clip state fields plus the active geometry; identity compares true.
+bool SkClipStack::Element::operator== (const Element& element) const {
+    if (this == &element) {
+        return true;
+    }
+    if (fOp != element.fOp || fDeviceSpaceType != element.fDeviceSpaceType ||
+        fDoAA != element.fDoAA || fIsReplace != element.fIsReplace ||
+        fSaveCount != element.fSaveCount) {
+        return false;
+    }
+    switch (fDeviceSpaceType) {
+        case DeviceSpaceType::kShader:
+            return this->getShader() == element.getShader();
+        case DeviceSpaceType::kPath:
+            return this->getDeviceSpacePath() == element.getDeviceSpacePath();
+        case DeviceSpaceType::kRRect:
+            return fDeviceSpaceRRect == element.fDeviceSpaceRRect;
+        case DeviceSpaceType::kRect:
+            return this->getDeviceSpaceRect() == element.getDeviceSpaceRect();
+        case DeviceSpaceType::kEmpty:
+            return true;
+        default:
+            SkDEBUGFAIL("Unexpected type.");
+            return false;
+    }
+}
+
+// Device-space bounds of this element's geometry. Shaders report an effectively
+// infinite rect; empty elements report a zero rect.
+const SkRect& SkClipStack::Element::getBounds() const {
+    static const SkRect kEmpty = {0, 0, 0, 0};
+    static const SkRect kInfinite = SkRectPriv::MakeLargeS32();
+    switch (fDeviceSpaceType) {
+        case DeviceSpaceType::kRect: // fallthrough
+        case DeviceSpaceType::kRRect:
+            return fDeviceSpaceRRect.getBounds();
+        case DeviceSpaceType::kPath:
+            return fDeviceSpacePath->getBounds();
+        case DeviceSpaceType::kShader:
+            // Shaders have infinite bounds since any pixel could have clipped or full coverage
+            // (which is different from wide-open, where every pixel has 1.0 coverage, or empty
+            // where every pixel has 0.0 coverage).
+            return kInfinite;
+        case DeviceSpaceType::kEmpty:
+            return kEmpty;
+        default:
+            SkDEBUGFAIL("Unexpected type.");
+            return kEmpty;
+    }
+}
+
+// Conservative containment: true only when this element certainly contains `rect`.
+// Shaders and empty elements never report containment.
+bool SkClipStack::Element::contains(const SkRect& rect) const {
+    switch (fDeviceSpaceType) {
+        case DeviceSpaceType::kRect:
+            return this->getDeviceSpaceRect().contains(rect);
+        case DeviceSpaceType::kRRect:
+            return fDeviceSpaceRRect.contains(rect);
+        case DeviceSpaceType::kPath:
+            return fDeviceSpacePath->conservativelyContainsRect(rect);
+        case DeviceSpaceType::kEmpty:
+        case DeviceSpaceType::kShader:
+            return false;
+        default:
+            SkDEBUGFAIL("Unexpected type.");
+            return false;
+    }
+}
+
+// Conservative containment for round rects; falls back to bounds checks.
+bool SkClipStack::Element::contains(const SkRRect& rrect) const {
+    switch (fDeviceSpaceType) {
+        case DeviceSpaceType::kRect:
+            return this->getDeviceSpaceRect().contains(rrect.getBounds());
+        case DeviceSpaceType::kRRect:
+            // We don't currently have a generalized rrect-rrect containment.
+            return fDeviceSpaceRRect.contains(rrect.getBounds()) || rrect == fDeviceSpaceRRect;
+        case DeviceSpaceType::kPath:
+            return fDeviceSpacePath->conservativelyContainsRect(rrect.getBounds());
+        case DeviceSpaceType::kEmpty:
+        case DeviceSpaceType::kShader:
+            return false;
+        default:
+            SkDEBUGFAIL("Unexpected type.");
+            return false;
+    }
+}
+
+// Flips the element's fill sense. Rects/rrects are promoted to inverse-filled paths;
+// paths toggle their fill type; shaders invert their alpha.
+void SkClipStack::Element::invertShapeFillType() {
+    switch (fDeviceSpaceType) {
+        case DeviceSpaceType::kRect:
+            fDeviceSpacePath.init();
+            fDeviceSpacePath->addRect(this->getDeviceSpaceRect());
+            fDeviceSpacePath->setFillType(SkPathFillType::kInverseEvenOdd);
+            fDeviceSpaceType = DeviceSpaceType::kPath;
+            break;
+        case DeviceSpaceType::kRRect:
+            fDeviceSpacePath.init();
+            fDeviceSpacePath->addRRect(fDeviceSpaceRRect);
+            fDeviceSpacePath->setFillType(SkPathFillType::kInverseEvenOdd);
+            fDeviceSpaceType = DeviceSpaceType::kPath;
+            break;
+        case DeviceSpaceType::kPath:
+            fDeviceSpacePath->toggleInverseFillType();
+            break;
+        case DeviceSpaceType::kShader:
+            fShader = as_SB(fShader)->makeInvertAlpha();
+            break;
+        case DeviceSpaceType::kEmpty:
+            // Should this set to an empty, inverse filled path?
+            break;
+    }
+}
+
+// Shared tail of the init* methods: records op/AA/save count and resets the cached
+// bound info to "void" (inside-out + empty) until updateBoundAndGenID runs.
+void SkClipStack::Element::initCommon(int saveCount, SkClipOp op, bool doAA) {
+    fSaveCount = saveCount;
+    fOp = op;
+    fDoAA = doAA;
+    fIsReplace = false;
+    // A default of inside-out and empty bounds means the bounds are effectively void as it
+    // indicates that nothing is known to be outside the clip.
+    fFiniteBoundType = kInsideOut_BoundsType;
+    fFiniteBound.setEmpty();
+    fIsIntersectionOfRects = false;
+    fGenID = kInvalidGenID;
+}
+
+// Initialize from a rect: stays a rect when the matrix maps rects to rects,
+// otherwise the rect is converted to a path and transformed.
+void SkClipStack::Element::initRect(int saveCount, const SkRect& rect, const SkMatrix& m,
+                                    SkClipOp op, bool doAA) {
+    if (m.rectStaysRect()) {
+        SkRect devRect;
+        m.mapRect(&devRect, rect);
+        fDeviceSpaceRRect.setRect(devRect);
+        fDeviceSpaceType = DeviceSpaceType::kRect;
+        this->initCommon(saveCount, op, doAA);
+        return;
+    }
+    SkPath path;
+    path.addRect(rect);
+    path.setIsVolatile(true);
+    this->initAsPath(saveCount, path, m, op, doAA);
+}
+
+// Initialize from a round rect: uses the transformed rrect when possible (demoting
+// degenerate rrects to rects), otherwise falls back to a path.
+void SkClipStack::Element::initRRect(int saveCount, const SkRRect& rrect, const SkMatrix& m,
+                                     SkClipOp op, bool doAA) {
+    if (rrect.transform(m, &fDeviceSpaceRRect)) {
+        SkRRect::Type type = fDeviceSpaceRRect.getType();
+        if (SkRRect::kRect_Type == type || SkRRect::kEmpty_Type == type) {
+            fDeviceSpaceType = DeviceSpaceType::kRect;
+        } else {
+            fDeviceSpaceType = DeviceSpaceType::kRRect;
+        }
+        this->initCommon(saveCount, op, doAA);
+        return;
+    }
+    SkPath path;
+    path.addRRect(rrect);
+    path.setIsVolatile(true);
+    this->initAsPath(saveCount, path, m, op, doAA);
+}
+
+// Initialize from a path, demoting non-inverse paths that are really rects or
+// ovals to the cheaper element types.
+void SkClipStack::Element::initPath(int saveCount, const SkPath& path, const SkMatrix& m,
+                                    SkClipOp op, bool doAA) {
+    if (!path.isInverseFillType()) {
+        SkRect r;
+        if (path.isRect(&r)) {
+            this->initRect(saveCount, r, m, op, doAA);
+            return;
+        }
+        SkRect ovalRect;
+        if (path.isOval(&ovalRect)) {
+            SkRRect rrect;
+            rrect.setOval(ovalRect);
+            this->initRRect(saveCount, rrect, m, op, doAA);
+            return;
+        }
+    }
+    this->initAsPath(saveCount, path, m, op, doAA);
+}
+
+// Store `path` transformed into device space, unconditionally as a path element.
+void SkClipStack::Element::initAsPath(int saveCount, const SkPath& path, const SkMatrix& m,
+                                      SkClipOp op, bool doAA) {
+    path.transform(m, fDeviceSpacePath.init());
+    fDeviceSpacePath->setIsVolatile(true);
+    fDeviceSpaceType = DeviceSpaceType::kPath;
+    this->initCommon(saveCount, op, doAA);
+}
+
+// Shader clips are always intersect ops with no AA flag.
+void SkClipStack::Element::initShader(int saveCount, sk_sp<SkShader> shader) {
+    SkASSERT(shader);
+    fDeviceSpaceType = DeviceSpaceType::kShader;
+    fShader = std::move(shader);
+    this->initCommon(saveCount, SkClipOp::kIntersect, false);
+}
+
+// Replace is modeled as an intersect element flagged with fIsReplace.
+void SkClipStack::Element::initReplaceRect(int saveCount, const SkRect& rect, bool doAA) {
+    fDeviceSpaceRRect.setRect(rect);
+    fDeviceSpaceType = DeviceSpaceType::kRect;
+    this->initCommon(saveCount, SkClipOp::kIntersect, doAA);
+    fIsReplace = true;
+}
+
+// Express this element as a device-space path. Shaders become the large clip rect
+// (their geometry is unbounded); the result is always marked volatile.
+void SkClipStack::Element::asDeviceSpacePath(SkPath* path) const {
+    switch (fDeviceSpaceType) {
+        case DeviceSpaceType::kEmpty:
+            path->reset();
+            break;
+        case DeviceSpaceType::kRect:
+            path->reset();
+            path->addRect(this->getDeviceSpaceRect());
+            break;
+        case DeviceSpaceType::kRRect:
+            path->reset();
+            path->addRRect(fDeviceSpaceRRect);
+            break;
+        case DeviceSpaceType::kPath:
+            *path = *fDeviceSpacePath;
+            break;
+        case DeviceSpaceType::kShader:
+            path->reset();
+            path->addRect(SkRectPriv::MakeLargeS32());
+            break;
+    }
+    path->setIsVolatile(true);
+}
+
+// Collapse this element to the canonical empty state (see checkEmpty for invariants).
+void SkClipStack::Element::setEmpty() {
+    fDeviceSpaceType = DeviceSpaceType::kEmpty;
+    fFiniteBound.setEmpty();
+    fFiniteBoundType = kNormal_BoundsType;
+    fIsIntersectionOfRects = false;
+    fDeviceSpaceRRect.setEmpty();
+    fDeviceSpacePath.reset();
+    fShader.reset();
+    fGenID = kEmptyGenID;
+    SkDEBUGCODE(this->checkEmpty();)
+}
+
+// Debug-only: asserts every invariant of the canonical empty element.
+void SkClipStack::Element::checkEmpty() const {
+    SkASSERT(fFiniteBound.isEmpty());
+    SkASSERT(kNormal_BoundsType == fFiniteBoundType);
+    SkASSERT(!fIsIntersectionOfRects);
+    SkASSERT(kEmptyGenID == fGenID);
+    SkASSERT(fDeviceSpaceRRect.isEmpty());
+    SkASSERT(!fDeviceSpacePath.isValid());
+    SkASSERT(!fShader);
+}
+
+// True when a new clip with `op` could be folded into this element rather than
+// pushed as a new entry: the element is already empty, or both are intersects
+// within the same save/restore frame.
+bool SkClipStack::Element::canBeIntersectedInPlace(int saveCount, SkClipOp op) const {
+    if (DeviceSpaceType::kEmpty == fDeviceSpaceType &&
+        (SkClipOp::kDifference == op || SkClipOp::kIntersect == op)) {
+        return true;
+    }
+    // Only clips within the same save/restore frame (as captured by
+    // the save count) can be merged
+    return fSaveCount == saveCount &&
+           SkClipOp::kIntersect == op &&
+           (SkClipOp::kIntersect == fOp || this->isReplaceOp());
+}
+
+// Decides whether a rect∩rect can be performed in place without changing rendering,
+// which hinges on the two rects' AA settings when they differ.
+bool SkClipStack::Element::rectRectIntersectAllowed(const SkRect& newR, bool newAA) const {
+    SkASSERT(DeviceSpaceType::kRect == fDeviceSpaceType);
+
+    if (fDoAA == newAA) {
+        // if the AA setting is the same there is no issue
+        return true;
+    }
+
+    if (!SkRect::Intersects(this->getDeviceSpaceRect(), newR)) {
+        // The calling code will correctly set the result to the empty clip
+        return true;
+    }
+
+    if (this->getDeviceSpaceRect().contains(newR)) {
+        // if the new rect carves out a portion of the old one there is no
+        // issue
+        return true;
+    }
+
+    // So either the two overlap in some complex manner or newR contains oldR.
+    // In the first, case the edges will require different AA. In the second,
+    // the AA setting that would be carried forward is incorrect (e.g., oldR
+    // is AA while newR is BW but since newR contains oldR, oldR will be
+    // drawn BW) since the new AA setting will predominate.
+    return false;
+}
+
+// a mirror of combineBoundsRevDiff
+// Updates fFiniteBound/fFiniteBoundType for a kDifference op, keyed on which of the
+// two operands (previous clip, current element) is inverse-filled.
+void SkClipStack::Element::combineBoundsDiff(FillCombo combination, const SkRect& prevFinite) {
+    switch (combination) {
+        case kInvPrev_InvCur_FillCombo:
+            // In this case the only pixels that can remain set
+            // are inside the current clip rect since the extensions
+            // to infinity of both clips cancel out and whatever
+            // is outside of the current clip is removed
+            fFiniteBoundType = kNormal_BoundsType;
+            break;
+        case kInvPrev_Cur_FillCombo:
+            // In this case the current op is finite so the only pixels
+            // that aren't set are whatever isn't set in the previous
+            // clip and whatever this clip carves out
+            fFiniteBound.join(prevFinite);
+            fFiniteBoundType = kInsideOut_BoundsType;
+            break;
+        case kPrev_InvCur_FillCombo:
+            // In this case everything outside of this clip's bound
+            // is erased, so the only pixels that can remain set
+            // occur w/in the intersection of the two finite bounds
+            if (!fFiniteBound.intersect(prevFinite)) {
+                fFiniteBound.setEmpty();
+                fGenID = kEmptyGenID;
+            }
+            fFiniteBoundType = kNormal_BoundsType;
+            break;
+        case kPrev_Cur_FillCombo:
+            // The most conservative result bound is that of the
+            // prior clip. This could be wildly incorrect if the
+            // second clip either exactly matches the first clip
+            // (which should yield the empty set) or reduces the
+            // size of the prior bound (e.g., if the second clip
+            // exactly matched the bottom half of the prior clip).
+            // We ignore these two possibilities.
+            fFiniteBound = prevFinite;
+            break;
+        default:
+            SkDEBUGFAIL("SkClipStack::Element::combineBoundsDiff Invalid fill combination");
+            break;
+    }
+}
+
+// a mirror of combineBoundsUnion
+// Updates fFiniteBound/fFiniteBoundType for a kIntersect op; an empty intersection
+// of two normal bounds collapses the element to empty.
+void SkClipStack::Element::combineBoundsIntersection(int combination, const SkRect& prevFinite) {
+
+    switch (combination) {
+        case kInvPrev_InvCur_FillCombo:
+            // The only pixels that aren't writable in this case
+            // occur in the union of the two finite bounds
+            fFiniteBound.join(prevFinite);
+            fFiniteBoundType = kInsideOut_BoundsType;
+            break;
+        case kInvPrev_Cur_FillCombo:
+            // In this case the only pixels that will remain writeable
+            // are within the current clip
+            break;
+        case kPrev_InvCur_FillCombo:
+            // In this case the only pixels that will remain writeable
+            // are with the previous clip
+            fFiniteBound = prevFinite;
+            fFiniteBoundType = kNormal_BoundsType;
+            break;
+        case kPrev_Cur_FillCombo:
+            if (!fFiniteBound.intersect(prevFinite)) {
+                this->setEmpty();
+            }
+            break;
+        default:
+            SkDEBUGFAIL("SkClipStack::Element::combineBoundsIntersection Invalid fill combination");
+            break;
+    }
+}
+
+// Recomputes this element's cached finite bound, bound type, rect-intersection flag,
+// and generation id by combining its own geometry with the prior element's bound
+// (prior may be null at the bottom of the stack).
+void SkClipStack::Element::updateBoundAndGenID(const Element* prior) {
+    // We set this first here but we may overwrite it later if we determine that the clip is
+    // either wide-open or empty.
+    fGenID = GetNextGenID();
+
+    // First, optimistically update the current Element's bound information
+    // with the current clip's bound
+    fIsIntersectionOfRects = false;
+    switch (fDeviceSpaceType) {
+        case DeviceSpaceType::kRect:
+            fFiniteBound = this->getDeviceSpaceRect();
+            fFiniteBoundType = kNormal_BoundsType;
+
+            // Note: the "nullptr == prior" alternative short-circuits the OR before the
+            // prior-> dereference in the final alternative.
+            if (this->isReplaceOp() ||
+                (SkClipOp::kIntersect == fOp && nullptr == prior) ||
+                (SkClipOp::kIntersect == fOp && prior->fIsIntersectionOfRects &&
+                 prior->rectRectIntersectAllowed(this->getDeviceSpaceRect(), fDoAA))) {
+                fIsIntersectionOfRects = true;
+            }
+            break;
+        case DeviceSpaceType::kRRect:
+            fFiniteBound = fDeviceSpaceRRect.getBounds();
+            fFiniteBoundType = kNormal_BoundsType;
+            break;
+        case DeviceSpaceType::kPath:
+            fFiniteBound = fDeviceSpacePath->getBounds();
+
+            if (fDeviceSpacePath->isInverseFillType()) {
+                fFiniteBoundType = kInsideOut_BoundsType;
+            } else {
+                fFiniteBoundType = kNormal_BoundsType;
+            }
+            break;
+        case DeviceSpaceType::kShader:
+            // A shader is infinite. We don't act as wide-open here (which is an empty bounds with
+            // the inside out type). This is because when the bounds is empty and inside-out, we
+            // know there's full coverage everywhere. With a shader, there's *unknown* coverage
+            // everywhere.
+            fFiniteBound = SkRectPriv::MakeLargeS32();
+            fFiniteBoundType = kNormal_BoundsType;
+            break;
+        case DeviceSpaceType::kEmpty:
+            SkDEBUGFAIL("We shouldn't get here with an empty element.");
+            break;
+    }
+
+    // Now determine the previous Element's bound information taking into
+    // account that there may be no previous clip
+    SkRect prevFinite;
+    SkClipStack::BoundsType prevType;
+
+    if (nullptr == prior) {
+        // no prior clip means the entire plane is writable
+        prevFinite.setEmpty();   // there are no pixels that cannot be drawn to
+        prevType = kInsideOut_BoundsType;
+    } else {
+        prevFinite = prior->fFiniteBound;
+        prevType = prior->fFiniteBoundType;
+    }
+
+    // Encode (current inside-out?, previous inside-out?) into the two FillCombo bits.
+    FillCombo combination = kPrev_Cur_FillCombo;
+    if (kInsideOut_BoundsType == fFiniteBoundType) {
+        combination = (FillCombo) (combination | 0x01);
+    }
+    if (kInsideOut_BoundsType == prevType) {
+        combination = (FillCombo) (combination | 0x02);
+    }
+
+    SkASSERT(kInvPrev_InvCur_FillCombo == combination ||
+             kInvPrev_Cur_FillCombo == combination ||
+             kPrev_InvCur_FillCombo == combination ||
+             kPrev_Cur_FillCombo == combination);
+
+    // Now integrate with clip with the prior clips
+    if (!this->isReplaceOp()) {
+        switch (fOp) {
+            case SkClipOp::kDifference:
+                this->combineBoundsDiff(combination, prevFinite);
+                break;
+            case SkClipOp::kIntersect:
+                this->combineBoundsIntersection(combination, prevFinite);
+                break;
+            default:
+                SkDebugf("SkClipOp error\n");
+                SkASSERT(0);
+                break;
+        }
+    } // else Replace just ignores everything prior and should already have filled in bounds.
+}
+
+// This constant determines how many Element's are allocated together as a block in
+// the deque. As such it needs to balance allocating too much memory vs.
+// incurring allocation/deallocation thrashing. It should roughly correspond to
+// the deepest save/restore stack we expect to see.
+static const int kDefaultElementAllocCnt = 8;
+
+SkClipStack::SkClipStack()
+    : fDeque(sizeof(Element), kDefaultElementAllocCnt)
+    , fSaveCount(0) {
+}
+
+// Variant that seeds the deque with caller-provided initial storage.
+SkClipStack::SkClipStack(void* storage, size_t size)
+    : fDeque(sizeof(Element), storage, size, kDefaultElementAllocCnt)
+    , fSaveCount(0) {
+}
+
+SkClipStack::SkClipStack(const SkClipStack& b)
+    : fDeque(sizeof(Element), kDefaultElementAllocCnt) {
+    *this = b;
+}
+
+SkClipStack::~SkClipStack() {
+    reset();
+}
+
+// Deep copy: destroy our elements, then placement-new copies of b's elements
+// front-to-back (elements live raw inside the deque storage).
+SkClipStack& SkClipStack::operator=(const SkClipStack& b) {
+    if (this == &b) {
+        return *this;
+    }
+    reset();
+
+    fSaveCount = b.fSaveCount;
+    SkDeque::F2BIter recIter(b.fDeque);
+    for (const Element* element = (const Element*)recIter.next();
+         element != nullptr;
+         element = (const Element*)recIter.next()) {
+        new (fDeque.push_back()) Element(*element);
+    }
+
+    return *this;
+}
+
+// Equal gen ids short-circuit to true; otherwise compare save counts and
+// element-by-element contents.
+bool SkClipStack::operator==(const SkClipStack& b) const {
+    if (this->getTopmostGenID() == b.getTopmostGenID()) {
+        return true;
+    }
+    if (fSaveCount != b.fSaveCount ||
+        fDeque.count() != b.fDeque.count()) {
+        return false;
+    }
+    SkDeque::F2BIter myIter(fDeque);
+    SkDeque::F2BIter bIter(b.fDeque);
+    const Element* myElement = (const Element*)myIter.next();
+    const Element* bElement = (const Element*)bIter.next();
+
+    while (myElement != nullptr && bElement != nullptr) {
+        if (*myElement != *bElement) {
+            return false;
+        }
+        myElement = (const Element*)myIter.next();
+        bElement = (const Element*)bIter.next();
+    }
+    return myElement == nullptr && bElement == nullptr;
+}
+
+void SkClipStack::reset() {
+    // We used a placement new for each object in fDeque, so we're responsible
+    // for calling the destructor on each of them as well.
+    while (!fDeque.empty()) {
+        Element* element = (Element*)fDeque.back();
+        element->~Element();
+        fDeque.pop_back();
+    }
+
+    fSaveCount = 0;
+}
+
+void SkClipStack::save() {
+    fSaveCount += 1;
+}
+
+// Pops every element pushed since the matching save().
+void SkClipStack::restore() {
+    fSaveCount -= 1;
+    restoreTo(fSaveCount);
+}
+
+// Destroys (in place) all elements whose save count exceeds `saveCount`.
+void SkClipStack::restoreTo(int saveCount) {
+    while (!fDeque.empty()) {
+        Element* element = (Element*)fDeque.back();
+        if (element->fSaveCount <= saveCount) {
+            break;
+        }
+        element->~Element();
+        fDeque.pop_back();
+    }
+}
+
+// Returns the clip's finite bound clamped to `deviceBounds`; an inside-out bound
+// means everything in deviceBounds may be written.
+SkRect SkClipStack::bounds(const SkIRect& deviceBounds) const {
+    // TODO: optimize this.
+    SkRect r;
+    SkClipStack::BoundsType bounds;
+    this->getBounds(&r, &bounds);
+    if (bounds == SkClipStack::kInsideOut_BoundsType) {
+        return SkRect::Make(deviceBounds);
+    }
+    return r.intersect(SkRect::Make(deviceBounds)) ? r : SkRect::MakeEmpty();
+}
+
+// TODO: optimize this.
+bool SkClipStack::isEmpty(const SkIRect& r) const { return this->bounds(r).isEmpty(); }
+
+// Reports the top element's cached bound info; an empty stack is wide open
+// (empty inside-out bound). isIntersectionOfRects may be null.
+void SkClipStack::getBounds(SkRect* canvFiniteBound,
+                            BoundsType* boundType,
+                            bool* isIntersectionOfRects) const {
+    SkASSERT(canvFiniteBound && boundType);
+
+    Element* element = (Element*)fDeque.back();
+
+    if (nullptr == element) {
+        // the clip is wide open - the infinite plane w/ no pixels un-writeable
+        canvFiniteBound->setEmpty();
+        *boundType = kInsideOut_BoundsType;
+        if (isIntersectionOfRects) {
+            *isIntersectionOfRects = false;
+        }
+        return;
+    }
+
+    *canvFiniteBound = element->fFiniteBound;
+    *boundType = element->fFiniteBoundType;
+    if (isIntersectionOfRects) {
+        *isIntersectionOfRects = element->fIsIntersectionOfRects;
+    }
+}
+
+// Conservatively answers whether `rect` is fully inside every element of the clip,
+// walking top-down and stopping at a replace element.
+bool SkClipStack::internalQuickContains(const SkRect& rect) const {
+    Iter iter(*this, Iter::kTop_IterStart);
+    const Element* element = iter.prev();
+    while (element != nullptr) {
+        // TODO: Once expanding ops are removed, this condition is equiv. to op == kDifference.
+        if (SkClipOp::kIntersect != element->getOp() && !element->isReplaceOp()) {
+            return false;
+        }
+        if (element->isInverseFilled()) {
+            // Part of 'rect' could be trimmed off by the inverse-filled clip element
+            if (SkRect::Intersects(element->getBounds(), rect)) {
+                return false;
+            }
+        } else {
+            if (!element->contains(rect)) {
+                return false;
+            }
+        }
+        if (element->isReplaceOp()) {
+            break;
+        }
+        element = iter.prev();
+    }
+    return true;
+}
+
+// Round-rect analog of the SkRect overload above.
+bool SkClipStack::internalQuickContains(const SkRRect& rrect) const {
+    Iter iter(*this, Iter::kTop_IterStart);
+    const Element* element = iter.prev();
+    while (element != nullptr) {
+        // TODO: Once expanding ops are removed, this condition is equiv. to op == kDifference.
+        if (SkClipOp::kIntersect != element->getOp() && !element->isReplaceOp()) {
+            return false;
+        }
+        if (element->isInverseFilled()) {
+            // Part of 'rrect' could be trimmed off by the inverse-filled clip element
+            if (SkRect::Intersects(element->getBounds(), rrect.getBounds())) {
+                return false;
+            }
+        } else {
+            if (!element->contains(rrect)) {
+                return false;
+            }
+        }
+        if (element->isReplaceOp()) {
+            break;
+        }
+        element = iter.prev();
+    }
+    return true;
+}
+
+void SkClipStack::pushElement(const Element& element) {
+ // Use reverse iterator instead of back because Rect path may need previous
+ SkDeque::Iter iter(fDeque, SkDeque::Iter::kBack_IterStart);
+ Element* prior = (Element*) iter.prev();
+
+ if (prior) {
+ if (element.isReplaceOp()) {
+ this->restoreTo(fSaveCount - 1);
+ prior = (Element*) fDeque.back();
+ } else if (prior->canBeIntersectedInPlace(fSaveCount, element.getOp())) {
+ switch (prior->fDeviceSpaceType) {
+ case Element::DeviceSpaceType::kEmpty:
+ SkDEBUGCODE(prior->checkEmpty();)
+ return;
+ case Element::DeviceSpaceType::kShader:
+ if (Element::DeviceSpaceType::kShader == element.getDeviceSpaceType()) {
+ prior->fShader = SkShaders::Blend(SkBlendMode::kSrcIn,
+ element.fShader, prior->fShader);
+ Element* priorPrior = (Element*) iter.prev();
+ prior->updateBoundAndGenID(priorPrior);
+ return;
+ }
+ break;
+ case Element::DeviceSpaceType::kRect:
+ if (Element::DeviceSpaceType::kRect == element.getDeviceSpaceType()) {
+ if (prior->rectRectIntersectAllowed(element.getDeviceSpaceRect(),
+ element.isAA())) {
+ SkRect isectRect;
+ if (!isectRect.intersect(prior->getDeviceSpaceRect(),
+ element.getDeviceSpaceRect())) {
+ prior->setEmpty();
+ return;
+ }
+
+ prior->fDeviceSpaceRRect.setRect(isectRect);
+ prior->fDoAA = element.isAA();
+ Element* priorPrior = (Element*) iter.prev();
+ prior->updateBoundAndGenID(priorPrior);
+ return;
+ }
+ break;
+ }
+ [[fallthrough]];
+ default:
+ if (!SkRect::Intersects(prior->getBounds(), element.getBounds())) {
+ prior->setEmpty();
+ return;
+ }
+ break;
+ }
+ }
+ }
+ Element* newElement = new (fDeque.push_back()) Element(element);
+ newElement->updateBoundAndGenID(prior);
+}
+
+void SkClipStack::clipRRect(const SkRRect& rrect, const SkMatrix& matrix, SkClipOp op, bool doAA) {
+ Element element(fSaveCount, rrect, matrix, op, doAA);
+ this->pushElement(element);
+}
+
+void SkClipStack::clipRect(const SkRect& rect, const SkMatrix& matrix, SkClipOp op, bool doAA) {
+ Element element(fSaveCount, rect, matrix, op, doAA);
+ this->pushElement(element);
+}
+
+void SkClipStack::clipPath(const SkPath& path, const SkMatrix& matrix, SkClipOp op,
+ bool doAA) {
+ Element element(fSaveCount, path, matrix, op, doAA);
+ this->pushElement(element);
+}
+
+void SkClipStack::clipShader(sk_sp<SkShader> shader) {
+ Element element(fSaveCount, std::move(shader));
+ this->pushElement(element);
+}
+
+void SkClipStack::replaceClip(const SkRect& rect, bool doAA) {
+ Element element(fSaveCount, rect, doAA);
+ this->pushElement(element);
+}
+
+void SkClipStack::clipEmpty() {
+ Element* element = (Element*) fDeque.back();
+
+ if (element && element->canBeIntersectedInPlace(fSaveCount, SkClipOp::kIntersect)) {
+ element->setEmpty();
+ }
+ new (fDeque.push_back()) Element(fSaveCount);
+
+ ((Element*)fDeque.back())->fGenID = kEmptyGenID;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkClipStack::Iter::Iter() : fStack(nullptr) {
+}
+
+SkClipStack::Iter::Iter(const SkClipStack& stack, IterStart startLoc)
+ : fStack(&stack) {
+ this->reset(stack, startLoc);
+}
+
+const SkClipStack::Element* SkClipStack::Iter::next() {
+ return (const SkClipStack::Element*)fIter.next();
+}
+
+const SkClipStack::Element* SkClipStack::Iter::prev() {
+ return (const SkClipStack::Element*)fIter.prev();
+}
+
+const SkClipStack::Element* SkClipStack::Iter::skipToTopmost(SkClipOp op) {
+ if (nullptr == fStack) {
+ return nullptr;
+ }
+
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kBack_IterStart);
+
+ const SkClipStack::Element* element = nullptr;
+
+ for (element = (const SkClipStack::Element*) fIter.prev();
+ element;
+ element = (const SkClipStack::Element*) fIter.prev()) {
+
+ if (op == element->fOp) {
+ // The Deque's iterator is actually one pace ahead of the
+ // returned value. So while "element" is the element we want to
+ // return, the iterator is actually pointing at (and will
+ // return on the next "next" or "prev" call) the element
+ // in front of it in the deque. Bump the iterator forward a
+ // step so we get the expected result.
+ if (nullptr == fIter.next()) {
+ // The reverse iterator has run off the front of the deque
+ // (i.e., the "op" clip is the first clip) and can't
+ // recover. Reset the iterator to start at the front.
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kFront_IterStart);
+ }
+ break;
+ }
+ }
+
+ if (nullptr == element) {
+ // There were no "op" clips
+ fIter.reset(fStack->fDeque, SkDeque::Iter::kFront_IterStart);
+ }
+
+ return this->next();
+}
+
+void SkClipStack::Iter::reset(const SkClipStack& stack, IterStart startLoc) {
+ fStack = &stack;
+ fIter.reset(stack.fDeque, static_cast<SkDeque::Iter::IterStart>(startLoc));
+}
+
+// helper method
+void SkClipStack::getConservativeBounds(int offsetX,
+ int offsetY,
+ int maxWidth,
+ int maxHeight,
+ SkRect* devBounds,
+ bool* isIntersectionOfRects) const {
+ SkASSERT(devBounds);
+
+ devBounds->setLTRB(0, 0,
+ SkIntToScalar(maxWidth), SkIntToScalar(maxHeight));
+
+ SkRect temp;
+ SkClipStack::BoundsType boundType;
+
+ // temp starts off in canvas space here
+ this->getBounds(&temp, &boundType, isIntersectionOfRects);
+ if (SkClipStack::kInsideOut_BoundsType == boundType) {
+ return;
+ }
+
+ // but is converted to device space here
+ temp.offset(SkIntToScalar(offsetX), SkIntToScalar(offsetY));
+
+ if (!devBounds->intersect(temp)) {
+ devBounds->setEmpty();
+ }
+}
+
+bool SkClipStack::isRRect(const SkRect& bounds, SkRRect* rrect, bool* aa) const {
+ const Element* back = static_cast<const Element*>(fDeque.back());
+ if (!back) {
+ // TODO: return bounds?
+ return false;
+ }
+ // First check if the entire stack is known to be a rect by the top element.
+ if (back->fIsIntersectionOfRects && back->fFiniteBoundType == BoundsType::kNormal_BoundsType) {
+ rrect->setRect(back->fFiniteBound);
+ *aa = back->isAA();
+ return true;
+ }
+
+ if (back->getDeviceSpaceType() != SkClipStack::Element::DeviceSpaceType::kRect &&
+ back->getDeviceSpaceType() != SkClipStack::Element::DeviceSpaceType::kRRect) {
+ return false;
+ }
+ if (back->isReplaceOp()) {
+ *rrect = back->asDeviceSpaceRRect();
+ *aa = back->isAA();
+ return true;
+ }
+
+ if (back->getOp() == SkClipOp::kIntersect) {
+ SkRect backBounds;
+ if (!backBounds.intersect(bounds, back->asDeviceSpaceRRect().rect())) {
+ return false;
+ }
+ // We limit to 17 elements. This means the back element will be bounds checked at most 16
+ // times if it is an rrect.
+ int cnt = fDeque.count();
+ if (cnt > 17) {
+ return false;
+ }
+ if (cnt > 1) {
+ SkDeque::Iter iter(fDeque, SkDeque::Iter::kBack_IterStart);
+ SkAssertResult(static_cast<const Element*>(iter.prev()) == back);
+ while (const Element* prior = (const Element*)iter.prev()) {
+ // TODO: Once expanding clip ops are removed, this is equiv. to op == kDifference
+ if ((prior->getOp() != SkClipOp::kIntersect && !prior->isReplaceOp()) ||
+ !prior->contains(backBounds)) {
+ return false;
+ }
+ if (prior->isReplaceOp()) {
+ break;
+ }
+ }
+ }
+ *rrect = back->asDeviceSpaceRRect();
+ *aa = back->isAA();
+ return true;
+ }
+ return false;
+}
+
+uint32_t SkClipStack::GetNextGenID() {
+ // 0-2 are reserved for invalid, empty & wide-open
+ static const uint32_t kFirstUnreservedGenID = 3;
+ static std::atomic<uint32_t> nextID{kFirstUnreservedGenID};
+
+ uint32_t id;
+ do {
+ id = nextID.fetch_add(1, std::memory_order_relaxed);
+ } while (id < kFirstUnreservedGenID);
+ return id;
+}
+
+uint32_t SkClipStack::getTopmostGenID() const {
+ if (fDeque.empty()) {
+ return kWideOpenGenID;
+ }
+
+ const Element* back = static_cast<const Element*>(fDeque.back());
+ if (kInsideOut_BoundsType == back->fFiniteBoundType && back->fFiniteBound.isEmpty() &&
+ Element::DeviceSpaceType::kShader != back->fDeviceSpaceType) {
+ return kWideOpenGenID;
+ }
+
+ return back->getGenID();
+}
+
+#ifdef SK_DEBUG
+void SkClipStack::Element::dump() const {
+ static const char* kTypeStrings[] = {
+ "empty",
+ "rect",
+ "rrect",
+ "path",
+ "shader"
+ };
+ static_assert(0 == static_cast<int>(DeviceSpaceType::kEmpty), "enum mismatch");
+ static_assert(1 == static_cast<int>(DeviceSpaceType::kRect), "enum mismatch");
+ static_assert(2 == static_cast<int>(DeviceSpaceType::kRRect), "enum mismatch");
+ static_assert(3 == static_cast<int>(DeviceSpaceType::kPath), "enum mismatch");
+ static_assert(4 == static_cast<int>(DeviceSpaceType::kShader), "enum mismatch");
+ static_assert(std::size(kTypeStrings) == kTypeCnt, "enum mismatch");
+
+ const char* opName = this->isReplaceOp() ? "replace" :
+ (fOp == SkClipOp::kDifference ? "difference" : "intersect");
+ SkDebugf("Type: %s, Op: %s, AA: %s, Save Count: %d\n", kTypeStrings[(int)fDeviceSpaceType],
+ opName, (fDoAA ? "yes" : "no"), fSaveCount);
+ switch (fDeviceSpaceType) {
+ case DeviceSpaceType::kEmpty:
+ SkDebugf("\n");
+ break;
+ case DeviceSpaceType::kRect:
+ this->getDeviceSpaceRect().dump();
+ SkDebugf("\n");
+ break;
+ case DeviceSpaceType::kRRect:
+ this->getDeviceSpaceRRect().dump();
+ SkDebugf("\n");
+ break;
+ case DeviceSpaceType::kPath:
+ this->getDeviceSpacePath().dump(nullptr, false);
+ break;
+ case DeviceSpaceType::kShader:
+ // SkShaders don't provide much introspection that's worth while.
+ break;
+ }
+}
+
+void SkClipStack::dump() const {
+ B2TIter iter(*this);
+ const Element* e;
+ while ((e = iter.next())) {
+ e->dump();
+ SkDebugf("\n");
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkClipStack.h b/gfx/skia/skia/src/core/SkClipStack.h
new file mode 100644
index 0000000000..64c086b352
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStack.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipStack_DEFINED
+#define SkClipStack_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkShader.h"
+#include "include/private/base/SkDeque.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkMessageBus.h"
+
+// Because a single save/restore state can have multiple clips, this class
+// stores the stack depth (fSaveCount) and clips (fDeque) separately.
+// Each clip in fDeque stores the stack state to which it belongs
+// (i.e., the fSaveCount in force when it was added). Restores are thus
+// implemented by removing clips from fDeque that have an fSaveCount larger
+// then the freshly decremented count.
+class SkClipStack {
+public:
+ enum BoundsType {
+ // The bounding box contains all the pixels that can be written to
+ kNormal_BoundsType,
+ // The bounding box contains all the pixels that cannot be written to.
+ // The real bound extends out to infinity and all the pixels outside
+ // of the bound can be written to. Note that some of the pixels inside
+ // the bound may also be writeable but all pixels that cannot be
+ // written to are guaranteed to be inside.
+ kInsideOut_BoundsType
+ };
+
+ /**
+ * An element of the clip stack. It represents a shape combined with the previous clip using a
+ * set operator. Each element can be antialiased or not.
+ */
+ class Element {
+ public:
+ /** This indicates the shape type of the clip element in device space. */
+ enum class DeviceSpaceType {
+ //!< This element makes the clip empty (regardless of previous elements).
+ kEmpty,
+ //!< This element combines a device space rect with the current clip.
+ kRect,
+ //!< This element combines a device space round-rect with the current clip.
+ kRRect,
+ //!< This element combines a device space path with the current clip.
+ kPath,
+ //!< This element does not have geometry, but applies a shader to the clip
+ kShader,
+
+ kLastType = kShader
+ };
+ static const int kTypeCnt = (int)DeviceSpaceType::kLastType + 1;
+
+ Element() {
+ this->initCommon(0, SkClipOp::kIntersect, false);
+ this->setEmpty();
+ }
+
+ Element(const Element&);
+
+ Element(const SkRect& rect, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initRect(0, rect, m, op, doAA);
+ }
+
+ Element(const SkRRect& rrect, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initRRect(0, rrect, m, op, doAA);
+ }
+
+ Element(const SkPath& path, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initPath(0, path, m, op, doAA);
+ }
+
+ Element(sk_sp<SkShader> shader) {
+ this->initShader(0, std::move(shader));
+ }
+
+ Element(const SkRect& rect, bool doAA) {
+ this->initReplaceRect(0, rect, doAA);
+ }
+
+ ~Element();
+
+ bool operator== (const Element& element) const;
+ bool operator!= (const Element& element) const { return !(*this == element); }
+
+ //!< Call to get the type of the clip element.
+ DeviceSpaceType getDeviceSpaceType() const { return fDeviceSpaceType; }
+
+ //!< Call to get the save count associated with this clip element.
+ int getSaveCount() const { return fSaveCount; }
+
+ //!< Call if getDeviceSpaceType() is kPath to get the path.
+ const SkPath& getDeviceSpacePath() const {
+ SkASSERT(DeviceSpaceType::kPath == fDeviceSpaceType);
+ return *fDeviceSpacePath;
+ }
+
+ //!< Call if getDeviceSpaceType() is kRRect to get the round-rect.
+ const SkRRect& getDeviceSpaceRRect() const {
+ SkASSERT(DeviceSpaceType::kRRect == fDeviceSpaceType);
+ return fDeviceSpaceRRect;
+ }
+
+ //!< Call if getDeviceSpaceType() is kRect to get the rect.
+ const SkRect& getDeviceSpaceRect() const {
+ SkASSERT(DeviceSpaceType::kRect == fDeviceSpaceType &&
+ (fDeviceSpaceRRect.isRect() || fDeviceSpaceRRect.isEmpty()));
+ return fDeviceSpaceRRect.getBounds();
+ }
+
+ //!< Call if getDeviceSpaceType() is kShader to get a reference to the clip shader.
+ sk_sp<SkShader> refShader() const {
+ return fShader;
+ }
+ const SkShader* getShader() const {
+ return fShader.get();
+ }
+
+ //!< Call if getDeviceSpaceType() is not kEmpty to get the set operation used to combine
+ //!< this element.
+ SkClipOp getOp() const { return fOp; }
+ // Augments getOp()'s behavior by requiring a clip reset before the op is applied.
+ bool isReplaceOp() const { return fIsReplace; }
+
+ //!< Call to get the element as a path, regardless of its type.
+ void asDeviceSpacePath(SkPath* path) const;
+
+ //!< Call if getType() is not kPath to get the element as a round rect.
+ const SkRRect& asDeviceSpaceRRect() const {
+ SkASSERT(DeviceSpaceType::kPath != fDeviceSpaceType);
+ return fDeviceSpaceRRect;
+ }
+
+ /** If getType() is not kEmpty this indicates whether the clip shape should be anti-aliased
+ when it is rasterized. */
+ bool isAA() const { return fDoAA; }
+
+ //!< Inverts the fill of the clip shape. Note that a kEmpty element remains kEmpty.
+ void invertShapeFillType();
+
+ /** The GenID can be used by clip stack clients to cache representations of the clip. The
+ ID corresponds to the set of clip elements up to and including this element within the
+ stack not to the element itself. That is the same clip path in different stacks will
+ have a different ID since the elements produce different clip result in the context of
+ their stacks. */
+ uint32_t getGenID() const { SkASSERT(kInvalidGenID != fGenID); return fGenID; }
+
+ /**
+ * Gets the bounds of the clip element, either the rect or path bounds. (Whether the shape
+ * is inverse filled is not considered.)
+ */
+ const SkRect& getBounds() const;
+
+ /**
+ * Conservatively checks whether the clip shape contains the rect/rrect. (Whether the shape
+ * is inverse filled is not considered.)
+ */
+ bool contains(const SkRect& rect) const;
+ bool contains(const SkRRect& rrect) const;
+
+ /**
+ * Is the clip shape inverse filled.
+ */
+ bool isInverseFilled() const {
+ return DeviceSpaceType::kPath == fDeviceSpaceType &&
+ fDeviceSpacePath->isInverseFillType();
+ }
+
+#ifdef SK_DEBUG
+ /**
+ * Dumps the element to SkDebugf. This is intended for Skia development debugging
+ * Don't rely on the existence of this function or the formatting of its output.
+ */
+ void dump() const;
+#endif
+
+ private:
+ friend class SkClipStack;
+
+ SkTLazy<SkPath> fDeviceSpacePath;
+ SkRRect fDeviceSpaceRRect;
+ sk_sp<SkShader> fShader;
+ int fSaveCount; // save count of stack when this element was added.
+ SkClipOp fOp;
+ DeviceSpaceType fDeviceSpaceType;
+ bool fDoAA;
+ bool fIsReplace;
+
+ /* fFiniteBoundType and fFiniteBound are used to incrementally update the clip stack's
+ bound. When fFiniteBoundType is kNormal_BoundsType, fFiniteBound represents the
+ conservative bounding box of the pixels that aren't clipped (i.e., any pixels that can be
+ drawn to are inside the bound). When fFiniteBoundType is kInsideOut_BoundsType (which
+ occurs when a clip is inverse filled), fFiniteBound represents the conservative bounding
+ box of the pixels that _are_ clipped (i.e., any pixels that cannot be drawn to are inside
+ the bound). When fFiniteBoundType is kInsideOut_BoundsType the actual bound is the
+ infinite plane. This behavior of fFiniteBoundType and fFiniteBound is required so that we
+ can capture the cancelling out of the extensions to infinity when two inverse filled
+ clips are Booleaned together. */
+ SkClipStack::BoundsType fFiniteBoundType;
+ SkRect fFiniteBound;
+
+ // When element is applied to the previous elements in the stack is the result known to be
+ // equivalent to a single rect intersection? In other words, is the clip effectively a rectangle.
+ bool fIsIntersectionOfRects;
+
+ uint32_t fGenID;
+ Element(int saveCount) {
+ this->initCommon(saveCount, SkClipOp::kIntersect, false);
+ this->setEmpty();
+ }
+
+ Element(int saveCount, const SkRRect& rrect, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initRRect(saveCount, rrect, m, op, doAA);
+ }
+
+ Element(int saveCount, const SkRect& rect, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initRect(saveCount, rect, m, op, doAA);
+ }
+
+ Element(int saveCount, const SkPath& path, const SkMatrix& m, SkClipOp op, bool doAA) {
+ this->initPath(saveCount, path, m, op, doAA);
+ }
+
+ Element(int saveCount, sk_sp<SkShader> shader) {
+ this->initShader(saveCount, std::move(shader));
+ }
+
+ Element(int saveCount, const SkRect& rect, bool doAA) {
+ this->initReplaceRect(saveCount, rect, doAA);
+ }
+
+ void initCommon(int saveCount, SkClipOp op, bool doAA);
+ void initRect(int saveCount, const SkRect&, const SkMatrix&, SkClipOp, bool doAA);
+ void initRRect(int saveCount, const SkRRect&, const SkMatrix&, SkClipOp, bool doAA);
+ void initPath(int saveCount, const SkPath&, const SkMatrix&, SkClipOp, bool doAA);
+ void initAsPath(int saveCount, const SkPath&, const SkMatrix&, SkClipOp, bool doAA);
+ void initShader(int saveCount, sk_sp<SkShader>);
+ void initReplaceRect(int saveCount, const SkRect&, bool doAA);
+
+ void setEmpty();
+
+ // All Element methods below are only used within SkClipStack.cpp
+ inline void checkEmpty() const;
+ inline bool canBeIntersectedInPlace(int saveCount, SkClipOp op) const;
+ /* This method checks to see if two rect clips can be safely merged into one. The issue here
+ is that to be strictly correct all the edges of the resulting rect must have the same
+ anti-aliasing. */
+ bool rectRectIntersectAllowed(const SkRect& newR, bool newAA) const;
+ /** Determines possible finite bounds for the Element given the previous element of the
+ stack */
+ void updateBoundAndGenID(const Element* prior);
+ // The different combination of fill & inverse fill when combining bounding boxes
+ enum FillCombo {
+ kPrev_Cur_FillCombo,
+ kPrev_InvCur_FillCombo,
+ kInvPrev_Cur_FillCombo,
+ kInvPrev_InvCur_FillCombo
+ };
+ // per-set operation functions used by updateBoundAndGenID().
+ inline void combineBoundsDiff(FillCombo combination, const SkRect& prevFinite);
+ inline void combineBoundsIntersection(int combination, const SkRect& prevFinite);
+ };
+
+ SkClipStack();
+ SkClipStack(void* storage, size_t size);
+ SkClipStack(const SkClipStack& b);
+ ~SkClipStack();
+
+ SkClipStack& operator=(const SkClipStack& b);
+ bool operator==(const SkClipStack& b) const;
+ bool operator!=(const SkClipStack& b) const { return !(*this == b); }
+
+ void reset();
+
+ int getSaveCount() const { return fSaveCount; }
+ void save();
+ void restore();
+
+ class AutoRestore {
+ public:
+ AutoRestore(SkClipStack* cs, bool doSave)
+ : fCS(cs), fSaveCount(cs->getSaveCount())
+ {
+ if (doSave) {
+ fCS->save();
+ }
+ }
+ ~AutoRestore() {
+ SkASSERT(fCS->getSaveCount() >= fSaveCount); // no underflow
+ while (fCS->getSaveCount() > fSaveCount) {
+ fCS->restore();
+ }
+ }
+
+ private:
+ SkClipStack* fCS;
+ const int fSaveCount;
+ };
+
+ /**
+ * getBounds places the current finite bound in its first parameter. In its
+ * second, it indicates which kind of bound is being returned. If
+ * 'canvFiniteBound' is a normal bounding box then it encloses all writeable
+ * pixels. If 'canvFiniteBound' is an inside out bounding box then it
+ * encloses all the un-writeable pixels and the true/normal bound is the
+ * infinite plane. isIntersectionOfRects is an optional parameter
+ * that is true if 'canvFiniteBound' resulted from an intersection of rects.
+ */
+ void getBounds(SkRect* canvFiniteBound,
+ BoundsType* boundType,
+ bool* isIntersectionOfRects = nullptr) const;
+
+ SkRect bounds(const SkIRect& deviceBounds) const;
+ bool isEmpty(const SkIRect& deviceBounds) const;
+
+ /**
+ * Returns true if the input (r)rect in device space is entirely contained
+ * by the clip. A return value of false does not guarantee that the (r)rect
+ * is not contained by the clip.
+ */
+ bool quickContains(const SkRect& devRect) const {
+ return this->isWideOpen() || this->internalQuickContains(devRect);
+ }
+
+ bool quickContains(const SkRRect& devRRect) const {
+ return this->isWideOpen() || this->internalQuickContains(devRRect);
+ }
+
+ void clipDevRect(const SkIRect& ir, SkClipOp op) {
+ SkRect r;
+ r.set(ir);
+ this->clipRect(r, SkMatrix::I(), op, false);
+ }
+ void clipRect(const SkRect&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ void clipRRect(const SkRRect&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ void clipPath(const SkPath&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ void clipShader(sk_sp<SkShader>);
+ // An optimized version of clipDevRect(emptyRect, kIntersect, ...)
+ void clipEmpty();
+
+ void replaceClip(const SkRect& devRect, bool doAA);
+
+ /**
+ * isWideOpen returns true if the clip state corresponds to the infinite
+ * plane (i.e., draws are not limited at all)
+ */
+ bool isWideOpen() const { return this->getTopmostGenID() == kWideOpenGenID; }
+
+ /**
+ * This method quickly and conservatively determines whether the entire stack is equivalent to
+ * intersection with a rrect given a bounds, where the rrect must not contain the entire bounds.
+ *
+ * @param bounds A bounds on what will be drawn through the clip. The clip only need be
+ * equivalent to a intersection with a rrect for draws within the bounds. The
+ * returned rrect must intersect the bounds but need not be contained by the
+ * bounds.
+ * @param rrect If return is true rrect will contain the rrect equivalent to the stack.
+ * @param aa If return is true aa will indicate whether the equivalent rrect clip is
+ * antialiased.
+ * @return true if the stack is equivalent to a single rrect intersect clip, false otherwise.
+ */
+ bool isRRect(const SkRect& bounds, SkRRect* rrect, bool* aa) const;
+
+ /**
+ * The generation ID has three reserved values to indicate special
+ * (potentially ignorable) cases
+ */
+ static const uint32_t kInvalidGenID = 0; //!< Invalid id that is never returned by
+ //!< SkClipStack. Useful when caching clips
+ //!< based on GenID.
+ static const uint32_t kEmptyGenID = 1; // no pixels writeable
+ static const uint32_t kWideOpenGenID = 2; // all pixels writeable
+
+ uint32_t getTopmostGenID() const;
+
+#ifdef SK_DEBUG
+ /**
+ * Dumps the contents of the clip stack to SkDebugf. This is intended for Skia development
+ * debugging. Don't rely on the existence of this function or the formatting of its output.
+ */
+ void dump() const;
+#endif
+
+public:
+ class Iter {
+ public:
+ enum IterStart {
+ kBottom_IterStart = SkDeque::Iter::kFront_IterStart,
+ kTop_IterStart = SkDeque::Iter::kBack_IterStart
+ };
+
+ /**
+ * Creates an uninitialized iterator. Must be reset()
+ */
+ Iter();
+
+ Iter(const SkClipStack& stack, IterStart startLoc);
+
+ /**
+ * Return the clip element for this iterator. If next()/prev() returns NULL, then the
+ * iterator is done.
+ */
+ const Element* next();
+ const Element* prev();
+
+ /**
+ * Moves the iterator to the topmost element with the specified RegionOp and returns that
+ * element. If no clip element with that op is found, the first element is returned.
+ */
+ const Element* skipToTopmost(SkClipOp op);
+
+ /**
+ * Restarts the iterator on a clip stack.
+ */
+ void reset(const SkClipStack& stack, IterStart startLoc);
+
+ private:
+ const SkClipStack* fStack;
+ SkDeque::Iter fIter;
+ };
+
+ /**
+ * The B2TIter iterates from the bottom of the stack to the top.
+ * It inherits privately from Iter to prevent access to reverse iteration.
+ */
+ class B2TIter : private Iter {
+ public:
+ B2TIter() {}
+
+ /**
+ * Wrap Iter's 2 parameter ctor to force initialization to the
+ * beginning of the deque/bottom of the stack
+ */
+ B2TIter(const SkClipStack& stack)
+ : INHERITED(stack, kBottom_IterStart) {
+ }
+
+ using Iter::next;
+
+ /**
+ * Wrap Iter::reset to force initialization to the
+ * beginning of the deque/bottom of the stack
+ */
+ void reset(const SkClipStack& stack) {
+ this->INHERITED::reset(stack, kBottom_IterStart);
+ }
+
+ private:
+
+ using INHERITED = Iter;
+ };
+
+ /**
+ * GetConservativeBounds returns a conservative bound of the current clip.
+ * Since this could be the infinite plane (if inverse fills were involved) the
+ * maxWidth and maxHeight parameters can be used to limit the returned bound
+ * to the expected drawing area. Similarly, the offsetX and offsetY parameters
+ * allow the caller to offset the returned bound to account for translated
+ * drawing areas (i.e., those resulting from a saveLayer). For finite bounds,
+ * the translation (+offsetX, +offsetY) is applied before the clamp to the
+ * maximum rectangle: [0,maxWidth) x [0,maxHeight).
+ * isIntersectionOfRects is an optional parameter that is true when
+ * 'devBounds' is the result of an intersection of rects. In this case
+ * 'devBounds' is the exact answer/clip.
+ */
+ void getConservativeBounds(int offsetX,
+ int offsetY,
+ int maxWidth,
+ int maxHeight,
+ SkRect* devBounds,
+ bool* isIntersectionOfRects = nullptr) const;
+
+private:
+ friend class Iter;
+
+ SkDeque fDeque;
+ int fSaveCount;
+
+ bool internalQuickContains(const SkRect& devRect) const;
+ bool internalQuickContains(const SkRRect& devRRect) const;
+
+ /**
+ * Helper for clipDevPath, etc.
+ */
+ void pushElement(const Element& element);
+
+ /**
+ * Restore the stack back to the specified save count.
+ */
+ void restoreTo(int saveCount);
+
+ /**
+ * Return the next unique generation ID.
+ */
+ static uint32_t GetNextGenID();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkClipStackDevice.cpp b/gfx/skia/skia/src/core/SkClipStackDevice.cpp
new file mode 100644
index 0000000000..d30fd6880d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStackDevice.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkClipStackDevice.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkRasterClip.h"
+
+SkIRect SkClipStackDevice::onDevClipBounds() const {
+ SkIRect r = fClipStack.bounds(this->imageInfo().bounds()).roundOut();
+ if (!r.isEmpty()) {
+ SkASSERT(this->imageInfo().bounds().contains(r));
+ }
+ return r;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkClipStackDevice::onSave() {
+ fClipStack.save();
+}
+
+void SkClipStackDevice::onRestore() {
+ fClipStack.restore();
+}
+
+void SkClipStackDevice::onClipRect(const SkRect& rect, SkClipOp op, bool aa) {
+ fClipStack.clipRect(rect, this->localToDevice(), op, aa);
+}
+
+void SkClipStackDevice::onClipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
+ fClipStack.clipRRect(rrect, this->localToDevice(), op, aa);
+}
+
+void SkClipStackDevice::onClipPath(const SkPath& path, SkClipOp op, bool aa) {
+ fClipStack.clipPath(path, this->localToDevice(), op, aa);
+}
+
+void SkClipStackDevice::onClipShader(sk_sp<SkShader> shader) {
+ fClipStack.clipShader(std::move(shader));
+}
+
+void SkClipStackDevice::onClipRegion(const SkRegion& rgn, SkClipOp op) {
+ SkIPoint origin = this->getOrigin();
+ SkRegion tmp;
+ SkPath path;
+ rgn.getBoundaryPath(&path);
+ path.transform(SkMatrix::Translate(-origin));
+ fClipStack.clipPath(path, SkMatrix::I(), op, false);
+}
+
+void SkClipStackDevice::onReplaceClip(const SkIRect& rect) {
+ SkRect deviceRect = SkMatrixPriv::MapRect(this->globalToDevice(), SkRect::Make(rect));
+ fClipStack.replaceClip(deviceRect, /*doAA=*/false);
+}
+
+bool SkClipStackDevice::onClipIsAA() const {
+ SkClipStack::B2TIter iter(fClipStack);
+ const SkClipStack::Element* element;
+
+ while ((element = iter.next()) != nullptr) {
+ if (element->isAA()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkClipStackDevice::onClipIsWideOpen() const {
+ return fClipStack.quickContains(SkRect::MakeIWH(this->width(), this->height()));
+}
+
+void SkClipStackDevice::onAsRgnClip(SkRegion* rgn) const {
+ SkClipStack::BoundsType boundType;
+ bool isIntersectionOfRects;
+ SkRect bounds;
+ fClipStack.getBounds(&bounds, &boundType, &isIntersectionOfRects);
+ if (isIntersectionOfRects && SkClipStack::kNormal_BoundsType == boundType) {
+ rgn->setRect(bounds.round());
+ } else {
+ SkRegion boundsRgn({0, 0, this->width(), this->height()});
+ SkPath tmpPath;
+
+ *rgn = boundsRgn;
+ SkClipStack::B2TIter iter(fClipStack);
+ while (auto elem = iter.next()) {
+ tmpPath.rewind();
+ elem->asDeviceSpacePath(&tmpPath);
+ SkRegion tmpRgn;
+ tmpRgn.setPath(tmpPath, boundsRgn);
+ if (elem->isReplaceOp()) {
+ // All replace elements are rectangles
+ // TODO: SkClipStack can be simplified to be I,D,R ops now, which means element
+ // iteration can be from top of the stack to the most recent replace element.
+ // When that's done, this loop will be simplifiable.
+ rgn->setRect(elem->getDeviceSpaceRect().round());
+ } else {
+ rgn->op(tmpRgn, static_cast<SkRegion::Op>(elem->getOp()));
+ }
+ }
+ }
+}
+
+SkBaseDevice::ClipType SkClipStackDevice::onGetClipType() const {
+ if (fClipStack.isWideOpen()) {
+ return ClipType::kRect;
+ }
+ if (fClipStack.isEmpty(SkIRect::MakeWH(this->width(), this->height()))) {
+ return ClipType::kEmpty;
+ } else {
+ SkClipStack::BoundsType boundType;
+ bool isIntersectionOfRects;
+ SkRect bounds;
+ fClipStack.getBounds(&bounds, &boundType, &isIntersectionOfRects);
+ if (isIntersectionOfRects && SkClipStack::kNormal_BoundsType == boundType) {
+ return ClipType::kRect;
+ } else {
+ return ClipType::kComplex;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkClipStackDevice.h b/gfx/skia/skia/src/core/SkClipStackDevice.h
new file mode 100644
index 0000000000..eff1f1a440
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkClipStackDevice.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkClipStackDevice_DEFINED
+#define SkClipStackDevice_DEFINED
+
+#include "src/core/SkClipStack.h"
+#include "src/core/SkDevice.h"
+
+// A device whose clip state is tracked with an SkClipStack instead of a
+// simple region. Subclasses inherit the onClip*() bookkeeping and query the
+// stack via cs().
+class SkClipStackDevice : public SkBaseDevice {
+public:
+ SkClipStackDevice(const SkImageInfo& info, const SkSurfaceProps& props)
+ : SkBaseDevice(info, props)
+ , fClipStack(fStorage, sizeof(fStorage))
+ {}
+
+ SkClipStack& cs() { return fClipStack; }
+ const SkClipStack& cs() const { return fClipStack; }
+
+protected:
+ void onSave() override;
+ void onRestore() override;
+ void onClipRect(const SkRect& rect, SkClipOp, bool aa) override;
+ void onClipRRect(const SkRRect& rrect, SkClipOp, bool aa) override;
+ void onClipPath(const SkPath& path, SkClipOp, bool aa) override;
+ void onClipShader(sk_sp<SkShader>) override;
+ void onClipRegion(const SkRegion& deviceRgn, SkClipOp) override;
+ void onReplaceClip(const SkIRect& rect) override;
+ bool onClipIsAA() const override;
+ bool onClipIsWideOpen() const override;
+ void onAsRgnClip(SkRegion*) const override;
+ ClipType onGetClipType() const override;
+ SkIRect onDevClipBounds() const override;
+
+private:
+ enum {
+ kPreallocCount = 16 // empirically determined, adjust as needed to reduce mallocs
+ };
+ // Inline storage handed to SkClipStack so the first kPreallocCount
+ // elements never hit the heap.
+ intptr_t fStorage[kPreallocCount * sizeof(SkClipStack::Element) / sizeof(intptr_t)];
+ SkClipStack fClipStack;
+
+ using INHERITED = SkBaseDevice;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColor.cpp b/gfx/skia/skia/src/core/SkColor.cpp
new file mode 100644
index 0000000000..02a79e43eb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColor.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTPin.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkSwizzlePriv.h"
+
+#include <algorithm>
+
+// Public premultiply entry points: thin wrappers over the inline helper.
+SkPMColor SkPreMultiplyARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ return SkPremultiplyARGBInline(a, r, g, b);
+}
+
+SkPMColor SkPreMultiplyColor(SkColor c) {
+ return SkPremultiplyARGBInline(SkColorGetA(c), SkColorGetR(c),
+ SkColorGetG(c), SkColorGetB(c));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Maps a byte in [0,255] to a scalar in [0,1].
+static inline SkScalar ByteToScalar(U8CPU x) {
+ SkASSERT(x <= 255);
+ return SkIntToScalar(x) / 255;
+}
+
+// Scalar ratio numer/denom; numer may be negative (hue differences).
+static inline SkScalar ByteDivToScalar(int numer, U8CPU denom) {
+ // cast to keep the answer signed
+ return SkIntToScalar(numer) / (int)denom;
+}
+
+// Converts r,g,b bytes to HSV: hsv[0] = hue in [0,360), hsv[1] = saturation
+// in [0,1], hsv[2] = value in [0,1].
+void SkRGBToHSV(U8CPU r, U8CPU g, U8CPU b, SkScalar hsv[3]) {
+ SkASSERT(hsv);
+
+ unsigned min = std::min(r, std::min(g, b));
+ unsigned max = std::max(r, std::max(g, b));
+ unsigned delta = max - min;
+
+ SkScalar v = ByteToScalar(max);
+ SkASSERT(v >= 0 && v <= SK_Scalar1);
+
+ if (0 == delta) { // we're a shade of gray
+ hsv[0] = 0;
+ hsv[1] = 0;
+ hsv[2] = v;
+ return;
+ }
+
+ SkScalar s = ByteDivToScalar(delta, max);
+ SkASSERT(s >= 0 && s <= SK_Scalar1);
+
+ // Hue sector is picked by which channel is the maximum.
+ SkScalar h;
+ if (r == max) {
+ h = ByteDivToScalar(g - b, delta);
+ } else if (g == max) {
+ h = SkIntToScalar(2) + ByteDivToScalar(b - r, delta);
+ } else { // b == max
+ h = SkIntToScalar(4) + ByteDivToScalar(r - g, delta);
+ }
+
+ h *= 60;
+ if (h < 0) {
+ h += SkIntToScalar(360);
+ }
+ SkASSERT(h >= 0 && h < SkIntToScalar(360));
+
+ hsv[0] = h;
+ hsv[1] = s;
+ hsv[2] = v;
+}
+
+// Converts HSV (hue [0,360), saturation/value clamped to [0,1]) plus an alpha
+// byte back to an SkColor. Out-of-range hue is treated as 0.
+SkColor SkHSVToColor(U8CPU a, const SkScalar hsv[3]) {
+ SkASSERT(hsv);
+
+ SkScalar s = SkTPin(hsv[1], 0.0f, 1.0f);
+ SkScalar v = SkTPin(hsv[2], 0.0f, 1.0f);
+
+ U8CPU v_byte = SkScalarRoundToInt(v * 255);
+
+ if (SkScalarNearlyZero(s)) { // shade of gray
+ return SkColorSetARGB(a, v_byte, v_byte, v_byte);
+ }
+ // w = hue sector index (0..5), f = fractional position within the sector.
+ SkScalar hx = (hsv[0] < 0 || hsv[0] >= SkIntToScalar(360)) ? 0 : hsv[0]/60;
+ SkScalar w = SkScalarFloorToScalar(hx);
+ SkScalar f = hx - w;
+
+ unsigned p = SkScalarRoundToInt((SK_Scalar1 - s) * v * 255);
+ unsigned q = SkScalarRoundToInt((SK_Scalar1 - (s * f)) * v * 255);
+ unsigned t = SkScalarRoundToInt((SK_Scalar1 - (s * (SK_Scalar1 - f))) * v * 255);
+
+ unsigned r, g, b;
+
+ SkASSERT((unsigned)(w) < 6);
+ switch ((unsigned)(w)) {
+ case 0: r = v_byte; g = t; b = p; break;
+ case 1: r = q; g = v_byte; b = p; break;
+ case 2: r = p; g = v_byte; b = t; break;
+ case 3: r = p; g = q; b = v_byte; break;
+ case 4: r = t; g = p; b = v_byte; break;
+ default: r = v_byte; g = p; b = q; break;
+ }
+ return SkColorSetARGB(a, r, g, b);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Conversions between packed 32-bit colors and the float4 color types.
+// The swizzles account for channel order: SkColor is ARGB in an int
+// (BGRA little-endian in memory), SkColor4f/SkPMColor4f store RGBA floats.
+template <>
+SkColor4f SkColor4f::FromColor(SkColor bgra) {
+ SkColor4f rgba;
+ auto c4f = Sk4f_fromL32(bgra);
+#ifdef SK_CPU_BENDIAN
+ // ARGB -> RGBA
+ c4f = skvx::shuffle<1, 2, 3, 0>(c4f);
+#else
+ // BGRA -> RGBA
+ c4f = swizzle_rb(c4f);
+#endif
+ c4f.store(rgba.vec());
+ return rgba;
+}
+
+template <>
+SkColor SkColor4f::toSkColor() const {
+ auto c4f = skvx::float4::Load(this->vec());
+#ifdef SK_CPU_BENDIAN
+ // RGBA -> ARGB
+ c4f = skvx::shuffle<3, 0, 1, 2>(c4f);
+#else
+ // RGBA -> BGRA
+ c4f = swizzle_rb(c4f);
+#endif
+ return Sk4f_toL32(c4f);
+}
+
+// RGBA byte order round-trips need no swizzle.
+template <>
+uint32_t SkColor4f::toBytes_RGBA() const {
+ return Sk4f_toL32(skvx::float4::Load(this->vec()));
+}
+
+template <>
+SkColor4f SkColor4f::FromBytes_RGBA(uint32_t c) {
+ SkColor4f color;
+ Sk4f_fromL32(c).store(&color);
+ return color;
+}
+
+template <>
+SkPMColor4f SkPMColor4f::FromPMColor(SkPMColor c) {
+ SkPMColor4f color;
+ swizzle_rb_if_bgra(Sk4f_fromL32(c)).store(&color);
+ return color;
+}
+
+template <>
+uint32_t SkPMColor4f::toBytes_RGBA() const {
+ return Sk4f_toL32(skvx::float4::Load(this->vec()));
+}
+
+template <>
+SkPMColor4f SkPMColor4f::FromBytes_RGBA(uint32_t c) {
+ SkPMColor4f color;
+ Sk4f_fromL32(c).store(&color);
+ return color;
+}
diff --git a/gfx/skia/skia/src/core/SkColorFilter.cpp b/gfx/skia/skia/src/core/SkColorFilter.cpp
new file mode 100644
index 0000000000..e4328e1b71
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilter.cpp
@@ -0,0 +1,633 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/base/SkTDArray.h"
+#include "modules/skcms/skcms.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorFilterPriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+// Public SkColorFilter API: thin forwarders into the SkColorFilterBase
+// implementation (as_CFB downcasts; every SkColorFilter is a base subclass).
+bool SkColorFilter::asAColorMode(SkColor* color, SkBlendMode* mode) const {
+ return as_CFB(this)->onAsAColorMode(color, mode);
+}
+
+bool SkColorFilter::asAColorMatrix(float matrix[20]) const {
+ return as_CFB(this)->onAsAColorMatrix(matrix);
+}
+
+bool SkColorFilter::isAlphaUnchanged() const {
+ return as_CFB(this)->onIsAlphaUnchanged();
+}
+
+// Deserializes via the flattenable registry, restricted to color-filter types.
+sk_sp<SkColorFilter> SkColorFilter::Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs) {
+ return sk_sp<SkColorFilter>(static_cast<SkColorFilter*>(
+ SkFlattenable::Deserialize(
+ kSkColorFilter_Type, data, size, procs).release()));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Default implementations: subclasses that can be described as a color+mode
+// or a 4x5 matrix override these; everything else reports "no".
+bool SkColorFilterBase::onAsAColorMode(SkColor*, SkBlendMode*) const {
+ return false;
+}
+
+bool SkColorFilterBase::onAsAColorMatrix(float matrix[20]) const {
+ return false;
+}
+
+#if defined(SK_GANESH)
+GrFPResult SkColorFilterBase::asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkSurfaceProps& props) const {
+ // This color filter doesn't implement `asFragmentProcessor`.
+ return GrFPFailure(std::move(inputFP));
+}
+#endif
+
+// Builds the skvm program for this filter. If the filter claims alpha is
+// unchanged, the original alpha is restored afterward so subclasses cannot
+// accidentally violate that contract. Returns an empty color on failure.
+skvm::Color SkColorFilterBase::program(skvm::Builder* p, skvm::Color c,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms, SkArenaAlloc* alloc) const {
+ skvm::F32 original = c.a;
+ if ((c = this->onProgram(p,c, dst, uniforms,alloc))) {
+ if (this->isAlphaUnchanged()) {
+ c.a = original;
+ }
+ return c;
+ }
+ //SkDebugf("cannot program %s\n", this->getTypeName());
+ return {};
+}
+
+SkColor SkColorFilter::filterColor(SkColor c) const {
+ // This is mostly meaningless. We should phase-out this call entirely.
+ SkColorSpace* cs = nullptr;
+ return this->filterColor4f(SkColor4f::FromColor(c), cs, cs).toSkColor();
+}
+
+// Converts srcCS/unpremul -> dstCS/premul, filters, then unpremultiplies the
+// result for the caller.
+SkColor4f SkColorFilter::filterColor4f(const SkColor4f& origSrcColor, SkColorSpace* srcCS,
+ SkColorSpace* dstCS) const {
+ SkPMColor4f color = { origSrcColor.fR, origSrcColor.fG, origSrcColor.fB, origSrcColor.fA };
+ SkColorSpaceXformSteps(srcCS, kUnpremul_SkAlphaType,
+ dstCS, kPremul_SkAlphaType).apply(color.vec());
+
+ return as_CFB(this)->onFilterColor4f(color, dstCS).unpremul();
+}
+
+// Filters a single premul color on the CPU: first by running a one-pixel
+// SkRasterPipeline, and if the filter has no pipeline implementation, by
+// building and evaluating a one-pixel skvm program instead.
+SkPMColor4f SkColorFilterBase::onFilterColor4f(const SkPMColor4f& color,
+ SkColorSpace* dstCS) const {
+ constexpr size_t kEnoughForCommonFilters = 512; // big enough for compose+colormatrix
+ SkSTArenaAlloc<kEnoughForCommonFilters> alloc;
+ SkRasterPipeline pipeline(&alloc);
+ pipeline.append_constant_color(&alloc, color.vec());
+ SkMatrixProvider matrixProvider(SkMatrix::I());
+ SkSurfaceProps props{}; // default OK; colorFilters don't render text
+ SkStageRec rec = {&pipeline, &alloc, kRGBA_F32_SkColorType, dstCS, color.unpremul(), props};
+
+ if (as_CFB(this)->appendStages(rec, color.fA == 1)) {
+ SkPMColor4f dst;
+ SkRasterPipeline_MemoryCtx dstPtr = { &dst, 0 };
+ pipeline.append(SkRasterPipelineOp::store_f32, &dstPtr);
+ pipeline.run(0,0, 1,1);
+ return dst;
+ }
+
+ // This filter doesn't support SkRasterPipeline... try skvm.
+ skvm::Builder b;
+ skvm::Uniforms uni(b.uniform(), 4);
+ SkColor4f uniColor = {color.fR, color.fG, color.fB, color.fA};
+ SkColorInfo dstInfo = {kRGBA_F32_SkColorType, kPremul_SkAlphaType, sk_ref_sp(dstCS)};
+ if (skvm::Color filtered =
+ as_CFB(this)->program(&b, b.uniformColor(uniColor, &uni), dstInfo, &uni, &alloc)) {
+
+ b.store({skvm::PixelFormat::FLOAT, 32,32,32,32, 0,32,64,96},
+ b.varying<SkColor4f>(), filtered);
+
+ const bool allow_jit = false; // We're only filtering one color, no point JITing.
+ b.done("filterColor4f", allow_jit).eval(1, uni.buf.data(), &color);
+ return color;
+ }
+
+ // Neither backend could run this filter; should not happen in practice.
+ SkASSERT(false);
+ return SkPMColor4f{0,0,0,0};
+}
+
+#if defined(SK_GRAPHITE)
+// Default graphite key contribution: a passthrough block (input color
+// unchanged) for filters with no graphite implementation.
+void SkColorFilterBase::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ // Return the input color as-is.
+ PassthroughShaderBlock::BeginBlock(keyContext, builder, gatherer);
+ builder->endBlock();
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Applies fInner first, then fOuter: result = outer(inner(color)).
+// Created only through SkColorFilter::makeComposed().
+class SkComposeColorFilter final : public SkColorFilterBase {
+public:
+ bool onIsAlphaUnchanged() const override {
+ // Can only claim alphaunchanged support if both our proxys do.
+ return fOuter->isAlphaUnchanged() && fInner->isAlphaUnchanged();
+ }
+
+ bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ // The outer filter's opacity hint depends on whether the inner filter
+ // may have changed alpha.
+ bool innerIsOpaque = shaderIsOpaque;
+ if (!fInner->isAlphaUnchanged()) {
+ innerIsOpaque = false;
+ }
+ return fInner->appendStages(rec, shaderIsOpaque) &&
+ fOuter->appendStages(rec, innerIsOpaque);
+ }
+
+ skvm::Color onProgram(skvm::Builder* p, skvm::Color c,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms, SkArenaAlloc* alloc) const override {
+ c = fInner->program(p, c, dst, uniforms, alloc);
+ return c ? fOuter->program(p, c, dst, uniforms, alloc) : skvm::Color{};
+ }
+
+#if defined(SK_GANESH)
+ GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkSurfaceProps& props) const override {
+ // Unfortunately, we need to clone the input before we know we need it. This lets us return
+ // the original FP if either internal color filter fails.
+ auto inputClone = inputFP ? inputFP->clone() : nullptr;
+
+ auto [innerSuccess, innerFP] =
+ fInner->asFragmentProcessor(std::move(inputFP), context, dstColorInfo, props);
+ if (!innerSuccess) {
+ return GrFPFailure(std::move(inputClone));
+ }
+
+ auto [outerSuccess, outerFP] =
+ fOuter->asFragmentProcessor(std::move(innerFP), context, dstColorInfo, props);
+ if (!outerSuccess) {
+ return GrFPFailure(std::move(inputClone));
+ }
+
+ return GrFPSuccess(std::move(outerFP));
+ }
+#endif
+
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const override {
+ using namespace skgpu::graphite;
+
+ ComposeColorFilterBlock::BeginBlock(keyContext, builder, gatherer);
+
+ as_CFB(fInner)->addToKey(keyContext, builder, gatherer);
+ as_CFB(fOuter)->addToKey(keyContext, builder, gatherer);
+
+ builder->endBlock();
+ }
+#endif // SK_GRAPHITE
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeFlattenable(fOuter.get());
+ buffer.writeFlattenable(fInner.get());
+ }
+
+private:
+ friend void ::SkRegisterComposeColorFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkComposeColorFilter)
+
+ SkComposeColorFilter(sk_sp<SkColorFilter> outer, sk_sp<SkColorFilter> inner)
+ : fOuter(as_CFB_sp(std::move(outer)))
+ , fInner(as_CFB_sp(std::move(inner)))
+ {}
+
+ sk_sp<SkColorFilterBase> fOuter;
+ sk_sp<SkColorFilterBase> fInner;
+
+ friend class SkColorFilter;
+
+ using INHERITED = SkColorFilter;
+};
+
+// Deserialization: rebuild via makeComposed so null inner/outer collapse
+// correctly instead of producing a degenerate compose node.
+sk_sp<SkFlattenable> SkComposeColorFilter::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkColorFilter> outer(buffer.readColorFilter());
+ sk_sp<SkColorFilter> inner(buffer.readColorFilter());
+ return outer ? outer->makeComposed(std::move(inner)) : inner;
+}
+
+// Returns outer(inner(x)); a null inner just returns this filter.
+sk_sp<SkColorFilter> SkColorFilter::makeComposed(sk_sp<SkColorFilter> inner) const {
+ if (!inner) {
+ return sk_ref_sp(this);
+ }
+
+ return sk_sp<SkColorFilter>(new SkComposeColorFilter(sk_ref_sp(this), std::move(inner)));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// A color filter that converts colors from color space fSrc to fDst.
+// Premul handling is done around the transform, so the precomputed steps are
+// always unpremul -> unpremul.
+class ColorSpaceXformColorFilter final : public SkColorFilterBase {
+public:
+ ColorSpaceXformColorFilter(sk_sp<SkColorSpace> src, sk_sp<SkColorSpace> dst)
+ : fSrc(std::move(src))
+ , fDst(std::move(dst))
+ , fSteps(
+ // We handle premul/unpremul separately, so here just always upm->upm.
+ fSrc.get(),
+ kUnpremul_SkAlphaType,
+ fDst.get(),
+ kUnpremul_SkAlphaType)
+
+ {}
+
+#if defined(SK_GANESH)
+ GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkSurfaceProps& props) const override {
+ // wish our caller would let us know if our input was opaque...
+ constexpr SkAlphaType alphaType = kPremul_SkAlphaType;
+ return GrFPSuccess(GrColorSpaceXformEffect::Make(
+ std::move(inputFP), fSrc.get(), alphaType, fDst.get(), alphaType));
+ SkUNREACHABLE;
+ }
+#endif
+
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const override {
+ using namespace skgpu::graphite;
+
+ constexpr SkAlphaType alphaType = kPremul_SkAlphaType;
+ ColorSpaceTransformBlock::ColorSpaceTransformData data(
+ fSrc.get(), alphaType, fDst.get(), alphaType);
+ ColorSpaceTransformBlock::BeginBlock(keyContext, builder, gatherer, &data);
+ builder->endBlock();
+ }
+#endif
+
+ bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ // Unpremul before, re-premul after, since fSteps is upm->upm.
+ if (!shaderIsOpaque) {
+ rec.fPipeline->append(SkRasterPipelineOp::unpremul);
+ }
+
+ fSteps.apply(rec.fPipeline);
+
+ if (!shaderIsOpaque) {
+ rec.fPipeline->append(SkRasterPipelineOp::premul);
+ }
+ return true;
+ }
+
+ skvm::Color onProgram(skvm::Builder* p, skvm::Color c, const SkColorInfo& dst,
+ skvm::Uniforms* uniforms, SkArenaAlloc* alloc) const override {
+ return premul(fSteps.program(p, uniforms, unpremul(c)));
+ }
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeDataAsByteArray(fSrc->serialize().get());
+ buffer.writeDataAsByteArray(fDst->serialize().get());
+ }
+
+private:
+ friend void ::SkRegisterColorSpaceXformColorFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(ColorSpaceXformColorFilter)
+ static sk_sp<SkFlattenable> LegacyGammaOnlyCreateProc(SkReadBuffer& buffer);
+
+ const sk_sp<SkColorSpace> fSrc;
+ const sk_sp<SkColorSpace> fDst;
+ SkColorSpaceXformSteps fSteps;
+
+ friend class SkColorFilter;
+ using INHERITED = SkColorFilterBase;
+};
+
+// Legacy deserializer for the old SkSRGBGammaColorFilter wire format, which
+// stored only a direction flag (0 = linear->sRGB, 1 = sRGB->linear).
+sk_sp<SkFlattenable> ColorSpaceXformColorFilter::LegacyGammaOnlyCreateProc(SkReadBuffer& buffer) {
+ uint32_t dir = buffer.read32();
+ if (!buffer.validate(dir <= 1)) {
+ return nullptr;
+ }
+ if (dir == 0) {
+ return SkColorFilters::LinearToSRGBGamma();
+ }
+ return SkColorFilters::SRGBToLinearGamma();
+}
+
+// Current deserializer: reads two serialized color spaces (src, dst).
+sk_sp<SkFlattenable> ColorSpaceXformColorFilter::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkColorSpace> colorSpaces[2];
+ for (int i = 0; i < 2; ++i) {
+ auto data = buffer.readByteArrayAsData();
+ if (!buffer.validate(data != nullptr)) {
+ return nullptr;
+ }
+ colorSpaces[i] = SkColorSpace::Deserialize(data->data(), data->size());
+ if (!buffer.validate(colorSpaces[i] != nullptr)) {
+ return nullptr;
+ }
+ }
+ return sk_sp<SkFlattenable>(
+ new ColorSpaceXformColorFilter(std::move(colorSpaces[0]), std::move(colorSpaces[1])));
+}
+
+// Process-lifetime singletons for the two common gamma conversions; the
+// leaked `new` is intentional (never destroyed, shared via ref-count).
+sk_sp<SkColorFilter> SkColorFilters::LinearToSRGBGamma() {
+ static SkColorFilter* gSingleton = new ColorSpaceXformColorFilter(
+ SkColorSpace::MakeSRGBLinear(), SkColorSpace::MakeSRGB());
+ return sk_ref_sp(gSingleton);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::SRGBToLinearGamma() {
+ static SkColorFilter* gSingleton = new ColorSpaceXformColorFilter(
+ SkColorSpace::MakeSRGB(), SkColorSpace::MakeSRGBLinear());
+ return sk_ref_sp(gSingleton);
+}
+
+// General src->dst transform factory (private API).
+sk_sp<SkColorFilter> SkColorFilterPriv::MakeColorSpaceXform(sk_sp<SkColorSpace> src,
+ sk_sp<SkColorSpace> dst) {
+ return sk_make_sp<ColorSpaceXformColorFilter>(std::move(src), std::move(dst));
+}
+
+// Runs fChild in a "working" color format instead of the destination's:
+// each of transfer function / gamut / alpha type can be overridden, with
+// unset aspects (fUseDst* == true) taken from the destination at filter time.
+// Inputs are converted dst->working, the child runs, then working->dst.
+class SkWorkingFormatColorFilter final : public SkColorFilterBase {
+public:
+ SkWorkingFormatColorFilter(sk_sp<SkColorFilter> child,
+ const skcms_TransferFunction* tf,
+ const skcms_Matrix3x3* gamut,
+ const SkAlphaType* at) {
+ fChild = std::move(child);
+ if (tf) { fTF = *tf; fUseDstTF = false; }
+ if (gamut) { fGamut = *gamut; fUseDstGamut = false; }
+ if (at) { fAT = *at; fUseDstAT = false; }
+ }
+
+ // Resolves the working color space/alpha type against the destination.
+ sk_sp<SkColorSpace> workingFormat(const sk_sp<SkColorSpace>& dstCS, SkAlphaType* at) const {
+ skcms_TransferFunction tf = fTF;
+ skcms_Matrix3x3 gamut = fGamut;
+
+ if (fUseDstTF ) { SkAssertResult(dstCS->isNumericalTransferFn(&tf)); }
+ if (fUseDstGamut) { SkAssertResult(dstCS->toXYZD50 (&gamut)); }
+
+ *at = fUseDstAT ? kPremul_SkAlphaType : fAT;
+ return SkColorSpace::MakeRGB(tf, gamut);
+ }
+
+#if defined(SK_GANESH)
+ GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkSurfaceProps& props) const override {
+ sk_sp<SkColorSpace> dstCS = dstColorInfo.refColorSpace();
+ if (!dstCS) { dstCS = SkColorSpace::MakeSRGB(); }
+
+ SkAlphaType workingAT;
+ sk_sp<SkColorSpace> workingCS = this->workingFormat(dstCS, &workingAT);
+
+ GrColorInfo dst = {dstColorInfo.colorType(), dstColorInfo.alphaType(), dstCS},
+ working = {dstColorInfo.colorType(), workingAT, workingCS};
+
+ auto [ok, fp] = as_CFB(fChild)->asFragmentProcessor(
+ GrColorSpaceXformEffect::Make(std::move(inputFP), dst,working), context, working,
+ props);
+
+ return ok ? GrFPSuccess(GrColorSpaceXformEffect::Make(std::move(fp), working,dst))
+ : GrFPFailure(std::move(fp));
+ }
+#endif
+
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const override {
+ using namespace skgpu::graphite;
+
+ const SkAlphaType dstAT = keyContext.dstColorInfo().alphaType();
+ sk_sp<SkColorSpace> dstCS = keyContext.dstColorInfo().refColorSpace();
+ if (!dstCS) {
+ dstCS = SkColorSpace::MakeSRGB();
+ }
+
+ SkAlphaType workingAT;
+ sk_sp<SkColorSpace> workingCS = this->workingFormat(dstCS, &workingAT);
+
+ // dst -> working, child, working -> dst.
+ ColorSpaceTransformBlock::ColorSpaceTransformData data1(
+ dstCS.get(), dstAT, workingCS.get(), workingAT);
+ ColorSpaceTransformBlock::BeginBlock(keyContext, builder, gatherer, &data1);
+ builder->endBlock();
+
+ as_CFB(fChild)->addToKey(keyContext, builder, gatherer);
+
+ ColorSpaceTransformBlock::ColorSpaceTransformData data2(
+ workingCS.get(), workingAT, dstCS.get(), dstAT);
+ ColorSpaceTransformBlock::BeginBlock(keyContext, builder, gatherer, &data2);
+ builder->endBlock();
+ }
+#endif
+
+ bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ sk_sp<SkColorSpace> dstCS = sk_ref_sp(rec.fDstCS);
+
+ if (!dstCS) { dstCS = SkColorSpace::MakeSRGB(); }
+
+ SkAlphaType workingAT;
+ sk_sp<SkColorSpace> workingCS = this->workingFormat(dstCS, &workingAT);
+
+ SkColorInfo dst = {rec.fDstColorType, kPremul_SkAlphaType, dstCS},
+ working = {rec.fDstColorType, workingAT, workingCS};
+
+ const auto* dstToWorking = rec.fAlloc->make<SkColorSpaceXformSteps>(dst, working);
+ const auto* workingToDst = rec.fAlloc->make<SkColorSpaceXformSteps>(working, dst);
+
+ // Any SkSL effects might reference the paint color, which is already in the destination
+ // color space. We need to transform it to the working space for consistency.
+ SkColor4f paintColorInWorkingSpace = rec.fPaintColor;
+ dstToWorking->apply(paintColorInWorkingSpace.vec());
+
+ SkStageRec workingRec = {rec.fPipeline,
+ rec.fAlloc,
+ rec.fDstColorType,
+ workingCS.get(),
+ paintColorInWorkingSpace,
+ rec.fSurfaceProps};
+
+ dstToWorking->apply(rec.fPipeline);
+ if (!as_CFB(fChild)->appendStages(workingRec, shaderIsOpaque)) {
+ return false;
+ }
+ workingToDst->apply(rec.fPipeline);
+ return true;
+ }
+
+ skvm::Color onProgram(skvm::Builder* p, skvm::Color c, const SkColorInfo& rawDst,
+ skvm::Uniforms* uniforms, SkArenaAlloc* alloc) const override {
+ sk_sp<SkColorSpace> dstCS = rawDst.refColorSpace();
+ if (!dstCS) { dstCS = SkColorSpace::MakeSRGB(); }
+
+ SkAlphaType workingAT;
+ sk_sp<SkColorSpace> workingCS = this->workingFormat(dstCS, &workingAT);
+
+ SkColorInfo dst = {rawDst.colorType(), kPremul_SkAlphaType, dstCS},
+ working = {rawDst.colorType(), workingAT, workingCS};
+
+ c = SkColorSpaceXformSteps{dst,working}.program(p, uniforms, c);
+ c = as_CFB(fChild)->program(p, c, working, uniforms, alloc);
+ return c ? SkColorSpaceXformSteps{working,dst}.program(p, uniforms, c)
+ : c;
+ }
+
+ SkPMColor4f onFilterColor4f(const SkPMColor4f& origColor,
+ SkColorSpace* rawDstCS) const override {
+ sk_sp<SkColorSpace> dstCS = sk_ref_sp(rawDstCS);
+ if (!dstCS) { dstCS = SkColorSpace::MakeSRGB(); }
+
+ SkAlphaType workingAT;
+ sk_sp<SkColorSpace> workingCS = this->workingFormat(dstCS, &workingAT);
+
+ SkColorInfo dst = {kUnknown_SkColorType, kPremul_SkAlphaType, dstCS},
+ working = {kUnknown_SkColorType, workingAT, workingCS};
+
+ SkPMColor4f color = origColor;
+ SkColorSpaceXformSteps{dst,working}.apply(color.vec());
+ color = as_CFB(fChild)->onFilterColor4f(color, working.colorSpace());
+ SkColorSpaceXformSteps{working,dst}.apply(color.vec());
+ return color;
+ }
+
+ bool onIsAlphaUnchanged() const override { return fChild->isAlphaUnchanged(); }
+
+private:
+ friend void ::SkRegisterWorkingFormatColorFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkWorkingFormatColorFilter)
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeFlattenable(fChild.get());
+ buffer.writeBool(fUseDstTF);
+ buffer.writeBool(fUseDstGamut);
+ buffer.writeBool(fUseDstAT);
+ if (!fUseDstTF) { buffer.writeScalarArray(&fTF.g, 7); }
+ if (!fUseDstGamut) { buffer.writeScalarArray(&fGamut.vals[0][0], 9); }
+ if (!fUseDstAT) { buffer.writeInt(fAT); }
+ }
+
+ sk_sp<SkColorFilter> fChild;
+ skcms_TransferFunction fTF; bool fUseDstTF = true;
+ skcms_Matrix3x3 fGamut; bool fUseDstGamut = true;
+ SkAlphaType fAT; bool fUseDstAT = true;
+};
+
+// Deserializer: mirrors flatten() above — three use-dst flags followed by
+// the overridden aspects only.
+sk_sp<SkFlattenable> SkWorkingFormatColorFilter::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkColorFilter> child = buffer.readColorFilter();
+ bool useDstTF = buffer.readBool(),
+ useDstGamut = buffer.readBool(),
+ useDstAT = buffer.readBool();
+
+ skcms_TransferFunction tf;
+ skcms_Matrix3x3 gamut;
+ SkAlphaType at;
+
+ if (!useDstTF) { buffer.readScalarArray(&tf.g, 7); }
+ if (!useDstGamut) { buffer.readScalarArray(&gamut.vals[0][0], 9); }
+ if (!useDstAT) { at = buffer.read32LE(kLastEnum_SkAlphaType); }
+
+ return SkColorFilterPriv::WithWorkingFormat(std::move(child),
+ useDstTF ? nullptr : &tf,
+ useDstGamut ? nullptr : &gamut,
+ useDstAT ? nullptr : &at);
+}
+
+// Factory; null tf/gamut/at parameters mean "use the destination's value".
+sk_sp<SkColorFilter> SkColorFilterPriv::WithWorkingFormat(sk_sp<SkColorFilter> child,
+ const skcms_TransferFunction* tf,
+ const skcms_Matrix3x3* gamut,
+ const SkAlphaType* at) {
+ return sk_make_sp<SkWorkingFormatColorFilter>(std::move(child), tf, gamut, at);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Linear interpolation between two color filters via a runtime effect:
+// result = mix(cf0(color), cf1(color), weight). Degenerate weights or equal
+// filters short-circuit to one of the inputs; NaN weight yields nullptr.
+sk_sp<SkColorFilter> SkColorFilters::Lerp(float weight, sk_sp<SkColorFilter> cf0,
+ sk_sp<SkColorFilter> cf1) {
+#ifdef SK_ENABLE_SKSL
+ if (!cf0 && !cf1) {
+ return nullptr;
+ }
+ if (SkScalarIsNaN(weight)) {
+ return nullptr;
+ }
+
+ if (cf0 == cf1) {
+ return cf0; // or cf1
+ }
+
+ if (weight <= 0) {
+ return cf0;
+ }
+ if (weight >= 1) {
+ return cf1;
+ }
+
+ // Cached once per process; a null cf0/cf1 child evaluates as passthrough.
+ static const SkRuntimeEffect* effect = SkMakeCachedRuntimeEffect(
+ SkRuntimeEffect::MakeForColorFilter,
+ "uniform colorFilter cf0;"
+ "uniform colorFilter cf1;"
+ "uniform half weight;"
+ "half4 main(half4 color) {"
+ "return mix(cf0.eval(color), cf1.eval(color), weight);"
+ "}"
+ ).release();
+ SkASSERT(effect);
+
+ sk_sp<SkColorFilter> inputs[] = {cf0,cf1};
+ return effect->makeColorFilter(SkData::MakeWithCopy(&weight, sizeof(weight)),
+ inputs, std::size(inputs));
+#else
+ // TODO(skia:12197)
+ return nullptr;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Flattenable registration hooks, called from the central registry so the
+// deserializers above can be found by type name.
+void SkRegisterComposeColorFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkComposeColorFilter);
+}
+
+void SkRegisterColorSpaceXformColorFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(ColorSpaceXformColorFilter);
+ // TODO(ccameron): Remove after grace period for SKPs to stop using old serialization.
+ SkFlattenable::Register("SkSRGBGammaColorFilter",
+ ColorSpaceXformColorFilter::LegacyGammaOnlyCreateProc);
+}
+
+void SkRegisterWorkingFormatColorFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkWorkingFormatColorFilter);
+}
diff --git a/gfx/skia/skia/src/core/SkColorFilterBase.h b/gfx/skia/skia/src/core/SkColorFilterBase.h
new file mode 100644
index 0000000000..31652db6be
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilterBase.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilterBase_DEFINED
+#define SkColorFilterBase_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkVM_fwd.h"
+
+#include <memory>
+#include <tuple>
+
+class GrColorInfo;
+class GrFragmentProcessor;
+class GrRecordingContext;
+class SkArenaAlloc;
+class SkBitmap;
+class SkColorInfo;
+class SkColorSpace;
+class SkRuntimeEffect;
+class SkSurfaceProps;
+struct SkStageRec;
+using GrFPResult = std::tuple<bool, std::unique_ptr<GrFragmentProcessor>>;
+
+namespace skgpu::graphite {
+class KeyContext;
+class PaintParamsKeyBuilder;
+class PipelineDataGatherer;
+}
+
+// Internal base class for all color filter implementations: adds the
+// raster-pipeline/skvm/GPU hooks that the public SkColorFilter API hides.
+class SkColorFilterBase : public SkColorFilter {
+public:
+ SK_WARN_UNUSED_RESULT
+ virtual bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const = 0;
+
+ SK_WARN_UNUSED_RESULT
+ skvm::Color program(skvm::Builder*, skvm::Color,
+ const SkColorInfo& dst, skvm::Uniforms*, SkArenaAlloc*) const;
+
+ /** Returns the flags for this filter. Override in subclasses to return custom flags.
+ */
+ virtual bool onIsAlphaUnchanged() const { return false; }
+
+#if defined(SK_GANESH)
+ /**
+ * A subclass may implement this factory function to work with the GPU backend. It returns
+ * a GrFragmentProcessor that implements the color filter in GPU shader code.
+ *
+ * The fragment processor receives a input FP that generates a premultiplied input color, and
+ * produces a premultiplied output color.
+ *
+ * A GrFPFailure indicates that the color filter isn't implemented for the GPU backend.
+ */
+ virtual GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context,
+ const GrColorInfo& dstColorInfo,
+ const SkSurfaceProps& props) const;
+#endif
+
+ // Detected empirically: a filter affects transparent black iff filtering
+ // transparent black yields something else.
+ bool affectsTransparentBlack() const {
+ return this->filterColor(SK_ColorTRANSPARENT) != SK_ColorTRANSPARENT;
+ }
+
+ virtual SkRuntimeEffect* asRuntimeEffect() const { return nullptr; }
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkColorFilter_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkColorFilter_Type;
+ }
+
+ static sk_sp<SkColorFilter> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkColorFilter>(static_cast<SkColorFilter*>(
+ SkFlattenable::Deserialize(
+ kSkColorFilter_Type, data, size, procs).release()));
+ }
+
+ virtual SkPMColor4f onFilterColor4f(const SkPMColor4f& color, SkColorSpace* dstCS) const;
+
+#if defined(SK_GRAPHITE)
+ /**
+ Add implementation details, for the specified backend, of this SkColorFilter to the
+ provided key.
+
+ @param keyContext backend context for key creation
+ @param builder builder for creating the key for this SkShader
+ @param gatherer if non-null, storage for this colorFilter's data
+ */
+ virtual void addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const;
+#endif
+
+protected:
+ SkColorFilterBase() {}
+
+ virtual bool onAsAColorMatrix(float[20]) const;
+ virtual bool onAsAColorMode(SkColor* color, SkBlendMode* bmode) const;
+
+private:
+ virtual skvm::Color onProgram(skvm::Builder*, skvm::Color,
+ const SkColorInfo& dst, skvm::Uniforms*, SkArenaAlloc*) const = 0;
+
+ friend class SkColorFilter;
+
+ using INHERITED = SkFlattenable;
+};
+
+// Downcast helpers: every SkColorFilter in the process is actually an
+// SkColorFilterBase, so these static_casts are safe by construction.
+static inline SkColorFilterBase* as_CFB(SkColorFilter* filter) {
+ return static_cast<SkColorFilterBase*>(filter);
+}
+
+static inline const SkColorFilterBase* as_CFB(const SkColorFilter* filter) {
+ return static_cast<const SkColorFilterBase*>(filter);
+}
+
+static inline const SkColorFilterBase* as_CFB(const sk_sp<SkColorFilter>& filter) {
+ return static_cast<SkColorFilterBase*>(filter.get());
+}
+
+// Ownership-transferring variant for sk_sp.
+static inline sk_sp<SkColorFilterBase> as_CFB_sp(sk_sp<SkColorFilter> filter) {
+ return sk_sp<SkColorFilterBase>(static_cast<SkColorFilterBase*>(filter.release()));
+}
+
+
+void SkRegisterComposeColorFilterFlattenable();
+void SkRegisterMatrixColorFilterFlattenable();
+void SkRegisterModeColorFilterFlattenable();
+void SkRegisterColorSpaceXformColorFilterFlattenable();
+void SkRegisterTableColorFilterFlattenable();
+void SkRegisterWorkingFormatColorFilterFlattenable();
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColorFilterPriv.h b/gfx/skia/skia/src/core/SkColorFilterPriv.h
new file mode 100644
index 0000000000..2a243103f7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilterPriv.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilterPriv_DEFINED
+#define SkColorFilterPriv_DEFINED
+
+#include "include/core/SkColorFilter.h"
+
+class SkColorSpace;
+struct skcms_Matrix3x3;
+struct skcms_TransferFunction;
+
+class SkColorFilterPriv {
+public:
+ static sk_sp<SkColorFilter> MakeGaussian();
+
+ // Make a color filter that will convert from src to dst.
+ static sk_sp<SkColorFilter> MakeColorSpaceXform(sk_sp<SkColorSpace> src,
+ sk_sp<SkColorSpace> dst);
+
+ // Runs the child filter in a different working color format than usual (premul in
+ // destination surface's color space), with all inputs and outputs expressed in this format.
+ // Each non-null {tf,gamut,at} parameter overrides that particular aspect of the color format.
+ static sk_sp<SkColorFilter> WithWorkingFormat(sk_sp<SkColorFilter> child,
+ const skcms_TransferFunction* tf,
+ const skcms_Matrix3x3* gamut,
+ const SkAlphaType* at);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkColorFilter_Matrix.cpp b/gfx/skia/skia/src/core/SkColorFilter_Matrix.cpp
new file mode 100644
index 0000000000..7ecdbcc9d6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorFilter_Matrix.cpp
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/effects/SkColorMatrix.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif // SK_GRAPHITE
+
+static bool is_alpha_unchanged(const float matrix[20]) {
+ const float* srcA = matrix + 15;
+
+ return SkScalarNearlyZero (srcA[0])
+ && SkScalarNearlyZero (srcA[1])
+ && SkScalarNearlyZero (srcA[2])
+ && SkScalarNearlyEqual(srcA[3], 1)
+ && SkScalarNearlyZero (srcA[4]);
+}
+
+class SkColorFilter_Matrix final : public SkColorFilterBase {
+public:
+ enum class Domain : uint8_t { kRGBA, kHSLA };
+
+ explicit SkColorFilter_Matrix(const float array[20], Domain);
+
+ bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const override;
+
+ bool onIsAlphaUnchanged() const override { return fAlphaIsUnchanged; }
+
+#if defined(SK_GANESH)
+ GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext*,
+ const GrColorInfo&,
+ const SkSurfaceProps&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+private:
+ friend void ::SkRegisterMatrixColorFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkColorFilter_Matrix)
+
+ void flatten(SkWriteBuffer&) const override;
+ bool onAsAColorMatrix(float matrix[20]) const override;
+
+ skvm::Color onProgram(skvm::Builder*, skvm::Color,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms, SkArenaAlloc*) const override;
+
+ float fMatrix[20];
+ bool fAlphaIsUnchanged;
+ Domain fDomain;
+};
+
+SkColorFilter_Matrix::SkColorFilter_Matrix(const float array[20], Domain domain)
+ : fAlphaIsUnchanged(is_alpha_unchanged(array))
+ , fDomain(domain) {
+ memcpy(fMatrix, array, 20 * sizeof(float));
+}
+
+void SkColorFilter_Matrix::flatten(SkWriteBuffer& buffer) const {
+ SkASSERT(sizeof(fMatrix)/sizeof(float) == 20);
+ buffer.writeScalarArray(fMatrix, 20);
+
+ // RGBA flag
+ buffer.writeBool(fDomain == Domain::kRGBA);
+}
+
+sk_sp<SkFlattenable> SkColorFilter_Matrix::CreateProc(SkReadBuffer& buffer) {
+ float matrix[20];
+ if (!buffer.readScalarArray(matrix, 20)) {
+ return nullptr;
+ }
+
+ auto is_rgba = buffer.readBool();
+ return is_rgba ? SkColorFilters::Matrix(matrix)
+ : SkColorFilters::HSLAMatrix(matrix);
+}
+
+bool SkColorFilter_Matrix::onAsAColorMatrix(float matrix[20]) const {
+ if (matrix) {
+ memcpy(matrix, fMatrix, 20 * sizeof(float));
+ }
+ return true;
+}
+
+bool SkColorFilter_Matrix::appendStages(const SkStageRec& rec, bool shaderIsOpaque) const {
+ const bool willStayOpaque = shaderIsOpaque && fAlphaIsUnchanged,
+ hsla = fDomain == Domain::kHSLA;
+
+ SkRasterPipeline* p = rec.fPipeline;
+ if (!shaderIsOpaque) { p->append(SkRasterPipelineOp::unpremul); }
+ if ( hsla) { p->append(SkRasterPipelineOp::rgb_to_hsl); }
+ if ( true) { p->append(SkRasterPipelineOp::matrix_4x5, fMatrix); }
+ if ( hsla) { p->append(SkRasterPipelineOp::hsl_to_rgb); }
+ if ( true) { p->append(SkRasterPipelineOp::clamp_01); }
+ if (!willStayOpaque) { p->append(SkRasterPipelineOp::premul); }
+ return true;
+}
+
+
+skvm::Color SkColorFilter_Matrix::onProgram(skvm::Builder* p, skvm::Color c,
+ const SkColorInfo& /*dst*/,
+ skvm::Uniforms* uniforms, SkArenaAlloc*) const {
+ auto apply_matrix = [&](auto xyzw) {
+ auto dot = [&](int j) {
+ auto custom_mad = [&](float f, skvm::F32 m, skvm::F32 a) {
+ // skvm::Builder won't fold f*0 == 0, but we shouldn't encounter NaN here.
+ // While looking, also simplify f == ±1. Anything else becomes a uniform.
+ return f == 0.0f ? a
+ : f == +1.0f ? a + m
+ : f == -1.0f ? a - m
+ : m * p->uniformF(uniforms->pushF(f)) + a;
+ };
+
+ // Similarly, let skvm::Builder fold away the additive bias when zero.
+ const float b = fMatrix[4+j*5];
+ skvm::F32 bias = b == 0.0f ? p->splat(0.0f)
+ : p->uniformF(uniforms->pushF(b));
+
+ auto [x,y,z,w] = xyzw;
+ return custom_mad(fMatrix[0+j*5], x,
+ custom_mad(fMatrix[1+j*5], y,
+ custom_mad(fMatrix[2+j*5], z,
+ custom_mad(fMatrix[3+j*5], w, bias))));
+ };
+ return std::make_tuple(dot(0), dot(1), dot(2), dot(3));
+ };
+
+ c = unpremul(c);
+
+ if (fDomain == Domain::kHSLA) {
+ auto [h,s,l,a] = apply_matrix(p->to_hsla(c));
+ c = p->to_rgba({h,s,l,a});
+ } else {
+ auto [r,g,b,a] = apply_matrix(c);
+ c = {r,g,b,a};
+ }
+
+ return premul(clamp01(c));
+}
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+
+static std::unique_ptr<GrFragmentProcessor> rgb_to_hsl(std::unique_ptr<GrFragmentProcessor> child) {
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForColorFilter,
+ "half4 main(half4 color) {"
+ "return $rgb_to_hsl(color.rgb, color.a);"
+ "}"
+ );
+ SkASSERT(SkRuntimeEffectPriv::SupportsConstantOutputForConstantInput(effect));
+ return GrSkSLFP::Make(effect, "RgbToHsl", std::move(child),
+ GrSkSLFP::OptFlags::kPreservesOpaqueInput);
+}
+
+static std::unique_ptr<GrFragmentProcessor> hsl_to_rgb(std::unique_ptr<GrFragmentProcessor> child) {
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForColorFilter,
+ "half4 main(half4 color) {"
+ "return $hsl_to_rgb(color.rgb, color.a);"
+ "}"
+ );
+ SkASSERT(SkRuntimeEffectPriv::SupportsConstantOutputForConstantInput(effect));
+ return GrSkSLFP::Make(effect, "HslToRgb", std::move(child),
+ GrSkSLFP::OptFlags::kPreservesOpaqueInput);
+}
+
+GrFPResult SkColorFilter_Matrix::asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> fp,
+ GrRecordingContext*,
+ const GrColorInfo&,
+ const SkSurfaceProps&) const {
+ switch (fDomain) {
+ case Domain::kRGBA:
+ fp = GrFragmentProcessor::ColorMatrix(std::move(fp), fMatrix,
+ /* unpremulInput = */ true,
+ /* clampRGBOutput = */ true,
+ /* premulOutput = */ true);
+ break;
+
+ case Domain::kHSLA:
+ fp = rgb_to_hsl(std::move(fp));
+ fp = GrFragmentProcessor::ColorMatrix(std::move(fp), fMatrix,
+ /* unpremulInput = */ false,
+ /* clampRGBOutput = */ false,
+ /* premulOutput = */ false);
+ fp = hsl_to_rgb(std::move(fp));
+ break;
+ }
+
+ return GrFPSuccess(std::move(fp));
+}
+
+#endif // defined(SK_GANESH)
+
+#if defined(SK_GRAPHITE)
+void SkColorFilter_Matrix::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ MatrixColorFilterBlock::MatrixColorFilterData matrixCFData(fMatrix,
+ fDomain == Domain::kHSLA);
+
+ MatrixColorFilterBlock::BeginBlock(keyContext, builder, gatherer, &matrixCFData);
+ builder->endBlock();
+}
+#endif // SK_GRAPHITE
+
+///////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkColorFilter> MakeMatrix(const float array[20],
+ SkColorFilter_Matrix::Domain domain) {
+ if (!sk_floats_are_finite(array, 20)) {
+ return nullptr;
+ }
+ return sk_make_sp<SkColorFilter_Matrix>(array, domain);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::Matrix(const float array[20]) {
+ return MakeMatrix(array, SkColorFilter_Matrix::Domain::kRGBA);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::Matrix(const SkColorMatrix& cm) {
+ return MakeMatrix(cm.fMat.data(), SkColorFilter_Matrix::Domain::kRGBA);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::HSLAMatrix(const float array[20]) {
+ return MakeMatrix(array, SkColorFilter_Matrix::Domain::kHSLA);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::HSLAMatrix(const SkColorMatrix& cm) {
+ return MakeMatrix(cm.fMat.data(), SkColorFilter_Matrix::Domain::kHSLA);
+}
+
+void SkRegisterMatrixColorFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkColorFilter_Matrix);
+}
diff --git a/gfx/skia/skia/src/core/SkColorSpace.cpp b/gfx/skia/skia/src/core/SkColorSpace.cpp
new file mode 100644
index 0000000000..d93435d0cc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpace.cpp
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/private/SkOpts_spi.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTemplates.h"
+#include "modules/skcms/skcms.h"
+#include "src/core/SkColorSpacePriv.h"
+
+#include <cstring>
+
+bool SkColorSpacePrimaries::toXYZD50(skcms_Matrix3x3* toXYZ_D50) const {
+ return skcms_PrimariesToXYZD50(fRX, fRY, fGX, fGY, fBX, fBY, fWX, fWY, toXYZ_D50);
+}
+
+SkColorSpace::SkColorSpace(const skcms_TransferFunction& transferFn,
+ const skcms_Matrix3x3& toXYZD50)
+ : fTransferFn(transferFn)
+ , fToXYZD50(toXYZD50) {
+ fTransferFnHash = SkOpts::hash_fn(&fTransferFn, 7*sizeof(float), 0);
+ fToXYZD50Hash = SkOpts::hash_fn(&fToXYZD50, 9*sizeof(float), 0);
+}
+
+static bool xyz_almost_equal(const skcms_Matrix3x3& mA, const skcms_Matrix3x3& mB) {
+ for (int r = 0; r < 3; ++r) {
+ for (int c = 0; c < 3; ++c) {
+ if (!color_space_almost_equal(mA.vals[r][c], mB.vals[r][c])) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::MakeRGB(const skcms_TransferFunction& transferFn,
+ const skcms_Matrix3x3& toXYZ) {
+ if (skcms_TransferFunction_getType(&transferFn) == skcms_TFType_Invalid) {
+ return nullptr;
+ }
+
+ const skcms_TransferFunction* tf = &transferFn;
+
+ if (is_almost_srgb(transferFn)) {
+ if (xyz_almost_equal(toXYZ, SkNamedGamut::kSRGB)) {
+ return SkColorSpace::MakeSRGB();
+ }
+ tf = &SkNamedTransferFn::kSRGB;
+ } else if (is_almost_2dot2(transferFn)) {
+ tf = &SkNamedTransferFn::k2Dot2;
+ } else if (is_almost_linear(transferFn)) {
+ if (xyz_almost_equal(toXYZ, SkNamedGamut::kSRGB)) {
+ return SkColorSpace::MakeSRGBLinear();
+ }
+ tf = &SkNamedTransferFn::kLinear;
+ }
+
+ return sk_sp<SkColorSpace>(new SkColorSpace(*tf, toXYZ));
+}
+
+class SkColorSpaceSingletonFactory {
+public:
+ static SkColorSpace* Make(const skcms_TransferFunction& transferFn,
+ const skcms_Matrix3x3& to_xyz) {
+ return new SkColorSpace(transferFn, to_xyz);
+ }
+};
+
+SkColorSpace* sk_srgb_singleton() {
+ static SkColorSpace* cs = SkColorSpaceSingletonFactory::Make(SkNamedTransferFn::kSRGB,
+ SkNamedGamut::kSRGB);
+ return cs;
+}
+
+SkColorSpace* sk_srgb_linear_singleton() {
+ static SkColorSpace* cs = SkColorSpaceSingletonFactory::Make(SkNamedTransferFn::kLinear,
+ SkNamedGamut::kSRGB);
+ return cs;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::MakeSRGB() {
+ return sk_ref_sp(sk_srgb_singleton());
+}
+
+sk_sp<SkColorSpace> SkColorSpace::MakeSRGBLinear() {
+ return sk_ref_sp(sk_srgb_linear_singleton());
+}
+
+void SkColorSpace::computeLazyDstFields() const {
+ fLazyDstFieldsOnce([this] {
+
+ // Invert 3x3 gamut, defaulting to sRGB if we can't.
+ {
+ if (!skcms_Matrix3x3_invert(&fToXYZD50, &fFromXYZD50)) {
+ SkAssertResult(skcms_Matrix3x3_invert(&skcms_sRGB_profile()->toXYZD50,
+ &fFromXYZD50));
+ }
+ }
+
+ // Invert transfer function, defaulting to sRGB if we can't.
+ {
+ if (!skcms_TransferFunction_invert(&fTransferFn, &fInvTransferFn)) {
+ fInvTransferFn = *skcms_sRGB_Inverse_TransferFunction();
+ }
+ }
+
+ });
+}
+
+bool SkColorSpace::isNumericalTransferFn(skcms_TransferFunction* coeffs) const {
+ // TODO: Change transferFn/invTransferFn to just operate on skcms_TransferFunction (all callers
+ // already pass pointers to an skcms struct). Then remove this function, and update the two
+ // remaining callers to do the right thing with transferFn and classify.
+ this->transferFn(coeffs);
+ return skcms_TransferFunction_getType(coeffs) == skcms_TFType_sRGBish;
+}
+
+void SkColorSpace::transferFn(float gabcdef[7]) const {
+ memcpy(gabcdef, &fTransferFn, 7*sizeof(float));
+}
+
+void SkColorSpace::transferFn(skcms_TransferFunction* fn) const {
+ *fn = fTransferFn;
+}
+
+void SkColorSpace::invTransferFn(skcms_TransferFunction* fn) const {
+ this->computeLazyDstFields();
+ *fn = fInvTransferFn;
+}
+
+bool SkColorSpace::toXYZD50(skcms_Matrix3x3* toXYZD50) const {
+ *toXYZD50 = fToXYZD50;
+ return true;
+}
+
+void SkColorSpace::gamutTransformTo(const SkColorSpace* dst, skcms_Matrix3x3* src_to_dst) const {
+ dst->computeLazyDstFields();
+ *src_to_dst = skcms_Matrix3x3_concat(&dst->fFromXYZD50, &fToXYZD50);
+}
+
+bool SkColorSpace::isSRGB() const {
+ return sk_srgb_singleton() == this;
+}
+
+bool SkColorSpace::gammaCloseToSRGB() const {
+ // Nearly-equal transfer functions were snapped at construction time, so just do an exact test
+ return memcmp(&fTransferFn, &SkNamedTransferFn::kSRGB, 7*sizeof(float)) == 0;
+}
+
+bool SkColorSpace::gammaIsLinear() const {
+ // Nearly-equal transfer functions were snapped at construction time, so just do an exact test
+ return memcmp(&fTransferFn, &SkNamedTransferFn::kLinear, 7*sizeof(float)) == 0;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::makeLinearGamma() const {
+ if (this->gammaIsLinear()) {
+ return sk_ref_sp(const_cast<SkColorSpace*>(this));
+ }
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::kLinear, fToXYZD50);
+}
+
+sk_sp<SkColorSpace> SkColorSpace::makeSRGBGamma() const {
+ if (this->gammaCloseToSRGB()) {
+ return sk_ref_sp(const_cast<SkColorSpace*>(this));
+ }
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::kSRGB, fToXYZD50);
+}
+
+sk_sp<SkColorSpace> SkColorSpace::makeColorSpin() const {
+ skcms_Matrix3x3 spin = {{
+ { 0, 0, 1 },
+ { 1, 0, 0 },
+ { 0, 1, 0 },
+ }};
+
+ skcms_Matrix3x3 spun = skcms_Matrix3x3_concat(&fToXYZD50, &spin);
+
+ return sk_sp<SkColorSpace>(new SkColorSpace(fTransferFn, spun));
+}
+
+void SkColorSpace::toProfile(skcms_ICCProfile* profile) const {
+ skcms_Init (profile);
+ skcms_SetTransferFunction(profile, &fTransferFn);
+ skcms_SetXYZD50 (profile, &fToXYZD50);
+}
+
+sk_sp<SkColorSpace> SkColorSpace::Make(const skcms_ICCProfile& profile) {
+ // TODO: move below ≈sRGB test?
+ if (!profile.has_toXYZD50 || !profile.has_trc) {
+ return nullptr;
+ }
+
+ if (skcms_ApproximatelyEqualProfiles(&profile, skcms_sRGB_profile())) {
+ return SkColorSpace::MakeSRGB();
+ }
+
+ // TODO: can we save this work and skip lazily inverting the matrix later?
+ skcms_Matrix3x3 inv;
+ if (!skcms_Matrix3x3_invert(&profile.toXYZD50, &inv)) {
+ return nullptr;
+ }
+
+ // We can't work with tables or mismatched parametric curves,
+ // but if they all look close enough to sRGB, that's fine.
+ // TODO: should we maybe do this unconditionally to snap near-sRGB parametrics to sRGB?
+ const skcms_Curve* trc = profile.trc;
+ if (trc[0].table_entries != 0 ||
+ trc[1].table_entries != 0 ||
+ trc[2].table_entries != 0 ||
+ 0 != memcmp(&trc[0].parametric, &trc[1].parametric, sizeof(trc[0].parametric)) ||
+ 0 != memcmp(&trc[0].parametric, &trc[2].parametric, sizeof(trc[0].parametric)))
+ {
+ if (skcms_TRCs_AreApproximateInverse(&profile, skcms_sRGB_Inverse_TransferFunction())) {
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::kSRGB, profile.toXYZD50);
+ }
+ return nullptr;
+ }
+
+ return SkColorSpace::MakeRGB(profile.trc[0].parametric, profile.toXYZD50);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+enum Version {
+ k0_Version, // Initial version, header + flags for matrix and profile
+ k1_Version, // Simple header (version tag) + 16 floats
+
+ kCurrent_Version = k1_Version,
+};
+
+enum NamedColorSpace {
+ kSRGB_NamedColorSpace,
+ kAdobeRGB_NamedColorSpace,
+ kSRGBLinear_NamedColorSpace,
+};
+
+enum NamedGamma {
+ kLinear_NamedGamma,
+ kSRGB_NamedGamma,
+ k2Dot2_NamedGamma,
+};
+
+struct ColorSpaceHeader {
+ // Flag values, only used by old (k0_Version) serialization
+ inline static constexpr uint8_t kMatrix_Flag = 1 << 0;
+ inline static constexpr uint8_t kICC_Flag = 1 << 1;
+ inline static constexpr uint8_t kTransferFn_Flag = 1 << 3;
+
+ uint8_t fVersion = kCurrent_Version;
+
+ // Other fields are only used by k0_Version. Could be re-purposed in future versions.
+ uint8_t fNamed = 0;
+ uint8_t fGammaNamed = 0;
+ uint8_t fFlags = 0;
+};
+
+size_t SkColorSpace::writeToMemory(void* memory) const {
+ if (memory) {
+ *((ColorSpaceHeader*) memory) = ColorSpaceHeader();
+ memory = SkTAddOffset<void>(memory, sizeof(ColorSpaceHeader));
+
+ memcpy(memory, &fTransferFn, 7 * sizeof(float));
+ memory = SkTAddOffset<void>(memory, 7 * sizeof(float));
+
+ memcpy(memory, &fToXYZD50, 9 * sizeof(float));
+ }
+
+ return sizeof(ColorSpaceHeader) + 16 * sizeof(float);
+}
+
+sk_sp<SkData> SkColorSpace::serialize() const {
+ sk_sp<SkData> data = SkData::MakeUninitialized(this->writeToMemory(nullptr));
+ this->writeToMemory(data->writable_data());
+ return data;
+}
+
+sk_sp<SkColorSpace> SkColorSpace::Deserialize(const void* data, size_t length) {
+ if (length < sizeof(ColorSpaceHeader)) {
+ return nullptr;
+ }
+
+ ColorSpaceHeader header = *((const ColorSpaceHeader*) data);
+ data = SkTAddOffset<const void>(data, sizeof(ColorSpaceHeader));
+ length -= sizeof(ColorSpaceHeader);
+ if (k1_Version == header.fVersion) {
+ if (length < 16 * sizeof(float)) {
+ return nullptr;
+ }
+
+ skcms_TransferFunction transferFn;
+ memcpy(&transferFn, data, 7 * sizeof(float));
+ data = SkTAddOffset<const void>(data, 7 * sizeof(float));
+
+ skcms_Matrix3x3 toXYZ;
+ memcpy(&toXYZ, data, 9 * sizeof(float));
+ return SkColorSpace::MakeRGB(transferFn, toXYZ);
+ } else if (k0_Version == header.fVersion) {
+ if (0 == header.fFlags) {
+ switch ((NamedColorSpace)header.fNamed) {
+ case kSRGB_NamedColorSpace:
+ return SkColorSpace::MakeSRGB();
+ case kSRGBLinear_NamedColorSpace:
+ return SkColorSpace::MakeSRGBLinear();
+ case kAdobeRGB_NamedColorSpace:
+ return SkColorSpace::MakeRGB(SkNamedTransferFn::k2Dot2,
+ SkNamedGamut::kAdobeRGB);
+ }
+ }
+
+ auto make_named_tf = [=](const skcms_TransferFunction& tf) {
+ if (ColorSpaceHeader::kMatrix_Flag != header.fFlags || length < 12 * sizeof(float)) {
+ return sk_sp<SkColorSpace>(nullptr);
+ }
+
+ // Version 0 matrix is row-major 3x4
+ skcms_Matrix3x3 toXYZ;
+ memcpy(&toXYZ.vals[0][0], (const float*)data + 0, 3 * sizeof(float));
+ memcpy(&toXYZ.vals[1][0], (const float*)data + 4, 3 * sizeof(float));
+ memcpy(&toXYZ.vals[2][0], (const float*)data + 8, 3 * sizeof(float));
+ return SkColorSpace::MakeRGB(tf, toXYZ);
+ };
+
+ switch ((NamedGamma) header.fGammaNamed) {
+ case kSRGB_NamedGamma:
+ return make_named_tf(SkNamedTransferFn::kSRGB);
+ case k2Dot2_NamedGamma:
+ return make_named_tf(SkNamedTransferFn::k2Dot2);
+ case kLinear_NamedGamma:
+ return make_named_tf(SkNamedTransferFn::kLinear);
+ default:
+ break;
+ }
+
+ switch (header.fFlags) {
+ case ColorSpaceHeader::kICC_Flag: {
+ // Deprecated and unsupported code path
+ return nullptr;
+ }
+ case ColorSpaceHeader::kTransferFn_Flag: {
+ if (length < 19 * sizeof(float)) {
+ return nullptr;
+ }
+
+ // Version 0 TF is in abcdefg order
+ skcms_TransferFunction transferFn;
+ transferFn.a = *(((const float*) data) + 0);
+ transferFn.b = *(((const float*) data) + 1);
+ transferFn.c = *(((const float*) data) + 2);
+ transferFn.d = *(((const float*) data) + 3);
+ transferFn.e = *(((const float*) data) + 4);
+ transferFn.f = *(((const float*) data) + 5);
+ transferFn.g = *(((const float*) data) + 6);
+ data = SkTAddOffset<const void>(data, 7 * sizeof(float));
+
+ // Version 0 matrix is row-major 3x4
+ skcms_Matrix3x3 toXYZ;
+ memcpy(&toXYZ.vals[0][0], (const float*)data + 0, 3 * sizeof(float));
+ memcpy(&toXYZ.vals[1][0], (const float*)data + 4, 3 * sizeof(float));
+ memcpy(&toXYZ.vals[2][0], (const float*)data + 8, 3 * sizeof(float));
+ return SkColorSpace::MakeRGB(transferFn, toXYZ);
+ }
+ default:
+ return nullptr;
+ }
+ } else {
+ return nullptr;
+ }
+}
+
+bool SkColorSpace::Equals(const SkColorSpace* x, const SkColorSpace* y) {
+ if (x == y) {
+ return true;
+ }
+
+ if (!x || !y) {
+ return false;
+ }
+
+ if (x->hash() == y->hash()) {
+ #if defined(SK_DEBUG)
+ // Do these floats function equivalently?
+ // This returns true more often than simple float comparison (NaN vs. NaN) and,
+ // also returns true more often than simple bitwise comparison (+0 vs. -0) and,
+ // even returns true more often than those two OR'd together (two different NaNs).
+ auto equiv = [](float X, float Y) {
+ return (X==Y)
+ || (sk_float_isnan(X) && sk_float_isnan(Y));
+ };
+
+ for (int i = 0; i < 7; i++) {
+ float X = (&x->fTransferFn.g)[i],
+ Y = (&y->fTransferFn.g)[i];
+ SkASSERTF(equiv(X,Y), "Hash collision at tf[%d], !equiv(%g,%g)\n", i, X,Y);
+ }
+ for (int r = 0; r < 3; r++)
+ for (int c = 0; c < 3; c++) {
+ float X = x->fToXYZD50.vals[r][c],
+ Y = y->fToXYZD50.vals[r][c];
+ SkASSERTF(equiv(X,Y), "Hash collision at toXYZD50[%d][%d], !equiv(%g,%g)\n", r,c, X,Y);
+ }
+ #endif
+ return true;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkColorSpacePriv.h b/gfx/skia/skia/src/core/SkColorSpacePriv.h
new file mode 100644
index 0000000000..cfcb91fc7c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpacePriv.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkColorSpacePriv_DEFINED
+#define SkColorSpacePriv_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/private/base/SkTemplates.h"
+#include "modules/skcms/skcms.h"
+
+namespace skvm {
+class Builder;
+struct Color;
+struct F32;
+struct Uniforms;
+}
+
+// A gamut narrower than sRGB, useful for testing.
+static constexpr skcms_Matrix3x3 gNarrow_toXYZD50 = {{
+ { 0.190974f, 0.404865f, 0.368380f },
+ { 0.114746f, 0.582937f, 0.302318f },
+ { 0.032925f, 0.153615f, 0.638669f },
+}};
+
+static inline bool color_space_almost_equal(float a, float b) {
+ return SkTAbs(a - b) < 0.01f;
+}
+
+// Let's use a stricter version for transfer functions. Worst case, these are encoded
+// in ICC format, which offers 16-bits of fractional precision.
+static inline bool transfer_fn_almost_equal(float a, float b) {
+ return SkTAbs(a - b) < 0.001f;
+}
+
+static inline bool is_almost_srgb(const skcms_TransferFunction& coeffs) {
+ return transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.a, coeffs.a) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.b, coeffs.b) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.c, coeffs.c) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.d, coeffs.d) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.e, coeffs.e) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.f, coeffs.f) &&
+ transfer_fn_almost_equal(SkNamedTransferFn::kSRGB.g, coeffs.g);
+}
+
+static inline bool is_almost_2dot2(const skcms_TransferFunction& coeffs) {
+ return transfer_fn_almost_equal(1.0f, coeffs.a) &&
+ transfer_fn_almost_equal(0.0f, coeffs.b) &&
+ transfer_fn_almost_equal(0.0f, coeffs.e) &&
+ transfer_fn_almost_equal(2.2f, coeffs.g) &&
+ coeffs.d <= 0.0f;
+}
+
+static inline bool is_almost_linear(const skcms_TransferFunction& coeffs) {
+ // OutputVal = InputVal ^ 1.0f
+ const bool linearExp =
+ transfer_fn_almost_equal(1.0f, coeffs.a) &&
+ transfer_fn_almost_equal(0.0f, coeffs.b) &&
+ transfer_fn_almost_equal(0.0f, coeffs.e) &&
+ transfer_fn_almost_equal(1.0f, coeffs.g) &&
+ coeffs.d <= 0.0f;
+
+ // OutputVal = 1.0f * InputVal
+ const bool linearFn =
+ transfer_fn_almost_equal(1.0f, coeffs.c) &&
+ transfer_fn_almost_equal(0.0f, coeffs.f) &&
+ coeffs.d >= 1.0f;
+
+ return linearExp || linearFn;
+}
+
+skvm::F32 sk_program_transfer_fn(
+ skvm::F32 v, skcms_TFType,
+ skvm::F32 G, skvm::F32 A, skvm::F32 B, skvm::F32 C, skvm::F32 D, skvm::F32 E, skvm::F32 F);
+
+skvm::Color sk_program_transfer_fn(skvm::Builder*, skvm::Uniforms*,
+ const skcms_TransferFunction&, skvm::Color);
+
+// Return raw pointers to commonly used SkColorSpaces.
+// No need to ref/unref these, but if you do, do it in pairs.
+SkColorSpace* sk_srgb_singleton();
+SkColorSpace* sk_srgb_linear_singleton();
+
+#endif // SkColorSpacePriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkColorSpaceXformSteps.cpp b/gfx/skia/skia/src/core/SkColorSpaceXformSteps.cpp
new file mode 100644
index 0000000000..de94bc6f86
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpaceXformSteps.cpp
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkColorSpaceXformSteps.h"
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "modules/skcms/skcms.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkVM.h"
+
+// See skia.org/user/color (== site/user/color.md).
+
+SkColorSpaceXformSteps::SkColorSpaceXformSteps(const SkColorSpace* src, SkAlphaType srcAT,
+ const SkColorSpace* dst, SkAlphaType dstAT) {
+ // Opaque outputs are treated as the same alpha type as the source input.
+ // TODO: we'd really like to have a good way of explaining why we think this is useful.
+ if (dstAT == kOpaque_SkAlphaType) {
+ dstAT = srcAT;
+ }
+
+ // We have some options about what to do with null src or dst here.
+ // This pair seems to be the most consistent with legacy expectations.
+ if (!src) { src = sk_srgb_singleton(); }
+ if (!dst) { dst = src; }
+
+ if (src->hash() == dst->hash() && srcAT == dstAT) {
+ SkASSERT(SkColorSpace::Equals(src,dst));
+ return;
+ }
+
+ this->flags.unpremul = srcAT == kPremul_SkAlphaType;
+ this->flags.linearize = !src->gammaIsLinear();
+ this->flags.gamut_transform = src->toXYZD50Hash() != dst->toXYZD50Hash();
+ this->flags.encode = !dst->gammaIsLinear();
+ this->flags.premul = srcAT != kOpaque_SkAlphaType && dstAT == kPremul_SkAlphaType;
+
+ if (this->flags.gamut_transform) {
+ skcms_Matrix3x3 src_to_dst; // TODO: switch src_to_dst_matrix to row-major
+ src->gamutTransformTo(dst, &src_to_dst);
+
+ this->src_to_dst_matrix[0] = src_to_dst.vals[0][0];
+ this->src_to_dst_matrix[1] = src_to_dst.vals[1][0];
+ this->src_to_dst_matrix[2] = src_to_dst.vals[2][0];
+
+ this->src_to_dst_matrix[3] = src_to_dst.vals[0][1];
+ this->src_to_dst_matrix[4] = src_to_dst.vals[1][1];
+ this->src_to_dst_matrix[5] = src_to_dst.vals[2][1];
+
+ this->src_to_dst_matrix[6] = src_to_dst.vals[0][2];
+ this->src_to_dst_matrix[7] = src_to_dst.vals[1][2];
+ this->src_to_dst_matrix[8] = src_to_dst.vals[2][2];
+ } else {
+ #ifdef SK_DEBUG
+ skcms_Matrix3x3 srcM, dstM;
+ src->toXYZD50(&srcM);
+ dst->toXYZD50(&dstM);
+ SkASSERT(0 == memcmp(&srcM, &dstM, 9*sizeof(float)) && "Hash collision");
+ #endif
+ }
+
+ // Fill out all the transfer functions we'll use.
+ src-> transferFn(&this->srcTF );
+ dst->invTransferFn(&this->dstTFInv);
+
+ // If we linearize then immediately reencode with the same transfer function, skip both.
+ if ( this->flags.linearize &&
+ !this->flags.gamut_transform &&
+ this->flags.encode &&
+ src->transferFnHash() == dst->transferFnHash())
+ {
+ #ifdef SK_DEBUG
+ skcms_TransferFunction dstTF;
+ dst->transferFn(&dstTF);
+ for (int i = 0; i < 7; i++) {
+ SkASSERT( (&srcTF.g)[i] == (&dstTF.g)[i] && "Hash collision" );
+ }
+ #endif
+ this->flags.linearize = false;
+ this->flags.encode = false;
+ }
+
+ // Skip unpremul...premul if there are no non-linear operations between.
+ if ( this->flags.unpremul &&
+ !this->flags.linearize &&
+ !this->flags.encode &&
+ this->flags.premul)
+ {
+ this->flags.unpremul = false;
+ this->flags.premul = false;
+ }
+}
+
+void SkColorSpaceXformSteps::apply(float* rgba) const {
+ if (flags.unpremul) {
+ // I don't know why isfinite(x) stopped working on the Chromecast bots...
+ auto is_finite = [](float x) { return x*0 == 0; };
+
+ float invA = sk_ieee_float_divide(1.0f, rgba[3]);
+ invA = is_finite(invA) ? invA : 0;
+ rgba[0] *= invA;
+ rgba[1] *= invA;
+ rgba[2] *= invA;
+ }
+ if (flags.linearize) {
+ rgba[0] = skcms_TransferFunction_eval(&srcTF, rgba[0]);
+ rgba[1] = skcms_TransferFunction_eval(&srcTF, rgba[1]);
+ rgba[2] = skcms_TransferFunction_eval(&srcTF, rgba[2]);
+ }
+ if (flags.gamut_transform) {
+ float temp[3] = { rgba[0], rgba[1], rgba[2] };
+ for (int i = 0; i < 3; ++i) {
+ rgba[i] = src_to_dst_matrix[ i] * temp[0] +
+ src_to_dst_matrix[3 + i] * temp[1] +
+ src_to_dst_matrix[6 + i] * temp[2];
+ }
+ }
+ if (flags.encode) {
+ rgba[0] = skcms_TransferFunction_eval(&dstTFInv, rgba[0]);
+ rgba[1] = skcms_TransferFunction_eval(&dstTFInv, rgba[1]);
+ rgba[2] = skcms_TransferFunction_eval(&dstTFInv, rgba[2]);
+ }
+ if (flags.premul) {
+ rgba[0] *= rgba[3];
+ rgba[1] *= rgba[3];
+ rgba[2] *= rgba[3];
+ }
+}
+
+void SkColorSpaceXformSteps::apply(SkRasterPipeline* p) const {
+ if (flags.unpremul) { p->append(SkRasterPipelineOp::unpremul); }
+ if (flags.linearize) { p->append_transfer_function(srcTF); }
+ if (flags.gamut_transform) { p->append(SkRasterPipelineOp::matrix_3x3, &src_to_dst_matrix); }
+ if (flags.encode) { p->append_transfer_function(dstTFInv); }
+ if (flags.premul) { p->append(SkRasterPipelineOp::premul); }
+}
+
+skvm::F32 sk_program_transfer_fn(
+ skvm::F32 v, skcms_TFType tf_type,
+ skvm::F32 G, skvm::F32 A, skvm::F32 B, skvm::F32 C, skvm::F32 D, skvm::F32 E, skvm::F32 F)
+{
+ // Strip off the sign bit and save it for later.
+ skvm::I32 bits = pun_to_I32(v),
+ sign = bits & 0x80000000;
+ v = pun_to_F32(bits ^ sign);
+
+ switch (tf_type) {
+ case skcms_TFType_Invalid: SkASSERT(false); break;
+
+ case skcms_TFType_sRGBish: {
+ v = select(v <= D, C*v + F
+ , approx_powf(A*v + B, G) + E);
+ } break;
+
+ case skcms_TFType_PQish: {
+ skvm::F32 vC = approx_powf(v, C);
+ v = approx_powf(max(B * vC + A, 0.0f) / (E * vC + D), F);
+ } break;
+
+ case skcms_TFType_HLGish: {
+ skvm::F32 vA = v*A,
+ K = F + 1.0f;
+ v = K*select(vA <= 1.0f, approx_powf(vA, B)
+ , approx_exp((v-E) * C + D));
+ } break;
+
+ case skcms_TFType_HLGinvish: {
+ skvm::F32 K = F + 1.0f;
+ v /= K;
+ v = select(v <= 1.0f, A * approx_powf(v, B)
+ , C * approx_log(v-D) + E);
+ } break;
+ }
+
+ // Re-apply the original sign bit on our way out the door.
+ return pun_to_F32(sign | pun_to_I32(v));
+}
+
+skvm::Color sk_program_transfer_fn(skvm::Builder* p, skvm::Uniforms* uniforms,
+ const skcms_TransferFunction& tf, skvm::Color c) {
+ skvm::F32 G = p->uniformF(uniforms->pushF(tf.g)),
+ A = p->uniformF(uniforms->pushF(tf.a)),
+ B = p->uniformF(uniforms->pushF(tf.b)),
+ C = p->uniformF(uniforms->pushF(tf.c)),
+ D = p->uniformF(uniforms->pushF(tf.d)),
+ E = p->uniformF(uniforms->pushF(tf.e)),
+ F = p->uniformF(uniforms->pushF(tf.f));
+ skcms_TFType tf_type = skcms_TransferFunction_getType(&tf);
+ return {
+ sk_program_transfer_fn(c.r, tf_type, G,A,B,C,D,E,F),
+ sk_program_transfer_fn(c.g, tf_type, G,A,B,C,D,E,F),
+ sk_program_transfer_fn(c.b, tf_type, G,A,B,C,D,E,F),
+ c.a,
+ };
+}
+
+skvm::Color SkColorSpaceXformSteps::program(skvm::Builder* p, skvm::Uniforms* uniforms,
+ skvm::Color c) const {
+ if (flags.unpremul) {
+ c = unpremul(c);
+ }
+ if (flags.linearize) {
+ c = sk_program_transfer_fn(p, uniforms, srcTF, c);
+ }
+ if (flags.gamut_transform) {
+ auto m = [&](int index) {
+ return p->uniformF(uniforms->pushF(src_to_dst_matrix[index]));
+ };
+ auto R = c.r * m(0) + c.g * m(3) + c.b * m(6),
+ G = c.r * m(1) + c.g * m(4) + c.b * m(7),
+ B = c.r * m(2) + c.g * m(5) + c.b * m(8);
+ c = {R, G, B, c.a};
+ }
+ if (flags.encode) {
+ c = sk_program_transfer_fn(p, uniforms, dstTFInv, c);
+ }
+ if (flags.premul) {
+ c = premul(c);
+ }
+ return c;
+}
diff --git a/gfx/skia/skia/src/core/SkColorSpaceXformSteps.h b/gfx/skia/skia/src/core/SkColorSpaceXformSteps.h
new file mode 100644
index 0000000000..37bc4f9113
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkColorSpaceXformSteps.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorSpaceXformSteps_DEFINED
+#define SkColorSpaceXformSteps_DEFINED
+
+#include "include/core/SkAlphaType.h"
+#include "modules/skcms/skcms.h"
+#include "src/core/SkVM.h"
+#include <stdint.h>
+
+class SkColorSpace;
+class SkRasterPipeline;
+
+struct SkColorSpaceXformSteps {
+
+ struct Flags {
+ bool unpremul = false;
+ bool linearize = false;
+ bool gamut_transform = false;
+ bool encode = false;
+ bool premul = false;
+
+ constexpr uint32_t mask() const {
+ return (unpremul ? 1 : 0)
+ | (linearize ? 2 : 0)
+ | (gamut_transform ? 4 : 0)
+ | (encode ? 8 : 0)
+ | (premul ? 16 : 0);
+ }
+ };
+
+ SkColorSpaceXformSteps() {}
+ SkColorSpaceXformSteps(const SkColorSpace* src, SkAlphaType srcAT,
+ const SkColorSpace* dst, SkAlphaType dstAT);
+
+ template <typename S, typename D>
+ SkColorSpaceXformSteps(const S& src, const D& dst)
+ : SkColorSpaceXformSteps(src.colorSpace(), src.alphaType(),
+ dst.colorSpace(), dst.alphaType()) {}
+
+ void apply(float rgba[4]) const;
+ void apply(SkRasterPipeline*) const;
+ skvm::Color program(skvm::Builder*, skvm::Uniforms*, skvm::Color) const;
+
+ Flags flags;
+
+ skcms_TransferFunction srcTF, // Apply for linearize.
+ dstTFInv; // Apply for encode.
+ float src_to_dst_matrix[9]; // Apply this 3x3 column-major matrix for gamut_transform.
+};
+
+#endif//SkColorSpaceXformSteps_DEFINED
diff --git a/gfx/skia/skia/src/core/SkCompressedDataUtils.cpp b/gfx/skia/skia/src/core/SkCompressedDataUtils.cpp
new file mode 100644
index 0000000000..8d9be0c874
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCompressedDataUtils.cpp
@@ -0,0 +1,306 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkCompressedDataUtils.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkData.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkMipmap.h"
+
+#include <algorithm>
+#include <cstdint>
+
+using namespace skia_private;
+
+struct ETC1Block {
+ uint32_t fHigh;
+ uint32_t fLow;
+};
+
+constexpr uint32_t kFlipBit = 0x1; // set -> T/B sub-blocks; not-set -> L/R sub-blocks
+constexpr uint32_t kDiffBit = 0x2; // set -> differential; not-set -> individual
+
+static inline int extend_4To8bits(int b) {
+ int c = b & 0xf;
+ return (c << 4) | c;
+}
+
+static inline int extend_5To8bits(int b) {
+ int c = b & 0x1f;
+ return (c << 3) | (c >> 2);
+}
+
+static inline int extend_5plus3To8Bits(int base, int diff) {
+ static const int kLookup[8] = { 0, 1, 2, 3, -4, -3, -2, -1 };
+
+ return extend_5To8bits((0x1f & base) + kLookup[0x7 & diff]);
+}
+
+static const int kNumETC1ModifierTables = 8;
+static const int kNumETC1PixelIndices = 4;
+
+// The index of each row in this table is the ETC1 table codeword
+// The index of each column in this table is the ETC1 pixel index value
+static const int kETC1ModifierTables[kNumETC1ModifierTables][kNumETC1PixelIndices] = {
+ /* 0 */ { 2, 8, -2, -8 },
+ /* 1 */ { 5, 17, -5, -17 },
+ /* 2 */ { 9, 29, -9, -29 },
+ /* 3 */ { 13, 42, -13, -42 },
+ /* 4 */ { 18, 60, -18, -60 },
+ /* 5 */ { 24, 80, -24, -80 },
+ /* 6 */ { 33, 106, -33, -106 },
+ /* 7 */ { 47, 183, -47, -183 }
+};
+
+static int num_4x4_blocks(int size) {
+ return ((size + 3) & ~3) >> 2;
+}
+
+// Return which sub-block a given x,y location in the overall 4x4 block belongs to
+static int xy_to_subblock_index(int x, int y, bool flip) {
+ SkASSERT(x >= 0 && x < 4);
+ SkASSERT(y >= 0 && y < 4);
+
+ if (flip) {
+ return y < 2 ? 0 : 1; // sub-block 1 is on top of sub-block 2
+ } else {
+ return x < 2 ? 0 : 1; // sub-block 1 is to the left of sub-block 2
+ }
+}
+
+struct IColor {
+ int fR, fG, fB;
+};
+
+static SkPMColor add_delta_and_clamp(const IColor& col, int delta) {
+ int r8 = SkTPin(col.fR + delta, 0, 255);
+ int g8 = SkTPin(col.fG + delta, 0, 255);
+ int b8 = SkTPin(col.fB + delta, 0, 255);
+
+ return SkPackARGB32(0xFF, r8, g8, b8);
+}
+
+static bool decompress_etc1(SkISize dimensions, const uint8_t* srcData, SkBitmap* dst) {
+ const ETC1Block* srcBlocks = reinterpret_cast<const ETC1Block*>(srcData);
+
+ int numXBlocks = num_4x4_blocks(dimensions.width());
+ int numYBlocks = num_4x4_blocks(dimensions.height());
+
+ for (int y = 0; y < numYBlocks; ++y) {
+ for (int x = 0; x < numXBlocks; ++x) {
+ const ETC1Block* curBlock1 = &srcBlocks[y * numXBlocks + x];
+ uint32_t high = SkBSwap32(curBlock1->fHigh);
+ uint32_t low = SkBSwap32(curBlock1->fLow);
+
+ bool flipped = SkToBool(high & kFlipBit);
+ bool differential = SkToBool(high & kDiffBit);
+
+ IColor colors[2];
+
+ if (differential) {
+ colors[0].fR = extend_5To8bits(high >> 27);
+ colors[1].fR = extend_5plus3To8Bits(high >> 27, high >> 24);
+ colors[0].fG = extend_5To8bits(high >> 19);
+ colors[1].fG = extend_5plus3To8Bits(high >> 19, high >> 16);
+ colors[0].fB = extend_5To8bits(high >> 11);
+ colors[1].fB = extend_5plus3To8Bits(high >> 11, high >> 8);
+ } else {
+ colors[0].fR = extend_4To8bits(high >> 28);
+ colors[1].fR = extend_4To8bits(high >> 24);
+ colors[0].fG = extend_4To8bits(high >> 20);
+ colors[1].fG = extend_4To8bits(high >> 16);
+ colors[0].fB = extend_4To8bits(high >> 12);
+ colors[1].fB = extend_4To8bits(high >> 8);
+ }
+
+ int tableIndex0 = (high >> 5) & 0x7;
+ int tableIndex1 = (high >> 2) & 0x7;
+ const int* tables[2] = {
+ kETC1ModifierTables[tableIndex0],
+ kETC1ModifierTables[tableIndex1]
+ };
+
+ int baseShift = 0;
+ int offsetX = 4 * x, offsetY = 4 * y;
+ for (int i = 0; i < 4; ++i, ++baseShift) {
+ for (int j = 0; j < 4; ++j) {
+ if (offsetX + j >= dst->width() || offsetY + i >= dst->height()) {
+ // This can happen for the topmost levels of a mipmap and for
+ // non-multiple of 4 textures
+ continue;
+ }
+
+ int subBlockIndex = xy_to_subblock_index(j, i, flipped);
+ int pixelIndex = ((low >> (baseShift+(j*4))) & 0x1) |
+ (low >> (baseShift+(j*4)+15) & 0x2);
+
+ SkASSERT(subBlockIndex == 0 || subBlockIndex == 1);
+ SkASSERT(pixelIndex >= 0 && pixelIndex < 4);
+
+ int delta = tables[subBlockIndex][pixelIndex];
+ *dst->getAddr32(offsetX + j, offsetY + i) =
+ add_delta_and_clamp(colors[subBlockIndex], delta);
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+//------------------------------------------------------------------------------------------------
+struct BC1Block {
+ uint16_t fColor0;
+ uint16_t fColor1;
+ uint32_t fIndices;
+};
+
+static SkPMColor from565(uint16_t rgb565) {
+ uint8_t r8 = SkR16ToR32((rgb565 >> 11) & 0x1F);
+ uint8_t g8 = SkG16ToG32((rgb565 >> 5) & 0x3F);
+ uint8_t b8 = SkB16ToB32(rgb565 & 0x1F);
+
+ return SkPackARGB32(0xFF, r8, g8, b8);
+}
+
+// return t*col0 + (1-t)*col1
+static SkPMColor lerp(float t, SkPMColor col0, SkPMColor col1) {
+ SkASSERT(SkGetPackedA32(col0) == 0xFF && SkGetPackedA32(col1) == 0xFF);
+
+ // TODO: given 't' is only either 1/3 or 2/3 this could be done faster
+ uint8_t r8 = SkScalarRoundToInt(t * SkGetPackedR32(col0) + (1.0f - t) * SkGetPackedR32(col1));
+ uint8_t g8 = SkScalarRoundToInt(t * SkGetPackedG32(col0) + (1.0f - t) * SkGetPackedG32(col1));
+ uint8_t b8 = SkScalarRoundToInt(t * SkGetPackedB32(col0) + (1.0f - t) * SkGetPackedB32(col1));
+ return SkPackARGB32(0xFF, r8, g8, b8);
+}
+
+static bool decompress_bc1(SkISize dimensions, const uint8_t* srcData,
+ bool isOpaque, SkBitmap* dst) {
+ const BC1Block* srcBlocks = reinterpret_cast<const BC1Block*>(srcData);
+
+ int numXBlocks = num_4x4_blocks(dimensions.width());
+ int numYBlocks = num_4x4_blocks(dimensions.height());
+
+ SkPMColor colors[4];
+
+ for (int y = 0; y < numYBlocks; ++y) {
+ for (int x = 0; x < numXBlocks; ++x) {
+ const BC1Block* curBlock = &srcBlocks[y * numXBlocks + x];
+
+ colors[0] = from565(curBlock->fColor0);
+ colors[1] = from565(curBlock->fColor1);
+ if (curBlock->fColor0 <= curBlock->fColor1) { // signal for a transparent block
+ colors[2] = SkPackARGB32(
+ 0xFF,
+ (SkGetPackedR32(colors[0]) + SkGetPackedR32(colors[1])) >> 1,
+ (SkGetPackedG32(colors[0]) + SkGetPackedG32(colors[1])) >> 1,
+ (SkGetPackedB32(colors[0]) + SkGetPackedB32(colors[1])) >> 1);
+ // The opacity of the overall texture trumps the per-block transparency
+ colors[3] = SkPackARGB32(isOpaque ? 0xFF : 0, 0, 0, 0);
+ } else {
+ colors[2] = lerp(2.0f/3.0f, colors[0], colors[1]);
+ colors[3] = lerp(1.0f/3.0f, colors[0], colors[1]);
+ }
+
+ int shift = 0;
+ int offsetX = 4 * x, offsetY = 4 * y;
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j, shift += 2) {
+ if (offsetX + j >= dst->width() || offsetY + i >= dst->height()) {
+ // This can happen for the topmost levels of a mipmap and for
+ // non-multiple of 4 textures
+ continue;
+ }
+
+ int index = (curBlock->fIndices >> shift) & 0x3;
+ *dst->getAddr32(offsetX + j, offsetY + i) = colors[index];
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+bool SkDecompress(sk_sp<SkData> data,
+ SkISize dimensions,
+ SkTextureCompressionType compressionType,
+ SkBitmap* dst) {
+ using Type = SkTextureCompressionType;
+
+ const uint8_t* bytes = data->bytes();
+ switch (compressionType) {
+ case Type::kNone: return false;
+ case Type::kETC2_RGB8_UNORM: return decompress_etc1(dimensions, bytes, dst);
+ case Type::kBC1_RGB8_UNORM: return decompress_bc1(dimensions, bytes, true, dst);
+ case Type::kBC1_RGBA8_UNORM: return decompress_bc1(dimensions, bytes, false, dst);
+ }
+
+ SkUNREACHABLE;
+}
+
+size_t SkCompressedDataSize(SkTextureCompressionType type, SkISize dimensions,
+ TArray<size_t>* individualMipOffsets, bool mipmapped) {
+ SkASSERT(!individualMipOffsets || !individualMipOffsets->size());
+
+ int numMipLevels = 1;
+ if (mipmapped) {
+ numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
+ }
+
+ size_t totalSize = 0;
+ switch (type) {
+ case SkTextureCompressionType::kNone:
+ break;
+ case SkTextureCompressionType::kETC2_RGB8_UNORM:
+ case SkTextureCompressionType::kBC1_RGB8_UNORM:
+ case SkTextureCompressionType::kBC1_RGBA8_UNORM: {
+ for (int i = 0; i < numMipLevels; ++i) {
+ int numBlocks = num_4x4_blocks(dimensions.width()) *
+ num_4x4_blocks(dimensions.height());
+
+ if (individualMipOffsets) {
+ individualMipOffsets->push_back(totalSize);
+ }
+
+ static_assert(sizeof(ETC1Block) == sizeof(BC1Block));
+ totalSize += numBlocks * sizeof(ETC1Block);
+
+ dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
+ }
+ break;
+ }
+ }
+
+ return totalSize;
+}
+
+size_t SkCompressedBlockSize(SkTextureCompressionType type) {
+ switch (type) {
+ case SkTextureCompressionType::kNone:
+ return 0;
+ case SkTextureCompressionType::kETC2_RGB8_UNORM:
+ return sizeof(ETC1Block);
+ case SkTextureCompressionType::kBC1_RGB8_UNORM:
+ case SkTextureCompressionType::kBC1_RGBA8_UNORM:
+ return sizeof(BC1Block);
+ }
+ SkUNREACHABLE;
+}
+
+size_t SkCompressedFormatDataSize(SkTextureCompressionType compressionType,
+ SkISize dimensions, bool mipmapped) {
+ return SkCompressedDataSize(compressionType, dimensions, nullptr, mipmapped);
+}
diff --git a/gfx/skia/skia/src/core/SkCompressedDataUtils.h b/gfx/skia/skia/src/core/SkCompressedDataUtils.h
new file mode 100644
index 0000000000..45e8246f23
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCompressedDataUtils.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCompressedDataUtils_DEFINED
+#define SkCompressedDataUtils_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTextureCompressionType.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTArray.h"
+
+#include <cstddef>
+
+class SkBitmap;
+class SkData;
+struct SkISize;
+
+static constexpr bool SkTextureCompressionTypeIsOpaque(SkTextureCompressionType compression) {
+ switch (compression) {
+ case SkTextureCompressionType::kNone: return true;
+ case SkTextureCompressionType::kETC2_RGB8_UNORM: return true;
+ case SkTextureCompressionType::kBC1_RGB8_UNORM: return true;
+ case SkTextureCompressionType::kBC1_RGBA8_UNORM: return false;
+ }
+
+ SkUNREACHABLE;
+}
+
+size_t SkCompressedDataSize(SkTextureCompressionType, SkISize baseDimensions,
+ skia_private::TArray<size_t>* individualMipOffsets, bool mipmapped);
+size_t SkCompressedBlockSize(SkTextureCompressionType type);
+
+/**
+ * Returns the data size for the given SkTextureCompressionType
+ */
+size_t SkCompressedFormatDataSize(SkTextureCompressionType compressionType,
+ SkISize dimensions, bool mipmapped);
+
+ /*
+ * This method will decompress the bottommost level in 'data' into 'dst'.
+ */
+bool SkDecompress(sk_sp<SkData> data,
+ SkISize dimensions,
+ SkTextureCompressionType compressionType,
+ SkBitmap* dst);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkContourMeasure.cpp b/gfx/skia/skia/src/core/SkContourMeasure.cpp
new file mode 100644
index 0000000000..58ad078266
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkContourMeasure.cpp
@@ -0,0 +1,673 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkContourMeasure.h"
+#include "include/core/SkPath.h"
+#include "src/base/SkTSearch.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathMeasurePriv.h"
+#include "src/core/SkPathPriv.h"
+
+#define kMaxTValue 0x3FFFFFFF
+
+constexpr static inline SkScalar tValue2Scalar(int t) {
+ SkASSERT((unsigned)t <= kMaxTValue);
+ // 1/kMaxTValue can't be represented as a float, but it's close and the limits work fine.
+ const SkScalar kMaxTReciprocal = 1.0f / (SkScalar)kMaxTValue;
+ return t * kMaxTReciprocal;
+}
+
+static_assert(0.0f == tValue2Scalar( 0), "Lower limit should be exact.");
+static_assert(1.0f == tValue2Scalar(kMaxTValue), "Upper limit should be exact.");
+
+SkScalar SkContourMeasure::Segment::getScalarT() const {
+ return tValue2Scalar(fTValue);
+}
+
+void SkContourMeasure_segTo(const SkPoint pts[], unsigned segType,
+ SkScalar startT, SkScalar stopT, SkPath* dst) {
+ SkASSERT(startT >= 0 && startT <= SK_Scalar1);
+ SkASSERT(stopT >= 0 && stopT <= SK_Scalar1);
+ SkASSERT(startT <= stopT);
+
+ if (startT == stopT) {
+ if (!dst->isEmpty()) {
+            /* if the dash has a zero-length 'on' segment, add a corresponding zero-length line.
+ The stroke code will add end caps to zero length lines as appropriate */
+ SkPoint lastPt;
+ SkAssertResult(dst->getLastPt(&lastPt));
+ dst->lineTo(lastPt);
+ }
+ return;
+ }
+
+ SkPoint tmp0[7], tmp1[7];
+
+ switch (segType) {
+ case kLine_SegType:
+ if (SK_Scalar1 == stopT) {
+ dst->lineTo(pts[1]);
+ } else {
+ dst->lineTo(SkScalarInterp(pts[0].fX, pts[1].fX, stopT),
+ SkScalarInterp(pts[0].fY, pts[1].fY, stopT));
+ }
+ break;
+ case kQuad_SegType:
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->quadTo(pts[1], pts[2]);
+ } else {
+ SkChopQuadAt(pts, tmp0, stopT);
+ dst->quadTo(tmp0[1], tmp0[2]);
+ }
+ } else {
+ SkChopQuadAt(pts, tmp0, startT);
+ if (SK_Scalar1 == stopT) {
+ dst->quadTo(tmp0[3], tmp0[4]);
+ } else {
+ SkChopQuadAt(&tmp0[2], tmp1, (stopT - startT) / (1 - startT));
+ dst->quadTo(tmp1[1], tmp1[2]);
+ }
+ }
+ break;
+ case kConic_SegType: {
+ SkConic conic(pts[0], pts[2], pts[3], pts[1].fX);
+
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->conicTo(conic.fPts[1], conic.fPts[2], conic.fW);
+ } else {
+ SkConic tmp[2];
+ if (conic.chopAt(stopT, tmp)) {
+ dst->conicTo(tmp[0].fPts[1], tmp[0].fPts[2], tmp[0].fW);
+ }
+ }
+ } else {
+ if (SK_Scalar1 == stopT) {
+ SkConic tmp[2];
+ if (conic.chopAt(startT, tmp)) {
+ dst->conicTo(tmp[1].fPts[1], tmp[1].fPts[2], tmp[1].fW);
+ }
+ } else {
+ SkConic tmp;
+ conic.chopAt(startT, stopT, &tmp);
+ dst->conicTo(tmp.fPts[1], tmp.fPts[2], tmp.fW);
+ }
+ }
+ } break;
+ case kCubic_SegType:
+ if (0 == startT) {
+ if (SK_Scalar1 == stopT) {
+ dst->cubicTo(pts[1], pts[2], pts[3]);
+ } else {
+ SkChopCubicAt(pts, tmp0, stopT);
+ dst->cubicTo(tmp0[1], tmp0[2], tmp0[3]);
+ }
+ } else {
+ SkChopCubicAt(pts, tmp0, startT);
+ if (SK_Scalar1 == stopT) {
+ dst->cubicTo(tmp0[4], tmp0[5], tmp0[6]);
+ } else {
+ SkChopCubicAt(&tmp0[3], tmp1, (stopT - startT) / (1 - startT));
+ dst->cubicTo(tmp1[1], tmp1[2], tmp1[3]);
+ }
+ }
+ break;
+ default:
+ SK_ABORT("unknown segType");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline int tspan_big_enough(int tspan) {
+ SkASSERT((unsigned)tspan <= kMaxTValue);
+ return tspan >> 10;
+}
+
+// can't use tangents, since we need [0..1..................2] to be seen
+// as definitely not a line (it is when drawn, but not parametrically)
+// so we compare midpoints
+#define CHEAP_DIST_LIMIT (SK_Scalar1/2) // just made this value up
+
+static bool quad_too_curvy(const SkPoint pts[3], SkScalar tolerance) {
+ // diff = (a/4 + b/2 + c/4) - (a/2 + c/2)
+ // diff = -a/4 + b/2 - c/4
+ SkScalar dx = SkScalarHalf(pts[1].fX) -
+ SkScalarHalf(SkScalarHalf(pts[0].fX + pts[2].fX));
+ SkScalar dy = SkScalarHalf(pts[1].fY) -
+ SkScalarHalf(SkScalarHalf(pts[0].fY + pts[2].fY));
+
+ SkScalar dist = std::max(SkScalarAbs(dx), SkScalarAbs(dy));
+ return dist > tolerance;
+}
+
+static bool conic_too_curvy(const SkPoint& firstPt, const SkPoint& midTPt,
+ const SkPoint& lastPt, SkScalar tolerance) {
+ SkPoint midEnds = firstPt + lastPt;
+ midEnds *= 0.5f;
+ SkVector dxy = midTPt - midEnds;
+ SkScalar dist = std::max(SkScalarAbs(dxy.fX), SkScalarAbs(dxy.fY));
+ return dist > tolerance;
+}
+
+static bool cheap_dist_exceeds_limit(const SkPoint& pt, SkScalar x, SkScalar y,
+ SkScalar tolerance) {
+ SkScalar dist = std::max(SkScalarAbs(x - pt.fX), SkScalarAbs(y - pt.fY));
+ // just made up the 1/2
+ return dist > tolerance;
+}
+
+static bool cubic_too_curvy(const SkPoint pts[4], SkScalar tolerance) {
+ return cheap_dist_exceeds_limit(pts[1],
+ SkScalarInterp(pts[0].fX, pts[3].fX, SK_Scalar1/3),
+ SkScalarInterp(pts[0].fY, pts[3].fY, SK_Scalar1/3), tolerance)
+ ||
+ cheap_dist_exceeds_limit(pts[2],
+ SkScalarInterp(pts[0].fX, pts[3].fX, SK_Scalar1*2/3),
+ SkScalarInterp(pts[0].fY, pts[3].fY, SK_Scalar1*2/3), tolerance);
+}
+
+class SkContourMeasureIter::Impl {
+public:
+ Impl(const SkPath& path, bool forceClosed, SkScalar resScale)
+ : fPath(path)
+ , fIter(SkPathPriv::Iterate(fPath).begin())
+ , fTolerance(CHEAP_DIST_LIMIT * SkScalarInvert(resScale))
+ , fForceClosed(forceClosed) {}
+
+ bool hasNextSegments() const { return fIter != SkPathPriv::Iterate(fPath).end(); }
+ SkContourMeasure* buildSegments();
+
+private:
+ SkPath fPath;
+ SkPathPriv::RangeIter fIter;
+ SkScalar fTolerance;
+ bool fForceClosed;
+
+ // temporary
+ SkTDArray<SkContourMeasure::Segment> fSegments;
+ SkTDArray<SkPoint> fPts; // Points used to define the segments
+
+ SkDEBUGCODE(void validate() const;)
+ SkScalar compute_line_seg(SkPoint p0, SkPoint p1, SkScalar distance, unsigned ptIndex);
+ SkScalar compute_quad_segs(const SkPoint pts[3], SkScalar distance,
+ int mint, int maxt, unsigned ptIndex);
+ SkScalar compute_conic_segs(const SkConic& conic, SkScalar distance,
+ int mint, const SkPoint& minPt,
+ int maxt, const SkPoint& maxPt,
+ unsigned ptIndex);
+ SkScalar compute_cubic_segs(const SkPoint pts[4], SkScalar distance,
+ int mint, int maxt, unsigned ptIndex);
+};
+
+SkScalar SkContourMeasureIter::Impl::compute_quad_segs(const SkPoint pts[3], SkScalar distance,
+ int mint, int maxt, unsigned ptIndex) {
+ if (tspan_big_enough(maxt - mint) && quad_too_curvy(pts, fTolerance)) {
+ SkPoint tmp[5];
+ int halft = (mint + maxt) >> 1;
+
+ SkChopQuadAtHalf(pts, tmp);
+ distance = this->compute_quad_segs(tmp, distance, mint, halft, ptIndex);
+ distance = this->compute_quad_segs(&tmp[2], distance, halft, maxt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(pts[0], pts[2]);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ SkASSERT(ptIndex < (unsigned)fPts.size());
+ SkContourMeasure::Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kQuad_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+SkScalar SkContourMeasureIter::Impl::compute_conic_segs(const SkConic& conic, SkScalar distance,
+ int mint, const SkPoint& minPt,
+ int maxt, const SkPoint& maxPt,
+ unsigned ptIndex) {
+ int halft = (mint + maxt) >> 1;
+ SkPoint halfPt = conic.evalAt(tValue2Scalar(halft));
+ if (!halfPt.isFinite()) {
+ return distance;
+ }
+ if (tspan_big_enough(maxt - mint) && conic_too_curvy(minPt, halfPt, maxPt, fTolerance)) {
+ distance = this->compute_conic_segs(conic, distance, mint, minPt, halft, halfPt, ptIndex);
+ distance = this->compute_conic_segs(conic, distance, halft, halfPt, maxt, maxPt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(minPt, maxPt);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ SkASSERT(ptIndex < (unsigned)fPts.size());
+ SkContourMeasure::Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kConic_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+SkScalar SkContourMeasureIter::Impl::compute_cubic_segs(const SkPoint pts[4], SkScalar distance,
+ int mint, int maxt, unsigned ptIndex) {
+ if (tspan_big_enough(maxt - mint) && cubic_too_curvy(pts, fTolerance)) {
+ SkPoint tmp[7];
+ int halft = (mint + maxt) >> 1;
+
+ SkChopCubicAtHalf(pts, tmp);
+ distance = this->compute_cubic_segs(tmp, distance, mint, halft, ptIndex);
+ distance = this->compute_cubic_segs(&tmp[3], distance, halft, maxt, ptIndex);
+ } else {
+ SkScalar d = SkPoint::Distance(pts[0], pts[3]);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ SkASSERT(ptIndex < (unsigned)fPts.size());
+ SkContourMeasure::Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kCubic_SegType;
+ seg->fTValue = maxt;
+ }
+ }
+ return distance;
+}
+
+SkScalar SkContourMeasureIter::Impl::compute_line_seg(SkPoint p0, SkPoint p1, SkScalar distance,
+ unsigned ptIndex) {
+ SkScalar d = SkPoint::Distance(p0, p1);
+ SkASSERT(d >= 0);
+ SkScalar prevD = distance;
+ distance += d;
+ if (distance > prevD) {
+ SkASSERT((unsigned)ptIndex < (unsigned)fPts.size());
+ SkContourMeasure::Segment* seg = fSegments.append();
+ seg->fDistance = distance;
+ seg->fPtIndex = ptIndex;
+ seg->fType = kLine_SegType;
+ seg->fTValue = kMaxTValue;
+ }
+ return distance;
+}
+
+#ifdef SK_DEBUG
+void SkContourMeasureIter::Impl::validate() const {
+#ifndef SK_DISABLE_SLOW_DEBUG_VALIDATION
+ const SkContourMeasure::Segment* seg = fSegments.begin();
+ const SkContourMeasure::Segment* stop = fSegments.end();
+ unsigned ptIndex = 0;
+ SkScalar distance = 0;
+ // limit the loop to a reasonable number; pathological cases can run for minutes
+ int maxChecks = 10000000; // set to INT_MAX to defeat the check
+ while (seg < stop) {
+ SkASSERT(seg->fDistance > distance);
+ SkASSERT(seg->fPtIndex >= ptIndex);
+ SkASSERT(seg->fTValue > 0);
+
+ const SkContourMeasure::Segment* s = seg;
+ while (s < stop - 1 && s[0].fPtIndex == s[1].fPtIndex && --maxChecks > 0) {
+ SkASSERT(s[0].fType == s[1].fType);
+ SkASSERT(s[0].fTValue < s[1].fTValue);
+ s += 1;
+ }
+
+ distance = seg->fDistance;
+ ptIndex = seg->fPtIndex;
+ seg += 1;
+ }
+#endif
+}
+#endif
+
+SkContourMeasure* SkContourMeasureIter::Impl::buildSegments() {
+ int ptIndex = -1;
+ SkScalar distance = 0;
+ bool haveSeenClose = fForceClosed;
+ bool haveSeenMoveTo = false;
+
+ /* Note:
+ * as we accumulate distance, we have to check that the result of +=
+ * actually made it larger, since a very small delta might be > 0, but
+ * still have no effect on distance (if distance >>> delta).
+ *
+ * We do this check below, and in compute_quad_segs and compute_cubic_segs
+ */
+
+ fSegments.reset();
+ fPts.reset();
+
+ auto end = SkPathPriv::Iterate(fPath).end();
+ for (; fIter != end; ++fIter) {
+ auto [verb, pts, w] = *fIter;
+ if (haveSeenMoveTo && verb == SkPathVerb::kMove) {
+ break;
+ }
+ switch (verb) {
+ case SkPathVerb::kMove:
+ ptIndex += 1;
+ fPts.append(1, pts);
+ SkASSERT(!haveSeenMoveTo);
+ haveSeenMoveTo = true;
+ break;
+
+ case SkPathVerb::kLine: {
+ SkASSERT(haveSeenMoveTo);
+ SkScalar prevD = distance;
+ distance = this->compute_line_seg(pts[0], pts[1], distance, ptIndex);
+ if (distance > prevD) {
+ fPts.append(1, pts + 1);
+ ptIndex++;
+ }
+ } break;
+
+ case SkPathVerb::kQuad: {
+ SkASSERT(haveSeenMoveTo);
+ SkScalar prevD = distance;
+ distance = this->compute_quad_segs(pts, distance, 0, kMaxTValue, ptIndex);
+ if (distance > prevD) {
+ fPts.append(2, pts + 1);
+ ptIndex += 2;
+ }
+ } break;
+
+ case SkPathVerb::kConic: {
+ SkASSERT(haveSeenMoveTo);
+ const SkConic conic(pts, *w);
+ SkScalar prevD = distance;
+ distance = this->compute_conic_segs(conic, distance, 0, conic.fPts[0],
+ kMaxTValue, conic.fPts[2], ptIndex);
+ if (distance > prevD) {
+ // we store the conic weight in our next point, followed by the last 2 pts
+                    // thus to reconstitute a conic, you'd need to say
+ // SkConic(pts[0], pts[2], pts[3], weight = pts[1].fX)
+ fPts.append()->set(conic.fW, 0);
+ fPts.append(2, pts + 1);
+ ptIndex += 3;
+ }
+ } break;
+
+ case SkPathVerb::kCubic: {
+ SkASSERT(haveSeenMoveTo);
+ SkScalar prevD = distance;
+ distance = this->compute_cubic_segs(pts, distance, 0, kMaxTValue, ptIndex);
+ if (distance > prevD) {
+ fPts.append(3, pts + 1);
+ ptIndex += 3;
+ }
+ } break;
+
+ case SkPathVerb::kClose:
+ haveSeenClose = true;
+ break;
+ }
+
+ }
+
+ if (!SkScalarIsFinite(distance)) {
+ return nullptr;
+ }
+ if (fSegments.empty()) {
+ return nullptr;
+ }
+
+ if (haveSeenClose) {
+ SkScalar prevD = distance;
+ SkPoint firstPt = fPts[0];
+ distance = this->compute_line_seg(fPts[ptIndex], firstPt, distance, ptIndex);
+ if (distance > prevD) {
+ *fPts.append() = firstPt;
+ }
+ }
+
+ SkDEBUGCODE(this->validate();)
+
+ return new SkContourMeasure(std::move(fSegments), std::move(fPts), distance, haveSeenClose);
+}
+
+static void compute_pos_tan(const SkPoint pts[], unsigned segType,
+ SkScalar t, SkPoint* pos, SkVector* tangent) {
+ switch (segType) {
+ case kLine_SegType:
+ if (pos) {
+ pos->set(SkScalarInterp(pts[0].fX, pts[1].fX, t),
+ SkScalarInterp(pts[0].fY, pts[1].fY, t));
+ }
+ if (tangent) {
+ tangent->setNormalize(pts[1].fX - pts[0].fX, pts[1].fY - pts[0].fY);
+ }
+ break;
+ case kQuad_SegType:
+ SkEvalQuadAt(pts, t, pos, tangent);
+ if (tangent) {
+ tangent->normalize();
+ }
+ break;
+ case kConic_SegType: {
+ SkConic(pts[0], pts[2], pts[3], pts[1].fX).evalAt(t, pos, tangent);
+ if (tangent) {
+ tangent->normalize();
+ }
+ } break;
+ case kCubic_SegType:
+ SkEvalCubicAt(pts, t, pos, tangent, nullptr);
+ if (tangent) {
+ tangent->normalize();
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown segType");
+ }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+SkContourMeasureIter::SkContourMeasureIter() {
+}
+
+SkContourMeasureIter::SkContourMeasureIter(const SkPath& path, bool forceClosed,
+ SkScalar resScale) {
+ this->reset(path, forceClosed, resScale);
+}
+
+SkContourMeasureIter::~SkContourMeasureIter() {}
+
+/** Assign a new path, or null to have none.
+*/
+void SkContourMeasureIter::reset(const SkPath& path, bool forceClosed, SkScalar resScale) {
+ if (path.isFinite()) {
+ fImpl = std::make_unique<Impl>(path, forceClosed, resScale);
+ } else {
+ fImpl.reset();
+ }
+}
+
+sk_sp<SkContourMeasure> SkContourMeasureIter::next() {
+ if (!fImpl) {
+ return nullptr;
+ }
+ while (fImpl->hasNextSegments()) {
+ auto cm = fImpl->buildSegments();
+ if (cm) {
+ return sk_sp<SkContourMeasure>(cm);
+ }
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkContourMeasure::SkContourMeasure(SkTDArray<Segment>&& segs, SkTDArray<SkPoint>&& pts, SkScalar length, bool isClosed)
+ : fSegments(std::move(segs))
+ , fPts(std::move(pts))
+ , fLength(length)
+ , fIsClosed(isClosed)
+ {}
+
+template <typename T, typename K>
+int SkTKSearch(const T base[], int count, const K& key) {
+ SkASSERT(count >= 0);
+ if (count <= 0) {
+ return ~0;
+ }
+
+ SkASSERT(base != nullptr); // base may be nullptr if count is zero
+
+ unsigned lo = 0;
+ unsigned hi = count - 1;
+
+ while (lo < hi) {
+ unsigned mid = (hi + lo) >> 1;
+ if (base[mid].fDistance < key) {
+ lo = mid + 1;
+ } else {
+ hi = mid;
+ }
+ }
+
+ if (base[hi].fDistance < key) {
+ hi += 1;
+ hi = ~hi;
+ } else if (key < base[hi].fDistance) {
+ hi = ~hi;
+ }
+ return hi;
+}
+
+const SkContourMeasure::Segment* SkContourMeasure::distanceToSegment( SkScalar distance,
+ SkScalar* t) const {
+ SkDEBUGCODE(SkScalar length = ) this->length();
+ SkASSERT(distance >= 0 && distance <= length);
+
+ const Segment* seg = fSegments.begin();
+ int count = fSegments.size();
+
+ int index = SkTKSearch<Segment, SkScalar>(seg, count, distance);
+ // don't care if we hit an exact match or not, so we xor index if it is negative
+ index ^= (index >> 31);
+ seg = &seg[index];
+
+ // now interpolate t-values with the prev segment (if possible)
+ SkScalar startT = 0, startD = 0;
+ // check if the prev segment is legal, and references the same set of points
+ if (index > 0) {
+ startD = seg[-1].fDistance;
+ if (seg[-1].fPtIndex == seg->fPtIndex) {
+ SkASSERT(seg[-1].fType == seg->fType);
+ startT = seg[-1].getScalarT();
+ }
+ }
+
+ SkASSERT(seg->getScalarT() > startT);
+ SkASSERT(distance >= startD);
+ SkASSERT(seg->fDistance > startD);
+
+ *t = startT + (seg->getScalarT() - startT) * (distance - startD) / (seg->fDistance - startD);
+ return seg;
+}
+
+bool SkContourMeasure::getPosTan(SkScalar distance, SkPoint* pos, SkVector* tangent) const {
+ if (SkScalarIsNaN(distance)) {
+ return false;
+ }
+
+ const SkScalar length = this->length();
+ SkASSERT(length > 0 && !fSegments.empty());
+
+ // pin the distance to a legal range
+ if (distance < 0) {
+ distance = 0;
+ } else if (distance > length) {
+ distance = length;
+ }
+
+ SkScalar t;
+ const Segment* seg = this->distanceToSegment(distance, &t);
+ if (SkScalarIsNaN(t)) {
+ return false;
+ }
+
+ SkASSERT((unsigned)seg->fPtIndex < (unsigned)fPts.size());
+ compute_pos_tan(&fPts[seg->fPtIndex], seg->fType, t, pos, tangent);
+ return true;
+}
+
+bool SkContourMeasure::getMatrix(SkScalar distance, SkMatrix* matrix, MatrixFlags flags) const {
+ SkPoint position;
+ SkVector tangent;
+
+ if (this->getPosTan(distance, &position, &tangent)) {
+ if (matrix) {
+ if (flags & kGetTangent_MatrixFlag) {
+ matrix->setSinCos(tangent.fY, tangent.fX, 0, 0);
+ } else {
+ matrix->reset();
+ }
+ if (flags & kGetPosition_MatrixFlag) {
+ matrix->postTranslate(position.fX, position.fY);
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkContourMeasure::getSegment(SkScalar startD, SkScalar stopD, SkPath* dst,
+ bool startWithMoveTo) const {
+ SkASSERT(dst);
+
+ SkScalar length = this->length(); // ensure we have built our segments
+
+ if (startD < 0) {
+ startD = 0;
+ }
+ if (stopD > length) {
+ stopD = length;
+ }
+ if (!(startD <= stopD)) { // catch NaN values as well
+ return false;
+ }
+ if (fSegments.empty()) {
+ return false;
+ }
+
+ SkPoint p;
+ SkScalar startT, stopT;
+ const Segment* seg = this->distanceToSegment(startD, &startT);
+ if (!SkScalarIsFinite(startT)) {
+ return false;
+ }
+ const Segment* stopSeg = this->distanceToSegment(stopD, &stopT);
+ if (!SkScalarIsFinite(stopT)) {
+ return false;
+ }
+ SkASSERT(seg <= stopSeg);
+ if (startWithMoveTo) {
+ compute_pos_tan(&fPts[seg->fPtIndex], seg->fType, startT, &p, nullptr);
+ dst->moveTo(p);
+ }
+
+ if (seg->fPtIndex == stopSeg->fPtIndex) {
+ SkContourMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, startT, stopT, dst);
+ } else {
+ do {
+ SkContourMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, startT, SK_Scalar1, dst);
+ seg = SkContourMeasure::Segment::Next(seg);
+ startT = 0;
+ } while (seg->fPtIndex < stopSeg->fPtIndex);
+ SkContourMeasure_segTo(&fPts[seg->fPtIndex], seg->fType, 0, stopT, dst);
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkConvertPixels.cpp b/gfx/skia/skia/src/core/SkConvertPixels.cpp
new file mode 100644
index 0000000000..1ffdc1d37c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConvertPixels.cpp
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkConvertPixels.h"
+
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkSize.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkHalf.h"
+#include "src/base/SkRectMemcpy.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkRasterPipelineOpContexts.h"
+
+#include <cstdint>
+#include <cstring>
+#include <initializer_list>
+
+static bool rect_memcpy(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRB,
+ const SkColorSpaceXformSteps& steps) {
+ // We can copy the pixels when no color type, alpha type, or color space changes.
+ if (dstInfo.colorType() != srcInfo.colorType()) {
+ return false;
+ }
+ if (dstInfo.colorType() != kAlpha_8_SkColorType
+ && steps.flags.mask() != 0b00000) {
+ return false;
+ }
+
+ SkRectMemcpy(dstPixels, dstRB,
+ srcPixels, srcRB, dstInfo.minRowBytes(), dstInfo.height());
+ return true;
+}
+
+static bool swizzle_or_premul(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRB,
+ const SkColorSpaceXformSteps& steps) {
+ auto is_8888 = [](SkColorType ct) {
+ return ct == kRGBA_8888_SkColorType || ct == kBGRA_8888_SkColorType;
+ };
+ if (!is_8888(dstInfo.colorType()) ||
+ !is_8888(srcInfo.colorType()) ||
+ steps.flags.linearize ||
+ steps.flags.gamut_transform ||
+ steps.flags.unpremul ||
+ steps.flags.encode) {
+ return false;
+ }
+
+ const bool swapRB = dstInfo.colorType() != srcInfo.colorType();
+
+ void (*fn)(uint32_t*, const uint32_t*, int) = nullptr;
+
+ if (steps.flags.premul) {
+ fn = swapRB ? SkOpts::RGBA_to_bgrA
+ : SkOpts::RGBA_to_rgbA;
+ } else {
+ // If we're not swizzling, we ought to have used rect_memcpy().
+ SkASSERT(swapRB);
+ fn = SkOpts::RGBA_to_BGRA;
+ }
+
+ for (int y = 0; y < dstInfo.height(); y++) {
+ fn((uint32_t*)dstPixels, (const uint32_t*)srcPixels, dstInfo.width());
+ dstPixels = SkTAddOffset<void>(dstPixels, dstRB);
+ srcPixels = SkTAddOffset<const void>(srcPixels, srcRB);
+ }
+ return true;
+}
+
+static bool convert_to_alpha8(const SkImageInfo& dstInfo, void* vdst, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* src, size_t srcRB,
+ const SkColorSpaceXformSteps&) {
+ if (dstInfo.colorType() != kAlpha_8_SkColorType) {
+ return false;
+ }
+ auto dst = (uint8_t*)vdst;
+
+ switch (srcInfo.colorType()) {
+ case kUnknown_SkColorType:
+ case kAlpha_8_SkColorType: {
+ // Unknown should never happen.
+ // Alpha8 should have been handled by rect_memcpy().
+ SkASSERT(false);
+ return false;
+ }
+
+ case kA16_unorm_SkColorType: {
+ auto src16 = (const uint16_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = src16[x] >> 8;
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src16 = SkTAddOffset<const uint16_t>(src16, srcRB);
+ }
+ return true;
+ }
+
+ case kGray_8_SkColorType:
+ case kRGB_565_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGB_101010x_SkColorType:
+ case kBGR_101010x_SkColorType:
+ case kBGR_101010x_XR_SkColorType:
+ case kR8_unorm_SkColorType: {
+ for (int y = 0; y < srcInfo.height(); ++y) {
+ memset(dst, 0xFF, srcInfo.width());
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ }
+ return true;
+ }
+
+ case kARGB_4444_SkColorType: {
+ auto src16 = (const uint16_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = SkPacked4444ToA32(src16[x]);
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src16 = SkTAddOffset<const uint16_t>(src16, srcRB);
+ }
+ return true;
+ }
+
+ case kBGRA_8888_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kSRGBA_8888_SkColorType: {
+ auto src32 = (const uint32_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = src32[x] >> 24;
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src32 = SkTAddOffset<const uint32_t>(src32, srcRB);
+ }
+ return true;
+ }
+
+ case kRGBA_1010102_SkColorType:
+ case kBGRA_1010102_SkColorType: {
+ auto src32 = (const uint32_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (src32[x] >> 30) * 0x55;
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src32 = SkTAddOffset<const uint32_t>(src32, srcRB);
+ }
+ return true;
+ }
+
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ auto src64 = (const uint64_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (uint8_t) (255.0f * SkHalfToFloat(src64[x] >> 48));
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src64 = SkTAddOffset<const uint64_t>(src64, srcRB);
+ }
+ return true;
+ }
+
+ case kRGBA_F32_SkColorType: {
+ auto rgba = (const float*)src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (uint8_t)(255.0f * rgba[4*x+3]);
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ rgba = SkTAddOffset<const float>(rgba, srcRB);
+ }
+ return true;
+ }
+
+ case kA16_float_SkColorType: {
+ auto srcF16 = (const uint16_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (uint8_t) (255.0f * SkHalfToFloat(srcF16[x]));
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ srcF16 = SkTAddOffset<const uint16_t>(srcF16, srcRB);
+ }
+ return true;
+ }
+
+ case kR16G16B16A16_unorm_SkColorType: {
+ auto src64 = (const uint64_t*) src;
+ for (int y = 0; y < srcInfo.height(); y++) {
+ for (int x = 0; x < srcInfo.width(); x++) {
+ dst[x] = (src64[x] >> 48) >> 8;
+ }
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ src64 = SkTAddOffset<const uint64_t>(src64, srcRB);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+// Default: Use the pipeline.
+static void convert_with_pipeline(const SkImageInfo& dstInfo, void* dstRow, int dstStride,
+ const SkImageInfo& srcInfo, const void* srcRow, int srcStride,
+ const SkColorSpaceXformSteps& steps) {
+ SkRasterPipeline_MemoryCtx src = { (void*)srcRow, srcStride },
+ dst = { (void*)dstRow, dstStride };
+
+ SkRasterPipeline_<256> pipeline;
+ pipeline.append_load(srcInfo.colorType(), &src);
+ steps.apply(&pipeline);
+ pipeline.append_store(dstInfo.colorType(), &dst);
+ pipeline.run(0,0, srcInfo.width(), srcInfo.height());
+}
+
+bool SkConvertPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRB) {
+ SkASSERT(dstInfo.dimensions() == srcInfo.dimensions());
+ SkASSERT(SkImageInfoValidConversion(dstInfo, srcInfo));
+
+ int srcStride = (int)(srcRB / srcInfo.bytesPerPixel());
+ int dstStride = (int)(dstRB / dstInfo.bytesPerPixel());
+ if ((size_t)srcStride * srcInfo.bytesPerPixel() != srcRB ||
+ (size_t)dstStride * dstInfo.bytesPerPixel() != dstRB) {
+ return false;
+ }
+
+ SkColorSpaceXformSteps steps{srcInfo.colorSpace(), srcInfo.alphaType(),
+ dstInfo.colorSpace(), dstInfo.alphaType()};
+
+ for (auto fn : {rect_memcpy, swizzle_or_premul, convert_to_alpha8}) {
+ if (fn(dstInfo, dstPixels, dstRB, srcInfo, srcPixels, srcRB, steps)) {
+ return true;
+ }
+ }
+ convert_with_pipeline(dstInfo, dstPixels, dstStride, srcInfo, srcPixels, srcStride, steps);
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkConvertPixels.h b/gfx/skia/skia/src/core/SkConvertPixels.h
new file mode 100644
index 0000000000..fd04535a52
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkConvertPixels.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkConvertPixels_DEFINED
+#define SkConvertPixels_DEFINED
+
+#include "include/private/base/SkAttributes.h"
+
+#include <cstddef>
+
+struct SkImageInfo;
+
+bool SK_WARN_UNUSED_RESULT SkConvertPixels(
+ const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ const SkImageInfo& srcInfo, const void* srcPixels, size_t srcRowBytes);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCoreBlitters.h b/gfx/skia/skia/src/core/SkCoreBlitters.h
new file mode 100644
index 0000000000..aa658a63bf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCoreBlitters.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCoreBlitters_DEFINED
+#define SkCoreBlitters_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkBlitter_A8.h"
+#include "src/core/SkXfermodePriv.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkSurfaceProps;
+
+class SkRasterBlitter : public SkBlitter {
+public:
+ SkRasterBlitter(const SkPixmap& device) : fDevice(device) {}
+
+protected:
+ const SkPixmap fDevice;
+
+private:
+ using INHERITED = SkBlitter;
+};
+
+class SkShaderBlitter : public SkRasterBlitter {
+public:
+ /**
+ * The storage for shaderContext is owned by the caller, but the object itself is not.
+ * The blitter only ensures that the storage always holds a live object, but it may
+ * exchange that object.
+ */
+ SkShaderBlitter(const SkPixmap& device, const SkPaint& paint,
+ SkShaderBase::Context* shaderContext);
+ ~SkShaderBlitter() override;
+
+protected:
+ uint32_t fShaderFlags;
+ const SkShader* fShader;
+ SkShaderBase::Context* fShaderContext;
+ bool fConstInY;
+
+private:
+ // illegal
+ SkShaderBlitter& operator=(const SkShaderBlitter&);
+
+ using INHERITED = SkRasterBlitter;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkARGB32_Blitter : public SkRasterBlitter {
+public:
+ SkARGB32_Blitter(const SkPixmap& device, const SkPaint& paint);
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+ const SkPixmap* justAnOpaqueColor(uint32_t*) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+protected:
+ SkColor fColor;
+ SkPMColor fPMColor;
+
+private:
+ unsigned fSrcA, fSrcR, fSrcG, fSrcB;
+
+ // illegal
+ SkARGB32_Blitter& operator=(const SkARGB32_Blitter&);
+
+ using INHERITED = SkRasterBlitter;
+};
+
+class SkARGB32_Opaque_Blitter : public SkARGB32_Blitter {
+public:
+ SkARGB32_Opaque_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device, paint) { SkASSERT(paint.getAlpha() == 0xFF); }
+ void blitMask(const SkMask&, const SkIRect&) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+private:
+ using INHERITED = SkARGB32_Blitter;
+};
+
+class SkARGB32_Black_Blitter : public SkARGB32_Opaque_Blitter {
+public:
+ SkARGB32_Black_Blitter(const SkPixmap& device, const SkPaint& paint)
+ : INHERITED(device, paint) {}
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+
+private:
+ using INHERITED = SkARGB32_Opaque_Blitter;
+};
+
+class SkARGB32_Shader_Blitter : public SkShaderBlitter {
+public:
+ SkARGB32_Shader_Blitter(const SkPixmap& device, const SkPaint& paint,
+ SkShaderBase::Context* shaderContext);
+ ~SkARGB32_Shader_Blitter() override;
+ void blitH(int x, int y, int width) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitAntiH(int x, int y, const SkAlpha[], const int16_t[]) override;
+ void blitMask(const SkMask&, const SkIRect&) override;
+
+private:
+ SkXfermode* fXfermode;
+ SkPMColor* fBuffer;
+ SkBlitRow::Proc32 fProc32;
+ SkBlitRow::Proc32 fProc32Blend;
+ bool fShadeDirectlyIntoDevice;
+
+ // illegal
+ SkARGB32_Shader_Blitter& operator=(const SkARGB32_Shader_Blitter&);
+
+ using INHERITED = SkShaderBlitter;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap&,
+ const SkPaint&,
+ const SkMatrix& ctm,
+ SkArenaAlloc*,
+ sk_sp<SkShader> clipShader,
+ const SkSurfaceProps& props);
+// Use this if you've pre-baked a shader pipeline, including modulating with paint alpha.
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap&, const SkPaint&,
+ const SkRasterPipeline& shaderPipeline,
+ bool shader_is_opaque,
+ SkArenaAlloc*, sk_sp<SkShader> clipShader);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkCpu.cpp b/gfx/skia/skia/src/core/SkCpu.cpp
new file mode 100644
index 0000000000..26afc7038a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCpu.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/base/SkOnce.h"
+#include "src/core/SkCpu.h"
+
+#if defined(SK_CPU_X86)
+ #if defined(_MSC_VER)
+ #include <intrin.h>
+ static void cpuid (uint32_t abcd[4]) { __cpuid ((int*)abcd, 1); }
+ static void cpuid7(uint32_t abcd[4]) { __cpuidex((int*)abcd, 7, 0); }
+ static uint64_t xgetbv(uint32_t xcr) { return _xgetbv(xcr); }
+ #else
+ #include <cpuid.h>
+ #if !defined(__cpuid_count) // Old Mac Clang doesn't have this defined.
+ #define __cpuid_count(eax, ecx, a, b, c, d) \
+ __asm__("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(eax), "2"(ecx))
+ #endif
+ static void cpuid (uint32_t abcd[4]) { __get_cpuid(1, abcd+0, abcd+1, abcd+2, abcd+3); }
+ static void cpuid7(uint32_t abcd[4]) {
+ __cpuid_count(7, 0, abcd[0], abcd[1], abcd[2], abcd[3]);
+ }
+ static uint64_t xgetbv(uint32_t xcr) {
+ uint32_t eax, edx;
+ __asm__ __volatile__ ( "xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ return (uint64_t)(edx) << 32 | eax;
+ }
+ #endif
+
+ static uint32_t read_cpu_features() {
+ uint32_t features = 0;
+ uint32_t abcd[4] = {0,0,0,0};
+
+ // You might want to refer to http://www.sandpile.org/x86/cpuid.htm
+
+ cpuid(abcd);
+ if (abcd[3] & (1<<25)) { features |= SkCpu:: SSE1; }
+ if (abcd[3] & (1<<26)) { features |= SkCpu:: SSE2; }
+ if (abcd[2] & (1<< 0)) { features |= SkCpu:: SSE3; }
+ if (abcd[2] & (1<< 9)) { features |= SkCpu::SSSE3; }
+ if (abcd[2] & (1<<19)) { features |= SkCpu::SSE41; }
+ if (abcd[2] & (1<<20)) { features |= SkCpu::SSE42; }
+
+ if ((abcd[2] & (3<<26)) == (3<<26) // XSAVE + OSXSAVE
+ && (xgetbv(0) & (3<<1)) == (3<<1)) { // XMM and YMM state enabled.
+ if (abcd[2] & (1<<28)) { features |= SkCpu:: AVX; }
+ if (abcd[2] & (1<<29)) { features |= SkCpu::F16C; }
+ if (abcd[2] & (1<<12)) { features |= SkCpu:: FMA; }
+
+ cpuid7(abcd);
+ if (abcd[1] & (1<<5)) { features |= SkCpu::AVX2; }
+ if (abcd[1] & (1<<3)) { features |= SkCpu::BMI1; }
+ if (abcd[1] & (1<<8)) { features |= SkCpu::BMI2; }
+ if (abcd[1] & (1<<9)) { features |= SkCpu::ERMS; }
+
+ if ((xgetbv(0) & (7<<5)) == (7<<5)) { // All ZMM state bits enabled too.
+ if (abcd[1] & (1<<16)) { features |= SkCpu::AVX512F; }
+ if (abcd[1] & (1<<17)) { features |= SkCpu::AVX512DQ; }
+ if (abcd[1] & (1<<21)) { features |= SkCpu::AVX512IFMA; }
+ if (abcd[1] & (1<<26)) { features |= SkCpu::AVX512PF; }
+ if (abcd[1] & (1<<27)) { features |= SkCpu::AVX512ER; }
+ if (abcd[1] & (1<<28)) { features |= SkCpu::AVX512CD; }
+ if (abcd[1] & (1<<30)) { features |= SkCpu::AVX512BW; }
+ if (abcd[1] & (1<<31)) { features |= SkCpu::AVX512VL; }
+ }
+ }
+ return features;
+ }
+
+#elif defined(SK_CPU_ARM64) && __has_include(<sys/auxv.h>)
+ #include <sys/auxv.h>
+
+ static uint32_t read_cpu_features() {
+ const uint32_t kHWCAP_CRC32 = (1<< 7),
+ kHWCAP_ASIMDHP = (1<<10);
+
+ uint32_t features = 0;
+ uint32_t hwcaps = getauxval(AT_HWCAP);
+ if (hwcaps & kHWCAP_CRC32 ) { features |= SkCpu::CRC32; }
+ if (hwcaps & kHWCAP_ASIMDHP) { features |= SkCpu::ASIMDHP; }
+
+ // The Samsung Mongoose 3 core sets the ASIMDHP bit but doesn't support it.
+ for (int core = 0; features & SkCpu::ASIMDHP; core++) {
+ // These /sys files contain the core's MIDR_EL1 register, the source of
+ // CPU {implementer, variant, part, revision} you'd see in /proc/cpuinfo.
+ SkString path =
+ SkStringPrintf("/sys/devices/system/cpu/cpu%d/regs/identification/midr_el1", core);
+
+ // Can't use SkData::MakeFromFileName() here, I think because /sys can't be mmap()'d.
+ SkFILEStream midr_el1(path.c_str());
+ if (!midr_el1.isValid()) {
+ // This is our ordinary exit path.
+ // If we ask for MIDR_EL1 from a core that doesn't exist, we've checked all cores.
+ if (core == 0) {
+ // On the other hand, if we can't read MIDR_EL1 from any core, assume the worst.
+ features &= ~(SkCpu::ASIMDHP);
+ }
+ break;
+ }
+
+ const char kMongoose3[] = "0x00000000531f0020"; // 53 == Samsung.
+ char buf[std::size(kMongoose3) - 1]; // No need for the terminating \0.
+
+ if (std::size(buf) != midr_el1.read(buf, std::size(buf))
+ || 0 == memcmp(kMongoose3, buf, std::size(buf))) {
+ features &= ~(SkCpu::ASIMDHP);
+ }
+ }
+ return features;
+ }
+
+#elif defined(SK_CPU_ARM32) && __has_include(<sys/auxv.h>) && \
+ (!defined(__ANDROID_API__) || __ANDROID_API__ >= 18)
+ // sys/auxv.h will always be present in the Android NDK due to unified
+    // headers, but getauxval is only defined for API >= 18.
+ #include <sys/auxv.h>
+
+ static uint32_t read_cpu_features() {
+ const uint32_t kHWCAP_NEON = (1<<12);
+ const uint32_t kHWCAP_VFPv4 = (1<<16);
+
+ uint32_t features = 0;
+ uint32_t hwcaps = getauxval(AT_HWCAP);
+ if (hwcaps & kHWCAP_NEON ) {
+ features |= SkCpu::NEON;
+ if (hwcaps & kHWCAP_VFPv4) { features |= SkCpu::NEON_FMA|SkCpu::VFP_FP16; }
+ }
+ return features;
+ }
+
+#elif defined(SK_CPU_ARM32) && __has_include(<cpu-features.h>)
+ #include <cpu-features.h>
+
+ static uint32_t read_cpu_features() {
+ uint32_t features = 0;
+ uint64_t cpu_features = android_getCpuFeatures();
+ if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON) { features |= SkCpu::NEON; }
+ if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON_FMA) { features |= SkCpu::NEON_FMA; }
+ if (cpu_features & ANDROID_CPU_ARM_FEATURE_VFP_FP16) { features |= SkCpu::VFP_FP16; }
+ return features;
+ }
+
+#else
+ static uint32_t read_cpu_features() {
+ return 0;
+ }
+
+#endif
+
+uint32_t SkCpu::gCachedFeatures = 0;
+
+void SkCpu::CacheRuntimeFeatures() {
+ static SkOnce once;
+ once([] { gCachedFeatures = read_cpu_features(); });
+}
diff --git a/gfx/skia/skia/src/core/SkCpu.h b/gfx/skia/skia/src/core/SkCpu.h
new file mode 100644
index 0000000000..2450d93dcb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCpu.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCpu_DEFINED
+#define SkCpu_DEFINED
+
+#include "include/core/SkTypes.h"
+
+struct SkCpu {
+ enum {
+ SSE1 = 1 << 0,
+ SSE2 = 1 << 1,
+ SSE3 = 1 << 2,
+ SSSE3 = 1 << 3,
+ SSE41 = 1 << 4,
+ SSE42 = 1 << 5,
+ AVX = 1 << 6,
+ F16C = 1 << 7,
+ FMA = 1 << 8,
+ AVX2 = 1 << 9,
+ BMI1 = 1 << 10,
+ BMI2 = 1 << 11,
+ // Handy alias for all the cool Haswell+ instructions.
+ HSW = AVX2 | BMI1 | BMI2 | F16C | FMA,
+
+ AVX512F = 1 << 12,
+ AVX512DQ = 1 << 13,
+ AVX512IFMA = 1 << 14,
+ AVX512PF = 1 << 15,
+ AVX512ER = 1 << 16,
+ AVX512CD = 1 << 17,
+ AVX512BW = 1 << 18,
+ AVX512VL = 1 << 19,
+
+ // Handy alias for all the cool Skylake Xeon+ instructions.
+ SKX = AVX512F | AVX512DQ | AVX512CD | AVX512BW | AVX512VL,
+
+ ERMS = 1 << 20,
+ };
+ enum {
+ NEON = 1 << 0,
+ NEON_FMA = 1 << 1,
+ VFP_FP16 = 1 << 2,
+ CRC32 = 1 << 3,
+ ASIMDHP = 1 << 4,
+ };
+
+ static void CacheRuntimeFeatures();
+ static bool Supports(uint32_t);
+private:
+ static uint32_t gCachedFeatures;
+};
+
+inline bool SkCpu::Supports(uint32_t mask) {
+ uint32_t features = gCachedFeatures;
+
+ // If we mask in compile-time known lower limits, the compiler can
+ // often compile away this entire function.
+#if SK_CPU_X86
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ features |= SSE1;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ features |= SSE2;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE3
+ features |= SSE3;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ features |= SSSE3;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ features |= SSE41;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ features |= SSE42;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ features |= AVX;
+ #endif
+ // F16C goes here if we add SK_CPU_SSE_LEVEL_F16C
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ features |= AVX2;
+ #endif
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX
+ features |= (AVX512F | AVX512DQ | AVX512CD | AVX512BW | AVX512VL);
+ #endif
+ // FMA doesn't fit neatly into this total ordering.
+ // It's available on Haswell+ just like AVX2, but it's technically a different bit.
+ // TODO: circle back on this if we find ourselves limited by lack of compile-time FMA
+
+ #if defined(SK_CPU_LIMIT_AVX)
+ features &= (SSE1 | SSE2 | SSE3 | SSSE3 | SSE41 | SSE42 | AVX);
+ #elif defined(SK_CPU_LIMIT_SSE41)
+ features &= (SSE1 | SSE2 | SSE3 | SSSE3 | SSE41);
+ #elif defined(SK_CPU_LIMIT_SSE2)
+ features &= (SSE1 | SSE2);
+ #endif
+
+#else
+ #if defined(SK_ARM_HAS_NEON)
+ features |= NEON;
+ #endif
+
+ #if defined(SK_CPU_ARM64)
+ features |= NEON|NEON_FMA|VFP_FP16;
+ #endif
+
+ #if defined(SK_ARM_HAS_CRC32)
+ features |= CRC32;
+ #endif
+
+#endif
+ return (features & mask) == mask;
+}
+
+#endif//SkCpu_DEFINED
diff --git a/gfx/skia/skia/src/core/SkCubicClipper.cpp b/gfx/skia/skia/src/core/SkCubicClipper.cpp
new file mode 100644
index 0000000000..32af6f37df
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicClipper.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkCubicClipper.h"
+
+#include "include/core/SkPoint.h"
+#include "src/core/SkGeometry.h"
+
+#include <cstring>
+#include <utility>
+
+SkCubicClipper::SkCubicClipper() {
+ fClip.setEmpty();
+}
+
+void SkCubicClipper::setClip(const SkIRect& clip) {
+    // convert to scalars, since that's where we'll see the points
+ fClip.set(clip);
+}
+
+
+bool SkCubicClipper::ChopMonoAtY(const SkPoint pts[4], SkScalar y, SkScalar* t) {
+ SkScalar ycrv[4];
+ ycrv[0] = pts[0].fY - y;
+ ycrv[1] = pts[1].fY - y;
+ ycrv[2] = pts[2].fY - y;
+ ycrv[3] = pts[3].fY - y;
+
+#ifdef NEWTON_RAPHSON // Quadratic convergence, typically <= 3 iterations.
+ // Initial guess.
+ // TODO(turk): Check for zero denominator? Shouldn't happen unless the curve
+ // is not only monotonic but degenerate.
+ SkScalar t1 = ycrv[0] / (ycrv[0] - ycrv[3]);
+
+ // Newton's iterations.
+ const SkScalar tol = SK_Scalar1 / 16384; // This leaves 2 fixed noise bits.
+ SkScalar t0;
+ const int maxiters = 5;
+ int iters = 0;
+ bool converged;
+ do {
+ t0 = t1;
+ SkScalar y01 = SkScalarInterp(ycrv[0], ycrv[1], t0);
+ SkScalar y12 = SkScalarInterp(ycrv[1], ycrv[2], t0);
+ SkScalar y23 = SkScalarInterp(ycrv[2], ycrv[3], t0);
+ SkScalar y012 = SkScalarInterp(y01, y12, t0);
+ SkScalar y123 = SkScalarInterp(y12, y23, t0);
+ SkScalar y0123 = SkScalarInterp(y012, y123, t0);
+ SkScalar yder = (y123 - y012) * 3;
+ // TODO(turk): check for yder==0: horizontal.
+ t1 -= y0123 / yder;
+ converged = SkScalarAbs(t1 - t0) <= tol; // NaN-safe
+ ++iters;
+ } while (!converged && (iters < maxiters));
+ *t = t1; // Return the result.
+
+ // The result might be valid, even if outside of the range [0, 1], but
+ // we never evaluate a Bezier outside this interval, so we return false.
+ if (t1 < 0 || t1 > SK_Scalar1)
+ return false; // This shouldn't happen, but check anyway.
+ return converged;
+
+#else // BISECTION // Linear convergence, typically 16 iterations.
+
+ // Check that the endpoints straddle zero.
+ SkScalar tNeg, tPos; // Negative and positive function parameters.
+ if (ycrv[0] < 0) {
+ if (ycrv[3] < 0)
+ return false;
+ tNeg = 0;
+ tPos = SK_Scalar1;
+ } else if (ycrv[0] > 0) {
+ if (ycrv[3] > 0)
+ return false;
+ tNeg = SK_Scalar1;
+ tPos = 0;
+ } else {
+ *t = 0;
+ return true;
+ }
+
+ const SkScalar tol = SK_Scalar1 / 65536; // 1 for fixed, 1e-5 for float.
+ do {
+ SkScalar tMid = (tPos + tNeg) / 2;
+ SkScalar y01 = SkScalarInterp(ycrv[0], ycrv[1], tMid);
+ SkScalar y12 = SkScalarInterp(ycrv[1], ycrv[2], tMid);
+ SkScalar y23 = SkScalarInterp(ycrv[2], ycrv[3], tMid);
+ SkScalar y012 = SkScalarInterp(y01, y12, tMid);
+ SkScalar y123 = SkScalarInterp(y12, y23, tMid);
+ SkScalar y0123 = SkScalarInterp(y012, y123, tMid);
+ if (y0123 == 0) {
+ *t = tMid;
+ return true;
+ }
+ if (y0123 < 0) tNeg = tMid;
+ else tPos = tMid;
+    } while (!(SkScalarAbs(tPos - tNeg) <= tol)); // NaN-safe
+
+ *t = (tNeg + tPos) / 2;
+ return true;
+#endif // BISECTION
+}
+
+
+bool SkCubicClipper::clipCubic(const SkPoint srcPts[4], SkPoint dst[4]) {
+ bool reverse;
+
+ // we need the data to be monotonically descending in Y
+ if (srcPts[0].fY > srcPts[3].fY) {
+ dst[0] = srcPts[3];
+ dst[1] = srcPts[2];
+ dst[2] = srcPts[1];
+ dst[3] = srcPts[0];
+ reverse = true;
+ } else {
+ memcpy(dst, srcPts, 4 * sizeof(SkPoint));
+ reverse = false;
+ }
+
+ // are we completely above or below
+ const SkScalar ctop = fClip.fTop;
+ const SkScalar cbot = fClip.fBottom;
+ if (dst[3].fY <= ctop || dst[0].fY >= cbot) {
+ return false;
+ }
+
+ SkScalar t;
+ SkPoint tmp[7]; // for SkChopCubicAt
+
+ // are we partially above
+ if (dst[0].fY < ctop && ChopMonoAtY(dst, ctop, &t)) {
+ SkChopCubicAt(dst, tmp, t);
+ dst[0] = tmp[3];
+ dst[1] = tmp[4];
+ dst[2] = tmp[5];
+ }
+
+ // are we partially below
+ if (dst[3].fY > cbot && ChopMonoAtY(dst, cbot, &t)) {
+ SkChopCubicAt(dst, tmp, t);
+ dst[1] = tmp[1];
+ dst[2] = tmp[2];
+ dst[3] = tmp[3];
+ }
+
+ if (reverse) {
+ using std::swap;
+ swap(dst[0], dst[3]);
+ swap(dst[1], dst[2]);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkCubicClipper.h b/gfx/skia/skia/src/core/SkCubicClipper.h
new file mode 100644
index 0000000000..328d63b8b8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicClipper.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkCubicClipper_DEFINED
+#define SkCubicClipper_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+struct SkPoint;
+
+/** This class is initialized with a clip rectangle, and then can be fed cubics,
+ which must already be monotonic in Y.
+
+ In the future, it might return a series of segments, allowing it to clip
+ also in X, to ensure that all segments fit in a finite coordinate system.
+ */
+class SkCubicClipper {
+public:
+ SkCubicClipper();
+
+ void setClip(const SkIRect& clip);
+
+ bool SK_WARN_UNUSED_RESULT clipCubic(const SkPoint src[4], SkPoint dst[4]);
+
+ static bool SK_WARN_UNUSED_RESULT ChopMonoAtY(const SkPoint pts[4], SkScalar y, SkScalar* t);
+private:
+ SkRect fClip;
+};
+
+#endif // SkCubicClipper_DEFINED
diff --git a/gfx/skia/skia/src/core/SkCubicMap.cpp b/gfx/skia/skia/src/core/SkCubicMap.cpp
new file mode 100644
index 0000000000..a76712647d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicMap.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCubicMap.h"
+
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTPin.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkOpts.h"
+
+#include <algorithm>
+
+static inline bool nearly_zero(SkScalar x) {
+ SkASSERT(x >= 0);
+ return x <= 0.0000000001f;
+}
+
+static float compute_t_from_x(float A, float B, float C, float x) {
+ return SkOpts::cubic_solver(A, B, C, -x);
+}
+
+float SkCubicMap::computeYFromX(float x) const {
+ x = SkTPin(x, 0.0f, 1.0f);
+
+ if (nearly_zero(x) || nearly_zero(1 - x)) {
+ return x;
+ }
+ if (fType == kLine_Type) {
+ return x;
+ }
+ float t;
+ if (fType == kCubeRoot_Type) {
+ t = sk_float_pow(x / fCoeff[0].fX, 1.0f / 3);
+ } else {
+ t = compute_t_from_x(fCoeff[0].fX, fCoeff[1].fX, fCoeff[2].fX, x);
+ }
+ float a = fCoeff[0].fY;
+ float b = fCoeff[1].fY;
+ float c = fCoeff[2].fY;
+ float y = ((a * t + b) * t + c) * t;
+
+ return y;
+}
+
+static inline bool coeff_nearly_zero(float delta) {
+ return sk_float_abs(delta) <= 0.0000001f;
+}
+
+SkCubicMap::SkCubicMap(SkPoint p1, SkPoint p2) {
+ // Clamp X values only (we allow Ys outside [0..1]).
+ p1.fX = std::min(std::max(p1.fX, 0.0f), 1.0f);
+ p2.fX = std::min(std::max(p2.fX, 0.0f), 1.0f);
+
+ auto s1 = skvx::float2::Load(&p1) * 3;
+ auto s2 = skvx::float2::Load(&p2) * 3;
+
+ (1 + s1 - s2).store(&fCoeff[0]);
+ (s2 - s1 - s1).store(&fCoeff[1]);
+ s1.store(&fCoeff[2]);
+
+ fType = kSolver_Type;
+ if (SkScalarNearlyEqual(p1.fX, p1.fY) && SkScalarNearlyEqual(p2.fX, p2.fY)) {
+ fType = kLine_Type;
+ } else if (coeff_nearly_zero(fCoeff[1].fX) && coeff_nearly_zero(fCoeff[2].fX)) {
+ fType = kCubeRoot_Type;
+ }
+}
+
+SkPoint SkCubicMap::computeFromT(float t) const {
+ auto a = skvx::float2::Load(&fCoeff[0]);
+ auto b = skvx::float2::Load(&fCoeff[1]);
+ auto c = skvx::float2::Load(&fCoeff[2]);
+
+ SkPoint result;
+ (((a * t + b) * t + c) * t).store(&result);
+ return result;
+}
diff --git a/gfx/skia/skia/src/core/SkCubicSolver.h b/gfx/skia/skia/src/core/SkCubicSolver.h
new file mode 100644
index 0000000000..e65f3cc06f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkCubicSolver.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCubicSolver_DEFINED
+#define SkCubicSolver_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFloatingPoint.h"
+
+namespace SK_OPTS_NS {
+
+ static float eval_poly(float t, float b) {
+ return b;
+ }
+
+ template <typename... Rest>
+ static float eval_poly(float t, float m, float b, Rest... rest) {
+ return eval_poly(t, sk_fmaf(m,t,b), rest...);
+ }
+
+ inline float cubic_solver(float A, float B, float C, float D) {
+
+ #ifdef SK_DEBUG
+ auto valid = [](float t) {
+ return t >= 0 && t <= 1;
+ };
+ #endif
+
+ auto guess_nice_cubic_root = [](float a, float b, float c, float d) {
+ return -d;
+ };
+ float t = guess_nice_cubic_root(A, B, C, D);
+
+ int iters = 0;
+ const int MAX_ITERS = 8;
+ for (; iters < MAX_ITERS; ++iters) {
+ SkASSERT(valid(t));
+ float f = eval_poly(t, A,B,C,D); // f = At^3 + Bt^2 + Ct + D
+ if (sk_float_abs(f) <= 0.00005f) {
+ break;
+ }
+ float fp = eval_poly(t, 3*A, 2*B, C); // f' = 3At^2 + 2Bt + C
+ float fpp = eval_poly(t, 3*A+3*A, 2*B); // f'' = 6At + 2B
+
+ float numer = 2 * fp * f;
+ float denom = sk_fmaf(2*fp, fp, -(f*fpp));
+
+ t -= numer / denom;
+ }
+
+ SkASSERT(valid(t));
+ return t;
+ }
+
+} // namespace SK_OPTS_NS
+#endif
diff --git a/gfx/skia/skia/src/core/SkData.cpp b/gfx/skia/skia/src/core/SkData.cpp
new file mode 100644
index 0000000000..6dd2eb98cc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkData.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+
+#include "include/core/SkStream.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkOnce.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkStreamPriv.h"
+
+#include <cstring>
+#include <new>
+
+SkData::SkData(const void* ptr, size_t size, ReleaseProc proc, void* context)
+ : fReleaseProc(proc)
+ , fReleaseProcContext(context)
+ , fPtr(ptr)
+ , fSize(size)
+{}
+
+/** This constructor means we are inline with our fPtr's contents.
+ * Thus we set fPtr to point right after this.
+ */
+SkData::SkData(size_t size)
+ : fReleaseProc(nullptr)
+ , fReleaseProcContext(nullptr)
+ , fPtr((const char*)(this + 1))
+ , fSize(size)
+{}
+
+SkData::~SkData() {
+ if (fReleaseProc) {
+ fReleaseProc(fPtr, fReleaseProcContext);
+ }
+}
+
+bool SkData::equals(const SkData* other) const {
+ if (this == other) {
+ return true;
+ }
+ if (nullptr == other) {
+ return false;
+ }
+ return fSize == other->fSize && !sk_careful_memcmp(fPtr, other->fPtr, fSize);
+}
+
+size_t SkData::copyRange(size_t offset, size_t length, void* buffer) const {
+ size_t available = fSize;
+ if (offset >= available || 0 == length) {
+ return 0;
+ }
+ available -= offset;
+ if (length > available) {
+ length = available;
+ }
+ SkASSERT(length > 0);
+
+ if (buffer) {
+ memcpy(buffer, this->bytes() + offset, length);
+ }
+ return length;
+}
+
+void SkData::operator delete(void* p) {
+ ::operator delete(p);
+}
+
+sk_sp<SkData> SkData::PrivateNewWithCopy(const void* srcOrNull, size_t length) {
+ if (0 == length) {
+ return SkData::MakeEmpty();
+ }
+
+ const size_t actualLength = length + sizeof(SkData);
+ SkASSERT_RELEASE(length < actualLength); // Check for overflow.
+
+ void* storage = ::operator new (actualLength);
+ sk_sp<SkData> data(new (storage) SkData(length));
+ if (srcOrNull) {
+ memcpy(data->writable_data(), srcOrNull, length);
+ }
+ return data;
+}
+
+void SkData::NoopReleaseProc(const void*, void*) {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkData> SkData::MakeEmpty() {
+ static SkOnce once;
+ static SkData* empty;
+
+ once([]{ empty = new SkData(nullptr, 0, nullptr, nullptr); });
+ return sk_ref_sp(empty);
+}
+
+// assumes fPtr was allocated via sk_malloc
+static void sk_free_releaseproc(const void* ptr, void*) {
+ sk_free((void*)ptr);
+}
+
+sk_sp<SkData> SkData::MakeFromMalloc(const void* data, size_t length) {
+ return sk_sp<SkData>(new SkData(data, length, sk_free_releaseproc, nullptr));
+}
+
+sk_sp<SkData> SkData::MakeWithCopy(const void* src, size_t length) {
+ SkASSERT(src);
+ return PrivateNewWithCopy(src, length);
+}
+
+sk_sp<SkData> SkData::MakeUninitialized(size_t length) {
+ return PrivateNewWithCopy(nullptr, length);
+}
+
+sk_sp<SkData> SkData::MakeZeroInitialized(size_t length) {
+ auto data = MakeUninitialized(length);
+ if (length != 0) {
+ memset(data->writable_data(), 0, data->size());
+ }
+ return data;
+}
+
+sk_sp<SkData> SkData::MakeWithProc(const void* ptr, size_t length, ReleaseProc proc, void* ctx) {
+ return sk_sp<SkData>(new SkData(ptr, length, proc, ctx));
+}
+
+// assumes fPtr was allocated with sk_fmmap
+static void sk_mmap_releaseproc(const void* addr, void* ctx) {
+ size_t length = reinterpret_cast<size_t>(ctx);
+ sk_fmunmap(addr, length);
+}
+
+sk_sp<SkData> SkData::MakeFromFILE(FILE* f) {
+ size_t size;
+ void* addr = sk_fmmap(f, &size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+
+ return SkData::MakeWithProc(addr, size, sk_mmap_releaseproc, reinterpret_cast<void*>(size));
+}
+
+sk_sp<SkData> SkData::MakeFromFileName(const char path[]) {
+ FILE* f = path ? sk_fopen(path, kRead_SkFILE_Flag) : nullptr;
+ if (nullptr == f) {
+ return nullptr;
+ }
+ auto data = MakeFromFILE(f);
+ sk_fclose(f);
+ return data;
+}
+
+sk_sp<SkData> SkData::MakeFromFD(int fd) {
+ size_t size;
+ void* addr = sk_fdmmap(fd, &size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+ return SkData::MakeWithProc(addr, size, sk_mmap_releaseproc, reinterpret_cast<void*>(size));
+}
+
+// assumes context is a SkData
+static void sk_dataref_releaseproc(const void*, void* context) {
+ SkData* src = reinterpret_cast<SkData*>(context);
+ src->unref();
+}
+
+sk_sp<SkData> SkData::MakeSubset(const SkData* src, size_t offset, size_t length) {
+ /*
+ We could, if we wanted/need to, just make a deep copy of src's data,
+ rather than referencing it. This would duplicate the storage (of the
+ subset amount) but would possibly allow src to go out of scope sooner.
+ */
+
+ size_t available = src->size();
+ if (offset >= available || 0 == length) {
+ return SkData::MakeEmpty();
+ }
+ available -= offset;
+ if (length > available) {
+ length = available;
+ }
+ SkASSERT(length > 0);
+
+ src->ref(); // this will be balanced in sk_dataref_releaseproc
+ return sk_sp<SkData>(new SkData(src->bytes() + offset, length, sk_dataref_releaseproc,
+ const_cast<SkData*>(src)));
+}
+
+sk_sp<SkData> SkData::MakeWithCString(const char cstr[]) {
+ size_t size;
+ if (nullptr == cstr) {
+ cstr = "";
+ size = 1;
+ } else {
+ size = strlen(cstr) + 1;
+ }
+ return MakeWithCopy(cstr, size);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkData> SkData::MakeFromStream(SkStream* stream, size_t size) {
+ // reduce the chance of OOM by checking that the stream has enough bytes to read from before
+ // allocating that potentially large buffer.
+ if (StreamRemainingLengthIsBelow(stream, size)) {
+ return nullptr;
+ }
+ sk_sp<SkData> data(SkData::MakeUninitialized(size));
+ if (stream->read(data->writable_data(), size) != size) {
+ return nullptr;
+ }
+ return data;
+}
diff --git a/gfx/skia/skia/src/core/SkDataTable.cpp b/gfx/skia/skia/src/core/SkDataTable.cpp
new file mode 100644
index 0000000000..d15e65b473
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDataTable.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkDataTable.h"
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkOnce.h"
+
+#include <cstring>
+
+static void malloc_freeproc(void* context) {
+ sk_free(context);
+}
+
+// Makes empty table
+SkDataTable::SkDataTable() {
+ fCount = 0;
+ fElemSize = 0; // 0 signals that we use fDir instead of fElems
+ fU.fDir = nullptr;
+ fFreeProc = nullptr;
+ fFreeProcContext = nullptr;
+}
+
+SkDataTable::SkDataTable(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* context) {
+ SkASSERT(count > 0);
+
+ fCount = count;
+ fElemSize = elemSize; // non-zero signals we use fElems instead of fDir
+ fU.fElems = (const char*)array;
+ fFreeProc = proc;
+ fFreeProcContext = context;
+}
+
+SkDataTable::SkDataTable(const Dir* dir, int count, FreeProc proc, void* ctx) {
+ SkASSERT(count > 0);
+
+ fCount = count;
+ fElemSize = 0; // 0 signals that we use fDir instead of fElems
+ fU.fDir = dir;
+ fFreeProc = proc;
+ fFreeProcContext = ctx;
+}
+
+SkDataTable::~SkDataTable() {
+ if (fFreeProc) {
+ fFreeProc(fFreeProcContext);
+ }
+}
+
+size_t SkDataTable::atSize(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+
+ if (fElemSize) {
+ return fElemSize;
+ } else {
+ return fU.fDir[index].fSize;
+ }
+}
+
+const void* SkDataTable::at(int index, size_t* size) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+
+ if (fElemSize) {
+ if (size) {
+ *size = fElemSize;
+ }
+ return fU.fElems + index * fElemSize;
+ } else {
+ if (size) {
+ *size = fU.fDir[index].fSize;
+ }
+ return fU.fDir[index].fPtr;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkDataTable> SkDataTable::MakeEmpty() {
+ static SkDataTable* singleton;
+ static SkOnce once;
+ once([]{ singleton = new SkDataTable(); });
+ return sk_ref_sp(singleton);
+}
+
+sk_sp<SkDataTable> SkDataTable::MakeCopyArrays(const void * const * ptrs,
+ const size_t sizes[], int count) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+
+ size_t dataSize = 0;
+ for (int i = 0; i < count; ++i) {
+ dataSize += sizes[i];
+ }
+
+ size_t bufferSize = count * sizeof(Dir) + dataSize;
+ void* buffer = sk_malloc_throw(bufferSize);
+
+ Dir* dir = (Dir*)buffer;
+ char* elem = (char*)(dir + count);
+ for (int i = 0; i < count; ++i) {
+ dir[i].fPtr = elem;
+ dir[i].fSize = sizes[i];
+ memcpy(elem, ptrs[i], sizes[i]);
+ elem += sizes[i];
+ }
+
+ return sk_sp<SkDataTable>(new SkDataTable(dir, count, malloc_freeproc, buffer));
+}
+
+sk_sp<SkDataTable> SkDataTable::MakeCopyArray(const void* array, size_t elemSize, int count) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+
+ size_t bufferSize = elemSize * count;
+ void* buffer = sk_malloc_throw(bufferSize);
+ memcpy(buffer, array, bufferSize);
+
+ return sk_sp<SkDataTable>(new SkDataTable(buffer, elemSize, count, malloc_freeproc, buffer));
+}
+
+sk_sp<SkDataTable> SkDataTable::MakeArrayProc(const void* array, size_t elemSize, int count,
+ FreeProc proc, void* ctx) {
+ if (count <= 0) {
+ return SkDataTable::MakeEmpty();
+ }
+ return sk_sp<SkDataTable>(new SkDataTable(array, elemSize, count, proc, ctx));
+}
diff --git a/gfx/skia/skia/src/core/SkDebug.cpp b/gfx/skia/skia/src/core/SkDebug.cpp
new file mode 100644
index 0000000000..b02ddf7fa9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDebug.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_GOOGLE3)
+void SkDebugfForDumpStackTrace(const char* data, void* unused) {
+ SkDebugf("%s", data);
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkDebugUtils.h b/gfx/skia/skia/src/core/SkDebugUtils.h
new file mode 100644
index 0000000000..9333e837d1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDebugUtils.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDebugUtils_DEFINED
+#define SkDebugUtils_DEFINED
+
+#include "include/core/SkTileMode.h"
+
+static constexpr const char* SkTileModeToStr(SkTileMode tm) {
+ switch (tm) {
+ case SkTileMode::kClamp: return "Clamp";
+ case SkTileMode::kRepeat: return "Repeat";
+ case SkTileMode::kMirror: return "Mirror";
+ case SkTileMode::kDecal: return "Decal";
+ }
+ SkUNREACHABLE;
+}
+
+#endif // SkDebugUtils_DEFINED
diff --git a/gfx/skia/skia/src/core/SkDeferredDisplayList.cpp b/gfx/skia/skia/src/core/SkDeferredDisplayList.cpp
new file mode 100644
index 0000000000..9c9e98cdb6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeferredDisplayList.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkDeferredDisplayList.h"
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "src/base/SkArenaAlloc.h"
+
+#include <utility>
+
+class SkSurfaceCharacterization;
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrRenderTargetProxy.h"
+#include "src/gpu/ganesh/GrRenderTask.h"
+#endif
+
+SkDeferredDisplayList::SkDeferredDisplayList(const SkSurfaceCharacterization& characterization,
+ sk_sp<GrRenderTargetProxy> targetProxy,
+ sk_sp<LazyProxyData> lazyProxyData)
+ : fCharacterization(characterization)
+#if defined(SK_GANESH)
+ , fArenas(true)
+ , fTargetProxy(std::move(targetProxy))
+ , fLazyProxyData(std::move(lazyProxyData))
+#endif
+{
+#if defined(SK_GANESH)
+ SkASSERT(fTargetProxy->isDDLTarget());
+#endif
+}
+
+SkDeferredDisplayList::~SkDeferredDisplayList() {
+#if defined(SK_GANESH) && defined(SK_DEBUG)
+ for (auto& renderTask : fRenderTasks) {
+ SkASSERT(renderTask->unique());
+ }
+#endif
+}
+
+//-------------------------------------------------------------------------------------------------
+#if defined(SK_GANESH)
+
+SkDeferredDisplayList::ProgramIterator::ProgramIterator(GrDirectContext* dContext,
+ SkDeferredDisplayList* ddl)
+ : fDContext(dContext)
+ , fProgramData(ddl->programData())
+ , fIndex(0) {
+}
+
+SkDeferredDisplayList::ProgramIterator::~ProgramIterator() {}
+
+bool SkDeferredDisplayList::ProgramIterator::compile() {
+ if (!fDContext || fIndex < 0 || fIndex >= (int) fProgramData.size()) {
+ return false;
+ }
+
+ return fDContext->priv().compile(fProgramData[fIndex].desc(), fProgramData[fIndex].info());
+}
+
+bool SkDeferredDisplayList::ProgramIterator::done() const {
+ return fIndex >= (int) fProgramData.size();
+}
+
+void SkDeferredDisplayList::ProgramIterator::next() {
+ ++fIndex;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDeferredDisplayListPriv.h b/gfx/skia/skia/src/core/SkDeferredDisplayListPriv.h
new file mode 100644
index 0000000000..d6321cb7bf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeferredDisplayListPriv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDeferredDisplayListPriv_DEFINED
+#define SkDeferredDisplayListPriv_DEFINED
+
+#include "include/core/SkDeferredDisplayList.h"
+
+/*************************************************************************************************/
+/** Class that adds methods to SkDeferredDisplayList that are only intended for use internal to Skia.
+ This class is purely a privileged window into SkDeferredDisplayList. It should never have
+ additional data members or virtual methods. */
+class SkDeferredDisplayListPriv {
+public:
+
+#if defined(SK_GANESH)
+ int numRenderTasks() const {
+ return fDDL->fRenderTasks.size();
+ }
+
+ GrRenderTargetProxy* targetProxy() const {
+ return fDDL->fTargetProxy.get();
+ }
+
+ const SkDeferredDisplayList::LazyProxyData* lazyProxyData() const {
+ return fDDL->fLazyProxyData.get();
+ }
+
+ const skia_private::TArray<GrRecordingContext::ProgramData>& programData() const {
+ return fDDL->programData();
+ }
+
+ const skia_private::TArray<sk_sp<GrRenderTask>>& renderTasks() const {
+ return fDDL->fRenderTasks;
+ }
+#endif
+
+private:
+ explicit SkDeferredDisplayListPriv(SkDeferredDisplayList* ddl) : fDDL(ddl) {}
+ SkDeferredDisplayListPriv& operator=(const SkDeferredDisplayListPriv&) = delete;
+
+ // No taking addresses of this type.
+ const SkDeferredDisplayListPriv* operator&() const;
+ SkDeferredDisplayListPriv* operator&();
+
+ SkDeferredDisplayList* fDDL;
+
+ friend class SkDeferredDisplayList; // to construct/copy this type.
+};
+
+inline SkDeferredDisplayListPriv SkDeferredDisplayList::priv() {
+ return SkDeferredDisplayListPriv(this);
+}
+
+inline const SkDeferredDisplayListPriv SkDeferredDisplayList::priv () const { // NOLINT(readability-const-return-type)
+ return SkDeferredDisplayListPriv(const_cast<SkDeferredDisplayList*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDeferredDisplayListRecorder.cpp b/gfx/skia/skia/src/core/SkDeferredDisplayListRecorder.cpp
new file mode 100644
index 0000000000..91080bd1a3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDeferredDisplayListRecorder.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkDeferredDisplayListRecorder.h"
+
+#include "include/core/SkDeferredDisplayList.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "src/core/SkMessageBus.h"
+
+#if !defined(SK_GANESH)
+SkDeferredDisplayListRecorder::SkDeferredDisplayListRecorder(const SkSurfaceCharacterization&) {}
+
+SkDeferredDisplayListRecorder::~SkDeferredDisplayListRecorder() {}
+
+bool SkDeferredDisplayListRecorder::init() { return false; }
+
+SkCanvas* SkDeferredDisplayListRecorder::getCanvas() { return nullptr; }
+
+sk_sp<SkDeferredDisplayList> SkDeferredDisplayListRecorder::detach() { return nullptr; }
+
+#else
+
+#include "include/core/SkPromiseImageTexture.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrYUVABackendTextures.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrRenderTargetProxy.h"
+#include "src/gpu/ganesh/GrTexture.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/image/SkImage_Gpu.h"
+#include "src/image/SkImage_GpuYUVA.h"
+#include "src/image/SkSurface_Gpu.h"
+
+SkDeferredDisplayListRecorder::SkDeferredDisplayListRecorder(const SkSurfaceCharacterization& c)
+ : fCharacterization(c) {
+ if (fCharacterization.isValid()) {
+ fContext = GrRecordingContextPriv::MakeDDL(fCharacterization.refContextInfo());
+ }
+}
+
+SkDeferredDisplayListRecorder::~SkDeferredDisplayListRecorder() {
+ if (fContext) {
+ auto proxyProvider = fContext->priv().proxyProvider();
+
+ // This allows the uniquely keyed proxies to keep their keys but removes their back
+ // pointer to the about-to-be-deleted proxy provider. The proxies will use their
+ // unique key to reattach to cached versions of themselves or to appropriately tag new
+ // resources (if a cached version was not found). This system operates independent of
+ // the replaying context's proxy provider (i.e., these uniquely keyed proxies will not
+ // appear in the replaying proxy providers uniquely keyed proxy map). This should be fine
+ // since no one else should be trying to reconnect to the orphaned proxies and orphaned
+ // proxies from different DDLs that share the same key should simply reconnect to the
+ // same cached resource.
+ proxyProvider->orphanAllUniqueKeys();
+ }
+}
+
+bool SkDeferredDisplayListRecorder::init() {
+ SkASSERT(fContext);
+ SkASSERT(!fTargetProxy);
+ SkASSERT(!fLazyProxyData);
+ SkASSERT(!fSurface);
+
+ if (!fCharacterization.isValid()) {
+ return false;
+ }
+
+ fLazyProxyData = sk_sp<SkDeferredDisplayList::LazyProxyData>(
+ new SkDeferredDisplayList::LazyProxyData);
+
+ auto proxyProvider = fContext->priv().proxyProvider();
+ const GrCaps* caps = fContext->priv().caps();
+
+ bool usesGLFBO0 = fCharacterization.usesGLFBO0();
+ if (usesGLFBO0) {
+ if (GrBackendApi::kOpenGL != fContext->backend() ||
+ fCharacterization.isTextureable()) {
+ return false;
+ }
+ }
+
+ bool vkRTSupportsInputAttachment = fCharacterization.vkRTSupportsInputAttachment();
+ if (vkRTSupportsInputAttachment && GrBackendApi::kVulkan != fContext->backend()) {
+ return false;
+ }
+
+ if (fCharacterization.vulkanSecondaryCBCompatible()) {
+ // Because of the restrictive API allowed for a GrVkSecondaryCBDrawContext, we know ahead
+ // of time that we don't be able to support certain parameter combinations. Specifically we
+ // fail on usesGLFBO0 since we can't mix GL and Vulkan. We can't have a texturable object.
+ // We can't use it as in input attachment since we don't control the render pass this will
+ // be played into and thus can't force it to have an input attachment and the correct
+ // dependencies. And finally the GrVkSecondaryCBDrawContext always assumes a top left
+ // origin.
+ if (usesGLFBO0 ||
+ vkRTSupportsInputAttachment ||
+ fCharacterization.isTextureable() ||
+ fCharacterization.origin() == kBottomLeft_GrSurfaceOrigin) {
+ return false;
+ }
+ }
+
+ GrColorType grColorType = SkColorTypeToGrColorType(fCharacterization.colorType());
+
+ // What we're doing here is we're creating a lazy proxy to back the SkSurface. The lazy
+ // proxy, when instantiated, will use the GrRenderTarget that backs the SkSurface that the
+ // DDL is being replayed into.
+
+ GrInternalSurfaceFlags surfaceFlags = GrInternalSurfaceFlags::kNone;
+ if (usesGLFBO0) {
+ surfaceFlags |= GrInternalSurfaceFlags::kGLRTFBOIDIs0;
+ } else if (fCharacterization.sampleCount() > 1 && !caps->msaaResolvesAutomatically() &&
+ fCharacterization.isTextureable()) {
+ surfaceFlags |= GrInternalSurfaceFlags::kRequiresManualMSAAResolve;
+ }
+
+ if (vkRTSupportsInputAttachment) {
+ surfaceFlags |= GrInternalSurfaceFlags::kVkRTSupportsInputAttachment;
+ }
+
+ // FIXME: Why do we use GrMipmapped::kNo instead of SkSurfaceCharacterization::fIsMipMapped?
+ static constexpr GrProxyProvider::TextureInfo kTextureInfo{GrMipmapped::kNo,
+ GrTextureType::k2D};
+ const GrProxyProvider::TextureInfo* optionalTextureInfo = nullptr;
+ if (fCharacterization.isTextureable()) {
+ optionalTextureInfo = &kTextureInfo;
+ }
+
+ fTargetProxy = proxyProvider->createLazyRenderTargetProxy(
+ [lazyProxyData = fLazyProxyData](GrResourceProvider* resourceProvider,
+ const GrSurfaceProxy::LazySurfaceDesc&) {
+ // The proxy backing the destination surface had better have been instantiated
+ // prior to this one (i.e., the proxy backing the DDL's surface).
+ // Fulfill this lazy proxy with the destination surface's GrRenderTarget.
+ SkASSERT(lazyProxyData->fReplayDest->peekSurface());
+ auto surface = sk_ref_sp<GrSurface>(lazyProxyData->fReplayDest->peekSurface());
+ return GrSurfaceProxy::LazyCallbackResult(std::move(surface));
+ },
+ fCharacterization.backendFormat(),
+ fCharacterization.dimensions(),
+ fCharacterization.sampleCount(),
+ surfaceFlags,
+ optionalTextureInfo,
+ GrMipmapStatus::kNotAllocated,
+ SkBackingFit::kExact,
+ skgpu::Budgeted::kYes,
+ fCharacterization.isProtected(),
+ fCharacterization.vulkanSecondaryCBCompatible(),
+ GrSurfaceProxy::UseAllocator::kYes);
+
+ if (!fTargetProxy) {
+ return false;
+ }
+ fTargetProxy->priv().setIsDDLTarget();
+
+ auto device = fContext->priv().createDevice(grColorType,
+ fTargetProxy,
+ fCharacterization.refColorSpace(),
+ fCharacterization.origin(),
+ fCharacterization.surfaceProps(),
+ skgpu::ganesh::Device::InitContents::kUninit);
+ if (!device) {
+ return false;
+ }
+
+ fSurface = sk_make_sp<SkSurface_Gpu>(std::move(device));
+ return SkToBool(fSurface.get());
+}
+
+SkCanvas* SkDeferredDisplayListRecorder::getCanvas() {
+ if (!fContext) {
+ return nullptr;
+ }
+
+ if (!fSurface && !this->init()) {
+ return nullptr;
+ }
+
+ return fSurface->getCanvas();
+}
+
+sk_sp<SkDeferredDisplayList> SkDeferredDisplayListRecorder::detach() {
+ if (!fContext || !fTargetProxy) {
+ return nullptr;
+ }
+
+ if (fSurface) {
+ SkCanvas* canvas = fSurface->getCanvas();
+
+ canvas->restoreToCount(0);
+ }
+
+ auto ddl = sk_sp<SkDeferredDisplayList>(new SkDeferredDisplayList(fCharacterization,
+ std::move(fTargetProxy),
+ std::move(fLazyProxyData)));
+
+ fContext->priv().moveRenderTasksToDDL(ddl.get());
+
+ // We want a new lazy proxy target for each recorded DDL so force the (lazy proxy-backed)
+ // SkSurface to be regenerated for each DDL.
+ fSurface = nullptr;
+ return ddl;
+}
+
+#ifndef SK_MAKE_PROMISE_TEXTURE_DISABLE_LEGACY_API
+sk_sp<SkImage> SkDeferredDisplayListRecorder::makePromiseTexture(
+ const GrBackendFormat& backendFormat,
+ int width,
+ int height,
+ GrMipmapped mipmapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureContext textureContext) {
+ if (!fContext) {
+ return nullptr;
+ }
+ return SkImage::MakePromiseTexture(fContext->threadSafeProxy(),
+ backendFormat,
+ {width, height},
+ mipmapped,
+ origin,
+ colorType,
+ alphaType,
+ std::move(colorSpace),
+ textureFulfillProc,
+ textureReleaseProc,
+ textureContext);
+}
+
+sk_sp<SkImage> SkDeferredDisplayListRecorder::makeYUVAPromiseTexture(
+ const GrYUVABackendTextureInfo& backendTextureInfo,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureContext textureContexts[]) {
+ if (!fContext) {
+ return nullptr;
+ }
+ return SkImage::MakePromiseYUVATexture(fContext->threadSafeProxy(),
+ backendTextureInfo,
+ std::move(imageColorSpace),
+ textureFulfillProc,
+ textureReleaseProc,
+ textureContexts);
+}
+#endif // !SK_MAKE_PROMISE_TEXTURE_DISABLE_LEGACY_API
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDescriptor.cpp b/gfx/skia/skia/src/core/SkDescriptor.cpp
new file mode 100644
index 0000000000..827a635241
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDescriptor.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDescriptor.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/chromium/SkChromeRemoteGlyphCache.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <string.h>
+#include <new>
+
+std::unique_ptr<SkDescriptor> SkDescriptor::Alloc(size_t length) {
+ SkASSERT(length >= sizeof(SkDescriptor) && SkAlign4(length) == length);
+ void* allocation = ::operator new(length);
+ return std::unique_ptr<SkDescriptor>(new (allocation) SkDescriptor{});
+}
+
+void SkDescriptor::operator delete(void* p) { ::operator delete(p); }
+void* SkDescriptor::operator new(size_t) {
+ SK_ABORT("Descriptors are created with placement new.");
+}
+
+void SkDescriptor::flatten(SkWriteBuffer& buffer) const {
+ buffer.writePad32(static_cast<const void*>(this), this->fLength);
+}
+
+void* SkDescriptor::addEntry(uint32_t tag, size_t length, const void* data) {
+ SkASSERT(tag);
+ SkASSERT(SkAlign4(length) == length);
+ SkASSERT(this->findEntry(tag, nullptr) == nullptr);
+
+ Entry* entry = (Entry*)((char*)this + fLength);
+ entry->fTag = tag;
+ entry->fLen = SkToU32(length);
+ if (data) {
+ memcpy(entry + 1, data, length);
+ }
+
+ fCount += 1;
+ fLength = SkToU32(fLength + sizeof(Entry) + length);
+ return (entry + 1); // return its data
+}
+
+void SkDescriptor::computeChecksum() {
+ fChecksum = SkDescriptor::ComputeChecksum(this);
+}
+
+const void* SkDescriptor::findEntry(uint32_t tag, uint32_t* length) const {
+ const Entry* entry = (const Entry*)(this + 1);
+ int count = fCount;
+
+ while (--count >= 0) {
+ if (entry->fTag == tag) {
+ if (length) {
+ *length = entry->fLen;
+ }
+ return entry + 1;
+ }
+ entry = (const Entry*)((const char*)(entry + 1) + entry->fLen);
+ }
+ return nullptr;
+}
+
+std::unique_ptr<SkDescriptor> SkDescriptor::copy() const {
+ std::unique_ptr<SkDescriptor> desc = SkDescriptor::Alloc(fLength);
+ memcpy(desc.get(), this, fLength);
+ return desc;
+}
+
+bool SkDescriptor::operator==(const SkDescriptor& other) const {
+ // the first value we should look at is the checksum, so this loop
+ // should terminate early if they descriptors are different.
+ // NOTE: if we wrote a sentinel value at the end of each, we could
+ // remove the aa < stop test in the loop...
+ const uint32_t* aa = (const uint32_t*)this;
+ const uint32_t* bb = (const uint32_t*)&other;
+ const uint32_t* stop = (const uint32_t*)((const char*)aa + fLength);
+ do {
+ if (*aa++ != *bb++)
+ return false;
+ } while (aa < stop);
+ return true;
+}
+
+SkString SkDescriptor::dumpRec() const {
+ const SkScalerContextRec* rec = static_cast<const SkScalerContextRec*>(
+ this->findEntry(kRec_SkDescriptorTag, nullptr));
+
+ SkString result;
+ result.appendf(" Checksum: %x\n", fChecksum);
+ if (rec != nullptr) {
+ result.append(rec->dump());
+ }
+ return result;
+}
+
+uint32_t SkDescriptor::ComputeChecksum(const SkDescriptor* desc) {
+ const uint32_t* ptr = (const uint32_t*)desc + 1; // skip the checksum field
+ size_t len = desc->fLength - sizeof(uint32_t);
+ return SkOpts::hash(ptr, len);
+}
+
+bool SkDescriptor::isValid() const {
+ uint32_t count = fCount;
+ size_t lengthRemaining = this->fLength;
+ if (lengthRemaining < sizeof(SkDescriptor)) {
+ return false;
+ }
+ lengthRemaining -= sizeof(SkDescriptor);
+ size_t offset = sizeof(SkDescriptor);
+
+ while (lengthRemaining > 0 && count > 0) {
+ if (lengthRemaining < sizeof(Entry)) {
+ return false;
+ }
+ lengthRemaining -= sizeof(Entry);
+
+ const Entry* entry = (const Entry*)(reinterpret_cast<const char*>(this) + offset);
+
+ if (lengthRemaining < entry->fLen) {
+ return false;
+ }
+ lengthRemaining -= entry->fLen;
+
+ // rec tags are always a known size.
+ if (entry->fTag == kRec_SkDescriptorTag && entry->fLen != sizeof(SkScalerContextRec)) {
+ return false;
+ }
+
+ offset += sizeof(Entry) + entry->fLen;
+ count--;
+ }
+ return lengthRemaining == 0 && count == 0;
+}
+
+SkAutoDescriptor::SkAutoDescriptor() = default;
+SkAutoDescriptor::SkAutoDescriptor(size_t size) { this->reset(size); }
+SkAutoDescriptor::SkAutoDescriptor(const SkDescriptor& desc) { this->reset(desc); }
+SkAutoDescriptor::SkAutoDescriptor(const SkAutoDescriptor& that) {
+ this->reset(*that.getDesc());
+}
+SkAutoDescriptor& SkAutoDescriptor::operator=(const SkAutoDescriptor& that) {
+ this->reset(*that.getDesc());
+ return *this;
+}
+SkAutoDescriptor::SkAutoDescriptor(SkAutoDescriptor&& that) {
+ if (that.fDesc == (SkDescriptor*)&that.fStorage) {
+ this->reset(*that.getDesc());
+ } else {
+ fDesc = that.fDesc;
+ that.fDesc = nullptr;
+ }
+}
+SkAutoDescriptor& SkAutoDescriptor::operator=(SkAutoDescriptor&& that) {
+ if (that.fDesc == (SkDescriptor*)&that.fStorage) {
+ this->reset(*that.getDesc());
+ } else {
+ this->free();
+ fDesc = that.fDesc;
+ that.fDesc = nullptr;
+ }
+ return *this;
+}
+
+SkAutoDescriptor::~SkAutoDescriptor() { this->free(); }
+
+std::optional<SkAutoDescriptor> SkAutoDescriptor::MakeFromBuffer(SkReadBuffer& buffer) {
+ SkDescriptor descriptorHeader;
+ if (!buffer.readPad32(&descriptorHeader, sizeof(SkDescriptor))) { return {}; }
+
+ // Basic bounds check on header length to make sure that bodyLength calculation does not
+ // underflow.
+ if (descriptorHeader.getLength() < sizeof(SkDescriptor)) { return {}; }
+ uint32_t bodyLength = descriptorHeader.getLength() - sizeof(SkDescriptor);
+
+ // Make sure the fLength makes sense with respect to the incoming data.
+ if (bodyLength > buffer.available()) {
+ return {};
+ }
+
+ SkAutoDescriptor ad{descriptorHeader.getLength()};
+ memcpy(ad.fDesc, &descriptorHeader, sizeof(SkDescriptor));
+ if (!buffer.readPad32(SkTAddOffset<void>(ad.fDesc, sizeof(SkDescriptor)), bodyLength)) {
+ return {};
+ }
+
+// If the fuzzer produces data but the checksum does not match, let it continue. This will boost
+// fuzzing speed. We leave the actual checksum computation in for fuzzing builds to make sure
+// the ComputeChecksum function is covered.
+#if defined(SK_BUILD_FOR_FUZZER)
+ SkDescriptor::ComputeChecksum(ad.getDesc());
+#else
+ if (SkDescriptor::ComputeChecksum(ad.getDesc()) != ad.getDesc()->fChecksum) { return {}; }
+#endif
+ if (!ad.getDesc()->isValid()) { return {}; }
+
+ return {ad};
+}
+
+void SkAutoDescriptor::reset(size_t size) {
+ this->free();
+ if (size <= sizeof(fStorage)) {
+ fDesc = new (&fStorage) SkDescriptor{};
+ } else {
+ fDesc = SkDescriptor::Alloc(size).release();
+ }
+}
+
+void SkAutoDescriptor::reset(const SkDescriptor& desc) {
+ size_t size = desc.getLength();
+ this->reset(size);
+ memcpy(fDesc, &desc, size);
+}
+
+void SkAutoDescriptor::free() {
+ if (fDesc == (SkDescriptor*)&fStorage) {
+ fDesc->~SkDescriptor();
+ } else {
+ delete fDesc;
+ }
+}
+
+
diff --git a/gfx/skia/skia/src/core/SkDescriptor.h b/gfx/skia/skia/src/core/SkDescriptor.h
new file mode 100644
index 0000000000..101ab160ce
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDescriptor.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDescriptor_DEFINED
+#define SkDescriptor_DEFINED
+
+#include <memory>
+#include <new>
+#include <optional>
+
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "src/base/SkBuffer.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkScalerContext.h"
+
+// A variable-length, checksummed key: a fixed header (fChecksum, fLength,
+// fCount) followed by fCount tagged Entry records plus their payloads. Entry
+// payload lengths are multiples of 4 (see the operator== note below). Used to
+// identify/describe scaler contexts; field order of the header is relied on
+// (checksum first, length second).
+class SkDescriptor : SkNoncopyable {
+public:
+    static size_t ComputeOverhead(int entryCount) {
+        SkASSERT(entryCount >= 0);
+        return sizeof(SkDescriptor) + entryCount * sizeof(Entry);
+    }
+
+    // Heap-allocates an uninitialized descriptor of 'length' bytes.
+    static std::unique_ptr<SkDescriptor> Alloc(size_t length);
+
+    //
+    // Ensure the unsized delete is called.
+    void operator delete(void* p);
+    void* operator new(size_t);
+    void* operator new(size_t, void* p) { return p; }
+
+    void flatten(SkWriteBuffer& buffer) const;
+
+    uint32_t getLength() const { return fLength; }
+    // Appends an Entry (tag + length [+ data]) and returns a pointer to its
+    // payload so callers may fill it in when 'data' is null.
+    void* addEntry(uint32_t tag, size_t length, const void* data = nullptr);
+    void computeChecksum();
+
+    // Assumes that getLength <= capacity of this SkDescriptor.
+    bool isValid() const;
+
+#ifdef SK_DEBUG
+    void assertChecksum() const {
+        SkASSERT(SkDescriptor::ComputeChecksum(this) == fChecksum);
+    }
+#endif
+
+    // Returns the payload of the entry with 'tag' (and writes its length to
+    // *length), or nullptr if absent.
+    const void* findEntry(uint32_t tag, uint32_t* length) const;
+
+    std::unique_ptr<SkDescriptor> copy() const;
+
+    // This assumes that all memory added has a length that is a multiple of 4. This is checked
+    // by the assert in addEntry.
+    bool operator==(const SkDescriptor& other) const;
+    bool operator!=(const SkDescriptor& other) const { return !(*this == other); }
+
+    uint32_t getChecksum() const { return fChecksum; }
+
+    struct Entry {
+        uint32_t fTag;
+        uint32_t fLen;
+    };
+
+    uint32_t getCount() const { return fCount; }
+
+    SkString dumpRec() const;
+
+private:
+    SkDescriptor() = default;
+    friend class SkDescriptorTestHelper;
+    friend class SkAutoDescriptor;
+
+    static uint32_t ComputeChecksum(const SkDescriptor* desc);
+
+    uint32_t fChecksum{0};  // must be first
+    uint32_t fLength{sizeof(SkDescriptor)};  // must be second
+    uint32_t fCount{0};
+};
+
+// Owning wrapper around an SkDescriptor with small-buffer optimization:
+// descriptors up to kStorageSize live in the inline fStorage buffer, larger
+// ones are heap-allocated. Copyable and movable.
+class SkAutoDescriptor {
+public:
+    SkAutoDescriptor();
+    explicit SkAutoDescriptor(size_t size);
+    explicit SkAutoDescriptor(const SkDescriptor&);
+    SkAutoDescriptor(const SkAutoDescriptor&);
+    SkAutoDescriptor& operator=(const SkAutoDescriptor&);
+    SkAutoDescriptor(SkAutoDescriptor&&);
+    SkAutoDescriptor& operator=(SkAutoDescriptor&&);
+    ~SkAutoDescriptor();
+
+    // Returns no value if there is an error.
+    static std::optional<SkAutoDescriptor> MakeFromBuffer(SkReadBuffer& buffer);
+
+    // Re-point at storage for 'size' bytes (inline when it fits).
+    void reset(size_t size);
+    // Replace contents with a copy of 'desc'.
+    void reset(const SkDescriptor& desc);
+    SkDescriptor* getDesc() const { SkASSERT(fDesc); return fDesc; }
+
+private:
+    void free();
+    // Inline capacity sized for the common case: header + a rec entry + a
+    // typeface entry + a little slop.
+    static constexpr size_t kStorageSize
+            = sizeof(SkDescriptor)
+            + sizeof(SkDescriptor::Entry) + sizeof(SkScalerContextRec)    // for rec
+            + sizeof(SkDescriptor::Entry) + sizeof(void*)                 // for typeface
+            + 32;   // slop for occasional small extras
+
+    SkDescriptor* fDesc{nullptr};
+    alignas(uint32_t) char fStorage[kStorageSize];
+};
+
+#endif //SkDescriptor_DEFINED
diff --git a/gfx/skia/skia/src/core/SkDevice.cpp b/gfx/skia/skia/src/core/SkDevice.cpp
new file mode 100644
index 0000000000..cfebd98299
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDevice.cpp
@@ -0,0 +1,637 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDevice.h"
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkVertices.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkEnumerate.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/image/SkImage_Base.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+#include "src/text/GlyphRun.h"
+#include "src/utils/SkPatchUtils.h"
+#if defined(SK_GANESH)
+#include "include/private/chromium/Slug.h"
+#endif
+
+// Constructs a device with an identity local-to-device matrix and identity
+// device<->global transforms (i.e. device space == global space initially).
+SkBaseDevice::SkBaseDevice(const SkImageInfo& info, const SkSurfaceProps& surfaceProps)
+        : SkMatrixProvider(/* localToDevice = */ SkMatrix::I())
+        , fInfo(info)
+        , fSurfaceProps(surfaceProps) {
+    fDeviceToGlobal.setIdentity();
+    fGlobalToDevice.setIdentity();
+}
+
+// Installs the device's coordinate system. 'deviceToGlobal'/'globalToDevice'
+// are expected to be inverses of each other; 'bufferOriginX/Y' shift the
+// backing buffer's origin relative to the given device space, folding the
+// translation into all three matrices. Perspective is normalized on each.
+void SkBaseDevice::setDeviceCoordinateSystem(const SkM44& deviceToGlobal,
+                                             const SkM44& globalToDevice,
+                                             const SkM44& localToDevice,
+                                             int bufferOriginX,
+                                             int bufferOriginY) {
+    fDeviceToGlobal = deviceToGlobal;
+    fDeviceToGlobal.normalizePerspective();
+    fGlobalToDevice = globalToDevice;
+    fGlobalToDevice.normalizePerspective();
+
+    fLocalToDevice = localToDevice;
+    fLocalToDevice.normalizePerspective();
+    // Non-zero origin: bake the buffer offset into the transforms.
+    if (bufferOriginX | bufferOriginY) {
+        fDeviceToGlobal.preTranslate(bufferOriginX, bufferOriginY);
+        fGlobalToDevice.postTranslate(-bufferOriginX, -bufferOriginY);
+        fLocalToDevice.postTranslate(-bufferOriginX, -bufferOriginY);
+    }
+    fLocalToDevice33 = fLocalToDevice.asM33();
+    fLocalToDeviceDirty = true;
+}
+
+// Updates the local-to-device matrix from a canvas CTM expressed in global
+// space, by concatenating the stored global-to-device transform.
+void SkBaseDevice::setGlobalCTM(const SkM44& ctm) {
+    fLocalToDevice = ctm;
+    fLocalToDevice.normalizePerspective();
+    // Map from the global CTM state to this device's coordinate system.
+    fLocalToDevice.postConcat(fGlobalToDevice);
+    fLocalToDevice33 = fLocalToDevice.asM33();
+    fLocalToDeviceDirty = true;
+}
+
+// True when device space differs from global space by only an integer
+// translation (so device pixels line up with global pixels).
+bool SkBaseDevice::isPixelAlignedToGlobal() const {
+    // pixelAligned is set to the identity + integer translation of the device-to-global matrix.
+    // If they are equal then the device is by definition pixel aligned.
+    SkM44 pixelAligned = SkM44();
+    pixelAligned.setRC(0, 3, SkScalarFloorToScalar(fDeviceToGlobal.rc(0, 3)));
+    pixelAligned.setRC(1, 3, SkScalarFloorToScalar(fDeviceToGlobal.rc(1, 3)));
+    return pixelAligned == fDeviceToGlobal;
+}
+
+// Deprecated accessor: extracts the integer translation from fDeviceToGlobal.
+// Only valid while the device is pixel-aligned to global space (asserted).
+SkIPoint SkBaseDevice::getOrigin() const {
+    // getOrigin() is deprecated, the old origin has been moved into the fDeviceToGlobal matrix.
+    // This extracts the origin from the matrix, but asserts that a more complicated coordinate
+    // space hasn't been set of the device. This function can be removed once existing use cases
+    // have been updated to use the device-to-global matrix instead or have themselves been removed
+    // (e.g. Android's device-space clip regions are going away, and are not compatible with the
+    // generalized device coordinate system).
+    SkASSERT(this->isPixelAlignedToGlobal());
+    return SkIPoint::Make(SkScalarFloorToInt(fDeviceToGlobal.rc(0, 3)),
+                          SkScalarFloorToInt(fDeviceToGlobal.rc(1, 3)));
+}
+
+// Returns the 3x3 matrix mapping this device's space into dstDevice's space.
+SkMatrix SkBaseDevice::getRelativeTransform(const SkBaseDevice& dstDevice) const {
+    // To get the transform from this space to the other device's, transform from our space to
+    // global and then from global to the other device.
+    return (dstDevice.fGlobalToDevice * fDeviceToGlobal).asM33();
+}
+
+// True when x is exactly representable as an integer (round-trips through
+// sk_float_round2int unchanged).
+static inline bool is_int(float x) {
+    return x == (float) sk_float_round2int(x);
+}
+
+// Draws a region either as its boundary path (when the transform, paint, or
+// AA state make per-rect drawing incorrect) or, on the fast path, as a series
+// of axis-aligned rects.
+void SkBaseDevice::drawRegion(const SkRegion& region, const SkPaint& paint) {
+    const SkMatrix& localToDevice = this->localToDevice();
+    bool isNonTranslate = localToDevice.getType() & ~(SkMatrix::kTranslate_Mask);
+    bool complexPaint = paint.getStyle() != SkPaint::kFill_Style || paint.getMaskFilter() ||
+                        paint.getPathEffect();
+    // AA only matters when the translation is fractional; integer translations
+    // keep region rects pixel-aligned.
+    bool antiAlias = paint.isAntiAlias() && (!is_int(localToDevice.getTranslateX()) ||
+                                             !is_int(localToDevice.getTranslateY()));
+    if (isNonTranslate || complexPaint || antiAlias) {
+        SkPath path;
+        region.getBoundaryPath(&path);
+        path.setIsVolatile(true);
+        return this->drawPath(path, paint, true);
+    }
+
+    SkRegion::Iterator it(region);
+    while (!it.done()) {
+        this->drawRect(SkRect::Make(it.rect()), paint);
+        it.next();
+    }
+}
+
+// Default arc implementation: converts the arc to a path (via
+// SkPathPriv::CreateDrawArcPath) and draws that.
+void SkBaseDevice::drawArc(const SkRect& oval, SkScalar startAngle,
+                           SkScalar sweepAngle, bool useCenter, const SkPaint& paint) {
+    SkPath path;
+    bool isFillNoPathEffect = SkPaint::kFill_Style == paint.getStyle() && !paint.getPathEffect();
+    SkPathPriv::CreateDrawArcPath(&path, oval, startAngle, sweepAngle, useCenter,
+                                  isFillNoPathEffect);
+    this->drawPath(path, paint);
+}
+
+// Default double-round-rect implementation: both rrects in one path with
+// even-odd fill, so the inner rrect is carved out of the outer.
+void SkBaseDevice::drawDRRect(const SkRRect& outer,
+                              const SkRRect& inner, const SkPaint& paint) {
+    SkPath path;
+    path.addRRect(outer);
+    path.addRRect(inner);
+    path.setFillType(SkPathFillType::kEvenOdd);
+    path.setIsVolatile(true);
+
+    this->drawPath(path, paint, true);
+}
+
+// Default Coons-patch implementation: tessellates the patch into a vertex
+// mesh (LOD chosen from the current transform) and draws it as vertices.
+void SkBaseDevice::drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+                             const SkPoint texCoords[4], sk_sp<SkBlender> blender,
+                             const SkPaint& paint) {
+    SkISize lod = SkPatchUtils::GetLevelOfDetail(cubics, &this->localToDevice());
+    auto vertices = SkPatchUtils::MakeVertices(cubics, colors, texCoords, lod.width(), lod.height(),
+                                               this->imageInfo().colorSpace());
+    if (vertices) {
+        this->drawVertices(vertices.get(), std::move(blender), paint);
+    }
+}
+
+// Draws a 9-patch-style lattice: each cell is either drawn as a solid rect
+// (fixed-color cells, or 1x1-pixel source cells read back via readPixels) or
+// via drawImageRect for general cells.
+void SkBaseDevice::drawImageLattice(const SkImage* image, const SkCanvas::Lattice& lattice,
+                                    const SkRect& dst, SkFilterMode filter, const SkPaint& paint) {
+    SkLatticeIter iter(lattice, dst);
+
+    SkRect srcR, dstR;
+    SkColor c;
+    bool isFixedColor = false;
+    const SkImageInfo info = SkImageInfo::Make(1, 1, kBGRA_8888_SkColorType, kUnpremul_SkAlphaType);
+
+    while (iter.next(&srcR, &dstR, &isFixedColor, &c)) {
+         // TODO: support this fast-path for GPU images
+         if (isFixedColor || (srcR.width() <= 1.0f && srcR.height() <= 1.0f &&
+                              image->readPixels(nullptr, info, &c, 4, srcR.fLeft, srcR.fTop))) {
+              // Fast draw with drawRect, if this is a patch containing a single color
+              // or if this is a patch containing a single pixel.
+              if (0 != c || !paint.isSrcOver()) {
+                   // Fold the cell color's alpha into a copy of the paint.
+                   SkPaint paintCopy(paint);
+                   int alpha = SkAlphaMul(SkColorGetA(c), SkAlpha255To256(paint.getAlpha()));
+                   paintCopy.setColor(SkColorSetA(c, alpha));
+                   this->drawRect(dstR, paintCopy);
+              }
+        } else {
+            this->drawImageRect(image, &srcR, dstR, SkSamplingOptions(filter), paint,
+                                SkCanvas::kStrict_SrcRectConstraint);
+        }
+    }
+}
+
+// Expands one quad into two triangles (0,1,2) and (0,2,3), writing 6 points
+// into 'tris'; returns the pointer just past the written points.
+static SkPoint* quad_to_tris(SkPoint tris[6], const SkPoint quad[4]) {
+    tris[0] = quad[0];
+    tris[1] = quad[1];
+    tris[2] = quad[2];
+
+    tris[3] = quad[0];
+    tris[4] = quad[2];
+    tris[5] = quad[3];
+
+    return tris + 6;
+}
+
+// Default atlas implementation: converts each RSXform'd sprite quad into two
+// triangles (positions, tex coords, optional per-quad color replicated to all
+// 6 vertices) and draws the whole set as one vertex mesh.
+void SkBaseDevice::drawAtlas(const SkRSXform xform[],
+                             const SkRect tex[],
+                             const SkColor colors[],
+                             int quadCount,
+                             sk_sp<SkBlender> blender,
+                             const SkPaint& paint) {
+    const int triCount = quadCount << 1;
+    const int vertexCount = triCount * 3;
+    uint32_t flags = SkVertices::kHasTexCoords_BuilderFlag;
+    if (colors) {
+        flags |= SkVertices::kHasColors_BuilderFlag;
+    }
+    SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, vertexCount, 0, flags);
+
+    SkPoint* vPos = builder.positions();
+    SkPoint* vTex = builder.texCoords();
+    SkColor* vCol = builder.colors();
+    for (int i = 0; i < quadCount; ++i) {
+        SkPoint tmp[4];
+        // Positions: the transformed sprite quad.
+        xform[i].toQuad(tex[i].width(), tex[i].height(), tmp);
+        vPos = quad_to_tris(vPos, tmp);
+
+        // Tex coords: the untransformed atlas rect.
+        tex[i].toQuad(tmp);
+        vTex = quad_to_tris(vTex, tmp);
+
+        if (colors) {
+            SkOpts::memset32(vCol, colors[i], 6);
+            vCol += 6;
+        }
+    }
+    this->drawVertices(builder.detach().get(), std::move(blender), paint);
+}
+
+// Default edge-AA quad implementation: all-or-nothing AA (per-edge AA is not
+// supported here), drawing either the optional 4-point clip as a path or the
+// rect directly.
+void SkBaseDevice::drawEdgeAAQuad(const SkRect& r, const SkPoint clip[4], SkCanvas::QuadAAFlags aa,
+                                  const SkColor4f& color, SkBlendMode mode) {
+    SkPaint paint;
+    paint.setColor4f(color);
+    paint.setBlendMode(mode);
+    paint.setAntiAlias(aa == SkCanvas::kAll_QuadAAFlags);
+
+    if (clip) {
+        // Draw the clip directly as a quad since it's a filled color with no local coords
+        SkPath clipPath;
+        clipPath.addPoly(clip, 4, true);
+        this->drawPath(clipPath, paint);
+    } else {
+        this->drawRect(r, paint);
+    }
+}
+
+// Default edge-AA image-set implementation: draws each entry with
+// drawImageRect, applying per-entry alpha, optional pre-view matrix (via a
+// temporary local-to-device override), and optional 4-point dst clip (via a
+// real clip). Per-edge AA degrades to all-or-nothing.
+void SkBaseDevice::drawEdgeAAImageSet(const SkCanvas::ImageSetEntry images[], int count,
+                                      const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+                                      const SkSamplingOptions& sampling, const SkPaint& paint,
+                                      SkCanvas::SrcRectConstraint constraint) {
+    SkASSERT(paint.getStyle() == SkPaint::kFill_Style);
+    SkASSERT(!paint.getPathEffect());
+
+    SkPaint entryPaint = paint;
+    const SkM44 baseLocalToDevice = this->localToDevice44();
+    int clipIndex = 0;
+    for (int i = 0; i < count; ++i) {
+        // TODO: Handle per-edge AA. Right now this mirrors the SkiaRenderer component of Chrome
+        // which turns off antialiasing unless all four edges should be antialiased. This avoids
+        // seaming in tiled composited layers.
+        entryPaint.setAntiAlias(images[i].fAAFlags == SkCanvas::kAll_QuadAAFlags);
+        entryPaint.setAlphaf(paint.getAlphaf() * images[i].fAlpha);
+
+        bool needsRestore = false;
+        SkASSERT(images[i].fMatrixIndex < 0 || preViewMatrices);
+        if (images[i].fMatrixIndex >= 0) {
+            this->save();
+            this->setLocalToDevice(baseLocalToDevice *
+                                   SkM44(preViewMatrices[images[i].fMatrixIndex]));
+            needsRestore = true;
+        }
+
+        SkASSERT(!images[i].fHasClip || dstClips);
+        if (images[i].fHasClip) {
+            // Since drawImageRect requires a srcRect, the dst clip is implemented as a true clip
+            if (!needsRestore) {
+                this->save();
+                needsRestore = true;
+            }
+            SkPath clipPath;
+            clipPath.addPoly(dstClips + clipIndex, 4, true);
+            this->clipPath(clipPath, SkClipOp::kIntersect, entryPaint.isAntiAlias());
+            // Each clipped entry consumes 4 points from dstClips.
+            clipIndex += 4;
+        }
+        this->drawImageRect(images[i].fImage.get(), &images[i].fSrcRect, images[i].fDstRect,
+                            sampling, entryPaint, constraint);
+        if (needsRestore) {
+            this->restoreLocal(baseLocalToDevice);
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Default implementation: let the drawable draw itself onto the canvas.
+void SkBaseDevice::drawDrawable(SkCanvas* canvas, SkDrawable* drawable, const SkMatrix* matrix) {
+    drawable->draw(canvas, matrix);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Special-image hooks: no-op/nullptr defaults for devices that don't support
+// special images; subclasses override. snapSpecial() with no args snaps the
+// whole device bounds.
+void SkBaseDevice::drawSpecial(SkSpecialImage*, const SkMatrix&, const SkSamplingOptions&,
+                               const SkPaint&) {}
+sk_sp<SkSpecialImage> SkBaseDevice::makeSpecial(const SkBitmap&) { return nullptr; }
+sk_sp<SkSpecialImage> SkBaseDevice::makeSpecial(const SkImage*) { return nullptr; }
+sk_sp<SkSpecialImage> SkBaseDevice::snapSpecial(const SkIRect&, bool forceCopy) { return nullptr; }
+sk_sp<SkSpecialImage> SkBaseDevice::snapSpecialScaled(const SkIRect& subset,
+                                                      const SkISize& dstDims) {
+    return nullptr;
+}
+sk_sp<SkSpecialImage> SkBaseDevice::snapSpecial() {
+    return this->snapSpecial(SkIRect::MakeWH(this->width(), this->height()));
+}
+
+// Composites another device onto this one: snapshot it as a special image and
+// draw it with the transform relating the two device spaces.
+void SkBaseDevice::drawDevice(SkBaseDevice* device, const SkSamplingOptions& sampling,
+                              const SkPaint& paint) {
+    sk_sp<SkSpecialImage> deviceImage = device->snapSpecial();
+    if (deviceImage) {
+        this->drawSpecial(deviceImage.get(), device->getRelativeTransform(*this), sampling, paint);
+    }
+}
+
+// Applies 'filter' to 'src' (in the layer space described by 'mapping',
+// clipped to this device's bounds) and draws the filtered result, offset by
+// whatever translation the filter produced.
+void SkBaseDevice::drawFilteredImage(const skif::Mapping& mapping,
+                                     SkSpecialImage* src,
+                                     SkColorType colorType,
+                                     const SkImageFilter* filter,
+                                     const SkSamplingOptions& sampling,
+                                     const SkPaint& paint) {
+    SkASSERT(!paint.getImageFilter() && !paint.getMaskFilter());
+
+    skif::LayerSpace<SkIRect> targetOutput = mapping.deviceToLayer(
+            skif::DeviceSpace<SkIRect>(this->devClipBounds()));
+
+    if (colorType == kUnknown_SkColorType) {
+        colorType = kRGBA_8888_SkColorType;
+    }
+
+    // getImageFilterCache returns a bare image filter cache pointer that must be ref'ed until the
+    // filter's filterImage(ctx) function returns.
+    sk_sp<SkImageFilterCache> cache(this->getImageFilterCache());
+    skif::Context ctx(mapping, targetOutput, cache.get(), colorType, this->imageInfo().colorSpace(),
+                      skif::FilterResult(sk_ref_sp(src)));
+
+    SkIPoint offset;
+    sk_sp<SkSpecialImage> result = as_IFB(filter)->filterImage(ctx).imageAndOffset(&offset);
+    if (result) {
+        SkMatrix deviceMatrixWithOffset = mapping.layerToDevice();
+        deviceMatrixWithOffset.preTranslate(offset.fX, offset.fY);
+        this->drawSpecial(result.get(), deviceMatrixWithOffset, sampling, paint);
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Pixel access entry points: the public read/write/access/peek methods simply
+// forward to the on* virtual hooks, whose base implementations report no
+// pixel access (return false).
+bool SkBaseDevice::readPixels(const SkPixmap& pm, int x, int y) {
+    return this->onReadPixels(pm, x, y);
+}
+
+bool SkBaseDevice::writePixels(const SkPixmap& pm, int x, int y) {
+    return this->onWritePixels(pm, x, y);
+}
+
+bool SkBaseDevice::onWritePixels(const SkPixmap&, int, int) {
+    return false;
+}
+
+bool SkBaseDevice::onReadPixels(const SkPixmap&, int x, int y) {
+    return false;
+}
+
+bool SkBaseDevice::accessPixels(SkPixmap* pmap) {
+    // Allow callers to pass nullptr when they only care about the bool result.
+    SkPixmap tempStorage;
+    if (nullptr == pmap) {
+        pmap = &tempStorage;
+    }
+    return this->onAccessPixels(pmap);
+}
+
+bool SkBaseDevice::peekPixels(SkPixmap* pmap) {
+    // Allow callers to pass nullptr when they only care about the bool result.
+    SkPixmap tempStorage;
+    if (nullptr == pmap) {
+        pmap = &tempStorage;
+    }
+    return this->onPeekPixels(pmap);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+#include "src/base/SkUtils.h"
+
+// Wraps 'shader' with the inverse of 'lm' as a local matrix, so a subsequent
+// CTM change by 'lm' leaves the shader's effective mapping unchanged. Returns
+// nullptr if shader is null or lm is non-invertible.
+static sk_sp<SkShader> make_post_inverse_lm(const SkShader* shader, const SkMatrix& lm) {
+    SkMatrix inverse_lm;
+    if (!shader || !lm.invert(&inverse_lm)) {
+        return nullptr;
+    }
+
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)  // b/256873449
+    // Legacy impl for old concat order. This does not work for arbitrary shader DAGs (when there is
+    // no single leaf local matrix).
+
+    // LMs pre-compose. In order to push a post local matrix, we peel off any existing local matrix
+    // and set a new local matrix of inverse_lm * prev_local_matrix.
+    SkMatrix prev_local_matrix;
+    const auto nested_shader = as_SB(shader)->makeAsALocalMatrixShader(&prev_local_matrix);
+    if (nested_shader) {
+        // unfurl the shader
+        shader = nested_shader.get();
+    }
+
+    return shader->makeWithLocalMatrix(inverse_lm * prev_local_matrix);
+#endif
+
+    return shader->makeWithLocalMatrix(inverse_lm);
+}
+
+// Text entry point: bails on a non-finite transform, dispatches plain runs to
+// onDrawGlyphRunList, and rewrites RSXform runs into per-glyph draws first.
+void SkBaseDevice::drawGlyphRunList(SkCanvas* canvas,
+                                    const sktext::GlyphRunList& glyphRunList,
+                                    const SkPaint& initialPaint,
+                                    const SkPaint& drawingPaint) {
+    if (!this->localToDevice().isFinite()) {
+        return;
+    }
+
+    if (!glyphRunList.hasRSXForm()) {
+        this->onDrawGlyphRunList(canvas, glyphRunList, initialPaint, drawingPaint);
+    } else {
+        this->simplifyGlyphRunRSXFormAndRedraw(canvas, glyphRunList, initialPaint, drawingPaint);
+    }
+}
+
+// Rewrites runs containing per-glyph RSXforms: runs without scaled rotations
+// are redrawn as-is; otherwise each glyph is drawn individually as a
+// single-glyph run at the origin, with the glyph's RSXform concatenated onto
+// the canvas and the paint's shader wrapped with the inverse local matrix so
+// shading is unaffected by the per-glyph CTM trick.
+void SkBaseDevice::simplifyGlyphRunRSXFormAndRedraw(SkCanvas* canvas,
+                                                    const sktext::GlyphRunList& glyphRunList,
+                                                    const SkPaint& initialPaint,
+                                                    const SkPaint& drawingPaint) {
+    for (const sktext::GlyphRun& run : glyphRunList) {
+        if (run.scaledRotations().empty()) {
+            auto subList = glyphRunList.builder()->makeGlyphRunList(
+                    run, drawingPaint, {0, 0});
+            this->drawGlyphRunList(canvas, subList, initialPaint, drawingPaint);
+        } else {
+            SkPoint origin = glyphRunList.origin();
+            SkPoint sharedPos{0, 0};    // we're at the origin
+            SkGlyphID sharedGlyphID;
+            // One-glyph run whose glyph ID is rewritten each iteration below.
+            sktext::GlyphRun glyphRun {
+                    run.font(),
+                    SkSpan<const SkPoint>{&sharedPos, 1},
+                    SkSpan<const SkGlyphID>{&sharedGlyphID, 1},
+                    SkSpan<const char>{},
+                    SkSpan<const uint32_t>{},
+                    SkSpan<const SkVector>{}
+            };
+
+            for (auto [i, glyphID, pos] : SkMakeEnumerate(run.source())) {
+                sharedGlyphID = glyphID;
+                auto [scos, ssin] = run.scaledRotations()[i];
+                SkRSXform rsxForm = SkRSXform::Make(scos, ssin, pos.x(), pos.y());
+                SkMatrix glyphToLocal;
+                glyphToLocal.setRSXform(rsxForm).postTranslate(origin.x(), origin.y());
+
+                // We want to rotate each glyph by the rsxform, but we don't want to rotate "space"
+                // (i.e. the shader that cares about the ctm) so we have to undo our little ctm
+                // trick with a localmatrixshader so that the shader draws as if there was no
+                // change to the ctm.
+                SkPaint invertingPaint{drawingPaint};
+                invertingPaint.setShader(
+                        make_post_inverse_lm(drawingPaint.getShader(), glyphToLocal));
+                SkAutoCanvasRestore acr(canvas, true);
+                canvas->concat(SkM44(glyphToLocal));
+                sktext::GlyphRunList subList = glyphRunList.builder()->makeGlyphRunList(
+                        glyphRun, drawingPaint, {0, 0});
+                this->drawGlyphRunList(canvas, subList, initialPaint, invertingPaint);
+            }
+        }
+    }
+}
+
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE))
+// Slug hooks (GPU text): base device cannot create slugs (returns nullptr)
+// and aborts if asked to draw one; GPU devices override both.
+sk_sp<sktext::gpu::Slug> SkBaseDevice::convertGlyphRunListToSlug(
+        const sktext::GlyphRunList& glyphRunList,
+        const SkPaint& initialPaint,
+        const SkPaint& drawingPaint) {
+    return nullptr;
+}
+
+void SkBaseDevice::drawSlug(SkCanvas*, const sktext::gpu::Slug*, const SkPaint&) {
+    SK_ABORT("Slug drawing not supported.");
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+// Default: this device cannot create compatible surfaces.
+sk_sp<SkSurface> SkBaseDevice::makeSurface(SkImageInfo const&, SkSurfaceProps const&) {
+    return nullptr;
+}
+
+// Chooses glyph-rendering flags from the destination color space: linear
+// gamma needs only the contrast boost, otherwise keep the fake-gamma hack too.
+SkScalerContextFlags SkBaseDevice::scalerContextFlags() const {
+    // If we're doing linear blending, then we can disable the gamma hacks.
+    // Otherwise, leave them on. In either case, we still want the contrast boost:
+    // TODO: Can we be even smarter about mask gamma based on the dest transfer function?
+    const SkColorSpace* const cs = fInfo.colorSpace();
+    if (cs && cs->gammaIsLinear()) {
+        return SkScalerContextFlags::kBoostContrast;
+    } else {
+        return SkScalerContextFlags::kFakeGammaAndBoostContrast;
+    }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+// A device with no backing pixels (unknown color/alpha type) that still
+// tracks transforms and a conservative clip stack; used where only geometry
+// bookkeeping is needed.
+SkNoPixelsDevice::SkNoPixelsDevice(const SkIRect& bounds, const SkSurfaceProps& props)
+        : SkNoPixelsDevice(bounds, props, nullptr) {}
+
+SkNoPixelsDevice::SkNoPixelsDevice(const SkIRect& bounds, const SkSurfaceProps& props,
+                                   sk_sp<SkColorSpace> colorSpace)
+        : SkBaseDevice(SkImageInfo::Make(bounds.size(), kUnknown_SkColorType, kUnknown_SkAlphaType,
+                                         std::move(colorSpace)), props) {
+    // this fails if we enable this assert: DiscardableImageMapTest.GetDiscardableImagesInRectMaxImage
+    //SkASSERT(bounds.width() >= 0 && bounds.height() >= 0);
+
+    this->setOrigin(SkM44(), bounds.left(), bounds.top());
+    this->resetClipStack();
+}
+
+// Save/restore use deferred copy-on-write: onSave only bumps a counter on the
+// top clip entry; writableClip() materializes a copy when a deferred save is
+// pending, and onRestore either decrements the counter or pops the entry.
+void SkNoPixelsDevice::onSave() {
+    SkASSERT(!fClipStack.empty());
+    fClipStack.back().fDeferredSaveCount++;
+}
+
+void SkNoPixelsDevice::onRestore() {
+    SkASSERT(!fClipStack.empty());
+    if (fClipStack.back().fDeferredSaveCount > 0) {
+        fClipStack.back().fDeferredSaveCount--;
+    } else {
+        fClipStack.pop_back();
+        SkASSERT(!fClipStack.empty());
+    }
+}
+
+// Returns a mutable top-of-stack clip entry, materializing the deferred save
+// (copying the current state into a new entry) when one is pending.
+SkNoPixelsDevice::ClipState& SkNoPixelsDevice::writableClip() {
+    SkASSERT(!fClipStack.empty());
+    ClipState& current = fClipStack.back();
+    if (current.fDeferredSaveCount > 0) {
+        current.fDeferredSaveCount--;
+        // Stash current state in case 'current' moves during a resize
+        SkIRect bounds = current.fClipBounds;
+        bool aa = current.fIsAA;
+        bool rect = current.fIsRect;
+        return fClipStack.emplace_back(bounds, aa, rect);
+    } else {
+        return current;
+    }
+}
+
+// Clip hooks: each shape is reduced to its bounds and merged into the
+// conservative ClipState via op(). 'fillsBounds' records whether the shape
+// fully covers those bounds (true for rects, rect-rrects, rect-regions).
+void SkNoPixelsDevice::onClipRect(const SkRect& rect, SkClipOp op, bool aa) {
+    this->writableClip().op(op, this->localToDevice44(), rect,
+                            aa, /*fillsBounds=*/true);
+}
+
+void SkNoPixelsDevice::onClipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
+    this->writableClip().op(op, this->localToDevice44(), rrect.getBounds(),
+                            aa, /*fillsBounds=*/rrect.isRect());
+}
+
+void SkNoPixelsDevice::onClipPath(const SkPath& path, SkClipOp op, bool aa) {
+    // Toggle op if the path is inverse filled
+    if (path.isInverseFillType()) {
+        op = (op == SkClipOp::kDifference ? SkClipOp::kIntersect : SkClipOp::kDifference);
+    }
+    this->writableClip().op(op, this->localToDevice44(), path.getBounds(),
+                            aa, /*fillsBounds=*/false);
+}
+
+void SkNoPixelsDevice::onClipRegion(const SkRegion& globalRgn, SkClipOp op) {
+    // Regions are specified in global space, so map via globalToDevice.
+    this->writableClip().op(op, this->globalToDevice(), SkRect::Make(globalRgn.getBounds()),
+                            /*isAA=*/false, /*fillsBounds=*/globalRgn.isRect());
+}
+
+void SkNoPixelsDevice::onClipShader(sk_sp<SkShader> shader) {
+    // A shader clip can't be represented by bounds; just mark it non-rect.
+    this->writableClip().fIsRect = false;
+}
+
+// Replaces the clip with 'rect' (given in global space), mapped into device
+// space and intersected with the device bounds; result is an exact rect.
+void SkNoPixelsDevice::onReplaceClip(const SkIRect& rect) {
+    SkIRect deviceRect = SkMatrixPriv::MapRect(this->globalToDevice(), SkRect::Make(rect)).round();
+    if (!deviceRect.intersect(this->bounds())) {
+        deviceRect.setEmpty();
+    }
+    auto& clip = this->writableClip();
+    clip.fClipBounds = deviceRect;
+    clip.fIsRect = true;
+    clip.fIsAA = false;
+}
+
+// Classifies the current clip as empty, an exact rect, or complex.
+SkBaseDevice::ClipType SkNoPixelsDevice::onGetClipType() const {
+    const auto& clip = this->clip();
+    if (clip.fClipBounds.isEmpty()) {
+        return ClipType::kEmpty;
+    } else if (clip.fIsRect) {
+        return ClipType::kRect;
+    } else {
+        return ClipType::kComplex;
+    }
+}
+
+// Merges one clip element (its transformed bounds) into this conservative
+// state. Intersections shrink fClipBounds (roundOut under AA to stay
+// conservative); differences only shrink the bounds when SkRectPriv::Subtract
+// can express the result as a single rect (roundIn under AA), otherwise the
+// state just stops being a rect. fIsAA is sticky once any AA element arrives.
+void SkNoPixelsDevice::ClipState::op(SkClipOp op, const SkM44& transform, const SkRect& bounds,
+                                     bool isAA, bool fillsBounds) {
+    const bool isRect = fillsBounds && SkMatrixPriv::IsScaleTranslateAsM33(transform);
+    fIsAA |= isAA;
+
+    SkRect devBounds = bounds.isEmpty() ? SkRect::MakeEmpty()
+                                        : SkMatrixPriv::MapRect(transform, bounds);
+    if (op == SkClipOp::kIntersect) {
+        if (!fClipBounds.intersect(isAA ? devBounds.roundOut() : devBounds.round())) {
+            fClipBounds.setEmpty();
+        }
+        // A rectangular clip remains rectangular if the intersection is a rect
+        fIsRect &= isRect;
+    } else if (isRect) {
+        // Conservatively, we can leave the clip bounds unchanged and respect the difference op.
+        // But, if we're subtracting out an axis-aligned rectangle that fully spans our existing
+        // clip on an axis, we can shrink the clip bounds.
+        SkASSERT(op == SkClipOp::kDifference);
+        SkIRect difference;
+        if (SkRectPriv::Subtract(fClipBounds, isAA ? devBounds.roundIn() : devBounds.round(),
+                                 &difference)) {
+            fClipBounds = difference;
+        } else {
+            // The difference couldn't be represented as a rect
+            fIsRect = false;
+        }
+    } else {
+        // A non-rect shape was applied
+        fIsRect = false;
+    }
+}
diff --git a/gfx/skia/skia/src/core/SkDevice.h b/gfx/skia/skia/src/core/SkDevice.h
new file mode 100644
index 0000000000..562351a9da
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDevice.h
@@ -0,0 +1,637 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDevice_DEFINED
+#define SkDevice_DEFINED
+
+#include "include/core/SkBlender.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScalerContext.h"
+#include "src/shaders/SkShaderBase.h"
+
+namespace sktext {
+class GlyphRun;
+class GlyphRunList;
+}
+class SkBitmap;
+class SkColorSpace;
+class SkMesh;
+struct SkDrawShadowRec;
+class SkImageFilter;
+class SkImageFilterCache;
+struct SkIRect;
+class SkRasterHandleAllocator;
+class SkSpecialImage;
+
+namespace skif { class Mapping; }
+namespace skgpu::ganesh {
+class Device;
+}
+namespace skgpu::graphite {
+class Device;
+}
+namespace sktext::gpu {
+class SDFTControl;
+}
+
+struct SkStrikeDeviceInfo {
+ const SkSurfaceProps fSurfaceProps;
+ const SkScalerContextFlags fScalerContextFlags;
+    // This is a pointer so this can be compiled without GPU support (SK_GANESH / SK_GRAPHITE).
+ const sktext::gpu::SDFTControl* const fSDFTControl;
+};
+
+class SkBaseDevice : public SkRefCnt, public SkMatrixProvider {
+public:
+ SkBaseDevice(const SkImageInfo&, const SkSurfaceProps&);
+
+ /**
+ * Return ImageInfo for this device. If the canvas is not backed by pixels
+ * (cpu or gpu), then the info's ColorType will be kUnknown_SkColorType.
+ */
+ const SkImageInfo& imageInfo() const { return fInfo; }
+
+ /**
+ * Return SurfaceProps for this device.
+ */
+ const SkSurfaceProps& surfaceProps() const {
+ return fSurfaceProps;
+ }
+
+ SkScalerContextFlags scalerContextFlags() const;
+
+ virtual SkStrikeDeviceInfo strikeDeviceInfo() const {
+ return {fSurfaceProps, this->scalerContextFlags(), nullptr};
+ }
+
+ SkIRect bounds() const { return SkIRect::MakeWH(this->width(), this->height()); }
+
+ /**
+ * Return the bounds of the device in the coordinate space of the root
+ * canvas. The root device will have its top-left at 0,0, but other devices
+ * such as those associated with saveLayer may have a non-zero origin.
+ */
+ void getGlobalBounds(SkIRect* bounds) const {
+ SkASSERT(bounds);
+ *bounds = SkMatrixPriv::MapRect(fDeviceToGlobal, SkRect::Make(this->bounds())).roundOut();
+ }
+
+ SkIRect getGlobalBounds() const {
+ SkIRect bounds;
+ this->getGlobalBounds(&bounds);
+ return bounds;
+ }
+
+ /**
+ * Returns the bounding box of the current clip, in this device's
+ * coordinate space. No pixels outside of these bounds will be touched by
+ * draws unless the clip is further modified (at which point this will
+ * return the updated bounds).
+ */
+ SkIRect devClipBounds() const { return this->onDevClipBounds(); }
+
+ int width() const {
+ return this->imageInfo().width();
+ }
+
+ int height() const {
+ return this->imageInfo().height();
+ }
+
+ bool isOpaque() const {
+ return this->imageInfo().isOpaque();
+ }
+
+ bool writePixels(const SkPixmap&, int x, int y);
+
+ /**
+ * Try to get write-access to the pixels behind the device. If successful, this returns true
+ * and fills-out the pixmap parameter. On success it also bumps the genID of the underlying
+ * bitmap.
+ *
+ * On failure, returns false and ignores the pixmap parameter.
+ */
+ bool accessPixels(SkPixmap* pmap);
+
+ /**
+ * Try to get read-only-access to the pixels behind the device. If successful, this returns
+ * true and fills-out the pixmap parameter.
+ *
+ * On failure, returns false and ignores the pixmap parameter.
+ */
+ bool peekPixels(SkPixmap*);
+
+ /**
+ * Return the device's coordinate space transform: this maps from the device's coordinate space
+ * into the global canvas' space (or root device space). This includes the translation
+ * necessary to account for the device's origin.
+ */
+ const SkM44& deviceToGlobal() const { return fDeviceToGlobal; }
+ /**
+ * Return the inverse of getDeviceToGlobal(), mapping from the global canvas' space (or root
+ * device space) into this device's coordinate space.
+ */
+ const SkM44& globalToDevice() const { return fGlobalToDevice; }
+ /**
+ * DEPRECATED: This asserts that 'getDeviceToGlobal' is a translation matrix with integer
+ * components. In the future some SkDevices will have more complex device-to-global transforms,
+ * so getDeviceToGlobal() or getRelativeTransform() should be used instead.
+ */
+ SkIPoint getOrigin() const;
+ /**
+ * Returns true when this device's pixel grid is axis aligned with the global coordinate space,
+ * and any relative translation between the two spaces is in integer pixel units.
+ */
+ bool isPixelAlignedToGlobal() const;
+ /**
+ * Get the transformation from this device's coordinate system to the provided device space.
+ * This transform can be used to draw this device into the provided device, such that once
+ * that device is drawn to the root device, the net effect will be that this device's contents
+ * have been transformed by the global CTM.
+ */
+ SkMatrix getRelativeTransform(const SkBaseDevice&) const;
+
+ virtual void* getRasterHandle() const { return nullptr; }
+
+ const SkMatrixProvider& asMatrixProvider() const { return *this; }
+
+ void save() { this->onSave(); }
+ void restore(const SkM44& ctm) {
+ this->onRestore();
+ this->setGlobalCTM(ctm);
+ }
+ void restoreLocal(const SkM44& localToDevice) {
+ this->onRestore();
+ this->setLocalToDevice(localToDevice);
+ }
+ void clipRect(const SkRect& rect, SkClipOp op, bool aa) {
+ this->onClipRect(rect, op, aa);
+ }
+ void clipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
+ this->onClipRRect(rrect, op, aa);
+ }
+ void clipPath(const SkPath& path, SkClipOp op, bool aa) {
+ this->onClipPath(path, op, aa);
+ }
+ void clipShader(sk_sp<SkShader> sh, SkClipOp op) {
+ sh = as_SB(sh)->makeWithCTM(this->localToDevice());
+ if (op == SkClipOp::kDifference) {
+ sh = as_SB(sh)->makeInvertAlpha();
+ }
+ this->onClipShader(std::move(sh));
+ }
+ void clipRegion(const SkRegion& region, SkClipOp op) {
+ this->onClipRegion(region, op);
+ }
+ void replaceClip(const SkIRect& rect) {
+ this->onReplaceClip(rect);
+ }
+
+ bool clipIsWideOpen() const {
+ return this->onClipIsWideOpen();
+ }
+
+ void setLocalToDevice(const SkM44& localToDevice) {
+ fLocalToDevice = localToDevice;
+ fLocalToDevice33 = fLocalToDevice.asM33();
+ fLocalToDeviceDirty = true;
+ }
+ void setGlobalCTM(const SkM44& ctm);
+ virtual void validateDevBounds(const SkIRect&) {}
+
+ virtual bool android_utils_clipWithStencil() { return false; }
+
+ virtual skgpu::ganesh::Device* asGaneshDevice() { return nullptr; }
+ virtual skgpu::graphite::Device* asGraphiteDevice() { return nullptr; }
+
+ // Ensure that non-RSXForm runs are passed to onDrawGlyphRunList.
+ void drawGlyphRunList(SkCanvas*,
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint);
+
+ // Snap the 'subset' contents from this device, possibly as a read-only view. If 'forceCopy'
+ // is true then the returned image's pixels must not be affected by subsequent draws into the
+ // device. When 'forceCopy' is false, the image can be a view into the device's pixels
+ // (avoiding a copy for performance, at the expense of safety). Default returns null.
+ virtual sk_sp<SkSpecialImage> snapSpecial(const SkIRect& subset, bool forceCopy = false);
+ // Can return null if unable to perform scaling as part of the copy, even if snapSpecial() w/o
+ // scaling would succeed.
+ virtual sk_sp<SkSpecialImage> snapSpecialScaled(const SkIRect& subset, const SkISize& dstDims);
+ // Get a view of the entire device's current contents as an image.
+ sk_sp<SkSpecialImage> snapSpecial();
+
+protected:
+ enum TileUsage {
+ kPossible_TileUsage, //!< the created device may be drawn tiled
+ kNever_TileUsage, //!< the created device will never be drawn tiled
+ };
+
+ struct TextFlags {
+ uint32_t fFlags; // SkPaint::getFlags()
+ };
+
+ virtual void onSave() {}
+ virtual void onRestore() {}
+ virtual void onClipRect(const SkRect& rect, SkClipOp, bool aa) {}
+ virtual void onClipRRect(const SkRRect& rrect, SkClipOp, bool aa) {}
+ virtual void onClipPath(const SkPath& path, SkClipOp, bool aa) {}
+ virtual void onClipShader(sk_sp<SkShader>) {}
+ virtual void onClipRegion(const SkRegion& deviceRgn, SkClipOp) {}
+ virtual void onReplaceClip(const SkIRect& rect) {}
+ virtual bool onClipIsAA() const = 0;
+ virtual bool onClipIsWideOpen() const = 0;
+ virtual void onAsRgnClip(SkRegion*) const = 0;
+ enum class ClipType {
+ kEmpty,
+ kRect,
+ kComplex
+ };
+ virtual ClipType onGetClipType() const = 0;
+
+    // This should strive to be as tight as possible, ideally not just mapping
+    // the global clip bounds through fGlobalToDevice.
+ virtual SkIRect onDevClipBounds() const = 0;
+
+ /** These are called inside the per-device-layer loop for each draw call.
+ When these are called, we have already applied any saveLayer operations,
+ and are handling any looping from the paint.
+ */
+ virtual void drawPaint(const SkPaint& paint) = 0;
+ virtual void drawPoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint) = 0;
+ virtual void drawRect(const SkRect& r,
+ const SkPaint& paint) = 0;
+ virtual void drawRegion(const SkRegion& r,
+ const SkPaint& paint);
+ virtual void drawOval(const SkRect& oval,
+ const SkPaint& paint) = 0;
+ /** By the time this is called we know that abs(sweepAngle) is in the range [0, 360). */
+ virtual void drawArc(const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, const SkPaint& paint);
+ virtual void drawRRect(const SkRRect& rr,
+ const SkPaint& paint) = 0;
+
+ // Default impl calls drawPath()
+ virtual void drawDRRect(const SkRRect& outer,
+ const SkRRect& inner, const SkPaint&);
+
+ /**
+ * If pathIsMutable, then the implementation is allowed to cast path to a
+ * non-const pointer and modify it in place (as an optimization). Canvas
+ * may do this to implement helpers such as drawOval, by placing a temp
+ * path on the stack to hold the representation of the oval.
+ */
+ virtual void drawPath(const SkPath& path,
+ const SkPaint& paint,
+ bool pathIsMutable = false) = 0;
+
+ virtual void drawImageRect(const SkImage*, const SkRect* src, const SkRect& dst,
+ const SkSamplingOptions&, const SkPaint&,
+ SkCanvas::SrcRectConstraint) = 0;
+ virtual void drawImageLattice(const SkImage*, const SkCanvas::Lattice&,
+ const SkRect& dst, SkFilterMode, const SkPaint&);
+
+ /**
+ * If skipColorXform is true, then the implementation should assume that the provided
+ * vertex colors are already in the destination color space.
+ */
+ virtual void drawVertices(const SkVertices*,
+ sk_sp<SkBlender>,
+ const SkPaint&,
+ bool skipColorXform = false) = 0;
+#ifdef SK_ENABLE_SKSL
+ virtual void drawMesh(const SkMesh& mesh, sk_sp<SkBlender>, const SkPaint&) = 0;
+#endif
+ virtual void drawShadow(const SkPath&, const SkDrawShadowRec&);
+
+ // default implementation calls drawVertices
+ virtual void drawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], sk_sp<SkBlender>, const SkPaint& paint);
+
+ // default implementation calls drawVertices
+ virtual void drawAtlas(const SkRSXform[], const SkRect[], const SkColor[], int count,
+ sk_sp<SkBlender>, const SkPaint&);
+
+ virtual void drawAnnotation(const SkRect&, const char[], SkData*) {}
+
+ // Default impl always calls drawRect() with a solid-color paint, setting it to anti-aliased
+ // only when all edge flags are set. If there's a clip region, it draws that using drawPath,
+ // or uses clipPath().
+ virtual void drawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aaFlags, const SkColor4f& color,
+ SkBlendMode mode);
+ // Default impl uses drawImageRect per entry, being anti-aliased only when an entry's edge flags
+ // are all set. If there's a clip region, it will be applied using clipPath().
+ virtual void drawEdgeAAImageSet(const SkCanvas::ImageSetEntry[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkSamplingOptions&, const SkPaint&,
+ SkCanvas::SrcRectConstraint);
+
+ virtual void drawDrawable(SkCanvas*, SkDrawable*, const SkMatrix*);
+
+ // Only called with glyphRunLists that do not contain RSXForm.
+ virtual void onDrawGlyphRunList(SkCanvas*,
+ const sktext::GlyphRunList&,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint) = 0;
+
+ // Slug handling routines.
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE))
+ virtual sk_sp<sktext::gpu::Slug> convertGlyphRunListToSlug(
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint);
+ virtual void drawSlug(SkCanvas*, const sktext::gpu::Slug* slug, const SkPaint& drawingPaint);
+#endif
+
+ /**
+ * The SkDevice passed will be an SkDevice which was returned by a call to
+     * onCreateDevice on this device with kNever_TileUsage.
+ *
+ * The default implementation calls snapSpecial() and drawSpecial() with the relative transform
+ * from the input device to this device. The provided SkPaint cannot have a mask filter or
+ * image filter, and any shader is ignored.
+ */
+ virtual void drawDevice(SkBaseDevice*, const SkSamplingOptions&, const SkPaint&);
+
+ /**
+ * Draw the special image's subset to this device, subject to the given matrix transform instead
+ * of the device's current local to device matrix.
+ */
+ virtual void drawSpecial(SkSpecialImage*, const SkMatrix& localToDevice,
+ const SkSamplingOptions&, const SkPaint&);
+
+ /**
+ * Evaluate 'filter' and draw the final output into this device using 'paint'. The 'mapping'
+ * defines the parameter-to-layer space transform used to evaluate the image filter on 'src',
+ * and the layer-to-device space transform that is used to draw the result into this device.
+ * Since 'mapping' fully specifies the transform, this draw function ignores the current
+ * local-to-device matrix (i.e. just like drawSpecial and drawDevice).
+ *
+ * The final paint must not have an image filter or mask filter set on it; a shader is ignored.
+ * The provided color type will be used for any intermediate surfaces that need to be created as
+ * part of filter evaluation. It does not have to be src's color type or this Device's type.
+ */
+ void drawFilteredImage(const skif::Mapping& mapping, SkSpecialImage* src, SkColorType ct,
+ const SkImageFilter*, const SkSamplingOptions&, const SkPaint&);
+
+ virtual sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&);
+ virtual sk_sp<SkSpecialImage> makeSpecial(const SkImage*);
+
+ virtual void setImmutable() {}
+
+ bool readPixels(const SkPixmap&, int x, int y);
+
+ virtual sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&);
+ virtual bool onPeekPixels(SkPixmap*) { return false; }
+
+ /**
+ * The caller is responsible for "pre-clipping" the dst. The impl can assume that the dst
+ * image at the specified x,y offset will fit within the device's bounds.
+ *
+ * This is explicitly asserted in readPixels(), the public way to call this.
+ */
+ virtual bool onReadPixels(const SkPixmap&, int x, int y);
+
+ /**
+ * The caller is responsible for "pre-clipping" the src. The impl can assume that the src
+ * image at the specified x,y offset will fit within the device's bounds.
+ *
+ * This is explicitly asserted in writePixelsDirect(), the public way to call this.
+ */
+ virtual bool onWritePixels(const SkPixmap&, int x, int y);
+
+ virtual bool onAccessPixels(SkPixmap*) { return false; }
+
+ struct CreateInfo {
+ CreateInfo(const SkImageInfo& info,
+ SkPixelGeometry geo,
+ TileUsage tileUsage,
+ SkRasterHandleAllocator* allocator)
+ : fInfo(info)
+ , fTileUsage(tileUsage)
+ , fPixelGeometry(geo)
+ , fAllocator(allocator)
+ {}
+
+ const SkImageInfo fInfo;
+ const TileUsage fTileUsage;
+ const SkPixelGeometry fPixelGeometry;
+ SkRasterHandleAllocator* fAllocator = nullptr;
+ };
+
+ /**
+ * Create a new device based on CreateInfo. If the paint is not null, then it represents a
+ * preview of how the new device will be composed with its creator device (this).
+ *
+ * The subclass may be handed this device in drawDevice(), so it must always return
+ * a device that it knows how to draw, and that it knows how to identify if it is not of the
+ * same subclass (since drawDevice is passed a SkBaseDevice*). If the subclass cannot fulfill
+ * that contract (e.g. PDF cannot support some settings on the paint) it should return NULL,
+ * and the caller may then decide to explicitly create a bitmapdevice, knowing that later
+ * it could not call drawDevice with it (but it could call drawSprite or drawBitmap).
+ */
+ virtual SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) {
+ return nullptr;
+ }
+
+ // SkCanvas uses NoPixelsDevice when onCreateDevice fails; but then it needs to be able to
+ // inspect a layer's device to know if calling drawDevice() later is allowed.
+ virtual bool isNoPixelsDevice() const { return false; }
+
+ // Returns whether or not localToDevice() has changed since the last call to this function.
+ bool checkLocalToDeviceDirty() {
+ bool wasDirty = fLocalToDeviceDirty;
+ fLocalToDeviceDirty = false;
+ return wasDirty;
+ }
+
+private:
+ friend class SkAndroidFrameworkUtils;
+ friend class SkCanvas;
+ friend class SkDraw;
+ friend class SkDrawBase;
+ friend class SkSurface_Raster;
+ friend class DeviceTestingAccess;
+
+ void simplifyGlyphRunRSXFormAndRedraw(SkCanvas*,
+ const sktext::GlyphRunList&,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint);
+
+ // used to change the backend's pixels (and possibly config/rowbytes)
+ // but cannot change the width/height, so there should be no change to
+ // any clip information.
+ // TODO: move to SkBitmapDevice
+ virtual void replaceBitmapBackendForRasterSurface(const SkBitmap&) {}
+
+ virtual bool forceConservativeRasterClip() const { return false; }
+
+ // Configure the device's coordinate spaces, specifying both how its device image maps back to
+ // the global space (via 'deviceToGlobal') and the initial CTM of the device (via
+ // 'localToDevice', i.e. what geometry drawn into this device will be transformed with).
+ //
+    // (bufferOriginX, bufferOriginY) defines where the (0,0) pixel of the device's backing
+    // buffer is anchored in the device space. The final device-to-global matrix stored by the
+    // SkDevice will include a pre-translation by T(bufferOriginX, bufferOriginY), and the final
+    // local-to-device matrix will have a post-translation of T(-bufferOriginX, -bufferOriginY).
+ void setDeviceCoordinateSystem(const SkM44& deviceToGlobal,
+ const SkM44& globalToDevice,
+ const SkM44& localToDevice,
+ int bufferOriginX,
+ int bufferOriginY);
+ // Convenience to configure the device to be axis-aligned with the root canvas, but with a
+ // unique origin.
+ void setOrigin(const SkM44& globalCTM, int x, int y) {
+ this->setDeviceCoordinateSystem(SkM44(), SkM44(), globalCTM, x, y);
+ }
+
+ virtual SkImageFilterCache* getImageFilterCache() { return nullptr; }
+
+ friend class SkNoPixelsDevice;
+ friend class SkBitmapDevice;
+ void privateResize(int w, int h) {
+ *const_cast<SkImageInfo*>(&fInfo) = fInfo.makeWH(w, h);
+ }
+
+ const SkImageInfo fInfo;
+ const SkSurfaceProps fSurfaceProps;
+ // fDeviceToGlobal and fGlobalToDevice are inverses of each other; there are never that many
+ // SkDevices, so pay the memory cost to avoid recalculating the inverse.
+ SkM44 fDeviceToGlobal;
+ SkM44 fGlobalToDevice;
+
+ // fLocalToDevice (inherited from SkMatrixProvider) is the device CTM, not the global CTM
+ // It maps from local space to the device's coordinate space.
+ // fDeviceToGlobal * fLocalToDevice will match the canvas' CTM.
+ //
+ // setGlobalCTM and setLocalToDevice are intentionally not virtual for performance reasons.
+ // However, track a dirty bit for subclasses that want to defer local-to-device dependent
+ // calculations until needed for a clip or draw.
+ bool fLocalToDeviceDirty = true;
+
+ using INHERITED = SkRefCnt;
+};
+
+class SkNoPixelsDevice : public SkBaseDevice {
+public:
+ SkNoPixelsDevice(const SkIRect& bounds, const SkSurfaceProps& props);
+ SkNoPixelsDevice(const SkIRect& bounds, const SkSurfaceProps& props,
+ sk_sp<SkColorSpace> colorSpace);
+
+ void resetForNextPicture(const SkIRect& bounds) {
+ //SkASSERT(bounds.width() >= 0 && bounds.height() >= 0);
+ this->privateResize(bounds.width(), bounds.height());
+ this->setOrigin(SkM44(), bounds.left(), bounds.top());
+ this->resetClipStack();
+ }
+
+protected:
+ // SkNoPixelsDevice tracks the clip conservatively in order to respond to some queries as
+ // accurately as possible while emphasizing performance
+ void onSave() override;
+ void onRestore() override;
+ void onClipRect(const SkRect& rect, SkClipOp op, bool aa) override;
+ void onClipRRect(const SkRRect& rrect, SkClipOp op, bool aa) override;
+ void onClipPath(const SkPath& path, SkClipOp op, bool aa) override;
+ void onClipRegion(const SkRegion& globalRgn, SkClipOp op) override;
+ void onClipShader(sk_sp<SkShader> shader) override;
+ void onReplaceClip(const SkIRect& rect) override;
+ bool onClipIsAA() const override { return this->clip().fIsAA; }
+ bool onClipIsWideOpen() const override {
+ return this->clip().fIsRect &&
+ this->onDevClipBounds() == this->bounds();
+ }
+ void onAsRgnClip(SkRegion* rgn) const override {
+ rgn->setRect(this->onDevClipBounds());
+ }
+ ClipType onGetClipType() const override;
+ SkIRect onDevClipBounds() const override { return this->clip().fClipBounds; }
+
+ void drawPaint(const SkPaint& paint) override {}
+ void drawPoints(SkCanvas::PointMode, size_t, const SkPoint[], const SkPaint&) override {}
+ void drawImageRect(const SkImage*, const SkRect*, const SkRect&,
+ const SkSamplingOptions&, const SkPaint&,
+ SkCanvas::SrcRectConstraint) override {}
+ void drawRect(const SkRect&, const SkPaint&) override {}
+ void drawOval(const SkRect&, const SkPaint&) override {}
+ void drawRRect(const SkRRect&, const SkPaint&) override {}
+ void drawPath(const SkPath&, const SkPaint&, bool) override {}
+ void drawDevice(SkBaseDevice*, const SkSamplingOptions&, const SkPaint&) override {}
+ void drawVertices(const SkVertices*, sk_sp<SkBlender>, const SkPaint&, bool) override {}
+#ifdef SK_ENABLE_SKSL
+ void drawMesh(const SkMesh&, sk_sp<SkBlender>, const SkPaint&) override {}
+#endif
+
+#if defined(SK_GANESH)
+ void drawSlug(SkCanvas*, const sktext::gpu::Slug*, const SkPaint&) override {}
+#endif
+
+ void onDrawGlyphRunList(
+ SkCanvas*, const sktext::GlyphRunList&, const SkPaint&, const SkPaint&) override {}
+
+ bool isNoPixelsDevice() const override { return true; }
+
+private:
+ struct ClipState {
+ SkIRect fClipBounds;
+ int fDeferredSaveCount;
+ bool fIsAA;
+ bool fIsRect;
+
+ ClipState(const SkIRect& bounds, bool isAA, bool isRect)
+ : fClipBounds(bounds)
+ , fDeferredSaveCount(0)
+ , fIsAA(isAA)
+ , fIsRect(isRect) {}
+
+ void op(SkClipOp op, const SkM44& transform, const SkRect& bounds,
+ bool isAA, bool fillsBounds);
+ };
+
+ const ClipState& clip() const { return fClipStack.back(); }
+ ClipState& writableClip();
+
+ void resetClipStack() {
+ fClipStack.clear();
+ fClipStack.emplace_back(this->bounds(), /*isAA=*/false, /*isRect=*/true);
+ }
+
+ SkSTArray<4, ClipState> fClipStack;
+
+ using INHERITED = SkBaseDevice;
+};
+
+class SkAutoDeviceTransformRestore : SkNoncopyable {
+public:
+ SkAutoDeviceTransformRestore(SkBaseDevice* device, const SkMatrix& localToDevice)
+ : fDevice(device)
+ , fPrevLocalToDevice(device->localToDevice())
+ {
+ fDevice->setLocalToDevice(SkM44(localToDevice));
+ }
+ ~SkAutoDeviceTransformRestore() {
+ fDevice->setLocalToDevice(fPrevLocalToDevice);
+ }
+
+private:
+ SkBaseDevice* fDevice;
+ const SkM44 fPrevLocalToDevice;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp b/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp
new file mode 100644
index 0000000000..828ba25768
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDistanceFieldGen.cpp
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkPointPriv.h"
+
+#include <utility>
+
+using namespace skia_private;
+
+#if !defined(SK_DISABLE_SDF_TEXT)
+
+struct DFData {
+ float fAlpha; // alpha value of source texel
+ float fDistSq; // distance squared to nearest (so far) edge texel
+ SkPoint fDistVector; // distance vector to nearest (so far) edge texel
+};
+
+enum NeighborFlags {
+ kLeft_NeighborFlag = 0x01,
+ kRight_NeighborFlag = 0x02,
+ kTopLeft_NeighborFlag = 0x04,
+ kTop_NeighborFlag = 0x08,
+ kTopRight_NeighborFlag = 0x10,
+ kBottomLeft_NeighborFlag = 0x20,
+ kBottom_NeighborFlag = 0x40,
+ kBottomRight_NeighborFlag = 0x80,
+ kAll_NeighborFlags = 0xff,
+
+ kNeighborFlagCount = 8
+};
+
+// We treat an "edge" as a place where we cross from >=128 to <128, or vice versa, or
+// where we have two non-zero pixels that are <128.
+// 'neighborFlags' is used to limit the directions in which we test to avoid indexing
+// outside of the image
+static bool found_edge(const unsigned char* imagePtr, int width, int neighborFlags) {
+ // the order of these should match the neighbor flags above
+ const int kNum8ConnectedNeighbors = 8;
+ const int offsets[8] = {-1, 1, -width-1, -width, -width+1, width-1, width, width+1 };
+ SkASSERT(kNum8ConnectedNeighbors == kNeighborFlagCount);
+
+ // search for an edge
+ unsigned char currVal = *imagePtr;
+ unsigned char currCheck = (currVal >> 7);
+ for (int i = 0; i < kNum8ConnectedNeighbors; ++i) {
+ unsigned char neighborVal;
+ if ((1 << i) & neighborFlags) {
+ const unsigned char* checkPtr = imagePtr + offsets[i];
+ neighborVal = *checkPtr;
+ } else {
+ neighborVal = 0;
+ }
+ unsigned char neighborCheck = (neighborVal >> 7);
+ SkASSERT(currCheck == 0 || currCheck == 1);
+ SkASSERT(neighborCheck == 0 || neighborCheck == 1);
+ // if sharp transition
+ if (currCheck != neighborCheck ||
+ // or both <128 and >0
+ (!currCheck && !neighborCheck && currVal && neighborVal)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void init_glyph_data(DFData* data, unsigned char* edges, const unsigned char* image,
+ int dataWidth, int dataHeight,
+ int imageWidth, int imageHeight,
+ int pad) {
+ data += pad*dataWidth;
+ data += pad;
+ edges += (pad*dataWidth + pad);
+
+ for (int j = 0; j < imageHeight; ++j) {
+ for (int i = 0; i < imageWidth; ++i) {
+ if (255 == *image) {
+ data->fAlpha = 1.0f;
+ } else {
+ data->fAlpha = (*image)*0.00392156862f; // 1/255
+ }
+ int checkMask = kAll_NeighborFlags;
+ if (i == 0) {
+ checkMask &= ~(kLeft_NeighborFlag|kTopLeft_NeighborFlag|kBottomLeft_NeighborFlag);
+ }
+ if (i == imageWidth-1) {
+ checkMask &= ~(kRight_NeighborFlag|kTopRight_NeighborFlag|kBottomRight_NeighborFlag);
+ }
+ if (j == 0) {
+ checkMask &= ~(kTopLeft_NeighborFlag|kTop_NeighborFlag|kTopRight_NeighborFlag);
+ }
+ if (j == imageHeight-1) {
+ checkMask &= ~(kBottomLeft_NeighborFlag|kBottom_NeighborFlag|kBottomRight_NeighborFlag);
+ }
+ if (found_edge(image, imageWidth, checkMask)) {
+ *edges = 255; // using 255 makes for convenient debug rendering
+ }
+ ++data;
+ ++image;
+ ++edges;
+ }
+ data += 2*pad;
+ edges += 2*pad;
+ }
+}
+
+// from Gustavson (2011)
+// computes the distance to an edge given an edge normal vector and a pixel's alpha value
+// assumes that direction has been pre-normalized
+static float edge_distance(const SkPoint& direction, float alpha) {
+ float dx = direction.fX;
+ float dy = direction.fY;
+ float distance;
+ if (SkScalarNearlyZero(dx) || SkScalarNearlyZero(dy)) {
+ distance = 0.5f - alpha;
+ } else {
+ // this is easier if we treat the direction as being in the first octant
+ // (other octants are symmetrical)
+ dx = SkScalarAbs(dx);
+ dy = SkScalarAbs(dy);
+ if (dx < dy) {
+ using std::swap;
+ swap(dx, dy);
+ }
+
+ // a1 = 0.5*dy/dx is the smaller fractional area chopped off by the edge
+ // to avoid the divide, we just consider the numerator
+ float a1num = 0.5f*dy;
+
+ // we now compute the approximate distance, depending where the alpha falls
+ // relative to the edge fractional area
+
+ // if 0 <= alpha < a1
+ if (alpha*dx < a1num) {
+ // TODO: find a way to do this without square roots?
+ distance = 0.5f*(dx + dy) - SkScalarSqrt(2.0f*dx*dy*alpha);
+ // if a1 <= alpha <= 1 - a1
+ } else if (alpha*dx < (dx - a1num)) {
+ distance = (0.5f - alpha)*dx;
+ // if 1 - a1 < alpha <= 1
+ } else {
+ // TODO: find a way to do this without square roots?
+ distance = -0.5f*(dx + dy) + SkScalarSqrt(2.0f*dx*dy*(1.0f - alpha));
+ }
+ }
+
+ return distance;
+}
+
+static void init_distances(DFData* data, unsigned char* edges, int width, int height) {
+ // skip one pixel border
+ DFData* currData = data;
+ DFData* prevData = data - width;
+ DFData* nextData = data + width;
+
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ if (*edges) {
+ // we should not be in the one-pixel outside band
+ SkASSERT(i > 0 && i < width-1 && j > 0 && j < height-1);
+ // gradient will point from low to high
+ // +y is down in this case
+ // i.e., if you're outside, gradient points towards edge
+ // if you're inside, gradient points away from edge
+ SkPoint currGrad;
+ currGrad.fX = (prevData+1)->fAlpha - (prevData-1)->fAlpha
+ + SK_ScalarSqrt2*(currData+1)->fAlpha
+ - SK_ScalarSqrt2*(currData-1)->fAlpha
+ + (nextData+1)->fAlpha - (nextData-1)->fAlpha;
+ currGrad.fY = (nextData-1)->fAlpha - (prevData-1)->fAlpha
+ + SK_ScalarSqrt2*nextData->fAlpha
+ - SK_ScalarSqrt2*prevData->fAlpha
+ + (nextData+1)->fAlpha - (prevData+1)->fAlpha;
+ SkPointPriv::SetLengthFast(&currGrad, 1.0f);
+
+ // init squared distance to edge and distance vector
+ float dist = edge_distance(currGrad, currData->fAlpha);
+ currGrad.scale(dist, &currData->fDistVector);
+ currData->fDistSq = dist*dist;
+ } else {
+ // init distance to "far away"
+ currData->fDistSq = 2000000.f;
+ currData->fDistVector.fX = 1000.f;
+ currData->fDistVector.fY = 1000.f;
+ }
+ ++currData;
+ ++prevData;
+ ++nextData;
+ ++edges;
+ }
+ }
+}
+
+// Danielsson's 8SSEDT
+
+// first stage forward pass
+// (forward in Y, forward in X)
+static void F1(DFData* curr, int width) {
+ // upper left
+ DFData* check = curr - width-1;
+ SkPoint distVec = check->fDistVector;
+ float distSq = check->fDistSq - 2.0f*(distVec.fX + distVec.fY - 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // up
+ check = curr - width;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq - 2.0f*distVec.fY + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // upper right
+ check = curr - width+1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq + 2.0f*(distVec.fX - distVec.fY + 1.0f);
+ if (distSq < curr->fDistSq) {
+ distVec.fX += 1.0f;
+ distVec.fY -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+
+ // left
+ check = curr - 1;
+ distVec = check->fDistVector;
+ distSq = check->fDistSq - 2.0f*distVec.fX + 1.0f;
+ if (distSq < curr->fDistSq) {
+ distVec.fX -= 1.0f;
+ curr->fDistSq = distSq;
+ curr->fDistVector = distVec;
+ }
+}
+
+// second stage forward pass
+// (forward in Y, backward in X)
+static void F2(DFData* curr, int width) {
+    // Scanning right-to-left, only the right neighbor has been updated
+    // by this pass, so it is the single candidate checked here.
+    // right: step v = (+1,0), so delta = 2*x + 1
+    DFData* check = curr + 1;
+    SkPoint distVec = check->fDistVector;
+    float distSq = check->fDistSq + 2.0f*distVec.fX + 1.0f;
+    if (distSq < curr->fDistSq) {
+        distVec.fX += 1.0f;
+        curr->fDistSq = distSq;
+        curr->fDistVector = distVec;
+    }
+}
+
+// first stage backward pass
+// (backward in Y, forward in X)
+static void B1(DFData* curr, int width) {
+    // Scanning left-to-right in the backward-Y sweep; only the left
+    // neighbor is a candidate here (rows below are handled by B2).
+    // left: step v = (-1,0), so delta = -2*x + 1
+    DFData* check = curr - 1;
+    SkPoint distVec = check->fDistVector;
+    float distSq = check->fDistSq - 2.0f*distVec.fX + 1.0f;
+    if (distSq < curr->fDistSq) {
+        distVec.fX -= 1.0f;
+        curr->fDistSq = distSq;
+        curr->fDistVector = distVec;
+    }
+}
+
+// second stage backward pass
+// (backward in Y, backwards in X)
+static void B2(DFData* curr, int width) {
+    // Mirror of F1 for the backward-Y sweep: propagate from the right
+    // neighbor and the row below, again via (d + v)^2 = d^2 + 2*d.v + |v|^2.
+
+    // right: step v = (+1,0), so delta = 2*x + 1
+    DFData* check = curr + 1;
+    SkPoint distVec = check->fDistVector;
+    float distSq = check->fDistSq + 2.0f*distVec.fX + 1.0f;
+    if (distSq < curr->fDistSq) {
+        distVec.fX += 1.0f;
+        curr->fDistSq = distSq;
+        curr->fDistVector = distVec;
+    }
+
+    // bottom left: step v = (-1,+1), so delta = -2*(x - y) + 2
+    check = curr + width-1;
+    distVec = check->fDistVector;
+    distSq = check->fDistSq - 2.0f*(distVec.fX - distVec.fY - 1.0f);
+    if (distSq < curr->fDistSq) {
+        distVec.fX -= 1.0f;
+        distVec.fY += 1.0f;
+        curr->fDistSq = distSq;
+        curr->fDistVector = distVec;
+    }
+
+    // bottom: step v = (0,+1), so delta = 2*y + 1
+    check = curr + width;
+    distVec = check->fDistVector;
+    distSq = check->fDistSq + 2.0f*distVec.fY + 1.0f;
+    if (distSq < curr->fDistSq) {
+        distVec.fY += 1.0f;
+        curr->fDistSq = distSq;
+        curr->fDistVector = distVec;
+    }
+
+    // bottom right: step v = (+1,+1), so delta = 2*(x + y) + 2
+    check = curr + width+1;
+    distVec = check->fDistVector;
+    distSq = check->fDistSq + 2.0f*(distVec.fX + distVec.fY + 1.0f);
+    if (distSq < curr->fDistSq) {
+        distVec.fX += 1.0f;
+        distVec.fY += 1.0f;
+        curr->fDistSq = distSq;
+        curr->fDistVector = distVec;
+    }
+}
+
+// enable this to output edge data rather than the distance field
+#define DUMP_EDGE 0
+
+#if !DUMP_EDGE
+template <int distanceMagnitude>
+static unsigned char pack_distance_field_val(float dist) {
+    // The distance field is constructed as unsigned char values, so that the zero value is at 128,
+    // Beside 128, we have 128 values in range [0, 128), but only 127 values in range (128, 255].
+    // So we multiply distanceMagnitude by 127/128 at the latter range to avoid overflow.
+    // Note the sign flip (-dist): callers pass negative distances for "inside"
+    // texels, which therefore land in the upper half (above 128) after packing.
+    dist = SkTPin<float>(-dist, -distanceMagnitude, distanceMagnitude * 127.0f / 128.0f);
+
+    // Scale into the positive range for unsigned distance.
+    dist += distanceMagnitude;
+
+    // Scale into unsigned char range.
+    // Round to place negative and positive values as equally as possible around 128
+    // (which represents zero).
+    return (unsigned char)SkScalarRoundToInt(dist / (2 * distanceMagnitude) * 256.0f);
+}
+#endif
+
+// assumes a padded 8-bit image and distance field
+// width and height are the original width and height of the image
+// assumes a padded 8-bit image and distance field
+// width and height are the original width and height of the image
+// Runs the full pipeline: init glyph/edge data, seed initial distances,
+// perform the two-sweep Danielsson 8SSEDT, then pack results into
+// unsigned-char distance-field values (or raw edge data if DUMP_EDGE).
+static bool generate_distance_field_from_image(unsigned char* distanceField,
+                                               const unsigned char* copyPtr,
+                                               int width, int height) {
+    SkASSERT(distanceField);
+    SkASSERT(copyPtr);
+
+    // we expand our temp data by one more on each side to simplify
+    // the scanning code -- will always be treated as infinitely far away
+    int pad = SK_DistanceFieldPad + 1;
+
+    // set params for distance field data
+    int dataWidth = width + 2*pad;
+    int dataHeight = height + 2*pad;
+
+    // create zeroed temp DFData+edge storage
+    // (one DFData plus one edge byte per texel, in a single allocation)
+    UniqueVoidPtr storage(sk_calloc_throw(dataWidth*dataHeight*(sizeof(DFData) + 1)));
+    DFData* dataPtr = (DFData*)storage.get();
+    unsigned char* edgePtr = (unsigned char*)storage.get() + dataWidth*dataHeight*sizeof(DFData);
+
+    // copy glyph into distance field storage
+    init_glyph_data(dataPtr, edgePtr, copyPtr,
+                    dataWidth, dataHeight,
+                    width+2, height+2, SK_DistanceFieldPad);
+
+    // create initial distance data, particularly at edges
+    init_distances(dataPtr, edgePtr, dataWidth, dataHeight);
+
+    // now perform Euclidean distance transform to propagate distances
+
+    // forwards in y
+    DFData* currData = dataPtr+dataWidth+1; // skip outer buffer
+    unsigned char* currEdge = edgePtr+dataWidth+1;
+    for (int j = 1; j < dataHeight-1; ++j) {
+        // forwards in x
+        for (int i = 1; i < dataWidth-1; ++i) {
+            // don't need to calculate distance for edge pixels
+            if (!*currEdge) {
+                F1(currData, dataWidth);
+            }
+            ++currData;
+            ++currEdge;
+        }
+
+        // backwards in x
+        --currData;  // reset to end
+        --currEdge;
+        for (int i = 1; i < dataWidth-1; ++i) {
+            // don't need to calculate distance for edge pixels
+            if (!*currEdge) {
+                F2(currData, dataWidth);
+            }
+            --currData;
+            --currEdge;
+        }
+
+        // advance from start of this interior row to start of the next one
+        currData += dataWidth+1;
+        currEdge += dataWidth+1;
+    }
+
+    // backwards in y
+    currData = dataPtr+dataWidth*(dataHeight-2) - 1; // skip outer buffer
+    currEdge = edgePtr+dataWidth*(dataHeight-2) - 1;
+    for (int j = 1; j < dataHeight-1; ++j) {
+        // forwards in x
+        for (int i = 1; i < dataWidth-1; ++i) {
+            // don't need to calculate distance for edge pixels
+            if (!*currEdge) {
+                B1(currData, dataWidth);
+            }
+            ++currData;
+            ++currEdge;
+        }
+
+        // backwards in x
+        --currData;  // reset to end
+        --currEdge;
+        for (int i = 1; i < dataWidth-1; ++i) {
+            // don't need to calculate distance for edge pixels
+            if (!*currEdge) {
+                B2(currData, dataWidth);
+            }
+            --currData;
+            --currEdge;
+        }
+
+        // step back to the end of the previous interior row
+        currData -= dataWidth-1;
+        currEdge -= dataWidth-1;
+    }
+
+    // copy results to final distance field data
+    currData = dataPtr + dataWidth+1;
+    currEdge = edgePtr + dataWidth+1;
+    unsigned char *dfPtr = distanceField;
+    for (int j = 1; j < dataHeight-1; ++j) {
+        for (int i = 1; i < dataWidth-1; ++i) {
+#if DUMP_EDGE
+            float alpha = currData->fAlpha;
+            float edge = 0.0f;
+            if (*currEdge) {
+                edge = 0.25f;
+            }
+            // blend with original image
+            float result = alpha + (1.0f-alpha)*edge;
+            unsigned char val = sk_float_round2int(255*result);
+            *dfPtr++ = val;
+#else
+            // inside texels (alpha > 0.5) get negative distance, outside positive
+            float dist;
+            if (currData->fAlpha > 0.5f) {
+                dist = -SkScalarSqrt(currData->fDistSq);
+            } else {
+                dist = SkScalarSqrt(currData->fDistSq);
+            }
+            *dfPtr++ = pack_distance_field_val<SK_DistanceFieldMagnitude>(dist);
+#endif
+            ++currData;
+            ++currEdge;
+        }
+        // skip the two border texels between rows
+        currData += 2;
+        currEdge += 2;
+    }
+
+    return true;
+}
+
+// assumes an 8-bit image and distance field
+// assumes an 8-bit image and distance field
+// Copies the A8 mask into a 1-texel-padded buffer (so edge transitions at the
+// image border are detected), then runs the shared generator.
+bool SkGenerateDistanceFieldFromA8Image(unsigned char* distanceField,
+                                        const unsigned char* image,
+                                        int width, int height, size_t rowBytes) {
+    SkASSERT(distanceField);
+    SkASSERT(image);
+
+    // create temp data
+    SkAutoSMalloc<1024> copyStorage((width+2)*(height+2)*sizeof(char));
+    unsigned char* copyPtr = (unsigned char*) copyStorage.get();
+
+    // we copy our source image into a padded copy to ensure we catch edge transitions
+    // around the outside
+    const unsigned char* currSrcScanLine = image;
+    sk_bzero(copyPtr, (width+2)*sizeof(char));  // zero top padding row
+    unsigned char* currDestPtr = copyPtr + width + 2;
+    for (int i = 0; i < height; ++i) {
+        *currDestPtr++ = 0;  // left padding column
+        memcpy(currDestPtr, currSrcScanLine, width);
+        currSrcScanLine += rowBytes;
+        currDestPtr += width;
+        *currDestPtr++ = 0;  // right padding column
+    }
+    sk_bzero(currDestPtr, (width+2)*sizeof(char));  // zero bottom padding row
+
+    return generate_distance_field_from_image(distanceField, copyPtr, width, height);
+}
+
+// assumes a 16-bit lcd mask and 8-bit distance field
+// assumes a 16-bit lcd mask and 8-bit distance field
+// Collapses each LCD16 texel to a single alpha value via SkMask::AlphaIter,
+// writing it into a 1-texel-padded A8 buffer, then runs the shared generator.
+bool SkGenerateDistanceFieldFromLCD16Mask(unsigned char* distanceField,
+                                          const unsigned char* image,
+                                          int w, int h, size_t rowBytes) {
+    SkASSERT(distanceField);
+    SkASSERT(image);
+
+    // create temp data
+    SkAutoSMalloc<1024> copyStorage((w+2)*(h+2)*sizeof(char));
+    unsigned char* copyPtr = (unsigned char*) copyStorage.get();
+
+    // we copy our source image into a padded copy to ensure we catch edge transitions
+    // around the outside
+    const uint16_t* start = reinterpret_cast<const uint16_t*>(image);
+    auto currSrcScanline = SkMask::AlphaIter<SkMask::kLCD16_Format>(start);
+    auto endSrcScanline = SkMask::AlphaIter<SkMask::kLCD16_Format>(start + w);
+    sk_bzero(copyPtr, (w+2)*sizeof(char));  // zero top padding row
+    unsigned char* currDestPtr = copyPtr + w + 2;
+    // the iterators are advanced a whole row at a time via operator>>=(rowBytes)
+    for (int i = 0; i < h; ++i, currSrcScanline >>= rowBytes, endSrcScanline >>= rowBytes) {
+        *currDestPtr++ = 0;  // left padding column
+        for (auto src = currSrcScanline; src < endSrcScanline; ++src) {
+            *currDestPtr++ = *src;
+        }
+        *currDestPtr++ = 0;  // right padding column
+    }
+    sk_bzero(currDestPtr, (w+2)*sizeof(char));  // zero bottom padding row
+
+    return generate_distance_field_from_image(distanceField, copyPtr, w, h);
+}
+
+// assumes a 1-bit image and 8-bit distance field
+// assumes a 1-bit image and 8-bit distance field
+// Expands each bit of the BW mask (MSB first) to 0x00/0xff in a
+// 1-texel-padded A8 buffer, then runs the shared generator.
+bool SkGenerateDistanceFieldFromBWImage(unsigned char* distanceField,
+                                        const unsigned char* image,
+                                        int width, int height, size_t rowBytes) {
+    SkASSERT(distanceField);
+    SkASSERT(image);
+
+    // create temp data
+    SkAutoSMalloc<1024> copyStorage((width+2)*(height+2)*sizeof(char));
+    unsigned char* copyPtr = (unsigned char*) copyStorage.get();
+
+    // we copy our source image into a padded copy to ensure we catch edge transitions
+    // around the outside
+    const unsigned char* currSrcScanLine = image;
+    sk_bzero(copyPtr, (width+2)*sizeof(char));  // zero top padding row
+    unsigned char* currDestPtr = copyPtr + width + 2;
+    for (int i = 0; i < height; ++i) {
+        *currDestPtr++ = 0;  // left padding column
+
+        // expand the row bit-by-bit, high bit first, stopping at 'width' texels
+        int rowWritesLeft = width;
+        const unsigned char *maskPtr = currSrcScanLine;
+        while (rowWritesLeft > 0) {
+            unsigned mask = *maskPtr++;
+            for (int j = 7; j >= 0 && rowWritesLeft; --j, --rowWritesLeft) {
+                *currDestPtr++ = (mask & (1 << j)) ? 0xff : 0;
+            }
+        }
+        currSrcScanLine += rowBytes;
+
+        *currDestPtr++ = 0;  // right padding column
+    }
+    sk_bzero(currDestPtr, (width+2)*sizeof(char));  // zero bottom padding row
+
+    return generate_distance_field_from_image(distanceField, copyPtr, width, height);
+}
+
+#endif // !defined(SK_DISABLE_SDF_TEXT)
diff --git a/gfx/skia/skia/src/core/SkDistanceFieldGen.h b/gfx/skia/skia/src/core/SkDistanceFieldGen.h
new file mode 100644
index 0000000000..8374dbe05b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDistanceFieldGen.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkDistanceFieldGen_DEFINED
+#define SkDistanceFieldGen_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <cstddef>
+
+#if !defined(SK_DISABLE_SDF_TEXT)
+
+// the max magnitude for the distance field
+// distance values are limited to the range (-SK_DistanceFieldMagnitude, SK_DistanceFieldMagnitude]
+#define SK_DistanceFieldMagnitude 4
+// we need to pad around the original glyph to allow our maximum distance of
+// SK_DistanceFieldMagnitude texels away from any edge
+#define SK_DistanceFieldPad 4
+// the rect we render with is inset from the distance field glyph size to allow for bilerp
+#define SK_DistanceFieldInset 2
+
+// For the fragment shader:
+// The distance field is constructed as unsigned char values,
+// so that the zero value is at 128, and the supported range of distances is [-4 * 127/128, 4].
+// Hence our multiplier (width of the range) is 4 * 255/128 and zero threshold is 128/255.
+#define SK_DistanceFieldMultiplier "7.96875"
+#define SK_DistanceFieldThreshold "0.50196078431"
+
+/** Given 8-bit mask data, generate the associated distance field
+
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding above.
+ * @param image 8-bit mask we're using to generate the distance field.
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ * @param rowBytes Size of each row in the image, in bytes
+ */
+bool SkGenerateDistanceFieldFromA8Image(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes);
+
+/** Given LCD16 mask data (not a 16-bit image), generate the associated distance field
+
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding above.
+ * @param image 16-bit LCD data we're using to generate the distance field.
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ * @param rowBytes Size of each row in the image, in bytes
+ */
+bool SkGenerateDistanceFieldFromLCD16Mask(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes);
+
+/** Given 1-bit mask data, generate the associated distance field
+
+ * @param distanceField The distance field to be generated. Should already be allocated
+ * by the client with the padding above.
+ * @param image 1-bit mask we're using to generate the distance field.
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ * @param rowBytes Size of each row in the image, in bytes
+ */
+bool SkGenerateDistanceFieldFromBWImage(unsigned char* distanceField,
+ const unsigned char* image,
+ int w, int h, size_t rowBytes);
+
+/** Given width and height of original image, return size (in bytes) of distance field
+ * @param w Width of the original image.
+ * @param h Height of the original image.
+ */
+inline size_t SkComputeDistanceFieldSize(int w, int h) {
+    // one byte per texel, with SK_DistanceFieldPad texels of padding on all four sides
+    return (w + 2*SK_DistanceFieldPad) * (h + 2*SK_DistanceFieldPad) * sizeof(unsigned char);
+}
+
+#endif // !defined(SK_DISABLE_SDF_TEXT)
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDocument.cpp b/gfx/skia/skia/src/core/SkDocument.cpp
new file mode 100644
index 0000000000..30c94d8317
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDocument.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkDocument.h"
+#include "include/core/SkStream.h"
+
+// The stream is borrowed, not owned (see close()/abort()); a new document
+// starts in the between-pages state.
+SkDocument::SkDocument(SkWStream* stream) : fStream(stream), fState(kBetweenPages_State) {}
+
+SkDocument::~SkDocument() {
+    // finalize on destruction so an open page is flushed and onClose() runs
+    this->close();
+}
+
+// Restrict the page canvas to 'content' (intersected with the page bounds),
+// translating so the content rect's origin becomes (0,0). Returns nullptr if
+// the content rect lies entirely outside the page. A null 'content' or
+// null 'canvas' is passed through unchanged.
+static SkCanvas* trim(SkCanvas* canvas, SkScalar width, SkScalar height,
+                      const SkRect* content) {
+    if (content && canvas) {
+        SkRect inner = *content;
+        if (!inner.intersect({0, 0, width, height})) {
+            return nullptr;
+        }
+        canvas->clipRect(inner);
+        canvas->translate(inner.x(), inner.y());
+    }
+    return canvas;
+}
+
+// Start a new page, implicitly ending any page still in progress.
+// Returns nullptr for degenerate dimensions or a closed document.
+SkCanvas* SkDocument::beginPage(SkScalar width, SkScalar height,
+                                const SkRect* content) {
+    if (width <= 0 || height <= 0 || kClosed_State == fState) {
+        return nullptr;
+    }
+    if (kInPage_State == fState) {
+        this->endPage();
+    }
+    SkASSERT(kBetweenPages_State == fState);
+    fState = kInPage_State;
+    // subclass supplies the canvas; trim clips/translates it to 'content'
+    return trim(this->onBeginPage(width, height), width, height, content);
+}
+
+// Finish the current page, if any; a no-op in any other state.
+void SkDocument::endPage() {
+    if (kInPage_State == fState) {
+        fState = kBetweenPages_State;
+        this->onEndPage();
+    }
+}
+
+// Finalize the document: end an in-progress page if needed, invoke onClose(),
+// and drop the stream pointer. Idempotent — calling on a closed document
+// returns immediately. The loop drives the state machine to kClosed_State.
+void SkDocument::close() {
+    for (;;) {
+        switch (fState) {
+            case kBetweenPages_State: {
+                fState = kClosed_State;
+                this->onClose(fStream);
+                // we don't own the stream, but we mark it nullptr since we can
+                // no longer write to it.
+                fStream = nullptr;
+                return;
+            }
+            case kInPage_State:
+                // close the open page, then loop back to the between-pages case
+                this->endPage();
+                break;
+            case kClosed_State:
+                return;
+        }
+    }
+}
+
+// Discard the document without finalizing it: notify the subclass via
+// onAbort(), then transition straight to closed without calling onClose().
+void SkDocument::abort() {
+    this->onAbort();
+
+    fState = kClosed_State;
+    // we don't own the stream, but we mark it nullptr since we can
+    // no longer write to it.
+    fStream = nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkDraw.cpp b/gfx/skia/skia/src/core/SkDraw.cpp
new file mode 100644
index 0000000000..cdecd7b82d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw.cpp
@@ -0,0 +1,616 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTileMode.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkAutoBlitterChoose.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkMatrixUtils.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkScan.h"
+#include <cstdint>
+
+#if defined(SK_SUPPORT_LEGACY_ALPHA_BITMAP_AS_COVERAGE)
+#include "src/core/SkMaskFilterBase.h"
+#endif
+
+using namespace skia_private;
+
+// Return a copy of origPaint whose shader samples 'bitmap' (clamp tiling in
+// both directions, optionally transformed by 'matrix'). kNever_SkCopyPixelsMode
+// means the shader references the bitmap's pixels rather than copying them —
+// the bitmap must outlive the returned paint's use.
+static SkPaint make_paint_with_image(const SkPaint& origPaint, const SkBitmap& bitmap,
+                                     const SkSamplingOptions& sampling,
+                                     SkMatrix* matrix = nullptr) {
+    SkPaint paint(origPaint);
+    paint.setShader(SkMakeBitmapShaderForPaint(origPaint, bitmap, SkTileMode::kClamp,
+                                               SkTileMode::kClamp, sampling, matrix,
+                                               kNever_SkCopyPixelsMode));
+    return paint;
+}
+
+SkDraw::SkDraw() {
+    // default to the standard raster blitter factory (see SkDraw.h comment)
+    fBlitterChooser = SkBlitter::Choose;
+}
+
+// Record describing how to rasterize a batch of device-space points for
+// SkDraw::drawPoints. init() decides whether the fast point paths apply;
+// chooseProc() picks the matching blit routine.
+struct PtProcRec {
+    SkCanvas::PointMode fMode;
+    const SkPaint* fPaint;
+    const SkRegion* fClip;      // BW clip region (set by chooseProc)
+    const SkRasterClip* fRC;
+
+    // computed values
+    SkRect fClipBounds;         // clip bounds as a scalar rect
+    SkScalar fRadius;           // half the device-space stroke width
+
+    typedef void (*Proc)(const PtProcRec&, const SkPoint devPts[], int count,
+                         SkBlitter*);
+
+    bool init(SkCanvas::PointMode, const SkPaint&, const SkMatrix* matrix,
+              const SkRasterClip*);
+    Proc chooseProc(SkBlitter** blitter);
+
+private:
+    // wraps the blitter when the clip is anti-aliased (see chooseProc)
+    SkAAClipBlitterWrapper fWrapper;
+};
+
+// Hairline point procs. The bw_pt_rect_* variants assume a rectangular clip
+// and (for _16/_32) an opaque single-color paint, letting them poke pixels
+// directly; the generic variants go through the blitter/SkScan.
+
+// Point mode, rectangular BW clip: one blitH per in-bounds point.
+static void bw_pt_rect_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+                                 int count, SkBlitter* blitter) {
+    SkASSERT(rec.fClip->isRect());
+    const SkIRect& r = rec.fClip->getBounds();
+
+    for (int i = 0; i < count; i++) {
+        int x = SkScalarFloorToInt(devPts[i].fX);
+        int y = SkScalarFloorToInt(devPts[i].fY);
+        if (r.contains(x, y)) {
+            blitter->blitH(x, y, 1);
+        }
+    }
+}
+
+// Point mode, rectangular clip, 565 target: write pixels directly.
+static void bw_pt_rect_16_hair_proc(const PtProcRec& rec,
+                                    const SkPoint devPts[], int count,
+                                    SkBlitter* blitter) {
+    SkASSERT(rec.fRC->isRect());
+    const SkIRect& r = rec.fRC->getBounds();
+    uint32_t value;
+    const SkPixmap* dst = blitter->justAnOpaqueColor(&value);
+    SkASSERT(dst);
+
+    uint16_t* addr = dst->writable_addr16(0, 0);
+    size_t rb = dst->rowBytes();
+
+    for (int i = 0; i < count; i++) {
+        int x = SkScalarFloorToInt(devPts[i].fX);
+        int y = SkScalarFloorToInt(devPts[i].fY);
+        if (r.contains(x, y)) {
+            ((uint16_t*)((char*)addr + y * rb))[x] = SkToU16(value);
+        }
+    }
+}
+
+// Point mode, rectangular clip, 32-bit target: write pixels directly.
+static void bw_pt_rect_32_hair_proc(const PtProcRec& rec,
+                                    const SkPoint devPts[], int count,
+                                    SkBlitter* blitter) {
+    SkASSERT(rec.fRC->isRect());
+    const SkIRect& r = rec.fRC->getBounds();
+    uint32_t value;
+    const SkPixmap* dst = blitter->justAnOpaqueColor(&value);
+    SkASSERT(dst);
+
+    SkPMColor* addr = dst->writable_addr32(0, 0);
+    size_t rb = dst->rowBytes();
+
+    for (int i = 0; i < count; i++) {
+        int x = SkScalarFloorToInt(devPts[i].fX);
+        int y = SkScalarFloorToInt(devPts[i].fY);
+        if (r.contains(x, y)) {
+            ((SkPMColor*)((char*)addr + y * rb))[x] = value;
+        }
+    }
+}
+
+// Point mode, arbitrary BW clip region.
+static void bw_pt_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+                            int count, SkBlitter* blitter) {
+    for (int i = 0; i < count; i++) {
+        int x = SkScalarFloorToInt(devPts[i].fX);
+        int y = SkScalarFloorToInt(devPts[i].fY);
+        if (rec.fClip->contains(x, y)) {
+            blitter->blitH(x, y, 1);
+        }
+    }
+}
+
+// Lines mode: each consecutive pair of points is an independent segment.
+static void bw_line_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+                              int count, SkBlitter* blitter) {
+    for (int i = 0; i < count; i += 2) {
+        SkScan::HairLine(&devPts[i], 2, *rec.fRC, blitter);
+    }
+}
+
+// Polygon mode: one connected polyline through all the points.
+static void bw_poly_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+                              int count, SkBlitter* blitter) {
+    SkScan::HairLine(devPts, count, *rec.fRC, blitter);
+}
+
+// aa versions
+
+static void aa_line_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+                              int count, SkBlitter* blitter) {
+    for (int i = 0; i < count; i += 2) {
+        SkScan::AntiHairLine(&devPts[i], 2, *rec.fRC, blitter);
+    }
+}
+
+static void aa_poly_hair_proc(const PtProcRec& rec, const SkPoint devPts[],
+                              int count, SkBlitter* blitter) {
+    SkScan::AntiHairLine(devPts, count, *rec.fRC, blitter);
+}
+
+// square procs (strokeWidth > 0 but matrix is square-scale (sx == sy))
+// Each point is drawn as an axis-aligned square of side 2*fRadius, clipped to
+// the clip bounds and filled via SkScan using SkFixed rects.
+
+// Build the device-space square centered on 'center'.
+static SkRect make_square_rad(SkPoint center, SkScalar radius) {
+    return {
+        center.fX - radius, center.fY - radius,
+        center.fX + radius, center.fY + radius
+    };
+}
+
+// Convert to an SkXRect; PtProcRec::init guarantees the (clipped) rect
+// fits in SkFixed.
+static SkXRect make_xrect(const SkRect& r) {
+    SkASSERT(SkRectPriv::FitsInFixed(r));
+    return {
+        SkScalarToFixed(r.fLeft), SkScalarToFixed(r.fTop),
+        SkScalarToFixed(r.fRight), SkScalarToFixed(r.fBottom)
+    };
+}
+
+static void bw_square_proc(const PtProcRec& rec, const SkPoint devPts[],
+                           int count, SkBlitter* blitter) {
+    for (int i = 0; i < count; i++) {
+        SkRect r = make_square_rad(devPts[i], rec.fRadius);
+        if (r.intersect(rec.fClipBounds)) {
+            SkScan::FillXRect(make_xrect(r), *rec.fRC, blitter);
+        }
+    }
+}
+
+static void aa_square_proc(const PtProcRec& rec, const SkPoint devPts[],
+                           int count, SkBlitter* blitter) {
+    for (int i = 0; i < count; i++) {
+        SkRect r = make_square_rad(devPts[i], rec.fRadius);
+        if (r.intersect(rec.fClipBounds)) {
+            SkScan::AntiFillXRect(make_xrect(r), *rec.fRC, blitter);
+        }
+    }
+}
+
+// If this returns true, then chooseProc() must return a valid proc
+// If this returns true, then chooseProc() must return a valid proc
+// Fast paths apply only when there is no path effect / mask filter and the
+// points reduce to hairlines (width == 0) or, for kPoints mode, to squares
+// under a non-rotating uniform scale with non-round caps.
+bool PtProcRec::init(SkCanvas::PointMode mode, const SkPaint& paint,
+                     const SkMatrix* matrix, const SkRasterClip* rc) {
+    if ((unsigned)mode > (unsigned)SkCanvas::kPolygon_PointMode) {
+        return false;
+    }
+    if (paint.getPathEffect() || paint.getMaskFilter()) {
+        return false;
+    }
+    SkScalar width = paint.getStrokeWidth();
+    SkScalar radius = -1;   // sentinel value, a "valid" value must be > 0
+
+    if (0 == width) {
+        // hairline: nominal half-pixel radius
+        radius = 0.5f;
+    } else if (paint.getStrokeCap() != SkPaint::kRound_Cap &&
+               matrix->isScaleTranslate() && SkCanvas::kPoints_PointMode == mode) {
+        SkScalar sx = matrix->get(SkMatrix::kMScaleX);
+        SkScalar sy = matrix->get(SkMatrix::kMScaleY);
+        // squares only stay squares under (near-)uniform scale
+        if (SkScalarNearlyZero(sx - sy)) {
+            radius = SkScalarHalf(width * SkScalarAbs(sx));
+        }
+    }
+    if (radius > 0) {
+        SkRect clipBounds = SkRect::Make(rc->getBounds());
+        // if we return true, the caller may assume that the constructed shapes can be represented
+        // using SkFixed (after clipping), so we preflight that here.
+        if (!SkRectPriv::FitsInFixed(clipBounds)) {
+            return false;
+        }
+        fMode = mode;
+        fPaint = &paint;
+        fClip = nullptr;
+        fRC = rc;
+        fClipBounds = clipBounds;
+        fRadius = radius;
+        return true;
+    }
+    return false;
+}
+
+// Select the blit routine matching fMode/paint/clip, possibly swapping in an
+// AA-clip wrapper blitter (and updating *blitterPtr) when the clip is not BW.
+PtProcRec::Proc PtProcRec::chooseProc(SkBlitter** blitterPtr) {
+    Proc proc = nullptr;
+
+    SkBlitter* blitter = *blitterPtr;
+    if (fRC->isBW()) {
+        fClip = &fRC->bwRgn();
+    } else {
+        // AA clip: route blits through the wrapper's region + blitter
+        fWrapper.init(*fRC, blitter);
+        fClip = &fWrapper.getRgn();
+        blitter = fWrapper.getBlitter();
+        *blitterPtr = blitter;
+    }
+
+    // for our arrays -- fMode indexes gAAProcs/gBWProcs directly
+    SkASSERT(0 == SkCanvas::kPoints_PointMode);
+    SkASSERT(1 == SkCanvas::kLines_PointMode);
+    SkASSERT(2 == SkCanvas::kPolygon_PointMode);
+    SkASSERT((unsigned)fMode <= (unsigned)SkCanvas::kPolygon_PointMode);
+
+    if (fPaint->isAntiAlias()) {
+        if (0 == fPaint->getStrokeWidth()) {
+            static const Proc gAAProcs[] = {
+                aa_square_proc, aa_line_hair_proc, aa_poly_hair_proc
+            };
+            proc = gAAProcs[fMode];
+        } else if (fPaint->getStrokeCap() != SkPaint::kRound_Cap) {
+            // init() only allows stroked points (squares) in kPoints mode
+            SkASSERT(SkCanvas::kPoints_PointMode == fMode);
+            proc = aa_square_proc;
+        }
+    } else {    // BW
+        if (fRadius <= 0.5f) {  // small radii and hairline
+            if (SkCanvas::kPoints_PointMode == fMode && fClip->isRect()) {
+                // opaque single color + rect clip lets us poke pixels directly
+                uint32_t value;
+                const SkPixmap* bm = blitter->justAnOpaqueColor(&value);
+                if (bm && kRGB_565_SkColorType == bm->colorType()) {
+                    proc = bw_pt_rect_16_hair_proc;
+                } else if (bm && kN32_SkColorType == bm->colorType()) {
+                    proc = bw_pt_rect_32_hair_proc;
+                } else {
+                    proc = bw_pt_rect_hair_proc;
+                }
+            } else {
+                static Proc gBWProcs[] = {
+                    bw_pt_hair_proc, bw_line_hair_proc, bw_poly_hair_proc
+                };
+                proc = gBWProcs[fMode];
+            }
+        } else {
+            proc = bw_square_proc;
+        }
+    }
+    return proc;
+}
+
+// each of these costs 8-bytes of stack space, so don't make it too large
+// must be even for lines/polygon to work
+#define MAX_DEV_PTS 32
+
+// Draw points/lines/polygons. If the fast point paths apply (PtProcRec::init),
+// map and blit the points in MAX_DEV_PTS-sized batches; otherwise fall back
+// to drawDevicePoints. 'device' non-null forces the fallback.
+void SkDraw::drawPoints(SkCanvas::PointMode mode, size_t count,
+                        const SkPoint pts[], const SkPaint& paint,
+                        SkBaseDevice* device) const {
+    // if we're in lines mode, force count to be even
+    if (SkCanvas::kLines_PointMode == mode) {
+        count &= ~(size_t)1;
+    }
+
+    SkASSERT(pts != nullptr);
+    SkDEBUGCODE(this->validate();)
+
+    // nothing to draw
+    if (!count || fRC->isEmpty()) {
+        return;
+    }
+
+    SkMatrix ctm = fMatrixProvider->localToDevice();
+    PtProcRec rec;
+    if (!device && rec.init(mode, paint, &ctm, fRC)) {
+        SkAutoBlitterChoose blitter(*this, nullptr, paint);
+
+        SkPoint devPts[MAX_DEV_PTS];
+        SkBlitter* bltr = blitter.get();
+        PtProcRec::Proc proc = rec.chooseProc(&bltr);
+        // we have to back up subsequent passes if we're in polygon mode
+        const size_t backup = (SkCanvas::kPolygon_PointMode == mode);
+
+        do {
+            int n = SkToInt(count);
+            if (n > MAX_DEV_PTS) {
+                n = MAX_DEV_PTS;
+            }
+            ctm.mapPoints(devPts, pts, n);
+            // bail on non-finite device coordinates
+            if (!SkScalarsAreFinite(&devPts[0].fX, n * 2)) {
+                return;
+            }
+            proc(rec, devPts, n, bltr);
+            // in polygon mode, re-send the last point so segments connect
+            pts += n - backup;
+            SkASSERT(SkToInt(count) >= n);
+            count -= n;
+            if (count > 0) {
+                count += backup;
+            }
+        } while (count != 0);
+    } else {
+        this->drawDevicePoints(mode, count, pts, paint, device);
+    }
+}
+
+// True if srcR, mapped through m, lies entirely outside the clip.
+static bool clipped_out(const SkMatrix& m, const SkRasterClip& c,
+                        const SkRect& srcR) {
+    SkRect dstR;
+    m.mapRect(&dstR, srcR);
+    return c.quickReject(dstR.roundOut());
+}
+
+// Convenience overload taking a width/height rect at the origin.
+static bool clipped_out(const SkMatrix& matrix, const SkRasterClip& clip,
+                        int width, int height) {
+    SkRect r;
+    r.setIWH(width, height);
+    return clipped_out(matrix, clip, r);
+}
+
+// True if the sprite at (x,y) can be blitted without consulting an AA clip:
+// either the clip is BW, or it fully contains the sprite's bounds.
+static bool clipHandlesSprite(const SkRasterClip& clip, int x, int y, const SkPixmap& pmap) {
+    return clip.isBW() || clip.quickContains(SkIRect::MakeXYWH(x, y, pmap.width(), pmap.height()));
+}
+
+// Draw a bitmap transformed by prematrix (pre-concatenated with the CTM).
+// Tries a direct sprite blit when the total matrix is translate-only-ish
+// (SkTreatAsSprite); otherwise draws a rect with a bitmap shader.
+void SkDraw::drawBitmap(const SkBitmap& bitmap, const SkMatrix& prematrix,
+                        const SkRect* dstBounds, const SkSamplingOptions& sampling,
+                        const SkPaint& origPaint) const {
+    SkDEBUGCODE(this->validate();)
+
+    // nothing to draw
+    if (fRC->isEmpty() ||
+            bitmap.width() == 0 || bitmap.height() == 0 ||
+            bitmap.colorType() == kUnknown_SkColorType) {
+        return;
+    }
+
+    // bitmaps are always drawn filled; copy the paint lazily if needed
+    SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+    if (origPaint.getStyle() != SkPaint::kFill_Style) {
+        paint.writable()->setStyle(SkPaint::kFill_Style);
+    }
+
+    SkPreConcatMatrixProvider matrixProvider(*fMatrixProvider, prematrix);
+    SkMatrix matrix = matrixProvider.localToDevice();
+
+    if (clipped_out(matrix, *fRC, bitmap.width(), bitmap.height())) {
+        return;
+    }
+
+    if (!SkColorTypeIsAlphaOnly(bitmap.colorType()) &&
+        SkTreatAsSprite(matrix, bitmap.dimensions(), sampling, paint->isAntiAlias())) {
+        //
+        // It is safe to call lock pixels now, since we know the matrix is
+        // (more or less) identity.
+        //
+        SkPixmap pmap;
+        if (!bitmap.peekPixels(&pmap)) {
+            return;
+        }
+        int ix = SkScalarRoundToInt(matrix.getTranslateX());
+        int iy = SkScalarRoundToInt(matrix.getTranslateY());
+        if (clipHandlesSprite(*fRC, ix, iy, pmap)) {
+            SkSTArenaAlloc<kSkBlitterContextSize> allocator;
+            // blitter will be owned by the allocator.
+            SkBlitter* blitter = SkBlitter::ChooseSprite(fDst, *paint, pmap, ix, iy, &allocator,
+                                                         fRC->clipShader());
+            if (blitter) {
+                SkScan::FillIRect(SkIRect::MakeXYWH(ix, iy, pmap.width(), pmap.height()),
+                                  *fRC, blitter);
+                return;
+            }
+            // if !blitter, then we fall-through to the slower case
+        }
+    }
+
+    // now make a temp draw on the stack, and use it
+    //
+    SkDraw draw(*this);
+    draw.fMatrixProvider = &matrixProvider;
+
+    // For a long time, the CPU backend treated A8 bitmaps as coverage, rather than alpha. This was
+    // inconsistent with the GPU backend (skbug.com/9692). When this was fixed, it altered behavior
+    // for some Android apps (b/231400686). Thus: keep the old behavior in the framework.
+#if defined(SK_SUPPORT_LEGACY_ALPHA_BITMAP_AS_COVERAGE)
+    if (bitmap.colorType() == kAlpha_8_SkColorType && !paint->getColorFilter()) {
+        draw.drawBitmapAsMask(bitmap, sampling, *paint);
+        return;
+    }
+#endif
+
+    // slow path: draw the bitmap's bounds as a rect with a bitmap shader
+    SkPaint paintWithShader = make_paint_with_image(*paint, bitmap, sampling);
+    const SkRect srcBounds = SkRect::MakeIWH(bitmap.width(), bitmap.height());
+    if (dstBounds) {
+        this->drawRect(srcBounds, paintWithShader, &prematrix, dstBounds);
+    } else {
+        draw.drawRect(srcBounds, paintWithShader);
+    }
+}
+
+// Draw the bitmap unscaled at integer device position (x,y). Prefers a
+// direct sprite blit; otherwise draws the destination rect with an
+// offset bitmap shader under an identity matrix.
+void SkDraw::drawSprite(const SkBitmap& bitmap, int x, int y, const SkPaint& origPaint) const {
+    SkDEBUGCODE(this->validate();)
+
+    // nothing to draw
+    if (fRC->isEmpty() ||
+            bitmap.width() == 0 || bitmap.height() == 0 ||
+            bitmap.colorType() == kUnknown_SkColorType) {
+        return;
+    }
+
+    const SkIRect bounds = SkIRect::MakeXYWH(x, y, bitmap.width(), bitmap.height());
+
+    if (fRC->quickReject(bounds)) {
+        return; // nothing to draw
+    }
+
+    // sprites are always drawn filled
+    SkPaint paint(origPaint);
+    paint.setStyle(SkPaint::kFill_Style);
+
+    SkPixmap pmap;
+    if (!bitmap.peekPixels(&pmap)) {
+        return;
+    }
+
+    // fast path: no color filter and the clip can handle a raw sprite blit
+    if (nullptr == paint.getColorFilter() && clipHandlesSprite(*fRC, x, y, pmap)) {
+        // blitter will be owned by the allocator.
+        SkSTArenaAlloc<kSkBlitterContextSize> allocator;
+        SkBlitter* blitter = SkBlitter::ChooseSprite(fDst, paint, pmap, x, y, &allocator,
+                                                     fRC->clipShader());
+        if (blitter) {
+            SkScan::FillIRect(bounds, *fRC, blitter);
+            return;
+        }
+    }
+
+    SkMatrix matrix;
+    SkRect r;
+
+    // get a scalar version of our rect
+    r.set(bounds);
+
+    // create shader with offset
+    matrix.setTranslate(r.fLeft, r.fTop);
+    SkPaint paintWithShader = make_paint_with_image(paint, bitmap, SkSamplingOptions(), &matrix);
+    SkDraw draw(*this);
+    // identity matrix: 'r' and the shader offset are already in device space
+    SkMatrixProvider matrixProvider(SkMatrix::I());
+    draw.fMatrixProvider = &matrixProvider;
+    // call ourself with a rect
+    draw.drawRect(r, paintWithShader);
+}
+
+#if defined(SK_SUPPORT_LEGACY_ALPHA_BITMAP_AS_COVERAGE)
+// Blit a device-space A8 mask through the clip, first applying the paint's
+// mask filter (if any) to produce a filtered mask.
+void SkDraw::drawDevMask(const SkMask& srcM, const SkPaint& paint) const {
+    if (srcM.fBounds.isEmpty()) {
+        return;
+    }
+
+    const SkMask* mask = &srcM;
+
+    SkMask dstM;
+    if (paint.getMaskFilter() &&
+        as_MFB(paint.getMaskFilter())
+                ->filterMask(&dstM, srcM, fMatrixProvider->localToDevice(), nullptr)) {
+        mask = &dstM;
+    }
+    // frees dstM.fImage on scope exit (no-op if the filter never ran)
+    SkAutoMaskFreeImage ami(dstM.fImage);
+
+    SkAutoBlitterChoose blitterChooser(*this, nullptr, paint);
+    SkBlitter* blitter = blitterChooser.get();
+
+    SkAAClipBlitterWrapper wrapper;
+    const SkRegion* clipRgn;
+
+    if (fRC->isBW()) {
+        clipRgn = &fRC->bwRgn();
+    } else {
+        // AA clip: wrap the blitter so coverage is modulated by the clip
+        wrapper.init(*fRC, blitter);
+        clipRgn = &wrapper.getRgn();
+        blitter = wrapper.getBlitter();
+    }
+    blitter->blitMaskRegion(*mask, *clipRgn);
+}
+
+// Legacy path: treat an A8 bitmap as a coverage mask. If the CTM is
+// translate-only-ish, wrap the pixels in an SkMask directly; otherwise
+// render the transformed bitmap into a temporary A8 mask first.
+void SkDraw::drawBitmapAsMask(const SkBitmap& bitmap, const SkSamplingOptions& sampling,
+                              const SkPaint& paint) const {
+    SkASSERT(bitmap.colorType() == kAlpha_8_SkColorType);
+
+    // nothing to draw
+    if (fRC->isEmpty()) {
+        return;
+    }
+
+    SkMatrix ctm = fMatrixProvider->localToDevice();
+    if (SkTreatAsSprite(ctm, bitmap.dimensions(), sampling, paint.isAntiAlias()))
+    {
+        int ix = SkScalarRoundToInt(ctm.getTranslateX());
+        int iy = SkScalarRoundToInt(ctm.getTranslateY());
+
+        SkPixmap pmap;
+        if (!bitmap.peekPixels(&pmap)) {
+            return;
+        }
+        SkMask mask;
+        mask.fBounds.setXYWH(ix, iy, pmap.width(), pmap.height());
+        mask.fFormat = SkMask::kA8_Format;
+        mask.fRowBytes = SkToU32(pmap.rowBytes());
+        // fImage is typed as writable, but in this case it is used read-only
+        mask.fImage = (uint8_t*)pmap.addr8(0, 0);
+
+        this->drawDevMask(mask, paint);
+    } else {    // need to xform the bitmap first
+        SkRect r;
+        SkMask mask;
+
+        r.setIWH(bitmap.width(), bitmap.height());
+        ctm.mapRect(&r);
+        r.round(&mask.fBounds);
+
+        // set the mask's bounds to the transformed bitmap-bounds,
+        // clipped to the actual device and further limited by the clip bounds
+        {
+            SkASSERT(fDst.bounds().contains(fRC->getBounds()));
+            SkIRect devBounds = fDst.bounds();
+            // outset by 1 so AA edges at the clip boundary are preserved
+            devBounds.intersect(fRC->getBounds().makeOutset(1, 1));
+            // need intersect(l, t, r, b) on irect
+            if (!mask.fBounds.intersect(devBounds)) {
+                return;
+            }
+        }
+
+        mask.fFormat = SkMask::kA8_Format;
+        mask.fRowBytes = SkAlign4(mask.fBounds.width());
+        size_t size = mask.computeImageSize();
+        if (0 == size) {
+            // the mask is too big to be allocated; draw nothing
+            return;
+        }
+
+        // allocate (and clear) our temp buffer to hold the transformed bitmap
+        AutoTMalloc<uint8_t> storage(size);
+        mask.fImage = storage.get();
+        memset(mask.fImage, 0, size);
+
+        // now draw our bitmap(src) into mask(dst), transformed by the matrix
+        {
+            SkBitmap device;
+            device.installPixels(SkImageInfo::MakeA8(mask.fBounds.width(), mask.fBounds.height()),
+                                 mask.fImage, mask.fRowBytes);
+
+            SkCanvas c(device);
+            // need the unclipped top/left for the translate
+            c.translate(-SkIntToScalar(mask.fBounds.fLeft),
+                        -SkIntToScalar(mask.fBounds.fTop));
+            c.concat(ctm);
+
+            // We can't call drawBitmap, or we'll infinitely recurse. Instead
+            // we manually build a shader and draw that into our new mask
+            SkPaint tmpPaint;
+            tmpPaint.setAntiAlias(paint.isAntiAlias());
+            tmpPaint.setDither(paint.isDither());
+            SkPaint paintWithShader = make_paint_with_image(tmpPaint, bitmap, sampling);
+            SkRect rr;
+            rr.setIWH(bitmap.width(), bitmap.height());
+            c.drawRect(rr, paintWithShader);
+        }
+        this->drawDevMask(mask, paint);
+    }
+}
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkDraw.h b/gfx/skia/skia/src/core/SkDraw.h
new file mode 100644
index 0000000000..bdb3b999de
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw.h
@@ -0,0 +1,79 @@
+
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkDraw_DEFINED
+#define SkDraw_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "src/base/SkZip.h"
+#include "src/core/SkDrawBase.h"
+#include <cstddef>
+
+class SkArenaAlloc;
+class SkBaseDevice;
+class SkBitmap;
+class SkBlender;
+class SkGlyph;
+class SkGlyphRunListPainterCPU;
+class SkMatrix;
+class SkPaint;
+class SkVertices;
+namespace sktext { class GlyphRunList; }
+struct SkPoint3;
+struct SkPoint;
+struct SkRSXform;
+struct SkRect;
+
+
+// defaults to use SkBlitter::Choose()
+class SkDraw : public SkDrawBase {
+public:
+ SkDraw();
+
+ /* If dstOrNull is null, computes a dst by mapping the bitmap's bounds through the matrix. */
+ void drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect* dstOrNull,
+ const SkSamplingOptions&, const SkPaint&) const override;
+ void drawSprite(const SkBitmap&, int x, int y, const SkPaint&) const;
+ void drawGlyphRunList(SkCanvas* canvas,
+ SkGlyphRunListPainterCPU* glyphPainter,
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& paint) const;
+
+ void paintMasks(SkZip<const SkGlyph*, SkPoint> accepted, const SkPaint& paint) const override;
+
+ void drawPoints(SkCanvas::PointMode, size_t count, const SkPoint[],
+ const SkPaint&, SkBaseDevice*) const;
+ /* If skipColorXform, skips color conversion when assigning per-vertex colors */
+ void drawVertices(const SkVertices*,
+ sk_sp<SkBlender>,
+ const SkPaint&,
+ bool skipColorXform) const;
+ void drawAtlas(const SkRSXform[], const SkRect[], const SkColor[], int count,
+ sk_sp<SkBlender>, const SkPaint&);
+
+#if defined(SK_SUPPORT_LEGACY_ALPHA_BITMAP_AS_COVERAGE)
+ void drawDevMask(const SkMask& mask, const SkPaint&) const;
+ void drawBitmapAsMask(const SkBitmap&, const SkSamplingOptions&, const SkPaint&) const;
+#endif
+
+private:
+ void drawFixedVertices(const SkVertices* vertices,
+ sk_sp<SkBlender> blender,
+ const SkPaint& paint,
+ const SkMatrix& ctmInverse,
+ const SkPoint* dev2,
+ const SkPoint3* dev3,
+ SkArenaAlloc* outerAlloc,
+ bool skipColorXform) const;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDrawBase.cpp b/gfx/skia/skia/src/core/SkDrawBase.cpp
new file mode 100644
index 0000000000..2aace4361b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawBase.cpp
@@ -0,0 +1,776 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPathUtils.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkCPUTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkTLazy.h"
+#include "src/base/SkZip.h"
+#include "src/core/SkAutoBlitterChoose.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlitter_A8.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDrawBase.h"
+#include "src/core/SkDrawProcs.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkPathEffectBase.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkScan.h"
+#include <algorithm>
+#include <cstddef>
+#include <optional>
+
+class SkBitmap;
+class SkBlitter;
+class SkGlyph;
+class SkMaskFilter;
+
+using namespace skia_private;
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkDrawBase::SkDrawBase() {}
+
+bool SkDrawBase::computeConservativeLocalClipBounds(SkRect* localBounds) const {
+ if (fRC->isEmpty()) {
+ return false;
+ }
+
+ SkMatrix inverse;
+ if (!fMatrixProvider->localToDevice().invert(&inverse)) {
+ return false;
+ }
+
+ SkIRect devBounds = fRC->getBounds();
+ // outset to have slop for antialasing and hairlines
+ devBounds.outset(1, 1);
+ inverse.mapRect(localBounds, SkRect::Make(devBounds));
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkDrawBase::drawPaint(const SkPaint& paint) const {
+ SkDEBUGCODE(this->validate();)
+
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ SkIRect devRect;
+ devRect.setWH(fDst.width(), fDst.height());
+
+ SkAutoBlitterChoose blitter(*this, nullptr, paint);
+ SkScan::FillIRect(devRect, *fRC, blitter.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline SkPoint compute_stroke_size(const SkPaint& paint, const SkMatrix& matrix) {
+ SkASSERT(matrix.rectStaysRect());
+ SkASSERT(SkPaint::kFill_Style != paint.getStyle());
+
+ SkVector size;
+ SkPoint pt = { paint.getStrokeWidth(), paint.getStrokeWidth() };
+ matrix.mapVectors(&size, &pt, 1);
+ return SkPoint::Make(SkScalarAbs(size.fX), SkScalarAbs(size.fY));
+}
+
+static bool easy_rect_join(const SkRect& rect, const SkPaint& paint, const SkMatrix& matrix,
+ SkPoint* strokeSize) {
+ if (rect.isEmpty() || SkPaint::kMiter_Join != paint.getStrokeJoin() ||
+ paint.getStrokeMiter() < SK_ScalarSqrt2) {
+ return false;
+ }
+
+ *strokeSize = compute_stroke_size(paint, matrix);
+ return true;
+}
+
+SkDrawBase::RectType SkDrawBase::ComputeRectType(const SkRect& rect,
+ const SkPaint& paint,
+ const SkMatrix& matrix,
+ SkPoint* strokeSize) {
+ RectType rtype;
+ const SkScalar width = paint.getStrokeWidth();
+ const bool zeroWidth = (0 == width);
+ SkPaint::Style style = paint.getStyle();
+
+ if ((SkPaint::kStrokeAndFill_Style == style) && zeroWidth) {
+ style = SkPaint::kFill_Style;
+ }
+
+ if (paint.getPathEffect() || paint.getMaskFilter() ||
+ !matrix.rectStaysRect() || SkPaint::kStrokeAndFill_Style == style) {
+ rtype = kPath_RectType;
+ } else if (SkPaint::kFill_Style == style) {
+ rtype = kFill_RectType;
+ } else if (zeroWidth) {
+ rtype = kHair_RectType;
+ } else if (easy_rect_join(rect, paint, matrix, strokeSize)) {
+ rtype = kStroke_RectType;
+ } else {
+ rtype = kPath_RectType;
+ }
+ return rtype;
+}
+
+static const SkPoint* rect_points(const SkRect& r) {
+ return reinterpret_cast<const SkPoint*>(&r);
+}
+
+static SkPoint* rect_points(SkRect& r) {
+ return reinterpret_cast<SkPoint*>(&r);
+}
+
+static void draw_rect_as_path(const SkDrawBase& orig, const SkRect& prePaintRect,
+ const SkPaint& paint, const SkMatrixProvider* matrixProvider) {
+ SkDrawBase draw(orig);
+ draw.fMatrixProvider = matrixProvider;
+ SkPath tmp;
+ tmp.addRect(prePaintRect);
+ tmp.setFillType(SkPathFillType::kWinding);
+ draw.drawPath(tmp, paint, nullptr, true);
+}
+
+void SkDrawBase::drawRect(const SkRect& prePaintRect, const SkPaint& paint,
+ const SkMatrix* paintMatrix, const SkRect* postPaintRect) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ const SkMatrixProvider* matrixProvider = fMatrixProvider;
+ SkTLazy<SkPreConcatMatrixProvider> preConcatMatrixProvider;
+ if (paintMatrix) {
+ SkASSERT(postPaintRect);
+ matrixProvider = preConcatMatrixProvider.init(*matrixProvider, *paintMatrix);
+ } else {
+ SkASSERT(!postPaintRect);
+ }
+
+ SkMatrix ctm = fMatrixProvider->localToDevice();
+ SkPoint strokeSize;
+ RectType rtype = ComputeRectType(prePaintRect, paint, ctm, &strokeSize);
+
+ if (kPath_RectType == rtype) {
+ draw_rect_as_path(*this, prePaintRect, paint, matrixProvider);
+ return;
+ }
+
+ SkRect devRect;
+ const SkRect& paintRect = paintMatrix ? *postPaintRect : prePaintRect;
+ // skip the paintMatrix when transforming the rect by the CTM
+ ctm.mapPoints(rect_points(devRect), rect_points(paintRect), 2);
+ devRect.sort();
+
+ // look for the quick exit, before we build a blitter
+ SkRect bbox = devRect;
+ if (paint.getStyle() != SkPaint::kFill_Style) {
+ // extra space for hairlines
+ if (paint.getStrokeWidth() == 0) {
+ bbox.outset(1, 1);
+ } else {
+ // For kStroke_RectType, strokeSize is already computed.
+ const SkPoint& ssize = (kStroke_RectType == rtype)
+ ? strokeSize
+ : compute_stroke_size(paint, ctm);
+ bbox.outset(SkScalarHalf(ssize.x()), SkScalarHalf(ssize.y()));
+ }
+ }
+ if (SkPathPriv::TooBigForMath(bbox)) {
+ return;
+ }
+
+ if (!SkRectPriv::FitsInFixed(bbox) && rtype != kHair_RectType) {
+ draw_rect_as_path(*this, prePaintRect, paint, matrixProvider);
+ return;
+ }
+
+ SkIRect ir = bbox.roundOut();
+ if (fRC->quickReject(ir)) {
+ return;
+ }
+
+ SkAutoBlitterChoose blitterStorage(*this, matrixProvider, paint);
+ const SkRasterClip& clip = *fRC;
+ SkBlitter* blitter = blitterStorage.get();
+
+ // we want to "fill" if we are kFill or kStrokeAndFill, since in the latter
+ // case we are also hairline (if we've gotten to here), which devolves to
+ // effectively just kFill
+ switch (rtype) {
+ case kFill_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiFillRect(devRect, clip, blitter);
+ } else {
+ SkScan::FillRect(devRect, clip, blitter);
+ }
+ break;
+ case kStroke_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiFrameRect(devRect, strokeSize, clip, blitter);
+ } else {
+ SkScan::FrameRect(devRect, strokeSize, clip, blitter);
+ }
+ break;
+ case kHair_RectType:
+ if (paint.isAntiAlias()) {
+ SkScan::AntiHairRect(devRect, clip, blitter);
+ } else {
+ SkScan::HairRect(devRect, clip, blitter);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("bad rtype");
+ }
+}
+
+static SkScalar fast_len(const SkVector& vec) {
+ SkScalar x = SkScalarAbs(vec.fX);
+ SkScalar y = SkScalarAbs(vec.fY);
+ if (x < y) {
+ using std::swap;
+ swap(x, y);
+ }
+ return x + SkScalarHalf(y);
+}
+
+bool SkDrawTreatAAStrokeAsHairline(SkScalar strokeWidth, const SkMatrix& matrix,
+ SkScalar* coverage) {
+ SkASSERT(strokeWidth > 0);
+ // We need to try to fake a thick-stroke with a modulated hairline.
+
+ if (matrix.hasPerspective()) {
+ return false;
+ }
+
+ SkVector src[2], dst[2];
+ src[0].set(strokeWidth, 0);
+ src[1].set(0, strokeWidth);
+ matrix.mapVectors(dst, src, 2);
+ SkScalar len0 = fast_len(dst[0]);
+ SkScalar len1 = fast_len(dst[1]);
+ if (len0 <= SK_Scalar1 && len1 <= SK_Scalar1) {
+ if (coverage) {
+ *coverage = SkScalarAve(len0, len1);
+ }
+ return true;
+ }
+ return false;
+}
+
+void SkDrawBase::drawRRect(const SkRRect& rrect, const SkPaint& paint) const {
+ SkDEBUGCODE(this->validate());
+
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ SkMatrix ctm = fMatrixProvider->localToDevice();
+ {
+ // TODO: Investigate optimizing these options. They are in the same
+ // order as SkDrawBase::drawPath, which handles each case. It may be
+ // that there is no way to optimize for these using the SkRRect path.
+ SkScalar coverage;
+ if (SkDrawTreatAsHairline(paint, ctm, &coverage)) {
+ goto DRAW_PATH;
+ }
+
+ if (paint.getPathEffect() || paint.getStyle() != SkPaint::kFill_Style) {
+ goto DRAW_PATH;
+ }
+ }
+
+ if (paint.getMaskFilter()) {
+ // Transform the rrect into device space.
+ SkRRect devRRect;
+ if (rrect.transform(ctm, &devRRect)) {
+ SkAutoBlitterChoose blitter(*this, nullptr, paint);
+ if (as_MFB(paint.getMaskFilter())->filterRRect(devRRect, ctm, *fRC, blitter.get())) {
+ return; // filterRRect() called the blitter, so we're done
+ }
+ }
+ }
+
+DRAW_PATH:
+ // Now fall back to the default case of using a path.
+ SkPath path;
+ path.addRRect(rrect);
+ this->drawPath(path, paint, nullptr, true);
+}
+
+void SkDrawBase::drawDevPath(const SkPath& devPath, const SkPaint& paint, bool drawCoverage,
+ SkBlitter* customBlitter, bool doFill) const {
+ if (SkPathPriv::TooBigForMath(devPath)) {
+ return;
+ }
+ SkBlitter* blitter = nullptr;
+ SkAutoBlitterChoose blitterStorage;
+ if (nullptr == customBlitter) {
+ blitter = blitterStorage.choose(*this, nullptr, paint, drawCoverage);
+ } else {
+ blitter = customBlitter;
+ }
+
+ if (paint.getMaskFilter()) {
+ SkStrokeRec::InitStyle style = doFill ? SkStrokeRec::kFill_InitStyle
+ : SkStrokeRec::kHairline_InitStyle;
+ if (as_MFB(paint.getMaskFilter())
+ ->filterPath(devPath, fMatrixProvider->localToDevice(), *fRC, blitter, style)) {
+ return; // filterPath() called the blitter, so we're done
+ }
+ }
+
+ void (*proc)(const SkPath&, const SkRasterClip&, SkBlitter*);
+ if (doFill) {
+ if (paint.isAntiAlias()) {
+ proc = SkScan::AntiFillPath;
+ } else {
+ proc = SkScan::FillPath;
+ }
+ } else { // hairline
+ if (paint.isAntiAlias()) {
+ switch (paint.getStrokeCap()) {
+ case SkPaint::kButt_Cap:
+ proc = SkScan::AntiHairPath;
+ break;
+ case SkPaint::kSquare_Cap:
+ proc = SkScan::AntiHairSquarePath;
+ break;
+ case SkPaint::kRound_Cap:
+ proc = SkScan::AntiHairRoundPath;
+ break;
+ }
+ } else {
+ switch (paint.getStrokeCap()) {
+ case SkPaint::kButt_Cap:
+ proc = SkScan::HairPath;
+ break;
+ case SkPaint::kSquare_Cap:
+ proc = SkScan::HairSquarePath;
+ break;
+ case SkPaint::kRound_Cap:
+ proc = SkScan::HairRoundPath;
+ break;
+ }
+ }
+ }
+
+ proc(devPath, *fRC, blitter);
+}
+
+void SkDrawBase::drawPath(const SkPath& origSrcPath, const SkPaint& origPaint,
+ const SkMatrix* prePathMatrix, bool pathIsMutable,
+ bool drawCoverage, SkBlitter* customBlitter) const {
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (fRC->isEmpty()) {
+ return;
+ }
+
+ SkPath* pathPtr = (SkPath*)&origSrcPath;
+ bool doFill = true;
+ SkPath tmpPathStorage;
+ SkPath* tmpPath = &tmpPathStorage;
+ const SkMatrixProvider* matrixProvider = fMatrixProvider;
+ SkTLazy<SkPreConcatMatrixProvider> preConcatMatrixProvider;
+ tmpPath->setIsVolatile(true);
+
+ if (prePathMatrix) {
+ if (origPaint.getPathEffect() || origPaint.getStyle() != SkPaint::kFill_Style) {
+ SkPath* result = pathPtr;
+
+ if (!pathIsMutable) {
+ result = tmpPath;
+ pathIsMutable = true;
+ }
+ pathPtr->transform(*prePathMatrix, result);
+ pathPtr = result;
+ } else {
+ matrixProvider = preConcatMatrixProvider.init(*matrixProvider, *prePathMatrix);
+ }
+ }
+
+ SkTCopyOnFirstWrite<SkPaint> paint(origPaint);
+
+ {
+ SkScalar coverage;
+ if (SkDrawTreatAsHairline(origPaint, matrixProvider->localToDevice(), &coverage)) {
+ const auto bm = origPaint.asBlendMode();
+ if (SK_Scalar1 == coverage) {
+ paint.writable()->setStrokeWidth(0);
+ } else if (bm && SkBlendMode_SupportsCoverageAsAlpha(bm.value())) {
+ U8CPU newAlpha;
+#if 0
+ newAlpha = SkToU8(SkScalarRoundToInt(coverage *
+ origPaint.getAlpha()));
+#else
+ // this is the old technique, which we preserve for now so
+ // we don't change previous results (testing)
+ // the new way seems fine, its just (a tiny bit) different
+ int scale = (int)(coverage * 256);
+ newAlpha = origPaint.getAlpha() * scale >> 8;
+#endif
+ SkPaint* writablePaint = paint.writable();
+ writablePaint->setStrokeWidth(0);
+ writablePaint->setAlpha(newAlpha);
+ }
+ }
+ }
+
+ if (paint->getPathEffect() || paint->getStyle() != SkPaint::kFill_Style) {
+ SkRect cullRect;
+ const SkRect* cullRectPtr = nullptr;
+ if (this->computeConservativeLocalClipBounds(&cullRect)) {
+ cullRectPtr = &cullRect;
+ }
+ doFill = skpathutils::FillPathWithPaint(*pathPtr, *paint, tmpPath, cullRectPtr,
+ fMatrixProvider->localToDevice());
+ pathPtr = tmpPath;
+ }
+
+ // avoid possibly allocating a new path in transform if we can
+ SkPath* devPathPtr = pathIsMutable ? pathPtr : tmpPath;
+
+ // transform the path into device space
+ pathPtr->transform(matrixProvider->localToDevice(), devPathPtr);
+
+#if defined(SK_BUILD_FOR_FUZZER)
+ if (devPathPtr->countPoints() > 1000) {
+ return;
+ }
+#endif
+
+ this->drawDevPath(*devPathPtr, *paint, drawCoverage, customBlitter, doFill);
+}
+
+void SkDrawBase::paintMasks(SkZip<const SkGlyph*, SkPoint>, const SkPaint&) const {
+ SkASSERT(false);
+}
+void SkDrawBase::drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect*,
+ const SkSamplingOptions&, const SkPaint&) const {
+ SkASSERT(false);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+
+void SkDrawBase::validate() const {
+ SkASSERT(fMatrixProvider != nullptr);
+ SkASSERT(fRC != nullptr);
+
+ const SkIRect& cr = fRC->getBounds();
+ SkIRect br;
+
+ br.setWH(fDst.width(), fDst.height());
+ SkASSERT(cr.isEmpty() || br.contains(cr));
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkDrawBase::ComputeMaskBounds(const SkRect& devPathBounds, const SkIRect& clipBounds,
+ const SkMaskFilter* filter, const SkMatrix* filterMatrix,
+ SkIRect* bounds) {
+ // init our bounds from the path
+ *bounds = devPathBounds.makeOutset(SK_ScalarHalf, SK_ScalarHalf).roundOut();
+
+ SkIPoint margin = SkIPoint::Make(0, 0);
+ if (filter) {
+ SkASSERT(filterMatrix);
+
+ SkMask srcM, dstM;
+
+ srcM.fBounds = *bounds;
+ srcM.fFormat = SkMask::kA8_Format;
+ if (!as_MFB(filter)->filterMask(&dstM, srcM, *filterMatrix, &margin)) {
+ return false;
+ }
+ }
+
+ // trim the bounds to reflect the clip (plus whatever slop the filter needs)
+ // Ugh. Guard against gigantic margins from wacky filters. Without this
+ // check we can request arbitrary amounts of slop beyond our visible
+ // clip, and bring down the renderer (at least on finite RAM machines
+ // like handsets, etc.). Need to balance this invented value between
+ // quality of large filters like blurs, and the corresponding memory
+ // requests.
+ static constexpr int kMaxMargin = 128;
+ if (!bounds->intersect(clipBounds.makeOutset(std::min(margin.fX, kMaxMargin),
+ std::min(margin.fY, kMaxMargin)))) {
+ return false;
+ }
+
+ return true;
+}
+
+static void draw_into_mask(const SkMask& mask, const SkPath& devPath,
+ SkStrokeRec::InitStyle style) {
+ SkDrawBase draw;
+ draw.fBlitterChooser = SkA8Blitter_Choose;
+ if (!draw.fDst.reset(mask)) {
+ return;
+ }
+
+ SkRasterClip clip;
+ SkMatrix matrix;
+ SkPaint paint;
+
+ clip.setRect(SkIRect::MakeWH(mask.fBounds.width(), mask.fBounds.height()));
+ matrix.setTranslate(-SkIntToScalar(mask.fBounds.fLeft),
+ -SkIntToScalar(mask.fBounds.fTop));
+
+ SkMatrixProvider matrixProvider(matrix);
+ draw.fRC = &clip;
+ draw.fMatrixProvider = &matrixProvider;
+ paint.setAntiAlias(true);
+ switch (style) {
+ case SkStrokeRec::kHairline_InitStyle:
+ SkASSERT(!paint.getStrokeWidth());
+ paint.setStyle(SkPaint::kStroke_Style);
+ break;
+ case SkStrokeRec::kFill_InitStyle:
+ SkASSERT(paint.getStyle() == SkPaint::kFill_Style);
+ break;
+
+ }
+ draw.drawPath(devPath, paint);
+}
+
+bool SkDrawBase::DrawToMask(const SkPath& devPath, const SkIRect& clipBounds,
+ const SkMaskFilter* filter, const SkMatrix* filterMatrix,
+ SkMask* mask, SkMask::CreateMode mode,
+ SkStrokeRec::InitStyle style) {
+ if (devPath.isEmpty()) {
+ return false;
+ }
+
+ if (SkMask::kJustRenderImage_CreateMode != mode) {
+ // By using infinite bounds for inverse fills, ComputeMaskBounds is able to clip it to
+ // 'clipBounds' outset by whatever extra margin the mask filter requires.
+ static const SkRect kInverseBounds = { SK_ScalarNegativeInfinity, SK_ScalarNegativeInfinity,
+ SK_ScalarInfinity, SK_ScalarInfinity};
+ SkRect pathBounds = devPath.isInverseFillType() ? kInverseBounds
+ : devPath.getBounds();
+ if (!ComputeMaskBounds(pathBounds, clipBounds, filter,
+ filterMatrix, &mask->fBounds))
+ return false;
+ }
+
+ if (SkMask::kComputeBoundsAndRenderImage_CreateMode == mode) {
+ mask->fFormat = SkMask::kA8_Format;
+ mask->fRowBytes = mask->fBounds.width();
+ size_t size = mask->computeImageSize();
+ if (0 == size) {
+ // we're too big to allocate the mask, abort
+ return false;
+ }
+ mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
+ }
+
+ if (SkMask::kJustComputeBounds_CreateMode != mode) {
+ draw_into_mask(*mask, devPath, style);
+ }
+
+ return true;
+}
+
+void SkDrawBase::drawDevicePoints(SkCanvas::PointMode mode, size_t count,
+ const SkPoint pts[], const SkPaint& paint,
+ SkBaseDevice* device) const {
+ // if we're in lines mode, force count to be even
+ if (SkCanvas::kLines_PointMode == mode) {
+ count &= ~(size_t)1;
+ }
+
+ SkASSERT(pts != nullptr);
+ SkDEBUGCODE(this->validate();)
+
+ // nothing to draw
+ if (!count || fRC->isEmpty()) {
+ return;
+ }
+
+ // needed?
+ if (!SkScalarsAreFinite(&pts[0].fX, count * 2)) {
+ return;
+ }
+
+ SkMatrix ctm = fMatrixProvider->localToDevice();
+ switch (mode) {
+ case SkCanvas::kPoints_PointMode: {
+ // temporarily mark the paint as filling.
+ SkPaint newPaint(paint);
+ newPaint.setStyle(SkPaint::kFill_Style);
+
+ SkScalar width = newPaint.getStrokeWidth();
+ SkScalar radius = SkScalarHalf(width);
+
+ if (newPaint.getStrokeCap() == SkPaint::kRound_Cap) {
+ if (device) {
+ for (size_t i = 0; i < count; ++i) {
+ SkRect r = SkRect::MakeLTRB(pts[i].fX - radius, pts[i].fY - radius,
+ pts[i].fX + radius, pts[i].fY + radius);
+ device->drawOval(r, newPaint);
+ }
+ } else {
+ SkPath path;
+ SkMatrix preMatrix;
+
+ path.addCircle(0, 0, radius);
+ for (size_t i = 0; i < count; i++) {
+ preMatrix.setTranslate(pts[i].fX, pts[i].fY);
+ // pass true for the last point, since we can modify
+ // then path then
+ path.setIsVolatile((count-1) == i);
+ this->drawPath(path, newPaint, &preMatrix, (count-1) == i);
+ }
+ }
+ } else {
+ SkRect r;
+
+ for (size_t i = 0; i < count; i++) {
+ r.fLeft = pts[i].fX - radius;
+ r.fTop = pts[i].fY - radius;
+ r.fRight = r.fLeft + width;
+ r.fBottom = r.fTop + width;
+ if (device) {
+ device->drawRect(r, newPaint);
+ } else {
+ this->drawRect(r, newPaint);
+ }
+ }
+ }
+ break;
+ }
+ case SkCanvas::kLines_PointMode:
+ if (2 == count && paint.getPathEffect()) {
+ // most likely a dashed line - see if it is one of the ones
+ // we can accelerate
+ SkStrokeRec stroke(paint);
+ SkPathEffectBase::PointData pointData;
+
+ SkPath path = SkPath::Line(pts[0], pts[1]);
+
+ SkRect cullRect = SkRect::Make(fRC->getBounds());
+
+ if (as_PEB(paint.getPathEffect())->asPoints(&pointData, path, stroke, ctm,
+ &cullRect)) {
+ // 'asPoints' managed to find some fast path
+
+ SkPaint newP(paint);
+ newP.setPathEffect(nullptr);
+ newP.setStyle(SkPaint::kFill_Style);
+
+ if (!pointData.fFirst.isEmpty()) {
+ if (device) {
+ device->drawPath(pointData.fFirst, newP);
+ } else {
+ this->drawPath(pointData.fFirst, newP);
+ }
+ }
+
+ if (!pointData.fLast.isEmpty()) {
+ if (device) {
+ device->drawPath(pointData.fLast, newP);
+ } else {
+ this->drawPath(pointData.fLast, newP);
+ }
+ }
+
+ if (pointData.fSize.fX == pointData.fSize.fY) {
+ // The rest of the dashed line can just be drawn as points
+ SkASSERT(pointData.fSize.fX == SkScalarHalf(newP.getStrokeWidth()));
+
+ if (SkPathEffectBase::PointData::kCircles_PointFlag & pointData.fFlags) {
+ newP.setStrokeCap(SkPaint::kRound_Cap);
+ } else {
+ newP.setStrokeCap(SkPaint::kButt_Cap);
+ }
+
+ if (device) {
+ device->drawPoints(SkCanvas::kPoints_PointMode,
+ pointData.fNumPoints,
+ pointData.fPoints,
+ newP);
+ } else {
+ this->drawDevicePoints(SkCanvas::kPoints_PointMode,
+ pointData.fNumPoints,
+ pointData.fPoints,
+ newP,
+ device);
+ }
+ break;
+ } else {
+ // The rest of the dashed line must be drawn as rects
+ SkASSERT(!(SkPathEffectBase::PointData::kCircles_PointFlag &
+ pointData.fFlags));
+
+ SkRect r;
+
+ for (int i = 0; i < pointData.fNumPoints; ++i) {
+ r.setLTRB(pointData.fPoints[i].fX - pointData.fSize.fX,
+ pointData.fPoints[i].fY - pointData.fSize.fY,
+ pointData.fPoints[i].fX + pointData.fSize.fX,
+ pointData.fPoints[i].fY + pointData.fSize.fY);
+ if (device) {
+ device->drawRect(r, newP);
+ } else {
+ this->drawRect(r, newP);
+ }
+ }
+ }
+
+ break;
+ }
+ }
+ [[fallthrough]]; // couldn't take fast path
+ case SkCanvas::kPolygon_PointMode: {
+ count -= 1;
+ SkPath path;
+ SkPaint p(paint);
+ p.setStyle(SkPaint::kStroke_Style);
+ size_t inc = (SkCanvas::kLines_PointMode == mode) ? 2 : 1;
+ path.setIsVolatile(true);
+ for (size_t i = 0; i < count; i += inc) {
+ path.moveTo(pts[i]);
+ path.lineTo(pts[i+1]);
+ if (device) {
+ device->drawPath(path, p, true);
+ } else {
+ this->drawPath(path, p, nullptr, true);
+ }
+ path.rewind();
+ }
+ break;
+ }
+ }
+}
+
diff --git a/gfx/skia/skia/src/core/SkDrawBase.h b/gfx/skia/skia/src/core/SkDrawBase.h
new file mode 100644
index 0000000000..6afa13738b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawBase.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawBase_DEFINED
+#define SkDrawBase_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/base/SkAttributes.h"
+#include "src/base/SkZip.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkMask.h"
+#include <cstddef>
+
+class SkArenaAlloc;
+class SkBaseDevice;
+class SkBitmap;
+class SkBlitter;
+class SkGlyph;
+class SkMaskFilter;
+class SkMatrix;
+class SkMatrixProvider;
+class SkPath;
+class SkRRect;
+class SkRasterClip;
+class SkShader;
+class SkSurfaceProps;
+struct SkIRect;
+struct SkPoint;
+struct SkRect;
+
+class SkDrawBase : public SkGlyphRunListPainterCPU::BitmapDevicePainter {
+public:
+ SkDrawBase();
+
+ void drawPaint(const SkPaint&) const;
+ void drawRect(const SkRect& prePaintRect, const SkPaint&, const SkMatrix* paintMatrix,
+ const SkRect* postPaintRect) const;
+ void drawRect(const SkRect& rect, const SkPaint& paint) const {
+ this->drawRect(rect, paint, nullptr, nullptr);
+ }
+ void drawRRect(const SkRRect&, const SkPaint&) const;
+ /**
+ * To save on mallocs, we allow a flag that tells us that srcPath is
+ * mutable, so that we don't have to make copies of it as we transform it.
+ *
+ * If prePathMatrix is not null, it should logically be applied before any
+ * stroking or other effects. If there are no effects on the paint that
+ * affect the geometry/rasterization, then the pre matrix can just be
+ * pre-concated with the current matrix.
+ */
+ void drawPath(const SkPath& path, const SkPaint& paint,
+ const SkMatrix* prePathMatrix = nullptr, bool pathIsMutable = false) const {
+ this->drawPath(path, paint, prePathMatrix, pathIsMutable, false);
+ }
+
+ /**
+ * Overwrite the target with the path's coverage (i.e. its mask).
+ * Will overwrite the entire device, so it need not be zero'd first.
+ *
+ * Only device A8 is supported right now.
+ */
+ void drawPathCoverage(const SkPath& src, const SkPaint& paint,
+ SkBlitter* customBlitter = nullptr) const {
+ bool isHairline = paint.getStyle() == SkPaint::kStroke_Style &&
+ paint.getStrokeWidth() > 0;
+ this->drawPath(src, paint, nullptr, false, !isHairline, customBlitter);
+ }
+
+ void drawDevicePoints(SkCanvas::PointMode, size_t count, const SkPoint[], const SkPaint&,
+ SkBaseDevice*) const;
+
+ static bool ComputeMaskBounds(const SkRect& devPathBounds, const SkIRect& clipBounds,
+ const SkMaskFilter* filter, const SkMatrix* filterMatrix,
+ SkIRect* bounds);
+
+ /** Helper function that creates a mask from a path and an optional maskfilter.
+ Note however, that the resulting mask will not have been actually filtered,
+ that must be done afterwards (by calling filterMask). The maskfilter is provided
+ solely to assist in computing the mask's bounds (if the mode requests that).
+ */
+ static bool DrawToMask(const SkPath& devPath, const SkIRect& clipBounds,
+ const SkMaskFilter*, const SkMatrix* filterMatrix,
+ SkMask* mask, SkMask::CreateMode mode,
+ SkStrokeRec::InitStyle style);
+
+ enum RectType {
+ kHair_RectType,
+ kFill_RectType,
+ kStroke_RectType,
+ kPath_RectType
+ };
+
+ /**
+ * Based on the paint's style, strokeWidth, and the matrix, classify how
+ * to draw the rect. If no special-case is available, returns
+ * kPath_RectType.
+ *
+ * Iff RectType == kStroke_RectType, then strokeSize is set to the device
+ * width and height of the stroke.
+ */
+ static RectType ComputeRectType(const SkRect&, const SkPaint&, const SkMatrix&,
+ SkPoint* strokeSize);
+
+ using BlitterChooser = SkBlitter* (const SkPixmap& dst,
+ const SkMatrix& ctm,
+ const SkPaint&,
+ SkArenaAlloc*,
+ bool drawCoverage,
+ sk_sp<SkShader> clipShader,
+ const SkSurfaceProps&);
+
+
+private:
+ // not supported
+ void paintMasks(SkZip<const SkGlyph*, SkPoint> accepted, const SkPaint& paint) const override;
+ void drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect* dstOrNull,
+ const SkSamplingOptions&, const SkPaint&) const override;
+
+ void drawPath(const SkPath&,
+ const SkPaint&,
+ const SkMatrix* preMatrix,
+ bool pathIsMutable,
+ bool drawCoverage,
+ SkBlitter* customBlitter = nullptr) const;
+
+ void drawLine(const SkPoint[2], const SkPaint&) const;
+
+ void drawDevPath(const SkPath& devPath,
+ const SkPaint& paint,
+ bool drawCoverage,
+ SkBlitter* customBlitter,
+ bool doFill) const;
+ /**
+ * Return the current clip bounds, in local coordinates, with slop to account
+ * for antialiasing or hairlines (i.e. device-bounds outset by 1, and then
+ * run through the inverse of the matrix).
+ *
+ * If the matrix cannot be inverted, or the current clip is empty, return
+ * false and ignore bounds parameter.
+ */
+ bool SK_WARN_UNUSED_RESULT computeConservativeLocalClipBounds(SkRect* bounds) const;
+
+public:
+ SkPixmap fDst;
+ BlitterChooser* fBlitterChooser{nullptr}; // required
+ const SkMatrixProvider* fMatrixProvider{nullptr}; // required
+ const SkRasterClip* fRC{nullptr}; // required
+ const SkSurfaceProps* fProps{nullptr}; // optional
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+};
+
+#endif // SkDrawBase_DEFINED
diff --git a/gfx/skia/skia/src/core/SkDrawLooper.cpp b/gfx/skia/skia/src/core/SkDrawLooper.cpp
new file mode 100644
index 0000000000..cf5cf343bf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawLooper.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRect.h"
+#include "src/base/SkArenaAlloc.h"
+
+#ifdef SK_SUPPORT_LEGACY_DRAWLOOPER
+
+#include "include/core/SkDrawLooper.h"
+
+void SkDrawLooper::Context::Info::applyToCTM(SkMatrix* ctm) const {
+ if (fApplyPostCTM) {
+ ctm->postTranslate(fTranslate.fX, fTranslate.fY);
+ } else {
+ ctm->preTranslate(fTranslate.fX, fTranslate.fY);
+ }
+}
+
+void SkDrawLooper::Context::Info::applyToCanvas(SkCanvas* canvas) const {
+ if (fApplyPostCTM) {
+ canvas->setMatrix(canvas->getLocalToDevice().postTranslate(fTranslate.fX, fTranslate.fY));
+ } else {
+ canvas->translate(fTranslate.fX, fTranslate.fY);
+ }
+}
+
+bool SkDrawLooper::canComputeFastBounds(const SkPaint& paint) const {
+ SkSTArenaAlloc<48> alloc;
+
+ SkDrawLooper::Context* context = this->makeContext(&alloc);
+ for (;;) {
+ SkPaint p(paint);
+ SkDrawLooper::Context::Info info;
+ if (context->next(&info, &p)) {
+ if (!p.canComputeFastBounds()) {
+ return false;
+ }
+ } else {
+ break;
+ }
+ }
+ return true;
+}
+
+void SkDrawLooper::computeFastBounds(const SkPaint& paint, const SkRect& s,
+ SkRect* dst) const {
+ // src and dst rects may alias and we need to keep the original src, so copy it.
+ const SkRect src = s;
+
+ SkSTArenaAlloc<48> alloc;
+
+ *dst = src; // catch case where there are no loops
+ SkDrawLooper::Context* context = this->makeContext(&alloc);
+
+ for (bool firstTime = true;; firstTime = false) {
+ SkPaint p(paint);
+ SkDrawLooper::Context::Info info;
+ if (context->next(&info, &p)) {
+ SkRect r(src);
+
+ p.computeFastBounds(r, &r);
+ r.offset(info.fTranslate.fX, info.fTranslate.fY);
+
+ if (firstTime) {
+ *dst = r;
+ } else {
+ dst->join(r);
+ }
+ } else {
+ break;
+ }
+ }
+}
+
+bool SkDrawLooper::asABlurShadow(BlurShadowRec*) const {
+ return false;
+}
+
+void SkDrawLooper::apply(SkCanvas* canvas, const SkPaint& paint,
+ std::function<void(SkCanvas*, const SkPaint&)> proc) {
+ SkSTArenaAlloc<256> alloc;
+ Context* ctx = this->makeContext(&alloc);
+ if (ctx) {
+ Context::Info info;
+ for (;;) {
+ SkPaint p = paint;
+ if (!ctx->next(&info, &p)) {
+ break;
+ }
+ canvas->save();
+ if (info.fApplyPostCTM) {
+ canvas->setMatrix(canvas->getLocalToDevice().postTranslate(info.fTranslate.fX,
+ info.fTranslate.fY));
+ } else {
+ canvas->translate(info.fTranslate.fX, info.fTranslate.fY);
+ }
+ proc(canvas, p);
+ canvas->restore();
+ }
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDrawProcs.h b/gfx/skia/skia/src/core/SkDrawProcs.h
new file mode 100644
index 0000000000..fc59f52ec8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawProcs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawProcs_DEFINED
+#define SkDrawProcs_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkScalar.h"
+class SkMatrix;
+
+// Returns true if an AA stroke of the given width, under the given matrix,
+// should be drawn as a hairline with partial coverage (written to *coverage).
+bool SkDrawTreatAAStrokeAsHairline(SkScalar strokeWidth, const SkMatrix&,
+                                   SkScalar* coverage);
+
+/**
+ *  If the current paint is set to stroke and the stroke-width when applied to
+ *  the matrix is <= 1.0, then this returns true, and sets coverage (simulating
+ *  a stroke by drawing a hairline with partial coverage). If any of these
+ *  conditions are false, then this returns false and coverage is ignored.
+ */
+inline bool SkDrawTreatAsHairline(const SkPaint& paint, const SkMatrix& matrix,
+                                  SkScalar* coverage) {
+    if (SkPaint::kStroke_Style != paint.getStyle()) {
+        return false;
+    }
+
+    // A stroke width of 0 already means "hairline" by convention: full coverage.
+    SkScalar strokeWidth = paint.getStrokeWidth();
+    if (0 == strokeWidth) {
+        *coverage = SK_Scalar1;
+        return true;
+    }
+
+    // Coverage simulation only makes sense when antialiasing.
+    if (!paint.isAntiAlias()) {
+        return false;
+    }
+
+    return SkDrawTreatAAStrokeAsHairline(strokeWidth, matrix, coverage);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDrawShadowInfo.cpp b/gfx/skia/skia/src/core/SkDrawShadowInfo.cpp
new file mode 100644
index 0000000000..3a84f6c294
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawShadowInfo.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkDrawShadowInfo.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRect.h"
+#include "include/private/SkShadowFlags.h"
+#include "include/private/base/SkTo.h"
+
+namespace SkDrawShadowMetrics {
+
+// Evaluates the occluder height z = x*params.fX + y*params.fY + params.fZ
+// at point (x, y), where params encodes the z-plane of the occluder.
+static SkScalar compute_z(SkScalar x, SkScalar y, const SkPoint3& params) {
+    return x*params.fX + y*params.fY + params.fZ;
+}
+
+// Computes the matrix that maps the path's 2D bounds to its projected spot-
+// shadow outline for the given light, and the blur radius to apply to that
+// outline. Returns false when no valid projection exists (light at/below a
+// corner, or a degenerate projected quad).
+bool GetSpotShadowTransform(const SkPoint3& lightPos, SkScalar lightRadius,
+                            const SkMatrix& ctm, const SkPoint3& zPlaneParams,
+                            const SkRect& pathBounds, bool directional,
+                            SkMatrix* shadowTransform, SkScalar* radius) {
+    auto heightFunc = [zPlaneParams] (SkScalar x, SkScalar y) {
+        return zPlaneParams.fX*x + zPlaneParams.fY*y + zPlaneParams.fZ;
+    };
+    // Height is sampled once at the center of the bounds for the non-perspective case.
+    SkScalar occluderHeight = heightFunc(pathBounds.centerX(), pathBounds.centerY());
+
+    // TODO: have directional lights support tilt via the zPlaneParams
+    if (!ctm.hasPerspective() || directional) {
+        // Affine (or directional-light) case: a uniform scale + translate suffices.
+        SkScalar scale;
+        SkVector translate;
+        if (directional) {
+            SkDrawShadowMetrics::GetDirectionalParams(occluderHeight, lightPos.fX, lightPos.fY,
+                                                      lightPos.fZ, lightRadius, radius,
+                                                      &scale, &translate);
+        } else {
+            SkDrawShadowMetrics::GetSpotParams(occluderHeight, lightPos.fX, lightPos.fY,
+                                               lightPos.fZ, lightRadius, radius,
+                                               &scale, &translate);
+        }
+        shadowTransform->setScaleTranslate(scale, scale, translate.fX, translate.fY);
+        shadowTransform->preConcat(ctm);
+    } else {
+        // Perspective case: build a full projective map via the projected quad.
+        if (SkScalarNearlyZero(pathBounds.width()) || SkScalarNearlyZero(pathBounds.height())) {
+            return false;
+        }
+
+        // get rotated quad in 3D
+        SkPoint pts[4];
+        ctm.mapRectToQuad(pts, pathBounds);
+
+        SkPoint3 pts3D[4];
+        SkScalar z = heightFunc(pathBounds.fLeft, pathBounds.fTop);
+        pts3D[0].set(pts[0].fX, pts[0].fY, z);
+        z = heightFunc(pathBounds.fRight, pathBounds.fTop);
+        pts3D[1].set(pts[1].fX, pts[1].fY, z);
+        z = heightFunc(pathBounds.fRight, pathBounds.fBottom);
+        pts3D[2].set(pts[2].fX, pts[2].fY, z);
+        z = heightFunc(pathBounds.fLeft, pathBounds.fBottom);
+        pts3D[3].set(pts[3].fX, pts[3].fY, z);
+
+        // project from light through corners to z=0 plane
+        for (int i = 0; i < 4; ++i) {
+            SkScalar dz = lightPos.fZ - pts3D[i].fZ;
+            // light shouldn't be below or at a corner's z-location
+            if (dz <= SK_ScalarNearlyZero) {
+                return false;
+            }
+            SkScalar zRatio = pts3D[i].fZ / dz;
+            pts3D[i].fX -= (lightPos.fX - pts3D[i].fX)*zRatio;
+            pts3D[i].fY -= (lightPos.fY - pts3D[i].fY)*zRatio;
+            // All projected points now live in the z=1 homogeneous plane.
+            pts3D[i].fZ = SK_Scalar1;
+        }
+
+        // Generate matrix that projects from [-1,1]x[-1,1] square to projected quad
+        SkPoint3 h0, h1, h2;
+        // Compute homogeneous crossing point between top and bottom edges (gives new x-axis).
+        h0 = (pts3D[1].cross(pts3D[0])).cross(pts3D[2].cross(pts3D[3]));
+        // Compute homogeneous crossing point between left and right edges (gives new y-axis).
+        h1 = (pts3D[0].cross(pts3D[3])).cross(pts3D[1].cross(pts3D[2]));
+        // Compute homogeneous crossing point between diagonals (gives new origin).
+        h2 = (pts3D[0].cross(pts3D[2])).cross(pts3D[1].cross(pts3D[3]));
+        // If h2 is a vector (z=0 in 2D homogeneous space), that means that at least
+        // two of the quad corners are coincident and we don't have a realistic projection
+        if (SkScalarNearlyZero(h2.fZ)) {
+            return false;
+        }
+        // In some cases the crossing points are in the wrong direction
+        // to map (-1,-1) to pts3D[0], so we need to correct for that.
+        // Want h0 to be to the right of the left edge.
+        SkVector3 v = pts3D[3] - pts3D[0];
+        SkVector3 w = h0 - pts3D[0];
+        SkScalar perpDot = v.fX*w.fY - v.fY*w.fX;
+        if (perpDot > 0) {
+            h0 = -h0;
+        }
+        // Want h1 to be above the bottom edge.
+        // NOTE(review): 'w' still holds h0 - pts3D[0] here; it is not recomputed
+        // from h1 before this test. Verify against upstream whether that is intended.
+        v = pts3D[1] - pts3D[0];
+        perpDot = v.fX*w.fY - v.fY*w.fX;
+        if (perpDot < 0) {
+            h1 = -h1;
+        }
+        shadowTransform->setAll(h0.fX / h2.fZ, h1.fX / h2.fZ, h2.fX / h2.fZ,
+                                h0.fY / h2.fZ, h1.fY / h2.fZ, h2.fY / h2.fZ,
+                                h0.fZ / h2.fZ, h1.fZ / h2.fZ, 1);
+        // generate matrix that transforms from bounds to [-1,1]x[-1,1] square
+        SkMatrix toHomogeneous;
+        SkScalar xScale = 2/(pathBounds.fRight - pathBounds.fLeft);
+        SkScalar yScale = 2/(pathBounds.fBottom - pathBounds.fTop);
+        toHomogeneous.setAll(xScale, 0, -xScale*pathBounds.fLeft - 1,
+                             0, yScale, -yScale*pathBounds.fTop - 1,
+                             0, 0, 1);
+        shadowTransform->preConcat(toHomogeneous);
+
+        *radius = SkDrawShadowMetrics::SpotBlurRadius(occluderHeight, lightPos.fZ, lightRadius);
+    }
+
+    return true;
+}
+
+// Computes a conservative bound, in the path's local space, covering both the
+// ambient and the spot shadow of the path (i.e. bounds prior to applying ctm).
+void GetLocalBounds(const SkPath& path, const SkDrawShadowRec& rec, const SkMatrix& ctm,
+                    SkRect* bounds) {
+    SkRect ambientBounds = path.getBounds();
+    SkScalar occluderZ;
+    if (SkScalarNearlyZero(rec.fZPlaneParams.fX) && SkScalarNearlyZero(rec.fZPlaneParams.fY)) {
+        // Flat occluder: constant height everywhere.
+        occluderZ = rec.fZPlaneParams.fZ;
+    } else {
+        // Tilted occluder: take the max height over the four bound corners.
+        occluderZ = compute_z(ambientBounds.fLeft, ambientBounds.fTop, rec.fZPlaneParams);
+        occluderZ = std::max(occluderZ, compute_z(ambientBounds.fRight, ambientBounds.fTop,
+                                                  rec.fZPlaneParams));
+        occluderZ = std::max(occluderZ, compute_z(ambientBounds.fLeft, ambientBounds.fBottom,
+                                                  rec.fZPlaneParams));
+        occluderZ = std::max(occluderZ, compute_z(ambientBounds.fRight, ambientBounds.fBottom,
+                                                  rec.fZPlaneParams));
+    }
+    SkScalar ambientBlur;
+    SkScalar spotBlur;
+    SkScalar spotScale;
+    SkPoint spotOffset;
+    if (ctm.hasPerspective()) {
+        // transform ambient and spot bounds into device space
+        ctm.mapRect(&ambientBounds);
+
+        // get ambient blur (in device space)
+        ambientBlur = SkDrawShadowMetrics::AmbientBlurRadius(occluderZ);
+
+        // get spot params (in device space)
+        if (SkToBool(rec.fFlags & SkShadowFlags::kDirectionalLight_ShadowFlag)) {
+            SkDrawShadowMetrics::GetDirectionalParams(occluderZ, rec.fLightPos.fX, rec.fLightPos.fY,
+                                                      rec.fLightPos.fZ, rec.fLightRadius,
+                                                      &spotBlur, &spotScale, &spotOffset);
+        } else {
+            // Point light position must be mapped into device space first.
+            SkPoint devLightPos = SkPoint::Make(rec.fLightPos.fX, rec.fLightPos.fY);
+            ctm.mapPoints(&devLightPos, 1);
+            SkDrawShadowMetrics::GetSpotParams(occluderZ, devLightPos.fX, devLightPos.fY,
+                                               rec.fLightPos.fZ, rec.fLightRadius,
+                                               &spotBlur, &spotScale, &spotOffset);
+        }
+    } else {
+        // Non-perspective: work in local space, scaling device-space blur back.
+        SkScalar devToSrcScale = SkScalarInvert(ctm.getMinScale());
+
+        // get ambient blur (in local space)
+        SkScalar devSpaceAmbientBlur = SkDrawShadowMetrics::AmbientBlurRadius(occluderZ);
+        ambientBlur = devSpaceAmbientBlur*devToSrcScale;
+
+        // get spot params (in local space)
+        if (SkToBool(rec.fFlags & SkShadowFlags::kDirectionalLight_ShadowFlag)) {
+            SkDrawShadowMetrics::GetDirectionalParams(occluderZ, rec.fLightPos.fX, rec.fLightPos.fY,
+                                                      rec.fLightPos.fZ, rec.fLightRadius,
+                                                      &spotBlur, &spotScale, &spotOffset);
+            // light dir is in device space, so need to map spot offset back into local space
+            SkMatrix inverse;
+            if (ctm.invert(&inverse)) {
+                inverse.mapVectors(&spotOffset, 1);
+            }
+        } else {
+            SkDrawShadowMetrics::GetSpotParams(occluderZ, rec.fLightPos.fX, rec.fLightPos.fY,
+                                               rec.fLightPos.fZ, rec.fLightRadius,
+                                               &spotBlur, &spotScale, &spotOffset);
+        }
+
+        // convert spot blur to local space
+        spotBlur *= devToSrcScale;
+    }
+
+    // in both cases, adjust ambient and spot bounds
+    SkRect spotBounds = ambientBounds;
+    ambientBounds.outset(ambientBlur, ambientBlur);
+    spotBounds.fLeft *= spotScale;
+    spotBounds.fTop *= spotScale;
+    spotBounds.fRight *= spotScale;
+    spotBounds.fBottom *= spotScale;
+    spotBounds.offset(spotOffset.fX, spotOffset.fY);
+    spotBounds.outset(spotBlur, spotBlur);
+
+    // merge bounds
+    *bounds = ambientBounds;
+    bounds->join(spotBounds);
+    // outset a bit to account for floating point error
+    bounds->outset(1, 1);
+
+    // if perspective, transform back to src space
+    if (ctm.hasPerspective()) {
+        // TODO: create tighter mapping from dev rect back to src rect
+        SkMatrix inverse;
+        if (ctm.invert(&inverse)) {
+            inverse.mapRect(bounds);
+        }
+    }
+}
+
+
+} // namespace SkDrawShadowMetrics
+
diff --git a/gfx/skia/skia/src/core/SkDrawShadowInfo.h b/gfx/skia/skia/src/core/SkDrawShadowInfo.h
new file mode 100644
index 0000000000..d0957cc6a6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawShadowInfo.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDrawShadowInfo_DEFINED
+#define SkDrawShadowInfo_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTPin.h"
+
+#include <algorithm>
+#include <cstdint>
+
+class SkMatrix;
+class SkPath;
+struct SkRect;
+
+// Parameters recorded for a shadow draw.
+struct SkDrawShadowRec {
+    SkPoint3    fZPlaneParams;   // occluder height: z = fX*x + fY*y + fZ
+    SkPoint3    fLightPos;       // light position (or direction, per fFlags)
+    SkScalar    fLightRadius;
+    SkColor     fAmbientColor;
+    SkColor     fSpotColor;
+    uint32_t    fFlags;          // SkShadowFlags bits (e.g. kDirectionalLight)
+};
+
+namespace SkDrawShadowMetrics {
+
+static constexpr auto kAmbientHeightFactor = 1.0f / 128.0f;
+static constexpr auto kAmbientGeomFactor = 64.0f;
+// Assuming that we have a light height of 600 for the spot shadow,
+// the spot values will reach their maximum at a height of approximately 292.3077.
+// We'll round up to 300 to keep it simple.
+static constexpr auto kMaxAmbientRadius = 300*kAmbientHeightFactor*kAmbientGeomFactor;
+
+// Divides numer/denom and clamps the result to [min, max]; non-finite
+// quotients (divide-by-zero, NaN) are pinned rather than propagated.
+static inline float divide_and_pin(float numer, float denom, float min, float max) {
+    float result = SkTPin(sk_ieee_float_divide(numer, denom), min, max);
+    // ensure that SkTPin handled non-finites correctly
+    SkASSERT(result >= min && result <= max);
+    return result;
+}
+
+// Ambient blur grows linearly with occluder height, capped at kMaxAmbientRadius.
+inline SkScalar AmbientBlurRadius(SkScalar height) {
+    return std::min(height*kAmbientHeightFactor*kAmbientGeomFactor, kMaxAmbientRadius);
+}
+
+inline SkScalar AmbientRecipAlpha(SkScalar height) {
+    return 1.0f + std::max(height*kAmbientHeightFactor, 0.0f);
+}
+
+// Spot blur scales the light radius by the occluder/light height ratio,
+// pinned to [0, 0.95] to stay stable as occluderZ approaches lightZ.
+inline SkScalar SpotBlurRadius(SkScalar occluderZ, SkScalar lightZ, SkScalar lightRadius) {
+    return lightRadius*divide_and_pin(occluderZ, lightZ - occluderZ, 0.0f, 0.95f);
+}
+
+// Computes blur radius, uniform scale, and translate for a point-light spot shadow.
+inline void GetSpotParams(SkScalar occluderZ, SkScalar lightX, SkScalar lightY, SkScalar lightZ,
+                          SkScalar lightRadius,
+                          SkScalar* blurRadius, SkScalar* scale, SkVector* translate) {
+    SkScalar zRatio = divide_and_pin(occluderZ, lightZ - occluderZ, 0.0f, 0.95f);
+    *blurRadius = lightRadius*zRatio;
+    *scale = divide_and_pin(lightZ, lightZ - occluderZ, 1.0f, 1.95f);
+    *translate = SkVector::Make(-zRatio * lightX, -zRatio * lightY);
+}
+
+// Computes blur radius, scale (always 1), and translate for a directional light;
+// (lightX, lightY, lightZ) is interpreted as a direction rather than a position.
+inline void GetDirectionalParams(SkScalar occluderZ, SkScalar lightX, SkScalar lightY,
+                                 SkScalar lightZ, SkScalar lightRadius,
+                                 SkScalar* blurRadius, SkScalar* scale, SkVector* translate) {
+    *blurRadius = lightRadius*occluderZ;
+    *scale = 1;
+    // Max z-ratio is "max expected elevation"/"min allowable z"
+    constexpr SkScalar kMaxZRatio = 64/SK_ScalarNearlyZero;
+    SkScalar zRatio = divide_and_pin(occluderZ, lightZ, 0.0f, kMaxZRatio);
+    *translate = SkVector::Make(-zRatio * lightX, -zRatio * lightY);
+}
+
+// Create the transformation to apply to a path to get its base shadow outline, given the light
+// parameters and the path's 3D transformation (given by ctm and zPlaneParams).
+// Also computes the blur radius to apply the transformed outline.
+bool GetSpotShadowTransform(const SkPoint3& lightPos, SkScalar lightRadius,
+ const SkMatrix& ctm, const SkPoint3& zPlaneParams,
+ const SkRect& pathBounds, bool directional,
+ SkMatrix* shadowTransform, SkScalar* radius);
+
+// get bounds prior to the ctm being applied
+void GetLocalBounds(const SkPath&, const SkDrawShadowRec&, const SkMatrix& ctm, SkRect* bounds);
+
+} // namespace SkDrawShadowMetrics
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkDraw_atlas.cpp b/gfx/skia/skia/src/core/SkDraw_atlas.cpp
new file mode 100644
index 0000000000..54bf06734c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw_atlas.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBlender.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurfaceProps.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkRasterPipelineOpContexts.h"
+#include "src/core/SkRasterPipelineOpList.h"
+#include "src/core/SkScan.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkVMBlitter.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/shaders/SkTransformShader.h"
+
+#include <cstdint>
+#include <optional>
+#include <utility>
+
+class SkBlitter;
+class SkColorInfo;
+class SkColorSpace;
+enum class SkBlendMode;
+
+
+// Fills rect r transformed by ctm. Axis-preserving matrices take the fast
+// FillRect path; otherwise the mapped quad is filled via scratchPath, which
+// is reused across calls to avoid reallocating.
+static void fill_rect(const SkMatrix& ctm, const SkRasterClip& rc,
+                      const SkRect& r, SkBlitter* blitter, SkPath* scratchPath) {
+    if (ctm.rectStaysRect()) {
+        SkRect dr;
+        ctm.mapRect(&dr, r);
+        SkScan::FillRect(dr, rc, blitter);
+    } else {
+        SkPoint pts[4];
+        r.toQuad(pts);
+        ctm.mapPoints(pts, pts, 4);
+
+        scratchPath->rewind();
+        scratchPath->addPoly(pts, 4, true);
+        SkScan::FillPath(*scratchPath, rc, blitter);
+    }
+}
+
+// Writes an unpremul float RGBA color into the uniform-color context, filling
+// both the 8-bit (lowp) and float (highp) representations.
+static void load_color(SkRasterPipeline_UniformColorCtx* ctx, const float rgba[]) {
+    // only need one of these. can I query the pipeline to know if its lowp or highp?
+    ctx->rgba[0] = SkScalarRoundToInt(rgba[0]*255); ctx->r = rgba[0];
+    ctx->rgba[1] = SkScalarRoundToInt(rgba[1]*255); ctx->g = rgba[1];
+    ctx->rgba[2] = SkScalarRoundToInt(rgba[2]*255); ctx->b = rgba[2];
+    ctx->rgba[3] = SkScalarRoundToInt(rgba[3]*255); ctx->a = rgba[3];
+}
+
+extern bool gUseSkVMBlitter;
+
+// A shader whose color can be mutated between draws without rebuilding the
+// program: the SkVM program reads fValues through a uniform pointer, and
+// updateColor() rewrites fValues in place (hence the mutable member).
+class UpdatableColorShader : public SkShaderBase {
+public:
+    explicit UpdatableColorShader(SkColorSpace* cs)
+            : fSteps{sk_srgb_singleton(), kUnpremul_SkAlphaType, cs, kUnpremul_SkAlphaType} {}
+    skvm::Color program(skvm::Builder* builder,
+                        skvm::Coord device,
+                        skvm::Coord local,
+                        skvm::Color paint,
+                        const MatrixRec&,
+                        const SkColorInfo& dst,
+                        skvm::Uniforms* uniforms,
+                        SkArenaAlloc* alloc) const override {
+        // Push a pointer (not the values) so later updateColor() calls are seen.
+        skvm::Uniform color = uniforms->pushPtr(fValues);
+        skvm::F32 r = builder->arrayF(color, 0);
+        skvm::F32 g = builder->arrayF(color, 1);
+        skvm::F32 b = builder->arrayF(color, 2);
+        skvm::F32 a = builder->arrayF(color, 3);
+
+        return {r, g, b, a};
+    }
+
+    // Converts c through the color-space steps, premultiplies, and stores it
+    // where the compiled program will read it.
+    void updateColor(SkColor c) const {
+        SkColor4f c4 = SkColor4f::FromColor(c);
+        fSteps.apply(c4.vec());
+        auto cp4 = c4.premul();
+        fValues[0] = cp4.fR;
+        fValues[1] = cp4.fG;
+        fValues[2] = cp4.fB;
+        fValues[3] = cp4.fA;
+    }
+
+private:
+    // For serialization. This will never be called.
+    Factory getFactory() const override { return nullptr; }
+    const char* getTypeName() const override { return nullptr; }
+
+    SkColorSpaceXformSteps fSteps;
+    mutable float fValues[4];
+};
+
+// Draws count sprites from the paint's shader (the atlas), one textured rect
+// per RSXform. Tries the raster-pipeline blitter first; falls back to the
+// SkVM blitter when that fails or when gUseSkVMBlitter is set.
+void SkDraw::drawAtlas(const SkRSXform xform[],
+                       const SkRect textures[],
+                       const SkColor colors[],
+                       int count,
+                       sk_sp<SkBlender> blender,
+                       const SkPaint& paint) {
+    sk_sp<SkShader> atlasShader = paint.refShader();
+    if (!atlasShader) {
+        return;
+    }
+
+    SkSTArenaAlloc<256> alloc;
+
+    SkPaint p(paint);
+    p.setAntiAlias(false);  // we never respect this for drawAtlas(or drawVertices)
+    p.setStyle(SkPaint::kFill_Style);
+    p.setShader(nullptr);
+    p.setMaskFilter(nullptr);
+
+    const SkMatrix& ctm = fMatrixProvider->localToDevice();
+    // The RSXForms can't contain perspective - only the CTM can.
+    const bool perspective = ctm.hasPerspective();
+
+    // Wraps the atlas shader so its matrix can be swapped per sprite.
+    auto transformShader = alloc.make<SkTransformShader>(*as_SB(atlasShader), perspective);
+
+    auto rpblit = [&]() {
+        SkRasterPipeline pipeline(&alloc);
+        SkSurfaceProps props = SkSurfacePropsCopyOrDefault(fProps);
+        SkStageRec rec = {
+                &pipeline, &alloc, fDst.colorType(), fDst.colorSpace(), p.getColor4f(), props};
+        // We pass an identity matrix here rather than the CTM. The CTM gets folded into the
+        // per-triangle matrix.
+        if (!as_SB(transformShader)->appendRootStages(rec, SkMatrix::I())) {
+            return false;
+        }
+
+        SkRasterPipeline_UniformColorCtx* uniformCtx = nullptr;
+        SkColorSpaceXformSteps steps(
+                sk_srgb_singleton(), kUnpremul_SkAlphaType, rec.fDstCS, kUnpremul_SkAlphaType);
+
+        if (colors) {
+            // we will late-bind the values in ctx, once for each color in the loop
+            uniformCtx = alloc.make<SkRasterPipeline_UniformColorCtx>();
+            rec.fPipeline->append(SkRasterPipelineOp::uniform_color_dst, uniformCtx);
+            if (std::optional<SkBlendMode> bm = as_BB(blender)->asBlendMode(); bm.has_value()) {
+                SkBlendMode_AppendStages(*bm, rec.fPipeline);
+            } else {
+                // Arbitrary blenders aren't supported on this path; fall back to SkVM.
+                return false;
+            }
+        }
+
+        bool isOpaque = !colors && transformShader->isOpaque();
+        if (p.getAlphaf() != 1) {
+            rec.fPipeline->append(SkRasterPipelineOp::scale_1_float,
+                                  alloc.make<float>(p.getAlphaf()));
+            isOpaque = false;
+        }
+
+        auto blitter = SkCreateRasterPipelineBlitter(
+                fDst, p, pipeline, isOpaque, &alloc, fRC->clipShader());
+        if (!blitter) {
+            return false;
+        }
+        SkPath scratchPath;
+
+        for (int i = 0; i < count; ++i) {
+            if (colors) {
+                SkColor4f c4 = SkColor4f::FromColor(colors[i]);
+                steps.apply(c4.vec());
+                load_color(uniformCtx, c4.premul().vec());
+            }
+
+            // Per-sprite matrix: texture-local -> sprite placement -> device.
+            SkMatrix mx;
+            mx.setRSXform(xform[i]);
+            mx.preTranslate(-textures[i].fLeft, -textures[i].fTop);
+            mx.postConcat(ctm);
+            if (transformShader->update(mx)) {
+                fill_rect(mx, *fRC, textures[i], blitter, &scratchPath);
+            }
+        }
+        return true;
+    };
+
+    if (gUseSkVMBlitter || !rpblit()) {
+        // SkVM fallback path.
+        UpdatableColorShader* colorShader = nullptr;
+        sk_sp<SkShader> shader;
+        if (colors) {
+            colorShader = alloc.make<UpdatableColorShader>(fDst.colorSpace());
+            shader = SkShaders::Blend(std::move(blender),
+                                      sk_ref_sp(colorShader),
+                                      sk_ref_sp(transformShader));
+        } else {
+            shader = sk_ref_sp(transformShader);
+        }
+        p.setShader(std::move(shader));
+        // We use identity here and fold the CTM into the update matrix.
+        if (auto blitter = SkVMBlitter::Make(fDst,
+                                             p,
+                                             SkMatrix::I(),
+                                             &alloc,
+                                             fRC->clipShader())) {
+            SkPath scratchPath;
+            for (int i = 0; i < count; ++i) {
+                if (colorShader) {
+                    colorShader->updateColor(colors[i]);
+                }
+
+                SkMatrix mx;
+                mx.setRSXform(xform[i]);
+                mx.preTranslate(-textures[i].fLeft, -textures[i].fTop);
+                mx.postConcat(ctm);
+                if (transformShader->update(mx)) {
+                    fill_rect(mx, *fRC, textures[i], blitter, &scratchPath);
+                }
+            }
+        }
+    }
+}
diff --git a/gfx/skia/skia/src/core/SkDraw_text.cpp b/gfx/skia/skia/src/core/SkDraw_text.cpp
new file mode 100644
index 0000000000..0263eed15e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw_text.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkZip.h"
+#include "src/core/SkAAClip.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkSurfacePriv.h"
+
+#include <cstdint>
+#include <climits>
+
+class SkCanvas;
+class SkPaint;
+namespace sktext { class GlyphRunList; }
+
+// disable warning : local variable used without having been initialized
+#if defined _WIN32
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Returns true when a glyph at this position stays safely inside device space.
+static bool check_glyph_position(SkPoint position) {
+    // Prevent glyphs from being drawn outside of or straddling the edge of device space.
+    // Comparisons written a little weirdly so that NaN coordinates are treated safely.
+    auto gt = [](float a, int b) { return !(a <= (float)b); };
+    auto lt = [](float a, int b) { return !(a >= (float)b); };
+    return !(gt(position.fX, INT_MAX - (INT16_MAX + SkTo<int>(UINT16_MAX))) ||
+             lt(position.fX, INT_MIN - (INT16_MIN + 0 /*UINT16_MIN*/)) ||
+             gt(position.fY, INT_MAX - (INT16_MAX + SkTo<int>(UINT16_MAX))) ||
+             lt(position.fY, INT_MIN - (INT16_MIN + 0 /*UINT16_MIN*/)));
+}
+
+// Blits the accepted glyphs' mask images into the destination. ARGB32 masks
+// (color glyphs) are drawn as sprites; alpha masks go through blitMask,
+// clipped either by the BW region or by the clip's bounds.
+void SkDraw::paintMasks(SkZip<const SkGlyph*, SkPoint> accepted, const SkPaint& paint) const {
+    // The size used for a typical blitter.
+    SkSTArenaAlloc<3308> alloc;
+    SkBlitter* blitter = SkBlitter::Choose(fDst,
+                                           fMatrixProvider->localToDevice(),
+                                           paint,
+                                           &alloc,
+                                           false,
+                                           fRC->clipShader(),
+                                           SkSurfacePropsCopyOrDefault(fProps));
+
+    // Wrap with an AA-clip blitter when the clip is anti-aliased.
+    SkAAClipBlitterWrapper wrapper{*fRC, blitter};
+    blitter = wrapper.getBlitter();
+
+    bool useRegion = fRC->isBW() && !fRC->isRect();
+
+    if (useRegion) {
+        // Complex BW clip: iterate each clip rect intersecting the mask.
+        for (auto [glyph, pos] : accepted) {
+            if (check_glyph_position(pos)) {
+                SkMask mask = glyph->mask(pos);
+
+                SkRegion::Cliperator clipper(fRC->bwRgn(), mask.fBounds);
+
+                if (!clipper.done()) {
+                    if (SkMask::kARGB32_Format == mask.fFormat) {
+                        SkBitmap bm;
+                        bm.installPixels(SkImageInfo::MakeN32Premul(mask.fBounds.size()),
+                                         mask.fImage,
+                                         mask.fRowBytes);
+                        this->drawSprite(bm, mask.fBounds.x(), mask.fBounds.y(), paint);
+                    } else {
+                        const SkIRect& cr = clipper.rect();
+                        do {
+                            blitter->blitMask(mask, cr);
+                            clipper.next();
+                        } while (!clipper.done());
+                    }
+                }
+            }
+        }
+    } else {
+        // Rect or AA clip: clip each mask against the overall clip bounds.
+        SkIRect clipBounds = fRC->isBW() ? fRC->bwRgn().getBounds()
+                                         : fRC->aaRgn().getBounds();
+        for (auto [glyph, pos] : accepted) {
+            if (check_glyph_position(pos)) {
+                SkMask mask = glyph->mask(pos);
+                SkIRect storage;
+                const SkIRect* bounds = &mask.fBounds;
+
+                // this extra test is worth it, assuming that most of the time it succeeds
+                // since we can avoid writing to storage
+                if (!clipBounds.containsNoEmptyCheck(mask.fBounds)) {
+                    if (!storage.intersect(mask.fBounds, clipBounds)) {
+                        continue;
+                    }
+                    bounds = &storage;
+                }
+
+                if (SkMask::kARGB32_Format == mask.fFormat) {
+                    SkBitmap bm;
+                    bm.installPixels(SkImageInfo::MakeN32Premul(mask.fBounds.size()),
+                                     mask.fImage,
+                                     mask.fRowBytes);
+                    this->drawSprite(bm, mask.fBounds.x(), mask.fBounds.y(), paint);
+                } else {
+                    blitter->blitMask(mask, *bounds);
+                }
+            }
+        }
+    }
+}
+
+// Entry point for CPU glyph drawing: early-outs on an empty clip, then
+// delegates to the glyph-run painter with the current local-to-device matrix.
+void SkDraw::drawGlyphRunList(SkCanvas* canvas,
+                              SkGlyphRunListPainterCPU* glyphPainter,
+                              const sktext::GlyphRunList& glyphRunList,
+                              const SkPaint& paint) const {
+
+    SkDEBUGCODE(this->validate();)
+
+    if (fRC->isEmpty()) {
+        return;
+    }
+
+    glyphPainter->drawForBitmapDevice(canvas, this, glyphRunList, paint,
+                                      fMatrixProvider->localToDevice());
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkDraw_vertices.cpp b/gfx/skia/skia/src/core/SkDraw_vertices.cpp
new file mode 100644
index 0000000000..78c48d1135
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDraw_vertices.cpp
@@ -0,0 +1,551 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkBlender.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkTLazy.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkRasterPipelineOpList.h"
+#include "src/core/SkScan.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkVMBlitter.h"
+#include "src/core/SkVertState.h"
+#include "src/core/SkVerticesPriv.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/shaders/SkTransformShader.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <optional>
+#include <utility>
+
+class SkBlitter;
+
+// A 4x3 matrix (column major) mapping a 2D point to a 4-component value,
+// used to interpolate RGBA across a triangle: result = col0*x + col1*y + col2.
+struct Matrix43 {
+    float fMat[12];    // column major
+
+    // Evaluates the affine map at (x, y), returning 4 floats at once.
+    skvx::float4 map(float x, float y) const {
+        return skvx::float4::Load(&fMat[0]) * x +
+               skvx::float4::Load(&fMat[4]) * y +
+               skvx::float4::Load(&fMat[8]);
+    }
+
+    // Pass a by value, so we don't have to worry about aliasing with this
+    void setConcat(const Matrix43 a, const SkMatrix& b) {
+        SkASSERT(!b.hasPerspective());
+
+        fMat[ 0] = a.dot(0, b.getScaleX(), b.getSkewY());
+        fMat[ 1] = a.dot(1, b.getScaleX(), b.getSkewY());
+        fMat[ 2] = a.dot(2, b.getScaleX(), b.getSkewY());
+        fMat[ 3] = a.dot(3, b.getScaleX(), b.getSkewY());
+
+        fMat[ 4] = a.dot(0, b.getSkewX(), b.getScaleY());
+        fMat[ 5] = a.dot(1, b.getSkewX(), b.getScaleY());
+        fMat[ 6] = a.dot(2, b.getSkewX(), b.getScaleY());
+        fMat[ 7] = a.dot(3, b.getSkewX(), b.getScaleY());
+
+        fMat[ 8] = a.dot(0, b.getTranslateX(), b.getTranslateY()) + a.fMat[ 8];
+        fMat[ 9] = a.dot(1, b.getTranslateX(), b.getTranslateY()) + a.fMat[ 9];
+        fMat[10] = a.dot(2, b.getTranslateX(), b.getTranslateY()) + a.fMat[10];
+        fMat[11] = a.dot(3, b.getTranslateX(), b.getTranslateY()) + a.fMat[11];
+    }
+
+private:
+    // Dot of row `index` of the first two columns with (x, y).
+    float dot(int index, float x, float y) const {
+        return fMat[index + 0] * x + fMat[index + 4] * y;
+    }
+};
+
+// Computes the matrix mapping a triangle's texture coordinates onto its
+// vertex positions; returns false if the poly-to-poly solve is degenerate.
+static bool SK_WARN_UNUSED_RESULT
+texture_to_matrix(const VertState& state, const SkPoint verts[], const SkPoint texs[],
+                  SkMatrix* matrix) {
+    SkPoint src[3], dst[3];
+
+    src[0] = texs[state.f0];
+    src[1] = texs[state.f1];
+    src[2] = texs[state.f2];
+    dst[0] = verts[state.f0];
+    dst[1] = verts[state.f1];
+    dst[2] = verts[state.f2];
+    return matrix->setPolyToPoly(src, dst, 3);
+}
+
+// Shader that interpolates per-vertex colors across a triangle. The pipeline
+// is built once; update() rewrites fM43/fM33 per triangle without re-appending
+// stages.
+class SkTriColorShader : public SkShaderBase {
+public:
+    SkTriColorShader(bool isOpaque, bool usePersp) : fIsOpaque(isOpaque), fUsePersp(usePersp) {}
+
+    // This gets called for each triangle, without re-calling appendStages.
+    bool update(const SkMatrix& ctmInv, const SkPoint pts[], const SkPMColor4f colors[],
+                int index0, int index1, int index2);
+
+protected:
+    bool appendStages(const SkStageRec& rec, const MatrixRec&) const override {
+        rec.fPipeline->append(SkRasterPipelineOp::seed_shader);
+        if (fUsePersp) {
+            rec.fPipeline->append(SkRasterPipelineOp::matrix_perspective, &fM33);
+        }
+        rec.fPipeline->append(SkRasterPipelineOp::matrix_4x3, &fM43);
+        return true;
+    }
+
+    skvm::Color program(skvm::Builder*,
+                        skvm::Coord,
+                        skvm::Coord,
+                        skvm::Color,
+                        const MatrixRec&,
+                        const SkColorInfo&,
+                        skvm::Uniforms*,
+                        SkArenaAlloc*) const override;
+
+private:
+    bool isOpaque() const override { return fIsOpaque; }
+    // For serialization. This will never be called.
+    Factory getFactory() const override { return nullptr; }
+    const char* getTypeName() const override { return nullptr; }
+
+    // If fUsePersp, we need both of these matrices,
+    // otherwise we can combine them, and only use fM43
+
+    Matrix43 fM43;
+    SkMatrix fM33;
+    const bool fIsOpaque;
+    const bool fUsePersp;    // controls our stages, and what we do in update()
+    mutable skvm::Uniform fColorMatrix;
+    mutable skvm::Uniform fCoordMatrix;
+
+    using INHERITED = SkShaderBase;
+};
+
+// SkVM version of the stage pipeline: optionally applies the perspective
+// 3x3 (fM33), then evaluates the color 4x3 (fM43). Matrices are pushed as
+// uniform pointers so per-triangle update() changes are picked up.
+skvm::Color SkTriColorShader::program(skvm::Builder* b,
+                                      skvm::Coord device,
+                                      skvm::Coord local,
+                                      skvm::Color,
+                                      const MatrixRec&,
+                                      const SkColorInfo&,
+                                      skvm::Uniforms* uniforms,
+                                      SkArenaAlloc* alloc) const {
+    fColorMatrix = uniforms->pushPtr(&fM43);
+
+    skvm::F32 x = local.x,
+              y = local.y;
+
+    if (fUsePersp) {
+        fCoordMatrix = uniforms->pushPtr(&fM33);
+        auto dot = [&, x, y](int row) {
+            return b->mad(x, b->arrayF(fCoordMatrix, row),
+                          b->mad(y, b->arrayF(fCoordMatrix, row + 3),
+                                    b->arrayF(fCoordMatrix, row + 6)));
+        };
+
+        // Perspective divide by the third row.
+        x = dot(0);
+        y = dot(1);
+        x = x * (1.0f / dot(2));
+        y = y * (1.0f / dot(2));
+    }
+
+    auto colorDot = [&, x, y](int row) {
+        return b->mad(x, b->arrayF(fColorMatrix, row),
+                      b->mad(y, b->arrayF(fColorMatrix, row + 4),
+                                b->arrayF(fColorMatrix, row + 8)));
+    };
+
+    skvm::Color color;
+    color.r = colorDot(0);
+    color.g = colorDot(1);
+    color.b = colorDot(2);
+    color.a = colorDot(3);
+    return color;
+}
+
+// Per-triangle setup: builds the map from device space back to barycentric
+// space (fM33) and the color-interpolation matrix (fM43). Returns false for
+// a degenerate (non-invertible) triangle.
+bool SkTriColorShader::update(const SkMatrix& ctmInv, const SkPoint pts[],
+                              const SkPMColor4f colors[], int index0, int index1, int index2) {
+    // m maps the unit triangle to the given triangle's vertices.
+    SkMatrix m, im;
+    m.reset();
+    m.set(0, pts[index1].fX - pts[index0].fX);
+    m.set(1, pts[index2].fX - pts[index0].fX);
+    m.set(2, pts[index0].fX);
+    m.set(3, pts[index1].fY - pts[index0].fY);
+    m.set(4, pts[index2].fY - pts[index0].fY);
+    m.set(5, pts[index0].fY);
+    if (!m.invert(&im)) {
+        return false;
+    }
+
+    fM33.setConcat(im, ctmInv);
+
+    // Columns: color deltas along each edge, plus the base color.
+    auto c0 = skvx::float4::Load(colors[index0].vec()),
+         c1 = skvx::float4::Load(colors[index1].vec()),
+         c2 = skvx::float4::Load(colors[index2].vec());
+
+    (c1 - c0).store(&fM43.fMat[0]);
+    (c2 - c0).store(&fM43.fMat[4]);
+    c0.store(&fM43.fMat[8]);
+
+    if (!fUsePersp) {
+        // No perspective: fold the coordinate map into the color matrix.
+        fM43.setConcat(fM43, fM33);
+    }
+    return true;
+}
+
+// Convert the SkColors into float colors. The conversion depends on some conditions:
+// - If the pixmap has a dst colorspace, we have to be "color-correct".
+// Do we map into dst-colorspace before or after we interpolate?
+// - We have to decide when to apply per-color alpha (before or after we interpolate)
+//
+// For now, we will take a simple approach, but recognize this is just a start:
+// - convert colors into dst colorspace before interpolation (matches gradients)
+// - apply per-color alpha before interpolation (matches old version of vertices)
+//
+static SkPMColor4f* convert_colors(const SkColor src[],
+ int count,
+ SkColorSpace* deviceCS,
+ SkArenaAlloc* alloc,
+ bool skipColorXform) {
+ SkPMColor4f* dst = alloc->makeArray<SkPMColor4f>(count);
+
+ // Passing `nullptr` for the destination CS effectively disables color conversion.
+ auto dstCS = skipColorXform ? nullptr : sk_ref_sp(deviceCS);
+ SkImageInfo srcInfo = SkImageInfo::Make(
+ count, 1, kBGRA_8888_SkColorType, kUnpremul_SkAlphaType, SkColorSpace::MakeSRGB());
+ SkImageInfo dstInfo =
+ SkImageInfo::Make(count, 1, kRGBA_F32_SkColorType, kPremul_SkAlphaType, dstCS);
+ SkAssertResult(SkConvertPixels(dstInfo, dst, 0, srcInfo, src, 0));
+ return dst;
+}
+
+static bool compute_is_opaque(const SkColor colors[], int count) {
+ uint32_t c = ~0;
+ for (int i = 0; i < count; ++i) {
+ c &= colors[i];
+ }
+ return SkColorGetA(c) == 0xFF;
+}
+
+static void fill_triangle_2(const VertState& state, SkBlitter* blitter, const SkRasterClip& rc,
+ const SkPoint dev2[]) {
+ SkPoint tmp[] = {
+ dev2[state.f0], dev2[state.f1], dev2[state.f2]
+ };
+ SkScan::FillTriangle(tmp, rc, blitter);
+}
+
+static constexpr int kMaxClippedTrianglePointCount = 4;
+static void fill_triangle_3(const VertState& state, SkBlitter* blitter, const SkRasterClip& rc,
+ const SkPoint3 dev3[]) {
+ // Compute the crossing point (across zero) for the two values, expressed as a
+ // normalized 0...1 value. If curr is 0, returns 0. If next is 0, returns 1.
+ auto computeT = [](float curr, float next) {
+ // Check that 0 is between next and curr.
+ SkASSERT((next <= 0 && 0 < curr) || (curr <= 0 && 0 < next));
+ float t = curr / (curr - next);
+ SkASSERT(0 <= t && t <= 1);
+ return t;
+ };
+
+ auto lerp = [](SkPoint3 curr, SkPoint3 next, float t) {
+ return curr + t * (next - curr);
+ };
+
+ constexpr float tol = 0.05f;
+ // tol is the nudge away from zero, to keep the numerics nice.
+ // Think of it as our near-clipping-plane (or w-plane).
+ auto clip = [&](SkPoint3 curr, SkPoint3 next) {
+ // Return the point between curr and next where the fZ value crosses tol.
+ // To be (really) perspective correct, we should be computing based on 1/Z, not Z.
+ // For now, this is close enough (and faster).
+ return lerp(curr, next, computeT(curr.fZ - tol, next.fZ - tol));
+ };
+
+ // Clip a triangle (based on its homogeneous W values), and return the projected polygon.
+ // Since we only clip against one "edge"/plane, the max number of points in the clipped
+ // polygon is 4.
+ auto clipTriangle = [&](SkPoint dst[], const int idx[3], const SkPoint3 pts[]) -> int {
+ SkPoint3 outPoints[kMaxClippedTrianglePointCount];
+ SkPoint3* outP = outPoints;
+
+ for (int i = 0; i < 3; ++i) {
+ int curr = idx[i];
+ int next = idx[(i + 1) % 3];
+ if (pts[curr].fZ > tol) {
+ *outP++ = pts[curr];
+ if (pts[next].fZ <= tol) { // curr is IN, next is OUT
+ *outP++ = clip(pts[curr], pts[next]);
+ }
+ } else {
+ if (pts[next].fZ > tol) { // curr is OUT, next is IN
+ *outP++ = clip(pts[curr], pts[next]);
+ }
+ }
+ }
+
+ const int count = SkTo<int>(outP - outPoints);
+ SkASSERT(count == 0 || count == 3 || count == 4);
+ for (int i = 0; i < count; ++i) {
+ float scale = sk_ieee_float_divide(1.0f, outPoints[i].fZ);
+ dst[i].set(outPoints[i].fX * scale, outPoints[i].fY * scale);
+ }
+ return count;
+ };
+
+ SkPoint tmp[kMaxClippedTrianglePointCount];
+ int idx[] = { state.f0, state.f1, state.f2 };
+ if (int n = clipTriangle(tmp, idx, dev3)) {
+ // TODO: SkScan::FillConvexPoly(tmp, n, ...);
+ SkASSERT(n == 3 || n == 4);
+ SkScan::FillTriangle(tmp, rc, blitter);
+ if (n == 4) {
+ tmp[1] = tmp[2];
+ tmp[2] = tmp[3];
+ SkScan::FillTriangle(tmp, rc, blitter);
+ }
+ }
+}
+
+static void fill_triangle(const VertState& state, SkBlitter* blitter, const SkRasterClip& rc,
+ const SkPoint dev2[], const SkPoint3 dev3[]) {
+ if (dev3) {
+ fill_triangle_3(state, blitter, rc, dev3);
+ } else {
+ fill_triangle_2(state, blitter, rc, dev2);
+ }
+}
+
+extern bool gUseSkVMBlitter;
+
+void SkDraw::drawFixedVertices(const SkVertices* vertices,
+ sk_sp<SkBlender> blender,
+ const SkPaint& paint,
+ const SkMatrix& ctmInverse,
+ const SkPoint* dev2,
+ const SkPoint3* dev3,
+ SkArenaAlloc* outerAlloc,
+ bool skipColorXform) const {
+ SkVerticesPriv info(vertices->priv());
+
+ const int vertexCount = info.vertexCount();
+ const int indexCount = info.indexCount();
+ const SkPoint* positions = info.positions();
+ const SkPoint* texCoords = info.texCoords();
+ const uint16_t* indices = info.indices();
+ const SkColor* colors = info.colors();
+
+ SkShader* paintShader = paint.getShader();
+
+ if (paintShader) {
+ if (!texCoords) {
+ texCoords = positions;
+ }
+ } else {
+ texCoords = nullptr;
+ }
+
+ bool blenderIsDst = false;
+ // We can simplify things for certain blend modes. This is for speed, and SkShader_Blend
+ // itself insists we don't pass kSrc or kDst to it.
+ if (std::optional<SkBlendMode> bm = as_BB(blender)->asBlendMode(); bm.has_value() && colors) {
+ switch (*bm) {
+ case SkBlendMode::kSrc:
+ colors = nullptr;
+ break;
+ case SkBlendMode::kDst:
+ blenderIsDst = true;
+ texCoords = nullptr;
+ paintShader = nullptr;
+ break;
+ default: break;
+ }
+ }
+
+ // There is a paintShader iff there is texCoords.
+ SkASSERT((texCoords != nullptr) == (paintShader != nullptr));
+
+ SkMatrix ctm = fMatrixProvider->localToDevice();
+ // Explicit texture coords can't contain perspective - only the CTM can.
+ const bool usePerspective = ctm.hasPerspective();
+
+ SkTriColorShader* triColorShader = nullptr;
+ SkPMColor4f* dstColors = nullptr;
+ if (colors) {
+ dstColors =
+ convert_colors(colors, vertexCount, fDst.colorSpace(), outerAlloc, skipColorXform);
+ triColorShader = outerAlloc->make<SkTriColorShader>(compute_is_opaque(colors, vertexCount),
+ usePerspective);
+ }
+
+ // Combines per-vertex colors with 'shader' using 'blender'.
+ auto applyShaderColorBlend = [&](SkShader* shader) -> sk_sp<SkShader> {
+ if (!colors) {
+ return sk_ref_sp(shader);
+ }
+ if (blenderIsDst) {
+ return sk_ref_sp(triColorShader);
+ }
+ sk_sp<SkShader> shaderWithWhichToBlend;
+ if (!shader) {
+ // When there is no shader then the blender applies to the vertex colors and opaque
+ // paint color.
+ shaderWithWhichToBlend = SkShaders::Color(paint.getColor4f().makeOpaque(), nullptr);
+ } else {
+ shaderWithWhichToBlend = sk_ref_sp(shader);
+ }
+ return SkShaders::Blend(blender,
+ sk_ref_sp(triColorShader),
+ std::move(shaderWithWhichToBlend));
+ };
+
+ // If there are separate texture coords then we need to insert a transform shader to update
+ // a matrix derived from each triangle's coords. In that case we will fold the CTM into
+ // each update and use an identity matrix provider.
+ SkTransformShader* transformShader = nullptr;
+ const SkMatrixProvider* matrixProvider = fMatrixProvider;
+ SkTLazy<SkMatrixProvider> identityProvider;
+ if (texCoords && texCoords != positions) {
+ paintShader = transformShader = outerAlloc->make<SkTransformShader>(*as_SB(paintShader),
+ usePerspective);
+ matrixProvider = identityProvider.init(SkMatrix::I());
+ }
+ sk_sp<SkShader> blenderShader = applyShaderColorBlend(paintShader);
+
+ SkPaint finalPaint{paint};
+ finalPaint.setShader(std::move(blenderShader));
+
+ auto rpblit = [&]() {
+ VertState state(vertexCount, indices, indexCount);
+ VertState::Proc vertProc = state.chooseProc(info.mode());
+ SkSurfaceProps props = SkSurfacePropsCopyOrDefault(fProps);
+
+ auto blitter = SkCreateRasterPipelineBlitter(fDst,
+ finalPaint,
+ matrixProvider->localToDevice(),
+ outerAlloc,
+ fRC->clipShader(),
+ props);
+ if (!blitter) {
+ return false;
+ }
+ while (vertProc(&state)) {
+ if (triColorShader && !triColorShader->update(ctmInverse, positions, dstColors,
+ state.f0, state.f1, state.f2)) {
+ continue;
+ }
+
+ SkMatrix localM;
+ if (!transformShader || (texture_to_matrix(state, positions, texCoords, &localM) &&
+ transformShader->update(SkMatrix::Concat(ctm, localM)))) {
+ fill_triangle(state, blitter, *fRC, dev2, dev3);
+ }
+ }
+ return true;
+ };
+
+ if (gUseSkVMBlitter || !rpblit()) {
+ VertState state(vertexCount, indices, indexCount);
+ VertState::Proc vertProc = state.chooseProc(info.mode());
+
+ auto blitter = SkVMBlitter::Make(fDst,
+ finalPaint,
+ matrixProvider->localToDevice(),
+ outerAlloc,
+ this->fRC->clipShader());
+ if (!blitter) {
+ return;
+ }
+ while (vertProc(&state)) {
+ SkMatrix localM;
+ if (transformShader && !(texture_to_matrix(state, positions, texCoords, &localM) &&
+ transformShader->update(SkMatrix::Concat(ctm, localM)))) {
+ continue;
+ }
+
+ if (triColorShader && !triColorShader->update(ctmInverse, positions, dstColors,state.f0,
+ state.f1, state.f2)) {
+ continue;
+ }
+
+ fill_triangle(state, blitter, *fRC, dev2, dev3);
+ }
+ }
+}
+
+void SkDraw::drawVertices(const SkVertices* vertices,
+ sk_sp<SkBlender> blender,
+ const SkPaint& paint,
+ bool skipColorXform) const {
+ SkVerticesPriv info(vertices->priv());
+ const int vertexCount = info.vertexCount();
+ const int indexCount = info.indexCount();
+
+ // abort early if there is nothing to draw
+ if (vertexCount < 3 || (indexCount > 0 && indexCount < 3) || fRC->isEmpty()) {
+ return;
+ }
+ SkMatrix ctm = fMatrixProvider->localToDevice();
+ SkMatrix ctmInv;
+ if (!ctm.invert(&ctmInv)) {
+ return;
+ }
+
+ constexpr size_t kDefVertexCount = 16;
+ constexpr size_t kOuterSize = sizeof(SkTriColorShader) +
+ (2 * sizeof(SkPoint) + sizeof(SkColor4f)) * kDefVertexCount;
+ SkSTArenaAlloc<kOuterSize> outerAlloc;
+
+ SkPoint* dev2 = nullptr;
+ SkPoint3* dev3 = nullptr;
+
+ if (ctm.hasPerspective()) {
+ dev3 = outerAlloc.makeArray<SkPoint3>(vertexCount);
+ ctm.mapHomogeneousPoints(dev3, info.positions(), vertexCount);
+ // similar to the bounds check for 2d points (below)
+ if (!SkScalarsAreFinite((const SkScalar*)dev3, vertexCount * 3)) {
+ return;
+ }
+ } else {
+ dev2 = outerAlloc.makeArray<SkPoint>(vertexCount);
+ ctm.mapPoints(dev2, info.positions(), vertexCount);
+
+ SkRect bounds;
+ // this also sets bounds to empty if we see a non-finite value
+ bounds.setBounds(dev2, vertexCount);
+ if (bounds.isEmpty()) {
+ return;
+ }
+ }
+
+ this->drawFixedVertices(
+ vertices, std::move(blender), paint, ctmInv, dev2, dev3, &outerAlloc, skipColorXform);
+}
diff --git a/gfx/skia/skia/src/core/SkDrawable.cpp b/gfx/skia/skia/src/core/SkDrawable.cpp
new file mode 100644
index 0000000000..15f6222613
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkDrawable.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkDrawable.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+
+class SkPicture;
+
+static int32_t next_generation_id() {
+ static std::atomic<int32_t> nextID{1};
+
+ int32_t id;
+ do {
+ id = nextID.fetch_add(1, std::memory_order_relaxed);
+ } while (id == 0);
+ return id;
+}
+
+SkDrawable::SkDrawable() : fGenerationID(0) {}
+
+static void draw_bbox(SkCanvas* canvas, const SkRect& r) {
+ SkPaint paint;
+ paint.setStyle(SkPaint::kStroke_Style);
+ paint.setColor(0xFFFF7088);
+ canvas->drawRect(r, paint);
+ canvas->drawLine(r.left(), r.top(), r.right(), r.bottom(), paint);
+ canvas->drawLine(r.left(), r.bottom(), r.right(), r.top(), paint);
+}
+
+void SkDrawable::draw(SkCanvas* canvas, const SkMatrix* matrix) {
+ SkAutoCanvasRestore acr(canvas, true);
+ if (matrix) {
+ canvas->concat(*matrix);
+ }
+ this->onDraw(canvas);
+
+ if ((false)) {
+ draw_bbox(canvas, this->getBounds());
+ }
+}
+
+void SkDrawable::draw(SkCanvas* canvas, SkScalar x, SkScalar y) {
+ SkMatrix matrix = SkMatrix::Translate(x, y);
+ this->draw(canvas, &matrix);
+}
+
+SkPicture* SkDrawable::newPictureSnapshot() {
+ return this->onNewPictureSnapshot();
+}
+
+uint32_t SkDrawable::getGenerationID() {
+ if (0 == fGenerationID) {
+ fGenerationID = next_generation_id();
+ }
+ return fGenerationID;
+}
+
+SkRect SkDrawable::getBounds() {
+ return this->onGetBounds();
+}
+
+size_t SkDrawable::approximateBytesUsed() {
+ return this->onApproximateBytesUsed();
+}
+size_t SkDrawable::onApproximateBytesUsed() {
+ return 0;
+}
+
+void SkDrawable::notifyDrawingChanged() {
+ fGenerationID = 0;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+SkPicture* SkDrawable::onNewPictureSnapshot() {
+ SkPictureRecorder recorder;
+
+ const SkRect bounds = this->getBounds();
+ SkCanvas* canvas = recorder.beginRecording(bounds);
+ this->draw(canvas);
+ if ((false)) {
+ draw_bbox(canvas, bounds);
+ }
+ return recorder.finishRecordingAsPicture().release();
+}
diff --git a/gfx/skia/skia/src/core/SkEdge.cpp b/gfx/skia/skia/src/core/SkEdge.cpp
new file mode 100644
index 0000000000..39ff122005
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdge.cpp
@@ -0,0 +1,524 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkEdge.h"
+
+#include "include/private/base/SkTo.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkFDot6.h"
+
+#include <utility>
+
+/*
+ In setLine, setQuadratic, setCubic, the first thing we do is to convert
+ the points into FDot6. This is modulated by the shift parameter, which
+ will either be 0, or something like 2 for antialiasing.
+
+ In the float case, we want to turn the float into .6 by saying pt * 64,
+ or pt * 256 for antialiasing. This is implemented as 1 << (shift + 6).
+
+ In the fixed case, we want to turn the fixed into .6 by saying pt >> 10,
+ or pt >> 8 for antialiasing. This is implemented as pt >> (10 - shift).
+*/
+
+static inline SkFixed SkFDot6ToFixedDiv2(SkFDot6 value) {
+ // we want to return SkFDot6ToFixed(value >> 1), but we don't want to throw
+ // away data in value, so just perform a modify up-shift
+ return SkLeftShift(value, 16 - 6 - 1);
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+void SkEdge::dump() const {
+ int realLastY = SkScalarToFixed(fLastY);
+ if (fCurveCount > 0) {
+ realLastY = static_cast<const SkQuadraticEdge*>(this)->fQLastY;
+ } else if (fCurveCount < 0) {
+ realLastY = static_cast<const SkCubicEdge*>(this)->fCLastY;
+ }
+ SkDebugf("edge (%c): firstY:%d lastY:%d (%g) x:%g dx:%g w:%d\n",
+ fCurveCount > 0 ? 'Q' : (fCurveCount < 0 ? 'C' : 'L'),
+ fFirstY,
+ fLastY,
+ SkFixedToFloat(realLastY),
+ SkFixedToFloat(fX),
+ SkFixedToFloat(fDX),
+ fWinding);
+}
+#endif
+
+int SkEdge::setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect* clip, int shift) {
+ SkFDot6 x0, y0, x1, y1;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(p0.fX, shift);
+ y0 = SkScalarRoundToFDot6(p0.fY, shift);
+ x1 = SkScalarRoundToFDot6(p1.fX, shift);
+ y1 = SkScalarRoundToFDot6(p1.fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(p0.fX * scale);
+ y0 = int(p0.fY * scale);
+ x1 = int(p1.fX * scale);
+ y1 = int(p1.fY * scale);
+#endif
+ }
+
+ int winding = 1;
+
+ if (y0 > y1) {
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
+
+ // are we a zero-height line?
+ if (top == bot) {
+ return 0;
+ }
+ // are we completely above or below the clip?
+ if (clip && (top >= clip->fBottom || bot <= clip->fTop)) {
+ return 0;
+ }
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+ fEdgeType = kLine_Type;
+ fCurveCount = 0;
+ fWinding = SkToS8(winding);
+ fCurveShift = 0;
+
+ if (clip) {
+ this->chopLineWithClip(*clip);
+ }
+ return 1;
+}
+
+// called from a curve subclass
+int SkEdge::updateLine(SkFixed x0, SkFixed y0, SkFixed x1, SkFixed y1)
+{
+ SkASSERT(fWinding == 1 || fWinding == -1);
+ SkASSERT(fCurveCount != 0);
+// SkASSERT(fCurveShift != 0);
+
+ y0 >>= 10;
+ y1 >>= 10;
+
+ SkASSERT(y0 <= y1);
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
+
+// SkASSERT(top >= fFirstY);
+
+ // are we a zero-height line?
+ if (top == bot)
+ return 0;
+
+ x0 >>= 10;
+ x1 >>= 10;
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+
+ return 1;
+}
+
+void SkEdge::chopLineWithClip(const SkIRect& clip)
+{
+ int top = fFirstY;
+
+ SkASSERT(top < clip.fBottom);
+
+ // clip the line to the top
+ if (top < clip.fTop)
+ {
+ SkASSERT(fLastY >= clip.fTop);
+ fX += fDX * (clip.fTop - top);
+ fFirstY = clip.fTop;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* We store 1<<shift in a (signed) byte, so its maximum value is 1<<6 == 64.
+ Note that this limits the number of lines we use to approximate a curve.
+ If we need to increase this, we need to store fCurveCount in something
+ larger than int8_t.
+*/
+#define MAX_COEFF_SHIFT 6
+
+static inline SkFDot6 cheap_distance(SkFDot6 dx, SkFDot6 dy)
+{
+ dx = SkAbs32(dx);
+ dy = SkAbs32(dy);
+ // return max + min/2
+ if (dx > dy)
+ dx += dy >> 1;
+ else
+ dx = dy + (dx >> 1);
+ return dx;
+}
+
+static inline int diff_to_shift(SkFDot6 dx, SkFDot6 dy, int shiftAA = 2)
+{
+ // cheap calc of distance from center of p0-p2 to the center of the curve
+ SkFDot6 dist = cheap_distance(dx, dy);
+
+ // shift down dist (it is currently in dot6)
+ // down by 3 should give us 1/8 pixel accuracy (assuming our dist is accurate...)
+ // this is chosen by heuristic: make it as big as possible (to minimize segments)
+ // ... but small enough so that our curves still look smooth
+ // When shift > 0, we're using AA and everything is scaled up so we can
+ // lower the accuracy.
+ dist = (dist + (1 << 4)) >> (3 + shiftAA);
+
+ // each subdivision (shift value) cuts this dist (error) by 1/4
+ return (32 - SkCLZ(dist)) >> 1;
+}
+
+bool SkQuadraticEdge::setQuadraticWithoutUpdate(const SkPoint pts[3], int shift) {
+ SkFDot6 x0, y0, x1, y1, x2, y2;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(pts[0].fX, shift);
+ y0 = SkScalarRoundToFDot6(pts[0].fY, shift);
+ x1 = SkScalarRoundToFDot6(pts[1].fX, shift);
+ y1 = SkScalarRoundToFDot6(pts[1].fY, shift);
+ x2 = SkScalarRoundToFDot6(pts[2].fX, shift);
+ y2 = SkScalarRoundToFDot6(pts[2].fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(pts[0].fX * scale);
+ y0 = int(pts[0].fY * scale);
+ x1 = int(pts[1].fX * scale);
+ y1 = int(pts[1].fY * scale);
+ x2 = int(pts[2].fX * scale);
+ y2 = int(pts[2].fY * scale);
+#endif
+ }
+
+ int winding = 1;
+ if (y0 > y2)
+ {
+ using std::swap;
+ swap(x0, x2);
+ swap(y0, y2);
+ winding = -1;
+ }
+ SkASSERT(y0 <= y1 && y1 <= y2);
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y2);
+
+ // are we a zero-height quad (line)?
+ if (top == bot)
+ return 0;
+
+ // compute number of steps needed (1 << shift)
+ {
+ SkFDot6 dx = (SkLeftShift(x1, 1) - x0 - x2) >> 2;
+ SkFDot6 dy = (SkLeftShift(y1, 1) - y0 - y2) >> 2;
+ // This is a little confusing:
+ // before this line, shift is the scale up factor for AA;
+ // after this line, shift is the fCurveShift.
+ shift = diff_to_shift(dx, dy, shift);
+ SkASSERT(shift >= 0);
+ }
+ // need at least 1 subdivision for our bias trick
+ if (shift == 0) {
+ shift = 1;
+ } else if (shift > MAX_COEFF_SHIFT) {
+ shift = MAX_COEFF_SHIFT;
+ }
+
+ fWinding = SkToS8(winding);
+ //fCubicDShift only set for cubics
+ fEdgeType = kQuad_Type;
+ fCurveCount = SkToS8(1 << shift);
+
+ /*
+ * We want to reformulate into polynomial form, to make it clear how we
+ * should forward-difference.
+ *
+ * p0 (1 - t)^2 + p1 t(1 - t) + p2 t^2 ==> At^2 + Bt + C
+ *
+ * A = p0 - 2p1 + p2
+ * B = 2(p1 - p0)
+ * C = p0
+ *
+ * Our caller must have constrained our inputs (p0..p2) to all fit into
+ * 16.16. However, as seen above, we sometimes compute values that can be
+ * larger (e.g. B = 2*(p1 - p0)). To guard against overflow, we will store
+ * A and B at 1/2 of their actual value, and just apply a 2x scale during
+ * application in updateQuadratic(). Hence we store (shift - 1) in
+ * fCurveShift.
+ */
+
+ fCurveShift = SkToU8(shift - 1);
+
+ SkFixed A = SkFDot6ToFixedDiv2(x0 - x1 - x1 + x2); // 1/2 the real value
+ SkFixed B = SkFDot6ToFixed(x1 - x0); // 1/2 the real value
+
+ fQx = SkFDot6ToFixed(x0);
+ fQDx = B + (A >> shift); // biased by shift
+ fQDDx = A >> (shift - 1); // biased by shift
+
+ A = SkFDot6ToFixedDiv2(y0 - y1 - y1 + y2); // 1/2 the real value
+ B = SkFDot6ToFixed(y1 - y0); // 1/2 the real value
+
+ fQy = SkFDot6ToFixed(y0);
+ fQDy = B + (A >> shift); // biased by shift
+ fQDDy = A >> (shift - 1); // biased by shift
+
+ fQLastX = SkFDot6ToFixed(x2);
+ fQLastY = SkFDot6ToFixed(y2);
+
+ return true;
+}
+
+int SkQuadraticEdge::setQuadratic(const SkPoint pts[3], int shift) {
+ if (!setQuadraticWithoutUpdate(pts, shift)) {
+ return 0;
+ }
+ return this->updateQuadratic();
+}
+
+int SkQuadraticEdge::updateQuadratic()
+{
+ int success;
+ int count = fCurveCount;
+ SkFixed oldx = fQx;
+ SkFixed oldy = fQy;
+ SkFixed dx = fQDx;
+ SkFixed dy = fQDy;
+ SkFixed newx, newy;
+ int shift = fCurveShift;
+
+ SkASSERT(count > 0);
+
+ do {
+ if (--count > 0)
+ {
+ newx = oldx + (dx >> shift);
+ dx += fQDDx;
+ newy = oldy + (dy >> shift);
+ dy += fQDDy;
+ }
+ else // last segment
+ {
+ newx = fQLastX;
+ newy = fQLastY;
+ }
+ success = this->updateLine(oldx, oldy, newx, newy);
+ oldx = newx;
+ oldy = newy;
+ } while (count > 0 && !success);
+
+ fQx = newx;
+ fQy = newy;
+ fQDx = dx;
+ fQDy = dy;
+ fCurveCount = SkToS8(count);
+ return success;
+}
+
+/////////////////////////////////////////////////////////////////////////
+
+static inline int SkFDot6UpShift(SkFDot6 x, int upShift) {
+ SkASSERT((SkLeftShift(x, upShift) >> upShift) == x);
+ return SkLeftShift(x, upShift);
+}
+
+/* f(1/3) = (8a + 12b + 6c + d) / 27
+ f(2/3) = (a + 6b + 12c + 8d) / 27
+
+ f(1/3)-b = (8a - 15b + 6c + d) / 27
+ f(2/3)-c = (a + 6b - 15c + 8d) / 27
+
+ use 16/512 to approximate 1/27
+*/
+static SkFDot6 cubic_delta_from_line(SkFDot6 a, SkFDot6 b, SkFDot6 c, SkFDot6 d)
+{
+ // since our parameters may be negative, we don't use << to avoid ASAN warnings
+ SkFDot6 oneThird = (a*8 - b*15 + 6*c + d) * 19 >> 9;
+ SkFDot6 twoThird = (a + 6*b - c*15 + d*8) * 19 >> 9;
+
+ return std::max(SkAbs32(oneThird), SkAbs32(twoThird));
+}
+
+bool SkCubicEdge::setCubicWithoutUpdate(const SkPoint pts[4], int shift, bool sortY) {
+ SkFDot6 x0, y0, x1, y1, x2, y2, x3, y3;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(pts[0].fX, shift);
+ y0 = SkScalarRoundToFDot6(pts[0].fY, shift);
+ x1 = SkScalarRoundToFDot6(pts[1].fX, shift);
+ y1 = SkScalarRoundToFDot6(pts[1].fY, shift);
+ x2 = SkScalarRoundToFDot6(pts[2].fX, shift);
+ y2 = SkScalarRoundToFDot6(pts[2].fY, shift);
+ x3 = SkScalarRoundToFDot6(pts[3].fX, shift);
+ y3 = SkScalarRoundToFDot6(pts[3].fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(pts[0].fX * scale);
+ y0 = int(pts[0].fY * scale);
+ x1 = int(pts[1].fX * scale);
+ y1 = int(pts[1].fY * scale);
+ x2 = int(pts[2].fX * scale);
+ y2 = int(pts[2].fY * scale);
+ x3 = int(pts[3].fX * scale);
+ y3 = int(pts[3].fY * scale);
+#endif
+ }
+
+ int winding = 1;
+ if (sortY && y0 > y3)
+ {
+ using std::swap;
+ swap(x0, x3);
+ swap(x1, x2);
+ swap(y0, y3);
+ swap(y1, y2);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y3);
+
+ // are we a zero-height cubic (line)?
+ if (sortY && top == bot)
+ return 0;
+
+ // compute number of steps needed (1 << shift)
+ {
+ // Can't use (center of curve - center of baseline), since center-of-curve
+ // need not be the max delta from the baseline (it could even be coincident)
+ // so we try just looking at the two off-curve points
+ SkFDot6 dx = cubic_delta_from_line(x0, x1, x2, x3);
+ SkFDot6 dy = cubic_delta_from_line(y0, y1, y2, y3);
+ // add 1 (by observation)
+ shift = diff_to_shift(dx, dy) + 1;
+ }
+ // need at least 1 subdivision for our bias trick
+ SkASSERT(shift > 0);
+ if (shift > MAX_COEFF_SHIFT) {
+ shift = MAX_COEFF_SHIFT;
+ }
+
+ /* Since our in coming data is initially shifted down by 10 (or 8 in
+ antialias). That means the most we can shift up is 8. However, we
+ compute coefficients with a 3*, so the safest upshift is really 6
+ */
+ int upShift = 6; // largest safe value
+ int downShift = shift + upShift - 10;
+ if (downShift < 0) {
+ downShift = 0;
+ upShift = 10 - shift;
+ }
+
+ fWinding = SkToS8(winding);
+ fEdgeType = kCubic_Type;
+ fCurveCount = SkToS8(SkLeftShift(-1, shift));
+ fCurveShift = SkToU8(shift);
+ fCubicDShift = SkToU8(downShift);
+
+ SkFixed B = SkFDot6UpShift(3 * (x1 - x0), upShift);
+ SkFixed C = SkFDot6UpShift(3 * (x0 - x1 - x1 + x2), upShift);
+ SkFixed D = SkFDot6UpShift(x3 + 3 * (x1 - x2) - x0, upShift);
+
+ fCx = SkFDot6ToFixed(x0);
+ fCDx = B + (C >> shift) + (D >> 2*shift); // biased by shift
+ fCDDx = 2*C + (3*D >> (shift - 1)); // biased by 2*shift
+ fCDDDx = 3*D >> (shift - 1); // biased by 2*shift
+
+ B = SkFDot6UpShift(3 * (y1 - y0), upShift);
+ C = SkFDot6UpShift(3 * (y0 - y1 - y1 + y2), upShift);
+ D = SkFDot6UpShift(y3 + 3 * (y1 - y2) - y0, upShift);
+
+ fCy = SkFDot6ToFixed(y0);
+ fCDy = B + (C >> shift) + (D >> 2*shift); // biased by shift
+ fCDDy = 2*C + (3*D >> (shift - 1)); // biased by 2*shift
+ fCDDDy = 3*D >> (shift - 1); // biased by 2*shift
+
+ fCLastX = SkFDot6ToFixed(x3);
+ fCLastY = SkFDot6ToFixed(y3);
+
+ return true;
+}
+
+int SkCubicEdge::setCubic(const SkPoint pts[4], int shift) {
+ if (!this->setCubicWithoutUpdate(pts, shift)) {
+ return 0;
+ }
+ return this->updateCubic();
+}
+
+int SkCubicEdge::updateCubic()
+{
+ int success;
+ int count = fCurveCount;
+ SkFixed oldx = fCx;
+ SkFixed oldy = fCy;
+ SkFixed newx, newy;
+ const int ddshift = fCurveShift;
+ const int dshift = fCubicDShift;
+
+ SkASSERT(count < 0);
+
+ do {
+ if (++count < 0)
+ {
+ newx = oldx + (fCDx >> dshift);
+ fCDx += fCDDx >> ddshift;
+ fCDDx += fCDDDx;
+
+ newy = oldy + (fCDy >> dshift);
+ fCDy += fCDDy >> ddshift;
+ fCDDy += fCDDDy;
+ }
+ else // last segment
+ {
+ // SkDebugf("LastX err=%d, LastY err=%d\n", (oldx + (fCDx >> shift) - fLastX), (oldy + (fCDy >> shift) - fLastY));
+ newx = fCLastX;
+ newy = fCLastY;
+ }
+
+ // we want to say SkASSERT(oldy <= newy), but our finite fixedpoint
+ // doesn't always achieve that, so we have to explicitly pin it here.
+ if (newy < oldy) {
+ newy = oldy;
+ }
+
+ success = this->updateLine(oldx, oldy, newx, newy);
+ oldx = newx;
+ oldy = newy;
+ } while (count < 0 && !success);
+
+ fCx = newx;
+ fCy = newy;
+ fCurveCount = SkToS8(count);
+ return success;
+}
diff --git a/gfx/skia/skia/src/core/SkEdge.h b/gfx/skia/skia/src/core/SkEdge.h
new file mode 100644
index 0000000000..99c30c530b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdge.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEdge_DEFINED
+#define SkEdge_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkFDot6.h"
+
+#include <utility>
+
+// This correctly favors the lower-pixel when y0 is on a 1/2 pixel boundary
+#define SkEdge_Compute_DY(top, y0) (SkLeftShift(top, 6) + 32 - (y0))
+
+struct SkEdge {
+ enum Type {
+ kLine_Type,
+ kQuad_Type,
+ kCubic_Type
+ };
+
+ SkEdge* fNext;
+ SkEdge* fPrev;
+
+ SkFixed fX;
+ SkFixed fDX;
+ int32_t fFirstY;
+ int32_t fLastY;
+ Type fEdgeType; // Remembers the *initial* edge type
+ int8_t fCurveCount; // only used by kQuad(+) and kCubic(-)
+ uint8_t fCurveShift; // appled to all Dx/DDx/DDDx except for fCubicDShift exception
+ uint8_t fCubicDShift; // applied to fCDx and fCDy only in cubic
+ int8_t fWinding; // 1 or -1
+
+ int setLine(const SkPoint& p0, const SkPoint& p1, const SkIRect* clip, int shiftUp);
+ // call this version if you know you don't have a clip
+ inline int setLine(const SkPoint& p0, const SkPoint& p1, int shiftUp);
+ inline int updateLine(SkFixed ax, SkFixed ay, SkFixed bx, SkFixed by);
+ void chopLineWithClip(const SkIRect& clip);
+
+ inline bool intersectsClip(const SkIRect& clip) const {
+ SkASSERT(fFirstY < clip.fBottom);
+ return fLastY >= clip.fTop;
+ }
+
+#ifdef SK_DEBUG
+ void dump() const;
+ void validate() const {
+ SkASSERT(fPrev && fNext);
+ SkASSERT(fPrev->fNext == this);
+ SkASSERT(fNext->fPrev == this);
+
+ SkASSERT(fFirstY <= fLastY);
+ SkASSERT(SkAbs32(fWinding) == 1);
+ }
+#endif
+};
+
+struct SkQuadraticEdge : public SkEdge {
+ SkFixed fQx, fQy;
+ SkFixed fQDx, fQDy;
+ SkFixed fQDDx, fQDDy;
+ SkFixed fQLastX, fQLastY;
+
+ bool setQuadraticWithoutUpdate(const SkPoint pts[3], int shiftUp);
+ int setQuadratic(const SkPoint pts[3], int shiftUp);
+ int updateQuadratic();
+};
+
+struct SkCubicEdge : public SkEdge {
+ SkFixed fCx, fCy;
+ SkFixed fCDx, fCDy;
+ SkFixed fCDDx, fCDDy;
+ SkFixed fCDDDx, fCDDDy;
+ SkFixed fCLastX, fCLastY;
+
+ bool setCubicWithoutUpdate(const SkPoint pts[4], int shiftUp, bool sortY = true);
+ int setCubic(const SkPoint pts[4], int shiftUp);
+ int updateCubic();
+};
+
+int SkEdge::setLine(const SkPoint& p0, const SkPoint& p1, int shift) {
+ SkFDot6 x0, y0, x1, y1;
+
+ {
+#ifdef SK_RASTERIZE_EVEN_ROUNDING
+ x0 = SkScalarRoundToFDot6(p0.fX, shift);
+ y0 = SkScalarRoundToFDot6(p0.fY, shift);
+ x1 = SkScalarRoundToFDot6(p1.fX, shift);
+ y1 = SkScalarRoundToFDot6(p1.fY, shift);
+#else
+ float scale = float(1 << (shift + 6));
+ x0 = int(p0.fX * scale);
+ y0 = int(p0.fY * scale);
+ x1 = int(p1.fX * scale);
+ y1 = int(p1.fY * scale);
+#endif
+ }
+
+ int winding = 1;
+
+ if (y0 > y1) {
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ winding = -1;
+ }
+
+ int top = SkFDot6Round(y0);
+ int bot = SkFDot6Round(y1);
+
+ // are we a zero-height line?
+ if (top == bot) {
+ return 0;
+ }
+
+ SkFixed slope = SkFDot6Div(x1 - x0, y1 - y0);
+ const SkFDot6 dy = SkEdge_Compute_DY(top, y0);
+
+ fX = SkFDot6ToFixed(x0 + SkFixedMul(slope, dy)); // + SK_Fixed1/2
+ fDX = slope;
+ fFirstY = top;
+ fLastY = bot - 1;
+ fEdgeType = kLine_Type;
+ fCurveCount = 0;
+ fWinding = SkToS8(winding);
+ fCurveShift = 0;
+ return 1;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEdgeBuilder.cpp b/gfx/skia/skia/src/core/SkEdgeBuilder.cpp
new file mode 100644
index 0000000000..4cc560a795
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeBuilder.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkEdgeBuilder.h"
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkSafe32.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkAnalyticEdge.h"
+#include "src/core/SkEdge.h"
+#include "src/core/SkEdgeClipper.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkLineClipper.h"
+#include "src/core/SkPathPriv.h"
+
+SkEdgeBuilder::Combine SkBasicEdgeBuilder::combineVertical(const SkEdge* edge, SkEdge* last) {
+ // We only consider edges that were originally lines to be vertical to avoid numerical issues
+ // (crbug.com/1154864).
+ if (last->fEdgeType != SkEdge::kLine_Type || last->fDX || edge->fX != last->fX) {
+ return kNo_Combine;
+ }
+ if (edge->fWinding == last->fWinding) {
+ if (edge->fLastY + 1 == last->fFirstY) {
+ last->fFirstY = edge->fFirstY;
+ return kPartial_Combine;
+ }
+ if (edge->fFirstY == last->fLastY + 1) {
+ last->fLastY = edge->fLastY;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+ }
+ if (edge->fFirstY == last->fFirstY) {
+ if (edge->fLastY == last->fLastY) {
+ return kTotal_Combine;
+ }
+ if (edge->fLastY < last->fLastY) {
+ last->fFirstY = edge->fLastY + 1;
+ return kPartial_Combine;
+ }
+ last->fFirstY = last->fLastY + 1;
+ last->fLastY = edge->fLastY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ if (edge->fLastY == last->fLastY) {
+ if (edge->fFirstY > last->fFirstY) {
+ last->fLastY = edge->fFirstY - 1;
+ return kPartial_Combine;
+ }
+ last->fLastY = last->fFirstY - 1;
+ last->fFirstY = edge->fFirstY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+}
+
+SkEdgeBuilder::Combine SkAnalyticEdgeBuilder::combineVertical(const SkAnalyticEdge* edge,
+ SkAnalyticEdge* last) {
+ auto approximately_equal = [](SkFixed a, SkFixed b) {
+ return SkAbs32(a - b) < 0x100;
+ };
+
+ // We only consider edges that were originally lines to be vertical to avoid numerical issues
+ // (crbug.com/1154864).
+ if (last->fEdgeType != SkAnalyticEdge::kLine_Type || last->fDX || edge->fX != last->fX) {
+ return kNo_Combine;
+ }
+ if (edge->fWinding == last->fWinding) {
+ if (edge->fLowerY == last->fUpperY) {
+ last->fUpperY = edge->fUpperY;
+ last->fY = last->fUpperY;
+ return kPartial_Combine;
+ }
+ if (approximately_equal(edge->fUpperY, last->fLowerY)) {
+ last->fLowerY = edge->fLowerY;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+ }
+ if (approximately_equal(edge->fUpperY, last->fUpperY)) {
+ if (approximately_equal(edge->fLowerY, last->fLowerY)) {
+ return kTotal_Combine;
+ }
+ if (edge->fLowerY < last->fLowerY) {
+ last->fUpperY = edge->fLowerY;
+ last->fY = last->fUpperY;
+ return kPartial_Combine;
+ }
+ last->fUpperY = last->fLowerY;
+ last->fY = last->fUpperY;
+ last->fLowerY = edge->fLowerY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ if (approximately_equal(edge->fLowerY, last->fLowerY)) {
+ if (edge->fUpperY > last->fUpperY) {
+ last->fLowerY = edge->fUpperY;
+ return kPartial_Combine;
+ }
+ last->fLowerY = last->fUpperY;
+ last->fUpperY = edge->fUpperY;
+ last->fY = last->fUpperY;
+ last->fWinding = edge->fWinding;
+ return kPartial_Combine;
+ }
+ return kNo_Combine;
+}
+
+template <typename Edge>
+static bool is_vertical(const Edge* edge) {
+ // We only consider edges that were originally lines to be vertical to avoid numerical issues
+ // (crbug.com/1154864).
+ return edge->fDX == 0
+ && edge->fEdgeType == Edge::kLine_Type;
+}
+
+// TODO: we can deallocate the edge if edge->setFoo() fails
+// or when we don't use it (kPartial_Combine or kTotal_Combine).
+
+void SkBasicEdgeBuilder::addLine(const SkPoint pts[]) {
+ SkEdge* edge = fAlloc.make<SkEdge>();
+ if (edge->setLine(pts[0], pts[1], fClipShift)) {
+ Combine combine = is_vertical(edge) && !fList.empty()
+ ? this->combineVertical(edge, (SkEdge*)fList.back())
+ : kNo_Combine;
+
+ switch (combine) {
+ case kTotal_Combine: fList.pop_back(); break;
+ case kPartial_Combine: break;
+ case kNo_Combine: fList.push_back(edge); break;
+ }
+ }
+}
+void SkAnalyticEdgeBuilder::addLine(const SkPoint pts[]) {
+ SkAnalyticEdge* edge = fAlloc.make<SkAnalyticEdge>();
+ if (edge->setLine(pts[0], pts[1])) {
+
+ Combine combine = is_vertical(edge) && !fList.empty()
+ ? this->combineVertical(edge, (SkAnalyticEdge*)fList.back())
+ : kNo_Combine;
+
+ switch (combine) {
+ case kTotal_Combine: fList.pop_back(); break;
+ case kPartial_Combine: break;
+ case kNo_Combine: fList.push_back(edge); break;
+ }
+ }
+}
+void SkBasicEdgeBuilder::addQuad(const SkPoint pts[]) {
+ SkQuadraticEdge* edge = fAlloc.make<SkQuadraticEdge>();
+ if (edge->setQuadratic(pts, fClipShift)) {
+ fList.push_back(edge);
+ }
+}
+void SkAnalyticEdgeBuilder::addQuad(const SkPoint pts[]) {
+ SkAnalyticQuadraticEdge* edge = fAlloc.make<SkAnalyticQuadraticEdge>();
+ if (edge->setQuadratic(pts)) {
+ fList.push_back(edge);
+ }
+}
+
+void SkBasicEdgeBuilder::addCubic(const SkPoint pts[]) {
+ SkCubicEdge* edge = fAlloc.make<SkCubicEdge>();
+ if (edge->setCubic(pts, fClipShift)) {
+ fList.push_back(edge);
+ }
+}
+void SkAnalyticEdgeBuilder::addCubic(const SkPoint pts[]) {
+ SkAnalyticCubicEdge* edge = fAlloc.make<SkAnalyticCubicEdge>();
+ if (edge->setCubic(pts)) {
+ fList.push_back(edge);
+ }
+}
+
+// TODO: merge addLine() and addPolyLine()?
+
+SkEdgeBuilder::Combine SkBasicEdgeBuilder::addPolyLine(const SkPoint pts[],
+ char* arg_edge, char** arg_edgePtr) {
+ auto edge = (SkEdge*) arg_edge;
+ auto edgePtr = (SkEdge**)arg_edgePtr;
+
+ if (edge->setLine(pts[0], pts[1], fClipShift)) {
+ return is_vertical(edge) && edgePtr > (SkEdge**)fEdgeList
+ ? this->combineVertical(edge, edgePtr[-1])
+ : kNo_Combine;
+ }
+ return SkEdgeBuilder::kPartial_Combine; // A convenient lie. Same do-nothing behavior.
+}
+SkEdgeBuilder::Combine SkAnalyticEdgeBuilder::addPolyLine(const SkPoint pts[],
+ char* arg_edge, char** arg_edgePtr) {
+ auto edge = (SkAnalyticEdge*) arg_edge;
+ auto edgePtr = (SkAnalyticEdge**)arg_edgePtr;
+
+ if (edge->setLine(pts[0], pts[1])) {
+ return is_vertical(edge) && edgePtr > (SkAnalyticEdge**)fEdgeList
+ ? this->combineVertical(edge, edgePtr[-1])
+ : kNo_Combine;
+ }
+ return SkEdgeBuilder::kPartial_Combine; // As above.
+}
+
+SkRect SkBasicEdgeBuilder::recoverClip(const SkIRect& src) const {
+ return { SkIntToScalar(src.fLeft >> fClipShift),
+ SkIntToScalar(src.fTop >> fClipShift),
+ SkIntToScalar(src.fRight >> fClipShift),
+ SkIntToScalar(src.fBottom >> fClipShift), };
+}
+SkRect SkAnalyticEdgeBuilder::recoverClip(const SkIRect& src) const {
+ return SkRect::Make(src);
+}
+
+char* SkBasicEdgeBuilder::allocEdges(size_t n, size_t* size) {
+ *size = sizeof(SkEdge);
+ return (char*)fAlloc.makeArrayDefault<SkEdge>(n);
+}
+char* SkAnalyticEdgeBuilder::allocEdges(size_t n, size_t* size) {
+ *size = sizeof(SkAnalyticEdge);
+ return (char*)fAlloc.makeArrayDefault<SkAnalyticEdge>(n);
+}
+
+// TODO: maybe get rid of buildPoly() entirely?
+int SkEdgeBuilder::buildPoly(const SkPath& path, const SkIRect* iclip, bool canCullToTheRight) {
+ size_t maxEdgeCount = path.countPoints();
+ if (iclip) {
+ // clipping can turn 1 line into (up to) kMaxClippedLineSegments, since
+ // we turn portions that are clipped out on the left/right into vertical
+ // segments.
+ SkSafeMath safe;
+ maxEdgeCount = safe.mul(maxEdgeCount, SkLineClipper::kMaxClippedLineSegments);
+ if (!safe) {
+ return 0;
+ }
+ }
+
+ size_t edgeSize;
+ char* edge = this->allocEdges(maxEdgeCount, &edgeSize);
+
+ SkDEBUGCODE(char* edgeStart = edge);
+ char** edgePtr = fAlloc.makeArrayDefault<char*>(maxEdgeCount);
+ fEdgeList = (void**)edgePtr;
+
+ SkPathEdgeIter iter(path);
+ if (iclip) {
+ SkRect clip = this->recoverClip(*iclip);
+
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine: {
+ SkPoint lines[SkLineClipper::kMaxPoints];
+ int lineCount = SkLineClipper::ClipLine(e.fPts, clip, lines, canCullToTheRight);
+ SkASSERT(lineCount <= SkLineClipper::kMaxClippedLineSegments);
+ for (int i = 0; i < lineCount; i++) {
+ switch( this->addPolyLine(lines + i, edge, edgePtr) ) {
+ case kTotal_Combine: edgePtr--; break;
+ case kPartial_Combine: break;
+ case kNo_Combine: *edgePtr++ = edge;
+ edge += edgeSize;
+ }
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ } else {
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine: {
+ switch( this->addPolyLine(e.fPts, edge, edgePtr) ) {
+ case kTotal_Combine: edgePtr--; break;
+ case kPartial_Combine: break;
+ case kNo_Combine: *edgePtr++ = edge;
+ edge += edgeSize;
+ }
+ break;
+ }
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+ }
+ SkASSERT((size_t)(edge - edgeStart) <= maxEdgeCount * edgeSize);
+ SkASSERT((size_t)(edgePtr - (char**)fEdgeList) <= maxEdgeCount);
+ return SkToInt(edgePtr - (char**)fEdgeList);
+}
+
+int SkEdgeBuilder::build(const SkPath& path, const SkIRect* iclip, bool canCullToTheRight) {
+ SkAutoConicToQuads quadder;
+ const SkScalar conicTol = SK_Scalar1 / 4;
+ bool is_finite = true;
+
+ SkPathEdgeIter iter(path);
+ if (iclip) {
+ SkRect clip = this->recoverClip(*iclip);
+ struct Rec {
+ SkEdgeBuilder* fBuilder;
+ bool fIsFinite;
+ } rec = { this, true };
+
+ SkEdgeClipper::ClipPath(path, clip, canCullToTheRight,
+ [](SkEdgeClipper* clipper, bool, void* ctx) {
+ Rec* rec = (Rec*)ctx;
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ while ((verb = clipper->next(pts)) != SkPath::kDone_Verb) {
+ const int count = SkPathPriv::PtsInIter(verb);
+ if (!SkScalarsAreFinite(&pts[0].fX, count*2)) {
+ rec->fIsFinite = false;
+ return;
+ }
+ switch (verb) {
+ case SkPath::kLine_Verb: rec->fBuilder->addLine (pts); break;
+ case SkPath::kQuad_Verb: rec->fBuilder->addQuad (pts); break;
+ case SkPath::kCubic_Verb: rec->fBuilder->addCubic(pts); break;
+ default: break;
+ }
+ }
+ }, &rec);
+ is_finite = rec.fIsFinite;
+ } else {
+ auto handle_quad = [this](const SkPoint pts[3]) {
+ SkPoint monoX[5];
+ int n = SkChopQuadAtYExtrema(pts, monoX);
+ for (int i = 0; i <= n; i++) {
+ this->addQuad(&monoX[i * 2]);
+ }
+ };
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine:
+ this->addLine(e.fPts);
+ break;
+ case SkPathEdgeIter::Edge::kQuad: {
+ handle_quad(e.fPts);
+ break;
+ }
+ case SkPathEdgeIter::Edge::kConic: {
+ const SkPoint* quadPts = quadder.computeQuads(
+ e.fPts, iter.conicWeight(), conicTol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ handle_quad(quadPts);
+ quadPts += 2;
+ }
+ } break;
+ case SkPathEdgeIter::Edge::kCubic: {
+ SkPoint monoY[10];
+ int n = SkChopCubicAtYExtrema(e.fPts, monoY);
+ for (int i = 0; i <= n; i++) {
+ this->addCubic(&monoY[i * 3]);
+ }
+ break;
+ }
+ }
+ }
+ }
+ fEdgeList = fList.begin();
+ return is_finite ? fList.size() : 0;
+}
+
+int SkEdgeBuilder::buildEdges(const SkPath& path,
+ const SkIRect* shiftedClip) {
+ // If we're convex, then we need both edges, even if the right edge is past the clip.
+ const bool canCullToTheRight = !path.isConvex();
+
+ // We can use our buildPoly() optimization if all the segments are lines.
+ // (Edges are homogeneous and stored contiguously in memory, no need for indirection.)
+ const int count = SkPath::kLine_SegmentMask == path.getSegmentMasks()
+ ? this->buildPoly(path, shiftedClip, canCullToTheRight)
+ : this->build (path, shiftedClip, canCullToTheRight);
+
+ SkASSERT(count >= 0);
+
+ // If we can't cull to the right, we should have count > 1 (or 0).
+ if (!canCullToTheRight) {
+ SkASSERT(count != 1);
+ }
+ return count;
+}
diff --git a/gfx/skia/skia/src/core/SkEdgeBuilder.h b/gfx/skia/skia/src/core/SkEdgeBuilder.h
new file mode 100644
index 0000000000..0db124c575
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeBuilder.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkEdgeBuilder_DEFINED
+#define SkEdgeBuilder_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/base/SkArenaAlloc.h"
+
+#include <cstddef>
+
+class SkPath;
+struct SkAnalyticEdge;
+struct SkEdge;
+struct SkPoint;
+
+class SkEdgeBuilder {
+public:
+ int buildEdges(const SkPath& path,
+ const SkIRect* shiftedClip);
+
+protected:
+ SkEdgeBuilder() = default;
+ virtual ~SkEdgeBuilder() = default;
+
+ // In general mode we allocate pointers in fList and fEdgeList points to its head.
+ // In polygon mode we preallocated edges contiguously in fAlloc and fEdgeList points there.
+ void** fEdgeList = nullptr;
+ SkTDArray<void*> fList;
+ SkSTArenaAlloc<512> fAlloc;
+
+ enum Combine {
+ kNo_Combine,
+ kPartial_Combine,
+ kTotal_Combine
+ };
+
+private:
+ int build (const SkPath& path, const SkIRect* clip, bool clipToTheRight);
+ int buildPoly(const SkPath& path, const SkIRect* clip, bool clipToTheRight);
+
+ virtual char* allocEdges(size_t n, size_t* sizeof_edge) = 0;
+ virtual SkRect recoverClip(const SkIRect&) const = 0;
+
+ virtual void addLine (const SkPoint pts[]) = 0;
+ virtual void addQuad (const SkPoint pts[]) = 0;
+ virtual void addCubic(const SkPoint pts[]) = 0;
+ virtual Combine addPolyLine(const SkPoint pts[], char* edge, char** edgePtr) = 0;
+};
+
+class SkBasicEdgeBuilder final : public SkEdgeBuilder {
+public:
+ explicit SkBasicEdgeBuilder(int clipShift) : fClipShift(clipShift) {}
+
+ SkEdge** edgeList() { return (SkEdge**)fEdgeList; }
+
+private:
+ Combine combineVertical(const SkEdge* edge, SkEdge* last);
+
+ char* allocEdges(size_t, size_t*) override;
+ SkRect recoverClip(const SkIRect&) const override;
+
+ void addLine (const SkPoint pts[]) override;
+ void addQuad (const SkPoint pts[]) override;
+ void addCubic(const SkPoint pts[]) override;
+ Combine addPolyLine(const SkPoint pts[], char* edge, char** edgePtr) override;
+
+ const int fClipShift;
+};
+
+class SkAnalyticEdgeBuilder final : public SkEdgeBuilder {
+public:
+ SkAnalyticEdgeBuilder() {}
+
+ SkAnalyticEdge** analyticEdgeList() { return (SkAnalyticEdge**)fEdgeList; }
+
+private:
+ Combine combineVertical(const SkAnalyticEdge* edge, SkAnalyticEdge* last);
+
+ char* allocEdges(size_t, size_t*) override;
+ SkRect recoverClip(const SkIRect&) const override;
+
+ void addLine (const SkPoint pts[]) override;
+ void addQuad (const SkPoint pts[]) override;
+ void addCubic(const SkPoint pts[]) override;
+ Combine addPolyLine(const SkPoint pts[], char* edge, char** edgePtr) override;
+};
+#endif
diff --git a/gfx/skia/skia/src/core/SkEdgeClipper.cpp b/gfx/skia/skia/src/core/SkEdgeClipper.cpp
new file mode 100644
index 0000000000..6789b681e8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeClipper.cpp
@@ -0,0 +1,604 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkEdgeClipper.h"
+
+#include "include/core/SkRect.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMacros.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkLineClipper.h"
+#include "src/core/SkPathPriv.h"
+
+#include <algorithm>
+#include <cstring>
+
+static bool quick_reject(const SkRect& bounds, const SkRect& clip) {
+ return bounds.fTop >= clip.fBottom || bounds.fBottom <= clip.fTop;
+}
+
+static inline void clamp_le(SkScalar& value, SkScalar max) {
+ if (value > max) {
+ value = max;
+ }
+}
+
+static inline void clamp_ge(SkScalar& value, SkScalar min) {
+ if (value < min) {
+ value = min;
+ }
+}
+
+/* src[] must be monotonic in Y. This routine copies src into dst, and sorts
+ it to be increasing in Y. If it had to reverse the order of the points,
+ it returns true, otherwise it returns false
+ */
+static bool sort_increasing_Y(SkPoint dst[], const SkPoint src[], int count) {
+ // we need the data to be monotonically increasing in Y
+ if (src[0].fY > src[count - 1].fY) {
+ for (int i = 0; i < count; i++) {
+ dst[i] = src[count - i - 1];
+ }
+ return true;
+ } else {
+ memcpy(dst, src, count * sizeof(SkPoint));
+ return false;
+ }
+}
+
+bool SkEdgeClipper::clipLine(SkPoint p0, SkPoint p1, const SkRect& clip) {
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+
+ SkPoint lines[SkLineClipper::kMaxPoints];
+ const SkPoint pts[] = { p0, p1 };
+ int lineCount = SkLineClipper::ClipLine(pts, clip, lines, fCanCullToTheRight);
+ for (int i = 0; i < lineCount; i++) {
+ this->appendLine(lines[i], lines[i + 1]);
+ }
+
+ *fCurrVerb = SkPath::kDone_Verb;
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+ return SkPath::kDone_Verb != fVerbs[0];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool chopMonoQuadAt(SkScalar c0, SkScalar c1, SkScalar c2,
+ SkScalar target, SkScalar* t) {
+ /* Solve F(t) = y where F(t) := [0](1-t)^2 + 2[1]t(1-t) + [2]t^2
+ * We solve for t, using quadratic equation, hence we have to rearrange
+ * our coefficients to look like At^2 + Bt + C
+ */
+ SkScalar A = c0 - c1 - c1 + c2;
+ SkScalar B = 2*(c1 - c0);
+ SkScalar C = c0 - target;
+
+ SkScalar roots[2]; // we only expect one, but make room for 2 for safety
+ int count = SkFindUnitQuadRoots(A, B, C, roots);
+ if (count) {
+ *t = roots[0];
+ return true;
+ }
+ return false;
+}
+
+static bool chopMonoQuadAtY(SkPoint pts[3], SkScalar y, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fY, pts[1].fY, pts[2].fY, y, t);
+}
+
+static bool chopMonoQuadAtX(SkPoint pts[3], SkScalar x, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fX, pts[1].fX, pts[2].fX, x, t);
+}
+
+// Modify pts[] in place so that it is clipped in Y to the clip rect
+static void chop_quad_in_Y(SkPoint pts[3], const SkRect& clip) {
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially above
+ if (pts[0].fY < clip.fTop) {
+ if (chopMonoQuadAtY(pts, clip.fTop, &t)) {
+ // take the 2nd chopped quad
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ tmp[2].fY = clip.fTop;
+ clamp_ge(tmp[3].fY, clip.fTop);
+
+ pts[0] = tmp[2];
+ pts[1] = tmp[3];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the top
+ for (int i = 0; i < 3; i++) {
+ if (pts[i].fY < clip.fTop) {
+ pts[i].fY = clip.fTop;
+ }
+ }
+ }
+ }
+
+ // are we partially below
+ if (pts[2].fY > clip.fBottom) {
+ if (chopMonoQuadAtY(pts, clip.fBottom, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ clamp_le(tmp[1].fY, clip.fBottom);
+ tmp[2].fY = clip.fBottom;
+
+ pts[1] = tmp[1];
+ pts[2] = tmp[2];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the bottom
+ for (int i = 0; i < 3; i++) {
+ if (pts[i].fY > clip.fBottom) {
+ pts[i].fY = clip.fBottom;
+ }
+ }
+ }
+ }
+}
+
+// srcPts[] must be monotonic in X and Y
+void SkEdgeClipper::clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip) {
+ SkPoint pts[3];
+ bool reverse = sort_increasing_Y(pts, srcPts, 3);
+
+ // are we completely above or below
+ if (pts[2].fY <= clip.fTop || pts[0].fY >= clip.fBottom) {
+ return;
+ }
+
+ // Now chop so that pts is contained within clip in Y
+ chop_quad_in_Y(pts, clip);
+
+ if (pts[0].fX > pts[2].fX) {
+ using std::swap;
+ swap(pts[0], pts[2]);
+ reverse = !reverse;
+ }
+ SkASSERT(pts[0].fX <= pts[1].fX);
+ SkASSERT(pts[1].fX <= pts[2].fX);
+
+ // Now chop in X as needed, and record the segments
+
+ if (pts[2].fX <= clip.fLeft) { // wholly to the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[2].fY, reverse);
+ return;
+ }
+ if (pts[0].fX >= clip.fRight) { // wholly to the right
+ if (!this->canCullToTheRight()) {
+ this->appendVLine(clip.fRight, pts[0].fY, pts[2].fY, reverse);
+ }
+ return;
+ }
+
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially to the left
+ if (pts[0].fX < clip.fLeft) {
+ if (chopMonoQuadAtX(pts, clip.fLeft, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ this->appendVLine(clip.fLeft, tmp[0].fY, tmp[2].fY, reverse);
+ // clamp to clean up imprecise numerics in the chop
+ tmp[2].fX = clip.fLeft;
+ clamp_ge(tmp[3].fX, clip.fLeft);
+
+ pts[0] = tmp[2];
+ pts[1] = tmp[3];
+ } else {
+ // if chopMonoQuadAtX failed, then we may have hit inexact numerics
+ // so we just clamp against the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[2].fY, reverse);
+ return;
+ }
+ }
+
+ // are we partially to the right
+ if (pts[2].fX > clip.fRight) {
+ if (chopMonoQuadAtX(pts, clip.fRight, &t)) {
+ SkChopQuadAt(pts, tmp, t);
+ // clamp to clean up imprecise numerics in the chop
+ clamp_le(tmp[1].fX, clip.fRight);
+ tmp[2].fX = clip.fRight;
+
+ this->appendQuad(tmp, reverse);
+ this->appendVLine(clip.fRight, tmp[2].fY, tmp[4].fY, reverse);
+ } else {
+ // if chopMonoQuadAtX failed, then we may have hit inexact numerics
+ // so we just clamp against the right
+ pts[1].fX = std::min(pts[1].fX, clip.fRight);
+ pts[2].fX = std::min(pts[2].fX, clip.fRight);
+ this->appendQuad(pts, reverse);
+ }
+ } else { // wholly inside the clip
+ this->appendQuad(pts, reverse);
+ }
+}
+
+bool SkEdgeClipper::clipQuad(const SkPoint srcPts[3], const SkRect& clip) {
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+
+ SkRect bounds;
+ bounds.setBounds(srcPts, 3);
+
+ if (!quick_reject(bounds, clip)) {
+ SkPoint monoY[5];
+ int countY = SkChopQuadAtYExtrema(srcPts, monoY);
+ for (int y = 0; y <= countY; y++) {
+ SkPoint monoX[5];
+ int countX = SkChopQuadAtXExtrema(&monoY[y * 2], monoX);
+ for (int x = 0; x <= countX; x++) {
+ this->clipMonoQuad(&monoX[x * 2], clip);
+ SkASSERT(fCurrVerb - fVerbs < kMaxVerbs);
+ SkASSERT(fCurrPoint - fPoints <= kMaxPoints);
+ }
+ }
+ }
+
+ *fCurrVerb = SkPath::kDone_Verb;
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+ return SkPath::kDone_Verb != fVerbs[0];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static SkScalar mono_cubic_closestT(const SkScalar src[], SkScalar x) {
+ SkScalar t = 0.5f;
+ SkScalar lastT;
+ SkScalar bestT SK_INIT_TO_AVOID_WARNING;
+ SkScalar step = 0.25f;
+ SkScalar D = src[0];
+ SkScalar A = src[6] + 3*(src[2] - src[4]) - D;
+ SkScalar B = 3*(src[4] - src[2] - src[2] + D);
+ SkScalar C = 3*(src[2] - D);
+ x -= D;
+ SkScalar closest = SK_ScalarMax;
+ do {
+ SkScalar loc = ((A * t + B) * t + C) * t;
+ SkScalar dist = SkScalarAbs(loc - x);
+ if (closest > dist) {
+ closest = dist;
+ bestT = t;
+ }
+ lastT = t;
+ t += loc < x ? step : -step;
+ step *= 0.5f;
+ } while (closest > 0.25f && lastT != t);
+ return bestT;
+}
+
+static void chop_mono_cubic_at_y(SkPoint src[4], SkScalar y, SkPoint dst[7]) {
+ if (SkChopMonoCubicAtY(src, y, dst)) {
+ return;
+ }
+ SkChopCubicAt(src, dst, mono_cubic_closestT(&src->fY, y));
+}
+
+// Modify pts[] in place so that it is clipped in Y to the clip rect
+static void chop_cubic_in_Y(SkPoint pts[4], const SkRect& clip) {
+
+ // are we partially above
+ if (pts[0].fY < clip.fTop) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_y(pts, clip.fTop, tmp);
+
+ /*
+ * For a large range in the points, we can do a poor job of chopping, such that the t
+ * we computed resulted in the lower cubic still being partly above the clip.
+ *
+ * If just the first or first 2 Y values are above the fTop, we can just smash them
+ * down. If the first 3 Ys are above fTop, we can't smash all 3, as that can really
+ * distort the cubic. In this case, we take the first output (tmp[3..6] and treat it as
+ * a guess, and re-chop against fTop. Then we fall through to checking if we need to
+ * smash the first 1 or 2 Y values.
+ */
+ if (tmp[3].fY < clip.fTop && tmp[4].fY < clip.fTop && tmp[5].fY < clip.fTop) {
+ SkPoint tmp2[4];
+ memcpy(tmp2, &tmp[3].fX, 4 * sizeof(SkPoint));
+ chop_mono_cubic_at_y(tmp2, clip.fTop, tmp);
+ }
+
+ // tmp[3, 4].fY should all be at or below clip.fTop.
+ // Since we can't trust the numerics of the chopper, we force those conditions now
+ tmp[3].fY = clip.fTop;
+ clamp_ge(tmp[4].fY, clip.fTop);
+
+ pts[0] = tmp[3];
+ pts[1] = tmp[4];
+ pts[2] = tmp[5];
+ }
+
+ // are we partially below
+ if (pts[3].fY > clip.fBottom) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_y(pts, clip.fBottom, tmp);
+ tmp[3].fY = clip.fBottom;
+ clamp_le(tmp[2].fY, clip.fBottom);
+
+ pts[1] = tmp[1];
+ pts[2] = tmp[2];
+ pts[3] = tmp[3];
+ }
+}
+
+static void chop_mono_cubic_at_x(SkPoint src[4], SkScalar x, SkPoint dst[7]) {
+ if (SkChopMonoCubicAtX(src, x, dst)) {
+ return;
+ }
+ SkChopCubicAt(src, dst, mono_cubic_closestT(&src->fX, x));
+}
+
+// srcPts[] must be monotonic in X and Y
+void SkEdgeClipper::clipMonoCubic(const SkPoint src[4], const SkRect& clip) {
+ SkPoint pts[4];
+ bool reverse = sort_increasing_Y(pts, src, 4);
+
+ // are we completely above or below
+ if (pts[3].fY <= clip.fTop || pts[0].fY >= clip.fBottom) {
+ return;
+ }
+
+ // Now chop so that pts is contained within clip in Y
+ chop_cubic_in_Y(pts, clip);
+
+ if (pts[0].fX > pts[3].fX) {
+ using std::swap;
+ swap(pts[0], pts[3]);
+ swap(pts[1], pts[2]);
+ reverse = !reverse;
+ }
+
+ // Now chop in X as needed, and record the segments
+
+ if (pts[3].fX <= clip.fLeft) { // wholly to the left
+ this->appendVLine(clip.fLeft, pts[0].fY, pts[3].fY, reverse);
+ return;
+ }
+ if (pts[0].fX >= clip.fRight) { // wholly to the right
+ if (!this->canCullToTheRight()) {
+ this->appendVLine(clip.fRight, pts[0].fY, pts[3].fY, reverse);
+ }
+ return;
+ }
+
+ // are we partially to the left
+ if (pts[0].fX < clip.fLeft) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_x(pts, clip.fLeft, tmp);
+ this->appendVLine(clip.fLeft, tmp[0].fY, tmp[3].fY, reverse);
+
+ // tmp[3, 4].fX should all be to the right of clip.fLeft.
+ // Since we can't trust the numerics of
+ // the chopper, we force those conditions now
+ tmp[3].fX = clip.fLeft;
+ clamp_ge(tmp[4].fX, clip.fLeft);
+
+ pts[0] = tmp[3];
+ pts[1] = tmp[4];
+ pts[2] = tmp[5];
+ }
+
+ // are we partially to the right
+ if (pts[3].fX > clip.fRight) {
+ SkPoint tmp[7];
+ chop_mono_cubic_at_x(pts, clip.fRight, tmp);
+ tmp[3].fX = clip.fRight;
+ clamp_le(tmp[2].fX, clip.fRight);
+
+ this->appendCubic(tmp, reverse);
+ this->appendVLine(clip.fRight, tmp[3].fY, tmp[6].fY, reverse);
+ } else { // wholly inside the clip
+ this->appendCubic(pts, reverse);
+ }
+}
+
+static SkRect compute_cubic_bounds(const SkPoint pts[4]) {
+ SkRect r;
+ r.setBounds(pts, 4);
+ return r;
+}
+
+static bool too_big_for_reliable_float_math(const SkRect& r) {
+ // limit set as the largest float value for which we can still reliably compute things like
+ // - chopping at XY extrema
+ // - chopping at Y or X values for clipping
+ //
+ // Current value chosen just by experiment. Larger (and still succeeds) is always better.
+ //
+ const SkScalar limit = 1 << 22;
+ return r.fLeft < -limit || r.fTop < -limit || r.fRight > limit || r.fBottom > limit;
+}
+
+bool SkEdgeClipper::clipCubic(const SkPoint srcPts[4], const SkRect& clip) {
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+
+ const SkRect bounds = compute_cubic_bounds(srcPts);
+ // check if we're clipped out vertically
+ if (bounds.fBottom > clip.fTop && bounds.fTop < clip.fBottom) {
+ if (too_big_for_reliable_float_math(bounds)) {
+ // can't safely clip the cubic, so we give up and draw a line (which we can safely clip)
+ //
+ // If we rewrote chopcubicat*extrema and chopmonocubic using doubles, we could very
+ // likely always handle the cubic safely, but (it seems) at a big loss in speed, so
+ // we'd only want to take that alternate impl if needed. Perhaps a TODO to try it.
+ //
+ return this->clipLine(srcPts[0], srcPts[3], clip);
+ } else {
+ SkPoint monoY[10];
+ int countY = SkChopCubicAtYExtrema(srcPts, monoY);
+ for (int y = 0; y <= countY; y++) {
+ SkPoint monoX[10];
+ int countX = SkChopCubicAtXExtrema(&monoY[y * 3], monoX);
+ for (int x = 0; x <= countX; x++) {
+ this->clipMonoCubic(&monoX[x * 3], clip);
+ SkASSERT(fCurrVerb - fVerbs < kMaxVerbs);
+ SkASSERT(fCurrPoint - fPoints <= kMaxPoints);
+ }
+ }
+ }
+ }
+
+ *fCurrVerb = SkPath::kDone_Verb;
+ fCurrPoint = fPoints;
+ fCurrVerb = fVerbs;
+ return SkPath::kDone_Verb != fVerbs[0];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEdgeClipper::appendLine(SkPoint p0, SkPoint p1) {
+ *fCurrVerb++ = SkPath::kLine_Verb;
+ fCurrPoint[0] = p0;
+ fCurrPoint[1] = p1;
+ fCurrPoint += 2;
+}
+
+void SkEdgeClipper::appendVLine(SkScalar x, SkScalar y0, SkScalar y1, bool reverse) {
+ *fCurrVerb++ = SkPath::kLine_Verb;
+
+ if (reverse) {
+ using std::swap;
+ swap(y0, y1);
+ }
+ fCurrPoint[0].set(x, y0);
+ fCurrPoint[1].set(x, y1);
+ fCurrPoint += 2;
+}
+
+void SkEdgeClipper::appendQuad(const SkPoint pts[3], bool reverse) {
+ *fCurrVerb++ = SkPath::kQuad_Verb;
+
+ if (reverse) {
+ fCurrPoint[0] = pts[2];
+ fCurrPoint[2] = pts[0];
+ } else {
+ fCurrPoint[0] = pts[0];
+ fCurrPoint[2] = pts[2];
+ }
+ fCurrPoint[1] = pts[1];
+ fCurrPoint += 3;
+}
+
+void SkEdgeClipper::appendCubic(const SkPoint pts[4], bool reverse) {
+ *fCurrVerb++ = SkPath::kCubic_Verb;
+
+ if (reverse) {
+ for (int i = 0; i < 4; i++) {
+ fCurrPoint[i] = pts[3 - i];
+ }
+ } else {
+ memcpy(fCurrPoint, pts, 4 * sizeof(SkPoint));
+ }
+ fCurrPoint += 4;
+}
+
+SkPath::Verb SkEdgeClipper::next(SkPoint pts[]) {
+ SkPath::Verb verb = *fCurrVerb;
+
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ memcpy(pts, fCurrPoint, 2 * sizeof(SkPoint));
+ fCurrPoint += 2;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kQuad_Verb:
+ memcpy(pts, fCurrPoint, 3 * sizeof(SkPoint));
+ fCurrPoint += 3;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kCubic_Verb:
+ memcpy(pts, fCurrPoint, 4 * sizeof(SkPoint));
+ fCurrPoint += 4;
+ fCurrVerb += 1;
+ break;
+ case SkPath::kDone_Verb:
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb in quadclippper2 iter");
+ break;
+ }
+ return verb;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+static void assert_monotonic(const SkScalar coord[], int count) {
+ if (coord[0] > coord[(count - 1) * 2]) {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] >= coord[i * 2]);
+ }
+ } else if (coord[0] < coord[(count - 1) * 2]) {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] <= coord[i * 2]);
+ }
+ } else {
+ for (int i = 1; i < count; i++) {
+ SkASSERT(coord[2 * (i - 1)] == coord[i * 2]);
+ }
+ }
+}
+
+void sk_assert_monotonic_y(const SkPoint pts[], int count) {
+ if (count > 1) {
+ assert_monotonic(&pts[0].fY, count);
+ }
+}
+
+void sk_assert_monotonic_x(const SkPoint pts[], int count) {
+ if (count > 1) {
+ assert_monotonic(&pts[0].fX, count);
+ }
+}
+#endif
+
+void SkEdgeClipper::ClipPath(const SkPath& path, const SkRect& clip, bool canCullToTheRight,
+ void (*consume)(SkEdgeClipper*, bool newCtr, void* ctx), void* ctx) {
+ SkASSERT(path.isFinite());
+
+ SkAutoConicToQuads quadder;
+ const SkScalar conicTol = SK_Scalar1 / 4;
+
+ SkPathEdgeIter iter(path);
+ SkEdgeClipper clipper(canCullToTheRight);
+
+ while (auto e = iter.next()) {
+ switch (e.fEdge) {
+ case SkPathEdgeIter::Edge::kLine:
+ if (clipper.clipLine(e.fPts[0], e.fPts[1], clip)) {
+ consume(&clipper, e.fIsNewContour, ctx);
+ }
+ break;
+ case SkPathEdgeIter::Edge::kQuad:
+ if (clipper.clipQuad(e.fPts, clip)) {
+ consume(&clipper, e.fIsNewContour, ctx);
+ }
+ break;
+ case SkPathEdgeIter::Edge::kConic: {
+ const SkPoint* quadPts = quadder.computeQuads(e.fPts, iter.conicWeight(), conicTol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ if (clipper.clipQuad(quadPts, clip)) {
+ consume(&clipper, e.fIsNewContour, ctx);
+ }
+ quadPts += 2;
+ }
+ } break;
+ case SkPathEdgeIter::Edge::kCubic:
+ if (clipper.clipCubic(e.fPts, clip)) {
+ consume(&clipper, e.fIsNewContour, ctx);
+ }
+ break;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkEdgeClipper.h b/gfx/skia/skia/src/core/SkEdgeClipper.h
new file mode 100644
index 0000000000..230646b246
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEdgeClipper.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkEdgeClipper_DEFINED
+#define SkEdgeClipper_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkDebug.h"
+
+struct SkRect;
+
+/** This is basically an iterator. It is initialized with an edge and a clip,
+ and then next() is called until it returns kDone_Verb.
+ */
class SkEdgeClipper {
public:
    SkEdgeClipper(bool canCullToTheRight) : fCanCullToTheRight(canCullToTheRight) {}

    // Clip a single edge against `clip`. Each returns true when something
    // survived the clip, after which next() iterates the clipped pieces.
    bool clipLine(SkPoint p0, SkPoint p1, const SkRect& clip);
    bool clipQuad(const SkPoint pts[3], const SkRect& clip);
    bool clipCubic(const SkPoint pts[4], const SkRect& clip);

    // Returns the verb of the next clipped segment, filling pts accordingly;
    // kDone_Verb signals the end of the iteration.
    SkPath::Verb next(SkPoint pts[]);

    bool canCullToTheRight() const { return fCanCullToTheRight; }

    /**
     *  Clips each segment from the path, and passes the result (in a clipper) to the
     *  consume proc.
     */
    static void ClipPath(const SkPath& path, const SkRect& clip, bool canCullToTheRight,
                         void (*consume)(SkEdgeClipper*, bool newCtr, void* ctx), void* ctx);

private:
    SkPoint* fCurrPoint;        // cursor into fPoints while next() iterates
    SkPath::Verb* fCurrVerb;    // cursor into fVerbs while next() iterates
    const bool fCanCullToTheRight;

    enum {
        kMaxVerbs = 18, // max curvature in X and Y split cubic into 9 pieces, * (line + cubic)
        kMaxPoints = 54 // 2 lines + 1 cubic require 6 points; times 9 pieces
    };
    SkPoint fPoints[kMaxPoints];
    SkPath::Verb fVerbs[kMaxVerbs];

    void clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip);
    void clipMonoCubic(const SkPoint srcPts[4], const SkRect& clip);
    void appendLine(SkPoint p0, SkPoint p1);
    void appendVLine(SkScalar x, SkScalar y0, SkScalar y1, bool reverse);
    void appendQuad(const SkPoint pts[3], bool reverse);
    void appendCubic(const SkPoint pts[4], bool reverse);
};
+
+#ifdef SK_DEBUG
+ void sk_assert_monotonic_x(const SkPoint pts[], int count);
+ void sk_assert_monotonic_y(const SkPoint pts[], int count);
+#else
+ #define sk_assert_monotonic_x(pts, count)
+ #define sk_assert_monotonic_y(pts, count)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkEffectPriv.h b/gfx/skia/skia/src/core/SkEffectPriv.h
new file mode 100644
index 0000000000..e1d5764efa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEffectPriv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEffectPriv_DEFINED
+#define SkEffectPriv_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorType.h"
+
+class SkArenaAlloc;
+class SkColorSpace;
+class SkRasterPipeline;
+class SkSurfaceProps;
+
// Passed to effects that will add stages to rasterpipeline
struct SkStageRec {
    SkRasterPipeline* fPipeline;    // pipeline the effect appends its stages to
    SkArenaAlloc* fAlloc;           // arena for stage context allocations
    SkColorType fDstColorType;      // color type of the destination surface
    SkColorSpace* fDstCS;           // may be nullptr
    SkColor4f fPaintColor;
    const SkSurfaceProps& fSurfaceProps;
};
+
+#endif // SkEffectPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkEnumBitMask.h b/gfx/skia/skia/src/core/SkEnumBitMask.h
new file mode 100644
index 0000000000..a04f6cd0b8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEnumBitMask.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEnumBitMask_DEFINED
+#define SkEnumBitMask_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * Wraps an enum that is used for flags, and enables masking with type safety. Example:
+ *
+ * enum class MyFlags {
+ * kNone = 0,
+ * kA = 1,
+ * kB = 2,
+ * kC = 4,
+ * };
+ *
+ * MAKE_MASK_OPS(MyFlags)
+ *
+ * ...
+ *
+ * Mask<MyFlags> flags = MyFlags::kA | MyFlags::kB;
+ *
+ * if (flags & MyFlags::kB) {}
+ *
+ * ...
+ */
+template<typename E>
+class SkEnumBitMask {
+public:
+ SK_ALWAYS_INLINE constexpr SkEnumBitMask(E e) : SkEnumBitMask((int)e) {}
+
+ SK_ALWAYS_INLINE constexpr operator bool() const { return fValue; }
+
+ SK_ALWAYS_INLINE bool operator==(SkEnumBitMask m) const { return fValue == m.fValue; }
+ SK_ALWAYS_INLINE bool operator!=(SkEnumBitMask m) const { return fValue != m.fValue; }
+
+ SK_ALWAYS_INLINE constexpr SkEnumBitMask operator|(SkEnumBitMask m) const {
+ return SkEnumBitMask(fValue | m.fValue);
+ }
+ SK_ALWAYS_INLINE constexpr SkEnumBitMask operator&(SkEnumBitMask m) const {
+ return SkEnumBitMask(fValue & m.fValue);
+ }
+ SK_ALWAYS_INLINE constexpr SkEnumBitMask operator^(SkEnumBitMask m) const {
+ return SkEnumBitMask(fValue ^ m.fValue);
+ }
+ SK_ALWAYS_INLINE constexpr SkEnumBitMask operator~() const { return SkEnumBitMask(~fValue); }
+
+ SK_ALWAYS_INLINE SkEnumBitMask& operator|=(SkEnumBitMask m) { return *this = *this | m; }
+ SK_ALWAYS_INLINE SkEnumBitMask& operator&=(SkEnumBitMask m) { return *this = *this & m; }
+ SK_ALWAYS_INLINE SkEnumBitMask& operator^=(SkEnumBitMask m) { return *this = *this ^ m; }
+
+private:
+ SK_ALWAYS_INLINE constexpr explicit SkEnumBitMask(int value) : fValue(value) {}
+
+ int fValue;
+};
+
/**
 * Defines functions that make it possible to use bitwise operators on an enum.
 * Invoke once at namespace scope after the enum's definition.
 */
#define SK_MAKE_BITMASK_OPS(E) \
    [[maybe_unused]] constexpr SkEnumBitMask<E> operator|(E a, E b) { \
        return SkEnumBitMask<E>(a) | b; \
    } \
    [[maybe_unused]] constexpr SkEnumBitMask<E> operator&(E a, E b) { \
        return SkEnumBitMask<E>(a) & b; \
    } \
    [[maybe_unused]] constexpr SkEnumBitMask<E> operator^(E a, E b) { \
        return SkEnumBitMask<E>(a) ^ b; \
    } \
    [[maybe_unused]] constexpr SkEnumBitMask<E> operator~(E e) { \
        return ~SkEnumBitMask<E>(e); \
    } \

// Declares the operators above as friends, for enums nested privately
// inside a class.
#define SK_DECL_BITMASK_OPS_FRIENDS(E) \
    friend constexpr SkEnumBitMask<E> operator|(E, E); \
    friend constexpr SkEnumBitMask<E> operator&(E, E); \
    friend constexpr SkEnumBitMask<E> operator^(E, E); \
    friend constexpr SkEnumBitMask<E> operator~(E); \

+#endif // SkEnumBitMask_DEFINED
diff --git a/gfx/skia/skia/src/core/SkEnumerate.h b/gfx/skia/skia/src/core/SkEnumerate.h
new file mode 100644
index 0000000000..ecc3bf390b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkEnumerate.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEnumerate_DEFINED
+#define SkEnumerate_DEFINED
+
+#include <cstddef>
+#include <iterator>
+#include <tuple>
+#include <variant>
+
// Python-style enumerate(): wraps an iterator range (optionally owning the
// collection C when built from an r-value) and yields std::tuples of
// (index, element...). Elements are captured by reference via std::tie unless
// the wrapped iterator already yields a tuple, which is concatenated instead.
template <typename Iter, typename C = std::monostate>
class SkEnumerate {
    using Captured = decltype(*std::declval<Iter>());
    template <typename> struct is_tuple : std::false_type {};
    template <typename... T> struct is_tuple<std::tuple<T...>> : std::true_type {};

    // v must be a r-value to bind to temporary non-const references.
    static constexpr auto MakeResult(size_t i, Captured&& v) {
        if constexpr (is_tuple<Captured>::value) {
            return std::tuple_cat(std::tuple<size_t>{i}, v);
        } else {
            // Capture v by reference instead of by value by using std::tie.
            return std::tuple_cat(std::tuple<size_t>{i}, std::tie(v));
        }
    }

    using Result = decltype(MakeResult(0, std::declval<Captured>()));

    // Input iterator pairing a running index with the wrapped iterator.
    class Iterator {
    public:
        using value_type = Result;
        using difference_type = ptrdiff_t;
        using pointer = value_type*;
        using reference = value_type;
        using iterator_category = std::input_iterator_tag;
        constexpr Iterator(ptrdiff_t index, Iter it) : fIndex{index}, fIt{it} { }
        constexpr Iterator(const Iterator&) = default;
        constexpr Iterator operator++() { ++fIndex; ++fIt; return *this; }
        constexpr Iterator operator++(int) { Iterator tmp(*this); operator++(); return tmp; }
        // Equality compares only the wrapped iterator; the index tracks it.
        constexpr bool operator==(const Iterator& rhs) const { return fIt == rhs.fIt; }
        constexpr bool operator!=(const Iterator& rhs) const { return fIt != rhs.fIt; }
        constexpr reference operator*() { return MakeResult(fIndex, *fIt); }

    private:
        ptrdiff_t fIndex;
        Iter fIt;
    };

public:
    constexpr SkEnumerate(Iter begin, Iter end) : SkEnumerate{0, begin, end} {}
    // Takes ownership of an r-value collection so the range stays alive.
    explicit constexpr SkEnumerate(C&& c)
        : fCollection{std::move(c)}
        , fBeginIndex{0}
        , fBegin{std::begin(fCollection)}
        , fEnd{std::end(fCollection)} { }
    constexpr SkEnumerate(const SkEnumerate& that) = default;
    // NOTE(review): assignment copies only the iterators; fBeginIndex is const
    // and fCollection is left untouched — confirm callers only assign
    // non-owning views with matching start indices.
    constexpr SkEnumerate& operator=(const SkEnumerate& that) {
        fBegin = that.fBegin;
        fEnd = that.fEnd;
        return *this;
    }
    constexpr Iterator begin() const { return Iterator{fBeginIndex, fBegin}; }
    constexpr Iterator end() const { return Iterator{fBeginIndex + this->ssize(), fEnd}; }
    constexpr bool empty() const { return fBegin == fEnd; }
    constexpr size_t size() const { return std::distance(fBegin, fEnd); }
    constexpr ptrdiff_t ssize() const { return std::distance(fBegin, fEnd); }
    // First n elements, keeping the original starting index.
    constexpr SkEnumerate first(size_t n) {
        SkASSERT(n <= this->size());
        ptrdiff_t deltaEnd = this->ssize() - n;
        return SkEnumerate{fBeginIndex, fBegin, std::prev(fEnd, deltaEnd)};
    }
    // Last n elements; indices continue counting past the skipped prefix.
    constexpr SkEnumerate last(size_t n) {
        SkASSERT(n <= this->size());
        ptrdiff_t deltaBegin = this->ssize() - n;
        return SkEnumerate{fBeginIndex + deltaBegin, std::next(fBegin, deltaBegin), fEnd};
    }
    // `count` elements starting at `offset`; original indices are preserved.
    constexpr SkEnumerate subspan(size_t offset, size_t count) {
        SkASSERT(offset < this->size());
        SkASSERT(count <= this->size() - offset);
        auto newBegin = std::next(fBegin, offset);
        return SkEnumerate(fBeginIndex + offset, newBegin, std::next(newBegin, count));
    }

private:
    constexpr SkEnumerate(ptrdiff_t beginIndex, Iter begin, Iter end)
        : fBeginIndex{beginIndex}
        , fBegin(begin)
        , fEnd(end) {}

    C fCollection;                  // owned storage (std::monostate when non-owning)
    const ptrdiff_t fBeginIndex;    // index reported for the first element
    Iter fBegin;
    Iter fEnd;
};
+
// Enumerate an lvalue container by reference.
template <typename C, typename Iter = decltype(std::begin(std::declval<C>()))>
inline constexpr SkEnumerate<Iter> SkMakeEnumerate(C& c) {
    return SkEnumerate<Iter>{std::begin(c), std::end(c)};
}
// Enumerate an rvalue container; the SkEnumerate takes ownership of it.
template <typename C, typename Iter = decltype(std::begin(std::declval<C>()))>
inline constexpr SkEnumerate<Iter, C> SkMakeEnumerate(C&& c) {
    return SkEnumerate<Iter, C>{std::forward<C>(c)};
}

// Enumerate a built-in C array.
template <class T, std::size_t N, typename Iter = decltype(std::begin(std::declval<T(&)[N]>()))>
inline constexpr SkEnumerate<Iter> SkMakeEnumerate(T (&a)[N]) {
    return SkEnumerate<Iter>{std::begin(a), std::end(a)};
}
+#endif // SkEnumerate_DEFINED
diff --git a/gfx/skia/skia/src/core/SkExecutor.cpp b/gfx/skia/skia/src/core/SkExecutor.cpp
new file mode 100644
index 0000000000..fe908ad35f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkExecutor.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkExecutor.h"
+#include "include/private/SkSpinlock.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkSemaphore.h"
+#include "include/private/base/SkTArray.h"
+#include <deque>
+#include <thread>
+
+using namespace skia_private;
+
#if defined(SK_BUILD_FOR_WIN)
    #include "src/base/SkLeanWindows.h"
    // Number of logical processors; used to size the default thread pools.
    static int num_cores() {
        SYSTEM_INFO sysinfo;
        GetNativeSystemInfo(&sysinfo);
        return (int)sysinfo.dwNumberOfProcessors;
    }
#else
    #include <unistd.h>
    // Number of logical processors currently online (POSIX).
    static int num_cores() {
        return (int)sysconf(_SC_NPROCESSORS_ONLN);
    }
#endif
+
+SkExecutor::~SkExecutor() {}
+
// The default default SkExecutor is an SkTrivialExecutor, which just runs the work right away.
class SkTrivialExecutor final : public SkExecutor {
    // Runs the work synchronously on the caller's thread; nothing is queued.
    void add(std::function<void(void)> work) override {
        work();
    }
};

// Lazily creates the shared trivial executor. It is heap-allocated and never
// deleted on purpose, so static-destruction order can't invalidate it.
static SkExecutor& trivial_executor() {
    static auto* executor = new SkTrivialExecutor();
    return *executor;
}
+
+static SkExecutor* gDefaultExecutor = nullptr;
+
+SkExecutor& SkExecutor::GetDefault() {
+ if (gDefaultExecutor) {
+ return *gDefaultExecutor;
+ }
+ return trivial_executor();
+}
+
// Installs `executor` as the process-wide default; pass nullptr to fall back
// to the trivial executor. Ownership stays with the caller (nothing here
// frees the pointer), so it must outlive all GetDefault() users.
void SkExecutor::SetDefault(SkExecutor* executor) {
    gDefaultExecutor = executor;
}
+
+// We'll always push_back() new work, but pop from the front of deques or the back of SkTArray.
// Removes and returns the oldest item (FIFO) from a deque-based work list.
static inline std::function<void(void)> pop(std::deque<std::function<void(void)>>* list) {
    auto oldest = std::move(list->front());
    list->pop_front();
    return oldest;
}
+static inline std::function<void(void)> pop(TArray<std::function<void(void)>>* list) {
+ std::function<void(void)> fn = std::move(list->back());
+ list->pop_back();
+ return fn;
+}
+
// An SkThreadPool is an executor that runs work on a fixed pool of OS threads.
template <typename WorkList>
class SkThreadPool final : public SkExecutor {
public:
    // Spins up `threads` worker threads. `allowBorrowing` lets borrow() run
    // one queued unit of work on the calling thread.
    explicit SkThreadPool(int threads, bool allowBorrowing) : fAllowBorrowing(allowBorrowing) {
        for (int i = 0; i < threads; i++) {
            fThreads.emplace_back(&Loop, this);
        }
    }

    ~SkThreadPool() override {
        // Signal each thread that it's time to shut down.
        for (int i = 0; i < fThreads.size(); i++) {
            this->add(nullptr);
        }
        // Wait for each thread to shut down.
        for (int i = 0; i < fThreads.size(); i++) {
            fThreads[i].join();
        }
    }

    // Queues one unit of work. A null function is reserved internally as the
    // per-thread shutdown sentinel (see do_work()).
    void add(std::function<void(void)> work) override {
        // Add some work to our pile of work to do.
        {
            SkAutoMutexExclusive lock(fWorkLock);
            fWork.emplace_back(std::move(work));
        }
        // Tell the Loop() threads to pick it up.
        fWorkAvailable.signal(1);
    }

    void borrow() override {
        // If there is work waiting and we're allowed to borrow work, do it.
        if (fAllowBorrowing && fWorkAvailable.try_wait()) {
            SkAssertResult(this->do_work());
        }
    }

private:
    // This method should be called only when fWorkAvailable indicates there's work to do.
    // Returns false only for the null shutdown sentinel.
    bool do_work() {
        std::function<void(void)> work;
        {
            SkAutoMutexExclusive lock(fWorkLock);
            SkASSERT(!fWork.empty());        // TODO: if (fWork.empty()) { return true; } ?
            work = pop(&fWork);
        }

        if (!work) {
            return false;  // This is Loop()'s signal to shut down.
        }

        work();
        return true;
    }

    // Worker-thread entry point: block until work is available, run it,
    // repeat until the shutdown sentinel arrives.
    static void Loop(void* ctx) {
        auto pool = (SkThreadPool*)ctx;
        do {
            pool->fWorkAvailable.wait();
        } while (pool->do_work());
    }

    // Both SkMutex and SkSpinlock can work here.
    using Lock = SkMutex;

    TArray<std::thread> fThreads;
    WorkList fWork;                 // guarded by fWorkLock
    Lock fWorkLock;
    SkSemaphore fWorkAvailable;     // counts queued items (incl. sentinels)
    bool fAllowBorrowing;
};
+
// FIFO pool: oldest queued work runs first (deque popped from the front).
// threads <= 0 means "one thread per logical core".
std::unique_ptr<SkExecutor> SkExecutor::MakeFIFOThreadPool(int threads, bool allowBorrowing) {
    using WorkList = std::deque<std::function<void(void)>>;
    return std::make_unique<SkThreadPool<WorkList>>(threads > 0 ? threads : num_cores(),
                                                    allowBorrowing);
}
// LIFO pool: newest queued work runs first (array popped from the back).
std::unique_ptr<SkExecutor> SkExecutor::MakeLIFOThreadPool(int threads, bool allowBorrowing) {
    using WorkList = TArray<std::function<void(void)>>;
    return std::make_unique<SkThreadPool<WorkList>>(threads > 0 ? threads : num_cores(),
                                                    allowBorrowing);
}
diff --git a/gfx/skia/skia/src/core/SkFDot6.h b/gfx/skia/skia/src/core/SkFDot6.h
new file mode 100644
index 0000000000..36bc33370e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFDot6.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFDot6_DEFINED
+#define SkFDot6_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTo.h"
+
+typedef int32_t SkFDot6;
+
+/* This uses the magic number approach suggested here:
+ * http://stereopsis.com/sree/fpu2006.html and used in
+ * _cairo_fixed_from_double. It does banker's rounding
+ * (i.e. round to nearest even)
+ */
// Rounds a scalar to 26.6 fixed point (optionally with `shift` extra
// fractional bits) using the FPU "magic number" trick described above.
inline SkFDot6 SkScalarRoundToFDot6(SkScalar x, int shift = 0)
{
    union {
        double fDouble;
        int32_t fBits[2];
    } tmp;
    // Adding the magic constant aligns the wanted fixed-point value into the
    // low mantissa bits of the double, with round-to-nearest-even applied by
    // the FPU itself.
    int fractionalBits = 6 + shift;
    double magic = (1LL << (52 - (fractionalBits))) * 1.5;

    tmp.fDouble = SkScalarToDouble(x) + magic;
    // NOTE(review): reading a union member other than the one last written is
    // technically UB in C++, though supported compilers define it; the result
    // lives in the low word of the mantissa, selected by endianness.
#ifdef SK_CPU_BENDIAN
    return tmp.fBits[1];
#else
    return tmp.fBits[0];
#endif
}
+
+#define SK_FDot6One (64)
+#define SK_FDot6Half (32)
+
#ifdef SK_DEBUG
    // Converts int -> 26.6 fixed point; the debug build asserts the value
    // fits in 16 bits so the shift cannot overflow.
    inline SkFDot6 SkIntToFDot6(int x) {
        SkASSERT(SkToS16(x) == x);
        return x << 6;
    }
#else
    #define SkIntToFDot6(x) ((x) << 6)
#endif
+
+#define SkFDot6Floor(x) ((x) >> 6)
+#define SkFDot6Ceil(x) (((x) + 63) >> 6)
+#define SkFDot6Round(x) (((x) + 32) >> 6)
+
+#define SkFixedToFDot6(x) ((x) >> 10)
+
// Converts 26.6 -> 16.16 fixed point; asserts the 10-bit shift cannot
// overflow the 32-bit result.
inline SkFixed SkFDot6ToFixed(SkFDot6 x) {
    SkASSERT((SkLeftShift(x, 10) >> 10) == x);

    return SkLeftShift(x, 10);
}
+
+#define SkScalarToFDot6(x) (SkFDot6)((x) * 64)
+#define SkFDot6ToScalar(x) ((SkScalar)(x) * 0.015625f)
+#define SkFDot6ToFloat SkFDot6ToScalar
+
+inline SkFixed SkFDot6Div(SkFDot6 a, SkFDot6 b) {
+ SkASSERT(b != 0);
+
+ if (SkTFitsIn<int16_t>(a)) {
+ return SkLeftShift(a, 16) / b;
+ } else {
+ return SkFixedDiv(a, b);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkFlattenable.cpp b/gfx/skia/skia/src/core/SkFlattenable.cpp
new file mode 100644
index 0000000000..9fb121d675
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFlattenable.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkFlattenable.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/core/SkPtrRecorder.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <utility>
+
+struct SkDeserialProcs;
+struct SkSerialProcs;
+
// Starts with no factory names handed out yet.
SkNamedFactorySet::SkNamedFactorySet() : fNextAddedFactory(0) {}

// Returns the 1-based index of `factory`, adding it (and recording its
// registered name) on first use. Returns 0 when the factory has no
// registered name.
uint32_t SkNamedFactorySet::find(SkFlattenable::Factory factory) {
    uint32_t index = fFactorySet.find(factory);
    if (index > 0) {
        return index;  // already present; 0 means "not found"
    }
    const char* name = SkFlattenable::FactoryToName(factory);
    if (nullptr == name) {
        return 0;
    }
    *fNames.append() = name;
    return fFactorySet.add(factory);
}
+
+const char* SkNamedFactorySet::getNextAddedFactoryName() {
+ if (fNextAddedFactory < fNames.size()) {
+ return fNames[fNextAddedFactory++];
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
SkRefCntSet::~SkRefCntSet() {
    // call this now, while our decPtr() is still in scope
    this->reset();
}

// Takes a ref on behalf of the set; `ptr` is really an SkRefCnt*.
void SkRefCntSet::incPtr(void* ptr) {
    ((SkRefCnt*)ptr)->ref();
}

// Releases the set's ref; `ptr` is really an SkRefCnt*.
void SkRefCntSet::decPtr(void* ptr) {
    ((SkRefCnt*)ptr)->unref();
}
+
+///////////////////////////////////////////////////////////////////////////////
+
namespace {

// One registered flattenable: its serialized name and its factory function.
struct Entry {
    const char* fName;
    SkFlattenable::Factory fFactory;
};

// Orders entries by name; the heterogeneous overloads let equal_range search
// the table with a bare const char* key.
struct EntryComparator {
    bool operator()(const Entry& a, const Entry& b) const {
        return strcmp(a.fName, b.fName) < 0;
    }
    bool operator()(const Entry& a, const char* b) const {
        return strcmp(a.fName, b) < 0;
    }
    bool operator()(const char* a, const Entry& b) const {
        return strcmp(a, b.fName) < 0;
    }
};

// Fixed-capacity registry, filled by Register() and sorted by Finalize().
int gCount = 0;
Entry gEntries[128];

}  // namespace
+
// Sorts the registry by name so NameToFactory() can binary-search it.
void SkFlattenable::Finalize() {
    std::sort(gEntries, gEntries + gCount, EntryComparator());
}

// Appends one (name, factory) pair to the fixed-capacity registry. Lookups
// require the table to be sorted afterwards (see Finalize()).
void SkFlattenable::Register(const char name[], Factory factory) {
    SkASSERT(name);
    SkASSERT(factory);
    SkASSERT(gCount < (int)std::size(gEntries));

    gEntries[gCount].fName = name;
    gEntries[gCount].fFactory = factory;
    gCount += 1;
}
+
// Looks up a factory by its registered name via binary search over the
// sorted registry. Returns nullptr for unknown names.
SkFlattenable::Factory SkFlattenable::NameToFactory(const char name[]) {
    RegisterFlattenablesIfNeeded();

    SkASSERT(std::is_sorted(gEntries, gEntries + gCount, EntryComparator()));
    auto pair = std::equal_range(gEntries, gEntries + gCount, name, EntryComparator());
    if (pair.first == pair.second) {
        return nullptr;
    }
    return pair.first->fFactory;
}

// Reverse lookup: linear backwards scan for the factory's registered name.
// Returns nullptr when the factory was never registered.
const char* SkFlattenable::FactoryToName(Factory fact) {
    RegisterFlattenablesIfNeeded();

    const Entry* entries = gEntries;
    for (int i = gCount - 1; i >= 0; --i) {
        if (entries[i].fFactory == fact) {
            return entries[i].fName;
        }
    }
    return nullptr;
}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
// Serializes this flattenable into a newly-allocated SkData sized exactly to
// the written bytes.
sk_sp<SkData> SkFlattenable::serialize(const SkSerialProcs* procs) const {
    SkBinaryWriteBuffer writer;
    if (procs) {
        writer.setSerialProcs(*procs);
    }
    writer.writeFlattenable(this);
    size_t size = writer.bytesWritten();
    auto data = SkData::MakeUninitialized(size);
    writer.writeToMemory(data->writable_data());
    return data;
}

// Serializes into caller-provided storage. Returns the number of bytes
// written, or 0 when `memory_size` was too small and the writer had to spill
// out of the initial storage.
size_t SkFlattenable::serialize(void* memory, size_t memory_size,
                                const SkSerialProcs* procs) const {
    SkBinaryWriteBuffer writer(memory, memory_size);
    if (procs) {
        writer.setSerialProcs(*procs);
    }
    writer.writeFlattenable(this);
    return writer.usingInitialStorage() ? writer.bytesWritten() : 0u;
}

// Reconstructs a flattenable of the given `type` from serialized bytes and
// wraps whatever readFlattenable() produces (which may be nullptr).
sk_sp<SkFlattenable> SkFlattenable::Deserialize(SkFlattenable::Type type, const void* data,
                                                size_t size, const SkDeserialProcs* procs) {
    SkReadBuffer buffer(data, size);
    if (procs) {
        buffer.setDeserialProcs(*procs);
    }
    return sk_sp<SkFlattenable>(buffer.readFlattenable(type));
}
diff --git a/gfx/skia/skia/src/core/SkFont.cpp b/gfx/skia/skia/src/core/SkFont.cpp
new file mode 100644
index 0000000000..84426788d5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFont.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkTLazy.h"
+#include "src/base/SkUTF.h"
+#include "src/base/SkUtils.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkPaintDefaults.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeSpec.h"
+
+using namespace skia_private;
+
+#define kDefault_Size SkPaintDefaults_TextSize
+#define kDefault_Flags SkFont::kBaselineSnap_PrivFlag
+#define kDefault_Edging SkFont::Edging::kAntiAlias
+#define kDefault_Hinting SkPaintDefaults_Hinting
+
+static inline SkScalar valid_size(SkScalar size) {
+ return std::max<SkScalar>(0, size);
+}
+
// Primary constructor; every other SkFont constructor delegates here.
// The size is clamped to >= 0; flags, edging, and hinting get the defaults.
SkFont::SkFont(sk_sp<SkTypeface> face, SkScalar size, SkScalar scaleX, SkScalar skewX)
    : fTypeface(std::move(face))
    , fSize(valid_size(size))
    , fScaleX(scaleX)
    , fSkewX(skewX)
    , fFlags(kDefault_Flags)
    , fEdging(static_cast<unsigned>(kDefault_Edging))
    , fHinting(static_cast<unsigned>(kDefault_Hinting))
{}
+
SkFont::SkFont(sk_sp<SkTypeface> face, SkScalar size) : SkFont(std::move(face), size, 1, 0) {}

SkFont::SkFont(sk_sp<SkTypeface> face) : SkFont(std::move(face), kDefault_Size, 1, 0) {}

SkFont::SkFont() : SkFont(nullptr, kDefault_Size) {}

// Field-by-field equality; typefaces compare by pointer identity, not content.
bool SkFont::operator==(const SkFont& b) const {
    return fTypeface.get() == b.fTypeface.get() &&
           fSize == b.fSize &&
           fScaleX == b.fScaleX &&
           fSkewX == b.fSkewX &&
           fFlags == b.fFlags &&
           fEdging == b.fEdging &&
           fHinting == b.fHinting;
}

// Debug aid: prints every field via SkDebugf.
void SkFont::dump() const {
    SkDebugf("typeface %p\n", fTypeface.get());
    SkDebugf("size %g\n", fSize);
    SkDebugf("skewx %g\n", fSkewX);
    SkDebugf("scalex %g\n", fScaleX);
    SkDebugf("flags 0x%X\n", fFlags);
    SkDebugf("edging %d\n", (unsigned)fEdging);
    SkDebugf("hinting %d\n", (unsigned)fHinting);
}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
// Returns `bits` with `mask` set when `cond` is true, or cleared otherwise.
static inline uint32_t set_clear_mask(uint32_t bits, bool cond, uint32_t mask) {
    if (cond) {
        return bits | mask;
    }
    return bits & ~mask;
}
+
// Each setter below toggles exactly one private flag bit via set_clear_mask().
void SkFont::setForceAutoHinting(bool predicate) {
    fFlags = set_clear_mask(fFlags, predicate, kForceAutoHinting_PrivFlag);
}
void SkFont::setEmbeddedBitmaps(bool predicate) {
    fFlags = set_clear_mask(fFlags, predicate, kEmbeddedBitmaps_PrivFlag);
}
void SkFont::setSubpixel(bool predicate) {
    fFlags = set_clear_mask(fFlags, predicate, kSubpixel_PrivFlag);
}
void SkFont::setLinearMetrics(bool predicate) {
    fFlags = set_clear_mask(fFlags, predicate, kLinearMetrics_PrivFlag);
}
void SkFont::setEmbolden(bool predicate) {
    fFlags = set_clear_mask(fFlags, predicate, kEmbolden_PrivFlag);
}
void SkFont::setBaselineSnap(bool predicate) {
    fFlags = set_clear_mask(fFlags, predicate, kBaselineSnap_PrivFlag);
}
void SkFont::setEdging(Edging e) {
    fEdging = SkToU8(e);  // narrowed to byte-sized storage
}

void SkFont::setHinting(SkFontHinting h) {
    fHinting = SkToU8(h);  // narrowed to byte-sized storage
}

// Sizes are clamped to >= 0; scale and skew are stored unchanged.
void SkFont::setSize(SkScalar size) {
    fSize = valid_size(size);
}
void SkFont::setScaleX(SkScalar scale) {
    fScaleX = scale;
}
void SkFont::setSkewX(SkScalar skew) {
    fSkewX = skew;
}

// Returns a copy of this font with only the size replaced.
SkFont SkFont::makeWithSize(SkScalar newSize) const {
    SkFont font = *this;
    font.setSize(newSize);
    return font;
}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
// Prepares this font for converting glyphs to paths: drops embedded-bitmap
// and auto-hinting flags, forces subpixel positioning, disables hinting,
// downgrades subpixel-AA edging to plain AA, resets `paint` (if given) to a
// plain fill with no path effect, and switches to the canonical path size.
// Returns the scale mapping canonical-size results back to the original size.
SkScalar SkFont::setupForAsPaths(SkPaint* paint) {
    constexpr uint32_t flagsToIgnore = kEmbeddedBitmaps_PrivFlag |
                                       kForceAutoHinting_PrivFlag;

    fFlags = (fFlags & ~flagsToIgnore) | kSubpixel_PrivFlag;
    this->setHinting(SkFontHinting::kNone);

    if (this->getEdging() == Edging::kSubpixelAntiAlias) {
        this->setEdging(Edging::kAntiAlias);
    }

    if (paint) {
        paint->setStyle(SkPaint::kFill_Style);
        paint->setPathEffect(nullptr);
    }
    SkScalar textSize = fSize;
    this->setSize(SkIntToScalar(SkFontPriv::kCanonicalTextSizeForPaths));
    return textSize / SkFontPriv::kCanonicalTextSizeForPaths;
}
+
+bool SkFont::hasSomeAntiAliasing() const {
+ Edging edging = this->getEdging();
+ return edging == SkFont::Edging::kAntiAlias
+ || edging == SkFont::Edging::kSubpixelAntiAlias;
+}
+
// Maps one Unicode code point to a glyph ID via the (possibly default) typeface.
SkGlyphID SkFont::unicharToGlyph(SkUnichar uni) const {
    return this->getTypefaceOrDefault()->unicharToGlyph(uni);
}

// Bulk variant of unicharToGlyph().
void SkFont::unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const {
    this->getTypefaceOrDefault()->unicharsToGlyphs(uni, count, glyphs);
}

// Converts encoded text to glyph IDs, delegating entirely to the typeface.
int SkFont::textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding,
                         SkGlyphID glyphs[], int maxGlyphCount) const {
    return this->getTypefaceOrDefault()->textToGlyphs(text, byteLength, encoding,
                                                      glyphs, maxGlyphCount);
}
+
// Measures the total advance width of `text` in the given encoding. When
// `bounds` is non-null, also accumulates the union of the glyph bounds, each
// offset by the running advance. Metrics come from a canonicalized strike and
// are scaled back to this font's size via strikeToSourceScale.
SkScalar SkFont::measureText(const void* text, size_t length, SkTextEncoding encoding,
                             SkRect* bounds, const SkPaint* paint) const {

    SkAutoToGlyphs atg(*this, text, length, encoding);
    const int glyphCount = atg.count();
    if (glyphCount == 0) {
        // Nothing to measure: empty bounds, zero width.
        if (bounds) {
            bounds->setEmpty();
        }
        return 0;
    }
    const SkGlyphID* glyphIDs = atg.glyphs();

    auto [strikeSpec, strikeToSourceScale] = SkStrikeSpec::MakeCanonicalized(*this, paint);
    SkBulkGlyphMetrics metrics{strikeSpec};
    SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkSpan(glyphIDs, glyphCount));

    SkScalar width = 0;
    if (bounds) {
        // Seed with the first glyph, then join each subsequent glyph's rect
        // translated by the advance accumulated so far.
        *bounds = glyphs[0]->rect();
        width = glyphs[0]->advanceX();
        for (int i = 1; i < glyphCount; ++i) {
            SkRect r = glyphs[i]->rect();
            r.offset(width, 0);
            bounds->join(r);
            width += glyphs[i]->advanceX();
        }
    } else {
        for (auto glyph : glyphs) {
            width += glyph->advanceX();
        }
    }

    if (strikeToSourceScale != 1) {
        // Map strike-space results back into source (font-size) space.
        width *= strikeToSourceScale;
        if (bounds) {
            bounds->fLeft *= strikeToSourceScale;
            bounds->fTop *= strikeToSourceScale;
            bounds->fRight *= strikeToSourceScale;
            bounds->fBottom *= strikeToSourceScale;
        }
    }

    return width;
}
+
// Fills per-glyph advance widths and/or bounds for `count` glyph IDs. Either
// output array may be null to skip that computation; all results are scaled
// from the canonicalized strike back to this font's size.
void SkFont::getWidthsBounds(const SkGlyphID glyphIDs[],
                             int count,
                             SkScalar widths[],
                             SkRect bounds[],
                             const SkPaint* paint) const {
    auto [strikeSpec, strikeToSourceScale] = SkStrikeSpec::MakeCanonicalized(*this, paint);
    SkBulkGlyphMetrics metrics{strikeSpec};
    SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkSpan(glyphIDs, count));

    if (bounds) {
        SkMatrix scaleMat = SkMatrix::Scale(strikeToSourceScale, strikeToSourceScale);
        SkRect* cursor = bounds;
        for (auto glyph : glyphs) {
            scaleMat.mapRectScaleTranslate(cursor++, glyph->rect());
        }
    }

    if (widths) {
        SkScalar* cursor = widths;
        for (auto glyph : glyphs) {
            *cursor++ = glyph->advanceX() * strikeToSourceScale;
        }
    }
}
+
// Writes the position of each glyph laid out from `origin`, accumulating
// each glyph's advance vector (scaled back to this font's size).
void SkFont::getPos(const SkGlyphID glyphIDs[], int count, SkPoint pos[], SkPoint origin) const {
    auto [strikeSpec, strikeToSourceScale] = SkStrikeSpec::MakeCanonicalized(*this);
    SkBulkGlyphMetrics metrics{strikeSpec};
    SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkSpan(glyphIDs, count));

    SkPoint sum = origin;
    for (auto glyph : glyphs) {
        *pos++ = sum;
        sum += glyph->advanceVector() * strikeToSourceScale;
    }
}

// Horizontal-only variant of getPos(): writes the running x coordinate of
// each glyph, starting at `origin`.
void SkFont::getXPos(
        const SkGlyphID glyphIDs[], int count, SkScalar xpos[], SkScalar origin) const {

    auto [strikeSpec, strikeToSourceScale] = SkStrikeSpec::MakeCanonicalized(*this);
    SkBulkGlyphMetrics metrics{strikeSpec};
    SkSpan<const SkGlyph*> glyphs = metrics.glyphs(SkSpan(glyphIDs, count));

    SkScalar loc = origin;
    SkScalar* cursor = xpos;
    for (auto glyph : glyphs) {
        *cursor++ = loc;
        loc += glyph->advanceX() * strikeToSourceScale;
    }
}
+
// Invokes `proc` once per glyph with that glyph's outline path (which may be
// nullptr — see the check in getPath() below) and the matrix that maps
// canonical-path-size units back to this font's size.
void SkFont::getPaths(const SkGlyphID glyphIDs[], int count,
                      void (*proc)(const SkPath*, const SkMatrix&, void*), void* ctx) const {
    SkFont font(*this);
    SkScalar scale = font.setupForAsPaths(nullptr);
    const SkMatrix mx = SkMatrix::Scale(scale, scale);

    SkStrikeSpec strikeSpec = SkStrikeSpec::MakeWithNoDevice(font);
    SkBulkGlyphMetricsAndPaths paths{strikeSpec};
    SkSpan<const SkGlyph*> glyphs = paths.glyphs(SkSpan(glyphIDs, count));

    for (auto glyph : glyphs) {
        proc(glyph->path(), mx, ctx);
    }
}

// Fetches one glyph's outline into `path`, scaled to this font's size.
// Returns false (leaving `path` untouched) when the glyph has no outline.
bool SkFont::getPath(SkGlyphID glyphID, SkPath* path) const {
    struct Pair {
        SkPath* fPath;   // out-param for the transformed path
        bool fWasSet;    // did the callback see a non-null path?
    } pair = { path, false };

    this->getPaths(&glyphID, 1, [](const SkPath* orig, const SkMatrix& mx, void* ctx) {
        Pair* pair = static_cast<Pair*>(ctx);
        if (orig) {
            orig->transform(mx, pair->fPath);
            pair->fWasSet = true;
        }
    }, &pair);
    return pair.fWasSet;
}
+
// Fills `metrics` (a local is used when the caller passes nullptr) with the
// font's metrics, scaled from the canonicalized strike back to this font's
// size. Returns the recommended line spacing: descent - ascent + leading.
SkScalar SkFont::getMetrics(SkFontMetrics* metrics) const {

    auto [strikeSpec, strikeToSourceScale] = SkStrikeSpec::MakeCanonicalized(*this, nullptr);

    SkFontMetrics storage;
    if (nullptr == metrics) {
        metrics = &storage;
    }

    auto cache = strikeSpec.findOrCreateStrike();
    *metrics = cache->getFontMetrics();

    if (strikeToSourceScale != 1) {
        SkFontPriv::ScaleFontMetrics(metrics, strikeToSourceScale);
    }
    return metrics->fDescent - metrics->fAscent + metrics->fLeading;
}
+
// Returns this font's typeface, falling back to the process default when unset.
SkTypeface* SkFont::getTypefaceOrDefault() const {
    return fTypeface ? fTypeface.get() : SkTypeface::GetDefaultTypeface();
}

// Ref-counted variant of getTypefaceOrDefault().
sk_sp<SkTypeface> SkFont::refTypefaceOrDefault() const {
    return fTypeface ? fTypeface : SkTypeface::MakeDefault();
}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
// Multiplies every linear metric by `scale`; used to map canonical-strike
// metrics back to the requested text size.
void SkFontPriv::ScaleFontMetrics(SkFontMetrics* metrics, SkScalar scale) {
    metrics->fTop *= scale;
    metrics->fAscent *= scale;
    metrics->fDescent *= scale;
    metrics->fBottom *= scale;
    metrics->fLeading *= scale;
    metrics->fAvgCharWidth *= scale;
    metrics->fMaxCharWidth *= scale;
    metrics->fXMin *= scale;
    metrics->fXMax *= scale;
    metrics->fXHeight *= scale;
    metrics->fCapHeight *= scale;
    metrics->fUnderlineThickness *= scale;
    metrics->fUnderlinePosition *= scale;
    metrics->fStrikeoutThickness *= scale;
    metrics->fStrikeoutPosition *= scale;
}
+
// Returns the typeface's bounds transformed by this font's size, x-scale,
// and skew.
SkRect SkFontPriv::GetFontBounds(const SkFont& font) {
    SkMatrix m;
    m.setScale(font.getSize() * font.getScaleX(), font.getSize());
    m.postSkew(font.getSkewX(), 0);

    SkTypeface* typeface = font.getTypefaceOrDefault();

    SkRect bounds;
    m.mapRect(&bounds, typeface->getBounds());
    return bounds;
}

// Estimates the effective device-space text size under `matrix`. With
// perspective, the differential area scale at `textLocation` is used; a
// negative return value signals that no finite estimate was possible.
SkScalar SkFontPriv::ApproximateTransformedTextSize(const SkFont& font, const SkMatrix& matrix,
                                                    const SkPoint& textLocation) {
    if (!matrix.hasPerspective()) {
        return font.getSize() * matrix.getMaxScale();
    } else {
        // approximate the scale since we can't get it directly from the matrix
        SkScalar maxScaleSq = SkMatrixPriv::DifferentialAreaScale(matrix, textLocation);
        if (SkScalarIsFinite(maxScaleSq) && !SkScalarNearlyZero(maxScaleSq)) {
            return font.getSize() * SkScalarSqrt(maxScaleSq);
        } else {
            return -font.getSize();
        }
    }
}
+
// Returns how many text elements `byteLength` bytes of `text` contain for the
// given encoding: code points for the UTF encodings, 16-bit IDs for glyphs.
int SkFontPriv::CountTextElements(const void* text, size_t byteLength, SkTextEncoding encoding) {
    switch (encoding) {
        case SkTextEncoding::kUTF8:
            return SkUTF::CountUTF8(reinterpret_cast<const char*>(text), byteLength);
        case SkTextEncoding::kUTF16:
            return SkUTF::CountUTF16(reinterpret_cast<const uint16_t*>(text), byteLength);
        case SkTextEncoding::kUTF32:
            return byteLength >> 2;  // 4 bytes per code point
        case SkTextEncoding::kGlyphID:
            return byteLength >> 1;  // 2 bytes per glyph ID
    }
    SkASSERT(false);  // unreachable for valid encodings
    return 0;
}
+
// Translates glyph IDs back to Unicode code points by building the typeface's
// full glyph->unicode table once, then indexing it. Glyph IDs outside the
// typeface's range map to U+FFFD (the replacement character).
void SkFontPriv::GlyphsToUnichars(const SkFont& font, const SkGlyphID glyphs[], int count,
                                  SkUnichar text[]) {
    if (count <= 0) {
        return;
    }

    auto typeface = font.getTypefaceOrDefault();
    const unsigned numGlyphsInTypeface = typeface->countGlyphs();
    AutoTArray<SkUnichar> unichars(numGlyphsInTypeface);
    typeface->getGlyphToUnicodeMap(unichars.get());

    for (int i = 0; i < count; ++i) {
        unsigned id = glyphs[i];
        text[i] = (id < numGlyphsInTypeface) ? unichars[id] : 0xFFFD;
    }
}
diff --git a/gfx/skia/skia/src/core/SkFontDescriptor.cpp b/gfx/skia/skia/src/core/SkFontDescriptor.cpp
new file mode 100644
index 0000000000..7c82e19918
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontDescriptor.cpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkStreamPriv.h"
+
+// Record ids used by SkFontDescriptor::serialize()/Deserialize(). Each record is a
+// packed-uint id followed by the payload described in the trailing comments.
+enum {
+    kInvalid                = 0x00,
+
+    // Related to a font request.
+    kFontFamilyName         = 0x01, // int length, data[length]
+    kFullName               = 0x04, // int length, data[length]
+    kPostscriptName         = 0x06, // int length, data[length]
+    kWeight                 = 0x10, // scalar (1 - 1000)
+    kWidth                  = 0x11, // scalar (percentage, 100 is 'normal')
+    kSlant                  = 0x12, // scalar (cw angle, 14 is a normal right leaning oblique)
+    kItalic                 = 0x13, // scalar (0 is Roman, 1 is fully Italic)
+
+    // Related to font data. Can also be used with a requested font.
+    kPaletteIndex           = 0xF8, // int
+    kPaletteEntryOverrides  = 0xF9, // int count, (int, u32)[count]
+    kFontVariation          = 0xFA, // int count, (u32, scalar)[count]
+
+    // Related to font data.
+    kFactoryId              = 0xFC, // int
+    kFontIndex              = 0xFD, // int
+    kSentinel               = 0xFF, // no data
+};
+
+SkFontDescriptor::SkFontDescriptor() { }
+
+// Read a packed length followed by that many bytes into 'string'. Returns false if the
+// stream ends early or claims a length larger than the bytes remaining — the remaining-
+// length check guards against over-allocating on corrupt or hostile input.
+static bool SK_WARN_UNUSED_RESULT read_string(SkStream* stream, SkString* string) {
+    size_t length;
+    if (!stream->readPackedUInt(&length)) { return false; }
+    if (length > 0) {
+        if (StreamRemainingLengthIsBelow(stream, length)) {
+            return false;
+        }
+        string->resize(length);
+        if (stream->read(string->data(), length) != length) { return false; }
+    }
+    return true;
+}
+
+// Write record 'id' followed by the string's length and bytes. Empty strings emit no
+// record at all (records are optional on the read side).
+static bool write_string(SkWStream* stream, const SkString& string, uint32_t id) {
+    if (string.isEmpty()) { return true; }
+    return stream->writePackedUInt(id) &&
+           stream->writePackedUInt(string.size()) &&
+           stream->write(string.c_str(), string.size());
+}
+
+// Write record 'id' followed by a packed unsigned value.
+static bool write_uint(SkWStream* stream, size_t n, uint32_t id) {
+    return stream->writePackedUInt(id) &&
+           stream->writePackedUInt(n);
+}
+
+// Write record 'id' followed by a scalar value.
+static bool write_scalar(SkWStream* stream, SkScalar n, uint32_t id) {
+    return stream->writePackedUInt(id) &&
+           stream->writeScalar(n);
+}
+
+// Read the next record id; returns kInvalid when the stream is exhausted, which the
+// Deserialize loop rejects via its default (unknown id) case.
+static size_t SK_WARN_UNUSED_RESULT read_id(SkStream* stream) {
+    size_t i;
+    if (!stream->readPackedUInt(&i)) { return kInvalid; }
+    return i;
+}
+
+// Interpolation keys: the nine OS/2 usWidthClass values (1..9).
+static constexpr SkScalar usWidths[9] {
+    1, 2, 3, 4, 5, 6, 7, 8, 9
+};
+// Width-axis percentage for each usWidthClass value; index 0 and indexes above 9 are
+// clamp entries so a 4-bit width nibble always maps to a sane percentage.
+static constexpr SkScalar width_for_usWidth[0x10] = {
+     50,
+     50, 62.5, 75, 87.5, 100, 112.5, 125, 150, 200,
+    200,  200, 200, 200,  200,   200
+};
+
+// Reconstruct a descriptor from the format produced by serialize(): a packed
+// style-bits uint, then a sequence of optional (id, payload) records terminated by
+// kSentinel, then an optional trailing font-data blob (packed length + bytes).
+// Returns false on any malformed or truncated input. Does not take ownership of
+// 'stream'.
+bool SkFontDescriptor::Deserialize(SkStream* stream, SkFontDescriptor* result) {
+    size_t factoryId;
+    using FactoryIdType = decltype(result->fFactoryId);
+
+    size_t coordinateCount;
+    using CoordinateCountType = decltype(result->fCoordinateCount);
+
+    size_t index;
+    using CollectionIndexType = decltype(result->fCollectionIndex);
+
+    size_t paletteIndex;
+    using PaletteIndexType = decltype(result->fPaletteIndex);
+
+    size_t paletteEntryOverrideCount;
+    using PaletteEntryOverrideCountType = decltype(result->fPaletteEntryOverrideCount);
+
+    size_t paletteEntryOverrideIndex;
+    using PaletteEntryOverrideIndexType = decltype(result->fPaletteEntryOverrides[0].index);
+
+    SkScalar weight = SkFontStyle::kNormal_Weight;
+    SkScalar width = SkFontStyle::kNormal_Width;
+    SkScalar slant = 0;
+    SkScalar italic = 0;
+
+    // Legacy style bits: weight in the high 16 bits, then a usWidth nibble and a slant
+    // nibble. Scalar records read below, if present, override these values.
+    size_t styleBits;
+    if (!stream->readPackedUInt(&styleBits)) { return false; }
+    weight = ((styleBits >> 16) & 0xFFFF);
+    // Note: expr[array] is array[expr] — indexes width_for_usWidth by the width nibble.
+    width = ((styleBits >> 8) & 0x000F)[width_for_usWidth];
+    slant = ((styleBits >> 0) & 0x000F) != SkFontStyle::kUpright_Slant ? 14 : 0;
+    italic = ((styleBits >> 0) & 0x000F) == SkFontStyle::kItalic_Slant ? 1 : 0;
+
+    for (size_t id; (id = read_id(stream)) != kSentinel;) {
+        switch (id) {
+            case kFontFamilyName:
+                if (!read_string(stream, &result->fFamilyName)) { return false; }
+                break;
+            case kFullName:
+                if (!read_string(stream, &result->fFullName)) { return false; }
+                break;
+            case kPostscriptName:
+                if (!read_string(stream, &result->fPostscriptName)) { return false; }
+                break;
+            case kWeight:
+                if (!stream->readScalar(&weight)) { return false; }
+                break;
+            case kWidth:
+                if (!stream->readScalar(&width)) { return false; }
+                break;
+            case kSlant:
+                if (!stream->readScalar(&slant)) { return false; }
+                break;
+            case kItalic:
+                if (!stream->readScalar(&italic)) { return false; }
+                break;
+            case kFontVariation:
+                // Count is range-checked against the member type and the bytes left in
+                // the stream before any allocation happens.
+                if (!stream->readPackedUInt(&coordinateCount)) { return false; }
+                if (!SkTFitsIn<CoordinateCountType>(coordinateCount)) { return false; }
+                if (StreamRemainingLengthIsBelow(stream, coordinateCount)) {
+                    return false;
+                }
+                result->fCoordinateCount = SkTo<CoordinateCountType>(coordinateCount);
+
+                result->fVariation.reset(coordinateCount);
+                for (size_t i = 0; i < coordinateCount; ++i) {
+                    if (!stream->readU32(&result->fVariation[i].axis)) { return false; }
+                    if (!stream->readScalar(&result->fVariation[i].value)) { return false; }
+                }
+                break;
+            case kFontIndex:
+                if (!stream->readPackedUInt(&index)) { return false; }
+                if (!SkTFitsIn<CollectionIndexType>(index)) { return false; }
+                result->fCollectionIndex = SkTo<CollectionIndexType>(index);
+                break;
+            case kPaletteIndex:
+                if (!stream->readPackedUInt(&paletteIndex)) { return false; }
+                if (!SkTFitsIn<PaletteIndexType>(paletteIndex)) { return false; }
+                result->fPaletteIndex = SkTo<PaletteIndexType>(paletteIndex);
+                break;
+            case kPaletteEntryOverrides:
+                if (!stream->readPackedUInt(&paletteEntryOverrideCount)) { return false; }
+                if (!SkTFitsIn<PaletteEntryOverrideCountType>(paletteEntryOverrideCount)) {
+                    return false;
+                }
+                if (StreamRemainingLengthIsBelow(stream, paletteEntryOverrideCount)) {
+                    return false;
+                }
+                result->fPaletteEntryOverrideCount =
+                        SkTo<PaletteEntryOverrideCountType>(paletteEntryOverrideCount);
+
+                result->fPaletteEntryOverrides.reset(paletteEntryOverrideCount);
+                for (size_t i = 0; i < paletteEntryOverrideCount; ++i) {
+                    if (!stream->readPackedUInt(&paletteEntryOverrideIndex)) { return false; }
+                    if (!SkTFitsIn<PaletteEntryOverrideIndexType>(paletteEntryOverrideIndex)) {
+                        return false;
+                    }
+                    result->fPaletteEntryOverrides[i].index =
+                            SkTo<PaletteEntryOverrideIndexType>(paletteEntryOverrideIndex);
+                    if (!stream->readU32(&result->fPaletteEntryOverrides[i].color)) {
+                        return false;
+                    }
+                }
+                break;
+            case kFactoryId:
+                if (!stream->readPackedUInt(&factoryId)) { return false; }
+                if (!SkTFitsIn<FactoryIdType>(factoryId)) { return false; }
+                result->fFactoryId = SkTo<FactoryIdType>(factoryId);
+                break;
+            default:
+                SkDEBUGFAIL("Unknown id used by a font descriptor");
+                return false;
+        }
+    }
+
+    // Fold the accumulated weight/width/slant/italic scalars into an SkFontStyle.
+    // A non-zero italic value wins over a plain oblique slant.
+    SkFontStyle::Slant slantEnum = SkFontStyle::kUpright_Slant;
+    if (slant != 0) { slantEnum = SkFontStyle::kOblique_Slant; }
+    if (0 < italic) { slantEnum = SkFontStyle::kItalic_Slant; }
+    SkFontStyle::Width widthEnum = SkFontStyleWidthForWidthAxisValue(width);
+    result->fStyle = SkFontStyle(SkScalarRoundToInt(weight), widthEnum, slantEnum);
+
+    // Optional trailing font data blob.
+    size_t length;
+    if (!stream->readPackedUInt(&length)) { return false; }
+    if (length > 0) {
+        if (StreamRemainingLengthIsBelow(stream, length)) {
+            return false;
+        }
+        sk_sp<SkData> data(SkData::MakeUninitialized(length));
+        if (stream->read(data->writable_data(), length) != length) {
+            SkDEBUGFAIL("Could not read font data");
+            return false;
+        }
+        result->fStream = SkMemoryStream::Make(std::move(data));
+    }
+    return true;
+}
+
+// Write the descriptor in the format Deserialize() expects: packed style bits, then
+// optional (id, payload) records, kSentinel, and finally the font data blob (or a
+// packed 0 when there is no stream).
+void SkFontDescriptor::serialize(SkWStream* stream) const {
+    uint32_t styleBits = (fStyle.weight() << 16) | (fStyle.width() << 8) | (fStyle.slant());
+    stream->writePackedUInt(styleBits);
+
+    write_string(stream, fFamilyName, kFontFamilyName);
+    write_string(stream, fFullName, kFullName);
+    write_string(stream, fPostscriptName, kPostscriptName);
+
+    write_scalar(stream, fStyle.weight(), kWeight);
+    // expr[array] is array[expr]: map the usWidthClass back to a percentage.
+    write_scalar(stream, fStyle.width()[width_for_usWidth], kWidth);
+    // 14 degrees is the conventional oblique angle (see kSlant's comment above).
+    write_scalar(stream, fStyle.slant() == SkFontStyle::kUpright_Slant ? 0 : 14, kSlant);
+    write_scalar(stream, fStyle.slant() == SkFontStyle::kItalic_Slant ? 1 : 0, kItalic);
+
+    if (fCollectionIndex > 0) {
+        write_uint(stream, fCollectionIndex, kFontIndex);
+    }
+    if (fPaletteIndex > 0) {
+        write_uint(stream, fPaletteIndex, kPaletteIndex);
+    }
+    if (fCoordinateCount > 0) {
+        write_uint(stream, fCoordinateCount, kFontVariation);
+        for (int i = 0; i < fCoordinateCount; ++i) {
+            stream->write32(fVariation[i].axis);
+            stream->writeScalar(fVariation[i].value);
+        }
+    }
+    if (fPaletteEntryOverrideCount > 0) {
+        // Negative override indexes are unrepresentable as packed uints; count and
+        // emit only the non-negative ones so the written count matches the entries.
+        int nonNegativePaletteOverrideIndexes = 0;
+        for (int i = 0; i < fPaletteEntryOverrideCount; ++i) {
+            if (0 <= fPaletteEntryOverrides[i].index) {
+                ++nonNegativePaletteOverrideIndexes;
+            }
+        }
+        write_uint(stream, nonNegativePaletteOverrideIndexes, kPaletteEntryOverrides);
+        for (int i = 0; i < fPaletteEntryOverrideCount; ++i) {
+            if (0 <= fPaletteEntryOverrides[i].index) {
+                stream->writePackedUInt(fPaletteEntryOverrides[i].index);
+                stream->write32(fPaletteEntryOverrides[i].color);
+            }
+        }
+    }
+
+    write_uint(stream, fFactoryId, kFactoryId);
+
+    stream->writePackedUInt(kSentinel);
+
+    if (fStream) {
+        // Duplicate so serializing does not disturb the descriptor's own read position.
+        std::unique_ptr<SkStreamAsset> fontStream = fStream->duplicate();
+        size_t length = fontStream->getLength();
+        stream->writePackedUInt(length);
+        stream->writeStream(fontStream.get(), length);
+    } else {
+        stream->writePackedUInt(0);
+    }
+}
+
+// Map a width-axis percentage to the nearest SkFontStyle::Width (usWidthClass 1..9)
+// by piecewise-linear interpolation over width_for_usWidth[1..9].
+SkFontStyle::Width SkFontDescriptor::SkFontStyleWidthForWidthAxisValue(SkScalar width) {
+    int usWidth = SkScalarRoundToInt(SkScalarInterpFunc(width, &width_for_usWidth[1], usWidths, 9));
+    return static_cast<SkFontStyle::Width>(usWidth);
+}
diff --git a/gfx/skia/skia/src/core/SkFontDescriptor.h b/gfx/skia/skia/src/core/SkFontDescriptor.h
new file mode 100644
index 0000000000..b4437d73e3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontDescriptor.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontDescriptor_DEFINED
+#define SkFontDescriptor_DEFINED
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTemplates.h"
+
+// Bundles a font's raw data stream together with the parameters needed to instantiate
+// it: collection index, palette selection/overrides, and variation-axis coordinates.
+// Copyable; copying duplicates the underlying stream.
+class SkFontData {
+public:
+    /** Makes a copy of the data in 'axis'. */
+    SkFontData(std::unique_ptr<SkStreamAsset> stream, int index, int paletteIndex,
+               const SkFixed* axis, int axisCount,
+               const SkFontArguments::Palette::Override* paletteOverrides, int paletteOverrideCount)
+        : fStream(std::move(stream))
+        , fIndex(index)
+        , fPaletteIndex(paletteIndex)
+        , fAxisCount(axisCount)
+        , fPaletteOverrideCount(paletteOverrideCount)
+        , fAxis(fAxisCount)
+        , fPaletteOverrides(fPaletteOverrideCount)
+    {
+        for (int i = 0; i < fAxisCount; ++i) {
+            fAxis[i] = axis[i];
+        }
+        for (int i = 0; i < fPaletteOverrideCount; ++i) {
+            fPaletteOverrides[i] = paletteOverrides[i];
+        }
+    }
+
+    // Deep copy: duplicates the stream and copies axis/palette arrays.
+    SkFontData(const SkFontData& that)
+        : fStream(that.fStream->duplicate())
+        , fIndex(that.fIndex)
+        , fPaletteIndex(that.fPaletteIndex)
+        , fAxisCount(that.fAxisCount)
+        , fPaletteOverrideCount(that.fPaletteOverrideCount)
+        , fAxis(fAxisCount)
+        , fPaletteOverrides(fPaletteOverrideCount)
+    {
+        for (int i = 0; i < fAxisCount; ++i) {
+            fAxis[i] = that.fAxis[i];
+        }
+        for (int i = 0; i < fPaletteOverrideCount; ++i) {
+            fPaletteOverrides[i] = that.fPaletteOverrides[i];
+        }
+    }
+    bool hasStream() const { return fStream != nullptr; }
+    // Transfers ownership of the stream out of this object.
+    std::unique_ptr<SkStreamAsset> detachStream() { return std::move(fStream); }
+    SkStreamAsset* getStream() { return fStream.get(); }
+    SkStreamAsset const* getStream() const { return fStream.get(); }
+    int getIndex() const { return fIndex; }
+    int getAxisCount() const { return fAxisCount; }
+    const SkFixed* getAxis() const { return fAxis.get(); }
+    int getPaletteIndex() const { return fPaletteIndex; }
+    int getPaletteOverrideCount() const { return fPaletteOverrideCount; }
+    const SkFontArguments::Palette::Override* getPaletteOverrides() const {
+        return fPaletteOverrides.get();
+    }
+
+private:
+    std::unique_ptr<SkStreamAsset> fStream;
+    int fIndex;                 // collection (ttc) index
+    int fPaletteIndex;
+    int fAxisCount;
+    int fPaletteOverrideCount;
+    skia_private::AutoSTMalloc<4, SkFixed> fAxis;
+    skia_private::AutoSTMalloc<4, SkFontArguments::Palette::Override> fPaletteOverrides;
+};
+
+// A serializable description of a typeface: names, style, and (optionally) the raw
+// font data plus the arguments needed to re-create the face from it. Produced and
+// consumed by serialize()/Deserialize() in SkFontDescriptor.cpp.
+class SkFontDescriptor : SkNoncopyable {
+public:
+    SkFontDescriptor();
+    // Does not affect ownership of SkStream.
+    static bool Deserialize(SkStream*, SkFontDescriptor* result);
+
+    void serialize(SkWStream*) const;
+
+    SkFontStyle getStyle() const { return fStyle; }
+    void setStyle(SkFontStyle style) { fStyle = style; }
+
+    const char* getFamilyName() const { return fFamilyName.c_str(); }
+    const char* getFullName() const { return fFullName.c_str(); }
+    const char* getPostscriptName() const { return fPostscriptName.c_str(); }
+
+    void setFamilyName(const char* name) { fFamilyName.set(name); }
+    void setFullName(const char* name) { fFullName.set(name); }
+    void setPostscriptName(const char* name) { fPostscriptName.set(name); }
+
+    bool hasStream() const { return bool(fStream); }
+    // NOTE: dupStream()/dupStream callers must check hasStream() first; fStream may be null.
+    std::unique_ptr<SkStreamAsset> dupStream() const { return fStream->duplicate(); }
+    int getCollectionIndex() const { return fCollectionIndex; }
+    int getPaletteIndex() const { return fPaletteIndex; }
+    int getVariationCoordinateCount() const { return fCoordinateCount; }
+    const SkFontArguments::VariationPosition::Coordinate* getVariation() const {
+        return fVariation.get();
+    }
+    int getPaletteEntryOverrideCount() const { return fPaletteEntryOverrideCount; }
+    const SkFontArguments::Palette::Override* getPaletteEntryOverrides() const {
+        return fPaletteEntryOverrides.get();
+    }
+    SkTypeface::FactoryId getFactoryId() {
+        return fFactoryId;
+    }
+
+    std::unique_ptr<SkStreamAsset> detachStream() { return std::move(fStream); }
+    void setStream(std::unique_ptr<SkStreamAsset> stream) { fStream = std::move(stream); }
+    void setCollectionIndex(int collectionIndex) { fCollectionIndex = collectionIndex; }
+    void setPaletteIndex(int paletteIndex) { fPaletteIndex = paletteIndex; }
+    // Resizes the coordinate array and returns it for the caller to fill in.
+    SkFontArguments::VariationPosition::Coordinate* setVariationCoordinates(int coordinateCount) {
+        fCoordinateCount = coordinateCount;
+        return fVariation.reset(coordinateCount);
+    }
+    // Resizes the palette-override array and returns it for the caller to fill in.
+    SkFontArguments::Palette::Override* setPaletteEntryOverrides(int paletteEntryOverrideCount) {
+        fPaletteEntryOverrideCount = paletteEntryOverrideCount;
+        return fPaletteEntryOverrides.reset(paletteEntryOverrideCount);
+    }
+    void setFactoryId(SkTypeface::FactoryId factoryId) {
+        fFactoryId = factoryId;
+    }
+
+    // Bundle the stored collection index, variation position, and palette into the
+    // SkFontArguments form consumed by typeface factories.
+    SkFontArguments getFontArguments() const {
+        return SkFontArguments()
+                .setCollectionIndex(this->getCollectionIndex())
+                .setVariationDesignPosition({this->getVariation(),this->getVariationCoordinateCount()})
+                .setPalette({this->getPaletteIndex(),
+                             this->getPaletteEntryOverrides(),
+                             this->getPaletteEntryOverrideCount()});
+    }
+    static SkFontStyle::Width SkFontStyleWidthForWidthAxisValue(SkScalar width);
+
+private:
+    SkString fFamilyName;
+    SkString fFullName;
+    SkString fPostscriptName;
+    SkFontStyle fStyle;
+
+    std::unique_ptr<SkStreamAsset> fStream;
+    int fCollectionIndex = 0;
+    using Coordinates =
+            skia_private::AutoSTMalloc<4, SkFontArguments::VariationPosition::Coordinate>;
+    int fCoordinateCount = 0;
+    Coordinates fVariation;
+    int fPaletteIndex = 0;
+    int fPaletteEntryOverrideCount = 0;
+    skia_private::AutoTMalloc<SkFontArguments::Palette::Override> fPaletteEntryOverrides;
+    SkTypeface::FactoryId fFactoryId = 0;
+};
+
+#endif // SkFontDescriptor_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFontMetricsPriv.cpp b/gfx/skia/skia/src/core/SkFontMetricsPriv.cpp
new file mode 100644
index 0000000000..484911a413
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontMetricsPriv.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkFontMetricsPriv.h"
+
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <optional>
+
+// Write every SkFontMetrics field to the buffer in a fixed order; must stay in
+// lockstep with MakeFromBuffer() below.
+void SkFontMetricsPriv::Flatten(SkWriteBuffer& buffer, const SkFontMetrics& metrics) {
+    buffer.writeUInt(metrics.fFlags);
+    buffer.writeScalar(metrics.fTop);
+    buffer.writeScalar(metrics.fAscent);
+    buffer.writeScalar(metrics.fDescent);
+    buffer.writeScalar(metrics.fBottom);
+    buffer.writeScalar(metrics.fLeading);
+    buffer.writeScalar(metrics.fAvgCharWidth);
+    buffer.writeScalar(metrics.fMaxCharWidth);
+    buffer.writeScalar(metrics.fXMin);
+    buffer.writeScalar(metrics.fXMax);
+    buffer.writeScalar(metrics.fXHeight);
+    buffer.writeScalar(metrics.fCapHeight);
+    buffer.writeScalar(metrics.fUnderlineThickness);
+    buffer.writeScalar(metrics.fUnderlinePosition);
+    buffer.writeScalar(metrics.fStrikeoutThickness);
+    buffer.writeScalar(metrics.fStrikeoutPosition);
+}
+
+// Read the fields written by Flatten(), in the same order. Returns std::nullopt if the
+// buffer was invalidated by any of the reads (truncated/corrupt data).
+std::optional<SkFontMetrics> SkFontMetricsPriv::MakeFromBuffer(SkReadBuffer& buffer) {
+    SkASSERT(buffer.isValid());
+
+    SkFontMetrics metrics;
+    metrics.fFlags = buffer.readUInt();
+    metrics.fTop = buffer.readScalar();
+    metrics.fAscent = buffer.readScalar();
+    metrics.fDescent = buffer.readScalar();
+    metrics.fBottom = buffer.readScalar();
+    metrics.fLeading = buffer.readScalar();
+    metrics.fAvgCharWidth = buffer.readScalar();
+    metrics.fMaxCharWidth = buffer.readScalar();
+    metrics.fXMin = buffer.readScalar();
+    metrics.fXMax = buffer.readScalar();
+    metrics.fXHeight = buffer.readScalar();
+    metrics.fCapHeight = buffer.readScalar();
+    metrics.fUnderlineThickness = buffer.readScalar();
+    metrics.fUnderlinePosition = buffer.readScalar();
+    metrics.fStrikeoutThickness = buffer.readScalar();
+    metrics.fStrikeoutPosition = buffer.readScalar();
+
+    // All the reads above were valid, so return the metrics.
+    if (buffer.isValid()) {
+        return metrics;
+    }
+
+    return std::nullopt;
+}
diff --git a/gfx/skia/skia/src/core/SkFontMetricsPriv.h b/gfx/skia/skia/src/core/SkFontMetricsPriv.h
new file mode 100644
index 0000000000..a39e0b4b53
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontMetricsPriv.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMetricsPriv_DEFINED
+#define SkFontMetricsPriv_DEFINED
+
+#include "include/core/SkFontMetrics.h"
+
+#include <optional>
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+// Private serialization helpers for SkFontMetrics (see SkFontMetricsPriv.cpp).
+class SkFontMetricsPriv {
+public:
+    static void Flatten(SkWriteBuffer& buffer, const SkFontMetrics& metrics);
+    static std::optional<SkFontMetrics> MakeFromBuffer(SkReadBuffer& buffer);
+};
+#endif //SkFontMetricsPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFontMgr.cpp b/gfx/skia/skia/src/core/SkFontMgr.cpp
new file mode 100644
index 0000000000..74e2ea10f4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontMgr.cpp
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkOnce.h"
+#include "src/core/SkFontDescriptor.h"
+
+class SkFontStyle;
+class SkTypeface;
+
+// A style set containing no styles. getStyle/createTypeface should never be called on
+// it (count() is 0) and assert in debug builds.
+class SkEmptyFontStyleSet : public SkFontStyleSet {
+public:
+    int count() override { return 0; }
+    void getStyle(int, SkFontStyle*, SkString*) override {
+        SkDEBUGFAIL("SkFontStyleSet::getStyle called on empty set");
+    }
+    SkTypeface* createTypeface(int index) override {
+        SkDEBUGFAIL("SkFontStyleSet::createTypeface called on empty set");
+        return nullptr;
+    }
+    SkTypeface* matchStyle(const SkFontStyle&) override {
+        return nullptr;
+    }
+};
+
+SkFontStyleSet* SkFontStyleSet::CreateEmpty() { return new SkEmptyFontStyleSet; }
+
+///////////////////////////////////////////////////////////////////////////////
+
+// A font manager that knows no fonts: every query returns null or an empty set. Used
+// as the fallback when no platform factory is available (see SkFontMgr::RefEmpty).
+class SkEmptyFontMgr : public SkFontMgr {
+protected:
+    int onCountFamilies() const override {
+        return 0;
+    }
+    void onGetFamilyName(int index, SkString* familyName) const override {
+        SkDEBUGFAIL("onGetFamilyName called with bad index");
+    }
+    SkFontStyleSet* onCreateStyleSet(int index) const override {
+        SkDEBUGFAIL("onCreateStyleSet called with bad index");
+        return nullptr;
+    }
+    SkFontStyleSet* onMatchFamily(const char[]) const override {
+        return SkFontStyleSet::CreateEmpty();
+    }
+
+    SkTypeface* onMatchFamilyStyle(const char[], const SkFontStyle&) const override {
+        return nullptr;
+    }
+    SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+                                            const SkFontStyle& style,
+                                            const char* bcp47[],
+                                            int bcp47Count,
+                                            SkUnichar character) const override {
+        return nullptr;
+    }
+
+    sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int) const override {
+        return nullptr;
+    }
+    sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int) const override {
+        return nullptr;
+    }
+    sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>,
+                                           const SkFontArguments&) const override {
+        return nullptr;
+    }
+    sk_sp<SkTypeface> onMakeFromFile(const char[], int) const override {
+        return nullptr;
+    }
+    sk_sp<SkTypeface> onLegacyMakeTypeface(const char [], SkFontStyle) const override {
+        return nullptr;
+    }
+};
+
+// Normalize a possibly-null style set to an empty one, so public SkFontMgr entry
+// points never hand callers a null pointer.
+static SkFontStyleSet* emptyOnNull(SkFontStyleSet* fsset) {
+    if (nullptr == fsset) {
+        fsset = SkFontStyleSet::CreateEmpty();
+    }
+    return fsset;
+}
+
+// Public SkFontMgr entry points: validate/normalize arguments, then forward to the
+// corresponding protected on* virtual implemented by the concrete manager.
+int SkFontMgr::countFamilies() const {
+    return this->onCountFamilies();
+}
+
+void SkFontMgr::getFamilyName(int index, SkString* familyName) const {
+    this->onGetFamilyName(index, familyName);
+}
+
+SkFontStyleSet* SkFontMgr::createStyleSet(int index) const {
+    // Never returns null; a null result from the subclass becomes an empty set.
+    return emptyOnNull(this->onCreateStyleSet(index));
+}
+
+SkFontStyleSet* SkFontMgr::matchFamily(const char familyName[]) const {
+    // Never returns null; a null result from the subclass becomes an empty set.
+    return emptyOnNull(this->onMatchFamily(familyName));
+}
+
+SkTypeface* SkFontMgr::matchFamilyStyle(const char familyName[],
+                                        const SkFontStyle& fs) const {
+    return this->onMatchFamilyStyle(familyName, fs);
+}
+
+SkTypeface* SkFontMgr::matchFamilyStyleCharacter(const char familyName[], const SkFontStyle& style,
+                                                 const char* bcp47[], int bcp47Count,
+                                                 SkUnichar character) const {
+    return this->onMatchFamilyStyleCharacter(familyName, style, bcp47, bcp47Count, character);
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromData(sk_sp<SkData> data, int ttcIndex) const {
+    // Null input short-circuits to null rather than burdening each subclass.
+    if (nullptr == data) {
+        return nullptr;
+    }
+    return this->onMakeFromData(std::move(data), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromStream(std::unique_ptr<SkStreamAsset> stream,
+                                            int ttcIndex) const {
+    if (nullptr == stream) {
+        return nullptr;
+    }
+    return this->onMakeFromStreamIndex(std::move(stream), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromStream(std::unique_ptr<SkStreamAsset> stream,
+                                            const SkFontArguments& args) const {
+    if (nullptr == stream) {
+        return nullptr;
+    }
+    return this->onMakeFromStreamArgs(std::move(stream), args);
+}
+
+sk_sp<SkTypeface> SkFontMgr::makeFromFile(const char path[], int ttcIndex) const {
+    if (nullptr == path) {
+        return nullptr;
+    }
+    return this->onMakeFromFile(path, ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr::legacyMakeTypeface(const char familyName[], SkFontStyle style) const {
+    return this->onLegacyMakeTypeface(familyName, style);
+}
+
+// Shared process-wide empty manager; callers get an extra ref to the singleton.
+sk_sp<SkFontMgr> SkFontMgr::RefEmpty() {
+    static SkEmptyFontMgr singleton;
+    return sk_ref_sp(&singleton);
+}
+
+// A global function pointer that's not declared, but can be overridden at startup by test tools.
+sk_sp<SkFontMgr> (*gSkFontMgr_DefaultFactory)() = nullptr;
+
+// Lazily create (exactly once) and return the process-wide default font manager:
+// the test-tool override if set, else the platform SkFontMgr::Factory(), else an
+// empty manager — so callers never receive null.
+sk_sp<SkFontMgr> SkFontMgr::RefDefault() {
+    static SkOnce once;
+    static sk_sp<SkFontMgr> singleton;
+
+    once([]{
+        sk_sp<SkFontMgr> fm = gSkFontMgr_DefaultFactory ? gSkFontMgr_DefaultFactory()
+                                                        : SkFontMgr::Factory();
+        singleton = fm ? std::move(fm) : RefEmpty();
+    });
+    return singleton;
+}
+
+/**
+* Width has the greatest priority.
+* If the value of pattern.width is 5 (normal) or less,
+* narrower width values are checked first, then wider values.
+* If the value of pattern.width is greater than 5 (normal),
+* wider values are checked first, followed by narrower values.
+*
+* Italic/Oblique has the next highest priority.
+* If italic requested and there is some italic font, use it.
+* If oblique requested and there is some oblique font, use it.
+* If italic requested and there is some oblique font, use it.
+* If oblique requested and there is some italic font, use it.
+*
+* Exact match.
+* If pattern.weight < 400, weights below pattern.weight are checked
+* in descending order followed by weights above pattern.weight
+* in ascending order until a match is found.
+* If pattern.weight > 500, weights above pattern.weight are checked
+* in ascending order followed by weights below pattern.weight
+* in descending order until a match is found.
+* If pattern.weight is 400, 500 is checked first
+* and then the rule for pattern.weight < 400 is used.
+* If pattern.weight is 500, 400 is checked first
+* and then the rule for pattern.weight < 400 is used.
+*/
+// Pick the member of this set that best matches 'pattern' per the CSS3 font matching
+// rules documented in the comment block above. Returns null only for an empty set.
+SkTypeface* SkFontStyleSet::matchStyleCSS3(const SkFontStyle& pattern) {
+    int count = this->count();
+    if (0 == count) {
+        return nullptr;
+    }
+
+    // Score is built in three fields (width, slant, weight), each shifted left so
+    // that width dominates slant, which dominates weight.
+    struct Score {
+        int score;
+        int index;
+        Score& operator +=(int rhs) { this->score += rhs; return *this; }
+        Score& operator <<=(int rhs) { this->score <<= rhs; return *this; }
+        bool operator <(const Score& that) { return this->score < that.score; }
+    };
+
+    Score maxScore = { 0, 0 };
+    for (int i = 0; i < count; ++i) {
+        SkFontStyle current;
+        this->getStyle(i, &current, nullptr);
+        Score currentScore = { 0, i };
+
+        // CSS stretch / SkFontStyle::Width
+        // Takes priority over everything else.
+        if (pattern.width() <= SkFontStyle::kNormal_Width) {
+            // Narrow pattern: prefer the closest narrower width, then wider ones.
+            if (current.width() <= pattern.width()) {
+                currentScore += 10 - pattern.width() + current.width();
+            } else {
+                currentScore += 10 - current.width();
+            }
+        } else {
+            // Wide pattern: prefer the closest wider width, then narrower ones.
+            if (current.width() > pattern.width()) {
+                currentScore += 10 + pattern.width() - current.width();
+            } else {
+                currentScore += current.width();
+            }
+        }
+        currentScore <<= 8;
+
+        // CSS style (normal, italic, oblique) / SkFontStyle::Slant (upright, italic, oblique)
+        // Takes priority over all valid weights.
+        static_assert(SkFontStyle::kUpright_Slant == 0 &&
+                      SkFontStyle::kItalic_Slant  == 1 &&
+                      SkFontStyle::kOblique_Slant == 2,
+                      "SkFontStyle::Slant values not as required.");
+        SkASSERT(0 <= pattern.slant() && pattern.slant() <= 2 &&
+                 0 <= current.slant() && current.slant() <= 2);
+        static const int score[3][3] = {
+            /*               Upright Italic Oblique  [current]*/
+            /*   Upright */ {   3   ,  1   ,   2   },
+            /*   Italic  */ {   1   ,  3   ,   2   },
+            /*   Oblique */ {   1   ,  2   ,   3   },
+            /* [pattern] */
+        };
+        currentScore += score[pattern.slant()][current.slant()];
+        currentScore <<= 8;
+
+        // Synthetics (weight, style) [no stretch synthetic?]
+
+        // CSS weight / SkFontStyle::Weight
+        // The 'closer' to the target weight, the higher the score.
+        // 1000 is the 'heaviest' recognized weight
+        if (pattern.weight() == current.weight()) {
+            currentScore += 1000;
+        // less than 400 prefer lighter weights
+        } else if (pattern.weight() < 400) {
+            if (current.weight() <= pattern.weight()) {
+                currentScore += 1000 - pattern.weight() + current.weight();
+            } else {
+                currentScore += 1000 - current.weight();
+            }
+        // between 400 and 500 prefer heavier up to 500, then lighter weights
+        } else if (pattern.weight() <= 500) {
+            if (current.weight() >= pattern.weight() && current.weight() <= 500) {
+                currentScore += 1000 + pattern.weight() - current.weight();
+            } else if (current.weight() <= pattern.weight()) {
+                currentScore += 500 + current.weight();
+            } else {
+                currentScore += 1000 - current.weight();
+            }
+        // greater than 500 prefer heavier weights
+        } else if (pattern.weight() > 500) {
+            if (current.weight() > pattern.weight()) {
+                currentScore += 1000 + pattern.weight() - current.weight();
+            } else {
+                currentScore += current.weight();
+            }
+        }
+
+        // Ties keep the earliest entry (strict less-than comparison).
+        if (maxScore < currentScore) {
+            maxScore = currentScore;
+        }
+    }
+
+    return this->createTypeface(maxScore.index);
+}
diff --git a/gfx/skia/skia/src/core/SkFontMgrPriv.h b/gfx/skia/skia/src/core/SkFontMgrPriv.h
new file mode 100644
index 0000000000..40cf264037
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontMgrPriv.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkFontMgrPriv_DEFINED
+#define SkFontMgrPriv_DEFINED
+
+#include "include/core/SkFontMgr.h"
+
+extern sk_sp<SkFontMgr> (*gSkFontMgr_DefaultFactory)();
+
+#endif // SkFontMgrPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkFontPriv.h b/gfx/skia/skia/src/core/SkFontPriv.h
new file mode 100644
index 0000000000..3907085baa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontPriv.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontPriv_DEFINED
+#define SkFontPriv_DEFINED
+
+#include "include/core/SkFont.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkTemplates.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+// Static helpers giving Skia internals access to SkFont behavior (and, via friendship
+// with SkFont, its private fFlags).
+class SkFontPriv {
+public:
+    /*  This is the size we use when we ask for a glyph's path. We then
+     *  post-transform it as we draw to match the request.
+     *  This is done to try to re-use cache entries for the path.
+     *
+     *  This value is somewhat arbitrary. In theory, it could be 1, since
+     *  we store paths as floats. However, we get the path from the font
+     *  scaler, and it may represent its paths as fixed-point (or 26.6),
+     *  so we shouldn't ask for something too big (might overflow 16.16)
+     *  or too small (underflow 26.6).
+     *
+     *  This value could track kMaxSizeForGlyphCache, assuming the above
+     *  constraints, but since we ask for unhinted paths, the two values
+     *  need not match per-se.
+     */
+    inline static constexpr int kCanonicalTextSizeForPaths = 64;
+
+    /**
+     *  Return a matrix that applies the paint's text values: size, scale, skew
+     */
+    static SkMatrix MakeTextMatrix(SkScalar size, SkScalar scaleX, SkScalar skewX) {
+        SkMatrix m = SkMatrix::Scale(size * scaleX, size);
+        if (skewX) {
+            m.postSkew(skewX, 0);
+        }
+        return m;
+    }
+
+    static SkMatrix MakeTextMatrix(const SkFont& font) {
+        return MakeTextMatrix(font.getSize(), font.getScaleX(), font.getSkewX());
+    }
+
+    // Scale all dimensional fields of the metrics by the given factor.
+    static void ScaleFontMetrics(SkFontMetrics*, SkScalar);
+
+    /**
+        Returns the union of bounds of all glyphs.
+        Returned dimensions are computed by font manager from font data,
+        ignoring SkPaint::Hinting. Includes font metrics, but not fake bold or SkPathEffect.
+
+        If text size is large, text scale is one, and text skew is zero,
+        returns the bounds as:
+        { SkFontMetrics::fXMin, SkFontMetrics::fTop, SkFontMetrics::fXMax, SkFontMetrics::fBottom }.
+
+        @return  union of bounds of all glyphs
+     */
+    static SkRect GetFontBounds(const SkFont&);
+
+    /** Return the approximate largest dimension of typical text when transformed by the matrix.
+     *
+     *  @param matrix  used to transform size
+     *  @param textLocation  location of the text prior to matrix transformation. Used if the
+     *                       matrix has perspective.
+     *  @return  typical largest dimension
+     */
+    static SkScalar ApproximateTransformedTextSize(const SkFont& font, const SkMatrix& matrix,
+                                                   const SkPoint& textLocation);
+
+    // True iff size, scaleX, and skewX are all finite.
+    static bool IsFinite(const SkFont& font) {
+        return SkScalarIsFinite(font.getSize()) &&
+               SkScalarIsFinite(font.getScaleX()) &&
+               SkScalarIsFinite(font.getSkewX());
+    }
+
+    // Returns the number of elements (characters or glyphs) in the array.
+    static int CountTextElements(const void* text, size_t byteLength, SkTextEncoding);
+
+    // Map glyph ids back to Unicode code points (see SkFont.cpp for details).
+    static void GlyphsToUnichars(const SkFont&, const uint16_t glyphs[], int count, SkUnichar[]);
+
+    // Serialization helpers for SkFont.
+    static void Flatten(const SkFont&, SkWriteBuffer& buffer);
+    static bool Unflatten(SkFont*, SkReadBuffer& buffer);
+
+    // Direct access to SkFont's private flag bits.
+    static inline uint8_t Flags(const SkFont& font) { return font.fFlags; }
+};
+
+// RAII helper that presents any encoded text as an array of glyph ids. When the text
+// is already glyph ids it borrows the caller's buffer directly (no copy); otherwise it
+// converts into small stack-backed storage. The view is only valid while the source
+// text outlives this object.
+class SkAutoToGlyphs {
+public:
+    SkAutoToGlyphs(const SkFont& font, const void* text, size_t length, SkTextEncoding encoding) {
+        if (encoding == SkTextEncoding::kGlyphID || length == 0) {
+            fGlyphs = reinterpret_cast<const uint16_t*>(text);
+            fCount = SkToInt(length >> 1);  // 2 bytes per glyph id
+        } else {
+            fCount = font.countText(text, length, encoding);
+            if (fCount < 0) {
+                fCount = 0;  // countText reports invalid text as negative; treat as empty
+            }
+            fStorage.reset(fCount);
+            font.textToGlyphs(text, length, encoding, fStorage.get(), fCount);
+            fGlyphs = fStorage.get();
+        }
+    }
+
+    int count() const { return fCount; }
+    const uint16_t* glyphs() const { return fGlyphs; }
+
+private:
+    skia_private::AutoSTArray<32, uint16_t> fStorage;
+    const uint16_t* fGlyphs;
+    int fCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkFontStream.cpp b/gfx/skia/skia/src/core/SkFontStream.cpp
new file mode 100644
index 0000000000..a3194e295f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontStream.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/base/SkEndian.h"
+#include "src/core/SkFontStream.h"
+
+struct SkSFNTHeader {
+ uint32_t fVersion;
+ uint16_t fNumTables;
+ uint16_t fSearchRange;
+ uint16_t fEntrySelector;
+ uint16_t fRangeShift;
+};
+
+struct SkTTCFHeader {
+ uint32_t fTag;
+ uint32_t fVersion;
+ uint32_t fNumOffsets;
+ uint32_t fOffset0; // the first of N (fNumOffsets)
+};
+
+union SkSharedTTHeader {
+ SkSFNTHeader fSingle;
+ SkTTCFHeader fCollection;
+};
+
+struct SkSFNTDirEntry {
+ uint32_t fTag;
+ uint32_t fChecksum;
+ uint32_t fOffset;
+ uint32_t fLength;
+};
+
+static bool read(SkStream* stream, void* buffer, size_t amount) {
+ return stream->read(buffer, amount) == amount;
+}
+
+static bool skip(SkStream* stream, size_t amount) {
+ return stream->skip(amount) == amount;
+}
+
+/** Return the number of tables, or if this is a TTC (collection), return the
+ number of tables in the first element of the collection. In either case,
+ if offsetToDir is not-null, set it to the offset to the beginning of the
+ table headers (SkSFNTDirEntry), relative to the start of the stream.
+
+ On an error, return 0 for number of tables, and ignore offsetToDir
+ */
+static int count_tables(SkStream* stream, int ttcIndex, size_t* offsetToDir) {
+ SkASSERT(ttcIndex >= 0);
+
+ SkAutoSMalloc<1024> storage(sizeof(SkSharedTTHeader));
+ SkSharedTTHeader* header = (SkSharedTTHeader*)storage.get();
+
+ if (!read(stream, header, sizeof(SkSharedTTHeader))) {
+ return 0;
+ }
+
+ // by default, SkSFNTHeader is at the start of the stream
+ size_t offset = 0;
+
+ // if we're really a collection, the first 4-bytes will be 'ttcf'
+ uint32_t tag = SkEndian_SwapBE32(header->fCollection.fTag);
+ if (SkSetFourByteTag('t', 't', 'c', 'f') == tag) {
+ unsigned count = SkEndian_SwapBE32(header->fCollection.fNumOffsets);
+ if ((unsigned)ttcIndex >= count) {
+ return 0;
+ }
+
+ if (ttcIndex > 0) { // need to read more of the shared header
+ stream->rewind();
+ size_t amount = sizeof(SkSharedTTHeader) + ttcIndex * sizeof(uint32_t);
+ header = (SkSharedTTHeader*)storage.reset(amount);
+ if (!read(stream, header, amount)) {
+ return 0;
+ }
+ }
+ // this is the offset to the local SkSFNTHeader
+ offset = SkEndian_SwapBE32((&header->fCollection.fOffset0)[ttcIndex]);
+ stream->rewind();
+ if (!skip(stream, offset)) {
+ return 0;
+ }
+ if (!read(stream, header, sizeof(SkSFNTHeader))) {
+ return 0;
+ }
+ }
+
+ if (offsetToDir) {
+ // add the size of the header, so we will point to the DirEntries
+ *offsetToDir = offset + sizeof(SkSFNTHeader);
+ }
+ return SkEndian_SwapBE16(header->fSingle.fNumTables);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SfntHeader {
+ SfntHeader() : fCount(0), fDir(nullptr) {}
+ ~SfntHeader() { sk_free(fDir); }
+
+ /** If it returns true, then fCount and fDir are properly initialized.
+ Note: fDir will point to the raw array of SkSFNTDirEntry values,
+ meaning they will still be in the file's native endianness (BE).
+
+ fDir will be automatically freed when this object is destroyed
+ */
+ bool init(SkStream* stream, int ttcIndex) {
+ stream->rewind();
+
+ size_t offsetToDir;
+ fCount = count_tables(stream, ttcIndex, &offsetToDir);
+ if (0 == fCount) {
+ return false;
+ }
+
+ stream->rewind();
+ if (!skip(stream, offsetToDir)) {
+ return false;
+ }
+
+ size_t size = fCount * sizeof(SkSFNTDirEntry);
+ fDir = reinterpret_cast<SkSFNTDirEntry*>(sk_malloc_throw(size));
+ return read(stream, fDir, size);
+ }
+
+ int fCount;
+ SkSFNTDirEntry* fDir;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkFontStream::CountTTCEntries(SkStream* stream) {
+ stream->rewind();
+
+ SkSharedTTHeader shared;
+ if (!read(stream, &shared, sizeof(shared))) {
+ return 0;
+ }
+
+ // if we're really a collection, the first 4-bytes will be 'ttcf'
+ uint32_t tag = SkEndian_SwapBE32(shared.fCollection.fTag);
+ if (SkSetFourByteTag('t', 't', 'c', 'f') == tag) {
+ return SkEndian_SwapBE32(shared.fCollection.fNumOffsets);
+ } else {
+ return 1; // normal 'sfnt' has 1 dir entry
+ }
+}
+
+int SkFontStream::GetTableTags(SkStream* stream, int ttcIndex,
+ SkFontTableTag tags[]) {
+ SfntHeader header;
+ if (!header.init(stream, ttcIndex)) {
+ return 0;
+ }
+
+ if (tags) {
+ for (int i = 0; i < header.fCount; i++) {
+ tags[i] = SkEndian_SwapBE32(header.fDir[i].fTag);
+ }
+ }
+ return header.fCount;
+}
+
+size_t SkFontStream::GetTableData(SkStream* stream, int ttcIndex,
+ SkFontTableTag tag,
+ size_t offset, size_t length, void* data) {
+ SfntHeader header;
+ if (!header.init(stream, ttcIndex)) {
+ return 0;
+ }
+
+ for (int i = 0; i < header.fCount; i++) {
+ if (SkEndian_SwapBE32(header.fDir[i].fTag) == tag) {
+ size_t realOffset = SkEndian_SwapBE32(header.fDir[i].fOffset);
+ size_t realLength = SkEndian_SwapBE32(header.fDir[i].fLength);
+ if (offset >= realLength) {
+ // invalid
+ return 0;
+ }
+ // if the caller is trusting the length from the file, then a
+ // hostile file might choose a value which would overflow offset +
+ // length.
+ if (offset + length < offset) {
+ return 0;
+ }
+ if (length > realLength - offset) {
+ length = realLength - offset;
+ }
+ if (data) {
+ // skip the stream to the part of the table we want to copy from
+ stream->rewind();
+ size_t bytesToSkip = realOffset + offset;
+ if (!skip(stream, bytesToSkip)) {
+ return 0;
+ }
+ if (!read(stream, data, length)) {
+ return 0;
+ }
+ }
+ return length;
+ }
+ }
+ return 0;
+}
diff --git a/gfx/skia/skia/src/core/SkFontStream.h b/gfx/skia/skia/src/core/SkFontStream.h
new file mode 100644
index 0000000000..57f0e85137
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFontStream.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontStream_DEFINED
+#define SkFontStream_DEFINED
+
+class SkStream;
+
+#include "include/core/SkTypeface.h"
+
+class SkFontStream {
+public:
+ /**
+ * Return the number of shared directories inside a TTC sfnt, or return 1
+ * if the stream is a normal sfnt (ttf). If there is an error or
+ * no directory is found, return 0.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static int CountTTCEntries(SkStream*);
+
+ /**
+ * @param ttcIndex 0 for normal sfnts, or the index within a TTC sfnt.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static int GetTableTags(SkStream*, int ttcIndex, SkFontTableTag tags[]);
+
+ /**
+ * @param ttcIndex 0 for normal sfnts, or the index within a TTC sfnt.
+ *
+ * Note: the stream is rewound initially, but is returned at an arbitrary
+ * read offset.
+ */
+ static size_t GetTableData(SkStream*, int ttcIndex, SkFontTableTag tag,
+ size_t offset, size_t length, void* data);
+
+ static size_t GetTableSize(SkStream* stream, int ttcIndex, SkFontTableTag tag) {
+ return GetTableData(stream, ttcIndex, tag, 0, ~0U, nullptr);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkFont_serial.cpp b/gfx/skia/skia/src/core/SkFont_serial.cpp
new file mode 100644
index 0000000000..0ed5c16756
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFont_serial.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+// packed int at the beginning of the serialized font:
+//
+// control_bits:8 size_as_byte:8 flags:12 edging:2 hinting:2
+
+enum {
+ kSize_Is_Byte_Bit = 1 << 31,
+ kHas_ScaleX_Bit = 1 << 30,
+ kHas_SkewX_Bit = 1 << 29,
+ kHas_Typeface_Bit = 1 << 28,
+
+ kShift_for_Size = 16,
+ kMask_For_Size = 0xFF,
+
+ kShift_For_Flags = 4,
+ kMask_For_Flags = 0xFFF,
+
+ kShift_For_Edging = 2,
+ kMask_For_Edging = 0x3,
+
+ kShift_For_Hinting = 0,
+ kMask_For_Hinting = 0x3
+};
+
+static bool scalar_is_byte(SkScalar x) {
+ int ix = (int)x;
+ return ix == x && ix >= 0 && ix <= kMask_For_Size;
+}
+
+void SkFontPriv::Flatten(const SkFont& font, SkWriteBuffer& buffer) {
+ SkASSERT(font.fFlags <= SkFont::kAllFlags);
+ SkASSERT((font.fFlags & ~kMask_For_Flags) == 0);
+ SkASSERT((font.fEdging & ~kMask_For_Edging) == 0);
+ SkASSERT((font.fHinting & ~kMask_For_Hinting) == 0);
+
+ uint32_t packed = 0;
+ packed |= font.fFlags << kShift_For_Flags;
+ packed |= font.fEdging << kShift_For_Edging;
+ packed |= font.fHinting << kShift_For_Hinting;
+
+ if (scalar_is_byte(font.fSize)) {
+ packed |= kSize_Is_Byte_Bit;
+ packed |= (int)font.fSize << kShift_for_Size;
+ }
+ if (font.fScaleX != 1) {
+ packed |= kHas_ScaleX_Bit;
+ }
+ if (font.fSkewX != 0) {
+ packed |= kHas_SkewX_Bit;
+ }
+ if (font.fTypeface) {
+ packed |= kHas_Typeface_Bit;
+ }
+
+ buffer.write32(packed);
+ if (!(packed & kSize_Is_Byte_Bit)) {
+ buffer.writeScalar(font.fSize);
+ }
+ if (packed & kHas_ScaleX_Bit) {
+ buffer.writeScalar(font.fScaleX);
+ }
+ if (packed & kHas_SkewX_Bit) {
+ buffer.writeScalar(font.fSkewX);
+ }
+ if (packed & kHas_Typeface_Bit) {
+ buffer.writeTypeface(font.fTypeface.get());
+ }
+}
+
+bool SkFontPriv::Unflatten(SkFont* font, SkReadBuffer& buffer) {
+ const uint32_t packed = buffer.read32();
+
+ if (packed & kSize_Is_Byte_Bit) {
+ font->fSize = (packed >> kShift_for_Size) & kMask_For_Size;
+ } else {
+ font->fSize = buffer.readScalar();
+ }
+ if (packed & kHas_ScaleX_Bit) {
+ font->fScaleX = buffer.readScalar();
+ }
+ if (packed & kHas_SkewX_Bit) {
+ font->fSkewX = buffer.readScalar();
+ }
+ if (packed & kHas_Typeface_Bit) {
+ font->fTypeface = buffer.readTypeface();
+ }
+
+ SkASSERT(SkFont::kAllFlags <= kMask_For_Flags);
+ // we & with kAllFlags, to clear out any unknown flag bits
+ font->fFlags = SkToU8((packed >> kShift_For_Flags) & SkFont::kAllFlags);
+
+ unsigned edging = (packed >> kShift_For_Edging) & kMask_For_Edging;
+ if (edging > (unsigned)SkFont::Edging::kSubpixelAntiAlias) {
+ edging = 0;
+ }
+ font->fEdging = SkToU8(edging);
+
+ unsigned hinting = (packed >> kShift_For_Hinting) & kMask_For_Hinting;
+ if (hinting > (unsigned)SkFontHinting::kFull) {
+ hinting = 0;
+ }
+ font->fHinting = SkToU8(hinting);
+
+ return buffer.isValid();
+}
diff --git a/gfx/skia/skia/src/core/SkFuzzLogging.h b/gfx/skia/skia/src/core/SkFuzzLogging.h
new file mode 100644
index 0000000000..9f942f35de
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkFuzzLogging.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFuzzLogging_DEFINED
+#define SkFuzzLogging_DEFINED
+
+// Utilities for Skia's fuzzer
+
+// When SK_FUZZ_LOGGING is defined SkDebugfs relevant to image filter fuzzing
+// will be enabled. This allows the filter fuzzing code to include fuzzer
+// failures based on the output logs.
+// Define this flag in your SkUserConfig.h or in your Make/Build system.
+#ifdef SK_FUZZ_LOGGING
+ #define SkFUZZF(args) SkDebugf("SkFUZZ: "); SkDebugf args
+#else
+ #define SkFUZZF(args)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGaussFilter.cpp b/gfx/skia/skia/src/core/SkGaussFilter.cpp
new file mode 100644
index 0000000000..5cbc93705c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGaussFilter.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "src/core/SkGaussFilter.h"
+#include <cmath>
+
+// The value when we can stop expanding the filter. The spec implies that 3% is acceptable, but
+// we just use 1%.
+static constexpr double kGoodEnough = 1.0 / 100.0;
+
+// Normalize the values of gauss to 1.0, and make sure they add to one.
+// NB if n == 1, then this will force gauss[0] == 1.
+static void normalize(int n, double* gauss) {
+ // Carefully add from smallest to largest to calculate the normalizing sum.
+ double sum = 0;
+ for (int i = n-1; i >= 1; i--) {
+ sum += 2 * gauss[i];
+ }
+ sum += gauss[0];
+
+ // Normalize gauss.
+ for (int i = 0; i < n; i++) {
+ gauss[i] /= sum;
+ }
+
+ // The factors should sum to 1. Take any remaining slop, and add it to gauss[0]. Add the
+ // values in such a way to maintain the most accuracy.
+ sum = 0;
+ for (int i = n - 1; i >= 1; i--) {
+ sum += 2 * gauss[i];
+ }
+
+ gauss[0] = 1 - sum;
+}
+
+static int calculate_bessel_factors(double sigma, double *gauss) {
+ auto var = sigma * sigma;
+
+ // The two functions below come from the equations in "Handbook of Mathematical Functions"
+ // by Abramowitz and Stegun. Specifically, equation 9.6.10 on page 375. Bessel0 is given
+ // explicitly as 9.6.12
+ // BesselI_0 for 0 <= sigma < 2.
+ // NB the k = 0 factor is just sum = 1.0.
+ auto besselI_0 = [](double t) -> double {
+ auto tSquaredOver4 = t * t / 4.0;
+ auto sum = 1.0;
+ auto factor = 1.0;
+ auto k = 1;
+ // Use a variable number of loops. When sigma is small, this only requires 3-4 loops, but
+ // when sigma is near 2, it could require 10 loops. The same holds for BesselI_1.
+ while(factor > 1.0/1000000.0) {
+ factor *= tSquaredOver4 / (k * k);
+ sum += factor;
+ k += 1;
+ }
+ return sum;
+ };
+ // BesselI_1 for 0 <= sigma < 2.
+ auto besselI_1 = [](double t) -> double {
+ auto tSquaredOver4 = t * t / 4.0;
+ auto sum = t / 2.0;
+ auto factor = sum;
+ auto k = 1;
+ while (factor > 1.0/1000000.0) {
+ factor *= tSquaredOver4 / (k * (k + 1));
+ sum += factor;
+ k += 1;
+ }
+ return sum;
+ };
+
+ // The following formula for calculating the Gaussian kernel is from
+ // "Scale-Space for Discrete Signals" by Tony Lindeberg.
+ // gauss(n; var) = besselI_n(var) / (e^var)
+ auto d = std::exp(var);
+ double b[SkGaussFilter::kGaussArrayMax] = {besselI_0(var), besselI_1(var)};
+ gauss[0] = b[0]/d;
+ gauss[1] = b[1]/d;
+
+ // The code below is tricky, and written to mirror the recursive equations from the book.
+ // The maximum spread for sigma == 2 is guass[4], but in order to know to stop guass[5]
+ // is calculated. At this point n == 5 meaning that gauss[0..4] are the factors, but a 6th
+ // element was used to calculate them.
+ int n = 1;
+ // The recurrence relation below is from "Numerical Recipes" 3rd Edition.
+ // Equation 6.5.16 p.282
+ while (gauss[n] > kGoodEnough) {
+ b[n+1] = -(2*n/var) * b[n] + b[n-1];
+ gauss[n+1] = b[n+1] / d;
+ n += 1;
+ }
+
+ normalize(n, gauss);
+
+ return n;
+}
+
+SkGaussFilter::SkGaussFilter(double sigma) {
+ SkASSERT(0 <= sigma && sigma < 2);
+
+ fN = calculate_bessel_factors(sigma, fBasis);
+}
diff --git a/gfx/skia/skia/src/core/SkGaussFilter.h b/gfx/skia/skia/src/core/SkGaussFilter.h
new file mode 100644
index 0000000000..11ff4a85e8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGaussFilter.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGaussFilter_DEFINED
+#define SkGaussFilter_DEFINED
+
+#include <cstddef>
+
+// Define gaussian filters for values of sigma < 2. Produce values good to 1 part in 1,000,000.
+// Produces values as defined in "Scale-Space for Discrete Signals" by Tony Lindeberg.
+class SkGaussFilter {
+public:
+ inline static constexpr int kGaussArrayMax = 6;
+
+ explicit SkGaussFilter(double sigma);
+
+ size_t size() const { return fN; }
+ int radius() const { return fN - 1; }
+ int width() const { return 2 * this->radius() + 1; }
+
+ // Allow a filter to be used in a C++ ranged-for loop.
+ const double* begin() const { return &fBasis[0]; }
+ const double* end() const { return &fBasis[fN]; }
+
+private:
+ double fBasis[kGaussArrayMax];
+ int fN;
+};
+
+#endif // SkGaussFilter_DEFINED
diff --git a/gfx/skia/skia/src/core/SkGeometry.cpp b/gfx/skia/skia/src/core/SkGeometry.cpp
new file mode 100644
index 0000000000..f13bfadd68
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGeometry.cpp
@@ -0,0 +1,1780 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGeometry.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRect.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkBezierCurves.h"
+#include "src/base/SkCubics.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkPointPriv.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+
+namespace {
+
+using float2 = skvx::float2;
+using float4 = skvx::float4;
+
+SkVector to_vector(const float2& x) {
+ SkVector vector;
+ x.store(&vector);
+ return vector;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+int is_not_monotonic(SkScalar a, SkScalar b, SkScalar c) {
+ SkScalar ab = a - b;
+ SkScalar bc = b - c;
+ if (ab < 0) {
+ bc = -bc;
+ }
+ return ab == 0 || bc < 0;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+int valid_unit_divide(SkScalar numer, SkScalar denom, SkScalar* ratio) {
+ SkASSERT(ratio);
+
+ if (numer < 0) {
+ numer = -numer;
+ denom = -denom;
+ }
+
+ if (denom == 0 || numer == 0 || numer >= denom) {
+ return 0;
+ }
+
+ SkScalar r = numer / denom;
+ if (SkScalarIsNaN(r)) {
+ return 0;
+ }
+ SkASSERTF(r >= 0 && r < SK_Scalar1, "numer %f, denom %f, r %f", numer, denom, r);
+ if (r == 0) { // catch underflow if numer <<<< denom
+ return 0;
+ }
+ *ratio = r;
+ return 1;
+}
+
+// Just returns its argument, but makes it easy to set a break-point to know when
+// SkFindUnitQuadRoots is going to return 0 (an error).
+int return_check_zero(int value) {
+ if (value == 0) {
+ return 0;
+ }
+ return value;
+}
+
+} // namespace
+
+/** From Numerical Recipes in C.
+
+ Q = -1/2 (B + sign(B) sqrt[B*B - 4*A*C])
+ x1 = Q / A
+ x2 = C / Q
+*/
+int SkFindUnitQuadRoots(SkScalar A, SkScalar B, SkScalar C, SkScalar roots[2]) {
+ SkASSERT(roots);
+
+ if (A == 0) {
+ return return_check_zero(valid_unit_divide(-C, B, roots));
+ }
+
+ SkScalar* r = roots;
+
+ // use doubles so we don't overflow temporarily trying to compute R
+ double dr = (double)B * B - 4 * (double)A * C;
+ if (dr < 0) {
+ return return_check_zero(0);
+ }
+ dr = sqrt(dr);
+ SkScalar R = SkDoubleToScalar(dr);
+ if (!SkScalarIsFinite(R)) {
+ return return_check_zero(0);
+ }
+
+ SkScalar Q = (B < 0) ? -(B-R)/2 : -(B+R)/2;
+ r += valid_unit_divide(Q, A, r);
+ r += valid_unit_divide(C, Q, r);
+ if (r - roots == 2) {
+ if (roots[0] > roots[1]) {
+ using std::swap;
+ swap(roots[0], roots[1]);
+ } else if (roots[0] == roots[1]) { // nearly-equal?
+ r -= 1; // skip the double root
+ }
+ }
+ return return_check_zero((int)(r - roots));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+void SkEvalQuadAt(const SkPoint src[3], SkScalar t, SkPoint* pt, SkVector* tangent) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (pt) {
+ *pt = SkEvalQuadAt(src, t);
+ }
+ if (tangent) {
+ *tangent = SkEvalQuadTangentAt(src, t);
+ }
+}
+
+SkPoint SkEvalQuadAt(const SkPoint src[3], SkScalar t) {
+ return to_point(SkQuadCoeff(src).eval(t));
+}
+
+SkVector SkEvalQuadTangentAt(const SkPoint src[3], SkScalar t) {
+ // The derivative equation is 2(b - a +(a - 2b +c)t). This returns a
+ // zero tangent vector when t is 0 or 1, and the control point is equal
+ // to the end point. In this case, use the quad end points to compute the tangent.
+ if ((t == 0 && src[0] == src[1]) || (t == 1 && src[1] == src[2])) {
+ return src[2] - src[0];
+ }
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ float2 P0 = from_point(src[0]);
+ float2 P1 = from_point(src[1]);
+ float2 P2 = from_point(src[2]);
+
+ float2 B = P1 - P0;
+ float2 A = P2 - P1 - B;
+ float2 T = A * t + B;
+
+ return to_vector(T + T);
+}
+
+static inline float2 interp(const float2& v0,
+ const float2& v1,
+ const float2& t) {
+ return v0 + (v1 - v0) * t;
+}
+
+void SkChopQuadAt(const SkPoint src[3], SkPoint dst[5], SkScalar t) {
+ SkASSERT(t > 0 && t < SK_Scalar1);
+
+ float2 p0 = from_point(src[0]);
+ float2 p1 = from_point(src[1]);
+ float2 p2 = from_point(src[2]);
+ float2 tt(t);
+
+ float2 p01 = interp(p0, p1, tt);
+ float2 p12 = interp(p1, p2, tt);
+
+ dst[0] = to_point(p0);
+ dst[1] = to_point(p01);
+ dst[2] = to_point(interp(p01, p12, tt));
+ dst[3] = to_point(p12);
+ dst[4] = to_point(p2);
+}
+
+void SkChopQuadAtHalf(const SkPoint src[3], SkPoint dst[5]) {
+ SkChopQuadAt(src, dst, 0.5f);
+}
+
+float SkMeasureAngleBetweenVectors(SkVector a, SkVector b) {
+ float cosTheta = sk_ieee_float_divide(a.dot(b), sqrtf(a.dot(a) * b.dot(b)));
+ // Pin cosTheta such that if it is NaN (e.g., if a or b was 0), then we return acos(1) = 0.
+ cosTheta = std::max(std::min(1.f, cosTheta), -1.f);
+ return acosf(cosTheta);
+}
+
+SkVector SkFindBisector(SkVector a, SkVector b) {
+ std::array<SkVector, 2> v;
+ if (a.dot(b) >= 0) {
+ // a,b are within +/-90 degrees apart.
+ v = {a, b};
+ } else if (a.cross(b) >= 0) {
+ // a,b are >90 degrees apart. Find the bisector of their interior normals instead. (Above 90
+ // degrees, the original vectors start cancelling each other out which eventually becomes
+ // unstable.)
+ v[0].set(-a.fY, +a.fX);
+ v[1].set(+b.fY, -b.fX);
+ } else {
+ // a,b are <-90 degrees apart. Find the bisector of their interior normals instead. (Below
+ // -90 degrees, the original vectors start cancelling each other out which eventually
+ // becomes unstable.)
+ v[0].set(+a.fY, -a.fX);
+ v[1].set(-b.fY, +b.fX);
+ }
+ // Return "normalize(v[0]) + normalize(v[1])".
+ skvx::float2 x0_x1{v[0].fX, v[1].fX};
+ skvx::float2 y0_y1{v[0].fY, v[1].fY};
+ auto invLengths = 1.0f / sqrt(x0_x1 * x0_x1 + y0_y1 * y0_y1);
+ x0_x1 *= invLengths;
+ y0_y1 *= invLengths;
+ return SkPoint{x0_x1[0] + x0_x1[1], y0_y1[0] + y0_y1[1]};
+}
+
+float SkFindQuadMidTangent(const SkPoint src[3]) {
+ // Tangents point in the direction of increasing T, so tan0 and -tan1 both point toward the
+ // midtangent. The bisector of tan0 and -tan1 is orthogonal to the midtangent:
+ //
+ // n dot midtangent = 0
+ //
+ SkVector tan0 = src[1] - src[0];
+ SkVector tan1 = src[2] - src[1];
+ SkVector bisector = SkFindBisector(tan0, -tan1);
+
+ // The midtangent can be found where (F' dot bisector) = 0:
+ //
+ // 0 = (F'(T) dot bisector) = |2*T 1| * |p0 - 2*p1 + p2| * |bisector.x|
+ // |-2*p0 + 2*p1 | |bisector.y|
+ //
+ // = |2*T 1| * |tan1 - tan0| * |nx|
+ // |2*tan0 | |ny|
+ //
+ // = 2*T * ((tan1 - tan0) dot bisector) + (2*tan0 dot bisector)
+ //
+ // T = (tan0 dot bisector) / ((tan0 - tan1) dot bisector)
+ float T = sk_ieee_float_divide(tan0.dot(bisector), (tan0 - tan1).dot(bisector));
+ if (!(T > 0 && T < 1)) { // Use "!(positive_logic)" so T=nan will take this branch.
+ T = .5; // The quadratic was a line or near-line. Just chop at .5.
+ }
+
+ return T;
+}
+
+/** Quad'(t) = At + B, where
+ A = 2(a - 2b + c)
+ B = 2(b - a)
+ Solve for t, only if it fits between 0 < t < 1
+*/
+int SkFindQuadExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar tValue[1]) {
+ /* At + B == 0
+ t = -B / A
+ */
+ return valid_unit_divide(a - b, a - b - b + c, tValue);
+}
+
+static inline void flatten_double_quad_extrema(SkScalar coords[14]) {
+ coords[2] = coords[6] = coords[4];
+}
+
+/* Returns 0 for 1 quad, and 1 for two quads, either way the answer is
+ stored in dst[]. Guarantees that the 1/2 quads will be monotonic.
+ */
+int SkChopQuadAtYExtrema(const SkPoint src[3], SkPoint dst[5]) {
+ SkASSERT(src);
+ SkASSERT(dst);
+
+ SkScalar a = src[0].fY;
+ SkScalar b = src[1].fY;
+ SkScalar c = src[2].fY;
+
+ if (is_not_monotonic(a, b, c)) {
+ SkScalar tValue;
+ if (valid_unit_divide(a - b, a - b - b + c, &tValue)) {
+ SkChopQuadAt(src, dst, tValue);
+ flatten_double_quad_extrema(&dst[0].fY);
+ return 1;
+ }
+ // if we get here, we need to force dst to be monotonic, even though
+ // we couldn't compute a unit_divide value (probably underflow).
+ b = SkScalarAbs(a - b) < SkScalarAbs(b - c) ? a : c;
+ }
+ dst[0].set(src[0].fX, a);
+ dst[1].set(src[1].fX, b);
+ dst[2].set(src[2].fX, c);
+ return 0;
+}
+
+/* Returns 0 for 1 quad, and 1 for two quads, either way the answer is
+ stored in dst[]. Guarantees that the 1/2 quads will be monotonic.
+ */
+int SkChopQuadAtXExtrema(const SkPoint src[3], SkPoint dst[5]) {
+ SkASSERT(src);
+ SkASSERT(dst);
+
+ SkScalar a = src[0].fX;
+ SkScalar b = src[1].fX;
+ SkScalar c = src[2].fX;
+
+ if (is_not_monotonic(a, b, c)) {
+ SkScalar tValue;
+ if (valid_unit_divide(a - b, a - b - b + c, &tValue)) {
+ SkChopQuadAt(src, dst, tValue);
+ flatten_double_quad_extrema(&dst[0].fX);
+ return 1;
+ }
+ // if we get here, we need to force dst to be monotonic, even though
+ // we couldn't compute a unit_divide value (probably underflow).
+ b = SkScalarAbs(a - b) < SkScalarAbs(b - c) ? a : c;
+ }
+ dst[0].set(a, src[0].fY);
+ dst[1].set(b, src[1].fY);
+ dst[2].set(c, src[2].fY);
+ return 0;
+}
+
+// F(t) = a (1 - t) ^ 2 + 2 b t (1 - t) + c t ^ 2
+// F'(t) = 2 (b - a) + 2 (a - 2b + c) t
+// F''(t) = 2 (a - 2b + c)
+//
+// A = 2 (b - a)
+// B = 2 (a - 2b + c)
+//
+// Maximum curvature for a quadratic means solving
+// Fx' Fx'' + Fy' Fy'' = 0
+//
+// t = - (Ax Bx + Ay By) / (Bx ^ 2 + By ^ 2)
+//
+SkScalar SkFindQuadMaxCurvature(const SkPoint src[3]) {
+ SkScalar Ax = src[1].fX - src[0].fX;
+ SkScalar Ay = src[1].fY - src[0].fY;
+ SkScalar Bx = src[0].fX - src[1].fX - src[1].fX + src[2].fX;
+ SkScalar By = src[0].fY - src[1].fY - src[1].fY + src[2].fY;
+
+ SkScalar numer = -(Ax * Bx + Ay * By);
+ SkScalar denom = Bx * Bx + By * By;
+ if (denom < 0) {
+ numer = -numer;
+ denom = -denom;
+ }
+ if (numer <= 0) {
+ return 0;
+ }
+ if (numer >= denom) { // Also catches denom=0.
+ return 1;
+ }
+ SkScalar t = numer / denom;
+ SkASSERT((0 <= t && t < 1) || SkScalarIsNaN(t));
+ return t;
+}
+
+int SkChopQuadAtMaxCurvature(const SkPoint src[3], SkPoint dst[5]) {
+ SkScalar t = SkFindQuadMaxCurvature(src);
+ if (t > 0 && t < 1) {
+ SkChopQuadAt(src, dst, t);
+ return 2;
+ } else {
+ memcpy(dst, src, 3 * sizeof(SkPoint));
+ return 1;
+ }
+}
+
+void SkConvertQuadToCubic(const SkPoint src[3], SkPoint dst[4]) {
+ float2 scale(SkDoubleToScalar(2.0 / 3.0));
+ float2 s0 = from_point(src[0]);
+ float2 s1 = from_point(src[1]);
+ float2 s2 = from_point(src[2]);
+
+ dst[0] = to_point(s0);
+ dst[1] = to_point(s0 + (s1 - s0) * scale);
+ dst[2] = to_point(s2 + (s1 - s2) * scale);
+ dst[3] = to_point(s2);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+///// CUBICS // CUBICS // CUBICS // CUBICS // CUBICS // CUBICS // CUBICS /////
+//////////////////////////////////////////////////////////////////////////////
+
+static SkVector eval_cubic_derivative(const SkPoint src[4], SkScalar t) {
+ SkQuadCoeff coeff;
+ float2 P0 = from_point(src[0]);
+ float2 P1 = from_point(src[1]);
+ float2 P2 = from_point(src[2]);
+ float2 P3 = from_point(src[3]);
+
+ coeff.fA = P3 + 3 * (P1 - P2) - P0;
+ coeff.fB = times_2(P2 - times_2(P1) + P0);
+ coeff.fC = P1 - P0;
+ return to_vector(coeff.eval(t));
+}
+
+static SkVector eval_cubic_2ndDerivative(const SkPoint src[4], SkScalar t) {
+ float2 P0 = from_point(src[0]);
+ float2 P1 = from_point(src[1]);
+ float2 P2 = from_point(src[2]);
+ float2 P3 = from_point(src[3]);
+ float2 A = P3 + 3 * (P1 - P2) - P0;
+ float2 B = P2 - times_2(P1) + P0;
+
+ return to_vector(A * t + B);
+}
+
+void SkEvalCubicAt(const SkPoint src[4], SkScalar t, SkPoint* loc,
+ SkVector* tangent, SkVector* curvature) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (loc) {
+ *loc = to_point(SkCubicCoeff(src).eval(t));
+ }
+ if (tangent) {
+ // The derivative equation returns a zero tangent vector when t is 0 or 1, and the
+ // adjacent control point is equal to the end point. In this case, use the
+ // next control point or the end points to compute the tangent.
+ if ((t == 0 && src[0] == src[1]) || (t == 1 && src[2] == src[3])) {
+ if (t == 0) {
+ *tangent = src[2] - src[0];
+ } else {
+ *tangent = src[3] - src[1];
+ }
+ if (!tangent->fX && !tangent->fY) {
+ *tangent = src[3] - src[0];
+ }
+ } else {
+ *tangent = eval_cubic_derivative(src, t);
+ }
+ }
+ if (curvature) {
+ *curvature = eval_cubic_2ndDerivative(src, t);
+ }
+}
+
+/** Cubic'(t) = At^2 + Bt + C, where
+ A = 3(-a + 3(b - c) + d)
+ B = 6(a - 2b + c)
+ C = 3(b - a)
+    Solve for t, keeping only those that fit between 0 < t < 1
+*/
+int SkFindCubicExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar tValues[2]) {
+ // we divide A,B,C by 3 to simplify
+ SkScalar A = d - a + 3*(b - c);
+ SkScalar B = 2*(a - b - b + c);
+ SkScalar C = b - a;
+
+ return SkFindUnitQuadRoots(A, B, C, tValues);
+}
+
+// This does not return b when t==1, but it otherwise seems to get better precision than
+// "a*(1 - t) + b*t" for things like chopping cubics on exact cusp points.
+// The responsibility falls on the caller to check that t != 1 before calling.
+template<int N, typename T>
+inline static skvx::Vec<N,T> unchecked_mix(const skvx::Vec<N,T>& a, const skvx::Vec<N,T>& b,
+ const skvx::Vec<N,T>& t) {
+ return (b - a)*t + a;
+}
+
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[7], SkScalar t) {
+ SkASSERT(0 <= t && t <= 1);
+
+ if (t == 1) {
+ memcpy(dst, src, sizeof(SkPoint) * 4);
+ dst[4] = dst[5] = dst[6] = src[3];
+ return;
+ }
+
+ float2 p0 = skvx::bit_pun<float2>(src[0]);
+ float2 p1 = skvx::bit_pun<float2>(src[1]);
+ float2 p2 = skvx::bit_pun<float2>(src[2]);
+ float2 p3 = skvx::bit_pun<float2>(src[3]);
+ float2 T = t;
+
+ float2 ab = unchecked_mix(p0, p1, T);
+ float2 bc = unchecked_mix(p1, p2, T);
+ float2 cd = unchecked_mix(p2, p3, T);
+ float2 abc = unchecked_mix(ab, bc, T);
+ float2 bcd = unchecked_mix(bc, cd, T);
+ float2 abcd = unchecked_mix(abc, bcd, T);
+
+ dst[0] = skvx::bit_pun<SkPoint>(p0);
+ dst[1] = skvx::bit_pun<SkPoint>(ab);
+ dst[2] = skvx::bit_pun<SkPoint>(abc);
+ dst[3] = skvx::bit_pun<SkPoint>(abcd);
+ dst[4] = skvx::bit_pun<SkPoint>(bcd);
+ dst[5] = skvx::bit_pun<SkPoint>(cd);
+ dst[6] = skvx::bit_pun<SkPoint>(p3);
+}
+
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[10], float t0, float t1) {
+ SkASSERT(0 <= t0 && t0 <= t1 && t1 <= 1);
+
+ if (t1 == 1) {
+ SkChopCubicAt(src, dst, t0);
+ dst[7] = dst[8] = dst[9] = src[3];
+ return;
+ }
+
+ // Perform both chops in parallel using 4-lane SIMD.
+ float4 p00, p11, p22, p33, T;
+ p00.lo = p00.hi = skvx::bit_pun<float2>(src[0]);
+ p11.lo = p11.hi = skvx::bit_pun<float2>(src[1]);
+ p22.lo = p22.hi = skvx::bit_pun<float2>(src[2]);
+ p33.lo = p33.hi = skvx::bit_pun<float2>(src[3]);
+ T.lo = t0;
+ T.hi = t1;
+
+ float4 ab = unchecked_mix(p00, p11, T);
+ float4 bc = unchecked_mix(p11, p22, T);
+ float4 cd = unchecked_mix(p22, p33, T);
+ float4 abc = unchecked_mix(ab, bc, T);
+ float4 bcd = unchecked_mix(bc, cd, T);
+ float4 abcd = unchecked_mix(abc, bcd, T);
+ float4 middle = unchecked_mix(abc, bcd, skvx::shuffle<2,3,0,1>(T));
+
+ dst[0] = skvx::bit_pun<SkPoint>(p00.lo);
+ dst[1] = skvx::bit_pun<SkPoint>(ab.lo);
+ dst[2] = skvx::bit_pun<SkPoint>(abc.lo);
+ dst[3] = skvx::bit_pun<SkPoint>(abcd.lo);
+ middle.store(dst + 4);
+ dst[6] = skvx::bit_pun<SkPoint>(abcd.hi);
+ dst[7] = skvx::bit_pun<SkPoint>(bcd.hi);
+ dst[8] = skvx::bit_pun<SkPoint>(cd.hi);
+ dst[9] = skvx::bit_pun<SkPoint>(p33.hi);
+}
+
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[],
+ const SkScalar tValues[], int tCount) {
+ SkASSERT(std::all_of(tValues, tValues + tCount, [](SkScalar t) { return t >= 0 && t <= 1; }));
+ SkASSERT(std::is_sorted(tValues, tValues + tCount));
+
+ if (dst) {
+ if (tCount == 0) { // nothing to chop
+ memcpy(dst, src, 4*sizeof(SkPoint));
+ } else {
+ int i = 0;
+ for (; i < tCount - 1; i += 2) {
+ // Do two chops at once.
+ float2 tt = float2::Load(tValues + i);
+ if (i != 0) {
+ float lastT = tValues[i - 1];
+ tt = skvx::pin((tt - lastT) / (1 - lastT), float2(0), float2(1));
+ }
+ SkChopCubicAt(src, dst, tt[0], tt[1]);
+ src = dst = dst + 6;
+ }
+ if (i < tCount) {
+ // Chop the final cubic if there was an odd number of chops.
+ SkASSERT(i + 1 == tCount);
+ float t = tValues[i];
+ if (i != 0) {
+ float lastT = tValues[i - 1];
+ t = SkTPin(sk_ieee_float_divide(t - lastT, 1 - lastT), 0.f, 1.f);
+ }
+ SkChopCubicAt(src, dst, t);
+ }
+ }
+ }
+}
+
+void SkChopCubicAtHalf(const SkPoint src[4], SkPoint dst[7]) {
+ SkChopCubicAt(src, dst, 0.5f);
+}
+
+float SkMeasureNonInflectCubicRotation(const SkPoint pts[4]) {
+ SkVector a = pts[1] - pts[0];
+ SkVector b = pts[2] - pts[1];
+ SkVector c = pts[3] - pts[2];
+ if (a.isZero()) {
+ return SkMeasureAngleBetweenVectors(b, c);
+ }
+ if (b.isZero()) {
+ return SkMeasureAngleBetweenVectors(a, c);
+ }
+ if (c.isZero()) {
+ return SkMeasureAngleBetweenVectors(a, b);
+ }
+ // Postulate: When no points are colocated and there are no inflection points in T=0..1, the
+ // rotation is: 360 degrees, minus the angle [p0,p1,p2], minus the angle [p1,p2,p3].
+ return 2*SK_ScalarPI - SkMeasureAngleBetweenVectors(a,-b) - SkMeasureAngleBetweenVectors(b,-c);
+}
+
+static skvx::float4 fma(const skvx::float4& f, float m, const skvx::float4& a) {
+ return skvx::fma(f, skvx::float4(m), a);
+}
+
+// Finds the root nearest 0.5. Returns 0.5 if the roots are undefined or outside 0..1.
+static float solve_quadratic_equation_for_midtangent(float a, float b, float c, float discr) {
+ // Quadratic formula from Numerical Recipes in C:
+ float q = -.5f * (b + copysignf(sqrtf(discr), b));
+ // The roots are q/a and c/q. Pick the midtangent closer to T=.5.
+ float _5qa = -.5f*q*a;
+ float T = fabsf(q*q + _5qa) < fabsf(a*c + _5qa) ? sk_ieee_float_divide(q,a)
+ : sk_ieee_float_divide(c,q);
+ if (!(T > 0 && T < 1)) { // Use "!(positive_logic)" so T=NaN will take this branch.
+ // Either the curve is a flat line with no rotation or FP precision failed us. Chop at .5.
+ T = .5;
+ }
+ return T;
+}
+
+static float solve_quadratic_equation_for_midtangent(float a, float b, float c) {
+ return solve_quadratic_equation_for_midtangent(a, b, c, b*b - 4*a*c);
+}
+
+float SkFindCubicMidTangent(const SkPoint src[4]) {
+ // Tangents point in the direction of increasing T, so tan0 and -tan1 both point toward the
+ // midtangent. The bisector of tan0 and -tan1 is orthogonal to the midtangent:
+ //
+ // bisector dot midtangent == 0
+ //
+ SkVector tan0 = (src[0] == src[1]) ? src[2] - src[0] : src[1] - src[0];
+ SkVector tan1 = (src[2] == src[3]) ? src[3] - src[1] : src[3] - src[2];
+ SkVector bisector = SkFindBisector(tan0, -tan1);
+
+ // Find the T value at the midtangent. This is a simple quadratic equation:
+ //
+ // midtangent dot bisector == 0, or using a tangent matrix C' in power basis form:
+ //
+ // |C'x C'y|
+ // |T^2 T 1| * |. . | * |bisector.x| == 0
+ // |. . | |bisector.y|
+ //
+ // The coeffs for the quadratic equation we need to solve are therefore: C' * bisector
+ static const skvx::float4 kM[4] = {skvx::float4(-1, 2, -1, 0),
+ skvx::float4( 3, -4, 1, 0),
+ skvx::float4(-3, 2, 0, 0)};
+ auto C_x = fma(kM[0], src[0].fX,
+ fma(kM[1], src[1].fX,
+ fma(kM[2], src[2].fX, skvx::float4(src[3].fX, 0,0,0))));
+ auto C_y = fma(kM[0], src[0].fY,
+ fma(kM[1], src[1].fY,
+ fma(kM[2], src[2].fY, skvx::float4(src[3].fY, 0,0,0))));
+ auto coeffs = C_x * bisector.x() + C_y * bisector.y();
+
+ // Now solve the quadratic for T.
+ float T = 0;
+ float a=coeffs[0], b=coeffs[1], c=coeffs[2];
+ float discr = b*b - 4*a*c;
+ if (discr > 0) { // This will only be false if the curve is a line.
+ return solve_quadratic_equation_for_midtangent(a, b, c, discr);
+ } else {
+ // This is a 0- or 360-degree flat line. It doesn't have single points of midtangent.
+ // (tangent == midtangent at every point on the curve except the cusp points.)
+ // Chop in between both cusps instead, if any. There can be up to two cusps on a flat line,
+ // both where the tangent is perpendicular to the starting tangent:
+ //
+ // tangent dot tan0 == 0
+ //
+ coeffs = C_x * tan0.x() + C_y * tan0.y();
+ a = coeffs[0];
+ b = coeffs[1];
+ if (a != 0) {
+ // We want the point in between both cusps. The midpoint of:
+ //
+ // (-b +/- sqrt(b^2 - 4*a*c)) / (2*a)
+ //
+ // Is equal to:
+ //
+ // -b / (2*a)
+ T = -b / (2*a);
+ }
+ if (!(T > 0 && T < 1)) { // Use "!(positive_logic)" so T=NaN will take this branch.
+ // Either the curve is a flat line with no rotation or FP precision failed us. Chop at
+ // .5.
+ T = .5;
+ }
+ return T;
+ }
+}
+
+static void flatten_double_cubic_extrema(SkScalar coords[14]) {
+ coords[4] = coords[8] = coords[6];
+}
+
+/** Given 4 points on a cubic bezier, chop it into 1, 2, 3 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan
+ converter. Depending on what is returned, dst[] is treated as follows:
+ 0 dst[0..3] is the original cubic
+ 1 dst[0..3] and dst[3..6] are the two new cubics
+ 2 dst[0..3], dst[3..6], dst[6..9] are the three new cubics
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopCubicAtYExtrema(const SkPoint src[4], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int roots = SkFindCubicExtrema(src[0].fY, src[1].fY, src[2].fY,
+ src[3].fY, tValues);
+
+ SkChopCubicAt(src, dst, tValues, roots);
+ if (dst && roots > 0) {
+ // we do some cleanup to ensure our Y extrema are flat
+ flatten_double_cubic_extrema(&dst[0].fY);
+ if (roots == 2) {
+ flatten_double_cubic_extrema(&dst[3].fY);
+ }
+ }
+ return roots;
+}
+
+int SkChopCubicAtXExtrema(const SkPoint src[4], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int roots = SkFindCubicExtrema(src[0].fX, src[1].fX, src[2].fX,
+ src[3].fX, tValues);
+
+ SkChopCubicAt(src, dst, tValues, roots);
+ if (dst && roots > 0) {
+ // we do some cleanup to ensure our X extrema are flat
+ flatten_double_cubic_extrema(&dst[0].fX);
+ if (roots == 2) {
+ flatten_double_cubic_extrema(&dst[3].fX);
+ }
+ }
+ return roots;
+}
+
+/** http://www.faculty.idc.ac.il/arik/quality/appendixA.html
+
+ Inflection means that curvature is zero.
+ Curvature is [F' x F''] / [F'^3]
+ So we solve F'x X F''y - F'y X F''x == 0
+ After some canceling of the cubic term, we get
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+ (BxCy - ByCx)t^2 + (AxCy - AyCx)t + AxBy - AyBx == 0
+*/
+int SkFindCubicInflections(const SkPoint src[4], SkScalar tValues[2]) {
+ SkScalar Ax = src[1].fX - src[0].fX;
+ SkScalar Ay = src[1].fY - src[0].fY;
+ SkScalar Bx = src[2].fX - 2 * src[1].fX + src[0].fX;
+ SkScalar By = src[2].fY - 2 * src[1].fY + src[0].fY;
+ SkScalar Cx = src[3].fX + 3 * (src[1].fX - src[2].fX) - src[0].fX;
+ SkScalar Cy = src[3].fY + 3 * (src[1].fY - src[2].fY) - src[0].fY;
+
+ return SkFindUnitQuadRoots(Bx*Cy - By*Cx,
+ Ax*Cy - Ay*Cx,
+ Ax*By - Ay*Bx,
+ tValues);
+}
+
+int SkChopCubicAtInflections(const SkPoint src[4], SkPoint dst[10]) {
+ SkScalar tValues[2];
+ int count = SkFindCubicInflections(src, tValues);
+
+ if (dst) {
+ if (count == 0) {
+ memcpy(dst, src, 4 * sizeof(SkPoint));
+ } else {
+ SkChopCubicAt(src, dst, tValues, count);
+ }
+ }
+ return count + 1;
+}
+
+// Assumes the third component of points is 1.
+// Calcs p0 . (p1 x p2)
+static double calc_dot_cross_cubic(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
+ const double xComp = (double) p0.fX * ((double) p1.fY - (double) p2.fY);
+ const double yComp = (double) p0.fY * ((double) p2.fX - (double) p1.fX);
+ const double wComp = (double) p1.fX * (double) p2.fY - (double) p1.fY * (double) p2.fX;
+ return (xComp + yComp + wComp);
+}
+
+// Returns a positive power of 2 that, when multiplied by n, and excepting the two edge cases listed
+// below, shifts the exponent of n to yield a magnitude somewhere inside [1..2).
+// Returns 2^1023 if abs(n) < 2^-1022 (including 0).
+// Returns NaN if n is Inf or NaN.
+inline static double previous_inverse_pow2(double n) {
+ uint64_t bits;
+ memcpy(&bits, &n, sizeof(double));
+ bits = ((1023llu*2 << 52) + ((1llu << 52) - 1)) - bits; // exp=-exp
+ bits &= (0x7ffllu) << 52; // mantissa=1.0, sign=0
+ memcpy(&n, &bits, sizeof(double));
+ return n;
+}
+
+inline static void write_cubic_inflection_roots(double t0, double s0, double t1, double s1,
+ double* t, double* s) {
+ t[0] = t0;
+ s[0] = s0;
+
+ // This copysign/abs business orients the implicit function so positive values are always on the
+ // "left" side of the curve.
+ t[1] = -copysign(t1, t1 * s1);
+ s[1] = -fabs(s1);
+
+ // Ensure t[0]/s[0] <= t[1]/s[1] (s[1] is negative from above).
+ if (copysign(s[1], s[0]) * t[0] > -fabs(s[0]) * t[1]) {
+ using std::swap;
+ swap(t[0], t[1]);
+ swap(s[0], s[1]);
+ }
+}
+
+SkCubicType SkClassifyCubic(const SkPoint P[4], double t[2], double s[2], double d[4]) {
+ // Find the cubic's inflection function, I = [T^3 -3T^2 3T -1] dot D. (D0 will always be 0
+ // for integral cubics.)
+ //
+ // See "Resolution Independent Curve Rendering using Programmable Graphics Hardware",
+ // 4.2 Curve Categorization:
+ //
+ // https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ double A1 = calc_dot_cross_cubic(P[0], P[3], P[2]);
+ double A2 = calc_dot_cross_cubic(P[1], P[0], P[3]);
+ double A3 = calc_dot_cross_cubic(P[2], P[1], P[0]);
+
+ double D3 = 3 * A3;
+ double D2 = D3 - A2;
+ double D1 = D2 - A2 + A1;
+
+ // Shift the exponents in D so the largest magnitude falls somewhere in 1..2. This protects us
+ // from overflow down the road while solving for roots and KLM functionals.
+ double Dmax = std::max(std::max(fabs(D1), fabs(D2)), fabs(D3));
+ double norm = previous_inverse_pow2(Dmax);
+ D1 *= norm;
+ D2 *= norm;
+ D3 *= norm;
+
+ if (d) {
+ d[3] = D3;
+ d[2] = D2;
+ d[1] = D1;
+ d[0] = 0;
+ }
+
+ // Now use the inflection function to classify the cubic.
+ //
+ // See "Resolution Independent Curve Rendering using Programmable Graphics Hardware",
+ // 4.4 Integral Cubics:
+ //
+ // https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+ if (0 != D1) {
+ double discr = 3*D2*D2 - 4*D1*D3;
+ if (discr > 0) { // Serpentine.
+ if (t && s) {
+ double q = 3*D2 + copysign(sqrt(3*discr), D2);
+ write_cubic_inflection_roots(q, 6*D1, 2*D3, q, t, s);
+ }
+ return SkCubicType::kSerpentine;
+ } else if (discr < 0) { // Loop.
+ if (t && s) {
+ double q = D2 + copysign(sqrt(-discr), D2);
+ write_cubic_inflection_roots(q, 2*D1, 2*(D2*D2 - D3*D1), D1*q, t, s);
+ }
+ return SkCubicType::kLoop;
+ } else { // Cusp.
+ if (t && s) {
+ write_cubic_inflection_roots(D2, 2*D1, D2, 2*D1, t, s);
+ }
+ return SkCubicType::kLocalCusp;
+ }
+ } else {
+ if (0 != D2) { // Cusp at T=infinity.
+ if (t && s) {
+ write_cubic_inflection_roots(D3, 3*D2, 1, 0, t, s); // T1=infinity.
+ }
+ return SkCubicType::kCuspAtInfinity;
+ } else { // Degenerate.
+ if (t && s) {
+ write_cubic_inflection_roots(1, 0, 1, 0, t, s); // T0=T1=infinity.
+ }
+ return 0 != D3 ? SkCubicType::kQuadratic : SkCubicType::kLineOrPoint;
+ }
+ }
+}
+
+template <typename T> void bubble_sort(T array[], int count) {
+ for (int i = count - 1; i > 0; --i)
+ for (int j = i; j > 0; --j)
+ if (array[j] < array[j-1])
+ {
+ T tmp(array[j]);
+ array[j] = array[j-1];
+ array[j-1] = tmp;
+ }
+}
+
+/**
+ * Given an array and count, remove all pair-wise duplicates from the array,
+ * keeping the existing sorting, and return the new count
+ */
+static int collaps_duplicates(SkScalar array[], int count) {
+ for (int n = count; n > 1; --n) {
+ if (array[0] == array[1]) {
+ for (int i = 1; i < n; ++i) {
+ array[i - 1] = array[i];
+ }
+ count -= 1;
+ } else {
+ array += 1;
+ }
+ }
+ return count;
+}
+
+#ifdef SK_DEBUG
+
+#define TEST_COLLAPS_ENTRY(array) array, std::size(array)
+
+static void test_collaps_duplicates() {
+ static bool gOnce;
+ if (gOnce) { return; }
+ gOnce = true;
+ const SkScalar src0[] = { 0 };
+ const SkScalar src1[] = { 0, 0 };
+ const SkScalar src2[] = { 0, 1 };
+ const SkScalar src3[] = { 0, 0, 0 };
+ const SkScalar src4[] = { 0, 0, 1 };
+ const SkScalar src5[] = { 0, 1, 1 };
+ const SkScalar src6[] = { 0, 1, 2 };
+ const struct {
+ const SkScalar* fData;
+ int fCount;
+ int fCollapsedCount;
+ } data[] = {
+ { TEST_COLLAPS_ENTRY(src0), 1 },
+ { TEST_COLLAPS_ENTRY(src1), 1 },
+ { TEST_COLLAPS_ENTRY(src2), 2 },
+ { TEST_COLLAPS_ENTRY(src3), 1 },
+ { TEST_COLLAPS_ENTRY(src4), 2 },
+ { TEST_COLLAPS_ENTRY(src5), 2 },
+ { TEST_COLLAPS_ENTRY(src6), 3 },
+ };
+ for (size_t i = 0; i < std::size(data); ++i) {
+ SkScalar dst[3];
+ memcpy(dst, data[i].fData, data[i].fCount * sizeof(dst[0]));
+ int count = collaps_duplicates(dst, data[i].fCount);
+ SkASSERT(data[i].fCollapsedCount == count);
+ for (int j = 1; j < count; ++j) {
+ SkASSERT(dst[j-1] < dst[j]);
+ }
+ }
+}
+#endif
+
+static SkScalar SkScalarCubeRoot(SkScalar x) {
+ return SkScalarPow(x, 0.3333333f);
+}
+
+/* Solve coeff(t) == 0, returning the number of roots that
+ lie within 0 < t < 1.
+ coeff[0]t^3 + coeff[1]t^2 + coeff[2]t + coeff[3]
+
+ Eliminates repeated roots (so that all tValues are distinct, and are always
+ in increasing order).
+*/
+static int solve_cubic_poly(const SkScalar coeff[4], SkScalar tValues[3]) {
+ if (SkScalarNearlyZero(coeff[0])) { // we're just a quadratic
+ return SkFindUnitQuadRoots(coeff[1], coeff[2], coeff[3], tValues);
+ }
+
+ SkScalar a, b, c, Q, R;
+
+ {
+ SkASSERT(coeff[0] != 0);
+
+ SkScalar inva = SkScalarInvert(coeff[0]);
+ a = coeff[1] * inva;
+ b = coeff[2] * inva;
+ c = coeff[3] * inva;
+ }
+ Q = (a*a - b*3) / 9;
+ R = (2*a*a*a - 9*a*b + 27*c) / 54;
+
+ SkScalar Q3 = Q * Q * Q;
+ SkScalar R2MinusQ3 = R * R - Q3;
+ SkScalar adiv3 = a / 3;
+
+ if (R2MinusQ3 < 0) { // we have 3 real roots
+ // the divide/root can, due to finite precisions, be slightly outside of -1...1
+ SkScalar theta = SkScalarACos(SkTPin(R / SkScalarSqrt(Q3), -1.0f, 1.0f));
+ SkScalar neg2RootQ = -2 * SkScalarSqrt(Q);
+
+ tValues[0] = SkTPin(neg2RootQ * SkScalarCos(theta/3) - adiv3, 0.0f, 1.0f);
+ tValues[1] = SkTPin(neg2RootQ * SkScalarCos((theta + 2*SK_ScalarPI)/3) - adiv3, 0.0f, 1.0f);
+ tValues[2] = SkTPin(neg2RootQ * SkScalarCos((theta - 2*SK_ScalarPI)/3) - adiv3, 0.0f, 1.0f);
+ SkDEBUGCODE(test_collaps_duplicates();)
+
+ // now sort the roots
+ bubble_sort(tValues, 3);
+ return collaps_duplicates(tValues, 3);
+ } else { // we have 1 real root
+ SkScalar A = SkScalarAbs(R) + SkScalarSqrt(R2MinusQ3);
+ A = SkScalarCubeRoot(A);
+ if (R > 0) {
+ A = -A;
+ }
+ if (A != 0) {
+ A += Q / A;
+ }
+ tValues[0] = SkTPin(A - adiv3, 0.0f, 1.0f);
+ return 1;
+ }
+}
+
+/* Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+static void formulate_F1DotF2(const SkScalar src[], SkScalar coeff[4]) {
+ SkScalar a = src[2] - src[0];
+ SkScalar b = src[4] - 2 * src[2] + src[0];
+ SkScalar c = src[6] + 3 * (src[2] - src[4]) - src[0];
+
+ coeff[0] = c * c;
+ coeff[1] = 3 * b * c;
+ coeff[2] = 2 * b * b + c * a;
+ coeff[3] = a * b;
+}
+
+/* Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+int SkFindCubicMaxCurvature(const SkPoint src[4], SkScalar tValues[3]) {
+ SkScalar coeffX[4], coeffY[4];
+ int i;
+
+ formulate_F1DotF2(&src[0].fX, coeffX);
+ formulate_F1DotF2(&src[0].fY, coeffY);
+
+ for (i = 0; i < 4; i++) {
+ coeffX[i] += coeffY[i];
+ }
+
+ int numRoots = solve_cubic_poly(coeffX, tValues);
+ // now remove extrema where the curvature is zero (mins)
+ // !!!! need a test for this !!!!
+ return numRoots;
+}
+
+int SkChopCubicAtMaxCurvature(const SkPoint src[4], SkPoint dst[13],
+ SkScalar tValues[3]) {
+ SkScalar t_storage[3];
+
+ if (tValues == nullptr) {
+ tValues = t_storage;
+ }
+
+ SkScalar roots[3];
+ int rootCount = SkFindCubicMaxCurvature(src, roots);
+
+ // Throw out values not inside 0..1.
+ int count = 0;
+ for (int i = 0; i < rootCount; ++i) {
+ if (0 < roots[i] && roots[i] < 1) {
+ tValues[count++] = roots[i];
+ }
+ }
+
+ if (dst) {
+ if (count == 0) {
+ memcpy(dst, src, 4 * sizeof(SkPoint));
+ } else {
+ SkChopCubicAt(src, dst, tValues, count);
+ }
+ }
+ return count + 1;
+}
+
+// Returns a constant proportional to the dimensions of the cubic.
+// Constant found through experimentation -- maybe there's a better way....
+static SkScalar calc_cubic_precision(const SkPoint src[4]) {
+ return (SkPointPriv::DistanceToSqd(src[1], src[0]) + SkPointPriv::DistanceToSqd(src[2], src[1])
+ + SkPointPriv::DistanceToSqd(src[3], src[2])) * 1e-8f;
+}
+
+// Returns true if both points src[testIndex], src[testIndex+1] are in the same half plane defined
+// by the line segment src[lineIndex], src[lineIndex+1].
+static bool on_same_side(const SkPoint src[4], int testIndex, int lineIndex) {
+ SkPoint origin = src[lineIndex];
+ SkVector line = src[lineIndex + 1] - origin;
+ SkScalar crosses[2];
+ for (int index = 0; index < 2; ++index) {
+ SkVector testLine = src[testIndex + index] - origin;
+ crosses[index] = line.cross(testLine);
+ }
+ return crosses[0] * crosses[1] >= 0;
+}
+
+// Return location (in t) of cubic cusp, if there is one.
+// Note that classify cubic code does not reliably return all cusp'd cubics, so
+// it is not called here.
+SkScalar SkFindCubicCusp(const SkPoint src[4]) {
+ // When the adjacent control point matches the end point, it behaves as if
+ // the cubic has a cusp: there's a point of max curvature where the derivative
+ // goes to zero. Ideally, this would be where t is zero or one, but math
+ // error makes not so. It is not uncommon to create cubics this way; skip them.
+ if (src[0] == src[1]) {
+ return -1;
+ }
+ if (src[2] == src[3]) {
+ return -1;
+ }
+ // Cubics only have a cusp if the line segments formed by the control and end points cross.
+ // Detect crossing if line ends are on opposite sides of plane formed by the other line.
+ if (on_same_side(src, 0, 2) || on_same_side(src, 2, 0)) {
+ return -1;
+ }
+ // Cubics may have multiple points of maximum curvature, although at most only
+ // one is a cusp.
+ SkScalar maxCurvature[3];
+ int roots = SkFindCubicMaxCurvature(src, maxCurvature);
+ for (int index = 0; index < roots; ++index) {
+ SkScalar testT = maxCurvature[index];
+ if (0 >= testT || testT >= 1) { // no need to consider max curvature on the end
+ continue;
+ }
+ // A cusp is at the max curvature, and also has a derivative close to zero.
+ // Choose the 'close to zero' meaning by comparing the derivative length
+ // with the overall cubic size.
+ SkVector dPt = eval_cubic_derivative(src, testT);
+ SkScalar dPtMagnitude = SkPointPriv::LengthSqd(dPt);
+ SkScalar precision = calc_cubic_precision(src);
+ if (dPtMagnitude < precision) {
+ // All three max curvature t values may be close to the cusp;
+ // return the first one.
+ return testT;
+ }
+ }
+ return -1;
+}
+
+static bool close_enough_to_zero(double x) {
+ return std::fabs(x) < 0.00001;
+}
+
+static bool first_axis_intersection(const double coefficients[8], bool yDirection,
+ double axisIntercept, double* solution) {
+ auto [A, B, C, D] = SkBezierCubic::ConvertToPolynomial(coefficients, yDirection);
+ D -= axisIntercept;
+ double roots[3] = {0, 0, 0};
+ int count = SkCubics::RootsValidT(A, B, C, D, roots);
+ if (count == 0) {
+ return false;
+ }
+ // Verify that at least one of the roots is accurate.
+ for (int i = 0; i < count; i++) {
+ if (close_enough_to_zero(SkCubics::EvalAt(A, B, C, D, roots[i]))) {
+ *solution = roots[i];
+ return true;
+ }
+ }
+ // None of the roots returned by our normal cubic solver were correct enough
+ // (e.g. https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=55732)
+ // So we need to fallback to a more accurate solution.
+ count = SkCubics::BinarySearchRootsValidT(A, B, C, D, roots);
+ if (count == 0) {
+ return false;
+ }
+ for (int i = 0; i < count; i++) {
+ if (close_enough_to_zero(SkCubics::EvalAt(A, B, C, D, roots[i]))) {
+ *solution = roots[i];
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkChopMonoCubicAtY(const SkPoint src[4], SkScalar y, SkPoint dst[7]) {
+ double coefficients[8] = {src[0].fX, src[0].fY, src[1].fX, src[1].fY,
+ src[2].fX, src[2].fY, src[3].fX, src[3].fY};
+ double solution = 0;
+ if (first_axis_intersection(coefficients, true, y, &solution)) {
+ double cubicPair[14];
+ SkBezierCubic::Subdivide(coefficients, solution, cubicPair);
+ for (int i = 0; i < 7; i ++) {
+ dst[i].fX = sk_double_to_float(cubicPair[i*2]);
+ dst[i].fY = sk_double_to_float(cubicPair[i*2 + 1]);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkChopMonoCubicAtX(const SkPoint src[4], SkScalar x, SkPoint dst[7]) {
+ double coefficients[8] = {src[0].fX, src[0].fY, src[1].fX, src[1].fY,
+ src[2].fX, src[2].fY, src[3].fX, src[3].fY};
+ double solution = 0;
+ if (first_axis_intersection(coefficients, false, x, &solution)) {
+ double cubicPair[14];
+ SkBezierCubic::Subdivide(coefficients, solution, cubicPair);
+ for (int i = 0; i < 7; i ++) {
+ dst[i].fX = sk_double_to_float(cubicPair[i*2]);
+ dst[i].fY = sk_double_to_float(cubicPair[i*2 + 1]);
+ }
+ return true;
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// NURB representation for conics. Helpful explanations at:
+//
+// http://citeseerx.ist.psu.edu/viewdoc/
+// download?doi=10.1.1.44.5740&rep=rep1&type=ps
+// and
+// http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/NURBS/RB-conics.html
+//
+// F = (A (1 - t)^2 + C t^2 + 2 B (1 - t) t w)
+// ------------------------------------------
+// ((1 - t)^2 + t^2 + 2 (1 - t) t w)
+//
+// = {t^2 (P0 + P2 - 2 P1 w), t (-2 P0 + 2 P1 w), P0}
+// ------------------------------------------------
+// {t^2 (2 - 2 w), t (-2 + 2 w), 1}
+//
+
+// F' = 2 (C t (1 + t (-1 + w)) - A (-1 + t) (t (-1 + w) - w) + B (1 - 2 t) w)
+//
+// t^2 : (2 P0 - 2 P2 - 2 P0 w + 2 P2 w)
+// t^1 : (-2 P0 + 2 P2 + 4 P0 w - 4 P1 w)
+// t^0 : -2 P0 w + 2 P1 w
+//
+// We disregard magnitude, so we can freely ignore the denominator of F', and
+// divide the numerator by 2
+//
+// coeff[0] for t^2
+// coeff[1] for t^1
+// coeff[2] for t^0
+//
+static void conic_deriv_coeff(const SkScalar src[],
+ SkScalar w,
+ SkScalar coeff[3]) {
+ const SkScalar P20 = src[4] - src[0];
+ const SkScalar P10 = src[2] - src[0];
+ const SkScalar wP10 = w * P10;
+ coeff[0] = w * P20 - P20;
+ coeff[1] = P20 - 2 * wP10;
+ coeff[2] = wP10;
+}
+
+static bool conic_find_extrema(const SkScalar src[], SkScalar w, SkScalar* t) {
+ SkScalar coeff[3];
+ conic_deriv_coeff(src, w, coeff);
+
+ SkScalar tValues[2];
+ int roots = SkFindUnitQuadRoots(coeff[0], coeff[1], coeff[2], tValues);
+ SkASSERT(0 == roots || 1 == roots);
+
+ if (1 == roots) {
+ *t = tValues[0];
+ return true;
+ }
+ return false;
+}
+
+// We only interpolate one dimension at a time (the first, at +0, +3, +6).
+static void p3d_interp(const SkScalar src[7], SkScalar dst[7], SkScalar t) {
+ SkScalar ab = SkScalarInterp(src[0], src[3], t);
+ SkScalar bc = SkScalarInterp(src[3], src[6], t);
+ dst[0] = ab;
+ dst[3] = SkScalarInterp(ab, bc, t);
+ dst[6] = bc;
+}
+
+static void ratquad_mapTo3D(const SkPoint src[3], SkScalar w, SkPoint3 dst[3]) {
+ dst[0].set(src[0].fX * 1, src[0].fY * 1, 1);
+ dst[1].set(src[1].fX * w, src[1].fY * w, w);
+ dst[2].set(src[2].fX * 1, src[2].fY * 1, 1);
+}
+
+static SkPoint project_down(const SkPoint3& src) {
+ return {src.fX / src.fZ, src.fY / src.fZ};
+}
+
+// return false if infinity or NaN is generated; caller must check
+bool SkConic::chopAt(SkScalar t, SkConic dst[2]) const {
+ SkPoint3 tmp[3], tmp2[3];
+
+ ratquad_mapTo3D(fPts, fW, tmp);
+
+ p3d_interp(&tmp[0].fX, &tmp2[0].fX, t);
+ p3d_interp(&tmp[0].fY, &tmp2[0].fY, t);
+ p3d_interp(&tmp[0].fZ, &tmp2[0].fZ, t);
+
+ dst[0].fPts[0] = fPts[0];
+ dst[0].fPts[1] = project_down(tmp2[0]);
+ dst[0].fPts[2] = project_down(tmp2[1]); dst[1].fPts[0] = dst[0].fPts[2];
+ dst[1].fPts[1] = project_down(tmp2[2]);
+ dst[1].fPts[2] = fPts[2];
+
+ // to put in "standard form", where w0 and w2 are both 1, we compute the
+ // new w1 as sqrt(w1*w1/(w0*w2))
+ // or
+ // w1 /= sqrt(w0*w2)
+ //
+ // However, in our case, we know that for dst[0]:
+ // w0 == 1, and for dst[1], w2 == 1
+ //
+ SkScalar root = SkScalarSqrt(tmp2[1].fZ);
+ dst[0].fW = tmp2[0].fZ / root;
+ dst[1].fW = tmp2[2].fZ / root;
+ SkASSERT(sizeof(dst[0]) == sizeof(SkScalar) * 7);
+ SkASSERT(0 == offsetof(SkConic, fPts[0].fX));
+ return SkScalarsAreFinite(&dst[0].fPts[0].fX, 7 * 2);
+}
+
+void SkConic::chopAt(SkScalar t1, SkScalar t2, SkConic* dst) const {
+ if (0 == t1 || 1 == t2) {
+ if (0 == t1 && 1 == t2) {
+ *dst = *this;
+ return;
+ } else {
+ SkConic pair[2];
+ if (this->chopAt(t1 ? t1 : t2, pair)) {
+ *dst = pair[SkToBool(t1)];
+ return;
+ }
+ }
+ }
+ SkConicCoeff coeff(*this);
+ float2 tt1(t1);
+ float2 aXY = coeff.fNumer.eval(tt1);
+ float2 aZZ = coeff.fDenom.eval(tt1);
+ float2 midTT((t1 + t2) / 2);
+ float2 dXY = coeff.fNumer.eval(midTT);
+ float2 dZZ = coeff.fDenom.eval(midTT);
+ float2 tt2(t2);
+ float2 cXY = coeff.fNumer.eval(tt2);
+ float2 cZZ = coeff.fDenom.eval(tt2);
+ float2 bXY = times_2(dXY) - (aXY + cXY) * 0.5f;
+ float2 bZZ = times_2(dZZ) - (aZZ + cZZ) * 0.5f;
+ dst->fPts[0] = to_point(aXY / aZZ);
+ dst->fPts[1] = to_point(bXY / bZZ);
+ dst->fPts[2] = to_point(cXY / cZZ);
+ float2 ww = bZZ / sqrt(aZZ * cZZ);
+ dst->fW = ww[0];
+}
+
+SkPoint SkConic::evalAt(SkScalar t) const {
+ return to_point(SkConicCoeff(*this).eval(t));
+}
+
+SkVector SkConic::evalTangentAt(SkScalar t) const {
+ // The derivative equation returns a zero tangent vector when t is 0 or 1,
+ // and the control point is equal to the end point.
+ // In this case, use the conic endpoints to compute the tangent.
+ if ((t == 0 && fPts[0] == fPts[1]) || (t == 1 && fPts[1] == fPts[2])) {
+ return fPts[2] - fPts[0];
+ }
+ float2 p0 = from_point(fPts[0]);
+ float2 p1 = from_point(fPts[1]);
+ float2 p2 = from_point(fPts[2]);
+ float2 ww(fW);
+
+ float2 p20 = p2 - p0;
+ float2 p10 = p1 - p0;
+
+ float2 C = ww * p10;
+ float2 A = ww * p20 - p20;
+ float2 B = p20 - C - C;
+
+ return to_vector(SkQuadCoeff(A, B, C).eval(t));
+}
+
+void SkConic::evalAt(SkScalar t, SkPoint* pt, SkVector* tangent) const {
+ SkASSERT(t >= 0 && t <= SK_Scalar1);
+
+ if (pt) {
+ *pt = this->evalAt(t);
+ }
+ if (tangent) {
+ *tangent = this->evalTangentAt(t);
+ }
+}
+
+static SkScalar subdivide_w_value(SkScalar w) {
+ return SkScalarSqrt(SK_ScalarHalf + w * SK_ScalarHalf);
+}
+
+void SkConic::chop(SkConic * SK_RESTRICT dst) const {
+ float2 scale = SkScalarInvert(SK_Scalar1 + fW);
+ SkScalar newW = subdivide_w_value(fW);
+
+ float2 p0 = from_point(fPts[0]);
+ float2 p1 = from_point(fPts[1]);
+ float2 p2 = from_point(fPts[2]);
+ float2 ww(fW);
+
+ float2 wp1 = ww * p1;
+ float2 m = (p0 + times_2(wp1) + p2) * scale * 0.5f;
+ SkPoint mPt = to_point(m);
+ if (!mPt.isFinite()) {
+ double w_d = fW;
+ double w_2 = w_d * 2;
+ double scale_half = 1 / (1 + w_d) * 0.5;
+ mPt.fX = SkDoubleToScalar((fPts[0].fX + w_2 * fPts[1].fX + fPts[2].fX) * scale_half);
+ mPt.fY = SkDoubleToScalar((fPts[0].fY + w_2 * fPts[1].fY + fPts[2].fY) * scale_half);
+ }
+ dst[0].fPts[0] = fPts[0];
+ dst[0].fPts[1] = to_point((p0 + wp1) * scale);
+ dst[0].fPts[2] = dst[1].fPts[0] = mPt;
+ dst[1].fPts[1] = to_point((wp1 + p2) * scale);
+ dst[1].fPts[2] = fPts[2];
+
+ dst[0].fW = dst[1].fW = newW;
+}
+
+/*
+ * "High order approximation of conic sections by quadratic splines"
+ * by Michael Floater, 1993
+ */
+#define AS_QUAD_ERROR_SETUP \
+ SkScalar a = fW - 1; \
+ SkScalar k = a / (4 * (2 + a)); \
+ SkScalar x = k * (fPts[0].fX - 2 * fPts[1].fX + fPts[2].fX); \
+ SkScalar y = k * (fPts[0].fY - 2 * fPts[1].fY + fPts[2].fY);
+
+void SkConic::computeAsQuadError(SkVector* err) const {
+ AS_QUAD_ERROR_SETUP
+ err->set(x, y);
+}
+
+bool SkConic::asQuadTol(SkScalar tol) const {
+ AS_QUAD_ERROR_SETUP
+ return (x * x + y * y) <= tol * tol;
+}
+
+// Limit the number of suggested quads to approximate a conic
+#define kMaxConicToQuadPOW2 5
+
+int SkConic::computeQuadPOW2(SkScalar tol) const {
+ if (tol < 0 || !SkScalarIsFinite(tol) || !SkPointPriv::AreFinite(fPts, 3)) {
+ return 0;
+ }
+
+ AS_QUAD_ERROR_SETUP
+
+ SkScalar error = SkScalarSqrt(x * x + y * y);
+ int pow2;
+ for (pow2 = 0; pow2 < kMaxConicToQuadPOW2; ++pow2) {
+ if (error <= tol) {
+ break;
+ }
+ error *= 0.25f;
+ }
+ // float version -- using ceil gives the same results as the above.
+ if ((false)) {
+ SkScalar err = SkScalarSqrt(x * x + y * y);
+ if (err <= tol) {
+ return 0;
+ }
+ SkScalar tol2 = tol * tol;
+ if (tol2 == 0) {
+ return kMaxConicToQuadPOW2;
+ }
+ SkScalar fpow2 = SkScalarLog2((x * x + y * y) / tol2) * 0.25f;
+ int altPow2 = SkScalarCeilToInt(fpow2);
+ if (altPow2 != pow2) {
+ SkDebugf("pow2 %d altPow2 %d fbits %g err %g tol %g\n", pow2, altPow2, fpow2, err, tol);
+ }
+ pow2 = altPow2;
+ }
+ return pow2;
+}
+
+// This was originally developed and tested for pathops: see SkOpTypes.h
+// returns true if (a <= b <= c) || (a >= b >= c)
+static bool between(SkScalar a, SkScalar b, SkScalar c) {
+ return (a - b) * (c - b) <= 0;
+}
+
+static SkPoint* subdivide(const SkConic& src, SkPoint pts[], int level) {
+ SkASSERT(level >= 0);
+
+ if (0 == level) {
+ memcpy(pts, &src.fPts[1], 2 * sizeof(SkPoint));
+ return pts + 2;
+ } else {
+ SkConic dst[2];
+ src.chop(dst);
+ const SkScalar startY = src.fPts[0].fY;
+ SkScalar endY = src.fPts[2].fY;
+ if (between(startY, src.fPts[1].fY, endY)) {
+ // If the input is monotonic and the output is not, the scan converter hangs.
+ // Ensure that the chopped conics maintain their y-order.
+ SkScalar midY = dst[0].fPts[2].fY;
+ if (!between(startY, midY, endY)) {
+ // If the computed midpoint is outside the ends, move it to the closer one.
+ SkScalar closerY = SkTAbs(midY - startY) < SkTAbs(midY - endY) ? startY : endY;
+ dst[0].fPts[2].fY = dst[1].fPts[0].fY = closerY;
+ }
+ if (!between(startY, dst[0].fPts[1].fY, dst[0].fPts[2].fY)) {
+ // If the 1st control is not between the start and end, put it at the start.
+ // This also reduces the quad to a line.
+ dst[0].fPts[1].fY = startY;
+ }
+ if (!between(dst[1].fPts[0].fY, dst[1].fPts[1].fY, endY)) {
+ // If the 2nd control is not between the start and end, put it at the end.
+ // This also reduces the quad to a line.
+ dst[1].fPts[1].fY = endY;
+ }
+ // Verify that all five points are in order.
+ SkASSERT(between(startY, dst[0].fPts[1].fY, dst[0].fPts[2].fY));
+ SkASSERT(between(dst[0].fPts[1].fY, dst[0].fPts[2].fY, dst[1].fPts[1].fY));
+ SkASSERT(between(dst[0].fPts[2].fY, dst[1].fPts[1].fY, endY));
+ }
+ --level;
+ pts = subdivide(dst[0], pts, level);
+ return subdivide(dst[1], pts, level);
+ }
+}
+
+int SkConic::chopIntoQuadsPOW2(SkPoint pts[], int pow2) const {
+ SkASSERT(pow2 >= 0);
+ *pts = fPts[0];
+ SkDEBUGCODE(SkPoint* endPts);
+ if (pow2 == kMaxConicToQuadPOW2) { // If an extreme weight generates many quads ...
+ SkConic dst[2];
+ this->chop(dst);
+ // check to see if the first chop generates a pair of lines
+ if (SkPointPriv::EqualsWithinTolerance(dst[0].fPts[1], dst[0].fPts[2]) &&
+ SkPointPriv::EqualsWithinTolerance(dst[1].fPts[0], dst[1].fPts[1])) {
+ pts[1] = pts[2] = pts[3] = dst[0].fPts[1]; // set ctrl == end to make lines
+ pts[4] = dst[1].fPts[2];
+ pow2 = 1;
+ SkDEBUGCODE(endPts = &pts[5]);
+ goto commonFinitePtCheck;
+ }
+ }
+ SkDEBUGCODE(endPts = ) subdivide(*this, pts + 1, pow2);
+commonFinitePtCheck:
+ const int quadCount = 1 << pow2;
+ const int ptCount = 2 * quadCount + 1;
+ SkASSERT(endPts - pts == ptCount);
+ if (!SkPointPriv::AreFinite(pts, ptCount)) {
+ // if we generated a non-finite, pin ourselves to the middle of the hull,
+ // as our first and last are already on the first/last pts of the hull.
+ for (int i = 1; i < ptCount - 1; ++i) {
+ pts[i] = fPts[1];
+ }
+ }
+ return 1 << pow2;
+}
+
+float SkConic::findMidTangent() const {
+ // Tangents point in the direction of increasing T, so tan0 and -tan1 both point toward the
+ // midtangent. The bisector of tan0 and -tan1 is orthogonal to the midtangent:
+ //
+ // bisector dot midtangent = 0
+ //
+ SkVector tan0 = fPts[1] - fPts[0];
+ SkVector tan1 = fPts[2] - fPts[1];
+ SkVector bisector = SkFindBisector(tan0, -tan1);
+
+ // Start by finding the tangent function's power basis coefficients. These define a tangent
+ // direction (scaled by some uniform value) as:
+ // |T^2|
+ // Tangent_Direction(T) = dx,dy = |A B C| * |T |
+ // |. . .| |1 |
+ //
+ // The derivative of a conic has a cumbersome order-4 denominator. However, this isn't necessary
+ // if we are only interested in a vector in the same *direction* as a given tangent line. Since
+ // the denominator scales dx and dy uniformly, we can throw it out completely after evaluating
+ // the derivative with the standard quotient rule. This leaves us with a simpler quadratic
+ // function that we use to find a tangent.
+ SkVector A = (fPts[2] - fPts[0]) * (fW - 1);
+ SkVector B = (fPts[2] - fPts[0]) - (fPts[1] - fPts[0]) * (fW*2);
+ SkVector C = (fPts[1] - fPts[0]) * fW;
+
+ // Now solve for "bisector dot midtangent = 0":
+ //
+ // |T^2|
+ // bisector * |A B C| * |T | = 0
+ // |. . .| |1 |
+ //
+ float a = bisector.dot(A);
+ float b = bisector.dot(B);
+ float c = bisector.dot(C);
+ return solve_quadratic_equation_for_midtangent(a, b, c);
+}
+
+bool SkConic::findXExtrema(SkScalar* t) const {
+ return conic_find_extrema(&fPts[0].fX, fW, t);
+}
+
+bool SkConic::findYExtrema(SkScalar* t) const {
+ return conic_find_extrema(&fPts[0].fY, fW, t);
+}
+
+bool SkConic::chopAtXExtrema(SkConic dst[2]) const {
+ SkScalar t;
+ if (this->findXExtrema(&t)) {
+ if (!this->chopAt(t, dst)) {
+ // if chop can't return finite values, don't chop
+ return false;
+ }
+ // now clean-up the middle, since we know t was meant to be at
+ // an X-extrema
+ SkScalar value = dst[0].fPts[2].fX;
+ dst[0].fPts[1].fX = value;
+ dst[1].fPts[0].fX = value;
+ dst[1].fPts[1].fX = value;
+ return true;
+ }
+ return false;
+}
+
+bool SkConic::chopAtYExtrema(SkConic dst[2]) const {
+ SkScalar t;
+ if (this->findYExtrema(&t)) {
+ if (!this->chopAt(t, dst)) {
+ // if chop can't return finite values, don't chop
+ return false;
+ }
+ // now clean-up the middle, since we know t was meant to be at
+        // a Y-extrema
+ SkScalar value = dst[0].fPts[2].fY;
+ dst[0].fPts[1].fY = value;
+ dst[1].fPts[0].fY = value;
+ dst[1].fPts[1].fY = value;
+ return true;
+ }
+ return false;
+}
+
+void SkConic::computeTightBounds(SkRect* bounds) const {
+ SkPoint pts[4];
+ pts[0] = fPts[0];
+ pts[1] = fPts[2];
+ int count = 2;
+
+ SkScalar t;
+ if (this->findXExtrema(&t)) {
+ this->evalAt(t, &pts[count++]);
+ }
+ if (this->findYExtrema(&t)) {
+ this->evalAt(t, &pts[count++]);
+ }
+ bounds->setBounds(pts, count);
+}
+
+void SkConic::computeFastBounds(SkRect* bounds) const {
+ bounds->setBounds(fPts, 3);
+}
+
+#if 0 // unimplemented
+bool SkConic::findMaxCurvature(SkScalar* t) const {
+ // TODO: Implement me
+ return false;
+}
+#endif
+
+SkScalar SkConic::TransformW(const SkPoint pts[3], SkScalar w, const SkMatrix& matrix) {
+ if (!matrix.hasPerspective()) {
+ return w;
+ }
+
+ SkPoint3 src[3], dst[3];
+
+ ratquad_mapTo3D(pts, w, src);
+
+ matrix.mapHomogeneousPoints(dst, src, 3);
+
+    // w' = sqrt((w1*w1) / (w0*w2))
+ // use doubles temporarily, to handle small numer/denom
+ double w0 = dst[0].fZ;
+ double w1 = dst[1].fZ;
+ double w2 = dst[2].fZ;
+ return sk_double_to_float(sqrt(sk_ieee_double_divide(w1 * w1, w0 * w2)));
+}
+
+int SkConic::BuildUnitArc(const SkVector& uStart, const SkVector& uStop, SkRotationDirection dir,
+ const SkMatrix* userMatrix, SkConic dst[kMaxConicsForArc]) {
+ // rotate by x,y so that uStart is (1.0)
+ SkScalar x = SkPoint::DotProduct(uStart, uStop);
+ SkScalar y = SkPoint::CrossProduct(uStart, uStop);
+
+ SkScalar absY = SkScalarAbs(y);
+
+ // check for (effectively) coincident vectors
+ // this can happen if our angle is nearly 0 or nearly 180 (y == 0)
+ // ... we use the dot-prod to distinguish between 0 and 180 (x > 0)
+ if (absY <= SK_ScalarNearlyZero && x > 0 && ((y >= 0 && kCW_SkRotationDirection == dir) ||
+ (y <= 0 && kCCW_SkRotationDirection == dir))) {
+ return 0;
+ }
+
+ if (dir == kCCW_SkRotationDirection) {
+ y = -y;
+ }
+
+ // We decide to use 1-conic per quadrant of a circle. What quadrant does [xy] lie in?
+ // 0 == [0 .. 90)
+ // 1 == [90 ..180)
+ // 2 == [180..270)
+ // 3 == [270..360)
+ //
+ int quadrant = 0;
+ if (0 == y) {
+ quadrant = 2; // 180
+ SkASSERT(SkScalarAbs(x + SK_Scalar1) <= SK_ScalarNearlyZero);
+ } else if (0 == x) {
+ SkASSERT(absY - SK_Scalar1 <= SK_ScalarNearlyZero);
+ quadrant = y > 0 ? 1 : 3; // 90 : 270
+ } else {
+ if (y < 0) {
+ quadrant += 2;
+ }
+ if ((x < 0) != (y < 0)) {
+ quadrant += 1;
+ }
+ }
+
+ const SkPoint quadrantPts[] = {
+ { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 }, { -1, 0 }, { -1, -1 }, { 0, -1 }, { 1, -1 }
+ };
+ const SkScalar quadrantWeight = SK_ScalarRoot2Over2;
+
+ int conicCount = quadrant;
+ for (int i = 0; i < conicCount; ++i) {
+ dst[i].set(&quadrantPts[i * 2], quadrantWeight);
+ }
+
+    // Now compute any remaining (sub-90-degree) arc for the last conic
+ const SkPoint finalP = { x, y };
+ const SkPoint& lastQ = quadrantPts[quadrant * 2]; // will already be a unit-vector
+ const SkScalar dot = SkVector::DotProduct(lastQ, finalP);
+ if (!SkScalarIsFinite(dot)) {
+ return 0;
+ }
+ SkASSERT(0 <= dot && dot <= SK_Scalar1 + SK_ScalarNearlyZero);
+
+ if (dot < 1) {
+ SkVector offCurve = { lastQ.x() + x, lastQ.y() + y };
+ // compute the bisector vector, and then rescale to be the off-curve point.
+ // we compute its length from cos(theta/2) = length / 1, using half-angle identity we get
+        // length = sqrt(2 / (1 + cos(theta))). We already have cos() from computing the dot.
+ // This is nice, since our computed weight is cos(theta/2) as well!
+ //
+ const SkScalar cosThetaOver2 = SkScalarSqrt((1 + dot) / 2);
+ offCurve.setLength(SkScalarInvert(cosThetaOver2));
+ if (!SkPointPriv::EqualsWithinTolerance(lastQ, offCurve)) {
+ dst[conicCount].set(lastQ, offCurve, finalP, cosThetaOver2);
+ conicCount += 1;
+ }
+ }
+
+ // now handle counter-clockwise and the initial unitStart rotation
+ SkMatrix matrix;
+ matrix.setSinCos(uStart.fY, uStart.fX);
+ if (dir == kCCW_SkRotationDirection) {
+ matrix.preScale(SK_Scalar1, -SK_Scalar1);
+ }
+ if (userMatrix) {
+ matrix.postConcat(*userMatrix);
+ }
+ for (int i = 0; i < conicCount; ++i) {
+ matrix.mapPoints(dst[i].fPts, 3);
+ }
+ return conicCount;
+}
diff --git a/gfx/skia/skia/src/core/SkGeometry.h b/gfx/skia/skia/src/core/SkGeometry.h
new file mode 100644
index 0000000000..e8c9a05ad1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGeometry.h
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGeometry_DEFINED
+#define SkGeometry_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "src/base/SkVx.h"
+
+#include <cstring>
+
+class SkMatrix;
+struct SkRect;
+
+static inline skvx::float2 from_point(const SkPoint& point) {
+ return skvx::float2::Load(&point);
+}
+
+static inline SkPoint to_point(const skvx::float2& x) {
+ SkPoint point;
+ x.store(&point);
+ return point;
+}
+
+static skvx::float2 times_2(const skvx::float2& value) {
+ return value + value;
+}
+
+/** Given a quadratic equation Ax^2 + Bx + C = 0, return 0, 1, 2 roots for the
+ equation.
+*/
+int SkFindUnitQuadRoots(SkScalar A, SkScalar B, SkScalar C, SkScalar roots[2]);
+
+/** Measures the angle between two vectors, in the range [0, pi].
+*/
+float SkMeasureAngleBetweenVectors(SkVector, SkVector);
+
+/** Returns a new, arbitrarily scaled vector that bisects the given vectors. The returned bisector
+ will always point toward the interior of the provided vectors.
+*/
+SkVector SkFindBisector(SkVector, SkVector);
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPoint SkEvalQuadAt(const SkPoint src[3], SkScalar t);
+SkPoint SkEvalQuadTangentAt(const SkPoint src[3], SkScalar t);
+
+/** Set pt to the point on the src quadratic specified by t. t must be
+ 0 <= t <= 1.0
+*/
+void SkEvalQuadAt(const SkPoint src[3], SkScalar t, SkPoint* pt, SkVector* tangent = nullptr);
+
+/** Given a src quadratic bezier, chop it at the specified t value,
+ where 0 < t < 1, and return the two new quadratics in dst:
+ dst[0..2] and dst[2..4]
+*/
+void SkChopQuadAt(const SkPoint src[3], SkPoint dst[5], SkScalar t);
+
+/** Given a src quadratic bezier, chop it at the specified t == 1/2,
+ The new quads are returned in dst[0..2] and dst[2..4]
+*/
+void SkChopQuadAtHalf(const SkPoint src[3], SkPoint dst[5]);
+
+/** Measures the rotation of the given quadratic curve in radians.
+
+ Rotation is perhaps easiest described via a driving analogy: If you drive your car along the
+ curve from p0 to p2, then by the time you arrive at p2, how many radians will your car have
+ rotated? For a quadratic this is the same as the vector inside the tangents at the endpoints.
+
+ Quadratics can have rotations in the range [0, pi].
+*/
+inline float SkMeasureQuadRotation(const SkPoint pts[3]) {
+ return SkMeasureAngleBetweenVectors(pts[1] - pts[0], pts[2] - pts[1]);
+}
+
+/** Given a src quadratic bezier, returns the T value whose tangent angle is halfway between the
+ tangents at p0 and p3.
+*/
+float SkFindQuadMidTangent(const SkPoint src[3]);
+
+/** Given a src quadratic bezier, chop it at the tangent whose angle is halfway between the
+ tangents at p0 and p2. The new quads are returned in dst[0..2] and dst[2..4].
+*/
+inline void SkChopQuadAtMidTangent(const SkPoint src[3], SkPoint dst[5]) {
+ SkChopQuadAt(src, dst, SkFindQuadMidTangent(src));
+}
+
+/** Given the 3 coefficients for a quadratic bezier (either X or Y values), look
+ for extrema, and return the number of t-values that are found that represent
+    these extrema. If the quadratic has no extrema between (0..1) exclusive, the
+ function returns 0.
+ Returned count tValues[]
+ 0 ignored
+ 1 0 < tValues[0] < 1
+*/
+int SkFindQuadExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar tValues[1]);
+
+/** Given 3 points on a quadratic bezier, chop it into 1, 2 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan converter.
+ Depending on what is returned, dst[] is treated as follows
+ 0 dst[0..2] is the original quad
+ 1 dst[0..2] and dst[2..4] are the two new quads
+*/
+int SkChopQuadAtYExtrema(const SkPoint src[3], SkPoint dst[5]);
+int SkChopQuadAtXExtrema(const SkPoint src[3], SkPoint dst[5]);
+
+/** Given 3 points on a quadratic bezier, if the point of maximum
+ curvature exists on the segment, returns the t value for this
+ point along the curve. Otherwise it will return a value of 0.
+*/
+SkScalar SkFindQuadMaxCurvature(const SkPoint src[3]);
+
+/** Given 3 points on a quadratic bezier, divide it into 2 quadratics
+ if the point of maximum curvature exists on the quad segment.
+ Depending on what is returned, dst[] is treated as follows
+ 1 dst[0..2] is the original quad
+ 2 dst[0..2] and dst[2..4] are the two new quads
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopQuadAtMaxCurvature(const SkPoint src[3], SkPoint dst[5]);
+
+/** Given 3 points on a quadratic bezier, use degree elevation to
+ convert it into the cubic fitting the same curve. The new cubic
+ curve is returned in dst[0..3].
+*/
+void SkConvertQuadToCubic(const SkPoint src[3], SkPoint dst[4]);
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** Set pt to the point on the src cubic specified by t. t must be
+ 0 <= t <= 1.0
+*/
+void SkEvalCubicAt(const SkPoint src[4], SkScalar t, SkPoint* locOrNull,
+ SkVector* tangentOrNull, SkVector* curvatureOrNull);
+
+/** Given a src cubic bezier, chop it at the specified t value,
+ where 0 <= t <= 1, and return the two new cubics in dst:
+ dst[0..3] and dst[3..6]
+*/
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[7], SkScalar t);
+
+/** Given a src cubic bezier, chop it at the specified t0 and t1 values,
+ where 0 <= t0 <= t1 <= 1, and return the three new cubics in dst:
+ dst[0..3], dst[3..6], and dst[6..9]
+*/
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[10], float t0, float t1);
+
+/** Given a src cubic bezier, chop it at the specified t values,
+ where 0 <= t0 <= t1 <= ... <= 1, and return the new cubics in dst:
+ dst[0..3],dst[3..6],...,dst[3*t_count..3*(t_count+1)]
+*/
+void SkChopCubicAt(const SkPoint src[4], SkPoint dst[], const SkScalar t[],
+ int t_count);
+
+/** Given a src cubic bezier, chop it at the specified t == 1/2,
+ The new cubics are returned in dst[0..3] and dst[3..6]
+*/
+void SkChopCubicAtHalf(const SkPoint src[4], SkPoint dst[7]);
+
+/** Given a cubic curve with no inflection points, this method measures the rotation in radians.
+
+ Rotation is perhaps easiest described via a driving analogy: If you drive your car along the
+ curve from p0 to p3, then by the time you arrive at p3, how many radians will your car have
+ rotated? This is not quite the same as the vector inside the tangents at the endpoints, even
+ without inflection, because the curve might rotate around the outside of the
+ tangents (>= 180 degrees) or the inside (<= 180 degrees).
+
+ Cubics can have rotations in the range [0, 2*pi].
+
+ NOTE: The caller must either call SkChopCubicAtInflections or otherwise prove that the provided
+ cubic has no inflection points prior to calling this method.
+*/
+float SkMeasureNonInflectCubicRotation(const SkPoint[4]);
+
+/** Given a src cubic bezier, returns the T value whose tangent angle is halfway between the
+ tangents at p0 and p3.
+*/
+float SkFindCubicMidTangent(const SkPoint src[4]);
+
+/** Given a src cubic bezier, chop it at the tangent whose angle is halfway between the
+ tangents at p0 and p3. The new cubics are returned in dst[0..3] and dst[3..6].
+
+ NOTE: 0- and 360-degree flat lines don't have single points of midtangent.
+ (tangent == midtangent at every point on these curves except the cusp points.)
+ If this is the case then we simply chop at a point which guarantees neither side rotates more
+ than 180 degrees.
+*/
+inline void SkChopCubicAtMidTangent(const SkPoint src[4], SkPoint dst[7]) {
+ SkChopCubicAt(src, dst, SkFindCubicMidTangent(src));
+}
+
+/** Given the 4 coefficients for a cubic bezier (either X or Y values), look
+ for extrema, and return the number of t-values that are found that represent
+    these extrema. If the cubic has no extrema between (0..1) exclusive, the
+ function returns 0.
+ Returned count tValues[]
+ 0 ignored
+ 1 0 < tValues[0] < 1
+ 2 0 < tValues[0] < tValues[1] < 1
+*/
+int SkFindCubicExtrema(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar tValues[2]);
+
+/** Given 4 points on a cubic bezier, chop it into 1, 2, 3 beziers such that
+ the resulting beziers are monotonic in Y. This is called by the scan converter.
+ Depending on what is returned, dst[] is treated as follows
+ 0 dst[0..3] is the original cubic
+ 1 dst[0..3] and dst[3..6] are the two new cubics
+ 2 dst[0..3], dst[3..6], dst[6..9] are the three new cubics
+ If dst == null, it is ignored and only the count is returned.
+*/
+int SkChopCubicAtYExtrema(const SkPoint src[4], SkPoint dst[10]);
+int SkChopCubicAtXExtrema(const SkPoint src[4], SkPoint dst[10]);
+
+/** Given a cubic bezier, return 0, 1, or 2 t-values that represent the
+ inflection points.
+*/
+int SkFindCubicInflections(const SkPoint src[4], SkScalar tValues[2]);
+
+/** Return 1 for no chop, 2 for having chopped the cubic at a single
+ inflection point, 3 for having chopped at 2 inflection points.
+ dst will hold the resulting 1, 2, or 3 cubics.
+*/
+int SkChopCubicAtInflections(const SkPoint src[4], SkPoint dst[10]);
+
+int SkFindCubicMaxCurvature(const SkPoint src[4], SkScalar tValues[3]);
+int SkChopCubicAtMaxCurvature(const SkPoint src[4], SkPoint dst[13],
+ SkScalar tValues[3] = nullptr);
+/** Returns t value of cusp if cubic has one; returns -1 otherwise.
+ */
+SkScalar SkFindCubicCusp(const SkPoint src[4]);
+
+/** Given a monotonically increasing or decreasing cubic bezier src, chop it
+ * where the X value is the specified value. The returned cubics will be in
+ * dst, sharing the middle point. That is, the first cubic is dst[0..3] and
+ * the second dst[3..6].
+ *
+ * If the cubic provided is *not* monotone, it will be chopped at the first
+ * time the curve has the specified X value.
+ *
+ * If the cubic never reaches the specified value, the function returns false.
+*/
+bool SkChopMonoCubicAtX(const SkPoint src[4], SkScalar x, SkPoint dst[7]);
+
+/** Given a monotonically increasing or decreasing cubic bezier src, chop it
+ * where the Y value is the specified value. The returned cubics will be in
+ * dst, sharing the middle point. That is, the first cubic is dst[0..3] and
+ * the second dst[3..6].
+ *
+ * If the cubic provided is *not* monotone, it will be chopped at the first
+ * time the curve has the specified Y value.
+ *
+ * If the cubic never reaches the specified value, the function returns false.
+*/
+bool SkChopMonoCubicAtY(const SkPoint src[4], SkScalar y, SkPoint dst[7]);
+
+enum class SkCubicType {
+ kSerpentine,
+ kLoop,
+ kLocalCusp, // Cusp at a non-infinite parameter value with an inflection at t=infinity.
+ kCuspAtInfinity, // Cusp with a cusp at t=infinity and a local inflection.
+ kQuadratic,
+ kLineOrPoint
+};
+
+static inline bool SkCubicIsDegenerate(SkCubicType type) {
+ switch (type) {
+ case SkCubicType::kSerpentine:
+ case SkCubicType::kLoop:
+ case SkCubicType::kLocalCusp:
+ case SkCubicType::kCuspAtInfinity:
+ return false;
+ case SkCubicType::kQuadratic:
+ case SkCubicType::kLineOrPoint:
+ return true;
+ }
+ SK_ABORT("Invalid SkCubicType");
+}
+
+static inline const char* SkCubicTypeName(SkCubicType type) {
+ switch (type) {
+ case SkCubicType::kSerpentine: return "kSerpentine";
+ case SkCubicType::kLoop: return "kLoop";
+ case SkCubicType::kLocalCusp: return "kLocalCusp";
+ case SkCubicType::kCuspAtInfinity: return "kCuspAtInfinity";
+ case SkCubicType::kQuadratic: return "kQuadratic";
+ case SkCubicType::kLineOrPoint: return "kLineOrPoint";
+ }
+ SK_ABORT("Invalid SkCubicType");
+}
+
+/** Returns the cubic classification.
+
+ t[],s[] are set to the two homogeneous parameter values at which points the lines L & M
+ intersect with K, sorted from smallest to largest and oriented so positive values of the
+ implicit are on the "left" side. For a serpentine curve they are the inflection points. For a
+ loop they are the double point. For a local cusp, they are both equal and denote the cusp point.
+ For a cusp at an infinite parameter value, one will be the local inflection point and the other
+ +inf (t,s = 1,0). If the curve is degenerate (i.e. quadratic or linear) they are both set to a
+ parameter value of +inf (t,s = 1,0).
+
+ d[] is filled with the cubic inflection function coefficients. See "Resolution Independent
+ Curve Rendering using Programmable Graphics Hardware", 4.2 Curve Categorization:
+
+ If the input points contain infinities or NaN, the return values are undefined.
+
+ https://www.microsoft.com/en-us/research/wp-content/uploads/2005/01/p1000-loop.pdf
+*/
+SkCubicType SkClassifyCubic(const SkPoint p[4], double t[2] = nullptr, double s[2] = nullptr,
+ double d[4] = nullptr);
+
+///////////////////////////////////////////////////////////////////////////////
+
+enum SkRotationDirection {
+ kCW_SkRotationDirection,
+ kCCW_SkRotationDirection
+};
+
+struct SkConic {
+ SkConic() {}
+ SkConic(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ fPts[0] = p0;
+ fPts[1] = p1;
+ fPts[2] = p2;
+ fW = w;
+ }
+ SkConic(const SkPoint pts[3], SkScalar w) {
+ memcpy(fPts, pts, sizeof(fPts));
+ fW = w;
+ }
+
+ SkPoint fPts[3];
+ SkScalar fW;
+
+ void set(const SkPoint pts[3], SkScalar w) {
+ memcpy(fPts, pts, 3 * sizeof(SkPoint));
+ fW = w;
+ }
+
+ void set(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2, SkScalar w) {
+ fPts[0] = p0;
+ fPts[1] = p1;
+ fPts[2] = p2;
+ fW = w;
+ }
+
+ /**
+ * Given a t-value [0...1] return its position and/or tangent.
+ * If pos is not null, return its position at the t-value.
+ * If tangent is not null, return its tangent at the t-value. NOTE the
+ * tangent value's length is arbitrary, and only its direction should
+ * be used.
+ */
+ void evalAt(SkScalar t, SkPoint* pos, SkVector* tangent = nullptr) const;
+ bool SK_WARN_UNUSED_RESULT chopAt(SkScalar t, SkConic dst[2]) const;
+ void chopAt(SkScalar t1, SkScalar t2, SkConic* dst) const;
+ void chop(SkConic dst[2]) const;
+
+ SkPoint evalAt(SkScalar t) const;
+ SkVector evalTangentAt(SkScalar t) const;
+
+ void computeAsQuadError(SkVector* err) const;
+ bool asQuadTol(SkScalar tol) const;
+
+ /**
+ * return the power-of-2 number of quads needed to approximate this conic
+ * with a sequence of quads. Will be >= 0.
+ */
+ int SK_SPI computeQuadPOW2(SkScalar tol) const;
+
+ /**
+     * Chop this conic into N quads, stored contiguously in pts[], where
+ * N = 1 << pow2. The amount of storage needed is (1 + 2 * N)
+ */
+ int SK_SPI SK_WARN_UNUSED_RESULT chopIntoQuadsPOW2(SkPoint pts[], int pow2) const;
+
+ float findMidTangent() const;
+ bool findXExtrema(SkScalar* t) const;
+ bool findYExtrema(SkScalar* t) const;
+ bool chopAtXExtrema(SkConic dst[2]) const;
+ bool chopAtYExtrema(SkConic dst[2]) const;
+
+ void computeTightBounds(SkRect* bounds) const;
+ void computeFastBounds(SkRect* bounds) const;
+
+ /** Find the parameter value where the conic takes on its maximum curvature.
+ *
+ * @param t output scalar for max curvature. Will be unchanged if
+ * max curvature outside 0..1 range.
+ *
+ * @return true if max curvature found inside 0..1 range, false otherwise
+ */
+// bool findMaxCurvature(SkScalar* t) const; // unimplemented
+
+ static SkScalar TransformW(const SkPoint[3], SkScalar w, const SkMatrix&);
+
+ enum {
+ kMaxConicsForArc = 5
+ };
+ static int BuildUnitArc(const SkVector& start, const SkVector& stop, SkRotationDirection,
+ const SkMatrix*, SkConic conics[kMaxConicsForArc]);
+};
+
+// inline helpers are contained in a namespace to avoid external leakage to fragile SkVx members
+namespace { // NOLINT(google-build-namespaces)
+
+/**
+ * use for : eval(t) == A * t^2 + B * t + C
+ */
+struct SkQuadCoeff {
+ SkQuadCoeff() {}
+
+ SkQuadCoeff(const skvx::float2& A, const skvx::float2& B, const skvx::float2& C)
+ : fA(A)
+ , fB(B)
+ , fC(C)
+ {
+ }
+
+ SkQuadCoeff(const SkPoint src[3]) {
+ fC = from_point(src[0]);
+ auto P1 = from_point(src[1]);
+ auto P2 = from_point(src[2]);
+ fB = times_2(P1 - fC);
+ fA = P2 - times_2(P1) + fC;
+ }
+
+ skvx::float2 eval(const skvx::float2& tt) {
+ return (fA * tt + fB) * tt + fC;
+ }
+
+ skvx::float2 fA;
+ skvx::float2 fB;
+ skvx::float2 fC;
+};
+
+struct SkConicCoeff {
+ SkConicCoeff(const SkConic& conic) {
+ skvx::float2 p0 = from_point(conic.fPts[0]);
+ skvx::float2 p1 = from_point(conic.fPts[1]);
+ skvx::float2 p2 = from_point(conic.fPts[2]);
+ skvx::float2 ww(conic.fW);
+
+ auto p1w = p1 * ww;
+ fNumer.fC = p0;
+ fNumer.fA = p2 - times_2(p1w) + p0;
+ fNumer.fB = times_2(p1w - p0);
+
+ fDenom.fC = 1;
+ fDenom.fB = times_2(ww - fDenom.fC);
+ fDenom.fA = 0 - fDenom.fB;
+ }
+
+ skvx::float2 eval(SkScalar t) {
+ skvx::float2 tt(t);
+ skvx::float2 numer = fNumer.eval(tt);
+ skvx::float2 denom = fDenom.eval(tt);
+ return numer / denom;
+ }
+
+ SkQuadCoeff fNumer;
+ SkQuadCoeff fDenom;
+};
+
+struct SkCubicCoeff {
+ SkCubicCoeff(const SkPoint src[4]) {
+ skvx::float2 P0 = from_point(src[0]);
+ skvx::float2 P1 = from_point(src[1]);
+ skvx::float2 P2 = from_point(src[2]);
+ skvx::float2 P3 = from_point(src[3]);
+ skvx::float2 three(3);
+ fA = P3 + three * (P1 - P2) - P0;
+ fB = three * (P2 - times_2(P1) + P0);
+ fC = three * (P1 - P0);
+ fD = P0;
+ }
+
+ skvx::float2 eval(const skvx::float2& t) {
+ return ((fA * t + fB) * t + fC) * t + fD;
+ }
+
+ skvx::float2 fA;
+ skvx::float2 fB;
+ skvx::float2 fC;
+ skvx::float2 fD;
+};
+
+} // namespace
+
+#include "include/private/base/SkTemplates.h"
+
+/**
+ * Help class to allocate storage for approximating a conic with N quads.
+ */
+class SkAutoConicToQuads {
+public:
+ SkAutoConicToQuads() : fQuadCount(0) {}
+
+ /**
+ * Given a conic and a tolerance, return the array of points for the
+ * approximating quad(s). Call countQuads() to know the number of quads
+ * represented in these points.
+ *
+ * The quads are allocated to share end-points. e.g. if there are 4 quads,
+ * there will be 9 points allocated as follows
+ * quad[0] == pts[0..2]
+ * quad[1] == pts[2..4]
+ * quad[2] == pts[4..6]
+ * quad[3] == pts[6..8]
+ */
+ const SkPoint* computeQuads(const SkConic& conic, SkScalar tol) {
+ int pow2 = conic.computeQuadPOW2(tol);
+ fQuadCount = 1 << pow2;
+ SkPoint* pts = fStorage.reset(1 + 2 * fQuadCount);
+ fQuadCount = conic.chopIntoQuadsPOW2(pts, pow2);
+ return pts;
+ }
+
+ const SkPoint* computeQuads(const SkPoint pts[3], SkScalar weight,
+ SkScalar tol) {
+ SkConic conic;
+ conic.set(pts, weight);
+ return computeQuads(conic, tol);
+ }
+
+ int countQuads() const { return fQuadCount; }
+
+private:
+ enum {
+ kQuadCount = 8, // should handle most conics
+ kPointCount = 1 + 2 * kQuadCount,
+ };
+ skia_private::AutoSTMalloc<kPointCount, SkPoint> fStorage;
+ int fQuadCount; // #quads for current usage
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp b/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp
new file mode 100644
index 0000000000..c189a32cb6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlobalInitialization_core.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+#include "include/private/base/SkOnce.h"
+
+void SkFlattenable::RegisterFlattenablesIfNeeded() {
+ static SkOnce once;
+ once([]{
+ SkFlattenable::PrivateInitializer::InitEffects();
+ SkFlattenable::PrivateInitializer::InitImageFilters();
+ SkFlattenable::Finalize();
+ });
+}
diff --git a/gfx/skia/skia/src/core/SkGlyph.cpp b/gfx/skia/skia/src/core/SkGlyph.cpp
new file mode 100644
index 0000000000..56f481e312
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyph.cpp
@@ -0,0 +1,700 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGlyph.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/text/StrikeForGPU.h"
+
+#include <cstring>
+#include <optional>
+#include <tuple>
+#include <utility>
+
+using namespace skia_private;
+using namespace skglyph;
+using namespace sktext;
+
+//-- SkGlyph ---------------------------------------------------------------------------------------
+std::optional<SkGlyph> SkGlyph::MakeFromBuffer(SkReadBuffer& buffer) {
+ SkASSERT(buffer.isValid());
+ const SkPackedGlyphID packedID{buffer.readUInt()};
+ const SkVector advance = buffer.readPoint();
+ const uint32_t dimensions = buffer.readUInt();
+ const uint32_t leftTop = buffer.readUInt();
+ const SkMask::Format format = SkTo<SkMask::Format>(buffer.readUInt());
+
+ if (!buffer.validate(SkMask::IsValidFormat(format))) {
+ return std::nullopt;
+ }
+
+ SkGlyph glyph{packedID};
+ glyph.fAdvanceX = advance.x();
+ glyph.fAdvanceY = advance.y();
+ glyph.fWidth = dimensions >> 16;
+ glyph.fHeight = dimensions & 0xffffu;
+ glyph.fLeft = leftTop >> 16;
+ glyph.fTop = leftTop & 0xffffu;
+ glyph.fMaskFormat = format;
+ SkDEBUGCODE(glyph.fAdvancesBoundsFormatAndInitialPathDone = true;)
+ return std::move(glyph);
+}
+
+SkGlyph::SkGlyph(const SkGlyph&) = default;
+SkGlyph& SkGlyph::operator=(const SkGlyph&) = default;
+SkGlyph::SkGlyph(SkGlyph&&) = default;
+SkGlyph& SkGlyph::operator=(SkGlyph&&) = default;
+SkGlyph::~SkGlyph() = default;
+
+SkMask SkGlyph::mask() const {
+ SkMask mask;
+ mask.fImage = (uint8_t*)fImage;
+ mask.fBounds.setXYWH(fLeft, fTop, fWidth, fHeight);
+ mask.fRowBytes = this->rowBytes();
+ mask.fFormat = fMaskFormat;
+ return mask;
+}
+
+SkMask SkGlyph::mask(SkPoint position) const {
+ SkASSERT(SkScalarIsInt(position.x()) && SkScalarIsInt(position.y()));
+ SkMask answer = this->mask();
+ answer.fBounds.offset(SkScalarFloorToInt(position.x()), SkScalarFloorToInt(position.y()));
+ return answer;
+}
+
+void SkGlyph::zeroMetrics() {
+ fAdvanceX = 0;
+ fAdvanceY = 0;
+ fWidth = 0;
+ fHeight = 0;
+ fTop = 0;
+ fLeft = 0;
+}
+
+static size_t bits_to_bytes(size_t bits) {
+ return (bits + 7) >> 3;
+}
+
+static size_t format_alignment(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ case SkMask::kA8_Format:
+ case SkMask::k3D_Format:
+ case SkMask::kSDF_Format:
+ return alignof(uint8_t);
+ case SkMask::kARGB32_Format:
+ return alignof(uint32_t);
+ case SkMask::kLCD16_Format:
+ return alignof(uint16_t);
+ default:
+ SK_ABORT("Unknown mask format.");
+ break;
+ }
+ return 0;
+}
+
+static size_t format_rowbytes(int width, SkMask::Format format) {
+ return format == SkMask::kBW_Format ? bits_to_bytes(width)
+ : width * format_alignment(format);
+}
+
+size_t SkGlyph::formatAlignment() const {
+ return format_alignment(this->maskFormat());
+}
+
+size_t SkGlyph::allocImage(SkArenaAlloc* alloc) {
+ SkASSERT(!this->isEmpty());
+ auto size = this->imageSize();
+ fImage = alloc->makeBytesAlignedTo(size, this->formatAlignment());
+
+ return size;
+}
+
+bool SkGlyph::setImage(SkArenaAlloc* alloc, SkScalerContext* scalerContext) {
+ if (!this->setImageHasBeenCalled()) {
+ // It used to be that getImage() could change the fMaskFormat. Extra checking to make
+ // sure there are no regressions.
+ SkDEBUGCODE(SkMask::Format oldFormat = this->maskFormat());
+ this->allocImage(alloc);
+ scalerContext->getImage(*this);
+ SkASSERT(oldFormat == this->maskFormat());
+ return true;
+ }
+ return false;
+}
+
+bool SkGlyph::setImage(SkArenaAlloc* alloc, const void* image) {
+ if (!this->setImageHasBeenCalled()) {
+ this->allocImage(alloc);
+ memcpy(fImage, image, this->imageSize());
+ return true;
+ }
+ return false;
+}
+
+size_t SkGlyph::setMetricsAndImage(SkArenaAlloc* alloc, const SkGlyph& from) {
+ // Since the code no longer tries to find replacement glyphs, the image should always be
+ // nullptr.
+ SkASSERT(fImage == nullptr || from.fImage == nullptr);
+
+ // TODO(herb): remove "if" when we are sure there are no colliding glyphs.
+ if (fImage == nullptr) {
+ fAdvanceX = from.fAdvanceX;
+ fAdvanceY = from.fAdvanceY;
+ fWidth = from.fWidth;
+ fHeight = from.fHeight;
+ fTop = from.fTop;
+ fLeft = from.fLeft;
+ fScalerContextBits = from.fScalerContextBits;
+ fMaskFormat = from.fMaskFormat;
+
+ // From glyph may not have an image because the glyph is too large.
+ if (from.fImage != nullptr && this->setImage(alloc, from.image())) {
+ return this->imageSize();
+ }
+
+ SkDEBUGCODE(fAdvancesBoundsFormatAndInitialPathDone = from.fAdvancesBoundsFormatAndInitialPathDone;)
+ }
+ return 0;
+}
+
+size_t SkGlyph::rowBytes() const {
+ return format_rowbytes(fWidth, fMaskFormat);
+}
+
+size_t SkGlyph::rowBytesUsingFormat(SkMask::Format format) const {
+ return format_rowbytes(fWidth, format);
+}
+
+size_t SkGlyph::imageSize() const {
+ if (this->isEmpty() || this->imageTooLarge()) { return 0; }
+
+ size_t size = this->rowBytes() * fHeight;
+
+ if (fMaskFormat == SkMask::k3D_Format) {
+ size *= 3;
+ }
+
+ return size;
+}
+
+void SkGlyph::installPath(SkArenaAlloc* alloc, const SkPath* path, bool hairline) {
+ SkASSERT(fPathData == nullptr);
+ SkASSERT(!this->setPathHasBeenCalled());
+ fPathData = alloc->make<SkGlyph::PathData>();
+ if (path != nullptr) {
+ fPathData->fPath = *path;
+ fPathData->fPath.updateBoundsCache();
+ fPathData->fPath.getGenerationID();
+ fPathData->fHasPath = true;
+ fPathData->fHairline = hairline;
+ }
+}
+
+bool SkGlyph::setPath(SkArenaAlloc* alloc, SkScalerContext* scalerContext) {
+ if (!this->setPathHasBeenCalled()) {
+ scalerContext->getPath(*this, alloc);
+ SkASSERT(this->setPathHasBeenCalled());
+ return this->path() != nullptr;
+ }
+
+ return false;
+}
+
+bool SkGlyph::setPath(SkArenaAlloc* alloc, const SkPath* path, bool hairline) {
+ if (!this->setPathHasBeenCalled()) {
+ this->installPath(alloc, path, hairline);
+ return this->path() != nullptr;
+ }
+ return false;
+}
+
+const SkPath* SkGlyph::path() const {
+ // setPath must have been called previously.
+ SkASSERT(this->setPathHasBeenCalled());
+ if (fPathData->fHasPath) {
+ return &fPathData->fPath;
+ }
+ return nullptr;
+}
+
+bool SkGlyph::pathIsHairline() const {
+ // setPath must have been called previously.
+ SkASSERT(this->setPathHasBeenCalled());
+ return fPathData->fHairline;
+}
+
+void SkGlyph::installDrawable(SkArenaAlloc* alloc, sk_sp<SkDrawable> drawable) {
+ SkASSERT(fDrawableData == nullptr);
+ SkASSERT(!this->setDrawableHasBeenCalled());
+ fDrawableData = alloc->make<SkGlyph::DrawableData>();
+ if (drawable != nullptr) {
+ fDrawableData->fDrawable = std::move(drawable);
+ fDrawableData->fDrawable->getGenerationID();
+ fDrawableData->fHasDrawable = true;
+ }
+}
+
+bool SkGlyph::setDrawable(SkArenaAlloc* alloc, SkScalerContext* scalerContext) {
+ if (!this->setDrawableHasBeenCalled()) {
+ sk_sp<SkDrawable> drawable = scalerContext->getDrawable(*this);
+ this->installDrawable(alloc, std::move(drawable));
+ return this->drawable() != nullptr;
+ }
+ return false;
+}
+
+bool SkGlyph::setDrawable(SkArenaAlloc* alloc, sk_sp<SkDrawable> drawable) {
+ if (!this->setDrawableHasBeenCalled()) {
+ this->installDrawable(alloc, std::move(drawable));
+ return this->drawable() != nullptr;
+ }
+ return false;
+}
+
+SkDrawable* SkGlyph::drawable() const {
+ // setDrawable must have been called previously.
+ SkASSERT(this->setDrawableHasBeenCalled());
+ if (fDrawableData->fHasDrawable) {
+ return fDrawableData->fDrawable.get();
+ }
+ return nullptr;
+}
+
+void SkGlyph::flattenMetrics(SkWriteBuffer& buffer) const {
+ buffer.writeUInt(fID.value());
+ buffer.writePoint({fAdvanceX, fAdvanceY});
+ buffer.writeUInt(fWidth << 16 | fHeight);
+ // Note: << has undefined behavior for negative values, so convert everything to the bit
+ // values of uint16_t. Using the cast keeps the signed values fLeft and fTop from sign
+ // extending.
+ const uint32_t left = static_cast<uint16_t>(fLeft);
+ const uint32_t top = static_cast<uint16_t>(fTop);
+ buffer.writeUInt(left << 16 | top);
+ buffer.writeUInt(SkTo<uint32_t>(fMaskFormat));
+}
+
+void SkGlyph::flattenImage(SkWriteBuffer& buffer) const {
+ SkASSERT(this->setImageHasBeenCalled());
+
+ // If the glyph is empty or too big, then no image data is sent.
+ if (!this->isEmpty() && SkGlyphDigest::FitsInAtlas(*this)) {
+ buffer.writeByteArray(this->image(), this->imageSize());
+ }
+}
+
+size_t SkGlyph::addImageFromBuffer(SkReadBuffer& buffer, SkArenaAlloc* alloc) {
+ SkASSERT(buffer.isValid());
+
+ // If the glyph is empty or too big, then no image data is received.
+ if (this->isEmpty() || !SkGlyphDigest::FitsInAtlas(*this)) {
+ return 0;
+ }
+
+ size_t memoryIncrease = 0;
+
+ void* imageData = alloc->makeBytesAlignedTo(this->imageSize(), this->formatAlignment());
+ buffer.readByteArray(imageData, this->imageSize());
+ if (buffer.isValid()) {
+ this->installImage(imageData);
+ memoryIncrease += this->imageSize();
+ }
+
+ return memoryIncrease;
+}
+
+void SkGlyph::flattenPath(SkWriteBuffer& buffer) const {
+ SkASSERT(this->setPathHasBeenCalled());
+
+ const bool hasPath = this->path() != nullptr;
+ buffer.writeBool(hasPath);
+ if (hasPath) {
+ buffer.writeBool(this->pathIsHairline());
+ buffer.writePath(*this->path());
+ }
+}
+
+size_t SkGlyph::addPathFromBuffer(SkReadBuffer& buffer, SkArenaAlloc* alloc) {
+ SkASSERT(buffer.isValid());
+
+ size_t memoryIncrease = 0;
+ const bool hasPath = buffer.readBool();
+ // Check if the buffer is invalid, so as to not make a logical decision on invalid data.
+ if (!buffer.isValid()) {
+ return 0;
+ }
+ if (hasPath) {
+ const bool pathIsHairline = buffer.readBool();
+ SkPath path;
+ buffer.readPath(&path);
+ if (buffer.isValid()) {
+ if (this->setPath(alloc, &path, pathIsHairline)) {
+ memoryIncrease += path.approximateBytesUsed();
+ }
+ }
+ } else {
+ this->setPath(alloc, nullptr, false);
+ }
+
+ return memoryIncrease;
+}
+
+void SkGlyph::flattenDrawable(SkWriteBuffer& buffer) const {
+ SkASSERT(this->setDrawableHasBeenCalled());
+
+ if (this->isEmpty() || this->drawable() == nullptr) {
+ buffer.writeByteArray(nullptr, 0);
+ return;
+ }
+
+ sk_sp<SkPicture> picture{this->drawable()->newPictureSnapshot()};
+ sk_sp<SkData> data = picture->serialize();
+
+ // If the picture is too big, or there is no picture, then drop by sending an empty byte array.
+ if (!SkTFitsIn<uint32_t>(data->size()) || data->size() == 0) {
+ buffer.writeByteArray(nullptr, 0);
+ return;
+ }
+
+ buffer.writeByteArray(data->data(), data->size());
+}
+
+size_t SkGlyph::addDrawableFromBuffer(SkReadBuffer& buffer, SkArenaAlloc* alloc) {
+ SkASSERT(buffer.isValid());
+
+ // Class to turn the drawable into a picture to serialize.
+ class PictureBackedGlyphDrawable final : public SkDrawable {
+ public:
+ PictureBackedGlyphDrawable(sk_sp<SkPicture> self) : fSelf(std::move(self)) {}
+ private:
+ sk_sp<SkPicture> fSelf;
+ SkRect onGetBounds() override { return fSelf->cullRect(); }
+ size_t onApproximateBytesUsed() override {
+ return sizeof(PictureBackedGlyphDrawable) + fSelf->approximateBytesUsed();
+ }
+ void onDraw(SkCanvas* canvas) override { canvas->drawPicture(fSelf); }
+ };
+
+ size_t memoryIncrease = 0;
+
+ sk_sp<SkData> pictureData = buffer.readByteArrayAsData();
+ if (!buffer.isValid()) {
+ return 0;
+ }
+
+    // A picture that is too big, or absent entirely, is indicated by an empty byte array.
+ if (pictureData->size() > 0) {
+ sk_sp<SkPicture> picture = SkPicture::MakeFromData(pictureData.get());
+ if (!buffer.validate(picture != nullptr)) {
+ return 0;
+ }
+ sk_sp<SkDrawable> drawable = sk_make_sp<PictureBackedGlyphDrawable>(std::move(picture));
+ if (this->setDrawable(alloc, std::move(drawable))) {
+ memoryIncrease += this->drawable()->approximateBytesUsed();
+ }
+ } else {
+ this->setDrawable(alloc, sk_sp<SkDrawable>(nullptr));
+ }
+
+ return memoryIncrease;
+}
+
+static std::tuple<SkScalar, SkScalar> calculate_path_gap(
+ SkScalar topOffset, SkScalar bottomOffset, const SkPath& path) {
+
+ // Left and Right of an ever expanding gap around the path.
+ SkScalar left = SK_ScalarMax,
+ right = SK_ScalarMin;
+ auto expandGap = [&left, &right](SkScalar v) {
+ left = std::min(left, v);
+ right = std::max(right, v);
+ };
+
+ // Handle all the different verbs for the path.
+ SkPoint pts[4];
+ auto addLine = [&expandGap, &pts](SkScalar offset) {
+ SkScalar t = sk_ieee_float_divide(offset - pts[0].fY, pts[1].fY - pts[0].fY);
+ if (0 <= t && t < 1) { // this handles divide by zero above
+ expandGap(pts[0].fX + t * (pts[1].fX - pts[0].fX));
+ }
+ };
+
+ auto addQuad = [&expandGap, &pts](SkScalar offset) {
+ SkDQuad quad;
+ quad.set(pts);
+ double roots[2];
+ int count = quad.horizontalIntersect(offset, roots);
+ while (--count >= 0) {
+ expandGap(quad.ptAtT(roots[count]).asSkPoint().fX);
+ }
+ };
+
+ auto addCubic = [&expandGap, &pts](SkScalar offset) {
+ SkDCubic cubic;
+ cubic.set(pts);
+ double roots[3];
+ int count = cubic.horizontalIntersect(offset, roots);
+ while (--count >= 0) {
+ expandGap(cubic.ptAtT(roots[count]).asSkPoint().fX);
+ }
+ };
+
+ // Handle when a verb's points are in the gap between top and bottom.
+ auto addPts = [&expandGap, &pts, topOffset, bottomOffset](int ptCount) {
+ for (int i = 0; i < ptCount; ++i) {
+ if (topOffset < pts[i].fY && pts[i].fY < bottomOffset) {
+ expandGap(pts[i].fX);
+ }
+ }
+ };
+
+ SkPath::Iter iter(path, false);
+ SkPath::Verb verb;
+ while (SkPath::kDone_Verb != (verb = iter.next(pts))) {
+ switch (verb) {
+ case SkPath::kMove_Verb: {
+ break;
+ }
+ case SkPath::kLine_Verb: {
+ addLine(topOffset);
+ addLine(bottomOffset);
+ addPts(2);
+ break;
+ }
+ case SkPath::kQuad_Verb: {
+ SkScalar quadTop = std::min(std::min(pts[0].fY, pts[1].fY), pts[2].fY);
+ if (bottomOffset < quadTop) { break; }
+ SkScalar quadBottom = std::max(std::max(pts[0].fY, pts[1].fY), pts[2].fY);
+ if (topOffset > quadBottom) { break; }
+ addQuad(topOffset);
+ addQuad(bottomOffset);
+ addPts(3);
+ break;
+ }
+ case SkPath::kConic_Verb: {
+ SkASSERT(0); // no support for text composed of conics
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ SkScalar quadTop =
+ std::min(std::min(std::min(pts[0].fY, pts[1].fY), pts[2].fY), pts[3].fY);
+ if (bottomOffset < quadTop) { break; }
+ SkScalar quadBottom =
+ std::max(std::max(std::max(pts[0].fY, pts[1].fY), pts[2].fY), pts[3].fY);
+ if (topOffset > quadBottom) { break; }
+ addCubic(topOffset);
+ addCubic(bottomOffset);
+ addPts(4);
+ break;
+ }
+ case SkPath::kClose_Verb: {
+ break;
+ }
+ default: {
+ SkASSERT(0);
+ break;
+ }
+ }
+ }
+
+ return std::tie(left, right);
+}
+
+void SkGlyph::ensureIntercepts(const SkScalar* bounds, SkScalar scale, SkScalar xPos,
+ SkScalar* array, int* count, SkArenaAlloc* alloc) {
+
+ auto offsetResults = [scale, xPos](
+ const SkGlyph::Intercept* intercept,SkScalar* array, int* count) {
+ if (array) {
+ array += *count;
+ for (int index = 0; index < 2; index++) {
+ *array++ = intercept->fInterval[index] * scale + xPos;
+ }
+ }
+ *count += 2;
+ };
+
+ const SkGlyph::Intercept* match =
+ [this](const SkScalar bounds[2]) -> const SkGlyph::Intercept* {
+ if (!fPathData) {
+ return nullptr;
+ }
+ const SkGlyph::Intercept* intercept = fPathData->fIntercept;
+ while (intercept) {
+ if (bounds[0] == intercept->fBounds[0] && bounds[1] == intercept->fBounds[1]) {
+ return intercept;
+ }
+ intercept = intercept->fNext;
+ }
+ return nullptr;
+ }(bounds);
+
+ if (match) {
+ if (match->fInterval[0] < match->fInterval[1]) {
+ offsetResults(match, array, count);
+ }
+ return;
+ }
+
+ SkGlyph::Intercept* intercept = alloc->make<SkGlyph::Intercept>();
+ intercept->fNext = fPathData->fIntercept;
+ intercept->fBounds[0] = bounds[0];
+ intercept->fBounds[1] = bounds[1];
+ intercept->fInterval[0] = SK_ScalarMax;
+ intercept->fInterval[1] = SK_ScalarMin;
+ fPathData->fIntercept = intercept;
+ const SkPath* path = &(fPathData->fPath);
+ const SkRect& pathBounds = path->getBounds();
+ if (pathBounds.fBottom < bounds[0] || bounds[1] < pathBounds.fTop) {
+ return;
+ }
+
+ std::tie(intercept->fInterval[0], intercept->fInterval[1])
+ = calculate_path_gap(bounds[0], bounds[1], *path);
+
+ if (intercept->fInterval[0] >= intercept->fInterval[1]) {
+ intercept->fInterval[0] = SK_ScalarMax;
+ intercept->fInterval[1] = SK_ScalarMin;
+ return;
+ }
+ offsetResults(intercept, array, count);
+}
+
+namespace {
+uint32_t init_actions(const SkGlyph& glyph) {
+ constexpr uint32_t kAllUnset = 0;
+ constexpr uint32_t kDrop = SkTo<uint32_t>(GlyphAction::kDrop);
+ constexpr uint32_t kAllDrop =
+ kDrop << kDirectMask |
+ kDrop << kDirectMaskCPU |
+ kDrop << kMask |
+ kDrop << kSDFT |
+ kDrop << kPath |
+ kDrop << kDrawable;
+ return glyph.isEmpty() ? kAllDrop : kAllUnset;
+}
+} // namespace
+
+// -- SkGlyphDigest --------------------------------------------------------------------------------
+SkGlyphDigest::SkGlyphDigest(size_t index, const SkGlyph& glyph)
+ : fPackedID{SkTo<uint64_t>(glyph.getPackedID().value())}
+ , fIndex{SkTo<uint64_t>(index)}
+ , fIsEmpty(glyph.isEmpty())
+ , fFormat(glyph.maskFormat())
+ , fActions{init_actions(glyph)}
+ , fLeft{SkTo<int16_t>(glyph.left())}
+ , fTop{SkTo<int16_t>(glyph.top())}
+ , fWidth{SkTo<uint16_t>(glyph.width())}
+ , fHeight{SkTo<uint16_t>(glyph.height())} {}
+
+void SkGlyphDigest::setActionFor(skglyph::ActionType actionType,
+ SkGlyph* glyph,
+ StrikeForGPU* strike) {
+ // We don't have to do any more if the glyph is marked as kDrop because it was isEmpty().
+ if (this->actionFor(actionType) == GlyphAction::kUnset) {
+ GlyphAction action = GlyphAction::kReject;
+ switch (actionType) {
+ case kDirectMask: {
+ if (this->fitsInAtlasDirect()) {
+ action = GlyphAction::kAccept;
+ }
+ break;
+ }
+ case kDirectMaskCPU: {
+ if (strike->prepareForImage(glyph)) {
+ action = GlyphAction::kAccept;
+ }
+ break;
+ }
+ case kMask: {
+ if (this->fitsInAtlasInterpolated()) {
+ action = GlyphAction::kAccept;
+ }
+ break;
+ }
+ case kSDFT: {
+ if (this->fitsInAtlasDirect() &&
+ this->maskFormat() == SkMask::Format::kSDF_Format) {
+ action = GlyphAction::kAccept;
+ }
+ break;
+ }
+ case kPath: {
+ if (strike->prepareForPath(glyph)) {
+ action = GlyphAction::kAccept;
+ }
+ break;
+ }
+ case kDrawable: {
+ if (strike->prepareForDrawable(glyph)) {
+ action = GlyphAction::kAccept;
+ }
+ break;
+ }
+ }
+ this->setAction(actionType, action);
+ }
+}
+
+bool SkGlyphDigest::FitsInAtlas(const SkGlyph& glyph) {
+ return glyph.maxDimension() <= kSkSideTooBigForAtlas;
+}
+
+// -- SkGlyphPositionRoundingSpec ------------------------------------------------------------------
+SkVector SkGlyphPositionRoundingSpec::HalfAxisSampleFreq(
+ bool isSubpixel, SkAxisAlignment axisAlignment) {
+ if (!isSubpixel) {
+ return {SK_ScalarHalf, SK_ScalarHalf};
+ } else {
+ switch (axisAlignment) {
+ case SkAxisAlignment::kX:
+ return {SkPackedGlyphID::kSubpixelRound, SK_ScalarHalf};
+ case SkAxisAlignment::kY:
+ return {SK_ScalarHalf, SkPackedGlyphID::kSubpixelRound};
+ case SkAxisAlignment::kNone:
+ return {SkPackedGlyphID::kSubpixelRound, SkPackedGlyphID::kSubpixelRound};
+ }
+ }
+
+ // Some compilers need this.
+ return {0, 0};
+}
+
+SkIPoint SkGlyphPositionRoundingSpec::IgnorePositionMask(
+ bool isSubpixel, SkAxisAlignment axisAlignment) {
+ return SkIPoint::Make((!isSubpixel || axisAlignment == SkAxisAlignment::kY) ? 0 : ~0,
+ (!isSubpixel || axisAlignment == SkAxisAlignment::kX) ? 0 : ~0);
+}
+
+SkIPoint SkGlyphPositionRoundingSpec::IgnorePositionFieldMask(bool isSubpixel,
+ SkAxisAlignment axisAlignment) {
+ SkIPoint ignoreMask = IgnorePositionMask(isSubpixel, axisAlignment);
+ SkIPoint answer{ignoreMask.x() & SkPackedGlyphID::kXYFieldMask.x(),
+ ignoreMask.y() & SkPackedGlyphID::kXYFieldMask.y()};
+ return answer;
+}
+
+SkGlyphPositionRoundingSpec::SkGlyphPositionRoundingSpec(
+ bool isSubpixel, SkAxisAlignment axisAlignment)
+ : halfAxisSampleFreq{HalfAxisSampleFreq(isSubpixel, axisAlignment)}
+ , ignorePositionMask{IgnorePositionMask(isSubpixel, axisAlignment)}
+ , ignorePositionFieldMask {IgnorePositionFieldMask(isSubpixel, axisAlignment)} {}
diff --git a/gfx/skia/skia/src/core/SkGlyph.h b/gfx/skia/skia/src/core/SkGlyph.h
new file mode 100644
index 0000000000..73ce682016
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyph.h
@@ -0,0 +1,639 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyph_DEFINED
+#define SkGlyph_DEFINED
+
+#include "include/core/SkDrawable.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkMask.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <optional>
+
+class SkArenaAlloc;
+class SkGlyph;
+class SkReadBuffer;
+class SkScalerContext;
+class SkWriteBuffer;
+namespace sktext {
+class StrikeForGPU;
+} // namespace sktext
+
+// -- SkPackedGlyphID ------------------------------------------------------------------------------
+// A combination of SkGlyphID and sub-pixel position information.
+struct SkPackedGlyphID {
+ inline static constexpr uint32_t kImpossibleID = ~0u;
+ enum {
+ // Lengths
+ kGlyphIDLen = 16u,
+ kSubPixelPosLen = 2u,
+
+ // Bit positions
+ kSubPixelX = 0u,
+ kGlyphID = kSubPixelPosLen,
+ kSubPixelY = kGlyphIDLen + kSubPixelPosLen,
+ kEndData = kGlyphIDLen + 2 * kSubPixelPosLen,
+
+ // Masks
+ kGlyphIDMask = (1u << kGlyphIDLen) - 1,
+ kSubPixelPosMask = (1u << kSubPixelPosLen) - 1,
+ kMaskAll = (1u << kEndData) - 1,
+
+ // Location of sub pixel info in a fixed pointer number.
+ kFixedPointBinaryPointPos = 16u,
+ kFixedPointSubPixelPosBits = kFixedPointBinaryPointPos - kSubPixelPosLen,
+ };
+
+ inline static const constexpr SkScalar kSubpixelRound =
+ 1.f / (1u << (SkPackedGlyphID::kSubPixelPosLen + 1));
+
+ inline static const constexpr SkIPoint kXYFieldMask{kSubPixelPosMask << kSubPixelX,
+ kSubPixelPosMask << kSubPixelY};
+
+ struct Hash {
+ uint32_t operator() (SkPackedGlyphID packedID) const {
+ return packedID.hash();
+ }
+ };
+
+ constexpr explicit SkPackedGlyphID(SkGlyphID glyphID)
+ : fID{(uint32_t)glyphID << kGlyphID} { }
+
+ constexpr SkPackedGlyphID(SkGlyphID glyphID, SkFixed x, SkFixed y)
+ : fID {PackIDXY(glyphID, x, y)} { }
+
+ constexpr SkPackedGlyphID(SkGlyphID glyphID, uint32_t x, uint32_t y)
+ : fID {PackIDSubXSubY(glyphID, x, y)} { }
+
+ SkPackedGlyphID(SkGlyphID glyphID, SkPoint pt, SkIPoint mask)
+ : fID{PackIDSkPoint(glyphID, pt, mask)} { }
+
+ constexpr explicit SkPackedGlyphID(uint32_t v) : fID{v & kMaskAll} { }
+ constexpr SkPackedGlyphID() : fID{kImpossibleID} {}
+
+ bool operator==(const SkPackedGlyphID& that) const {
+ return fID == that.fID;
+ }
+ bool operator!=(const SkPackedGlyphID& that) const {
+ return !(*this == that);
+ }
+ bool operator<(SkPackedGlyphID that) const {
+ return this->fID < that.fID;
+ }
+
+ SkGlyphID glyphID() const {
+ return (fID >> kGlyphID) & kGlyphIDMask;
+ }
+
+ uint32_t value() const {
+ return fID;
+ }
+
+ SkFixed getSubXFixed() const {
+ return this->subToFixed(kSubPixelX);
+ }
+
+ SkFixed getSubYFixed() const {
+ return this->subToFixed(kSubPixelY);
+ }
+
+ uint32_t hash() const {
+ return SkChecksum::CheapMix(fID);
+ }
+
+ SkString dump() const {
+ SkString str;
+ str.appendf("glyphID: %d, x: %d, y:%d", glyphID(), getSubXFixed(), getSubYFixed());
+ return str;
+ }
+
+ SkString shortDump() const {
+ SkString str;
+ str.appendf("0x%x|%1d|%1d", this->glyphID(),
+ this->subPixelField(kSubPixelX),
+ this->subPixelField(kSubPixelY));
+ return str;
+ }
+
+private:
+ static constexpr uint32_t PackIDSubXSubY(SkGlyphID glyphID, uint32_t x, uint32_t y) {
+ SkASSERT(x < (1u << kSubPixelPosLen));
+ SkASSERT(y < (1u << kSubPixelPosLen));
+
+ return (x << kSubPixelX) | (y << kSubPixelY) | (glyphID << kGlyphID);
+ }
+
+ // Assumptions: pt is properly rounded. mask is set for the x or y fields.
+ //
+ // A sub-pixel field is a number on the interval [2^kSubPixel, 2^(kSubPixel + kSubPixelPosLen)).
+ // Where kSubPixel is either kSubPixelX or kSubPixelY. Given a number x on [0, 1) we can
+ // generate a sub-pixel field using:
+ // sub-pixel-field = x * 2^(kSubPixel + kSubPixelPosLen)
+ //
+    // We can generate the integer sub-pixel field by &-ing the integer part of the sub-field with the
+ // sub-pixel field mask.
+ // int-sub-pixel-field = int(sub-pixel-field) & (kSubPixelPosMask << kSubPixel)
+ //
+    // The last trick is to extend the range from [0, 1) to [0, 2). The extended range is
+ // necessary because the modulo 1 calculation (pt - floor(pt)) generates numbers on [-1, 1).
+ // This does not round (floor) properly when converting to integer. Adding one to the range
+ // causes truncation and floor to be the same. Coincidentally, masking to produce the field also
+ // removes the +1.
+ static uint32_t PackIDSkPoint(SkGlyphID glyphID, SkPoint pt, SkIPoint mask) {
+ #if 0
+ // TODO: why does this code not work on GCC 8.3 x86 Debug builds?
+ using namespace skvx;
+ using XY = Vec<2, float>;
+ using SubXY = Vec<2, int>;
+
+ const XY magic = {1.f * (1u << (kSubPixelPosLen + kSubPixelX)),
+ 1.f * (1u << (kSubPixelPosLen + kSubPixelY))};
+ XY pos{pt.x(), pt.y()};
+ XY subPos = (pos - floor(pos)) + 1.0f;
+ SubXY sub = cast<int>(subPos * magic) & SubXY{mask.x(), mask.y()};
+ #else
+ const float magicX = 1.f * (1u << (kSubPixelPosLen + kSubPixelX)),
+ magicY = 1.f * (1u << (kSubPixelPosLen + kSubPixelY));
+
+ float x = pt.x(),
+ y = pt.y();
+ x = (x - floorf(x)) + 1.0f;
+ y = (y - floorf(y)) + 1.0f;
+ int sub[] = {
+ (int)(x * magicX) & mask.x(),
+ (int)(y * magicY) & mask.y(),
+ };
+ #endif
+
+ SkASSERT(sub[0] / (1u << kSubPixelX) < (1u << kSubPixelPosLen));
+ SkASSERT(sub[1] / (1u << kSubPixelY) < (1u << kSubPixelPosLen));
+ return (glyphID << kGlyphID) | sub[0] | sub[1];
+ }
+
+ static constexpr uint32_t PackIDXY(SkGlyphID glyphID, SkFixed x, SkFixed y) {
+ return PackIDSubXSubY(glyphID, FixedToSub(x), FixedToSub(y));
+ }
+
+ static constexpr uint32_t FixedToSub(SkFixed n) {
+ return ((uint32_t)n >> kFixedPointSubPixelPosBits) & kSubPixelPosMask;
+ }
+
+ constexpr uint32_t subPixelField(uint32_t subPixelPosBit) const {
+ return (fID >> subPixelPosBit) & kSubPixelPosMask;
+ }
+
+ constexpr SkFixed subToFixed(uint32_t subPixelPosBit) const {
+ uint32_t subPixelPosition = this->subPixelField(subPixelPosBit);
+ return subPixelPosition << kFixedPointSubPixelPosBits;
+ }
+
+ uint32_t fID;
+};
+
+// -- SkAxisAlignment ------------------------------------------------------------------------------
+// SkAxisAlignment specifies the x component of a glyph's position is rounded when kX, and the y
+// component is rounded when kY. If kNone then neither are rounded.
+enum class SkAxisAlignment : uint32_t {
+ kNone,
+ kX,
+ kY,
+};
+
+// round and ignorePositionMask are used to calculate the subpixel position of a glyph.
+// The per component (x or y) calculation is:
+//
+// subpixelOffset = (floor((viewportPosition + rounding) & mask) >> 14) & 3
+//
+// where mask is either 0 or ~0, and rounding is either
+// 1/2 for non-subpixel or 1/8 for subpixel.
+struct SkGlyphPositionRoundingSpec {
+ SkGlyphPositionRoundingSpec(bool isSubpixel, SkAxisAlignment axisAlignment);
+ const SkVector halfAxisSampleFreq;
+ const SkIPoint ignorePositionMask;
+ const SkIPoint ignorePositionFieldMask;
+
+private:
+ static SkVector HalfAxisSampleFreq(bool isSubpixel, SkAxisAlignment axisAlignment);
+ static SkIPoint IgnorePositionMask(bool isSubpixel, SkAxisAlignment axisAlignment);
+ static SkIPoint IgnorePositionFieldMask(bool isSubpixel, SkAxisAlignment axisAlignment);
+};
+
+class SkGlyphRect;
+namespace skglyph {
+SkGlyphRect rect_union(SkGlyphRect, SkGlyphRect);
+SkGlyphRect rect_intersection(SkGlyphRect, SkGlyphRect);
+} // namespace skglyph
+
+// SkGlyphRect encodes rectangles with coordinates using SkScalar. It is specialized for
+// rectangle union and intersection operations.
+class SkGlyphRect {
+public:
+ SkGlyphRect() = default;
+ SkGlyphRect(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom)
+ : fRect{-left, -top, right, bottom} { }
+ bool empty() const {
+ return -fRect[0] >= fRect[2] || -fRect[1] >= fRect[3];
+ }
+ SkRect rect() const {
+ return SkRect::MakeLTRB(-fRect[0], -fRect[1], fRect[2], fRect[3]);
+ }
+ SkGlyphRect offset(SkScalar x, SkScalar y) const {
+ return SkGlyphRect{fRect + Storage{-x, -y, x, y}};
+ }
+ SkGlyphRect offset(SkPoint pt) const {
+ return this->offset(pt.x(), pt.y());
+ }
+ SkGlyphRect scaleAndOffset(SkScalar scale, SkPoint offset) const {
+ auto [x, y] = offset;
+ return fRect * scale + Storage{-x, -y, x, y};
+ }
+ SkGlyphRect inset(SkScalar dx, SkScalar dy) const {
+ return fRect - Storage{dx, dy, dx, dy};
+ }
+ SkPoint leftTop() const { return -this->negLeftTop(); }
+ SkPoint rightBottom() const { return {fRect[2], fRect[3]}; }
+ SkPoint widthHeight() const { return this->rightBottom() + negLeftTop(); }
+ friend SkGlyphRect skglyph::rect_union(SkGlyphRect, SkGlyphRect);
+ friend SkGlyphRect skglyph::rect_intersection(SkGlyphRect, SkGlyphRect);
+
+private:
+ SkPoint negLeftTop() const { return {fRect[0], fRect[1]}; }
+ using Storage = skvx::Vec<4, SkScalar>;
+ SkGlyphRect(Storage rect) : fRect{rect} { }
+ Storage fRect;
+};
+
+namespace skglyph {
+inline SkGlyphRect empty_rect() {
+ constexpr SkScalar max = std::numeric_limits<SkScalar>::max();
+ return {max, max, -max, -max};
+}
+inline SkGlyphRect full_rect() {
+ constexpr SkScalar max = std::numeric_limits<SkScalar>::max();
+ return {-max, -max, max, max};
+}
+inline SkGlyphRect rect_union(SkGlyphRect a, SkGlyphRect b) {
+ return skvx::max(a.fRect, b.fRect);
+}
+inline SkGlyphRect rect_intersection(SkGlyphRect a, SkGlyphRect b) {
+ return skvx::min(a.fRect, b.fRect);
+}
+
+enum class GlyphAction {
+ kUnset,
+ kAccept,
+ kReject,
+ kDrop,
+ kSize,
+};
+
+enum ActionType {
+ kDirectMask = 0,
+ kDirectMaskCPU = 2,
+ kMask = 4,
+ kSDFT = 6,
+ kPath = 8,
+ kDrawable = 10,
+};
+
+enum ActionTypeSize {
+ kTotalBits = 12
+};
+} // namespace skglyph
+
+// SkGlyphDigest contains a digest of information for making GPU drawing decisions. It can be
+// referenced instead of the glyph itself in many situations. In the remote glyphs cache the
+// SkGlyphDigest is the only information that needs to be stored in the cache.
+class SkGlyphDigest {
+public:
+ // An atlas consists of plots, and plots hold glyphs. The minimum a plot can be is 256x256.
+ // This means that the maximum size a glyph can be is 256x256.
+ static constexpr uint16_t kSkSideTooBigForAtlas = 256;
+
+ // Default ctor is only needed for the hash table.
+ SkGlyphDigest() = default;
+ SkGlyphDigest(size_t index, const SkGlyph& glyph);
+ int index() const { return fIndex; }
+ bool isEmpty() const { return fIsEmpty; }
+ bool isColor() const { return fFormat == SkMask::kARGB32_Format; }
+ SkMask::Format maskFormat() const { return static_cast<SkMask::Format>(fFormat); }
+
+ skglyph::GlyphAction actionFor(skglyph::ActionType actionType) const {
+ return static_cast<skglyph::GlyphAction>((fActions >> actionType) & 0b11);
+ }
+
+ void setActionFor(skglyph::ActionType, SkGlyph*, sktext::StrikeForGPU*);
+
+ uint16_t maxDimension() const {
+ return std::max(fWidth, fHeight);
+ }
+
+ bool fitsInAtlasDirect() const {
+ return this->maxDimension() <= kSkSideTooBigForAtlas;
+ }
+
+ bool fitsInAtlasInterpolated() const {
+ // Include the padding needed for interpolating the glyph when drawing.
+ return this->maxDimension() <= kSkSideTooBigForAtlas - 2;
+ }
+
+ SkGlyphRect bounds() const {
+ return SkGlyphRect(fLeft, fTop, (SkScalar)fLeft + fWidth, (SkScalar)fTop + fHeight);
+ }
+
+ static bool FitsInAtlas(const SkGlyph& glyph);
+
+ // GetKey and Hash implement the required methods for SkTHashTable.
+ static SkPackedGlyphID GetKey(SkGlyphDigest digest) {
+ return SkPackedGlyphID{SkTo<uint32_t>(digest.fPackedID)};
+ }
+ static uint32_t Hash(SkPackedGlyphID packedID) {
+ return packedID.hash();
+ }
+
+private:
+ void setAction(skglyph::ActionType actionType, skglyph::GlyphAction action) {
+ using namespace skglyph;
+ SkASSERT(action != GlyphAction::kUnset);
+ SkASSERT(this->actionFor(actionType) == GlyphAction::kUnset);
+ const uint64_t mask = 0b11 << actionType;
+ fActions &= ~mask;
+ fActions |= SkTo<uint64_t>(action) << actionType;
+ }
+
+ static_assert(SkPackedGlyphID::kEndData == 20);
+ static_assert(SkMask::kCountMaskFormats <= 8);
+ static_assert(SkTo<int>(skglyph::GlyphAction::kSize) <= 4);
+ struct {
+ uint64_t fPackedID : SkPackedGlyphID::kEndData;
+ uint64_t fIndex : SkPackedGlyphID::kEndData;
+ uint64_t fIsEmpty : 1;
+ uint64_t fFormat : 3;
+ uint64_t fActions : skglyph::ActionTypeSize::kTotalBits;
+ };
+ int16_t fLeft, fTop;
+ uint16_t fWidth, fHeight;
+};
+
+class SkGlyph {
+public:
+ static std::optional<SkGlyph> MakeFromBuffer(SkReadBuffer&);
+ // SkGlyph() is used for testing.
+ constexpr SkGlyph() : SkGlyph{SkPackedGlyphID()} { }
+ SkGlyph(const SkGlyph&);
+ SkGlyph& operator=(const SkGlyph&);
+ SkGlyph(SkGlyph&&);
+ SkGlyph& operator=(SkGlyph&&);
+ ~SkGlyph();
+ constexpr explicit SkGlyph(SkPackedGlyphID id) : fID{id} { }
+
+ SkVector advanceVector() const { return SkVector{fAdvanceX, fAdvanceY}; }
+ SkScalar advanceX() const { return fAdvanceX; }
+ SkScalar advanceY() const { return fAdvanceY; }
+
+ SkGlyphID getGlyphID() const { return fID.glyphID(); }
+ SkPackedGlyphID getPackedID() const { return fID; }
+ SkFixed getSubXFixed() const { return fID.getSubXFixed(); }
+ SkFixed getSubYFixed() const { return fID.getSubYFixed(); }
+
+ size_t rowBytes() const;
+ size_t rowBytesUsingFormat(SkMask::Format format) const;
+
+ // Call this to set all the metrics fields to 0 (e.g. if the scaler
+ // encounters an error measuring a glyph). Note: this does not alter the
+ // fImage, fPath, fID, fMaskFormat fields.
+ void zeroMetrics();
+
+ SkMask mask() const;
+
+ SkMask mask(SkPoint position) const;
+
+ // Image
+ // If we haven't already tried to associate an image with this glyph
+ // (i.e. setImageHasBeenCalled() returns false), then use the
+ // SkScalerContext or const void* argument to set the image.
+ bool setImage(SkArenaAlloc* alloc, SkScalerContext* scalerContext);
+ bool setImage(SkArenaAlloc* alloc, const void* image);
+
+ // Merge the 'from' glyph into this glyph using alloc to allocate image data. Return the number
+ // of bytes allocated. Copy the width, height, top, left, format, and image into this glyph
+ // making a copy of the image using the alloc.
+ size_t setMetricsAndImage(SkArenaAlloc* alloc, const SkGlyph& from);
+
+ // Returns true if the image has been set.
+ bool setImageHasBeenCalled() const {
+ return fImage != nullptr || this->isEmpty() || this->imageTooLarge();
+ }
+
+ // Return a pointer to the path if the image exists, otherwise return nullptr.
+ const void* image() const { SkASSERT(this->setImageHasBeenCalled()); return fImage; }
+
+ // Return the size of the image.
+ size_t imageSize() const;
+
+ // Path
+ // If we haven't already tried to associate a path to this glyph
+ // (i.e. setPathHasBeenCalled() returns false), then use the
+ // SkScalerContext or SkPath argument to try to do so. N.B. this
+ // may still result in no path being associated with this glyph,
+ // e.g. if you pass a null SkPath or the typeface is bitmap-only.
+ //
+ // This setPath() call is sticky... once you call it, the glyph
+ // stays in its state permanently, ignoring any future calls.
+ //
+ // Returns true if this is the first time you called setPath()
+ // and there actually is a path; call path() to get it.
+ bool setPath(SkArenaAlloc* alloc, SkScalerContext* scalerContext);
+ bool setPath(SkArenaAlloc* alloc, const SkPath* path, bool hairline);
+
+ // Returns true if that path has been set.
+ bool setPathHasBeenCalled() const { return fPathData != nullptr; }
+
+ // Return a pointer to the path if it exists, otherwise return nullptr. Only works if the
+ // path was previously set.
+ const SkPath* path() const;
+ bool pathIsHairline() const;
+
+ bool setDrawable(SkArenaAlloc* alloc, SkScalerContext* scalerContext);
+ bool setDrawable(SkArenaAlloc* alloc, sk_sp<SkDrawable> drawable);
+ bool setDrawableHasBeenCalled() const { return fDrawableData != nullptr; }
+ SkDrawable* drawable() const;
+
+ // Format
+ bool isColor() const { return fMaskFormat == SkMask::kARGB32_Format; }
+ SkMask::Format maskFormat() const { return fMaskFormat; }
+ size_t formatAlignment() const;
+
+ // Bounds
+ int maxDimension() const { return std::max(fWidth, fHeight); }
+ SkIRect iRect() const { return SkIRect::MakeXYWH(fLeft, fTop, fWidth, fHeight); }
+ SkRect rect() const { return SkRect::MakeXYWH(fLeft, fTop, fWidth, fHeight); }
+ SkGlyphRect glyphRect() const {
+ return SkGlyphRect(fLeft, fTop, fLeft + fWidth, fTop + fHeight);
+ }
+ int left() const { return fLeft; }
+ int top() const { return fTop; }
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+ bool isEmpty() const {
+ // fHeight == 0 -> fWidth == 0;
+ SkASSERT(fHeight != 0 || fWidth == 0);
+ return fWidth == 0;
+ }
+ bool imageTooLarge() const { return fWidth >= kMaxGlyphWidth; }
+
+ // Make sure that the intercept information is on the glyph and return it, or return it if it
+ // already exists.
+ // * bounds - either end of the gap for the character.
+ // * scale, xPos - information about how wide the gap is.
+ // * array - accumulated gaps for many characters if not null.
+ // * count - the number of gaps.
+ void ensureIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ SkScalar* array, int* count, SkArenaAlloc* alloc);
+
+ // Deprecated. Do not use. The last use is in SkChromeRemoteCache, and will be deleted soon.
+ void setImage(void* image) { fImage = image; }
+
+ // Serialize/deserialize functions.
+ // Flatten the metrics portions, but no drawing data.
+ void flattenMetrics(SkWriteBuffer&) const;
+
+ // Flatten just the mask data.
+ void flattenImage(SkWriteBuffer&) const;
+
+ // Read the image data, store it in the alloc, and add it to the glyph.
+ size_t addImageFromBuffer(SkReadBuffer&, SkArenaAlloc*);
+
+ // Flatten just the path data.
+ void flattenPath(SkWriteBuffer&) const;
+
+ // Read the path data, create the glyph's path data in the alloc, and add it to the glyph.
+ size_t addPathFromBuffer(SkReadBuffer&, SkArenaAlloc*);
+
+ // Flatten just the drawable data.
+ void flattenDrawable(SkWriteBuffer&) const;
+
+ // Read the drawable data, create the glyph's drawable data in the alloc, and add it to the
+ // glyph.
+ size_t addDrawableFromBuffer(SkReadBuffer&, SkArenaAlloc*);
+
+private:
+ // There are two sides to an SkGlyph, the scaler side (things that create glyph data) have
+ // access to all the fields. Scalers are assumed to maintain all the SkGlyph invariants. The
+ // consumer side has a tighter interface.
+ friend class RandomScalerContext;
+ friend class SkScalerContext;
+ friend class SkScalerContextProxy;
+ friend class SkScalerContext_Empty;
+ friend class SkScalerContext_FreeType;
+ friend class SkScalerContext_FreeType_Base;
+ friend class SkScalerContext_CairoFT;
+ friend class SkScalerContext_DW;
+ friend class SkScalerContext_GDI;
+ friend class SkScalerContext_Mac;
+ friend class SkStrikeClientImpl;
+ friend class SkTestScalerContext;
+ friend class SkTestSVGScalerContext;
+ friend class SkUserScalerContext;
+ friend class TestSVGTypeface;
+ friend class TestTypeface;
+ friend class SkGlyphTestPeer;
+
+ inline static constexpr uint16_t kMaxGlyphWidth = 1u << 13u;
+
+ // Support horizontal and vertical skipping strike-through / underlines.
+ // The caller walks the linked list looking for a match. For a horizontal underline,
+ // the fBounds contains the top and bottom of the underline. The fInterval pair contains the
+ // beginning and end of the intersection of the bounds and the glyph's path.
+ // If interval[0] >= interval[1], no intersection was found.
+ struct Intercept {
+ Intercept* fNext;
+ SkScalar fBounds[2]; // for horz underlines, the boundaries in Y
+ SkScalar fInterval[2]; // the outside intersections of the axis and the glyph
+ };
+
+ struct PathData {
+ Intercept* fIntercept{nullptr};
+ SkPath fPath;
+ bool fHasPath{false};
+ // A normal user-path will have patheffects applied to it and eventually become a dev-path.
+ // A dev-path is always a fill-path, except when it is hairline.
+ // The fPath is a dev-path, so sidecar the paths hairline status.
+ // This allows the user to avoid filling paths which should not be filled.
+ bool fHairline{false};
+ };
+
+ struct DrawableData {
+ Intercept* fIntercept{nullptr};
+ sk_sp<SkDrawable> fDrawable;
+ bool fHasDrawable{false};
+ };
+
+ size_t allocImage(SkArenaAlloc* alloc);
+
+ void installImage(void* imageData) {
+ SkASSERT(!this->setImageHasBeenCalled());
+ fImage = imageData;
+ }
+
+ // path == nullptr indicates that there is no path.
+ void installPath(SkArenaAlloc* alloc, const SkPath* path, bool hairline);
+
+ // drawable == nullptr indicates that there is no drawable.
+ void installDrawable(SkArenaAlloc* alloc, sk_sp<SkDrawable> drawable);
+
+ // The width and height of the glyph mask.
+ uint16_t fWidth = 0,
+ fHeight = 0;
+
+ // The offset from the glyphs origin on the baseline to the top left of the glyph mask.
+ int16_t fTop = 0,
+ fLeft = 0;
+
+ // fImage must remain null if the glyph is empty or if width > kMaxGlyphWidth.
+ void* fImage = nullptr;
+
+ // Path data has tricky state. If the glyph isEmpty, then fPathData should always be nullptr,
+ // else if fPathData is not null, then a path has been requested. The fPath field of fPathData
+ // may still be null after the request meaning that there is no path for this glyph.
+ PathData* fPathData = nullptr;
+ DrawableData* fDrawableData = nullptr;
+
+ // The advance for this glyph.
+ float fAdvanceX = 0,
+ fAdvanceY = 0;
+
+ SkMask::Format fMaskFormat{SkMask::kBW_Format};
+
+ // Used by the SkScalerContext to pass state from generateMetrics to generateImage.
+ // Usually specifies which glyph representation was used to generate the metrics.
+ uint16_t fScalerContextBits = 0;
+
+ // An SkGlyph can be created with just a packedID, but generally speaking some glyph factory
+ // needs to actually fill out the glyph before it can be used as part of that system.
+ SkDEBUGCODE(bool fAdvancesBoundsFormatAndInitialPathDone{false};)
+
+ SkPackedGlyphID fID;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGlyphRunPainter.cpp b/gfx/skia/skia/src/core/SkGlyphRunPainter.cpp
new file mode 100644
index 0000000000..48755aaff4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphRunPainter.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGlyphRunPainter.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkEnumerate.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/text/GlyphRun.h"
+
+using namespace skglyph;
+using namespace sktext;
+
+namespace {
+SkScalerContextFlags compute_scaler_context_flags(const SkColorSpace* cs) {
+ // If we're doing linear blending, then we can disable the gamma hacks.
+ // Otherwise, leave them on. In either case, we still want the contrast boost:
+ // TODO: Can we be even smarter about mask gamma based on the dest transfer function?
+ if (cs && cs->gammaIsLinear()) {
+ return SkScalerContextFlags::kBoostContrast;
+ } else {
+ return SkScalerContextFlags::kFakeGammaAndBoostContrast;
+ }
+}
+
+// TODO: collect this up into a single class when all the details are worked out.
+// This is duplicate code. The original is in SubRunContainer.cpp.
+std::tuple<SkZip<const SkGlyph*, SkPoint>, SkZip<SkGlyphID, SkPoint>>
+prepare_for_path_drawing(SkStrike* strike,
+ SkZip<const SkGlyphID, const SkPoint> source,
+ SkZip<const SkGlyph*, SkPoint> acceptedBuffer,
+ SkZip<SkGlyphID, SkPoint> rejectedBuffer) {
+ int acceptedSize = 0;
+ int rejectedSize = 0;
+ strike->lock();
+ for (auto [glyphID, pos] : source) {
+ if (!SkScalarsAreFinite(pos.x(), pos.y())) {
+ continue;
+ }
+ const SkPackedGlyphID packedID{glyphID};
+ switch (SkGlyphDigest digest = strike->digestFor(kPath, packedID);
+ digest.actionFor(kPath)) {
+ case GlyphAction::kAccept:
+ acceptedBuffer[acceptedSize++] = std::make_tuple(strike->glyph(digest), pos);
+ break;
+ case GlyphAction::kReject:
+ rejectedBuffer[rejectedSize++] = std::make_tuple(glyphID, pos);
+ break;
+ default:
+ break;
+ }
+ }
+ strike->unlock();
+ return {acceptedBuffer.first(acceptedSize), rejectedBuffer.first(rejectedSize)};
+}
+
+// TODO: collect this up into a single class when all the details are worked out.
+// This is duplicate code. The original is in SubRunContainer.cpp.
+std::tuple<SkZip<const SkGlyph*, SkPoint>, SkZip<SkGlyphID, SkPoint>>
+prepare_for_drawable_drawing(SkStrike* strike,
+ SkZip<const SkGlyphID, const SkPoint> source,
+ SkZip<const SkGlyph*, SkPoint> acceptedBuffer,
+ SkZip<SkGlyphID, SkPoint> rejectedBuffer) {
+ int acceptedSize = 0;
+ int rejectedSize = 0;
+ strike->lock();
+ for (auto [glyphID, pos] : source) {
+ if (!SkScalarsAreFinite(pos.x(), pos.y())) {
+ continue;
+ }
+ const SkPackedGlyphID packedID{glyphID};
+ switch (SkGlyphDigest digest = strike->digestFor(kDrawable, packedID);
+ digest.actionFor(kDrawable)) {
+ case GlyphAction::kAccept:
+ acceptedBuffer[acceptedSize++] = std::make_tuple(strike->glyph(digest), pos);
+ break;
+ case GlyphAction::kReject:
+ rejectedBuffer[rejectedSize++] = std::make_tuple(glyphID, pos);
+ break;
+ default:
+ break;
+ }
+ }
+ strike->unlock();
+ return {acceptedBuffer.first(acceptedSize), rejectedBuffer.first(rejectedSize)};
+}
+
+std::tuple<SkZip<const SkGlyph*, SkPoint>, SkZip<SkGlyphID, SkPoint>>
+prepare_for_direct_mask_drawing(SkStrike* strike,
+ const SkMatrix& creationMatrix,
+ SkZip<const SkGlyphID, const SkPoint> source,
+ SkZip<const SkGlyph*, SkPoint> acceptedBuffer,
+ SkZip<SkGlyphID, SkPoint> rejectedBuffer) {
+ const SkIPoint mask = strike->roundingSpec().ignorePositionFieldMask;
+ const SkPoint halfSampleFreq = strike->roundingSpec().halfAxisSampleFreq;
+
+ // Build up the mapping from source space to device space. Add the rounding constant
+ // halfSampleFreq, so we just need to floor to get the device result.
+ SkMatrix positionMatrixWithRounding = creationMatrix;
+ positionMatrixWithRounding.postTranslate(halfSampleFreq.x(), halfSampleFreq.y());
+
+ int acceptedSize = 0;
+ int rejectedSize = 0;
+ strike->lock();
+ for (auto [glyphID, pos] : source) {
+ if (!SkScalarsAreFinite(pos.x(), pos.y())) {
+ continue;
+ }
+
+ const SkPoint mappedPos = positionMatrixWithRounding.mapPoint(pos);
+ const SkPackedGlyphID packedGlyphID = SkPackedGlyphID{glyphID, mappedPos, mask};
+ switch (SkGlyphDigest digest = strike->digestFor(kDirectMaskCPU, packedGlyphID);
+ digest.actionFor(kDirectMaskCPU)) {
+ case GlyphAction::kAccept: {
+ const SkPoint roundedPos{SkScalarFloorToScalar(mappedPos.x()),
+ SkScalarFloorToScalar(mappedPos.y())};
+ acceptedBuffer[acceptedSize++] =
+ std::make_tuple(strike->glyph(digest), roundedPos);
+ break;
+ }
+ case GlyphAction::kReject:
+ rejectedBuffer[rejectedSize++] = std::make_tuple(glyphID, pos);
+ break;
+ default:
+ break;
+ }
+ }
+ strike->unlock();
+
+ return {acceptedBuffer.first(acceptedSize), rejectedBuffer.first(rejectedSize)};
+}
+} // namespace
+
+// -- SkGlyphRunListPainterCPU ---------------------------------------------------------------------
+SkGlyphRunListPainterCPU::SkGlyphRunListPainterCPU(const SkSurfaceProps& props,
+ SkColorType colorType,
+ SkColorSpace* cs)
+ : fDeviceProps{props}
+ , fBitmapFallbackProps{SkSurfaceProps{props.flags(), kUnknown_SkPixelGeometry}}
+ , fColorType{colorType}
+ , fScalerContextFlags{compute_scaler_context_flags(cs)} {}
+
+void SkGlyphRunListPainterCPU::drawForBitmapDevice(SkCanvas* canvas,
+ const BitmapDevicePainter* bitmapDevice,
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& paint,
+ const SkMatrix& drawMatrix) {
+ SkSTArray<64, const SkGlyph*> acceptedPackedGlyphIDs;
+ SkSTArray<64, SkPoint> acceptedPositions;
+ SkSTArray<64, SkGlyphID> rejectedGlyphIDs;
+ SkSTArray<64, SkPoint> rejectedPositions;
+ const int maxGlyphRunSize = glyphRunList.maxGlyphRunSize();
+ acceptedPackedGlyphIDs.resize(maxGlyphRunSize);
+ acceptedPositions.resize(maxGlyphRunSize);
+ const auto acceptedBuffer = SkMakeZip(acceptedPackedGlyphIDs, acceptedPositions);
+ rejectedGlyphIDs.resize(maxGlyphRunSize);
+ rejectedPositions.resize(maxGlyphRunSize);
+ const auto rejectedBuffer = SkMakeZip(rejectedGlyphIDs, rejectedPositions);
+
+ // The bitmap blitters can only draw lcd text to a N32 bitmap in srcOver. Otherwise,
+ // convert the lcd text into A8 text. The props communicate this to the scaler.
+ auto& props = (kN32_SkColorType == fColorType && paint.isSrcOver())
+ ? fDeviceProps
+ : fBitmapFallbackProps;
+
+ SkPoint drawOrigin = glyphRunList.origin();
+ SkMatrix positionMatrix{drawMatrix};
+ positionMatrix.preTranslate(drawOrigin.x(), drawOrigin.y());
+ for (auto& glyphRun : glyphRunList) {
+ const SkFont& runFont = glyphRun.font();
+
+ SkZip<const SkGlyphID, const SkPoint> source = glyphRun.source();
+
+ if (SkStrikeSpec::ShouldDrawAsPath(paint, runFont, positionMatrix)) {
+ auto [strikeSpec, strikeToSourceScale] =
+ SkStrikeSpec::MakePath(runFont, paint, props, fScalerContextFlags);
+
+ auto strike = strikeSpec.findOrCreateStrike();
+
+ {
+ auto [accepted, rejected] = prepare_for_path_drawing(strike.get(),
+ source,
+ acceptedBuffer,
+ rejectedBuffer);
+
+ source = rejected;
+ // The paint we draw paths with must have the same anti-aliasing state as the
+ // runFont allowing the paths to have the same edging as the glyph masks.
+ SkPaint pathPaint = paint;
+ pathPaint.setAntiAlias(runFont.hasSomeAntiAliasing());
+
+ const bool stroking = pathPaint.getStyle() != SkPaint::kFill_Style;
+ const bool hairline = pathPaint.getStrokeWidth() == 0;
+ const bool needsExactCTM = pathPaint.getShader() ||
+ pathPaint.getPathEffect() ||
+ pathPaint.getMaskFilter() ||
+ (stroking && !hairline);
+
+ if (!needsExactCTM) {
+ for (auto [glyph, pos] : accepted) {
+ const SkPath* path = glyph->path();
+ SkMatrix m;
+ SkPoint translate = drawOrigin + pos;
+ m.setScaleTranslate(strikeToSourceScale, strikeToSourceScale,
+ translate.x(), translate.y());
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->concat(m);
+ canvas->drawPath(*path, pathPaint);
+ }
+ } else {
+ for (auto [glyph, pos] : accepted) {
+ const SkPath* path = glyph->path();
+ SkMatrix m;
+ SkPoint translate = drawOrigin + pos;
+ m.setScaleTranslate(strikeToSourceScale, strikeToSourceScale,
+ translate.x(), translate.y());
+
+ SkPath deviceOutline;
+ path->transform(m, &deviceOutline);
+ deviceOutline.setIsVolatile(true);
+ canvas->drawPath(deviceOutline, pathPaint);
+ }
+ }
+ }
+
+ if (!source.empty()) {
+ auto [accepted, rejected] = prepare_for_drawable_drawing(strike.get(),
+ source,
+ acceptedBuffer,
+ rejectedBuffer);
+ source = rejected;
+
+ for (auto [glyph, pos] : accepted) {
+ SkDrawable* drawable = glyph->drawable();
+ SkMatrix m;
+ SkPoint translate = drawOrigin + pos;
+ m.setScaleTranslate(strikeToSourceScale, strikeToSourceScale,
+ translate.x(), translate.y());
+ SkAutoCanvasRestore acr(canvas, false);
+ SkRect drawableBounds = drawable->getBounds();
+ m.mapRect(&drawableBounds);
+ canvas->saveLayer(&drawableBounds, &paint);
+ drawable->draw(canvas, &m);
+ }
+ }
+ }
+ if (!source.empty() && !positionMatrix.hasPerspective()) {
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeMask(
+ runFont, paint, props, fScalerContextFlags, positionMatrix);
+
+ auto strike = strikeSpec.findOrCreateStrike();
+
+ auto [accepted, rejected] = prepare_for_direct_mask_drawing(strike.get(),
+ positionMatrix,
+ source,
+ acceptedBuffer,
+ rejectedBuffer);
+ source = rejected;
+ bitmapDevice->paintMasks(accepted, paint);
+ }
+ if (!source.empty()) {
+ std::vector<SkPoint> sourcePositions;
+
+ // Create a strike in source space to calculate scale information.
+ SkStrikeSpec scaleStrikeSpec = SkStrikeSpec::MakeMask(
+ runFont, paint, props, fScalerContextFlags, SkMatrix::I());
+ SkBulkGlyphMetrics metrics{scaleStrikeSpec};
+
+ auto glyphIDs = source.get<0>();
+ auto positions = source.get<1>();
+ SkSpan<const SkGlyph*> glyphs = metrics.glyphs(glyphIDs);
+ SkScalar maxScale = SK_ScalarMin;
+
+ // Calculate the scale that makes the longest edge 1:1 with its side in the cache.
+ for (auto [glyph, pos] : SkMakeZip(glyphs, positions)) {
+ SkPoint corners[4];
+ SkPoint srcPos = pos + drawOrigin;
+ // Store off the positions in device space to position the glyphs during drawing.
+ sourcePositions.push_back(srcPos);
+ SkRect rect = glyph->rect();
+ rect.makeOffset(srcPos);
+ positionMatrix.mapRectToQuad(corners, rect);
+ // left top -> right top
+ SkScalar scale = (corners[1] - corners[0]).length() / rect.width();
+ maxScale = std::max(maxScale, scale);
+ // right top -> right bottom
+ scale = (corners[2] - corners[1]).length() / rect.height();
+ maxScale = std::max(maxScale, scale);
+ // right bottom -> left bottom
+ scale = (corners[3] - corners[2]).length() / rect.width();
+ maxScale = std::max(maxScale, scale);
+ // left bottom -> left top
+ scale = (corners[0] - corners[3]).length() / rect.height();
+ maxScale = std::max(maxScale, scale);
+ }
+
+ if (maxScale <= 0) {
+ continue; // to the next run.
+ }
+
+ if (maxScale * runFont.getSize() > 256) {
+ maxScale = 256.0f / runFont.getSize();
+ }
+
+ SkMatrix cacheScale = SkMatrix::Scale(maxScale, maxScale);
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeMask(
+ runFont, paint, props, fScalerContextFlags, cacheScale);
+
+ auto strike = strikeSpec.findOrCreateStrike();
+
+ auto [accepted, rejected] = prepare_for_direct_mask_drawing(strike.get(),
+ positionMatrix,
+ source,
+ acceptedBuffer,
+ rejectedBuffer);
+ const SkScalar invMaxScale = 1.0f/maxScale;
+ for (auto [glyph, srcPos] : SkMakeZip(accepted.get<0>(), sourcePositions)) {
+ SkMask mask = glyph->mask();
+ // TODO: is this needed, or will A8 and BW just work?
+ if (mask.fFormat != SkMask::kARGB32_Format) {
+ continue;
+ }
+ SkBitmap bm;
+ bm.installPixels(SkImageInfo::MakeN32Premul(mask.fBounds.size()),
+ mask.fImage,
+ mask.fRowBytes);
+
+ // Since the glyph in the cache is scaled by maxScale, its top left vector is too
+ // long. Reduce it to find proper positions on the device.
+ SkPoint realPos =
+ srcPos + SkPoint::Make(mask.fBounds.left(), mask.fBounds.top())*invMaxScale;
+
+ // Calculate the preConcat matrix for drawBitmap to get the rectangle from the
+ // glyph cache (which is multiplied by maxScale) to land in the right place.
+ SkMatrix translate = SkMatrix::Translate(realPos);
+ translate.preScale(invMaxScale, invMaxScale);
+
+ // Draw the bitmap using the rect from the scaled cache, and not the source
+ // rectangle for the glyph.
+ bitmapDevice->drawBitmap(
+ bm, translate, nullptr, SkSamplingOptions{SkFilterMode::kLinear},
+ paint);
+ }
+ }
+
+ // TODO: have the mask stage above reject the glyphs that are too big, and handle the
+ // rejects in a more sophisticated stage.
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkGlyphRunPainter.h b/gfx/skia/skia/src/core/SkGlyphRunPainter.h
new file mode 100644
index 0000000000..8519770509
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGlyphRunPainter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyphRunPainter_DEFINED
+#define SkGlyphRunPainter_DEFINED
+
+#include "include/core/SkColorType.h"
+#include "include/core/SkSurfaceProps.h"
+#include "src/base/SkZip.h"
+#include "src/core/SkScalerContext.h"
+
+class SkColorSpace;
+class SkDrawableGlyphBuffer;
+namespace sktext { class GlyphRunList; }
+
+// -- SkGlyphRunListPainterCPU ---------------------------------------------------------------------
+class SkGlyphRunListPainterCPU {
+public:
+ class BitmapDevicePainter {
+ public:
+ BitmapDevicePainter() = default;
+ BitmapDevicePainter(const BitmapDevicePainter&) = default;
+ virtual ~BitmapDevicePainter() = default;
+
+ virtual void paintMasks(SkZip<const SkGlyph*, SkPoint> accepted,
+ const SkPaint& paint) const = 0;
+ virtual void drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect* dstOrNull,
+ const SkSamplingOptions&, const SkPaint&) const = 0;
+ };
+
+ SkGlyphRunListPainterCPU(const SkSurfaceProps& props,
+ SkColorType colorType,
+ SkColorSpace* cs);
+
+ void drawForBitmapDevice(
+ SkCanvas* canvas, const BitmapDevicePainter* bitmapDevice,
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint,
+ const SkMatrix& drawMatrix);
+private:
+ // The props as on the actual device.
+ const SkSurfaceProps fDeviceProps;
+
+ // The props for when the bitmap device can't draw LCD text.
+ const SkSurfaceProps fBitmapFallbackProps;
+ const SkColorType fColorType;
+ const SkScalerContextFlags fScalerContextFlags;
+};
+#endif // SkGlyphRunPainter_DEFINED
diff --git a/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp b/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp
new file mode 100644
index 0000000000..9f18c115f8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGpuBlurUtils.cpp
@@ -0,0 +1,1039 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGpuBlurUtils.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkRect.h"
+#include "src/base/SkMathPriv.h"
+
+#if defined(SK_GANESH)
+#include "include/core/SkColorSpace.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/effects/GrGaussianConvolutionFragmentProcessor.h"
+#include "src/gpu/ganesh/effects/GrMatrixConvolutionEffect.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+
+#include "src/gpu/ganesh/SurfaceDrawContext.h"
+
+using Direction = GrGaussianConvolutionFragmentProcessor::Direction;
+
+static void fill_in_2D_gaussian_kernel(
+ float* kernel, int width, int height, SkScalar sigmaX, SkScalar sigmaY) {
+ const float twoSigmaSqrdX = 2.0f * SkScalarToFloat(SkScalarSquare(sigmaX));
+ const float twoSigmaSqrdY = 2.0f * SkScalarToFloat(SkScalarSquare(sigmaY));
+
+ // SkGpuBlurUtils::GaussianBlur() should have detected the cases where a 2D blur
+ // degenerates to a 1D on X or Y, or to the identity.
+ SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(sigmaX) &&
+ !SkGpuBlurUtils::IsEffectivelyZeroSigma(sigmaY));
+ SkASSERT(!SkScalarNearlyZero(twoSigmaSqrdX) && !SkScalarNearlyZero(twoSigmaSqrdY));
+
+ const float sigmaXDenom = 1.0f / twoSigmaSqrdX;
+ const float sigmaYDenom = 1.0f / twoSigmaSqrdY;
+ const int xRadius = width / 2;
+ const int yRadius = height / 2;
+
+ float sum = 0.0f;
+ for (int x = 0; x < width; x++) {
+ float xTerm = static_cast<float>(x - xRadius);
+ xTerm = xTerm * xTerm * sigmaXDenom;
+ for (int y = 0; y < height; y++) {
+ float yTerm = static_cast<float>(y - yRadius);
+ float xyTerm = sk_float_exp(-(xTerm + yTerm * yTerm * sigmaYDenom));
+ // Note that the constant term (1/(sqrt(2*pi*sigma^2)) of the Gaussian
+ // is dropped here, since we renormalize the kernel below.
+ kernel[y * width + x] = xyTerm;
+ sum += xyTerm;
+ }
+ }
+ // Normalize the kernel
+ float scale = 1.0f / sum;
+ for (int i = 0; i < width * height; ++i) {
+ kernel[i] *= scale;
+ }
+}
+
+/**
+ * Draws 'dstRect' into 'surfaceFillContext' evaluating a 1D Gaussian over 'srcView'. The src rect
+ * is 'dstRect' offset by 'dstToSrcOffset'. 'mode' and 'bounds' are applied to the src coords.
+ */
+static void convolve_gaussian_1d(skgpu::ganesh::SurfaceFillContext* sfc,
+ GrSurfaceProxyView srcView,
+ const SkIRect srcSubset,
+ SkIVector dstToSrcOffset,
+ const SkIRect& dstRect,
+ SkAlphaType srcAlphaType,
+ Direction direction,
+ int radius,
+ float sigma,
+ SkTileMode mode) {
+ SkASSERT(radius && !SkGpuBlurUtils::IsEffectivelyZeroSigma(sigma));
+ auto wm = SkTileModeToWrapMode(mode);
+ auto srcRect = dstRect.makeOffset(dstToSrcOffset);
+ // NOTE: This could just be GrMatrixConvolutionEffect with one of the dimensions set to 1
+ // and the appropriate kernel already computed, but there's value in keeping the shader simpler.
+ // TODO(michaelludwig): Is this true? If not, is the shader key simplicity worth it to have
+ // two convolution effects?
+ std::unique_ptr<GrFragmentProcessor> conv =
+ GrGaussianConvolutionFragmentProcessor::Make(std::move(srcView),
+ srcAlphaType,
+ direction,
+ radius,
+ sigma,
+ wm,
+ srcSubset,
+ &srcRect,
+ *sfc->caps());
+ sfc->fillRectToRectWithFP(srcRect, dstRect, std::move(conv));
+}
+
+static std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> convolve_gaussian_2d(
+ GrRecordingContext* rContext,
+ GrSurfaceProxyView srcView,
+ GrColorType srcColorType,
+ const SkIRect& srcBounds,
+ const SkIRect& dstBounds,
+ int radiusX,
+ int radiusY,
+ SkScalar sigmaX,
+ SkScalar sigmaY,
+ SkTileMode mode,
+ sk_sp<SkColorSpace> finalCS,
+ SkBackingFit dstFit) {
+ SkASSERT(radiusX && radiusY);
+ SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(sigmaX) &&
+ !SkGpuBlurUtils::IsEffectivelyZeroSigma(sigmaY));
+ // Create the sdc with default SkSurfaceProps. Gaussian blurs will soon use a
+ // SurfaceFillContext, at which point the SkSurfaceProps won't exist anymore.
+ auto sdc = skgpu::ganesh::SurfaceDrawContext::Make(
+ rContext,
+ srcColorType,
+ std::move(finalCS),
+ dstFit,
+ dstBounds.size(),
+ SkSurfaceProps(),
+ /*label=*/"SurfaceDrawContext_ConvolveGaussian2d",
+ 1,
+ GrMipmapped::kNo,
+ srcView.proxy()->isProtected(),
+ srcView.origin());
+ if (!sdc) {
+ return nullptr;
+ }
+
+ SkISize size = SkISize::Make(SkGpuBlurUtils::KernelWidth(radiusX),
+ SkGpuBlurUtils::KernelWidth(radiusY));
+ SkIPoint kernelOffset = SkIPoint::Make(radiusX, radiusY);
+ GrPaint paint;
+ auto wm = SkTileModeToWrapMode(mode);
+
+ // GaussianBlur() should have downsampled the request until we can handle the 2D blur with
+ // just a uniform array.
+ SkASSERT(size.area() <= GrMatrixConvolutionEffect::kMaxUniformSize);
+ float kernel[GrMatrixConvolutionEffect::kMaxUniformSize];
+ fill_in_2D_gaussian_kernel(kernel, size.width(), size.height(), sigmaX, sigmaY);
+ auto conv = GrMatrixConvolutionEffect::Make(rContext,
+ std::move(srcView),
+ srcBounds,
+ size,
+ kernel,
+ 1.0f,
+ 0.0f,
+ kernelOffset,
+ wm,
+ true,
+ *sdc->caps());
+
+ paint.setColorFragmentProcessor(std::move(conv));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ // 'dstBounds' is actually in 'srcView' proxy space. It represents the blurred area from src
+ // space that we want to capture in the new RTC at {0, 0}. Hence, we use its size as the rect to
+ // draw and it directly as the local rect.
+ sdc->fillRectToRect(nullptr,
+ std::move(paint),
+ GrAA::kNo,
+ SkMatrix::I(),
+ SkRect::Make(dstBounds.size()),
+ SkRect::Make(dstBounds));
+
+ return sdc;
+}
+
+static std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> convolve_gaussian(
+ GrRecordingContext* rContext,
+ GrSurfaceProxyView srcView,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ SkIRect srcBounds,
+ SkIRect dstBounds,
+ Direction direction,
+ int radius,
+ float sigma,
+ SkTileMode mode,
+ sk_sp<SkColorSpace> finalCS,
+ SkBackingFit fit) {
+ using namespace SkGpuBlurUtils;
+ SkASSERT(radius > 0 && !SkGpuBlurUtils::IsEffectivelyZeroSigma(sigma));
+ // Logically we're creating an infinite blur of 'srcBounds' of 'srcView' with 'mode' tiling
+ // and then capturing the 'dstBounds' portion in a new RTC where the top left of 'dstBounds' is
+ // at {0, 0} in the new RTC.
+ //
+ // Create the sdc with default SkSurfaceProps. Gaussian blurs will soon use a
+ // SurfaceFillContext, at which point the SkSurfaceProps won't exist anymore.
+ auto dstSDC =
+ skgpu::ganesh::SurfaceDrawContext::Make(rContext,
+ srcColorType,
+ std::move(finalCS),
+ fit,
+ dstBounds.size(),
+ SkSurfaceProps(),
+ /*label=*/"SurfaceDrawContext_ConvolveGaussian",
+ 1,
+ GrMipmapped::kNo,
+ srcView.proxy()->isProtected(),
+ srcView.origin());
+ if (!dstSDC) {
+ return nullptr;
+ }
+ // This represents the translation from 'dstSurfaceDrawContext' coords to 'srcView' coords.
+ auto rtcToSrcOffset = dstBounds.topLeft();
+
+ auto srcBackingBounds = SkIRect::MakeSize(srcView.proxy()->backingStoreDimensions());
+ // We've implemented splitting the dst bounds up into areas that do and do not need to
+ // use shader based tiling but only for some modes...
+ bool canSplit = mode == SkTileMode::kDecal || mode == SkTileMode::kClamp;
+ // ...but it's not worth doing the splitting if we'll get HW tiling instead of shader tiling.
+ bool canHWTile =
+ srcBounds.contains(srcBackingBounds) &&
+ !rContext->priv().caps()->reducedShaderMode() && // this mode always uses shader tiling
+ !(mode == SkTileMode::kDecal && !rContext->priv().caps()->clampToBorderSupport());
+ if (!canSplit || canHWTile) {
+ auto dstRect = SkIRect::MakeSize(dstBounds.size());
+ convolve_gaussian_1d(dstSDC.get(),
+ std::move(srcView),
+ srcBounds,
+ rtcToSrcOffset,
+ dstRect,
+ srcAlphaType,
+ direction,
+ radius,
+ sigma,
+ mode);
+ return dstSDC;
+ }
+
+ // 'left' and 'right' are the sub rects of 'srcBounds' where 'mode' must be enforced.
+ // 'mid' is the area where we can ignore the mode because the kernel does not reach to the
+ // edge of 'srcBounds'.
+ SkIRect mid, left, right;
+ // 'top' and 'bottom' are areas of 'dstBounds' that are entirely above/below 'srcBounds'.
+ // These are areas that we can simply clear in the dst in kDecal mode. If 'srcBounds'
+ // straddles the top edge of 'dstBounds' then 'top' will be inverted and we will skip
+ // processing for the rect. Similar for 'bottom'. The positional/directional labels above refer
+ // to the Direction::kX case and one should think of these as 'left' and 'right' for
+ // Direction::kY.
+ SkIRect top, bottom;
+ if (Direction::kX == direction) {
+ top = {dstBounds.left(), dstBounds.top(), dstBounds.right(), srcBounds.top()};
+ bottom = {dstBounds.left(), srcBounds.bottom(), dstBounds.right(), dstBounds.bottom()};
+
+ // Inset for sub-rect of 'srcBounds' where the x-dir kernel doesn't reach the edges, clipped
+ // vertically to dstBounds.
+ int midA = std::max(srcBounds.top(), dstBounds.top());
+ int midB = std::min(srcBounds.bottom(), dstBounds.bottom());
+ mid = {srcBounds.left() + radius, midA, srcBounds.right() - radius, midB};
+ if (mid.isEmpty()) {
+ // There is no middle where the bounds can be ignored. Make the left span the whole
+ // width of dst and we will not draw mid or right.
+ left = {dstBounds.left(), mid.top(), dstBounds.right(), mid.bottom()};
+ } else {
+ left = {dstBounds.left(), mid.top(), mid.left(), mid.bottom()};
+ right = {mid.right(), mid.top(), dstBounds.right(), mid.bottom()};
+ }
+ } else {
+ // This is the same as the x direction code if you turn your head 90 degrees CCW. Swap x and
+ // y and swap top/bottom with left/right.
+ top = {dstBounds.left(), dstBounds.top(), srcBounds.left(), dstBounds.bottom()};
+ bottom = {srcBounds.right(), dstBounds.top(), dstBounds.right(), dstBounds.bottom()};
+
+ int midA = std::max(srcBounds.left(), dstBounds.left());
+ int midB = std::min(srcBounds.right(), dstBounds.right());
+ mid = {midA, srcBounds.top() + radius, midB, srcBounds.bottom() - radius};
+
+ if (mid.isEmpty()) {
+ left = {mid.left(), dstBounds.top(), mid.right(), dstBounds.bottom()};
+ } else {
+ left = {mid.left(), dstBounds.top(), mid.right(), mid.top()};
+ right = {mid.left(), mid.bottom(), mid.right(), dstBounds.bottom()};
+ }
+ }
+
+ auto convolve = [&](SkIRect rect) {
+ // Transform rect into the render target's coord system.
+ rect.offset(-rtcToSrcOffset);
+ convolve_gaussian_1d(dstSDC.get(),
+ srcView,
+ srcBounds,
+ rtcToSrcOffset,
+ rect,
+ srcAlphaType,
+ direction,
+ radius,
+ sigma,
+ mode);
+ };
+ auto clear = [&](SkIRect rect) {
+ // Transform rect into the render target's coord system.
+ rect.offset(-rtcToSrcOffset);
+ dstSDC->clearAtLeast(rect, SK_PMColor4fTRANSPARENT);
+ };
+
+ // Doing mid separately will cause two draws to occur (left and right batch together). At
+ // small sizes of mid it is worse to issue more draws than to just execute the slightly
+ // more complicated shader that implements the tile mode across mid. This threshold is
+ // very arbitrary right now. It is believed that a 21x44 mid on a Moto G4 is a significant
+ // regression compared to doing one draw but it has not been locally evaluated or tuned.
+ // The optimal cutoff is likely to vary by GPU.
+ if (!mid.isEmpty() && mid.width() * mid.height() < 256 * 256) {
+ left.join(mid);
+ left.join(right);
+ mid = SkIRect::MakeEmpty();
+ right = SkIRect::MakeEmpty();
+ // It's unknown whether for kDecal it'd be better to expand the draw rather than a draw and
+ // up to two clears.
+ if (mode == SkTileMode::kClamp) {
+ left.join(top);
+ left.join(bottom);
+ top = SkIRect::MakeEmpty();
+ bottom = SkIRect::MakeEmpty();
+ }
+ }
+
+ if (!top.isEmpty()) {
+ if (mode == SkTileMode::kDecal) {
+ clear(top);
+ } else {
+ convolve(top);
+ }
+ }
+
+ if (!bottom.isEmpty()) {
+ if (mode == SkTileMode::kDecal) {
+ clear(bottom);
+ } else {
+ convolve(bottom);
+ }
+ }
+
+ if (mid.isEmpty()) {
+ convolve(left);
+ } else {
+ convolve(left);
+ convolve(right);
+ convolve(mid);
+ }
+ return dstSDC;
+}
+
+// Expand the contents of 'src' to fit in 'dstSize'. At this point, we are expanding an intermediate
+// image, so there's no need to account for a proxy offset from the original input.
+static std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> reexpand(
+ GrRecordingContext* rContext,
+ std::unique_ptr<skgpu::ganesh::SurfaceContext> src,
+ const SkRect& srcBounds,
+ SkISize dstSize,
+ sk_sp<SkColorSpace> colorSpace,
+ SkBackingFit fit) {
+ GrSurfaceProxyView srcView = src->readSurfaceView();
+ if (!srcView.asTextureProxy()) {
+ return nullptr;
+ }
+
+ GrColorType srcColorType = src->colorInfo().colorType();
+ SkAlphaType srcAlphaType = src->colorInfo().alphaType();
+
+ src.reset(); // no longer needed
+
+ // Create the sdc with default SkSurfaceProps. Gaussian blurs will soon use a
+ // SurfaceFillContext, at which point the SkSurfaceProps won't exist anymore.
+ auto dstSDC = skgpu::ganesh::SurfaceDrawContext::Make(rContext,
+ srcColorType,
+ std::move(colorSpace),
+ fit,
+ dstSize,
+ SkSurfaceProps(),
+ /*label=*/"SurfaceDrawContext_Reexpand",
+ 1,
+ GrMipmapped::kNo,
+ srcView.proxy()->isProtected(),
+ srcView.origin());
+ if (!dstSDC) {
+ return nullptr;
+ }
+
+ GrPaint paint;
+ auto fp = GrTextureEffect::MakeSubset(std::move(srcView),
+ srcAlphaType,
+ SkMatrix::I(),
+ GrSamplerState::Filter::kLinear,
+ srcBounds,
+ srcBounds,
+ *rContext->priv().caps());
+ paint.setColorFragmentProcessor(std::move(fp));
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ dstSDC->fillRectToRect(
+ nullptr, std::move(paint), GrAA::kNo, SkMatrix::I(), SkRect::Make(dstSize), srcBounds);
+
+ return dstSDC;
+}
+
+static std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> two_pass_gaussian(
+ GrRecordingContext* rContext,
+ GrSurfaceProxyView srcView,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ SkIRect srcBounds,
+ SkIRect dstBounds,
+ float sigmaX,
+ float sigmaY,
+ int radiusX,
+ int radiusY,
+ SkTileMode mode,
+ SkBackingFit fit) {
+ SkASSERT(radiusX || radiusY);
+ std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> dstSDC;
+ if (radiusX > 0) {
+ SkBackingFit xFit = radiusY > 0 ? SkBackingFit::kApprox : fit;
+ // Expand the dstBounds vertically to produce necessary content for the y-pass. Then we will
+ // clip these in a tile-mode dependent way to ensure the tile-mode gets implemented
+ // correctly. However, if we're not going to do a y-pass then we must use the original
+ // dstBounds without clipping to produce the correct output size.
+ SkIRect xPassDstBounds = dstBounds;
+ if (radiusY) {
+ xPassDstBounds.outset(0, radiusY);
+ if (mode == SkTileMode::kRepeat || mode == SkTileMode::kMirror) {
+ int srcH = srcBounds.height();
+ int srcTop = srcBounds.top();
+ if (mode == SkTileMode::kMirror) {
+ srcTop -= srcH;
+ srcH *= 2;
+ }
+
+ float floatH = srcH;
+ // First row above the dst rect where we should restart the tile mode.
+ int n = sk_float_floor2int_no_saturate((xPassDstBounds.top() - srcTop) / floatH);
+ int topClip = srcTop + n * srcH;
+
+ // First row below the dst rect where we should restart the tile mode.
+ n = sk_float_ceil2int_no_saturate((xPassDstBounds.bottom() - srcBounds.bottom()) /
+ floatH);
+ int bottomClip = srcBounds.bottom() + n * srcH;
+
+ xPassDstBounds.fTop = std::max(xPassDstBounds.top(), topClip);
+ xPassDstBounds.fBottom = std::min(xPassDstBounds.bottom(), bottomClip);
+ } else {
+ if (xPassDstBounds.fBottom <= srcBounds.top()) {
+ if (mode == SkTileMode::kDecal) {
+ return nullptr;
+ }
+ xPassDstBounds.fTop = srcBounds.top();
+ xPassDstBounds.fBottom = xPassDstBounds.fTop + 1;
+ } else if (xPassDstBounds.fTop >= srcBounds.bottom()) {
+ if (mode == SkTileMode::kDecal) {
+ return nullptr;
+ }
+ xPassDstBounds.fBottom = srcBounds.bottom();
+ xPassDstBounds.fTop = xPassDstBounds.fBottom - 1;
+ } else {
+ xPassDstBounds.fTop = std::max(xPassDstBounds.fTop, srcBounds.top());
+ xPassDstBounds.fBottom = std::min(xPassDstBounds.fBottom, srcBounds.bottom());
+ }
+ int leftSrcEdge = srcBounds.fLeft - radiusX;
+ int rightSrcEdge = srcBounds.fRight + radiusX;
+ if (mode == SkTileMode::kClamp) {
+ // In clamp the column just outside the src bounds has the same value as the
+ // column just inside, unlike decal.
+ leftSrcEdge += 1;
+ rightSrcEdge -= 1;
+ }
+ if (xPassDstBounds.fRight <= leftSrcEdge) {
+ if (mode == SkTileMode::kDecal) {
+ return nullptr;
+ }
+ xPassDstBounds.fLeft = xPassDstBounds.fRight - 1;
+ } else {
+ xPassDstBounds.fLeft = std::max(xPassDstBounds.fLeft, leftSrcEdge);
+ }
+ if (xPassDstBounds.fLeft >= rightSrcEdge) {
+ if (mode == SkTileMode::kDecal) {
+ return nullptr;
+ }
+ xPassDstBounds.fRight = xPassDstBounds.fLeft + 1;
+ } else {
+ xPassDstBounds.fRight = std::min(xPassDstBounds.fRight, rightSrcEdge);
+ }
+ }
+ }
+ dstSDC = convolve_gaussian(rContext,
+ std::move(srcView),
+ srcColorType,
+ srcAlphaType,
+ srcBounds,
+ xPassDstBounds,
+ Direction::kX,
+ radiusX,
+ sigmaX,
+ mode,
+ colorSpace,
+ xFit);
+ if (!dstSDC) {
+ return nullptr;
+ }
+ srcView = dstSDC->readSurfaceView();
+ SkIVector newDstBoundsOffset = dstBounds.topLeft() - xPassDstBounds.topLeft();
+ dstBounds = SkIRect::MakeSize(dstBounds.size()).makeOffset(newDstBoundsOffset);
+ srcBounds = SkIRect::MakeSize(xPassDstBounds.size());
+ }
+
+ if (!radiusY) {
+ return dstSDC;
+ }
+
+ return convolve_gaussian(rContext,
+ std::move(srcView),
+ srcColorType,
+ srcAlphaType,
+ srcBounds,
+ dstBounds,
+ Direction::kY,
+ radiusY,
+ sigmaY,
+ mode,
+ colorSpace,
+ fit);
+}
+
+namespace SkGpuBlurUtils {
+
+std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> GaussianBlur(GrRecordingContext* rContext,
+ GrSurfaceProxyView srcView,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ SkIRect dstBounds,
+ SkIRect srcBounds,
+ float sigmaX,
+ float sigmaY,
+ SkTileMode mode,
+ SkBackingFit fit) {
+ SkASSERT(rContext);
+ TRACE_EVENT2("skia.gpu", "GaussianBlur", "sigmaX", sigmaX, "sigmaY", sigmaY);
+
+ if (!srcView.asTextureProxy()) {
+ return nullptr;
+ }
+
+ int maxRenderTargetSize = rContext->priv().caps()->maxRenderTargetSize();
+ if (dstBounds.width() > maxRenderTargetSize || dstBounds.height() > maxRenderTargetSize) {
+ return nullptr;
+ }
+
+ int radiusX = SigmaRadius(sigmaX);
+ int radiusY = SigmaRadius(sigmaY);
+ // Attempt to reduce the srcBounds in order to detect that we can set the sigmas to zero or
+ // to reduce the amount of work to rescale the source if sigmas are large. TODO: Could consider
+ // how to minimize the required source bounds for repeat/mirror modes.
+ if (mode == SkTileMode::kClamp || mode == SkTileMode::kDecal) {
+ SkIRect reach = dstBounds.makeOutset(radiusX, radiusY);
+ SkIRect intersection;
+ if (!intersection.intersect(reach, srcBounds)) {
+ if (mode == SkTileMode::kDecal) {
+ return nullptr;
+ } else {
+ if (reach.fLeft >= srcBounds.fRight) {
+ srcBounds.fLeft = srcBounds.fRight - 1;
+ } else if (reach.fRight <= srcBounds.fLeft) {
+ srcBounds.fRight = srcBounds.fLeft + 1;
+ }
+ if (reach.fTop >= srcBounds.fBottom) {
+ srcBounds.fTop = srcBounds.fBottom - 1;
+ } else if (reach.fBottom <= srcBounds.fTop) {
+ srcBounds.fBottom = srcBounds.fTop + 1;
+ }
+ }
+ } else {
+ srcBounds = intersection;
+ }
+ }
+
+ if (mode != SkTileMode::kDecal) {
+ // All non-decal tile modes are equivalent for one pixel width/height src and amount to a
+ // single color value repeated at each column/row. Applying the normalized kernel to that
+ // column/row yields that same color. So no blurring is necessary.
+ if (srcBounds.width() == 1) {
+ sigmaX = 0.f;
+ radiusX = 0;
+ }
+ if (srcBounds.height() == 1) {
+ sigmaY = 0.f;
+ radiusY = 0;
+ }
+ }
+
+ // If we determined that there is no blurring necessary in either direction then just do a
+ // draw that applies the tile mode.
+ if (!radiusX && !radiusY) {
+ // Create the sdc with default SkSurfaceProps. Gaussian blurs will soon use a
+ // SurfaceFillContext, at which point the SkSurfaceProps won't exist anymore.
+ auto result =
+ skgpu::ganesh::SurfaceDrawContext::Make(rContext,
+ srcColorType,
+ std::move(colorSpace),
+ fit,
+ dstBounds.size(),
+ SkSurfaceProps(),
+ /*label=*/"SurfaceDrawContext_GaussianBlur",
+ 1,
+ GrMipmapped::kNo,
+ srcView.proxy()->isProtected(),
+ srcView.origin());
+ if (!result) {
+ return nullptr;
+ }
+ GrSamplerState sampler(SkTileModeToWrapMode(mode), GrSamplerState::Filter::kNearest);
+ auto fp = GrTextureEffect::MakeSubset(std::move(srcView),
+ srcAlphaType,
+ SkMatrix::I(),
+ sampler,
+ SkRect::Make(srcBounds),
+ SkRect::Make(dstBounds),
+ *rContext->priv().caps());
+ result->fillRectToRectWithFP(dstBounds, SkIRect::MakeSize(dstBounds.size()), std::move(fp));
+ return result;
+ }
+
+ if (sigmaX <= kMaxSigma && sigmaY <= kMaxSigma) {
+ SkASSERT(radiusX <= GrGaussianConvolutionFragmentProcessor::kMaxKernelRadius);
+ SkASSERT(radiusY <= GrGaussianConvolutionFragmentProcessor::kMaxKernelRadius);
+ // For really small blurs (certainly no wider than 5x5 on desktop GPUs) it is faster to just
+ // launch a single non separable kernel vs two launches.
+ const int kernelSize = (2 * radiusX + 1) * (2 * radiusY + 1);
+ if (radiusX > 0 && radiusY > 0 &&
+ kernelSize <= GrMatrixConvolutionEffect::kMaxUniformSize &&
+ !rContext->priv().caps()->reducedShaderMode()) {
+ // Apply the proxy offset to src bounds and offset directly
+ return convolve_gaussian_2d(rContext,
+ std::move(srcView),
+ srcColorType,
+ srcBounds,
+ dstBounds,
+ radiusX,
+ radiusY,
+ sigmaX,
+ sigmaY,
+ mode,
+ std::move(colorSpace),
+ fit);
+ }
+ // This will automatically degenerate into a single pass of X or Y if only one of the
+ // radii are non-zero.
+ return two_pass_gaussian(rContext,
+ std::move(srcView),
+ srcColorType,
+ srcAlphaType,
+ std::move(colorSpace),
+ srcBounds,
+ dstBounds,
+ sigmaX,
+ sigmaY,
+ radiusX,
+ radiusY,
+ mode,
+ fit);
+ }
+
+ GrColorInfo colorInfo(srcColorType, srcAlphaType, colorSpace);
+ auto srcCtx = rContext->priv().makeSC(srcView, colorInfo);
+ SkASSERT(srcCtx);
+
+ float scaleX = sigmaX > kMaxSigma ? kMaxSigma / sigmaX : 1.f;
+ float scaleY = sigmaY > kMaxSigma ? kMaxSigma / sigmaY : 1.f;
+ // We round down here so that when we recalculate sigmas we know they will be below
+ // kMaxSigma (but clamp to 1 so we don't have an empty texture).
+ SkISize rescaledSize = {std::max(sk_float_floor2int(srcBounds.width() * scaleX), 1),
+ std::max(sk_float_floor2int(srcBounds.height() * scaleY), 1)};
+ // Compute the sigmas using the actual scale factors used once we integerized the
+ // rescaledSize.
+ scaleX = static_cast<float>(rescaledSize.width()) / srcBounds.width();
+ scaleY = static_cast<float>(rescaledSize.height()) / srcBounds.height();
+ sigmaX *= scaleX;
+ sigmaY *= scaleY;
+
+ // When we are in clamp mode any artifacts in the edge pixels due to downscaling may be
+ // exacerbated because of the tile mode. The particularly egregious case is when the original
+ // image has transparent black around the edges and the downscaling pulls in some non-zero
+ // values from the interior. Ultimately it'd be better for performance if the calling code could
+ // give us extra context around the blur to account for this. We don't currently have a good way
+ // to communicate this up stack. So we leave a 1 pixel border around the rescaled src bounds.
+ // We populate the top 1 pixel tall row of this border by rescaling the top row of the original
+ // source bounds into it. Because this is only rescaling in x (i.e. rescaling a 1 pixel high
+ // row into a shorter but still 1 pixel high row) we won't read any interior values. And similar
+ // for the other three borders. We'll adjust the source/dest bounds rescaled blur so that this
+ // border of extra pixels is used as the edge pixels for clamp mode but the dest bounds
+ // corresponds only to the pixels inside the border (the normally rescaled pixels inside this
+ // border).
+ // Moreover, if we clamped the rescaled size to 1 column or row then we still have a sigma
+ // that is greater than kMaxSigma. By using a pad and making the src 3 wide/tall instead of
+ // 1 we can recurse again and do another downscale. Since mirror and repeat modes are trivial
+ // for a single col/row we only add padding based on sigma exceeding kMaxSigma for decal.
+ int padX = mode == SkTileMode::kClamp || (mode == SkTileMode::kDecal && sigmaX > kMaxSigma) ? 1
+ : 0;
+ int padY = mode == SkTileMode::kClamp || (mode == SkTileMode::kDecal && sigmaY > kMaxSigma) ? 1
+ : 0;
+ // Create the sdc with default SkSurfaceProps. Gaussian blurs will soon use a
+ // SurfaceFillContext, at which point the SkSurfaceProps won't exist anymore.
+ auto rescaledSDC = skgpu::ganesh::SurfaceDrawContext::Make(
+ srcCtx->recordingContext(),
+ colorInfo.colorType(),
+ colorInfo.refColorSpace(),
+ SkBackingFit::kApprox,
+ {rescaledSize.width() + 2 * padX, rescaledSize.height() + 2 * padY},
+ SkSurfaceProps(),
+ /*label=*/"RescaledSurfaceDrawContext",
+ 1,
+ GrMipmapped::kNo,
+ srcCtx->asSurfaceProxy()->isProtected(),
+ srcCtx->origin());
+ if (!rescaledSDC) {
+ return nullptr;
+ }
+ if ((padX || padY) && mode == SkTileMode::kDecal) {
+ rescaledSDC->clear(SkPMColor4f{0, 0, 0, 0});
+ }
+ if (!srcCtx->rescaleInto(rescaledSDC.get(),
+ SkIRect::MakeSize(rescaledSize).makeOffset(padX, padY),
+ srcBounds,
+ SkSurface::RescaleGamma::kSrc,
+ SkSurface::RescaleMode::kRepeatedLinear)) {
+ return nullptr;
+ }
+ if (mode == SkTileMode::kClamp) {
+ SkASSERT(padX == 1 && padY == 1);
+ // Rather than run a potentially multi-pass rescaler on single rows/columns we just do a
+ // single bilerp draw. If we find this quality unacceptable we should think more about how
+ // to rescale these with better quality but without 4 separate multi-pass downscales.
+ auto cheapDownscale = [&](SkIRect dstRect, SkIRect srcRect) {
+ rescaledSDC->drawTexture(nullptr,
+ srcCtx->readSurfaceView(),
+ srcAlphaType,
+ GrSamplerState::Filter::kLinear,
+ GrSamplerState::MipmapMode::kNone,
+ SkBlendMode::kSrc,
+ SK_PMColor4fWHITE,
+ SkRect::Make(srcRect),
+ SkRect::Make(dstRect),
+ GrQuadAAFlags::kNone,
+ SkCanvas::SrcRectConstraint::kFast_SrcRectConstraint,
+ SkMatrix::I(),
+ nullptr);
+ };
+ auto [dw, dh] = rescaledSize;
+ // These are the src rows and columns from the source that we will scale into the dst padding.
+ float sLCol = srcBounds.left();
+ float sTRow = srcBounds.top();
+ float sRCol = srcBounds.right() - 1;
+ float sBRow = srcBounds.bottom() - 1;
+
+ int sx = srcBounds.left();
+ int sy = srcBounds.top();
+ int sw = srcBounds.width();
+ int sh = srcBounds.height();
+
+ // Downscale the edges from the original source. These draws should batch together (and with
+ // the above interior rescaling when it is a single pass).
+ cheapDownscale(SkIRect::MakeXYWH(0, 1, 1, dh), SkIRect::MakeXYWH(sLCol, sy, 1, sh));
+ cheapDownscale(SkIRect::MakeXYWH(1, 0, dw, 1), SkIRect::MakeXYWH(sx, sTRow, sw, 1));
+ cheapDownscale(SkIRect::MakeXYWH(dw + 1, 1, 1, dh), SkIRect::MakeXYWH(sRCol, sy, 1, sh));
+ cheapDownscale(SkIRect::MakeXYWH(1, dh + 1, dw, 1), SkIRect::MakeXYWH(sx, sBRow, sw, 1));
+
+ // Copy the corners from the original source. These would batch with the edges except that
+ // at time of writing we recognize these can use kNearest and downgrade the filter. So they
+ // batch with each other but not the edge draws.
+ cheapDownscale(SkIRect::MakeXYWH(0, 0, 1, 1), SkIRect::MakeXYWH(sLCol, sTRow, 1, 1));
+ cheapDownscale(SkIRect::MakeXYWH(dw + 1, 0, 1, 1), SkIRect::MakeXYWH(sRCol, sTRow, 1, 1));
+ cheapDownscale(SkIRect::MakeXYWH(dw + 1, dh + 1, 1, 1),
+ SkIRect::MakeXYWH(sRCol, sBRow, 1, 1));
+ cheapDownscale(SkIRect::MakeXYWH(0, dh + 1, 1, 1), SkIRect::MakeXYWH(sLCol, sBRow, 1, 1));
+ }
+ srcView = rescaledSDC->readSurfaceView();
+ // Drop the contexts so we don't hold the proxies longer than necessary.
+ rescaledSDC.reset();
+ srcCtx.reset();
+
+ // Compute the dst bounds in the scaled down space. First move the origin to be at the top
+ // left since we trimmed off everything above and to the left of the original src bounds during
+ // the rescale.
+ SkRect scaledDstBounds = SkRect::Make(dstBounds.makeOffset(-srcBounds.topLeft()));
+ scaledDstBounds.fLeft *= scaleX;
+ scaledDstBounds.fTop *= scaleY;
+ scaledDstBounds.fRight *= scaleX;
+ scaledDstBounds.fBottom *= scaleY;
+ // Account for padding in our rescaled src, if any.
+ scaledDstBounds.offset(padX, padY);
+ // Turn the scaled down dst bounds into an integer pixel rect.
+ auto scaledDstBoundsI = scaledDstBounds.roundOut();
+
+ SkIRect scaledSrcBounds = SkIRect::MakeSize(srcView.dimensions());
+ auto sdc = GaussianBlur(rContext,
+ std::move(srcView),
+ srcColorType,
+ srcAlphaType,
+ colorSpace,
+ scaledDstBoundsI,
+ scaledSrcBounds,
+ sigmaX,
+ sigmaY,
+ mode,
+ fit);
+ if (!sdc) {
+ return nullptr;
+ }
+ // We rounded out the integer scaled dst bounds. Select the fractional dst bounds from the
+ // integer dimension blurred result when we scale back up.
+ scaledDstBounds.offset(-scaledDstBoundsI.left(), -scaledDstBoundsI.top());
+ return reexpand(rContext,
+ std::move(sdc),
+ scaledDstBounds,
+ dstBounds.size(),
+ std::move(colorSpace),
+ fit);
+}
+
+bool ComputeBlurredRRectParams(const SkRRect& srcRRect,
+ const SkRRect& devRRect,
+ SkScalar sigma,
+ SkScalar xformedSigma,
+ SkRRect* rrectToDraw,
+ SkISize* widthHeight,
+ SkScalar rectXs[kBlurRRectMaxDivisions],
+ SkScalar rectYs[kBlurRRectMaxDivisions],
+ SkScalar texXs[kBlurRRectMaxDivisions],
+ SkScalar texYs[kBlurRRectMaxDivisions]) {
+ unsigned int devBlurRadius = 3 * SkScalarCeilToInt(xformedSigma - 1 / 6.0f);
+ SkScalar srcBlurRadius = 3.0f * sigma;
+
+ const SkRect& devOrig = devRRect.getBounds();
+ const SkVector& devRadiiUL = devRRect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& devRadiiUR = devRRect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& devRadiiLR = devRRect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& devRadiiLL = devRRect.radii(SkRRect::kLowerLeft_Corner);
+
+ const int devLeft = SkScalarCeilToInt(std::max<SkScalar>(devRadiiUL.fX, devRadiiLL.fX));
+ const int devTop = SkScalarCeilToInt(std::max<SkScalar>(devRadiiUL.fY, devRadiiUR.fY));
+ const int devRight = SkScalarCeilToInt(std::max<SkScalar>(devRadiiUR.fX, devRadiiLR.fX));
+ const int devBot = SkScalarCeilToInt(std::max<SkScalar>(devRadiiLL.fY, devRadiiLR.fY));
+
+ // This is a conservative check for nine-patchability
+ if (devOrig.fLeft + devLeft + devBlurRadius >= devOrig.fRight - devRight - devBlurRadius ||
+ devOrig.fTop + devTop + devBlurRadius >= devOrig.fBottom - devBot - devBlurRadius) {
+ return false;
+ }
+
+ const SkVector& srcRadiiUL = srcRRect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& srcRadiiUR = srcRRect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& srcRadiiLR = srcRRect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& srcRadiiLL = srcRRect.radii(SkRRect::kLowerLeft_Corner);
+
+ const SkScalar srcLeft = std::max<SkScalar>(srcRadiiUL.fX, srcRadiiLL.fX);
+ const SkScalar srcTop = std::max<SkScalar>(srcRadiiUL.fY, srcRadiiUR.fY);
+ const SkScalar srcRight = std::max<SkScalar>(srcRadiiUR.fX, srcRadiiLR.fX);
+ const SkScalar srcBot = std::max<SkScalar>(srcRadiiLL.fY, srcRadiiLR.fY);
+
+ int newRRWidth = 2 * devBlurRadius + devLeft + devRight + 1;
+ int newRRHeight = 2 * devBlurRadius + devTop + devBot + 1;
+ widthHeight->fWidth = newRRWidth + 2 * devBlurRadius;
+ widthHeight->fHeight = newRRHeight + 2 * devBlurRadius;
+
+ const SkRect srcProxyRect = srcRRect.getBounds().makeOutset(srcBlurRadius, srcBlurRadius);
+
+ rectXs[0] = srcProxyRect.fLeft;
+ rectXs[1] = srcProxyRect.fLeft + 2 * srcBlurRadius + srcLeft;
+ rectXs[2] = srcProxyRect.fRight - 2 * srcBlurRadius - srcRight;
+ rectXs[3] = srcProxyRect.fRight;
+
+ rectYs[0] = srcProxyRect.fTop;
+ rectYs[1] = srcProxyRect.fTop + 2 * srcBlurRadius + srcTop;
+ rectYs[2] = srcProxyRect.fBottom - 2 * srcBlurRadius - srcBot;
+ rectYs[3] = srcProxyRect.fBottom;
+
+ texXs[0] = 0.0f;
+ texXs[1] = 2.0f * devBlurRadius + devLeft;
+ texXs[2] = 2.0f * devBlurRadius + devLeft + 1;
+ texXs[3] = SkIntToScalar(widthHeight->fWidth);
+
+ texYs[0] = 0.0f;
+ texYs[1] = 2.0f * devBlurRadius + devTop;
+ texYs[2] = 2.0f * devBlurRadius + devTop + 1;
+ texYs[3] = SkIntToScalar(widthHeight->fHeight);
+
+ const SkRect newRect = SkRect::MakeXYWH(SkIntToScalar(devBlurRadius),
+ SkIntToScalar(devBlurRadius),
+ SkIntToScalar(newRRWidth),
+ SkIntToScalar(newRRHeight));
+ SkVector newRadii[4];
+ newRadii[0] = {SkScalarCeilToScalar(devRadiiUL.fX), SkScalarCeilToScalar(devRadiiUL.fY)};
+ newRadii[1] = {SkScalarCeilToScalar(devRadiiUR.fX), SkScalarCeilToScalar(devRadiiUR.fY)};
+ newRadii[2] = {SkScalarCeilToScalar(devRadiiLR.fX), SkScalarCeilToScalar(devRadiiLR.fY)};
+ newRadii[3] = {SkScalarCeilToScalar(devRadiiLL.fX), SkScalarCeilToScalar(devRadiiLL.fY)};
+
+ rrectToDraw->setRectRadii(newRect, newRadii);
+ return true;
+}
+
+// TODO: it seems like there should be some synergy with SkBlurMask::ComputeBlurProfile
+// TODO: maybe cache this on the cpu side?
+// Builds a 1-row A8 bitmap holding the integral of a Gaussian over a six-sigma range,
+// for use as a blur-profile lookup table. Returns the table width (a power of two,
+// >= 32). If 'table' is null, only the width is computed and returned. Returns 0 on
+// NaN input, an overflow-prone sigma, or pixel-allocation failure.
+int CreateIntegralTable(float sixSigma, SkBitmap* table) {
+    // Check for NaN
+    if (sk_float_isnan(sixSigma)) {
+        return 0;
+    }
+    // Avoid overflow, covers both multiplying by 2 and finding next power of 2:
+    // 2*((2^31-1)/4 + 1) = 2*(2^29-1) + 2 = 2^30 and SkNextPow2(2^30) = 2^30
+    if (sixSigma > SK_MaxS32/4 + 1) {
+        return 0;
+    }
+    // The texture we're producing represents the integral of a normal distribution over a
+    // six-sigma range centered at zero. We want enough resolution so that the linear
+    // interpolation done in texture lookup doesn't introduce noticeable artifacts. We
+    // conservatively choose to have 2 texels for each dst pixel.
+    int minWidth = 2*((int)sk_float_ceil(sixSigma));
+    // Bin by powers of 2 with a minimum so we get good profile reuse.
+    int width = std::max(SkNextPow2(minWidth), 32);
+
+    if (!table) {
+        return width;
+    }
+
+    if (!table->tryAllocPixels(SkImageInfo::MakeA8(width, 1))) {
+        return 0;
+    }
+    // The endpoints are pinned: full coverage (255) on the left, zero on the right.
+    *table->getAddr8(0, 0) = 255;
+    const float invWidth = 1.f / width;
+    for (int i = 1; i < width - 1; ++i) {
+        float x = (i + 0.5f) * invWidth;
+        // Remap the texel center from [0,1] to the erf() argument spanning the
+        // six-sigma range (positive at the left edge, negative at the right), so
+        // 'integral' falls monotonically from ~1 to ~0 across the table.
+        x = (-6 * x + 3) * SK_ScalarRoot2Over2;
+        float integral = 0.5f * (std::erf(x) + 1.f);
+        *table->getAddr8(i, 0) = SkToU8(sk_float_round2int(255.f * integral));
+    }
+
+    *table->getAddr8(width - 1, 0) = 0;
+    table->setImmutable();
+    return table->width();
+}
+
+// Fills 'kernel' (an array of KernelWidth(radius) floats) with a normalized 1D
+// Gaussian centered at index 'radius'. 'radius' must equal SigmaRadius(sigma).
+void Compute1DGaussianKernel(float* kernel, float sigma, int radius) {
+    SkASSERT(radius == SigmaRadius(sigma));
+    if (SkGpuBlurUtils::IsEffectivelyZeroSigma(sigma)) {
+        // An effectively-zero sigma is an identity blur: a single tap of weight 1.
+        // Calling SigmaRadius() produces 1, just computing ceil(sigma)*3 produces 3
+        SkASSERT(KernelWidth(radius) == 1);
+        std::fill_n(kernel, 1, 0.f);
+        kernel[0] = 1.f;
+        return;
+    }
+
+    // If this fails, kEffectivelyZeroSigma isn't big enough to prevent precision issues
+    SkASSERT(!SkScalarNearlyZero(2.f * sigma * sigma));
+
+    const float sigmaDenom = 1.0f / (2.f * sigma * sigma);
+    int size = KernelWidth(radius);
+    float sum = 0.0f;
+    for (int i = 0; i < size; ++i) {
+        float term = static_cast<float>(i - radius);
+        // Note that the constant term (1/(sqrt(2*pi*sigma^2)) of the Gaussian
+        // is dropped here, since we renormalize the kernel below.
+        kernel[i] = sk_float_exp(-term * term * sigmaDenom);
+        sum += kernel[i];
+    }
+    // Normalize the kernel so the weights sum to exactly 1.
+    float scale = 1.0f / sum;
+    for (int i = 0; i < size; ++i) {
+        kernel[i] *= scale;
+    }
+}
+
+// Compresses a (2*radius + 1)-tap Gaussian into LinearKernelWidth(radius) weighted
+// bilinear taps: 'kernel' receives the combined weights and 'offset' the fractional
+// sample positions (relative to each tap's texel), exploiting GPU linear filtering
+// so adjacent texel pairs are fetched with a single sample.
+void Compute1DLinearGaussianKernel(float* kernel, float* offset, float sigma, int radius) {
+    // Given 2 adjacent gaussian points, they are blended as: Wi * Ci + Wj * Cj.
+    // The GPU will mix Ci and Cj as Ci * (1 - x) + Cj * x during sampling.
+    // Compute W', x such that W' * (Ci * (1 - x) + Cj * x) = Wi * Ci + Wj * Cj.
+    // Solving W' * x = Wj, W' * (1 - x) = Wi:
+    //   W' = Wi + Wj
+    //   x = Wj / (Wi + Wj)
+    auto get_new_weight = [](float* new_w, float* offset, float wi, float wj) {
+        *new_w = wi + wj;
+        *offset = wj / (wi + wj);
+    };
+
+    // Create a temporary standard kernel.
+    int size = KernelWidth(radius);
+    std::unique_ptr<float[]> temp_kernel(new float[size]);
+    Compute1DGaussianKernel(temp_kernel.get(), sigma, radius);
+
+    // Note that halfsize isn't just size / 2, but radius + 1. This is the size of the output array.
+    int halfsize = LinearKernelWidth(radius);
+    int halfradius = halfsize / 2;
+    int low_index = halfradius - 1;
+
+    // Compute1DGaussianKernel produces a full 2N + 1 kernel. Since the kernel can be mirrored,
+    // compute only the upper half and mirror to the lower half.
+
+    int index = radius;
+    if (radius & 1) {
+        // If N is odd, then use two samples.
+        // The centre texel gets sampled twice, so halve its influence for each sample.
+        // We essentially sample like this:
+        // Texel edges
+        // v    v    v    v
+        // |    |    |    |
+        // \-----^---/ Lower sample
+        //      \---^-----/ Upper sample
+        get_new_weight(&kernel[halfradius],
+                       &offset[halfradius],
+                       temp_kernel[index] * 0.5f,
+                       temp_kernel[index + 1]);
+        // Mirror this first pair to the lower half (offsets negate under reflection).
+        kernel[low_index] = kernel[halfradius];
+        offset[low_index] = -offset[halfradius];
+        index++;
+        low_index--;
+    } else {
+        // If N is even, then there are an even number of texels on either side of the centre texel.
+        // Sample the centre texel directly.
+        kernel[halfradius] = temp_kernel[index];
+        offset[halfradius] = 0.0f;
+    }
+    index++;
+
+    // Every other pair gets one sample.
+    for (int i = halfradius + 1; i < halfsize; index += 2, i++, low_index--) {
+        get_new_weight(&kernel[i], &offset[i], temp_kernel[index], temp_kernel[index + 1]);
+        // Shift the pair-relative offset to an absolute position from the kernel centre.
+        offset[i] += static_cast<float>(index - radius);
+
+        // Mirror to lower half.
+        kernel[low_index] = kernel[i];
+        offset[low_index] = -offset[i];
+    }
+}
+
+} // namespace SkGpuBlurUtils
+
+#endif // defined(SK_GANESH)
diff --git a/gfx/skia/skia/src/core/SkGpuBlurUtils.h b/gfx/skia/skia/src/core/SkGpuBlurUtils.h
new file mode 100644
index 0000000000..c16f20e29c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGpuBlurUtils.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGpuBlurUtils_DEFINED
+#define SkGpuBlurUtils_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_GANESH)
+#include "include/core/SkRefCnt.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/gpu/SkBackingFit.h"
+
+class GrRecordingContext;
+namespace skgpu {
+namespace ganesh {
+class SurfaceDrawContext;
+}
+} // namespace skgpu
+class GrSurfaceProxyView;
+class GrTexture;
+
+class SkBitmap;
+enum class SkTileMode;
+struct SkRect;
+
+namespace SkGpuBlurUtils {
+
+/** Maximum sigma before the implementation downscales the input image. */
+static constexpr float kMaxSigma = 4.f;
+
+/**
+ * Applies a 2D Gaussian blur to a given texture. The blurred result is returned
+ * as a surfaceDrawContext in case the caller wishes to draw into the result.
+ * The GrSurfaceOrigin of the result will match the GrSurfaceOrigin of srcView. The output
+ * color type, color space, and alpha type will be the same as the src.
+ *
+ * Note: one of sigmaX and sigmaY should be non-zero!
+ * @param context The GPU context
+ * @param srcView The source to be blurred.
+ * @param srcColorType The colorType of srcProxy
+ * @param srcAlphaType The alphaType of srcProxy
+ * @param srcColorSpace Color space of the source.
+ * @param dstBounds The destination bounds, relative to the source texture.
+ * @param srcBounds The source bounds, relative to the source texture's offset. No pixels
+ * will be sampled outside of this rectangle.
+ * @param sigmaX The blur's standard deviation in X.
+ * @param sigmaY The blur's standard deviation in Y.
+ * @param tileMode The mode to handle samples outside bounds.
+ * @param fit backing fit for the returned render target context
+ * @return The surfaceDrawContext containing the blurred result.
+ */
+std::unique_ptr<skgpu::ganesh::SurfaceDrawContext> GaussianBlur(
+ GrRecordingContext*,
+ GrSurfaceProxyView srcView,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ sk_sp<SkColorSpace> srcColorSpace,
+ SkIRect dstBounds,
+ SkIRect srcBounds,
+ float sigmaX,
+ float sigmaY,
+ SkTileMode mode,
+ SkBackingFit fit = SkBackingFit::kApprox);
+
+static const int kBlurRRectMaxDivisions = 6;
+
+// This method computes all the parameters for drawing a partially occluded nine-patched
+// blurred rrect mask:
+// rrectToDraw - the integerized rrect to draw in the mask
+// widthHeight - how large to make the mask (rrectToDraw will be centered in this coord sys)
+// rectXs, rectYs - the x & y coordinates of the covering geometry lattice
+// texXs, texYs - the texture coordinate at each point in rectXs & rectYs
+// It returns true if 'devRRect' is nine-patchable
+bool ComputeBlurredRRectParams(const SkRRect& srcRRect,
+ const SkRRect& devRRect,
+ SkScalar sigma,
+ SkScalar xformedSigma,
+ SkRRect* rrectToDraw,
+ SkISize* widthHeight,
+ SkScalar rectXs[kBlurRRectMaxDivisions],
+ SkScalar rectYs[kBlurRRectMaxDivisions],
+ SkScalar texXs[kBlurRRectMaxDivisions],
+ SkScalar texYs[kBlurRRectMaxDivisions]);
+
+int CreateIntegralTable(float sixSigma, SkBitmap* table);
+
+void Compute1DGaussianKernel(float* kernel, float sigma, int radius);
+
+void Compute1DLinearGaussianKernel(float* kernel, float* offset, float sigma, int radius);
+
+// Any sigmas smaller than this are effectively an identity blur so can skip convolution at a higher
+// level. The value was chosen because it corresponds roughly to a radius of 1/10px, and is slightly
+// greater than sqrt(1/2*sigma^2) for SK_ScalarNearlyZero.
+inline bool IsEffectivelyZeroSigma(float sigma) { return sigma <= 0.03f; }
+
+inline int SigmaRadius(float sigma) {
+ return IsEffectivelyZeroSigma(sigma) ? 0 : static_cast<int>(ceilf(sigma * 3.0f));
+}
+
+inline int KernelWidth(int radius) { return 2 * radius + 1; }
+
+inline int LinearKernelWidth(int radius) { return radius + 1; }
+
+} // namespace SkGpuBlurUtils
+
+#endif // defined(SK_GANESH)
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkGraphics.cpp b/gfx/skia/skia/src/core/SkGraphics.cpp
new file mode 100644
index 0000000000..46a6355413
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkGraphics.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkGraphics.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkOpenTypeSVGDecoder.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTime.h"
+#include "include/private/base/SkMath.h"
+#include "src/base/SkTSearch.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkCpu.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTypefaceCache.h"
+
+#include <stdlib.h>
+
+void SkGraphics::Init() {
+    // SkGraphics::Init() must be thread-safe and idempotent.
+    // Detect CPU features, then install the runtime-selected optimized routines.
+    SkCpu::CacheRuntimeFeatures();
+    SkOpts::Init();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGraphics::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
+ SkResourceCache::DumpMemoryStatistics(dump);
+ SkStrikeCache::DumpMemoryStatistics(dump);
+}
+
+void SkGraphics::PurgeAllCaches() {
+ SkGraphics::PurgeFontCache();
+ SkGraphics::PurgeResourceCache();
+ SkImageFilter_Base::PurgeCache();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkGraphics::GetFontCacheLimit() {
+ return SkStrikeCache::GlobalStrikeCache()->getCacheSizeLimit();
+}
+
+size_t SkGraphics::SetFontCacheLimit(size_t bytes) {
+ return SkStrikeCache::GlobalStrikeCache()->setCacheSizeLimit(bytes);
+}
+
+size_t SkGraphics::GetFontCacheUsed() {
+ return SkStrikeCache::GlobalStrikeCache()->getTotalMemoryUsed();
+}
+
+int SkGraphics::GetFontCacheCountLimit() {
+ return SkStrikeCache::GlobalStrikeCache()->getCacheCountLimit();
+}
+
+int SkGraphics::SetFontCacheCountLimit(int count) {
+ return SkStrikeCache::GlobalStrikeCache()->setCacheCountLimit(count);
+}
+
+int SkGraphics::GetFontCacheCountUsed() {
+ return SkStrikeCache::GlobalStrikeCache()->getCacheCountUsed();
+}
+
+void SkGraphics::PurgeFontCache() {
+ SkStrikeCache::GlobalStrikeCache()->purgeAll();
+ SkTypefaceCache::PurgeAll();
+}
+
+static SkGraphics::OpenTypeSVGDecoderFactory gSVGDecoderFactory = nullptr;
+
+SkGraphics::OpenTypeSVGDecoderFactory
+SkGraphics::SetOpenTypeSVGDecoderFactory(OpenTypeSVGDecoderFactory svgDecoderFactory) {
+ OpenTypeSVGDecoderFactory old(gSVGDecoderFactory);
+ gSVGDecoderFactory = svgDecoderFactory;
+ return old;
+}
+
+SkGraphics::OpenTypeSVGDecoderFactory SkGraphics::GetOpenTypeSVGDecoderFactory() {
+ return gSVGDecoderFactory;
+}
+
+extern bool gSkVMAllowJIT;
+
+void SkGraphics::AllowJIT() {
+ gSkVMAllowJIT = true;
+}
diff --git a/gfx/skia/skia/src/core/SkIDChangeListener.cpp b/gfx/skia/skia/src/core/SkIDChangeListener.cpp
new file mode 100644
index 0000000000..43c6a9530e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkIDChangeListener.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkIDChangeListener.h"
+
+/**
+ * Used to be notified when a gen/unique ID is invalidated, typically to preemptively purge
+ * associated items from a cache that are no longer reachable. The listener can
+ * be marked for deregistration if the cached item is removed before the listener is
+ * triggered. This prevents unbounded listener growth when cache items are routinely
+ * removed before the gen ID/unique ID is invalidated.
+ */
+
+SkIDChangeListener::SkIDChangeListener() : fShouldDeregister(false) {}
+
+SkIDChangeListener::~SkIDChangeListener() = default;
+
+using List = SkIDChangeListener::List;
+
+List::List() = default;
+
+List::~List() {
+    // We don't need the mutex. No other thread should have this list while it's being
+    // destroyed.
+    // Fire a final change notification for every listener not marked for deregistration.
+    for (auto& listener : fListeners) {
+        if (!listener->shouldDeregister()) {
+            listener->changed();
+        }
+    }
+}
+
+// Registers 'listener' (null is silently ignored). Opportunistically purges listeners
+// already marked for deregistration, bounding list growth between change events.
+void List::add(sk_sp<SkIDChangeListener> listener) {
+    if (!listener) {
+        return;
+    }
+    SkASSERT(!listener->shouldDeregister());
+
+    SkAutoMutexExclusive lock(fMutex);
+    // Clean out any stale listeners before we append the new one.
+    for (int i = 0; i < fListeners.size(); ++i) {
+        if (fListeners[i]->shouldDeregister()) {
+            // removeShuffle swaps the last element into slot i; decrement i so the
+            // swapped-in entry is also examined on the next iteration.
+            fListeners.removeShuffle(i--);  // No need to preserve the order after i.
+        }
+    }
+    fListeners.push_back(std::move(listener));
+}
+
+int List::count() const {
+ SkAutoMutexExclusive lock(fMutex);
+ return fListeners.size();
+}
+
+// Notifies every still-registered listener that the ID changed, then drops all
+// registrations (each listener fires at most once per registration).
+void List::changed() {
+    SkAutoMutexExclusive lock(fMutex);
+    for (auto& listener : fListeners) {
+        if (!listener->shouldDeregister()) {
+            listener->changed();
+        }
+    }
+    fListeners.clear();
+}
+
+void List::reset() {
+ SkAutoMutexExclusive lock(fMutex);
+ fListeners.clear();
+}
diff --git a/gfx/skia/skia/src/core/SkIPoint16.h b/gfx/skia/skia/src/core/SkIPoint16.h
new file mode 100644
index 0000000000..949ae2db3c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkIPoint16.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIPoint16_DEFINED
+#define SkIPoint16_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+
+/** \struct SkIPoint16
+ SkIPoint16 holds two 16 bit integer coordinates.
+ */
+struct SkIPoint16 {
+ int16_t fX; //!< x-axis value used by SkIPoint16
+
+ int16_t fY; //!< y-axis value used by SkIPoint16
+
+ /** Sets fX to x, fY to y. If SK_DEBUG is defined, asserts
+ if x or y does not fit in 16 bits.
+
+ @param x integer x-axis value of constructed SkIPoint
+ @param y integer y-axis value of constructed SkIPoint
+ @return SkIPoint16 (x, y)
+ */
+ static constexpr SkIPoint16 Make(int x, int y) {
+ return {SkToS16(x), SkToS16(y)};
+ }
+
+ /** Returns x-axis value of SkIPoint16.
+
+ @return fX
+ */
+ int16_t x() const { return fX; }
+
+ /** Returns y-axis value of SkIPoint.
+
+ @return fY
+ */
+ int16_t y() const { return fY; }
+
+ /** Sets fX to x and fY to y.
+
+ @param x new value for fX
+ @param y new value for fY
+ */
+ void set(int x, int y) {
+ fX = SkToS16(x);
+ fY = SkToS16(y);
+ }
+};
+
+#endif
+
diff --git a/gfx/skia/skia/src/core/SkImageFilter.cpp b/gfx/skia/skia/src/core/SkImageFilter.cpp
new file mode 100644
index 0000000000..67740fcfde
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilter.cpp
@@ -0,0 +1,682 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageFilter.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkRect.h"
+#include "include/private/base/SkSafe32.h"
+#include "src/core/SkFuzzLogging.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkLocalMatrixImageFilter.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#endif
+#include <atomic>
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// SkImageFilter - A number of the public APIs on SkImageFilter downcast to SkImageFilter_Base
+// in order to perform their actual work.
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Returns the number of inputs this filter will accept (some inputs can
+ * be NULL).
+ */
+int SkImageFilter::countInputs() const { return as_IFB(this)->fInputs.count(); }
+
+/**
+ * Returns the input filter at a given index, or NULL if no input is
+ * connected. The indices used are filter-specific.
+ */
+const SkImageFilter* SkImageFilter::getInput(int i) const {
+ SkASSERT(i < this->countInputs());
+ return as_IFB(this)->fInputs[i].get();
+}
+
+bool SkImageFilter::isColorFilterNode(SkColorFilter** filterPtr) const {
+ return as_IFB(this)->onIsColorFilterNode(filterPtr);
+}
+
+// Maps 'src' through this filter DAG in layer space. For kReverse_MapDirection,
+// returns the input bounds required to fill 'src' as output (clipped to the crop
+// rect when set; 'inputRect', if non-null, bounds the available content). For
+// kForward_MapDirection ('inputRect' must be null), returns the output bounds
+// generated from 'src' as input, with the crop rect applied manually afterwards.
+SkIRect SkImageFilter::filterBounds(const SkIRect& src, const SkMatrix& ctm,
+                                    MapDirection direction, const SkIRect* inputRect) const {
+    // The old filterBounds() function uses SkIRects that are defined in layer space so, while
+    // we still are supporting it, bypass SkIF_B's new public filter bounds functions and go right
+    // to the internal layer-space calculations.
+    skif::Mapping mapping{ctm};
+    if (kReverse_MapDirection == direction) {
+        skif::LayerSpace<SkIRect> targetOutput(src);
+        if (as_IFB(this)->cropRectIsSet()) {
+            skif::LayerSpace<SkIRect> outputCrop = mapping.paramToLayer(
+                    skif::ParameterSpace<SkRect>(as_IFB(this)->getCropRect().rect())).roundOut();
+            // Just intersect directly; unlike the forward-mapping case, since we start with the
+            // external target output, there's no need to embiggen due to affecting trans. black
+            if (!targetOutput.intersect(outputCrop)) {
+                // Nothing would be output by the filter, so return empty rect
+                return SkIRect::MakeEmpty();
+            }
+        }
+        skif::LayerSpace<SkIRect> content(inputRect ? *inputRect : src);
+        return SkIRect(as_IFB(this)->onGetInputLayerBounds(mapping, targetOutput, content));
+    } else {
+        SkASSERT(!inputRect);
+        skif::LayerSpace<SkIRect> content(src);
+        skif::LayerSpace<SkIRect> output = as_IFB(this)->onGetOutputLayerBounds(mapping, content);
+        // Manually apply the crop rect for now, until cropping is performed by a dedicated SkIF.
+        SkIRect dst;
+        as_IFB(this)->getCropRect().applyTo(
+                SkIRect(output), ctm, as_IFB(this)->onAffectsTransparentBlack(), &dst);
+        return dst;
+    }
+}
+
+// Conservative union of every input's fast bounds. A null input contributes 'src'
+// itself (the source image is treated as passing through unchanged); a filter with
+// no inputs returns 'src' directly.
+SkRect SkImageFilter::computeFastBounds(const SkRect& src) const {
+    if (0 == this->countInputs()) {
+        return src;
+    }
+    SkRect combinedBounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+    for (int i = 1; i < this->countInputs(); i++) {
+        const SkImageFilter* input = this->getInput(i);
+        if (input) {
+            combinedBounds.join(input->computeFastBounds(src));
+        } else {
+            combinedBounds.join(src);
+        }
+    }
+    return combinedBounds;
+}
+
+bool SkImageFilter::canComputeFastBounds() const {
+ return !as_IFB(this)->affectsTransparentBlack();
+}
+
+// True if this node, or any filter reachable through its inputs, reports that it
+// changes transparent black (onAffectsTransparentBlack()).
+bool SkImageFilter_Base::affectsTransparentBlack() const {
+    if (this->onAffectsTransparentBlack()) {
+        return true;
+    }
+    // Recurse through the DAG: any affecting descendant taints this node too.
+    for (int i = 0; i < this->countInputs(); i++) {
+        const SkImageFilter* input = this->getInput(i);
+        if (input && as_IFB(input)->affectsTransparentBlack()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Returns true and stores the color filter in '*filterPtr' only when this filter is
+// equivalent to a pure color filter: it must be a color-filter node, have no input,
+// and its color filter must leave transparent black untouched. On the partial-success
+// path the reference obtained from isColorFilterNode() is released before failing.
+bool SkImageFilter::asAColorFilter(SkColorFilter** filterPtr) const {
+    SkASSERT(nullptr != filterPtr);
+    if (!this->isColorFilterNode(filterPtr)) {
+        return false;
+    }
+    if (nullptr != this->getInput(0) || as_CFB(*filterPtr)->affectsTransparentBlack()) {
+        (*filterPtr)->unref();
+        return false;
+    }
+    return true;
+}
+
+sk_sp<SkImageFilter> SkImageFilter::makeWithLocalMatrix(const SkMatrix& matrix) const {
+ return SkLocalMatrixImageFilter::Make(matrix, this->refMe());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// SkImageFilter_Base
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Returns a process-unique, non-zero ID for this filter instance; zero is skipped
+// (even across int32 wraparound) so it can serve as an "invalid ID" sentinel.
+static int32_t next_image_filter_unique_id() {
+    static std::atomic<int32_t> nextID{1};
+
+    int32_t id;
+    do {
+        id = nextID.fetch_add(1, std::memory_order_relaxed);
+    } while (id == 0);
+    return id;
+}
+
+SkImageFilter_Base::SkImageFilter_Base(sk_sp<SkImageFilter> const* inputs,
+                                       int inputCount, const SkRect* cropRect)
+        : fUsesSrcInput(false)
+        , fCropRect(cropRect)
+        , fUniqueID(next_image_filter_unique_id()) {
+    fInputs.reset(inputCount);
+
+    for (int i = 0; i < inputCount; ++i) {
+        // A null input means this node samples the source image directly; also
+        // inherit any child's use of the source so fUsesSrcInput is true for the
+        // whole subtree rooted here.
+        if (!inputs[i] || as_IFB(inputs[i])->fUsesSrcInput) {
+            fUsesSrcInput = true;
+        }
+        fInputs[i] = inputs[i];
+    }
+}
+
+SkImageFilter_Base::~SkImageFilter_Base() {
+ SkImageFilterCache::Get()->purgeByImageFilter(this);
+}
+
+// Deserializes the input list and crop rect shared by all image filters.
+// 'expectedCount' < 0 accepts any input count; otherwise the serialized count must
+// match exactly. Returns false on malformed data (the buffer is invalidated by the
+// failed validate() calls).
+bool SkImageFilter_Base::Common::unflatten(SkReadBuffer& buffer, int expectedCount) {
+    const int count = buffer.readInt();
+    if (!buffer.validate(count >= 0)) {
+        return false;
+    }
+    if (!buffer.validate(expectedCount < 0 || count == expectedCount)) {
+        return false;
+    }
+
+#if defined(SK_BUILD_FOR_FUZZER)
+    // Cap the fan-in while fuzzing to keep DAG size in check.
+    if (count > 4) {
+        return false;
+    }
+#endif
+
+    SkASSERT(fInputs.empty());
+    for (int i = 0; i < count; i++) {
+        // A 'false' presence flag encodes an intentionally-null input.
+        fInputs.push_back(buffer.readBool() ? buffer.readImageFilter() : nullptr);
+        if (!buffer.isValid()) {
+            return false;
+        }
+    }
+    SkRect rect;
+    buffer.readRect(&rect);
+    if (!buffer.isValid() || !buffer.validate(SkIsValidRect(rect))) {
+        return false;
+    }
+
+    // Only the all-edges-or-none crop encodings are accepted.
+    uint32_t flags = buffer.readUInt();
+    if (!buffer.isValid() ||
+        !buffer.validate(flags == 0x0 || flags == CropRect::kHasAll_CropEdge)) {
+        return false;
+    }
+    fCropRect = CropRect(flags ? &rect : nullptr);
+    return buffer.isValid();
+}
+
+// Serializes the input list then the crop rect; mirror of Common::unflatten().
+void SkImageFilter_Base::flatten(SkWriteBuffer& buffer) const {
+    buffer.writeInt(fInputs.count());
+    for (int i = 0; i < fInputs.count(); i++) {
+        const SkImageFilter* input = this->getInput(i);
+        // Presence flag first, so null (source-image) inputs round-trip.
+        buffer.writeBool(input != nullptr);
+        if (input != nullptr) {
+            buffer.writeFlattenable(input);
+        }
+    }
+    buffer.writeRect(fCropRect.rect());
+    buffer.writeUInt(fCropRect.flags());
+}
+
+// Runs the filter DAG on 'context', consulting and populating the image-filter cache.
+// Returns an empty FilterResult when the desired output is empty or the context is
+// invalid.
+skif::FilterResult SkImageFilter_Base::filterImage(const skif::Context& context) const {
+    // TODO (michaelludwig) - Old filters have an implicit assumption that the source image
+    // (originally passed separately) has an origin of (0, 0). SkComposeImageFilter makes an effort
+    // to ensure that remains the case. Once everyone uses the new type systems for bounds, non
+    // (0, 0) source origins will be easy to support.
+    SkASSERT(context.source().layerBounds().left() == 0 &&
+             context.source().layerBounds().top() == 0 &&
+             context.source().layerBounds().right() == context.source().image()->width() &&
+             context.source().layerBounds().bottom() == context.source().image()->height());
+
+    skif::FilterResult result;
+    if (context.desiredOutput().isEmpty() || !context.isValid()) {
+        return result;
+    }
+
+    // When the DAG never samples the source image (fUsesSrcInput false), leave it out
+    // of the cache key so results can be shared across different sources.
+    uint32_t srcGenID = fUsesSrcInput ? context.sourceImage()->uniqueID() : 0;
+    const SkIRect srcSubset = fUsesSrcInput ? context.sourceImage()->subset()
+                                            : SkIRect::MakeWH(0, 0);
+
+    SkImageFilterCacheKey key(fUniqueID, context.mapping().layerMatrix(), context.clipBounds(),
+                              srcGenID, srcSubset);
+    if (context.cache() && context.cache()->get(key, &result)) {
+        return result;
+    }
+
+    result = this->onFilterImage(context);
+
+    if (context.gpuBacked()) {
+        SkASSERT(!result.image() || result.image()->isTextureBacked());
+    }
+
+    if (context.cache()) {
+        context.cache()->set(key, this, result);
+    }
+
+    return result;
+}
+
+skif::LayerSpace<SkIRect> SkImageFilter_Base::getInputBounds(
+ const skif::Mapping& mapping, const skif::DeviceSpace<SkIRect>& desiredOutput,
+ const skif::ParameterSpace<SkRect>* knownContentBounds) const {
+ // Map both the device-space desired coverage area and the known content bounds to layer space
+ skif::LayerSpace<SkIRect> desiredBounds = mapping.deviceToLayer(desiredOutput);
+
+ // TODO (michaelludwig) - To be removed once cropping is its own filter, since then an output
+ // crop would automatically adjust the required input of its child filter in this same way.
+ if (this->cropRectIsSet()) {
+ skif::LayerSpace<SkIRect> outputCrop =
+ mapping.paramToLayer(skif::ParameterSpace<SkRect>(fCropRect.rect())).roundOut();
+ if (!desiredBounds.intersect(outputCrop)) {
+ // Nothing would be output by the filter, so return empty rect
+ return skif::LayerSpace<SkIRect>(SkIRect::MakeEmpty());
+ }
+ }
+
+ // If we have no known content bounds use the desired coverage area, because that is the most
+ // conservative possibility.
+ skif::LayerSpace<SkIRect> contentBounds =
+ knownContentBounds ? mapping.paramToLayer(*knownContentBounds).roundOut()
+ : desiredBounds;
+
+ // Process the layer-space desired output with the filter DAG to determine required input
+ skif::LayerSpace<SkIRect> requiredInput = this->onGetInputLayerBounds(
+ mapping, desiredBounds, contentBounds);
+ // If we know what's actually going to be drawn into the layer, and we don't change transparent
+ // black, then we can further restrict the layer to what the known content is
+ // TODO (michaelludwig) - This logic could be moved into visitInputLayerBounds() when an input
+ // filter is null. Additionally, once all filters are robust to FilterResults with tile modes,
+ // we can always restrict the required input by content bounds since any additional transparent
+ // black is handled when producing the output result and sampling outside the input image with
+ // a decal tile mode.
+ if (knownContentBounds && !this->affectsTransparentBlack()) {
+ if (!requiredInput.intersect(contentBounds)) {
+ // Nothing would be output by the filter, so return empty rect
+ return skif::LayerSpace<SkIRect>(SkIRect::MakeEmpty());
+ }
+ }
+ return requiredInput;
+}
+
+skif::DeviceSpace<SkIRect> SkImageFilter_Base::getOutputBounds(
+ const skif::Mapping& mapping, const skif::ParameterSpace<SkRect>& contentBounds) const {
+ // Map the input content into the layer space where filtering will occur
+ skif::LayerSpace<SkRect> layerContent = mapping.paramToLayer(contentBounds);
+ // Determine the filter DAGs output bounds in layer space
+ skif::LayerSpace<SkIRect> filterOutput = this->onGetOutputLayerBounds(
+ mapping, layerContent.roundOut());
+ // FIXME (michaelludwig) - To be removed once cropping is isolated, but remain consistent with
+ // old filterBounds(kForward) behavior.
+ SkIRect dst;
+ as_IFB(this)->getCropRect().applyTo(
+ SkIRect(filterOutput), mapping.layerMatrix(),
+ as_IFB(this)->onAffectsTransparentBlack(), &dst);
+
+ // Map all the way to device space
+ return mapping.layerToDevice(skif::LayerSpace<SkIRect>(dst));
+}
+
+// TODO (michaelludwig) - Default to using the old onFilterImage, as filters are updated one by one.
+// Once the old function is gone, this onFilterImage() will be made a pure virtual.
+skif::FilterResult SkImageFilter_Base::onFilterImage(const skif::Context& context) const {
+ SkIPoint origin = {0, 0};
+ auto image = this->onFilterImage(context, &origin);
+ return skif::FilterResult(std::move(image), skif::LayerSpace<SkIPoint>(origin));
+}
+
+SkImageFilter_Base::MatrixCapability SkImageFilter_Base::getCTMCapability() const {
+ MatrixCapability result = this->onGetCTMCapability();
+ // CropRects need to apply in the source coordinate system, but are not aware of complex CTMs
+ // when performing clipping. For a simple fix, any filter with a crop rect set cannot support
+ // more than scale+translate CTMs until that's updated.
+ if (this->cropRectIsSet()) {
+ result = std::min(result, MatrixCapability::kScaleTranslate);
+ }
+ const int count = this->countInputs();
+ for (int i = 0; i < count; ++i) {
+ if (const SkImageFilter_Base* input = as_IFB(this->getInput(i))) {
+ result = std::min(result, input->getCTMCapability());
+ }
+ }
+ return result;
+}
+
+// Applies this crop rect (mapped through 'ctm') to 'imageBounds', writing the result
+// to 'cropped'. When 'embiggen' is true a crop edge may grow the bounds; otherwise
+// edges only ever shrink them. Edges not present in fFlags leave the image's own
+// edge in place, re-anchoring the crop's width/height against it.
+void SkImageFilter_Base::CropRect::applyTo(const SkIRect& imageBounds, const SkMatrix& ctm,
+                                           bool embiggen, SkIRect* cropped) const {
+    *cropped = imageBounds;
+    if (fFlags) {
+        SkRect devCropR;
+        ctm.mapRect(&devCropR, fRect);
+        SkIRect devICropR = devCropR.roundOut();
+
+        // Compute the left/top first, in case we need to modify the right/bottom for a missing edge
+        if (fFlags & kHasLeft_CropEdge) {
+            if (embiggen || devICropR.fLeft > cropped->fLeft) {
+                cropped->fLeft = devICropR.fLeft;
+            }
+        } else {
+            // No left edge: keep the crop's mapped width, anchored at the image's left.
+            devICropR.fRight = Sk32_sat_add(cropped->fLeft, devICropR.width());
+        }
+        if (fFlags & kHasTop_CropEdge) {
+            if (embiggen || devICropR.fTop > cropped->fTop) {
+                cropped->fTop = devICropR.fTop;
+            }
+        } else {
+            // No top edge: keep the crop's mapped height, anchored at the image's top.
+            devICropR.fBottom = Sk32_sat_add(cropped->fTop, devICropR.height());
+        }
+        if (fFlags & kHasWidth_CropEdge) {
+            if (embiggen || devICropR.fRight < cropped->fRight) {
+                cropped->fRight = devICropR.fRight;
+            }
+        }
+        if (fFlags & kHasHeight_CropEdge) {
+            if (embiggen || devICropR.fBottom < cropped->fBottom) {
+                cropped->fBottom = devICropR.fBottom;
+            }
+        }
+    }
+}
+
+bool SkImageFilter_Base::applyCropRect(const Context& ctx, const SkIRect& srcBounds,
+ SkIRect* dstBounds) const {
+ SkIRect tmpDst = this->onFilterNodeBounds(srcBounds, ctx.ctm(), kForward_MapDirection, nullptr);
+ fCropRect.applyTo(tmpDst, ctx.ctm(), this->onAffectsTransparentBlack(), dstBounds);
+ // Intersect against the clip bounds, in case the crop rect has
+ // grown the bounds beyond the original clip. This can happen for
+ // example in tiling, where the clip is much smaller than the filtered
+ // primitive. If we didn't do this, we would be processing the filter
+ // at the full crop rect size in every tile.
+ return dstBounds->intersect(ctx.clipBounds());
+}
+
+// Return a larger (newWidth x newHeight) copy of 'src' with black padding
+// around it; 'src' is drawn at (offX, offY) within the new image. Returns null if a working
+// surface cannot be created.
+static sk_sp<SkSpecialImage> pad_image(SkSpecialImage* src, const SkImageFilter_Base::Context& ctx,
+                                       int newWidth, int newHeight, int offX, int offY) {
+    // We would like to operate in the source's color space (so that we return an "identical"
+    // image, other than the padding). To achieve that, we'd create a new context using
+    // src->getColorSpace() to replace ctx.colorSpace().
+
+    // That fails in at least two ways. For formats that are texturable but not renderable (like
+    // F16 on some ES implementations), we can't create a surface to do the work. For sRGB, images
+    // may be tagged with an sRGB color space (which leads to an sRGB config in makeSurface). But
+    // the actual config of that sRGB image on a device with no sRGB support is non-sRGB.
+    //
+    // Rather than try to special case these situations, we execute the image padding in the
+    // destination color space. This should not affect the output of the DAG in (almost) any case,
+    // because the result of this call is going to be used as an input, where it would have been
+    // switched to the destination space anyway. The one exception would be a filter that expected
+    // to consume unclamped F16 data, but the padded version of the image is pre-clamped to 8888.
+    // We can revisit this logic if that ever becomes an actual problem.
+    sk_sp<SkSpecialSurface> surf(ctx.makeSurface(SkISize::Make(newWidth, newHeight)));
+    if (!surf) {
+        return nullptr;
+    }
+
+    SkCanvas* canvas = surf->getCanvas();
+    SkASSERT(canvas);
+
+    // Transparent black padding; the source is then drawn on top at its offset.
+    canvas->clear(0x0);
+
+    src->draw(canvas, offX, offY);
+
+    return surf->makeImageSnapshot();
+}
+
+// Applies the crop rect to 'src' (positioned at *srcOffset). If the cropped bounds fit inside
+// the source, the source is returned as-is (ref'ed); otherwise a padded copy sized to 'bounds'
+// is produced and *srcOffset is updated to the new origin. Returns null if the crop is empty.
+sk_sp<SkSpecialImage> SkImageFilter_Base::applyCropRectAndPad(const Context& ctx,
+                                                              SkSpecialImage* src,
+                                                              SkIPoint* srcOffset,
+                                                              SkIRect* bounds) const {
+    const SkIRect srcBounds = SkIRect::MakeXYWH(srcOffset->x(), srcOffset->y(),
+                                                src->width(), src->height());
+
+    if (!this->applyCropRect(ctx, srcBounds, bounds)) {
+        return nullptr;
+    }
+
+    if (srcBounds.contains(*bounds)) {
+        return sk_sp<SkSpecialImage>(SkRef(src));
+    } else {
+        // Saturating subtraction keeps the draw offsets sane for extreme coordinates.
+        sk_sp<SkSpecialImage> img(pad_image(src, ctx, bounds->width(), bounds->height(),
+                                            Sk32_sat_sub(srcOffset->x(), bounds->x()),
+                                            Sk32_sat_sub(srcOffset->y(), bounds->y())));
+        *srcOffset = SkIPoint::Make(bounds->x(), bounds->y());
+        return img;
+    }
+}
+
+// NOTE: The new onGetOutputLayerBounds() and onGetInputLayerBounds() default to calling into the
+// deprecated onFilterBounds and onFilterNodeBounds. While these functions are not tagged, they do
+// match the documented default behavior for the new bounds functions.
+//
+// Default: the union of all child filters' bounds (a null child contributes 'src' unchanged).
+SkIRect SkImageFilter_Base::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+                                           MapDirection dir, const SkIRect* inputRect) const {
+    if (this->countInputs() < 1) {
+        return src;
+    }
+
+    SkIRect totalBounds;
+    for (int i = 0; i < this->countInputs(); ++i) {
+        const SkImageFilter* filter = this->getInput(i);
+        SkIRect rect = filter ? filter->filterBounds(src, ctm, dir, inputRect) : src;
+        if (0 == i) {
+            totalBounds = rect;
+        } else {
+            totalBounds.join(rect);
+        }
+    }
+
+    return totalBounds;
+}
+
+// Default node-local bounds mapping: the identity (this node neither grows nor shrinks bounds).
+SkIRect SkImageFilter_Base::onFilterNodeBounds(const SkIRect& src, const SkMatrix&,
+                                               MapDirection, const SkIRect*) const {
+    return src;
+}
+
+// Helper for subclasses: unions the required input layer bounds across all child filters.
+// A null child stands for the source image and requires 'desiredOutput' directly.
+skif::LayerSpace<SkIRect> SkImageFilter_Base::visitInputLayerBounds(
+        const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& desiredOutput,
+        const skif::LayerSpace<SkIRect>& contentBounds) const {
+    if (this->countInputs() < 1) {
+        // TODO (michaelludwig) - if a filter doesn't have any inputs, it doesn't need any
+        // implicit source image, so arguably we could return an empty rect here. 'desiredOutput' is
+        // consistent with original behavior, so empty bounds may have unintended side effects
+        // but should be explored later. Of note is that right now an empty layer bounds assumes
+        // that there's no need to filter on restore, which is not the case for these filters.
+        return desiredOutput;
+    }
+
+    skif::LayerSpace<SkIRect> netInput;
+    for (int i = 0; i < this->countInputs(); ++i) {
+        const SkImageFilter* filter = this->getInput(i);
+        // The required input for this input filter, or 'targetOutput' if the filter is null and
+        // the source image is used (so must be sized to cover 'targetOutput').
+        // TODO (michaelludwig) - Right now contentBounds is applied conditionally at the end of
+        // the root getInputLayerBounds() based on affecting transparent black. Once that bit only
+        // changes output behavior, we can have the required bounds for a null input filter be the
+        // intersection of the desired output and the content bounds.
+        skif::LayerSpace<SkIRect> requiredInput =
+                filter ? as_IFB(filter)->onGetInputLayerBounds(mapping, desiredOutput,
+                                                               contentBounds)
+                       : desiredOutput;
+        // Accumulate with all other filters
+        if (i == 0) {
+            netInput = requiredInput;
+        } else {
+            netInput.join(requiredInput);
+        }
+    }
+    return netInput;
+}
+
+// Helper for subclasses: unions the produced output layer bounds across all child filters.
+// A null child stands for the identity filter applied to the source ('contentBounds').
+skif::LayerSpace<SkIRect> SkImageFilter_Base::visitOutputLayerBounds(
+        const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& contentBounds) const {
+    if (this->countInputs() < 1) {
+        // TODO (michaelludwig) - if a filter doesn't have any inputs, it presumably is determining
+        // its output size from something other than the implicit source contentBounds, in which
+        // case it shouldn't be calling this helper function, so explore adding an unreachable test
+        return contentBounds;
+    }
+
+    skif::LayerSpace<SkIRect> netOutput;
+    for (int i = 0; i < this->countInputs(); ++i) {
+        const SkImageFilter* filter = this->getInput(i);
+        // The output for just this input filter, or 'contentBounds' if the filter is null and
+        // the source image is used (i.e. the identity filter applied to the source).
+        skif::LayerSpace<SkIRect> output =
+                filter ? as_IFB(filter)->onGetOutputLayerBounds(mapping, contentBounds)
+                       : contentBounds;
+        // Accumulate with all other filters
+        if (i == 0) {
+            netOutput = output;
+        } else {
+            netOutput.join(output);
+        }
+    }
+    return netOutput;
+}
+
+// Default reverse-bounds mapping built on the legacy virtuals: maps the desired output through
+// this node (onFilterNodeBounds), then optionally recurses through children (onFilterBounds).
+skif::LayerSpace<SkIRect> SkImageFilter_Base::onGetInputLayerBounds(
+        const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& desiredOutput,
+        const skif::LayerSpace<SkIRect>& contentBounds, VisitChildren recurse) const {
+    // Call old functions for now since they may have been overridden by a subclass that's not been
+    // updated yet; eventually this will be a pure virtual and impls control visiting children
+    SkIRect content = SkIRect(contentBounds);
+    SkIRect input = this->onFilterNodeBounds(SkIRect(desiredOutput), mapping.layerMatrix(),
+                                             kReverse_MapDirection, &content);
+    if (recurse == VisitChildren::kYes) {
+        SkIRect aggregate = this->onFilterBounds(input, mapping.layerMatrix(),
+                                                 kReverse_MapDirection, &input);
+        return skif::LayerSpace<SkIRect>(aggregate);
+    } else {
+        return skif::LayerSpace<SkIRect>(input);
+    }
+}
+
+// Default forward-bounds mapping built on the legacy virtuals: children first (onFilterBounds),
+// then this node's own effect (onFilterNodeBounds).
+skif::LayerSpace<SkIRect> SkImageFilter_Base::onGetOutputLayerBounds(
+        const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& contentBounds) const {
+    // Call old functions for now; eventually this will be a pure virtual
+    SkIRect aggregate = this->onFilterBounds(SkIRect(contentBounds), mapping.layerMatrix(),
+                                             kForward_MapDirection, nullptr);
+    SkIRect output = this->onFilterNodeBounds(aggregate, mapping.layerMatrix(),
+                                              kForward_MapDirection, nullptr);
+    return skif::LayerSpace<SkIRect>(output);
+}
+
+// Evaluates child filter 'index' with a context remapped for that edge of the DAG.
+// A null child resolves to the context's source image.
+skif::FilterResult SkImageFilter_Base::filterInput(int index, const skif::Context& ctx) const {
+    const SkImageFilter* input = this->getInput(index);
+    if (!input) {
+        // Null image filters late bind to the source image
+        return ctx.source();
+    }
+
+    skif::FilterResult result = as_IFB(input)->filterImage(this->mapContext(ctx));
+    // The produced image must live on the same backend (CPU vs. GPU) as the context.
+    SkASSERT(!result.image() || ctx.gpuBacked() == result.image()->isTextureBacked());
+
+    return result;
+}
+
+// Produces the context used when evaluating this node's children: same mapping, but with the
+// desired output replaced by what this node alone requires from its inputs.
+SkImageFilter_Base::Context SkImageFilter_Base::mapContext(const Context& ctx) const {
+    // We don't recurse through the child input filters because that happens automatically
+    // as part of the filterImage() evaluation. In this case, we want the bounds for the
+    // edge from this node to its children, without the effects of the child filters.
+    skif::LayerSpace<SkIRect> childOutput = this->onGetInputLayerBounds(
+            ctx.mapping(), ctx.desiredOutput(), ctx.desiredOutput(), VisitChildren::kNo);
+    return ctx.withNewDesiredOutput(childOutput);
+}
+
+#if defined(SK_GANESH)
+// Ganesh helper: renders fragment processor 'fp' over 'bounds' into a new approx-fit surface
+// and wraps the result as a deferred SkSpecialImage. Returns null if the surface context
+// cannot be created.
+sk_sp<SkSpecialImage> SkImageFilter_Base::DrawWithFP(GrRecordingContext* rContext,
+                                                     std::unique_ptr<GrFragmentProcessor> fp,
+                                                     const SkIRect& bounds,
+                                                     SkColorType colorType,
+                                                     const SkColorSpace* colorSpace,
+                                                     const SkSurfaceProps& surfaceProps,
+                                                     GrSurfaceOrigin surfaceOrigin,
+                                                     GrProtected isProtected) {
+    GrImageInfo info(SkColorTypeToGrColorType(colorType),
+                     kPremul_SkAlphaType,
+                     sk_ref_sp(colorSpace),
+                     bounds.size());
+
+    auto sfc = rContext->priv().makeSFC(info,
+                                        "ImageFilterBase_DrawWithFP",
+                                        SkBackingFit::kApprox,
+                                        1,
+                                        GrMipmapped::kNo,
+                                        isProtected,
+                                        surfaceOrigin);
+    if (!sfc) {
+        return nullptr;
+    }
+
+    // Draw at the destination origin; 'srcRect' supplies the FP's coordinate space.
+    SkIRect dstIRect = SkIRect::MakeWH(bounds.width(), bounds.height());
+    SkRect srcRect = SkRect::Make(bounds);
+    sfc->fillRectToRectWithFP(srcRect, dstIRect, std::move(fp));
+
+    return SkSpecialImage::MakeDeferredFromGpu(rContext,
+                                               dstIRect,
+                                               kNeedNewImageUniqueID_SpecialImage,
+                                               sfc->readSurfaceView(),
+                                               sfc->colorInfo(),
+                                               surfaceProps);
+}
+
+// Ganesh helper: converts 'src' into 'colorSpace' by redrawing it with kSrc blending.
+// Returns the original image (ref'ed) when no conversion is needed or a surface can't be made.
+sk_sp<SkSpecialImage> SkImageFilter_Base::ImageToColorSpace(SkSpecialImage* src,
+                                                            SkColorType colorType,
+                                                            SkColorSpace* colorSpace,
+                                                            const SkSurfaceProps& surfaceProps) {
+    // There are several conditions that determine if we actually need to convert the source to the
+    // destination's color space. Rather than duplicate that logic here, just try to make an xform
+    // object. If that produces something, then both are tagged, and the source is in a different
+    // gamut than the dest. There is some overhead to making the xform, but those are cached, and
+    // if we get one back, that means we're about to use it during the conversion anyway.
+    auto colorSpaceXform = GrColorSpaceXform::Make(src->getColorSpace(), src->alphaType(),
+                                                   colorSpace, kPremul_SkAlphaType);
+
+    if (!colorSpaceXform) {
+        // No xform needed, just return the original image
+        return sk_ref_sp(src);
+    }
+
+    sk_sp<SkSpecialSurface> surf(src->makeSurface(colorType, colorSpace,
+                                                  SkISize::Make(src->width(), src->height()),
+                                                  kPremul_SkAlphaType, surfaceProps));
+    if (!surf) {
+        // Best effort: fall back to the unconverted image rather than failing the filter.
+        return sk_ref_sp(src);
+    }
+
+    SkCanvas* canvas = surf->getCanvas();
+    SkASSERT(canvas);
+    SkPaint p;
+    // kSrc ensures a straight copy (no blending with the cleared surface contents).
+    p.setBlendMode(SkBlendMode::kSrc);
+    src->draw(canvas, 0, 0, SkSamplingOptions(), &p);
+    return surf->makeImageSnapshot();
+}
+#endif
+
+// In repeat mode, when we are going to sample off one edge of the srcBounds we require the
+// opposite side be preserved.
+// Expands 'srcBounds' by the filter's reach; if the expansion would sample outside
+// 'originalSrcBounds' on either axis, that whole axis snaps back to the original extent.
+SkIRect SkImageFilter_Base::DetermineRepeatedSrcBound(const SkIRect& srcBounds,
+                                                      const SkIVector& filterOffset,
+                                                      const SkISize& filterSize,
+                                                      const SkIRect& originalSrcBounds) {
+    SkIRect tmp = srcBounds;
+    tmp.adjust(-filterOffset.fX, -filterOffset.fY,
+               filterSize.fWidth - filterOffset.fX, filterSize.fHeight - filterOffset.fY);
+
+    if (tmp.fLeft < originalSrcBounds.fLeft || tmp.fRight > originalSrcBounds.fRight) {
+        tmp.fLeft = originalSrcBounds.fLeft;
+        tmp.fRight = originalSrcBounds.fRight;
+    }
+    if (tmp.fTop < originalSrcBounds.fTop || tmp.fBottom > originalSrcBounds.fBottom) {
+        tmp.fTop = originalSrcBounds.fTop;
+        tmp.fBottom = originalSrcBounds.fBottom;
+    }
+
+    return tmp;
+}
+
+// Evicts every entry from the process-wide image filter cache.
+void SkImageFilter_Base::PurgeCache() {
+    SkImageFilterCache::Get()->purge();
+}
diff --git a/gfx/skia/skia/src/core/SkImageFilterCache.cpp b/gfx/skia/skia/src/core/SkImageFilterCache.cpp
new file mode 100644
index 0000000000..4d998cf86e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterCache.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkImageFilterCache.h"
+
+#include <vector>
+
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkOnce.h"
+#include "src/base/SkTInternalLList.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkTDynamicHash.h"
+#include "src/core/SkTHash.h"
+
+// Default byte budget for the process-wide cache; iOS gets a much smaller budget.
+#ifdef SK_BUILD_FOR_IOS
+    enum { kDefaultCacheSize = 2 * 1024 * 1024 };
+#else
+    enum { kDefaultCacheSize = 128 * 1024 * 1024 };
+#endif
+
+namespace {
+
+// Mutex-guarded LRU cache of filter results, bounded by a byte budget. Entries are indexed
+// two ways: by Key (fLookup, for get/set) and by owning SkImageFilter (fImageFilterValues,
+// so a filter's destruction can purge all of its entries at once).
+class CacheImpl : public SkImageFilterCache {
+public:
+    typedef SkImageFilterCacheKey Key;
+    CacheImpl(size_t maxBytes) : fMaxBytes(maxBytes), fCurrentBytes(0) { }
+    ~CacheImpl() override {
+        fLookup.foreach([&](Value* v) { delete v; });
+    }
+    // One cache entry: the key, the cached result, and the filter that produced it.
+    struct Value {
+        Value(const Key& key, const skif::FilterResult& image,
+              const SkImageFilter* filter)
+            : fKey(key), fImage(image), fFilter(filter) {}
+
+        Key fKey;
+        skif::FilterResult fImage;
+        const SkImageFilter* fFilter;
+        static const Key& GetKey(const Value& v) {
+            return v.fKey;
+        }
+        static uint32_t Hash(const Key& key) {
+            // Key is asserted tightly-packed in its constructor, so hashing raw bytes is valid.
+            return SkOpts::hash(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
+        }
+        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Value);
+    };
+
+    // Looks up 'key'; on a hit, moves the entry to the LRU head and copies it to 'result'.
+    bool get(const Key& key, skif::FilterResult* result) const override {
+        SkASSERT(result);
+
+        SkAutoMutexExclusive mutex(fMutex);
+        if (Value* v = fLookup.find(key)) {
+            if (v != fLRU.head()) {
+                fLRU.remove(v);
+                fLRU.addToHead(v);
+            }
+
+            *result = v->fImage;
+            return true;
+        }
+        return false;
+    }
+
+    // Inserts (or replaces) the entry for 'key', then evicts LRU-tail entries until the byte
+    // budget is met (the just-inserted entry is never evicted here).
+    void set(const Key& key, const SkImageFilter* filter,
+             const skif::FilterResult& result) override {
+        SkAutoMutexExclusive mutex(fMutex);
+        if (Value* v = fLookup.find(key)) {
+            this->removeInternal(v);
+        }
+        Value* v = new Value(key, result, filter);
+        fLookup.add(v);
+        fLRU.addToHead(v);
+        fCurrentBytes += result.image() ? result.image()->getSize() : 0;
+        if (auto* values = fImageFilterValues.find(filter)) {
+            values->push_back(v);
+        } else {
+            fImageFilterValues.set(filter, {v});
+        }
+
+        while (fCurrentBytes > fMaxBytes) {
+            Value* tail = fLRU.tail();
+            SkASSERT(tail);
+            if (tail == v) {
+                break;
+            }
+            this->removeInternal(tail);
+        }
+    }
+
+    // Evicts everything.
+    void purge() override {
+        SkAutoMutexExclusive mutex(fMutex);
+        while (fCurrentBytes > 0) {
+            Value* tail = fLRU.tail();
+            SkASSERT(tail);
+            this->removeInternal(tail);
+        }
+    }
+
+    // Evicts every entry produced by 'filter' (called when the filter is destroyed).
+    void purgeByImageFilter(const SkImageFilter* filter) override {
+        SkAutoMutexExclusive mutex(fMutex);
+        auto* values = fImageFilterValues.find(filter);
+        if (!values) {
+            return;
+        }
+        for (Value* v : *values) {
+            // We set the filter to be null so that removeInternal() won't delete from values while
+            // we're iterating over it.
+            v->fFilter = nullptr;
+            this->removeInternal(v);
+        }
+        fImageFilterValues.remove(filter);
+    }
+
+    SkDEBUGCODE(int count() const override { return fLookup.count(); })
+private:
+    // Unlinks 'v' from every index, updates the byte count, and deletes it. Caller holds fMutex.
+    void removeInternal(Value* v) {
+        if (v->fFilter) {
+            if (auto* values = fImageFilterValues.find(v->fFilter)) {
+                if (values->size() == 1 && (*values)[0] == v) {
+                    fImageFilterValues.remove(v->fFilter);
+                } else {
+                    for (auto it = values->begin(); it != values->end(); ++it) {
+                        if (*it == v) {
+                            values->erase(it);
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+        fCurrentBytes -= v->fImage.image() ? v->fImage.image()->getSize() : 0;
+        fLRU.remove(v);
+        fLookup.remove(v->fKey);
+        delete v;
+    }
+private:
+    SkTDynamicHash<Value, Key> fLookup;
+    // mutable: get() is logically const but must reorder the LRU list.
+    mutable SkTInternalLList<Value> fLRU;
+    // Value* always points to an item in fLookup.
+    SkTHashMap<const SkImageFilter*, std::vector<Value*>> fImageFilterValues;
+    size_t fMaxBytes;
+    size_t fCurrentBytes;
+    mutable SkMutex fMutex;
+};
+
+} // namespace
+
+// Creates a new cache instance with the given byte budget; caller owns the returned pointer.
+SkImageFilterCache* SkImageFilterCache::Create(size_t maxBytes) {
+    return new CacheImpl(maxBytes);
+}
+
+// Returns the process-wide cache singleton, created lazily (thread-safe via SkOnce) and
+// intentionally never destroyed.
+SkImageFilterCache* SkImageFilterCache::Get() {
+    static SkOnce once;
+    static SkImageFilterCache* cache;
+
+    once([]{ cache = SkImageFilterCache::Create(kDefaultCacheSize); });
+    return cache;
+}
diff --git a/gfx/skia/skia/src/core/SkImageFilterCache.h b/gfx/skia/skia/src/core/SkImageFilterCache.h
new file mode 100644
index 0000000000..5af859367e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterCache.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilterCache_DEFINED
+#define SkImageFilterCache_DEFINED
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkImageFilterTypes.h"
+
+struct SkIPoint;
+class SkImageFilter;
+
+// Cache key: identifies a cached result by the filter instance's unique ID, the CTM, the clip
+// bounds, and the source image's generation ID + subset. Must stay tightly packed because
+// implementations hash its raw bytes.
+struct SkImageFilterCacheKey {
+    SkImageFilterCacheKey(const uint32_t uniqueID, const SkMatrix& matrix,
+        const SkIRect& clipBounds, uint32_t srcGenID, const SkIRect& srcSubset)
+        : fUniqueID(uniqueID)
+        , fMatrix(matrix)
+        , fClipBounds(clipBounds)
+        , fSrcGenID(srcGenID)
+        , fSrcSubset(srcSubset) {
+        // Assert that Key is tightly-packed, since it is hashed.
+        static_assert(sizeof(SkImageFilterCacheKey) == sizeof(uint32_t) + sizeof(SkMatrix) +
+                                     sizeof(SkIRect) + sizeof(uint32_t) + 4 * sizeof(int32_t),
+                                     "image_filter_key_tight_packing");
+        fMatrix.getType(); // force initialization of type, so hashes match
+        SkASSERT(fMatrix.isFinite()); // otherwise we can't rely on == self when comparing keys
+    }
+
+    uint32_t fUniqueID;
+    SkMatrix fMatrix;
+    SkIRect fClipBounds;
+    uint32_t fSrcGenID;
+    SkIRect fSrcSubset;
+
+    bool operator==(const SkImageFilterCacheKey& other) const {
+        return fUniqueID == other.fUniqueID &&
+               fMatrix == other.fMatrix &&
+               fClipBounds == other.fClipBounds &&
+               fSrcGenID == other.fSrcGenID &&
+               fSrcSubset == other.fSrcSubset;
+    }
+};
+
+// This cache maps from (filter's unique ID + CTM + clipBounds + src bitmap generation ID) to result
+// NOTE: this is the _specific_ unique ID of the image filter, so refiltering the same image with a
+// copy of the image filter (with exactly the same parameters) will not yield a cache hit.
+// Abstract interface for the image filter result cache (implemented by CacheImpl in the .cpp).
+class SkImageFilterCache : public SkRefCnt {
+public:
+    enum { kDefaultTransientSize = 32 * 1024 * 1024 };
+
+    ~SkImageFilterCache() override {}
+    static SkImageFilterCache* Create(size_t maxBytes);
+    static SkImageFilterCache* Get();
+
+    // Returns true on cache hit and updates 'result' to be the cached result. Returns false when
+    // not in the cache, in which case 'result' is not modified.
+    virtual bool get(const SkImageFilterCacheKey& key,
+                     skif::FilterResult* result) const = 0;
+    // 'filter' is included in the caching to allow the purging of all of an image filter's cached
+    // results when it is destroyed.
+    virtual void set(const SkImageFilterCacheKey& key, const SkImageFilter* filter,
+                     const skif::FilterResult& result) = 0;
+    virtual void purge() = 0;
+    virtual void purgeByImageFilter(const SkImageFilter*) = 0;
+    SkDEBUGCODE(virtual int count() const = 0;)
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkImageFilterTypes.cpp b/gfx/skia/skia/src/core/SkImageFilterTypes.cpp
new file mode 100644
index 0000000000..aa8cddb9cc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterTypes.cpp
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkImageFilterTypes.h"
+
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkMatrixPriv.h"
+
+// This exists to cover up issues where infinite precision would produce integers but float
+// math produces values just larger/smaller than an int and roundOut/In on bounds would produce
+// nearly a full pixel error. One such case is crbug.com/1313579 where the caller has produced
+// near integer CTM and uses integer crop rects that would grab an extra row/column of the
+// input image when using a strict roundOut.
+static constexpr float kRoundEpsilon = 1e-3f;
+
+// Both [I]Vectors and Sk[I]Sizes are transformed as non-positioned values, i.e. go through
+// mapVectors() not mapPoints().
+// Integer overload: rounds the mapped vector back to integer components.
+static SkIVector map_as_vector(int32_t x, int32_t y, const SkMatrix& matrix) {
+    SkVector v = SkVector::Make(SkIntToScalar(x), SkIntToScalar(y));
+    matrix.mapVectors(&v, 1);
+    return SkIVector::Make(SkScalarRoundToInt(v.fX), SkScalarRoundToInt(v.fY));
+}
+
+// Scalar overload: no rounding needed.
+static SkVector map_as_vector(SkScalar x, SkScalar y, const SkMatrix& matrix) {
+    SkVector v = SkVector::Make(x, y);
+    matrix.mapVectors(&v, 1);
+    return v;
+}
+
+// If m is epsilon within the form [1 0 tx], this returns true and sets out to [tx, ty]
+//                                 [0 1 ty]
+//                                 [0 0 1 ]
+// TODO: Use this in decomposeCTM() (and possibly extend it to support is_nearly_scale_translate)
+// to be a little more forgiving on matrix types during layer configuration.
+static bool is_nearly_integer_translation(const skif::LayerSpace<SkMatrix>& m,
+                                          skif::LayerSpace<SkIPoint>* out=nullptr) {
+    // Divide by the homogeneous scale in case m is not normalized (rc(2,2) != 1).
+    float tx = SkScalarRoundToScalar(sk_ieee_float_divide(m.rc(0,2), m.rc(2,2)));
+    float ty = SkScalarRoundToScalar(sk_ieee_float_divide(m.rc(1,2), m.rc(2,2)));
+    SkMatrix expected = SkMatrix::MakeAll(1.f, 0.f, tx,
+                                          0.f, 1.f, ty,
+                                          0.f, 0.f, 1.f);
+    // Compare all 9 entries against the ideal integer translation within kRoundEpsilon.
+    for (int i = 0; i < 9; ++i) {
+        if (!SkScalarNearlyEqual(expected.get(i), m.get(i), kRoundEpsilon)) {
+            return false;
+        }
+    }
+
+    if (out) {
+        *out = skif::LayerSpace<SkIPoint>({(int) tx, (int) ty});
+    }
+    return true;
+}
+
+// Maps a float rect, preserving emptiness (an empty rect maps to the canonical empty rect).
+static SkRect map_rect(const SkMatrix& matrix, const SkRect& rect) {
+    if (rect.isEmpty()) {
+        return SkRect::MakeEmpty();
+    }
+    return matrix.mapRect(rect);
+}
+
+// Maps an integer rect, using double-precision math for scale+translate matrices to keep 1px
+// precision where float rounding would lose it.
+static SkIRect map_rect(const SkMatrix& matrix, const SkIRect& rect) {
+    if (rect.isEmpty()) {
+        return SkIRect::MakeEmpty();
+    }
+    // Unfortunately, there is a range of integer values such that we have 1px precision as an int,
+    // but less precision as a float. This can lead to non-empty SkIRects becoming empty simply
+    // because of float casting. If we're already dealing with a float rect or having a float
+    // output, that's what we're stuck with; but if we are starting form an irect and desiring an
+    // SkIRect output, we go through efforts to preserve the 1px precision for simple transforms.
+    if (matrix.isScaleTranslate()) {
+        double l = (double)matrix.getScaleX()*rect.fLeft   + (double)matrix.getTranslateX();
+        double r = (double)matrix.getScaleX()*rect.fRight  + (double)matrix.getTranslateX();
+        double t = (double)matrix.getScaleY()*rect.fTop    + (double)matrix.getTranslateY();
+        double b = (double)matrix.getScaleY()*rect.fBottom + (double)matrix.getTranslateY();
+
+        // min/max handle negative scales (which swap edges); the epsilon nudge matches
+        // RoundOut/RoundIn's tolerance for near-integer results.
+        return {sk_double_saturate2int(sk_double_floor(std::min(l, r) + kRoundEpsilon)),
+                sk_double_saturate2int(sk_double_floor(std::min(t, b) + kRoundEpsilon)),
+                sk_double_saturate2int(sk_double_ceil(std::max(l, r)  - kRoundEpsilon)),
+                sk_double_saturate2int(sk_double_ceil(std::max(t, b)  - kRoundEpsilon))};
+    } else {
+        return skif::RoundOut(matrix.mapRect(SkRect::Make(rect)));
+    }
+}
+
+namespace skif {
+
+// Epsilon-tolerant rounding: inset/outset by kRoundEpsilon first so values a hair past an
+// integer don't pick up a whole extra (or lose a whole) pixel.
+SkIRect RoundOut(SkRect r) { return r.makeInset(kRoundEpsilon, kRoundEpsilon).roundOut(); }
+
+SkIRect RoundIn(SkRect r) { return r.makeOutset(kRoundEpsilon, kRoundEpsilon).roundIn(); }
+
+// Splits 'ctm' into a layer matrix (applied while filtering) and a remainder (applied when the
+// layer is drawn to the device), based on how much of the matrix 'filter' can handle. Returns
+// false only if the remainder is not invertible.
+bool Mapping::decomposeCTM(const SkMatrix& ctm, const SkImageFilter* filter,
+                           const skif::ParameterSpace<SkPoint>& representativePt) {
+    SkMatrix remainder, layer;
+    SkSize decomposed;
+    using MatrixCapability = SkImageFilter_Base::MatrixCapability;
+    MatrixCapability capability =
+            filter ? as_IFB(filter)->getCTMCapability() : MatrixCapability::kComplex;
+    if (capability == MatrixCapability::kTranslate) {
+        // Apply the entire CTM post-filtering
+        remainder = ctm;
+        layer = SkMatrix::I();
+    } else if (ctm.isScaleTranslate() || capability == MatrixCapability::kComplex) {
+        // Either layer space can be anything (kComplex) - or - it can be scale+translate, and the
+        // ctm is. In both cases, the layer space can be equivalent to device space.
+        remainder = SkMatrix::I();
+        layer = ctm;
+    } else if (ctm.decomposeScale(&decomposed, &remainder)) {
+        // This case implies some amount of sampling post-filtering, either due to skew or rotation
+        // in the original matrix. As such, keep the layer matrix as simple as possible.
+        layer = SkMatrix::Scale(decomposed.fWidth, decomposed.fHeight);
+    } else {
+        // Perspective, which has a non-uniform scaling effect on the filter. Pick a single scale
+        // factor that best matches where the filter will be evaluated.
+        SkScalar scale = SkMatrixPriv::DifferentialAreaScale(ctm, SkPoint(representativePt));
+        if (SkScalarIsFinite(scale) && !SkScalarNearlyZero(scale)) {
+            // Now take the sqrt to go from an area scale factor to a scaling per X and Y
+            // FIXME: It would be nice to be able to choose a non-uniform scale.
+            scale = SkScalarSqrt(scale);
+        } else {
+            // The representative point was behind the W = 0 plane, so don't factor out any scale.
+            // NOTE: This makes remainder and layer the same as the MatrixCapability::Translate case
+            scale = 1.f;
+        }
+
+        remainder = ctm;
+        remainder.preScale(SkScalarInvert(scale), SkScalarInvert(scale));
+        layer = SkMatrix::Scale(scale, scale);
+    }
+
+    SkMatrix invRemainder;
+    if (!remainder.invert(&invRemainder)) {
+        // Under floating point arithmetic, it's possible to decompose an invertible matrix into
+        // a scaling matrix and a remainder and have the remainder be non-invertible. Generally
+        // when this happens the scale factors are so large and the matrix so ill-conditioned that
+        // it's unlikely that any drawing would be reasonable, so failing to make a layer is okay.
+        return false;
+    } else {
+        fParamToLayerMatrix = layer;
+        fLayerToDevMatrix = remainder;
+        fDevToLayerMatrix = invRemainder;
+        return true;
+    }
+}
+
+// Post-composes 'layer' onto the layer space, keeping the net param->device transform
+// unchanged (layer->device is pre-multiplied by the inverse). Fails if 'layer' isn't invertible.
+bool Mapping::adjustLayerSpace(const SkMatrix& layer) {
+    SkMatrix invLayer;
+    if (!layer.invert(&invLayer)) {
+        return false;
+    }
+    fParamToLayerMatrix.postConcat(layer);
+    fDevToLayerMatrix.postConcat(layer);
+    fLayerToDevMatrix.preConcat(invLayer);
+    return true;
+}
+
+// Instantiate map specializations for the 6 geometric types used during filtering
+template<>
+SkRect Mapping::map<SkRect>(const SkRect& geom, const SkMatrix& matrix) {
+    return map_rect(matrix, geom);
+}
+
+template<>
+SkIRect Mapping::map<SkIRect>(const SkIRect& geom, const SkMatrix& matrix) {
+    return map_rect(matrix, geom);
+}
+
+// Points are positioned values, so they go through mapPoints() (unlike vectors/sizes).
+template<>
+SkIPoint Mapping::map<SkIPoint>(const SkIPoint& geom, const SkMatrix& matrix) {
+    SkPoint p = SkPoint::Make(SkIntToScalar(geom.fX), SkIntToScalar(geom.fY));
+    matrix.mapPoints(&p, 1);
+    return SkIPoint::Make(SkScalarRoundToInt(p.fX), SkScalarRoundToInt(p.fY));
+}
+
+template<>
+SkPoint Mapping::map<SkPoint>(const SkPoint& geom, const SkMatrix& matrix) {
+    SkPoint p;
+    matrix.mapPoints(&p, &geom, 1);
+    return p;
+}
+
+template<>
+IVector Mapping::map<IVector>(const IVector& geom, const SkMatrix& matrix) {
+    return IVector(map_as_vector(geom.fX, geom.fY, matrix));
+}
+
+template<>
+Vector Mapping::map<Vector>(const Vector& geom, const SkMatrix& matrix) {
+    return Vector(map_as_vector(geom.fX, geom.fY, matrix));
+}
+
+template<>
+SkISize Mapping::map<SkISize>(const SkISize& geom, const SkMatrix& matrix) {
+    SkIVector v = map_as_vector(geom.fWidth, geom.fHeight, matrix);
+    return SkISize::Make(v.fX, v.fY);
+}
+
+template<>
+SkSize Mapping::map<SkSize>(const SkSize& geom, const SkMatrix& matrix) {
+    SkVector v = map_as_vector(geom.fWidth, geom.fHeight, matrix);
+    return SkSize::Make(v.fX, v.fY);
+}
+
+template<>
+SkMatrix Mapping::map<SkMatrix>(const SkMatrix& m, const SkMatrix& matrix) {
+    // If 'matrix' maps from the C1 coord space to the C2 coord space, and 'm' is a transform that
+    // operates on, and outputs to, the C1 coord space, we want to return a new matrix that is
+    // equivalent to 'm' that operates on and outputs to C2. This is the same as mapping the input
+    // from C2 to C1 (matrix^-1), then transforming by 'm', and then mapping from C1 to C2 (matrix).
+    SkMatrix inv;
+    SkAssertResult(matrix.invert(&inv));
+    inv.postConcat(m);
+    inv.postConcat(matrix);
+    return inv;
+}
+
+// Rect mapping within layer space; delegates to the precision-preserving map_rect() helpers.
+LayerSpace<SkRect> LayerSpace<SkMatrix>::mapRect(const LayerSpace<SkRect>& r) const {
+    return LayerSpace<SkRect>(map_rect(fData, SkRect(r)));
+}
+
+LayerSpace<SkIRect> LayerSpace<SkMatrix>::mapRect(const LayerSpace<SkIRect>& r) const {
+    return LayerSpace<SkIRect>(map_rect(fData, SkIRect(r)));
+}
+
+// Legacy accessor: resolves this result to a concrete image at fLayerBounds and reports its
+// origin through 'offset'.
+sk_sp<SkSpecialImage> FilterResult::imageAndOffset(SkIPoint* offset) const {
+    auto [image, origin] = this->resolve(fLayerBounds);
+    *offset = SkIPoint(origin);
+    return image;
+}
+
+// Applies a decal-tiled crop to this result. Returns an empty result when the crop misses the
+// desired output; tightens bounds without resolving when the crop doesn't cut into the image.
+FilterResult FilterResult::applyCrop(const Context& ctx,
+                                     const LayerSpace<SkIRect>& crop) const {
+    LayerSpace<SkIRect> tightBounds = crop;
+    // TODO(michaelludwig): Intersecting to the target output is only valid when the crop has
+    // decal tiling (the only current option).
+    if (!fImage || !tightBounds.intersect(ctx.desiredOutput())) {
+        // The desired output would be filled with transparent black.
+        return {};
+    }
+
+    if (crop.contains(fLayerBounds)) {
+        // The original crop does not affect the image (although the context's desired output might)
+        // We can tighten fLayerBounds to the desired output without resolving the image, regardless
+        // of the transform type.
+        // TODO(michaelludwig): If the crop would use mirror or repeat, the above isn't true.
+        FilterResult restrictedOutput = *this;
+        SkAssertResult(restrictedOutput.fLayerBounds.intersect(ctx.desiredOutput()));
+        return restrictedOutput;
+    } else {
+        // The crop cuts into the image, so resolve to a concrete image at the tight bounds.
+        return this->resolve(tightBounds);
+    }
+}
+
+// Decides whether two successive samplings can be collapsed into one. Returns true when a single
+// sampling with concatenated transforms is visually indistinguishable, updating *nextSampling to
+// the sampling to use; returns false when the results must be resolved separately.
+static bool compatible_sampling(const SkSamplingOptions& currentSampling,
+                                bool currentXformWontAffectNearest,
+                                SkSamplingOptions* nextSampling,
+                                bool nextXformWontAffectNearest) {
+    // Both transforms could perform non-trivial sampling, but if they are similar enough we
+    // assume performing one non-trivial sampling operation with the concatenated transform will
+    // not be visually distinguishable from sampling twice.
+    // TODO(michaelludwig): For now ignore mipmap policy, SkSpecialImages are not supposed to be
+    // drawn with mipmapping, and the majority of filter steps produce images that are at the
+    // proper scale and do not define mip levels. The main exception is the ::Image() filter
+    // leaf but that doesn't use this system yet.
+    if (currentSampling.isAniso() && nextSampling->isAniso()) {
+        // Assume we can get away with one sampling at the highest anisotropy level
+        *nextSampling =  SkSamplingOptions::Aniso(std::max(currentSampling.maxAniso,
+                                                           nextSampling->maxAniso));
+        return true;
+    } else if (currentSampling.useCubic && (nextSampling->filter == SkFilterMode::kLinear ||
+                                            (nextSampling->useCubic &&
+                                             currentSampling.cubic.B == nextSampling->cubic.B &&
+                                             currentSampling.cubic.C == nextSampling->cubic.C))) {
+        // Assume we can get away with the current bicubic filter, since the next is the same
+        // or a bilerp that can be upgraded.
+        *nextSampling = currentSampling;
+        return true;
+    } else if (nextSampling->useCubic && currentSampling.filter == SkFilterMode::kLinear) {
+        // Mirror of the above, assume we can just get away with next's cubic resampler
+        return true;
+    } else if (currentSampling.filter == SkFilterMode::kLinear &&
+               nextSampling->filter == SkFilterMode::kLinear) {
+        // Assume we can get away with a single bilerp vs. the two
+        return true;
+    } else if (nextSampling->filter == SkFilterMode::kNearest && currentXformWontAffectNearest) {
+        // The next transform and nearest-neighbor filtering isn't impacted by the current transform
+        SkASSERT(currentSampling.filter == SkFilterMode::kLinear);
+        return true;
+    } else if (currentSampling.filter == SkFilterMode::kNearest && nextXformWontAffectNearest) {
+        // The next transform doesn't change the nearest-neighbor filtering of the current transform
+        SkASSERT(nextSampling->filter == SkFilterMode::kLinear);
+        *nextSampling = currentSampling;
+        return true;
+    } else {
+        // The current or next sampling is nearest neighbor, and will produce visible texels
+        // oriented with the current transform; assume this is a desired effect and preserve it.
+        return false;
+    }
+}
+
+// Applies an additional layer-space transform to this result. When the existing and new
+// samplings are compatible, the transforms are simply concatenated; otherwise the current
+// result is resolved to an image first so the new sampling applies cleanly.
+FilterResult FilterResult::applyTransform(const Context& ctx,
+                                          const LayerSpace<SkMatrix> &transform,
+                                          const SkSamplingOptions &sampling) const {
+    if (!fImage) {
+        // Transformed transparent black remains transparent black.
+        return {};
+    }
+
+    // Extract the sampling options that matter based on the current and next transforms.
+    // We make sure the new sampling is bilerp (default) if the new transform doesn't matter
+    // (and assert that the current is bilerp if its transform didn't matter). Bilerp can be
+    // maximally combined, so simplifies the logic in compatible_sampling().
+    const bool currentXformIsInteger = is_nearly_integer_translation(fTransform);
+    const bool nextXformIsInteger = is_nearly_integer_translation(transform);
+
+    SkASSERT(!currentXformIsInteger || fSamplingOptions == kDefaultSampling);
+    SkSamplingOptions nextSampling = nextXformIsInteger ? kDefaultSampling : sampling;
+
+    FilterResult transformed;
+    if (compatible_sampling(fSamplingOptions, currentXformIsInteger,
+                            &nextSampling, nextXformIsInteger)) {
+        // We can concat transforms and 'nextSampling' will be either fSamplingOptions,
+        // sampling, or a merged combination depending on the two transforms in play.
+        transformed = *this;
+    } else {
+        // We'll have to resolve this FilterResult first before 'transform' and 'sampling' can be
+        // correctly evaluated. 'nextSampling' will always be 'sampling'.
+        transformed = this->resolve(fLayerBounds);
+    }
+
+    transformed.concatTransform(transform, nextSampling, ctx.desiredOutput());
+    if (transformed.layerBounds().isEmpty()) {
+        return {};
+    } else {
+        return transformed;
+    }
+}
+
+void FilterResult::concatTransform(const LayerSpace<SkMatrix>& transform,
+ const SkSamplingOptions& newSampling,
+ const LayerSpace<SkIRect>& desiredOutput) {
+ if (!fImage) {
+ // Under normal circumstances, concatTransform() will only be called when we have an image,
+ // but if resolve() fails to make a special surface, we may end up here at which point
+ // doing nothing further is appropriate.
+ return;
+ }
+ fSamplingOptions = newSampling;
+ fTransform.postConcat(transform);
+ // Rebuild the layer bounds and then restrict to the current desired output. The original value
+ // of fLayerBounds includes the image mapped by the original fTransform as well as any
+ // accumulated soft crops from desired outputs of prior stages. To prevent discarding that info,
+ // we map fLayerBounds by the additional transform, instead of re-mapping the image bounds.
+ fLayerBounds = transform.mapRect(fLayerBounds);
+ if (!fLayerBounds.intersect(desiredOutput)) {
+ // The transformed output doesn't touch the desired, so it would just be transparent black.
+ // TODO: This intersection only applies when the tile mode is kDecal.
+ fLayerBounds = LayerSpace<SkIRect>::Empty();
+ }
+}
+
+std::pair<sk_sp<SkSpecialImage>, LayerSpace<SkIPoint>> FilterResult::resolve(
+ LayerSpace<SkIRect> dstBounds) const {
+ // TODO(michaelludwig): Only valid for kDecal, although kClamp would only need 1 extra
+ // pixel of padding so some restriction could happen. We also should skip the intersection if
+ // we need to include transparent black pixels.
+ if (!fImage || !dstBounds.intersect(fLayerBounds)) {
+ return {nullptr, {}};
+ }
+
+ // TODO: This logic to skip a draw will also need to account for the tile mode, but we can
+ // always restrict to the intersection of dstBounds and the image's subset since we are
+ // currently always decal sampling.
+ // TODO(michaelludwig): If we get to the point where all filter results track bounds in
+ // floating point, then we can extend this case to any S+T transform.
+ LayerSpace<SkIPoint> origin;
+ if (is_nearly_integer_translation(fTransform, &origin)) {
+ LayerSpace<SkIRect> imageBounds(SkIRect::MakeXYWH(origin.x(), origin.y(),
+ fImage->width(), fImage->height()));
+ if (!imageBounds.intersect(dstBounds)) {
+ return {nullptr, {}};
+ }
+
+ // Offset the image subset directly to avoid issues negating (origin). With the prior
+ // intersection (bounds - origin) will be >= 0, but (bounds + (-origin)) may not, (e.g.
+ // origin is INT_MIN).
+ SkIRect subset = { imageBounds.left() - origin.x(),
+ imageBounds.top() - origin.y(),
+ imageBounds.right() - origin.x(),
+ imageBounds.bottom() - origin.y() };
+ SkASSERT(subset.fLeft >= 0 && subset.fTop >= 0 &&
+ subset.fRight <= fImage->width() && subset.fBottom <= fImage->height());
+
+ return {fImage->makeSubset(subset), imageBounds.topLeft()};
+ } // else fall through and attempt a draw
+
+ sk_sp<SkSpecialSurface> surface = fImage->makeSurface(fImage->colorType(),
+ fImage->getColorSpace(),
+ SkISize(dstBounds.size()),
+ kPremul_SkAlphaType, {});
+ if (!surface) {
+ return {nullptr, {}};
+ }
+ SkCanvas* canvas = surface->getCanvas();
+ // skbug.com/5075: GPU-backed special surfaces don't reset their contents.
+ canvas->clear(SK_ColorTRANSPARENT);
+ canvas->translate(-dstBounds.left(), -dstBounds.top()); // dst's origin adjustment
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ paint.setBlendMode(SkBlendMode::kSrc);
+
+ // TODO: When using a tile mode other than kDecal, we'll need to use SkSpecialImage::asShader()
+ // and use drawRect(fLayerBounds).
+ if (!fLayerBounds.contains(dstBounds)) {
+ // We're resolving to a larger than necessary image, so make sure transparency outside of
+ // fLayerBounds is preserved.
+ // NOTE: This should only happen when the next layer requires processing transparent black.
+ canvas->clipIRect(SkIRect(fLayerBounds));
+ }
+ canvas->concat(SkMatrix(fTransform)); // src's origin is embedded in fTransform
+ fImage->draw(canvas, 0.f, 0.f, fSamplingOptions, &paint);
+
+ return {surface->makeImageSnapshot(), dstBounds.topLeft()};
+}
+
+} // end namespace skif
diff --git a/gfx/skia/skia/src/core/SkImageFilterTypes.h b/gfx/skia/skia/src/core/SkImageFilterTypes.h
new file mode 100644
index 0000000000..58dc680adf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilterTypes.h
@@ -0,0 +1,799 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilterTypes_DEFINED
+#define SkImageFilterTypes_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+
+class GrRecordingContext;
+class SkImageFilter;
+class SkImageFilterCache;
+class SkSpecialSurface;
+class SkSurfaceProps;
+
+// The skif (SKI[mage]F[ilter]) namespace contains types that are used for filter implementations.
+// The defined types come in two groups: users of internal Skia types, and templates to help with
+// readability. Image filters cannot be implemented without access to key internal types, such as
+// SkSpecialImage. It is possible to avoid the use of the readability templates, although they are
+// strongly encouraged.
+namespace skif {
+
+// Rounds in/out but with a tolerance.
+SkIRect RoundOut(SkRect);
+SkIRect RoundIn(SkRect);
+
+// skif::IVector and skif::Vector represent plain-old-data types for storing direction vectors, so
+// that the coordinate-space templating system defined below can have a separate type id for
+// directions vs. points, and specialize appropriately. As such, all operations with direction
+// vectors are defined on the LayerSpace specialization, since that is the intended point of use.
+struct IVector {
+ int32_t fX;
+ int32_t fY;
+
+ IVector() = default;
+ IVector(int32_t x, int32_t y) : fX(x), fY(y) {}
+ explicit IVector(const SkIVector& v) : fX(v.fX), fY(v.fY) {}
+};
+
+struct Vector {
+ SkScalar fX;
+ SkScalar fY;
+
+ Vector() = default;
+ Vector(SkScalar x, SkScalar y) : fX(x), fY(y) {}
+ explicit Vector(const SkVector& v) : fX(v.fX), fY(v.fY) {}
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Coordinate Space Tagging
+// - In order to enforce correct coordinate spaces in image filter implementations and use,
+// geometry is wrapped by templated structs to declare in the type system what coordinate space
+// the coordinates are defined in.
+// - Currently there is ParameterSpace and DeviceSpace that are data-only wrappers around
+// coordinates, and the primary LayerSpace that provides all operative functionality for image
+// filters. It is intended that all logic about image bounds and access be conducted in the shared
+// layer space.
+// - The LayerSpace struct has type-safe specializations for SkIRect, SkRect, SkIPoint, SkPoint,
+// skif::IVector (to distinguish SkIVector from SkIPoint), skif::Vector, SkISize, and SkSize.
+// - A Mapping object provides type safe coordinate conversions between these spaces, and
+// automatically does the "right thing" for each geometric type.
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// ParameterSpace is a data-only wrapper around Skia's geometric types such as SkIPoint, and SkRect.
+// Parameter space is the same as the local coordinate space of an SkShader, or the coordinates
+// passed into SkCanvas::drawX calls, but "local" is avoided due to the alliteration with layer
+// space. SkImageFilters are defined in terms of ParameterSpace<T> geometry and must use the Mapping
+// on Context to transform the parameters into LayerSpace to evaluate the filter in the shared
+// coordinate space of the entire filter DAG.
+//
+// A value of ParameterSpace<SkIRect> implies that its wrapped SkIRect is defined in the local
+// parameter space.
+template<typename T>
+class ParameterSpace {
+public:
+ ParameterSpace() = default;
+ explicit ParameterSpace(const T& data) : fData(data) {}
+ explicit ParameterSpace(T&& data) : fData(std::move(data)) {}
+
+ explicit operator const T&() const { return fData; }
+
+ static const ParameterSpace<T>* Optional(const T* ptr) {
+ return static_cast<const ParameterSpace<T>*>(reinterpret_cast<const void*>(ptr));
+ }
+private:
+ T fData;
+};
+
+// DeviceSpace is a data-only wrapper around Skia's geometric types. It is similar to
+// 'ParameterSpace' except that it is used to represent geometry that has been transformed or
+// defined in the root device space (i.e. the final pixels of drawn content). Much of what SkCanvas
+// tracks, such as its clip bounds are defined in this space and DeviceSpace provides a
+// type-enforced mechanism for the canvas to pass that information into the image filtering system,
+// using the Mapping of the filtering context.
+template<typename T>
+class DeviceSpace {
+public:
+ DeviceSpace() = default;
+ explicit DeviceSpace(const T& data) : fData(data) {}
+ explicit DeviceSpace(T&& data) : fData(std::move(data)) {}
+
+ explicit operator const T&() const { return fData; }
+
+private:
+ T fData;
+};
+
+// LayerSpace is a geometric wrapper that specifies the geometry is defined in the shared layer
+// space where image filters are evaluated. For a given Context (and its Mapping), the image filter
+// DAG operates in the same coordinate space. This space may be different from the local coordinate
+// space that defined the image filter parameters (such as blur sigma), and it may be different
+// from the total CTM of the SkCanvas.
+//
+// To encourage correct filter use and implementation, the bulk of filter logic should be performed
+// in layer space (e.g. determining what portion of an input image to read, or what the output
+// region is). LayerSpace specializations for the six common Skia math types (Sk[I]Rect, Sk[I]Point,
+// and Sk[I]Size), and skif::[I]Vector (to allow vectors to be specialized separately from points))
+// are provided that mimic their APIs but preserve the coordinate space and enforce type semantics.
+template<typename T>
+class LayerSpace {};
+
+// Layer-space specialization for integerized direction vectors.
+template<>
+class LayerSpace<IVector> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const IVector& geometry) : fData(geometry) {}
+ explicit LayerSpace(IVector&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const IVector&() const { return fData; }
+
+ explicit operator SkIVector() const { return SkIVector::Make(fData.fX, fData.fY); }
+
+ int32_t x() const { return fData.fX; }
+ int32_t y() const { return fData.fY; }
+
+ LayerSpace<IVector> operator-() const { return LayerSpace<IVector>({-fData.fX, -fData.fY}); }
+
+ LayerSpace<IVector> operator+(const LayerSpace<IVector>& v) const {
+ LayerSpace<IVector> sum = *this;
+ sum += v;
+ return sum;
+ }
+ LayerSpace<IVector> operator-(const LayerSpace<IVector>& v) const {
+ LayerSpace<IVector> diff = *this;
+ diff -= v;
+ return diff;
+ }
+
+ void operator+=(const LayerSpace<IVector>& v) {
+ fData.fX += v.fData.fX;
+ fData.fY += v.fData.fY;
+ }
+ void operator-=(const LayerSpace<IVector>& v) {
+ fData.fX -= v.fData.fX;
+ fData.fY -= v.fData.fY;
+ }
+
+private:
+ IVector fData;
+};
+
+// Layer-space specialization for floating point direction vectors.
+template<>
+class LayerSpace<Vector> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const Vector& geometry) : fData(geometry) {}
+ explicit LayerSpace(Vector&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const Vector&() const { return fData; }
+
+ explicit operator SkVector() const { return SkVector::Make(fData.fX, fData.fY); }
+
+ SkScalar x() const { return fData.fX; }
+ SkScalar y() const { return fData.fY; }
+
+ SkScalar length() const { return SkVector::Length(fData.fX, fData.fY); }
+
+ LayerSpace<Vector> operator-() const { return LayerSpace<Vector>({-fData.fX, -fData.fY}); }
+
+ LayerSpace<Vector> operator*(SkScalar s) const {
+ LayerSpace<Vector> scaled = *this;
+ scaled *= s;
+ return scaled;
+ }
+
+ LayerSpace<Vector> operator+(const LayerSpace<Vector>& v) const {
+ LayerSpace<Vector> sum = *this;
+ sum += v;
+ return sum;
+ }
+ LayerSpace<Vector> operator-(const LayerSpace<Vector>& v) const {
+ LayerSpace<Vector> diff = *this;
+ diff -= v;
+ return diff;
+ }
+
+ void operator*=(SkScalar s) {
+ fData.fX *= s;
+ fData.fY *= s;
+ }
+ void operator+=(const LayerSpace<Vector>& v) {
+ fData.fX += v.fData.fX;
+ fData.fY += v.fData.fY;
+ }
+ void operator-=(const LayerSpace<Vector>& v) {
+ fData.fX -= v.fData.fX;
+ fData.fY -= v.fData.fY;
+ }
+
+ friend LayerSpace<Vector> operator*(SkScalar s, const LayerSpace<Vector>& b) {
+ return b * s;
+ }
+
+private:
+ Vector fData;
+};
+
+// Layer-space specialization for integer 2D coordinates (treated as positions, not directions).
+template<>
+class LayerSpace<SkIPoint> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkIPoint& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkIPoint&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkIPoint&() const { return fData; }
+
+ // Parrot the SkIPoint API while preserving coordinate space.
+ int32_t x() const { return fData.fX; }
+ int32_t y() const { return fData.fY; }
+
+ // Offsetting by direction vectors produce more points
+ LayerSpace<SkIPoint> operator+(const LayerSpace<IVector>& v) {
+ return LayerSpace<SkIPoint>(fData + SkIVector(v));
+ }
+ LayerSpace<SkIPoint> operator-(const LayerSpace<IVector>& v) {
+ return LayerSpace<SkIPoint>(fData - SkIVector(v));
+ }
+
+ void operator+=(const LayerSpace<IVector>& v) {
+ fData += SkIVector(v);
+ }
+ void operator-=(const LayerSpace<IVector>& v) {
+ fData -= SkIVector(v);
+ }
+
+ // Subtracting another point makes a direction between them
+ LayerSpace<IVector> operator-(const LayerSpace<SkIPoint>& p) {
+ return LayerSpace<IVector>(IVector(fData - p.fData));
+ }
+
+ LayerSpace<IVector> operator-() const { return LayerSpace<IVector>({-fData.fX, -fData.fY}); }
+
+private:
+ SkIPoint fData;
+};
+
+// Layer-space specialization for floating point 2D coordinates (treated as positions)
+template<>
+class LayerSpace<SkPoint> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkPoint& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkPoint&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkPoint&() const { return fData; }
+
+ // Parrot the SkPoint API while preserving coordinate space.
+ SkScalar x() const { return fData.fX; }
+ SkScalar y() const { return fData.fY; }
+
+ SkScalar distanceToOrigin() const { return fData.distanceToOrigin(); }
+
+ // Offsetting by direction vectors produce more points
+ LayerSpace<SkPoint> operator+(const LayerSpace<Vector>& v) {
+ return LayerSpace<SkPoint>(fData + SkVector(v));
+ }
+ LayerSpace<SkPoint> operator-(const LayerSpace<Vector>& v) {
+ return LayerSpace<SkPoint>(fData - SkVector(v));
+ }
+
+ void operator+=(const LayerSpace<Vector>& v) {
+ fData += SkVector(v);
+ }
+ void operator-=(const LayerSpace<Vector>& v) {
+ fData -= SkVector(v);
+ }
+
+ // Subtracting another point makes a direction between them
+ LayerSpace<Vector> operator-(const LayerSpace<SkPoint>& p) {
+ return LayerSpace<Vector>(Vector(fData - p.fData));
+ }
+
+ LayerSpace<Vector> operator-() const { return LayerSpace<Vector>({-fData.fX, -fData.fY}); }
+
+private:
+ SkPoint fData;
+};
+
+// Layer-space specialization for integer dimensions
+template<>
+class LayerSpace<SkISize> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkISize& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkISize&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkISize&() const { return fData; }
+
+ int32_t width() const { return fData.width(); }
+ int32_t height() const { return fData.height(); }
+
+ bool isEmpty() const { return fData.isEmpty(); }
+
+private:
+ SkISize fData;
+};
+
+// Layer-space specialization for floating point dimensions
+template<>
+class LayerSpace<SkSize> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkSize& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkSize&& geometry) : fData(std::move(geometry)) {}
+ explicit operator const SkSize&() const { return fData; }
+
+ SkScalar width() const { return fData.width(); }
+ SkScalar height() const { return fData.height(); }
+
+ bool isEmpty() const { return fData.isEmpty(); }
+ bool isZero() const { return fData.isZero(); }
+
+ LayerSpace<SkISize> round() const { return LayerSpace<SkISize>(fData.toRound()); }
+ LayerSpace<SkISize> ceil() const { return LayerSpace<SkISize>(fData.toCeil()); }
+ LayerSpace<SkISize> floor() const { return LayerSpace<SkISize>(fData.toFloor()); }
+
+private:
+ SkSize fData;
+};
+
+// Layer-space specialization for axis-aligned integer bounding boxes.
+template<>
+class LayerSpace<SkIRect> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkIRect& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkIRect&& geometry) : fData(std::move(geometry)) {}
+ explicit LayerSpace(const SkISize& size) : fData(SkIRect::MakeSize(size)) {}
+ explicit operator const SkIRect&() const { return fData; }
+
+ static LayerSpace<SkIRect> Empty() { return LayerSpace<SkIRect>(SkIRect::MakeEmpty()); }
+
+ // Parrot the SkIRect API while preserving coord space
+ bool isEmpty() const { return fData.isEmpty(); }
+ bool contains(const LayerSpace<SkIRect>& r) const { return fData.contains(r.fData); }
+
+ int32_t left() const { return fData.fLeft; }
+ int32_t top() const { return fData.fTop; }
+ int32_t right() const { return fData.fRight; }
+ int32_t bottom() const { return fData.fBottom; }
+
+ int32_t width() const { return fData.width(); }
+ int32_t height() const { return fData.height(); }
+
+ LayerSpace<SkIPoint> topLeft() const { return LayerSpace<SkIPoint>(fData.topLeft()); }
+ LayerSpace<SkISize> size() const { return LayerSpace<SkISize>(fData.size()); }
+
+ bool intersect(const LayerSpace<SkIRect>& r) { return fData.intersect(r.fData); }
+ void join(const LayerSpace<SkIRect>& r) { fData.join(r.fData); }
+ void offset(const LayerSpace<IVector>& v) { fData.offset(SkIVector(v)); }
+ void outset(const LayerSpace<SkISize>& delta) { fData.outset(delta.width(), delta.height()); }
+
+private:
+ SkIRect fData;
+};
+
+// Layer-space specialization for axis-aligned float bounding boxes.
+template<>
+class LayerSpace<SkRect> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkRect& geometry) : fData(geometry) {}
+ explicit LayerSpace(SkRect&& geometry) : fData(std::move(geometry)) {}
+ explicit LayerSpace(const LayerSpace<SkIRect>& rect) : fData(SkRect::Make(SkIRect(rect))) {}
+ explicit operator const SkRect&() const { return fData; }
+
+ static LayerSpace<SkRect> Empty() { return LayerSpace<SkRect>(SkRect::MakeEmpty()); }
+
+ // Parrot the SkRect API while preserving coord space and usage
+ bool isEmpty() const { return fData.isEmpty(); }
+ bool contains(const LayerSpace<SkRect>& r) const { return fData.contains(r.fData); }
+
+ SkScalar left() const { return fData.fLeft; }
+ SkScalar top() const { return fData.fTop; }
+ SkScalar right() const { return fData.fRight; }
+ SkScalar bottom() const { return fData.fBottom; }
+
+ SkScalar width() const { return fData.width(); }
+ SkScalar height() const { return fData.height(); }
+
+ LayerSpace<SkPoint> topLeft() const {
+ return LayerSpace<SkPoint>(SkPoint::Make(fData.fLeft, fData.fTop));
+ }
+ LayerSpace<SkSize> size() const {
+ return LayerSpace<SkSize>(SkSize::Make(fData.width(), fData.height()));
+ }
+
+ LayerSpace<SkIRect> round() const { return LayerSpace<SkIRect>(fData.round()); }
+ LayerSpace<SkIRect> roundIn() const { return LayerSpace<SkIRect>(RoundIn(fData)); }
+ LayerSpace<SkIRect> roundOut() const { return LayerSpace<SkIRect>(RoundOut(fData)); }
+
+ bool intersect(const LayerSpace<SkRect>& r) { return fData.intersect(r.fData); }
+ void join(const LayerSpace<SkRect>& r) { fData.join(r.fData); }
+ void offset(const LayerSpace<Vector>& v) { fData.offset(SkVector(v)); }
+ void outset(const LayerSpace<SkSize>& delta) { fData.outset(delta.width(), delta.height()); }
+
+private:
+ SkRect fData;
+};
+
+// A transformation that manipulates geometry in the layer-space coordinate system. Mathematically
+// there's little difference from these matrices compared to what's stored in a skif::Mapping, but
+// the intent differs. skif::Mapping's matrices map geometry from one coordinate space to another
+// while these transforms move geometry w/o changing the coordinate space semantics.
+// TODO(michaelludwig): Will be replaced with an SkM44 version when skif::Mapping works with SkM44.
+template<>
+class LayerSpace<SkMatrix> {
+public:
+ LayerSpace() = default;
+ explicit LayerSpace(const SkMatrix& m) : fData(m) {}
+ explicit LayerSpace(SkMatrix&& m) : fData(std::move(m)) {}
+ explicit operator const SkMatrix&() const { return fData; }
+
+ // Parrot a limited selection of the SkMatrix API while preserving coordinate space.
+ LayerSpace<SkRect> mapRect(const LayerSpace<SkRect>& r) const;
+
+ // Effectively mapRect(SkRect).roundOut() but more accurate when the underlying matrix or
+ // SkIRect has large floating point values.
+ LayerSpace<SkIRect> mapRect(const LayerSpace<SkIRect>& r) const;
+
+ LayerSpace<SkPoint> mapPoint(const LayerSpace<SkPoint>& p) const {
+ return LayerSpace<SkPoint>(fData.mapPoint(SkPoint(p)));
+ }
+
+ LayerSpace<Vector> mapVector(const LayerSpace<Vector>& v) const {
+ return LayerSpace<Vector>(Vector(fData.mapVector(v.x(), v.y())));
+ }
+
+ LayerSpace<SkMatrix>& preConcat(const LayerSpace<SkMatrix>& m) {
+ fData = SkMatrix::Concat(fData, m.fData);
+ return *this;
+ }
+
+ LayerSpace<SkMatrix>& postConcat(const LayerSpace<SkMatrix>& m) {
+ fData = SkMatrix::Concat(m.fData, fData);
+ return *this;
+ }
+
+ bool invert(LayerSpace<SkMatrix>* inverse) const {
+ return fData.invert(&inverse->fData);
+ }
+
+ float rc(int row, int col) const { return fData.rc(row, col); }
+ float get(int i) const { return fData.get(i); }
+
+private:
+ SkMatrix fData;
+};
+
+// Mapping is the primary definition of the shared layer space used when evaluating an image filter
+// DAG. It encapsulates any needed decomposition of the total CTM into the parameter-to-layer matrix
+// (that filters use to map their parameters to the layer space), and the layer-to-device matrix
+// (that canvas uses to map the output layer-space image into its root device space). Mapping
+// defines functions to transform ParameterSpace and DeviceSpace types to and from their LayerSpace
+// variants, which can then be used and reasoned about by SkImageFilter implementations.
+class Mapping {
+public:
+ Mapping() = default;
+
+ // Helper constructor that equates device and layer space to the same coordinate space.
+ explicit Mapping(const SkMatrix& paramToLayer)
+ : fLayerToDevMatrix(SkMatrix::I())
+ , fParamToLayerMatrix(paramToLayer)
+ , fDevToLayerMatrix(SkMatrix::I()) {}
+
+ // This constructor allows the decomposition to be explicitly provided, assumes that
+ // 'layerToDev's inverse has already been calculated in 'devToLayer'
+ Mapping(const SkMatrix& layerToDev, const SkMatrix& devToLayer, const SkMatrix& paramToLayer)
+ : fLayerToDevMatrix(layerToDev)
+ , fParamToLayerMatrix(paramToLayer)
+ , fDevToLayerMatrix(devToLayer) {}
+
+ // Sets this Mapping to the default decomposition of the canvas's total transform, given the
+ // requirements of the 'filter'. Returns false if the decomposition failed or would produce an
+ // invalid device matrix. Assumes 'ctm' is invertible.
+ bool SK_WARN_UNUSED_RESULT decomposeCTM(const SkMatrix& ctm,
+ const SkImageFilter* filter,
+ const skif::ParameterSpace<SkPoint>& representativePt);
+
+ // Update the mapping's parameter-to-layer matrix to be pre-concatenated with the specified
+ // local space transformation. This changes the definition of parameter space, any
+ // skif::ParameterSpace<> values are interpreted anew. Layer space and device space are
+ // unchanged.
+ void concatLocal(const SkMatrix& local) { fParamToLayerMatrix.preConcat(local); }
+
+ // Update the mapping's layer space coordinate system by post-concatenating the given matrix
+ // to it's parameter-to-layer transform, and pre-concatenating the inverse of the matrix with
+ // it's layer-to-device transform. The net effect is that neither the parameter nor device
+ // coordinate systems are changed, but skif::LayerSpace is adjusted.
+ //
+ // Returns false if the layer matrix cannot be inverted, and this mapping is left unmodified.
+ bool adjustLayerSpace(const SkMatrix& layer);
+
+ // Update the mapping's layer space so that the point 'origin' in the current layer coordinate
+ // space maps to (0, 0) in the adjusted coordinate space.
+ void applyOrigin(const LayerSpace<SkIPoint>& origin) {
+ SkAssertResult(this->adjustLayerSpace(SkMatrix::Translate(-origin.x(), -origin.y())));
+ }
+
+ const SkMatrix& layerToDevice() const { return fLayerToDevMatrix; }
+ const SkMatrix& deviceToLayer() const { return fDevToLayerMatrix; }
+ const SkMatrix& layerMatrix() const { return fParamToLayerMatrix; }
+ SkMatrix totalMatrix() const {
+ return SkMatrix::Concat(fLayerToDevMatrix, fParamToLayerMatrix);
+ }
+
+ template<typename T>
+ LayerSpace<T> paramToLayer(const ParameterSpace<T>& paramGeometry) const {
+ return LayerSpace<T>(map(static_cast<const T&>(paramGeometry), fParamToLayerMatrix));
+ }
+
+ template<typename T>
+ LayerSpace<T> deviceToLayer(const DeviceSpace<T>& devGeometry) const {
+ return LayerSpace<T>(map(static_cast<const T&>(devGeometry), fDevToLayerMatrix));
+ }
+
+ template<typename T>
+ DeviceSpace<T> layerToDevice(const LayerSpace<T>& layerGeometry) const {
+ return DeviceSpace<T>(map(static_cast<const T&>(layerGeometry), fLayerToDevMatrix));
+ }
+
+private:
+ // The image filter process decomposes the total CTM into layerToDev * paramToLayer and uses the
+ // param-to-layer matrix to define the layer-space coordinate system. Depending on how it's
+ // decomposed, either the layer matrix or the device matrix could be the identity matrix (but
+ // sometimes neither).
+ SkMatrix fLayerToDevMatrix;
+ SkMatrix fParamToLayerMatrix;
+
+ // Cached inverse of fLayerToDevMatrix
+ SkMatrix fDevToLayerMatrix;
+
+ // Actual geometric mapping operations that work on coordinates and matrices w/o the type
+ // safety of the coordinate space wrappers (hence these are private).
+ template<typename T>
+ static T map(const T& geom, const SkMatrix& matrix);
+};
+
+class Context; // Forward declare for FilterResult
+
+// Wraps an SkSpecialImage and metadata needed to rasterize it to a shared layer coordinate space.
+// This includes a transform matrix, sampling options, and clip. Frequently, the transform is an
+// integer translation that effectively places the origin of the image within the layer space. When
+// this is the case, the FilterResult's layerBounds have the same width and height as the subset
+// of the special image and translated to that origin. However, the transform of a FilterResult can
+// be arbitrary, in which case its layer bounds is the bounding box that would contain the sampled
+// image's contents.
+//
+// In order to collapse image filter nodes dynamically, FilterResult provides utilities to apply
+// operations (like transform or crop) that attempt to modify the metadata without producing an
+// intermediate image. Internally it tracks when a new image is needed and rasterizes as needed.
+//
+// When filter implementations are processing intermediate FilterResult results, it can be assumed
+// that all FilterResult' layerBounds are in the same coordinate space defined by the shared
+// skif::Context.
+//
+// NOTE: This is named FilterResult since most instances will represent the output of an image
+// filter (even if that is then used as an input to the next filter). The main exception is the
+// source input used when an input filter is null, but from a data-standpoint it is the same since
+// it is equivalent to the result of an identity filter.
+class FilterResult {
+ // Bilinear is used as the default because it can be downgraded to nearest-neighbor when the
+ // final transform is pixel-aligned, and chaining multiple bilinear samples and transforms is
+ // assumed to be visually close enough to sampling once at highest quality and final transform.
+ static constexpr SkSamplingOptions kDefaultSampling{SkFilterMode::kLinear};
+public:
+ FilterResult() : FilterResult(nullptr) {}
+
+ explicit FilterResult(sk_sp<SkSpecialImage> image)
+ : FilterResult(std::move(image), LayerSpace<SkIPoint>({0, 0})) {}
+
+ FilterResult(std::pair<sk_sp<SkSpecialImage>, LayerSpace<SkIPoint>> imageAndOrigin)
+ : FilterResult(std::move(std::get<0>(imageAndOrigin)), std::get<1>(imageAndOrigin)) {}
+
+ FilterResult(sk_sp<SkSpecialImage> image, const LayerSpace<SkIPoint>& origin)
+ : fImage(std::move(image))
+ , fSamplingOptions(kDefaultSampling)
+ , fTransform(SkMatrix::Translate(origin.x(), origin.y()))
+ , fLayerBounds(
+ fTransform.mapRect(LayerSpace<SkIRect>(fImage ? fImage->dimensions()
+ : SkISize{0, 0}))) {}
+
+ explicit operator bool() const { return SkToBool(fImage); }
+
+ // TODO(michaelludwig): Given the planned expansion of FilterResult state, it might be nice to
+ // pull this back and not expose anything other than its bounding box. This will be possible if
+ // all rendering can be handled by functions defined on FilterResult.
+ const SkSpecialImage* image() const { return fImage.get(); }
+ sk_sp<SkSpecialImage> refImage() const { return fImage; }
+
+ // Get the layer-space bounds of the result. This will incorporate any layer-space transform.
+ LayerSpace<SkIRect> layerBounds() const {
+ return fLayerBounds;
+ }
+
+ // Produce a new FilterResult that has been cropped to 'crop', taking into account the context's
+ // desired output. When possible, the returned FilterResult will reuse the underlying image and
+ // adjust its metadata. This will depend on the current transform and tile mode as well as how
+ // the crop rect intersects this result's layer bounds.
+ // TODO (michaelludwig): All FilterResults are decal mode and there are no current usages that
+ // require force-padding a decal FilterResult so these arguments aren't implemented yet.
+ FilterResult applyCrop(const Context& ctx,
+ const LayerSpace<SkIRect>& crop) const;
+ // SkTileMode newTileMode=SkTileMode::kDecal,
+ // bool forcePad=false) const;
+
+ // Produce a new FilterResult that is the transformation of this FilterResult. When this
+ // result's sampling and transform are compatible with the new transformation, the returned
+ // FilterResult can reuse the same image data and adjust just the metadata.
+ FilterResult applyTransform(const Context& ctx,
+ const LayerSpace<SkMatrix>& transform,
+ const SkSamplingOptions& sampling) const;
+
+ // Extract image and origin, safely when the image is null. If there are deferred operations
+ // on FilterResult (such as tiling or transforms) not representable as an image+origin pair,
+ // the returned image will be the resolution resulting from that metadata and not necessarily
+ // equal to the original 'image()'.
+ // TODO (michaelludwig) - This is intended for convenience until all call sites of
+ // SkImageFilter_Base::filterImage() have been updated to work in the new type system
+ // (which comes later as SkDevice, SkCanvas, etc. need to be modified, and coordinate space
+ // tagging needs to be added).
+ sk_sp<SkSpecialImage> imageAndOffset(SkIPoint* offset) const;
+
+private:
+ // Renders this FilterResult into a new, but visually equivalent, image that fills 'dstBounds',
+ // has nearest-neighbor sampling, and a transform that just translates by 'dstBounds' TL corner.
+ std::pair<sk_sp<SkSpecialImage>, LayerSpace<SkIPoint>>
+ resolve(LayerSpace<SkIRect> dstBounds) const;
+
+ // Update metadata to concat the given transform directly.
+ void concatTransform(const LayerSpace<SkMatrix>& transform,
+ const SkSamplingOptions& newSampling,
+ const LayerSpace<SkIRect>& outputBounds);
+
+ // The effective image of a FilterResult is 'fImage' sampled by 'fSamplingOptions' and
+ // respecting 'fTileMode' (on the SkSpecialImage's subset), transformed by 'fTransform', clipped
+ // to 'fLayerBounds'.
+ sk_sp<SkSpecialImage> fImage;
+ SkSamplingOptions fSamplingOptions;
+ // SkTileMode fTileMode = SkTileMode::kDecal;
+ // Typically this will be an integer translation that encodes the origin of the top left corner,
+ // but can become more complex when combined with applyTransform().
+ LayerSpace<SkMatrix> fTransform;
+
+ // The layer bounds are initially fImage's dimensions mapped by fTransform. As the filter result
+ // is processed by the image filter DAG, it can be further restricted by crop rects or the
+ // implicit desired output at each node.
+ LayerSpace<SkIRect> fLayerBounds;
+};
+
+// The context contains all necessary information to describe how the image filter should be
+// computed (i.e. the current layer matrix and clip), and the color information of the output of a
+// filter DAG. For now, this is just the color space (of the original requesting device). This is
+// used when constructing intermediate rendering surfaces, so that we ensure we land in a surface
+// that's similar/compatible to the final consumer of the DAG's output.
+class Context {
+public:
+ // Creates a context with the given layer matrix and destination clip, reading from 'source'
+ // with an origin of (0,0).
+ Context(const SkMatrix& layerMatrix, const SkIRect& clipBounds, SkImageFilterCache* cache,
+ SkColorType colorType, SkColorSpace* colorSpace, const SkSpecialImage* source)
+ : fMapping(layerMatrix)
+ , fDesiredOutput(clipBounds)
+ , fCache(cache)
+ , fColorType(colorType)
+ , fColorSpace(colorSpace)
+ , fSource(sk_ref_sp(source), LayerSpace<SkIPoint>({0, 0})) {}
+
+ Context(const Mapping& mapping, const LayerSpace<SkIRect>& desiredOutput,
+ SkImageFilterCache* cache, SkColorType colorType, SkColorSpace* colorSpace,
+ const FilterResult& source)
+ : fMapping(mapping)
+ , fDesiredOutput(desiredOutput)
+ , fCache(cache)
+ , fColorType(colorType)
+ , fColorSpace(colorSpace)
+ , fSource(source) {}
+
+ // The mapping that defines the transformation from local parameter space of the filters to the
+ // layer space where the image filters are evaluated, as well as the remaining transformation
+ // from the layer space to the final device space. The layer space defined by the returned
+ // Mapping may be the same as the root device space, or be an intermediate space that is
+ // supported by the image filter DAG (depending on what it returns from getCTMCapability()).
+ // If a node returns something other than kComplex from getCTMCapability(), the layer matrix of
+ // the mapping will respect that return value, and the remaining matrix will be appropriately
+ // set to transform the layer space to the final device space (applied by the SkCanvas when
+ // filtering is finished).
+ const Mapping& mapping() const { return fMapping; }
+ // DEPRECATED: Use mapping() and its coordinate-space types instead
+ const SkMatrix& ctm() const { return fMapping.layerMatrix(); }
+ // The bounds, in the layer space, that the filtered image will be clipped to. The output
+ // from filterImage() must cover these clip bounds, except in areas where it will just be
+ // transparent black, in which case a smaller output image can be returned.
+ const LayerSpace<SkIRect>& desiredOutput() const { return fDesiredOutput; }
+ // DEPRECATED: Use desiredOutput() instead
+ const SkIRect& clipBounds() const { return static_cast<const SkIRect&>(fDesiredOutput); }
+ // The cache to use when recursing through the filter DAG, in order to avoid repeated
+ // calculations of the same image.
+ SkImageFilterCache* cache() const { return fCache; }
+ // The output device's color type, which can be used for intermediate images to be
+ // compatible with the eventual target of the filtered result.
+ SkColorType colorType() const { return fColorType; }
+#if defined(SK_GANESH)
+ GrColorType grColorType() const { return SkColorTypeToGrColorType(fColorType); }
+#endif
+ // The output device's color space, so intermediate images can match, and so filtering can
+ // be performed in the destination color space.
+ SkColorSpace* colorSpace() const { return fColorSpace; }
+ sk_sp<SkColorSpace> refColorSpace() const { return sk_ref_sp(fColorSpace); }
+ // The default surface properties to use when making transient surfaces during filtering.
+ const SkSurfaceProps& surfaceProps() const { return fSource.image()->props(); }
+
+ // This is the image to use whenever an expected input filter has been set to null. In the
+ // majority of cases, this is the original source image for the image filter DAG so it comes
+ // from the SkDevice that holds either the saveLayer or the temporary rendered result. The
+ // exception is composing two image filters (via SkImageFilters::Compose), which must use
+ // the output of the inner DAG as the "source" for the outer DAG.
+ const FilterResult& source() const { return fSource; }
+ // DEPRECATED: Use source() instead to get both the image and its origin.
+ const SkSpecialImage* sourceImage() const { return fSource.image(); }
+
+ // True if image filtering should occur on the GPU if possible.
+ bool gpuBacked() const { return fSource.image()->isTextureBacked(); }
+ // The recording context to use when computing the filter with the GPU.
+ GrRecordingContext* getContext() const { return fSource.image()->getContext(); }
+
+ /**
+ * Since a context can be built directly, its constructor has no chance to "return null" if
+     * it's given invalid or unsupported inputs. Call this to know if the context can be
+ * used.
+ *
+ * The SkImageFilterCache Key, for example, requires a finite ctm (no infinities or NaN),
+ * so that test is part of isValid.
+ */
+ bool isValid() const { return fSource.image() != nullptr && fMapping.layerMatrix().isFinite(); }
+
+ // Create a surface of the given size, that matches the context's color type and color space
+ // as closely as possible, and uses the same backend of the device that produced the source
+ // image.
+ sk_sp<SkSpecialSurface> makeSurface(const SkISize& size,
+ const SkSurfaceProps* props = nullptr) const {
+ if (!props) {
+ props = &this->surfaceProps();
+ }
+ return fSource.image()->makeSurface(fColorType, fColorSpace, size,
+ kPremul_SkAlphaType, *props);
+ }
+
+ // Create a new context that matches this context, but with an overridden layer space.
+ Context withNewMapping(const Mapping& mapping) const {
+ return Context(mapping, fDesiredOutput, fCache, fColorType, fColorSpace, fSource);
+ }
+ // Create a new context that matches this context, but with an overridden desired output rect.
+ Context withNewDesiredOutput(const LayerSpace<SkIRect>& desiredOutput) const {
+ return Context(fMapping, desiredOutput, fCache, fColorType, fColorSpace, fSource);
+ }
+
+private:
+ Mapping fMapping;
+ LayerSpace<SkIRect> fDesiredOutput;
+ SkImageFilterCache* fCache;
+ SkColorType fColorType;
+ // The pointed-to object is owned by the device controlling the filter process, and our lifetime
+ // is bounded by the device, so this can be a bare pointer.
+ SkColorSpace* fColorSpace;
+ FilterResult fSource;
+};
+
+} // end namespace skif
+
+#endif // SkImageFilterTypes_DEFINED
diff --git a/gfx/skia/skia/src/core/SkImageFilter_Base.h b/gfx/skia/skia/src/core/SkImageFilter_Base.h
new file mode 100644
index 0000000000..48dfc94101
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageFilter_Base.h
@@ -0,0 +1,492 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageFilter_Base_DEFINED
+#define SkImageFilter_Base_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTemplates.h"
+
+#include "src/core/SkImageFilterTypes.h"
+
+class GrFragmentProcessor;
+class GrRecordingContext;
+
+// True base class that all SkImageFilter implementations need to extend from. This provides the
+// actual API surface that Skia will use to compute the filtered images.
+class SkImageFilter_Base : public SkImageFilter {
+public:
+ // DEPRECATED - Use skif::Context directly.
+ using Context = skif::Context;
+
+ /**
+ * Request a new filtered image to be created from the src image. The returned skif::Image
+ * provides both the pixel data and the origin point that it should be drawn at, relative to
+ * the layer space defined by the provided context.
+ *
+ * If the result image cannot be created, or the result would be transparent black, returns
+ * a skif::Image that has a null special image, in which its origin should be ignored.
+ *
+ * TODO: Right now the imagefilters sometimes return empty result bitmaps/
+ * specialimages. That doesn't seem quite right.
+ */
+ skif::FilterResult filterImage(const skif::Context& context) const;
+
+ /**
+ * Calculate the smallest-possible required layer bounds that would provide sufficient
+ * information to correctly compute the image filter for every pixel in the desired output
+ * bounds. The 'desiredOutput' is intended to represent either the root render target bounds,
+ * or the device-space bounds of the current clip. If the bounds of the content that will be
+ * drawn into the layer is known, 'knownContentBounds' should be provided, since it can be
+ * used to restrict the size of the layer if the image filter DAG does not affect transparent
+ * black.
+ *
+ * The returned rect is in the layer space defined by 'mapping', so it directly represents
+ * the size and location of the SkDevice created to rasterize the content prior to invoking the
+ * image filter (assuming its CTM and basis matrix are configured to match 'mapping').
+ *
+     * While this operation transforms a device-space output bounds to a layer-space input bounds,
+ * it is not necessarily the inverse of getOutputBounds(). For instance, a blur needs to have
+ * an outset margin when reading pixels at the edge (to satisfy its kernel), thus it expands
+ * its required input rect to include every pixel that contributes to the desired output rect.
+     *
+ * @param mapping The coordinate space mapping that defines both the transformation
+ * between local and layer, and layer to root device space, that will be
+ * used when the filter is later invoked.
+ * @param desiredOutput The desired output boundary that needs to be covered by the filter's
+ * output (assuming that the filter is then invoked with a suitable input)
+ * @param knownContentBounds
+ * Optional, the known layer-space bounds of the non-transparent content
+ * that would be rasterized in the source input image.
+ *
+ * @return The layer-space bounding box to use for an SkDevice when drawing the source image.
+ */
+ skif::LayerSpace<SkIRect> getInputBounds(
+ const skif::Mapping& mapping, const skif::DeviceSpace<SkIRect>& desiredOutput,
+ const skif::ParameterSpace<SkRect>* knownContentBounds) const;
+
+ /**
+ * Calculate the device-space bounds of the output of this filter DAG, if it were to process
+ * an image layer covering the 'contentBounds'. The 'mapping' defines how the content will be
+ * transformed to layer space when it is drawn, and how the output filter image is then
+ * transformed to the final device space (i.e. it specifies the mapping between the root device
+ * space and the parameter space of the initially provided content).
+ *
+     * While this operation transforms a parameter-space input bounds to a device-space output
+ * bounds, it is not necessarily the inverse of getInputBounds(). For instance, a blur needs to
+ * have an outset margin when reading pixels at the edge (to satisfy its kernel), so it will
+ * generate a result larger than its input (so that the blur is visible) and, thus, expands its
+ * output to include every pixel that it will touch.
+ *
+ * @param mapping The coordinate space mapping that defines both the transformation
+ * between local and layer, and layer to root device space, that will be
+ * used when the filter is later invoked.
+ * @param contentBounds The local-space bounds of the non-transparent content that would be
+ * drawn into the source image prior to filtering with this DAG, i.e.
+ * the same as 'knownContentBounds' in getInputBounds().
+ *
+ * @return The root device-space bounding box of the filtered image, were it applied to
+ * content contained by 'contentBounds' and then drawn with 'mapping' to the root
+ * device (w/o any additional clipping).
+ */
+ skif::DeviceSpace<SkIRect> getOutputBounds(
+ const skif::Mapping& mapping, const skif::ParameterSpace<SkRect>& contentBounds) const;
+
+ // Returns true if this image filter graph transforms a source transparent black pixel to a
+ // color other than transparent black.
+ bool affectsTransparentBlack() const;
+
+ /**
+ * Most ImageFilters can natively handle scaling and translate components in the CTM. Only
+ * some of them can handle affine (or more complex) matrices. Some may only handle translation.
+ * This call returns the maximum "kind" of CTM for a filter and all of its (non-null) inputs.
+ */
+ enum class MatrixCapability {
+ kTranslate,
+ kScaleTranslate,
+ kComplex,
+ };
+ MatrixCapability getCTMCapability() const;
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkImageFilter_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkImageFilter_Type;
+ }
+
+protected:
+ // DEPRECATED: Will be removed once cropping is handled by a standalone image filter
+ class CropRect {
+ public:
+ enum CropEdge {
+ kHasLeft_CropEdge = 0x01,
+ kHasTop_CropEdge = 0x02,
+ kHasWidth_CropEdge = 0x04,
+ kHasHeight_CropEdge = 0x08,
+ kHasAll_CropEdge = 0x0F,
+ };
+ CropRect() : fFlags(0) {}
+ explicit CropRect(const SkRect* rect)
+ : fRect(rect ? *rect : SkRect::MakeEmpty()), fFlags(rect ? kHasAll_CropEdge : 0x0) {}
+
+ // CropRect(const CropRect&) = default;
+
+ uint32_t flags() const { return fFlags; }
+ const SkRect& rect() const { return fRect; }
+
+ /**
+ * Apply this cropRect to the imageBounds. If a given edge of the cropRect is not set, then
+ * the corresponding edge from imageBounds will be used. If "embiggen" is true, the crop
+ * rect is allowed to enlarge the size of the rect, otherwise it may only reduce the rect.
+ * Filters that can affect transparent black should pass "true", while all other filters
+ * should pass "false".
+ *
+ * Note: imageBounds is in "device" space, as the output cropped rectangle will be, so the
+ * matrix is ignored for those. It is only applied to the cropRect's bounds.
+ */
+ void applyTo(const SkIRect& imageBounds, const SkMatrix& matrix, bool embiggen,
+ SkIRect* cropped) const;
+
+ private:
+ SkRect fRect;
+ uint32_t fFlags;
+ };
+
+ class Common {
+ public:
+ /**
+ * Attempt to unflatten the cropRect and the expected number of input filters.
+ * If any number of input filters is valid, pass -1.
+ * If this fails (i.e. corrupt buffer or contents) then return false and common will
+ * be left uninitialized.
+ * If this returns true, then inputCount() is the number of found input filters, each
+ * of which may be NULL or a valid imagefilter.
+ */
+ bool unflatten(SkReadBuffer&, int expectedInputs);
+
+ const SkRect* cropRect() const {
+ return fCropRect.flags() != 0x0 ? &fCropRect.rect() : nullptr;
+ }
+ int inputCount() const { return fInputs.size(); }
+ sk_sp<SkImageFilter>* inputs() { return fInputs.begin(); }
+
+ sk_sp<SkImageFilter> getInput(int index) { return fInputs[index]; }
+
+ private:
+ CropRect fCropRect;
+ // most filters accept at most 2 input-filters
+ SkSTArray<2, sk_sp<SkImageFilter>, true> fInputs;
+ };
+
+ // Whether or not to recurse to child input filters for certain operations that walk the DAG.
+ enum class VisitChildren : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ SkImageFilter_Base(sk_sp<SkImageFilter> const* inputs, int inputCount,
+ const SkRect* cropRect);
+
+ ~SkImageFilter_Base() override;
+
+ void flatten(SkWriteBuffer&) const override;
+
+ // DEPRECATED - Use the private context-only variant
+ virtual sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const {
+ return nullptr;
+ }
+
+ // DEPRECATED - Override onGetOutputLayerBounds and onGetInputLayerBounds instead. The
+ // node-specific and aggregation functions are no longer separated in the current API. A helper
+ // function is provided to do the default recursion for the common filter case.
+ virtual SkIRect onFilterBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const;
+ virtual SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const;
+
+    // DEPRECATED - Call the Context-only filterInput()
+ sk_sp<SkSpecialImage> filterInput(int index, const Context& ctx, SkIPoint* offset) const {
+ return this->filterInput(index, ctx).imageAndOffset(offset);
+ }
+
+ // Helper function to visit each of this filter's child filters and call their
+ // onGetInputLayerBounds with the provided 'desiredOutput' and 'contentBounds'. Automatically
+ // handles null input filters. Returns the union of all of the children's input bounds.
+ skif::LayerSpace<SkIRect> visitInputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds) const;
+ // Helper function to visit each of this filter's child filters and call their
+ // onGetOutputLayerBounds with the provided 'contentBounds'. Automatically handles null input
+ // filters.
+ skif::LayerSpace<SkIRect> visitOutputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& contentBounds) const;
+
+ // Helper function for recursing through the filter DAG. It automatically evaluates the input
+ // image filter at 'index' using the given context. If the input image filter is null, it
+ // automatically returns the context's dynamic source image.
+ //
+ // Implementations must handle cases when the input filter was unable to compute an image and
+ // the returned skif::Image has a null SkSpecialImage. If the filter affects transparent black,
+ // it should treat null results or images that do not fully cover the requested output bounds as
+ // being transparent black in those regions. Filters that do not affect transparent black can
+ // exit early since the null image would remain transparent.
+ skif::FilterResult filterInput(int index, const skif::Context& ctx) const;
+
+ /**
+ * Returns whether any edges of the crop rect have been set. The crop
+ * rect is set at construction time, and determines which pixels from the
+ * input image will be processed, and which pixels in the output image will be allowed.
+ * The size of the crop rect should be
+ * used as the size of the destination image. The origin of this rect
+ * should be used to offset access to the input images, and should also
+ * be added to the "offset" parameter in onFilterImage.
+ *
+ * DEPRECATED - Remove once cropping is handled by a separate filter
+ */
+ bool cropRectIsSet() const { return fCropRect.flags() != 0x0; }
+
+ // DEPRECATED - Remove once cropping is handled by a separate filter
+ CropRect getCropRect() const { return fCropRect; }
+
+ // DEPRECATED - Remove once cropping is handled by a separate filter
+ const CropRect* getCropRectIfSet() const {
+ return this->cropRectIsSet() ? &fCropRect : nullptr;
+ }
+
+ /** Given a "srcBounds" rect, computes destination bounds for this filter.
+ * "dstBounds" are computed by transforming the crop rect by the context's
+ * CTM, applying it to the initial bounds, and intersecting the result with
+ * the context's clip bounds. "srcBounds" (if non-null) are computed by
+ * intersecting the initial bounds with "dstBounds", to ensure that we never
+ * sample outside of the crop rect (this restriction may be relaxed in the
+ * future).
+ *
+ * DEPRECATED - Remove once cropping is handled by a separate filter, although it may be
+ * necessary to provide a similar convenience function to compute the output bounds given the
+ * images returned by filterInput().
+ */
+ bool applyCropRect(const Context&, const SkIRect& srcBounds, SkIRect* dstBounds) const;
+
+ /** A variant of the above call which takes the original source bitmap and
+ * source offset. If the resulting crop rect is not entirely contained by
+ * the source bitmap's bounds, it creates a new bitmap in "result" and
+ * pads the edges with transparent black. In that case, the srcOffset is
+ * modified to be the same as the bounds, since no further adjustment is
+ * needed by the caller. This version should only be used by filters
+ * which are not capable of processing a smaller source bitmap into a
+ * larger destination.
+ *
+ * DEPRECATED - Remove once cropping is handled by a separate filter.
+ */
+ sk_sp<SkSpecialImage> applyCropRectAndPad(const Context&, SkSpecialImage* src,
+ SkIPoint* srcOffset, SkIRect* bounds) const;
+
+ /**
+ * Creates a modified Context for use when recursing up the image filter DAG.
+ * The clip bounds are adjusted to accommodate any margins that this
+ * filter requires by calling this node's
+ * onFilterNodeBounds(..., kReverse_MapDirection).
+ */
+ // TODO (michaelludwig) - I don't think this is necessary to keep as protected. Other than the
+ // real use case in recursing through the DAG for filterInput(), it feels wrong for blur and
+ // other filters to need to call it.
+ Context mapContext(const Context& ctx) const;
+
+#if defined(SK_GANESH)
+ static sk_sp<SkSpecialImage> DrawWithFP(GrRecordingContext* context,
+ std::unique_ptr<GrFragmentProcessor> fp,
+ const SkIRect& bounds,
+ SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ const SkSurfaceProps&,
+ GrSurfaceOrigin surfaceOrigin,
+ GrProtected isProtected = GrProtected::kNo);
+
+ /**
+ * Returns a version of the passed-in image (possibly the original), that is in a colorspace
+ * with the same gamut as the one from the OutputProperties. This allows filters that do many
+ * texture samples to guarantee that any color space conversion has happened before running.
+ */
+ static sk_sp<SkSpecialImage> ImageToColorSpace(SkSpecialImage* src,
+ SkColorType colorType,
+ SkColorSpace* colorSpace,
+ const SkSurfaceProps&);
+#endif
+
+ // If 'srcBounds' will sample outside the border of 'originalSrcBounds' (i.e., the sample
+ // will wrap around to the other side) we must preserve the far side of the src along that
+ // axis (e.g., if we will sample beyond the left edge of the src, the right side must be
+ // preserved for the repeat sampling to work).
+ // DEPRECATED - Remove once cropping is handled by a separate filter, that can also handle all
+ // tile modes (including repeat) properly
+ static SkIRect DetermineRepeatedSrcBound(const SkIRect& srcBounds,
+ const SkIVector& filterOffset,
+ const SkISize& filterSize,
+ const SkIRect& originalSrcBounds);
+
+private:
+ friend class SkImageFilter;
+ // For PurgeCache()
+ friend class SkGraphics;
+
+ static void PurgeCache();
+
+ // Configuration points for the filter implementation, marked private since they should not
+ // need to be invoked by the subclasses. These refer to the node's specific behavior and are
+ // not responsible for aggregating the behavior of the entire filter DAG.
+
+ /**
+ * Return true (and returns a ref'd colorfilter) if this node in the DAG is just a colorfilter
+ * w/o CropRect constraints.
+ */
+ virtual bool onIsColorFilterNode(SkColorFilter** /*filterPtr*/) const { return false; }
+
+ /**
+ * Return the most complex matrix type this filter can support (mapping from its parameter
+ * space to a layer space). If this returns anything less than kComplex, the filter only needs
+ * to worry about mapping from parameter to layer using a matrix that is constrained in that
+ * way (eg, scale+translate).
+ */
+ virtual MatrixCapability onGetCTMCapability() const {
+ return MatrixCapability::kScaleTranslate;
+ }
+
+ /**
+ * Return true if this filter would transform transparent black pixels to a color other than
+ * transparent black. When false, optimizations can be taken to discard regions known to be
+ * transparent black and thus process fewer pixels.
+ */
+ virtual bool onAffectsTransparentBlack() const { return false; }
+
+ /**
+ * This is the virtual which should be overridden by the derived class to perform image
+ * filtering. Subclasses are responsible for recursing to their input filters, although the
+ * filterInput() function is provided to handle all necessary details of this.
+ *
+ * If the image cannot be created (either because of an error or if the result would be empty
+ * because it was clipped out), this should return a filtered Image with a null SkSpecialImage.
+ * In these situations, callers that do not affect transparent black can end early, since the
+ * "transparent" implicit image would be unchanged. Callers that affect transparent black need
+ * to safely handle these null and empty images and return an image filling the context's clip
+ * bounds as if its input filtered image were transparent black.
+ */
+ virtual skif::FilterResult onFilterImage(const skif::Context& context) const;
+
+ /**
+ * Calculates the necessary input layer size in order for the final output of the filter to
+ * cover the desired output bounds. The provided 'desiredOutput' represents the requested
+ * input bounds for this node's parent filter node, i.e. this function answers "what does this
+ * node require for input in order to satisfy (as its own output), the input needs of its
+ * parent?".
+ *
+ * If 'recurse' is true, this function is responsible for recursing to its child image filters
+ * and accounting for what they require to meet this filter's input requirements. It is up to
+ * the filter to determine how to aggregate these inputs, but a helper function is provided for
+ * the common case where the final required layer size is the union of the child filters'
+ * required inputs, evaluated on what this filter requires for itself. 'recurse' is kNo
+ * when mapping Contexts while actually filtering images, since the child recursion is
+ * happening at a higher level.
+ *
+ * Unlike the public getInputBounds(), all internal bounds calculations are done in the shared
+ * layer space defined by 'mapping'.
+ *
+     * The default implementation assumes that the current filter requires an input equal to
+ * 'desiredOutputBounds', and passes this down to its child filters, and returns the union of
+ * their required inputs.
+ */
+ virtual skif::LayerSpace<SkIRect> onGetInputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds,
+ VisitChildren recurse = VisitChildren::kYes) const;
+
+ /**
+ * Calculates the output bounds that this filter node would touch when processing an input
+ * sized to 'contentBounds'. This function is responsible for recursing to its child image
+ * filters and accounting for what they output. It is up to the filter to determine how to
+ * aggregate the outputs of its children, but a helper function is provided for the common
+ * case where the filter output is the union of its child outputs.
+ *
+ * Unlike the public getOutputBounds(), all internal bounds calculations are done in the
+ * shared layer space defined by 'mapping'.
+ *
+ * The default implementation assumes that the output of this filter is equal to the union of
+ * the outputs of its child filters evaluated with 'contentBounds'.
+ */
+ // TODO (michaelludwig) - When layerMatrix = I, this function could be used to implement
+    // onComputeFastBounds() instead of making filters implement essentially the same calcs x2
+ virtual skif::LayerSpace<SkIRect> onGetOutputLayerBounds(
+ const skif::Mapping& mapping, const skif::LayerSpace<SkIRect>& contentBounds) const;
+
+ skia_private::AutoSTArray<2, sk_sp<SkImageFilter>> fInputs;
+
+ bool fUsesSrcInput;
+ CropRect fCropRect;
+ uint32_t fUniqueID; // Globally unique
+
+ using INHERITED = SkImageFilter;
+};
+
+static inline SkImageFilter_Base* as_IFB(SkImageFilter* filter) {
+ return static_cast<SkImageFilter_Base*>(filter);
+}
+
+static inline SkImageFilter_Base* as_IFB(const sk_sp<SkImageFilter>& filter) {
+ return static_cast<SkImageFilter_Base*>(filter.get());
+}
+
+static inline const SkImageFilter_Base* as_IFB(const SkImageFilter* filter) {
+ return static_cast<const SkImageFilter_Base*>(filter);
+}
+
+/**
+ * Helper to unflatten the common data, and return nullptr if we fail.
+ */
+#define SK_IMAGEFILTER_UNFLATTEN_COMMON(localVar, expectedCount) \
+ Common localVar; \
+ do { \
+ if (!localVar.unflatten(buffer, expectedCount)) { \
+ return nullptr; \
+ } \
+ } while (0)
+
+
+/**
+ * All image filter implementations defined for the include/effects/SkImageFilters.h factories
+ * are entirely encapsulated within their own CPP files. SkFlattenable deserialization needs a hook
+ * into these types, so their registration functions are exposed here.
+ */
+void SkRegisterAlphaThresholdImageFilterFlattenable();
+void SkRegisterArithmeticImageFilterFlattenable();
+void SkRegisterBlendImageFilterFlattenable();
+void SkRegisterBlurImageFilterFlattenable();
+void SkRegisterColorFilterImageFilterFlattenable();
+void SkRegisterComposeImageFilterFlattenable();
+void SkRegisterCropImageFilterFlattenable();
+void SkRegisterDisplacementMapImageFilterFlattenable();
+void SkRegisterDropShadowImageFilterFlattenable();
+void SkRegisterImageImageFilterFlattenable();
+void SkRegisterLightingImageFilterFlattenables();
+void SkRegisterMagnifierImageFilterFlattenable();
+void SkRegisterMatrixConvolutionImageFilterFlattenable();
+void SkRegisterMatrixTransformImageFilterFlattenable();
+void SkRegisterMergeImageFilterFlattenable();
+void SkRegisterMorphologyImageFilterFlattenables();
+void SkRegisterPictureImageFilterFlattenable();
+#ifdef SK_ENABLE_SKSL
+void SkRegisterRuntimeImageFilterFlattenable();
+#endif
+void SkRegisterShaderImageFilterFlattenable();
+void SkRegisterTileImageFilterFlattenable();
+
+#endif // SkImageFilter_Base_DEFINED
diff --git a/gfx/skia/skia/src/core/SkImageGenerator.cpp b/gfx/skia/skia/src/core/SkImageGenerator.cpp
new file mode 100644
index 0000000000..9c15b5654d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageGenerator.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageGenerator.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkGraphics.h"
+#include "include/core/SkSize.h"
+#include "include/private/base/SkAssert.h"
+#include "src/core/SkNextID.h"
+
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#endif
+
+SkImageGenerator::SkImageGenerator(const SkImageInfo& info, uint32_t uniqueID)
+ : fInfo(info)
+ , fUniqueID(kNeedNewImageUniqueID == uniqueID ? SkNextID::ImageID() : uniqueID)
+{}
+
+bool SkImageGenerator::getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes) {
+ if (kUnknown_SkColorType == info.colorType()) {
+ return false;
+ }
+ if (nullptr == pixels) {
+ return false;
+ }
+ if (rowBytes < info.minRowBytes()) {
+ return false;
+ }
+
+ Options defaultOpts;
+ return this->onGetPixels(info, pixels, rowBytes, defaultOpts);
+}
+
+bool SkImageGenerator::queryYUVAInfo(const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes,
+ SkYUVAPixmapInfo* yuvaPixmapInfo) const {
+ SkASSERT(yuvaPixmapInfo);
+
+ return this->onQueryYUVAInfo(supportedDataTypes, yuvaPixmapInfo) &&
+ yuvaPixmapInfo->isSupported(supportedDataTypes);
+}
+
+bool SkImageGenerator::getYUVAPlanes(const SkYUVAPixmaps& yuvaPixmaps) {
+ return this->onGetYUVAPlanes(yuvaPixmaps);
+}
+
+#if defined(SK_GANESH)
+GrSurfaceProxyView SkImageGenerator::generateTexture(GrRecordingContext* ctx,
+ const SkImageInfo& info,
+ GrMipmapped mipmapped,
+ GrImageTexGenPolicy texGenPolicy) {
+ SkASSERT_RELEASE(fInfo.dimensions() == info.dimensions());
+
+ if (!ctx || ctx->abandoned()) {
+ return {};
+ }
+
+ return this->onGenerateTexture(ctx, info, mipmapped, texGenPolicy);
+}
+
+GrSurfaceProxyView SkImageGenerator::onGenerateTexture(GrRecordingContext*,
+ const SkImageInfo&,
+ GrMipmapped,
+ GrImageTexGenPolicy) {
+ return {};
+}
+#endif // defined(SK_GANESH)
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Image_Graphite.h"
+
+sk_sp<SkImage> SkImageGenerator::makeTextureImage(skgpu::graphite::Recorder* recorder,
+ const SkImageInfo& info,
+ skgpu::Mipmapped mipmapped) {
+ // This still allows for a difference in colorType and colorSpace. Just no subsetting.
+ if (fInfo.dimensions() != info.dimensions()) {
+ return nullptr;
+ }
+
+ return this->onMakeTextureImage(recorder, info, mipmapped);
+}
+
+sk_sp<SkImage> SkImageGenerator::onMakeTextureImage(skgpu::graphite::Recorder*,
+ const SkImageInfo&,
+ skgpu::Mipmapped) {
+ return nullptr;
+}
+
+#endif // SK_GRAPHITE
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static SkGraphics::ImageGeneratorFromEncodedDataFactory gFactory;
+
+SkGraphics::ImageGeneratorFromEncodedDataFactory
+SkGraphics::SetImageGeneratorFromEncodedDataFactory(ImageGeneratorFromEncodedDataFactory factory)
+{
+ ImageGeneratorFromEncodedDataFactory prev = gFactory;
+ gFactory = factory;
+ return prev;
+}
+
+std::unique_ptr<SkImageGenerator> SkImageGenerator::MakeFromEncoded(
+ sk_sp<SkData> data, std::optional<SkAlphaType> at) {
+ if (!data || at == kOpaque_SkAlphaType) {
+ return nullptr;
+ }
+ if (gFactory) {
+ if (std::unique_ptr<SkImageGenerator> generator = gFactory(data)) {
+ return generator;
+ }
+ }
+ return SkImageGenerator::MakeFromEncodedImpl(std::move(data), at);
+}
diff --git a/gfx/skia/skia/src/core/SkImageInfo.cpp b/gfx/skia/skia/src/core/SkImageInfo.cpp
new file mode 100644
index 0000000000..b717c07d97
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageInfo.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageInfo.h"
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorSpace.h"
+#include "include/private/base/SkAssert.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkImageInfoPriv.h"
+
+int SkColorTypeBytesPerPixel(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return 0;
+ case kAlpha_8_SkColorType: return 1;
+ case kRGB_565_SkColorType: return 2;
+ case kARGB_4444_SkColorType: return 2;
+ case kRGBA_8888_SkColorType: return 4;
+ case kBGRA_8888_SkColorType: return 4;
+ case kRGB_888x_SkColorType: return 4;
+ case kRGBA_1010102_SkColorType: return 4;
+ case kRGB_101010x_SkColorType: return 4;
+ case kBGRA_1010102_SkColorType: return 4;
+ case kBGR_101010x_SkColorType: return 4;
+ case kBGR_101010x_XR_SkColorType: return 4;
+ case kGray_8_SkColorType: return 1;
+ case kRGBA_F16Norm_SkColorType: return 8;
+ case kRGBA_F16_SkColorType: return 8;
+ case kRGBA_F32_SkColorType: return 16;
+ case kR8G8_unorm_SkColorType: return 2;
+ case kA16_unorm_SkColorType: return 2;
+ case kR16G16_unorm_SkColorType: return 4;
+ case kA16_float_SkColorType: return 2;
+ case kR16G16_float_SkColorType: return 4;
+ case kR16G16B16A16_unorm_SkColorType: return 8;
+ case kSRGBA_8888_SkColorType: return 4;
+ case kR8_unorm_SkColorType: return 1;
+ }
+ SkUNREACHABLE;
+}
+
+bool SkColorTypeIsAlwaysOpaque(SkColorType ct) {
+ return !(SkColorTypeChannelFlags(ct) & kAlpha_SkColorChannelFlag);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkColorInfo::SkColorInfo() = default;
+SkColorInfo::~SkColorInfo() = default;
+
+SkColorInfo::SkColorInfo(SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs)
+ : fColorSpace(std::move(cs)), fColorType(ct), fAlphaType(at) {}
+
+SkColorInfo::SkColorInfo(const SkColorInfo&) = default;
+SkColorInfo::SkColorInfo(SkColorInfo&&) = default;
+
+SkColorInfo& SkColorInfo::operator=(const SkColorInfo&) = default;
+SkColorInfo& SkColorInfo::operator=(SkColorInfo&&) = default;
+
+SkColorSpace* SkColorInfo::colorSpace() const { return fColorSpace.get(); }
+sk_sp<SkColorSpace> SkColorInfo::refColorSpace() const { return fColorSpace; }
+
+bool SkColorInfo::operator==(const SkColorInfo& other) const {
+ return fColorType == other.fColorType && fAlphaType == other.fAlphaType &&
+ SkColorSpace::Equals(fColorSpace.get(), other.fColorSpace.get());
+}
+
+bool SkColorInfo::operator!=(const SkColorInfo& other) const { return !(*this == other); }
+
+SkColorInfo SkColorInfo::makeAlphaType(SkAlphaType newAlphaType) const {
+ return SkColorInfo(this->colorType(), newAlphaType, this->refColorSpace());
+}
+
+SkColorInfo SkColorInfo::makeColorType(SkColorType newColorType) const {
+ return SkColorInfo(newColorType, this->alphaType(), this->refColorSpace());
+}
+
+SkColorInfo SkColorInfo::makeColorSpace(sk_sp<SkColorSpace> cs) const {
+ return SkColorInfo(this->colorType(), this->alphaType(), std::move(cs));
+}
+
+int SkColorInfo::bytesPerPixel() const { return SkColorTypeBytesPerPixel(fColorType); }
+
+bool SkColorInfo::gammaCloseToSRGB() const {
+ return fColorSpace && fColorSpace->gammaCloseToSRGB();
+}
+
+int SkColorInfo::shiftPerPixel() const { return SkColorTypeShiftPerPixel(fColorType); }
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkImageInfo::computeOffset(int x, int y, size_t rowBytes) const {
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+ return SkColorTypeComputeOffset(this->colorType(), x, y, rowBytes);
+}
+
+size_t SkImageInfo::computeByteSize(size_t rowBytes) const {
+ if (0 == this->height()) {
+ return 0;
+ }
+ SkSafeMath safe;
+ size_t bytes = safe.add(safe.mul(safe.addInt(this->height(), -1), rowBytes),
+ safe.mul(this->width(), this->bytesPerPixel()));
+
+ // The CPU backend implements some memory operations on images using instructions that take a
+ // signed 32-bit offset from the base. If we ever make an image larger than that, overflow can
+ // cause us to read/write memory that starts 2GB *before* the buffer. (crbug.com/1264705)
+ constexpr size_t kMaxSigned32BitSize = SK_MaxS32;
+ return (safe.ok() && (bytes <= kMaxSigned32BitSize)) ? bytes : SIZE_MAX;
+}
+
+SkColorSpace* SkImageInfo::colorSpace() const { return fColorInfo.colorSpace(); }
+
+sk_sp<SkColorSpace> SkImageInfo::refColorSpace() const { return fColorInfo.refColorSpace(); }
+
+SkImageInfo SkImageInfo::makeColorSpace(sk_sp<SkColorSpace> cs) const {
+ return Make(fDimensions, fColorInfo.makeColorSpace(std::move(cs)));
+}
+
+SkImageInfo SkImageInfo::Make(int width, int height, SkColorType ct, SkAlphaType at) {
+ return Make(width, height, ct, at, nullptr);
+}
+
+SkImageInfo SkImageInfo::Make(int width, int height, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs) {
+ return SkImageInfo({width, height}, {ct, at, std::move(cs)});
+}
+
+SkImageInfo SkImageInfo::Make(SkISize dimensions, SkColorType ct, SkAlphaType at) {
+ return Make(dimensions, ct, at, nullptr);
+}
+
+SkImageInfo SkImageInfo::Make(SkISize dimensions, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs) {
+ return SkImageInfo(dimensions, {ct, at, std::move(cs)});
+}
+
+SkImageInfo SkImageInfo::MakeN32(int width, int height, SkAlphaType at) {
+ return MakeN32(width, height, at, nullptr);
+}
+
+SkImageInfo SkImageInfo::MakeN32(int width, int height, SkAlphaType at, sk_sp<SkColorSpace> cs) {
+ return Make({width, height}, kN32_SkColorType, at, std::move(cs));
+}
+
+SkImageInfo SkImageInfo::MakeS32(int width, int height, SkAlphaType at) {
+ return SkImageInfo({width, height}, {kN32_SkColorType, at, SkColorSpace::MakeSRGB()});
+}
+
+SkImageInfo SkImageInfo::MakeN32Premul(int width, int height) {
+ return MakeN32Premul(width, height, nullptr);
+}
+
+SkImageInfo SkImageInfo::MakeN32Premul(int width, int height, sk_sp<SkColorSpace> cs) {
+ return Make({width, height}, kN32_SkColorType, kPremul_SkAlphaType, std::move(cs));
+}
+
+SkImageInfo SkImageInfo::MakeN32Premul(SkISize dimensions) {
+ return MakeN32Premul(dimensions, nullptr);
+}
+
+SkImageInfo SkImageInfo::MakeN32Premul(SkISize dimensions, sk_sp<SkColorSpace> cs) {
+ return Make(dimensions, kN32_SkColorType, kPremul_SkAlphaType, std::move(cs));
+}
+
+SkImageInfo SkImageInfo::MakeA8(int width, int height) {
+ return Make({width, height}, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr);
+}
+
+SkImageInfo SkImageInfo::MakeA8(SkISize dimensions) {
+ return Make(dimensions, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr);
+}
+
+SkImageInfo SkImageInfo::MakeUnknown(int width, int height) {
+ return Make({width, height}, kUnknown_SkColorType, kUnknown_SkAlphaType, nullptr);
+}
+
+#ifdef SK_DEBUG
+void SkImageInfo::validate() const {
+ SkASSERT(fDimensions.width() >= 0);
+ SkASSERT(fDimensions.height() >= 0);
+ SkASSERT(SkColorTypeIsValid(this->colorType()));
+ SkASSERT(SkAlphaTypeIsValid(this->alphaType()));
+}
+#endif
+
+bool SkColorTypeValidateAlphaType(SkColorType colorType, SkAlphaType alphaType,
+ SkAlphaType* canonical) {
+ switch (colorType) {
+ case kUnknown_SkColorType:
+ alphaType = kUnknown_SkAlphaType;
+ break;
+ case kAlpha_8_SkColorType: // fall-through
+ case kA16_unorm_SkColorType: // fall-through
+ case kA16_float_SkColorType:
+ if (kUnpremul_SkAlphaType == alphaType) {
+ alphaType = kPremul_SkAlphaType;
+ }
+ [[fallthrough]];
+ case kARGB_4444_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kSRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kRGBA_1010102_SkColorType:
+ case kBGRA_1010102_SkColorType:
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType:
+ case kRGBA_F32_SkColorType:
+ case kR16G16B16A16_unorm_SkColorType:
+ if (kUnknown_SkAlphaType == alphaType) {
+ return false;
+ }
+ break;
+ case kGray_8_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kRGB_565_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGB_101010x_SkColorType:
+ case kBGR_101010x_SkColorType:
+ case kBGR_101010x_XR_SkColorType:
+ case kR8_unorm_SkColorType:
+ alphaType = kOpaque_SkAlphaType;
+ break;
+ }
+ if (canonical) {
+ *canonical = alphaType;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkImageInfoPriv.h b/gfx/skia/skia/src/core/SkImageInfoPriv.h
new file mode 100644
index 0000000000..2d42d5fdc5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImageInfoPriv.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageInfoPriv_DEFINED
+#define SkImageInfoPriv_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkImageInfo.h"
+
+static inline uint32_t SkColorTypeChannelFlags(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return 0;
+ case kAlpha_8_SkColorType: return kAlpha_SkColorChannelFlag;
+ case kRGB_565_SkColorType: return kRGB_SkColorChannelFlags;
+ case kARGB_4444_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kRGBA_8888_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kRGB_888x_SkColorType: return kRGB_SkColorChannelFlags;
+ case kBGRA_8888_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kRGBA_1010102_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kRGB_101010x_SkColorType: return kRGB_SkColorChannelFlags;
+ case kBGRA_1010102_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kBGR_101010x_SkColorType: return kRGB_SkColorChannelFlags;
+ case kBGR_101010x_XR_SkColorType: return kRGB_SkColorChannelFlags;
+ case kGray_8_SkColorType: return kGray_SkColorChannelFlag;
+ case kRGBA_F16Norm_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kRGBA_F16_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kRGBA_F32_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kR8G8_unorm_SkColorType: return kRG_SkColorChannelFlags;
+ case kA16_unorm_SkColorType: return kAlpha_SkColorChannelFlag;
+ case kR16G16_unorm_SkColorType: return kRG_SkColorChannelFlags;
+ case kA16_float_SkColorType: return kAlpha_SkColorChannelFlag;
+ case kR16G16_float_SkColorType: return kRG_SkColorChannelFlags;
+ case kR16G16B16A16_unorm_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kSRGBA_8888_SkColorType: return kRGBA_SkColorChannelFlags;
+ case kR8_unorm_SkColorType: return kRed_SkColorChannelFlag;
+ }
+ SkUNREACHABLE;
+}
+
+static inline bool SkColorTypeIsAlphaOnly(SkColorType ct) {
+ return SkColorTypeChannelFlags(ct) == kAlpha_SkColorChannelFlag;
+}
+
+static inline bool SkAlphaTypeIsValid(unsigned value) {
+ return value <= kLastEnum_SkAlphaType;
+}
+
+static int SkColorTypeShiftPerPixel(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType: return 0;
+ case kAlpha_8_SkColorType: return 0;
+ case kRGB_565_SkColorType: return 1;
+ case kARGB_4444_SkColorType: return 1;
+ case kRGBA_8888_SkColorType: return 2;
+ case kRGB_888x_SkColorType: return 2;
+ case kBGRA_8888_SkColorType: return 2;
+ case kRGBA_1010102_SkColorType: return 2;
+ case kRGB_101010x_SkColorType: return 2;
+ case kBGRA_1010102_SkColorType: return 2;
+ case kBGR_101010x_SkColorType: return 2;
+ case kBGR_101010x_XR_SkColorType: return 2;
+ case kGray_8_SkColorType: return 0;
+ case kRGBA_F16Norm_SkColorType: return 3;
+ case kRGBA_F16_SkColorType: return 3;
+ case kRGBA_F32_SkColorType: return 4;
+ case kR8G8_unorm_SkColorType: return 1;
+ case kA16_unorm_SkColorType: return 1;
+ case kR16G16_unorm_SkColorType: return 2;
+ case kA16_float_SkColorType: return 1;
+ case kR16G16_float_SkColorType: return 2;
+ case kR16G16B16A16_unorm_SkColorType: return 3;
+ case kSRGBA_8888_SkColorType: return 2;
+ case kR8_unorm_SkColorType: return 0;
+ }
+ SkUNREACHABLE;
+}
+
+static inline size_t SkColorTypeMinRowBytes(SkColorType ct, int width) {
+ return (size_t)(width * SkColorTypeBytesPerPixel(ct));
+}
+
+static inline bool SkColorTypeIsValid(unsigned value) {
+ return value <= kLastEnum_SkColorType;
+}
+
+static inline size_t SkColorTypeComputeOffset(SkColorType ct, int x, int y, size_t rowBytes) {
+ if (kUnknown_SkColorType == ct) {
+ return 0;
+ }
+ return (size_t)y * rowBytes + ((size_t)x << SkColorTypeShiftPerPixel(ct));
+}
+
+static inline bool SkColorTypeIsNormalized(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType:
+ case kAlpha_8_SkColorType:
+ case kRGB_565_SkColorType:
+ case kARGB_4444_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kRGBA_1010102_SkColorType:
+ case kRGB_101010x_SkColorType:
+ case kBGRA_1010102_SkColorType:
+ case kBGR_101010x_SkColorType:
+ case kGray_8_SkColorType:
+ case kRGBA_F16Norm_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kA16_unorm_SkColorType:
+ case kA16_float_SkColorType: /*subtle... alpha is always [0,1]*/
+ case kR16G16_unorm_SkColorType:
+ case kR16G16B16A16_unorm_SkColorType:
+ case kSRGBA_8888_SkColorType:
+ case kR8_unorm_SkColorType:
+ return true;
+
+ case kBGR_101010x_XR_SkColorType:
+ case kRGBA_F16_SkColorType:
+ case kRGBA_F32_SkColorType:
+ case kR16G16_float_SkColorType:
+ return false;
+ }
+ SkUNREACHABLE;
+}
+
+static inline int SkColorTypeMaxBitsPerChannel(SkColorType ct) {
+ switch (ct) {
+ case kUnknown_SkColorType:
+ return 0;
+
+ case kARGB_4444_SkColorType:
+ return 4;
+
+ case kRGB_565_SkColorType:
+ return 6;
+
+ case kAlpha_8_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kGray_8_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kSRGBA_8888_SkColorType:
+ case kR8_unorm_SkColorType:
+ return 8;
+
+ case kRGBA_1010102_SkColorType:
+ case kRGB_101010x_SkColorType:
+ case kBGRA_1010102_SkColorType:
+ case kBGR_101010x_SkColorType:
+ case kBGR_101010x_XR_SkColorType:
+ return 10;
+
+ case kRGBA_F16Norm_SkColorType:
+ case kA16_unorm_SkColorType:
+ case kA16_float_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16B16A16_unorm_SkColorType:
+ case kRGBA_F16_SkColorType:
+ case kR16G16_float_SkColorType:
+ return 16;
+
+ case kRGBA_F32_SkColorType:
+ return 32;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Returns true if |info| contains a valid colorType and alphaType.
+ */
+static inline bool SkColorInfoIsValid(const SkColorInfo& info) {
+ return info.colorType() != kUnknown_SkColorType && info.alphaType() != kUnknown_SkAlphaType;
+}
+
+/**
+ * Returns true if |info| contains a valid combination of width, height and colorInfo.
+ */
+static inline bool SkImageInfoIsValid(const SkImageInfo& info) {
+ if (info.width() <= 0 || info.height() <= 0) {
+ return false;
+ }
+
+ const int kMaxDimension = SK_MaxS32 >> 2;
+ if (info.width() > kMaxDimension || info.height() > kMaxDimension) {
+ return false;
+ }
+
+ return SkColorInfoIsValid(info.colorInfo());
+}
+
+/**
+ * Returns true if Skia has defined a pixel conversion from the |src| to the |dst|.
+ * Returns false otherwise.
+ */
+static inline bool SkImageInfoValidConversion(const SkImageInfo& dst, const SkImageInfo& src) {
+ return SkImageInfoIsValid(dst) && SkImageInfoIsValid(src);
+}
+#endif // SkImageInfoPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkImagePriv.h b/gfx/skia/skia/src/core/SkImagePriv.h
new file mode 100644
index 0000000000..7d343fe2c9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkImagePriv.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImagePriv_DEFINED
+#define SkImagePriv_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTileMode.h"
+
+class SkPixelRef;
+
+enum SkCopyPixelsMode {
+ kIfMutable_SkCopyPixelsMode, //!< only copy src pixels if they are marked mutable
+ kAlways_SkCopyPixelsMode, //!< always copy src pixels (even if they are marked immutable)
+ kNever_SkCopyPixelsMode, //!< never copy src pixels (even if they are marked mutable)
+};
+
+// If alloc is non-nullptr, it will be used to allocate the returned SkShader, and MUST outlive
+// the SkShader.
+sk_sp<SkShader> SkMakeBitmapShader(const SkBitmap& src, SkTileMode, SkTileMode,
+ const SkSamplingOptions&, const SkMatrix* localMatrix,
+ SkCopyPixelsMode);
+
+// Convenience function to return a shader that implements the shader+image behavior defined for
+// drawImage/Bitmap where the paint's shader is ignored when the bitmap is a color image, but
+// properly compose them together when it is an alpha image. This allows the returned paint to
+// be assigned to a paint clone without discarding the original behavior.
+sk_sp<SkShader> SkMakeBitmapShaderForPaint(const SkPaint& paint, const SkBitmap& src,
+ SkTileMode, SkTileMode, const SkSamplingOptions&,
+ const SkMatrix* localMatrix, SkCopyPixelsMode);
+
+/**
+ * Examines the bitmap to decide if it can share the existing pixelRef, or
+ * if it needs to make a deep-copy of the pixels.
+ *
+ * The bitmap's pixelref will be shared if either the bitmap is marked as
+ * immutable, or CopyPixelsMode allows it. Shared pixel refs are also
+ * locked when kLocked_SharedPixelRefMode is specified.
+ *
+ * Passing kLocked_SharedPixelRefMode allows the image's peekPixels() method
+ * to succeed, but it will force any lazy decodes/generators to execute if
+ * they exist on the pixelref.
+ *
+ * It is illegal to call this with a texture-backed bitmap.
+ *
+ * If the bitmap's colortype cannot be converted into a corresponding
+ * SkImageInfo, or the bitmap's pixels cannot be accessed, this will return
+ * nullptr.
+ */
+extern SK_SPI sk_sp<SkImage> SkMakeImageFromRasterBitmap(const SkBitmap&, SkCopyPixelsMode);
+
+// Given an image created from SkNewImageFromBitmap, return its pixelref. This
+// may be called to see if the surface and the image share the same pixelref,
+// in which case the surface may need to perform a copy-on-write.
+extern const SkPixelRef* SkBitmapImageGetPixelRef(const SkImage* rasterImage);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLRUCache.h b/gfx/skia/skia/src/core/SkLRUCache.h
new file mode 100644
index 0000000000..83b1e688ac
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLRUCache.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLRUCache_DEFINED
+#define SkLRUCache_DEFINED
+
+#include "include/private/SkChecksum.h"
+#include "src/base/SkTInternalLList.h"
+#include "src/core/SkTHash.h"
+
+/**
+ * A generic LRU cache.
+ */
+template <typename K, typename V, typename HashK = SkGoodHash>
+class SkLRUCache {
+private:
+ struct Entry {
+ Entry(const K& key, V&& value)
+ : fKey(key)
+ , fValue(std::move(value)) {}
+
+ K fKey;
+ V fValue;
+
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(Entry);
+ };
+
+public:
+ explicit SkLRUCache(int maxCount) : fMaxCount(maxCount) {}
+ SkLRUCache() = delete;
+
+ ~SkLRUCache() {
+ Entry* node = fLRU.head();
+ while (node) {
+ fLRU.remove(node);
+ delete node;
+ node = fLRU.head();
+ }
+ }
+
+ // Make noncopyable
+ SkLRUCache(const SkLRUCache&) = delete;
+ SkLRUCache& operator=(const SkLRUCache&) = delete;
+
+ V* find(const K& key) {
+ Entry** value = fMap.find(key);
+ if (!value) {
+ return nullptr;
+ }
+ Entry* entry = *value;
+ if (entry != fLRU.head()) {
+ fLRU.remove(entry);
+ fLRU.addToHead(entry);
+ } // else it's already at head position, don't need to do anything
+ return &entry->fValue;
+ }
+
+ V* insert(const K& key, V value) {
+ SkASSERT(!this->find(key));
+
+ Entry* entry = new Entry(key, std::move(value));
+ fMap.set(entry);
+ fLRU.addToHead(entry);
+ while (fMap.count() > fMaxCount) {
+ this->remove(fLRU.tail()->fKey);
+ }
+ return &entry->fValue;
+ }
+
+ V* insert_or_update(const K& key, V value) {
+ if (V* found = this->find(key)) {
+ *found = std::move(value);
+ return found;
+ } else {
+ return this->insert(key, std::move(value));
+ }
+ }
+
+ int count() const {
+ return fMap.count();
+ }
+
+ template <typename Fn> // f(K*, V*)
+ void foreach(Fn&& fn) {
+ typename SkTInternalLList<Entry>::Iter iter;
+ for (Entry* e = iter.init(fLRU, SkTInternalLList<Entry>::Iter::kHead_IterStart); e;
+ e = iter.next()) {
+ fn(&e->fKey, &e->fValue);
+ }
+ }
+
+ void reset() {
+ fMap.reset();
+ for (Entry* e = fLRU.head(); e; e = fLRU.head()) {
+ fLRU.remove(e);
+ delete e;
+ }
+ }
+
+private:
+ struct Traits {
+ static const K& GetKey(Entry* e) {
+ return e->fKey;
+ }
+
+ static uint32_t Hash(const K& k) {
+ return HashK()(k);
+ }
+ };
+
+ void remove(const K& key) {
+ Entry** value = fMap.find(key);
+ SkASSERT(value);
+ Entry* entry = *value;
+ SkASSERT(key == entry->fKey);
+ fMap.remove(key);
+ fLRU.remove(entry);
+ delete entry;
+ }
+
+ int fMaxCount;
+ SkTHashTable<Entry*, K, Traits> fMap;
+ SkTInternalLList<Entry> fLRU;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLatticeIter.cpp b/gfx/skia/skia/src/core/SkLatticeIter.cpp
new file mode 100644
index 0000000000..d95d67e6b9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLatticeIter.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRect.h"
+#include "src/core/SkLatticeIter.h"
+
+/**
+ * Divs must be in increasing order with no duplicates.
+ */
+static bool valid_divs(const int* divs, int count, int start, int end) {
+ int prev = start - 1;
+ for (int i = 0; i < count; i++) {
+ if (prev >= divs[i] || divs[i] >= end) {
+ return false;
+ }
+ prev = divs[i];
+ }
+
+ return true;
+}
+
+bool SkLatticeIter::Valid(int width, int height, const SkCanvas::Lattice& lattice) {
+ SkIRect totalBounds = SkIRect::MakeWH(width, height);
+ SkASSERT(lattice.fBounds);
+ const SkIRect latticeBounds = *lattice.fBounds;
+ if (!totalBounds.contains(latticeBounds)) {
+ return false;
+ }
+
+ bool zeroXDivs = lattice.fXCount <= 0 || (1 == lattice.fXCount &&
+ latticeBounds.fLeft == lattice.fXDivs[0]);
+ bool zeroYDivs = lattice.fYCount <= 0 || (1 == lattice.fYCount &&
+ latticeBounds.fTop == lattice.fYDivs[0]);
+ if (zeroXDivs && zeroYDivs) {
+ return false;
+ }
+
+ return valid_divs(lattice.fXDivs, lattice.fXCount, latticeBounds.fLeft, latticeBounds.fRight)
+ && valid_divs(lattice.fYDivs, lattice.fYCount, latticeBounds.fTop, latticeBounds.fBottom);
+}
+
+/**
+ * Count the number of pixels that are in "scalable" patches.
+ */
+static int count_scalable_pixels(const int32_t* divs, int numDivs, bool firstIsScalable,
+ int start, int end) {
+ if (0 == numDivs) {
+ return firstIsScalable ? end - start : 0;
+ }
+
+ int i;
+ int count;
+ if (firstIsScalable) {
+ count = divs[0] - start;
+ i = 1;
+ } else {
+ count = 0;
+ i = 0;
+ }
+
+ for (; i < numDivs; i += 2) {
+ // Alternatively, we could use |top| and |bottom| as variable names, instead of
+ // |left| and |right|.
+ int left = divs[i];
+ int right = (i + 1 < numDivs) ? divs[i + 1] : end;
+ count += right - left;
+ }
+
+ return count;
+}
+
+/**
+ * Set points for the src and dst rects on subsequent draw calls.
+ */
+static void set_points(float* dst, int* src, const int* divs, int divCount, int srcFixed,
+ int srcScalable, int srcStart, int srcEnd, float dstStart, float dstEnd,
+ bool isScalable) {
+ float dstLen = dstEnd - dstStart;
+ float scale;
+ if (srcFixed <= dstLen) {
+ // This is the "normal" case, where we scale the "scalable" patches and leave
+ // the other patches fixed.
+ scale = (dstLen - ((float) srcFixed)) / ((float) srcScalable);
+ } else {
+ // In this case, we eliminate the "scalable" patches and scale the "fixed" patches.
+ scale = dstLen / ((float) srcFixed);
+ }
+
+ src[0] = srcStart;
+ dst[0] = dstStart;
+ for (int i = 0; i < divCount; i++) {
+ src[i + 1] = divs[i];
+ int srcDelta = src[i + 1] - src[i];
+ float dstDelta;
+ if (srcFixed <= dstLen) {
+ dstDelta = isScalable ? scale * srcDelta : srcDelta;
+ } else {
+ dstDelta = isScalable ? 0.0f : scale * srcDelta;
+ }
+ dst[i + 1] = dst[i] + dstDelta;
+
+ // Alternate between "scalable" and "fixed" patches.
+ isScalable = !isScalable;
+ }
+
+ src[divCount + 1] = srcEnd;
+ dst[divCount + 1] = dstEnd;
+}
+
+SkLatticeIter::SkLatticeIter(const SkCanvas::Lattice& lattice, const SkRect& dst) {
+ const int* xDivs = lattice.fXDivs;
+ const int origXCount = lattice.fXCount;
+ const int* yDivs = lattice.fYDivs;
+ const int origYCount = lattice.fYCount;
+ SkASSERT(lattice.fBounds);
+ const SkIRect src = *lattice.fBounds;
+
+ // In the x-dimension, the first rectangle always starts at x = 0 and is "scalable".
+ // If xDiv[0] is 0, it indicates that the first rectangle is degenerate, so the
+ // first real rectangle "scalable" in the x-direction.
+ //
+ // The same interpretation applies to the y-dimension.
+ //
+ // As we move left to right across the image, alternating patches will be "fixed" or
+ // "scalable" in the x-direction. Similarly, as move top to bottom, alternating
+ // patches will be "fixed" or "scalable" in the y-direction.
+ int xCount = origXCount;
+ int yCount = origYCount;
+ bool xIsScalable = (xCount > 0 && src.fLeft == xDivs[0]);
+ if (xIsScalable) {
+ // Once we've decided that the first patch is "scalable", we don't need the
+ // xDiv. It is always implied that we start at the edge of the bounds.
+ xDivs++;
+ xCount--;
+ }
+ bool yIsScalable = (yCount > 0 && src.fTop == yDivs[0]);
+ if (yIsScalable) {
+ // Once we've decided that the first patch is "scalable", we don't need the
+ // yDiv. It is always implied that we start at the edge of the bounds.
+ yDivs++;
+ yCount--;
+ }
+
+ // Count "scalable" and "fixed" pixels in each dimension.
+ int xCountScalable = count_scalable_pixels(xDivs, xCount, xIsScalable, src.fLeft, src.fRight);
+ int xCountFixed = src.width() - xCountScalable;
+ int yCountScalable = count_scalable_pixels(yDivs, yCount, yIsScalable, src.fTop, src.fBottom);
+ int yCountFixed = src.height() - yCountScalable;
+
+ fSrcX.reset(xCount + 2);
+ fDstX.reset(xCount + 2);
+ set_points(fDstX.begin(), fSrcX.begin(), xDivs, xCount, xCountFixed, xCountScalable,
+ src.fLeft, src.fRight, dst.fLeft, dst.fRight, xIsScalable);
+
+ fSrcY.reset(yCount + 2);
+ fDstY.reset(yCount + 2);
+ set_points(fDstY.begin(), fSrcY.begin(), yDivs, yCount, yCountFixed, yCountScalable,
+ src.fTop, src.fBottom, dst.fTop, dst.fBottom, yIsScalable);
+
+ fCurrX = fCurrY = 0;
+ fNumRectsInLattice = (xCount + 1) * (yCount + 1);
+ fNumRectsToDraw = fNumRectsInLattice;
+
+ if (lattice.fRectTypes) {
+ fRectTypes.push_back_n(fNumRectsInLattice);
+ fColors.push_back_n(fNumRectsInLattice);
+
+ const SkCanvas::Lattice::RectType* flags = lattice.fRectTypes;
+ const SkColor* colors = lattice.fColors;
+
+ bool hasPadRow = (yCount != origYCount);
+ bool hasPadCol = (xCount != origXCount);
+ if (hasPadRow) {
+ // The first row of rects are all empty, skip the first row of flags.
+ flags += origXCount + 1;
+ colors += origXCount + 1;
+ }
+
+ int i = 0;
+ for (int y = 0; y < yCount + 1; y++) {
+ for (int x = 0; x < origXCount + 1; x++) {
+ if (0 == x && hasPadCol) {
+ // The first column of rects are all empty. Skip a rect.
+ flags++;
+ colors++;
+ continue;
+ }
+
+ fRectTypes[i] = *flags;
+ fColors[i] = SkCanvas::Lattice::kFixedColor == *flags ? *colors : 0;
+ flags++;
+ colors++;
+ i++;
+ }
+ }
+
+ for (int j = 0; j < fRectTypes.size(); j++) {
+ if (SkCanvas::Lattice::kTransparent == fRectTypes[j]) {
+ fNumRectsToDraw--;
+ }
+ }
+ }
+}
+
+bool SkLatticeIter::Valid(int width, int height, const SkIRect& center) {
+ return !center.isEmpty() && SkIRect::MakeWH(width, height).contains(center);
+}
+
+SkLatticeIter::SkLatticeIter(int w, int h, const SkIRect& c, const SkRect& dst) {
+ SkASSERT(SkIRect::MakeWH(w, h).contains(c));
+
+ fSrcX.reset(4);
+ fSrcY.reset(4);
+ fDstX.reset(4);
+ fDstY.reset(4);
+
+ fSrcX[0] = 0;
+ fSrcX[1] = SkIntToScalar(c.fLeft);
+ fSrcX[2] = SkIntToScalar(c.fRight);
+ fSrcX[3] = SkIntToScalar(w);
+
+ fSrcY[0] = 0;
+ fSrcY[1] = SkIntToScalar(c.fTop);
+ fSrcY[2] = SkIntToScalar(c.fBottom);
+ fSrcY[3] = SkIntToScalar(h);
+
+ fDstX[0] = dst.fLeft;
+ fDstX[1] = dst.fLeft + SkIntToScalar(c.fLeft);
+ fDstX[2] = dst.fRight - SkIntToScalar(w - c.fRight);
+ fDstX[3] = dst.fRight;
+
+ fDstY[0] = dst.fTop;
+ fDstY[1] = dst.fTop + SkIntToScalar(c.fTop);
+ fDstY[2] = dst.fBottom - SkIntToScalar(h - c.fBottom);
+ fDstY[3] = dst.fBottom;
+
+ if (fDstX[1] > fDstX[2]) {
+ fDstX[1] = fDstX[0] + (fDstX[3] - fDstX[0]) * c.fLeft / (w - c.width());
+ fDstX[2] = fDstX[1];
+ }
+
+ if (fDstY[1] > fDstY[2]) {
+ fDstY[1] = fDstY[0] + (fDstY[3] - fDstY[0]) * c.fTop / (h - c.height());
+ fDstY[2] = fDstY[1];
+ }
+
+ fCurrX = fCurrY = 0;
+ fNumRectsInLattice = 9;
+ fNumRectsToDraw = 9;
+}
+
+bool SkLatticeIter::next(SkIRect* src, SkRect* dst, bool* isFixedColor, SkColor* fixedColor) {
+ int currRect = fCurrX + fCurrY * (fSrcX.size() - 1);
+ if (currRect == fNumRectsInLattice) {
+ return false;
+ }
+
+ const int x = fCurrX;
+ const int y = fCurrY;
+ SkASSERT(x >= 0 && x < fSrcX.size() - 1);
+ SkASSERT(y >= 0 && y < fSrcY.size() - 1);
+
+ if (fSrcX.size() - 1 == ++fCurrX) {
+ fCurrX = 0;
+ fCurrY += 1;
+ }
+
+ if (fRectTypes.size() > 0
+ && SkToBool(SkCanvas::Lattice::kTransparent == fRectTypes[currRect])) {
+ return this->next(src, dst, isFixedColor, fixedColor);
+ }
+
+ src->setLTRB(fSrcX[x], fSrcY[y], fSrcX[x + 1], fSrcY[y + 1]);
+ dst->setLTRB(fDstX[x], fDstY[y], fDstX[x + 1], fDstY[y + 1]);
+ if (isFixedColor && fixedColor) {
+ *isFixedColor = fRectTypes.size() > 0
+ && SkToBool(SkCanvas::Lattice::kFixedColor == fRectTypes[currRect]);
+ if (*isFixedColor) {
+ *fixedColor = fColors[currRect];
+ }
+ }
+ return true;
+}
+
+void SkLatticeIter::mapDstScaleTranslate(const SkMatrix& matrix) {
+ SkASSERT(matrix.isScaleTranslate());
+ SkScalar tx = matrix.getTranslateX();
+ SkScalar sx = matrix.getScaleX();
+ for (int i = 0; i < fDstX.size(); i++) {
+ fDstX[i] = fDstX[i] * sx + tx;
+ }
+
+ SkScalar ty = matrix.getTranslateY();
+ SkScalar sy = matrix.getScaleY();
+ for (int i = 0; i < fDstY.size(); i++) {
+ fDstY[i] = fDstY[i] * sy + ty;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkLatticeIter.h b/gfx/skia/skia/src/core/SkLatticeIter.h
new file mode 100644
index 0000000000..5148e55555
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLatticeIter.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLatticeIter_DEFINED
+#define SkLatticeIter_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkTArray.h"
+
+struct SkIRect;
+struct SkRect;
+
+/**
+ * Dissect a lattice request into a sequence of src-rect / dst-rect pairs
+ */
+class SK_SPI SkLatticeIter {
+public:
+
+ static bool Valid(int imageWidth, int imageHeight, const SkCanvas::Lattice& lattice);
+
+ SkLatticeIter(const SkCanvas::Lattice& lattice, const SkRect& dst);
+
+ static bool Valid(int imageWidth, int imageHeight, const SkIRect& center);
+
+ SkLatticeIter(int imageWidth, int imageHeight, const SkIRect& center, const SkRect& dst);
+
+ /**
+ * While it returns true, use src/dst to draw the image/bitmap. Optional parameters
+ * isFixedColor and fixedColor specify if the rectangle is filled with a fixed color.
+ * If (*isFixedColor) is true, then (*fixedColor) contains the rectangle color.
+ */
+ bool next(SkIRect* src, SkRect* dst, bool* isFixedColor = nullptr,
+ SkColor* fixedColor = nullptr);
+
+ /** Version of above that converts the integer src rect to a scalar rect. */
+ bool next(SkRect* src, SkRect* dst, bool* isFixedColor = nullptr,
+ SkColor* fixedColor = nullptr) {
+ SkIRect isrcR;
+ if (this->next(&isrcR, dst, isFixedColor, fixedColor)) {
+ *src = SkRect::Make(isrcR);
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Apply a matrix to the dst points.
+ */
+ void mapDstScaleTranslate(const SkMatrix& matrix);
+
+ /**
+ * Returns the number of rects that will actually be drawn.
+ */
+ int numRectsToDraw() const {
+ return fNumRectsToDraw;
+ }
+
+private:
+ skia_private::TArray<int> fSrcX;
+ skia_private::TArray<int> fSrcY;
+ skia_private::TArray<SkScalar> fDstX;
+ skia_private::TArray<SkScalar> fDstY;
+ skia_private::TArray<SkCanvas::Lattice::RectType> fRectTypes;
+ skia_private::TArray<SkColor> fColors;
+
+ int fCurrX;
+ int fCurrY;
+ int fNumRectsInLattice;
+ int fNumRectsToDraw;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLineClipper.cpp b/gfx/skia/skia/src/core/SkLineClipper.cpp
new file mode 100644
index 0000000000..a2e8031096
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLineClipper.cpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkLineClipper.h"
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+
+#include <cstring>
+#include <utility>
+
+template <typename T> T pin_unsorted(T value, T limit0, T limit1) {
+ if (limit1 < limit0) {
+ using std::swap;
+ swap(limit0, limit1);
+ }
+ // now the limits are sorted
+ SkASSERT(limit0 <= limit1);
+
+ if (value < limit0) {
+ value = limit0;
+ } else if (value > limit1) {
+ value = limit1;
+ }
+ return value;
+}
+
+// return X coordinate of intersection with horizontal line at Y
+static SkScalar sect_with_horizontal(const SkPoint src[2], SkScalar Y) {
+ SkScalar dy = src[1].fY - src[0].fY;
+ if (SkScalarNearlyZero(dy)) {
+ return SkScalarAve(src[0].fX, src[1].fX);
+ } else {
+ // need the extra precision so we don't compute a value that exceeds
+ // our original limits
+ double X0 = src[0].fX;
+ double Y0 = src[0].fY;
+ double X1 = src[1].fX;
+ double Y1 = src[1].fY;
+ double result = X0 + ((double)Y - Y0) * (X1 - X0) / (Y1 - Y0);
+
+ // The computed X value might still exceed [X0..X1] due to quantum flux
+ // when the doubles were added and subtracted, so we have to pin the
+ // answer :(
+ return (float)pin_unsorted(result, X0, X1);
+ }
+}
+
+// return Y coordinate of intersection with vertical line at X
+static SkScalar sect_with_vertical(const SkPoint src[2], SkScalar X) {
+ SkScalar dx = src[1].fX - src[0].fX;
+ if (SkScalarNearlyZero(dx)) {
+ return SkScalarAve(src[0].fY, src[1].fY);
+ } else {
+ // need the extra precision so we don't compute a value that exceeds
+ // our original limits
+ double X0 = src[0].fX;
+ double Y0 = src[0].fY;
+ double X1 = src[1].fX;
+ double Y1 = src[1].fY;
+ double result = Y0 + ((double)X - X0) * (Y1 - Y0) / (X1 - X0);
+ return (float)result;
+ }
+}
+
+static SkScalar sect_clamp_with_vertical(const SkPoint src[2], SkScalar x) {
+ SkScalar y = sect_with_vertical(src, x);
+ // Our caller expects y to be between src[0].fY and src[1].fY (unsorted), but due to the
+ // numerics of floats/doubles, we might have computed a value slightly outside of that,
+ // so we have to manually clamp afterwards.
+ // See skbug.com/7491
+ return pin_unsorted(y, src[0].fY, src[1].fY);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool nestedLT(SkScalar a, SkScalar b, SkScalar dim) {
+ return a <= b && (a < b || dim > 0);
+}
+
+// returns true if outer contains inner, even if inner is empty.
+// note: outer.contains(inner) always returns false if inner is empty.
+static inline bool containsNoEmptyCheck(const SkRect& outer,
+ const SkRect& inner) {
+ return outer.fLeft <= inner.fLeft && outer.fTop <= inner.fTop &&
+ outer.fRight >= inner.fRight && outer.fBottom >= inner.fBottom;
+}
+
+bool SkLineClipper::IntersectLine(const SkPoint src[2], const SkRect& clip,
+ SkPoint dst[2]) {
+ SkRect bounds;
+
+ bounds.set(src[0], src[1]);
+ if (containsNoEmptyCheck(clip, bounds)) {
+ if (src != dst) {
+ memcpy(dst, src, 2 * sizeof(SkPoint));
+ }
+ return true;
+ }
+ // check for no overlap, and only permit coincident edges if the line
+ // and the edge are collinear
+ if (nestedLT(bounds.fRight, clip.fLeft, bounds.width()) ||
+ nestedLT(clip.fRight, bounds.fLeft, bounds.width()) ||
+ nestedLT(bounds.fBottom, clip.fTop, bounds.height()) ||
+ nestedLT(clip.fBottom, bounds.fTop, bounds.height())) {
+ return false;
+ }
+
+ int index0, index1;
+
+ if (src[0].fY < src[1].fY) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+ SkPoint tmp[2];
+ memcpy(tmp, src, sizeof(tmp));
+
+ // now compute Y intersections
+ if (tmp[index0].fY < clip.fTop) {
+ tmp[index0].set(sect_with_horizontal(src, clip.fTop), clip.fTop);
+ }
+ if (tmp[index1].fY > clip.fBottom) {
+ tmp[index1].set(sect_with_horizontal(src, clip.fBottom), clip.fBottom);
+ }
+
+ if (tmp[0].fX < tmp[1].fX) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+ // check for quick-reject in X again, now that we may have been chopped
+ if ((tmp[index1].fX <= clip.fLeft || tmp[index0].fX >= clip.fRight)) {
+ // usually we will return false, but we don't if the line is vertical and coincident
+ // with the clip.
+ if (tmp[0].fX != tmp[1].fX || tmp[0].fX < clip.fLeft || tmp[0].fX > clip.fRight) {
+ return false;
+ }
+ }
+
+ if (tmp[index0].fX < clip.fLeft) {
+ tmp[index0].set(clip.fLeft, sect_with_vertical(tmp, clip.fLeft));
+ }
+ if (tmp[index1].fX > clip.fRight) {
+ tmp[index1].set(clip.fRight, sect_with_vertical(tmp, clip.fRight));
+ }
+#ifdef SK_DEBUG
+ bounds.set(tmp[0], tmp[1]);
+ SkASSERT(containsNoEmptyCheck(clip, bounds));
+#endif
+ memcpy(dst, tmp, sizeof(tmp));
+ return true;
+}
+
+#ifdef SK_DEBUG
+// return value between the two limits, where the limits are either ascending
+// or descending.
+static bool is_between_unsorted(SkScalar value,
+ SkScalar limit0, SkScalar limit1) {
+ if (limit0 < limit1) {
+ return limit0 <= value && value <= limit1;
+ } else {
+ return limit1 <= value && value <= limit0;
+ }
+}
+#endif
+
+int SkLineClipper::ClipLine(const SkPoint pts[2], const SkRect& clip, SkPoint lines[kMaxPoints],
+ bool canCullToTheRight) {
+ int index0, index1;
+
+ if (pts[0].fY < pts[1].fY) {
+ index0 = 0;
+ index1 = 1;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ }
+
+ // Check if we're completely clipped out in Y (above or below)
+
+ if (pts[index1].fY <= clip.fTop) { // we're above the clip
+ return 0;
+ }
+ if (pts[index0].fY >= clip.fBottom) { // we're below the clip
+ return 0;
+ }
+
+ // Chop in Y to produce a single segment, stored in tmp[0..1]
+
+ SkPoint tmp[2];
+ memcpy(tmp, pts, sizeof(tmp));
+
+ // now compute intersections
+ if (pts[index0].fY < clip.fTop) {
+ tmp[index0].set(sect_with_horizontal(pts, clip.fTop), clip.fTop);
+ SkASSERT(is_between_unsorted(tmp[index0].fX, pts[0].fX, pts[1].fX));
+ }
+ if (tmp[index1].fY > clip.fBottom) {
+ tmp[index1].set(sect_with_horizontal(pts, clip.fBottom), clip.fBottom);
+ SkASSERT(is_between_unsorted(tmp[index1].fX, pts[0].fX, pts[1].fX));
+ }
+
+ // Chop it into 1..3 segments that are wholly within the clip in X.
+
+ // temp storage for up to 3 segments
+ SkPoint resultStorage[kMaxPoints];
+ SkPoint* result; // points to our results, either tmp or resultStorage
+ int lineCount = 1;
+ bool reverse;
+
+ if (pts[0].fX < pts[1].fX) {
+ index0 = 0;
+ index1 = 1;
+ reverse = false;
+ } else {
+ index0 = 1;
+ index1 = 0;
+ reverse = true;
+ }
+
+ if (tmp[index1].fX <= clip.fLeft) { // wholly to the left
+ tmp[0].fX = tmp[1].fX = clip.fLeft;
+ result = tmp;
+ reverse = false;
+ } else if (tmp[index0].fX >= clip.fRight) { // wholly to the right
+ if (canCullToTheRight) {
+ return 0;
+ }
+ tmp[0].fX = tmp[1].fX = clip.fRight;
+ result = tmp;
+ reverse = false;
+ } else {
+ result = resultStorage;
+ SkPoint* r = result;
+
+ if (tmp[index0].fX < clip.fLeft) {
+ r->set(clip.fLeft, tmp[index0].fY);
+ r += 1;
+ r->set(clip.fLeft, sect_clamp_with_vertical(tmp, clip.fLeft));
+ SkASSERT(is_between_unsorted(r->fY, tmp[0].fY, tmp[1].fY));
+ } else {
+ *r = tmp[index0];
+ }
+ r += 1;
+
+ if (tmp[index1].fX > clip.fRight) {
+ r->set(clip.fRight, sect_clamp_with_vertical(tmp, clip.fRight));
+ SkASSERT(is_between_unsorted(r->fY, tmp[0].fY, tmp[1].fY));
+ r += 1;
+ r->set(clip.fRight, tmp[index1].fY);
+ } else {
+ *r = tmp[index1];
+ }
+
+ lineCount = SkToInt(r - result);
+ }
+
+ // Now copy the results into the caller's lines[] parameter
+ if (reverse) {
+ // copy the pts in reverse order to maintain winding order
+ for (int i = 0; i <= lineCount; i++) {
+ lines[lineCount - i] = result[i];
+ }
+ } else {
+ memcpy(lines, result, (lineCount + 1) * sizeof(SkPoint));
+ }
+ return lineCount;
+}
diff --git a/gfx/skia/skia/src/core/SkLineClipper.h b/gfx/skia/skia/src/core/SkLineClipper.h
new file mode 100644
index 0000000000..65460147c7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLineClipper.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkLineClipper_DEFINED
+#define SkLineClipper_DEFINED
+
+struct SkPoint;
+struct SkRect;
+
+class SkLineClipper {
+public:
+ enum {
+ kMaxPoints = 4,
+ kMaxClippedLineSegments = kMaxPoints - 1
+ };
+
+ /* Clip the line pts[0]...pts[1] against clip, ignoring segments that
+ lie completely above or below the clip. For portions to the left or
+ right, turn those into vertical line segments that are aligned to the
+ edge of the clip.
+
+ Return the number of line segments that result, and store the end-points
+ of those segments sequentially in lines as follows:
+ 1st segment: lines[0]..lines[1]
+ 2nd segment: lines[1]..lines[2]
+ 3rd segment: lines[2]..lines[3]
+ */
+ static int ClipLine(const SkPoint pts[2], const SkRect& clip,
+ SkPoint lines[kMaxPoints], bool canCullToTheRight);
+
+ /* Intersect the line segment against the rect. If there is a non-empty
+ resulting segment, return true and set dst[] to that segment. If not,
+ return false and ignore dst[].
+
+ ClipLine is specialized for scan-conversion, as it adds vertical
+ segments on the sides to show where the line extended beyond the
+ left or right sides. IntersectLine does not.
+ */
+ static bool IntersectLine(const SkPoint src[2], const SkRect& clip, SkPoint dst[2]);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp
new file mode 100644
index 0000000000..34be583a98
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkLocalMatrixImageFilter.h"
+
+#include "include/core/SkString.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+sk_sp<SkImageFilter> SkLocalMatrixImageFilter::Make(const SkMatrix& localM,
+ sk_sp<SkImageFilter> input) {
+ if (!input) {
+ return nullptr;
+ }
+ if (localM.isIdentity()) {
+ return input;
+ }
+ MatrixCapability inputCapability = as_IFB(input)->getCTMCapability();
+ if ((inputCapability == MatrixCapability::kTranslate && !localM.isTranslate()) ||
+ (inputCapability == MatrixCapability::kScaleTranslate && !localM.isScaleTranslate())) {
+ // Nothing we can do at this point
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkLocalMatrixImageFilter(localM, input));
+}
+
+SkLocalMatrixImageFilter::SkLocalMatrixImageFilter(const SkMatrix& localM,
+ sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, nullptr)
+ , fLocalM(localM) {
+}
+
+sk_sp<SkFlattenable> SkLocalMatrixImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ return SkLocalMatrixImageFilter::Make(lm, common.getInput(0));
+}
+
+void SkLocalMatrixImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeMatrix(fLocalM);
+}
+
+sk_sp<SkSpecialImage> SkLocalMatrixImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ skif::Mapping newMapping = ctx.mapping();
+ newMapping.concatLocal(fLocalM);
+ Context localCtx = ctx.withNewMapping(newMapping);
+ return this->filterInput(0, localCtx, offset);
+}
+
+SkIRect SkLocalMatrixImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection dir, const SkIRect* inputRect) const {
+ return this->getInput(0)->filterBounds(src, SkMatrix::Concat(ctm, fLocalM), dir, inputRect);
+}
+
+SkRect SkLocalMatrixImageFilter::computeFastBounds(const SkRect& bounds) const {
+ // In order to match the behavior of onFilterBounds, we map 'bounds' by the inverse of our
+ // local matrix, pass that to our child, and then map the result by our local matrix.
+ SkMatrix localInv;
+ if (!fLocalM.invert(&localInv)) {
+ return this->getInput(0)->computeFastBounds(bounds);
+ }
+
+ SkRect localBounds = localInv.mapRect(bounds);
+ return fLocalM.mapRect(this->getInput(0)->computeFastBounds(localBounds));
+}
diff --git a/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h
new file mode 100644
index 0000000000..d12666a6f4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkLocalMatrixImageFilter.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLocalMatrixImageFilter_DEFINED
+#define SkLocalMatrixImageFilter_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "src/core/SkImageFilter_Base.h"
+
+/**
+ * Wraps another imagefilter + matrix, such that using this filter will give the same result
+ * as using the wrapped filter with the matrix applied to its context.
+ */
+class SkLocalMatrixImageFilter : public SkImageFilter_Base {
+public:
+ static sk_sp<SkImageFilter> Make(const SkMatrix& localM, sk_sp<SkImageFilter> input);
+
+ SkRect computeFastBounds(const SkRect&) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+ MatrixCapability onGetCTMCapability() const override { return MatrixCapability::kComplex; }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLocalMatrixImageFilter)
+
+ SkLocalMatrixImageFilter(const SkMatrix& localM, sk_sp<SkImageFilter> input);
+
+ SkMatrix fLocalM;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkM44.cpp b/gfx/skia/skia/src/core/SkM44.cpp
new file mode 100644
index 0000000000..747372cfd8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkM44.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+#include "src/base/SkVx.h"
+
+#include "src/core/SkMatrixInvert.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkPathPriv.h"
+
+bool SkM44::operator==(const SkM44& other) const {
+ if (this == &other) {
+ return true;
+ }
+
+ auto a0 = skvx::float4::Load(fMat + 0);
+ auto a1 = skvx::float4::Load(fMat + 4);
+ auto a2 = skvx::float4::Load(fMat + 8);
+ auto a3 = skvx::float4::Load(fMat + 12);
+
+ auto b0 = skvx::float4::Load(other.fMat + 0);
+ auto b1 = skvx::float4::Load(other.fMat + 4);
+ auto b2 = skvx::float4::Load(other.fMat + 8);
+ auto b3 = skvx::float4::Load(other.fMat + 12);
+
+ auto eq = (a0 == b0) & (a1 == b1) & (a2 == b2) & (a3 == b3);
+ return (eq[0] & eq[1] & eq[2] & eq[3]) == ~0;
+}
+
+static void transpose_arrays(SkScalar dst[], const SkScalar src[]) {
+ dst[0] = src[0]; dst[1] = src[4]; dst[2] = src[8]; dst[3] = src[12];
+ dst[4] = src[1]; dst[5] = src[5]; dst[6] = src[9]; dst[7] = src[13];
+ dst[8] = src[2]; dst[9] = src[6]; dst[10] = src[10]; dst[11] = src[14];
+ dst[12] = src[3]; dst[13] = src[7]; dst[14] = src[11]; dst[15] = src[15];
+}
+
+void SkM44::getRowMajor(SkScalar v[]) const {
+ transpose_arrays(v, fMat);
+}
+
+SkM44& SkM44::setConcat(const SkM44& a, const SkM44& b) {
+ auto c0 = skvx::float4::Load(a.fMat + 0);
+ auto c1 = skvx::float4::Load(a.fMat + 4);
+ auto c2 = skvx::float4::Load(a.fMat + 8);
+ auto c3 = skvx::float4::Load(a.fMat + 12);
+
+ auto compute = [&](skvx::float4 r) {
+ return c0*r[0] + (c1*r[1] + (c2*r[2] + c3*r[3]));
+ };
+
+ auto m0 = compute(skvx::float4::Load(b.fMat + 0));
+ auto m1 = compute(skvx::float4::Load(b.fMat + 4));
+ auto m2 = compute(skvx::float4::Load(b.fMat + 8));
+ auto m3 = compute(skvx::float4::Load(b.fMat + 12));
+
+ m0.store(fMat + 0);
+ m1.store(fMat + 4);
+ m2.store(fMat + 8);
+ m3.store(fMat + 12);
+ return *this;
+}
+
+SkM44& SkM44::preConcat(const SkMatrix& b) {
+ auto c0 = skvx::float4::Load(fMat + 0);
+ auto c1 = skvx::float4::Load(fMat + 4);
+ auto c3 = skvx::float4::Load(fMat + 12);
+
+ auto compute = [&](float r0, float r1, float r3) {
+ return (c0*r0 + (c1*r1 + c3*r3));
+ };
+
+ auto m0 = compute(b[0], b[3], b[6]);
+ auto m1 = compute(b[1], b[4], b[7]);
+ auto m3 = compute(b[2], b[5], b[8]);
+
+ m0.store(fMat + 0);
+ m1.store(fMat + 4);
+ m3.store(fMat + 12);
+ return *this;
+}
+
+SkM44& SkM44::preTranslate(SkScalar x, SkScalar y, SkScalar z) {
+ auto c0 = skvx::float4::Load(fMat + 0);
+ auto c1 = skvx::float4::Load(fMat + 4);
+ auto c2 = skvx::float4::Load(fMat + 8);
+ auto c3 = skvx::float4::Load(fMat + 12);
+
+ // only need to update the last column
+ (c0*x + (c1*y + (c2*z + c3))).store(fMat + 12);
+ return *this;
+}
+
+SkM44& SkM44::postTranslate(SkScalar x, SkScalar y, SkScalar z) {
+ skvx::float4 t = { x, y, z, 0 };
+ (t * fMat[ 3] + skvx::float4::Load(fMat + 0)).store(fMat + 0);
+ (t * fMat[ 7] + skvx::float4::Load(fMat + 4)).store(fMat + 4);
+ (t * fMat[11] + skvx::float4::Load(fMat + 8)).store(fMat + 8);
+ (t * fMat[15] + skvx::float4::Load(fMat + 12)).store(fMat + 12);
+ return *this;
+}
+
+SkM44& SkM44::preScale(SkScalar x, SkScalar y) {
+ auto c0 = skvx::float4::Load(fMat + 0);
+ auto c1 = skvx::float4::Load(fMat + 4);
+
+ (c0 * x).store(fMat + 0);
+ (c1 * y).store(fMat + 4);
+ return *this;
+}
+
+SkM44& SkM44::preScale(SkScalar x, SkScalar y, SkScalar z) {
+ auto c0 = skvx::float4::Load(fMat + 0);
+ auto c1 = skvx::float4::Load(fMat + 4);
+ auto c2 = skvx::float4::Load(fMat + 8);
+
+ (c0 * x).store(fMat + 0);
+ (c1 * y).store(fMat + 4);
+ (c2 * z).store(fMat + 8);
+ return *this;
+}
+
+SkV4 SkM44::map(float x, float y, float z, float w) const {
+ auto c0 = skvx::float4::Load(fMat + 0);
+ auto c1 = skvx::float4::Load(fMat + 4);
+ auto c2 = skvx::float4::Load(fMat + 8);
+ auto c3 = skvx::float4::Load(fMat + 12);
+
+ SkV4 v;
+ (c0*x + (c1*y + (c2*z + c3*w))).store(&v.x);
+ return v;
+}
+
+static SkRect map_rect_affine(const SkRect& src, const float mat[16]) {
+ // When multiplied against vectors of the form <x,y,x,y>, 'flip' allows a single min()
+ // to compute both the min and "negated" max between the xy coordinates. Once finished, another
+ // multiplication produces the original max.
+ const skvx::float4 flip{1.f, 1.f, -1.f, -1.f};
+
+ // Since z = 0 and it's assumed there's no perspective, only load the upper 2x2 and (tx,ty) in c3
+ auto c0 = skvx::shuffle<0,1,0,1>(skvx::float2::Load(mat + 0)) * flip;
+ auto c1 = skvx::shuffle<0,1,0,1>(skvx::float2::Load(mat + 4)) * flip;
+ auto c3 = skvx::shuffle<0,1,0,1>(skvx::float2::Load(mat + 12));
+
+ // Compute the min and max of the four transformed corners pre-translation; then translate once
+ // at the end.
+ auto minMax = c3 + flip * min(min(c0 * src.fLeft + c1 * src.fTop,
+ c0 * src.fRight + c1 * src.fTop),
+ min(c0 * src.fLeft + c1 * src.fBottom,
+ c0 * src.fRight + c1 * src.fBottom));
+
+ // minMax holds (min x, min y, max x, max y) so can be copied into an SkRect expecting l,t,r,b
+ SkRect r;
+ minMax.store(&r);
+ return r;
+}
+
+static SkRect map_rect_perspective(const SkRect& src, const float mat[16]) {
+ // Like map_rect_affine, z = 0 so we can skip the 3rd column, but we do need to compute w's
+ // for each corner of the src rect.
+ auto c0 = skvx::float4::Load(mat + 0);
+ auto c1 = skvx::float4::Load(mat + 4);
+ auto c3 = skvx::float4::Load(mat + 12);
+
+ // Unlike map_rect_affine, we do not defer the 4th column since we may need the homogeneous
+ // coordinates to clip against the w=0 plane
+ auto tl = c0 * src.fLeft + c1 * src.fTop + c3;
+ auto tr = c0 * src.fRight + c1 * src.fTop + c3;
+ auto bl = c0 * src.fLeft + c1 * src.fBottom + c3;
+ auto br = c0 * src.fRight + c1 * src.fBottom + c3;
+
+ // After clipping to w>0 and projecting to 2d, 'project' employs the same negation trick to
+ // compute min and max at the same time.
+ const skvx::float4 flip{1.f, 1.f, -1.f, -1.f};
+ auto project = [&flip](const skvx::float4& p0, const skvx::float4& p1, const skvx::float4& p2) {
+ float w0 = p0[3];
+ if (w0 >= SkPathPriv::kW0PlaneDistance) {
+ // Unclipped, just divide by w
+ return flip * skvx::shuffle<0,1,0,1>(p0) / w0;
+ } else {
+ auto clip = [&](const skvx::float4& p) {
+ float w = p[3];
+ if (w >= SkPathPriv::kW0PlaneDistance) {
+ float t = (SkPathPriv::kW0PlaneDistance - w0) / (w - w0);
+ auto c = (t * skvx::shuffle<0,1>(p) + (1.f - t) * skvx::shuffle<0,1>(p0)) /
+ SkPathPriv::kW0PlaneDistance;
+
+ return flip * skvx::shuffle<0,1,0,1>(c);
+ } else {
+ return skvx::float4(SK_ScalarInfinity);
+ }
+ };
+ // Clip both edges leaving p0, and return the min/max of the two clipped points
+ // (since clip returns infinity when both p0 and 2nd vertex have w<0, it'll
+ // automatically be ignored).
+ return min(clip(p1), clip(p2));
+ }
+ };
+
+ // Project all 4 corners, and pass in their adjacent vertices for clipping if it has w < 0,
+ // then accumulate the min and max xy's.
+ auto minMax = flip * min(min(project(tl, tr, bl), project(tr, br, tl)),
+ min(project(br, bl, tr), project(bl, tl, br)));
+
+ SkRect r;
+ minMax.store(&r);
+ return r;
+}
+
+SkRect SkMatrixPriv::MapRect(const SkM44& m, const SkRect& src) {
+ const bool hasPerspective =
+ m.fMat[3] != 0 || m.fMat[7] != 0 || m.fMat[11] != 0 || m.fMat[15] != 1;
+ if (hasPerspective) {
+ return map_rect_perspective(src, m.fMat);
+ } else {
+ return map_rect_affine(src, m.fMat);
+ }
+}
+
+void SkM44::normalizePerspective() {
+ // If the bottom row of the matrix is [0, 0, 0, not_one], we will treat the matrix as if it
+ // is in perspective, even though it still behaves like it's affine. If we divide everything
+ // by the not_one value, then it will behave the same, but will be treated as affine,
+ // and therefore faster (e.g. clients can forward-difference calculations).
+ if (fMat[15] != 1 && fMat[15] != 0 && fMat[3] == 0 && fMat[7] == 0 && fMat[11] == 0) {
+ double inv = 1.0 / fMat[15];
+ (skvx::float4::Load(fMat + 0) * inv).store(fMat + 0);
+ (skvx::float4::Load(fMat + 4) * inv).store(fMat + 4);
+ (skvx::float4::Load(fMat + 8) * inv).store(fMat + 8);
+ (skvx::float4::Load(fMat + 12) * inv).store(fMat + 12);
+ fMat[15] = 1.0f;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** We always perform the calculation in doubles, to avoid prematurely losing
+ precision along the way. This relies on the compiler automatically
+ promoting our SkScalar values to double (if needed).
+ */
+bool SkM44::invert(SkM44* inverse) const {
+ SkScalar tmp[16];
+ if (SkInvert4x4Matrix(fMat, tmp) == 0.0f) {
+ return false;
+ }
+ memcpy(inverse->fMat, tmp, sizeof(tmp));
+ return true;
+}
+
+SkM44 SkM44::transpose() const {
+ SkM44 trans(SkM44::kUninitialized_Constructor);
+ transpose_arrays(trans.fMat, fMat);
+ return trans;
+}
+
+SkM44& SkM44::setRotateUnitSinCos(SkV3 axis, SkScalar sinAngle, SkScalar cosAngle) {
+ // Taken from "Essential Mathematics for Games and Interactive Applications"
+ // James M. Van Verth and Lars M. Bishop -- third edition
+ SkScalar x = axis.x;
+ SkScalar y = axis.y;
+ SkScalar z = axis.z;
+ SkScalar c = cosAngle;
+ SkScalar s = sinAngle;
+ SkScalar t = 1 - c;
+
+ *this = { t*x*x + c, t*x*y - s*z, t*x*z + s*y, 0,
+ t*x*y + s*z, t*y*y + c, t*y*z - s*x, 0,
+ t*x*z - s*y, t*y*z + s*x, t*z*z + c, 0,
+ 0, 0, 0, 1 };
+ return *this;
+}
+
+SkM44& SkM44::setRotate(SkV3 axis, SkScalar radians) {
+ SkScalar len = axis.length();
+ if (len > 0 && SkScalarIsFinite(len)) {
+ this->setRotateUnit(axis * (SK_Scalar1 / len), radians);
+ } else {
+ this->setIdentity();
+ }
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkM44::dump() const {
+ SkDebugf("|%g %g %g %g|\n"
+ "|%g %g %g %g|\n"
+ "|%g %g %g %g|\n"
+ "|%g %g %g %g|\n",
+ fMat[0], fMat[4], fMat[8], fMat[12],
+ fMat[1], fMat[5], fMat[9], fMat[13],
+ fMat[2], fMat[6], fMat[10], fMat[14],
+ fMat[3], fMat[7], fMat[11], fMat[15]);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkM44 SkM44::RectToRect(const SkRect& src, const SkRect& dst) {
+ if (src.isEmpty()) {
+ return SkM44();
+ } else if (dst.isEmpty()) {
+ return SkM44::Scale(0.f, 0.f, 0.f);
+ }
+
+ float sx = dst.width() / src.width();
+ float sy = dst.height() / src.height();
+
+ float tx = dst.fLeft - sx * src.fLeft;
+ float ty = dst.fTop - sy * src.fTop;
+
+ return SkM44{sx, 0.f, 0.f, tx,
+ 0.f, sy, 0.f, ty,
+ 0.f, 0.f, 1.f, 0.f,
+ 0.f, 0.f, 0.f, 1.f};
+}
+
+static SkV3 normalize(SkV3 v) {
+ const auto vlen = v.length();
+
+ return SkScalarNearlyZero(vlen) ? v : v * (1.0f / vlen);
+}
+
+static SkV4 v4(SkV3 v, SkScalar w) { return {v.x, v.y, v.z, w}; }
+
+SkM44 SkM44::LookAt(const SkV3& eye, const SkV3& center, const SkV3& up) {
+ SkV3 f = normalize(center - eye);
+ SkV3 u = normalize(up);
+ SkV3 s = normalize(f.cross(u));
+
+ SkM44 m(SkM44::kUninitialized_Constructor);
+ if (!SkM44::Cols(v4(s, 0), v4(s.cross(f), 0), v4(-f, 0), v4(eye, 1)).invert(&m)) {
+ m.setIdentity();
+ }
+ return m;
+}
+
+SkM44 SkM44::Perspective(float near, float far, float angle) {
+ SkASSERT(far > near);
+
+ float denomInv = sk_ieee_float_divide(1, far - near);
+ float halfAngle = angle * 0.5f;
+ SkASSERT(halfAngle != 0);
+ float cot = sk_ieee_float_divide(1, sk_float_tan(halfAngle));
+
+ SkM44 m;
+ m.setRC(0, 0, cot);
+ m.setRC(1, 1, cot);
+ m.setRC(2, 2, (far + near) * denomInv);
+ m.setRC(2, 3, 2 * far * near * denomInv);
+ m.setRC(3, 2, -1);
+ return m;
+}
diff --git a/gfx/skia/skia/src/core/SkMD5.cpp b/gfx/skia/skia/src/core/SkMD5.cpp
new file mode 100644
index 0000000000..43dc0db261
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMD5.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * The following code is based on the description in RFC 1321.
+ * http://www.ietf.org/rfc/rfc1321.txt
+ */
+
+//The following macros can be defined to affect the MD5 code generated.
+//SK_MD5_CLEAR_DATA causes all intermediate state to be overwritten with 0's.
+//SK_CPU_LENDIAN allows 32 bit <=> 8 bit conversions without copies (if aligned).
+//SK_CPU_FAST_UNALIGNED_ACCESS allows 32 bit <=> 8 bit conversions without copies if SK_CPU_LENDIAN.
+
+#include "src/core/SkMD5.h"
+
+#include "include/private/base/SkFeatures.h"
+
+/** MD5 basic transformation. Transforms state based on block. */
+static void transform(uint32_t state[4], const uint8_t block[64]);
+
+/** Encodes input into output (4 little endian 32 bit values). */
+static void encode(uint8_t output[16], const uint32_t input[4]);
+
+/** Encodes input into output (little endian 64 bit value). */
+static void encode(uint8_t output[8], const uint64_t input);
+
+/** Decodes input (4 little endian 32 bit values) into storage, if required. */
+static const uint32_t* decode(uint32_t storage[16], const uint8_t input[64]);
+
+// Initializes the chaining state to the constants mandated by RFC 1321
+// (section 3.3) and starts the byte counter at zero.
+SkMD5::SkMD5() : byteCount(0) {
+ // These are magic numbers from the specification.
+ this->state[0] = 0x67452301;
+ this->state[1] = 0xefcdab89;
+ this->state[2] = 0x98badcfe;
+ this->state[3] = 0x10325476;
+}
+
+// Feeds 'inputLength' bytes into the digest. Data is accumulated in the
+// internal 64-byte block buffer; whenever a full block is available it is
+// run through transform(). Full blocks in the middle of the input are
+// hashed directly from 'input' with no intermediate copy. Always returns
+// true (SkWStream contract).
+bool SkMD5::write(const void* buf, size_t inputLength) {
+ const uint8_t* input = reinterpret_cast<const uint8_t*>(buf);
+ // Low 6 bits of byteCount == current fill level of this->buffer.
+ unsigned int bufferIndex = (unsigned int)(this->byteCount & 0x3F);
+ unsigned int bufferAvailable = 64 - bufferIndex;
+
+ unsigned int inputIndex;
+ if (inputLength >= bufferAvailable) {
+ if (bufferIndex) {
+ // Top up the partially-filled buffer and hash it.
+ memcpy(&this->buffer[bufferIndex], input, bufferAvailable);
+ transform(this->state, this->buffer);
+ inputIndex = bufferAvailable;
+ } else {
+ inputIndex = 0;
+ }
+
+ // Hash whole 64-byte blocks straight out of the caller's data.
+ for (; inputIndex + 63 < inputLength; inputIndex += 64) {
+ transform(this->state, &input[inputIndex]);
+ }
+
+ bufferIndex = 0;
+ } else {
+ inputIndex = 0;
+ }
+
+ // Stash the remaining tail (< 64 bytes) for the next write/finish.
+ memcpy(&this->buffer[bufferIndex], &input[inputIndex], inputLength - inputIndex);
+
+ this->byteCount += inputLength;
+ return true;
+}
+
+// Completes the hash: appends the RFC 1321 padding (0x80 then zeros up to
+// 56 mod 64) and the 64-bit message length, then serializes the chaining
+// state as the 16-byte digest. Writing more data after finish() is
+// undefined (see the class declaration).
+SkMD5::Digest SkMD5::finish() {
+ SkMD5::Digest digest;
+ // Get the number of bits before padding. Captured now because the
+ // padding writes below advance byteCount.
+ uint8_t bits[8];
+ encode(bits, this->byteCount << 3);
+
+ // Pad out to 56 mod 64.
+ unsigned int bufferIndex = (unsigned int)(this->byteCount & 0x3F);
+ unsigned int paddingLength = (bufferIndex < 56) ? (56 - bufferIndex) : (120 - bufferIndex);
+ static const uint8_t PADDING[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+ (void)this->write(PADDING, paddingLength);
+
+ // Append length (length before padding, will cause final update).
+ (void)this->write(bits, 8);
+
+ // Write out digest.
+ encode(digest.data, this->state);
+
+#if defined(SK_MD5_CLEAR_DATA)
+ // Clear state.
+ memset(this, 0, sizeof(*this));
+#endif
+ return digest;
+}
+
+// The four per-round MD5 auxiliary functions from RFC 1321 section 3.4,
+// expressed as stateless functors so 'operation' can take them by value.
+struct F { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ //return (x & y) | ((~x) & z);
+ return ((y ^ z) & x) ^ z; //equivalent but faster
+}};
+
+struct G { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return (x & z) | (y & (~z));
+ //return ((x ^ y) & z) ^ y; //equivalent but slower
+}};
+
+struct H { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return x ^ y ^ z;
+}};
+
+struct I { uint32_t operator()(uint32_t x, uint32_t y, uint32_t z) {
+ return y ^ (x | (~z));
+}};
+
+/** Rotates x left n bits. Callers pass 1 <= n <= 31 (the fixed MD5 shift
+    amounts), so the (32 - n) shift never hits undefined behavior. */
+static inline uint32_t rotate_left(uint32_t x, uint8_t n) {
+ return (x << n) | (x >> (32 - n));
+}
+
+// One MD5 step: a = b + ((a + fn(b,c,d) + x + t) <<< s), where 'operation'
+// (the parameter intentionally shadows nothing useful — it is one of the
+// F/G/H/I functors above), x is a message word, s a shift, t a sine-table
+// constant. Only 'a' is updated, by reference.
+template <typename T>
+static inline void operation(T operation, uint32_t& a, uint32_t b, uint32_t c, uint32_t d,
+ uint32_t x, uint8_t s, uint32_t t) {
+ a = b + rotate_left(a + operation(b, c, d) + x + t, s);
+}
+
+// Core MD5 compression function: mixes one 64-byte block into the four-word
+// chaining state via the four 16-step rounds of RFC 1321. The trailing
+// comment on each step is its 1-based step number from the RFC.
+static void transform(uint32_t state[4], const uint8_t block[64]) {
+ uint32_t a = state[0], b = state[1], c = state[2], d = state[3];
+
+ uint32_t storage[16];
+ const uint32_t* X = decode(storage, block);
+
+ // Round 1
+ operation(F(), a, b, c, d, X[ 0], 7, 0xd76aa478); // 1
+ operation(F(), d, a, b, c, X[ 1], 12, 0xe8c7b756); // 2
+ operation(F(), c, d, a, b, X[ 2], 17, 0x242070db); // 3
+ operation(F(), b, c, d, a, X[ 3], 22, 0xc1bdceee); // 4
+ operation(F(), a, b, c, d, X[ 4], 7, 0xf57c0faf); // 5
+ operation(F(), d, a, b, c, X[ 5], 12, 0x4787c62a); // 6
+ operation(F(), c, d, a, b, X[ 6], 17, 0xa8304613); // 7
+ operation(F(), b, c, d, a, X[ 7], 22, 0xfd469501); // 8
+ operation(F(), a, b, c, d, X[ 8], 7, 0x698098d8); // 9
+ operation(F(), d, a, b, c, X[ 9], 12, 0x8b44f7af); // 10
+ operation(F(), c, d, a, b, X[10], 17, 0xffff5bb1); // 11
+ operation(F(), b, c, d, a, X[11], 22, 0x895cd7be); // 12
+ operation(F(), a, b, c, d, X[12], 7, 0x6b901122); // 13
+ operation(F(), d, a, b, c, X[13], 12, 0xfd987193); // 14
+ operation(F(), c, d, a, b, X[14], 17, 0xa679438e); // 15
+ operation(F(), b, c, d, a, X[15], 22, 0x49b40821); // 16
+
+ // Round 2
+ operation(G(), a, b, c, d, X[ 1], 5, 0xf61e2562); // 17
+ operation(G(), d, a, b, c, X[ 6], 9, 0xc040b340); // 18
+ operation(G(), c, d, a, b, X[11], 14, 0x265e5a51); // 19
+ operation(G(), b, c, d, a, X[ 0], 20, 0xe9b6c7aa); // 20
+ operation(G(), a, b, c, d, X[ 5], 5, 0xd62f105d); // 21
+ operation(G(), d, a, b, c, X[10], 9, 0x2441453); // 22
+ operation(G(), c, d, a, b, X[15], 14, 0xd8a1e681); // 23
+ operation(G(), b, c, d, a, X[ 4], 20, 0xe7d3fbc8); // 24
+ operation(G(), a, b, c, d, X[ 9], 5, 0x21e1cde6); // 25
+ operation(G(), d, a, b, c, X[14], 9, 0xc33707d6); // 26
+ operation(G(), c, d, a, b, X[ 3], 14, 0xf4d50d87); // 27
+ operation(G(), b, c, d, a, X[ 8], 20, 0x455a14ed); // 28
+ operation(G(), a, b, c, d, X[13], 5, 0xa9e3e905); // 29
+ operation(G(), d, a, b, c, X[ 2], 9, 0xfcefa3f8); // 30
+ operation(G(), c, d, a, b, X[ 7], 14, 0x676f02d9); // 31
+ operation(G(), b, c, d, a, X[12], 20, 0x8d2a4c8a); // 32
+
+ // Round 3
+ operation(H(), a, b, c, d, X[ 5], 4, 0xfffa3942); // 33
+ operation(H(), d, a, b, c, X[ 8], 11, 0x8771f681); // 34
+ operation(H(), c, d, a, b, X[11], 16, 0x6d9d6122); // 35
+ operation(H(), b, c, d, a, X[14], 23, 0xfde5380c); // 36
+ operation(H(), a, b, c, d, X[ 1], 4, 0xa4beea44); // 37
+ operation(H(), d, a, b, c, X[ 4], 11, 0x4bdecfa9); // 38
+ operation(H(), c, d, a, b, X[ 7], 16, 0xf6bb4b60); // 39
+ operation(H(), b, c, d, a, X[10], 23, 0xbebfbc70); // 40
+ operation(H(), a, b, c, d, X[13], 4, 0x289b7ec6); // 41
+ operation(H(), d, a, b, c, X[ 0], 11, 0xeaa127fa); // 42
+ operation(H(), c, d, a, b, X[ 3], 16, 0xd4ef3085); // 43
+ operation(H(), b, c, d, a, X[ 6], 23, 0x4881d05); // 44
+ operation(H(), a, b, c, d, X[ 9], 4, 0xd9d4d039); // 45
+ operation(H(), d, a, b, c, X[12], 11, 0xe6db99e5); // 46
+ operation(H(), c, d, a, b, X[15], 16, 0x1fa27cf8); // 47
+ operation(H(), b, c, d, a, X[ 2], 23, 0xc4ac5665); // 48
+
+ // Round 4
+ operation(I(), a, b, c, d, X[ 0], 6, 0xf4292244); // 49
+ operation(I(), d, a, b, c, X[ 7], 10, 0x432aff97); // 50
+ operation(I(), c, d, a, b, X[14], 15, 0xab9423a7); // 51
+ operation(I(), b, c, d, a, X[ 5], 21, 0xfc93a039); // 52
+ operation(I(), a, b, c, d, X[12], 6, 0x655b59c3); // 53
+ operation(I(), d, a, b, c, X[ 3], 10, 0x8f0ccc92); // 54
+ operation(I(), c, d, a, b, X[10], 15, 0xffeff47d); // 55
+ operation(I(), b, c, d, a, X[ 1], 21, 0x85845dd1); // 56
+ operation(I(), a, b, c, d, X[ 8], 6, 0x6fa87e4f); // 57
+ operation(I(), d, a, b, c, X[15], 10, 0xfe2ce6e0); // 58
+ operation(I(), c, d, a, b, X[ 6], 15, 0xa3014314); // 59
+ operation(I(), b, c, d, a, X[13], 21, 0x4e0811a1); // 60
+ operation(I(), a, b, c, d, X[ 4], 6, 0xf7537e82); // 61
+ operation(I(), d, a, b, c, X[11], 10, 0xbd3af235); // 62
+ operation(I(), c, d, a, b, X[ 2], 15, 0x2ad7d2bb); // 63
+ operation(I(), b, c, d, a, X[ 9], 21, 0xeb86d391); // 64
+
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+
+#if defined(SK_MD5_CLEAR_DATA)
+ // Clear sensitive information.
+ // NOTE(review): 'X == &storage' compares const uint32_t* against
+ // uint32_t(*)[16]; this branch is only compiled when SK_MD5_CLEAR_DATA
+ // is defined — verify it builds (likely intended: 'X == storage')
+ // before enabling that macro.
+ if (X == &storage) {
+ memset(storage, 0, sizeof(storage));
+ }
+#endif
+}
+
+// Serializes the four 32-bit state words as 16 little-endian bytes
+// (the on-the-wire digest order mandated by MD5).
+static void encode(uint8_t output[16], const uint32_t input[4]) {
+ for (size_t i = 0, j = 0; i < 4; i++, j += 4) {
+ output[j ] = (uint8_t) (input[i] & 0xff);
+ output[j+1] = (uint8_t)((input[i] >> 8) & 0xff);
+ output[j+2] = (uint8_t)((input[i] >> 16) & 0xff);
+ output[j+3] = (uint8_t)((input[i] >> 24) & 0xff);
+ }
+}
+
+// Serializes the 64-bit message-length-in-bits as 8 little-endian bytes,
+// appended as the final field of the padded MD5 message.
+static void encode(uint8_t output[8], const uint64_t input) {
+ output[0] = (uint8_t) (input & 0xff);
+ output[1] = (uint8_t)((input >> 8) & 0xff);
+ output[2] = (uint8_t)((input >> 16) & 0xff);
+ output[3] = (uint8_t)((input >> 24) & 0xff);
+ output[4] = (uint8_t)((input >> 32) & 0xff);
+ output[5] = (uint8_t)((input >> 40) & 0xff);
+ output[6] = (uint8_t)((input >> 48) & 0xff);
+ output[7] = (uint8_t)((input >> 56) & 0xff);
+}
+
+// True when 'pointer' is a multiple of 'byte_count' (used below to decide
+// whether block bytes may be read directly as uint32_t).
+static inline bool is_aligned(const void *pointer, size_t byte_count) {
+ return reinterpret_cast<uintptr_t>(pointer) % byte_count == 0;
+}
+
+// Presents the 64-byte block as 16 little-endian 32-bit words. On
+// little-endian targets the input may be reinterpreted in place (always if
+// SK_CPU_FAST_UNALIGNED_ACCESS, otherwise only when 4-byte aligned);
+// NOTE(review): the reinterpret_cast path relies on the platform tolerating
+// this type-punned read — confirm against the target's aliasing rules
+// before defining those macros. The portable fallback assembles each word
+// into 'storage'.
+static const uint32_t* decode(uint32_t storage[16], const uint8_t input[64]) {
+#if defined(SK_CPU_LENDIAN) && defined(SK_CPU_FAST_UNALIGNED_ACCESS)
+ return reinterpret_cast<const uint32_t*>(input);
+#else
+#if defined(SK_CPU_LENDIAN)
+ if (is_aligned(input, 4)) {
+ return reinterpret_cast<const uint32_t*>(input);
+ }
+#endif
+ for (size_t i = 0, j = 0; j < 64; i++, j += 4) {
+ storage[i] = ((uint32_t)input[j ]) |
+ (((uint32_t)input[j+1]) << 8) |
+ (((uint32_t)input[j+2]) << 16) |
+ (((uint32_t)input[j+3]) << 24);
+ }
+ return storage;
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkMD5.h b/gfx/skia/skia/src/core/SkMD5.h
new file mode 100644
index 0000000000..10cacf188b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMD5.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMD5_DEFINED
+#define SkMD5_DEFINED
+
+#include "include/core/SkStream.h"
+#include "include/private/base/SkTo.h"
+
+#include <cstdint>
+#include <cstring>
+
+/* Calculate a 128-bit MD5 message-digest of the bytes sent to this stream. */
+class SkMD5 : public SkWStream {
+public:
+ SkMD5();
+
+ /** Processes input, adding it to the digest.
+ Calling this after finish is undefined. */
+ bool write(const void* buffer, size_t size) final;
+
+ size_t bytesWritten() const final { return SkToSizeT(this->byteCount); }
+
+ /** Raw 16-byte (128-bit) MD5 digest, comparable by value. */
+ struct Digest {
+ uint8_t data[16];
+ bool operator ==(Digest const& other) const {
+ return 0 == memcmp(data, other.data, sizeof(data));
+ }
+ bool operator !=(Digest const& other) const { return !(*this == other); }
+ };
+
+ /** Computes and returns the digest. */
+ Digest finish();
+
+private:
+ uint64_t byteCount; // number of bytes, modulo 2^64
+ uint32_t state[4]; // state (ABCD)
+ uint8_t buffer[64]; // input buffer (holds the current partial block)
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMallocPixelRef.cpp b/gfx/skia/skia/src/core/SkMallocPixelRef.cpp
new file mode 100644
index 0000000000..6ee86aba35
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMallocPixelRef.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMallocPixelRef.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/private/base/SkMalloc.h"
+
+// Sanity-checks an SkImageInfo: non-negative dimensions and color/alpha
+// type values within their enum ranges.
+static bool is_valid(const SkImageInfo& info) {
+ if (info.width() < 0 || info.height() < 0 ||
+ (unsigned)info.colorType() > (unsigned)kLastEnum_SkColorType ||
+ (unsigned)info.alphaType() > (unsigned)kLastEnum_SkAlphaType)
+ {
+ return false;
+ }
+ return true;
+}
+
+// Allocates zero-initialized pixel storage for 'info' (rowBytes == 0 means
+// "use the minimum"), wrapped in a pixel ref that frees the memory in its
+// destructor. Returns nullptr on invalid info, size overflow, or
+// allocation failure.
+sk_sp<SkPixelRef> SkMallocPixelRef::MakeAllocate(const SkImageInfo& info, size_t rowBytes) {
+ if (rowBytes == 0) {
+ rowBytes = info.minRowBytes();
+ // rowBytes can still be zero, if it overflowed (width * bytesPerPixel > size_t)
+ // or if colortype is unknown
+ }
+ if (!is_valid(info) || !info.validRowBytes(rowBytes)) {
+ return nullptr;
+ }
+ size_t size = info.computeByteSize(rowBytes);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return nullptr;
+ }
+#if defined(SK_BUILD_FOR_FUZZER)
+ // Keep fuzzing runs from making pathological allocations.
+ if (size > 10000000) {
+ return nullptr;
+ }
+#endif
+ void* addr = sk_calloc_canfail(size);
+ if (nullptr == addr) {
+ return nullptr;
+ }
+
+ // Local subclass solely to pair the sk_calloc above with sk_free.
+ struct PixelRef final : public SkPixelRef {
+ PixelRef(int w, int h, void* s, size_t r) : SkPixelRef(w, h, s, r) {}
+ ~PixelRef() override { sk_free(this->pixels()); }
+ };
+ return sk_sp<SkPixelRef>(new PixelRef(info.width(), info.height(), addr, rowBytes));
+}
+
+// Wraps caller-supplied SkData as immutable pixel storage. The data must be
+// at least as large as the geometry implies; the pixel ref keeps a
+// reference to the SkData, so no explicit free is needed on destruction.
+// Returns nullptr on invalid info or undersized data.
+sk_sp<SkPixelRef> SkMallocPixelRef::MakeWithData(const SkImageInfo& info,
+ size_t rowBytes,
+ sk_sp<SkData> data) {
+ SkASSERT(data != nullptr);
+ if (!is_valid(info)) {
+ return nullptr;
+ }
+ // TODO: what should we return if computeByteSize returns 0?
+ // - the info was empty?
+ // - we overflowed computing the size?
+ if ((rowBytes < info.minRowBytes()) || (data->size() < info.computeByteSize(rowBytes))) {
+ return nullptr;
+ }
+ // fData keeps the pixels alive for the lifetime of the pixel ref.
+ struct PixelRef final : public SkPixelRef {
+ sk_sp<SkData> fData;
+ PixelRef(int w, int h, void* s, size_t r, sk_sp<SkData> d)
+ : SkPixelRef(w, h, s, r), fData(std::move(d)) {}
+ };
+ void* pixels = const_cast<void*>(data->data());
+ sk_sp<SkPixelRef> pr(new PixelRef(info.width(), info.height(), pixels, rowBytes,
+ std::move(data)));
+ pr->setImmutable(); // since we were created with (immutable) data
+ return pr;
+}
diff --git a/gfx/skia/skia/src/core/SkMask.cpp b/gfx/skia/skia/src/core/SkMask.cpp
new file mode 100644
index 0000000000..d072b2297f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMask.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMask.h"
+
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkSafeMath.h"
+
+#include <climits>
+
+/** returns the product if it is positive and fits in 31 bits. Otherwise this
+ returns 0. (The multiply is widened to 64 bits first, so the overflow
+ check itself cannot overflow.)
+ */
+static int32_t safeMul32(int32_t a, int32_t b) {
+ int64_t size = sk_64_mul(a, b);
+ if (size > 0 && SkTFitsIn<int32_t>(size)) {
+ return size;
+ }
+ return 0;
+}
+
+// Byte size of one plane (height * rowBytes); 0 on overflow per safeMul32.
+size_t SkMask::computeImageSize() const {
+ return safeMul32(fBounds.height(), fRowBytes);
+}
+
+// Like computeImageSize(), but triples the size for k3D_Format's three
+// planes; 0 on overflow.
+size_t SkMask::computeTotalImageSize() const {
+ size_t size = this->computeImageSize();
+ if (fFormat == SkMask::k3D_Format) {
+ size = safeMul32(SkToS32(size), 3);
+ }
+ return size;
+}
+
+/** We explicitly use this allocator for SkBitmap pixels, so that we can
+ freely assign memory allocated by one class to the other. The size is
+ rounded up to a multiple of 4; kZeroInit_Alloc requests zeroed memory,
+ and allocation failure aborts (SK_MALLOC_THROW).
+*/
+uint8_t* SkMask::AllocImage(size_t size, AllocType at) {
+ size_t aligned_size = SkSafeMath::Align4(size);
+ unsigned flags = SK_MALLOC_THROW;
+ if (at == kZeroInit_Alloc) {
+ flags |= SK_MALLOC_ZERO_INITIALIZE;
+ }
+ return static_cast<uint8_t*>(sk_malloc_flags(aligned_size, flags));
+}
+
+/** We explicitly use this allocator for SkBitmap pixels, so that we can
+ freely assign memory allocated by one class to the other. Counterpart of
+ AllocImage(); accepts nullptr like sk_free.
+*/
+void SkMask::FreeImage(void* image) {
+ sk_free(image);
+}
+
+// Builds an A8 destination mask whose bounds are 'src' expanded by radiusX/
+// radiusY on each side. All size arithmetic goes through SkSafeMath; any
+// overflow (or a side that does not fit in int) yields an empty mask with a
+// null image. The image buffer, when allocated, is uninitialized
+// (AllocImage defaults to kUninit_Alloc) — callers are expected to fill it.
+SkMask SkMask::PrepareDestination(int radiusX, int radiusY, const SkMask& src) {
+ SkSafeMath safe;
+
+ SkMask dst;
+ dst.fImage = nullptr;
+ dst.fFormat = SkMask::kA8_Format;
+
+ // dstW = srcW + 2 * radiusX;
+ size_t dstW = safe.add(src.fBounds.width(), safe.add(radiusX, radiusX));
+ // dstH = srcH + 2 * radiusY;
+ size_t dstH = safe.add(src.fBounds.height(), safe.add(radiusY, radiusY));
+
+ size_t toAlloc = safe.mul(dstW, dstH);
+
+ // We can only deal with masks that fit in INT_MAX and sides that fit in int.
+ if (!SkTFitsIn<int>(dstW) || !SkTFitsIn<int>(dstH) || toAlloc > INT_MAX || !safe) {
+ dst.fBounds.setEmpty();
+ dst.fRowBytes = 0;
+ return dst;
+ }
+
+ // Position the expanded bounds so the blur is centered on src.
+ dst.fBounds.setWH(SkTo<int>(dstW), SkTo<int>(dstH));
+ dst.fBounds.offset(src.fBounds.x(), src.fBounds.y());
+ dst.fBounds.offset(-radiusX, -radiusY);
+ dst.fRowBytes = SkTo<uint32_t>(dstW);
+
+ // Only allocate when the source actually has pixels (bounds-only masks
+ // stay bounds-only).
+ if (src.fImage != nullptr) {
+ dst.fImage = SkMask::AllocImage(toAlloc);
+ }
+
+ return dst;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+// log2(bytes-per-pixel) for each SkMask::Format, indexed by the enum value.
+// BW is bit-packed and has no per-pixel byte shift, hence unsupported here.
+static const int gMaskFormatToShift[] = {
+ ~0, // BW -- not supported
+ 0, // A8
+ 0, // 3D
+ 2, // ARGB32
+ 1, // LCD16
+ 0, // SDF
+};
+
+// Looks up the shift for 'format'; asserts it is in range and not BW.
+static int maskFormatToShift(SkMask::Format format) {
+ SkASSERT((unsigned)format < std::size(gMaskFormatToShift));
+ SkASSERT(SkMask::kBW_Format != format);
+ return gMaskFormatToShift[format];
+}
+
+// Format-generic pixel address: row offset via fRowBytes, column offset via
+// the per-format byte shift. (x, y) are in fBounds coordinates. Not valid
+// for kBW_Format (asserted), since its pixels are smaller than a byte.
+void* SkMask::getAddr(int x, int y) const {
+ SkASSERT(kBW_Format != fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage);
+
+ char* addr = (char*)fImage;
+ addr += (y - fBounds.fTop) * fRowBytes;
+ addr += (x - fBounds.fLeft) << maskFormatToShift(fFormat);
+ return addr;
+}
diff --git a/gfx/skia/skia/src/core/SkMask.h b/gfx/skia/skia/src/core/SkMask.h
new file mode 100644
index 0000000000..619dfcf76d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMask.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMask_DEFINED
+#define SkMask_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <memory>
+
+/** \class SkMask
+ SkMask is used to describe alpha bitmaps, either 1bit, 8bit, or
+ the 3-channel 3D format. These are passed to SkMaskFilter objects.
+*/
+struct SkMask {
+ SkMask() : fImage(nullptr) {}
+
+ enum Format : uint8_t {
+ kBW_Format, //!< 1bit per pixel mask (e.g. monochrome)
+ kA8_Format, //!< 8bits per pixel mask (e.g. antialiasing)
+ k3D_Format, //!< 3 8bit per pixel planes: alpha, mul, add
+ kARGB32_Format, //!< SkPMColor
+ kLCD16_Format, //!< 565 alpha for r/g/b
+ kSDF_Format, //!< 8bits representing signed distance field
+ };
+
+ enum {
+ kCountMaskFormats = kSDF_Format + 1
+ };
+
+ uint8_t* fImage; // pixel storage; may be null for bounds-only masks
+ SkIRect fBounds; // position and size of the mask
+ uint32_t fRowBytes; // byte stride between rows of fImage
+ Format fFormat; // one of the Format values above
+
+ static bool IsValidFormat(uint8_t format) { return format < kCountMaskFormats; }
+
+ /** Returns true if the mask is empty: i.e. it has an empty bounds.
+ */
+ bool isEmpty() const { return fBounds.isEmpty(); }
+
+ /** Return the byte size of the mask, assuming only 1 plane.
+ Does not account for k3D_Format. For that, use computeTotalImageSize().
+ If there is an overflow of 32bits, then returns 0.
+ */
+ size_t computeImageSize() const;
+
+ /** Return the byte size of the mask, taking into account
+ any extra planes (e.g. k3D_Format).
+ If there is an overflow of 32bits, then returns 0.
+ */
+ size_t computeTotalImageSize() const;
+
+ /** Returns the address of the byte that holds the specified bit.
+ Asserts that the mask is kBW_Format, and that x,y are in range.
+ x,y are in the same coordinate space as fBounds.
+ */
+ uint8_t* getAddr1(int x, int y) const {
+ SkASSERT(kBW_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != nullptr);
+ return fImage + ((x - fBounds.fLeft) >> 3) + (y - fBounds.fTop) * fRowBytes;
+ }
+
+ /** Returns the address of the specified byte.
+ Asserts that the mask is kA8_Format (or kSDF_Format, which shares the
+ 8-bit layout), and that x,y are in range.
+ x,y are in the same coordinate space as fBounds.
+ */
+ uint8_t* getAddr8(int x, int y) const {
+ SkASSERT(kA8_Format == fFormat || kSDF_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != nullptr);
+ return fImage + x - fBounds.fLeft + (y - fBounds.fTop) * fRowBytes;
+ }
+
+ /**
+ * Return the address of the specified 16bit mask. In the debug build,
+ * this asserts that the mask's format is kLCD16_Format, and that (x,y)
+ * are contained in the mask's fBounds.
+ */
+ uint16_t* getAddrLCD16(int x, int y) const {
+ SkASSERT(kLCD16_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != nullptr);
+ uint16_t* row = (uint16_t*)(fImage + (y - fBounds.fTop) * fRowBytes);
+ return row + (x - fBounds.fLeft);
+ }
+
+ /**
+ * Return the address of the specified 32bit mask. In the debug build,
+ * this asserts that the mask's format is 32bits, and that (x,y)
+ * are contained in the mask's fBounds.
+ */
+ uint32_t* getAddr32(int x, int y) const {
+ SkASSERT(kARGB32_Format == fFormat);
+ SkASSERT(fBounds.contains(x, y));
+ SkASSERT(fImage != nullptr);
+ uint32_t* row = (uint32_t*)(fImage + (y - fBounds.fTop) * fRowBytes);
+ return row + (x - fBounds.fLeft);
+ }
+
+ /**
+ * Returns the address of the specified pixel, computing the pixel-size
+ * at runtime based on the mask format. This will be slightly slower than
+ * using one of the routines where the format is implied by the name
+ * e.g. getAddr8 or getAddr32.
+ *
+ * x,y must be contained by the mask's bounds (this is asserted in the
+ * debug build, but not checked in the release build.)
+ *
+ * This should not be called with kBW_Format, as it will give unspecified
+ * results (and assert in the debug build).
+ */
+ void* getAddr(int x, int y) const;
+
+ enum AllocType {
+ kUninit_Alloc,
+ kZeroInit_Alloc,
+ };
+ static uint8_t* AllocImage(size_t bytes, AllocType = kUninit_Alloc);
+ static void FreeImage(void* image);
+
+ enum CreateMode {
+ kJustComputeBounds_CreateMode, //!< compute bounds and return
+ kJustRenderImage_CreateMode, //!< render into preallocated mask
+ kComputeBoundsAndRenderImage_CreateMode //!< compute bounds, alloc image and render into it
+ };
+
+ /** Iterates over the coverage values along a scanline in a given SkMask::Format. Provides
+ * constructor, copy constructor for creating
+ * operator++, operator-- for iterating over the coverage values on a scanline
+ * operator>>= to add row bytes
+ * operator* to get the coverage value at the current location
+ * operator< to compare two iterators
+ */
+ template <Format F> struct AlphaIter;
+
+ /**
+ * Returns initial destination mask data padded by radiusX and radiusY
+ */
+ static SkMask PrepareDestination(int radiusX, int radiusY, const SkMask& src);
+};
+
+// Bit-packed iterator: walks MSB-to-LSB within each byte (fOffset counts
+// down from 7), advancing fPtr at byte boundaries. Dereference expands the
+// bit to full coverage (0xFF) or none (0).
+template <> struct SkMask::AlphaIter<SkMask::kBW_Format> {
+ AlphaIter(const uint8_t* ptr, int offset) : fPtr(ptr), fOffset(7 - offset) {}
+ AlphaIter(const AlphaIter& that) : fPtr(that.fPtr), fOffset(that.fOffset) {}
+ AlphaIter& operator++() {
+ if (0 < fOffset ) {
+ --fOffset;
+ } else {
+ ++fPtr;
+ fOffset = 7;
+ }
+ return *this;
+ }
+ AlphaIter& operator--() {
+ if (fOffset < 7) {
+ ++fOffset;
+ } else {
+ --fPtr;
+ fOffset = 0;
+ }
+ return *this;
+ }
+ AlphaIter& operator>>=(uint32_t rb) {
+ fPtr = SkTAddOffset<const uint8_t>(fPtr, rb);
+ return *this;
+ }
+ uint8_t operator*() const { return ((*fPtr) >> fOffset) & 1 ? 0xFF : 0; }
+ bool operator<(const AlphaIter& that) const {
+ // Larger fOffset means an earlier bit within the same byte.
+ return fPtr < that.fPtr || (fPtr == that.fPtr && fOffset > that.fOffset);
+ }
+ const uint8_t* fPtr;
+ int fOffset;
+};
+
+// A8 iterator: each byte is already a coverage value, so dereference is a
+// plain load and stepping is plain pointer arithmetic.
+template <> struct SkMask::AlphaIter<SkMask::kA8_Format> {
+ AlphaIter(const uint8_t* ptr) : fPtr(ptr) {}
+ AlphaIter(const AlphaIter& that) : fPtr(that.fPtr) {}
+ AlphaIter& operator++() { ++fPtr; return *this; }
+ AlphaIter& operator--() { --fPtr; return *this; }
+ AlphaIter& operator>>=(uint32_t rb) {
+ fPtr = SkTAddOffset<const uint8_t>(fPtr, rb);
+ return *this;
+ }
+ uint8_t operator*() const { return *fPtr; }
+ bool operator<(const AlphaIter& that) const { return fPtr < that.fPtr; }
+ const uint8_t* fPtr;
+};
+
+// ARGB32 iterator: coverage is the alpha channel extracted from each
+// packed 32-bit pixel.
+template <> struct SkMask::AlphaIter<SkMask::kARGB32_Format> {
+ AlphaIter(const uint32_t* ptr) : fPtr(ptr) {}
+ AlphaIter(const AlphaIter& that) : fPtr(that.fPtr) {}
+ AlphaIter& operator++() { ++fPtr; return *this; }
+ AlphaIter& operator--() { --fPtr; return *this; }
+ AlphaIter& operator>>=(uint32_t rb) {
+ fPtr = SkTAddOffset<const uint32_t>(fPtr, rb);
+ return *this;
+ }
+ uint8_t operator*() const { return SkGetPackedA32(*fPtr); }
+ bool operator<(const AlphaIter& that) const { return fPtr < that.fPtr; }
+ const uint32_t* fPtr;
+};
+
+// LCD16 iterator: each 565-packed value holds per-subpixel (r/g/b)
+// coverage; dereference collapses it to a single alpha by averaging the
+// three expanded channels.
+template <> struct SkMask::AlphaIter<SkMask::kLCD16_Format> {
+ AlphaIter(const uint16_t* ptr) : fPtr(ptr) {}
+ AlphaIter(const AlphaIter& that) : fPtr(that.fPtr) {}
+ AlphaIter& operator++() { ++fPtr; return *this; }
+ AlphaIter& operator--() { --fPtr; return *this; }
+ AlphaIter& operator>>=(uint32_t rb) {
+ fPtr = SkTAddOffset<const uint16_t>(fPtr, rb);
+ return *this;
+ }
+ uint8_t operator*() const {
+ unsigned packed = *fPtr;
+ unsigned r = SkPacked16ToR32(packed);
+ unsigned g = SkPacked16ToG32(packed);
+ unsigned b = SkPacked16ToB32(packed);
+ return (r + g + b) / 3;
+ }
+ bool operator<(const AlphaIter& that) const { return fPtr < that.fPtr; }
+ const uint16_t* fPtr;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * \using SkAutoMaskImage
+ *
+ * Stack class used to manage the fImage buffer in a SkMask.
+ * When this object loses scope, the buffer is freed with SkMask::FreeImage().
+ */
+using SkAutoMaskFreeImage =
+ std::unique_ptr<uint8_t, SkFunctionObject<SkMask::FreeImage>>;
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMaskBlurFilter.cpp b/gfx/skia/skia/src/core/SkMaskBlurFilter.cpp
new file mode 100644
index 0000000000..f159dbc571
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskBlurFilter.cpp
@@ -0,0 +1,1054 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMaskBlurFilter.h"
+
+#include "include/core/SkColorPriv.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkGaussFilter.h"
+
+#include <cmath>
+#include <climits>
+
+namespace {
+static const double kPi = 3.14159265358979323846264338327950288;
+
+// Plans a three-pass box blur for one sigma (the repeated box passes
+// approximate a Gaussian — TODO confirm against the blur spec this derives
+// from). Computes the pass sizes, the border of affected pixels, and the
+// fixed-point weight used to normalize the triple running sum.
+class PlanGauss final {
+public:
+ explicit PlanGauss(double sigma) {
+ auto possibleWindow = static_cast<int>(floor(sigma * 3 * sqrt(2 * kPi) / 4 + 0.5));
+ auto window = std::max(1, possibleWindow);
+
+ fPass0Size = window - 1;
+ fPass1Size = window - 1;
+ fPass2Size = (window & 1) == 1 ? window - 1 : window;
+
+ // Calculating the border is tricky. I will go through the odd case which is simpler, and
+ // then through the even case. Given a stack of filters seven wide for the odd case of
+ // three passes.
+ //
+ // S
+ // aaaAaaa
+ // bbbBbbb
+ // cccCccc
+ // D
+ //
+ // The furthest changed pixel is when the filters are in the following configuration.
+ //
+ // S
+ // aaaAaaa
+ // bbbBbbb
+ // cccCccc
+ // D
+ //
+ // The A pixel is calculated using the value S, the B uses A, and the C uses B, and
+ // finally D is C. So, with a window size of seven the border is nine. In general, the
+ // border is 3*((window - 1)/2).
+ //
+ // For even cases the filter stack is more complicated. The spec specifies two passes
+ // of even filters and a final pass of odd filters. A stack for a width of six looks like
+ // this.
+ //
+ // S
+ // aaaAaa
+ // bbBbbb
+ // cccCccc
+ // D
+ //
+ // The furthest pixel looks like this.
+ //
+ // S
+ // aaaAaa
+ // bbBbbb
+ // cccCccc
+ // D
+ //
+ // For a window of size six, the border value is seven. In general the border is 3 *
+ // (window/2) -1.
+ fBorder = (window & 1) == 1 ? 3 * ((window - 1) / 2) : 3 * (window / 2) - 1;
+ fSlidingWindow = 2 * fBorder + 1;
+
+ // If the window is odd then the divisor is just window ^ 3 otherwise,
+ // it is window * window * (window + 1) = window ^ 2 + window ^ 3;
+ auto window2 = window * window;
+ auto window3 = window2 * window;
+ auto divisor = (window & 1) == 1 ? window3 : window3 + window2;
+
+ // 32.32 fixed-point reciprocal of the divisor.
+ fWeight = static_cast<uint64_t>(round(1.0 / divisor * (1ull << 32)));
+ }
+
+ size_t bufferSize() const { return fPass0Size + fPass1Size + fPass2Size; }
+
+ int border() const { return fBorder; }
+
+public:
+ // One blur pass over a single scanline: maintains three cascaded running
+ // sums (sum0/1/2) with a circular delay buffer per stage, which together
+ // implement the three box-filter passes in a single sweep.
+ class Scan {
+ public:
+ Scan(uint64_t weight, int noChangeCount,
+ uint32_t* buffer0, uint32_t* buffer0End,
+ uint32_t* buffer1, uint32_t* buffer1End,
+ uint32_t* buffer2, uint32_t* buffer2End)
+ : fWeight{weight}
+ , fNoChangeCount{noChangeCount}
+ , fBuffer0{buffer0}
+ , fBuffer0End{buffer0End}
+ , fBuffer1{buffer1}
+ , fBuffer1End{buffer1End}
+ , fBuffer2{buffer2}
+ , fBuffer2End{buffer2End}
+ { }
+
+ template <typename AlphaIter> void blur(const AlphaIter srcBegin, const AlphaIter srcEnd,
+ uint8_t* dst, int dstStride, uint8_t* dstEnd) const {
+ auto buffer0Cursor = fBuffer0;
+ auto buffer1Cursor = fBuffer1;
+ auto buffer2Cursor = fBuffer2;
+
+ std::memset(fBuffer0, 0x00, (fBuffer2End - fBuffer0) * sizeof(*fBuffer0));
+
+ uint32_t sum0 = 0;
+ uint32_t sum1 = 0;
+ uint32_t sum2 = 0;
+
+ // Consume the source generating pixels.
+ for (AlphaIter src = srcBegin; src < srcEnd; ++src, dst += dstStride) {
+ uint32_t leadingEdge = *src;
+ sum0 += leadingEdge;
+ sum1 += sum0;
+ sum2 += sum1;
+
+ *dst = this->finalScale(sum2);
+
+ sum2 -= *buffer2Cursor;
+ *buffer2Cursor = sum1;
+ buffer2Cursor = (buffer2Cursor + 1) < fBuffer2End ? buffer2Cursor + 1 : fBuffer2;
+
+ sum1 -= *buffer1Cursor;
+ *buffer1Cursor = sum0;
+ buffer1Cursor = (buffer1Cursor + 1) < fBuffer1End ? buffer1Cursor + 1 : fBuffer1;
+
+ sum0 -= *buffer0Cursor;
+ *buffer0Cursor = leadingEdge;
+ buffer0Cursor = (buffer0Cursor + 1) < fBuffer0End ? buffer0Cursor + 1 : fBuffer0;
+ }
+
+ // The leading edge is off the right side of the mask.
+ for (int i = 0; i < fNoChangeCount; i++) {
+ uint32_t leadingEdge = 0;
+ sum0 += leadingEdge;
+ sum1 += sum0;
+ sum2 += sum1;
+
+ *dst = this->finalScale(sum2);
+
+ sum2 -= *buffer2Cursor;
+ *buffer2Cursor = sum1;
+ buffer2Cursor = (buffer2Cursor + 1) < fBuffer2End ? buffer2Cursor + 1 : fBuffer2;
+
+ sum1 -= *buffer1Cursor;
+ *buffer1Cursor = sum0;
+ buffer1Cursor = (buffer1Cursor + 1) < fBuffer1End ? buffer1Cursor + 1 : fBuffer1;
+
+ sum0 -= *buffer0Cursor;
+ *buffer0Cursor = leadingEdge;
+ buffer0Cursor = (buffer0Cursor + 1) < fBuffer0End ? buffer0Cursor + 1 : fBuffer0;
+
+ dst += dstStride;
+ }
+
+ // Starting from the right, fill in the rest of the buffer.
+ std::memset(fBuffer0, 0, (fBuffer2End - fBuffer0) * sizeof(*fBuffer0));
+
+ sum0 = sum1 = sum2 = 0;
+
+ uint8_t* dstCursor = dstEnd;
+ AlphaIter src = srcEnd;
+ while (dstCursor > dst) {
+ dstCursor -= dstStride;
+ uint32_t leadingEdge = *(--src);
+ sum0 += leadingEdge;
+ sum1 += sum0;
+ sum2 += sum1;
+
+ *dstCursor = this->finalScale(sum2);
+
+ sum2 -= *buffer2Cursor;
+ *buffer2Cursor = sum1;
+ buffer2Cursor = (buffer2Cursor + 1) < fBuffer2End ? buffer2Cursor + 1 : fBuffer2;
+
+ sum1 -= *buffer1Cursor;
+ *buffer1Cursor = sum0;
+ buffer1Cursor = (buffer1Cursor + 1) < fBuffer1End ? buffer1Cursor + 1 : fBuffer1;
+
+ sum0 -= *buffer0Cursor;
+ *buffer0Cursor = leadingEdge;
+ buffer0Cursor = (buffer0Cursor + 1) < fBuffer0End ? buffer0Cursor + 1 : fBuffer0;
+ }
+ }
+
+ private:
+ inline static constexpr uint64_t kHalf = static_cast<uint64_t>(1) << 31;
+
+ // Normalize a triple-sum to 0..255: multiply by the 32.32 fixed-point
+ // weight, round (kHalf), and take the integer part.
+ uint8_t finalScale(uint32_t sum) const {
+ return SkTo<uint8_t>((fWeight * sum + kHalf) >> 32);
+ }
+
+ uint64_t fWeight;
+ int fNoChangeCount;
+ uint32_t* fBuffer0;
+ uint32_t* fBuffer0End;
+ uint32_t* fBuffer1;
+ uint32_t* fBuffer1End;
+ uint32_t* fBuffer2;
+ uint32_t* fBuffer2End;
+ };
+
+ // Carves the caller-provided scratch (>= bufferSize() entries) into the
+ // three per-pass delay buffers and bundles them into a Scan.
+ Scan makeBlurScan(int width, uint32_t* buffer) const {
+ uint32_t* buffer0, *buffer0End, *buffer1, *buffer1End, *buffer2, *buffer2End;
+ buffer0 = buffer;
+ buffer0End = buffer1 = buffer0 + fPass0Size;
+ buffer1End = buffer2 = buffer1 + fPass1Size;
+ buffer2End = buffer2 + fPass2Size;
+ int noChangeCount = fSlidingWindow > width ? fSlidingWindow - width : 0;
+
+ return Scan(
+ fWeight, noChangeCount,
+ buffer0, buffer0End,
+ buffer1, buffer1End,
+ buffer2, buffer2End);
+ }
+
+ uint64_t fWeight;
+ int fBorder;
+ int fSlidingWindow;
+ int fPass0Size;
+ int fPass1Size;
+ int fPass2Size;
+};
+
+} // namespace
+
+// NB 135 is the largest sigma that will not cause a buffer full of 255 mask values to overflow
+// using the Gauss filter. It also limits the size of buffers used hold intermediate values. The
+// additional + 1 added to window represents adding one more leading element before subtracting the
+// trailing element.
+// Explanation of maximums:
+// sum0 = (window + 1) * 255
+// sum1 = (window + 1) * sum0 -> (window + 1) * (window + 1) * 255
+// sum2 = (window + 1) * sum1 -> (window + 1) * (window + 1) * (window + 1) * 255 -> window^3 * 255
+//
+// The value (window + 1)^3 * 255 must fit in a uint32_t. So,
+// (window + 1)^3 * 255 < 2^32. window = 255.
+//
+// window = floor(sigma * 3 * sqrt(2 * kPi) / 4)
+// For window <= 255, the largest value for sigma is 135.
+SkMaskBlurFilter::SkMaskBlurFilter(double sigmaW, double sigmaH)
+ : fSigmaW{SkTPin(sigmaW, 0.0, 135.0)}
+ , fSigmaH{SkTPin(sigmaH, 0.0, 135.0)}
+{
+ SkASSERT(sigmaW >= 0);
+ SkASSERT(sigmaH >= 0);
+}
+
+bool SkMaskBlurFilter::hasNoBlur() const {
+ return (3 * fSigmaW <= 1) && (3 * fSigmaH <= 1);
+}
+
+// We favor A8 masks, and if we need to work with another format, we'll convert to A8 first.
+// Each of these converts width (up to 8) mask values to A8.
+static void bw_to_a8(uint8_t* a8, const uint8_t* from, int width) {
+ SkASSERT(0 < width && width <= 8);
+
+ uint8_t masks = *from;
+ for (int i = 0; i < width; ++i) {
+ a8[i] = (masks >> (7 - i)) & 1 ? 0xFF
+ : 0x00;
+ }
+}
+static void lcd_to_a8(uint8_t* a8, const uint8_t* from, int width) {
+ SkASSERT(0 < width && width <= 8);
+
+ for (int i = 0; i < width; ++i) {
+ unsigned rgb = reinterpret_cast<const uint16_t*>(from)[i],
+ r = SkPacked16ToR32(rgb),
+ g = SkPacked16ToG32(rgb),
+ b = SkPacked16ToB32(rgb);
+ a8[i] = (r + g + b) / 3;
+ }
+}
+static void argb32_to_a8(uint8_t* a8, const uint8_t* from, int width) {
+ SkASSERT(0 < width && width <= 8);
+ for (int i = 0; i < width; ++i) {
+ uint32_t rgba = reinterpret_cast<const uint32_t*>(from)[i];
+ a8[i] = SkGetPackedA32(rgba);
+ }
+}
+using ToA8 = decltype(bw_to_a8);
+
+using fp88 = skvx::Vec<8, uint16_t>; // 8-wide fixed point 8.8
+
+static fp88 load(const uint8_t* from, int width, ToA8* toA8) {
+ // Our fast path is a full 8-byte load of A8.
+ // So we'll conditionally handle the two slow paths using tmp:
+ // - if we have a function to convert another mask to A8, use it;
+ // - if not but we have less than 8 bytes to load, load them one at a time.
+ uint8_t tmp[8] = {0,0,0,0, 0,0,0,0};
+ if (toA8) {
+ toA8(tmp, from, width);
+ from = tmp;
+ } else if (width < 8) {
+ for (int i = 0; i < width; ++i) {
+ tmp[i] = from[i];
+ }
+ from = tmp;
+ }
+
+ // Load A8 and convert to 8.8 fixed-point.
+ return skvx::cast<uint16_t>(skvx::byte8::Load(from)) << 8;
+}
+
+static void store(uint8_t* to, const fp88& v, int width) {
+ skvx::byte8 b = skvx::cast<uint8_t>(v >> 8);
+ if (width == 8) {
+ b.store(to);
+ } else {
+ uint8_t buffer[8];
+ b.store(buffer);
+ for (int i = 0; i < width; i++) {
+ to[i] = buffer[i];
+ }
+ }
+}
+
+static constexpr uint16_t _____ = 0u;
+static constexpr uint16_t kHalf = 0x80u;
+
+// In all the blur_x_radius_N and blur_y_radius_N functions the gaussian values are encoded
+// in 0.16 format, none of the values is greater than one. The incoming mask values are in 8.8
+// format. The resulting multiply has a 8.24 format, by the mulhi truncates the lower 16 bits
+// resulting in a 8.8 format.
+//
+// The blur_x_radius_N function below blur along a row of pixels using a kernel with radius N. This
+// system is setup to minimize the number of multiplies needed.
+//
+// Explanation:
+// Blurring a specific mask value is given by the following equation where D_n is the resulting
+// mask value and S_n is the source value. The example below is for a filter with a radius of 1
+// and a width of 3 (radius == (width-1)/2). The indexes for the source and destination are
+// aligned. The filter is given by G_n where n is the symmetric filter value.
+//
+// D[n] = S[n-1]*G[1] + S[n]*G[0] + S[n+1]*G[1].
+//
+// We can start the source index at an offset relative to the destination separated by the
+// radius. This results in a non-traditional restating of the above filter.
+//
+// D[n] = S[n]*G[1] + S[n+1]*G[0] + S[n+2]*G[1]
+//
+// If we look at three specific consecutive destinations the following equations result:
+//
+// D[5] = S[5]*G[1] + S[6]*G[0] + S[7]*G[1]
+// D[7] = S[6]*G[1] + S[7]*G[0] + S[8]*G[1]
+// D[8] = S[7]*G[1] + S[8]*G[0] + S[9]*G[1].
+//
+// In the above equations, notice that S[7] is used in all three. In particular, two values are
+// used: S[7]*G[0] and S[7]*G[1]. So, S[7] is only multiplied twice, but used in D[5], D[6] and
+// D[7].
+//
+// From the point of view of a source value we end up with the following three equations.
+//
+// Given S[7]:
+// D[5] += S[7]*G[1]
+// D[6] += S[7]*G[0]
+// D[7] += S[7]*G[1]
+//
+// In General:
+// D[n] += S[n]*G[1]
+// D[n+1] += S[n]*G[0]
+// D[n+2] += S[n]*G[1]
+//
+// Now these equations can be ganged using SIMD to form:
+// D[n..n+7] += S[n..n+7]*G[1]
+// D[n+1..n+8] += S[n..n+7]*G[0]
+// D[n+2..n+9] += S[n..n+7]*G[1]
+// The next set of values becomes.
+// D[n+8..n+15] += S[n+8..n+15]*G[1]
+// D[n+9..n+16] += S[n+8..n+15]*G[0]
+// D[n+10..n+17] += S[n+8..n+15]*G[1]
+// You can see that the D[n+8] and D[n+9] values overlap the two sets, using parts of both
+// S[n..7] and S[n+8..n+15].
+//
+// Just one more transformation allows the code to maintain all working values in
+// registers. I introduce the notation {0, S[n..n+7] * G[k]} to mean that the value where 0 is
+// prepended to the array of values to form {0, S[n] * G[k], ..., S[n+7]*G[k]}.
+//
+// D[n..n+7] += S[n..n+7] * G[1]
+// D[n..n+8] += {0, S[n..n+7] * G[0]}
+// D[n..n+9] += {0, 0, S[n..n+7] * G[1]}
+//
+// Now we can encode D[n..n+7] in a single Sk8h register called d0, and D[n+8..n+15] in a
+// register d8. In addition, S[0..n+7] becomes s0.
+//
+// The translation of the {0, S[n..n+7] * G[k]} is translated in the following way below.
+//
+// Sk8h v0 = s0*G[0]
+// Sk8h v1 = s0*G[1]
+// /* D[n..n+7] += S[n..n+7] * G[1] */
+// d0 += v1;
+// /* D[n..n+8] += {0, S[n..n+7] * G[0]} */
+// d0 += {_____, v0[0], v0[1], v0[2], v0[3], v0[4], v0[5], v0[6]}
+// d1 += {v0[7], _____, _____, _____, _____, _____, _____, _____}
+// /* D[n..n+9] += {0, 0, S[n..n+7] * G[1]} */
+// d0 += {_____, _____, v1[0], v1[1], v1[2], v1[3], v1[4], v1[5]}
+// d1 += {v1[6], v1[7], _____, _____, _____, _____, _____, _____}
+// Where we rely on the compiler to generate efficient code for the {____, n, ....} notation.
+
+static void blur_x_radius_1(
+ const fp88& s0,
+ const fp88& g0, const fp88& g1, const fp88&, const fp88&, const fp88&,
+ fp88* d0, fp88* d8) {
+
+ auto v1 = mulhi(s0, g1);
+ auto v0 = mulhi(s0, g0);
+
+ // D[n..n+7] += S[n..n+7] * G[1]
+ *d0 += v1;
+
+ //D[n..n+8] += {0, S[n..n+7] * G[0]}
+ *d0 += fp88{_____, v0[0], v0[1], v0[2], v0[3], v0[4], v0[5], v0[6]};
+ *d8 += fp88{v0[7], _____, _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+9] += {0, 0, S[n..n+7] * G[1]}
+ *d0 += fp88{_____, _____, v1[0], v1[1], v1[2], v1[3], v1[4], v1[5]};
+ *d8 += fp88{v1[6], v1[7], _____, _____, _____, _____, _____, _____};
+
+}
+
+static void blur_x_radius_2(
+ const fp88& s0,
+ const fp88& g0, const fp88& g1, const fp88& g2, const fp88&, const fp88&,
+ fp88* d0, fp88* d8) {
+ auto v0 = mulhi(s0, g0);
+ auto v1 = mulhi(s0, g1);
+ auto v2 = mulhi(s0, g2);
+
+ // D[n..n+7] += S[n..n+7] * G[2]
+ *d0 += v2;
+
+ // D[n..n+8] += {0, S[n..n+7] * G[1]}
+ *d0 += fp88{_____, v1[0], v1[1], v1[2], v1[3], v1[4], v1[5], v1[6]};
+ *d8 += fp88{v1[7], _____, _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+9] += {0, 0, S[n..n+7] * G[0]}
+ *d0 += fp88{_____, _____, v0[0], v0[1], v0[2], v0[3], v0[4], v0[5]};
+ *d8 += fp88{v0[6], v0[7], _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+10] += {0, 0, 0, S[n..n+7] * G[1]}
+ *d0 += fp88{_____, _____, _____, v1[0], v1[1], v1[2], v1[3], v1[4]};
+ *d8 += fp88{v1[5], v1[6], v1[7], _____, _____, _____, _____, _____};
+
+ // D[n..n+11] += {0, 0, 0, 0, S[n..n+7] * G[2]}
+ *d0 += fp88{_____, _____, _____, _____, v2[0], v2[1], v2[2], v2[3]};
+ *d8 += fp88{v2[4], v2[5], v2[6], v2[7], _____, _____, _____, _____};
+}
+
+static void blur_x_radius_3(
+ const fp88& s0,
+ const fp88& g0, const fp88& g1, const fp88& g2, const fp88& g3, const fp88&,
+ fp88* d0, fp88* d8) {
+ auto v0 = mulhi(s0, g0);
+ auto v1 = mulhi(s0, g1);
+ auto v2 = mulhi(s0, g2);
+ auto v3 = mulhi(s0, g3);
+
+ // D[n..n+7] += S[n..n+7] * G[3]
+ *d0 += v3;
+
+ // D[n..n+8] += {0, S[n..n+7] * G[2]}
+ *d0 += fp88{_____, v2[0], v2[1], v2[2], v2[3], v2[4], v2[5], v2[6]};
+ *d8 += fp88{v2[7], _____, _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+9] += {0, 0, S[n..n+7] * G[1]}
+ *d0 += fp88{_____, _____, v1[0], v1[1], v1[2], v1[3], v1[4], v1[5]};
+ *d8 += fp88{v1[6], v1[7], _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+10] += {0, 0, 0, S[n..n+7] * G[0]}
+ *d0 += fp88{_____, _____, _____, v0[0], v0[1], v0[2], v0[3], v0[4]};
+ *d8 += fp88{v0[5], v0[6], v0[7], _____, _____, _____, _____, _____};
+
+ // D[n..n+11] += {0, 0, 0, 0, S[n..n+7] * G[1]}
+ *d0 += fp88{_____, _____, _____, _____, v1[0], v1[1], v1[2], v1[3]};
+ *d8 += fp88{v1[4], v1[5], v1[6], v1[7], _____, _____, _____, _____};
+
+ // D[n..n+12] += {0, 0, 0, 0, 0, S[n..n+7] * G[2]}
+ *d0 += fp88{_____, _____, _____, _____, _____, v2[0], v2[1], v2[2]};
+ *d8 += fp88{v2[3], v2[4], v2[5], v2[6], v2[7], _____, _____, _____};
+
+ // D[n..n+13] += {0, 0, 0, 0, 0, 0, S[n..n+7] * G[3]}
+ *d0 += fp88{_____, _____, _____, _____, _____, _____, v3[0], v3[1]};
+ *d8 += fp88{v3[2], v3[3], v3[4], v3[5], v3[6], v3[7], _____, _____};
+}
+
+static void blur_x_radius_4(
+ const fp88& s0,
+ const fp88& g0, const fp88& g1, const fp88& g2, const fp88& g3, const fp88& g4,
+ fp88* d0, fp88* d8) {
+ auto v0 = mulhi(s0, g0);
+ auto v1 = mulhi(s0, g1);
+ auto v2 = mulhi(s0, g2);
+ auto v3 = mulhi(s0, g3);
+ auto v4 = mulhi(s0, g4);
+
+ // D[n..n+7] += S[n..n+7] * G[4]
+ *d0 += v4;
+
+ // D[n..n+8] += {0, S[n..n+7] * G[3]}
+ *d0 += fp88{_____, v3[0], v3[1], v3[2], v3[3], v3[4], v3[5], v3[6]};
+ *d8 += fp88{v3[7], _____, _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+9] += {0, 0, S[n..n+7] * G[2]}
+ *d0 += fp88{_____, _____, v2[0], v2[1], v2[2], v2[3], v2[4], v2[5]};
+ *d8 += fp88{v2[6], v2[7], _____, _____, _____, _____, _____, _____};
+
+ // D[n..n+10] += {0, 0, 0, S[n..n+7] * G[1]}
+ *d0 += fp88{_____, _____, _____, v1[0], v1[1], v1[2], v1[3], v1[4]};
+ *d8 += fp88{v1[5], v1[6], v1[7], _____, _____, _____, _____, _____};
+
+ // D[n..n+11] += {0, 0, 0, 0, S[n..n+7] * G[0]}
+ *d0 += fp88{_____, _____, _____, _____, v0[0], v0[1], v0[2], v0[3]};
+ *d8 += fp88{v0[4], v0[5], v0[6], v0[7], _____, _____, _____, _____};
+
+ // D[n..n+12] += {0, 0, 0, 0, 0, S[n..n+7] * G[1]}
+ *d0 += fp88{_____, _____, _____, _____, _____, v1[0], v1[1], v1[2]};
+ *d8 += fp88{v1[3], v1[4], v1[5], v1[6], v1[7], _____, _____, _____};
+
+ // D[n..n+13] += {0, 0, 0, 0, 0, 0, S[n..n+7] * G[2]}
+ *d0 += fp88{_____, _____, _____, _____, _____, _____, v2[0], v2[1]};
+ *d8 += fp88{v2[2], v2[3], v2[4], v2[5], v2[6], v2[7], _____, _____};
+
+ // D[n..n+14] += {0, 0, 0, 0, 0, 0, 0, S[n..n+7] * G[3]}
+ *d0 += fp88{_____, _____, _____, _____, _____, _____, _____, v3[0]};
+ *d8 += fp88{v3[1], v3[2], v3[3], v3[4], v3[5], v3[6], v3[7], _____};
+
+ // D[n..n+15] += {0, 0, 0, 0, 0, 0, 0, 0, S[n..n+7] * G[4]}
+ *d8 += v4;
+}
+
+using BlurX = decltype(blur_x_radius_1);
+
+// BlurX will only be one of the functions blur_x_radius_(1|2|3|4).
+static void blur_row(
+ BlurX blur,
+ const fp88& g0, const fp88& g1, const fp88& g2, const fp88& g3, const fp88& g4,
+ const uint8_t* src, int srcW,
+ uint8_t* dst, int dstW) {
+ // Clear the buffer to handle summing wider than source.
+ fp88 d0(kHalf), d8(kHalf);
+
+ // Go by multiples of 8 in src.
+ int x = 0;
+ for (; x <= srcW - 8; x += 8) {
+ blur(load(src, 8, nullptr), g0, g1, g2, g3, g4, &d0, &d8);
+
+ store(dst, d0, 8);
+
+ d0 = d8;
+ d8 = fp88(kHalf);
+
+ src += 8;
+ dst += 8;
+ }
+
+ // There are src values left, but the remainder of src values is not a multiple of 8.
+ int srcTail = srcW - x;
+ if (srcTail > 0) {
+
+ blur(load(src, srcTail, nullptr), g0, g1, g2, g3, g4, &d0, &d8);
+
+ int dstTail = std::min(8, dstW - x);
+ store(dst, d0, dstTail);
+
+ d0 = d8;
+ dst += dstTail;
+ x += dstTail;
+ }
+
+ // There are dst mask values to complete.
+ int dstTail = dstW - x;
+ if (dstTail > 0) {
+ store(dst, d0, dstTail);
+ }
+}
+
+// BlurX will only be one of the functions blur_x_radius_(1|2|3|4).
+static void blur_x_rect(BlurX blur,
+ uint16_t* gauss,
+ const uint8_t* src, size_t srcStride, int srcW,
+ uint8_t* dst, size_t dstStride, int dstW, int dstH) {
+
+ fp88 g0(gauss[0]),
+ g1(gauss[1]),
+ g2(gauss[2]),
+ g3(gauss[3]),
+ g4(gauss[4]);
+
+ // Blur *ALL* the rows.
+ for (int y = 0; y < dstH; y++) {
+ blur_row(blur, g0, g1, g2, g3, g4, src, srcW, dst, dstW);
+ src += srcStride;
+ dst += dstStride;
+ }
+}
+
+static void direct_blur_x(int radius, uint16_t* gauss,
+ const uint8_t* src, size_t srcStride, int srcW,
+ uint8_t* dst, size_t dstStride, int dstW, int dstH) {
+
+ switch (radius) {
+ case 1:
+ blur_x_rect(blur_x_radius_1, gauss, src, srcStride, srcW, dst, dstStride, dstW, dstH);
+ break;
+
+ case 2:
+ blur_x_rect(blur_x_radius_2, gauss, src, srcStride, srcW, dst, dstStride, dstW, dstH);
+ break;
+
+ case 3:
+ blur_x_rect(blur_x_radius_3, gauss, src, srcStride, srcW, dst, dstStride, dstW, dstH);
+ break;
+
+ case 4:
+ blur_x_rect(blur_x_radius_4, gauss, src, srcStride, srcW, dst, dstStride, dstW, dstH);
+ break;
+
+ default:
+ SkASSERTF(false, "The radius %d is not handled\n", radius);
+ }
+}
+
+// The operations of the blur_y_radius_N functions work on a theme similar to the blur_x_radius_N
+// functions, but end up being simpler because there is no complicated shift of registers. We
+// start with the non-traditional form of the gaussian filter. In the following r is the value
+// when added generates the next value in the column.
+//
+// D[n+0r] = S[n+0r]*G[1]
+// + S[n+1r]*G[0]
+// + S[n+2r]*G[1]
+//
+// Expanding out in a way similar to blur_x_radius_N for specific values of n.
+//
+// D[n+0r] = S[n-2r]*G[1] + S[n-1r]*G[0] + S[n+0r]*G[1]
+// D[n+1r] = S[n-1r]*G[1] + S[n+0r]*G[0] + S[n+1r]*G[1]
+// D[n+2r] = S[n+0r]*G[1] + S[n+1r]*G[0] + S[n+2r]*G[1]
+//
+// We can see that S[n+0r] is in all three D[] equations, but is only multiplied twice. Now we
+// can look at the calculation form the point of view of a source value.
+//
+// Given S[n+0r]:
+// D[n+0r] += S[n+0r]*G[1];
+// /* D[n+0r] is done and can be stored now. */
+// D[n+1r] += S[n+0r]*G[0];
+// D[n+2r] = S[n+0r]*G[1];
+//
+// Remember, by induction, that D[n+0r] == S[n-2r]*G[1] + S[n-1r]*G[0] before adding in
+// S[n+0r]*G[1]. So, after the addition D[n+0r] has finished calculation and can be stored. Also,
+// notice that D[n+2r] is receiving its first value from S[n+0r]*G[1] and is not added in. Notice
+// how values flow in the following two iterations in source.
+//
+// D[n+0r] += S[n+0r]*G[1]
+// D[n+1r] += S[n+0r]*G[0]
+// D[n+2r] = S[n+0r]*G[1]
+// /* ------- */
+// D[n+1r] += S[n+1r]*G[1]
+// D[n+2r] += S[n+1r]*G[0]
+// D[n+3r] = S[n+1r]*G[1]
+//
+// Instead of using memory we can introduce temporaries d01 and d12. The update step changes
+// to the following.
+//
+// answer = d01 + S[n+0r]*G[1]
+// d01 = d12 + S[n+0r]*G[0]
+// d12 = S[n+0r]*G[1]
+// return answer
+//
+// Finally, this can be ganged into SIMD style.
+// answer[0..7] = d01[0..7] + S[n+0r..n+0r+7]*G[1]
+// d01[0..7] = d12[0..7] + S[n+0r..n+0r+7]*G[0]
+// d12[0..7] = S[n+0r..n+0r+7]*G[1]
+// return answer[0..7]
+static fp88 blur_y_radius_1(
+ const fp88& s0,
+ const fp88& g0, const fp88& g1, const fp88&, const fp88&, const fp88&,
+ fp88* d01, fp88* d12, fp88*, fp88*, fp88*, fp88*, fp88*, fp88*) {
+ auto v0 = mulhi(s0, g0);
+ auto v1 = mulhi(s0, g1);
+
+ fp88 answer = *d01 + v1;
+ *d01 = *d12 + v0;
+ *d12 = v1 + kHalf;
+
+ return answer;
+}
+
+static fp88 blur_y_radius_2(
+ const fp88& s0,
+ const fp88& g0, const fp88& g1, const fp88& g2, const fp88&, const fp88&,
+ fp88* d01, fp88* d12, fp88* d23, fp88* d34, fp88*, fp88*, fp88*, fp88*) {
+ auto v0 = mulhi(s0, g0);
+ auto v1 = mulhi(s0, g1);
+ auto v2 = mulhi(s0, g2);
+
+ fp88 answer = *d01 + v2;
+ *d01 = *d12 + v1;
+ *d12 = *d23 + v0;
+ *d23 = *d34 + v1;
+ *d34 = v2 + kHalf;
+
+ return answer;
+}
+
+static fp88 blur_y_radius_3(
+ const fp88& s0,
+ const fp88& g0, const fp88& g1, const fp88& g2, const fp88& g3, const fp88&,
+ fp88* d01, fp88* d12, fp88* d23, fp88* d34, fp88* d45, fp88* d56, fp88*, fp88*) {
+ auto v0 = mulhi(s0, g0);
+ auto v1 = mulhi(s0, g1);
+ auto v2 = mulhi(s0, g2);
+ auto v3 = mulhi(s0, g3);
+
+ fp88 answer = *d01 + v3;
+ *d01 = *d12 + v2;
+ *d12 = *d23 + v1;
+ *d23 = *d34 + v0;
+ *d34 = *d45 + v1;
+ *d45 = *d56 + v2;
+ *d56 = v3 + kHalf;
+
+ return answer;
+}
+
+static fp88 blur_y_radius_4(
+ const fp88& s0,
+ const fp88& g0, const fp88& g1, const fp88& g2, const fp88& g3, const fp88& g4,
+ fp88* d01, fp88* d12, fp88* d23, fp88* d34, fp88* d45, fp88* d56, fp88* d67, fp88* d78) {
+ auto v0 = mulhi(s0, g0);
+ auto v1 = mulhi(s0, g1);
+ auto v2 = mulhi(s0, g2);
+ auto v3 = mulhi(s0, g3);
+ auto v4 = mulhi(s0, g4);
+
+ fp88 answer = *d01 + v4;
+ *d01 = *d12 + v3;
+ *d12 = *d23 + v2;
+ *d23 = *d34 + v1;
+ *d34 = *d45 + v0;
+ *d45 = *d56 + v1;
+ *d56 = *d67 + v2;
+ *d67 = *d78 + v3;
+ *d78 = v4 + kHalf;
+
+ return answer;
+}
+
+using BlurY = decltype(blur_y_radius_1);
+
+// BlurY will be one of blur_y_radius_(1|2|3|4).
+static void blur_column(
+ ToA8 toA8,
+ BlurY blur, int radius, int width,
+ const fp88& g0, const fp88& g1, const fp88& g2, const fp88& g3, const fp88& g4,
+ const uint8_t* src, size_t srcRB, int srcH,
+ uint8_t* dst, size_t dstRB) {
+ fp88 d01(kHalf), d12(kHalf), d23(kHalf), d34(kHalf),
+ d45(kHalf), d56(kHalf), d67(kHalf), d78(kHalf);
+
+ auto flush = [&](uint8_t* to, const fp88& v0, const fp88& v1) {
+ store(to, v0, width);
+ to += dstRB;
+ store(to, v1, width);
+ return to + dstRB;
+ };
+
+ for (int y = 0; y < srcH; y += 1) {
+ auto s = load(src, width, toA8);
+ auto b = blur(s,
+ g0, g1, g2, g3, g4,
+ &d01, &d12, &d23, &d34, &d45, &d56, &d67, &d78);
+ store(dst, b, width);
+ src += srcRB;
+ dst += dstRB;
+ }
+
+ if (radius >= 1) {
+ dst = flush(dst, d01, d12);
+ }
+ if (radius >= 2) {
+ dst = flush(dst, d23, d34);
+ }
+ if (radius >= 3) {
+ dst = flush(dst, d45, d56);
+ }
+ if (radius >= 4) {
+ flush(dst, d67, d78);
+ }
+}
+
+// BlurY will be one of blur_y_radius_(1|2|3|4).
+static void blur_y_rect(ToA8 toA8, const int strideOf8,
+ BlurY blur, int radius, uint16_t *gauss,
+ const uint8_t *src, size_t srcRB, int srcW, int srcH,
+ uint8_t *dst, size_t dstRB) {
+
+ fp88 g0(gauss[0]),
+ g1(gauss[1]),
+ g2(gauss[2]),
+ g3(gauss[3]),
+ g4(gauss[4]);
+
+ int x = 0;
+ for (; x <= srcW - 8; x += 8) {
+ blur_column(toA8, blur, radius, 8,
+ g0, g1, g2, g3, g4,
+ src, srcRB, srcH,
+ dst, dstRB);
+ src += strideOf8;
+ dst += 8;
+ }
+
+ int xTail = srcW - x;
+ if (xTail > 0) {
+ blur_column(toA8, blur, radius, xTail,
+ g0, g1, g2, g3, g4,
+ src, srcRB, srcH,
+ dst, dstRB);
+ }
+}
+
+static void direct_blur_y(ToA8 toA8, const int strideOf8,
+ int radius, uint16_t* gauss,
+ const uint8_t* src, size_t srcRB, int srcW, int srcH,
+ uint8_t* dst, size_t dstRB) {
+
+ switch (radius) {
+ case 1:
+ blur_y_rect(toA8, strideOf8, blur_y_radius_1, 1, gauss,
+ src, srcRB, srcW, srcH,
+ dst, dstRB);
+ break;
+
+ case 2:
+ blur_y_rect(toA8, strideOf8, blur_y_radius_2, 2, gauss,
+ src, srcRB, srcW, srcH,
+ dst, dstRB);
+ break;
+
+ case 3:
+ blur_y_rect(toA8, strideOf8, blur_y_radius_3, 3, gauss,
+ src, srcRB, srcW, srcH,
+ dst, dstRB);
+ break;
+
+ case 4:
+ blur_y_rect(toA8, strideOf8, blur_y_radius_4, 4, gauss,
+ src, srcRB, srcW, srcH,
+ dst, dstRB);
+ break;
+
+ default:
+ SkASSERTF(false, "The radius %d is not handled\n", radius);
+ }
+}
+
+static SkIPoint small_blur(double sigmaX, double sigmaY, const SkMask& src, SkMask* dst) {
+ SkASSERT(sigmaX == sigmaY); // TODO
+ SkASSERT(0.01 <= sigmaX && sigmaX < 2);
+ SkASSERT(0.01 <= sigmaY && sigmaY < 2);
+
+ SkGaussFilter filterX{sigmaX},
+ filterY{sigmaY};
+
+ int radiusX = filterX.radius(),
+ radiusY = filterY.radius();
+
+ SkASSERT(radiusX <= 4 && radiusY <= 4);
+
+ auto prepareGauss = [](const SkGaussFilter& filter, uint16_t* factors) {
+ int i = 0;
+ for (double d : filter) {
+ factors[i++] = static_cast<uint16_t>(round(d * (1 << 16)));
+ }
+ };
+
+ uint16_t gaussFactorsX[SkGaussFilter::kGaussArrayMax],
+ gaussFactorsY[SkGaussFilter::kGaussArrayMax];
+
+ prepareGauss(filterX, gaussFactorsX);
+ prepareGauss(filterY, gaussFactorsY);
+
+ *dst = SkMask::PrepareDestination(radiusX, radiusY, src);
+ if (src.fImage == nullptr) {
+ return {SkTo<int32_t>(radiusX), SkTo<int32_t>(radiusY)};
+ }
+ if (dst->fImage == nullptr) {
+ dst->fBounds.setEmpty();
+ return {0, 0};
+ }
+
+ int srcW = src.fBounds.width(),
+ srcH = src.fBounds.height();
+
+ int dstW = dst->fBounds.width(),
+ dstH = dst->fBounds.height();
+
+ size_t srcRB = src.fRowBytes,
+ dstRB = dst->fRowBytes;
+
+ //TODO: handle bluring in only one direction.
+
+ // Blur vertically and copy to destination.
+ switch (src.fFormat) {
+ case SkMask::kBW_Format:
+ direct_blur_y(bw_to_a8, 1,
+ radiusY, gaussFactorsY,
+ src.fImage, srcRB, srcW, srcH,
+ dst->fImage + radiusX, dstRB);
+ break;
+ case SkMask::kA8_Format:
+ direct_blur_y(nullptr, 8,
+ radiusY, gaussFactorsY,
+ src.fImage, srcRB, srcW, srcH,
+ dst->fImage + radiusX, dstRB);
+ break;
+ case SkMask::kARGB32_Format:
+ direct_blur_y(argb32_to_a8, 32,
+ radiusY, gaussFactorsY,
+ src.fImage, srcRB, srcW, srcH,
+ dst->fImage + radiusX, dstRB);
+ break;
+ case SkMask::kLCD16_Format:
+ direct_blur_y(lcd_to_a8, 16, radiusY, gaussFactorsY,
+ src.fImage, srcRB, srcW, srcH,
+ dst->fImage + radiusX, dstRB);
+ break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+
+ // Blur horizontally in place.
+ direct_blur_x(radiusX, gaussFactorsX,
+ dst->fImage + radiusX, dstRB, srcW,
+ dst->fImage, dstRB, dstW, dstH);
+
+ return {radiusX, radiusY};
+}
+
+// TODO: assuming sigmaW = sigmaH. Allow different sigmas. Right now the
+// API forces the sigmas to be the same.
+SkIPoint SkMaskBlurFilter::blur(const SkMask& src, SkMask* dst) const {
+
+ if (fSigmaW < 2.0 && fSigmaH < 2.0) {
+ return small_blur(fSigmaW, fSigmaH, src, dst);
+ }
+
+ // 1024 is a place holder guess until more analysis can be done.
+ SkSTArenaAlloc<1024> alloc;
+
+ PlanGauss planW(fSigmaW);
+ PlanGauss planH(fSigmaH);
+
+ int borderW = planW.border(),
+ borderH = planH.border();
+ SkASSERT(borderH >= 0 && borderW >= 0);
+
+ *dst = SkMask::PrepareDestination(borderW, borderH, src);
+ if (src.fImage == nullptr) {
+ return {SkTo<int32_t>(borderW), SkTo<int32_t>(borderH)};
+ }
+ if (dst->fImage == nullptr) {
+ dst->fBounds.setEmpty();
+ return {0, 0};
+ }
+
+ int srcW = src.fBounds.width(),
+ srcH = src.fBounds.height(),
+ dstW = dst->fBounds.width(),
+ dstH = dst->fBounds.height();
+ SkASSERT(srcW >= 0 && srcH >= 0 && dstW >= 0 && dstH >= 0);
+
+ auto bufferSize = std::max(planW.bufferSize(), planH.bufferSize());
+ auto buffer = alloc.makeArrayDefault<uint32_t>(bufferSize);
+
+ // Blur both directions.
+ int tmpW = srcH,
+ tmpH = dstW;
+
+ // Make sure not to overflow the multiply for the tmp buffer size.
+ if (tmpH > std::numeric_limits<int>::max() / tmpW) {
+ return {0, 0};
+ }
+ auto tmp = alloc.makeArrayDefault<uint8_t>(tmpW * tmpH);
+
+ // Blur horizontally, and transpose.
+ const PlanGauss::Scan& scanW = planW.makeBlurScan(srcW, buffer);
+ switch (src.fFormat) {
+ case SkMask::kBW_Format: {
+ const uint8_t* bwStart = src.fImage;
+ auto start = SkMask::AlphaIter<SkMask::kBW_Format>(bwStart, 0);
+ auto end = SkMask::AlphaIter<SkMask::kBW_Format>(bwStart + (srcW / 8), srcW % 8);
+ for (int y = 0; y < srcH; ++y, start >>= src.fRowBytes, end >>= src.fRowBytes) {
+ auto tmpStart = &tmp[y];
+ scanW.blur(start, end, tmpStart, tmpW, tmpStart + tmpW * tmpH);
+ }
+ } break;
+ case SkMask::kA8_Format: {
+ const uint8_t* a8Start = src.fImage;
+ auto start = SkMask::AlphaIter<SkMask::kA8_Format>(a8Start);
+ auto end = SkMask::AlphaIter<SkMask::kA8_Format>(a8Start + srcW);
+ for (int y = 0; y < srcH; ++y, start >>= src.fRowBytes, end >>= src.fRowBytes) {
+ auto tmpStart = &tmp[y];
+ scanW.blur(start, end, tmpStart, tmpW, tmpStart + tmpW * tmpH);
+ }
+ } break;
+ case SkMask::kARGB32_Format: {
+ const uint32_t* argbStart = reinterpret_cast<const uint32_t*>(src.fImage);
+ auto start = SkMask::AlphaIter<SkMask::kARGB32_Format>(argbStart);
+ auto end = SkMask::AlphaIter<SkMask::kARGB32_Format>(argbStart + srcW);
+ for (int y = 0; y < srcH; ++y, start >>= src.fRowBytes, end >>= src.fRowBytes) {
+ auto tmpStart = &tmp[y];
+ scanW.blur(start, end, tmpStart, tmpW, tmpStart + tmpW * tmpH);
+ }
+ } break;
+ case SkMask::kLCD16_Format: {
+ const uint16_t* lcdStart = reinterpret_cast<const uint16_t*>(src.fImage);
+ auto start = SkMask::AlphaIter<SkMask::kLCD16_Format>(lcdStart);
+ auto end = SkMask::AlphaIter<SkMask::kLCD16_Format>(lcdStart + srcW);
+ for (int y = 0; y < srcH; ++y, start >>= src.fRowBytes, end >>= src.fRowBytes) {
+ auto tmpStart = &tmp[y];
+ scanW.blur(start, end, tmpStart, tmpW, tmpStart + tmpW * tmpH);
+ }
+ } break;
+ default:
+ SK_ABORT("Unhandled format.");
+ }
+
+ // Blur vertically (scan in memory order because of the transposition),
+ // and transpose back to the original orientation.
+ const PlanGauss::Scan& scanH = planH.makeBlurScan(tmpW, buffer);
+ for (int y = 0; y < tmpH; y++) {
+ auto tmpStart = &tmp[y * tmpW];
+ auto dstStart = &dst->fImage[y];
+
+ scanH.blur(tmpStart, tmpStart + tmpW,
+ dstStart, dst->fRowBytes, dstStart + dst->fRowBytes * dstH);
+ }
+
+ return {SkTo<int32_t>(borderW), SkTo<int32_t>(borderH)};
+}
diff --git a/gfx/skia/skia/src/core/SkMaskBlurFilter.h b/gfx/skia/skia/src/core/SkMaskBlurFilter.h
new file mode 100644
index 0000000000..fe10cf4abb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskBlurFilter.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskBlurFilter_DEFINED
+#define SkMaskBlurFilter_DEFINED
+
+#include <algorithm>
+#include <memory>
+#include <tuple>
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkMask.h"
+
+// Implement a single channel Gaussian blur. The specifics for implementation are taken from:
+// https://drafts.fxtf.org/filters/#feGaussianBlurElement
+class SkMaskBlurFilter {
+public:
+ // Create an object suitable for filtering an SkMask using a filter with width sigmaW and
+ // height sigmaH.
+ SkMaskBlurFilter(double sigmaW, double sigmaH);
+
+ // returns true iff the sigmas will result in an identity mask (no blurring)
+ bool hasNoBlur() const;
+
+ // Given a src SkMask, generate dst SkMask returning the border width and height.
+ SkIPoint blur(const SkMask& src, SkMask* dst) const;
+
+private:
+ const double fSigmaW;
+ const double fSigmaH;
+};
+
+#endif // SkBlurMaskFilter_DEFINED
diff --git a/gfx/skia/skia/src/core/SkMaskCache.cpp b/gfx/skia/skia/src/core/SkMaskCache.cpp
new file mode 100644
index 0000000000..f08f4d7ee0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskCache.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMaskCache.h"
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
+
+struct MaskValue {
+ SkMask fMask;
+ SkCachedData* fData;
+};
+
+namespace {
+static unsigned gRRectBlurKeyNamespaceLabel;
+
+struct RRectBlurKey : public SkResourceCache::Key {
+public:
+ RRectBlurKey(SkScalar sigma, const SkRRect& rrect, SkBlurStyle style)
+ : fSigma(sigma)
+ , fStyle(style)
+ , fRRect(rrect)
+ {
+ this->init(&gRRectBlurKeyNamespaceLabel, 0,
+ sizeof(fSigma) + sizeof(fStyle) + sizeof(fRRect));
+ }
+
+ SkScalar fSigma;
+ int32_t fStyle;
+ SkRRect fRRect;
+};
+
+struct RRectBlurRec : public SkResourceCache::Rec {
+ RRectBlurRec(RRectBlurKey key, const SkMask& mask, SkCachedData* data)
+ : fKey(key)
+ {
+ fValue.fMask = mask;
+ fValue.fData = data;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~RRectBlurRec() override {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ RRectBlurKey fKey;
+ MaskValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "rrect-blur"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const RRectBlurRec& rec = static_cast<const RRectBlurRec&>(baseRec);
+ MaskValue* result = (MaskValue*)contextData;
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ *result = rec.fValue;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkMaskCache::FindAndRef(SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect, SkMask* mask, SkResourceCache* localCache) {
+ MaskValue result;
+ RRectBlurKey key(sigma, rrect, style);
+ if (!CHECK_LOCAL(localCache, find, Find, key, RRectBlurRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *mask = result.fMask;
+ mask->fImage = (uint8_t*)(result.fData->data());
+ return result.fData;
+}
+
+void SkMaskCache::Add(SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache) {
+ RRectBlurKey key(sigma, rrect, style);
+ return CHECK_LOCAL(localCache, add, Add, new RRectBlurRec(key, mask, data));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+static unsigned gRectsBlurKeyNamespaceLabel;
+
+struct RectsBlurKey : public SkResourceCache::Key {
+public:
+ RectsBlurKey(SkScalar sigma, SkBlurStyle style, const SkRect rects[], int count)
+ : fSigma(sigma)
+ , fStyle(style)
+ {
+ SkASSERT(1 == count || 2 == count);
+ SkIRect ir;
+ rects[0].roundOut(&ir);
+ fSizes[0] = SkSize{rects[0].width(), rects[0].height()};
+ if (2 == count) {
+ fSizes[1] = SkSize{rects[1].width(), rects[1].height()};
+ fSizes[2] = SkSize{rects[0].x() - rects[1].x(), rects[0].y() - rects[1].y()};
+ } else {
+ fSizes[1] = SkSize{0, 0};
+ fSizes[2] = SkSize{0, 0};
+ }
+ fSizes[3] = SkSize{rects[0].x() - ir.x(), rects[0].y() - ir.y()};
+
+ this->init(&gRectsBlurKeyNamespaceLabel, 0,
+ sizeof(fSigma) + sizeof(fStyle) + sizeof(fSizes));
+ }
+
+ SkScalar fSigma;
+ int32_t fStyle;
+ SkSize fSizes[4];
+};
+
+struct RectsBlurRec : public SkResourceCache::Rec {
+ RectsBlurRec(RectsBlurKey key, const SkMask& mask, SkCachedData* data)
+ : fKey(key)
+ {
+ fValue.fMask = mask;
+ fValue.fData = data;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~RectsBlurRec() override {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ RectsBlurKey fKey;
+ MaskValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "rects-blur"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const RectsBlurRec& rec = static_cast<const RectsBlurRec&>(baseRec);
+ MaskValue* result = static_cast<MaskValue*>(contextData);
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ *result = rec.fValue;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkMaskCache::FindAndRef(SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count, SkMask* mask,
+ SkResourceCache* localCache) {
+ MaskValue result;
+ RectsBlurKey key(sigma, style, rects, count);
+ if (!CHECK_LOCAL(localCache, find, Find, key, RectsBlurRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *mask = result.fMask;
+ mask->fImage = (uint8_t*)(result.fData->data());
+ return result.fData;
+}
+
+void SkMaskCache::Add(SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache) {
+ RectsBlurKey key(sigma, style, rects, count);
+ return CHECK_LOCAL(localCache, add, Add, new RectsBlurRec(key, mask, data));
+}
diff --git a/gfx/skia/skia/src/core/SkMaskCache.h b/gfx/skia/skia/src/core/SkMaskCache.h
new file mode 100644
index 0000000000..d22a5d1be0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskCache.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskCache_DEFINED
+#define SkMaskCache_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkResourceCache.h"
+
+class SkMaskCache {
+public:
+ /**
+ * On success, return a ref to the SkCachedData that holds the pixels, and have mask
+ * already point to that memory.
+ *
+ * On failure, return nullptr.
+ */
+ static SkCachedData* FindAndRef(SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect, SkMask* mask,
+ SkResourceCache* localCache = nullptr);
+ static SkCachedData* FindAndRef(SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count, SkMask* mask,
+ SkResourceCache* localCache = nullptr);
+
+ /**
+ * Add a mask and its pixel-data to the cache.
+ */
+ static void Add(SkScalar sigma, SkBlurStyle style,
+ const SkRRect& rrect, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache = nullptr);
+ static void Add(SkScalar sigma, SkBlurStyle style,
+ const SkRect rects[], int count, const SkMask& mask, SkCachedData* data,
+ SkResourceCache* localCache = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMaskFilter.cpp b/gfx/skia/skia/src/core/SkMaskFilter.cpp
new file mode 100644
index 0000000000..9cae59f5b6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskFilter.cpp
@@ -0,0 +1,414 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkMaskFilter.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+
+#if defined(SK_GANESH)
+#include "include/private/base/SkTo.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+
+class GrClip;
+class GrRecordingContext;
+class GrStyledShape;
+enum class GrColorType;
+struct GrFPArgs;
+namespace skgpu {
+namespace ganesh {
+class SurfaceDrawContext;
+}
+} // namespace skgpu
+#endif
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)
+#include "src/text/gpu/SDFMaskFilter.h"
+#endif
+
+class SkRRect;
+enum SkAlphaType : int;
+struct SkDeserialProcs;
+
+SkMaskFilterBase::NinePatch::~NinePatch() {
+ if (fCache) {
+ SkASSERT((const void*)fMask.fImage == fCache->data());
+ fCache->unref();
+ } else {
+ SkMask::FreeImage(fMask.fImage);
+ }
+}
+
+bool SkMaskFilterBase::asABlur(BlurRec*) const {
+ return false;
+}
+
+static void extractMaskSubset(const SkMask& src, SkMask* dst) {
+ SkASSERT(src.fBounds.contains(dst->fBounds));
+
+ const int dx = dst->fBounds.left() - src.fBounds.left();
+ const int dy = dst->fBounds.top() - src.fBounds.top();
+ dst->fImage = src.fImage + dy * src.fRowBytes + dx;
+ dst->fRowBytes = src.fRowBytes;
+ dst->fFormat = src.fFormat;
+}
+
+static void blitClippedMask(SkBlitter* blitter, const SkMask& mask,
+ const SkIRect& bounds, const SkIRect& clipR) {
+ SkIRect r;
+ if (r.intersect(bounds, clipR)) {
+ blitter->blitMask(mask, r);
+ }
+}
+
+static void blitClippedRect(SkBlitter* blitter, const SkIRect& rect, const SkIRect& clipR) {
+ SkIRect r;
+ if (r.intersect(rect, clipR)) {
+ blitter->blitRect(r.left(), r.top(), r.width(), r.height());
+ }
+}
+
+#if 0
+static void dump(const SkMask& mask) {
+ for (int y = mask.fBounds.top(); y < mask.fBounds.bottom(); ++y) {
+ for (int x = mask.fBounds.left(); x < mask.fBounds.right(); ++x) {
+ SkDebugf("%02X", *mask.getAddr8(x, y));
+ }
+ SkDebugf("\n");
+ }
+ SkDebugf("\n");
+}
+#endif
+
+static void draw_nine_clipped(const SkMask& mask, const SkIRect& outerR,
+ const SkIPoint& center, bool fillCenter,
+ const SkIRect& clipR, SkBlitter* blitter) {
+ int cx = center.x();
+ int cy = center.y();
+ SkMask m;
+
+ // top-left
+ m.fBounds = mask.fBounds;
+ m.fBounds.fRight = cx;
+ m.fBounds.fBottom = cy;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.left(), outerR.top());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // top-right
+ m.fBounds = mask.fBounds;
+ m.fBounds.fLeft = cx + 1;
+ m.fBounds.fBottom = cy;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.right() - m.fBounds.width(), outerR.top());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // bottom-left
+ m.fBounds = mask.fBounds;
+ m.fBounds.fRight = cx;
+ m.fBounds.fTop = cy + 1;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.left(), outerR.bottom() - m.fBounds.height());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ // bottom-right
+ m.fBounds = mask.fBounds;
+ m.fBounds.fLeft = cx + 1;
+ m.fBounds.fTop = cy + 1;
+ if (m.fBounds.width() > 0 && m.fBounds.height() > 0) {
+ extractMaskSubset(mask, &m);
+ m.fBounds.offsetTo(outerR.right() - m.fBounds.width(),
+ outerR.bottom() - m.fBounds.height());
+ blitClippedMask(blitter, m, m.fBounds, clipR);
+ }
+
+ SkIRect innerR;
+ innerR.setLTRB(outerR.left() + cx - mask.fBounds.left(),
+ outerR.top() + cy - mask.fBounds.top(),
+ outerR.right() + (cx + 1 - mask.fBounds.right()),
+ outerR.bottom() + (cy + 1 - mask.fBounds.bottom()));
+ if (fillCenter) {
+ blitClippedRect(blitter, innerR, clipR);
+ }
+
+ const int innerW = innerR.width();
+ size_t storageSize = (innerW + 1) * (sizeof(int16_t) + sizeof(uint8_t));
+ SkAutoSMalloc<4*1024> storage(storageSize);
+ int16_t* runs = (int16_t*)storage.get();
+ uint8_t* alpha = (uint8_t*)(runs + innerW + 1);
+
+ SkIRect r;
+ // top
+ r.setLTRB(innerR.left(), outerR.top(), innerR.right(), innerR.top());
+ if (r.intersect(clipR)) {
+ int startY = std::max(0, r.top() - outerR.top());
+ int stopY = startY + r.height();
+ int width = r.width();
+ for (int y = startY; y < stopY; ++y) {
+ runs[0] = width;
+ runs[width] = 0;
+ alpha[0] = *mask.getAddr8(cx, mask.fBounds.top() + y);
+ blitter->blitAntiH(r.left(), outerR.top() + y, alpha, runs);
+ }
+ }
+ // bottom
+ r.setLTRB(innerR.left(), innerR.bottom(), innerR.right(), outerR.bottom());
+ if (r.intersect(clipR)) {
+ int startY = outerR.bottom() - r.bottom();
+ int stopY = startY + r.height();
+ int width = r.width();
+ for (int y = startY; y < stopY; ++y) {
+ runs[0] = width;
+ runs[width] = 0;
+ alpha[0] = *mask.getAddr8(cx, mask.fBounds.bottom() - y - 1);
+ blitter->blitAntiH(r.left(), outerR.bottom() - y - 1, alpha, runs);
+ }
+ }
+ // left
+ r.setLTRB(outerR.left(), innerR.top(), innerR.left(), innerR.bottom());
+ if (r.intersect(clipR)) {
+ SkMask leftMask;
+ leftMask.fImage = mask.getAddr8(mask.fBounds.left() + r.left() - outerR.left(),
+ mask.fBounds.top() + cy);
+ leftMask.fBounds = r;
+ leftMask.fRowBytes = 0; // so we repeat the scanline for our height
+ leftMask.fFormat = SkMask::kA8_Format;
+ blitter->blitMask(leftMask, r);
+ }
+ // right
+ r.setLTRB(innerR.right(), innerR.top(), outerR.right(), innerR.bottom());
+ if (r.intersect(clipR)) {
+ SkMask rightMask;
+ rightMask.fImage = mask.getAddr8(mask.fBounds.right() - outerR.right() + r.left(),
+ mask.fBounds.top() + cy);
+ rightMask.fBounds = r;
+ rightMask.fRowBytes = 0; // so we repeat the scanline for our height
+ rightMask.fFormat = SkMask::kA8_Format;
+ blitter->blitMask(rightMask, r);
+ }
+}
+
+static void draw_nine(const SkMask& mask, const SkIRect& outerR, const SkIPoint& center,
+ bool fillCenter, const SkRasterClip& clip, SkBlitter* blitter) {
+ // if we get here, we need to (possibly) resolve the clip and blitter
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ blitter = wrapper.getBlitter();
+
+ SkRegion::Cliperator clipper(wrapper.getRgn(), outerR);
+
+ if (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ do {
+ draw_nine_clipped(mask, outerR, center, fillCenter, cr, blitter);
+ clipper.next();
+ } while (!clipper.done());
+ }
+}
+
+static int countNestedRects(const SkPath& path, SkRect rects[2]) {
+ if (SkPathPriv::IsNestedFillRects(path, rects)) {
+ return 2;
+ }
+ return path.isRect(&rects[0]);
+}
+
+bool SkMaskFilterBase::filterRRect(const SkRRect& devRRect, const SkMatrix& matrix,
+ const SkRasterClip& clip, SkBlitter* blitter) const {
+ // Attempt to speed up drawing by creating a nine patch. If a nine patch
+ // cannot be used, return false to allow our caller to recover and perform
+ // the drawing another way.
+ NinePatch patch;
+ patch.fMask.fImage = nullptr;
+ if (kTrue_FilterReturn != this->filterRRectToNine(devRRect, matrix,
+ clip.getBounds(),
+ &patch)) {
+ SkASSERT(nullptr == patch.fMask.fImage);
+ return false;
+ }
+ draw_nine(patch.fMask, patch.fOuterRect, patch.fCenter, true, clip, blitter);
+ return true;
+}
+
+bool SkMaskFilterBase::filterPath(const SkPath& devPath, const SkMatrix& matrix,
+ const SkRasterClip& clip, SkBlitter* blitter,
+ SkStrokeRec::InitStyle style) const {
+ SkRect rects[2];
+ int rectCount = 0;
+ if (SkStrokeRec::kFill_InitStyle == style) {
+ rectCount = countNestedRects(devPath, rects);
+ }
+ if (rectCount > 0) {
+ NinePatch patch;
+
+ switch (this->filterRectsToNine(rects, rectCount, matrix, clip.getBounds(), &patch)) {
+ case kFalse_FilterReturn:
+ SkASSERT(nullptr == patch.fMask.fImage);
+ return false;
+
+ case kTrue_FilterReturn:
+ draw_nine(patch.fMask, patch.fOuterRect, patch.fCenter, 1 == rectCount, clip,
+ blitter);
+ return true;
+
+ case kUnimplemented_FilterReturn:
+ SkASSERT(nullptr == patch.fMask.fImage);
+ // fall out
+ break;
+ }
+ }
+
+ SkMask srcM, dstM;
+
+#if defined(SK_BUILD_FOR_FUZZER)
+ if (devPath.countVerbs() > 1000 || devPath.countPoints() > 1000) {
+ return false;
+ }
+#endif
+ if (!SkDraw::DrawToMask(devPath, clip.getBounds(), this, &matrix, &srcM,
+ SkMask::kComputeBoundsAndRenderImage_CreateMode,
+ style)) {
+ return false;
+ }
+ SkAutoMaskFreeImage autoSrc(srcM.fImage);
+
+ if (!this->filterMask(&dstM, srcM, matrix, nullptr)) {
+ return false;
+ }
+ SkAutoMaskFreeImage autoDst(dstM.fImage);
+
+ // if we get here, we need to (possibly) resolve the clip and blitter
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ blitter = wrapper.getBlitter();
+
+ SkRegion::Cliperator clipper(wrapper.getRgn(), dstM.fBounds);
+
+ if (!clipper.done()) {
+ const SkIRect& cr = clipper.rect();
+ do {
+ blitter->blitMask(dstM, cr);
+ clipper.next();
+ } while (!clipper.done());
+ }
+
+ return true;
+}
+
+SkMaskFilterBase::FilterReturn
+SkMaskFilterBase::filterRRectToNine(const SkRRect&, const SkMatrix&,
+ const SkIRect& clipBounds, NinePatch*) const {
+ return kUnimplemented_FilterReturn;
+}
+
+SkMaskFilterBase::FilterReturn
+SkMaskFilterBase::filterRectsToNine(const SkRect[], int count, const SkMatrix&,
+ const SkIRect& clipBounds, NinePatch*) const {
+ return kUnimplemented_FilterReturn;
+}
+
+#if defined(SK_GANESH)
+std::unique_ptr<GrFragmentProcessor>
+SkMaskFilterBase::asFragmentProcessor(const GrFPArgs& args, const SkMatrix& ctm) const {
+ auto fp = this->onAsFragmentProcessor(args, MatrixRec(ctm));
+ SkASSERT(SkToBool(fp) == this->hasFragmentProcessor());
+ return fp;
+}
+bool SkMaskFilterBase::hasFragmentProcessor() const {
+ return this->onHasFragmentProcessor();
+}
+
+std::unique_ptr<GrFragmentProcessor>
+SkMaskFilterBase::onAsFragmentProcessor(const GrFPArgs&, const MatrixRec&) const {
+ return nullptr;
+}
+bool SkMaskFilterBase::onHasFragmentProcessor() const { return false; }
+
+bool SkMaskFilterBase::canFilterMaskGPU(const GrStyledShape& shape,
+ const SkIRect& devSpaceShapeBounds,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkIRect* maskRect) const {
+ return false;
+}
+
+bool SkMaskFilterBase::directFilterMaskGPU(GrRecordingContext*,
+ skgpu::ganesh::SurfaceDrawContext*,
+ GrPaint&&,
+ const GrClip*,
+ const SkMatrix& viewMatrix,
+ const GrStyledShape&) const {
+ return false;
+}
+
+GrSurfaceProxyView SkMaskFilterBase::filterMaskGPU(GrRecordingContext*,
+ GrSurfaceProxyView view,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect) const {
+ return {};
+}
+#endif
+
+void SkMaskFilterBase::computeFastBounds(const SkRect& src, SkRect* dst) const {
+ SkMask srcM, dstM;
+
+ srcM.fBounds = src.roundOut();
+ srcM.fRowBytes = 0;
+ srcM.fFormat = SkMask::kA8_Format;
+
+ SkIPoint margin; // ignored
+ if (this->filterMask(&dstM, srcM, SkMatrix::I(), &margin)) {
+ dst->set(dstM.fBounds);
+ } else {
+ dst->set(srcM.fBounds);
+ }
+}
+
+SkRect SkMaskFilter::approximateFilteredBounds(const SkRect& src) const {
+ SkRect dst;
+ as_MFB(this)->computeFastBounds(src, &dst);
+ return dst;
+}
+
+void SkMaskFilter::RegisterFlattenables() {
+ sk_register_blur_maskfilter_createproc();
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE)) && !defined(SK_DISABLE_SDF_TEXT)
+ sktext::gpu::register_sdf_maskfilter_createproc();
+#endif
+}
+
+sk_sp<SkMaskFilter> SkMaskFilter::Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs) {
+ return sk_sp<SkMaskFilter>(static_cast<SkMaskFilter*>(
+ SkFlattenable::Deserialize(
+ kSkMaskFilter_Type, data, size, procs).release()));
+}
diff --git a/gfx/skia/skia/src/core/SkMaskFilterBase.h b/gfx/skia/skia/src/core/SkMaskFilterBase.h
new file mode 100644
index 0000000000..b48e377617
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskFilterBase.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskFilterBase_DEFINED
+#define SkMaskFilterBase_DEFINED
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "src/core/SkMask.h"
+
+#if defined(SK_GANESH)
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/shaders/SkShaderBase.h"
+#endif
+
+class GrClip;
+struct GrFPArgs;
+class GrFragmentProcessor;
+class GrPaint;
+class GrRecordingContext;
+class GrRenderTarget;
+namespace skgpu {
+namespace ganesh {
+class SurfaceDrawContext;
+}
+} // namespace skgpu
+class GrResourceProvider;
+class GrStyledShape;
+class GrSurfaceProxyView;
+class GrTexture;
+class GrTextureProxy;
+
+class SkBitmap;
+class SkBlitter;
+class SkCachedData;
+class SkMatrix;
+class SkPath;
+class SkRasterClip;
+class SkRRect;
+
+class SkMaskFilterBase : public SkMaskFilter {
+public:
+ /** Returns the format of the resulting mask that this subclass will return
+ when its filterMask() method is called.
+ */
+ virtual SkMask::Format getFormat() const = 0;
+
+ /** Create a new mask by filter the src mask.
+ If src.fImage == null, then do not allocate or create the dst image
+ but do fill out the other fields in dstMask.
+ If you do allocate a dst image, use SkMask::AllocImage()
+ If this returns false, dst mask is ignored.
+ @param dst the result of the filter. If src.fImage == null, dst should not allocate its image
+ @param src the original image to be filtered.
+ @param matrix the CTM
+ @param margin if not null, return the buffer dx/dy need when calculating the effect. Used when
+ drawing a clipped object to know how much larger to allocate the src before
+ applying the filter. If returning false, ignore this parameter.
+ @return true if the dst mask was correctly created.
+ */
+ virtual bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const = 0;
+
+#if defined(SK_GANESH)
+ /**
+ * Returns a processor if the filter can be expressed a single-pass GrProcessor without
+ * requiring an explicit input mask. Per-pixel, the effect receives the incoming mask's
+ * coverage as the input color and outputs the filtered covereage value. This means that each
+ * pixel's filtered coverage must only depend on the unfiltered mask value for that pixel and
+ * not on surrounding values.
+ */
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs& args,
+ const SkMatrix& ctm) const;
+
+ /**
+ * Returns true iff asFragmentProcessor() will return a processor
+ */
+ bool hasFragmentProcessor() const;
+
+ /**
+ * If asFragmentProcessor() fails the filter may be implemented on the GPU by a subclass
+ * overriding filterMaskGPU (declared below). That code path requires constructing a
+ * src mask as input. Since that is a potentially expensive operation, the subclass must also
+ * override this function to indicate whether filterTextureMaskGPU would succeeed if the mask
+ * were to be created.
+ *
+ * 'maskRect' returns the device space portion of the mask that the filter needs. The mask
+ * passed into 'filterMaskGPU' should have the same extent as 'maskRect' but be
+ * translated to the upper-left corner of the mask (i.e., (maskRect.fLeft, maskRect.fTop)
+ * appears at (0, 0) in the mask).
+ *
+ * Logically, how this works is:
+ * canFilterMaskGPU is called
+ * if (it returns true)
+ * the returned mask rect is used for quick rejecting
+ * the mask rect is used to generate the mask
+ * filterMaskGPU is called to filter the mask
+ *
+ * TODO: this should work as:
+ * if (canFilterMaskGPU(devShape, ...)) // rect, rrect, drrect, path
+ * filterMaskGPU(devShape, ...)
+ * this would hide the RRect special case and the mask generation
+ */
+ virtual bool canFilterMaskGPU(const GrStyledShape&,
+ const SkIRect& devSpaceShapeBounds,
+ const SkIRect& clipBounds,
+ const SkMatrix& ctm,
+ SkIRect* maskRect) const;
+
+ /**
+ * Try to directly render the mask filter into the target. Returns true if drawing was
+ * successful. If false is returned then paint is unmodified.
+ */
+ virtual bool directFilterMaskGPU(GrRecordingContext*,
+ skgpu::ganesh::SurfaceDrawContext*,
+ GrPaint&& paint,
+ const GrClip*,
+ const SkMatrix& viewMatrix,
+ const GrStyledShape& shape) const;
+
+ /**
+ * This function is used to implement filters that require an explicit src mask. It should only
+ * be called if canFilterMaskGPU returned true and the maskRect param should be the output from
+ * that call.
+ * Implementations are free to get the GrContext from the src texture in order to create
+ * additional textures and perform multiple passes.
+ */
+ virtual GrSurfaceProxyView filterMaskGPU(GrRecordingContext*,
+ GrSurfaceProxyView srcView,
+ GrColorType srcColorType,
+ SkAlphaType srcAlphaType,
+ const SkMatrix& ctm,
+ const SkIRect& maskRect) const;
+#endif
+
+ /**
+ * The fast bounds function is used to enable the paint to be culled early
+ * in the drawing pipeline. This function accepts the current bounds of the
+ * paint as its src param and the filter adjust those bounds using its
+ * current mask and returns the result using the dest param. Callers are
+ * allowed to provide the same struct for both src and dest so each
+ * implementation must accommodate that behavior.
+ *
+ * The default impl calls filterMask with the src mask having no image,
+ * but subclasses may override this if they can compute the rect faster.
+ */
+ virtual void computeFastBounds(const SkRect& src, SkRect* dest) const;
+
+ struct BlurRec {
+ SkScalar fSigma;
+ SkBlurStyle fStyle;
+ };
+ /**
+ * If this filter can be represented by a BlurRec, return true and (if not null) fill in the
+ * provided BlurRec parameter. If this effect cannot be represented as a BlurRec, return false
+ * and ignore the BlurRec parameter.
+ */
+ virtual bool asABlur(BlurRec*) const;
+
+ static SkFlattenable::Type GetFlattenableType() {
+ return kSkMaskFilter_Type;
+ }
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkMaskFilter_Type;
+ }
+
+protected:
+ SkMaskFilterBase() {}
+
+#if defined(SK_GANESH)
+ using MatrixRec = SkShaderBase::MatrixRec;
+ virtual std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const;
+ virtual bool onHasFragmentProcessor() const;
+#endif
+
+ enum FilterReturn {
+ kFalse_FilterReturn,
+ kTrue_FilterReturn,
+ kUnimplemented_FilterReturn
+ };
+
+ class NinePatch : ::SkNoncopyable {
+ public:
+ NinePatch() : fCache(nullptr) { }
+ ~NinePatch();
+
+ SkMask fMask; // fBounds must have [0,0] in its top-left
+ SkIRect fOuterRect; // width/height must be >= fMask.fBounds'
+ SkIPoint fCenter; // identifies center row/col for stretching
+ SkCachedData* fCache;
+ };
+
+ /**
+ * Override if your subclass can filter a rect, and return the answer as
+ * a ninepatch mask to be stretched over the returned outerRect. On success
+ * return kTrue_FilterReturn. On failure (e.g. out of memory) return
+ * kFalse_FilterReturn. If the normal filterMask() entry-point should be
+ * called (the default) return kUnimplemented_FilterReturn.
+ *
+ * By convention, the caller will take the center rol/col from the returned
+ * mask as the slice it can replicate horizontally and vertically as we
+ * stretch the mask to fit inside outerRect. It is an error for outerRect
+ * to be smaller than the mask's bounds. This would imply that the width
+ * and height of the mask should be odd. This is not required, just that
+ * the caller will call mask.fBounds.centerX() and centerY() to find the
+ * strips that will be replicated.
+ */
+ virtual FilterReturn filterRectsToNine(const SkRect[], int count,
+ const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const;
+ /**
+ * Similar to filterRectsToNine, except it performs the work on a round rect.
+ */
+ virtual FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
+ const SkIRect& clipBounds,
+ NinePatch*) const;
+
+private:
+ friend class SkDraw;
+ friend class SkDrawBase;
+
+ /** Helper method that, given a path in device space, will rasterize it into a kA8_Format mask
+ and then call filterMask(). If this returns true, the specified blitter will be called
+ to render that mask. Returns false if filterMask() returned false.
+ This method is not exported to java.
+ */
+ bool filterPath(const SkPath& devPath, const SkMatrix& ctm, const SkRasterClip&, SkBlitter*,
+ SkStrokeRec::InitStyle) const;
+
+ /** Helper method that, given a roundRect in device space, will rasterize it into a kA8_Format
+ mask and then call filterMask(). If this returns true, the specified blitter will be called
+ to render that mask. Returns false if filterMask() returned false.
+ */
+ bool filterRRect(const SkRRect& devRRect, const SkMatrix& ctm, const SkRasterClip&,
+ SkBlitter*) const;
+
+ using INHERITED = SkFlattenable;
+};
+
+inline SkMaskFilterBase* as_MFB(SkMaskFilter* mf) {
+ return static_cast<SkMaskFilterBase*>(mf);
+}
+
+inline const SkMaskFilterBase* as_MFB(const SkMaskFilter* mf) {
+ return static_cast<const SkMaskFilterBase*>(mf);
+}
+
+inline const SkMaskFilterBase* as_MFB(const sk_sp<SkMaskFilter>& mf) {
+ return static_cast<SkMaskFilterBase*>(mf.get());
+}
+
+// For RegisterFlattenables access to the blur mask filter implementation
+extern void sk_register_blur_maskfilter_createproc();
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMaskGamma.cpp b/gfx/skia/skia/src/core/SkMaskGamma.cpp
new file mode 100644
index 0000000000..5c82c5e7a8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskGamma.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMaskGamma.h"
+
+#include "include/core/SkColor.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTo.h"
+
+class SkLinearColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luminance) const override {
+ SkASSERT(SK_Scalar1 == gamma);
+ return luminance;
+ }
+ SkScalar fromLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luma) const override {
+ SkASSERT(SK_Scalar1 == gamma);
+ return luma;
+ }
+};
+
+class SkGammaColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar gamma, SkScalar luminance) const override {
+ return SkScalarPow(luminance, gamma);
+ }
+ SkScalar fromLuma(SkScalar gamma, SkScalar luma) const override {
+ return SkScalarPow(luma, SkScalarInvert(gamma));
+ }
+};
+
+class SkSRGBColorSpaceLuminance : public SkColorSpaceLuminance {
+ SkScalar toLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luminance) const override {
+ SkASSERT(0 == gamma);
+ //The magic numbers are derived from the sRGB specification.
+ //See http://www.color.org/chardata/rgb/srgb.xalter .
+ if (luminance <= 0.04045f) {
+ return luminance / 12.92f;
+ }
+ return SkScalarPow((luminance + 0.055f) / 1.055f,
+ 2.4f);
+ }
+ SkScalar fromLuma(SkScalar SkDEBUGCODE(gamma), SkScalar luma) const override {
+ SkASSERT(0 == gamma);
+ //The magic numbers are derived from the sRGB specification.
+ //See http://www.color.org/chardata/rgb/srgb.xalter .
+ if (luma <= 0.0031308f) {
+ return luma * 12.92f;
+ }
+ return 1.055f * SkScalarPow(luma, SkScalarInvert(2.4f))
+ - 0.055f;
+ }
+};
+
+/*static*/ const SkColorSpaceLuminance& SkColorSpaceLuminance::Fetch(SkScalar gamma) {
+ static SkLinearColorSpaceLuminance gSkLinearColorSpaceLuminance;
+ static SkGammaColorSpaceLuminance gSkGammaColorSpaceLuminance;
+ static SkSRGBColorSpaceLuminance gSkSRGBColorSpaceLuminance;
+
+ if (0 == gamma) {
+ return gSkSRGBColorSpaceLuminance;
+ } else if (SK_Scalar1 == gamma) {
+ return gSkLinearColorSpaceLuminance;
+ } else {
+ return gSkGammaColorSpaceLuminance;
+ }
+}
+
+static float apply_contrast(float srca, float contrast) {
+ return srca + ((1.0f - srca) * contrast * srca);
+}
+
+void SkTMaskGamma_build_correcting_lut(uint8_t table[256], U8CPU srcI, SkScalar contrast,
+ const SkColorSpaceLuminance& srcConvert, SkScalar srcGamma,
+ const SkColorSpaceLuminance& dstConvert, SkScalar dstGamma) {
+ const float src = (float)srcI / 255.0f;
+ const float linSrc = srcConvert.toLuma(srcGamma, src);
+ //Guess at the dst. The perceptual inverse provides smaller visual
+ //discontinuities when slight changes to desaturated colors cause a channel
+ //to map to a different correcting lut with neighboring srcI.
+ //See https://code.google.com/p/chromium/issues/detail?id=141425#c59 .
+ const float dst = 1.0f - src;
+ const float linDst = dstConvert.toLuma(dstGamma, dst);
+
+ //Contrast value tapers off to 0 as the src luminance becomes white
+ const float adjustedContrast = SkScalarToFloat(contrast) * linDst;
+
+ //Remove discontinuity and instability when src is close to dst.
+ //The value 1/256 is arbitrary and appears to contain the instability.
+ if (fabs(src - dst) < (1.0f / 256.0f)) {
+ float ii = 0.0f;
+ for (int i = 0; i < 256; ++i, ii += 1.0f) {
+ float rawSrca = ii / 255.0f;
+ float srca = apply_contrast(rawSrca, adjustedContrast);
+ table[i] = SkToU8(sk_float_round2int(255.0f * srca));
+ }
+ } else {
+ // Avoid slow int to float conversion.
+ float ii = 0.0f;
+ for (int i = 0; i < 256; ++i, ii += 1.0f) {
+ // 'rawSrca += 1.0f / 255.0f' and even
+ // 'rawSrca = i * (1.0f / 255.0f)' can add up to more than 1.0f.
+ // When this happens the table[255] == 0x0 instead of 0xff.
+ // See http://code.google.com/p/chromium/issues/detail?id=146466
+ float rawSrca = ii / 255.0f;
+ float srca = apply_contrast(rawSrca, adjustedContrast);
+ SkASSERT(srca <= 1.0f);
+ float dsta = 1.0f - srca;
+
+ //Calculate the output we want.
+ float linOut = (linSrc * srca + dsta * linDst);
+ SkASSERT(linOut <= 1.0f);
+ float out = dstConvert.fromLuma(dstGamma, linOut);
+
+ //Undo what the blit blend will do.
+ float result = (out - dst) / (src - dst);
+ SkASSERT(sk_float_round2int(255.0f * result) <= 255);
+
+ table[i] = SkToU8(sk_float_round2int(255.0f * result));
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkMaskGamma.h b/gfx/skia/skia/src/core/SkMaskGamma.h
new file mode 100644
index 0000000000..4d6acd786e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMaskGamma.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMaskGamma_DEFINED
+#define SkMaskGamma_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkNoncopyable.h"
+
+/**
+ * SkColorSpaceLuminance is used to convert luminances to and from linear and
+ * perceptual color spaces.
+ *
+ * Luma is used to specify a linear luminance value [0.0, 1.0].
+ * Luminance is used to specify a luminance value in an arbitrary color space [0.0, 1.0].
+ */
+class SkColorSpaceLuminance : SkNoncopyable {
+public:
+ virtual ~SkColorSpaceLuminance() { }
+
+ /** Converts a color component luminance in the color space to a linear luma. */
+ virtual SkScalar toLuma(SkScalar gamma, SkScalar luminance) const = 0;
+ /** Converts a linear luma to a color component luminance in the color space. */
+ virtual SkScalar fromLuma(SkScalar gamma, SkScalar luma) const = 0;
+
+ /** Converts a color to a luminance value. */
+ static U8CPU computeLuminance(SkScalar gamma, SkColor c) {
+ const SkColorSpaceLuminance& luminance = Fetch(gamma);
+ SkScalar r = luminance.toLuma(gamma, SkIntToScalar(SkColorGetR(c)) / 255);
+ SkScalar g = luminance.toLuma(gamma, SkIntToScalar(SkColorGetG(c)) / 255);
+ SkScalar b = luminance.toLuma(gamma, SkIntToScalar(SkColorGetB(c)) / 255);
+ SkScalar luma = r * SK_LUM_COEFF_R +
+ g * SK_LUM_COEFF_G +
+ b * SK_LUM_COEFF_B;
+ SkASSERT(luma <= SK_Scalar1);
+ return SkScalarRoundToInt(luminance.fromLuma(gamma, luma) * 255);
+ }
+
+ /** Retrieves the SkColorSpaceLuminance for the given gamma. */
+ static const SkColorSpaceLuminance& Fetch(SkScalar gamma);
+};
+
+///@{
+/**
+ * Scales base <= 2^N-1 to 2^8-1
+ * @param N [1, 8] the number of bits used by base.
+ * @param base the number to be scaled to [0, 255].
+ */
+template<U8CPU N> static inline U8CPU sk_t_scale255(U8CPU base) {
+ base <<= (8 - N);
+ U8CPU lum = base;
+ for (unsigned int i = N; i < 8; i += N) {
+ lum |= base >> i;
+ }
+ return lum;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<1>(U8CPU base) {
+ return base * 0xFF;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<2>(U8CPU base) {
+ return base * 0x55;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<4>(U8CPU base) {
+ return base * 0x11;
+}
+template<> /*static*/ inline U8CPU sk_t_scale255<8>(U8CPU base) {
+ return base;
+}
+///@}
+
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskPreBlend;
+
+void SkTMaskGamma_build_correcting_lut(uint8_t table[256], U8CPU srcI, SkScalar contrast,
+ const SkColorSpaceLuminance& srcConvert, SkScalar srcGamma,
+ const SkColorSpaceLuminance& dstConvert, SkScalar dstGamma);
+
+/**
+ * A regular mask contains linear alpha values. A gamma correcting mask
+ * contains non-linear alpha values in an attempt to create gamma correct blits
+ * in the presence of a gamma incorrect (linear) blend in the blitter.
+ *
+ * SkMaskGamma creates and maintains tables which convert linear alpha values
+ * to gamma correcting alpha values.
+ * @param R The number of luminance bits to use [1, 8] from the red channel.
+ * @param G The number of luminance bits to use [1, 8] from the green channel.
+ * @param B The number of luminance bits to use [1, 8] from the blue channel.
+ */
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskGamma : public SkRefCnt {
+
+public:
+
+ /** Creates a linear SkTMaskGamma. */
+ SkTMaskGamma() : fIsLinear(true) { }
+
+ /**
+ * Creates tables to convert linear alpha values to gamma correcting alpha
+ * values.
+ *
+ * @param contrast A value in the range [0.0, 1.0] which indicates the
+ * amount of artificial contrast to add.
+ * @param paint The color space in which the paint color was chosen.
+ * @param device The color space of the target device.
+ */
+ SkTMaskGamma(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma) : fIsLinear(false) {
+ const SkColorSpaceLuminance& paintConvert = SkColorSpaceLuminance::Fetch(paintGamma);
+ const SkColorSpaceLuminance& deviceConvert = SkColorSpaceLuminance::Fetch(deviceGamma);
+ for (U8CPU i = 0; i < (1 << MAX_LUM_BITS); ++i) {
+ U8CPU lum = sk_t_scale255<MAX_LUM_BITS>(i);
+ SkTMaskGamma_build_correcting_lut(fGammaTables[i], lum, contrast,
+ paintConvert, paintGamma,
+ deviceConvert, deviceGamma);
+ }
+ }
+
+ /** Given a color, returns the closest canonical color. */
+ static SkColor CanonicalColor(SkColor color) {
+ return SkColorSetRGB(
+ sk_t_scale255<R_LUM_BITS>(SkColorGetR(color) >> (8 - R_LUM_BITS)),
+ sk_t_scale255<G_LUM_BITS>(SkColorGetG(color) >> (8 - G_LUM_BITS)),
+ sk_t_scale255<B_LUM_BITS>(SkColorGetB(color) >> (8 - B_LUM_BITS)));
+ }
+
+ /** The type of the mask pre-blend which will be returned from preBlend(SkColor). */
+ typedef SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS> PreBlend;
+
+ /**
+ * Provides access to the tables appropriate for converting linear alpha
+ * values into gamma correcting alpha values when drawing the given color
+ * through the mask. The destination color will be approximated.
+ */
+ PreBlend preBlend(SkColor color) const;
+
+ /**
+ * Get dimensions for the full table set, so it can be allocated as a block.
+ */
+ void getGammaTableDimensions(int* tableWidth, int* numTables) const {
+ *tableWidth = 256;
+ *numTables = (1 << MAX_LUM_BITS);
+ }
+
+ /**
+ * Provides direct access to the full table set, so it can be uploaded
+ * into a texture or analyzed in other ways.
+ * Returns nullptr if fGammaTables hasn't been initialized.
+ */
+ const uint8_t* getGammaTables() const {
+ return fIsLinear ? nullptr : (const uint8_t*) fGammaTables;
+ }
+
+private:
+ static const int MAX_LUM_BITS =
+ B_LUM_BITS > (R_LUM_BITS > G_LUM_BITS ? R_LUM_BITS : G_LUM_BITS)
+ ? B_LUM_BITS : (R_LUM_BITS > G_LUM_BITS ? R_LUM_BITS : G_LUM_BITS);
+ uint8_t fGammaTables[1 << MAX_LUM_BITS][256];
+ bool fIsLinear;
+
+ using INHERITED = SkRefCnt;
+};
+
+
+/**
+ * SkTMaskPreBlend is a tear-off of SkTMaskGamma. It provides the tables to
+ * convert a linear alpha value for a given channel to a gamma correcting alpha
+ * value for that channel. This class is immutable.
+ *
+ * If fR, fG, or fB is nullptr, all of them will be. This indicates that no mask
+ * pre blend should be applied. SkTMaskPreBlend::isApplicable() is provided as
+ * a convenience function to test for the absence of this case.
+ */
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS> class SkTMaskPreBlend {
+private:
+ SkTMaskPreBlend(sk_sp<const SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>> parent,
+ const uint8_t* r, const uint8_t* g, const uint8_t* b)
+ : fParent(std::move(parent)), fR(r), fG(g), fB(b) { }
+
+ sk_sp<const SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>> fParent;
+ friend class SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>;
+public:
+ /** Creates a non applicable SkTMaskPreBlend. */
+ SkTMaskPreBlend() : fParent(), fR(nullptr), fG(nullptr), fB(nullptr) { }
+
+ /**
+ * This copy contructor exists for correctness, but should never be called
+ * when return value optimization is enabled.
+ */
+ SkTMaskPreBlend(const SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>& that)
+ : fParent(that.fParent), fR(that.fR), fG(that.fG), fB(that.fB) { }
+
+ ~SkTMaskPreBlend() { }
+
+ /** True if this PreBlend should be applied. When false, fR, fG, and fB are nullptr. */
+ bool isApplicable() const { return SkToBool(this->fG); }
+
+ const uint8_t* fR;
+ const uint8_t* fG;
+ const uint8_t* fB;
+};
+
+template <int R_LUM_BITS, int G_LUM_BITS, int B_LUM_BITS>
+SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>
+SkTMaskGamma<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>::preBlend(SkColor color) const {
+ return fIsLinear ? SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>()
+ : SkTMaskPreBlend<R_LUM_BITS, G_LUM_BITS, B_LUM_BITS>(sk_ref_sp(this),
+ fGammaTables[SkColorGetR(color) >> (8 - MAX_LUM_BITS)],
+ fGammaTables[SkColorGetG(color) >> (8 - MAX_LUM_BITS)],
+ fGammaTables[SkColorGetB(color) >> (8 - MAX_LUM_BITS)]);
+}
+
+///@{
+/**
+ * If APPLY_LUT is false, returns component unchanged.
+ * If APPLY_LUT is true, returns lut[component].
+ * @param APPLY_LUT whether or not the look-up table should be applied to component.
+ * @component the initial component.
+ * @lut a look-up table which transforms the component.
+ */
+template<bool APPLY_LUT> static inline U8CPU sk_apply_lut_if(U8CPU component, const uint8_t*) {
+ return component;
+}
+template<> /*static*/ inline U8CPU sk_apply_lut_if<true>(U8CPU component, const uint8_t* lut) {
+ return lut[component];
+}
+///@}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrix.cpp b/gfx/skia/skia/src/core/SkMatrix.cpp
new file mode 100644
index 0000000000..863f428a0b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrix.cpp
@@ -0,0 +1,1881 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkString.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkFloatBits.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMatrixUtils.h"
+#include "src/core/SkSamplingPriv.h"
+
+#include <algorithm>
+#include <cmath>
+
+struct SkSamplingOptions;
+
+void SkMatrix::doNormalizePerspective() {
+ // If the bottom row of the matrix is [0, 0, not_one], we will treat the matrix as if it
+ // is in perspective, even though it stills behaves like its affine. If we divide everything
+ // by the not_one value, then it will behave the same, but will be treated as affine,
+ // and therefore faster (e.g. clients can forward-difference calculations).
+ //
+ if (0 == fMat[SkMatrix::kMPersp0] && 0 == fMat[SkMatrix::kMPersp1]) {
+ SkScalar p2 = fMat[SkMatrix::kMPersp2];
+ if (p2 != 0 && p2 != 1) {
+ double inv = 1.0 / p2;
+ for (int i = 0; i < 6; ++i) {
+ fMat[i] = SkDoubleToScalar(fMat[i] * inv);
+ }
+ fMat[SkMatrix::kMPersp2] = 1;
+ }
+ this->setTypeMask(kUnknown_Mask);
+ }
+}
+
+SkMatrix& SkMatrix::reset() { *this = SkMatrix(); return *this; }
+
+SkMatrix& SkMatrix::set9(const SkScalar buffer[9]) {
+ memcpy(fMat, buffer, 9 * sizeof(SkScalar));
+ this->setTypeMask(kUnknown_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setAffine(const SkScalar buffer[6]) {
+ fMat[kMScaleX] = buffer[kAScaleX];
+ fMat[kMSkewX] = buffer[kASkewX];
+ fMat[kMTransX] = buffer[kATransX];
+ fMat[kMSkewY] = buffer[kASkewY];
+ fMat[kMScaleY] = buffer[kAScaleY];
+ fMat[kMTransY] = buffer[kATransY];
+ fMat[kMPersp0] = 0;
+ fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+ this->setTypeMask(kUnknown_Mask);
+ return *this;
+}
+
+// this aligns with the masks, so we can compute a mask from a variable 0/1
+enum {
+ kTranslate_Shift,
+ kScale_Shift,
+ kAffine_Shift,
+ kPerspective_Shift,
+ kRectStaysRect_Shift
+};
+
+static const int32_t kScalar1Int = 0x3f800000;
+
+uint8_t SkMatrix::computePerspectiveTypeMask() const {
+ // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment
+ // is a win, but replacing those below is not. We don't yet understand
+ // that result.
+ if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || fMat[kMPersp2] != 1) {
+ // If this is a perspective transform, we return true for all other
+ // transform flags - this does not disable any optimizations, respects
+ // the rule that the type mask must be conservative, and speeds up
+ // type mask computation.
+ return SkToU8(kORableMasks);
+ }
+
+ return SkToU8(kOnlyPerspectiveValid_Mask | kUnknown_Mask);
+}
+
+uint8_t SkMatrix::computeTypeMask() const {
+ unsigned mask = 0;
+
+ if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || fMat[kMPersp2] != 1) {
+ // Once it is determined that that this is a perspective transform,
+ // all other flags are moot as far as optimizations are concerned.
+ return SkToU8(kORableMasks);
+ }
+
+ if (fMat[kMTransX] != 0 || fMat[kMTransY] != 0) {
+ mask |= kTranslate_Mask;
+ }
+
+ int m00 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleX]);
+ int m01 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewX]);
+ int m10 = SkScalarAs2sCompliment(fMat[SkMatrix::kMSkewY]);
+ int m11 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleY]);
+
+ if (m01 | m10) {
+ // The skew components may be scale-inducing, unless we are dealing
+ // with a pure rotation. Testing for a pure rotation is expensive,
+ // so we opt for being conservative by always setting the scale bit.
+ // along with affine.
+ // By doing this, we are also ensuring that matrices have the same
+ // type masks as their inverses.
+ mask |= kAffine_Mask | kScale_Mask;
+
+ // For rectStaysRect, in the affine case, we only need check that
+ // the primary diagonal is all zeros and that the secondary diagonal
+ // is all non-zero.
+
+ // map non-zero to 1
+ m01 = m01 != 0;
+ m10 = m10 != 0;
+
+ int dp0 = 0 == (m00 | m11) ; // true if both are 0
+ int ds1 = m01 & m10; // true if both are 1
+
+ mask |= (dp0 & ds1) << kRectStaysRect_Shift;
+ } else {
+ // Only test for scale explicitly if not affine, since affine sets the
+ // scale bit.
+ if ((m00 ^ kScalar1Int) | (m11 ^ kScalar1Int)) {
+ mask |= kScale_Mask;
+ }
+
+ // Not affine, therefore we already know secondary diagonal is
+ // all zeros, so we just need to check that primary diagonal is
+ // all non-zero.
+
+ // map non-zero to 1
+ m00 = m00 != 0;
+ m11 = m11 != 0;
+
+ // record if the (p)rimary diagonal is all non-zero
+ mask |= (m00 & m11) << kRectStaysRect_Shift;
+ }
+
+ return SkToU8(mask);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool operator==(const SkMatrix& a, const SkMatrix& b) {
+ const SkScalar* SK_RESTRICT ma = a.fMat;
+ const SkScalar* SK_RESTRICT mb = b.fMat;
+
+ return ma[0] == mb[0] && ma[1] == mb[1] && ma[2] == mb[2] &&
+ ma[3] == mb[3] && ma[4] == mb[4] && ma[5] == mb[5] &&
+ ma[6] == mb[6] && ma[7] == mb[7] && ma[8] == mb[8];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// helper function to determine if upper-left 2x2 of matrix is degenerate
+static inline bool is_degenerate_2x2(SkScalar scaleX, SkScalar skewX,
+ SkScalar skewY, SkScalar scaleY) {
+ SkScalar perp_dot = scaleX*scaleY - skewX*skewY;
+ return SkScalarNearlyZero(perp_dot, SK_ScalarNearlyZero*SK_ScalarNearlyZero);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkMatrix::isSimilarity(SkScalar tol) const {
+ // if identity or translate matrix
+ TypeMask mask = this->getType();
+ if (mask <= kTranslate_Mask) {
+ return true;
+ }
+ if (mask & kPerspective_Mask) {
+ return false;
+ }
+
+ SkScalar mx = fMat[kMScaleX];
+ SkScalar my = fMat[kMScaleY];
+ // if no skew, can just compare scale factors
+ if (!(mask & kAffine_Mask)) {
+ return !SkScalarNearlyZero(mx) && SkScalarNearlyEqual(SkScalarAbs(mx), SkScalarAbs(my));
+ }
+ SkScalar sx = fMat[kMSkewX];
+ SkScalar sy = fMat[kMSkewY];
+
+ if (is_degenerate_2x2(mx, sx, sy, my)) {
+ return false;
+ }
+
+ // upper 2x2 is rotation/reflection + uniform scale if basis vectors
+ // are 90 degree rotations of each other
+ return (SkScalarNearlyEqual(mx, my, tol) && SkScalarNearlyEqual(sx, -sy, tol))
+ || (SkScalarNearlyEqual(mx, -my, tol) && SkScalarNearlyEqual(sx, sy, tol));
+}
+
+bool SkMatrix::preservesRightAngles(SkScalar tol) const {
+ TypeMask mask = this->getType();
+
+ if (mask <= kTranslate_Mask) {
+ // identity, translate and/or scale
+ return true;
+ }
+ if (mask & kPerspective_Mask) {
+ return false;
+ }
+
+ SkASSERT(mask & (kAffine_Mask | kScale_Mask));
+
+ SkScalar mx = fMat[kMScaleX];
+ SkScalar my = fMat[kMScaleY];
+ SkScalar sx = fMat[kMSkewX];
+ SkScalar sy = fMat[kMSkewY];
+
+ if (is_degenerate_2x2(mx, sx, sy, my)) {
+ return false;
+ }
+
+ // upper 2x2 is scale + rotation/reflection if basis vectors are orthogonal
+ SkVector vec[2];
+ vec[0].set(mx, sy);
+ vec[1].set(sx, my);
+
+ return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline SkScalar sdot(SkScalar a, SkScalar b, SkScalar c, SkScalar d) {
+ return a * b + c * d;
+}
+
+static inline SkScalar sdot(SkScalar a, SkScalar b, SkScalar c, SkScalar d,
+ SkScalar e, SkScalar f) {
+ return a * b + c * d + e * f;
+}
+
+static inline SkScalar scross(SkScalar a, SkScalar b, SkScalar c, SkScalar d) {
+ return a * b - c * d;
+}
+
+SkMatrix& SkMatrix::setTranslate(SkScalar dx, SkScalar dy) {
+ *this = SkMatrix(1, 0, dx,
+ 0, 1, dy,
+ 0, 0, 1,
+ (dx != 0 || dy != 0) ? kTranslate_Mask | kRectStaysRect_Mask
+ : kIdentity_Mask | kRectStaysRect_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::preTranslate(SkScalar dx, SkScalar dy) {
+ const unsigned mask = this->getType();
+
+ if (mask <= kTranslate_Mask) {
+ fMat[kMTransX] += dx;
+ fMat[kMTransY] += dy;
+ } else if (mask & kPerspective_Mask) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ return this->preConcat(m);
+ } else {
+ fMat[kMTransX] += sdot(fMat[kMScaleX], dx, fMat[kMSkewX], dy);
+ fMat[kMTransY] += sdot(fMat[kMSkewY], dx, fMat[kMScaleY], dy);
+ }
+ this->updateTranslateMask();
+ return *this;
+}
+
+SkMatrix& SkMatrix::postTranslate(SkScalar dx, SkScalar dy) {
+ if (this->hasPerspective()) {
+ SkMatrix m;
+ m.setTranslate(dx, dy);
+ this->postConcat(m);
+ } else {
+ fMat[kMTransX] += dx;
+ fMat[kMTransY] += dy;
+ this->updateTranslateMask();
+ }
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkMatrix& SkMatrix::setScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ this->reset();
+ } else {
+ this->setScaleTranslate(sx, sy, px - sx * px, py - sy * py);
+ }
+ return *this;
+}
+
+SkMatrix& SkMatrix::setScale(SkScalar sx, SkScalar sy) {
+ auto rectMask = (sx == 0 || sy == 0) ? 0 : kRectStaysRect_Mask;
+ *this = SkMatrix(sx, 0, 0,
+ 0, sy, 0,
+ 0, 0, 1,
+ (sx == 1 && sy == 1) ? kIdentity_Mask | rectMask
+ : kScale_Mask | rectMask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::preScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ return *this;
+ }
+
+ SkMatrix m;
+ m.setScale(sx, sy, px, py);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::preScale(SkScalar sx, SkScalar sy) {
+ if (1 == sx && 1 == sy) {
+ return *this;
+ }
+
+ // the assumption is that these multiplies are very cheap, and that
+ // a full concat and/or just computing the matrix type is more expensive.
+ // Also, the fixed-point case checks for overflow, but the float doesn't,
+ // so we can get away with these blind multiplies.
+
+ fMat[kMScaleX] *= sx;
+ fMat[kMSkewY] *= sx;
+ fMat[kMPersp0] *= sx;
+
+ fMat[kMSkewX] *= sy;
+ fMat[kMScaleY] *= sy;
+ fMat[kMPersp1] *= sy;
+
+ // Attempt to simplify our type when applying an inverse scale.
+ // TODO: The persp/affine preconditions are in place to keep the mask consistent with
+ // what computeTypeMask() would produce (persp/skew always implies kScale).
+ // We should investigate whether these flag dependencies are truly needed.
+ if (fMat[kMScaleX] == 1 && fMat[kMScaleY] == 1
+ && !(fTypeMask & (kPerspective_Mask | kAffine_Mask))) {
+ this->clearTypeMask(kScale_Mask);
+ } else {
+ this->orTypeMask(kScale_Mask);
+ // Remove kRectStaysRect if the preScale factors were 0
+ if (!sx || !sy) {
+ this->clearTypeMask(kRectStaysRect_Mask);
+ }
+ }
+ return *this;
+}
+
+SkMatrix& SkMatrix::postScale(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ if (1 == sx && 1 == sy) {
+ return *this;
+ }
+ SkMatrix m;
+ m.setScale(sx, sy, px, py);
+ return this->postConcat(m);
+}
+
+SkMatrix& SkMatrix::postScale(SkScalar sx, SkScalar sy) {
+ if (1 == sx && 1 == sy) {
+ return *this;
+ }
+ SkMatrix m;
+ m.setScale(sx, sy);
+ return this->postConcat(m);
+}
+
+// this perhaps can go away, if we have a fract/high-precision way to
+// scale matrices
+bool SkMatrix::postIDiv(int divx, int divy) {
+ if (divx == 0 || divy == 0) {
+ return false;
+ }
+
+ const float invX = 1.f / divx;
+ const float invY = 1.f / divy;
+
+ fMat[kMScaleX] *= invX;
+ fMat[kMSkewX] *= invX;
+ fMat[kMTransX] *= invX;
+
+ fMat[kMScaleY] *= invY;
+ fMat[kMSkewY] *= invY;
+ fMat[kMTransY] *= invY;
+
+ this->setTypeMask(kUnknown_Mask);
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+SkMatrix& SkMatrix::setSinCos(SkScalar sinV, SkScalar cosV, SkScalar px, SkScalar py) {
+ const SkScalar oneMinusCosV = 1 - cosV;
+
+ fMat[kMScaleX] = cosV;
+ fMat[kMSkewX] = -sinV;
+ fMat[kMTransX] = sdot(sinV, py, oneMinusCosV, px);
+
+ fMat[kMSkewY] = sinV;
+ fMat[kMScaleY] = cosV;
+ fMat[kMTransY] = sdot(-sinV, px, oneMinusCosV, py);
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setRSXform(const SkRSXform& xform) {
+ fMat[kMScaleX] = xform.fSCos;
+ fMat[kMSkewX] = -xform.fSSin;
+ fMat[kMTransX] = xform.fTx;
+
+ fMat[kMSkewY] = xform.fSSin;
+ fMat[kMScaleY] = xform.fSCos;
+ fMat[kMTransY] = xform.fTy;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setSinCos(SkScalar sinV, SkScalar cosV) {
+ fMat[kMScaleX] = cosV;
+ fMat[kMSkewX] = -sinV;
+ fMat[kMTransX] = 0;
+
+ fMat[kMSkewY] = sinV;
+ fMat[kMScaleY] = cosV;
+ fMat[kMTransY] = 0;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkScalar rad = SkDegreesToRadians(degrees);
+ return this->setSinCos(SkScalarSinSnapToZero(rad), SkScalarCosSnapToZero(rad), px, py);
+}
+
+SkMatrix& SkMatrix::setRotate(SkScalar degrees) {
+ SkScalar rad = SkDegreesToRadians(degrees);
+ return this->setSinCos(SkScalarSinSnapToZero(rad), SkScalarCosSnapToZero(rad));
+}
+
+SkMatrix& SkMatrix::preRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::preRotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::postRotate(SkScalar degrees, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setRotate(degrees, px, py);
+ return this->postConcat(m);
+}
+
+SkMatrix& SkMatrix::postRotate(SkScalar degrees) {
+ SkMatrix m;
+ m.setRotate(degrees);
+ return this->postConcat(m);
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+SkMatrix& SkMatrix::setSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ *this = SkMatrix(1, sx, -sx * py,
+ sy, 1, -sy * px,
+ 0, 0, 1,
+ kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::setSkew(SkScalar sx, SkScalar sy) {
+ fMat[kMScaleX] = 1;
+ fMat[kMSkewX] = sx;
+ fMat[kMTransX] = 0;
+
+ fMat[kMSkewY] = sy;
+ fMat[kMScaleY] = 1;
+ fMat[kMTransY] = 0;
+
+ fMat[kMPersp0] = fMat[kMPersp1] = 0;
+ fMat[kMPersp2] = 1;
+
+ this->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ return *this;
+}
+
+SkMatrix& SkMatrix::preSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setSkew(sx, sy, px, py);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::preSkew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ return this->preConcat(m);
+}
+
+SkMatrix& SkMatrix::postSkew(SkScalar sx, SkScalar sy, SkScalar px, SkScalar py) {
+ SkMatrix m;
+ m.setSkew(sx, sy, px, py);
+ return this->postConcat(m);
+}
+
+SkMatrix& SkMatrix::postSkew(SkScalar sx, SkScalar sy) {
+ SkMatrix m;
+ m.setSkew(sx, sy);
+ return this->postConcat(m);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkMatrix::setRectToRect(const SkRect& src, const SkRect& dst, ScaleToFit align) {
+ if (src.isEmpty()) {
+ this->reset();
+ return false;
+ }
+
+ if (dst.isEmpty()) {
+ sk_bzero(fMat, 8 * sizeof(SkScalar));
+ fMat[kMPersp2] = 1;
+ this->setTypeMask(kScale_Mask);
+ } else {
+ SkScalar tx, sx = dst.width() / src.width();
+ SkScalar ty, sy = dst.height() / src.height();
+ bool xLarger = false;
+
+ if (align != kFill_ScaleToFit) {
+ if (sx > sy) {
+ xLarger = true;
+ sx = sy;
+ } else {
+ sy = sx;
+ }
+ }
+
+ tx = dst.fLeft - src.fLeft * sx;
+ ty = dst.fTop - src.fTop * sy;
+ if (align == kCenter_ScaleToFit || align == kEnd_ScaleToFit) {
+ SkScalar diff;
+
+ if (xLarger) {
+ diff = dst.width() - src.width() * sy;
+ } else {
+ diff = dst.height() - src.height() * sy;
+ }
+
+ if (align == kCenter_ScaleToFit) {
+ diff = SkScalarHalf(diff);
+ }
+
+ if (xLarger) {
+ tx += diff;
+ } else {
+ ty += diff;
+ }
+ }
+
+ this->setScaleTranslate(sx, sy, tx, ty);
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline float muladdmul(float a, float b, float c, float d) {
+ return sk_double_to_float((double)a * b + (double)c * d);
+}
+
+static inline float rowcol3(const float row[], const float col[]) {
+ return row[0] * col[0] + row[1] * col[3] + row[2] * col[6];
+}
+
+static bool only_scale_and_translate(unsigned mask) {
+ return 0 == (mask & (SkMatrix::kAffine_Mask | SkMatrix::kPerspective_Mask));
+}
+
+SkMatrix& SkMatrix::setConcat(const SkMatrix& a, const SkMatrix& b) {
+ TypeMask aType = a.getType();
+ TypeMask bType = b.getType();
+
+ if (a.isTriviallyIdentity()) {
+ *this = b;
+ } else if (b.isTriviallyIdentity()) {
+ *this = a;
+ } else if (only_scale_and_translate(aType | bType)) {
+ this->setScaleTranslate(a.fMat[kMScaleX] * b.fMat[kMScaleX],
+ a.fMat[kMScaleY] * b.fMat[kMScaleY],
+ a.fMat[kMScaleX] * b.fMat[kMTransX] + a.fMat[kMTransX],
+ a.fMat[kMScaleY] * b.fMat[kMTransY] + a.fMat[kMTransY]);
+ } else {
+ SkMatrix tmp;
+
+ if ((aType | bType) & kPerspective_Mask) {
+ tmp.fMat[kMScaleX] = rowcol3(&a.fMat[0], &b.fMat[0]);
+ tmp.fMat[kMSkewX] = rowcol3(&a.fMat[0], &b.fMat[1]);
+ tmp.fMat[kMTransX] = rowcol3(&a.fMat[0], &b.fMat[2]);
+ tmp.fMat[kMSkewY] = rowcol3(&a.fMat[3], &b.fMat[0]);
+ tmp.fMat[kMScaleY] = rowcol3(&a.fMat[3], &b.fMat[1]);
+ tmp.fMat[kMTransY] = rowcol3(&a.fMat[3], &b.fMat[2]);
+ tmp.fMat[kMPersp0] = rowcol3(&a.fMat[6], &b.fMat[0]);
+ tmp.fMat[kMPersp1] = rowcol3(&a.fMat[6], &b.fMat[1]);
+ tmp.fMat[kMPersp2] = rowcol3(&a.fMat[6], &b.fMat[2]);
+
+ tmp.setTypeMask(kUnknown_Mask);
+ } else {
+ tmp.fMat[kMScaleX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMScaleX],
+ a.fMat[kMSkewX],
+ b.fMat[kMSkewY]);
+
+ tmp.fMat[kMSkewX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMSkewX],
+ a.fMat[kMSkewX],
+ b.fMat[kMScaleY]);
+
+ tmp.fMat[kMTransX] = muladdmul(a.fMat[kMScaleX],
+ b.fMat[kMTransX],
+ a.fMat[kMSkewX],
+ b.fMat[kMTransY]) + a.fMat[kMTransX];
+
+ tmp.fMat[kMSkewY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMScaleX],
+ a.fMat[kMScaleY],
+ b.fMat[kMSkewY]);
+
+ tmp.fMat[kMScaleY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMSkewX],
+ a.fMat[kMScaleY],
+ b.fMat[kMScaleY]);
+
+ tmp.fMat[kMTransY] = muladdmul(a.fMat[kMSkewY],
+ b.fMat[kMTransX],
+ a.fMat[kMScaleY],
+ b.fMat[kMTransY]) + a.fMat[kMTransY];
+
+ tmp.fMat[kMPersp0] = 0;
+ tmp.fMat[kMPersp1] = 0;
+ tmp.fMat[kMPersp2] = 1;
+ //SkDebugf("Concat mat non-persp type: %d\n", tmp.getType());
+ //SkASSERT(!(tmp.getType() & kPerspective_Mask));
+ tmp.setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask);
+ }
+ *this = tmp;
+ }
+ return *this;
+}
+
+SkMatrix& SkMatrix::preConcat(const SkMatrix& mat) {
+ // check for identity first, so we don't do a needless copy of ourselves
+ // to ourselves inside setConcat()
+ if(!mat.isIdentity()) {
+ this->setConcat(*this, mat);
+ }
+ return *this;
+}
+
+SkMatrix& SkMatrix::postConcat(const SkMatrix& mat) {
+ // check for identity first, so we don't do a needless copy of ourselves
+ // to ourselves inside setConcat()
+ if (!mat.isIdentity()) {
+ this->setConcat(mat, *this);
+ }
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Matrix inversion is very expensive, but also the place where keeping
+ precision may be most important (here and matrix concat). Hence to avoid
+ bitmap blitting artifacts when walking the inverse, we use doubles for
+ the intermediate math, even though we know that is more expensive.
+ */
+
+static inline SkScalar scross_dscale(SkScalar a, SkScalar b,
+ SkScalar c, SkScalar d, double scale) {
+ return SkDoubleToScalar(scross(a, b, c, d) * scale);
+}
+
+static inline double dcross(double a, double b, double c, double d) {
+ return a * b - c * d;
+}
+
+static inline SkScalar dcross_dscale(double a, double b,
+ double c, double d, double scale) {
+ return SkDoubleToScalar(dcross(a, b, c, d) * scale);
+}
+
+static double sk_determinant(const float mat[9], int isPerspective) {
+ if (isPerspective) {
+ return mat[SkMatrix::kMScaleX] *
+ dcross(mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp2],
+ mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp1])
+ +
+ mat[SkMatrix::kMSkewX] *
+ dcross(mat[SkMatrix::kMTransY], mat[SkMatrix::kMPersp0],
+ mat[SkMatrix::kMSkewY], mat[SkMatrix::kMPersp2])
+ +
+ mat[SkMatrix::kMTransX] *
+ dcross(mat[SkMatrix::kMSkewY], mat[SkMatrix::kMPersp1],
+ mat[SkMatrix::kMScaleY], mat[SkMatrix::kMPersp0]);
+ } else {
+ return dcross(mat[SkMatrix::kMScaleX], mat[SkMatrix::kMScaleY],
+ mat[SkMatrix::kMSkewX], mat[SkMatrix::kMSkewY]);
+ }
+}
+
+static double sk_inv_determinant(const float mat[9], int isPerspective) {
+ double det = sk_determinant(mat, isPerspective);
+
+ // Since the determinant is on the order of the cube of the matrix members,
+ // compare to the cube of the default nearly-zero constant (although an
+ // estimate of the condition number would be better if it wasn't so expensive).
+ if (SkScalarNearlyZero(sk_double_to_float(det),
+ SK_ScalarNearlyZero * SK_ScalarNearlyZero * SK_ScalarNearlyZero)) {
+ return 0;
+ }
+ return 1.0 / det;
+}
+
+void SkMatrix::SetAffineIdentity(SkScalar affine[6]) {
+ affine[kAScaleX] = 1;
+ affine[kASkewY] = 0;
+ affine[kASkewX] = 0;
+ affine[kAScaleY] = 1;
+ affine[kATransX] = 0;
+ affine[kATransY] = 0;
+}
+
+bool SkMatrix::asAffine(SkScalar affine[6]) const {
+ if (this->hasPerspective()) {
+ return false;
+ }
+ if (affine) {
+ affine[kAScaleX] = this->fMat[kMScaleX];
+ affine[kASkewY] = this->fMat[kMSkewY];
+ affine[kASkewX] = this->fMat[kMSkewX];
+ affine[kAScaleY] = this->fMat[kMScaleY];
+ affine[kATransX] = this->fMat[kMTransX];
+ affine[kATransY] = this->fMat[kMTransY];
+ }
+ return true;
+}
+
+void SkMatrix::mapPoints(SkPoint dst[], const SkPoint src[], int count) const {
+ SkASSERT((dst && src && count > 0) || 0 == count);
+ // no partial overlap
+ SkASSERT(src == dst || &dst[count] <= &src[0] || &src[count] <= &dst[0]);
+ this->getMapPtsProc()(*this, dst, src, count);
+}
+
+void SkMatrix::mapXY(SkScalar x, SkScalar y, SkPoint* result) const {
+ SkASSERT(result);
+ this->getMapXYProc()(*this, x, y, result);
+}
+
+void SkMatrix::ComputeInv(SkScalar dst[9], const SkScalar src[9], double invDet, bool isPersp) { // Write the inverse of src into dst given the precomputed 1/determinant; dst must not alias src.
+    SkASSERT(src != dst);
+    SkASSERT(src && dst);
+
+    if (isPersp) { // full 3x3 inverse: each entry is a cofactor (2x2 cross product) scaled by invDet
+        dst[kMScaleX] = scross_dscale(src[kMScaleY], src[kMPersp2], src[kMTransY], src[kMPersp1], invDet);
+        dst[kMSkewX] = scross_dscale(src[kMTransX], src[kMPersp1], src[kMSkewX], src[kMPersp2], invDet);
+        dst[kMTransX] = scross_dscale(src[kMSkewX], src[kMTransY], src[kMTransX], src[kMScaleY], invDet);
+
+        dst[kMSkewY] = scross_dscale(src[kMTransY], src[kMPersp0], src[kMSkewY], src[kMPersp2], invDet);
+        dst[kMScaleY] = scross_dscale(src[kMScaleX], src[kMPersp2], src[kMTransX], src[kMPersp0], invDet);
+        dst[kMTransY] = scross_dscale(src[kMTransX], src[kMSkewY], src[kMScaleX], src[kMTransY], invDet);
+
+        dst[kMPersp0] = scross_dscale(src[kMSkewY], src[kMPersp1], src[kMScaleY], src[kMPersp0], invDet);
+        dst[kMPersp1] = scross_dscale(src[kMSkewX], src[kMPersp0], src[kMScaleX], src[kMPersp1], invDet);
+        dst[kMPersp2] = scross_dscale(src[kMScaleX], src[kMScaleY], src[kMSkewX], src[kMSkewY], invDet);
+    } else { // not perspective: affine inverse, so the bottom row is known to be [0 0 1]
+        dst[kMScaleX] = SkDoubleToScalar(src[kMScaleY] * invDet);
+        dst[kMSkewX] = SkDoubleToScalar(-src[kMSkewX] * invDet);
+        dst[kMTransX] = dcross_dscale(src[kMSkewX], src[kMTransY], src[kMScaleY], src[kMTransX], invDet);
+
+        dst[kMSkewY] = SkDoubleToScalar(-src[kMSkewY] * invDet);
+        dst[kMScaleY] = SkDoubleToScalar(src[kMScaleX] * invDet);
+        dst[kMTransY] = dcross_dscale(src[kMSkewY], src[kMTransX], src[kMScaleX], src[kMTransY], invDet);
+
+        dst[kMPersp0] = 0;
+        dst[kMPersp1] = 0;
+        dst[kMPersp2] = 1;
+    }
+}
+
+bool SkMatrix::invertNonIdentity(SkMatrix* inv) const { // Invert a non-identity matrix into *inv (inv may be null to just test invertibility, or may alias this).
+    SkASSERT(!this->isIdentity());
+
+    TypeMask mask = this->getType();
+
+    if (0 == (mask & ~(kScale_Mask | kTranslate_Mask))) { // fast path: pure scale and/or translate, invert analytically
+        bool invertible = true;
+        if (inv) {
+            if (mask & kScale_Mask) {
+                SkScalar invX = fMat[kMScaleX];
+                SkScalar invY = fMat[kMScaleY];
+                if (0 == invX || 0 == invY) { // zero scale on either axis is singular
+                    return false;
+                }
+                invX = SkScalarInvert(invX);
+                invY = SkScalarInvert(invY);
+
+                // Must be careful when writing to inv, since it may be the
+                // same memory as this. (All reads of fMat happen before the overlapping writes below.)
+
+                inv->fMat[kMSkewX] = inv->fMat[kMSkewY] =
+                inv->fMat[kMPersp0] = inv->fMat[kMPersp1] = 0;
+
+                inv->fMat[kMScaleX] = invX;
+                inv->fMat[kMScaleY] = invY;
+                inv->fMat[kMPersp2] = 1;
+                inv->fMat[kMTransX] = -fMat[kMTransX] * invX; // inverse translate is -t scaled into the inverted space
+                inv->fMat[kMTransY] = -fMat[kMTransY] * invY;
+
+                inv->setTypeMask(mask | kRectStaysRect_Mask); // scale+translate always maps rects to rects
+            } else {
+                // translate only
+                inv->setTranslate(-fMat[kMTransX], -fMat[kMTransY]);
+            }
+        } else { // inv is nullptr, just check if we're invertible
+            if (!fMat[kMScaleX] || !fMat[kMScaleY]) {
+                invertible = false;
+            }
+        }
+        return invertible;
+    }
+
+    int isPersp = mask & kPerspective_Mask;
+    double invDet = sk_inv_determinant(fMat, isPersp); // 0 signals a (near-)singular matrix
+
+    if (invDet == 0) { // underflow
+        return false;
+    }
+
+    bool applyingInPlace = (inv == this);
+
+    SkMatrix* tmp = inv;
+
+    SkMatrix storage;
+    if (applyingInPlace || nullptr == tmp) {
+        tmp = &storage; // we either need to avoid trampling memory or have no memory
+    }
+
+    ComputeInv(tmp->fMat, fMat, invDet, isPersp);
+    if (!tmp->isFinite()) { // reject inverses that overflowed to inf/nan
+        return false;
+    }
+
+    tmp->setTypeMask(fTypeMask); // the inverse has the same type classification as the original
+
+    if (applyingInPlace) {
+        *inv = storage; // need to copy answer back
+    }
+
+    return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::Identity_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) { // MapPtsProc for the identity matrix: a straight copy.
+    SkASSERT(m.getType() == 0);
+
+    if (dst != src && count > 0) { // skip the memcpy when mapping in place or mapping nothing
+        memcpy(dst, src, count * sizeof(SkPoint));
+    }
+}
+
+void SkMatrix::Trans_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) { // MapPtsProc for translate-only matrices, vectorized two points at a time.
+    SkASSERT(m.getType() <= SkMatrix::kTranslate_Mask);
+    if (count > 0) {
+        SkScalar tx = m.getTranslateX();
+        SkScalar ty = m.getTranslateY();
+        if (count & 1) { // peel one scalar point so the remainder is even
+            dst->fX = src->fX + tx;
+            dst->fY = src->fY + ty;
+            src += 1;
+            dst += 1;
+        }
+        skvx::float4 trans4(tx, ty, tx, ty); // lane layout matches two packed SkPoints: (x0,y0,x1,y1)
+        count >>= 1;
+        if (count & 1) { // peel one 2-point SIMD step so the loop below runs in 4-point strides
+            (skvx::float4::Load(src) + trans4).store(dst);
+            src += 2;
+            dst += 2;
+        }
+        count >>= 1;
+        for (int i = 0; i < count; ++i) { // main loop: 4 points (two float4s) per iteration
+            (skvx::float4::Load(src+0) + trans4).store(dst+0);
+            (skvx::float4::Load(src+2) + trans4).store(dst+2);
+            src += 4;
+            dst += 4;
+        }
+    }
+}
+
+void SkMatrix::Scale_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) { // MapPtsProc for scale(+translate) matrices: p' = p * scale + trans, vectorized.
+    SkASSERT(m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask));
+    if (count > 0) {
+        SkScalar tx = m.getTranslateX();
+        SkScalar ty = m.getTranslateY();
+        SkScalar sx = m.getScaleX();
+        SkScalar sy = m.getScaleY();
+        skvx::float4 trans4(tx, ty, tx, ty); // lane layout matches two packed SkPoints: (x0,y0,x1,y1)
+        skvx::float4 scale4(sx, sy, sx, sy);
+        if (count & 1) { // peel one scalar point; upper two lanes are padding
+            skvx::float4 p(src->fX, src->fY, 0, 0);
+            p = p * scale4 + trans4;
+            dst->fX = p[0];
+            dst->fY = p[1];
+            src += 1;
+            dst += 1;
+        }
+        count >>= 1;
+        if (count & 1) { // peel one 2-point step so the loop below runs in 4-point strides
+            (skvx::float4::Load(src) * scale4 + trans4).store(dst);
+            src += 2;
+            dst += 2;
+        }
+        count >>= 1;
+        for (int i = 0; i < count; ++i) { // main loop: 4 points per iteration
+            (skvx::float4::Load(src+0) * scale4 + trans4).store(dst+0);
+            (skvx::float4::Load(src+2) * scale4 + trans4).store(dst+2);
+            src += 4;
+            dst += 4;
+        }
+    }
+}
+
+void SkMatrix::Persp_pts(const SkMatrix& m, SkPoint dst[],
+                         const SkPoint src[], int count) { // MapPtsProc for perspective matrices: full homogeneous transform plus w-divide.
+    SkASSERT(m.hasPerspective());
+
+    if (count > 0) {
+        do {
+            SkScalar sy = src->fY; // copy first, since src may alias dst
+            SkScalar sx = src->fX;
+            src += 1;
+
+            SkScalar x = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+            SkScalar y = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+            SkScalar z = sdot(sx, m.fMat[kMPersp0], sy, m.fMat[kMPersp1]) + m.fMat[kMPersp2]; // homogeneous w
+            if (z) { // guard the divide; z == 0 leaves z as 0 so the outputs become 0 rather than inf
+                z = 1 / z;
+            }
+
+            dst->fY = y * z;
+            dst->fX = x * z;
+            dst += 1;
+        } while (--count);
+    }
+}
+
+void SkMatrix::Affine_vpts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) { // MapPtsProc for general (non-perspective) affine matrices, two points per SIMD step.
+    SkASSERT(m.getType() != SkMatrix::kPerspective_Mask);
+    if (count > 0) {
+        SkScalar tx = m.getTranslateX();
+        SkScalar ty = m.getTranslateY();
+        SkScalar sx = m.getScaleX();
+        SkScalar sy = m.getScaleY();
+        SkScalar kx = m.getSkewX();
+        SkScalar ky = m.getSkewY();
+        skvx::float4 trans4(tx, ty, tx, ty);
+        skvx::float4 scale4(sx, sy, sx, sy);
+        skvx::float4  skew4(kx, ky, kx, ky); // applied to swizzle of src4
+        bool trailingElement = (count & 1);
+        count >>= 1;
+        skvx::float4 src4; // declared outside the loop so the trailing-element path can reuse it
+        for (int i = 0; i < count; ++i) {
+            src4 = skvx::float4::Load(src);
+            skvx::float4 swz4 = skvx::shuffle<1,0,3,2>(src4);  // y0 x0, y1 x1
+            (src4 * scale4 + swz4 * skew4 + trans4).store(dst); // x' = sx*x + kx*y + tx; y' = ky*x + sy*y + ty
+            src += 2;
+            dst += 2;
+        }
+        if (trailingElement) {
+            // We use the same logic here to ensure that the math stays consistent throughout, even
+            // though the high float2 is ignored.
+            src4.lo = skvx::float2::Load(src);
+            skvx::float4 swz4 = skvx::shuffle<1,0,3,2>(src4);  // y0 x0, y1 x1
+            (src4 * scale4 + swz4 * skew4 + trans4).lo.store(dst); // only the low two lanes (one point) are written
+        }
+    }
+}
+
+const SkMatrix::MapPtsProc SkMatrix::gMapPtsProcs[] = { // Dispatch table indexed by TypeMask bits (translate|scale|affine|perspective).
+    SkMatrix::Identity_pts, SkMatrix::Trans_pts,
+    SkMatrix::Scale_pts,    SkMatrix::Scale_pts,    // Scale_pts handles scale with or without translate
+    SkMatrix::Affine_vpts,  SkMatrix::Affine_vpts,  // Affine_vpts handles every non-perspective combination with skew
+    SkMatrix::Affine_vpts,  SkMatrix::Affine_vpts,
+    // repeat the persp proc 8 times
+    SkMatrix::Persp_pts,    SkMatrix::Persp_pts,
+    SkMatrix::Persp_pts,    SkMatrix::Persp_pts,
+    SkMatrix::Persp_pts,    SkMatrix::Persp_pts,
+    SkMatrix::Persp_pts,    SkMatrix::Persp_pts
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrixPriv::MapHomogeneousPointsWithStride(const SkMatrix& mx, SkPoint3 dst[],
+                                                  size_t dstStride, const SkPoint3 src[],
+                                                  size_t srcStride, int count) { // Map (x,y,w) triples through mx without dividing by w; strides are in bytes.
+    SkASSERT((dst && src && count > 0) || 0 == count);
+    // no partial overlap: arrays must be identical or fully disjoint
+    SkASSERT(src == dst || &dst[count] <= &src[0] || &src[count] <= &dst[0]);
+
+    if (count > 0) {
+        if (mx.isIdentity()) { // identity fast path: copy (bulk memcpy only when both strides are packed)
+            if (src != dst) {
+                if (srcStride == sizeof(SkPoint3) && dstStride == sizeof(SkPoint3)) {
+                    memcpy(dst, src, count * sizeof(SkPoint3));
+                } else {
+                    for (int i = 0; i < count; ++i) {
+                        *dst = *src;
+                        dst = reinterpret_cast<SkPoint3*>(reinterpret_cast<char*>(dst) + dstStride); // byte-stride advance
+                        src = reinterpret_cast<const SkPoint3*>(reinterpret_cast<const char*>(src) +
+                                                                srcStride);
+                    }
+                }
+            }
+            return;
+        }
+        do {
+            SkScalar sx = src->fX; // copy inputs first, since src may alias dst
+            SkScalar sy = src->fY;
+            SkScalar sw = src->fZ; // fZ carries the homogeneous w component
+            src = reinterpret_cast<const SkPoint3*>(reinterpret_cast<const char*>(src) + srcStride);
+            const SkScalar* mat = mx.fMat;
+            typedef SkMatrix M;
+            SkScalar x = sdot(sx, mat[M::kMScaleX], sy, mat[M::kMSkewX], sw, mat[M::kMTransX]);
+            SkScalar y = sdot(sx, mat[M::kMSkewY], sy, mat[M::kMScaleY], sw, mat[M::kMTransY]);
+            SkScalar w = sdot(sx, mat[M::kMPersp0], sy, mat[M::kMPersp1], sw, mat[M::kMPersp2]);
+
+            dst->set(x, y, w); // note: no perspective divide — result stays homogeneous
+            dst = reinterpret_cast<SkPoint3*>(reinterpret_cast<char*>(dst) + dstStride);
+        } while (--count);
+    }
+}
+
+void SkMatrix::mapHomogeneousPoints(SkPoint3 dst[], const SkPoint3 src[], int count) const { // Packed-array convenience wrapper over the strided version.
+    SkMatrixPriv::MapHomogeneousPointsWithStride(*this, dst, sizeof(SkPoint3), src,
+                                                 sizeof(SkPoint3), count);
+}
+
+void SkMatrix::mapHomogeneousPoints(SkPoint3 dst[], const SkPoint src[], int count) const { // Lift 2D points to (x, y, 1) and map them homogeneously (no w-divide).
+    if (this->isIdentity()) {
+        for (int i = 0; i < count; ++i) {
+            dst[i] = { src[i].fX, src[i].fY, 1 };
+        }
+    } else if (this->hasPerspective()) { // full 3x3 multiply; fMat indexed row-major
+        for (int i = 0; i < count; ++i) {
+            dst[i] = {
+                fMat[0] * src[i].fX + fMat[1] * src[i].fY + fMat[2],
+                fMat[3] * src[i].fX + fMat[4] * src[i].fY + fMat[5],
+                fMat[6] * src[i].fX + fMat[7] * src[i].fY + fMat[8],
+            };
+        }
+    } else {  // affine: bottom row is [0 0 1], so w is always 1
+        for (int i = 0; i < count; ++i) {
+            dst[i] = {
+                fMat[0] * src[i].fX + fMat[1] * src[i].fY + fMat[2],
+                fMat[3] * src[i].fX + fMat[4] * src[i].fY + fMat[5],
+                1,
+            };
+        }
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::mapVectors(SkPoint dst[], const SkPoint src[], int count) const { // Map direction vectors: like mapPoints but ignoring the translation component.
+    if (this->hasPerspective()) { // under perspective, subtract the mapped origin instead of dropping translate
+        SkPoint origin;
+
+        MapXYProc proc = this->getMapXYProc();
+        proc(*this, 0, 0, &origin); // where (0,0) lands
+
+        for (int i = count - 1; i >= 0; --i) { // reverse order so src may alias dst
+            SkPoint tmp;
+
+            proc(*this, src[i].fX, src[i].fY, &tmp);
+            dst[i].set(tmp.fX - origin.fX, tmp.fY - origin.fY);
+        }
+    } else {
+        SkMatrix tmp = *this; // copy, then strip the translation and map normally
+
+        tmp.fMat[kMTransX] = tmp.fMat[kMTransY] = 0;
+        tmp.clearTypeMask(kTranslate_Mask);
+        tmp.mapPoints(dst, src, count);
+    }
+}
+
+static skvx::float4 sort_as_rect(const skvx::float4& ltrb) { // Reorder an (l,t,r,b) vector so l<=r and t<=b (handles negative scales flipping edges).
+    skvx::float4 rblt(ltrb[2], ltrb[3], ltrb[0], ltrb[1]); // swap the two corners
+    auto min = skvx::min(ltrb, rblt);
+    auto max = skvx::max(ltrb, rblt);
+    // We can extract either pair [0,1] or [2,3] from min and max and be correct, but on
+    // ARM this sequence generates the fastest (a single instruction).
+    return skvx::float4(min[2], min[3], max[0], max[1]);
+}
+
+void SkMatrix::mapRectScaleTranslate(SkRect* dst, const SkRect& src) const { // Map a rect through a scale+translate matrix: all four edges in one SIMD op.
+    SkASSERT(dst);
+    SkASSERT(this->isScaleTranslate());
+
+    SkScalar sx = fMat[kMScaleX];
+    SkScalar sy = fMat[kMScaleY];
+    SkScalar tx = fMat[kMTransX];
+    SkScalar ty = fMat[kMTransY];
+    skvx::float4 scale(sx, sy, sx, sy); // lanes line up with (fLeft, fTop, fRight, fBottom)
+    skvx::float4 trans(tx, ty, tx, ty);
+    sort_as_rect(skvx::float4::Load(&src.fLeft) * scale + trans).store(&dst->fLeft); // sort_as_rect fixes edge order if a scale was negative
+}
+
+bool SkMatrix::mapRect(SkRect* dst, const SkRect& src, SkApplyPerspectiveClip pc) const { // Map src's bounds into dst; returns true iff dst is exactly the mapped rect (not just a bounding box).
+    SkASSERT(dst);
+
+    if (this->getType() <= kTranslate_Mask) { // identity or translate-only: shift all four edges at once
+        SkScalar tx = fMat[kMTransX];
+        SkScalar ty = fMat[kMTransY];
+        skvx::float4 trans(tx, ty, tx, ty);
+        sort_as_rect(skvx::float4::Load(&src.fLeft) + trans).store(&dst->fLeft);
+        return true;
+    }
+    if (this->isScaleTranslate()) {
+        this->mapRectScaleTranslate(dst, src);
+        return true;
+    } else if (pc == SkApplyPerspectiveClip::kYes && this->hasPerspective()) {
+        SkPath path; // path transform performs the w >= 0 perspective clip for us
+        path.addRect(src);
+        path.transform(*this);
+        *dst = path.getBounds();
+        return false; // bounds of a clipped quad, not an exact rect mapping
+    } else {
+        SkPoint quad[4]; // map the four corners and take their bounds
+
+        src.toQuad(quad);
+        this->mapPoints(quad, quad, 4);
+        dst->setBoundsNoCheck(quad, 4);
+        return this->rectStaysRect();   // might still return true if rotated by 90, etc.
+    }
+}
+
+SkScalar SkMatrix::mapRadius(SkScalar radius) const { // Approximate the radius of the ellipse that a circle of the given radius maps to.
+    SkVector vec[2];
+
+    vec[0].set(radius, 0); // map the two axis-aligned radius vectors
+    vec[1].set(0, radius);
+    this->mapVectors(vec, 2);
+
+    SkScalar d0 = vec[0].length();
+    SkScalar d1 = vec[1].length();
+
+    // return geometric mean
+    return SkScalarSqrt(d0 * d1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkMatrix::Persp_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+                        SkPoint* pt) { // MapXYProc for perspective matrices: full 3x3 transform plus w-divide.
+    SkASSERT(m.hasPerspective());
+
+    SkScalar x = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+    SkScalar y = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+    SkScalar z = sdot(sx, m.fMat[kMPersp0], sy, m.fMat[kMPersp1]) + m.fMat[kMPersp2]; // homogeneous w
+    if (z) { // guard the divide; z == 0 yields 0 outputs rather than inf
+        z = 1 / z;
+    }
+    pt->fX = x * z;
+    pt->fY = y * z;
+}
+
+void SkMatrix::RotTrans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+                           SkPoint* pt) { // MapXYProc for affine (skew/rotate) matrices with translation.
+    SkASSERT((m.getType() & (kAffine_Mask | kPerspective_Mask)) == kAffine_Mask);
+
+    pt->fX = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX];
+    pt->fY = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+}
+
+void SkMatrix::Rot_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+                      SkPoint* pt) { // MapXYProc for affine matrices with zero translation.
+    SkASSERT((m.getType() & (kAffine_Mask | kPerspective_Mask))== kAffine_Mask);
+    SkASSERT(0 == m.fMat[kMTransX]);
+    SkASSERT(0 == m.fMat[kMTransY]);
+
+    pt->fX = sdot(sx, m.fMat[kMScaleX], sy, m.fMat[kMSkewX]) + m.fMat[kMTransX]; // trans terms are asserted zero; kept for uniform codegen
+    pt->fY = sdot(sx, m.fMat[kMSkewY], sy, m.fMat[kMScaleY]) + m.fMat[kMTransY];
+}
+
+void SkMatrix::ScaleTrans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+                             SkPoint* pt) { // MapXYProc for scale + translate matrices.
+    SkASSERT((m.getType() & (kScale_Mask | kAffine_Mask | kPerspective_Mask))
+             == kScale_Mask);
+
+    pt->fX = sx * m.fMat[kMScaleX] + m.fMat[kMTransX];
+    pt->fY = sy * m.fMat[kMScaleY] + m.fMat[kMTransY];
+}
+
+void SkMatrix::Scale_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+                        SkPoint* pt) { // MapXYProc for scale-only matrices (translation asserted zero).
+    SkASSERT((m.getType() & (kScale_Mask | kAffine_Mask | kPerspective_Mask))
+             == kScale_Mask);
+    SkASSERT(0 == m.fMat[kMTransX]);
+    SkASSERT(0 == m.fMat[kMTransY]);
+
+    pt->fX = sx * m.fMat[kMScaleX];
+    pt->fY = sy * m.fMat[kMScaleY];
+}
+
+void SkMatrix::Trans_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+                        SkPoint* pt) { // MapXYProc for translate-only matrices.
+    SkASSERT(m.getType() == kTranslate_Mask);
+
+    pt->fX = sx + m.fMat[kMTransX];
+    pt->fY = sy + m.fMat[kMTransY];
+}
+
+void SkMatrix::Identity_xy(const SkMatrix& m, SkScalar sx, SkScalar sy,
+                           SkPoint* pt) { // MapXYProc for the identity matrix: pass the input through.
+    SkASSERT(0 == m.getType());
+
+    pt->fX = sx;
+    pt->fY = sy;
+}
+
+const SkMatrix::MapXYProc SkMatrix::gMapXYProcs[] = { // Dispatch table indexed by TypeMask bits (translate|scale|affine|perspective).
+    SkMatrix::Identity_xy, SkMatrix::Trans_xy,
+    SkMatrix::Scale_xy, SkMatrix::ScaleTrans_xy,
+    SkMatrix::Rot_xy, SkMatrix::RotTrans_xy,
+    SkMatrix::Rot_xy, SkMatrix::RotTrans_xy,
+    // repeat the persp proc 8 times
+    SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+    SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+    SkMatrix::Persp_xy, SkMatrix::Persp_xy,
+    SkMatrix::Persp_xy, SkMatrix::Persp_xy
+};
+
+///////////////////////////////////////////////////////////////////////////////
+#if 0
+// if it's nearly zero (the 26 was chosen empirically; perhaps it should be bigger or smaller)
+#define PerspNearlyZero(x) SkScalarNearlyZero(x, (1.0f / (1 << 26)))
+
+bool SkMatrix::isFixedStepInX() const {
+ return PerspNearlyZero(fMat[kMPersp0]);
+}
+
+SkVector SkMatrix::fixedStepInX(SkScalar y) const {
+ SkASSERT(PerspNearlyZero(fMat[kMPersp0]));
+ if (PerspNearlyZero(fMat[kMPersp1]) &&
+ PerspNearlyZero(fMat[kMPersp2] - 1)) {
+ return SkVector::Make(fMat[kMScaleX], fMat[kMSkewY]);
+ } else {
+ SkScalar z = y * fMat[kMPersp1] + fMat[kMPersp2];
+ return SkVector::Make(fMat[kMScaleX] / z, fMat[kMSkewY] / z);
+ }
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static inline bool checkForZero(float x) { // True when x is zero or small enough that x*x underflows to zero (unsafe as a divisor).
+    return x*x == 0;
+}
+
+bool SkMatrix::Poly2Proc(const SkPoint srcPt[], SkMatrix* dst) { // Build a matrix mapping the unit segment/frame onto the 2-point poly (rotate+scale+translate).
+    dst->fMat[kMScaleX] = srcPt[1].fY - srcPt[0].fY;
+    dst->fMat[kMSkewY] = srcPt[0].fX - srcPt[1].fX;
+    dst->fMat[kMPersp0] = 0;
+
+    dst->fMat[kMSkewX] = srcPt[1].fX - srcPt[0].fX;
+    dst->fMat[kMScaleY] = srcPt[1].fY - srcPt[0].fY;
+    dst->fMat[kMPersp1] = 0;
+
+    dst->fMat[kMTransX] = srcPt[0].fX; // frame origin is the first point
+    dst->fMat[kMTransY] = srcPt[0].fY;
+    dst->fMat[kMPersp2] = 1;
+    dst->setTypeMask(kUnknown_Mask); // type will be recomputed lazily
+    return true;
+}
+
+bool SkMatrix::Poly3Proc(const SkPoint srcPt[], SkMatrix* dst) { // Build the affine matrix mapping the unit frame onto the 3-point poly (basis from points 1 and 2 relative to point 0).
+    dst->fMat[kMScaleX] = srcPt[2].fX - srcPt[0].fX;
+    dst->fMat[kMSkewY] = srcPt[2].fY - srcPt[0].fY;
+    dst->fMat[kMPersp0] = 0;
+
+    dst->fMat[kMSkewX] = srcPt[1].fX - srcPt[0].fX;
+    dst->fMat[kMScaleY] = srcPt[1].fY - srcPt[0].fY;
+    dst->fMat[kMPersp1] = 0;
+
+    dst->fMat[kMTransX] = srcPt[0].fX; // frame origin is the first point
+    dst->fMat[kMTransY] = srcPt[0].fY;
+    dst->fMat[kMPersp2] = 1;
+    dst->setTypeMask(kUnknown_Mask); // type will be recomputed lazily
+    return true;
+}
+
+bool SkMatrix::Poly4Proc(const SkPoint srcPt[], SkMatrix* dst) { // Build the projective matrix mapping the unit frame onto a 4-point poly; false if degenerate.
+    float a1, a2; // the two perspective coefficients being solved for
+    float x0, y0, x1, y1, x2, y2;
+
+    x0 = srcPt[2].fX - srcPt[0].fX; // all deltas taken relative to srcPt[2] / srcPt[0]
+    y0 = srcPt[2].fY - srcPt[0].fY;
+    x1 = srcPt[2].fX - srcPt[1].fX;
+    y1 = srcPt[2].fY - srcPt[1].fY;
+    x2 = srcPt[2].fX - srcPt[3].fX;
+    y2 = srcPt[2].fY - srcPt[3].fY;
+
+    /* check if abs(x2) > abs(y2) */ // pick the division order that divides by the larger magnitude, for accuracy
+    if ( x2 > 0 ? y2 > 0 ? x2 > y2 : x2 > -y2 : y2 > 0 ? -x2 > y2 : x2 < y2) {
+        float denom = sk_ieee_float_divide(x1 * y2, x2) - y1; // sk_ieee_float_divide tolerates 0/0 -> nan, caught below
+        if (checkForZero(denom)) {
+            return false;
+        }
+        a1 = (((x0 - x1) * y2 / x2) - y0 + y1) / denom;
+    } else {
+        float denom = x1 - sk_ieee_float_divide(y1 * x2, y2);
+        if (checkForZero(denom)) {
+            return false;
+        }
+        a1 = (x0 - x1 - sk_ieee_float_divide((y0 - y1) * x2, y2)) / denom;
+    }
+
+    /* check if abs(x1) > abs(y1) */ // same numeric-stability trick for the second coefficient
+    if ( x1 > 0 ? y1 > 0 ? x1 > y1 : x1 > -y1 : y1 > 0 ? -x1 > y1 : x1 < y1) {
+        float denom = y2 - sk_ieee_float_divide(x2 * y1, x1);
+        if (checkForZero(denom)) {
+            return false;
+        }
+        a2 = (y0 - y2 - sk_ieee_float_divide((x0 - x2) * y1, x1)) / denom;
+    } else {
+        float denom = sk_ieee_float_divide(y2 * x1, y1) - x2;
+        if (checkForZero(denom)) {
+            return false;
+        }
+        a2 = (sk_ieee_float_divide((y0 - y2) * x1, y1) - x0 + x2) / denom;
+    }
+
+    dst->fMat[kMScaleX] = a2 * srcPt[3].fX + srcPt[3].fX - srcPt[0].fX; // i.e. (a2 + 1) * srcPt[3] - srcPt[0]
+    dst->fMat[kMSkewY] = a2 * srcPt[3].fY + srcPt[3].fY - srcPt[0].fY;
+    dst->fMat[kMPersp0] = a2;
+
+    dst->fMat[kMSkewX] = a1 * srcPt[1].fX + srcPt[1].fX - srcPt[0].fX; // i.e. (a1 + 1) * srcPt[1] - srcPt[0]
+    dst->fMat[kMScaleY] = a1 * srcPt[1].fY + srcPt[1].fY - srcPt[0].fY;
+    dst->fMat[kMPersp1] = a1;
+
+    dst->fMat[kMTransX] = srcPt[0].fX; // frame origin is the first point
+    dst->fMat[kMTransY] = srcPt[0].fY;
+    dst->fMat[kMPersp2] = 1;
+    dst->setTypeMask(kUnknown_Mask); // type will be recomputed lazily
+    return true;
+}
+
+typedef bool (*PolyMapProc)(const SkPoint[], SkMatrix*); // Builds a matrix mapping a canonical frame onto an N-point polygon.
+
+/* Adapted from Rob Johnson's original sample code in QuickDraw GX
+*/
+bool SkMatrix::setPolyToPoly(const SkPoint src[], const SkPoint dst[], int count) { // Set this matrix to map src[0..count) onto dst[0..count); count in [0, 4].
+    if ((unsigned)count > 4) { // unsigned cast also rejects negative counts
+        SkDebugf("--- SkMatrix::setPolyToPoly count out of range %d\n", count);
+        return false;
+    }
+
+    if (0 == count) { // no constraints: identity
+        this->reset();
+        return true;
+    }
+    if (1 == count) { // one point pair: pure translation
+        this->setTranslate(dst[0].fX - src[0].fX, dst[0].fY - src[0].fY);
+        return true;
+    }
+
+    const PolyMapProc gPolyMapProcs[] = {
+        SkMatrix::Poly2Proc, SkMatrix::Poly3Proc, SkMatrix::Poly4Proc
+    };
+    PolyMapProc proc = gPolyMapProcs[count - 2];
+
+    SkMatrix tempMap, result;
+
+    if (!proc(src, &tempMap)) { // frame -> src
+        return false;
+    }
+    if (!tempMap.invert(&result)) { // src -> frame
+        return false;
+    }
+    if (!proc(dst, &tempMap)) { // frame -> dst
+        return false;
+    }
+    this->setConcat(tempMap, result); // src -> frame -> dst
+    return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+enum MinMaxOrBoth { // Selects which singular value(s) get_scale_factor computes.
+    kMin_MinMaxOrBoth,
+    kMax_MinMaxOrBoth,
+    kBoth_MinMaxOrBoth
+};
+
+template <MinMaxOrBoth MIN_MAX_OR_BOTH> bool get_scale_factor(SkMatrix::TypeMask typeMask,
+                                                              const SkScalar m[9],
+                                                              SkScalar results[/*1 or 2*/]) { // Min/max singular value(s) of the upper 2x2; false for perspective or non-finite results.
+    if (typeMask & SkMatrix::kPerspective_Mask) { // scale factors aren't uniform under perspective
+        return false;
+    }
+    if (SkMatrix::kIdentity_Mask == typeMask) {
+        results[0] = SK_Scalar1;
+        if (kBoth_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+            results[1] = SK_Scalar1;
+        }
+        return true;
+    }
+    if (!(typeMask & SkMatrix::kAffine_Mask)) { // no skew: the singular values are just |scaleX| and |scaleY|
+        if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+            results[0] = std::min(SkScalarAbs(m[SkMatrix::kMScaleX]),
+                                  SkScalarAbs(m[SkMatrix::kMScaleY]));
+        } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+            results[0] = std::max(SkScalarAbs(m[SkMatrix::kMScaleX]),
+                                  SkScalarAbs(m[SkMatrix::kMScaleY]));
+        } else {
+            results[0] = SkScalarAbs(m[SkMatrix::kMScaleX]);
+            results[1] = SkScalarAbs(m[SkMatrix::kMScaleY]);
+            if (results[0] > results[1]) { // kBoth contract: results returned in ascending order
+                using std::swap;
+                swap(results[0], results[1]);
+            }
+        }
+        return true;
+    }
+    // ignore the translation part of the matrix, just look at 2x2 portion.
+    // compute singular values, take largest or smallest abs value.
+    // [a b; b c] = A^T*A
+    SkScalar a = sdot(m[SkMatrix::kMScaleX], m[SkMatrix::kMScaleX],
+                      m[SkMatrix::kMSkewY],  m[SkMatrix::kMSkewY]);
+    SkScalar b = sdot(m[SkMatrix::kMScaleX], m[SkMatrix::kMSkewX],
+                      m[SkMatrix::kMScaleY], m[SkMatrix::kMSkewY]);
+    SkScalar c = sdot(m[SkMatrix::kMSkewX],  m[SkMatrix::kMSkewX],
+                      m[SkMatrix::kMScaleY], m[SkMatrix::kMScaleY]);
+    // eigenvalues of A^T*A are the squared singular values of A.
+    // characteristic equation is det((A^T*A) - l*I) = 0
+    // l^2 - (a + c)l + (ac-b^2)
+    // solve using quadratic equation (divisor is non-zero since l^2 has 1 coeff
+    // and roots are guaranteed to be pos and real).
+    SkScalar bSqd = b * b;
+    // if upper left 2x2 is orthogonal save some math
+    if (bSqd <= SK_ScalarNearlyZero*SK_ScalarNearlyZero) { // b ~ 0: A^T*A is already diagonal, eigenvalues are a and c
+        if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+            results[0] = std::min(a, c);
+        } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+            results[0] = std::max(a, c);
+        } else {
+            results[0] = a;
+            results[1] = c;
+            if (results[0] > results[1]) {
+                using std::swap;
+                swap(results[0], results[1]);
+            }
+        }
+    } else { // general case: quadratic formula on the characteristic polynomial
+        SkScalar aminusc = a - c;
+        SkScalar apluscdiv2 = SkScalarHalf(a + c);
+        SkScalar x = SkScalarHalf(SkScalarSqrt(aminusc * aminusc + 4 * bSqd)); // half the discriminant
+        if (kMin_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+            results[0] = apluscdiv2 - x;
+        } else if (kMax_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+            results[0] = apluscdiv2 + x;
+        } else {
+            results[0] = apluscdiv2 - x;
+            results[1] = apluscdiv2 + x;
+        }
+    }
+    if (!SkScalarIsFinite(results[0])) { // overflow in the a/b/c products
+        return false;
+    }
+    // Due to the floating point inaccuracy, there might be an error in a, b, c
+    // calculated by sdot, further deepened by subsequent arithmetic operations
+    // on them. Therefore, we allow and cap the nearly-zero negative values.
+    if (results[0] < 0) {
+        results[0] = 0;
+    }
+    results[0] = SkScalarSqrt(results[0]); // eigenvalue -> singular value
+    if (kBoth_MinMaxOrBoth == MIN_MAX_OR_BOTH) {
+        if (!SkScalarIsFinite(results[1])) {
+            return false;
+        }
+        if (results[1] < 0) {
+            results[1] = 0;
+        }
+        results[1] = SkScalarSqrt(results[1]);
+    }
+    return true;
+}
+
+SkScalar SkMatrix::getMinScale() const { // Smallest singular value of the upper 2x2, or -1 if it can't be computed (perspective/overflow).
+    SkScalar factor;
+    if (get_scale_factor<kMin_MinMaxOrBoth>(this->getType(), fMat, &factor)) {
+        return factor;
+    } else {
+        return -1;
+    }
+}
+
+SkScalar SkMatrix::getMaxScale() const { // Largest singular value of the upper 2x2, or -1 on failure.
+    SkScalar factor;
+    if (get_scale_factor<kMax_MinMaxOrBoth>(this->getType(), fMat, &factor)) {
+        return factor;
+    } else {
+        return -1;
+    }
+}
+
+bool SkMatrix::getMinMaxScales(SkScalar scaleFactors[2]) const { // Both singular values, ascending, into scaleFactors[0..1]; false on failure.
+    return get_scale_factor<kBoth_MinMaxOrBoth>(this->getType(), fMat, scaleFactors);
+}
+
+const SkMatrix& SkMatrix::I() { // Shared immutable identity matrix (default-constructed SkMatrix is identity).
+    static constexpr SkMatrix identity;
+    SkASSERT(identity.isIdentity());
+    return identity;
+}
+
+const SkMatrix& SkMatrix::InvalidMatrix() { // Shared sentinel matrix: all entries SK_ScalarMax with every type bit set.
+    static constexpr SkMatrix invalid(SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+                                      SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+                                      SK_ScalarMax, SK_ScalarMax, SK_ScalarMax,
+                                      kTranslate_Mask | kScale_Mask |
+                                      kAffine_Mask | kPerspective_Mask);
+    return invalid;
+}
+
+bool SkMatrix::decomposeScale(SkSize* scale, SkMatrix* remaining) const { // Split this = remaining * scale(sx, sy); false for perspective or degenerate/non-finite scale.
+    if (this->hasPerspective()) {
+        return false;
+    }
+
+    const SkScalar sx = SkVector::Length(this->getScaleX(), this->getSkewY()); // length of the mapped x basis vector
+    const SkScalar sy = SkVector::Length(this->getSkewX(), this->getScaleY()); // length of the mapped y basis vector
+    if (!SkScalarIsFinite(sx) || !SkScalarIsFinite(sy) ||
+        SkScalarNearlyZero(sx) || SkScalarNearlyZero(sy)) { // near-zero scale would make 'remaining' blow up
+        return false;
+    }
+
+    if (scale) { // both out-params are optional
+        scale->set(sx, sy);
+    }
+    if (remaining) {
+        *remaining = *this;
+        remaining->preScale(SkScalarInvert(sx), SkScalarInvert(sy)); // divide the scale back out
+    }
+    return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkMatrix::writeToMemory(void* buffer) const { // Serialize the 9 scalars; returns bytes written (or required, when buffer is null).
+    // TODO write less for simple matrices
+    static const size_t sizeInMemory = 9 * sizeof(SkScalar);
+    if (buffer) { // null buffer: size query only
+        memcpy(buffer, fMat, sizeInMemory);
+    }
+    return sizeInMemory;
+}
+
+size_t SkMatrix::readFromMemory(const void* buffer, size_t length) { // Deserialize 9 scalars; returns bytes consumed, or 0 if length is too small.
+    static const size_t sizeInMemory = 9 * sizeof(SkScalar);
+    if (length < sizeInMemory) {
+        return 0;
+    }
+    memcpy(fMat, buffer, sizeInMemory);
+    this->setTypeMask(kUnknown_Mask);
+    // Figure out the type now so that we're thread-safe
+    (void)this->getType();
+    return sizeInMemory;
+}
+
+void SkMatrix::dump() const { // Debug print: all 9 entries, row by row.
+    SkString str;
+    str.appendf("[%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f][%8.4f %8.4f %8.4f]",
+             fMat[0], fMat[1], fMat[2], fMat[3], fMat[4], fMat[5],
+             fMat[6], fMat[7], fMat[8]);
+    SkDebugf("%s\n", str.c_str());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkTreatAsSprite(const SkMatrix& mat, const SkISize& size, const SkSamplingOptions& sampling,
+                     bool isAntiAlias) { // True if drawing a size-sized image through mat can use the fast integer-aligned sprite blitter.
+    if (!SkSamplingPriv::NoChangeWithIdentityMatrix(sampling)) { // sampling that resamples even at identity rules out sprite copies
+        return false;
+    }
+
+    // Our path aa is 2-bits, and our rect aa is 8, so we could use 8,
+    // but in practice 4 seems enough (still looks smooth) and allows
+    // more slightly fractional cases to fall into the fast (sprite) case.
+    static const unsigned kAntiAliasSubpixelBits = 4;
+
+    const unsigned subpixelBits = isAntiAlias ? kAntiAliasSubpixelBits : 0;
+
+    // quick reject on affine or perspective
+    if (mat.getType() & ~(SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) {
+        return false;
+    }
+
+    // quick success check
+    if (!subpixelBits && !(mat.getType() & ~SkMatrix::kTranslate_Mask)) { // non-AA translate-only: always sprite-able
+        return true;
+    }
+
+    // mapRect supports negative scales, so we eliminate those first
+    if (mat.getScaleX() < 0 || mat.getScaleY() < 0) {
+        return false;
+    }
+
+    SkRect dst;
+    SkIRect isrc = SkIRect::MakeSize(size);
+
+    {
+        SkRect src;
+        src.set(isrc);
+        mat.mapRect(&dst, src); // where the image actually lands
+    }
+
+    // just apply the translate to isrc
+    isrc.offset(SkScalarRoundToInt(mat.getTranslateX()),
+                SkScalarRoundToInt(mat.getTranslateY()));
+
+    if (subpixelBits) { // compare at subpixel resolution by scaling both rects up
+        isrc.fLeft = SkLeftShift(isrc.fLeft, subpixelBits);
+        isrc.fTop = SkLeftShift(isrc.fTop, subpixelBits);
+        isrc.fRight = SkLeftShift(isrc.fRight, subpixelBits);
+        isrc.fBottom = SkLeftShift(isrc.fBottom, subpixelBits);
+
+        const float scale = 1 << subpixelBits;
+        dst.fLeft *= scale;
+        dst.fTop *= scale;
+        dst.fRight *= scale;
+        dst.fBottom *= scale;
+    }
+
+    SkIRect idst;
+    dst.round(&idst);
+    return isrc == idst; // sprite-able iff the mapped rect rounds to exactly the translated source
+}
+
+// A square matrix M can be decomposed (via polar decomposition) into two matrices --
+// an orthogonal matrix Q and a symmetric matrix S. In turn we can decompose S into U*W*U^T,
+// where U is another orthogonal matrix and W is a scale matrix. These can be recombined
+// to give M = (Q*U)*W*U^T, i.e., the product of two orthogonal matrices and a scale matrix.
+//
+// The one wrinkle is that traditionally Q may contain a reflection -- the
+// calculation has been rejiggered to put that reflection into W.
+bool SkDecomposeUpper2x2(const SkMatrix& matrix,
+                         SkPoint* rotation1,
+                         SkPoint* scale,
+                         SkPoint* rotation2) { // Outputs are (cos, sin) pairs for the rotations and (sx, sy) for the scale; any may be null. False if the 2x2 is degenerate.
+
+    SkScalar A = matrix[SkMatrix::kMScaleX];
+    SkScalar B = matrix[SkMatrix::kMSkewX];
+    SkScalar C = matrix[SkMatrix::kMSkewY];
+    SkScalar D = matrix[SkMatrix::kMScaleY];
+
+    if (is_degenerate_2x2(A, B, C, D)) { // no meaningful decomposition for a (near-)singular 2x2
+        return false;
+    }
+
+    double w1, w2; // eigenvalues of S == the scale factors
+    SkScalar cos1, sin1;
+    SkScalar cos2, sin2;
+
+    // do polar decomposition (M = Q*S)
+    SkScalar cosQ, sinQ;
+    double Sa, Sb, Sd; // entries of symmetric S (Sc == Sb by symmetry)
+    // if M is already symmetric (i.e., M = I*S)
+    if (SkScalarNearlyEqual(B, C)) {
+        cosQ = 1;
+        sinQ = 0;
+
+        Sa = A;
+        Sb = B;
+        Sd = D;
+    } else {
+        cosQ = A + D; // unnormalized rotation extracted from M
+        sinQ = C - B;
+        SkScalar reciplen = SkScalarInvert(SkScalarSqrt(cosQ*cosQ + sinQ*sinQ));
+        cosQ *= reciplen;
+        sinQ *= reciplen;
+
+        // S = Q^-1*M
+        // we don't calc Sc since it's symmetric
+        Sa = A*cosQ + C*sinQ;
+        Sb = B*cosQ + D*sinQ;
+        Sd = -B*sinQ + D*cosQ;
+    }
+
+    // Now we need to compute eigenvalues of S (our scale factors)
+    // and eigenvectors (bases for our rotation)
+    // From this, should be able to reconstruct S as U*W*U^T
+    if (SkScalarNearlyZero(SkDoubleToScalar(Sb))) {
+        // already diagonalized
+        cos1 = 1;
+        sin1 = 0;
+        w1 = Sa;
+        w2 = Sd;
+        cos2 = cosQ;
+        sin2 = sinQ;
+    } else {
+        double diff = Sa - Sd;
+        double discriminant = sqrt(diff*diff + 4.0*Sb*Sb); // always real: S is symmetric
+        double trace = Sa + Sd;
+        if (diff > 0) { // order the roots so w1 pairs with the eigenvector computed below
+            w1 = 0.5*(trace + discriminant);
+            w2 = 0.5*(trace - discriminant);
+        } else {
+            w1 = 0.5*(trace - discriminant);
+            w2 = 0.5*(trace + discriminant);
+        }
+
+        cos1 = SkDoubleToScalar(Sb); sin1 = SkDoubleToScalar(w1 - Sa); // eigenvector of S for w1, then normalized
+        SkScalar reciplen = SkScalarInvert(SkScalarSqrt(cos1*cos1 + sin1*sin1));
+        cos1 *= reciplen;
+        sin1 *= reciplen;
+
+        // rotation 2 is composition of Q and U
+        cos2 = cos1*cosQ - sin1*sinQ;
+        sin2 = sin1*cosQ + cos1*sinQ;
+
+        // rotation 1 is U^T
+        sin1 = -sin1;
+    }
+
+    if (scale) {
+        scale->fX = SkDoubleToScalar(w1);
+        scale->fY = SkDoubleToScalar(w2);
+    }
+    if (rotation1) {
+        rotation1->fX = cos1;
+        rotation1->fY = sin1;
+    }
+    if (rotation2) {
+        rotation2->fX = cos2;
+        rotation2->fY = sin2;
+    }
+
+    return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkScalar SkMatrixPriv::DifferentialAreaScale(const SkMatrix& m, const SkPoint& p) { // Local area scale factor |det J| of the projective map at point p.
+    //              [m00 m01 m02]                                 [f(u,v)]
+    // Assuming M = [m10 m11 m12], define the projected p'(u,v) = [g(u,v)] where
+    //              [m20 m12 m22]
+    //                                                        [x]     [u]
+    // f(u,v) = x(u,v) / w(u,v), g(u,v) = y(u,v) / w(u,v) and [y] = M*[v]
+    //                                                        [w]     [1]
+    //
+    // Then the differential scale factor between p = (u,v) and p' is |det J|,
+    // where J is the Jacobian for p': [df/du dg/du]
+    //                                 [df/dv dg/dv]
+    // and df/du = (w*dx/du - x*dw/du)/w^2,   dg/du = (w*dy/du - y*dw/du)/w^2
+    //     df/dv = (w*dx/dv - x*dw/dv)/w^2,   dg/dv = (w*dy/dv - y*dw/dv)/w^2
+    //
+    // From here, |det J| can be rewritten as |det J'/w^3|, where
+    //      [x     y     w    ]   [x   y   w  ]
+    // J' = [dx/du dy/du dw/du] = [m00 m10 m20]
+    //      [dx/dv dy/dv dw/dv]   [m01 m11 m21]
+    SkPoint3 xyw;
+    m.mapHomogeneousPoints(&xyw, &p, 1); // (x, y, w) for p, without the perspective divide
+
+    if (xyw.fZ < SK_ScalarNearlyZero) {
+        // Reaching the discontinuity of xy/w and where the point would clip to w >= 0
+        return SK_ScalarInfinity;
+    }
+    SkMatrix jacobian = SkMatrix::MakeAll(xyw.fX, xyw.fY, xyw.fZ, // this is J' from the derivation above
+                                          m.getScaleX(), m.getSkewY(), m.getPerspX(),
+                                          m.getSkewX(), m.getScaleY(), m.getPerspY());
+
+    double denom = 1.0 / xyw.fZ;   // 1/w
+    denom = denom * denom * denom; // 1/w^3
+    return SkScalarAbs(SkDoubleToScalar(sk_determinant(jacobian.fMat, true) * denom));
+}
+
+bool SkMatrixPriv::NearlyAffine(const SkMatrix& m,
+                                const SkRect& bounds,
+                                SkScalar tolerance) { // True if m is affine, or if its perspective distortion over bounds is within tolerance.
+    if (!m.hasPerspective()) { // truly affine: trivially yes
+        return true;
+    }
+
+    // The idea here is that we are computing the differential area scale at each corner,
+    // and comparing them with some tolerance value. If they are similar, then we can say
+    // that the transformation is nearly affine.
+
+    // We can map the four points simultaneously.
+    SkPoint quad[4];
+    bounds.toQuad(quad);
+    SkPoint3 xyw[4];
+    m.mapHomogeneousPoints(xyw, quad, 4); // corner positions in homogeneous form (no w-divide yet)
+
+    // Since the Jacobian is a 3x3 matrix, the determinant is a scalar triple product,
+    // and the initial cross product is constant across all four points.
+    SkPoint3 v1{m.getScaleX(), m.getSkewY(), m.getPerspX()}; // first two rows of the Jacobian J'
+    SkPoint3 v2{m.getSkewX(), m.getScaleY(), m.getPerspY()};
+    SkPoint3 detCrossProd = v1.cross(v2); // shared factor of det J' for every corner
+
+    // Start with the calculations at P0.
+    if (xyw[0].fZ < SK_ScalarNearlyZero) {
+        // Reaching the discontinuity of xy/w and where the point would clip to w >= 0
+        return false;
+    }
+
+    // Performing a dot product with the pre-w divide transformed point completes
+    // the scalar triple product and the determinant calculation.
+    double det = detCrossProd.dot(xyw[0]);
+    // From that we can compute the differential area scale at P0.
+    double denom = 1.0 / xyw[0].fZ;   // 1/w
+    denom = denom * denom * denom;    // 1/w^3
+    SkScalar a0 = SkScalarAbs(SkDoubleToScalar(det*denom)); // reference area scale
+
+    // Now we compare P0's scale with that at the other three points
+    tolerance *= tolerance; // squared tolerance since we're comparing area
+    for (int i = 1; i < 4; ++i) {
+        if (xyw[i].fZ < SK_ScalarNearlyZero) {
+            // Reaching the discontinuity of xy/w and where the point would clip to w >= 0
+            return false;
+        }
+
+        det = detCrossProd.dot(xyw[i]); // completing scalar triple product
+        denom = 1.0 / xyw[i].fZ;        // 1/w
+        denom = denom * denom * denom;  // 1/w^3
+        SkScalar a = SkScalarAbs(SkDoubleToScalar(det*denom));
+        if (!SkScalarNearlyEqual(a0, a, tolerance)) { // one corner scales too differently: not nearly affine
+            return false;
+        }
+    }
+
+    return true;
+}
+
+SkScalar SkMatrixPriv::ComputeResScaleForStroking(const SkMatrix& matrix) { // Resolution scale for stroke tessellation: larger basis-vector length, clamped; 1 as the safe fallback.
+    // Not sure how to handle perspective differently, so we just don't try (yet)
+    SkScalar sx = SkPoint::Length(matrix[SkMatrix::kMScaleX], matrix[SkMatrix::kMSkewY]); // mapped x basis length
+    SkScalar sy = SkPoint::Length(matrix[SkMatrix::kMSkewX],  matrix[SkMatrix::kMScaleY]); // mapped y basis length
+    if (SkScalarsAreFinite(sx, sy)) {
+        SkScalar scale = std::max(sx, sy);
+        if (scale > 0) {
+            static const SkScalar kMaxStrokeScale = 1e5f; // cap to keep downstream tessellation bounded
+            return std::min(scale, kMaxStrokeScale);
+        }
+    }
+    return 1; // non-finite or zero scale: fall back to unit resolution
+}
diff --git a/gfx/skia/skia/src/core/SkMatrixInvert.cpp b/gfx/skia/skia/src/core/SkMatrixInvert.cpp
new file mode 100644
index 0000000000..ea8d36702c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixInvert.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMatrixInvert.h"
+
+#include "include/private/base/SkFloatingPoint.h"
+
+SkScalar SkInvert2x2Matrix(const SkScalar inMatrix[4], SkScalar outMatrix[4]) {
+ double a00 = inMatrix[0];
+ double a01 = inMatrix[1];
+ double a10 = inMatrix[2];
+ double a11 = inMatrix[3];
+
+ // Calculate the determinant
+ double determinant = a00 * a11 - a01 * a10;
+ if (outMatrix) {
+ double invdet = sk_ieee_double_divide(1.0, determinant);
+ outMatrix[0] = a11 * invdet;
+ outMatrix[1] = -a01 * invdet;
+ outMatrix[2] = -a10 * invdet;
+ outMatrix[3] = a00 * invdet;
+ // If 1/det overflows to infinity (i.e. det is denormalized) or any of the inverted matrix
+ // values is non-finite, return zero to indicate a non-invertible matrix.
+ if (!SkScalarsAreFinite(outMatrix, 4)) {
+ determinant = 0.0f;
+ }
+ }
+ return determinant;
+}
+
+SkScalar SkInvert3x3Matrix(const SkScalar inMatrix[9], SkScalar outMatrix[9]) {
+ double a00 = inMatrix[0];
+ double a01 = inMatrix[1];
+ double a02 = inMatrix[2];
+ double a10 = inMatrix[3];
+ double a11 = inMatrix[4];
+ double a12 = inMatrix[5];
+ double a20 = inMatrix[6];
+ double a21 = inMatrix[7];
+ double a22 = inMatrix[8];
+
+ double b01 = a22 * a11 - a12 * a21;
+ double b11 = -a22 * a10 + a12 * a20;
+ double b21 = a21 * a10 - a11 * a20;
+
+ // Calculate the determinant
+ double determinant = a00 * b01 + a01 * b11 + a02 * b21;
+ if (outMatrix) {
+ double invdet = sk_ieee_double_divide(1.0, determinant);
+ outMatrix[0] = b01 * invdet;
+ outMatrix[1] = (-a22 * a01 + a02 * a21) * invdet;
+ outMatrix[2] = ( a12 * a01 - a02 * a11) * invdet;
+ outMatrix[3] = b11 * invdet;
+ outMatrix[4] = ( a22 * a00 - a02 * a20) * invdet;
+ outMatrix[5] = (-a12 * a00 + a02 * a10) * invdet;
+ outMatrix[6] = b21 * invdet;
+ outMatrix[7] = (-a21 * a00 + a01 * a20) * invdet;
+ outMatrix[8] = ( a11 * a00 - a01 * a10) * invdet;
+ // If 1/det overflows to infinity (i.e. det is denormalized) or any of the inverted matrix
+ // values is non-finite, return zero to indicate a non-invertible matrix.
+ if (!SkScalarsAreFinite(outMatrix, 9)) {
+ determinant = 0.0f;
+ }
+ }
+ return determinant;
+}
+
+SkScalar SkInvert4x4Matrix(const SkScalar inMatrix[16], SkScalar outMatrix[16]) {
+ double a00 = inMatrix[0];
+ double a01 = inMatrix[1];
+ double a02 = inMatrix[2];
+ double a03 = inMatrix[3];
+ double a10 = inMatrix[4];
+ double a11 = inMatrix[5];
+ double a12 = inMatrix[6];
+ double a13 = inMatrix[7];
+ double a20 = inMatrix[8];
+ double a21 = inMatrix[9];
+ double a22 = inMatrix[10];
+ double a23 = inMatrix[11];
+ double a30 = inMatrix[12];
+ double a31 = inMatrix[13];
+ double a32 = inMatrix[14];
+ double a33 = inMatrix[15];
+
+ double b00 = a00 * a11 - a01 * a10;
+ double b01 = a00 * a12 - a02 * a10;
+ double b02 = a00 * a13 - a03 * a10;
+ double b03 = a01 * a12 - a02 * a11;
+ double b04 = a01 * a13 - a03 * a11;
+ double b05 = a02 * a13 - a03 * a12;
+ double b06 = a20 * a31 - a21 * a30;
+ double b07 = a20 * a32 - a22 * a30;
+ double b08 = a20 * a33 - a23 * a30;
+ double b09 = a21 * a32 - a22 * a31;
+ double b10 = a21 * a33 - a23 * a31;
+ double b11 = a22 * a33 - a23 * a32;
+
+ // Calculate the determinant
+ double determinant = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;
+ if (outMatrix) {
+ double invdet = sk_ieee_double_divide(1.0, determinant);
+ b00 *= invdet;
+ b01 *= invdet;
+ b02 *= invdet;
+ b03 *= invdet;
+ b04 *= invdet;
+ b05 *= invdet;
+ b06 *= invdet;
+ b07 *= invdet;
+ b08 *= invdet;
+ b09 *= invdet;
+ b10 *= invdet;
+ b11 *= invdet;
+
+ outMatrix[0] = a11 * b11 - a12 * b10 + a13 * b09;
+ outMatrix[1] = a02 * b10 - a01 * b11 - a03 * b09;
+ outMatrix[2] = a31 * b05 - a32 * b04 + a33 * b03;
+ outMatrix[3] = a22 * b04 - a21 * b05 - a23 * b03;
+ outMatrix[4] = a12 * b08 - a10 * b11 - a13 * b07;
+ outMatrix[5] = a00 * b11 - a02 * b08 + a03 * b07;
+ outMatrix[6] = a32 * b02 - a30 * b05 - a33 * b01;
+ outMatrix[7] = a20 * b05 - a22 * b02 + a23 * b01;
+ outMatrix[8] = a10 * b10 - a11 * b08 + a13 * b06;
+ outMatrix[9] = a01 * b08 - a00 * b10 - a03 * b06;
+ outMatrix[10] = a30 * b04 - a31 * b02 + a33 * b00;
+ outMatrix[11] = a21 * b02 - a20 * b04 - a23 * b00;
+ outMatrix[12] = a11 * b07 - a10 * b09 - a12 * b06;
+ outMatrix[13] = a00 * b09 - a01 * b07 + a02 * b06;
+ outMatrix[14] = a31 * b01 - a30 * b03 - a32 * b00;
+ outMatrix[15] = a20 * b03 - a21 * b01 + a22 * b00;
+
+ // If 1/det overflows to infinity (i.e. det is denormalized) or any of the inverted matrix
+ // values is non-finite, return zero to indicate a non-invertible matrix.
+ if (!SkScalarsAreFinite(outMatrix, 16)) {
+ determinant = 0.0f;
+ }
+ }
+ return determinant;
+}
diff --git a/gfx/skia/skia/src/core/SkMatrixInvert.h b/gfx/skia/skia/src/core/SkMatrixInvert.h
new file mode 100644
index 0000000000..eda4cb9044
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixInvert.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixInvert_DEFINED
+#define SkMatrixInvert_DEFINED
+
+#include "include/core/SkScalar.h"
+
+/**
+ * Computes the inverse of `inMatrix`, passed in column-major order.
+ * `inMatrix` and `outMatrix` are allowed to point to the same array of scalars in memory.
+ * `outMatrix` is allowed to be null.
+ * The return value is the determinant of the input matrix. If zero is returned, the matrix was
+ * non-invertible, and `outMatrix` has been left in an indeterminate state.
+ */
+SkScalar SkInvert2x2Matrix(const SkScalar inMatrix[4], SkScalar outMatrix[4]);
+SkScalar SkInvert3x3Matrix(const SkScalar inMatrix[9], SkScalar outMatrix[9]);
+SkScalar SkInvert4x4Matrix(const SkScalar inMatrix[16], SkScalar outMatrix[16]);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrixPriv.h b/gfx/skia/skia/src/core/SkMatrixPriv.h
new file mode 100644
index 0000000000..086d840a7b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixPriv.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixPriv_DEFINE
+#define SkMatrixPriv_DEFINE
+
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "src/base/SkVx.h"
+
+#include <cstdint>
+#include <cstring>
+struct SkPoint3;
+
+class SkMatrixPriv {
+public:
+ enum {
+ // writeTo/readFromMemory will never return a value larger than this
+ kMaxFlattenSize = 9 * sizeof(SkScalar) + sizeof(uint32_t),
+ };
+
+ static size_t WriteToMemory(const SkMatrix& matrix, void* buffer) {
+ return matrix.writeToMemory(buffer);
+ }
+
+ static size_t ReadFromMemory(SkMatrix* matrix, const void* buffer, size_t length) {
+ return matrix->readFromMemory(buffer, length);
+ }
+
+ typedef SkMatrix::MapXYProc MapXYProc;
+ typedef SkMatrix::MapPtsProc MapPtsProc;
+
+
+ static MapPtsProc GetMapPtsProc(const SkMatrix& matrix) {
+ return SkMatrix::GetMapPtsProc(matrix.getType());
+ }
+
+ static MapXYProc GetMapXYProc(const SkMatrix& matrix) {
+ return SkMatrix::GetMapXYProc(matrix.getType());
+ }
+
+ /**
+ * Attempt to map the rect through the inverse of the matrix. If it is not invertible,
+ * then this returns false and dst is unchanged.
+ */
+ static bool SK_WARN_UNUSED_RESULT InverseMapRect(const SkMatrix& mx,
+ SkRect* dst, const SkRect& src) {
+ if (mx.getType() <= SkMatrix::kTranslate_Mask) {
+ SkScalar tx = mx.getTranslateX();
+ SkScalar ty = mx.getTranslateY();
+ skvx::float4 trans(tx, ty, tx, ty);
+ (skvx::float4::Load(&src.fLeft) - trans).store(&dst->fLeft);
+ return true;
+ }
+ // Insert other special-cases here (e.g. scale+translate)
+
+ // general case
+ SkMatrix inverse;
+ if (mx.invert(&inverse)) {
+ inverse.mapRect(dst, src);
+ return true;
+ }
+ return false;
+ }
+
+ /** Maps count pts, skipping stride bytes to advance from one SkPoint to the next.
+ Points are mapped by multiplying each SkPoint by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, pt = | y |
+ | G H I | | 1 |
+
+ each resulting pts SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param mx matrix used to map the points
+ @param pts storage for mapped points
+ @param stride size of record starting with SkPoint, in bytes
+ @param count number of points to transform
+ */
+ static void MapPointsWithStride(const SkMatrix& mx, SkPoint pts[], size_t stride, int count) {
+ SkASSERT(stride >= sizeof(SkPoint));
+ SkASSERT(0 == stride % sizeof(SkScalar));
+
+ SkMatrix::TypeMask tm = mx.getType();
+
+ if (SkMatrix::kIdentity_Mask == tm) {
+ return;
+ }
+ if (SkMatrix::kTranslate_Mask == tm) {
+ const SkScalar tx = mx.getTranslateX();
+ const SkScalar ty = mx.getTranslateY();
+ skvx::float2 trans(tx, ty);
+ for (int i = 0; i < count; ++i) {
+ (skvx::float2::Load(&pts->fX) + trans).store(&pts->fX);
+ pts = (SkPoint*)((intptr_t)pts + stride);
+ }
+ return;
+ }
+ // Insert other special-cases here (e.g. scale+translate)
+
+ // general case
+ SkMatrix::MapXYProc proc = mx.getMapXYProc();
+ for (int i = 0; i < count; ++i) {
+ proc(mx, pts->fX, pts->fY, pts);
+ pts = (SkPoint*)((intptr_t)pts + stride);
+ }
+ }
+
+ /** Maps src SkPoint array of length count to dst SkPoint array, skipping stride bytes
+ to advance from one SkPoint to the next.
+ Points are mapped by multiplying each SkPoint by SkMatrix. Given:
+
+ | A B C | | x |
+ Matrix = | D E F |, src = | y |
+ | G H I | | 1 |
+
+ each resulting dst SkPoint is computed as:
+
+ |A B C| |x| Ax+By+C Dx+Ey+F
+ Matrix * pt = |D E F| |y| = |Ax+By+C Dx+Ey+F Gx+Hy+I| = ------- , -------
+ |G H I| |1| Gx+Hy+I Gx+Hy+I
+
+ @param mx matrix used to map the points
+ @param dst storage for mapped points
+ @param src points to transform
+ @param stride size of record starting with SkPoint, in bytes
+ @param count number of points to transform
+ */
+ static void MapPointsWithStride(const SkMatrix& mx, SkPoint dst[], size_t dstStride,
+ const SkPoint src[], size_t srcStride, int count) {
+ SkASSERT(srcStride >= sizeof(SkPoint));
+ SkASSERT(dstStride >= sizeof(SkPoint));
+ SkASSERT(0 == srcStride % sizeof(SkScalar));
+ SkASSERT(0 == dstStride % sizeof(SkScalar));
+ for (int i = 0; i < count; ++i) {
+ mx.mapPoints(dst, src, 1);
+ src = (SkPoint*)((intptr_t)src + srcStride);
+ dst = (SkPoint*)((intptr_t)dst + dstStride);
+ }
+ }
+
+ static void MapHomogeneousPointsWithStride(const SkMatrix& mx, SkPoint3 dst[], size_t dstStride,
+ const SkPoint3 src[], size_t srcStride, int count);
+
+ static bool PostIDiv(SkMatrix* matrix, int divx, int divy) {
+ return matrix->postIDiv(divx, divy);
+ }
+
+ static bool CheapEqual(const SkMatrix& a, const SkMatrix& b) {
+ return &a == &b || 0 == memcmp(a.fMat, b.fMat, sizeof(a.fMat));
+ }
+
+ static const SkScalar* M44ColMajor(const SkM44& m) { return m.fMat; }
+
+ // This is legacy functionality that only checks the 3x3 portion. The matrix could have Z-based
+ // shear, or other complex behavior. Only use this if you're planning to use the information
+ // to accelerate some purely 2D operation.
+ static bool IsScaleTranslateAsM33(const SkM44& m) {
+ return m.rc(1,0) == 0 && m.rc(3,0) == 0 &&
+ m.rc(0,1) == 0 && m.rc(3,1) == 0 &&
+ m.rc(3,3) == 1;
+
+ }
+
+ // Map the four corners of 'r' and return the bounding box of those points. The four corners of
+ // 'r' are assumed to have z = 0 and w = 1. If the matrix has perspective, the returned
+ // rectangle will be the bounding box of the projected points after being clipped to w > 0.
+ static SkRect MapRect(const SkM44& m, const SkRect& r);
+
+ // Returns the differential area scale factor for a local point 'p' that will be transformed
+ // by 'm' (which may have perspective). If 'm' does not have perspective, this scale factor is
+ // constant regardless of 'p'; when it does have perspective, it is specific to that point.
+ //
+ // This can be crudely thought of as "device pixel area" / "local pixel area" at 'p'.
+ //
+ // Returns positive infinity if the transformed homogeneous point has w <= 0.
+ static SkScalar DifferentialAreaScale(const SkMatrix& m, const SkPoint& p);
+
+ // Determines if the transformation m applied to the bounds can be approximated by
+ // an affine transformation, i.e., the perspective part of the transformation has little
+ // visible effect.
+ static bool NearlyAffine(const SkMatrix& m,
+ const SkRect& bounds,
+ SkScalar tolerance = SK_ScalarNearlyZero);
+
+ static SkScalar ComputeResScaleForStroking(const SkMatrix& matrix);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrixProvider.h b/gfx/skia/skia/src/core/SkMatrixProvider.h
new file mode 100644
index 0000000000..3dfa5676ba
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixProvider.h
@@ -0,0 +1,68 @@
+/*
+* Copyright 2020 Google LLC
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkMatrixProvider_DEFINED
+#define SkMatrixProvider_DEFINED
+
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+
+/**
+ * All matrix providers report a flag: "localToDeviceHitsPixelCenters". This is confusing.
+ * It doesn't say anything about the actual matrix in the provider. Instead, it means: "is it safe
+ * to tweak sampling based on the contents of the matrix". In other words, does the device end of
+ * the local-to-device matrix actually map to pixels, AND are the local coordinates being fed to
+ * the shader produced by the inverse of that matrix? For a normal device, this is trivially true.
+ * The matrix may be updated via transforms, but when we draw (and the local coordinates come from
+ * rasterization of primitives against that device), we can know that the device coordinates will
+ * land on pixel centers.
+ *
+ * In a few places, the matrix provider is lying about how sampling "works". When we invoke a child
+ * from runtime effects, we give that child a matrix provider with an identity matrix. However --
+ * the coordinates being passed to that child are not the result of device -> local transformed
+ * coordinates. Runtime effects can generate coordinates arbitrarily - even though the provider has
+ * an identity matrix, we can't assume it's safe to (for example) convert linear -> nearest.
+ * Clip shaders are similar - they overwrite the local-to-device matrix (to match what it was when
+ * the clip shader was inserted). The CTM continues to change before drawing, though. In that case,
+ * the two matrices are not inverses, so the local coordinates may not land on texel centers in
+ * the clip shader.
+ *
+ * In cases where we need to inhibit filtering optimizations, use SkOverrideDeviceMatrixProvider.
+ */
+class SkMatrixProvider {
+public:
+ SkMatrixProvider(const SkMatrix& localToDevice)
+ : fLocalToDevice(localToDevice), fLocalToDevice33(localToDevice) {}
+
+ SkMatrixProvider(const SkM44& localToDevice)
+ : fLocalToDevice(localToDevice), fLocalToDevice33(localToDevice.asM33()) {}
+
+ // These should return the "same" matrix, as either a 3x3 or 4x4. Most sites in Skia still
+ // call localToDevice, and operate on SkMatrix.
+ const SkMatrix& localToDevice() const { return fLocalToDevice33; }
+ const SkM44& localToDevice44() const { return fLocalToDevice; }
+
+private:
+ friend class SkBaseDevice;
+
+ SkM44 fLocalToDevice;
+ SkMatrix fLocalToDevice33; // Cached SkMatrix version of above, for legacy usage
+};
+
+class SkPostTranslateMatrixProvider : public SkMatrixProvider {
+public:
+ SkPostTranslateMatrixProvider(const SkMatrixProvider& parent, SkScalar dx, SkScalar dy)
+ : SkMatrixProvider(SkM44::Translate(dx, dy) * parent.localToDevice44()) {}
+};
+
+class SkPreConcatMatrixProvider : public SkMatrixProvider {
+public:
+ SkPreConcatMatrixProvider(const SkMatrixProvider& parent, const SkMatrix& preMatrix)
+ : SkMatrixProvider(parent.localToDevice44() * SkM44(preMatrix)) {}
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMatrixUtils.h b/gfx/skia/skia/src/core/SkMatrixUtils.h
new file mode 100644
index 0000000000..e6a9003bed
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMatrixUtils.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrixUtils_DEFINED
+#define SkMatrixUtils_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkSize.h"
+
+class SkMatrix;
+struct SkSamplingOptions;
+
+/**
+ * Given a matrix, size and an antialias setting, return true if the computed dst-rect
+ * would align such that there is a 1-to-1 correspondence between src and dst pixels.
+ * This can be called by drawing code to see if drawBitmap can be turned into
+ * drawSprite (which is faster).
+ *
+ * The src-rect is defined to be { 0, 0, size.width(), size.height() }
+ */
+bool SkTreatAsSprite(const SkMatrix&, const SkISize& size, const SkSamplingOptions&,
+ bool isAntiAlias);
+
+/** Decomposes the upper-left 2x2 of the matrix into a rotation (represented by
+ the cosine and sine of the rotation angle), followed by a non-uniform scale,
+ followed by another rotation. If there is a reflection, one of the scale
+ factors will be negative.
+ Returns true if successful. Returns false if the matrix is degenerate.
+ */
+bool SkDecomposeUpper2x2(const SkMatrix& matrix,
+ SkPoint* rotation1,
+ SkPoint* scale,
+ SkPoint* rotation2);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMesh.cpp b/gfx/skia/skia/src/core/SkMesh.cpp
new file mode 100644
index 0000000000..c31dfb78a9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMesh.cpp
@@ -0,0 +1,925 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMesh.h"
+
+#ifdef SK_ENABLE_SKSL
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/private/SkOpts_spi.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLProgramKind.h"
+#include "include/private/base/SkMath.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkMeshPriv.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLStructDefinition.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrGpu.h"
+#include "src/gpu/ganesh/GrStagingBufferManager.h"
+#endif // defined(SK_GANESH)
+
+#include <locale>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+using Attribute = SkMeshSpecification::Attribute;
+using Varying = SkMeshSpecification::Varying;
+
+using IndexBuffer = SkMesh::IndexBuffer;
+using VertexBuffer = SkMesh::VertexBuffer;
+
+#define RETURN_FAILURE(...) return Result{nullptr, SkStringPrintf(__VA_ARGS__)}
+
+#define RETURN_ERROR(...) return std::make_tuple(false, SkStringPrintf(__VA_ARGS__))
+
+#define RETURN_SUCCESS return std::make_tuple(true, SkString{})
+
+using Uniform = SkMeshSpecification::Uniform;
+
+static std::vector<Uniform>::iterator find_uniform(std::vector<Uniform>& uniforms,
+ std::string_view name) {
+ return std::find_if(uniforms.begin(), uniforms.end(),
+ [name](const SkMeshSpecification::Uniform& u) { return u.name == name; });
+}
+
+static std::tuple<bool, SkString>
+gather_uniforms_and_check_for_main(const SkSL::Program& program,
+ std::vector<Uniform>* uniforms,
+ SkMeshSpecification::Uniform::Flags stage,
+ size_t* offset) {
+ bool foundMain = false;
+ for (const SkSL::ProgramElement* elem : program.elements()) {
+ if (elem->is<SkSL::FunctionDefinition>()) {
+ const SkSL::FunctionDefinition& defn = elem->as<SkSL::FunctionDefinition>();
+ const SkSL::FunctionDeclaration& decl = defn.declaration();
+ if (decl.isMain()) {
+ foundMain = true;
+ }
+ } else if (elem->is<SkSL::GlobalVarDeclaration>()) {
+ const SkSL::GlobalVarDeclaration& global = elem->as<SkSL::GlobalVarDeclaration>();
+ const SkSL::VarDeclaration& varDecl = global.declaration()->as<SkSL::VarDeclaration>();
+ const SkSL::Variable& var = *varDecl.var();
+ if (var.modifiers().fFlags & SkSL::Modifiers::kUniform_Flag) {
+ auto iter = find_uniform(*uniforms, var.name());
+ const auto& context = *program.fContext;
+ if (iter == uniforms->end()) {
+ uniforms->push_back(SkRuntimeEffectPriv::VarAsUniform(var, context, offset));
+ uniforms->back().flags |= stage;
+ } else {
+ // Check that the two declarations are equivalent
+ size_t ignoredOffset = 0;
+ auto uniform = SkRuntimeEffectPriv::VarAsUniform(var, context, &ignoredOffset);
+ if (uniform.isArray() != iter->isArray() ||
+ uniform.type != iter->type ||
+ uniform.count != iter->count) {
+ return {false, SkStringPrintf("Uniform %.*s declared with different types"
+ " in vertex and fragment shaders.",
+ (int)iter->name.size(), iter->name.data())};
+ }
+ if (uniform.isColor() != iter->isColor()) {
+ return {false, SkStringPrintf("Uniform %.*s declared with different color"
+ " layout in vertex and fragment shaders.",
+ (int)iter->name.size(), iter->name.data())};
+ }
+ (*iter).flags |= stage;
+ }
+ }
+ }
+ }
+ if (!foundMain) {
+ return {false, SkString("No main function found.")};
+ }
+ return {true, {}};
+}
+
+using ColorType = SkMeshSpecificationPriv::ColorType;
+
+ColorType get_fs_color_type(const SkSL::Program& fsProgram) {
+ for (const SkSL::ProgramElement* elem : fsProgram.elements()) {
+ if (elem->is<SkSL::FunctionDefinition>()) {
+ const SkSL::FunctionDefinition& defn = elem->as<SkSL::FunctionDefinition>();
+ const SkSL::FunctionDeclaration& decl = defn.declaration();
+ if (decl.isMain()) {
+ SkASSERT(decl.parameters().size() == 1 || decl.parameters().size() == 2);
+ if (decl.parameters().size() == 1) {
+ return ColorType::kNone;
+ }
+ const SkSL::Type& paramType = decl.parameters()[1]->type();
+ SkASSERT(paramType.matches(*fsProgram.fContext->fTypes.fHalf4) ||
+ paramType.matches(*fsProgram.fContext->fTypes.fFloat4));
+ return paramType.matches(*fsProgram.fContext->fTypes.fHalf4) ? ColorType::kHalf4
+ : ColorType::kFloat4;
+ }
+ }
+ }
+ SkUNREACHABLE;
+}
+
+// This is a non-exhaustive check for the validity of a variable name. The SkSL compiler will
+// actually process the name. We're just guarding against having multiple tokens embedded in the
+// name before we put it into a struct definition.
+static bool check_name(const SkString& name) {
+ if (name.isEmpty()) {
+ return false;
+ }
+ for (size_t i = 0; i < name.size(); ++i) {
+ if (name[i] != '_' && !std::isalnum(name[i], std::locale::classic())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static size_t attribute_type_size(Attribute::Type type) {
+ switch (type) {
+ case Attribute::Type::kFloat: return 4;
+ case Attribute::Type::kFloat2: return 2*4;
+ case Attribute::Type::kFloat3: return 3*4;
+ case Attribute::Type::kFloat4: return 4*4;
+ case Attribute::Type::kUByte4_unorm: return 4;
+ }
+ SkUNREACHABLE;
+}
+
+static const char* attribute_type_string(Attribute::Type type) {
+ switch (type) {
+ case Attribute::Type::kFloat: return "float";
+ case Attribute::Type::kFloat2: return "float2";
+ case Attribute::Type::kFloat3: return "float3";
+ case Attribute::Type::kFloat4: return "float4";
+ case Attribute::Type::kUByte4_unorm: return "half4";
+ }
+ SkUNREACHABLE;
+}
+
+static const char* varying_type_string(Varying::Type type) {
+ switch (type) {
+ case Varying::Type::kFloat: return "float";
+ case Varying::Type::kFloat2: return "float2";
+ case Varying::Type::kFloat3: return "float3";
+ case Varying::Type::kFloat4: return "float4";
+ case Varying::Type::kHalf: return "half";
+ case Varying::Type::kHalf2: return "half2";
+ case Varying::Type::kHalf3: return "half3";
+ case Varying::Type::kHalf4: return "half4";
+ }
+ SkUNREACHABLE;
+}
+
+std::tuple<bool, SkString>
+check_vertex_offsets_and_stride(SkSpan<const Attribute> attributes,
+ size_t stride) {
+ // Vulkan 1.0 has a minimum maximum attribute count of 2048.
+ static_assert(SkMeshSpecification::kMaxStride <= 2048);
+ // ES 2 has a max of 8.
+ static_assert(SkMeshSpecification::kMaxAttributes <= 8);
+ // Four bytes alignment is required by Metal.
+ static_assert(SkMeshSpecification::kStrideAlignment >= 4);
+ static_assert(SkMeshSpecification::kOffsetAlignment >= 4);
+ // ES2 has a minimum maximum of 8. We may need one for a broken gl_FragCoord workaround and
+ // one for local coords.
+ static_assert(SkMeshSpecification::kMaxVaryings <= 6);
+
+ if (attributes.empty()) {
+ RETURN_ERROR("At least 1 attribute is required.");
+ }
+ if (attributes.size() > SkMeshSpecification::kMaxAttributes) {
+ RETURN_ERROR("A maximum of %zu attributes is allowed.",
+ SkMeshSpecification::kMaxAttributes);
+ }
+ static_assert(SkIsPow2(SkMeshSpecification::kStrideAlignment));
+ if (stride == 0 || stride & (SkMeshSpecification::kStrideAlignment - 1)) {
+ RETURN_ERROR("Vertex stride must be a non-zero multiple of %zu.",
+ SkMeshSpecification::kStrideAlignment);
+ }
+ if (stride > SkMeshSpecification::kMaxStride) {
+ RETURN_ERROR("Stride cannot exceed %zu.", SkMeshSpecification::kMaxStride);
+ }
+ for (const auto& a : attributes) {
+ if (a.offset & (SkMeshSpecification::kOffsetAlignment - 1)) {
+ RETURN_ERROR("Attribute offset must be a multiple of %zu.",
+ SkMeshSpecification::kOffsetAlignment);
+ }
+        // This is equivalent to vertexAttributeAccessBeyondStride==VK_FALSE in
+ // VK_KHR_portability_subset. First check is to avoid overflow in second check.
+ if (a.offset >= stride || a.offset + attribute_type_size(a.type) > stride) {
+ RETURN_ERROR("Attribute offset plus size cannot exceed stride.");
+ }
+ }
+ RETURN_SUCCESS;
+}
+
+int check_for_passthrough_local_coords_and_dead_varyings(const SkSL::Program& fsProgram,
+ uint32_t* deadVaryingMask) {
+ SkASSERT(deadVaryingMask);
+
+ using namespace SkSL;
+ static constexpr int kFailed = -2;
+
+ class Visitor final : public SkSL::ProgramVisitor {
+ public:
+ Visitor(const Context& context) : fContext(context) {}
+
+ void visit(const Program& program) { ProgramVisitor::visit(program); }
+
+ int passthroughFieldIndex() const { return fPassthroughFieldIndex; }
+
+ uint32_t fieldUseMask() const { return fFieldUseMask; }
+
+ protected:
+ bool visitProgramElement(const ProgramElement& p) override {
+ if (p.is<StructDefinition>()) {
+ const auto& def = p.as<StructDefinition>();
+ if (def.type().name() == "Varyings") {
+ fVaryingsType = &def.type();
+ }
+ // No reason to keep looking at this type definition.
+ return false;
+ }
+ if (p.is<FunctionDefinition>() && p.as<FunctionDefinition>().declaration().isMain()) {
+ SkASSERT(!fVaryings);
+ fVaryings = p.as<FunctionDefinition>().declaration().parameters()[0];
+
+ SkASSERT(fVaryingsType && fVaryingsType->matches(fVaryings->type()));
+
+ fInMain = true;
+ bool result = ProgramVisitor::visitProgramElement(p);
+ fInMain = false;
+ return result;
+ }
+ return ProgramVisitor::visitProgramElement(p);
+ }
+
+ bool visitStatement(const Statement& s) override {
+ if (!fInMain) {
+ return ProgramVisitor::visitStatement(s);
+ }
+            // We should only get here if we are in main and therefore found the varyings parameter.
+ SkASSERT(fVaryings);
+ SkASSERT(fVaryingsType);
+
+ if (fPassthroughFieldIndex == kFailed) {
+ // We've already determined there are return statements that aren't passthrough
+ // or return different fields.
+ return ProgramVisitor::visitStatement(s);
+ }
+ if (!s.is<ReturnStatement>()) {
+ return ProgramVisitor::visitStatement(s);
+ }
+
+ // We just detect simple cases like "return varyings.foo;"
+ const auto& rs = s.as<ReturnStatement>();
+ SkASSERT(rs.expression());
+ if (!rs.expression()->is<FieldAccess>()) {
+ this->passthroughFailed();
+ return ProgramVisitor::visitStatement(s);
+ }
+ const auto& fa = rs.expression()->as<FieldAccess>();
+ if (!fa.base()->is<VariableReference>()) {
+ this->passthroughFailed();
+ return ProgramVisitor::visitStatement(s);
+ }
+ const auto& baseRef = fa.base()->as<VariableReference>();
+ if (baseRef.variable() != fVaryings) {
+ this->passthroughFailed();
+ return ProgramVisitor::visitStatement(s);
+ }
+ if (fPassthroughFieldIndex >= 0) {
+ // We already found an OK return statement. Check if this one returns the same
+ // field.
+ if (fa.fieldIndex() != fPassthroughFieldIndex) {
+ this->passthroughFailed();
+ return ProgramVisitor::visitStatement(s);
+ }
+ // We don't call our base class here because we don't want to hit visitExpression
+ // and mark the returned field as used.
+ return false;
+ }
+ const Type::Field& field = fVaryings->type().fields()[fa.fieldIndex()];
+ if (!field.fType->matches(*fContext.fTypes.fFloat2)) {
+ this->passthroughFailed();
+ return ProgramVisitor::visitStatement(s);
+ }
+ fPassthroughFieldIndex = fa.fieldIndex();
+ // We don't call our base class here because we don't want to hit visitExpression and
+ // mark the returned field as used.
+ return false;
+ }
+
+ bool visitExpression(const Expression& e) override {
+ // Anything before the Varyings struct is defined doesn't matter.
+ if (!fVaryingsType) {
+ return false;
+ }
+ if (!e.is<FieldAccess>()) {
+ return ProgramVisitor::visitExpression(e);
+ }
+ const auto& fa = e.as<FieldAccess>();
+ if (!fa.base()->type().matches(*fVaryingsType)) {
+ return ProgramVisitor::visitExpression(e);
+ }
+ fFieldUseMask |= 1 << fa.fieldIndex();
+ return false;
+ }
+
+ private:
+ void passthroughFailed() {
+ if (fPassthroughFieldIndex >= 0) {
+ fFieldUseMask |= 1 << fPassthroughFieldIndex;
+ }
+ fPassthroughFieldIndex = kFailed;
+ }
+
+ const Context& fContext;
+ const Type* fVaryingsType = nullptr;
+ const Variable* fVaryings = nullptr;
+ int fPassthroughFieldIndex = -1;
+ bool fInMain = false;
+ uint32_t fFieldUseMask = 0;
+ };
+
+ Visitor v(*fsProgram.fContext);
+ v.visit(fsProgram);
+ *deadVaryingMask = ~v.fieldUseMask();
+ return v.passthroughFieldIndex();
+}
+
+SkMeshSpecification::Result SkMeshSpecification::Make(SkSpan<const Attribute> attributes,
+ size_t vertexStride,
+ SkSpan<const Varying> varyings,
+ const SkString& vs,
+ const SkString& fs) {
+ return Make(attributes,
+ vertexStride,
+ varyings,
+ vs,
+ fs,
+ SkColorSpace::MakeSRGB(),
+ kPremul_SkAlphaType);
+}
+
+SkMeshSpecification::Result SkMeshSpecification::Make(SkSpan<const Attribute> attributes,
+ size_t vertexStride,
+ SkSpan<const Varying> varyings,
+ const SkString& vs,
+ const SkString& fs,
+ sk_sp<SkColorSpace> cs) {
+ return Make(attributes, vertexStride, varyings, vs, fs, std::move(cs), kPremul_SkAlphaType);
+}
+
// Fully-specified Make: synthesizes the SkSL "Attributes" and "Varyings" struct
// declarations from the user's lists, prepends them to the user's shader
// bodies, and forwards to MakeFromSourceWithStructs for compilation/validation.
SkMeshSpecification::Result SkMeshSpecification::Make(SkSpan<const Attribute> attributes,
                                                      size_t vertexStride,
                                                      SkSpan<const Varying> varyings,
                                                      const SkString& vs,
                                                      const SkString& fs,
                                                      sk_sp<SkColorSpace> cs,
                                                      SkAlphaType at) {
    // Emit one SkSL struct field per attribute.
    SkString attributesStruct("struct Attributes {\n");
    for (const auto& a : attributes) {
        attributesStruct.appendf("    %s %s;\n", attribute_type_string(a.type), a.name.c_str());
    }
    attributesStruct.append("};\n");

    // "position" is special: if the user declared it, it must be float2;
    // otherwise we add it implicitly below.
    bool userProvidedPositionVarying = false;
    for (const auto& v : varyings) {
        if (v.name.equals("position")) {
            if (v.type != Varying::Type::kFloat2) {
                return {nullptr, SkString("Varying \"position\" must have type float2.")};
            }
            userProvidedPositionVarying = true;
        }
    }

    SkSTArray<kMaxVaryings, Varying> tempVaryings;
    if (!userProvidedPositionVarying) {
        // Even though we check the # of varyings in MakeFromSourceWithStructs we check here, too,
        // to avoid overflow with + 1.
        if (varyings.size() > kMaxVaryings - 1) {
            RETURN_FAILURE("A maximum of %zu varyings is allowed.", kMaxVaryings);
        }
        for (const auto& v : varyings) {
            tempVaryings.push_back(v);
        }
        // Implicit position varying appended last; 'varyings' is re-pointed at
        // the temp copy for the remainder of this function.
        tempVaryings.push_back(Varying{Varying::Type::kFloat2, SkString("position")});
        varyings = tempVaryings;
    }

    // Emit one SkSL struct field per varying (including implicit position).
    SkString varyingStruct("struct Varyings {\n");
    for (const auto& v : varyings) {
        varyingStruct.appendf("    %s %s;\n", varying_type_string(v.type), v.name.c_str());
    }
    varyingStruct.append("};\n");

    // VS sees both structs; FS only sees Varyings.
    SkString fullVS;
    fullVS.append(varyingStruct.c_str());
    fullVS.append(attributesStruct.c_str());
    fullVS.append(vs.c_str());

    SkString fullFS;
    fullFS.append(varyingStruct.c_str());
    fullFS.append(fs.c_str());

    return MakeFromSourceWithStructs(attributes,
                                     vertexStride,
                                     varyings,
                                     fullVS,
                                     fullFS,
                                     std::move(cs),
                                     at);
}
+
// Compiles the vertex and fragment SkSL (which must already contain the
// Attributes/Varyings struct declarations), validates attribute layout,
// varying count/names, uniform declarations, and color handling, and — on
// success — produces the immutable SkMeshSpecification.
SkMeshSpecification::Result SkMeshSpecification::MakeFromSourceWithStructs(
        SkSpan<const Attribute> attributes,
        size_t stride,
        SkSpan<const Varying> varyings,
        const SkString& vs,
        const SkString& fs,
        sk_sp<SkColorSpace> cs,
        SkAlphaType at) {
    // CPU-side layout check: offsets in-bounds, aligned, non-overlapping, etc.
    if (auto [ok, error] = check_vertex_offsets_and_stride(attributes, stride); !ok) {
        return {nullptr, error};
    }

    for (const auto& a : attributes) {
        if (!check_name(a.name)) {
            RETURN_FAILURE("\"%s\" is not a valid attribute name.", a.name.c_str());
        }
    }

    if (varyings.size() > kMaxVaryings) {
        RETURN_FAILURE("A maximum of %zu varyings is allowed.", kMaxVaryings);
    }

    for (const auto& v : varyings) {
        if (!check_name(v.name)) {
            return {nullptr, SkStringPrintf("\"%s\" is not a valid varying name.", v.name.c_str())};
        }
    }

    // Uniforms from both stages are merged into one list; 'offset' tracks the
    // running byte offset across both gather calls below.
    std::vector<Uniform> uniforms;
    size_t offset = 0;

    SkSL::Compiler compiler(SkSL::ShaderCapsFactory::Standalone());

    // Disable memory pooling; this might slow down compilation slightly, but it will ensure that a
    // long-lived mesh specification doesn't waste memory.
    SkSL::ProgramSettings settings;
    settings.fUseMemoryPool = false;

    // TODO(skia:11209): Add SkCapabilities to the API, check against required version.
    std::unique_ptr<SkSL::Program> vsProgram = compiler.convertProgram(
            SkSL::ProgramKind::kMeshVertex,
            std::string(vs.c_str()),
            settings);
    if (!vsProgram) {
        RETURN_FAILURE("VS: %s", compiler.errorText().c_str());
    }

    if (auto [result, error] = gather_uniforms_and_check_for_main(
                *vsProgram,
                &uniforms,
                SkMeshSpecification::Uniform::Flags::kVertex_Flag,
                &offset);
        !result) {
        return {nullptr, std::move(error)};
    }

    if (SkSL::Analysis::CallsColorTransformIntrinsics(*vsProgram)) {
        RETURN_FAILURE("Color transform intrinsics are not permitted in custom mesh shaders");
    }

    std::unique_ptr<SkSL::Program> fsProgram = compiler.convertProgram(
            SkSL::ProgramKind::kMeshFragment,
            std::string(fs.c_str()),
            settings);

    if (!fsProgram) {
        RETURN_FAILURE("FS: %s", compiler.errorText().c_str());
    }

    if (auto [result, error] = gather_uniforms_and_check_for_main(
                *fsProgram,
                &uniforms,
                SkMeshSpecification::Uniform::Flags::kFragment_Flag,
                &offset);
        !result) {
        return {nullptr, std::move(error)};
    }

    if (SkSL::Analysis::CallsColorTransformIntrinsics(*fsProgram)) {
        RETURN_FAILURE("Color transform intrinsics are not permitted in custom mesh shaders");
    }

    // If the FS doesn't emit a color, color space/alpha type are irrelevant
    // and get normalized; if it does, both must be usable.
    ColorType ct = get_fs_color_type(*fsProgram);

    if (ct == ColorType::kNone) {
        cs = nullptr;
        at = kPremul_SkAlphaType;
    } else {
        if (!cs) {
            return {nullptr, SkString{"Must provide a color space if FS returns a color."}};
        }
        if (at == kUnknown_SkAlphaType) {
            return {nullptr, SkString{"Must provide a valid alpha type if FS returns a color."}};
        }
    }

    // Analyze the FS once up front so draws can skip dead varyings and detect
    // the local-coords passthrough case.
    uint32_t deadVaryingMask;
    int passthroughLocalCoordsVaryingIndex =
            check_for_passthrough_local_coords_and_dead_varyings(*fsProgram, &deadVaryingMask);

    if (passthroughLocalCoordsVaryingIndex >= 0) {
        SkASSERT(varyings[passthroughLocalCoordsVaryingIndex].type == Varying::Type::kFloat2);
    }

    return {sk_sp<SkMeshSpecification>(new SkMeshSpecification(attributes,
                                                               stride,
                                                               varyings,
                                                               passthroughLocalCoordsVaryingIndex,
                                                               deadVaryingMask,
                                                               std::move(uniforms),
                                                               std::move(vsProgram),
                                                               std::move(fsProgram),
                                                               ct,
                                                               std::move(cs),
                                                               at)),
            /*error=*/{}};
}
+
+SkMeshSpecification::~SkMeshSpecification() = default;
+
// Private constructor: only reachable via MakeFromSourceWithStructs, so all
// arguments have already been validated. Computes fHash over everything that
// affects compiled-program identity so specifications can be deduplicated.
SkMeshSpecification::SkMeshSpecification(
        SkSpan<const Attribute> attributes,
        size_t stride,
        SkSpan<const Varying> varyings,
        int passthroughLocalCoordsVaryingIndex,
        uint32_t deadVaryingMask,
        std::vector<Uniform> uniforms,
        std::unique_ptr<const SkSL::Program> vs,
        std::unique_ptr<const SkSL::Program> fs,
        ColorType ct,
        sk_sp<SkColorSpace> cs,
        SkAlphaType at)
        : fAttributes(attributes.begin(), attributes.end())
        , fVaryings(varyings.begin(), varyings.end())
        , fUniforms(std::move(uniforms))
        , fVS(std::move(vs))
        , fFS(std::move(fs))
        , fStride(stride)
        , fPassthroughLocalCoordsVaryingIndex(passthroughLocalCoordsVaryingIndex)
        , fDeadVaryingMask(deadVaryingMask)
        , fColorType(ct)
        , fColorSpace(std::move(cs))
        , fAlphaType(at) {
    // Chain the hash: VS source, then FS source.
    fHash = SkOpts::hash_fn(fVS->fSource->c_str(), fVS->fSource->size(), 0);
    fHash = SkOpts::hash_fn(fFS->fSource->c_str(), fFS->fSource->size(), fHash);

    // The attributes and varyings SkSL struct declarations are included in the program source.
    // However, the attribute offsets and types need to be included, the latter because the SkSL
    // struct definition has the GPU type but not the CPU data format.
    for (const auto& a : fAttributes) {
        fHash = SkOpts::hash_fn(&a.offset, sizeof(a.offset), fHash);
        fHash = SkOpts::hash_fn(&a.type, sizeof(a.type), fHash);
    }

    fHash = SkOpts::hash_fn(&stride, sizeof(stride), fHash);

    // A null color space hashes as 0.
    uint64_t csHash = fColorSpace ? fColorSpace->hash() : 0;
    fHash = SkOpts::hash_fn(&csHash, sizeof(csHash), fHash);

    auto atInt = static_cast<uint32_t>(fAlphaType);
    fHash = SkOpts::hash_fn(&atInt, sizeof(atInt), fHash);
}
+
+size_t SkMeshSpecification::uniformSize() const {
+ return fUniforms.empty() ? 0
+ : SkAlign4(fUniforms.back().offset + fUniforms.back().sizeInBytes());
+}
+
+const Uniform* SkMeshSpecification::findUniform(std::string_view name) const {
+ auto iter = std::find_if(fUniforms.begin(), fUniforms.end(), [name] (const Uniform& u) {
+ return u.name == name;
+ });
+ return iter == fUniforms.end() ? nullptr : &(*iter);
+}
+
+const Attribute* SkMeshSpecification::findAttribute(std::string_view name) const {
+ auto iter = std::find_if(fAttributes.begin(), fAttributes.end(), [name](const Attribute& a) {
+ return name.compare(a.name.c_str()) == 0;
+ });
+ return iter == fAttributes.end() ? nullptr : &(*iter);
+}
+
+const Varying* SkMeshSpecification::findVarying(std::string_view name) const {
+ auto iter = std::find_if(fVaryings.begin(), fVaryings.end(), [name](const Varying& v) {
+ return name.compare(v.name.c_str()) == 0;
+ });
+ return iter == fVaryings.end() ? nullptr : &(*iter);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
// All special members are defaulted: SkMesh's members are ref-counted smart
// pointers and trivially copyable values, so the compiler-generated
// copy/move/destroy semantics are correct (Rule of Zero).
SkMesh::SkMesh() = default;
SkMesh::~SkMesh() = default;

SkMesh::SkMesh(const SkMesh&) = default;
SkMesh::SkMesh(SkMesh&&) = default;

SkMesh& SkMesh::operator=(const SkMesh&) = default;
SkMesh& SkMesh::operator=(SkMesh&&) = default;
+
+sk_sp<IndexBuffer> SkMesh::MakeIndexBuffer(GrDirectContext* dc, const void* data, size_t size) {
+ if (!dc) {
+ return SkMeshPriv::CpuIndexBuffer::Make(data, size);
+ }
+#if defined(SK_GANESH)
+ return SkMeshPriv::GpuIndexBuffer::Make(dc, data, size);
+#else
+ return nullptr;
+#endif
+}
+
+sk_sp<IndexBuffer> SkMesh::CopyIndexBuffer(GrDirectContext* dc, sk_sp<IndexBuffer> src) {
+ if (!src) {
+ return nullptr;
+ }
+ auto* ib = static_cast<SkMeshPriv::IB*>(src.get());
+ const void* data = ib->peek();
+ if (!data) {
+ return nullptr;
+ }
+ return MakeIndexBuffer(dc, data, ib->size());
+}
+
+sk_sp<VertexBuffer> SkMesh::MakeVertexBuffer(GrDirectContext* dc, const void* data, size_t size) {
+ if (!dc) {
+ return SkMeshPriv::CpuVertexBuffer::Make(data, size);
+ }
+#if defined(SK_GANESH)
+ return SkMeshPriv::GpuVertexBuffer::Make(dc, data, size);
+#else
+ return nullptr;
+#endif
+}
+
+sk_sp<VertexBuffer> SkMesh::CopyVertexBuffer(GrDirectContext* dc, sk_sp<VertexBuffer> src) {
+ if (!src) {
+ return nullptr;
+ }
+ auto* vb = static_cast<SkMeshPriv::VB*>(src.get());
+ const void* data = vb->peek();
+ if (!data) {
+ return nullptr;
+ }
+ return MakeVertexBuffer(dc, data, vb->size());
+}
+
+SkMesh::Result SkMesh::Make(sk_sp<SkMeshSpecification> spec,
+ Mode mode,
+ sk_sp<VertexBuffer> vb,
+ size_t vertexCount,
+ size_t vertexOffset,
+ sk_sp<const SkData> uniforms,
+ const SkRect& bounds) {
+ SkMesh mesh;
+ mesh.fSpec = std::move(spec);
+ mesh.fMode = mode;
+ mesh.fVB = std::move(vb);
+ mesh.fUniforms = std::move(uniforms);
+ mesh.fVCount = vertexCount;
+ mesh.fVOffset = vertexOffset;
+ mesh.fBounds = bounds;
+ auto [valid, msg] = mesh.validate();
+ if (!valid) {
+ mesh = {};
+ }
+ return {std::move(mesh), std::move(msg)};
+}
+
+SkMesh::Result SkMesh::MakeIndexed(sk_sp<SkMeshSpecification> spec,
+ Mode mode,
+ sk_sp<VertexBuffer> vb,
+ size_t vertexCount,
+ size_t vertexOffset,
+ sk_sp<IndexBuffer> ib,
+ size_t indexCount,
+ size_t indexOffset,
+ sk_sp<const SkData> uniforms,
+ const SkRect& bounds) {
+ if (!ib) {
+ // We check this before calling validate to disambiguate from a non-indexed mesh where
+ // IB is expected to be null.
+ return {{}, SkString{"An index buffer is required."}};
+ }
+ SkMesh mesh;
+ mesh.fSpec = std::move(spec);
+ mesh.fMode = mode;
+ mesh.fVB = std::move(vb);
+ mesh.fVCount = vertexCount;
+ mesh.fVOffset = vertexOffset;
+ mesh.fIB = std::move(ib);
+ mesh.fUniforms = std::move(uniforms);
+ mesh.fICount = indexCount;
+ mesh.fIOffset = indexOffset;
+ mesh.fBounds = bounds;
+ auto [valid, msg] = mesh.validate();
+ if (!valid) {
+ mesh = {};
+ }
+ return {std::move(mesh), std::move(msg)};
+}
+
+bool SkMesh::isValid() const {
+ bool valid = SkToBool(fSpec);
+ SkASSERT(valid == std::get<0>(this->validate()));
+ return valid;
+}
+
+static size_t min_vcount_for_mode(SkMesh::Mode mode) {
+ switch (mode) {
+ case SkMesh::Mode::kTriangles: return 3;
+ case SkMesh::Mode::kTriangleStrip: return 3;
+ }
+ SkUNREACHABLE;
+}
+
+std::tuple<bool, SkString> SkMesh::validate() const {
+#define FAIL_MESH_VALIDATE(...) return std::make_tuple(false, SkStringPrintf(__VA_ARGS__))
+ if (!fSpec) {
+ FAIL_MESH_VALIDATE("SkMeshSpecification is required.");
+ }
+
+ if (!fVB) {
+ FAIL_MESH_VALIDATE("A vertex buffer is required.");
+ }
+
+ auto vb = static_cast<SkMeshPriv::VB*>(fVB.get());
+ auto ib = static_cast<SkMeshPriv::IB*>(fIB.get());
+
+ SkSafeMath sm;
+ size_t vsize = sm.mul(fSpec->stride(), fVCount);
+ if (sm.add(vsize, fVOffset) > vb->size()) {
+ FAIL_MESH_VALIDATE("The vertex buffer offset and vertex count reads beyond the end of the"
+ " vertex buffer.");
+ }
+
+ if (fVOffset%fSpec->stride() != 0) {
+ FAIL_MESH_VALIDATE("The vertex offset (%zu) must be a multiple of the vertex stride (%zu).",
+ fVOffset,
+ fSpec->stride());
+ }
+
+ if (size_t uniformSize = fSpec->uniformSize()) {
+ if (!fUniforms || fUniforms->size() < uniformSize) {
+ FAIL_MESH_VALIDATE("The uniform data is %zu bytes but must be at least %zu.",
+ fUniforms->size(),
+ uniformSize);
+ }
+ }
+
+ auto modeToStr = [](Mode m) {
+ switch (m) {
+ case Mode::kTriangles: return "triangles";
+ case Mode::kTriangleStrip: return "triangle-strip";
+ }
+ SkUNREACHABLE;
+ };
+ if (ib) {
+ if (fICount < min_vcount_for_mode(fMode)) {
+ FAIL_MESH_VALIDATE("%s mode requires at least %zu indices but index count is %zu.",
+ modeToStr(fMode),
+ min_vcount_for_mode(fMode),
+ fICount);
+ }
+ size_t isize = sm.mul(sizeof(uint16_t), fICount);
+ if (sm.add(isize, fIOffset) > ib->size()) {
+ FAIL_MESH_VALIDATE("The index buffer offset and index count reads beyond the end of the"
+ " index buffer.");
+
+ }
+ // If we allow 32 bit indices then this should enforce 4 byte alignment in that case.
+ if (!SkIsAlign2(fIOffset)) {
+ FAIL_MESH_VALIDATE("The index offset must be a multiple of 2.");
+ }
+ } else {
+ if (fVCount < min_vcount_for_mode(fMode)) {
+ FAIL_MESH_VALIDATE("%s mode requires at least %zu vertices but vertex count is %zu.",
+ modeToStr(fMode),
+ min_vcount_for_mode(fMode),
+ fICount);
+ }
+ SkASSERT(!fICount);
+ SkASSERT(!fIOffset);
+ }
+
+ if (!sm.ok()) {
+ FAIL_MESH_VALIDATE("Overflow");
+ }
+#undef FAIL_MESH_VALIDATE
+ return {true, {}};
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+static inline bool check_update(const void* data, size_t offset, size_t size, size_t bufferSize) {
+ SkSafeMath sm;
+ return data &&
+ size &&
+ SkIsAlign4(offset) &&
+ SkIsAlign4(size) &&
+ sm.add(offset, size) <= bufferSize &&
+ sm.ok();
+}
+
+bool SkMesh::IndexBuffer::update(GrDirectContext* dc,
+ const void* data,
+ size_t offset,
+ size_t size) {
+ return check_update(data, offset, size, this->size()) && this->onUpdate(dc, data, offset, size);
+}
+
+bool SkMesh::VertexBuffer::update(GrDirectContext* dc,
+ const void* data,
+ size_t offset,
+ size_t size) {
+ return check_update(data, offset, size, this->size()) && this->onUpdate(dc, data, offset, size);
+}
+
#if defined(SK_GANESH)
// Uploads 'size' bytes at 'offset' into a GPU-backed mesh buffer owned by
// 'dc'. Picks one of three paths: a deferred byte-array update when
// buffer-to-buffer transfer isn't supported, a staging-buffer slice when one
// is available, or a one-off transfer buffer otherwise. Returns false on
// context mismatch or allocation/update failure.
bool SkMeshPriv::UpdateGpuBuffer(GrDirectContext* dc,
                                 sk_sp<GrGpuBuffer> buffer,
                                 const void* data,
                                 size_t offset,
                                 size_t size) {
    // The update must go through the context that owns the buffer.
    if (!dc || dc != buffer->getContext()) {
        return false;
    }
    SkASSERT(!dc->abandoned()); // If dc is abandoned then buffer->getContext() should be null.

    if (!dc->priv().caps()->transferFromBufferToBufferSupport()) {
        // No GPU-side copy available: record a task that writes the bytes
        // directly into the buffer at flush time.
        auto ownedData = SkData::MakeWithCopy(data, size);
        dc->priv().drawingManager()->newBufferUpdateTask(std::move(ownedData),
                                                         std::move(buffer),
                                                         offset);
        return true;
    }

    sk_sp<GrGpuBuffer> tempBuffer;
    size_t tempOffset = 0;
    if (auto* sbm = dc->priv().getGpu()->stagingBufferManager()) {
        // Preferred: copy into a shared staging buffer slice.
        auto alignment = dc->priv().caps()->transferFromBufferToBufferAlignment();
        auto [sliceBuffer, sliceOffset, ptr] = sbm->allocateStagingBufferSlice(size, alignment);
        if (sliceBuffer) {
            std::memcpy(ptr, data, size);
            tempBuffer.reset(SkRef(sliceBuffer));
            tempOffset = sliceOffset;
        }
    }

    if (!tempBuffer) {
        // Fallback: allocate a dedicated CPU-to-GPU transfer buffer.
        tempBuffer = dc->priv().resourceProvider()->createBuffer(size,
                                                                 GrGpuBufferType::kXferCpuToGpu,
                                                                 kDynamic_GrAccessPattern,
                                                                 GrResourceProvider::ZeroInit::kNo);
        if (!tempBuffer) {
            return false;
        }
        if (!tempBuffer->updateData(data, 0, size, /*preserve=*/false)) {
            return false;
        }
    }

    // Record the GPU-side copy from the temp/staging buffer into the target.
    dc->priv().drawingManager()->newBufferTransferTask(std::move(tempBuffer),
                                                       tempOffset,
                                                       std::move(buffer),
                                                       offset,
                                                       size);

    return true;
}
#endif  // defined(SK_GANESH)
+
+#endif // SK_ENABLE_SKSL
diff --git a/gfx/skia/skia/src/core/SkMeshPriv.h b/gfx/skia/skia/src/core/SkMeshPriv.h
new file mode 100644
index 0000000000..a4f50e9bc4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMeshPriv.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMeshPriv_DEFINED
+#define SkMeshPriv_DEFINED
+
+#include "include/core/SkMesh.h"
+
+#ifdef SK_ENABLE_SKSL
+#include "include/core/SkData.h"
+#include "src/core/SkSLTypeShared.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrDirectContext.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrDrawingManager.h"
+#include "src/gpu/ganesh/GrGpuBuffer.h"
+#include "src/gpu/ganesh/GrResourceCache.h"
+#include "src/gpu/ganesh/GrResourceProvider.h"
+#endif
+
// Grants Skia-internal code access to SkMeshSpecification's private state and
// converts the public attribute/varying type enums into the internal SkSL and
// Ganesh vertex-attribute enums.
struct SkMeshSpecificationPriv {
    using Varying   = SkMeshSpecification::Varying;
    using Attribute = SkMeshSpecification::Attribute;
    using ColorType = SkMeshSpecification::ColorType;

    static SkSpan<const Varying> Varyings(const SkMeshSpecification& spec) {
        return SkSpan(spec.fVaryings);
    }

    static const SkSL::Program* VS(const SkMeshSpecification& spec) { return spec.fVS.get(); }
    static const SkSL::Program* FS(const SkMeshSpecification& spec) { return spec.fFS.get(); }

    static int Hash(const SkMeshSpecification& spec) { return spec.fHash; }

    static ColorType GetColorType(const SkMeshSpecification& spec) { return spec.fColorType; }
    static bool HasColors(const SkMeshSpecification& spec) {
        return GetColorType(spec) != ColorType::kNone;
    }

    static SkColorSpace* ColorSpace(const SkMeshSpecification& spec) {
        return spec.fColorSpace.get();
    }

    static SkAlphaType AlphaType(const SkMeshSpecification& spec) { return spec.fAlphaType; }

    // Maps a varying type to the corresponding SkSL type enum.
    static SkSLType VaryingTypeAsSLType(Varying::Type type) {
        switch (type) {
            case Varying::Type::kFloat:  return SkSLType::kFloat;
            case Varying::Type::kFloat2: return SkSLType::kFloat2;
            case Varying::Type::kFloat3: return SkSLType::kFloat3;
            case Varying::Type::kFloat4: return SkSLType::kFloat4;
            case Varying::Type::kHalf:   return SkSLType::kHalf;
            case Varying::Type::kHalf2:  return SkSLType::kHalf2;
            case Varying::Type::kHalf3:  return SkSLType::kHalf3;
            case Varying::Type::kHalf4:  return SkSLType::kHalf4;
        }
        SkUNREACHABLE;
    }

#if defined(SK_GANESH)
    // Maps a CPU-side attribute format to the Ganesh vertex attribute type.
    static GrVertexAttribType AttrTypeAsVertexAttribType(Attribute::Type type) {
        switch (type) {
            case Attribute::Type::kFloat:        return kFloat_GrVertexAttribType;
            case Attribute::Type::kFloat2:       return kFloat2_GrVertexAttribType;
            case Attribute::Type::kFloat3:       return kFloat3_GrVertexAttribType;
            case Attribute::Type::kFloat4:       return kFloat4_GrVertexAttribType;
            case Attribute::Type::kUByte4_unorm: return kUByte4_norm_GrVertexAttribType;
        }
        SkUNREACHABLE;
    }
#endif

    // Maps a CPU-side attribute format to the SkSL type the shader sees
    // (kUByte4_unorm is read as a normalized half4 in SkSL).
    static SkSLType AttrTypeAsSLType(Attribute::Type type) {
        switch (type) {
            case Attribute::Type::kFloat:        return SkSLType::kFloat;
            case Attribute::Type::kFloat2:       return SkSLType::kFloat2;
            case Attribute::Type::kFloat3:       return SkSLType::kFloat3;
            case Attribute::Type::kFloat4:       return SkSLType::kFloat4;
            case Attribute::Type::kUByte4_unorm: return SkSLType::kHalf4;
        }
        SkUNREACHABLE;
    }

    static int PassthroughLocalCoordsVaryingIndex(const SkMeshSpecification& spec) {
        return spec.fPassthroughLocalCoordsVaryingIndex;
    }

    /**
     * A varying is dead if it is never referenced OR it is only referenced as a passthrough for
     * local coordinates. In the latter case it's index will returned as
     * PassthroughLocalCoordsVaryingIndex. Our analysis is not very sophisticated so this is
     * determined conservatively.
     */
    static bool VaryingIsDead(const SkMeshSpecification& spec, int v) {
        SkASSERT(v >= 0 && SkToSizeT(v) < spec.fVaryings.size());
        return (1 << v) & spec.fDeadVaryingMask;
    }
};
+
// Private backing hierarchy for SkMesh's index/vertex buffers. Buffers come in
// a CPU flavor (SkData-backed) and, when Ganesh is enabled, a GPU flavor
// (GrGpuBuffer-backed); both share the Buffer interface below.
struct SkMeshPriv {
    class Buffer {
    public:
        virtual ~Buffer() = 0;

        Buffer() = default;
        Buffer(const Buffer&) = delete;

        Buffer& operator=(const Buffer&) = delete;

        // Pointer to CPU-readable contents, or null when not CPU-backed.
        virtual const void* peek() const { return nullptr; }

#if defined(SK_GANESH)
        // Underlying GPU buffer, or null when not GPU-backed.
        virtual sk_sp<const GrGpuBuffer> asGpuBuffer() const { return nullptr; }
#endif
    };

    // Join the private Buffer interface with the public buffer base classes.
    class IB : public Buffer, public SkMesh::IndexBuffer {};
    class VB : public Buffer, public SkMesh::VertexBuffer {};

    // CPU-backed buffer: contents live in an SkData.
    template <typename Base> class CpuBuffer final : public Base {
    public:
        ~CpuBuffer() override = default;

        static sk_sp<Base> Make(const void* data, size_t size);

        const void* peek() const override { return fData->data(); }

        size_t size() const override { return fData->size(); }

    private:
        CpuBuffer(sk_sp<SkData> data) : fData(std::move(data)) {}

        bool onUpdate(GrDirectContext*, const void* data, size_t offset, size_t size) override;

        sk_sp<SkData> fData;
    };

    using CpuIndexBuffer  = CpuBuffer<IB>;
    using CpuVertexBuffer = CpuBuffer<VB>;

#if defined(SK_GANESH)
    // GPU-backed buffer: contents live in a GrGpuBuffer owned by a context.
    template <typename Base, GrGpuBufferType> class GpuBuffer final : public Base {
    public:
        GpuBuffer() = default;

        ~GpuBuffer() override;

        static sk_sp<Base> Make(GrDirectContext*, const void* data, size_t size);

        sk_sp<const GrGpuBuffer> asGpuBuffer() const override { return fBuffer; }

        size_t size() const override { return fBuffer->size(); }

    private:
        bool onUpdate(GrDirectContext*, const void* data, size_t offset, size_t size) override;

        sk_sp<GrGpuBuffer> fBuffer;
        // Remembered so the destructor can return fBuffer to the right
        // context's resource cache from any thread.
        GrDirectContext::DirectContextID fContextID;
    };

    using GpuIndexBuffer  = GpuBuffer<IB, GrGpuBufferType::kIndex >;
    using GpuVertexBuffer = GpuBuffer<VB, GrGpuBufferType::kVertex>;
#endif  // defined(SK_GANESH)

private:
#if defined(SK_GANESH)
    static bool UpdateGpuBuffer(GrDirectContext*,
                                sk_sp<GrGpuBuffer>,
                                const void*,
                                size_t offset,
                                size_t size);
#endif
};
+
+inline SkMeshPriv::Buffer::~Buffer() = default;
+
+template <typename Base> sk_sp<Base> SkMeshPriv::CpuBuffer<Base>::Make(const void* data,
+ size_t size) {
+ SkASSERT(size);
+ sk_sp<SkData> storage;
+ if (data) {
+ storage = SkData::MakeWithCopy(data, size);
+ } else {
+ storage = SkData::MakeZeroInitialized(size);
+ }
+ return sk_sp<Base>(new CpuBuffer<Base>(std::move(storage)));
+}
+
+template <typename Base> bool SkMeshPriv::CpuBuffer<Base>::onUpdate(GrDirectContext* dc,
+ const void* data,
+ size_t offset,
+ size_t size) {
+ if (dc) {
+ return false;
+ }
+ std::memcpy(SkTAddOffset<void>(fData->writable_data(), offset), data, size);
+ return true;
+}
+
+#if defined(SK_GANESH)
+
// The mesh (and thus this buffer) may be destroyed on any thread, so the
// GrGpuBuffer is handed back to its owning context's resource cache, which
// handles the cross-thread return safely.
template <typename Base, GrGpuBufferType Type> SkMeshPriv::GpuBuffer<Base, Type>::~GpuBuffer() {
    GrResourceCache::ReturnResourceFromThread(std::move(fBuffer), fContextID);
}
+
// Creates a GPU-backed buffer on 'dc'. When 'data' is provided the buffer is
// filled with it; otherwise the buffer is zero-initialized by the resource
// provider. Returns null on allocation or upload failure.
template <typename Base, GrGpuBufferType Type>
sk_sp<Base> SkMeshPriv::GpuBuffer<Base, Type>::Make(GrDirectContext* dc,
                                                    const void* data,
                                                    size_t size) {
    SkASSERT(dc);

    sk_sp<GrGpuBuffer> buffer = dc->priv().resourceProvider()->createBuffer(
            size,
            Type,
            kStatic_GrAccessPattern,
            data ? GrResourceProvider::ZeroInit::kNo : GrResourceProvider::ZeroInit::kYes);
    if (!buffer) {
        return nullptr;
    }

    if (data && !buffer->updateData(data, 0, size, /*preserve=*/false)) {
        return nullptr;
    }

    // Remember the owning context so the destructor can return the buffer to
    // its resource cache from any thread.
    auto result = new GpuBuffer;
    result->fBuffer = std::move(buffer);
    result->fContextID = dc->directContextID();
    return sk_sp<Base>(result);
}
+
+
// Delegates to the shared GPU-upload helper; the range was already validated
// by Buffer::update.
template <typename Base, GrGpuBufferType Type>
bool SkMeshPriv::GpuBuffer<Base, Type>::onUpdate(GrDirectContext* dc,
                                                 const void* data,
                                                 size_t offset,
                                                 size_t size) {
    return UpdateGpuBuffer(dc, fBuffer, data, offset, size);
}
+
+#endif // defined(SK_GANESH)
+
+#endif // SK_ENABLE_SKSL
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMessageBus.h b/gfx/skia/skia/src/core/SkMessageBus.h
new file mode 100644
index 0000000000..3b78793da6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMessageBus.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMessageBus_DEFINED
+#define SkMessageBus_DEFINED
+
+#include <type_traits>
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+
+/**
+ * The following method must have a specialization for type 'Message':
+ *
+ * bool SkShouldPostMessageToBus(const Message&, IDType msgBusUniqueID)
+ *
+ * We may want to consider providing a default template implementation, to avoid this requirement by
+ * sending to all inboxes when the specialization for type 'Message' is not present.
+ */
// A global, typed publish/subscribe bus: Post() delivers a message to every
// registered Inbox whose unique ID passes SkShouldPostMessageToBus. When
// AllowCopyableMessage is false, messages are move-only and go to at most one
// inbox.
template <typename Message, typename IDType, bool AllowCopyableMessage = true>
class SkMessageBus : SkNoncopyable {
public:
    // Trait used to detect sk_sp messages (they get unique-ownership asserts).
    template <typename T> struct is_sk_sp : std::false_type {};
    template <typename T> struct is_sk_sp<sk_sp<T>> : std::true_type {};

    // We want to make sure the caller of Post() method will not keep a ref or copy of the message,
    // so the message type must be sk_sp or non copyable.
    static_assert(AllowCopyableMessage || is_sk_sp<Message>::value ||
                  !std::is_copy_constructible<Message>::value,
                  "The message type must be sk_sp or non copyable.");

    // Post a message to be received by Inboxes for this Message type. Checks
    // SkShouldPostMessageToBus() for each inbox. Threadsafe.
    static void Post(Message m);

    class Inbox {
    public:
        Inbox(IDType uniqueID);
        ~Inbox();

        IDType uniqueID() const { return fUniqueID; }

        // Overwrite out with all the messages we've received since the last call. Threadsafe.
        void poll(skia_private::TArray<Message>* out);

    private:
        skia_private::TArray<Message> fMessages;  // pending messages, guarded by fMessagesMutex
        SkMutex fMessagesMutex;
        const IDType fUniqueID;  // passed to SkShouldPostMessageToBus to filter deliveries

        friend class SkMessageBus;
        void receive(Message m);  // SkMessageBus is a friend only to call this.
    };

private:
    SkMessageBus();
    static SkMessageBus* Get();  // singleton, defined via DECLARE_SKMESSAGEBUS_MESSAGE

    SkTDArray<Inbox*> fInboxes;        // all live inboxes, guarded by fInboxesMutex
    SkMutex fInboxesMutex;
};
+
+// This must go in a single .cpp file, not some .h, or we risk creating more than one global
+// SkMessageBus per type when using shared libraries. NOTE: at most one per file will compile.
+#define DECLARE_SKMESSAGEBUS_MESSAGE(Message, IDType, AllowCopyableMessage) \
+ template <> \
+ SkMessageBus<Message, IDType, AllowCopyableMessage>* \
+ SkMessageBus<Message, IDType, AllowCopyableMessage>::Get() { \
+ static SkOnce once; \
+ static SkMessageBus<Message, IDType, AllowCopyableMessage>* bus; \
+ once([] { bus = new SkMessageBus<Message, IDType, AllowCopyableMessage>(); }); \
+ return bus; \
+ }
+
+// ----------------------- Implementation of SkMessageBus::Inbox -----------------------
+
+template <typename Message, typename IDType, bool AllowCopyableMessage>
+SkMessageBus<Message, IDType, AllowCopyableMessage>::Inbox::Inbox(IDType uniqueID)
+ : fUniqueID(uniqueID) {
+ // Register ourselves with the corresponding message bus.
+ auto* bus = SkMessageBus<Message, IDType, AllowCopyableMessage>::Get();
+ SkAutoMutexExclusive lock(bus->fInboxesMutex);
+ bus->fInboxes.push_back(this);
+}
+
+template <typename Message, typename IDType, bool AllowCopyableMessage>
+SkMessageBus<Message, IDType, AllowCopyableMessage>::Inbox::~Inbox() {
+ // Remove ourselves from the corresponding message bus.
+ auto* bus = SkMessageBus<Message, IDType, AllowCopyableMessage>::Get();
+ SkAutoMutexExclusive lock(bus->fInboxesMutex);
+ // This is a cheaper fInboxes.remove(fInboxes.find(this)) when order doesn't matter.
+ for (int i = 0; i < bus->fInboxes.size(); i++) {
+ if (this == bus->fInboxes[i]) {
+ bus->fInboxes.removeShuffle(i);
+ break;
+ }
+ }
+}
+
// Called only by SkMessageBus::Post() (a friend): queues one message for the
// next poll(). Threadsafe via fMessagesMutex.
template <typename Message, typename IDType, bool AllowCopyableMessage>
void SkMessageBus<Message, IDType, AllowCopyableMessage>::Inbox::receive(Message m) {
    SkAutoMutexExclusive lock(fMessagesMutex);
    fMessages.push_back(std::move(m));
}
+
// Transfers all messages received since the last poll into '*messages'
// (replacing its contents). Swapping under the lock keeps the critical
// section short and avoids per-message copies.
template <typename Message, typename IDType, bool AllowCopyableMessage>
void SkMessageBus<Message, IDType, AllowCopyableMessage>::Inbox::poll(
        skia_private::TArray<Message>* messages) {
    SkASSERT(messages);
    messages->clear();
    SkAutoMutexExclusive lock(fMessagesMutex);
    fMessages.swap(*messages);
}
+
+// ----------------------- Implementation of SkMessageBus -----------------------
+
+template <typename Message, typename IDType, bool AllowCopyableMessage>
+SkMessageBus<Message, IDType, AllowCopyableMessage>::SkMessageBus() = default;
+
// Delivers 'm' to every registered inbox that accepts it (per
// SkShouldPostMessageToBus). Copyable messages fan out to all interested
// inboxes; move-only messages are moved into the first interested inbox and
// delivery stops there.
template <typename Message, typename IDType, bool AllowCopyableMessage>
/*static*/ void SkMessageBus<Message, IDType, AllowCopyableMessage>::Post(Message m) {
    auto* bus = SkMessageBus<Message, IDType, AllowCopyableMessage>::Get();
    SkAutoMutexExclusive lock(bus->fInboxesMutex);
    for (int i = 0; i < bus->fInboxes.size(); i++) {
        if (SkShouldPostMessageToBus(m, bus->fInboxes[i]->fUniqueID)) {
            if constexpr (AllowCopyableMessage) {
                bus->fInboxes[i]->receive(m);
            } else {
                if constexpr (is_sk_sp<Message>::value) {
                    // The poster must hold the only reference, so moving it
                    // below truly transfers ownership.
                    SkASSERT(m->unique());
                }
                bus->fInboxes[i]->receive(std::move(m));
                break;
            }
        }
    }

    if constexpr (is_sk_sp<Message>::value && !AllowCopyableMessage) {
        // Make sure sk_sp has been sent to an inbox.
        SkASSERT(!m); // NOLINT(bugprone-use-after-move)
    }
}
+
+#endif // SkMessageBus_DEFINED
diff --git a/gfx/skia/skia/src/core/SkMipmap.cpp b/gfx/skia/skia/src/core/SkMipmap.cpp
new file mode 100644
index 0000000000..3cc9b7ce46
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipmap.cpp
@@ -0,0 +1,895 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkHalf.h"
+#include "src/base/SkMathPriv.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkMipmapBuilder.h"
+#include <new>
+
+//
+// ColorTypeFilter is the "Type" we pass to some downsample template functions.
+// It controls how we expand a pixel into a large type, with space between each component,
+// so we can then perform our simple filter (either box or triangle) and store the intermediates
+// in the expanded type.
+//
+
+struct ColorTypeFilter_8888 {
+    typedef uint32_t Type;
+    // Widen each 8-bit channel into its own 16-bit lane so per-channel sums
+    // from the box/triangle filters cannot overflow.
+    static skvx::Vec<4, uint16_t> Expand(uint32_t x) {
+        return skvx::cast<uint16_t>(skvx::byte4::Load(&x));
+    }
+    // Narrow back to 8 bits per channel; the caller has already divided by
+    // the filter weight, so values fit.
+    static uint32_t Compact(const skvx::Vec<4, uint16_t>& x) {
+        uint32_t r;
+        skvx::cast<uint8_t>(x).store(&r);
+        return r;
+    }
+};
+
+struct ColorTypeFilter_565 {
+    typedef uint16_t Type;
+    // Move the green field 16 bits away from red/blue so the fields can be
+    // summed by the filters without carrying into each other.
+    static uint32_t Expand(uint16_t x) {
+        return (x & ~SK_G16_MASK_IN_PLACE) | ((x & SK_G16_MASK_IN_PLACE) << 16);
+    }
+    static uint16_t Compact(uint32_t x) {
+        return ((x & ~SK_G16_MASK_IN_PLACE) & 0xFFFF) | ((x >> 16) & SK_G16_MASK_IN_PLACE);
+    }
+};
+
+struct ColorTypeFilter_4444 {
+    typedef uint16_t Type;
+    // The even nibbles (0xF0F) stay put and the odd nibbles are moved up
+    // 12 bits, so each 4-bit field gets room to accumulate filter sums.
+    static uint32_t Expand(uint16_t x) {
+        const uint32_t lo = x & 0xF0F;
+        const uint32_t hi = x & ~0xF0F;
+        return lo | (hi << 12);
+    }
+    // Inverse of Expand: collapse the two nibble groups back into 16 bits.
+    static uint16_t Compact(uint32_t x) {
+        const uint32_t lo = x & 0xF0F;
+        const uint32_t hi = (x >> 12) & ~0xF0F;
+        return (uint16_t)(lo | hi);
+    }
+};
+
+struct ColorTypeFilter_8 {
+    typedef uint8_t Type;
+    // A single 8-bit channel: an `unsigned` accumulator holds any filter sum
+    // directly, so no bit-spreading is needed in either direction.
+    static unsigned Expand(unsigned x) { return x; }
+    static uint8_t Compact(unsigned x) { return static_cast<uint8_t>(x); }
+};
+
+struct ColorTypeFilter_Alpha_F16 {
+    typedef uint16_t Type;
+    // Half-float alpha is filtered in float; the single half is broadcast
+    // through the 4-lane converter (only lane 0 is meaningful).
+    static skvx::float4 Expand(uint16_t x) {
+        return SkHalfToFloat_finite_ftz((uint64_t) x); // expand out to four lanes
+
+    }
+    static uint16_t Compact(const skvx::float4& x) {
+        uint64_t r;
+        SkFloatToHalf_finite_ftz(x).store(&r);
+        return r & 0xFFFF; // but ignore the extra 3 here
+    }
+};
+
+struct ColorTypeFilter_RGBA_F16 {
+    typedef uint64_t Type; // SkHalf x4
+    // Four packed halves are filtered as four float32 lanes.
+    static skvx::float4 Expand(uint64_t x) {
+        return SkHalfToFloat_finite_ftz(x);
+    }
+    static uint64_t Compact(const skvx::float4& x) {
+        uint64_t r;
+        SkFloatToHalf_finite_ftz(x).store(&r);
+        return r;
+    }
+};
+
+struct ColorTypeFilter_88 {
+    typedef uint16_t Type;
+    // Spread the two 8-bit channels 16 bits apart so filter sums (at most
+    // ~10 bits per channel) cannot carry between channels.
+    static uint32_t Expand(uint16_t x) {
+        const uint32_t lo = x & 0xFF;
+        const uint32_t hi = x & ~0xFF;
+        return lo | (hi << 8);
+    }
+    // Collapse the two 16-bit lanes back into one 8-8 pixel.
+    static uint16_t Compact(uint32_t x) {
+        const uint32_t lo = x & 0xFF;
+        const uint32_t hi = (x >> 8) & ~0xFF;
+        return (uint16_t)(lo | hi);
+    }
+};
+
+struct ColorTypeFilter_1616 {
+    typedef uint32_t Type;
+    // Spread the two 16-bit channels into separate 32-bit lanes of a uint64
+    // so per-channel filter sums cannot carry into each other.
+    // BUG FIX: the (uint64_t) cast must happen BEFORE the shift -- `x` is
+    // uint32_t, so `(x & ~0xFFFF) << 16` was evaluated in 32 bits and the
+    // high (green) channel was shifted out and lost.
+    static uint64_t Expand(uint32_t x) {
+        return (x & 0xFFFF) | ((uint64_t)(x & ~0xFFFF) << 16);
+    }
+    // BUG FIX: must return uint32_t (Type), not uint16_t -- returning 16 bits
+    // truncated away the high channel of every downsampled R16G16 pixel.
+    static uint32_t Compact(uint64_t x) {
+        return (uint32_t)((x & 0xFFFF) | ((x >> 16) & ~0xFFFF));
+    }
+};
+
+struct ColorTypeFilter_F16F16 {
+    typedef uint32_t Type;
+    // Two packed halves are filtered in float; the converter works on four
+    // lanes, so the pixel is widened to 64 bits first.
+    static skvx::float4 Expand(uint32_t x) {
+        return SkHalfToFloat_finite_ftz((uint64_t) x); // expand out to four lanes
+    }
+    static uint32_t Compact(const skvx::float4& x) {
+        uint64_t r;
+        SkFloatToHalf_finite_ftz(x).store(&r);
+        return (uint32_t) (r & 0xFFFFFFFF); // but ignore the extra 2 here
+    }
+};
+
+struct ColorTypeFilter_16161616 {
+    typedef uint64_t Type;
+    // Widen each 16-bit channel into its own 32-bit lane so filter sums
+    // cannot overflow.
+    static skvx::Vec<4, uint32_t> Expand(uint64_t x) {
+        return skvx::cast<uint32_t>(skvx::Vec<4, uint16_t>::Load(&x));
+    }
+    static uint64_t Compact(const skvx::Vec<4, uint32_t>& x) {
+        uint64_t r;
+        skvx::cast<uint16_t>(x).store(&r);
+        return r;
+    }
+};
+
+struct ColorTypeFilter_16 {
+    typedef uint16_t Type;
+    // One 16-bit channel: a uint32_t accumulator holds any filter sum, so
+    // expansion is just a widening copy.
+    static uint32_t Expand(uint16_t x) { return x; }
+    static uint16_t Compact(uint32_t x) { return static_cast<uint16_t>(x); }
+};
+
+struct ColorTypeFilter_1010102 {
+    typedef uint32_t Type;
+    // Give each 10-bit channel (and the 2-bit alpha) its own 20-bit lane in
+    // a uint64 so filter sums cannot carry between channels.
+    static uint64_t Expand(uint64_t x) {
+        const uint64_t r = (x      ) & 0x3ff;
+        const uint64_t g = (x >> 10) & 0x3ff;
+        const uint64_t b = (x >> 20) & 0x3ff;
+        const uint64_t a = (x >> 30) & 0x3;
+        return r | (g << 20) | (b << 40) | (a << 60);
+    }
+    // Collapse the 20-bit lanes back into the packed 10/10/10/2 layout.
+    static uint32_t Compact(uint64_t x) {
+        const uint64_t r = (x      ) & 0x3ff;
+        const uint64_t g = (x >> 20) & 0x3ff;
+        const uint64_t b = (x >> 40) & 0x3ff;
+        const uint64_t a = (x >> 60) & 0x3;
+        return (uint32_t)(r | (g << 10) | (b << 20) | (a << 30));
+    }
+};
+
+// 1-2-1 triangle-filter weighting. The doubled middle term is written as two
+// additions (not 2*mid) so wide vector types work, and the left-to-right
+// association is preserved for float-rounding stability.
+template <typename T> T add_121(const T& lo, const T& mid, const T& hi) {
+    return lo + mid + mid + hi;
+}
+
+// Integer "divide by 2^n" used after summing weighted pixels; the
+// skvx::float4 overload below handles the floating-point case.
+template <typename T> T shift_right(const T& value, int bits) {
+    return value >> bits;
+}
+
+// Float analogue of shift_right: multiply by the exact reciprocal of 2^bits
+// (exact because the divisor is a power of two).
+skvx::float4 shift_right(const skvx::float4& x, int bits) {
+    return x * (1.0f / (1 << bits));
+}
+
+// Integer "multiply by 2^n" counterpart of shift_right; the skvx::float4
+// overload below handles the floating-point case.
+template <typename T> T shift_left(const T& value, int bits) {
+    return value << bits;
+}
+
+// Float analogue of shift_left: multiply by 2^bits.
+skvx::float4 shift_left(const skvx::float4& x, int bits) {
+    return x * (1 << bits);
+}
+
+//
+// To produce each mip level, we need to filter down by 1/2 (e.g. 100x100 -> 50,50)
+// If the starting dimension is odd, we floor the size of the lower level (e.g. 101 -> 50)
+// In those (odd) cases, we use a triangle filter, with 1-pixel overlap between samplings,
+// else for even cases, we just use a 2x box filter.
+//
+// This produces 4 possible isotropic filters: 2x2 2x3 3x2 3x3 where WxH indicates the number of
+// src pixels we need to sample in each dimension to produce 1 dst pixel.
+//
+// OpenGL expects a full mipmap stack to contain anisotropic space as well.
+// This means a 100x1 image would continue down to a 50x1 image, 25x1 image...
+// Because of this, we need 4 more anisotropic filters: 1x2, 1x3, 2x1, 3x1.
+
+// 1x2 filter: src is 1 pixel wide, even height; each dst pixel is the box
+// average of two vertically adjacent src pixels.
+template <typename F> void downsample_1_2(void* dst, const void* src, size_t srcRB, int count) {
+    SkASSERT(count > 0);
+    auto p0 = static_cast<const typename F::Type*>(src);
+    auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+    auto d = static_cast<typename F::Type*>(dst);
+
+    for (int i = 0; i < count; ++i) {
+        auto c00 = F::Expand(p0[0]);
+        auto c10 = F::Expand(p1[0]);
+
+        auto c = c00 + c10;
+        d[i] = F::Compact(shift_right(c, 1));  // /2: weights sum to 2
+        p0 += 2;
+        p1 += 2;
+    }
+}
+
+// 1x3 filter: src is 1 pixel wide, odd height; each dst pixel is the 1-2-1
+// triangle average of three vertically adjacent src pixels.
+template <typename F> void downsample_1_3(void* dst, const void* src, size_t srcRB, int count) {
+    SkASSERT(count > 0);
+    auto p0 = static_cast<const typename F::Type*>(src);
+    auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+    auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+    auto d = static_cast<typename F::Type*>(dst);
+
+    for (int i = 0; i < count; ++i) {
+        auto c00 = F::Expand(p0[0]);
+        auto c10 = F::Expand(p1[0]);
+        auto c20 = F::Expand(p2[0]);
+
+        auto c = add_121(c00, c10, c20);
+        d[i] = F::Compact(shift_right(c, 2));  // /4: 1+2+1 weights
+        p0 += 2;
+        p1 += 2;
+        p2 += 2;
+    }
+}
+
+// 2x1 filter: src is 1 pixel tall, even width; each dst pixel is the box
+// average of two horizontally adjacent src pixels.
+template <typename F> void downsample_2_1(void* dst, const void* src, size_t srcRB, int count) {
+    SkASSERT(count > 0);
+    auto p0 = static_cast<const typename F::Type*>(src);
+    auto d = static_cast<typename F::Type*>(dst);
+
+    for (int i = 0; i < count; ++i) {
+        auto c00 = F::Expand(p0[0]);
+        auto c01 = F::Expand(p0[1]);
+
+        auto c = c00 + c01;
+        d[i] = F::Compact(shift_right(c, 1));  // /2: weights sum to 2
+        p0 += 2;
+    }
+}
+
+// 2x2 filter: both dimensions even; each dst pixel is the box average of a
+// 2x2 block of src pixels. This is the common case.
+template <typename F> void downsample_2_2(void* dst, const void* src, size_t srcRB, int count) {
+    SkASSERT(count > 0);
+    auto p0 = static_cast<const typename F::Type*>(src);
+    auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+    auto d = static_cast<typename F::Type*>(dst);
+
+    for (int i = 0; i < count; ++i) {
+        auto c00 = F::Expand(p0[0]);
+        auto c01 = F::Expand(p0[1]);
+        auto c10 = F::Expand(p1[0]);
+        auto c11 = F::Expand(p1[1]);
+
+        auto c = c00 + c10 + c01 + c11;
+        d[i] = F::Compact(shift_right(c, 2));  // /4: four equal weights
+        p0 += 2;
+        p1 += 2;
+    }
+}
+
+// 2x3 filter: even width, odd height; box horizontally, 1-2-1 triangle
+// vertically, over a 2x3 block of src pixels.
+template <typename F> void downsample_2_3(void* dst, const void* src, size_t srcRB, int count) {
+    SkASSERT(count > 0);
+    auto p0 = static_cast<const typename F::Type*>(src);
+    auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+    auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+    auto d = static_cast<typename F::Type*>(dst);
+
+    for (int i = 0; i < count; ++i) {
+        auto c00 = F::Expand(p0[0]);
+        auto c01 = F::Expand(p0[1]);
+        auto c10 = F::Expand(p1[0]);
+        auto c11 = F::Expand(p1[1]);
+        auto c20 = F::Expand(p2[0]);
+        auto c21 = F::Expand(p2[1]);
+
+        auto c = add_121(c00, c10, c20) + add_121(c01, c11, c21);
+        d[i] = F::Compact(shift_right(c, 3));  // /8: weights sum to 8
+        p0 += 2;
+        p1 += 2;
+        p2 += 2;
+    }
+}
+
+// 3x1 filter: src is 1 pixel tall, odd width; 1-2-1 triangle horizontally
+// with one pixel of overlap between adjacent samplings (c02 is reused as the
+// next iteration's c00).
+template <typename F> void downsample_3_1(void* dst, const void* src, size_t srcRB, int count) {
+    SkASSERT(count > 0);
+    auto p0 = static_cast<const typename F::Type*>(src);
+    auto d = static_cast<typename F::Type*>(dst);
+
+    auto c02 = F::Expand(p0[0]);
+    for (int i = 0; i < count; ++i) {
+        auto c00 = c02;
+        auto c01 = F::Expand(p0[1]);
+        c02 = F::Expand(p0[2]);
+
+        auto c = add_121(c00, c01, c02);
+        d[i] = F::Compact(shift_right(c, 2));  // /4: 1+2+1 weights
+        p0 += 2;
+    }
+}
+
+// 3x2 filter: odd width, even height; 1-2-1 triangle horizontally, box
+// vertically. The shared edge column (c) is carried between iterations.
+template <typename F> void downsample_3_2(void* dst, const void* src, size_t srcRB, int count) {
+    SkASSERT(count > 0);
+    auto p0 = static_cast<const typename F::Type*>(src);
+    auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+    auto d = static_cast<typename F::Type*>(dst);
+
+    // Given pixels:
+    // a0 b0 c0 d0 e0 ...
+    // a1 b1 c1 d1 e1 ...
+    // We want:
+    // (a0 + 2*b0 + c0 + a1 + 2*b1 + c1) / 8
+    // (c0 + 2*d0 + e0 + c1 + 2*d1 + e1) / 8
+    // ...
+
+    auto c0 = F::Expand(p0[0]);
+    auto c1 = F::Expand(p1[0]);
+    auto c = c0 + c1;
+    for (int i = 0; i < count; ++i) {
+        auto a = c;
+
+        auto b0 = F::Expand(p0[1]);
+        auto b1 = F::Expand(p1[1]);
+        auto b = b0 + b0 + b1 + b1;
+
+        c0 = F::Expand(p0[2]);
+        c1 = F::Expand(p1[2]);
+        c = c0 + c1;
+
+        auto sum = a + b + c;
+        d[i] = F::Compact(shift_right(sum, 3));  // /8: weights sum to 8
+        p0 += 2;
+        p1 += 2;
+    }
+}
+
+// 3x3 filter: both dimensions odd; full 1-2-1 x 1-2-1 triangle over a 3x3
+// block, with the shared edge column (c) carried between iterations.
+template <typename F> void downsample_3_3(void* dst, const void* src, size_t srcRB, int count) {
+    SkASSERT(count > 0);
+    auto p0 = static_cast<const typename F::Type*>(src);
+    auto p1 = (const typename F::Type*)((const char*)p0 + srcRB);
+    auto p2 = (const typename F::Type*)((const char*)p1 + srcRB);
+    auto d = static_cast<typename F::Type*>(dst);
+
+    // Given pixels:
+    // a0 b0 c0 d0 e0 ...
+    // a1 b1 c1 d1 e1 ...
+    // a2 b2 c2 d2 e2 ...
+    // We want:
+    // (a0 + 2*b0 + c0 + 2*a1 + 4*b1 + 2*c1 + a2 + 2*b2 + c2) / 16
+    // (c0 + 2*d0 + e0 + 2*c1 + 4*d1 + 2*e1 + c2 + 2*d2 + e2) / 16
+    // ...
+
+    auto c0 = F::Expand(p0[0]);
+    auto c1 = F::Expand(p1[0]);
+    auto c2 = F::Expand(p2[0]);
+    auto c = add_121(c0, c1, c2);
+    for (int i = 0; i < count; ++i) {
+        auto a = c;
+
+        auto b0 = F::Expand(p0[1]);
+        auto b1 = F::Expand(p1[1]);
+        auto b2 = F::Expand(p2[1]);
+        auto b = shift_left(add_121(b0, b1, b2), 1);  // middle column weight x2
+
+        c0 = F::Expand(p0[2]);
+        c1 = F::Expand(p1[2]);
+        c2 = F::Expand(p2[2]);
+        c = add_121(c0, c1, c2);
+
+        auto sum = a + b + c;
+        d[i] = F::Compact(shift_right(sum, 4));  // /16: weights sum to 16
+        p0 += 2;
+        p1 += 2;
+        p2 += 2;
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Pass-throughs to SkCachedData: one ctor for malloc-backed storage, one for
+// discardable-memory-backed storage.
+SkMipmap::SkMipmap(void* malloc, size_t size) : SkCachedData(malloc, size) {}
+SkMipmap::SkMipmap(size_t size, SkDiscardableMemory* dm) : SkCachedData(size, dm) {}
+
+SkMipmap::~SkMipmap() = default;
+
+// Bytes needed for the Level array plus all level pixels; returns 0 (treated
+// as failure by Build()) on negative counts or if the total exceeds 32 bits.
+size_t SkMipmap::AllocLevelsSize(int levelCount, size_t pixelSize) {
+    if (levelCount < 0) {
+        return 0;
+    }
+    // NOTE(review): reserves levelCount + 1 Level slots although Build() only
+    // writes levelCount entries -- presumably slack/alignment; confirm.
+    int64_t size = sk_64_mul(levelCount + 1, sizeof(Level)) + pixelSize;
+    if (!SkTFitsIn<int32_t>(size)) {
+        return 0;
+    }
+    return SkTo<int32_t>(size);
+}
+
+// Allocate (and, unless computeContents is false, fill in) the full mip chain
+// for |src|. Uses |fact| for discardable storage when provided, else malloc.
+// Returns nullptr for unsupported color types or images no larger than 1x1.
+SkMipmap* SkMipmap::Build(const SkPixmap& src, SkDiscardableFactoryProc fact,
+                          bool computeContents) {
+    typedef void FilterProc(void*, const void* srcPtr, size_t srcRB, int count);
+
+    FilterProc* proc_1_2 = nullptr;
+    FilterProc* proc_1_3 = nullptr;
+    FilterProc* proc_2_1 = nullptr;
+    FilterProc* proc_2_2 = nullptr;
+    FilterProc* proc_2_3 = nullptr;
+    FilterProc* proc_3_1 = nullptr;
+    FilterProc* proc_3_2 = nullptr;
+    FilterProc* proc_3_3 = nullptr;
+
+    const SkColorType ct = src.colorType();
+    const SkAlphaType at = src.alphaType();
+
+    // Pick the per-color-type downsample procs; color types without a
+    // ColorTypeFilter are not mipmappable and bail out with nullptr.
+    switch (ct) {
+        case kRGBA_8888_SkColorType:
+        case kBGRA_8888_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_8888>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_8888>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_8888>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_8888>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_8888>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_8888>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_8888>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_8888>;
+            break;
+        case kRGB_565_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_565>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_565>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_565>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_565>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_565>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_565>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_565>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_565>;
+            break;
+        case kARGB_4444_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_4444>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_4444>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_4444>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_4444>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_4444>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_4444>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_4444>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_4444>;
+            break;
+        case kAlpha_8_SkColorType:
+        case kGray_8_SkColorType:
+        case kR8_unorm_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_8>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_8>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_8>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_8>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_8>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_8>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_8>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_8>;
+            break;
+        case kRGBA_F16Norm_SkColorType:
+        case kRGBA_F16_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_RGBA_F16>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_RGBA_F16>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_RGBA_F16>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_RGBA_F16>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_RGBA_F16>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_RGBA_F16>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_RGBA_F16>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_RGBA_F16>;
+            break;
+        case kR8G8_unorm_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_88>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_88>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_88>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_88>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_88>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_88>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_88>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_88>;
+            break;
+        case kR16G16_unorm_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_1616>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_1616>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_1616>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_1616>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_1616>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_1616>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_1616>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_1616>;
+            break;
+        case kA16_unorm_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_16>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_16>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_16>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_16>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_16>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_16>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_16>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_16>;
+            break;
+        case kRGBA_1010102_SkColorType:
+        case kBGRA_1010102_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_1010102>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_1010102>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_1010102>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_1010102>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_1010102>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_1010102>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_1010102>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_1010102>;
+            break;
+        case kA16_float_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_Alpha_F16>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_Alpha_F16>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_Alpha_F16>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_Alpha_F16>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_Alpha_F16>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_Alpha_F16>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_Alpha_F16>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_Alpha_F16>;
+            break;
+        case kR16G16_float_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_F16F16>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_F16F16>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_F16F16>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_F16F16>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_F16F16>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_F16F16>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_F16F16>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_F16F16>;
+            break;
+        case kR16G16B16A16_unorm_SkColorType:
+            proc_1_2 = downsample_1_2<ColorTypeFilter_16161616>;
+            proc_1_3 = downsample_1_3<ColorTypeFilter_16161616>;
+            proc_2_1 = downsample_2_1<ColorTypeFilter_16161616>;
+            proc_2_2 = downsample_2_2<ColorTypeFilter_16161616>;
+            proc_2_3 = downsample_2_3<ColorTypeFilter_16161616>;
+            proc_3_1 = downsample_3_1<ColorTypeFilter_16161616>;
+            proc_3_2 = downsample_3_2<ColorTypeFilter_16161616>;
+            proc_3_3 = downsample_3_3<ColorTypeFilter_16161616>;
+            break;
+
+        case kUnknown_SkColorType:
+        case kRGB_888x_SkColorType:     // TODO: use 8888?
+        case kRGB_101010x_SkColorType:  // TODO: use 1010102?
+        case kBGR_101010x_SkColorType:  // TODO: use 1010102?
+        case kBGR_101010x_XR_SkColorType:  // TODO: use 1010102?
+        case kRGBA_F32_SkColorType:
+            return nullptr;
+
+        case kSRGBA_8888_SkColorType:  // TODO: needs careful handling
+            return nullptr;
+    }
+
+    // A 1x1 (or smaller) image has no further levels to generate.
+    if (src.width() <= 1 && src.height() <= 1) {
+        return nullptr;
+    }
+    // whip through our loop to compute the exact size needed
+    size_t size = 0;
+    int countLevels = ComputeLevelCount(src.width(), src.height());
+    for (int currentMipLevel = countLevels; currentMipLevel >= 0; currentMipLevel--) {
+        SkISize mipSize = ComputeLevelSize(src.width(), src.height(), currentMipLevel);
+        size += SkColorTypeMinRowBytes(ct, mipSize.fWidth) * mipSize.fHeight;
+    }
+
+    size_t storageSize = SkMipmap::AllocLevelsSize(countLevels, size);
+    if (0 == storageSize) {
+        return nullptr;
+    }
+
+    SkMipmap* mipmap;
+    if (fact) {
+        SkDiscardableMemory* dm = fact(storageSize);
+        if (nullptr == dm) {
+            return nullptr;
+        }
+        mipmap = new SkMipmap(storageSize, dm);
+    } else {
+        mipmap = new SkMipmap(sk_malloc_throw(storageSize), storageSize);
+    }
+
+    // init
+    mipmap->fCS = sk_ref_sp(src.info().colorSpace());
+    mipmap->fCount = countLevels;
+    mipmap->fLevels = (Level*)mipmap->writable_data();
+    SkASSERT(mipmap->fLevels);
+
+    Level* levels = mipmap->fLevels;
+    // Pixel data lives immediately after the Level array.
+    uint8_t* baseAddr = (uint8_t*)&levels[countLevels];
+    uint8_t* addr = baseAddr;
+    int width = src.width();
+    int height = src.height();
+    uint32_t rowBytes;
+    SkPixmap srcPM(src);
+
+    // Depending on architecture and other factors, the pixel data alignment may need to be as
+    // large as 8 (for F16 pixels). See the comment on SkMipmap::Level.
+    SkASSERT(SkIsAlign8((uintptr_t)addr));
+
+    for (int i = 0; i < countLevels; ++i) {
+        // Pick the filter proc from the parity of the *source* dimensions;
+        // odd dimensions use the 3-tap triangle, even use the 2-tap box.
+        FilterProc* proc;
+        if (height & 1) {
+            if (height == 1) {        // src-height is 1
+                if (width & 1) {      // src-width is 3
+                    proc = proc_3_1;
+                } else {              // src-width is 2
+                    proc = proc_2_1;
+                }
+            } else {                  // src-height is 3
+                if (width & 1) {
+                    if (width == 1) { // src-width is 1
+                        proc = proc_1_3;
+                    } else {          // src-width is 3
+                        proc = proc_3_3;
+                    }
+                } else {              // src-width is 2
+                    proc = proc_2_3;
+                }
+            }
+        } else {                      // src-height is 2
+            if (width & 1) {
+                if (width == 1) {     // src-width is 1
+                    proc = proc_1_2;
+                } else {              // src-width is 3
+                    proc = proc_3_2;
+                }
+            } else {                  // src-width is 2
+                proc = proc_2_2;
+            }
+        }
+        width = std::max(1, width >> 1);
+        height = std::max(1, height >> 1);
+        rowBytes = SkToU32(SkColorTypeMinRowBytes(ct, width));
+
+        // We make the Info w/o any colorspace, since that storage is not under our control, and
+        // will not be deleted in a controlled fashion. When the caller is given the pixmap for
+        // a given level, we augment this pixmap with fCS (which we do manage).
+        new (&levels[i].fPixmap) SkPixmap(SkImageInfo::Make(width, height, ct, at), addr, rowBytes);
+        levels[i].fScale = SkSize::Make(SkIntToScalar(width) / src.width(),
+                                        SkIntToScalar(height) / src.height());
+
+        const SkPixmap& dstPM = levels[i].fPixmap;
+        if (computeContents) {
+            const void* srcBasePtr = srcPM.addr();
+            void* dstBasePtr = dstPM.writable_addr();
+
+            const size_t srcRB = srcPM.rowBytes();
+            for (int y = 0; y < height; y++) {
+                proc(dstBasePtr, srcBasePtr, srcRB, width);
+                srcBasePtr = (char*)srcBasePtr + srcRB * 2; // jump two rows
+                dstBasePtr = (char*)dstBasePtr + dstPM.rowBytes();
+            }
+        }
+        // Each generated level becomes the source for the next one.
+        srcPM = dstPM;
+        addr += height * rowBytes;
+    }
+    SkASSERT(addr == baseAddr + size);
+
+    SkASSERT(mipmap->fLevels);
+    return mipmap;
+}
+
+// Number of levels a mipmap for baseWidth x baseHeight would contain, NOT
+// counting the base image itself. Returns 0 for degenerate/1x1 inputs.
+int SkMipmap::ComputeLevelCount(int baseWidth, int baseHeight) {
+    if (baseWidth < 1 || baseHeight < 1) {
+        return 0;
+    }
+
+    // OpenGL's spec requires that each mipmap level have height/width equal to
+    // max(1, floor(original_height / 2^i)
+    // (or original_width) where i is the mipmap level.
+    // Continue scaling down until both axes are size 1.
+
+    const int largestAxis = std::max(baseWidth, baseHeight);
+    if (largestAxis < 2) {
+        // SkMipmap::Build requires a minimum size of 2.
+        return 0;
+    }
+    const int leadingZeros = SkCLZ(static_cast<uint32_t>(largestAxis));
+    // If the value 00011010 has 3 leading 0s then it has 5 significant bits
+    // (the bits which are not leading zeros)
+    const int significantBits = (sizeof(uint32_t) * 8) - leadingZeros;
+    // This is making the assumption that the size of a byte is 8 bits
+    // and that sizeof(uint32_t)'s implementation-defined behavior is 4.
+    int mipLevelCount = significantBits;
+
+    // SkMipmap does not include the base mip level.
+    // For example, it contains levels 1-x instead of 0-x.
+    // This is because the image used to create SkMipmap is the base level.
+    // So subtract 1 from the mip level count.
+    if (mipLevelCount > 0) {
+        --mipLevelCount;
+    }
+
+    return mipLevelCount;
+}
+
+// Dimensions of generated level |level| (0 = first generated level, i.e. half
+// the base size). Returns 0x0 for invalid inputs or out-of-range levels.
+SkISize SkMipmap::ComputeLevelSize(int baseWidth, int baseHeight, int level) {
+    if (baseWidth < 1 || baseHeight < 1) {
+        return SkISize::Make(0, 0);
+    }
+
+    int maxLevelCount = ComputeLevelCount(baseWidth, baseHeight);
+    if (level >= maxLevelCount || level < 0) {
+        return SkISize::Make(0, 0);
+    }
+    // OpenGL's spec requires that each mipmap level have height/width equal to
+    // max(1, floor(original_height / 2^i)
+    // (or original_width) where i is the mipmap level.
+
+    // SkMipmap does not include the base mip level.
+    // For example, it contains levels 1-x instead of 0-x.
+    // This is because the image used to create SkMipmap is the base level.
+    // So subtract 1 from the mip level to get the index stored by SkMipmap.
+    int width = std::max(1, baseWidth >> (level + 1));
+    int height = std::max(1, baseHeight >> (level + 1));
+
+    return SkISize::Make(width, height);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns fractional level value. floor(level) is the index of the larger level.
+// < 0 means failure.
+// Returns fractional level value. floor(level) is the index of the larger level.
+// < 0 means failure.
+float SkMipmap::ComputeLevel(SkSize scaleSize) {
+    SkASSERT(scaleSize.width() >= 0 && scaleSize.height() >= 0);
+
+#ifndef SK_SUPPORT_LEGACY_ANISOTROPIC_MIPMAP_SCALE
+    // Use the smallest scale to match the GPU impl.
+    const float scale = std::min(scaleSize.width(), scaleSize.height());
+#else
+    // Ideally we'd pick the smaller scale, to match Ganesh. But ignoring one of the
+    // scales can produce some atrocious results, so for now we use the geometric mean.
+    // (https://bugs.chromium.org/p/skia/issues/detail?id=4863)
+    const float scale = sk_float_sqrt(scaleSize.width() * scaleSize.height());
+#endif
+
+    // Scales >= 1 (magnification), non-positive, or non-finite have no level.
+    if (scale >= SK_Scalar1 || scale <= 0 || !SkScalarIsFinite(scale)) {
+        return -1;
+    }
+
+    // The -0.5 bias here is to emulate GPU's sharpen mipmap option.
+    float L = std::max(-SkScalarLog2(scale) - 0.5f, 0.f);
+    if (!SkScalarIsFinite(L)) {
+        return -1;
+    }
+    return L;
+}
+
+// Picks the level nearest to the requested scale; returns false when the base
+// image should be used instead (rounded level <= 0) or levels are missing.
+bool SkMipmap::extractLevel(SkSize scaleSize, Level* levelPtr) const {
+    if (nullptr == fLevels) {
+        return false;
+    }
+
+    float L = ComputeLevel(scaleSize);
+    int level = sk_float_round2int(L);
+    if (level <= 0) {
+        return false;
+    }
+
+    // Clamp to the smallest level we actually generated.
+    if (level > fCount) {
+        level = fCount;
+    }
+    if (levelPtr) {
+        // level is 1-based here; fLevels[0] is mip level 1.
+        *levelPtr = fLevels[level - 1];
+        // need to augment with our colorspace
+        levelPtr->fPixmap.setColorSpace(fCS);
+    }
+    return true;
+}
+
+// True when this mipmap could have been built from an image with |root|'s
+// info: first level is half the root size, and color/alpha types match.
+bool SkMipmap::validForRootLevel(const SkImageInfo& root) const {
+    if (nullptr == fLevels) {
+        return false;
+    }
+
+    const SkISize dimension = root.dimensions();
+    if (dimension.width() <= 1 && dimension.height() <= 1) {
+        return false;
+    }
+
+    if (fLevels[0].fPixmap. width() != std::max(1, dimension. width() >> 1) ||
+        fLevels[0].fPixmap.height() != std::max(1, dimension.height() >> 1)) {
+        return false;
+    }
+
+    for (int i = 0; i < this->countLevels(); ++i) {
+        if (fLevels[i].fPixmap.colorType() != root.colorType() ||
+            fLevels[i].fPixmap.alphaType() != root.alphaType()) {
+            return false;
+        }
+    }
+    return true;
+}
+
+// Convenience overload: peek at the bitmap's pixels and forward to the
+// SkPixmap-based Build(); fails (nullptr) if the pixels are not accessible.
+SkMipmap* SkMipmap::Build(const SkBitmap& src, SkDiscardableFactoryProc fact) {
+    SkPixmap srcPixmap;
+    return src.peekPixels(&srcPixmap) ? Build(srcPixmap, fact) : nullptr;
+}
+
+// Number of generated levels; does not count the base image.
+int SkMipmap::countLevels() const {
+    return fCount;
+}
+
+// Fetch generated level |index| (0 == mip level 1). Returns false for a
+// missing level array (e.g. purged discardable memory) or an out-of-range
+// index; fills *levelPtr (when non-null) on success.
+bool SkMipmap::getLevel(int index, Level* levelPtr) const {
+    if (nullptr == fLevels || index < 0 || index >= fCount) {
+        return false;
+    }
+    if (levelPtr) {
+        *levelPtr = fLevels[index];
+        // Stored pixmaps carry no colorspace; attach ours on the way out.
+        levelPtr->fPixmap.setColorSpace(fCS);
+    }
+    return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkStream.h"
+#include "include/encode/SkPngEncoder.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+// Encode one level's pixels as PNG for serialization; nullptr on failure.
+static sk_sp<SkData> encode_to_data(const SkPixmap& pm) {
+    SkDynamicMemoryWStream stream;
+    if (SkPngEncoder::Encode(&stream, pm, SkPngEncoder::Options())) {
+        return stream.detachAsData();
+    }
+    return nullptr;
+}
+
+/* Format
+ count_levels:32
+ for each level, starting with the biggest (index 0 in our iterator)
+ encoded_size:32
+ encoded_data (padded)
+ */
+// Serialize all levels per the format comment above; returns nullptr if any
+// level is missing or fails to encode.
+sk_sp<SkData> SkMipmap::serialize() const {
+    const int count = this->countLevels();
+
+    SkBinaryWriteBuffer buffer;
+    buffer.write32(count);
+    for (int i = 0; i < count; ++i) {
+        Level level;
+        if (!this->getLevel(i, &level)) {
+            return nullptr;
+        }
+        sk_sp<SkData> encoded = encode_to_data(level.fPixmap);
+        if (!encoded) {
+            // BUG FIX: encode_to_data() returns nullptr on PNG-encode failure;
+            // previously that nullptr was passed straight to
+            // writeDataAsByteArray(). Fail the whole serialization instead.
+            return nullptr;
+        }
+        buffer.writeDataAsByteArray(encoded.get());
+    }
+    return buffer.snapshotAsData();
+}
+
+// Decode previously-serialized levels into |builder|'s preallocated pixmaps.
+// Returns false on any count, size, or per-level dimension mismatch.
+bool SkMipmap::Deserialize(SkMipmapBuilder* builder, const void* data, size_t length) {
+    SkReadBuffer buffer(data, length);
+
+    int count = buffer.read32();
+    if (builder->countLevels() != count) {
+        return false;
+    }
+    for (int i = 0; i < count; ++i) {
+        size_t size = buffer.read32();
+        const void* ptr = buffer.skip(size);
+        if (!ptr) {
+            return false;
+        }
+        // The SkData is a non-owning view into |buffer|'s backing store (null
+        // release proc); it is consumed within this iteration, so that's safe.
+        auto gen = SkImageGenerator::MakeFromEncoded(
+            SkData::MakeWithProc(ptr, size, nullptr, nullptr));
+        if (!gen) {
+            return false;
+        }
+
+        SkPixmap pm = builder->level(i);
+        if (gen->getInfo().dimensions() != pm.dimensions()) {
+            return false;
+        }
+        if (!gen->getPixels(pm)) {
+            return false;
+        }
+    }
+    // isValid() catches any read past the end of the input.
+    return buffer.isValid();
+}
diff --git a/gfx/skia/skia/src/core/SkMipmap.h b/gfx/skia/skia/src/core/SkMipmap.h
new file mode 100644
index 0000000000..5e1ad63faa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipmap.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMipmap_DEFINED
+#define SkMipmap_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkBitmap;
+class SkData;
+class SkDiscardableMemory;
+class SkMipmapBuilder;
+
+typedef SkDiscardableMemory* (*SkDiscardableFactoryProc)(size_t bytes);
+
+/*
+ * SkMipmap will generate mipmap levels when given a base mipmap level image.
+ *
+ * Any function which deals with mipmap levels indices will start with index 0
+ * being the first mipmap level which was generated. Said another way, it does
+ * not include the base level in its range.
+ */
+class SkMipmap : public SkCachedData {
+public:
+    ~SkMipmap() override;
+    // Allocate and fill-in a mipmap. If computeContents is false, we just allocated
+    // and compute the sizes/rowbytes, but leave the pixel-data uninitialized.
+    static SkMipmap* Build(const SkPixmap& src, SkDiscardableFactoryProc,
+                           bool computeContents = true);
+
+    // Convenience overload: peeks the bitmap's pixels, then calls the above.
+    static SkMipmap* Build(const SkBitmap& src, SkDiscardableFactoryProc);
+
+    // Determines how many levels a SkMipmap will have without creating that mipmap.
+    // This does not include the base mipmap level that the user provided when
+    // creating the SkMipmap.
+    static int ComputeLevelCount(int baseWidth, int baseHeight);
+    static int ComputeLevelCount(SkISize s) { return ComputeLevelCount(s.width(), s.height()); }
+
+    // Determines the size of a given mipmap level.
+    // |level| is an index into the generated mipmap levels. It does not include
+    // the base level. So index 0 represents mipmap level 1.
+    static SkISize ComputeLevelSize(int baseWidth, int baseHeight, int level);
+
+    // Computes the fractional level based on the scaling in X and Y.
+    static float ComputeLevel(SkSize scaleSize);
+
+    // We use a block of (possibly discardable) memory to hold an array of Level structs, followed
+    // by the pixel data for each level. On 32-bit platforms, Level would naturally be 4 byte
+    // aligned, so the pixel data could end up with 4 byte alignment. If the pixel data is F16,
+    // it must be 8 byte aligned. To ensure this, keep the Level struct 8 byte aligned as well.
+    struct alignas(8) Level {
+        SkPixmap    fPixmap;
+        SkSize      fScale; // < 1.0
+    };
+
+    // Picks the level nearest the given scale; false means "use the base".
+    bool extractLevel(SkSize scale, Level*) const;
+
+    // countLevels returns the number of mipmap levels generated (which does not
+    // include the base mipmap level).
+    int countLevels() const;
+
+    // |index| is an index into the generated mipmap levels. It does not include
+    // the base level. So index 0 represents mipmap level 1.
+    bool getLevel(int index, Level*) const;
+
+    bool validForRootLevel(const SkImageInfo&) const;
+
+    sk_sp<SkData> serialize() const;
+    static bool Deserialize(SkMipmapBuilder*, const void* data, size_t size);
+
+protected:
+    // SkCachedData hook: the backing store moved (or was purged); re-point
+    // (or null out) our Level array.
+    void onDataChange(void* oldData, void* newData) override {
+        fLevels = (Level*)newData; // could be nullptr
+    }
+
+private:
+    sk_sp<SkColorSpace> fCS;     // colorspace applied to pixmaps handed out
+    Level*              fLevels; // managed by the baseclass, may be null due to onDataChanged.
+    int                 fCount;  // number of generated levels
+
+    SkMipmap(void* malloc, size_t size);
+    SkMipmap(size_t size, SkDiscardableMemory* dm);
+
+    static size_t AllocLevelsSize(int levelCount, size_t pixelSize);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMipmapAccessor.cpp b/gfx/skia/skia/src/core/SkMipmapAccessor.cpp
new file mode 100644
index 0000000000..c6f64a30f5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipmapAccessor.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkMatrix.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkMipmapAccessor.h"
+#include "src/image/SkImage_Base.h"
+
+// Try to load from the base image, or from the cache
+// Resolution order: mips already attached to the image, then the process-wide
+// SkMipmapCache, then build-and-insert into the cache. May still return null
+// if building fails (e.g. pixels unavailable) -- callers must check.
+static sk_sp<const SkMipmap> try_load_mips(const SkImage_Base* image) {
+    sk_sp<const SkMipmap> mips = image->refMips();
+    if (!mips) {
+        // Nothing attached to the image -- look in the shared mipmap cache.
+        mips.reset(SkMipmapCache::FindAndRef(SkBitmapCacheDesc::Make(image)));
+    }
+    if (!mips) {
+        // Cache miss -- build the levels now and add them to the cache.
+        mips.reset(SkMipmapCache::AddAndRef(image));
+    }
+    return mips;
+}
+
+// Resolves the requested mipmap mode against the inverse local-to-device
+// matrix |inv|: computes a fractional level from the scale, loads the
+// upper (and, for kLinear, lower) level pixmaps, and records the matrices
+// mapping each level back to base-image coordinates. If mips are missing or
+// a level can't be fetched, the mode is progressively downgraded
+// (kLinear -> kNearest -> kNone) and the base image pixels are used.
+SkMipmapAccessor::SkMipmapAccessor(const SkImage_Base* image, const SkMatrix& inv,
+                                   SkMipmapMode requestedMode) {
+    SkMipmapMode resolvedMode = requestedMode;
+    fLowerWeight = 0;
+
+    // Fetch the base (level 0) pixels into fBaseStorage and point fUpper at them.
+    auto load_upper_from_base = [&]() {
+        // only do this once
+        if (fBaseStorage.getPixels() == nullptr) {
+            auto dContext = as_IB(image)->directContext();
+            (void)image->getROPixels(dContext, &fBaseStorage);
+            fUpper.reset(fBaseStorage.info(), fBaseStorage.getPixels(), fBaseStorage.rowBytes());
+        }
+    };
+
+    float level = 0;
+    if (requestedMode != SkMipmapMode::kNone) {
+        SkSize scale;
+        if (!inv.decomposeScale(&scale, nullptr)) {
+            // Can't extract a scale (e.g. perspective) -- fall back to no mipmapping.
+            resolvedMode = SkMipmapMode::kNone;
+        } else {
+            // |inv| maps device->local, so invert its scale to get the downscale factor.
+            level = SkMipmap::ComputeLevel({1/scale.width(), 1/scale.height()});
+            if (level <= 0) {
+                // Not downscaling -- the base level is already the best choice.
+                resolvedMode = SkMipmapMode::kNone;
+                level = 0;
+            }
+        }
+    }
+
+    // Matrix mapping a level's pixmap coordinates to base-image coordinates.
+    auto scale = [image](const SkPixmap& pm) {
+        return SkMatrix::Scale(SkIntToScalar(pm.width()) / image->width(),
+                               SkIntToScalar(pm.height()) / image->height());
+    };
+
+    // Nearest mode uses this level, so we round to pick the nearest. In linear mode we use this
+    // level as the lower of the two to interpolate between, so we take the floor.
+    int levelNum = resolvedMode == SkMipmapMode::kNearest ? sk_float_round2int(level)
+                                                          : sk_float_floor2int(level);
+    float lowerWeight = level - levelNum; // fract(level)
+    SkASSERT(levelNum >= 0);
+
+    if (levelNum == 0) {
+        load_upper_from_base();
+    }
+    // load fCurrMip if needed
+    if (levelNum > 0 || (resolvedMode == SkMipmapMode::kLinear && lowerWeight > 0)) {
+        fCurrMip = try_load_mips(image);
+        if (!fCurrMip) {
+            load_upper_from_base();
+            resolvedMode = SkMipmapMode::kNone;
+        } else {
+            SkMipmap::Level levelRec;
+
+            SkASSERT(resolvedMode != SkMipmapMode::kNone);
+            if (levelNum > 0) {
+                // getLevel() is 0-based for generated levels: index 0 == mip level 1,
+                // hence the -1 here.
+                if (fCurrMip->getLevel(levelNum - 1, &levelRec)) {
+                    fUpper = levelRec.fPixmap;
+                } else {
+                    load_upper_from_base();
+                    resolvedMode = SkMipmapMode::kNone;
+                }
+            }
+
+            if (resolvedMode == SkMipmapMode::kLinear) {
+                // The lower level is one step below the upper (index levelNum == mip levelNum+1).
+                if (fCurrMip->getLevel(levelNum, &levelRec)) {
+                    fLower = levelRec.fPixmap;
+                    fLowerWeight = lowerWeight;
+                    fLowerInv = scale(fLower);
+                } else {
+                    // No lower level available -- degrade to nearest with just fUpper.
+                    resolvedMode = SkMipmapMode::kNearest;
+                }
+            }
+        }
+    }
+    fUpperInv = scale(fUpper);
+}
+
+// Factory: arena-allocates an accessor, so its lifetime is tied to |alloc|
+// and the caller never destroys it explicitly.
+SkMipmapAccessor* SkMipmapAccessor::Make(SkArenaAlloc* alloc, const SkImage* image,
+                                         const SkMatrix& inv, SkMipmapMode mipmap) {
+    auto* access = alloc->make<SkMipmapAccessor>(as_IB(image), inv, mipmap);
+    // return null if we failed to get the level (so the caller won't try to use it)
+    return access->fUpper.addr() ? access : nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkMipmapAccessor.h b/gfx/skia/skia/src/core/SkMipmapAccessor.h
new file mode 100644
index 0000000000..36b742a27a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipmapAccessor.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMipmapAccessor_DEFINED
+#define SkMipmapAccessor_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkMatrix.h"
+#include "src/core/SkMipmap.h"
+#include <tuple>
+
+class SkImage_Base;
+
+// Resolves a mipmap level (or pair of levels for linear filtering) for an
+// image given the inverse view matrix, and keeps the backing pixel storage
+// alive for as long as the accessor exists.
+class SkMipmapAccessor : ::SkNoncopyable {
+public:
+    // Returns null on failure
+    static SkMipmapAccessor* Make(SkArenaAlloc*, const SkImage*, const SkMatrix& inv, SkMipmapMode);
+
+    // The chosen (upper) level and the matrix mapping it to base-image coords.
+    std::pair<SkPixmap, SkMatrix> level() const {
+        SkASSERT(fUpper.addr() != nullptr);
+        return std::make_pair(fUpper, fUpperInv);
+    }
+
+    // The next-smaller level; only valid in linear mode (see lowerWeight()).
+    std::pair<SkPixmap, SkMatrix> lowerLevel() const {
+        SkASSERT(fLower.addr() != nullptr);
+        return std::make_pair(fLower, fLowerInv);
+    }
+
+    // 0....1. Will be 0 if there is no lowerLevel
+    float lowerWeight() const { return fLowerWeight; }
+
+private:
+    SkPixmap fUpper,
+             fLower; // only valid for mip_linear
+    float    fLowerWeight;   // lower * weight + upper * (1 - weight)
+    SkMatrix fUpperInv,
+             fLowerInv;
+
+    // these manage lifetime for the buffers
+    SkBitmap fBaseStorage;
+    sk_sp<const SkMipmap> fCurrMip;
+
+public:
+    // Don't call publicly -- this is only public for SkArenaAlloc to access it inside Make()
+    SkMipmapAccessor(const SkImage_Base*, const SkMatrix& inv, SkMipmapMode requestedMode);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkMipmapBuilder.cpp b/gfx/skia/skia/src/core/SkMipmapBuilder.cpp
new file mode 100644
index 0000000000..a83b503ce2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipmapBuilder.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkMipmapBuilder.h"
+
+#include "include/core/SkImage.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkMipmap.h"
+struct SkImageInfo;
+
+// Builds the full mip chain for an image of the given dimensions. The pixmap
+// passed to Build() has null pixels, so the levels are allocated but their
+// contents are meant to be filled in by the caller via level().
+SkMipmapBuilder::SkMipmapBuilder(const SkImageInfo& info) {
+    fMM = sk_sp<SkMipmap>(SkMipmap::Build({info, nullptr, 0}, nullptr, false));
+}
+
+SkMipmapBuilder::~SkMipmapBuilder() {}
+
+// Number of generated levels (excludes the base); 0 if Build() failed.
+int SkMipmapBuilder::countLevels() const {
+    return fMM ? fMM->countLevels() : 0;
+}
+
+// Returns the writable pixmap for generated level |index| (0 == mip level 1),
+// or an empty SkPixmap if the index is out of range or Build() failed.
+SkPixmap SkMipmapBuilder::level(int index) const {
+    SkPixmap pm;
+
+    SkMipmap::Level level;
+    if (fMM && fMM->getLevel(index, &level)) {
+        pm = level.fPixmap;
+    }
+    return pm;
+}
+
+// Hands the built levels to the image; see the header for the compatibility
+// contract (returns null if the levels don't match src).
+sk_sp<SkImage> SkMipmapBuilder::attachTo(const SkImage* src) {
+    return src->withMipmaps(fMM);
+}
diff --git a/gfx/skia/skia/src/core/SkMipmapBuilder.h b/gfx/skia/skia/src/core/SkMipmapBuilder.h
new file mode 100644
index 0000000000..1e76ed4f70
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkMipmapBuilder.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMipmapBuilder_DEFINED
+#define SkMipmapBuilder_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+class SkImage;
+class SkMipmap;
+class SkPixmap;
+struct SkImageInfo;
+
+// Helper for constructing a custom mip chain: allocates the levels for a
+// given SkImageInfo, exposes each level for the client to fill in, and can
+// then attach the finished chain to an SkImage.
+class SkMipmapBuilder {
+public:
+    SkMipmapBuilder(const SkImageInfo&);
+    ~SkMipmapBuilder();
+
+    // Number of generated levels (does not include the base).
+    int countLevels() const;
+    // Writable pixmap for generated level |index|; empty pixmap if invalid.
+    SkPixmap level(int index) const;
+
+    /**
+     *  If these levels are compatible with src, return a new Image that combines src's base level
+     *  with these levels as mip levels. If not compatible, this returns nullptr.
+     */
+    sk_sp<SkImage> attachTo(const SkImage* src);
+
+    // Convenience overload for callers holding an sk_sp.
+    sk_sp<SkImage> attachTo(sk_sp<SkImage> src) {
+        return this->attachTo(src.get());
+    }
+
+private:
+    sk_sp<SkMipmap> fMM;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkModeColorFilter.cpp b/gfx/skia/skia/src/core/SkModeColorFilter.cpp
new file mode 100644
index 0000000000..322db1b5b9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkModeColorFilter.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/private/SkColorData.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+// Converts |c| (treated as unpremul in color space |src|) into |dst|'s color
+// space with alpha type kDstAT (premul by default). The conversion is applied
+// in place on the returned value's channels.
+template <SkAlphaType kDstAT = kPremul_SkAlphaType>
+static SkRGBA4f<kDstAT> map_color(const SkColor4f& c, SkColorSpace* src, SkColorSpace* dst) {
+    SkRGBA4f<kDstAT> color = {c.fR, c.fG, c.fB, c.fA};
+    SkColorSpaceXformSteps(src, kUnpremul_SkAlphaType,
+                           dst, kDstAT).apply(color.vec());
+    return color;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Color filter that blends a fixed color over/into the source using an
+// SkBlendMode: output = blend(fColor, input). Backed by CPU (raster pipeline,
+// skvm) plus optional Ganesh/Graphite GPU implementations.
+class SkModeColorFilter final : public SkColorFilterBase {
+public:
+    SkModeColorFilter(const SkColor4f& color, SkBlendMode mode);
+
+    bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const override;
+
+    bool onIsAlphaUnchanged() const override;
+
+#if defined(SK_GANESH)
+    GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+                                   GrRecordingContext*,
+                                   const GrColorInfo&,
+                                   const SkSurfaceProps&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+    void addToKey(const skgpu::graphite::KeyContext&,
+                  skgpu::graphite::PaintParamsKeyBuilder*,
+                  skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+private:
+    friend void ::SkRegisterModeColorFilterFlattenable();
+    SK_FLATTENABLE_HOOKS(SkModeColorFilter)
+
+    void flatten(SkWriteBuffer&) const override;
+    bool onAsAColorMode(SkColor*, SkBlendMode*) const override;
+
+    skvm::Color onProgram(skvm::Builder*, skvm::Color,
+                          const SkColorInfo&, skvm::Uniforms*, SkArenaAlloc*) const override;
+
+    SkColor4f fColor; // always stored in sRGB
+    SkBlendMode fMode;
+};
+
+// |color| must already be in sRGB (see SkColorFilters::Blend, which converts
+// before construction).
+SkModeColorFilter::SkModeColorFilter(const SkColor4f& color,
+                                     SkBlendMode mode)
+        : fColor(color)
+        , fMode(mode) {}
+
+// Reports this filter as a simple (color, mode) pair. Either out-param may be
+// null if the caller only wants the other. Always succeeds for this subclass.
+bool SkModeColorFilter::onAsAColorMode(SkColor* color, SkBlendMode* mode) const {
+    if (color) {
+        *color = fColor.toSkColor();
+    }
+    if (mode) {
+        *mode = fMode;
+    }
+    return true;
+}
+
+// True only for modes whose result alpha is exactly the dst (input) alpha,
+// letting callers skip alpha-dependent work.
+bool SkModeColorFilter::onIsAlphaUnchanged() const {
+    switch (fMode) {
+        case SkBlendMode::kDst:      //!< [Da, Dc]
+        case SkBlendMode::kSrcATop:  //!< [Da, Sc * Da + (1 - Sa) * Dc]
+            return true;
+        default:
+            break;
+    }
+    return false;
+}
+
+// Serialization: a 4-float sRGB color followed by the mode as a uint.
+// Must stay in sync with CreateProc below.
+void SkModeColorFilter::flatten(SkWriteBuffer& buffer) const {
+    buffer.writeColor4f(fColor);
+    buffer.writeUInt((int) fMode);
+}
+
+// Deserialization counterpart of flatten(). Older pictures (before the
+// kBlend4fColorFilter version) stored an 8-bit SkColor; newer ones store a
+// full SkColor4f. Both are interpreted as sRGB.
+sk_sp<SkFlattenable> SkModeColorFilter::CreateProc(SkReadBuffer& buffer) {
+    if (buffer.isVersionLT(SkPicturePriv::kBlend4fColorFilter)) {
+        // Color is 8-bit, sRGB
+        SkColor color = buffer.readColor();
+        SkBlendMode mode = (SkBlendMode)buffer.readUInt();
+        return SkColorFilters::Blend(SkColor4f::FromColor(color), /*sRGB*/nullptr, mode);
+    } else {
+        // Color is 32-bit, sRGB
+        SkColor4f color;
+        buffer.readColor4f(&color);
+        SkBlendMode mode = (SkBlendMode)buffer.readUInt();
+        return SkColorFilters::Blend(color, /*sRGB*/nullptr, mode);
+    }
+}
+
+// Raster-pipeline implementation: moves the incoming color to dst, loads the
+// filter color (converted to the pipeline's dst color space) as src, then
+// appends the blend stages for fMode. Always succeeds.
+bool SkModeColorFilter::appendStages(const SkStageRec& rec, bool shaderIsOpaque) const {
+    rec.fPipeline->append(SkRasterPipelineOp::move_src_dst);
+    SkPMColor4f color = map_color(fColor, sk_srgb_singleton(), rec.fDstCS);
+    rec.fPipeline->append_constant_color(rec.fAlloc, color.vec());
+    SkBlendMode_AppendStages(fMode, rec.fPipeline);
+    return true;
+}
+
+// skvm implementation: blend(fMode, filterColor, inputColor) with the filter
+// color uniform-loaded after conversion to the destination color space.
+skvm::Color SkModeColorFilter::onProgram(skvm::Builder* p, skvm::Color c,
+                                         const SkColorInfo& dstInfo,
+                                         skvm::Uniforms* uniforms, SkArenaAlloc*) const {
+    SkPMColor4f color = map_color(fColor, sk_srgb_singleton(), dstInfo.colorSpace());
+    // The blend program operates on this as if it were premul but the API takes an SkColor4f
+    skvm::Color dst = c,
+                src = p->uniformColor({color.fR, color.fG, color.fB, color.fA}, uniforms);
+    return p->blend(fMode, src,dst);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#if defined(SK_GANESH)
+#include "src/gpu/Blend.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/effects/GrBlendFragmentProcessor.h"
+
+// Ganesh implementation: wraps the input FP in a blend FP that composites the
+// (dst-color-space-converted) filter color with mode fMode.
+GrFPResult SkModeColorFilter::asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+                                                  GrRecordingContext*,
+                                                  const GrColorInfo& dstColorInfo,
+                                                  const SkSurfaceProps& props) const {
+    if (fMode == SkBlendMode::kDst) {
+        // If the blend mode is "dest," the blend color won't factor into it at all.
+        // We can return the input FP as-is.
+        return GrFPSuccess(std::move(inputFP));
+    }
+
+    SkDEBUGCODE(const bool fpHasConstIO = !inputFP || inputFP->hasConstantOutputForConstantInput();)
+
+    SkPMColor4f color = map_color(fColor, sk_srgb_singleton(), dstColorInfo.colorSpace());
+
+    auto colorFP = GrFragmentProcessor::MakeColor(color);
+    auto xferFP = GrBlendFragmentProcessor::Make(std::move(colorFP), std::move(inputFP), fMode);
+
+    if (xferFP == nullptr) {
+        // This is only expected to happen if the blend mode is "dest" and the input FP is null.
+        // Since we already did an early-out in the "dest" blend mode case, we shouldn't get here.
+        SkDEBUGFAIL("GrBlendFragmentProcessor::Make returned null unexpectedly");
+        return GrFPFailure(nullptr);
+    }
+
+    // With a solid color input this should always be able to compute the blended color
+    // (at least for coeff modes).
+    // Occasionally, we even do better than we started; specifically, in "src" blend mode, we end up
+    // ditching the input FP entirely, which turns a non-constant operation into a constant one.
+    SkASSERT(fMode > SkBlendMode::kLastCoeffMode ||
+             xferFP->hasConstantOutputForConstantInput() >= fpHasConstIO);
+
+    return GrFPSuccess(std::move(xferFP));
+}
+
+#endif
+
+#if defined(SK_GRAPHITE)
+// Graphite implementation: emits a BlendColorFilterBlock carrying the mode
+// and the filter color converted to the destination color space.
+void SkModeColorFilter::addToKey(const skgpu::graphite::KeyContext& keyContext,
+                                 skgpu::graphite::PaintParamsKeyBuilder* builder,
+                                 skgpu::graphite::PipelineDataGatherer* gatherer) const {
+    using namespace skgpu::graphite;
+
+    SkPMColor4f color = map_color(fColor, sk_srgb_singleton(),
+                                  keyContext.dstColorInfo().colorSpace());
+    BlendColorFilterBlock::BlendColorFilterData data(fMode, color);
+
+    BlendColorFilterBlock::BeginBlock(keyContext, builder, gatherer, &data);
+    builder->endBlock();
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Public factory. Normalizes |color| to sRGB, canonicalizes equivalent blend
+// modes, and returns nullptr both for invalid modes and for (color, mode)
+// combinations that are provably no-ops -- callers treat null as "no filter".
+sk_sp<SkColorFilter> SkColorFilters::Blend(const SkColor4f& color,
+                                           sk_sp<SkColorSpace> colorSpace,
+                                           SkBlendMode mode) {
+    if (!SkIsValidMode(mode)) {
+        return nullptr;
+    }
+
+    // First map to sRGB to simplify storage in the actual SkColorFilter instance, staying unpremul
+    // until the final dst color space is known when actually filtering.
+    SkColor4f srgb = map_color<kUnpremul_SkAlphaType>(
+            color, colorSpace.get(), sk_srgb_singleton());
+
+    // Next collapse some modes if possible
+    float alpha = srgb.fA;
+    if (SkBlendMode::kClear == mode) {
+        // Clear is equivalent to src'ing transparent.
+        srgb = SkColors::kTransparent;
+        mode = SkBlendMode::kSrc;
+    } else if (SkBlendMode::kSrcOver == mode) {
+        if (0.f == alpha) {
+            // Transparent srcover leaves dst untouched.
+            mode = SkBlendMode::kDst;
+        } else if (1.f == alpha) {
+            // Opaque srcover fully replaces dst.
+            mode = SkBlendMode::kSrc;
+        }
+        // else just stay srcover
+    }
+
+    // Finally weed out combinations that are noops, and just return null
+    if (SkBlendMode::kDst == mode ||
+        (0.f == alpha && (SkBlendMode::kSrcOver == mode ||
+                          SkBlendMode::kDstOver == mode ||
+                          SkBlendMode::kDstOut == mode ||
+                          SkBlendMode::kSrcATop == mode ||
+                          SkBlendMode::kXor == mode ||
+                          SkBlendMode::kDarken == mode)) ||
+            (1.f == alpha && SkBlendMode::kDstIn == mode)) {
+        return nullptr;
+    }
+
+    return sk_sp<SkColorFilter>(new SkModeColorFilter(srgb, mode));
+}
+
+// Legacy 8-bit overload; SkColor is defined to be sRGB, hence the null space.
+sk_sp<SkColorFilter> SkColorFilters::Blend(SkColor color, SkBlendMode mode) {
+    return Blend(SkColor4f::FromColor(color), /*sRGB*/nullptr, mode);
+}
+
+// Registers the CreateProc so serialized pictures can recreate this filter.
+void SkRegisterModeColorFilterFlattenable() {
+    SK_REGISTER_FLATTENABLE(SkModeColorFilter);
+}
diff --git a/gfx/skia/skia/src/core/SkNextID.h b/gfx/skia/skia/src/core/SkNextID.h
new file mode 100644
index 0000000000..395c9a27a6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkNextID.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNextID_DEFINED
+#define SkNextID_DEFINED
+
+#include "include/core/SkTypes.h"
+
+// Central generator for process-unique IDs (implementation elsewhere).
+class SkNextID {
+public:
+    /**
+     *  Shared between SkPixelRef's generationID and SkImage's uniqueID
+     */
+    static uint32_t ImageID();
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkOSFile.h b/gfx/skia/skia/src/core/SkOSFile.h
new file mode 100644
index 0000000000..2f9eccf193
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOSFile.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+// TODO: add unittests for all these operations
+
+#ifndef SkOSFile_DEFINED
+#define SkOSFile_DEFINED
+
+#include <stdio.h>
+
+#include "include/core/SkString.h"
+#include "include/private/base/SkTemplates.h"
+
+enum SkFILE_Flags {
+ kRead_SkFILE_Flag = 0x01,
+ kWrite_SkFILE_Flag = 0x02
+};
+
+FILE* sk_fopen(const char path[], SkFILE_Flags);
+void sk_fclose(FILE*);
+
+size_t sk_fgetsize(FILE*);
+
+size_t sk_fwrite(const void* buffer, size_t byteCount, FILE*);
+
+void sk_fflush(FILE*);
+void sk_fsync(FILE*);
+
+size_t sk_ftell(FILE*);
+
+/** Maps a file into memory. Returns the address and length on success, NULL otherwise.
+ * The mapping is read only.
+ * When finished with the mapping, free the returned pointer with sk_fmunmap.
+ */
+void* sk_fmmap(FILE* f, size_t* length);
+
+/** Maps a file descriptor into memory. Returns the address and length on success, NULL otherwise.
+ * The mapping is read only.
+ * When finished with the mapping, free the returned pointer with sk_fmunmap.
+ */
+void* sk_fdmmap(int fd, size_t* length);
+
+/** Unmaps a file previously mapped by sk_fmmap or sk_fdmmap.
+ * The length parameter must be the same as returned from sk_fmmap.
+ */
+void sk_fmunmap(const void* addr, size_t length);
+
+/** Returns true if the two point at the exact same filesystem object. */
+bool sk_fidentical(FILE* a, FILE* b);
+
+/** Returns the underlying file descriptor for the given file.
+ * The return value will be < 0 on failure.
+ */
+int sk_fileno(FILE* f);
+
+/** Returns true if something (file, directory, ???) exists at this path,
+ * and has the specified access flags.
+ */
+bool sk_exists(const char *path, SkFILE_Flags = (SkFILE_Flags)0);
+
+// Returns true if a directory exists at this path.
+bool sk_isdir(const char *path);
+
+// Like pread, but may affect the file position marker.
+// Returns the number of bytes read or SIZE_MAX if failed.
+size_t sk_qread(FILE*, void* buffer, size_t count, size_t offset);
+
+
+// Create a new directory at this path; returns true if successful.
+// If the directory already existed, this will return true.
+// Description of the error, if any, will be written to stderr.
+bool sk_mkdir(const char* path);
+
+// Namespace-like holder for the directory iterator; platform implementations
+// live elsewhere.
+class SkOSFile {
+public:
+    class Iter {
+    public:
+        // SPI for module use.
+        SK_SPI Iter();
+        SK_SPI Iter(const char path[], const char suffix[] = nullptr);
+        SK_SPI ~Iter();
+
+        SK_SPI void reset(const char path[], const char suffix[] = nullptr);
+        /** If getDir is true, only returns directories.
+            Results are undefined if true and false calls are
+            interleaved on a single iterator.
+        */
+        SK_SPI bool next(SkString* name, bool getDir = false);
+
+        static const size_t kStorageSize = 40;
+    private:
+        // Opaque, suitably-aligned storage for the per-platform iterator state
+        // (avoids a heap allocation); presumably sized for the largest
+        // platform impl -- TODO confirm against the platform sources.
+        alignas(void*) alignas(double) char fSelf[kStorageSize];
+    };
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkOpts.cpp b/gfx/skia/skia/src/core/SkOpts.cpp
new file mode 100644
index 0000000000..1c329ca647
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOpts.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkOnce.h"
+#include "src/base/SkHalf.h"
+#include "src/core/SkCpu.h"
+#include "src/core/SkOpts.h"
+
+#if defined(SK_ARM_HAS_NEON)
+ #if defined(SK_ARM_HAS_CRC32)
+ #define SK_OPTS_NS neon_and_crc32
+ #else
+ #define SK_OPTS_NS neon
+ #endif
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX
+ #define SK_OPTS_NS skx
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ #define SK_OPTS_NS avx2
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ #define SK_OPTS_NS avx
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ #define SK_OPTS_NS sse42
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ #define SK_OPTS_NS sse41
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ #define SK_OPTS_NS ssse3
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE3
+ #define SK_OPTS_NS sse3
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #define SK_OPTS_NS sse2
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ #define SK_OPTS_NS sse
+#else
+ #define SK_OPTS_NS portable
+#endif
+
+#include "src/core/SkCubicSolver.h"
+#include "src/opts/SkBitmapProcState_opts.h"
+#include "src/opts/SkBlitMask_opts.h"
+#include "src/opts/SkBlitRow_opts.h"
+#include "src/opts/SkChecksum_opts.h"
+#include "src/opts/SkRasterPipeline_opts.h"
+#include "src/opts/SkSwizzler_opts.h"
+#include "src/opts/SkUtils_opts.h"
+#include "src/opts/SkVM_opts.h"
+#include "src/opts/SkXfermode_opts.h"
+
+namespace SkOpts {
+    // Define default function pointer values here...
+    // If our global compile options are set high enough, these defaults might even be
+    // CPU-specialized, e.g. a typical x86-64 machine might start with SSE2 defaults.
+    // They'll still get a chance to be replaced with even better ones, e.g. using SSE4.1.
+    //
+    // SK_OPTS_NS (chosen above) names the namespace for the minimum CPU level
+    // this translation unit was compiled for.
+#define DEFINE_DEFAULT(name) decltype(name) name = SK_OPTS_NS::name
+    DEFINE_DEFAULT(create_xfermode);
+
+    DEFINE_DEFAULT(blit_mask_d32_a8);
+
+    DEFINE_DEFAULT(blit_row_color32);
+    DEFINE_DEFAULT(blit_row_s32a_opaque);
+
+    DEFINE_DEFAULT(RGBA_to_BGRA);
+    DEFINE_DEFAULT(RGBA_to_rgbA);
+    DEFINE_DEFAULT(RGBA_to_bgrA);
+    DEFINE_DEFAULT(RGB_to_RGB1);
+    DEFINE_DEFAULT(RGB_to_BGR1);
+    DEFINE_DEFAULT(gray_to_RGB1);
+    DEFINE_DEFAULT(grayA_to_RGBA);
+    DEFINE_DEFAULT(grayA_to_rgbA);
+    DEFINE_DEFAULT(inverted_CMYK_to_RGB1);
+    DEFINE_DEFAULT(inverted_CMYK_to_BGR1);
+
+    DEFINE_DEFAULT(memset16);
+    DEFINE_DEFAULT(memset32);
+    DEFINE_DEFAULT(memset64);
+
+    DEFINE_DEFAULT(rect_memset16);
+    DEFINE_DEFAULT(rect_memset32);
+    DEFINE_DEFAULT(rect_memset64);
+
+    DEFINE_DEFAULT(cubic_solver);
+
+    DEFINE_DEFAULT(hash_fn);
+
+    DEFINE_DEFAULT(S32_alpha_D32_filter_DX);
+    DEFINE_DEFAULT(S32_alpha_D32_filter_DXDY);
+
+    DEFINE_DEFAULT(interpret_skvm);
+#undef DEFINE_DEFAULT
+
+    size_t raster_pipeline_lowp_stride  = SK_OPTS_NS::raster_pipeline_lowp_stride();
+    size_t raster_pipeline_highp_stride = SK_OPTS_NS::raster_pipeline_highp_stride();
+
+    // Populate the raster-pipeline stage tables from the default namespace.
+#define M(st) (StageFn)SK_OPTS_NS::st,
+    StageFn ops_highp[] = { SK_RASTER_PIPELINE_OPS_ALL(M) };
+    StageFn just_return_highp = (StageFn)SK_OPTS_NS::just_return;
+    void (*start_pipeline_highp)(size_t, size_t, size_t, size_t, SkRasterPipelineStage*) =
+            SK_OPTS_NS::start_pipeline;
+#undef M
+
+#define M(st) (StageFn)SK_OPTS_NS::lowp::st,
+    StageFn ops_lowp[] = { SK_RASTER_PIPELINE_OPS_LOWP(M) };
+    StageFn just_return_lowp = (StageFn)SK_OPTS_NS::lowp::just_return;
+    void (*start_pipeline_lowp)(size_t, size_t, size_t, size_t, SkRasterPipelineStage*) =
+            SK_OPTS_NS::lowp::start_pipeline;
+#undef M
+
+    // Each Init_foo() is defined in src/opts/SkOpts_foo.cpp.
+    void Init_ssse3();
+    void Init_sse42();
+    void Init_avx();
+    void Init_hsw();
+    void Init_skx();
+    void Init_erms();
+    void Init_crc32();
+
+    // Replace the default pointers with the best specializations the running
+    // CPU supports. Each #if skips an Init when the whole binary was already
+    // compiled at (or above) that instruction-set level.
+    static void init() {
+    #if defined(SK_ENABLE_OPTIMIZE_SIZE)
+        // All Init_foo functions are omitted when optimizing for size
+    #elif defined(SK_CPU_X86)
+        #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSSE3
+            if (SkCpu::Supports(SkCpu::SSSE3)) { Init_ssse3(); }
+        #endif
+
+        #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSE42
+            if (SkCpu::Supports(SkCpu::SSE42)) { Init_sse42(); }
+        #endif
+
+        #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_AVX
+            if (SkCpu::Supports(SkCpu::AVX  )) { Init_avx();   }
+            if (SkCpu::Supports(SkCpu::HSW  )) { Init_hsw();   }
+        #endif
+
+        #if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SKX
+            if (SkCpu::Supports(SkCpu::SKX)) { Init_skx(); }
+        #endif
+
+        if (SkCpu::Supports(SkCpu::ERMS)) { Init_erms(); }
+
+    #elif defined(SK_CPU_ARM64)
+        if (SkCpu::Supports(SkCpu::CRC32)) { Init_crc32(); }
+
+    #endif
+    }
+
+    // Public entry point; thread-safe and idempotent via SkOnce.
+    void Init() {
+        static SkOnce once;
+        once(init);
+    }
+}  // namespace SkOpts
diff --git a/gfx/skia/skia/src/core/SkOpts.h b/gfx/skia/skia/src/core/SkOpts.h
new file mode 100644
index 0000000000..4df2a4f98c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOpts.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpts_DEFINED
+#define SkOpts_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkOpts_spi.h"
+#include "src/core/SkRasterPipelineOpList.h"
+#include "src/core/SkXfermodePriv.h"
+
+/**
+ * SkOpts (short for SkOptimizations) is a mechanism where we can ship with multiple implementations
+ * of a set of functions and dynamically choose the best one at runtime (e.g. the call to
+ * SkGraphics::Init(), which calls SkOpts::Init()) depending on the detected CPU features. This is
+ * also referred to as having "specializations" of a given function.
+ *
+ * For example, Skia might be compiled to support CPUs that only have the sse2 instruction set
+ * (https://en.wikipedia.org/wiki/X86_instruction_listings#SSE2_instructions)
+ * but may be run on a more modern CPU that supports sse42 instructions.
+ * (https://en.wikipedia.org/wiki/SSE4)
+ * SkOpts allow Skia to have two versions of a CRC32 checksum function, one that uses normal C++
+ * code (e.g. loops, bit operations, table lookups) and one that makes use of the _mm_crc32_u64
+ * intrinsic function which uses the SSE4.2 crc32 machine instruction under the hood. This hash
+ * function is declared here in the SkOpts namespace, and then the implementation (see SkOpts.cpp)
+ * is deferred to a function of the same name in the sse2:: namespace (the minimum Skia is compiled
+ * with) using DEFINE_DEFAULT.
+ *
+ * All implementations of this hash function are done in a header file in //src/opts
+ * (e.g. //src/opts/SkChecksum_opts.h). ifdefs guard each of the implementations, such that only
+ * one implementation is possible for a given SK_CPU_SSE_LEVEL. This header will be compiled
+ * *multiple* times with a different SK_CPU_SSE_LEVEL each compilation.
+ *
+ * Each CPU instruction set that we want specializations for has a .cpp file in //src/opts which
+ * defines an Init() function that replaces the function pointers in the SkOpts namespace with the
+ * ones from the specialized namespace (e.g. sse42::). These .cpp files don't implement the
+ * specializations, they just refer to the specialization created in the header files (e.g.
+ * SkChecksum_opts.h).
+ *
+ * At compile time:
+ * - SkOpts.cpp is compiled with the minimum CPU level (e.g. SSE2). Because this
+ * file includes all the headers in //src/opts/, those headers add "the default implementation"
+ * of all their functions to the SK_OPTS_NS namespace (e.g. sse2::hash_fn).
+ * - Each of the specialized .cpp files in //src/opts/ are compiled with their respective
+ * compiler flags. Because the specialized .cpp file includes the headers that implement the
+ * functions using intrinsics or other CPU-specific code, those specialized functions end up
+ * in the specialized namespace, e.g. (sse42::hash_fn).
+ *
+ * At link time, the default implementations and all specializations of all SkOpts functions are
+ * included in the resulting library/binary file.
+ *
+ * At runtime, SkOpts::Init() will run the appropriate Init functions that the current CPU level
+ * supports specializations for (e.g. Init_sse42, Init_ssse3). Note multiple Init functions can
+ * be called as CPU instruction sets are typically super sets of older instruction sets
+ */
+
+struct SkBitmapProcState;
+struct SkRasterPipelineStage;
+namespace skvm {
+struct InterpreterInstruction;
+}
+namespace SkSL {
+class TraceHook;
+}
+
+namespace SkOpts {
+    // Call to replace pointers to portable functions with pointers to CPU-specific functions.
+    // Thread-safe and idempotent.
+    // Called by SkGraphics::Init().
+    void Init();
+
+    // Declare function pointers here...
+
+    // May return nullptr if we haven't specialized the given Mode.
+    extern SkXfermode* (*create_xfermode)(SkBlendMode);
+
+    extern void (*blit_mask_d32_a8)(SkPMColor*, size_t, const SkAlpha*, size_t, SkColor, int, int);
+    extern void (*blit_row_color32)(SkPMColor*, const SkPMColor*, int, SkPMColor);
+    extern void (*blit_row_s32a_opaque)(SkPMColor*, const SkPMColor*, int, U8CPU);
+
+    // Swizzle input into some sort of 8888 pixel, {premul,unpremul} x {rgba,bgra}.
+    typedef void (*Swizzle_8888_u32)(uint32_t*, const uint32_t*, int);
+    extern Swizzle_8888_u32 RGBA_to_BGRA,          // i.e. just swap RB
+                            RGBA_to_rgbA,          // i.e. just premultiply
+                            RGBA_to_bgrA,          // i.e. swap RB and premultiply
+                            inverted_CMYK_to_RGB1, // i.e. convert color space
+                            inverted_CMYK_to_BGR1; // i.e. convert color space
+
+    typedef void (*Swizzle_8888_u8)(uint32_t*, const uint8_t*, int);
+    extern Swizzle_8888_u8 RGB_to_RGB1,     // i.e. insert an opaque alpha
+                           RGB_to_BGR1,     // i.e. swap RB and insert an opaque alpha
+                           gray_to_RGB1,    // i.e. expand to color channels + an opaque alpha
+                           grayA_to_RGBA,   // i.e. expand to color channels
+                           grayA_to_rgbA;   // i.e. expand to color channels and premultiply
+
+    // Fill routines: (dst, value, count); rect variants add rowBytes and height.
+    extern void (*memset16)(uint16_t[], uint16_t, int);
+    extern void (*memset32)(uint32_t[], uint32_t, int);
+    extern void (*memset64)(uint64_t[], uint64_t, int);
+
+    extern void (*rect_memset16)(uint16_t[], uint16_t, int, size_t, int);
+    extern void (*rect_memset32)(uint32_t[], uint32_t, int, size_t, int);
+    extern void (*rect_memset64)(uint64_t[], uint64_t, int, size_t, int);
+
+    extern float (*cubic_solver)(float, float, float, float);
+
+    static inline uint32_t hash(const void* data, size_t bytes, uint32_t seed=0) {
+        // hash_fn is defined in SkOpts_spi.h so it can be used by //modules
+        return hash_fn(data, bytes, seed);
+    }
+
+    // SkBitmapProcState optimized Shader, Sample, or Matrix procs.
+    extern void (*S32_alpha_D32_filter_DX)(const SkBitmapProcState&,
+                                           const uint32_t* xy, int count, SkPMColor*);
+    extern void (*S32_alpha_D32_filter_DXDY)(const SkBitmapProcState&,
+                                             const uint32_t* xy, int count, SkPMColor*);
+
+    // We can't necessarily express the type of SkRasterPipeline stage functions here,
+    // so we just use this void(*)(void) as a stand-in.
+    using StageFn = void(*)(void);
+    extern StageFn ops_highp[kNumRasterPipelineHighpOps], just_return_highp;
+    extern StageFn ops_lowp [kNumRasterPipelineLowpOps ], just_return_lowp;
+
+    extern void (*start_pipeline_highp)(size_t,size_t,size_t,size_t, SkRasterPipelineStage*);
+    extern void (*start_pipeline_lowp )(size_t,size_t,size_t,size_t, SkRasterPipelineStage*);
+
+    // Number of pixels each pipeline flavor processes per iteration.
+    extern size_t raster_pipeline_lowp_stride;
+    extern size_t raster_pipeline_highp_stride;
+
+    // Interpreter entry point for skvm programs (see SkVM_opts.h).
+    extern void (*interpret_skvm)(const skvm::InterpreterInstruction insts[], int ninsts,
+                                  int nregs, int loop, const int strides[],
+                                  SkSL::TraceHook* traceHooks[], int nTraceHooks,
+                                  int nargs, int n, void* args[]);
+}  // namespace SkOpts
+
+#endif // SkOpts_DEFINED
diff --git a/gfx/skia/skia/src/core/SkOpts_erms.cpp b/gfx/skia/skia/src/core/SkOpts_erms.cpp
new file mode 100644
index 0000000000..4e1e096d7d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOpts_erms.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkMSAN.h"
+#include "src/core/SkOpts.h"
+
+#if defined(__x86_64__) || defined(_M_X64) // memset16 and memset32 could work on 32-bit x86 too.
+
+ static const char* note = "MSAN can't see that rep sto initializes memory.";
+
+ #if defined(_MSC_VER)
+ #include <intrin.h>
+ static inline void repsto(uint16_t* dst, uint16_t v, size_t n) {
+ sk_msan_mark_initialized(dst,dst+n,note);
+ __stosw(dst, v, n);
+ }
+ static inline void repsto(uint32_t* dst, uint32_t v, size_t n) {
+ sk_msan_mark_initialized(dst,dst+n,note);
+ static_assert(sizeof(uint32_t) == sizeof(unsigned long));
+ __stosd(reinterpret_cast<unsigned long*>(dst), v, n);
+ }
+ static inline void repsto(uint64_t* dst, uint64_t v, size_t n) {
+ sk_msan_mark_initialized(dst,dst+n,note);
+ __stosq(dst, v, n);
+ }
+ #else
+ static inline void repsto(uint16_t* dst, uint16_t v, size_t n) {
+ sk_msan_mark_initialized(dst,dst+n,note);
+ asm volatile("rep stosw" : "+D"(dst), "+c"(n) : "a"(v) : "memory");
+ }
+ static inline void repsto(uint32_t* dst, uint32_t v, size_t n) {
+ sk_msan_mark_initialized(dst,dst+n,note);
+ asm volatile("rep stosl" : "+D"(dst), "+c"(n) : "a"(v) : "memory");
+ }
+ static inline void repsto(uint64_t* dst, uint64_t v, size_t n) {
+ sk_msan_mark_initialized(dst,dst+n,note);
+ asm volatile("rep stosq" : "+D"(dst), "+c"(n) : "a"(v) : "memory");
+ }
+ #endif
+
+ // ERMS is ideal for large copies but has a relatively high setup cost,
+ // so we use the previous best routine for small inputs. FSRM would make this moot.
+ static void (*g_memset16_prev)(uint16_t*, uint16_t, int);
+ static void (*g_memset32_prev)(uint32_t*, uint32_t, int);
+ static void (*g_memset64_prev)(uint64_t*, uint64_t, int);
+ static void (*g_rect_memset16_prev)(uint16_t*, uint16_t, int, size_t, int);
+ static void (*g_rect_memset32_prev)(uint32_t*, uint32_t, int, size_t, int);
+ static void (*g_rect_memset64_prev)(uint64_t*, uint64_t, int, size_t, int);
+
+ // Empirically determined with `nanobench -m memset`.
+ static bool small(size_t bytes) { return bytes < 1024; }
+
+ #define SK_OPTS_NS erms
+ namespace SK_OPTS_NS {
+ static inline void memset16(uint16_t* dst, uint16_t v, int n) {
+ return small(sizeof(v)*n) ? g_memset16_prev(dst, v, n)
+ : repsto(dst, v, n);
+ }
+ static inline void memset32(uint32_t* dst, uint32_t v, int n) {
+ return small(sizeof(v)*n) ? g_memset32_prev(dst, v, n)
+ : repsto(dst, v, n);
+ }
+ static inline void memset64(uint64_t* dst, uint64_t v, int n) {
+ return small(sizeof(v)*n) ? g_memset64_prev(dst, v, n)
+ : repsto(dst, v, n);
+ }
+
+ static inline void rect_memset16(uint16_t* dst, uint16_t v, int n,
+ size_t rowBytes, int height) {
+ if (small(sizeof(v)*n)) {
+ return g_rect_memset16_prev(dst,v,n, rowBytes,height);
+ }
+ for (int stride = rowBytes/sizeof(v); height --> 0; dst += stride) {
+ repsto(dst, v, n);
+ }
+ }
+ static inline void rect_memset32(uint32_t* dst, uint32_t v, int n,
+ size_t rowBytes, int height) {
+ if (small(sizeof(v)*n)) {
+ return g_rect_memset32_prev(dst,v,n, rowBytes,height);
+ }
+ for (int stride = rowBytes/sizeof(v); height --> 0; dst += stride) {
+ repsto(dst, v, n);
+ }
+ }
+ static inline void rect_memset64(uint64_t* dst, uint64_t v, int n,
+ size_t rowBytes, int height) {
+ if (small(sizeof(v)*n)) {
+ return g_rect_memset64_prev(dst,v,n, rowBytes,height);
+ }
+ for (int stride = rowBytes/sizeof(v); height --> 0; dst += stride) {
+ repsto(dst, v, n);
+ }
+ }
+ } // namespace SK_OPTS_NS
+
+ namespace SkOpts {
+ void Init_erms() {
+ g_memset16_prev = memset16;
+ g_memset32_prev = memset32;
+ g_memset64_prev = memset64;
+ g_rect_memset16_prev = rect_memset16;
+ g_rect_memset32_prev = rect_memset32;
+ g_rect_memset64_prev = rect_memset64;
+
+ memset16 = SK_OPTS_NS::memset16;
+ memset32 = SK_OPTS_NS::memset32;
+ memset64 = SK_OPTS_NS::memset64;
+ rect_memset16 = SK_OPTS_NS::rect_memset16;
+ rect_memset32 = SK_OPTS_NS::rect_memset32;
+ rect_memset64 = SK_OPTS_NS::rect_memset64;
+ }
+ }
+#else
+ namespace SkOpts {
+ void Init_erms() {}
+ }
+#endif
diff --git a/gfx/skia/skia/src/core/SkOrderedReadBuffer.h b/gfx/skia/skia/src/core/SkOrderedReadBuffer.h
new file mode 100644
index 0000000000..239d8b68c2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOrderedReadBuffer.h
@@ -0,0 +1,9 @@
+// Temporary shim to keep a couple dependencies working in Chromium.
+#ifndef SkOrderedReadBuffer_DEFINED
+#define SkOrderedReadBuffer_DEFINED
+
+#include "src/core/SkReadBuffer.h"
+
+typedef SkReadBuffer SkOrderedReadBuffer;
+
+#endif//SkOrderedReadBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkOverdrawCanvas.cpp b/gfx/skia/skia/src/core/SkOverdrawCanvas.cpp
new file mode 100644
index 0000000000..621ff4b87b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkOverdrawCanvas.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkOverdrawCanvas.h"
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkTextBlob.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkLatticeIter.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/text/GlyphRun.h"
+#include "src/utils/SkPatchUtils.h"
+
+SkOverdrawCanvas::SkOverdrawCanvas(SkCanvas* canvas)
+ : INHERITED(canvas->onImageInfo().width(), canvas->onImageInfo().height())
+{
+ // Non-drawing calls that SkOverdrawCanvas does not override (translate, save, etc.)
+ // will pass through to the input canvas.
+ this->addCanvas(canvas);
+
+ static constexpr float kIncrementAlpha[] = {
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 1.0f/255,
+ };
+
+ fPaint.setAntiAlias(false);
+ fPaint.setBlendMode(SkBlendMode::kPlus);
+ fPaint.setColorFilter(SkColorFilters::Matrix(kIncrementAlpha));
+}
+
+namespace {
+class TextDevice : public SkNoPixelsDevice, public SkGlyphRunListPainterCPU::BitmapDevicePainter {
+public:
+ TextDevice(SkCanvas* overdrawCanvas, const SkSurfaceProps& props)
+ : SkNoPixelsDevice{SkIRect::MakeWH(32767, 32767), props},
+ fOverdrawCanvas{overdrawCanvas},
+ fPainter{props, kN32_SkColorType, nullptr} {}
+
+ void paintMasks(SkZip<const SkGlyph*, SkPoint> accepted, const SkPaint& paint) const override {
+ for (auto [glyph, pos] : accepted) {
+ SkMask mask = glyph->mask(pos);
+ // We need to ignore any matrix on the overdraw canvas (it's already been baked into
+ // our glyph positions). Otherwise, the CTM is double-applied. (skbug.com/13732)
+ fOverdrawCanvas->save();
+ fOverdrawCanvas->resetMatrix();
+ fOverdrawCanvas->drawRect(SkRect::Make(mask.fBounds), SkPaint());
+ fOverdrawCanvas->restore();
+ }
+ }
+
+ void drawBitmap(const SkBitmap&, const SkMatrix&, const SkRect* dstOrNull,
+ const SkSamplingOptions&, const SkPaint&) const override {}
+
+ void onDrawGlyphRunList(SkCanvas* canvas,
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint) override {
+ SkASSERT(!glyphRunList.hasRSXForm());
+ fPainter.drawForBitmapDevice(canvas, this, glyphRunList, drawingPaint,
+ fOverdrawCanvas->getTotalMatrix());
+ }
+
+private:
+ SkCanvas* const fOverdrawCanvas;
+ SkGlyphRunListPainterCPU fPainter;
+};
+} // namespace
+
+void SkOverdrawCanvas::onDrawTextBlob(
+ const SkTextBlob* blob, SkScalar x, SkScalar y, const SkPaint& paint) {
+ sktext::GlyphRunBuilder b;
+ auto glyphRunList = b.blobToGlyphRunList(*blob, {x, y});
+ this->onDrawGlyphRunList(glyphRunList, paint);
+}
+
+void SkOverdrawCanvas::onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& paint) {
+ SkSurfaceProps props{0, kUnknown_SkPixelGeometry};
+ this->getProps(&props);
+ TextDevice device{this, props};
+
+ device.drawGlyphRunList(this, glyphRunList, paint, paint);
+}
+
+void SkOverdrawCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode blendMode,
+ const SkPaint&) {
+ fList[0]->onDrawPatch(cubics, colors, texCoords, blendMode, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawPaint(const SkPaint& paint) {
+ if (0 == paint.getColor() && !paint.getColorFilter() && !paint.getShader()) {
+ // This is a clear, ignore it.
+ } else {
+ fList[0]->onDrawPaint(this->overdrawPaint(paint));
+ }
+}
+
+void SkOverdrawCanvas::onDrawBehind(const SkPaint& paint) {
+ fList[0]->onDrawBehind(this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ fList[0]->onDrawRect(rect, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ fList[0]->onDrawRegion(region, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ fList[0]->onDrawOval(oval, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawArc(const SkRect& arc, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ fList[0]->onDrawArc(arc, startAngle, sweepAngle, useCenter, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ fList[0]->onDrawDRRect(outer, inner, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawRRect(const SkRRect& rect, const SkPaint& paint) {
+ fList[0]->onDrawRRect(rect, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint points[],
+ const SkPaint& paint) {
+ fList[0]->onDrawPoints(mode, count, points, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawVerticesObject(const SkVertices* vertices,
+ SkBlendMode blendMode, const SkPaint& paint) {
+ fList[0]->onDrawVerticesObject(vertices, blendMode, this->overdrawPaint(paint));
+}
+
+void SkOverdrawCanvas::onDrawAtlas2(const SkImage* image, const SkRSXform xform[],
+ const SkRect texs[], const SkColor colors[], int count,
+ SkBlendMode mode, const SkSamplingOptions& sampling,
+ const SkRect* cull, const SkPaint* paint) {
+ SkPaint* paintPtr = &fPaint;
+ SkPaint storage;
+ if (paint) {
+ storage = this->overdrawPaint(*paint);
+ paintPtr = &storage;
+ }
+
+ fList[0]->onDrawAtlas2(image, xform, texs, colors, count, mode, sampling, cull, paintPtr);
+}
+
+void SkOverdrawCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ fList[0]->onDrawPath(path, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawImage2(const SkImage* image, SkScalar x, SkScalar y,
+ const SkSamplingOptions&, const SkPaint*) {
+ fList[0]->onDrawRect(SkRect::MakeXYWH(x, y, image->width(), image->height()), fPaint);
+}
+
+void SkOverdrawCanvas::onDrawImageRect2(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions&, const SkPaint*, SrcRectConstraint) {
+ fList[0]->onDrawRect(dst, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawImageLattice2(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, SkFilterMode, const SkPaint*) {
+ SkIRect bounds;
+ Lattice latticePlusBounds = lattice;
+ if (!latticePlusBounds.fBounds) {
+ bounds = SkIRect::MakeWH(image->width(), image->height());
+ latticePlusBounds.fBounds = &bounds;
+ }
+
+ if (SkLatticeIter::Valid(image->width(), image->height(), latticePlusBounds)) {
+ SkLatticeIter iter(latticePlusBounds, dst);
+
+ SkRect ignored, iterDst;
+ while (iter.next(&ignored, &iterDst)) {
+ fList[0]->onDrawRect(iterDst, fPaint);
+ }
+ } else {
+ fList[0]->onDrawRect(dst, fPaint);
+ }
+}
+
+void SkOverdrawCanvas::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ drawable->draw(this, matrix);
+}
+
+void SkOverdrawCanvas::onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) {
+ SkASSERT(false);
+ return;
+}
+
+void SkOverdrawCanvas::onDrawAnnotation(const SkRect&, const char[], SkData*) {}
+
+void SkOverdrawCanvas::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ SkRect bounds;
+ SkDrawShadowMetrics::GetLocalBounds(path, rec, this->getTotalMatrix(), &bounds);
+ fList[0]->onDrawRect(bounds, fPaint);
+}
+
+void SkOverdrawCanvas::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aa, const SkColor4f& color, SkBlendMode mode) {
+ if (clip) {
+ fList[0]->onDrawPath(SkPath::Polygon(clip, 4, true), fPaint);
+ } else {
+ fList[0]->onDrawRect(rect, fPaint);
+ }
+}
+
+void SkOverdrawCanvas::onDrawEdgeAAImageSet2(const ImageSetEntry set[], int count,
+ const SkPoint dstClips[],
+ const SkMatrix preViewMatrices[],
+ const SkSamplingOptions& sampling,
+ const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ int clipIndex = 0;
+ for (int i = 0; i < count; ++i) {
+ if (set[i].fMatrixIndex >= 0) {
+ fList[0]->save();
+ fList[0]->concat(preViewMatrices[set[i].fMatrixIndex]);
+ }
+ if (set[i].fHasClip) {
+ fList[0]->onDrawPath(SkPath::Polygon(dstClips + clipIndex, 4, true), fPaint);
+ clipIndex += 4;
+ } else {
+ fList[0]->onDrawRect(set[i].fDstRect, fPaint);
+ }
+ if (set[i].fMatrixIndex >= 0) {
+ fList[0]->restore();
+ }
+ }
+}
+
+inline SkPaint SkOverdrawCanvas::overdrawPaint(const SkPaint& paint) {
+ SkPaint newPaint = fPaint;
+ newPaint.setStyle(paint.getStyle());
+ newPaint.setStrokeWidth(paint.getStrokeWidth());
+ return newPaint;
+}
diff --git a/gfx/skia/skia/src/core/SkPaint.cpp b/gfx/skia/skia/src/core/SkPaint.cpp
new file mode 100644
index 0000000000..67f66bf295
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaint.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkBlender.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkPaintDefaults.h"
+#include "src/core/SkPathEffectBase.h"
+
+#include <utility>
+
+// define this to get a printf for out-of-range parameter in setters
+// e.g. setTextSize(-1)
+//#define SK_REPORT_API_RANGE_CHECK
+
+
+SkPaint::SkPaint()
+ : fColor4f{0, 0, 0, 1} // opaque black
+ , fWidth{0}
+ , fMiterLimit{SkPaintDefaults_MiterLimit}
+ , fBitfields{(unsigned)false, // fAntiAlias
+ (unsigned)false, // fDither
+ (unsigned)SkPaint::kDefault_Cap, // fCapType
+ (unsigned)SkPaint::kDefault_Join, // fJoinType
+ (unsigned)SkPaint::kFill_Style, // fStyle
+ 0} // fPadding
+{
+ static_assert(sizeof(fBitfields) == sizeof(fBitfieldsUInt), "");
+}
+
+SkPaint::SkPaint(const SkColor4f& color, SkColorSpace* colorSpace) : SkPaint() {
+ this->setColor(color, colorSpace);
+}
+
+SkPaint::SkPaint(const SkPaint& src) = default;
+
+SkPaint::SkPaint(SkPaint&& src) = default;
+
+SkPaint::~SkPaint() = default;
+
+SkPaint& SkPaint::operator=(const SkPaint& src) = default;
+
+SkPaint& SkPaint::operator=(SkPaint&& src) = default;
+
+bool operator==(const SkPaint& a, const SkPaint& b) {
+#define EQUAL(field) (a.field == b.field)
+ return EQUAL(fPathEffect)
+ && EQUAL(fShader)
+ && EQUAL(fMaskFilter)
+ && EQUAL(fColorFilter)
+ && EQUAL(fBlender)
+ && EQUAL(fImageFilter)
+ && EQUAL(fColor4f)
+ && EQUAL(fWidth)
+ && EQUAL(fMiterLimit)
+ && EQUAL(fBitfieldsUInt)
+ ;
+#undef EQUAL
+}
+
+#define DEFINE_FIELD_REF(type) \
+ sk_sp<Sk##type> SkPaint::ref##type() const { return f##type; }
+DEFINE_FIELD_REF(ColorFilter)
+DEFINE_FIELD_REF(Blender)
+DEFINE_FIELD_REF(ImageFilter)
+DEFINE_FIELD_REF(MaskFilter)
+DEFINE_FIELD_REF(PathEffect)
+DEFINE_FIELD_REF(Shader)
+#undef DEFINE_FIELD_REF
+
+#define DEFINE_FIELD_SET(Field) \
+ void SkPaint::set##Field(sk_sp<Sk##Field> f) { f##Field = std::move(f); }
+DEFINE_FIELD_SET(ColorFilter)
+DEFINE_FIELD_SET(ImageFilter)
+DEFINE_FIELD_SET(MaskFilter)
+DEFINE_FIELD_SET(PathEffect)
+DEFINE_FIELD_SET(Shader)
+#undef DEFINE_FIELD_SET
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPaint::reset() { *this = SkPaint(); }
+
+void SkPaint::setStyle(Style style) {
+ if ((unsigned)style < kStyleCount) {
+ fBitfields.fStyle = style;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStyle(%d) out of range\n", style);
+#endif
+ }
+}
+
+void SkPaint::setStroke(bool isStroke) {
+ fBitfields.fStyle = isStroke ? kStroke_Style : kFill_Style;
+}
+
+void SkPaint::setColor(SkColor color) {
+ fColor4f = SkColor4f::FromColor(color);
+}
+
+void SkPaint::setColor(const SkColor4f& color, SkColorSpace* colorSpace) {
+ SkColorSpaceXformSteps steps{colorSpace, kUnpremul_SkAlphaType,
+ sk_srgb_singleton(), kUnpremul_SkAlphaType};
+ fColor4f = {color.fR, color.fG, color.fB, SkTPin(color.fA, 0.0f, 1.0f)};
+ steps.apply(fColor4f.vec());
+}
+
+void SkPaint::setAlphaf(float a) {
+ fColor4f.fA = SkTPin(a, 0.0f, 1.0f);
+}
+
+void SkPaint::setARGB(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
+ this->setColor(SkColorSetARGB(a, r, g, b));
+}
+
+std::optional<SkBlendMode> SkPaint::asBlendMode() const {
+ return fBlender ? as_BB(fBlender)->asBlendMode()
+ : SkBlendMode::kSrcOver;
+}
+
+SkBlendMode SkPaint::getBlendMode_or(SkBlendMode defaultMode) const {
+ return this->asBlendMode().value_or(defaultMode);
+}
+
+bool SkPaint::isSrcOver() const {
+ return !fBlender || as_BB(fBlender)->asBlendMode() == SkBlendMode::kSrcOver;
+}
+
+void SkPaint::setBlendMode(SkBlendMode mode) {
+ this->setBlender(mode == SkBlendMode::kSrcOver ? nullptr : SkBlender::Mode(mode));
+}
+
+void SkPaint::setBlender(sk_sp<SkBlender> blender) {
+ fBlender = std::move(blender);
+}
+
+void SkPaint::setStrokeWidth(SkScalar width) {
+ if (width >= 0) {
+ fWidth = width;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeWidth() called with negative value\n");
+#endif
+ }
+}
+
+void SkPaint::setStrokeMiter(SkScalar limit) {
+ if (limit >= 0) {
+ fMiterLimit = limit;
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeMiter() called with negative value\n");
+#endif
+ }
+}
+
+void SkPaint::setStrokeCap(Cap ct) {
+ if ((unsigned)ct < kCapCount) {
+ fBitfields.fCapType = SkToU8(ct);
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeCap(%d) out of range\n", ct);
+#endif
+ }
+}
+
+void SkPaint::setStrokeJoin(Join jt) {
+ if ((unsigned)jt < kJoinCount) {
+ fBitfields.fJoinType = SkToU8(jt);
+ } else {
+#ifdef SK_REPORT_API_RANGE_CHECK
+ SkDebugf("SkPaint::setStrokeJoin(%d) out of range\n", jt);
+#endif
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPaint::canComputeFastBounds() const {
+ if (this->getImageFilter() && !this->getImageFilter()->canComputeFastBounds()) {
+ return false;
+ }
+ // Pass nullptr for the bounds to determine if they can be computed
+ if (this->getPathEffect() &&
+ !as_PEB(this->getPathEffect())->computeFastBounds(nullptr)) {
+ return false;
+ }
+ return true;
+}
+
+const SkRect& SkPaint::computeFastBounds(const SkRect& orig, SkRect* storage) const {
+ // Things like stroking, etc... will do math on the bounds rect, assuming that it's sorted.
+ SkASSERT(orig.isSorted());
+ SkPaint::Style style = this->getStyle();
+ // ultra fast-case: filling with no effects that affect geometry
+ if (kFill_Style == style) {
+ uintptr_t effects = 0;
+ effects |= reinterpret_cast<uintptr_t>(this->getMaskFilter());
+ effects |= reinterpret_cast<uintptr_t>(this->getPathEffect());
+ effects |= reinterpret_cast<uintptr_t>(this->getImageFilter());
+ if (!effects) {
+ return orig;
+ }
+ }
+
+ return this->doComputeFastBounds(orig, storage, style);
+}
+
+const SkRect& SkPaint::doComputeFastBounds(const SkRect& origSrc,
+ SkRect* storage,
+ Style style) const {
+ SkASSERT(storage);
+
+ const SkRect* src = &origSrc;
+
+ SkRect tmpSrc;
+ if (this->getPathEffect()) {
+ tmpSrc = origSrc;
+ SkAssertResult(as_PEB(this->getPathEffect())->computeFastBounds(&tmpSrc));
+ src = &tmpSrc;
+ }
+
+ SkScalar radius = SkStrokeRec::GetInflationRadius(*this, style);
+ *storage = src->makeOutset(radius, radius);
+
+ if (this->getMaskFilter()) {
+ as_MFB(this->getMaskFilter())->computeFastBounds(*storage, storage);
+ }
+
+ if (this->getImageFilter()) {
+ *storage = this->getImageFilter()->computeFastBounds(*storage);
+ }
+
+ return *storage;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// return true if the filter exists, and may affect alpha
+static bool affects_alpha(const SkColorFilter* cf) {
+ return cf && !as_CFB(cf)->isAlphaUnchanged();
+}
+
+// return true if the filter exists, and may affect alpha
+static bool affects_alpha(const SkImageFilter* imf) {
+ // TODO: check if we should allow imagefilters to broadcast that they don't affect alpha
+ // ala colorfilters
+ return imf != nullptr;
+}
+
+bool SkPaint::nothingToDraw() const {
+ auto bm = this->asBlendMode();
+ if (!bm) {
+ return false;
+ }
+ switch (bm.value()) {
+ case SkBlendMode::kSrcOver:
+ case SkBlendMode::kSrcATop:
+ case SkBlendMode::kDstOut:
+ case SkBlendMode::kDstOver:
+ case SkBlendMode::kPlus:
+ if (0 == this->getAlpha()) {
+ return !affects_alpha(fColorFilter.get()) && !affects_alpha(fImageFilter.get());
+ }
+ break;
+ case SkBlendMode::kDst:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkPaintDefaults.h b/gfx/skia/skia/src/core/SkPaintDefaults.h
new file mode 100644
index 0000000000..ce90fd1803
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintDefaults.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintDefaults_DEFINED
+#define SkPaintDefaults_DEFINED
+
+#include "include/core/SkFontTypes.h"
+
+/**
+ * Any of these can be specified by the build system (or SkUserConfig.h)
+ * to change the default values for a SkPaint. This file should not be
+ * edited directly.
+ */
+
+#ifndef SkPaintDefaults_TextSize
+ #define SkPaintDefaults_TextSize SkIntToScalar(12)
+#endif
+
+#ifndef SkPaintDefaults_Hinting
+ #define SkPaintDefaults_Hinting SkFontHinting::kNormal
+#endif
+
+#ifndef SkPaintDefaults_MiterLimit
+ #define SkPaintDefaults_MiterLimit SkIntToScalar(4)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPaintPriv.cpp b/gfx/skia/skia/src/core/SkPaintPriv.cpp
new file mode 100644
index 0000000000..5dc4e67049
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintPriv.cpp
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSafeRange.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/core/SkXfermodePriv.h"
+#include "src/shaders/SkColorFilterShader.h"
+#include "src/shaders/SkShaderBase.h"
+
+static bool changes_alpha(const SkPaint& paint) {
+ SkColorFilter* cf = paint.getColorFilter();
+ return cf && !as_CFB(cf)->isAlphaUnchanged();
+}
+
+bool SkPaintPriv::Overwrites(const SkPaint* paint, ShaderOverrideOpacity overrideOpacity) {
+ if (!paint) {
+ // No paint means we default to SRC_OVER, so we overwrite iff our shader-override
+ // is opaque, or we don't have one.
+ return overrideOpacity != kNotOpaque_ShaderOverrideOpacity;
+ }
+
+ SkXfermode::SrcColorOpacity opacityType = SkXfermode::kUnknown_SrcColorOpacity;
+
+ if (!changes_alpha(*paint)) {
+ const unsigned paintAlpha = paint->getAlpha();
+ if (0xff == paintAlpha && overrideOpacity != kNotOpaque_ShaderOverrideOpacity &&
+ (!paint->getShader() || paint->getShader()->isOpaque()))
+ {
+ opacityType = SkXfermode::kOpaque_SrcColorOpacity;
+ } else if (0 == paintAlpha) {
+ if (overrideOpacity == kNone_ShaderOverrideOpacity && !paint->getShader()) {
+ opacityType = SkXfermode::kTransparentBlack_SrcColorOpacity;
+ } else {
+ opacityType = SkXfermode::kTransparentAlpha_SrcColorOpacity;
+ }
+ }
+ }
+
+ const auto bm = paint->asBlendMode();
+ if (!bm) {
+ return false; // don't know for sure, so we play it safe and return false.
+ }
+ return SkXfermode::IsOpaque(bm.value(), opacityType);
+}
+
+bool SkPaintPriv::ShouldDither(const SkPaint& p, SkColorType dstCT) {
+ // The paint dither flag can veto.
+ if (!p.isDither()) {
+ return false;
+ }
+
+ if (dstCT == kUnknown_SkColorType) {
+ return false;
+ }
+
+ // We always dither 565 or 4444 when requested.
+ if (dstCT == kRGB_565_SkColorType || dstCT == kARGB_4444_SkColorType) {
+ return true;
+ }
+
+ // Otherwise, dither is only needed for non-const paints.
+ return p.getImageFilter() || p.getMaskFilter() ||
+ (p.getShader() && !as_SB(p.getShader())->isConstant());
+}
+
+// return true if the paint is just a single color (i.e. not a shader). If it's
+// a shader, then we can't compute a const luminance for it :(
+static bool just_a_color(const SkPaint& paint, SkColor* color) {
+ SkColor c = paint.getColor();
+
+ const auto* shader = as_SB(paint.getShader());
+ if (shader && !shader->asLuminanceColor(&c)) {
+ return false;
+ }
+ if (paint.getColorFilter()) {
+ c = paint.getColorFilter()->filterColor(c);
+ }
+ if (color) {
+ *color = c;
+ }
+ return true;
+}
+
+SkColor SkPaintPriv::ComputeLuminanceColor(const SkPaint& paint) {
+ SkColor c;
+ if (!just_a_color(paint, &c)) {
+ c = SkColorSetRGB(0x7F, 0x80, 0x7F);
+ }
+ return c;
+}
+
+void SkPaintPriv::RemoveColorFilter(SkPaint* p, SkColorSpace* dstCS) {
+ if (SkColorFilter* filter = p->getColorFilter()) {
+ if (SkShader* shader = p->getShader()) {
+ // SkColorFilterShader will modulate the shader color by paint alpha
+ // before applying the filter, so we'll reset it to opaque.
+ p->setShader(sk_make_sp<SkColorFilterShader>(sk_ref_sp(shader),
+ p->getAlphaf(),
+ sk_ref_sp(filter)));
+ p->setAlphaf(1.0f);
+ } else {
+ p->setColor(filter->filterColor4f(p->getColor4f(), sk_srgb_singleton(), dstCS), dstCS);
+ }
+ p->setColorFilter(nullptr);
+ }
+}
+
+#ifdef SK_DEBUG
+ static void ASSERT_FITS_IN(uint32_t value, int bitCount) {
+ SkASSERT(bitCount > 0 && bitCount <= 32);
+ uint32_t mask = ~0U;
+ mask >>= (32 - bitCount);
+ SkASSERT(0 == (value & ~mask));
+ }
+#else
+ #define ASSERT_FITS_IN(value, bitcount)
+#endif
+
+enum FlatFlags {
+ kHasTypeface_FlatFlag = 0x1,
+ kHasEffects_FlatFlag = 0x2,
+
+ kFlatFlagMask = 0x3,
+};
+
+// SkPaint originally defined flags, some of which now apply to SkFont. These are renames
+// of those flags, split into categories depending on which objects they (now) apply to.
+
+template <typename T> uint32_t shift_bits(T value, unsigned shift, unsigned bits) {
+ SkASSERT(shift + bits <= 32);
+ uint32_t v = static_cast<uint32_t>(value);
+ ASSERT_FITS_IN(v, bits);
+ return v << shift;
+}
+
+constexpr uint8_t CUSTOM_BLEND_MODE_SENTINEL = 0xFF;
+
+/* Packing the paint
+ flags : 8 // 2...
+ blend : 8 // 30+
+ cap : 2 // 3
+ join : 2 // 3
+ style : 2 // 3
+ filter: 2 // 4
+ flat : 8 // 1...
+ total : 32
+ */
+static uint32_t pack_v68(const SkPaint& paint, unsigned flatFlags) {
+ uint32_t packed = 0;
+ const auto bm = paint.asBlendMode();
+ const unsigned mode = bm ? static_cast<unsigned>(bm.value())
+ : CUSTOM_BLEND_MODE_SENTINEL;
+
+ packed |= shift_bits(((unsigned)paint.isDither() << 1) |
+ (unsigned)paint.isAntiAlias(), 0, 8);
+ packed |= shift_bits(mode, 8, 8);
+ packed |= shift_bits(paint.getStrokeCap(), 16, 2);
+ packed |= shift_bits(paint.getStrokeJoin(), 18, 2);
+ packed |= shift_bits(paint.getStyle(), 20, 2);
+ packed |= shift_bits(0, 22, 2); // was filterquality
+ packed |= shift_bits(flatFlags, 24, 8);
+ return packed;
+}
+
+static uint32_t unpack_v68(SkPaint* paint, uint32_t packed, SkSafeRange& safe) {
+ paint->setAntiAlias((packed & 1) != 0);
+ paint->setDither((packed & 2) != 0);
+ packed >>= 8;
+ {
+ unsigned mode = packed & 0xFF;
+ if (mode != CUSTOM_BLEND_MODE_SENTINEL) { // sentinel for custom blender
+ paint->setBlendMode(safe.checkLE(mode, SkBlendMode::kLastMode));
+ }
+ // else we will unflatten the custom blender
+ }
+ packed >>= 8;
+ paint->setStrokeCap(safe.checkLE(packed & 0x3, SkPaint::kLast_Cap));
+ packed >>= 2;
+ paint->setStrokeJoin(safe.checkLE(packed & 0x3, SkPaint::kLast_Join));
+ packed >>= 2;
+ paint->setStyle(safe.checkLE(packed & 0x3, SkPaint::kStrokeAndFill_Style));
+ packed >>= 2;
+ // skip the (now ignored) filterquality bits
+ packed >>= 2;
+
+ return packed;
+}
+
+/* To save space/time, we analyze the paint, and write a truncated version of
+   it if there are no tricky elements like shaders, etc.
+ */
+void SkPaintPriv::Flatten(const SkPaint& paint, SkWriteBuffer& buffer) {
+ uint8_t flatFlags = 0;
+
+ if (paint.getPathEffect() ||
+ paint.getShader() ||
+ paint.getMaskFilter() ||
+ paint.getColorFilter() ||
+ paint.getImageFilter() ||
+ !paint.asBlendMode()) {
+ flatFlags |= kHasEffects_FlatFlag;
+ }
+
+ buffer.writeScalar(paint.getStrokeWidth());
+ buffer.writeScalar(paint.getStrokeMiter());
+ buffer.writeColor4f(paint.getColor4f());
+
+ buffer.write32(pack_v68(paint, flatFlags));
+
+ if (flatFlags & kHasEffects_FlatFlag) {
+ buffer.writeFlattenable(paint.getPathEffect());
+ buffer.writeFlattenable(paint.getShader());
+ buffer.writeFlattenable(paint.getMaskFilter());
+ buffer.writeFlattenable(paint.getColorFilter());
+ buffer.writeFlattenable(paint.getImageFilter());
+ buffer.writeFlattenable(paint.getBlender());
+ }
+}
+
+SkPaint SkPaintPriv::Unflatten(SkReadBuffer& buffer) {
+ SkPaint paint;
+
+ paint.setStrokeWidth(buffer.readScalar());
+ paint.setStrokeMiter(buffer.readScalar());
+ {
+ SkColor4f color;
+ buffer.readColor4f(&color);
+ paint.setColor(color, sk_srgb_singleton());
+ }
+
+ SkSafeRange safe;
+ unsigned flatFlags = unpack_v68(&paint, buffer.readUInt(), safe);
+
+ if (!(flatFlags & kHasEffects_FlatFlag)) {
+ // This is a simple SkPaint without any effects, so clear all the effect-related fields.
+ paint.setPathEffect(nullptr);
+ paint.setShader(nullptr);
+ paint.setMaskFilter(nullptr);
+ paint.setColorFilter(nullptr);
+ paint.setImageFilter(nullptr);
+ } else if (buffer.isVersionLT(SkPicturePriv::kSkBlenderInSkPaint)) {
+ // This paint predates the introduction of user blend functions (via SkBlender).
+ paint.setPathEffect(buffer.readPathEffect());
+ paint.setShader(buffer.readShader());
+ paint.setMaskFilter(buffer.readMaskFilter());
+ paint.setColorFilter(buffer.readColorFilter());
+ (void)buffer.read32(); // was drawLooper (now deprecated)
+ paint.setImageFilter(buffer.readImageFilter());
+ } else {
+ paint.setPathEffect(buffer.readPathEffect());
+ paint.setShader(buffer.readShader());
+ paint.setMaskFilter(buffer.readMaskFilter());
+ paint.setColorFilter(buffer.readColorFilter());
+ paint.setImageFilter(buffer.readImageFilter());
+ paint.setBlender(buffer.readBlender());
+ }
+
+ if (!buffer.validate(safe.ok())) {
+ paint.reset();
+ }
+ return paint;
+}
diff --git a/gfx/skia/skia/src/core/SkPaintPriv.h b/gfx/skia/skia/src/core/SkPaintPriv.h
new file mode 100644
index 0000000000..0d4b5d05b4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPaintPriv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPaintPriv_DEFINED
+#define SkPaintPriv_DEFINED
+
+#include "include/core/SkPaint.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+enum SkColorType : int;
+
+class SkPaintPriv {
+public:
+ enum ShaderOverrideOpacity {
+ kNone_ShaderOverrideOpacity, //!< there is no overriding shader (bitmap or image)
+ kOpaque_ShaderOverrideOpacity, //!< the overriding shader is opaque
+ kNotOpaque_ShaderOverrideOpacity, //!< the overriding shader may not be opaque
+ };
+
+ /**
+ * Returns true if drawing with this paint (or nullptr) will ovewrite all affected pixels.
+ *
+ * Note: returns conservative true, meaning it may return false even though the paint might
+ * in fact overwrite its pixels.
+ */
+ static bool Overwrites(const SkPaint* paint, ShaderOverrideOpacity);
+
+ static bool ShouldDither(const SkPaint&, SkColorType);
+
+ /*
+ * The luminance color is used to determine which Gamma Canonical color to map to. This is
+ * really only used by backends which want to cache glyph masks, and need some way to know if
+ * they need to generate new masks based off a given color.
+ */
+ static SkColor ComputeLuminanceColor(const SkPaint&);
+
+ /** Serializes SkPaint into a buffer. A companion unflatten() call
+ can reconstitute the paint at a later time.
+
+ @param buffer SkWriteBuffer receiving the flattened SkPaint data
+ */
+ static void Flatten(const SkPaint& paint, SkWriteBuffer& buffer);
+
+ /** Populates SkPaint, typically from a serialized stream, created by calling
+ flatten() at an earlier time.
+ */
+ static SkPaint Unflatten(SkReadBuffer& buffer);
+
+ // If this paint has any color filter, fold it into the shader and/or paint color
+ // so that it draws the same but getColorFilter() returns nullptr.
+ //
+ // Since we may be filtering now, we need to know what color space to filter in,
+ // typically the color space of the device we're drawing into.
+ static void RemoveColorFilter(SkPaint*, SkColorSpace* dstCS);
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPath.cpp b/gfx/skia/skia/src/core/SkPath.cpp
new file mode 100644
index 0000000000..2e9cfa9927
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPath.cpp
@@ -0,0 +1,3918 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+
+#include "include/core/SkPathBuilder.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/SkPathRef.h"
+#include "include/private/base/SkFloatBits.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkPathEnums.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkTLazy.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkCubicClipper.h"
+#include "src/core/SkEdgeClipper.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkPathMakers.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkStringUtils.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <iterator>
+#include <utility>
+
+struct SkPath_Storage_Equivalent {
+ void* fPtr;
+ int32_t fIndex;
+ uint32_t fFlags;
+};
+
+static_assert(sizeof(SkPath) == sizeof(SkPath_Storage_Equivalent),
+ "Please keep an eye on SkPath packing.");
+
+static float poly_eval(float A, float B, float C, float t) {
+ return (A * t + B) * t + C;
+}
+
+static float poly_eval(float A, float B, float C, float D, float t) {
+ return ((A * t + B) * t + C) * t + D;
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Path.bounds is defined to be the bounds of all the control points.
+ * If we called bounds.join(r) we would skip r if r was empty, which breaks
+ * our promise. Hence we have a custom joiner that doesn't look at emptiness
+ */
+static void joinNoEmptyChecks(SkRect* dst, const SkRect& src) {
+ dst->fLeft = std::min(dst->fLeft, src.fLeft);
+ dst->fTop = std::min(dst->fTop, src.fTop);
+ dst->fRight = std::max(dst->fRight, src.fRight);
+ dst->fBottom = std::max(dst->fBottom, src.fBottom);
+}
+
+static bool is_degenerate(const SkPath& path) {
+ return (path.countVerbs() - SkPathPriv::LeadingMoveToCount(path)) == 0;
+}
+
+class SkAutoDisableDirectionCheck {
+public:
+ SkAutoDisableDirectionCheck(SkPath* path) : fPath(path) {
+ fSaved = static_cast<SkPathFirstDirection>(fPath->getFirstDirection());
+ }
+
+ ~SkAutoDisableDirectionCheck() {
+ fPath->setFirstDirection(fSaved);
+ }
+
+private:
+ SkPath* fPath;
+ SkPathFirstDirection fSaved;
+};
+
+/* This class's constructor/destructor bracket a path editing operation. It is
+ used when we know the bounds of the amount we are going to add to the path
+ (usually a new contour, but not required).
+
+ It captures some state about the path up front (i.e. if it already has a
+ cached bounds), and then if it can, it updates the cache bounds explicitly,
+ avoiding the need to revisit all of the points in getBounds().
+
+ It also notes if the path was originally degenerate, and if so, sets
+ isConvex to true. Thus it can only be used if the contour being added is
+ convex.
+ */
+class SkAutoPathBoundsUpdate {
+public:
+ SkAutoPathBoundsUpdate(SkPath* path, const SkRect& r) : fPath(path), fRect(r) {
+ // Cannot use fRect for our bounds unless we know it is sorted
+ fRect.sort();
+ // Mark the path's bounds as dirty if (1) they are, or (2) the path
+ // is non-finite, and therefore its bounds are not meaningful
+ fHasValidBounds = path->hasComputedBounds() && path->isFinite();
+ fEmpty = path->isEmpty();
+ if (fHasValidBounds && !fEmpty) {
+ joinNoEmptyChecks(&fRect, fPath->getBounds());
+ }
+ fDegenerate = is_degenerate(*path);
+ }
+
+ ~SkAutoPathBoundsUpdate() {
+ fPath->setConvexity(fDegenerate ? SkPathConvexity::kConvex
+ : SkPathConvexity::kUnknown);
+ if ((fEmpty || fHasValidBounds) && fRect.isFinite()) {
+ fPath->setBounds(fRect);
+ }
+ }
+
+private:
+ SkPath* fPath;
+ SkRect fRect;
+ bool fHasValidBounds;
+ bool fDegenerate;
+ bool fEmpty;
+};
+
+////////////////////////////////////////////////////////////////////////////
+
+/*
+ Stores the verbs and points as they are given to us, with exceptions:
+ - we only record "Close" if it was immediately preceeded by Move | Line | Quad | Cubic
+ - we insert a Move(0,0) if Line | Quad | Cubic is our first command
+
+ The iterator does more cleanup, especially if forceClose == true
+ 1. If we encounter degenerate segments, remove them
+ 2. if we encounter Close, return a cons'd up Line() first (if the curr-pt != start-pt)
+ 3. if we encounter Move without a preceeding Close, and forceClose is true, goto #2
+ 4. if we encounter Line | Quad | Cubic after Close, cons up a Move
+*/
+
+////////////////////////////////////////////////////////////////////////////
+
+// flag to require a moveTo if we begin with something else, like lineTo etc.
+// This will also be the value of lastMoveToIndex for a single contour
+// ending with close, so countVerbs needs to be checked against 0.
+#define INITIAL_LASTMOVETOINDEX_VALUE ~0
+
+SkPath::SkPath()
+ : fPathRef(SkPathRef::CreateEmpty()) {
+ this->resetFields();
+ fIsVolatile = false;
+}
+
+SkPath::SkPath(sk_sp<SkPathRef> pr, SkPathFillType ft, bool isVolatile, SkPathConvexity ct,
+ SkPathFirstDirection firstDirection)
+ : fPathRef(std::move(pr))
+ , fLastMoveToIndex(INITIAL_LASTMOVETOINDEX_VALUE)
+ , fConvexity((uint8_t)ct)
+ , fFirstDirection((uint8_t)firstDirection)
+ , fFillType((unsigned)ft)
+ , fIsVolatile(isVolatile)
+{}
+
+void SkPath::resetFields() {
+ //fPathRef is assumed to have been emptied by the caller.
+ fLastMoveToIndex = INITIAL_LASTMOVETOINDEX_VALUE;
+ fFillType = SkToU8(SkPathFillType::kWinding);
+ this->setConvexity(SkPathConvexity::kUnknown);
+ this->setFirstDirection(SkPathFirstDirection::kUnknown);
+
+ // We don't touch Android's fSourcePath. It's used to track texture garbage collection, so we
+ // don't want to muck with it if it's been set to something non-nullptr.
+}
+
+SkPath::SkPath(const SkPath& that)
+ : fPathRef(SkRef(that.fPathRef.get())) {
+ this->copyFields(that);
+ SkDEBUGCODE(that.validate();)
+}
+
+SkPath::~SkPath() {
+ SkDEBUGCODE(this->validate();)
+}
+
+SkPath& SkPath::operator=(const SkPath& that) {
+ SkDEBUGCODE(that.validate();)
+
+ if (this != &that) {
+ fPathRef.reset(SkRef(that.fPathRef.get()));
+ this->copyFields(that);
+ }
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+void SkPath::copyFields(const SkPath& that) {
+ //fPathRef is assumed to have been set by the caller.
+ fLastMoveToIndex = that.fLastMoveToIndex;
+ fFillType = that.fFillType;
+ fIsVolatile = that.fIsVolatile;
+
+ // Non-atomic assignment of atomic values.
+ this->setConvexity(that.getConvexityOrUnknown());
+ this->setFirstDirection(that.getFirstDirection());
+}
+
+bool operator==(const SkPath& a, const SkPath& b) {
+ // note: don't need to look at isConvex or bounds, since just comparing the
+ // raw data is sufficient.
+ return &a == &b ||
+ (a.fFillType == b.fFillType && *a.fPathRef == *b.fPathRef);
+}
+
+void SkPath::swap(SkPath& that) {
+ if (this != &that) {
+ fPathRef.swap(that.fPathRef);
+ std::swap(fLastMoveToIndex, that.fLastMoveToIndex);
+
+ const auto ft = fFillType;
+ fFillType = that.fFillType;
+ that.fFillType = ft;
+
+ const auto iv = fIsVolatile;
+ fIsVolatile = that.fIsVolatile;
+ that.fIsVolatile = iv;
+
+ // Non-atomic swaps of atomic values.
+ SkPathConvexity c = this->getConvexityOrUnknown();
+ this->setConvexity(that.getConvexityOrUnknown());
+ that.setConvexity(c);
+
+ SkPathFirstDirection fd = this->getFirstDirection();
+ this->setFirstDirection(that.getFirstDirection());
+ that.setFirstDirection(fd);
+ }
+}
+
+bool SkPath::isInterpolatable(const SkPath& compare) const {
+ // need the same structure (verbs, conicweights) and same point-count
+ return fPathRef->fPoints.size() == compare.fPathRef->fPoints.size() &&
+ fPathRef->fVerbs == compare.fPathRef->fVerbs &&
+ fPathRef->fConicWeights == compare.fPathRef->fConicWeights;
+}
+
+bool SkPath::interpolate(const SkPath& ending, SkScalar weight, SkPath* out) const {
+ int pointCount = fPathRef->countPoints();
+ if (pointCount != ending.fPathRef->countPoints()) {
+ return false;
+ }
+ if (!pointCount) {
+ return true;
+ }
+ out->reset();
+ out->addPath(*this);
+ fPathRef->interpolate(*ending.fPathRef, weight, out->fPathRef.get());
+ return true;
+}
+
+static inline bool check_edge_against_rect(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkRect& rect,
+ SkPathFirstDirection dir) {
+ const SkPoint* edgeBegin;
+ SkVector v;
+ if (SkPathFirstDirection::kCW == dir) {
+ v = p1 - p0;
+ edgeBegin = &p0;
+ } else {
+ v = p0 - p1;
+ edgeBegin = &p1;
+ }
+ if (v.fX || v.fY) {
+ // check the cross product of v with the vec from edgeBegin to each rect corner
+ SkScalar yL = v.fY * (rect.fLeft - edgeBegin->fX);
+ SkScalar xT = v.fX * (rect.fTop - edgeBegin->fY);
+ SkScalar yR = v.fY * (rect.fRight - edgeBegin->fX);
+ SkScalar xB = v.fX * (rect.fBottom - edgeBegin->fY);
+ if ((xT < yL) || (xT < yR) || (xB < yL) || (xB < yR)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SkPath::conservativelyContainsRect(const SkRect& rect) const {
+ // This only handles non-degenerate convex paths currently.
+ if (!this->isConvex()) {
+ return false;
+ }
+
+ SkPathFirstDirection direction = SkPathPriv::ComputeFirstDirection(*this);
+ if (direction == SkPathFirstDirection::kUnknown) {
+ return false;
+ }
+
+ SkPoint firstPt;
+ SkPoint prevPt;
+ int segmentCount = 0;
+ SkDEBUGCODE(int moveCnt = 0;)
+
+ for (auto [verb, pts, weight] : SkPathPriv::Iterate(*this)) {
+ if (verb == SkPathVerb::kClose || (segmentCount > 0 && verb == SkPathVerb::kMove)) {
+ // Closing the current contour; but since convexity is a precondition, it's the only
+ // contour that matters.
+ SkASSERT(moveCnt);
+ segmentCount++;
+ break;
+ } else if (verb == SkPathVerb::kMove) {
+ // A move at the start of the contour (or multiple leading moves, in which case we
+ // keep the last one before a non-move verb).
+ SkASSERT(!segmentCount);
+ SkDEBUGCODE(++moveCnt);
+ firstPt = prevPt = pts[0];
+ } else {
+ int pointCount = SkPathPriv::PtsInVerb((unsigned) verb);
+ SkASSERT(pointCount > 0);
+
+ if (!SkPathPriv::AllPointsEq(pts, pointCount + 1)) {
+ SkASSERT(moveCnt);
+ int nextPt = pointCount;
+ segmentCount++;
+
+ if (SkPathVerb::kConic == verb) {
+ SkConic orig;
+ orig.set(pts, *weight);
+ SkPoint quadPts[5];
+ int count = orig.chopIntoQuadsPOW2(quadPts, 1);
+ SkASSERT_RELEASE(2 == count);
+
+ if (!check_edge_against_rect(quadPts[0], quadPts[2], rect, direction)) {
+ return false;
+ }
+ if (!check_edge_against_rect(quadPts[2], quadPts[4], rect, direction)) {
+ return false;
+ }
+ } else {
+ if (!check_edge_against_rect(prevPt, pts[nextPt], rect, direction)) {
+ return false;
+ }
+ }
+ prevPt = pts[nextPt];
+ }
+ }
+ }
+
+ if (segmentCount) {
+ return check_edge_against_rect(prevPt, firstPt, rect, direction);
+ }
+ return false;
+}
+
+uint32_t SkPath::getGenerationID() const {
+ return fPathRef->genID(fFillType);
+}
+
+SkPath& SkPath::reset() {
+ SkDEBUGCODE(this->validate();)
+
+ if (fPathRef->unique()) {
+ fPathRef->reset();
+ } else {
+ fPathRef.reset(SkPathRef::CreateEmpty());
+ }
+ this->resetFields();
+ return *this;
+}
+
+SkPath& SkPath::rewind() {
+ SkDEBUGCODE(this->validate();)
+
+ SkPathRef::Rewind(&fPathRef);
+ this->resetFields();
+ return *this;
+}
+
+bool SkPath::isLastContourClosed() const {
+ int verbCount = fPathRef->countVerbs();
+ if (0 == verbCount) {
+ return false;
+ }
+ return kClose_Verb == fPathRef->atVerb(verbCount - 1);
+}
+
+bool SkPath::isLine(SkPoint line[2]) const {
+ int verbCount = fPathRef->countVerbs();
+
+ if (2 == verbCount) {
+ SkASSERT(kMove_Verb == fPathRef->atVerb(0));
+ if (kLine_Verb == fPathRef->atVerb(1)) {
+ SkASSERT(2 == fPathRef->countPoints());
+ if (line) {
+ const SkPoint* pts = fPathRef->points();
+ line[0] = pts[0];
+ line[1] = pts[1];
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkPath::isEmpty() const {
+ SkDEBUGCODE(this->validate();)
+ return 0 == fPathRef->countVerbs();
+}
+
+bool SkPath::isFinite() const {
+ SkDEBUGCODE(this->validate();)
+ return fPathRef->isFinite();
+}
+
+bool SkPath::isConvex() const {
+ return SkPathConvexity::kConvex == this->getConvexity();
+}
+
+const SkRect& SkPath::getBounds() const {
+ return fPathRef->getBounds();
+}
+
+uint32_t SkPath::getSegmentMasks() const {
+ return fPathRef->getSegmentMasks();
+}
+
+bool SkPath::isValid() const {
+ return this->isValidImpl() && fPathRef->isValid();
+}
+
+bool SkPath::hasComputedBounds() const {
+ SkDEBUGCODE(this->validate();)
+ return fPathRef->hasComputedBounds();
+}
+
+void SkPath::setBounds(const SkRect& rect) {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.setBounds(rect);
+}
+
+SkPathConvexity SkPath::getConvexityOrUnknown() const {
+ return (SkPathConvexity)fConvexity.load(std::memory_order_relaxed);
+}
+
+#ifdef SK_DEBUG
+void SkPath::validate() const {
+ SkASSERT(this->isValidImpl());
+}
+
+void SkPath::validateRef() const {
+ // This will SkASSERT if not valid.
+ fPathRef->validate();
+}
+#endif
+/*
+ Determines if path is a rect by keeping track of changes in direction
+ and looking for a loop either clockwise or counterclockwise.
+
+ The direction is computed such that:
+ 0: vertical up
+ 1: horizontal left
+ 2: vertical down
+ 3: horizontal right
+
+A rectangle cycles up/right/down/left or up/left/down/right.
+
+The test fails if:
+ The path is closed, and followed by a line.
+ A second move creates a new endpoint.
+ A diagonal line is parsed.
+ There's more than four changes of direction.
+ There's a discontinuity on the line (e.g., a move in the middle)
+ The line reverses direction.
+ The path contains a quadratic or cubic.
+ The path contains fewer than four points.
+ *The rectangle doesn't complete a cycle.
+ *The final point isn't equal to the first point.
+
+ *These last two conditions we relax if we have a 3-edge path that would
+ form a rectangle if it were closed (as we do when we fill a path)
+
+It's OK if the path has:
+ Several colinear line segments composing a rectangle side.
+ Single points on the rectangle side.
+
+The direction takes advantage of the corners found since opposite sides
+must travel in opposite directions.
+
+FIXME: Allow colinear quads and cubics to be treated like lines.
+FIXME: If the API passes fill-only, return true if the filled stroke
+ is a rectangle, though the caller failed to close the path.
+
+ directions values:
+ 0x1 is set if the segment is horizontal
+ 0x2 is set if the segment is moving to the right or down
+ thus:
+ two directions are opposites iff (dirA ^ dirB) == 0x2
+ two directions are perpendicular iff (dirA ^ dirB) == 0x1
+
+ */
+static int rect_make_dir(SkScalar dx, SkScalar dy) {
+ return ((0 != dx) << 0) | ((dx > 0 || dy > 0) << 1);
+}
+
+bool SkPath::isRect(SkRect* rect, bool* isClosed, SkPathDirection* direction) const {
+ SkDEBUGCODE(this->validate();)
+ int currVerb = 0;
+ const SkPoint* pts = fPathRef->points();
+ return SkPathPriv::IsRectContour(*this, false, &currVerb, &pts, isClosed, direction, rect);
+}
+
+bool SkPath::isOval(SkRect* bounds) const {
+ return SkPathPriv::IsOval(*this, bounds, nullptr, nullptr);
+}
+
+bool SkPath::isRRect(SkRRect* rrect) const {
+ return SkPathPriv::IsRRect(*this, rrect, nullptr, nullptr);
+}
+
+int SkPath::countPoints() const {
+ return fPathRef->countPoints();
+}
+
+int SkPath::getPoints(SkPoint dst[], int max) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkASSERT(max >= 0);
+ SkASSERT(!max || dst);
+ int count = std::min(max, fPathRef->countPoints());
+ sk_careful_memcpy(dst, fPathRef->points(), count * sizeof(SkPoint));
+ return fPathRef->countPoints();
+}
+
+SkPoint SkPath::getPoint(int index) const {
+ if ((unsigned)index < (unsigned)fPathRef->countPoints()) {
+ return fPathRef->atPoint(index);
+ }
+ return SkPoint::Make(0, 0);
+}
+
+int SkPath::countVerbs() const {
+ return fPathRef->countVerbs();
+}
+
+int SkPath::getVerbs(uint8_t dst[], int max) const {
+ SkDEBUGCODE(this->validate();)
+
+ SkASSERT(max >= 0);
+ SkASSERT(!max || dst);
+ int count = std::min(max, fPathRef->countVerbs());
+ if (count) {
+ memcpy(dst, fPathRef->verbsBegin(), count);
+ }
+ return fPathRef->countVerbs();
+}
+
+size_t SkPath::approximateBytesUsed() const {
+ size_t size = sizeof (SkPath);
+ if (fPathRef != nullptr) {
+ size += fPathRef->approximateBytesUsed();
+ }
+ return size;
+}
+
+bool SkPath::getLastPt(SkPoint* lastPt) const {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count > 0) {
+ if (lastPt) {
+ *lastPt = fPathRef->atPoint(count - 1);
+ }
+ return true;
+ }
+ if (lastPt) {
+ lastPt->set(0, 0);
+ }
+ return false;
+}
+
+void SkPath::setPt(int index, SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count <= index) {
+ return;
+ } else {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.atPoint(index)->set(x, y);
+ }
+}
+
+void SkPath::setLastPt(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countPoints();
+ if (count == 0) {
+ this->moveTo(x, y);
+ } else {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.atPoint(count-1)->set(x, y);
+ }
+}
+
+// This is the public-facing non-const setConvexity().
+void SkPath::setConvexity(SkPathConvexity c) {
+ fConvexity.store((uint8_t)c, std::memory_order_relaxed);
+}
+
+// Const hooks for working with fConvexity and fFirstDirection from const methods.
+void SkPath::setConvexity(SkPathConvexity c) const {
+ fConvexity.store((uint8_t)c, std::memory_order_relaxed);
+}
+void SkPath::setFirstDirection(SkPathFirstDirection d) const {
+ fFirstDirection.store((uint8_t)d, std::memory_order_relaxed);
+}
+SkPathFirstDirection SkPath::getFirstDirection() const {
+ return (SkPathFirstDirection)fFirstDirection.load(std::memory_order_relaxed);
+}
+
+bool SkPath::isConvexityAccurate() const {
+ SkPathConvexity convexity = this->getConvexityOrUnknown();
+ if (convexity != SkPathConvexity::kUnknown) {
+ auto conv = this->computeConvexity();
+ if (conv != convexity) {
+ SkASSERT(false);
+ return false;
+ }
+ }
+ return true;
+}
+
+SkPathConvexity SkPath::getConvexity() const {
+// Enable once we fix all the bugs
+// SkDEBUGCODE(this->isConvexityAccurate());
+ SkPathConvexity convexity = this->getConvexityOrUnknown();
+ if (convexity == SkPathConvexity::kUnknown) {
+ convexity = this->computeConvexity();
+ }
+ SkASSERT(convexity != SkPathConvexity::kUnknown);
+ return convexity;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Construction methods
+
+SkPath& SkPath::dirtyAfterEdit() {
+ this->setConvexity(SkPathConvexity::kUnknown);
+ this->setFirstDirection(SkPathFirstDirection::kUnknown);
+
+#ifdef SK_DEBUG
+ // enable this as needed for testing, but it slows down some chrome tests so much
+ // that they don't complete, so we don't enable it by default
+ // e.g. TEST(IdentifiabilityPaintOpDigestTest, MassiveOpSkipped)
+ if (this->countVerbs() < 16) {
+ SkASSERT(fPathRef->dataMatchesVerbs());
+ }
+#endif
+
+ return *this;
+}
+
+void SkPath::incReserve(int inc) {
+ SkDEBUGCODE(this->validate();)
+ if (inc > 0) {
+ SkPathRef::Editor(&fPathRef, inc, inc);
+ }
+ SkDEBUGCODE(this->validate();)
+}
+
+SkPath& SkPath::moveTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ SkPathRef::Editor ed(&fPathRef);
+
+ // remember our index
+ fLastMoveToIndex = fPathRef->countPoints();
+
+ ed.growForVerb(kMove_Verb)->set(x, y);
+
+ return this->dirtyAfterEdit();
+}
+
+SkPath& SkPath::rMoveTo(SkScalar x, SkScalar y) {
+ SkPoint pt = {0,0};
+ int count = fPathRef->countPoints();
+ if (count > 0) {
+ if (fLastMoveToIndex >= 0) {
+ pt = fPathRef->atPoint(count - 1);
+ } else {
+ pt = fPathRef->atPoint(~fLastMoveToIndex);
+ }
+ }
+ return this->moveTo(pt.fX + x, pt.fY + y);
+}
+
+void SkPath::injectMoveToIfNeeded() {
+ if (fLastMoveToIndex < 0) {
+ SkScalar x, y;
+ if (fPathRef->countVerbs() == 0) {
+ x = y = 0;
+ } else {
+ const SkPoint& pt = fPathRef->atPoint(~fLastMoveToIndex);
+ x = pt.fX;
+ y = pt.fY;
+ }
+ this->moveTo(x, y);
+ }
+}
+
+SkPath& SkPath::lineTo(SkScalar x, SkScalar y) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ ed.growForVerb(kLine_Verb)->set(x, y);
+
+ return this->dirtyAfterEdit();
+}
+
+SkPath& SkPath::rLineTo(SkScalar x, SkScalar y) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->lineTo(pt.fX + x, pt.fY + y);
+}
+
+SkPath& SkPath::quadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kQuad_Verb);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+
+ return this->dirtyAfterEdit();
+}
+
+SkPath& SkPath::rQuadTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->quadTo(pt.fX + x1, pt.fY + y1, pt.fX + x2, pt.fY + y2);
+}
+
+SkPath& SkPath::conicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar w) {
+ // check for <= 0 or NaN with this test
+ if (!(w > 0)) {
+ this->lineTo(x2, y2);
+ } else if (!SkScalarIsFinite(w)) {
+ this->lineTo(x1, y1);
+ this->lineTo(x2, y2);
+ } else if (SK_Scalar1 == w) {
+ this->quadTo(x1, y1, x2, y2);
+ } else {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kConic_Verb, w);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+
+ (void)this->dirtyAfterEdit();
+ }
+ return *this;
+}
+
+SkPath& SkPath::rConicTo(SkScalar dx1, SkScalar dy1, SkScalar dx2, SkScalar dy2,
+ SkScalar w) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->conicTo(pt.fX + dx1, pt.fY + dy1, pt.fX + dx2, pt.fY + dy2, w);
+}
+
+SkPath& SkPath::cubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3) {
+ SkDEBUGCODE(this->validate();)
+
+ this->injectMoveToIfNeeded();
+
+ SkPathRef::Editor ed(&fPathRef);
+ SkPoint* pts = ed.growForVerb(kCubic_Verb);
+ pts[0].set(x1, y1);
+ pts[1].set(x2, y2);
+ pts[2].set(x3, y3);
+
+ return this->dirtyAfterEdit();
+}
+
+SkPath& SkPath::rCubicTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2,
+ SkScalar x3, SkScalar y3) {
+ this->injectMoveToIfNeeded(); // This can change the result of this->getLastPt().
+ SkPoint pt;
+ this->getLastPt(&pt);
+ return this->cubicTo(pt.fX + x1, pt.fY + y1, pt.fX + x2, pt.fY + y2,
+ pt.fX + x3, pt.fY + y3);
+}
+
+SkPath& SkPath::close() {
+ SkDEBUGCODE(this->validate();)
+
+ int count = fPathRef->countVerbs();
+ if (count > 0) {
+ switch (fPathRef->atVerb(count - 1)) {
+ case kLine_Verb:
+ case kQuad_Verb:
+ case kConic_Verb:
+ case kCubic_Verb:
+ case kMove_Verb: {
+ SkPathRef::Editor ed(&fPathRef);
+ ed.growForVerb(kClose_Verb);
+ break;
+ }
+ case kClose_Verb:
+ // don't add a close if it's the first verb or a repeat
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+
+ // signal that we need a moveTo to follow us (unless we're done)
+#if 0
+ if (fLastMoveToIndex >= 0) {
+ fLastMoveToIndex = ~fLastMoveToIndex;
+ }
+#else
+ fLastMoveToIndex ^= ~fLastMoveToIndex >> (8 * sizeof(fLastMoveToIndex) - 1);
+#endif
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void assert_known_direction(SkPathDirection dir) {
+ SkASSERT(SkPathDirection::kCW == dir || SkPathDirection::kCCW == dir);
+}
+
+SkPath& SkPath::addRect(const SkRect &rect, SkPathDirection dir, unsigned startIndex) {
+ assert_known_direction(dir);
+ this->setFirstDirection(this->hasOnlyMoveTos() ? (SkPathFirstDirection)dir
+ : SkPathFirstDirection::kUnknown);
+ SkAutoDisableDirectionCheck addc(this);
+ SkAutoPathBoundsUpdate apbu(this, rect);
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+
+ const int kVerbs = 5; // moveTo + 3x lineTo + close
+ this->incReserve(kVerbs);
+
+ SkPath_RectPointIterator iter(rect, dir, startIndex);
+
+ this->moveTo(iter.current());
+ this->lineTo(iter.next());
+ this->lineTo(iter.next());
+ this->lineTo(iter.next());
+ this->close();
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+ return *this;
+}
+
+SkPath& SkPath::addPoly(const SkPoint pts[], int count, bool close) {
+ SkDEBUGCODE(this->validate();)
+ if (count <= 0) {
+ return *this;
+ }
+
+ fLastMoveToIndex = fPathRef->countPoints();
+
+ // +close makes room for the extra kClose_Verb
+ SkPathRef::Editor ed(&fPathRef, count+close, count);
+
+ ed.growForVerb(kMove_Verb)->set(pts[0].fX, pts[0].fY);
+ if (count > 1) {
+ SkPoint* p = ed.growForRepeatedVerb(kLine_Verb, count - 1);
+ memcpy(p, &pts[1], (count-1) * sizeof(SkPoint));
+ }
+
+ if (close) {
+ ed.growForVerb(kClose_Verb);
+ fLastMoveToIndex ^= ~fLastMoveToIndex >> (8 * sizeof(fLastMoveToIndex) - 1);
+ }
+
+ (void)this->dirtyAfterEdit();
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+static bool arc_is_lone_point(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ SkPoint* pt) {
+ if (0 == sweepAngle && (0 == startAngle || SkIntToScalar(360) == startAngle)) {
+ // Chrome uses this path to move into and out of ovals. If not
+ // treated as a special case the moves can distort the oval's
+ // bounding box (and break the circle special case).
+ pt->set(oval.fRight, oval.centerY());
+ return true;
+ } else if (0 == oval.width() && 0 == oval.height()) {
+ // Chrome will sometimes create 0 radius round rects. Having degenerate
+ // quad segments in the path prevents the path from being recognized as
+ // a rect.
+ // TODO: optimizing the case where only one of width or height is zero
+ // should also be considered. This case, however, doesn't seem to be
+ // as common as the single point case.
+ pt->set(oval.fRight, oval.fTop);
+ return true;
+ }
+ return false;
+}
+
+// Return the unit vectors pointing at the start/stop points for the given start/sweep angles
+//
+static void angles_to_unit_vectors(SkScalar startAngle, SkScalar sweepAngle,
+ SkVector* startV, SkVector* stopV, SkRotationDirection* dir) {
+ SkScalar startRad = SkDegreesToRadians(startAngle),
+ stopRad = SkDegreesToRadians(startAngle + sweepAngle);
+
+ startV->fY = SkScalarSinSnapToZero(startRad);
+ startV->fX = SkScalarCosSnapToZero(startRad);
+ stopV->fY = SkScalarSinSnapToZero(stopRad);
+ stopV->fX = SkScalarCosSnapToZero(stopRad);
+
+ /* If the sweep angle is nearly (but less than) 360, then due to precision
+ loss in radians-conversion and/or sin/cos, we may end up with coincident
+ vectors, which will fool SkBuildQuadArc into doing nothing (bad) instead
+ of drawing a nearly complete circle (good).
+ e.g. canvas.drawArc(0, 359.99, ...)
+ -vs- canvas.drawArc(0, 359.9, ...)
+ We try to detect this edge case, and tweak the stop vector
+ */
+ if (*startV == *stopV) {
+ SkScalar sw = SkScalarAbs(sweepAngle);
+ if (sw < SkIntToScalar(360) && sw > SkIntToScalar(359)) {
+ // make a guess at a tiny angle (in radians) to tweak by
+ SkScalar deltaRad = SkScalarCopySign(SK_Scalar1/512, sweepAngle);
+ // not sure how much will be enough, so we use a loop
+ do {
+ stopRad -= deltaRad;
+ stopV->fY = SkScalarSinSnapToZero(stopRad);
+ stopV->fX = SkScalarCosSnapToZero(stopRad);
+ } while (*startV == *stopV);
+ }
+ }
+ *dir = sweepAngle > 0 ? kCW_SkRotationDirection : kCCW_SkRotationDirection;
+}
+
+/**
+ * If this returns 0, then the caller should just line-to the singlePt, else it should
+ * ignore singlePt and append the specified number of conics.
+ */
+static int build_arc_conics(const SkRect& oval, const SkVector& start, const SkVector& stop,
+ SkRotationDirection dir, SkConic conics[SkConic::kMaxConicsForArc],
+ SkPoint* singlePt) {
+ SkMatrix matrix;
+
+ matrix.setScale(SkScalarHalf(oval.width()), SkScalarHalf(oval.height()));
+ matrix.postTranslate(oval.centerX(), oval.centerY());
+
+ int count = SkConic::BuildUnitArc(start, stop, dir, &matrix, conics);
+ if (0 == count) {
+ matrix.mapXY(stop.x(), stop.y(), singlePt);
+ }
+ return count;
+}
+
+SkPath& SkPath::addRoundRect(const SkRect& rect, const SkScalar radii[],
+ SkPathDirection dir) {
+ SkRRect rrect;
+ rrect.setRectRadii(rect, (const SkVector*) radii);
+ return this->addRRect(rrect, dir);
+}
+
+SkPath& SkPath::addRRect(const SkRRect& rrect, SkPathDirection dir) {
+ // legacy start indices: 6 (CW) and 7(CCW)
+ return this->addRRect(rrect, dir, dir == SkPathDirection::kCW ? 6 : 7);
+}
+
+// Appends a full round-rect contour starting at point index startIndex.
+// Degenerate rrects are routed to addRect()/addOval(); otherwise the contour
+// alternates lineTo/conicTo segments produced by paired point iterators.
+SkPath& SkPath::addRRect(const SkRRect &rrect, SkPathDirection dir, unsigned startIndex) {
+ assert_known_direction(dir);
+
+ // Capture this before appending verbs: the rrect flag set below is only
+ // meaningful if the path held nothing but moveTos beforehand.
+ bool isRRect = hasOnlyMoveTos();
+ const SkRect& bounds = rrect.getBounds();
+
+ if (rrect.isRect() || rrect.isEmpty()) {
+ // degenerate(rect) => radii points are collapsing
+ this->addRect(bounds, dir, (startIndex + 1) / 2);
+ } else if (rrect.isOval()) {
+ // degenerate(oval) => line points are collapsing
+ this->addOval(bounds, dir, startIndex / 2);
+ } else {
+ this->setFirstDirection(this->hasOnlyMoveTos() ? (SkPathFirstDirection)dir
+ : SkPathFirstDirection::kUnknown);
+
+ SkAutoPathBoundsUpdate apbu(this, bounds);
+ SkAutoDisableDirectionCheck addc(this);
+
+ // we start with a conic on odd indices when moving CW vs. even indices when moving CCW
+ const bool startsWithConic = ((startIndex & 1) == (dir == SkPathDirection::kCW));
+ const SkScalar weight = SK_ScalarRoot2Over2;
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+ const int kVerbs = startsWithConic
+ ? 9 // moveTo + 4x conicTo + 3x lineTo + close
+ : 10; // moveTo + 4x lineTo + 4x conicTo + close
+ this->incReserve(kVerbs);
+
+ SkPath_RRectPointIterator rrectIter(rrect, dir, startIndex);
+ // Corner iterator indices follow the collapsed radii model,
+ // adjusted such that the start pt is "behind" the radii start pt.
+ const unsigned rectStartIndex = startIndex / 2 + (dir == SkPathDirection::kCW ? 0 : 1);
+ SkPath_RectPointIterator rectIter(bounds, dir, rectStartIndex);
+
+ this->moveTo(rrectIter.current());
+ if (startsWithConic) {
+ for (unsigned i = 0; i < 3; ++i) {
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ this->lineTo(rrectIter.next());
+ }
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ // final lineTo handled by close().
+ } else {
+ for (unsigned i = 0; i < 4; ++i) {
+ this->lineTo(rrectIter.next());
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ }
+ }
+ this->close();
+
+ SkPathRef::Editor ed(&fPathRef);
+ ed.setIsRRect(isRRect, dir == SkPathDirection::kCCW, startIndex % 8);
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+ }
+
+ SkDEBUGCODE(fPathRef->validate();)
+ return *this;
+}
+
+// Returns true if the path contains no segment verbs (line/quad/conic/cubic).
+// Note: move and close verbs are permitted, so an "empty-looking" path of
+// moveTos (and closes) still returns true.
+bool SkPath::hasOnlyMoveTos() const {
+ int count = fPathRef->countVerbs();
+ const uint8_t* verbs = fPathRef->verbsBegin();
+ for (int i = 0; i < count; ++i) {
+ if (*verbs == kLine_Verb ||
+ *verbs == kQuad_Verb ||
+ *verbs == kConic_Verb ||
+ *verbs == kCubic_Verb) {
+ return false;
+ }
+ ++verbs;
+ }
+ return true;
+}
+
+// Returns true if every point from startPtIndex onward is coincident with the
+// first of them (i.e. the geometry since that point has zero length). Fewer
+// than two points trivially counts as zero length.
+bool SkPath::isZeroLengthSincePoint(int startPtIndex) const {
+ int count = fPathRef->countPoints() - startPtIndex;
+ if (count < 2) {
+ return true;
+ }
+ const SkPoint* pts = fPathRef->points() + startPtIndex;
+ const SkPoint& first = *pts;
+ for (int index = 1; index < count; ++index) {
+ if (first != pts[index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Appends a round rect with uniform x/y corner radii. A negative radius is
+// rejected and leaves the path unchanged.
+SkPath& SkPath::addRoundRect(const SkRect& rect, SkScalar rx, SkScalar ry,
+ SkPathDirection dir) {
+ assert_known_direction(dir);
+
+ if (rx < 0 || ry < 0) {
+ return *this;
+ }
+
+ SkRRect rrect;
+ rrect.setRectXY(rect, rx, ry);
+ return this->addRRect(rrect, dir);
+}
+
+// Convenience overload: forwards with the historical default start index.
+SkPath& SkPath::addOval(const SkRect& oval, SkPathDirection dir) {
+ // legacy start index: 1
+ return this->addOval(oval, dir, 1);
+}
+
+// Appends a full oval contour (moveTo + 4 conics + close) starting at the
+// given quadrant point index, and tags the path as an oval when eligible.
+SkPath& SkPath::addOval(const SkRect &oval, SkPathDirection dir, unsigned startPointIndex) {
+ assert_known_direction(dir);
+
+ /* If addOval() is called after previous moveTo(),
+ this path is still marked as an oval. This is used to
+ fit into WebKit's calling sequences.
+ We can't simply check isEmpty() in this case, as additional
+ moveTo() would mark the path non empty.
+ */
+ bool isOval = hasOnlyMoveTos();
+ if (isOval) {
+ this->setFirstDirection((SkPathFirstDirection)dir);
+ } else {
+ this->setFirstDirection(SkPathFirstDirection::kUnknown);
+ }
+
+ SkAutoDisableDirectionCheck addc(this);
+ SkAutoPathBoundsUpdate apbu(this, oval);
+
+ SkDEBUGCODE(int initialVerbCount = this->countVerbs());
+ const int kVerbs = 6; // moveTo + 4x conicTo + close
+ this->incReserve(kVerbs);
+
+ SkPath_OvalPointIterator ovalIter(oval, dir, startPointIndex);
+ // The corner iterator pts are tracking "behind" the oval/radii pts.
+ SkPath_RectPointIterator rectIter(oval, dir, startPointIndex + (dir == SkPathDirection::kCW ? 0 : 1));
+ const SkScalar weight = SK_ScalarRoot2Over2;
+
+ this->moveTo(ovalIter.current());
+ for (unsigned i = 0; i < 4; ++i) {
+ this->conicTo(rectIter.next(), ovalIter.next(), weight);
+ }
+ this->close();
+
+ SkASSERT(this->countVerbs() == initialVerbCount + kVerbs);
+
+ SkPathRef::Editor ed(&fPathRef);
+
+ ed.setIsOval(isOval, SkPathDirection::kCCW == dir, startPointIndex % 4);
+ return *this;
+}
+
+// Appends a circle of radius r centered at (x, y). A non-positive radius is
+// a no-op (the path is unchanged).
+SkPath& SkPath::addCircle(SkScalar x, SkScalar y, SkScalar r, SkPathDirection dir) {
+ if (r > 0) {
+ this->addOval(SkRect::MakeLTRB(x - r, y - r, x + r, y + r), dir);
+ }
+ return *this;
+}
+
+// Appends an arc of the given oval. Angles are in degrees (see the
+// SkDegreesToRadians conversion below). The arc is emitted as conic segments
+// built from the unit circle, or as a single moveTo/lineTo when degenerate.
+SkPath& SkPath::arcTo(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool forceMoveTo) {
+ if (oval.width() < 0 || oval.height() < 0) {
+ return *this;
+ }
+
+ startAngle = SkScalarMod(startAngle, 360.0f);
+
+ // An empty path has nothing to connect from, so always start with a moveTo.
+ if (fPathRef->countVerbs() == 0) {
+ forceMoveTo = true;
+ }
+
+ SkPoint lonePt;
+ if (arc_is_lone_point(oval, startAngle, sweepAngle, &lonePt)) {
+ return forceMoveTo ? this->moveTo(lonePt) : this->lineTo(lonePt);
+ }
+
+ SkVector startV, stopV;
+ SkRotationDirection dir;
+ angles_to_unit_vectors(startAngle, sweepAngle, &startV, &stopV, &dir);
+
+ SkPoint singlePt;
+
+ // Adds a move-to to 'pt' if forceMoveTo is true. Otherwise a lineTo unless we're sufficiently
+ // close to 'pt' currently. This prevents spurious lineTos when adding a series of contiguous
+ // arcs from the same oval.
+ auto addPt = [&forceMoveTo, this](const SkPoint& pt) {
+ SkPoint lastPt;
+ if (forceMoveTo) {
+ this->moveTo(pt);
+ } else if (!this->getLastPt(&lastPt) ||
+ !SkScalarNearlyEqual(lastPt.fX, pt.fX) ||
+ !SkScalarNearlyEqual(lastPt.fY, pt.fY)) {
+ this->lineTo(pt);
+ }
+ };
+
+ // At this point, we know that the arc is not a lone point, but startV == stopV
+ // indicates that the sweepAngle is too small such that angles_to_unit_vectors
+ // cannot handle it.
+ if (startV == stopV) {
+ SkScalar endAngle = SkDegreesToRadians(startAngle + sweepAngle);
+ SkScalar radiusX = oval.width() / 2;
+ SkScalar radiusY = oval.height() / 2;
+ // We do not use SkScalar[Sin|Cos]SnapToZero here. When sin(startAngle) is 0 and sweepAngle
+ // is very small and radius is huge, the expected behavior here is to draw a line. But
+ // calling SkScalarSinSnapToZero will make sin(endAngle) be 0 which will then draw a dot.
+ singlePt.set(oval.centerX() + radiusX * SkScalarCos(endAngle),
+ oval.centerY() + radiusY * SkScalarSin(endAngle));
+ addPt(singlePt);
+ return *this;
+ }
+
+ SkConic conics[SkConic::kMaxConicsForArc];
+ int count = build_arc_conics(oval, startV, stopV, dir, conics, &singlePt);
+ if (count) {
+ // Each conic appends 2 points; +1 for the initial move/line point.
+ this->incReserve(count * 2 + 1);
+ const SkPoint& pt = conics[0].fPts[0];
+ addPt(pt);
+ for (int i = 0; i < count; ++i) {
+ this->conicTo(conics[i].fPts[1], conics[i].fPts[2], conics[i].fW);
+ }
+ } else {
+ addPt(singlePt);
+ }
+ return *this;
+}
+
+// This converts the SVG arc to conics.
+// Partly adapted from Niko's code in kdelibs/kdecore/svgicons.
+// Then transcribed from webkit/chrome's SVGPathNormalizer::decomposeArcToCubic()
+// See also SVG implementation notes:
+// http://www.w3.org/TR/SVG/implnote.html#ArcConversionEndpointToCenter
+// Note that arcSweep bool value is flipped from the original implementation.
+SkPath& SkPath::arcTo(SkScalar rx, SkScalar ry, SkScalar angle, SkPath::ArcSize arcLarge,
+ SkPathDirection arcSweep, SkScalar x, SkScalar y) {
+ this->injectMoveToIfNeeded();
+ SkPoint srcPts[2];
+ this->getLastPt(&srcPts[0]);
+ // If rx = 0 or ry = 0 then this arc is treated as a straight line segment (a "lineto")
+ // joining the endpoints.
+ // http://www.w3.org/TR/SVG/implnote.html#ArcOutOfRangeParameters
+ if (!rx || !ry) {
+ return this->lineTo(x, y);
+ }
+ // If the current point and target point for the arc are identical, it should be treated as a
+ // zero length path. This ensures continuity in animations.
+ srcPts[1].set(x, y);
+ if (srcPts[0] == srcPts[1]) {
+ return this->lineTo(x, y);
+ }
+ rx = SkScalarAbs(rx);
+ ry = SkScalarAbs(ry);
+ SkVector midPointDistance = srcPts[0] - srcPts[1];
+ midPointDistance *= 0.5f;
+
+ SkMatrix pointTransform;
+ pointTransform.setRotate(-angle);
+
+ SkPoint transformedMidPoint;
+ pointTransform.mapPoints(&transformedMidPoint, &midPointDistance, 1);
+ SkScalar squareRx = rx * rx;
+ SkScalar squareRy = ry * ry;
+ SkScalar squareX = transformedMidPoint.fX * transformedMidPoint.fX;
+ SkScalar squareY = transformedMidPoint.fY * transformedMidPoint.fY;
+
+ // Check if the radii are big enough to draw the arc, scale radii if not.
+ // http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii
+ SkScalar radiiScale = squareX / squareRx + squareY / squareRy;
+ if (radiiScale > 1) {
+ radiiScale = SkScalarSqrt(radiiScale);
+ rx *= radiiScale;
+ ry *= radiiScale;
+ }
+
+ // Map endpoints into the unit-circle space of the (rotated) ellipse.
+ pointTransform.setScale(1 / rx, 1 / ry);
+ pointTransform.preRotate(-angle);
+
+ SkPoint unitPts[2];
+ pointTransform.mapPoints(unitPts, srcPts, (int) std::size(unitPts));
+ SkVector delta = unitPts[1] - unitPts[0];
+
+ SkScalar d = delta.fX * delta.fX + delta.fY * delta.fY;
+ SkScalar scaleFactorSquared = std::max(1 / d - 0.25f, 0.f);
+
+ SkScalar scaleFactor = SkScalarSqrt(scaleFactorSquared);
+ if ((arcSweep == SkPathDirection::kCCW) != SkToBool(arcLarge)) { // flipped from the original implementation
+ scaleFactor = -scaleFactor;
+ }
+ delta.scale(scaleFactor);
+ SkPoint centerPoint = unitPts[0] + unitPts[1];
+ centerPoint *= 0.5f;
+ centerPoint.offset(-delta.fY, delta.fX);
+ unitPts[0] -= centerPoint;
+ unitPts[1] -= centerPoint;
+ SkScalar theta1 = SkScalarATan2(unitPts[0].fY, unitPts[0].fX);
+ SkScalar theta2 = SkScalarATan2(unitPts[1].fY, unitPts[1].fX);
+ SkScalar thetaArc = theta2 - theta1;
+ if (thetaArc < 0 && (arcSweep == SkPathDirection::kCW)) { // arcSweep flipped from the original implementation
+ thetaArc += SK_ScalarPI * 2;
+ } else if (thetaArc > 0 && (arcSweep != SkPathDirection::kCW)) { // arcSweep flipped from the original implementation
+ thetaArc -= SK_ScalarPI * 2;
+ }
+
+ // Very tiny angles cause our subsequent math to go wonky (skbug.com/9272)
+ // so we do a quick check here. The precise tolerance amount is just made up.
+ // PI/million happens to fix the bug in 9272, but a larger value is probably
+ // ok too.
+ if (SkScalarAbs(thetaArc) < (SK_ScalarPI / (1000 * 1000))) {
+ return this->lineTo(x, y);
+ }
+
+ // Inverse of the earlier transform: maps unit-circle points back to the ellipse.
+ pointTransform.setRotate(angle);
+ pointTransform.preScale(rx, ry);
+
+ // the arc may be slightly bigger than 1/4 circle, so allow up to 1/3rd
+ int segments = SkScalarCeilToInt(SkScalarAbs(thetaArc / (2 * SK_ScalarPI / 3)));
+ SkScalar thetaWidth = thetaArc / segments;
+ SkScalar t = SkScalarTan(0.5f * thetaWidth);
+ if (!SkScalarIsFinite(t)) {
+ return *this;
+ }
+ SkScalar startTheta = theta1;
+ SkScalar w = SkScalarSqrt(SK_ScalarHalf + SkScalarCos(thetaWidth) * SK_ScalarHalf);
+ auto scalar_is_integer = [](SkScalar scalar) -> bool {
+ return scalar == SkScalarFloorToScalar(scalar);
+ };
+ bool expectIntegers = SkScalarNearlyZero(SK_ScalarPI/2 - SkScalarAbs(thetaWidth)) &&
+ scalar_is_integer(rx) && scalar_is_integer(ry) &&
+ scalar_is_integer(x) && scalar_is_integer(y);
+
+ for (int i = 0; i < segments; ++i) {
+ SkScalar endTheta = startTheta + thetaWidth,
+ sinEndTheta = SkScalarSinSnapToZero(endTheta),
+ cosEndTheta = SkScalarCosSnapToZero(endTheta);
+
+ unitPts[1].set(cosEndTheta, sinEndTheta);
+ unitPts[1] += centerPoint;
+ unitPts[0] = unitPts[1];
+ unitPts[0].offset(t * sinEndTheta, -t * cosEndTheta);
+ SkPoint mapped[2];
+ pointTransform.mapPoints(mapped, unitPts, (int) std::size(unitPts));
+ /*
+ Computing the arc width introduces rounding errors that cause arcs to start
+ outside their marks. A round rect may lose convexity as a result. If the input
+ values are on integers, place the conic on integers as well.
+ */
+ if (expectIntegers) {
+ for (SkPoint& point : mapped) {
+ point.fX = SkScalarRoundToScalar(point.fX);
+ point.fY = SkScalarRoundToScalar(point.fY);
+ }
+ }
+ this->conicTo(mapped[0], mapped[1], w);
+ startTheta = endTheta;
+ }
+
+ // The final point should match the input point (by definition); replace it to
+ // ensure that rounding errors in the above math don't cause any problems.
+ this->setLastPt(x, y);
+ return *this;
+}
+
+// Relative-coordinate variant of the SVG arcTo: (dx, dy) is offset from the
+// path's current last point.
+SkPath& SkPath::rArcTo(SkScalar rx, SkScalar ry, SkScalar xAxisRotate, SkPath::ArcSize largeArc,
+ SkPathDirection sweep, SkScalar dx, SkScalar dy) {
+ SkPoint currentPoint;
+ this->getLastPt(&currentPoint);
+ return this->arcTo(rx, ry, xAxisRotate, largeArc, sweep,
+ currentPoint.fX + dx, currentPoint.fY + dy);
+}
+
+// Appends an arc as a new contour. A sweep of >= 360 degrees that starts on a
+// multiple of 90 degrees is promoted to a full addOval() so the path can keep
+// its oval tag; everything else goes through arcTo() with forceMoveTo.
+SkPath& SkPath::addArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle) {
+ if (oval.isEmpty() || 0 == sweepAngle) {
+ return *this;
+ }
+
+ const SkScalar kFullCircleAngle = SkIntToScalar(360);
+
+ if (sweepAngle >= kFullCircleAngle || sweepAngle <= -kFullCircleAngle) {
+ // We can treat the arc as an oval if it begins at one of our legal starting positions.
+ // See SkPath::addOval() docs.
+ SkScalar startOver90 = startAngle / 90.f;
+ SkScalar startOver90I = SkScalarRoundToScalar(startOver90);
+ SkScalar error = startOver90 - startOver90I;
+ if (SkScalarNearlyEqual(error, 0)) {
+ // Index 1 is at startAngle == 0.
+ SkScalar startIndex = std::fmod(startOver90I + 1.f, 4.f);
+ startIndex = startIndex < 0 ? startIndex + 4.f : startIndex;
+ return this->addOval(oval, sweepAngle > 0 ? SkPathDirection::kCW : SkPathDirection::kCCW,
+ (unsigned) startIndex);
+ }
+ }
+ return this->arcTo(oval, startAngle, sweepAngle, true);
+}
+
+/*
+ Need to handle the case when the angle is sharp, and our computed end-points
+ for the arc go behind pt1 and/or p2...
+*/
+// Tangent-line arcTo: appends an arc of the given radius that is tangent to
+// the lines (lastPt -> (x1,y1)) and ((x1,y1) -> (x2,y2)).
+SkPath& SkPath::arcTo(SkScalar x1, SkScalar y1, SkScalar x2, SkScalar y2, SkScalar radius) {
+ this->injectMoveToIfNeeded();
+
+ // Zero radius degenerates to a straight line to the corner point.
+ if (radius == 0) {
+ return this->lineTo(x1, y1);
+ }
+
+ // need to know our prev pt so we can construct tangent vectors
+ SkPoint start;
+ this->getLastPt(&start);
+
+ // need double precision for these calcs.
+ skvx::double2 befored = normalize(skvx::double2{x1 - start.fX, y1 - start.fY});
+ skvx::double2 afterd = normalize(skvx::double2{x2 - x1, y2 - y1});
+ double cosh = dot(befored, afterd);
+ double sinh = cross(befored, afterd);
+
+ // If the previous point equals the first point, befored will be denormalized.
+ // If the two points equal, afterd will be denormalized.
+ // If the second point equals the first point, sinh will be zero.
+ // In all these cases, we cannot construct an arc, so we construct a line to the first point.
+ if (!isfinite(befored) || !isfinite(afterd) || SkScalarNearlyZero(SkDoubleToScalar(sinh))) {
+ return this->lineTo(x1, y1);
+ }
+
+ // safe to convert back to floats now
+ SkScalar dist = SkScalarAbs(SkDoubleToScalar(radius * (1 - cosh) / sinh));
+ SkScalar xx = x1 - dist * befored[0];
+ SkScalar yy = y1 - dist * befored[1];
+
+ SkVector after = SkVector::Make(afterd[0], afterd[1]);
+ after.setLength(dist);
+ this->lineTo(xx, yy);
+ SkScalar weight = SkScalarSqrt(SkDoubleToScalar(SK_ScalarHalf + cosh * 0.5));
+ return this->conicTo(x1, y1, x1 + after.fX, y1 + after.fY, weight);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Convenience overload: appends 'path' translated by (dx, dy) by forwarding
+// to the matrix-based addPath.
+SkPath& SkPath::addPath(const SkPath& path, SkScalar dx, SkScalar dy, AddPathMode mode) {
+ SkMatrix matrix;
+
+ matrix.setTranslate(dx, dy);
+ return this->addPath(path, matrix, mode);
+}
+
+// Appends srcPath transformed by 'matrix'. kAppend mode without perspective
+// takes a fast path that bulk-copies verbs/weights and maps points in place;
+// otherwise the source is re-iterated verb by verb (kExtend joins the first
+// contour onto the current one with a lineTo).
+SkPath& SkPath::addPath(const SkPath& srcPath, const SkMatrix& matrix, AddPathMode mode) {
+ if (srcPath.isEmpty()) {
+ return *this;
+ }
+
+ // Detect if we're trying to add ourself
+ const SkPath* src = &srcPath;
+ SkTLazy<SkPath> tmp;
+ if (this == src) {
+ src = tmp.set(srcPath);
+ }
+
+ if (kAppend_AddPathMode == mode && !matrix.hasPerspective()) {
+ fLastMoveToIndex = this->countPoints() + src->fLastMoveToIndex;
+
+ SkPathRef::Editor ed(&fPathRef);
+ auto [newPts, newWeights] = ed.growForVerbsInPath(*src->fPathRef);
+ matrix.mapPoints(newPts, src->fPathRef->points(), src->countPoints());
+ if (int numWeights = src->fPathRef->countWeights()) {
+ memcpy(newWeights, src->fPathRef->conicWeights(), numWeights * sizeof(newWeights[0]));
+ }
+ // fiddle with fLastMoveToIndex, as we do in SkPath::close()
+ if ((SkPathVerb)fPathRef->verbsEnd()[-1] == SkPathVerb::kClose) {
+ fLastMoveToIndex ^= ~fLastMoveToIndex >> (8 * sizeof(fLastMoveToIndex) - 1);
+ }
+ return this->dirtyAfterEdit();
+ }
+
+ SkMatrixPriv::MapPtsProc mapPtsProc = SkMatrixPriv::GetMapPtsProc(matrix);
+ bool firstVerb = true;
+ for (auto [verb, pts, w] : SkPathPriv::Iterate(*src)) {
+ SkPoint mappedPts[3];
+ switch (verb) {
+ case SkPathVerb::kMove:
+ mapPtsProc(matrix, mappedPts, &pts[0], 1);
+ if (firstVerb && mode == kExtend_AddPathMode && !isEmpty()) {
+ injectMoveToIfNeeded(); // In case last contour is closed
+ SkPoint lastPt;
+ // don't add lineTo if it is degenerate
+ if (fLastMoveToIndex < 0 || !this->getLastPt(&lastPt) ||
+ lastPt != mappedPts[0]) {
+ this->lineTo(mappedPts[0]);
+ }
+ } else {
+ this->moveTo(mappedPts[0]);
+ }
+ break;
+ case SkPathVerb::kLine:
+ mapPtsProc(matrix, mappedPts, &pts[1], 1);
+ this->lineTo(mappedPts[0]);
+ break;
+ case SkPathVerb::kQuad:
+ mapPtsProc(matrix, mappedPts, &pts[1], 2);
+ this->quadTo(mappedPts[0], mappedPts[1]);
+ break;
+ case SkPathVerb::kConic:
+ mapPtsProc(matrix, mappedPts, &pts[1], 2);
+ this->conicTo(mappedPts[0], mappedPts[1], *w);
+ break;
+ case SkPathVerb::kCubic:
+ mapPtsProc(matrix, mappedPts, &pts[1], 3);
+ this->cubicTo(mappedPts[0], mappedPts[1], mappedPts[2]);
+ break;
+ case SkPathVerb::kClose:
+ this->close();
+ break;
+ }
+ firstVerb = false;
+ }
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// ignore the last point of the 1st contour
+// Appends the verbs of path's last contour in reverse order (no initial
+// moveTo is emitted; the caller's current point is assumed to be at the
+// contour's last point). Stops at the contour's leading kMove verb.
+SkPath& SkPath::reversePathTo(const SkPath& path) {
+ if (path.fPathRef->fVerbs.empty()) {
+ return *this;
+ }
+
+ // Walk verbs backward from the end; pts/conicWeights walk backward in step.
+ const uint8_t* verbs = path.fPathRef->verbsEnd();
+ const uint8_t* verbsBegin = path.fPathRef->verbsBegin();
+ SkASSERT(verbsBegin[0] == kMove_Verb);
+ const SkPoint* pts = path.fPathRef->pointsEnd() - 1;
+ const SkScalar* conicWeights = path.fPathRef->conicWeightsEnd();
+
+ while (verbs > verbsBegin) {
+ uint8_t v = *--verbs;
+ pts -= SkPathPriv::PtsInVerb(v);
+ switch (v) {
+ case kMove_Verb:
+ // if the path has multiple contours, stop after reversing the last
+ return *this;
+ case kLine_Verb:
+ this->lineTo(pts[0]);
+ break;
+ case kQuad_Verb:
+ this->quadTo(pts[1], pts[0]);
+ break;
+ case kConic_Verb:
+ this->conicTo(pts[1], pts[0], *--conicWeights);
+ break;
+ case kCubic_Verb:
+ this->cubicTo(pts[2], pts[1], pts[0]);
+ break;
+ case kClose_Verb:
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ break;
+ }
+ }
+ return *this;
+}
+
+// Appends all contours of srcPath in reverse order, each starting with its
+// own moveTo. A contour that originally ended with kClose is re-closed once
+// its reversed verbs have been emitted.
+SkPath& SkPath::reverseAddPath(const SkPath& srcPath) {
+ // Detect if we're trying to add ourself
+ const SkPath* src = &srcPath;
+ SkTLazy<SkPath> tmp;
+ if (this == src) {
+ src = tmp.set(srcPath);
+ }
+
+ const uint8_t* verbsBegin = src->fPathRef->verbsBegin();
+ const uint8_t* verbs = src->fPathRef->verbsEnd();
+ const SkPoint* pts = src->fPathRef->pointsEnd();
+ const SkScalar* conicWeights = src->fPathRef->conicWeightsEnd();
+
+ bool needMove = true;
+ bool needClose = false;
+ while (verbs > verbsBegin) {
+ uint8_t v = *--verbs;
+ int n = SkPathPriv::PtsInVerb(v);
+
+ if (needMove) {
+ --pts;
+ this->moveTo(pts->fX, pts->fY);
+ needMove = false;
+ }
+ pts -= n;
+ switch (v) {
+ case kMove_Verb:
+ if (needClose) {
+ this->close();
+ needClose = false;
+ }
+ needMove = true;
+ pts += 1; // so we see the point in "if (needMove)" above
+ break;
+ case kLine_Verb:
+ this->lineTo(pts[0]);
+ break;
+ case kQuad_Verb:
+ this->quadTo(pts[1], pts[0]);
+ break;
+ case kConic_Verb:
+ this->conicTo(pts[1], pts[0], *--conicWeights);
+ break;
+ case kCubic_Verb:
+ this->cubicTo(pts[2], pts[1], pts[0]);
+ break;
+ case kClose_Verb:
+ needClose = true;
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ }
+ }
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Writes a copy of this path translated by (dx, dy) into *dst (dst may alias
+// this path; see transform()).
+void SkPath::offset(SkScalar dx, SkScalar dy, SkPath* dst) const {
+ SkMatrix matrix;
+
+ matrix.setTranslate(dx, dy);
+ this->transform(matrix, dst);
+}
+
+// Recursively halves the cubic 'level' times before appending it, emitting
+// 2^level cubicTo segments (default level=2 => 4 cubics). Used by
+// transform() to reduce distortion when flattening under perspective.
+static void subdivide_cubic_to(SkPath* path, const SkPoint pts[4],
+ int level = 2) {
+ if (--level >= 0) {
+ SkPoint tmp[7];
+
+ SkChopCubicAtHalf(pts, tmp);
+ subdivide_cubic_to(path, &tmp[0], level);
+ subdivide_cubic_to(path, &tmp[3], level);
+ } else {
+ path->cubicTo(pts[1], pts[2], pts[3]);
+ }
+}
+
+// Writes this path transformed by 'matrix' into *dst (dst == nullptr or
+// dst == this transforms in place). Perspective matrices rebuild the path
+// verb-by-verb (quads promoted to conics, cubics subdivided); affine
+// matrices take a bulk-copy fast path and try to preserve cached
+// convexity/direction when it is provably safe.
+void SkPath::transform(const SkMatrix& matrix, SkPath* dst, SkApplyPerspectiveClip pc) const {
+ if (matrix.isIdentity()) {
+ if (dst != nullptr && dst != this) {
+ *dst = *this;
+ }
+ return;
+ }
+
+ SkDEBUGCODE(this->validate();)
+ if (dst == nullptr) {
+ dst = (SkPath*)this;
+ }
+
+ if (matrix.hasPerspective()) {
+ SkPath tmp;
+ tmp.fFillType = fFillType;
+
+ SkPath clipped;
+ const SkPath* src = this;
+ if (pc == SkApplyPerspectiveClip::kYes &&
+ SkPathPriv::PerspectiveClip(*this, matrix, &clipped))
+ {
+ src = &clipped;
+ }
+
+ SkPath::Iter iter(*src, false);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ while ((verb = iter.next(pts)) != kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ tmp.moveTo(pts[0]);
+ break;
+ case kLine_Verb:
+ tmp.lineTo(pts[1]);
+ break;
+ case kQuad_Verb:
+ // promote the quad to a conic
+ tmp.conicTo(pts[1], pts[2],
+ SkConic::TransformW(pts, SK_Scalar1, matrix));
+ break;
+ case kConic_Verb:
+ tmp.conicTo(pts[1], pts[2],
+ SkConic::TransformW(pts, iter.conicWeight(), matrix));
+ break;
+ case kCubic_Verb:
+ subdivide_cubic_to(&tmp, pts);
+ break;
+ case kClose_Verb:
+ tmp.close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ break;
+ }
+ }
+
+ dst->swap(tmp);
+ SkPathRef::Editor ed(&dst->fPathRef);
+ matrix.mapPoints(ed.writablePoints(), ed.pathRef()->countPoints());
+ dst->setFirstDirection(SkPathFirstDirection::kUnknown);
+ } else {
+ // Cache this before the copy, since CreateTransformedCopy may touch dst == this.
+ SkPathConvexity convexity = this->getConvexityOrUnknown();
+
+ SkPathRef::CreateTransformedCopy(&dst->fPathRef, *fPathRef, matrix);
+
+ if (this != dst) {
+ dst->fLastMoveToIndex = fLastMoveToIndex;
+ dst->fFillType = fFillType;
+ dst->fIsVolatile = fIsVolatile;
+ }
+
+ // Due to finite/fragile float numerics, we can't assume that a convex path remains
+ // convex after a transformation, so mark it as unknown here.
+ // However, some transformations are thought to be safe:
+ // axis-aligned values under scale/translate.
+ //
+ if (convexity == SkPathConvexity::kConvex &&
+ (!matrix.isScaleTranslate() || !SkPathPriv::IsAxisAligned(*this))) {
+ // Not safe to still assume we're convex...
+ convexity = SkPathConvexity::kUnknown;
+ }
+ dst->setConvexity(convexity);
+
+ if (this->getFirstDirection() == SkPathFirstDirection::kUnknown) {
+ dst->setFirstDirection(SkPathFirstDirection::kUnknown);
+ } else {
+ // A negative 2x2 determinant means the matrix mirrors, which flips
+ // the winding direction; zero means it is degenerate (unknown).
+ SkScalar det2x2 =
+ matrix.get(SkMatrix::kMScaleX) * matrix.get(SkMatrix::kMScaleY) -
+ matrix.get(SkMatrix::kMSkewX) * matrix.get(SkMatrix::kMSkewY);
+ if (det2x2 < 0) {
+ dst->setFirstDirection(
+ SkPathPriv::OppositeFirstDirection(
+ (SkPathFirstDirection)this->getFirstDirection()));
+ } else if (det2x2 > 0) {
+ dst->setFirstDirection(this->getFirstDirection());
+ } else {
+ dst->setFirstDirection(SkPathFirstDirection::kUnknown);
+ }
+ }
+
+ SkDEBUGCODE(dst->validate();)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+// Default iterator: initialized so that next() immediately returns kDone_Verb
+// until setPath() is called.
+SkPath::Iter::Iter() {
+#ifdef SK_DEBUG
+ fPts = nullptr;
+ fConicWeights = nullptr;
+ fMoveTo.fX = fMoveTo.fY = fLastPt.fX = fLastPt.fY = 0;
+ fForceClose = fCloseLine = false;
+#endif
+ // need to init enough to make next() harmlessly return kDone_Verb
+ fVerbs = nullptr;
+ fVerbStop = nullptr;
+ fNeedClose = false;
+}
+
+// Convenience constructor: equivalent to default-construct + setPath().
+SkPath::Iter::Iter(const SkPath& path, bool forceClose) {
+ this->setPath(path, forceClose);
+}
+
+// (Re)binds the iterator to 'path'. With forceClose set, every contour is
+// reported as if it ended with a close (next() synthesizes the closing line).
+void SkPath::Iter::setPath(const SkPath& path, bool forceClose) {
+ fPts = path.fPathRef->points();
+ fVerbs = path.fPathRef->verbsBegin();
+ fVerbStop = path.fPathRef->verbsEnd();
+ fConicWeights = path.fPathRef->conicWeights();
+ if (fConicWeights) {
+ fConicWeights -= 1; // begin one behind
+ }
+ fLastPt.fX = fLastPt.fY = 0;
+ fMoveTo.fX = fMoveTo.fY = 0;
+ fForceClose = SkToU8(forceClose);
+ fNeedClose = false;
+}
+
+// Returns true if the current contour (from the iterator's position) contains
+// a close verb before the next move, or unconditionally when forceClose is set.
+bool SkPath::Iter::isClosedContour() const {
+ if (fVerbs == nullptr || fVerbs == fVerbStop) {
+ return false;
+ }
+ if (fForceClose) {
+ return true;
+ }
+
+ const uint8_t* verbs = fVerbs;
+ const uint8_t* stop = fVerbStop;
+
+ if (kMove_Verb == *verbs) {
+ verbs += 1; // skip the initial moveto
+ }
+
+ while (verbs < stop) {
+ // verbs points one beyond the current verb, decrement first.
+ unsigned v = *verbs++;
+ if (kMove_Verb == v) {
+ break;
+ }
+ if (kClose_Verb == v) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Synthesizes the segment that closes the current contour: returns kLine_Verb
+// with pts = {lastPt, moveTo} when the contour is still open, else kClose_Verb.
+SkPath::Verb SkPath::Iter::autoClose(SkPoint pts[2]) {
+ SkASSERT(pts);
+ if (fLastPt != fMoveTo) {
+ // A special case: if both points are NaN, SkPoint::operation== returns
+ // false, but the iterator expects that they are treated as the same.
+ // (consider SkPoint is a 2-dimension float point).
+ if (SkScalarIsNaN(fLastPt.fX) || SkScalarIsNaN(fLastPt.fY) ||
+ SkScalarIsNaN(fMoveTo.fX) || SkScalarIsNaN(fMoveTo.fY)) {
+ return kClose_Verb;
+ }
+
+ pts[0] = fLastPt;
+ pts[1] = fMoveTo;
+ fLastPt = fMoveTo;
+ fCloseLine = true;
+ return kLine_Verb;
+ } else {
+ pts[0] = fMoveTo;
+ return kClose_Verb;
+ }
+}
+
+// Returns the next verb and fills ptsParam with its points (pts[0] is always
+// the segment's start point). Synthesizes closing line/close verbs when
+// forceClose is active; returns kDone_Verb at the end.
+SkPath::Verb SkPath::Iter::next(SkPoint ptsParam[4]) {
+ SkASSERT(ptsParam);
+
+ if (fVerbs == fVerbStop) {
+ // Close the curve if requested and if there is some curve to close
+ if (fNeedClose) {
+ if (kLine_Verb == this->autoClose(ptsParam)) {
+ return kLine_Verb;
+ }
+ fNeedClose = false;
+ return kClose_Verb;
+ }
+ return kDone_Verb;
+ }
+
+ unsigned verb = *fVerbs++;
+ const SkPoint* SK_RESTRICT srcPts = fPts;
+ SkPoint* SK_RESTRICT pts = ptsParam;
+
+ switch (verb) {
+ case kMove_Verb:
+ if (fNeedClose) {
+ fVerbs--; // move back one verb
+ verb = this->autoClose(pts);
+ if (verb == kClose_Verb) {
+ fNeedClose = false;
+ }
+ return (Verb)verb;
+ }
+ if (fVerbs == fVerbStop) { // might be a trailing moveto
+ return kDone_Verb;
+ }
+ fMoveTo = *srcPts;
+ pts[0] = *srcPts;
+ srcPts += 1;
+ fLastPt = fMoveTo;
+ fNeedClose = fForceClose;
+ break;
+ case kLine_Verb:
+ pts[0] = fLastPt;
+ pts[1] = srcPts[0];
+ fLastPt = srcPts[0];
+ fCloseLine = false;
+ srcPts += 1;
+ break;
+ case kConic_Verb:
+ fConicWeights += 1;
+ [[fallthrough]];
+ case kQuad_Verb:
+ pts[0] = fLastPt;
+ memcpy(&pts[1], srcPts, 2 * sizeof(SkPoint));
+ fLastPt = srcPts[1];
+ srcPts += 2;
+ break;
+ case kCubic_Verb:
+ pts[0] = fLastPt;
+ memcpy(&pts[1], srcPts, 3 * sizeof(SkPoint));
+ fLastPt = srcPts[2];
+ srcPts += 3;
+ break;
+ case kClose_Verb:
+ verb = this->autoClose(pts);
+ if (verb == kLine_Verb) {
+ fVerbs--; // move back one verb
+ } else {
+ fNeedClose = false;
+ }
+ fLastPt = fMoveTo;
+ break;
+ }
+ fPts = srcPts;
+ return (Verb)verb;
+}
+
+// Binds the raw iterator to 'path' via the SkPathPriv range-iteration helper.
+void SkPath::RawIter::setPath(const SkPath& path) {
+ SkPathPriv::Iterate iterate(path);
+ fIter = iterate.begin();
+ fEnd = iterate.end();
+}
+
+// Returns the next raw verb (no auto-closing) and copies its points into pts.
+// Stores the conic weight for kConic so conicWeight() can report it.
+// NOTE(review): numPts is only assigned in the switch; this relies on the
+// switch being exhaustive over SkPathVerb — confirm no other verb values exist.
+SkPath::Verb SkPath::RawIter::next(SkPoint pts[4]) {
+ if (!(fIter != fEnd)) {
+ return kDone_Verb;
+ }
+ auto [verb, iterPts, weights] = *fIter;
+ int numPts;
+ switch (verb) {
+ case SkPathVerb::kMove: numPts = 1; break;
+ case SkPathVerb::kLine: numPts = 2; break;
+ case SkPathVerb::kQuad: numPts = 3; break;
+ case SkPathVerb::kConic:
+ numPts = 3;
+ fConicWeight = *weights;
+ break;
+ case SkPathVerb::kCubic: numPts = 4; break;
+ case SkPathVerb::kClose: numPts = 0; break;
+ }
+ memcpy(pts, iterPts, sizeof(SkPoint) * numPts);
+ ++fIter;
+ return (Verb) verb;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Appends one "label(x0, y0, ...);" call to str for dump(). The sentinel
+// default -12345 means "no conic weight". In hex mode, a decimal echo of the
+// values is appended as a trailing comment.
+// NOTE(review): the hex-echo branch uses 'conicWeight >= 0' rather than the
+// '!= -12345' sentinel test above, so a negative (non-sentinel) weight would
+// be omitted from the decimal comment — looks intentional but verify.
+static void append_params(SkString* str, const char label[], const SkPoint pts[],
+ int count, SkScalarAsStringType strType, SkScalar conicWeight = -12345) {
+ str->append(label);
+ str->append("(");
+
+ // Treat the points as a flat array of 2*count scalars.
+ const SkScalar* values = &pts[0].fX;
+ count *= 2;
+
+ for (int i = 0; i < count; ++i) {
+ SkAppendScalar(str, values[i], strType);
+ if (i < count - 1) {
+ str->append(", ");
+ }
+ }
+ if (conicWeight != -12345) {
+ str->append(", ");
+ SkAppendScalar(str, conicWeight, strType);
+ }
+ str->append(");");
+ if (kHex_SkScalarAsStringType == strType) {
+ str->append(" // ");
+ for (int i = 0; i < count; ++i) {
+ SkAppendScalarDec(str, values[i]);
+ if (i < count - 1) {
+ str->append(", ");
+ }
+ }
+ if (conicWeight >= 0) {
+ str->append(", ");
+ SkAppendScalarDec(str, conicWeight);
+ }
+ }
+ str->append("\n");
+}
+
+// Dumps the path as compilable C++ ("path.moveTo(...);" etc.) to wStream, or
+// to SkDebugf when wStream is null (flushing per-verb in that case).
+void SkPath::dump(SkWStream* wStream, bool dumpAsHex) const {
+ SkScalarAsStringType asType = dumpAsHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+ Iter iter(*this, false);
+ SkPoint pts[4];
+ Verb verb;
+
+ SkString builder;
+ char const * const gFillTypeStrs[] = {
+ "Winding",
+ "EvenOdd",
+ "InverseWinding",
+ "InverseEvenOdd",
+ };
+ builder.printf("path.setFillType(SkPathFillType::k%s);\n",
+ gFillTypeStrs[(int) this->getFillType()]);
+ while ((verb = iter.next(pts)) != kDone_Verb) {
+ switch (verb) {
+ case kMove_Verb:
+ append_params(&builder, "path.moveTo", &pts[0], 1, asType);
+ break;
+ case kLine_Verb:
+ append_params(&builder, "path.lineTo", &pts[1], 1, asType);
+ break;
+ case kQuad_Verb:
+ append_params(&builder, "path.quadTo", &pts[1], 2, asType);
+ break;
+ case kConic_Verb:
+ append_params(&builder, "path.conicTo", &pts[1], 2, asType, iter.conicWeight());
+ break;
+ case kCubic_Verb:
+ append_params(&builder, "path.cubicTo", &pts[1], 3, asType);
+ break;
+ case kClose_Verb:
+ builder.append("path.close();\n");
+ break;
+ default:
+ SkDebugf(" path: UNKNOWN VERB %d, aborting dump...\n", verb);
+ verb = kDone_Verb; // stop the loop
+ break;
+ }
+ if (!wStream && builder.size()) {
+ SkDebugf("%s", builder.c_str());
+ builder.reset();
+ }
+ }
+ if (wStream) {
+ wStream->writeText(builder.c_str());
+ }
+}
+
+// Dumps the path's raw arrays (points, verbs, conic weights) as C++ source
+// that reconstructs it via SkPath::Make(), plus internal flags as comments.
+// Writes to wStream, or to SkDebugf when wStream is null.
+void SkPath::dumpArrays(SkWStream* wStream, bool dumpAsHex) const {
+ SkString builder;
+
+ auto bool_str = [](bool v) { return v ? "true" : "false"; };
+
+ builder.appendf("// fBoundsIsDirty = %s\n", bool_str(fPathRef->fBoundsIsDirty));
+ builder.appendf("// fGenerationID = %d\n", fPathRef->fGenerationID);
+ builder.appendf("// fSegmentMask = %d\n", fPathRef->fSegmentMask);
+ builder.appendf("// fIsOval = %s\n", bool_str(fPathRef->fIsOval));
+ builder.appendf("// fIsRRect = %s\n", bool_str(fPathRef->fIsRRect));
+
+ // Hex mode emits bit-exact floats (SkBits2Float) with a decimal comment.
+ auto append_scalar = [&](SkScalar v) {
+ if (dumpAsHex) {
+ builder.appendf("SkBits2Float(0x%08X) /* %g */", SkFloat2Bits(v), v);
+ } else {
+ builder.appendf("%g", v);
+ }
+ };
+
+ builder.append("const SkPoint path_points[] = {\n");
+ for (int i = 0; i < this->countPoints(); ++i) {
+ SkPoint p = this->getPoint(i);
+ builder.append(" { ");
+ append_scalar(p.fX);
+ builder.append(", ");
+ append_scalar(p.fY);
+ builder.append(" },\n");
+ }
+ builder.append("};\n");
+
+ const char* gVerbStrs[] = {
+ "Move", "Line", "Quad", "Conic", "Cubic", "Close"
+ };
+ builder.append("const uint8_t path_verbs[] = {\n ");
+ for (auto v = fPathRef->verbsBegin(); v != fPathRef->verbsEnd(); ++v) {
+ builder.appendf("(uint8_t)SkPathVerb::k%s, ", gVerbStrs[*v]);
+ }
+ builder.append("\n};\n");
+
+ const int nConics = fPathRef->conicWeightsEnd() - fPathRef->conicWeights();
+ if (nConics) {
+ builder.append("const SkScalar path_conics[] = {\n ");
+ for (auto c = fPathRef->conicWeights(); c != fPathRef->conicWeightsEnd(); ++c) {
+ append_scalar(*c);
+ builder.append(", ");
+ }
+ builder.append("\n};\n");
+ }
+
+ char const * const gFillTypeStrs[] = {
+ "Winding",
+ "EvenOdd",
+ "InverseWinding",
+ "InverseEvenOdd",
+ };
+
+ builder.appendf("SkPath path = SkPath::Make(path_points, %d, path_verbs, %d, %s, %d,\n",
+ this->countPoints(), this->countVerbs(),
+ nConics ? "path_conics" : "nullptr", nConics);
+ builder.appendf(" SkPathFillType::k%s, %s);\n",
+ gFillTypeStrs[(int)this->getFillType()],
+ bool_str(fIsVolatile));
+
+ if (wStream) {
+ wStream->writeText(builder.c_str());
+ } else {
+ SkDebugf("%s\n", builder.c_str());
+ }
+}
+
+// Internal consistency check: the fill type must fit in 2 bits, and (under
+// SK_DEBUG_PATH) the cached bounds must contain the recomputed point bounds.
+bool SkPath::isValidImpl() const {
+ if ((fFillType & ~3) != 0) {
+ return false;
+ }
+
+#ifdef SK_DEBUG_PATH
+ if (!fBoundsIsDirty) {
+ SkRect bounds;
+
+ bool isFinite = compute_pt_bounds(&bounds, *fPathRef.get());
+ if (SkToBool(fIsFinite) != isFinite) {
+ return false;
+ }
+
+ if (fPathRef->countPoints() <= 1) {
+ // if we're empty, fBounds may be empty but translated, so we can't
+ // necessarily compare to bounds directly
+ // try path.addOval(2, 2, 2, 2) which is empty, but the bounds will
+ // be [2, 2, 2, 2]
+ if (!bounds.isEmpty() || !fBounds.isEmpty()) {
+ return false;
+ }
+ } else {
+ if (bounds.isEmpty()) {
+ if (!fBounds.isEmpty()) {
+ return false;
+ }
+ } else {
+ if (!fBounds.isEmpty()) {
+ if (!fBounds.contains(bounds)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+#endif // SK_DEBUG_PATH
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
// Returns 1 when x is negative, else 0 (bool implicitly converted to int).
static int sign(SkScalar x) { return x < 0; }
// A value sign() can never return (it only yields 0 or 1); used as an
// "uninitialized" sentinel when tracking sign changes.
#define kValueNeverReturnedBySign 2
+
// Classification of the turn between two consecutive edge vectors; used by
// Convexicator to decide whether a contour remains convex.
enum DirChange {
    kUnknown_DirChange,      // cross product was not finite
    kLeft_DirChange,         // counter-clockwise turn
    kRight_DirChange,        // clockwise turn
    kStraight_DirChange,     // collinear, continuing in the same direction
    kBackwards_DirChange,    // if double back, allow simple lines to be convex
    kInvalid_DirChange       // sentinel: no expected direction established yet
};
+
// only valid for a single contour
//
// Incrementally classifies a contour as convex or concave by feeding it points
// (addPt) and watching the sign of consecutive cross products (addVec). Once a
// turn direction is established, any opposite turn — or too many direction
// reversals — proves concavity.
struct Convexicator {

    /** The direction returned is only valid if the path is determined convex */
    SkPathFirstDirection getFirstDirection() const { return fFirstDirection; }

    // Begin a new contour at pt; resets the expected turn direction.
    void setMovePt(const SkPoint& pt) {
        fFirstPt = fLastPt = pt;
        fExpectedDir = kInvalid_DirChange;
    }

    // Feed the next on-curve point. Returns false once the contour is known
    // to be non-convex. Coincident points are ignored.
    bool addPt(const SkPoint& pt) {
        if (fLastPt == pt) {
            return true;
        }
        // should only be true for first non-zero vector after setMovePt was called.
        if (fFirstPt == fLastPt && fExpectedDir == kInvalid_DirChange) {
            fLastVec = pt - fLastPt;
            fFirstVec = fLastVec;
        } else if (!this->addVec(pt - fLastPt)) {
            return false;
        }
        fLastPt = pt;
        return true;
    }

    // Cheap pre-pass: counts sign changes of the deltas between consecutive
    // points in x and in y (wrapping around to the first point on a second
    // pass). More than 3 sign changes in either axis proves concavity.
    static SkPathConvexity BySign(const SkPoint points[], int count) {
        if (count <= 3) {
            // point, line, or triangle are always convex
            return SkPathConvexity::kConvex;
        }

        const SkPoint* last = points + count;
        SkPoint currPt = *points++;
        SkPoint firstPt = currPt;
        int dxes = 0;
        int dyes = 0;
        int lastSx = kValueNeverReturnedBySign;
        int lastSy = kValueNeverReturnedBySign;
        for (int outerLoop = 0; outerLoop < 2; ++outerLoop ) {
            while (points != last) {
                SkVector vec = *points - currPt;
                if (!vec.isZero()) {
                    // give up if vector construction failed
                    if (!vec.isFinite()) {
                        return SkPathConvexity::kUnknown;
                    }
                    int sx = sign(vec.fX);
                    int sy = sign(vec.fY);
                    dxes += (sx != lastSx);
                    dyes += (sy != lastSy);
                    if (dxes > 3 || dyes > 3) {
                        return SkPathConvexity::kConcave;
                    }
                    lastSx = sx;
                    lastSy = sy;
                }
                currPt = *points++;
                if (outerLoop) {
                    break;
                }
            }
            // Second pass: close the loop back to the first point.
            points = &firstPt;
        }
        return SkPathConvexity::kConvex;  // that is, it may be convex, don't know yet
    }

    bool close() {
        // If this was an explicit close, there was already a lineTo to fFirstPoint, so this
        // addPt() is a no-op. Otherwise, the addPt implicitly closes the contour. In either case,
        // we have to check the direction change along the first vector in case it is concave.
        return this->addPt(fFirstPt) && this->addVec(fFirstVec);
    }

    bool isFinite() const {
        return fIsFinite;
    }

    int reversals() const {
        return fReversals;
    }

private:
    // Classify the turn from fLastVec to curVec by the sign of their cross
    // product; zero cross means straight ahead or an exact reversal.
    DirChange directionChange(const SkVector& curVec) {
        SkScalar cross = SkPoint::CrossProduct(fLastVec, curVec);
        if (!SkScalarIsFinite(cross)) {
            return kUnknown_DirChange;
        }
        if (cross == 0) {
            return fLastVec.dot(curVec) < 0 ? kBackwards_DirChange : kStraight_DirChange;
        }
        return 1 == SkScalarSignAsInt(cross) ? kRight_DirChange : kLeft_DirChange;
    }

    // Accumulate one edge vector. Returns false when the turn direction
    // contradicts the established direction or reversals exceed the limit.
    bool addVec(const SkVector& curVec) {
        DirChange dir = this->directionChange(curVec);
        switch (dir) {
            case kLeft_DirChange: // fall through
            case kRight_DirChange:
                if (kInvalid_DirChange == fExpectedDir) {
                    // First real turn establishes the expected direction.
                    fExpectedDir = dir;
                    fFirstDirection = (kRight_DirChange == dir) ? SkPathFirstDirection::kCW
                                                                : SkPathFirstDirection::kCCW;
                } else if (dir != fExpectedDir) {
                    fFirstDirection = SkPathFirstDirection::kUnknown;
                    return false;
                }
                fLastVec = curVec;
                break;
            case kStraight_DirChange:
                break;
            case kBackwards_DirChange:
                // allow path to reverse direction twice
                //   Given path.moveTo(0, 0); path.lineTo(1, 1);
                //   - 1st reversal: direction change formed by line (0,0 1,1), line (1,1 0,0)
                //   - 2nd reversal: direction change formed by line (1,1 0,0), line (0,0 1,1)
                fLastVec = curVec;
                return ++fReversals < 3;
            case kUnknown_DirChange:
                return (fIsFinite = false);
            case kInvalid_DirChange:
                SK_ABORT("Use of invalid direction change flag");
                break;
        }
        return true;
    }

    SkPoint             fFirstPt {0, 0};  // The first point of the contour, e.g. moveTo(x,y)
    SkVector            fFirstVec {0, 0}; // The direction leaving fFirstPt to the next vertex

    SkPoint             fLastPt {0, 0};   // The last point passed to addPt()
    SkVector            fLastVec {0, 0};  // The direction that brought the path to fLastPt

    DirChange           fExpectedDir { kInvalid_DirChange };
    SkPathFirstDirection fFirstDirection { SkPathFirstDirection::kUnknown };
    int                 fReversals { 0 };
    bool                fIsFinite { true };
};
+
// Computes (and caches on the path) whether this path is convex. A path is
// treated as convex only if it has at most one real contour; intermediate
// moveTos between non-move verbs immediately classify it as concave.
SkPathConvexity SkPath::computeConvexity() const {
    // Cache the computed answer on the path before returning it.
    auto setComputedConvexity = [=](SkPathConvexity convexity){
        SkASSERT(SkPathConvexity::kUnknown != convexity);
        this->setConvexity(convexity);
        return convexity;
    };

    auto setFail = [=](){
        return setComputedConvexity(SkPathConvexity::kConcave);
    };

    if (!this->isFinite()) {
        return setFail();
    }

    // pointCount potentially includes a block of leading moveTos and trailing moveTos. Convexity
    // only cares about the last of the initial moveTos and the verbs before the final moveTos.
    int pointCount = this->countPoints();
    int skipCount = SkPathPriv::LeadingMoveToCount(*this) - 1;

    if (fLastMoveToIndex >= 0) {
        if (fLastMoveToIndex == pointCount - 1) {
            // Find the last real verb that affects convexity
            auto verbs = fPathRef->verbsEnd() - 1;
            while(verbs > fPathRef->verbsBegin() && *verbs == Verb::kMove_Verb) {
                verbs--;
                pointCount--;
            }
        } else if (fLastMoveToIndex != skipCount) {
            // There's an additional moveTo between two blocks of other verbs, so the path must have
            // more than one contour and cannot be convex.
            return setComputedConvexity(SkPathConvexity::kConcave);
        } // else no trailing or intermediate moveTos to worry about
    }
    const SkPoint* points = fPathRef->points();
    if (skipCount > 0) {
        points += skipCount;
        pointCount -= skipCount;
    }

    // Check to see if path changes direction more than three times as quick concave test
    SkPathConvexity convexity = Convexicator::BySign(points, pointCount);
    if (SkPathConvexity::kConvex != convexity) {
        return setComputedConvexity(SkPathConvexity::kConcave);
    }

    int contourCount = 0;
    bool needsClose = false;
    Convexicator state;

    for (auto [verb, pts, wt] : SkPathPriv::Iterate(*this)) {
        // Looking for the last moveTo before non-move verbs start
        if (contourCount == 0) {
            if (verb == SkPathVerb::kMove) {
                state.setMovePt(pts[0]);
            } else {
                // Starting the actual contour, fall through to c=1 to add the points
                contourCount++;
                needsClose = true;
            }
        }
        // Accumulating points into the Convexicator until we hit a close or another move
        if (contourCount == 1) {
            if (verb == SkPathVerb::kClose || verb == SkPathVerb::kMove) {
                if (!state.close()) {
                    return setFail();
                }
                needsClose = false;
                contourCount++;
            } else {
                // lines add 1 point, cubics add 3, conics and quads add 2
                int count = SkPathPriv::PtsInVerb((unsigned) verb);
                SkASSERT(count > 0);
                for (int i = 1; i <= count; ++i) {
                    if (!state.addPt(pts[i])) {
                        return setFail();
                    }
                }
            }
        } else {
            // The first contour has closed and anything other than spurious trailing moves means
            // there's multiple contours and the path can't be convex
            if (verb != SkPathVerb::kMove) {
                return setFail();
            }
        }
    }

    // If the path isn't explicitly closed do so implicitly
    if (needsClose && !state.close()) {
        return setFail();
    }

    if (this->getFirstDirection() == SkPathFirstDirection::kUnknown) {
        if (state.getFirstDirection() == SkPathFirstDirection::kUnknown
                && !this->getBounds().isEmpty()) {
            // No turn direction was ever established; fall back to the
            // reversal count to distinguish degenerate-convex from concave.
            return setComputedConvexity(state.reversals() < 3 ?
                    SkPathConvexity::kConvex : SkPathConvexity::kConcave);
        }
        this->setFirstDirection(state.getFirstDirection());
    }
    return setComputedConvexity(SkPathConvexity::kConvex);
}
+
+///////////////////////////////////////////////////////////////////////////////
+
// Walks a path one contour at a time, exposing each contour's point array and
// point count. Conic weights are tracked internally but not exposed.
class ContourIter {
public:
    ContourIter(const SkPathRef& pathRef);

    bool done() const { return fDone; }
    // if !done() then these may be called
    int count() const { return fCurrPtCount; }
    const SkPoint* pts() const { return fCurrPt; }
    void next();

private:
    int fCurrPtCount;                  // number of points in the current contour
    const SkPoint* fCurrPt;            // first point of the current contour
    const uint8_t* fCurrVerb;          // first verb of the current contour
    const uint8_t* fStopVerbs;         // one past the last verb of the path
    const SkScalar* fCurrConicWeight;  // advances as conic verbs are consumed
    bool fDone;
    SkDEBUGCODE(int fContourCounter;)
};
+
+ContourIter::ContourIter(const SkPathRef& pathRef) {
+ fStopVerbs = pathRef.verbsEnd();
+ fDone = false;
+ fCurrPt = pathRef.points();
+ fCurrVerb = pathRef.verbsBegin();
+ fCurrConicWeight = pathRef.conicWeights();
+ fCurrPtCount = 0;
+ SkDEBUGCODE(fContourCounter = 0;)
+ this->next();
+}
+
+void ContourIter::next() {
+ if (fCurrVerb >= fStopVerbs) {
+ fDone = true;
+ }
+ if (fDone) {
+ return;
+ }
+
+ // skip pts of prev contour
+ fCurrPt += fCurrPtCount;
+
+ SkASSERT(SkPath::kMove_Verb == fCurrVerb[0]);
+ int ptCount = 1; // moveTo
+ const uint8_t* verbs = fCurrVerb;
+
+ for (verbs++; verbs < fStopVerbs; verbs++) {
+ switch (*verbs) {
+ case SkPath::kMove_Verb:
+ goto CONTOUR_END;
+ case SkPath::kLine_Verb:
+ ptCount += 1;
+ break;
+ case SkPath::kConic_Verb:
+ fCurrConicWeight += 1;
+ [[fallthrough]];
+ case SkPath::kQuad_Verb:
+ ptCount += 2;
+ break;
+ case SkPath::kCubic_Verb:
+ ptCount += 3;
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ break;
+ }
+ }
+CONTOUR_END:
+ fCurrPtCount = ptCount;
+ fCurrVerb = verbs;
+ SkDEBUGCODE(++fContourCounter;)
+}
+
+// returns cross product of (p1 - p0) and (p2 - p0)
+static SkScalar cross_prod(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
+ SkScalar cross = SkPoint::CrossProduct(p1 - p0, p2 - p0);
+ // We may get 0 when the above subtracts underflow. We expect this to be
+ // very rare and lazily promote to double.
+ if (0 == cross) {
+ double p0x = SkScalarToDouble(p0.fX);
+ double p0y = SkScalarToDouble(p0.fY);
+
+ double p1x = SkScalarToDouble(p1.fX);
+ double p1y = SkScalarToDouble(p1.fY);
+
+ double p2x = SkScalarToDouble(p2.fX);
+ double p2y = SkScalarToDouble(p2.fY);
+
+ cross = SkDoubleToScalar((p1x - p0x) * (p2y - p0y) -
+ (p1y - p0y) * (p2x - p0x));
+
+ }
+ return cross;
+}
+
+// Returns the first pt with the maximum Y coordinate
+static int find_max_y(const SkPoint pts[], int count) {
+ SkASSERT(count > 0);
+ SkScalar max = pts[0].fY;
+ int firstIndex = 0;
+ for (int i = 1; i < count; ++i) {
+ SkScalar y = pts[i].fY;
+ if (y > max) {
+ max = y;
+ firstIndex = i;
+ }
+ }
+ return firstIndex;
+}
+
+static int find_diff_pt(const SkPoint pts[], int index, int n, int inc) {
+ int i = index;
+ for (;;) {
+ i = (i + inc) % n;
+ if (i == index) { // we wrapped around, so abort
+ break;
+ }
+ if (pts[index] != pts[i]) { // found a different point, success!
+ break;
+ }
+ }
+ return i;
+}
+
+/**
+ * Starting at index, and moving forward (incrementing), find the xmin and
+ * xmax of the contiguous points that have the same Y.
+ */
+static int find_min_max_x_at_y(const SkPoint pts[], int index, int n,
+ int* maxIndexPtr) {
+ const SkScalar y = pts[index].fY;
+ SkScalar min = pts[index].fX;
+ SkScalar max = min;
+ int minIndex = index;
+ int maxIndex = index;
+ for (int i = index + 1; i < n; ++i) {
+ if (pts[i].fY != y) {
+ break;
+ }
+ SkScalar x = pts[i].fX;
+ if (x < min) {
+ min = x;
+ minIndex = i;
+ } else if (x > max) {
+ max = x;
+ maxIndex = i;
+ }
+ }
+ *maxIndexPtr = maxIndex;
+ return minIndex;
+}
+
+static SkPathFirstDirection crossToDir(SkScalar cross) {
+ return cross > 0 ? SkPathFirstDirection::kCW : SkPathFirstDirection::kCCW;
+}
+
/*
 *  We loop through all contours, and keep the computed cross-product of the
 *  contour that contained the global y-max. If we just look at the first
 *  contour, we may find one that is wound the opposite way (correctly) since
 *  it is the interior of a hole (e.g. 'o'). Thus we must find the contour
 *  that is outer most (or at least has the global y-max) before we can consider
 *  its cross product.
 */
SkPathFirstDirection SkPathPriv::ComputeFirstDirection(const SkPath& path) {
    auto d = path.getFirstDirection();
    if (d != SkPathFirstDirection::kUnknown) {
        return d;
    }

    // We don't want to pay the cost for computing convexity if it is unknown,
    // so we call getConvexityOrUnknown() instead of isConvex().
    if (path.getConvexityOrUnknown() == SkPathConvexity::kConvex) {
        SkASSERT(d == SkPathFirstDirection::kUnknown);
        return d;
    }

    ContourIter iter(*path.fPathRef);

    // initialize with our logical y-min
    SkScalar ymax = path.getBounds().fTop;
    SkScalar ymaxCross = 0;

    for (; !iter.done(); iter.next()) {
        int n = iter.count();
        // Fewer than 3 points cannot establish a direction; skip the contour.
        if (n < 3) {
            continue;
        }

        const SkPoint* pts = iter.pts();
        SkScalar cross = 0;
        int index = find_max_y(pts, n);
        // Only contours that reach the current global y-max are candidates.
        if (pts[index].fY < ymax) {
            continue;
        }

        // If there is more than 1 distinct point at the y-max, we take the
        // x-min and x-max of them and just subtract to compute the dir.
        if (pts[(index + 1) % n].fY == pts[index].fY) {
            int maxIndex;
            int minIndex = find_min_max_x_at_y(pts, index, n, &maxIndex);
            if (minIndex == maxIndex) {
                goto TRY_CROSSPROD;
            }
            SkASSERT(pts[minIndex].fY == pts[index].fY);
            SkASSERT(pts[maxIndex].fY == pts[index].fY);
            SkASSERT(pts[minIndex].fX <= pts[maxIndex].fX);
            // we just subtract the indices, and let that auto-convert to
            // SkScalar, since we just want - or + to signal the direction.
            cross = minIndex - maxIndex;
        } else {
            TRY_CROSSPROD:
            // Find a next and prev index to use for the cross-product test,
            // but we try to find pts that form non-zero vectors from pts[index]
            //
            // Its possible that we can't find two non-degenerate vectors, so
            // we have to guard our search (e.g. all the pts could be in the
            // same place).

            // we pass n - 1 instead of -1 so we don't foul up % operator by
            // passing it a negative LH argument.
            int prev = find_diff_pt(pts, index, n, n - 1);
            if (prev == index) {
                // completely degenerate, skip to next contour
                continue;
            }
            int next = find_diff_pt(pts, index, n, 1);
            SkASSERT(next != index);
            cross = cross_prod(pts[prev], pts[index], pts[next]);
            // if we get a zero and the points are horizontal, then we look at the spread in
            // x-direction. We really should continue to walk away from the degeneracy until
            // there is a divergence.
            if (0 == cross && pts[prev].fY == pts[index].fY && pts[next].fY == pts[index].fY) {
                // construct the subtract so we get the correct Direction below
                cross = pts[index].fX - pts[next].fX;
            }
        }

        if (cross) {
            // record our best guess so far
            ymax = pts[index].fY;
            ymaxCross = cross;
        }
    }
    if (ymaxCross) {
        d = crossToDir(ymaxCross);
        path.setFirstDirection(d);
    }
    return d;   // may still be kUnknown
}
+
+///////////////////////////////////////////////////////////////////////////////
+
// Returns true if b lies between a and c (inclusive), in either order.
// The assert documents that the multiplicative form agrees with the naive
// comparison form except when all three values are nearly zero, where the
// product (a-b)*(c-b) can underflow to 0.
static bool between(SkScalar a, SkScalar b, SkScalar c) {
    SkASSERT(((a <= b && b <= c) || (a >= b && b >= c)) == ((a - b) * (c - b) <= 0)
            || (SkScalarNearlyZero(a) && SkScalarNearlyZero(b) && SkScalarNearlyZero(c)));
    return (a - b) * (c - b) <= 0;
}
+
+static SkScalar eval_cubic_pts(SkScalar c0, SkScalar c1, SkScalar c2, SkScalar c3,
+ SkScalar t) {
+ SkScalar A = c3 + 3*(c1 - c2) - c0;
+ SkScalar B = 3*(c2 - c1 - c1 + c0);
+ SkScalar C = 3*(c1 - c0);
+ SkScalar D = c0;
+ return poly_eval(A, B, C, D, t);
+}
+
+template <size_t N> static void find_minmax(const SkPoint pts[],
+ SkScalar* minPtr, SkScalar* maxPtr) {
+ SkScalar min, max;
+ min = max = pts[0].fX;
+ for (size_t i = 1; i < N; ++i) {
+ min = std::min(min, pts[i].fX);
+ max = std::max(max, pts[i].fX);
+ }
+ *minPtr = min;
+ *maxPtr = max;
+}
+
+static bool checkOnCurve(SkScalar x, SkScalar y, const SkPoint& start, const SkPoint& end) {
+ if (start.fY == end.fY) {
+ return between(start.fX, x, end.fX) && x != end.fX;
+ } else {
+ return x == start.fX && y == start.fY;
+ }
+}
+
// Winding-number contribution at (x, y) of a cubic that is monotonic in Y.
// Increments *onCurveCount when the query point lies on the curve itself.
static int winding_mono_cubic(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
    SkScalar y0 = pts[0].fY;
    SkScalar y3 = pts[3].fY;

    // dir is +1 when the cubic descends in y, -1 when it ascends.
    int dir = 1;
    if (y0 > y3) {
        using std::swap;
        swap(y0, y3);
        dir = -1;
    }
    if (y < y0 || y > y3) {
        return 0;
    }
    if (checkOnCurve(x, y, pts[0], pts[3])) {
        *onCurveCount += 1;
        return 0;
    }
    // The y == y3 endpoint contributes nothing to the winding count.
    if (y == y3) {
        return 0;
    }

    // quickreject or quickaccept
    SkScalar min, max;
    find_minmax<4>(pts, &min, &max);
    if (x < min) {
        return 0;
    }
    if (x > max) {
        return dir;
    }

    // compute the actual x(t) value
    SkScalar t;
    if (!SkCubicClipper::ChopMonoAtY(pts, y, &t)) {
        return 0;
    }
    SkScalar xt = eval_cubic_pts(pts[0].fX, pts[1].fX, pts[2].fX, pts[3].fX, t);
    if (SkScalarNearlyEqual(xt, x)) {
        if (x != pts[3].fX || y != pts[3].fY) {  // don't test end points; they're start points
            *onCurveCount += 1;
            return 0;
        }
    }
    return xt < x ? dir : 0;
}
+
+static int winding_cubic(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkPoint dst[10];
+ int n = SkChopCubicAtYExtrema(pts, dst);
+ int w = 0;
+ for (int i = 0; i <= n; ++i) {
+ w += winding_mono_cubic(&dst[i * 3], x, y, onCurveCount);
+ }
+ return w;
+}
+
+static double conic_eval_numerator(const SkScalar src[], SkScalar w, SkScalar t) {
+ SkASSERT(src);
+ SkASSERT(t >= 0 && t <= 1);
+ SkScalar src2w = src[2] * w;
+ SkScalar C = src[0];
+ SkScalar A = src[4] - 2 * src2w + C;
+ SkScalar B = 2 * (src2w - C);
+ return poly_eval(A, B, C, t);
+}
+
+
+static double conic_eval_denominator(SkScalar w, SkScalar t) {
+ SkScalar B = 2 * (w - 1);
+ SkScalar C = 1;
+ SkScalar A = -B;
+ return poly_eval(A, B, C, t);
+}
+
// Winding-number contribution at (x, y) of a conic that is monotonic in Y.
// Increments *onCurveCount when the query point lies on the curve itself.
static int winding_mono_conic(const SkConic& conic, SkScalar x, SkScalar y, int* onCurveCount) {
    const SkPoint* pts = conic.fPts;
    SkScalar y0 = pts[0].fY;
    SkScalar y2 = pts[2].fY;

    // dir is +1 when the conic descends in y, -1 when it ascends.
    int dir = 1;
    if (y0 > y2) {
        using std::swap;
        swap(y0, y2);
        dir = -1;
    }
    if (y < y0 || y > y2) {
        return 0;
    }
    if (checkOnCurve(x, y, pts[0], pts[2])) {
        *onCurveCount += 1;
        return 0;
    }
    // The y == y2 endpoint contributes nothing to the winding count.
    if (y == y2) {
        return 0;
    }

    // Solve the conic's y(t) == y for t on [0, 1].
    SkScalar roots[2];
    SkScalar A = pts[2].fY;
    SkScalar B = pts[1].fY * conic.fW - y * conic.fW + y;
    SkScalar C = pts[0].fY;
    A += C - 2 * B;  // A = a + c - 2*(b*w - yCept*w + yCept)
    B -= C;  // B = b*w - w * yCept + yCept - a
    C -= y;
    int n = SkFindUnitQuadRoots(A, 2 * B, C, roots);
    SkASSERT(n <= 1);
    SkScalar xt;
    if (0 == n) {
        // zero roots are returned only when y0 == y
        // Need [0] if dir == 1
        // and [2] if dir == -1
        xt = pts[1 - dir].fX;
    } else {
        SkScalar t = roots[0];
        xt = conic_eval_numerator(&pts[0].fX, conic.fW, t) / conic_eval_denominator(conic.fW, t);
    }
    if (SkScalarNearlyEqual(xt, x)) {
        if (x != pts[2].fX || y != pts[2].fY) {  // don't test end points; they're start points
            *onCurveCount += 1;
            return 0;
        }
    }
    return xt < x ? dir : 0;
}
+
+static bool is_mono_quad(SkScalar y0, SkScalar y1, SkScalar y2) {
+ // return SkScalarSignAsInt(y0 - y1) + SkScalarSignAsInt(y1 - y2) != 0;
+ if (y0 == y1) {
+ return true;
+ }
+ if (y0 < y1) {
+ return y1 <= y2;
+ } else {
+ return y1 >= y2;
+ }
+}
+
// Winding-number contribution of a general conic at (x, y): chops at the Y
// extrema when needed, then sums the monotonic pieces' contributions.
static int winding_conic(const SkPoint pts[], SkScalar x, SkScalar y, SkScalar weight,
                         int* onCurveCount) {
    SkConic conic(pts, weight);
    SkConic chopped[2];
    // If the data points are very large, the conic may not be monotonic but may also
    // fail to chop. Then, the chopper does not split the original conic in two.
    bool isMono = is_mono_quad(pts[0].fY, pts[1].fY, pts[2].fY) || !conic.chopAtYExtrema(chopped);
    int w = winding_mono_conic(isMono ? conic : chopped[0], x, y, onCurveCount);
    if (!isMono) {
        w += winding_mono_conic(chopped[1], x, y, onCurveCount);
    }
    return w;
}
+
// Winding-number contribution at (x, y) of a quadratic that is monotonic in Y.
// Increments *onCurveCount when the query point lies on the curve itself.
static int winding_mono_quad(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
    SkScalar y0 = pts[0].fY;
    SkScalar y2 = pts[2].fY;

    // dir is +1 when the quad descends in y, -1 when it ascends.
    int dir = 1;
    if (y0 > y2) {
        using std::swap;
        swap(y0, y2);
        dir = -1;
    }
    if (y < y0 || y > y2) {
        return 0;
    }
    if (checkOnCurve(x, y, pts[0], pts[2])) {
        *onCurveCount += 1;
        return 0;
    }
    // The y == y2 endpoint contributes nothing to the winding count.
    if (y == y2) {
        return 0;
    }
    // bounds check on X (not required. is it faster?)
#if 0
    if (pts[0].fX > x && pts[1].fX > x && pts[2].fX > x) {
        return 0;
    }
#endif

    // Solve the quad's y(t) == y for t on [0, 1].
    SkScalar roots[2];
    int n = SkFindUnitQuadRoots(pts[0].fY - 2 * pts[1].fY + pts[2].fY,
                                2 * (pts[1].fY - pts[0].fY),
                                pts[0].fY - y,
                                roots);
    SkASSERT(n <= 1);
    SkScalar xt;
    if (0 == n) {
        // zero roots are returned only when y0 == y
        // Need [0] if dir == 1
        // and [2] if dir == -1
        xt = pts[1 - dir].fX;
    } else {
        SkScalar t = roots[0];
        SkScalar C = pts[0].fX;
        SkScalar A = pts[2].fX - 2 * pts[1].fX + C;
        SkScalar B = 2 * (pts[1].fX - C);
        xt = poly_eval(A, B, C, t);
    }
    if (SkScalarNearlyEqual(xt, x)) {
        if (x != pts[2].fX || y != pts[2].fY) {  // don't test end points; they're start points
            *onCurveCount += 1;
            return 0;
        }
    }
    return xt < x ? dir : 0;
}
+
+static int winding_quad(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
+ SkPoint dst[5];
+ int n = 0;
+
+ if (!is_mono_quad(pts[0].fY, pts[1].fY, pts[2].fY)) {
+ n = SkChopQuadAtYExtrema(pts, dst);
+ pts = dst;
+ }
+ int w = winding_mono_quad(pts, x, y, onCurveCount);
+ if (n > 0) {
+ w += winding_mono_quad(&pts[2], x, y, onCurveCount);
+ }
+ return w;
+}
+
// Winding-number contribution at (x, y) of a line segment. Increments
// *onCurveCount when the query point lies on the segment itself.
static int winding_line(const SkPoint pts[], SkScalar x, SkScalar y, int* onCurveCount) {
    SkScalar x0 = pts[0].fX;
    SkScalar y0 = pts[0].fY;
    SkScalar x1 = pts[1].fX;
    SkScalar y1 = pts[1].fY;

    // Signed dy, captured before y0/y1 may be swapped below.
    SkScalar dy = y1 - y0;

    // dir is +1 when the segment descends in y, -1 when it ascends.
    int dir = 1;
    if (y0 > y1) {
        using std::swap;
        swap(y0, y1);
        dir = -1;
    }
    if (y < y0 || y > y1) {
        return 0;
    }
    if (checkOnCurve(x, y, pts[0], pts[1])) {
        *onCurveCount += 1;
        return 0;
    }
    // The y == y1 (max-y) endpoint contributes nothing to the winding count.
    if (y == y1) {
        return 0;
    }
    // Cross product of the segment direction and (query - start); its sign
    // tells which side of the line the query point is on.
    SkScalar cross = (x1 - x0) * (y - pts[0].fY) - dy * (x - x0);

    if (!cross) {
        // zero cross means the point is on the line, and since the case where
        // y of the query point is at the end point is handled above, we can be
        // sure that we're on the line (excluding the end point) here
        if (x != x1 || y != pts[1].fY) {
            *onCurveCount += 1;
        }
        dir = 0;
    } else if (SkScalarSignAsInt(cross) == dir) {
        dir = 0;
    }
    return dir;
}
+
// Appends to tangents the tangent vector of the cubic at each point where the
// curve passes through (x, y). No-op if (x, y) is outside the control bounds.
static void tangent_cubic(const SkPoint pts[], SkScalar x, SkScalar y,
        SkTDArray<SkVector>* tangents) {
    // Quick reject: y (then x) must lie between some adjacent control values.
    if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)
            && !between(pts[2].fY, y, pts[3].fY)) {
        return;
    }
    if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)
            && !between(pts[2].fX, x, pts[3].fX)) {
        return;
    }
    // Chop into y-monotonic spans, then solve each span for y and verify x.
    SkPoint dst[10];
    int n = SkChopCubicAtYExtrema(pts, dst);
    for (int i = 0; i <= n; ++i) {
        SkPoint* c = &dst[i * 3];
        SkScalar t;
        if (!SkCubicClipper::ChopMonoAtY(c, y, &t)) {
            continue;
        }
        SkScalar xt = eval_cubic_pts(c[0].fX, c[1].fX, c[2].fX, c[3].fX, t);
        if (!SkScalarNearlyEqual(x, xt)) {
            continue;
        }
        SkVector tangent;
        SkEvalCubicAt(c, t, nullptr, &tangent, nullptr);
        tangents->push_back(tangent);
    }
}
+
// Appends to tangents the tangent vector of the conic (weight w) at each point
// where the curve passes through (x, y). No-op outside the control bounds.
static void tangent_conic(const SkPoint pts[], SkScalar x, SkScalar y, SkScalar w,
            SkTDArray<SkVector>* tangents) {
    // Quick reject: y (then x) must lie between some adjacent control values.
    if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)) {
        return;
    }
    if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)) {
        return;
    }
    // Solve the conic's y(t) == y for t on [0, 1].
    SkScalar roots[2];
    SkScalar A = pts[2].fY;
    SkScalar B = pts[1].fY * w - y * w + y;
    SkScalar C = pts[0].fY;
    A += C - 2 * B;  // A = a + c - 2*(b*w - yCept*w + yCept)
    B -= C;  // B = b*w - w * yCept + yCept - a
    C -= y;
    int n = SkFindUnitQuadRoots(A, 2 * B, C, roots);
    for (int index = 0; index < n; ++index) {
        SkScalar t = roots[index];
        // Verify the solution actually hits the query x before recording.
        SkScalar xt = conic_eval_numerator(&pts[0].fX, w, t) / conic_eval_denominator(w, t);
        if (!SkScalarNearlyEqual(x, xt)) {
            continue;
        }
        SkConic conic(pts, w);
        tangents->push_back(conic.evalTangentAt(t));
    }
}
+
// Appends to tangents the tangent vector of the quadratic at each point where
// the curve passes through (x, y). No-op outside the control bounds.
static void tangent_quad(const SkPoint pts[], SkScalar x, SkScalar y,
        SkTDArray<SkVector>* tangents) {
    // Quick reject: y (then x) must lie between some adjacent control values.
    if (!between(pts[0].fY, y, pts[1].fY) && !between(pts[1].fY, y, pts[2].fY)) {
        return;
    }
    if (!between(pts[0].fX, x, pts[1].fX) && !between(pts[1].fX, x, pts[2].fX)) {
        return;
    }
    // Solve the quad's y(t) == y for t on [0, 1].
    SkScalar roots[2];
    int n = SkFindUnitQuadRoots(pts[0].fY - 2 * pts[1].fY + pts[2].fY,
                                2 * (pts[1].fY - pts[0].fY),
                                pts[0].fY - y,
                                roots);
    for (int index = 0; index < n; ++index) {
        SkScalar t = roots[index];
        SkScalar C = pts[0].fX;
        SkScalar A = pts[2].fX - 2 * pts[1].fX + C;
        SkScalar B = 2 * (pts[1].fX - C);
        // Verify the solution actually hits the query x before recording.
        SkScalar xt = poly_eval(A, B, C, t);
        if (!SkScalarNearlyEqual(x, xt)) {
            continue;
        }
        tangents->push_back(SkEvalQuadTangentAt(pts, t));
    }
}
+
+static void tangent_line(const SkPoint pts[], SkScalar x, SkScalar y,
+ SkTDArray<SkVector>* tangents) {
+ SkScalar y0 = pts[0].fY;
+ SkScalar y1 = pts[1].fY;
+ if (!between(y0, y, y1)) {
+ return;
+ }
+ SkScalar x0 = pts[0].fX;
+ SkScalar x1 = pts[1].fX;
+ if (!between(x0, x, x1)) {
+ return;
+ }
+ SkScalar dx = x1 - x0;
+ SkScalar dy = y1 - y0;
+ if (!SkScalarNearlyEqual((x - x0) * dy, dx * (y - y0))) {
+ return;
+ }
+ SkVector v;
+ v.set(dx, dy);
+ tangents->push_back(v);
+}
+
+static bool contains_inclusive(const SkRect& r, SkScalar x, SkScalar y) {
+ return r.fLeft <= x && x <= r.fRight && r.fTop <= y && y <= r.fBottom;
+}
+
// Returns true if (x, y) is inside the path per its fill rule. Computes the
// winding number in a first pass; if the point only touches the outline an
// even number of times under winding fill, a second pass collects tangents at
// those touches to detect coincident (cancelling) edges.
bool SkPath::contains(SkScalar x, SkScalar y) const {
    bool isInverse = this->isInverseFillType();
    if (this->isEmpty()) {
        return isInverse;
    }

    // Points outside the bounds can only be "inside" for inverse fills.
    if (!contains_inclusive(this->getBounds(), x, y)) {
        return isInverse;
    }

    // First pass: accumulate the winding number segment by segment.
    SkPath::Iter iter(*this, true);
    bool done = false;
    int w = 0;
    int onCurveCount = 0;
    do {
        SkPoint pts[4];
        switch (iter.next(pts)) {
            case SkPath::kMove_Verb:
            case SkPath::kClose_Verb:
                break;
            case SkPath::kLine_Verb:
                w += winding_line(pts, x, y, &onCurveCount);
                break;
            case SkPath::kQuad_Verb:
                w += winding_quad(pts, x, y, &onCurveCount);
                break;
            case SkPath::kConic_Verb:
                w += winding_conic(pts, x, y, iter.conicWeight(), &onCurveCount);
                break;
            case SkPath::kCubic_Verb:
                w += winding_cubic(pts, x, y, &onCurveCount);
                break;
            case SkPath::kDone_Verb:
                done = true;
                break;
       }
    } while (!done);
    bool evenOddFill = SkPathFillType::kEvenOdd        == this->getFillType()
                    || SkPathFillType::kInverseEvenOdd == this->getFillType();
    if (evenOddFill) {
        w &= 1;
    }
    if (w) {
        return !isInverse;
    }
    if (onCurveCount <= 1) {
        return SkToBool(onCurveCount) ^ isInverse;
    }
    if ((onCurveCount & 1) || evenOddFill) {
        return SkToBool(onCurveCount & 1) ^ isInverse;
    }
    // If the point touches an even number of curves, and the fill is winding, check for
    // coincidence. Count coincidence as places where the on curve points have identical tangents.
    iter.setPath(*this, true);
    done = false;
    SkTDArray<SkVector> tangents;
    do {
        SkPoint pts[4];
        int oldCount = tangents.size();
        switch (iter.next(pts)) {
            case SkPath::kMove_Verb:
            case SkPath::kClose_Verb:
                break;
            case SkPath::kLine_Verb:
                tangent_line(pts, x, y, &tangents);
                break;
            case SkPath::kQuad_Verb:
                tangent_quad(pts, x, y, &tangents);
                break;
            case SkPath::kConic_Verb:
                tangent_conic(pts, x, y, iter.conicWeight(), &tangents);
                break;
            case SkPath::kCubic_Verb:
                tangent_cubic(pts, x, y, &tangents);
                break;
            case SkPath::kDone_Verb:
                done = true;
                break;
       }
       if (tangents.size() > oldCount) {
            int last = tangents.size() - 1;
            const SkVector& tangent = tangents[last];
            if (SkScalarNearlyZero(SkPointPriv::LengthSqd(tangent))) {
                // Degenerate (zero-length) tangent: discard it.
                tangents.remove(last);
            } else {
                // Cancel this tangent against an earlier parallel, opposing one.
                for (int index = 0; index < last; ++index) {
                    const SkVector& test = tangents[index];
                    if (SkScalarNearlyZero(test.cross(tangent))
                            && SkScalarSignAsInt(tangent.fX * test.fX) <= 0
                            && SkScalarSignAsInt(tangent.fY * test.fY) <= 0) {
                        tangents.remove(last);
                        tangents.removeShuffle(index);
                        break;
                    }
                }
            }
        }
    } while (!done);
    // Any uncancelled tangents mean the point is on a real (non-coincident) edge.
    return SkToBool(tangents.size()) ^ isInverse;
}
+
// Sort of like makeSpace(0) but with the additional requirement that we actively shrink the
// allocations to just fit the current needs. makeSpace() will only grow, but never shrinks.
//
void SkPath::shrinkToFit() {
    // Since this can relocate the allocated arrays, we have to defensively copy ourselves if
    // we're not the only owner of the pathref... since relocating the arrays will invalidate
    // any existing iterators.
    if (!fPathRef->unique()) {
        SkPathRef* pr = new SkPathRef;
        pr->copy(*fPathRef, 0, 0);
        fPathRef.reset(pr);
    }
    fPathRef->fPoints.shrink_to_fit();
    fPathRef->fVerbs.shrink_to_fit();
    fPathRef->fConicWeights.shrink_to_fit();
    SkDEBUGCODE(fPathRef->validate();)
}
+
+
// Approximates the conic (p0, p1, p2) with weight w by a chain of quadratics,
// subdividing pow2 times; writes the quads to pts and returns the count from
// SkConic::chopIntoQuadsPOW2 (the number of quads produced).
int SkPath::ConvertConicToQuads(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
                                SkScalar w, SkPoint pts[], int pow2) {
    const SkConic conic(p0, p1, p2, w);
    return conic.chopIntoQuadsPOW2(pts, pow2);
}
+
// Determines whether the path is a single axis-aligned rectangle built from
// exactly one moveTo and four lines (plus close, unless isSimpleFill allows an
// implicit close). On success fills in the rect, its winding direction, and
// which corner the path starts at.
bool SkPathPriv::IsSimpleRect(const SkPath& path, bool isSimpleFill, SkRect* rect,
                              SkPathDirection* direction, unsigned* start) {
    // A rect can only be made of line segments.
    if (path.getSegmentMasks() != SkPath::kLine_SegmentMask) {
        return false;
    }
    SkPoint rectPts[5];
    int rectPtCnt = 0;
    bool needsClose = !isSimpleFill;
    for (auto [v, verbPts, w] : SkPathPriv::Iterate(path)) {
        switch (v) {
            case SkPathVerb::kMove:
                // Only a single leading moveTo is allowed.
                if (0 != rectPtCnt) {
                    return false;
                }
                rectPts[0] = verbPts[0];
                ++rectPtCnt;
                break;
            case SkPathVerb::kLine:
                // At most 4 line segments (5 points including the start).
                if (5 == rectPtCnt) {
                    return false;
                }
                rectPts[rectPtCnt] = verbPts[1];
                ++rectPtCnt;
                break;
            case SkPathVerb::kClose:
                if (4 == rectPtCnt) {
                    // Implicitly close back to the start point.
                    rectPts[4] = rectPts[0];
                    rectPtCnt = 5;
                }
                needsClose = false;
                break;
            case SkPathVerb::kQuad:
            case SkPathVerb::kConic:
            case SkPathVerb::kCubic:
                return false;
        }
    }
    if (needsClose) {
        return false;
    }
    if (rectPtCnt < 5) {
        return false;
    }
    // The contour must return exactly to its start.
    if (rectPts[0] != rectPts[4]) {
        return false;
    }
    // Check for two cases of rectangles: pts 0 and 3 form a vertical edge or a horizontal edge (
    // and pts 1 and 2 the opposite vertical or horizontal edge).
    bool vec03IsVertical;
    if (rectPts[0].fX == rectPts[3].fX && rectPts[1].fX == rectPts[2].fX &&
        rectPts[0].fY == rectPts[1].fY && rectPts[3].fY == rectPts[2].fY) {
        // Make sure it has non-zero width and height
        if (rectPts[0].fX == rectPts[1].fX || rectPts[0].fY == rectPts[3].fY) {
            return false;
        }
        vec03IsVertical = true;
    } else if (rectPts[0].fY == rectPts[3].fY && rectPts[1].fY == rectPts[2].fY &&
               rectPts[0].fX == rectPts[1].fX && rectPts[3].fX == rectPts[2].fX) {
        // Make sure it has non-zero width and height
        if (rectPts[0].fY == rectPts[1].fY || rectPts[0].fX == rectPts[3].fX) {
            return false;
        }
        vec03IsVertical = false;
    } else {
        return false;
    }
    // Set sortFlags so that it has the low bit set if pt index 0 is on right edge and second bit
    // set if it is on the bottom edge.
    unsigned sortFlags =
            ((rectPts[0].fX < rectPts[2].fX) ? 0b00 : 0b01) |
            ((rectPts[0].fY < rectPts[2].fY) ? 0b00 : 0b10);
    switch (sortFlags) {
        case 0b00:
            rect->setLTRB(rectPts[0].fX, rectPts[0].fY, rectPts[2].fX, rectPts[2].fY);
            *direction = vec03IsVertical ? SkPathDirection::kCW : SkPathDirection::kCCW;
            *start = 0;
            break;
        case 0b01:
            rect->setLTRB(rectPts[2].fX, rectPts[0].fY, rectPts[0].fX, rectPts[2].fY);
            *direction = vec03IsVertical ? SkPathDirection::kCCW : SkPathDirection::kCW;
            *start = 1;
            break;
        case 0b10:
            rect->setLTRB(rectPts[0].fX, rectPts[2].fY, rectPts[2].fX, rectPts[0].fY);
            *direction = vec03IsVertical ? SkPathDirection::kCCW : SkPathDirection::kCW;
            *start = 3;
            break;
        case 0b11:
            rect->setLTRB(rectPts[2].fX, rectPts[2].fY, rectPts[0].fX, rectPts[0].fY);
            *direction = vec03IsVertical ? SkPathDirection::kCW : SkPathDirection::kCCW;
            *start = 2;
            break;
    }
    return true;
}
+
+// Decides whether the arc produced by CreateDrawArcPath (same parameters)
+// yields convex geometry. Behavior-identical restyle of the original.
+bool SkPathPriv::DrawArcIsConvex(SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect) {
+    const SkScalar absSweep = SkScalarAbs(sweepAngle);
+    if (isFillNoPathEffect && absSweep >= 360.f) {
+        // Full sweep with plain fill gets converted to an oval, which is convex.
+        return true;
+    }
+    // A pie wedge (useCenter) stays convex only up to a half turn; an open arc
+    // (circle clipped to a secant) stays convex up to a full turn, beyond which
+    // it wraps back on top of itself.
+    const SkScalar convexLimit = useCenter ? 180.f : 360.f;
+    return absSweep <= convexLimit;
+}
+
+// Builds the path that SkCanvas::drawArc renders: either a full oval (when the
+// sweep covers >= 360 degrees and there is no path effect), or a sequence of
+// arcTo segments, optionally anchored at the oval center to form a pie wedge.
+// The sweep is NOT reduced mod 360 -- overlapping turns are emitted explicitly.
+void SkPathPriv::CreateDrawArcPath(SkPath* path, const SkRect& oval, SkScalar startAngle,
+                                   SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect) {
+    SkASSERT(!oval.isEmpty());
+    SkASSERT(sweepAngle);
+#if defined(SK_BUILD_FOR_FUZZER)
+    // Cap pathological sweeps (>10 turns) so the loops below stay bounded.
+    if (sweepAngle > 3600.0f || sweepAngle < -3600.0f) {
+        return;
+    }
+#endif
+    path->reset();
+    path->setIsVolatile(true);
+    path->setFillType(SkPathFillType::kWinding);
+    if (isFillNoPathEffect && SkScalarAbs(sweepAngle) >= 360.f) {
+        path->addOval(oval);
+        SkASSERT(path->isConvex() && DrawArcIsConvex(sweepAngle, false, isFillNoPathEffect));
+        return;
+    }
+    if (useCenter) {
+        // Pie wedge: start at the center so the arc is joined back by close().
+        path->moveTo(oval.centerX(), oval.centerY());
+    }
+    auto firstDir =
+            sweepAngle > 0 ? SkPathFirstDirection::kCW : SkPathFirstDirection::kCCW;
+    bool convex = DrawArcIsConvex(sweepAngle, useCenter, isFillNoPathEffect);
+    // Arc to mods at 360 and drawArc is not supposed to.
+    bool forceMoveTo = !useCenter;
+    // Emit whole turns as pairs of 180-degree arcs until the residual sweep
+    // fits in (-360, 360), which arcTo can handle directly.
+    while (sweepAngle <= -360.f) {
+        path->arcTo(oval, startAngle, -180.f, forceMoveTo);
+        startAngle -= 180.f;
+        path->arcTo(oval, startAngle, -180.f, false);
+        startAngle -= 180.f;
+        forceMoveTo = false;
+        sweepAngle += 360.f;
+    }
+    while (sweepAngle >= 360.f) {
+        path->arcTo(oval, startAngle, 180.f, forceMoveTo);
+        startAngle += 180.f;
+        path->arcTo(oval, startAngle, 180.f, false);
+        startAngle += 180.f;
+        forceMoveTo = false;
+        sweepAngle -= 360.f;
+    }
+    path->arcTo(oval, startAngle, sweepAngle, forceMoveTo);
+    if (useCenter) {
+        path->close();
+    }
+    // Record what we already know so later queries skip recomputation.
+    path->setConvexity(convex ? SkPathConvexity::kConvex : SkPathConvexity::kConcave);
+    path->setFirstDirection(firstDir);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Evaluates the quad at its interior x/y extrema (up to 2 total roots across
+// both axes) and appends the end point. Returns the number of points written
+// to extremas (1..3). The caller's bounds accumulation covers the start point.
+static int compute_quad_extremas(const SkPoint src[3], SkPoint extremas[3]) {
+    SkScalar ts[2];
+    // The second call appends its roots after the first call's, so ts holds at
+    // most 2 entries combined.
+    int n = SkFindQuadExtrema(src[0].fX, src[1].fX, src[2].fX, ts);
+    n += SkFindQuadExtrema(src[0].fY, src[1].fY, src[2].fY, &ts[n]);
+    SkASSERT(n >= 0 && n <= 2);
+    for (int i = 0; i < n; ++i) {
+        extremas[i] = SkEvalQuadAt(src, ts[i]);
+    }
+    extremas[n] = src[2];
+    return n + 1;
+}
+
+// Conic analogue of compute_quad_extremas: evaluates the conic at its interior
+// x/y extrema (up to 2 total) plus the end point. Returns the count written.
+static int compute_conic_extremas(const SkPoint src[3], SkScalar w, SkPoint extremas[3]) {
+    SkConic conic(src[0], src[1], src[2], w);
+    SkScalar ts[2];
+    int n = conic.findXExtrema(ts);
+    n += conic.findYExtrema(&ts[n]);
+    SkASSERT(n >= 0 && n <= 2);
+    for (int i = 0; i < n; ++i) {
+        extremas[i] = conic.evalAt(ts[i]);
+    }
+    extremas[n] = src[2];
+    return n + 1;
+}
+
+// Cubic analogue: up to 2 extrema per axis (4 total) plus the end point, so
+// extremas must hold 5 entries. Returns the count written (1..5).
+static int compute_cubic_extremas(const SkPoint src[4], SkPoint extremas[5]) {
+    SkScalar ts[4];
+    int n = SkFindCubicExtrema(src[0].fX, src[1].fX, src[2].fX, src[3].fX, ts);
+    n += SkFindCubicExtrema(src[0].fY, src[1].fY, src[2].fY, src[3].fY, &ts[n]);
+    SkASSERT(n >= 0 && n <= 4);
+    for (int i = 0; i < n; ++i) {
+        // Only the position is needed; tangent/curvature outputs are skipped.
+        SkEvalCubicAt(src, ts[i], &extremas[i], nullptr, nullptr);
+    }
+    extremas[n] = src[3];
+    return n + 1;
+}
+
+// Computes the exact bounds of the path geometry by walking every segment and
+// accumulating curve extrema, rather than the (looser) control-point bounds.
+SkRect SkPath::computeTightBounds() const {
+    if (0 == this->countVerbs()) {
+        return SkRect::MakeEmpty();
+    }
+
+    // Lines only: control-point bounds are already tight.
+    if (this->getSegmentMasks() == SkPath::kLine_SegmentMask) {
+        return this->getBounds();
+    }
+
+    SkPoint extremas[5]; // big enough to hold worst-case curve type (cubic) extremas + 1
+
+    // initialize with the first MoveTo, so we don't have to check inside the switch
+    skvx::float2 min, max;
+    min = max = from_point(this->getPoint(0));
+    for (auto [verb, pts, w] : SkPathPriv::Iterate(*this)) {
+        int count = 0;
+        switch (verb) {
+            case SkPathVerb::kMove:
+                extremas[0] = pts[0];
+                count = 1;
+                break;
+            case SkPathVerb::kLine:
+                extremas[0] = pts[1];
+                count = 1;
+                break;
+            case SkPathVerb::kQuad:
+                count = compute_quad_extremas(pts, extremas);
+                break;
+            case SkPathVerb::kConic:
+                count = compute_conic_extremas(pts, *w, extremas);
+                break;
+            case SkPathVerb::kCubic:
+                count = compute_cubic_extremas(pts, extremas);
+                break;
+            case SkPathVerb::kClose:
+                break;
+        }
+        for (int i = 0; i < count; ++i) {
+            skvx::float2 tmp = from_point(extremas[i]);
+            min = skvx::min(min, tmp);
+            max = skvx::max(max, tmp);
+        }
+    }
+    SkRect bounds;
+    // min holds (left, top) and max holds (right, bottom); each store writes
+    // two consecutive floats of the rect.
+    min.store((SkPoint*)&bounds.fLeft);
+    max.store((SkPoint*)&bounds.fRight);
+    return bounds;
+}
+
+// A line is degenerate when its endpoints coincide -- exactly, or within the
+// standard point tolerance when 'exact' is false.
+bool SkPath::IsLineDegenerate(const SkPoint& p1, const SkPoint& p2, bool exact) {
+    if (exact) {
+        return p1 == p2;
+    }
+    return SkPointPriv::EqualsWithinTolerance(p1, p2);
+}
+
+// A quad is degenerate when all three control points coincide -- exactly, or
+// within the standard point tolerance when 'exact' is false.
+bool SkPath::IsQuadDegenerate(const SkPoint& p1, const SkPoint& p2,
+                              const SkPoint& p3, bool exact) {
+    if (exact) {
+        return p1 == p2 && p2 == p3;
+    }
+    return SkPointPriv::EqualsWithinTolerance(p1, p2) &&
+           SkPointPriv::EqualsWithinTolerance(p2, p3);
+}
+
+// A cubic is degenerate when all four control points coincide -- exactly, or
+// within the standard point tolerance when 'exact' is false.
+bool SkPath::IsCubicDegenerate(const SkPoint& p1, const SkPoint& p2,
+                               const SkPoint& p3, const SkPoint& p4, bool exact) {
+    if (exact) {
+        return p1 == p2 && p2 == p3 && p3 == p4;
+    }
+    return SkPointPriv::EqualsWithinTolerance(p1, p2) &&
+           SkPointPriv::EqualsWithinTolerance(p2, p3) &&
+           SkPointPriv::EqualsWithinTolerance(p3, p4);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Validates a raw verb stream and tallies how many points and conic weights it
+// requires, plus the union of segment masks. A stream is invalid if any
+// drawing verb (line/quad/conic/cubic/close) appears before the first move,
+// if an unknown verb byte is seen, or if the verb count is large enough that
+// the point tally could overflow (each verb consumes at most 3 points).
+SkPathVerbAnalysis sk_path_analyze_verbs(const uint8_t vbs[], int verbCount) {
+    SkPathVerbAnalysis info = {false, 0, 0, 0};
+
+    bool needMove = true;
+    bool invalid = false;
+
+    if (verbCount >= (INT_MAX / 3)) {
+        // Guard against int overflow of info.points below.
+        invalid = true;
+    } else {
+        for (int i = 0; i < verbCount; ++i) {
+            switch ((SkPathVerb)vbs[i]) {
+                case SkPathVerb::kMove:
+                    needMove = false;
+                    info.points += 1;
+                    break;
+                case SkPathVerb::kLine:
+                    invalid |= needMove;
+                    info.segmentMask |= kLine_SkPathSegmentMask;
+                    info.points += 1;
+                    break;
+                case SkPathVerb::kQuad:
+                    invalid |= needMove;
+                    info.segmentMask |= kQuad_SkPathSegmentMask;
+                    info.points += 2;
+                    break;
+                case SkPathVerb::kConic:
+                    invalid |= needMove;
+                    info.segmentMask |= kConic_SkPathSegmentMask;
+                    info.points += 2;
+                    info.weights += 1;
+                    break;
+                case SkPathVerb::kCubic:
+                    invalid |= needMove;
+                    info.segmentMask |= kCubic_SkPathSegmentMask;
+                    info.points += 3;
+                    break;
+                case SkPathVerb::kClose:
+                    invalid |= needMove;
+                    // The next drawing verb requires a fresh move.
+                    needMove = true;
+                    break;
+                default:
+                    invalid = true;
+                    break;
+            }
+        }
+    }
+    info.valid = !invalid;
+    return info;
+}
+
+// Constructs a path directly from raw arrays of points, verbs, and conic
+// weights. Returns an empty path if there are no verbs or if the verb stream
+// is inconsistent with the supplied point/weight counts. Note: only the
+// counts *required* by the verbs (info.points / info.weights) are copied;
+// surplus entries in pts/ws are ignored.
+SkPath SkPath::Make(const SkPoint pts[], int pointCount,
+                    const uint8_t vbs[], int verbCount,
+                    const SkScalar ws[], int wCount,
+                    SkPathFillType ft, bool isVolatile) {
+    if (verbCount <= 0) {
+        return SkPath();
+    }
+
+    const auto info = sk_path_analyze_verbs(vbs, verbCount);
+    if (!info.valid || info.points > pointCount || info.weights > wCount) {
+        SkDEBUGFAIL("invalid verbs and number of points/weights");
+        return SkPath();
+    }
+
+    return SkPath(sk_sp<SkPathRef>(new SkPathRef(
+                                       SkPathRef::PointsArray(pts, info.points),
+                                       SkPathRef::VerbsArray(vbs, verbCount),
+                                       SkPathRef::ConicWeightsArray(ws, info.weights),
+                                       info.segmentMask)),
+                  ft, isVolatile, SkPathConvexity::kUnknown, SkPathFirstDirection::kUnknown);
+}
+
+// Factory: a closed rectangle contour with the given winding and start corner.
+SkPath SkPath::Rect(const SkRect& r, SkPathDirection dir, unsigned startIndex) {
+    SkPathBuilder builder;
+    builder.addRect(r, dir, startIndex);
+    return builder.detach();
+}
+
+// Factory: an oval inscribed in r with the given winding direction.
+SkPath SkPath::Oval(const SkRect& r, SkPathDirection dir) {
+    SkPathBuilder builder;
+    builder.addOval(r, dir);
+    return builder.detach();
+}
+
+// Factory: an oval inscribed in r, starting at the given quarter index.
+SkPath SkPath::Oval(const SkRect& r, SkPathDirection dir, unsigned startIndex) {
+    SkPathBuilder builder;
+    builder.addOval(r, dir, startIndex);
+    return builder.detach();
+}
+
+// Factory: a circle of radius r centered at (x, y).
+SkPath SkPath::Circle(SkScalar x, SkScalar y, SkScalar r, SkPathDirection dir) {
+    SkPathBuilder builder;
+    builder.addCircle(x, y, r, dir);
+    return builder.detach();
+}
+
+// Factory: a rounded rectangle with the given winding direction.
+SkPath SkPath::RRect(const SkRRect& rr, SkPathDirection dir) {
+    SkPathBuilder builder;
+    builder.addRRect(rr, dir);
+    return builder.detach();
+}
+
+// Factory: a rounded rectangle starting at the given point index.
+SkPath SkPath::RRect(const SkRRect& rr, SkPathDirection dir, unsigned startIndex) {
+    SkPathBuilder builder;
+    builder.addRRect(rr, dir, startIndex);
+    return builder.detach();
+}
+
+// Factory: a rounded rectangle built from a rect plus uniform x/y radii.
+SkPath SkPath::RRect(const SkRect& r, SkScalar rx, SkScalar ry, SkPathDirection dir) {
+    const SkRRect rrect = SkRRect::MakeRectXY(r, rx, ry);
+    SkPathBuilder builder;
+    builder.addRRect(rrect, dir);
+    return builder.detach();
+}
+
+// Factory: a polygon through the given points, optionally closed, with the
+// requested fill type and volatility.
+SkPath SkPath::Polygon(const SkPoint pts[], int count, bool isClosed,
+                       SkPathFillType ft, bool isVolatile) {
+    SkPathBuilder builder;
+    builder.addPolygon(pts, count, isClosed);
+    builder.setFillType(ft);
+    builder.setIsVolatile(isVolatile);
+    return builder.detach();
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Scans verbs starting at *currVerb to decide whether the next contour is an
+// axis-aligned rectangle (only moves, axis-aligned lines, and closes; exactly
+// 3 or 4 direction changes with opposite sides antiparallel). On success the
+// cursors *currVerb / *ptsPtr are advanced so the caller can continue with the
+// following contour, and rect / isClosed / direction are filled in when
+// non-null. With allowPartial, an unclosed rect followed by a move is accepted
+// by synthesizing the missing close. Kept byte-identical: the cursor and
+// fall-through bookkeeping here is highly order-sensitive.
+bool SkPathPriv::IsRectContour(const SkPath& path, bool allowPartial, int* currVerb,
+                               const SkPoint** ptsPtr, bool* isClosed, SkPathDirection* direction,
+                               SkRect* rect) {
+    int corners = 0;
+    SkPoint closeXY;  // used to determine if final line falls on a diagonal
+    SkPoint lineStart;  // used to construct line from previous point
+    const SkPoint* firstPt = nullptr; // first point in the rect (last of first moves)
+    const SkPoint* lastPt = nullptr;  // last point in the rect (last of lines or first if closed)
+    SkPoint firstCorner;
+    SkPoint thirdCorner;
+    const SkPoint* pts = *ptsPtr;
+    const SkPoint* savePts = nullptr; // used to allow caller to iterate through a pair of rects
+    lineStart.set(0, 0);
+    signed char directions[] = {-1, -1, -1, -1, -1};  // -1 to 3; -1 is uninitialized
+    bool closedOrMoved = false;
+    bool autoClose = false;
+    bool insertClose = false;
+    int verbCnt = path.fPathRef->countVerbs();
+    while (*currVerb < verbCnt && (!allowPartial || !autoClose)) {
+        // insertClose replays a synthetic kClose before re-examining a kMove.
+        uint8_t verb = insertClose ? (uint8_t) SkPath::kClose_Verb : path.fPathRef->atVerb(*currVerb);
+        switch (verb) {
+            case SkPath::kClose_Verb:
+                savePts = pts;
+                autoClose = true;
+                insertClose = false;
+                [[fallthrough]];
+            case SkPath::kLine_Verb: {
+                if (SkPath::kClose_Verb != verb) {
+                    lastPt = pts;
+                }
+                // A close acts as a line back to the first point.
+                SkPoint lineEnd = SkPath::kClose_Verb == verb ? *firstPt : *pts++;
+                SkVector lineDelta = lineEnd - lineStart;
+                if (lineDelta.fX && lineDelta.fY) {
+                    return false; // diagonal
+                }
+                if (!lineDelta.isFinite()) {
+                    return false; // path contains infinity or NaN
+                }
+                if (lineStart == lineEnd) {
+                    break; // single point on side OK
+                }
+                int nextDirection = rect_make_dir(lineDelta.fX, lineDelta.fY); // 0 to 3
+                if (0 == corners) {
+                    directions[0] = nextDirection;
+                    corners = 1;
+                    closedOrMoved = false;
+                    lineStart = lineEnd;
+                    break;
+                }
+                if (closedOrMoved) {
+                    return false; // closed followed by a line
+                }
+                if (autoClose && nextDirection == directions[0]) {
+                    break; // colinear with first
+                }
+                closedOrMoved = autoClose;
+                if (directions[corners - 1] == nextDirection) {
+                    if (3 == corners && SkPath::kLine_Verb == verb) {
+                        thirdCorner = lineEnd;
+                    }
+                    lineStart = lineEnd;
+                    break; // colinear segment
+                }
+                directions[corners++] = nextDirection;
+                // opposite lines must point in opposite directions; xoring them should equal 2
+                switch (corners) {
+                    case 2:
+                        firstCorner = lineStart;
+                        break;
+                    case 3:
+                        if ((directions[0] ^ directions[2]) != 2) {
+                            return false;
+                        }
+                        thirdCorner = lineEnd;
+                        break;
+                    case 4:
+                        if ((directions[1] ^ directions[3]) != 2) {
+                            return false;
+                        }
+                        break;
+                    default:
+                        return false; // too many direction changes
+                }
+                lineStart = lineEnd;
+                break;
+            }
+            case SkPath::kQuad_Verb:
+            case SkPath::kConic_Verb:
+            case SkPath::kCubic_Verb:
+                return false; // quadratic, cubic not allowed
+            case SkPath::kMove_Verb:
+                if (allowPartial && !autoClose && directions[0] >= 0) {
+                    // Unclosed rect hit a move: synthesize the close, retry move.
+                    insertClose = true;
+                    *currVerb -= 1;  // try move again afterwards
+                    goto addMissingClose;
+                }
+                if (pts != *ptsPtr) {
+                    return false;
+                }
+                if (!corners) {
+                    firstPt = pts;
+                } else {
+                    closeXY = *firstPt - *lastPt;
+                    if (closeXY.fX && closeXY.fY) {
+                        return false;   // we're diagonal, abort
+                    }
+                }
+                lineStart = *pts++;
+                closedOrMoved = true;
+                break;
+            default:
+                SkDEBUGFAIL("unexpected verb");
+                break;
+        }
+        *currVerb += 1;
+    addMissingClose:
+        ;
+    }
+    // Success if 4 corners and first point equals last
+    if (corners < 3 || corners > 4) {
+        return false;
+    }
+    if (savePts) {
+        *ptsPtr = savePts;
+    }
+    // check if close generates diagonal
+    closeXY = *firstPt - *lastPt;
+    if (closeXY.fX && closeXY.fY) {
+        return false;
+    }
+    if (rect) {
+        rect->set(firstCorner, thirdCorner);
+    }
+    if (isClosed) {
+        *isClosed = autoClose;
+    }
+    if (direction) {
+        // Consecutive edge directions that increase (mod 4) indicate clockwise.
+        *direction = directions[0] == ((directions[1] + 1) & 3) ?
+                     SkPathDirection::kCW : SkPathDirection::kCCW;
+    }
+    return true;
+}
+
+
+// Returns true when the path consists of exactly two rect contours, one
+// containing the other. On success rects[0]/dirs[0] describe the outer rect
+// and rects[1]/dirs[1] the inner one (either output array may be null).
+bool SkPathPriv::IsNestedFillRects(const SkPath& path, SkRect rects[2], SkPathDirection dirs[2]) {
+    SkDEBUGCODE(path.validate();)
+    int verbIndex = 0;
+    const SkPoint* ptCursor = path.fPathRef->points();
+    SkPathDirection foundDirs[2];
+    SkRect foundRects[2];
+    // First contour may be partial (unclosed); the second must complete.
+    if (!IsRectContour(path, true, &verbIndex, &ptCursor, nullptr, &foundDirs[0], &foundRects[0]) ||
+        !IsRectContour(path, false, &verbIndex, &ptCursor, nullptr, &foundDirs[1], &foundRects[1])) {
+        return false;
+    }
+    // Emit the containing rect first. Testing rect 0 as the container first
+    // preserves the original tie-break when the two rects are identical.
+    for (int outer = 0; outer <= 1; ++outer) {
+        const int inner = 1 - outer;
+        if (foundRects[outer].contains(foundRects[inner])) {
+            if (rects) {
+                rects[0] = foundRects[outer];
+                rects[1] = foundRects[inner];
+            }
+            if (dirs) {
+                dirs[0] = foundDirs[outer];
+                dirs[1] = foundDirs[inner];
+            }
+            return true;
+        }
+    }
+    return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// A half-plane in implicit form: the set of (x, y) with A*x + B*y + C >= 0.
+struct SkHalfPlane {
+    SkScalar fA, fB, fC;
+
+    // Signed distance-like value; sign tells which side of the line (x,y) is on.
+    SkScalar eval(SkScalar x, SkScalar y) const {
+        return fA * x + fB * y + fC;
+    }
+    SkScalar operator()(SkScalar x, SkScalar y) const { return this->eval(x, y); }
+
+    // Scales (A, B, C) so the normal (A, B) has unit length, computing in
+    // double for precision. On failure (zero-length normal or non-finite
+    // result) the plane is reset to the degenerate "everything positive"
+    // plane {0, 0, 1}; returns false only for the non-finite/zero case after
+    // scaling. NOTE(review): the initial dmag == 0 case also resets the plane
+    // but returns true -- presumably deliberate; confirm against callers.
+    bool normalize() {
+        double a = fA;
+        double b = fB;
+        double c = fC;
+        double dmag = sqrt(a * a + b * b);
+        // length of initial plane normal is zero
+        if (dmag == 0) {
+            fA = fB = 0;
+            fC = SK_Scalar1;
+            return true;
+        }
+        double dscale = sk_ieee_double_divide(1.0, dmag);
+        a *= dscale;
+        b *= dscale;
+        c *= dscale;
+        // check if we're not finite, or normal is zero-length
+        if (!sk_float_isfinite(a) || !sk_float_isfinite(b) || !sk_float_isfinite(c) ||
+            (a == 0 && b == 0)) {
+            fA = fB = 0;
+            fC = SK_Scalar1;
+            return false;
+        }
+        fA = a;
+        fB = b;
+        fC = c;
+        return true;
+    }
+
+    enum Result {
+        kAllNegative,
+        kAllPositive,
+        kMixed
+    };
+    // Classifies a rect against the half-plane by evaluating the two corners
+    // of the diagonal most aligned with the plane normal; if both evaluate
+    // with the same sign, the whole rect lies on that side.
+    Result test(const SkRect& bounds) const {
+        // check whether the diagonal aligned with the normal crosses the plane
+        SkPoint diagMin, diagMax;
+        if (fA >= 0) {
+            diagMin.fX = bounds.fLeft;
+            diagMax.fX = bounds.fRight;
+        } else {
+            diagMin.fX = bounds.fRight;
+            diagMax.fX = bounds.fLeft;
+        }
+        if (fB >= 0) {
+            diagMin.fY = bounds.fTop;
+            diagMax.fY = bounds.fBottom;
+        } else {
+            diagMin.fY = bounds.fBottom;
+            diagMax.fY = bounds.fTop;
+        }
+        SkScalar test = this->eval(diagMin.fX, diagMin.fY);
+        SkScalar sign = test*this->eval(diagMax.fX, diagMax.fY);
+        if (sign > 0) {
+            // the path is either all on one side of the half-plane or the other
+            if (test < 0) {
+                return kAllNegative;
+            } else {
+                return kAllPositive;
+            }
+        }
+        return kMixed;
+    }
+};
+
+// assumes plane is pre-normalized
+// If we fail in our calculations, we return the empty path
+// assumes plane is pre-normalized
+// If we fail in our calculations, we return the empty path
+// Strategy: build a rotation+translation that maps the plane's boundary line
+// onto the x-axis, clip the transformed path to the upper half-space with
+// SkEdgeClipper, then map the result back with the inverse transform.
+static SkPath clip(const SkPath& path, const SkHalfPlane& plane) {
+    SkMatrix mx, inv;
+    // p0 is the point on the boundary line closest to the origin.
+    SkPoint p0 = { -plane.fA*plane.fC, -plane.fB*plane.fC };
+    mx.setAll( plane.fB, plane.fA, p0.fX,
+              -plane.fA, plane.fB, p0.fY,
+                      0,        0,     1);
+    if (!mx.invert(&inv)) {
+        return SkPath();
+    }
+
+    SkPath rotated;
+    path.transform(inv, &rotated);
+    if (!rotated.isFinite()) {
+        return SkPath();
+    }
+
+    // Keep everything with y >= 0 in the rotated frame.
+    SkScalar big = SK_ScalarMax;
+    SkRect clip = {-big, 0, big, big };
+
+    struct Rec {
+        SkPathBuilder fResult;
+        SkPoint       fPrev = {0,0};
+    } rec;
+
+    // The callback re-assembles clipped edges into contours: a moveTo starts
+    // each new contour, and gaps between successive edges become lineTos.
+    SkEdgeClipper::ClipPath(rotated, clip, false,
+                            [](SkEdgeClipper* clipper, bool newCtr, void* ctx) {
+        Rec* rec = (Rec*)ctx;
+
+        bool addLineTo = false;
+        SkPoint      pts[4];
+        SkPath::Verb verb;
+        while ((verb = clipper->next(pts)) != SkPath::kDone_Verb) {
+            if (newCtr) {
+                rec->fResult.moveTo(pts[0]);
+                rec->fPrev = pts[0];
+                newCtr = false;
+            }
+
+            if (addLineTo || pts[0] != rec->fPrev) {
+                rec->fResult.lineTo(pts[0]);
+            }
+
+            switch (verb) {
+                case SkPath::kLine_Verb:
+                    rec->fResult.lineTo(pts[1]);
+                    rec->fPrev = pts[1];
+                    break;
+                case SkPath::kQuad_Verb:
+                    rec->fResult.quadTo(pts[1], pts[2]);
+                    rec->fPrev = pts[2];
+                    break;
+                case SkPath::kCubic_Verb:
+                    rec->fResult.cubicTo(pts[1], pts[2], pts[3]);
+                    rec->fPrev = pts[3];
+                    break;
+                default: break;
+            }
+            addLineTo = true;
+        }
+    }, &rec);
+
+    rec.fResult.setFillType(path.getFillType());
+    // Map the clipped path back into the original coordinate frame.
+    SkPath result = rec.fResult.detach().makeTransform(mx);
+    if (!result.isFinite()) {
+        result = SkPath();
+    }
+    return result;
+}
+
+// true means we have written to clippedPath
+// true means we have written to clippedPath
+// For a perspective matrix, clips away geometry on or behind the w ~= 0 plane
+// (offset by kW0PlaneDistance) so the subsequent divide is well-defined.
+// Returns false when no clipping is needed (non-perspective matrix, or the
+// whole path is safely on the positive side).
+bool SkPathPriv::PerspectiveClip(const SkPath& path, const SkMatrix& matrix, SkPath* clippedPath) {
+    if (!matrix.hasPerspective()) {
+        return false;
+    }
+
+    // The bottom row of the matrix defines w(x, y); shift it to keep a margin.
+    SkHalfPlane plane {
+        matrix[SkMatrix::kMPersp0],
+        matrix[SkMatrix::kMPersp1],
+        matrix[SkMatrix::kMPersp2] - kW0PlaneDistance
+    };
+    if (plane.normalize()) {
+        switch (plane.test(path.getBounds())) {
+            case SkHalfPlane::kAllPositive:
+                return false;
+            case SkHalfPlane::kMixed: {
+                *clippedPath = clip(path, plane);
+                return true;
+            }
+            default: break; // handled outside of the switch
+        }
+    }
+    // clipped out (or failed)
+    *clippedPath = SkPath();
+    return true;
+}
+
+// Exposes the number of gen-ID change listeners registered on the path's ref.
+int SkPathPriv::GenIDChangeListenersCount(const SkPath& path) {
+    const auto& pathRef = *path.fPathRef;
+    return pathRef.genIDChangeListenerCount();
+}
+
+// Conservative (quick) test that every segment is axis-aligned: each pair of
+// consecutive raw points must share an x or a y coordinate. Because contour
+// boundaries are ignored, multiple contours may yield a false negative, which
+// is acceptable for this speed-oriented check.
+bool SkPathPriv::IsAxisAligned(const SkPath& path) {
+    const SkPoint* pts = path.fPathRef->points();
+    const int count = path.fPathRef->countPoints();
+
+    for (int i = 0; i + 1 < count; ++i) {
+        const SkPoint& a = pts[i];
+        const SkPoint& b = pts[i + 1];
+        if (a.fX != b.fX && a.fY != b.fY) {
+            return false;
+        }
+    }
+    return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Positions the iterator at the start of the path's raw verb/point/weight
+// arrays. fConicWeights starts one entry *before* the first weight so the
+// iterator can pre-increment it when a conic verb is consumed.
+SkPathEdgeIter::SkPathEdgeIter(const SkPath& path) {
+    fMoveToPtr = fPts = path.fPathRef->points();
+    fVerbs = path.fPathRef->verbsBegin();
+    fVerbsStop = path.fPathRef->verbsEnd();
+    fConicWeights = path.fPathRef->conicWeights();
+    if (fConicWeights) {
+        fConicWeights -= 1;  // begin one behind
+    }
+
+    fNeedsCloseLine = false;
+    fNextIsNewContour = false;
+    SkDEBUGCODE(fIsConic = false;)
+}
diff --git a/gfx/skia/skia/src/core/SkPathBuilder.cpp b/gfx/skia/skia/src/core/SkPathBuilder.cpp
new file mode 100644
index 0000000000..2ea78257e3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathBuilder.cpp
@@ -0,0 +1,867 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPathBuilder.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkRRect.h"
+#include "include/private/SkPathRef.h"
+#include "include/private/base/SkPathEnums.h"
+#include "include/private/base/SkSafe32.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <utility>
+
+// Default builder: empty geometry, winding fill, non-volatile.
+SkPathBuilder::SkPathBuilder() {
+    this->reset();
+}
+
+// Empty builder with an explicit fill type (applied after the full reset).
+SkPathBuilder::SkPathBuilder(SkPathFillType ft) {
+    this->reset();
+    fFillType = ft;
+}
+
+// Builder seeded from an existing path (delegates to operator=(const SkPath&)).
+SkPathBuilder::SkPathBuilder(const SkPath& src) {
+    *this = src;
+}
+
+// Out-of-line destructor; members clean themselves up.
+SkPathBuilder::~SkPathBuilder() {
+}
+
+// Restores the builder to its default-constructed state: no geometry, winding
+// fill, non-volatile, and cleared internal bookkeeping.
+SkPathBuilder& SkPathBuilder::reset() {
+    fPts.clear();
+    fVerbs.clear();
+    fConicWeights.clear();
+    fFillType = SkPathFillType::kWinding;
+    fIsVolatile = false;
+
+    // these are internal state
+
+    fSegmentMask = 0;
+    fLastMovePoint = {0, 0};
+    fLastMoveIndex = -1;        // illegal
+    fNeedsMoveVerb = true;
+
+    return *this;
+}
+
+// Rebuilds this builder from a path by replaying every verb through the
+// builder's own editing methods, so all internal state (segment mask, last
+// move bookkeeping) is reconstructed consistently.
+SkPathBuilder& SkPathBuilder::operator=(const SkPath& src) {
+    this->reset().setFillType(src.getFillType());
+
+    for (auto [verb, pts, w] : SkPathPriv::Iterate(src)) {
+        switch (verb) {
+            case SkPathVerb::kMove:  this->moveTo(pts[0]); break;
+            case SkPathVerb::kLine:  this->lineTo(pts[1]); break;
+            case SkPathVerb::kQuad:  this->quadTo(pts[1], pts[2]); break;
+            case SkPathVerb::kConic: this->conicTo(pts[1], pts[2], w[0]); break;
+            case SkPathVerb::kCubic: this->cubicTo(pts[1], pts[2], pts[3]); break;
+            case SkPathVerb::kClose: this->close(); break;
+        }
+    }
+    return *this;
+}
+
+// Pre-grows the point and verb storage; Sk32_sat_add saturates rather than
+// overflowing on pathological counts.
+void SkPathBuilder::incReserve(int extraPtCount, int extraVbCount) {
+    fPts.reserve_back(  Sk32_sat_add(fPts.size(),   extraPtCount));
+    fVerbs.reserve_back(Sk32_sat_add(fVerbs.size(), extraVbCount));
+}
+
+// Bounds of all control points accumulated so far (not the tight curve bounds).
+SkRect SkPathBuilder::computeBounds() const {
+    SkRect bounds;
+    bounds.setBounds(fPts.begin(), fPts.size());
+    return bounds;
+}
+
+/*
+ * Some old behavior in SkPath -- should we keep it?
+ *
+ * After each edit (i.e. adding a verb)
+ this->setConvexityType(SkPathConvexity::kUnknown);
+ this->setFirstDirection(SkPathPriv::kUnknown_FirstDirection);
+ */
+
+// Starts a new contour at pt and records it as the target for close()/relative
+// bookkeeping.
+SkPathBuilder& SkPathBuilder::moveTo(SkPoint pt) {
+    // only needed while SkPath is mutable
+    fLastMoveIndex = SkToInt(fPts.size());
+
+    fPts.push_back(pt);
+    fVerbs.push_back((uint8_t)SkPathVerb::kMove);
+
+    fLastMovePoint = pt;
+    fNeedsMoveVerb = false;
+    return *this;
+}
+
+// Appends a line segment to pt (inserting an implicit moveTo first if the
+// current contour was closed or never started).
+SkPathBuilder& SkPathBuilder::lineTo(SkPoint pt) {
+    this->ensureMove();
+
+    fPts.push_back(pt);
+    fVerbs.push_back((uint8_t)SkPathVerb::kLine);
+
+    fSegmentMask |= kLine_SkPathSegmentMask;
+    return *this;
+}
+
+// Appends a quadratic segment with control point pt1 ending at pt2.
+SkPathBuilder& SkPathBuilder::quadTo(SkPoint pt1, SkPoint pt2) {
+    this->ensureMove();
+
+    SkPoint* p = fPts.push_back_n(2);
+    p[0] = pt1;
+    p[1] = pt2;
+    fVerbs.push_back((uint8_t)SkPathVerb::kQuad);
+
+    fSegmentMask |= kQuad_SkPathSegmentMask;
+    return *this;
+}
+
+// Appends a conic segment (control pt1, end pt2) with weight w; the weight is
+// stored in the parallel fConicWeights array.
+SkPathBuilder& SkPathBuilder::conicTo(SkPoint pt1, SkPoint pt2, SkScalar w) {
+    this->ensureMove();
+
+    SkPoint* p = fPts.push_back_n(2);
+    p[0] = pt1;
+    p[1] = pt2;
+    fVerbs.push_back((uint8_t)SkPathVerb::kConic);
+    fConicWeights.push_back(w);
+
+    fSegmentMask |= kConic_SkPathSegmentMask;
+    return *this;
+}
+
+// Appends a cubic segment with control points pt1, pt2 ending at pt3.
+SkPathBuilder& SkPathBuilder::cubicTo(SkPoint pt1, SkPoint pt2, SkPoint pt3) {
+    this->ensureMove();
+
+    SkPoint* p = fPts.push_back_n(3);
+    p[0] = pt1;
+    p[1] = pt2;
+    p[2] = pt3;
+    fVerbs.push_back((uint8_t)SkPathVerb::kCubic);
+
+    fSegmentMask |= kCubic_SkPathSegmentMask;
+    return *this;
+}
+
+// Closes the current contour. A no-op on an empty builder; the next edit verb
+// will start a fresh contour at the previous moveTo point.
+SkPathBuilder& SkPathBuilder::close() {
+    if (!fVerbs.empty()) {
+        this->ensureMove();
+
+        fVerbs.push_back((uint8_t)SkPathVerb::kClose);
+
+        // fLastMovePoint stays where it is -- the previous moveTo
+        fNeedsMoveVerb = true;
+    }
+    return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// Relative lineTo: p1 is an offset from the current (last) point.
+SkPathBuilder& SkPathBuilder::rLineTo(SkPoint p1) {
+    this->ensureMove();
+    const SkPoint origin = fPts.back();
+    return this->lineTo(origin + p1);
+}
+
+// Relative quadTo: both points are offsets from the current (last) point.
+SkPathBuilder& SkPathBuilder::rQuadTo(SkPoint p1, SkPoint p2) {
+    this->ensureMove();
+    const SkPoint origin = fPts.back();
+    const SkPoint control = origin + p1;
+    const SkPoint end = origin + p2;
+    return this->quadTo(control, end);
+}
+
+// Relative conicTo: both points are offsets from the current (last) point.
+SkPathBuilder& SkPathBuilder::rConicTo(SkPoint p1, SkPoint p2, SkScalar w) {
+    this->ensureMove();
+    const SkPoint origin = fPts.back();
+    const SkPoint control = origin + p1;
+    const SkPoint end = origin + p2;
+    return this->conicTo(control, end, w);
+}
+
+// Relative cubicTo: all three points are offsets from the current (last) point.
+SkPathBuilder& SkPathBuilder::rCubicTo(SkPoint p1, SkPoint p2, SkPoint p3) {
+    this->ensureMove();
+    const SkPoint origin = fPts.back();
+    const SkPoint c1 = origin + p1;
+    const SkPoint c2 = origin + p2;
+    const SkPoint end = origin + p3;
+    return this->cubicTo(c1, c2, end);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// Wraps the given path ref in an SkPath, propagating what the builder already
+// knows: oval/rrect identity (fIsA) implies convexity and first direction, and
+// fLastMoveIndex seeds the path's last-moveTo bookkeeping (negated/bitwise-
+// inverted when the final contour is closed).
+SkPath SkPathBuilder::make(sk_sp<SkPathRef> pr) const {
+    auto convexity = SkPathConvexity::kUnknown;
+    SkPathFirstDirection dir = SkPathFirstDirection::kUnknown;
+
+    switch (fIsA) {
+        case kIsA_Oval:
+            pr->setIsOval( true, fIsACCW, fIsAStart);
+            convexity = SkPathConvexity::kConvex;
+            dir = fIsACCW ? SkPathFirstDirection::kCCW : SkPathFirstDirection::kCW;
+            break;
+        case kIsA_RRect:
+            pr->setIsRRect(true, fIsACCW, fIsAStart);
+            convexity = SkPathConvexity::kConvex;
+            dir = fIsACCW ? SkPathFirstDirection::kCCW : SkPathFirstDirection::kCW;
+            break;
+        default: break;
+    }
+
+    // Wonder if we can combine convexity and dir internally...
+    //  unknown, convex_cw, convex_ccw, concave
+    // Do we ever have direction w/o convexity, or viceversa (inside path)?
+    //
+    auto path = SkPath(std::move(pr), fFillType, fIsVolatile, convexity, dir);
+
+    // This hopefully can go away in the future when Paths are immutable,
+    // but if while they are still editable, we need to correctly set this.
+    const uint8_t* start = path.fPathRef->verbsBegin();
+    const uint8_t* stop  = path.fPathRef->verbsEnd();
+    if (start < stop) {
+        SkASSERT(fLastMoveIndex >= 0);
+        // peek at the last verb, to know if our last contour is closed
+        const bool isClosed = (stop[-1] == (uint8_t)SkPathVerb::kClose);
+        path.fLastMoveToIndex = isClosed ? ~fLastMoveIndex : fLastMoveIndex;
+    }
+
+    return path;
+}
+
+// Returns a path copying the builder's current storage; the builder remains
+// usable and unchanged (contrast with detach()).
+SkPath SkPathBuilder::snapshot() const {
+    return this->make(sk_sp<SkPathRef>(new SkPathRef(fPts,
+                                                     fVerbs,
+                                                     fConicWeights,
+                                                     fSegmentMask)));
+}
+
+// Returns a path taking ownership of the builder's storage (moved, no copy),
+// then resets the builder to its empty default state.
+SkPath SkPathBuilder::detach() {
+    auto path = this->make(sk_sp<SkPathRef>(new SkPathRef(std::move(fPts),
+                                                          std::move(fVerbs),
+                                                          std::move(fConicWeights),
+                                                          fSegmentMask)));
+    this->reset();
+    return path;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Detects arcs that collapse to a single point (zero sweep from the 0/360
+// start position, or a zero-size oval). On true, *pt is the point to emit
+// instead of any arc geometry.
+static bool arc_is_lone_point(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+                              SkPoint* pt) {
+    if (0 == sweepAngle && (0 == startAngle || SkIntToScalar(360) == startAngle)) {
+        // Chrome uses this path to move into and out of ovals. If not
+        // treated as a special case the moves can distort the oval's
+        // bounding box (and break the circle special case).
+        pt->set(oval.fRight, oval.centerY());
+        return true;
+    } else if (0 == oval.width() && 0 == oval.height()) {
+        // Chrome will sometimes create 0 radius round rects. Having degenerate
+        // quad segments in the path prevents the path from being recognized as
+        // a rect.
+        // TODO: optimizing the case where only one of width or height is zero
+        // should also be considered. This case, however, doesn't seem to be
+        // as common as the single point case.
+        pt->set(oval.fRight, oval.fTop);
+        return true;
+    }
+    return false;
+}
+
+// Return the unit vectors pointing at the start/stop points for the given start/sweep angles
+// (angles in degrees; snap-to-zero variants avoid tiny sin/cos residue), plus
+// the rotation direction implied by the sweep's sign.
+static void angles_to_unit_vectors(SkScalar startAngle, SkScalar sweepAngle,
+                                   SkVector* startV, SkVector* stopV, SkRotationDirection* dir) {
+    SkScalar startRad = SkDegreesToRadians(startAngle),
+             stopRad  = SkDegreesToRadians(startAngle + sweepAngle);
+
+    startV->fY = SkScalarSinSnapToZero(startRad);
+    startV->fX = SkScalarCosSnapToZero(startRad);
+    stopV->fY = SkScalarSinSnapToZero(stopRad);
+    stopV->fX = SkScalarCosSnapToZero(stopRad);
+
+    /*  If the sweep angle is nearly (but less than) 360, then due to precision
+        loss in radians-conversion and/or sin/cos, we may end up with coincident
+        vectors, which will fool SkBuildQuadArc into doing nothing (bad) instead
+        of drawing a nearly complete circle (good).
+        e.g. canvas.drawArc(0, 359.99, ...)
+        -vs- canvas.drawArc(0, 359.9, ...)
+        We try to detect this edge case, and tweak the stop vector
+    */
+    if (*startV == *stopV) {
+        SkScalar sw = SkScalarAbs(sweepAngle);
+        if (sw < SkIntToScalar(360) && sw > SkIntToScalar(359)) {
+            // make a guess at a tiny angle (in radians) to tweak by
+            SkScalar deltaRad = SkScalarCopySign(SK_Scalar1/512, sweepAngle);
+            // not sure how much will be enough, so we use a loop
+            do {
+                stopRad -= deltaRad;
+                stopV->fY = SkScalarSinSnapToZero(stopRad);
+                stopV->fX = SkScalarCosSnapToZero(stopRad);
+            } while (*startV == *stopV);
+        }
+    }
+    *dir = sweepAngle > 0 ? kCW_SkRotationDirection : kCCW_SkRotationDirection;
+}
+
+/**
+ *  If this returns 0, then the caller should just line-to the singlePt, else it should
+ *  ignore singlePt and append the specified number of conics.
+ *
+ *  The unit-circle conics from SkConic::BuildUnitArc are mapped into the oval
+ *  by a scale-to-radii + translate-to-center matrix.
+ */
+static int build_arc_conics(const SkRect& oval, const SkVector& start, const SkVector& stop,
+                            SkRotationDirection dir, SkConic conics[SkConic::kMaxConicsForArc],
+                            SkPoint* singlePt) {
+    SkMatrix    matrix;
+
+    matrix.setScale(SkScalarHalf(oval.width()), SkScalarHalf(oval.height()));
+    matrix.postTranslate(oval.centerX(), oval.centerY());
+
+    int count = SkConic::BuildUnitArc(start, stop, dir, &matrix, conics);
+    if (0 == count) {
+        // Degenerate arc: report where the stop vector lands on the oval.
+        matrix.mapXY(stop.x(), stop.y(), singlePt);
+    }
+    return count;
+}
+
+// Component-wise nearly-equal comparison for points, using the standard
+// scalar tolerance.
+static bool nearly_equal(const SkPoint& a, const SkPoint& b) {
+    if (!SkScalarNearlyEqual(a.fX, b.fX)) {
+        return false;
+    }
+    return SkScalarNearlyEqual(a.fY, b.fY);
+}
+
+// Appends an arc of the given oval from startAngle sweeping sweepAngle
+// (degrees), as a chain of conics. forceMoveTo starts a new contour at the
+// arc's first point; otherwise the arc is connected with a lineTo when needed.
+SkPathBuilder& SkPathBuilder::arcTo(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+                                    bool forceMoveTo) {
+    if (oval.width() < 0 || oval.height() < 0) {
+        return *this;
+    }
+
+    if (fVerbs.empty()) {
+        // Nothing to connect from, so the arc must begin its own contour.
+        forceMoveTo = true;
+    }
+
+    SkPoint lonePt;
+    if (arc_is_lone_point(oval, startAngle, sweepAngle, &lonePt)) {
+        return forceMoveTo ? this->moveTo(lonePt) : this->lineTo(lonePt);
+    }
+
+    SkVector startV, stopV;
+    SkRotationDirection dir;
+    angles_to_unit_vectors(startAngle, sweepAngle, &startV, &stopV, &dir);
+
+    SkPoint singlePt;
+
+    // Adds a move-to to 'pt' if forceMoveTo is true. Otherwise a lineTo unless we're sufficiently
+    // close to 'pt' currently. This prevents spurious lineTos when adding a series of contiguous
+    // arcs from the same oval.
+    auto addPt = [forceMoveTo, this](const SkPoint& pt) {
+        if (forceMoveTo) {
+            this->moveTo(pt);
+        } else if (!nearly_equal(fPts.back(), pt)) {
+            this->lineTo(pt);
+        }
+    };
+
+    // At this point, we know that the arc is not a lone point, but startV == stopV
+    // indicates that the sweepAngle is too small such that angles_to_unit_vectors
+    // cannot handle it.
+    if (startV == stopV) {
+        SkScalar endAngle = SkDegreesToRadians(startAngle + sweepAngle);
+        SkScalar radiusX = oval.width() / 2;
+        SkScalar radiusY = oval.height() / 2;
+        // We do not use SkScalar[Sin|Cos]SnapToZero here. When sin(startAngle) is 0 and sweepAngle
+        // is very small and radius is huge, the expected behavior here is to draw a line. But
+        // calling SkScalarSinSnapToZero will make sin(endAngle) be 0 which will then draw a dot.
+        singlePt.set(oval.centerX() + radiusX * SkScalarCos(endAngle),
+                     oval.centerY() + radiusY * SkScalarSin(endAngle));
+        addPt(singlePt);
+        return *this;
+    }
+
+    SkConic conics[SkConic::kMaxConicsForArc];
+    int count = build_arc_conics(oval, startV, stopV, dir, conics, &singlePt);
+    if (count) {
+        // Each conic adds 2 points + 1 verb; +1 covers the connecting point.
+        this->incReserve(count * 2 + 1);
+        const SkPoint& pt = conics[0].fPts[0];
+        addPt(pt);
+        for (int i = 0; i < count; ++i) {
+            this->conicTo(conics[i].fPts[1], conics[i].fPts[2], conics[i].fW);
+        }
+    } else {
+        addPt(singlePt);
+    }
+    return *this;
+}
+
+// Appends an arc as its own contour. A sweep of at least a full circle that
+// starts on a 90-degree boundary is emitted as an oval (so it can be
+// recognized as one); everything else defers to arcTo with forceMoveTo=true.
+SkPathBuilder& SkPathBuilder::addArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle) {
+    if (oval.isEmpty() || 0 == sweepAngle) {
+        return *this;
+    }
+
+    const SkScalar kFullCircleAngle = SkIntToScalar(360);
+
+    if (sweepAngle >= kFullCircleAngle || sweepAngle <= -kFullCircleAngle) {
+        // We can treat the arc as an oval if it begins at one of our legal starting positions.
+        // See SkPath::addOval() docs.
+        SkScalar startOver90 = startAngle / 90.f;
+        SkScalar startOver90I = SkScalarRoundToScalar(startOver90);
+        SkScalar error = startOver90 - startOver90I;
+        if (SkScalarNearlyEqual(error, 0)) {
+            // Index 1 is at startAngle == 0.
+            SkScalar startIndex = std::fmod(startOver90I + 1.f, 4.f);
+            // fmod can be negative; wrap into [0, 4).
+            startIndex = startIndex < 0 ? startIndex + 4.f : startIndex;
+            return this->addOval(oval, sweepAngle > 0 ? SkPathDirection::kCW : SkPathDirection::kCCW,
+                                 (unsigned) startIndex);
+        }
+    }
+    return this->arcTo(oval, startAngle, sweepAngle, true);
+}
+
+// Tangent-style arcTo (PostScript arct): appends a line to the arc's start
+// followed by a conic that is tangent to segments (current->p1) and (p1->p2)
+// with the given radius. Degenerate tangents fall back to a lineTo(p1).
+SkPathBuilder& SkPathBuilder::arcTo(SkPoint p1, SkPoint p2, SkScalar radius) {
+    this->ensureMove();
+
+    if (radius == 0) {
+        return this->lineTo(p1);
+    }
+
+    // need to know our prev pt so we can construct tangent vectors
+    SkPoint start = fPts.back();
+
+    // need double precision for these calcs.
+    skvx::double2 befored = normalize(skvx::double2{p1.fX - start.fX, p1.fY - start.fY});
+    skvx::double2 afterd = normalize(skvx::double2{p2.fX - p1.fX, p2.fY - p1.fY});
+    double cosh = dot(befored, afterd);
+    double sinh = cross(befored, afterd);
+
+    // If the previous point equals the first point, befored will be denormalized.
+    // If the two points equal, afterd will be denormalized.
+    // If the second point equals the first point, sinh will be zero.
+    // In all these cases, we cannot construct an arc, so we construct a line to the first point.
+    if (!isfinite(befored) || !isfinite(afterd) || SkScalarNearlyZero(SkDoubleToScalar(sinh))) {
+        return this->lineTo(p1);
+    }
+
+    // safe to convert back to floats now
+    // (xx, yy) is where the arc leaves the first tangent segment.
+    SkScalar dist = SkScalarAbs(SkDoubleToScalar(radius * (1 - cosh) / sinh));
+    SkScalar xx = p1.fX - dist * befored[0];
+    SkScalar yy = p1.fY - dist * befored[1];
+
+    SkVector after = SkVector::Make(afterd[0], afterd[1]);
+    after.setLength(dist);
+    this->lineTo(xx, yy);
+    // Conic weight for a circular arc subtending the tangent angle.
+    SkScalar weight = SkScalarSqrt(SkDoubleToScalar(SK_ScalarHalf + cosh * 0.5));
+    return this->conicTo(p1, p1 + after, weight);
+}
+
+// This converts the SVG arc to conics.
+// Partly adapted from Niko's code in kdelibs/kdecore/svgicons.
+// Then transcribed from webkit/chrome's SVGPathNormalizer::decomposeArcToCubic()
+// See also SVG implementation notes:
+// http://www.w3.org/TR/SVG/implnote.html#ArcConversionEndpointToCenter
+// Note that arcSweep bool value is flipped from the original implementation.
+SkPathBuilder& SkPathBuilder::arcTo(SkPoint rad, SkScalar angle, SkPathBuilder::ArcSize arcLarge,
+ SkPathDirection arcSweep, SkPoint endPt) {
+ this->ensureMove();
+
+ SkPoint srcPts[2] = { fPts.back(), endPt };
+
+ // If rx = 0 or ry = 0 then this arc is treated as a straight line segment (a "lineto")
+ // joining the endpoints.
+ // http://www.w3.org/TR/SVG/implnote.html#ArcOutOfRangeParameters
+ if (!rad.fX || !rad.fY) {
+ return this->lineTo(endPt);
+ }
+ // If the current point and target point for the arc are identical, it should be treated as a
+ // zero length path. This ensures continuity in animations.
+ if (srcPts[0] == srcPts[1]) {
+ return this->lineTo(endPt);
+ }
+ SkScalar rx = SkScalarAbs(rad.fX);
+ SkScalar ry = SkScalarAbs(rad.fY);
+ SkVector midPointDistance = srcPts[0] - srcPts[1];
+ midPointDistance *= 0.5f;
+
+ SkMatrix pointTransform;
+ pointTransform.setRotate(-angle);
+
+ SkPoint transformedMidPoint;
+ pointTransform.mapPoints(&transformedMidPoint, &midPointDistance, 1);
+ SkScalar squareRx = rx * rx;
+ SkScalar squareRy = ry * ry;
+ SkScalar squareX = transformedMidPoint.fX * transformedMidPoint.fX;
+ SkScalar squareY = transformedMidPoint.fY * transformedMidPoint.fY;
+
+ // Check if the radii are big enough to draw the arc, scale radii if not.
+ // http://www.w3.org/TR/SVG/implnote.html#ArcCorrectionOutOfRangeRadii
+ SkScalar radiiScale = squareX / squareRx + squareY / squareRy;
+ if (radiiScale > 1) {
+ radiiScale = SkScalarSqrt(radiiScale);
+ rx *= radiiScale;
+ ry *= radiiScale;
+ }
+
+ pointTransform.setScale(1 / rx, 1 / ry);
+ pointTransform.preRotate(-angle);
+
+ SkPoint unitPts[2];
+ pointTransform.mapPoints(unitPts, srcPts, (int) std::size(unitPts));
+ SkVector delta = unitPts[1] - unitPts[0];
+
+ SkScalar d = delta.fX * delta.fX + delta.fY * delta.fY;
+ SkScalar scaleFactorSquared = std::max(1 / d - 0.25f, 0.f);
+
+ SkScalar scaleFactor = SkScalarSqrt(scaleFactorSquared);
+ if ((arcSweep == SkPathDirection::kCCW) != SkToBool(arcLarge)) { // flipped from the original implementation
+ scaleFactor = -scaleFactor;
+ }
+ delta.scale(scaleFactor);
+ SkPoint centerPoint = unitPts[0] + unitPts[1];
+ centerPoint *= 0.5f;
+ centerPoint.offset(-delta.fY, delta.fX);
+ unitPts[0] -= centerPoint;
+ unitPts[1] -= centerPoint;
+ SkScalar theta1 = SkScalarATan2(unitPts[0].fY, unitPts[0].fX);
+ SkScalar theta2 = SkScalarATan2(unitPts[1].fY, unitPts[1].fX);
+ SkScalar thetaArc = theta2 - theta1;
+ if (thetaArc < 0 && (arcSweep == SkPathDirection::kCW)) { // arcSweep flipped from the original implementation
+ thetaArc += SK_ScalarPI * 2;
+ } else if (thetaArc > 0 && (arcSweep != SkPathDirection::kCW)) { // arcSweep flipped from the original implementation
+ thetaArc -= SK_ScalarPI * 2;
+ }
+
+ // Very tiny angles cause our subsequent math to go wonky (skbug.com/9272)
+ // so we do a quick check here. The precise tolerance amount is just made up.
+ // PI/million happens to fix the bug in 9272, but a larger value is probably
+ // ok too.
+ if (SkScalarAbs(thetaArc) < (SK_ScalarPI / (1000 * 1000))) {
+ return this->lineTo(endPt);
+ }
+
+ pointTransform.setRotate(angle);
+ pointTransform.preScale(rx, ry);
+
+ // the arc may be slightly bigger than 1/4 circle, so allow up to 1/3rd
+ int segments = SkScalarCeilToInt(SkScalarAbs(thetaArc / (2 * SK_ScalarPI / 3)));
+ SkScalar thetaWidth = thetaArc / segments;
+ SkScalar t = SkScalarTan(0.5f * thetaWidth);
+ if (!SkScalarIsFinite(t)) {
+ return *this;
+ }
+ SkScalar startTheta = theta1;
+ SkScalar w = SkScalarSqrt(SK_ScalarHalf + SkScalarCos(thetaWidth) * SK_ScalarHalf);
+ auto scalar_is_integer = [](SkScalar scalar) -> bool {
+ return scalar == SkScalarFloorToScalar(scalar);
+ };
+ bool expectIntegers = SkScalarNearlyZero(SK_ScalarPI/2 - SkScalarAbs(thetaWidth)) &&
+ scalar_is_integer(rx) && scalar_is_integer(ry) &&
+ scalar_is_integer(endPt.fX) && scalar_is_integer(endPt.fY);
+
+ for (int i = 0; i < segments; ++i) {
+ SkScalar endTheta = startTheta + thetaWidth,
+ sinEndTheta = SkScalarSinSnapToZero(endTheta),
+ cosEndTheta = SkScalarCosSnapToZero(endTheta);
+
+ unitPts[1].set(cosEndTheta, sinEndTheta);
+ unitPts[1] += centerPoint;
+ unitPts[0] = unitPts[1];
+ unitPts[0].offset(t * sinEndTheta, -t * cosEndTheta);
+ SkPoint mapped[2];
+ pointTransform.mapPoints(mapped, unitPts, (int) std::size(unitPts));
+ /*
+ Computing the arc width introduces rounding errors that cause arcs to start
+ outside their marks. A round rect may lose convexity as a result. If the input
+ values are on integers, place the conic on integers as well.
+ */
+ if (expectIntegers) {
+ for (SkPoint& point : mapped) {
+ point.fX = SkScalarRoundToScalar(point.fX);
+ point.fY = SkScalarRoundToScalar(point.fY);
+ }
+ }
+ this->conicTo(mapped[0], mapped[1], w);
+ startTheta = endTheta;
+ }
+
+ // The final point should match the input point (by definition); replace it to
+ // ensure that rounding errors in the above math don't cause any problems.
+ fPts.back() = endPt;
+ return *this;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+ template <unsigned N> class PointIterator {
+ public:
+ PointIterator(SkPathDirection dir, unsigned startIndex)
+ : fCurrent(startIndex % N)
+ , fAdvance(dir == SkPathDirection::kCW ? 1 : N - 1)
+ {}
+
+ const SkPoint& current() const {
+ SkASSERT(fCurrent < N);
+ return fPts[fCurrent];
+ }
+
+ const SkPoint& next() {
+ fCurrent = (fCurrent + fAdvance) % N;
+ return this->current();
+ }
+
+ protected:
+ SkPoint fPts[N];
+
+ private:
+ unsigned fCurrent;
+ unsigned fAdvance;
+ };
+
+ class RectPointIterator : public PointIterator<4> {
+ public:
+ RectPointIterator(const SkRect& rect, SkPathDirection dir, unsigned startIndex)
+ : PointIterator(dir, startIndex) {
+
+ fPts[0] = SkPoint::Make(rect.fLeft, rect.fTop);
+ fPts[1] = SkPoint::Make(rect.fRight, rect.fTop);
+ fPts[2] = SkPoint::Make(rect.fRight, rect.fBottom);
+ fPts[3] = SkPoint::Make(rect.fLeft, rect.fBottom);
+ }
+ };
+
+ class OvalPointIterator : public PointIterator<4> {
+ public:
+ OvalPointIterator(const SkRect& oval, SkPathDirection dir, unsigned startIndex)
+ : PointIterator(dir, startIndex) {
+
+ const SkScalar cx = oval.centerX();
+ const SkScalar cy = oval.centerY();
+
+ fPts[0] = SkPoint::Make(cx, oval.fTop);
+ fPts[1] = SkPoint::Make(oval.fRight, cy);
+ fPts[2] = SkPoint::Make(cx, oval.fBottom);
+ fPts[3] = SkPoint::Make(oval.fLeft, cy);
+ }
+ };
+
+ class RRectPointIterator : public PointIterator<8> {
+ public:
+ RRectPointIterator(const SkRRect& rrect, SkPathDirection dir, unsigned startIndex)
+ : PointIterator(dir, startIndex)
+ {
+ const SkRect& bounds = rrect.getBounds();
+ const SkScalar L = bounds.fLeft;
+ const SkScalar T = bounds.fTop;
+ const SkScalar R = bounds.fRight;
+ const SkScalar B = bounds.fBottom;
+
+ fPts[0] = SkPoint::Make(L + rrect.radii(SkRRect::kUpperLeft_Corner).fX, T);
+ fPts[1] = SkPoint::Make(R - rrect.radii(SkRRect::kUpperRight_Corner).fX, T);
+ fPts[2] = SkPoint::Make(R, T + rrect.radii(SkRRect::kUpperRight_Corner).fY);
+ fPts[3] = SkPoint::Make(R, B - rrect.radii(SkRRect::kLowerRight_Corner).fY);
+ fPts[4] = SkPoint::Make(R - rrect.radii(SkRRect::kLowerRight_Corner).fX, B);
+ fPts[5] = SkPoint::Make(L + rrect.radii(SkRRect::kLowerLeft_Corner).fX, B);
+ fPts[6] = SkPoint::Make(L, B - rrect.radii(SkRRect::kLowerLeft_Corner).fY);
+ fPts[7] = SkPoint::Make(L, T + rrect.radii(SkRRect::kUpperLeft_Corner).fY);
+ }
+ };
+} // anonymous namespace
+
+
+SkPathBuilder& SkPathBuilder::addRect(const SkRect& rect, SkPathDirection dir, unsigned index) {
+ const int kPts = 4; // moveTo + 3 lines
+ const int kVerbs = 5; // moveTo + 3 lines + close
+ this->incReserve(kPts, kVerbs);
+
+ RectPointIterator iter(rect, dir, index);
+
+ this->moveTo(iter.current());
+ this->lineTo(iter.next());
+ this->lineTo(iter.next());
+ this->lineTo(iter.next());
+ return this->close();
+}
+
+SkPathBuilder& SkPathBuilder::addOval(const SkRect& oval, SkPathDirection dir, unsigned index) {
+ const IsA prevIsA = fIsA;
+
+ const int kPts = 9; // moveTo + 4 conics(2 pts each)
+ const int kVerbs = 6; // moveTo + 4 conics + close
+ this->incReserve(kPts, kVerbs);
+
+ OvalPointIterator ovalIter(oval, dir, index);
+ RectPointIterator rectIter(oval, dir, index + (dir == SkPathDirection::kCW ? 0 : 1));
+
+ // The corner iterator pts are tracking "behind" the oval/radii pts.
+
+ this->moveTo(ovalIter.current());
+ for (unsigned i = 0; i < 4; ++i) {
+ this->conicTo(rectIter.next(), ovalIter.next(), SK_ScalarRoot2Over2);
+ }
+ this->close();
+
+ if (prevIsA == kIsA_JustMoves) {
+ fIsA = kIsA_Oval;
+ fIsACCW = (dir == SkPathDirection::kCCW);
+ fIsAStart = index % 4;
+ }
+ return *this;
+}
+
+SkPathBuilder& SkPathBuilder::addRRect(const SkRRect& rrect, SkPathDirection dir, unsigned index) {
+ const IsA prevIsA = fIsA;
+ const SkRect& bounds = rrect.getBounds();
+
+ if (rrect.isRect() || rrect.isEmpty()) {
+ // degenerate(rect) => radii points are collapsing
+ this->addRect(bounds, dir, (index + 1) / 2);
+ } else if (rrect.isOval()) {
+ // degenerate(oval) => line points are collapsing
+ this->addOval(bounds, dir, index / 2);
+ } else {
+ // we start with a conic on odd indices when moving CW vs. even indices when moving CCW
+ const bool startsWithConic = ((index & 1) == (dir == SkPathDirection::kCW));
+ const SkScalar weight = SK_ScalarRoot2Over2;
+
+ const int kVerbs = startsWithConic
+ ? 9 // moveTo + 4x conicTo + 3x lineTo + close
+ : 10; // moveTo + 4x lineTo + 4x conicTo + close
+ this->incReserve(kVerbs);
+
+ RRectPointIterator rrectIter(rrect, dir, index);
+ // Corner iterator indices follow the collapsed radii model,
+ // adjusted such that the start pt is "behind" the radii start pt.
+ const unsigned rectStartIndex = index / 2 + (dir == SkPathDirection::kCW ? 0 : 1);
+ RectPointIterator rectIter(bounds, dir, rectStartIndex);
+
+ this->moveTo(rrectIter.current());
+ if (startsWithConic) {
+ for (unsigned i = 0; i < 3; ++i) {
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ this->lineTo(rrectIter.next());
+ }
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ // final lineTo handled by close().
+ } else {
+ for (unsigned i = 0; i < 4; ++i) {
+ this->lineTo(rrectIter.next());
+ this->conicTo(rectIter.next(), rrectIter.next(), weight);
+ }
+ }
+ this->close();
+ }
+
+ if (prevIsA == kIsA_JustMoves) {
+ fIsA = kIsA_RRect;
+ fIsACCW = (dir == SkPathDirection::kCCW);
+ fIsAStart = index % 8;
+ }
+ return *this;
+}
+
+SkPathBuilder& SkPathBuilder::addCircle(SkScalar x, SkScalar y, SkScalar r, SkPathDirection dir) {
+ if (r >= 0) {
+ this->addOval(SkRect::MakeLTRB(x - r, y - r, x + r, y + r), dir);
+ }
+ return *this;
+}
+
+SkPathBuilder& SkPathBuilder::addPolygon(const SkPoint pts[], int count, bool isClosed) {
+ if (count <= 0) {
+ return *this;
+ }
+
+ this->moveTo(pts[0]);
+ this->polylineTo(&pts[1], count - 1);
+ if (isClosed) {
+ this->close();
+ }
+ return *this;
+}
+
+SkPathBuilder& SkPathBuilder::polylineTo(const SkPoint pts[], int count) {
+ if (count > 0) {
+ this->ensureMove();
+
+ this->incReserve(count, count);
+ memcpy(fPts.push_back_n(count), pts, count * sizeof(SkPoint));
+ memset(fVerbs.push_back_n(count), (uint8_t)SkPathVerb::kLine, count);
+ fSegmentMask |= kLine_SkPathSegmentMask;
+ }
+ return *this;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkPathBuilder& SkPathBuilder::offset(SkScalar dx, SkScalar dy) {
+ for (auto& p : fPts) {
+ p += {dx, dy};
+ }
+ return *this;
+}
+
+SkPathBuilder& SkPathBuilder::addPath(const SkPath& src) {
+ SkPath::RawIter iter(src);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb: this->moveTo (pts[0]); break;
+ case SkPath::kLine_Verb: this->lineTo (pts[1]); break;
+ case SkPath::kQuad_Verb: this->quadTo (pts[1], pts[2]); break;
+ case SkPath::kCubic_Verb: this->cubicTo(pts[1], pts[2], pts[3]); break;
+ case SkPath::kConic_Verb: this->conicTo(pts[1], pts[2], iter.conicWeight()); break;
+ case SkPath::kClose_Verb: this->close(); break;
+ case SkPath::kDone_Verb: SkUNREACHABLE;
+ }
+ }
+
+ return *this;
+}
+
+SkPathBuilder& SkPathBuilder::privateReverseAddPath(const SkPath& src) {
+
+ const uint8_t* verbsBegin = src.fPathRef->verbsBegin();
+ const uint8_t* verbs = src.fPathRef->verbsEnd();
+ const SkPoint* pts = src.fPathRef->pointsEnd();
+ const SkScalar* conicWeights = src.fPathRef->conicWeightsEnd();
+
+ bool needMove = true;
+ bool needClose = false;
+ while (verbs > verbsBegin) {
+ uint8_t v = *--verbs;
+ int n = SkPathPriv::PtsInVerb(v);
+
+ if (needMove) {
+ --pts;
+ this->moveTo(pts->fX, pts->fY);
+ needMove = false;
+ }
+ pts -= n;
+ switch ((SkPathVerb)v) {
+ case SkPathVerb::kMove:
+ if (needClose) {
+ this->close();
+ needClose = false;
+ }
+ needMove = true;
+ pts += 1; // so we see the point in "if (needMove)" above
+ break;
+ case SkPathVerb::kLine:
+ this->lineTo(pts[0]);
+ break;
+ case SkPathVerb::kQuad:
+ this->quadTo(pts[1], pts[0]);
+ break;
+ case SkPathVerb::kConic:
+ this->conicTo(pts[1], pts[0], *--conicWeights);
+ break;
+ case SkPathVerb::kCubic:
+ this->cubicTo(pts[2], pts[1], pts[0]);
+ break;
+ case SkPathVerb::kClose:
+ needClose = true;
+ break;
+ default:
+ SkDEBUGFAIL("unexpected verb");
+ }
+ }
+ return *this;
+}
diff --git a/gfx/skia/skia/src/core/SkPathEffect.cpp b/gfx/skia/skia/src/core/SkPathEffect.cpp
new file mode 100644
index 0000000000..785cc62f72
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathEffect.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "src/core/SkPathEffectBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPathEffect::filterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* bounds) const {
+ return this->filterPath(dst, src, rec, bounds, SkMatrix::I());
+}
+
+bool SkPathEffect::filterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* bounds, const SkMatrix& ctm) const {
+ SkPath tmp, *tmpDst = dst;
+ if (dst == &src) {
+ tmpDst = &tmp;
+ }
+ if (as_PEB(this)->onFilterPath(tmpDst, src, rec, bounds, ctm)) {
+ if (dst == &src) {
+ *dst = tmp;
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkPathEffectBase::asPoints(PointData* results, const SkPath& src,
+ const SkStrokeRec& rec, const SkMatrix& mx, const SkRect* rect) const {
+ return this->onAsPoints(results, src, rec, mx, rect);
+}
+
+SkPathEffect::DashType SkPathEffect::asADash(DashInfo* info) const {
+ return as_PEB(this)->onAsADash(info);
+}
+
+bool SkPathEffect::needsCTM() const {
+ return as_PEB(this)->onNeedsCTM();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** \class SkPairPathEffect
+
+ Common baseclass for Compose and Sum. This subclass manages two pathEffects,
+ including flattening them. It does nothing in filterPath, and is only useful
+ for managing the lifetimes of its two arguments.
+ */
+class SkPairPathEffect : public SkPathEffectBase {
+protected:
+ SkPairPathEffect(sk_sp<SkPathEffect> pe0, sk_sp<SkPathEffect> pe1)
+ : fPE0(std::move(pe0)), fPE1(std::move(pe1))
+ {
+ SkASSERT(fPE0.get());
+ SkASSERT(fPE1.get());
+ }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeFlattenable(fPE0.get());
+ buffer.writeFlattenable(fPE1.get());
+ }
+
+ // these are visible to our subclasses
+ sk_sp<SkPathEffect> fPE0;
+ sk_sp<SkPathEffect> fPE1;
+
+private:
+ using INHERITED = SkPathEffectBase;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkComposePathEffect : public SkPairPathEffect {
+public:
+ /** Construct a pathEffect whose effect is to apply first the inner pathEffect
+        and then the outer pathEffect (e.g. outer(inner(path)))
+ The reference counts for outer and inner are both incremented in the constructor,
+ and decremented in the destructor.
+ */
+ static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner) {
+ if (!outer) {
+ return inner;
+ }
+ if (!inner) {
+ return outer;
+ }
+ return sk_sp<SkPathEffect>(new SkComposePathEffect(outer, inner));
+ }
+
+ SkComposePathEffect(sk_sp<SkPathEffect> outer, sk_sp<SkPathEffect> inner)
+ : INHERITED(outer, inner) {}
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkMatrix& ctm) const override {
+ SkPath tmp;
+ const SkPath* ptr = &src;
+
+ if (fPE1->filterPath(&tmp, src, rec, cullRect, ctm)) {
+ ptr = &tmp;
+ }
+ return fPE0->filterPath(dst, *ptr, rec, cullRect, ctm);
+ }
+
+ SK_FLATTENABLE_HOOKS(SkComposePathEffect)
+
+ bool computeFastBounds(SkRect* bounds) const override {
+ // inner (fPE1) is computed first, automatically updating bounds before computing outer.
+ return as_PEB(fPE1)->computeFastBounds(bounds) &&
+ as_PEB(fPE0)->computeFastBounds(bounds);
+ }
+
+private:
+ // illegal
+ SkComposePathEffect(const SkComposePathEffect&);
+ SkComposePathEffect& operator=(const SkComposePathEffect&);
+ friend class SkPathEffect;
+
+ using INHERITED = SkPairPathEffect;
+};
+
+sk_sp<SkFlattenable> SkComposePathEffect::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPathEffect> pe0(buffer.readPathEffect());
+ sk_sp<SkPathEffect> pe1(buffer.readPathEffect());
+ return SkComposePathEffect::Make(std::move(pe0), std::move(pe1));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/** \class SkSumPathEffect
+
+ This subclass of SkPathEffect applies two pathEffects, one after the other.
+ Its filterPath() returns true if either of the effects succeeded.
+ */
+class SkSumPathEffect : public SkPairPathEffect {
+public:
+ /** Construct a pathEffect whose effect is to apply two effects, in sequence.
+ (e.g. first(path) + second(path))
+ The reference counts for first and second are both incremented in the constructor,
+ and decremented in the destructor.
+ */
+ static sk_sp<SkPathEffect> Make(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second) {
+ if (!first) {
+ return second;
+ }
+ if (!second) {
+ return first;
+ }
+ return sk_sp<SkPathEffect>(new SkSumPathEffect(first, second));
+ }
+
+ SkSumPathEffect(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second)
+ : INHERITED(first, second) {}
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkMatrix& ctm) const override {
+ // always call both, even if the first one succeeds
+ bool filteredFirst = fPE0->filterPath(dst, src, rec, cullRect, ctm);
+ bool filteredSecond = fPE1->filterPath(dst, src, rec, cullRect, ctm);
+ return filteredFirst || filteredSecond;
+ }
+
+ SK_FLATTENABLE_HOOKS(SkSumPathEffect)
+
+ bool computeFastBounds(SkRect* bounds) const override {
+ // Unlike Compose(), PE0 modifies the path first for Sum
+ return as_PEB(fPE0)->computeFastBounds(bounds) &&
+ as_PEB(fPE1)->computeFastBounds(bounds);
+ }
+
+private:
+ // illegal
+ SkSumPathEffect(const SkSumPathEffect&);
+ SkSumPathEffect& operator=(const SkSumPathEffect&);
+ friend class SkPathEffect;
+
+ using INHERITED = SkPairPathEffect;
+};
+
+sk_sp<SkFlattenable> SkSumPathEffect::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPathEffect> pe0(buffer.readPathEffect());
+ sk_sp<SkPathEffect> pe1(buffer.readPathEffect());
+ return SkSumPathEffect::Make(pe0, pe1);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkPathEffect::MakeSum(sk_sp<SkPathEffect> first, sk_sp<SkPathEffect> second) {
+ return SkSumPathEffect::Make(std::move(first), std::move(second));
+}
+
+sk_sp<SkPathEffect> SkPathEffect::MakeCompose(sk_sp<SkPathEffect> outer,
+ sk_sp<SkPathEffect> inner) {
+ return SkComposePathEffect::Make(std::move(outer), std::move(inner));
+}
+
+void SkPathEffectBase::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkComposePathEffect);
+ SK_REGISTER_FLATTENABLE(SkSumPathEffect);
+}
+
+sk_sp<SkPathEffect> SkPathEffect::Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs) {
+ return sk_sp<SkPathEffect>(static_cast<SkPathEffect*>(
+ SkFlattenable::Deserialize(
+ kSkPathEffect_Type, data, size, procs).release()));
+}
diff --git a/gfx/skia/skia/src/core/SkPathEffectBase.h b/gfx/skia/skia/src/core/SkPathEffectBase.h
new file mode 100644
index 0000000000..93feea2ee6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathEffectBase.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathEffectBase_DEFINED
+#define SkPathEffectBase_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+
+class SkPath;
+class SkStrokeRec;
+
+class SkPathEffectBase : public SkPathEffect {
+public:
+ SkPathEffectBase() {}
+
+ /** \class PointData
+
+ PointData aggregates all the information needed to draw the point
+ primitives returned by an 'asPoints' call.
+ */
+ class PointData {
+ public:
+ PointData()
+ : fFlags(0)
+ , fPoints(nullptr)
+ , fNumPoints(0) {
+ fSize.set(SK_Scalar1, SK_Scalar1);
+ // 'asPoints' needs to initialize/fill-in 'fClipRect' if it sets
+ // the kUseClip flag
+ }
+ ~PointData() {
+ delete [] fPoints;
+ }
+
+ // TODO: consider using passed-in flags to limit the work asPoints does.
+ // For example, a kNoPath flag could indicate don't bother generating
+ // stamped solutions.
+
+ // Currently none of these flags are supported.
+ enum PointFlags {
+ kCircles_PointFlag = 0x01, // draw points as circles (instead of rects)
+ kUsePath_PointFlag = 0x02, // draw points as stamps of the returned path
+ kUseClip_PointFlag = 0x04, // apply 'fClipRect' before drawing the points
+ };
+
+ uint32_t fFlags; // flags that impact the drawing of the points
+ SkPoint* fPoints; // the center point of each generated point
+ int fNumPoints; // number of points in fPoints
+ SkVector fSize; // the size to draw the points
+ SkRect fClipRect; // clip required to draw the points (if kUseClip is set)
+ SkPath fPath; // 'stamp' to be used at each point (if kUsePath is set)
+
+ SkPath fFirst; // If not empty, contains geometry for first point
+ SkPath fLast; // If not empty, contains geometry for last point
+ };
+
+ /**
+ * Does applying this path effect to 'src' yield a set of points? If so,
+ * optionally return the points in 'results'.
+ */
+ bool asPoints(PointData* results, const SkPath& src,
+ const SkStrokeRec&, const SkMatrix&,
+ const SkRect* cullR) const;
+
+ /**
+ * If the PathEffect can be represented as a dash pattern, asADash will return kDash_DashType
+     * and None otherwise. If a non-NULL info is passed in, the various DashInfo will be filled
+     * in if the PathEffect can be a dash pattern. If the passed-in info has an fCount equal
+     * to or greater than that of the effect, it will memcpy the values of the dash intervals into the
+ * info. Thus the general approach will be call asADash once with default info to get DashType
+ * and fCount. If effect can be represented as a dash pattern, allocate space for the intervals
+ * in info, then call asADash again with the same info and the intervals will get copied in.
+ */
+
+ SkFlattenable::Type getFlattenableType() const override {
+ return kSkPathEffect_Type;
+ }
+
+ static sk_sp<SkPathEffect> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkPathEffect>(static_cast<SkPathEffect*>(
+ SkFlattenable::Deserialize(
+ kSkPathEffect_Type, data, size, procs).release()));
+ }
+
+ /**
+ * Filter the input path.
+ *
+ * The CTM parameter is provided for path effects that can use the information.
+ * The output of path effects must always be in the original (input) coordinate system,
+ * regardless of whether the path effect uses the CTM or not.
+ */
+ virtual bool onFilterPath(SkPath*, const SkPath&, SkStrokeRec*, const SkRect*,
+ const SkMatrix& /* ctm */) const = 0;
+
+ /** Path effects *requiring* a valid CTM should override to return true. */
+ virtual bool onNeedsCTM() const { return false; }
+
+ virtual bool onAsPoints(PointData*, const SkPath&, const SkStrokeRec&, const SkMatrix&,
+ const SkRect*) const {
+ return false;
+ }
+ virtual DashType onAsADash(DashInfo*) const {
+ return kNone_DashType;
+ }
+
+
+ // Compute a conservative bounds for its effect, given the bounds of the path. 'bounds' is
+ // both the input and output; if false is returned, fast bounds could not be calculated and
+ // 'bounds' is undefined.
+ //
+ // If 'bounds' is null, performs a dry-run determining if bounds could be computed.
+ virtual bool computeFastBounds(SkRect* bounds) const = 0;
+
+ static void RegisterFlattenables();
+
+private:
+ using INHERITED = SkPathEffect;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline SkPathEffectBase* as_PEB(SkPathEffect* effect) {
+ return static_cast<SkPathEffectBase*>(effect);
+}
+
+static inline const SkPathEffectBase* as_PEB(const SkPathEffect* effect) {
+ return static_cast<const SkPathEffectBase*>(effect);
+}
+
+static inline const SkPathEffectBase* as_PEB(const sk_sp<SkPathEffect>& effect) {
+ return static_cast<SkPathEffectBase*>(effect.get());
+}
+
+static inline sk_sp<SkPathEffectBase> as_PEB_sp(sk_sp<SkPathEffect> effect) {
+ return sk_sp<SkPathEffectBase>(static_cast<SkPathEffectBase*>(effect.release()));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathMakers.h b/gfx/skia/skia/src/core/SkPathMakers.h
new file mode 100644
index 0000000000..8e668e5378
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathMakers.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathMakers_DEFINED
+#define SkPathMakers_DEFINED
+
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRRect.h"
+
+template <unsigned N> class SkPath_PointIterator {
+public:
+ SkPath_PointIterator(SkPathDirection dir, unsigned startIndex)
+ : fCurrent(startIndex % N)
+ , fAdvance(dir == SkPathDirection::kCW ? 1 : N - 1) { }
+
+ const SkPoint& current() const {
+ SkASSERT(fCurrent < N);
+ return fPts[fCurrent];
+ }
+
+ const SkPoint& next() {
+ fCurrent = (fCurrent + fAdvance) % N;
+ return this->current();
+ }
+
+ protected:
+ SkPoint fPts[N];
+
+ private:
+ unsigned fCurrent;
+ unsigned fAdvance;
+};
+
+class SkPath_RectPointIterator : public SkPath_PointIterator<4> {
+public:
+ SkPath_RectPointIterator(const SkRect& rect, SkPathDirection dir, unsigned startIndex)
+ : SkPath_PointIterator(dir, startIndex) {
+
+ fPts[0] = SkPoint::Make(rect.fLeft, rect.fTop);
+ fPts[1] = SkPoint::Make(rect.fRight, rect.fTop);
+ fPts[2] = SkPoint::Make(rect.fRight, rect.fBottom);
+ fPts[3] = SkPoint::Make(rect.fLeft, rect.fBottom);
+ }
+};
+
+class SkPath_OvalPointIterator : public SkPath_PointIterator<4> {
+public:
+ SkPath_OvalPointIterator(const SkRect& oval, SkPathDirection dir, unsigned startIndex)
+ : SkPath_PointIterator(dir, startIndex) {
+
+ const SkScalar cx = oval.centerX();
+ const SkScalar cy = oval.centerY();
+
+ fPts[0] = SkPoint::Make(cx, oval.fTop);
+ fPts[1] = SkPoint::Make(oval.fRight, cy);
+ fPts[2] = SkPoint::Make(cx, oval.fBottom);
+ fPts[3] = SkPoint::Make(oval.fLeft, cy);
+ }
+};
+
+class SkPath_RRectPointIterator : public SkPath_PointIterator<8> {
+public:
+ SkPath_RRectPointIterator(const SkRRect& rrect, SkPathDirection dir, unsigned startIndex)
+ : SkPath_PointIterator(dir, startIndex) {
+
+ const SkRect& bounds = rrect.getBounds();
+ const SkScalar L = bounds.fLeft;
+ const SkScalar T = bounds.fTop;
+ const SkScalar R = bounds.fRight;
+ const SkScalar B = bounds.fBottom;
+
+ fPts[0] = SkPoint::Make(L + rrect.radii(SkRRect::kUpperLeft_Corner).fX, T);
+ fPts[1] = SkPoint::Make(R - rrect.radii(SkRRect::kUpperRight_Corner).fX, T);
+ fPts[2] = SkPoint::Make(R, T + rrect.radii(SkRRect::kUpperRight_Corner).fY);
+ fPts[3] = SkPoint::Make(R, B - rrect.radii(SkRRect::kLowerRight_Corner).fY);
+ fPts[4] = SkPoint::Make(R - rrect.radii(SkRRect::kLowerRight_Corner).fX, B);
+ fPts[5] = SkPoint::Make(L + rrect.radii(SkRRect::kLowerLeft_Corner).fX, B);
+ fPts[6] = SkPoint::Make(L, B - rrect.radii(SkRRect::kLowerLeft_Corner).fY);
+ fPts[7] = SkPoint::Make(L, T + rrect.radii(SkRRect::kUpperLeft_Corner).fY);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathMeasure.cpp b/gfx/skia/skia/src/core/SkPathMeasure.cpp
new file mode 100644
index 0000000000..445e6a12ba
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathMeasure.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkContourMeasure.h"
+#include "include/core/SkPathMeasure.h"
+
+SkPathMeasure::SkPathMeasure() {}
+
+SkPathMeasure::SkPathMeasure(const SkPath& path, bool forceClosed, SkScalar resScale)
+ : fIter(path, forceClosed, resScale)
+{
+ fContour = fIter.next();
+}
+
+SkPathMeasure::~SkPathMeasure() {}
+
+void SkPathMeasure::setPath(const SkPath* path, bool forceClosed) {
+ fIter.reset(path ? *path : SkPath(), forceClosed);
+ fContour = fIter.next();
+}
+
+SkScalar SkPathMeasure::getLength() {
+ return fContour ? fContour->length() : 0;
+}
+
+bool SkPathMeasure::getPosTan(SkScalar distance, SkPoint* position, SkVector* tangent) {
+ return fContour && fContour->getPosTan(distance, position, tangent);
+}
+
+bool SkPathMeasure::getMatrix(SkScalar distance, SkMatrix* matrix, MatrixFlags flags) {
+ return fContour && fContour->getMatrix(distance, matrix, (SkContourMeasure::MatrixFlags)flags);
+}
+
+bool SkPathMeasure::getSegment(SkScalar startD, SkScalar stopD, SkPath* dst, bool startWithMoveTo) {
+ return fContour && fContour->getSegment(startD, stopD, dst, startWithMoveTo);
+}
+
+bool SkPathMeasure::isClosed() {
+ return fContour && fContour->isClosed();
+}
+
+bool SkPathMeasure::nextContour() {
+ fContour = fIter.next();
+ return !!fContour;
+}
+
+#ifdef SK_DEBUG
+void SkPathMeasure::dump() {}
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathMeasurePriv.h b/gfx/skia/skia/src/core/SkPathMeasurePriv.h
new file mode 100644
index 0000000000..dbad22b622
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathMeasurePriv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathMeasurePriv_DEFINED
+#define SkPathMeasurePriv_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "src/core/SkGeometry.h"
+
+// Used in the Segment struct defined in SkPathMeasure.h
+// It is used as a 2-bit field so if you add to this
+// you must increase the size of the bitfield there.
+enum SkSegType {
+ kLine_SegType,
+ kQuad_SegType,
+ kCubic_SegType,
+ kConic_SegType,
+};
+
+
+void SkPathMeasure_segTo(const SkPoint pts[], unsigned segType,
+ SkScalar startT, SkScalar stopT, SkPath* dst);
+
+#endif // SkPathMeasurePriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkPathPriv.h b/gfx/skia/skia/src/core/SkPathPriv.h
new file mode 100644
index 0000000000..f1721a77ef
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathPriv.h
@@ -0,0 +1,529 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathPriv_DEFINED
+#define SkPathPriv_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPathBuilder.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkIDChangeListener.h"
+#include "include/private/SkPathRef.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkPathEnums.h"
+
+#include <cstdint>
+#include <iterator>
+#include <utility>
+
+class SkMatrix;
+class SkRRect;
+
+static_assert(0 == static_cast<int>(SkPathFillType::kWinding), "fill_type_mismatch");
+static_assert(1 == static_cast<int>(SkPathFillType::kEvenOdd), "fill_type_mismatch");
+static_assert(2 == static_cast<int>(SkPathFillType::kInverseWinding), "fill_type_mismatch");
+static_assert(3 == static_cast<int>(SkPathFillType::kInverseEvenOdd), "fill_type_mismatch");
+
+class SkPathPriv {
+public:
+ // skbug.com/9906: Not a perfect solution for W plane clipping, but 1/16384 is a
+ // reasonable limit (roughly 5e-5)
+ inline static constexpr SkScalar kW0PlaneDistance = 1.f / (1 << 14);
+
+ static SkPathFirstDirection AsFirstDirection(SkPathDirection dir) {
+ // since we agree numerically for the values in Direction, we can just cast.
+ return (SkPathFirstDirection)dir;
+ }
+
+ /**
+ * Return the opposite of the specified direction. kUnknown is its own
+ * opposite.
+ */
+ static SkPathFirstDirection OppositeFirstDirection(SkPathFirstDirection dir) {
+ static const SkPathFirstDirection gOppositeDir[] = {
+ SkPathFirstDirection::kCCW, SkPathFirstDirection::kCW, SkPathFirstDirection::kUnknown,
+ };
+ return gOppositeDir[(unsigned)dir];
+ }
+
+ /**
+ * Tries to compute the direction of the outer-most non-degenerate
+ * contour. If it can be computed, return that direction. If it cannot be determined,
+ * or the contour is known to be convex, return kUnknown. If the direction was determined,
+ * it is cached to make subsequent calls return quickly.
+ */
+ static SkPathFirstDirection ComputeFirstDirection(const SkPath&);
+
+ static bool IsClosedSingleContour(const SkPath& path) {
+ int verbCount = path.countVerbs();
+ if (verbCount == 0)
+ return false;
+ int moveCount = 0;
+ auto verbs = path.fPathRef->verbsBegin();
+ for (int i = 0; i < verbCount; i++) {
+ switch (verbs[i]) {
+ case SkPath::Verb::kMove_Verb:
+ moveCount += 1;
+ if (moveCount > 1) {
+ return false;
+ }
+ break;
+ case SkPath::Verb::kClose_Verb:
+ if (i == verbCount - 1) {
+ return true;
+ }
+ return false;
+ default: break;
+ }
+ }
+ return false;
+ }
+
+ // In some scenarios (e.g. fill or convexity checking all but the last leading move to are
+ // irrelevant to behavior). SkPath::injectMoveToIfNeeded should ensure that this is always at
+ // least 1.
+ static int LeadingMoveToCount(const SkPath& path) {
+ int verbCount = path.countVerbs();
+ auto verbs = path.fPathRef->verbsBegin();
+ for (int i = 0; i < verbCount; i++) {
+ if (verbs[i] != SkPath::Verb::kMove_Verb) {
+ return i;
+ }
+ }
+ return verbCount; // path is all move verbs
+ }
+
+ static void AddGenIDChangeListener(const SkPath& path, sk_sp<SkIDChangeListener> listener) {
+ path.fPathRef->addGenIDChangeListener(std::move(listener));
+ }
+
+ /**
+ * This returns true for a rect that has a move followed by 3 or 4 lines and a close. If
+ * 'isSimpleFill' is true, an uncloseed rect will also be accepted as long as it starts and
+ * ends at the same corner. This does not permit degenerate line or point rectangles.
+ */
+ static bool IsSimpleRect(const SkPath& path, bool isSimpleFill, SkRect* rect,
+ SkPathDirection* direction, unsigned* start);
+
+ /**
+ * Creates a path from arc params using the semantics of SkCanvas::drawArc. This function
+ * assumes empty ovals and zero sweeps have already been filtered out.
+ */
+ static void CreateDrawArcPath(SkPath* path, const SkRect& oval, SkScalar startAngle,
+ SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect);
+
+ /**
+ * Determines whether an arc produced by CreateDrawArcPath will be convex. Assumes a non-empty
+ * oval.
+ */
+ static bool DrawArcIsConvex(SkScalar sweepAngle, bool useCenter, bool isFillNoPathEffect);
+
+ static void ShrinkToFit(SkPath* path) {
+ path->shrinkToFit();
+ }
+
+ /**
+ * Returns a C++11-iterable object that traverses a path's verbs in order. e.g:
+ *
+ * for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
+ * ...
+ * }
+ */
+ struct Verbs {
+ public:
+ Verbs(const SkPath& path) : fPathRef(path.fPathRef.get()) {}
+ struct Iter {
+ void operator++() { fVerb++; }
+ bool operator!=(const Iter& b) { return fVerb != b.fVerb; }
+ SkPath::Verb operator*() { return static_cast<SkPath::Verb>(*fVerb); }
+ const uint8_t* fVerb;
+ };
+ Iter begin() { return Iter{fPathRef->verbsBegin()}; }
+ Iter end() { return Iter{fPathRef->verbsEnd()}; }
+ private:
+ Verbs(const Verbs&) = delete;
+ Verbs& operator=(const Verbs&) = delete;
+ SkPathRef* fPathRef;
+ };
+
+ /**
+ * Iterates through a raw range of path verbs, points, and conics. All values are returned
+ * unaltered.
+ *
+ * NOTE: This class's definition will be moved into SkPathPriv once RangeIter is removed.
+ */
+ using RangeIter = SkPath::RangeIter;
+
+ /**
+ * Iterable object for traversing verbs, points, and conic weights in a path:
+ *
+ * for (auto [verb, pts, weights] : SkPathPriv::Iterate(skPath)) {
+ * ...
+ * }
+ */
+ struct Iterate {
+ public:
+ Iterate(const SkPath& path)
+ : Iterate(path.fPathRef->verbsBegin(),
+ // Don't allow iteration through non-finite points.
+ (!path.isFinite()) ? path.fPathRef->verbsBegin()
+ : path.fPathRef->verbsEnd(),
+ path.fPathRef->points(), path.fPathRef->conicWeights()) {
+ }
+ Iterate(const uint8_t* verbsBegin, const uint8_t* verbsEnd, const SkPoint* points,
+ const SkScalar* weights)
+ : fVerbsBegin(verbsBegin), fVerbsEnd(verbsEnd), fPoints(points), fWeights(weights) {
+ }
+ SkPath::RangeIter begin() { return {fVerbsBegin, fPoints, fWeights}; }
+ SkPath::RangeIter end() { return {fVerbsEnd, nullptr, nullptr}; }
+ private:
+ const uint8_t* fVerbsBegin;
+ const uint8_t* fVerbsEnd;
+ const SkPoint* fPoints;
+ const SkScalar* fWeights;
+ };
+
+ /**
+ * Returns a pointer to the verb data.
+ */
+ static const uint8_t* VerbData(const SkPath& path) {
+ return path.fPathRef->verbsBegin();
+ }
+
+ /** Returns a raw pointer to the path points */
+ static const SkPoint* PointData(const SkPath& path) {
+ return path.fPathRef->points();
+ }
+
+ /** Returns the number of conic weights in the path */
+ static int ConicWeightCnt(const SkPath& path) {
+ return path.fPathRef->countWeights();
+ }
+
+ /** Returns a raw pointer to the path conic weights. */
+ static const SkScalar* ConicWeightData(const SkPath& path) {
+ return path.fPathRef->conicWeights();
+ }
+
+ /** Returns true if the underlying SkPathRef has one single owner. */
+ static bool TestingOnly_unique(const SkPath& path) {
+ return path.fPathRef->unique();
+ }
+
+ // Won't be needed once we can make path's immutable (with their bounds always computed)
+ static bool HasComputedBounds(const SkPath& path) {
+ return path.hasComputedBounds();
+ }
+
+ /** Returns true if constructed by addCircle(), addOval(); and in some cases,
+ addRoundRect(), addRRect(). SkPath constructed with conicTo() or rConicTo() will not
+ return true though SkPath draws oval.
+
+ rect receives bounds of oval.
+ dir receives SkPathDirection of oval: kCW_Direction if clockwise, kCCW_Direction if
+ counterclockwise.
+ start receives start of oval: 0 for top, 1 for right, 2 for bottom, 3 for left.
+
+ rect, dir, and start are unmodified if oval is not found.
+
+ Triggers performance optimizations on some GPU surface implementations.
+
+ @param rect storage for bounding SkRect of oval; may be nullptr
+ @param dir storage for SkPathDirection; may be nullptr
+ @param start storage for start of oval; may be nullptr
+ @return true if SkPath was constructed by method that reduces to oval
+ */
+ static bool IsOval(const SkPath& path, SkRect* rect, SkPathDirection* dir, unsigned* start) {
+ bool isCCW = false;
+ bool result = path.fPathRef->isOval(rect, &isCCW, start);
+ if (dir && result) {
+ *dir = isCCW ? SkPathDirection::kCCW : SkPathDirection::kCW;
+ }
+ return result;
+ }
+
+ /** Returns true if constructed by addRoundRect(), addRRect(); and if construction
+ is not empty, not SkRect, and not oval. SkPath constructed with other calls
+ will not return true though SkPath draws SkRRect.
+
+ rrect receives bounds of SkRRect.
+ dir receives SkPathDirection of oval: kCW_Direction if clockwise, kCCW_Direction if
+ counterclockwise.
+ start receives start of SkRRect: 0 for top, 1 for right, 2 for bottom, 3 for left.
+
+ rrect, dir, and start are unmodified if SkRRect is not found.
+
+ Triggers performance optimizations on some GPU surface implementations.
+
+ @param rrect storage for bounding SkRect of SkRRect; may be nullptr
+ @param dir storage for SkPathDirection; may be nullptr
+ @param start storage for start of SkRRect; may be nullptr
+ @return true if SkPath contains only SkRRect
+ */
+ static bool IsRRect(const SkPath& path, SkRRect* rrect, SkPathDirection* dir,
+ unsigned* start) {
+ bool isCCW = false;
+ bool result = path.fPathRef->isRRect(rrect, &isCCW, start);
+ if (dir && result) {
+ *dir = isCCW ? SkPathDirection::kCCW : SkPathDirection::kCW;
+ }
+ return result;
+ }
+
+ /**
+ * Sometimes in the drawing pipeline, we have to perform math on path coordinates, even after
+ * the path is in device-coordinates. Tessellation and clipping are two examples. Usually this
+ * is pretty modest, but it can involve subtracting/adding coordinates, or multiplying by
+ * small constants (e.g. 2,3,4). To try to preflight issues where these optionations could turn
+ * finite path values into infinities (or NaNs), we allow the upper drawing code to reject
+ * the path if its bounds (in device coordinates) is too close to max float.
+ */
+ static bool TooBigForMath(const SkRect& bounds) {
+ // This value is just a guess. smaller is safer, but we don't want to reject largish paths
+ // that we don't have to.
+ constexpr SkScalar scale_down_to_allow_for_small_multiplies = 0.25f;
+ constexpr SkScalar max = SK_ScalarMax * scale_down_to_allow_for_small_multiplies;
+
+ // use ! expression so we return true if bounds contains NaN
+ return !(bounds.fLeft >= -max && bounds.fTop >= -max &&
+ bounds.fRight <= max && bounds.fBottom <= max);
+ }
+ static bool TooBigForMath(const SkPath& path) {
+ return TooBigForMath(path.getBounds());
+ }
+
+ // Returns number of valid points for each SkPath::Iter verb
+ static int PtsInIter(unsigned verb) {
+ static const uint8_t gPtsInVerb[] = {
+ 1, // kMove pts[0]
+ 2, // kLine pts[0..1]
+ 3, // kQuad pts[0..2]
+ 3, // kConic pts[0..2]
+ 4, // kCubic pts[0..3]
+ 0, // kClose
+ 0 // kDone
+ };
+
+ SkASSERT(verb < std::size(gPtsInVerb));
+ return gPtsInVerb[verb];
+ }
+
+ // Returns number of valid points for each verb, not including the "starter"
+ // point that the Iterator adds for line/quad/conic/cubic
+ static int PtsInVerb(unsigned verb) {
+ static const uint8_t gPtsInVerb[] = {
+ 1, // kMove pts[0]
+ 1, // kLine pts[0..1]
+ 2, // kQuad pts[0..2]
+ 2, // kConic pts[0..2]
+ 3, // kCubic pts[0..3]
+ 0, // kClose
+ 0 // kDone
+ };
+
+ SkASSERT(verb < std::size(gPtsInVerb));
+ return gPtsInVerb[verb];
+ }
+
+ static bool IsAxisAligned(const SkPath& path);
+
+ static bool AllPointsEq(const SkPoint pts[], int count) {
+ for (int i = 1; i < count; ++i) {
+ if (pts[0] != pts[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ static int LastMoveToIndex(const SkPath& path) { return path.fLastMoveToIndex; }
+
+ static bool IsRectContour(const SkPath&, bool allowPartial, int* currVerb,
+ const SkPoint** ptsPtr, bool* isClosed, SkPathDirection* direction,
+ SkRect* rect);
+
+ /** Returns true if SkPath is equivalent to nested SkRect pair when filled.
+ If false, rect and dirs are unchanged.
+ If true, rect and dirs are written to if not nullptr:
+ setting rect[0] to outer SkRect, and rect[1] to inner SkRect;
+ setting dirs[0] to SkPathDirection of outer SkRect, and dirs[1] to SkPathDirection of
+ inner SkRect.
+
+ @param rect storage for SkRect pair; may be nullptr
+ @param dirs storage for SkPathDirection pair; may be nullptr
+ @return true if SkPath contains nested SkRect pair
+ */
+ static bool IsNestedFillRects(const SkPath&, SkRect rect[2],
+ SkPathDirection dirs[2] = nullptr);
+
+ static bool IsInverseFillType(SkPathFillType fill) {
+ return (static_cast<int>(fill) & 2) != 0;
+ }
+
+ /** Returns equivalent SkPath::FillType representing SkPath fill inside its bounds.
+ .
+
+ @param fill one of: kWinding_FillType, kEvenOdd_FillType,
+ kInverseWinding_FillType, kInverseEvenOdd_FillType
+ @return fill, or kWinding_FillType or kEvenOdd_FillType if fill is inverted
+ */
+ static SkPathFillType ConvertToNonInverseFillType(SkPathFillType fill) {
+ return (SkPathFillType)(static_cast<int>(fill) & 1);
+ }
+
+ /**
+ * If needed (to not blow-up under a perspective matrix), clip the path, returning the
+ * answer in "result", and return true.
+ *
+ * Note result might be empty (if the path was completely clipped out).
+ *
+ * If no clipping is needed, returns false and "result" is left unchanged.
+ */
+ static bool PerspectiveClip(const SkPath& src, const SkMatrix&, SkPath* result);
+
+ /**
+ * Gets the number of GenIDChangeListeners. If another thread has access to this path then
+ * this may be stale before return and only indicates that the count was the return value
+ * at some point during the execution of the function.
+ */
+ static int GenIDChangeListenersCount(const SkPath&);
+
+ static void UpdatePathPoint(SkPath* path, int index, const SkPoint& pt) {
+ SkASSERT(index < path->countPoints());
+ SkPathRef::Editor ed(&path->fPathRef);
+ ed.writablePoints()[index] = pt;
+ path->dirtyAfterEdit();
+ }
+
+ static SkPathConvexity GetConvexity(const SkPath& path) {
+ return path.getConvexity();
+ }
+ static SkPathConvexity GetConvexityOrUnknown(const SkPath& path) {
+ return path.getConvexityOrUnknown();
+ }
+ static void SetConvexity(const SkPath& path, SkPathConvexity c) {
+ path.setConvexity(c);
+ }
+ static void ForceComputeConvexity(const SkPath& path) {
+ path.setConvexity(SkPathConvexity::kUnknown);
+ (void)path.isConvex();
+ }
+
+ static void ReverseAddPath(SkPathBuilder* builder, const SkPath& reverseMe) {
+ builder->privateReverseAddPath(reverseMe);
+ }
+};
+
+// Lightweight variant of SkPath::Iter that only returns segments (e.g. lines/conics).
+// Does not return kMove or kClose.
+// Always "auto-closes" each contour.
+// Roughly the same as SkPath::Iter(path, true), but does not return moves or closes
+//
+class SkPathEdgeIter {
+ const uint8_t* fVerbs;
+ const uint8_t* fVerbsStop;
+ const SkPoint* fPts;
+ const SkPoint* fMoveToPtr;
+ const SkScalar* fConicWeights;
+ SkPoint fScratch[2]; // for auto-close lines
+ bool fNeedsCloseLine;
+ bool fNextIsNewContour;
+ SkDEBUGCODE(bool fIsConic;)
+
+ enum {
+ kIllegalEdgeValue = 99
+ };
+
+public:
+ SkPathEdgeIter(const SkPath& path);
+
+ SkScalar conicWeight() const {
+ SkASSERT(fIsConic);
+ return *fConicWeights;
+ }
+
+ enum class Edge {
+ kLine = SkPath::kLine_Verb,
+ kQuad = SkPath::kQuad_Verb,
+ kConic = SkPath::kConic_Verb,
+ kCubic = SkPath::kCubic_Verb,
+ };
+
+ static SkPath::Verb EdgeToVerb(Edge e) {
+ return SkPath::Verb(e);
+ }
+
+ struct Result {
+ const SkPoint* fPts; // points for the segment, or null if done
+ Edge fEdge;
+ bool fIsNewContour;
+
+ // Returns true when it holds an Edge, false when the path is done.
+ explicit operator bool() { return fPts != nullptr; }
+ };
+
+ Result next() {
+ auto closeline = [&]() {
+ fScratch[0] = fPts[-1];
+ fScratch[1] = *fMoveToPtr;
+ fNeedsCloseLine = false;
+ fNextIsNewContour = true;
+ return Result{ fScratch, Edge::kLine, false };
+ };
+
+ for (;;) {
+ SkASSERT(fVerbs <= fVerbsStop);
+ if (fVerbs == fVerbsStop) {
+ return fNeedsCloseLine
+ ? closeline()
+ : Result{ nullptr, Edge(kIllegalEdgeValue), false };
+ }
+
+ SkDEBUGCODE(fIsConic = false;)
+
+ const auto v = *fVerbs++;
+ switch (v) {
+ case SkPath::kMove_Verb: {
+ if (fNeedsCloseLine) {
+ auto res = closeline();
+ fMoveToPtr = fPts++;
+ return res;
+ }
+ fMoveToPtr = fPts++;
+ fNextIsNewContour = true;
+ } break;
+ case SkPath::kClose_Verb:
+ if (fNeedsCloseLine) return closeline();
+ break;
+ default: {
+ // Actual edge.
+ const int pts_count = (v+2) / 2,
+ cws_count = (v & (v-1)) / 2;
+ SkASSERT(pts_count == SkPathPriv::PtsInIter(v) - 1);
+
+ fNeedsCloseLine = true;
+ fPts += pts_count;
+ fConicWeights += cws_count;
+
+ SkDEBUGCODE(fIsConic = (v == SkPath::kConic_Verb);)
+ SkASSERT(fIsConic == (cws_count > 0));
+
+ bool isNewContour = fNextIsNewContour;
+ fNextIsNewContour = false;
+ return { &fPts[-(pts_count + 1)], Edge(v), isNewContour };
+ }
+ }
+ }
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPathRef.cpp b/gfx/skia/skia/src/core/SkPathRef.cpp
new file mode 100644
index 0000000000..0595a4bff5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathRef.cpp
@@ -0,0 +1,689 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkPathRef.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRRect.h"
+#include "include/private/base/SkOnce.h"
+#include "src/base/SkVx.h"
+
+#include <cstring>
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ static constexpr int kPathRefGenIDBitCnt = 30; // leave room for the fill type (skbug.com/1762)
+#else
+ static constexpr int kPathRefGenIDBitCnt = 32;
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+SkPathRef::Editor::Editor(sk_sp<SkPathRef>* pathRef,
+ int incReserveVerbs,
+ int incReservePoints)
+{
+ SkASSERT(incReserveVerbs >= 0);
+ SkASSERT(incReservePoints >= 0);
+
+ if ((*pathRef)->unique()) {
+ (*pathRef)->incReserve(incReserveVerbs, incReservePoints);
+ } else {
+ SkPathRef* copy;
+ // No need to copy if the existing ref is the empty ref (because it doesn't contain
+ // anything).
+ if (!(*pathRef)->isInitialEmptyPathRef()) {
+ copy = new SkPathRef;
+ copy->copy(**pathRef, incReserveVerbs, incReservePoints);
+ } else {
+ // Size previously empty paths to exactly fit the supplied hints. The assumpion is
+ // the caller knows the exact size they want (as happens in chrome when deserializing
+ // paths).
+ copy = new SkPathRef(incReserveVerbs, incReservePoints);
+ }
+ pathRef->reset(copy);
+ }
+ fPathRef = pathRef->get();
+ fPathRef->callGenIDChangeListeners();
+ fPathRef->fGenerationID = 0;
+ fPathRef->fBoundsIsDirty = true;
+ SkDEBUGCODE(fPathRef->fEditorsAttached++;)
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+size_t SkPathRef::approximateBytesUsed() const {
+ return sizeof(SkPathRef)
+ + fPoints .capacity() * sizeof(fPoints [0])
+ + fVerbs .capacity() * sizeof(fVerbs [0])
+ + fConicWeights.capacity() * sizeof(fConicWeights[0]);
+}
+
+SkPathRef::~SkPathRef() {
+ // Deliberately don't validate() this path ref, otherwise there's no way
+ // to read one that's not valid and then free its memory without asserting.
+ SkDEBUGCODE(fGenerationID = 0xEEEEEEEE;)
+ SkDEBUGCODE(fEditorsAttached.store(0x7777777);)
+}
+
+static SkPathRef* gEmpty = nullptr;
+
+SkPathRef* SkPathRef::CreateEmpty() {
+ static SkOnce once;
+ once([]{
+ gEmpty = new SkPathRef;
+ gEmpty->computeBounds(); // Avoids races later to be the first to do this.
+ });
+ return SkRef(gEmpty);
+}
+
+static void transform_dir_and_start(const SkMatrix& matrix, bool isRRect, bool* isCCW,
+ unsigned* start) {
+ int inStart = *start;
+ int rm = 0;
+ if (isRRect) {
+ // Degenerate rrect indices to oval indices and remember the remainder.
+ // Ovals have one index per side whereas rrects have two.
+ rm = inStart & 0b1;
+ inStart /= 2;
+ }
+ // Is the antidiagonal non-zero (otherwise the diagonal is zero)
+ int antiDiag;
+ // Is the non-zero value in the top row (either kMScaleX or kMSkewX) negative
+ int topNeg;
+ // Are the two non-zero diagonal or antidiagonal values the same sign.
+ int sameSign;
+ if (matrix.get(SkMatrix::kMScaleX) != 0) {
+ antiDiag = 0b00;
+ if (matrix.get(SkMatrix::kMScaleX) > 0) {
+ topNeg = 0b00;
+ sameSign = matrix.get(SkMatrix::kMScaleY) > 0 ? 0b01 : 0b00;
+ } else {
+ topNeg = 0b10;
+ sameSign = matrix.get(SkMatrix::kMScaleY) > 0 ? 0b00 : 0b01;
+ }
+ } else {
+ antiDiag = 0b01;
+ if (matrix.get(SkMatrix::kMSkewX) > 0) {
+ topNeg = 0b00;
+ sameSign = matrix.get(SkMatrix::kMSkewY) > 0 ? 0b01 : 0b00;
+ } else {
+ topNeg = 0b10;
+ sameSign = matrix.get(SkMatrix::kMSkewY) > 0 ? 0b00 : 0b01;
+ }
+ }
+ if (sameSign != antiDiag) {
+ // This is a rotation (and maybe scale). The direction is unchanged.
+ // Trust me on the start computation (or draw yourself some pictures)
+ *start = (inStart + 4 - (topNeg | antiDiag)) % 4;
+ SkASSERT(*start < 4);
+ if (isRRect) {
+ *start = 2 * *start + rm;
+ }
+ } else {
+ // This is a mirror (and maybe scale). The direction is reversed.
+ *isCCW = !*isCCW;
+ // Trust me on the start computation (or draw yourself some pictures)
+ *start = (6 + (topNeg | antiDiag) - inStart) % 4;
+ SkASSERT(*start < 4);
+ if (isRRect) {
+ *start = 2 * *start + (rm ? 0 : 1);
+ }
+ }
+}
+
+void SkPathRef::CreateTransformedCopy(sk_sp<SkPathRef>* dst,
+ const SkPathRef& src,
+ const SkMatrix& matrix) {
+ SkDEBUGCODE(src.validate();)
+ if (matrix.isIdentity()) {
+ if (dst->get() != &src) {
+ src.ref();
+ dst->reset(const_cast<SkPathRef*>(&src));
+ SkDEBUGCODE((*dst)->validate();)
+ }
+ return;
+ }
+
+ sk_sp<const SkPathRef> srcKeepAlive;
+ if (!(*dst)->unique()) {
+ // If dst and src are the same then we are about to drop our only ref on the common path
+ // ref. Some other thread may have owned src when we checked unique() above but it may not
+ // continue to do so. Add another ref so we continue to be an owner until we're done.
+ if (dst->get() == &src) {
+ srcKeepAlive.reset(SkRef(&src));
+ }
+ dst->reset(new SkPathRef);
+ }
+
+ if (dst->get() != &src) {
+ (*dst)->fVerbs = src.fVerbs;
+ (*dst)->fConicWeights = src.fConicWeights;
+ (*dst)->callGenIDChangeListeners();
+ (*dst)->fGenerationID = 0; // mark as dirty
+ // don't copy, just allocate the points
+ (*dst)->fPoints.resize(src.fPoints.size());
+ }
+ matrix.mapPoints((*dst)->fPoints.begin(), src.fPoints.begin(), src.fPoints.size());
+
+ // Need to check this here in case (&src == dst)
+ bool canXformBounds = !src.fBoundsIsDirty && matrix.rectStaysRect() && src.countPoints() > 1;
+
+ /*
+ * Here we optimize the bounds computation, by noting if the bounds are
+ * already known, and if so, we just transform those as well and mark
+ * them as "known", rather than force the transformed path to have to
+ * recompute them.
+ *
+ * Special gotchas if the path is effectively empty (<= 1 point) or
+ * if it is non-finite. In those cases bounds need to stay empty,
+ * regardless of the matrix.
+ */
+ if (canXformBounds) {
+ (*dst)->fBoundsIsDirty = false;
+ if (src.fIsFinite) {
+ matrix.mapRect(&(*dst)->fBounds, src.fBounds);
+ if (!((*dst)->fIsFinite = (*dst)->fBounds.isFinite())) {
+ (*dst)->fBounds.setEmpty();
+ } else if (src.countPoints() & 1) {
+ /* Matrix optimizations may cause the first point to use slightly different
+ * math for its transform, which can lead to it being outside the transformed
+ * bounds. Include it in the bounds just in case.
+ */
+ SkPoint p = (*dst)->fPoints[0];
+ SkRect& r = (*dst)->fBounds;
+ r.fLeft = std::min(r.fLeft, p.fX);
+ r.fTop = std::min(r.fTop, p.fY);
+ r.fRight = std::max(r.fRight, p.fX);
+ r.fBottom = std::max(r.fBottom, p.fY);
+ }
+ } else {
+ (*dst)->fIsFinite = false;
+ (*dst)->fBounds.setEmpty();
+ }
+ } else {
+ (*dst)->fBoundsIsDirty = true;
+ }
+
+ (*dst)->fSegmentMask = src.fSegmentMask;
+
+ // It's an oval only if it stays a rect.
+ bool rectStaysRect = matrix.rectStaysRect();
+ (*dst)->fIsOval = src.fIsOval && rectStaysRect;
+ (*dst)->fIsRRect = src.fIsRRect && rectStaysRect;
+ if ((*dst)->fIsOval || (*dst)->fIsRRect) {
+ unsigned start = src.fRRectOrOvalStartIdx;
+ bool isCCW = SkToBool(src.fRRectOrOvalIsCCW);
+ transform_dir_and_start(matrix, (*dst)->fIsRRect, &isCCW, &start);
+ (*dst)->fRRectOrOvalIsCCW = isCCW;
+ (*dst)->fRRectOrOvalStartIdx = start;
+ }
+
+ if (dst->get() == &src) {
+ (*dst)->callGenIDChangeListeners();
+ (*dst)->fGenerationID = 0;
+ }
+
+ SkDEBUGCODE((*dst)->validate();)
+}
+
+void SkPathRef::Rewind(sk_sp<SkPathRef>* pathRef) {
+ if ((*pathRef)->unique()) {
+ SkDEBUGCODE((*pathRef)->validate();)
+ (*pathRef)->callGenIDChangeListeners();
+ (*pathRef)->fBoundsIsDirty = true; // this also invalidates fIsFinite
+ (*pathRef)->fGenerationID = 0;
+ (*pathRef)->fPoints.clear();
+ (*pathRef)->fVerbs.clear();
+ (*pathRef)->fConicWeights.clear();
+ (*pathRef)->fSegmentMask = 0;
+ (*pathRef)->fIsOval = false;
+ (*pathRef)->fIsRRect = false;
+ SkDEBUGCODE((*pathRef)->validate();)
+ } else {
+ int oldVCnt = (*pathRef)->countVerbs();
+ int oldPCnt = (*pathRef)->countPoints();
+ pathRef->reset(new SkPathRef);
+ (*pathRef)->resetToSize(0, 0, 0, oldVCnt, oldPCnt);
+ }
+}
+
+bool SkPathRef::operator== (const SkPathRef& ref) const {
+ SkDEBUGCODE(this->validate();)
+ SkDEBUGCODE(ref.validate();)
+
+ // We explicitly check fSegmentMask as a quick-reject. We could skip it,
+ // since it is only a cache of info in the fVerbs, but its a fast way to
+ // notice a difference
+ if (fSegmentMask != ref.fSegmentMask) {
+ return false;
+ }
+
+ bool genIDMatch = fGenerationID && fGenerationID == ref.fGenerationID;
+#ifdef SK_RELEASE
+ if (genIDMatch) {
+ return true;
+ }
+#endif
+ if (fPoints != ref.fPoints || fConicWeights != ref.fConicWeights || fVerbs != ref.fVerbs) {
+ SkASSERT(!genIDMatch);
+ return false;
+ }
+ if (ref.fVerbs.empty()) {
+ SkASSERT(ref.fPoints.empty());
+ }
+ return true;
+}
+
+void SkPathRef::copy(const SkPathRef& ref,
+ int additionalReserveVerbs,
+ int additionalReservePoints) {
+ SkDEBUGCODE(this->validate();)
+ this->resetToSize(ref.fVerbs.size(), ref.fPoints.size(), ref.fConicWeights.size(),
+ additionalReserveVerbs, additionalReservePoints);
+ fVerbs = ref.fVerbs;
+ fPoints = ref.fPoints;
+ fConicWeights = ref.fConicWeights;
+ fBoundsIsDirty = ref.fBoundsIsDirty;
+ if (!fBoundsIsDirty) {
+ fBounds = ref.fBounds;
+ fIsFinite = ref.fIsFinite;
+ }
+ fSegmentMask = ref.fSegmentMask;
+ fIsOval = ref.fIsOval;
+ fIsRRect = ref.fIsRRect;
+ fRRectOrOvalIsCCW = ref.fRRectOrOvalIsCCW;
+ fRRectOrOvalStartIdx = ref.fRRectOrOvalStartIdx;
+ SkDEBUGCODE(this->validate();)
+}
+
+void SkPathRef::interpolate(const SkPathRef& ending, SkScalar weight, SkPathRef* out) const {
+ const SkScalar* inValues = &ending.getPoints()->fX;
+ SkScalar* outValues = &out->getWritablePoints()->fX;
+ int count = out->countPoints() * 2;
+ for (int index = 0; index < count; ++index) {
+ outValues[index] = outValues[index] * weight + inValues[index] * (1 - weight);
+ }
+ out->fBoundsIsDirty = true;
+ out->fIsOval = false;
+ out->fIsRRect = false;
+}
+
+std::tuple<SkPoint*, SkScalar*> SkPathRef::growForVerbsInPath(const SkPathRef& path) {
+ SkDEBUGCODE(this->validate();)
+
+ fSegmentMask |= path.fSegmentMask;
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fIsOval = false;
+ fIsRRect = false;
+
+ if (int numVerbs = path.countVerbs()) {
+ memcpy(fVerbs.push_back_n(numVerbs), path.fVerbs.begin(), numVerbs * sizeof(fVerbs[0]));
+ }
+
+ SkPoint* pts = nullptr;
+ if (int numPts = path.countPoints()) {
+ pts = fPoints.push_back_n(numPts);
+ }
+
+ SkScalar* weights = nullptr;
+ if (int numConics = path.countWeights()) {
+ weights = fConicWeights.push_back_n(numConics);
+ }
+
+ SkDEBUGCODE(this->validate();)
+ return {pts, weights};
+}
+
+SkPoint* SkPathRef::growForRepeatedVerb(int /*SkPath::Verb*/ verb,
+ int numVbs,
+ SkScalar** weights) {
+ SkDEBUGCODE(this->validate();)
+ int pCnt;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ pCnt = numVbs;
+ break;
+ case SkPath::kLine_Verb:
+ fSegmentMask |= SkPath::kLine_SegmentMask;
+ pCnt = numVbs;
+ break;
+ case SkPath::kQuad_Verb:
+ fSegmentMask |= SkPath::kQuad_SegmentMask;
+ pCnt = 2 * numVbs;
+ break;
+ case SkPath::kConic_Verb:
+ fSegmentMask |= SkPath::kConic_SegmentMask;
+ pCnt = 2 * numVbs;
+ break;
+ case SkPath::kCubic_Verb:
+ fSegmentMask |= SkPath::kCubic_SegmentMask;
+ pCnt = 3 * numVbs;
+ break;
+ case SkPath::kClose_Verb:
+ SkDEBUGFAIL("growForRepeatedVerb called for kClose_Verb");
+ pCnt = 0;
+ break;
+ case SkPath::kDone_Verb:
+ SkDEBUGFAIL("growForRepeatedVerb called for kDone");
+ pCnt = 0;
+ break;
+ default:
+ SkDEBUGFAIL("default should not be reached");
+ pCnt = 0;
+ break;
+ }
+
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fIsOval = false;
+ fIsRRect = false;
+
+ memset(fVerbs.push_back_n(numVbs), verb, numVbs);
+ if (SkPath::kConic_Verb == verb) {
+ SkASSERT(weights);
+ *weights = fConicWeights.push_back_n(numVbs);
+ }
+ SkPoint* pts = fPoints.push_back_n(pCnt);
+
+ SkDEBUGCODE(this->validate();)
+ return pts;
+}
+
+SkPoint* SkPathRef::growForVerb(int /* SkPath::Verb*/ verb, SkScalar weight) {
+ SkDEBUGCODE(this->validate();)
+ int pCnt;
+ unsigned mask = 0;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ pCnt = 1;
+ break;
+ case SkPath::kLine_Verb:
+ mask = SkPath::kLine_SegmentMask;
+ pCnt = 1;
+ break;
+ case SkPath::kQuad_Verb:
+ mask = SkPath::kQuad_SegmentMask;
+ pCnt = 2;
+ break;
+ case SkPath::kConic_Verb:
+ mask = SkPath::kConic_SegmentMask;
+ pCnt = 2;
+ break;
+ case SkPath::kCubic_Verb:
+ mask = SkPath::kCubic_SegmentMask;
+ pCnt = 3;
+ break;
+ case SkPath::kClose_Verb:
+ pCnt = 0;
+ break;
+ case SkPath::kDone_Verb:
+ SkDEBUGFAIL("growForVerb called for kDone");
+ pCnt = 0;
+ break;
+ default:
+ SkDEBUGFAIL("default is not reached");
+ pCnt = 0;
+ break;
+ }
+
+ fSegmentMask |= mask;
+ fBoundsIsDirty = true; // this also invalidates fIsFinite
+ fIsOval = false;
+ fIsRRect = false;
+
+ fVerbs.push_back(verb);
+ if (SkPath::kConic_Verb == verb) {
+ fConicWeights.push_back(weight);
+ }
+ SkPoint* pts = fPoints.push_back_n(pCnt);
+
+ SkDEBUGCODE(this->validate();)
+ return pts;
+}
+
// Lazily computes and caches the generation ID. 0 is reserved as "invalid"
// and kEmptyGenID as "empty path", so freshly minted IDs skip both values.
// Must not be called while an editor is attached (the ID would go stale).
uint32_t SkPathRef::genID(uint8_t fillType) const {
    SkASSERT(fEditorsAttached.load() == 0);
    static const uint32_t kMask = (static_cast<int64_t>(1) << kPathRefGenIDBitCnt) - 1;

    if (fGenerationID == 0) {
        if (fPoints.empty() && fVerbs.empty()) {
            fGenerationID = kEmptyGenID;
        } else {
            static std::atomic<uint32_t> nextID{kEmptyGenID + 1};
            do {
                // Relaxed ordering suffices: we only need a unique value, not
                // any ordering with other memory operations.
                fGenerationID = nextID.fetch_add(1, std::memory_order_relaxed) & kMask;
            } while (fGenerationID == 0 || fGenerationID == kEmptyGenID);
        }
    }
    #if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
    // Android packs the fill type into the bits above the genID proper.
    SkASSERT((unsigned)fillType < (1 << (32 - kPathRefGenIDBitCnt)));
    fGenerationID |= static_cast<uint32_t>(fillType) << kPathRefGenIDBitCnt;
    #endif
    return fGenerationID;
}
+
+void SkPathRef::addGenIDChangeListener(sk_sp<SkIDChangeListener> listener) {
+ if (this == gEmpty) {
+ return;
+ }
+ fGenIDChangeListeners.add(std::move(listener));
+}
+
+int SkPathRef::genIDChangeListenerCount() { return fGenIDChangeListeners.count(); }
+
// We need to be called *before* the genID gets changed or zeroed.
void SkPathRef::callGenIDChangeListeners() {
    // Fires everything registered via addGenIDChangeListener().
    fGenIDChangeListeners.changed();
}
+
// Reconstructs the SkRRect this path represents: corner radii are recovered
// from the axis-aligned conic segments and combined with the cached bounds.
// Only meaningful when the path is known to be a stored round rect (fIsRRect).
SkRRect SkPathRef::getRRect() const {
    const SkRect& bounds = this->getBounds();
    SkVector radii[4] = {{0, 0}, {0, 0}, {0, 0}, {0, 0}};
    Iter iter(*this);
    SkPoint pts[4];
    uint8_t verb = iter.next(pts);
    SkASSERT(SkPath::kMove_Verb == verb);
    while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
        if (SkPath::kConic_Verb == verb) {
            // Each corner conic has one horizontal and one vertical leg; the
            // asserts below enforce that, and |dx|,|dy| give the radii.
            SkVector v1_0 = pts[1] - pts[0];
            SkVector v2_1 = pts[2] - pts[1];
            SkVector dxdy;
            if (v1_0.fX) {
                SkASSERT(!v2_1.fX && !v1_0.fY);
                dxdy.set(SkScalarAbs(v1_0.fX), SkScalarAbs(v2_1.fY));
            } else if (!v1_0.fY) {
                SkASSERT(!v2_1.fX || !v2_1.fY);
                dxdy.set(SkScalarAbs(v2_1.fX), SkScalarAbs(v2_1.fY));
            } else {
                SkASSERT(!v2_1.fY);
                dxdy.set(SkScalarAbs(v2_1.fX), SkScalarAbs(v1_0.fY));
            }
            // Classify the corner by which bounds corner the conic's control
            // point (pts[1]) coincides with.
            SkRRect::Corner corner =
                    pts[1].fX == bounds.fLeft ?
                        pts[1].fY == bounds.fTop ?
                            SkRRect::kUpperLeft_Corner : SkRRect::kLowerLeft_Corner :
                        pts[1].fY == bounds.fTop ?
                            SkRRect::kUpperRight_Corner : SkRRect::kLowerRight_Corner;
            SkASSERT(!radii[corner].fX && !radii[corner].fY);
            radii[corner] = dxdy;
        } else {
            // Everything else must be an axis-aligned edge or a close.
            SkASSERT((verb == SkPath::kLine_Verb
                    && (!(pts[1].fX - pts[0].fX) || !(pts[1].fY - pts[0].fY)))
                    || verb == SkPath::kClose_Verb);
        }
    }
    SkRRect rrect;
    rrect.setRectRadii(bounds, radii);
    return rrect;
}
+
+bool SkPathRef::isRRect(SkRRect* rrect, bool* isCCW, unsigned* start) const {
+ if (fIsRRect) {
+ if (rrect) {
+ *rrect = this->getRRect();
+ }
+ if (isCCW) {
+ *isCCW = SkToBool(fRRectOrOvalIsCCW);
+ }
+ if (start) {
+ *start = fRRectOrOvalStartIdx;
+ }
+ }
+ return SkToBool(fIsRRect);
+ }
+
+///////////////////////////////////////////////////////////////////////////////
+
// Default iterator: initialized just enough that next() harmlessly returns
// kDone_Verb without dereferencing anything.
SkPathRef::Iter::Iter() {
#ifdef SK_DEBUG
    fPts = nullptr;
    fConicWeights = nullptr;
#endif
    // need to init enough to make next() harmlessly return kDone_Verb
    fVerbs = nullptr;
    fVerbStop = nullptr;
}
+
// Convenience constructor: immediately binds the iterator to `path`.
SkPathRef::Iter::Iter(const SkPathRef& path) {
    this->setPathRef(path);
}
+
// (Re)binds this iterator to `path`. The conic-weight cursor starts one entry
// behind the first weight because next() pre-increments it on each kConic_Verb.
void SkPathRef::Iter::setPathRef(const SkPathRef& path) {
    fPts = path.points();
    fVerbs = path.verbsBegin();
    fVerbStop = path.verbsEnd();
    fConicWeights = path.conicWeights();
    if (fConicWeights) {
        fConicWeights -= 1;  // begin one behind
    }

    // Don't allow iteration through non-finite points.
    if (!path.isFinite()) {
        fVerbStop = fVerbs;
    }
}
+
// Advances to the next verb, copying its points (including the previous
// on-curve point at pts[0] for drawing verbs) into `pts`. Returns the verb,
// or kDone_Verb when exhausted. fPts always points one past the last
// consumed point, so srcPts[-1] is the current contour's last on-curve point.
uint8_t SkPathRef::Iter::next(SkPoint pts[4]) {
    SkASSERT(pts);

    SkDEBUGCODE(unsigned peekResult = this->peek();)

    if (fVerbs == fVerbStop) {
        SkASSERT(peekResult == SkPath::kDone_Verb);
        return (uint8_t) SkPath::kDone_Verb;
    }

    // Consume the next verb (verbs are stored and walked forward).
    unsigned verb = *fVerbs++;
    const SkPoint* srcPts = fPts;

    switch (verb) {
        case SkPath::kMove_Verb:
            pts[0] = srcPts[0];
            srcPts += 1;
            break;
        case SkPath::kLine_Verb:
            pts[0] = srcPts[-1];
            pts[1] = srcPts[0];
            srcPts += 1;
            break;
        case SkPath::kConic_Verb:
            // Conic consumes a weight; point layout matches a quad.
            fConicWeights += 1;
            [[fallthrough]];
        case SkPath::kQuad_Verb:
            pts[0] = srcPts[-1];
            pts[1] = srcPts[0];
            pts[2] = srcPts[1];
            srcPts += 2;
            break;
        case SkPath::kCubic_Verb:
            pts[0] = srcPts[-1];
            pts[1] = srcPts[0];
            pts[2] = srcPts[1];
            pts[3] = srcPts[2];
            srcPts += 3;
            break;
        case SkPath::kClose_Verb:
            break;
        case SkPath::kDone_Verb:
            SkASSERT(fVerbs == fVerbStop);
            break;
    }
    fPts = srcPts;
    SkASSERT(peekResult == verb);
    return (uint8_t) verb;
}
+
+uint8_t SkPathRef::Iter::peek() const {
+ return fVerbs < fVerbStop ? *fVerbs : (uint8_t) SkPath::kDone_Verb;
+}
+
+
+bool SkPathRef::isValid() const {
+ if (fIsOval || fIsRRect) {
+ // Currently we don't allow both of these to be set, even though ovals are ro
+ if (fIsOval == fIsRRect) {
+ return false;
+ }
+ if (fIsOval) {
+ if (fRRectOrOvalStartIdx >= 4) {
+ return false;
+ }
+ } else {
+ if (fRRectOrOvalStartIdx >= 8) {
+ return false;
+ }
+ }
+ }
+
+ if (!fBoundsIsDirty && !fBounds.isEmpty()) {
+ bool isFinite = true;
+ auto leftTop = skvx::float2(fBounds.fLeft, fBounds.fTop);
+ auto rightBot = skvx::float2(fBounds.fRight, fBounds.fBottom);
+ for (int i = 0; i < fPoints.size(); ++i) {
+ auto point = skvx::float2(fPoints[i].fX, fPoints[i].fY);
+#ifdef SK_DEBUG
+ if (fPoints[i].isFinite() && (any(point < leftTop)|| any(point > rightBot))) {
+ SkDebugf("bad SkPathRef bounds: %g %g %g %g\n",
+ fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+ for (int j = 0; j < fPoints.size(); ++j) {
+ if (i == j) {
+ SkDebugf("*** bounds do not contain: ");
+ }
+ SkDebugf("%g %g\n", fPoints[j].fX, fPoints[j].fY);
+ }
+ return false;
+ }
+#endif
+
+ if (fPoints[i].isFinite() && any(point < leftTop) && !any(point > rightBot))
+ return false;
+ if (!fPoints[i].isFinite()) {
+ isFinite = false;
+ }
+ }
+ if (SkToBool(fIsFinite) != isFinite) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void SkPathRef::reset() {
+ commonReset();
+ fPoints.clear();
+ fVerbs.clear();
+ fConicWeights.clear();
+ SkDEBUGCODE(validate();)
+}
+
+bool SkPathRef::dataMatchesVerbs() const {
+ const auto info = sk_path_analyze_verbs(fVerbs.begin(), fVerbs.size());
+ return info.valid &&
+ info.segmentMask == fSegmentMask &&
+ info.points == fPoints.size() &&
+ info.weights == fConicWeights.size();
+}
diff --git a/gfx/skia/skia/src/core/SkPathUtils.cpp b/gfx/skia/skia/src/core/SkPathUtils.cpp
new file mode 100644
index 0000000000..368292ca91
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPathUtils.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPathUtils.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkStrokeRec.h"
+#include "src/core/SkMatrixPriv.h"
+
+namespace skpathutils {
+
// Convenience overload: no cull rect, unit resolution scale.
bool FillPathWithPaint(const SkPath& src, const SkPaint& paint, SkPath* dst) {
    return skpathutils::FillPathWithPaint(src, paint, dst, nullptr, 1);
}
+
// Convenience overload: expresses a scalar resolution scale as a uniform
// scale matrix and forwards to the matrix-based implementation.
bool FillPathWithPaint(const SkPath& src, const SkPaint& paint, SkPath* dst,
                       const SkRect* cullRect, SkScalar resScale) {
    return skpathutils::FillPathWithPaint(src, paint, dst, cullRect,
                                          SkMatrix::Scale(resScale, resScale));
}
+
// Computes the fill-equivalent of drawing `src` with `paint` (applying any
// path effect and stroking) into `dst`. Returns true if `dst` should be
// filled, false if it should be drawn as a hairline (or the input/output was
// non-finite, in which case `dst` is reset).
bool FillPathWithPaint(const SkPath& src, const SkPaint& paint, SkPath* dst,
                       const SkRect* cullRect, const SkMatrix& ctm) {
    if (!src.isFinite()) {
        dst->reset();
        return false;
    }

    // Stroke precision is derived from the CTM so curves stay smooth on screen.
    const SkScalar resScale = SkMatrixPriv::ComputeResScaleForStroking(ctm);
    SkStrokeRec rec(paint, resScale);

#if defined(SK_BUILD_FOR_FUZZER)
    // Prevent lines with small widths from timing out.
    if (rec.getStyle() == SkStrokeRec::Style::kStroke_Style && rec.getWidth() < 0.001) {
        return false;
    }
#endif

    const SkPath* srcPtr = &src;
    SkPath tmpPath;

    // Apply the paint's path effect first, if any; it may replace the source.
    SkPathEffect* pe = paint.getPathEffect();
    if (pe && pe->filterPath(&tmpPath, src, &rec, cullRect, ctm)) {
        srcPtr = &tmpPath;
    }

    if (!rec.applyToPath(dst, *srcPtr)) {
        if (srcPtr == &tmpPath) {
            // If path's were copy-on-write, this trick would not be needed.
            // As it is, we want to save making a deep-copy from tmpPath -> dst
            // since we know we're just going to delete tmpPath when we return,
            // so the swap saves that copy.
            dst->swap(tmpPath);
        } else {
            *dst = *srcPtr;
        }
    }

    if (!dst->isFinite()) {
        dst->reset();
        return false;
    }
    return !rec.isHairlineStyle();
}
+
+} // namespace skpathutils
+
// Global-namespace wrappers delegating to skpathutils::FillPathWithPaint.
// NOTE(review): presumably kept for source/ABI compatibility with callers
// predating the skpathutils namespace — confirm before removing.
SK_API bool FillPathWithPaint(const SkPath &src, const SkPaint &paint, SkPath *dst,
                              const SkRect *cullRect, SkScalar resScale) {
    return skpathutils::FillPathWithPaint(src, paint, dst, cullRect, resScale);
}

SK_API bool FillPathWithPaint(const SkPath &src, const SkPaint &paint, SkPath *dst,
                              const SkRect *cullRect, const SkMatrix &ctm) {
    return skpathutils::FillPathWithPaint(src, paint, dst, cullRect, ctm);
}

SK_API bool FillPathWithPaint(const SkPath &src, const SkPaint &paint, SkPath *dst) {
    return skpathutils::FillPathWithPaint(src, paint, dst);
}
diff --git a/gfx/skia/skia/src/core/SkPath_serial.cpp b/gfx/skia/skia/src/core/SkPath_serial.cpp
new file mode 100644
index 0000000000..ffdc5b0e3d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPath_serial.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "include/private/SkPathRef.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkPathEnums.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkBuffer.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRRectPriv.h"
+
+#include <cmath>
+
// Layout of the 32-bit packed header word written at the front of a
// serialized SkPath: [type:4][dir:2][...][fillType:8][version:8].
enum SerializationOffsets {
    kType_SerializationShift = 28,       // requires 4 bits
    kDirection_SerializationShift = 26,  // requires 2 bits
    kFillType_SerializationShift = 8,    // requires 8 bits
    // low-8-bits are version
    kVersion_SerializationMask = 0xFF,
};

enum SerializationVersions {
    // kPathPrivFirstDirection_Version = 1,
    // kPathPrivLastMoveToIndex_Version = 2,
    // kPathPrivTypeEnumVersion = 3,
    kJustPublicData_Version = 4,          // introduced Feb/2018
    kVerbsAreStoredForward_Version = 5,   // introduced Sept/2019

    kMin_Version = kJustPublicData_Version,
    kCurrent_Version = kVerbsAreStoredForward_Version
};

// Serialized payload kind: a full verb/point stream, or a compact rrect form.
enum SerializationType {
    kGeneral = 0,
    kRRect = 1
};

// Low 8 bits of the packed header hold the serialization version.
static unsigned extract_version(uint32_t packed) {
    return packed & kVersion_SerializationMask;
}

// Fill type occupies 2 of the 8 bits reserved at kFillType_SerializationShift.
static SkPathFillType extract_filltype(uint32_t packed) {
    return static_cast<SkPathFillType>((packed >> kFillType_SerializationShift) & 0x3);
}

// Top 4 bits select the SerializationType.
static SerializationType extract_serializationtype(uint32_t packed) {
    return static_cast<SerializationType>((packed >> kType_SerializationShift) & 0xF);
}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
// Serializes the path in compact rrect form when it is an oval or round rect.
// Returns the byte count written (or required, when storage is null), or 0
// when the path is not representable this way and the general path applies.
size_t SkPath::writeToMemoryAsRRect(void* storage) const {
    SkRect oval;
    SkRRect rrect;
    bool isCCW;
    unsigned start;
    if (fPathRef->isOval(&oval, &isCCW, &start)) {
        rrect.setOval(oval);
        // Convert to rrect start indices.
        start *= 2;
    } else if (!fPathRef->isRRect(&rrect, &isCCW, &start)) {
        return 0;
    }

    // packed header, rrect, start index.
    const size_t sizeNeeded = sizeof(int32_t) + SkRRect::kSizeInMemory + sizeof(int32_t);
    if (!storage) {
        return sizeNeeded;
    }

    int firstDir = isCCW ? (int)SkPathFirstDirection::kCCW : (int)SkPathFirstDirection::kCW;
    int32_t packed = (fFillType << kFillType_SerializationShift) |
                     (firstDir << kDirection_SerializationShift) |
                     (SerializationType::kRRect << kType_SerializationShift) |
                     kCurrent_Version;

    SkWBuffer buffer(storage);
    buffer.write32(packed);
    SkRRectPriv::WriteToBuffer(rrect, &buffer);
    buffer.write32(SkToS32(start));
    buffer.padToAlign4();
    SkASSERT(sizeNeeded == buffer.pos());
    return buffer.pos();
}
+
// Serializes the path. Tries the compact rrect form first, falling back to
// the general form: packed header + counts + points + conic weights + verbs,
// padded to 4 bytes. With null storage, only computes and returns the size.
// Returns 0 if the size computation overflows.
size_t SkPath::writeToMemory(void* storage) const {
    SkDEBUGCODE(this->validate();)

    if (size_t bytes = this->writeToMemoryAsRRect(storage)) {
        return bytes;
    }

    int32_t packed = (fFillType << kFillType_SerializationShift) |
                     (SerializationType::kGeneral << kType_SerializationShift) |
                     kCurrent_Version;

    int32_t pts = fPathRef->countPoints();
    int32_t cnx = fPathRef->countWeights();
    int32_t vbs = fPathRef->countVerbs();

    // SkSafeMath guards against overflow on hostile/huge counts.
    SkSafeMath safe;
    size_t size = 4 * sizeof(int32_t);
    size = safe.add(size, safe.mul(pts, sizeof(SkPoint)));
    size = safe.add(size, safe.mul(cnx, sizeof(SkScalar)));
    size = safe.add(size, safe.mul(vbs, sizeof(uint8_t)));
    size = safe.alignUp(size, 4);
    if (!safe) {
        return 0;
    }
    if (!storage) {
        return size;
    }

    SkWBuffer buffer(storage);
    buffer.write32(packed);
    buffer.write32(pts);
    buffer.write32(cnx);
    buffer.write32(vbs);
    buffer.write(fPathRef->points(), pts * sizeof(SkPoint));
    buffer.write(fPathRef->conicWeights(), cnx * sizeof(SkScalar));
    buffer.write(fPathRef->verbsBegin(), vbs * sizeof(uint8_t));
    buffer.padToAlign4();

    SkASSERT(buffer.pos() == size);
    return size;
}
+
// Serializes into a freshly allocated SkData (two passes: measure, write).
sk_sp<SkData> SkPath::serialize() const {
    size_t size = this->writeToMemory(nullptr);
    sk_sp<SkData> data = SkData::MakeUninitialized(size);
    this->writeToMemory(data->writable_data());
    return data;
}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// reading
+
// Deserializes a path previously written by writeToMemory. Returns the number
// of bytes consumed, or 0 on any failure (short buffer, unsupported version).
size_t SkPath::readFromMemory(const void* storage, size_t length) {
    SkRBuffer buffer(storage, length);
    uint32_t packed;
    if (!buffer.readU32(&packed)) {
        return 0;
    }
    unsigned version = extract_version(packed);
    if (version < kMin_Version || version > kCurrent_Version) {
        return 0;
    }

    // Versions 4 and 5 share a reader; only verb order differs between them.
    if (version == kJustPublicData_Version || version == kVerbsAreStoredForward_Version) {
        return this->readFromMemory_EQ4Or5(storage, length);
    }
    return 0;
}
+
// Deserializes the compact rrect form: packed header, SkRRect payload, start
// index. Replaces this path's contents on success; returns bytes consumed,
// or 0 on malformed input (bad direction, bad rrect, start out of [0,7]).
size_t SkPath::readAsRRect(const void* storage, size_t length) {
    SkRBuffer buffer(storage, length);
    uint32_t packed;
    if (!buffer.readU32(&packed)) {
        return 0;
    }

    SkASSERT(extract_serializationtype(packed) == SerializationType::kRRect);

    uint8_t dir = (packed >> kDirection_SerializationShift) & 0x3;
    SkPathFillType fillType = extract_filltype(packed);

    SkPathDirection rrectDir;
    SkRRect rrect;
    int32_t start;
    switch (dir) {
        case (int)SkPathFirstDirection::kCW:
            rrectDir = SkPathDirection::kCW;
            break;
        case (int)SkPathFirstDirection::kCCW:
            rrectDir = SkPathDirection::kCCW;
            break;
        default:
            return 0;
    }
    if (!SkRRectPriv::ReadFromBuffer(&buffer, &rrect)) {
        return 0;
    }
    // An rrect has 8 possible start points, so start must be within [0,7].
    if (!buffer.readS32(&start) || start != SkTPin(start, 0, 7)) {
        return 0;
    }
    this->reset();
    this->addRRect(rrect, rrectDir, SkToUInt(start));
    this->setFillType(fillType);
    buffer.skipToAlign4();
    return buffer.pos();
}
+
// Shared reader for serialization versions 4 and 5. Version 4 stored verbs in
// reverse order, so the verb cursor may walk backwards. Rebuilds the path via
// the public SkPath builder calls, validating point/conic counts as it goes.
// Returns bytes consumed, or 0 on any malformed input.
size_t SkPath::readFromMemory_EQ4Or5(const void* storage, size_t length) {
    SkRBuffer buffer(storage, length);
    uint32_t packed;
    if (!buffer.readU32(&packed)) {
        return 0;
    }

    bool verbsAreReversed = true;
    if (extract_version(packed) == kVerbsAreStoredForward_Version) {
        verbsAreReversed = false;
    }

    switch (extract_serializationtype(packed)) {
        case SerializationType::kRRect:
            return this->readAsRRect(storage, length);
        case SerializationType::kGeneral:
            break;  // fall out
        default:
            return 0;
    }

    int32_t pts, cnx, vbs;
    if (!buffer.readS32(&pts) || !buffer.readS32(&cnx) || !buffer.readS32(&vbs)) {
        return 0;
    }

    // Locate the three arrays in-place; skipCount validates buffer length.
    const SkPoint* points = buffer.skipCount<SkPoint>(pts);
    const SkScalar* conics = buffer.skipCount<SkScalar>(cnx);
    const uint8_t* verbs = buffer.skipCount<uint8_t>(vbs);
    buffer.skipToAlign4();
    if (!buffer.isValid()) {
        return 0;
    }
    SkASSERT(buffer.pos() <= length);

// Decrements the remaining point/conic budgets, failing on underflow so a
// hostile verb stream cannot read past the serialized arrays.
#define CHECK_POINTS_CONICS(p, c)       \
    do {                                \
        if (p && ((pts -= p) < 0)) {    \
            return 0;                   \
        }                               \
        if (c && ((cnx -= c) < 0)) {    \
            return 0;                   \
        }                               \
    } while (0)

    int verbsStep = 1;
    if (verbsAreReversed) {
        // Version 4: verbs were written last-to-first; walk them backwards.
        verbs += vbs - 1;
        verbsStep = -1;
    }

    SkPath tmp;
    tmp.setFillType(extract_filltype(packed));
    {
        // Reserve the exact number of verbs and points needed.
        SkPathRef::Editor(&tmp.fPathRef, vbs, pts);
    }
    for (int i = 0; i < vbs; ++i) {
        switch (*verbs) {
            case kMove_Verb:
                CHECK_POINTS_CONICS(1, 0);
                tmp.moveTo(*points++);
                break;
            case kLine_Verb:
                CHECK_POINTS_CONICS(1, 0);
                tmp.lineTo(*points++);
                break;
            case kQuad_Verb:
                CHECK_POINTS_CONICS(2, 0);
                tmp.quadTo(points[0], points[1]);
                points += 2;
                break;
            case kConic_Verb:
                CHECK_POINTS_CONICS(2, 1);
                tmp.conicTo(points[0], points[1], *conics++);
                points += 2;
                break;
            case kCubic_Verb:
                CHECK_POINTS_CONICS(3, 0);
                tmp.cubicTo(points[0], points[1], points[2]);
                points += 3;
                break;
            case kClose_Verb:
                tmp.close();
                break;
            default:
                return 0;  // bad verb
        }
        verbs += verbsStep;
    }
#undef CHECK_POINTS_CONICS
    if (pts || cnx) {
        return 0;  // leftover points and/or conics
    }

    *this = std::move(tmp);
    return buffer.pos();
}
diff --git a/gfx/skia/skia/src/core/SkPicture.cpp b/gfx/skia/skia/src/core/SkPicture.cpp
new file mode 100644
index 0000000000..609943748d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicture.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPicture.h"
+
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkSerialProcs.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkPicturePlayback.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkPictureRecord.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkStreamPriv.h"
+
+#include <atomic>
+
+#if defined(SK_GANESH)
+#include "include/private/chromium/Slug.h"
+#endif
+
+// When we read/write the SkPictInfo via a stream, we have a sentinel byte right after the info.
+// Note: in the read/write buffer versions, we have a slightly different convention:
+// We have a sentinel int32_t:
+// 0 : failure
+// 1 : PictureData
+// <0 : -size of the custom data
+enum {
+ kFailure_TrailingStreamByteAfterPictInfo = 0, // nothing follows
+ kPictureData_TrailingStreamByteAfterPictInfo = 1, // SkPictureData follows
+ kCustom_TrailingStreamByteAfterPictInfo = 2, // -size32 follows
+};
+
+/* SkPicture impl. This handles generic responsibilities like unique IDs and serialization. */
+
+SkPicture::SkPicture() {
+ static std::atomic<uint32_t> nextID{1};
+ do {
+ fUniqueID = nextID.fetch_add(+1, std::memory_order_relaxed);
+ } while (fUniqueID == 0);
+}
+
// If this picture ever entered the resource cache, purge its entries so the
// cache does not hold stale data keyed by our (now dead) unique ID.
SkPicture::~SkPicture() {
    if (fAddedToCache.load()) {
        SkResourceCache::PostPurgeSharedID(SkPicturePriv::MakeSharedID(fUniqueID));
    }
}
+
// 8-byte magic identifying serialized SkPicture (SKP) data.
static const char kMagic[] = { 's', 'k', 'i', 'a', 'p', 'i', 'c', 't' };

// Builds the SkPictInfo header for this picture: magic, current serialization
// version, and the cull rect.
SkPictInfo SkPicture::createHeader() const {
    SkPictInfo info;
    // Copy magic bytes at the beginning of the header
    static_assert(sizeof(kMagic) == 8, "");
    static_assert(sizeof(kMagic) == sizeof(info.fMagic), "");
    memcpy(info.fMagic, kMagic, sizeof(kMagic));

    // Set picture info after magic bytes in the header
    info.setVersion(SkPicturePriv::kCurrent_Version);
    info.fCullRect = this->cullRect();
    return info;
}
+
+bool SkPicture::IsValidPictInfo(const SkPictInfo& info) {
+ if (0 != memcmp(info.fMagic, kMagic, sizeof(kMagic))) {
+ return false;
+ }
+ if (info.getVersion() < SkPicturePriv::kMin_Version ||
+ info.getVersion() > SkPicturePriv::kCurrent_Version) {
+ return false;
+ }
+ return true;
+}
+
// Reads a potential SKP header from `stream` (magic, version, cull rect).
// On any successful read, *pInfo is filled in even if validation then fails;
// returns true only for a valid header.
bool SkPicture::StreamIsSKP(SkStream* stream, SkPictInfo* pInfo) {
    if (!stream) {
        return false;
    }

    SkPictInfo info;
    SkASSERT(sizeof(kMagic) == sizeof(info.fMagic));
    if (stream->read(&info.fMagic, sizeof(kMagic)) != sizeof(kMagic)) {
        return false;
    }

    uint32_t version;
    if (!stream->readU32(&version)) { return false; }
    info.setVersion(version);
    if (!stream->readScalar(&info.fCullRect.fLeft  )) { return false; }
    if (!stream->readScalar(&info.fCullRect.fTop   )) { return false; }
    if (!stream->readScalar(&info.fCullRect.fRight )) { return false; }
    if (!stream->readScalar(&info.fCullRect.fBottom)) { return false; }

    if (pInfo) {
        *pInfo = info;
    }
    return IsValidPictInfo(info);
}
+
// Free-function bridge so code without access to SkPicture's private statics
// can sniff whether a stream holds SKP data.
bool SkPicture_StreamIsSKP(SkStream* stream, SkPictInfo* pInfo) {
    return SkPicture::StreamIsSKP(stream, pInfo);
}
+
+bool SkPicture::BufferIsSKP(SkReadBuffer* buffer, SkPictInfo* pInfo) {
+ SkPictInfo info;
+ SkASSERT(sizeof(kMagic) == sizeof(info.fMagic));
+ if (!buffer->readByteArray(&info.fMagic, sizeof(kMagic))) {
+ return false;
+ }
+
+ info.setVersion(buffer->readUInt());
+ buffer->readRect(&info.fCullRect);
+
+ if (IsValidPictInfo(info)) {
+ if (pInfo) { *pInfo = info; }
+ return true;
+ }
+ return false;
+}
+
+sk_sp<SkPicture> SkPicture::Forwardport(const SkPictInfo& info,
+ const SkPictureData* data,
+ SkReadBuffer* buffer) {
+ if (!data) {
+ return nullptr;
+ }
+ if (!data->opData()) {
+ return nullptr;
+ }
+ SkPicturePlayback playback(data);
+ SkPictureRecorder r;
+ playback.draw(r.beginRecording(info.fCullRect), nullptr/*no callback*/, buffer);
+ return r.finishRecordingAsPicture();
+}
+
// Cap on picture-in-picture nesting during deserialization, so a malicious
// stream cannot recurse unboundedly.
static const int kNestedSKPLimit = 100;  // Arbitrarily set

// Public entry point: deserialize an SKP stream with the default nesting cap.
sk_sp<SkPicture> SkPicture::MakeFromStream(SkStream* stream, const SkDeserialProcs* procs) {
    return MakeFromStreamPriv(stream, procs, nullptr, kNestedSKPLimit);
}
+
// Deserializes from a raw memory block by wrapping it in a non-owning stream.
sk_sp<SkPicture> SkPicture::MakeFromData(const void* data, size_t size,
                                         const SkDeserialProcs* procs) {
    if (!data) {
        return nullptr;
    }
    SkMemoryStream stream(data, size);
    return MakeFromStreamPriv(&stream, procs, nullptr, kNestedSKPLimit);
}
+
// Deserializes from an SkData by wrapping its bytes in a non-owning stream.
sk_sp<SkPicture> SkPicture::MakeFromData(const SkData* data, const SkDeserialProcs* procs) {
    if (!data) {
        return nullptr;
    }
    SkMemoryStream stream(data->data(), data->size());
    return MakeFromStreamPriv(&stream, procs, nullptr, kNestedSKPLimit);
}
+
// Core stream deserializer. After the validated header comes one sentinel
// byte selecting the payload: SkPictureData, a client-custom blob (handled by
// procs.fPictureProc), or failure. recursionLimit bounds nested pictures.
sk_sp<SkPicture> SkPicture::MakeFromStreamPriv(SkStream* stream, const SkDeserialProcs* procsPtr,
                                               SkTypefacePlayback* typefaces, int recursionLimit) {
    if (recursionLimit <= 0) {
        return nullptr;
    }
    SkPictInfo info;
    if (!StreamIsSKP(stream, &info)) {
        return nullptr;
    }

    SkDeserialProcs procs;
    if (procsPtr) {
        procs = *procsPtr;
    }

    uint8_t trailingStreamByteAfterPictInfo;
    if (!stream->readU8(&trailingStreamByteAfterPictInfo)) { return nullptr; }
    switch (trailingStreamByteAfterPictInfo) {
        case kPictureData_TrailingStreamByteAfterPictInfo: {
            std::unique_ptr<SkPictureData> data(
                    SkPictureData::CreateFromStream(stream, info, procs, typefaces,
                                                    recursionLimit));
            return Forwardport(info, data.get(), nullptr);
        }
        case kCustom_TrailingStreamByteAfterPictInfo: {
            // Custom payload is stored as a negative size; it requires a
            // client-supplied picture proc to interpret the bytes.
            int32_t ssize;
            if (!stream->readS32(&ssize) || ssize >= 0 || !procs.fPictureProc) {
                return nullptr;
            }
            size_t size = sk_negate_to_size_t(ssize);
            // Reject sizes larger than the stream can possibly supply.
            if (StreamRemainingLengthIsBelow(stream, size)) {
                return nullptr;
            }
            auto data = SkData::MakeUninitialized(size);
            if (stream->read(data->writable_data(), size) != size) {
                return nullptr;
            }
            return procs.fPictureProc(data->data(), size, procs.fPictureCtx);
        }
        default:  // fall out to error return
            break;
    }
    return nullptr;
}
+
// Buffer-based deserializer. The sentinel here is an int32: negative means a
// custom payload of that (negated) size, 1 means SkPictureData follows, and
// anything else is an error.
sk_sp<SkPicture> SkPicturePriv::MakeFromBuffer(SkReadBuffer& buffer) {
    SkPictInfo info;
    if (!SkPicture::BufferIsSKP(&buffer, &info)) {
        return nullptr;
    }
    // size should be 0, 1, or negative
    int32_t ssize = buffer.read32();
    if (ssize < 0) {
        const SkDeserialProcs& procs = buffer.getDeserialProcs();
        if (!procs.fPictureProc) {
            return nullptr;
        }
        size_t size = sk_negate_to_size_t(ssize);
        return procs.fPictureProc(buffer.skip(size), size, procs.fPictureCtx);
    }
    if (ssize != 1) {
        // 1 is the magic 'size' that means SkPictureData follows
        return nullptr;
    }
    std::unique_ptr<SkPictureData> data(SkPictureData::CreateFromBuffer(buffer, info));
    return SkPicture::Forwardport(info, data.get(), &buffer);
}
+
// Converts this picture into the legacy SkPictureData representation by
// replaying it into an SkPictureRecord. Caller owns the returned pointer.
SkPictureData* SkPicture::backport() const {
    SkPictInfo info = this->createHeader();
    SkPictureRecord rec(info.fCullRect.roundOut(), 0/*flags*/);
    rec.beginRecording();
    this->playback(&rec);
    rec.endRecording();
    return new SkPictureData(rec, info);
}
+
// Public stream serialization: no typeface set, full (not text-blob-only) pass.
void SkPicture::serialize(SkWStream* stream, const SkSerialProcs* procs) const {
    this->serialize(stream, procs, nullptr);
}
+
// Serializes into an in-memory SkData via a dynamic stream.
sk_sp<SkData> SkPicture::serialize(const SkSerialProcs* procs) const {
    SkDynamicMemoryWStream stream;
    this->serialize(&stream, procs, nullptr);
    return stream.detachAsData();
}
+
// Runs the client's custom picture-serialization proc, if any. Returns the
// custom payload, an empty SkData when the payload is unusable (too large for
// an int32 size field, or <= 1 byte — 0/1 collide with the sentinel values),
// or null when no proc is installed / the proc declined.
static sk_sp<SkData> custom_serialize(const SkPicture* picture, const SkSerialProcs& procs) {
    if (procs.fPictureProc) {
        auto data = procs.fPictureProc(const_cast<SkPicture*>(picture), procs.fPictureCtx);
        if (data) {
            size_t size = data->size();
            if (!SkTFitsIn<int32_t>(size) || size <= 1) {
                return SkData::MakeEmpty();
            }
            return data;
        }
    }
    return nullptr;
}
+
+static bool write_pad32(SkWStream* stream, const void* data, size_t size) {
+ if (!stream->write(data, size)) {
+ return false;
+ }
+ if (size & 3) {
+ uint32_t zero = 0;
+ return stream->write(&zero, 4 - (size & 3));
+ }
+ return true;
+}
+
// Private serialize.
// SkPictureData::serialize makes a first pass on all subpictures, indicated by
// textBlobsOnly=true, to fill typefaceSet.
// Emits: SkPictInfo header, then one sentinel byte and either a custom
// payload (negative size + bytes), SkPictureData, or nothing on failure.
void SkPicture::serialize(SkWStream* stream, const SkSerialProcs* procsPtr,
                          SkRefCntSet* typefaceSet, bool textBlobsOnly) const {
    SkSerialProcs procs;
    if (procsPtr) {
        procs = *procsPtr;
    }

    SkPictInfo info = this->createHeader();
    stream->write(&info, sizeof(info));

    // A client-custom serialization takes precedence over SkPictureData.
    if (auto custom = custom_serialize(this, procs)) {
        int32_t size = SkToS32(custom->size());
        if (size == 0) {
            // custom_serialize signalled an unusable payload.
            stream->write8(kFailure_TrailingStreamByteAfterPictInfo);
            return;
        }
        stream->write8(kCustom_TrailingStreamByteAfterPictInfo);
        stream->write32(-size);  // negative for custom format
        write_pad32(stream, custom->data(), size);
        return;
    }

    std::unique_ptr<SkPictureData> data(this->backport());
    if (data) {
        stream->write8(kPictureData_TrailingStreamByteAfterPictInfo);
        data->serialize(stream, procs, typefaceSet, textBlobsOnly);
    } else {
        stream->write8(kFailure_TrailingStreamByteAfterPictInfo);
    }
}
+
// Buffer-based analog of the private serialize(): header fields, then either
// a custom payload (negative size), SkPictureData (sentinel 1), or 0 for
// "no content".
void SkPicturePriv::Flatten(const sk_sp<const SkPicture> picture, SkWriteBuffer& buffer) {
    SkPictInfo info = picture->createHeader();
    std::unique_ptr<SkPictureData> data(picture->backport());

    buffer.writeByteArray(&info.fMagic, sizeof(info.fMagic));
    buffer.writeUInt(info.getVersion());
    buffer.writeRect(info.fCullRect);

    if (auto custom = custom_serialize(picture.get(), buffer.fProcs)) {
        int32_t size = SkToS32(custom->size());
        buffer.write32(-size);  // negative for custom format
        buffer.writePad32(custom->data(), size);
        return;
    }

    if (data) {
        buffer.write32(1);  // special size meaning SkPictureData
        data->flatten(buffer);
    } else {
        buffer.write32(0);  // signal no content
    }
}
+
// Creates a draw-nothing placeholder picture covering `cull`, used as a slot
// to be swapped for real content later.
sk_sp<SkPicture> SkPicture::MakePlaceholder(SkRect cull) {
    struct Placeholder : public SkPicture {
        explicit Placeholder(SkRect cull) : fCull(cull) {}

        // Playback intentionally draws nothing.
        void playback(SkCanvas*, AbortCallback*) const override { }

        // approximateOpCount() needs to be greater than kMaxPictureOpsToUnrollInsteadOfRef
        // (SkCanvasPriv.h) to avoid unrolling this into a parent picture.
        int approximateOpCount(bool) const override {
            return kMaxPictureOpsToUnrollInsteadOfRef+1;
        }
        size_t approximateBytesUsed() const override { return sizeof(*this); }
        SkRect cullRect() const override { return fCull; }

        SkRect fCull;
    };
    return sk_make_sp<Placeholder>(cull);
}
diff --git a/gfx/skia/skia/src/core/SkPictureData.cpp b/gfx/skia/skia/src/core/SkPictureData.cpp
new file mode 100644
index 0000000000..d35b35e512
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureData.cpp
@@ -0,0 +1,601 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkPictureData.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkSerialProcs.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkPictureRecord.h"
+#include "src/core/SkPtrRecorder.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkStreamPriv.h"
+#include "src/core/SkTHash.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/core/SkVerticesPriv.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <cstring>
+#include <utility>
+
+using namespace skia_private;
+
+template <typename T> int SafeCount(const T* obj) {
+ return obj ? obj->size() : 0;
+}
+
+SkPictureData::SkPictureData(const SkPictInfo& info)
+ : fInfo(info) {}
+
+void SkPictureData::initForPlayback() const {
+ // ensure that the paths bounds are pre-computed
+ for (int i = 0; i < fPaths.size(); i++) {
+ fPaths[i].updateBoundsCache();
+ }
+}
+
+SkPictureData::SkPictureData(const SkPictureRecord& record,
+ const SkPictInfo& info)
+ : fPictures(record.getPictures())
+ , fDrawables(record.getDrawables())
+ , fTextBlobs(record.getTextBlobs())
+ , fVertices(record.getVertices())
+ , fImages(record.getImages())
+#if defined(SK_GANESH)
+ , fSlugs(record.getSlugs())
+#endif
+ , fInfo(info) {
+
+ fOpData = record.opData();
+
+ fPaints = record.fPaints;
+
+ fPaths.reset(record.fPaths.count());
+ record.fPaths.foreach([this](const SkPath& path, int n) {
+ // These indices are logically 1-based, but we need to serialize them
+ // 0-based to keep the deserializing SkPictureData::getPath() working.
+ fPaths[n-1] = path;
+ });
+
+ this->initForPlayback();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkStream.h"
+
+static size_t compute_chunk_size(SkFlattenable::Factory* array, int count) {
+ size_t size = 4; // for 'count'
+
+ for (int i = 0; i < count; i++) {
+ const char* name = SkFlattenable::FactoryToName(array[i]);
+ if (nullptr == name || 0 == *name) {
+ size += SkWStream::SizeOfPackedUInt(0);
+ } else {
+ size_t len = strlen(name);
+ size += SkWStream::SizeOfPackedUInt(len);
+ size += len;
+ }
+ }
+
+ return size;
+}
+
+static void write_tag_size(SkWriteBuffer& buffer, uint32_t tag, size_t size) {
+ buffer.writeUInt(tag);
+ buffer.writeUInt(SkToU32(size));
+}
+
+static void write_tag_size(SkWStream* stream, uint32_t tag, size_t size) {
+ stream->write32(tag);
+ stream->write32(SkToU32(size));
+}
+
+void SkPictureData::WriteFactories(SkWStream* stream, const SkFactorySet& rec) {
+ int count = rec.count();
+
+ AutoSTMalloc<16, SkFlattenable::Factory> storage(count);
+ SkFlattenable::Factory* array = (SkFlattenable::Factory*)storage.get();
+ rec.copyToArray(array);
+
+ size_t size = compute_chunk_size(array, count);
+
+ // TODO: write_tag_size should really take a size_t
+ write_tag_size(stream, SK_PICT_FACTORY_TAG, (uint32_t) size);
+ SkDEBUGCODE(size_t start = stream->bytesWritten());
+ stream->write32(count);
+
+ for (int i = 0; i < count; i++) {
+ const char* name = SkFlattenable::FactoryToName(array[i]);
+ if (nullptr == name || 0 == *name) {
+ stream->writePackedUInt(0);
+ } else {
+ size_t len = strlen(name);
+ stream->writePackedUInt(len);
+ stream->write(name, len);
+ }
+ }
+
+ SkASSERT(size == (stream->bytesWritten() - start));
+}
+
+void SkPictureData::WriteTypefaces(SkWStream* stream, const SkRefCntSet& rec,
+ const SkSerialProcs& procs) {
+ int count = rec.count();
+
+ write_tag_size(stream, SK_PICT_TYPEFACE_TAG, count);
+
+ AutoSTMalloc<16, SkTypeface*> storage(count);
+ SkTypeface** array = (SkTypeface**)storage.get();
+ rec.copyToArray((SkRefCnt**)array);
+
+ for (int i = 0; i < count; i++) {
+ SkTypeface* tf = array[i];
+ if (procs.fTypefaceProc) {
+ auto data = procs.fTypefaceProc(tf, procs.fTypefaceCtx);
+ if (data) {
+ stream->write(data->data(), data->size());
+ continue;
+ }
+ }
+ array[i]->serialize(stream);
+ }
+}
+
+void SkPictureData::flattenToBuffer(SkWriteBuffer& buffer, bool textBlobsOnly) const {
+ if (!textBlobsOnly) {
+ int numPaints = fPaints.size();
+ if (numPaints > 0) {
+ write_tag_size(buffer, SK_PICT_PAINT_BUFFER_TAG, numPaints);
+ for (const SkPaint& paint : fPaints) {
+ buffer.writePaint(paint);
+ }
+ }
+
+ int numPaths = fPaths.size();
+ if (numPaths > 0) {
+ write_tag_size(buffer, SK_PICT_PATH_BUFFER_TAG, numPaths);
+ buffer.writeInt(numPaths);
+ for (const SkPath& path : fPaths) {
+ buffer.writePath(path);
+ }
+ }
+ }
+
+ if (!fTextBlobs.empty()) {
+ write_tag_size(buffer, SK_PICT_TEXTBLOB_BUFFER_TAG, fTextBlobs.size());
+ for (const auto& blob : fTextBlobs) {
+ SkTextBlobPriv::Flatten(*blob, buffer);
+ }
+ }
+
+#if defined(SK_GANESH)
+ if (!textBlobsOnly) {
+ write_tag_size(buffer, SK_PICT_SLUG_BUFFER_TAG, fSlugs.size());
+ for (const auto& slug : fSlugs) {
+ slug->doFlatten(buffer);
+ }
+ }
+#endif
+
+ if (!textBlobsOnly) {
+ if (!fVertices.empty()) {
+ write_tag_size(buffer, SK_PICT_VERTICES_BUFFER_TAG, fVertices.size());
+ for (const auto& vert : fVertices) {
+ vert->priv().encode(buffer);
+ }
+ }
+
+ if (!fImages.empty()) {
+ write_tag_size(buffer, SK_PICT_IMAGE_BUFFER_TAG, fImages.size());
+ for (const auto& img : fImages) {
+ buffer.writeImage(img.get());
+ }
+ }
+ }
+}
+
+// SkPictureData::serialize() will write out paints, and then write out an array of typefaces
+// (unique set). However, paint's serializer will respect SerialProcs, which can cause us to
+// call that custom typefaceproc on *every* typeface, not just on the unique ones. To avoid this,
+// we ignore the custom proc (here) when we serialize the paints, and then do respect it when
+// we serialize the typefaces.
+static SkSerialProcs skip_typeface_proc(const SkSerialProcs& procs) {
+ SkSerialProcs newProcs = procs;
+ newProcs.fTypefaceProc = nullptr;
+ newProcs.fTypefaceCtx = nullptr;
+ return newProcs;
+}
+
+// topLevelTypeFaceSet is null only on the top level call.
+// This method is called recursively on every subpicture in two passes.
+// textBlobsOnly serves to indicate that we are on the first pass and skip as much work as
+// possible that is not relevant to collecting text blobs in topLevelTypeFaceSet
+// TODO(nifong): dedupe typefaces and all other shared resources in a faster and more readable way.
+void SkPictureData::serialize(SkWStream* stream, const SkSerialProcs& procs,
+ SkRefCntSet* topLevelTypeFaceSet, bool textBlobsOnly) const {
+ // This can happen at pretty much any time, so might as well do it first.
+ write_tag_size(stream, SK_PICT_READER_TAG, fOpData->size());
+ stream->write(fOpData->bytes(), fOpData->size());
+
+ // We serialize all typefaces into the typeface section of the top-level picture.
+ SkRefCntSet localTypefaceSet;
+ SkRefCntSet* typefaceSet = topLevelTypeFaceSet ? topLevelTypeFaceSet : &localTypefaceSet;
+
+ // We delay serializing the bulk of our data until after we've serialized
+ // factories and typefaces by first serializing to an in-memory write buffer.
+ SkFactorySet factSet; // buffer refs factSet, so factSet must come first.
+ SkBinaryWriteBuffer buffer;
+ buffer.setFactoryRecorder(sk_ref_sp(&factSet));
+ buffer.setSerialProcs(skip_typeface_proc(procs));
+ buffer.setTypefaceRecorder(sk_ref_sp(typefaceSet));
+ this->flattenToBuffer(buffer, textBlobsOnly);
+
+ // Pretend to serialize our sub-pictures for the side effect of filling typefaceSet
+ // with typefaces from sub-pictures.
+ struct DevNull: public SkWStream {
+ DevNull() : fBytesWritten(0) {}
+ size_t fBytesWritten;
+ bool write(const void*, size_t size) override { fBytesWritten += size; return true; }
+ size_t bytesWritten() const override { return fBytesWritten; }
+ } devnull;
+ for (const auto& pic : fPictures) {
+ pic->serialize(&devnull, nullptr, typefaceSet, /*textBlobsOnly=*/ true);
+ }
+ if (textBlobsOnly) { return; } // return early from fake serialize
+
+ // We need to write factories before we write the buffer.
+ // We need to write typefaces before we write the buffer or any sub-picture.
+ WriteFactories(stream, factSet);
+ // Pass the original typefaceproc (if any) now that we're ready to actually serialize the
+ // typefaces. We skipped this proc before, when we were serializing paints, so that the
+ // paints would just write indices into our typeface set.
+ WriteTypefaces(stream, *typefaceSet, procs);
+
+ // Write the buffer.
+ write_tag_size(stream, SK_PICT_BUFFER_SIZE_TAG, buffer.bytesWritten());
+ buffer.writeToStream(stream);
+
+ // Write sub-pictures by calling serialize again.
+ if (!fPictures.empty()) {
+ write_tag_size(stream, SK_PICT_PICTURE_TAG, fPictures.size());
+ for (const auto& pic : fPictures) {
+ pic->serialize(stream, &procs, typefaceSet, /*textBlobsOnly=*/ false);
+ }
+ }
+
+ stream->write32(SK_PICT_EOF_TAG);
+}
+
+void SkPictureData::flatten(SkWriteBuffer& buffer) const {
+ write_tag_size(buffer, SK_PICT_READER_TAG, fOpData->size());
+ buffer.writeByteArray(fOpData->bytes(), fOpData->size());
+
+ if (!fPictures.empty()) {
+ write_tag_size(buffer, SK_PICT_PICTURE_TAG, fPictures.size());
+ for (const auto& pic : fPictures) {
+ SkPicturePriv::Flatten(pic, buffer);
+ }
+ }
+
+ if (!fDrawables.empty()) {
+ write_tag_size(buffer, SK_PICT_DRAWABLE_TAG, fDrawables.size());
+ for (const auto& draw : fDrawables) {
+ buffer.writeFlattenable(draw.get());
+ }
+ }
+
+ // Write this picture playback's data into a writebuffer
+ this->flattenToBuffer(buffer, false);
+ buffer.write32(SK_PICT_EOF_TAG);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPictureData::parseStreamTag(SkStream* stream,
+ uint32_t tag,
+ uint32_t size,
+ const SkDeserialProcs& procs,
+ SkTypefacePlayback* topLevelTFPlayback,
+ int recursionLimit) {
+ switch (tag) {
+ case SK_PICT_READER_TAG:
+ SkASSERT(nullptr == fOpData);
+ fOpData = SkData::MakeFromStream(stream, size);
+ if (!fOpData) {
+ return false;
+ }
+ break;
+ case SK_PICT_FACTORY_TAG: {
+ if (!stream->readU32(&size)) { return false; }
+ if (StreamRemainingLengthIsBelow(stream, size)) {
+ return false;
+ }
+ fFactoryPlayback = std::make_unique<SkFactoryPlayback>(size);
+ for (size_t i = 0; i < size; i++) {
+ SkString str;
+ size_t len;
+ if (!stream->readPackedUInt(&len)) { return false; }
+ if (StreamRemainingLengthIsBelow(stream, len)) {
+ return false;
+ }
+ str.resize(len);
+ if (stream->read(str.data(), len) != len) {
+ return false;
+ }
+ fFactoryPlayback->base()[i] = SkFlattenable::NameToFactory(str.c_str());
+ }
+ } break;
+ case SK_PICT_TYPEFACE_TAG: {
+ if (StreamRemainingLengthIsBelow(stream, size)) {
+ return false;
+ }
+ fTFPlayback.setCount(size);
+ for (uint32_t i = 0; i < size; ++i) {
+ if (stream->isAtEnd()) {
+ return false;
+ }
+ sk_sp<SkTypeface> tf;
+ if (procs.fTypefaceProc) {
+ tf = procs.fTypefaceProc(&stream, sizeof(stream), procs.fTypefaceCtx);
+ } else {
+ tf = SkTypeface::MakeDeserialize(stream);
+ }
+ if (!tf) { // failed to deserialize
+ // fTFPlayback asserts it never has a null, so we plop in
+ // the default here.
+ tf = SkTypeface::MakeDefault();
+ }
+ fTFPlayback[i] = std::move(tf);
+ }
+ } break;
+ case SK_PICT_PICTURE_TAG: {
+ SkASSERT(fPictures.empty());
+ if (StreamRemainingLengthIsBelow(stream, size)) {
+ return false;
+ }
+ fPictures.reserve_back(SkToInt(size));
+
+ for (uint32_t i = 0; i < size; i++) {
+ auto pic = SkPicture::MakeFromStreamPriv(stream, &procs,
+ topLevelTFPlayback, recursionLimit - 1);
+ if (!pic) {
+ return false;
+ }
+ fPictures.push_back(std::move(pic));
+ }
+ } break;
+ case SK_PICT_BUFFER_SIZE_TAG: {
+ if (StreamRemainingLengthIsBelow(stream, size)) {
+ return false;
+ }
+ SkAutoMalloc storage(size);
+ if (stream->read(storage.get(), size) != size) {
+ return false;
+ }
+
+ SkReadBuffer buffer(storage.get(), size);
+ buffer.setVersion(fInfo.getVersion());
+
+ if (!fFactoryPlayback) {
+ return false;
+ }
+ fFactoryPlayback->setupBuffer(buffer);
+ buffer.setDeserialProcs(procs);
+
+ if (fTFPlayback.count() > 0) {
+ // .skp files <= v43 have typefaces serialized with each sub picture.
+ fTFPlayback.setupBuffer(buffer);
+ } else {
+ // Newer .skp files serialize all typefaces with the top picture.
+ topLevelTFPlayback->setupBuffer(buffer);
+ }
+
+ while (!buffer.eof() && buffer.isValid()) {
+ tag = buffer.readUInt();
+ size = buffer.readUInt();
+ this->parseBufferTag(buffer, tag, size);
+ }
+ if (!buffer.isValid()) {
+ return false;
+ }
+ } break;
+ }
+ return true; // success
+}
+
+static sk_sp<SkImage> create_image_from_buffer(SkReadBuffer& buffer) {
+ return buffer.readImage();
+}
+
+static sk_sp<SkDrawable> create_drawable_from_buffer(SkReadBuffer& buffer) {
+ return sk_sp<SkDrawable>((SkDrawable*)buffer.readFlattenable(SkFlattenable::kSkDrawable_Type));
+}
+
+// We need two types 'cause SkDrawable is const-variant.
+template <typename T, typename U>
+bool new_array_from_buffer(SkReadBuffer& buffer, uint32_t inCount,
+ TArray<sk_sp<T>>& array, sk_sp<U> (*factory)(SkReadBuffer&)) {
+ if (!buffer.validate(array.empty() && SkTFitsIn<int>(inCount))) {
+ return false;
+ }
+ if (0 == inCount) {
+ return true;
+ }
+
+ for (uint32_t i = 0; i < inCount; ++i) {
+ auto obj = factory(buffer);
+
+ if (!buffer.validate(obj != nullptr)) {
+ array.clear();
+ return false;
+ }
+
+ array.push_back(std::move(obj));
+ }
+
+ return true;
+}
+
+void SkPictureData::parseBufferTag(SkReadBuffer& buffer, uint32_t tag, uint32_t size) {
+ switch (tag) {
+ case SK_PICT_PAINT_BUFFER_TAG: {
+ if (!buffer.validate(SkTFitsIn<int>(size))) {
+ return;
+ }
+ const int count = SkToInt(size);
+
+ for (int i = 0; i < count; ++i) {
+ fPaints.push_back(buffer.readPaint());
+ if (!buffer.isValid()) {
+ return;
+ }
+ }
+ } break;
+ case SK_PICT_PATH_BUFFER_TAG:
+ if (size > 0) {
+ const int count = buffer.readInt();
+ if (!buffer.validate(count >= 0)) {
+ return;
+ }
+ for (int i = 0; i < count; i++) {
+ buffer.readPath(&fPaths.push_back());
+ if (!buffer.isValid()) {
+ return;
+ }
+ }
+ } break;
+ case SK_PICT_TEXTBLOB_BUFFER_TAG:
+ new_array_from_buffer(buffer, size, fTextBlobs, SkTextBlobPriv::MakeFromBuffer);
+ break;
+ case SK_PICT_SLUG_BUFFER_TAG:
+#if defined(SK_GANESH)
+ new_array_from_buffer(buffer, size, fSlugs, sktext::gpu::Slug::MakeFromBuffer);
+#endif
+ break;
+ case SK_PICT_VERTICES_BUFFER_TAG:
+ new_array_from_buffer(buffer, size, fVertices, SkVerticesPriv::Decode);
+ break;
+ case SK_PICT_IMAGE_BUFFER_TAG:
+ new_array_from_buffer(buffer, size, fImages, create_image_from_buffer);
+ break;
+ case SK_PICT_READER_TAG: {
+ // Preflight check that we can initialize all data from the buffer
+ // before allocating it.
+ if (!buffer.validateCanReadN<uint8_t>(size)) {
+ return;
+ }
+ auto data(SkData::MakeUninitialized(size));
+ if (!buffer.readByteArray(data->writable_data(), size) ||
+ !buffer.validate(nullptr == fOpData)) {
+ return;
+ }
+ SkASSERT(nullptr == fOpData);
+ fOpData = std::move(data);
+ } break;
+ case SK_PICT_PICTURE_TAG:
+ new_array_from_buffer(buffer, size, fPictures, SkPicturePriv::MakeFromBuffer);
+ break;
+ case SK_PICT_DRAWABLE_TAG:
+ new_array_from_buffer(buffer, size, fDrawables, create_drawable_from_buffer);
+ break;
+ default:
+ buffer.validate(false); // The tag was invalid.
+ break;
+ }
+}
+
+SkPictureData* SkPictureData::CreateFromStream(SkStream* stream,
+ const SkPictInfo& info,
+ const SkDeserialProcs& procs,
+ SkTypefacePlayback* topLevelTFPlayback,
+ int recursionLimit) {
+ std::unique_ptr<SkPictureData> data(new SkPictureData(info));
+ if (!topLevelTFPlayback) {
+ topLevelTFPlayback = &data->fTFPlayback;
+ }
+
+ if (!data->parseStream(stream, procs, topLevelTFPlayback, recursionLimit)) {
+ return nullptr;
+ }
+ return data.release();
+}
+
+SkPictureData* SkPictureData::CreateFromBuffer(SkReadBuffer& buffer,
+ const SkPictInfo& info) {
+ std::unique_ptr<SkPictureData> data(new SkPictureData(info));
+ buffer.setVersion(info.getVersion());
+
+ if (!data->parseBuffer(buffer)) {
+ return nullptr;
+ }
+ return data.release();
+}
+
+bool SkPictureData::parseStream(SkStream* stream,
+ const SkDeserialProcs& procs,
+ SkTypefacePlayback* topLevelTFPlayback,
+ int recursionLimit) {
+ for (;;) {
+ uint32_t tag;
+ if (!stream->readU32(&tag)) { return false; }
+ if (SK_PICT_EOF_TAG == tag) {
+ break;
+ }
+
+ uint32_t size;
+ if (!stream->readU32(&size)) { return false; }
+ if (!this->parseStreamTag(stream, tag, size, procs, topLevelTFPlayback, recursionLimit)) {
+ return false; // we're invalid
+ }
+ }
+ return true;
+}
+
+bool SkPictureData::parseBuffer(SkReadBuffer& buffer) {
+ while (buffer.isValid()) {
+ uint32_t tag = buffer.readUInt();
+ if (SK_PICT_EOF_TAG == tag) {
+ break;
+ }
+ this->parseBufferTag(buffer, tag, buffer.readUInt());
+ }
+
+ // Check that we encountered required tags
+ if (!buffer.validate(this->opData() != nullptr)) {
+ // If we didn't build any opData, we are invalid. Even an EmptyPicture allocates the
+ // SkData for the ops (though its length may be zero).
+ return false;
+ }
+ return true;
+}
+
+const SkPaint* SkPictureData::optionalPaint(SkReadBuffer* reader) const {
+ int index = reader->readInt();
+ if (index == 0) {
+ return nullptr; // recorder wrote a zero for no paint (likely drawimage)
+ }
+ return reader->validate(index > 0 && index <= fPaints.size()) ?
+ &fPaints[index - 1] : nullptr;
+}
+
+const SkPaint& SkPictureData::requiredPaint(SkReadBuffer* reader) const {
+ const SkPaint* paint = this->optionalPaint(reader);
+ if (reader->validate(paint != nullptr)) {
+ return *paint;
+ }
+ static const SkPaint& stub = *(new SkPaint);
+ return stub;
+}
diff --git a/gfx/skia/skia/src/core/SkPictureData.h b/gfx/skia/skia/src/core/SkPictureData.h
new file mode 100644
index 0000000000..4a384d0832
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureData.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureData_DEFINED
+#define SkPictureData_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkVertices.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkPictureFlat.h"
+#include "src/core/SkReadBuffer.h"
+
+#if defined(SK_GANESH)
+#include "include/private/chromium/Slug.h"
+#endif
+
+#include <cstdint>
+#include <memory>
+
+class SkFactorySet;
+class SkPictureRecord;
+class SkRefCntSet;
+class SkStream;
+class SkWStream;
+class SkWriteBuffer;
+struct SkDeserialProcs;
+struct SkSerialProcs;
+
+struct SkPictInfo {
+ SkPictInfo() : fVersion(~0U) {}
+
+ uint32_t getVersion() const {
+ SkASSERT(fVersion != ~0U);
+ return fVersion;
+ }
+
+ void setVersion(uint32_t version) {
+ SkASSERT(version != ~0U);
+ fVersion = version;
+ }
+
+public:
+ char fMagic[8];
+private:
+ uint32_t fVersion;
+public:
+ SkRect fCullRect;
+};
+
+#define SK_PICT_READER_TAG SkSetFourByteTag('r', 'e', 'a', 'd')
+#define SK_PICT_FACTORY_TAG SkSetFourByteTag('f', 'a', 'c', 't')
+#define SK_PICT_TYPEFACE_TAG SkSetFourByteTag('t', 'p', 'f', 'c')
+#define SK_PICT_PICTURE_TAG SkSetFourByteTag('p', 'c', 't', 'r')
+#define SK_PICT_DRAWABLE_TAG SkSetFourByteTag('d', 'r', 'a', 'w')
+
+// This tag specifies the size of the ReadBuffer, needed for the following tags
+#define SK_PICT_BUFFER_SIZE_TAG SkSetFourByteTag('a', 'r', 'a', 'y')
+// these are all inside the ARRAYS tag
+#define SK_PICT_PAINT_BUFFER_TAG SkSetFourByteTag('p', 'n', 't', ' ')
+#define SK_PICT_PATH_BUFFER_TAG SkSetFourByteTag('p', 't', 'h', ' ')
+#define SK_PICT_TEXTBLOB_BUFFER_TAG SkSetFourByteTag('b', 'l', 'o', 'b')
+#define SK_PICT_SLUG_BUFFER_TAG SkSetFourByteTag('s', 'l', 'u', 'g')
+#define SK_PICT_VERTICES_BUFFER_TAG SkSetFourByteTag('v', 'e', 'r', 't')
+#define SK_PICT_IMAGE_BUFFER_TAG SkSetFourByteTag('i', 'm', 'a', 'g')
+
+// Always write this last (with no length field afterwards)
+#define SK_PICT_EOF_TAG SkSetFourByteTag('e', 'o', 'f', ' ')
+
+template <typename T>
+T* read_index_base_1_or_null(SkReadBuffer* reader,
+ const skia_private::TArray<sk_sp<T>>& array) {
+ int index = reader->readInt();
+ return reader->validate(index > 0 && index <= array.size()) ? array[index - 1].get() : nullptr;
+}
+
+class SkPictureData {
+public:
+ SkPictureData(const SkPictureRecord& record, const SkPictInfo&);
+ // Does not affect ownership of SkStream.
+ static SkPictureData* CreateFromStream(SkStream*,
+ const SkPictInfo&,
+ const SkDeserialProcs&,
+ SkTypefacePlayback*,
+ int recursionLimit);
+ static SkPictureData* CreateFromBuffer(SkReadBuffer&, const SkPictInfo&);
+
+ void serialize(SkWStream*, const SkSerialProcs&, SkRefCntSet*, bool textBlobsOnly=false) const;
+ void flatten(SkWriteBuffer&) const;
+
+ const SkPictInfo& info() const { return fInfo; }
+
+ const sk_sp<SkData>& opData() const { return fOpData; }
+
+protected:
+ explicit SkPictureData(const SkPictInfo& info);
+
+ // Does not affect ownership of SkStream.
+ bool parseStream(SkStream*, const SkDeserialProcs&, SkTypefacePlayback*,
+ int recursionLimit);
+ bool parseBuffer(SkReadBuffer& buffer);
+
+public:
+ const SkImage* getImage(SkReadBuffer* reader) const {
+ // images are written base-0, unlike paths, pictures, drawables, etc.
+ const int index = reader->readInt();
+ return reader->validateIndex(index, fImages.size()) ? fImages[index].get() : nullptr;
+ }
+
+ const SkPath& getPath(SkReadBuffer* reader) const {
+ int index = reader->readInt();
+ return reader->validate(index > 0 && index <= fPaths.size()) ?
+ fPaths[index - 1] : fEmptyPath;
+ }
+
+ const SkPicture* getPicture(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fPictures);
+ }
+
+ SkDrawable* getDrawable(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fDrawables);
+ }
+
+ // Return a paint if one was used for this op, or nullptr if none was used.
+ const SkPaint* optionalPaint(SkReadBuffer* reader) const;
+
+ // Return the paint used for this op, invalidating the SkReadBuffer if there appears to be none.
+ // The returned paint is always safe to use.
+ const SkPaint& requiredPaint(SkReadBuffer* reader) const;
+
+ const SkTextBlob* getTextBlob(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fTextBlobs);
+ }
+
+#if defined(SK_GANESH)
+ const sktext::gpu::Slug* getSlug(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fSlugs);
+ }
+#endif
+
+ const SkVertices* getVertices(SkReadBuffer* reader) const {
+ return read_index_base_1_or_null(reader, fVertices);
+ }
+
+private:
+ // these help us with reading/writing
+ // Does not affect ownership of SkStream.
+ bool parseStreamTag(SkStream*, uint32_t tag, uint32_t size,
+ const SkDeserialProcs&, SkTypefacePlayback*,
+ int recursionLimit);
+ void parseBufferTag(SkReadBuffer&, uint32_t tag, uint32_t size);
+ void flattenToBuffer(SkWriteBuffer&, bool textBlobsOnly) const;
+
+ skia_private::TArray<SkPaint> fPaints;
+ skia_private::TArray<SkPath> fPaths;
+
+ sk_sp<SkData> fOpData; // opcodes and parameters
+
+ const SkPath fEmptyPath;
+ const SkBitmap fEmptyBitmap;
+
+ skia_private::TArray<sk_sp<const SkPicture>> fPictures;
+ skia_private::TArray<sk_sp<SkDrawable>> fDrawables;
+ skia_private::TArray<sk_sp<const SkTextBlob>> fTextBlobs;
+ skia_private::TArray<sk_sp<const SkVertices>> fVertices;
+ skia_private::TArray<sk_sp<const SkImage>> fImages;
+#if defined(SK_GANESH)
+ skia_private::TArray<sk_sp<const sktext::gpu::Slug>> fSlugs;
+#endif
+
+
+ SkTypefacePlayback fTFPlayback;
+ std::unique_ptr<SkFactoryPlayback> fFactoryPlayback;
+
+ const SkPictInfo fInfo;
+
+ static void WriteFactories(SkWStream* stream, const SkFactorySet& rec);
+ static void WriteTypefaces(SkWStream* stream, const SkRefCntSet& rec, const SkSerialProcs&);
+
+ void initForPlayback() const;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureFlat.cpp b/gfx/skia/skia/src/core/SkPictureFlat.cpp
new file mode 100644
index 0000000000..24c979e12d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureFlat.cpp
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkChecksum.h"
+#include "src/core/SkPictureFlat.h"
+
+#include <memory>
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkTypefacePlayback::setCount(size_t count) {
+ fCount = count;
+ fArray = std::make_unique<sk_sp<SkTypeface>[]>(count);
+}
diff --git a/gfx/skia/skia/src/core/SkPictureFlat.h b/gfx/skia/skia/src/core/SkPictureFlat.h
new file mode 100644
index 0000000000..36cb274bda
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureFlat.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPictureFlat_DEFINED
+#define SkPictureFlat_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPicture.h"
+#include "include/private/SkChecksum.h"
+#include "src/core/SkPtrRecorder.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkTDynamicHash.h"
+#include "src/core/SkWriteBuffer.h"
+
+/*
+ * Note: While adding new DrawTypes, it is necessary to add to the end of this list
+ * and update LAST_DRAWTYPE_ENUM to avoid having the code read older skps wrong.
+ * (which can cause segfaults)
+ *
+ * Reordering can be done during version updates.
+ */
+enum DrawType {
+ UNUSED,
+ CLIP_PATH,
+ CLIP_REGION,
+ CLIP_RECT,
+ CLIP_RRECT,
+ CONCAT,
+ DRAW_BITMAP_RETIRED_2016_REMOVED_2018,
+ DRAW_BITMAP_MATRIX_RETIRED_2016_REMOVED_2018,
+ DRAW_BITMAP_NINE_RETIRED_2016_REMOVED_2018,
+ DRAW_BITMAP_RECT_RETIRED_2016_REMOVED_2018,
+ DRAW_CLEAR,
+ DRAW_DATA,
+ DRAW_OVAL,
+ DRAW_PAINT,
+ DRAW_PATH,
+ DRAW_PICTURE,
+ DRAW_POINTS,
+ DRAW_POS_TEXT_REMOVED_1_2019,
+ DRAW_POS_TEXT_TOP_BOTTOM_REMOVED_1_2019,
+ DRAW_POS_TEXT_H_REMOVED_1_2019,
+ DRAW_POS_TEXT_H_TOP_BOTTOM_REMOVED_1_2019,
+ DRAW_RECT,
+ DRAW_RRECT,
+ DRAW_SPRITE_RETIRED_2015_REMOVED_2018,
+ DRAW_TEXT_REMOVED_1_2019,
+ DRAW_TEXT_ON_PATH_RETIRED_08_2018_REMOVED_10_2018,
+ DRAW_TEXT_TOP_BOTTOM_REMOVED_1_2019,
+ DRAW_VERTICES_RETIRED_03_2017_REMOVED_01_2018,
+ RESTORE,
+ ROTATE,
+ SAVE,
+ SAVE_LAYER_SAVEFLAGS_DEPRECATED_2015_REMOVED_12_2020,
+ SCALE,
+ SET_MATRIX,
+ SKEW,
+ TRANSLATE,
+ NOOP,
+ BEGIN_COMMENT_GROUP_obsolete,
+ COMMENT_obsolete,
+ END_COMMENT_GROUP_obsolete,
+
+ // new ops -- feel free to re-alphabetize on next version bump
+ DRAW_DRRECT,
+ PUSH_CULL, // deprecated, M41 was last Chromium version to write this to an .skp
+ POP_CULL, // deprecated, M41 was last Chromium version to write this to an .skp
+
+ DRAW_PATCH, // could not add in aphabetical order
+ DRAW_PICTURE_MATRIX_PAINT,
+ DRAW_TEXT_BLOB,
+ DRAW_IMAGE,
+ DRAW_IMAGE_RECT_STRICT_obsolete,
+ DRAW_ATLAS,
+ DRAW_IMAGE_NINE,
+ DRAW_IMAGE_RECT,
+
+ SAVE_LAYER_SAVELAYERFLAGS_DEPRECATED_JAN_2016_REMOVED_01_2018,
+ SAVE_LAYER_SAVELAYERREC,
+
+ DRAW_ANNOTATION,
+ DRAW_DRAWABLE,
+ DRAW_DRAWABLE_MATRIX,
+ DRAW_TEXT_RSXFORM_DEPRECATED_DEC_2018,
+
+ TRANSLATE_Z, // deprecated (M60)
+
+ DRAW_SHADOW_REC,
+ DRAW_IMAGE_LATTICE,
+ DRAW_ARC,
+ DRAW_REGION,
+ DRAW_VERTICES_OBJECT,
+
+ FLUSH,
+
+ DRAW_EDGEAA_IMAGE_SET,
+
+ SAVE_BEHIND,
+
+ DRAW_EDGEAA_QUAD,
+
+ DRAW_BEHIND_PAINT,
+ CONCAT44,
+ CLIP_SHADER_IN_PAINT,
+ MARK_CTM, // deprecated
+ SET_M44,
+
+ DRAW_IMAGE2,
+ DRAW_IMAGE_RECT2,
+ DRAW_IMAGE_LATTICE2,
+ DRAW_EDGEAA_IMAGE_SET2,
+
+ RESET_CLIP,
+
+ DRAW_SLUG,
+
+ LAST_DRAWTYPE_ENUM = DRAW_SLUG,
+};
+
+enum DrawVertexFlags {
+ DRAW_VERTICES_HAS_TEXS = 0x01,
+ DRAW_VERTICES_HAS_COLORS = 0x02,
+ DRAW_VERTICES_HAS_INDICES = 0x04,
+ DRAW_VERTICES_HAS_XFER = 0x08,
+};
+
+enum DrawAtlasFlags {
+ DRAW_ATLAS_HAS_COLORS = 1 << 0,
+ DRAW_ATLAS_HAS_CULL = 1 << 1,
+ DRAW_ATLAS_HAS_SAMPLING = 1 << 2,
+};
+
+enum DrawTextRSXformFlags {
+ DRAW_TEXT_RSXFORM_HAS_CULL = 1 << 0,
+};
+
+enum SaveLayerRecFlatFlags {
+ SAVELAYERREC_HAS_BOUNDS = 1 << 0,
+ SAVELAYERREC_HAS_PAINT = 1 << 1,
+ SAVELAYERREC_HAS_BACKDROP = 1 << 2,
+ SAVELAYERREC_HAS_FLAGS = 1 << 3,
+ SAVELAYERREC_HAS_CLIPMASK_OBSOLETE = 1 << 4, // 6/13/2020
+ SAVELAYERREC_HAS_CLIPMATRIX_OBSOLETE = 1 << 5, // 6/13/2020
+ SAVELAYERREC_HAS_BACKDROP_SCALE = 1 << 6
+};
+
+enum SaveBehindFlatFlags {
+ SAVEBEHIND_HAS_SUBSET = 1 << 0,
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// clipparams are packed in 5 bits
+// doAA:1 | clipOp:4
+
+// Newly serialized pictures will only write kIntersect or kDifference.
+static inline uint32_t ClipParams_pack(SkClipOp op, bool doAA) {
+ unsigned doAABit = doAA ? 1 : 0;
+ return (doAABit << 4) | static_cast<int>(op);
+}
+
+// But old SKPs may have been serialized with the SK_SUPPORT_DEPRECATED_CLIPOP flag, so might
+// encounter expanding clip ops. Thus, this returns the clip op as the more general Region::Op.
+static inline SkRegion::Op ClipParams_unpackRegionOp(SkReadBuffer* buffer, uint32_t packed) {
+ uint32_t unpacked = packed & 0xF;
+ if (buffer->validate(unpacked <= SkRegion::kIntersect_Op ||
+ (unpacked <= SkRegion::kReplace_Op &&
+ buffer->isVersionLT(SkPicturePriv::kNoExpandingClipOps)))) {
+ return static_cast<SkRegion::Op>(unpacked);
+ }
+ return SkRegion::kIntersect_Op;
+}
+
+static inline bool ClipParams_unpackDoAA(uint32_t packed) {
+ return SkToBool((packed >> 4) & 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkTypefacePlayback {
+public:
+ SkTypefacePlayback() : fCount(0), fArray(nullptr) {}
+ ~SkTypefacePlayback() = default;
+
+ void setCount(size_t count);
+
+ size_t count() const { return fCount; }
+
+ sk_sp<SkTypeface>& operator[](size_t index) {
+ SkASSERT(index < fCount);
+ return fArray[index];
+ }
+
+ void setupBuffer(SkReadBuffer& buffer) const {
+ buffer.setTypefaceArray(fArray.get(), fCount);
+ }
+
+protected:
+ size_t fCount;
+ std::unique_ptr<sk_sp<SkTypeface>[]> fArray;
+};
+
+class SkFactoryPlayback {
+public:
+ SkFactoryPlayback(int count) : fCount(count) { fArray = new SkFlattenable::Factory[count]; }
+
+ ~SkFactoryPlayback() { delete[] fArray; }
+
+ SkFlattenable::Factory* base() const { return fArray; }
+
+ void setupBuffer(SkReadBuffer& buffer) const {
+ buffer.setFactoryPlayback(fArray, fCount);
+ }
+
+private:
+ int fCount;
+ SkFlattenable::Factory* fArray;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp b/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp
new file mode 100644
index 0000000000..706812be3d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureImageGenerator.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkSurfaceProps.h"
+#include "src/base/SkTLazy.h"
+#include "src/image/SkImage_Base.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#endif
+
// An SkImageGenerator that produces its pixels by replaying an SkPicture,
// optionally through a transform matrix and a paint captured at creation.
class SkPictureImageGenerator : public SkImageGenerator {
public:
    SkPictureImageGenerator(const SkImageInfo&, sk_sp<SkPicture>, const SkMatrix*,
                            const SkPaint*, const SkSurfaceProps&);

protected:
    // CPU path: rasterizes the picture directly into 'pixels'.
    bool onGetPixels(const SkImageInfo&, void* pixels, size_t rowBytes, const Options&) override;

#if defined(SK_GANESH)
    // Ganesh path: renders the picture into a GPU render target.
    GrSurfaceProxyView onGenerateTexture(GrRecordingContext*, const SkImageInfo&,
                                         GrMipmapped, GrImageTexGenPolicy) override;
#endif

#if defined(SK_GRAPHITE)
    // Graphite path: renders the picture into a surface and snaps an image.
    sk_sp<SkImage> onMakeTextureImage(skgpu::graphite::Recorder*,
                                      const SkImageInfo&,
                                      skgpu::Mipmapped) override;
#endif

private:
    sk_sp<SkPicture> fPicture;
    SkMatrix fMatrix;         // identity when no matrix was supplied
    SkTLazy<SkPaint> fPaint;  // unset when no paint was supplied
    SkSurfaceProps fProps;

    using INHERITED = SkImageGenerator;
};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<SkImageGenerator>
+SkImageGenerator::MakeFromPicture(const SkISize& size, sk_sp<SkPicture> picture,
+ const SkMatrix* matrix, const SkPaint* paint,
+ SkImage::BitDepth bitDepth, sk_sp<SkColorSpace> colorSpace) {
+ return SkImageGenerator::MakeFromPicture(size, picture, matrix, paint, bitDepth,
+ colorSpace, {});
+}
+
+std::unique_ptr<SkImageGenerator>
+SkImageGenerator::MakeFromPicture(const SkISize& size, sk_sp<SkPicture> picture,
+ const SkMatrix* matrix, const SkPaint* paint,
+ SkImage::BitDepth bitDepth, sk_sp<SkColorSpace> colorSpace,
+ SkSurfaceProps props) {
+ if (!picture || !colorSpace || size.isEmpty()) {
+ return nullptr;
+ }
+
+ SkColorType colorType = kN32_SkColorType;
+ if (SkImage::BitDepth::kF16 == bitDepth) {
+ colorType = kRGBA_F16_SkColorType;
+ }
+
+ SkImageInfo info =
+ SkImageInfo::Make(size, colorType, kPremul_SkAlphaType, std::move(colorSpace));
+ return std::unique_ptr<SkImageGenerator>(
+ new SkPictureImageGenerator(info, std::move(picture), matrix, paint, props));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkPictureImageGenerator::SkPictureImageGenerator(const SkImageInfo& info, sk_sp<SkPicture> picture,
+ const SkMatrix* matrix, const SkPaint* paint,
+ const SkSurfaceProps& props)
+ : SkImageGenerator(info)
+ , fPicture(std::move(picture))
+ , fProps(props) {
+
+ if (matrix) {
+ fMatrix = *matrix;
+ } else {
+ fMatrix.reset();
+ }
+
+ if (paint) {
+ fPaint.set(*paint);
+ }
+}
+
+bool SkPictureImageGenerator::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options& opts) {
+ std::unique_ptr<SkCanvas> canvas = SkCanvas::MakeRasterDirect(info, pixels, rowBytes, &fProps);
+ if (!canvas) {
+ return false;
+ }
+ canvas->clear(0);
+ canvas->drawPicture(fPicture, &fMatrix, fPaint.getMaybeNull());
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/SkGr.h"
+
// Ganesh path: replays the picture into a freshly cleared GPU render target
// and returns the snapped image's texture view (empty view on failure).
GrSurfaceProxyView SkPictureImageGenerator::onGenerateTexture(GrRecordingContext* ctx,
                                                              const SkImageInfo& info,
                                                              GrMipmapped mipmapped,
                                                              GrImageTexGenPolicy texGenPolicy) {
    SkASSERT(ctx);

    // Only the uncached+unbudgeted policy opts out of the GPU budget.
    skgpu::Budgeted budgeted = texGenPolicy == GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
                                       ? skgpu::Budgeted::kNo
                                       : skgpu::Budgeted::kYes;
    auto surface = SkSurface::MakeRenderTarget(ctx, budgeted, info, 0, kTopLeft_GrSurfaceOrigin,
                                               &fProps, mipmapped == GrMipmapped::kYes);
    if (!surface) {
        return {};
    }

    surface->getCanvas()->clear(SkColors::kTransparent);
    surface->getCanvas()->drawPicture(fPicture.get(), &fMatrix, fPaint.getMaybeNull());
    sk_sp<SkImage> image(surface->makeImageSnapshot());
    if (!image) {
        return {};
    }
    auto [view, ct] = as_IB(image)->asView(ctx, mipmapped);
    SkASSERT(view);
    SkASSERT(mipmapped == GrMipmapped::kNo ||
             view.asTextureProxy()->mipmapped() == GrMipmapped::kYes);
    return view;
}
+
+#endif // defined(SK_GANESH)
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Log.h"
+
+sk_sp<SkImage> SkPictureImageGenerator::onMakeTextureImage(skgpu::graphite::Recorder* recorder,
+ const SkImageInfo& info,
+ skgpu::Mipmapped mipmapped) {
+ using namespace skgpu::graphite;
+
+ sk_sp<SkSurface> surface = SkSurface::MakeGraphite(recorder, info, mipmapped);
+ if (!surface) {
+ SKGPU_LOG_E("Failed to create Surface");
+ return nullptr;
+ }
+
+ surface->getCanvas()->clear(SkColors::kTransparent);
+ surface->getCanvas()->drawPicture(fPicture.get(), &fMatrix, fPaint.getMaybeNull());
+ return surface->asImage();
+}
+
+#endif // SK_GRAPHITE
diff --git a/gfx/skia/skia/src/core/SkPicturePlayback.cpp b/gfx/skia/skia/src/core/SkPicturePlayback.cpp
new file mode 100644
index 0000000000..6c4fe97948
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicturePlayback.cpp
@@ -0,0 +1,739 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkPicturePlayback.h"
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkClipOp.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkPictureFlat.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkVerticesPriv.h"
+#include "src/utils/SkPatchUtils.h"
+
+#if defined(SK_GANESH)
+#include "include/private/chromium/Slug.h"
+#endif
+
+class SkDrawable;
+class SkPath;
+class SkTextBlob;
+class SkVertices;
+
+using namespace skia_private;
+
+static const SkRect* get_rect_ptr(SkReadBuffer* reader, SkRect* storage) {
+ if (reader->readBool()) {
+ reader->readRect(storage);
+ return storage;
+ } else {
+ return nullptr;
+ }
+}
+
// Replays every serialized op in fPictureData onto 'canvas', stopping early
// when 'callback' requests an abort or the op stream proves invalid. When
// 'buffer' is non-null, a validation failure in our local reader is mirrored
// into it so the caller's own deserialization also fails.
void SkPicturePlayback::draw(SkCanvas* canvas,
                             SkPicture::AbortCallback* callback,
                             SkReadBuffer* buffer) {
    AutoResetOpID aroi(this);  // zeroes fCurOffset on every exit path
    SkASSERT(0 == fCurOffset);

    SkReadBuffer reader(fPictureData->opData()->bytes(),
                        fPictureData->opData()->size());
    reader.setVersion(fPictureData->info().getVersion());

    // Record this, so we can concat w/ it if we encounter a setMatrix()
    SkM44 initialMatrix = canvas->getLocalToDevice();

    // Unwinds any save()s handleOp leaves behind when we return.
    SkAutoCanvasRestore acr(canvas, false);

    while (!reader.eof() && reader.isValid()) {
        if (callback && callback->abort()) {
            return;
        }

        fCurOffset = reader.offset();

        // Each op is packed as (op << 24) | size; a size field of 0xffffff
        // means the real size didn't fit and follows in the next word.
        uint32_t bits = reader.readInt();
        uint32_t op = bits >> 24,
                 size = bits & 0xffffff;
        if (size == 0xffffff) {
            size = reader.readInt();
        }

        if (!reader.validate(size > 0 && op > UNUSED && op <= LAST_DRAWTYPE_ENUM)) {
            return;
        }

        this->handleOp(&reader, (DrawType)op, size, canvas, initialMatrix);
    }

    // need to propagate invalid state to the parent reader
    if (buffer) {
        buffer->validate(reader.isValid());
    }
}
+
+static void validate_offsetToRestore(SkReadBuffer* reader, size_t offsetToRestore) {
+ if (offsetToRestore) {
+ reader->validate(SkIsAlign4(offsetToRestore) && offsetToRestore >= reader->offset());
+ }
+}
+
// Maps a deserialized SkRegion::Op onto a modern SkClipOp, emulating the
// deprecated ops where possible. Returns true (with *clipOpToUse set) when
// the caller should go on to apply a clip, false when the op must be dropped.
static bool do_clip_op(SkReadBuffer* reader, SkCanvas* canvas, SkRegion::Op op,
                       SkClipOp* clipOpToUse) {
    switch(op) {
        case SkRegion::kDifference_Op:
        case SkRegion::kIntersect_Op:
            // Fully supported, identity mapping between SkClipOp and Region::Op
            *clipOpToUse = static_cast<SkClipOp>(op);
            return true;
        case SkRegion::kReplace_Op:
            // Emulate the replace by resetting first and following it up with an intersect
            SkASSERT(reader->isVersionLT(SkPicturePriv::kNoExpandingClipOps));
            SkCanvasPriv::ResetClip(canvas);
            *clipOpToUse = SkClipOp::kIntersect;
            return true;
        default:
            // An expanding clip op, which if encountered on an old SKP, we just silently ignore
            SkASSERT(reader->isVersionLT(SkPicturePriv::kNoExpandingClipOps));
            return false;
    }
}
+
// Decodes a single op's payload from 'reader' and applies it to 'canvas'.
// 'size' is the op's total encoded size (only consulted by ops that skip
// their payload). 'initialMatrix' is the canvas's local-to-device transform
// captured at the start of playback; SET_MATRIX/SET_M44 pre-concat it so
// pictures replay correctly inside an already-transformed canvas.
// Any read failure breaks out of the switch, leaving the reader marked
// invalid so the caller's loop terminates.
void SkPicturePlayback::handleOp(SkReadBuffer* reader,
                                 DrawType op,
                                 uint32_t size,
                                 SkCanvas* canvas,
                                 const SkM44& initialMatrix) {
#define BREAK_ON_READ_ERROR(r) if (!r->isValid()) break

    switch (op) {
        case NOOP: {
            SkASSERT(size >= 4);
            reader->skip(size - 4);
        } break;
        case FLUSH:
            canvas->flush();
            break;
        case CLIP_PATH: {
            const SkPath& path = fPictureData->getPath(reader);
            uint32_t packed = reader->readInt();
            SkRegion::Op rgnOp = ClipParams_unpackRegionOp(reader, packed);
            bool doAA = ClipParams_unpackDoAA(packed);
            size_t offsetToRestore = reader->readInt();
            validate_offsetToRestore(reader, offsetToRestore);
            BREAK_ON_READ_ERROR(reader);

            SkClipOp clipOp;
            if (do_clip_op(reader, canvas, rgnOp, &clipOp)) {
                canvas->clipPath(path, clipOp, doAA);
            }
            // If the clip is now empty, jump straight to the matching restore.
            if (canvas->isClipEmpty() && offsetToRestore) {
                reader->skip(offsetToRestore - reader->offset());
            }
        } break;
        case CLIP_REGION: {
            SkRegion region;
            reader->readRegion(&region);
            uint32_t packed = reader->readInt();
            SkRegion::Op rgnOp = ClipParams_unpackRegionOp(reader, packed);
            size_t offsetToRestore = reader->readInt();
            validate_offsetToRestore(reader, offsetToRestore);
            BREAK_ON_READ_ERROR(reader);

            SkClipOp clipOp;
            if (do_clip_op(reader, canvas, rgnOp, &clipOp)) {
                canvas->clipRegion(region, clipOp);
            }
            if (canvas->isClipEmpty() && offsetToRestore) {
                reader->skip(offsetToRestore - reader->offset());
            }
        } break;
        case CLIP_RECT: {
            SkRect rect;
            reader->readRect(&rect);
            uint32_t packed = reader->readInt();
            SkRegion::Op rgnOp = ClipParams_unpackRegionOp(reader, packed);
            bool doAA = ClipParams_unpackDoAA(packed);
            size_t offsetToRestore = reader->readInt();
            validate_offsetToRestore(reader, offsetToRestore);
            BREAK_ON_READ_ERROR(reader);

            SkClipOp clipOp;
            if (do_clip_op(reader, canvas, rgnOp, &clipOp)) {
                canvas->clipRect(rect, clipOp, doAA);
            }
            if (canvas->isClipEmpty() && offsetToRestore) {
                reader->skip(offsetToRestore - reader->offset());
            }
        } break;
        case CLIP_RRECT: {
            SkRRect rrect;
            reader->readRRect(&rrect);
            uint32_t packed = reader->readInt();
            SkRegion::Op rgnOp = ClipParams_unpackRegionOp(reader, packed);
            bool doAA = ClipParams_unpackDoAA(packed);
            size_t offsetToRestore = reader->readInt();
            validate_offsetToRestore(reader, offsetToRestore);
            BREAK_ON_READ_ERROR(reader);

            SkClipOp clipOp;
            if (do_clip_op(reader, canvas, rgnOp, &clipOp)) {
                canvas->clipRRect(rrect, clipOp, doAA);
            }
            if (canvas->isClipEmpty() && offsetToRestore) {
                reader->skip(offsetToRestore - reader->offset());
            }
        } break;
        case CLIP_SHADER_IN_PAINT: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            // clipShader() was never used in conjunction with deprecated, expanding clip ops, so
            // it requires the op to just be intersect or difference.
            SkClipOp clipOp = reader->checkRange(SkClipOp::kDifference, SkClipOp::kIntersect);
            BREAK_ON_READ_ERROR(reader);

            canvas->clipShader(paint.refShader(), clipOp);
        } break;
        case RESET_CLIP:
            // For Android, an emulated "replace" clip op appears as a manual reset followed by
            // an intersect operation (equivalent to the above handling of replace ops encountered
            // in old serialized pictures).
            SkCanvasPriv::ResetClip(canvas);
            break;
        case PUSH_CULL: break; // Deprecated, safe to ignore both push and pop.
        case POP_CULL:  break;
        case CONCAT: {
            SkMatrix matrix;
            reader->readMatrix(&matrix);
            BREAK_ON_READ_ERROR(reader);

            canvas->concat(matrix);
            break;
        }
        case CONCAT44: {
            // 16 column-major scalars follow directly in the stream.
            const SkScalar* colMaj = reader->skipT<SkScalar>(16);
            BREAK_ON_READ_ERROR(reader);
            canvas->concat(SkM44::ColMajor(colMaj));
            break;
        }
        case DRAW_ANNOTATION: {
            SkRect rect;
            reader->readRect(&rect);
            SkString key;
            reader->readString(&key);
            sk_sp<SkData> data = reader->readByteArrayAsData();
            BREAK_ON_READ_ERROR(reader);
            SkASSERT(data);

            canvas->drawAnnotation(rect, key.c_str(), data.get());
        } break;
        case DRAW_ARC: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            SkRect rect;
            reader->readRect(&rect);
            SkScalar startAngle = reader->readScalar();
            SkScalar sweepAngle = reader->readScalar();
            int useCenter = reader->readInt();
            BREAK_ON_READ_ERROR(reader);

            canvas->drawArc(rect, startAngle, sweepAngle, SkToBool(useCenter), paint);
        } break;
        case DRAW_ATLAS: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            const SkImage* atlas = fPictureData->getImage(reader);
            const uint32_t flags = reader->readUInt();
            const int count = reader->readUInt();
            const SkRSXform* xform = (const SkRSXform*)reader->skip(count, sizeof(SkRSXform));
            const SkRect* tex = (const SkRect*)reader->skip(count, sizeof(SkRect));
            // Colors, cull rect, and sampling are all optional, gated by flags.
            const SkColor* colors = nullptr;
            SkBlendMode mode = SkBlendMode::kDst;
            if (flags & DRAW_ATLAS_HAS_COLORS) {
                colors = (const SkColor*)reader->skip(count, sizeof(SkColor));
                mode = reader->read32LE(SkBlendMode::kLastMode);
                BREAK_ON_READ_ERROR(reader);
            }
            const SkRect* cull = nullptr;
            if (flags & DRAW_ATLAS_HAS_CULL) {
                cull = (const SkRect*)reader->skip(sizeof(SkRect));
            }
            BREAK_ON_READ_ERROR(reader);

            SkSamplingOptions sampling;
            if (flags & DRAW_ATLAS_HAS_SAMPLING) {
                sampling = reader->readSampling();
                BREAK_ON_READ_ERROR(reader);
            }
            canvas->drawAtlas(atlas, xform, tex, colors, count, mode, sampling, cull, paint);
        } break;
        case DRAW_CLEAR: {
            auto c = reader->readInt();
            BREAK_ON_READ_ERROR(reader);

            canvas->clear(c);
        } break;
        case DRAW_DATA: {
            // This opcode is now dead, just need to skip it for backwards compatibility
            size_t length = reader->readInt();
            (void)reader->skip(length);
            // skip handles padding the read out to a multiple of 4
        } break;
        case DRAW_DRAWABLE: {
            auto* d = fPictureData->getDrawable(reader);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawDrawable(d);
        } break;
        case DRAW_DRAWABLE_MATRIX: {
            SkMatrix matrix;
            reader->readMatrix(&matrix);
            SkDrawable* drawable = fPictureData->getDrawable(reader);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawDrawable(drawable, &matrix);
        } break;
        case DRAW_DRRECT: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            SkRRect outer, inner;
            reader->readRRect(&outer);
            reader->readRRect(&inner);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawDRRect(outer, inner, paint);
        } break;
        case DRAW_EDGEAA_QUAD: {
            SkRect rect;
            reader->readRect(&rect);
            SkCanvas::QuadAAFlags aaFlags = static_cast<SkCanvas::QuadAAFlags>(reader->read32());
            SkColor4f color;
            reader->readColor4f(&color);
            SkBlendMode blend = reader->read32LE(SkBlendMode::kLastMode);
            BREAK_ON_READ_ERROR(reader);
            bool hasClip = reader->readInt();
            SkPoint* clip = nullptr;
            if (hasClip) {
                clip = (SkPoint*) reader->skip(4, sizeof(SkPoint));
            }
            BREAK_ON_READ_ERROR(reader);
            canvas->experimental_DrawEdgeAAQuad(rect, clip, aaFlags, color, blend);
        } break;
        case DRAW_EDGEAA_IMAGE_SET:
        case DRAW_EDGEAA_IMAGE_SET2: {
            static const size_t kEntryReadSize =
                    4 * sizeof(uint32_t) + 2 * sizeof(SkRect) + sizeof(SkScalar);
            static const size_t kMatrixSize = 9 * sizeof(SkScalar); // != sizeof(SkMatrix)

            int cnt = reader->readInt();
            if (!reader->validate(cnt >= 0)) {
                break;
            }
            const SkPaint* paint = fPictureData->optionalPaint(reader);

            // SET2 serializes explicit sampling options; SET predates them.
            SkSamplingOptions sampling;
            if (op == DRAW_EDGEAA_IMAGE_SET2) {
                sampling = reader->readSampling();
            } else {
                sampling = SkSamplingOptions(SkFilterMode::kNearest);
            }

            SkCanvas::SrcRectConstraint constraint =
                    reader->checkRange(SkCanvas::kStrict_SrcRectConstraint,
                                       SkCanvas::kFast_SrcRectConstraint);

            if (!reader->validate(SkSafeMath::Mul(cnt, kEntryReadSize) <= reader->available())) {
                break;
            }

            // Track minimum necessary clip points and matrices that must be provided to satisfy
            // the entries.
            int expectedClips = 0;
            int maxMatrixIndex = -1;
            AutoTArray<SkCanvas::ImageSetEntry> set(cnt);
            for (int i = 0; i < cnt && reader->isValid(); ++i) {
                set[i].fImage = sk_ref_sp(fPictureData->getImage(reader));
                reader->readRect(&set[i].fSrcRect);
                reader->readRect(&set[i].fDstRect);
                set[i].fMatrixIndex = reader->readInt();
                set[i].fAlpha = reader->readScalar();
                set[i].fAAFlags = reader->readUInt();
                set[i].fHasClip = reader->readInt();

                expectedClips += set[i].fHasClip ? 1 : 0;
                if (set[i].fMatrixIndex > maxMatrixIndex) {
                    maxMatrixIndex = set[i].fMatrixIndex;
                }
            }

            int dstClipCount = reader->readInt();
            SkPoint* dstClips = nullptr;
            if (!reader->validate(dstClipCount >= 0) ||
                !reader->validate(expectedClips <= dstClipCount)) {
                // A bad dstClipCount (either negative, or not enough to satisfy entries).
                break;
            } else if (dstClipCount > 0) {
                dstClips = (SkPoint*) reader->skip(dstClipCount, sizeof(SkPoint));
                if (dstClips == nullptr) {
                    // Not enough bytes remaining so the reader has been invalidated
                    break;
                }
            }
            int matrixCount = reader->readInt();
            if (!reader->validate(matrixCount >= 0) ||
                !reader->validate(maxMatrixIndex <= (matrixCount - 1)) ||
                !reader->validate(
                    SkSafeMath::Mul(matrixCount, kMatrixSize) <= reader->available())) {
                // Entries access out-of-bound matrix indices, given provided matrices or
                // there aren't enough bytes to provide that many matrices
                break;
            }
            TArray<SkMatrix> matrices(matrixCount);
            for (int i = 0; i < matrixCount && reader->isValid(); ++i) {
                reader->readMatrix(&matrices.push_back());
            }
            BREAK_ON_READ_ERROR(reader);

            canvas->experimental_DrawEdgeAAImageSet(set.get(), cnt, dstClips, matrices.begin(),
                                                    sampling, paint, constraint);
        } break;
        case DRAW_IMAGE: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            const SkImage* image = fPictureData->getImage(reader);
            SkPoint loc;
            reader->readPoint(&loc);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawImage(image, loc.fX, loc.fY,
                              SkSamplingOptions(SkFilterMode::kNearest),
                              paint);
        } break;
        case DRAW_IMAGE2: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            const SkImage* image = fPictureData->getImage(reader);
            SkPoint loc;
            reader->readPoint(&loc);
            SkSamplingOptions sampling = reader->readSampling();
            BREAK_ON_READ_ERROR(reader);

            canvas->drawImage(image, loc.fX, loc.fY, sampling, paint);
        } break;
        case DRAW_IMAGE_LATTICE: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            const SkImage* image = fPictureData->getImage(reader);
            SkCanvas::Lattice lattice;
            (void)SkCanvasPriv::ReadLattice(*reader, &lattice);
            const SkRect* dst = reader->skipT<SkRect>();
            BREAK_ON_READ_ERROR(reader);

            canvas->drawImageLattice(image, lattice, *dst, SkFilterMode::kNearest, paint);
        } break;
        case DRAW_IMAGE_LATTICE2: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            const SkImage* image = fPictureData->getImage(reader);
            SkCanvas::Lattice lattice;
            (void)SkCanvasPriv::ReadLattice(*reader, &lattice);
            const SkRect* dst = reader->skipT<SkRect>();
            SkFilterMode filter = reader->read32LE(SkFilterMode::kLinear);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawImageLattice(image, lattice, *dst, filter, paint);
        } break;
        case DRAW_IMAGE_NINE: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            const SkImage* image = fPictureData->getImage(reader);
            SkIRect center;
            reader->readIRect(&center);
            SkRect dst;
            reader->readRect(&dst);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawImageNine(image, center, dst, SkFilterMode::kNearest, paint);
        } break;
        case DRAW_IMAGE_RECT: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            const SkImage* image = fPictureData->getImage(reader);
            SkRect storage;
            const SkRect* src = get_rect_ptr(reader, &storage);   // may be null
            SkRect dst;
            reader->readRect(&dst);     // required
            // DRAW_IMAGE_RECT_STRICT assumes this constraint, and doesn't store it
            SkCanvas::SrcRectConstraint constraint = SkCanvas::kStrict_SrcRectConstraint;
            if (DRAW_IMAGE_RECT == op) {
                // newer op-code stores the constraint explicitly
                constraint = reader->checkRange(SkCanvas::kStrict_SrcRectConstraint,
                                                SkCanvas::kFast_SrcRectConstraint);
            }
            BREAK_ON_READ_ERROR(reader);

            auto sampling = SkSamplingOptions(SkFilterMode::kNearest);
            if (src) {
                canvas->drawImageRect(image, *src, dst, sampling, paint, constraint);
            } else {
                canvas->drawImageRect(image, dst, sampling, paint);
            }
        } break;
        case DRAW_IMAGE_RECT2: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            const SkImage* image = fPictureData->getImage(reader);
            SkRect src = reader->readRect();
            SkRect dst = reader->readRect();
            SkSamplingOptions sampling = reader->readSampling();
            auto constraint = reader->read32LE(SkCanvas::kFast_SrcRectConstraint);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawImageRect(image, src, dst, sampling, paint, constraint);
        } break;
        case DRAW_OVAL: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            SkRect rect;
            reader->readRect(&rect);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawOval(rect, paint);
        } break;
        case DRAW_PAINT: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawPaint(paint);
        } break;
        case DRAW_BEHIND_PAINT: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            BREAK_ON_READ_ERROR(reader);

            SkCanvasPriv::DrawBehind(canvas, paint);
        } break;
        case DRAW_PATCH: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);

            const SkPoint* cubics = (const SkPoint*)reader->skip(SkPatchUtils::kNumCtrlPts,
                                                                 sizeof(SkPoint));
            uint32_t flag = reader->readInt();
            const SkColor* colors = nullptr;
            if (flag & DRAW_VERTICES_HAS_COLORS) {
                colors = (const SkColor*)reader->skip(SkPatchUtils::kNumCorners, sizeof(SkColor));
            }
            const SkPoint* texCoords = nullptr;
            if (flag & DRAW_VERTICES_HAS_TEXS) {
                texCoords = (const SkPoint*)reader->skip(SkPatchUtils::kNumCorners,
                                                         sizeof(SkPoint));
            }
            // Out-of-range blend modes are silently replaced by kModulate.
            SkBlendMode bmode = SkBlendMode::kModulate;
            if (flag & DRAW_VERTICES_HAS_XFER) {
                unsigned mode = reader->readInt();
                if (mode <= (unsigned)SkBlendMode::kLastMode) {
                    bmode = (SkBlendMode)mode;
                }
            }
            BREAK_ON_READ_ERROR(reader);

            canvas->drawPatch(cubics, colors, texCoords, bmode, paint);
        } break;
        case DRAW_PATH: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            const auto& path = fPictureData->getPath(reader);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawPath(path, paint);
        } break;
        case DRAW_PICTURE: {
            const auto* pic = fPictureData->getPicture(reader);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawPicture(pic);
        } break;
        case DRAW_PICTURE_MATRIX_PAINT: {
            const SkPaint* paint = fPictureData->optionalPaint(reader);
            SkMatrix matrix;
            reader->readMatrix(&matrix);
            const SkPicture* pic = fPictureData->getPicture(reader);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawPicture(pic, &matrix, paint);
        } break;
        case DRAW_POINTS: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            SkCanvas::PointMode mode = reader->checkRange(SkCanvas::kPoints_PointMode,
                                                          SkCanvas::kPolygon_PointMode);
            size_t count = reader->readInt();
            const SkPoint* pts = (const SkPoint*)reader->skip(count, sizeof(SkPoint));
            BREAK_ON_READ_ERROR(reader);

            canvas->drawPoints(mode, count, pts, paint);
        } break;
        case DRAW_RECT: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            SkRect rect;
            reader->readRect(&rect);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawRect(rect, paint);
        } break;
        case DRAW_REGION: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            SkRegion region;
            reader->readRegion(&region);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawRegion(region, paint);
        } break;
        case DRAW_RRECT: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            SkRRect rrect;
            reader->readRRect(&rrect);
            BREAK_ON_READ_ERROR(reader);

            canvas->drawRRect(rrect, paint);
        } break;
        case DRAW_SHADOW_REC: {
            const auto& path = fPictureData->getPath(reader);
            SkDrawShadowRec rec;
            reader->readPoint3(&rec.fZPlaneParams);
            reader->readPoint3(&rec.fLightPos);
            rec.fLightRadius = reader->readScalar();
            rec.fAmbientColor = reader->read32();
            rec.fSpotColor = reader->read32();
            rec.fFlags = reader->read32();
            BREAK_ON_READ_ERROR(reader);

            canvas->private_draw_shadow_rec(path, rec);
        } break;
        case DRAW_TEXT_BLOB: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            const SkTextBlob* blob = fPictureData->getTextBlob(reader);
            SkScalar x = reader->readScalar();
            SkScalar y = reader->readScalar();
            BREAK_ON_READ_ERROR(reader);

            canvas->drawTextBlob(blob, x, y, paint);
        } break;
        case DRAW_SLUG: {
#if defined(SK_GANESH)
            const sktext::gpu::Slug* slug = fPictureData->getSlug(reader);
            BREAK_ON_READ_ERROR(reader);

            slug->draw(canvas);
#endif
        } break;
        case DRAW_VERTICES_OBJECT: {
            const SkPaint& paint = fPictureData->requiredPaint(reader);
            const SkVertices* vertices = fPictureData->getVertices(reader);
            // Bone data is obsolete; skip it to keep old streams readable.
            const int boneCount = reader->readInt();
            (void)reader->skip(boneCount, sizeof(SkVertices_DeprecatedBone));
            SkBlendMode bmode = reader->read32LE(SkBlendMode::kLastMode);
            BREAK_ON_READ_ERROR(reader);

            if (vertices) {  // TODO: read error if vertices == null?
                canvas->drawVertices(vertices, bmode, paint);
            }
        } break;
        case RESTORE:
            canvas->restore();
            break;
        case ROTATE: {
            auto deg = reader->readScalar();
            canvas->rotate(deg);
        } break;
        case SAVE:
            canvas->save();
            break;
        case SAVE_BEHIND: {
            uint32_t flags = reader->readInt();
            const SkRect* subset = nullptr;
            SkRect storage;
            if (flags & SAVEBEHIND_HAS_SUBSET) {
                reader->readRect(&storage);
                subset = &storage;
            }
            SkCanvasPriv::SaveBehind(canvas, subset);
        } break;
        case SAVE_LAYER_SAVELAYERREC: {
            SkCanvas::SaveLayerRec rec(nullptr, nullptr, nullptr, 0);
            const uint32_t flatFlags = reader->readInt();
            SkRect bounds;
            if (flatFlags & SAVELAYERREC_HAS_BOUNDS) {
                reader->readRect(&bounds);
                rec.fBounds = &bounds;
            }
            if (flatFlags & SAVELAYERREC_HAS_PAINT) {
                rec.fPaint = &fPictureData->requiredPaint(reader);
            }
            if (flatFlags & SAVELAYERREC_HAS_BACKDROP) {
                const SkPaint& paint = fPictureData->requiredPaint(reader);
                rec.fBackdrop = paint.getImageFilter();
            }
            if (flatFlags & SAVELAYERREC_HAS_FLAGS) {
                rec.fSaveLayerFlags = reader->readInt();
            }
            // Obsolete fields are still consumed so the stream stays aligned.
            if (flatFlags & SAVELAYERREC_HAS_CLIPMASK_OBSOLETE) {
                (void)fPictureData->getImage(reader);
            }
            if (flatFlags & SAVELAYERREC_HAS_CLIPMATRIX_OBSOLETE) {
                SkMatrix clipMatrix_ignored;
                reader->readMatrix(&clipMatrix_ignored);
            }
            if (!reader->isVersionLT(SkPicturePriv::Version::kBackdropScaleFactor) &&
                (flatFlags & SAVELAYERREC_HAS_BACKDROP_SCALE)) {
                SkCanvasPriv::SetBackdropScaleFactor(&rec, reader->readScalar());
            }
            BREAK_ON_READ_ERROR(reader);

            canvas->saveLayer(rec);
        } break;
        case SCALE: {
            SkScalar sx = reader->readScalar();
            SkScalar sy = reader->readScalar();
            canvas->scale(sx, sy);
        } break;
        case SET_M44: {
            SkM44 m;
            reader->read(&m);
            canvas->setMatrix(initialMatrix * m);
        } break;
        case SET_MATRIX: {
            SkMatrix matrix;
            reader->readMatrix(&matrix);
            canvas->setMatrix(initialMatrix * SkM44(matrix));
        } break;
        case SKEW: {
            SkScalar sx = reader->readScalar();
            SkScalar sy = reader->readScalar();
            canvas->skew(sx, sy);
        } break;
        case TRANSLATE: {
            SkScalar dx = reader->readScalar();
            SkScalar dy = reader->readScalar();
            canvas->translate(dx, dy);
        } break;
        default:
            reader->validate(false);    // unknown op
            break;
    }

#undef BREAK_ON_READ_ERROR
}
diff --git a/gfx/skia/skia/src/core/SkPicturePlayback.h b/gfx/skia/skia/src/core/SkPicturePlayback.h
new file mode 100644
index 0000000000..fdad38e92d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicturePlayback.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPicturePlayback_DEFINED
+#define SkPicturePlayback_DEFINED
+
+#include "include/core/SkM44.h"
+#include "include/core/SkPicture.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "src/core/SkPictureFlat.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SkCanvas;
+class SkPictureData;
+class SkReadBuffer;
+
// The basic picture playback class replays the provided picture into a canvas.
class SkPicturePlayback final : SkNoncopyable {
public:
    SkPicturePlayback(const SkPictureData* data)
        : fPictureData(data)
        , fCurOffset(0) {
    }

    // Replays fPictureData into 'canvas'. 'callback', when non-null, is polled
    // between ops and can abort playback early. 'buffer', when non-null,
    // receives any read-validation failure encountered during the replay.
    void draw(SkCanvas* canvas, SkPicture::AbortCallback*, SkReadBuffer* buffer);

    // TODO: remove the curOp calls after cleaning up GrGatherDevice
    // Return the ID of the operation currently being executed when playing
    // back. 0 indicates no call is active.
    size_t curOpID() const { return fCurOffset; }
    void resetOpID() { fCurOffset = 0; }

private:
    const SkPictureData* fPictureData;

    // The offset of the current operation when within the draw method
    size_t fCurOffset;

    // Decodes one op's payload and dispatches it onto the canvas.
    void handleOp(SkReadBuffer* reader,
                  DrawType op,
                  uint32_t size,
                  SkCanvas* canvas,
                  const SkM44& initialMatrix);

    // RAII helper: zeroes the playback's current-op offset on destruction so
    // curOpID() reports "inactive" on every exit path from draw().
    class AutoResetOpID {
    public:
        AutoResetOpID(SkPicturePlayback* playback) : fPlayback(playback) { }
        ~AutoResetOpID() {
            if (fPlayback) {
                fPlayback->resetOpID();
            }
        }

    private:
        SkPicturePlayback* fPlayback;
    };

    using INHERITED = SkNoncopyable;
};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPicturePriv.h b/gfx/skia/skia/src/core/SkPicturePriv.h
new file mode 100644
index 0000000000..e1a0b10e98
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPicturePriv.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPicturePriv_DEFINED
+#define SkPicturePriv_DEFINED
+
+#include "include/core/SkPicture.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+class SkStream;
+struct SkPictInfo;
+
+class SkPicturePriv {
+public:
+ /**
+ * Recreate a picture that was serialized into a buffer. If the creation requires bitmap
+ * decoding, the decoder must be set on the SkReadBuffer parameter by calling
+ * SkReadBuffer::setBitmapDecoder() before calling SkPicture::MakeFromBuffer().
+ * @param buffer Serialized picture data.
+ * @return A new SkPicture representing the serialized data, or NULL if the buffer is
+ * invalid.
+ */
+ static sk_sp<SkPicture> MakeFromBuffer(SkReadBuffer& buffer);
+
+ /**
+ * Serialize to a buffer.
+ */
+ static void Flatten(const sk_sp<const SkPicture> , SkWriteBuffer& buffer);
+
+ // Returns NULL if this is not an SkBigPicture.
+ static const SkBigPicture* AsSkBigPicture(const sk_sp<const SkPicture> picture) {
+ return picture->asSkBigPicture();
+ }
+
+ static uint64_t MakeSharedID(uint32_t pictureID) {
+ uint64_t sharedID = SkSetFourByteTag('p', 'i', 'c', 't');
+ return (sharedID << 32) | pictureID;
+ }
+
+ static void AddedToCache(const SkPicture* pic) {
+ pic->fAddedToCache.store(true);
+ }
+
+ // V35: Store SkRect (rather than width & height) in header
+ // V36: Remove (obsolete) alphatype from SkColorTable
+ // V37: Added shadow only option to SkDropShadowImageFilter (last version to record CLEAR)
+ // V38: Added PictureResolution option to SkPictureImageFilter
+ // V39: Added FilterLevel option to SkPictureImageFilter
+ // V40: Remove UniqueID serialization from SkImageFilter.
+ // V41: Added serialization of SkBitmapSource's filterQuality parameter
+ // V42: Added a bool to SkPictureShader serialization to indicate did-we-serialize-a-picture?
+ // V43: Added DRAW_IMAGE and DRAW_IMAGE_RECT opt codes to serialized data
+ // V44: Move annotations from paint to drawAnnotation
+ // V45: Add invNormRotation to SkLightingShader.
+ // V46: Add drawTextRSXform
+ // V47: Add occluder rect to SkBlurMaskFilter
+ // V48: Read and write extended SkTextBlobs.
+ // V49: Gradients serialized as SkColor4f + SkColorSpace
+ // V50: SkXfermode -> SkBlendMode
+ // V51: more SkXfermode -> SkBlendMode
+ // V52: Remove SkTextBlob::fRunCount
+ // V53: SaveLayerRec clip mask
+ // V54: ComposeShader can use a Mode or a Lerp
+ // V55: Drop blendmode[] from MergeImageFilter
+ // V56: Add TileMode in SkBlurImageFilter.
+ // V57: Sweep tiling info.
+ // V58: No more 2pt conical flipping.
+ // V59: No more LocalSpace option on PictureImageFilter
+ // V60: Remove flags in picture header
+ // V61: Change SkDrawPictureRec to take two colors rather than two alphas
+ // V62: Don't negate size of custom encoded images (don't write origin x,y either)
+ // V63: Store image bounds (including origin) instead of just width/height to support subsets
+ // V64: Remove occluder feature from blur maskFilter
+ // V65: Float4 paint color
+ // V66: Add saveBehind
+ // V67: Blobs serialize fonts instead of paints
+ // V68: Paint doesn't serialize font-related stuff
+ // V69: Clean up duplicated and redundant SkImageFilter related enums
+ // V70: Image filters definitions hidden, registered names updated to include "Impl"
+ // V71: Unify erode and dilate image filters
+ // V72: SkColorFilter_Matrix domain (rgba vs. hsla)
+ // V73: Use SkColor4f in per-edge AA quad API
+ // V74: MorphologyImageFilter internal radius is SkScalar
+ // V75: SkVertices switched from unsafe use of SkReader32 to SkReadBuffer (like everything else)
+ // V76: Add filtering enum to ImageShader
+ // V77: Explicit filtering options on imageshaders
+ // V78: Serialize skmipmap data for images that have it
+ // V79: Cubic Resampler option on imageshader
+ // V80: Sampling options on imageshader
+ // V81: sampling parameters on drawImage/drawImageRect/etc.
+ // V82: Add filter param to picture-shader
+ // V83: SkMatrixImageFilter now takes SkSamplingOptions instead of SkFilterQuality
+ // V84: SkImageFilters::Image now takes SkSamplingOptions instead of SkFilterQuality
+ // V85: Remove legacy support for inheriting sampling from the paint.
+ // V86: Remove support for custom data inside SkVertices
+ // V87: SkPaint now holds a user-defined blend function (SkBlender), no longer has DrawLooper
+ // V88: Add blender to ComposeShader and BlendImageFilter
+ // V89: Deprecated SkClipOps are no longer supported
+ // V90: Private API for backdrop scale factor in SaveLayerRec
+ // V91: Added raw image shaders
+ // V92: Added anisotropic filtering to SkSamplingOptions
+ // V94: Removed local matrices from SkShaderBase. Local matrices always use SkLocalMatrixShader.
+ // V95: SkImageFilters::Shader only saves SkShader, not a full SkPaint
+
+ enum Version {
+ kPictureShaderFilterParam_Version = 82,
+ kMatrixImageFilterSampling_Version = 83,
+ kImageFilterImageSampling_Version = 84,
+ kNoFilterQualityShaders_Version = 85,
+ kVerticesRemoveCustomData_Version = 86,
+ kSkBlenderInSkPaint = 87,
+ kBlenderInEffects = 88,
+ kNoExpandingClipOps = 89,
+ kBackdropScaleFactor = 90,
+ kRawImageShaders = 91,
+ kAnisotropicFilter = 92,
+ kBlend4fColorFilter = 93,
+ kNoShaderLocalMatrix = 94,
+ kShaderImageFilterSerializeShader = 95,
+
+ // Only SKPs within the min/current picture version range (inclusive) can be read.
+ //
+ // When updating kMin_Version also update oldestSupportedSkpVersion in
+ // infra/bots/gen_tasks_logic/gen_tasks_logic.go
+ //
+ // Steps on how to find which oldestSupportedSkpVersion to use:
+ // 1) Find the git hash when the desired kMin_Version was the kCurrent_Version from the
+ // git logs: https://skia.googlesource.com/skia/+log/main/src/core/SkPicturePriv.h
+ // Eg: https://skia.googlesource.com/skia/+/bfd330d081952424a93d51715653e4d1314d4822%5E%21/#F1
+ //
+ // 2) Use that git hash to find the SKP asset version number at that time here:
+ // https://skia.googlesource.com/skia/+/bfd330d081952424a93d51715653e4d1314d4822/infra/bots/assets/skp/VERSION
+ //
+ // 3) [Optional] Increment the SKP asset version number from step 2 and verify that it has
+ // the expected version number by downloading the asset and running skpinfo on it.
+ //
+ // 4) Use the incremented SKP asset version number as the oldestSupportedSkpVersion in
+ // infra/bots/gen_tasks_logic/gen_tasks_logic.go
+ //
+ // 5) Run `make -C infra/bots train`
+ //
+ // Contact the Infra Gardener (or directly ping rmistry@) if the above steps do not work
+ // for you.
+ kMin_Version = kPictureShaderFilterParam_Version,
+ kCurrent_Version = kShaderImageFilterSerializeShader
+ };
+};
+
+bool SkPicture_StreamIsSKP(SkStream*, SkPictInfo*);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureRecord.cpp b/gfx/skia/skia/src/core/SkPictureRecord.cpp
new file mode 100644
index 0000000000..6a4ee9c467
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecord.cpp
@@ -0,0 +1,953 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkPictureRecord.h"
+
+#include "include/core/SkRRect.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTextBlob.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkTSearch.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkSamplingPriv.h"
+#include "src/image/SkImage_Base.h"
+#include "src/utils/SkPatchUtils.h"
+
+#if defined(SK_GANESH)
+#include "include/private/chromium/Slug.h"
+#endif
+
+using namespace skia_private;
+
+#define HEAP_BLOCK_SIZE 4096
+
+enum {
+ // just need a value that save or getSaveCount would never return
+ kNoInitialSave = -1,
+};
+
+// A lot of basic types get stored as a uint32_t: bools, ints, paint indices, etc.
+static int const kUInt32Size = 4;
+
+SkPictureRecord::SkPictureRecord(const SkIRect& dimensions, uint32_t flags)
+ : INHERITED(dimensions)
+ , fRecordFlags(flags)
+ , fInitialSaveCount(kNoInitialSave) {
+}
+
+SkPictureRecord::SkPictureRecord(const SkISize& dimensions, uint32_t flags)
+ : SkPictureRecord(SkIRect::MakeSize(dimensions), flags) {}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPictureRecord::onFlush() {
+ size_t size = sizeof(kUInt32Size);
+ size_t initialOffset = this->addDraw(FLUSH, &size);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::willSave() {
+ // record the offset to us, making it non-positive to distinguish a save
+ // from a clip entry.
+ fRestoreOffsetStack.push_back(-(int32_t)fWriter.bytesWritten());
+ this->recordSave();
+
+ this->INHERITED::willSave();
+}
+
+void SkPictureRecord::recordSave() {
+ // op only
+ size_t size = sizeof(kUInt32Size);
+ size_t initialOffset = this->addDraw(SAVE, &size);
+
+ this->validate(initialOffset, size);
+}
+
+SkCanvas::SaveLayerStrategy SkPictureRecord::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ // record the offset to us, making it non-positive to distinguish a save
+ // from a clip entry.
+ fRestoreOffsetStack.push_back(-(int32_t)fWriter.bytesWritten());
+ this->recordSaveLayer(rec);
+
+ (void)this->INHERITED::getSaveLayerStrategy(rec);
+ /* No need for a (potentially very big) layer which we don't actually need
+ at this time (and may not be able to afford since during record our
+ clip starts out the size of the picture, which is often much larger
+ than the size of the actual device we'll use during playback).
+ */
+ return kNoLayer_SaveLayerStrategy;
+}
+
+bool SkPictureRecord::onDoSaveBehind(const SkRect* subset) {
+ fRestoreOffsetStack.push_back(-(int32_t)fWriter.bytesWritten());
+
+ size_t size = sizeof(kUInt32Size) + sizeof(uint32_t); // op + flags
+ uint32_t flags = 0;
+ if (subset) {
+ flags |= SAVEBEHIND_HAS_SUBSET;
+ size += sizeof(*subset);
+ }
+
+ size_t initialOffset = this->addDraw(SAVE_BEHIND, &size);
+ this->addInt(flags);
+ if (subset) {
+ this->addRect(*subset);
+ }
+
+ this->validate(initialOffset, size);
+ return false;
+}
+
+void SkPictureRecord::recordSaveLayer(const SaveLayerRec& rec) {
+ // op + flatflags
+ size_t size = 2 * kUInt32Size;
+ uint32_t flatFlags = 0;
+
+ if (rec.fBounds) {
+ flatFlags |= SAVELAYERREC_HAS_BOUNDS;
+ size += sizeof(*rec.fBounds);
+ }
+ if (rec.fPaint) {
+ flatFlags |= SAVELAYERREC_HAS_PAINT;
+ size += sizeof(uint32_t); // index
+ }
+ if (rec.fBackdrop) {
+ flatFlags |= SAVELAYERREC_HAS_BACKDROP;
+ size += sizeof(uint32_t); // (paint) index
+ }
+ if (rec.fSaveLayerFlags) {
+ flatFlags |= SAVELAYERREC_HAS_FLAGS;
+ size += sizeof(uint32_t);
+ }
+ if (SkCanvasPriv::GetBackdropScaleFactor(rec) != 1.f) {
+ flatFlags |= SAVELAYERREC_HAS_BACKDROP_SCALE;
+ size += sizeof(SkScalar);
+ }
+
+ const size_t initialOffset = this->addDraw(SAVE_LAYER_SAVELAYERREC, &size);
+ this->addInt(flatFlags);
+ if (flatFlags & SAVELAYERREC_HAS_BOUNDS) {
+ this->addRect(*rec.fBounds);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_PAINT) {
+ this->addPaintPtr(rec.fPaint);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_BACKDROP) {
+ // overkill, but we didn't already track single flattenables, so using a paint for that
+ SkPaint paint;
+ paint.setImageFilter(sk_ref_sp(const_cast<SkImageFilter*>(rec.fBackdrop)));
+ this->addPaint(paint);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_FLAGS) {
+ this->addInt(rec.fSaveLayerFlags);
+ }
+ if (flatFlags & SAVELAYERREC_HAS_BACKDROP_SCALE) {
+ this->addScalar(SkCanvasPriv::GetBackdropScaleFactor(rec));
+ }
+ this->validate(initialOffset, size);
+}
+
+#ifdef SK_DEBUG
+/*
+ * Read the op code from 'offset' in 'writer' and extract the size too.
+ */
+static DrawType peek_op_and_size(SkWriter32* writer, size_t offset, uint32_t* size) {
+ uint32_t peek = writer->readTAt<uint32_t>(offset);
+
+ uint32_t op;
+ UNPACK_8_24(peek, op, *size);
+ if (MASK_24 == *size) {
+ // size required its own slot right after the op code
+ *size = writer->readTAt<uint32_t>(offset + kUInt32Size);
+ }
+ return (DrawType) op;
+}
+#endif//SK_DEBUG
+
+void SkPictureRecord::willRestore() {
+#if 0
+ SkASSERT(fRestoreOffsetStack.count() > 1);
+#endif
+
+ // check for underflow
+ if (fRestoreOffsetStack.empty()) {
+ return;
+ }
+
+ this->recordRestore();
+
+ fRestoreOffsetStack.pop_back();
+
+ this->INHERITED::willRestore();
+}
+
+void SkPictureRecord::recordRestore(bool fillInSkips) {
+ if (fillInSkips) {
+ this->fillRestoreOffsetPlaceholdersForCurrentStackLevel((uint32_t)fWriter.bytesWritten());
+ }
+ size_t size = 1 * kUInt32Size; // RESTORE consists solely of 1 op code
+ size_t initialOffset = this->addDraw(RESTORE, &size);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::recordTranslate(const SkMatrix& m) {
+ SkASSERT(SkMatrix::kTranslate_Mask == m.getType());
+
+ // op + dx + dy
+ size_t size = 1 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(TRANSLATE, &size);
+ this->addScalar(m.getTranslateX());
+ this->addScalar(m.getTranslateY());
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::recordScale(const SkMatrix& m) {
+ SkASSERT(SkMatrix::kScale_Mask == m.getType());
+
+ // op + sx + sy
+ size_t size = 1 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(SCALE, &size);
+ this->addScalar(m.getScaleX());
+ this->addScalar(m.getScaleY());
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::didConcat44(const SkM44& m) {
+ this->validate(fWriter.bytesWritten(), 0);
+ // op + matrix
+ size_t size = kUInt32Size + 16 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(CONCAT44, &size);
+ fWriter.write(SkMatrixPriv::M44ColMajor(m), 16 * sizeof(SkScalar));
+ this->validate(initialOffset, size);
+
+ this->INHERITED::didConcat44(m);
+}
+
+void SkPictureRecord::didSetM44(const SkM44& m) {
+ this->validate(fWriter.bytesWritten(), 0);
+ // op + matrix
+ size_t size = kUInt32Size + 16 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(SET_M44, &size);
+ fWriter.write(SkMatrixPriv::M44ColMajor(m), 16 * sizeof(SkScalar));
+ this->validate(initialOffset, size);
+ this->INHERITED::didSetM44(m);
+}
+
+void SkPictureRecord::didScale(SkScalar x, SkScalar y) {
+ this->didConcat44(SkM44::Scale(x, y));
+}
+
+void SkPictureRecord::didTranslate(SkScalar x, SkScalar y) {
+ this->didConcat44(SkM44::Translate(x, y));
+}
+
+void SkPictureRecord::recordConcat(const SkMatrix& matrix) {
+ this->validate(fWriter.bytesWritten(), 0);
+ // op + matrix
+ size_t size = kUInt32Size + SkMatrixPriv::WriteToMemory(matrix, nullptr);
+ size_t initialOffset = this->addDraw(CONCAT, &size);
+ this->addMatrix(matrix);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::fillRestoreOffsetPlaceholdersForCurrentStackLevel(uint32_t restoreOffset) {
+ int32_t offset = fRestoreOffsetStack.back();
+ while (offset > 0) {
+ uint32_t peek = fWriter.readTAt<uint32_t>(offset);
+ fWriter.overwriteTAt(offset, restoreOffset);
+ offset = peek;
+ }
+
+#ifdef SK_DEBUG
+ // offset of 0 has been disabled, so we skip it
+ if (offset > 0) {
+ // assert that the final offset value points to a save verb
+ uint32_t opSize;
+ DrawType drawOp = peek_op_and_size(&fWriter, -offset, &opSize);
+ SkASSERT(SAVE == drawOp || SAVE_LAYER_SAVELAYERREC == drawOp);
+ }
+#endif
+}
+
+void SkPictureRecord::beginRecording() {
+ // we have to call this *after* our constructor, to ensure that it gets
+ // recorded. This is balanced by restoreToCount() call from endRecording,
+ // which in-turn calls our overridden restore(), so those get recorded too.
+ fInitialSaveCount = this->save();
+}
+
+void SkPictureRecord::endRecording() {
+ SkASSERT(kNoInitialSave != fInitialSaveCount);
+ this->restoreToCount(fInitialSaveCount);
+}
+
+size_t SkPictureRecord::recordRestoreOffsetPlaceholder() {
+ if (fRestoreOffsetStack.empty()) {
+ return -1;
+ }
+
+ // The RestoreOffset field is initially filled with a placeholder
+ // value that points to the offset of the previous RestoreOffset
+ // in the current stack level, thus forming a linked list so that
+ // the restore offsets can be filled in when the corresponding
+ // restore command is recorded.
+ int32_t prevOffset = fRestoreOffsetStack.back();
+
+ size_t offset = fWriter.bytesWritten();
+ this->addInt(prevOffset);
+ fRestoreOffsetStack.back() = SkToU32(offset);
+ return offset;
+}
+
+void SkPictureRecord::onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->recordClipRect(rect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipRect(const SkRect& rect, SkClipOp op, bool doAA) {
+ // id + rect + clip params
+ size_t size = 1 * kUInt32Size + sizeof(rect) + 1 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.empty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_RECT, &size);
+ this->addRect(rect);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = this->recordRestoreOffsetPlaceholder();
+
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->recordClipRRect(rrect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipRRect(const SkRRect& rrect, SkClipOp op, bool doAA) {
+ // op + rrect + clip params
+ size_t size = 1 * kUInt32Size + SkRRect::kSizeInMemory + 1 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.empty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_RRECT, &size);
+ this->addRRect(rrect);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = recordRestoreOffsetPlaceholder();
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ int pathID = this->addPathToHeap(path);
+ this->recordClipPath(pathID, op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+size_t SkPictureRecord::recordClipPath(int pathID, SkClipOp op, bool doAA) {
+ // op + path index + clip params
+ size_t size = 3 * kUInt32Size;
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.empty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_PATH, &size);
+ this->addInt(pathID);
+ this->addInt(ClipParams_pack(op, doAA));
+ size_t offset = recordRestoreOffsetPlaceholder();
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onClipShader(sk_sp<SkShader> cs, SkClipOp op) {
+ // Overkill to store a whole paint, but we don't have an existing structure to just store
+ // shaders. If size becomes an issue in the future, we can optimize this.
+ SkPaint paint;
+ paint.setShader(cs);
+
+ // op + paint index + clipop
+ size_t size = 3 * kUInt32Size;
+ size_t initialOffset = this->addDraw(CLIP_SHADER_IN_PAINT, &size);
+ this->addPaint(paint);
+ this->addInt((int)op);
+ this->validate(initialOffset, size);
+
+ this->INHERITED::onClipShader(std::move(cs), op);
+}
+
+void SkPictureRecord::onClipRegion(const SkRegion& region, SkClipOp op) {
+ this->recordClipRegion(region, op);
+ this->INHERITED::onClipRegion(region, op);
+}
+
+size_t SkPictureRecord::recordClipRegion(const SkRegion& region, SkClipOp op) {
+ // op + clip params + region
+ size_t size = 2 * kUInt32Size + region.writeToMemory(nullptr);
+ // recordRestoreOffsetPlaceholder doesn't always write an offset
+ if (!fRestoreOffsetStack.empty()) {
+ // + restore offset
+ size += kUInt32Size;
+ }
+ size_t initialOffset = this->addDraw(CLIP_REGION, &size);
+ this->addRegion(region);
+ this->addInt(ClipParams_pack(op, false));
+ size_t offset = this->recordRestoreOffsetPlaceholder();
+
+ this->validate(initialOffset, size);
+ return offset;
+}
+
+void SkPictureRecord::onResetClip() {
+ if (!fRestoreOffsetStack.empty()) {
+ // Run back through any previous clip ops, and mark their offset to
+ // be 0, disabling their ability to trigger a jump-to-restore, otherwise
+ // they could hide this expansion of the clip.
+ this->fillRestoreOffsetPlaceholdersForCurrentStackLevel(0);
+ }
+ size_t size = sizeof(kUInt32Size);
+ size_t initialOffset = this->addDraw(RESET_CLIP, &size);
+ this->validate(initialOffset, size);
+ this->INHERITED::onResetClip();
+}
+
+void SkPictureRecord::onDrawPaint(const SkPaint& paint) {
+ // op + paint index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_PAINT, &size);
+ this->addPaint(paint);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawBehind(const SkPaint& paint) {
+ // logically the same as drawPaint, but with a diff enum
+ // op + paint index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_BEHIND_PAINT, &size);
+ this->addPaint(paint);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ // op + paint index + mode + count + point data
+ size_t size = 4 * kUInt32Size + count * sizeof(SkPoint);
+ size_t initialOffset = this->addDraw(DRAW_POINTS, &size);
+ this->addPaint(paint);
+
+ this->addInt(mode);
+ this->addInt(SkToInt(count));
+ fWriter.writeMul4(pts, count * sizeof(SkPoint));
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ // op + paint index + rect
+ size_t size = 2 * kUInt32Size + sizeof(oval);
+ size_t initialOffset = this->addDraw(DRAW_OVAL, &size);
+ this->addPaint(paint);
+ this->addRect(oval);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ // op + paint index + rect + start + sweep + bool (as int)
+ size_t size = 2 * kUInt32Size + sizeof(oval) + sizeof(startAngle) + sizeof(sweepAngle) +
+ sizeof(int);
+ size_t initialOffset = this->addDraw(DRAW_ARC, &size);
+ this->addPaint(paint);
+ this->addRect(oval);
+ this->addScalar(startAngle);
+ this->addScalar(sweepAngle);
+ this->addInt(useCenter);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ // op + paint index + rect
+ size_t size = 2 * kUInt32Size + sizeof(rect);
+ size_t initialOffset = this->addDraw(DRAW_RECT, &size);
+ this->addPaint(paint);
+ this->addRect(rect);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ // op + paint index + region
+ size_t regionBytes = region.writeToMemory(nullptr);
+ size_t size = 2 * kUInt32Size + regionBytes;
+ size_t initialOffset = this->addDraw(DRAW_REGION, &size);
+ this->addPaint(paint);
+ fWriter.writeRegion(region);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ // op + paint index + rrect
+ size_t size = 2 * kUInt32Size + SkRRect::kSizeInMemory;
+ size_t initialOffset = this->addDraw(DRAW_RRECT, &size);
+ this->addPaint(paint);
+ this->addRRect(rrect);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ // op + paint index + rrects
+ size_t size = 2 * kUInt32Size + SkRRect::kSizeInMemory * 2;
+ size_t initialOffset = this->addDraw(DRAW_DRRECT, &size);
+ this->addPaint(paint);
+ this->addRRect(outer);
+ this->addRRect(inner);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ // op + paint index + path index
+ size_t size = 3 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_PATH, &size);
+ this->addPaint(paint);
+ this->addPath(path);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImage2(const SkImage* image, SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ // op + paint_index + image_index + x + y
+ size_t size = 3 * kUInt32Size + 2 * sizeof(SkScalar) + SkSamplingPriv::FlatSize(sampling);
+ size_t initialOffset = this->addDraw(DRAW_IMAGE2, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ this->addScalar(x);
+ this->addScalar(y);
+ this->addSampling(sampling);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImageRect2(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ // id + paint_index + image_index + constraint
+ size_t size = 3 * kUInt32Size + 2 * sizeof(dst) + SkSamplingPriv::FlatSize(sampling) +
+ kUInt32Size;
+
+ size_t initialOffset = this->addDraw(DRAW_IMAGE_RECT2, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ this->addRect(src);
+ this->addRect(dst);
+ this->addSampling(sampling);
+ this->addInt(constraint);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawImageLattice2(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, SkFilterMode filter,
+ const SkPaint* paint) {
+ size_t latticeSize = SkCanvasPriv::WriteLattice(nullptr, lattice);
+ // op + paint index + image index + lattice + dst rect
+ size_t size = 3 * kUInt32Size + latticeSize + sizeof(dst) + sizeof(uint32_t); // filter
+ size_t initialOffset = this->addDraw(DRAW_IMAGE_LATTICE2, &size);
+ this->addPaintPtr(paint);
+ this->addImage(image);
+ (void)SkCanvasPriv::WriteLattice(fWriter.reservePad(latticeSize), lattice);
+ this->addRect(dst);
+ this->addInt(static_cast<uint32_t>(filter));
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+
+ // op + paint index + blob index + x/y
+ size_t size = 3 * kUInt32Size + 2 * sizeof(SkScalar);
+ size_t initialOffset = this->addDraw(DRAW_TEXT_BLOB, &size);
+
+ this->addPaint(paint);
+ this->addTextBlob(blob);
+ this->addScalar(x);
+ this->addScalar(y);
+
+ this->validate(initialOffset, size);
+}
+
+#if defined(SK_GANESH)
+void SkPictureRecord::onDrawSlug(const sktext::gpu::Slug* slug) {
+ // op + slug id
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_SLUG, &size);
+
+ this->addSlug(slug);
+ this->validate(initialOffset, size);
+}
+#endif
+
+void SkPictureRecord::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ // op + picture index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset;
+
+ if (nullptr == matrix && nullptr == paint) {
+ initialOffset = this->addDraw(DRAW_PICTURE, &size);
+ this->addPicture(picture);
+ } else {
+ const SkMatrix& m = matrix ? *matrix : SkMatrix::I();
+ size += SkMatrixPriv::WriteToMemory(m, nullptr) + kUInt32Size; // matrix + paint
+ initialOffset = this->addDraw(DRAW_PICTURE_MATRIX_PAINT, &size);
+ this->addPaintPtr(paint);
+ this->addMatrix(m);
+ this->addPicture(picture);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ // op + drawable index
+ size_t size = 2 * kUInt32Size;
+ size_t initialOffset;
+
+ if (nullptr == matrix) {
+ initialOffset = this->addDraw(DRAW_DRAWABLE, &size);
+ this->addDrawable(drawable);
+ } else {
+ size += SkMatrixPriv::WriteToMemory(*matrix, nullptr); // matrix
+ initialOffset = this->addDraw(DRAW_DRAWABLE_MATRIX, &size);
+ this->addMatrix(*matrix);
+ this->addDrawable(drawable);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawVerticesObject(const SkVertices* vertices,
+ SkBlendMode mode, const SkPaint& paint) {
+ // op + paint index + vertices index + zero_bones + mode
+ size_t size = 5 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_VERTICES_OBJECT, &size);
+
+ this->addPaint(paint);
+ this->addVertices(vertices);
+ this->addInt(0); // legacy bone count
+ this->addInt(static_cast<uint32_t>(mode));
+
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ // op + paint index + patch 12 control points + flag + patch 4 colors + 4 texture coordinates
+ size_t size = 2 * kUInt32Size + SkPatchUtils::kNumCtrlPts * sizeof(SkPoint) + kUInt32Size;
+ uint32_t flag = 0;
+ if (colors) {
+ flag |= DRAW_VERTICES_HAS_COLORS;
+ size += SkPatchUtils::kNumCorners * sizeof(SkColor);
+ }
+ if (texCoords) {
+ flag |= DRAW_VERTICES_HAS_TEXS;
+ size += SkPatchUtils::kNumCorners * sizeof(SkPoint);
+ }
+ if (SkBlendMode::kModulate != bmode) {
+ flag |= DRAW_VERTICES_HAS_XFER;
+ size += kUInt32Size;
+ }
+
+ size_t initialOffset = this->addDraw(DRAW_PATCH, &size);
+ this->addPaint(paint);
+ this->addPatch(cubics);
+ this->addInt(flag);
+
+ // write optional parameters
+ if (colors) {
+ fWriter.write(colors, SkPatchUtils::kNumCorners * sizeof(SkColor));
+ }
+ if (texCoords) {
+ fWriter.write(texCoords, SkPatchUtils::kNumCorners * sizeof(SkPoint));
+ }
+ if (flag & DRAW_VERTICES_HAS_XFER) {
+ this->addInt((int)bmode);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawAtlas2(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode,
+ const SkSamplingOptions& sampling, const SkRect* cull,
+ const SkPaint* paint) {
+ // [op + paint-index + atlas-index + flags + count] + [xform] + [tex] + [*colors + mode] + cull
+ size_t size = 5 * kUInt32Size + count * sizeof(SkRSXform) + count * sizeof(SkRect);
+ size += SkSamplingPriv::FlatSize(sampling);
+ uint32_t flags = 0;
+ if (colors) {
+ flags |= DRAW_ATLAS_HAS_COLORS;
+ size += count * sizeof(SkColor);
+ size += sizeof(uint32_t); // xfermode::mode
+ }
+ if (cull) {
+ flags |= DRAW_ATLAS_HAS_CULL;
+ size += sizeof(SkRect);
+ }
+ flags |= DRAW_ATLAS_HAS_SAMPLING;
+
+ size_t initialOffset = this->addDraw(DRAW_ATLAS, &size);
+ this->addPaintPtr(paint);
+ this->addImage(atlas);
+ this->addInt(flags);
+ this->addInt(count);
+ fWriter.write(xform, count * sizeof(SkRSXform));
+ fWriter.write(tex, count * sizeof(SkRect));
+
+ // write optional parameters
+ if (colors) {
+ fWriter.write(colors, count * sizeof(SkColor));
+ this->addInt((int)mode);
+ }
+ if (cull) {
+ fWriter.write(cull, sizeof(SkRect));
+ }
+ this->addSampling(sampling);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ // op + path index + zParams + lightPos + lightRadius + spot/ambient alphas + color + flags
+ size_t size = 2 * kUInt32Size + 2 * sizeof(SkPoint3) + 1 * sizeof(SkScalar) + 3 * kUInt32Size;
+ size_t initialOffset = this->addDraw(DRAW_SHADOW_REC, &size);
+
+ this->addPath(path);
+
+ fWriter.writePoint3(rec.fZPlaneParams);
+ fWriter.writePoint3(rec.fLightPos);
+ fWriter.writeScalar(rec.fLightRadius);
+ fWriter.write32(rec.fAmbientColor);
+ fWriter.write32(rec.fSpotColor);
+ fWriter.write32(rec.fFlags);
+
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ size_t keyLen = SkWriter32::WriteStringSize(key);
+ size_t valueLen = SkWriter32::WriteDataSize(value);
+ size_t size = 4 + sizeof(SkRect) + keyLen + valueLen;
+
+ size_t initialOffset = this->addDraw(DRAW_ANNOTATION, &size);
+ this->addRect(rect);
+ fWriter.writeString(key);
+ fWriter.writeData(value);
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ SkCanvas::QuadAAFlags aa, const SkColor4f& color,
+ SkBlendMode mode) {
+
+ // op + rect + aa flags + color + mode + hasClip(as int) + clipCount*points
+ size_t size = 4 * kUInt32Size + sizeof(SkColor4f) + sizeof(rect) +
+ (clip ? 4 : 0) * sizeof(SkPoint);
+ size_t initialOffset = this->addDraw(DRAW_EDGEAA_QUAD, &size);
+ this->addRect(rect);
+ this->addInt((int) aa);
+ fWriter.write(&color, sizeof(SkColor4f));
+ this->addInt((int) mode);
+ this->addInt(clip != nullptr);
+ if (clip) {
+ this->addPoints(clip, 4);
+ }
+ this->validate(initialOffset, size);
+}
+
+void SkPictureRecord::onDrawEdgeAAImageSet2(const SkCanvas::ImageSetEntry set[], int count,
+ const SkPoint dstClips[],
+ const SkMatrix preViewMatrices[],
+ const SkSamplingOptions& sampling,
+ const SkPaint* paint,
+ SkCanvas::SrcRectConstraint constraint) {
+ static constexpr size_t kMatrixSize = 9 * sizeof(SkScalar); // *not* sizeof(SkMatrix)
+ // op + count + paint + constraint + (image index, src rect, dst rect, alpha, aa flags,
+ // hasClip(int), matrixIndex) * cnt + totalClipCount + dstClips + totalMatrixCount + matrices
+ int totalDstClipCount, totalMatrixCount;
+ SkCanvasPriv::GetDstClipAndMatrixCounts(set, count, &totalDstClipCount, &totalMatrixCount);
+
+ size_t size = 6 * kUInt32Size + sizeof(SkPoint) * totalDstClipCount +
+ kMatrixSize * totalMatrixCount +
+ (4 * kUInt32Size + 2 * sizeof(SkRect) + sizeof(SkScalar)) * count +
+ SkSamplingPriv::FlatSize(sampling);
+ size_t initialOffset = this->addDraw(DRAW_EDGEAA_IMAGE_SET2, &size);
+ this->addInt(count);
+ this->addPaintPtr(paint);
+ this->addSampling(sampling);
+ this->addInt((int) constraint);
+ for (int i = 0; i < count; ++i) {
+ this->addImage(set[i].fImage.get());
+ this->addRect(set[i].fSrcRect);
+ this->addRect(set[i].fDstRect);
+ this->addInt(set[i].fMatrixIndex);
+ this->addScalar(set[i].fAlpha);
+ this->addInt((int)set[i].fAAFlags);
+ this->addInt(set[i].fHasClip);
+ }
+ this->addInt(totalDstClipCount);
+ this->addPoints(dstClips, totalDstClipCount);
+ this->addInt(totalMatrixCount);
+ for (int i = 0; i < totalMatrixCount; ++i) {
+ this->addMatrix(preViewMatrices[i]);
+ }
+ this->validate(initialOffset, size);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// De-duping helper.
+
+template <typename T>
+static bool equals(T* a, T* b) { return a->uniqueID() == b->uniqueID(); }
+
+template <>
+bool equals(SkDrawable* a, SkDrawable* b) {
+ // SkDrawable's generationID is not a stable unique identifier.
+ return a == b;
+}
+
+template <typename T>
+static int find_or_append(TArray<sk_sp<T>>& array, T* obj) {
+ for (int i = 0; i < array.size(); i++) {
+ if (equals(array[i].get(), obj)) {
+ return i;
+ }
+ }
+
+ array.push_back(sk_ref_sp(obj));
+
+ return array.size() - 1;
+}
+
+sk_sp<SkSurface> SkPictureRecord::onNewSurface(const SkImageInfo& info, const SkSurfaceProps&) {
+ return nullptr;
+}
+
+void SkPictureRecord::addImage(const SkImage* image) {
+ // convention for images is 0-based index
+ this->addInt(find_or_append(fImages, image));
+}
+
+void SkPictureRecord::addMatrix(const SkMatrix& matrix) {
+ fWriter.writeMatrix(matrix);
+}
+
+void SkPictureRecord::addPaintPtr(const SkPaint* paint) {
+ if (paint) {
+ fPaints.push_back(*paint);
+ this->addInt(fPaints.size());
+ } else {
+ this->addInt(0);
+ }
+}
+
+int SkPictureRecord::addPathToHeap(const SkPath& path) {
+ if (int* n = fPaths.find(path)) {
+ return *n;
+ }
+ int n = fPaths.count() + 1; // 0 is reserved for null / error.
+ fPaths.set(path, n);
+ return n;
+}
+
+void SkPictureRecord::addPath(const SkPath& path) {
+ this->addInt(this->addPathToHeap(path));
+}
+
+void SkPictureRecord::addPatch(const SkPoint cubics[12]) {
+ fWriter.write(cubics, SkPatchUtils::kNumCtrlPts * sizeof(SkPoint));
+}
+
+void SkPictureRecord::addPicture(const SkPicture* picture) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fPictures, picture) + 1);
+}
+
+void SkPictureRecord::addDrawable(SkDrawable* drawable) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fDrawables, drawable) + 1);
+}
+
+void SkPictureRecord::addPoint(const SkPoint& point) {
+ fWriter.writePoint(point);
+}
+
+void SkPictureRecord::addPoints(const SkPoint pts[], int count) {
+ fWriter.writeMul4(pts, count * sizeof(SkPoint));
+}
+
+void SkPictureRecord::addNoOp() {
+ size_t size = kUInt32Size; // op
+ this->addDraw(NOOP, &size);
+}
+
+void SkPictureRecord::addRect(const SkRect& rect) {
+ fWriter.writeRect(rect);
+}
+
+void SkPictureRecord::addRectPtr(const SkRect* rect) {
+ if (fWriter.writeBool(rect != nullptr)) {
+ fWriter.writeRect(*rect);
+ }
+}
+
+void SkPictureRecord::addIRect(const SkIRect& rect) {
+ fWriter.write(&rect, sizeof(rect));
+}
+
+void SkPictureRecord::addIRectPtr(const SkIRect* rect) {
+ if (fWriter.writeBool(rect != nullptr)) {
+ *(SkIRect*)fWriter.reserve(sizeof(SkIRect)) = *rect;
+ }
+}
+
+void SkPictureRecord::addRRect(const SkRRect& rrect) {
+ fWriter.writeRRect(rrect);
+}
+
+void SkPictureRecord::addRegion(const SkRegion& region) {
+ fWriter.writeRegion(region);
+}
+
+void SkPictureRecord::addSampling(const SkSamplingOptions& sampling) {
+ fWriter.writeSampling(sampling);
+}
+
+void SkPictureRecord::addText(const void* text, size_t byteLength) {
+ addInt(SkToInt(byteLength));
+ fWriter.writePad(text, byteLength);
+}
+
+void SkPictureRecord::addTextBlob(const SkTextBlob* blob) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fTextBlobs, blob) + 1);
+}
+
+#if defined(SK_GANESH)
+void SkPictureRecord::addSlug(const sktext::gpu::Slug* slug) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fSlugs, slug) + 1);
+}
+#endif
+
+void SkPictureRecord::addVertices(const SkVertices* vertices) {
+ // follow the convention of recording a 1-based index
+ this->addInt(find_or_append(fVertices, vertices) + 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/gfx/skia/skia/src/core/SkPictureRecord.h b/gfx/skia/skia/src/core/SkPictureRecord.h
new file mode 100644
index 0000000000..dd609b7b8f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecord.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureRecord_DEFINED
+#define SkPictureRecord_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkVertices.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkTHash.h"
+#include "src/core/SkWriter32.h"
+
+// These macros help with packing and unpacking a single byte value and
+// a 3 byte value into/out of a uint32_t
+#define MASK_24 0x00FFFFFF
+#define UNPACK_8_24(combined, small, large) \
+ small = (combined >> 24) & 0xFF; \
+ large = combined & MASK_24
+#define PACK_8_24(small, large) ((small << 24) | large)
+
+
+class SkPictureRecord : public SkCanvasVirtualEnforcer<SkCanvas> {
+public:
+ SkPictureRecord(const SkISize& dimensions, uint32_t recordFlags);
+
+ SkPictureRecord(const SkIRect& dimensions, uint32_t recordFlags);
+
+ const skia_private::TArray<sk_sp<const SkPicture>>& getPictures() const {
+ return fPictures;
+ }
+
+ const skia_private::TArray<sk_sp<SkDrawable>>& getDrawables() const {
+ return fDrawables;
+ }
+
+ const skia_private::TArray<sk_sp<const SkTextBlob>>& getTextBlobs() const {
+ return fTextBlobs;
+ }
+
+#if defined(SK_GANESH)
+ const skia_private::TArray<sk_sp<const sktext::gpu::Slug>>& getSlugs() const {
+ return fSlugs;
+ }
+#endif
+
+ const skia_private::TArray<sk_sp<const SkVertices>>& getVertices() const {
+ return fVertices;
+ }
+
+ const skia_private::TArray<sk_sp<const SkImage>>& getImages() const {
+ return fImages;
+ }
+
+ sk_sp<SkData> opData() const {
+ this->validate(fWriter.bytesWritten(), 0);
+
+ if (fWriter.bytesWritten() == 0) {
+ return SkData::MakeEmpty();
+ }
+ return fWriter.snapshotAsData();
+ }
+
+ void setFlags(uint32_t recordFlags) {
+ fRecordFlags = recordFlags;
+ }
+
+ const SkWriter32& writeStream() const {
+ return fWriter;
+ }
+
+ void beginRecording();
+ void endRecording();
+
+protected:
+ void addNoOp();
+
+private:
+ void handleOptimization(int opt);
+ size_t recordRestoreOffsetPlaceholder();
+ void fillRestoreOffsetPlaceholdersForCurrentStackLevel(uint32_t restoreOffset);
+
+ SkTDArray<int32_t> fRestoreOffsetStack;
+
+ SkTDArray<uint32_t> fCullOffsetStack;
+
+ /*
+ * Write the 'drawType' operation and chunk size to the skp. 'size'
+ * can potentially be increased if the chunk size needs its own storage
+ * location (i.e., it overflows 24 bits).
+ * Returns the start offset of the chunk. This is the location at which
+ * the opcode & size are stored.
+ * TODO: since we are handing the size into here we could call reserve
+ * and then return a pointer to the memory storage. This could decrease
+ * allocation overhead but could lead to more wasted space (the tail
+ * end of blocks could go unused). Possibly add a second addDraw that
+ * operates in this manner.
+ */
+ size_t addDraw(DrawType drawType, size_t* size) {
+ size_t offset = fWriter.bytesWritten();
+
+ SkASSERT_RELEASE(this->predrawNotify());
+
+ SkASSERT(0 != *size);
+ SkASSERT(((uint8_t) drawType) == drawType);
+
+ if (0 != (*size & ~MASK_24) || *size == MASK_24) {
+ fWriter.writeInt(PACK_8_24(drawType, MASK_24));
+ *size += 1;
+ fWriter.writeInt(SkToU32(*size));
+ } else {
+ fWriter.writeInt(PACK_8_24(drawType, SkToU32(*size)));
+ }
+
+ return offset;
+ }
+
+ void addInt(int value) {
+ fWriter.writeInt(value);
+ }
+ void addScalar(SkScalar scalar) {
+ fWriter.writeScalar(scalar);
+ }
+
+ void addImage(const SkImage*);
+ void addMatrix(const SkMatrix& matrix);
+ void addPaint(const SkPaint& paint) { this->addPaintPtr(&paint); }
+ void addPaintPtr(const SkPaint* paint);
+ void addPatch(const SkPoint cubics[12]);
+ void addPath(const SkPath& path);
+ void addPicture(const SkPicture* picture);
+ void addDrawable(SkDrawable* picture);
+ void addPoint(const SkPoint& point);
+ void addPoints(const SkPoint pts[], int count);
+ void addRect(const SkRect& rect);
+ void addRectPtr(const SkRect* rect);
+ void addIRect(const SkIRect& rect);
+ void addIRectPtr(const SkIRect* rect);
+ void addRRect(const SkRRect&);
+ void addRegion(const SkRegion& region);
+ void addSampling(const SkSamplingOptions&);
+ void addText(const void* text, size_t byteLength);
+ void addTextBlob(const SkTextBlob* blob);
+ void addSlug(const sktext::gpu::Slug* slug);
+ void addVertices(const SkVertices*);
+
+ int find(const SkBitmap& bitmap);
+
+protected:
+ void validate(size_t initialOffset, size_t size) const {
+ SkASSERT(fWriter.bytesWritten() == initialOffset + size);
+ }
+
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+ bool onPeekPixels(SkPixmap*) override { return false; }
+
+ void onFlush() override;
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ bool onDoSaveBehind(const SkRect*) override;
+ void willRestore() override;
+
+ void didConcat44(const SkM44&) override;
+ void didSetM44(const SkM44&) override;
+ void didScale(SkScalar, SkScalar) override;
+ void didTranslate(SkScalar, SkScalar) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+
+ void onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) override;
+#if defined(SK_GANESH)
+ void onDrawSlug(const sktext::gpu::Slug* slug) override;
+#endif
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode, const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+
+ void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&,
+ const SkPaint*) override;
+ void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode,
+ const SkPaint*) override;
+ void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override;
+
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override;
+ void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override;
+
+ void onClipRect(const SkRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, SkClipOp, ClipEdgeStyle) override;
+ void onClipShader(sk_sp<SkShader>, SkClipOp) override;
+ void onClipRegion(const SkRegion&, SkClipOp) override;
+ void onResetClip() override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet2(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkSamplingOptions&,const SkPaint*, SrcRectConstraint) override;
+
+ int addPathToHeap(const SkPath& path); // does not write to ops stream
+
+ // These entry points allow the writing of matrices, clips, saves &
+ // restores to be deferred (e.g., if the MC state is being collapsed and
+ // only written out as needed).
+ void recordConcat(const SkMatrix& matrix);
+ void recordTranslate(const SkMatrix& matrix);
+ void recordScale(const SkMatrix& matrix);
+ size_t recordClipRect(const SkRect& rect, SkClipOp op, bool doAA);
+ size_t recordClipRRect(const SkRRect& rrect, SkClipOp op, bool doAA);
+ size_t recordClipPath(int pathID, SkClipOp op, bool doAA);
+ size_t recordClipRegion(const SkRegion& region, SkClipOp op);
+ void recordSave();
+ void recordSaveLayer(const SaveLayerRec&);
+ void recordRestore(bool fillInSkips = true);
+
+private:
+ skia_private::TArray<SkPaint> fPaints;
+
+ struct PathHash {
+ uint32_t operator()(const SkPath& p) { return p.getGenerationID(); }
+ };
+ SkTHashMap<SkPath, int, PathHash> fPaths;
+
+ SkWriter32 fWriter;
+
+ skia_private::TArray<sk_sp<const SkImage>> fImages;
+ skia_private::TArray<sk_sp<const SkPicture>> fPictures;
+ skia_private::TArray<sk_sp<SkDrawable>> fDrawables;
+ skia_private::TArray<sk_sp<const SkTextBlob>> fTextBlobs;
+ skia_private::TArray<sk_sp<const SkVertices>> fVertices;
+#if defined(SK_GANESH)
+ skia_private::TArray<sk_sp<const sktext::gpu::Slug>> fSlugs;
+#endif
+
+ uint32_t fRecordFlags;
+ int fInitialSaveCount;
+
+ friend class SkPictureData; // for SkPictureData's SkPictureRecord-based constructor
+
+ using INHERITED = SkCanvasVirtualEnforcer<SkCanvas>;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPictureRecorder.cpp b/gfx/skia/skia/src/core/SkPictureRecorder.cpp
new file mode 100644
index 0000000000..caf7d3df92
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPictureRecorder.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <memory>
+
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/core/SkRecordOpts.h"
+#include "src/core/SkRecordedDrawable.h"
+#include "src/core/SkRecorder.h"
+
+using namespace skia_private;
+
+SkPictureRecorder::SkPictureRecorder() {
+ fActivelyRecording = false;
+ fRecorder = std::make_unique<SkRecorder>(nullptr, SkRect::MakeEmpty());
+}
+
+SkPictureRecorder::~SkPictureRecorder() {}
+
+SkCanvas* SkPictureRecorder::beginRecording(const SkRect& userCullRect,
+ sk_sp<SkBBoxHierarchy> bbh) {
+ const SkRect cullRect = userCullRect.isEmpty() ? SkRect::MakeEmpty() : userCullRect;
+
+ fCullRect = cullRect;
+ fBBH = std::move(bbh);
+
+ if (!fRecord) {
+ fRecord.reset(new SkRecord);
+ }
+ fRecorder->reset(fRecord.get(), cullRect);
+ fActivelyRecording = true;
+ return this->getRecordingCanvas();
+}
+
+SkCanvas* SkPictureRecorder::beginRecording(const SkRect& bounds, SkBBHFactory* factory) {
+ return this->beginRecording(bounds, factory ? (*factory)() : nullptr);
+}
+
+SkCanvas* SkPictureRecorder::getRecordingCanvas() {
+ return fActivelyRecording ? fRecorder.get() : nullptr;
+}
+
+class SkEmptyPicture final : public SkPicture {
+public:
+ void playback(SkCanvas*, AbortCallback*) const override { }
+
+ size_t approximateBytesUsed() const override { return sizeof(*this); }
+ int approximateOpCount(bool nested) const override { return 0; }
+ SkRect cullRect() const override { return SkRect::MakeEmpty(); }
+};
+
+sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPicture() {
+ fActivelyRecording = false;
+ fRecorder->restoreToCount(1); // If we were missing any restores, add them now.
+
+ if (fRecord->count() == 0) {
+ return sk_make_sp<SkEmptyPicture>();
+ }
+
+ // TODO: delay as much of this work until just before first playback?
+ SkRecordOptimize(fRecord.get());
+
+ SkDrawableList* drawableList = fRecorder->getDrawableList();
+ std::unique_ptr<SkBigPicture::SnapshotArray> pictList{
+ drawableList ? drawableList->newDrawableSnapshot() : nullptr
+ };
+
+ if (fBBH) {
+ AutoTMalloc<SkRect> bounds(fRecord->count());
+ AutoTMalloc<SkBBoxHierarchy::Metadata> meta(fRecord->count());
+ SkRecordFillBounds(fCullRect, *fRecord, bounds, meta);
+
+ fBBH->insert(bounds, meta, fRecord->count());
+
+ // Now that we've calculated content bounds, we can update fCullRect, often trimming it.
+ SkRect bbhBound = SkRect::MakeEmpty();
+ for (int i = 0; i < fRecord->count(); i++) {
+ bbhBound.join(bounds[i]);
+ }
+ SkASSERT((bbhBound.isEmpty() || fCullRect.contains(bbhBound))
+ || (bbhBound.isEmpty() && fCullRect.isEmpty()));
+ fCullRect = bbhBound;
+ }
+
+ size_t subPictureBytes = fRecorder->approxBytesUsedBySubPictures();
+ for (int i = 0; pictList && i < pictList->count(); i++) {
+ subPictureBytes += pictList->begin()[i]->approximateBytesUsed();
+ }
+ return sk_make_sp<SkBigPicture>(fCullRect,
+ std::move(fRecord),
+ std::move(pictList),
+ std::move(fBBH),
+ subPictureBytes);
+}
+
+sk_sp<SkPicture> SkPictureRecorder::finishRecordingAsPictureWithCull(const SkRect& cullRect) {
+ fCullRect = cullRect;
+ return this->finishRecordingAsPicture();
+}
+
+
+void SkPictureRecorder::partialReplay(SkCanvas* canvas) const {
+ if (nullptr == canvas) {
+ return;
+ }
+
+ int drawableCount = 0;
+ SkDrawable* const* drawables = nullptr;
+ SkDrawableList* drawableList = fRecorder->getDrawableList();
+ if (drawableList) {
+ drawableCount = drawableList->count();
+ drawables = drawableList->begin();
+ }
+ SkRecordDraw(*fRecord, canvas, nullptr, drawables, drawableCount, nullptr/*bbh*/, nullptr/*callback*/);
+}
+
+sk_sp<SkDrawable> SkPictureRecorder::finishRecordingAsDrawable() {
+ fActivelyRecording = false;
+ fRecorder->restoreToCount(1); // If we were missing any restores, add them now.
+
+ SkRecordOptimize(fRecord.get());
+
+ if (fBBH) {
+ AutoTMalloc<SkRect> bounds(fRecord->count());
+ AutoTMalloc<SkBBoxHierarchy::Metadata> meta(fRecord->count());
+ SkRecordFillBounds(fCullRect, *fRecord, bounds, meta);
+ fBBH->insert(bounds, meta, fRecord->count());
+ }
+
+ sk_sp<SkDrawable> drawable =
+ sk_make_sp<SkRecordedDrawable>(std::move(fRecord), std::move(fBBH),
+ fRecorder->detachDrawableList(), fCullRect);
+
+ return drawable;
+}
diff --git a/gfx/skia/skia/src/core/SkPixelRef.cpp b/gfx/skia/skia/src/core/SkPixelRef.cpp
new file mode 100644
index 0000000000..44d8542a97
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixelRef.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPixelRef.h"
+#include "include/private/base/SkMutex.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkNextID.h"
+#include "src/core/SkPixelRefPriv.h"
+#include "src/core/SkTraceEvent.h"
+
+#include <atomic>
+
+uint32_t SkNextID::ImageID() {
+ // We never set the low bit.... see SkPixelRef::genIDIsUnique().
+ static std::atomic<uint32_t> nextID{2};
+
+ uint32_t id;
+ do {
+ id = nextID.fetch_add(2, std::memory_order_relaxed);
+ } while (id == 0);
+ return id;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPixelRef::SkPixelRef(int width, int height, void* pixels, size_t rowBytes)
+ : fWidth(width)
+ , fHeight(height)
+ , fPixels(pixels)
+ , fRowBytes(rowBytes)
+ , fAddedToCache(false)
+{
+ this->needsNewGenID();
+ fMutability = kMutable;
+}
+
+SkPixelRef::~SkPixelRef() {
+ this->callGenIDChangeListeners();
+}
+
+// This is undefined if there are clients in-flight trying to use us
+void SkPixelRef::android_only_reset(int width, int height, size_t rowBytes) {
+ fWidth = width;
+ fHeight = height;
+ fRowBytes = rowBytes;
+ // note: we do not change fPixels
+
+ // conservative, since its possible the "new" settings are the same as the old.
+ this->notifyPixelsChanged();
+}
+
+void SkPixelRef::needsNewGenID() {
+ fTaggedGenID.store(0);
+ SkASSERT(!this->genIDIsUnique()); // This method isn't threadsafe, so the assert should be fine.
+}
+
+uint32_t SkPixelRef::getGenerationID() const {
+ uint32_t id = fTaggedGenID.load();
+ if (0 == id) {
+ uint32_t next = SkNextID::ImageID() | 1u;
+ if (fTaggedGenID.compare_exchange_strong(id, next)) {
+ id = next; // There was no race or we won the race. fTaggedGenID is next now.
+ } else {
+ // We lost a race to set fTaggedGenID. compare_exchange() filled id with the winner.
+ }
+ // We can't quite SkASSERT(this->genIDIsUnique()). It could be non-unique
+ // if we got here via the else path (pretty unlikely, but possible).
+ }
+ return id & ~1u; // Mask off bottom unique bit.
+}
+
+void SkPixelRef::addGenIDChangeListener(sk_sp<SkIDChangeListener> listener) {
+ if (!listener || !this->genIDIsUnique()) {
+ // No point in tracking this if we're not going to call it.
+ return;
+ }
+ SkASSERT(!listener->shouldDeregister());
+ fGenIDChangeListeners.add(std::move(listener));
+}
+
+// we need to be called *before* the genID gets changed or zerod
+void SkPixelRef::callGenIDChangeListeners() {
+ // We don't invalidate ourselves if we think another SkPixelRef is sharing our genID.
+ if (this->genIDIsUnique()) {
+ fGenIDChangeListeners.changed();
+ if (fAddedToCache.exchange(false)) {
+ SkNotifyBitmapGenIDIsStale(this->getGenerationID());
+ }
+ } else {
+ // Listeners get at most one shot, so even though these weren't triggered or not, blow them
+ // away.
+ fGenIDChangeListeners.reset();
+ }
+}
+
+void SkPixelRef::notifyPixelsChanged() {
+#ifdef SK_DEBUG
+ if (this->isImmutable()) {
+ SkDebugf("========== notifyPixelsChanged called on immutable pixelref");
+ }
+#endif
+ this->callGenIDChangeListeners();
+ this->needsNewGenID();
+}
+
+void SkPixelRef::setImmutable() {
+ fMutability = kImmutable;
+}
+
+void SkPixelRef::setImmutableWithID(uint32_t genID) {
+ /*
+ * We are forcing the genID to match an external value. The caller must ensure that this
+ * value does not conflict with other content.
+ *
+ * One use is to force this pixelref's id to match an SkImage's id
+ */
+ fMutability = kImmutable;
+ fTaggedGenID.store(genID);
+}
+
+void SkPixelRef::setTemporarilyImmutable() {
+ SkASSERT(fMutability != kImmutable);
+ fMutability = kTemporarilyImmutable;
+}
+
+void SkPixelRef::restoreMutability() {
+ SkASSERT(fMutability != kImmutable);
+ fMutability = kMutable;
+}
+
+sk_sp<SkPixelRef> SkMakePixelRefWithProc(int width, int height, size_t rowBytes, void* addr,
+ void (*releaseProc)(void* addr, void* ctx), void* ctx) {
+ SkASSERT(width >= 0 && height >= 0);
+ if (nullptr == releaseProc) {
+ return sk_make_sp<SkPixelRef>(width, height, addr, rowBytes);
+ }
+ struct PixelRef final : public SkPixelRef {
+ void (*fReleaseProc)(void*, void*);
+ void* fReleaseProcContext;
+ PixelRef(int w, int h, void* s, size_t r, void (*proc)(void*, void*), void* ctx)
+ : SkPixelRef(w, h, s, r), fReleaseProc(proc), fReleaseProcContext(ctx) {}
+ ~PixelRef() override { fReleaseProc(this->pixels(), fReleaseProcContext); }
+ };
+ return sk_sp<SkPixelRef>(new PixelRef(width, height, addr, rowBytes, releaseProc, ctx));
+}
diff --git a/gfx/skia/skia/src/core/SkPixelRefPriv.h b/gfx/skia/skia/src/core/SkPixelRefPriv.h
new file mode 100644
index 0000000000..6198ba9091
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixelRefPriv.h
@@ -0,0 +1,27 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkPixelRefPriv_DEFINED
+#define SkPixelRefPriv_DEFINED
+
+#include "include/core/SkRefCnt.h"
+
+#include <cstddef>
+
+class SkPixelRef;
+
+/**
+ * Return a new SkMallocPixelRef with the provided pixel storage and
+ * rowBytes. On destruction, ReleaseProc will be called.
+ *
+ * If ReleaseProc is NULL, the pixels will never be released. This
+ * can be useful if the pixels were stack allocated. However, such an
+ * SkMallocPixelRef must not live beyond its pixels (e.g. by copying
+ * an SkBitmap pointing to it, or drawing to an SkPicture).
+ *
+ * Returns NULL on failure.
+ */
+sk_sp<SkPixelRef> SkMakePixelRefWithProc(int w, int h, size_t rowBytes, void* addr,
+ void (*releaseProc)(void* addr, void* ctx), void* ctx);
+
+#endif // SkPixelRefPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkPixmap.cpp b/gfx/skia/skia/src/core/SkPixmap.cpp
new file mode 100644
index 0000000000..73a79fdb8f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixmap.cpp
@@ -0,0 +1,745 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPixmap.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTPin.h"
+#include "src/base/SkHalf.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkReadPixelsRec.h"
+#include "src/core/SkSwizzlePriv.h"
+#include "src/opts/SkUtils_opts.h"
+
+#include <cstring>
+#include <iterator>
+#include <utility>
+
+void SkPixmap::reset() {
+ fPixels = nullptr;
+ fRowBytes = 0;
+ fInfo = SkImageInfo::MakeUnknown();
+}
+
+void SkPixmap::reset(const SkImageInfo& info, const void* addr, size_t rowBytes) {
+ if (addr) {
+ SkASSERT(info.validRowBytes(rowBytes));
+ }
+ fPixels = addr;
+ fRowBytes = rowBytes;
+ fInfo = info;
+}
+
+bool SkPixmap::reset(const SkMask& src) {
+ if (SkMask::kA8_Format == src.fFormat) {
+ this->reset(SkImageInfo::MakeA8(src.fBounds.width(), src.fBounds.height()),
+ src.fImage, src.fRowBytes);
+ return true;
+ }
+ this->reset();
+ return false;
+}
+
+void SkPixmap::setColorSpace(sk_sp<SkColorSpace> cs) {
+ fInfo = fInfo.makeColorSpace(std::move(cs));
+}
+
+SkColorSpace* SkPixmap::colorSpace() const { return fInfo.colorSpace(); }
+
+sk_sp<SkColorSpace> SkPixmap::refColorSpace() const { return fInfo.refColorSpace(); }
+
+bool SkPixmap::extractSubset(SkPixmap* result, const SkIRect& subset) const {
+ SkIRect srcRect, r;
+ srcRect.setWH(this->width(), this->height());
+ if (!r.intersect(srcRect, subset)) {
+ return false; // r is empty (i.e. no intersection)
+ }
+
+ // If the upper left of the rectangle was outside the bounds of this SkBitmap, we should have
+ // exited above.
+ SkASSERT(static_cast<unsigned>(r.fLeft) < static_cast<unsigned>(this->width()));
+ SkASSERT(static_cast<unsigned>(r.fTop) < static_cast<unsigned>(this->height()));
+
+ const void* pixels = nullptr;
+ if (fPixels) {
+ const size_t bpp = fInfo.bytesPerPixel();
+ pixels = (const uint8_t*)fPixels + r.fTop * fRowBytes + r.fLeft * bpp;
+ }
+ result->reset(fInfo.makeDimensions(r.size()), pixels, fRowBytes);
+ return true;
+}
+
+// This is the same as SkPixmap::addr(x,y), but this version gets inlined, while the public
+// method does not. Perhaps we could bloat it so it can be inlined, but that would grow code-size
+// everywhere, instead of just here (on behalf of getAlphaf()).
+static const void* fast_getaddr(const SkPixmap& pm, int x, int y) {
+ x <<= SkColorTypeShiftPerPixel(pm.colorType());
+ return static_cast<const char*>(pm.addr()) + y * pm.rowBytes() + x;
+}
+
+float SkPixmap::getAlphaf(int x, int y) const {
+ SkASSERT(this->addr());
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ float value = 0;
+ const void* srcPtr = fast_getaddr(*this, x, y);
+
+ switch (this->colorType()) {
+ case kUnknown_SkColorType:
+ return 0;
+ case kGray_8_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kRGB_565_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGB_101010x_SkColorType:
+ case kBGR_101010x_SkColorType:
+ case kBGR_101010x_XR_SkColorType:
+ case kR8_unorm_SkColorType:
+ return 1;
+ case kAlpha_8_SkColorType:
+ value = static_cast<const uint8_t*>(srcPtr)[0] * (1.0f/255);
+ break;
+ case kA16_unorm_SkColorType:
+ value = static_cast<const uint16_t*>(srcPtr)[0] * (1.0f/65535);
+ break;
+ case kA16_float_SkColorType: {
+ SkHalf half = static_cast<const SkHalf*>(srcPtr)[0];
+ value = SkHalfToFloat(half);
+ break;
+ }
+ case kARGB_4444_SkColorType: {
+ uint16_t u16 = static_cast<const uint16_t*>(srcPtr)[0];
+ value = SkGetPackedA4444(u16) * (1.0f/15);
+ break;
+ }
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kSRGBA_8888_SkColorType:
+ value = static_cast<const uint8_t*>(srcPtr)[3] * (1.0f/255);
+ break;
+ case kRGBA_1010102_SkColorType:
+ case kBGRA_1010102_SkColorType: {
+ uint32_t u32 = static_cast<const uint32_t*>(srcPtr)[0];
+ value = (u32 >> 30) * (1.0f/3);
+ break;
+ }
+ case kR16G16B16A16_unorm_SkColorType: {
+ uint64_t u64 = static_cast<const uint64_t*>(srcPtr)[0];
+ value = (u64 >> 48) * (1.0f/65535);
+ break;
+ }
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ uint64_t px;
+ memcpy(&px, srcPtr, sizeof(px));
+ value = SkHalfToFloat_finite_ftz(px)[3];
+ break;
+ }
+ case kRGBA_F32_SkColorType:
+ value = static_cast<const float*>(srcPtr)[3];
+ break;
+ }
+ return value;
+}
+
+bool SkPixmap::readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
+ int x, int y) const {
+ if (!SkImageInfoValidConversion(dstInfo, fInfo)) {
+ return false;
+ }
+
+ SkReadPixelsRec rec(dstInfo, dstPixels, dstRB, x, y);
+ if (!rec.trim(fInfo.width(), fInfo.height())) {
+ return false;
+ }
+
+ const void* srcPixels = this->addr(rec.fX, rec.fY);
+ const SkImageInfo srcInfo = fInfo.makeDimensions(rec.fInfo.dimensions());
+ return SkConvertPixels(rec.fInfo, rec.fPixels, rec.fRowBytes, srcInfo, srcPixels,
+ this->rowBytes());
+}
+
+SkColor SkPixmap::getColor(int x, int y) const {
+ SkASSERT(this->addr());
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ const bool needsUnpremul = (kPremul_SkAlphaType == fInfo.alphaType());
+ auto toColor = [needsUnpremul](uint32_t maybePremulColor) {
+ return needsUnpremul ? SkUnPreMultiply::PMColorToColor(maybePremulColor)
+ : SkSwizzle_BGRA_to_PMColor(maybePremulColor);
+ };
+
+ switch (this->colorType()) {
+ case kGray_8_SkColorType: {
+ uint8_t value = *this->addr8(x, y);
+ return SkColorSetRGB(value, value, value);
+ }
+ case kR8_unorm_SkColorType: {
+ uint8_t value = *this->addr8(x, y);
+ return SkColorSetRGB(value, 0, 0);
+ }
+ case kAlpha_8_SkColorType: {
+ return SkColorSetA(0, *this->addr8(x, y));
+ }
+ case kA16_unorm_SkColorType: {
+ uint16_t value = *this->addr16(x, y);
+ return SkColorSetA(0, value * (255 / 65535.0f));
+ }
+ case kA16_float_SkColorType: {
+ SkHalf value = *this->addr16(x, y);
+ return SkColorSetA(0, 255 * SkHalfToFloat(value));
+ }
+ case kRGB_565_SkColorType: {
+ return SkPixel16ToColor(*this->addr16(x, y));
+ }
+ case kARGB_4444_SkColorType: {
+ uint16_t value = *this->addr16(x, y);
+ SkPMColor c = SkPixel4444ToPixel32(value);
+ return toColor(c);
+ }
+ case kR8G8_unorm_SkColorType: {
+ uint16_t value = *this->addr16(x, y);
+ return (uint32_t)( ((value >> 0) & 0xff) ) << 16
+ | (uint32_t)( ((value >> 8) & 0xff) ) << 8
+ | 0xff000000;
+ }
+ case kR16G16_unorm_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ return (uint32_t)( ((value >> 0) & 0xffff) * (255/65535.0f) ) << 16
+ | (uint32_t)( ((value >> 16) & 0xffff) * (255/65535.0f) ) << 8
+ | 0xff000000;
+ }
+ case kR16G16_float_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ uint32_t r = 255 * SkHalfToFloat((value >> 0) & 0xffff);
+ uint32_t g = 255 * SkHalfToFloat((value >> 16) & 0xffff);
+ return (r << 16) | (g << 8) | 0xff000000;
+ }
+ case kRGB_888x_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ return SkSwizzle_RB(value | 0xff000000);
+ }
+ case kBGRA_8888_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ SkPMColor c = SkSwizzle_BGRA_to_PMColor(value);
+ return toColor(c);
+ }
+ case kRGBA_8888_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ SkPMColor c = SkSwizzle_RGBA_to_PMColor(value);
+ return toColor(c);
+ }
+ case kSRGBA_8888_SkColorType: {
+ auto srgb_to_linear = [](float x) {
+ return (x <= 0.04045f) ? x * (1 / 12.92f)
+ : sk_float_pow(x * (1 / 1.055f) + (0.055f / 1.055f), 2.4f);
+ };
+
+ uint32_t value = *this->addr32(x, y);
+ float r = ((value >> 0) & 0xff) * (1/255.0f),
+ g = ((value >> 8) & 0xff) * (1/255.0f),
+ b = ((value >> 16) & 0xff) * (1/255.0f),
+ a = ((value >> 24) & 0xff) * (1/255.0f);
+ r = srgb_to_linear(r);
+ g = srgb_to_linear(g);
+ b = srgb_to_linear(b);
+ if (a != 0 && needsUnpremul) {
+ r = SkTPin(r/a, 0.0f, 1.0f);
+ g = SkTPin(g/a, 0.0f, 1.0f);
+ b = SkTPin(b/a, 0.0f, 1.0f);
+ }
+ return (uint32_t)( r * 255.0f ) << 16
+ | (uint32_t)( g * 255.0f ) << 8
+ | (uint32_t)( b * 255.0f ) << 0
+ | (uint32_t)( a * 255.0f ) << 24;
+ }
+ case kRGB_101010x_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ // Convert 10-bit rgb to 8-bit bgr, and mask in 0xff alpha at the top.
+ return (uint32_t)( ((value >> 0) & 0x3ff) * (255/1023.0f) ) << 16
+ | (uint32_t)( ((value >> 10) & 0x3ff) * (255/1023.0f) ) << 8
+ | (uint32_t)( ((value >> 20) & 0x3ff) * (255/1023.0f) ) << 0
+ | 0xff000000;
+ }
+ case kBGR_101010x_XR_SkColorType: {
+ SkASSERT(false);
+ return 0;
+ }
+ case kBGR_101010x_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ // Convert 10-bit bgr to 8-bit bgr, and mask in 0xff alpha at the top.
+ return (uint32_t)( ((value >> 0) & 0x3ff) * (255/1023.0f) ) << 0
+ | (uint32_t)( ((value >> 10) & 0x3ff) * (255/1023.0f) ) << 8
+ | (uint32_t)( ((value >> 20) & 0x3ff) * (255/1023.0f) ) << 16
+ | 0xff000000;
+ }
+ case kRGBA_1010102_SkColorType:
+ case kBGRA_1010102_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+
+ float r = ((value >> 0) & 0x3ff) * (1/1023.0f),
+ g = ((value >> 10) & 0x3ff) * (1/1023.0f),
+ b = ((value >> 20) & 0x3ff) * (1/1023.0f),
+ a = ((value >> 30) & 0x3 ) * (1/ 3.0f);
+ if (this->colorType() == kBGRA_1010102_SkColorType) {
+ std::swap(r,b);
+ }
+ if (a != 0 && needsUnpremul) {
+ r = SkTPin(r/a, 0.0f, 1.0f);
+ g = SkTPin(g/a, 0.0f, 1.0f);
+ b = SkTPin(b/a, 0.0f, 1.0f);
+ }
+ return (uint32_t)( r * 255.0f ) << 16
+ | (uint32_t)( g * 255.0f ) << 8
+ | (uint32_t)( b * 255.0f ) << 0
+ | (uint32_t)( a * 255.0f ) << 24;
+ }
+ case kR16G16B16A16_unorm_SkColorType: {
+ uint64_t value = *this->addr64(x, y);
+
+ float r = ((value ) & 0xffff) * (1/65535.0f),
+ g = ((value >> 16) & 0xffff) * (1/65535.0f),
+ b = ((value >> 32) & 0xffff) * (1/65535.0f),
+ a = ((value >> 48) & 0xffff) * (1/65535.0f);
+ if (a != 0 && needsUnpremul) {
+ r *= (1.0f/a);
+ g *= (1.0f/a);
+ b *= (1.0f/a);
+ }
+ return (uint32_t)( r * 255.0f ) << 16
+ | (uint32_t)( g * 255.0f ) << 8
+ | (uint32_t)( b * 255.0f ) << 0
+ | (uint32_t)( a * 255.0f ) << 24;
+ }
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ const uint64_t* addr =
+ (const uint64_t*)fPixels + y * (fRowBytes >> 3) + x;
+ skvx::float4 p4 = SkHalfToFloat_finite_ftz(*addr);
+ if (p4[3] && needsUnpremul) {
+ float inva = 1 / p4[3];
+ p4 = p4 * skvx::float4(inva, inva, inva, 1);
+ }
+ // p4 is RGBA, but we want BGRA, so we need to swap next
+ return Sk4f_toL32(swizzle_rb(p4));
+ }
+ case kRGBA_F32_SkColorType: {
+ const float* rgba =
+ (const float*)fPixels + 4*y*(fRowBytes >> 4) + 4*x;
+ skvx::float4 p4 = skvx::float4::Load(rgba);
+ // From here on, just like F16:
+ if (p4[3] && needsUnpremul) {
+ float inva = 1 / p4[3];
+ p4 = p4 * skvx::float4(inva, inva, inva, 1);
+ }
+ // p4 is RGBA, but we want BGRA, so we need to swap next
+ return Sk4f_toL32(swizzle_rb(p4));
+ }
+ case kUnknown_SkColorType:
+ break;
+ }
+ SkDEBUGFAIL("");
+ return SkColorSetARGB(0, 0, 0, 0);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkColor4f SkPixmap::getColor4f(int x, int y) const {
+ SkASSERT(this->addr());
+ SkASSERT((unsigned)x < (unsigned)this->width());
+ SkASSERT((unsigned)y < (unsigned)this->height());
+
+ const bool needsUnpremul = (kPremul_SkAlphaType == fInfo.alphaType());
+ auto toColor = [needsUnpremul](uint32_t maybePremulColor) {
+ return needsUnpremul ? SkUnPreMultiply::PMColorToColor(maybePremulColor)
+ : SkSwizzle_BGRA_to_PMColor(maybePremulColor);
+ };
+
+ switch (this->colorType()) {
+ case kGray_8_SkColorType: {
+ float value = *this->addr8(x, y) / 255.0f;
+ return SkColor4f{value, value, value, 1.0};
+ }
+ case kR8_unorm_SkColorType: {
+ float value = *this->addr8(x, y) / 255.0f;
+ return SkColor4f{value, 0.0f, 0.0f, 1.0f};
+ }
+ case kAlpha_8_SkColorType: {
+ float value = *this->addr8(x, y) / 255.0f;
+ return SkColor4f{0.0f, 0.0f, 0.0f, value};
+ }
+ case kA16_unorm_SkColorType: {
+ float value = *this->addr16(x, y) / 65535.0f;
+ return SkColor4f{0.0f, 0.0f, 0.0f, value};
+ }
+ case kA16_float_SkColorType: {
+ SkHalf value = *this->addr16(x, y);
+ return SkColor4f{0.0f, 0.0f, 0.0f, SkHalfToFloat(value)};
+ }
+ case kRGB_565_SkColorType: {
+ SkColor c = SkPixel16ToColor(*this->addr16(x, y));
+ return SkColor4f::FromColor(c);
+ }
+ case kARGB_4444_SkColorType: {
+ uint16_t value = *this->addr16(x, y);
+ SkPMColor c = SkPixel4444ToPixel32(value);
+ return SkColor4f::FromColor(toColor(c));
+ }
+ case kR8G8_unorm_SkColorType: {
+ uint16_t value = *this->addr16(x, y);
+ SkColor c = (uint32_t)(((value >> 0) & 0xff)) << 16 |
+ (uint32_t)(((value >> 8) & 0xff)) << 8 | 0xff000000;
+ return SkColor4f::FromColor(c);
+ }
+ case kR16G16_unorm_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ SkColor c = (uint32_t)(((value >> 0) & 0xffff) * (255 / 65535.0f)) << 16 |
+ (uint32_t)(((value >> 16) & 0xffff) * (255 / 65535.0f)) << 8 | 0xff000000;
+ return SkColor4f::FromColor(c);
+ }
+ case kR16G16_float_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ float r = SkHalfToFloat((value >> 0) & 0xffff);
+ float g = SkHalfToFloat((value >> 16) & 0xffff);
+ return SkColor4f{r, g, 0.0, 1.0};
+ }
+ case kRGB_888x_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ SkColor c = SkSwizzle_RB(value | 0xff000000);
+ return SkColor4f::FromColor(c);
+ }
+ case kBGRA_8888_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ SkPMColor c = SkSwizzle_BGRA_to_PMColor(value);
+ return SkColor4f::FromColor(toColor(c));
+ }
+ case kRGBA_8888_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ SkPMColor c = SkSwizzle_RGBA_to_PMColor(value);
+ return SkColor4f::FromColor(toColor(c));
+ }
+ case kSRGBA_8888_SkColorType: {
+ auto srgb_to_linear = [](float x) {
+ return (x <= 0.04045f) ? x * (1 / 12.92f)
+ : sk_float_pow(x * (1 / 1.055f) + (0.055f / 1.055f), 2.4f);
+ };
+
+ uint32_t value = *this->addr32(x, y);
+ float r = ((value >> 0) & 0xff) * (1 / 255.0f),
+ g = ((value >> 8) & 0xff) * (1 / 255.0f),
+ b = ((value >> 16) & 0xff) * (1 / 255.0f),
+ a = ((value >> 24) & 0xff) * (1 / 255.0f);
+ r = srgb_to_linear(r);
+ g = srgb_to_linear(g);
+ b = srgb_to_linear(b);
+ if (a != 0 && needsUnpremul) {
+ r = SkTPin(r / a, 0.0f, 1.0f);
+ g = SkTPin(g / a, 0.0f, 1.0f);
+ b = SkTPin(b / a, 0.0f, 1.0f);
+ }
+ return SkColor4f{r, g, b, a};
+ }
+ case kBGR_101010x_XR_SkColorType: {
+ SkASSERT(false);
+ return {};
+ }
+ case kRGB_101010x_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ // Convert 10-bit rgb to float rgb, and mask in 0xff alpha at the top.
+ float r = (uint32_t)((value >> 0) & 0x3ff) / (1023.0f);
+ float g = (uint32_t)((value >> 10) & 0x3ff) / (1023.0f);
+ float b = (uint32_t)((value >> 20) & 0x3ff) / (1023.0f);
+ float a = 1.0f;
+ return SkColor4f{r, g, b, a};
+ }
+ case kBGR_101010x_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+ // Convert 10-bit bgr to float rgb, and mask in 0xff alpha at the top.
+ float r = (uint32_t)((value >> 20) & 0x3ff) / (1023.0f);
+ float g = (uint32_t)((value >> 10) & 0x3ff) / (1023.0f);
+ float b = (uint32_t)((value >> 0) & 0x3ff) / (1023.0f);
+ float a = 1.0f;
+ return SkColor4f{r, g, b, a};
+ }
+ case kRGBA_1010102_SkColorType:
+ case kBGRA_1010102_SkColorType: {
+ uint32_t value = *this->addr32(x, y);
+
+ float r = ((value >> 0) & 0x3ff) * (1 / 1023.0f),
+ g = ((value >> 10) & 0x3ff) * (1 / 1023.0f),
+ b = ((value >> 20) & 0x3ff) * (1 / 1023.0f),
+ a = ((value >> 30) & 0x3) * (1 / 3.0f);
+ if (this->colorType() == kBGRA_1010102_SkColorType) {
+ std::swap(r, b);
+ }
+ if (a != 0 && needsUnpremul) {
+ r = SkTPin(r / a, 0.0f, 1.0f);
+ g = SkTPin(g / a, 0.0f, 1.0f);
+ b = SkTPin(b / a, 0.0f, 1.0f);
+ }
+ return SkColor4f{r, g, b, a};
+ }
+ case kR16G16B16A16_unorm_SkColorType: {
+ uint64_t value = *this->addr64(x, y);
+
+ float r = ((value)&0xffff) * (1 / 65535.0f),
+ g = ((value >> 16) & 0xffff) * (1 / 65535.0f),
+ b = ((value >> 32) & 0xffff) * (1 / 65535.0f),
+ a = ((value >> 48) & 0xffff) * (1 / 65535.0f);
+ if (a != 0 && needsUnpremul) {
+ r *= (1.0f / a);
+ g *= (1.0f / a);
+ b *= (1.0f / a);
+ }
+ return SkColor4f{r, g, b, a};
+ }
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ const uint64_t* addr = (const uint64_t*)fPixels + y * (fRowBytes >> 3) + x;
+ skvx::float4 p4 = SkHalfToFloat_finite_ftz(*addr);
+ if (p4[3] && needsUnpremul) {
+ float inva = 1 / p4[3];
+ p4 = p4 * skvx::float4(inva, inva, inva, 1);
+ }
+ return SkColor4f{p4[0], p4[1], p4[2], p4[3]};
+ }
+ case kRGBA_F32_SkColorType: {
+ const float* rgba = (const float*)fPixels + 4 * y * (fRowBytes >> 4) + 4 * x;
+ skvx::float4 p4 = skvx::float4::Load(rgba);
+ // From here on, just like F16:
+ if (p4[3] && needsUnpremul) {
+ float inva = 1 / p4[3];
+ p4 = p4 * skvx::float4(inva, inva, inva, 1);
+ }
+ return SkColor4f{p4[0], p4[1], p4[2], p4[3]};
+ }
+ case kUnknown_SkColorType:
+ break;
+ }
+ SkDEBUGFAIL("");
+ return SkColors::kTransparent;
+}
+
+bool SkPixmap::computeIsOpaque() const {
+ const int height = this->height();
+ const int width = this->width();
+
+ switch (this->colorType()) {
+ case kAlpha_8_SkColorType: {
+ unsigned a = 0xFF;
+ for (int y = 0; y < height; ++y) {
+ const uint8_t* row = this->addr8(0, y);
+ for (int x = 0; x < width; ++x) {
+ a &= row[x];
+ }
+ if (0xFF != a) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kA16_unorm_SkColorType: {
+ unsigned a = 0xFFFF;
+ for (int y = 0; y < height; ++y) {
+ const uint16_t* row = this->addr16(0, y);
+ for (int x = 0; x < width; ++x) {
+ a &= row[x];
+ }
+ if (0xFFFF != a) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kA16_float_SkColorType: {
+ for (int y = 0; y < height; ++y) {
+ const SkHalf* row = this->addr16(0, y);
+ for (int x = 0; x < width; ++x) {
+ if (row[x] < SK_Half1) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ case kRGB_565_SkColorType:
+ case kGray_8_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGB_101010x_SkColorType:
+ case kBGR_101010x_SkColorType:
+ case kBGR_101010x_XR_SkColorType:
+ case kR8_unorm_SkColorType:
+ return true;
+ case kARGB_4444_SkColorType: {
+ unsigned c = 0xFFFF;
+ for (int y = 0; y < height; ++y) {
+ const SkPMColor16* row = this->addr16(0, y);
+ for (int x = 0; x < width; ++x) {
+ c &= row[x];
+ }
+ if (0xF != SkGetPackedA4444(c)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kBGRA_8888_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kSRGBA_8888_SkColorType: {
+ SkPMColor c = (SkPMColor)~0;
+ for (int y = 0; y < height; ++y) {
+ const SkPMColor* row = this->addr32(0, y);
+ for (int x = 0; x < width; ++x) {
+ c &= row[x];
+ }
+ if (0xFF != SkGetPackedA32(c)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: {
+ const SkHalf* row = (const SkHalf*)this->addr();
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ if (row[4 * x + 3] < SK_Half1) {
+ return false;
+ }
+ }
+ row += this->rowBytes() >> 1;
+ }
+ return true;
+ }
+ case kRGBA_F32_SkColorType: {
+ const float* row = (const float*)this->addr();
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ if (row[4 * x + 3] < 1.0f) {
+ return false;
+ }
+ }
+ row += this->rowBytes() >> 2;
+ }
+ return true;
+ }
+ case kRGBA_1010102_SkColorType:
+ case kBGRA_1010102_SkColorType: {
+ uint32_t c = ~0;
+ for (int y = 0; y < height; ++y) {
+ const uint32_t* row = this->addr32(0, y);
+ for (int x = 0; x < width; ++x) {
+ c &= row[x];
+ }
+ if (0b11 != c >> 30) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kR16G16B16A16_unorm_SkColorType: {
+ uint16_t acc = 0xFFFF;
+ for (int y = 0; y < height; ++y) {
+ const uint64_t* row = this->addr64(0, y);
+ for (int x = 0; x < width; ++x) {
+ acc &= (row[x] >> 48);
+ }
+ if (0xFFFF != acc) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case kUnknown_SkColorType:
+ SkDEBUGFAIL("");
+ break;
+ }
+ return false;
+}
+
+bool SkPixmap::erase(SkColor color, const SkIRect& subset) const {
+ return this->erase(SkColor4f::FromColor(color), &subset);
+}
+
+bool SkPixmap::erase(const SkColor4f& color, SkColorSpace* srcCS, const SkIRect* subset) const {
+ if (this->colorType() == kUnknown_SkColorType) {
+ return false;
+ }
+
+ SkIRect clip = this->bounds();
+ if (subset && !clip.intersect(*subset)) {
+ return false; // is this check really needed (i.e. to return false in this case?)
+ }
+
+ // Erase is meant to simulate drawing in kSRC mode -- which means we have to convert out
+ // unpremul input into premul (which we always do when we draw).
+ const auto c = color.premul();
+
+ const auto dst = SkImageInfo::Make(1, 1, this->colorType(), this->alphaType(),
+ sk_ref_sp(this->colorSpace()));
+ const auto src = SkImageInfo::Make(1, 1, kRGBA_F32_SkColorType, kPremul_SkAlphaType,
+ sk_ref_sp(srcCS));
+
+ uint64_t dstPixel[2] = {}; // be large enough for our widest config (F32 x 4)
+ SkASSERT((size_t)dst.bytesPerPixel() <= sizeof(dstPixel));
+
+ if (!SkConvertPixels(dst, dstPixel, sizeof(dstPixel), src, &c, sizeof(c))) {
+ return false;
+ }
+
+ if (this->colorType() == kRGBA_F32_SkColorType) {
+ SkColor4f dstColor;
+ memcpy(&dstColor, dstPixel, sizeof(dstColor));
+ for (int y = clip.fTop; y < clip.fBottom; ++y) {
+ SkColor4f* addr = (SkColor4f*)this->writable_addr(clip.fLeft, y);
+ SK_OPTS_NS::memsetT(addr, dstColor, clip.width());
+ }
+ } else {
+ using MemSet = void(*)(void*, uint64_t c, int count);
+ const MemSet procs[] = {
+ [](void* addr, uint64_t c, int count) {
+ SkASSERT(c == (uint8_t)c);
+ SK_OPTS_NS::memsetT((uint8_t*)addr, (uint8_t)c, count);
+ },
+ [](void* addr, uint64_t c, int count) {
+ SkASSERT(c == (uint16_t)c);
+ SK_OPTS_NS::memsetT((uint16_t*)addr, (uint16_t)c, count);
+ },
+ [](void* addr, uint64_t c, int count) {
+ SkASSERT(c == (uint32_t)c);
+ SK_OPTS_NS::memsetT((uint32_t*)addr, (uint32_t)c, count);
+ },
+ [](void* addr, uint64_t c, int count) {
+ SK_OPTS_NS::memsetT((uint64_t*)addr, c, count);
+ },
+ };
+
+ unsigned shift = SkColorTypeShiftPerPixel(this->colorType());
+ SkASSERT(shift < std::size(procs));
+ auto proc = procs[shift];
+
+ for (int y = clip.fTop; y < clip.fBottom; ++y) {
+ proc(this->writable_addr(clip.fLeft, y), dstPixel[0], clip.width());
+ }
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkPixmapDraw.cpp b/gfx/skia/skia/src/core/SkPixmapDraw.cpp
new file mode 100644
index 0000000000..ff4c7ba379
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPixmapDraw.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * This file contains implementations of SkPixmap methods which require the CPU backend.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTileMode.h"
+#include "src/shaders/SkImageShader.h"
+
+#include <utility>
+
+struct SkSamplingOptions;
+
+bool SkPixmap::scalePixels(const SkPixmap& actualDst, const SkSamplingOptions& sampling) const {
+ // We may need to tweak how we interpret these just a little below, so we make copies.
+ SkPixmap src = *this,
+ dst = actualDst;
+
+    // Can't do anything with empty src or dst
+ if (src.width() <= 0 || src.height() <= 0 ||
+ dst.width() <= 0 || dst.height() <= 0) {
+ return false;
+ }
+
+ // no scaling involved?
+ if (src.width() == dst.width() && src.height() == dst.height()) {
+ return src.readPixels(dst);
+ }
+
+ // If src and dst are both unpremul, we'll fake the source out to appear as if premul,
+ // and mark the destination as opaque. This odd combination allows us to scale unpremul
+ // pixels without ever premultiplying them (perhaps losing information in the color channels).
+ // This is an idiosyncratic feature of scalePixels(), and is tested by scalepixels_unpremul GM.
+ bool clampAsIfUnpremul = false;
+ if (src.alphaType() == kUnpremul_SkAlphaType &&
+ dst.alphaType() == kUnpremul_SkAlphaType) {
+ src.reset(src.info().makeAlphaType(kPremul_SkAlphaType), src.addr(), src.rowBytes());
+ dst.reset(dst.info().makeAlphaType(kOpaque_SkAlphaType), dst.addr(), dst.rowBytes());
+
+ // We'll need to tell the image shader to clamp to [0,1] instead of the
+ // usual [0,a] when using a bicubic scaling (kHigh_SkFilterQuality).
+ clampAsIfUnpremul = true;
+ }
+
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(src)) {
+ return false;
+ }
+ bitmap.setImmutable(); // Don't copy when we create an image.
+
+ SkMatrix scale = SkMatrix::RectToRect(SkRect::Make(src.bounds()), SkRect::Make(dst.bounds()));
+
+ sk_sp<SkShader> shader = SkImageShader::Make(bitmap.asImage(),
+ SkTileMode::kClamp,
+ SkTileMode::kClamp,
+ sampling,
+ &scale,
+ clampAsIfUnpremul);
+
+ sk_sp<SkSurface> surface = SkSurface::MakeRasterDirect(dst.info(),
+ dst.writable_addr(),
+ dst.rowBytes());
+ if (!shader || !surface) {
+ return false;
+ }
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setShader(std::move(shader));
+ surface->getCanvas()->drawPaint(paint);
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkPoint.cpp b/gfx/skia/skia/src/core/SkPoint.cpp
new file mode 100644
index 0000000000..8cf6499a7b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPoint.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "src/core/SkPointPriv.h"
+
+#include <cmath>
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPoint::scale(SkScalar scale, SkPoint* dst) const {
+ SkASSERT(dst);
+ dst->set(fX * scale, fY * scale);
+}
+
+bool SkPoint::normalize() {
+ return this->setLength(fX, fY, SK_Scalar1);
+}
+
+bool SkPoint::setNormalize(SkScalar x, SkScalar y) {
+ return this->setLength(x, y, SK_Scalar1);
+}
+
+bool SkPoint::setLength(SkScalar length) {
+ return this->setLength(fX, fY, length);
+}
+
+/*
+ * We have to worry about 2 tricky conditions:
+ * 1. underflow of mag2 (compared against nearlyzero^2)
+ * 2. overflow of mag2 (compared w/ isfinite)
+ *
+ * If we underflow, we return false. If we overflow, we compute again using
+ * doubles, which is much slower (3x in a desktop test) but will not overflow.
+ */
+template <bool use_rsqrt> bool set_point_length(SkPoint* pt, float x, float y, float length,
+ float* orig_length = nullptr) {
+ SkASSERT(!use_rsqrt || (orig_length == nullptr));
+
+ // our mag2 step overflowed to infinity, so use doubles instead.
+    // much slower, but needed when x or y are very large, otherwise we
+ // divide by inf. and return (0,0) vector.
+ double xx = x;
+ double yy = y;
+ double dmag = sqrt(xx * xx + yy * yy);
+ double dscale = sk_ieee_double_divide(length, dmag);
+ x *= dscale;
+ y *= dscale;
+ // check if we're not finite, or we're zero-length
+ if (!sk_float_isfinite(x) || !sk_float_isfinite(y) || (x == 0 && y == 0)) {
+ pt->set(0, 0);
+ return false;
+ }
+ float mag = 0;
+ if (orig_length) {
+ mag = sk_double_to_float(dmag);
+ }
+ pt->set(x, y);
+ if (orig_length) {
+ *orig_length = mag;
+ }
+ return true;
+}
+
+SkScalar SkPoint::Normalize(SkPoint* pt) {
+ float mag;
+ if (set_point_length<false>(pt, pt->fX, pt->fY, 1.0f, &mag)) {
+ return mag;
+ }
+ return 0;
+}
+
+SkScalar SkPoint::Length(SkScalar dx, SkScalar dy) {
+ float mag2 = dx * dx + dy * dy;
+ if (SkScalarIsFinite(mag2)) {
+ return sk_float_sqrt(mag2);
+ } else {
+ double xx = dx;
+ double yy = dy;
+ return sk_double_to_float(sqrt(xx * xx + yy * yy));
+ }
+}
+
+bool SkPoint::setLength(float x, float y, float length) {
+ return set_point_length<false>(this, x, y, length);
+}
+
+bool SkPointPriv::SetLengthFast(SkPoint* pt, float length) {
+ return set_point_length<true>(pt, pt->fX, pt->fY, length);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkScalar SkPointPriv::DistanceToLineBetweenSqd(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b,
+ Side* side) {
+
+ SkVector u = b - a;
+ SkVector v = pt - a;
+
+ SkScalar uLengthSqd = LengthSqd(u);
+ SkScalar det = u.cross(v);
+ if (side) {
+ SkASSERT(-1 == kLeft_Side &&
+ 0 == kOn_Side &&
+ 1 == kRight_Side);
+ *side = (Side) SkScalarSignAsInt(det);
+ }
+ SkScalar temp = sk_ieee_float_divide(det, uLengthSqd);
+ temp *= det;
+ // It's possible we have a degenerate line vector, or we're so far away it looks degenerate
+ // In this case, return squared distance to point A.
+ if (!SkScalarIsFinite(temp)) {
+ return LengthSqd(v);
+ }
+ return temp;
+}
+
+SkScalar SkPointPriv::DistanceToLineSegmentBetweenSqd(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b) {
+ // See comments to distanceToLineBetweenSqd. If the projection of c onto
+ // u is between a and b then this returns the same result as that
+ // function. Otherwise, it returns the distance to the closer of a and
+ // b. Let the projection of v onto u be v'. There are three cases:
+ // 1. v' points opposite to u. c is not between a and b and is closer
+ // to a than b.
+    //   2. v' points along u and has magnitude less than |u|. c is between
+ // a and b and the distance to the segment is the same as distance
+ // to the line ab.
+ // 3. v' points along u and has greater magnitude than u. c is not
+    //      between a and b and is closer to b than a.
+ // v' = (u dot v) * u / |u|. So if (u dot v)/|u| is less than zero we're
+ // in case 1. If (u dot v)/|u| is > |u| we are in case 3. Otherwise
+ // we're in case 2. We actually compare (u dot v) to 0 and |u|^2 to
+ // avoid a sqrt to compute |u|.
+
+ SkVector u = b - a;
+ SkVector v = pt - a;
+
+ SkScalar uLengthSqd = LengthSqd(u);
+ SkScalar uDotV = SkPoint::DotProduct(u, v);
+
+ // closest point is point A
+ if (uDotV <= 0) {
+ return LengthSqd(v);
+ // closest point is point B
+ } else if (uDotV > uLengthSqd) {
+ return DistanceToSqd(b, pt);
+ // closest point is inside segment
+ } else {
+ SkScalar det = u.cross(v);
+ SkScalar temp = sk_ieee_float_divide(det, uLengthSqd);
+ temp *= det;
+ // It's possible we have a degenerate segment, or we're so far away it looks degenerate
+ // In this case, return squared distance to point A.
+ if (!SkScalarIsFinite(temp)) {
+ return LengthSqd(v);
+ }
+ return temp;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkPoint3.cpp b/gfx/skia/skia/src/core/SkPoint3.cpp
new file mode 100644
index 0000000000..901e90ee6f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPoint3.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPoint3.h"
+
+// Returns the square of the Euclidian distance to (x,y,z).
+static inline float get_length_squared(float x, float y, float z) {
+ return x * x + y * y + z * z;
+}
+
+// Calculates the square of the Euclidian distance to (x,y,z) and stores it in
+// *lengthSquared. Returns true if the distance is judged to be "nearly zero".
+//
+// This logic is encapsulated in a helper method to make it explicit that we
+// always perform this check in the same manner, to avoid inconsistencies
+// (see http://code.google.com/p/skia/issues/detail?id=560 ).
+static inline bool is_length_nearly_zero(float x, float y, float z, float *lengthSquared) {
+ *lengthSquared = get_length_squared(x, y, z);
+ return *lengthSquared <= (SK_ScalarNearlyZero * SK_ScalarNearlyZero);
+}
+
+SkScalar SkPoint3::Length(SkScalar x, SkScalar y, SkScalar z) {
+ float magSq = get_length_squared(x, y, z);
+ if (SkScalarIsFinite(magSq)) {
+ return sk_float_sqrt(magSq);
+ } else {
+ double xx = x;
+ double yy = y;
+ double zz = z;
+ return (float)sqrt(xx * xx + yy * yy + zz * zz);
+ }
+}
+
+/*
+ * We have to worry about 2 tricky conditions:
+ * 1. underflow of magSq (compared against nearlyzero^2)
+ * 2. overflow of magSq (compared w/ isfinite)
+ *
+ * If we underflow, we return false. If we overflow, we compute again using
+ * doubles, which is much slower (3x in a desktop test) but will not overflow.
+ */
+bool SkPoint3::normalize() {
+ float magSq;
+ if (is_length_nearly_zero(fX, fY, fZ, &magSq)) {
+ this->set(0, 0, 0);
+ return false;
+ }
+ // sqrtf does not provide enough precision; since sqrt takes a double,
+ // there's no additional penalty to storing invScale in a double
+ double invScale;
+ if (sk_float_isfinite(magSq)) {
+ invScale = magSq;
+ } else {
+ // our magSq step overflowed to infinity, so use doubles instead.
+ // much slower, but needed when x, y or z is very large, otherwise we
+ // divide by inf. and return (0,0,0) vector.
+ double xx = fX;
+ double yy = fY;
+ double zz = fZ;
+ invScale = xx * xx + yy * yy + zz * zz;
+ }
+ // using a float instead of a double for scale loses too much precision
+ double scale = 1 / sqrt(invScale);
+ fX *= scale;
+ fY *= scale;
+ fZ *= scale;
+ if (!sk_float_isfinite(fX) || !sk_float_isfinite(fY) || !sk_float_isfinite(fZ)) {
+ this->set(0, 0, 0);
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkPointPriv.h b/gfx/skia/skia/src/core/SkPointPriv.h
new file mode 100644
index 0000000000..c8a6d520e0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPointPriv.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPointPriv_DEFINED
+#define SkPointPriv_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+
+class SkPointPriv {
+public:
+ enum Side {
+ kLeft_Side = -1,
+ kOn_Side = 0,
+ kRight_Side = 1,
+ };
+
+ static bool AreFinite(const SkPoint array[], int count) {
+ return SkScalarsAreFinite(&array[0].fX, count << 1);
+ }
+
+ static const SkScalar* AsScalars(const SkPoint& pt) { return &pt.fX; }
+
+ static bool CanNormalize(SkScalar dx, SkScalar dy) {
+ return SkScalarsAreFinite(dx, dy) && (dx || dy);
+ }
+
+ static SkScalar DistanceToLineBetweenSqd(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b, Side* side = nullptr);
+
+ static SkScalar DistanceToLineBetween(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b, Side* side = nullptr) {
+ return SkScalarSqrt(DistanceToLineBetweenSqd(pt, a, b, side));
+ }
+
+ static SkScalar DistanceToLineSegmentBetweenSqd(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b);
+
+ static SkScalar DistanceToLineSegmentBetween(const SkPoint& pt, const SkPoint& a,
+ const SkPoint& b) {
+ return SkScalarSqrt(DistanceToLineSegmentBetweenSqd(pt, a, b));
+ }
+
+ static SkScalar DistanceToSqd(const SkPoint& pt, const SkPoint& a) {
+ SkScalar dx = pt.fX - a.fX;
+ SkScalar dy = pt.fY - a.fY;
+ return dx * dx + dy * dy;
+ }
+
+ static bool EqualsWithinTolerance(const SkPoint& p1, const SkPoint& p2) {
+ return !CanNormalize(p1.fX - p2.fX, p1.fY - p2.fY);
+ }
+
+ static bool EqualsWithinTolerance(const SkPoint& pt, const SkPoint& p, SkScalar tol) {
+ return SkScalarNearlyZero(pt.fX - p.fX, tol)
+ && SkScalarNearlyZero(pt.fY - p.fY, tol);
+ }
+
+ static SkScalar LengthSqd(const SkPoint& pt) {
+ return SkPoint::DotProduct(pt, pt);
+ }
+
+ static void Negate(SkIPoint& pt) {
+ pt.fX = -pt.fX;
+ pt.fY = -pt.fY;
+ }
+
+ static void RotateCCW(const SkPoint& src, SkPoint* dst) {
+ // use a tmp in case src == dst
+ SkScalar tmp = src.fX;
+ dst->fX = src.fY;
+ dst->fY = -tmp;
+ }
+
+ static void RotateCCW(SkPoint* pt) {
+ RotateCCW(*pt, pt);
+ }
+
+ static void RotateCW(const SkPoint& src, SkPoint* dst) {
+ // use a tmp in case src == dst
+ SkScalar tmp = src.fX;
+ dst->fX = -src.fY;
+ dst->fY = tmp;
+ }
+
+ static void RotateCW(SkPoint* pt) {
+ RotateCW(*pt, pt);
+ }
+
+ static bool SetLengthFast(SkPoint* pt, float length);
+
+ static SkPoint MakeOrthog(const SkPoint& vec, Side side = kLeft_Side) {
+ SkASSERT(side == kRight_Side || side == kLeft_Side);
+ return (side == kRight_Side) ? SkPoint{-vec.fY, vec.fX} : SkPoint{vec.fY, -vec.fX};
+ }
+
+ // counter-clockwise fan
+ static void SetRectFan(SkPoint v[], SkScalar l, SkScalar t, SkScalar r, SkScalar b,
+ size_t stride) {
+ SkASSERT(stride >= sizeof(SkPoint));
+
+ ((SkPoint*)((intptr_t)v + 0 * stride))->set(l, t);
+ ((SkPoint*)((intptr_t)v + 1 * stride))->set(l, b);
+ ((SkPoint*)((intptr_t)v + 2 * stride))->set(r, b);
+ ((SkPoint*)((intptr_t)v + 3 * stride))->set(r, t);
+ }
+
+ // tri strip with two counter-clockwise triangles
+ static void SetRectTriStrip(SkPoint v[], SkScalar l, SkScalar t, SkScalar r, SkScalar b,
+ size_t stride) {
+ SkASSERT(stride >= sizeof(SkPoint));
+
+ ((SkPoint*)((intptr_t)v + 0 * stride))->set(l, t);
+ ((SkPoint*)((intptr_t)v + 1 * stride))->set(l, b);
+ ((SkPoint*)((intptr_t)v + 2 * stride))->set(r, t);
+ ((SkPoint*)((intptr_t)v + 3 * stride))->set(r, b);
+ }
+ static void SetRectTriStrip(SkPoint v[], const SkRect& rect, size_t stride) {
+ SetRectTriStrip(v, rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, stride);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkPromiseImageTexture.cpp b/gfx/skia/skia/src/core/SkPromiseImageTexture.cpp
new file mode 100644
index 0000000000..7912c3390b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPromiseImageTexture.cpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPromiseImageTexture.h"
+
+#if defined(SK_GANESH)
+
+SkPromiseImageTexture::SkPromiseImageTexture(const GrBackendTexture& backendTexture) {
+ SkASSERT(backendTexture.isValid());
+ fBackendTexture = backendTexture;
+}
+
+SkPromiseImageTexture::~SkPromiseImageTexture() {}
+
+#endif // defined(SK_GANESH)
diff --git a/gfx/skia/skia/src/core/SkPtrRecorder.cpp b/gfx/skia/skia/src/core/SkPtrRecorder.cpp
new file mode 100644
index 0000000000..eacd45546b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPtrRecorder.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/base/SkTSearch.h"
+#include "src/core/SkPtrRecorder.h"
+
+void SkPtrSet::reset() {
+ Pair* p = fList.begin();
+ Pair* stop = fList.end();
+ while (p < stop) {
+ this->decPtr(p->fPtr);
+ p += 1;
+ }
+ fList.reset();
+}
+
+bool SkPtrSet::Less(const Pair& a, const Pair& b) {
+ return (char*)a.fPtr < (char*)b.fPtr;
+}
+
+uint32_t SkPtrSet::find(void* ptr) const {
+ if (nullptr == ptr) {
+ return 0;
+ }
+
+ int count = fList.size();
+ Pair pair;
+ pair.fPtr = ptr;
+
+ int index = SkTSearch<Pair, Less>(fList.begin(), count, pair, sizeof(pair));
+ if (index < 0) {
+ return 0;
+ }
+ return fList[index].fIndex;
+}
+
+uint32_t SkPtrSet::add(void* ptr) {
+ if (nullptr == ptr) {
+ return 0;
+ }
+
+ int count = fList.size();
+ Pair pair;
+ pair.fPtr = ptr;
+
+ int index = SkTSearch<Pair, Less>(fList.begin(), count, pair, sizeof(pair));
+ if (index < 0) {
+ index = ~index; // turn it back into an index for insertion
+ this->incPtr(ptr);
+ pair.fIndex = count + 1;
+ *fList.insert(index) = pair;
+ return count + 1;
+ } else {
+ return fList[index].fIndex;
+ }
+}
+
+void SkPtrSet::copyToArray(void* array[]) const {
+ int count = fList.size();
+ if (count > 0) {
+ SkASSERT(array);
+ const Pair* p = fList.begin();
+ // p->fIndex is base-1, so we need to subtract to find its slot
+ for (int i = 0; i < count; i++) {
+ int index = p[i].fIndex - 1;
+ SkASSERT((unsigned)index < (unsigned)count);
+ array[index] = p[i].fPtr;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkPtrRecorder.h b/gfx/skia/skia/src/core/SkPtrRecorder.h
new file mode 100644
index 0000000000..8179f2ff0c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkPtrRecorder.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPtrSet_DEFINED
+#define SkPtrSet_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkTDArray.h"
+
+/**
+ * Maintains a set of ptrs, assigning each a unique ID [1...N]. Duplicate ptrs
+ * return the same ID (since its a set). Subclasses can override inPtr()
+ * and decPtr(). incPtr() is called each time a unique ptr is added ot the
+ * set. decPtr() is called on each ptr when the set is destroyed or reset.
+ */
+class SkPtrSet : public SkRefCnt {
+public:
+
+
+ /**
+ * Search for the specified ptr in the set. If it is found, return its
+ * 32bit ID [1..N], or if not found, return 0. Always returns 0 for nullptr.
+ */
+ uint32_t find(void*) const;
+
+ /**
+ * Add the specified ptr to the set, returning a unique 32bit ID for it
+ * [1...N]. Duplicate ptrs will return the same ID.
+ *
+ * If the ptr is nullptr, it is not added, and 0 is returned.
+ */
+ uint32_t add(void*);
+
+ /**
+ * Return the number of (non-null) ptrs in the set.
+ */
+ int count() const { return fList.size(); }
+
+ /**
+ * Copy the ptrs in the set into the specified array (allocated by the
+ * caller). The ptrs are assgined to the array based on their corresponding
+ * ID. e.g. array[ptr.ID - 1] = ptr.
+ *
+ * incPtr() and decPtr() are not called during this operation.
+ */
+ void copyToArray(void* array[]) const;
+
+ /**
+ * Call decPtr() on each ptr in the set, and the reset the size of the set
+ * to 0.
+ */
+ void reset();
+
+ /**
+ * Set iterator.
+ */
+ class Iter {
+ public:
+ Iter(const SkPtrSet& set)
+ : fSet(set)
+ , fIndex(0) {}
+
+ /**
+ * Return the next ptr in the set or null if the end was reached.
+ */
+ void* next() {
+ return fIndex < fSet.fList.size() ? fSet.fList[fIndex++].fPtr : nullptr;
+ }
+
+ private:
+ const SkPtrSet& fSet;
+ int fIndex;
+ };
+
+protected:
+ virtual void incPtr(void*) {}
+ virtual void decPtr(void*) {}
+
+private:
+ struct Pair {
+ void* fPtr; // never nullptr
+ uint32_t fIndex; // 1...N
+ };
+
+ // we store the ptrs in sorted-order (using Cmp) so that we can efficiently
+ // detect duplicates when add() is called. Hence we need to store the
+ // ptr and its ID/fIndex explicitly, since the ptr's position in the array
+ // is not related to its "index".
+ SkTDArray<Pair> fList;
+
+ static bool Less(const Pair& a, const Pair& b);
+
+ using INHERITED = SkRefCnt;
+};
+
+/**
+ * Templated wrapper for SkPtrSet, just meant to automate typecasting
+ * parameters to and from void* (which the base class expects).
+ */
+template <typename T> class SkTPtrSet : public SkPtrSet {
+public:
+ uint32_t find(T ptr) {
+ return this->INHERITED::find((void*)ptr);
+ }
+ uint32_t add(T ptr) {
+ return this->INHERITED::add((void*)ptr);
+ }
+
+ void copyToArray(T* array) const {
+ this->INHERITED::copyToArray((void**)array);
+ }
+
+private:
+ using INHERITED = SkPtrSet;
+};
+
+/**
+ * Subclass of SkTPtrSet specialed to call ref() and unref() when the
+ * base class's incPtr() and decPtr() are called. This makes it a valid owner
+ * of each ptr, which is released when the set is reset or destroyed.
+ */
+class SkRefCntSet : public SkTPtrSet<SkRefCnt*> {
+public:
+ ~SkRefCntSet() override;
+
+protected:
+ // overrides
+ void incPtr(void*) override;
+ void decPtr(void*) override;
+};
+
+class SkFactorySet : public SkTPtrSet<SkFlattenable::Factory> {};
+
+/**
+ * Similar to SkFactorySet, but only allows Factorys that have registered names.
+ * Also has a function to return the next added Factory's name.
+ */
+class SkNamedFactorySet : public SkRefCnt {
+public:
+
+
+ SkNamedFactorySet();
+
+ /**
+ * Find the specified Factory in the set. If it is not already in the set,
+ * and has registered its name, add it to the set, and return its index.
+ * If the Factory has no registered name, return 0.
+ */
+ uint32_t find(SkFlattenable::Factory);
+
+ /**
+ * If new Factorys have been added to the set, return the name of the first
+ * Factory added after the Factory name returned by the last call to this
+ * function.
+ */
+ const char* getNextAddedFactoryName();
+private:
+ int fNextAddedFactory;
+ SkFactorySet fFactorySet;
+ SkTDArray<const char*> fNames;
+
+ using INHERITED = SkRefCnt;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkQuadClipper.cpp b/gfx/skia/skia/src/core/SkQuadClipper.cpp
new file mode 100644
index 0000000000..d265635b48
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkQuadClipper.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkGeometry.h"
+#include "src/core/SkQuadClipper.h"
+
+#include <utility>
+
+SkQuadClipper::SkQuadClipper() {
+ fClip.setEmpty();
+}
+
+void SkQuadClipper::setClip(const SkIRect& clip) {
+ // conver to scalars, since that's where we'll see the points
+ fClip.set(clip);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool chopMonoQuadAt(SkScalar c0, SkScalar c1, SkScalar c2,
+ SkScalar target, SkScalar* t) {
+ /* Solve F(t) = y where F(t) := [0](1-t)^2 + 2[1]t(1-t) + [2]t^2
+ * We solve for t, using quadratic equation, hence we have to rearrange
+ * our cooefficents to look like At^2 + Bt + C
+ */
+ SkScalar A = c0 - c1 - c1 + c2;
+ SkScalar B = 2*(c1 - c0);
+ SkScalar C = c0 - target;
+
+ SkScalar roots[2]; // we only expect one, but make room for 2 for safety
+ int count = SkFindUnitQuadRoots(A, B, C, roots);
+ if (count) {
+ *t = roots[0];
+ return true;
+ }
+ return false;
+}
+
+static bool chopMonoQuadAtY(SkPoint pts[3], SkScalar y, SkScalar* t) {
+ return chopMonoQuadAt(pts[0].fY, pts[1].fY, pts[2].fY, y, t);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* If we somehow returned the fact that we had to flip the pts in Y, we could
+ communicate that to setQuadratic, and then avoid having to flip it back
+ here (only to have setQuadratic do the flip again)
+ */
+bool SkQuadClipper::clipQuad(const SkPoint srcPts[3], SkPoint dst[3]) {
+ bool reverse;
+
+ // we need the data to be monotonically increasing in Y
+ if (srcPts[0].fY > srcPts[2].fY) {
+ dst[0] = srcPts[2];
+ dst[1] = srcPts[1];
+ dst[2] = srcPts[0];
+ reverse = true;
+ } else {
+ memcpy(dst, srcPts, 3 * sizeof(SkPoint));
+ reverse = false;
+ }
+
+ // are we completely above or below
+ const SkScalar ctop = fClip.fTop;
+ const SkScalar cbot = fClip.fBottom;
+ if (dst[2].fY <= ctop || dst[0].fY >= cbot) {
+ return false;
+ }
+
+ SkScalar t;
+ SkPoint tmp[5]; // for SkChopQuadAt
+
+ // are we partially above
+ if (dst[0].fY < ctop) {
+ if (chopMonoQuadAtY(dst, ctop, &t)) {
+ // take the 2nd chopped quad
+ SkChopQuadAt(dst, tmp, t);
+ dst[0] = tmp[2];
+ dst[1] = tmp[3];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the top
+ for (int i = 0; i < 3; i++) {
+ if (dst[i].fY < ctop) {
+ dst[i].fY = ctop;
+ }
+ }
+ }
+ }
+
+ // are we partially below
+ if (dst[2].fY > cbot) {
+ if (chopMonoQuadAtY(dst, cbot, &t)) {
+ SkChopQuadAt(dst, tmp, t);
+ dst[1] = tmp[1];
+ dst[2] = tmp[2];
+ } else {
+ // if chopMonoQuadAtY failed, then we may have hit inexact numerics
+ // so we just clamp against the bottom
+ for (int i = 0; i < 3; i++) {
+ if (dst[i].fY > cbot) {
+ dst[i].fY = cbot;
+ }
+ }
+ }
+ }
+
+ if (reverse) {
+ using std::swap;
+ swap(dst[0], dst[2]);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkQuadClipper.h b/gfx/skia/skia/src/core/SkQuadClipper.h
new file mode 100644
index 0000000000..c3f5d63c8a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkQuadClipper.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkQuadClipper_DEFINED
+#define SkQuadClipper_DEFINED
+
+#include "include/core/SkPath.h"
+
+/** This class is initialized with a clip rectangle, and then can be fed quads,
+ which must already be monotonic in Y.
+
+ In the future, it might return a series of segments, allowing it to clip
+ also in X, to ensure that all segments fit in a finite coordinate system.
+ */
+class SkQuadClipper {
+public:
+ SkQuadClipper();
+
+ void setClip(const SkIRect& clip);
+
+ bool clipQuad(const SkPoint src[3], SkPoint dst[3]);
+
+private:
+ SkRect fClip;
+};
+
+/** Iterator that returns the clipped segements of a quad clipped to a rect.
+ The segments will be either lines or quads (based on SkPath::Verb), and
+ will all be monotonic in Y
+ */
+class SkQuadClipper2 {
+public:
+ bool clipQuad(const SkPoint pts[3], const SkRect& clip);
+ bool clipCubic(const SkPoint pts[4], const SkRect& clip);
+
+ SkPath::Verb next(SkPoint pts[]);
+
+private:
+ SkPoint* fCurrPoint;
+ SkPath::Verb* fCurrVerb;
+
+ enum {
+ kMaxVerbs = 13,
+ kMaxPoints = 32
+ };
+ SkPoint fPoints[kMaxPoints];
+ SkPath::Verb fVerbs[kMaxVerbs];
+
+ void clipMonoQuad(const SkPoint srcPts[3], const SkRect& clip);
+ void clipMonoCubic(const SkPoint srcPts[4], const SkRect& clip);
+ void appendVLine(SkScalar x, SkScalar y0, SkScalar y1, bool reverse);
+ void appendQuad(const SkPoint pts[3], bool reverse);
+ void appendCubic(const SkPoint pts[4], bool reverse);
+};
+
+#ifdef SK_DEBUG
+ void sk_assert_monotonic_x(const SkPoint pts[], int count);
+ void sk_assert_monotonic_y(const SkPoint pts[], int count);
+#else
+ #define sk_assert_monotonic_x(pts, count)
+ #define sk_assert_monotonic_y(pts, count)
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRRect.cpp b/gfx/skia/skia/src/core/SkRRect.cpp
new file mode 100644
index 0000000000..f5668056e7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRRect.cpp
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRRect.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "src/base/SkBuffer.h"
+#include "src/core/SkRRectPriv.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkScaleToSides.h"
+#include "src/core/SkStringUtils.h"
+
+#include <algorithm>
+#include <cstring>
+#include <iterator>
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkRRect::setOval(const SkRect& oval) {
+ if (!this->initializeRect(oval)) {
+ return;
+ }
+
+ SkScalar xRad = SkRectPriv::HalfWidth(fRect);
+ SkScalar yRad = SkRectPriv::HalfHeight(fRect);
+
+ if (xRad == 0.0f || yRad == 0.0f) {
+ // All the corners will be square
+ memset(fRadii, 0, sizeof(fRadii));
+ fType = kRect_Type;
+ } else {
+ for (int i = 0; i < 4; ++i) {
+ fRadii[i].set(xRad, yRad);
+ }
+ fType = kOval_Type;
+ }
+
+ SkASSERT(this->isValid());
+}
+
+void SkRRect::setRectXY(const SkRect& rect, SkScalar xRad, SkScalar yRad) {
+ if (!this->initializeRect(rect)) {
+ return;
+ }
+
+ if (!SkScalarsAreFinite(xRad, yRad)) {
+ xRad = yRad = 0; // devolve into a simple rect
+ }
+
+ if (fRect.width() < xRad+xRad || fRect.height() < yRad+yRad) {
+ // At most one of these two divides will be by zero, and neither numerator is zero.
+ SkScalar scale = std::min(sk_ieee_float_divide(fRect. width(), xRad + xRad),
+ sk_ieee_float_divide(fRect.height(), yRad + yRad));
+ SkASSERT(scale < SK_Scalar1);
+ xRad *= scale;
+ yRad *= scale;
+ }
+
+ if (xRad <= 0 || yRad <= 0) {
+ // all corners are square in this case
+ this->setRect(rect);
+ return;
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ fRadii[i].set(xRad, yRad);
+ }
+ fType = kSimple_Type;
+ if (xRad >= SkScalarHalf(fRect.width()) && yRad >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ // TODO: assert that all the x&y radii are already W/2 & H/2
+ }
+
+ SkASSERT(this->isValid());
+}
+
+void SkRRect::setNinePatch(const SkRect& rect, SkScalar leftRad, SkScalar topRad,
+ SkScalar rightRad, SkScalar bottomRad) {
+ if (!this->initializeRect(rect)) {
+ return;
+ }
+
+ const SkScalar array[4] = { leftRad, topRad, rightRad, bottomRad };
+ if (!SkScalarsAreFinite(array, 4)) {
+ this->setRect(rect); // devolve into a simple rect
+ return;
+ }
+
+ leftRad = std::max(leftRad, 0.0f);
+ topRad = std::max(topRad, 0.0f);
+ rightRad = std::max(rightRad, 0.0f);
+ bottomRad = std::max(bottomRad, 0.0f);
+
+ SkScalar scale = SK_Scalar1;
+ if (leftRad + rightRad > fRect.width()) {
+ scale = fRect.width() / (leftRad + rightRad);
+ }
+ if (topRad + bottomRad > fRect.height()) {
+ scale = std::min(scale, fRect.height() / (topRad + bottomRad));
+ }
+
+ if (scale < SK_Scalar1) {
+ leftRad *= scale;
+ topRad *= scale;
+ rightRad *= scale;
+ bottomRad *= scale;
+ }
+
+ if (leftRad == rightRad && topRad == bottomRad) {
+ if (leftRad >= SkScalarHalf(fRect.width()) && topRad >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ } else if (0 == leftRad || 0 == topRad) {
+ // If the left and (by equality check above) right radii are zero then it is a rect.
+ // Same goes for top/bottom.
+ fType = kRect_Type;
+ leftRad = 0;
+ topRad = 0;
+ rightRad = 0;
+ bottomRad = 0;
+ } else {
+ fType = kSimple_Type;
+ }
+ } else {
+ fType = kNinePatch_Type;
+ }
+
+ fRadii[kUpperLeft_Corner].set(leftRad, topRad);
+ fRadii[kUpperRight_Corner].set(rightRad, topRad);
+ fRadii[kLowerRight_Corner].set(rightRad, bottomRad);
+ fRadii[kLowerLeft_Corner].set(leftRad, bottomRad);
+
+ SkASSERT(this->isValid());
+}
+
+// These parameters intentionally double. Apropos crbug.com/463920, if one of the
+// radii is huge while the other is small, single precision math can completely
+// miss the fact that a scale is required.
+static double compute_min_scale(double rad1, double rad2, double limit, double curMin) {
+ if ((rad1 + rad2) > limit) {
+ return std::min(curMin, limit / (rad1 + rad2));
+ }
+ return curMin;
+}
+
+static bool clamp_to_zero(SkVector radii[4]) {
+ bool allCornersSquare = true;
+
+ // Clamp negative radii to zero
+ for (int i = 0; i < 4; ++i) {
+ if (radii[i].fX <= 0 || radii[i].fY <= 0) {
+ // In this case we are being a little fast & loose. Since one of
+ // the radii is 0 the corner is square. However, the other radii
+ // could still be non-zero and play in the global scale factor
+ // computation.
+ radii[i].fX = 0;
+ radii[i].fY = 0;
+ } else {
+ allCornersSquare = false;
+ }
+ }
+
+ return allCornersSquare;
+}
+
+void SkRRect::setRectRadii(const SkRect& rect, const SkVector radii[4]) {
+ if (!this->initializeRect(rect)) {
+ return;
+ }
+
+ if (!SkScalarsAreFinite(&radii[0].fX, 8)) {
+ this->setRect(rect); // devolve into a simple rect
+ return;
+ }
+
+ memcpy(fRadii, radii, sizeof(fRadii));
+
+ if (clamp_to_zero(fRadii)) {
+ this->setRect(rect);
+ return;
+ }
+
+ this->scaleRadii();
+
+ if (!this->isValid()) {
+ this->setRect(rect);
+ return;
+ }
+}
+
+bool SkRRect::initializeRect(const SkRect& rect) {
+ // Check this before sorting because sorting can hide nans.
+ if (!rect.isFinite()) {
+ *this = SkRRect();
+ return false;
+ }
+ fRect = rect.makeSorted();
+ if (fRect.isEmpty()) {
+ memset(fRadii, 0, sizeof(fRadii));
+ fType = kEmpty_Type;
+ return false;
+ }
+ return true;
+}
+
+// If we can't distinguish one of the radii relative to the other, force it to zero so it
+// doesn't confuse us later. See crbug.com/850350
+//
+static void flush_to_zero(SkScalar& a, SkScalar& b) {
+ SkASSERT(a >= 0);
+ SkASSERT(b >= 0);
+ if (a + b == a) {
+ b = 0;
+ } else if (a + b == b) {
+ a = 0;
+ }
+}
+
+bool SkRRect::scaleRadii() {
+ // Proportionally scale down all radii to fit. Find the minimum ratio
+ // of a side and the radii on that side (for all four sides) and use
+ // that to scale down _all_ the radii. This algorithm is from the
+ // W3 spec (http://www.w3.org/TR/css3-background/) section 5.5 - Overlapping
+ // Curves:
+ // "Let f = min(Li/Si), where i is one of { top, right, bottom, left },
+ // Si is the sum of the two corresponding radii of the corners on side i,
+ // and Ltop = Lbottom = the width of the box,
+ // and Lleft = Lright = the height of the box.
+ // If f < 1, then all corner radii are reduced by multiplying them by f."
+ double scale = 1.0;
+
+ // The sides of the rectangle may be larger than a float.
+ double width = (double)fRect.fRight - (double)fRect.fLeft;
+ double height = (double)fRect.fBottom - (double)fRect.fTop;
+ scale = compute_min_scale(fRadii[0].fX, fRadii[1].fX, width, scale);
+ scale = compute_min_scale(fRadii[1].fY, fRadii[2].fY, height, scale);
+ scale = compute_min_scale(fRadii[2].fX, fRadii[3].fX, width, scale);
+ scale = compute_min_scale(fRadii[3].fY, fRadii[0].fY, height, scale);
+
+ flush_to_zero(fRadii[0].fX, fRadii[1].fX);
+ flush_to_zero(fRadii[1].fY, fRadii[2].fY);
+ flush_to_zero(fRadii[2].fX, fRadii[3].fX);
+ flush_to_zero(fRadii[3].fY, fRadii[0].fY);
+
+ if (scale < 1.0) {
+ SkScaleToSides::AdjustRadii(width, scale, &fRadii[0].fX, &fRadii[1].fX);
+ SkScaleToSides::AdjustRadii(height, scale, &fRadii[1].fY, &fRadii[2].fY);
+ SkScaleToSides::AdjustRadii(width, scale, &fRadii[2].fX, &fRadii[3].fX);
+ SkScaleToSides::AdjustRadii(height, scale, &fRadii[3].fY, &fRadii[0].fY);
+ }
+
+ // adjust radii may set x or y to zero; set companion to zero as well
+ clamp_to_zero(fRadii);
+
+ // May be simple, oval, or complex, or become a rect/empty if the radii adjustment made them 0
+ this->computeType();
+
+ // TODO: Why can't we assert this here?
+ //SkASSERT(this->isValid());
+
+ return scale < 1.0;
+}
+
+// This method determines if a point known to be inside the RRect's bounds is
+// inside all the corners.
+bool SkRRect::checkCornerContainment(SkScalar x, SkScalar y) const {
+ SkPoint canonicalPt; // (x,y) translated to one of the quadrants
+ int index;
+
+ if (kOval_Type == this->type()) {
+ canonicalPt.set(x - fRect.centerX(), y - fRect.centerY());
+ index = kUpperLeft_Corner; // any corner will do in this case
+ } else {
+ if (x < fRect.fLeft + fRadii[kUpperLeft_Corner].fX &&
+ y < fRect.fTop + fRadii[kUpperLeft_Corner].fY) {
+ // UL corner
+ index = kUpperLeft_Corner;
+ canonicalPt.set(x - (fRect.fLeft + fRadii[kUpperLeft_Corner].fX),
+ y - (fRect.fTop + fRadii[kUpperLeft_Corner].fY));
+ SkASSERT(canonicalPt.fX < 0 && canonicalPt.fY < 0);
+ } else if (x < fRect.fLeft + fRadii[kLowerLeft_Corner].fX &&
+ y > fRect.fBottom - fRadii[kLowerLeft_Corner].fY) {
+ // LL corner
+ index = kLowerLeft_Corner;
+ canonicalPt.set(x - (fRect.fLeft + fRadii[kLowerLeft_Corner].fX),
+ y - (fRect.fBottom - fRadii[kLowerLeft_Corner].fY));
+ SkASSERT(canonicalPt.fX < 0 && canonicalPt.fY > 0);
+ } else if (x > fRect.fRight - fRadii[kUpperRight_Corner].fX &&
+ y < fRect.fTop + fRadii[kUpperRight_Corner].fY) {
+ // UR corner
+ index = kUpperRight_Corner;
+ canonicalPt.set(x - (fRect.fRight - fRadii[kUpperRight_Corner].fX),
+ y - (fRect.fTop + fRadii[kUpperRight_Corner].fY));
+ SkASSERT(canonicalPt.fX > 0 && canonicalPt.fY < 0);
+ } else if (x > fRect.fRight - fRadii[kLowerRight_Corner].fX &&
+ y > fRect.fBottom - fRadii[kLowerRight_Corner].fY) {
+ // LR corner
+ index = kLowerRight_Corner;
+ canonicalPt.set(x - (fRect.fRight - fRadii[kLowerRight_Corner].fX),
+ y - (fRect.fBottom - fRadii[kLowerRight_Corner].fY));
+ SkASSERT(canonicalPt.fX > 0 && canonicalPt.fY > 0);
+ } else {
+ // not in any of the corners
+ return true;
+ }
+ }
+
+ // A point is in an ellipse (in standard position) if:
+ // x^2 y^2
+ // ----- + ----- <= 1
+ // a^2 b^2
+ // or :
+ // b^2*x^2 + a^2*y^2 <= (ab)^2
+ SkScalar dist = SkScalarSquare(canonicalPt.fX) * SkScalarSquare(fRadii[index].fY) +
+ SkScalarSquare(canonicalPt.fY) * SkScalarSquare(fRadii[index].fX);
+ return dist <= SkScalarSquare(fRadii[index].fX * fRadii[index].fY);
+}
+
+bool SkRRectPriv::IsNearlySimpleCircular(const SkRRect& rr, SkScalar tolerance) {
+ SkScalar simpleRadius = rr.fRadii[0].fX;
+ return SkScalarNearlyEqual(simpleRadius, rr.fRadii[0].fY, tolerance) &&
+ SkScalarNearlyEqual(simpleRadius, rr.fRadii[1].fX, tolerance) &&
+ SkScalarNearlyEqual(simpleRadius, rr.fRadii[1].fY, tolerance) &&
+ SkScalarNearlyEqual(simpleRadius, rr.fRadii[2].fX, tolerance) &&
+ SkScalarNearlyEqual(simpleRadius, rr.fRadii[2].fY, tolerance) &&
+ SkScalarNearlyEqual(simpleRadius, rr.fRadii[3].fX, tolerance) &&
+ SkScalarNearlyEqual(simpleRadius, rr.fRadii[3].fY, tolerance);
+}
+
+bool SkRRectPriv::AllCornersCircular(const SkRRect& rr, SkScalar tolerance) {
+ return SkScalarNearlyEqual(rr.fRadii[0].fX, rr.fRadii[0].fY, tolerance) &&
+ SkScalarNearlyEqual(rr.fRadii[1].fX, rr.fRadii[1].fY, tolerance) &&
+ SkScalarNearlyEqual(rr.fRadii[2].fX, rr.fRadii[2].fY, tolerance) &&
+ SkScalarNearlyEqual(rr.fRadii[3].fX, rr.fRadii[3].fY, tolerance);
+}
+
+bool SkRRect::contains(const SkRect& rect) const {
+ if (!this->getBounds().contains(rect)) {
+ // If 'rect' isn't contained by the RR's bounds then the
+ // RR definitely doesn't contain it
+ return false;
+ }
+
+ if (this->isRect()) {
+ // the prior test was sufficient
+ return true;
+ }
+
+ // At this point we know all four corners of 'rect' are inside the
+ // bounds of of this RR. Check to make sure all the corners are inside
+ // all the curves
+ return this->checkCornerContainment(rect.fLeft, rect.fTop) &&
+ this->checkCornerContainment(rect.fRight, rect.fTop) &&
+ this->checkCornerContainment(rect.fRight, rect.fBottom) &&
+ this->checkCornerContainment(rect.fLeft, rect.fBottom);
+}
+
+static bool radii_are_nine_patch(const SkVector radii[4]) {
+ return radii[SkRRect::kUpperLeft_Corner].fX == radii[SkRRect::kLowerLeft_Corner].fX &&
+ radii[SkRRect::kUpperLeft_Corner].fY == radii[SkRRect::kUpperRight_Corner].fY &&
+ radii[SkRRect::kUpperRight_Corner].fX == radii[SkRRect::kLowerRight_Corner].fX &&
+ radii[SkRRect::kLowerLeft_Corner].fY == radii[SkRRect::kLowerRight_Corner].fY;
+}
+
+// There is a simplified version of this method in setRectXY
+void SkRRect::computeType() {
+ if (fRect.isEmpty()) {
+ SkASSERT(fRect.isSorted());
+ for (size_t i = 0; i < std::size(fRadii); ++i) {
+ SkASSERT((fRadii[i] == SkVector{0, 0}));
+ }
+ fType = kEmpty_Type;
+ SkASSERT(this->isValid());
+ return;
+ }
+
+ bool allRadiiEqual = true; // are all x radii equal and all y radii?
+ bool allCornersSquare = 0 == fRadii[0].fX || 0 == fRadii[0].fY;
+
+ for (int i = 1; i < 4; ++i) {
+ if (0 != fRadii[i].fX && 0 != fRadii[i].fY) {
+ // if either radius is zero the corner is square so both have to
+ // be non-zero to have a rounded corner
+ allCornersSquare = false;
+ }
+ if (fRadii[i].fX != fRadii[i-1].fX || fRadii[i].fY != fRadii[i-1].fY) {
+ allRadiiEqual = false;
+ }
+ }
+
+ if (allCornersSquare) {
+ fType = kRect_Type;
+ SkASSERT(this->isValid());
+ return;
+ }
+
+ if (allRadiiEqual) {
+ if (fRadii[0].fX >= SkScalarHalf(fRect.width()) &&
+ fRadii[0].fY >= SkScalarHalf(fRect.height())) {
+ fType = kOval_Type;
+ } else {
+ fType = kSimple_Type;
+ }
+ SkASSERT(this->isValid());
+ return;
+ }
+
+ if (radii_are_nine_patch(fRadii)) {
+ fType = kNinePatch_Type;
+ } else {
+ fType = kComplex_Type;
+ }
+
+ if (!this->isValid()) {
+ this->setRect(this->rect());
+ SkASSERT(this->isValid());
+ }
+}
+
+bool SkRRect::transform(const SkMatrix& matrix, SkRRect* dst) const {
+ if (nullptr == dst) {
+ return false;
+ }
+
+ // Assert that the caller is not trying to do this in place, which
+ // would violate const-ness. Do not return false though, so that
+ // if they know what they're doing and want to violate it they can.
+ SkASSERT(dst != this);
+
+ if (matrix.isIdentity()) {
+ *dst = *this;
+ return true;
+ }
+
+ if (!matrix.preservesAxisAlignment()) {
+ return false;
+ }
+
+ SkRect newRect;
+ if (!matrix.mapRect(&newRect, fRect)) {
+ return false;
+ }
+
+    // The matrix may have scaled us to zero (or, due to float madness, we now have collapsed
+    // some dimension of the rect), so we need to check for that. Note that matrix must be
+ // scale and translate and mapRect() produces a sorted rect. So an empty rect indicates
+ // loss of precision.
+ if (!newRect.isFinite() || newRect.isEmpty()) {
+ return false;
+ }
+
+ // At this point, this is guaranteed to succeed, so we can modify dst.
+ dst->fRect = newRect;
+
+ // Since the only transforms that were allowed are axis aligned, the type
+ // remains unchanged.
+ dst->fType = fType;
+
+ if (kRect_Type == fType) {
+ SkASSERT(dst->isValid());
+ return true;
+ }
+ if (kOval_Type == fType) {
+ for (int i = 0; i < 4; ++i) {
+ dst->fRadii[i].fX = SkScalarHalf(newRect.width());
+ dst->fRadii[i].fY = SkScalarHalf(newRect.height());
+ }
+ SkASSERT(dst->isValid());
+ return true;
+ }
+
+ // Now scale each corner
+ SkScalar xScale = matrix.getScaleX();
+ SkScalar yScale = matrix.getScaleY();
+
+ // There is a rotation of 90 (Clockwise 90) or 270 (Counter clockwise 90).
+ // 180 degrees rotations are simply flipX with a flipY and would come under
+ // a scale transform.
+ if (!matrix.isScaleTranslate()) {
+ const bool isClockwise = matrix.getSkewX() < 0;
+
+ // The matrix location for scale changes if there is a rotation.
+ xScale = matrix.getSkewY() * (isClockwise ? 1 : -1);
+ yScale = matrix.getSkewX() * (isClockwise ? -1 : 1);
+
+ const int dir = isClockwise ? 3 : 1;
+ for (int i = 0; i < 4; ++i) {
+ const int src = (i + dir) >= 4 ? (i + dir) % 4 : (i + dir);
+ // Swap X and Y axis for the radii.
+ dst->fRadii[i].fX = fRadii[src].fY;
+ dst->fRadii[i].fY = fRadii[src].fX;
+ }
+ } else {
+ for (int i = 0; i < 4; ++i) {
+ dst->fRadii[i].fX = fRadii[i].fX;
+ dst->fRadii[i].fY = fRadii[i].fY;
+ }
+ }
+
+ const bool flipX = xScale < 0;
+ if (flipX) {
+ xScale = -xScale;
+ }
+
+ const bool flipY = yScale < 0;
+ if (flipY) {
+ yScale = -yScale;
+ }
+
+ // Scale the radii without respecting the flip.
+ for (int i = 0; i < 4; ++i) {
+ dst->fRadii[i].fX *= xScale;
+ dst->fRadii[i].fY *= yScale;
+ }
+
+ // Now swap as necessary.
+ using std::swap;
+ if (flipX) {
+ if (flipY) {
+ // Swap with opposite corners
+ swap(dst->fRadii[kUpperLeft_Corner], dst->fRadii[kLowerRight_Corner]);
+ swap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kLowerLeft_Corner]);
+ } else {
+ // Only swap in x
+ swap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kUpperLeft_Corner]);
+ swap(dst->fRadii[kLowerRight_Corner], dst->fRadii[kLowerLeft_Corner]);
+ }
+ } else if (flipY) {
+ // Only swap in y
+ swap(dst->fRadii[kUpperLeft_Corner], dst->fRadii[kLowerLeft_Corner]);
+ swap(dst->fRadii[kUpperRight_Corner], dst->fRadii[kLowerRight_Corner]);
+ }
+
+ if (!AreRectAndRadiiValid(dst->fRect, dst->fRadii)) {
+ return false;
+ }
+
+ dst->scaleRadii();
+ dst->isValid(); // TODO: is this meant to be SkASSERT(dst->isValid())?
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkRRect::inset(SkScalar dx, SkScalar dy, SkRRect* dst) const {
+ SkRect r = fRect.makeInset(dx, dy);
+ bool degenerate = false;
+ if (r.fRight <= r.fLeft) {
+ degenerate = true;
+ r.fLeft = r.fRight = SkScalarAve(r.fLeft, r.fRight);
+ }
+ if (r.fBottom <= r.fTop) {
+ degenerate = true;
+ r.fTop = r.fBottom = SkScalarAve(r.fTop, r.fBottom);
+ }
+ if (degenerate) {
+ dst->fRect = r;
+ memset(dst->fRadii, 0, sizeof(dst->fRadii));
+ dst->fType = kEmpty_Type;
+ return;
+ }
+ if (!r.isFinite()) {
+ *dst = SkRRect();
+ return;
+ }
+
+ SkVector radii[4];
+ memcpy(radii, fRadii, sizeof(radii));
+ for (int i = 0; i < 4; ++i) {
+ if (radii[i].fX) {
+ radii[i].fX -= dx;
+ }
+ if (radii[i].fY) {
+ radii[i].fY -= dy;
+ }
+ }
+ dst->setRectRadii(r, radii);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t SkRRect::writeToMemory(void* buffer) const {
+ // Serialize only the rect and corners, but not the derived type tag.
+ memcpy(buffer, this, kSizeInMemory);
+ return kSizeInMemory;
+}
+
+void SkRRectPriv::WriteToBuffer(const SkRRect& rr, SkWBuffer* buffer) {
+ // Serialize only the rect and corners, but not the derived type tag.
+ buffer->write(&rr, SkRRect::kSizeInMemory);
+}
+
+size_t SkRRect::readFromMemory(const void* buffer, size_t length) {
+ if (length < kSizeInMemory) {
+ return 0;
+ }
+
+ // The extra (void*) tells GCC not to worry that kSizeInMemory < sizeof(SkRRect).
+
+ SkRRect raw;
+ memcpy((void*)&raw, buffer, kSizeInMemory);
+ this->setRectRadii(raw.fRect, raw.fRadii);
+ return kSizeInMemory;
+}
+
+bool SkRRectPriv::ReadFromBuffer(SkRBuffer* buffer, SkRRect* rr) {
+ if (buffer->available() < SkRRect::kSizeInMemory) {
+ return false;
+ }
+ SkRRect storage;
+ return buffer->read(&storage, SkRRect::kSizeInMemory) &&
+ (rr->readFromMemory(&storage, SkRRect::kSizeInMemory) == SkRRect::kSizeInMemory);
+}
+
+SkString SkRRect::dumpToString(bool asHex) const {
+ SkScalarAsStringType asType = asHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+
+ fRect.dump(asHex);
+ SkString line("const SkPoint corners[] = {\n");
+ for (int i = 0; i < 4; ++i) {
+ SkString strX, strY;
+ SkAppendScalar(&strX, fRadii[i].x(), asType);
+ SkAppendScalar(&strY, fRadii[i].y(), asType);
+ line.appendf(" { %s, %s },", strX.c_str(), strY.c_str());
+ if (asHex) {
+ line.appendf(" /* %f %f */", fRadii[i].x(), fRadii[i].y());
+ }
+ line.append("\n");
+ }
+ line.append("};");
+ return line;
+}
+
+void SkRRect::dump(bool asHex) const { SkDebugf("%s\n", this->dumpToString(asHex).c_str()); }
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We need all combinations of predicates to be true to have a "safe" radius value.
+ */
+static bool are_radius_check_predicates_valid(SkScalar rad, SkScalar min, SkScalar max) {
+ return (min <= max) && (rad <= max - min) && (min + rad <= max) && (max - rad >= min) &&
+ rad >= 0;
+}
+
+bool SkRRect::isValid() const {
+ if (!AreRectAndRadiiValid(fRect, fRadii)) {
+ return false;
+ }
+
+ bool allRadiiZero = (0 == fRadii[0].fX && 0 == fRadii[0].fY);
+ bool allCornersSquare = (0 == fRadii[0].fX || 0 == fRadii[0].fY);
+ bool allRadiiSame = true;
+
+ for (int i = 1; i < 4; ++i) {
+ if (0 != fRadii[i].fX || 0 != fRadii[i].fY) {
+ allRadiiZero = false;
+ }
+
+ if (fRadii[i].fX != fRadii[i-1].fX || fRadii[i].fY != fRadii[i-1].fY) {
+ allRadiiSame = false;
+ }
+
+ if (0 != fRadii[i].fX && 0 != fRadii[i].fY) {
+ allCornersSquare = false;
+ }
+ }
+ bool patchesOfNine = radii_are_nine_patch(fRadii);
+
+ if (fType < 0 || fType > kLastType) {
+ return false;
+ }
+
+ switch (fType) {
+ case kEmpty_Type:
+ if (!fRect.isEmpty() || !allRadiiZero || !allRadiiSame || !allCornersSquare) {
+ return false;
+ }
+ break;
+ case kRect_Type:
+ if (fRect.isEmpty() || !allRadiiZero || !allRadiiSame || !allCornersSquare) {
+ return false;
+ }
+ break;
+ case kOval_Type:
+ if (fRect.isEmpty() || allRadiiZero || !allRadiiSame || allCornersSquare) {
+ return false;
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ if (!SkScalarNearlyEqual(fRadii[i].fX, SkRectPriv::HalfWidth(fRect)) ||
+ !SkScalarNearlyEqual(fRadii[i].fY, SkRectPriv::HalfHeight(fRect))) {
+ return false;
+ }
+ }
+ break;
+ case kSimple_Type:
+ if (fRect.isEmpty() || allRadiiZero || !allRadiiSame || allCornersSquare) {
+ return false;
+ }
+ break;
+ case kNinePatch_Type:
+ if (fRect.isEmpty() || allRadiiZero || allRadiiSame || allCornersSquare ||
+ !patchesOfNine) {
+ return false;
+ }
+ break;
+ case kComplex_Type:
+ if (fRect.isEmpty() || allRadiiZero || allRadiiSame || allCornersSquare ||
+ patchesOfNine) {
+ return false;
+ }
+ break;
+ }
+
+ return true;
+}
+
+bool SkRRect::AreRectAndRadiiValid(const SkRect& rect, const SkVector radii[4]) {
+ if (!rect.isFinite() || !rect.isSorted()) {
+ return false;
+ }
+ for (int i = 0; i < 4; ++i) {
+ if (!are_radius_check_predicates_valid(radii[i].fX, rect.fLeft, rect.fRight) ||
+ !are_radius_check_predicates_valid(radii[i].fY, rect.fTop, rect.fBottom)) {
+ return false;
+ }
+ }
+ return true;
+}
+///////////////////////////////////////////////////////////////////////////////
+
+SkRect SkRRectPriv::InnerBounds(const SkRRect& rr) {
+ if (rr.isEmpty() || rr.isRect()) {
+ return rr.rect();
+ }
+
+ // We start with the outer bounds of the round rect and consider three subsets and take the
+ // one with maximum area. The first two are the horizontal and vertical rects inset from the
+ // corners, the third is the rect inscribed at the corner curves' maximal point. This forms
+ // the exact solution when all corners have the same radii (the radii do not have to be
+ // circular).
+ SkRect innerBounds = rr.getBounds();
+ SkVector tl = rr.radii(SkRRect::kUpperLeft_Corner);
+ SkVector tr = rr.radii(SkRRect::kUpperRight_Corner);
+ SkVector bl = rr.radii(SkRRect::kLowerLeft_Corner);
+ SkVector br = rr.radii(SkRRect::kLowerRight_Corner);
+
+ // Select maximum inset per edge, which may move an adjacent corner of the inscribed
+ // rectangle off of the rounded-rect path, but that is acceptable given that the general
+ // equation for inscribed area is non-trivial to evaluate.
+ SkScalar leftShift = std::max(tl.fX, bl.fX);
+ SkScalar topShift = std::max(tl.fY, tr.fY);
+ SkScalar rightShift = std::max(tr.fX, br.fX);
+ SkScalar bottomShift = std::max(bl.fY, br.fY);
+
+ SkScalar dw = leftShift + rightShift;
+ SkScalar dh = topShift + bottomShift;
+
+ // Area removed by shifting left/right
+ SkScalar horizArea = (innerBounds.width() - dw) * innerBounds.height();
+ // And by shifting top/bottom
+ SkScalar vertArea = (innerBounds.height() - dh) * innerBounds.width();
+ // And by shifting all edges: just considering a corner ellipse, the maximum inscribed rect has
+ // a corner at sqrt(2)/2 * (rX, rY), so scale all corner shifts by (1 - sqrt(2)/2) to get the
+ // safe shift per edge (since the shifts already are the max radius for that edge).
+ // - We actually scale by a value slightly increased to make it so that the shifted corners are
+ // safely inside the curves, otherwise numerical stability can cause it to fail contains().
+ static constexpr SkScalar kScale = (1.f - SK_ScalarRoot2Over2) + 1e-5f;
+ SkScalar innerArea = (innerBounds.width() - kScale * dw) * (innerBounds.height() - kScale * dh);
+
+ if (horizArea > vertArea && horizArea > innerArea) {
+ // Cut off corners by insetting left and right
+ innerBounds.fLeft += leftShift;
+ innerBounds.fRight -= rightShift;
+ } else if (vertArea > innerArea) {
+ // Cut off corners by insetting top and bottom
+ innerBounds.fTop += topShift;
+ innerBounds.fBottom -= bottomShift;
+ } else if (innerArea > 0.f) {
+ // Inset on all sides, scaled to touch
+ innerBounds.fLeft += kScale * leftShift;
+ innerBounds.fRight -= kScale * rightShift;
+ innerBounds.fTop += kScale * topShift;
+ innerBounds.fBottom -= kScale * bottomShift;
+ } else {
+ // Inner region would collapse to empty
+ return SkRect::MakeEmpty();
+ }
+
+ SkASSERT(innerBounds.isSorted() && !innerBounds.isEmpty());
+ return innerBounds;
+}
+
+SkRRect SkRRectPriv::ConservativeIntersect(const SkRRect& a, const SkRRect& b) {
+ // Returns the coordinate of the rect matching the corner enum.
+ auto getCorner = [](const SkRect& r, SkRRect::Corner corner) -> SkPoint {
+ switch(corner) {
+ case SkRRect::kUpperLeft_Corner: return {r.fLeft, r.fTop};
+ case SkRRect::kUpperRight_Corner: return {r.fRight, r.fTop};
+ case SkRRect::kLowerLeft_Corner: return {r.fLeft, r.fBottom};
+ case SkRRect::kLowerRight_Corner: return {r.fRight, r.fBottom};
+ default: SkUNREACHABLE;
+ }
+ };
+ // Returns true if shape A's extreme point is contained within shape B's extreme point, relative
+ // to the 'corner' location. If the two shapes' corners have the same ellipse radii, this
+ // is sufficient for A's ellipse arc to be contained by B's ellipse arc.
+ auto insideCorner = [](SkRRect::Corner corner, const SkPoint& a, const SkPoint& b) {
+ switch(corner) {
+ case SkRRect::kUpperLeft_Corner: return a.fX >= b.fX && a.fY >= b.fY;
+ case SkRRect::kUpperRight_Corner: return a.fX <= b.fX && a.fY >= b.fY;
+ case SkRRect::kLowerRight_Corner: return a.fX <= b.fX && a.fY <= b.fY;
+ case SkRRect::kLowerLeft_Corner: return a.fX >= b.fX && a.fY <= b.fY;
+ default: SkUNREACHABLE;
+ }
+ };
+
+ auto getIntersectionRadii = [&](const SkRect& r, SkRRect::Corner corner, SkVector* radii) {
+ SkPoint test = getCorner(r, corner);
+ SkPoint aCorner = getCorner(a.rect(), corner);
+ SkPoint bCorner = getCorner(b.rect(), corner);
+
+ if (test == aCorner && test == bCorner) {
+ // The round rects share a corner anchor, so pick A or B such that its X and Y radii
+            // are both larger than the other rrect's, or return false if neither A nor B has the max
+ // corner radii (this is more permissive than the single corner tests below).
+ SkVector aRadii = a.radii(corner);
+ SkVector bRadii = b.radii(corner);
+ if (aRadii.fX >= bRadii.fX && aRadii.fY >= bRadii.fY) {
+ *radii = aRadii;
+ return true;
+ } else if (bRadii.fX >= aRadii.fX && bRadii.fY >= aRadii.fY) {
+ *radii = bRadii;
+ return true;
+ } else {
+ return false;
+ }
+ } else if (test == aCorner) {
+ // Test that A's ellipse is contained by B. This is a non-trivial function to evaluate
+            // so we restrict it to when the corners have the same radii. If not, we use the more
+ // conservative test that the extreme point of A's bounding box is contained in B.
+ *radii = a.radii(corner);
+ if (*radii == b.radii(corner)) {
+ return insideCorner(corner, aCorner, bCorner); // A inside B
+ } else {
+ return b.checkCornerContainment(aCorner.fX, aCorner.fY);
+ }
+ } else if (test == bCorner) {
+ // Mirror of the above
+ *radii = b.radii(corner);
+ if (*radii == a.radii(corner)) {
+ return insideCorner(corner, bCorner, aCorner); // B inside A
+ } else {
+ return a.checkCornerContainment(bCorner.fX, bCorner.fY);
+ }
+ } else {
+ // This is a corner formed by two straight edges of A and B, so confirm that it is
+ // contained in both (if not, then the intersection can't be a round rect).
+ *radii = {0.f, 0.f};
+ return a.checkCornerContainment(test.fX, test.fY) &&
+ b.checkCornerContainment(test.fX, test.fY);
+ }
+ };
+
+ // We fill in the SkRRect directly. Since the rect and radii are either 0s or determined by
+ // valid existing SkRRects, we know we are finite.
+ SkRRect intersection;
+ if (!intersection.fRect.intersect(a.rect(), b.rect())) {
+ // Definitely no intersection
+ return SkRRect::MakeEmpty();
+ }
+
+ const SkRRect::Corner corners[] = {
+ SkRRect::kUpperLeft_Corner,
+ SkRRect::kUpperRight_Corner,
+ SkRRect::kLowerRight_Corner,
+ SkRRect::kLowerLeft_Corner
+ };
+    // By definition, the edges are contained in the bounds of 'a' and 'b', but now we need to consider
+ // the corners. If the bound's corner point is in both rrects, the corner radii will be 0s.
+ // If the bound's corner point matches a's edges and is inside 'b', we use a's radii.
+ // Same for b's radii. If any corner fails these conditions, we reject the intersection as an
+ // rrect. If after determining radii for all 4 corners, they would overlap, we also reject the
+ // intersection shape.
+ for (auto c : corners) {
+ if (!getIntersectionRadii(intersection.fRect, c, &intersection.fRadii[c])) {
+ return SkRRect::MakeEmpty(); // Resulting intersection is not a rrect
+ }
+ }
+
+ // Check for radius overlap along the four edges, since the earlier evaluation was only a
+ // one-sided corner check. If they aren't valid, a corner's radii doesn't fit within the rect.
+ // If the radii are scaled, the combination of radii from two adjacent corners doesn't fit.
+ // Normally for a regularly constructed SkRRect, we want this scaling, but in this case it means
+ // the intersection shape is definitively not a round rect.
+ if (!SkRRect::AreRectAndRadiiValid(intersection.fRect, intersection.fRadii) ||
+ intersection.scaleRadii()) {
+ return SkRRect::MakeEmpty();
+ }
+
+ // The intersection is an rrect of the given radii. Potentially all 4 corners could have
+ // been simplified to (0,0) radii, making the intersection a rectangle.
+ intersection.computeType();
+ return intersection;
+}
diff --git a/gfx/skia/skia/src/core/SkRRectPriv.h b/gfx/skia/skia/src/core/SkRRectPriv.h
new file mode 100644
index 0000000000..cea579c901
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRRectPriv.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRRectPriv_DEFINED
+#define SkRRectPriv_DEFINED
+
+#include "include/core/SkRRect.h"
+
+class SkRBuffer;
+class SkWBuffer;
+
+class SkRRectPriv {
+public:
+ static bool IsCircle(const SkRRect& rr) {
+ return rr.isOval() && SkScalarNearlyEqual(rr.fRadii[0].fX, rr.fRadii[0].fY);
+ }
+
+ static SkVector GetSimpleRadii(const SkRRect& rr) {
+ SkASSERT(!rr.isComplex());
+ return rr.fRadii[0];
+ }
+
+ static bool IsSimpleCircular(const SkRRect& rr) {
+ return rr.isSimple() && SkScalarNearlyEqual(rr.fRadii[0].fX, rr.fRadii[0].fY);
+ }
+
+ // Looser version of IsSimpleCircular, where the x & y values of the radii
+ // only have to be nearly equal instead of strictly equal.
+ static bool IsNearlySimpleCircular(const SkRRect& rr, SkScalar tolerance = SK_ScalarNearlyZero);
+
+ static bool EqualRadii(const SkRRect& rr) {
+ return rr.isRect() || SkRRectPriv::IsCircle(rr) || SkRRectPriv::IsSimpleCircular(rr);
+ }
+
+ static const SkVector* GetRadiiArray(const SkRRect& rr) { return rr.fRadii; }
+
+ static bool AllCornersCircular(const SkRRect& rr, SkScalar tolerance = SK_ScalarNearlyZero);
+
+ static bool ReadFromBuffer(SkRBuffer* buffer, SkRRect* rr);
+
+ static void WriteToBuffer(const SkRRect& rr, SkWBuffer* buffer);
+
+ // Test if a point is in the rrect, if it were a closed set.
+ static bool ContainsPoint(const SkRRect& rr, const SkPoint& p) {
+ return rr.getBounds().contains(p.fX, p.fY) && rr.checkCornerContainment(p.fX, p.fY);
+ }
+
+ // Compute an approximate largest inscribed bounding box of the rounded rect. For empty,
+ // rect, oval, and simple types this will be the largest inscribed rectangle. Otherwise it may
+ // not be the global maximum, but will be non-empty, touch at least one edge and be contained
+ // in the round rect.
+ static SkRect InnerBounds(const SkRRect& rr);
+
+ // Attempt to compute the intersection of two round rects. The intersection is not necessarily
+ // a round rect. This returns intersections only when the shape is representable as a new
+ // round rect (or rect). Empty is returned if 'a' and 'b' do not intersect or if the
+ // intersection is too complicated. This is conservative, it may not always detect that an
+ // intersection could be represented as a round rect. However, when it does return a round rect
+ // that intersection will be exact (i.e. it is NOT just a subset of the actual intersection).
+ static SkRRect ConservativeIntersect(const SkRRect& a, const SkRRect& b);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRSXform.cpp b/gfx/skia/skia/src/core/SkRSXform.cpp
new file mode 100644
index 0000000000..67f18345c6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRSXform.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkRSXform.h"
+
+void SkRSXform::toQuad(SkScalar width, SkScalar height, SkPoint quad[4]) const {
+#if 0
+ // This is the slow way, but it documents what we're doing
+ quad[0].set(0, 0);
+ quad[1].set(width, 0);
+ quad[2].set(width, height);
+ quad[3].set(0, height);
+ SkMatrix m;
+ m.setRSXform(*this).mapPoints(quad, quad, 4);
+#else
+ const SkScalar m00 = fSCos;
+ const SkScalar m01 = -fSSin;
+ const SkScalar m02 = fTx;
+ const SkScalar m10 = -m01;
+ const SkScalar m11 = m00;
+ const SkScalar m12 = fTy;
+
+ quad[0].set(m02, m12);
+ quad[1].set(m00 * width + m02, m10 * width + m12);
+ quad[2].set(m00 * width + m01 * height + m02, m10 * width + m11 * height + m12);
+ quad[3].set(m01 * height + m02, m11 * height + m12);
+#endif
+}
+
+void SkRSXform::toTriStrip(SkScalar width, SkScalar height, SkPoint strip[4]) const {
+ const SkScalar m00 = fSCos;
+ const SkScalar m01 = -fSSin;
+ const SkScalar m02 = fTx;
+ const SkScalar m10 = -m01;
+ const SkScalar m11 = m00;
+ const SkScalar m12 = fTy;
+
+ strip[0].set(m02, m12);
+ strip[1].set(m01 * height + m02, m11 * height + m12);
+ strip[2].set(m00 * width + m02, m10 * width + m12);
+ strip[3].set(m00 * width + m01 * height + m02, m10 * width + m11 * height + m12);
+}
diff --git a/gfx/skia/skia/src/core/SkRTree.cpp b/gfx/skia/skia/src/core/SkRTree.cpp
new file mode 100644
index 0000000000..eb996205a4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRTree.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRTree.h"
+
+SkRTree::SkRTree() : fCount(0) {}
+
+void SkRTree::insert(const SkRect boundsArray[], int N) {
+ SkASSERT(0 == fCount);
+
+ std::vector<Branch> branches;
+ branches.reserve(N);
+
+ for (int i = 0; i < N; i++) {
+ const SkRect& bounds = boundsArray[i];
+ if (bounds.isEmpty()) {
+ continue;
+ }
+
+ Branch b;
+ b.fBounds = bounds;
+ b.fOpIndex = i;
+ branches.push_back(b);
+ }
+
+ fCount = (int)branches.size();
+ if (fCount) {
+ if (1 == fCount) {
+ fNodes.reserve(1);
+ Node* n = this->allocateNodeAtLevel(0);
+ n->fNumChildren = 1;
+ n->fChildren[0] = branches[0];
+ fRoot.fSubtree = n;
+ fRoot.fBounds = branches[0].fBounds;
+ } else {
+ fNodes.reserve(CountNodes(fCount));
+ fRoot = this->bulkLoad(&branches);
+ }
+ }
+}
+
+SkRTree::Node* SkRTree::allocateNodeAtLevel(uint16_t level) {
+ SkDEBUGCODE(Node* p = fNodes.data());
+ fNodes.push_back(Node{});
+ Node& out = fNodes.back();
+ SkASSERT(fNodes.data() == p); // If this fails, we didn't reserve() enough.
+ out.fNumChildren = 0;
+ out.fLevel = level;
+ return &out;
+}
+
+// This function parallels bulkLoad, but just counts how many nodes bulkLoad would allocate.
+int SkRTree::CountNodes(int branches) {
+ if (branches == 1) {
+ return 1;
+ }
+ int remainder = branches % kMaxChildren;
+ if (remainder > 0) {
+ if (remainder >= kMinChildren) {
+ remainder = 0;
+ } else {
+ remainder = kMinChildren - remainder;
+ }
+ }
+ int currentBranch = 0;
+ int nodes = 0;
+ while (currentBranch < branches) {
+ int incrementBy = kMaxChildren;
+ if (remainder != 0) {
+ if (remainder <= kMaxChildren - kMinChildren) {
+ incrementBy -= remainder;
+ remainder = 0;
+ } else {
+ incrementBy = kMinChildren;
+ remainder -= kMaxChildren - kMinChildren;
+ }
+ }
+ nodes++;
+ currentBranch++;
+ for (int k = 1; k < incrementBy && currentBranch < branches; ++k) {
+ currentBranch++;
+ }
+ }
+ return nodes + CountNodes(nodes);
+}
+
+SkRTree::Branch SkRTree::bulkLoad(std::vector<Branch>* branches, int level) {
+ if (branches->size() == 1) { // Only one branch. It will be the root.
+ return (*branches)[0];
+ }
+
+ // We might sort our branches here, but we expect Blink gives us a reasonable x,y order.
+ // Skipping a call to sort (in Y) here resulted in a 17% win for recording with negligible
+ // difference in playback speed.
+ int remainder = (int)branches->size() % kMaxChildren;
+ int newBranches = 0;
+
+ if (remainder > 0) {
+ // If the remainder isn't enough to fill a node, we'll add fewer nodes to other branches.
+ if (remainder >= kMinChildren) {
+ remainder = 0;
+ } else {
+ remainder = kMinChildren - remainder;
+ }
+ }
+
+ int currentBranch = 0;
+ while (currentBranch < (int)branches->size()) {
+ int incrementBy = kMaxChildren;
+ if (remainder != 0) {
+ // if need be, omit some nodes to make up for remainder
+ if (remainder <= kMaxChildren - kMinChildren) {
+ incrementBy -= remainder;
+ remainder = 0;
+ } else {
+ incrementBy = kMinChildren;
+ remainder -= kMaxChildren - kMinChildren;
+ }
+ }
+ Node* n = allocateNodeAtLevel(level);
+ n->fNumChildren = 1;
+ n->fChildren[0] = (*branches)[currentBranch];
+ Branch b;
+ b.fBounds = (*branches)[currentBranch].fBounds;
+ b.fSubtree = n;
+ ++currentBranch;
+ for (int k = 1; k < incrementBy && currentBranch < (int)branches->size(); ++k) {
+ b.fBounds.join((*branches)[currentBranch].fBounds);
+ n->fChildren[k] = (*branches)[currentBranch];
+ ++n->fNumChildren;
+ ++currentBranch;
+ }
+ (*branches)[newBranches] = b;
+ ++newBranches;
+ }
+ branches->resize(newBranches);
+ return this->bulkLoad(branches, level + 1);
+}
+
+void SkRTree::search(const SkRect& query, std::vector<int>* results) const {
+ if (fCount > 0 && SkRect::Intersects(fRoot.fBounds, query)) {
+ this->search(fRoot.fSubtree, query, results);
+ }
+}
+
+void SkRTree::search(Node* node, const SkRect& query, std::vector<int>* results) const {
+ for (int i = 0; i < node->fNumChildren; ++i) {
+ if (SkRect::Intersects(node->fChildren[i].fBounds, query)) {
+ if (0 == node->fLevel) {
+ results->push_back(node->fChildren[i].fOpIndex);
+ } else {
+ this->search(node->fChildren[i].fSubtree, query, results);
+ }
+ }
+ }
+}
+
+size_t SkRTree::bytesUsed() const {
+ size_t byteCount = sizeof(SkRTree);
+
+ byteCount += fNodes.capacity() * sizeof(Node);
+
+ return byteCount;
+}
diff --git a/gfx/skia/skia/src/core/SkRTree.h b/gfx/skia/skia/src/core/SkRTree.h
new file mode 100644
index 0000000000..03d55ebcd0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRTree.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRTree_DEFINED
+#define SkRTree_DEFINED
+
+#include "include/core/SkBBHFactory.h"
+#include "include/core/SkRect.h"
+
+/**
+ * An R-Tree implementation. In short, it is a balanced n-ary tree containing a hierarchy of
+ * bounding rectangles.
+ *
+ * It only supports bulk-loading, i.e. creation from a batch of bounding rectangles.
+ * This performs a bottom-up bulk load using the STR (sort-tile-recursive) algorithm.
+ *
+ * TODO: Experiment with other bulk-load algorithms (in particular the Hilbert pack variant,
+ * which groups rects by position on the Hilbert curve, is probably worth a look). There also
+ * exist top-down bulk load variants (VAMSplit, TopDownGreedy, etc).
+ *
+ * For more details see:
+ *
+ * Beckmann, N.; Kriegel, H. P.; Schneider, R.; Seeger, B. (1990). "The R*-tree:
+ * an efficient and robust access method for points and rectangles"
+ */
+class SkRTree : public SkBBoxHierarchy {
+public:
+ SkRTree();
+
+ void insert(const SkRect[], int N) override;
+ void search(const SkRect& query, std::vector<int>* results) const override;
+ size_t bytesUsed() const override;
+
+ // Methods and constants below here are only public for tests.
+
+ // Return the depth of the tree structure.
+ int getDepth() const { return fCount ? fRoot.fSubtree->fLevel + 1 : 0; }
+ // Insertion count (not overall node count, which may be greater).
+ int getCount() const { return fCount; }
+
+ // These values were empirically determined to produce reasonable performance in most cases.
+ static const int kMinChildren = 6,
+ kMaxChildren = 11;
+
+private:
+ struct Node;
+
+ struct Branch {
+ union {
+ Node* fSubtree;
+ int fOpIndex;
+ };
+ SkRect fBounds;
+ };
+
+ struct Node {
+ uint16_t fNumChildren;
+ uint16_t fLevel;
+ Branch fChildren[kMaxChildren];
+ };
+
+ void search(Node* root, const SkRect& query, std::vector<int>* results) const;
+
+ // Consumes the input array.
+ Branch bulkLoad(std::vector<Branch>* branches, int level = 0);
+
+ // How many times will bulkLoad() call allocateNodeAtLevel()?
+ static int CountNodes(int branches);
+
+ Node* allocateNodeAtLevel(uint16_t level);
+
+ // This is the count of data elements (rather than total nodes in the tree)
+ int fCount;
+ Branch fRoot;
+ std::vector<Node> fNodes;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRasterClip.cpp b/gfx/skia/skia/src/core/SkRasterClip.cpp
new file mode 100644
index 0000000000..524a47fdc8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterClip.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRegionPriv.h"
+
+SkRasterClip::SkRasterClip(const SkRasterClip& that) // copy ctor: copies cached flags plus only the active representation
+ : fIsBW(that.fIsBW)
+ , fIsEmpty(that.fIsEmpty)
+ , fIsRect(that.fIsRect)
+ , fShader(that.fShader)
+{
+ AUTO_RASTERCLIP_VALIDATE(that);
+
+ if (fIsBW) {
+ fBW = that.fBW; // only the live side (BW or AA) needs copying; the other stays empty
+ } else {
+ fAA = that.fAA;
+ }
+
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip& SkRasterClip::operator=(const SkRasterClip& that) { // copy-assign; self-assignment is benign (member copies are safe)
+ AUTO_RASTERCLIP_VALIDATE(that);
+
+ fIsBW = that.fIsBW;
+ if (fIsBW) {
+ fBW = that.fBW; // copy only the active representation
+ } else {
+ fAA = that.fAA;
+ }
+
+ fIsEmpty = that.isEmpty(); // refresh caches from the source (also re-asserts them in debug)
+ fIsRect = that.isRect();
+ fShader = that.fShader;
+ SkDEBUGCODE(this->validate();)
+ return *this;
+}
+
+SkRasterClip::SkRasterClip(const SkRegion& rgn) : fBW(rgn) { // wrap an existing BW region
+ fIsBW = true;
+ fIsEmpty = this->computeIsEmpty(); // bounds might be empty, so compute
+ fIsRect = !fIsEmpty; // NOTE(review): assumes rgn is rectangular or empty — complex regions would make this cache wrong; verify callers
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip(const SkIRect& bounds) : fBW(bounds) { // rectangular BW clip
+ fIsBW = true;
+ fIsEmpty = this->computeIsEmpty(); // bounds might be empty, so compute
+ fIsRect = !fIsEmpty; // a non-empty single rect is by definition a rect clip
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip() { // default: empty BW clip
+ fIsBW = true;
+ fIsEmpty = true;
+ fIsRect = false;
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::SkRasterClip(const SkPath& path, const SkIRect& bounds, bool doAA) { // rasterize `path` clipped to `bounds`, AA or BW per `doAA`
+ if (doAA) {
+ fIsBW = false;
+ fAA.setPath(path, bounds, true);
+ } else {
+ fIsBW = true;
+ fBW.setPath(path, SkRegion(bounds)); // temporary SkRegion acts as the clip for scan conversion
+ }
+ fIsEmpty = this->computeIsEmpty(); // bounds might be empty, so compute
+ fIsRect = this->computeIsRect();
+ SkDEBUGCODE(this->validate();)
+}
+
+SkRasterClip::~SkRasterClip() { // members clean up via their own destructors; debug builds re-check invariants
+ SkDEBUGCODE(this->validate();)
+}
+
+bool SkRasterClip::setEmpty() { // reset to the empty BW clip; returns false (== "now empty"), matching the op() convention
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ fIsBW = true;
+ fBW.setEmpty();
+ fAA.setEmpty(); // clear both representations so validate()'s "AA empty when BW" invariant holds
+ fIsEmpty = true;
+ fIsRect = false;
+ return false;
+}
+
+bool SkRasterClip::setRect(const SkIRect& rect) { // replace the clip with a single BW rect; returns true iff non-empty
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ fIsBW = true;
+ fAA.setEmpty();
+ fIsRect = fBW.setRect(rect); // SkRegion::setRect() returns false for an empty rect
+ fIsEmpty = !fIsRect;
+ return fIsRect;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+
+bool SkRasterClip::op(const SkIRect& rect, SkClipOp op) { // combine with an integer rect; returns true iff result is non-empty
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (fIsBW) {
+ fBW.op(rect, (SkRegion::Op) op); // SkClipOp values alias the first SkRegion::Op values, so the cast is safe
+ } else {
+ fAA.op(rect, op);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::op(const SkRegion& rgn, SkClipOp op) { // combine with a BW region; returns true iff result is non-empty
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (fIsBW) {
+ (void)fBW.op(rgn, (SkRegion::Op) op);
+ } else {
+ SkAAClip tmp; // lift the region into AA space so it can combine with our AA clip
+ tmp.setRegion(rgn);
+ (void)fAA.op(tmp, op);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+/**
+ * Our antialiasing currently has a granularity of 1/4 of a pixel along each
+ * axis. Thus we can treat an axis coordinate as an integer if it differs
+ * from its nearest int by < half of that value (1/8 in this case).
+ */
+static bool nearly_integral(SkScalar x) { // true if x is within 1/8 of an integer (see granularity comment above)
+ static const SkScalar domain = SK_Scalar1 / 4;
+ static const SkScalar halfDomain = domain / 2;
+
+ x += halfDomain; // shift so the acceptance window is centered on each integer
+ return x - SkScalarFloorToScalar(x) < domain;
+}
+
+bool SkRasterClip::op(const SkRect& localRect, const SkMatrix& matrix, SkClipOp op, bool doAA) { // combine with a rect in local space; returns true iff non-empty
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ const bool isScaleTrans = matrix.isScaleTranslate();
+ if (!isScaleTrans) {
+ return this->op(SkPath::Rect(localRect), matrix, op, doAA); // rotated/skewed rects must go through the path route
+ }
+
+ SkRect devRect = matrix.mapRect(localRect);
+ if (fIsBW && doAA) {
+ // check that the rect really needs aa, or is it close enough to
+ // integer boundaries that we can just treat it as a BW rect?
+ if (nearly_integral(devRect.fLeft) && nearly_integral(devRect.fTop) &&
+ nearly_integral(devRect.fRight) && nearly_integral(devRect.fBottom)) {
+ doAA = false;
+ }
+ }
+
+ if (fIsBW && !doAA) {
+ (void)fBW.op(devRect.round(), (SkRegion::Op) op);
+ } else {
+ if (fIsBW) {
+ this->convertToAA(); // must be in AA form before an AA op
+ }
+ (void)fAA.op(devRect, op, doAA);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+bool SkRasterClip::op(const SkRRect& rrect, SkClipOp op, const SkMatrix& matrix, SkClipOp, bool) = delete; // (unused form)
+
+bool SkRasterClip::op(const SkPath& path, const SkMatrix& matrix, SkClipOp op, bool doAA) { // combine with a path in local space; returns true iff non-empty
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ SkPath devPath;
+ path.transform(matrix, &devPath); // bring the path into device space first
+
+ // Since op is either intersect or difference, the clip is always shrinking; that means we can
+ // always use our current bounds as the limiting factor for region/aaclip operations.
+ if (this->isRect() && op == SkClipOp::kIntersect) {
+ // However, in the relatively common case of intersecting a new path with a rectangular
+ // clip, it's faster to convert the path into a region/aa-mask in place than evaluate the
+ // actual intersection. See skbug.com/12398
+ if (doAA && fIsBW) {
+ this->convertToAA();
+ }
+ if (fIsBW) {
+ fBW.setPath(devPath, SkRegion(this->getBounds()));
+ } else {
+ fAA.setPath(devPath, this->getBounds(), doAA);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+ } else {
+ return this->op(SkRasterClip(devPath, this->getBounds(), doAA), op); // general case: rasterize then combine clip-vs-clip
+ }
+}
+
+bool SkRasterClip::op(sk_sp<SkShader> sh) { // augment the clip with a coverage shader (does not change geometry)
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (!fShader) {
+ fShader = sh;
+ } else {
+ fShader = SkShaders::Blend(SkBlendMode::kSrcIn, sh, fShader); // stack shaders: new coverage multiplies the existing one
+ }
+ return !this->isEmpty();
+}
+
+bool SkRasterClip::op(const SkRasterClip& clip, SkClipOp op) { // combine with another raster clip; promotes to AA if either side is AA
+ AUTO_RASTERCLIP_VALIDATE(*this);
+ clip.validate();
+
+ if (this->isBW() && clip.isBW()) {
+ (void)fBW.op(clip.fBW, (SkRegion::Op) op); // fast path: pure region math
+ } else {
+ SkAAClip tmp;
+ const SkAAClip* other;
+
+ if (this->isBW()) {
+ this->convertToAA();
+ }
+ if (clip.isBW()) {
+ tmp.setRegion(clip.bwRgn()); // lift the BW side into AA space for the combine
+ other = &tmp;
+ } else {
+ other = &clip.aaRgn();
+ }
+ (void)fAA.op(*other, op);
+ }
+ return this->updateCacheAndReturnNonEmpty();
+}
+
+void SkRasterClip::translate(int dx, int dy, SkRasterClip* dst) const { // write a (dx,dy)-shifted copy of this clip into *dst; no-op if dst is null
+ if (nullptr == dst) {
+ return;
+ }
+
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ if (this->isEmpty()) {
+ dst->setEmpty();
+ return;
+ }
+ if (0 == (dx | dy)) { // zero offset: plain copy
+ *dst = *this;
+ return;
+ }
+
+ dst->fIsBW = fIsBW;
+ if (fIsBW) {
+ fBW.translate(dx, dy, &dst->fBW);
+ dst->fAA.setEmpty(); // keep dst's inactive side empty to preserve the class invariant
+ } else {
+ fAA.translate(dx, dy, &dst->fAA);
+ dst->fBW.setEmpty();
+ }
+ dst->updateCacheAndReturnNonEmpty(); // refresh dst's fIsEmpty/fIsRect caches
+}
+
+void SkRasterClip::convertToAA() { // switch the active representation from BW region to AA clip (coverage stays identical)
+ AUTO_RASTERCLIP_VALIDATE(*this);
+
+ SkASSERT(fIsBW);
+ fAA.setRegion(fBW);
+ fIsBW = false;
+
+ // since we are being explicitly asked to convert-to-aa, we pass false so we don't "optimize"
+ // ourselves back to BW.
+ (void)this->updateCacheAndReturnNonEmpty(false);
+}
+
+#ifdef SK_DEBUG
+void SkRasterClip::validate() const { // debug-only invariant check: one live representation, caches in sync
+ // can't ever assert that fBW is empty, since we may have called forceGetBW
+ if (fIsBW) {
+ SkASSERT(fAA.isEmpty());
+ }
+
+ SkRegionPriv::Validate(fBW);
+ fAA.validate();
+
+ SkASSERT(this->computeIsEmpty() == fIsEmpty); // cached flags must match recomputed truth
+ SkASSERT(this->computeIsRect() == fIsRect);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper() { // default: caller must init() before use; debug poisons the pointers
+ SkDEBUGCODE(fClipRgn = nullptr;)
+ SkDEBUGCODE(fBlitter = nullptr;)
+}
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper(const SkRasterClip& clip, // convenience ctor: same as default-construct + init()
+ SkBlitter* blitter) {
+ this->init(clip, blitter);
+}
+
+SkAAClipBlitterWrapper::SkAAClipBlitterWrapper(const SkAAClip* aaclip, // always-AA form: wraps blitter and exposes the aaclip bounds as a BW region
+ SkBlitter* blitter) {
+ SkASSERT(blitter);
+ SkASSERT(aaclip);
+ fBWRgn.setRect(aaclip->getBounds()); // scan converters clip to this rect; per-pixel coverage comes from fAABlitter
+ fAABlitter.init(blitter, aaclip);
+ // now our return values
+ fClipRgn = &fBWRgn;
+ fBlitter = &fAABlitter;
+}
+
+void SkAAClipBlitterWrapper::init(const SkRasterClip& clip, SkBlitter* blitter) { // pick pass-through (BW) or AA-wrapping setup based on the clip
+ SkASSERT(blitter);
+ if (clip.isBW()) {
+ fClipRgn = &clip.bwRgn(); // BW: no wrapping needed, use the raw blitter and region
+ fBlitter = blitter;
+ } else {
+ const SkAAClip& aaclip = clip.aaRgn();
+ fBWRgn.setRect(aaclip.getBounds());
+ fAABlitter.init(blitter, &aaclip); // AA: route blits through the AA-applying blitter
+ // now our return values
+ fClipRgn = &fBWRgn;
+ fBlitter = &fAABlitter;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkRasterClip.h b/gfx/skia/skia/src/core/SkRasterClip.h
new file mode 100644
index 0000000000..b8362c38eb
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterClip.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterClip_DEFINED
+#define SkRasterClip_DEFINED
+
+#include "include/core/SkClipOp.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkShader.h"
+#include "include/private/base/SkMacros.h"
+#include "src/core/SkAAClip.h"
+
+class SkRRect;
+
+/**
+ * Wraps a SkRegion and SkAAClip, so we have a single object that can represent either our
+ * BW or antialiased clips.
+ */
+class SkRasterClip {
+public:
+ SkRasterClip(); // empty clip
+ explicit SkRasterClip(const SkIRect&); // rectangular BW clip
+ explicit SkRasterClip(const SkRegion&); // BW clip from a region
+ explicit SkRasterClip(const SkRasterClip&);
+ SkRasterClip(const SkPath& path, const SkIRect& bounds, bool doAA); // rasterized path clip
+
+ ~SkRasterClip();
+
+ SkRasterClip& operator=(const SkRasterClip&);
+
+ bool isBW() const { return fIsBW; }
+ bool isAA() const { return !fIsBW; }
+ const SkRegion& bwRgn() const { SkASSERT(fIsBW); return fBW; } // only valid while isBW()
+ const SkAAClip& aaRgn() const { SkASSERT(!fIsBW); return fAA; } // only valid while isAA()
+
+ bool isEmpty() const {
+ SkASSERT(this->computeIsEmpty() == fIsEmpty);
+ return fIsEmpty; // cached; recomputed by updateCacheAndReturnNonEmpty()
+ }
+
+ bool isRect() const {
+ SkASSERT(this->computeIsRect() == fIsRect);
+ return fIsRect; // cached; true when the clip is exactly one hard-edged rect
+ }
+
+ bool isComplex() const {
+ return fIsBW ? fBW.isComplex() : !fAA.isEmpty(); // any non-empty AA clip counts as complex
+ }
+ const SkIRect& getBounds() const {
+ return fIsBW ? fBW.getBounds() : fAA.getBounds();
+ }
+
+ bool setEmpty(); // returns false (now empty)
+ bool setRect(const SkIRect&); // returns true iff non-empty
+
+ // All op() overloads combine and return true iff the result is non-empty.
+ bool op(const SkIRect&, SkClipOp);
+ bool op(const SkRegion&, SkClipOp);
+ bool op(const SkRect&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ bool op(const SkRRect&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ bool op(const SkPath&, const SkMatrix& matrix, SkClipOp, bool doAA);
+ bool op(sk_sp<SkShader>); // augments coverage with a shader; geometry unchanged
+
+ void translate(int dx, int dy, SkRasterClip* dst) const;
+
+ bool quickContains(const SkIRect& rect) const {
+ return fIsBW ? fBW.quickContains(rect) : fAA.quickContains(rect);
+ }
+
+ /**
+ * Return true if this region is empty, or if the specified rectangle does
+ * not intersect the region. Returning false is not a guarantee that they
+ * intersect, but returning true is a guarantee that they do not.
+ */
+ bool quickReject(const SkIRect& rect) const {
+ return !SkIRect::Intersects(this->getBounds(), rect);
+ }
+
+#ifdef SK_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+ sk_sp<SkShader> clipShader() const { return fShader; }
+
+private:
+ SkRegion fBW; // active iff fIsBW
+ SkAAClip fAA; // active iff !fIsBW
+ bool fIsBW;
+ // these 2 are caches based on querying the right obj based on fIsBW
+ bool fIsEmpty;
+ bool fIsRect;
+ // if present, this augments the clip, not replaces it
+ sk_sp<SkShader> fShader;
+
+ bool computeIsEmpty() const {
+ return fIsBW ? fBW.isEmpty() : fAA.isEmpty();
+ }
+
+ bool computeIsRect() const {
+ return fIsBW ? fBW.isRect() : fAA.isRect();
+ }
+
+ bool updateCacheAndReturnNonEmpty(bool detectAARect = true) { // refresh fIsEmpty/fIsRect; optionally demote rect-shaped AA back to BW
+ fIsEmpty = this->computeIsEmpty();
+
+ // detect that our computed AA is really just a (hard-edged) rect
+ if (detectAARect && !fIsEmpty && !fIsBW && fAA.isRect()) {
+ fBW.setRect(fAA.getBounds());
+ fAA.setEmpty(); // don't need this anymore
+ fIsBW = true;
+ }
+
+ fIsRect = this->computeIsRect();
+ return !fIsEmpty;
+ }
+
+ void convertToAA();
+
+ bool op(const SkRasterClip&, SkClipOp);
+};
+
+class SkAutoRasterClipValidate : SkNoncopyable { // RAII: validates the clip on scope entry and exit (debug builds)
+public:
+ SkAutoRasterClipValidate(const SkRasterClip& rc) : fRC(rc) {
+ fRC.validate();
+ }
+ ~SkAutoRasterClipValidate() {
+ fRC.validate(); // re-check after the guarded mutation
+ }
+private:
+ const SkRasterClip& fRC;
+};
+
+#ifdef SK_DEBUG
+ #define AUTO_RASTERCLIP_VALIDATE(rc) SkAutoRasterClipValidate arcv(rc)
+#else
+ #define AUTO_RASTERCLIP_VALIDATE(rc)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Encapsulates the logic of deciding if we need to change/wrap the blitter
+ * for aaclipping. If so, getRgn and getBlitter return modified values. If
+ * not, they return the raw blitter and (bw) clip region.
+ *
+ * We need to keep the constructor/destructor cost as small as possible, so we
+ * can freely put this on the stack, and not pay too much for the case when
+ * we're really BW anyways.
+ */
+class SkAAClipBlitterWrapper { // see block comment above: wraps a blitter with AA clipping only when needed
+public:
+ SkAAClipBlitterWrapper(); // must call init() before getRgn()/getBlitter()
+ SkAAClipBlitterWrapper(const SkRasterClip&, SkBlitter*);
+ SkAAClipBlitterWrapper(const SkAAClip*, SkBlitter*);
+
+ void init(const SkRasterClip&, SkBlitter*);
+
+ const SkIRect& getBounds() const {
+ SkASSERT(fClipRgn);
+ return fClipRgn->getBounds();
+ }
+ const SkRegion& getRgn() const { // BW region to hand to scan converters (aaclip bounds in the AA case)
+ SkASSERT(fClipRgn);
+ return *fClipRgn;
+ }
+ SkBlitter* getBlitter() { // raw blitter (BW case) or the AA-applying wrapper (AA case)
+ SkASSERT(fBlitter);
+ return fBlitter;
+ }
+
+private:
+ SkRegion fBWRgn; // used only in the AA case
+ SkAAClipBlitter fAABlitter; // used only in the AA case
+ // what we return
+ const SkRegion* fClipRgn;
+ SkBlitter* fBlitter;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRasterClipStack.h b/gfx/skia/skia/src/core/SkRasterClipStack.h
new file mode 100644
index 0000000000..0d73122d4d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterClipStack.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterClipStack_DEFINED
+#define SkRasterClipStack_DEFINED
+
+#include "include/core/SkClipOp.h"
+#include "src/base/SkTBlockList.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScan.h"
+
+class SkRasterClipStack : SkNoncopyable { // save/restore stack of SkRasterClips with copy-on-write deferral
+public:
+ SkRasterClipStack(int width, int height)
+ : fRootBounds(SkIRect::MakeWH(width, height))
+ , fDisableAA(SkScan::PathRequiresTiling(fRootBounds)) { // huge devices force BW clips (tiled scan conversion)
+ fStack.emplace_back(SkRasterClip(fRootBounds));
+ SkASSERT(fStack.count() == 1);
+ }
+
+ void setNewSize(int w, int h) { // only legal at base state (single stack entry, no pending saves)
+ fRootBounds.setXYWH(0, 0, w, h);
+
+ SkASSERT(fStack.count() == 1);
+ Rec& rec = fStack.back();
+ SkASSERT(rec.fDeferredCount == 0);
+ rec.fRC.setRect(fRootBounds);
+ }
+
+ const SkRasterClip& rc() const { return fStack.back().fRC; }
+
+ void save() {
+ SkDEBUGCODE(fCounter += 1);
+ SkASSERT(fStack.back().fDeferredCount >= 0);
+ fStack.back().fDeferredCount += 1; // defer the copy until a mutation actually happens
+ }
+
+ void restore() {
+ SkDEBUGCODE(fCounter -= 1);
+ SkASSERT(fCounter >= 0);
+
+ if (--fStack.back().fDeferredCount < 0) { // no deferred saves left: this entry really pops
+ SkASSERT(fStack.back().fDeferredCount == -1);
+ SkASSERT(fStack.count() > 1);
+ fStack.pop_back();
+ }
+ }
+
+ void clipRect(const SkMatrix& ctm, const SkRect& rect, SkClipOp op, bool aa) {
+ this->writable_rc().op(rect, ctm, op, this->finalAA(aa));
+ this->validate();
+ }
+
+ void clipRRect(const SkMatrix& ctm, const SkRRect& rrect, SkClipOp op, bool aa) {
+ this->writable_rc().op(rrect, ctm, op, this->finalAA(aa));
+ this->validate();
+ }
+
+ void clipPath(const SkMatrix& ctm, const SkPath& path, SkClipOp op, bool aa) {
+ this->writable_rc().op(path, ctm, op, this->finalAA(aa));
+ this->validate();
+ }
+
+ void clipShader(sk_sp<SkShader> sh) {
+ this->writable_rc().op(std::move(sh));
+ this->validate();
+ }
+
+ void clipRegion(const SkRegion& rgn, SkClipOp op) {
+ this->writable_rc().op(rgn, op);
+ this->validate();
+ }
+
+ void replaceClip(const SkIRect& rect) { // replace (not combine), still bounded by the device
+ SkIRect devRect = rect;
+ if (!devRect.intersect(fRootBounds)) {
+ this->writable_rc().setEmpty();
+ } else {
+ this->writable_rc().setRect(devRect);
+ }
+ }
+
+ void validate() const {
+#ifdef SK_DEBUG
+ const SkRasterClip& clip = this->rc();
+ if (fRootBounds.isEmpty()) {
+ SkASSERT(clip.isEmpty());
+ } else if (!clip.isEmpty()) {
+ SkASSERT(fRootBounds.contains(clip.getBounds()));
+ }
+#endif
+ }
+
+private:
+ struct Rec {
+ SkRasterClip fRC;
+ int fDeferredCount; // 0 for a "normal" entry
+
+ Rec(const SkRasterClip& rc) : fRC(rc), fDeferredCount(0) {}
+ };
+
+ SkTBlockList<Rec, 16> fStack;
+ SkIRect fRootBounds; // device bounds; all clips stay within this
+ bool fDisableAA; // set once at ctor when the device is too big for AA
+ SkDEBUGCODE(int fCounter = 0);
+
+ SkRasterClip& writable_rc() { // copy-on-write: materialize a deferred save before mutating
+ SkASSERT(fStack.back().fDeferredCount >= 0);
+ if (fStack.back().fDeferredCount > 0) {
+ fStack.back().fDeferredCount -= 1;
+ fStack.emplace_back(fStack.back().fRC);
+ }
+ return fStack.back().fRC;
+ }
+
+ bool finalAA(bool aa) const { return aa && !fDisableAA; }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRasterPipeline.cpp b/gfx/skia/skia/src/core/SkRasterPipeline.cpp
new file mode 100644
index 0000000000..df6ffdb064
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipeline.cpp
@@ -0,0 +1,499 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRasterPipeline.h"
+
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/private/base/SkTemplates.h"
+#include "modules/skcms/skcms.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkOpts.h"
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+
+using namespace skia_private;
+using Op = SkRasterPipelineOp;
+
+bool gForceHighPrecisionRasterPipeline; // when true, build_lowp_pipeline() always bails and the float (highp) path is used
+
+SkRasterPipeline::SkRasterPipeline(SkArenaAlloc* alloc) : fAlloc(alloc) { // all stage nodes are arena-allocated; pipeline lifetime <= arena lifetime
+ this->reset();
+}
+void SkRasterPipeline::reset() { // drop all stages (arena still owns the old nodes)
+ fRewindCtx = nullptr;
+ fStages = nullptr;
+ fNumStages = 0;
+}
+
+void SkRasterPipeline::append(SkRasterPipelineOp op, void* ctx) { // checked append: ops with dedicated helpers are rejected here
+ SkASSERT(op != Op::uniform_color); // Please use append_constant_color().
+ SkASSERT(op != Op::unbounded_uniform_color); // Please use append_constant_color().
+ SkASSERT(op != Op::set_rgb); // Please use append_set_rgb().
+ SkASSERT(op != Op::unbounded_set_rgb); // Please use append_set_rgb().
+ SkASSERT(op != Op::parametric); // Please use append_transfer_function().
+ SkASSERT(op != Op::gamma_); // Please use append_transfer_function().
+ SkASSERT(op != Op::PQish); // Please use append_transfer_function().
+ SkASSERT(op != Op::HLGish); // Please use append_transfer_function().
+ SkASSERT(op != Op::HLGinvish); // Please use append_transfer_function().
+ SkASSERT(op != Op::stack_checkpoint); // Please use append_stack_rewind().
+ SkASSERT(op != Op::stack_rewind); // Please use append_stack_rewind().
+ this->unchecked_append(op, ctx);
+}
+void SkRasterPipeline::unchecked_append(SkRasterPipelineOp op, void* ctx) { // prepend to the singly-linked list (fStages is stored back-to-front)
+ fStages = fAlloc->make<StageList>(StageList{fStages, op, ctx});
+ fNumStages += 1;
+}
+void SkRasterPipeline::append(SkRasterPipelineOp op, uintptr_t ctx) { // convenience: smuggle an integer through the void* ctx slot
+ void* ptrCtx;
+ memcpy(&ptrCtx, &ctx, sizeof(ctx)); // memcpy avoids type-punning UB
+ this->append(op, ptrCtx);
+}
+
+void SkRasterPipeline::extend(const SkRasterPipeline& src) { // append a copy of all of src's stages onto this pipeline
+ if (src.empty()) {
+ return;
+ }
+ // Create a rewind context if `src` has one already, but we don't. If we _do_ already have one,
+ // we need to keep it, since we already have rewind ops that reference it. Either way, we need
+ // to rewrite all the rewind ops to point to _our_ rewind context; we only get that checkpoint.
+ if (src.fRewindCtx && !fRewindCtx) {
+ fRewindCtx = fAlloc->make<SkRasterPipeline_RewindCtx>();
+ }
+ auto stages = fAlloc->makeArrayDefault<StageList>(src.fNumStages);
+
+ int n = src.fNumStages;
+ const StageList* st = src.fStages;
+ while (n --> 1) { // copy back-to-front, re-linking prev pointers into our new array
+ stages[n] = *st;
+ stages[n].prev = &stages[n-1];
+
+ if (stages[n].stage == Op::stack_rewind) {
+ // We make sure that all stack rewinds use _our_ stack context.
+ stages[n].ctx = fRewindCtx;
+ }
+
+ st = st->prev;
+ }
+ stages[0] = *st;
+ stages[0].prev = fStages; // splice: src's first stage now follows our current last stage
+
+ fStages = &stages[src.fNumStages - 1];
+ fNumStages += src.fNumStages;
+}
+
+const char* SkRasterPipeline::GetOpName(SkRasterPipelineOp op) { // debug/name lookup via the X-macro op list
+ const char* name = "";
+ switch (op) {
+ #define M(x) case Op::x: name = #x; break;
+ SK_RASTER_PIPELINE_OPS_ALL(M)
+ #undef M
+ }
+ return name;
+}
+
+void SkRasterPipeline::dump() const { // debug print of the stage list, in execution (front-to-back) order
+ SkDebugf("SkRasterPipeline, %d stages\n", fNumStages);
+ std::vector<const char*> stages;
+ for (auto st = fStages; st; st = st->prev) {
+ stages.push_back(GetOpName(st->stage));
+ }
+ std::reverse(stages.begin(), stages.end()); // fStages is stored backwards
+ for (const char* name : stages) {
+ SkDebugf("\t%s\n", name);
+ }
+ SkDebugf("\n");
+}
+
+void SkRasterPipeline::append_set_rgb(SkArenaAlloc* alloc, const float rgb[3]) { // copy rgb into the arena and pick bounded vs unbounded stage
+ auto arg = alloc->makeArrayDefault<float>(3);
+ arg[0] = rgb[0];
+ arg[1] = rgb[1];
+ arg[2] = rgb[2];
+
+ auto op = Op::unbounded_set_rgb;
+ if (0 <= rgb[0] && rgb[0] <= 1 &&
+ 0 <= rgb[1] && rgb[1] <= 1 &&
+ 0 <= rgb[2] && rgb[2] <= 1)
+ {
+ op = Op::set_rgb; // in-gamut values can use the (lowp-capable) bounded stage
+ }
+
+ this->unchecked_append(op, arg);
+}
+
+void SkRasterPipeline::append_constant_color(SkArenaAlloc* alloc, const float rgba[4]) { // append the cheapest stage that produces this constant color
+ // r,g,b might be outside [0,1], but alpha should probably always be in [0,1].
+ SkASSERT(0 <= rgba[3] && rgba[3] <= 1);
+
+ if (rgba[0] == 0 && rgba[1] == 0 && rgba[2] == 0 && rgba[3] == 1) {
+ this->append(Op::black_color); // ctx-free special case
+ } else if (rgba[0] == 1 && rgba[1] == 1 && rgba[2] == 1 && rgba[3] == 1) {
+ this->append(Op::white_color); // ctx-free special case
+ } else {
+ auto ctx = alloc->make<SkRasterPipeline_UniformColorCtx>();
+ skvx::float4 color = skvx::float4::Load(rgba);
+ color.store(&ctx->r);
+
+ // uniform_color requires colors in range and can go lowp,
+ // while unbounded_uniform_color supports out-of-range colors too but not lowp.
+ if (0 <= rgba[0] && rgba[0] <= rgba[3] &&
+ 0 <= rgba[1] && rgba[1] <= rgba[3] &&
+ 0 <= rgba[2] && rgba[2] <= rgba[3]) { // premul-valid: each channel <= alpha
+ // To make loads more direct, we store 8-bit values in 16-bit slots.
+ color = color * 255.0f + 0.5f;
+ ctx->rgba[0] = (uint16_t)color[0];
+ ctx->rgba[1] = (uint16_t)color[1];
+ ctx->rgba[2] = (uint16_t)color[2];
+ ctx->rgba[3] = (uint16_t)color[3];
+ this->unchecked_append(Op::uniform_color, ctx);
+ } else {
+ this->unchecked_append(Op::unbounded_uniform_color, ctx);
+ }
+ }
+}
+
+void SkRasterPipeline::append_matrix(SkArenaAlloc* alloc, const SkMatrix& matrix) { // append the narrowest matrix stage for this transform
+ SkMatrix::TypeMask mt = matrix.getType();
+
+ if (mt == SkMatrix::kIdentity_Mask) {
+ return; // identity: nothing to do
+ }
+ if (mt == SkMatrix::kTranslate_Mask) {
+ float* trans = alloc->makeArrayDefault<float>(2);
+ trans[0] = matrix.getTranslateX();
+ trans[1] = matrix.getTranslateY();
+ this->append(Op::matrix_translate, trans);
+ } else if ((mt | (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) ==
+ (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask)) { // mask is a subset of scale|translate
+ float* scaleTrans = alloc->makeArrayDefault<float>(4);
+ scaleTrans[0] = matrix.getScaleX();
+ scaleTrans[1] = matrix.getScaleY();
+ scaleTrans[2] = matrix.getTranslateX();
+ scaleTrans[3] = matrix.getTranslateY();
+ this->append(Op::matrix_scale_translate, scaleTrans);
+ } else {
+ float* storage = alloc->makeArrayDefault<float>(9);
+ matrix.get9(storage);
+ if (!matrix.hasPerspective()) {
+ // note: asAffine and the 2x3 stage really only need 6 entries
+ this->append(Op::matrix_2x3, storage);
+ } else {
+ this->append(Op::matrix_perspective, storage);
+ }
+ }
+}
+
+void SkRasterPipeline::append_load(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) { // append src-load stage(s) for `ct`; non-native layouts get fixup stages after the raw load
+ switch (ct) {
+ case kUnknown_SkColorType: SkASSERT(false); break;
+
+ case kAlpha_8_SkColorType: this->append(Op::load_a8, ctx); break;
+ case kA16_unorm_SkColorType: this->append(Op::load_a16, ctx); break;
+ case kA16_float_SkColorType: this->append(Op::load_af16, ctx); break;
+ case kRGB_565_SkColorType: this->append(Op::load_565, ctx); break;
+ case kARGB_4444_SkColorType: this->append(Op::load_4444, ctx); break;
+ case kR8G8_unorm_SkColorType: this->append(Op::load_rg88, ctx); break;
+ case kR16G16_unorm_SkColorType: this->append(Op::load_rg1616, ctx); break;
+ case kR16G16_float_SkColorType: this->append(Op::load_rgf16, ctx); break;
+ case kRGBA_8888_SkColorType: this->append(Op::load_8888, ctx); break;
+ case kRGBA_1010102_SkColorType: this->append(Op::load_1010102, ctx); break;
+ case kR16G16B16A16_unorm_SkColorType:this->append(Op::load_16161616,ctx); break;
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: this->append(Op::load_f16, ctx); break;
+ case kRGBA_F32_SkColorType: this->append(Op::load_f32, ctx); break;
+
+ case kGray_8_SkColorType: this->append(Op::load_a8, ctx); // reuse the a8 loader, then rewrite channels
+ this->append(Op::alpha_to_gray);
+ break;
+
+ case kR8_unorm_SkColorType: this->append(Op::load_a8, ctx);
+ this->append(Op::alpha_to_red);
+ break;
+
+ case kRGB_888x_SkColorType: this->append(Op::load_8888, ctx);
+ this->append(Op::force_opaque); // x-channel is ignored; alpha becomes 1
+ break;
+
+ case kBGRA_1010102_SkColorType: this->append(Op::load_1010102, ctx);
+ this->append(Op::swap_rb);
+ break;
+
+ case kRGB_101010x_SkColorType: this->append(Op::load_1010102, ctx);
+ this->append(Op::force_opaque);
+ break;
+
+ case kBGR_101010x_SkColorType: this->append(Op::load_1010102, ctx);
+ this->append(Op::force_opaque);
+ this->append(Op::swap_rb);
+ break;
+
+ case kBGR_101010x_XR_SkColorType: this->append(Op::load_1010102_xr, ctx);
+ this->append(Op::force_opaque);
+ this->append(Op::swap_rb);
+ break;
+
+ case kBGRA_8888_SkColorType: this->append(Op::load_8888, ctx);
+ this->append(Op::swap_rb);
+ break;
+
+ case kSRGBA_8888_SkColorType:
+ this->append(Op::load_8888, ctx);
+ this->append_transfer_function(*skcms_sRGB_TransferFunction()); // decode sRGB to linear after the raw load
+ break;
+ }
+}
+
+void SkRasterPipeline::append_load_dst(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) { // like append_load(), but into the dst registers (uses _dst stage variants)
+ switch (ct) {
+ case kUnknown_SkColorType: SkASSERT(false); break;
+
+ case kAlpha_8_SkColorType: this->append(Op::load_a8_dst, ctx); break;
+ case kA16_unorm_SkColorType: this->append(Op::load_a16_dst, ctx); break;
+ case kA16_float_SkColorType: this->append(Op::load_af16_dst, ctx); break;
+ case kRGB_565_SkColorType: this->append(Op::load_565_dst, ctx); break;
+ case kARGB_4444_SkColorType: this->append(Op::load_4444_dst, ctx); break;
+ case kR8G8_unorm_SkColorType: this->append(Op::load_rg88_dst, ctx); break;
+ case kR16G16_unorm_SkColorType: this->append(Op::load_rg1616_dst, ctx); break;
+ case kR16G16_float_SkColorType: this->append(Op::load_rgf16_dst, ctx); break;
+ case kRGBA_8888_SkColorType: this->append(Op::load_8888_dst, ctx); break;
+ case kRGBA_1010102_SkColorType: this->append(Op::load_1010102_dst, ctx); break;
+ case kR16G16B16A16_unorm_SkColorType: this->append(Op::load_16161616_dst,ctx); break;
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: this->append(Op::load_f16_dst, ctx); break;
+ case kRGBA_F32_SkColorType: this->append(Op::load_f32_dst, ctx); break;
+
+ case kGray_8_SkColorType: this->append(Op::load_a8_dst, ctx);
+ this->append(Op::alpha_to_gray_dst);
+ break;
+
+ case kR8_unorm_SkColorType: this->append(Op::load_a8_dst, ctx);
+ this->append(Op::alpha_to_red_dst);
+ break;
+
+ case kRGB_888x_SkColorType: this->append(Op::load_8888_dst, ctx);
+ this->append(Op::force_opaque_dst);
+ break;
+
+ case kBGRA_1010102_SkColorType: this->append(Op::load_1010102_dst, ctx);
+ this->append(Op::swap_rb_dst);
+ break;
+
+ case kRGB_101010x_SkColorType: this->append(Op::load_1010102_dst, ctx);
+ this->append(Op::force_opaque_dst);
+ break;
+
+ case kBGR_101010x_SkColorType: this->append(Op::load_1010102_dst, ctx);
+ this->append(Op::force_opaque_dst);
+ this->append(Op::swap_rb_dst);
+ break;
+
+ case kBGR_101010x_XR_SkColorType: this->append(Op::load_1010102_xr_dst, ctx);
+ this->append(Op::force_opaque_dst);
+ this->append(Op::swap_rb_dst);
+ break;
+
+ case kBGRA_8888_SkColorType: this->append(Op::load_8888_dst, ctx);
+ this->append(Op::swap_rb_dst);
+ break;
+
+ case kSRGBA_8888_SkColorType:
+ // TODO: We could remove the double-swap if we had _dst versions of all the TF stages
+ this->append(Op::load_8888_dst, ctx);
+ this->append(Op::swap_src_dst); // TF stages only operate on src, so swap, decode, swap back
+ this->append_transfer_function(*skcms_sRGB_TransferFunction());
+ this->append(Op::swap_src_dst);
+ break;
+ }
+}
+
+void SkRasterPipeline::append_store(SkColorType ct, const SkRasterPipeline_MemoryCtx* ctx) { // append store stage(s); non-native layouts get fixup stages before the raw store
+ switch (ct) {
+ case kUnknown_SkColorType: SkASSERT(false); break;
+
+ case kAlpha_8_SkColorType: this->append(Op::store_a8, ctx); break;
+ case kR8_unorm_SkColorType: this->append(Op::store_r8, ctx); break;
+ case kA16_unorm_SkColorType: this->append(Op::store_a16, ctx); break;
+ case kA16_float_SkColorType: this->append(Op::store_af16, ctx); break;
+ case kRGB_565_SkColorType: this->append(Op::store_565, ctx); break;
+ case kARGB_4444_SkColorType: this->append(Op::store_4444, ctx); break;
+ case kR8G8_unorm_SkColorType: this->append(Op::store_rg88, ctx); break;
+ case kR16G16_unorm_SkColorType: this->append(Op::store_rg1616, ctx); break;
+ case kR16G16_float_SkColorType: this->append(Op::store_rgf16, ctx); break;
+ case kRGBA_8888_SkColorType: this->append(Op::store_8888, ctx); break;
+ case kRGBA_1010102_SkColorType: this->append(Op::store_1010102, ctx); break;
+ case kR16G16B16A16_unorm_SkColorType: this->append(Op::store_16161616,ctx); break;
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: this->append(Op::store_f16, ctx); break;
+ case kRGBA_F32_SkColorType: this->append(Op::store_f32, ctx); break;
+
+ case kRGB_888x_SkColorType: this->append(Op::force_opaque); // fixups run before the store here (mirror of append_load)
+ this->append(Op::store_8888, ctx);
+ break;
+
+ case kBGRA_1010102_SkColorType: this->append(Op::swap_rb);
+ this->append(Op::store_1010102, ctx);
+ break;
+
+ case kRGB_101010x_SkColorType: this->append(Op::force_opaque);
+ this->append(Op::store_1010102, ctx);
+ break;
+
+ case kBGR_101010x_SkColorType: this->append(Op::force_opaque);
+ this->append(Op::swap_rb);
+ this->append(Op::store_1010102, ctx);
+ break;
+
+ case kBGR_101010x_XR_SkColorType: this->append(Op::force_opaque);
+ this->append(Op::swap_rb);
+ this->append(Op::store_1010102_xr, ctx);
+ break;
+
+ case kGray_8_SkColorType: this->append(Op::bt709_luminance_or_luma_to_alpha);
+ this->append(Op::store_a8, ctx);
+ break;
+
+ case kBGRA_8888_SkColorType: this->append(Op::swap_rb);
+ this->append(Op::store_8888, ctx);
+ break;
+
+ case kSRGBA_8888_SkColorType:
+ this->append_transfer_function(*skcms_sRGB_Inverse_TransferFunction()); // encode linear back to sRGB before storing
+ this->append(Op::store_8888, ctx);
+ break;
+ }
+}
+
+void SkRasterPipeline::append_transfer_function(const skcms_TransferFunction& tf) { // pick the TF stage by curve family; ctx points at the caller's tf (caller must keep it alive)
+ void* ctx = const_cast<void*>(static_cast<const void*>(&tf));
+ switch (skcms_TransferFunction_getType(&tf)) {
+ case skcms_TFType_Invalid: SkASSERT(false); break;
+
+ case skcms_TFType_sRGBish:
+ if (tf.a == 1 && tf.b == 0 && tf.c == 0 && tf.d == 0 && tf.e == 0 && tf.f == 0) {
+ this->unchecked_append(Op::gamma_, ctx); // pure power curve: cheaper stage
+ } else {
+ this->unchecked_append(Op::parametric, ctx);
+ }
+ break;
+ case skcms_TFType_PQish: this->unchecked_append(Op::PQish, ctx); break;
+ case skcms_TFType_HLGish: this->unchecked_append(Op::HLGish, ctx); break;
+ case skcms_TFType_HLGinvish: this->unchecked_append(Op::HLGinvish, ctx); break;
+ }
+}
+
+// GPUs clamp all color channels to the limits of the format just before the blend step. To match
+// that auto-clamp, the RP blitter uses this helper immediately before appending blending stages.
+void SkRasterPipeline::append_clamp_if_normalized(const SkImageInfo& info) { // see comment above: mimic GPU pre-blend clamping for normalized formats
+ if (SkColorTypeIsNormalized(info.colorType())) {
+ this->unchecked_append(Op::clamp_01, nullptr);
+ }
+}
+
+void SkRasterPipeline::append_stack_rewind() { // lazily create the shared rewind context; checkpoint is injected at build time
+ if (!fRewindCtx) {
+ fRewindCtx = fAlloc->make<SkRasterPipeline_RewindCtx>();
+ }
+ this->unchecked_append(Op::stack_rewind, fRewindCtx);
+}
+
+static void prepend_to_pipeline(SkRasterPipelineStage*& ip, SkOpts::StageFn stageFn, void* ctx) { // writes one stage just before `ip` and moves `ip` back
+ --ip;
+ ip->fn = stageFn;
+ ip->ctx = ctx;
+}
+
+bool SkRasterPipeline::build_lowp_pipeline(SkRasterPipelineStage* ip) const { // try to assemble a lowp program ending at `ip`; false if any stage lacks a lowp impl
+ if (gForceHighPrecisionRasterPipeline || fRewindCtx) { // rewind support exists only in highp
+ return false;
+ }
+ // Stages are stored backwards in fStages; to compensate, we assemble the pipeline in reverse
+ // here, back to front.
+ prepend_to_pipeline(ip, SkOpts::just_return_lowp, /*ctx=*/nullptr);
+ for (const StageList* st = fStages; st; st = st->prev) {
+ int opIndex = (int)st->stage;
+ if (opIndex >= kNumRasterPipelineLowpOps || !SkOpts::ops_lowp[opIndex]) {
+ // This program contains a stage that doesn't exist in lowp.
+ return false;
+ }
+ prepend_to_pipeline(ip, SkOpts::ops_lowp[opIndex], st->ctx);
+ }
+ return true;
+}
+
+void SkRasterPipeline::build_highp_pipeline(SkRasterPipelineStage* ip) const { // assemble the float program ending at `ip`; always succeeds
+ // We assemble the pipeline in reverse, since the stage list is stored backwards.
+ prepend_to_pipeline(ip, SkOpts::just_return_highp, /*ctx=*/nullptr);
+ for (const StageList* st = fStages; st; st = st->prev) {
+ int opIndex = (int)st->stage;
+ prepend_to_pipeline(ip, SkOpts::ops_highp[opIndex], st->ctx);
+ }
+
+ // stack_checkpoint and stack_rewind are only implemented in highp. We only need these stages
+ // when generating long (or looping) pipelines from SkSL. The other stages used by the SkSL
+ // Raster Pipeline generator will only have highp implementations, because we can't execute SkSL
+ // code without floating point.
+ if (fRewindCtx) {
+ const int rewindIndex = (int)Op::stack_checkpoint;
+ prepend_to_pipeline(ip, SkOpts::ops_highp[rewindIndex], fRewindCtx); // checkpoint becomes the very first stage
+ }
+}
+
+SkRasterPipeline::StartPipelineFn SkRasterPipeline::build_pipeline( // fill the program buffer ending at `ip`; returns the matching start function
+ SkRasterPipelineStage* ip) const {
+ // We try to build a lowp pipeline first; if that fails, we fall back to a highp float pipeline.
+ if (this->build_lowp_pipeline(ip)) {
+ return SkOpts::start_pipeline_lowp;
+ }
+
+ this->build_highp_pipeline(ip);
+ return SkOpts::start_pipeline_highp;
+}
+
+int SkRasterPipeline::stages_needed() const { // program-buffer size: appended stages + just_return (+ injected checkpoint)
+ // Add 1 to budget for a `just_return` stage at the end.
+ int stages = fNumStages + 1;
+
+ // If we have any stack_rewind stages, we will need to inject a stack_checkpoint stage.
+ if (fRewindCtx) {
+ stages += 1;
+ }
+ return stages;
+}
+
+void SkRasterPipeline::run(size_t x, size_t y, size_t w, size_t h) const { // build the program on the stack and execute it over the [x,x+w) x [y,y+h) rect
+ if (this->empty()) {
+ return;
+ }
+
+ int stagesNeeded = this->stages_needed();
+
+ // Best to not use fAlloc here... we can't bound how often run() will be called.
+ AutoSTMalloc<32, SkRasterPipelineStage> program(stagesNeeded);
+
+ auto start_pipeline = this->build_pipeline(program.get() + stagesNeeded); // builder fills backwards from one-past-the-end
+ start_pipeline(x,y,x+w,y+h, program.get());
+}
+
+std::function<void(size_t, size_t, size_t, size_t)> SkRasterPipeline::compile() const { // build once into the arena, return a reusable run(x,y,w,h) closure
+ if (this->empty()) {
+ return [](size_t, size_t, size_t, size_t) {};
+ }
+
+ int stagesNeeded = this->stages_needed();
+
+ SkRasterPipelineStage* program = fAlloc->makeArray<SkRasterPipelineStage>(stagesNeeded); // arena-owned: closure must not outlive fAlloc
+
+ auto start_pipeline = this->build_pipeline(program + stagesNeeded);
+ return [=](size_t x, size_t y, size_t w, size_t h) {
+ start_pipeline(x,y,x+w,y+h, program);
+ };
+}
diff --git a/gfx/skia/skia/src/core/SkRasterPipeline.h b/gfx/skia/skia/src/core/SkRasterPipeline.h
new file mode 100644
index 0000000000..766bb0c11d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipeline.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterPipeline_DEFINED
+#define SkRasterPipeline_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMacros.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkRasterPipelineOpContexts.h"
+#include "src/core/SkRasterPipelineOpList.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+
+class SkMatrix;
+enum SkColorType : int;
+struct SkImageInfo;
+struct skcms_TransferFunction;
+
+#if __has_cpp_attribute(clang::musttail) && !defined(__EMSCRIPTEN__) && !defined(SK_CPU_ARM32)
+ #define SK_HAS_MUSTTAIL 1
+#else
+ #define SK_HAS_MUSTTAIL 0
+#endif
+
+/**
+ * SkRasterPipeline provides a cheap way to chain together a pixel processing pipeline.
+ *
+ * It's particularly designed for situations where the potential pipeline is extremely
+ * combinatoric: {N dst formats} x {M source formats} x {K mask formats} x {C transfer modes} ...
+ * No one wants to write specialized routines for all those combinations, and if we did, we'd
+ * end up bloating our code size dramatically. SkRasterPipeline stages can be chained together
+ * at runtime, so we can scale this problem linearly rather than combinatorically.
+ *
+ * Each stage is represented by a function conforming to a common interface and by an
+ * arbitrary context pointer. The stage function arguments and calling convention are
+ * designed to maximize the amount of data we can pass along the pipeline cheaply, and
+ * vary depending on CPU feature detection.
+ */
+
+// Raster pipeline programs are stored as a contiguous array of SkRasterPipelineStages.
+SK_BEGIN_REQUIRE_DENSE
+struct SkRasterPipelineStage {
+ // A function pointer from `stages_lowp` or `stages_highp`. The exact function pointer type
+ // varies depending on architecture (specifically, see `Stage` in SkRasterPipeline_opts.h).
+ void (*fn)();
+
+ // Data used by the stage function. Most context structures are declared at the top of
+ // SkRasterPipeline.h, and have names ending in Ctx (e.g. "SkRasterPipeline_SamplerCtx").
+ void* ctx;
+};
+SK_END_REQUIRE_DENSE
+
+class SkRasterPipeline {
+public:
+ explicit SkRasterPipeline(SkArenaAlloc*);
+
+ SkRasterPipeline(const SkRasterPipeline&) = delete;
+ SkRasterPipeline(SkRasterPipeline&&) = default;
+
+ SkRasterPipeline& operator=(const SkRasterPipeline&) = delete;
+ SkRasterPipeline& operator=(SkRasterPipeline&&) = default;
+
+ void reset();
+
+ void append(SkRasterPipelineOp, void* = nullptr);
+ void append(SkRasterPipelineOp op, const void* ctx) { this->append(op,const_cast<void*>(ctx)); }
+ void append(SkRasterPipelineOp, uintptr_t ctx);
+
+ // Append all stages to this pipeline.
+ void extend(const SkRasterPipeline&);
+
+ // Runs the pipeline in 2d from (x,y) inclusive to (x+w,y+h) exclusive.
+ void run(size_t x, size_t y, size_t w, size_t h) const;
+
+ // Allocates a thunk which amortizes run() setup cost in alloc.
+ std::function<void(size_t, size_t, size_t, size_t)> compile() const;
+
+ // Callers can inspect the stage list for debugging purposes.
+ struct StageList {
+ StageList* prev;
+ SkRasterPipelineOp stage;
+ void* ctx;
+ };
+
+ static const char* GetOpName(SkRasterPipelineOp op);
+ const StageList* getStageList() const { return fStages; }
+ int getNumStages() const { return fNumStages; }
+
+ // Prints the entire StageList using SkDebugf.
+ void dump() const;
+
+ // Appends a stage for the specified matrix.
+ // Tries to optimize the stage by analyzing the type of matrix.
+ void append_matrix(SkArenaAlloc*, const SkMatrix&);
+
+ // Appends a stage for a constant uniform color.
+ // Tries to optimize the stage based on the color.
+ void append_constant_color(SkArenaAlloc*, const float rgba[4]);
+
+ void append_constant_color(SkArenaAlloc* alloc, const SkColor4f& color) {
+ this->append_constant_color(alloc, color.vec());
+ }
+
+ // Like append_constant_color() but only affecting r,g,b, ignoring the alpha channel.
+ void append_set_rgb(SkArenaAlloc*, const float rgb[3]);
+
+ void append_set_rgb(SkArenaAlloc* alloc, const SkColor4f& color) {
+ this->append_set_rgb(alloc, color.vec());
+ }
+
+ void append_load (SkColorType, const SkRasterPipeline_MemoryCtx*);
+ void append_load_dst(SkColorType, const SkRasterPipeline_MemoryCtx*);
+ void append_store (SkColorType, const SkRasterPipeline_MemoryCtx*);
+
+ void append_clamp_if_normalized(const SkImageInfo&);
+
+ void append_transfer_function(const skcms_TransferFunction&);
+
+ void append_stack_rewind();
+
+ bool empty() const { return fStages == nullptr; }
+
+private:
+ bool build_lowp_pipeline(SkRasterPipelineStage* ip) const;
+ void build_highp_pipeline(SkRasterPipelineStage* ip) const;
+
+ using StartPipelineFn = void(*)(size_t,size_t,size_t,size_t, SkRasterPipelineStage* program);
+ StartPipelineFn build_pipeline(SkRasterPipelineStage*) const;
+
+ void unchecked_append(SkRasterPipelineOp, void*);
+ int stages_needed() const;
+
+ SkArenaAlloc* fAlloc;
+ SkRasterPipeline_RewindCtx* fRewindCtx;
+ StageList* fStages;
+ int fNumStages;
+};
+
+template <size_t bytes>
+class SkRasterPipeline_ : public SkRasterPipeline {
+public:
+ SkRasterPipeline_()
+ : SkRasterPipeline(&fBuiltinAlloc) {}
+
+private:
+ SkSTArenaAlloc<bytes> fBuiltinAlloc;
+};
+
+
+#endif//SkRasterPipeline_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp b/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp
new file mode 100644
index 0000000000..178f6d9dcd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipelineBlitter.cpp
@@ -0,0 +1,607 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColor.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkShader.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkUtils.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/shaders/SkShaderBase.h"
+
+#define SK_BLITTER_TRACE_IS_RASTER_PIPELINE
+#include "src/utils/SkBlitterTrace.h"
+
+class SkRasterPipelineBlitter final : public SkBlitter {
+public:
+ // This is our common entrypoint for creating the blitter once we've sorted out shaders.
+ static SkBlitter* Create(const SkPixmap& dst,
+ const SkPaint& paint,
+ const SkColor4f& dstPaintColor,
+ SkArenaAlloc* alloc,
+ const SkRasterPipeline& shaderPipeline,
+ bool is_opaque,
+ bool is_constant,
+ sk_sp<SkShader> clipShader);
+
+ SkRasterPipelineBlitter(SkPixmap dst,
+ SkArenaAlloc* alloc)
+ : fDst(dst)
+ , fAlloc(alloc)
+ , fColorPipeline(alloc)
+ , fBlendPipeline(alloc)
+ {}
+
+ void blitH (int x, int y, int w) override;
+ void blitAntiH (int x, int y, const SkAlpha[], const int16_t[]) override;
+ void blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) override;
+ void blitMask (const SkMask&, const SkIRect& clip) override;
+ void blitRect (int x, int y, int width, int height) override;
+ void blitV (int x, int y, int height, SkAlpha alpha) override;
+
+private:
+ void blitRectWithTrace(int x, int y, int w, int h, bool trace);
+ void append_load_dst (SkRasterPipeline*) const;
+ void append_store (SkRasterPipeline*) const;
+
+ // these check internally, and only append if there was a native clipShader
+ void append_clip_scale (SkRasterPipeline*) const;
+ void append_clip_lerp (SkRasterPipeline*) const;
+
+ SkPixmap fDst;
+ SkArenaAlloc* fAlloc;
+ SkRasterPipeline fColorPipeline;
+ SkRasterPipeline fBlendPipeline;
+ // If the blender is a blend-mode, we retain that information for late-stage optimizations
+ std::optional<SkBlendMode> fBlendMode;
+ // set to pipeline storage (for alpha) if we have a clipShader
+ void* fClipShaderBuffer = nullptr; // "native" : float or U16
+
+ SkRasterPipeline_MemoryCtx
+ fDstPtr = {nullptr,0}, // Always points to the top-left of fDst.
+ fMaskPtr = {nullptr,0}; // Updated each call to blitMask().
+ SkRasterPipeline_EmbossCtx fEmbossCtx; // Used only for k3D_Format masks.
+
+ // We may be able to specialize blitH() or blitRect() into a memset.
+ void (*fMemset2D)(SkPixmap*, int x,int y, int w,int h, uint64_t color) = nullptr;
+ uint64_t fMemsetColor = 0; // Big enough for largest memsettable dst format, F16.
+
+ // Built lazily on first use.
+ std::function<void(size_t, size_t, size_t, size_t)> fBlitRect,
+ fBlitAntiH,
+ fBlitMaskA8,
+ fBlitMaskLCD16,
+ fBlitMask3D;
+
+ // These values are pointed to by the blit pipelines above,
+ // which allows us to adjust them from call to call.
+ float fCurrentCoverage = 0.0f;
+ float fDitherRate = 0.0f;
+
+ using INHERITED = SkBlitter;
+};
+
+static SkColor4f paint_color_to_dst(const SkPaint& paint, const SkPixmap& dst) {
+ SkColor4f paintColor = paint.getColor4f();
+ SkColorSpaceXformSteps(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ dst.colorSpace(), kUnpremul_SkAlphaType).apply(paintColor.vec());
+ return paintColor;
+}
+
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap& dst,
+ const SkPaint& paint,
+ const SkMatrix& ctm,
+ SkArenaAlloc* alloc,
+ sk_sp<SkShader> clipShader,
+ const SkSurfaceProps& props) {
+ SkColorSpace* dstCS = dst.colorSpace();
+ SkColorType dstCT = dst.colorType();
+ SkColor4f dstPaintColor = paint_color_to_dst(paint, dst);
+
+ auto shader = as_SB(paint.getShader());
+
+ SkRasterPipeline_<256> shaderPipeline;
+ if (!shader) {
+ // Having no shader makes things nice and easy... just use the paint color
+ shaderPipeline.append_constant_color(alloc, dstPaintColor.premul().vec());
+ bool is_opaque = dstPaintColor.fA == 1.0f,
+ is_constant = true;
+ return SkRasterPipelineBlitter::Create(dst, paint, dstPaintColor, alloc, shaderPipeline,
+ is_opaque, is_constant, std::move(clipShader));
+ }
+
+ bool is_opaque = shader->isOpaque() && dstPaintColor.fA == 1.0f;
+ bool is_constant = shader->isConstant();
+
+ if (shader->appendRootStages({&shaderPipeline, alloc, dstCT, dstCS, dstPaintColor, props},
+ ctm)) {
+ if (dstPaintColor.fA != 1.0f) {
+ shaderPipeline.append(SkRasterPipelineOp::scale_1_float,
+ alloc->make<float>(dstPaintColor.fA));
+ }
+ return SkRasterPipelineBlitter::Create(dst, paint, dstPaintColor, alloc, shaderPipeline,
+ is_opaque, is_constant, std::move(clipShader));
+ }
+
+ // The shader can't draw with SkRasterPipeline.
+ return nullptr;
+}
+
+SkBlitter* SkCreateRasterPipelineBlitter(const SkPixmap& dst,
+ const SkPaint& paint,
+ const SkRasterPipeline& shaderPipeline,
+ bool is_opaque,
+ SkArenaAlloc* alloc,
+ sk_sp<SkShader> clipShader) {
+ bool is_constant = false; // If this were the case, it'd be better to just set a paint color.
+ return SkRasterPipelineBlitter::Create(dst, paint, paint_color_to_dst(paint, dst), alloc,
+ shaderPipeline, is_opaque, is_constant,
+ std::move(clipShader));
+}
+
+SkBlitter* SkRasterPipelineBlitter::Create(const SkPixmap& dst,
+ const SkPaint& paint,
+ const SkColor4f& dstPaintColor,
+ SkArenaAlloc* alloc,
+ const SkRasterPipeline& shaderPipeline,
+ bool is_opaque,
+ bool is_constant,
+ sk_sp<SkShader> clipShader) {
+ auto blitter = alloc->make<SkRasterPipelineBlitter>(dst, alloc);
+
+ // Our job in this factory is to fill out the blitter's color and blend pipelines.
+ // The color pipeline is the common front of the full blit pipeline. The blend pipeline is just
+ // the portion that does the actual blending math (and assumes that src and dst are already
+ // loaded).
+ //
+ // The full blit pipelines are each constructed lazily on first use, and include the color
+ // pipeline, reading the dst, the blend pipeline, coverage, dithering, and writing the dst.
+
+ // Start with the color pipeline
+ auto colorPipeline = &blitter->fColorPipeline;
+
+ if (clipShader) {
+ auto clipP = colorPipeline;
+ SkColorType clipCT = kRGBA_8888_SkColorType;
+ SkColorSpace* clipCS = nullptr;
+ SkSurfaceProps props{}; // default OK; clipShader doesn't render text
+ SkStageRec rec = {clipP, alloc, clipCT, clipCS, SkColors::kBlack, props};
+ if (as_SB(clipShader)->appendRootStages(rec, SkMatrix::I())) {
+ struct Storage {
+ // large enough for highp (float) or lowp(U16)
+ float fA[SkRasterPipeline_kMaxStride];
+ };
+ auto storage = alloc->make<Storage>();
+ clipP->append(SkRasterPipelineOp::store_src_a, storage->fA);
+ blitter->fClipShaderBuffer = storage->fA;
+ is_constant = false;
+ } else {
+ return nullptr;
+ }
+ }
+
+ // Let's get the shader in first.
+ colorPipeline->extend(shaderPipeline);
+
+ // If there's a color filter it comes next.
+ if (auto colorFilter = paint.getColorFilter()) {
+ SkSurfaceProps props{}; // default OK; colorFilter doesn't render text
+ SkStageRec rec = {
+ colorPipeline, alloc, dst.colorType(), dst.colorSpace(), dstPaintColor, props};
+ if (!as_CFB(colorFilter)->appendStages(rec, is_opaque)) {
+ return nullptr;
+ }
+ is_opaque = is_opaque && as_CFB(colorFilter)->isAlphaUnchanged();
+ }
+
+ // Not all formats make sense to dither (think, F16). We set their dither rate
+ // to zero. We only dither non-constant shaders, so is_constant won't change here.
+ if (paint.isDither() && !is_constant) {
+ switch (dst.info().colorType()) {
+ case kARGB_4444_SkColorType:
+ blitter->fDitherRate = 1 / 15.0f;
+ break;
+ case kRGB_565_SkColorType:
+ blitter->fDitherRate = 1 / 63.0f;
+ break;
+ case kGray_8_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kSRGBA_8888_SkColorType:
+ case kR8_unorm_SkColorType:
+ blitter->fDitherRate = 1 / 255.0f;
+ break;
+ case kRGB_101010x_SkColorType:
+ case kRGBA_1010102_SkColorType:
+ case kBGR_101010x_SkColorType:
+ case kBGRA_1010102_SkColorType:
+ blitter->fDitherRate = 1 / 1023.0f;
+ break;
+
+ case kUnknown_SkColorType:
+ case kAlpha_8_SkColorType:
+ case kBGR_101010x_XR_SkColorType:
+ case kRGBA_F16_SkColorType:
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F32_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kA16_float_SkColorType:
+ case kA16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16B16A16_unorm_SkColorType:
+ blitter->fDitherRate = 0.0f;
+ break;
+ }
+ if (blitter->fDitherRate > 0.0f) {
+ colorPipeline->append(SkRasterPipelineOp::dither, &blitter->fDitherRate);
+ }
+ }
+
+ // Optimization: A pipeline that's still constant here can collapse back into a constant color.
+ if (is_constant) {
+ SkColor4f constantColor;
+ SkRasterPipeline_MemoryCtx constantColorPtr = { &constantColor, 0 };
+ // We could remove this clamp entirely, but if the destination is 8888, doing the clamp
+ // here allows the color pipeline to still run in lowp (we'll use uniform_color, rather than
+ // unbounded_uniform_color).
+ colorPipeline->append_clamp_if_normalized(dst.info());
+ colorPipeline->append(SkRasterPipelineOp::store_f32, &constantColorPtr);
+ colorPipeline->run(0,0,1,1);
+ colorPipeline->reset();
+ colorPipeline->append_constant_color(alloc, constantColor);
+
+ is_opaque = constantColor.fA == 1.0f;
+ }
+
+ // Now we'll build the blend pipeline
+ auto blendPipeline = &blitter->fBlendPipeline;
+
+ sk_sp<SkBlender> blender = paint.refBlender();
+ if (!blender) {
+ blender = SkBlender::Mode(SkBlendMode::kSrcOver);
+ }
+
+ // We can strength-reduce SrcOver into Src when opaque.
+ if (is_opaque && as_BB(blender)->asBlendMode() == SkBlendMode::kSrcOver) {
+ blender = SkBlender::Mode(SkBlendMode::kSrc);
+ }
+
+ // When we're drawing a constant color in Src mode, we can sometimes just memset.
+ // (The previous two optimizations help find more opportunities for this one.)
+ if (is_constant && as_BB(blender)->asBlendMode() == SkBlendMode::kSrc &&
+ dst.info().bytesPerPixel() <= static_cast<int>(sizeof(blitter->fMemsetColor))) {
+ // Run our color pipeline all the way through to produce what we'd memset when we can.
+ // Not all blits can memset, so we need to keep colorPipeline too.
+ SkRasterPipeline_<256> p;
+ p.extend(*colorPipeline);
+ blitter->fDstPtr = SkRasterPipeline_MemoryCtx{&blitter->fMemsetColor, 0};
+ blitter->append_store(&p);
+ p.run(0,0,1,1);
+
+ switch (blitter->fDst.shiftPerPixel()) {
+ case 0: blitter->fMemset2D = [](SkPixmap* dst, int x,int y, int w,int h, uint64_t c) {
+ void* p = dst->writable_addr(x,y);
+ while (h --> 0) {
+ memset(p, c, w);
+ p = SkTAddOffset<void>(p, dst->rowBytes());
+ }
+ }; break;
+
+ case 1: blitter->fMemset2D = [](SkPixmap* dst, int x,int y, int w,int h, uint64_t c) {
+ SkOpts::rect_memset16(dst->writable_addr16(x,y), c, w, dst->rowBytes(), h);
+ }; break;
+
+ case 2: blitter->fMemset2D = [](SkPixmap* dst, int x,int y, int w,int h, uint64_t c) {
+ SkOpts::rect_memset32(dst->writable_addr32(x,y), c, w, dst->rowBytes(), h);
+ }; break;
+
+ case 3: blitter->fMemset2D = [](SkPixmap* dst, int x,int y, int w,int h, uint64_t c) {
+ SkOpts::rect_memset64(dst->writable_addr64(x,y), c, w, dst->rowBytes(), h);
+ }; break;
+
+ // TODO(F32)?
+ }
+ }
+
+ {
+ SkSurfaceProps props{}; // default OK; blender doesn't render text
+ SkStageRec rec = {
+ blendPipeline, alloc, dst.colorType(), dst.colorSpace(), dstPaintColor, props};
+ if (!as_BB(blender)->appendStages(rec)) {
+ return nullptr;
+ }
+ blitter->fBlendMode = as_BB(blender)->asBlendMode();
+ }
+
+ blitter->fDstPtr = SkRasterPipeline_MemoryCtx{
+ blitter->fDst.writable_addr(),
+ blitter->fDst.rowBytesAsPixels(),
+ };
+
+ return blitter;
+}
+
+void SkRasterPipelineBlitter::append_load_dst(SkRasterPipeline* p) const {
+ p->append_load_dst(fDst.info().colorType(), &fDstPtr);
+ if (fDst.info().alphaType() == kUnpremul_SkAlphaType) {
+ p->append(SkRasterPipelineOp::premul_dst);
+ }
+}
+
+void SkRasterPipelineBlitter::append_store(SkRasterPipeline* p) const {
+ if (fDst.info().alphaType() == kUnpremul_SkAlphaType) {
+ p->append(SkRasterPipelineOp::unpremul);
+ }
+ p->append_store(fDst.info().colorType(), &fDstPtr);
+}
+
+void SkRasterPipelineBlitter::append_clip_scale(SkRasterPipeline* p) const {
+ if (fClipShaderBuffer) {
+ p->append(SkRasterPipelineOp::scale_native, fClipShaderBuffer);
+ }
+}
+
+void SkRasterPipelineBlitter::append_clip_lerp(SkRasterPipeline* p) const {
+ if (fClipShaderBuffer) {
+ p->append(SkRasterPipelineOp::lerp_native, fClipShaderBuffer);
+ }
+}
+
+void SkRasterPipelineBlitter::blitH(int x, int y, int w) {
+ this->blitRect(x,y,w,1);
+}
+
+void SkRasterPipelineBlitter::blitRect(int x, int y, int w, int h) {
+ this->blitRectWithTrace(x, y, w, h, true);
+}
+
+void SkRasterPipelineBlitter::blitRectWithTrace(int x, int y, int w, int h, bool trace) {
+ if (fMemset2D) {
+ SK_BLITTER_TRACE_STEP(blitRectByMemset,
+ trace,
+ /*scanlines=*/h,
+ /*pixels=*/w * h);
+ fMemset2D(&fDst, x,y, w,h, fMemsetColor);
+ return;
+ }
+
+ if (!fBlitRect) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ p.append_clamp_if_normalized(fDst.info());
+ if (fBlendMode == SkBlendMode::kSrcOver
+ && (fDst.info().colorType() == kRGBA_8888_SkColorType ||
+ fDst.info().colorType() == kBGRA_8888_SkColorType)
+ && !fDst.colorSpace()
+ && fDst.info().alphaType() != kUnpremul_SkAlphaType
+ && fDitherRate == 0.0f) {
+ if (fDst.info().colorType() == kBGRA_8888_SkColorType) {
+ p.append(SkRasterPipelineOp::swap_rb);
+ }
+ this->append_clip_scale(&p);
+ p.append(SkRasterPipelineOp::srcover_rgba_8888, &fDstPtr);
+ } else {
+ if (fBlendMode != SkBlendMode::kSrc) {
+ this->append_load_dst(&p);
+ p.extend(fBlendPipeline);
+ this->append_clip_lerp(&p);
+ } else if (fClipShaderBuffer) {
+ this->append_load_dst(&p);
+ this->append_clip_lerp(&p);
+ }
+ this->append_store(&p);
+ }
+ fBlitRect = p.compile();
+ }
+
+ SK_BLITTER_TRACE_STEP(blitRect, trace, /*scanlines=*/h, /*pixels=*/w * h);
+ fBlitRect(x,y,w,h);
+}
+
+void SkRasterPipelineBlitter::blitAntiH(int x, int y, const SkAlpha aa[], const int16_t runs[]) {
+ if (!fBlitAntiH) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ p.append_clamp_if_normalized(fDst.info());
+ if (fBlendMode.has_value() &&
+ SkBlendMode_ShouldPreScaleCoverage(*fBlendMode, /*rgb_coverage=*/false)) {
+ p.append(SkRasterPipelineOp::scale_1_float, &fCurrentCoverage);
+ this->append_clip_scale(&p);
+ this->append_load_dst(&p);
+ p.extend(fBlendPipeline);
+ } else {
+ this->append_load_dst(&p);
+ p.extend(fBlendPipeline);
+ p.append(SkRasterPipelineOp::lerp_1_float, &fCurrentCoverage);
+ this->append_clip_lerp(&p);
+ }
+
+ this->append_store(&p);
+ fBlitAntiH = p.compile();
+ }
+
+ SK_BLITTER_TRACE_STEP(blitAntiH, true, /*scanlines=*/1ul, /*pixels=*/0ul);
+ for (int16_t run = *runs; run > 0; run = *runs) {
+ SK_BLITTER_TRACE_STEP_ACCUMULATE(blitAntiH, /*pixels=*/run);
+ switch (*aa) {
+ case 0x00: break;
+ case 0xff:this->blitRectWithTrace(x,y,run, 1, false); break;
+ default:
+ fCurrentCoverage = *aa * (1/255.0f);
+ fBlitAntiH(x,y,run,1);
+ }
+ x += run;
+ runs += run;
+ aa += run;
+ }
+}
+
+void SkRasterPipelineBlitter::blitAntiH2(int x, int y, U8CPU a0, U8CPU a1) {
+ SkIRect clip = {x,y, x+2,y+1};
+ uint8_t coverage[] = { (uint8_t)a0, (uint8_t)a1 };
+
+ SkMask mask;
+ mask.fImage = coverage;
+ mask.fBounds = clip;
+ mask.fRowBytes = 2;
+ mask.fFormat = SkMask::kA8_Format;
+
+ this->blitMask(mask, clip);
+}
+
+void SkRasterPipelineBlitter::blitAntiV2(int x, int y, U8CPU a0, U8CPU a1) {
+ SkIRect clip = {x,y, x+1,y+2};
+ uint8_t coverage[] = { (uint8_t)a0, (uint8_t)a1 };
+
+ SkMask mask;
+ mask.fImage = coverage;
+ mask.fBounds = clip;
+ mask.fRowBytes = 1;
+ mask.fFormat = SkMask::kA8_Format;
+
+ this->blitMask(mask, clip);
+}
+
+void SkRasterPipelineBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ SkIRect clip = {x,y, x+1,y+height};
+
+ SkMask mask;
+ mask.fImage = &alpha;
+ mask.fBounds = clip;
+ mask.fRowBytes = 0; // so we reuse the 1 "row" for all of height
+ mask.fFormat = SkMask::kA8_Format;
+
+ this->blitMask(mask, clip);
+}
+
+void SkRasterPipelineBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ // TODO: native BW masks?
+ return INHERITED::blitMask(mask, clip);
+ }
+
+ // ARGB and SDF masks shouldn't make it here.
+ SkASSERT(mask.fFormat == SkMask::kA8_Format
+ || mask.fFormat == SkMask::kLCD16_Format
+ || mask.fFormat == SkMask::k3D_Format);
+
+ auto extract_mask_plane = [&mask](int plane, SkRasterPipeline_MemoryCtx* ctx) {
+ // LCD is 16-bit per pixel; A8 and 3D are 8-bit per pixel.
+ size_t bpp = mask.fFormat == SkMask::kLCD16_Format ? 2 : 1;
+
+ // Select the right mask plane. Usually plane == 0 and this is just mask.fImage.
+ auto ptr = (uintptr_t)mask.fImage
+ + plane * mask.computeImageSize();
+
+ // Update ctx to point "into" this current mask, but lined up with fDstPtr at (0,0).
+ // This sort of trickery upsets UBSAN (pointer-overflow) so our ptr must be a uintptr_t.
+ // mask.fRowBytes is a uint32_t, which would break our addressing math on 64-bit builds.
+ size_t rowBytes = mask.fRowBytes;
+ ctx->stride = rowBytes / bpp;
+ ctx->pixels = (void*)(ptr - mask.fBounds.left() * bpp
+ - mask.fBounds.top() * rowBytes);
+ };
+
+ extract_mask_plane(0, &fMaskPtr);
+ if (mask.fFormat == SkMask::k3D_Format) {
+ extract_mask_plane(1, &fEmbossCtx.mul);
+ extract_mask_plane(2, &fEmbossCtx.add);
+ }
+
+ // Lazily build whichever pipeline we need, specialized for each mask format.
+ if (mask.fFormat == SkMask::kA8_Format && !fBlitMaskA8) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ p.append_clamp_if_normalized(fDst.info());
+ if (fBlendMode.has_value() &&
+ SkBlendMode_ShouldPreScaleCoverage(*fBlendMode, /*rgb_coverage=*/false)) {
+ p.append(SkRasterPipelineOp::scale_u8, &fMaskPtr);
+ this->append_clip_scale(&p);
+ this->append_load_dst(&p);
+ p.extend(fBlendPipeline);
+ } else {
+ this->append_load_dst(&p);
+ p.extend(fBlendPipeline);
+ p.append(SkRasterPipelineOp::lerp_u8, &fMaskPtr);
+ this->append_clip_lerp(&p);
+ }
+ this->append_store(&p);
+ fBlitMaskA8 = p.compile();
+ }
+ if (mask.fFormat == SkMask::kLCD16_Format && !fBlitMaskLCD16) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ p.append_clamp_if_normalized(fDst.info());
+ if (fBlendMode.has_value() &&
+ SkBlendMode_ShouldPreScaleCoverage(*fBlendMode, /*rgb_coverage=*/true)) {
+ // Somewhat unusually, scale_565 needs dst loaded first.
+ this->append_load_dst(&p);
+ p.append(SkRasterPipelineOp::scale_565, &fMaskPtr);
+ this->append_clip_scale(&p);
+ p.extend(fBlendPipeline);
+ } else {
+ this->append_load_dst(&p);
+ p.extend(fBlendPipeline);
+ p.append(SkRasterPipelineOp::lerp_565, &fMaskPtr);
+ this->append_clip_lerp(&p);
+ }
+ this->append_store(&p);
+ fBlitMaskLCD16 = p.compile();
+ }
+ if (mask.fFormat == SkMask::k3D_Format && !fBlitMask3D) {
+ SkRasterPipeline p(fAlloc);
+ p.extend(fColorPipeline);
+ // This bit is where we differ from kA8_Format:
+ p.append(SkRasterPipelineOp::emboss, &fEmbossCtx);
+ // Now onward just as kA8.
+ p.append_clamp_if_normalized(fDst.info());
+ if (fBlendMode.has_value() &&
+ SkBlendMode_ShouldPreScaleCoverage(*fBlendMode, /*rgb_coverage=*/false)) {
+ p.append(SkRasterPipelineOp::scale_u8, &fMaskPtr);
+ this->append_clip_scale(&p);
+ this->append_load_dst(&p);
+ p.extend(fBlendPipeline);
+ } else {
+ this->append_load_dst(&p);
+ p.extend(fBlendPipeline);
+ p.append(SkRasterPipelineOp::lerp_u8, &fMaskPtr);
+ this->append_clip_lerp(&p);
+ }
+ this->append_store(&p);
+ fBlitMask3D = p.compile();
+ }
+
+ std::function<void(size_t,size_t,size_t,size_t)>* blitter = nullptr;
+ switch (mask.fFormat) {
+ case SkMask::kA8_Format: blitter = &fBlitMaskA8; break;
+ case SkMask::kLCD16_Format: blitter = &fBlitMaskLCD16; break;
+ case SkMask::k3D_Format: blitter = &fBlitMask3D; break;
+ default:
+ SkASSERT(false);
+ return;
+ }
+
+ SkASSERT(blitter);
+ SK_BLITTER_TRACE_STEP(blitMask,
+ true,
+ /*scanlines=*/clip.height(),
+ /*pixels=*/clip.width() * clip.height());
+ (*blitter)(clip.left(),clip.top(), clip.width(),clip.height());
+}
diff --git a/gfx/skia/skia/src/core/SkRasterPipelineOpContexts.h b/gfx/skia/skia/src/core/SkRasterPipelineOpContexts.h
new file mode 100644
index 0000000000..8ebbfdaff9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipelineOpContexts.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterPipelineOpContexts_DEFINED
+#define SkRasterPipelineOpContexts_DEFINED
+
+// The largest number of pixels we handle at a time. We have a separate value for the largest number
+// of pixels we handle in the highp pipeline. Many of the context structs in this file are only used
+// by stages that have no lowp implementation. They can therefore use the (smaller) highp value to
+// save memory in the arena.
+inline static constexpr int SkRasterPipeline_kMaxStride = 16;
+inline static constexpr int SkRasterPipeline_kMaxStride_highp = 8;
+
+// These structs hold the context data for many of the Raster Pipeline ops.
+struct SkRasterPipeline_MemoryCtx {
+ void* pixels;
+ int stride;
+};
+
+struct SkRasterPipeline_GatherCtx {
+ const void* pixels;
+ int stride;
+ float width;
+ float height;
+ float weights[16]; // for bicubic and bicubic_clamp_8888
+ // Controls whether pixel i-1 or i is selected when floating point sample position is exactly i.
+ bool roundDownAtInteger = false;
+};
+
+// State shared by save_xy, accumulate, and bilinear_* / bicubic_*.
+struct SkRasterPipeline_SamplerCtx {
+ float x[SkRasterPipeline_kMaxStride_highp];
+ float y[SkRasterPipeline_kMaxStride_highp];
+ float fx[SkRasterPipeline_kMaxStride_highp];
+ float fy[SkRasterPipeline_kMaxStride_highp];
+ float scalex[SkRasterPipeline_kMaxStride_highp];
+ float scaley[SkRasterPipeline_kMaxStride_highp];
+
+ // for bicubic_[np][13][xy]
+ float weights[16];
+ float wx[4][SkRasterPipeline_kMaxStride_highp];
+ float wy[4][SkRasterPipeline_kMaxStride_highp];
+};
+
+struct SkRasterPipeline_TileCtx {
+ float scale;
+ float invScale; // cache of 1/scale
+ // When in the reflection portion of mirror tiling we need to snap the opposite direction
+ // at integer sample points than when in the forward direction. This controls which way we bias
+ // in the reflection. It should be 1 if SkRasterPipeline_GatherCtx::roundDownAtInteger is true
+ // and otherwise -1.
+ int mirrorBiasDir = -1;
+};
+
+struct SkRasterPipeline_DecalTileCtx {
+ uint32_t mask[SkRasterPipeline_kMaxStride];
+ float limit_x;
+ float limit_y;
+ // These control which edge of the interval is included (i.e. closed interval at 0 or at limit).
+ // They should be set to limit_x and limit_y if SkRasterPipeline_GatherCtx::roundDownAtInteger
+ // is true and otherwise zero.
+ float inclusiveEdge_x = 0;
+ float inclusiveEdge_y = 0;
+};
+
+// State used by mipmap_linear_*
+struct SkRasterPipeline_MipmapCtx {
+ // Original coords, saved before the base level logic
+ float x[SkRasterPipeline_kMaxStride_highp];
+ float y[SkRasterPipeline_kMaxStride_highp];
+
+ // Base level color
+ float r[SkRasterPipeline_kMaxStride_highp];
+ float g[SkRasterPipeline_kMaxStride_highp];
+ float b[SkRasterPipeline_kMaxStride_highp];
+ float a[SkRasterPipeline_kMaxStride_highp];
+
+ // Scale factors to transform base level coords to lower level coords
+ float scaleX;
+ float scaleY;
+
+ float lowerWeight;
+};
+
+struct SkRasterPipeline_CoordClampCtx {
+ float min_x, min_y;
+ float max_x, max_y;
+};
+
+struct SkRasterPipeline_CallbackCtx {
+ void (*fn)(SkRasterPipeline_CallbackCtx* self,
+ int active_pixels /*<= SkRasterPipeline_kMaxStride_highp*/);
+
+ // When called, fn() will have our active pixels available in rgba.
+ // When fn() returns, the pipeline will read back those active pixels from read_from.
+ float rgba[4*SkRasterPipeline_kMaxStride_highp];
+ float* read_from = rgba;
+};
+
+// state shared by stack_checkpoint and stack_rewind
+struct SkRasterPipelineStage;
+
+struct SkRasterPipeline_RewindCtx {
+ float r[SkRasterPipeline_kMaxStride_highp];
+ float g[SkRasterPipeline_kMaxStride_highp];
+ float b[SkRasterPipeline_kMaxStride_highp];
+ float a[SkRasterPipeline_kMaxStride_highp];
+ float dr[SkRasterPipeline_kMaxStride_highp];
+ float dg[SkRasterPipeline_kMaxStride_highp];
+ float db[SkRasterPipeline_kMaxStride_highp];
+ float da[SkRasterPipeline_kMaxStride_highp];
+ SkRasterPipelineStage* stage;
+};
+
+struct SkRasterPipeline_GradientCtx {
+ size_t stopCount;
+ float* fs[4];
+ float* bs[4];
+ float* ts;
+};
+
+struct SkRasterPipeline_EvenlySpaced2StopGradientCtx {
+ float f[4];
+ float b[4];
+};
+
+struct SkRasterPipeline_2PtConicalCtx {
+ uint32_t fMask[SkRasterPipeline_kMaxStride_highp];
+ float fP0,
+ fP1;
+};
+
+struct SkRasterPipeline_UniformColorCtx {
+ float r,g,b,a;
+ uint16_t rgba[4]; // [0,255] in a 16-bit lane.
+};
+
+struct SkRasterPipeline_EmbossCtx {
+ SkRasterPipeline_MemoryCtx mul,
+ add;
+};
+
+struct SkRasterPipeline_TablesCtx {
+ const uint8_t *r, *g, *b, *a;
+};
+
+struct SkRasterPipeline_BinaryOpCtx {
+ float *dst;
+ const float *src;
+};
+
+struct SkRasterPipeline_TernaryOpCtx {
+ float *dst;
+ const float *src0;
+ const float *src1;
+};
+
+struct SkRasterPipeline_SwizzleCtx {
+ float *ptr;
+ uint16_t offsets[4]; // values must be byte offsets (4 * highp-stride * component-index)
+};
+
+struct SkRasterPipeline_ShuffleCtx {
+ float *ptr;
+ int count;
+ uint16_t offsets[16]; // values must be byte offsets (4 * highp-stride * component-index)
+};
+
+struct SkRasterPipeline_SwizzleCopyCtx {
+ float *dst;
+ float *src; // src values must _not_ overlap dst values
+ uint16_t offsets[4]; // values must be byte offsets (4 * highp-stride * component-index)
+};
+
+struct SkRasterPipeline_CopyIndirectCtx {
+ float *dst;
+ const float *src;
+ const uint32_t *indirectOffset; // this applies to `src` or `dst` based on the op
+ uint32_t indirectLimit; // the indirect offset is clamped to this upper bound
+ uint32_t slots; // the number of slots to copy
+};
+
+struct SkRasterPipeline_SwizzleCopyIndirectCtx : public SkRasterPipeline_CopyIndirectCtx {
+ uint16_t offsets[4]; // values must be byte offsets (4 * highp-stride * component-index)
+};
+
+struct SkRasterPipeline_BranchCtx {
+ int offset; // contains the label ID during compilation, and the program offset when compiled
+};
+
+struct SkRasterPipeline_BranchIfEqualCtx : public SkRasterPipeline_BranchCtx {
+ int value;
+ const int *ptr;
+};
+
+struct SkRasterPipeline_CaseOpCtx {
+ int expectedValue;
+ int* ptr; // points to a pair of adjacent I32s: {I32 actualValue, I32 defaultMask}
+};
+
+#endif // SkRasterPipelineOpContexts_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRasterPipelineOpList.h b/gfx/skia/skia/src/core/SkRasterPipelineOpList.h
new file mode 100644
index 0000000000..d30da38ea1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRasterPipelineOpList.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterPipelineOpList_DEFINED
+#define SkRasterPipelineOpList_DEFINED
+
+// There are two macros here: The first defines ops that have lowp (and highp) implementations.
+// The second defines ops that are only present in the highp pipeline.
+#define SK_RASTER_PIPELINE_OPS_LOWP(M) \
+ M(move_src_dst) M(move_dst_src) M(swap_src_dst) \
+ M(clamp_01) M(clamp_gamut) \
+ M(premul) M(premul_dst) \
+ M(force_opaque) M(force_opaque_dst) \
+ M(set_rgb) M(swap_rb) M(swap_rb_dst) \
+ M(black_color) M(white_color) \
+ M(uniform_color) M(uniform_color_dst) \
+ M(seed_shader) \
+ M(load_a8) M(load_a8_dst) M(store_a8) M(gather_a8) \
+ M(load_565) M(load_565_dst) M(store_565) M(gather_565) \
+ M(load_4444) M(load_4444_dst) M(store_4444) M(gather_4444) \
+ M(load_8888) M(load_8888_dst) M(store_8888) M(gather_8888) \
+ M(load_rg88) M(load_rg88_dst) M(store_rg88) M(gather_rg88) \
+ M(store_r8) \
+ M(alpha_to_gray) M(alpha_to_gray_dst) \
+ M(alpha_to_red) M(alpha_to_red_dst) \
+ M(bt709_luminance_or_luma_to_alpha) M(bt709_luminance_or_luma_to_rgb) \
+ M(bilerp_clamp_8888) \
+ M(load_src) M(store_src) M(store_src_a) M(load_dst) M(store_dst) \
+ M(scale_u8) M(scale_565) M(scale_1_float) M(scale_native) \
+ M( lerp_u8) M( lerp_565) M( lerp_1_float) M(lerp_native) \
+ M(dstatop) M(dstin) M(dstout) M(dstover) \
+ M(srcatop) M(srcin) M(srcout) M(srcover) \
+ M(clear) M(modulate) M(multiply) M(plus_) M(screen) M(xor_) \
+ M(darken) M(difference) \
+ M(exclusion) M(hardlight) M(lighten) M(overlay) \
+ M(srcover_rgba_8888) \
+ M(matrix_translate) M(matrix_scale_translate) \
+ M(matrix_2x3) \
+ M(matrix_perspective) \
+ M(decal_x) M(decal_y) M(decal_x_and_y) \
+ M(check_decal_mask) \
+ M(clamp_x_1) M(mirror_x_1) M(repeat_x_1) \
+ M(clamp_x_and_y) \
+ M(evenly_spaced_gradient) \
+ M(gradient) \
+ M(evenly_spaced_2_stop_gradient) \
+ M(xy_to_unit_angle) \
+ M(xy_to_radius) \
+ M(emboss) \
+ M(swizzle)
+
+#define SK_RASTER_PIPELINE_OPS_HIGHP_ONLY(M) \
+ M(callback) \
+ M(stack_checkpoint) M(stack_rewind) \
+ M(unbounded_set_rgb) M(unbounded_uniform_color) \
+ M(unpremul) M(unpremul_polar) M(dither) \
+ M(load_16161616) M(load_16161616_dst) M(store_16161616) M(gather_16161616) \
+ M(load_a16) M(load_a16_dst) M(store_a16) M(gather_a16) \
+ M(load_rg1616) M(load_rg1616_dst) M(store_rg1616) M(gather_rg1616) \
+ M(load_f16) M(load_f16_dst) M(store_f16) M(gather_f16) \
+ M(load_af16) M(load_af16_dst) M(store_af16) M(gather_af16) \
+ M(load_rgf16) M(load_rgf16_dst) M(store_rgf16) M(gather_rgf16) \
+ M(load_f32) M(load_f32_dst) M(store_f32) M(gather_f32) \
+ M(load_rgf32) M(store_rgf32) \
+ M(load_1010102) M(load_1010102_dst) M(store_1010102) M(gather_1010102) \
+ M(load_1010102_xr) M(load_1010102_xr_dst) M(store_1010102_xr) \
+ M(store_u16_be) \
+ M(store_src_rg) M(load_src_rg) \
+ M(byte_tables) \
+ M(colorburn) M(colordodge) M(softlight) \
+ M(hue) M(saturation) M(color) M(luminosity) \
+ M(matrix_3x3) M(matrix_3x4) M(matrix_4x5) M(matrix_4x3) \
+ M(parametric) M(gamma_) M(PQish) M(HLGish) M(HLGinvish) \
+ M(rgb_to_hsl) M(hsl_to_rgb) \
+ M(css_lab_to_xyz) M(css_oklab_to_linear_srgb) \
+ M(css_hcl_to_lab) \
+ M(css_hsl_to_srgb) M(css_hwb_to_srgb) \
+ M(gauss_a_to_rgba) \
+ M(mirror_x) M(repeat_x) \
+ M(mirror_y) M(repeat_y) \
+ M(negate_x) \
+ M(bicubic_clamp_8888) \
+ M(bilinear_setup) \
+ M(bilinear_nx) M(bilinear_px) M(bilinear_ny) M(bilinear_py) \
+ M(bicubic_setup) \
+ M(bicubic_n3x) M(bicubic_n1x) M(bicubic_p1x) M(bicubic_p3x) \
+ M(bicubic_n3y) M(bicubic_n1y) M(bicubic_p1y) M(bicubic_p3y) \
+ M(accumulate) \
+ M(mipmap_linear_init) M(mipmap_linear_update) M(mipmap_linear_finish) \
+ M(xy_to_2pt_conical_strip) \
+ M(xy_to_2pt_conical_focal_on_circle) \
+ M(xy_to_2pt_conical_well_behaved) \
+ M(xy_to_2pt_conical_smaller) \
+ M(xy_to_2pt_conical_greater) \
+ M(alter_2pt_conical_compensate_focal) \
+ M(alter_2pt_conical_unswap) \
+ M(mask_2pt_conical_nan) \
+ M(mask_2pt_conical_degenerates) M(apply_vector_mask) \
+ /* Dedicated SkSL stages begin here: */ \
+ M(init_lane_masks) M(store_device_xy01) \
+ M(load_condition_mask) M(store_condition_mask) M(merge_condition_mask) \
+ M(load_loop_mask) M(store_loop_mask) M(mask_off_loop_mask) \
+ M(reenable_loop_mask) M(merge_loop_mask) M(case_op) \
+ M(load_return_mask) M(store_return_mask) M(mask_off_return_mask) \
+ M(branch_if_all_lanes_active) M(branch_if_any_lanes_active) M(branch_if_no_lanes_active) \
+ M(branch_if_no_active_lanes_eq) M(jump) \
+ M(bitwise_and_n_ints) \
+ M(bitwise_and_int) M(bitwise_and_2_ints) M(bitwise_and_3_ints) M(bitwise_and_4_ints) \
+ M(bitwise_or_n_ints) \
+ M(bitwise_or_int) M(bitwise_or_2_ints) M(bitwise_or_3_ints) M(bitwise_or_4_ints) \
+ M(bitwise_xor_n_ints) \
+ M(bitwise_xor_int) M(bitwise_xor_2_ints) M(bitwise_xor_3_ints) M(bitwise_xor_4_ints) \
+ M(bitwise_not_int) M(bitwise_not_2_ints) M(bitwise_not_3_ints) M(bitwise_not_4_ints) \
+ M(cast_to_float_from_int) M(cast_to_float_from_2_ints) \
+ M(cast_to_float_from_3_ints) M(cast_to_float_from_4_ints) \
+ M(cast_to_float_from_uint) M(cast_to_float_from_2_uints) \
+ M(cast_to_float_from_3_uints) M(cast_to_float_from_4_uints) \
+ M(cast_to_int_from_float) M(cast_to_int_from_2_floats) \
+ M(cast_to_int_from_3_floats) M(cast_to_int_from_4_floats) \
+ M(cast_to_uint_from_float) M(cast_to_uint_from_2_floats) \
+ M(cast_to_uint_from_3_floats) M(cast_to_uint_from_4_floats) \
+ M(abs_float) M(abs_2_floats) M(abs_3_floats) M(abs_4_floats) \
+ M(abs_int) M(abs_2_ints) M(abs_3_ints) M(abs_4_ints) \
+ M(floor_float) M(floor_2_floats) M(floor_3_floats) M(floor_4_floats) \
+ M(ceil_float) M(ceil_2_floats) M(ceil_3_floats) M(ceil_4_floats) \
+ M(invsqrt_float) M(invsqrt_2_floats) M(invsqrt_3_floats) M(invsqrt_4_floats) \
+ M(inverse_mat2) M(inverse_mat3) M(inverse_mat4) \
+ M(sin_float) M(cos_float) M(tan_float) \
+ M(asin_float) M(acos_float) M(atan_float) M(atan2_n_floats) \
+ M(sqrt_float) M(pow_n_floats) M(exp_float) M(exp2_float) \
+ M(log_float) M(log2_float) M(refract_4_floats) \
+ M(copy_constant) M(copy_2_constants) M(copy_3_constants) M(copy_4_constants) \
+ M(copy_slot_masked) M(copy_2_slots_masked) M(copy_3_slots_masked) M(copy_4_slots_masked) \
+ M(copy_from_indirect_unmasked) M(copy_from_indirect_uniform_unmasked) \
+ M(copy_to_indirect_masked) M(swizzle_copy_to_indirect_masked) \
+ M(copy_slot_unmasked) M(copy_2_slots_unmasked) \
+ M(copy_3_slots_unmasked) M(copy_4_slots_unmasked) \
+ M(zero_slot_unmasked) M(zero_2_slots_unmasked) \
+ M(zero_3_slots_unmasked) M(zero_4_slots_unmasked) \
+ M(swizzle_copy_slot_masked) M(swizzle_copy_2_slots_masked) \
+ M(swizzle_copy_3_slots_masked) M(swizzle_copy_4_slots_masked) \
+ M(swizzle_1) M(swizzle_2) M(swizzle_3) M(swizzle_4) M(shuffle) \
+ M(add_n_floats) M(add_float) M(add_2_floats) M(add_3_floats) M(add_4_floats) \
+ M(add_n_ints) M(add_int) M(add_2_ints) M(add_3_ints) M(add_4_ints) \
+ M(sub_n_floats) M(sub_float) M(sub_2_floats) M(sub_3_floats) M(sub_4_floats) \
+ M(sub_n_ints) M(sub_int) M(sub_2_ints) M(sub_3_ints) M(sub_4_ints) \
+ M(mul_n_floats) M(mul_float) M(mul_2_floats) M(mul_3_floats) M(mul_4_floats) \
+ M(mul_n_ints) M(mul_int) M(mul_2_ints) M(mul_3_ints) M(mul_4_ints) \
+ M(div_n_floats) M(div_float) M(div_2_floats) M(div_3_floats) M(div_4_floats) \
+ M(div_n_ints) M(div_int) M(div_2_ints) M(div_3_ints) M(div_4_ints) \
+ M(div_n_uints) M(div_uint) M(div_2_uints) M(div_3_uints) M(div_4_uints) \
+ M(max_n_floats) M(max_float) M(max_2_floats) M(max_3_floats) M(max_4_floats) \
+ M(max_n_ints) M(max_int) M(max_2_ints) M(max_3_ints) M(max_4_ints) \
+ M(max_n_uints) M(max_uint) M(max_2_uints) M(max_3_uints) M(max_4_uints) \
+ M(min_n_floats) M(min_float) M(min_2_floats) M(min_3_floats) M(min_4_floats) \
+ M(min_n_ints) M(min_int) M(min_2_ints) M(min_3_ints) M(min_4_ints) \
+ M(min_n_uints) M(min_uint) M(min_2_uints) M(min_3_uints) M(min_4_uints) \
+ M(mod_n_floats) M(mod_float) M(mod_2_floats) M(mod_3_floats) M(mod_4_floats) \
+ M(mix_n_floats) M(mix_float) M(mix_2_floats) M(mix_3_floats) M(mix_4_floats) \
+ M(mix_n_ints) M(mix_int) M(mix_2_ints) M(mix_3_ints) M(mix_4_ints) \
+ M(smoothstep_n_floats) M(dot_2_floats) M(dot_3_floats) M(dot_4_floats) \
+ M(cmplt_n_floats) M(cmplt_float) M(cmplt_2_floats) M(cmplt_3_floats) M(cmplt_4_floats) \
+ M(cmplt_n_ints) M(cmplt_int) M(cmplt_2_ints) M(cmplt_3_ints) M(cmplt_4_ints) \
+ M(cmplt_n_uints) M(cmplt_uint) M(cmplt_2_uints) M(cmplt_3_uints) M(cmplt_4_uints) \
+ M(cmple_n_floats) M(cmple_float) M(cmple_2_floats) M(cmple_3_floats) M(cmple_4_floats) \
+ M(cmple_n_ints) M(cmple_int) M(cmple_2_ints) M(cmple_3_ints) M(cmple_4_ints) \
+ M(cmple_n_uints) M(cmple_uint) M(cmple_2_uints) M(cmple_3_uints) M(cmple_4_uints) \
+ M(cmpeq_n_floats) M(cmpeq_float) M(cmpeq_2_floats) M(cmpeq_3_floats) M(cmpeq_4_floats) \
+ M(cmpeq_n_ints) M(cmpeq_int) M(cmpeq_2_ints) M(cmpeq_3_ints) M(cmpeq_4_ints) \
+ M(cmpne_n_floats) M(cmpne_float) M(cmpne_2_floats) M(cmpne_3_floats) M(cmpne_4_floats) \
+ M(cmpne_n_ints) M(cmpne_int) M(cmpne_2_ints) M(cmpne_3_ints) M(cmpne_4_ints)
+
+// The combined list of all RasterPipeline ops:
+#define SK_RASTER_PIPELINE_OPS_ALL(M) \
+ SK_RASTER_PIPELINE_OPS_LOWP(M) \
+ SK_RASTER_PIPELINE_OPS_HIGHP_ONLY(M)
+
+// An enumeration of every RasterPipeline op:
+enum class SkRasterPipelineOp {
+#define M(op) op,
+ SK_RASTER_PIPELINE_OPS_ALL(M)
+#undef M
+};
+
+// A count of raster pipeline ops:
+#define M(st) +1
+ static constexpr int kNumRasterPipelineLowpOps = SK_RASTER_PIPELINE_OPS_LOWP(M);
+ static constexpr int kNumRasterPipelineHighpOps = SK_RASTER_PIPELINE_OPS_ALL(M);
+#undef M
+
+#endif // SkRasterPipelineOpList_DEFINED
diff --git a/gfx/skia/skia/src/core/SkReadBuffer.cpp b/gfx/skia/skia/src/core/SkReadBuffer.cpp
new file mode 100644
index 0000000000..22655a6e4e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReadBuffer.cpp
@@ -0,0 +1,504 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkReadBuffer.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkMalloc.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/base/SkMathPriv.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkMipmapBuilder.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <memory>
+#include <optional>
+#include <utility>
+
+namespace {
+ // This generator intentionally should always fail on all attempts to get its pixels,
+ // simulating a bad or empty codec stream.
+ class EmptyImageGenerator final : public SkImageGenerator {
+ public:
+ EmptyImageGenerator(const SkImageInfo& info) : SkImageGenerator(info) { }
+
+ };
+
+ static sk_sp<SkImage> MakeEmptyImage(int width, int height) {
+ return SkImage::MakeFromGenerator(
+ std::make_unique<EmptyImageGenerator>(SkImageInfo::MakeN32Premul(width, height)));
+ }
+
+} // anonymous namespace
+
+void SkReadBuffer::setMemory(const void* data, size_t size) {
+ this->validate(IsPtrAlign4(data) && (SkAlign4(size) == size));
+ if (!fError) {
+ fBase = fCurr = (const char*)data;
+ fStop = fBase + size;
+ }
+}
+
+void SkReadBuffer::setInvalid() {
+ if (!fError) {
+ // When an error is found, send the read cursor to the end of the stream
+ fCurr = fStop;
+ fError = true;
+ }
+}
+
+const void* SkReadBuffer::skip(size_t size) {
+ size_t inc = SkAlign4(size);
+ this->validate(inc >= size);
+ const void* addr = fCurr;
+ this->validate(IsPtrAlign4(addr) && this->isAvailable(inc));
+ if (fError) {
+ return nullptr;
+ }
+
+ fCurr += inc;
+ return addr;
+}
+
+const void* SkReadBuffer::skip(size_t count, size_t size) {
+ return this->skip(SkSafeMath::Mul(count, size));
+}
+
+void SkReadBuffer::setDeserialProcs(const SkDeserialProcs& procs) {
+ fProcs = procs;
+}
+
+bool SkReadBuffer::readBool() {
+ uint32_t value = this->readUInt();
+ // Boolean value should be either 0 or 1
+ this->validate(!(value & ~1));
+ return value != 0;
+}
+
+SkColor SkReadBuffer::readColor() {
+ return this->readUInt();
+}
+
+int32_t SkReadBuffer::readInt() {
+ const size_t inc = sizeof(int32_t);
+ if (!this->validate(IsPtrAlign4(fCurr) && this->isAvailable(inc))) {
+ return 0;
+ }
+ int32_t value = *((const int32_t*)fCurr);
+ fCurr += inc;
+ return value;
+}
+
+SkScalar SkReadBuffer::readScalar() {
+ const size_t inc = sizeof(SkScalar);
+ if (!this->validate(IsPtrAlign4(fCurr) && this->isAvailable(inc))) {
+ return 0;
+ }
+ SkScalar value = *((const SkScalar*)fCurr);
+ fCurr += inc;
+ return value;
+}
+
+uint32_t SkReadBuffer::readUInt() {
+ return this->readInt();
+}
+
+int32_t SkReadBuffer::read32() {
+ return this->readInt();
+}
+
+uint8_t SkReadBuffer::peekByte() {
+ if (this->available() <= 0) {
+ fError = true;
+ return 0;
+ }
+ return *((uint8_t*)fCurr);
+}
+
+bool SkReadBuffer::readPad32(void* buffer, size_t bytes) {
+ if (const void* src = this->skip(bytes)) {
+ // buffer might be null if bytes is zero (see SkAutoMalloc), hence we call
+ // the careful version of memcpy.
+ sk_careful_memcpy(buffer, src, bytes);
+ return true;
+ }
+ return false;
+}
+
+const char* SkReadBuffer::readString(size_t* len) {
+ *len = this->readUInt();
+
+ // The string is len characters and a terminating \0.
+ const char* c_str = this->skipT<char>(*len+1);
+
+ if (this->validate(c_str && c_str[*len] == '\0')) {
+ return c_str;
+ }
+ return nullptr;
+}
+
+void SkReadBuffer::readString(SkString* string) {
+ size_t len;
+ if (const char* c_str = this->readString(&len)) {
+ string->set(c_str, len);
+ return;
+ }
+ string->reset();
+}
+
+void SkReadBuffer::readColor4f(SkColor4f* color) {
+ if (!this->readPad32(color, sizeof(SkColor4f))) {
+ *color = {0, 0, 0, 0};
+ }
+}
+
+void SkReadBuffer::readPoint(SkPoint* point) {
+ point->fX = this->readScalar();
+ point->fY = this->readScalar();
+}
+
+void SkReadBuffer::readPoint3(SkPoint3* point) {
+ this->readPad32(point, sizeof(SkPoint3));
+}
+
+void SkReadBuffer::read(SkM44* matrix) {
+ if (this->isValid()) {
+ if (const float* m = (const float*)this->skip(sizeof(float) * 16)) {
+ *matrix = SkM44::ColMajor(m);
+ }
+ }
+ if (!this->isValid()) {
+ *matrix = SkM44();
+ }
+}
+
+void SkReadBuffer::readMatrix(SkMatrix* matrix) {
+ size_t size = 0;
+ if (this->isValid()) {
+ size = SkMatrixPriv::ReadFromMemory(matrix, fCurr, this->available());
+ (void)this->validate((SkAlign4(size) == size) && (0 != size));
+ }
+ if (!this->isValid()) {
+ matrix->reset();
+ }
+ (void)this->skip(size);
+}
+
+void SkReadBuffer::readIRect(SkIRect* rect) {
+ if (!this->readPad32(rect, sizeof(SkIRect))) {
+ rect->setEmpty();
+ }
+}
+
+void SkReadBuffer::readRect(SkRect* rect) {
+ if (!this->readPad32(rect, sizeof(SkRect))) {
+ rect->setEmpty();
+ }
+}
+
+SkRect SkReadBuffer::readRect() {
+ SkRect r;
+ if (!this->readPad32(&r, sizeof(SkRect))) {
+ r.setEmpty();
+ }
+ return r;
+}
+
+SkSamplingOptions SkReadBuffer::readSampling() {
+ if (!this->isVersionLT(SkPicturePriv::kAnisotropicFilter)) {
+ int maxAniso = this->readInt();
+ if (maxAniso != 0) {
+ return SkSamplingOptions::Aniso(maxAniso);
+ }
+ }
+ if (this->readBool()) {
+ float B = this->readScalar();
+ float C = this->readScalar();
+ return SkSamplingOptions({B, C});
+ } else {
+ SkFilterMode filter = this->read32LE(SkFilterMode::kLinear);
+ SkMipmapMode mipmap = this->read32LE(SkMipmapMode::kLinear);
+ return SkSamplingOptions(filter, mipmap);
+ }
+}
+
+void SkReadBuffer::readRRect(SkRRect* rrect) {
+ size_t size = 0;
+ if (!fError) {
+ size = rrect->readFromMemory(fCurr, this->available());
+ if (!this->validate((SkAlign4(size) == size) && (0 != size))) {
+ rrect->setEmpty();
+ }
+ }
+ (void)this->skip(size);
+}
+
+void SkReadBuffer::readRegion(SkRegion* region) {
+ size_t size = 0;
+ if (!fError) {
+ size = region->readFromMemory(fCurr, this->available());
+ if (!this->validate((SkAlign4(size) == size) && (0 != size))) {
+ region->setEmpty();
+ }
+ }
+ (void)this->skip(size);
+}
+
+void SkReadBuffer::readPath(SkPath* path) {
+ size_t size = 0;
+ if (!fError) {
+ size = path->readFromMemory(fCurr, this->available());
+ if (!this->validate((SkAlign4(size) == size) && (0 != size))) {
+ path->reset();
+ }
+ }
+ (void)this->skip(size);
+}
+
+bool SkReadBuffer::readArray(void* value, size_t size, size_t elementSize) {
+ const uint32_t count = this->readUInt();
+ return this->validate(size == count) &&
+ this->readPad32(value, SkSafeMath::Mul(size, elementSize));
+}
+
+bool SkReadBuffer::readByteArray(void* value, size_t size) {
+ return this->readArray(value, size, sizeof(uint8_t));
+}
+
+bool SkReadBuffer::readColorArray(SkColor* colors, size_t size) {
+ return this->readArray(colors, size, sizeof(SkColor));
+}
+
+bool SkReadBuffer::readColor4fArray(SkColor4f* colors, size_t size) {
+ return this->readArray(colors, size, sizeof(SkColor4f));
+}
+
+bool SkReadBuffer::readIntArray(int32_t* values, size_t size) {
+ return this->readArray(values, size, sizeof(int32_t));
+}
+
+bool SkReadBuffer::readPointArray(SkPoint* points, size_t size) {
+ return this->readArray(points, size, sizeof(SkPoint));
+}
+
+bool SkReadBuffer::readScalarArray(SkScalar* values, size_t size) {
+ return this->readArray(values, size, sizeof(SkScalar));
+}
+
+const void* SkReadBuffer::skipByteArray(size_t* size) {
+ const uint32_t count = this->readUInt();
+ const void* buf = this->skip(count);
+ if (size) {
+ *size = this->isValid() ? count : 0;
+ }
+ return buf;
+}
+
+sk_sp<SkData> SkReadBuffer::readByteArrayAsData() {
+ size_t numBytes = this->getArrayCount();
+ if (!this->validate(this->isAvailable(numBytes))) {
+ return nullptr;
+ }
+
+ SkAutoMalloc buffer(numBytes);
+ if (!this->readByteArray(buffer.get(), numBytes)) {
+ return nullptr;
+ }
+ return SkData::MakeFromMalloc(buffer.release(), numBytes);
+}
+
+uint32_t SkReadBuffer::getArrayCount() {
+ const size_t inc = sizeof(uint32_t);
+ if (!this->validate(IsPtrAlign4(fCurr) && this->isAvailable(inc))) {
+ return 0;
+ }
+ return *((uint32_t*)fCurr);
+}
+
+// If we see a corrupt stream, we return null (fail). If we just fail trying to decode
+// the image, we don't fail, but return a 1x1 empty image.
+sk_sp<SkImage> SkReadBuffer::readImage() {
+ uint32_t flags = this->read32();
+
+ sk_sp<SkImage> image;
+ {
+ sk_sp<SkData> data = this->readByteArrayAsData();
+ if (!data) {
+ this->validate(false);
+ return nullptr;
+ }
+ if (fProcs.fImageProc) {
+ image = fProcs.fImageProc(data->data(), data->size(), fProcs.fImageCtx);
+ }
+ if (!image) {
+ std::optional<SkAlphaType> alphaType = std::nullopt;
+ if (flags & SkWriteBufferImageFlags::kUnpremul) {
+ alphaType = kUnpremul_SkAlphaType;
+ }
+ image = SkImage::MakeFromEncoded(std::move(data), alphaType);
+ }
+ }
+
+ if (flags & SkWriteBufferImageFlags::kHasSubsetRect) {
+ SkIRect subset;
+ this->readIRect(&subset);
+ if (image) {
+ image = image->makeSubset(subset);
+ }
+ }
+
+ if (flags & SkWriteBufferImageFlags::kHasMipmap) {
+ sk_sp<SkData> data = this->readByteArrayAsData();
+ if (!data) {
+ this->validate(false);
+ return nullptr;
+ }
+ if (image) {
+ SkMipmapBuilder builder(image->imageInfo());
+ if (SkMipmap::Deserialize(&builder, data->data(), data->size())) {
+ // TODO: need to make lazy images support mips
+ if (auto ri = image->makeRasterImage()) {
+ image = ri;
+ }
+ image = builder.attachTo(image);
+ SkASSERT(image); // withMipmaps should never return null
+ }
+ }
+ }
+ return image ? image : MakeEmptyImage(1, 1);
+}
+
+sk_sp<SkTypeface> SkReadBuffer::readTypeface() {
+ // Read 32 bits (signed)
+ // 0 -- return null (default font)
+ // >0 -- index
+ // <0 -- custom (serial procs) : negative size in bytes
+
+ int32_t index = this->read32();
+ if (index == 0) {
+ return nullptr;
+ } else if (index > 0) {
+ if (!this->validate(index <= fTFCount)) {
+ return nullptr;
+ }
+ return fTFArray[index - 1];
+ } else { // custom
+ size_t size = sk_negate_to_size_t(index);
+ const void* data = this->skip(size);
+ if (!this->validate(data != nullptr && fProcs.fTypefaceProc)) {
+ return nullptr;
+ }
+ return fProcs.fTypefaceProc(data, size, fProcs.fTypefaceCtx);
+ }
+}
+
+SkFlattenable* SkReadBuffer::readRawFlattenable() {
+ SkFlattenable::Factory factory = nullptr;
+
+ if (fFactoryCount > 0) {
+ int32_t index = this->read32();
+ if (0 == index || !this->isValid()) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+ if (index < 0) {
+ this->validate(false);
+ return nullptr;
+ }
+ index -= 1; // we stored the index-base-1
+ if ((unsigned)index >= (unsigned)fFactoryCount) {
+ this->validate(false);
+ return nullptr;
+ }
+ factory = fFactoryArray[index];
+ } else {
+ if (this->peekByte() != 0) {
+ // If the first byte is non-zero, the flattenable is specified by a string.
+ size_t ignored_length;
+ if (const char* name = this->readString(&ignored_length)) {
+ factory = SkFlattenable::NameToFactory(name);
+ fFlattenableDict.set(fFlattenableDict.count() + 1, factory);
+ }
+ } else {
+ // Read the index. We are guaranteed that the first byte
+ // is zeroed, so we must shift down a byte.
+ uint32_t index = this->readUInt() >> 8;
+ if (index == 0) {
+ return nullptr; // writer failed to give us the flattenable
+ }
+
+ if (SkFlattenable::Factory* found = fFlattenableDict.find(index)) {
+ factory = *found;
+ }
+ }
+
+ if (!this->validate(factory != nullptr)) {
+ return nullptr;
+ }
+ }
+
+ // if we get here, factory may still be null, but if that is the case, the
+ // failure was ours, not the writer.
+ sk_sp<SkFlattenable> obj;
+ uint32_t sizeRecorded = this->read32();
+ if (factory) {
+ size_t offset = this->offset();
+ obj = (*factory)(*this);
+ // check that we read the amount we expected
+ size_t sizeRead = this->offset() - offset;
+ if (sizeRecorded != sizeRead) {
+ this->validate(false);
+ return nullptr;
+ }
+ } else {
+ // we must skip the remaining data
+ this->skip(sizeRecorded);
+ }
+ if (!this->isValid()) {
+ return nullptr;
+ }
+ return obj.release();
+}
+
+SkFlattenable* SkReadBuffer::readFlattenable(SkFlattenable::Type ft) {
+ SkFlattenable* obj = this->readRawFlattenable();
+ if (obj && obj->getFlattenableType() != ft) {
+ this->validate(false);
+ obj->unref();
+ return nullptr;
+ }
+ return obj;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+int32_t SkReadBuffer::checkInt(int32_t min, int32_t max) {
+ SkASSERT(min <= max);
+ int32_t value = this->read32();
+ if (value < min || value > max) {
+ this->validate(false);
+ value = min;
+ }
+ return value;
+}
+
+SkLegacyFQ SkReadBuffer::checkFilterQuality() {
+ return this->checkRange<SkLegacyFQ>(kNone_SkLegacyFQ, kLast_SkLegacyFQ);
+}
diff --git a/gfx/skia/skia/src/core/SkReadBuffer.h b/gfx/skia/skia/src/core/SkReadBuffer.h
new file mode 100644
index 0000000000..45f4343eb4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReadBuffer.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkReadBuffer_DEFINED
+#define SkReadBuffer_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSerialProcs.h"
+#include "include/core/SkShader.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkAssert.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkSamplingPriv.h"
+#include "src/core/SkTHash.h"
+#include "src/shaders/SkShaderBase.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SkBlender;
+class SkData;
+class SkImage;
+class SkM44;
+class SkMaskFilter;
+class SkMatrix;
+class SkPath;
+class SkRRect;
+class SkRegion;
+class SkString;
+class SkTypeface;
+struct SkPoint3;
+
+#ifdef SK_SUPPORT_LEGACY_DRAWLOOPER
+#include "include/core/SkDrawLooper.h"
+#endif
+
+class SkReadBuffer {
+public:
+ SkReadBuffer() = default;
+ SkReadBuffer(const void* data, size_t size) {
+ this->setMemory(data, size);
+ }
+
+ void setMemory(const void*, size_t);
+
+ /**
+ * Returns true IFF the version is older than the specified version.
+ */
+ bool isVersionLT(SkPicturePriv::Version targetVersion) const {
+ SkASSERT(targetVersion > 0);
+ return fVersion > 0 && fVersion < targetVersion;
+ }
+
+ uint32_t getVersion() const { return fVersion; }
+
+ /** This may be called at most once; most clients of SkReadBuffer should not mess with it. */
+ void setVersion(int version) {
+ SkASSERT(0 == fVersion || version == fVersion);
+ fVersion = version;
+ }
+
+ size_t size() const { return fStop - fBase; }
+ size_t offset() const { return fCurr - fBase; }
+ bool eof() { return fCurr >= fStop; }
+ const void* skip(size_t size);
+ const void* skip(size_t count, size_t size); // does safe multiply
+ size_t available() const { return fStop - fCurr; }
+
+ template <typename T> const T* skipT() {
+ return static_cast<const T*>(this->skip(sizeof(T)));
+ }
+ template <typename T> const T* skipT(size_t count) {
+ return static_cast<const T*>(this->skip(count, sizeof(T)));
+ }
+
+ // primitives
+ bool readBool();
+ SkColor readColor();
+ int32_t readInt();
+ SkScalar readScalar();
+ uint32_t readUInt();
+ int32_t read32();
+
+ template <typename T> T read32LE(T max) {
+ uint32_t value = this->readUInt();
+ if (!this->validate(value <= static_cast<uint32_t>(max))) {
+ value = 0;
+ }
+ return static_cast<T>(value);
+ }
+
+ // peek
+ uint8_t peekByte();
+
+ void readString(SkString* string);
+
+ // common data structures
+ void readColor4f(SkColor4f* color);
+ void readPoint(SkPoint* point);
+ SkPoint readPoint() { SkPoint p; this->readPoint(&p); return p; }
+ void readPoint3(SkPoint3* point);
+ void read(SkM44*);
+ void readMatrix(SkMatrix* matrix);
+ void readIRect(SkIRect* rect);
+ void readRect(SkRect* rect);
+ SkRect readRect();
+ void readRRect(SkRRect* rrect);
+ void readRegion(SkRegion* region);
+
+ void readPath(SkPath* path);
+
+ SkPaint readPaint() {
+ return SkPaintPriv::Unflatten(*this);
+ }
+
+ SkFlattenable* readRawFlattenable();
+ SkFlattenable* readFlattenable(SkFlattenable::Type);
+ template <typename T> sk_sp<T> readFlattenable() {
+ return sk_sp<T>((T*)this->readFlattenable(T::GetFlattenableType()));
+ }
+ sk_sp<SkColorFilter> readColorFilter() { return this->readFlattenable<SkColorFilterBase>(); }
+#ifdef SK_SUPPORT_LEGACY_DRAWLOOPER
+ sk_sp<SkDrawLooper> readDrawLooper() { return this->readFlattenable<SkDrawLooper>(); }
+#endif
+ sk_sp<SkImageFilter> readImageFilter() { return this->readFlattenable<SkImageFilter_Base>(); }
+ sk_sp<SkBlender> readBlender() { return this->readFlattenable<SkBlenderBase>(); }
+ sk_sp<SkMaskFilter> readMaskFilter() { return this->readFlattenable<SkMaskFilterBase>(); }
+ sk_sp<SkPathEffect> readPathEffect() { return this->readFlattenable<SkPathEffect>(); }
+ sk_sp<SkShader> readShader() { return this->readFlattenable<SkShaderBase>(); }
+
+ // Reads SkAlign4(bytes), but will only copy bytes into the buffer.
+ bool readPad32(void* buffer, size_t bytes);
+
+ // binary data and arrays
+ bool readByteArray(void* value, size_t size);
+ bool readColorArray(SkColor* colors, size_t size);
+ bool readColor4fArray(SkColor4f* colors, size_t size);
+ bool readIntArray(int32_t* values, size_t size);
+ bool readPointArray(SkPoint* points, size_t size);
+ bool readScalarArray(SkScalar* values, size_t size);
+
+ const void* skipByteArray(size_t* size);
+
+ sk_sp<SkData> readByteArrayAsData();
+
+ // helpers to get info about arrays and binary data
+ uint32_t getArrayCount();
+
+ // If there is a real error (e.g. data is corrupted) this returns null. If the image cannot
+ // be created (e.g. it was not originally encoded) then this returns an image that doesn't
+ // draw.
+ sk_sp<SkImage> readImage();
+ sk_sp<SkTypeface> readTypeface();
+
+ void setTypefaceArray(sk_sp<SkTypeface> array[], int count) {
+ fTFArray = array;
+ fTFCount = count;
+ }
+
+ /**
+ * Call this with a pre-loaded array of Factories, in the same order as
+ * were created/written by the writer. SkPicture uses this.
+ */
+ void setFactoryPlayback(SkFlattenable::Factory array[], int count) {
+ fFactoryArray = array;
+ fFactoryCount = count;
+ }
+
+ void setDeserialProcs(const SkDeserialProcs& procs);
+ const SkDeserialProcs& getDeserialProcs() const { return fProcs; }
+
+ /**
+ * If isValid is false, sets the buffer to be "invalid". Returns true if the buffer
+ * is still valid.
+ */
+ bool validate(bool isValid) {
+ if (!isValid) {
+ this->setInvalid();
+ }
+ return !fError;
+ }
+
+ /**
+ * Helper function to do a preflight check before a large allocation or read.
+ * Returns true if there is enough bytes in the buffer to read n elements of T.
+ * If not, the buffer will be "invalid" and false will be returned.
+ */
+ template <typename T>
+ bool validateCanReadN(size_t n) {
+ return this->validate(n <= (this->available() / sizeof(T)));
+ }
+
+ bool isValid() const { return !fError; }
+ bool validateIndex(int index, int count) {
+ return this->validate(index >= 0 && index < count);
+ }
+
+ // Utilities that mark the buffer invalid if the requested value is out-of-range
+
+ // If the read value is outside of the range, validate(false) is called, and min
+ // is returned, else the value is returned.
+ int32_t checkInt(int min, int max);
+
+ template <typename T> T checkRange(T min, T max) {
+ return static_cast<T>(this->checkInt(static_cast<int32_t>(min),
+ static_cast<int32_t>(max)));
+ }
+
+ SkLegacyFQ checkFilterQuality();
+
+ SkSamplingOptions readSampling();
+
+private:
+ const char* readString(size_t* length);
+
+ void setInvalid();
+ bool readArray(void* value, size_t size, size_t elementSize);
+ bool isAvailable(size_t size) const { return size <= this->available(); }
+
+ // These are always 4-byte aligned
+ const char* fCurr = nullptr; // current position within buffer
+ const char* fStop = nullptr; // end of buffer
+ const char* fBase = nullptr; // beginning of buffer
+
+ // Only used if we do not have an fFactoryArray.
+ SkTHashMap<uint32_t, SkFlattenable::Factory> fFlattenableDict;
+
+ int fVersion = 0;
+
+ sk_sp<SkTypeface>* fTFArray = nullptr;
+ int fTFCount = 0;
+
+ SkFlattenable::Factory* fFactoryArray = nullptr;
+ int fFactoryCount = 0;
+
+ SkDeserialProcs fProcs;
+
+ static bool IsPtrAlign4(const void* ptr) {
+ return SkIsAlign4((uintptr_t)ptr);
+ }
+
+ bool fError = false;
+};
+
+#endif // SkReadBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkReadPixelsRec.cpp b/gfx/skia/skia/src/core/SkReadPixelsRec.cpp
new file mode 100644
index 0000000000..505bfb51b3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReadPixelsRec.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/core/SkReadPixelsRec.h"
+
+#include "include/core/SkRect.h"
+
+bool SkReadPixelsRec::trim(int srcWidth, int srcHeight) {
+ if (nullptr == fPixels || fRowBytes < fInfo.minRowBytes()) {
+ return false;
+ }
+ if (0 >= fInfo.width() || 0 >= fInfo.height()) {
+ return false;
+ }
+
+ int x = fX;
+ int y = fY;
+ SkIRect srcR = SkIRect::MakeXYWH(x, y, fInfo.width(), fInfo.height());
+ if (!srcR.intersect({0, 0, srcWidth, srcHeight})) {
+ return false;
+ }
+
+ // if x or y are negative, then we have to adjust pixels
+ if (x > 0) {
+ x = 0;
+ }
+ if (y > 0) {
+ y = 0;
+ }
+ // here x,y are either 0 or negative
+ // we negate and add them so UBSAN (pointer-overflow) doesn't get confused.
+ fPixels = ((char*)fPixels + -y*fRowBytes + -x*fInfo.bytesPerPixel());
+ // the intersect may have shrunk info's logical size
+ fInfo = fInfo.makeDimensions(srcR.size());
+ fX = srcR.x();
+ fY = srcR.y();
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkReadPixelsRec.h b/gfx/skia/skia/src/core/SkReadPixelsRec.h
new file mode 100644
index 0000000000..959b51b3b5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkReadPixelsRec.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkReadPixelsRec_DEFINED
+#define SkReadPixelsRec_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+
+#include <cstddef>
+
+/**
+ * Helper class to package and trim the parameters passed to readPixels()
+ */
+struct SkReadPixelsRec {
+ SkReadPixelsRec(const SkImageInfo& info, void* pixels, size_t rowBytes, int x, int y)
+ : fPixels(pixels)
+ , fRowBytes(rowBytes)
+ , fInfo(info)
+ , fX(x)
+ , fY(y)
+ {}
+
+ SkReadPixelsRec(const SkPixmap& pm, int x, int y)
+ : fPixels(pm.writable_addr())
+ , fRowBytes(pm.rowBytes())
+ , fInfo(pm.info())
+ , fX(x)
+ , fY(y)
+ {}
+
+ void* fPixels;
+ size_t fRowBytes;
+ SkImageInfo fInfo;
+ int fX;
+ int fY;
+
+ /*
+ * On true, may have modified its fields (except fRowBytes) to make it a legal subset
+ * of the specified src width/height.
+ *
+ * On false, leaves self unchanged, but indicates that it does not overlap src, or
+ * is not valid (e.g. bad fInfo) for readPixels().
+ */
+ bool trim(int srcWidth, int srcHeight);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRecord.cpp b/gfx/skia/skia/src/core/SkRecord.cpp
new file mode 100644
index 0000000000..6f93944b36
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecord.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImage.h"
+#include "src/core/SkRecord.h"
+#include <algorithm>
+
+SkRecord::~SkRecord() {
+ Destroyer destroyer;
+ for (int i = 0; i < this->count(); i++) {
+ this->mutate(i, destroyer);
+ }
+}
+
+void SkRecord::grow() {
+ SkASSERT(fCount == fReserved);
+ fReserved = fReserved ? fReserved * 2 : 4;
+ fRecords.realloc(fReserved);
+}
+
+size_t SkRecord::bytesUsed() const {
+ size_t bytes = fApproxBytesAllocated + sizeof(SkRecord);
+ return bytes;
+}
+
+void SkRecord::defrag() {
+ // Remove all the NoOps, preserving the order of other ops, e.g.
+ // Save, ClipRect, NoOp, DrawRect, NoOp, NoOp, Restore
+ // -> Save, ClipRect, DrawRect, Restore
+ Record* noops = std::remove_if(fRecords.get(), fRecords.get() + fCount,
+ [](Record op) { return op.type() == SkRecords::NoOp_Type; });
+ fCount = noops - fRecords.get();
+}
diff --git a/gfx/skia/skia/src/core/SkRecord.h b/gfx/skia/skia/src/core/SkRecord.h
new file mode 100644
index 0000000000..d8c5efe54e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecord.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecord_DEFINED
+#define SkRecord_DEFINED
+
+#include "include/private/base/SkTLogic.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkRecords.h"
+
+// SkRecord represents a sequence of SkCanvas calls, saved for future use.
+// These future uses may include: replay, optimization, serialization, or combinations of those.
+//
+// Though an enterprising user may find calling alloc(), append(), visit(), and mutate() enough to
+// work with SkRecord, you probably want to look at SkRecorder which presents an SkCanvas interface
+// for creating an SkRecord, and SkRecordDraw which plays an SkRecord back into another SkCanvas.
+//
+// SkRecord often looks like it's compatible with any type T, but really it's compatible with any
+// type T which has a static const SkRecords::Type kType. That is to say, SkRecord is compatible
+// only with SkRecords::* structs defined in SkRecords.h. Your compiler will helpfully yell if you
+// get this wrong.
+
+class SkRecord : public SkRefCnt {
+public:
+ SkRecord() = default;
+ ~SkRecord() override;
+
+ // Returns the number of canvas commands in this SkRecord.
+ int count() const { return fCount; }
+
+ // Visit the i-th canvas command with a functor matching this interface:
+ // template <typename T>
+ // R operator()(const T& record) { ... }
+ // This operator() must be defined for at least all SkRecords::*.
+ template <typename F>
+ auto visit(int i, F&& f) const -> decltype(f(SkRecords::NoOp())) {
+ return fRecords[i].visit(f);
+ }
+
+ // Mutate the i-th canvas command with a functor matching this interface:
+ // template <typename T>
+ // R operator()(T* record) { ... }
+ // This operator() must be defined for at least all SkRecords::*.
+ template <typename F>
+ auto mutate(int i, F&& f) -> decltype(f((SkRecords::NoOp*)nullptr)) {
+ return fRecords[i].mutate(f);
+ }
+
+ // Allocate contiguous space for count Ts, to be freed when the SkRecord is destroyed.
+ // Here T can be any class, not just those from SkRecords. Throws on failure.
+ template <typename T>
+ T* alloc(size_t count = 1) {
+ struct RawBytes {
+ alignas(T) char data[sizeof(T)];
+ };
+ fApproxBytesAllocated += count * sizeof(T) + alignof(T);
+ return (T*)fAlloc.makeArrayDefault<RawBytes>(count);
+ }
+
+ // Add a new command of type T to the end of this SkRecord.
+ // You are expected to placement new an object of type T onto this pointer.
+ template <typename T>
+ T* append() {
+ if (fCount == fReserved) {
+ this->grow();
+ }
+ return fRecords[fCount++].set(this->allocCommand<T>());
+ }
+
+ // Replace the i-th command with a new command of type T.
+ // You are expected to placement new an object of type T onto this pointer.
+ // References to the original command are invalidated.
+ template <typename T>
+ T* replace(int i) {
+ SkASSERT(i < this->count());
+
+ Destroyer destroyer;
+ this->mutate(i, destroyer);
+
+ return fRecords[i].set(this->allocCommand<T>());
+ }
+
+ // Does not return the bytes in any pointers embedded in the Records; callers
+ // need to iterate with a visitor to measure those they care for.
+ size_t bytesUsed() const;
+
+ // Rearrange and resize this record to eliminate any NoOps.
+ // May change count() and the indices of ops, but preserves their order.
+ void defrag();
+
+private:
+ // An SkRecord is structured as an array of pointers into a big chunk of memory where
+ // records representing each canvas draw call are stored:
+ //
+ // fRecords: [*][*][*]...
+ // | | |
+ // | | |
+ // | | +---------------------------------------+
+ // | +-----------------+ |
+ // | | |
+ // v v v
+ // fAlloc: [SkRecords::DrawRect][SkRecords::DrawPosTextH][SkRecords::DrawRect]...
+ //
+ // We store the types of each of the pointers alongside the pointer.
+ // The cost to append a T to this structure is 8 + sizeof(T) bytes.
+
+ // A mutator that can be used with replace to destroy canvas commands.
+ struct Destroyer {
+ template <typename T>
+ void operator()(T* record) { record->~T(); }
+ };
+
+ template <typename T>
+ std::enable_if_t<std::is_empty<T>::value, T*> allocCommand() {
+ static T singleton = {};
+ return &singleton;
+ }
+
+ template <typename T>
+ std::enable_if_t<!std::is_empty<T>::value, T*> allocCommand() { return this->alloc<T>(); }
+
+ void grow();
+
+ // A typed pointer to some bytes in fAlloc. visit() and mutate() allow polymorphic dispatch.
+ struct Record {
+ SkRecords::Type fType;
+ void* fPtr;
+
+ // Point this record to its data in fAlloc. Returns ptr for convenience.
+ template <typename T>
+ T* set(T* ptr) {
+ fType = T::kType;
+ fPtr = ptr;
+ SkASSERT(this->ptr() == ptr && this->type() == T::kType);
+ return ptr;
+ }
+
+ SkRecords::Type type() const { return fType; }
+ void* ptr() const { return fPtr; }
+
+ // Visit this record with functor F (see public API above).
+ template <typename F>
+ auto visit(F&& f) const -> decltype(f(SkRecords::NoOp())) {
+ #define CASE(T) case SkRecords::T##_Type: return f(*(const SkRecords::T*)this->ptr());
+ switch(this->type()) { SK_RECORD_TYPES(CASE) }
+ #undef CASE
+ SkDEBUGFAIL("Unreachable");
+ static const SkRecords::NoOp noop{};
+ return f(noop);
+ }
+
+ // Mutate this record with functor F (see public API above).
+ template <typename F>
+ auto mutate(F&& f) -> decltype(f((SkRecords::NoOp*)nullptr)) {
+ #define CASE(T) case SkRecords::T##_Type: return f((SkRecords::T*)this->ptr());
+ switch(this->type()) { SK_RECORD_TYPES(CASE) }
+ #undef CASE
+ SkDEBUGFAIL("Unreachable");
+ static const SkRecords::NoOp noop{};
+ return f(const_cast<SkRecords::NoOp*>(&noop));
+ }
+ };
+
+    // fRecords needs to be a data structure that can append fixed length data, and needs to
+ // support efficient random access and forward iteration. (It doesn't need to be contiguous.)
+ int fCount{0},
+ fReserved{0};
+ skia_private::AutoTMalloc<Record> fRecords;
+
+ // fAlloc needs to be a data structure which can append variable length data in contiguous
+ // chunks, returning a stable handle to that data for later retrieval.
+ SkArenaAlloc fAlloc{256};
+ size_t fApproxBytesAllocated{0};
+};
+
+#endif//SkRecord_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordDraw.cpp b/gfx/skia/skia/src/core/SkRecordDraw.cpp
new file mode 100644
index 0000000000..1447cb049e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordDraw.cpp
@@ -0,0 +1,590 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBBHFactory.h"
+#include "include/core/SkImage.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/utils/SkPatchUtils.h"
+
+void SkRecordDraw(const SkRecord& record,
+ SkCanvas* canvas,
+ SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[],
+ int drawableCount,
+ const SkBBoxHierarchy* bbh,
+ SkPicture::AbortCallback* callback) {
+ SkAutoCanvasRestore saveRestore(canvas, true /*save now, restore at exit*/);
+
+ if (bbh) {
+ // Draw only ops that affect pixels in the canvas's current clip.
+ // The SkRecord and BBH were recorded in identity space. This canvas
+ // is not necessarily in that same space. getLocalClipBounds() returns us
+ // this canvas' clip bounds transformed back into identity space, which
+ // lets us query the BBH.
+ SkRect query = canvas->getLocalClipBounds();
+
+ std::vector<int> ops;
+ bbh->search(query, &ops);
+
+ SkRecords::Draw draw(canvas, drawablePicts, drawables, drawableCount);
+ for (int i = 0; i < (int)ops.size(); i++) {
+ if (callback && callback->abort()) {
+ return;
+ }
+ // This visit call uses the SkRecords::Draw::operator() to call
+ // methods on the |canvas|, wrapped by methods defined with the
+ // DRAW() macro.
+ record.visit(ops[i], draw);
+ }
+ } else {
+ // Draw all ops.
+ SkRecords::Draw draw(canvas, drawablePicts, drawables, drawableCount);
+ for (int i = 0; i < record.count(); i++) {
+ if (callback && callback->abort()) {
+ return;
+ }
+ // This visit call uses the SkRecords::Draw::operator() to call
+ // methods on the |canvas|, wrapped by methods defined with the
+ // DRAW() macro.
+ record.visit(i, draw);
+ }
+ }
+}
+
+void SkRecordPartialDraw(const SkRecord& record, SkCanvas* canvas,
+ SkPicture const* const drawablePicts[], int drawableCount,
+ int start, int stop,
+ const SkM44& initialCTM) {
+ SkAutoCanvasRestore saveRestore(canvas, true /*save now, restore at exit*/);
+
+ stop = std::min(stop, record.count());
+ SkRecords::Draw draw(canvas, drawablePicts, nullptr, drawableCount, &initialCTM);
+ for (int i = start; i < stop; i++) {
+ record.visit(i, draw);
+ }
+}
+
+namespace SkRecords {
+
+// NoOps draw nothing.
+template <> void Draw::draw(const NoOp&) {}
+
+#define DRAW(T, call) template <> void Draw::draw(const T& r) { fCanvas->call; }
+DRAW(Flush, flush())
+DRAW(Restore, restore())
+DRAW(Save, save())
+DRAW(SaveLayer, saveLayer(SkCanvasPriv::ScaledBackdropLayer(r.bounds,
+ r.paint,
+ r.backdrop.get(),
+ r.backdropScale,
+ r.saveLayerFlags)))
+
+template <> void Draw::draw(const SaveBehind& r) {
+ SkCanvasPriv::SaveBehind(fCanvas, r.subset);
+}
+
+template <> void Draw::draw(const DrawBehind& r) {
+ SkCanvasPriv::DrawBehind(fCanvas, r.paint);
+}
+
+DRAW(SetMatrix, setMatrix(fInitialCTM.asM33() * r.matrix))
+DRAW(SetM44, setMatrix(fInitialCTM * r.matrix))
+DRAW(Concat44, concat(r.matrix))
+DRAW(Concat, concat(r.matrix))
+DRAW(Translate, translate(r.dx, r.dy))
+DRAW(Scale, scale(r.sx, r.sy))
+
+DRAW(ClipPath, clipPath(r.path, r.opAA.op(), r.opAA.aa()))
+DRAW(ClipRRect, clipRRect(r.rrect, r.opAA.op(), r.opAA.aa()))
+DRAW(ClipRect, clipRect(r.rect, r.opAA.op(), r.opAA.aa()))
+DRAW(ClipRegion, clipRegion(r.region, r.op))
+DRAW(ClipShader, clipShader(r.shader, r.op))
+
+template <> void Draw::draw(const ResetClip& r) {
+ SkCanvasPriv::ResetClip(fCanvas);
+}
+
+DRAW(DrawArc, drawArc(r.oval, r.startAngle, r.sweepAngle, r.useCenter, r.paint))
+DRAW(DrawDRRect, drawDRRect(r.outer, r.inner, r.paint))
+DRAW(DrawImage, drawImage(r.image.get(), r.left, r.top, r.sampling, r.paint))
+
+template <> void Draw::draw(const DrawImageLattice& r) {
+ SkCanvas::Lattice lattice;
+ lattice.fXCount = r.xCount;
+ lattice.fXDivs = r.xDivs;
+ lattice.fYCount = r.yCount;
+ lattice.fYDivs = r.yDivs;
+ lattice.fRectTypes = (0 == r.flagCount) ? nullptr : r.flags;
+ lattice.fColors = (0 == r.flagCount) ? nullptr : r.colors;
+ lattice.fBounds = &r.src;
+ fCanvas->drawImageLattice(r.image.get(), lattice, r.dst, r.filter, r.paint);
+}
+
+DRAW(DrawImageRect, drawImageRect(r.image.get(), r.src, r.dst, r.sampling, r.paint, r.constraint))
+DRAW(DrawOval, drawOval(r.oval, r.paint))
+DRAW(DrawPaint, drawPaint(r.paint))
+DRAW(DrawPath, drawPath(r.path, r.paint))
+DRAW(DrawPatch, drawPatch(r.cubics, r.colors, r.texCoords, r.bmode, r.paint))
+DRAW(DrawPicture, drawPicture(r.picture.get(), &r.matrix, r.paint))
+DRAW(DrawPoints, drawPoints(r.mode, r.count, r.pts, r.paint))
+DRAW(DrawRRect, drawRRect(r.rrect, r.paint))
+DRAW(DrawRect, drawRect(r.rect, r.paint))
+DRAW(DrawRegion, drawRegion(r.region, r.paint))
+DRAW(DrawTextBlob, drawTextBlob(r.blob.get(), r.x, r.y, r.paint))
+#if defined(SK_GANESH)
+DRAW(DrawSlug, drawSlug(r.slug.get()))
+#else
+// Turn draw into a nop.
+template <> void Draw::draw(const DrawSlug&) {}
+#endif
+DRAW(DrawAtlas, drawAtlas(r.atlas.get(), r.xforms, r.texs, r.colors, r.count, r.mode, r.sampling,
+ r.cull, r.paint))
+DRAW(DrawVertices, drawVertices(r.vertices, r.bmode, r.paint))
+#ifdef SK_ENABLE_SKSL
+DRAW(DrawMesh, drawMesh(r.mesh, r.blender, r.paint))
+#else
+// Turn draw into a nop.
+template <> void Draw::draw(const DrawMesh&) {}
+#endif
+DRAW(DrawShadowRec, private_draw_shadow_rec(r.path, r.rec))
+DRAW(DrawAnnotation, drawAnnotation(r.rect, r.key.c_str(), r.value.get()))
+
+DRAW(DrawEdgeAAQuad, experimental_DrawEdgeAAQuad(
+ r.rect, r.clip, r.aa, r.color, r.mode))
+DRAW(DrawEdgeAAImageSet, experimental_DrawEdgeAAImageSet(
+ r.set.get(), r.count, r.dstClips, r.preViewMatrices, r.sampling, r.paint, r.constraint))
+
+#undef DRAW
+
+template <> void Draw::draw(const DrawDrawable& r) {
+ SkASSERT(r.index >= 0);
+ SkASSERT(r.index < fDrawableCount);
+ if (fDrawables) {
+ SkASSERT(nullptr == fDrawablePicts);
+ fCanvas->drawDrawable(fDrawables[r.index], r.matrix);
+ } else {
+ fCanvas->drawPicture(fDrawablePicts[r.index], r.matrix, nullptr);
+ }
+}
+
+// This is an SkRecord visitor that fills an SkBBoxHierarchy.
+//
+// The interesting part here is how to calculate bounds for ops which don't
+// have intrinsic bounds. What is the bounds of a Save or a Translate?
+//
+// We answer this by thinking about a particular definition of bounds: if I
+// don't execute this op, pixels in this rectangle might draw incorrectly. So
+// the bounds of a Save, a Translate, a Restore, etc. are the union of the
+// bounds of Draw* ops that they might have an effect on. For any given
+// Save/Restore block, the bounds of the Save, the Restore, and any other
+// non-drawing ("control") ops inside are exactly the union of the bounds of
+// the drawing ops inside that block.
+//
+// To implement this, we keep a stack of active Save blocks. As we consume ops
+// inside the Save/Restore block, drawing ops are unioned with the bounds of
+// the block, and control ops are stashed away for later. When we finish the
+// block with a Restore, our bounds are complete, and we go back and fill them
+// in for all the control ops we stashed away.
+class FillBounds : SkNoncopyable {
+public:
+ FillBounds(const SkRect& cullRect, const SkRecord& record,
+ SkRect bounds[], SkBBoxHierarchy::Metadata meta[])
+ : fCullRect(cullRect)
+ , fBounds(bounds)
+ , fMeta(meta) {
+ fCTM = SkMatrix::I();
+
+ // We push an extra save block to track the bounds of any top-level control operations.
+ fSaveStack.push_back({ 0, Bounds::MakeEmpty(), nullptr, fCTM });
+ }
+
+ ~FillBounds() {
+ // If we have any lingering unpaired Saves, simulate restores to make
+ // sure all ops in those Save blocks have their bounds calculated.
+ while (!fSaveStack.empty()) {
+ this->popSaveBlock();
+ }
+
+ // Any control ops not part of any Save/Restore block draw everywhere.
+ while (!fControlIndices.empty()) {
+ this->popControl(fCullRect);
+ }
+ }
+
+ void setCurrentOp(int currentOp) { fCurrentOp = currentOp; }
+
+
+ template <typename T> void operator()(const T& op) {
+ this->updateCTM(op);
+ this->trackBounds(op);
+ }
+
+ // In this file, SkRect are in local coordinates, Bounds are translated back to identity space.
+ typedef SkRect Bounds;
+
+ // Adjust rect for all paints that may affect its geometry, then map it to identity space.
+ Bounds adjustAndMap(SkRect rect, const SkPaint* paint) const {
+ // Inverted rectangles really confuse our BBHs.
+ rect.sort();
+
+ // Adjust the rect for its own paint.
+ if (!AdjustForPaint(paint, &rect)) {
+ // The paint could do anything to our bounds. The only safe answer is the cull.
+ return fCullRect;
+ }
+
+ // Adjust rect for all the paints from the SaveLayers we're inside.
+ if (!this->adjustForSaveLayerPaints(&rect)) {
+ // Same deal as above.
+ return fCullRect;
+ }
+
+ // Map the rect back to identity space.
+ fCTM.mapRect(&rect);
+
+ // Nothing can draw outside the cull rect.
+ if (!rect.intersect(fCullRect)) {
+ return Bounds::MakeEmpty();
+ }
+
+ return rect;
+ }
+
+private:
+ struct SaveBounds {
+ int controlOps; // Number of control ops in this Save block, including the Save.
+ Bounds bounds; // Bounds of everything in the block.
+ const SkPaint* paint; // Unowned. If set, adjusts the bounds of all ops in this block.
+ SkMatrix ctm;
+ };
+
+ // Only Restore, SetMatrix, Concat, and Translate change the CTM.
+ template <typename T> void updateCTM(const T&) {}
+ void updateCTM(const Restore& op) { fCTM = op.matrix; }
+ void updateCTM(const SetMatrix& op) { fCTM = op.matrix; }
+ void updateCTM(const SetM44& op) { fCTM = op.matrix.asM33(); }
+ void updateCTM(const Concat44& op) { fCTM.preConcat(op.matrix.asM33()); }
+ void updateCTM(const Concat& op) { fCTM.preConcat(op.matrix); }
+ void updateCTM(const Scale& op) { fCTM.preScale(op.sx, op.sy); }
+ void updateCTM(const Translate& op) { fCTM.preTranslate(op.dx, op.dy); }
+
+ // The bounds of these ops must be calculated when we hit the Restore
+ // from the bounds of the ops in the same Save block.
+ void trackBounds(const Save&) { this->pushSaveBlock(nullptr); }
+ void trackBounds(const SaveLayer& op) { this->pushSaveBlock(op.paint); }
+ void trackBounds(const SaveBehind&) { this->pushSaveBlock(nullptr); }
+ void trackBounds(const Restore&) {
+ const bool isSaveLayer = fSaveStack.back().paint != nullptr;
+ fBounds[fCurrentOp] = this->popSaveBlock();
+ fMeta [fCurrentOp].isDraw = isSaveLayer;
+ }
+
+ void trackBounds(const SetMatrix&) { this->pushControl(); }
+ void trackBounds(const SetM44&) { this->pushControl(); }
+ void trackBounds(const Concat&) { this->pushControl(); }
+ void trackBounds(const Concat44&) { this->pushControl(); }
+ void trackBounds(const Scale&) { this->pushControl(); }
+ void trackBounds(const Translate&) { this->pushControl(); }
+ void trackBounds(const ClipRect&) { this->pushControl(); }
+ void trackBounds(const ClipRRect&) { this->pushControl(); }
+ void trackBounds(const ClipPath&) { this->pushControl(); }
+ void trackBounds(const ClipRegion&) { this->pushControl(); }
+ void trackBounds(const ClipShader&) { this->pushControl(); }
+ void trackBounds(const ResetClip&) { this->pushControl(); }
+
+
+ // For all other ops, we can calculate and store the bounds directly now.
+ template <typename T> void trackBounds(const T& op) {
+ fBounds[fCurrentOp] = this->bounds(op);
+ fMeta [fCurrentOp].isDraw = true;
+ this->updateSaveBounds(fBounds[fCurrentOp]);
+ }
+
+ void pushSaveBlock(const SkPaint* paint) {
+ // Starting a new Save block. Push a new entry to represent that.
+ SaveBounds sb;
+ sb.controlOps = 0;
+ // If the paint affects transparent black,
+ // the bound shouldn't be smaller than the cull.
+ sb.bounds =
+ PaintMayAffectTransparentBlack(paint) ? fCullRect : Bounds::MakeEmpty();
+ sb.paint = paint;
+ sb.ctm = this->fCTM;
+
+ fSaveStack.push_back(sb);
+ this->pushControl();
+ }
+
+ static bool PaintMayAffectTransparentBlack(const SkPaint* paint) {
+ if (paint) {
+ // FIXME: this is very conservative
+ if ((paint->getImageFilter() &&
+ as_IFB(paint->getImageFilter())->affectsTransparentBlack()) ||
+ (paint->getColorFilter() &&
+ as_CFB(paint->getColorFilter())->affectsTransparentBlack())) {
+ return true;
+ }
+ const auto bm = paint->asBlendMode();
+ if (!bm) {
+ return true; // can we query other blenders for this?
+ }
+
+ // Unusual blendmodes require us to process a saved layer
+            // even with operations outside the clip.
+ // For example, DstIn is used by masking layers.
+ // https://code.google.com/p/skia/issues/detail?id=1291
+ // https://crbug.com/401593
+ switch (bm.value()) {
+ // For each of the following transfer modes, if the source
+ // alpha is zero (our transparent black), the resulting
+ // blended alpha is not necessarily equal to the original
+ // destination alpha.
+ case SkBlendMode::kClear:
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kDstIn:
+ case SkBlendMode::kSrcOut:
+ case SkBlendMode::kDstATop:
+ case SkBlendMode::kModulate:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+ }
+
+ Bounds popSaveBlock() {
+        // We're done with the Save block.  Apply the block's bounds to all control ops inside it.
+ SaveBounds sb = fSaveStack.back();
+ fSaveStack.pop_back();
+
+ while (sb.controlOps --> 0) {
+ this->popControl(sb.bounds);
+ }
+
+        // This whole Save block may be part of another Save block.
+ this->updateSaveBounds(sb.bounds);
+
+ // If called from a real Restore (not a phony one for balance), it'll need the bounds.
+ return sb.bounds;
+ }
+
+ void pushControl() {
+ fControlIndices.push_back(fCurrentOp);
+ if (!fSaveStack.empty()) {
+ fSaveStack.back().controlOps++;
+ }
+ }
+
+ void popControl(const Bounds& bounds) {
+ fBounds[fControlIndices.back()] = bounds;
+ fMeta [fControlIndices.back()].isDraw = false;
+ fControlIndices.pop_back();
+ }
+
+ void updateSaveBounds(const Bounds& bounds) {
+ // If we're in a Save block, expand its bounds to cover these bounds too.
+ if (!fSaveStack.empty()) {
+ fSaveStack.back().bounds.join(bounds);
+ }
+ }
+
+ Bounds bounds(const Flush&) const { return fCullRect; }
+
+ Bounds bounds(const DrawPaint&) const { return fCullRect; }
+ Bounds bounds(const DrawBehind&) const { return fCullRect; }
+ Bounds bounds(const NoOp&) const { return Bounds::MakeEmpty(); } // NoOps don't draw.
+
+ Bounds bounds(const DrawRect& op) const { return this->adjustAndMap(op.rect, &op.paint); }
+ Bounds bounds(const DrawRegion& op) const {
+ SkRect rect = SkRect::Make(op.region.getBounds());
+ return this->adjustAndMap(rect, &op.paint);
+ }
+ Bounds bounds(const DrawOval& op) const { return this->adjustAndMap(op.oval, &op.paint); }
+ // Tighter arc bounds?
+ Bounds bounds(const DrawArc& op) const { return this->adjustAndMap(op.oval, &op.paint); }
+ Bounds bounds(const DrawRRect& op) const {
+ return this->adjustAndMap(op.rrect.rect(), &op.paint);
+ }
+ Bounds bounds(const DrawDRRect& op) const {
+ return this->adjustAndMap(op.outer.rect(), &op.paint);
+ }
+ Bounds bounds(const DrawImage& op) const {
+ const SkImage* image = op.image.get();
+ SkRect rect = SkRect::MakeXYWH(op.left, op.top, image->width(), image->height());
+
+ return this->adjustAndMap(rect, op.paint);
+ }
+ Bounds bounds(const DrawImageLattice& op) const {
+ return this->adjustAndMap(op.dst, op.paint);
+ }
+ Bounds bounds(const DrawImageRect& op) const {
+ return this->adjustAndMap(op.dst, op.paint);
+ }
+ Bounds bounds(const DrawPath& op) const {
+ return op.path.isInverseFillType() ? fCullRect
+ : this->adjustAndMap(op.path.getBounds(), &op.paint);
+ }
+ Bounds bounds(const DrawPoints& op) const {
+ SkRect dst;
+ dst.setBounds(op.pts, op.count);
+
+ // Pad the bounding box a little to make sure hairline points' bounds aren't empty.
+ SkScalar stroke = std::max(op.paint.getStrokeWidth(), 0.01f);
+ dst.outset(stroke/2, stroke/2);
+
+ return this->adjustAndMap(dst, &op.paint);
+ }
+ Bounds bounds(const DrawPatch& op) const {
+ SkRect dst;
+ dst.setBounds(op.cubics, SkPatchUtils::kNumCtrlPts);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+ Bounds bounds(const DrawVertices& op) const {
+ return this->adjustAndMap(op.vertices->bounds(), &op.paint);
+ }
+ Bounds bounds(const DrawMesh& op) const {
+#ifdef SK_ENABLE_SKSL
+ return this->adjustAndMap(op.mesh.bounds(), &op.paint);
+#else
+ return SkRect::MakeEmpty();
+#endif
+ }
+ Bounds bounds(const DrawAtlas& op) const {
+ if (op.cull) {
+ // TODO: <reed> can we pass nullptr for the paint? Isn't cull already "correct"
+ // for the paint (by the caller)?
+ return this->adjustAndMap(*op.cull, op.paint);
+ } else {
+ return fCullRect;
+ }
+ }
+
+ Bounds bounds(const DrawShadowRec& op) const {
+ SkRect bounds;
+ SkDrawShadowMetrics::GetLocalBounds(op.path, op.rec, fCTM, &bounds);
+ return this->adjustAndMap(bounds, nullptr);
+ }
+
+ Bounds bounds(const DrawPicture& op) const {
+ SkRect dst = op.picture->cullRect();
+ op.matrix.mapRect(&dst);
+ return this->adjustAndMap(dst, op.paint);
+ }
+
+ Bounds bounds(const DrawTextBlob& op) const {
+ SkRect dst = op.blob->bounds();
+ dst.offset(op.x, op.y);
+ return this->adjustAndMap(dst, &op.paint);
+ }
+
+#if defined(SK_GANESH)
+ Bounds bounds(const DrawSlug& op) const {
+ SkRect dst = op.slug->sourceBoundsWithOrigin();
+ return this->adjustAndMap(dst, &op.slug->initialPaint());
+ }
+#else
+ Bounds bounds(const DrawSlug& op) const {
+ return SkRect::MakeEmpty();
+ }
+#endif
+
+ Bounds bounds(const DrawDrawable& op) const {
+ return this->adjustAndMap(op.worstCaseBounds, nullptr);
+ }
+
+ Bounds bounds(const DrawAnnotation& op) const {
+ return this->adjustAndMap(op.rect, nullptr);
+ }
+ Bounds bounds(const DrawEdgeAAQuad& op) const {
+ SkRect bounds = op.rect;
+ if (op.clip) {
+ bounds.setBounds(op.clip, 4);
+ }
+ return this->adjustAndMap(bounds, nullptr);
+ }
+ Bounds bounds(const DrawEdgeAAImageSet& op) const {
+ SkRect rect = SkRect::MakeEmpty();
+ int clipIndex = 0;
+ for (int i = 0; i < op.count; ++i) {
+ SkRect entryBounds = op.set[i].fDstRect;
+ if (op.set[i].fHasClip) {
+ entryBounds.setBounds(op.dstClips + clipIndex, 4);
+ clipIndex += 4;
+ }
+ if (op.set[i].fMatrixIndex >= 0) {
+ op.preViewMatrices[op.set[i].fMatrixIndex].mapRect(&entryBounds);
+ }
+ rect.join(this->adjustAndMap(entryBounds, nullptr));
+ }
+ return rect;
+ }
+
+ // Returns true if rect was meaningfully adjusted for the effects of paint,
+ // false if the paint could affect the rect in unknown ways.
+ static bool AdjustForPaint(const SkPaint* paint, SkRect* rect) {
+ if (paint) {
+ if (paint->canComputeFastBounds()) {
+ *rect = paint->computeFastBounds(*rect, rect);
+ return true;
+ }
+ return false;
+ }
+ return true;
+ }
+
+ bool adjustForSaveLayerPaints(SkRect* rect, int savesToIgnore = 0) const {
+ for (int i = fSaveStack.size() - 1 - savesToIgnore; i >= 0; i--) {
+ SkMatrix inverse;
+ if (!fSaveStack[i].ctm.invert(&inverse)) {
+ return false;
+ }
+ inverse.mapRect(rect);
+ if (!AdjustForPaint(fSaveStack[i].paint, rect)) {
+ return false;
+ }
+ fSaveStack[i].ctm.mapRect(rect);
+ }
+ return true;
+ }
+
+ // We do not guarantee anything for operations outside of the cull rect
+ const SkRect fCullRect;
+
+ // Conservative identity-space bounds for each op in the SkRecord.
+ Bounds* fBounds;
+
+ // Parallel array to fBounds, holding metadata for each bounds rect.
+ SkBBoxHierarchy::Metadata* fMeta;
+
+ // We walk fCurrentOp through the SkRecord,
+ // as we go using updateCTM() to maintain the exact CTM (fCTM).
+ int fCurrentOp;
+ SkMatrix fCTM;
+
+ // Used to track the bounds of Save/Restore blocks and the control ops inside them.
+ SkTDArray<SaveBounds> fSaveStack;
+ SkTDArray<int> fControlIndices;
+};
+
+} // namespace SkRecords
+
+void SkRecordFillBounds(const SkRect& cullRect, const SkRecord& record,
+ SkRect bounds[], SkBBoxHierarchy::Metadata meta[]) {
+ {
+ SkRecords::FillBounds visitor(cullRect, record, bounds, meta);
+ for (int i = 0; i < record.count(); i++) {
+ visitor.setCurrentOp(i);
+ record.visit(i, visitor);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkRecordDraw.h b/gfx/skia/skia/src/core/SkRecordDraw.h
new file mode 100644
index 0000000000..ca060ea7f0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordDraw.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordDraw_DEFINED
+#define SkRecordDraw_DEFINED
+
+#include "include/core/SkBBHFactory.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkMatrix.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkRecord.h"
+
+class SkDrawable;
+class SkLayerInfo;
+
+// Calculate conservative identity space bounds for each op in the record.
+void SkRecordFillBounds(const SkRect& cullRect, const SkRecord&,
+ SkRect bounds[], SkBBoxHierarchy::Metadata[]);
+
+// SkRecordFillBounds(), and gathers information about saveLayers and stores it for later
+// use (e.g., layer hoisting). The gathered information is sufficient to determine
+// where each saveLayer will land and which ops in the picture it represents.
+void SkRecordComputeLayers(const SkRect& cullRect, const SkRecord&, SkRect bounds[],
+ const SkBigPicture::SnapshotArray*, SkLayerInfo* data);
+
+// Draw an SkRecord into an SkCanvas. A convenience wrapper around SkRecords::Draw.
+void SkRecordDraw(const SkRecord&, SkCanvas*, SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[], int drawableCount,
+ const SkBBoxHierarchy*, SkPicture::AbortCallback*);
+
+// Draw a portion of an SkRecord into an SkCanvas.
+// When drawing a portion of an SkRecord the CTM on the passed in canvas must be
+// the composition of the replay matrix with the record-time CTM (for the portion
+// of the record that is being replayed). For setMatrix calls to behave correctly
+// the initialCTM parameter must set to just the replay matrix.
+void SkRecordPartialDraw(const SkRecord&, SkCanvas*,
+ SkPicture const* const drawablePicts[], int drawableCount,
+ int start, int stop, const SkM44& initialCTM);
+
+namespace SkRecords {
+
+// This is an SkRecord visitor that will draw that SkRecord to an SkCanvas.
+class Draw : SkNoncopyable {
+public:
+ explicit Draw(SkCanvas* canvas, SkPicture const* const drawablePicts[],
+ SkDrawable* const drawables[], int drawableCount,
+ const SkM44* initialCTM = nullptr)
+ : fInitialCTM(initialCTM ? *initialCTM : canvas->getLocalToDevice())
+ , fCanvas(canvas)
+ , fDrawablePicts(drawablePicts)
+ , fDrawables(drawables)
+ , fDrawableCount(drawableCount)
+ {}
+
+ // This operator calls methods on the |canvas|. The various draw() wrapper
+ // methods around SkCanvas are defined by the DRAW() macro in
+ // SkRecordDraw.cpp.
+ template <typename T> void operator()(const T& r) {
+ this->draw(r);
+ }
+
+protected:
+ SkPicture const* const* drawablePicts() const { return fDrawablePicts; }
+ int drawableCount() const { return fDrawableCount; }
+
+private:
+ // No base case, so we'll be compile-time checked that we implement all possibilities.
+ template <typename T> void draw(const T&);
+
+ const SkM44 fInitialCTM;
+ SkCanvas* fCanvas;
+ SkPicture const* const* fDrawablePicts;
+ SkDrawable* const* fDrawables;
+ int fDrawableCount;
+};
+
+} // namespace SkRecords
+
+#endif//SkRecordDraw_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordOpts.cpp b/gfx/skia/skia/src/core/SkRecordOpts.cpp
new file mode 100644
index 0000000000..88e8493064
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordOpts.cpp
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRecordOpts.h"
+
+#include "include/private/base/SkTDArray.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkRecordPattern.h"
+#include "src/core/SkRecords.h"
+
+using namespace SkRecords;
+
+// Most of the optimizations in this file are pattern-based. These are all defined as structs with:
+// - a Match typedef
+// - a bool onMatch(SkRceord*, Match*, int begin, int end) method,
+// which returns true if it made changes and false if not.
+
+// Run a pattern-based optimization once across the SkRecord, returning true if it made any changes.
+// It looks for spans which match Pass::Match, and when found calls onMatch() with that pattern,
+// record, and [begin,end) span of the commands that matched.
+template <typename Pass>
+static bool apply(Pass* pass, SkRecord* record) {
+ typename Pass::Match match;
+ bool changed = false;
+ int begin, end = 0;
+
+ while (match.search(record, &begin, &end)) {
+ changed |= pass->onMatch(record, &match, begin, end);
+ }
+ return changed;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void multiple_set_matrices(SkRecord* record) {
+ struct {
+ typedef Pattern<Is<SetMatrix>,
+ Greedy<Is<NoOp>>,
+ Is<SetMatrix> >
+ Match;
+
+ bool onMatch(SkRecord* record, Match* pattern, int begin, int end) {
+ record->replace<NoOp>(begin); // first SetMatrix
+ return true;
+ }
+ } pass;
+ while (apply(&pass, record));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if 0 // experimental, but needs knowledge of previous matrix to operate correctly
+static void apply_matrix_to_draw_params(SkRecord* record) {
+ struct {
+ typedef Pattern<Is<SetMatrix>,
+ Greedy<Is<NoOp>>,
+ Is<SetMatrix> >
+ Pattern;
+
+ bool onMatch(SkRecord* record, Pattern* pattern, int begin, int end) {
+ record->replace<NoOp>(begin); // first SetMatrix
+ return true;
+ }
+ } pass;
+ // No need to loop, as we never "open up" opportunities for more of this type of optimization.
+ apply(&pass, record);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Turns the logical NoOp Save and Restore in Save-Draw*-Restore patterns into actual NoOps.
+struct SaveOnlyDrawsRestoreNooper {
+ typedef Pattern<Is<Save>,
+ Greedy<Or<Is<NoOp>, IsDraw>>,
+ Is<Restore>>
+ Match;
+
+ bool onMatch(SkRecord* record, Match*, int begin, int end) {
+ record->replace<NoOp>(begin); // Save
+ record->replace<NoOp>(end-1); // Restore
+ return true;
+ }
+};
+
+static bool fold_opacity_layer_color_to_paint(const SkPaint* layerPaint,
+ bool isSaveLayer,
+ SkPaint* paint) {
+ // We assume layerPaint is always from a saveLayer. If isSaveLayer is
+ // true, we assume paint is too.
+
+ // The alpha folding can proceed if the filter layer paint does not have properties which cause
+ // the resulting filter layer to be "blended" in complex ways to the parent layer.
+ // TODO: most likely only some xfer modes are the hard constraints
+ if (!paint->isSrcOver()) {
+ return false;
+ }
+
+ if (!isSaveLayer && paint->getImageFilter()) {
+ // For normal draws, the paint color is used as one input for the color for the draw. Image
+ // filter will operate on the result, and thus we can not change the input.
+ // For layer saves, the image filter is applied to the layer contents. The layer is then
+ // modulated with the paint color, so it's fine to proceed with the fold for saveLayer
+ // paints with image filters.
+ return false;
+ }
+
+ if (paint->getColorFilter()) {
+ // Filter input depends on the paint color.
+
+ // Here we could filter the color if we knew the draw is going to be uniform color. This
+ // should be detectable as drawPath/drawRect/.. without a shader being uniform, while
+ // drawBitmap/drawSprite or a shader being non-uniform. However, current matchers don't
+ // give the type out easily, so just do not optimize that at the moment.
+ return false;
+ }
+
+ if (layerPaint) {
+ const uint32_t layerColor = layerPaint->getColor();
+ // The layer paint color must have only alpha component.
+ if (SK_ColorTRANSPARENT != SkColorSetA(layerColor, SK_AlphaTRANSPARENT)) {
+ return false;
+ }
+
+ // The layer paint can not have any effects.
+ if (layerPaint->getPathEffect() ||
+ layerPaint->getShader() ||
+ !layerPaint->isSrcOver() ||
+ layerPaint->getMaskFilter() ||
+ layerPaint->getColorFilter() ||
+ layerPaint->getImageFilter()) {
+ return false;
+ }
+ paint->setAlpha(SkMulDiv255Round(paint->getAlpha(), SkColorGetA(layerColor)));
+ }
+
+ return true;
+}
+
+// Turns logical no-op Save-[non-drawing command]*-Restore patterns into actual no-ops.
+struct SaveNoDrawsRestoreNooper {
+ // Greedy matches greedily, so we also have to exclude Save and Restore.
+ // Nested SaveLayers need to be excluded, or we'll match their Restore!
+ typedef Pattern<Is<Save>,
+ Greedy<Not<Or<Is<Save>,
+ Is<SaveLayer>,
+ Is<Restore>,
+ IsDraw>>>,
+ Is<Restore>>
+ Match;
+
+ bool onMatch(SkRecord* record, Match*, int begin, int end) {
+ // The entire span between Save and Restore (inclusively) does nothing.
+ for (int i = begin; i < end; i++) {
+ record->replace<NoOp>(i);
+ }
+ return true;
+ }
+};
+void SkRecordNoopSaveRestores(SkRecord* record) {
+ SaveOnlyDrawsRestoreNooper onlyDraws;
+ SaveNoDrawsRestoreNooper noDraws;
+
+ // Run until they stop changing things.
+ while (apply(&onlyDraws, record) || apply(&noDraws, record));
+}
+
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+static bool effectively_srcover(const SkPaint* paint) {
+ if (!paint || paint->isSrcOver()) {
+ return true;
+ }
+ // src-mode with opaque and no effects (which might change opaqueness) is ok too.
+ return !paint->getShader() && !paint->getColorFilter() && !paint->getImageFilter() &&
+ 0xFF == paint->getAlpha() && paint->asBlendMode() == SkBlendMode::kSrc;
+}
+
+// For some SaveLayer-[drawing command]-Restore patterns, merge the SaveLayer's alpha into the
+// draw, and no-op the SaveLayer and Restore.
+struct SaveLayerDrawRestoreNooper {
+ typedef Pattern<Is<SaveLayer>, IsDraw, Is<Restore>> Match;
+
+ bool onMatch(SkRecord* record, Match* match, int begin, int end) {
+ if (match->first<SaveLayer>()->backdrop) {
+ // can't throw away the layer if we have a backdrop
+ return false;
+ }
+
+ // A SaveLayer's bounds field is just a hint, so we should be free to ignore it.
+ SkPaint* layerPaint = match->first<SaveLayer>()->paint;
+ SkPaint* drawPaint = match->second<SkPaint>();
+
+ if (nullptr == layerPaint && effectively_srcover(drawPaint)) {
+ // There wasn't really any point to this SaveLayer at all.
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ if (drawPaint == nullptr) {
+ // We can just give the draw the SaveLayer's paint.
+ // TODO(mtklein): figure out how to do this clearly
+ return false;
+ }
+
+ if (!fold_opacity_layer_color_to_paint(layerPaint, false /*isSaveLayer*/, drawPaint)) {
+ return false;
+ }
+
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ static bool KillSaveLayerAndRestore(SkRecord* record, int saveLayerIndex) {
+ record->replace<NoOp>(saveLayerIndex); // SaveLayer
+ record->replace<NoOp>(saveLayerIndex+2); // Restore
+ return true;
+ }
+};
+void SkRecordNoopSaveLayerDrawRestores(SkRecord* record) {
+ SaveLayerDrawRestoreNooper pass;
+ apply(&pass, record);
+}
+#endif
+
+/* For SVG generated:
+ SaveLayer (non-opaque, typically for CSS opacity)
+ Save
+ ClipRect
+ SaveLayer (typically for SVG filter)
+ Restore
+ Restore
+ Restore
+*/
+struct SvgOpacityAndFilterLayerMergePass {
+ typedef Pattern<Is<SaveLayer>, Is<Save>, Is<ClipRect>, Is<SaveLayer>,
+ Is<Restore>, Is<Restore>, Is<Restore>> Match;
+
+ bool onMatch(SkRecord* record, Match* match, int begin, int end) {
+ if (match->first<SaveLayer>()->backdrop) {
+ // can't throw away the layer if we have a backdrop
+ return false;
+ }
+
+ SkPaint* opacityPaint = match->first<SaveLayer>()->paint;
+ if (nullptr == opacityPaint) {
+ // There wasn't really any point to this SaveLayer at all.
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ // This layer typically contains a filter, but this should work for layers with for other
+ // purposes too.
+ SkPaint* filterLayerPaint = match->fourth<SaveLayer>()->paint;
+ if (filterLayerPaint == nullptr) {
+ // We can just give the inner SaveLayer the paint of the outer SaveLayer.
+ // TODO(mtklein): figure out how to do this clearly
+ return false;
+ }
+
+ if (!fold_opacity_layer_color_to_paint(opacityPaint, true /*isSaveLayer*/,
+ filterLayerPaint)) {
+ return false;
+ }
+
+ return KillSaveLayerAndRestore(record, begin);
+ }
+
+ static bool KillSaveLayerAndRestore(SkRecord* record, int saveLayerIndex) {
+ record->replace<NoOp>(saveLayerIndex); // SaveLayer
+ record->replace<NoOp>(saveLayerIndex + 6); // Restore
+ return true;
+ }
+};
+
+void SkRecordMergeSvgOpacityAndFilterLayers(SkRecord* record) {
+ SvgOpacityAndFilterLayerMergePass pass;
+ apply(&pass, record);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkRecordOptimize(SkRecord* record) {
+ // This might be useful as a first pass in the future if we want to weed
+ // out junk for other optimization passes. Right now, nothing needs it,
+ // and the bounding box hierarchy will do the work of skipping no-op
+ // Save-NoDraw-Restore sequences better than we can here.
+ // As there is a known problem with this peephole and drawAnnotation, disable this.
+ // If we want to enable this we must first fix this bug:
+ // https://bugs.chromium.org/p/skia/issues/detail?id=5548
+// SkRecordNoopSaveRestores(record);
+
+ // Turn off this optimization completely for Android framework
+ // because it makes the following Android CTS test fail:
+ // android.uirendering.cts.testclasses.LayerTests#testSaveLayerClippedWithAlpha
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkRecordNoopSaveLayerDrawRestores(record);
+#endif
+ SkRecordMergeSvgOpacityAndFilterLayers(record);
+
+ record->defrag();
+}
+
+void SkRecordOptimize2(SkRecord* record) {
+ multiple_set_matrices(record);
+ SkRecordNoopSaveRestores(record);
+ // See why we turn this off in SkRecordOptimize above.
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ SkRecordNoopSaveLayerDrawRestores(record);
+#endif
+ SkRecordMergeSvgOpacityAndFilterLayers(record);
+
+ record->defrag();
+}
diff --git a/gfx/skia/skia/src/core/SkRecordOpts.h b/gfx/skia/skia/src/core/SkRecordOpts.h
new file mode 100644
index 0000000000..a1e3c245a0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordOpts.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordOpts_DEFINED
+#define SkRecordOpts_DEFINED
+
+#include "src/core/SkRecord.h"
+
+// Run all optimizations in recommended order.
+void SkRecordOptimize(SkRecord*);
+
+// Turns logical no-op Save-[non-drawing command]*-Restore patterns into actual no-ops.
+void SkRecordNoopSaveRestores(SkRecord*);
+
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+// For some SaveLayer-[drawing command]-Restore patterns, merge the SaveLayer's alpha into the
+// draw, and no-op the SaveLayer and Restore.
+void SkRecordNoopSaveLayerDrawRestores(SkRecord*);
+#endif
+
+// For SVG generated SaveLayer-Save-ClipRect-SaveLayer-3xRestore patterns, merge
+// the alpha of the first SaveLayer to the second SaveLayer.
+void SkRecordMergeSvgOpacityAndFilterLayers(SkRecord*);
+
+// Experimental optimizers
+void SkRecordOptimize2(SkRecord*);
+
+#endif//SkRecordOpts_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordPattern.h b/gfx/skia/skia/src/core/SkRecordPattern.h
new file mode 100644
index 0000000000..4244fbb3e6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordPattern.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecordPattern_DEFINED
+#define SkRecordPattern_DEFINED
+
+#include "include/private/base/SkTLogic.h"
+#include "src/core/SkRecord.h"
+
+namespace SkRecords {
+
+// First, some matchers. These match a single command in the SkRecord,
+// and may hang onto some data from it. If so, you can get the data by calling .get().
+
+// Matches a command of type T, and stores that command.
+template <typename T>
+class Is {
+public:
+ Is() : fPtr(nullptr) {}
+
+ typedef T type;
+ type* get() { return fPtr; }
+
+ bool operator()(T* ptr) {
+ fPtr = ptr;
+ return true;
+ }
+
+ template <typename U>
+ bool operator()(U*) {
+ fPtr = nullptr;
+ return false;
+ }
+
+private:
+ type* fPtr;
+};
+
+// Matches any command that draws, and stores its paint.
+class IsDraw {
+public:
+ IsDraw() : fPaint(nullptr) {}
+
+ typedef SkPaint type;
+ type* get() { return fPaint; }
+
+ template <typename T>
+ std::enable_if_t<(T::kTags & kDrawWithPaint_Tag) == kDrawWithPaint_Tag, bool>
+ operator()(T* draw) {
+ fPaint = AsPtr(draw->paint);
+ return true;
+ }
+
+ template <typename T>
+ std::enable_if_t<(T::kTags & kDrawWithPaint_Tag) == kDraw_Tag, bool> operator()(T* draw) {
+ fPaint = nullptr;
+ return true;
+ }
+
+ template <typename T>
+ std::enable_if_t<!(T::kTags & kDraw_Tag), bool> operator()(T* draw) {
+ fPaint = nullptr;
+ return false;
+ }
+
+private:
+ // Abstracts away whether the paint is always part of the command or optional.
+ template <typename T> static T* AsPtr(SkRecords::Optional<T>& x) { return x; }
+ template <typename T> static T* AsPtr(T& x) { return &x; }
+
+ type* fPaint;
+};
+
+// Matches if Matcher doesn't. Stores nothing.
+template <typename Matcher>
+struct Not {
+ template <typename T>
+ bool operator()(T* ptr) { return !Matcher()(ptr); }
+};
+
+// Matches if any of First or Rest... does. Stores nothing.
+template <typename First, typename... Rest>
+struct Or {
+ template <typename T>
+ bool operator()(T* ptr) { return First()(ptr) || Or<Rest...>()(ptr); }
+};
+template <typename First>
+struct Or<First> {
+ template <typename T>
+ bool operator()(T* ptr) { return First()(ptr); }
+};
+
+
+// Greedy is a special matcher that greedily matches Matcher 0 or more times. Stores nothing.
+template <typename Matcher>
+struct Greedy {
+ template <typename T>
+ bool operator()(T* ptr) { return Matcher()(ptr); }
+};
+
+// Pattern matches each of its matchers in order.
+//
+// This is the main entry point to pattern matching, and so provides a couple of extra API bits:
+// - search scans through the record to look for matches;
+// - first, second, third, ... return the data stored by their respective matchers in the pattern.
+
+template <typename... Matchers> class Pattern;
+
+template <> class Pattern<> {
+public:
+ // Bottoms out recursion. Just return whatever i the front decided on.
+ int match(SkRecord*, int i) { return i; }
+};
+
+template <typename First, typename... Rest>
+class Pattern<First, Rest...> {
+public:
+ // If this pattern matches the SkRecord starting from i,
+ // return the index just past the end of the pattern, otherwise return 0.
+ SK_ALWAYS_INLINE int match(SkRecord* record, int i) {
+ i = this->matchFirst(&fFirst, record, i);
+ return i > 0 ? fRest.match(record, i) : 0;
+ }
+
+ // Starting from *end, walk through the SkRecord to find the first span matching this pattern.
+ // If there is no such span, return false. If there is, return true and set [*begin, *end).
+ SK_ALWAYS_INLINE bool search(SkRecord* record, int* begin, int* end) {
+ for (*begin = *end; *begin < record->count(); ++(*begin)) {
+ *end = this->match(record, *begin);
+ if (*end != 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // TODO: some sort of smart get<i>()
+ template <typename T> T* first() { return fFirst.get(); }
+ template <typename T> T* second() { return fRest.template first<T>(); }
+ template <typename T> T* third() { return fRest.template second<T>(); }
+ template <typename T> T* fourth() { return fRest.template third<T>(); }
+
+private:
+ // If first isn't a Greedy, try to match at i once.
+ template <typename T>
+ int matchFirst(T* first, SkRecord* record, int i) {
+ if (i < record->count()) {
+ if (record->mutate(i, *first)) {
+ return i+1;
+ }
+ }
+ return 0;
+ }
+
+ // If first is a Greedy, walk i until it doesn't match.
+ template <typename T>
+ int matchFirst(Greedy<T>* first, SkRecord* record, int i) {
+ while (i < record->count()) {
+ if (!record->mutate(i, *first)) {
+ return i;
+ }
+ i++;
+ }
+ return 0;
+ }
+
+ First fFirst;
+ Pattern<Rest...> fRest;
+};
+
+} // namespace SkRecords
+
+#endif//SkRecordPattern_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecordedDrawable.cpp b/gfx/skia/skia/src/core/SkRecordedDrawable.cpp
new file mode 100644
index 0000000000..0f24468c55
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordedDrawable.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPictureRecorder.h"
+#include "src/core/SkPictureData.h"
+#include "src/core/SkPicturePlayback.h"
+#include "src/core/SkPictureRecord.h"
+#include "src/core/SkRecordDraw.h"
+#include "src/core/SkRecordedDrawable.h"
+
+#if defined(SK_GANESH)
+#include "include/private/chromium/Slug.h"
+#endif
+
+size_t SkRecordedDrawable::onApproximateBytesUsed() {
+ size_t drawablesSize = 0;
+ if (fDrawableList) {
+ for (auto&& drawable : *fDrawableList) {
+ drawablesSize += drawable->approximateBytesUsed();
+ }
+ }
+ return sizeof(*this) +
+ (fRecord ? fRecord->bytesUsed() : 0) +
+ (fBBH ? fBBH->bytesUsed() : 0) +
+ drawablesSize;
+}
+
+void SkRecordedDrawable::onDraw(SkCanvas* canvas) {
+ SkDrawable* const* drawables = nullptr;
+ int drawableCount = 0;
+ if (fDrawableList) {
+ drawables = fDrawableList->begin();
+ drawableCount = fDrawableList->count();
+ }
+ SkRecordDraw(*fRecord, canvas, nullptr, drawables, drawableCount, fBBH.get(), nullptr);
+}
+
+SkPicture* SkRecordedDrawable::onNewPictureSnapshot() {
+ // TODO: should we plumb-down the BBHFactory and recordFlags from our host
+ // PictureRecorder?
+ std::unique_ptr<SkBigPicture::SnapshotArray> pictList{
+ fDrawableList ? fDrawableList->newDrawableSnapshot() : nullptr
+ };
+
+ size_t subPictureBytes = 0;
+ for (int i = 0; pictList && i < pictList->count(); i++) {
+ subPictureBytes += pictList->begin()[i]->approximateBytesUsed();
+ }
+ return new SkBigPicture(fBounds, fRecord, std::move(pictList), fBBH, subPictureBytes);
+}
+
+void SkRecordedDrawable::flatten(SkWriteBuffer& buffer) const {
+ // Write the bounds.
+ buffer.writeRect(fBounds);
+
+ // Create an SkPictureRecord to record the draw commands.
+ SkPictInfo info;
+ SkPictureRecord pictureRecord(SkISize::Make(fBounds.width(), fBounds.height()), 0);
+
+ // If the query contains the whole picture, don't bother with the bounding box hierarchy.
+ SkBBoxHierarchy* bbh;
+ if (pictureRecord.getLocalClipBounds().contains(fBounds)) {
+ bbh = nullptr;
+ } else {
+ bbh = fBBH.get();
+ }
+
+ // Record the draw commands.
+ SkDrawable* const* drawables = fDrawableList ? fDrawableList->begin() : nullptr;
+ int drawableCount = fDrawableList ? fDrawableList->count() : 0;
+ pictureRecord.beginRecording();
+ SkRecordDraw(*fRecord, &pictureRecord, nullptr, drawables, drawableCount, bbh, nullptr);
+ pictureRecord.endRecording();
+
+ // Flatten the recorded commands and drawables.
+ SkPictureData pictureData(pictureRecord, info);
+ pictureData.flatten(buffer);
+}
+
+sk_sp<SkFlattenable> SkRecordedDrawable::CreateProc(SkReadBuffer& buffer) {
+ // Read the bounds.
+ SkRect bounds;
+ buffer.readRect(&bounds);
+
+ // Unflatten into a SkPictureData.
+ SkPictInfo info;
+ info.setVersion(buffer.getVersion());
+ info.fCullRect = bounds;
+ std::unique_ptr<SkPictureData> pictureData(SkPictureData::CreateFromBuffer(buffer, info));
+ if (!pictureData) {
+ return nullptr;
+ }
+
+ // Create a drawable.
+ SkPicturePlayback playback(pictureData.get());
+ SkPictureRecorder recorder;
+ playback.draw(recorder.beginRecording(bounds), nullptr, &buffer);
+ return recorder.finishRecordingAsDrawable();
+}
diff --git a/gfx/skia/skia/src/core/SkRecordedDrawable.h b/gfx/skia/skia/src/core/SkRecordedDrawable.h
new file mode 100644
index 0000000000..15914b4e9e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecordedDrawable.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkRecordedDrawable_DEFINED
+#define SkRecordedDrawable_DEFINED
+
+#include "include/core/SkDrawable.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkRecorder.h"
+
+class SkRecordedDrawable : public SkDrawable {
+public:
+ SkRecordedDrawable(sk_sp<SkRecord> record, sk_sp<SkBBoxHierarchy> bbh,
+ std::unique_ptr<SkDrawableList> drawableList, const SkRect& bounds)
+ : fRecord(std::move(record))
+ , fBBH(std::move(bbh))
+ , fDrawableList(std::move(drawableList))
+ , fBounds(bounds)
+ {}
+
+ void flatten(SkWriteBuffer& buffer) const override;
+
+protected:
+ SkRect onGetBounds() override { return fBounds; }
+ size_t onApproximateBytesUsed() override;
+
+ void onDraw(SkCanvas* canvas) override;
+
+ SkPicture* onNewPictureSnapshot() override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkRecordedDrawable)
+
+ sk_sp<SkRecord> fRecord;
+ sk_sp<SkBBoxHierarchy> fBBH;
+ std::unique_ptr<SkDrawableList> fDrawableList;
+ const SkRect fBounds;
+};
+#endif // SkRecordedDrawable_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecorder.cpp b/gfx/skia/skia/src/core/SkRecorder.cpp
new file mode 100644
index 0000000000..b1eebded69
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecorder.cpp
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRecorder.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkVertices.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkBigPicture.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkRecord.h"
+#include "src/core/SkRecords.h"
+#include "src/text/GlyphRun.h"
+#include "src/utils/SkPatchUtils.h"
+
+#if defined(SK_GANESH)
+#include "include/private/chromium/Slug.h"
+#endif
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <new>
+
+class SkBlender;
+class SkMesh;
+class SkPath;
+class SkRRect;
+class SkRegion;
+class SkSurfaceProps;
+enum class SkBlendMode;
+enum class SkClipOp;
+struct SkDrawShadowRec;
+
+using namespace skia_private;
+
+SkDrawableList::~SkDrawableList() {
+ for(SkDrawable* p : fArray) {
+ p->unref();
+ }
+ fArray.reset();
+}
+
+SkBigPicture::SnapshotArray* SkDrawableList::newDrawableSnapshot() {
+ const int count = fArray.size();
+ if (0 == count) {
+ return nullptr;
+ }
+ AutoTMalloc<const SkPicture*> pics(count);
+ for (int i = 0; i < count; ++i) {
+ pics[i] = fArray[i]->newPictureSnapshot();
+ }
+ return new SkBigPicture::SnapshotArray(pics.release(), count);
+}
+
+void SkDrawableList::append(SkDrawable* drawable) {
+ *fArray.append() = SkRef(drawable);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+static SkIRect safe_picture_bounds(const SkRect& bounds) {
+ SkIRect picBounds = bounds.roundOut();
+ // roundOut() saturates the float edges to +/-SK_MaxS32FitsInFloat (~2billion), but this is
+ // large enough that width/height calculations will overflow, leading to negative dimensions.
+ static constexpr int32_t kSafeEdge = SK_MaxS32FitsInFloat / 2 - 1;
+ static constexpr SkIRect kSafeBounds = {-kSafeEdge, -kSafeEdge, kSafeEdge, kSafeEdge};
+ static_assert((kSafeBounds.fRight - kSafeBounds.fLeft) >= 0 &&
+ (kSafeBounds.fBottom - kSafeBounds.fTop) >= 0);
+ if (!picBounds.intersect(kSafeBounds)) {
+ picBounds.setEmpty();
+ }
+ return picBounds;
+}
+
+SkRecorder::SkRecorder(SkRecord* record, int width, int height)
+ : SkCanvasVirtualEnforcer<SkNoDrawCanvas>(width, height)
+ , fApproxBytesUsedBySubPictures(0)
+ , fRecord(record) {
+ SkASSERT(this->imageInfo().width() >= 0 && this->imageInfo().height() >= 0);
+}
+
+SkRecorder::SkRecorder(SkRecord* record, const SkRect& bounds)
+ : SkCanvasVirtualEnforcer<SkNoDrawCanvas>(safe_picture_bounds(bounds))
+ , fApproxBytesUsedBySubPictures(0)
+ , fRecord(record) {
+ SkASSERT(this->imageInfo().width() >= 0 && this->imageInfo().height() >= 0);
+}
+
+void SkRecorder::reset(SkRecord* record, const SkRect& bounds) {
+ this->forgetRecord();
+ fRecord = record;
+ this->resetCanvas(safe_picture_bounds(bounds));
+ SkASSERT(this->imageInfo().width() >= 0 && this->imageInfo().height() >= 0);
+}
+
+void SkRecorder::forgetRecord() {
+ fDrawableList.reset(nullptr);
+ fApproxBytesUsedBySubPictures = 0;
+ fRecord = nullptr;
+}
+
+// To make appending to fRecord a little less verbose.
+template<typename T, typename... Args>
+void SkRecorder::append(Args&&... args) {
+ new (fRecord->append<T>()) T{std::forward<Args>(args)...};
+}
+
+// For methods which must call back into SkNoDrawCanvas.
+#define INHERITED(method, ...) this->SkNoDrawCanvas::method(__VA_ARGS__)
+
+// Use copy() only for optional arguments, to be copied if present or skipped if not.
+// (For most types we just pass by value and let copy constructors do their thing.)
+template <typename T>
+T* SkRecorder::copy(const T* src) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ return new (fRecord->alloc<T>()) T(*src);
+}
+
+// This copy() is for arrays.
+// It will work with POD or non-POD, though currently we only use it for POD.
+template <typename T>
+T* SkRecorder::copy(const T src[], size_t count) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ T* dst = fRecord->alloc<T>(count);
+ for (size_t i = 0; i < count; i++) {
+ new (dst + i) T(src[i]);
+ }
+ return dst;
+}
+
+// Specialization for copying strings, using memcpy.
+// This measured around 2x faster for copying code points,
+// but I found no corresponding speedup for other arrays.
+template <>
+char* SkRecorder::copy(const char src[], size_t count) {
+ if (nullptr == src) {
+ return nullptr;
+ }
+ char* dst = fRecord->alloc<char>(count);
+ memcpy(dst, src, count);
+ return dst;
+}
+
+// As above, assuming and copying a terminating \0.
+template <>
+char* SkRecorder::copy(const char* src) {
+ return this->copy(src, strlen(src)+1);
+}
+
+void SkRecorder::onDrawPaint(const SkPaint& paint) {
+ this->append<SkRecords::DrawPaint>(paint);
+}
+
+void SkRecorder::onDrawBehind(const SkPaint& paint) {
+ this->append<SkRecords::DrawBehind>(paint);
+}
+
+void SkRecorder::onDrawPoints(PointMode mode,
+ size_t count,
+ const SkPoint pts[],
+ const SkPaint& paint) {
+ this->append<SkRecords::DrawPoints>(paint, mode, SkToUInt(count), this->copy(pts, count));
+}
+
+void SkRecorder::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ this->append<SkRecords::DrawRect>(paint, rect);
+}
+
+void SkRecorder::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ this->append<SkRecords::DrawRegion>(paint, region);
+}
+
+void SkRecorder::onDrawOval(const SkRect& oval, const SkPaint& paint) {
+ this->append<SkRecords::DrawOval>(paint, oval);
+}
+
+void SkRecorder::onDrawArc(const SkRect& oval, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ this->append<SkRecords::DrawArc>(paint, oval, startAngle, sweepAngle, useCenter);
+}
+
+void SkRecorder::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ this->append<SkRecords::DrawRRect>(paint, rrect);
+}
+
+void SkRecorder::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ this->append<SkRecords::DrawDRRect>(paint, outer, inner);
+}
+
+void SkRecorder::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ if (!fDrawableList) {
+ fDrawableList = std::make_unique<SkDrawableList>();
+ }
+ fDrawableList->append(drawable);
+ this->append<SkRecords::DrawDrawable>(this->copy(matrix), drawable->getBounds(), fDrawableList->count() - 1);
+}
+
+void SkRecorder::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ this->append<SkRecords::DrawPath>(paint, path);
+}
+
+void SkRecorder::onDrawImage2(const SkImage* image, SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ this->append<SkRecords::DrawImage>(this->copy(paint), sk_ref_sp(image), x, y, sampling);
+}
+
+void SkRecorder::onDrawImageRect2(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ this->append<SkRecords::DrawImageRect>(this->copy(paint), sk_ref_sp(image), src, dst,
+ sampling, constraint);
+}
+
+void SkRecorder::onDrawImageLattice2(const SkImage* image, const Lattice& lattice, const SkRect& dst,
+ SkFilterMode filter, const SkPaint* paint) {
+ int flagCount = lattice.fRectTypes ? (lattice.fXCount + 1) * (lattice.fYCount + 1) : 0;
+ SkASSERT(lattice.fBounds);
+ this->append<SkRecords::DrawImageLattice>(this->copy(paint), sk_ref_sp(image),
+ lattice.fXCount, this->copy(lattice.fXDivs, lattice.fXCount),
+ lattice.fYCount, this->copy(lattice.fYDivs, lattice.fYCount),
+ flagCount, this->copy(lattice.fRectTypes, flagCount),
+ this->copy(lattice.fColors, flagCount), *lattice.fBounds, dst, filter);
+}
+
+void SkRecorder::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ this->append<SkRecords::DrawTextBlob>(paint, sk_ref_sp(blob), x, y);
+}
+
+#if defined(SK_GANESH)
+void SkRecorder::onDrawSlug(const sktext::gpu::Slug* slug) {
+ this->append<SkRecords::DrawSlug>(sk_ref_sp(slug));
+}
+#endif
+
+void SkRecorder::onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) {
+ sk_sp<SkTextBlob> blob = sk_ref_sp(glyphRunList.blob());
+ if (glyphRunList.blob() == nullptr) {
+ blob = glyphRunList.makeBlob();
+ }
+
+ this->onDrawTextBlob(blob.get(), glyphRunList.origin().x(), glyphRunList.origin().y(), paint);
+}
+
+void SkRecorder::onDrawPicture(const SkPicture* pic, const SkMatrix* matrix, const SkPaint* paint) {
+ fApproxBytesUsedBySubPictures += pic->approximateBytesUsed();
+ this->append<SkRecords::DrawPicture>(this->copy(paint), sk_ref_sp(pic), matrix ? *matrix : SkMatrix::I());
+}
+
+void SkRecorder::onDrawVerticesObject(const SkVertices* vertices, SkBlendMode bmode,
+ const SkPaint& paint) {
+ this->append<SkRecords::DrawVertices>(paint,
+ sk_ref_sp(const_cast<SkVertices*>(vertices)),
+ bmode);
+}
+
+#ifdef SK_ENABLE_SKSL
+void SkRecorder::onDrawMesh(const SkMesh& mesh, sk_sp<SkBlender> blender, const SkPaint& paint) {
+ this->append<SkRecords::DrawMesh>(paint, mesh, std::move(blender));
+}
+#endif
+
+void SkRecorder::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ this->append<SkRecords::DrawPatch>(paint,
+ cubics ? this->copy(cubics, SkPatchUtils::kNumCtrlPts) : nullptr,
+ colors ? this->copy(colors, SkPatchUtils::kNumCorners) : nullptr,
+ texCoords ? this->copy(texCoords, SkPatchUtils::kNumCorners) : nullptr,
+ bmode);
+}
+
+void SkRecorder::onDrawAtlas2(const SkImage* atlas, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode mode,
+ const SkSamplingOptions& sampling, const SkRect* cull,
+ const SkPaint* paint) {
+ this->append<SkRecords::DrawAtlas>(this->copy(paint),
+ sk_ref_sp(atlas),
+ this->copy(xform, count),
+ this->copy(tex, count),
+ this->copy(colors, count),
+ count,
+ mode,
+ sampling,
+ this->copy(cull));
+}
+
+void SkRecorder::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ this->append<SkRecords::DrawShadowRec>(path, rec);
+}
+
+void SkRecorder::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ this->append<SkRecords::DrawAnnotation>(rect, SkString(key), sk_ref_sp(value));
+}
+
+void SkRecorder::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aa, const SkColor4f& color, SkBlendMode mode) {
+ this->append<SkRecords::DrawEdgeAAQuad>(
+ rect, this->copy(clip, 4), aa, color, mode);
+}
+
+void SkRecorder::onDrawEdgeAAImageSet2(const ImageSetEntry set[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ int totalDstClipCount, totalMatrixCount;
+ SkCanvasPriv::GetDstClipAndMatrixCounts(set, count, &totalDstClipCount, &totalMatrixCount);
+
+ AutoTArray<ImageSetEntry> setCopy(count);
+ for (int i = 0; i < count; ++i) {
+ setCopy[i] = set[i];
+ }
+
+ this->append<SkRecords::DrawEdgeAAImageSet>(this->copy(paint), std::move(setCopy), count,
+ this->copy(dstClips, totalDstClipCount),
+ this->copy(preViewMatrices, totalMatrixCount), sampling, constraint);
+}
+
+void SkRecorder::onFlush() {
+ this->append<SkRecords::Flush>();
+}
+
+void SkRecorder::willSave() {
+ this->append<SkRecords::Save>();
+}
+
+SkCanvas::SaveLayerStrategy SkRecorder::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ this->append<SkRecords::SaveLayer>(this->copy(rec.fBounds)
+ , this->copy(rec.fPaint)
+ , sk_ref_sp(rec.fBackdrop)
+ , rec.fSaveLayerFlags
+ , SkCanvasPriv::GetBackdropScaleFactor(rec));
+ return SkCanvas::kNoLayer_SaveLayerStrategy;
+}
+
+bool SkRecorder::onDoSaveBehind(const SkRect* subset) {
+ this->append<SkRecords::SaveBehind>(this->copy(subset));
+ return false;
+}
+
+void SkRecorder::didRestore() {
+ this->append<SkRecords::Restore>(this->getTotalMatrix());
+}
+
+void SkRecorder::didConcat44(const SkM44& m) {
+ this->append<SkRecords::Concat44>(m);
+}
+
+void SkRecorder::didSetM44(const SkM44& m) {
+ this->append<SkRecords::SetM44>(m);
+}
+
+void SkRecorder::didScale(SkScalar sx, SkScalar sy) {
+ this->append<SkRecords::Scale>(sx, sy);
+}
+
+void SkRecorder::didTranslate(SkScalar dx, SkScalar dy) {
+ this->append<SkRecords::Translate>(dx, dy);
+}
+
+void SkRecorder::onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipRect, rect, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->append<SkRecords::ClipRect>(rect, opAA);
+}
+
+void SkRecorder::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipRRect, rrect, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->append<SkRecords::ClipRRect>(rrect, opAA);
+}
+
+void SkRecorder::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ INHERITED(onClipPath, path, op, edgeStyle);
+ SkRecords::ClipOpAndAA opAA(op, kSoft_ClipEdgeStyle == edgeStyle);
+ this->append<SkRecords::ClipPath>(path, opAA);
+}
+
+void SkRecorder::onClipShader(sk_sp<SkShader> cs, SkClipOp op) {
+ INHERITED(onClipShader, cs, op);
+ this->append<SkRecords::ClipShader>(std::move(cs), op);
+}
+
+void SkRecorder::onClipRegion(const SkRegion& deviceRgn, SkClipOp op) {
+ INHERITED(onClipRegion, deviceRgn, op);
+ this->append<SkRecords::ClipRegion>(deviceRgn, op);
+}
+
+void SkRecorder::onResetClip() {
+ INHERITED(onResetClip);
+ this->append<SkRecords::ResetClip>();
+}
+
+sk_sp<SkSurface> SkRecorder::onNewSurface(const SkImageInfo&, const SkSurfaceProps&) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkRecorder.h b/gfx/skia/skia/src/core/SkRecorder.h
new file mode 100644
index 0000000000..5ebab84f28
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecorder.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecorder_DEFINED
+#define SkRecorder_DEFINED
+
+#include "include/core/SkCanvasVirtualEnforcer.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkM44.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/utils/SkNoDrawCanvas.h"
+#include "src/core/SkBigPicture.h"
+
+#include <cstddef>
+#include <memory>
+#include <utility>
+
+class SkBlender;
+class SkData;
+class SkDrawable;
+class SkImage;
+class SkMatrix;
+class SkMesh;
+class SkPaint;
+class SkPath;
+class SkPicture;
+class SkRRect;
+class SkRecord;
+class SkRegion;
+class SkShader;
+class SkSurface;
+class SkSurfaceProps;
+class SkTextBlob;
+class SkVertices;
+enum class SkBlendMode;
+enum class SkClipOp;
+struct SkDrawShadowRec;
+struct SkImageInfo;
+struct SkPoint;
+struct SkRSXform;
+struct SkRect;
+
+namespace sktext {
+ class GlyphRunList;
+ namespace gpu { class Slug; }
+}
+
+class SkDrawableList : SkNoncopyable {
+public:
+ SkDrawableList() {}
+ ~SkDrawableList();
+
+ int count() const { return fArray.size(); }
+ SkDrawable* const* begin() const { return fArray.begin(); }
+ SkDrawable* const* end() const { return fArray.end(); }
+
+ void append(SkDrawable* drawable);
+
+ // Return a new or ref'd array of pictures that were snapped from our drawables.
+ SkBigPicture::SnapshotArray* newDrawableSnapshot();
+
+private:
+ SkTDArray<SkDrawable*> fArray;
+};
+
+// SkRecorder provides an SkCanvas interface for recording into an SkRecord.
+
+class SkRecorder final : public SkCanvasVirtualEnforcer<SkNoDrawCanvas> {
+public:
+ // Does not take ownership of the SkRecord.
+ SkRecorder(SkRecord*, int width, int height); // TODO: remove
+ SkRecorder(SkRecord*, const SkRect& bounds);
+
+ void reset(SkRecord*, const SkRect& bounds);
+
+ size_t approxBytesUsedBySubPictures() const { return fApproxBytesUsedBySubPictures; }
+
+ SkDrawableList* getDrawableList() const { return fDrawableList.get(); }
+ std::unique_ptr<SkDrawableList> detachDrawableList() { return std::move(fDrawableList); }
+
+ // Make SkRecorder forget entirely about its SkRecord*; all calls to SkRecorder will fail.
+ void forgetRecord();
+
+ void onFlush() override;
+
+ void willSave() override;
+ SaveLayerStrategy getSaveLayerStrategy(const SaveLayerRec&) override;
+ bool onDoSaveBehind(const SkRect*) override;
+ void willRestore() override {}
+ void didRestore() override;
+
+ void didConcat44(const SkM44&) override;
+ void didSetM44(const SkM44&) override;
+ void didScale(SkScalar, SkScalar) override;
+ void didTranslate(SkScalar, SkScalar) override;
+
+ void onDrawDRRect(const SkRRect&, const SkRRect&, const SkPaint&) override;
+ void onDrawDrawable(SkDrawable*, const SkMatrix*) override;
+ void onDrawTextBlob(const SkTextBlob* blob,
+ SkScalar x,
+ SkScalar y,
+ const SkPaint& paint) override;
+#if defined(SK_GANESH)
+ void onDrawSlug(const sktext::gpu::Slug* slug) override;
+#endif
+ void onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) override;
+ void onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode,
+ const SkPaint& paint) override;
+
+ void onDrawPaint(const SkPaint&) override;
+ void onDrawBehind(const SkPaint&) override;
+ void onDrawPoints(PointMode, size_t count, const SkPoint pts[], const SkPaint&) override;
+ void onDrawRect(const SkRect&, const SkPaint&) override;
+ void onDrawRegion(const SkRegion&, const SkPaint&) override;
+ void onDrawOval(const SkRect&, const SkPaint&) override;
+ void onDrawArc(const SkRect&, SkScalar, SkScalar, bool, const SkPaint&) override;
+ void onDrawRRect(const SkRRect&, const SkPaint&) override;
+ void onDrawPath(const SkPath&, const SkPaint&) override;
+
+ void onDrawImage2(const SkImage*, SkScalar, SkScalar, const SkSamplingOptions&,
+ const SkPaint*) override;
+ void onDrawImageRect2(const SkImage*, const SkRect&, const SkRect&, const SkSamplingOptions&,
+ const SkPaint*, SrcRectConstraint) override;
+ void onDrawImageLattice2(const SkImage*, const Lattice&, const SkRect&, SkFilterMode,
+ const SkPaint*) override;
+ void onDrawAtlas2(const SkImage*, const SkRSXform[], const SkRect[], const SkColor[], int,
+ SkBlendMode, const SkSamplingOptions&, const SkRect*, const SkPaint*) override;
+
+ void onDrawVerticesObject(const SkVertices*, SkBlendMode, const SkPaint&) override;
+#ifdef SK_ENABLE_SKSL
+ void onDrawMesh(const SkMesh&, sk_sp<SkBlender>, const SkPaint&) override;
+#endif
+ void onDrawShadowRec(const SkPath&, const SkDrawShadowRec&) override;
+
+ void onClipRect(const SkRect& rect, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect& rrect, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath& path, SkClipOp, ClipEdgeStyle) override;
+ void onClipShader(sk_sp<SkShader>, SkClipOp) override;
+ void onClipRegion(const SkRegion& deviceRgn, SkClipOp) override;
+ void onResetClip() override;
+
+ void onDrawPicture(const SkPicture*, const SkMatrix*, const SkPaint*) override;
+
+ void onDrawAnnotation(const SkRect&, const char[], SkData*) override;
+
+ void onDrawEdgeAAQuad(const SkRect&, const SkPoint[4], QuadAAFlags, const SkColor4f&,
+ SkBlendMode) override;
+ void onDrawEdgeAAImageSet2(const ImageSetEntry[], int count, const SkPoint[], const SkMatrix[],
+ const SkSamplingOptions&, const SkPaint*,
+ SrcRectConstraint) override;
+
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+private:
+ template <typename T>
+ T* copy(const T*);
+
+ template <typename T>
+ T* copy(const T[], size_t count);
+
+ template<typename T, typename... Args>
+ void append(Args&&...);
+
+ size_t fApproxBytesUsedBySubPictures;
+ SkRecord* fRecord;
+ std::unique_ptr<SkDrawableList> fDrawableList;
+};
+
+#endif//SkRecorder_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRecords.cpp b/gfx/skia/skia/src/core/SkRecords.cpp
new file mode 100644
index 0000000000..29eb939ef6
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecords.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRecords.h"
+
+namespace SkRecords {
+ PreCachedPath::PreCachedPath(const SkPath& path) : SkPath(path) {
+ this->updateBoundsCache();
+ (void)this->getGenerationID();
+#if 0 // Disabled to see if we ever really race on this. It costs time, chromium:496982.
+ SkPathPriv::FirstDirection junk;
+ (void)SkPathPriv::CheapComputeFirstDirection(*this, &junk);
+#endif
+ }
+
+ TypedMatrix::TypedMatrix(const SkMatrix& matrix) : SkMatrix(matrix) {
+ (void)this->getType();
+ }
+} // namespace SkRecords
diff --git a/gfx/skia/skia/src/core/SkRecords.h b/gfx/skia/skia/src/core/SkRecords.h
new file mode 100644
index 0000000000..4234077ea8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRecords.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRecords_DEFINED
+#define SkRecords_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkMesh.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkVertices.h"
+#include "src/core/SkDrawShadowInfo.h"
+
+#if defined(SK_GANESH)
+#include "include/private/chromium/Slug.h"
+#endif
+
+namespace SkRecords {
+
+// A list of all the types of canvas calls we can record.
+// Each of these is reified into a struct below.
+//
+// (We're using the macro-of-macro trick here to do several different things with the same list.)
+//
+// We leave this SK_RECORD_TYPES macro defined for use by code that wants to operate on SkRecords
+// types polymorphically. (See SkRecord::Record::{visit,mutate} for an example.)
+//
+// Order doesn't technically matter here, but the compiler can generally generate better code if
+// you keep them semantically grouped, especially the Draws. It's also nice to leave NoOp at 0.
+#define SK_RECORD_TYPES(M) \
+ M(NoOp) \
+ M(Flush) \
+ M(Restore) \
+ M(Save) \
+ M(SaveLayer) \
+ M(SaveBehind) \
+ M(SetMatrix) \
+ M(SetM44) \
+ M(Translate) \
+ M(Scale) \
+ M(Concat) \
+ M(Concat44) \
+ M(ClipPath) \
+ M(ClipRRect) \
+ M(ClipRect) \
+ M(ClipRegion) \
+ M(ClipShader) \
+ M(ResetClip) \
+ M(DrawArc) \
+ M(DrawDrawable) \
+ M(DrawImage) \
+ M(DrawImageLattice) \
+ M(DrawImageRect) \
+ M(DrawDRRect) \
+ M(DrawOval) \
+ M(DrawBehind) \
+ M(DrawPaint) \
+ M(DrawPath) \
+ M(DrawPatch) \
+ M(DrawPicture) \
+ M(DrawPoints) \
+ M(DrawRRect) \
+ M(DrawRect) \
+ M(DrawRegion) \
+ M(DrawTextBlob) \
+ M(DrawSlug) \
+ M(DrawAtlas) \
+ M(DrawVertices) \
+ M(DrawMesh) \
+ M(DrawShadowRec) \
+ M(DrawAnnotation) \
+ M(DrawEdgeAAQuad) \
+ M(DrawEdgeAAImageSet)
+
+
+// Defines SkRecords::Type, an enum of all record types.
+#define ENUM(T) T##_Type,
+enum Type { SK_RECORD_TYPES(ENUM) };
+#undef ENUM
+
+#define ACT_AS_PTR(ptr) \
+ operator T*() const { return ptr; } \
+ T* operator->() const { return ptr; }
+
+// An Optional doesn't own the pointer's memory, but may need to destroy non-POD data.
+template <typename T>
+class Optional {
+public:
+ Optional() : fPtr(nullptr) {}
+ Optional(T* ptr) : fPtr(ptr) {}
+ Optional(Optional&& o) : fPtr(o.fPtr) {
+ o.fPtr = nullptr;
+ }
+ ~Optional() { if (fPtr) fPtr->~T(); }
+
+ ACT_AS_PTR(fPtr)
+private:
+ T* fPtr;
+ Optional(const Optional&) = delete;
+ Optional& operator=(const Optional&) = delete;
+};
+
+// PODArray doesn't own the pointer's memory, and we assume the data is POD.
+template <typename T>
+class PODArray {
+public:
+ PODArray() {}
+ PODArray(T* ptr) : fPtr(ptr) {}
+ // Default copy and assign.
+
+ ACT_AS_PTR(fPtr)
+private:
+ T* fPtr;
+};
+
+#undef ACT_AS_PTR
+
+// SkPath::getBounds() isn't thread-safe unless we precache the bounds in a single-threaded context.
+// SkPath::cheapComputeDirection() is similar.
+// Recording is a convenient time to cache these, or we can delay it to between record and playback.
+struct PreCachedPath : public SkPath {
+ PreCachedPath() {}
+ PreCachedPath(const SkPath& path);
+};
+
+// Like SkPath::getBounds(), SkMatrix::getType() isn't thread safe unless we precache it.
+// This may not cover all SkMatrices used by the picture (e.g. some could be hiding in a shader).
+struct TypedMatrix : public SkMatrix {
+ TypedMatrix() {}
+ TypedMatrix(const SkMatrix& matrix);
+};
+
+enum Tags {
+ kDraw_Tag = 1, // May draw something (usually named DrawFoo).
+ kHasImage_Tag = 2, // Contains an SkImage or SkBitmap.
+ kHasText_Tag = 4, // Contains text.
+ kHasPaint_Tag = 8, // May have an SkPaint field, at least optionally.
+
+ kDrawWithPaint_Tag = kDraw_Tag | kHasPaint_Tag,
+};
+
+// A macro to make it a little easier to define a struct that can be stored in SkRecord.
+#define RECORD(T, tags, ...) \
+struct T { \
+ static const Type kType = T##_Type; \
+ static const int kTags = tags; \
+ __VA_ARGS__; \
+};
+
+RECORD(NoOp, 0)
+RECORD(Flush, 0)
+RECORD(Restore, 0,
+ TypedMatrix matrix)
+RECORD(Save, 0)
+
+RECORD(SaveLayer, kHasPaint_Tag,
+ Optional<SkRect> bounds;
+ Optional<SkPaint> paint;
+ sk_sp<const SkImageFilter> backdrop;
+ SkCanvas::SaveLayerFlags saveLayerFlags;
+ SkScalar backdropScale)
+
+RECORD(SaveBehind, 0,
+ Optional<SkRect> subset)
+
+RECORD(SetMatrix, 0,
+ TypedMatrix matrix)
+RECORD(SetM44, 0,
+ SkM44 matrix)
+RECORD(Concat, 0,
+ TypedMatrix matrix)
+RECORD(Concat44, 0,
+ SkM44 matrix)
+
+RECORD(Translate, 0,
+ SkScalar dx;
+ SkScalar dy)
+
+RECORD(Scale, 0,
+ SkScalar sx;
+ SkScalar sy)
+
+struct ClipOpAndAA {
+ ClipOpAndAA() {}
+ ClipOpAndAA(SkClipOp op, bool aa) : fOp(static_cast<unsigned>(op)), fAA(aa) {}
+
+ SkClipOp op() const { return static_cast<SkClipOp>(fOp); }
+ bool aa() const { return fAA != 0; }
+
+private:
+ unsigned fOp : 31; // This really only needs to be 3, but there's no win today to do so.
+ unsigned fAA : 1; // MSVC won't pack an enum with a bool, so we call this an unsigned.
+};
+static_assert(sizeof(ClipOpAndAA) == 4, "ClipOpAndAASize");
+
+RECORD(ClipPath, 0,
+ PreCachedPath path;
+ ClipOpAndAA opAA)
+RECORD(ClipRRect, 0,
+ SkRRect rrect;
+ ClipOpAndAA opAA)
+RECORD(ClipRect, 0,
+ SkRect rect;
+ ClipOpAndAA opAA)
+RECORD(ClipRegion, 0,
+ SkRegion region;
+ SkClipOp op)
+RECORD(ClipShader, 0,
+ sk_sp<SkShader> shader;
+ SkClipOp op)
+RECORD(ResetClip, 0)
+
+// While not strictly required, if you have an SkPaint, it's fastest to put it first.
+RECORD(DrawArc, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect oval;
+ SkScalar startAngle;
+ SkScalar sweepAngle;
+ unsigned useCenter)
+RECORD(DrawDRRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRRect outer;
+ SkRRect inner)
+RECORD(DrawDrawable, kDraw_Tag,
+ Optional<SkMatrix> matrix;
+ SkRect worstCaseBounds;
+ int32_t index)
+RECORD(DrawImage, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ SkScalar left;
+ SkScalar top;
+ SkSamplingOptions sampling)
+RECORD(DrawImageLattice, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ int xCount;
+ PODArray<int> xDivs;
+ int yCount;
+ PODArray<int> yDivs;
+ int flagCount;
+ PODArray<SkCanvas::Lattice::RectType> flags;
+ PODArray<SkColor> colors;
+ SkIRect src;
+ SkRect dst;
+ SkFilterMode filter)
+RECORD(DrawImageRect, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> image;
+ SkRect src;
+ SkRect dst;
+ SkSamplingOptions sampling;
+ SkCanvas::SrcRectConstraint constraint)
+RECORD(DrawOval, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect oval)
+RECORD(DrawPaint, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint)
+RECORD(DrawBehind, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint)
+RECORD(DrawPath, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PreCachedPath path)
+RECORD(DrawPicture, kDraw_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkPicture> picture;
+ TypedMatrix matrix)
+RECORD(DrawPoints, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkCanvas::PointMode mode;
+ unsigned count;
+ PODArray<SkPoint> pts)
+RECORD(DrawRRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRRect rrect)
+RECORD(DrawRect, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRect rect)
+RECORD(DrawRegion, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkRegion region)
+RECORD(DrawTextBlob, kDraw_Tag|kHasText_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ sk_sp<const SkTextBlob> blob;
+ SkScalar x;
+ SkScalar y)
+#if defined(SK_GANESH)
+RECORD(DrawSlug, kDraw_Tag|kHasText_Tag,
+ sk_sp<const sktext::gpu::Slug> slug)
+#else
+RECORD(DrawSlug, 0)
+#endif
+RECORD(DrawPatch, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ PODArray<SkPoint> cubics;
+ PODArray<SkColor> colors;
+ PODArray<SkPoint> texCoords;
+ SkBlendMode bmode)
+RECORD(DrawAtlas, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ sk_sp<const SkImage> atlas;
+ PODArray<SkRSXform> xforms;
+ PODArray<SkRect> texs;
+ PODArray<SkColor> colors;
+ int count;
+ SkBlendMode mode;
+ SkSamplingOptions sampling;
+ Optional<SkRect> cull)
+RECORD(DrawVertices, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ sk_sp<SkVertices> vertices;
+ SkBlendMode bmode)
+#ifdef SK_ENABLE_SKSL
+RECORD(DrawMesh, kDraw_Tag|kHasPaint_Tag,
+ SkPaint paint;
+ SkMesh mesh;
+ sk_sp<SkBlender> blender)
+#else
+RECORD(DrawMesh, 0)
+#endif
+RECORD(DrawShadowRec, kDraw_Tag,
+ PreCachedPath path;
+ SkDrawShadowRec rec)
+RECORD(DrawAnnotation, 0, // TODO: kDraw_Tag, skia:5548
+ SkRect rect;
+ SkString key;
+ sk_sp<SkData> value)
+RECORD(DrawEdgeAAQuad, kDraw_Tag,
+ SkRect rect;
+ PODArray<SkPoint> clip;
+ SkCanvas::QuadAAFlags aa;
+ SkColor4f color;
+ SkBlendMode mode)
+RECORD(DrawEdgeAAImageSet, kDraw_Tag|kHasImage_Tag|kHasPaint_Tag,
+ Optional<SkPaint> paint;
+ skia_private::AutoTArray<SkCanvas::ImageSetEntry> set;
+ int count;
+ PODArray<SkPoint> dstClips;
+ PODArray<SkMatrix> preViewMatrices;
+ SkSamplingOptions sampling;
+ SkCanvas::SrcRectConstraint constraint)
+#undef RECORD
+
+} // namespace SkRecords
+
+#endif//SkRecords_DEFINED
diff --git a/gfx/skia/skia/src/core/SkRect.cpp b/gfx/skia/skia/src/core/SkRect.cpp
new file mode 100644
index 0000000000..254aab27ce
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRect.cpp
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRect.h"
+
+#include "include/core/SkM44.h"
+#include "include/private/base/SkDebug.h"
+#include "src/core/SkRectPriv.h"
+
+class SkMatrix;
+
+bool SkIRect::intersect(const SkIRect& a, const SkIRect& b) {
+ SkIRect tmp = {
+ std::max(a.fLeft, b.fLeft),
+ std::max(a.fTop, b.fTop),
+ std::min(a.fRight, b.fRight),
+ std::min(a.fBottom, b.fBottom)
+ };
+ if (tmp.isEmpty()) {
+ return false;
+ }
+ *this = tmp;
+ return true;
+}
+
+void SkIRect::join(const SkIRect& r) {
+ // do nothing if the params are empty
+ if (r.fLeft >= r.fRight || r.fTop >= r.fBottom) {
+ return;
+ }
+
+ // if we are empty, just assign
+ if (fLeft >= fRight || fTop >= fBottom) {
+ *this = r;
+ } else {
+ if (r.fLeft < fLeft) fLeft = r.fLeft;
+ if (r.fTop < fTop) fTop = r.fTop;
+ if (r.fRight > fRight) fRight = r.fRight;
+ if (r.fBottom > fBottom) fBottom = r.fBottom;
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+void SkRect::toQuad(SkPoint quad[4]) const {
+ SkASSERT(quad);
+
+ quad[0].set(fLeft, fTop);
+ quad[1].set(fRight, fTop);
+ quad[2].set(fRight, fBottom);
+ quad[3].set(fLeft, fBottom);
+}
+
+#include "src/base/SkVx.h"
+
+bool SkRect::setBoundsCheck(const SkPoint pts[], int count) {
+ SkASSERT((pts && count > 0) || count == 0);
+
+ if (count <= 0) {
+ this->setEmpty();
+ return true;
+ }
+
+ skvx::float4 min, max;
+ if (count & 1) {
+ min = max = skvx::float2::Load(pts).xyxy();
+ pts += 1;
+ count -= 1;
+ } else {
+ min = max = skvx::float4::Load(pts);
+ pts += 2;
+ count -= 2;
+ }
+
+ skvx::float4 accum = min * 0;
+ while (count) {
+ skvx::float4 xy = skvx::float4::Load(pts);
+ accum = accum * xy;
+ min = skvx::min(min, xy);
+ max = skvx::max(max, xy);
+ pts += 2;
+ count -= 2;
+ }
+
+ const bool all_finite = all(accum * 0 == 0);
+ if (all_finite) {
+ this->setLTRB(std::min(min[0], min[2]), std::min(min[1], min[3]),
+ std::max(max[0], max[2]), std::max(max[1], max[3]));
+ } else {
+ this->setEmpty();
+ }
+ return all_finite;
+}
+
+void SkRect::setBoundsNoCheck(const SkPoint pts[], int count) {
+ if (!this->setBoundsCheck(pts, count)) {
+ this->setLTRB(SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN, SK_ScalarNaN);
+ }
+}
+
+#define CHECK_INTERSECT(al, at, ar, ab, bl, bt, br, bb) \
+ SkScalar L = std::max(al, bl); \
+ SkScalar R = std::min(ar, br); \
+ SkScalar T = std::max(at, bt); \
+ SkScalar B = std::min(ab, bb); \
+ do { if (!(L < R && T < B)) return false; } while (0)
+ // do the !(opposite) check so we return false if either arg is NaN
+
+bool SkRect::intersect(const SkRect& r) {
+ CHECK_INTERSECT(r.fLeft, r.fTop, r.fRight, r.fBottom, fLeft, fTop, fRight, fBottom);
+ this->setLTRB(L, T, R, B);
+ return true;
+}
+
+bool SkRect::intersect(const SkRect& a, const SkRect& b) {
+ CHECK_INTERSECT(a.fLeft, a.fTop, a.fRight, a.fBottom, b.fLeft, b.fTop, b.fRight, b.fBottom);
+ this->setLTRB(L, T, R, B);
+ return true;
+}
+
+void SkRect::join(const SkRect& r) {
+ if (r.isEmpty()) {
+ return;
+ }
+
+ if (this->isEmpty()) {
+ *this = r;
+ } else {
+ fLeft = std::min(fLeft, r.fLeft);
+ fTop = std::min(fTop, r.fTop);
+ fRight = std::max(fRight, r.fRight);
+ fBottom = std::max(fBottom, r.fBottom);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkString.h"
+#include "src/core/SkStringUtils.h"
+
+static const char* set_scalar(SkString* storage, SkScalar value, SkScalarAsStringType asType) {
+ storage->reset();
+ SkAppendScalar(storage, value, asType);
+ return storage->c_str();
+}
+
+void SkRect::dump(bool asHex) const {
+ SkScalarAsStringType asType = asHex ? kHex_SkScalarAsStringType : kDec_SkScalarAsStringType;
+
+ SkString line;
+ if (asHex) {
+ SkString tmp;
+ line.printf( "SkRect::MakeLTRB(%s, /* %f */\n", set_scalar(&tmp, fLeft, asType), fLeft);
+ line.appendf(" %s, /* %f */\n", set_scalar(&tmp, fTop, asType), fTop);
+ line.appendf(" %s, /* %f */\n", set_scalar(&tmp, fRight, asType), fRight);
+ line.appendf(" %s /* %f */);", set_scalar(&tmp, fBottom, asType), fBottom);
+ } else {
+ SkString strL, strT, strR, strB;
+ SkAppendScalarDec(&strL, fLeft);
+ SkAppendScalarDec(&strT, fTop);
+ SkAppendScalarDec(&strR, fRight);
+ SkAppendScalarDec(&strB, fBottom);
+ line.printf("SkRect::MakeLTRB(%s, %s, %s, %s);",
+ strL.c_str(), strT.c_str(), strR.c_str(), strB.c_str());
+ }
+ SkDebugf("%s\n", line.c_str());
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<typename R>
+static bool subtract(const R& a, const R& b, R* out) {
+ if (a.isEmpty() || b.isEmpty() || !R::Intersects(a, b)) {
+ // Either already empty, or subtracting the empty rect, or there's no intersection, so
+ // in all cases the answer is A.
+ *out = a;
+ return true;
+ }
+
+ // 4 rectangles to consider. If the edge in A is contained in B, the resulting difference can
+ // be represented exactly as a rectangle. Otherwise the difference is the largest subrectangle
+ // that is disjoint from B:
+ // 1. Left part of A: (A.left, A.top, B.left, A.bottom)
+ // 2. Right part of A: (B.right, A.top, A.right, A.bottom)
+ // 3. Top part of A: (A.left, A.top, A.right, B.top)
+ // 4. Bottom part of A: (A.left, B.bottom, A.right, A.bottom)
+ //
+ // Depending on how B intersects A, there will be 1 to 4 positive areas:
+ // - 4 occur when A contains B
+ // - 3 occur when B intersects a single edge
+ // - 2 occur when B intersects at a corner, or spans two opposing edges
+ // - 1 occurs when B spans two opposing edges and contains a 3rd, resulting in an exact rect
+ // - 0 occurs when B contains A, resulting in the empty rect
+ //
+ // Compute the relative areas of the 4 rects described above. Since each subrectangle shares
+ // either the width or height of A, we only have to divide by the other dimension, which avoids
+ // overflow on int32 types, and even if the float relative areas overflow to infinity, the
+ // comparisons work out correctly and (one of) the infinitely large subrects will be chosen.
+ float aHeight = (float) a.height();
+ float aWidth = (float) a.width();
+ float leftArea = 0.f, rightArea = 0.f, topArea = 0.f, bottomArea = 0.f;
+ int positiveCount = 0;
+ if (b.fLeft > a.fLeft) {
+ leftArea = (b.fLeft - a.fLeft) / aWidth;
+ positiveCount++;
+ }
+ if (a.fRight > b.fRight) {
+ rightArea = (a.fRight - b.fRight) / aWidth;
+ positiveCount++;
+ }
+ if (b.fTop > a.fTop) {
+ topArea = (b.fTop - a.fTop) / aHeight;
+ positiveCount++;
+ }
+ if (a.fBottom > b.fBottom) {
+ bottomArea = (a.fBottom - b.fBottom) / aHeight;
+ positiveCount++;
+ }
+
+ if (positiveCount == 0) {
+ SkASSERT(b.contains(a));
+ *out = R::MakeEmpty();
+ return true;
+ }
+
+ *out = a;
+ if (leftArea > rightArea && leftArea > topArea && leftArea > bottomArea) {
+ // Left chunk of A, so the new right edge is B's left edge
+ out->fRight = b.fLeft;
+ } else if (rightArea > topArea && rightArea > bottomArea) {
+ // Right chunk of A, so the new left edge is B's right edge
+ out->fLeft = b.fRight;
+ } else if (topArea > bottomArea) {
+ // Top chunk of A, so the new bottom edge is B's top edge
+ out->fBottom = b.fTop;
+ } else {
+ // Bottom chunk of A, so the new top edge is B's bottom edge
+ SkASSERT(bottomArea > 0.f);
+ out->fTop = b.fBottom;
+ }
+
+ // If we have 1 valid area, the disjoint shape is representable as a rectangle.
+ SkASSERT(!R::Intersects(*out, b));
+ return positiveCount == 1;
+}
+
+bool SkRectPriv::Subtract(const SkRect& a, const SkRect& b, SkRect* out) {
+ return subtract<SkRect>(a, b, out);
+}
+
+bool SkRectPriv::Subtract(const SkIRect& a, const SkIRect& b, SkIRect* out) {
+ return subtract<SkIRect>(a, b, out);
+}
+
+
+bool SkRectPriv::QuadContainsRect(const SkMatrix& m, const SkIRect& a, const SkIRect& b) {
+ return QuadContainsRect(SkM44(m), SkRect::Make(a), SkRect::Make(b));
+}
+
+bool SkRectPriv::QuadContainsRect(const SkM44& m, const SkRect& a, const SkRect& b) {
+ SkDEBUGCODE(SkM44 inverse;)
+ SkASSERT(m.invert(&inverse));
+ // With empty rectangles, the calculated edges could give surprising results. If 'a' were not
+ // sorted, its normals would point outside the sorted rectangle, so lots of potential rects
+ // would be seen as "contained". If 'a' is all 0s, its edge equations are also (0,0,0) so every
+ // point has a distance of 0, and would be interpreted as inside.
+ if (a.isEmpty()) {
+ return false;
+ }
+ // However, 'b' is only used to define its 4 corners to check against the transformed edges.
+ // This is valid regardless of b's emptiness or sortedness.
+
+ // Calculate the 4 homogenous coordinates of 'a' transformed by 'm' where Z=0 and W=1.
+ auto ax = skvx::float4{a.fLeft, a.fRight, a.fRight, a.fLeft};
+ auto ay = skvx::float4{a.fTop, a.fTop, a.fBottom, a.fBottom};
+
+ auto max = m.rc(0,0)*ax + m.rc(0,1)*ay + m.rc(0,3);
+ auto may = m.rc(1,0)*ax + m.rc(1,1)*ay + m.rc(1,3);
+ auto maw = m.rc(3,0)*ax + m.rc(3,1)*ay + m.rc(3,3);
+
+ if (all(maw < 0.f)) {
+ // If all points of A are mapped to w < 0, then the edge equations end up representing the
+ // convex hull of projected points when A should in fact be considered empty.
+ return false;
+ }
+
+ // Cross product of adjacent vertices provides homogenous lines for the 4 sides of the quad
+ auto lA = may*skvx::shuffle<1,2,3,0>(maw) - maw*skvx::shuffle<1,2,3,0>(may);
+ auto lB = maw*skvx::shuffle<1,2,3,0>(max) - max*skvx::shuffle<1,2,3,0>(maw);
+ auto lC = max*skvx::shuffle<1,2,3,0>(may) - may*skvx::shuffle<1,2,3,0>(max);
+
+ // Before transforming, the corners of 'a' were in CW order, but afterwards they may become CCW,
+ // so the sign corrects the direction of the edge normals to point inwards.
+ float sign = (lA[0]*lB[1] - lB[0]*lA[1]) < 0 ? -1.f : 1.f;
+
+ // Calculate distance from 'b' to each edge. Since 'b' has presumably been transformed by 'm'
+ // *and* projected, this assumes W = 1.
+ auto d0 = sign * (lA*b.fLeft + lB*b.fTop + lC);
+ auto d1 = sign * (lA*b.fRight + lB*b.fTop + lC);
+ auto d2 = sign * (lA*b.fRight + lB*b.fBottom + lC);
+ auto d3 = sign * (lA*b.fLeft + lB*b.fBottom + lC);
+
+ // 'b' is contained in the mapped rectangle if all distances are >= 0
+ return all((d0 >= 0.f) & (d1 >= 0.f) & (d2 >= 0.f) & (d3 >= 0.f));
+}
diff --git a/gfx/skia/skia/src/core/SkRectPriv.h b/gfx/skia/skia/src/core/SkRectPriv.h
new file mode 100644
index 0000000000..d4ac12461f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRectPriv.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRectPriv_DEFINED
+#define SkRectPriv_DEFINED
+
+#include "include/core/SkRect.h"
+#include "src/base/SkMathPriv.h"
+
+class SkM44;
+class SkMatrix;
+
+class SkRectPriv {
+public:
+    // Returns an irect that is very large, and can be safely round-tripped through SkRect and
+    // still be considered non-empty (i.e. width/height > 0) even if we round-out the SkRect.
+ static SkIRect MakeILarge() {
+ // SK_MaxS32 >> 1 seemed better, but it did not survive round-trip with SkRect and rounding.
+ // Also, 1 << 29 can be perfectly represented in float, while SK_MaxS32 >> 1 cannot.
+ const int32_t large = 1 << 29;
+ return { -large, -large, large, large };
+ }
+
+ static SkIRect MakeILargestInverted() {
+ return { SK_MaxS32, SK_MaxS32, SK_MinS32, SK_MinS32 };
+ }
+
+ static SkRect MakeLargeS32() {
+ SkRect r;
+ r.set(MakeILarge());
+ return r;
+ }
+
+ static SkRect MakeLargest() {
+ return { SK_ScalarMin, SK_ScalarMin, SK_ScalarMax, SK_ScalarMax };
+ }
+
+ static constexpr SkRect MakeLargestInverted() {
+ return { SK_ScalarMax, SK_ScalarMax, SK_ScalarMin, SK_ScalarMin };
+ }
+
+ static void GrowToInclude(SkRect* r, const SkPoint& pt) {
+ r->fLeft = std::min(pt.fX, r->fLeft);
+ r->fRight = std::max(pt.fX, r->fRight);
+ r->fTop = std::min(pt.fY, r->fTop);
+ r->fBottom = std::max(pt.fY, r->fBottom);
+ }
+
+ // Conservative check if r can be expressed in fixed-point.
+ // Will return false for very large values that might have fit
+ static bool FitsInFixed(const SkRect& r) {
+ return SkFitsInFixed(r.fLeft) && SkFitsInFixed(r.fTop) &&
+ SkFitsInFixed(r.fRight) && SkFitsInFixed(r.fBottom);
+ }
+
+ static bool Is16Bit(const SkIRect& r) {
+ return SkTFitsIn<int16_t>(r.fLeft) && SkTFitsIn<int16_t>(r.fTop) &&
+ SkTFitsIn<int16_t>(r.fRight) && SkTFitsIn<int16_t>(r.fBottom);
+ }
+
+ // Returns r.width()/2 but divides first to avoid width() overflowing.
+ static SkScalar HalfWidth(const SkRect& r) {
+ return SkScalarHalf(r.fRight) - SkScalarHalf(r.fLeft);
+ }
+ // Returns r.height()/2 but divides first to avoid height() overflowing.
+ static SkScalar HalfHeight(const SkRect& r) {
+ return SkScalarHalf(r.fBottom) - SkScalarHalf(r.fTop);
+ }
+
+ // Evaluate A-B. If the difference shape cannot be represented as a rectangle then false is
+ // returned and 'out' is set to the largest rectangle contained in said shape. If true is
+ // returned then A-B is representable as a rectangle, which is stored in 'out'.
+ static bool Subtract(const SkRect& a, const SkRect& b, SkRect* out);
+ static bool Subtract(const SkIRect& a, const SkIRect& b, SkIRect* out);
+
+ // Evaluate A-B, and return the largest rectangle contained in that shape (since the difference
+ // may not be representable as rectangle). The returned rectangle will not intersect B.
+ static SkRect Subtract(const SkRect& a, const SkRect& b) {
+ SkRect diff;
+ Subtract(a, b, &diff);
+ return diff;
+ }
+ static SkIRect Subtract(const SkIRect& a, const SkIRect& b) {
+ SkIRect diff;
+ Subtract(a, b, &diff);
+ return diff;
+ }
+
+ // Returns true if the quadrilateral formed by transforming the four corners of 'a' contains 'b'
+ static bool QuadContainsRect(const SkMatrix& m, const SkIRect& a, const SkIRect& b);
+ static bool QuadContainsRect(const SkM44& m, const SkRect& a, const SkRect& b);
+};
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRegion.cpp b/gfx/skia/skia/src/core/SkRegion.cpp
new file mode 100644
index 0000000000..780a71c9ba
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegion.cpp
@@ -0,0 +1,1584 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRegion.h"
+
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkRegionPriv.h"
+
+#include <algorithm>
+#include <utility>
+
+using namespace skia_private;
+
+/* Region Layout
+ *
+ * TOP
+ *
+ * [ Bottom, X-Intervals, [Left, Right]..., X-Sentinel ]
+ * ...
+ *
+ * Y-Sentinel
+ */
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define SkRegion_gEmptyRunHeadPtr ((SkRegionPriv::RunHead*)-1)
+#define SkRegion_gRectRunHeadPtr nullptr
+
+constexpr int kRunArrayStackCount = 256;
+
+// This is a simple data structure which is like a SkSTArray<N,T,true>, except that:
+// - It does not initialize memory.
+// - It does not distinguish between reserved space and initialized space.
+// - resizeToAtLeast() instead of resize()
+// - Uses sk_realloc_throw()
+// - Can never be made smaller.
+// Measurement: for the `region_union_16` benchmark, this is 6% faster.
+class RunArray {
+public:
+ RunArray() { fPtr = fStack; }
+ #ifdef SK_DEBUG
+ int count() const { return fCount; }
+ #endif
+ SkRegionPriv::RunType& operator[](int i) {
+ SkASSERT((unsigned)i < (unsigned)fCount);
+ return fPtr[i];
+ }
+ /** Resize the array to a size greater-than-or-equal-to count. */
+ void resizeToAtLeast(int count) {
+ if (count > fCount) {
+ // leave at least 50% extra space for future growth.
+ count += count >> 1;
+ fMalloc.realloc(count);
+ if (fPtr == fStack) {
+ memcpy(fMalloc.get(), fStack, fCount * sizeof(SkRegionPriv::RunType));
+ }
+ fPtr = fMalloc.get();
+ fCount = count;
+ }
+ }
+private:
+ SkRegionPriv::RunType fStack[kRunArrayStackCount];
+ AutoTMalloc<SkRegionPriv::RunType> fMalloc;
+ int fCount = kRunArrayStackCount;
+ SkRegionPriv::RunType* fPtr; // non-owning pointer
+};
+
+/* Pass in the beginning with the intervals.
+ * We back up 1 to read the interval-count.
+ * Return the beginning of the next scanline (i.e. the next Y-value)
+ */
+static SkRegionPriv::RunType* skip_intervals(const SkRegionPriv::RunType runs[]) {
+ int intervals = runs[-1];
+#ifdef SK_DEBUG
+ if (intervals > 0) {
+ SkASSERT(runs[0] < runs[1]);
+ SkASSERT(runs[1] < SkRegion_kRunTypeSentinel);
+ } else {
+ SkASSERT(0 == intervals);
+ SkASSERT(SkRegion_kRunTypeSentinel == runs[0]);
+ }
+#endif
+ runs += intervals * 2 + 1;
+ return const_cast<SkRegionPriv::RunType*>(runs);
+}
+
+bool SkRegion::RunsAreARect(const SkRegion::RunType runs[], int count,
+ SkIRect* bounds) {
+ assert_sentinel(runs[0], false); // top
+ SkASSERT(count >= kRectRegionRuns);
+
+ if (count == kRectRegionRuns) {
+ assert_sentinel(runs[1], false); // bottom
+ SkASSERT(1 == runs[2]);
+ assert_sentinel(runs[3], false); // left
+ assert_sentinel(runs[4], false); // right
+ assert_sentinel(runs[5], true);
+ assert_sentinel(runs[6], true);
+
+ SkASSERT(runs[0] < runs[1]); // valid height
+ SkASSERT(runs[3] < runs[4]); // valid width
+
+ bounds->setLTRB(runs[3], runs[0], runs[4], runs[1]);
+ return true;
+ }
+ return false;
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+SkRegion::SkRegion() {
+ fBounds.setEmpty();
+ fRunHead = SkRegion_gEmptyRunHeadPtr;
+}
+
+SkRegion::SkRegion(const SkRegion& src) {
+ fRunHead = SkRegion_gEmptyRunHeadPtr; // just need a value that won't trigger sk_free(fRunHead)
+ this->setRegion(src);
+}
+
+SkRegion::SkRegion(const SkIRect& rect) {
+ fRunHead = SkRegion_gEmptyRunHeadPtr; // just need a value that won't trigger sk_free(fRunHead)
+ this->setRect(rect);
+}
+
+SkRegion::~SkRegion() {
+ this->freeRuns();
+}
+
+void SkRegion::freeRuns() {
+ if (this->isComplex()) {
+ SkASSERT(fRunHead->fRefCnt >= 1);
+ if (--fRunHead->fRefCnt == 0) {
+ sk_free(fRunHead);
+ }
+ }
+}
+
+void SkRegion::allocateRuns(int count, int ySpanCount, int intervalCount) {
+ fRunHead = RunHead::Alloc(count, ySpanCount, intervalCount);
+}
+
+void SkRegion::allocateRuns(int count) {
+ fRunHead = RunHead::Alloc(count);
+}
+
+void SkRegion::allocateRuns(const RunHead& head) {
+ fRunHead = RunHead::Alloc(head.fRunCount,
+ head.getYSpanCount(),
+ head.getIntervalCount());
+}
+
+SkRegion& SkRegion::operator=(const SkRegion& src) {
+ (void)this->setRegion(src);
+ return *this;
+}
+
+void SkRegion::swap(SkRegion& other) {
+ using std::swap;
+ swap(fBounds, other.fBounds);
+ swap(fRunHead, other.fRunHead);
+}
+
+int SkRegion::computeRegionComplexity() const {
+ if (this->isEmpty()) {
+ return 0;
+ } else if (this->isRect()) {
+ return 1;
+ }
+ return fRunHead->getIntervalCount();
+}
+
+bool SkRegion::setEmpty() {
+ this->freeRuns();
+ fBounds.setEmpty();
+ fRunHead = SkRegion_gEmptyRunHeadPtr;
+ return false;
+}
+
+bool SkRegion::setRect(const SkIRect& r) {
+ if (r.isEmpty() ||
+ SkRegion_kRunTypeSentinel == r.right() ||
+ SkRegion_kRunTypeSentinel == r.bottom()) {
+ return this->setEmpty();
+ }
+ this->freeRuns();
+ fBounds = r;
+ fRunHead = SkRegion_gRectRunHeadPtr;
+ return true;
+}
+
+bool SkRegion::setRegion(const SkRegion& src) {
+ if (this != &src) {
+ this->freeRuns();
+
+ fBounds = src.fBounds;
+ fRunHead = src.fRunHead;
+ if (this->isComplex()) {
+ fRunHead->fRefCnt++;
+ }
+ }
+ return fRunHead != SkRegion_gEmptyRunHeadPtr;
+}
+
+bool SkRegion::op(const SkIRect& rect, const SkRegion& rgn, Op op) {
+ SkRegion tmp(rect);
+
+ return this->op(tmp, rgn, op);
+}
+
+bool SkRegion::op(const SkRegion& rgn, const SkIRect& rect, Op op) {
+ SkRegion tmp(rect);
+
+ return this->op(rgn, tmp, op);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+#include <stdio.h>
+char* SkRegion::toString() {
+ Iterator iter(*this);
+ int count = 0;
+ while (!iter.done()) {
+ count++;
+ iter.next();
+ }
+ // 4 ints, up to 10 digits each plus sign, 3 commas, '(', ')', SkRegion() and '\0'
+ const int max = (count*((11*4)+5))+11+1;
+ char* result = (char*)sk_malloc_throw(max);
+ if (result == nullptr) {
+ return nullptr;
+ }
+ count = snprintf(result, max, "SkRegion(");
+ iter.reset(*this);
+ while (!iter.done()) {
+ const SkIRect& r = iter.rect();
+ count += snprintf(result+count, max - count,
+ "(%d,%d,%d,%d)", r.fLeft, r.fTop, r.fRight, r.fBottom);
+ iter.next();
+ }
+ count += snprintf(result+count, max - count, ")");
+ return result;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkRegion::count_runtype_values(int* itop, int* ibot) const {
+ int maxT;
+
+ if (this->isRect()) {
+ maxT = 2;
+ } else {
+ SkASSERT(this->isComplex());
+ maxT = fRunHead->getIntervalCount() * 2;
+ }
+ *itop = fBounds.fTop;
+ *ibot = fBounds.fBottom;
+ return maxT;
+}
+
+static bool isRunCountEmpty(int count) {
+ return count <= 2;
+}
+
+bool SkRegion::setRuns(RunType runs[], int count) {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ SkASSERT(count > 0);
+
+ if (isRunCountEmpty(count)) {
+ // SkDEBUGF("setRuns: empty\n");
+ assert_sentinel(runs[count-1], true);
+ return this->setEmpty();
+ }
+
+ // trim off any empty spans from the top and bottom
+ // weird I should need this, perhaps op() could be smarter...
+ if (count > kRectRegionRuns) {
+ RunType* stop = runs + count;
+ assert_sentinel(runs[0], false); // top
+ assert_sentinel(runs[1], false); // bottom
+ // runs[2] is uncomputed intervalCount
+
+ if (runs[3] == SkRegion_kRunTypeSentinel) { // should be first left...
+ runs += 3; // skip empty initial span
+ runs[0] = runs[-2]; // set new top to prev bottom
+            assert_sentinel(runs[1], false);    // bot: a sentinel would mean two in a row
+ assert_sentinel(runs[2], false); // intervalcount
+ assert_sentinel(runs[3], false); // left
+ assert_sentinel(runs[4], false); // right
+ }
+
+ assert_sentinel(stop[-1], true);
+ assert_sentinel(stop[-2], true);
+
+ // now check for a trailing empty span
+ if (stop[-5] == SkRegion_kRunTypeSentinel) { // eek, stop[-4] was a bottom with no x-runs
+ stop[-4] = SkRegion_kRunTypeSentinel; // kill empty last span
+ stop -= 3;
+ assert_sentinel(stop[-1], true); // last y-sentinel
+ assert_sentinel(stop[-2], true); // last x-sentinel
+ assert_sentinel(stop[-3], false); // last right
+ assert_sentinel(stop[-4], false); // last left
+ assert_sentinel(stop[-5], false); // last interval-count
+ assert_sentinel(stop[-6], false); // last bottom
+ }
+ count = (int)(stop - runs);
+ }
+
+ SkASSERT(count >= kRectRegionRuns);
+
+ if (SkRegion::RunsAreARect(runs, count, &fBounds)) {
+ return this->setRect(fBounds);
+ }
+
+ // if we get here, we need to become a complex region
+
+ if (!this->isComplex() || fRunHead->fRunCount != count) {
+ this->freeRuns();
+ this->allocateRuns(count);
+ SkASSERT(this->isComplex());
+ }
+
+ // must call this before we can write directly into runs()
+ // in case we are sharing the buffer with another region (copy on write)
+ fRunHead = fRunHead->ensureWritable();
+ memcpy(fRunHead->writable_runs(), runs, count * sizeof(RunType));
+ fRunHead->computeRunBounds(&fBounds);
+
+ // Our computed bounds might be too large, so we have to check here.
+ if (fBounds.isEmpty()) {
+ return this->setEmpty();
+ }
+
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ return true;
+}
+
+void SkRegion::BuildRectRuns(const SkIRect& bounds,
+ RunType runs[kRectRegionRuns]) {
+ runs[0] = bounds.fTop;
+ runs[1] = bounds.fBottom;
+ runs[2] = 1; // 1 interval for this scanline
+ runs[3] = bounds.fLeft;
+ runs[4] = bounds.fRight;
+ runs[5] = SkRegion_kRunTypeSentinel;
+ runs[6] = SkRegion_kRunTypeSentinel;
+}
+
+bool SkRegion::contains(int32_t x, int32_t y) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (!fBounds.contains(x, y)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* runs = fRunHead->findScanline(y);
+
+ // Skip the Bottom and IntervalCount
+ runs += 2;
+
+ // Just walk this scanline, checking each interval. The X-sentinel will
+    // appear as a left-interval (runs[0]) and should abort the search.
+ //
+ // We could do a bsearch, using interval-count (runs[1]), but need to time
+ // when that would be worthwhile.
+ //
+ for (;;) {
+ if (x < runs[0]) {
+ break;
+ }
+ if (x < runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
+
+static SkRegionPriv::RunType scanline_bottom(const SkRegionPriv::RunType runs[]) {
+ return runs[0];
+}
+
+static const SkRegionPriv::RunType* scanline_next(const SkRegionPriv::RunType runs[]) {
+ // skip [B N [L R]... S]
+ return runs + 2 + runs[1] * 2 + 1;
+}
+
+static bool scanline_contains(const SkRegionPriv::RunType runs[],
+ SkRegionPriv::RunType L, SkRegionPriv::RunType R) {
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ if (L < runs[0]) {
+ break;
+ }
+ if (R <= runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
+
+bool SkRegion::contains(const SkIRect& r) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (!fBounds.contains(r)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* scanline = fRunHead->findScanline(r.fTop);
+ for (;;) {
+ if (!scanline_contains(scanline, r.fLeft, r.fRight)) {
+ return false;
+ }
+ if (r.fBottom <= scanline_bottom(scanline)) {
+ break;
+ }
+ scanline = scanline_next(scanline);
+ }
+ return true;
+}
+
+bool SkRegion::contains(const SkRegion& rgn) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ SkDEBUGCODE(SkRegionPriv::Validate(rgn));
+
+ if (this->isEmpty() || rgn.isEmpty() || !fBounds.contains(rgn.fBounds)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ if (rgn.isRect()) {
+ return this->contains(rgn.getBounds());
+ }
+
+ /*
+ * A contains B is equivalent to
+ * B - A == 0
+ */
+ return !Oper(rgn, *this, kDifference_Op, nullptr);
+}
+
+const SkRegion::RunType* SkRegion::getRuns(RunType tmpStorage[],
+ int* intervals) const {
+ SkASSERT(tmpStorage && intervals);
+ const RunType* runs = tmpStorage;
+
+ if (this->isEmpty()) {
+ tmpStorage[0] = SkRegion_kRunTypeSentinel;
+ *intervals = 0;
+ } else if (this->isRect()) {
+ BuildRectRuns(fBounds, tmpStorage);
+ *intervals = 1;
+ } else {
+ runs = fRunHead->readonly_runs();
+ *intervals = fRunHead->getIntervalCount();
+ }
+ return runs;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool scanline_intersects(const SkRegionPriv::RunType runs[],
+ SkRegionPriv::RunType L, SkRegionPriv::RunType R) {
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ if (R <= runs[0]) {
+ break;
+ }
+ if (L < runs[1]) {
+ return true;
+ }
+ runs += 2;
+ }
+ return false;
+}
+
+bool SkRegion::intersects(const SkIRect& r) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (this->isEmpty() || r.isEmpty()) {
+ return false;
+ }
+
+ SkIRect sect;
+ if (!sect.intersect(fBounds, r)) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ SkASSERT(this->isComplex());
+
+ const RunType* scanline = fRunHead->findScanline(sect.fTop);
+ for (;;) {
+ if (scanline_intersects(scanline, sect.fLeft, sect.fRight)) {
+ return true;
+ }
+ if (sect.fBottom <= scanline_bottom(scanline)) {
+ break;
+ }
+ scanline = scanline_next(scanline);
+ }
+ return false;
+}
+
+bool SkRegion::intersects(const SkRegion& rgn) const {
+ if (this->isEmpty() || rgn.isEmpty()) {
+ return false;
+ }
+
+ if (!SkIRect::Intersects(fBounds, rgn.fBounds)) {
+ return false;
+ }
+
+ bool weAreARect = this->isRect();
+ bool theyAreARect = rgn.isRect();
+
+ if (weAreARect && theyAreARect) {
+ return true;
+ }
+ if (weAreARect) {
+ return rgn.intersects(this->getBounds());
+ }
+ if (theyAreARect) {
+ return this->intersects(rgn.getBounds());
+ }
+
+ // both of us are complex
+ return Oper(*this, rgn, kIntersect_Op, nullptr);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkRegion::operator==(const SkRegion& b) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ SkDEBUGCODE(SkRegionPriv::Validate(b));
+
+ if (this == &b) {
+ return true;
+ }
+ if (fBounds != b.fBounds) {
+ return false;
+ }
+
+ const SkRegion::RunHead* ah = fRunHead;
+ const SkRegion::RunHead* bh = b.fRunHead;
+
+ // this catches empties and rects being equal
+ if (ah == bh) {
+ return true;
+ }
+ // now we insist that both are complex (but different ptrs)
+ if (!this->isComplex() || !b.isComplex()) {
+ return false;
+ }
+ return ah->fRunCount == bh->fRunCount &&
+ !memcmp(ah->readonly_runs(), bh->readonly_runs(),
+ ah->fRunCount * sizeof(SkRegion::RunType));
+}
+
+// Return a (new) offset such that when applied (+=) to min and max, we don't overflow/underflow
+static int32_t pin_offset_s32(int32_t min, int32_t max, int32_t offset) {
+ SkASSERT(min <= max);
+ const int32_t lo = -SK_MaxS32-1,
+ hi = +SK_MaxS32;
+ if ((int64_t)min + offset < lo) { offset = lo - min; }
+ if ((int64_t)max + offset > hi) { offset = hi - max; }
+ return offset;
+}
+
+void SkRegion::translate(int dx, int dy, SkRegion* dst) const {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (nullptr == dst) {
+ return;
+ }
+ if (this->isEmpty()) {
+ dst->setEmpty();
+ return;
+ }
+ // pin dx and dy so we don't overflow our existing bounds
+ dx = pin_offset_s32(fBounds.fLeft, fBounds.fRight, dx);
+ dy = pin_offset_s32(fBounds.fTop, fBounds.fBottom, dy);
+
+ if (this->isRect()) {
+ dst->setRect(fBounds.makeOffset(dx, dy));
+ } else {
+ if (this == dst) {
+ dst->fRunHead = dst->fRunHead->ensureWritable();
+ } else {
+ SkRegion tmp;
+ tmp.allocateRuns(*fRunHead);
+ SkASSERT(tmp.isComplex());
+ tmp.fBounds = fBounds;
+ dst->swap(tmp);
+ }
+
+ dst->fBounds.offset(dx, dy);
+
+ const RunType* sruns = fRunHead->readonly_runs();
+ RunType* druns = dst->fRunHead->writable_runs();
+
+ *druns++ = (SkRegion::RunType)(*sruns++ + dy); // top
+ for (;;) {
+ int bottom = *sruns++;
+ if (bottom == SkRegion_kRunTypeSentinel) {
+ break;
+ }
+ *druns++ = (SkRegion::RunType)(bottom + dy); // bottom;
+ *druns++ = *sruns++; // copy intervalCount;
+ for (;;) {
+ int x = *sruns++;
+ if (x == SkRegion_kRunTypeSentinel) {
+ break;
+ }
+ *druns++ = (SkRegion::RunType)(x + dx);
+ *druns++ = (SkRegion::RunType)(*sruns++ + dx);
+ }
+ *druns++ = SkRegion_kRunTypeSentinel; // x sentinel
+ }
+ *druns++ = SkRegion_kRunTypeSentinel; // y sentinel
+
+ SkASSERT(sruns - fRunHead->readonly_runs() == fRunHead->fRunCount);
+ SkASSERT(druns - dst->fRunHead->readonly_runs() == dst->fRunHead->fRunCount);
+ }
+
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkRegion::setRects(const SkIRect rects[], int count) {
+ if (0 == count) {
+ this->setEmpty();
+ } else {
+ this->setRect(rects[0]);
+ for (int i = 1; i < count; i++) {
+ this->op(rects[i], kUnion_Op);
+ }
+ }
+ return !this->isEmpty();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+#ifdef SK_DEBUG
+static void assert_valid_pair(int left, int rite)
+{
+ SkASSERT(left == SkRegion_kRunTypeSentinel || left < rite);
+}
+#else
+ #define assert_valid_pair(left, rite)
+#endif
+
+struct spanRec {
+ const SkRegionPriv::RunType* fA_runs;
+ const SkRegionPriv::RunType* fB_runs;
+ int fA_left, fA_rite, fB_left, fB_rite;
+ int fLeft, fRite, fInside;
+
+ void init(const SkRegionPriv::RunType a_runs[],
+ const SkRegionPriv::RunType b_runs[]) {
+ fA_left = *a_runs++;
+ fA_rite = *a_runs++;
+ fB_left = *b_runs++;
+ fB_rite = *b_runs++;
+
+ fA_runs = a_runs;
+ fB_runs = b_runs;
+ }
+
+ bool done() const {
+ SkASSERT(fA_left <= SkRegion_kRunTypeSentinel);
+ SkASSERT(fB_left <= SkRegion_kRunTypeSentinel);
+ return fA_left == SkRegion_kRunTypeSentinel &&
+ fB_left == SkRegion_kRunTypeSentinel;
+ }
+
+ void next() {
+ assert_valid_pair(fA_left, fA_rite);
+ assert_valid_pair(fB_left, fB_rite);
+
+ int inside, left, rite SK_INIT_TO_AVOID_WARNING;
+ bool a_flush = false;
+ bool b_flush = false;
+
+ int a_left = fA_left;
+ int a_rite = fA_rite;
+ int b_left = fB_left;
+ int b_rite = fB_rite;
+
+ if (a_left < b_left) {
+ inside = 1;
+ left = a_left;
+ if (a_rite <= b_left) { // [...] <...>
+ rite = a_rite;
+ a_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ rite = a_left = b_left;
+ }
+ } else if (b_left < a_left) {
+ inside = 2;
+ left = b_left;
+ if (b_rite <= a_left) { // [...] <...>
+ rite = b_rite;
+ b_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ rite = b_left = a_left;
+ }
+ } else { // a_left == b_left
+ inside = 3;
+ left = a_left; // or b_left
+ if (a_rite <= b_rite) {
+ rite = b_left = a_rite;
+ a_flush = true;
+ }
+ if (b_rite <= a_rite) {
+ rite = a_left = b_rite;
+ b_flush = true;
+ }
+ }
+
+ if (a_flush) {
+ a_left = *fA_runs++;
+ a_rite = *fA_runs++;
+ }
+ if (b_flush) {
+ b_left = *fB_runs++;
+ b_rite = *fB_runs++;
+ }
+
+ SkASSERT(left <= rite);
+
+ // now update our state
+ fA_left = a_left;
+ fA_rite = a_rite;
+ fB_left = b_left;
+ fB_rite = b_rite;
+
+ fLeft = left;
+ fRite = rite;
+ fInside = inside;
+ }
+};
+
+static int distance_to_sentinel(const SkRegionPriv::RunType* runs) {
+ const SkRegionPriv::RunType* ptr = runs;
+ while (*ptr != SkRegion_kRunTypeSentinel) { ptr += 2; }
+ return ptr - runs;
+}
+
+static int operate_on_span(const SkRegionPriv::RunType a_runs[],
+ const SkRegionPriv::RunType b_runs[],
+ RunArray* array, int dstOffset,
+ int min, int max) {
+ // This is a worst-case for this span plus two for TWO terminating sentinels.
+ array->resizeToAtLeast(
+ dstOffset + distance_to_sentinel(a_runs) + distance_to_sentinel(b_runs) + 2);
+ SkRegionPriv::RunType* dst = &(*array)[dstOffset]; // get pointer AFTER resizing.
+
+ spanRec rec;
+ bool firstInterval = true;
+
+ rec.init(a_runs, b_runs);
+
+ while (!rec.done()) {
+ rec.next();
+
+ int left = rec.fLeft;
+ int rite = rec.fRite;
+
+        // add left,rite to our dst buffer (checking for coincidence)
+ if ((unsigned)(rec.fInside - min) <= (unsigned)(max - min) &&
+ left < rite) { // skip if equal
+ if (firstInterval || *(dst - 1) < left) {
+ *dst++ = (SkRegionPriv::RunType)(left);
+ *dst++ = (SkRegionPriv::RunType)(rite);
+ firstInterval = false;
+ } else {
+ // update the right edge
+ *(dst - 1) = (SkRegionPriv::RunType)(rite);
+ }
+ }
+ }
+ SkASSERT(dst < &(*array)[array->count() - 1]);
+ *dst++ = SkRegion_kRunTypeSentinel;
+ return dst - &(*array)[0];
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+static const struct {
+ uint8_t fMin;
+ uint8_t fMax;
+} gOpMinMax[] = {
+ { 1, 1 }, // Difference
+ { 3, 3 }, // Intersection
+ { 1, 3 }, // Union
+ { 1, 2 } // XOR
+};
+// need to ensure that the op enum lines up with our minmax array
+static_assert(0 == SkRegion::kDifference_Op, "");
+static_assert(1 == SkRegion::kIntersect_Op, "");
+static_assert(2 == SkRegion::kUnion_Op, "");
+static_assert(3 == SkRegion::kXOR_Op, "");
+
+class RgnOper {
+public:
+ RgnOper(int top, RunArray* array, SkRegion::Op op)
+ : fMin(gOpMinMax[op].fMin)
+ , fMax(gOpMinMax[op].fMax)
+ , fArray(array)
+ , fTop((SkRegionPriv::RunType)top) // just a first guess, we might update this
+ { SkASSERT((unsigned)op <= 3); }
+
+ void addSpan(int bottom, const SkRegionPriv::RunType a_runs[],
+ const SkRegionPriv::RunType b_runs[]) {
+ // skip X values and slots for the next Y+intervalCount
+ int start = fPrevDst + fPrevLen + 2;
+ // start points to beginning of dst interval
+ int stop = operate_on_span(a_runs, b_runs, fArray, start, fMin, fMax);
+ size_t len = SkToSizeT(stop - start);
+ SkASSERT(len >= 1 && (len & 1) == 1);
+ SkASSERT(SkRegion_kRunTypeSentinel == (*fArray)[stop - 1]);
+
+ // Assert memcmp won't exceed fArray->count().
+ SkASSERT(fArray->count() >= SkToInt(start + len - 1));
+ if (fPrevLen == len &&
+ (1 == len || !memcmp(&(*fArray)[fPrevDst],
+ &(*fArray)[start],
+ (len - 1) * sizeof(SkRegionPriv::RunType)))) {
+ // update Y value
+ (*fArray)[fPrevDst - 2] = (SkRegionPriv::RunType)bottom;
+ } else { // accept the new span
+ if (len == 1 && fPrevLen == 0) {
+ fTop = (SkRegionPriv::RunType)bottom; // just update our bottom
+ } else {
+ (*fArray)[start - 2] = (SkRegionPriv::RunType)bottom;
+ (*fArray)[start - 1] = SkToS32(len >> 1);
+ fPrevDst = start;
+ fPrevLen = len;
+ }
+ }
+ }
+
+ int flush() {
+ (*fArray)[fStartDst] = fTop;
+        // Previously reserved enough for TWO sentinels.
+ SkASSERT(fArray->count() > SkToInt(fPrevDst + fPrevLen));
+ (*fArray)[fPrevDst + fPrevLen] = SkRegion_kRunTypeSentinel;
+ return (int)(fPrevDst - fStartDst + fPrevLen + 1);
+ }
+
+ bool isEmpty() const { return 0 == fPrevLen; }
+
+ uint8_t fMin, fMax;
+
+private:
+ RunArray* fArray;
+ int fStartDst = 0;
+ int fPrevDst = 1;
+ size_t fPrevLen = 0; // will never match a length from operate_on_span
+ SkRegionPriv::RunType fTop;
+};
+
+// want a unique value to signal that we exited due to quickExit
+#define QUICK_EXIT_TRUE_COUNT (-1)
+
+static int operate(const SkRegionPriv::RunType a_runs[],
+ const SkRegionPriv::RunType b_runs[],
+ RunArray* dst,
+ SkRegion::Op op,
+ bool quickExit) {
+ const SkRegionPriv::RunType gEmptyScanline[] = {
+ 0, // fake bottom value
+ 0, // zero intervals
+ SkRegion_kRunTypeSentinel,
+ // just need a 2nd value, since spanRec.init() reads 2 values, even
+ // though if the first value is the sentinel, it ignores the 2nd value.
+ // w/o the 2nd value here, we might read uninitialized memory.
+ // This happens when we are using gSentinel, which is pointing at
+ // our sentinel value.
+ 0
+ };
+ const SkRegionPriv::RunType* const gSentinel = &gEmptyScanline[2];
+
+ int a_top = *a_runs++;
+ int a_bot = *a_runs++;
+ int b_top = *b_runs++;
+ int b_bot = *b_runs++;
+
+ a_runs += 1; // skip the intervalCount;
+ b_runs += 1; // skip the intervalCount;
+
+    // Now a_runs and b_runs point to their intervals (or sentinel)
+
+ assert_sentinel(a_top, false);
+ assert_sentinel(a_bot, false);
+ assert_sentinel(b_top, false);
+ assert_sentinel(b_bot, false);
+
+ RgnOper oper(std::min(a_top, b_top), dst, op);
+
+ int prevBot = SkRegion_kRunTypeSentinel; // so we fail the first test
+
+ while (a_bot < SkRegion_kRunTypeSentinel ||
+ b_bot < SkRegion_kRunTypeSentinel) {
+ int top, bot SK_INIT_TO_AVOID_WARNING;
+ const SkRegionPriv::RunType* run0 = gSentinel;
+ const SkRegionPriv::RunType* run1 = gSentinel;
+ bool a_flush = false;
+ bool b_flush = false;
+
+ if (a_top < b_top) {
+ top = a_top;
+ run0 = a_runs;
+ if (a_bot <= b_top) { // [...] <...>
+ bot = a_bot;
+ a_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ bot = a_top = b_top;
+ }
+ } else if (b_top < a_top) {
+ top = b_top;
+ run1 = b_runs;
+ if (b_bot <= a_top) { // [...] <...>
+ bot = b_bot;
+ b_flush = true;
+ } else { // [...<..]...> or [...<...>...]
+ bot = b_top = a_top;
+ }
+ } else { // a_top == b_top
+ top = a_top; // or b_top
+ run0 = a_runs;
+ run1 = b_runs;
+ if (a_bot <= b_bot) {
+ bot = b_top = a_bot;
+ a_flush = true;
+ }
+ if (b_bot <= a_bot) {
+ bot = a_top = b_bot;
+ b_flush = true;
+ }
+ }
+
+ if (top > prevBot) {
+ oper.addSpan(top, gSentinel, gSentinel);
+ }
+ oper.addSpan(bot, run0, run1);
+
+ if (quickExit && !oper.isEmpty()) {
+ return QUICK_EXIT_TRUE_COUNT;
+ }
+
+ if (a_flush) {
+ a_runs = skip_intervals(a_runs);
+ a_top = a_bot;
+ a_bot = *a_runs++;
+ a_runs += 1; // skip uninitialized intervalCount
+ if (a_bot == SkRegion_kRunTypeSentinel) {
+ a_top = a_bot;
+ }
+ }
+ if (b_flush) {
+ b_runs = skip_intervals(b_runs);
+ b_top = b_bot;
+ b_bot = *b_runs++;
+ b_runs += 1; // skip uninitialized intervalCount
+ if (b_bot == SkRegion_kRunTypeSentinel) {
+ b_top = b_bot;
+ }
+ }
+
+ prevBot = bot;
+ }
+ return oper.flush();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/* Given count RunTypes in a complex region, return the worst case number of
+ logical intervals that represents (i.e. number of rects that would be
+ returned from the iterator).
+
+ We could just return count/2, since there must be at least 2 values per
+ interval, but we can first trim off the const overhead of the initial TOP
+ value, plus the final BOTTOM + 2 sentinels.
+ */
+#if 0 // UNUSED
+static int count_to_intervals(int count) {
+ SkASSERT(count >= 6); // a single rect is 6 values
+ return (count - 4) >> 1;
+}
+#endif
+
+static bool setEmptyCheck(SkRegion* result) {
+ return result ? result->setEmpty() : false;
+}
+
+static bool setRectCheck(SkRegion* result, const SkIRect& rect) {
+ return result ? result->setRect(rect) : !rect.isEmpty();
+}
+
+static bool setRegionCheck(SkRegion* result, const SkRegion& rgn) {
+ return result ? result->setRegion(rgn) : !rgn.isEmpty();
+}
+
+bool SkRegion::Oper(const SkRegion& rgnaOrig, const SkRegion& rgnbOrig, Op op,
+ SkRegion* result) {
+ SkASSERT((unsigned)op < kOpCount);
+
+ if (kReplace_Op == op) {
+ return setRegionCheck(result, rgnbOrig);
+ }
+
+    // switch to using pointers, so we can swap them as needed
+ const SkRegion* rgna = &rgnaOrig;
+ const SkRegion* rgnb = &rgnbOrig;
+ // after this point, do not refer to rgnaOrig or rgnbOrig!!!
+
+    // collapse difference and reverse-difference into just difference
+ if (kReverseDifference_Op == op) {
+ using std::swap;
+ swap(rgna, rgnb);
+ op = kDifference_Op;
+ }
+
+ SkIRect bounds;
+ bool a_empty = rgna->isEmpty();
+ bool b_empty = rgnb->isEmpty();
+ bool a_rect = rgna->isRect();
+ bool b_rect = rgnb->isRect();
+
+ switch (op) {
+ case kDifference_Op:
+ if (a_empty) {
+ return setEmptyCheck(result);
+ }
+ if (b_empty || !SkIRect::Intersects(rgna->fBounds, rgnb->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (b_rect && rgnb->fBounds.containsNoEmptyCheck(rgna->fBounds)) {
+ return setEmptyCheck(result);
+ }
+ break;
+
+ case kIntersect_Op:
+ if ((a_empty | b_empty)
+ || !bounds.intersect(rgna->fBounds, rgnb->fBounds)) {
+ return setEmptyCheck(result);
+ }
+ if (a_rect & b_rect) {
+ return setRectCheck(result, bounds);
+ }
+ if (a_rect && rgna->fBounds.contains(rgnb->fBounds)) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_rect && rgnb->fBounds.contains(rgna->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ break;
+
+ case kUnion_Op:
+ if (a_empty) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_empty) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (a_rect && rgna->fBounds.contains(rgnb->fBounds)) {
+ return setRegionCheck(result, *rgna);
+ }
+ if (b_rect && rgnb->fBounds.contains(rgna->fBounds)) {
+ return setRegionCheck(result, *rgnb);
+ }
+ break;
+
+ case kXOR_Op:
+ if (a_empty) {
+ return setRegionCheck(result, *rgnb);
+ }
+ if (b_empty) {
+ return setRegionCheck(result, *rgna);
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown region op");
+ return false;
+ }
+
+ RunType tmpA[kRectRegionRuns];
+ RunType tmpB[kRectRegionRuns];
+
+ int a_intervals, b_intervals;
+ const RunType* a_runs = rgna->getRuns(tmpA, &a_intervals);
+ const RunType* b_runs = rgnb->getRuns(tmpB, &b_intervals);
+
+ RunArray array;
+ int count = operate(a_runs, b_runs, &array, op, nullptr == result);
+ SkASSERT(count <= array.count());
+
+ if (result) {
+ SkASSERT(count >= 0);
+ return result->setRuns(&array[0], count);
+ } else {
+ return (QUICK_EXIT_TRUE_COUNT == count) || !isRunCountEmpty(count);
+ }
+}
+
+bool SkRegion::op(const SkRegion& rgna, const SkRegion& rgnb, Op op) {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ return SkRegion::Oper(rgna, rgnb, op, this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/base/SkBuffer.h"
+
+size_t SkRegion::writeToMemory(void* storage) const {
+ if (nullptr == storage) {
+ size_t size = sizeof(int32_t); // -1 (empty), 0 (rect), runCount
+ if (!this->isEmpty()) {
+ size += sizeof(fBounds);
+ if (this->isComplex()) {
+ size += 2 * sizeof(int32_t); // ySpanCount + intervalCount
+ size += fRunHead->fRunCount * sizeof(RunType);
+ }
+ }
+ return size;
+ }
+
+ SkWBuffer buffer(storage);
+
+ if (this->isEmpty()) {
+ buffer.write32(-1);
+ } else {
+ bool isRect = this->isRect();
+
+ buffer.write32(isRect ? 0 : fRunHead->fRunCount);
+ buffer.write(&fBounds, sizeof(fBounds));
+
+ if (!isRect) {
+ buffer.write32(fRunHead->getYSpanCount());
+ buffer.write32(fRunHead->getIntervalCount());
+ buffer.write(fRunHead->readonly_runs(),
+ fRunHead->fRunCount * sizeof(RunType));
+ }
+ }
+ return buffer.pos();
+}
+
+static bool validate_run_count(int ySpanCount, int intervalCount, int runCount) {
+ // return 2 + 3 * ySpanCount + 2 * intervalCount;
+ if (ySpanCount < 1 || intervalCount < 2) {
+ return false;
+ }
+ SkSafeMath safeMath;
+ int sum = 2;
+ sum = safeMath.addInt(sum, ySpanCount);
+ sum = safeMath.addInt(sum, ySpanCount);
+ sum = safeMath.addInt(sum, ySpanCount);
+ sum = safeMath.addInt(sum, intervalCount);
+ sum = safeMath.addInt(sum, intervalCount);
+ return safeMath && sum == runCount;
+}
+
+// Validate that a memory sequence is a valid region.
+// Try to check all possible errors.
+// never read beyond &runs[runCount-1].
+static bool validate_run(const int32_t* runs,
+ int runCount,
+ const SkIRect& givenBounds,
+ int32_t ySpanCount,
+ int32_t intervalCount) {
+ // Region Layout:
+ // Top ( Bottom Span_Interval_Count ( Left Right )* Sentinel )+ Sentinel
+ if (!validate_run_count(SkToInt(ySpanCount), SkToInt(intervalCount), runCount)) {
+ return false;
+ }
+ SkASSERT(runCount >= 7); // 7==SkRegion::kRectRegionRuns
+ // quick safety check:
+ if (runs[runCount - 1] != SkRegion_kRunTypeSentinel ||
+ runs[runCount - 2] != SkRegion_kRunTypeSentinel) {
+ return false;
+ }
+ const int32_t* const end = runs + runCount;
+    SkIRect bounds = {0, 0, 0 ,0}; // calculated bounds
+ SkIRect rect = {0, 0, 0, 0}; // current rect
+ rect.fTop = *runs++;
+ if (rect.fTop == SkRegion_kRunTypeSentinel) {
+ return false; // no rect can contain SkRegion_kRunTypeSentinel
+ }
+ if (rect.fTop != givenBounds.fTop) {
+ return false; // Must not begin with empty span that does not contribute to bounds.
+ }
+ do {
+ --ySpanCount;
+ if (ySpanCount < 0) {
+ return false; // too many yspans
+ }
+ rect.fBottom = *runs++;
+ if (rect.fBottom == SkRegion_kRunTypeSentinel) {
+ return false;
+ }
+ if (rect.fBottom > givenBounds.fBottom) {
+ return false; // Must not end with empty span that does not contribute to bounds.
+ }
+ if (rect.fBottom <= rect.fTop) {
+ return false; // y-intervals must be ordered; rects must be non-empty.
+ }
+
+ int32_t xIntervals = *runs++;
+ SkASSERT(runs < end);
+ if (xIntervals < 0 || xIntervals > intervalCount || runs + 1 + 2 * xIntervals > end) {
+ return false;
+ }
+ intervalCount -= xIntervals;
+ bool firstInterval = true;
+ int32_t lastRight = 0; // check that x-intervals are distinct and ordered.
+ while (xIntervals-- > 0) {
+ rect.fLeft = *runs++;
+ rect.fRight = *runs++;
+ if (rect.fLeft == SkRegion_kRunTypeSentinel ||
+ rect.fRight == SkRegion_kRunTypeSentinel ||
+ rect.fLeft >= rect.fRight || // check non-empty rect
+ (!firstInterval && rect.fLeft <= lastRight)) {
+ return false;
+ }
+ lastRight = rect.fRight;
+ firstInterval = false;
+ bounds.join(rect);
+ }
+ if (*runs++ != SkRegion_kRunTypeSentinel) {
+            return false; // required check sentinel.
+ }
+ rect.fTop = rect.fBottom;
+ SkASSERT(runs < end);
+ } while (*runs != SkRegion_kRunTypeSentinel);
+ ++runs;
+ if (ySpanCount != 0 || intervalCount != 0 || givenBounds != bounds) {
+ return false;
+ }
+ SkASSERT(runs == end); // if ySpanCount && intervalCount are right, must be correct length.
+ return true;
+}
+size_t SkRegion::readFromMemory(const void* storage, size_t length) {
+ SkRBuffer buffer(storage, length);
+ SkRegion tmp;
+ int32_t count;
+
+ // Serialized Region Format:
+ // Empty:
+ // -1
+ // Simple Rect:
+ // 0 LEFT TOP RIGHT BOTTOM
+ // Complex Region:
+ // COUNT LEFT TOP RIGHT BOTTOM Y_SPAN_COUNT TOTAL_INTERVAL_COUNT [RUNS....]
+ if (!buffer.readS32(&count) || count < -1) {
+ return 0;
+ }
+ if (count >= 0) {
+ if (!buffer.read(&tmp.fBounds, sizeof(tmp.fBounds)) || tmp.fBounds.isEmpty()) {
+ return 0; // Short buffer or bad bounds for non-empty region; report failure.
+ }
+ if (count == 0) {
+ tmp.fRunHead = SkRegion_gRectRunHeadPtr;
+ } else {
+ int32_t ySpanCount, intervalCount;
+ if (!buffer.readS32(&ySpanCount) ||
+ !buffer.readS32(&intervalCount) ||
+ buffer.available() < count * sizeof(int32_t)) {
+ return 0;
+ }
+ if (!validate_run((const int32_t*)((const char*)storage + buffer.pos()), count,
+ tmp.fBounds, ySpanCount, intervalCount)) {
+ return 0; // invalid runs, don't even allocate
+ }
+ tmp.allocateRuns(count, ySpanCount, intervalCount);
+ SkASSERT(tmp.isComplex());
+ SkAssertResult(buffer.read(tmp.fRunHead->writable_runs(), count * sizeof(int32_t)));
+ }
+ }
+ SkASSERT(tmp.isValid());
+ SkASSERT(buffer.isValid());
+ this->swap(tmp);
+ return buffer.pos();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkRegion::isValid() const {
+ if (this->isEmpty()) {
+ return fBounds == SkIRect{0, 0, 0, 0};
+ }
+ if (fBounds.isEmpty()) {
+ return false;
+ }
+ if (this->isRect()) {
+ return true;
+ }
+ return fRunHead && fRunHead->fRefCnt > 0 &&
+ validate_run(fRunHead->readonly_runs(), fRunHead->fRunCount, fBounds,
+ fRunHead->getYSpanCount(), fRunHead->getIntervalCount());
+}
+
+#ifdef SK_DEBUG
+void SkRegionPriv::Validate(const SkRegion& rgn) { SkASSERT(rgn.isValid()); }
+
+void SkRegion::dump() const {
+ if (this->isEmpty()) {
+ SkDebugf(" rgn: empty\n");
+ } else {
+ SkDebugf(" rgn: [%d %d %d %d]", fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+ if (this->isComplex()) {
+ const RunType* runs = fRunHead->readonly_runs();
+ for (int i = 0; i < fRunHead->fRunCount; i++)
+ SkDebugf(" %d", runs[i]);
+ }
+ SkDebugf("\n");
+ }
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRegion::Iterator::Iterator(const SkRegion& rgn) {
+ this->reset(rgn);
+}
+
+bool SkRegion::Iterator::rewind() {
+ if (fRgn) {
+ this->reset(*fRgn);
+ return true;
+ }
+ return false;
+}
+
+void SkRegion::Iterator::reset(const SkRegion& rgn) {
+ fRgn = &rgn;
+ if (rgn.isEmpty()) {
+ fDone = true;
+ } else {
+ fDone = false;
+ if (rgn.isRect()) {
+ fRect = rgn.fBounds;
+ fRuns = nullptr;
+ } else {
+ fRuns = rgn.fRunHead->readonly_runs();
+ fRect.setLTRB(fRuns[3], fRuns[0], fRuns[4], fRuns[1]);
+ fRuns += 5;
+ // Now fRuns points to the 2nd interval (or x-sentinel)
+ }
+ }
+}
+
+void SkRegion::Iterator::next() {
+ if (fDone) {
+ return;
+ }
+
+ if (fRuns == nullptr) { // rect case
+ fDone = true;
+ return;
+ }
+
+ const RunType* runs = fRuns;
+
+ if (runs[0] < SkRegion_kRunTypeSentinel) { // valid X value
+ fRect.fLeft = runs[0];
+ fRect.fRight = runs[1];
+ runs += 2;
+ } else { // we're at the end of a line
+ runs += 1;
+ if (runs[0] < SkRegion_kRunTypeSentinel) { // valid Y value
+ int intervals = runs[1];
+ if (0 == intervals) { // empty line
+ fRect.fTop = runs[0];
+ runs += 3;
+ } else {
+ fRect.fTop = fRect.fBottom;
+ }
+
+ fRect.fBottom = runs[0];
+ assert_sentinel(runs[2], false);
+ assert_sentinel(runs[3], false);
+ fRect.fLeft = runs[2];
+ fRect.fRight = runs[3];
+ runs += 4;
+ } else { // end of rgn
+ fDone = true;
+ }
+ }
+ fRuns = runs;
+}
+
+SkRegion::Cliperator::Cliperator(const SkRegion& rgn, const SkIRect& clip)
+ : fIter(rgn), fClip(clip), fDone(true) {
+ const SkIRect& r = fIter.rect();
+
+ while (!fIter.done()) {
+ if (r.fTop >= clip.fBottom) {
+ break;
+ }
+ if (fRect.intersect(clip, r)) {
+ fDone = false;
+ break;
+ }
+ fIter.next();
+ }
+}
+
+void SkRegion::Cliperator::next() {
+ if (fDone) {
+ return;
+ }
+
+ const SkIRect& r = fIter.rect();
+
+ fDone = true;
+ fIter.next();
+ while (!fIter.done()) {
+ if (r.fTop >= fClip.fBottom) {
+ break;
+ }
+ if (fRect.intersect(fClip, r)) {
+ fDone = false;
+ break;
+ }
+ fIter.next();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkRegion::Spanerator::Spanerator(const SkRegion& rgn, int y, int left,
+ int right) {
+ SkDEBUGCODE(SkRegionPriv::Validate(rgn));
+
+ const SkIRect& r = rgn.getBounds();
+
+ fDone = true;
+ if (!rgn.isEmpty() && y >= r.fTop && y < r.fBottom &&
+ right > r.fLeft && left < r.fRight) {
+ if (rgn.isRect()) {
+ if (left < r.fLeft) {
+ left = r.fLeft;
+ }
+ if (right > r.fRight) {
+ right = r.fRight;
+ }
+ fLeft = left;
+ fRight = right;
+ fRuns = nullptr; // means we're a rect, not a rgn
+ fDone = false;
+ } else {
+ const SkRegion::RunType* runs = rgn.fRunHead->findScanline(y);
+ runs += 2; // skip Bottom and IntervalCount
+ for (;;) {
+ // runs[0..1] is to the right of the span, so we're done
+ if (runs[0] >= right) {
+ break;
+ }
+ // runs[0..1] is to the left of the span, so continue
+ if (runs[1] <= left) {
+ runs += 2;
+ continue;
+ }
+ // runs[0..1] intersects the span
+ fRuns = runs;
+ fLeft = left;
+ fRight = right;
+ fDone = false;
+ break;
+ }
+ }
+ }
+}
+
+bool SkRegion::Spanerator::next(int* left, int* right) {
+ if (fDone) {
+ return false;
+ }
+
+ if (fRuns == nullptr) { // we're a rect
+ fDone = true; // ok, now we're done
+ if (left) {
+ *left = fLeft;
+ }
+ if (right) {
+ *right = fRight;
+ }
+ return true; // this interval is legal
+ }
+
+ const SkRegion::RunType* runs = fRuns;
+
+ if (runs[0] >= fRight) {
+ fDone = true;
+ return false;
+ }
+
+ SkASSERT(runs[1] > fLeft);
+
+ if (left) {
+ *left = std::max(fLeft, runs[0]);
+ }
+ if (right) {
+ *right = std::min(fRight, runs[1]);
+ }
+ fRuns = runs + 2;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void visit_pairs(int pairCount, int y, const int32_t pairs[],
+ const std::function<void(const SkIRect&)>& visitor) {
+ for (int i = 0; i < pairCount; ++i) {
+ visitor({ pairs[0], y, pairs[1], y + 1 });
+ pairs += 2;
+ }
+}
+
+void SkRegionPriv::VisitSpans(const SkRegion& rgn,
+ const std::function<void(const SkIRect&)>& visitor) {
+ if (rgn.isEmpty()) {
+ return;
+ }
+ if (rgn.isRect()) {
+ visitor(rgn.getBounds());
+ } else {
+ const int32_t* p = rgn.fRunHead->readonly_runs();
+ int32_t top = *p++;
+ int32_t bot = *p++;
+ do {
+ int pairCount = *p++;
+ if (pairCount == 1) {
+ visitor({ p[0], top, p[1], bot });
+ p += 2;
+ } else if (pairCount > 1) {
+                // we have to loop repeatedly in Y, sending each interval in Y -> X order
+ for (int y = top; y < bot; ++y) {
+ visit_pairs(pairCount, y, p, visitor);
+ }
+ p += pairCount * 2;
+ }
+ assert_sentinel(*p, true);
+ p += 1; // skip sentinel
+
+ // read next bottom or sentinel
+ top = bot;
+ bot = *p++;
+ } while (!SkRegionValueIsSentinel(bot));
+ }
+}
+
diff --git a/gfx/skia/skia/src/core/SkRegionPriv.h b/gfx/skia/skia/src/core/SkRegionPriv.h
new file mode 100644
index 0000000000..2a9cb9b3ed
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegionPriv.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRegionPriv_DEFINED
+#define SkRegionPriv_DEFINED
+
+#include "include/core/SkRegion.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTo.h"
+
+#include <atomic>
+#include <functional>
+
+class SkRegionPriv {
+public:
+ inline static constexpr int kRunTypeSentinel = 0x7FFFFFFF;
+ typedef SkRegion::RunType RunType;
+ typedef SkRegion::RunHead RunHead;
+
+ // Call the function with each span, in Y -> X ascending order.
+ // We pass a rect, but we will still ensure the span Y->X ordering, so often the height
+ // of the rect may be 1. It should never be empty.
+ static void VisitSpans(const SkRegion& rgn, const std::function<void(const SkIRect&)>&);
+
+#ifdef SK_DEBUG
+ static void Validate(const SkRegion& rgn);
+#endif
+};
+
+static constexpr int SkRegion_kRunTypeSentinel = 0x7FFFFFFF;
+
+inline bool SkRegionValueIsSentinel(int32_t value) {
+ return value == (int32_t)SkRegion_kRunTypeSentinel;
+}
+
+#define assert_sentinel(value, isSentinel) \
+ SkASSERT(SkRegionValueIsSentinel(value) == isSentinel)
+
+#ifdef SK_DEBUG
+// Given the first interval (just past the interval-count), compute the
+// interval count, by searching for the x-sentinel
+//
+static int compute_intervalcount(const SkRegionPriv::RunType runs[]) {
+ const SkRegionPriv::RunType* curr = runs;
+ while (*curr < SkRegion_kRunTypeSentinel) {
+ SkASSERT(curr[0] < curr[1]);
+ SkASSERT(curr[1] < SkRegion_kRunTypeSentinel);
+ curr += 2;
+ }
+ return SkToInt((curr - runs) >> 1);
+}
+#endif
+
+struct SkRegion::RunHead {
+private:
+
+public:
+ std::atomic<int32_t> fRefCnt;
+ int32_t fRunCount;
+
+ /**
+ * Number of spans with different Y values. This does not count the initial
+ * Top value, nor does it count the final Y-Sentinel value. In the logical
+ * case of a rectangle, this would return 1, and an empty region would
+ * return 0.
+ */
+ int getYSpanCount() const {
+ return fYSpanCount;
+ }
+
+ /**
+ * Number of intervals in the entire region. This equals the number of
+ * rects that would be returned by the Iterator. In the logical case of
+ * a rect, this would return 1, and an empty region would return 0.
+ */
+ int getIntervalCount() const {
+ return fIntervalCount;
+ }
+
+ static RunHead* Alloc(int count) {
+ if (count < SkRegion::kRectRegionRuns) {
+ return nullptr;
+ }
+
+ const int64_t size = sk_64_mul(count, sizeof(RunType)) + sizeof(RunHead);
+ if (count < 0 || !SkTFitsIn<int32_t>(size)) { SK_ABORT("Invalid Size"); }
+
+ RunHead* head = (RunHead*)sk_malloc_throw(size);
+ head->fRefCnt = 1;
+ head->fRunCount = count;
+ // these must be filled in later, otherwise we will be invalid
+ head->fYSpanCount = 0;
+ head->fIntervalCount = 0;
+ return head;
+ }
+
+ static RunHead* Alloc(int count, int yspancount, int intervalCount) {
+ if (yspancount <= 0 || intervalCount <= 1) {
+ return nullptr;
+ }
+
+ RunHead* head = Alloc(count);
+ if (!head) {
+ return nullptr;
+ }
+ head->fYSpanCount = yspancount;
+ head->fIntervalCount = intervalCount;
+ return head;
+ }
+
+ SkRegion::RunType* writable_runs() {
+ SkASSERT(fRefCnt == 1);
+ return (SkRegion::RunType*)(this + 1);
+ }
+
+ const SkRegion::RunType* readonly_runs() const {
+ return (const SkRegion::RunType*)(this + 1);
+ }
+
+ RunHead* ensureWritable() {
+ RunHead* writable = this;
+ if (fRefCnt > 1) {
+ // We need to alloc & copy the current region before decrease
+ // the refcount because it could be freed in the meantime.
+ writable = Alloc(fRunCount, fYSpanCount, fIntervalCount);
+ memcpy(writable->writable_runs(), this->readonly_runs(),
+ fRunCount * sizeof(RunType));
+
+            // fRefCnt might have changed since we last checked.
+ // If we own the last reference at this point, we need to
+ // free the memory.
+ if (--fRefCnt == 0) {
+ sk_free(this);
+ }
+ }
+ return writable;
+ }
+
+ /**
+ * Given a scanline (including its Bottom value at runs[0]), return the next
+ * scanline. Asserts that there is one (i.e. runs[0] < Sentinel)
+ */
+ static SkRegion::RunType* SkipEntireScanline(const SkRegion::RunType runs[]) {
+ // we are not the Y Sentinel
+ SkASSERT(runs[0] < SkRegion_kRunTypeSentinel);
+
+ const int intervals = runs[1];
+ SkASSERT(runs[2 + intervals * 2] == SkRegion_kRunTypeSentinel);
+#ifdef SK_DEBUG
+ {
+ int n = compute_intervalcount(&runs[2]);
+ SkASSERT(n == intervals);
+ }
+#endif
+
+ // skip the entire line [B N [L R] S]
+ runs += 1 + 1 + intervals * 2 + 1;
+ return const_cast<SkRegion::RunType*>(runs);
+ }
+
+
+ /**
+ * Return the scanline that contains the Y value. This requires that the Y
+ * value is already known to be contained within the bounds of the region,
+ * and so this routine never returns nullptr.
+ *
+ * It returns the beginning of the scanline, starting with its Bottom value.
+ */
+ SkRegion::RunType* findScanline(int y) const {
+ const RunType* runs = this->readonly_runs();
+
+ // if the top-check fails, we didn't do a quick check on the bounds
+ SkASSERT(y >= runs[0]);
+
+ runs += 1; // skip top-Y
+ for (;;) {
+ int bottom = runs[0];
+ // If we hit this, we've walked off the region, and our bounds check
+ // failed.
+ SkASSERT(bottom < SkRegion_kRunTypeSentinel);
+ if (y < bottom) {
+ break;
+ }
+ runs = SkipEntireScanline(runs);
+ }
+ return const_cast<SkRegion::RunType*>(runs);
+ }
+
+ // Copy src runs into us, computing interval counts and bounds along the way
+ void computeRunBounds(SkIRect* bounds) {
+ RunType* runs = this->writable_runs();
+ bounds->fTop = *runs++;
+
+ int bot;
+ int ySpanCount = 0;
+ int intervalCount = 0;
+ int left = SK_MaxS32;
+ int rite = SK_MinS32;
+
+ do {
+ bot = *runs++;
+ SkASSERT(bot < SkRegion_kRunTypeSentinel);
+ ySpanCount += 1;
+
+ const int intervals = *runs++;
+ SkASSERT(intervals >= 0);
+ SkASSERT(intervals < SkRegion_kRunTypeSentinel);
+
+ if (intervals > 0) {
+#ifdef SK_DEBUG
+ {
+ int n = compute_intervalcount(runs);
+ SkASSERT(n == intervals);
+ }
+#endif
+ RunType L = runs[0];
+ SkASSERT(L < SkRegion_kRunTypeSentinel);
+ if (left > L) {
+ left = L;
+ }
+
+ runs += intervals * 2;
+ RunType R = runs[-1];
+ SkASSERT(R < SkRegion_kRunTypeSentinel);
+ if (rite < R) {
+ rite = R;
+ }
+
+ intervalCount += intervals;
+ }
+ SkASSERT(SkRegion_kRunTypeSentinel == *runs);
+ runs += 1; // skip x-sentinel
+
+ // test Y-sentinel
+ } while (SkRegion_kRunTypeSentinel > *runs);
+
+#ifdef SK_DEBUG
+ // +1 to skip the last Y-sentinel
+ int runCount = SkToInt(runs - this->writable_runs() + 1);
+ SkASSERT(runCount == fRunCount);
+#endif
+
+ fYSpanCount = ySpanCount;
+ fIntervalCount = intervalCount;
+
+ bounds->fLeft = left;
+ bounds->fRight = rite;
+ bounds->fBottom = bot;
+ }
+
+private:
+ int32_t fYSpanCount;
+ int32_t fIntervalCount;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkRegion_path.cpp b/gfx/skia/skia/src/core/SkRegion_path.cpp
new file mode 100644
index 0000000000..4c14b3e811
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRegion_path.cpp
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkSafeMath.h"
+#include "src/base/SkTSort.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkRegionPriv.h"
+#include "src/core/SkScan.h"
+
+// The rgnbuilder caller *seems* to pass short counts, possibly often seeing early failure, so
+// we may not want to promote this to a "std" routine just yet.
+static bool sk_memeq32(const int32_t* SK_RESTRICT a, const int32_t* SK_RESTRICT b, int count) {
+ for (int i = 0; i < count; ++i) {
+ if (a[i] != b[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+class SkRgnBuilder : public SkBlitter {
+public:
+ SkRgnBuilder();
+ ~SkRgnBuilder() override;
+
+ // returns true if it could allocate the working storage needed
+ bool init(int maxHeight, int maxTransitions, bool pathIsInverse);
+
+ void done() {
+ if (fCurrScanline != nullptr) {
+ fCurrScanline->fXCount = (SkRegion::RunType)((int)(fCurrXPtr - fCurrScanline->firstX()));
+ if (!this->collapsWithPrev()) { // flush the last line
+ fCurrScanline = fCurrScanline->nextScanline();
+ }
+ }
+ }
+
+ int computeRunCount() const;
+ void copyToRect(SkIRect*) const;
+ void copyToRgn(SkRegion::RunType runs[]) const;
+
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ SkDEBUGFAIL("blitAntiH not implemented");
+ }
+
+#ifdef SK_DEBUG
+ void dump() const {
+ SkDebugf("SkRgnBuilder: Top = %d\n", fTop);
+ const Scanline* line = (Scanline*)fStorage;
+ while (line < fCurrScanline) {
+ SkDebugf("SkRgnBuilder::Scanline: LastY=%d, fXCount=%d", line->fLastY, line->fXCount);
+ for (int i = 0; i < line->fXCount; i++) {
+ SkDebugf(" %d", line->firstX()[i]);
+ }
+ SkDebugf("\n");
+
+ line = line->nextScanline();
+ }
+ }
+#endif
+private:
+ /*
+ * Scanline mimics a row in the region, nearly. A row in a region is:
+ * [Bottom IntervalCount [L R]... Sentinel]
+ * while a Scanline is
+ * [LastY XCount [L R]... uninitialized]
+ * The two are the same length (which is good), but we have to transmute
+ * the scanline a little when we convert it to a region-row.
+ *
+ * Potentially we could recode this to exactly match the row format, in
+ * which case copyToRgn() could be a single memcpy. Not sure that is worth
+ * the effort.
+ */
+ struct Scanline {
+ SkRegion::RunType fLastY;
+ SkRegion::RunType fXCount;
+
+ SkRegion::RunType* firstX() const { return (SkRegion::RunType*)(this + 1); }
+ Scanline* nextScanline() const {
+ // add final +1 for the x-sentinel
+ return (Scanline*)((SkRegion::RunType*)(this + 1) + fXCount + 1);
+ }
+ };
+ SkRegion::RunType* fStorage;
+ Scanline* fCurrScanline;
+ Scanline* fPrevScanline;
+    //  points at next available x[] in fCurrScanline
+ SkRegion::RunType* fCurrXPtr;
+ SkRegion::RunType fTop; // first Y value
+
+ int fStorageCount;
+
+ bool collapsWithPrev() {
+ if (fPrevScanline != nullptr &&
+ fPrevScanline->fLastY + 1 == fCurrScanline->fLastY &&
+ fPrevScanline->fXCount == fCurrScanline->fXCount &&
+ sk_memeq32(fPrevScanline->firstX(), fCurrScanline->firstX(), fCurrScanline->fXCount))
+ {
+ // update the height of fPrevScanline
+ fPrevScanline->fLastY = fCurrScanline->fLastY;
+ return true;
+ }
+ return false;
+ }
+};
+
+SkRgnBuilder::SkRgnBuilder()
+ : fStorage(nullptr) {
+}
+
+SkRgnBuilder::~SkRgnBuilder() {
+ sk_free(fStorage);
+}
+
+bool SkRgnBuilder::init(int maxHeight, int maxTransitions, bool pathIsInverse) {
+ if ((maxHeight | maxTransitions) < 0) {
+ return false;
+ }
+
+ SkSafeMath safe;
+
+ if (pathIsInverse) {
+ // allow for additional X transitions to "invert" each scanline
+ // [ L' ... normal transitions ... R' ]
+ //
+ maxTransitions = safe.addInt(maxTransitions, 2);
+ }
+
+ // compute the count with +1 and +3 slop for the working buffer
+ size_t count = safe.mul(safe.addInt(maxHeight, 1), safe.addInt(3, maxTransitions));
+
+ if (pathIsInverse) {
+ // allow for two "empty" rows for the top and bottom
+ // [ Y, 1, L, R, S] == 5 (*2 for top and bottom)
+ count = safe.add(count, 10);
+ }
+
+ if (!safe || !SkTFitsIn<int32_t>(count)) {
+ return false;
+ }
+ fStorageCount = SkToS32(count);
+
+ fStorage = (SkRegion::RunType*)sk_malloc_canfail(fStorageCount, sizeof(SkRegion::RunType));
+ if (nullptr == fStorage) {
+ return false;
+ }
+
+ fCurrScanline = nullptr; // signal empty collection
+ fPrevScanline = nullptr; // signal first scanline
+ return true;
+}
+
+void SkRgnBuilder::blitH(int x, int y, int width) {
+ if (fCurrScanline == nullptr) { // first time
+ fTop = (SkRegion::RunType)(y);
+ fCurrScanline = (Scanline*)fStorage;
+ fCurrScanline->fLastY = (SkRegion::RunType)(y);
+ fCurrXPtr = fCurrScanline->firstX();
+ } else {
+ SkASSERT(y >= fCurrScanline->fLastY);
+
+ if (y > fCurrScanline->fLastY) {
+ // if we get here, we're done with fCurrScanline
+ fCurrScanline->fXCount = (SkRegion::RunType)((int)(fCurrXPtr - fCurrScanline->firstX()));
+
+ int prevLastY = fCurrScanline->fLastY;
+ if (!this->collapsWithPrev()) {
+ fPrevScanline = fCurrScanline;
+ fCurrScanline = fCurrScanline->nextScanline();
+
+ }
+ if (y - 1 > prevLastY) { // insert empty run
+ fCurrScanline->fLastY = (SkRegion::RunType)(y - 1);
+ fCurrScanline->fXCount = 0;
+ fCurrScanline = fCurrScanline->nextScanline();
+ }
+ // setup for the new curr line
+ fCurrScanline->fLastY = (SkRegion::RunType)(y);
+ fCurrXPtr = fCurrScanline->firstX();
+ }
+ }
+ // check if we should extend the current run, or add a new one
+ if (fCurrXPtr > fCurrScanline->firstX() && fCurrXPtr[-1] == x) {
+ fCurrXPtr[-1] = (SkRegion::RunType)(x + width);
+ } else {
+ fCurrXPtr[0] = (SkRegion::RunType)(x);
+ fCurrXPtr[1] = (SkRegion::RunType)(x + width);
+ fCurrXPtr += 2;
+ }
+ SkASSERT(fCurrXPtr - fStorage < fStorageCount);
+}
+
+int SkRgnBuilder::computeRunCount() const {
+ if (fCurrScanline == nullptr) {
+ return 0;
+ }
+
+ const SkRegion::RunType* line = fStorage;
+ const SkRegion::RunType* stop = (const SkRegion::RunType*)fCurrScanline;
+
+ return 2 + (int)(stop - line);
+}
+
+void SkRgnBuilder::copyToRect(SkIRect* r) const {
+ SkASSERT(fCurrScanline != nullptr);
+ // A rect's scanline is [bottom intervals left right sentinel] == 5
+ SkASSERT((const SkRegion::RunType*)fCurrScanline - fStorage == 5);
+
+ const Scanline* line = (const Scanline*)fStorage;
+ SkASSERT(line->fXCount == 2);
+
+ r->setLTRB(line->firstX()[0], fTop, line->firstX()[1], line->fLastY + 1);
+}
+
+void SkRgnBuilder::copyToRgn(SkRegion::RunType runs[]) const {
+ SkASSERT(fCurrScanline != nullptr);
+ SkASSERT((const SkRegion::RunType*)fCurrScanline - fStorage > 4);
+
+ const Scanline* line = (const Scanline*)fStorage;
+ const Scanline* stop = fCurrScanline;
+
+ *runs++ = fTop;
+ do {
+ *runs++ = (SkRegion::RunType)(line->fLastY + 1);
+ int count = line->fXCount;
+ *runs++ = count >> 1; // intervalCount
+ if (count) {
+ memcpy(runs, line->firstX(), count * sizeof(SkRegion::RunType));
+ runs += count;
+ }
+ *runs++ = SkRegion_kRunTypeSentinel;
+ line = line->nextScanline();
+ } while (line < stop);
+ SkASSERT(line == stop);
+ *runs = SkRegion_kRunTypeSentinel;
+}
+
+static unsigned verb_to_initial_last_index(unsigned verb) {
+ static const uint8_t gPathVerbToInitialLastIndex[] = {
+ 0, // kMove_Verb
+ 1, // kLine_Verb
+ 2, // kQuad_Verb
+ 2, // kConic_Verb
+ 3, // kCubic_Verb
+ 0, // kClose_Verb
+ 0 // kDone_Verb
+ };
+ SkASSERT((unsigned)verb < std::size(gPathVerbToInitialLastIndex));
+ return gPathVerbToInitialLastIndex[verb];
+}
+
+static unsigned verb_to_max_edges(unsigned verb) {
+ static const uint8_t gPathVerbToMaxEdges[] = {
+ 0, // kMove_Verb
+ 1, // kLine_Verb
+ 2, // kQuad_VerbB
+ 2, // kConic_VerbB
+ 3, // kCubic_Verb
+ 0, // kClose_Verb
+ 0 // kDone_Verb
+ };
+ SkASSERT((unsigned)verb < std::size(gPathVerbToMaxEdges));
+ return gPathVerbToMaxEdges[verb];
+}
+
+// If returns 0, ignore itop and ibot
+static int count_path_runtype_values(const SkPath& path, int* itop, int* ibot) {
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+
+ int maxEdges = 0;
+ SkScalar top = SkIntToScalar(SK_MaxS16);
+ SkScalar bot = SkIntToScalar(SK_MinS16);
+
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ maxEdges += verb_to_max_edges(verb);
+
+ int lastIndex = verb_to_initial_last_index(verb);
+ if (lastIndex > 0) {
+ for (int i = 1; i <= lastIndex; i++) {
+ if (top > pts[i].fY) {
+ top = pts[i].fY;
+ } else if (bot < pts[i].fY) {
+ bot = pts[i].fY;
+ }
+ }
+ } else if (SkPath::kMove_Verb == verb) {
+ if (top > pts[0].fY) {
+ top = pts[0].fY;
+ } else if (bot < pts[0].fY) {
+ bot = pts[0].fY;
+ }
+ }
+ }
+ if (0 == maxEdges) {
+ return 0; // we have only moves+closes
+ }
+
+ SkASSERT(top <= bot);
+ *itop = SkScalarRoundToInt(top);
+ *ibot = SkScalarRoundToInt(bot);
+ return maxEdges;
+}
+
+static bool check_inverse_on_empty_return(SkRegion* dst, const SkPath& path, const SkRegion& clip) {
+ if (path.isInverseFillType()) {
+ return dst->set(clip);
+ } else {
+ return dst->setEmpty();
+ }
+}
+
+bool SkRegion::setPath(const SkPath& path, const SkRegion& clip) {
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+
+ if (clip.isEmpty() || !path.isFinite() || path.isEmpty()) {
+ // This treats non-finite paths as empty as well, so this returns empty or 'clip' if
+ // it's inverse-filled. If clip is also empty, path's fill type doesn't really matter
+ // and this region ends up empty.
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ // Our builder is very fragile, and can't be called with spans/rects out of Y->X order.
+ // To ensure this, we only "fill" clipped to a rect (the clip's bounds), and if the
+ // clip is more complex than that, we just post-intersect the result with the clip.
+ const SkIRect clipBounds = clip.getBounds();
+ if (clip.isComplex()) {
+ if (!this->setPath(path, SkRegion(clipBounds))) {
+ return false;
+ }
+ return this->op(clip, kIntersect_Op);
+ }
+
+ // SkScan::FillPath has limits on the coordinate range of the clipping SkRegion. If it's too
+ // big, tile the clip bounds and union the pieces back together.
+ if (SkScan::PathRequiresTiling(clipBounds)) {
+ static constexpr int kTileSize = 32767 >> 1; // Limit so coords can fit into SkFixed (16.16)
+ const SkIRect pathBounds = path.getBounds().roundOut();
+
+ this->setEmpty();
+
+ // Note: With large integers some intermediate calculations can overflow, but the
+ // end results will still be in integer range. Using int64_t for the intermediate
+ // values will handle this situation.
+ for (int64_t top = clipBounds.fTop; top < clipBounds.fBottom; top += kTileSize) {
+ int64_t bot = std::min(top + kTileSize, (int64_t)clipBounds.fBottom);
+ for (int64_t left = clipBounds.fLeft; left < clipBounds.fRight; left += kTileSize) {
+ int64_t right = std::min(left + kTileSize, (int64_t)clipBounds.fRight);
+
+ SkIRect tileClipBounds = {(int)left, (int)top, (int)right, (int)bot};
+ if (!SkIRect::Intersects(pathBounds, tileClipBounds)) {
+ continue;
+ }
+
+ // Shift coordinates so the top left is (0,0) during scan conversion and then
+ // translate the SkRegion afterwards.
+ tileClipBounds.offset(-left, -top);
+ SkASSERT(!SkScan::PathRequiresTiling(tileClipBounds));
+ SkRegion tile;
+ tile.setPath(path.makeTransform(SkMatrix::Translate(-left, -top)),
+ SkRegion(tileClipBounds));
+ tile.translate(left, top);
+ this->op(tile, kUnion_Op);
+ }
+ }
+ // During tiling we only applied the bounds of the tile, now that we have a full SkRegion,
+ // apply the original clip.
+ return this->op(clip, kIntersect_Op);
+ }
+
+ // compute worst-case rgn-size for the path
+ int pathTop, pathBot;
+ int pathTransitions = count_path_runtype_values(path, &pathTop, &pathBot);
+ if (0 == pathTransitions) {
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ int clipTop, clipBot;
+ int clipTransitions = clip.count_runtype_values(&clipTop, &clipBot);
+
+ int top = std::max(pathTop, clipTop);
+ int bot = std::min(pathBot, clipBot);
+ if (top >= bot) {
+ return check_inverse_on_empty_return(this, path, clip);
+ }
+
+ SkRgnBuilder builder;
+
+ if (!builder.init(bot - top,
+ std::max(pathTransitions, clipTransitions),
+ path.isInverseFillType())) {
+ // can't allocate working space, so return false
+ return this->setEmpty();
+ }
+
+ SkScan::FillPath(path, clip, &builder);
+ builder.done();
+
+ int count = builder.computeRunCount();
+ if (count == 0) {
+ return this->setEmpty();
+ } else if (count == kRectRegionRuns) {
+ builder.copyToRect(&fBounds);
+ this->setRect(fBounds);
+ } else {
+ SkRegion tmp;
+
+ tmp.fRunHead = RunHead::Alloc(count);
+ builder.copyToRgn(tmp.fRunHead->writable_runs());
+ tmp.fRunHead->computeRunBounds(&tmp.fBounds);
+ this->swap(tmp);
+ }
+ SkDEBUGCODE(SkRegionPriv::Validate(*this));
+ return true;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
// An Edge is one vertical side of a region run-rectangle. getBoundaryPath()
// converts every run-rect into a left and a right edge, links the edges into
// closed loops, and walks the loops to emit boundary contours.
struct Edge {
    // Link-state flags: set once the matching loop-neighbor for each endpoint
    // has been found by find_link().
    enum {
        kY0Link = 0x01,
        kY1Link = 0x02,

        kCompleteLink = (kY0Link | kY1Link)
    };

    SkRegionPriv::RunType fX;        // x coordinate shared by both endpoints (edge is vertical)
    SkRegionPriv::RunType fY0, fY1;  // endpoint ys; their order encodes winding (see set() callers)
    uint8_t fFlags;                  // combination of kY0Link / kY1Link
    Edge* fNext;                     // next edge in the linked boundary loop

    // Initialize the edge. y0 and y1 must differ: a zero-height edge would
    // contribute nothing to the boundary.
    void set(int x, int y0, int y1) {
        SkASSERT(y0 != y1);

        fX = (SkRegionPriv::RunType)(x);
        fY0 = (SkRegionPriv::RunType)(y0);
        fY1 = (SkRegionPriv::RunType)(y1);
        fFlags = 0;
        SkDEBUGCODE(fNext = nullptr;)
    }

    // Smaller of the two y endpoints; used as the sort tiebreaker in EdgeLT.
    int top() const {
        return std::min(fY0, fY1);
    }
};
+
// Link 'base' into its boundary loop. For each not-yet-linked endpoint of
// 'base', scan forward through the sorted array for the edge whose opposite
// endpoint matches and is itself unlinked on that side. The scan loops carry
// no bounds check: a partner is expected to exist in (base, stop) because the
// edges describe closed rectangles (asserted indirectly via base + 1 < stop).
static void find_link(Edge* base, Edge* stop) {
    SkASSERT(base < stop);

    if (base->fFlags == Edge::kCompleteLink) {
        SkASSERT(base->fNext);
        return;
    }

    SkASSERT(base + 1 < stop);

    int y0 = base->fY0;
    int y1 = base->fY1;

    Edge* e = base;
    if ((base->fFlags & Edge::kY0Link) == 0) {
        // Find the edge that ends (fY1) where base starts (fY0); it becomes
        // base's predecessor in the loop.
        for (;;) {
            e += 1;
            if ((e->fFlags & Edge::kY1Link) == 0 && y0 == e->fY1) {
                SkASSERT(nullptr == e->fNext);
                e->fNext = base;
                e->fFlags = SkToU8(e->fFlags | Edge::kY1Link);
                break;
            }
        }
    }

    e = base;
    if ((base->fFlags & Edge::kY1Link) == 0) {
        // Find the edge that starts (fY0) where base ends (fY1); it becomes
        // base's successor in the loop.
        for (;;) {
            e += 1;
            if ((e->fFlags & Edge::kY0Link) == 0 && y1 == e->fY0) {
                SkASSERT(nullptr == base->fNext);
                base->fNext = e;
                e->fFlags = SkToU8(e->fFlags | Edge::kY0Link);
                break;
            }
        }
    }

    // Both endpoints are now connected.
    base->fFlags = Edge::kCompleteLink;
}
+
// Emit one closed boundary loop into 'path'. Starts at the first edge in
// [edge, stop) whose flags are still set, follows fNext links around the loop
// emitting alternating vertical/horizontal segments, and clears fFlags on
// each visited edge so later calls skip it. Returns the number of edges
// consumed from the array.
static int extract_path(Edge* edge, Edge* stop, SkPath* path) {
    while (0 == edge->fFlags) {
        edge++; // skip over "used" edges
    }

    SkASSERT(edge < stop);

    Edge* base = edge;
    Edge* prev = edge;
    edge = edge->fNext;
    SkASSERT(edge != base);

    int count = 1;
    path->moveTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY0));
    prev->fFlags = 0;
    do {
        if (prev->fX != edge->fX || prev->fY1 != edge->fY0) { // skip collinear
            path->lineTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY1)); // V
            path->lineTo(SkIntToScalar(edge->fX), SkIntToScalar(edge->fY0)); // H
        }
        prev = edge;
        edge = edge->fNext;
        count += 1;
        prev->fFlags = 0;  // mark consumed
    } while (edge != base);
    // close the final vertical side back to the start
    path->lineTo(SkIntToScalar(prev->fX), SkIntToScalar(prev->fY1)); // V
    path->close();
    return count;
}
+
+struct EdgeLT {
+ bool operator()(const Edge& a, const Edge& b) const {
+ return (a.fX == b.fX) ? a.top() < b.top() : a.fX < b.fX;
+ }
+};
+
// Append the region's boundary (one contour per connected loop) to 'path'.
// Returns false, leaving 'path' untouched, when the region is empty.
bool SkRegion::getBoundaryPath(SkPath* path) const {
    // path could safely be nullptr if we're empty, but the caller shouldn't
    // *know* that
    SkASSERT(path);

    if (this->isEmpty()) {
        return false;
    }

    const SkIRect& bounds = this->getBounds();

    if (this->isRect()) {
        // Fast path: a rectangular region is a single rect contour.
        SkRect r;
        r.set(bounds); // this converts the ints to scalars
        path->addRect(r);
        return true;
    }

    // Build a left and a right vertical edge for every run-rect.
    SkRegion::Iterator iter(*this);
    SkTDArray<Edge> edges;

    // NOTE(review): 'r' is bound once before the loop; this relies on
    // iter.rect() returning a reference to storage that iter.next() updates
    // in place -- confirm against SkRegion::Iterator's contract.
    for (const SkIRect& r = iter.rect(); !iter.done(); iter.next()) {
        Edge* edge = edges.append(2);
        edge[0].set(r.fLeft, r.fBottom, r.fTop);    // left side, bottom->top
        edge[1].set(r.fRight, r.fTop, r.fBottom);   // right side, top->bottom
    }

    int count = edges.size();
    Edge* start = edges.begin();
    Edge* stop = start + count;
    SkTQSort<Edge>(start, stop, EdgeLT());

    // Connect every edge into a closed loop.
    Edge* e;
    for (e = start; e != stop; e++) {
        find_link(e, stop);
    }

#ifdef SK_DEBUG
    for (e = start; e != stop; e++) {
        SkASSERT(e->fNext != nullptr);
        SkASSERT(e->fFlags == Edge::kCompleteLink);
    }
#endif

    // Each loop becomes one contour; extract until every edge is consumed.
    path->incReserve(count << 1);
    do {
        SkASSERT(count > 1);
        count -= extract_path(start, stop, path);
    } while (count > 0);

    return true;
}
diff --git a/gfx/skia/skia/src/core/SkResourceCache.cpp b/gfx/skia/skia/src/core/SkResourceCache.cpp
new file mode 100644
index 0000000000..2c864f74ae
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkResourceCache.cpp
@@ -0,0 +1,614 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkResourceCache.h"
+
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/chromium/SkDiscardableMemory.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkMessageBus.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkOpts.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+using namespace skia_private;
+
+DECLARE_SKMESSAGEBUS_MESSAGE(SkResourceCache::PurgeSharedIDMessage, uint32_t, true)
+
// SkMessageBus delivery filter for purge messages. The uint32_t identifies a
// destination inbox; this overload accepts every message unconditionally.
static inline bool SkShouldPostMessageToBus(
        const SkResourceCache::PurgeSharedIDMessage&, uint32_t) {
    // SkResourceCache is typically used as a singleton and we don't label Inboxes so all messages
    // go to all inboxes.
    return true;
}
+
+// This can be defined by the caller's build system
+//#define SK_USE_DISCARDABLE_SCALEDIMAGECACHE
+
+#ifndef SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT
+# define SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT 1024
+#endif
+
+#ifndef SK_DEFAULT_IMAGE_CACHE_LIMIT
+ #define SK_DEFAULT_IMAGE_CACHE_LIMIT (32 * 1024 * 1024)
+#endif
+
+void SkResourceCache::Key::init(void* nameSpace, uint64_t sharedID, size_t dataSize) {
+ SkASSERT(SkAlign4(dataSize) == dataSize);
+
+ // fCount32 and fHash are not hashed
+ static const int kUnhashedLocal32s = 2; // fCache32 + fHash
+ static const int kSharedIDLocal32s = 2; // fSharedID_lo + fSharedID_hi
+ static const int kHashedLocal32s = kSharedIDLocal32s + (sizeof(fNamespace) >> 2);
+ static const int kLocal32s = kUnhashedLocal32s + kHashedLocal32s;
+
+ static_assert(sizeof(Key) == (kLocal32s << 2), "unaccounted_key_locals");
+ static_assert(sizeof(Key) == offsetof(Key, fNamespace) + sizeof(fNamespace),
+ "namespace_field_must_be_last");
+
+ fCount32 = SkToS32(kLocal32s + (dataSize >> 2));
+ fSharedID_lo = (uint32_t)(sharedID & 0xFFFFFFFF);
+ fSharedID_hi = (uint32_t)(sharedID >> 32);
+ fNamespace = nameSpace;
+ // skip unhashed fields when computing the hash
+ fHash = SkOpts::hash(this->as32() + kUnhashedLocal32s,
+ (fCount32 - kUnhashedLocal32s) << 2);
+}
+
+#include "src/core/SkTHash.h"
+
namespace {
    // Adapter so SkTHashTable can index Rec* entries by their embedded Key.
    struct HashTraits {
        static uint32_t Hash(const SkResourceCache::Key& key) { return key.hash(); }
        static const SkResourceCache::Key& GetKey(const SkResourceCache::Rec* rec) {
            return rec->getKey();
        }
    };
} // namespace

// Hash index over the cache's entries. The LRU list owns the Recs; the hash
// stores only pointers, for O(1) lookup by Key.
class SkResourceCache::Hash :
    public SkTHashTable<SkResourceCache::Rec*, SkResourceCache::Key, HashTraits> {};
+
+
+///////////////////////////////////////////////////////////////////////////////
+
// Shared field initialization for both constructors: an empty LRU list, a
// fresh hash index, and no budget configured yet.
void SkResourceCache::init() {
    fHead = nullptr;
    fTail = nullptr;
    fHash = new Hash;
    fTotalBytesUsed = 0;
    fCount = 0;
    fSingleAllocationByteLimit = 0;

    // One of these should be explicitly set by the caller after we return.
    fTotalByteLimit = 0;
    fDiscardableFactory = nullptr;
}
+
// Discardable-memory-backed cache: entries are capped by count (see
// purgeAsNeeded), not by a byte budget.
SkResourceCache::SkResourceCache(DiscardableFactory factory)
    : fPurgeSharedIDInbox(SK_InvalidUniqueID) {
    this->init();
    fDiscardableFactory = factory;
}

// Malloc-backed cache with a fixed total byte budget.
SkResourceCache::SkResourceCache(size_t byteLimit)
    : fPurgeSharedIDInbox(SK_InvalidUniqueID) {
    this->init();
    fTotalByteLimit = byteLimit;
}
+
+SkResourceCache::~SkResourceCache() {
+ Rec* rec = fHead;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+ delete fHash;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkResourceCache::find(const Key& key, FindVisitor visitor, void* context) {
+ this->checkMessages();
+
+ if (auto found = fHash->find(key)) {
+ Rec* rec = *found;
+ if (visitor(*rec, context)) {
+ this->moveToHead(rec); // for our LRU
+ return true;
+ } else {
+ this->remove(rec); // stale
+ return false;
+ }
+ }
+ return false;
+}
+
+static void make_size_str(size_t size, SkString* str) {
+ const char suffix[] = { 'b', 'k', 'm', 'g', 't', 0 };
+ int i = 0;
+ while (suffix[i] && (size > 1024)) {
+ i += 1;
+ size >>= 10;
+ }
+ str->printf("%zu%c", size, suffix[i]);
+}
+
+static bool gDumpCacheTransactions;
+
// Insert 'rec' into the cache, taking ownership. If an entry with the same
// key already exists: a purgeable duplicate is evicted and replaced, while a
// non-purgeable one is kept -- it receives postAddInstall(payload) and the
// new rec is deleted instead. Either way, exactly one "install" happens.
void SkResourceCache::add(Rec* rec, void* payload) {
    this->checkMessages();

    SkASSERT(rec);
    // See if we already have this key (racy inserts, etc.)
    if (Rec** preexisting = fHash->find(rec->getKey())) {
        Rec* prev = *preexisting;
        if (prev->canBePurged()) {
            // if it can be purged, the install may fail, so we have to remove it
            this->remove(prev);
        } else {
            // if it cannot be purged, we reuse it and delete the new one
            prev->postAddInstall(payload);
            delete rec;
            return;
        }
    }

    this->addToHead(rec);   // links rec and updates byte/count accounting
    fHash->set(rec);
    rec->postAddInstall(payload);

    if (gDumpCacheTransactions) {
        SkString bytesStr, totalStr;
        make_size_str(rec->bytesUsed(), &bytesStr);
        make_size_str(fTotalBytesUsed, &totalStr);
        SkDebugf("RC: add %5s %12p key %08x -- total %5s, count %d\n",
                 bytesStr.c_str(), rec, rec->getHash(), totalStr.c_str(), fCount);
    }

    // since the new rec may push us over-budget, we perform a purge check now
    this->purgeAsNeeded();
}
+
// Unlink 'rec' from the LRU list and the hash, update accounting, and delete
// it. The caller must have verified rec->canBePurged().
void SkResourceCache::remove(Rec* rec) {
    SkASSERT(rec->canBePurged());
    size_t used = rec->bytesUsed();
    SkASSERT(used <= fTotalBytesUsed);

    // unlink from the LRU list first, then drop the hash index entry
    this->release(rec);
    fHash->remove(rec->getKey());

    fTotalBytesUsed -= used;
    fCount -= 1;

    //SkDebugf("-RC count [%3d] bytes %d\n", fCount, fTotalBytesUsed);

    if (gDumpCacheTransactions) {
        SkString bytesStr, totalStr;
        make_size_str(used, &bytesStr);
        make_size_str(fTotalBytesUsed, &totalStr);
        SkDebugf("RC: remove %5s %12p key %08x -- total %5s, count %d\n",
                 bytesStr.c_str(), rec, rec->getHash(), totalStr.c_str(), fCount);
    }

    delete rec;
}
+
+void SkResourceCache::purgeAsNeeded(bool forcePurge) {
+ size_t byteLimit;
+ int countLimit;
+
+ if (fDiscardableFactory) {
+ countLimit = SK_DISCARDABLEMEMORY_SCALEDIMAGECACHE_COUNT_LIMIT;
+ byteLimit = UINT32_MAX; // no limit based on bytes
+ } else {
+ countLimit = SK_MaxS32; // no limit based on count
+ byteLimit = fTotalByteLimit;
+ }
+
+ Rec* rec = fTail;
+ while (rec) {
+ if (!forcePurge && fTotalBytesUsed < byteLimit && fCount < countLimit) {
+ break;
+ }
+
+ Rec* prev = rec->fPrev;
+ if (rec->canBePurged()) {
+ this->remove(rec);
+ }
+ rec = prev;
+ }
+}
+
+//#define SK_TRACK_PURGE_SHAREDID_HITRATE
+
+#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
+static int gPurgeCallCounter;
+static int gPurgeHitCounter;
+#endif
+
// Evict every purgeable entry whose key carries 'sharedID'. A sharedID of 0
// means "no group" and is ignored. Used to drop all cached derivatives of a
// source object when that source dies.
void SkResourceCache::purgeSharedID(uint64_t sharedID) {
    if (0 == sharedID) {
        return;
    }

#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
    gPurgeCallCounter += 1;
    bool found = false;
#endif
    // go backwards, just like purgeAsNeeded, just to make the code similar.
    // could iterate either direction and still be correct.
    Rec* rec = fTail;
    while (rec) {
        Rec* prev = rec->fPrev;  // grab before remove() deletes rec
        if (rec->getKey().getSharedID() == sharedID) {
            // even though the "src" is now dead, caches could still be in-flight, so
            // we have to check if it can be removed.
            if (rec->canBePurged()) {
                this->remove(rec);
            }
#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
            found = true;
#endif
        }
        rec = prev;
    }

#ifdef SK_TRACK_PURGE_SHAREDID_HITRATE
    if (found) {
        gPurgeHitCounter += 1;
    }

    SkDebugf("PurgeShared calls=%d hits=%d rate=%g\n", gPurgeCallCounter, gPurgeHitCounter,
             gPurgeHitCounter * 100.0 / gPurgeCallCounter);
#endif
}
+
+void SkResourceCache::visitAll(Visitor visitor, void* context) {
+ // go backwards, just like purgeAsNeeded, just to make the code similar.
+ // could iterate either direction and still be correct.
+ Rec* rec = fTail;
+ while (rec) {
+ visitor(*rec, context);
+ rec = rec->fPrev;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkResourceCache::setTotalByteLimit(size_t newLimit) {
+ size_t prevLimit = fTotalByteLimit;
+ fTotalByteLimit = newLimit;
+ if (newLimit < prevLimit) {
+ this->purgeAsNeeded();
+ }
+ return prevLimit;
+}
+
+SkCachedData* SkResourceCache::newCachedData(size_t bytes) {
+ this->checkMessages();
+
+ if (fDiscardableFactory) {
+ SkDiscardableMemory* dm = fDiscardableFactory(bytes);
+ return dm ? new SkCachedData(bytes, dm) : nullptr;
+ } else {
+ return new SkCachedData(sk_malloc_throw(bytes), bytes);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkResourceCache::release(Rec* rec) {
+ Rec* prev = rec->fPrev;
+ Rec* next = rec->fNext;
+
+ if (!prev) {
+ SkASSERT(fHead == rec);
+ fHead = next;
+ } else {
+ prev->fNext = next;
+ }
+
+ if (!next) {
+ fTail = prev;
+ } else {
+ next->fPrev = prev;
+ }
+
+ rec->fNext = rec->fPrev = nullptr;
+}
+
+void SkResourceCache::moveToHead(Rec* rec) {
+ if (fHead == rec) {
+ return;
+ }
+
+ SkASSERT(fHead);
+ SkASSERT(fTail);
+
+ this->validate();
+
+ this->release(rec);
+
+ fHead->fPrev = rec;
+ rec->fNext = fHead;
+ fHead = rec;
+
+ this->validate();
+}
+
+void SkResourceCache::addToHead(Rec* rec) {
+ this->validate();
+
+ rec->fPrev = nullptr;
+ rec->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = rec;
+ }
+ fHead = rec;
+ if (!fTail) {
+ fTail = rec;
+ }
+ fTotalBytesUsed += rec->bytesUsed();
+ fCount += 1;
+
+ this->validate();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
// Debug-only consistency check: verifies head/tail/link symmetry and that
// walking the list in both directions reproduces fCount and fTotalBytesUsed.
void SkResourceCache::validate() const {
    // empty list: everything must be zeroed
    if (nullptr == fHead) {
        SkASSERT(nullptr == fTail);
        SkASSERT(0 == fTotalBytesUsed);
        return;
    }

    // single entry: it accounts for all bytes
    if (fHead == fTail) {
        SkASSERT(nullptr == fHead->fPrev);
        SkASSERT(nullptr == fHead->fNext);
        SkASSERT(fHead->bytesUsed() == fTotalBytesUsed);
        return;
    }

    SkASSERT(nullptr == fHead->fPrev);
    SkASSERT(fHead->fNext);
    SkASSERT(nullptr == fTail->fNext);
    SkASSERT(fTail->fPrev);

    // forward walk: accumulate bytes and count
    size_t used = 0;
    int count = 0;
    const Rec* rec = fHead;
    while (rec) {
        count += 1;
        used += rec->bytesUsed();
        SkASSERT(used <= fTotalBytesUsed);
        rec = rec->fNext;
    }
    SkASSERT(fCount == count);

    // backward walk: the same totals must drain back to zero
    rec = fTail;
    while (rec) {
        SkASSERT(count > 0);
        count -= 1;
        SkASSERT(used >= rec->bytesUsed());
        used -= rec->bytesUsed();
        rec = rec->fPrev;
    }

    SkASSERT(0 == count);
    SkASSERT(0 == used);
}
+#endif
+
// Print a one-line summary of the cache state (count, bytes, backing kind)
// after validating the internal structure.
void SkResourceCache::dump() const {
    this->validate();

    SkDebugf("SkResourceCache: count=%d bytes=%zu %s\n",
             fCount, fTotalBytesUsed, fDiscardableFactory ? "discardable" : "malloc");
}
+
// Set the per-allocation byte cap (0 = no cap); returns the previous value.
size_t SkResourceCache::setSingleAllocationByteLimit(size_t newLimit) {
    size_t oldLimit = fSingleAllocationByteLimit;
    fSingleAllocationByteLimit = newLimit;
    return oldLimit;
}

// Raw (unclamped) per-allocation byte cap; 0 means unset.
size_t SkResourceCache::getSingleAllocationByteLimit() const {
    return fSingleAllocationByteLimit;
}
+
+size_t SkResourceCache::getEffectiveSingleAllocationByteLimit() const {
+ // fSingleAllocationByteLimit == 0 means the caller is asking for our default
+ size_t limit = fSingleAllocationByteLimit;
+
+ // if we're not discardable (i.e. we are fixed-budget) then cap the single-limit
+ // to our budget.
+ if (nullptr == fDiscardableFactory) {
+ if (0 == limit) {
+ limit = fTotalByteLimit;
+ } else {
+ limit = std::min(limit, fTotalByteLimit);
+ }
+ }
+ return limit;
+}
+
+void SkResourceCache::checkMessages() {
+ TArray<PurgeSharedIDMessage> msgs;
+ fPurgeSharedIDInbox.poll(&msgs);
+ for (int i = 0; i < msgs.size(); ++i) {
+ this->purgeSharedID(msgs[i].fSharedID);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
// Lazily-created global cache instance; guarded by resource_cache_mutex().
static SkResourceCache* gResourceCache = nullptr;
// Intentionally leaked (never deleted) so the mutex outlives any static
// destruction ordering issues.
static SkMutex& resource_cache_mutex() {
    static SkMutex& mutex = *(new SkMutex);
    return mutex;
}
+
/** Must hold resource_cache_mutex() when calling. */
// Lazily creates the global cache on first use; the backing store (discardable
// vs malloc with a fixed budget) is chosen at compile time.
static SkResourceCache* get_cache() {
    // resource_cache_mutex() is always held when this is called, so we don't need to be fancy in here.
    resource_cache_mutex().assertHeld();
    if (nullptr == gResourceCache) {
#ifdef SK_USE_DISCARDABLE_SCALEDIMAGECACHE
        gResourceCache = new SkResourceCache(SkDiscardableMemory::Create);
#else
        gResourceCache = new SkResourceCache(SK_DEFAULT_IMAGE_CACHE_LIMIT);
#endif
    }
    return gResourceCache;
}
+
// The static methods below are the thread-safe facade over the global cache:
// each one acquires resource_cache_mutex() and forwards to the singleton.
size_t SkResourceCache::GetTotalBytesUsed() {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->getTotalBytesUsed();
}

size_t SkResourceCache::GetTotalByteLimit() {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->getTotalByteLimit();
}

size_t SkResourceCache::SetTotalByteLimit(size_t newLimit) {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->setTotalByteLimit(newLimit);
}

SkResourceCache::DiscardableFactory SkResourceCache::GetDiscardableFactory() {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->discardableFactory();
}

SkCachedData* SkResourceCache::NewCachedData(size_t bytes) {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->newCachedData(bytes);
}

void SkResourceCache::Dump() {
    SkAutoMutexExclusive am(resource_cache_mutex());
    get_cache()->dump();
}

size_t SkResourceCache::SetSingleAllocationByteLimit(size_t size) {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->setSingleAllocationByteLimit(size);
}

size_t SkResourceCache::GetSingleAllocationByteLimit() {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->getSingleAllocationByteLimit();
}

size_t SkResourceCache::GetEffectiveSingleAllocationByteLimit() {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->getEffectiveSingleAllocationByteLimit();
}

void SkResourceCache::PurgeAll() {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->purgeAll();
}

void SkResourceCache::CheckMessages() {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->checkMessages();
}

bool SkResourceCache::Find(const Key& key, FindVisitor visitor, void* context) {
    SkAutoMutexExclusive am(resource_cache_mutex());
    return get_cache()->find(key, visitor, context);
}

void SkResourceCache::Add(Rec* rec, void* payload) {
    SkAutoMutexExclusive am(resource_cache_mutex());
    get_cache()->add(rec, payload);
}

void SkResourceCache::VisitAll(Visitor visitor, void* context) {
    SkAutoMutexExclusive am(resource_cache_mutex());
    get_cache()->visitAll(visitor, context);
}

// Posting goes through the message bus (no mutex needed here); the purge is
// applied when a cache instance next drains its inbox in checkMessages().
void SkResourceCache::PostPurgeSharedID(uint64_t sharedID) {
    if (sharedID) {
        SkMessageBus<PurgeSharedIDMessage, uint32_t>::Post(PurgeSharedIDMessage(sharedID));
    }
}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkGraphics.h"
+#include "include/core/SkImageFilter.h"
+
// Public SkGraphics entry points: thin forwarders to the global
// SkResourceCache facade above.
size_t SkGraphics::GetResourceCacheTotalBytesUsed() {
    return SkResourceCache::GetTotalBytesUsed();
}

size_t SkGraphics::GetResourceCacheTotalByteLimit() {
    return SkResourceCache::GetTotalByteLimit();
}

size_t SkGraphics::SetResourceCacheTotalByteLimit(size_t newLimit) {
    return SkResourceCache::SetTotalByteLimit(newLimit);
}

size_t SkGraphics::GetResourceCacheSingleAllocationByteLimit() {
    return SkResourceCache::GetSingleAllocationByteLimit();
}

size_t SkGraphics::SetResourceCacheSingleAllocationByteLimit(size_t newLimit) {
    return SkResourceCache::SetSingleAllocationByteLimit(newLimit);
}

void SkGraphics::PurgeResourceCache() {
    // also drop the image-filter cache, which sits in front of this one
    SkImageFilter_Base::PurgeCache();
    return SkResourceCache::PurgeAll();
}
+
+/////////////
+
// Debug visitor: prints one line per cache entry.
static void dump_visitor(const SkResourceCache::Rec& rec, void*) {
    SkDebugf("RC: %12s bytes %9zu discardable %p\n",
             rec.getCategory(), rec.bytesUsed(), rec.diagnostic_only_getDiscardable());
}

// Test/diagnostic hook: print every entry via dump_visitor.
void SkResourceCache::TestDumpMemoryStatistics() {
    VisitAll(dump_visitor, nullptr);
}

// Visitor that reports one SkTraceMemoryDump record per entry, with the
// backing kind (discardable vs malloc) attributed correctly.
static void sk_trace_dump_visitor(const SkResourceCache::Rec& rec, void* context) {
    SkTraceMemoryDump* dump = static_cast<SkTraceMemoryDump*>(context);
    SkString dumpName = SkStringPrintf("skia/sk_resource_cache/%s_%p", rec.getCategory(), &rec);
    SkDiscardableMemory* discardable = rec.diagnostic_only_getDiscardable();
    if (discardable) {
        dump->setDiscardableMemoryBacking(dumpName.c_str(), *discardable);

        // The discardable memory size will be calculated by dumper, but we also dump what we think
        // the size of object in memory is irrespective of whether object is live or dead.
        dump->dumpNumericValue(dumpName.c_str(), "discardable_size", "bytes", rec.bytesUsed());
    } else {
        dump->dumpNumericValue(dumpName.c_str(), "size", "bytes", rec.bytesUsed());
        dump->setMemoryBacking(dumpName.c_str(), "malloc", nullptr);
    }
}

void SkResourceCache::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
    // Since resource could be backed by malloc or discardable, the cache always dumps detailed
    // stats to be accurate.
    VisitAll(sk_trace_dump_visitor, dump);
}
diff --git a/gfx/skia/skia/src/core/SkResourceCache.h b/gfx/skia/skia/src/core/SkResourceCache.h
new file mode 100644
index 0000000000..3477b295b3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkResourceCache.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkResourceCache_DEFINED
+#define SkResourceCache_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/core/SkMessageBus.h"
+
+class SkCachedData;
+class SkDiscardableMemory;
+class SkTraceMemoryDump;
+
+/**
+ * Cache object for bitmaps (with possible scale in X Y as part of the key).
+ *
+ * Multiple caches can be instantiated, but each instance is not implicitly
+ * thread-safe, so if a given instance is to be shared across threads, the
+ * caller must manage the access itself (e.g. via a mutex).
+ *
+ * As a convenience, a global instance is also defined, which can be safely
+ * accessed across threads via the static methods (e.g. FindAndLock, etc.).
+ */
class SkResourceCache {
public:
    struct Key {
        /** Key subclasses must call this after their own fields and data are initialized.
         *  All fields and data must be tightly packed.
         *  @param nameSpace must be unique per Key subclass.
         *  @param sharedID == 0 means ignore this field, does not support group purging.
         *  @param dataSize is size of fields and data of the subclass, must be a multiple of 4.
         */
        void init(void* nameSpace, uint64_t sharedID, size_t dataSize);

        /** Returns the size of this key. */
        size_t size() const {
            return fCount32 << 2;
        }

        void* getNamespace() const { return fNamespace; }
        uint64_t getSharedID() const { return ((uint64_t)fSharedID_hi << 32) | fSharedID_lo; }

        // This is only valid after having called init().
        uint32_t hash() const { return fHash; }

        // Word-by-word comparison of the entire key, subclass payload included.
        bool operator==(const Key& other) const {
            const uint32_t* a = this->as32();
            const uint32_t* b = other.as32();
            for (int i = 0; i < fCount32; ++i) {  // (This checks fCount == other.fCount first.)
                if (a[i] != b[i]) {
                    return false;
                }
            }
            return true;
        }

    private:
        int32_t  fCount32;   // local + user contents count32
        uint32_t fHash;
        // split uint64_t into hi and lo so we don't force ourselves to pad on 32bit machines.
        uint32_t fSharedID_lo;
        uint32_t fSharedID_hi;
        void*    fNamespace; // A unique namespace tag. This is hashed.
        /* uint32_t fContents32[] */

        const uint32_t* as32() const { return (const uint32_t*)this; }
    };

    struct Rec {
        typedef SkResourceCache::Key Key;

        Rec() {}
        virtual ~Rec() {}

        uint32_t getHash() const { return this->getKey().hash(); }

        virtual const Key& getKey() const = 0;
        virtual size_t bytesUsed() const = 0;

        // Called if the cache needs to purge/remove/delete the Rec. Default returns true.
        // Subclass may return false if there are outstanding references to it (e.g. bitmaps).
        // Will only be deleted/removed-from-the-cache when this returns true.
        virtual bool canBePurged() { return true; }

        // A rec is first created/initialized, and then added to the cache. As part of the add(),
        // the cache will callback into the rec with postAddInstall, passing in whatever payload
        // was passed to add/Add.
        //
        // This late-install callback exists because the process of add-ing might end up deleting
        // the new rec (if an existing rec in the cache has the same key and cannot be purged).
        // If the new rec will be deleted during add, the pre-existing one (with the same key)
        // will have postAddInstall() called on it instead, so that either way an "install" will
        // happen during the add.
        virtual void postAddInstall(void*) {}

        // for memory usage diagnostics
        virtual const char* getCategory() const = 0;
        virtual SkDiscardableMemory* diagnostic_only_getDiscardable() const { return nullptr; }

    private:
        // LRU list links; managed exclusively by SkResourceCache.
        Rec* fNext;
        Rec* fPrev;

        friend class SkResourceCache;
    };

    // Used with SkMessageBus
    struct PurgeSharedIDMessage {
        PurgeSharedIDMessage(uint64_t sharedID) : fSharedID(sharedID) {}
        uint64_t fSharedID;
    };

    // Opaque handle to a cache entry.
    typedef const Rec* ID;

    /**
     *  Callback function for find(). If called, the cache will have found a match for the
     *  specified Key, and will pass in the corresponding Rec, along with a caller-specified
     *  context. The function can read the data in Rec, and copy whatever it likes into context
     *  (casting context to whatever it really is).
     *
     *  The return value determines what the cache will do with the Rec. If the function returns
     *  true, then the Rec is considered "valid". If false is returned, the Rec will be considered
     *  "stale" and will be purged from the cache.
     */
    typedef bool (*FindVisitor)(const Rec&, void* context);

    /**
     *  Returns a locked/pinned SkDiscardableMemory instance for the specified
     *  number of bytes, or nullptr on failure.
     */
    typedef SkDiscardableMemory* (*DiscardableFactory)(size_t bytes);

    /*
     *  The following static methods are thread-safe wrappers around a global
     *  instance of this cache.
     */

    /**
     *  Returns true if the visitor was called on a matching Key, and the visitor returned true.
     *
     *  Find() will search the cache for the specified Key. If no match is found, return false and
     *  do not call the FindVisitor. If a match is found, return whatever the visitor returns.
     *  Its return value is interpreted to mean:
     *      true  : Rec is valid
     *      false : Rec is "stale" -- the cache will purge it.
     */
    static bool Find(const Key& key, FindVisitor, void* context);
    static void Add(Rec*, void* payload = nullptr);

    typedef void (*Visitor)(const Rec&, void* context);
    // Call the visitor for every Rec in the cache.
    static void VisitAll(Visitor, void* context);

    static size_t GetTotalBytesUsed();
    static size_t GetTotalByteLimit();
    static size_t SetTotalByteLimit(size_t newLimit);

    static size_t SetSingleAllocationByteLimit(size_t);
    static size_t GetSingleAllocationByteLimit();
    static size_t GetEffectiveSingleAllocationByteLimit();

    static void PurgeAll();
    static void CheckMessages();

    static void TestDumpMemoryStatistics();

    /** Dump memory usage statistics of every Rec in the cache using the
        SkTraceMemoryDump interface.
     */
    static void DumpMemoryStatistics(SkTraceMemoryDump* dump);

    /**
     *  Returns the DiscardableFactory used by the global cache, or nullptr.
     */
    static DiscardableFactory GetDiscardableFactory();

    static SkCachedData* NewCachedData(size_t bytes);

    static void PostPurgeSharedID(uint64_t sharedID);

    /**
     *  Call SkDebugf() with diagnostic information about the state of the cache
     */
    static void Dump();

    ///////////////////////////////////////////////////////////////////////////

    /**
     *  Construct the cache to call DiscardableFactory when it
     *  allocates memory for the pixels. In this mode, the cache has
     *  no explicit budget, and so methods like getTotalBytesUsed()
     *  and getTotalByteLimit() will return 0, and setTotalByteLimit
     *  will ignore its argument and return 0.
     */
    SkResourceCache(DiscardableFactory);

    /**
     *  Construct the cache, allocating memory with malloc, and respect the
     *  byteLimit, purging automatically when a new image is added to the cache
     *  that pushes the total bytesUsed over the limit. Note: The limit can be
     *  changed at runtime with setTotalByteLimit.
     */
    explicit SkResourceCache(size_t byteLimit);
    ~SkResourceCache();

    /**
     *  Returns true if the visitor was called on a matching Key, and the visitor returned true.
     *
     *  find() will search the cache for the specified Key. If no match is found, return false and
     *  do not call the FindVisitor. If a match is found, return whatever the visitor returns.
     *  Its return value is interpreted to mean:
     *      true  : Rec is valid
     *      false : Rec is "stale" -- the cache will purge it.
     */
    bool find(const Key&, FindVisitor, void* context);
    void add(Rec*, void* payload = nullptr);
    void visitAll(Visitor, void* context);

    size_t getTotalBytesUsed() const { return fTotalBytesUsed; }
    size_t getTotalByteLimit() const { return fTotalByteLimit; }

    /**
     *  This is respected by SkBitmapProcState::possiblyScaleImage.
     *  0 is no maximum at all; this is the default.
     *  setSingleAllocationByteLimit() returns the previous value.
     */
    size_t setSingleAllocationByteLimit(size_t maximumAllocationSize);
    size_t getSingleAllocationByteLimit() const;
    // returns the logical single allocation size (pinning against the budget when the cache
    // is not backed by discardable memory).
    size_t getEffectiveSingleAllocationByteLimit() const;

    /**
     *  Set the maximum number of bytes available to this cache. If the current
     *  cache exceeds this new value, it will be purged to try to fit within
     *  this new limit.
     */
    size_t setTotalByteLimit(size_t newLimit);

    void purgeSharedID(uint64_t sharedID);

    void purgeAll() {
        this->purgeAsNeeded(true);
    }

    DiscardableFactory discardableFactory() const { return fDiscardableFactory; }

    SkCachedData* newCachedData(size_t bytes);

    /**
     *  Call SkDebugf() with diagnostic information about the state of the cache
     */
    void dump() const;

private:
    // Doubly-linked LRU list: fHead is most-recently-used, fTail least.
    Rec* fHead;
    Rec* fTail;

    class Hash;
    Hash* fHash;   // Key -> Rec* index; entries are owned by the list above.

    DiscardableFactory fDiscardableFactory;

    size_t fTotalBytesUsed;
    size_t fTotalByteLimit;
    size_t fSingleAllocationByteLimit;
    int fCount;

    SkMessageBus<PurgeSharedIDMessage, uint32_t>::Inbox fPurgeSharedIDInbox;

    void checkMessages();
    void purgeAsNeeded(bool forcePurge = false);

    // linklist management
    void moveToHead(Rec*);
    void addToHead(Rec*);
    void release(Rec*);
    void remove(Rec*);

    void init();    // called by constructors

#ifdef SK_DEBUG
    void validate() const;
#else
    void validate() const {}
#endif
};
+#endif
diff --git a/gfx/skia/skia/src/core/SkRuntimeEffect.cpp b/gfx/skia/skia/src/core/SkRuntimeEffect.cpp
new file mode 100644
index 0000000000..740937a560
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRuntimeEffect.cpp
@@ -0,0 +1,2016 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkRuntimeEffect.h"
+
+#include "include/core/SkCapabilities.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkData.h"
+#include "include/core/SkSurface.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkOnce.h"
+#include "include/sksl/DSLCore.h"
+#include "src/base/SkUtils.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkLRUCache.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/codegen/SkSLRasterPipelineBuilder.h"
+#include "src/sksl/codegen/SkSLVMCodeGenerator.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/tracing/SkVMDebugTrace.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrMatrixEffect.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#include "src/image/SkImage_Gpu.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+// This flag can be enabled to use the new Raster Pipeline code generator for SkSL.
+//#define SK_ENABLE_SKSL_IN_RASTER_PIPELINE
+
+#ifdef SK_ENABLE_SKSL_IN_RASTER_PIPELINE
+#include "src/core/SkStreamPriv.h"
+#include "src/sksl/codegen/SkSLRasterPipelineCodeGenerator.h"
+#include "src/sksl/tracing/SkRPDebugTrace.h"
+constexpr bool kRPEnableLiveTrace = false;
+#endif
+
+#include <algorithm>
+
+using namespace skia_private;
+
+#if defined(SK_BUILD_FOR_DEBUGGER)
+ #define SK_LENIENT_SKSL_DESERIALIZATION 1
+#else
+ #define SK_LENIENT_SKSL_DESERIALIZATION 0
+#endif
+
+#ifdef SK_ENABLE_SKSL
+
+using ChildType = SkRuntimeEffect::ChildType;
+
+static bool init_uniform_type(const SkSL::Context& ctx,
+ const SkSL::Type* type,
+ SkRuntimeEffect::Uniform* v) {
+ using Type = SkRuntimeEffect::Uniform::Type;
+ if (type->matches(*ctx.fTypes.fFloat)) { v->type = Type::kFloat; return true; }
+ if (type->matches(*ctx.fTypes.fHalf)) { v->type = Type::kFloat; return true; }
+ if (type->matches(*ctx.fTypes.fFloat2)) { v->type = Type::kFloat2; return true; }
+ if (type->matches(*ctx.fTypes.fHalf2)) { v->type = Type::kFloat2; return true; }
+ if (type->matches(*ctx.fTypes.fFloat3)) { v->type = Type::kFloat3; return true; }
+ if (type->matches(*ctx.fTypes.fHalf3)) { v->type = Type::kFloat3; return true; }
+ if (type->matches(*ctx.fTypes.fFloat4)) { v->type = Type::kFloat4; return true; }
+ if (type->matches(*ctx.fTypes.fHalf4)) { v->type = Type::kFloat4; return true; }
+ if (type->matches(*ctx.fTypes.fFloat2x2)) { v->type = Type::kFloat2x2; return true; }
+ if (type->matches(*ctx.fTypes.fHalf2x2)) { v->type = Type::kFloat2x2; return true; }
+ if (type->matches(*ctx.fTypes.fFloat3x3)) { v->type = Type::kFloat3x3; return true; }
+ if (type->matches(*ctx.fTypes.fHalf3x3)) { v->type = Type::kFloat3x3; return true; }
+ if (type->matches(*ctx.fTypes.fFloat4x4)) { v->type = Type::kFloat4x4; return true; }
+ if (type->matches(*ctx.fTypes.fHalf4x4)) { v->type = Type::kFloat4x4; return true; }
+
+ if (type->matches(*ctx.fTypes.fInt)) { v->type = Type::kInt; return true; }
+ if (type->matches(*ctx.fTypes.fInt2)) { v->type = Type::kInt2; return true; }
+ if (type->matches(*ctx.fTypes.fInt3)) { v->type = Type::kInt3; return true; }
+ if (type->matches(*ctx.fTypes.fInt4)) { v->type = Type::kInt4; return true; }
+
+ return false;
+}
+
+SkRuntimeEffect::Uniform SkRuntimeEffectPriv::VarAsUniform(const SkSL::Variable& var,
+ const SkSL::Context& context,
+ size_t* offset) {
+ using Uniform = SkRuntimeEffect::Uniform;
+ SkASSERT(var.modifiers().fFlags & SkSL::Modifiers::kUniform_Flag);
+ Uniform uni;
+ uni.name = var.name();
+ uni.flags = 0;
+ uni.count = 1;
+
+ const SkSL::Type* type = &var.type();
+ if (type->isArray()) {
+ uni.flags |= Uniform::kArray_Flag;
+ uni.count = type->columns();
+ type = &type->componentType();
+ }
+
+ if (type->hasPrecision() && !type->highPrecision()) {
+ uni.flags |= Uniform::kHalfPrecision_Flag;
+ }
+
+ SkAssertResult(init_uniform_type(context, type, &uni));
+ if (var.modifiers().fLayout.fFlags & SkSL::Layout::Flag::kColor_Flag) {
+ uni.flags |= Uniform::kColor_Flag;
+ }
+
+ uni.offset = *offset;
+ *offset += uni.sizeInBytes();
+ SkASSERT(SkIsAlign4(*offset));
+ return uni;
+}
+
+sk_sp<const SkData> SkRuntimeEffectPriv::TransformUniforms(
+ SkSpan<const SkRuntimeEffect::Uniform> uniforms,
+ sk_sp<const SkData> originalData,
+ const SkColorSpace* dstCS) {
+ SkColorSpaceXformSteps steps(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ dstCS, kUnpremul_SkAlphaType);
+ return TransformUniforms(uniforms, std::move(originalData), steps);
+}
+
+sk_sp<const SkData> SkRuntimeEffectPriv::TransformUniforms(
+ SkSpan<const SkRuntimeEffect::Uniform> uniforms,
+ sk_sp<const SkData> originalData,
+ const SkColorSpaceXformSteps& steps) {
+ using Flags = SkRuntimeEffect::Uniform::Flags;
+ using Type = SkRuntimeEffect::Uniform::Type;
+
+ sk_sp<SkData> data = nullptr;
+ auto writableData = [&]() {
+ if (!data) {
+ data = SkData::MakeWithCopy(originalData->data(), originalData->size());
+ }
+ return data->writable_data();
+ };
+
+ for (const auto& u : uniforms) {
+ if (u.flags & Flags::kColor_Flag) {
+ SkASSERT(u.type == Type::kFloat3 || u.type == Type::kFloat4);
+ if (steps.flags.mask()) {
+ float* color = SkTAddOffset<float>(writableData(), u.offset);
+ if (u.type == Type::kFloat4) {
+ // RGBA, easy case
+ for (int i = 0; i < u.count; ++i) {
+ steps.apply(color);
+ color += 4;
+ }
+ } else {
+ // RGB, need to pad out to include alpha. Technically, this isn't necessary,
+ // because steps shouldn't include unpremul or premul, and thus shouldn't
+ // read or write the fourth element. But let's be safe.
+ float rgba[4];
+ for (int i = 0; i < u.count; ++i) {
+ memcpy(rgba, color, 3 * sizeof(float));
+ rgba[3] = 1.0f;
+ steps.apply(rgba);
+ memcpy(color, rgba, 3 * sizeof(float));
+ color += 3;
+ }
+ }
+ }
+ }
+ }
+ return data ? data : originalData;
+}
+
+const SkSL::RP::Program* SkRuntimeEffect::getRPProgram() const {
+ // Lazily compile the program the first time `getRPProgram` is called.
+ // By using an SkOnce, we avoid thread hazards and behave in a conceptually const way, but we
+ // can avoid the cost of invoking the RP code generator until it's actually needed.
+ fCompileRPProgramOnce([&] {
+#ifdef SK_ENABLE_SKSL_IN_RASTER_PIPELINE
+ SkSL::SkRPDebugTrace debugTrace;
+ const_cast<SkRuntimeEffect*>(this)->fRPProgram =
+ MakeRasterPipelineProgram(*fBaseProgram,
+ fMain,
+ kRPEnableLiveTrace ? &debugTrace : nullptr);
+ if (kRPEnableLiveTrace) {
+ if (fRPProgram) {
+ SkDebugf("-----\n\n");
+ SkDebugfStream stream;
+ fRPProgram->dump(&stream);
+ SkDebugf("\n-----\n\n");
+ } else {
+ SkDebugf("----- RP unsupported -----\n\n");
+ }
+ }
+#endif
+ });
+
+ return fRPProgram.get();
+}
+
+[[maybe_unused]] static SkSpan<const float> uniforms_as_span(
+ SkSpan<const SkRuntimeEffect::Uniform> uniforms,
+ sk_sp<const SkData> originalData,
+ const SkColorSpace* destColorSpace,
+ SkArenaAlloc* alloc) {
+ // Transform the uniforms into the destination colorspace.
+ sk_sp<const SkData> transformedData = SkRuntimeEffectPriv::TransformUniforms(uniforms,
+ originalData,
+ destColorSpace);
+ // If we get the original uniforms back as-is, it's safe to return a pointer into existing data.
+ if (originalData == transformedData) {
+ return SkSpan{static_cast<const float*>(originalData->data()),
+ originalData->size() / sizeof(float)};
+ }
+ // The transformed uniform data will go out of scope when this function returns, so we must copy
+ // it directly into the alloc.
+ int numBytes = transformedData->size();
+ int numFloats = numBytes / sizeof(float);
+ float* uniformsInAlloc = alloc->makeArrayDefault<float>(numFloats);
+ memcpy(uniformsInAlloc, transformedData->data(), numBytes);
+ return SkSpan{uniformsInAlloc, numFloats};
+}
+
+class RuntimeEffectRPCallbacks : public SkSL::RP::Callbacks {
+public:
+ RuntimeEffectRPCallbacks(const SkStageRec& s,
+ const SkShaderBase::MatrixRec& m,
+ SkSpan<const SkRuntimeEffect::ChildPtr> c,
+ SkSpan<const SkSL::SampleUsage> u)
+ : fStage(s), fMatrix(m), fChildren(c), fSampleUsages(u) {}
+
+ bool appendShader(int index) override {
+ if (SkShader* shader = fChildren[index].shader()) {
+ if (fSampleUsages[index].isPassThrough()) {
+ // Given a passthrough sample, the total-matrix is still as valid as before.
+ return as_SB(shader)->appendStages(fStage, fMatrix);
+ }
+ // For a non-passthrough sample, we need to explicitly mark the total-matrix as invalid.
+ SkShaderBase::MatrixRec nonPassthroughMatrix = fMatrix;
+ nonPassthroughMatrix.markTotalMatrixInvalid();
+ return as_SB(shader)->appendStages(fStage, nonPassthroughMatrix);
+ }
+ // Return the paint color when a null child shader is evaluated.
+ fStage.fPipeline->append_constant_color(fStage.fAlloc, fStage.fPaintColor);
+ return true;
+ }
+ bool appendColorFilter(int index) override {
+ if (SkColorFilter* colorFilter = fChildren[index].colorFilter()) {
+ return as_CFB(colorFilter)->appendStages(fStage, /*shaderIsOpaque=*/false);
+ }
+ // Return the original color as-is when a null child color filter is evaluated.
+ return true;
+ }
+ bool appendBlender(int index) override {
+ if (SkBlender* blender = fChildren[index].blender()) {
+ return as_BB(blender)->appendStages(fStage);
+ }
+ // Return a source-over blend when a null blender is evaluated.
+ fStage.fPipeline->append(SkRasterPipelineOp::srcover);
+ return true;
+ }
+
+ // TODO: If an effect calls these intrinsics more than once, we could cache and re-use the steps
+ // object(s), rather than re-creating them in the arena repeatedly.
+ void toLinearSrgb() override {
+ if (!fStage.fDstCS) {
+ // These intrinsics do nothing when color management is disabled
+ return;
+ }
+ fStage.fAlloc
+ ->make<SkColorSpaceXformSteps>(fStage.fDstCS, kUnpremul_SkAlphaType,
+ sk_srgb_linear_singleton(), kUnpremul_SkAlphaType)
+ ->apply(fStage.fPipeline);
+ }
+ void fromLinearSrgb() override {
+ if (!fStage.fDstCS) {
+ // These intrinsics do nothing when color management is disabled
+ return;
+ }
+ fStage.fAlloc
+ ->make<SkColorSpaceXformSteps>(sk_srgb_linear_singleton(), kUnpremul_SkAlphaType,
+ fStage.fDstCS, kUnpremul_SkAlphaType)
+ ->apply(fStage.fPipeline);
+ }
+
+ const SkStageRec& fStage;
+ const SkShaderBase::MatrixRec& fMatrix;
+ SkSpan<const SkRuntimeEffect::ChildPtr> fChildren;
+ SkSpan<const SkSL::SampleUsage> fSampleUsages;
+};
+
+bool SkRuntimeEffectPriv::CanDraw(const SkCapabilities* caps, const SkSL::Program* program) {
+ SkASSERT(caps && program);
+ SkASSERT(program->fConfig->enforcesSkSLVersion());
+ return program->fConfig->fRequiredSkSLVersion <= caps->skslVersion();
+}
+
+bool SkRuntimeEffectPriv::CanDraw(const SkCapabilities* caps, const SkRuntimeEffect* effect) {
+ SkASSERT(effect);
+ return CanDraw(caps, effect->fBaseProgram.get());
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+static bool flattenable_is_valid_as_child(const SkFlattenable* f) {
+ if (!f) { return true; }
+ switch (f->getFlattenableType()) {
+ case SkFlattenable::kSkShader_Type:
+ case SkFlattenable::kSkColorFilter_Type:
+ case SkFlattenable::kSkBlender_Type:
+ return true;
+ default:
+ return false;
+ }
+}
+
+SkRuntimeEffect::ChildPtr::ChildPtr(sk_sp<SkFlattenable> f) : fChild(std::move(f)) {
+ SkASSERT(flattenable_is_valid_as_child(fChild.get()));
+}
+
+static sk_sp<SkSL::SkVMDebugTrace> make_skvm_debug_trace(SkRuntimeEffect* effect,
+ const SkIPoint& coord) {
+ auto debugTrace = sk_make_sp<SkSL::SkVMDebugTrace>();
+ debugTrace->setSource(effect->source());
+ debugTrace->setTraceCoord(coord);
+ return debugTrace;
+}
+
+static ChildType child_type(const SkSL::Type& type) {
+ switch (type.typeKind()) {
+ case SkSL::Type::TypeKind::kBlender: return ChildType::kBlender;
+ case SkSL::Type::TypeKind::kColorFilter: return ChildType::kColorFilter;
+ case SkSL::Type::TypeKind::kShader: return ChildType::kShader;
+ default: SkUNREACHABLE;
+ }
+}
+
+static bool verify_child_effects(const std::vector<SkRuntimeEffect::Child>& reflected,
+ SkSpan<SkRuntimeEffect::ChildPtr> effectPtrs) {
+ // Verify that the number of passed-in child-effect pointers matches the SkSL code.
+ if (reflected.size() != effectPtrs.size()) {
+ return false;
+ }
+
+ // Verify that each child object's type matches its declared type in the SkSL.
+ for (size_t i = 0; i < effectPtrs.size(); ++i) {
+ std::optional<ChildType> effectType = effectPtrs[i].type();
+ if (effectType && effectType != reflected[i].type) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * If `effect` is specified, then the number and type of child objects are validated against the
+ * children() of `effect`. If it's nullptr, this is skipped, allowing deserialization of children,
+ * even when the effect could not be constructed (ie, due to malformed SkSL).
+ */
+static bool read_child_effects(SkReadBuffer& buffer,
+ const SkRuntimeEffect* effect,
+ TArray<SkRuntimeEffect::ChildPtr>* children) {
+ size_t childCount = buffer.read32();
+ if (effect && !buffer.validate(childCount == effect->children().size())) {
+ return false;
+ }
+
+ children->clear();
+ children->reserve_back(childCount);
+
+ for (size_t i = 0; i < childCount; i++) {
+ sk_sp<SkFlattenable> obj(buffer.readRawFlattenable());
+ if (!flattenable_is_valid_as_child(obj.get())) {
+ buffer.validate(false);
+ return false;
+ }
+ children->push_back(std::move(obj));
+ }
+
+ // If we are validating against an effect, make sure any (non-null) children are the right type
+ if (effect) {
+ auto childInfo = effect->children();
+ SkASSERT(childInfo.size() == SkToSizeT(children->size()));
+ for (size_t i = 0; i < childCount; i++) {
+ std::optional<ChildType> ct = (*children)[i].type();
+ if (ct.has_value() && (*ct) != childInfo[i].type) {
+ buffer.validate(false);
+ }
+ }
+ }
+
+ return buffer.isValid();
+}
+
+static void write_child_effects(SkWriteBuffer& buffer,
+ const std::vector<SkRuntimeEffect::ChildPtr>& children) {
+ buffer.write32(children.size());
+ for (const auto& child : children) {
+ buffer.writeFlattenable(child.flattenable());
+ }
+}
+
+static std::vector<skvm::Val> make_skvm_uniforms(skvm::Builder* p,
+ skvm::Uniforms* uniforms,
+ size_t inputSize,
+ const SkData& inputs) {
+ SkASSERTF(!(inputSize & 3), "inputSize was %zu, expected a multiple of 4", inputSize);
+
+ const int32_t* data = reinterpret_cast<const int32_t*>(inputs.data());
+ const size_t uniformCount = inputSize / sizeof(int32_t);
+ std::vector<skvm::Val> uniform;
+ uniform.reserve(uniformCount);
+ for (size_t index = 0; index < uniformCount; ++index) {
+ int32_t bits;
+ memcpy(&bits, data + index, sizeof(int32_t));
+ uniform.push_back(p->uniform32(uniforms->push(bits)).id);
+ }
+
+ return uniform;
+}
+
+SkSL::ProgramSettings SkRuntimeEffect::MakeSettings(const Options& options) {
+ SkSL::ProgramSettings settings;
+ settings.fInlineThreshold = 0;
+ settings.fForceNoInline = options.forceUnoptimized;
+ settings.fOptimize = !options.forceUnoptimized;
+ settings.fMaxVersionAllowed = options.maxVersionAllowed;
+
+ // SkSL created by the GPU backend is typically parsed, converted to a backend format,
+ // and the IR is immediately discarded. In that situation, it makes sense to use node
+ // pools to accelerate the IR allocations. Here, SkRuntimeEffect instances are often
+ // long-lived (especially those created internally for runtime FPs). In this situation,
+ // we're willing to pay for a slightly longer compile so that we don't waste huge
+ // amounts of memory.
+ settings.fUseMemoryPool = false;
+ return settings;
+}
+
+// TODO: Many errors aren't caught until we process the generated Program here. Catching those
+// in the IR generator would provide better errors messages (with locations).
+#define RETURN_FAILURE(...) return Result{nullptr, SkStringPrintf(__VA_ARGS__)}
+
+SkRuntimeEffect::Result SkRuntimeEffect::MakeFromSource(SkString sksl,
+ const Options& options,
+ SkSL::ProgramKind kind) {
+ SkSL::Compiler compiler(SkSL::ShaderCapsFactory::Standalone());
+ SkSL::ProgramSettings settings = MakeSettings(options);
+ std::unique_ptr<SkSL::Program> program =
+ compiler.convertProgram(kind, std::string(sksl.c_str(), sksl.size()), settings);
+
+ if (!program) {
+ RETURN_FAILURE("%s", compiler.errorText().c_str());
+ }
+
+ return MakeInternal(std::move(program), options, kind);
+}
+
+SkRuntimeEffect::Result SkRuntimeEffect::MakeInternal(std::unique_ptr<SkSL::Program> program,
+ const Options& options,
+ SkSL::ProgramKind kind) {
+ SkSL::Compiler compiler(SkSL::ShaderCapsFactory::Standalone());
+
+ uint32_t flags = 0;
+ switch (kind) {
+ case SkSL::ProgramKind::kPrivateRuntimeColorFilter:
+ case SkSL::ProgramKind::kRuntimeColorFilter:
+ // TODO(skia:11209): Figure out a way to run ES3+ color filters on the CPU. This doesn't
+ // need to be fast - it could just be direct IR evaluation. But without it, there's no
+ // way for us to fully implement the SkColorFilter API (eg, `filterColor`)
+ if (!SkRuntimeEffectPriv::CanDraw(SkCapabilities::RasterBackend().get(),
+ program.get())) {
+ RETURN_FAILURE("SkSL color filters must target #version 100");
+ }
+ flags |= kAllowColorFilter_Flag;
+ break;
+ case SkSL::ProgramKind::kPrivateRuntimeShader:
+ case SkSL::ProgramKind::kRuntimeShader:
+ flags |= kAllowShader_Flag;
+ break;
+ case SkSL::ProgramKind::kPrivateRuntimeBlender:
+ case SkSL::ProgramKind::kRuntimeBlender:
+ flags |= kAllowBlender_Flag;
+ break;
+ default:
+ SkUNREACHABLE;
+ }
+
+ // Find 'main', then locate the sample coords parameter. (It might not be present.)
+ const SkSL::FunctionDeclaration* main = program->getFunction("main");
+ if (!main) {
+ RETURN_FAILURE("missing 'main' function");
+ }
+ const auto& mainParams = main->parameters();
+ auto iter = std::find_if(mainParams.begin(), mainParams.end(), [](const SkSL::Variable* p) {
+ return p->modifiers().fLayout.fBuiltin == SK_MAIN_COORDS_BUILTIN;
+ });
+ const SkSL::ProgramUsage::VariableCounts sampleCoordsUsage =
+ iter != mainParams.end() ? program->usage()->get(**iter)
+ : SkSL::ProgramUsage::VariableCounts{};
+
+ if (sampleCoordsUsage.fRead || sampleCoordsUsage.fWrite) {
+ flags |= kUsesSampleCoords_Flag;
+ }
+
+ // Color filters and blends are not allowed to depend on position (local or device) in any way.
+ // The signature of main, and the declarations in sksl_rt_colorfilter/sksl_rt_blend should
+ // guarantee this.
+ if (flags & (kAllowColorFilter_Flag | kAllowBlender_Flag)) {
+ SkASSERT(!(flags & kUsesSampleCoords_Flag));
+ SkASSERT(!SkSL::Analysis::ReferencesFragCoords(*program));
+ }
+
+ if (SkSL::Analysis::CallsSampleOutsideMain(*program)) {
+ flags |= kSamplesOutsideMain_Flag;
+ }
+
+ // Determine if this effect uses of the color transform intrinsics. Effects need to know this
+ // so they can allocate color transform objects, etc.
+ if (SkSL::Analysis::CallsColorTransformIntrinsics(*program)) {
+ flags |= kUsesColorTransform_Flag;
+ }
+
+ // Shaders are the only thing that cares about this, but it's inexpensive (and safe) to call.
+ if (SkSL::Analysis::ReturnsOpaqueColor(*main->definition())) {
+ flags |= kAlwaysOpaque_Flag;
+ }
+
+ size_t offset = 0;
+ std::vector<Uniform> uniforms;
+ std::vector<Child> children;
+ std::vector<SkSL::SampleUsage> sampleUsages;
+ int elidedSampleCoords = 0;
+ const SkSL::Context& ctx(compiler.context());
+
+ // Go through program elements, pulling out information that we need
+ for (const SkSL::ProgramElement* elem : program->elements()) {
+ // Variables (uniform, etc.)
+ if (elem->is<SkSL::GlobalVarDeclaration>()) {
+ const SkSL::GlobalVarDeclaration& global = elem->as<SkSL::GlobalVarDeclaration>();
+ const SkSL::VarDeclaration& varDecl = global.declaration()->as<SkSL::VarDeclaration>();
+
+ const SkSL::Variable& var = *varDecl.var();
+ const SkSL::Type& varType = var.type();
+
+ // Child effects that can be sampled ('shader', 'colorFilter', 'blender')
+ if (varType.isEffectChild()) {
+ Child c;
+ c.name = var.name();
+ c.type = child_type(varType);
+ c.index = children.size();
+ children.push_back(c);
+ auto usage = SkSL::Analysis::GetSampleUsage(
+ *program, var, sampleCoordsUsage.fWrite != 0, &elidedSampleCoords);
+ // If the child is never sampled, we pretend that it's actually in PassThrough mode.
+ // Otherwise, the GP code for collecting transforms and emitting transform code gets
+ // very confused, leading to asserts and bad (backend) shaders. There's an implicit
+ // assumption that every FP is used by its parent. (skbug.com/12429)
+ sampleUsages.push_back(usage.isSampled() ? usage
+ : SkSL::SampleUsage::PassThrough());
+ }
+ // 'uniform' variables
+ else if (var.modifiers().fFlags & SkSL::Modifiers::kUniform_Flag) {
+ uniforms.push_back(SkRuntimeEffectPriv::VarAsUniform(var, ctx, &offset));
+ }
+ }
+ }
+
+ // If the sample coords are never written to, then we will have converted sample calls that use
+ // them unmodified into "passthrough" sampling. If all references to the sample coords were of
+ // that form, then we don't actually "use" sample coords. We unset the flag to prevent creating
+ // an extra (unused) varying holding the coords.
+ if (elidedSampleCoords == sampleCoordsUsage.fRead && sampleCoordsUsage.fWrite == 0) {
+ flags &= ~kUsesSampleCoords_Flag;
+ }
+
+#undef RETURN_FAILURE
+
+ sk_sp<SkRuntimeEffect> effect(new SkRuntimeEffect(std::move(program),
+ options,
+ *main->definition(),
+ std::move(uniforms),
+ std::move(children),
+ std::move(sampleUsages),
+ flags));
+ return Result{std::move(effect), SkString()};
+}
+
+sk_sp<SkRuntimeEffect> SkRuntimeEffect::makeUnoptimizedClone() {
+ // Compile with maximally-permissive options; any restrictions we need to enforce were already
+ // handled when the original SkRuntimeEffect was made. We don't keep around the Options struct
+ // from when it was initially made so we don't know what was originally requested.
+ Options options;
+ options.forceUnoptimized = true;
+ options.maxVersionAllowed = SkSL::Version::k300;
+ options.allowPrivateAccess = true;
+
+ // We do know the original ProgramKind, so we don't need to re-derive it.
+ SkSL::ProgramKind kind = fBaseProgram->fConfig->fKind;
+
+ // Attempt to recompile the program's source with optimizations off. This ensures that the
+ // Debugger shows results on every line, even for things that could be optimized away (static
+ // branches, unused variables, etc). If recompilation fails, we fall back to the original code.
+ SkSL::Compiler compiler(SkSL::ShaderCapsFactory::Standalone());
+ SkSL::ProgramSettings settings = MakeSettings(options);
+ std::unique_ptr<SkSL::Program> program =
+ compiler.convertProgram(kind, *fBaseProgram->fSource, settings);
+
+ if (!program) {
+ // Turning off compiler optimizations can theoretically expose a program error that
+ // had been optimized away (e.g. "all control paths return a value" might be found on a path
+ // that is completely eliminated in the optimized program).
+ // If this happens, the debugger will just have to show the optimized code.
+ return sk_ref_sp(this);
+ }
+
+ SkRuntimeEffect::Result result = MakeInternal(std::move(program), options, kind);
+ if (!result.effect) {
+ // Nothing in MakeInternal should change as a result of optimizations being toggled.
+ SkDEBUGFAILF("makeUnoptimizedClone: MakeInternal failed\n%s",
+ result.errorText.c_str());
+ return sk_ref_sp(this);
+ }
+
+ return result.effect;
+}
+
+SkRuntimeEffect::Result SkRuntimeEffect::MakeForColorFilter(SkString sksl, const Options& options) {
+ auto programKind = options.allowPrivateAccess ? SkSL::ProgramKind::kPrivateRuntimeColorFilter
+ : SkSL::ProgramKind::kRuntimeColorFilter;
+ auto result = MakeFromSource(std::move(sksl), options, programKind);
+ SkASSERT(!result.effect || result.effect->allowColorFilter());
+ return result;
+}
+
+SkRuntimeEffect::Result SkRuntimeEffect::MakeForShader(SkString sksl, const Options& options) {
+ auto programKind = options.allowPrivateAccess ? SkSL::ProgramKind::kPrivateRuntimeShader
+ : SkSL::ProgramKind::kRuntimeShader;
+ auto result = MakeFromSource(std::move(sksl), options, programKind);
+ SkASSERT(!result.effect || result.effect->allowShader());
+ return result;
+}
+
+SkRuntimeEffect::Result SkRuntimeEffect::MakeForBlender(SkString sksl, const Options& options) {
+ auto programKind = options.allowPrivateAccess ? SkSL::ProgramKind::kPrivateRuntimeBlender
+ : SkSL::ProgramKind::kRuntimeBlender;
+ auto result = MakeFromSource(std::move(sksl), options, programKind);
+ SkASSERT(!result.effect || result.effect->allowBlender());
+ return result;
+}
+
+sk_sp<SkRuntimeEffect> SkMakeCachedRuntimeEffect(
+ SkRuntimeEffect::Result (*make)(SkString sksl, const SkRuntimeEffect::Options&),
+ SkString sksl) {
+ SK_BEGIN_REQUIRE_DENSE
+ struct Key {
+ uint32_t skslHashA;
+ uint32_t skslHashB;
+
+ bool operator==(const Key& that) const {
+ return this->skslHashA == that.skslHashA
+ && this->skslHashB == that.skslHashB;
+ }
+
+ explicit Key(const SkString& sksl)
+ : skslHashA(SkOpts::hash(sksl.c_str(), sksl.size(), 0))
+ , skslHashB(SkOpts::hash(sksl.c_str(), sksl.size(), 1)) {}
+ };
+ SK_END_REQUIRE_DENSE
+
+ static auto* mutex = new SkMutex;
+ static auto* cache = new SkLRUCache<Key, sk_sp<SkRuntimeEffect>>(11/*totally arbitrary*/);
+
+ Key key(sksl);
+ {
+ SkAutoMutexExclusive _(*mutex);
+ if (sk_sp<SkRuntimeEffect>* found = cache->find(key)) {
+ return *found;
+ }
+ }
+
+ SkRuntimeEffect::Options options;
+ SkRuntimeEffectPriv::AllowPrivateAccess(&options);
+
+ auto [effect, err] = make(std::move(sksl), options);
+ if (!effect) {
+ SkDEBUGFAILF("%s", err.c_str());
+ return nullptr;
+ }
+ SkASSERT(err.isEmpty());
+
+ {
+ SkAutoMutexExclusive _(*mutex);
+ cache->insert_or_update(key, effect);
+ }
+ return effect;
+}
+
+static size_t uniform_element_size(SkRuntimeEffect::Uniform::Type type) {
+ switch (type) {
+ case SkRuntimeEffect::Uniform::Type::kFloat: return sizeof(float);
+ case SkRuntimeEffect::Uniform::Type::kFloat2: return sizeof(float) * 2;
+ case SkRuntimeEffect::Uniform::Type::kFloat3: return sizeof(float) * 3;
+ case SkRuntimeEffect::Uniform::Type::kFloat4: return sizeof(float) * 4;
+
+ case SkRuntimeEffect::Uniform::Type::kFloat2x2: return sizeof(float) * 4;
+ case SkRuntimeEffect::Uniform::Type::kFloat3x3: return sizeof(float) * 9;
+ case SkRuntimeEffect::Uniform::Type::kFloat4x4: return sizeof(float) * 16;
+
+ case SkRuntimeEffect::Uniform::Type::kInt: return sizeof(int);
+ case SkRuntimeEffect::Uniform::Type::kInt2: return sizeof(int) * 2;
+ case SkRuntimeEffect::Uniform::Type::kInt3: return sizeof(int) * 3;
+ case SkRuntimeEffect::Uniform::Type::kInt4: return sizeof(int) * 4;
+ default: SkUNREACHABLE;
+ }
+}
+
+size_t SkRuntimeEffect::Uniform::sizeInBytes() const {
+ static_assert(sizeof(int) == sizeof(float));
+ return uniform_element_size(this->type) * this->count;
+}
+
+SkRuntimeEffect::SkRuntimeEffect(std::unique_ptr<SkSL::Program> baseProgram,
+ const Options& options,
+ const SkSL::FunctionDefinition& main,
+ std::vector<Uniform>&& uniforms,
+ std::vector<Child>&& children,
+ std::vector<SkSL::SampleUsage>&& sampleUsages,
+ uint32_t flags)
+ : fHash(SkOpts::hash_fn(baseProgram->fSource->c_str(), baseProgram->fSource->size(), 0))
+ , fBaseProgram(std::move(baseProgram))
+ , fMain(main)
+ , fUniforms(std::move(uniforms))
+ , fChildren(std::move(children))
+ , fSampleUsages(std::move(sampleUsages))
+ , fFlags(flags) {
+ SkASSERT(fBaseProgram);
+ SkASSERT(fChildren.size() == fSampleUsages.size());
+
+ // Everything from SkRuntimeEffect::Options which could influence the compiled result needs to
+ // be accounted for in `fHash`. If you've added a new field to Options and caused the static-
+ // assert below to trigger, please incorporate your field into `fHash` and update KnownOptions
+ // to match the layout of Options.
+ struct KnownOptions {
+ bool forceUnoptimized, allowPrivateAccess;
+ SkSL::Version maxVersionAllowed;
+ };
+ static_assert(sizeof(Options) == sizeof(KnownOptions));
+ fHash = SkOpts::hash_fn(&options.forceUnoptimized,
+ sizeof(options.forceUnoptimized), fHash);
+ fHash = SkOpts::hash_fn(&options.allowPrivateAccess,
+ sizeof(options.allowPrivateAccess), fHash);
+ fHash = SkOpts::hash_fn(&options.maxVersionAllowed,
+ sizeof(options.maxVersionAllowed), fHash);
+
+ fFilterColorProgram = SkFilterColorProgram::Make(this);
+}
+
+SkRuntimeEffect::~SkRuntimeEffect() = default;
+
+const std::string& SkRuntimeEffect::source() const {
+ return *fBaseProgram->fSource;
+}
+
+size_t SkRuntimeEffect::uniformSize() const {
+ return fUniforms.empty() ? 0
+ : SkAlign4(fUniforms.back().offset + fUniforms.back().sizeInBytes());
+}
+
+const SkRuntimeEffect::Uniform* SkRuntimeEffect::findUniform(std::string_view name) const {
+ auto iter = std::find_if(fUniforms.begin(), fUniforms.end(), [name](const Uniform& u) {
+ return u.name == name;
+ });
+ return iter == fUniforms.end() ? nullptr : &(*iter);
+}
+
+const SkRuntimeEffect::Child* SkRuntimeEffect::findChild(std::string_view name) const {
+ auto iter = std::find_if(fChildren.begin(), fChildren.end(), [name](const Child& c) {
+ return c.name == name;
+ });
+ return iter == fChildren.end() ? nullptr : &(*iter);
+}
+
+// Builds a reusable, per-effect skvm program that filters a single color.
+// Returns nullptr when the effect cannot be handled by this fast path (not a
+// color filter, uses color-transform intrinsics, has non-color-filter
+// children, or samples children with colors we cannot describe); callers then
+// fall back to a slower per-instance program.
+std::unique_ptr<SkFilterColorProgram> SkFilterColorProgram::Make(const SkRuntimeEffect* effect) {
+ // Our per-effect program technique is only possible (and necessary) for color filters
+ if (!effect->allowColorFilter()) {
+ return nullptr;
+ }
+
+ // TODO(skia:10479): Can we support this? When the color filter is invoked like this, there
+ // may not be a real working space? If there is, we'd need to add it as a parameter to eval,
+ // and then coordinate where the relevant uniforms go. For now, just fall back to the slow
+ // path if we see these intrinsics being called.
+ if (effect->usesColorTransform()) {
+ return nullptr;
+ }
+
+ // We require that any children are color filters (not shaders or blenders). In theory, we could
+ // detect the coords being passed to shader children, and replicate those calls, but that's very
+ // complicated, and has diminishing returns. (eg, for table lookup color filters).
+ if (!std::all_of(effect->fChildren.begin(),
+ effect->fChildren.end(),
+ [](const SkRuntimeEffect::Child& c) {
+ return c.type == ChildType::kColorFilter;
+ })) {
+ return nullptr;
+ }
+
+ skvm::Builder p;
+
+ // For SkSL uniforms, we reserve space and allocate skvm Uniform ids for each one. When we run
+ // the program, these ids will be loads from the *first* arg ptr, the uniform data of the
+ // specific color filter instance.
+ skvm::Uniforms skslUniforms{p.uniform(), 0};
+ // Uniform data is treated as a flat array of 4-byte slots.
+ const size_t uniformCount = effect->uniformSize() / 4;
+ std::vector<skvm::Val> uniform;
+ uniform.reserve(uniformCount);
+ for (size_t i = 0; i < uniformCount; i++) {
+ uniform.push_back(p.uniform32(skslUniforms.push(/*placeholder*/ 0)).id);
+ }
+
+ // We reserve a uniform color for each child invocation. While processing the SkSL, we record
+ // the index of the child, and the color being filtered (in a SampleCall struct).
+ // When we run this program later, we use the SampleCall to evaluate the correct child, and
+ // populate these uniform values. These Uniform ids are loads from the *second* arg ptr.
+ // If the color being passed is too complex for us to describe and re-create using SampleCall,
+ // we are unable to use this per-effect program, and callers will need to fall back to another
+ // (slower) implementation.
+ skvm::Uniforms childColorUniforms{p.uniform(), 0};
+ skvm::Color inputColor = p.uniformColor(/*placeholder*/ SkColors::kWhite, &childColorUniforms);
+ std::vector<SkFilterColorProgram::SampleCall> sampleCalls;
+
+ // Callbacks invoked while translating the SkSL to skvm. Each child sample is
+ // classified into a SampleCall kind (input color, immediate, previous child
+ // result, or simple uniform); anything else marks the program unsupported.
+ class Callbacks : public SkSL::SkVMCallbacks {
+ public:
+ Callbacks(skvm::Builder* builder,
+ const skvm::Uniforms* skslUniforms,
+ skvm::Uniforms* childColorUniforms,
+ skvm::Color inputColor,
+ std::vector<SkFilterColorProgram::SampleCall>* sampleCalls)
+ : fBuilder(builder)
+ , fSkslUniforms(skslUniforms)
+ , fChildColorUniforms(childColorUniforms)
+ , fInputColor(inputColor)
+ , fSampleCalls(sampleCalls) {}
+
+ // True (and sets *baseOffset) when c is four consecutive 4-byte loads
+ // from the SkSL uniform block - i.e. a plain `uniform half4` value.
+ bool isSimpleUniform(skvm::Color c, int* baseOffset) {
+ skvm::Uniform ur, ug, ub, ua;
+ if (!fBuilder->allUniform(c.r.id, &ur, c.g.id, &ug, c.b.id, &ub, c.a.id, &ua)) {
+ return false;
+ }
+ skvm::Ptr uniPtr = fSkslUniforms->base;
+ if (ur.ptr != uniPtr || ug.ptr != uniPtr || ub.ptr != uniPtr || ua.ptr != uniPtr) {
+ return false;
+ }
+ *baseOffset = ur.offset;
+ return ug.offset == ur.offset + 4 &&
+ ub.offset == ur.offset + 8 &&
+ ua.offset == ur.offset + 12;
+ }
+
+ // Compares two colors by skvm value id (structural identity, not value).
+ static bool IDsEqual(skvm::Color x, skvm::Color y) {
+ return x.r.id == y.r.id && x.g.id == y.g.id && x.b.id == y.b.id && x.a.id == y.a.id;
+ }
+
+ skvm::Color sampleColorFilter(int ix, skvm::Color c) override {
+ skvm::Color result =
+ fBuilder->uniformColor(/*placeholder*/ SkColors::kWhite, fChildColorUniforms);
+ SkFilterColorProgram::SampleCall call;
+ call.fChild = ix;
+ if (IDsEqual(c, fInputColor)) {
+ call.fKind = SkFilterColorProgram::SampleCall::Kind::kInputColor;
+ } else if (fBuilder->allImm(c.r.id, &call.fImm.fR,
+ c.g.id, &call.fImm.fG,
+ c.b.id, &call.fImm.fB,
+ c.a.id, &call.fImm.fA)) {
+ call.fKind = SkFilterColorProgram::SampleCall::Kind::kImmediate;
+ } else if (auto it = std::find_if(fChildColors.begin(),
+ fChildColors.end(),
+ [&](skvm::Color x) { return IDsEqual(x, c); });
+ it != fChildColors.end()) {
+ call.fKind = SkFilterColorProgram::SampleCall::Kind::kPrevious;
+ call.fPrevious = SkTo<int>(it - fChildColors.begin());
+ } else if (isSimpleUniform(c, &call.fOffset)) {
+ call.fKind = SkFilterColorProgram::SampleCall::Kind::kUniform;
+ } else {
+ // Sampled color is too complex to reconstruct at eval time.
+ fAllSampleCallsSupported = false;
+ }
+ fSampleCalls->push_back(call);
+ fChildColors.push_back(result);
+ return result;
+ }
+
+ // We did an early return from this function if we saw any child that wasn't a shader, so
+ // it should be impossible for either of these callbacks to occur:
+ skvm::Color sampleShader(int, skvm::Coord) override {
+ SkDEBUGFAIL("Unexpected child type");
+ return {};
+ }
+ skvm::Color sampleBlender(int, skvm::Color, skvm::Color) override {
+ SkDEBUGFAIL("Unexpected child type");
+ return {};
+ }
+
+ // We did an early return from this function if we saw any call to these intrinsics, so it
+ // should be impossible for either of these callbacks to occur:
+ skvm::Color toLinearSrgb(skvm::Color color) override {
+ SkDEBUGFAIL("Unexpected color transform intrinsic");
+ return {};
+ }
+ skvm::Color fromLinearSrgb(skvm::Color color) override {
+ SkDEBUGFAIL("Unexpected color transform intrinsic");
+ return {};
+ }
+
+ skvm::Builder* fBuilder;
+ const skvm::Uniforms* fSkslUniforms;
+ skvm::Uniforms* fChildColorUniforms;
+ skvm::Color fInputColor;
+ std::vector<SkFilterColorProgram::SampleCall>* fSampleCalls;
+
+ // Result color of each prior child sample, for kPrevious detection.
+ std::vector<skvm::Color> fChildColors;
+ bool fAllSampleCallsSupported = true;
+ };
+ Callbacks callbacks(&p, &skslUniforms, &childColorUniforms, inputColor, &sampleCalls);
+
+ // Emit the skvm instructions for the SkSL
+ skvm::Coord zeroCoord = {p.splat(0.0f), p.splat(0.0f)};
+ skvm::Color result = SkSL::ProgramToSkVM(*effect->fBaseProgram,
+ effect->fMain,
+ &p,
+ /*debugTrace=*/nullptr,
+ SkSpan(uniform),
+ /*device=*/zeroCoord,
+ /*local=*/zeroCoord,
+ inputColor,
+ inputColor,
+ &callbacks);
+
+ // Then store the result to the *third* arg ptr
+ p.store({skvm::PixelFormat::FLOAT, 32, 32, 32, 32, 0, 32, 64, 96},
+ p.varying<skvm::F32>(), result);
+
+ if (!callbacks.fAllSampleCallsSupported) {
+ return nullptr;
+ }
+
+ // This is conservative. If a filter gets the input color by sampling a null child, we'll
+ // return an (acceptable) false negative. All internal runtime color filters should work.
+ bool alphaUnchanged = (inputColor.a.id == result.a.id);
+
+ // We'll use this program to filter one color at a time, don't bother with jit
+ return std::unique_ptr<SkFilterColorProgram>(
+ new SkFilterColorProgram(p.done(/*debug_name=*/nullptr, /*allow_jit=*/false),
+ std::move(sampleCalls),
+ alphaUnchanged));
+}
+
+// Takes ownership of the compiled skvm program and the recorded child sample
+// descriptions; alphaUnchanged caches whether the program preserves alpha.
+SkFilterColorProgram::SkFilterColorProgram(skvm::Program program,
+ std::vector<SampleCall> sampleCalls,
+ bool alphaUnchanged)
+ : fProgram(std::move(program))
+ , fSampleCalls(std::move(sampleCalls))
+ , fAlphaUnchanged(alphaUnchanged) {}
+
+// Filters a single premultiplied color. `uniformData` is the instance's raw
+// uniform block; `evalChild(index, color)` evaluates child color filter
+// `index` on `color`. Children are pre-evaluated here per the recorded
+// SampleCalls, then the skvm program consumes their results.
+SkPMColor4f SkFilterColorProgram::eval(
+ const SkPMColor4f& inColor,
+ const void* uniformData,
+ std::function<SkPMColor4f(int, SkPMColor4f)> evalChild) const {
+ // Our program defines sampling any child as returning a uniform color. Assemble a buffer
+ // containing those colors. The first entry is always the input color. Subsequent entries
+ // are for each sample call, based on the information in fSampleCalls. For any null children,
+ // the sample result is just the passed-in color.
+ SkSTArray<4, SkPMColor4f, true> childColors;
+ childColors.push_back(inColor);
+ for (const auto& s : fSampleCalls) {
+ SkPMColor4f passedColor = inColor;
+ switch (s.fKind) {
+ case SampleCall::Kind::kInputColor: break;
+ case SampleCall::Kind::kImmediate: passedColor = s.fImm; break;
+ case SampleCall::Kind::kPrevious: passedColor = childColors[s.fPrevious + 1]; break;
+ case SampleCall::Kind::kUniform:
+ passedColor = *SkTAddOffset<const SkPMColor4f>(uniformData, s.fOffset);
+ break;
+ }
+ childColors.push_back(evalChild(s.fChild, passedColor));
+ }
+
+ SkPMColor4f result;
+ // Args match Make(): uniforms, child colors, then the output pointer.
+ fProgram.eval(1, uniformData, childColors.begin(), result.vec());
+ return result;
+}
+
+// Returns the cached per-effect color-filter program, or nullptr if
+// SkFilterColorProgram::Make could not build one for this effect.
+const SkFilterColorProgram* SkRuntimeEffect::getFilterColorProgram() const {
+ return fFilterColorProgram.get();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+// Ganesh helper: converts a runtime effect plus its children into a
+// GrSkSLFP fragment processor. Each child (shader / color filter / blender)
+// becomes a child FP; null children become nullptr entries. On any child
+// conversion failure, returns GrFPFailure carrying back the input FP.
+static GrFPResult make_effect_fp(sk_sp<SkRuntimeEffect> effect,
+ const char* name,
+ sk_sp<const SkData> uniforms,
+ std::unique_ptr<GrFragmentProcessor> inputFP,
+ std::unique_ptr<GrFragmentProcessor> destColorFP,
+ SkSpan<const SkRuntimeEffect::ChildPtr> children,
+ const GrFPArgs& childArgs) {
+ SkSTArray<8, std::unique_ptr<GrFragmentProcessor>> childFPs;
+ for (const auto& child : children) {
+ std::optional<ChildType> type = child.type();
+ if (type == ChildType::kShader) {
+ // Convert a SkShader into a child FP.
+ SkShaderBase::MatrixRec mRec(SkMatrix::I());
+ mRec.markTotalMatrixInvalid();
+ auto childFP = as_SB(child.shader())->asFragmentProcessor(childArgs, mRec);
+ if (!childFP) {
+ return GrFPFailure(std::move(inputFP));
+ }
+ childFPs.push_back(std::move(childFP));
+ } else if (type == ChildType::kColorFilter) {
+ // Convert a SkColorFilter into a child FP.
+ auto [success, childFP] = as_CFB(child.colorFilter())
+ ->asFragmentProcessor(/*inputFP=*/nullptr,
+ childArgs.fContext,
+ *childArgs.fDstColorInfo,
+ childArgs.fSurfaceProps);
+ if (!success) {
+ return GrFPFailure(std::move(inputFP));
+ }
+ childFPs.push_back(std::move(childFP));
+ } else if (type == ChildType::kBlender) {
+ // Convert a SkBlender into a child FP.
+ auto childFP = as_BB(child.blender())->asFragmentProcessor(
+ /*srcFP=*/nullptr,
+ GrFragmentProcessor::DestColor(),
+ childArgs);
+ if (!childFP) {
+ return GrFPFailure(std::move(inputFP));
+ }
+ childFPs.push_back(std::move(childFP));
+ } else {
+ // We have a null child effect.
+ childFPs.push_back(nullptr);
+ }
+ }
+ auto fp = GrSkSLFP::MakeWithData(std::move(effect),
+ name,
+ childArgs.fDstColorInfo->refColorSpace(),
+ std::move(inputFP),
+ std::move(destColorFP),
+ std::move(uniforms),
+ SkSpan(childFPs));
+ SkASSERT(fp);
+ return GrFPSuccess(std::move(fp));
+}
+#endif
+
+#if defined(SK_GRAPHITE)
+// Graphite helper: appends each child effect to the paint-params key.
+// Null child slots are replaced with a no-op block appropriate for the
+// declared child type (passthrough shader/color-filter, src-over blender).
+static void add_children_to_key(SkSpan<const SkRuntimeEffect::ChildPtr> children,
+ SkSpan<const SkRuntimeEffect::Child> childInfo,
+ const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) {
+ using namespace skgpu::graphite;
+
+ SkASSERT(children.size() == childInfo.size());
+
+ for (size_t index = 0; index < children.size(); ++index) {
+ const SkRuntimeEffect::ChildPtr& child = children[index];
+ std::optional<ChildType> type = child.type();
+ if (type == ChildType::kShader) {
+ as_SB(child.shader())->addToKey(keyContext, builder, gatherer);
+ } else if (type == ChildType::kColorFilter) {
+ as_CFB(child.colorFilter())->addToKey(keyContext, builder, gatherer);
+ } else if (type == ChildType::kBlender) {
+ as_BB(child.blender())
+ ->addToKey(keyContext, builder, gatherer, DstColorType::kChildOutput);
+ } else {
+ // We don't have a child effect. Substitute in a no-op effect.
+ switch (childInfo[index].type) {
+ case ChildType::kShader:
+ case ChildType::kColorFilter:
+ // A "passthrough" shader returns the input color as-is.
+ PassthroughShaderBlock::BeginBlock(keyContext, builder, gatherer);
+ builder->endBlock();
+ break;
+
+ case ChildType::kBlender:
+ // A "passthrough" blender performs `blend_src_over(src, dest)`.
+ PassthroughBlenderBlock::BeginBlock(keyContext, builder, gatherer);
+ builder->endBlock();
+ break;
+ }
+ }
+ }
+}
+#endif
+
+// SkVM translation callbacks shared by the runtime shader, color filter, and
+// blender backends. Child sampling dispatches to the corresponding child
+// effect's skvm program; null children fall back to a sensible default
+// (input color, identity filter, src-over blend). Also implements the
+// to/fromLinearSrgb intrinsics via color-space transform steps.
+class RuntimeEffectVMCallbacks : public SkSL::SkVMCallbacks {
+public:
+ RuntimeEffectVMCallbacks(skvm::Builder* builder,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc,
+ const std::vector<SkRuntimeEffect::ChildPtr>& children,
+ const SkShaderBase::MatrixRec& mRec,
+ skvm::Color inColor,
+ const SkColorInfo& colorInfo)
+ : fBuilder(builder)
+ , fUniforms(uniforms)
+ , fAlloc(alloc)
+ , fChildren(children)
+ , fMRec(mRec)
+ , fInColor(inColor)
+ , fColorInfo(colorInfo) {}
+
+ skvm::Color sampleShader(int ix, skvm::Coord coord) override {
+ // We haven't tracked device coords and the runtime effect could have arbitrarily
+ // manipulated the passed coords. We should be in a state where any pending matrix was
+ // already applied before the runtime effect's code could have manipulated the coords
+ // and the total matrix from child shader to device space is flagged as unknown.
+ SkASSERT(!fMRec.hasPendingMatrix());
+ SkASSERT(!fMRec.totalMatrixIsValid());
+ if (SkShader* shader = fChildren[ix].shader()) {
+ return as_SB(shader)->program(fBuilder,
+ coord,
+ coord,
+ fInColor,
+ fMRec,
+ fColorInfo,
+ fUniforms,
+ fAlloc);
+ }
+ // Null child shader: sampling it yields the input color.
+ return fInColor;
+ }
+
+ skvm::Color sampleColorFilter(int ix, skvm::Color color) override {
+ if (SkColorFilter* colorFilter = fChildren[ix].colorFilter()) {
+ return as_CFB(colorFilter)->program(fBuilder, color, fColorInfo, fUniforms, fAlloc);
+ }
+ // Null child filter acts as the identity.
+ return color;
+ }
+
+ skvm::Color sampleBlender(int ix, skvm::Color src, skvm::Color dst) override {
+ if (SkBlender* blender = fChildren[ix].blender()) {
+ return as_BB(blender)->program(fBuilder, src, dst, fColorInfo, fUniforms, fAlloc);
+ }
+ // Null child blender defaults to src-over.
+ return blend(SkBlendMode::kSrcOver, src, dst);
+ }
+
+ skvm::Color toLinearSrgb(skvm::Color color) override {
+ if (!fColorInfo.colorSpace()) {
+ // These intrinsics do nothing when color management is disabled
+ return color;
+ }
+ return SkColorSpaceXformSteps{fColorInfo.colorSpace(), kUnpremul_SkAlphaType,
+ sk_srgb_linear_singleton(), kUnpremul_SkAlphaType}
+ .program(fBuilder, fUniforms, color);
+ }
+
+ skvm::Color fromLinearSrgb(skvm::Color color) override {
+ if (!fColorInfo.colorSpace()) {
+ // These intrinsics do nothing when color management is disabled
+ return color;
+ }
+ return SkColorSpaceXformSteps{sk_srgb_linear_singleton(), kUnpremul_SkAlphaType,
+ fColorInfo.colorSpace(), kUnpremul_SkAlphaType}
+ .program(fBuilder, fUniforms, color);
+ }
+
+ skvm::Builder* fBuilder;
+ skvm::Uniforms* fUniforms;
+ SkArenaAlloc* fAlloc;
+ const std::vector<SkRuntimeEffect::ChildPtr>& fChildren;
+ const SkShaderBase::MatrixRec& fMRec;
+ const skvm::Color fInColor;
+ const SkColorInfo& fColorInfo;
+};
+
+// SkColorFilter backed by a runtime effect. Holds the effect, a fixed uniform
+// data block, and any child effects, and dispatches to the Ganesh, Graphite,
+// raster-pipeline, and SkVM backends.
+class SkRuntimeColorFilter : public SkColorFilterBase {
+public:
+ SkRuntimeColorFilter(sk_sp<SkRuntimeEffect> effect,
+ sk_sp<const SkData> uniforms,
+ SkSpan<SkRuntimeEffect::ChildPtr> children)
+ : fEffect(std::move(effect))
+ , fUniforms(std::move(uniforms))
+ , fChildren(children.begin(), children.end()) {}
+
+#if defined(SK_GANESH)
+ // Builds the Ganesh fragment processor, transforming uniforms into the
+ // destination color space first.
+ GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context,
+ const GrColorInfo& colorInfo,
+ const SkSurfaceProps& props) const override {
+ sk_sp<const SkData> uniforms = SkRuntimeEffectPriv::TransformUniforms(
+ fEffect->uniforms(),
+ fUniforms,
+ colorInfo.colorSpace());
+ SkASSERT(uniforms);
+
+ GrFPArgs childArgs(context, &colorInfo, props);
+ return make_effect_fp(fEffect,
+ "runtime_color_filter",
+ std::move(uniforms),
+ std::move(inputFP),
+ /*destColorFP=*/nullptr,
+ SkSpan(fChildren),
+ childArgs);
+ }
+#endif
+
+#if defined(SK_GRAPHITE)
+ // Records this filter (effect + transformed uniforms + children) into the
+ // Graphite paint key.
+ void addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const override {
+ using namespace skgpu::graphite;
+
+ sk_sp<const SkData> uniforms = SkRuntimeEffectPriv::TransformUniforms(
+ fEffect->uniforms(),
+ fUniforms,
+ keyContext.dstColorInfo().colorSpace());
+ SkASSERT(uniforms);
+
+ RuntimeEffectBlock::BeginBlock(keyContext, builder, gatherer,
+ { fEffect, std::move(uniforms) });
+
+ add_children_to_key(fChildren, fEffect->children(), keyContext, builder, gatherer);
+
+ builder->endBlock();
+ }
+#endif
+
+ // Raster-pipeline backend; returns false to fall back to SkVM.
+ bool appendStages(const SkStageRec& rec, bool) const override {
+#ifdef SK_ENABLE_SKSL_IN_RASTER_PIPELINE
+ if (!SkRuntimeEffectPriv::CanDraw(SkCapabilities::RasterBackend().get(), fEffect.get())) {
+ // SkRP has support for many parts of #version 300 already, but for now, we restrict its
+ // usage in runtime effects to just #version 100.
+ return false;
+ }
+ if (const SkSL::RP::Program* program = fEffect->getRPProgram()) {
+ SkSpan<const float> uniforms = uniforms_as_span(fEffect->uniforms(),
+ fUniforms,
+ rec.fDstCS,
+ rec.fAlloc);
+ SkShaderBase::MatrixRec matrix(SkMatrix::I());
+ matrix.markCTMApplied();
+ RuntimeEffectRPCallbacks callbacks(rec, matrix, fChildren, fEffect->fSampleUsages);
+ bool success = program->appendStages(rec.fPipeline, rec.fAlloc, &callbacks, uniforms);
+ return success;
+ }
+#endif
+ return false;
+ }
+
+ // SkVM backend: translates the effect's SkSL into the caller's builder.
+ skvm::Color onProgram(skvm::Builder* p, skvm::Color c,
+ const SkColorInfo& colorInfo,
+ skvm::Uniforms* uniforms, SkArenaAlloc* alloc) const override {
+ SkASSERT(SkRuntimeEffectPriv::CanDraw(SkCapabilities::RasterBackend().get(),
+ fEffect.get()));
+
+ sk_sp<const SkData> inputs = SkRuntimeEffectPriv::TransformUniforms(
+ fEffect->uniforms(),
+ fUniforms,
+ colorInfo.colorSpace());
+ SkASSERT(inputs);
+
+ SkShaderBase::MatrixRec mRec(SkMatrix::I());
+ mRec.markTotalMatrixInvalid();
+ RuntimeEffectVMCallbacks callbacks(p, uniforms, alloc, fChildren, mRec, c, colorInfo);
+ std::vector<skvm::Val> uniform = make_skvm_uniforms(p, uniforms, fEffect->uniformSize(),
+ *inputs);
+
+ // There should be no way for the color filter to use device coords, but we need to supply
+ // something. (Uninitialized values can trigger asserts in skvm::Builder).
+ skvm::Coord zeroCoord = { p->splat(0.0f), p->splat(0.0f) };
+ return SkSL::ProgramToSkVM(*fEffect->fBaseProgram, fEffect->fMain, p,/*debugTrace=*/nullptr,
+ SkSpan(uniform), /*device=*/zeroCoord, /*local=*/zeroCoord,
+ c, c, &callbacks);
+ }
+
+ // Single-color fast path: uses the per-effect SkFilterColorProgram when
+ // available, otherwise the slower base-class implementation.
+ SkPMColor4f onFilterColor4f(const SkPMColor4f& color, SkColorSpace* dstCS) const override {
+ // Get the generic program for filtering a single color
+ const SkFilterColorProgram* program = fEffect->getFilterColorProgram();
+ if (!program) {
+ // We were unable to build a cached (per-effect) program. Use the base-class fallback,
+ // which builds a program for the specific filter instance.
+ return SkColorFilterBase::onFilterColor4f(color, dstCS);
+ }
+
+ // Get our specific uniform values
+ sk_sp<const SkData> inputs = SkRuntimeEffectPriv::TransformUniforms(
+ fEffect->uniforms(),
+ fUniforms,
+ dstCS);
+ SkASSERT(inputs);
+
+ auto evalChild = [&](int index, SkPMColor4f inColor) {
+ const auto& child = fChildren[index];
+
+ // SkFilterColorProgram::Make has guaranteed that any children will be color filters.
+ SkASSERT(!child.shader());
+ SkASSERT(!child.blender());
+ if (SkColorFilter* colorFilter = child.colorFilter()) {
+ return as_CFB(colorFilter)->onFilterColor4f(inColor, dstCS);
+ }
+ return inColor;
+ };
+
+ return program->eval(color, inputs->data(), evalChild);
+ }
+
+ // Conservative: only true when the cached program proved alpha is preserved.
+ bool onIsAlphaUnchanged() const override {
+ return fEffect->getFilterColorProgram() &&
+ fEffect->getFilterColorProgram()->isAlphaUnchanged();
+ }
+
+ // Serialization: SkSL source, uniform bytes, then the child effects.
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeString(fEffect->source().c_str());
+ buffer.writeDataAsByteArray(fUniforms.get());
+ write_child_effects(buffer, fChildren);
+ }
+
+ SkRuntimeEffect* asRuntimeEffect() const override { return fEffect.get(); }
+
+ SK_FLATTENABLE_HOOKS(SkRuntimeColorFilter)
+
+private:
+ sk_sp<SkRuntimeEffect> fEffect;
+ sk_sp<const SkData> fUniforms;
+ std::vector<SkRuntimeEffect::ChildPtr> fChildren;
+};
+
+// Deserializes a runtime color filter: SkSL source, uniforms, children.
+// In lenient mode a failed SkSL compile drops the filter (returns nullptr
+// without poisoning the buffer); otherwise it invalidates the buffer.
+sk_sp<SkFlattenable> SkRuntimeColorFilter::CreateProc(SkReadBuffer& buffer) {
+ SkString sksl;
+ buffer.readString(&sksl);
+ sk_sp<SkData> uniforms = buffer.readByteArrayAsData();
+
+ auto effect = SkMakeCachedRuntimeEffect(SkRuntimeEffect::MakeForColorFilter, std::move(sksl));
+#if !SK_LENIENT_SKSL_DESERIALIZATION
+ if (!buffer.validate(effect != nullptr)) {
+ return nullptr;
+ }
+#endif
+
+ // Children must be read even when the effect failed to compile, to keep
+ // the read cursor consistent.
+ SkSTArray<4, SkRuntimeEffect::ChildPtr> children;
+ if (!read_child_effects(buffer, effect.get(), &children)) {
+ return nullptr;
+ }
+
+#if SK_LENIENT_SKSL_DESERIALIZATION
+ if (!effect) {
+ SkDebugf("Serialized SkSL failed to compile. Ignoring/dropping SkSL color filter.\n");
+ return nullptr;
+ }
+#endif
+
+ return effect->makeColorFilter(std::move(uniforms), SkSpan(children));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+using UniformsCallback = SkRuntimeEffectPriv::UniformsCallback;
+
+// SkShader backed by a runtime effect. Uniforms are provided either as a
+// fixed SkData block or lazily via a UniformsCallback (the "deferred" form);
+// uniformData() reconciles the two. Optionally carries an SkVM debug trace.
+class SkRTShader : public SkShaderBase {
+public:
+ SkRTShader(sk_sp<SkRuntimeEffect> effect,
+ sk_sp<SkSL::SkVMDebugTrace> debugTrace,
+ sk_sp<const SkData> uniforms,
+ SkSpan<SkRuntimeEffect::ChildPtr> children)
+ : fEffect(std::move(effect))
+ , fDebugTrace(std::move(debugTrace))
+ , fUniformData(std::move(uniforms))
+ , fChildren(children.begin(), children.end()) {}
+
+ // Deferred-uniform constructor: uniforms are produced on demand.
+ SkRTShader(sk_sp<SkRuntimeEffect> effect,
+ sk_sp<SkSL::SkVMDebugTrace> debugTrace,
+ UniformsCallback uniformsCallback,
+ SkSpan<SkRuntimeEffect::ChildPtr> children)
+ : fEffect(std::move(effect))
+ , fDebugTrace(std::move(debugTrace))
+ , fUniformsCallback(std::move(uniformsCallback))
+ , fChildren(children.begin(), children.end()) {}
+
+ // Creates a debuggable clone of this shader (unoptimized effect + trace
+ // targeting the given device coordinate).
+ SkRuntimeEffect::TracedShader makeTracedClone(const SkIPoint& coord) {
+ sk_sp<SkRuntimeEffect> unoptimized = fEffect->makeUnoptimizedClone();
+ sk_sp<SkSL::SkVMDebugTrace> debugTrace = make_skvm_debug_trace(unoptimized.get(), coord);
+ auto debugShader = sk_make_sp<SkRTShader>(
+ unoptimized, debugTrace, this->uniformData(nullptr), SkSpan(fChildren));
+
+ return SkRuntimeEffect::TracedShader{std::move(debugShader), std::move(debugTrace)};
+ }
+
+ bool isOpaque() const override { return fEffect->alwaysOpaque(); }
+
+#if defined(SK_GANESH)
+ // Builds the Ganesh fragment processor and applies the pending matrix.
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs& args,
+ const MatrixRec& mRec) const override {
+ if (!SkRuntimeEffectPriv::CanDraw(args.fContext->priv().caps(), fEffect.get())) {
+ return nullptr;
+ }
+
+ sk_sp<const SkData> uniforms = SkRuntimeEffectPriv::TransformUniforms(
+ fEffect->uniforms(),
+ this->uniformData(args.fDstColorInfo->colorSpace()),
+ args.fDstColorInfo->colorSpace());
+ SkASSERT(uniforms);
+
+ bool success;
+ std::unique_ptr<GrFragmentProcessor> fp;
+ std::tie(success, fp) = make_effect_fp(fEffect,
+ "runtime_shader",
+ std::move(uniforms),
+ /*inputFP=*/nullptr,
+ /*destColorFP=*/nullptr,
+ SkSpan(fChildren),
+ args);
+ if (!success) {
+ return nullptr;
+ }
+
+ std::tie(success, fp) = mRec.apply(std::move(fp));
+ if (!success) {
+ return nullptr;
+ }
+ return fp;
+ }
+#endif
+
+#if defined(SK_GRAPHITE)
+ // Records this shader (effect + transformed uniforms + children) into the
+ // Graphite paint key.
+ void addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const override {
+ using namespace skgpu::graphite;
+
+ sk_sp<const SkData> uniforms = SkRuntimeEffectPriv::TransformUniforms(
+ fEffect->uniforms(),
+ this->uniformData(keyContext.dstColorInfo().colorSpace()),
+ keyContext.dstColorInfo().colorSpace());
+ SkASSERT(uniforms);
+
+ RuntimeEffectBlock::BeginBlock(keyContext, builder, gatherer,
+ { fEffect, std::move(uniforms) });
+
+ add_children_to_key(fChildren, fEffect->children(), keyContext, builder, gatherer);
+
+ builder->endBlock();
+ }
+#endif
+
+ // Raster-pipeline backend; returns false to fall back to SkVM.
+ bool appendStages(const SkStageRec& rec, const MatrixRec& mRec) const override {
+#ifdef SK_ENABLE_SKSL_IN_RASTER_PIPELINE
+ if (!SkRuntimeEffectPriv::CanDraw(SkCapabilities::RasterBackend().get(), fEffect.get())) {
+ // SkRP has support for many parts of #version 300 already, but for now, we restrict its
+ // usage in runtime effects to just #version 100.
+ return false;
+ }
+ if (fDebugTrace) {
+ // SkRP doesn't support debug traces yet; fall back to SkVM until this is implemented.
+ return false;
+ }
+ if (const SkSL::RP::Program* program = fEffect->getRPProgram()) {
+ std::optional<MatrixRec> newMRec = mRec.apply(rec);
+ if (!newMRec.has_value()) {
+ return false;
+ }
+ SkSpan<const float> uniforms = uniforms_as_span(fEffect->uniforms(),
+ this->uniformData(rec.fDstCS),
+ rec.fDstCS,
+ rec.fAlloc);
+ RuntimeEffectRPCallbacks callbacks(rec, *newMRec, fChildren, fEffect->fSampleUsages);
+ bool success = program->appendStages(rec.fPipeline, rec.fAlloc, &callbacks, uniforms);
+ return success;
+ }
+#endif
+ return false;
+ }
+
+ // SkVM backend: applies the pending matrix to `local`, then translates the
+ // effect's SkSL into the caller's builder.
+ skvm::Color program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& colorInfo,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const override {
+ if (!SkRuntimeEffectPriv::CanDraw(SkCapabilities::RasterBackend().get(), fEffect.get())) {
+ return {};
+ }
+
+ sk_sp<const SkData> inputs =
+ SkRuntimeEffectPriv::TransformUniforms(fEffect->uniforms(),
+ this->uniformData(colorInfo.colorSpace()),
+ colorInfo.colorSpace());
+ SkASSERT(inputs);
+
+ // Ensure any pending transform is applied before running the runtime shader's code, which
+ // gets to use and manipulate the coordinates.
+ std::optional<MatrixRec> newMRec = mRec.apply(p, &local, uniforms);
+ if (!newMRec.has_value()) {
+ return {};
+ }
+ // We could omit this for children that are only sampled with passthrough coords.
+ newMRec->markTotalMatrixInvalid();
+
+ RuntimeEffectVMCallbacks callbacks(p,
+ uniforms,
+ alloc,
+ fChildren,
+ *newMRec,
+ paint,
+ colorInfo);
+ std::vector<skvm::Val> uniform = make_skvm_uniforms(p, uniforms, fEffect->uniformSize(),
+ *inputs);
+
+ return SkSL::ProgramToSkVM(*fEffect->fBaseProgram, fEffect->fMain, p, fDebugTrace.get(),
+ SkSpan(uniform), device, local, paint, paint, &callbacks);
+ }
+
+ // Serialization: SkSL source, uniform bytes, then the child effects.
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeString(fEffect->source().c_str());
+ buffer.writeDataAsByteArray(this->uniformData(nullptr).get());
+ write_child_effects(buffer, fChildren);
+ }
+
+ SkRuntimeEffect* asRuntimeEffect() const override { return fEffect.get(); }
+
+ SK_FLATTENABLE_HOOKS(SkRTShader)
+
+private:
+ enum Flags {
+ kHasLegacyLocalMatrix_Flag = 1 << 1,
+ };
+
+ // Returns the uniform data block, invoking the deferred callback (with the
+ // destination color space) when no fixed data was supplied.
+ sk_sp<const SkData> uniformData(const SkColorSpace* dstCS) const {
+ if (fUniformData) {
+ return fUniformData;
+ }
+
+ SkASSERT(fUniformsCallback);
+ sk_sp<const SkData> uniforms = fUniformsCallback({dstCS});
+ SkASSERT(uniforms && uniforms->size() == fEffect->uniformSize());
+ return uniforms;
+ }
+
+ sk_sp<SkRuntimeEffect> fEffect;
+ sk_sp<SkSL::SkVMDebugTrace> fDebugTrace;
+ sk_sp<const SkData> fUniformData;
+ UniformsCallback fUniformsCallback;
+ std::vector<SkRuntimeEffect::ChildPtr> fChildren;
+};
+
+// Deserializes a runtime shader: SkSL source, uniforms, optional legacy local
+// matrix (old picture versions), children. In lenient mode a failed SkSL
+// compile substitutes the first child shader, or drops the shader entirely.
+sk_sp<SkFlattenable> SkRTShader::CreateProc(SkReadBuffer& buffer) {
+ SkString sksl;
+ buffer.readString(&sksl);
+ sk_sp<SkData> uniforms = buffer.readByteArrayAsData();
+
+ SkTLazy<SkMatrix> localM;
+ if (buffer.isVersionLT(SkPicturePriv::kNoShaderLocalMatrix)) {
+ uint32_t flags = buffer.read32();
+ if (flags & kHasLegacyLocalMatrix_Flag) {
+ buffer.readMatrix(localM.init());
+ }
+ }
+
+ auto effect = SkMakeCachedRuntimeEffect(SkRuntimeEffect::MakeForShader, std::move(sksl));
+#if !SK_LENIENT_SKSL_DESERIALIZATION
+ if (!buffer.validate(effect != nullptr)) {
+ return nullptr;
+ }
+#endif
+
+ // Children must be read even when the effect failed to compile, to keep
+ // the read cursor consistent.
+ SkSTArray<4, SkRuntimeEffect::ChildPtr> children;
+ if (!read_child_effects(buffer, effect.get(), &children)) {
+ return nullptr;
+ }
+
+#if SK_LENIENT_SKSL_DESERIALIZATION
+ if (!effect) {
+ // If any children were SkShaders, return the first one. This is a reasonable fallback.
+ for (int i = 0; i < children.size(); i++) {
+ if (children[i].shader()) {
+ SkDebugf("Serialized SkSL failed to compile. Replacing shader with child %d.\n", i);
+ return sk_ref_sp(children[i].shader());
+ }
+ }
+
+ // We don't know what to do, so just return nullptr (but *don't* poison the buffer).
+ SkDebugf("Serialized SkSL failed to compile. Ignoring/dropping SkSL shader.\n");
+ return nullptr;
+ }
+#endif
+
+ return effect->makeShader(std::move(uniforms), SkSpan(children), localM.getMaybeNull());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// SkBlender backed by a runtime effect. Holds the effect, a fixed uniform
+// data block, and any child effects, and dispatches to the raster-pipeline,
+// SkVM, Ganesh, and Graphite backends.
+class SkRuntimeBlender : public SkBlenderBase {
+public:
+ SkRuntimeBlender(sk_sp<SkRuntimeEffect> effect,
+ sk_sp<const SkData> uniforms,
+ SkSpan<SkRuntimeEffect::ChildPtr> children)
+ : fEffect(std::move(effect))
+ , fUniforms(std::move(uniforms))
+ , fChildren(children.begin(), children.end()) {}
+
+ SkRuntimeEffect* asRuntimeEffect() const override { return fEffect.get(); }
+
+ // Raster-pipeline backend; returns false to fall back to SkVM.
+ bool onAppendStages(const SkStageRec& rec) const override {
+#ifdef SK_ENABLE_SKSL_IN_RASTER_PIPELINE
+ if (!SkRuntimeEffectPriv::CanDraw(SkCapabilities::RasterBackend().get(), fEffect.get())) {
+ // SkRP has support for many parts of #version 300 already, but for now, we restrict its
+ // usage in runtime effects to just #version 100.
+ return false;
+ }
+ if (const SkSL::RP::Program* program = fEffect->getRPProgram()) {
+ SkSpan<const float> uniforms = uniforms_as_span(fEffect->uniforms(),
+ fUniforms,
+ rec.fDstCS,
+ rec.fAlloc);
+ SkShaderBase::MatrixRec matrix(SkMatrix::I());
+ matrix.markCTMApplied();
+ RuntimeEffectRPCallbacks callbacks(rec, matrix, fChildren, fEffect->fSampleUsages);
+ bool success = program->appendStages(rec.fPipeline, rec.fAlloc, &callbacks, uniforms);
+ return success;
+ }
+#endif
+ return false;
+ }
+
+ // SkVM backend: translates the blend function, with src/dst as the two
+ // input colors.
+ skvm::Color onProgram(skvm::Builder* p, skvm::Color src, skvm::Color dst,
+ const SkColorInfo& colorInfo, skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const override {
+ if (!SkRuntimeEffectPriv::CanDraw(SkCapabilities::RasterBackend().get(), fEffect.get())) {
+ return {};
+ }
+
+ sk_sp<const SkData> inputs = SkRuntimeEffectPriv::TransformUniforms(fEffect->uniforms(),
+ fUniforms,
+ colorInfo.colorSpace());
+ SkASSERT(inputs);
+
+ SkShaderBase::MatrixRec mRec(SkMatrix::I());
+ mRec.markTotalMatrixInvalid();
+ RuntimeEffectVMCallbacks callbacks(p, uniforms, alloc, fChildren, mRec, src, colorInfo);
+ std::vector<skvm::Val> uniform = make_skvm_uniforms(p, uniforms, fEffect->uniformSize(),
+ *inputs);
+
+ // Emit the blend function as an SkVM program.
+ skvm::Coord zeroCoord = {p->splat(0.0f), p->splat(0.0f)};
+ return SkSL::ProgramToSkVM(*fEffect->fBaseProgram, fEffect->fMain, p,/*debugTrace=*/nullptr,
+ SkSpan(uniform), /*device=*/zeroCoord, /*local=*/zeroCoord,
+ src, dst, &callbacks);
+ }
+
+#if defined(SK_GANESH)
+ // Builds the Ganesh fragment processor for this blend.
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(
+ std::unique_ptr<GrFragmentProcessor> srcFP,
+ std::unique_ptr<GrFragmentProcessor> dstFP,
+ const GrFPArgs& args) const override {
+ if (!SkRuntimeEffectPriv::CanDraw(args.fContext->priv().caps(), fEffect.get())) {
+ return nullptr;
+ }
+
+ sk_sp<const SkData> uniforms = SkRuntimeEffectPriv::TransformUniforms(
+ fEffect->uniforms(),
+ fUniforms,
+ args.fDstColorInfo->colorSpace());
+ SkASSERT(uniforms);
+ auto [success, fp] = make_effect_fp(fEffect,
+ "runtime_blender",
+ std::move(uniforms),
+ std::move(srcFP),
+ std::move(dstFP),
+ SkSpan(fChildren),
+ args);
+
+ return success ? std::move(fp) : nullptr;
+ }
+#endif
+
+#if defined(SK_GRAPHITE)
+ // Records this blender (effect + transformed uniforms + children) into the
+ // Graphite paint key.
+ void addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer,
+ skgpu::graphite::DstColorType dstColorType) const override {
+ using namespace skgpu::graphite;
+ SkASSERT(dstColorType == DstColorType::kSurface ||
+ dstColorType == DstColorType::kChildOutput);
+
+ sk_sp<const SkData> uniforms = SkRuntimeEffectPriv::TransformUniforms(
+ fEffect->uniforms(),
+ fUniforms,
+ keyContext.dstColorInfo().colorSpace());
+ SkASSERT(uniforms);
+
+ // TODO(b/238757201): Pass into RuntimeEffectBlock::BeginBlock that this runtime effect
+ // needs a dst read, if dstColorType == kSurface.
+ RuntimeEffectBlock::BeginBlock(keyContext, builder, gatherer,
+ { fEffect, std::move(uniforms) });
+
+ add_children_to_key(fChildren, fEffect->children(), keyContext, builder, gatherer);
+
+ builder->endBlock();
+ }
+#endif
+
+ // Serialization: SkSL source, uniform bytes, then the child effects.
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeString(fEffect->source().c_str());
+ buffer.writeDataAsByteArray(fUniforms.get());
+ write_child_effects(buffer, fChildren);
+ }
+
+ SK_FLATTENABLE_HOOKS(SkRuntimeBlender)
+
+private:
+ using INHERITED = SkBlenderBase;
+
+ sk_sp<SkRuntimeEffect> fEffect;
+ sk_sp<const SkData> fUniforms;
+ std::vector<SkRuntimeEffect::ChildPtr> fChildren;
+};
+
+// Deserializes a runtime blender: SkSL source, uniforms, children. In lenient
+// mode a failed SkSL compile drops the blender (returns nullptr without
+// poisoning the buffer); otherwise it invalidates the buffer.
+sk_sp<SkFlattenable> SkRuntimeBlender::CreateProc(SkReadBuffer& buffer) {
+ SkString sksl;
+ buffer.readString(&sksl);
+ sk_sp<SkData> uniforms = buffer.readByteArrayAsData();
+
+ auto effect = SkMakeCachedRuntimeEffect(SkRuntimeEffect::MakeForBlender, std::move(sksl));
+#if !SK_LENIENT_SKSL_DESERIALIZATION
+ if (!buffer.validate(effect != nullptr)) {
+ return nullptr;
+ }
+#endif
+
+ // Children must be read even when the effect failed to compile, to keep
+ // the read cursor consistent.
+ SkSTArray<4, SkRuntimeEffect::ChildPtr> children;
+ if (!read_child_effects(buffer, effect.get(), &children)) {
+ return nullptr;
+ }
+
+#if SK_LENIENT_SKSL_DESERIALIZATION
+ if (!effect) {
+ SkDebugf("Serialized SkSL failed to compile. Ignoring/dropping SkSL blender.\n");
+ return nullptr;
+ }
+#endif
+
+ return effect->makeBlender(std::move(uniforms), SkSpan(children));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Creates a shader whose uniforms are computed lazily via `uniformsCallback`
+// (invoked with the destination color space at use time). Returns nullptr if
+// the effect is not a shader, the children don't match its declarations, or
+// no callback was supplied.
+sk_sp<SkShader> SkRuntimeEffectPriv::MakeDeferredShader(const SkRuntimeEffect* effect,
+ UniformsCallback uniformsCallback,
+ SkSpan<SkRuntimeEffect::ChildPtr> children,
+ const SkMatrix* localMatrix) {
+ if (!effect->allowShader()) {
+ return nullptr;
+ }
+ if (!verify_child_effects(effect->fChildren, children)) {
+ return nullptr;
+ }
+ if (!uniformsCallback) {
+ return nullptr;
+ }
+ return SkLocalMatrixShader::MakeWrapped<SkRTShader>(localMatrix,
+ sk_ref_sp(effect),
+ /*debugTrace=*/nullptr,
+ std::move(uniformsCallback),
+ children);
+}
+
+sk_sp<SkShader> SkRuntimeEffect::makeShader(sk_sp<const SkData> uniforms,
+ sk_sp<SkShader> childShaders[],
+ size_t childCount,
+ const SkMatrix* localMatrix) const {
+ SkSTArray<4, ChildPtr> children(childCount);
+ for (size_t i = 0; i < childCount; ++i) {
+ children.emplace_back(childShaders[i]);
+ }
+ return this->makeShader(std::move(uniforms), SkSpan(children), localMatrix);
+}
+
+sk_sp<SkShader> SkRuntimeEffect::makeShader(sk_sp<const SkData> uniforms,
+ SkSpan<ChildPtr> children,
+ const SkMatrix* localMatrix) const {
+ if (!this->allowShader()) {
+ return nullptr;
+ }
+ if (!verify_child_effects(fChildren, children)) {
+ return nullptr;
+ }
+ if (!uniforms) {
+ uniforms = SkData::MakeEmpty();
+ }
+ if (uniforms->size() != this->uniformSize()) {
+ return nullptr;
+ }
+ return SkLocalMatrixShader::MakeWrapped<SkRTShader>(localMatrix,
+ sk_ref_sp(this),
+ /*debugTrace=*/nullptr,
+ std::move(uniforms),
+ children);
+}
+
+sk_sp<SkImage> SkRuntimeEffect::makeImage(GrRecordingContext* rContext,
+ sk_sp<const SkData> uniforms,
+ SkSpan<ChildPtr> children,
+ const SkMatrix* localMatrix,
+ SkImageInfo resultInfo,
+ bool mipmapped) const {
+ if (resultInfo.alphaType() == kUnpremul_SkAlphaType ||
+ resultInfo.alphaType() == kUnknown_SkAlphaType) {
+ return nullptr;
+ }
+ sk_sp<SkSurface> surface;
+ if (rContext) {
+#if defined(SK_GANESH)
+ if (!rContext->priv().caps()->mipmapSupport()) {
+ mipmapped = false;
+ }
+ surface = SkSurface::MakeRenderTarget(rContext,
+ skgpu::Budgeted::kYes,
+ resultInfo,
+ 1,
+ kTopLeft_GrSurfaceOrigin,
+ nullptr,
+ mipmapped);
+#endif
+ } else {
+ surface = SkSurface::MakeRaster(resultInfo);
+ }
+ if (!surface) {
+ return nullptr;
+ }
+ SkCanvas* canvas = surface->getCanvas();
+ auto shader = this->makeShader(std::move(uniforms), children, localMatrix);
+ if (!shader) {
+ return nullptr;
+ }
+ SkPaint paint;
+ paint.setShader(std::move(shader));
+ paint.setBlendMode(SkBlendMode::kSrc);
+ canvas->drawPaint(paint);
+ return surface->makeImageSnapshot();
+}
+
+sk_sp<SkColorFilter> SkRuntimeEffect::makeColorFilter(sk_sp<const SkData> uniforms,
+ sk_sp<SkColorFilter> childColorFilters[],
+ size_t childCount) const {
+ SkSTArray<4, ChildPtr> children(childCount);
+ for (size_t i = 0; i < childCount; ++i) {
+ children.emplace_back(childColorFilters[i]);
+ }
+ return this->makeColorFilter(std::move(uniforms), SkSpan(children));
+}
+
+sk_sp<SkColorFilter> SkRuntimeEffect::makeColorFilter(sk_sp<const SkData> uniforms,
+ SkSpan<ChildPtr> children) const {
+ if (!this->allowColorFilter()) {
+ return nullptr;
+ }
+ if (!verify_child_effects(fChildren, children)) {
+ return nullptr;
+ }
+ if (!uniforms) {
+ uniforms = SkData::MakeEmpty();
+ }
+ if (uniforms->size() != this->uniformSize()) {
+ return nullptr;
+ }
+ return sk_make_sp<SkRuntimeColorFilter>(sk_ref_sp(this), std::move(uniforms), children);
+}
+
+sk_sp<SkColorFilter> SkRuntimeEffect::makeColorFilter(sk_sp<const SkData> uniforms) const {
+ return this->makeColorFilter(std::move(uniforms), /*children=*/{});
+}
+
+sk_sp<SkBlender> SkRuntimeEffect::makeBlender(sk_sp<const SkData> uniforms,
+ SkSpan<ChildPtr> children) const {
+ if (!this->allowBlender()) {
+ return nullptr;
+ }
+ if (!verify_child_effects(fChildren, children)) {
+ return nullptr;
+ }
+ if (!uniforms) {
+ uniforms = SkData::MakeEmpty();
+ }
+ if (uniforms->size() != this->uniformSize()) {
+ return nullptr;
+ }
+ return sk_make_sp<SkRuntimeBlender>(sk_ref_sp(this), std::move(uniforms), children);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRuntimeEffect::TracedShader SkRuntimeEffect::MakeTraced(sk_sp<SkShader> shader,
+ const SkIPoint& traceCoord) {
+ SkRuntimeEffect* effect = as_SB(shader)->asRuntimeEffect();
+ if (!effect) {
+ return TracedShader{nullptr, nullptr};
+ }
+ // An SkShader with an attached SkRuntimeEffect must be an SkRTShader.
+ SkRTShader* rtShader = static_cast<SkRTShader*>(shader.get());
+ return rtShader->makeTracedClone(traceCoord);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+std::optional<ChildType> SkRuntimeEffect::ChildPtr::type() const {
+ if (fChild) {
+ switch (fChild->getFlattenableType()) {
+ case SkFlattenable::kSkShader_Type:
+ return ChildType::kShader;
+ case SkFlattenable::kSkColorFilter_Type:
+ return ChildType::kColorFilter;
+ case SkFlattenable::kSkBlender_Type:
+ return ChildType::kBlender;
+ default:
+ break;
+ }
+ }
+ return std::nullopt;
+}
+
+SkShader* SkRuntimeEffect::ChildPtr::shader() const {
+ return (fChild && fChild->getFlattenableType() == SkFlattenable::kSkShader_Type)
+ ? static_cast<SkShader*>(fChild.get())
+ : nullptr;
+}
+
+SkColorFilter* SkRuntimeEffect::ChildPtr::colorFilter() const {
+ return (fChild && fChild->getFlattenableType() == SkFlattenable::kSkColorFilter_Type)
+ ? static_cast<SkColorFilter*>(fChild.get())
+ : nullptr;
+}
+
+SkBlender* SkRuntimeEffect::ChildPtr::blender() const {
+ return (fChild && fChild->getFlattenableType() == SkFlattenable::kSkBlender_Type)
+ ? static_cast<SkBlender*>(fChild.get())
+ : nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkRuntimeEffect::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkRuntimeColorFilter);
+ SK_REGISTER_FLATTENABLE(SkRTShader);
+ SK_REGISTER_FLATTENABLE(SkRuntimeBlender);
+}
+
+SkRuntimeShaderBuilder::SkRuntimeShaderBuilder(sk_sp<SkRuntimeEffect> effect)
+ : INHERITED(std::move(effect)) {}
+
+SkRuntimeShaderBuilder::~SkRuntimeShaderBuilder() = default;
+
+sk_sp<SkImage> SkRuntimeShaderBuilder::makeImage(GrRecordingContext* recordingContext,
+ const SkMatrix* localMatrix,
+ SkImageInfo resultInfo,
+ bool mipmapped) {
+ return this->effect()->makeImage(recordingContext,
+ this->uniforms(),
+ this->children(),
+ localMatrix,
+ resultInfo,
+ mipmapped);
+}
+
+sk_sp<SkShader> SkRuntimeShaderBuilder::makeShader(const SkMatrix* localMatrix) {
+ return this->effect()->makeShader(this->uniforms(), this->children(), localMatrix);
+}
+
+SkRuntimeBlendBuilder::SkRuntimeBlendBuilder(sk_sp<SkRuntimeEffect> effect)
+ : INHERITED(std::move(effect)) {}
+
+SkRuntimeBlendBuilder::~SkRuntimeBlendBuilder() = default;
+
+sk_sp<SkBlender> SkRuntimeBlendBuilder::makeBlender() {
+ return this->effect()->makeBlender(this->uniforms(), this->children());
+}
+
+SkRuntimeColorFilterBuilder::SkRuntimeColorFilterBuilder(sk_sp<SkRuntimeEffect> effect)
+ : INHERITED(std::move(effect)) {}
+
+SkRuntimeColorFilterBuilder::~SkRuntimeColorFilterBuilder() = default;
+
+sk_sp<SkColorFilter> SkRuntimeColorFilterBuilder::makeColorFilter() {
+ return this->effect()->makeColorFilter(this->uniforms(), this->children());
+}
+
+#endif // SK_ENABLE_SKSL
diff --git a/gfx/skia/skia/src/core/SkRuntimeEffectPriv.h b/gfx/skia/skia/src/core/SkRuntimeEffectPriv.h
new file mode 100644
index 0000000000..46264adbe2
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkRuntimeEffectPriv.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRuntimeEffectPriv_DEFINED
+#define SkRuntimeEffectPriv_DEFINED
+
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkVM.h"
+
+#include <functional>
+
+#ifdef SK_ENABLE_SKSL
+
+namespace SkSL {
+class Context;
+class Variable;
+struct Program;
+}
+
+class SkCapabilities;
+struct SkColorSpaceXformSteps;
+
+class SkRuntimeEffectPriv {
+public:
+ struct UniformsCallbackContext {
+ const SkColorSpace* fDstColorSpace;
+ };
+
+ // Private (experimental) API for creating runtime shaders with late-bound uniforms.
+ // The callback must produce a uniform data blob of the correct size for the effect.
+ // It is invoked at "draw" time (essentially, when a draw call is made against the canvas
+ // using the resulting shader). There are no strong guarantees about timing.
+ // Serializing the resulting shader will immediately invoke the callback (and record the
+ // resulting uniforms).
+ using UniformsCallback = std::function<sk_sp<const SkData>(const UniformsCallbackContext&)>;
+ static sk_sp<SkShader> MakeDeferredShader(const SkRuntimeEffect* effect,
+ UniformsCallback uniformsCallback,
+ SkSpan<SkRuntimeEffect::ChildPtr> children,
+ const SkMatrix* localMatrix = nullptr);
+
+ // Helper function when creating an effect for a GrSkSLFP that verifies an effect will
+ // implement the constant output for constant input optimization flag.
+ static bool SupportsConstantOutputForConstantInput(const SkRuntimeEffect* effect) {
+ return effect->getFilterColorProgram();
+ }
+
+ static uint32_t Hash(const SkRuntimeEffect& effect) {
+ return effect.hash();
+ }
+
+ static const SkSL::Program& Program(const SkRuntimeEffect& effect) {
+ return *effect.fBaseProgram;
+ }
+
+ static SkRuntimeEffect::Options ES3Options() {
+ SkRuntimeEffect::Options options;
+ options.maxVersionAllowed = SkSL::Version::k300;
+ return options;
+ }
+
+ static void AllowPrivateAccess(SkRuntimeEffect::Options* options) {
+ options->allowPrivateAccess = true;
+ }
+
+ static SkRuntimeEffect::Uniform VarAsUniform(const SkSL::Variable&,
+ const SkSL::Context&,
+ size_t* offset);
+
+ // If there are layout(color) uniforms then this performs color space transformation on the
+ // color values and returns a new SkData. Otherwise, the original data is returned.
+ static sk_sp<const SkData> TransformUniforms(SkSpan<const SkRuntimeEffect::Uniform> uniforms,
+ sk_sp<const SkData> originalData,
+ const SkColorSpaceXformSteps&);
+ static sk_sp<const SkData> TransformUniforms(SkSpan<const SkRuntimeEffect::Uniform> uniforms,
+ sk_sp<const SkData> originalData,
+ const SkColorSpace* dstCS);
+
+ static bool CanDraw(const SkCapabilities*, const SkSL::Program*);
+ static bool CanDraw(const SkCapabilities*, const SkRuntimeEffect*);
+};
+
+// These internal APIs for creating runtime effects vary from the public API in two ways:
+//
+// 1) they're used in contexts where it's not useful to receive an error message;
+// 2) they're cached.
+//
+// Users of the public SkRuntimeEffect::Make*() can of course cache however they like themselves;
+// keeping these APIs private means users will not be forced into our cache or cache policy.
+
+sk_sp<SkRuntimeEffect> SkMakeCachedRuntimeEffect(
+ SkRuntimeEffect::Result (*make)(SkString sksl, const SkRuntimeEffect::Options&),
+ SkString sksl);
+
+inline sk_sp<SkRuntimeEffect> SkMakeCachedRuntimeEffect(
+ SkRuntimeEffect::Result (*make)(SkString, const SkRuntimeEffect::Options&),
+ const char* sksl) {
+ return SkMakeCachedRuntimeEffect(make, SkString{sksl});
+}
+
+// Internal API that assumes (and asserts) that the shader code is valid, but does no internal
+// caching. Used when the caller will cache the result in a static variable. Ownership is passed to
+// the caller; the effect will be leaked if the pointer is not stored or explicitly deleted.
+inline SkRuntimeEffect* SkMakeRuntimeEffect(
+ SkRuntimeEffect::Result (*make)(SkString, const SkRuntimeEffect::Options&),
+ const char* sksl,
+ SkRuntimeEffect::Options options = SkRuntimeEffect::Options{}) {
+#if defined(SK_DEBUG)
+ // Our SKSL snippets we embed in Skia should not have comments or excess indentation.
+ // Removing them helps trim down code size and speeds up parsing
+ if (SkStrContains(sksl, "//") || SkStrContains(sksl, " ")) {
+ SkDEBUGFAILF("Found SkSL snippet that can be minified: \n %s\n", sksl);
+ }
+#endif
+ SkRuntimeEffectPriv::AllowPrivateAccess(&options);
+ auto result = make(SkString{sksl}, options);
+ if (!result.effect) {
+ SK_ABORT("%s", result.errorText.c_str());
+ }
+ return result.effect.release();
+}
+
+/**
+ * Runtime effects are often long lived & cached. Individual color filters or FPs created from them
+ * are often short-lived. However, color filters and FPs may need to operate on a single color
+ * (on the CPU). This may be done at the paint level (eg, filter the paint color), or as part of
+ * FP tree analysis.
+ *
+ * SkFilterColorProgram is an skvm program representing a (color filter) SkRuntimeEffect. It can
+ * process a single color, without knowing the details of a particular instance (uniform values or
+ * children).
+ */
+class SkFilterColorProgram {
+public:
+ static std::unique_ptr<SkFilterColorProgram> Make(const SkRuntimeEffect* effect);
+
+ SkPMColor4f eval(const SkPMColor4f& inColor,
+ const void* uniformData,
+ std::function<SkPMColor4f(int, SkPMColor4f)> evalChild) const;
+
+ bool isAlphaUnchanged() const { return fAlphaUnchanged; }
+
+private:
+ struct SampleCall {
+ enum class Kind {
+ kInputColor, // eg child.eval(inputColor)
+ kImmediate, // eg child.eval(half4(1))
+ kPrevious, // eg child1.eval(child2.eval(...))
+ kUniform, // eg uniform half4 color; ... child.eval(color)
+ };
+
+ int fChild;
+ Kind fKind;
+ union {
+ SkPMColor4f fImm; // for kImmediate
+ int fPrevious; // for kPrevious
+ int fOffset; // for kUniform
+ };
+ };
+
+ SkFilterColorProgram(skvm::Program program,
+ std::vector<SampleCall> sampleCalls,
+ bool alphaUnchanged);
+
+ skvm::Program fProgram;
+ std::vector<SampleCall> fSampleCalls;
+ bool fAlphaUnchanged;
+};
+
+#endif // SK_ENABLE_SKSL
+
+#endif // SkRuntimeEffectPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkSLTypeShared.cpp b/gfx/skia/skia/src/core/SkSLTypeShared.cpp
new file mode 100644
index 0000000000..847c4c46d3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSLTypeShared.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkSLTypeShared.h"
+
+const char* SkSLTypeString(SkSLType t) {
+ switch (t) {
+ case SkSLType::kVoid: return "void";
+ case SkSLType::kBool: return "bool";
+ case SkSLType::kBool2: return "bool2";
+ case SkSLType::kBool3: return "bool3";
+ case SkSLType::kBool4: return "bool4";
+ case SkSLType::kShort: return "short";
+ case SkSLType::kShort2: return "short2";
+ case SkSLType::kShort3: return "short3";
+ case SkSLType::kShort4: return "short4";
+ case SkSLType::kUShort: return "ushort";
+ case SkSLType::kUShort2: return "ushort2";
+ case SkSLType::kUShort3: return "ushort3";
+ case SkSLType::kUShort4: return "ushort4";
+ case SkSLType::kFloat: return "float";
+ case SkSLType::kFloat2: return "float2";
+ case SkSLType::kFloat3: return "float3";
+ case SkSLType::kFloat4: return "float4";
+ case SkSLType::kFloat2x2: return "float2x2";
+ case SkSLType::kFloat3x3: return "float3x3";
+ case SkSLType::kFloat4x4: return "float4x4";
+ case SkSLType::kHalf: return "half";
+ case SkSLType::kHalf2: return "half2";
+ case SkSLType::kHalf3: return "half3";
+ case SkSLType::kHalf4: return "half4";
+ case SkSLType::kHalf2x2: return "half2x2";
+ case SkSLType::kHalf3x3: return "half3x3";
+ case SkSLType::kHalf4x4: return "half4x4";
+ case SkSLType::kInt: return "int";
+ case SkSLType::kInt2: return "int2";
+ case SkSLType::kInt3: return "int3";
+ case SkSLType::kInt4: return "int4";
+ case SkSLType::kUInt: return "uint";
+ case SkSLType::kUInt2: return "uint2";
+ case SkSLType::kUInt3: return "uint3";
+ case SkSLType::kUInt4: return "uint4";
+ case SkSLType::kTexture2DSampler: return "sampler2D";
+ case SkSLType::kTextureExternalSampler: return "samplerExternalOES";
+ case SkSLType::kTexture2DRectSampler: return "sampler2DRect";
+ case SkSLType::kTexture2D: return "texture2D";
+ case SkSLType::kSampler: return "sampler";
+ case SkSLType::kInput: return "subpassInput";
+ }
+ SkUNREACHABLE;
+}
+
+/** Is the shading language type full precision? */
+bool SkSLTypeIsFullPrecisionNumericType(SkSLType type) {
+ switch (type) {
+ // Half-precision types:
+ case SkSLType::kShort:
+ case SkSLType::kShort2:
+ case SkSLType::kShort3:
+ case SkSLType::kShort4:
+ case SkSLType::kUShort:
+ case SkSLType::kUShort2:
+ case SkSLType::kUShort3:
+ case SkSLType::kUShort4:
+ case SkSLType::kHalf:
+ case SkSLType::kHalf2:
+ case SkSLType::kHalf3:
+ case SkSLType::kHalf4:
+ case SkSLType::kHalf2x2:
+ case SkSLType::kHalf3x3:
+ case SkSLType::kHalf4x4:
+ // Non-numeric types:
+ case SkSLType::kVoid:
+ case SkSLType::kTexture2DSampler:
+ case SkSLType::kTextureExternalSampler:
+ case SkSLType::kTexture2DRectSampler:
+ case SkSLType::kTexture2D:
+ case SkSLType::kSampler:
+ case SkSLType::kInput:
+ case SkSLType::kBool:
+ case SkSLType::kBool2:
+ case SkSLType::kBool3:
+ case SkSLType::kBool4:
+ return false;
+
+ // Full-precision numeric types:
+ case SkSLType::kInt:
+ case SkSLType::kInt2:
+ case SkSLType::kInt3:
+ case SkSLType::kInt4:
+ case SkSLType::kUInt:
+ case SkSLType::kUInt2:
+ case SkSLType::kUInt3:
+ case SkSLType::kUInt4:
+ case SkSLType::kFloat:
+ case SkSLType::kFloat2:
+ case SkSLType::kFloat3:
+ case SkSLType::kFloat4:
+ case SkSLType::kFloat2x2:
+ case SkSLType::kFloat3x3:
+ case SkSLType::kFloat4x4:
+ return true;
+ }
+ SkUNREACHABLE;
+}
+
+int SkSLTypeMatrixSize(SkSLType type) {
+ switch (type) {
+ case SkSLType::kFloat2x2:
+ case SkSLType::kHalf2x2:
+ return 2;
+
+ case SkSLType::kFloat3x3:
+ case SkSLType::kHalf3x3:
+ return 3;
+
+ case SkSLType::kFloat4x4:
+ case SkSLType::kHalf4x4:
+ return 4;
+
+ case SkSLType::kFloat:
+ case SkSLType::kHalf:
+ case SkSLType::kBool:
+ case SkSLType::kShort:
+ case SkSLType::kUShort:
+ case SkSLType::kInt:
+ case SkSLType::kUInt:
+ case SkSLType::kFloat2:
+ case SkSLType::kHalf2:
+ case SkSLType::kBool2:
+ case SkSLType::kShort2:
+ case SkSLType::kUShort2:
+ case SkSLType::kInt2:
+ case SkSLType::kUInt2:
+ case SkSLType::kFloat3:
+ case SkSLType::kHalf3:
+ case SkSLType::kBool3:
+ case SkSLType::kShort3:
+ case SkSLType::kUShort3:
+ case SkSLType::kInt3:
+ case SkSLType::kUInt3:
+ case SkSLType::kFloat4:
+ case SkSLType::kHalf4:
+ case SkSLType::kBool4:
+ case SkSLType::kShort4:
+ case SkSLType::kUShort4:
+ case SkSLType::kInt4:
+ case SkSLType::kUInt4:
+ case SkSLType::kVoid:
+ case SkSLType::kTexture2DSampler:
+ case SkSLType::kTextureExternalSampler:
+ case SkSLType::kTexture2DRectSampler:
+ case SkSLType::kTexture2D:
+ case SkSLType::kSampler:
+ case SkSLType::kInput:
+ return -1;
+ }
+ SkUNREACHABLE;
+}
+
+bool SkSLTypeIsCombinedSamplerType(SkSLType type) {
+ switch (type) {
+ case SkSLType::kTexture2DRectSampler:
+ case SkSLType::kTexture2DSampler:
+ case SkSLType::kTextureExternalSampler:
+ return true;
+
+ case SkSLType::kVoid:
+ case SkSLType::kFloat:
+ case SkSLType::kFloat2:
+ case SkSLType::kFloat3:
+ case SkSLType::kFloat4:
+ case SkSLType::kFloat2x2:
+ case SkSLType::kFloat3x3:
+ case SkSLType::kFloat4x4:
+ case SkSLType::kHalf:
+ case SkSLType::kHalf2:
+ case SkSLType::kHalf3:
+ case SkSLType::kHalf4:
+ case SkSLType::kHalf2x2:
+ case SkSLType::kHalf3x3:
+ case SkSLType::kHalf4x4:
+ case SkSLType::kInt:
+ case SkSLType::kInt2:
+ case SkSLType::kInt3:
+ case SkSLType::kInt4:
+ case SkSLType::kUInt:
+ case SkSLType::kUInt2:
+ case SkSLType::kUInt3:
+ case SkSLType::kUInt4:
+ case SkSLType::kBool:
+ case SkSLType::kBool2:
+ case SkSLType::kBool3:
+ case SkSLType::kBool4:
+ case SkSLType::kShort:
+ case SkSLType::kShort2:
+ case SkSLType::kShort3:
+ case SkSLType::kShort4:
+ case SkSLType::kUShort:
+ case SkSLType::kUShort2:
+ case SkSLType::kUShort3:
+ case SkSLType::kUShort4:
+ case SkSLType::kTexture2D:
+ case SkSLType::kSampler:
+ case SkSLType::kInput:
+ return false;
+ }
+ SkUNREACHABLE;
+}
diff --git a/gfx/skia/skia/src/core/SkSLTypeShared.h b/gfx/skia/skia/src/core/SkSLTypeShared.h
new file mode 100644
index 0000000000..d4b49b32a9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSLTypeShared.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLTypeShared_DEFINED
+#define SkSLTypeShared_DEFINED
+
+#include "include/core/SkTypes.h"
+
+/**
+ * Types of shader-language-specific boxed variables we can create.
+ */
+enum class SkSLType : char {
+ kVoid,
+ kBool,
+ kBool2,
+ kBool3,
+ kBool4,
+ kShort,
+ kShort2,
+ kShort3,
+ kShort4,
+ kUShort,
+ kUShort2,
+ kUShort3,
+ kUShort4,
+ kFloat,
+ kFloat2,
+ kFloat3,
+ kFloat4,
+ kFloat2x2,
+ kFloat3x3,
+ kFloat4x4,
+ kHalf,
+ kHalf2,
+ kHalf3,
+ kHalf4,
+ kHalf2x2,
+ kHalf3x3,
+ kHalf4x4,
+ kInt,
+ kInt2,
+ kInt3,
+ kInt4,
+ kUInt,
+ kUInt2,
+ kUInt3,
+ kUInt4,
+ kTexture2DSampler,
+ kTextureExternalSampler,
+ kTexture2DRectSampler,
+ kTexture2D,
+ kSampler,
+ kInput,
+
+ kLast = kInput
+};
+static const int kSkSLTypeCount = static_cast<int>(SkSLType::kLast) + 1;
+
+/** Returns the SkSL typename for this type. */
+const char* SkSLTypeString(SkSLType t);
+
+/** Is the shading language type float (including vectors/matrices)? */
+static constexpr bool SkSLTypeIsFloatType(SkSLType type) {
+ switch (type) {
+ case SkSLType::kFloat:
+ case SkSLType::kFloat2:
+ case SkSLType::kFloat3:
+ case SkSLType::kFloat4:
+ case SkSLType::kFloat2x2:
+ case SkSLType::kFloat3x3:
+ case SkSLType::kFloat4x4:
+ case SkSLType::kHalf:
+ case SkSLType::kHalf2:
+ case SkSLType::kHalf3:
+ case SkSLType::kHalf4:
+ case SkSLType::kHalf2x2:
+ case SkSLType::kHalf3x3:
+ case SkSLType::kHalf4x4:
+ return true;
+
+ case SkSLType::kVoid:
+ case SkSLType::kTexture2DSampler:
+ case SkSLType::kTextureExternalSampler:
+ case SkSLType::kTexture2DRectSampler:
+ case SkSLType::kBool:
+ case SkSLType::kBool2:
+ case SkSLType::kBool3:
+ case SkSLType::kBool4:
+ case SkSLType::kShort:
+ case SkSLType::kShort2:
+ case SkSLType::kShort3:
+ case SkSLType::kShort4:
+ case SkSLType::kUShort:
+ case SkSLType::kUShort2:
+ case SkSLType::kUShort3:
+ case SkSLType::kUShort4:
+ case SkSLType::kInt:
+ case SkSLType::kInt2:
+ case SkSLType::kInt3:
+ case SkSLType::kInt4:
+ case SkSLType::kUInt:
+ case SkSLType::kUInt2:
+ case SkSLType::kUInt3:
+ case SkSLType::kUInt4:
+ case SkSLType::kTexture2D:
+ case SkSLType::kSampler:
+ case SkSLType::kInput:
+ return false;
+ }
+ SkUNREACHABLE;
+}
+
+/** Is the shading language type integral (including vectors)? */
+static constexpr bool SkSLTypeIsIntegralType(SkSLType type) {
+ switch (type) {
+ case SkSLType::kShort:
+ case SkSLType::kShort2:
+ case SkSLType::kShort3:
+ case SkSLType::kShort4:
+ case SkSLType::kUShort:
+ case SkSLType::kUShort2:
+ case SkSLType::kUShort3:
+ case SkSLType::kUShort4:
+ case SkSLType::kInt:
+ case SkSLType::kInt2:
+ case SkSLType::kInt3:
+ case SkSLType::kInt4:
+ case SkSLType::kUInt:
+ case SkSLType::kUInt2:
+ case SkSLType::kUInt3:
+ case SkSLType::kUInt4:
+ return true;
+
+ case SkSLType::kFloat:
+ case SkSLType::kFloat2:
+ case SkSLType::kFloat3:
+ case SkSLType::kFloat4:
+ case SkSLType::kFloat2x2:
+ case SkSLType::kFloat3x3:
+ case SkSLType::kFloat4x4:
+ case SkSLType::kHalf:
+ case SkSLType::kHalf2:
+ case SkSLType::kHalf3:
+ case SkSLType::kHalf4:
+ case SkSLType::kHalf2x2:
+ case SkSLType::kHalf3x3:
+ case SkSLType::kHalf4x4:
+ case SkSLType::kVoid:
+ case SkSLType::kTexture2DSampler:
+ case SkSLType::kTextureExternalSampler:
+ case SkSLType::kTexture2DRectSampler:
+ case SkSLType::kBool:
+ case SkSLType::kBool2:
+ case SkSLType::kBool3:
+ case SkSLType::kBool4:
+ case SkSLType::kTexture2D:
+ case SkSLType::kSampler:
+ case SkSLType::kInput:
+ return false;
+ }
+ SkUNREACHABLE;
+}
+
+/** If the type represents a single value or vector return the vector length; otherwise, -1. */
+static constexpr int SkSLTypeVecLength(SkSLType type) {
+ switch (type) {
+ case SkSLType::kFloat:
+ case SkSLType::kHalf:
+ case SkSLType::kBool:
+ case SkSLType::kShort:
+ case SkSLType::kUShort:
+ case SkSLType::kInt:
+ case SkSLType::kUInt:
+ return 1;
+
+ case SkSLType::kFloat2:
+ case SkSLType::kHalf2:
+ case SkSLType::kBool2:
+ case SkSLType::kShort2:
+ case SkSLType::kUShort2:
+ case SkSLType::kInt2:
+ case SkSLType::kUInt2:
+ return 2;
+
+ case SkSLType::kFloat3:
+ case SkSLType::kHalf3:
+ case SkSLType::kBool3:
+ case SkSLType::kShort3:
+ case SkSLType::kUShort3:
+ case SkSLType::kInt3:
+ case SkSLType::kUInt3:
+ return 3;
+
+ case SkSLType::kFloat4:
+ case SkSLType::kHalf4:
+ case SkSLType::kBool4:
+ case SkSLType::kShort4:
+ case SkSLType::kUShort4:
+ case SkSLType::kInt4:
+ case SkSLType::kUInt4:
+ return 4;
+
+ case SkSLType::kFloat2x2:
+ case SkSLType::kFloat3x3:
+ case SkSLType::kFloat4x4:
+ case SkSLType::kHalf2x2:
+ case SkSLType::kHalf3x3:
+ case SkSLType::kHalf4x4:
+ case SkSLType::kVoid:
+ case SkSLType::kTexture2DSampler:
+ case SkSLType::kTextureExternalSampler:
+ case SkSLType::kTexture2DRectSampler:
+ case SkSLType::kTexture2D:
+ case SkSLType::kSampler:
+ case SkSLType::kInput:
+ return -1;
+ }
+ SkUNREACHABLE;
+}
+
+/**
+ * Is the shading language type supported as a uniform (ie, does it have a corresponding set
+ * function on GrGLSLProgramDataManager)?
+ */
+static constexpr bool SkSLTypeCanBeUniformValue(SkSLType type) {
+ return SkSLTypeIsFloatType(type) || SkSLTypeIsIntegralType(type);
+}
+
+/** Is the shading language type full precision? */
+bool SkSLTypeIsFullPrecisionNumericType(SkSLType type);
+
+/** If the type represents a square matrix, return its size; otherwise, -1. */
+int SkSLTypeMatrixSize(SkSLType type);
+
+/** Is the shading language type a combined texture-and-sampler type? */
+bool SkSLTypeIsCombinedSamplerType(SkSLType type);
+
+#endif // SkSLTypeShared_DEFINED
diff --git a/gfx/skia/skia/src/core/SkSafeRange.h b/gfx/skia/skia/src/core/SkSafeRange.h
new file mode 100644
index 0000000000..a8a944655c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSafeRange.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSafeRange_DEFINED
+#define SkSafeRange_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <cstdint>
+
+// SkSafeRange always checks that a series of operations are in-range.
+// This check is sticky, so that if any one operation fails, the object will remember that and
+// return false from ok().
+
+class SkSafeRange {
+public:
+ explicit operator bool() const { return fOK; }
+
+ bool ok() const { return fOK; }
+
+ // checks 0 <= value <= max.
+ // On success, returns value
+ // On failure, returns 0 and sets ok() to false
+ template <typename T> T checkLE(uint64_t value, T max) {
+ SkASSERT(static_cast<int64_t>(max) >= 0);
+ if (value > static_cast<uint64_t>(max)) {
+ fOK = false;
+ value = 0;
+ }
+ return static_cast<T>(value);
+ }
+
+ int checkGE(int value, int min) {
+ if (value < min) {
+ fOK = false;
+ value = min;
+ }
+ return value;
+ }
+
+private:
+ bool fOK = true;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSamplingPriv.h b/gfx/skia/skia/src/core/SkSamplingPriv.h
new file mode 100644
index 0000000000..1972a2016c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSamplingPriv.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSamplingPriv_DEFINED
+#define SkSamplingPriv_DEFINED
+
+#include "include/core/SkSamplingOptions.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+// Private copy of SkFilterQuality, just for legacy deserialization
+// Matches values in SkFilterQuality
+enum SkLegacyFQ {
+ kNone_SkLegacyFQ = 0, //!< nearest-neighbor; fastest but lowest quality
+ kLow_SkLegacyFQ = 1, //!< bilerp
+ kMedium_SkLegacyFQ = 2, //!< bilerp + mipmaps; good for down-scaling
+ kHigh_SkLegacyFQ = 3, //!< bicubic resampling; slowest but good quality
+
+ kLast_SkLegacyFQ = kHigh_SkLegacyFQ,
+};
+
+// Matches values in SkSamplingOptions::MediumBehavior
+enum SkMediumAs {
+ kNearest_SkMediumAs,
+ kLinear_SkMediumAs,
+};
+
+class SkSamplingPriv {
+public:
+ static size_t FlatSize(const SkSamplingOptions& options) {
+ size_t size = sizeof(uint32_t); // maxAniso
+ if (!options.isAniso()) {
+ size += 3 * sizeof(uint32_t); // bool32 + [2 floats | 2 ints]
+ }
+ return size;
+ }
+
+ // Returns true if the sampling can be ignored when the CTM is identity.
+ static bool NoChangeWithIdentityMatrix(const SkSamplingOptions& sampling) {
+ // If B == 0, the cubic resampler should have no effect for identity matrices
+ // https://entropymine.com/imageworsener/bicubic/
+ // We assume aniso has no effect with an identity transform.
+ return !sampling.useCubic || sampling.cubic.B == 0;
+ }
+
+ // Makes a fallback SkSamplingOptions for cases where anisotropic filtering is not allowed.
+ // anisotropic filtering can access mip levels if present, but we don't add mipmaps to non-
+ // mipmapped images when the user requests anisotropic. So we shouldn't fall back to a
+ // sampling that would trigger mip map creation.
+ static SkSamplingOptions AnisoFallback(bool imageIsMipped) {
+ auto mm = imageIsMipped ? SkMipmapMode::kLinear : SkMipmapMode::kNone;
+ return SkSamplingOptions(SkFilterMode::kLinear, mm);
+ }
+
+ static SkSamplingOptions FromFQ(SkLegacyFQ fq, SkMediumAs behavior = kNearest_SkMediumAs) {
+ switch (fq) {
+ case kHigh_SkLegacyFQ:
+ return SkSamplingOptions(SkCubicResampler{1/3.0f, 1/3.0f});
+ case kMedium_SkLegacyFQ:
+ return SkSamplingOptions(SkFilterMode::kLinear,
+ behavior == kNearest_SkMediumAs ? SkMipmapMode::kNearest
+ : SkMipmapMode::kLinear);
+ case kLow_SkLegacyFQ:
+ return SkSamplingOptions(SkFilterMode::kLinear, SkMipmapMode::kNone);
+ case kNone_SkLegacyFQ:
+ break;
+ }
+ return SkSamplingOptions(SkFilterMode::kNearest, SkMipmapMode::kNone);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScalar.cpp b/gfx/skia/skia/src/core/SkScalar.cpp
new file mode 100644
index 0000000000..316cae4ce7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalar.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkScalar.h"
+
+SkScalar SkScalarInterpFunc(SkScalar searchKey, const SkScalar keys[],
+ const SkScalar values[], int length) {
+ SkASSERT(length > 0);
+ SkASSERT(keys != nullptr);
+ SkASSERT(values != nullptr);
+#ifdef SK_DEBUG
+ for (int i = 1; i < length; i++) {
+ SkASSERT(keys[i-1] <= keys[i]);
+ }
+#endif
+ int right = 0;
+ while (right < length && keys[right] < searchKey) {
+ ++right;
+ }
+ // Could use sentinel values to eliminate conditionals, but since the
+ // tables are taken as input, a simpler format is better.
+ if (right == length) {
+ return values[length-1];
+ }
+ if (right == 0) {
+ return values[0];
+ }
+ // Otherwise, interpolate between right - 1 and right.
+ SkScalar leftKey = keys[right-1];
+ SkScalar rightKey = keys[right];
+ SkScalar fract = (searchKey - leftKey) / (rightKey - leftKey);
+ return SkScalarInterp(values[right-1], values[right], fract);
+}
diff --git a/gfx/skia/skia/src/core/SkScaleToSides.h b/gfx/skia/skia/src/core/SkScaleToSides.h
new file mode 100644
index 0000000000..f9ba845990
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScaleToSides.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScaleToSides_DEFINED
+#define SkScaleToSides_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+#include <cmath>
+#include <utility>
+
+class SkScaleToSides {
+public:
+ // This code assumes that a and b fit in a float, and therefore the resulting smaller value
+ // of a and b will fit in a float. The side of the rectangle may be larger than a float.
+ // Scale must be less than or equal to the ratio limit / (*a + *b).
+ // This code assumes that NaN and Inf are never passed in.
+ static void AdjustRadii(double limit, double scale, SkScalar* a, SkScalar* b) {
+ SkASSERTF(scale < 1.0 && scale > 0.0, "scale: %g", scale);
+
+ *a = (float)((double)*a * scale);
+ *b = (float)((double)*b * scale);
+
+ if (*a + *b > limit) {
+ float* minRadius = a;
+ float* maxRadius = b;
+
+ // Force minRadius to be the smaller of the two.
+ if (*minRadius > *maxRadius) {
+ using std::swap;
+ swap(minRadius, maxRadius);
+ }
+
+ // newMinRadius must be float in order to give the actual value of the radius.
+ // The newMinRadius will always be smaller than limit. The largest that minRadius can be
+ // is 1/2 the ratio of minRadius : (minRadius + maxRadius), therefore in the resulting
+ // division, minRadius can be no larger than 1/2 limit + ULP.
+ float newMinRadius = *minRadius;
+
+ float newMaxRadius = (float)(limit - newMinRadius);
+
+ // Reduce newMaxRadius an ulp at a time until it fits. This usually never happens,
+ // but if it does it could be 1 or 2 times. In certain pathological cases it could be
+ // more. Max iterations seen so far is 17.
+ while (newMaxRadius + newMinRadius > limit) {
+ newMaxRadius = nextafterf(newMaxRadius, 0.0f);
+ }
+ *maxRadius = newMaxRadius;
+ }
+
+ SkASSERTF(*a >= 0.0f && *b >= 0.0f, "a: %g, b: %g, limit: %g, scale: %g", *a, *b, limit,
+ scale);
+
+ SkASSERTF(*a + *b <= limit,
+ "\nlimit: %.17f, sum: %.17f, a: %.10f, b: %.10f, scale: %.20f",
+ limit, *a + *b, *a, *b, scale);
+ }
+};
+#endif // SkScaleToSides_DEFINED
diff --git a/gfx/skia/skia/src/core/SkScalerContext.cpp b/gfx/skia/skia/src/core/SkScalerContext.cpp
new file mode 100644
index 0000000000..8c8ec0509c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalerContext.cpp
@@ -0,0 +1,1284 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkScalerContext.h"
+
+#include "include/core/SkDrawable.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkBlitter_A8.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkDrawBase.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkStroke.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkTextFormatParams.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/utils/SkMatrix22.h"
+#include <new>
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+static inline const constexpr bool kSkShowTextBlitCoverage = false;
+static inline const constexpr bool kSkScalerContextDumpRec = false;
+}
+
+SkScalerContextRec SkScalerContext::PreprocessRec(const SkTypeface& typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor& desc) {
+ SkScalerContextRec rec =
+ *static_cast<const SkScalerContextRec*>(desc.findEntry(kRec_SkDescriptorTag, nullptr));
+
+ // Allow the typeface to adjust the rec.
+ typeface.onFilterRec(&rec);
+
+ if (effects.fMaskFilter) {
+ // Pre-blend is not currently applied to filtered text.
+ // The primary filter is blur, for which contrast makes no sense,
+ // and for which the destination guess error is more visible.
+ // Also, all existing users of blur have calibrated for linear.
+ rec.ignorePreBlend();
+ }
+
+ SkColor lumColor = rec.getLuminanceColor();
+
+ if (rec.fMaskFormat == SkMask::kA8_Format) {
+ U8CPU lum = SkComputeLuminance(SkColorGetR(lumColor),
+ SkColorGetG(lumColor),
+ SkColorGetB(lumColor));
+ lumColor = SkColorSetRGB(lum, lum, lum);
+ }
+
+ // TODO: remove CanonicalColor when we fix up Chrome layout tests.
+ rec.setLuminanceColor(lumColor);
+
+ return rec;
+}
+
+SkScalerContext::SkScalerContext(sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : fRec(PreprocessRec(*typeface, effects, *desc))
+ , fTypeface(std::move(typeface))
+ , fPathEffect(sk_ref_sp(effects.fPathEffect))
+ , fMaskFilter(sk_ref_sp(effects.fMaskFilter))
+ // Initialize based on our settings. Subclasses can also force this.
+ , fGenerateImageFromPath(fRec.fFrameWidth >= 0 || fPathEffect != nullptr)
+
+ , fPreBlend(fMaskFilter ? SkMaskGamma::PreBlend() : SkScalerContext::GetMaskPreBlend(fRec))
+{
+ if constexpr (kSkScalerContextDumpRec) {
+ SkDebugf("SkScalerContext checksum %x count %d length %d\n",
+ desc->getChecksum(), desc->getCount(), desc->getLength());
+ SkDebugf("%s", fRec.dump().c_str());
+ SkDebugf(" effects %p\n", desc->findEntry(kEffects_SkDescriptorTag, nullptr));
+ }
+}
+
+SkScalerContext::~SkScalerContext() {}
+
+/**
+ * In order to call cachedDeviceLuminance, cachedPaintLuminance, or
+ * cachedMaskGamma the caller must hold the mask_gamma_cache_mutex and continue
+ * to hold it until the returned pointer is refed or forgotten.
+ */
+static SkMutex& mask_gamma_cache_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+static SkMaskGamma* gLinearMaskGamma = nullptr;
+static SkMaskGamma* gMaskGamma = nullptr;
+static SkScalar gContrast = SK_ScalarMin;
+static SkScalar gPaintGamma = SK_ScalarMin;
+static SkScalar gDeviceGamma = SK_ScalarMin;
+
+/**
+ * The caller must hold the mask_gamma_cache_mutex() and continue to hold it until
+ * the returned SkMaskGamma pointer is refed or forgotten.
+ */
+static const SkMaskGamma& cached_mask_gamma(SkScalar contrast, SkScalar paintGamma,
+ SkScalar deviceGamma) {
+ mask_gamma_cache_mutex().assertHeld();
+ if (0 == contrast && SK_Scalar1 == paintGamma && SK_Scalar1 == deviceGamma) {
+ if (nullptr == gLinearMaskGamma) {
+ gLinearMaskGamma = new SkMaskGamma;
+ }
+ return *gLinearMaskGamma;
+ }
+ if (gContrast != contrast || gPaintGamma != paintGamma || gDeviceGamma != deviceGamma) {
+ SkSafeUnref(gMaskGamma);
+ gMaskGamma = new SkMaskGamma(contrast, paintGamma, deviceGamma);
+ gContrast = contrast;
+ gPaintGamma = paintGamma;
+ gDeviceGamma = deviceGamma;
+ }
+ return *gMaskGamma;
+}
+
+/**
+ * Expands fDeviceGamma, fPaintGamma, fContrast, and fLumBits into a mask pre-blend.
+ */
+SkMaskGamma::PreBlend SkScalerContext::GetMaskPreBlend(const SkScalerContextRec& rec) {
+ SkAutoMutexExclusive ama(mask_gamma_cache_mutex());
+
+ const SkMaskGamma& maskGamma = cached_mask_gamma(rec.getContrast(),
+ rec.getPaintGamma(),
+ rec.getDeviceGamma());
+
+ // TODO: remove CanonicalColor when we fix up Chrome layout tests.
+ return maskGamma.preBlend(rec.getLuminanceColor());
+}
+
+size_t SkScalerContext::GetGammaLUTSize(SkScalar contrast, SkScalar paintGamma,
+ SkScalar deviceGamma, int* width, int* height) {
+ SkAutoMutexExclusive ama(mask_gamma_cache_mutex());
+ const SkMaskGamma& maskGamma = cached_mask_gamma(contrast,
+ paintGamma,
+ deviceGamma);
+
+ maskGamma.getGammaTableDimensions(width, height);
+ size_t size = (*width)*(*height)*sizeof(uint8_t);
+
+ return size;
+}
+
+bool SkScalerContext::GetGammaLUTData(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ uint8_t* data) {
+ SkAutoMutexExclusive ama(mask_gamma_cache_mutex());
+ const SkMaskGamma& maskGamma = cached_mask_gamma(contrast,
+ paintGamma,
+ deviceGamma);
+ const uint8_t* gammaTables = maskGamma.getGammaTables();
+ if (!gammaTables) {
+ return false;
+ }
+
+ int width, height;
+ maskGamma.getGammaTableDimensions(&width, &height);
+ size_t size = width*height * sizeof(uint8_t);
+ memcpy(data, gammaTables, size);
+ return true;
+}
+
+SkGlyph SkScalerContext::makeGlyph(SkPackedGlyphID packedID, SkArenaAlloc* alloc) {
+ return internalMakeGlyph(packedID, fRec.fMaskFormat, alloc);
+}
+
+bool SkScalerContext::GenerateMetricsFromPath(
+ SkGlyph* glyph, const SkPath& devPath, SkMask::Format format,
+ const bool verticalLCD, const bool a8FromLCD, const bool hairline)
+{
+ // Only BW, A8, and LCD16 can be produced from paths.
+ if (glyph->fMaskFormat != SkMask::kBW_Format &&
+ glyph->fMaskFormat != SkMask::kA8_Format &&
+ glyph->fMaskFormat != SkMask::kLCD16_Format)
+ {
+ glyph->fMaskFormat = SkMask::kA8_Format;
+ }
+
+ const SkRect bounds = devPath.getBounds();
+ const SkIRect ir = bounds.roundOut();
+ if (!SkRectPriv::Is16Bit(ir)) {
+ return false;
+ }
+ glyph->fLeft = ir.fLeft;
+ glyph->fTop = ir.fTop;
+ glyph->fWidth = SkToU16(ir.width());
+ glyph->fHeight = SkToU16(ir.height());
+
+ if (!ir.isEmpty()) {
+ const bool fromLCD = (glyph->fMaskFormat == SkMask::kLCD16_Format) ||
+ (glyph->fMaskFormat == SkMask::kA8_Format && a8FromLCD);
+ const bool notEmptyAndFromLCD = 0 < glyph->fWidth && fromLCD;
+
+ const bool needExtraWidth = (notEmptyAndFromLCD && !verticalLCD) || hairline;
+ const bool needExtraHeight = (notEmptyAndFromLCD && verticalLCD) || hairline;
+ if (needExtraWidth) {
+ glyph->fWidth += 2;
+ glyph->fLeft -= 1;
+ }
+ if (needExtraHeight) {
+ glyph->fHeight += 2;
+ glyph->fTop -= 1;
+ }
+ }
+ return true;
+}
+
+SkGlyph SkScalerContext::internalMakeGlyph(SkPackedGlyphID packedID, SkMask::Format format, SkArenaAlloc* alloc) {
+ auto zeroBounds = [](SkGlyph& glyph) {
+ glyph.fLeft = 0;
+ glyph.fTop = 0;
+ glyph.fWidth = 0;
+ glyph.fHeight = 0;
+ };
+ SkGlyph glyph{packedID};
+ glyph.fMaskFormat = format;
+ // Must call to allow the subclass to determine the glyph representation to use.
+ this->generateMetrics(&glyph, alloc);
+ SkDEBUGCODE(glyph.fAdvancesBoundsFormatAndInitialPathDone = true;)
+ if (fGenerateImageFromPath) {
+ this->internalGetPath(glyph, alloc);
+ const SkPath* devPath = glyph.path();
+ if (devPath) {
+ // generateMetrics may have modified the glyph fMaskFormat.
+ glyph.fMaskFormat = format;
+ const bool doVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+ const bool a8LCD = SkToBool(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag);
+ const bool hairline = glyph.pathIsHairline();
+ if (!GenerateMetricsFromPath(&glyph, *devPath, format, doVert, a8LCD, hairline)) {
+ zeroBounds(glyph);
+ return glyph;
+ }
+ }
+ }
+
+ // if either dimension is empty, zap the image bounds of the glyph
+ if (0 == glyph.fWidth || 0 == glyph.fHeight) {
+ zeroBounds(glyph);
+ return glyph;
+ }
+
+ if (fMaskFilter) {
+ SkMask src = glyph.mask(),
+ dst;
+ SkMatrix matrix;
+
+ fRec.getMatrixFrom2x2(&matrix);
+
+ src.fImage = nullptr; // only want the bounds from the filter
+ if (as_MFB(fMaskFilter)->filterMask(&dst, src, matrix, nullptr)) {
+ if (dst.fBounds.isEmpty() || !SkRectPriv::Is16Bit(dst.fBounds)) {
+ zeroBounds(glyph);
+ return glyph;
+ }
+ SkASSERT(dst.fImage == nullptr);
+ glyph.fLeft = dst.fBounds.fLeft;
+ glyph.fTop = dst.fBounds.fTop;
+ glyph.fWidth = SkToU16(dst.fBounds.width());
+ glyph.fHeight = SkToU16(dst.fBounds.height());
+ glyph.fMaskFormat = dst.fFormat;
+ }
+ }
+ return glyph;
+}
+
+static void applyLUTToA8Mask(const SkMask& mask, const uint8_t* lut) {
+ uint8_t* SK_RESTRICT dst = (uint8_t*)mask.fImage;
+ unsigned rowBytes = mask.fRowBytes;
+
+ for (int y = mask.fBounds.height() - 1; y >= 0; --y) {
+ for (int x = mask.fBounds.width() - 1; x >= 0; --x) {
+ dst[x] = lut[dst[x]];
+ }
+ dst += rowBytes;
+ }
+}
+
+static void pack4xHToMask(const SkPixmap& src, const SkMask& dst,
+ const SkMaskGamma::PreBlend& maskPreBlend,
+ const bool doBGR, const bool doVert) {
+#define SAMPLES_PER_PIXEL 4
+#define LCD_PER_PIXEL 3
+ SkASSERT(kAlpha_8_SkColorType == src.colorType());
+
+ const bool toA8 = SkMask::kA8_Format == dst.fFormat;
+ SkASSERT(SkMask::kLCD16_Format == dst.fFormat || toA8);
+
+ // doVert in this function means swap x and y when writing to dst.
+ if (doVert) {
+ SkASSERT(src.width() == (dst.fBounds.height() - 2) * 4);
+ SkASSERT(src.height() == dst.fBounds.width());
+ } else {
+ SkASSERT(src.width() == (dst.fBounds.width() - 2) * 4);
+ SkASSERT(src.height() == dst.fBounds.height());
+ }
+
+ const int sample_width = src.width();
+ const int height = src.height();
+
+ uint8_t* dstImage = dst.fImage;
+ size_t dstRB = dst.fRowBytes;
+ // An N tap FIR is defined by
+ // out[n] = coeff[0]*x[n] + coeff[1]*x[n-1] + ... + coeff[N]*x[n-N]
+ // or
+ // out[n] = sum(i, 0, N, coeff[i]*x[n-i])
+
+ // The strategy is to use one FIR (different coefficients) for each of r, g, and b.
+ // This means using every 4th FIR output value of each FIR and discarding the rest.
+ // The FIRs are aligned, and the coefficients reach 5 samples to each side of their 'center'.
+ // (For r and b this is technically incorrect, but the coeffs outside round to zero anyway.)
+
+ // These are in some fixed point representation.
+ // Adding up to more than one simulates ink spread.
+ // For implementation reasons, these should never add up to more than two.
+
+ // Coefficients determined by a Gaussian where 5 samples = 3 std deviations (0x110 'contrast').
+ // Calculated using tools/generate_fir_coeff.py
+ // With this one almost no fringing is ever seen, but it is imperceptibly blurry.
+ // The lcd smoothed text is almost imperceptibly different from gray,
+ // but is still sharper on small stems and small rounded corners than gray.
+ // This also seems to be about as wide as one can get and only have a three pixel kernel.
+ // TODO: calculate these at runtime so parameters can be adjusted (esp contrast).
+ static const unsigned int coefficients[LCD_PER_PIXEL][SAMPLES_PER_PIXEL*3] = {
+ //The red subpixel is centered inside the first sample (at 1/6 pixel), and is shifted.
+ { 0x03, 0x0b, 0x1c, 0x33, 0x40, 0x39, 0x24, 0x10, 0x05, 0x01, 0x00, 0x00, },
+ //The green subpixel is centered between two samples (at 1/2 pixel), so is symmetric
+ { 0x00, 0x02, 0x08, 0x16, 0x2b, 0x3d, 0x3d, 0x2b, 0x16, 0x08, 0x02, 0x00, },
+ //The blue subpixel is centered inside the last sample (at 5/6 pixel), and is shifted.
+ { 0x00, 0x00, 0x01, 0x05, 0x10, 0x24, 0x39, 0x40, 0x33, 0x1c, 0x0b, 0x03, },
+ };
+
+ size_t dstPB = toA8 ? sizeof(uint8_t) : sizeof(uint16_t);
+ for (int y = 0; y < height; ++y) {
+ uint8_t* dstP;
+ size_t dstPDelta;
+ if (doVert) {
+ dstP = SkTAddOffset<uint8_t>(dstImage, y * dstPB);
+ dstPDelta = dstRB;
+ } else {
+ dstP = SkTAddOffset<uint8_t>(dstImage, y * dstRB);
+ dstPDelta = dstPB;
+ }
+
+ const uint8_t* srcP = src.addr8(0, y);
+
+ // TODO: this fir filter implementation is straightforward, but slow.
+ // It should be possible to make it much faster.
+ for (int sample_x = -4; sample_x < sample_width + 4; sample_x += 4) {
+ int fir[LCD_PER_PIXEL] = { 0 };
+ for (int sample_index = std::max(0, sample_x - 4), coeff_index = sample_index - (sample_x - 4)
+ ; sample_index < std::min(sample_x + 8, sample_width)
+ ; ++sample_index, ++coeff_index)
+ {
+ int sample_value = srcP[sample_index];
+ for (int subpxl_index = 0; subpxl_index < LCD_PER_PIXEL; ++subpxl_index) {
+ fir[subpxl_index] += coefficients[subpxl_index][coeff_index] * sample_value;
+ }
+ }
+ for (int subpxl_index = 0; subpxl_index < LCD_PER_PIXEL; ++subpxl_index) {
+ fir[subpxl_index] /= 0x100;
+ fir[subpxl_index] = std::min(fir[subpxl_index], 255);
+ }
+
+ U8CPU r, g, b;
+ if (doBGR) {
+ r = fir[2];
+ g = fir[1];
+ b = fir[0];
+ } else {
+ r = fir[0];
+ g = fir[1];
+ b = fir[2];
+ }
+ if constexpr (kSkShowTextBlitCoverage) {
+ r = std::max(r, 10u);
+ g = std::max(g, 10u);
+ b = std::max(b, 10u);
+ }
+ if (toA8) {
+ U8CPU a = (r + g + b) / 3;
+ if (maskPreBlend.isApplicable()) {
+ a = maskPreBlend.fG[a];
+ }
+ *dstP = a;
+ } else {
+ if (maskPreBlend.isApplicable()) {
+ r = maskPreBlend.fR[r];
+ g = maskPreBlend.fG[g];
+ b = maskPreBlend.fB[b];
+ }
+ *(uint16_t*)dstP = SkPack888ToRGB16(r, g, b);
+ }
+ dstP = SkTAddOffset<uint8_t>(dstP, dstPDelta);
+ }
+ }
+}
+
+static inline int convert_8_to_1(unsigned byte) {
+ SkASSERT(byte <= 0xFF);
+ return byte >> 7;
+}
+
+static uint8_t pack_8_to_1(const uint8_t alpha[8]) {
+ unsigned bits = 0;
+ for (int i = 0; i < 8; ++i) {
+ bits <<= 1;
+ bits |= convert_8_to_1(alpha[i]);
+ }
+ return SkToU8(bits);
+}
+
+static void packA8ToA1(const SkMask& mask, const uint8_t* src, size_t srcRB) {
+ const int height = mask.fBounds.height();
+ const int width = mask.fBounds.width();
+ const int octs = width >> 3;
+ const int leftOverBits = width & 7;
+
+ uint8_t* dst = mask.fImage;
+ const int dstPad = mask.fRowBytes - SkAlign8(width)/8;
+ SkASSERT(dstPad >= 0);
+
+ SkASSERT(width >= 0);
+ SkASSERT(srcRB >= (size_t)width);
+ const size_t srcPad = srcRB - width;
+
+ for (int y = 0; y < height; ++y) {
+ for (int i = 0; i < octs; ++i) {
+ *dst++ = pack_8_to_1(src);
+ src += 8;
+ }
+ if (leftOverBits > 0) {
+ unsigned bits = 0;
+ int shift = 7;
+ for (int i = 0; i < leftOverBits; ++i, --shift) {
+ bits |= convert_8_to_1(*src++) << shift;
+ }
+ *dst++ = bits;
+ }
+ src += srcPad;
+ dst += dstPad;
+ }
+}
+
+void SkScalerContext::GenerateImageFromPath(
+ const SkMask& mask, const SkPath& path, const SkMaskGamma::PreBlend& maskPreBlend,
+ const bool doBGR, const bool verticalLCD, const bool a8FromLCD, const bool hairline)
+{
+ SkASSERT(mask.fFormat == SkMask::kBW_Format ||
+ mask.fFormat == SkMask::kA8_Format ||
+ mask.fFormat == SkMask::kLCD16_Format);
+
+ SkPaint paint;
+ SkPath strokePath;
+ const SkPath* pathToUse = &path;
+
+ int srcW = mask.fBounds.width();
+ int srcH = mask.fBounds.height();
+ int dstW = srcW;
+ int dstH = srcH;
+
+ SkMatrix matrix;
+ matrix.setTranslate(-SkIntToScalar(mask.fBounds.fLeft),
+ -SkIntToScalar(mask.fBounds.fTop));
+
+ paint.setStroke(hairline);
+ paint.setAntiAlias(SkMask::kBW_Format != mask.fFormat);
+
+ const bool fromLCD = (mask.fFormat == SkMask::kLCD16_Format) ||
+ (mask.fFormat == SkMask::kA8_Format && a8FromLCD);
+ const bool intermediateDst = fromLCD || mask.fFormat == SkMask::kBW_Format;
+ if (fromLCD) {
+ if (verticalLCD) {
+ dstW = 4*dstH - 8;
+ dstH = srcW;
+ matrix.setAll(0, 4, -SkIntToScalar(mask.fBounds.fTop + 1) * 4,
+ 1, 0, -SkIntToScalar(mask.fBounds.fLeft),
+ 0, 0, 1);
+ } else {
+ dstW = 4*dstW - 8;
+ matrix.setAll(4, 0, -SkIntToScalar(mask.fBounds.fLeft + 1) * 4,
+ 0, 1, -SkIntToScalar(mask.fBounds.fTop),
+ 0, 0, 1);
+ }
+
+ // LCD hairline doesn't line up with the pixels, so do it the expensive way.
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+ if (hairline) {
+ rec.setStrokeStyle(1.0f, false);
+ rec.setStrokeParams(SkPaint::kButt_Cap, SkPaint::kRound_Join, 0.0f);
+ }
+ if (rec.needToApply() && rec.applyToPath(&strokePath, path)) {
+ pathToUse = &strokePath;
+ paint.setStyle(SkPaint::kFill_Style);
+ }
+ }
+
+ SkRasterClip clip;
+ clip.setRect(SkIRect::MakeWH(dstW, dstH));
+
+ const SkImageInfo info = SkImageInfo::MakeA8(dstW, dstH);
+ SkAutoPixmapStorage dst;
+
+ if (intermediateDst) {
+ if (!dst.tryAlloc(info)) {
+ // can't allocate offscreen, so empty the mask and return
+ sk_bzero(mask.fImage, mask.computeImageSize());
+ return;
+ }
+ } else {
+ dst.reset(info, mask.fImage, mask.fRowBytes);
+ }
+ sk_bzero(dst.writable_addr(), dst.computeByteSize());
+
+ SkDrawBase draw;
+ SkMatrixProvider matrixProvider(matrix);
+ draw.fBlitterChooser = SkA8Blitter_Choose;
+ draw.fDst = dst;
+ draw.fRC = &clip;
+ draw.fMatrixProvider = &matrixProvider;
+ draw.drawPath(*pathToUse, paint);
+
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ packA8ToA1(mask, dst.addr8(0, 0), dst.rowBytes());
+ break;
+ case SkMask::kA8_Format:
+ if (fromLCD) {
+ pack4xHToMask(dst, mask, maskPreBlend, doBGR, verticalLCD);
+ } else if (maskPreBlend.isApplicable()) {
+ applyLUTToA8Mask(mask, maskPreBlend.fG);
+ }
+ break;
+ case SkMask::kLCD16_Format:
+ pack4xHToMask(dst, mask, maskPreBlend, doBGR, verticalLCD);
+ break;
+ default:
+ break;
+ }
+}
+
+void SkScalerContext::getImage(const SkGlyph& origGlyph) {
+ SkASSERT(origGlyph.fAdvancesBoundsFormatAndInitialPathDone);
+
+ const SkGlyph* unfilteredGlyph = &origGlyph;
+ // in case we need to call generateImage on a mask-format that is different
+ // (i.e. larger) than what our caller allocated by looking at origGlyph.
+ SkAutoMalloc tmpGlyphImageStorage;
+ SkGlyph tmpGlyph;
+ SkSTArenaAlloc<sizeof(SkGlyph::PathData)> tmpGlyphPathDataStorage;
+ if (fMaskFilter) {
+ // need the original bounds, sans our maskfilter
+ sk_sp<SkMaskFilter> mf = std::move(fMaskFilter);
+ tmpGlyph = this->makeGlyph(origGlyph.getPackedID(), &tmpGlyphPathDataStorage);
+ fMaskFilter = std::move(mf);
+
+ // Use the origGlyph storage for the temporary unfiltered mask if it will fit.
+ if (tmpGlyph.fMaskFormat == origGlyph.fMaskFormat &&
+ tmpGlyph.imageSize() <= origGlyph.imageSize())
+ {
+ tmpGlyph.fImage = origGlyph.fImage;
+ } else {
+ tmpGlyphImageStorage.reset(tmpGlyph.imageSize());
+ tmpGlyph.fImage = tmpGlyphImageStorage.get();
+ }
+ unfilteredGlyph = &tmpGlyph;
+ }
+
+ if (!fGenerateImageFromPath) {
+ generateImage(*unfilteredGlyph);
+ } else {
+ SkASSERT(origGlyph.setPathHasBeenCalled());
+ const SkPath* devPath = origGlyph.path();
+
+ if (!devPath) {
+ generateImage(*unfilteredGlyph);
+ } else {
+ SkMask mask = unfilteredGlyph->mask();
+ SkASSERT(SkMask::kARGB32_Format != origGlyph.fMaskFormat);
+ SkASSERT(SkMask::kARGB32_Format != mask.fFormat);
+ const bool doBGR = SkToBool(fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag);
+ const bool doVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+ const bool a8LCD = SkToBool(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag);
+ const bool hairline = origGlyph.pathIsHairline();
+ GenerateImageFromPath(mask, *devPath, fPreBlend, doBGR, doVert, a8LCD, hairline);
+ }
+ }
+
+ if (fMaskFilter) {
+ // k3D_Format should not be mask filtered.
+ SkASSERT(SkMask::k3D_Format != unfilteredGlyph->fMaskFormat);
+
+ SkMask filteredMask;
+ SkMask srcMask;
+ SkMatrix m;
+ fRec.getMatrixFrom2x2(&m);
+
+ if (as_MFB(fMaskFilter)->filterMask(&filteredMask, unfilteredGlyph->mask(), m, nullptr)) {
+ // Filter succeeded; filteredMask.fImage was allocated.
+ srcMask = filteredMask;
+ } else if (unfilteredGlyph->fImage == tmpGlyphImageStorage.get()) {
+ // Filter did nothing; unfiltered mask is independent of origGlyph.fImage.
+ srcMask = unfilteredGlyph->mask();
+ } else if (origGlyph.iRect() == unfilteredGlyph->iRect()) {
+ // Filter did nothing; the unfiltered mask is in origGlyph.fImage and matches.
+ return;
+ } else {
+ // Filter did nothing; the unfiltered mask is in origGlyph.fImage and conflicts.
+ srcMask = unfilteredGlyph->mask();
+ size_t imageSize = unfilteredGlyph->imageSize();
+ tmpGlyphImageStorage.reset(imageSize);
+ srcMask.fImage = static_cast<uint8_t*>(tmpGlyphImageStorage.get());
+ memcpy(srcMask.fImage, unfilteredGlyph->fImage, imageSize);
+ }
+
+ SkASSERT_RELEASE(srcMask.fFormat == origGlyph.fMaskFormat);
+ SkMask dstMask = origGlyph.mask();
+ SkIRect origBounds = dstMask.fBounds;
+
+ // Find the intersection of src and dst while updating the fImages.
+ if (srcMask.fBounds.fTop < dstMask.fBounds.fTop) {
+ int32_t topDiff = dstMask.fBounds.fTop - srcMask.fBounds.fTop;
+ srcMask.fImage += srcMask.fRowBytes * topDiff;
+ srcMask.fBounds.fTop = dstMask.fBounds.fTop;
+ }
+ if (dstMask.fBounds.fTop < srcMask.fBounds.fTop) {
+ int32_t topDiff = srcMask.fBounds.fTop - dstMask.fBounds.fTop;
+ dstMask.fImage += dstMask.fRowBytes * topDiff;
+ dstMask.fBounds.fTop = srcMask.fBounds.fTop;
+ }
+
+ if (srcMask.fBounds.fLeft < dstMask.fBounds.fLeft) {
+ int32_t leftDiff = dstMask.fBounds.fLeft - srcMask.fBounds.fLeft;
+ srcMask.fImage += leftDiff;
+ srcMask.fBounds.fLeft = dstMask.fBounds.fLeft;
+ }
+ if (dstMask.fBounds.fLeft < srcMask.fBounds.fLeft) {
+ int32_t leftDiff = srcMask.fBounds.fLeft - dstMask.fBounds.fLeft;
+ dstMask.fImage += leftDiff;
+ dstMask.fBounds.fLeft = srcMask.fBounds.fLeft;
+ }
+
+ if (srcMask.fBounds.fBottom < dstMask.fBounds.fBottom) {
+ dstMask.fBounds.fBottom = srcMask.fBounds.fBottom;
+ }
+ if (dstMask.fBounds.fBottom < srcMask.fBounds.fBottom) {
+ srcMask.fBounds.fBottom = dstMask.fBounds.fBottom;
+ }
+
+ if (srcMask.fBounds.fRight < dstMask.fBounds.fRight) {
+ dstMask.fBounds.fRight = srcMask.fBounds.fRight;
+ }
+ if (dstMask.fBounds.fRight < srcMask.fBounds.fRight) {
+ srcMask.fBounds.fRight = dstMask.fBounds.fRight;
+ }
+
+ SkASSERT(srcMask.fBounds == dstMask.fBounds);
+ int width = srcMask.fBounds.width();
+ int height = srcMask.fBounds.height();
+ int dstRB = dstMask.fRowBytes;
+ int srcRB = srcMask.fRowBytes;
+
+ const uint8_t* src = srcMask.fImage;
+ uint8_t* dst = dstMask.fImage;
+
+ if (SkMask::k3D_Format == filteredMask.fFormat) {
+ // we have to copy 3 times as much
+ height *= 3;
+ }
+
+ // If not filling the full original glyph, clear it out first.
+ if (dstMask.fBounds != origBounds) {
+ sk_bzero(origGlyph.fImage, origGlyph.fHeight * origGlyph.rowBytes());
+ }
+
+ while (--height >= 0) {
+ memcpy(dst, src, width);
+ src += srcRB;
+ dst += dstRB;
+ }
+ SkMask::FreeImage(filteredMask.fImage);
+ }
+}
+
+void SkScalerContext::getPath(SkGlyph& glyph, SkArenaAlloc* alloc) {
+ this->internalGetPath(glyph, alloc);
+}
+
+sk_sp<SkDrawable> SkScalerContext::getDrawable(SkGlyph& glyph) {
+ return this->generateDrawable(glyph);
+}
+//TODO: make pure virtual
+sk_sp<SkDrawable> SkScalerContext::generateDrawable(const SkGlyph&) {
+ return nullptr;
+}
+
+void SkScalerContext::getFontMetrics(SkFontMetrics* fm) {
+ SkASSERT(fm);
+ this->generateFontMetrics(fm);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScalerContext::internalGetPath(SkGlyph& glyph, SkArenaAlloc* alloc) {
+ SkASSERT(glyph.fAdvancesBoundsFormatAndInitialPathDone);
+
+ if (glyph.setPathHasBeenCalled()) {
+ return;
+ }
+
+ SkPath path;
+ SkPath devPath;
+ bool hairline = false;
+
+ SkPackedGlyphID glyphID = glyph.getPackedID();
+ if (!generatePath(glyph, &path)) {
+ glyph.setPath(alloc, (SkPath*)nullptr, hairline);
+ return;
+ }
+
+ if (fRec.fFlags & SkScalerContext::kSubpixelPositioning_Flag) {
+ SkFixed dx = glyphID.getSubXFixed();
+ SkFixed dy = glyphID.getSubYFixed();
+ if (dx | dy) {
+ path.offset(SkFixedToScalar(dx), SkFixedToScalar(dy));
+ }
+ }
+
+ if (fRec.fFrameWidth < 0 && fPathEffect == nullptr) {
+ devPath.swap(path);
+ } else {
+ // need the path in user-space, with only the point-size applied
+ // so that our stroking and effects will operate the same way they
+ // would if the user had extracted the path themselves, and then
+ // called drawPath
+ SkPath localPath;
+ SkMatrix matrix;
+ SkMatrix inverse;
+
+ fRec.getMatrixFrom2x2(&matrix);
+ if (!matrix.invert(&inverse)) {
+ glyph.setPath(alloc, &devPath, hairline);
+ }
+ path.transform(inverse, &localPath);
+ // now localPath is only affected by the paint settings, and not the canvas matrix
+
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+
+ if (fRec.fFrameWidth >= 0) {
+ rec.setStrokeStyle(fRec.fFrameWidth,
+ SkToBool(fRec.fFlags & kFrameAndFill_Flag));
+ // glyphs are always closed contours, so cap type is ignored,
+ // so we just pass something.
+ rec.setStrokeParams((SkPaint::Cap)fRec.fStrokeCap,
+ (SkPaint::Join)fRec.fStrokeJoin,
+ fRec.fMiterLimit);
+ }
+
+ if (fPathEffect) {
+ SkPath effectPath;
+ if (fPathEffect->filterPath(&effectPath, localPath, &rec, nullptr, matrix)) {
+ localPath.swap(effectPath);
+ }
+ }
+
+ if (rec.needToApply()) {
+ SkPath strokePath;
+ if (rec.applyToPath(&strokePath, localPath)) {
+ localPath.swap(strokePath);
+ }
+ }
+
+ // The path effect may have modified 'rec', so wait to here to check hairline status.
+ if (rec.isHairlineStyle()) {
+ hairline = true;
+ }
+
+ localPath.transform(matrix, &devPath);
+ }
+ glyph.setPath(alloc, &devPath, hairline);
+}
+
+
+void SkScalerContextRec::getMatrixFrom2x2(SkMatrix* dst) const {
+ dst->setAll(fPost2x2[0][0], fPost2x2[0][1], 0,
+ fPost2x2[1][0], fPost2x2[1][1], 0,
+ 0, 0, 1);
+}
+
+void SkScalerContextRec::getLocalMatrix(SkMatrix* m) const {
+ *m = SkFontPriv::MakeTextMatrix(fTextSize, fPreScaleX, fPreSkewX);
+}
+
+void SkScalerContextRec::getSingleMatrix(SkMatrix* m) const {
+ this->getLocalMatrix(m);
+
+ // now concat the device matrix
+ SkMatrix deviceMatrix;
+ this->getMatrixFrom2x2(&deviceMatrix);
+ m->postConcat(deviceMatrix);
+}
+
+bool SkScalerContextRec::computeMatrices(PreMatrixScale preMatrixScale, SkVector* s, SkMatrix* sA,
+ SkMatrix* GsA, SkMatrix* G_inv, SkMatrix* A_out)
+{
+ // A is the 'total' matrix.
+ SkMatrix A;
+ this->getSingleMatrix(&A);
+
+ // The caller may find the 'total' matrix useful when dealing directly with EM sizes.
+ if (A_out) {
+ *A_out = A;
+ }
+
+ // GA is the matrix A with rotation removed.
+ SkMatrix GA;
+ bool skewedOrFlipped = A.getSkewX() || A.getSkewY() || A.getScaleX() < 0 || A.getScaleY() < 0;
+ if (skewedOrFlipped) {
+ // QR by Givens rotations. G is Q^T and GA is R. G is rotational (no reflections).
+ // h is where A maps the horizontal baseline.
+ SkPoint h = SkPoint::Make(SK_Scalar1, 0);
+ A.mapPoints(&h, 1);
+
+ // G is the Givens Matrix for A (rotational matrix where GA[0][1] == 0).
+ SkMatrix G;
+ SkComputeGivensRotation(h, &G);
+
+ GA = G;
+ GA.preConcat(A);
+
+ // The 'remainingRotation' is G inverse, which is fairly simple since G is 2x2 rotational.
+ if (G_inv) {
+ G_inv->setAll(
+ G.get(SkMatrix::kMScaleX), -G.get(SkMatrix::kMSkewX), G.get(SkMatrix::kMTransX),
+ -G.get(SkMatrix::kMSkewY), G.get(SkMatrix::kMScaleY), G.get(SkMatrix::kMTransY),
+ G.get(SkMatrix::kMPersp0), G.get(SkMatrix::kMPersp1), G.get(SkMatrix::kMPersp2));
+ }
+ } else {
+ GA = A;
+ if (G_inv) {
+ G_inv->reset();
+ }
+ }
+
+ // If the 'total' matrix is singular, set the 'scale' to something finite and zero the matrices.
+ // All underlying ports have issues with zero text size, so use the matrices to zero.
+ // If one of the scale factors is less than 1/256 then an EM filling square will
+ // never affect any pixels.
+ // If there are any nonfinite numbers in the matrix, bail out and set the matrices to zero.
+ if (SkScalarAbs(GA.get(SkMatrix::kMScaleX)) <= SK_ScalarNearlyZero ||
+ SkScalarAbs(GA.get(SkMatrix::kMScaleY)) <= SK_ScalarNearlyZero ||
+ !GA.isFinite())
+ {
+ s->fX = SK_Scalar1;
+ s->fY = SK_Scalar1;
+ sA->setScale(0, 0);
+ if (GsA) {
+ GsA->setScale(0, 0);
+ }
+ if (G_inv) {
+ G_inv->reset();
+ }
+ return false;
+ }
+
+ // At this point, given GA, create s.
+ switch (preMatrixScale) {
+ case PreMatrixScale::kFull:
+ s->fX = SkScalarAbs(GA.get(SkMatrix::kMScaleX));
+ s->fY = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ break;
+ case PreMatrixScale::kVertical: {
+ SkScalar yScale = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ s->fX = yScale;
+ s->fY = yScale;
+ break;
+ }
+ case PreMatrixScale::kVerticalInteger: {
+ SkScalar realYScale = SkScalarAbs(GA.get(SkMatrix::kMScaleY));
+ SkScalar intYScale = SkScalarRoundToScalar(realYScale);
+ if (intYScale == 0) {
+ intYScale = SK_Scalar1;
+ }
+ s->fX = intYScale;
+ s->fY = intYScale;
+ break;
+ }
+ }
+
+ // The 'remaining' matrix sA is the total matrix A without the scale.
+ if (!skewedOrFlipped && (
+ (PreMatrixScale::kFull == preMatrixScale) ||
+ (PreMatrixScale::kVertical == preMatrixScale && A.getScaleX() == A.getScaleY())))
+ {
+ // If GA == A and kFull, sA is identity.
+ // If GA == A and kVertical and A.scaleX == A.scaleY, sA is identity.
+ sA->reset();
+ } else if (!skewedOrFlipped && PreMatrixScale::kVertical == preMatrixScale) {
+ // If GA == A and kVertical, sA.scaleY is SK_Scalar1.
+ sA->reset();
+ sA->setScaleX(A.getScaleX() / s->fY);
+ } else {
+ // TODO: like kVertical, kVerticalInteger with int scales.
+ *sA = A;
+ sA->preScale(SkScalarInvert(s->fX), SkScalarInvert(s->fY));
+ }
+
+ // The 'remainingWithoutRotation' matrix GsA is the non-rotational part of A without the scale.
+ if (GsA) {
+ *GsA = GA;
+ // G is rotational so reorders with the scale.
+ GsA->preScale(SkScalarInvert(s->fX), SkScalarInvert(s->fY));
+ }
+
+ return true;
+}
+
+SkAxisAlignment SkScalerContext::computeAxisAlignmentForHText() const {
+ return fRec.computeAxisAlignmentForHText();
+}
+
+SkAxisAlignment SkScalerContextRec::computeAxisAlignmentForHText() const {
+ // Why fPost2x2 can be used here.
+ // getSingleMatrix multiplies in getLocalMatrix, which consists of
+ // * fTextSize (a scale, which has no effect)
+ // * fPreScaleX (a scale in x, which has no effect)
+ // * fPreSkewX (has no effect, but would on vertical text alignment).
+ // In other words, making the text bigger, stretching it along the
+ // horizontal axis, or fake italicizing it does not move the baseline.
+ if (!SkToBool(fFlags & SkScalerContext::kBaselineSnap_Flag)) {
+ return SkAxisAlignment::kNone;
+ }
+
+ if (0 == fPost2x2[1][0]) {
+ // The x axis is mapped onto the x axis.
+ return SkAxisAlignment::kX;
+ }
+ if (0 == fPost2x2[0][0]) {
+ // The x axis is mapped onto the y axis.
+ return SkAxisAlignment::kY;
+ }
+ return SkAxisAlignment::kNone;
+}
+
+void SkScalerContextRec::setLuminanceColor(SkColor c) {
+ fLumBits = SkMaskGamma::CanonicalColor(
+ SkColorSetRGB(SkColorGetR(c), SkColorGetG(c), SkColorGetB(c)));
+}
+
+/*
+ * Return the scalar with only limited fractional precision. Used to consolidate matrices
+ * that vary only slightly when we create our key into the font cache, since the font scaler
+ * typically returns the same looking results for tiny changes in the matrix.
+ */
+static SkScalar sk_relax(SkScalar x) {
+ SkScalar n = SkScalarRoundToScalar(x * 1024);
+ return n / 1024.0f;
+}
+
+static SkMask::Format compute_mask_format(const SkFont& font) {
+ switch (font.getEdging()) {
+ case SkFont::Edging::kAlias:
+ return SkMask::kBW_Format;
+ case SkFont::Edging::kAntiAlias:
+ return SkMask::kA8_Format;
+ case SkFont::Edging::kSubpixelAntiAlias:
+ return SkMask::kLCD16_Format;
+ }
+ SkASSERT(false);
+ return SkMask::kA8_Format;
+}
+
+// Beyond this size, LCD doesn't appreciably improve quality, but it always
+// costs more RAM and draws slower, so we set a cap.
+#ifndef SK_MAX_SIZE_FOR_LCDTEXT
+ #define SK_MAX_SIZE_FOR_LCDTEXT 48
+#endif
+
+const SkScalar gMaxSize2ForLCDText = SK_MAX_SIZE_FOR_LCDTEXT * SK_MAX_SIZE_FOR_LCDTEXT;
+
+static bool too_big_for_lcd(const SkScalerContextRec& rec, bool checkPost2x2) {
+ if (checkPost2x2) {
+ SkScalar area = rec.fPost2x2[0][0] * rec.fPost2x2[1][1] -
+ rec.fPost2x2[1][0] * rec.fPost2x2[0][1];
+ area *= rec.fTextSize * rec.fTextSize;
+ return area > gMaxSize2ForLCDText;
+ } else {
+ return rec.fTextSize > SK_MAX_SIZE_FOR_LCDTEXT;
+ }
+}
+
+// The only reason this is not file static is because it needs the context of SkScalerContext to
+// access SkPaint::computeLuminanceColor.
+void SkScalerContext::MakeRecAndEffects(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix,
+ SkScalerContextRec* rec,
+ SkScalerContextEffects* effects) {
+ SkASSERT(!deviceMatrix.hasPerspective());
+
+ sk_bzero(rec, sizeof(SkScalerContextRec));
+
+ SkTypeface* typeface = font.getTypefaceOrDefault();
+
+ rec->fTypefaceID = typeface->uniqueID();
+ rec->fTextSize = font.getSize();
+ rec->fPreScaleX = font.getScaleX();
+ rec->fPreSkewX = font.getSkewX();
+
+ bool checkPost2x2 = false;
+
+ const SkMatrix::TypeMask mask = deviceMatrix.getType();
+ if (mask & SkMatrix::kScale_Mask) {
+ rec->fPost2x2[0][0] = sk_relax(deviceMatrix.getScaleX());
+ rec->fPost2x2[1][1] = sk_relax(deviceMatrix.getScaleY());
+ checkPost2x2 = true;
+ } else {
+ rec->fPost2x2[0][0] = rec->fPost2x2[1][1] = SK_Scalar1;
+ }
+ if (mask & SkMatrix::kAffine_Mask) {
+ rec->fPost2x2[0][1] = sk_relax(deviceMatrix.getSkewX());
+ rec->fPost2x2[1][0] = sk_relax(deviceMatrix.getSkewY());
+ checkPost2x2 = true;
+ } else {
+ rec->fPost2x2[0][1] = rec->fPost2x2[1][0] = 0;
+ }
+
+ SkPaint::Style style = paint.getStyle();
+ SkScalar strokeWidth = paint.getStrokeWidth();
+
+ unsigned flags = 0;
+
+ if (font.isEmbolden()) {
+#ifdef SK_USE_FREETYPE_EMBOLDEN
+ flags |= SkScalerContext::kEmbolden_Flag;
+#else
+ SkScalar fakeBoldScale = SkScalarInterpFunc(font.getSize(),
+ kStdFakeBoldInterpKeys,
+ kStdFakeBoldInterpValues,
+ kStdFakeBoldInterpLength);
+ SkScalar extra = font.getSize() * fakeBoldScale;
+
+ if (style == SkPaint::kFill_Style) {
+ style = SkPaint::kStrokeAndFill_Style;
+ strokeWidth = extra; // ignore paint's strokeWidth if it was "fill"
+ } else {
+ strokeWidth += extra;
+ }
+#endif
+ }
+
+ if (style != SkPaint::kFill_Style && strokeWidth >= 0) {
+ rec->fFrameWidth = strokeWidth;
+ rec->fMiterLimit = paint.getStrokeMiter();
+ rec->fStrokeJoin = SkToU8(paint.getStrokeJoin());
+ rec->fStrokeCap = SkToU8(paint.getStrokeCap());
+
+ if (style == SkPaint::kStrokeAndFill_Style) {
+ flags |= SkScalerContext::kFrameAndFill_Flag;
+ }
+ } else {
+ rec->fFrameWidth = -1;
+ rec->fMiterLimit = 0;
+ rec->fStrokeJoin = 0;
+ rec->fStrokeCap = 0;
+ }
+
+ rec->fMaskFormat = compute_mask_format(font);
+
+ if (SkMask::kLCD16_Format == rec->fMaskFormat) {
+ if (too_big_for_lcd(*rec, checkPost2x2)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ flags |= SkScalerContext::kGenA8FromLCD_Flag;
+ } else {
+ SkPixelGeometry geometry = surfaceProps.pixelGeometry();
+
+ switch (geometry) {
+ case kUnknown_SkPixelGeometry:
+ // eeek, can't support LCD
+ rec->fMaskFormat = SkMask::kA8_Format;
+ flags |= SkScalerContext::kGenA8FromLCD_Flag;
+ break;
+ case kRGB_H_SkPixelGeometry:
+ // our default, do nothing.
+ break;
+ case kBGR_H_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ case kRGB_V_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_Vertical_Flag;
+ break;
+ case kBGR_V_SkPixelGeometry:
+ flags |= SkScalerContext::kLCD_Vertical_Flag;
+ flags |= SkScalerContext::kLCD_BGROrder_Flag;
+ break;
+ }
+ }
+ }
+
+ if (font.isEmbeddedBitmaps()) {
+ flags |= SkScalerContext::kEmbeddedBitmapText_Flag;
+ }
+ if (font.isSubpixel()) {
+ flags |= SkScalerContext::kSubpixelPositioning_Flag;
+ }
+ if (font.isForceAutoHinting()) {
+ flags |= SkScalerContext::kForceAutohinting_Flag;
+ }
+ if (font.isLinearMetrics()) {
+ flags |= SkScalerContext::kLinearMetrics_Flag;
+ }
+ if (font.isBaselineSnap()) {
+ flags |= SkScalerContext::kBaselineSnap_Flag;
+ }
+ if (typeface->glyphMaskNeedsCurrentColor()) {
+ flags |= SkScalerContext::kNeedsForegroundColor_Flag;
+ rec->fForegroundColor = paint.getColor();
+ }
+ rec->fFlags = SkToU16(flags);
+
+ // these modify fFlags, so do them after assigning fFlags
+ rec->setHinting(font.getHinting());
+ rec->setLuminanceColor(SkPaintPriv::ComputeLuminanceColor(paint));
+
+ // For now always set the paint gamma equal to the device gamma.
+ // The math in SkMaskGamma can handle them being different,
+ // but it requires superluminous masks when
+ // Ex : deviceGamma(x) < paintGamma(x) and x is sufficiently large.
+ rec->setDeviceGamma(SK_GAMMA_EXPONENT);
+ rec->setPaintGamma(SK_GAMMA_EXPONENT);
+
+#ifdef SK_GAMMA_CONTRAST
+ rec->setContrast(SK_GAMMA_CONTRAST);
+#else
+ // A value of 0.5 for SK_GAMMA_CONTRAST appears to be a good compromise.
+ // With lower values small text appears washed out (though correctly so).
+ // With higher values lcd fringing is worse and the smoothing effect of
+ // partial coverage is diminished.
+ rec->setContrast(0.5f);
+#endif
+
+ if (!SkToBool(scalerContextFlags & SkScalerContextFlags::kFakeGamma)) {
+ rec->ignoreGamma();
+ }
+ if (!SkToBool(scalerContextFlags & SkScalerContextFlags::kBoostContrast)) {
+ rec->setContrast(0);
+ }
+
+ new (effects) SkScalerContextEffects{paint};
+}
+
+SkDescriptor* SkScalerContext::CreateDescriptorAndEffectsUsingPaint(
+ const SkFont& font, const SkPaint& paint, const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags, const SkMatrix& deviceMatrix, SkAutoDescriptor* ad,
+ SkScalerContextEffects* effects)
+{
+ SkScalerContextRec rec;
+ MakeRecAndEffects(font, paint, surfaceProps, scalerContextFlags, deviceMatrix, &rec, effects);
+ return AutoDescriptorGivenRecAndEffects(rec, *effects, ad);
+}
+
+static size_t calculate_size_and_flatten(const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ SkBinaryWriteBuffer* effectBuffer) {
+ size_t descSize = sizeof(rec);
+ int entryCount = 1;
+
+ if (effects.fPathEffect || effects.fMaskFilter) {
+ if (effects.fPathEffect) { effectBuffer->writeFlattenable(effects.fPathEffect); }
+ if (effects.fMaskFilter) { effectBuffer->writeFlattenable(effects.fMaskFilter); }
+ entryCount += 1;
+ descSize += effectBuffer->bytesWritten();
+ }
+
+ descSize += SkDescriptor::ComputeOverhead(entryCount);
+ return descSize;
+}
+
+static void generate_descriptor(const SkScalerContextRec& rec,
+ const SkBinaryWriteBuffer& effectBuffer,
+ SkDescriptor* desc) {
+ desc->addEntry(kRec_SkDescriptorTag, sizeof(rec), &rec);
+
+ if (effectBuffer.bytesWritten() > 0) {
+ effectBuffer.writeToMemory(desc->addEntry(kEffects_SkDescriptorTag,
+ effectBuffer.bytesWritten(),
+ nullptr));
+ }
+
+ desc->computeChecksum();
+}
+
+SkDescriptor* SkScalerContext::AutoDescriptorGivenRecAndEffects(
+ const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ SkAutoDescriptor* ad)
+{
+ SkBinaryWriteBuffer buf;
+
+ ad->reset(calculate_size_and_flatten(rec, effects, &buf));
+ generate_descriptor(rec, buf, ad->getDesc());
+
+ return ad->getDesc();
+}
+
+std::unique_ptr<SkDescriptor> SkScalerContext::DescriptorGivenRecAndEffects(
+ const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects)
+{
+ SkBinaryWriteBuffer buf;
+
+ auto desc = SkDescriptor::Alloc(calculate_size_and_flatten(rec, effects, &buf));
+ generate_descriptor(rec, buf, desc.get());
+
+ return desc;
+}
+
+void SkScalerContext::DescriptorBufferGiveRec(const SkScalerContextRec& rec, void* buffer) {
+ generate_descriptor(rec, SkBinaryWriteBuffer{}, (SkDescriptor*)buffer);
+}
+
+bool SkScalerContext::CheckBufferSizeForRec(const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ size_t size) {
+ SkBinaryWriteBuffer buf;
+ return size >= calculate_size_and_flatten(rec, effects, &buf);
+}
+
+std::unique_ptr<SkScalerContext> SkScalerContext::MakeEmpty(
+ sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc) {
+ class SkScalerContext_Empty : public SkScalerContext {
+ public:
+ SkScalerContext_Empty(sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(std::move(typeface), effects, desc) {}
+
+ protected:
+ bool generateAdvance(SkGlyph* glyph) override {
+ glyph->zeroMetrics();
+ return true;
+ }
+ void generateMetrics(SkGlyph* glyph, SkArenaAlloc*) override {
+ glyph->fMaskFormat = fRec.fMaskFormat;
+ glyph->zeroMetrics();
+ }
+ void generateImage(const SkGlyph& glyph) override {}
+ bool generatePath(const SkGlyph& glyph, SkPath* path) override {
+ path->reset();
+ return false;
+ }
+ void generateFontMetrics(SkFontMetrics* metrics) override {
+ if (metrics) {
+ sk_bzero(metrics, sizeof(*metrics));
+ }
+ }
+ };
+
+ return std::make_unique<SkScalerContext_Empty>(std::move(typeface), effects, desc);
+}
+
+
+
+
diff --git a/gfx/skia/skia/src/core/SkScalerContext.h b/gfx/skia/skia/src/core/SkScalerContext.h
new file mode 100644
index 0000000000..47470fe23b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScalerContext.h
@@ -0,0 +1,464 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalerContext_DEFINED
+#define SkScalerContext_DEFINED
+
+#include <memory>
+
+#include "include/core/SkFont.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkMacros.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkWriteBuffer.h"
+
+class SkAutoDescriptor;
+class SkDescriptor;
+class SkMaskFilter;
+class SkPathEffect;
+class SkScalerContext;
+class SkScalerContext_DW;
+
+enum class SkScalerContextFlags : uint32_t {
+ kNone = 0,
+ kFakeGamma = 1 << 0,
+ kBoostContrast = 1 << 1,
+ kFakeGammaAndBoostContrast = kFakeGamma | kBoostContrast,
+};
+SK_MAKE_BITFIELD_OPS(SkScalerContextFlags)
+
+/*
+ * To allow this to be forward-declared, it must be its own typename, rather
+ * than a nested struct inside SkScalerContext (where it started).
+ *
+ * SkScalerContextRec must be dense, and all bytes must be set to a known quantity because this
+ * structure is used to calculate a checksum.
+ */
+SK_BEGIN_REQUIRE_DENSE
+struct SkScalerContextRec {
+ SkTypefaceID fTypefaceID;
+ SkScalar fTextSize, fPreScaleX, fPreSkewX;
+ SkScalar fPost2x2[2][2];
+ SkScalar fFrameWidth, fMiterLimit;
+
+ // This will be set to the paint's foreground color if
+ // kNeedsForegroundColor is set, which will usually be the case for COLRv0 and
+ // COLRv1 fonts.
+ uint32_t fForegroundColor{SK_ColorBLACK};
+
+private:
+ //These describe the parameters to create (uniquely identify) the pre-blend.
+ uint32_t fLumBits;
+ uint8_t fDeviceGamma; //2.6, (0.0, 4.0) gamma, 0.0 for sRGB
+ uint8_t fPaintGamma; //2.6, (0.0, 4.0) gamma, 0.0 for sRGB
+ uint8_t fContrast; //0.8+1, [0.0, 1.0] artificial contrast
+ const uint8_t fReservedAlign{0};
+
+public:
+
+ SkScalar getDeviceGamma() const {
+ return SkIntToScalar(fDeviceGamma) / (1 << 6);
+ }
+ void setDeviceGamma(SkScalar dg) {
+ SkASSERT(0 <= dg && dg < SkIntToScalar(4));
+ fDeviceGamma = SkScalarFloorToInt(dg * (1 << 6));
+ }
+
+ SkScalar getPaintGamma() const {
+ return SkIntToScalar(fPaintGamma) / (1 << 6);
+ }
+ void setPaintGamma(SkScalar pg) {
+ SkASSERT(0 <= pg && pg < SkIntToScalar(4));
+ fPaintGamma = SkScalarFloorToInt(pg * (1 << 6));
+ }
+
+ SkScalar getContrast() const {
+ sk_ignore_unused_variable(fReservedAlign);
+ return SkIntToScalar(fContrast) / ((1 << 8) - 1);
+ }
+ void setContrast(SkScalar c) {
+ SkASSERT(0 <= c && c <= SK_Scalar1);
+ fContrast = SkScalarRoundToInt(c * ((1 << 8) - 1));
+ }
+
+ /**
+ * Causes the luminance color to be ignored, and the paint and device
+ * gamma to be effectively 1.0
+ */
+ void ignoreGamma() {
+ setLuminanceColor(SK_ColorTRANSPARENT);
+ setPaintGamma(SK_Scalar1);
+ setDeviceGamma(SK_Scalar1);
+ }
+
+ /**
+ * Causes the luminance color and contrast to be ignored, and the
+ * paint and device gamma to be effectively 1.0.
+ */
+ void ignorePreBlend() {
+ ignoreGamma();
+ setContrast(0);
+ }
+
+ SkMask::Format fMaskFormat;
+
+private:
+ uint8_t fStrokeJoin : 4;
+ uint8_t fStrokeCap : 4;
+
+public:
+ uint16_t fFlags;
+
+ // Warning: when adding members note that the size of this structure
+ // must be a multiple of 4. SkDescriptor requires that its arguments be
+ // multiples of four and this structure is put in an SkDescriptor in
+ // SkPaint::MakeRecAndEffects.
+
+ SkString dump() const {
+ SkString msg;
+ msg.appendf(" Rec\n");
+ msg.appendf(" textsize %a prescale %a preskew %a post [%a %a %a %a]\n",
+ fTextSize, fPreScaleX, fPreSkewX, fPost2x2[0][0],
+ fPost2x2[0][1], fPost2x2[1][0], fPost2x2[1][1]);
+ msg.appendf(" frame %g miter %g format %d join %d cap %d flags %#hx\n",
+ fFrameWidth, fMiterLimit, fMaskFormat, fStrokeJoin, fStrokeCap, fFlags);
+ msg.appendf(" lum bits %x, device gamma %d, paint gamma %d contrast %d\n", fLumBits,
+ fDeviceGamma, fPaintGamma, fContrast);
+ msg.appendf(" foreground color %x\n", fForegroundColor);
+ return msg;
+ }
+
+ void getMatrixFrom2x2(SkMatrix*) const;
+ void getLocalMatrix(SkMatrix*) const;
+ void getSingleMatrix(SkMatrix*) const;
+
+ /** The kind of scale which will be applied by the underlying port (pre-matrix). */
+ enum class PreMatrixScale {
+ kFull, // The underlying port can apply both x and y scale.
+ kVertical, // The underlying port can only apply a y scale.
+ kVerticalInteger // The underlying port can only apply an integer y scale.
+ };
+ /**
+ * Compute useful matrices for use with sizing in underlying libraries.
+ *
+ * There are two kinds of text size, a 'requested/logical size' which is like asking for size
+ * '12' and a 'real' size which is the size after the matrix is applied. The matrices produced
+ * by this method are based on the 'real' size. This method effectively finds the total device
+ * matrix and decomposes it in various ways.
+ *
+ * The most useful decomposition is into 'scale' and 'remaining'. The 'scale' is applied first
+ * and then the 'remaining' to fully apply the total matrix. This decomposition is useful when
+ * the text size ('scale') may have meaning apart from the total matrix. This is true when
+ * hinting, and sometimes true for other properties as well.
+ *
+ * The second (optional) decomposition is of 'remaining' into a non-rotational part
+ * 'remainingWithoutRotation' and a rotational part 'remainingRotation'. The 'scale' is applied
+ * first, then 'remainingWithoutRotation', then 'remainingRotation' to fully apply the total
+ * matrix. This decomposition is helpful when only horizontal metrics can be trusted, so the
+ * 'scale' and 'remainingWithoutRotation' will be handled by the underlying library, but
+ * the final rotation 'remainingRotation' will be handled manually.
+ *
+ * The 'total' matrix is also (optionally) available. This is useful in cases where the
+ * underlying library will not be used, often when working directly with font data.
+ *
+ * The parameters 'scale' and 'remaining' are required, the other pointers may be nullptr.
+ *
+ * @param preMatrixScale the kind of scale to extract from the total matrix.
+ * @param scale the scale extracted from the total matrix (both values positive).
+ * @param remaining apply after scale to apply the total matrix.
+ * @param remainingWithoutRotation apply after scale to apply the total matrix sans rotation.
+ * @param remainingRotation apply after remainingWithoutRotation to apply the total matrix.
+ * @param total the total matrix.
+ * @return false if the matrix was singular. The output will be valid but not invertible.
+ */
+ bool computeMatrices(PreMatrixScale preMatrixScale,
+ SkVector* scale, SkMatrix* remaining,
+ SkMatrix* remainingWithoutRotation = nullptr,
+ SkMatrix* remainingRotation = nullptr,
+ SkMatrix* total = nullptr);
+
+ SkAxisAlignment computeAxisAlignmentForHText() const;
+
+ inline SkFontHinting getHinting() const;
+ inline void setHinting(SkFontHinting);
+
+ SkMask::Format getFormat() const {
+ return fMaskFormat;
+ }
+
+ SkColor getLuminanceColor() const {
+ return fLumBits;
+ }
+
+ // setLuminanceColor forces the alpha to be 0xFF because the blitter that draws the glyph
+ // will apply the alpha from the paint. Don't apply the alpha twice.
+ void setLuminanceColor(SkColor c);
+
+private:
+ // TODO: remove
+ friend class SkScalerContext;
+};
+SK_END_REQUIRE_DENSE
+
+// TODO: rename SkScalerContextEffects -> SkStrikeEffects
+struct SkScalerContextEffects {
+ SkScalerContextEffects() : fPathEffect(nullptr), fMaskFilter(nullptr) {}
+ SkScalerContextEffects(SkPathEffect* pe, SkMaskFilter* mf)
+ : fPathEffect(pe), fMaskFilter(mf) {}
+ explicit SkScalerContextEffects(const SkPaint& paint)
+ : fPathEffect(paint.getPathEffect())
+ , fMaskFilter(paint.getMaskFilter()) {}
+
+ SkPathEffect* fPathEffect;
+ SkMaskFilter* fMaskFilter;
+};
+
+//The following typedef hides from the rest of the implementation the number of
+//most significant bits to consider when creating mask gamma tables. Two bits
+//per channel was chosen as a balance between fidelity (more bits) and cache
+//sizes (fewer bits). Three bits per channel was chosen when #303942 (used by
+//the Chrome UI) turned out too green.
+typedef SkTMaskGamma<3, 3, 3> SkMaskGamma;
+
+class SkScalerContext {
+public:
+ enum Flags {
+ kFrameAndFill_Flag = 0x0001,
+ kUnused = 0x0002,
+ kEmbeddedBitmapText_Flag = 0x0004,
+ kEmbolden_Flag = 0x0008,
+ kSubpixelPositioning_Flag = 0x0010,
+ kForceAutohinting_Flag = 0x0020, // Use auto instead of bytecode hinting if hinting.
+
+ // together, these two flags result in a two-bit value which matches
+ // up with the SkPaint::Hinting enum.
+ kHinting_Shift = 7, // to shift into the other flags above
+ kHintingBit1_Flag = 0x0080,
+ kHintingBit2_Flag = 0x0100,
+
+ // Pixel geometry information.
+ // only meaningful if fMaskFormat is kLCD16
+ kLCD_Vertical_Flag = 0x0200, // else Horizontal
+ kLCD_BGROrder_Flag = 0x0400, // else RGB order
+
+ // Generate A8 from LCD source (for GDI and CoreGraphics).
+ // only meaningful if fMaskFormat is kA8
+ kGenA8FromLCD_Flag = 0x0800, // could be 0x200 (bit meaning dependent on fMaskFormat)
+ kLinearMetrics_Flag = 0x1000,
+ kBaselineSnap_Flag = 0x2000,
+
+ kNeedsForegroundColor_Flag = 0x4000,
+
+ kLightOnDark_Flag = 0x8000, // Moz + Mac only, used to distinguish different mask dilations
+ };
+
+ // computed values
+ enum {
+ kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
+ };
+
+ SkScalerContext(sk_sp<SkTypeface>, const SkScalerContextEffects&, const SkDescriptor*);
+ virtual ~SkScalerContext();
+
+ SkTypeface* getTypeface() const { return fTypeface.get(); }
+
+ SkMask::Format getMaskFormat() const {
+ return fRec.fMaskFormat;
+ }
+
+ bool isSubpixel() const {
+ return SkToBool(fRec.fFlags & kSubpixelPositioning_Flag);
+ }
+
+ bool isLinearMetrics() const {
+ return SkToBool(fRec.fFlags & kLinearMetrics_Flag);
+ }
+
+ // DEPRECATED
+ bool isVertical() const { return false; }
+
+ SkGlyph makeGlyph(SkPackedGlyphID, SkArenaAlloc*);
+ void getImage(const SkGlyph&);
+ void getPath(SkGlyph&, SkArenaAlloc*);
+ sk_sp<SkDrawable> getDrawable(SkGlyph&);
+ void getFontMetrics(SkFontMetrics*);
+
+ /** Return the size in bytes of the associated gamma lookup table
+ */
+ static size_t GetGammaLUTSize(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ int* width, int* height);
+
+ /** Get the associated gamma lookup table. The 'data' pointer must point to pre-allocated
+ * memory, with size in bytes greater than or equal to the return value of getGammaLUTSize().
+ *
+ * If the lookup table hasn't been initialized (e.g., it's linear), this will return false.
+ */
+ static bool GetGammaLUTData(SkScalar contrast, SkScalar paintGamma, SkScalar deviceGamma,
+ uint8_t* data);
+
+ static void MakeRecAndEffects(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix,
+ SkScalerContextRec* rec,
+ SkScalerContextEffects* effects);
+
+ // If we are creating rec and effects from a font only, then there is no device around either.
+ static void MakeRecAndEffectsFromFont(const SkFont& font,
+ SkScalerContextRec* rec,
+ SkScalerContextEffects* effects) {
+ SkPaint paint;
+ return MakeRecAndEffects(
+ font, paint, SkSurfaceProps(),
+ SkScalerContextFlags::kNone, SkMatrix::I(), rec, effects);
+ }
+
+ static std::unique_ptr<SkScalerContext> MakeEmpty(
+ sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
+ const SkDescriptor* desc);
+
+ static SkDescriptor* AutoDescriptorGivenRecAndEffects(
+ const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ SkAutoDescriptor* ad);
+
+ static std::unique_ptr<SkDescriptor> DescriptorGivenRecAndEffects(
+ const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects);
+
+ static void DescriptorBufferGiveRec(const SkScalerContextRec& rec, void* buffer);
+ static bool CheckBufferSizeForRec(const SkScalerContextRec& rec,
+ const SkScalerContextEffects& effects,
+ size_t size);
+
+ static SkMaskGamma::PreBlend GetMaskPreBlend(const SkScalerContextRec& rec);
+
+ const SkScalerContextRec& getRec() const { return fRec; }
+
+ SkScalerContextEffects getEffects() const {
+ return { fPathEffect.get(), fMaskFilter.get() };
+ }
+
+ /**
+ * Return the axis (if any) that the baseline for horizontal text should land on.
+ * As an example, the identity matrix will return SkAxisAlignment::kX.
+ */
+ SkAxisAlignment computeAxisAlignmentForHText() const;
+
+ static SkDescriptor* CreateDescriptorAndEffectsUsingPaint(
+ const SkFont&, const SkPaint&, const SkSurfaceProps&,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix, SkAutoDescriptor* ad,
+ SkScalerContextEffects* effects);
+
+protected:
+ SkScalerContextRec fRec;
+
+ /** Generates the contents of glyph.fAdvanceX and glyph.fAdvanceY if it can do so quickly.
+ * Returns true if it could, false otherwise.
+ */
+ virtual bool generateAdvance(SkGlyph* glyph) = 0;
+
+ /** Generates the contents of glyph.fWidth, fHeight, fTop, fLeft,
+ * as well as fAdvanceX and fAdvanceY if not already set.
+ * The fMaskFormat will already be set to a requested format but may be changed.
+ */
+ virtual void generateMetrics(SkGlyph* glyph, SkArenaAlloc*) = 0;
+ static bool GenerateMetricsFromPath(
+ SkGlyph* glyph, const SkPath& path, SkMask::Format format,
+ bool verticalLCD, bool a8FromLCD, bool hairline);
+
+ /** Generates the contents of glyph.fImage.
+ * When called, glyph.fImage will be pointing to a pre-allocated,
+ * uninitialized region of memory of size glyph.imageSize().
+ * This method may not change glyph.fMaskFormat.
+ *
+ * Because glyph.imageSize() will determine the size of fImage,
+ * generateMetrics will be called before generateImage.
+ */
+ virtual void generateImage(const SkGlyph& glyph) = 0;
+ static void GenerateImageFromPath(
+ const SkMask& mask, const SkPath& path, const SkMaskGamma::PreBlend& maskPreBlend,
+ bool doBGR, bool verticalLCD, bool a8FromLCD, bool hairline);
+
+ /** Sets the passed path to the glyph outline.
+ * If this cannot be done the path is set to empty.
+ * Does not apply subpixel positioning to the path.
+ * @return false if this glyph does not have any path.
+ */
+ virtual bool SK_WARN_UNUSED_RESULT generatePath(const SkGlyph&, SkPath*) = 0;
+
+ /** Returns the drawable for the glyph (if any).
+ *
+ * The generated drawable will be lifetime scoped to the lifetime of this scaler context.
+ * This means the drawable may refer to the scaler context and associated font data.
+ *
+ * The drawable does not need to be flattenable (e.g. implement getFactory and getTypeName).
+ * Any necessary serialization will be done with newPictureSnapshot.
+ */
+ virtual sk_sp<SkDrawable> generateDrawable(const SkGlyph&); // TODO: = 0
+
+ /** Retrieves font metrics. */
+ virtual void generateFontMetrics(SkFontMetrics*) = 0;
+
+ void forceGenerateImageFromPath() { fGenerateImageFromPath = true; }
+ void forceOffGenerateImageFromPath() { fGenerateImageFromPath = false; }
+
+private:
+ friend class PathText; // For debug purposes
+ friend class PathTextBench; // For debug purposes
+ friend class RandomScalerContext; // For debug purposes
+
+ static SkScalerContextRec PreprocessRec(const SkTypeface&,
+ const SkScalerContextEffects&,
+ const SkDescriptor&);
+
+ // never null
+ sk_sp<SkTypeface> fTypeface;
+
+ // optional objects, which may be null
+ sk_sp<SkPathEffect> fPathEffect;
+ sk_sp<SkMaskFilter> fMaskFilter;
+
+ // if this is set, we draw the image from a path, rather than
+ // calling generateImage.
+ bool fGenerateImageFromPath;
+
+ void internalGetPath(SkGlyph&, SkArenaAlloc*);
+ SkGlyph internalMakeGlyph(SkPackedGlyphID, SkMask::Format, SkArenaAlloc*);
+
+protected:
+ // SkMaskGamma::PreBlend converts linear masks to gamma correcting masks.
+ // Visible to subclasses so that generateImage can apply the pre-blend directly.
+ const SkMaskGamma::PreBlend fPreBlend;
+};
+
+#define kRec_SkDescriptorTag SkSetFourByteTag('s', 'r', 'e', 'c')
+#define kEffects_SkDescriptorTag SkSetFourByteTag('e', 'f', 'c', 't')
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkFontHinting SkScalerContextRec::getHinting() const {
+ unsigned hint = (fFlags & SkScalerContext::kHinting_Mask) >>
+ SkScalerContext::kHinting_Shift;
+ return static_cast<SkFontHinting>(hint);
+}
+
+void SkScalerContextRec::setHinting(SkFontHinting hinting) {
+ fFlags = (fFlags & ~SkScalerContext::kHinting_Mask) |
+ (static_cast<unsigned>(hinting) << SkScalerContext::kHinting_Shift);
+}
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScan.cpp b/gfx/skia/skia/src/core/SkScan.cpp
new file mode 100644
index 0000000000..b93f5f4f07
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/core/SkBlitter.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScan.h"
+
+std::atomic<bool> gSkUseAnalyticAA{true};
+std::atomic<bool> gSkForceAnalyticAA{false};
+
+static inline void blitrect(SkBlitter* blitter, const SkIRect& r) {
+ blitter->blitRect(r.fLeft, r.fTop, r.width(), r.height());
+}
+
+void SkScan::FillIRect(const SkIRect& r, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (!r.isEmpty()) {
+ if (clip) {
+ if (clip->isRect()) {
+ const SkIRect& clipBounds = clip->getBounds();
+
+ if (clipBounds.contains(r)) {
+ blitrect(blitter, r);
+ } else {
+ SkIRect rr = r;
+ if (rr.intersect(clipBounds)) {
+ blitrect(blitter, rr);
+ }
+ }
+ } else {
+ SkRegion::Cliperator cliper(*clip, r);
+ const SkIRect& rr = cliper.rect();
+
+ while (!cliper.done()) {
+ blitrect(blitter, rr);
+ cliper.next();
+ }
+ }
+ } else {
+ blitrect(blitter, r);
+ }
+ }
+}
+
+void SkScan::FillXRect(const SkXRect& xr, const SkRegion* clip,
+ SkBlitter* blitter) {
+ SkIRect r;
+
+ XRect_round(xr, &r);
+ SkScan::FillIRect(r, clip, blitter);
+}
+
+void SkScan::FillRect(const SkRect& r, const SkRegion* clip,
+ SkBlitter* blitter) {
+ SkIRect ir;
+
+ r.round(&ir);
+ SkScan::FillIRect(ir, clip, blitter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::FillIRect(const SkIRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || r.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillIRect(r, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillIRect(r, &wrapper.getRgn(), wrapper.getBlitter());
+}
+
+void SkScan::FillXRect(const SkXRect& xr, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || xr.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillXRect(xr, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillXRect(xr, &wrapper.getRgn(), wrapper.getBlitter());
+}
+
+void SkScan::FillRect(const SkRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty() || r.isEmpty()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillRect(r, &clip.bwRgn(), blitter);
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ FillRect(r, &wrapper.getRgn(), wrapper.getBlitter());
+}
diff --git a/gfx/skia/skia/src/core/SkScan.h b/gfx/skia/skia/src/core/SkScan.h
new file mode 100644
index 0000000000..635c6452b9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkScan_DEFINED
+#define SkScan_DEFINED
+
+#include "include/core/SkRect.h"
+#include "include/private/base/SkFixed.h"
+#include <atomic>
+
+class SkRasterClip;
+class SkRegion;
+class SkBlitter;
+class SkPath;
+
+/** Defines a fixed-point rectangle, identical to the integer SkIRect, but its
+ coordinates are treated as SkFixed rather than int32_t.
+*/
+typedef SkIRect SkXRect;
+
+extern std::atomic<bool> gSkUseAnalyticAA;
+extern std::atomic<bool> gSkForceAnalyticAA;
+
+class AdditiveBlitter;
+
+class SkScan {
+public:
+ /*
+ * Draws count-1 line segments, one at a time:
+ * line(pts[0], pts[1])
+ * line(pts[1], pts[2])
+ * line(......, pts[count - 1])
+ */
+ typedef void (*HairRgnProc)(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+ typedef void (*HairRCProc)(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+
+ static void FillPath(const SkPath&, const SkIRect&, SkBlitter*);
+
+ // Paths of a certain size cannot be anti-aliased unless externally tiled (handled by SkDraw).
+ // SkBitmapDevice automatically tiles, SkAAClip does not so SkRasterClipStack converts AA clips
+ // to BW clips if that's the case. SkRegion uses this to know when to tile and union smaller
+ // SkRegions together.
+ static bool PathRequiresTiling(const SkIRect& bounds);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // rasterclip
+
+ static void FillIRect(const SkIRect&, const SkRasterClip&, SkBlitter*);
+ static void FillXRect(const SkXRect&, const SkRasterClip&, SkBlitter*);
+ static void FillRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillXRect(const SkXRect&, const SkRasterClip&, SkBlitter*);
+ static void FillPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiFillPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void FrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRasterClip&, SkBlitter*);
+ static void AntiFrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRasterClip&, SkBlitter*);
+ static void FillTriangle(const SkPoint pts[], const SkRasterClip&, SkBlitter*);
+ static void HairLine(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+ static void AntiHairLine(const SkPoint[], int count, const SkRasterClip&, SkBlitter*);
+ static void HairRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairRect(const SkRect&, const SkRasterClip&, SkBlitter*);
+ static void HairPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void HairSquarePath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairSquarePath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void HairRoundPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+ static void AntiHairRoundPath(const SkPath&, const SkRasterClip&, SkBlitter*);
+
+ // Needed by SkRegion::setPath
+ static void FillPath(const SkPath&, const SkRegion& clip, SkBlitter*);
+
+private:
+ friend class SkAAClip;
+ friend class SkRegion;
+
+ static void FillIRect(const SkIRect&, const SkRegion* clip, SkBlitter*);
+ static void FillXRect(const SkXRect&, const SkRegion* clip, SkBlitter*);
+ static void FillRect(const SkRect&, const SkRegion* clip, SkBlitter*);
+ static void AntiFillRect(const SkRect&, const SkRegion* clip, SkBlitter*);
+ static void AntiFillXRect(const SkXRect&, const SkRegion*, SkBlitter*);
+ static void AntiFillPath(const SkPath&, const SkRegion& clip, SkBlitter*, bool forceRLE);
+ static void FillTriangle(const SkPoint pts[], const SkRegion*, SkBlitter*);
+
+ static void AntiFrameRect(const SkRect&, const SkPoint& strokeSize,
+ const SkRegion*, SkBlitter*);
+ static void HairLineRgn(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+ static void AntiHairLineRgn(const SkPoint[], int count, const SkRegion*, SkBlitter*);
+ static void AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& pathIR,
+ const SkIRect& clipBounds, bool forceRLE);
+ static void SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& pathIR,
+ const SkIRect& clipBounds, bool forceRLE);
+};
+
+/** Assign an SkXRect from a SkIRect, by promoting the src rect's coordinates
+ from int to SkFixed. Does not check for overflow if the src coordinates
+ exceed 32K
+*/
+static inline void XRect_set(SkXRect* xr, const SkIRect& src) {
+ xr->fLeft = SkIntToFixed(src.fLeft);
+ xr->fTop = SkIntToFixed(src.fTop);
+ xr->fRight = SkIntToFixed(src.fRight);
+ xr->fBottom = SkIntToFixed(src.fBottom);
+}
+
+/** Assign an SkXRect from a SkRect, by promoting the src rect's coordinates
+ from SkScalar to SkFixed. Does not check for overflow if the src coordinates
+ exceed 32K
+*/
+static inline void XRect_set(SkXRect* xr, const SkRect& src) {
+ xr->fLeft = SkScalarToFixed(src.fLeft);
+ xr->fTop = SkScalarToFixed(src.fTop);
+ xr->fRight = SkScalarToFixed(src.fRight);
+ xr->fBottom = SkScalarToFixed(src.fBottom);
+}
+
+/** Round the SkXRect coordinates, and store the result in the SkIRect.
+*/
+static inline void XRect_round(const SkXRect& xr, SkIRect* dst) {
+ dst->fLeft = SkFixedRoundToInt(xr.fLeft);
+ dst->fTop = SkFixedRoundToInt(xr.fTop);
+ dst->fRight = SkFixedRoundToInt(xr.fRight);
+ dst->fBottom = SkFixedRoundToInt(xr.fBottom);
+}
+
+/** Round the SkXRect coordinates out (i.e. use floor for left/top, and ceiling
+ for right/bottom), and store the result in the SkIRect.
+*/
+static inline void XRect_roundOut(const SkXRect& xr, SkIRect* dst) {
+ dst->fLeft = SkFixedFloorToInt(xr.fLeft);
+ dst->fTop = SkFixedFloorToInt(xr.fTop);
+ dst->fRight = SkFixedCeilToInt(xr.fRight);
+ dst->fBottom = SkFixedCeilToInt(xr.fBottom);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScanPriv.h b/gfx/skia/skia/src/core/SkScanPriv.h
new file mode 100644
index 0000000000..929dd30afe
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScanPriv.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScanPriv_DEFINED
+#define SkScanPriv_DEFINED
+
+#include "include/core/SkPath.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkScan.h"
+
+// controls how much we super-sample (when we use that scan conversion)
+#define SK_SUPERSAMPLE_SHIFT 2
+
+class SkScanClipper {
+public:
+ SkScanClipper(SkBlitter* blitter, const SkRegion* clip, const SkIRect& bounds,
+ bool skipRejectTest = false, bool boundsPreClipped = false);
+
+ SkBlitter* getBlitter() const { return fBlitter; }
+ const SkIRect* getClipRect() const { return fClipRect; }
+
+private:
+ SkRectClipBlitter fRectBlitter;
+ SkRgnClipBlitter fRgnBlitter;
+#ifdef SK_DEBUG
+ SkRectClipCheckBlitter fRectClipCheckBlitter;
+#endif
+ SkBlitter* fBlitter;
+ const SkIRect* fClipRect;
+};
+
+void sk_fill_path(const SkPath& path, const SkIRect& clipRect,
+ SkBlitter* blitter, int start_y, int stop_y, int shiftEdgesUp,
+ bool pathContainedInClip);
+
+// blit the rects above and below avoid, clipped to clip
+void sk_blit_above(SkBlitter*, const SkIRect& avoid, const SkRegion& clip);
+void sk_blit_below(SkBlitter*, const SkIRect& avoid, const SkRegion& clip);
+
+template<class EdgeType>
+static inline void remove_edge(EdgeType* edge) {
+ edge->fPrev->fNext = edge->fNext;
+ edge->fNext->fPrev = edge->fPrev;
+}
+
+template<class EdgeType>
+static inline void insert_edge_after(EdgeType* edge, EdgeType* afterMe) {
+ edge->fPrev = afterMe;
+ edge->fNext = afterMe->fNext;
+ afterMe->fNext->fPrev = edge;
+ afterMe->fNext = edge;
+}
+
+template<class EdgeType>
+void backward_insert_edge_based_on_x(EdgeType* edge) {
+ SkFixed x = edge->fX;
+ EdgeType* prev = edge->fPrev;
+ while (prev->fPrev && prev->fX > x) {
+ prev = prev->fPrev;
+ }
+ if (prev->fNext != edge) {
+ remove_edge(edge);
+ insert_edge_after(edge, prev);
+ }
+}
+
+// Start from the right side, searching backwards for the point to begin the new edge list
+// insertion, marching forwards from here. The implementation could have started from the left
+// of the prior insertion, and search to the right, or with some additional caching, binary
+// search the starting point. More work could be done to determine optimal new edge insertion.
+template<class EdgeType>
+EdgeType* backward_insert_start(EdgeType* prev, SkFixed x) {
+ while (prev->fPrev && prev->fX > x) {
+ prev = prev->fPrev;
+ }
+ return prev;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkScan_AAAPath.cpp b/gfx/skia/skia/src/core/SkScan_AAAPath.cpp
new file mode 100644
index 0000000000..0e7ff978a3
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_AAAPath.cpp
@@ -0,0 +1,2033 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/base/SkTSort.h"
+#include "src/core/SkAnalyticEdge.h"
+#include "src/core/SkAntiRun.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkEdge.h"
+#include "src/core/SkEdgeBuilder.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkQuadClipper.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScan.h"
+#include "src/core/SkScanPriv.h"
+
+#include <utility>
+
+#if defined(SK_DISABLE_AAA)
+void SkScan::AAAFillPath(const SkPath&, SkBlitter*, const SkIRect&, const SkIRect&, bool) {
+ SkDEBUGFAIL("AAA Disabled");
+ return;
+}
+#else
+
+/*
+
+The following is a high-level overview of our analytic anti-aliasing
+algorithm. We consider a path as a collection of line segments, as
+quadratic/cubic curves are converted to small line segments. Without loss of
+generality, let's assume that the draw region is [0, W] x [0, H].
+
+Our algorithm is based on horizontal scan lines (y = c_i) as the previous
+sampling-based algorithm did. However, our algorithm uses non-equal-spaced
+scan lines, while the previous method always uses equal-spaced scan lines,
+such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm,
+and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous
+16-supersampling AA algorithm.
+
+Our algorithm contains scan lines y = c_i for c_i that is either:
+
+1. an integer between [0, H]
+
+2. the y value of a line segment endpoint
+
+3. the y value of an intersection of two line segments
+
+For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically compute
+the coverage of this horizontal strip of our path on each pixel. This can be
+done very efficiently because the strip of our path now only consists of
+trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes
+rectangles and triangles as special cases).
+
+We now describe how the coverage of a single pixel is computed against such a
+trapezoid. That coverage is essentially the intersection area of a rectangle
+(e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
+could be complicated, as shown in the example region A below:
+
++-----------\----+
+| \ C|
+| \ |
+\ \ |
+|\ A \|
+| \ \
+| \ |
+| B \ |
++----\-----------+
+
+However, we don't have to compute the area of A directly. Instead, we can
+compute the excluded area, which are B and C, quite easily, because they're
+just triangles. In fact, we can prove that an excluded region (take B as an
+example) is either itself a simple trapezoid (including rectangles, triangles,
+and empty regions), or its opposite (the opposite of B is A + C) is a simple
+trapezoid. In any case, we can compute its area efficiently.
+
+In summary, our algorithm has a higher quality because it generates ground-
+truth coverages analytically. It is also faster because it has much fewer
+unnecessary horizontal scan lines. For example, given a triangle path, the
+number of scan lines in our algorithm is only about 3 + H while the
+16-supersampling algorithm has about 4H scan lines.
+
+*/
+
+static void add_alpha(SkAlpha* alpha, SkAlpha delta) {
+ SkASSERT(*alpha + delta <= 256);
+ *alpha = SkAlphaRuns::CatchOverflow(*alpha + delta);
+}
+
+static void safely_add_alpha(SkAlpha* alpha, SkAlpha delta) {
+ *alpha = std::min(0xFF, *alpha + delta);
+}
+
+class AdditiveBlitter : public SkBlitter {
+public:
+ ~AdditiveBlitter() override {}
+
+ virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;
+
+ virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
+ virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0;
+ virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0;
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
+ }
+
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkDEBUGFAIL("Please call real blitter's blitV instead.");
+ }
+
+ void blitH(int x, int y, int width) override {
+ SkDEBUGFAIL("Please call real blitter's blitH instead.");
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkDEBUGFAIL("Please call real blitter's blitRect instead.");
+ }
+
+ void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
+ override {
+ SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
+ }
+
+ virtual int getWidth() = 0;
+
+ // Flush the additive alpha cache if floor(y) and floor(nextY) is different
+ // (i.e., we'll start working on a new pixel row).
+ virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
+};
+
+// We need this mask blitter because it significantly accelerates small path filling.
+class MaskAdditiveBlitter : public AdditiveBlitter {
+public:
+ MaskAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse);
+ ~MaskAdditiveBlitter() override { fRealBlitter->blitMask(fMask, fClipRect); }
+
+ // Most of the time, we still consider this mask blitter as the real blitter
+ // so we can accelerate blitRect and others. But sometimes we want to return
+ // the absolute real blitter (e.g., when we fall back to the old code path).
+ SkBlitter* getRealBlitter(bool forceRealBlitter) override {
+ return forceRealBlitter ? fRealBlitter : this;
+ }
+
+ // Virtual function is slow. So don't use this. Directly add alpha to the mask instead.
+ void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
+
+    // The following methods are used to blit rectangles during aaa_walk_convex_edges
+ // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
+ void blitAntiH(int x, int y, const SkAlpha alpha) override;
+ void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitRect(int x, int y, int width, int height) override;
+ void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
+ override;
+
+ // The flush is only needed for RLE (RunBasedAdditiveBlitter)
+ void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}
+
+ int getWidth() override { return fClipRect.width(); }
+
+ static bool CanHandleRect(const SkIRect& bounds) {
+ int width = bounds.width();
+ if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
+ return false;
+ }
+ int64_t rb = SkAlign4(width);
+ // use 64bits to detect overflow
+ int64_t storage = rb * bounds.height();
+
+ return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
+ (storage <= MaskAdditiveBlitter::kMAX_STORAGE);
+ }
+
+    // Return a pointer where pointer[x] corresponds to the alpha of (x, y)
+ uint8_t* getRow(int y) {
+ if (y != fY) {
+ fY = y;
+ fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
+ }
+ return fRow;
+ }
+
+private:
+ // so we don't try to do very wide things, where the RLE blitter would be faster
+ static const int kMAX_WIDTH = 32;
+ static const int kMAX_STORAGE = 1024;
+
+ SkBlitter* fRealBlitter;
+ SkMask fMask;
+ SkIRect fClipRect;
+ // we add 2 because we can write 1 extra byte at either end due to precision error
+ uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];
+
+ uint8_t* fRow;
+ int fY;
+};
+
+MaskAdditiveBlitter::MaskAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse) {
+ SkASSERT(CanHandleRect(ir));
+ SkASSERT(!isInverse);
+
+ fRealBlitter = realBlitter;
+
+ fMask.fImage = (uint8_t*)fStorage + 1; // There's 1 extra byte at either end of fStorage
+ fMask.fBounds = ir;
+ fMask.fRowBytes = ir.width();
+ fMask.fFormat = SkMask::kA8_Format;
+
+ fY = ir.fTop - 1;
+ fRow = nullptr;
+
+ fClipRect = ir;
+ if (!fClipRect.intersect(clipBounds)) {
+ SkASSERT(0);
+ fClipRect.setEmpty();
+ }
+
+ memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
+}
+
+void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
+ SK_ABORT("Don't use this; directly add alphas to the mask.");
+}
+
+void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
+ SkASSERT(x >= fMask.fBounds.fLeft - 1);
+ add_alpha(&this->getRow(y)[x], alpha);
+}
+
+void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
+ SkASSERT(x >= fMask.fBounds.fLeft - 1);
+ uint8_t* row = this->getRow(y);
+ for (int i = 0; i < width; ++i) {
+ add_alpha(&row[x + i], alpha);
+ }
+}
+
+void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
+ if (alpha == 0) {
+ return;
+ }
+ SkASSERT(x >= fMask.fBounds.fLeft - 1);
+ // This must be called as if this is a real blitter.
+ // So we directly set alpha rather than adding it.
+ uint8_t* row = this->getRow(y);
+ for (int i = 0; i < height; ++i) {
+ row[x] = alpha;
+ row += fMask.fRowBytes;
+ }
+}
+
+void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(x >= fMask.fBounds.fLeft - 1);
+ // This must be called as if this is a real blitter.
+ // So we directly set alpha rather than adding it.
+ uint8_t* row = this->getRow(y);
+ for (int i = 0; i < height; ++i) {
+ memset(row + x, 0xFF, width);
+ row += fMask.fRowBytes;
+ }
+}
+
+void MaskAdditiveBlitter::blitAntiRect(int x,
+ int y,
+ int width,
+ int height,
+ SkAlpha leftAlpha,
+ SkAlpha rightAlpha) {
+ blitV(x, y, height, leftAlpha);
+ blitV(x + 1 + width, y, height, rightAlpha);
+ blitRect(x + 1, y, width, height);
+}
+
+class RunBasedAdditiveBlitter : public AdditiveBlitter {
+public:
+ RunBasedAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse);
+
+ ~RunBasedAdditiveBlitter() override { this->flush(); }
+
+ SkBlitter* getRealBlitter(bool forceRealBlitter) override { return fRealBlitter; }
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
+ void blitAntiH(int x, int y, const SkAlpha alpha) override;
+ void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
+
+ int getWidth() override { return fWidth; }
+
+ void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
+ if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
+ this->flush();
+ }
+ }
+
+protected:
+ SkBlitter* fRealBlitter;
+
+ int fCurrY; // Current y coordinate.
+ int fWidth; // Widest row of region to be blitted
+ int fLeft; // Leftmost x coordinate in any row
+ int fTop; // Initial y coordinate (top of bounds)
+
+ // The next three variables are used to track a circular buffer that
+ // contains the values used in SkAlphaRuns. These variables should only
+ // ever be updated in advanceRuns(), and fRuns should always point to
+ // a valid SkAlphaRuns...
+ int fRunsToBuffer;
+ void* fRunsBuffer;
+ int fCurrentRun;
+ SkAlphaRuns fRuns;
+
+ int fOffsetX;
+
+ bool check(int x, int width) const { return x >= 0 && x + width <= fWidth; }
+
+ // extra one to store the zero at the end
+ int getRunsSz() const { return (fWidth + 1 + (fWidth + 2) / 2) * sizeof(int16_t); }
+
+ // This function updates the fRuns variable to point to the next buffer space
+ // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
+ // and resets fRuns to point to an empty scanline.
+ void advanceRuns() {
+ const size_t kRunsSz = this->getRunsSz();
+ fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
+ fRuns.fRuns = reinterpret_cast<int16_t*>(reinterpret_cast<uint8_t*>(fRunsBuffer) +
+ fCurrentRun * kRunsSz);
+ fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
+ fRuns.reset(fWidth);
+ }
+
+ // Blitting 0xFF and 0 is much faster so we snap alphas close to them
+ SkAlpha snapAlpha(SkAlpha alpha) { return alpha > 247 ? 0xFF : alpha < 8 ? 0x00 : alpha; }
+
+ void flush() {
+ if (fCurrY >= fTop) {
+ SkASSERT(fCurrentRun < fRunsToBuffer);
+ for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
+ // It seems that blitting 255 or 0 is much faster than blitting 254 or 1
+ fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
+ }
+ if (!fRuns.empty()) {
+ // SkDEBUGCODE(fRuns.dump();)
+ fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
+ this->advanceRuns();
+ fOffsetX = 0;
+ }
+ fCurrY = fTop - 1;
+ }
+ }
+
+ void checkY(int y) {
+ if (y != fCurrY) {
+ this->flush();
+ fCurrY = y;
+ }
+ }
+};
+
+RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse) {
+ fRealBlitter = realBlitter;
+
+ SkIRect sectBounds;
+ if (isInverse) {
+ // We use the clip bounds instead of the ir, since we may be asked to
+        // draw outside of the rect when we're an inverse filltype
+ sectBounds = clipBounds;
+ } else {
+ if (!sectBounds.intersect(ir, clipBounds)) {
+ sectBounds.setEmpty();
+ }
+ }
+
+ const int left = sectBounds.left();
+ const int right = sectBounds.right();
+
+ fLeft = left;
+ fWidth = right - left;
+ fTop = sectBounds.top();
+ fCurrY = fTop - 1;
+
+ fRunsToBuffer = realBlitter->requestRowsPreserved();
+ fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
+ fCurrentRun = -1;
+
+ this->advanceRuns();
+
+ fOffsetX = 0;
+}
+
+void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < 0) {
+ len += x;
+ antialias -= x;
+ x = 0;
+ }
+ len = std::min(len, fWidth - x);
+ SkASSERT(check(x, len));
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
+ for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
+ for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
+ fRuns.fRuns[x + i + j] = 1;
+ fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
+ }
+ fRuns.fRuns[x + i] = 1;
+ }
+ for (int i = 0; i < len; ++i) {
+ add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
+ }
+}
+
+void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (this->check(x, 1)) {
+ fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
+ }
+}
+
+void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (this->check(x, width)) {
+ fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
+ }
+}
+
+// This exists specifically for concave path filling.
+// In those cases, we can easily accumulate alpha greater than 0xFF.
+class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
+public:
+ SafeRLEAdditiveBlitter(SkBlitter* realBlitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool isInverse)
+ : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
+ void blitAntiH(int x, int y, const SkAlpha alpha) override;
+ void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
+};
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < 0) {
+ len += x;
+ antialias -= x;
+ x = 0;
+ }
+ len = std::min(len, fWidth - x);
+ SkASSERT(check(x, len));
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
+ for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
+ for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
+ fRuns.fRuns[x + i + j] = 1;
+ fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
+ }
+ fRuns.fRuns[x + i] = 1;
+ }
+ for (int i = 0; i < len; ++i) {
+ safely_add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
+ }
+}
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (check(x, 1)) {
+ // Break the run
+ fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
+ safely_add_alpha(&fRuns.fAlpha[x], alpha);
+ }
+}
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (check(x, width)) {
+ // Break the run
+ fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
+ for (int i = x; i < x + width; i += fRuns.fRuns[i]) {
+ safely_add_alpha(&fRuns.fAlpha[i], alpha);
+ }
+ }
+}
+
+// Return the alpha of a trapezoid whose height is 1
+static SkAlpha trapezoid_to_alpha(SkFixed l1, SkFixed l2) {
+ SkASSERT(l1 >= 0 && l2 >= 0);
+ SkFixed area = (l1 + l2) / 2;
+ return SkTo<SkAlpha>(area >> 8);
+}
+
+// The alpha of right-triangle (a, a*b)
+static SkAlpha partial_triangle_to_alpha(SkFixed a, SkFixed b) {
+ SkASSERT(a <= SK_Fixed1);
+#if 0
+ // TODO(mtklein): skia:8877
+ SkASSERT(b <= SK_Fixed1);
+#endif
+
+ // Approximating...
+ // SkFixed area = SkFixedMul(a, SkFixedMul(a,b)) / 2;
+ SkFixed area = (a >> 11) * (a >> 11) * (b >> 11);
+
+#if 0
+ // TODO(mtklein): skia:8877
+ return SkTo<SkAlpha>(area >> 8);
+#else
+ return SkTo<SkAlpha>((area >> 8) & 0xFF);
+#endif
+}
+
+static SkAlpha get_partial_alpha(SkAlpha alpha, SkFixed partialHeight) {
+ return SkToU8(SkFixedRoundToInt(alpha * partialHeight));
+}
+
+static SkAlpha get_partial_alpha(SkAlpha alpha, SkAlpha fullAlpha) {
+ return (alpha * fullAlpha) >> 8;
+}
+
+// For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right.
+// For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255.
+// This is rarely the problem so we'll only use this for blitting rectangles.
+static SkAlpha fixed_to_alpha(SkFixed f) {
+ SkASSERT(f <= SK_Fixed1);
+ return get_partial_alpha(0xFF, f);
+}
+
+// Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1),
+// approximate (very coarsely) the x coordinate of the intersection.
+static SkFixed approximate_intersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
+ if (l1 > r1) {
+ std::swap(l1, r1);
+ }
+ if (l2 > r2) {
+ std::swap(l2, r2);
+ }
+ return (std::max(l1, l2) + std::min(r1, r2)) / 2;
+}
+
+// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
+static void compute_alpha_above_line(SkAlpha* alphas,
+ SkFixed l,
+ SkFixed r,
+ SkFixed dY,
+ SkAlpha fullAlpha) {
+ SkASSERT(l <= r);
+ SkASSERT(l >> 16 == 0);
+ int R = SkFixedCeilToInt(r);
+ if (R == 0) {
+ return;
+ } else if (R == 1) {
+ alphas[0] = get_partial_alpha(((R << 17) - l - r) >> 9, fullAlpha);
+ } else {
+ SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
+ SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
+ SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle
+ alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
+ SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle
+ for (int i = 1; i < R - 1; ++i) {
+ alphas[i] = alpha16 >> 8;
+ alpha16 += dY;
+ }
+ alphas[R - 1] = fullAlpha - partial_triangle_to_alpha(last, dY);
+ }
+}
+
+// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0]
+static void compute_alpha_below_line(SkAlpha* alphas,
+ SkFixed l,
+ SkFixed r,
+ SkFixed dY,
+ SkAlpha fullAlpha) {
+ SkASSERT(l <= r);
+ SkASSERT(l >> 16 == 0);
+ int R = SkFixedCeilToInt(r);
+ if (R == 0) {
+ return;
+ } else if (R == 1) {
+ alphas[0] = get_partial_alpha(trapezoid_to_alpha(l, r), fullAlpha);
+ } else {
+ SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
+ SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
+ SkFixed lastH = SkFixedMul(last, dY); // vertical edge of the right-most triangle
+ alphas[R - 1] = SkFixedMul(last, lastH) >> 9; // triangle alpha
+ SkFixed alpha16 = lastH + (dY >> 1); // rectangle plus triangle
+ for (int i = R - 2; i > 0; i--) {
+ alphas[i] = (alpha16 >> 8) & 0xFF;
+ alpha16 += dY;
+ }
+ alphas[0] = fullAlpha - partial_triangle_to_alpha(first, dY);
+ }
+}
+
+// Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
+static void blit_single_alpha(AdditiveBlitter* blitter,
+ int y,
+ int x,
+ SkAlpha alpha,
+ SkAlpha fullAlpha,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ bool needSafeCheck) {
+ if (isUsingMask) {
+ if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
+ maskRow[x] = alpha;
+ } else if (needSafeCheck) {
+ safely_add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
+ } else {
+ add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
+ }
+ } else {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
+ blitter->getRealBlitter()->blitV(x, y, 1, alpha);
+ } else {
+ blitter->blitAntiH(x, y, get_partial_alpha(alpha, fullAlpha));
+ }
+ }
+}
+
+static void blit_two_alphas(AdditiveBlitter* blitter,
+ int y,
+ int x,
+ SkAlpha a1,
+ SkAlpha a2,
+ SkAlpha fullAlpha,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ bool needSafeCheck) {
+ if (isUsingMask) {
+ if (needSafeCheck) {
+ safely_add_alpha(&maskRow[x], a1);
+ safely_add_alpha(&maskRow[x + 1], a2);
+ } else {
+ add_alpha(&maskRow[x], a1);
+ add_alpha(&maskRow[x + 1], a2);
+ }
+ } else {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
+ blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
+ } else {
+ blitter->blitAntiH(x, y, a1);
+ blitter->blitAntiH(x + 1, y, a2);
+ }
+ }
+}
+
+static void blit_full_alpha(AdditiveBlitter* blitter,
+ int y,
+ int x,
+ int len,
+ SkAlpha fullAlpha,
+ SkAlpha* maskRow,
+ bool isUsingMask,
+ bool noRealBlitter,
+ bool needSafeCheck) {
+ if (isUsingMask) {
+ for (int i = 0; i < len; ++i) {
+ if (needSafeCheck) {
+ safely_add_alpha(&maskRow[x + i], fullAlpha);
+ } else {
+ add_alpha(&maskRow[x + i], fullAlpha);
+ }
+ }
+ } else {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
+ blitter->getRealBlitter()->blitH(x, y, len);
+ } else {
+ blitter->blitAntiH(x, y, len, fullAlpha);
+ }
+ }
+}
+
+// Blit one scanline's worth of a trapezoid whose upper edge spans [ul, ur] and
+// lower edge spans [ll, lr] (16.16 fixed-point x coordinates), subtracting the
+// coverage excluded by the left line (slope lDY) and right line (slope rDY)
+// from a run initialized to fullAlpha.
+static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter,
+                                   int y,
+                                   SkFixed ul,
+                                   SkFixed ur,
+                                   SkFixed ll,
+                                   SkFixed lr,
+                                   SkFixed lDY,
+                                   SkFixed rDY,
+                                   SkAlpha fullAlpha,
+                                   SkAlpha* maskRow,
+                                   bool isUsingMask,
+                                   bool noRealBlitter,
+                                   bool needSafeCheck) {
+    int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
+    int len = R - L;
+
+    if (len == 1) {
+        // Whole trapezoid fits in one pixel: collapse to a single alpha.
+        SkAlpha alpha = trapezoid_to_alpha(ur - ul, lr - ll);
+        blit_single_alpha(blitter,
+                          y,
+                          L,
+                          alpha,
+                          fullAlpha,
+                          maskRow,
+                          isUsingMask,
+                          noRealBlitter,
+                          needSafeCheck);
+        return;
+    }
+
+    // Scratch layout: alphas[len+1] | tempAlphas[len+1] | runs[len+1] (int16).
+    // Short rows use the stack buffer; longer rows heap-allocate.
+    const int kQuickLen = 31;
+    char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
+    SkAlpha* alphas;
+
+    if (len <= kQuickLen) {
+        alphas = (SkAlpha*)quickMemory;
+    } else {
+        alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
+    }
+
+    SkAlpha* tempAlphas = alphas + len + 1;
+    int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);
+
+    // Start from full coverage, then carve away the area outside each edge.
+    for (int i = 0; i < len; ++i) {
+        runs[i] = 1;
+        alphas[i] = fullAlpha;
+    }
+    runs[len] = 0;
+
+    int uL = SkFixedFloorToInt(ul);
+    int lL = SkFixedCeilToInt(ll);
+    if (uL + 2 == lL) {  // We only need to compute two triangles, accelerate this special case
+        SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
+        SkFixed second = ll - ul - first;
+        SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, lDY);
+        SkAlpha a2 = partial_triangle_to_alpha(second, lDY);
+        alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
+        alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
+    } else {
+        compute_alpha_below_line(
+                tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL), lDY, fullAlpha);
+        for (int i = uL; i < lL; ++i) {
+            if (alphas[i - L] > tempAlphas[i - L]) {
+                alphas[i - L] -= tempAlphas[i - L];
+            } else {
+                alphas[i - L] = 0;
+            }
+        }
+    }
+
+    int uR = SkFixedFloorToInt(ur);
+    int lR = SkFixedCeilToInt(lr);
+    if (uR + 2 == lR) {  // We only need to compute two triangles, accelerate this special case
+        SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
+        SkFixed second = lr - ur - first;
+        SkAlpha a1 = partial_triangle_to_alpha(first, rDY);
+        SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, rDY);
+        alphas[len - 2] = alphas[len - 2] > a1 ? alphas[len - 2] - a1 : 0;
+        alphas[len - 1] = alphas[len - 1] > a2 ? alphas[len - 1] - a2 : 0;
+    } else {
+        compute_alpha_above_line(
+                tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR), rDY, fullAlpha);
+        for (int i = uR; i < lR; ++i) {
+            if (alphas[i - L] > tempAlphas[i - L]) {
+                alphas[i - L] -= tempAlphas[i - L];
+            } else {
+                alphas[i - L] = 0;
+            }
+        }
+    }
+
+    if (isUsingMask) {
+        for (int i = 0; i < len; ++i) {
+            if (needSafeCheck) {
+                safely_add_alpha(&maskRow[L + i], alphas[i]);
+            } else {
+                add_alpha(&maskRow[L + i], alphas[i]);
+            }
+        }
+    } else {
+        if (fullAlpha == 0xFF && !noRealBlitter) {
+            // Real blitter is faster than RunBasedAdditiveBlitter
+            blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
+        } else {
+            blitter->blitAntiH(L, y, alphas, len);
+        }
+    }
+
+    if (len > kQuickLen) {
+        delete[] alphas;
+    }
+}
+
+// Blit one row of a trapezoid with upper span [ul, ur] and lower span [ll, lr].
+// Splits the row into a left partial region, a full-alpha middle rectangle, and
+// a right partial region when possible; otherwise falls back to the general
+// blit_aaa_trapezoid_row. lDY/rDY are the absolute inverse slopes of the left
+// and right edges, used to compute partial-pixel coverage.
+static void blit_trapezoid_row(AdditiveBlitter* blitter,
+                               int y,
+                               SkFixed ul,
+                               SkFixed ur,
+                               SkFixed ll,
+                               SkFixed lr,
+                               SkFixed lDY,
+                               SkFixed rDY,
+                               SkAlpha fullAlpha,
+                               SkAlpha* maskRow,
+                               bool isUsingMask,
+                               bool noRealBlitter = false,
+                               bool needSafeCheck = false) {
+    SkASSERT(lDY >= 0 && rDY >= 0);  // We should only send in the absolute value
+
+    if (ul > ur) {
+        return;
+    }
+
+    // Edge crosses. Approximate it. This should only happen due to precision limit,
+    // so the approximation could be very coarse.
+    if (ll > lr) {
+        ll = lr = approximate_intersection(ul, ll, ur, lr);
+    }
+
+    if (ul == ur && ll == lr) {
+        return;  // empty trapezoid
+    }
+
+    // We're going to use the left line ul-ll and the rite line ur-lr
+    // to exclude the area that's not covered by the path.
+    // Swapping (ul, ll) or (ur, lr) won't affect that exclusion
+    // so we'll do that for simplicity.
+    if (ul > ll) {
+        std::swap(ul, ll);
+    }
+    if (ur > lr) {
+        std::swap(ur, lr);
+    }
+
+    SkFixed joinLeft = SkFixedCeilToFixed(ll);
+    SkFixed joinRite = SkFixedFloorToFixed(ur);
+    if (joinLeft <= joinRite) {  // There's a rect from joinLeft to joinRite that we can blit
+        if (ul < joinLeft) {
+            // Left partial region: the wedge between ul and joinLeft.
+            int len = SkFixedCeilToInt(joinLeft - ul);
+            if (len == 1) {
+                SkAlpha alpha = trapezoid_to_alpha(joinLeft - ul, joinLeft - ll);
+                blit_single_alpha(blitter,
+                                  y,
+                                  ul >> 16,
+                                  alpha,
+                                  fullAlpha,
+                                  maskRow,
+                                  isUsingMask,
+                                  noRealBlitter,
+                                  needSafeCheck);
+            } else if (len == 2) {
+                SkFixed first = joinLeft - SK_Fixed1 - ul;
+                SkFixed second = ll - ul - first;
+                SkAlpha a1 = partial_triangle_to_alpha(first, lDY);
+                SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, lDY);
+                blit_two_alphas(blitter,
+                                y,
+                                ul >> 16,
+                                a1,
+                                a2,
+                                fullAlpha,
+                                maskRow,
+                                isUsingMask,
+                                noRealBlitter,
+                                needSafeCheck);
+            } else {
+                blit_aaa_trapezoid_row(blitter,
+                                       y,
+                                       ul,
+                                       joinLeft,
+                                       ll,
+                                       joinLeft,
+                                       lDY,
+                                       SK_MaxS32,
+                                       fullAlpha,
+                                       maskRow,
+                                       isUsingMask,
+                                       noRealBlitter,
+                                       needSafeCheck);
+            }
+        }
+        // SkAAClip requires that we blit from left to right.
+        // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
+        if (joinLeft < joinRite) {
+            blit_full_alpha(blitter,
+                            y,
+                            SkFixedFloorToInt(joinLeft),
+                            SkFixedFloorToInt(joinRite - joinLeft),
+                            fullAlpha,
+                            maskRow,
+                            isUsingMask,
+                            noRealBlitter,
+                            needSafeCheck);
+        }
+        if (lr > joinRite) {
+            // Right partial region: the wedge between joinRite and lr.
+            int len = SkFixedCeilToInt(lr - joinRite);
+            if (len == 1) {
+                SkAlpha alpha = trapezoid_to_alpha(ur - joinRite, lr - joinRite);
+                blit_single_alpha(blitter,
+                                  y,
+                                  joinRite >> 16,
+                                  alpha,
+                                  fullAlpha,
+                                  maskRow,
+                                  isUsingMask,
+                                  noRealBlitter,
+                                  needSafeCheck);
+            } else if (len == 2) {
+                SkFixed first = joinRite + SK_Fixed1 - ur;
+                SkFixed second = lr - ur - first;
+                SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, rDY);
+                SkAlpha a2 = partial_triangle_to_alpha(second, rDY);
+                blit_two_alphas(blitter,
+                                y,
+                                joinRite >> 16,
+                                a1,
+                                a2,
+                                fullAlpha,
+                                maskRow,
+                                isUsingMask,
+                                noRealBlitter,
+                                needSafeCheck);
+            } else {
+                blit_aaa_trapezoid_row(blitter,
+                                       y,
+                                       joinRite,
+                                       ur,
+                                       joinRite,
+                                       lr,
+                                       SK_MaxS32,
+                                       rDY,
+                                       fullAlpha,
+                                       maskRow,
+                                       isUsingMask,
+                                       noRealBlitter,
+                                       needSafeCheck);
+            }
+        }
+    } else {
+        // No full-alpha middle rectangle; handle the whole row generally.
+        blit_aaa_trapezoid_row(blitter,
+                               y,
+                               ul,
+                               ur,
+                               ll,
+                               lr,
+                               lDY,
+                               rDY,
+                               fullAlpha,
+                               maskRow,
+                               isUsingMask,
+                               noRealBlitter,
+                               needSafeCheck);
+    }
+}
+
+// Edge ordering for the initial sort: by top y, then by x at the top,
+// then by slope (dx/dy) to break ties between edges sharing a vertex.
+static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) {
+    int valuea = a.fUpperY;
+    int valueb = b.fUpperY;
+
+    if (valuea == valueb) {
+        valuea = a.fX;
+        valueb = b.fX;
+    }
+
+    if (valuea == valueb) {
+        valuea = a.fDX;
+        valueb = b.fDX;
+    }
+
+    return valuea < valueb;
+}
+
+// Sort `count` edges (count must be >= 1) and link them into a doubly-linked
+// list in sorted order. Returns the head; *last receives the tail.
+static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
+    SkTQSort(list, list + count);
+
+    // now make the edges linked in sorted order
+    for (int i = 1; i < count; ++i) {
+        list[i - 1]->fNext = list[i];
+        list[i]->fPrev = list[i - 1];
+    }
+
+    *last = list[count - 1];
+    return list[0];
+}
+
+// Debug-only: walk the linked edge list and assert it is sorted by fUpperY.
+// No-op in release builds.
+static void validate_sort(const SkAnalyticEdge* edge) {
+#ifdef SK_DEBUG
+    SkFixed y = SkIntToFixed(-32768);
+
+    while (edge->fUpperY != SK_MaxS32) {
+        edge->validate();
+        SkASSERT(y <= edge->fUpperY);
+
+        y = edge->fUpperY;
+        edge = (SkAnalyticEdge*)edge->fNext;
+    }
+#endif
+}
+
+// For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough
+// For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
+// relatively large compared to fQDDx/QCDDx and fQDDy/fCDDy
+// (nextEdge is only consulted for line edges; stop_y is unused in this overload.)
+static bool is_smooth_enough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
+    if (thisEdge->fCurveCount < 0) {
+        // Cubic edge: compare first derivative against second derivative.
+        const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
+        int ddshift = cEdge.fCurveShift;
+        return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
+               SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
+               // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
+               (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
+    } else if (thisEdge->fCurveCount > 0) {
+        // Quadratic edge: same idea with the quad's derivative fields.
+        const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
+        return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
+               SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
+               // current Dy is (fQDy - fQDDy) >> shift
+               (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift >= SK_Fixed1;
+    }
+    // Line edge: compare its slope with the next edge's slope.
+    return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 &&  // DDx should be small
+           nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1;     // Dy should be large
+}
+
+// Check if the leftE and riteE are changing smoothly in terms of fDX.
+// If yes, we can later skip the fractional y and directly jump to integer y.
+// currE is the next edge in the sorted list, used as the candidate replacement
+// for whichever of leftE/riteE ends first.
+static bool is_smooth_enough(SkAnalyticEdge* leftE,
+                             SkAnalyticEdge* riteE,
+                             SkAnalyticEdge* currE,
+                             int stop_y) {
+    if (currE->fUpperY >= SkLeftShift(stop_y, 16)) {
+        return false;  // We're at the end so we won't skip anything
+    }
+    if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
+        return is_smooth_enough(leftE, currE, stop_y);  // Only leftE is changing
+    } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
+        return is_smooth_enough(riteE, currE, stop_y);  // Only riteE is changing
+    }
+
+    // Now both edges are changing, find the second next edge
+    SkAnalyticEdge* nextCurrE = currE->fNext;
+    if (nextCurrE->fUpperY >= stop_y << 16) {  // Check if we're at the end
+        return false;
+    }
+    // Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not.
+    if (nextCurrE->fUpperX < currE->fUpperX) {
+        std::swap(currE, nextCurrE);
+    }
+    return is_smooth_enough(leftE, currE, stop_y) && is_smooth_enough(riteE, nextCurrE, stop_y);
+}
+
+// Walk the edge list of a convex path (at most one left and one right active
+// edge per scanline) from start_y to stop_y, blitting anti-aliased coverage.
+// Rectilinear spans (both edges vertical) take a fast rectangle path; general
+// spans are emitted row-by-row via blit_trapezoid_row, with x snapped to 1/16
+// pixel to avoid precision artifacts.
+static void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
+                                  AdditiveBlitter* blitter,
+                                  int start_y,
+                                  int stop_y,
+                                  SkFixed leftBound,
+                                  SkFixed riteBound,
+                                  bool isUsingMask) {
+    validate_sort((SkAnalyticEdge*)prevHead->fNext);
+
+    SkAnalyticEdge* leftE = (SkAnalyticEdge*)prevHead->fNext;
+    SkAnalyticEdge* riteE = (SkAnalyticEdge*)leftE->fNext;
+    SkAnalyticEdge* currE = (SkAnalyticEdge*)riteE->fNext;
+
+    SkFixed y = std::max(leftE->fUpperY, riteE->fUpperY);
+
+    for (;;) {
+        // We have to check fLowerY first because some edges might be alone (e.g., there's only
+        // a left edge but no right edge in a given y scan line) due to precision limit.
+        while (leftE->fLowerY <= y) {  // Due to smooth jump, we may pass multiple short edges
+            if (!leftE->update(y)) {
+                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
+                    goto END_WALK;
+                }
+                leftE = currE;
+                currE = (SkAnalyticEdge*)currE->fNext;
+            }
+        }
+        while (riteE->fLowerY <= y) {  // Due to smooth jump, we may pass multiple short edges
+            if (!riteE->update(y)) {
+                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
+                    goto END_WALK;
+                }
+                riteE = currE;
+                currE = (SkAnalyticEdge*)currE->fNext;
+            }
+        }
+
+        SkASSERT(leftE);
+        SkASSERT(riteE);
+
+        // check our bottom clip
+        if (SkFixedFloorToInt(y) >= stop_y) {
+            break;
+        }
+
+        SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
+        SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);
+
+        leftE->goY(y);
+        riteE->goY(y);
+
+        if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX && leftE->fDX > riteE->fDX)) {
+            std::swap(leftE, riteE);
+        }
+
+        SkFixed local_bot_fixed = std::min(leftE->fLowerY, riteE->fLowerY);
+        if (is_smooth_enough(leftE, riteE, currE, stop_y)) {
+            local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
+        }
+        local_bot_fixed = std::min(local_bot_fixed, SkIntToFixed(stop_y));
+
+        SkFixed left = std::max(leftBound, leftE->fX);
+        SkFixed dLeft = leftE->fDX;
+        SkFixed rite = std::min(riteBound, riteE->fX);
+        SkFixed dRite = riteE->fDX;
+        if (0 == (dLeft | dRite)) {
+            // Fast path: both edges are vertical, so this whole span is an
+            // axis-aligned rectangle with partial coverage on its borders.
+            int fullLeft = SkFixedCeilToInt(left);
+            int fullRite = SkFixedFloorToInt(rite);
+            SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
+            SkFixed partialRite = rite - SkIntToFixed(fullRite);
+            int fullTop = SkFixedCeilToInt(y);
+            int fullBot = SkFixedFloorToInt(local_bot_fixed);
+            SkFixed partialTop = SkIntToFixed(fullTop) - y;
+            SkFixed partialBot = local_bot_fixed - SkIntToFixed(fullBot);
+            if (fullTop > fullBot) {  // The rectangle is within one pixel height...
+                partialTop -= (SK_Fixed1 - partialBot);
+                partialBot = 0;
+            }
+
+            if (fullRite >= fullLeft) {
+                if (partialTop > 0) {  // blit first partial row
+                    if (partialLeft > 0) {
+                        blitter->blitAntiH(fullLeft - 1,
+                                           fullTop - 1,
+                                           fixed_to_alpha(SkFixedMul(partialTop, partialLeft)));
+                    }
+                    blitter->blitAntiH(
+                            fullLeft, fullTop - 1, fullRite - fullLeft, fixed_to_alpha(partialTop));
+                    if (partialRite > 0) {
+                        blitter->blitAntiH(fullRite,
+                                           fullTop - 1,
+                                           fixed_to_alpha(SkFixedMul(partialTop, partialRite)));
+                    }
+                    blitter->flush_if_y_changed(y, y + partialTop);
+                }
+
+                // Blit all full-height rows from fullTop to fullBot
+                if (fullBot > fullTop &&
+                    // SkAAClip cannot handle the empty rect so check the non-emptiness here
+                    // (bug chromium:662800)
+                    (fullRite > fullLeft || fixed_to_alpha(partialLeft) > 0 ||
+                     fixed_to_alpha(partialRite) > 0)) {
+                    blitter->getRealBlitter()->blitAntiRect(fullLeft - 1,
+                                                            fullTop,
+                                                            fullRite - fullLeft,
+                                                            fullBot - fullTop,
+                                                            fixed_to_alpha(partialLeft),
+                                                            fixed_to_alpha(partialRite));
+                }
+
+                if (partialBot > 0) {  // blit last partial row
+                    if (partialLeft > 0) {
+                        blitter->blitAntiH(fullLeft - 1,
+                                           fullBot,
+                                           fixed_to_alpha(SkFixedMul(partialBot, partialLeft)));
+                    }
+                    blitter->blitAntiH(
+                            fullLeft, fullBot, fullRite - fullLeft, fixed_to_alpha(partialBot));
+                    if (partialRite > 0) {
+                        blitter->blitAntiH(fullRite,
+                                           fullBot,
+                                           fixed_to_alpha(SkFixedMul(partialBot, partialRite)));
+                    }
+                }
+            } else {
+                // Normal conditions, this means left and rite are within the same pixel, but if
+                // both left and rite were < leftBounds or > rightBounds, both edges are clipped and
+                // we should not do any blitting (particularly since the negative width saturates to
+                // full alpha).
+                SkFixed width = rite - left;
+                if (width > 0) {
+                    if (partialTop > 0) {
+                        blitter->blitAntiH(fullLeft - 1,
+                                           fullTop - 1,
+                                           1,
+                                           fixed_to_alpha(SkFixedMul(partialTop, width)));
+                        blitter->flush_if_y_changed(y, y + partialTop);
+                    }
+                    if (fullBot > fullTop) {
+                        blitter->getRealBlitter()->blitV(
+                                fullLeft - 1, fullTop, fullBot - fullTop, fixed_to_alpha(width));
+                    }
+                    if (partialBot > 0) {
+                        blitter->blitAntiH(fullLeft - 1,
+                                           fullBot,
+                                           1,
+                                           fixed_to_alpha(SkFixedMul(partialBot, width)));
+                    }
+                }
+            }
+
+            y = local_bot_fixed;
+        } else {
+            // The following constant are used to snap X
+            // We snap X mainly for speedup (no tiny triangle) and
+            // avoiding edge cases caused by precision errors
+            const SkFixed kSnapDigit = SK_Fixed1 >> 4;
+            const SkFixed kSnapHalf = kSnapDigit >> 1;
+            const SkFixed kSnapMask = (-1 ^ (kSnapDigit - 1));
+            left += kSnapHalf;
+            rite += kSnapHalf;  // For fast rounding
+
+            // Number of blit_trapezoid_row calls we'll have
+            int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);
+
+            // If we're using mask blitter, we advance the mask row in this function
+            // to save some "if" condition checks.
+            SkAlpha* maskRow = nullptr;
+            if (isUsingMask) {
+                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
+            }
+
+            // Instead of writing one loop that handles both partial-row blit_trapezoid_row
+            // and full-row trapezoid_row together, we use the following 3-stage flow to
+            // handle partial-row blit and full-row blit separately. It will save us much time
+            // on changing y, left, and rite.
+            if (count > 1) {
+                if ((int)(y & 0xFFFF0000) != y) {  // There's a partial-row on the top
+                    count--;
+                    SkFixed nextY = SkFixedCeilToFixed(y + 1);
+                    SkFixed dY = nextY - y;
+                    SkFixed nextLeft = left + SkFixedMul(dLeft, dY);
+                    SkFixed nextRite = rite + SkFixedMul(dRite, dY);
+                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
+                             (nextLeft & kSnapMask) >= leftBound &&
+                             (nextRite & kSnapMask) <= riteBound);
+                    blit_trapezoid_row(blitter,
+                                       y >> 16,
+                                       left & kSnapMask,
+                                       rite & kSnapMask,
+                                       nextLeft & kSnapMask,
+                                       nextRite & kSnapMask,
+                                       leftE->fDY,
+                                       riteE->fDY,
+                                       get_partial_alpha(0xFF, dY),
+                                       maskRow,
+                                       isUsingMask);
+                    blitter->flush_if_y_changed(y, nextY);
+                    left = nextLeft;
+                    rite = nextRite;
+                    y = nextY;
+                }
+
+                while (count > 1) {  // Full rows in the middle
+                    count--;
+                    if (isUsingMask) {
+                        maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
+                    }
+                    SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
+                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
+                             (nextLeft & kSnapMask) >= leftBound &&
+                             (nextRite & kSnapMask) <= riteBound);
+                    blit_trapezoid_row(blitter,
+                                       y >> 16,
+                                       left & kSnapMask,
+                                       rite & kSnapMask,
+                                       nextLeft & kSnapMask,
+                                       nextRite & kSnapMask,
+                                       leftE->fDY,
+                                       riteE->fDY,
+                                       0xFF,
+                                       maskRow,
+                                       isUsingMask);
+                    blitter->flush_if_y_changed(y, nextY);
+                    left = nextLeft;
+                    rite = nextRite;
+                    y = nextY;
+                }
+            }
+
+            if (isUsingMask) {
+                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
+            }
+
+            SkFixed dY = local_bot_fixed - y;  // partial-row on the bottom
+            SkASSERT(dY <= SK_Fixed1);
+            // Smooth jumping to integer y may make the last nextLeft/nextRite out of bound.
+            // Take them back into the bound here.
+            // Note that we subtract kSnapHalf later so we have to add them to leftBound/riteBound
+            SkFixed nextLeft = std::max(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
+            SkFixed nextRite = std::min(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
+            SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
+                     (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
+            blit_trapezoid_row(blitter,
+                               y >> 16,
+                               left & kSnapMask,
+                               rite & kSnapMask,
+                               nextLeft & kSnapMask,
+                               nextRite & kSnapMask,
+                               leftE->fDY,
+                               riteE->fDY,
+                               get_partial_alpha(0xFF, dY),
+                               maskRow,
+                               isUsingMask);
+            blitter->flush_if_y_changed(y, local_bot_fixed);
+            left = nextLeft;
+            rite = nextRite;
+            y = local_bot_fixed;
+            left -= kSnapHalf;
+            rite -= kSnapHalf;
+        }
+
+        leftE->fX = left;
+        riteE->fX = rite;
+        leftE->fY = riteE->fY = y;
+    }
+
+END_WALK:;
+}
+
+// Lower *nextNextY to y if y is a valid candidate: strictly below nextY
+// (the scanline we're about to finish) and above the current *nextNextY.
+static void update_next_next_y(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
+    *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
+}
+
+// If edge may cross its predecessor within the next scanline (predecessor's
+// projected x passes this edge's projected x), force a fractional scan step
+// so the crossing is sampled with higher accuracy.
+static void check_intersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) {
+    if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
+        *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
+    }
+}
+
+// Splice edges whose fUpperY has been reached (<= y) into the active list,
+// keeping it sorted by fX, and update *nextNextY with upcoming event ys.
+// newEdge points at the first edge not yet activated.
+static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
+    if (newEdge->fUpperY > y) {
+        update_next_next_y(newEdge->fUpperY, y, nextNextY);
+        return;
+    }
+    SkAnalyticEdge* prev = newEdge->fPrev;
+    if (prev->fX <= newEdge->fX) {
+        // Already x-sorted where it is; just activate the run in place.
+        while (newEdge->fUpperY <= y) {
+            check_intersection(newEdge, y, nextNextY);
+            update_next_next_y(newEdge->fLowerY, y, nextNextY);
+            newEdge = newEdge->fNext;
+        }
+        update_next_next_y(newEdge->fUpperY, y, nextNextY);
+        return;
+    }
+    // find first x pos to insert
+    SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
+    // insert the lot, fixing up the links as we go
+    do {
+        SkAnalyticEdge* next = newEdge->fNext;
+        do {
+            if (start->fNext == newEdge) {
+                goto nextEdge;
+            }
+            SkAnalyticEdge* after = start->fNext;
+            if (after->fX >= newEdge->fX) {
+                break;
+            }
+            SkASSERT(start != after);
+            start = after;
+        } while (true);
+        remove_edge(newEdge);
+        insert_edge_after(newEdge, start);
+    nextEdge:
+        check_intersection(newEdge, y, nextNextY);
+        update_next_next_y(newEdge->fLowerY, y, nextNextY);
+        start = newEdge;
+        newEdge = next;
+    } while (newEdge->fUpperY <= y);
+    update_next_next_y(newEdge->fUpperY, y, nextNextY);
+}
+
+// Debug-only: assert the active edges for scanline y are well-linked and
+// sorted by fX. No-op in release builds.
+static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
+#ifdef SK_DEBUG
+    while (edge->fUpperY <= y) {
+        SkASSERT(edge->fPrev && edge->fNext);
+        SkASSERT(edge->fPrev->fNext == edge);
+        SkASSERT(edge->fNext->fPrev == edge);
+        SkASSERT(edge->fUpperY <= edge->fLowerY);
+        SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
+        edge = edge->fNext;
+    }
+#endif
+}
+
+// Return true if prev->fX, next->fX are too close in the current pixel row.
+static bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
+    // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
+    // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
+    // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
+    // edges prev and next are within one pixel.
+    constexpr SkFixed SLACK = SK_Fixed1;
+
+    // Note that even if the following test failed, the edges might still be very close to each
+    // other at some point within the current pixel row because of prev->fDX and next->fDX.
+    // However, to handle that case, we have to sacrifice more performance.
+    // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
+    // so I'll ignore fDX for performance tradeoff.
+    return next && prev && next->fUpperY < lowerY &&
+           prev->fX + SLACK >= next->fX - SkAbs32(next->fDX);
+    // The following is more accurate but also slower.
+    // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
+    //        prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
+}
+
+// This function exists for the case where the previous rite edge is removed because
+// its fLowerY <= nextY. Compares the previous right pixel column against the
+// current trapezoid's upper-left/lower-left x positions.
+static bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
+    return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
+}
+
+// Emit the trapezoid deferred on leftE (between its saved top at fSavedY and
+// the given bottom lowerY/lowerLeft/lowerRite), clipped to [leftClip,
+// rightClip], then clear the deferred link (fRiteE).
+static void blit_saved_trapezoid(SkAnalyticEdge* leftE,
+                                 SkFixed lowerY,
+                                 SkFixed lowerLeft,
+                                 SkFixed lowerRite,
+                                 AdditiveBlitter* blitter,
+                                 SkAlpha* maskRow,
+                                 bool isUsingMask,
+                                 bool noRealBlitter,
+                                 SkFixed leftClip,
+                                 SkFixed rightClip) {
+    SkAnalyticEdge* riteE = leftE->fRiteE;
+    SkASSERT(riteE);
+    SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
+    SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
+    int y = SkFixedFloorToInt(leftE->fSavedY);
+    // Instead of using fixed_to_alpha(lowerY - leftE->fSavedY), we use the following fullAlpha
+    // to eliminate cumulative error: if there are many fractional y scan lines within the
+    // same row, the former may accumulate the rounding error while the later won't.
+    SkAlpha fullAlpha = fixed_to_alpha(lowerY - SkIntToFixed(y)) -
+                        fixed_to_alpha(leftE->fSavedY - SkIntToFixed(y));
+    // We need fSavedDY because the (quad or cubic) edge might be updated
+    blit_trapezoid_row(
+            blitter,
+            y,
+            std::max(leftE->fSavedX, leftClip),
+            std::min(riteE->fSavedX, rightClip),
+            std::max(lowerLeft, leftClip),
+            std::min(lowerRite, rightClip),
+            leftE->fSavedDY,
+            riteE->fSavedDY,
+            fullAlpha,
+            maskRow,
+            isUsingMask,
+            noRealBlitter || (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY) ||
+                                                    edges_too_close(riteE, riteE->fNext, lowerY))),
+            true);
+    leftE->fRiteE = nullptr;
+}
+
+// Deferred-blit bookkeeping for a (leftE, riteE) pair: flush any previously
+// saved trapezoid whose right edge changed, save the pair's top coordinates if
+// not already saved, advance riteE to nextY, and blit immediately when nextY
+// is integral or either edge ends (so a row never spans a pixel boundary).
+static void deferred_blit(SkAnalyticEdge* leftE,
+                          SkAnalyticEdge* riteE,
+                          SkFixed left,
+                          SkFixed leftDY,  // don't save leftE->fX/fDY as they may have been updated
+                          SkFixed y,
+                          SkFixed nextY,
+                          bool isIntegralNextY,
+                          bool leftEnds,
+                          bool riteEnds,
+                          AdditiveBlitter* blitter,
+                          SkAlpha* maskRow,
+                          bool isUsingMask,
+                          bool noRealBlitter,
+                          SkFixed leftClip,
+                          SkFixed rightClip,
+                          int yShift) {
+    if (leftE->fRiteE && leftE->fRiteE != riteE) {
+        // leftE's right edge changed. Blit the saved trapezoid.
+        SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
+        blit_saved_trapezoid(leftE,
+                             y,
+                             left,
+                             leftE->fRiteE->fX,
+                             blitter,
+                             maskRow,
+                             isUsingMask,
+                             noRealBlitter,
+                             leftClip,
+                             rightClip);
+    }
+    if (!leftE->fRiteE) {
+        // Save and defer blitting the trapezoid
+        SkASSERT(riteE->fRiteE == nullptr);
+        SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
+        SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
+        leftE->saveXY(left, y, leftDY);
+        riteE->saveXY(riteE->fX, y, riteE->fDY);
+        leftE->fRiteE = riteE;
+    }
+    SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
+    riteE->goY(nextY, yShift);
+    // Always blit when edges end or nextY is integral
+    if (isIntegralNextY || leftEnds || riteEnds) {
+        blit_saved_trapezoid(leftE,
+                             nextY,
+                             leftE->fX,
+                             riteE->fX,
+                             blitter,
+                             maskRow,
+                             isUsingMask,
+                             noRealBlitter,
+                             leftClip,
+                             rightClip);
+    }
+}
+
+// General (possibly concave / inverse-filled) edge walker. Maintains the
+// active edge list sorted by x, accumulates winding per the fill type, and
+// blits a trapezoid for each in-interval span between y and nextY. nextY is
+// the nearest of: next integer y, next edge start/end, or a forced fractional
+// step near an edge intersection. With useDeferred, adjacent fractional rows
+// in the same pixel row are coalesced via deferred_blit.
+static void aaa_walk_edges(SkAnalyticEdge* prevHead,
+                           SkAnalyticEdge* nextTail,
+                           SkPathFillType fillType,
+                           AdditiveBlitter* blitter,
+                           int start_y,
+                           int stop_y,
+                           SkFixed leftClip,
+                           SkFixed rightClip,
+                           bool isUsingMask,
+                           bool forceRLE,
+                           bool useDeferred,
+                           bool skipIntersect) {
+    prevHead->fX = prevHead->fUpperX = leftClip;
+    nextTail->fX = nextTail->fUpperX = rightClip;
+    SkFixed y = std::max(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
+    SkFixed nextNextY = SK_MaxS32;
+
+    {
+        // Advance all edges that start above y, and seed nextNextY.
+        SkAnalyticEdge* edge;
+        for (edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
+            edge->goY(y);
+            update_next_next_y(edge->fLowerY, y, &nextNextY);
+        }
+        update_next_next_y(edge->fUpperY, y, &nextNextY);
+    }
+
+    int windingMask = SkPathFillType_IsEvenOdd(fillType) ? 1 : -1;
+    bool isInverse = SkPathFillType_IsInverse(fillType);
+
+    if (isInverse && SkIntToFixed(start_y) != y) {
+        // Inverse fill: everything above the first edge is inside; fill it.
+        int width = SkFixedFloorToInt(rightClip - leftClip);
+        if (SkFixedFloorToInt(y) != start_y) {
+            blitter->getRealBlitter()->blitRect(
+                    SkFixedFloorToInt(leftClip), start_y, width, SkFixedFloorToInt(y) - start_y);
+            start_y = SkFixedFloorToInt(y);
+        }
+        SkAlpha* maskRow =
+                isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y) : nullptr;
+        blit_full_alpha(blitter,
+                        start_y,
+                        SkFixedFloorToInt(leftClip),
+                        width,
+                        fixed_to_alpha(y - SkIntToFixed(start_y)),
+                        maskRow,
+                        isUsingMask,
+                        false,
+                        false);
+    }
+
+    while (true) {
+        int w = 0;
+        bool in_interval = isInverse;
+        SkFixed prevX = prevHead->fX;
+        SkFixed nextY = std::min(nextNextY, SkFixedCeilToFixed(y + 1));
+        bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
+        SkAnalyticEdge* currE = prevHead->fNext;
+        SkAnalyticEdge* leftE = prevHead;
+        SkFixed left = leftClip;
+        SkFixed leftDY = 0;
+        bool leftEnds = false;
+        int prevRite = SkFixedFloorToInt(leftClip);
+
+        nextNextY = SK_MaxS32;
+
+        // Scan steps are quantized to quarter pixels; pick the shift that
+        // matches the fractional step size for edge->goY below.
+        SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
+        int yShift = 0;
+        if ((nextY - y) & (SK_Fixed1 >> 2)) {
+            yShift = 2;
+            nextY = y + (SK_Fixed1 >> 2);
+        } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
+            yShift = 1;
+            SkASSERT(nextY == y + (SK_Fixed1 >> 1));
+        }
+
+        SkAlpha fullAlpha = fixed_to_alpha(nextY - y);
+
+        // If we're using mask blitter, we advance the mask row in this function
+        // to save some "if" condition checks.
+        SkAlpha* maskRow = nullptr;
+        if (isUsingMask) {
+            maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
+        }
+
+        SkASSERT(currE->fPrev == prevHead);
+        validate_edges_for_y(currE, y);
+
+        // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
+        // of the SkAAClip: |\| (two trapezoids with overlapping middle wedges)
+        bool noRealBlitter = forceRLE;  // forceRLE && (nextY - y != SK_Fixed1);
+
+        while (currE->fUpperY <= y) {
+            SkASSERT(currE->fLowerY >= nextY);
+            SkASSERT(currE->fY == y);
+
+            w += currE->fWinding;
+            bool prev_in_interval = in_interval;
+            in_interval = !(w & windingMask) == isInverse;
+
+            bool isLeft = in_interval && !prev_in_interval;
+            bool isRite = !in_interval && prev_in_interval;
+            bool currEnds = currE->fLowerY == nextY;
+
+            if (useDeferred) {
+                if (currE->fRiteE && !isLeft) {
+                    // currE is a left edge previously, but now it's not.
+                    // Blit the trapezoid between fSavedY and y.
+                    SkASSERT(currE->fRiteE->fY == y);
+                    blit_saved_trapezoid(currE,
+                                         y,
+                                         currE->fX,
+                                         currE->fRiteE->fX,
+                                         blitter,
+                                         maskRow,
+                                         isUsingMask,
+                                         noRealBlitter,
+                                         leftClip,
+                                         rightClip);
+                }
+                if (leftE->fRiteE == currE && !isRite) {
+                    // currE is a right edge previously, but now it's not.
+                    // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
+                    // in the previous if clause). Hence we blit the trapezoid.
+                    blit_saved_trapezoid(leftE,
+                                         y,
+                                         left,
+                                         currE->fX,
+                                         blitter,
+                                         maskRow,
+                                         isUsingMask,
+                                         noRealBlitter,
+                                         leftClip,
+                                         rightClip);
+                }
+            }
+
+            if (isRite) {
+                if (useDeferred) {
+                    deferred_blit(leftE,
+                                  currE,
+                                  left,
+                                  leftDY,
+                                  y,
+                                  nextY,
+                                  isIntegralNextY,
+                                  leftEnds,
+                                  currEnds,
+                                  blitter,
+                                  maskRow,
+                                  isUsingMask,
+                                  noRealBlitter,
+                                  leftClip,
+                                  rightClip,
+                                  yShift);
+                } else {
+                    SkFixed rite = currE->fX;
+                    currE->goY(nextY, yShift);
+                    SkFixed nextLeft = std::max(leftClip, leftE->fX);
+                    rite = std::min(rightClip, rite);
+                    SkFixed nextRite = std::min(rightClip, currE->fX);
+                    blit_trapezoid_row(
+                            blitter,
+                            y >> 16,
+                            left,
+                            rite,
+                            nextLeft,
+                            nextRite,
+                            leftDY,
+                            currE->fDY,
+                            fullAlpha,
+                            maskRow,
+                            isUsingMask,
+                            noRealBlitter || (fullAlpha == 0xFF &&
+                                              (edges_too_close(prevRite, left, leftE->fX) ||
+                                               edges_too_close(currE, currE->fNext, nextY))),
+                            true);
+                    prevRite = SkFixedCeilToInt(std::max(rite, currE->fX));
+                }
+            } else {
+                if (isLeft) {
+                    left = std::max(currE->fX, leftClip);
+                    leftDY = currE->fDY;
+                    leftE = currE;
+                    leftEnds = leftE->fLowerY == nextY;
+                }
+                currE->goY(nextY, yShift);
+            }
+
+            SkAnalyticEdge* next = currE->fNext;
+            SkFixed newX;
+
+            // Step curve edges to their next segment if the current one ends here.
+            while (currE->fLowerY <= nextY) {
+                if (currE->fCurveCount < 0) {
+                    SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
+                    cubicEdge->keepContinuous();
+                    if (!cubicEdge->updateCubic()) {
+                        break;
+                    }
+                } else if (currE->fCurveCount > 0) {
+                    SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
+                    quadEdge->keepContinuous();
+                    if (!quadEdge->updateQuadratic()) {
+                        break;
+                    }
+                } else {
+                    break;
+                }
+            }
+            SkASSERT(currE->fY == nextY);
+
+            if (currE->fLowerY <= nextY) {
+                remove_edge(currE);
+            } else {
+                update_next_next_y(currE->fLowerY, nextY, &nextNextY);
+                newX = currE->fX;
+                SkASSERT(currE->fLowerY > nextY);
+                if (newX < prevX) {  // ripple currE backwards until it is x-sorted
+                    // If the crossing edge is a right edge, blit the saved trapezoid.
+                    if (leftE->fRiteE == currE && useDeferred) {
+                        SkASSERT(leftE->fY == nextY && currE->fY == nextY);
+                        blit_saved_trapezoid(leftE,
+                                             nextY,
+                                             leftE->fX,
+                                             currE->fX,
+                                             blitter,
+                                             maskRow,
+                                             isUsingMask,
+                                             noRealBlitter,
+                                             leftClip,
+                                             rightClip);
+                    }
+                    backward_insert_edge_based_on_x(currE);
+                } else {
+                    prevX = newX;
+                }
+                if (!skipIntersect) {
+                    check_intersection(currE, nextY, &nextNextY);
+                }
+            }
+
+            currE = next;
+            SkASSERT(currE);
+        }
+
+        // was our right-edge culled away?
+        if (in_interval) {
+            if (useDeferred) {
+                deferred_blit(leftE,
+                              nextTail,
+                              left,
+                              leftDY,
+                              y,
+                              nextY,
+                              isIntegralNextY,
+                              leftEnds,
+                              false,
+                              blitter,
+                              maskRow,
+                              isUsingMask,
+                              noRealBlitter,
+                              leftClip,
+                              rightClip,
+                              yShift);
+            } else {
+                blit_trapezoid_row(blitter,
+                                   y >> 16,
+                                   left,
+                                   rightClip,
+                                   std::max(leftClip, leftE->fX),
+                                   rightClip,
+                                   leftDY,
+                                   0,
+                                   fullAlpha,
+                                   maskRow,
+                                   isUsingMask,
+                                   noRealBlitter || (fullAlpha == 0xFF &&
+                                                     edges_too_close(leftE->fPrev, leftE, nextY)),
+                                   true);
+            }
+        }
+
+        if (forceRLE) {
+            ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
+        }
+
+        y = nextY;
+        if (y >= SkIntToFixed(stop_y)) {
+            break;
+        }
+
+        // now currE points to the first edge with a fUpperY larger than the previous y
+        insert_new_edges(currE, y, &nextNextY);
+    }
+}
+
+// Top-level analytic AA fill: build the edge list from `path`, set up the head
+// and tail sentinel edges, clamp start_y/stop_y to the clip, and dispatch to
+// the convex fast path or the general walker.
+static void aaa_fill_path(const SkPath& path,
+                          const SkIRect& clipRect,
+                          AdditiveBlitter* blitter,
+                          int start_y,
+                          int stop_y,
+                          bool pathContainedInClip,
+                          bool isUsingMask,
+                          bool forceRLE) {  // forceRLE implies that SkAAClip is calling us
+    SkASSERT(blitter);
+
+    SkAnalyticEdgeBuilder builder;
+    int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &clipRect);
+    SkAnalyticEdge** list = builder.analyticEdgeList();
+
+    SkIRect rect = clipRect;
+    if (0 == count) {
+        if (path.isInverseFillType()) {
+            /*
+             *  Since we are in inverse-fill, our caller has already drawn above
+             *  our top (start_y) and will draw below our bottom (stop_y). Thus
+             *  we need to restrict our drawing to the intersection of the clip
+             *  and those two limits.
+             */
+            if (rect.fTop < start_y) {
+                rect.fTop = start_y;
+            }
+            if (rect.fBottom > stop_y) {
+                rect.fBottom = stop_y;
+            }
+            if (!rect.isEmpty()) {
+                blitter->getRealBlitter()->blitRect(
+                        rect.fLeft, rect.fTop, rect.width(), rect.height());
+            }
+        }
+        return;
+    }
+
+    SkAnalyticEdge headEdge, tailEdge, *last;
+    // this returns the first and last edge after they're sorted into a dlink list
+    SkAnalyticEdge* edge = sort_edges(list, count, &last);
+
+    // Sentinel before the first real edge; fX/fUpperY are extreme so it never
+    // participates in winding or sorting.
+    headEdge.fRiteE = nullptr;
+    headEdge.fPrev = nullptr;
+    headEdge.fNext = edge;
+    headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
+    headEdge.fX = SK_MinS32;
+    headEdge.fDX = 0;
+    headEdge.fDY = SK_MaxS32;
+    headEdge.fUpperX = SK_MinS32;
+    edge->fPrev = &headEdge;
+
+    // Sentinel after the last real edge.
+    tailEdge.fRiteE = nullptr;
+    tailEdge.fPrev = last;
+    tailEdge.fNext = nullptr;
+    tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
+    tailEdge.fX = SK_MaxS32;
+    tailEdge.fDX = 0;
+    tailEdge.fDY = SK_MaxS32;
+    tailEdge.fUpperX = SK_MaxS32;
+    last->fNext = &tailEdge;
+
+    // now edge is the head of the sorted linklist
+
+    if (!pathContainedInClip && start_y < clipRect.fTop) {
+        start_y = clipRect.fTop;
+    }
+    if (!pathContainedInClip && stop_y > clipRect.fBottom) {
+        stop_y = clipRect.fBottom;
+    }
+
+    SkFixed leftBound = SkIntToFixed(rect.fLeft);
+    SkFixed rightBound = SkIntToFixed(rect.fRight);
+    if (isUsingMask) {
+        // If we're using mask, then we have to limit the bound within the path bounds.
+        // Otherwise, the edge drift may access an invalid address inside the mask.
+        SkIRect ir;
+        path.getBounds().roundOut(&ir);
+        leftBound = std::max(leftBound, SkIntToFixed(ir.fLeft));
+        rightBound = std::min(rightBound, SkIntToFixed(ir.fRight));
+    }
+
+    if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
+        aaa_walk_convex_edges(
+                &headEdge, blitter, start_y, stop_y, leftBound, rightBound, isUsingMask);
+    } else {
+        // Only use deferred blitting if there are many edges.
+        bool useDeferred =
+                count >
+                (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;
+
+        // We skip intersection computation if there are many points which probably already
+        // give us enough fractional scan lines.
+        bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;
+
+        aaa_walk_edges(&headEdge,
+                       &tailEdge,
+                       path.getFillType(),
+                       blitter,
+                       start_y,
+                       stop_y,
+                       leftBound,
+                       rightBound,
+                       isUsingMask,
+                       forceRLE,
+                       useDeferred,
+                       skipIntersect);
+    }
+}
+
+// Check if the path is a rect and fat enough after clipping; if so, blit it.
+static inline bool try_blit_fat_anti_rect(SkBlitter* blitter,
+ const SkPath& path,
+ const SkIRect& clip) {
+ SkRect rect;
+ if (!path.isRect(&rect)) {
+ return false; // not rect
+ }
+ if (!rect.intersect(SkRect::Make(clip))) {
+ return true; // The intersection is empty. Hence consider it done.
+ }
+ SkIRect bounds = rect.roundOut();
+ if (bounds.width() < 3) {
+ return false; // not fat
+ }
+ blitter->blitFatAntiRect(rect);
+ return true;
+}
+
+void SkScan::AAAFillPath(const SkPath& path,
+ SkBlitter* blitter,
+ const SkIRect& ir,
+ const SkIRect& clipBounds,
+ bool forceRLE) {
+ bool containedInClip = clipBounds.contains(ir);
+ bool isInverse = path.isInverseFillType();
+
+ // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
+ // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
+ // the blit region is small enough (i.e., CanHandleRect(ir)). When isInverse is true, the blit
+ // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
+ // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
+ // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
+ // blitFatAntiRect to avoid the mask and its overhead.
+ if (MaskAdditiveBlitter::CanHandleRect(ir) && !isInverse && !forceRLE) {
+ // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
+ // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
+ if (!try_blit_fat_anti_rect(blitter, path, clipBounds)) {
+ MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path,
+ clipBounds,
+ &additiveBlitter,
+ ir.fTop,
+ ir.fBottom,
+ containedInClip,
+ true,
+ forceRLE);
+ }
+ } else if (!isInverse && path.isConvex()) {
+ // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
+ // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
+ // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
+ // RunBasedAdditiveBlitter would suffice.
+ RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path,
+ clipBounds,
+ &additiveBlitter,
+ ir.fTop,
+ ir.fBottom,
+ containedInClip,
+ false,
+ forceRLE);
+ } else {
+ // If the filling area might not be convex, the more involved aaa_walk_edges would
+ // be called and we have to clamp the alpha down to 255. The SafeRLEAdditiveBlitter
+ // does that at a cost of performance.
+ SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path,
+ clipBounds,
+ &additiveBlitter,
+ ir.fTop,
+ ir.fBottom,
+ containedInClip,
+ false,
+ forceRLE);
+ }
+}
+#endif // defined(SK_DISABLE_AAA)
diff --git a/gfx/skia/skia/src/core/SkScan_AntiPath.cpp b/gfx/skia/skia/src/core/SkScan_AntiPath.cpp
new file mode 100644
index 0000000000..4ccc82ac30
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_AntiPath.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkScanPriv.h"
+
+#include "include/core/SkGraphics.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "src/core/SkBlitter.h"
+
+static SkIRect safeRoundOut(const SkRect& src) {
+ // roundOut will pin huge floats to max/min int
+ SkIRect dst = src.roundOut();
+
+ // intersect with a smaller huge rect, so the rect will not be considered empty for being
+ // too large. e.g. { -SK_MaxS32 ... SK_MaxS32 } is considered empty because its width
+ // exceeds signed 32bit.
+ const int32_t limit = SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT;
+ (void)dst.intersect({ -limit, -limit, limit, limit});
+
+ return dst;
+}
+
+SkGraphics::PathAnalyticAADeciderProc gAnalyticAADeciderProc = nullptr;
+
+void SkGraphics::SetPathAnalyticAADecider(SkGraphics::PathAnalyticAADeciderProc decider) {
+ gAnalyticAADeciderProc = decider;
+}
+
+static bool ShouldUseAAA(const SkPath& path) {
+#if defined(SK_DISABLE_AAA)
+ return false;
+#elif defined(SK_FORCE_AAA)
+ return true;
+#else
+ if (gAnalyticAADeciderProc) {
+ return gAnalyticAADeciderProc(path);
+ }
+
+ if (gSkForceAnalyticAA) {
+ return true;
+ }
+ if (!gSkUseAnalyticAA) {
+ return false;
+ }
+ if (path.isRect(nullptr)) {
+ return true;
+ }
+
+ const SkRect& bounds = path.getBounds();
+ // When the path has so many points compared to the size of its
+ // bounds/resolution, it indicates that the path is not quite smooth in
+ // the current resolution: the expected number of turning points in
+ // every pixel row/column is significantly greater than zero. Hence
+ // Analytic AA is not likely to produce visible quality improvements,
+ // and Analytic AA might be slower than supersampling.
+ return path.countPoints() < std::max(bounds.width(), bounds.height()) / 2 - 10;
+#endif
+}
+
+static int overflows_short_shift(int value, int shift) {
+ const int s = 16 + shift;
+ return (SkLeftShift(value, s) >> s) - value;
+}
+
+/**
+ Would any of the coordinates of this rectangle not fit in a short,
+ when left-shifted by shift?
+*/
+static int rect_overflows_short_shift(SkIRect rect, int shift) {
+ SkASSERT(!overflows_short_shift(8191, shift));
+ SkASSERT(overflows_short_shift(8192, shift));
+ SkASSERT(!overflows_short_shift(32767, 0));
+ SkASSERT(overflows_short_shift(32768, 0));
+
+ // Since we expect these to succeed, we bit-or together
+ // for a tiny extra bit of speed.
+ return overflows_short_shift(rect.fLeft, shift) |
+ overflows_short_shift(rect.fRight, shift) |
+ overflows_short_shift(rect.fTop, shift) |
+ overflows_short_shift(rect.fBottom, shift);
+}
+
+void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
+ SkBlitter* blitter, bool forceRLE) {
+ if (origClip.isEmpty()) {
+ return;
+ }
+
+ const bool isInverse = path.isInverseFillType();
+ SkIRect ir = safeRoundOut(path.getBounds());
+ if (ir.isEmpty()) {
+ if (isInverse) {
+ blitter->blitRegion(origClip);
+ }
+ return;
+ }
+
+ // If the intersection of the path bounds and the clip bounds
+ // will overflow 32767 when << by SHIFT, we can't supersample,
+ // so draw without antialiasing.
+ SkIRect clippedIR;
+ if (isInverse) {
+ // If the path is an inverse fill, it's going to fill the entire
+ // clip, and we care whether the entire clip exceeds our limits.
+ clippedIR = origClip.getBounds();
+ } else {
+ if (!clippedIR.intersect(ir, origClip.getBounds())) {
+ return;
+ }
+ }
+ if (rect_overflows_short_shift(clippedIR, SK_SUPERSAMPLE_SHIFT)) {
+ SkScan::FillPath(path, origClip, blitter);
+ return;
+ }
+
+ // Our antialiasing can't handle a clip larger than 32767, so we restrict
+ // the clip to that limit here. (the runs[] uses int16_t for its index).
+ //
+ // A more general solution (one that could also eliminate the need to
+ // disable aa based on ir bounds (see overflows_short_shift) would be
+ // to tile the clip/target...
+ SkRegion tmpClipStorage;
+ const SkRegion* clipRgn = &origClip;
+ {
+ static const int32_t kMaxClipCoord = 32767;
+ const SkIRect& bounds = origClip.getBounds();
+ if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
+ SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
+ tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
+ clipRgn = &tmpClipStorage;
+ }
+ }
+ // for here down, use clipRgn, not origClip
+
+ SkScanClipper clipper(blitter, clipRgn, ir);
+
+ if (clipper.getBlitter() == nullptr) { // clipped out
+ if (isInverse) {
+ blitter->blitRegion(*clipRgn);
+ }
+ return;
+ }
+
+ SkASSERT(clipper.getClipRect() == nullptr ||
+ *clipper.getClipRect() == clipRgn->getBounds());
+
+ // now use the (possibly wrapped) blitter
+ blitter = clipper.getBlitter();
+
+ if (isInverse) {
+ sk_blit_above(blitter, ir, *clipRgn);
+ }
+
+ if (ShouldUseAAA(path)) {
+ // Do not use AAA if path is too complicated:
+ // there won't be any speedup or significant visual improvement.
+ SkScan::AAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
+ } else {
+ SkScan::SAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
+ }
+
+ if (isInverse) {
+ sk_blit_below(blitter, ir, *clipRgn);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkRasterClip.h"
+
+void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ if (clip.isEmpty() || !path.isFinite()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ FillPath(path, clip.bwRgn(), blitter);
+ } else {
+ SkRegion tmp;
+ SkAAClipBlitter aaBlitter;
+
+ tmp.setRect(clip.getBounds());
+ aaBlitter.init(blitter, &clip.aaRgn());
+ SkScan::FillPath(path, tmp, &aaBlitter);
+ }
+}
+
+void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ if (clip.isEmpty() || !path.isFinite()) {
+ return;
+ }
+
+ if (clip.isBW()) {
+ AntiFillPath(path, clip.bwRgn(), blitter, false);
+ } else {
+ SkRegion tmp;
+ SkAAClipBlitter aaBlitter;
+
+ tmp.setRect(clip.getBounds());
+ aaBlitter.init(blitter, &clip.aaRgn());
+ AntiFillPath(path, tmp, &aaBlitter, true); // SkAAClipBlitter can blitMask, why forceRLE?
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Antihair.cpp b/gfx/skia/skia/src/core/SkScan_Antihair.cpp
new file mode 100644
index 0000000000..eceff4ca9c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Antihair.cpp
@@ -0,0 +1,1014 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkScan.h"
+
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkLineClipper.h"
+#include "src/core/SkRasterClip.h"
+
+#include <utility>
+
+/* Our attempt to compute the worst case "bounds" for the horizontal and
+ vertical cases has some numerical bug in it, and we sometimes undervalue
+ our extents. The bug is that when this happens, we will set the clip to
+ nullptr (for speed), and thus draw outside of the clip by a pixel, which might
+ only look bad, but it might also access memory outside of the valid range
+ allocated for the device bitmap.
+
+ This define enables our fix to outset our "bounds" by 1, thus avoiding the
+ chance of the bug, but at the cost of sometimes taking the rectblitter
+ case (i.e. not setting the clip to nullptr) when we might not actually need
+ to. If we can improve/fix the actual calculations, then we can remove this
+ step.
+ */
+#define OUTSET_BEFORE_CLIP_TEST true
+
+#define HLINE_STACK_BUFFER 100
+
+static inline int SmallDot6Scale(int value, int dot6) {
+ SkASSERT((int16_t)value == value);
+ SkASSERT((unsigned)dot6 <= 64);
+ return (value * dot6) >> 6;
+}
+
+//#define TEST_GAMMA
+
+#ifdef TEST_GAMMA
+ static uint8_t gGammaTable[256];
+ #define ApplyGamma(table, alpha) (table)[alpha]
+
+ static void build_gamma_table() {
+ static bool gInit = false;
+
+ if (gInit == false) {
+ for (int i = 0; i < 256; i++) {
+ SkFixed n = i * 257;
+ n += n >> 15;
+ SkASSERT(n >= 0 && n <= SK_Fixed1);
+ n = SkFixedSqrt(n);
+ n = n * 255 >> 16;
+ // SkDebugf("morph %d -> %d\n", i, n);
+ gGammaTable[i] = SkToU8(n);
+ }
+ gInit = true;
+ }
+ }
+#else
+ #define ApplyGamma(table, alpha) SkToU8(alpha)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void call_hline_blitter(SkBlitter* blitter, int x, int y, int count,
+ U8CPU alpha) {
+ SkASSERT(count > 0);
+
+ int16_t runs[HLINE_STACK_BUFFER + 1];
+ uint8_t aa[HLINE_STACK_BUFFER];
+
+ do {
+ // In theory, we should be able to just do this once (outside of the loop),
+ // since aa[] and runs[] are supposed to be const when we call the blitter.
+ // In reality, some wrapper-blitters (e.g. SkRgnClipBlitter) cast away that
+ // constness, and modify the buffers in-place. Hence the need to be defensive
+ // here and reseed the aa value.
+ aa[0] = ApplyGamma(gGammaTable, alpha);
+
+ int n = count;
+ if (n > HLINE_STACK_BUFFER) {
+ n = HLINE_STACK_BUFFER;
+ }
+ runs[0] = SkToS16(n);
+ runs[n] = 0;
+ blitter->blitAntiH(x, y, aa, runs);
+ x += n;
+ count -= n;
+ } while (count > 0);
+}
+
+class SkAntiHairBlitter {
+public:
+ SkAntiHairBlitter() : fBlitter(nullptr) {}
+ virtual ~SkAntiHairBlitter() {}
+
+ SkBlitter* getBlitter() const { return fBlitter; }
+
+ void setup(SkBlitter* blitter) {
+ fBlitter = blitter;
+ }
+
+ virtual SkFixed drawCap(int x, SkFixed fy, SkFixed slope, int mod64) = 0;
+ virtual SkFixed drawLine(int x, int stopx, SkFixed fy, SkFixed slope) = 0;
+
+private:
+ SkBlitter* fBlitter;
+};
+
+class HLine_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int x, SkFixed fy, SkFixed slope, int mod64) override {
+ fy += SK_Fixed1/2;
+
+ int y = fy >> 16;
+ uint8_t a = (uint8_t)((fy >> 8) & 0xFF);
+
+ // lower line
+ unsigned ma = SmallDot6Scale(a, mod64);
+ if (ma) {
+ call_hline_blitter(this->getBlitter(), x, y, 1, ma);
+ }
+
+ // upper line
+ ma = SmallDot6Scale(255 - a, mod64);
+ if (ma) {
+ call_hline_blitter(this->getBlitter(), x, y - 1, 1, ma);
+ }
+
+ return fy - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int x, int stopx, SkFixed fy, SkFixed slope) override {
+ SkASSERT(x < stopx);
+ int count = stopx - x;
+ fy += SK_Fixed1/2;
+
+ int y = fy >> 16;
+ uint8_t a = (uint8_t)((fy >> 8) & 0xFF);
+
+ // lower line
+ if (a) {
+ call_hline_blitter(this->getBlitter(), x, y, count, a);
+ }
+
+ // upper line
+ a = 255 - a;
+ if (a) {
+ call_hline_blitter(this->getBlitter(), x, y - 1, count, a);
+ }
+
+ return fy - SK_Fixed1/2;
+ }
+};
+
+class Horish_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int x, SkFixed fy, SkFixed dy, int mod64) override {
+ fy += SK_Fixed1/2;
+
+ int lower_y = fy >> 16;
+ uint8_t a = (uint8_t)((fy >> 8) & 0xFF);
+ unsigned a0 = SmallDot6Scale(255 - a, mod64);
+ unsigned a1 = SmallDot6Scale(a, mod64);
+ this->getBlitter()->blitAntiV2(x, lower_y - 1, a0, a1);
+
+ return fy + dy - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int x, int stopx, SkFixed fy, SkFixed dy) override {
+ SkASSERT(x < stopx);
+
+ fy += SK_Fixed1/2;
+ SkBlitter* blitter = this->getBlitter();
+ do {
+ int lower_y = fy >> 16;
+ uint8_t a = (uint8_t)((fy >> 8) & 0xFF);
+ blitter->blitAntiV2(x, lower_y - 1, 255 - a, a);
+ fy += dy;
+ } while (++x < stopx);
+
+ return fy - SK_Fixed1/2;
+ }
+};
+
+class VLine_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int y, SkFixed fx, SkFixed dx, int mod64) override {
+ SkASSERT(0 == dx);
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ int a = (uint8_t)((fx >> 8) & 0xFF);
+
+ unsigned ma = SmallDot6Scale(a, mod64);
+ if (ma) {
+ this->getBlitter()->blitV(x, y, 1, ma);
+ }
+ ma = SmallDot6Scale(255 - a, mod64);
+ if (ma) {
+ this->getBlitter()->blitV(x - 1, y, 1, ma);
+ }
+
+ return fx - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int y, int stopy, SkFixed fx, SkFixed dx) override {
+ SkASSERT(y < stopy);
+ SkASSERT(0 == dx);
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ int a = (uint8_t)((fx >> 8) & 0xFF);
+
+ if (a) {
+ this->getBlitter()->blitV(x, y, stopy - y, a);
+ }
+ a = 255 - a;
+ if (a) {
+ this->getBlitter()->blitV(x - 1, y, stopy - y, a);
+ }
+
+ return fx - SK_Fixed1/2;
+ }
+};
+
+class Vertish_SkAntiHairBlitter : public SkAntiHairBlitter {
+public:
+ SkFixed drawCap(int y, SkFixed fx, SkFixed dx, int mod64) override {
+ fx += SK_Fixed1/2;
+
+ int x = fx >> 16;
+ uint8_t a = (uint8_t)((fx >> 8) & 0xFF);
+ this->getBlitter()->blitAntiH2(x - 1, y,
+ SmallDot6Scale(255 - a, mod64), SmallDot6Scale(a, mod64));
+
+ return fx + dx - SK_Fixed1/2;
+ }
+
+ SkFixed drawLine(int y, int stopy, SkFixed fx, SkFixed dx) override {
+ SkASSERT(y < stopy);
+ fx += SK_Fixed1/2;
+ do {
+ int x = fx >> 16;
+ uint8_t a = (uint8_t)((fx >> 8) & 0xFF);
+ this->getBlitter()->blitAntiH2(x - 1, y, 255 - a, a);
+ fx += dx;
+ } while (++y < stopy);
+
+ return fx - SK_Fixed1/2;
+ }
+};
+
+static inline SkFixed fastfixdiv(SkFDot6 a, SkFDot6 b) {
+ SkASSERT((SkLeftShift(a, 16) >> 16) == a);
+ SkASSERT(b != 0);
+ return SkLeftShift(a, 16) / b;
+}
+
+#define SkBITCOUNT(x) (sizeof(x) << 3)
+
+#if 1
+// returns high-bit set iff x==0x8000...
+static inline int bad_int(int x) {
+ return x & -x;
+}
+
+static int any_bad_ints(int a, int b, int c, int d) {
+ return (bad_int(a) | bad_int(b) | bad_int(c) | bad_int(d)) >> (SkBITCOUNT(int) - 1);
+}
+#else
+static inline int good_int(int x) {
+ return x ^ (1 << (SkBITCOUNT(x) - 1));
+}
+
+static int any_bad_ints(int a, int b, int c, int d) {
+ return !(good_int(a) & good_int(b) & good_int(c) & good_int(d));
+}
+#endif
+
+#ifdef SK_DEBUG
+static bool canConvertFDot6ToFixed(SkFDot6 x) {
+ const int maxDot6 = SK_MaxS32 >> (16 - 6);
+ return SkAbs32(x) <= maxDot6;
+}
+#endif
+
+/*
+ * We want the fractional part of ordinate, but we want multiples of 64 to
+ * return 64, not 0, so we can't just say (ordinate & 63).
+ * We basically want to compute those bits, and if they're 0, return 64.
+ * We can do that w/o a branch with an extra sub and add.
+ */
+static int contribution_64(SkFDot6 ordinate) {
+#if 0
+ int result = ordinate & 63;
+ if (0 == result) {
+ result = 64;
+ }
+#else
+ int result = ((ordinate - 1) & 63) + 1;
+#endif
+ SkASSERT(result > 0 && result <= 64);
+ return result;
+}
+
+static void do_anti_hairline(SkFDot6 x0, SkFDot6 y0, SkFDot6 x1, SkFDot6 y1,
+ const SkIRect* clip, SkBlitter* blitter) {
+ // check for integer NaN (0x80000000) which we can't handle (can't negate it)
+ // It appears typically from a huge float (inf or nan) being converted to int.
+ // If we see it, just don't draw.
+ if (any_bad_ints(x0, y0, x1, y1)) {
+ return;
+ }
+
+ // The caller must clip the line to [-32767.0 ... 32767.0] ahead of time
+ // (in dot6 format)
+ SkASSERT(canConvertFDot6ToFixed(x0));
+ SkASSERT(canConvertFDot6ToFixed(y0));
+ SkASSERT(canConvertFDot6ToFixed(x1));
+ SkASSERT(canConvertFDot6ToFixed(y1));
+
+ if (SkAbs32(x1 - x0) > SkIntToFDot6(511) || SkAbs32(y1 - y0) > SkIntToFDot6(511)) {
+ /* instead of (x0 + x1) >> 1, we shift each separately. This is less
+ precise, but avoids overflowing the intermediate result if the
+ values are huge. A better fix might be to clip the original pts
+ directly (i.e. do the divide), so we don't spend time subdividing
+ huge lines at all.
+ */
+ int hx = (x0 >> 1) + (x1 >> 1);
+ int hy = (y0 >> 1) + (y1 >> 1);
+ do_anti_hairline(x0, y0, hx, hy, clip, blitter);
+ do_anti_hairline(hx, hy, x1, y1, clip, blitter);
+ return;
+ }
+
+ int scaleStart, scaleStop;
+ int istart, istop;
+ SkFixed fstart, slope;
+
+ HLine_SkAntiHairBlitter hline_blitter;
+ Horish_SkAntiHairBlitter horish_blitter;
+ VLine_SkAntiHairBlitter vline_blitter;
+ Vertish_SkAntiHairBlitter vertish_blitter;
+ SkAntiHairBlitter* hairBlitter = nullptr;
+
+ if (SkAbs32(x1 - x0) > SkAbs32(y1 - y0)) { // mostly horizontal
+ if (x0 > x1) { // we want to go left-to-right
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ }
+
+ istart = SkFDot6Floor(x0);
+ istop = SkFDot6Ceil(x1);
+ fstart = SkFDot6ToFixed(y0);
+ if (y0 == y1) { // completely horizontal, take fast case
+ slope = 0;
+ hairBlitter = &hline_blitter;
+ } else {
+ slope = fastfixdiv(y1 - y0, x1 - x0);
+ SkASSERT(slope >= -SK_Fixed1 && slope <= SK_Fixed1);
+ fstart += (slope * (32 - (x0 & 63)) + 32) >> 6;
+ hairBlitter = &horish_blitter;
+ }
+
+ SkASSERT(istop > istart);
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = x1 - x0;
+ SkASSERT(scaleStart >= 0 && scaleStart <= 64);
+ scaleStop = 0;
+ } else {
+ scaleStart = 64 - (x0 & 63);
+ scaleStop = x1 & 63;
+ }
+
+ if (clip){
+ if (istart >= clip->fRight || istop <= clip->fLeft) {
+ return;
+ }
+ if (istart < clip->fLeft) {
+ fstart += slope * (clip->fLeft - istart);
+ istart = clip->fLeft;
+ scaleStart = 64;
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = contribution_64(x1);
+ scaleStop = 0;
+ }
+ }
+ if (istop > clip->fRight) {
+ istop = clip->fRight;
+ scaleStop = 0; // so we don't draw this last column
+ }
+
+ SkASSERT(istart <= istop);
+ if (istart == istop) {
+ return;
+ }
+ // now test if our Y values are completely inside the clip
+ int top, bottom;
+ if (slope >= 0) { // T2B
+ top = SkFixedFloorToInt(fstart - SK_FixedHalf);
+ bottom = SkFixedCeilToInt(fstart + (istop - istart - 1) * slope + SK_FixedHalf);
+ } else { // B2T
+ bottom = SkFixedCeilToInt(fstart + SK_FixedHalf);
+ top = SkFixedFloorToInt(fstart + (istop - istart - 1) * slope - SK_FixedHalf);
+ }
+#ifdef OUTSET_BEFORE_CLIP_TEST
+ top -= 1;
+ bottom += 1;
+#endif
+ if (top >= clip->fBottom || bottom <= clip->fTop) {
+ return;
+ }
+ if (clip->fTop <= top && clip->fBottom >= bottom) {
+ clip = nullptr;
+ }
+ }
+ } else { // mostly vertical
+ if (y0 > y1) { // we want to go top-to-bottom
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ }
+
+ istart = SkFDot6Floor(y0);
+ istop = SkFDot6Ceil(y1);
+ fstart = SkFDot6ToFixed(x0);
+ if (x0 == x1) {
+ if (y0 == y1) { // are we zero length?
+ return; // nothing to do
+ }
+ slope = 0;
+ hairBlitter = &vline_blitter;
+ } else {
+ slope = fastfixdiv(x1 - x0, y1 - y0);
+ SkASSERT(slope <= SK_Fixed1 && slope >= -SK_Fixed1);
+ fstart += (slope * (32 - (y0 & 63)) + 32) >> 6;
+ hairBlitter = &vertish_blitter;
+ }
+
+ SkASSERT(istop > istart);
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = y1 - y0;
+ SkASSERT(scaleStart >= 0 && scaleStart <= 64);
+ scaleStop = 0;
+ } else {
+ scaleStart = 64 - (y0 & 63);
+ scaleStop = y1 & 63;
+ }
+
+ if (clip) {
+ if (istart >= clip->fBottom || istop <= clip->fTop) {
+ return;
+ }
+ if (istart < clip->fTop) {
+ fstart += slope * (clip->fTop - istart);
+ istart = clip->fTop;
+ scaleStart = 64;
+ if (istop - istart == 1) {
+ // we are within a single pixel
+ scaleStart = contribution_64(y1);
+ scaleStop = 0;
+ }
+ }
+ if (istop > clip->fBottom) {
+ istop = clip->fBottom;
+ scaleStop = 0; // so we don't draw this last row
+ }
+
+ SkASSERT(istart <= istop);
+ if (istart == istop)
+ return;
+
+ // now test if our X values are completely inside the clip
+ int left, right;
+ if (slope >= 0) { // L2R
+ left = SkFixedFloorToInt(fstart - SK_FixedHalf);
+ right = SkFixedCeilToInt(fstart + (istop - istart - 1) * slope + SK_FixedHalf);
+ } else { // R2L
+ right = SkFixedCeilToInt(fstart + SK_FixedHalf);
+ left = SkFixedFloorToInt(fstart + (istop - istart - 1) * slope - SK_FixedHalf);
+ }
+#ifdef OUTSET_BEFORE_CLIP_TEST
+ left -= 1;
+ right += 1;
+#endif
+ if (left >= clip->fRight || right <= clip->fLeft) {
+ return;
+ }
+ if (clip->fLeft <= left && clip->fRight >= right) {
+ clip = nullptr;
+ }
+ }
+ }
+
+ SkRectClipBlitter rectClipper;
+ if (clip) {
+ rectClipper.init(blitter, *clip);
+ blitter = &rectClipper;
+ }
+
+ SkASSERT(hairBlitter);
+ hairBlitter->setup(blitter);
+
+#ifdef SK_DEBUG
+ if (scaleStart > 0 && scaleStop > 0) {
+ // be sure we don't draw twice in the same pixel
+ SkASSERT(istart < istop - 1);
+ }
+#endif
+
+ fstart = hairBlitter->drawCap(istart, fstart, slope, scaleStart);
+ istart += 1;
+ int fullSpans = istop - istart - (scaleStop > 0);
+ if (fullSpans > 0) {
+ fstart = hairBlitter->drawLine(istart, istart + fullSpans, fstart, slope);
+ }
+ if (scaleStop > 0) {
+ hairBlitter->drawCap(istop - 1, fstart, slope, scaleStop);
+ }
+}
+
+void SkScan::AntiHairLineRgn(const SkPoint array[], int arrayCount, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (clip && clip->isEmpty()) {
+ return;
+ }
+
+ SkASSERT(clip == nullptr || !clip->getBounds().isEmpty());
+
+#ifdef TEST_GAMMA
+ build_gamma_table();
+#endif
+
+ const SkScalar max = SkIntToScalar(32767);
+ const SkRect fixedBounds = SkRect::MakeLTRB(-max, -max, max, max);
+
+ SkRect clipBounds;
+ if (clip) {
+ clipBounds.set(clip->getBounds());
+ /* We perform integral clipping later on, but we do a scalar clip first
+ to ensure that our coordinates are expressible in fixed/integers.
+
+ antialiased hairlines can draw up to 1/2 of a pixel outside of
+ their bounds, so we need to outset the clip before calling the
+ clipper. To make the numerics safer, we outset by a whole pixel,
+ since the 1/2 pixel boundary is important to the antihair blitter,
+ we don't want to risk numerical fate by chopping on that edge.
+ */
+ clipBounds.outset(SK_Scalar1, SK_Scalar1);
+ }
+
+ for (int i = 0; i < arrayCount - 1; ++i) {
+ SkPoint pts[2];
+
+ // We have to pre-clip the line to fit in a SkFixed, so we just chop
+ // the line. TODO find a way to actually draw beyond that range.
+ if (!SkLineClipper::IntersectLine(&array[i], fixedBounds, pts)) {
+ continue;
+ }
+
+ if (clip && !SkLineClipper::IntersectLine(pts, clipBounds, pts)) {
+ continue;
+ }
+
+ SkFDot6 x0 = SkScalarToFDot6(pts[0].fX);
+ SkFDot6 y0 = SkScalarToFDot6(pts[0].fY);
+ SkFDot6 x1 = SkScalarToFDot6(pts[1].fX);
+ SkFDot6 y1 = SkScalarToFDot6(pts[1].fY);
+
+ if (clip) {
+ SkFDot6 left = std::min(x0, x1);
+ SkFDot6 top = std::min(y0, y1);
+ SkFDot6 right = std::max(x0, x1);
+ SkFDot6 bottom = std::max(y0, y1);
+ SkIRect ir;
+
+ ir.setLTRB(SkFDot6Floor(left) - 1,
+ SkFDot6Floor(top) - 1,
+ SkFDot6Ceil(right) + 1,
+ SkFDot6Ceil(bottom) + 1);
+
+ if (clip->quickReject(ir)) {
+ continue;
+ }
+ if (!clip->quickContains(ir)) {
+ SkRegion::Cliperator iter(*clip, ir);
+ const SkIRect* r = &iter.rect();
+
+ while (!iter.done()) {
+ do_anti_hairline(x0, y0, x1, y1, r, blitter);
+ iter.next();
+ }
+ continue;
+ }
+ // fall through to no-clip case
+ }
+ do_anti_hairline(x0, y0, x1, y1, nullptr, blitter);
+ }
+}
+
+void SkScan::AntiHairRect(const SkRect& rect, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ SkPoint pts[5];
+
+ pts[0].set(rect.fLeft, rect.fTop);
+ pts[1].set(rect.fRight, rect.fTop);
+ pts[2].set(rect.fRight, rect.fBottom);
+ pts[3].set(rect.fLeft, rect.fBottom);
+ pts[4] = pts[0];
+ SkScan::AntiHairLine(pts, 5, clip, blitter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef int FDot8; // 24.8 integer fixed point
+
+static inline FDot8 SkFixedToFDot8(SkFixed x) {
+ return (x + 0x80) >> 8;
+}
+
+static void do_scanline(FDot8 L, int top, FDot8 R, U8CPU alpha,
+ SkBlitter* blitter) {
+ SkASSERT(L < R);
+
+ if ((L >> 8) == ((R - 1) >> 8)) { // 1x1 pixel
+ blitter->blitV(L >> 8, top, 1, SkAlphaMul(alpha, R - L));
+ return;
+ }
+
+ int left = L >> 8;
+
+ if (L & 0xFF) {
+ blitter->blitV(left, top, 1, SkAlphaMul(alpha, 256 - (L & 0xFF)));
+ left += 1;
+ }
+
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0) {
+ call_hline_blitter(blitter, left, top, width, alpha);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, 1, SkAlphaMul(alpha, R & 0xFF));
+ }
+}
+
+static void antifilldot8(FDot8 L, FDot8 T, FDot8 R, FDot8 B, SkBlitter* blitter,
+ bool fillInner) {
+ // check for empty now that we're in our reduced precision space
+ if (L >= R || T >= B) {
+ return;
+ }
+ int top = T >> 8;
+ if (top == ((B - 1) >> 8)) { // just one scanline high
+ do_scanline(L, top, R, B - T - 1, blitter);
+ return;
+ }
+
+ if (T & 0xFF) {
+ do_scanline(L, top, R, 256 - (T & 0xFF), blitter);
+ top += 1;
+ }
+
+ int bot = B >> 8;
+ int height = bot - top;
+ if (height > 0) {
+ int left = L >> 8;
+ if (left == ((R - 1) >> 8)) { // just 1-pixel wide
+ blitter->blitV(left, top, height, R - L - 1);
+ } else {
+ if (L & 0xFF) {
+ blitter->blitV(left, top, height, 256 - (L & 0xFF));
+ left += 1;
+ }
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0 && fillInner) {
+ blitter->blitRect(left, top, width, height);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, height, R & 0xFF);
+ }
+ }
+ }
+
+ if (B & 0xFF) {
+ do_scanline(L, bot, R, B & 0xFF, blitter);
+ }
+}
+
+static void antifillrect(const SkXRect& xr, SkBlitter* blitter) {
+ antifilldot8(SkFixedToFDot8(xr.fLeft), SkFixedToFDot8(xr.fTop),
+ SkFixedToFDot8(xr.fRight), SkFixedToFDot8(xr.fBottom),
+ blitter, true);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::AntiFillXRect(const SkXRect& xr, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (nullptr == clip) {
+ antifillrect(xr, blitter);
+ } else {
+ SkIRect outerBounds;
+ XRect_roundOut(xr, &outerBounds);
+
+ if (clip->isRect()) {
+ const SkIRect& clipBounds = clip->getBounds();
+
+ if (clipBounds.contains(outerBounds)) {
+ antifillrect(xr, blitter);
+ } else {
+ SkXRect tmpR;
+ // this keeps our original edges fractional
+ XRect_set(&tmpR, clipBounds);
+ if (tmpR.intersect(xr)) {
+ antifillrect(tmpR, blitter);
+ }
+ }
+ } else {
+ SkRegion::Cliperator clipper(*clip, outerBounds);
+ const SkIRect& rr = clipper.rect();
+
+ while (!clipper.done()) {
+ SkXRect tmpR;
+
+ // this keeps our original edges fractional
+ XRect_set(&tmpR, rr);
+ if (tmpR.intersect(xr)) {
+ antifillrect(tmpR, blitter);
+ }
+ clipper.next();
+ }
+ }
+ }
+}
+
+void SkScan::AntiFillXRect(const SkXRect& xr, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFillXRect(xr, &clip.bwRgn(), blitter);
+ } else {
+ SkIRect outerBounds;
+ XRect_roundOut(xr, &outerBounds);
+
+ if (clip.quickContains(outerBounds)) {
+ AntiFillXRect(xr, nullptr, blitter);
+ } else {
+ SkAAClipBlitterWrapper wrapper(clip, blitter);
+ AntiFillXRect(xr, &wrapper.getRgn(), wrapper.getBlitter());
+ }
+ }
+}
+
+/* This takes a float-rect, but with the key improvement that it has
+ already been clipped, so we know that it is safe to convert it into a
+ XRect (fixedpoint), as it won't overflow.
+*/
+static void antifillrect(const SkRect& r, SkBlitter* blitter) {
+ SkXRect xr;
+
+ XRect_set(&xr, r);
+ antifillrect(xr, blitter);
+}
+
+/* We repeat the clipping logic of AntiFillXRect because the float rect might
+ overflow if we blindly converted it to an XRect. This sucks that we have to
+ repeat the clipping logic, but I don't see how to share the code/logic.
+
+ We clip r (as needed) into one or more (smaller) float rects, and then pass
+ those to our version of antifillrect, which converts it into an XRect and
+ then calls the blit.
+*/
+void SkScan::AntiFillRect(const SkRect& origR, const SkRegion* clip,
+ SkBlitter* blitter) {
+ if (clip) {
+ SkRect newR;
+ newR.set(clip->getBounds());
+ if (!newR.intersect(origR)) {
+ return;
+ }
+
+ const SkIRect outerBounds = newR.roundOut();
+
+ if (clip->isRect()) {
+ antifillrect(newR, blitter);
+ } else {
+ SkRegion::Cliperator clipper(*clip, outerBounds);
+ while (!clipper.done()) {
+ newR.set(clipper.rect());
+ if (newR.intersect(origR)) {
+ antifillrect(newR, blitter);
+ }
+ clipper.next();
+ }
+ }
+ } else {
+ antifillrect(origR, blitter);
+ }
+}
+
+void SkScan::AntiFillRect(const SkRect& r, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFillRect(r, &clip.bwRgn(), blitter);
+ } else {
+ SkAAClipBlitterWrapper wrap(clip, blitter);
+ AntiFillRect(r, &wrap.getRgn(), wrap.getBlitter());
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define SkAlphaMulRound(a, b) SkMulDiv255Round(a, b)
+
+// calls blitRect() if the rectangle is non-empty
+static void fillcheckrect(int L, int T, int R, int B, SkBlitter* blitter) {
+ if (L < R && T < B) {
+ blitter->blitRect(L, T, R - L, B - T);
+ }
+}
+
+static inline FDot8 SkScalarToFDot8(SkScalar x) {
+ return (int)(x * 256);
+}
+
+static inline int FDot8Floor(FDot8 x) {
+ return x >> 8;
+}
+
+static inline int FDot8Ceil(FDot8 x) {
+ return (x + 0xFF) >> 8;
+}
+
+// 1 - (1 - a)*(1 - b)
+static inline U8CPU InvAlphaMul(U8CPU a, U8CPU b) {
+ // need precise rounding (not just SkAlphaMul) so that values like
+ // a=228, b=252 don't overflow the result
+ return SkToU8(a + b - SkAlphaMulRound(a, b));
+}
+
+static void inner_scanline(FDot8 L, int top, FDot8 R, U8CPU alpha,
+ SkBlitter* blitter) {
+ SkASSERT(L < R);
+
+ if ((L >> 8) == ((R - 1) >> 8)) { // 1x1 pixel
+ FDot8 widClamp = R - L;
+ // border case clamp 256 to 255 instead of going through call_hline_blitter
+ // see skbug/4406
+ widClamp = widClamp - (widClamp >> 8);
+ blitter->blitV(L >> 8, top, 1, InvAlphaMul(alpha, widClamp));
+ return;
+ }
+
+ int left = L >> 8;
+ if (L & 0xFF) {
+ blitter->blitV(left, top, 1, InvAlphaMul(alpha, L & 0xFF));
+ left += 1;
+ }
+
+ int rite = R >> 8;
+ int width = rite - left;
+ if (width > 0) {
+ call_hline_blitter(blitter, left, top, width, alpha);
+ }
+
+ if (R & 0xFF) {
+ blitter->blitV(rite, top, 1, InvAlphaMul(alpha, ~R & 0xFF));
+ }
+}
+
+static void innerstrokedot8(FDot8 L, FDot8 T, FDot8 R, FDot8 B,
+ SkBlitter* blitter) {
+ SkASSERT(L < R && T < B);
+
+ int top = T >> 8;
+ if (top == ((B - 1) >> 8)) { // just one scanline high
+ // We want the inverse of B-T, since we're the inner-stroke
+ int alpha = 256 - (B - T);
+ if (alpha) {
+ inner_scanline(L, top, R, alpha, blitter);
+ }
+ return;
+ }
+
+ if (T & 0xFF) {
+ inner_scanline(L, top, R, T & 0xFF, blitter);
+ top += 1;
+ }
+
+ int bot = B >> 8;
+ int height = bot - top;
+ if (height > 0) {
+ if (L & 0xFF) {
+ blitter->blitV(L >> 8, top, height, L & 0xFF);
+ }
+ if (R & 0xFF) {
+ blitter->blitV(R >> 8, top, height, ~R & 0xFF);
+ }
+ }
+
+ if (B & 0xFF) {
+ inner_scanline(L, bot, R, ~B & 0xFF, blitter);
+ }
+}
+
+static inline void align_thin_stroke(FDot8& edge1, FDot8& edge2) {
+ SkASSERT(edge1 <= edge2);
+
+ if (FDot8Floor(edge1) == FDot8Floor(edge2)) {
+ edge2 -= (edge1 & 0xFF);
+ edge1 &= ~0xFF;
+ }
+}
+
+void SkScan::AntiFrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRegion* clip, SkBlitter* blitter) {
+ SkASSERT(strokeSize.fX >= 0 && strokeSize.fY >= 0);
+
+ SkScalar rx = SkScalarHalf(strokeSize.fX);
+ SkScalar ry = SkScalarHalf(strokeSize.fY);
+
+ // outset by the radius
+ FDot8 outerL = SkScalarToFDot8(r.fLeft - rx);
+ FDot8 outerT = SkScalarToFDot8(r.fTop - ry);
+ FDot8 outerR = SkScalarToFDot8(r.fRight + rx);
+ FDot8 outerB = SkScalarToFDot8(r.fBottom + ry);
+
+ SkIRect outer;
+ // set outer to the outer rect of the outer section
+ outer.setLTRB(FDot8Floor(outerL), FDot8Floor(outerT), FDot8Ceil(outerR), FDot8Ceil(outerB));
+
+ SkBlitterClipper clipper;
+ if (clip) {
+ if (clip->quickReject(outer)) {
+ return;
+ }
+ if (!clip->contains(outer)) {
+ blitter = clipper.apply(blitter, clip, &outer);
+ }
+ // now we can ignore clip for the rest of the function
+ }
+
+ // in case we lost a bit with diameter/2
+ rx = strokeSize.fX - rx;
+ ry = strokeSize.fY - ry;
+
+ // inset by the radius
+ FDot8 innerL = SkScalarToFDot8(r.fLeft + rx);
+ FDot8 innerT = SkScalarToFDot8(r.fTop + ry);
+ FDot8 innerR = SkScalarToFDot8(r.fRight - rx);
+ FDot8 innerB = SkScalarToFDot8(r.fBottom - ry);
+
+ // For sub-unit strokes, tweak the hulls such that one of the edges coincides with the pixel
+ // edge. This ensures that the general rect stroking logic below
+ // a) doesn't blit the same scanline twice
+ // b) computes the correct coverage when both edges fall within the same pixel
+ if (strokeSize.fX < 1 || strokeSize.fY < 1) {
+ align_thin_stroke(outerL, innerL);
+ align_thin_stroke(outerT, innerT);
+ align_thin_stroke(innerR, outerR);
+ align_thin_stroke(innerB, outerB);
+ }
+
+ // stroke the outer hull
+ antifilldot8(outerL, outerT, outerR, outerB, blitter, false);
+
+ // set outer to the outer rect of the middle section
+ outer.setLTRB(FDot8Ceil(outerL), FDot8Ceil(outerT), FDot8Floor(outerR), FDot8Floor(outerB));
+
+ if (innerL >= innerR || innerT >= innerB) {
+ fillcheckrect(outer.fLeft, outer.fTop, outer.fRight, outer.fBottom,
+ blitter);
+ } else {
+ SkIRect inner;
+ // set inner to the inner rect of the middle section
+ inner.setLTRB(FDot8Floor(innerL), FDot8Floor(innerT), FDot8Ceil(innerR), FDot8Ceil(innerB));
+
+ // draw the frame in 4 pieces
+ fillcheckrect(outer.fLeft, outer.fTop, outer.fRight, inner.fTop,
+ blitter);
+ fillcheckrect(outer.fLeft, inner.fTop, inner.fLeft, inner.fBottom,
+ blitter);
+ fillcheckrect(inner.fRight, inner.fTop, outer.fRight, inner.fBottom,
+ blitter);
+ fillcheckrect(outer.fLeft, inner.fBottom, outer.fRight, outer.fBottom,
+ blitter);
+
+ // now stroke the inner rect, which is similar to antifilldot8() except that
+ // it treats the fractional coordinates with the inverse bias (since it's
+ // inner).
+ innerstrokedot8(innerL, innerT, innerR, innerB, blitter);
+ }
+}
+
+void SkScan::AntiFrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRasterClip& clip, SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiFrameRect(r, strokeSize, &clip.bwRgn(), blitter);
+ } else {
+ SkAAClipBlitterWrapper wrap(clip, blitter);
+ AntiFrameRect(r, strokeSize, &wrap.getRgn(), wrap.getBlitter());
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Hairline.cpp b/gfx/skia/skia/src/core/SkScan_Hairline.cpp
new file mode 100644
index 0000000000..9aee071c6e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Hairline.cpp
@@ -0,0 +1,743 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkLineClipper.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScan.h"
+
+#include <utility>
+
+static void horiline(int x, int stopx, SkFixed fy, SkFixed dy,
+ SkBlitter* blitter) {
+ SkASSERT(x < stopx);
+
+ do {
+ blitter->blitH(x, fy >> 16, 1);
+ fy += dy;
+ } while (++x < stopx);
+}
+
+static void vertline(int y, int stopy, SkFixed fx, SkFixed dx,
+ SkBlitter* blitter) {
+ SkASSERT(y < stopy);
+
+ do {
+ blitter->blitH(fx >> 16, y, 1);
+ fx += dx;
+ } while (++y < stopy);
+}
+
+#ifdef SK_DEBUG
+static bool canConvertFDot6ToFixed(SkFDot6 x) {
+ const int maxDot6 = SK_MaxS32 >> (16 - 6);
+ return SkAbs32(x) <= maxDot6;
+}
+#endif
+
+void SkScan::HairLineRgn(const SkPoint array[], int arrayCount, const SkRegion* clip,
+ SkBlitter* origBlitter) {
+ SkBlitterClipper clipper;
+ SkIRect clipR, ptsR;
+
+ const SkScalar max = SkIntToScalar(32767);
+ const SkRect fixedBounds = SkRect::MakeLTRB(-max, -max, max, max);
+
+ SkRect clipBounds;
+ if (clip) {
+ clipBounds.set(clip->getBounds());
+ }
+
+ for (int i = 0; i < arrayCount - 1; ++i) {
+ SkBlitter* blitter = origBlitter;
+
+ SkPoint pts[2];
+
+ // We have to pre-clip the line to fit in a SkFixed, so we just chop
+ // the line. TODO find a way to actually draw beyond that range.
+ if (!SkLineClipper::IntersectLine(&array[i], fixedBounds, pts)) {
+ continue;
+ }
+
+ // Perform a clip in scalar space, so we catch huge values which might
+ // be missed after we convert to SkFDot6 (overflow)
+ if (clip && !SkLineClipper::IntersectLine(pts, clipBounds, pts)) {
+ continue;
+ }
+
+ SkFDot6 x0 = SkScalarToFDot6(pts[0].fX);
+ SkFDot6 y0 = SkScalarToFDot6(pts[0].fY);
+ SkFDot6 x1 = SkScalarToFDot6(pts[1].fX);
+ SkFDot6 y1 = SkScalarToFDot6(pts[1].fY);
+
+ SkASSERT(canConvertFDot6ToFixed(x0));
+ SkASSERT(canConvertFDot6ToFixed(y0));
+ SkASSERT(canConvertFDot6ToFixed(x1));
+ SkASSERT(canConvertFDot6ToFixed(y1));
+
+ if (clip) {
+ // now perform clipping again, as the rounding to dot6 can wiggle us
+ // our rects are really dot6 rects, but since we've already used
+ // lineclipper, we know they will fit in 32bits (26.6)
+ const SkIRect& bounds = clip->getBounds();
+
+ clipR.setLTRB(SkIntToFDot6(bounds.fLeft), SkIntToFDot6(bounds.fTop),
+ SkIntToFDot6(bounds.fRight), SkIntToFDot6(bounds.fBottom));
+ ptsR.setLTRB(x0, y0, x1, y1);
+ ptsR.sort();
+
+ // outset the right and bottom, to account for how hairlines are
+ // actually drawn, which may hit the pixel to the right or below of
+ // the coordinate
+ ptsR.fRight += SK_FDot6One;
+ ptsR.fBottom += SK_FDot6One;
+
+ if (!SkIRect::Intersects(ptsR, clipR)) {
+ continue;
+ }
+ if (!clip->isRect() || !clipR.contains(ptsR)) {
+ blitter = clipper.apply(origBlitter, clip);
+ }
+ }
+
+ SkFDot6 dx = x1 - x0;
+ SkFDot6 dy = y1 - y0;
+
+ if (SkAbs32(dx) > SkAbs32(dy)) { // mostly horizontal
+ if (x0 > x1) { // we want to go left-to-right
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ }
+ int ix0 = SkFDot6Round(x0);
+ int ix1 = SkFDot6Round(x1);
+ if (ix0 == ix1) {// too short to draw
+ continue;
+ }
+#if defined(SK_BUILD_FOR_FUZZER)
+ if ((ix1 - ix0) > 100000 || (ix1 - ix0) < 0) {
+ continue; // too big to draw
+ }
+#endif
+ SkFixed slope = SkFixedDiv(dy, dx);
+ SkFixed startY = SkFDot6ToFixed(y0) + (slope * ((32 - x0) & 63) >> 6);
+
+ horiline(ix0, ix1, startY, slope, blitter);
+ } else { // mostly vertical
+ if (y0 > y1) { // we want to go top-to-bottom
+ using std::swap;
+ swap(x0, x1);
+ swap(y0, y1);
+ }
+ int iy0 = SkFDot6Round(y0);
+ int iy1 = SkFDot6Round(y1);
+ if (iy0 == iy1) { // too short to draw
+ continue;
+ }
+#if defined(SK_BUILD_FOR_FUZZER)
+ if ((iy1 - iy0) > 100000 || (iy1 - iy0) < 0) {
+ continue; // too big to draw
+ }
+#endif
+ SkFixed slope = SkFixedDiv(dx, dy);
+ SkFixed startX = SkFDot6ToFixed(x0) + (slope * ((32 - y0) & 63) >> 6);
+
+ vertline(iy0, iy1, startX, slope, blitter);
+ }
+ }
+}
+
+// we don't just draw 4 lines, 'cause that can leave a gap in the bottom-right
+// and double-hit the top-left.
+void SkScan::HairRect(const SkRect& rect, const SkRasterClip& clip, SkBlitter* blitter) {
+ SkAAClipBlitterWrapper wrapper;
+ SkBlitterClipper clipper;
+ // Create the enclosing bounds of the hairrect. i.e. we will stroke the interior of r.
+ SkIRect r = SkIRect::MakeLTRB(SkScalarFloorToInt(rect.fLeft),
+ SkScalarFloorToInt(rect.fTop),
+ SkScalarFloorToInt(rect.fRight + 1),
+ SkScalarFloorToInt(rect.fBottom + 1));
+
+ // Note: r might be crazy big, if rect was huge, possibly getting pinned to max/min s32.
+ // We need to trim it back to something reasonable before we can query its width etc.
+ // since r.fRight - r.fLeft might wrap around to negative even if fRight > fLeft.
+ //
+ // We outset the clip bounds by 1 before intersecting, since r is being stroked and not filled
+ // so we don't want to pin an edge of it to the clip. The intersect's job is mostly to just
+ // get the actual edge values into a reasonable range (e.g. so width() can't overflow).
+ if (!r.intersect(clip.getBounds().makeOutset(1, 1))) {
+ return;
+ }
+
+ if (clip.quickReject(r)) {
+ return;
+ }
+ if (!clip.quickContains(r)) {
+ const SkRegion* clipRgn;
+ if (clip.isBW()) {
+ clipRgn = &clip.bwRgn();
+ } else {
+ wrapper.init(clip, blitter);
+ clipRgn = &wrapper.getRgn();
+ blitter = wrapper.getBlitter();
+ }
+ blitter = clipper.apply(blitter, clipRgn);
+ }
+
+ int width = r.width();
+ int height = r.height();
+
+ if ((width | height) == 0) {
+ return;
+ }
+ if (width <= 2 || height <= 2) {
+ blitter->blitRect(r.fLeft, r.fTop, width, height);
+ return;
+ }
+ // if we get here, we know we have 4 segments to draw
+ blitter->blitH(r.fLeft, r.fTop, width); // top
+ blitter->blitRect(r.fLeft, r.fTop + 1, 1, height - 2); // left
+ blitter->blitRect(r.fRight - 1, r.fTop + 1, 1, height - 2); // right
+ blitter->blitH(r.fLeft, r.fBottom - 1, width); // bottom
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkPath.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkGeometry.h"
+
+#define kMaxCubicSubdivideLevel 9
+#define kMaxQuadSubdivideLevel 5
+
+using float2 = skvx::float2;
+
+static uint32_t compute_int_quad_dist(const SkPoint pts[3]) {
+ // compute the vector between the control point ([1]) and the middle of the
+ // line connecting the start and end ([0] and [2])
+ SkScalar dx = SkScalarHalf(pts[0].fX + pts[2].fX) - pts[1].fX;
+ SkScalar dy = SkScalarHalf(pts[0].fY + pts[2].fY) - pts[1].fY;
+ // we want everyone to be positive
+ dx = SkScalarAbs(dx);
+ dy = SkScalarAbs(dy);
+ // convert to whole pixel values (use ceiling to be conservative).
+ // assign to unsigned so we can safely add 1/2 of the smaller and still fit in
+ // uint32_t, since SkScalarCeilToInt() returns 31 bits at most.
+ uint32_t idx = SkScalarCeilToInt(dx);
+ uint32_t idy = SkScalarCeilToInt(dy);
+ // use the cheap approx for distance
+ if (idx > idy) {
+ return idx + (idy >> 1);
+ } else {
+ return idy + (idx >> 1);
+ }
+}
+
+static void hair_quad(const SkPoint pts[3], const SkRegion* clip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ SkASSERT(level <= kMaxQuadSubdivideLevel);
+
+ SkQuadCoeff coeff(pts);
+
+ const int lines = 1 << level;
+ float2 t(0);
+ float2 dt(SK_Scalar1 / lines);
+
+ SkPoint tmp[(1 << kMaxQuadSubdivideLevel) + 1];
+ SkASSERT((unsigned)lines < std::size(tmp));
+
+ tmp[0] = pts[0];
+ float2 A = coeff.fA;
+ float2 B = coeff.fB;
+ float2 C = coeff.fC;
+ for (int i = 1; i < lines; ++i) {
+ t = t + dt;
+ ((A * t + B) * t + C).store(&tmp[i]);
+ }
+ tmp[lines] = pts[2];
+ lineproc(tmp, lines + 1, clip, blitter);
+}
+
+static SkRect compute_nocheck_quad_bounds(const SkPoint pts[3]) {
+ SkASSERT(SkScalarsAreFinite(&pts[0].fX, 6));
+
+ float2 min = float2::Load(pts);
+ float2 max = min;
+ for (int i = 1; i < 3; ++i) {
+ float2 pair = float2::Load(pts+i);
+ min = skvx::min(min, pair);
+ max = skvx::max(max, pair);
+ }
+ return { min[0], min[1], max[0], max[1] };
+}
+
+static bool is_inverted(const SkRect& r) {
+ return r.fLeft > r.fRight || r.fTop > r.fBottom;
+}
+
+// Can't call SkRect::intersects, since it cares about empty, and we don't (since we're tracking
+// something to be stroked, so empty can still draw something (e.g. horizontal line)
+static bool geometric_overlap(const SkRect& a, const SkRect& b) {
+ SkASSERT(!is_inverted(a) && !is_inverted(b));
+ return a.fLeft < b.fRight && b.fLeft < a.fRight &&
+ a.fTop < b.fBottom && b.fTop < a.fBottom;
+}
+
+// Can't call SkRect::contains, since it cares about empty, and we don't (since we're tracking
+// something to be stroked, so empty can still draw something (e.g. horizontal line)
+static bool geometric_contains(const SkRect& outer, const SkRect& inner) {
+ SkASSERT(!is_inverted(outer) && !is_inverted(inner));
+ return inner.fRight <= outer.fRight && inner.fLeft >= outer.fLeft &&
+ inner.fBottom <= outer.fBottom && inner.fTop >= outer.fTop;
+}
+
+static inline void hairquad(const SkPoint pts[3], const SkRegion* clip, const SkRect* insetClip, const SkRect* outsetClip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ if (insetClip) {
+ SkASSERT(outsetClip);
+ SkRect bounds = compute_nocheck_quad_bounds(pts);
+ if (!geometric_overlap(*outsetClip, bounds)) {
+ return;
+ } else if (geometric_contains(*insetClip, bounds)) {
+ clip = nullptr;
+ }
+ }
+
+ hair_quad(pts, clip, blitter, level, lineproc);
+}
+
+static inline SkScalar max_component(const float2& value) {
+ SkScalar components[2];
+ value.store(components);
+ return std::max(components[0], components[1]);
+}
+
+static inline int compute_cubic_segs(const SkPoint pts[4]) {
+ float2 p0 = from_point(pts[0]);
+ float2 p1 = from_point(pts[1]);
+ float2 p2 = from_point(pts[2]);
+ float2 p3 = from_point(pts[3]);
+
+ const float2 oneThird(1.0f / 3.0f);
+ const float2 twoThird(2.0f / 3.0f);
+
+ float2 p13 = oneThird * p3 + twoThird * p0;
+ float2 p23 = oneThird * p0 + twoThird * p3;
+
+ SkScalar diff = max_component(max(abs(p1 - p13), abs(p2 - p23)));
+ SkScalar tol = SK_Scalar1 / 8;
+
+ for (int i = 0; i < kMaxCubicSubdivideLevel; ++i) {
+ if (diff < tol) {
+ return 1 << i;
+ }
+ tol *= 4;
+ }
+ return 1 << kMaxCubicSubdivideLevel;
+}
+
+static bool lt_90(SkPoint p0, SkPoint pivot, SkPoint p2) {
+ return SkVector::DotProduct(p0 - pivot, p2 - pivot) >= 0;
+}
+
+// The off-curve points are "inside" the limits of the on-curve pts
+static bool quick_cubic_niceness_check(const SkPoint pts[4]) {
+ return lt_90(pts[1], pts[0], pts[3]) &&
+ lt_90(pts[2], pts[0], pts[3]) &&
+ lt_90(pts[1], pts[3], pts[0]) &&
+ lt_90(pts[2], pts[3], pts[0]);
+}
+
+using mask2 = skvx::Vec<2, uint32_t>;
+
+static inline mask2 float2_is_finite(const float2& x) {
+ const mask2 exp_mask = mask2(0xFF << 23);
+ return (skvx::bit_pun<mask2>(x) & exp_mask) != exp_mask;
+}
+
+static void hair_cubic(const SkPoint pts[4], const SkRegion* clip, SkBlitter* blitter,
+ SkScan::HairRgnProc lineproc) {
+ const int lines = compute_cubic_segs(pts);
+ SkASSERT(lines > 0);
+ if (1 == lines) {
+ SkPoint tmp[2] = { pts[0], pts[3] };
+ lineproc(tmp, 2, clip, blitter);
+ return;
+ }
+
+ SkCubicCoeff coeff(pts);
+
+ const float2 dt(SK_Scalar1 / lines);
+ float2 t(0);
+
+ SkPoint tmp[(1 << kMaxCubicSubdivideLevel) + 1];
+ SkASSERT((unsigned)lines < std::size(tmp));
+
+ tmp[0] = pts[0];
+ float2 A = coeff.fA;
+ float2 B = coeff.fB;
+ float2 C = coeff.fC;
+ float2 D = coeff.fD;
+ mask2 is_finite(~0); // start out as true
+ for (int i = 1; i < lines; ++i) {
+ t = t + dt;
+ float2 p = ((A * t + B) * t + C) * t + D;
+ is_finite &= float2_is_finite(p);
+ p.store(&tmp[i]);
+ }
+ if (all(is_finite)) {
+ tmp[lines] = pts[3];
+ lineproc(tmp, lines + 1, clip, blitter);
+ } // else some point(s) are non-finite, so don't draw
+}
+
+static SkRect compute_nocheck_cubic_bounds(const SkPoint pts[4]) {
+ SkASSERT(SkScalarsAreFinite(&pts[0].fX, 8));
+
+ float2 min = float2::Load(pts);
+ float2 max = min;
+ for (int i = 1; i < 4; ++i) {
+ float2 pair = float2::Load(pts+i);
+ min = skvx::min(min, pair);
+ max = skvx::max(max, pair);
+ }
+ return { min[0], min[1], max[0], max[1] };
+}
+
+static inline void haircubic(const SkPoint pts[4], const SkRegion* clip, const SkRect* insetClip, const SkRect* outsetClip,
+ SkBlitter* blitter, int level, SkScan::HairRgnProc lineproc) {
+ if (insetClip) {
+ SkASSERT(outsetClip);
+ SkRect bounds = compute_nocheck_cubic_bounds(pts);
+ if (!geometric_overlap(*outsetClip, bounds)) {
+ return;
+ } else if (geometric_contains(*insetClip, bounds)) {
+ clip = nullptr;
+ }
+ }
+
+ if (quick_cubic_niceness_check(pts)) {
+ hair_cubic(pts, clip, blitter, lineproc);
+ } else {
+ SkPoint tmp[13];
+ SkScalar tValues[3];
+
+ int count = SkChopCubicAtMaxCurvature(pts, tmp, tValues);
+ for (int i = 0; i < count; i++) {
+ hair_cubic(&tmp[i * 3], clip, blitter, lineproc);
+ }
+ }
+}
+
+static int compute_quad_level(const SkPoint pts[3]) {
+ uint32_t d = compute_int_quad_dist(pts);
+ /* quadratics approach the line connecting their start and end points
+ 4x closer with each subdivision, so we compute the number of
+ subdivisions to be the minimum needed to get that distance to be less
+ than a pixel.
+ */
+ int level = (33 - SkCLZ(d)) >> 1;
+ // safety check on level (from the previous version)
+ if (level > kMaxQuadSubdivideLevel) {
+ level = kMaxQuadSubdivideLevel;
+ }
+ return level;
+}
+
+/* Extend the points in the direction of the starting or ending tangent by 1/2 unit to
+ account for a round or square cap. If there's no distance between the end point and
+ the control point, use the next control point to create a tangent. If the curve
+ is degenerate, move the cap out 1/2 unit horizontally. */
+template <SkPaint::Cap capStyle>
+void extend_pts(SkPath::Verb prevVerb, SkPath::Verb nextVerb, SkPoint* pts, int ptCount) {
+ SkASSERT(SkPaint::kSquare_Cap == capStyle || SkPaint::kRound_Cap == capStyle);
+ // The area of a circle is PI*R*R. For a unit circle, R=1/2, and the cap covers half of that.
+ const SkScalar capOutset = SkPaint::kSquare_Cap == capStyle ? 0.5f : SK_ScalarPI / 8;
+ if (SkPath::kMove_Verb == prevVerb) {
+ SkPoint* first = pts;
+ SkPoint* ctrl = first;
+ int controls = ptCount - 1;
+ SkVector tangent;
+ do {
+ tangent = *first - *++ctrl;
+ } while (tangent.isZero() && --controls > 0);
+ if (tangent.isZero()) {
+ tangent.set(1, 0);
+ controls = ptCount - 1; // If all points are equal, move all but one
+ } else {
+ tangent.normalize();
+ }
+ do { // If the end point and control points are equal, loop to move them in tandem.
+ first->fX += tangent.fX * capOutset;
+ first->fY += tangent.fY * capOutset;
+ ++first;
+ } while (++controls < ptCount);
+ }
+ if (SkPath::kMove_Verb == nextVerb || SkPath::kDone_Verb == nextVerb
+ || SkPath::kClose_Verb == nextVerb) {
+ SkPoint* last = &pts[ptCount - 1];
+ SkPoint* ctrl = last;
+ int controls = ptCount - 1;
+ SkVector tangent;
+ do {
+ tangent = *last - *--ctrl;
+ } while (tangent.isZero() && --controls > 0);
+ if (tangent.isZero()) {
+ tangent.set(-1, 0);
+ controls = ptCount - 1;
+ } else {
+ tangent.normalize();
+ }
+ do {
+ last->fX += tangent.fX * capOutset;
+ last->fY += tangent.fY * capOutset;
+ --last;
+ } while (++controls < ptCount);
+ }
+}
+
+template <SkPaint::Cap capStyle>
+void hair_path(const SkPath& path, const SkRasterClip& rclip, SkBlitter* blitter,
+ SkScan::HairRgnProc lineproc) {
+ if (path.isEmpty()) {
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrap;
+ const SkRegion* clip = nullptr;
+ SkRect insetStorage, outsetStorage;
+ const SkRect* insetClip = nullptr;
+ const SkRect* outsetClip = nullptr;
+
+ {
+ const int capOut = SkPaint::kButt_Cap == capStyle ? 1 : 2;
+ const SkIRect ibounds = path.getBounds().roundOut().makeOutset(capOut, capOut);
+ if (rclip.quickReject(ibounds)) {
+ return;
+ }
+ if (!rclip.quickContains(ibounds)) {
+ if (rclip.isBW()) {
+ clip = &rclip.bwRgn();
+ } else {
+ wrap.init(rclip, blitter);
+ blitter = wrap.getBlitter();
+ clip = &wrap.getRgn();
+ }
+
+ /*
+ * We now cache two scalar rects, to use for culling per-segment (e.g. cubic).
+ * Since we're hairlining, the "bounds" of the control points isn't necessarily the
+ * limit of where a segment can draw (it might draw up to 1 pixel beyond in aa-hairs).
+ *
+ * Compute the pt-bounds per segment is easy, so we do that, and then inversely adjust
+ * the culling bounds so we can just do a straight compare per segment.
+ *
+ * insetClip is used for quick-accept (i.e. the segment is not clipped), so we inset
+ * it from the clip-bounds (since segment bounds can be off by 1).
+ *
+ * outsetClip is used for quick-reject (i.e. the segment is entirely outside), so we
+ * outset it from the clip-bounds.
+ */
+ insetStorage.set(clip->getBounds());
+ outsetStorage = insetStorage.makeOutset(1, 1);
+ insetStorage.inset(1, 1);
+ if (is_inverted(insetStorage)) {
+ /*
+ * our bounds checks assume the rects are never inverted. If insetting has
+ * created that, we assume that the area is too small to safely perform a
+ * quick-accept, so we just mark the rect as empty (so the quick-accept check
+ * will always fail).
+ */
+ insetStorage.setEmpty(); // just so we don't pass an inverted rect
+ }
+ if (rclip.isRect()) {
+ insetClip = &insetStorage;
+ }
+ outsetClip = &outsetStorage;
+ }
+ }
+
+ SkPathPriv::RangeIter iter = SkPathPriv::Iterate(path).begin();
+ SkPathPriv::RangeIter end = SkPathPriv::Iterate(path).end();
+ SkPoint pts[4], firstPt, lastPt;
+ SkPath::Verb prevVerb;
+ SkAutoConicToQuads converter;
+
+ if (SkPaint::kButt_Cap != capStyle) {
+ prevVerb = SkPath::kDone_Verb;
+ }
+ while (iter != end) {
+ auto [pathVerb, pathPts, w] = *iter++;
+ SkPath::Verb verb = (SkPath::Verb)pathVerb;
+ SkPath::Verb nextVerb = (iter != end) ? (SkPath::Verb)iter.peekVerb() : SkPath::kDone_Verb;
+ memcpy(pts, pathPts, SkPathPriv::PtsInIter(verb) * sizeof(SkPoint));
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ firstPt = lastPt = pts[0];
+ break;
+ case SkPath::kLine_Verb:
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, nextVerb, pts, 2);
+ }
+ lineproc(pts, 2, clip, blitter);
+ lastPt = pts[1];
+ break;
+ case SkPath::kQuad_Verb:
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, nextVerb, pts, 3);
+ }
+ hairquad(pts, clip, insetClip, outsetClip, blitter, compute_quad_level(pts), lineproc);
+ lastPt = pts[2];
+ break;
+ case SkPath::kConic_Verb: {
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, nextVerb, pts, 3);
+ }
+ // how close should the quads be to the original conic?
+ const SkScalar tol = SK_Scalar1 / 4;
+ const SkPoint* quadPts = converter.computeQuads(pts, *w, tol);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ int level = compute_quad_level(quadPts);
+ hairquad(quadPts, clip, insetClip, outsetClip, blitter, level, lineproc);
+ quadPts += 2;
+ }
+ lastPt = pts[2];
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ if (SkPaint::kButt_Cap != capStyle) {
+ extend_pts<capStyle>(prevVerb, nextVerb, pts, 4);
+ }
+ haircubic(pts, clip, insetClip, outsetClip, blitter, kMaxCubicSubdivideLevel, lineproc);
+ lastPt = pts[3];
+ } break;
+ case SkPath::kClose_Verb:
+ pts[0] = lastPt;
+ pts[1] = firstPt;
+ if (SkPaint::kButt_Cap != capStyle && prevVerb == SkPath::kMove_Verb) {
+ // cap moveTo/close to match svg expectations for degenerate segments
+ extend_pts<capStyle>(prevVerb, nextVerb, pts, 2);
+ }
+ lineproc(pts, 2, clip, blitter);
+ break;
+ case SkPath::kDone_Verb:
+ break;
+ }
+ if (SkPaint::kButt_Cap != capStyle) {
+ if (prevVerb == SkPath::kMove_Verb &&
+ verb >= SkPath::kLine_Verb && verb <= SkPath::kCubic_Verb) {
+ firstPt = pts[0]; // the curve moved the initial point, so close to it instead
+ }
+ prevVerb = verb;
+ }
+ }
+}
+
+void SkScan::HairPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kButt_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kButt_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+void SkScan::HairSquarePath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kSquare_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairSquarePath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kSquare_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+void SkScan::HairRoundPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kRound_Cap>(path, clip, blitter, SkScan::HairLineRgn);
+}
+
+void SkScan::AntiHairRoundPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
+ hair_path<SkPaint::kRound_Cap>(path, clip, blitter, SkScan::AntiHairLineRgn);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::FrameRect(const SkRect& r, const SkPoint& strokeSize,
+ const SkRasterClip& clip, SkBlitter* blitter) {
+ SkASSERT(strokeSize.fX >= 0 && strokeSize.fY >= 0);
+
+ if (strokeSize.fX < 0 || strokeSize.fY < 0) {
+ return;
+ }
+
+ const SkScalar dx = strokeSize.fX;
+ const SkScalar dy = strokeSize.fY;
+ SkScalar rx = SkScalarHalf(dx);
+ SkScalar ry = SkScalarHalf(dy);
+ SkRect outer, tmp;
+
+ outer.setLTRB(r.fLeft - rx, r.fTop - ry, r.fRight + rx, r.fBottom + ry);
+
+ if (r.width() <= dx || r.height() <= dy) {
+ SkScan::FillRect(outer, clip, blitter);
+ return;
+ }
+
+ tmp.setLTRB(outer.fLeft, outer.fTop, outer.fRight, outer.fTop + dy);
+ SkScan::FillRect(tmp, clip, blitter);
+ tmp.fTop = outer.fBottom - dy;
+ tmp.fBottom = outer.fBottom;
+ SkScan::FillRect(tmp, clip, blitter);
+
+ tmp.setLTRB(outer.fLeft, outer.fTop + dy, outer.fLeft + dx, outer.fBottom - dy);
+ SkScan::FillRect(tmp, clip, blitter);
+ tmp.fLeft = outer.fRight - dx;
+ tmp.fRight = outer.fRight;
+ SkScan::FillRect(tmp, clip, blitter);
+}
+
+void SkScan::HairLine(const SkPoint pts[], int count, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ HairLineRgn(pts, count, &clip.bwRgn(), blitter);
+ } else {
+ const SkRegion* clipRgn = nullptr;
+
+ SkRect r;
+ r.setBounds(pts, count);
+ r.outset(SK_ScalarHalf, SK_ScalarHalf);
+
+ SkAAClipBlitterWrapper wrap;
+ if (!clip.quickContains(r.roundOut())) {
+ wrap.init(clip, blitter);
+ blitter = wrap.getBlitter();
+ clipRgn = &wrap.getRgn();
+ }
+ HairLineRgn(pts, count, clipRgn, blitter);
+ }
+}
+
+void SkScan::AntiHairLine(const SkPoint pts[], int count, const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isBW()) {
+ AntiHairLineRgn(pts, count, &clip.bwRgn(), blitter);
+ } else {
+ const SkRegion* clipRgn = nullptr;
+
+ SkRect r;
+ r.setBounds(pts, count);
+
+ SkAAClipBlitterWrapper wrap;
+ if (!clip.quickContains(r.roundOut().makeOutset(1, 1))) {
+ wrap.init(clip, blitter);
+ blitter = wrap.getBlitter();
+ clipRgn = &wrap.getRgn();
+ }
+ AntiHairLineRgn(pts, count, clipRgn, blitter);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_Path.cpp b/gfx/skia/skia/src/core/SkScan_Path.cpp
new file mode 100644
index 0000000000..259e554a82
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_Path.cpp
@@ -0,0 +1,784 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkSafe32.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkTSort.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkEdge.h"
+#include "src/core/SkEdgeBuilder.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkQuadClipper.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkRectPriv.h"
+#include "src/core/SkScanPriv.h"
+
+#include <utility>
+
+#define kEDGE_HEAD_Y SK_MinS32
+#define kEDGE_TAIL_Y SK_MaxS32
+
+#ifdef SK_DEBUG
+ static void validate_sort(const SkEdge* edge) {
+ int y = kEDGE_HEAD_Y;
+
+ while (edge->fFirstY != SK_MaxS32) {
+ edge->validate();
+ SkASSERT(y <= edge->fFirstY);
+
+ y = edge->fFirstY;
+ edge = edge->fNext;
+ }
+ }
+#else
+ #define validate_sort(edge)
+#endif
+
+static void insert_new_edges(SkEdge* newEdge, int curr_y) {
+ if (newEdge->fFirstY != curr_y) {
+ return;
+ }
+ SkEdge* prev = newEdge->fPrev;
+ if (prev->fX <= newEdge->fX) {
+ return;
+ }
+ // find first x pos to insert
+ SkEdge* start = backward_insert_start(prev, newEdge->fX);
+ // insert the lot, fixing up the links as we go
+ do {
+ SkEdge* next = newEdge->fNext;
+ do {
+ if (start->fNext == newEdge) {
+ goto nextEdge;
+ }
+ SkEdge* after = start->fNext;
+ if (after->fX >= newEdge->fX) {
+ break;
+ }
+ start = after;
+ } while (true);
+ remove_edge(newEdge);
+ insert_edge_after(newEdge, start);
+nextEdge:
+ start = newEdge;
+ newEdge = next;
+ } while (newEdge->fFirstY == curr_y);
+}
+
+#ifdef SK_DEBUG
+static void validate_edges_for_y(const SkEdge* edge, int curr_y) {
+ while (edge->fFirstY <= curr_y) {
+ SkASSERT(edge->fPrev && edge->fNext);
+ SkASSERT(edge->fPrev->fNext == edge);
+ SkASSERT(edge->fNext->fPrev == edge);
+ SkASSERT(edge->fFirstY <= edge->fLastY);
+
+ SkASSERT(edge->fPrev->fX <= edge->fX);
+ edge = edge->fNext;
+ }
+}
+#else
+ #define validate_edges_for_y(edge, curr_y)
+#endif
+
+#if defined _WIN32 // disable warning : local variable used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+typedef void (*PrePostProc)(SkBlitter* blitter, int y, bool isStartOfScanline);
+#define PREPOST_START true
+#define PREPOST_END false
+
+static void walk_edges(SkEdge* prevHead, SkPathFillType fillType,
+ SkBlitter* blitter, int start_y, int stop_y,
+ PrePostProc proc, int rightClip) {
+ validate_sort(prevHead->fNext);
+
+ int curr_y = start_y;
+ int windingMask = SkPathFillType_IsEvenOdd(fillType) ? 1 : -1;
+
+ for (;;) {
+ int w = 0;
+ int left SK_INIT_TO_AVOID_WARNING;
+ SkEdge* currE = prevHead->fNext;
+ SkFixed prevX = prevHead->fX;
+
+ validate_edges_for_y(currE, curr_y);
+
+ if (proc) {
+ proc(blitter, curr_y, PREPOST_START); // pre-proc
+ }
+
+ while (currE->fFirstY <= curr_y) {
+ SkASSERT(currE->fLastY >= curr_y);
+
+ int x = SkFixedRoundToInt(currE->fX);
+
+ if ((w & windingMask) == 0) { // we're starting interval
+ left = x;
+ }
+
+ w += currE->fWinding;
+
+ if ((w & windingMask) == 0) { // we finished an interval
+ int width = x - left;
+ SkASSERT(width >= 0);
+ if (width > 0) {
+ blitter->blitH(left, curr_y, width);
+ }
+ }
+
+ SkEdge* next = currE->fNext;
+ SkFixed newX;
+
+ if (currE->fLastY == curr_y) { // are we done with this edge?
+ if (currE->fCurveCount > 0) {
+ if (((SkQuadraticEdge*)currE)->updateQuadratic()) {
+ newX = currE->fX;
+ goto NEXT_X;
+ }
+ } else if (currE->fCurveCount < 0) {
+ if (((SkCubicEdge*)currE)->updateCubic()) {
+ SkASSERT(currE->fFirstY == curr_y + 1);
+
+ newX = currE->fX;
+ goto NEXT_X;
+ }
+ }
+ remove_edge(currE);
+ } else {
+ SkASSERT(currE->fLastY > curr_y);
+ newX = currE->fX + currE->fDX;
+ currE->fX = newX;
+ NEXT_X:
+ if (newX < prevX) { // ripple currE backwards until it is x-sorted
+ backward_insert_edge_based_on_x(currE);
+ } else {
+ prevX = newX;
+ }
+ }
+ currE = next;
+ SkASSERT(currE);
+ }
+
+ if ((w & windingMask) != 0) { // was our right-edge culled away?
+ int width = rightClip - left;
+ if (width > 0) {
+ blitter->blitH(left, curr_y, width);
+ }
+ }
+
+ if (proc) {
+ proc(blitter, curr_y, PREPOST_END); // post-proc
+ }
+
+ curr_y += 1;
+ if (curr_y >= stop_y) {
+ break;
+ }
+ // now currE points to the first edge with a Yint larger than curr_y
+ insert_new_edges(currE, curr_y);
+ }
+}
+
+// return true if we're NOT done with this edge
+static bool update_edge(SkEdge* edge, int last_y) {
+ SkASSERT(edge->fLastY >= last_y);
+ if (last_y == edge->fLastY) {
+ if (edge->fCurveCount < 0) {
+ if (((SkCubicEdge*)edge)->updateCubic()) {
+ SkASSERT(edge->fFirstY == last_y + 1);
+ return true;
+ }
+ } else if (edge->fCurveCount > 0) {
+ if (((SkQuadraticEdge*)edge)->updateQuadratic()) {
+ SkASSERT(edge->fFirstY == last_y + 1);
+ return true;
+ }
+ }
+ return false;
+ }
+ return true;
+}
+
+// Unexpected conditions for which we need to return
+#define ASSERT_RETURN(cond) \
+ do { \
+ if (!(cond)) { \
+ SkDEBUGFAILF("assert(%s)", #cond); \
+ return; \
+ } \
+ } while (0)
+
+// Needs Y to only change once (looser than convex in X)
+static void walk_simple_edges(SkEdge* prevHead, SkBlitter* blitter, int start_y, int stop_y) {
+ validate_sort(prevHead->fNext);
+
+ SkEdge* leftE = prevHead->fNext;
+ SkEdge* riteE = leftE->fNext;
+ SkEdge* currE = riteE->fNext;
+
+ // our edge choppers for curves can result in the initial edges
+ // not lining up, so we take the max.
+ int local_top = std::max(leftE->fFirstY, riteE->fFirstY);
+ ASSERT_RETURN(local_top >= start_y);
+
+ while (local_top < stop_y) {
+ SkASSERT(leftE->fFirstY <= stop_y);
+ SkASSERT(riteE->fFirstY <= stop_y);
+
+ int local_bot = std::min(leftE->fLastY, riteE->fLastY);
+ local_bot = std::min(local_bot, stop_y - 1);
+ ASSERT_RETURN(local_top <= local_bot);
+
+ SkFixed left = leftE->fX;
+ SkFixed dLeft = leftE->fDX;
+ SkFixed rite = riteE->fX;
+ SkFixed dRite = riteE->fDX;
+ int count = local_bot - local_top;
+ ASSERT_RETURN(count >= 0);
+
+ if (0 == (dLeft | dRite)) {
+ int L = SkFixedRoundToInt(left);
+ int R = SkFixedRoundToInt(rite);
+ if (L > R) {
+ std::swap(L, R);
+ }
+ if (L < R) {
+ count += 1;
+ blitter->blitRect(L, local_top, R - L, count);
+ }
+ local_top = local_bot + 1;
+ } else {
+ do {
+ int L = SkFixedRoundToInt(left);
+ int R = SkFixedRoundToInt(rite);
+ if (L > R) {
+ std::swap(L, R);
+ }
+ if (L < R) {
+ blitter->blitH(L, local_top, R - L);
+ }
+ // Either/both of these might overflow, since we perform this step even if
+ // (later) we determine that we are done with the edge, and so the computed
+ // left or rite edge will not be used (see update_edge). Use this helper to
+ // silence UBSAN when we perform the add.
+ left = Sk32_can_overflow_add(left, dLeft);
+ rite = Sk32_can_overflow_add(rite, dRite);
+ local_top += 1;
+ } while (--count >= 0);
+ }
+
+ leftE->fX = left;
+ riteE->fX = rite;
+
+ if (!update_edge(leftE, local_bot)) {
+ if (currE->fFirstY >= stop_y) {
+ return; // we're done
+ }
+ leftE = currE;
+ currE = currE->fNext;
+ ASSERT_RETURN(leftE->fFirstY == local_top);
+ }
+ if (!update_edge(riteE, local_bot)) {
+ if (currE->fFirstY >= stop_y) {
+ return; // we're done
+ }
+ riteE = currE;
+ currE = currE->fNext;
+ ASSERT_RETURN(riteE->fFirstY == local_top);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// this overrides blitH, and will call its proxy blitter with the inverse
+// of the spans it is given (clipped to the left/right of the cliprect)
+//
+// used to implement inverse filltypes on paths
+//
+class InverseBlitter : public SkBlitter {
+public:
+ void setBlitter(SkBlitter* blitter, const SkIRect& clip, int shift) {
+ fBlitter = blitter;
+ fFirstX = clip.fLeft << shift;
+ fLastX = clip.fRight << shift;
+ }
+ void prepost(int y, bool isStart) {
+ if (isStart) {
+ fPrevX = fFirstX;
+ } else {
+ int invWidth = fLastX - fPrevX;
+ if (invWidth > 0) {
+ fBlitter->blitH(fPrevX, y, invWidth);
+ }
+ }
+ }
+
+ // overrides
+ void blitH(int x, int y, int width) override {
+ int invWidth = x - fPrevX;
+ if (invWidth > 0) {
+ fBlitter->blitH(fPrevX, y, invWidth);
+ }
+ fPrevX = x + width;
+ }
+
+ // we do not expect to get called with these entrypoints
+ void blitAntiH(int, int, const SkAlpha[], const int16_t runs[]) override {
+ SkDEBUGFAIL("blitAntiH unexpected");
+ }
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkDEBUGFAIL("blitV unexpected");
+ }
+ void blitRect(int x, int y, int width, int height) override {
+ SkDEBUGFAIL("blitRect unexpected");
+ }
+ void blitMask(const SkMask&, const SkIRect& clip) override {
+ SkDEBUGFAIL("blitMask unexpected");
+ }
+ const SkPixmap* justAnOpaqueColor(uint32_t* value) override {
+ SkDEBUGFAIL("justAnOpaqueColor unexpected");
+ return nullptr;
+ }
+
+private:
+ SkBlitter* fBlitter;
+ int fFirstX, fLastX, fPrevX;
+};
+
+static void PrePostInverseBlitterProc(SkBlitter* blitter, int y, bool isStart) {
+ ((InverseBlitter*)blitter)->prepost(y, isStart);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+static bool operator<(const SkEdge& a, const SkEdge& b) {
+ int valuea = a.fFirstY;
+ int valueb = b.fFirstY;
+
+ if (valuea == valueb) {
+ valuea = a.fX;
+ valueb = b.fX;
+ }
+
+ return valuea < valueb;
+}
+
+static SkEdge* sort_edges(SkEdge* list[], int count, SkEdge** last) {
+ SkTQSort(list, list + count);
+
+ // now make the edges linked in sorted order
+ for (int i = 1; i < count; i++) {
+ list[i - 1]->fNext = list[i];
+ list[i]->fPrev = list[i - 1];
+ }
+
+ *last = list[count - 1];
+ return list[0];
+}
+
+// clipRect has not been shifted up
+void sk_fill_path(const SkPath& path, const SkIRect& clipRect, SkBlitter* blitter,
+ int start_y, int stop_y, int shiftEdgesUp, bool pathContainedInClip) {
+ SkASSERT(blitter);
+
+ SkIRect shiftedClip = clipRect;
+ shiftedClip.fLeft = SkLeftShift(shiftedClip.fLeft, shiftEdgesUp);
+ shiftedClip.fRight = SkLeftShift(shiftedClip.fRight, shiftEdgesUp);
+ shiftedClip.fTop = SkLeftShift(shiftedClip.fTop, shiftEdgesUp);
+ shiftedClip.fBottom = SkLeftShift(shiftedClip.fBottom, shiftEdgesUp);
+
+ SkBasicEdgeBuilder builder(shiftEdgesUp);
+ int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &shiftedClip);
+ SkEdge** list = builder.edgeList();
+
+ if (0 == count) {
+ if (path.isInverseFillType()) {
+ /*
+ * Since we are in inverse-fill, our caller has already drawn above
+ * our top (start_y) and will draw below our bottom (stop_y). Thus
+ * we need to restrict our drawing to the intersection of the clip
+ * and those two limits.
+ */
+ SkIRect rect = clipRect;
+ if (rect.fTop < start_y) {
+ rect.fTop = start_y;
+ }
+ if (rect.fBottom > stop_y) {
+ rect.fBottom = stop_y;
+ }
+ if (!rect.isEmpty()) {
+ blitter->blitRect(rect.fLeft << shiftEdgesUp,
+ rect.fTop << shiftEdgesUp,
+ rect.width() << shiftEdgesUp,
+ rect.height() << shiftEdgesUp);
+ }
+ }
+ return;
+ }
+
+ SkEdge headEdge, tailEdge, *last;
+ // this returns the first and last edge after they're sorted into a dlink list
+ SkEdge* edge = sort_edges(list, count, &last);
+
+ headEdge.fPrev = nullptr;
+ headEdge.fNext = edge;
+ headEdge.fFirstY = kEDGE_HEAD_Y;
+ headEdge.fX = SK_MinS32;
+ edge->fPrev = &headEdge;
+
+ tailEdge.fPrev = last;
+ tailEdge.fNext = nullptr;
+ tailEdge.fFirstY = kEDGE_TAIL_Y;
+ last->fNext = &tailEdge;
+
+ // now edge is the head of the sorted linklist
+
+ start_y = SkLeftShift(start_y, shiftEdgesUp);
+ stop_y = SkLeftShift(stop_y, shiftEdgesUp);
+ if (!pathContainedInClip && start_y < shiftedClip.fTop) {
+ start_y = shiftedClip.fTop;
+ }
+ if (!pathContainedInClip && stop_y > shiftedClip.fBottom) {
+ stop_y = shiftedClip.fBottom;
+ }
+
+ InverseBlitter ib;
+ PrePostProc proc = nullptr;
+
+ if (path.isInverseFillType()) {
+ ib.setBlitter(blitter, clipRect, shiftEdgesUp);
+ blitter = &ib;
+ proc = PrePostInverseBlitterProc;
+ }
+
+ // count >= 2 is required as the convex walker does not handle missing right edges
+ if (path.isConvex() && (nullptr == proc) && count >= 2) {
+ walk_simple_edges(&headEdge, blitter, start_y, stop_y);
+ } else {
+ walk_edges(&headEdge, path.getFillType(), blitter, start_y, stop_y, proc,
+ shiftedClip.right());
+ }
+}
+
+void sk_blit_above(SkBlitter* blitter, const SkIRect& ir, const SkRegion& clip) {
+ const SkIRect& cr = clip.getBounds();
+ SkIRect tmp;
+
+ tmp.fLeft = cr.fLeft;
+ tmp.fRight = cr.fRight;
+ tmp.fTop = cr.fTop;
+ tmp.fBottom = ir.fTop;
+ if (!tmp.isEmpty()) {
+ blitter->blitRectRegion(tmp, clip);
+ }
+}
+
+void sk_blit_below(SkBlitter* blitter, const SkIRect& ir, const SkRegion& clip) {
+ const SkIRect& cr = clip.getBounds();
+ SkIRect tmp;
+
+ tmp.fLeft = cr.fLeft;
+ tmp.fRight = cr.fRight;
+ tmp.fTop = ir.fBottom;
+ tmp.fBottom = cr.fBottom;
+ if (!tmp.isEmpty()) {
+ blitter->blitRectRegion(tmp, clip);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * If the caller is drawing an inverse-fill path, then it pass true for
+ * skipRejectTest, so we don't abort drawing just because the src bounds (ir)
+ * is outside of the clip.
+ */
+SkScanClipper::SkScanClipper(SkBlitter* blitter, const SkRegion* clip,
+ const SkIRect& ir, bool skipRejectTest, bool irPreClipped) {
+ fBlitter = nullptr; // null means blit nothing
+ fClipRect = nullptr;
+
+ if (clip) {
+ fClipRect = &clip->getBounds();
+ if (!skipRejectTest && !SkIRect::Intersects(*fClipRect, ir)) { // completely clipped out
+ return;
+ }
+
+ if (clip->isRect()) {
+ if (!irPreClipped && fClipRect->contains(ir)) {
+#ifdef SK_DEBUG
+ fRectClipCheckBlitter.init(blitter, *fClipRect);
+ blitter = &fRectClipCheckBlitter;
+#endif
+ fClipRect = nullptr;
+ } else {
+ // only need a wrapper blitter if we're horizontally clipped
+ if (irPreClipped ||
+ fClipRect->fLeft > ir.fLeft || fClipRect->fRight < ir.fRight) {
+ fRectBlitter.init(blitter, *fClipRect);
+ blitter = &fRectBlitter;
+ } else {
+#ifdef SK_DEBUG
+ fRectClipCheckBlitter.init(blitter, *fClipRect);
+ blitter = &fRectClipCheckBlitter;
+#endif
+ }
+ }
+ } else {
+ fRgnBlitter.init(blitter, clip);
+ blitter = &fRgnBlitter;
+ }
+ }
+ fBlitter = blitter;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool clip_to_limit(const SkRegion& orig, SkRegion* reduced) {
+ // need to limit coordinates such that the width/height of our rect can be represented
+ // in SkFixed (16.16). See skbug.com/7998
+ const int32_t limit = 32767 >> 1;
+
+ SkIRect limitR;
+ limitR.setLTRB(-limit, -limit, limit, limit);
+ if (limitR.contains(orig.getBounds())) {
+ return false;
+ }
+ reduced->op(orig, limitR, SkRegion::kIntersect_Op);
+ return true;
+}
+
+// Bias used for conservative rounding of float rects to int rects, to nudge the irects a little
+// larger, so we don't "think" a path's bounds are inside a clip, when (due to numeric drift in
+// the scan-converter) we might walk beyond the predicted limits.
+//
+// This value has been determined trial and error: pick the smallest value (after the 0.5) that
+// fixes any problematic cases (e.g. crbug.com/844457)
+// NOTE: cubics appear to be the main reason for needing this slop. If we could (perhaps) have a
+// more accurate walker for cubics, we may be able to reduce this fudge factor.
+static const double kConservativeRoundBias = 0.5 + 1.5 / SK_FDot6One;
+
+/**
+ * Round the value down. This is used to round the top and left of a rectangle,
+ * and corresponds to the way the scan converter treats the top and left edges.
+ * It has a slight bias to make the "rounded" int smaller than a normal round, to create a more
+ * conservative int-bounds (larger) from a float rect.
+ */
+static inline int round_down_to_int(SkScalar x) {
+ double xx = x;
+ xx -= kConservativeRoundBias;
+ return sk_double_saturate2int(ceil(xx));
+}
+
+/**
+ * Round the value up. This is used to round the right and bottom of a rectangle.
+ * It has a slight bias to make the "rounded" int smaller than a normal round, to create a more
+ * conservative int-bounds (larger) from a float rect.
+ */
+static inline int round_up_to_int(SkScalar x) {
+ double xx = x;
+ xx += kConservativeRoundBias;
+ return sk_double_saturate2int(floor(xx));
+}
+
+/*
+ * Conservative rounding function, which effectively nudges the int-rect to be slightly larger
+ * than SkRect::round() might have produced. This is a safety-net for the scan-converter, which
+ * inspects the returned int-rect, and may disable clipping (for speed) if it thinks all of the
+ * edges will fit inside the clip's bounds. The scan-converter introduces slight numeric errors
+ * due to accumulated += of the slope, so this function is used to return a conservatively large
+ * int-bounds, and thus we will only disable clipping if we're sure the edges will stay in-bounds.
+ */
+static SkIRect conservative_round_to_int(const SkRect& src) {
+ return {
+ round_down_to_int(src.fLeft),
+ round_down_to_int(src.fTop),
+ round_up_to_int(src.fRight),
+ round_up_to_int(src.fBottom),
+ };
+}
+
+void SkScan::FillPath(const SkPath& path, const SkRegion& origClip,
+ SkBlitter* blitter) {
+ if (origClip.isEmpty()) {
+ return;
+ }
+
+ // Our edges are fixed-point, and don't like the bounds of the clip to
+ // exceed that. Here we trim the clip just so we don't overflow later on
+ const SkRegion* clipPtr = &origClip;
+ SkRegion finiteClip;
+ if (clip_to_limit(origClip, &finiteClip)) {
+ if (finiteClip.isEmpty()) {
+ return;
+ }
+ clipPtr = &finiteClip;
+ }
+ // don't reference "origClip" any more, just use clipPtr
+
+
+ SkRect bounds = path.getBounds();
+ bool irPreClipped = false;
+ if (!SkRectPriv::MakeLargeS32().contains(bounds)) {
+ if (!bounds.intersect(SkRectPriv::MakeLargeS32())) {
+ bounds.setEmpty();
+ }
+ irPreClipped = true;
+ }
+
+ SkIRect ir = conservative_round_to_int(bounds);
+ if (ir.isEmpty()) {
+ if (path.isInverseFillType()) {
+ blitter->blitRegion(*clipPtr);
+ }
+ return;
+ }
+
+ SkScanClipper clipper(blitter, clipPtr, ir, path.isInverseFillType(), irPreClipped);
+
+ blitter = clipper.getBlitter();
+ if (blitter) {
+ // we have to keep our calls to blitter in sorted order, so we
+ // must blit the above section first, then the middle, then the bottom.
+ if (path.isInverseFillType()) {
+ sk_blit_above(blitter, ir, *clipPtr);
+ }
+ SkASSERT(clipper.getClipRect() == nullptr ||
+ *clipper.getClipRect() == clipPtr->getBounds());
+ sk_fill_path(path, clipPtr->getBounds(), blitter, ir.fTop, ir.fBottom,
+ 0, clipper.getClipRect() == nullptr);
+ if (path.isInverseFillType()) {
+ sk_blit_below(blitter, ir, *clipPtr);
+ }
+ } else {
+ // what does it mean to not have a blitter if path.isInverseFillType???
+ }
+}
+
+void SkScan::FillPath(const SkPath& path, const SkIRect& ir,
+ SkBlitter* blitter) {
+ SkRegion rgn(ir);
+ FillPath(path, rgn, blitter);
+}
+
+bool SkScan::PathRequiresTiling(const SkIRect& bounds) {
+ SkRegion out; // ignored
+ return clip_to_limit(SkRegion(bounds), &out);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static int build_tri_edges(SkEdge edge[], const SkPoint pts[],
+ const SkIRect* clipRect, SkEdge* list[]) {
+ SkEdge** start = list;
+
+ if (edge->setLine(pts[0], pts[1], clipRect, 0)) {
+ *list++ = edge;
+ edge = (SkEdge*)((char*)edge + sizeof(SkEdge));
+ }
+ if (edge->setLine(pts[1], pts[2], clipRect, 0)) {
+ *list++ = edge;
+ edge = (SkEdge*)((char*)edge + sizeof(SkEdge));
+ }
+ if (edge->setLine(pts[2], pts[0], clipRect, 0)) {
+ *list++ = edge;
+ }
+ return (int)(list - start);
+}
+
+
+static void sk_fill_triangle(const SkPoint pts[], const SkIRect* clipRect,
+ SkBlitter* blitter, const SkIRect& ir) {
+ SkASSERT(pts && blitter);
+
+ SkEdge edgeStorage[3];
+ SkEdge* list[3];
+
+ int count = build_tri_edges(edgeStorage, pts, clipRect, list);
+ if (count < 2) {
+ return;
+ }
+
+ SkEdge headEdge, tailEdge, *last;
+
+ // this returns the first and last edge after they're sorted into a dlink list
+ SkEdge* edge = sort_edges(list, count, &last);
+
+ headEdge.fPrev = nullptr;
+ headEdge.fNext = edge;
+ headEdge.fFirstY = kEDGE_HEAD_Y;
+ headEdge.fX = SK_MinS32;
+ edge->fPrev = &headEdge;
+
+ tailEdge.fPrev = last;
+ tailEdge.fNext = nullptr;
+ tailEdge.fFirstY = kEDGE_TAIL_Y;
+ last->fNext = &tailEdge;
+
+ // now edge is the head of the sorted linklist
+ int stop_y = ir.fBottom;
+ if (clipRect && stop_y > clipRect->fBottom) {
+ stop_y = clipRect->fBottom;
+ }
+ int start_y = ir.fTop;
+ if (clipRect && start_y < clipRect->fTop) {
+ start_y = clipRect->fTop;
+ }
+ walk_simple_edges(&headEdge, blitter, start_y, stop_y);
+}
+
+void SkScan::FillTriangle(const SkPoint pts[], const SkRasterClip& clip,
+ SkBlitter* blitter) {
+ if (clip.isEmpty()) {
+ return;
+ }
+
+ SkRect r;
+ r.setBounds(pts, 3);
+ // If r is too large (larger than can easily fit in SkFixed) then we need perform geometric
+ // clipping. This is a bit of work, so we just call the general FillPath() to handle it.
+ // Use FixedMax/2 as the limit so we can subtract two edges and still store that in Fixed.
+ const SkScalar limit = SK_MaxS16 >> 1;
+ if (!SkRect::MakeLTRB(-limit, -limit, limit, limit).contains(r)) {
+ SkPath path;
+ path.addPoly(pts, 3, false);
+ FillPath(path, clip, blitter);
+ return;
+ }
+
+ SkIRect ir = conservative_round_to_int(r);
+ if (ir.isEmpty() || !SkIRect::Intersects(ir, clip.getBounds())) {
+ return;
+ }
+
+ SkAAClipBlitterWrapper wrap;
+ const SkRegion* clipRgn;
+ if (clip.isBW()) {
+ clipRgn = &clip.bwRgn();
+ } else {
+ wrap.init(clip, blitter);
+ clipRgn = &wrap.getRgn();
+ blitter = wrap.getBlitter();
+ }
+
+ SkScanClipper clipper(blitter, clipRgn, ir);
+ blitter = clipper.getBlitter();
+ if (blitter) {
+ sk_fill_triangle(pts, clipper.getClipRect(), blitter, ir);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkScan_SAAPath.cpp b/gfx/skia/skia/src/core/SkScan_SAAPath.cpp
new file mode 100644
index 0000000000..ec9b1ecea0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkScan_SAAPath.cpp
@@ -0,0 +1,611 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkScanPriv.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkRegion.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkAntiRun.h"
+#include "src/core/SkBlitter.h"
+
+#if defined(SK_FORCE_AAA)
+
+void SkScan::SAAFillPath(const SkPath&, SkBlitter*, const SkIRect&, const SkIRect&, bool) {
+ SkDEBUGFAIL("SAA Disabled");
+}
+
+#else
+
+#define SHIFT SK_SUPERSAMPLE_SHIFT
+#define SCALE (1 << SHIFT)
+#define MASK (SCALE - 1)
+
+/** @file
+ We have two techniques for capturing the output of the supersampler:
+ - SUPERMASK, which records a large mask-bitmap
+ this is often faster for small, complex objects
+ - RLE, which records a rle-encoded scanline
+ this is often faster for large objects with big spans
+
+ These blitters use two coordinate systems:
+ - destination coordinates, scale equal to the output - often
+ abbreviated with 'i' or 'I' in variable names
+ - supersampled coordinates, scale equal to the output * SCALE
+ */
+
+//#define FORCE_SUPERMASK
+//#define FORCE_RLE
+
+///////////////////////////////////////////////////////////////////////////////
+
+/// Base class for a single-pass supersampled blitter.
+class BaseSuperBlitter : public SkBlitter {
+public:
+ BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+ const SkIRect& clipBounds, bool isInverse);
+
+ /// Must be explicitly defined on subclasses.
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
+ SkDEBUGFAIL("How did I get here?");
+ }
+ /// May not be called on BaseSuperBlitter because it blits out of order.
+ void blitV(int x, int y, int height, SkAlpha alpha) override {
+ SkDEBUGFAIL("How did I get here?");
+ }
+
+protected:
+ SkBlitter* fRealBlitter;
+ /// Current y coordinate, in destination coordinates.
+ int fCurrIY;
+ /// Widest row of region to be blitted, in destination coordinates.
+ int fWidth;
+ /// Leftmost x coordinate in any row, in destination coordinates.
+ int fLeft;
+ /// Leftmost x coordinate in any row, in supersampled coordinates.
+ int fSuperLeft;
+
+ SkDEBUGCODE(int fCurrX;)
+ /// Current y coordinate in supersampled coordinates.
+ int fCurrY;
+ /// Initial y coordinate (top of bounds).
+ int fTop;
+
+ SkIRect fSectBounds;
+};
+
+BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir,
+ const SkIRect& clipBounds, bool isInverse) {
+ fRealBlitter = realBlit;
+
+ SkIRect sectBounds;
+ if (isInverse) {
+ // We use the clip bounds instead of the ir, since we may be asked to
+ //draw outside of the rect when we're a inverse filltype
+ sectBounds = clipBounds;
+ } else {
+ if (!sectBounds.intersect(ir, clipBounds)) {
+ sectBounds.setEmpty();
+ }
+ }
+
+ const int left = sectBounds.left();
+ const int right = sectBounds.right();
+
+ fLeft = left;
+ fSuperLeft = SkLeftShift(left, SHIFT);
+ fWidth = right - left;
+ fTop = sectBounds.top();
+ fCurrIY = fTop - 1;
+ fCurrY = SkLeftShift(fTop, SHIFT) - 1;
+
+ SkDEBUGCODE(fCurrX = -1;)
+}
+
+/// Run-length-encoded supersampling antialiased blitter.
+class SuperBlitter : public BaseSuperBlitter {
+public:
+ SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
+ bool isInverse);
+
+ ~SuperBlitter() override {
+ this->flush();
+ }
+
+ /// Once fRuns contains a complete supersampled row, flush() blits
+ /// it out through the wrapped blitter.
+ void flush();
+
+ /// Blits a row of pixels, with location and width specified
+ /// in supersampled coordinates.
+ void blitH(int x, int y, int width) override;
+ /// Blits a rectangle of pixels, with location and size specified
+ /// in supersampled coordinates.
+ void blitRect(int x, int y, int width, int height) override;
+
+private:
+ // The next three variables are used to track a circular buffer that
+ // contains the values used in SkAlphaRuns. These variables should only
+ // ever be updated in advanceRuns(), and fRuns should always point to
+ // a valid SkAlphaRuns...
+ int fRunsToBuffer;
+ void* fRunsBuffer;
+ int fCurrentRun;
+ SkAlphaRuns fRuns;
+
+ // extra one to store the zero at the end
+ int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }
+
+ // This function updates the fRuns variable to point to the next buffer space
+ // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
+ // and resets fRuns to point to an empty scanline.
+ void advanceRuns() {
+ const size_t kRunsSz = this->getRunsSz();
+ fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
+ fRuns.fRuns = reinterpret_cast<int16_t*>(
+ reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
+ fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
+ fRuns.reset(fWidth);
+ }
+
+ int fOffsetX;
+};
+
+SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
+ bool isInverse)
+ : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
+{
+ fRunsToBuffer = realBlitter->requestRowsPreserved();
+ fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
+ fCurrentRun = -1;
+
+ this->advanceRuns();
+
+ fOffsetX = 0;
+}
+
+void SuperBlitter::flush() {
+ if (fCurrIY >= fTop) {
+
+ SkASSERT(fCurrentRun < fRunsToBuffer);
+ if (!fRuns.empty()) {
+ // SkDEBUGCODE(fRuns.dump();)
+ fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
+ this->advanceRuns();
+ fOffsetX = 0;
+ }
+
+ fCurrIY = fTop - 1;
+ SkDEBUGCODE(fCurrX = -1;)
+ }
+}
+
+/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
+ *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
+ to produce a final value in [0, 255] and handles clamping 256->255
+ itself, with the same (alpha - (alpha >> 8)) correction as
+ coverage_to_exact_alpha().
+*/
+static inline int coverage_to_partial_alpha(int aa) {
+ aa <<= 8 - 2*SHIFT;
+ return aa;
+}
+
+/** coverage_to_exact_alpha() is being used by our blitter, which wants
+ a final value in [0, 255].
+*/
+static inline int coverage_to_exact_alpha(int aa) {
+ int alpha = (256 >> SHIFT) * aa;
+ // clamp 256->255
+ return alpha - (alpha >> 8);
+}
+
+void SuperBlitter::blitH(int x, int y, int width) {
+ SkASSERT(width > 0);
+
+ int iy = y >> SHIFT;
+ SkASSERT(iy >= fCurrIY);
+
+ x -= fSuperLeft;
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+#ifdef SK_DEBUG
+ SkASSERT(y != fCurrY || x >= fCurrX);
+#endif
+ SkASSERT(y >= fCurrY);
+ if (fCurrY != y) {
+ fOffsetX = 0;
+ fCurrY = y;
+ }
+
+ if (iy != fCurrIY) { // new scanline
+ this->flush();
+ fCurrIY = iy;
+ }
+
+ int start = x;
+ int stop = x + width;
+
+ SkASSERT(start >= 0 && stop > start);
+ // integer-pixel-aligned ends of blit, rounded out
+ int fb = start & MASK;
+ int fe = stop & MASK;
+ int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
+
+ if (n < 0) {
+ fb = fe - fb;
+ n = 0;
+ fe = 0;
+ } else {
+ if (fb == 0) {
+ n += 1;
+ } else {
+ fb = SCALE - fb;
+ }
+ }
+
+ fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
+ n, coverage_to_partial_alpha(fe),
+ (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
+ fOffsetX);
+
+#ifdef SK_DEBUG
+ fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
+ fCurrX = x + width;
+#endif
+}
+
+#if 0 // UNUSED
+static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
+ int n, U8CPU riteA) {
+ SkASSERT(leftA <= 0xFF);
+ SkASSERT(riteA <= 0xFF);
+
+ int16_t* run = runs.fRuns;
+ uint8_t* aa = runs.fAlpha;
+
+ if (ileft > 0) {
+ run[0] = ileft;
+ aa[0] = 0;
+ run += ileft;
+ aa += ileft;
+ }
+
+ SkASSERT(leftA < 0xFF);
+ if (leftA > 0) {
+ *run++ = 1;
+ *aa++ = leftA;
+ }
+
+ if (n > 0) {
+ run[0] = n;
+ aa[0] = 0xFF;
+ run += n;
+ aa += n;
+ }
+
+ SkASSERT(riteA < 0xFF);
+ if (riteA > 0) {
+ *run++ = 1;
+ *aa++ = riteA;
+ }
+ run[0] = 0;
+}
+#endif
+
+void SuperBlitter::blitRect(int x, int y, int width, int height) {
+ SkASSERT(width > 0);
+ SkASSERT(height > 0);
+
+ // blit leading rows
+ while ((y & MASK)) {
+ this->blitH(x, y++, width);
+ if (--height <= 0) {
+ return;
+ }
+ }
+ SkASSERT(height > 0);
+
+ // Since this is a rect, instead of blitting supersampled rows one at a
+ // time and then resolving to the destination canvas, we can blit
+ // directly to the destintion canvas one row per SCALE supersampled rows.
+ int start_y = y >> SHIFT;
+ int stop_y = (y + height) >> SHIFT;
+ int count = stop_y - start_y;
+ if (count > 0) {
+ y += count << SHIFT;
+ height -= count << SHIFT;
+
+ // save original X for our tail blitH() loop at the bottom
+ int origX = x;
+
+ x -= fSuperLeft;
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+ // There is always a left column, a middle, and a right column.
+ // ileft is the destination x of the first pixel of the entire rect.
+ // xleft is (SCALE - # of covered supersampled pixels) in that
+ // destination pixel.
+ int ileft = x >> SHIFT;
+ int xleft = x & MASK;
+ // irite is the destination x of the last pixel of the OPAQUE section.
+ // xrite is the number of supersampled pixels extending beyond irite;
+ // xrite/SCALE should give us alpha.
+ int irite = (x + width) >> SHIFT;
+ int xrite = (x + width) & MASK;
+ if (!xrite) {
+ xrite = SCALE;
+ irite--;
+ }
+
+ // Need to call flush() to clean up pending draws before we
+ // even consider blitV(), since otherwise it can look nonmonotonic.
+ SkASSERT(start_y > fCurrIY);
+ this->flush();
+
+ int n = irite - ileft - 1;
+ if (n < 0) {
+ // If n < 0, we'll only have a single partially-transparent column
+ // of pixels to render.
+ xleft = xrite - xleft;
+ SkASSERT(xleft <= SCALE);
+ SkASSERT(xleft > 0);
+ fRealBlitter->blitV(ileft + fLeft, start_y, count,
+ coverage_to_exact_alpha(xleft));
+ } else {
+ // With n = 0, we have two possibly-transparent columns of pixels
+ // to render; with n > 0, we have opaque columns between them.
+
+ xleft = SCALE - xleft;
+
+ // Using coverage_to_exact_alpha is not consistent with blitH()
+ const int coverageL = coverage_to_exact_alpha(xleft);
+ const int coverageR = coverage_to_exact_alpha(xrite);
+
+ SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
+ SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);
+
+ fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
+ coverageL, coverageR);
+ }
+
+ // preamble for our next call to blitH()
+ fCurrIY = stop_y - 1;
+ fOffsetX = 0;
+ fCurrY = y - 1;
+ fRuns.reset(fWidth);
+ x = origX;
+ }
+
+ // catch any remaining few rows
+ SkASSERT(height <= MASK);
+ while (--height >= 0) {
+ this->blitH(x, y++, width);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/// Masked supersampling antialiased blitter.
+class MaskSuperBlitter : public BaseSuperBlitter {
+public:
+ MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect&, bool isInverse);
+ ~MaskSuperBlitter() override {
+ fRealBlitter->blitMask(fMask, fClipRect);
+ }
+
+ void blitH(int x, int y, int width) override;
+
+ static bool CanHandleRect(const SkIRect& bounds) {
+#ifdef FORCE_RLE
+ return false;
+#endif
+ int width = bounds.width();
+ int64_t rb = SkAlign4(width);
+ // use 64bits to detect overflow
+ int64_t storage = rb * bounds.height();
+
+ return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
+ (storage <= MaskSuperBlitter::kMAX_STORAGE);
+ }
+
+private:
+ enum {
+#ifdef FORCE_SUPERMASK
+ kMAX_WIDTH = 2048,
+ kMAX_STORAGE = 1024 * 1024 * 2
+#else
+ kMAX_WIDTH = 32, // so we don't try to do very wide things, where the RLE blitter would be faster
+ kMAX_STORAGE = 1024
+#endif
+ };
+
+ SkMask fMask;
+ SkIRect fClipRect;
+ // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
+ // perform a test to see if stopAlpha != 0
+ uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
+};
+
+MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
+ const SkIRect& clipBounds, bool isInverse)
+ : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
+{
+ SkASSERT(CanHandleRect(ir));
+ SkASSERT(!isInverse);
+
+ fMask.fImage = (uint8_t*)fStorage;
+ fMask.fBounds = ir;
+ fMask.fRowBytes = ir.width();
+ fMask.fFormat = SkMask::kA8_Format;
+
+ fClipRect = ir;
+ if (!fClipRect.intersect(clipBounds)) {
+ SkASSERT(0);
+ fClipRect.setEmpty();
+ }
+
+ // For valgrind, write 1 extra byte at the end so we don't read
+ // uninitialized memory. See comment in add_aa_span and fStorage[].
+ memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
+}
+
+static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
+ /* I should be able to just add alpha[x] + startAlpha.
+ However, if the trailing edge of the previous span and the leading
+ edge of the current span round to the same super-sampled x value,
+ I might overflow to 256 with this add, hence the funny subtract.
+ */
+ unsigned tmp = *alpha + startAlpha;
+ SkASSERT(tmp <= 256);
+ *alpha = SkToU8(tmp - (tmp >> 8));
+}
+
+static inline uint32_t quadplicate_byte(U8CPU value) {
+ uint32_t pair = (value << 8) | value;
+ return (pair << 16) | pair;
+}
+
+// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
+// only ever call us with at most enough to hit 256 (never larger), so it is
+// enough to just subtract the high-bit. Actually clamping with a branch would
+// be slower (e.g. if (tmp > 255) tmp = 255;)
+//
+static inline void saturated_add(uint8_t* ptr, U8CPU add) {
+ unsigned tmp = *ptr + add;
+ SkASSERT(tmp <= 256);
+ *ptr = SkToU8(tmp - (tmp >> 8));
+}
+
+// minimum count before we want to setup an inner loop, adding 4-at-a-time
+#define MIN_COUNT_FOR_QUAD_LOOP 16
+
+static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
+ U8CPU stopAlpha, U8CPU maxValue) {
+ SkASSERT(middleCount >= 0);
+
+ saturated_add(alpha, startAlpha);
+ alpha += 1;
+
+ if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
+ // loop until we're quad-byte aligned
+ while (reinterpret_cast<intptr_t>(alpha) & 0x3) {
+ alpha[0] = SkToU8(alpha[0] + maxValue);
+ alpha += 1;
+ middleCount -= 1;
+ }
+
+ int bigCount = middleCount >> 2;
+ uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
+ uint32_t qval = quadplicate_byte(maxValue);
+ do {
+ *qptr++ += qval;
+ } while (--bigCount > 0);
+
+ middleCount &= 3;
+ alpha = reinterpret_cast<uint8_t*> (qptr);
+ // fall through to the following while-loop
+ }
+
+ while (--middleCount >= 0) {
+ alpha[0] = SkToU8(alpha[0] + maxValue);
+ alpha += 1;
+ }
+
+ // potentially this can be off the end of our "legal" alpha values, but that
+ // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
+ // every time (slow), we just do it, and ensure that we've allocated extra space
+    // (see the + 1 comment in fStorage[]).
+ saturated_add(alpha, stopAlpha);
+}
+
+void MaskSuperBlitter::blitH(int x, int y, int width) {
+ int iy = (y >> SHIFT);
+
+ SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
+ iy -= fMask.fBounds.fTop; // make it relative to 0
+
+ // This should never happen, but it does. Until the true cause is
+ // discovered, let's skip this span instead of crashing.
+ // See http://crbug.com/17569.
+ if (iy < 0) {
+ return;
+ }
+
+#ifdef SK_DEBUG
+ {
+ int ix = x >> SHIFT;
+ SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
+ }
+#endif
+
+ x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);
+
+ // hack, until I figure out why my cubics (I think) go beyond the bounds
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+
+ uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);
+
+ int start = x;
+ int stop = x + width;
+
+ SkASSERT(start >= 0 && stop > start);
+ int fb = start & MASK;
+ int fe = stop & MASK;
+ int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
+
+
+ if (n < 0) {
+ SkASSERT(row >= fMask.fImage);
+ SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
+ add_aa_span(row, coverage_to_partial_alpha(fe - fb));
+ } else {
+ fb = SCALE - fb;
+ SkASSERT(row >= fMask.fImage);
+ SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
+ add_aa_span(row, coverage_to_partial_alpha(fb),
+ n, coverage_to_partial_alpha(fe),
+ (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
+ }
+
+#ifdef SK_DEBUG
+ fCurrX = x + width;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScan::SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
+ const SkIRect& clipBounds, bool forceRLE) {
+ bool containedInClip = clipBounds.contains(ir);
+ bool isInverse = path.isInverseFillType();
+
+ // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
+ // if we're an inverse filltype
+ if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
+ MaskSuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
+ SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
+ sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
+ } else {
+ SuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
+ sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
+ }
+}
+
+#endif // defined(SK_FORCE_AAA)
diff --git a/gfx/skia/skia/src/core/SkSharedMutex.cpp b/gfx/skia/skia/src/core/SkSharedMutex.cpp
new file mode 100644
index 0000000000..f2c62966cd
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSharedMutex.cpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkSharedMutex.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkSemaphore.h"
+
+#include <cinttypes>
+
+#if !defined(__has_feature)
+ #define __has_feature(x) 0
+#endif
+
+#if __has_feature(thread_sanitizer)
+
+ /* Report that a lock has been created at address "lock". */
+ #define ANNOTATE_RWLOCK_CREATE(lock) \
+ AnnotateRWLockCreate(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" is about to be destroyed. */
+ #define ANNOTATE_RWLOCK_DESTROY(lock) \
+ AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" has been acquired.
+ is_w=1 for writer lock, is_w=0 for reader lock. */
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
+
+ /* Report that the lock at address "lock" is about to be released. */
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
+
+ #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
+ #if defined(__GNUC__)
+ #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+ #else
+ /* TODO(glider): for Windows support we may want to change this macro in order
+ to prepend __declspec(selectany) to the annotations' declarations. */
+ #error weak annotations are not supported for your compiler
+ #endif
+ #else
+ #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+ #endif
+
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+
+ extern "C" {
+ void AnnotateRWLockCreate(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockDestroy(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockAcquired(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ void AnnotateRWLockReleased(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+ }
+
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+
+#else
+
+ #define ANNOTATE_RWLOCK_CREATE(lock)
+ #define ANNOTATE_RWLOCK_DESTROY(lock)
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
+
+#endif
+
+#ifdef SK_DEBUG
+
+ #include "include/private/base/SkTDArray.h"
+ #include "include/private/base/SkThreadID.h"
+
+ class SkSharedMutex::ThreadIDSet {
+ public:
+ // Returns true if threadID is in the set.
+ bool find(SkThreadID threadID) const {
+ for (auto& t : fThreadIDs) {
+ if (t == threadID) return true;
+ }
+ return false;
+ }
+
+ // Returns true if did not already exist.
+ bool tryAdd(SkThreadID threadID) {
+ for (auto& t : fThreadIDs) {
+ if (t == threadID) return false;
+ }
+ fThreadIDs.append(1, &threadID);
+ return true;
+ }
+ // Returns true if already exists in Set.
+ bool tryRemove(SkThreadID threadID) {
+ for (int i = 0; i < fThreadIDs.size(); ++i) {
+ if (fThreadIDs[i] == threadID) {
+ fThreadIDs.remove(i);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void swap(ThreadIDSet& other) {
+ fThreadIDs.swap(other.fThreadIDs);
+ }
+
+ int count() const {
+ return fThreadIDs.size();
+ }
+
+ private:
+ SkTDArray<SkThreadID> fThreadIDs;
+ };
+
+ SkSharedMutex::SkSharedMutex()
+ : fCurrentShared(new ThreadIDSet)
+ , fWaitingExclusive(new ThreadIDSet)
+ , fWaitingShared(new ThreadIDSet){
+ ANNOTATE_RWLOCK_CREATE(this);
+ }
+
+ SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+
+ void SkSharedMutex::acquire() {
+ SkThreadID threadID(SkGetThreadID());
+ int currentSharedCount;
+ int waitingExclusiveCount;
+ {
+ SkAutoMutexExclusive l(fMu);
+
+ SkASSERTF(!fCurrentShared->find(threadID),
+ "Thread %" PRIx64 " already has an shared lock\n", threadID);
+
+ if (!fWaitingExclusive->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %" PRIx64 " already has an exclusive lock\n", threadID);
+ }
+
+ currentSharedCount = fCurrentShared->count();
+ waitingExclusiveCount = fWaitingExclusive->count();
+ }
+
+ if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
+ fExclusiveQueue.wait();
+ }
+
+ ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+ }
+
+ // Implementation Detail:
+ // The shared threads need two separate queues to keep the threads that were added after the
+ // exclusive lock separate from the threads added before.
+ void SkSharedMutex::release() {
+ ANNOTATE_RWLOCK_RELEASED(this, 1);
+ SkThreadID threadID(SkGetThreadID());
+ int sharedWaitingCount;
+ int exclusiveWaitingCount;
+ int sharedQueueSelect;
+ {
+ SkAutoMutexExclusive l(fMu);
+ SkASSERT(0 == fCurrentShared->count());
+ if (!fWaitingExclusive->tryRemove(threadID)) {
+ SkDEBUGFAILF("Thread %" PRIx64 " did not have the lock held.\n", threadID);
+ }
+ exclusiveWaitingCount = fWaitingExclusive->count();
+ sharedWaitingCount = fWaitingShared->count();
+ fWaitingShared.swap(fCurrentShared);
+ sharedQueueSelect = fSharedQueueSelect;
+ if (sharedWaitingCount > 0) {
+ fSharedQueueSelect = 1 - fSharedQueueSelect;
+ }
+ }
+
+ if (sharedWaitingCount > 0) {
+ fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
+ } else if (exclusiveWaitingCount > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::assertHeld() const {
+ SkThreadID threadID(SkGetThreadID());
+ SkAutoMutexExclusive l(fMu);
+ SkASSERT(0 == fCurrentShared->count());
+ SkASSERT(fWaitingExclusive->find(threadID));
+ }
+
+ void SkSharedMutex::acquireShared() {
+ SkThreadID threadID(SkGetThreadID());
+ int exclusiveWaitingCount;
+ int sharedQueueSelect;
+ {
+ SkAutoMutexExclusive l(fMu);
+ exclusiveWaitingCount = fWaitingExclusive->count();
+ if (exclusiveWaitingCount > 0) {
+ if (!fWaitingShared->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %" PRIx64 " was already waiting!\n", threadID);
+ }
+ } else {
+ if (!fCurrentShared->tryAdd(threadID)) {
+ SkDEBUGFAILF("Thread %" PRIx64 " already holds a shared lock!\n", threadID);
+ }
+ }
+ sharedQueueSelect = fSharedQueueSelect;
+ }
+
+ if (exclusiveWaitingCount > 0) {
+ fSharedQueue[sharedQueueSelect].wait();
+ }
+
+ ANNOTATE_RWLOCK_ACQUIRED(this, 0);
+ }
+
+ void SkSharedMutex::releaseShared() {
+ ANNOTATE_RWLOCK_RELEASED(this, 0);
+ SkThreadID threadID(SkGetThreadID());
+
+ int currentSharedCount;
+ int waitingExclusiveCount;
+ {
+ SkAutoMutexExclusive l(fMu);
+ if (!fCurrentShared->tryRemove(threadID)) {
+ SkDEBUGFAILF("Thread %" PRIx64 " does not hold a shared lock.\n", threadID);
+ }
+ currentSharedCount = fCurrentShared->count();
+ waitingExclusiveCount = fWaitingExclusive->count();
+ }
+
+ if (0 == currentSharedCount && waitingExclusiveCount > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::assertHeldShared() const {
+ SkThreadID threadID(SkGetThreadID());
+ SkAutoMutexExclusive l(fMu);
+ SkASSERT(fCurrentShared->find(threadID));
+ }
+
+#else
+
+    // The fQueueCounts field holds three counts in an int32_t in order to make managing them atomic.
+    // These three counts must be the same size, so each gets 10 bits, allowing each
+    // count to range up to 1024 (2^10).
+ //
+ // The three counts held in fQueueCounts are:
+ // * Shared - the number of shared lock holders currently running.
+ // * WaitingExclusive - the number of threads waiting for an exclusive lock.
+ // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
+ // to finish.
+ static const int kLogThreadCount = 10;
+
+ enum {
+ kSharedOffset = (0 * kLogThreadCount),
+ kWaitingExlusiveOffset = (1 * kLogThreadCount),
+ kWaitingSharedOffset = (2 * kLogThreadCount),
+ kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset,
+ kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
+ kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
+ };
+
+ SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
+ SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+ void SkSharedMutex::acquire() {
+ // Increment the count of exclusive queue waiters.
+ int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
+ std::memory_order_acquire);
+
+ // If there are no other exclusive waiters and no shared threads are running then run
+ // else wait.
+ if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
+ fExclusiveQueue.wait();
+ }
+ ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+ }
+
+ void SkSharedMutex::release() {
+ ANNOTATE_RWLOCK_RELEASED(this, 1);
+
+ int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
+ int32_t waitingShared;
+ int32_t newQueueCounts;
+ do {
+ newQueueCounts = oldQueueCounts;
+
+ // Decrement exclusive waiters.
+ newQueueCounts -= 1 << kWaitingExlusiveOffset;
+
+ // The number of threads waiting to acquire a shared lock.
+ waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
+
+ // If there are any move the counts of all the shared waiters to actual shared. They are
+ // going to run next.
+ if (waitingShared > 0) {
+
+ // Set waiting shared to zero.
+ newQueueCounts &= ~kWaitingSharedMask;
+
+ // Because this is the exclusive release, then there are zero readers. So, the bits
+ // for shared locks should be zero. Since those bits are zero, we can just |= in the
+ // waitingShared count instead of clearing with an &= and then |= the count.
+ newQueueCounts |= waitingShared << kSharedOffset;
+ }
+
+ } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+
+ if (waitingShared > 0) {
+ // Run all the shared.
+ fSharedQueue.signal(waitingShared);
+ } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ // Run a single exclusive waiter.
+ fExclusiveQueue.signal();
+ }
+ }
+
+ void SkSharedMutex::acquireShared() {
+ int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
+ int32_t newQueueCounts;
+ do {
+ newQueueCounts = oldQueueCounts;
+ // If there are waiting exclusives then this shared lock waits else it runs.
+ if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ newQueueCounts += 1 << kWaitingSharedOffset;
+ } else {
+ newQueueCounts += 1 << kSharedOffset;
+ }
+ } while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
+ std::memory_order_acquire,
+ std::memory_order_relaxed));
+
+ // If there are waiting exclusives, then this shared waits until after it runs.
+ if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ fSharedQueue.wait();
+ }
+ ANNOTATE_RWLOCK_ACQUIRED(this, 0);
+
+ }
+
+ void SkSharedMutex::releaseShared() {
+ ANNOTATE_RWLOCK_RELEASED(this, 0);
+
+ // Decrement the shared count.
+ int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
+ std::memory_order_release);
+
+ // If shared count is going to zero (because the old count == 1) and there are exclusive
+ // waiters, then run a single exclusive waiter.
+ if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
+ && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSharedMutex.h b/gfx/skia/skia/src/core/SkSharedMutex.h
new file mode 100644
index 0000000000..e3c3047aa5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSharedMutex.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSharedLock_DEFINED
+#define SkSharedLock_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkSemaphore.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include <atomic>
+
+#ifdef SK_DEBUG
+ #include "include/private/base/SkMutex.h"
+ #include <memory>
+#endif // SK_DEBUG
+
+// There are two shared lock implementations: one for debugging, the other high performance. They implement
+// an interface similar to pthread's rwlocks.
+// This is a shared lock implementation similar to pthreads rwlocks. The high performance
+// implementation is cribbed from Preshing's article:
+// http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
+//
+// This lock does not obey strict queue ordering. It will always alternate between readers and
+// a single writer.
+class SK_CAPABILITY("mutex") SkSharedMutex {
+public:
+ SkSharedMutex();
+ ~SkSharedMutex();
+ // Acquire lock for exclusive use.
+ void acquire() SK_ACQUIRE();
+
+ // Release lock for exclusive use.
+ void release() SK_RELEASE_CAPABILITY();
+
+ // Fail if exclusive is not held.
+ void assertHeld() const SK_ASSERT_CAPABILITY(this);
+
+ // Acquire lock for shared use.
+ void acquireShared() SK_ACQUIRE_SHARED();
+
+ // Release lock for shared use.
+ void releaseShared() SK_RELEASE_SHARED_CAPABILITY();
+
+ // Fail if shared lock not held.
+ void assertHeldShared() const SK_ASSERT_SHARED_CAPABILITY(this);
+
+private:
+#ifdef SK_DEBUG
+ class ThreadIDSet;
+ std::unique_ptr<ThreadIDSet> fCurrentShared;
+ std::unique_ptr<ThreadIDSet> fWaitingExclusive;
+ std::unique_ptr<ThreadIDSet> fWaitingShared;
+ int fSharedQueueSelect{0};
+ mutable SkMutex fMu;
+ SkSemaphore fSharedQueue[2];
+ SkSemaphore fExclusiveQueue;
+#else
+ std::atomic<int32_t> fQueueCounts;
+ SkSemaphore fSharedQueue;
+ SkSemaphore fExclusiveQueue;
+#endif // SK_DEBUG
+};
+
+#ifndef SK_DEBUG
+inline void SkSharedMutex::assertHeld() const {}
+inline void SkSharedMutex::assertHeldShared() const {}
+#endif // SK_DEBUG
+
+class SK_SCOPED_CAPABILITY SkAutoSharedMutexExclusive {
+public:
+ explicit SkAutoSharedMutexExclusive(SkSharedMutex& lock) SK_ACQUIRE(lock)
+ : fLock(lock) {
+ lock.acquire();
+ }
+ ~SkAutoSharedMutexExclusive() SK_RELEASE_CAPABILITY() { fLock.release(); }
+
+private:
+ SkSharedMutex& fLock;
+};
+
+class SK_SCOPED_CAPABILITY SkAutoSharedMutexShared {
+public:
+ explicit SkAutoSharedMutexShared(SkSharedMutex& lock) SK_ACQUIRE_SHARED(lock)
+ : fLock(lock) {
+ lock.acquireShared();
+ }
+
+ // You would think this should be SK_RELEASE_SHARED_CAPABILITY, but SK_SCOPED_CAPABILITY
+ // doesn't fully understand the difference between shared and exclusive.
+ // Please review https://reviews.llvm.org/D52578 for more information.
+ ~SkAutoSharedMutexShared() SK_RELEASE_CAPABILITY() { fLock.releaseShared(); }
+
+private:
+ SkSharedMutex& fLock;
+};
+
+#endif // SkSharedLock_DEFINED
diff --git a/gfx/skia/skia/src/core/SkSpecialImage.cpp b/gfx/skia/skia/src/core/SkSpecialImage.cpp
new file mode 100644
index 0000000000..2cf9a51939
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialImage.cpp
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file
+ */
+
+#include "src/core/SkSpecialImage.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTileMode.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/image/SkImage_Base.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/image/SkImage_Gpu.h"
+#include "src/shaders/SkImageShader.h"
+#endif
+
+// Currently, the raster imagefilters can only handle certain imageinfos. Call this to know if
+// a given info is supported.
+static bool valid_for_imagefilters(const SkImageInfo& info) {
+ // no support for other swizzles/depths yet
+ return info.colorType() == kN32_SkColorType;
+}
+
+SkSpecialImage::SkSpecialImage(const SkIRect& subset,
+ uint32_t uniqueID,
+ const SkColorInfo& colorInfo,
+ const SkSurfaceProps& props)
+ : fSubset(subset)
+ , fUniqueID(kNeedNewImageUniqueID_SpecialImage == uniqueID ? SkNextID::ImageID() : uniqueID)
+ , fColorInfo(colorInfo)
+ , fProps(props) {
+}
+
+sk_sp<SkSpecialSurface> SkSpecialImage::makeSurface(SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ const SkISize& size,
+ SkAlphaType at,
+ const SkSurfaceProps& props) const {
+ return this->onMakeSurface(colorType, colorSpace, size, at, props);
+}
+
+sk_sp<SkSurface> SkSpecialImage::makeTightSurface(SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ const SkISize& size,
+ SkAlphaType at) const {
+ return this->onMakeTightSurface(colorType, colorSpace, size, at);
+}
+
+sk_sp<SkImage> SkSpecialImage::asImage(const SkIRect* subset) const {
+ if (subset) {
+ SkIRect absolute = subset->makeOffset(this->subset().topLeft());
+ return this->onAsImage(&absolute);
+ } else {
+ return this->onAsImage(nullptr);
+ }
+}
+
+sk_sp<SkShader> SkSpecialImage::asShader(SkTileMode tileMode,
+ const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const {
+ return this->onAsShader(tileMode, sampling, lm);
+}
+
+sk_sp<SkShader> SkSpecialImage::asShader(const SkSamplingOptions& sampling) const {
+ return this->asShader(sampling, SkMatrix::I());
+}
+
+sk_sp<SkShader> SkSpecialImage::asShader(const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const {
+ return this->asShader(SkTileMode::kClamp, sampling, lm);
+}
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/TextureProxyView.h"
+
+bool SkSpecialImage::isGraphiteBacked() const {
+ return SkToBool(this->textureProxyView());
+}
+
+skgpu::graphite::TextureProxyView SkSpecialImage::textureProxyView() const {
+ return this->onTextureProxyView();
+}
+
+skgpu::graphite::TextureProxyView SkSpecialImage::onTextureProxyView() const {
+ // To get here we would need to be trying to retrieve a Graphite-backed resource from
+ // either a raster or Ganesh-backed special image. That should never happen.
+ // TODO: re-enable this assert. Right now, since image filters can fallback to raster
+ // in Graphite, we can get here.
+ //SkASSERT(false);
+ return {};
+}
+#endif
+
+#ifdef SK_DEBUG
+bool SkSpecialImage::RectFits(const SkIRect& rect, int width, int height) {
+ if (0 == width && 0 == height) {
+ SkASSERT(0 == rect.fLeft && 0 == rect.fRight && 0 == rect.fTop && 0 == rect.fBottom);
+ return true;
+ }
+
+ return rect.fLeft >= 0 && rect.fLeft < width && rect.fLeft < rect.fRight &&
+ rect.fRight >= 0 && rect.fRight <= width &&
+ rect.fTop >= 0 && rect.fTop < height && rect.fTop < rect.fBottom &&
+ rect.fBottom >= 0 && rect.fBottom <= height;
+}
+#endif
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeFromImage(GrRecordingContext* rContext,
+ const SkIRect& subset,
+ sk_sp<SkImage> image,
+ const SkSurfaceProps& props) {
+ SkASSERT(RectFits(subset, image->width(), image->height()));
+
+#if defined(SK_GANESH)
+ if (rContext) {
+ auto [view, ct] = as_IB(image)->asView(rContext, GrMipmapped::kNo);
+ return MakeDeferredFromGpu(rContext,
+ subset,
+ image->uniqueID(),
+ std::move(view),
+ { ct, image->alphaType(), image->refColorSpace() },
+ props);
+ }
+#endif
+
+ // raster to gpu is supported here, but gpu to raster is not
+ SkBitmap bm;
+ if (as_IB(image)->getROPixels(nullptr, &bm)) {
+ return MakeFromRaster(subset, bm, props);
+ }
+ return nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkSpecialImage_Raster final : public SkSpecialImage {
+public:
+ SkSpecialImage_Raster(const SkIRect& subset, const SkBitmap& bm, const SkSurfaceProps& props)
+ : SkSpecialImage(subset, bm.getGenerationID(), bm.info().colorInfo(), props)
+ , fBitmap(bm) {
+ SkASSERT(bm.pixelRef());
+ SkASSERT(fBitmap.getPixels());
+ }
+
+ size_t getSize() const override { return fBitmap.computeByteSize(); }
+
+ void onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkSamplingOptions& sampling,
+ const SkPaint* paint) const override {
+ SkRect dst = SkRect::MakeXYWH(x, y,
+ this->subset().width(), this->subset().height());
+
+ canvas->drawImageRect(fBitmap.asImage(), SkRect::Make(this->subset()), dst,
+ sampling, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+
+ bool onGetROPixels(SkBitmap* bm) const override {
+ return fBitmap.extractSubset(bm, this->subset());
+ }
+
+#if defined(SK_GANESH)
+ GrSurfaceProxyView onView(GrRecordingContext* context) const override {
+ if (context) {
+ return std::get<0>(GrMakeCachedBitmapProxyView(
+ context, fBitmap, /*label=*/"SpecialImageRaster_OnView", GrMipmapped::kNo));
+ }
+
+ return {};
+ }
+#endif
+
+ sk_sp<SkSpecialSurface> onMakeSurface(SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at,
+ const SkSurfaceProps& props) const override {
+ // Ignore the requested color type, the raster backend currently only supports N32
+ colorType = kN32_SkColorType; // TODO: find ways to allow f16
+ SkImageInfo info = SkImageInfo::Make(size, colorType, at, sk_ref_sp(colorSpace));
+ return SkSpecialSurface::MakeRaster(info, props);
+ }
+
+ sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const override {
+ // No need to extract subset, onGetROPixels handles that when needed
+ return SkSpecialImage::MakeFromRaster(subset, fBitmap, this->props());
+ }
+
+ sk_sp<SkImage> onAsImage(const SkIRect* subset) const override {
+ if (subset) {
+ SkBitmap subsetBM;
+
+ if (!fBitmap.extractSubset(&subsetBM, *subset)) {
+ return nullptr;
+ }
+
+ return subsetBM.asImage();
+ }
+
+ return fBitmap.asImage();
+ }
+
+ sk_sp<SkShader> onAsShader(SkTileMode tileMode,
+ const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const override {
+ // TODO(skbug.com/12784): SkImage::makeShader() doesn't support a subset yet, but SkBitmap
+ // supports subset views so create the shader from the subset bitmap instead of fBitmap.
+ SkBitmap subsetBM;
+ if (!this->getROPixels(&subsetBM)) {
+ return nullptr;
+ }
+ return subsetBM.asImage()->makeShader(tileMode, tileMode, sampling, lm);
+ }
+
+ sk_sp<SkSurface> onMakeTightSurface(SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at) const override {
+ // Ignore the requested color type, the raster backend currently only supports N32
+ colorType = kN32_SkColorType; // TODO: find ways to allow f16
+ SkImageInfo info = SkImageInfo::Make(size, colorType, at, sk_ref_sp(colorSpace));
+ return SkSurface::MakeRaster(info);
+ }
+
+private:
+ SkBitmap fBitmap;
+};
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeFromRaster(const SkIRect& subset,
+ const SkBitmap& bm,
+ const SkSurfaceProps& props) {
+ SkASSERT(RectFits(subset, bm.width(), bm.height()));
+
+ if (!bm.pixelRef()) {
+ return nullptr;
+ }
+
+ const SkBitmap* srcBM = &bm;
+ SkBitmap tmp;
+ // ImageFilters only handle N32 at the moment, so force our src to be that
+ if (!valid_for_imagefilters(bm.info())) {
+ if (!tmp.tryAllocPixels(bm.info().makeColorType(kN32_SkColorType)) ||
+ !bm.readPixels(tmp.info(), tmp.getPixels(), tmp.rowBytes(), 0, 0))
+ {
+ return nullptr;
+ }
+ srcBM = &tmp;
+ }
+ return sk_make_sp<SkSpecialImage_Raster>(subset, *srcBM, props);
+}
+
+sk_sp<SkSpecialImage> SkSpecialImage::CopyFromRaster(const SkIRect& subset,
+ const SkBitmap& bm,
+ const SkSurfaceProps& props) {
+ SkASSERT(RectFits(subset, bm.width(), bm.height()));
+
+ if (!bm.pixelRef()) {
+ return nullptr;
+ }
+
+ SkBitmap tmp;
+ SkImageInfo info = bm.info().makeDimensions(subset.size());
+ // As in MakeFromRaster, must force src to N32 for ImageFilters
+ if (!valid_for_imagefilters(bm.info())) {
+ info = info.makeColorType(kN32_SkColorType);
+ }
+ if (!tmp.tryAllocPixels(info)) {
+ return nullptr;
+ }
+ if (!bm.readPixels(tmp.info(), tmp.getPixels(), tmp.rowBytes(), subset.x(), subset.y())) {
+ return nullptr;
+ }
+
+ // Since we're making a copy of the raster, the resulting special image is the exact size
+ // of the requested subset of the original and no longer needs to be offset by subset's left
+ // and top, since those were relative to the original's buffer.
+ return sk_make_sp<SkSpecialImage_Raster>(
+ SkIRect::MakeWH(subset.width(), subset.height()), tmp, props);
+}
+
+#if defined(SK_GANESH)
+///////////////////////////////////////////////////////////////////////////////
+static sk_sp<SkImage> wrap_proxy_in_image(GrRecordingContext* context,
+ GrSurfaceProxyView view,
+ const SkColorInfo& colorInfo) {
+
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(context),
+ kNeedNewImageUniqueID,
+ std::move(view),
+ colorInfo);
+}
+
+class SkSpecialImage_Gpu final : public SkSpecialImage {
+public:
+ SkSpecialImage_Gpu(GrRecordingContext* context,
+ const SkIRect& subset,
+ uint32_t uniqueID,
+ GrSurfaceProxyView view,
+ const SkColorInfo& colorInfo,
+ const SkSurfaceProps& props)
+ : SkSpecialImage(subset, uniqueID, colorInfo, props)
+ , fContext(context)
+ , fView(std::move(view)) {
+ }
+
+ size_t getSize() const override {
+ return fView.proxy()->gpuMemorySize();
+ }
+
+ void onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkSamplingOptions& sampling,
+ const SkPaint* paint) const override {
+ SkRect dst = SkRect::MakeXYWH(x, y,
+ this->subset().width(), this->subset().height());
+
+ // TODO: In this instance we know we're going to draw a sub-portion of the backing
+ // texture into the canvas so it is okay to wrap it in an SkImage. This poses
+ // some problems for full deferral however in that when the deferred SkImage_Gpu
+ // instantiates itself it is going to have to either be okay with having a larger
+ // than expected backing texture (unlikely) or the 'fit' of the SurfaceProxy needs
+ // to be tightened (if it is deferred).
+ sk_sp<SkImage> img = sk_sp<SkImage>(
+ new SkImage_Gpu(sk_ref_sp(canvas->recordingContext()),
+ this->uniqueID(),
+ fView,
+ this->colorInfo()));
+
+ canvas->drawImageRect(img, SkRect::Make(this->subset()), dst,
+ sampling, paint, SkCanvas::kStrict_SrcRectConstraint);
+ }
+
+ GrRecordingContext* onGetContext() const override { return fContext; }
+
+ GrSurfaceProxyView onView(GrRecordingContext* context) const override { return fView; }
+
+ bool onGetROPixels(SkBitmap* dst) const override {
+ // This should never be called: All GPU image filters are implemented entirely on the GPU,
+ // so we never perform read-back.
+ SkASSERT(false);
+ return false;
+ }
+
+ sk_sp<SkSpecialSurface> onMakeSurface(SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at,
+ const SkSurfaceProps& props) const override {
+ if (!fContext) {
+ return nullptr;
+ }
+
+ SkImageInfo ii = SkImageInfo::Make(size, colorType, at, sk_ref_sp(colorSpace));
+
+ return SkSpecialSurface::MakeRenderTarget(fContext, ii, props, fView.origin());
+ }
+
+ sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const override {
+ return SkSpecialImage::MakeDeferredFromGpu(fContext,
+ subset,
+ this->uniqueID(),
+ fView,
+ this->colorInfo(),
+ this->props());
+ }
+
+ sk_sp<SkImage> onAsImage(const SkIRect* subset) const override {
+ GrSurfaceProxy* proxy = fView.proxy();
+ if (subset) {
+ if (proxy->isFunctionallyExact() && *subset == SkIRect::MakeSize(proxy->dimensions())) {
+ proxy->priv().exactify(false);
+ // The existing GrTexture is already tight so reuse it in the SkImage
+ return wrap_proxy_in_image(fContext, fView, this->colorInfo());
+ }
+
+ auto subsetView = GrSurfaceProxyView::Copy(fContext,
+ fView,
+ GrMipmapped::kNo,
+ *subset,
+ SkBackingFit::kExact,
+ skgpu::Budgeted::kYes,
+ /*label=*/"SkSpecialImage_AsImage");
+ if (!subsetView) {
+ return nullptr;
+ }
+ SkASSERT(subsetView.asTextureProxy());
+ SkASSERT(subsetView.proxy()->priv().isExact());
+
+ // MDB: this is acceptable (wrapping subsetProxy in an SkImage) bc Copy will
+ // return a kExact-backed proxy
+ return wrap_proxy_in_image(fContext, std::move(subsetView), this->colorInfo());
+ }
+
+ proxy->priv().exactify(true);
+
+ return wrap_proxy_in_image(fContext, fView, this->colorInfo());
+ }
+
+ sk_sp<SkShader> onAsShader(SkTileMode tileMode,
+ const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const override {
+ // The special image's logical (0,0) is at its subset's topLeft() so we need to account for
+ // that in the local matrix used when sampling.
+ SkMatrix subsetOrigin = SkMatrix::Translate(-this->subset().topLeft());
+ subsetOrigin.postConcat(lm);
+ // However, we don't need to modify the subset itself since that is defined with respect to
+ // the base image, and the local matrix is applied before any tiling/clamping.
+ const SkRect subset = SkRect::Make(this->subset());
+
+ // asImage() w/o a subset makes no copy; create the SkImageShader directly to remember the
+ // subset used to access the image.
+ return SkImageShader::MakeSubset(
+ this->asImage(), subset, tileMode, tileMode, sampling, &subsetOrigin);
+ }
+
+ sk_sp<SkSurface> onMakeTightSurface(SkColorType colorType, const SkColorSpace* colorSpace,
+ const SkISize& size, SkAlphaType at) const override {
+ // TODO (michaelludwig): Why does this ignore colorType but onMakeSurface doesn't ignore it?
+ // Once makeTightSurface() goes away, should this type overriding behavior be moved into
+ // onMakeSurface() or is this unnecessary?
+ colorType = colorSpace && colorSpace->gammaIsLinear()
+ ? kRGBA_F16_SkColorType : kRGBA_8888_SkColorType;
+ SkImageInfo info = SkImageInfo::Make(size, colorType, at, sk_ref_sp(colorSpace));
+ return SkSurface::MakeRenderTarget(
+ fContext, skgpu::Budgeted::kYes, info, 0, fView.origin(), nullptr);
+ }
+
+private:
+ GrRecordingContext* fContext;
+ GrSurfaceProxyView fView;
+};
+
+sk_sp<SkSpecialImage> SkSpecialImage::MakeDeferredFromGpu(GrRecordingContext* context,
+ const SkIRect& subset,
+ uint32_t uniqueID,
+ GrSurfaceProxyView view,
+ const GrColorInfo& colorInfo,
+ const SkSurfaceProps& props) {
+ if (!context || context->abandoned() || !view.asTextureProxy()) {
+ return nullptr;
+ }
+
+ SkColorType ct = GrColorTypeToSkColorType(colorInfo.colorType());
+
+ SkASSERT(RectFits(subset, view.proxy()->width(), view.proxy()->height()));
+ return sk_make_sp<SkSpecialImage_Gpu>(context, subset, uniqueID, std::move(view),
+ SkColorInfo(ct,
+ colorInfo.alphaType(),
+ colorInfo.refColorSpace()),
+ props);
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpecialImage.h b/gfx/skia/skia/src/core/SkSpecialImage.h
new file mode 100644
index 0000000000..33bcfae2df
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialImage.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file
+ */
+
+#ifndef SkSpecialImage_DEFINED
+#define SkSpecialImage_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkSurfaceProps.h"
+#include "src/core/SkNextID.h"
+
+#if defined(SK_GANESH)
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#endif
+
+class GrColorInfo;
+class GrRecordingContext;
+class GrTextureProxy;
+class SkBitmap;
+class SkCanvas;
+class SkImage;
+struct SkImageInfo;
+class SkMatrix;
+class SkPaint;
+class SkPixmap;
+class SkShader;
+class SkSpecialSurface;
+class SkSurface;
+enum class SkTileMode;
+
+namespace skgpu::graphite {
+class Recorder;
+class TextureProxyView;
+}
+
+enum {
+ kNeedNewImageUniqueID_SpecialImage = 0
+};
+
+/**
+ * This is a restricted form of SkImage solely intended for internal use. It
+ * differs from SkImage in that:
+ * - it can only be backed by raster or gpu (no generators)
+ * - it can be backed by a GrTextureProxy larger than its nominal bounds
+ * - it can't be drawn tiled
+ * - it can't be drawn with MIPMAPs
+ * It is similar to SkImage in that it abstracts how the pixels are stored/represented.
+ *
+ * Note: the contents of the backing storage outside of the subset rect are undefined.
+ */
+class SkSpecialImage : public SkRefCnt {
+public:
+ typedef void* ReleaseContext;
+ typedef void(*RasterReleaseProc)(void* pixels, ReleaseContext);
+
+ const SkSurfaceProps& props() const { return fProps; }
+
+ int width() const { return fSubset.width(); }
+ int height() const { return fSubset.height(); }
+ SkISize dimensions() const { return { this->width(), this->height() }; }
+ const SkIRect& subset() const { return fSubset; }
+
+ uint32_t uniqueID() const { return fUniqueID; }
+
+ virtual size_t getSize() const = 0;
+
+ const SkColorInfo& colorInfo() const { return fColorInfo; }
+ SkAlphaType alphaType() const { return fColorInfo.alphaType(); }
+ SkColorType colorType() const { return fColorInfo.colorType(); }
+ SkColorSpace* getColorSpace() const { return fColorInfo.colorSpace(); }
+
+ /**
+ * Draw this SpecialImage into the canvas, automatically taking into account the image's subset
+ */
+ void draw(SkCanvas* canvas,
+ SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling,
+ const SkPaint* paint) const {
+ return this->onDraw(canvas, x, y, sampling, paint);
+ }
+ void draw(SkCanvas* canvas, SkScalar x, SkScalar y) const {
+ this->draw(canvas, x, y, SkSamplingOptions(), nullptr);
+ }
+
+ static sk_sp<SkSpecialImage> MakeFromImage(GrRecordingContext*,
+ const SkIRect& subset,
+ sk_sp<SkImage>,
+ const SkSurfaceProps&);
+ static sk_sp<SkSpecialImage> MakeFromRaster(const SkIRect& subset,
+ const SkBitmap&,
+ const SkSurfaceProps&);
+ static sk_sp<SkSpecialImage> CopyFromRaster(const SkIRect& subset,
+ const SkBitmap&,
+ const SkSurfaceProps&);
+#if defined(SK_GANESH)
+ static sk_sp<SkSpecialImage> MakeDeferredFromGpu(GrRecordingContext*,
+ const SkIRect& subset,
+ uint32_t uniqueID,
+ GrSurfaceProxyView,
+ const GrColorInfo&,
+ const SkSurfaceProps&);
+#endif
+
+#if defined(SK_GRAPHITE)
+ static sk_sp<SkSpecialImage> MakeGraphite(skgpu::graphite::Recorder*,
+ const SkIRect& subset,
+ uint32_t uniqueID,
+ skgpu::graphite::TextureProxyView,
+ const SkColorInfo&,
+ const SkSurfaceProps&);
+#endif
+
+ /**
+ * Create a new special surface with a backend that is compatible with this special image.
+ */
+ sk_sp<SkSpecialSurface> makeSurface(SkColorType,
+ const SkColorSpace*,
+ const SkISize& size,
+ SkAlphaType,
+ const SkSurfaceProps&) const;
+
+ /**
+ * Create a new surface with a backend that is compatible with this special image.
+ * TODO: switch this to makeSurface once we have resolved the naming issue
+ * TODO (michaelludwig) - This is only used by SkTileImageFilter, which appears should be
+ * updated to work correctly with subsets and then makeTightSurface() can go away entirely.
+ */
+ sk_sp<SkSurface> makeTightSurface(SkColorType,
+ const SkColorSpace*,
+ const SkISize& size,
+ SkAlphaType = kPremul_SkAlphaType) const;
+
+ /**
+ * Extract a subset of this special image and return it as a special image.
+ * It may or may not point to the same backing memory. The input 'subset' is relative to the
+ * special image's content rect.
+ */
+ sk_sp<SkSpecialImage> makeSubset(const SkIRect& subset) const {
+ SkIRect absolute = subset.makeOffset(this->subset().topLeft());
+ return this->onMakeSubset(absolute);
+ }
+
+ /**
+ * Create an SkImage from the contents of this special image optionally extracting a subset.
+ * It may or may not point to the same backing memory.
+ * Note: when no 'subset' parameter is specified the entire SkSpecialImage will be
+ * returned - including whatever extra padding may have resulted from a loose fit!
+ * When the 'subset' parameter is specified the returned image will be tight even if that
+ * entails a copy! The 'subset' is relative to this special image's content rect.
+ */
+ // TODO: The only version that uses the subset is the tile image filter, and that doesn't need
+ // to if it can be rewritten to use asShader() and SkTileModes. Similarly, the only use case of
+ // asImage() w/o a subset is SkImage::makeFiltered() and that could/should return an SkShader so
+ // that users don't need to worry about correctly applying the subset, etc.
+ sk_sp<SkImage> asImage(const SkIRect* subset = nullptr) const;
+
+ /**
+ * Create an SkShader that samples the contents of this special image, applying tile mode for
+ * any sample that falls outside its internal subset.
+ */
+ sk_sp<SkShader> asShader(SkTileMode, const SkSamplingOptions&, const SkMatrix& lm) const;
+ sk_sp<SkShader> asShader(const SkSamplingOptions& sampling) const;
+ sk_sp<SkShader> asShader(const SkSamplingOptions& sampling, const SkMatrix& lm) const;
+
+ /**
+ * If the SpecialImage is backed by a gpu texture, return true.
+ */
+ bool isTextureBacked() const { return SkToBool(this->onGetContext()); }
+
+ /**
+ * Return the GrRecordingContext if the SkSpecialImage is GrTexture-backed
+ */
+ GrRecordingContext* getContext() const { return this->onGetContext(); }
+
+#if defined(SK_GANESH)
+ /**
+ * Regardless of how the underlying backing data is stored, returns the contents as a
+ * GrSurfaceProxyView. The returned view's proxy represents the entire backing image, so texture
+ * coordinates must be mapped from the content rect (e.g. relative to 'subset()') to the proxy's
+ * space (offset by subset().topLeft()).
+ */
+ GrSurfaceProxyView view(GrRecordingContext* context) const { return this->onView(context); }
+#endif
+
+#if defined(SK_GRAPHITE)
+ bool isGraphiteBacked() const;
+
+ skgpu::graphite::TextureProxyView textureProxyView() const;
+#endif
+
+ /**
+ * Regardless of the underlying backing store, return the contents as an SkBitmap.
+ * The returned bitmap represents the subset accessed by this image, thus (0,0) refers to the
+ * top-left corner of 'subset'.
+ */
+ bool getROPixels(SkBitmap* bm) const {
+ return this->onGetROPixels(bm);
+ }
+
+protected:
+ SkSpecialImage(const SkIRect& subset,
+ uint32_t uniqueID,
+ const SkColorInfo&,
+ const SkSurfaceProps&);
+
+ virtual void onDraw(SkCanvas*,
+ SkScalar x, SkScalar y,
+ const SkSamplingOptions&,
+ const SkPaint*) const = 0;
+
+ virtual bool onGetROPixels(SkBitmap*) const = 0;
+
+ virtual GrRecordingContext* onGetContext() const { return nullptr; }
+
+#if defined(SK_GANESH)
+ virtual GrSurfaceProxyView onView(GrRecordingContext*) const = 0;
+#endif
+
+#if defined(SK_GRAPHITE)
+ virtual skgpu::graphite::TextureProxyView onTextureProxyView() const;
+#endif
+
+ // This subset is relative to the backing store's coordinate frame, it has already been mapped
+ // from the content rect by the non-virtual makeSubset().
+ virtual sk_sp<SkSpecialImage> onMakeSubset(const SkIRect& subset) const = 0;
+
+ virtual sk_sp<SkSpecialSurface> onMakeSurface(SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ const SkISize& size,
+ SkAlphaType at,
+ const SkSurfaceProps&) const = 0;
+
+ // This subset (when not null) is relative to the backing store's coordinate frame, it has
+ // already been mapped from the content rect by the non-virtual asImage().
+ virtual sk_sp<SkImage> onAsImage(const SkIRect* subset) const = 0;
+
+ virtual sk_sp<SkShader> onAsShader(SkTileMode,
+ const SkSamplingOptions&,
+ const SkMatrix&) const = 0;
+
+ virtual sk_sp<SkSurface> onMakeTightSurface(SkColorType colorType,
+ const SkColorSpace* colorSpace,
+ const SkISize& size,
+ SkAlphaType at) const = 0;
+
+#ifdef SK_DEBUG
+ static bool RectFits(const SkIRect& rect, int width, int height);
+#endif
+
+private:
+ const SkIRect fSubset;
+ const uint32_t fUniqueID;
+ const SkColorInfo fColorInfo;
+ const SkSurfaceProps fProps;
+};
+
+#endif // SkSpecialImage_DEFINED
diff --git a/gfx/skia/skia/src/core/SkSpecialSurface.cpp b/gfx/skia/skia/src/core/SkSpecialSurface.cpp
new file mode 100644
index 0000000000..b90935b5aa
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialSurface.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file
+ */
+
+#include "src/core/SkSpecialSurface.h"
+
+#include <memory>
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkMallocPixelRef.h"
+#include "src/core/SkBitmapDevice.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSurfacePriv.h"
+
+SkSpecialSurface::SkSpecialSurface(sk_sp<SkBaseDevice> device, const SkIRect& subset)
+ : fSubset(subset) {
+ SkASSERT(fSubset.width() > 0);
+ SkASSERT(fSubset.height() > 0);
+
+ fCanvas = std::make_unique<SkCanvas>(std::move(device));
+ fCanvas->clipRect(SkRect::Make(subset));
+#ifdef SK_IS_BOT
+ fCanvas->clear(SK_ColorRED); // catch any imageFilter sloppiness
+#endif
+}
+
+sk_sp<SkSpecialImage> SkSpecialSurface::makeImageSnapshot() {
+ fCanvas->restoreToCount(0);
+
+ // Because of the above 'restoreToCount(0)' we know we're getting the base device here.
+ SkBaseDevice* baseDevice = SkCanvasPriv::TopDevice(fCanvas.get());
+ if (!baseDevice) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialImage> image = baseDevice->snapSpecial(this->subset());
+
+ fCanvas.reset();
+ return image;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeRaster(const SkImageInfo& info,
+ const SkSurfaceProps& props) {
+ if (!SkSurfaceValidateRasterInfo(info)) {
+ return nullptr;
+ }
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(info, 0);
+ if (!pr) {
+ return nullptr;
+ }
+
+ SkBitmap bitmap;
+ bitmap.setInfo(info, info.minRowBytes());
+ bitmap.setPixelRef(std::move(pr), 0, 0);
+
+ sk_sp<SkBaseDevice> device(new SkBitmapDevice(bitmap,
+ { props.flags(), kUnknown_SkPixelGeometry }));
+ if (!device) {
+ return nullptr;
+ }
+
+ const SkIRect subset = SkIRect::MakeSize(info.dimensions());
+
+ return sk_make_sp<SkSpecialSurface>(std::move(device), subset);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeRenderTarget(GrRecordingContext* rContext,
+ const SkImageInfo& ii,
+ const SkSurfaceProps& props,
+ GrSurfaceOrigin surfaceOrigin) {
+ if (!rContext) {
+ return nullptr;
+ }
+
+ auto device = rContext->priv().createDevice(skgpu::Budgeted::kYes,
+ ii,
+ SkBackingFit::kApprox,
+ 1,
+ GrMipmapped::kNo,
+ GrProtected::kNo,
+ surfaceOrigin,
+ {props.flags(), kUnknown_SkPixelGeometry},
+ skgpu::ganesh::Device::InitContents::kUninit);
+ if (!device) {
+ return nullptr;
+ }
+
+ const SkIRect subset = SkIRect::MakeSize(ii.dimensions());
+
+ return sk_make_sp<SkSpecialSurface>(std::move(device), subset);
+}
+
+#endif // defined(SK_GANESH)
+
+///////////////////////////////////////////////////////////////////////////////
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Device.h"
+
+sk_sp<SkSpecialSurface> SkSpecialSurface::MakeGraphite(skgpu::graphite::Recorder* recorder,
+ const SkImageInfo& ii,
+ const SkSurfaceProps& props) {
+ using namespace skgpu::graphite;
+
+ if (!recorder) {
+ return nullptr;
+ }
+
+ sk_sp<Device> device = Device::Make(recorder,
+ ii,
+ skgpu::Budgeted::kYes,
+ skgpu::Mipmapped::kNo,
+ {props.flags(), kUnknown_SkPixelGeometry},
+ /* addInitialClear= */ false);
+ if (!device) {
+ return nullptr;
+ }
+
+ const SkIRect subset = SkIRect::MakeSize(ii.dimensions());
+
+ return sk_make_sp<SkSpecialSurface>(std::move(device), subset);
+}
+
+#endif // SK_GRAPHITE
diff --git a/gfx/skia/skia/src/core/SkSpecialSurface.h b/gfx/skia/skia/src/core/SkSpecialSurface.h
new file mode 100644
index 0000000000..4c33008940
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpecialSurface.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file
+ */
+
+#ifndef SkSpecialSurface_DEFINED
+#define SkSpecialSurface_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurfaceProps.h"
+
+#if defined(SK_GANESH)
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+namespace skgpu::graphite {
+ class Recorder;
+}
+#endif
+
+class GrBackendFormat;
+class GrRecordingContext;
+class SkBaseDevice;
+class SkBitmap;
+class SkCanvas;
+class SkSpecialImage;
+
+/**
+ * SkSpecialSurface is a restricted form of SkSurface solely for internal use. It differs
+ * from SkSurface in that:
+ * - it can be backed by GrTextures larger than [ fWidth, fHeight ]
+ * - it can't be used for tiling
+ * - it becomes inactive once a snapshot of it is taken (i.e., no copy-on-write)
+ * - it has no generation ID
+ */
+class SkSpecialSurface : public SkRefCnt {
+public:
+ SkSpecialSurface(sk_sp<SkBaseDevice>, const SkIRect& subset);
+
+#ifdef SK_DEBUG
+ SkSurfaceProps props() const { return fCanvas->getBaseProps(); }
+#endif
+
+ const SkIRect& subset() const { return fSubset; }
+ int width() const { return fSubset.width(); }
+ int height() const { return fSubset.height(); }
+
+ /**
+ * Return a canvas that will draw into this special surface. This will always
+ * return the same canvas for a given special surface, and is managed/owned by the
+ * special surface.
+ *
+ * The canvas will be invalid after 'makeImageSnapshot' is called.
+ */
+ SkCanvas* getCanvas() { return fCanvas.get(); }
+
+ /**
+ * Returns an image of the current state of the surface pixels up to this
+ * point. The canvas returned by 'getCanvas' becomes invalidated by this
+ * call and no more drawing to this surface is allowed.
+ */
+ sk_sp<SkSpecialImage> makeImageSnapshot();
+
+#if defined(SK_GANESH)
+ /**
+ * Allocate a new GPU-backed SkSpecialSurface. If the requested surface cannot
+ * be created, nullptr will be returned.
+ */
+ static sk_sp<SkSpecialSurface> MakeRenderTarget(GrRecordingContext*,
+ const SkImageInfo&,
+ const SkSurfaceProps&,
+ GrSurfaceOrigin);
+#endif
+
+#if defined(SK_GRAPHITE)
+ static sk_sp<SkSpecialSurface> MakeGraphite(skgpu::graphite::Recorder*,
+ const SkImageInfo&,
+ const SkSurfaceProps&);
+#endif
+
+ /**
+ * Return a new CPU-backed surface, with the memory for the pixels automatically
+ * allocated.
+ *
+ * If the requested surface cannot be created, or the request is not a
+ * supported configuration, nullptr will be returned.
+ */
+ static sk_sp<SkSpecialSurface> MakeRaster(const SkImageInfo&,
+ const SkSurfaceProps&);
+
+private:
+ std::unique_ptr<SkCanvas> fCanvas;
+ const SkIRect fSubset;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpinlock.cpp b/gfx/skia/skia/src/core/SkSpinlock.cpp
new file mode 100644
index 0000000000..ece314ad2e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpinlock.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSpinlock.h"
+#include "include/private/base/SkThreadAnnotations.h"
+
+#if 0
+ #include "include/private/base/SkMutex.h"
+ #include <execinfo.h>
+ #include <stdio.h>
+
+ static void debug_trace() {
+ void* stack[64];
+ int len = backtrace(stack, std::size(stack));
+
+ // As you might imagine, we can't use an SkSpinlock here...
+ static SkMutex lock;
+ {
+ SkAutoMutexExclusive locked(lock);
+ fprintf(stderr, "\n");
+ backtrace_symbols_fd(stack, len, 2/*stderr*/);
+ fprintf(stderr, "\n");
+ }
+ }
+#else
+ static void debug_trace() {}
+#endif
+
+// Renamed from "pause" to avoid conflict with function defined in unistd.h
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <emmintrin.h>
+ static void do_pause() { _mm_pause(); }
+#else
+ static void do_pause() { /*spin*/ }
+#endif
+
+void SkSpinlock::contendedAcquire() {
+ debug_trace();
+
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ SK_POTENTIALLY_BLOCKING_REGION_BEGIN;
+ while (fLocked.exchange(true, std::memory_order_acquire)) {
+ do_pause();
+ }
+ SK_POTENTIALLY_BLOCKING_REGION_END;
+}
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter.h b/gfx/skia/skia/src/core/SkSpriteBlitter.h
new file mode 100644
index 0000000000..dfb1194488
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSpriteBlitter_DEFINED
+#define SkSpriteBlitter_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "include/core/SkShader.h"
+#include "src/core/SkBlitter.h"
+
+class SkPaint;
+
+// SkSpriteBlitter specializes SkBlitter in a way to move large rectangles of pixels around.
+// Because of this use, the main primitive shifts from blitH style things to the more efficient
+// blitRect.
+class SkSpriteBlitter : public SkBlitter {
+public:
+ SkSpriteBlitter(const SkPixmap& source);
+
+ virtual bool setup(const SkPixmap& dst, int left, int top, const SkPaint&);
+
+ // blitH, blitAntiH, blitV and blitMask should not be called on an SkSpriteBlitter.
+ void blitH(int x, int y, int width) override;
+ void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override;
+ void blitV(int x, int y, int height, SkAlpha alpha) override;
+ void blitMask(const SkMask&, const SkIRect& clip) override;
+
+ // A SkSpriteBlitter must implement blitRect.
+ void blitRect(int x, int y, int width, int height) override = 0;
+
+ static SkSpriteBlitter* ChooseL32(const SkPixmap& source, const SkPaint&, SkArenaAlloc*);
+
+protected:
+ SkPixmap fDst;
+ const SkPixmap fSource;
+ int fLeft, fTop;
+ const SkPaint* fPaint;
+
+private:
+ using INHERITED = SkBlitter;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp b/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp
new file mode 100644
index 0000000000..1314fafdf0
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSpriteBlitter_ARGB32.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBlitRow.h"
+#include "src/core/SkSpriteBlitter.h"
+#include "src/core/SkXfermodePriv.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D32_S32 : public SkSpriteBlitter {
+public:
+ Sprite_D32_S32(const SkPixmap& src, U8CPU alpha) : INHERITED(src) {
+ SkASSERT(src.colorType() == kN32_SkColorType);
+
+ unsigned flags32 = 0;
+ if (255 != alpha) {
+ flags32 |= SkBlitRow::kGlobalAlpha_Flag32;
+ }
+ if (!src.isOpaque()) {
+ flags32 |= SkBlitRow::kSrcPixelAlpha_Flag32;
+ }
+
+ fProc32 = SkBlitRow::Factory32(flags32);
+ fAlpha = alpha;
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint32_t* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const uint32_t* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+ SkBlitRow::Proc32 proc = fProc32;
+ U8CPU alpha = fAlpha;
+
+ do {
+ proc(dst, src, width, alpha);
+ dst = (uint32_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const uint32_t* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+private:
+ SkBlitRow::Proc32 fProc32;
+ U8CPU fAlpha;
+
+ using INHERITED = SkSpriteBlitter;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class Sprite_D32_S32A_Xfer: public SkSpriteBlitter {
+public:
+ Sprite_D32_S32A_Xfer(const SkPixmap& source, const SkPaint& paint) : SkSpriteBlitter(source) {
+ fXfermode = SkXfermode::Peek(paint.getBlendMode_or(SkBlendMode::kSrcOver));
+ SkASSERT(fXfermode);
+ }
+
+ void blitRect(int x, int y, int width, int height) override {
+ SkASSERT(width > 0 && height > 0);
+ uint32_t* SK_RESTRICT dst = fDst.writable_addr32(x, y);
+ const uint32_t* SK_RESTRICT src = fSource.addr32(x - fLeft, y - fTop);
+ size_t dstRB = fDst.rowBytes();
+ size_t srcRB = fSource.rowBytes();
+ SkXfermode* xfermode = fXfermode;
+
+ do {
+ xfermode->xfer32(dst, src, width, nullptr);
+
+ dst = (uint32_t* SK_RESTRICT)((char*)dst + dstRB);
+ src = (const uint32_t* SK_RESTRICT)((const char*)src + srcRB);
+ } while (--height != 0);
+ }
+
+protected:
+ SkXfermode* fXfermode;
+
+private:
+ using INHERITED = SkSpriteBlitter;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkSpriteBlitter* SkSpriteBlitter::ChooseL32(const SkPixmap& source, const SkPaint& paint,
+ SkArenaAlloc* allocator) {
+ SkASSERT(allocator != nullptr);
+
+ if (paint.getColorFilter() != nullptr) {
+ return nullptr;
+ }
+ if (paint.getMaskFilter() != nullptr) {
+ return nullptr;
+ }
+ if (!paint.asBlendMode()) {
+ return nullptr;
+ }
+
+ U8CPU alpha = paint.getAlpha();
+
+ if (source.colorType() == kN32_SkColorType) {
+ if (paint.isSrcOver()) {
+ // this can handle alpha, but not xfermode
+ return allocator->make<Sprite_D32_S32>(source, alpha);
+ }
+ if (255 == alpha) {
+ // this can handle an xfermode, but not alpha
+ return allocator->make<Sprite_D32_S32A_Xfer>(source, paint);
+ }
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkStream.cpp b/gfx/skia/skia/src/core/SkStream.cpp
new file mode 100644
index 0000000000..e43cb7716c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStream.cpp
@@ -0,0 +1,986 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkStreamPriv.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <limits>
+#include <new>
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkStream::readS8(int8_t* i) {
+ return this->read(i, sizeof(*i)) == sizeof(*i);
+}
+
+bool SkStream::readS16(int16_t* i) {
+ return this->read(i, sizeof(*i)) == sizeof(*i);
+}
+
+bool SkStream::readS32(int32_t* i) {
+ return this->read(i, sizeof(*i)) == sizeof(*i);
+}
+
+bool SkStream::readScalar(SkScalar* i) {
+ return this->read(i, sizeof(*i)) == sizeof(*i);
+}
+
+#define SK_MAX_BYTE_FOR_U8 0xFD
+#define SK_BYTE_SENTINEL_FOR_U16 0xFE
+#define SK_BYTE_SENTINEL_FOR_U32 0xFF
+
// Decode a variable-length unsigned value written by SkWStream::writePackedUInt:
// the first byte either holds the value directly (<= SK_MAX_BYTE_FOR_U8) or is a
// sentinel announcing that a full 16- or 32-bit value follows.
// Returns false if the stream runs out of bytes mid-value.
bool SkStream::readPackedUInt(size_t* i) {
    uint8_t byte;
    if (!this->read(&byte, 1)) {
        return false;
    }
    if (SK_BYTE_SENTINEL_FOR_U16 == byte) {
        uint16_t i16;
        if (!this->readU16(&i16)) { return false; }
        *i = i16;
    } else if (SK_BYTE_SENTINEL_FOR_U32 == byte) {
        uint32_t i32;
        if (!this->readU32(&i32)) { return false; }
        *i = i32;
    } else {
        // Common case: the byte is the value itself.
        *i = byte;
    }
    return true;
}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+SkWStream::~SkWStream()
+{
+}
+
+void SkWStream::flush()
+{
+}
+
+bool SkWStream::writeDecAsText(int32_t dec)
+{
+ char buffer[kSkStrAppendS32_MaxSize];
+ char* stop = SkStrAppendS32(buffer, dec);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::writeBigDecAsText(int64_t dec, int minDigits)
+{
+ char buffer[kSkStrAppendU64_MaxSize];
+ char* stop = SkStrAppendU64(buffer, dec, minDigits);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::writeHexAsText(uint32_t hex, int digits)
+{
+ SkString tmp;
+ tmp.appendHex(hex, digits);
+ return this->write(tmp.c_str(), tmp.size());
+}
+
+bool SkWStream::writeScalarAsText(SkScalar value)
+{
+ char buffer[kSkStrAppendScalar_MaxSize];
+ char* stop = SkStrAppendScalar(buffer, value);
+ return this->write(buffer, stop - buffer);
+}
+
+bool SkWStream::writeScalar(SkScalar value) {
+ return this->write(&value, sizeof(value));
+}
+
+int SkWStream::SizeOfPackedUInt(size_t value) {
+ if (value <= SK_MAX_BYTE_FOR_U8) {
+ return 1;
+ } else if (value <= 0xFFFF) {
+ return 3;
+ }
+ return 5;
+}
+
// Encode 'value' in the variable-length format decoded by
// SkStream::readPackedUInt:
//   value <= 0xFD   : one byte, the value itself
//   value <= 0xFFFF : sentinel 0xFE + 2 bytes (host byte order)
//   otherwise       : sentinel 0xFF + 4 bytes (host byte order)
// Returns the result of the underlying write().
bool SkWStream::writePackedUInt(size_t value) {
    uint8_t data[5];
    size_t len = 1;
    if (value <= SK_MAX_BYTE_FOR_U8) {
        data[0] = value;
        len = 1;
    } else if (value <= 0xFFFF) {
        uint16_t value16 = value;
        data[0] = SK_BYTE_SENTINEL_FOR_U16;
        memcpy(&data[1], &value16, 2);
        len = 3;
    } else {
        // Values wider than 32 bits are truncated (SkToU32 asserts in debug).
        uint32_t value32 = SkToU32(value);
        data[0] = SK_BYTE_SENTINEL_FOR_U32;
        memcpy(&data[1], &value32, 4);
        len = 5;
    }
    return this->write(data, len);
}
+
+bool SkWStream::writeStream(SkStream* stream, size_t length) {
+ char scratch[1024];
+ const size_t MAX = sizeof(scratch);
+
+ while (length != 0) {
+ size_t n = length;
+ if (n > MAX) {
+ n = MAX;
+ }
+ stream->read(scratch, n);
+ if (!this->write(scratch, n)) {
+ return false;
+ }
+ length -= n;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
// Primary constructor: views the byte window [start, end) of 'file', with the
// read cursor at 'current'. The member initializers clamp the values into a
// well-formed window (fStart <= fCurrent <= fEnd).
SkFILEStream::SkFILEStream(std::shared_ptr<FILE> file, size_t end, size_t start, size_t current)
    : fFILE(std::move(file))
    , fEnd(end)
    , fStart(std::min(start, fEnd))
    , fCurrent(SkTPin(current, fStart, fEnd))
{
    // Callers are expected to pass already-consistent values, so the clamps
    // above should be no-ops; these asserts verify that in debug builds.
    SkASSERT(fStart == start);
    SkASSERT(fCurrent == current);
}
+
+SkFILEStream::SkFILEStream(std::shared_ptr<FILE> file, size_t end, size_t start)
+ : SkFILEStream(std::move(file), end, start, start)
+{ }
+
+SkFILEStream::SkFILEStream(FILE* file, size_t size, size_t start)
+ : SkFILEStream(std::shared_ptr<FILE>(file, sk_fclose), SkSafeMath::Add(start, size), start)
+{ }
+
+SkFILEStream::SkFILEStream(FILE* file, size_t size)
+ : SkFILEStream(file, size, file ? sk_ftell(file) : 0)
+{ }
+
+SkFILEStream::SkFILEStream(FILE* file)
+ : SkFILEStream(std::shared_ptr<FILE>(file, sk_fclose),
+ file ? sk_fgetsize(file) : 0,
+ file ? sk_ftell(file) : 0)
+{ }
+
+SkFILEStream::SkFILEStream(const char path[])
+ : SkFILEStream(path ? sk_fopen(path, kRead_SkFILE_Flag) : nullptr)
+{ }
+
+SkFILEStream::~SkFILEStream() {
+ this->close();
+}
+
+void SkFILEStream::close() {
+ fFILE.reset();
+ fEnd = 0;
+ fStart = 0;
+ fCurrent = 0;
+}
+
// Read up to 'size' bytes at the current position. A null buffer means "skip":
// the cursor advances without any actual file I/O. Returns the number of bytes
// consumed (0 on error or at the end of the window).
size_t SkFILEStream::read(void* buffer, size_t size) {
    // Clamp the request to the bytes remaining in our [fStart, fEnd) window.
    if (size > fEnd - fCurrent) {
        size = fEnd - fCurrent;
    }
    size_t bytesRead = size;
    if (buffer) {
        bytesRead = sk_qread(fFILE.get(), buffer, size, fCurrent);
    }
    // sk_qread signals failure as SIZE_MAX; report "read nothing" and leave
    // fCurrent untouched in that case.
    if (bytesRead == SIZE_MAX) {
        return 0;
    }
    fCurrent += bytesRead;
    return bytesRead;
}
+
+bool SkFILEStream::isAtEnd() const {
+ if (fCurrent == fEnd) {
+ return true;
+ }
+ return fCurrent >= sk_fgetsize(fFILE.get());
+}
+
+bool SkFILEStream::rewind() {
+ fCurrent = fStart;
+ return true;
+}
+
+SkStreamAsset* SkFILEStream::onDuplicate() const {
+ return new SkFILEStream(fFILE, fEnd, fStart, fStart);
+}
+
+size_t SkFILEStream::getPosition() const {
+ SkASSERT(fCurrent >= fStart);
+ return fCurrent - fStart;
+}
+
+bool SkFILEStream::seek(size_t position) {
+ fCurrent = std::min(SkSafeMath::Add(position, fStart), fEnd);
+ return true;
+}
+
// Relative seek, saturating at the window boundaries; never fails.
bool SkFILEStream::move(long offset) {
    if (offset < 0) {
        // Stepping backwards: the extra checks guard the cases where negating
        // the long is undefined (LONG_MIN), where its magnitude doesn't fit in
        // size_t, or where it would move before the start — all clamp to fStart.
        if (offset == std::numeric_limits<long>::min() ||
            !SkTFitsIn<size_t>(-offset) ||
            (size_t) (-offset) >= this->getPosition())
        {
            fCurrent = fStart;
        } else {
            fCurrent += offset;
        }
    } else if (!SkTFitsIn<size_t>(offset)) {
        // Forward step too large to represent: clamp to the end.
        fCurrent = fEnd;
    } else {
        // Normal forward step, overflow-checked and clamped to the end.
        fCurrent = std::min(SkSafeMath::Add(fCurrent, (size_t) offset), fEnd);
    }

    SkASSERT(fCurrent >= fStart && fCurrent <= fEnd);
    return true;
}
+
+SkStreamAsset* SkFILEStream::onFork() const {
+ return new SkFILEStream(fFILE, fEnd, fStart, fCurrent);
+}
+
+size_t SkFILEStream::getLength() const {
+ return fEnd - fStart;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkData> newFromParams(const void* src, size_t size, bool copyData) {
+ if (copyData) {
+ return SkData::MakeWithCopy(src, size);
+ } else {
+ return SkData::MakeWithoutCopy(src, size);
+ }
+}
+
+SkMemoryStream::SkMemoryStream() {
+ fData = SkData::MakeEmpty();
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(size_t size) {
+ fData = SkData::MakeUninitialized(size);
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(const void* src, size_t size, bool copyData) {
+ fData = newFromParams(src, size, copyData);
+ fOffset = 0;
+}
+
+SkMemoryStream::SkMemoryStream(sk_sp<SkData> data) : fData(std::move(data)) {
+ if (nullptr == fData) {
+ fData = SkData::MakeEmpty();
+ }
+ fOffset = 0;
+}
+
+std::unique_ptr<SkMemoryStream> SkMemoryStream::MakeCopy(const void* data, size_t length) {
+ return std::make_unique<SkMemoryStream>(data, length, true);
+}
+
+std::unique_ptr<SkMemoryStream> SkMemoryStream::MakeDirect(const void* data, size_t length) {
+ return std::make_unique<SkMemoryStream>(data, length, false);
+}
+
+std::unique_ptr<SkMemoryStream> SkMemoryStream::Make(sk_sp<SkData> data) {
+ return std::make_unique<SkMemoryStream>(std::move(data));
+}
+
+void SkMemoryStream::setMemoryOwned(const void* src, size_t size) {
+ fData = SkData::MakeFromMalloc(src, size);
+ fOffset = 0;
+}
+
+void SkMemoryStream::setMemory(const void* src, size_t size, bool copyData) {
+ fData = newFromParams(src, size, copyData);
+ fOffset = 0;
+}
+
+void SkMemoryStream::setData(sk_sp<SkData> data) {
+ if (nullptr == data) {
+ fData = SkData::MakeEmpty();
+ } else {
+ fData = data;
+ }
+ fOffset = 0;
+}
+
+void SkMemoryStream::skipToAlign4() {
+ // cast to remove unary-minus warning
+ fOffset += -(int)fOffset & 0x03;
+}
+
+size_t SkMemoryStream::read(void* buffer, size_t size) {
+ size_t dataSize = fData->size();
+
+ if (size > dataSize - fOffset) {
+ size = dataSize - fOffset;
+ }
+ if (buffer) {
+ memcpy(buffer, fData->bytes() + fOffset, size);
+ }
+ fOffset += size;
+ return size;
+}
+
// Copy up to 'size' bytes without consuming them. Implemented as read() plus
// restore: save the offset, do a normal read, then put the offset back. The
// const_cast is safe because the net state change is zero.
size_t SkMemoryStream::peek(void* buffer, size_t size) const {
    SkASSERT(buffer != nullptr);

    const size_t currentOffset = fOffset;
    SkMemoryStream* nonConstThis = const_cast<SkMemoryStream*>(this);
    const size_t bytesRead = nonConstThis->read(buffer, size);
    nonConstThis->fOffset = currentOffset;
    return bytesRead;
}
+
+bool SkMemoryStream::isAtEnd() const {
+ return fOffset == fData->size();
+}
+
+bool SkMemoryStream::rewind() {
+ fOffset = 0;
+ return true;
+}
+
+SkMemoryStream* SkMemoryStream::onDuplicate() const {
+ return new SkMemoryStream(fData);
+}
+
+size_t SkMemoryStream::getPosition() const {
+ return fOffset;
+}
+
+bool SkMemoryStream::seek(size_t position) {
+ fOffset = position > fData->size()
+ ? fData->size()
+ : position;
+ return true;
+}
+
+bool SkMemoryStream::move(long offset) {
+ return this->seek(fOffset + offset);
+}
+
+SkMemoryStream* SkMemoryStream::onFork() const {
+ std::unique_ptr<SkMemoryStream> that(this->duplicate());
+ that->seek(fOffset);
+ return that.release();
+}
+
+size_t SkMemoryStream::getLength() const {
+ return fData->size();
+}
+
+const void* SkMemoryStream::getMemoryBase() {
+ return fData->data();
+}
+
+const void* SkMemoryStream::getAtPos() {
+ return fData->bytes() + fOffset;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkFILEWStream::SkFILEWStream(const char path[])
+{
+ fFILE = sk_fopen(path, kWrite_SkFILE_Flag);
+}
+
+SkFILEWStream::~SkFILEWStream()
+{
+ if (fFILE) {
+ sk_fclose(fFILE);
+ }
+}
+
+size_t SkFILEWStream::bytesWritten() const {
+ return sk_ftell(fFILE);
+}
+
// Write 'size' bytes. Failures are permanent: a short write closes the FILE
// and nulls fFILE, so every subsequent write returns false as well.
bool SkFILEWStream::write(const void* buffer, size_t size)
{
    if (fFILE == nullptr) {
        return false;
    }

    if (sk_fwrite(buffer, size, fFILE) != size)
    {
        SkDEBUGCODE(SkDebugf("SkFILEWStream failed writing %zu bytes\n", size);)
        // Close and latch into the failed state.
        sk_fclose(fFILE);
        fFILE = nullptr;
        return false;
    }
    return true;
}
+
+void SkFILEWStream::flush()
+{
+ if (fFILE) {
+ sk_fflush(fFILE);
+ }
+}
+
+void SkFILEWStream::fsync()
+{
+ flush();
+ if (fFILE) {
+ sk_fsync(fFILE);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+
// memcpy that special-cases the dominant 4-byte size with a compile-time
// constant, letting the compiler emit a single word copy for that path.
static inline void sk_memcpy_4bytes(void* dst, const void* src, size_t size) {
    if (4 == size) {
        memcpy(dst, src, 4);
        return;
    }
    memcpy(dst, src, size);
}
+
+#define SkDynamicMemoryWStream_MinBlockSize 4096
+
// One node of the writer's singly-linked chain. The payload bytes live
// immediately after the Block header itself: each block is allocated as
// sizeof(Block) + capacity bytes (see write()/init()).
struct SkDynamicMemoryWStream::Block {
    Block* fNext;    // next block in the chain, or nullptr for the tail
    char* fCurr;     // next byte to write within this block
    char* fStop;     // one past the end of this block's storage

    // Start of the payload (just past the header).
    const char* start() const { return (const char*)(this + 1); }
    char* start() { return (char*)(this + 1); }
    // Unwritten capacity remaining in this block.
    size_t avail() const { return fStop - fCurr; }
    // Bytes already written into this block.
    size_t written() const { return fCurr - this->start(); }

    // Initialize a freshly malloc'd block with 'size' bytes of capacity.
    void init(size_t size) {
        fNext = nullptr;
        fCurr = this->start();
        fStop = this->start() + size;
    }

    // Copy 'size' bytes into this block (must fit) and return the source
    // pointer advanced past the consumed bytes.
    const void* append(const void* data, size_t size) {
        SkASSERT((size_t)(fStop - fCurr) >= size);
        sk_memcpy_4bytes(fCurr, data, size);
        fCurr += size;
        return (const void*)((const char*)data + size);
    }
};
+
+SkDynamicMemoryWStream::SkDynamicMemoryWStream(SkDynamicMemoryWStream&& other)
+ : fHead(other.fHead)
+ , fTail(other.fTail)
+ , fBytesWrittenBeforeTail(other.fBytesWrittenBeforeTail)
+{
+ other.fHead = nullptr;
+ other.fTail = nullptr;
+ other.fBytesWrittenBeforeTail = 0;
+}
+
+SkDynamicMemoryWStream& SkDynamicMemoryWStream::operator=(SkDynamicMemoryWStream&& other) {
+ if (this != &other) {
+ this->~SkDynamicMemoryWStream();
+ new (this) SkDynamicMemoryWStream(std::move(other));
+ }
+ return *this;
+}
+
+SkDynamicMemoryWStream::~SkDynamicMemoryWStream() {
+ this->reset();
+}
+
+void SkDynamicMemoryWStream::reset() {
+ Block* block = fHead;
+ while (block != nullptr) {
+ Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+}
+
+size_t SkDynamicMemoryWStream::bytesWritten() const {
+ this->validate();
+
+ if (fTail) {
+ return fBytesWrittenBeforeTail + fTail->written();
+ }
+ return 0;
+}
+
// Append 'count' bytes. Fills the tail block first; any remainder goes into a
// single newly allocated block sized for it (at least MinBlockSize). Always
// succeeds (allocation failures throw via sk_malloc_throw).
bool SkDynamicMemoryWStream::write(const void* buffer, size_t count) {
    if (count > 0) {
        SkASSERT(buffer);
        size_t size;

        if (fTail) {
            if (fTail->avail() > 0) {
                size = std::min(fTail->avail(), count);
                // append() returns the source pointer advanced past the bytes
                // it consumed.
                buffer = fTail->append(buffer, size);
                SkASSERT(count >= size);
                count -= size;
                if (count == 0) {
                    return true;
                }
            }
            // If we get here, we've just exhausted fTail, so update our tracker
            fBytesWrittenBeforeTail += fTail->written();
        }

        size = std::max<size_t>(count, SkDynamicMemoryWStream_MinBlockSize - sizeof(Block));
        size = SkAlign4(size); // ensure we're always a multiple of 4 (see padToAlign4())

        // Header and payload are one allocation; see Block::start().
        Block* block = (Block*)sk_malloc_throw(sizeof(Block) + size);
        block->init(size);
        block->append(buffer, count);

        // Link the new block in; it becomes the tail (and the head if first).
        if (fTail != nullptr) {
            fTail->fNext = block;
        } else {
            fHead = fTail = block;
        }
        fTail = block;
        this->validate();
    }
    return true;
}
+
+bool SkDynamicMemoryWStream::writeToAndReset(SkDynamicMemoryWStream* dst) {
+ SkASSERT(dst);
+ SkASSERT(dst != this);
+ if (0 == this->bytesWritten()) {
+ return true;
+ }
+ if (0 == dst->bytesWritten()) {
+ *dst = std::move(*this);
+ return true;
+ }
+ dst->fTail->fNext = fHead;
+ dst->fBytesWrittenBeforeTail += fBytesWrittenBeforeTail + dst->fTail->written();
+ dst->fTail = fTail;
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+ return true;
+}
+
+void SkDynamicMemoryWStream::prependToAndReset(SkDynamicMemoryWStream* dst) {
+ SkASSERT(dst);
+ SkASSERT(dst != this);
+ if (0 == this->bytesWritten()) {
+ return;
+ }
+ if (0 == dst->bytesWritten()) {
+ *dst = std::move(*this);
+ return;
+ }
+ fTail->fNext = dst->fHead;
+ dst->fHead = fHead;
+ dst->fBytesWrittenBeforeTail += fBytesWrittenBeforeTail + fTail->written();
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+ return;
+}
+
+
// Random-access read of previously written bytes into 'buffer'. All-or-
// nothing: if [offset, offset + count) extends past bytesWritten(), nothing is
// copied and false is returned.
bool SkDynamicMemoryWStream::read(void* buffer, size_t offset, size_t count) {
    if (offset + count > this->bytesWritten()) {
        return false; // test does not partially modify
    }
    // Walk the chain: skip whole blocks until 'offset' lands inside one, then
    // copy pieces until 'count' bytes have been gathered.
    Block* block = fHead;
    while (block != nullptr) {
        size_t size = block->written();
        if (offset < size) {
            // Copy what this block holds of the requested range.
            size_t part = offset + count > size ? size - offset : count;
            memcpy(buffer, block->start() + offset, part);
            if (count <= part) {
                return true;
            }
            count -= part;
            buffer = (void*) ((char* ) buffer + part);
        }
        offset = offset > size ? offset - size : 0;
        block = block->fNext;
    }
    return false;
}
+
+void SkDynamicMemoryWStream::copyTo(void* dst) const {
+ SkASSERT(dst);
+ Block* block = fHead;
+ while (block != nullptr) {
+ size_t size = block->written();
+ memcpy(dst, block->start(), size);
+ dst = (void*)((char*)dst + size);
+ block = block->fNext;
+ }
+}
+
+bool SkDynamicMemoryWStream::writeToStream(SkWStream* dst) const {
+ SkASSERT(dst);
+ for (Block* block = fHead; block != nullptr; block = block->fNext) {
+ if (!dst->write(block->start(), block->written())) {
+ return false;
+ }
+ }
+ return true;
+}
+
void SkDynamicMemoryWStream::padToAlign4() {
    // The contract is to write zeros until the entire stream has written a multiple of 4 bytes.
    // Our Blocks are guaranteed always be (a) full (except the tail) and (b) a multiple of 4
    // so it is sufficient to just examine the tail (if present).

    if (fTail) {
        // cast to remove unary-minus warning
        int padBytes = -(int)fTail->written() & 0x03;   // 0..3 zero bytes needed
        if (padBytes) {
            int zero = 0;
            // Safe: block capacities are 4-aligned (see write()), so the tail
            // always has room for the pad.
            fTail->append(&zero, padBytes);
        }
    }
}
+
+
+void SkDynamicMemoryWStream::copyToAndReset(void* ptr) {
+ if (!ptr) {
+ this->reset();
+ return;
+ }
+ // By looping through the source and freeing as we copy, we
+ // can reduce real memory use with large streams.
+ char* dst = reinterpret_cast<char*>(ptr);
+ Block* block = fHead;
+ while (block != nullptr) {
+ size_t len = block->written();
+ memcpy(dst, block->start(), len);
+ dst += len;
+ Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+}
+
+bool SkDynamicMemoryWStream::writeToAndReset(SkWStream* dst) {
+ SkASSERT(dst);
+ // By looping through the source and freeing as we copy, we
+ // can reduce real memory use with large streams.
+ bool dstStreamGood = true;
+ for (Block* block = fHead; block != nullptr; ) {
+ if (dstStreamGood && !dst->write(block->start(), block->written())) {
+ dstStreamGood = false;
+ }
+ Block* next = block->fNext;
+ sk_free(block);
+ block = next;
+ }
+ fHead = fTail = nullptr;
+ fBytesWrittenBeforeTail = 0;
+ return dstStreamGood;
+}
+
+sk_sp<SkData> SkDynamicMemoryWStream::detachAsData() {
+ const size_t size = this->bytesWritten();
+ if (0 == size) {
+ return SkData::MakeEmpty();
+ }
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ this->copyToAndReset(data->writable_data());
+ return data;
+}
+
+#ifdef SK_DEBUG
+void SkDynamicMemoryWStream::validate() const {
+ if (!fHead) {
+ SkASSERT(!fTail);
+ SkASSERT(fBytesWrittenBeforeTail == 0);
+ return;
+ }
+ SkASSERT(fTail);
+
+ size_t bytes = 0;
+ const Block* block = fHead;
+ while (block) {
+ if (block->fNext) {
+ bytes += block->written();
+ }
+ block = block->fNext;
+ }
+ SkASSERT(bytes == fBytesWrittenBeforeTail);
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
// Shared owner of a SkDynamicMemoryWStream block chain: frees every block when
// the last reference is dropped. This lets multiple SkBlockMemoryStreams
// (duplicates/forks) read from one chain safely.
class SkBlockMemoryRefCnt : public SkRefCnt {
public:
    explicit SkBlockMemoryRefCnt(SkDynamicMemoryWStream::Block* head) : fHead(head) { }

    ~SkBlockMemoryRefCnt() override {
        SkDynamicMemoryWStream::Block* block = fHead;
        while (block != nullptr) {
            SkDynamicMemoryWStream::Block* next = block->fNext;
            sk_free(block);
            block = next;
        }
    }

    SkDynamicMemoryWStream::Block* const fHead;   // owned chain; freed above
};
+
// Read-only SkStreamAsset over a (shared) SkDynamicMemoryWStream block chain.
// fOffset is the logical position within the whole stream; (fCurrent,
// fCurrentOffset) locate that same position inside the chain.
class SkBlockMemoryStream : public SkStreamAsset {
public:
    SkBlockMemoryStream(sk_sp<SkBlockMemoryRefCnt> headRef, size_t size)
        : fBlockMemory(std::move(headRef)), fCurrent(fBlockMemory->fHead)
        , fSize(size) , fOffset(0), fCurrentOffset(0) { }

    // Read up to rawCount bytes; a null buffer means "skip" (advance only).
    size_t read(void* buffer, size_t rawCount) override {
        // Clamp to the bytes remaining, then copy piecewise out of the chain.
        size_t count = rawCount;
        if (fOffset + count > fSize) {
            count = fSize - fOffset;
        }
        size_t bytesLeftToRead = count;
        while (fCurrent != nullptr) {
            size_t bytesLeftInCurrent = fCurrent->written() - fCurrentOffset;
            size_t bytesFromCurrent = std::min(bytesLeftToRead, bytesLeftInCurrent);
            if (buffer) {
                memcpy(buffer, fCurrent->start() + fCurrentOffset, bytesFromCurrent);
                buffer = SkTAddOffset<void>(buffer, bytesFromCurrent);
            }
            if (bytesLeftToRead <= bytesFromCurrent) {
                fCurrentOffset += bytesFromCurrent;
                fOffset += count;
                return count;
            }
            bytesLeftToRead -= bytesFromCurrent;
            fCurrent = fCurrent->fNext;
            fCurrentOffset = 0;
        }
        // Only reachable if the chain holds fewer bytes than fSize claims.
        SkASSERT(false);
        return 0;
    }

    bool isAtEnd() const override {
        return fOffset == fSize;
    }

    // Like read(), but restores no state because it never mutates any: it walks
    // the chain with local copies of the cursor.
    size_t peek(void* buff, size_t bytesToPeek) const override {
        SkASSERT(buff != nullptr);

        bytesToPeek = std::min(bytesToPeek, fSize - fOffset);

        size_t bytesLeftToPeek = bytesToPeek;
        char* buffer = static_cast<char*>(buff);
        const SkDynamicMemoryWStream::Block* current = fCurrent;
        size_t currentOffset = fCurrentOffset;
        while (bytesLeftToPeek) {
            SkASSERT(current);
            size_t bytesFromCurrent = std::min(current->written() - currentOffset, bytesLeftToPeek);
            memcpy(buffer, current->start() + currentOffset, bytesFromCurrent);
            bytesLeftToPeek -= bytesFromCurrent;
            buffer += bytesFromCurrent;
            current = current->fNext;
            currentOffset = 0;
        }
        return bytesToPeek;
    }

    bool rewind() override {
        fCurrent = fBlockMemory->fHead;
        fOffset = 0;
        fCurrentOffset = 0;
        return true;
    }

    // A duplicate shares the chain (via the ref-counted holder) but starts at 0.
    SkBlockMemoryStream* onDuplicate() const override {
        return new SkBlockMemoryStream(fBlockMemory, fSize);
    }

    size_t getPosition() const override {
        return fOffset;
    }

    bool seek(size_t position) override {
        // If possible, skip forward.
        if (position >= fOffset) {
            size_t skipAmount = position - fOffset;
            return this->skip(skipAmount) == skipAmount;
        }
        // If possible, move backward within the current block.
        size_t moveBackAmount = fOffset - position;
        if (moveBackAmount <= fCurrentOffset) {
            fCurrentOffset -= moveBackAmount;
            fOffset -= moveBackAmount;
            return true;
        }
        // Otherwise rewind and move forward.
        return this->rewind() && this->skip(position) == position;
    }

    bool move(long offset) override {
        return seek(fOffset + offset);
    }

    // A fork is a duplicate positioned at our current location.
    SkBlockMemoryStream* onFork() const override {
        SkBlockMemoryStream* that = this->onDuplicate();
        that->fCurrent = this->fCurrent;
        that->fOffset = this->fOffset;
        that->fCurrentOffset = this->fCurrentOffset;
        return that;
    }

    size_t getLength() const override {
        return fSize;
    }

    // Contiguous access is only possible when the chain is a single block.
    const void* getMemoryBase() override {
        if (fBlockMemory->fHead && !fBlockMemory->fHead->fNext) {
            return fBlockMemory->fHead->start();
        }
        return nullptr;
    }

private:
    sk_sp<SkBlockMemoryRefCnt> const fBlockMemory;   // shared owner of the chain
    SkDynamicMemoryWStream::Block const * fCurrent;  // block containing fOffset
    size_t const fSize;                              // total stream length
    size_t fOffset;                                  // logical position
    size_t fCurrentOffset;                           // position within fCurrent
};
+
// Hand the block chain itself to a read-only stream (no copying of the data);
// this writer is left empty afterwards.
std::unique_ptr<SkStreamAsset> SkDynamicMemoryWStream::detachAsStream() {
    if (nullptr == fHead) {
        // no need to reset.
        return SkMemoryStream::Make(nullptr);
    }
    if (fHead == fTail) { // one block, may be worth shrinking.
        // Shrink the single block's allocation down to just the bytes used.
        ptrdiff_t used = fTail->fCurr - (char*)fTail;
        fHead = fTail = (SkDynamicMemoryWStream::Block*)sk_realloc_throw(fTail, SkToSizeT(used));
        fTail->fStop = fTail->fCurr = (char*)fTail + used; // Update pointers.
        SkASSERT(nullptr == fTail->fNext);
        SkASSERT(0 == fBytesWrittenBeforeTail);
    }
    // The ref-counted holder takes ownership of (and will eventually free)
    // the chain.
    std::unique_ptr<SkStreamAsset> stream
            = std::make_unique<SkBlockMemoryStream>(sk_make_sp<SkBlockMemoryRefCnt>(fHead),
                                                    this->bytesWritten());
    fHead = nullptr; // signal reset() to not free anything
    this->reset();
    return stream;
}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkDebugfStream::write(const void* buffer, size_t size) {
+ SkDebugf("%.*s", (int)size, (const char*)buffer);
+ fBytesWritten += size;
+ return true;
+}
+
+size_t SkDebugfStream::bytesWritten() const {
+ return fBytesWritten;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkData> mmap_filename(const char path[]) {
+ FILE* file = sk_fopen(path, kRead_SkFILE_Flag);
+ if (nullptr == file) {
+ return nullptr;
+ }
+
+ auto data = SkData::MakeFromFILE(file);
+ sk_fclose(file);
+ return data;
+}
+
+std::unique_ptr<SkStreamAsset> SkStream::MakeFromFile(const char path[]) {
+ auto data(mmap_filename(path));
+ if (data) {
+ return std::make_unique<SkMemoryStream>(std::move(data));
+ }
+
+ // If we get here, then our attempt at using mmap failed, so try normal file access.
+ auto stream = std::make_unique<SkFILEStream>(path);
+ if (!stream->isValid()) {
+ return nullptr;
+ }
+ return std::move(stream);
+}
+
// Declared in SkStreamPriv.h:
// Copy the remainder of 'stream' into a new SkData. Assumes the stream is at
// its beginning when it reports a length (see the header comment).
sk_sp<SkData> SkCopyStreamToData(SkStream* stream) {
    SkASSERT(stream != nullptr);

    // Fast path: a known length lets us copy in one shot.
    if (stream->hasLength()) {
        return SkData::MakeFromStream(stream, stream->getLength());
    }

    // Otherwise pump through a fixed buffer until EOF, accumulating into a
    // dynamic stream, then snapshot that as SkData.
    SkDynamicMemoryWStream tempStream;
    const size_t bufferSize = 4096;
    char buffer[bufferSize];
    do {
        size_t bytesRead = stream->read(buffer, bufferSize);
        tempStream.write(buffer, bytesRead);
    } while (!stream->isAtEnd());
    return tempStream.detachAsData();
}
+
+bool SkStreamCopy(SkWStream* out, SkStream* input) {
+ const char* base = static_cast<const char*>(input->getMemoryBase());
+ if (base && input->hasPosition() && input->hasLength()) {
+ // Shortcut that avoids the while loop.
+ size_t position = input->getPosition();
+ size_t length = input->getLength();
+ SkASSERT(length >= position);
+ return out->write(&base[position], length - position);
+ }
+ char scratch[4096];
+ size_t count;
+ while (true) {
+ count = input->read(scratch, sizeof(scratch));
+ if (0 == count) {
+ return true;
+ }
+ if (!out->write(scratch, count)) {
+ return false;
+ }
+ }
+}
+
+bool StreamRemainingLengthIsBelow(SkStream* stream, size_t len) {
+ if (stream->hasLength() && stream->hasPosition()) {
+ size_t remainingBytes = stream->getLength() - stream->getPosition();
+ return len > remainingBytes;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkStreamPriv.h b/gfx/skia/skia/src/core/SkStreamPriv.h
new file mode 100644
index 0000000000..3aa7b6202b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStreamPriv.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStreamPriv_DEFINED
+#define SkStreamPriv_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+
+class SkData;
+
+/**
+ * Copy the provided stream to an SkData variable.
+ *
+ * Note: Assumes the stream is at the beginning. If it has a length,
+ * but is not at the beginning, this call will fail (return NULL).
+ *
+ * @param stream SkStream to be copied into data.
+ * @return The resulting SkData after the copy, nullptr on failure.
+ */
+sk_sp<SkData> SkCopyStreamToData(SkStream* stream);
+
+/**
+ * Copies the input stream from the current position to the end.
+ * Does not rewind the input stream.
+ */
+bool SkStreamCopy(SkWStream* out, SkStream* input);
+
+/** A SkWStream that writes all output to SkDebugf, for debugging purposes. */
+class SkDebugfStream final : public SkWStream {
+public:
+ bool write(const void* buffer, size_t size) override;
+ size_t bytesWritten() const override;
+
+private:
+ size_t fBytesWritten = 0;
+};
+
+// If the stream supports identifying the current position and total length, this returns
+// true if there are not enough bytes in the stream to fulfill a read of the given length.
+// Otherwise, it returns false.
+// False does *not* mean a read will succeed of the given length, but true means we are
+// certain it will fail.
+bool StreamRemainingLengthIsBelow(SkStream* stream, size_t len);
+
+#endif // SkStreamPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkStrike.cpp b/gfx/skia/skia/src/core/SkStrike.cpp
new file mode 100644
index 0000000000..8020df22bc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrike.cpp
@@ -0,0 +1,456 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrike.h"
+
+#include "include/core/SkDrawable.h"
+#include "include/core/SkGraphics.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkAssert.h"
+#include "src/core/SkDistanceFieldGen.h"
+#include "src/core/SkEnumerate.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/text/StrikeForGPU.h"
+
+#include <cctype>
+#include <optional>
+
+#if defined(SK_GANESH)
+ #include "src/text/gpu/StrikeCache.h"
+#endif
+
+using namespace skglyph;
+
+static SkFontMetrics use_or_generate_metrics(
+ const SkFontMetrics* metrics, SkScalerContext* context) {
+ SkFontMetrics answer;
+ if (metrics) {
+ answer = *metrics;
+ } else {
+ context->getFontMetrics(&answer);
+ }
+ return answer;
+}
+
+SkStrike::SkStrike(SkStrikeCache* strikeCache,
+ const SkStrikeSpec& strikeSpec,
+ std::unique_ptr<SkScalerContext> scaler,
+ const SkFontMetrics* metrics,
+ std::unique_ptr<SkStrikePinner> pinner)
+ : fFontMetrics{use_or_generate_metrics(metrics, scaler.get())}
+ , fRoundingSpec{scaler->isSubpixel(),
+ scaler->computeAxisAlignmentForHText()}
+ , fStrikeSpec{strikeSpec}
+ , fStrikeCache{strikeCache}
+ , fScalerContext{std::move(scaler)}
+ , fPinner{std::move(pinner)} {
+ SkASSERT(fScalerContext != nullptr);
+}
+
+class SK_SCOPED_CAPABILITY SkStrike::Monitor {
+public:
+ Monitor(SkStrike* strike) SK_ACQUIRE(strike->fStrikeLock)
+ : fStrike{strike} {
+ fStrike->lock();
+ }
+
+ ~Monitor() SK_RELEASE_CAPABILITY() {
+ fStrike->unlock();
+ }
+
+private:
+ SkStrike* const fStrike;
+};
+
+void SkStrike::lock() {
+ fStrikeLock.acquire();
+ fMemoryIncrease = 0;
+}
+
+void SkStrike::unlock() {
+ const size_t memoryIncrease = fMemoryIncrease;
+ fStrikeLock.release();
+ this->updateMemoryUsage(memoryIncrease);
+}
+
+void
+SkStrike::FlattenGlyphsByType(SkWriteBuffer& buffer,
+ SkSpan<SkGlyph> images,
+ SkSpan<SkGlyph> paths,
+ SkSpan<SkGlyph> drawables) {
+ SkASSERT_RELEASE(SkTFitsIn<int>(images.size()) &&
+ SkTFitsIn<int>(paths.size()) &&
+ SkTFitsIn<int>(drawables.size()));
+
+ buffer.writeInt(images.size());
+ for (SkGlyph& glyph : images) {
+ SkASSERT(SkMask::IsValidFormat(glyph.maskFormat()));
+ glyph.flattenMetrics(buffer);
+ glyph.flattenImage(buffer);
+ }
+
+ buffer.writeInt(paths.size());
+ for (SkGlyph& glyph : paths) {
+ SkASSERT(SkMask::IsValidFormat(glyph.maskFormat()));
+ glyph.flattenMetrics(buffer);
+ glyph.flattenPath(buffer);
+ }
+
+ buffer.writeInt(drawables.size());
+ for (SkGlyph& glyph : drawables) {
+ SkASSERT(SkMask::IsValidFormat(glyph.maskFormat()));
+ glyph.flattenMetrics(buffer);
+ glyph.flattenDrawable(buffer);
+ }
+}
+
+bool SkStrike::mergeFromBuffer(SkReadBuffer& buffer) {
+ // Read glyphs with images for the current strike.
+ const int imagesCount = buffer.readInt();
+ if (imagesCount == 0 && !buffer.isValid()) {
+ return false;
+ }
+
+ {
+ Monitor m{this};
+ for (int curImage = 0; curImage < imagesCount; ++curImage) {
+ if (!this->mergeGlyphAndImageFromBuffer(buffer)) {
+ return false;
+ }
+ }
+ }
+
+ // Read glyphs with paths for the current strike.
+ const int pathsCount = buffer.readInt();
+ if (pathsCount == 0 && !buffer.isValid()) {
+ return false;
+ }
+ {
+ Monitor m{this};
+ for (int curPath = 0; curPath < pathsCount; ++curPath) {
+ if (!this->mergeGlyphAndPathFromBuffer(buffer)) {
+ return false;
+ }
+ }
+ }
+
+ // Read glyphs with drawables for the current strike.
+ const int drawablesCount = buffer.readInt();
+ if (drawablesCount == 0 && !buffer.isValid()) {
+ return false;
+ }
+ {
+ Monitor m{this};
+ for (int curDrawable = 0; curDrawable < drawablesCount; ++curDrawable) {
+ if (!this->mergeGlyphAndDrawableFromBuffer(buffer)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+SkGlyph* SkStrike::mergeGlyphAndImage(SkPackedGlyphID toID, const SkGlyph& fromGlyph) {
+ Monitor m{this};
+ // TODO(herb): remove finding the glyph when setting the metrics and image are separated
+ SkGlyphDigest* digest = fDigestForPackedGlyphID.find(toID);
+ if (digest != nullptr) {
+ SkGlyph* glyph = fGlyphForIndex[digest->index()];
+ if (fromGlyph.setImageHasBeenCalled()) {
+ if (glyph->setImageHasBeenCalled()) {
+ // Should never set an image on a glyph which already has an image.
+ SkDEBUGFAIL("Re-adding image to existing glyph. This should not happen.");
+ }
+ // TODO: assert that any metrics on fromGlyph are the same.
+ fMemoryIncrease += glyph->setMetricsAndImage(&fAlloc, fromGlyph);
+ }
+ return glyph;
+ } else {
+ SkGlyph* glyph = fAlloc.make<SkGlyph>(toID);
+ fMemoryIncrease += glyph->setMetricsAndImage(&fAlloc, fromGlyph) + sizeof(SkGlyph);
+ (void)this->addGlyphAndDigest(glyph);
+ return glyph;
+ }
+}
+
+const SkPath* SkStrike::mergePath(SkGlyph* glyph, const SkPath* path, bool hairline) {
+ Monitor m{this};
+ if (glyph->setPathHasBeenCalled()) {
+ SkDEBUGFAIL("Re-adding path to existing glyph. This should not happen.");
+ }
+ if (glyph->setPath(&fAlloc, path, hairline)) {
+ fMemoryIncrease += glyph->path()->approximateBytesUsed();
+ }
+
+ return glyph->path();
+}
+
+const SkDrawable* SkStrike::mergeDrawable(SkGlyph* glyph, sk_sp<SkDrawable> drawable) {
+ Monitor m{this};
+ if (glyph->setDrawableHasBeenCalled()) {
+ SkDEBUGFAIL("Re-adding drawable to existing glyph. This should not happen.");
+ }
+ if (glyph->setDrawable(&fAlloc, std::move(drawable))) {
+ fMemoryIncrease += glyph->drawable()->approximateBytesUsed();
+ SkASSERT(fMemoryIncrease > 0);
+ }
+
+ return glyph->drawable();
+}
+
+void SkStrike::findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ SkGlyph* glyph, SkScalar* array, int* count) {
+ SkAutoMutexExclusive lock{fStrikeLock};
+ glyph->ensureIntercepts(bounds, scale, xPos, array, count, &fAlloc);
+}
+
+SkSpan<const SkGlyph*> SkStrike::metrics(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) {
+ Monitor m{this};
+ return this->internalPrepare(glyphIDs, kMetricsOnly, results);
+}
+
+SkSpan<const SkGlyph*> SkStrike::preparePaths(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) {
+ Monitor m{this};
+ return this->internalPrepare(glyphIDs, kMetricsAndPath, results);
+}
+
+SkSpan<const SkGlyph*> SkStrike::prepareImages(
+ SkSpan<const SkPackedGlyphID> glyphIDs, const SkGlyph* results[]) {
+ const SkGlyph** cursor = results;
+ Monitor m{this};
+ for (auto glyphID : glyphIDs) {
+ SkGlyph* glyph = this->glyph(glyphID);
+ this->prepareForImage(glyph);
+ *cursor++ = glyph;
+ }
+
+ return {results, glyphIDs.size()};
+}
+
+SkSpan<const SkGlyph*> SkStrike::prepareDrawables(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) {
+ const SkGlyph** cursor = results;
+ {
+ Monitor m{this};
+ for (auto glyphID : glyphIDs) {
+ SkGlyph* glyph = this->glyph(SkPackedGlyphID{glyphID});
+ this->prepareForDrawable(glyph);
+ *cursor++ = glyph;
+ }
+ }
+
+ return {results, glyphIDs.size()};
+}
+
+void SkStrike::glyphIDsToPaths(SkSpan<sktext::IDOrPath> idsOrPaths) {
+ Monitor m{this};
+ for (sktext::IDOrPath& idOrPath : idsOrPaths) {
+ SkGlyph* glyph = this->glyph(SkPackedGlyphID{idOrPath.fGlyphID});
+ this->prepareForPath(glyph);
+ new (&idOrPath.fPath) SkPath{*glyph->path()};
+ }
+}
+
+void SkStrike::glyphIDsToDrawables(SkSpan<sktext::IDOrDrawable> idsOrDrawables) {
+ Monitor m{this};
+ for (sktext::IDOrDrawable& idOrDrawable : idsOrDrawables) {
+ SkGlyph* glyph = this->glyph(SkPackedGlyphID{idOrDrawable.fGlyphID});
+ this->prepareForDrawable(glyph);
+ SkASSERT(glyph->drawable() != nullptr);
+ idOrDrawable.fDrawable = glyph->drawable();
+ }
+}
+
+void SkStrike::dump() const {
+ SkAutoMutexExclusive lock{fStrikeLock};
+ const SkTypeface* face = fScalerContext->getTypeface();
+ const SkScalerContextRec& rec = fScalerContext->getRec();
+ SkMatrix matrix;
+ rec.getSingleMatrix(&matrix);
+ matrix.preScale(SkScalarInvert(rec.fTextSize), SkScalarInvert(rec.fTextSize));
+ SkString name;
+ face->getFamilyName(&name);
+
+ SkString msg;
+ SkFontStyle style = face->fontStyle();
+ msg.printf("cache typeface:%x %25s:(%d,%d,%d)\n %s glyphs:%3d",
+ face->uniqueID(), name.c_str(), style.weight(), style.width(), style.slant(),
+ rec.dump().c_str(), fDigestForPackedGlyphID.count());
+ SkDebugf("%s\n", msg.c_str());
+}
+
+void SkStrike::dumpMemoryStatistics(SkTraceMemoryDump* dump) const {
+ SkAutoMutexExclusive lock{fStrikeLock};
+ const SkTypeface* face = fScalerContext->getTypeface();
+ const SkScalerContextRec& rec = fScalerContext->getRec();
+
+ SkString fontName;
+ face->getFamilyName(&fontName);
+ // Replace all special characters with '_'.
+ for (size_t index = 0; index < fontName.size(); ++index) {
+ if (!std::isalnum(fontName[index])) {
+ fontName[index] = '_';
+ }
+ }
+
+ SkString dumpName = SkStringPrintf("%s/%s_%d/%p",
+ SkStrikeCache::kGlyphCacheDumpName,
+ fontName.c_str(),
+ rec.fTypefaceID,
+ this);
+
+ dump->dumpNumericValue(dumpName.c_str(), "size", "bytes", fMemoryUsed);
+ dump->dumpNumericValue(dumpName.c_str(),
+ "glyph_count", "objects",
+ fDigestForPackedGlyphID.count());
+ dump->setMemoryBacking(dumpName.c_str(), "malloc", nullptr);
+}
+
+SkGlyph* SkStrike::glyph(SkGlyphDigest digest) {
+ return fGlyphForIndex[digest.index()];
+}
+
+SkGlyph* SkStrike::glyph(SkPackedGlyphID packedGlyphID) {
+ SkGlyphDigest digest = this->digestFor(kDirectMask, packedGlyphID);
+ return this->glyph(digest);
+}
+
+SkGlyphDigest SkStrike::digestFor(ActionType actionType, SkPackedGlyphID packedGlyphID) {
+ SkGlyphDigest* digestPtr = fDigestForPackedGlyphID.find(packedGlyphID);
+ if (digestPtr != nullptr && digestPtr->actionFor(actionType) != GlyphAction::kUnset) {
+ return *digestPtr;
+ }
+
+ SkGlyph* glyph;
+ if (digestPtr != nullptr) {
+ glyph = fGlyphForIndex[digestPtr->index()];
+ } else {
+ glyph = fAlloc.make<SkGlyph>(fScalerContext->makeGlyph(packedGlyphID, &fAlloc));
+ fMemoryIncrease += sizeof(SkGlyph);
+ digestPtr = this->addGlyphAndDigest(glyph);
+ }
+
+ digestPtr->setActionFor(actionType, glyph, this);
+
+ return *digestPtr;
+}
+
+SkGlyphDigest* SkStrike::addGlyphAndDigest(SkGlyph* glyph) {
+ size_t index = fGlyphForIndex.size();
+ SkGlyphDigest digest = SkGlyphDigest{index, *glyph};
+ SkGlyphDigest* newDigest = fDigestForPackedGlyphID.set(digest);
+ fGlyphForIndex.push_back(glyph);
+ return newDigest;
+}
+
+bool SkStrike::prepareForImage(SkGlyph* glyph) {
+ if (glyph->setImage(&fAlloc, fScalerContext.get())) {
+ fMemoryIncrease += glyph->imageSize();
+ }
+ return glyph->image() != nullptr;
+}
+
+bool SkStrike::prepareForPath(SkGlyph* glyph) {
+ if (glyph->setPath(&fAlloc, fScalerContext.get())) {
+ fMemoryIncrease += glyph->path()->approximateBytesUsed();
+ }
+ return glyph->path() !=nullptr;
+}
+
+bool SkStrike::prepareForDrawable(SkGlyph* glyph) {
+ if (glyph->setDrawable(&fAlloc, fScalerContext.get())) {
+ size_t increase = glyph->drawable()->approximateBytesUsed();
+ SkASSERT(increase > 0);
+ fMemoryIncrease += increase;
+ }
+ return glyph->drawable() != nullptr;
+}
+
+SkGlyph* SkStrike::mergeGlyphFromBuffer(SkReadBuffer& buffer) {
+ SkASSERT(buffer.isValid());
+ std::optional<SkGlyph> prototypeGlyph = SkGlyph::MakeFromBuffer(buffer);
+ if (!buffer.validate(prototypeGlyph.has_value())) {
+ return nullptr;
+ }
+
+ // Check if this glyph has already been seen.
+ SkGlyphDigest* digestPtr = fDigestForPackedGlyphID.find(prototypeGlyph->getPackedID());
+ if (digestPtr != nullptr) {
+ return fGlyphForIndex[digestPtr->index()];
+ }
+
+ // This is the first time. Allocate a new glyph.
+ SkGlyph* glyph = fAlloc.make<SkGlyph>(prototypeGlyph.value());
+ fMemoryIncrease += sizeof(SkGlyph);
+ this->addGlyphAndDigest(glyph);
+ return glyph;
+}
+
+bool SkStrike::mergeGlyphAndImageFromBuffer(SkReadBuffer& buffer) {
+ SkASSERT(buffer.isValid());
+ SkGlyph* glyph = this->mergeGlyphFromBuffer(buffer);
+ if (!buffer.validate(glyph != nullptr)) {
+ return false;
+ }
+ fMemoryIncrease += glyph->addImageFromBuffer(buffer, &fAlloc);
+ return buffer.isValid();
+}
+
+bool SkStrike::mergeGlyphAndPathFromBuffer(SkReadBuffer& buffer) {
+ SkASSERT(buffer.isValid());
+ SkGlyph* glyph = this->mergeGlyphFromBuffer(buffer);
+ if (!buffer.validate(glyph != nullptr)) {
+ return false;
+ }
+ fMemoryIncrease += glyph->addPathFromBuffer(buffer, &fAlloc);
+ return buffer.isValid();
+}
+
+bool SkStrike::mergeGlyphAndDrawableFromBuffer(SkReadBuffer& buffer) {
+ SkASSERT(buffer.isValid());
+ SkGlyph* glyph = this->mergeGlyphFromBuffer(buffer);
+ if (!buffer.validate(glyph != nullptr)) {
+ return false;
+ }
+ fMemoryIncrease += glyph->addDrawableFromBuffer(buffer, &fAlloc);
+ return buffer.isValid();
+}
+
+SkSpan<const SkGlyph*> SkStrike::internalPrepare(
+ SkSpan<const SkGlyphID> glyphIDs, PathDetail pathDetail, const SkGlyph** results) {
+ const SkGlyph** cursor = results;
+ for (auto glyphID : glyphIDs) {
+ SkGlyph* glyph = this->glyph(SkPackedGlyphID{glyphID});
+ if (pathDetail == kMetricsAndPath) {
+ this->prepareForPath(glyph);
+ }
+ *cursor++ = glyph;
+ }
+
+ return {results, glyphIDs.size()};
+}
+
+void SkStrike::updateMemoryUsage(size_t increase) {
+ if (increase > 0) {
+ // fRemoved and the cache's total memory are managed under the cache's lock. This allows
+ // them to be accessed under LRU operation.
+ SkAutoMutexExclusive lock{fStrikeCache->fLock};
+ fMemoryUsed += increase;
+ if (!fRemoved) {
+ fStrikeCache->fTotalMemoryUsed += increase;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkStrike.h b/gfx/skia/skia/src/core/SkStrike.h
new file mode 100644
index 0000000000..72eca3b486
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrike.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+ */
+
+#ifndef SkStrike_DEFINED
+#define SkStrike_DEFINED
+
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkGlyphRunPainter.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTHash.h"
+
+#include <memory>
+
+class SkScalerContext;
+class SkStrikeCache;
+class SkTraceMemoryDump;
+
+namespace sktext {
+union IDOrPath;
+union IDOrDrawable;
+} // namespace sktext
+
+class SkStrikePinner {
+public:
+ virtual ~SkStrikePinner() = default;
+ virtual bool canDelete() = 0;
+ virtual void assertValid() {}
+};
+
+// This class holds the results of an SkScalerContext, and owns a reference to that scaler.
+class SkStrike final : public sktext::StrikeForGPU {
+public:
+ SkStrike(SkStrikeCache* strikeCache,
+ const SkStrikeSpec& strikeSpec,
+ std::unique_ptr<SkScalerContext> scaler,
+ const SkFontMetrics* metrics,
+ std::unique_ptr<SkStrikePinner> pinner);
+
+ void lock() override SK_ACQUIRE(fStrikeLock);
+ void unlock() override SK_RELEASE_CAPABILITY(fStrikeLock);
+ SkGlyphDigest digestFor(skglyph::ActionType, SkPackedGlyphID) override SK_REQUIRES(fStrikeLock);
+ bool prepareForImage(SkGlyph* glyph) override SK_REQUIRES(fStrikeLock);
+ bool prepareForPath(SkGlyph*) override SK_REQUIRES(fStrikeLock);
+ bool prepareForDrawable(SkGlyph*) override SK_REQUIRES(fStrikeLock);
+
+ bool mergeFromBuffer(SkReadBuffer& buffer) SK_EXCLUDES(fStrikeLock);
+ static void FlattenGlyphsByType(SkWriteBuffer& buffer,
+ SkSpan<SkGlyph> images,
+ SkSpan<SkGlyph> paths,
+ SkSpan<SkGlyph> drawables);
+
+ // Lookup (or create if needed) the returned glyph using toID. If that glyph is not initialized
+ // with an image, then use the information in fromGlyph to initialize the width, height, top,
+ // left, format and image of the glyph. This is mainly used for preserving the glyph if it was
+ // created by a search of desperation. This is deprecated.
+ SkGlyph* mergeGlyphAndImage(
+ SkPackedGlyphID toID, const SkGlyph& fromGlyph) SK_EXCLUDES(fStrikeLock);
+
+ // If the path has never been set, then add a path to glyph. This is deprecated.
+ const SkPath* mergePath(
+ SkGlyph* glyph, const SkPath* path, bool hairline) SK_EXCLUDES(fStrikeLock);
+
+ // If the drawable has never been set, then add a drawable to glyph. This is deprecated.
+ const SkDrawable* mergeDrawable(
+ SkGlyph* glyph, sk_sp<SkDrawable> drawable) SK_EXCLUDES(fStrikeLock);
+
+ // If the advance axis intersects the glyph's path, append the positions scaled and offset
+ // to the array (if non-null), and set the count to the updated array length.
+ // TODO: track memory usage.
+ void findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ SkGlyph*, SkScalar* array, int* count) SK_EXCLUDES(fStrikeLock);
+
+ const SkFontMetrics& getFontMetrics() const {
+ return fFontMetrics;
+ }
+
+ SkSpan<const SkGlyph*> metrics(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) SK_EXCLUDES(fStrikeLock);
+
+ SkSpan<const SkGlyph*> preparePaths(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) SK_EXCLUDES(fStrikeLock);
+
+ SkSpan<const SkGlyph*> prepareImages(SkSpan<const SkPackedGlyphID> glyphIDs,
+ const SkGlyph* results[]) SK_EXCLUDES(fStrikeLock);
+
+ SkSpan<const SkGlyph*> prepareDrawables(
+ SkSpan<const SkGlyphID> glyphIDs, const SkGlyph* results[]) SK_EXCLUDES(fStrikeLock);
+
+ // SkStrikeForGPU APIs
+ const SkDescriptor& getDescriptor() const override {
+ return fStrikeSpec.descriptor();
+ }
+
+ const SkGlyphPositionRoundingSpec& roundingSpec() const override {
+ return fRoundingSpec;
+ }
+
+ sktext::SkStrikePromise strikePromise() override {
+ return sktext::SkStrikePromise(sk_ref_sp<SkStrike>(this));
+ }
+
+ // Convert all the IDs into SkPaths in the span.
+ void glyphIDsToPaths(SkSpan<sktext::IDOrPath> idsOrPaths) SK_EXCLUDES(fStrikeLock);
+
+ // Convert all the IDs into SkDrawables in the span.
+ void glyphIDsToDrawables(SkSpan<sktext::IDOrDrawable> idsOrDrawables) SK_EXCLUDES(fStrikeLock);
+
+ const SkStrikeSpec& strikeSpec() const {
+ return fStrikeSpec;
+ }
+
+ void verifyPinnedStrike() const {
+ if (fPinner != nullptr) {
+ fPinner->assertValid();
+ }
+ }
+
+ void dump() const SK_EXCLUDES(fStrikeLock);
+ void dumpMemoryStatistics(SkTraceMemoryDump* dump) const SK_EXCLUDES(fStrikeLock);
+
+ SkGlyph* glyph(SkGlyphDigest) SK_REQUIRES(fStrikeLock);
+
+private:
+ friend class SkStrikeCache;
+ friend class SkStrikeTestingPeer;
+ class Monitor;
+
+ // Return a glyph. Create it if it doesn't exist, and initialize the glyph with metrics and
+ // advances using a scaler.
+ SkGlyph* glyph(SkPackedGlyphID) SK_REQUIRES(fStrikeLock);
+
+ // Generate the glyph digest information and update structures to add the glyph.
+ SkGlyphDigest* addGlyphAndDigest(SkGlyph* glyph) SK_REQUIRES(fStrikeLock);
+
+ SkGlyph* mergeGlyphFromBuffer(SkReadBuffer& buffer) SK_REQUIRES(fStrikeLock);
+ bool mergeGlyphAndImageFromBuffer(SkReadBuffer& buffer) SK_REQUIRES(fStrikeLock);
+ bool mergeGlyphAndPathFromBuffer(SkReadBuffer& buffer) SK_REQUIRES(fStrikeLock);
+ bool mergeGlyphAndDrawableFromBuffer(SkReadBuffer& buffer) SK_REQUIRES(fStrikeLock);
+
+ // Maintain memory use statistics.
+ void updateMemoryUsage(size_t increase) SK_EXCLUDES(fStrikeLock);
+
+ enum PathDetail {
+ kMetricsOnly,
+ kMetricsAndPath
+ };
+
+ // internalPrepare will only be called with a mutex already held.
+ SkSpan<const SkGlyph*> internalPrepare(
+ SkSpan<const SkGlyphID> glyphIDs,
+ PathDetail pathDetail,
+ const SkGlyph** results) SK_REQUIRES(fStrikeLock);
+
+ // The following are const and need no mutex protection.
+ const SkFontMetrics fFontMetrics;
+ const SkGlyphPositionRoundingSpec fRoundingSpec;
+ const SkStrikeSpec fStrikeSpec;
+ SkStrikeCache* const fStrikeCache;
+
+ // This mutex provides protection for this specific SkStrike.
+ mutable SkMutex fStrikeLock;
+
+ // Maps from a combined GlyphID and sub-pixel position to a SkGlyphDigest. The actual glyph is
+ // stored in the fAlloc. The pointer to the glyph is stored in fGlyphForIndex. The
+ // SkGlyphDigest's fIndex field stores the index. This pointer provides an unchanging
+ // reference to the SkGlyph as long as the strike is alive, and fGlyphForIndex
+ // provides a dense index for glyphs.
+ SkTHashTable<SkGlyphDigest, SkPackedGlyphID, SkGlyphDigest>
+ fDigestForPackedGlyphID SK_GUARDED_BY(fStrikeLock);
+
+ // Maps from a glyphIndex to a glyph
+ std::vector<SkGlyph*> fGlyphForIndex SK_GUARDED_BY(fStrikeLock);
+
+ // Context that corresponds to the glyph information in this strike.
+ const std::unique_ptr<SkScalerContext> fScalerContext SK_GUARDED_BY(fStrikeLock);
+
+ // Used while changing the strike to track memory increase.
+ size_t fMemoryIncrease SK_GUARDED_BY(fStrikeLock) {0};
+
+ // So, we don't grow our arrays a lot.
+ inline static constexpr size_t kMinGlyphCount = 8;
+ inline static constexpr size_t kMinGlyphImageSize = 16 /* height */ * 8 /* width */;
+ inline static constexpr size_t kMinAllocAmount = kMinGlyphImageSize * kMinGlyphCount;
+
+ SkArenaAlloc fAlloc SK_GUARDED_BY(fStrikeLock) {kMinAllocAmount};
+
+ // The following are protected by the SkStrikeCache's mutex.
+ SkStrike* fNext{nullptr};
+ SkStrike* fPrev{nullptr};
+ std::unique_ptr<SkStrikePinner> fPinner;
+ size_t fMemoryUsed{sizeof(SkStrike)};
+ bool fRemoved{false};
+};
+
+#endif // SkStrike_DEFINED
diff --git a/gfx/skia/skia/src/core/SkStrikeCache.cpp b/gfx/skia/skia/src/core/SkStrikeCache.cpp
new file mode 100644
index 0000000000..4b2e255903
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeCache.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrikeCache.h"
+
+#include <cctype>
+
+#include "include/core/SkGraphics.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTraceMemoryDump.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkStrike.h"
+
+#if defined(SK_GANESH)
+#include "src/text/gpu/StrikeCache.h"
+#endif
+
+using namespace sktext;
+
+bool gSkUseThreadLocalStrikeCaches_IAcknowledgeThisIsIncrediblyExperimental = false;
+
+SkStrikeCache* SkStrikeCache::GlobalStrikeCache() {
+ if (gSkUseThreadLocalStrikeCaches_IAcknowledgeThisIsIncrediblyExperimental) {
+ static thread_local auto* cache = new SkStrikeCache;
+ return cache;
+ }
+ static auto* cache = new SkStrikeCache;
+ return cache;
+}
+
+auto SkStrikeCache::findOrCreateStrike(const SkStrikeSpec& strikeSpec) -> sk_sp<SkStrike> {
+ SkAutoMutexExclusive ac(fLock);
+ sk_sp<SkStrike> strike = this->internalFindStrikeOrNull(strikeSpec.descriptor());
+ if (strike == nullptr) {
+ strike = this->internalCreateStrike(strikeSpec);
+ }
+ this->internalPurge();
+ return strike;
+}
+
+sk_sp<StrikeForGPU> SkStrikeCache::findOrCreateScopedStrike(const SkStrikeSpec& strikeSpec) {
+ return this->findOrCreateStrike(strikeSpec);
+}
+
+void SkStrikeCache::PurgeAll() {
+ GlobalStrikeCache()->purgeAll();
+}
+
+void SkStrikeCache::Dump() {
+ SkDebugf("GlyphCache [ used budget ]\n");
+ SkDebugf(" bytes [ %8zu %8zu ]\n",
+ SkGraphics::GetFontCacheUsed(), SkGraphics::GetFontCacheLimit());
+ SkDebugf(" count [ %8d %8d ]\n",
+ SkGraphics::GetFontCacheCountUsed(), SkGraphics::GetFontCacheCountLimit());
+
+ auto visitor = [](const SkStrike& strike) {
+ strike.dump();
+ };
+
+ GlobalStrikeCache()->forEachStrike(visitor);
+}
+
+void SkStrikeCache::DumpMemoryStatistics(SkTraceMemoryDump* dump) {
+ dump->dumpNumericValue(kGlyphCacheDumpName, "size", "bytes", SkGraphics::GetFontCacheUsed());
+ dump->dumpNumericValue(kGlyphCacheDumpName, "budget_size", "bytes",
+ SkGraphics::GetFontCacheLimit());
+ dump->dumpNumericValue(kGlyphCacheDumpName, "glyph_count", "objects",
+ SkGraphics::GetFontCacheCountUsed());
+ dump->dumpNumericValue(kGlyphCacheDumpName, "budget_glyph_count", "objects",
+ SkGraphics::GetFontCacheCountLimit());
+
+ if (dump->getRequestedDetails() == SkTraceMemoryDump::kLight_LevelOfDetail) {
+ dump->setMemoryBacking(kGlyphCacheDumpName, "malloc", nullptr);
+ return;
+ }
+
+ auto visitor = [&](const SkStrike& strike) {
+ strike.dumpMemoryStatistics(dump);
+ };
+
+ GlobalStrikeCache()->forEachStrike(visitor);
+}
+
+sk_sp<SkStrike> SkStrikeCache::findStrike(const SkDescriptor& desc) {
+ SkAutoMutexExclusive ac(fLock);
+ sk_sp<SkStrike> result = this->internalFindStrikeOrNull(desc);
+ this->internalPurge();
+ return result;
+}
+
+auto SkStrikeCache::internalFindStrikeOrNull(const SkDescriptor& desc) -> sk_sp<SkStrike> {
+
+ // Check head because it is likely the strike we are looking for.
+ if (fHead != nullptr && fHead->getDescriptor() == desc) { return sk_ref_sp(fHead); }
+
+ // Do the heavy search looking for the strike.
+ sk_sp<SkStrike>* strikeHandle = fStrikeLookup.find(desc);
+ if (strikeHandle == nullptr) { return nullptr; }
+ SkStrike* strikePtr = strikeHandle->get();
+ SkASSERT(strikePtr != nullptr);
+ if (fHead != strikePtr) {
+ // Make most recently used
+ strikePtr->fPrev->fNext = strikePtr->fNext;
+ if (strikePtr->fNext != nullptr) {
+ strikePtr->fNext->fPrev = strikePtr->fPrev;
+ } else {
+ fTail = strikePtr->fPrev;
+ }
+ fHead->fPrev = strikePtr;
+ strikePtr->fNext = fHead;
+ strikePtr->fPrev = nullptr;
+ fHead = strikePtr;
+ }
+ return sk_ref_sp(strikePtr);
+}
+
+sk_sp<SkStrike> SkStrikeCache::createStrike(
+ const SkStrikeSpec& strikeSpec,
+ SkFontMetrics* maybeMetrics,
+ std::unique_ptr<SkStrikePinner> pinner) {
+ SkAutoMutexExclusive ac(fLock);
+ return this->internalCreateStrike(strikeSpec, maybeMetrics, std::move(pinner));
+}
+
+auto SkStrikeCache::internalCreateStrike(
+ const SkStrikeSpec& strikeSpec,
+ SkFontMetrics* maybeMetrics,
+ std::unique_ptr<SkStrikePinner> pinner) -> sk_sp<SkStrike> {
+ std::unique_ptr<SkScalerContext> scaler = strikeSpec.createScalerContext();
+ auto strike =
+ sk_make_sp<SkStrike>(this, strikeSpec, std::move(scaler), maybeMetrics, std::move(pinner));
+ this->internalAttachToHead(strike);
+ return strike;
+}
+
+void SkStrikeCache::purgeAll() {
+ SkAutoMutexExclusive ac(fLock);
+ this->internalPurge(fTotalMemoryUsed);
+}
+
+size_t SkStrikeCache::getTotalMemoryUsed() const {
+ SkAutoMutexExclusive ac(fLock);
+ return fTotalMemoryUsed;
+}
+
+int SkStrikeCache::getCacheCountUsed() const {
+ SkAutoMutexExclusive ac(fLock);
+ return fCacheCount;
+}
+
+int SkStrikeCache::getCacheCountLimit() const {
+ SkAutoMutexExclusive ac(fLock);
+ return fCacheCountLimit;
+}
+
+size_t SkStrikeCache::setCacheSizeLimit(size_t newLimit) {
+ SkAutoMutexExclusive ac(fLock);
+
+ size_t prevLimit = fCacheSizeLimit;
+ fCacheSizeLimit = newLimit;
+ this->internalPurge();
+ return prevLimit;
+}
+
+size_t SkStrikeCache::getCacheSizeLimit() const {
+ SkAutoMutexExclusive ac(fLock);
+ return fCacheSizeLimit;
+}
+
+int SkStrikeCache::setCacheCountLimit(int newCount) {
+ if (newCount < 0) {
+ newCount = 0;
+ }
+
+ SkAutoMutexExclusive ac(fLock);
+
+ int prevCount = fCacheCountLimit;
+ fCacheCountLimit = newCount;
+ this->internalPurge();
+ return prevCount;
+}
+
+void SkStrikeCache::forEachStrike(std::function<void(const SkStrike&)> visitor) const {
+ SkAutoMutexExclusive ac(fLock);
+
+ this->validate();
+
+ for (SkStrike* strike = fHead; strike != nullptr; strike = strike->fNext) {
+ visitor(*strike);
+ }
+}
+
+size_t SkStrikeCache::internalPurge(size_t minBytesNeeded) {
+ size_t bytesNeeded = 0;
+ if (fTotalMemoryUsed > fCacheSizeLimit) {
+ bytesNeeded = fTotalMemoryUsed - fCacheSizeLimit;
+ }
+ bytesNeeded = std::max(bytesNeeded, minBytesNeeded);
+ if (bytesNeeded) {
+ // no small purges!
+ bytesNeeded = std::max(bytesNeeded, fTotalMemoryUsed >> 2);
+ }
+
+ int countNeeded = 0;
+ if (fCacheCount > fCacheCountLimit) {
+ countNeeded = fCacheCount - fCacheCountLimit;
+ // no small purges!
+ countNeeded = std::max(countNeeded, fCacheCount >> 2);
+ }
+
+ // early exit
+ if (!countNeeded && !bytesNeeded) {
+ return 0;
+ }
+
+ size_t bytesFreed = 0;
+ int countFreed = 0;
+
+ // Start at the tail and proceed backwards deleting; the list is in LRU
+ // order, with unimportant entries at the tail.
+ SkStrike* strike = fTail;
+ while (strike != nullptr && (bytesFreed < bytesNeeded || countFreed < countNeeded)) {
+ SkStrike* prev = strike->fPrev;
+
+ // Only delete if the strike is not pinned.
+ if (strike->fPinner == nullptr || strike->fPinner->canDelete()) {
+ bytesFreed += strike->fMemoryUsed;
+ countFreed += 1;
+ this->internalRemoveStrike(strike);
+ }
+ strike = prev;
+ }
+
+ this->validate();
+
+#ifdef SPEW_PURGE_STATUS
+ if (countFreed) {
+ SkDebugf("purging %dK from font cache [%d entries]\n",
+ (int)(bytesFreed >> 10), countFreed);
+ }
+#endif
+
+ return bytesFreed;
+}
+
+void SkStrikeCache::internalAttachToHead(sk_sp<SkStrike> strike) {
+ SkASSERT(fStrikeLookup.find(strike->getDescriptor()) == nullptr);
+ SkStrike* strikePtr = strike.get();
+ fStrikeLookup.set(std::move(strike));
+ SkASSERT(nullptr == strikePtr->fPrev && nullptr == strikePtr->fNext);
+
+ fCacheCount += 1;
+ fTotalMemoryUsed += strikePtr->fMemoryUsed;
+
+ if (fHead != nullptr) {
+ fHead->fPrev = strikePtr;
+ strikePtr->fNext = fHead;
+ }
+
+ if (fTail == nullptr) {
+ fTail = strikePtr;
+ }
+
+ fHead = strikePtr; // Transfer ownership of strike to the cache list.
+}
+
+void SkStrikeCache::internalRemoveStrike(SkStrike* strike) {
+ SkASSERT(fCacheCount > 0);
+ fCacheCount -= 1;
+ fTotalMemoryUsed -= strike->fMemoryUsed;
+
+ if (strike->fPrev) {
+ strike->fPrev->fNext = strike->fNext;
+ } else {
+ fHead = strike->fNext;
+ }
+ if (strike->fNext) {
+ strike->fNext->fPrev = strike->fPrev;
+ } else {
+ fTail = strike->fPrev;
+ }
+
+ strike->fPrev = strike->fNext = nullptr;
+ strike->fRemoved = true;
+ fStrikeLookup.remove(strike->getDescriptor());
+}
+
+void SkStrikeCache::validate() const {
+#ifdef SK_DEBUG
+ size_t computedBytes = 0;
+ int computedCount = 0;
+
+ const SkStrike* strike = fHead;
+ while (strike != nullptr) {
+ computedBytes += strike->fMemoryUsed;
+ computedCount += 1;
+ SkASSERT(fStrikeLookup.findOrNull(strike->getDescriptor()) != nullptr);
+ strike = strike->fNext;
+ }
+
+ if (fCacheCount != computedCount) {
+ SkDebugf("fCacheCount: %d, computedCount: %d", fCacheCount, computedCount);
+ SK_ABORT("fCacheCount != computedCount");
+ }
+ if (fTotalMemoryUsed != computedBytes) {
+ SkDebugf("fTotalMemoryUsed: %zu, computedBytes: %zu", fTotalMemoryUsed, computedBytes);
+ SK_ABORT("fTotalMemoryUsed == computedBytes");
+ }
+#endif
+}
+
+const SkDescriptor& SkStrikeCache::StrikeTraits::GetKey(const sk_sp<SkStrike>& strike) {
+ return strike->getDescriptor();
+}
+
+uint32_t SkStrikeCache::StrikeTraits::Hash(const SkDescriptor& descriptor) {
+ return descriptor.getChecksum();
+}
+
+
diff --git a/gfx/skia/skia/src/core/SkStrikeCache.h b/gfx/skia/skia/src/core/SkStrikeCache.h
new file mode 100644
index 0000000000..76d7d1eb98
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeCache.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrikeCache_DEFINED
+#define SkStrikeCache_DEFINED
+
+#include "include/core/SkDrawable.h"
+#include "include/private/SkSpinlock.h"
+#include "include/private/base/SkLoadUserConfig.h" // IWYU pragma: keep
+#include "include/private/base/SkMutex.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/text/StrikeForGPU.h"
+
+class SkStrike;
+class SkStrikePinner;
+class SkTraceMemoryDump;
+
+// SK_DEFAULT_FONT_CACHE_COUNT_LIMIT and SK_DEFAULT_FONT_CACHE_LIMIT can be set using -D on your
+// compiler commandline, or by using the defines in SkUserConfig.h
+#ifndef SK_DEFAULT_FONT_CACHE_COUNT_LIMIT
+ #define SK_DEFAULT_FONT_CACHE_COUNT_LIMIT 2048
+#endif
+
+#ifndef SK_DEFAULT_FONT_CACHE_LIMIT
+ #define SK_DEFAULT_FONT_CACHE_LIMIT (2 * 1024 * 1024)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkStrikeCache final : public sktext::StrikeForGPUCacheInterface {
+public:
+ SkStrikeCache() = default;
+
+ static SkStrikeCache* GlobalStrikeCache();
+
+ sk_sp<SkStrike> findStrike(const SkDescriptor& desc) SK_EXCLUDES(fLock);
+
+ sk_sp<SkStrike> createStrike(
+ const SkStrikeSpec& strikeSpec,
+ SkFontMetrics* maybeMetrics = nullptr,
+ std::unique_ptr<SkStrikePinner> = nullptr) SK_EXCLUDES(fLock);
+
+ sk_sp<SkStrike> findOrCreateStrike(const SkStrikeSpec& strikeSpec) SK_EXCLUDES(fLock);
+
+ sk_sp<sktext::StrikeForGPU> findOrCreateScopedStrike(
+ const SkStrikeSpec& strikeSpec) override SK_EXCLUDES(fLock);
+
+ static void PurgeAll();
+ static void Dump();
+
+    // Dump memory usage statistics of all the attached caches in the process using the
+ // SkTraceMemoryDump interface.
+ static void DumpMemoryStatistics(SkTraceMemoryDump* dump);
+
+ void purgeAll() SK_EXCLUDES(fLock); // does not change budget
+
+ int getCacheCountLimit() const SK_EXCLUDES(fLock);
+ int setCacheCountLimit(int limit) SK_EXCLUDES(fLock);
+ int getCacheCountUsed() const SK_EXCLUDES(fLock);
+
+ size_t getCacheSizeLimit() const SK_EXCLUDES(fLock);
+ size_t setCacheSizeLimit(size_t limit) SK_EXCLUDES(fLock);
+ size_t getTotalMemoryUsed() const SK_EXCLUDES(fLock);
+
+private:
+ friend class SkStrike; // for SkStrike::updateDelta
+ static constexpr char kGlyphCacheDumpName[] = "skia/sk_glyph_cache";
+ sk_sp<SkStrike> internalFindStrikeOrNull(const SkDescriptor& desc) SK_REQUIRES(fLock);
+ sk_sp<SkStrike> internalCreateStrike(
+ const SkStrikeSpec& strikeSpec,
+ SkFontMetrics* maybeMetrics = nullptr,
+ std::unique_ptr<SkStrikePinner> = nullptr) SK_REQUIRES(fLock);
+
+ // The following methods can only be called when mutex is already held.
+ void internalRemoveStrike(SkStrike* strike) SK_REQUIRES(fLock);
+ void internalAttachToHead(sk_sp<SkStrike> strike) SK_REQUIRES(fLock);
+
+ // Checkout budgets, modulated by the specified min-bytes-needed-to-purge,
+ // and attempt to purge caches to match.
+ // Returns number of bytes freed.
+ size_t internalPurge(size_t minBytesNeeded = 0) SK_REQUIRES(fLock);
+
+ // A simple accounting of what each glyph cache reports and the strike cache total.
+ void validate() const SK_REQUIRES(fLock);
+
+ void forEachStrike(std::function<void(const SkStrike&)> visitor) const SK_EXCLUDES(fLock);
+
+ mutable SkMutex fLock;
+ SkStrike* fHead SK_GUARDED_BY(fLock) {nullptr};
+ SkStrike* fTail SK_GUARDED_BY(fLock) {nullptr};
+ struct StrikeTraits {
+ static const SkDescriptor& GetKey(const sk_sp<SkStrike>& strike);
+ static uint32_t Hash(const SkDescriptor& descriptor);
+ };
+ SkTHashTable<sk_sp<SkStrike>, SkDescriptor, StrikeTraits> fStrikeLookup SK_GUARDED_BY(fLock);
+
+ size_t fCacheSizeLimit{SK_DEFAULT_FONT_CACHE_LIMIT};
+ size_t fTotalMemoryUsed SK_GUARDED_BY(fLock) {0};
+ int32_t fCacheCountLimit{SK_DEFAULT_FONT_CACHE_COUNT_LIMIT};
+ int32_t fCacheCount SK_GUARDED_BY(fLock) {0};
+};
+
+#endif // SkStrikeCache_DEFINED
diff --git a/gfx/skia/skia/src/core/SkStrikeSpec.cpp b/gfx/skia/skia/src/core/SkStrikeSpec.cpp
new file mode 100644
index 0000000000..43a74eee0d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeSpec.cpp
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrikeSpec.h"
+
+#include "include/core/SkGraphics.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPathEffect.h"
+#include "include/effects/SkDashPathEffect.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)
+#include "src/text/gpu/SDFMaskFilter.h"
+#include "src/text/gpu/SDFTControl.h"
+#include "src/text/gpu/StrikeCache.h"
+#endif
+
+SkStrikeSpec::SkStrikeSpec(const SkDescriptor& descriptor, sk_sp<SkTypeface> typeface)
+ : fAutoDescriptor{descriptor}
+ , fTypeface{std::move(typeface)} {}
+
+SkStrikeSpec::SkStrikeSpec(const SkStrikeSpec&) = default;
+SkStrikeSpec::SkStrikeSpec(SkStrikeSpec&&) = default;
+SkStrikeSpec::~SkStrikeSpec() = default;
+
+SkStrikeSpec SkStrikeSpec::MakeMask(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix) {
+
+ return SkStrikeSpec(font, paint, surfaceProps, scalerContextFlags, deviceMatrix);
+}
+
+SkStrikeSpec SkStrikeSpec::MakeTransformMask(const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix) {
+ SkFont sourceFont{font};
+ sourceFont.setSubpixel(false);
+ return SkStrikeSpec(sourceFont, paint, surfaceProps, scalerContextFlags, deviceMatrix);
+}
+
+std::tuple<SkStrikeSpec, SkScalar> SkStrikeSpec::MakePath(
+ const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags) {
+
+ // setup our std runPaint, in hopes of getting hits in the cache
+ SkPaint pathPaint{paint};
+ SkFont pathFont{font};
+
+ // The sub-pixel position will always happen when transforming to the screen.
+ pathFont.setSubpixel(false);
+
+ // The factor to get from the size stored in the strike to the size needed for
+ // the source.
+ SkScalar strikeToSourceScale = pathFont.setupForAsPaths(&pathPaint);
+
+ return {SkStrikeSpec(pathFont, pathPaint, surfaceProps, scalerContextFlags, SkMatrix::I()),
+ strikeToSourceScale};
+}
+
+std::tuple<SkStrikeSpec, SkScalar> SkStrikeSpec::MakeCanonicalized(
+ const SkFont& font, const SkPaint* paint) {
+ SkPaint canonicalizedPaint;
+ if (paint != nullptr) {
+ canonicalizedPaint = *paint;
+ }
+
+ const SkFont* canonicalizedFont = &font;
+ SkTLazy<SkFont> pathFont;
+ SkScalar strikeToSourceScale = 1;
+ if (ShouldDrawAsPath(canonicalizedPaint, font, SkMatrix::I())) {
+ canonicalizedFont = pathFont.set(font);
+ strikeToSourceScale = pathFont->setupForAsPaths(nullptr);
+ canonicalizedPaint.reset();
+ }
+
+ return {SkStrikeSpec(*canonicalizedFont, canonicalizedPaint, SkSurfaceProps(),
+ SkScalerContextFlags::kFakeGammaAndBoostContrast, SkMatrix::I()),
+ strikeToSourceScale};
+}
+
+SkStrikeSpec SkStrikeSpec::MakeWithNoDevice(const SkFont& font, const SkPaint* paint) {
+ SkPaint setupPaint;
+ if (paint != nullptr) {
+ setupPaint = *paint;
+ }
+
+ return SkStrikeSpec(font, setupPaint, SkSurfaceProps(),
+ SkScalerContextFlags::kFakeGammaAndBoostContrast, SkMatrix::I());
+}
+
+bool SkStrikeSpec::ShouldDrawAsPath(
+ const SkPaint& paint, const SkFont& font, const SkMatrix& viewMatrix) {
+
+ // hairline glyphs are fast enough, so we don't need to cache them
+ if (SkPaint::kStroke_Style == paint.getStyle() && 0 == paint.getStrokeWidth()) {
+ return true;
+ }
+
+ // we don't cache perspective
+ if (viewMatrix.hasPerspective()) {
+ return true;
+ }
+
+ // Glyphs like Emojis can't be rendered as a path.
+ if (font.getTypeface() && font.getTypeface()->hasColorGlyphs()) {
+ return false;
+ }
+
+ SkMatrix textMatrix = SkFontPriv::MakeTextMatrix(font);
+ textMatrix.postConcat(viewMatrix);
+
+ // we have a self-imposed maximum, just to limit memory-usage
+ constexpr SkScalar memoryLimit = 256;
+ constexpr SkScalar maxSizeSquared = memoryLimit * memoryLimit;
+
+ auto distance = [&textMatrix](int XIndex, int YIndex) {
+ return textMatrix[XIndex] * textMatrix[XIndex] + textMatrix[YIndex] * textMatrix[YIndex];
+ };
+
+ return distance(SkMatrix::kMScaleX, SkMatrix::kMSkewY ) > maxSizeSquared
+ || distance(SkMatrix::kMSkewX, SkMatrix::kMScaleY) > maxSizeSquared;
+}
+
+SkString SkStrikeSpec::dump() const {
+ return fAutoDescriptor.getDesc()->dumpRec();
+}
+
+SkStrikeSpec SkStrikeSpec::MakePDFVector(const SkTypeface& typeface, int* size) {
+ SkFont font;
+ font.setHinting(SkFontHinting::kNone);
+ font.setEdging(SkFont::Edging::kAlias);
+ font.setTypeface(sk_ref_sp(&typeface));
+ int unitsPerEm = typeface.getUnitsPerEm();
+ if (unitsPerEm <= 0) {
+ unitsPerEm = 1024;
+ }
+ if (size) {
+ *size = unitsPerEm;
+ }
+ font.setSize((SkScalar)unitsPerEm);
+
+ return SkStrikeSpec(font,
+ SkPaint(),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry),
+ SkScalerContextFlags::kFakeGammaAndBoostContrast,
+ SkMatrix::I());
+}
+
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE)) && !defined(SK_DISABLE_SDF_TEXT)
+std::tuple<SkStrikeSpec, SkScalar, sktext::gpu::SDFTMatrixRange>
+SkStrikeSpec::MakeSDFT(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps, const SkMatrix& deviceMatrix,
+ const SkPoint& textLocation, const sktext::gpu::SDFTControl& control) {
+ // Add filter to the paint which creates the SDFT data for A8 masks.
+ SkPaint dfPaint{paint};
+ dfPaint.setMaskFilter(sktext::gpu::SDFMaskFilter::Make());
+
+ auto [dfFont, strikeToSourceScale, matrixRange] = control.getSDFFont(font, deviceMatrix,
+ textLocation);
+
+ // Adjust the stroke width by the scale factor for drawing the SDFT.
+ dfPaint.setStrokeWidth(paint.getStrokeWidth() / strikeToSourceScale);
+
+ // Check for dashing and adjust the intervals.
+ if (SkPathEffect* pathEffect = paint.getPathEffect(); pathEffect != nullptr) {
+ SkPathEffect::DashInfo dashInfo;
+ if (pathEffect->asADash(&dashInfo) == SkPathEffect::kDash_DashType) {
+ if (dashInfo.fCount > 0) {
+ // Allocate the intervals.
+ std::vector<SkScalar> scaledIntervals(dashInfo.fCount);
+ dashInfo.fIntervals = scaledIntervals.data();
+ // Call again to get the interval data.
+ (void)pathEffect->asADash(&dashInfo);
+ for (SkScalar& interval : scaledIntervals) {
+ interval /= strikeToSourceScale;
+ }
+ auto scaledDashes = SkDashPathEffect::Make(scaledIntervals.data(),
+ scaledIntervals.size(),
+ dashInfo.fPhase / strikeToSourceScale);
+ dfPaint.setPathEffect(scaledDashes);
+ }
+ }
+ }
+
+ // Fake-gamma and subpixel antialiasing are applied in the shader, so we ignore the
+ // passed-in scaler context flags. (It's only used when we fall-back to bitmap text).
+ SkScalerContextFlags flags = SkScalerContextFlags::kNone;
+ SkStrikeSpec strikeSpec(dfFont, dfPaint, surfaceProps, flags, SkMatrix::I());
+
+ return std::make_tuple(std::move(strikeSpec), strikeToSourceScale, matrixRange);
+}
+#endif
+
+SkStrikeSpec::SkStrikeSpec(const SkFont& font, const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix) {
+ SkScalerContextEffects effects;
+
+ SkScalerContext::CreateDescriptorAndEffectsUsingPaint(
+ font, paint, surfaceProps, scalerContextFlags, deviceMatrix,
+ &fAutoDescriptor, &effects);
+
+ fMaskFilter = sk_ref_sp(effects.fMaskFilter);
+ fPathEffect = sk_ref_sp(effects.fPathEffect);
+ fTypeface = font.refTypefaceOrDefault();
+}
+
+sk_sp<sktext::StrikeForGPU> SkStrikeSpec::findOrCreateScopedStrike(
+ sktext::StrikeForGPUCacheInterface* cache) const {
+ return cache->findOrCreateScopedStrike(*this);
+}
+
+sk_sp<SkStrike> SkStrikeSpec::findOrCreateStrike() const {
+ SkScalerContextEffects effects{fPathEffect.get(), fMaskFilter.get()};
+ return SkStrikeCache::GlobalStrikeCache()->findOrCreateStrike(*this);
+}
+
+sk_sp<SkStrike> SkStrikeSpec::findOrCreateStrike(SkStrikeCache* cache) const {
+ SkScalerContextEffects effects{fPathEffect.get(), fMaskFilter.get()};
+ return cache->findOrCreateStrike(*this);
+}
+
+SkBulkGlyphMetrics::SkBulkGlyphMetrics(const SkStrikeSpec& spec)
+ : fStrike{spec.findOrCreateStrike()} { }
+
+SkSpan<const SkGlyph*> SkBulkGlyphMetrics::glyphs(SkSpan<const SkGlyphID> glyphIDs) {
+ fGlyphs.reset(glyphIDs.size());
+ return fStrike->metrics(glyphIDs, fGlyphs.get());
+}
+
+const SkGlyph* SkBulkGlyphMetrics::glyph(SkGlyphID glyphID) {
+ return this->glyphs(SkSpan<const SkGlyphID>{&glyphID, 1})[0];
+}
+
+SkBulkGlyphMetricsAndPaths::SkBulkGlyphMetricsAndPaths(const SkStrikeSpec& spec)
+ : fStrike{spec.findOrCreateStrike()} { }
+
+SkBulkGlyphMetricsAndPaths::SkBulkGlyphMetricsAndPaths(sk_sp<SkStrike>&& strike)
+ : fStrike{std::move(strike)} { }
+
+SkBulkGlyphMetricsAndPaths::~SkBulkGlyphMetricsAndPaths() = default;
+
+SkSpan<const SkGlyph*> SkBulkGlyphMetricsAndPaths::glyphs(SkSpan<const SkGlyphID> glyphIDs) {
+ fGlyphs.reset(glyphIDs.size());
+ return fStrike->preparePaths(glyphIDs, fGlyphs.get());
+}
+
+const SkGlyph* SkBulkGlyphMetricsAndPaths::glyph(SkGlyphID glyphID) {
+ return this->glyphs(SkSpan<const SkGlyphID>{&glyphID, 1})[0];
+}
+
+void SkBulkGlyphMetricsAndPaths::findIntercepts(
+ const SkScalar* bounds, SkScalar scale, SkScalar xPos,
+ const SkGlyph* glyph, SkScalar* array, int* count) {
+ // TODO(herb): remove this abominable const_cast. Do the intercepts really need to be on the
+ // glyph?
+ fStrike->findIntercepts(bounds, scale, xPos, const_cast<SkGlyph*>(glyph), array, count);
+}
+
+SkBulkGlyphMetricsAndDrawables::SkBulkGlyphMetricsAndDrawables(const SkStrikeSpec& spec)
+ : fStrike{spec.findOrCreateStrike()} { }
+
+SkBulkGlyphMetricsAndDrawables::SkBulkGlyphMetricsAndDrawables(sk_sp<SkStrike>&& strike)
+ : fStrike{std::move(strike)} { }
+
+SkBulkGlyphMetricsAndDrawables::~SkBulkGlyphMetricsAndDrawables() = default;
+
+SkSpan<const SkGlyph*> SkBulkGlyphMetricsAndDrawables::glyphs(SkSpan<const SkGlyphID> glyphIDs) {
+ fGlyphs.reset(glyphIDs.size());
+ return fStrike->prepareDrawables(glyphIDs, fGlyphs.get());
+}
+
+const SkGlyph* SkBulkGlyphMetricsAndDrawables::glyph(SkGlyphID glyphID) {
+ return this->glyphs(SkSpan<const SkGlyphID>{&glyphID, 1})[0];
+}
+
+SkBulkGlyphMetricsAndImages::SkBulkGlyphMetricsAndImages(const SkStrikeSpec& spec)
+ : fStrike{spec.findOrCreateStrike()} { }
+
+SkBulkGlyphMetricsAndImages::SkBulkGlyphMetricsAndImages(sk_sp<SkStrike>&& strike)
+ : fStrike{std::move(strike)} { }
+
+SkBulkGlyphMetricsAndImages::~SkBulkGlyphMetricsAndImages() = default;
+
+SkSpan<const SkGlyph*> SkBulkGlyphMetricsAndImages::glyphs(SkSpan<const SkPackedGlyphID> glyphIDs) {
+ fGlyphs.reset(glyphIDs.size());
+ return fStrike->prepareImages(glyphIDs, fGlyphs.get());
+}
+
+const SkGlyph* SkBulkGlyphMetricsAndImages::glyph(SkPackedGlyphID packedID) {
+ return this->glyphs(SkSpan<const SkPackedGlyphID>{&packedID, 1})[0];
+}
+
+const SkDescriptor& SkBulkGlyphMetricsAndImages::descriptor() const {
+ return fStrike->getDescriptor();
+}
diff --git a/gfx/skia/skia/src/core/SkStrikeSpec.h b/gfx/skia/skia/src/core/SkStrikeSpec.h
new file mode 100644
index 0000000000..dac2a5c0cf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrikeSpec.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStrikeSpec_DEFINED
+#define SkStrikeSpec_DEFINED
+
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkSpan.h"
+#include "src/core/SkDescriptor.h"
+#include "src/text/StrikeForGPU.h"
+
+#include <tuple>
+
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)
+#include "src/text/gpu/SDFTControl.h"
+
+namespace sktext::gpu {
+class StrikeCache;
+class TextStrike;
+}
+#endif
+
+class SkFont;
+class SkPaint;
+class SkStrike;
+class SkStrikeCache;
+class SkSurfaceProps;
+
+class SkStrikeSpec {
+public:
+ SkStrikeSpec(const SkDescriptor& descriptor, sk_sp<SkTypeface> typeface);
+ SkStrikeSpec(const SkStrikeSpec&);
+ SkStrikeSpec& operator=(const SkStrikeSpec&) = delete;
+
+ SkStrikeSpec(SkStrikeSpec&&);
+ SkStrikeSpec& operator=(SkStrikeSpec&&) = delete;
+
+ ~SkStrikeSpec();
+
+ // Create a strike spec for mask style cache entries.
+ static SkStrikeSpec MakeMask(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix);
+
+ // A strike for finding the max size for transforming masks. This is used to calculate the
+ // maximum dimension of a SubRun of text.
+ static SkStrikeSpec MakeTransformMask(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix);
+
+ // Create a strike spec for path style cache entries.
+ static std::tuple<SkStrikeSpec, SkScalar> MakePath(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags);
+
+ // Create a canonical strike spec for device-less measurements.
+ static std::tuple<SkStrikeSpec, SkScalar> MakeCanonicalized(
+ const SkFont& font, const SkPaint* paint = nullptr);
+
+ // Create a strike spec without a device, and does not switch over to path for large sizes.
+ static SkStrikeSpec MakeWithNoDevice(const SkFont& font, const SkPaint* paint = nullptr);
+
+ // Make a strike spec for PDF Vector strikes
+ static SkStrikeSpec MakePDFVector(const SkTypeface& typeface, int* size);
+
+#if (defined(SK_GANESH) || defined(SK_GRAPHITE)) && !defined(SK_DISABLE_SDF_TEXT)
+ // Create a strike spec for scaled distance field text.
+ static std::tuple<SkStrikeSpec, SkScalar, sktext::gpu::SDFTMatrixRange> MakeSDFT(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ const SkMatrix& deviceMatrix,
+ const SkPoint& textLocation,
+ const sktext::gpu::SDFTControl& control);
+#endif
+
+ sk_sp<sktext::StrikeForGPU> findOrCreateScopedStrike(
+ sktext::StrikeForGPUCacheInterface* cache) const;
+
+ sk_sp<SkStrike> findOrCreateStrike() const;
+
+ sk_sp<SkStrike> findOrCreateStrike(SkStrikeCache* cache) const;
+
+ std::unique_ptr<SkScalerContext> createScalerContext() const {
+ SkScalerContextEffects effects{fPathEffect.get(), fMaskFilter.get()};
+ return fTypeface->createScalerContext(effects, fAutoDescriptor.getDesc());
+ }
+
+ const SkDescriptor& descriptor() const { return *fAutoDescriptor.getDesc(); }
+ const SkTypeface& typeface() const { return *fTypeface; }
+ static bool ShouldDrawAsPath(const SkPaint& paint, const SkFont& font, const SkMatrix& matrix);
+ SkString dump() const;
+
+private:
+ SkStrikeSpec(
+ const SkFont& font,
+ const SkPaint& paint,
+ const SkSurfaceProps& surfaceProps,
+ SkScalerContextFlags scalerContextFlags,
+ const SkMatrix& deviceMatrix);
+
+ SkAutoDescriptor fAutoDescriptor;
+ sk_sp<SkMaskFilter> fMaskFilter{nullptr};
+ sk_sp<SkPathEffect> fPathEffect{nullptr};
+ sk_sp<SkTypeface> fTypeface;
+};
+
+class SkBulkGlyphMetrics {
+public:
+ explicit SkBulkGlyphMetrics(const SkStrikeSpec& spec);
+ SkSpan<const SkGlyph*> glyphs(SkSpan<const SkGlyphID> glyphIDs);
+ const SkGlyph* glyph(SkGlyphID glyphID);
+
+private:
+ inline static constexpr int kTypicalGlyphCount = 20;
+ skia_private::AutoSTArray<kTypicalGlyphCount, const SkGlyph*> fGlyphs;
+ sk_sp<SkStrike> fStrike;
+};
+
+class SkBulkGlyphMetricsAndPaths {
+public:
+ explicit SkBulkGlyphMetricsAndPaths(const SkStrikeSpec& spec);
+ explicit SkBulkGlyphMetricsAndPaths(sk_sp<SkStrike>&& strike);
+ ~SkBulkGlyphMetricsAndPaths();
+ SkSpan<const SkGlyph*> glyphs(SkSpan<const SkGlyphID> glyphIDs);
+ const SkGlyph* glyph(SkGlyphID glyphID);
+ void findIntercepts(const SkScalar bounds[2], SkScalar scale, SkScalar xPos,
+ const SkGlyph* glyph, SkScalar* array, int* count);
+
+private:
+ inline static constexpr int kTypicalGlyphCount = 20;
+ skia_private::AutoSTArray<kTypicalGlyphCount, const SkGlyph*> fGlyphs;
+ sk_sp<SkStrike> fStrike;
+};
+
+class SkBulkGlyphMetricsAndDrawables {
+public:
+ explicit SkBulkGlyphMetricsAndDrawables(const SkStrikeSpec& spec);
+ explicit SkBulkGlyphMetricsAndDrawables(sk_sp<SkStrike>&& strike);
+ ~SkBulkGlyphMetricsAndDrawables();
+ SkSpan<const SkGlyph*> glyphs(SkSpan<const SkGlyphID> glyphIDs);
+ const SkGlyph* glyph(SkGlyphID glyphID);
+
+private:
+ inline static constexpr int kTypicalGlyphCount = 20;
+ skia_private::AutoSTArray<kTypicalGlyphCount, const SkGlyph*> fGlyphs;
+ sk_sp<SkStrike> fStrike;
+};
+
+class SkBulkGlyphMetricsAndImages {
+public:
+ explicit SkBulkGlyphMetricsAndImages(const SkStrikeSpec& spec);
+ explicit SkBulkGlyphMetricsAndImages(sk_sp<SkStrike>&& strike);
+ ~SkBulkGlyphMetricsAndImages();
+ SkSpan<const SkGlyph*> glyphs(SkSpan<const SkPackedGlyphID> packedIDs);
+ const SkGlyph* glyph(SkPackedGlyphID packedID);
+ const SkDescriptor& descriptor() const;
+
+private:
+ inline static constexpr int kTypicalGlyphCount = 64;
+ skia_private::AutoSTArray<kTypicalGlyphCount, const SkGlyph*> fGlyphs;
+ sk_sp<SkStrike> fStrike;
+};
+
+#endif // SkStrikeSpec_DEFINED
diff --git a/gfx/skia/skia/src/core/SkString.cpp b/gfx/skia/skia/src/core/SkString.cpp
new file mode 100644
index 0000000000..64ef678469
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkString.cpp
@@ -0,0 +1,630 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkSafeMath.h"
+#include "src/base/SkUTF.h"
+#include "src/base/SkUtils.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <cstring>
+#include <new>
+#include <string_view>
+#include <utility>
+
+// number of bytes (on the stack) to receive the printf result
+static const size_t kBufferSize = 1024;
+
+struct StringBuffer {
+ char* fText;
+ int fLength;
+};
+
+template <int SIZE>
+static StringBuffer apply_format_string(const char* format, va_list args, char (&stackBuffer)[SIZE],
+ SkString* heapBuffer) SK_PRINTF_LIKE(1, 0);
+
+template <int SIZE>
+static StringBuffer apply_format_string(const char* format, va_list args, char (&stackBuffer)[SIZE],
+ SkString* heapBuffer) {
+ // First, attempt to print directly to the stack buffer.
+ va_list argsCopy;
+ va_copy(argsCopy, args);
+ int outLength = std::vsnprintf(stackBuffer, SIZE, format, args);
+ if (outLength < 0) {
+ SkDebugf("SkString: vsnprintf reported error.");
+ va_end(argsCopy);
+ return {stackBuffer, 0};
+ }
+ if (outLength < SIZE) {
+ va_end(argsCopy);
+ return {stackBuffer, outLength};
+ }
+
+ // Our text was too long to fit on the stack! However, we now know how much space we need to
+ // format it. Format the string into our heap buffer. `set` automatically reserves an extra
+ // byte at the end of the buffer for a null terminator, so we don't need to add one here.
+ heapBuffer->set(nullptr, outLength);
+ char* heapBufferDest = heapBuffer->data();
+ SkDEBUGCODE(int checkLength =) std::vsnprintf(heapBufferDest, outLength + 1, format, argsCopy);
+ SkASSERT(checkLength == outLength);
+ va_end(argsCopy);
+ return {heapBufferDest, outLength};
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkStrEndsWith(const char string[], const char suffixStr[]) {
+ SkASSERT(string);
+ SkASSERT(suffixStr);
+ size_t strLen = strlen(string);
+ size_t suffixLen = strlen(suffixStr);
+ return strLen >= suffixLen &&
+ !strncmp(string + strLen - suffixLen, suffixStr, suffixLen);
+}
+
+bool SkStrEndsWith(const char string[], const char suffixChar) {
+ SkASSERT(string);
+ size_t strLen = strlen(string);
+ if (0 == strLen) {
+ return false;
+ } else {
+ return (suffixChar == string[strLen-1]);
+ }
+}
+
+int SkStrStartsWithOneOf(const char string[], const char prefixes[]) {
+ int index = 0;
+ do {
+ const char* limit = strchr(prefixes, '\0');
+ if (!strncmp(string, prefixes, limit - prefixes)) {
+ return index;
+ }
+ prefixes = limit + 1;
+ index++;
+ } while (prefixes[0]);
+ return -1;
+}
+
+char* SkStrAppendU32(char string[], uint32_t dec) {
+ SkDEBUGCODE(char* start = string;)
+
+ char buffer[kSkStrAppendU32_MaxSize];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = SkToU8('0' + dec % 10);
+ dec /= 10;
+ } while (dec != 0);
+
+ SkASSERT(p >= buffer);
+ size_t cp_len = buffer + sizeof(buffer) - p;
+ memcpy(string, p, cp_len);
+ string += cp_len;
+
+ SkASSERT(string - start <= kSkStrAppendU32_MaxSize);
+ return string;
+}
+
+char* SkStrAppendS32(char string[], int32_t dec) {
+ uint32_t udec = dec;
+ if (dec < 0) {
+ *string++ = '-';
+ udec = ~udec + 1; // udec = -udec, but silences some warnings that are trying to be helpful
+ }
+ return SkStrAppendU32(string, udec);
+}
+
+char* SkStrAppendU64(char string[], uint64_t dec, int minDigits) {
+ SkDEBUGCODE(char* start = string;)
+
+ char buffer[kSkStrAppendU64_MaxSize];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = SkToU8('0' + (int32_t) (dec % 10));
+ dec /= 10;
+ minDigits--;
+ } while (dec != 0);
+
+ while (minDigits > 0) {
+ *--p = '0';
+ minDigits--;
+ }
+
+ SkASSERT(p >= buffer);
+ size_t cp_len = buffer + sizeof(buffer) - p;
+ memcpy(string, p, cp_len);
+ string += cp_len;
+
+ SkASSERT(string - start <= kSkStrAppendU64_MaxSize);
+ return string;
+}
+
+char* SkStrAppendS64(char string[], int64_t dec, int minDigits) {
+ uint64_t udec = dec;
+ if (dec < 0) {
+ *string++ = '-';
+ udec = ~udec + 1; // udec = -udec, but silences some warnings that are trying to be helpful
+ }
+ return SkStrAppendU64(string, udec, minDigits);
+}
+
+char* SkStrAppendScalar(char string[], SkScalar value) {
+ // Handle infinity and NaN ourselves to ensure consistent cross-platform results.
+ // (e.g.: `inf` versus `1.#INF00`, `nan` versus `-nan` for high-bit-set NaNs)
+ if (SkScalarIsNaN(value)) {
+ strcpy(string, "nan");
+ return string + 3;
+ }
+ if (!SkScalarIsFinite(value)) {
+ if (value > 0) {
+ strcpy(string, "inf");
+ return string + 3;
+ } else {
+ strcpy(string, "-inf");
+ return string + 4;
+ }
+ }
+
+ // since floats have at most 8 significant digits, we limit our %g to that.
+ static const char gFormat[] = "%.8g";
+ // make it 1 larger for the terminating 0
+ char buffer[kSkStrAppendScalar_MaxSize + 1];
+ int len = snprintf(buffer, sizeof(buffer), gFormat, value);
+ memcpy(string, buffer, len);
+ SkASSERT(len <= kSkStrAppendScalar_MaxSize);
+ return string + len;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const SkString::Rec SkString::gEmptyRec(0, 0);
+
+#define SizeOfRec() (gEmptyRec.data() - (const char*)&gEmptyRec)
+
+static uint32_t trim_size_t_to_u32(size_t value) {
+ if (sizeof(size_t) > sizeof(uint32_t)) {
+ if (value > UINT32_MAX) {
+ value = UINT32_MAX;
+ }
+ }
+ return (uint32_t)value;
+}
+
+static size_t check_add32(size_t base, size_t extra) {
+ SkASSERT(base <= UINT32_MAX);
+ if (sizeof(size_t) > sizeof(uint32_t)) {
+ if (base + extra > UINT32_MAX) {
+ extra = UINT32_MAX - base;
+ }
+ }
+ return extra;
+}
+
+sk_sp<SkString::Rec> SkString::Rec::Make(const char text[], size_t len) {
+ if (0 == len) {
+ return sk_sp<SkString::Rec>(const_cast<Rec*>(&gEmptyRec));
+ }
+
+ SkSafeMath safe;
+ // We store a 32bit version of the length
+ uint32_t stringLen = safe.castTo<uint32_t>(len);
+ // Add SizeOfRec() for our overhead and 1 for null-termination
+ size_t allocationSize = safe.add(len, SizeOfRec() + sizeof(char));
+ // Align up to a multiple of 4
+ allocationSize = safe.alignUp(allocationSize, 4);
+
+ SkASSERT_RELEASE(safe.ok());
+
+ void* storage = ::operator new (allocationSize);
+ sk_sp<Rec> rec(new (storage) Rec(stringLen, 1));
+ if (text) {
+ memcpy(rec->data(), text, len);
+ }
+ rec->data()[len] = 0;
+ return rec;
+}
+
+void SkString::Rec::ref() const {
+ if (this == &SkString::gEmptyRec) {
+ return;
+ }
+ SkAssertResult(this->fRefCnt.fetch_add(+1, std::memory_order_relaxed));
+}
+
+void SkString::Rec::unref() const {
+ if (this == &SkString::gEmptyRec) {
+ return;
+ }
+ int32_t oldRefCnt = this->fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
+ SkASSERT(oldRefCnt);
+ if (1 == oldRefCnt) {
+ delete this;
+ }
+}
+
+bool SkString::Rec::unique() const {
+ return fRefCnt.load(std::memory_order_acquire) == 1;
+}
+
+#ifdef SK_DEBUG
+int32_t SkString::Rec::getRefCnt() const {
+ return fRefCnt.load(std::memory_order_relaxed);
+}
+
+const SkString& SkString::validate() const {
+ // make sure no one has written over our global
+ SkASSERT(0 == gEmptyRec.fLength);
+ SkASSERT(0 == gEmptyRec.getRefCnt());
+ SkASSERT(0 == gEmptyRec.data()[0]);
+
+ if (fRec.get() != &gEmptyRec) {
+ SkASSERT(fRec->fLength > 0);
+ SkASSERT(fRec->getRefCnt() > 0);
+ SkASSERT(0 == fRec->data()[fRec->fLength]);
+ }
+ return *this;
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkString::SkString() : fRec(const_cast<Rec*>(&gEmptyRec)) {
+}
+
+SkString::SkString(size_t len) {
+ fRec = Rec::Make(nullptr, len);
+}
+
+SkString::SkString(const char text[]) {
+ size_t len = text ? strlen(text) : 0;
+
+ fRec = Rec::Make(text, len);
+}
+
+SkString::SkString(const char text[], size_t len) {
+ fRec = Rec::Make(text, len);
+}
+
+SkString::SkString(const SkString& src) : fRec(src.validate().fRec) {}
+
+SkString::SkString(SkString&& src) : fRec(std::move(src.validate().fRec)) {
+ src.fRec.reset(const_cast<Rec*>(&gEmptyRec));
+}
+
+SkString::SkString(const std::string& src) {
+ fRec = Rec::Make(src.c_str(), src.size());
+}
+
+SkString::SkString(std::string_view src) {
+ fRec = Rec::Make(src.data(), src.length());
+}
+
+SkString::~SkString() {
+ this->validate();
+}
+
+bool SkString::equals(const SkString& src) const {
+ return fRec == src.fRec || this->equals(src.c_str(), src.size());
+}
+
+bool SkString::equals(const char text[]) const {
+ return this->equals(text, text ? strlen(text) : 0);
+}
+
+bool SkString::equals(const char text[], size_t len) const {
+ SkASSERT(len == 0 || text != nullptr);
+
+ return fRec->fLength == len && !sk_careful_memcmp(fRec->data(), text, len);
+}
+
+SkString& SkString::operator=(const SkString& src) {
+ this->validate();
+ fRec = src.fRec; // sk_sp<Rec>::operator=(const sk_sp<Ref>&) checks for self-assignment.
+ return *this;
+}
+
+SkString& SkString::operator=(SkString&& src) {
+ this->validate();
+
+ if (fRec != src.fRec) {
+ this->swap(src);
+ }
+ return *this;
+}
+
+SkString& SkString::operator=(const char text[]) {
+ this->validate();
+ return *this = SkString(text);
+}
+
+void SkString::reset() {
+ this->validate();
+ fRec.reset(const_cast<Rec*>(&gEmptyRec));
+}
+
+char* SkString::data() {
+ this->validate();
+
+ if (fRec->fLength) {
+ if (!fRec->unique()) {
+ fRec = Rec::Make(fRec->data(), fRec->fLength);
+ }
+ }
+ return fRec->data();
+}
+
+void SkString::resize(size_t len) {
+ len = trim_size_t_to_u32(len);
+ if (0 == len) {
+ this->reset();
+ } else if (fRec->unique() && ((len >> 2) <= (fRec->fLength >> 2))) {
+ // Use less of the buffer we have without allocating a smaller one.
+ char* p = this->data();
+ p[len] = '\0';
+ fRec->fLength = SkToU32(len);
+ } else {
+ SkString newString(len);
+ char* dest = newString.data();
+ int copyLen = std::min<uint32_t>(len, this->size());
+ memcpy(dest, this->c_str(), copyLen);
+ dest[copyLen] = '\0';
+ this->swap(newString);
+ }
+}
+
+void SkString::set(const char text[]) {
+ this->set(text, text ? strlen(text) : 0);
+}
+
+void SkString::set(const char text[], size_t len) {
+ len = trim_size_t_to_u32(len);
+ if (0 == len) {
+ this->reset();
+ } else if (fRec->unique() && ((len >> 2) <= (fRec->fLength >> 2))) {
+ // Use less of the buffer we have without allocating a smaller one.
+ char* p = this->data();
+ if (text) {
+ memcpy(p, text, len);
+ }
+ p[len] = '\0';
+ fRec->fLength = SkToU32(len);
+ } else {
+ SkString tmp(text, len);
+ this->swap(tmp);
+ }
+}
+
+void SkString::insert(size_t offset, const char text[]) {
+ this->insert(offset, text, text ? strlen(text) : 0);
+}
+
+void SkString::insert(size_t offset, const char text[], size_t len) {
+ if (len) {
+ size_t length = fRec->fLength;
+ if (offset > length) {
+ offset = length;
+ }
+
+ // Check if length + len exceeds 32 bits; if so, we trim len
+ len = check_add32(length, len);
+ if (0 == len) {
+ return;
+ }
+
+ /* If we're the only owner, and we have room in our allocation for the insert,
+ do it in place, rather than allocating a new buffer.
+
+ To know we have room, compare the allocated sizes
+ beforeAlloc = SkAlign4(length + 1)
+ afterAlloc = SkAlign4(length + 1 + len)
+ but SkAlign4(x) is (x + 3) >> 2 << 2
+ which is equivalent for testing to (length + 1 + 3) >> 2 == (length + 1 + 3 + len) >> 2
+ and we can then eliminate the +1+3 since that doesn't affect the answer
+ */
+ if (fRec->unique() && (length >> 2) == ((length + len) >> 2)) {
+ char* dst = this->data();
+
+ if (offset < length) {
+ memmove(dst + offset + len, dst + offset, length - offset);
+ }
+ memcpy(dst + offset, text, len);
+
+ dst[length + len] = 0;
+ fRec->fLength = SkToU32(length + len);
+ } else {
+ /* Seems we should use realloc here, since that is safe if it fails
+ (we have the original data), and might be faster than alloc/copy/free.
+ */
+ SkString tmp(fRec->fLength + len);
+ char* dst = tmp.data();
+
+ if (offset > 0) {
+ memcpy(dst, fRec->data(), offset);
+ }
+ memcpy(dst + offset, text, len);
+ if (offset < fRec->fLength) {
+ memcpy(dst + offset + len, fRec->data() + offset,
+ fRec->fLength - offset);
+ }
+
+ this->swap(tmp);
+ }
+ }
+}
+
+void SkString::insertUnichar(size_t offset, SkUnichar uni) {
+ char buffer[SkUTF::kMaxBytesInUTF8Sequence];
+ size_t len = SkUTF::ToUTF8(uni, buffer);
+
+ if (len) {
+ this->insert(offset, buffer, len);
+ }
+}
+
+void SkString::insertS32(size_t offset, int32_t dec) {
+ char buffer[kSkStrAppendS32_MaxSize];
+ char* stop = SkStrAppendS32(buffer, dec);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertS64(size_t offset, int64_t dec, int minDigits) {
+ char buffer[kSkStrAppendS64_MaxSize];
+ char* stop = SkStrAppendS64(buffer, dec, minDigits);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertU32(size_t offset, uint32_t dec) {
+ char buffer[kSkStrAppendU32_MaxSize];
+ char* stop = SkStrAppendU32(buffer, dec);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertU64(size_t offset, uint64_t dec, int minDigits) {
+ char buffer[kSkStrAppendU64_MaxSize];
+ char* stop = SkStrAppendU64(buffer, dec, minDigits);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+void SkString::insertHex(size_t offset, uint32_t hex, int minDigits) {
+ minDigits = SkTPin(minDigits, 0, 8);
+
+ char buffer[8];
+ char* p = buffer + sizeof(buffer);
+
+ do {
+ *--p = SkHexadecimalDigits::gUpper[hex & 0xF];
+ hex >>= 4;
+ minDigits -= 1;
+ } while (hex != 0);
+
+ while (--minDigits >= 0) {
+ *--p = '0';
+ }
+
+ SkASSERT(p >= buffer);
+ this->insert(offset, p, buffer + sizeof(buffer) - p);
+}
+
+void SkString::insertScalar(size_t offset, SkScalar value) {
+ char buffer[kSkStrAppendScalar_MaxSize];
+ char* stop = SkStrAppendScalar(buffer, value);
+ this->insert(offset, buffer, stop - buffer);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkString::printf(const char format[], ...) {
+ va_list args;
+ va_start(args, format);
+ this->printVAList(format, args);
+ va_end(args);
+}
+
+void SkString::printVAList(const char format[], va_list args) {
+ char stackBuffer[kBufferSize];
+ StringBuffer result = apply_format_string(format, args, stackBuffer, this);
+
+ if (result.fText == stackBuffer) {
+ this->set(result.fText, result.fLength);
+ }
+}
+
+void SkString::appendf(const char format[], ...) {
+ va_list args;
+ va_start(args, format);
+ this->appendVAList(format, args);
+ va_end(args);
+}
+
+void SkString::appendVAList(const char format[], va_list args) {
+ if (this->isEmpty()) {
+ this->printVAList(format, args);
+ return;
+ }
+
+ SkString overflow;
+ char stackBuffer[kBufferSize];
+ StringBuffer result = apply_format_string(format, args, stackBuffer, &overflow);
+
+ this->append(result.fText, result.fLength);
+}
+
+void SkString::prependf(const char format[], ...) {
+ va_list args;
+ va_start(args, format);
+ this->prependVAList(format, args);
+ va_end(args);
+}
+
+void SkString::prependVAList(const char format[], va_list args) {
+ if (this->isEmpty()) {
+ this->printVAList(format, args);
+ return;
+ }
+
+ SkString overflow;
+ char stackBuffer[kBufferSize];
+ StringBuffer result = apply_format_string(format, args, stackBuffer, &overflow);
+
+ this->prepend(result.fText, result.fLength);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkString::remove(size_t offset, size_t length) {
+ size_t size = this->size();
+
+ if (offset < size) {
+ if (length > size - offset) {
+ length = size - offset;
+ }
+ SkASSERT(length <= size);
+ SkASSERT(offset <= size - length);
+ if (length > 0) {
+ SkString tmp(size - length);
+ char* dst = tmp.data();
+ const char* src = this->c_str();
+
+ if (offset) {
+ memcpy(dst, src, offset);
+ }
+ size_t tail = size - (offset + length);
+ if (tail) {
+ memcpy(dst + offset, src + (offset + length), tail);
+ }
+ SkASSERT(dst[tmp.size()] == 0);
+ this->swap(tmp);
+ }
+ }
+}
+
+void SkString::swap(SkString& other) {
+ this->validate();
+ other.validate();
+
+ using std::swap;
+ swap(fRec, other.fRec);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkString SkStringPrintf(const char* format, ...) {
+ SkString formattedOutput;
+ va_list args;
+ va_start(args, format);
+ formattedOutput.printVAList(format, args);
+ va_end(args);
+ return formattedOutput;
+}
diff --git a/gfx/skia/skia/src/core/SkStringUtils.cpp b/gfx/skia/skia/src/core/SkStringUtils.cpp
new file mode 100644
index 0000000000..cfbe7d5563
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStringUtils.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/private/base/SkTArray.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkStringUtils.h"
+
+void SkAppendScalar(SkString* str, SkScalar value, SkScalarAsStringType asType) {
+ switch (asType) {
+ case kHex_SkScalarAsStringType:
+ str->appendf("SkBits2Float(0x%08x)", SkFloat2Bits(value));
+ break;
+ case kDec_SkScalarAsStringType: {
+ SkString tmp;
+ tmp.printf("%.9g", value);
+ if (tmp.contains('.')) {
+ tmp.appendUnichar('f');
+ }
+ str->append(tmp);
+ break;
+ }
+ }
+}
+
+SkString SkTabString(const SkString& string, int tabCnt) {
+ if (tabCnt <= 0) {
+ return string;
+ }
+ SkString tabs;
+ for (int i = 0; i < tabCnt; ++i) {
+ tabs.append("\t");
+ }
+ SkString result;
+ static const char newline[] = "\n";
+ const char* input = string.c_str();
+ int nextNL = SkStrFind(input, newline);
+ while (nextNL >= 0) {
+ if (nextNL > 0) {
+ result.append(tabs);
+ }
+ result.append(input, nextNL + 1);
+ input += nextNL + 1;
+ nextNL = SkStrFind(input, newline);
+ }
+ if (*input != '\0') {
+ result.append(tabs);
+ result.append(input);
+ }
+ return result;
+}
+
+SkString SkStringFromUTF16(const uint16_t* src, size_t count) {
+ SkString ret;
+ const uint16_t* stop = src + count;
+ if (count > 0) {
+ SkASSERT(src);
+ size_t n = 0;
+ const uint16_t* end = src + count;
+ for (const uint16_t* ptr = src; ptr < end;) {
+ const uint16_t* last = ptr;
+ SkUnichar u = SkUTF::NextUTF16(&ptr, stop);
+ size_t s = SkUTF::ToUTF8(u);
+ if (n > UINT32_MAX - s) {
+ end = last; // truncate input string
+ break;
+ }
+ n += s;
+ }
+ ret = SkString(n);
+ char* out = ret.data();
+ for (const uint16_t* ptr = src; ptr < end;) {
+ out += SkUTF::ToUTF8(SkUTF::NextUTF16(&ptr, stop), out);
+ }
+ SkASSERT(out == ret.data() + n);
+ }
+ return ret;
+}
+
+void SkStrSplit(const char* str,
+ const char* delimiters,
+ SkStrSplitMode splitMode,
+ SkTArray<SkString>* out) {
+ if (splitMode == kCoalesce_SkStrSplitMode) {
+ // Skip any delimiters.
+ str += strspn(str, delimiters);
+ }
+ if (!*str) {
+ return;
+ }
+
+ while (true) {
+ // Find a token.
+ const size_t len = strcspn(str, delimiters);
+ if (splitMode == kStrict_SkStrSplitMode || len > 0) {
+ out->push_back().set(str, len);
+ str += len;
+ }
+
+ if (!*str) {
+ return;
+ }
+ if (splitMode == kCoalesce_SkStrSplitMode) {
+ // Skip any delimiters.
+ str += strspn(str, delimiters);
+ } else {
+ // Skip one delimiter.
+ str += 1;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkStringUtils.h b/gfx/skia/skia/src/core/SkStringUtils.h
new file mode 100644
index 0000000000..1101b551de
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStringUtils.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStringUtils_DEFINED
+#define SkStringUtils_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/private/base/SkTArray.h"
+
+enum SkScalarAsStringType {
+ kDec_SkScalarAsStringType,
+ kHex_SkScalarAsStringType,
+};
+
+void SkAppendScalar(SkString*, SkScalar, SkScalarAsStringType);
+
+static inline void SkAppendScalarDec(SkString* str, SkScalar value) {
+ SkAppendScalar(str, value, kDec_SkScalarAsStringType);
+}
+
+static inline void SkAppendScalarHex(SkString* str, SkScalar value) {
+ SkAppendScalar(str, value, kHex_SkScalarAsStringType);
+}
+
+/** Indents every non-empty line of the string by tabCnt tabs */
+SkString SkTabString(const SkString& string, int tabCnt);
+
+SkString SkStringFromUTF16(const uint16_t* src, size_t count);
+
+#if defined(SK_BUILD_FOR_WIN)
+ #define SK_strcasecmp _stricmp
+#else
+ #define SK_strcasecmp strcasecmp
+#endif
+
+enum SkStrSplitMode {
+ // Strictly return all results. If the input is ",," and the separator is ',' this will return
+ // an array of three empty strings.
+ kStrict_SkStrSplitMode,
+
+ // Only nonempty results will be added to the results. Multiple separators will be
+ // coalesced. Separators at the beginning and end of the input will be ignored. If the input is
+ // ",," and the separator is ',', this will return an empty vector.
+ kCoalesce_SkStrSplitMode
+};
+
+// Split str on any characters in delimiters into out. (strtok with a non-destructive API.)
+void SkStrSplit(const char* str,
+ const char* delimiters,
+ SkStrSplitMode splitMode,
+ SkTArray<SkString>* out);
+
+inline void SkStrSplit(const char* str, const char* delimiters, SkTArray<SkString>* out) {
+ SkStrSplit(str, delimiters, kCoalesce_SkStrSplitMode, out);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkStroke.cpp b/gfx/skia/skia/src/core/SkStroke.cpp
new file mode 100644
index 0000000000..94c1ea2e82
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStroke.cpp
@@ -0,0 +1,1618 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkStrokerPriv.h"
+
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkPointPriv.h"
+
+#include <utility>
+
+enum {
+ kTangent_RecursiveLimit,
+ kCubic_RecursiveLimit,
+ kConic_RecursiveLimit,
+ kQuad_RecursiveLimit
+};
+
+// quads with extreme widths (e.g. (0,1) (1,6) (0,3) width=5e7) recurse to point of failure
+// largest seen for normal cubics : 5, 26
+// largest seen for normal quads : 11
+// 3x limits seen in practice, except for cubics (3x limit would be ~75).
+// For cubics, we never get close to 75 when running through dm. The limit of 24
+// was chosen because it's close to the peak in a count of cubic recursion depths visited
+// (define DEBUG_CUBIC_RECURSION_DEPTHS) and no diffs were produced on gold when using it.
+static const int kRecursiveLimits[] = { 5*3, 24, 11*3, 11*3 };
+
+static_assert(0 == kTangent_RecursiveLimit, "cubic_stroke_relies_on_tangent_equalling_zero");
+static_assert(1 == kCubic_RecursiveLimit, "cubic_stroke_relies_on_cubic_equalling_one");
+static_assert(std::size(kRecursiveLimits) == kQuad_RecursiveLimit + 1,
+ "recursive_limits_mismatch");
+
+#if defined SK_DEBUG && QUAD_STROKE_APPROX_EXTENDED_DEBUGGING
+ int gMaxRecursion[std::size(kRecursiveLimits)] = { 0 };
+#endif
+#ifndef DEBUG_QUAD_STROKER
+ #define DEBUG_QUAD_STROKER 0
+#endif
+
+#if DEBUG_QUAD_STROKER
+ /* Enable to show the decisions made in subdividing the curve -- helpful when the resulting
+ stroke has more than the optimal number of quadratics and lines */
+ #define STROKER_RESULT(resultType, depth, quadPts, format, ...) \
+ SkDebugf("[%d] %s " format "\n", depth, __FUNCTION__, __VA_ARGS__), \
+ SkDebugf(" " #resultType " t=(%g,%g)\n", quadPts->fStartT, quadPts->fEndT), \
+ resultType
+ #define STROKER_DEBUG_PARAMS(...) , __VA_ARGS__
+#else
+ #define STROKER_RESULT(resultType, depth, quadPts, format, ...) \
+ resultType
+ #define STROKER_DEBUG_PARAMS(...)
+#endif
+
+#ifndef DEBUG_CUBIC_RECURSION_DEPTHS
+#define DEBUG_CUBIC_RECURSION_DEPTHS 0
+#endif
+#if DEBUG_CUBIC_RECURSION_DEPTHS
+ /* Prints a histogram of recursion depths at process termination. */
+ static struct DepthHistogram {
+ inline static constexpr int kMaxDepth = 75;
+ int fCubicDepths[kMaxDepth + 1];
+
+ DepthHistogram() { memset(fCubicDepths, 0, sizeof(fCubicDepths)); }
+
+ ~DepthHistogram() {
+ SkDebugf("# times recursion terminated per depth:\n");
+ for (int i = 0; i <= kMaxDepth; i++) {
+ SkDebugf(" depth %d: %d\n", i, fCubicDepths[i]);
+ }
+ }
+
+ inline void incDepth(int depth) {
+ SkASSERT(depth >= 0 && depth <= kMaxDepth);
+ fCubicDepths[depth]++;
+ }
+ } sCubicDepthHistogram;
+
+#define DEBUG_CUBIC_RECURSION_TRACK_DEPTH(depth) sCubicDepthHistogram.incDepth(depth)
+#else
+#define DEBUG_CUBIC_RECURSION_TRACK_DEPTH(depth) (void)(depth)
+#endif
+
+static inline bool degenerate_vector(const SkVector& v) {
+ return !SkPointPriv::CanNormalize(v.fX, v.fY);
+}
+
+static bool set_normal_unitnormal(const SkPoint& before, const SkPoint& after, SkScalar scale,
+ SkScalar radius,
+ SkVector* normal, SkVector* unitNormal) {
+ if (!unitNormal->setNormalize((after.fX - before.fX) * scale,
+ (after.fY - before.fY) * scale)) {
+ return false;
+ }
+ SkPointPriv::RotateCCW(unitNormal);
+ unitNormal->scale(radius, normal);
+ return true;
+}
+
+static bool set_normal_unitnormal(const SkVector& vec,
+ SkScalar radius,
+ SkVector* normal, SkVector* unitNormal) {
+ if (!unitNormal->setNormalize(vec.fX, vec.fY)) {
+ return false;
+ }
+ SkPointPriv::RotateCCW(unitNormal);
+ unitNormal->scale(radius, normal);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkQuadConstruct { // The state of the quad stroke under construction.
+ SkPoint fQuad[3]; // the stroked quad parallel to the original curve
+ SkPoint fTangentStart; // a point tangent to fQuad[0]
+ SkPoint fTangentEnd; // a point tangent to fQuad[2]
+ SkScalar fStartT; // a segment of the original curve
+ SkScalar fMidT; // "
+ SkScalar fEndT; // "
+ bool fStartSet; // state to share common points across structs
+ bool fEndSet; // "
+ bool fOppositeTangents; // set if coincident tangents have opposite directions
+
+ // return false if start and end are too close to have a unique middle
+ bool init(SkScalar start, SkScalar end) {
+ fStartT = start;
+ fMidT = (start + end) * SK_ScalarHalf;
+ fEndT = end;
+ fStartSet = fEndSet = false;
+ return fStartT < fMidT && fMidT < fEndT;
+ }
+
+ bool initWithStart(SkQuadConstruct* parent) {
+ if (!init(parent->fStartT, parent->fMidT)) {
+ return false;
+ }
+ fQuad[0] = parent->fQuad[0];
+ fTangentStart = parent->fTangentStart;
+ fStartSet = true;
+ return true;
+ }
+
+ bool initWithEnd(SkQuadConstruct* parent) {
+ if (!init(parent->fMidT, parent->fEndT)) {
+ return false;
+ }
+ fQuad[2] = parent->fQuad[2];
+ fTangentEnd = parent->fTangentEnd;
+ fEndSet = true;
+ return true;
+ }
+};
+
+class SkPathStroker {
+public:
+ SkPathStroker(const SkPath& src,
+ SkScalar radius, SkScalar miterLimit, SkPaint::Cap,
+ SkPaint::Join, SkScalar resScale,
+ bool canIgnoreCenter);
+
+ bool hasOnlyMoveTo() const { return 0 == fSegmentCount; }
+ SkPoint moveToPt() const { return fFirstPt; }
+
+ void moveTo(const SkPoint&);
+ void lineTo(const SkPoint&, const SkPath::Iter* iter = nullptr);
+ void quadTo(const SkPoint&, const SkPoint&);
+ void conicTo(const SkPoint&, const SkPoint&, SkScalar weight);
+ void cubicTo(const SkPoint&, const SkPoint&, const SkPoint&);
+ void close(bool isLine) { this->finishContour(true, isLine); }
+
+ void done(SkPath* dst, bool isLine) {
+ this->finishContour(false, isLine);
+ dst->swap(fOuter);
+ }
+
+ SkScalar getResScale() const { return fResScale; }
+
+ bool isCurrentContourEmpty() const {
+ return fInner.isZeroLengthSincePoint(0) &&
+ fOuter.isZeroLengthSincePoint(fFirstOuterPtIndexInContour);
+ }
+
+private:
+ SkScalar fRadius;
+ SkScalar fInvMiterLimit;
+ SkScalar fResScale;
+ SkScalar fInvResScale;
+ SkScalar fInvResScaleSquared;
+
+ SkVector fFirstNormal, fPrevNormal, fFirstUnitNormal, fPrevUnitNormal;
+ SkPoint fFirstPt, fPrevPt; // on original path
+ SkPoint fFirstOuterPt;
+ int fFirstOuterPtIndexInContour;
+ int fSegmentCount;
+ bool fPrevIsLine;
+ bool fCanIgnoreCenter;
+
+ SkStrokerPriv::CapProc fCapper;
+ SkStrokerPriv::JoinProc fJoiner;
+
+ SkPath fInner, fOuter, fCusper; // outer is our working answer, inner is temp
+
+ enum StrokeType {
+ kOuter_StrokeType = 1, // use sign-opposite values later to flip perpendicular axis
+ kInner_StrokeType = -1
+ } fStrokeType;
+
+ enum ResultType {
+ kSplit_ResultType, // the caller should split the quad stroke in two
+ kDegenerate_ResultType, // the caller should add a line
+ kQuad_ResultType, // the caller should (continue to try to) add a quad stroke
+ };
+
+ enum ReductionType {
+ kPoint_ReductionType, // all curve points are practically identical
+ kLine_ReductionType, // the control point is on the line between the ends
+ kQuad_ReductionType, // the control point is outside the line between the ends
+ kDegenerate_ReductionType, // the control point is on the line but outside the ends
+ kDegenerate2_ReductionType, // two control points are on the line but outside ends (cubic)
+ kDegenerate3_ReductionType, // three areas of max curvature found (for cubic)
+ };
+
+ enum IntersectRayType {
+ kCtrlPt_RayType,
+ kResultType_RayType,
+ };
+
+ int fRecursionDepth; // track stack depth to abort if numerics run amok
+ bool fFoundTangents; // do less work until tangents meet (cubic)
+ bool fJoinCompleted; // previous join was not degenerate
+
+ void addDegenerateLine(const SkQuadConstruct* );
+ static ReductionType CheckConicLinear(const SkConic& , SkPoint* reduction);
+ static ReductionType CheckCubicLinear(const SkPoint cubic[4], SkPoint reduction[3],
+ const SkPoint** tanPtPtr);
+ static ReductionType CheckQuadLinear(const SkPoint quad[3], SkPoint* reduction);
+ ResultType compareQuadConic(const SkConic& , SkQuadConstruct* ) const;
+ ResultType compareQuadCubic(const SkPoint cubic[4], SkQuadConstruct* );
+ ResultType compareQuadQuad(const SkPoint quad[3], SkQuadConstruct* );
+ void conicPerpRay(const SkConic& , SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ void conicQuadEnds(const SkConic& , SkQuadConstruct* ) const;
+ bool conicStroke(const SkConic& , SkQuadConstruct* );
+ bool cubicMidOnLine(const SkPoint cubic[4], const SkQuadConstruct* ) const;
+ void cubicPerpRay(const SkPoint cubic[4], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ void cubicQuadEnds(const SkPoint cubic[4], SkQuadConstruct* );
+ void cubicQuadMid(const SkPoint cubic[4], const SkQuadConstruct* , SkPoint* mid) const;
+ bool cubicStroke(const SkPoint cubic[4], SkQuadConstruct* );
+ void init(StrokeType strokeType, SkQuadConstruct* , SkScalar tStart, SkScalar tEnd);
+ ResultType intersectRay(SkQuadConstruct* , IntersectRayType STROKER_DEBUG_PARAMS(int) ) const;
+ bool ptInQuadBounds(const SkPoint quad[3], const SkPoint& pt) const;
+ void quadPerpRay(const SkPoint quad[3], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+ SkPoint* tangent) const;
+ bool quadStroke(const SkPoint quad[3], SkQuadConstruct* );
+ void setConicEndNormal(const SkConic& ,
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalBC, SkVector* unitNormalBC);
+ void setCubicEndNormal(const SkPoint cubic[4],
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalCD, SkVector* unitNormalCD);
+ void setQuadEndNormal(const SkPoint quad[3],
+ const SkVector& normalAB, const SkVector& unitNormalAB,
+ SkVector* normalBC, SkVector* unitNormalBC);
+ void setRayPts(const SkPoint& tPt, SkVector* dxy, SkPoint* onPt, SkPoint* tangent) const;
+ static bool SlightAngle(SkQuadConstruct* );
+ ResultType strokeCloseEnough(const SkPoint stroke[3], const SkPoint ray[2],
+ SkQuadConstruct* STROKER_DEBUG_PARAMS(int depth) ) const;
+ ResultType tangentsMeet(const SkPoint cubic[4], SkQuadConstruct* );
+
+ void finishContour(bool close, bool isLine);
+ bool preJoinTo(const SkPoint&, SkVector* normal, SkVector* unitNormal,
+ bool isLine);
+ void postJoinTo(const SkPoint&, const SkVector& normal,
+ const SkVector& unitNormal);
+
+ void line_to(const SkPoint& currPt, const SkVector& normal);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkPathStroker::preJoinTo(const SkPoint& currPt, SkVector* normal,
+ SkVector* unitNormal, bool currIsLine) {
+ SkASSERT(fSegmentCount >= 0);
+
+ SkScalar prevX = fPrevPt.fX;
+ SkScalar prevY = fPrevPt.fY;
+
+ if (!set_normal_unitnormal(fPrevPt, currPt, fResScale, fRadius, normal, unitNormal)) {
+ if (SkStrokerPriv::CapFactory(SkPaint::kButt_Cap) == fCapper) {
+ return false;
+ }
+ /* Square caps and round caps draw even if the segment length is zero.
+ Since the zero length segment has no direction, set the orientation
+ to upright as the default orientation */
+ normal->set(fRadius, 0);
+ unitNormal->set(1, 0);
+ }
+
+ if (fSegmentCount == 0) {
+ fFirstNormal = *normal;
+ fFirstUnitNormal = *unitNormal;
+ fFirstOuterPt.set(prevX + normal->fX, prevY + normal->fY);
+
+ fOuter.moveTo(fFirstOuterPt.fX, fFirstOuterPt.fY);
+ fInner.moveTo(prevX - normal->fX, prevY - normal->fY);
+ } else { // we have a previous segment
+ fJoiner(&fOuter, &fInner, fPrevUnitNormal, fPrevPt, *unitNormal,
+ fRadius, fInvMiterLimit, fPrevIsLine, currIsLine);
+ }
+ fPrevIsLine = currIsLine;
+ return true;
+}
+
+void SkPathStroker::postJoinTo(const SkPoint& currPt, const SkVector& normal,
+ const SkVector& unitNormal) {
+ fJoinCompleted = true;
+ fPrevPt = currPt;
+ fPrevUnitNormal = unitNormal;
+ fPrevNormal = normal;
+ fSegmentCount += 1;
+}
+
+void SkPathStroker::finishContour(bool close, bool currIsLine) {
+ if (fSegmentCount > 0) {
+ SkPoint pt;
+
+ if (close) {
+ fJoiner(&fOuter, &fInner, fPrevUnitNormal, fPrevPt,
+ fFirstUnitNormal, fRadius, fInvMiterLimit,
+ fPrevIsLine, currIsLine);
+ fOuter.close();
+
+ if (fCanIgnoreCenter) {
+ // If we can ignore the center just make sure the larger of the two paths
+ // is preserved and don't add the smaller one.
+ if (fInner.getBounds().contains(fOuter.getBounds())) {
+ fInner.swap(fOuter);
+ }
+ } else {
+ // now add fInner as its own contour
+ fInner.getLastPt(&pt);
+ fOuter.moveTo(pt.fX, pt.fY);
+ fOuter.reversePathTo(fInner);
+ fOuter.close();
+ }
+ } else { // add caps to start and end
+ // cap the end
+ fInner.getLastPt(&pt);
+ fCapper(&fOuter, fPrevPt, fPrevNormal, pt,
+ currIsLine ? &fInner : nullptr);
+ fOuter.reversePathTo(fInner);
+ // cap the start
+ fCapper(&fOuter, fFirstPt, -fFirstNormal, fFirstOuterPt,
+ fPrevIsLine ? &fInner : nullptr);
+ fOuter.close();
+ }
+ if (!fCusper.isEmpty()) {
+ fOuter.addPath(fCusper);
+ fCusper.rewind();
+ }
+ }
+ // since we may re-use fInner, we rewind instead of reset, to save on
+ // reallocating its internal storage.
+ fInner.rewind();
+ fSegmentCount = -1;
+ fFirstOuterPtIndexInContour = fOuter.countPoints();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPathStroker::SkPathStroker(const SkPath& src,
+ SkScalar radius, SkScalar miterLimit,
+ SkPaint::Cap cap, SkPaint::Join join, SkScalar resScale,
+ bool canIgnoreCenter)
+ : fRadius(radius)
+ , fResScale(resScale)
+ , fCanIgnoreCenter(canIgnoreCenter) {
+
+ /* This is only used when join is miter_join, but we initialize it here
+ so that it is always defined, to fix valgrind warnings.
+ */
+ fInvMiterLimit = 0;
+
+ if (join == SkPaint::kMiter_Join) {
+ if (miterLimit <= SK_Scalar1) {
+ join = SkPaint::kBevel_Join;
+ } else {
+ fInvMiterLimit = SkScalarInvert(miterLimit);
+ }
+ }
+ fCapper = SkStrokerPriv::CapFactory(cap);
+ fJoiner = SkStrokerPriv::JoinFactory(join);
+ fSegmentCount = -1;
+ fFirstOuterPtIndexInContour = 0;
+ fPrevIsLine = false;
+
+ // Need some estimate of how large our final result (fOuter)
+ // and our per-contour temp (fInner) will be, so we don't spend
+ // extra time repeatedly growing these arrays.
+ //
+ // 3x for result == inner + outer + join (swag)
+ // 1x for inner == 'wag' (worst contour length would be better guess)
+ fOuter.incReserve(src.countPoints() * 3);
+ fOuter.setIsVolatile(true);
+ fInner.incReserve(src.countPoints());
+ fInner.setIsVolatile(true);
+ // TODO : write a common error function used by stroking and filling
+ // The '4' below matches the fill scan converter's error term
+ fInvResScale = SkScalarInvert(resScale * 4);
+ fInvResScaleSquared = fInvResScale * fInvResScale;
+ fRecursionDepth = 0;
+}
+
+void SkPathStroker::moveTo(const SkPoint& pt) {
+ if (fSegmentCount > 0) {
+ this->finishContour(false, false);
+ }
+ fSegmentCount = 0;
+ fFirstPt = fPrevPt = pt;
+ fJoinCompleted = false;
+}
+
+void SkPathStroker::line_to(const SkPoint& currPt, const SkVector& normal) {
+ fOuter.lineTo(currPt.fX + normal.fX, currPt.fY + normal.fY);
+ fInner.lineTo(currPt.fX - normal.fX, currPt.fY - normal.fY);
+}
+
+static bool has_valid_tangent(const SkPath::Iter* iter) {
+ SkPath::Iter copy = *iter;
+ SkPath::Verb verb;
+ SkPoint pts[4];
+ while ((verb = copy.next(pts))) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ return false;
+ case SkPath::kLine_Verb:
+ if (pts[0] == pts[1]) {
+ continue;
+ }
+ return true;
+ case SkPath::kQuad_Verb:
+ case SkPath::kConic_Verb:
+ if (pts[0] == pts[1] && pts[0] == pts[2]) {
+ continue;
+ }
+ return true;
+ case SkPath::kCubic_Verb:
+ if (pts[0] == pts[1] && pts[0] == pts[2] && pts[0] == pts[3]) {
+ continue;
+ }
+ return true;
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ return false;
+ }
+ }
+ return false;
+}
+
+void SkPathStroker::lineTo(const SkPoint& currPt, const SkPath::Iter* iter) {
+ bool teenyLine = SkPointPriv::EqualsWithinTolerance(fPrevPt, currPt, SK_ScalarNearlyZero * fInvResScale);
+ if (SkStrokerPriv::CapFactory(SkPaint::kButt_Cap) == fCapper && teenyLine) {
+ return;
+ }
+ if (teenyLine && (fJoinCompleted || (iter && has_valid_tangent(iter)))) {
+ return;
+ }
+ SkVector normal, unitNormal;
+
+ if (!this->preJoinTo(currPt, &normal, &unitNormal, true)) {
+ return;
+ }
+ this->line_to(currPt, normal);
+ this->postJoinTo(currPt, normal, unitNormal);
+}
+
+void SkPathStroker::setQuadEndNormal(const SkPoint quad[3], const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalBC, SkVector* unitNormalBC) {
+ if (!set_normal_unitnormal(quad[1], quad[2], fResScale, fRadius, normalBC, unitNormalBC)) {
+ *normalBC = normalAB;
+ *unitNormalBC = unitNormalAB;
+ }
+}
+
+void SkPathStroker::setConicEndNormal(const SkConic& conic, const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalBC, SkVector* unitNormalBC) {
+ setQuadEndNormal(conic.fPts, normalAB, unitNormalAB, normalBC, unitNormalBC);
+}
+
+void SkPathStroker::setCubicEndNormal(const SkPoint cubic[4], const SkVector& normalAB,
+ const SkVector& unitNormalAB, SkVector* normalCD, SkVector* unitNormalCD) {
+ SkVector ab = cubic[1] - cubic[0];
+ SkVector cd = cubic[3] - cubic[2];
+
+ bool degenerateAB = degenerate_vector(ab);
+ bool degenerateCD = degenerate_vector(cd);
+
+ if (degenerateAB && degenerateCD) {
+ goto DEGENERATE_NORMAL;
+ }
+
+ if (degenerateAB) {
+ ab = cubic[2] - cubic[0];
+ degenerateAB = degenerate_vector(ab);
+ }
+ if (degenerateCD) {
+ cd = cubic[3] - cubic[1];
+ degenerateCD = degenerate_vector(cd);
+ }
+ if (degenerateAB || degenerateCD) {
+DEGENERATE_NORMAL:
+ *normalCD = normalAB;
+ *unitNormalCD = unitNormalAB;
+ return;
+ }
+ SkAssertResult(set_normal_unitnormal(cd, fRadius, normalCD, unitNormalCD));
+}
+
+void SkPathStroker::init(StrokeType strokeType, SkQuadConstruct* quadPts, SkScalar tStart,
+ SkScalar tEnd) {
+ fStrokeType = strokeType;
+ fFoundTangents = false;
+ quadPts->init(tStart, tEnd);
+}
+
+// returns the distance squared from the point to the line
+static SkScalar pt_to_line(const SkPoint& pt, const SkPoint& lineStart, const SkPoint& lineEnd) {
+ SkVector dxy = lineEnd - lineStart;
+ SkVector ab0 = pt - lineStart;
+ SkScalar numer = dxy.dot(ab0);
+ SkScalar denom = dxy.dot(dxy);
+ SkScalar t = sk_ieee_float_divide(numer, denom);
+ if (t >= 0 && t <= 1) {
+ SkPoint hit;
+ hit.fX = lineStart.fX * (1 - t) + lineEnd.fX * t;
+ hit.fY = lineStart.fY * (1 - t) + lineEnd.fY * t;
+ return SkPointPriv::DistanceToSqd(hit, pt);
+ } else {
+ return SkPointPriv::DistanceToSqd(pt, lineStart);
+ }
+}
+
+/* Given a cubic, determine if all four points are in a line.
+ Return true if the inner points are close to a line connecting the outermost points.
+
+ Find the outermost point by looking for the largest difference in X or Y.
+ Given the indices of the outermost points, and that outer_1 is greater than outer_2,
+ this table shows the index of the smaller of the remaining points:
+
+ outer_2
+ 0 1 2 3
+ outer_1 ----------------
+ 0 | - 2 1 1
+ 1 | - - 0 0
+ 2 | - - - 0
+ 3 | - - - -
+
+ If outer_1 == 0 and outer_2 == 1, the smaller of the remaining indices (2 and 3) is 2.
+
+ This table can be collapsed to: (1 + (2 >> outer_2)) >> outer_1
+
+ Given three indices (outer_1 outer_2 mid_1) from 0..3, the remaining index is:
+
+ mid_2 == (outer_1 ^ outer_2 ^ mid_1)
+ */
+static bool cubic_in_line(const SkPoint cubic[4]) {
+    SkScalar ptMax = -1;
+    int outer1 SK_INIT_TO_AVOID_WARNING;
+    int outer2 SK_INIT_TO_AVOID_WARNING;
+    // Find the pair of points with the largest axis-aligned separation; these are the
+    // outermost points, and ptMax is a rough measure of the curve's linear extent.
+    for (int index = 0; index < 3; ++index) {
+        for (int inner = index + 1; inner < 4; ++inner) {
+            SkVector testDiff = cubic[inner] - cubic[index];
+            SkScalar testMax = std::max(SkScalarAbs(testDiff.fX), SkScalarAbs(testDiff.fY));
+            if (ptMax < testMax) {
+                outer1 = index;
+                outer2 = inner;
+                ptMax = testMax;
+            }
+        }
+    }
+    SkASSERT(outer1 >= 0 && outer1 <= 2);
+    SkASSERT(outer2 >= 1 && outer2 <= 3);
+    SkASSERT(outer1 < outer2);
+    // Recover the two interior indices via the bit tricks described in the comment above.
+    int mid1 = (1 + (2 >> outer2)) >> outer1;
+    SkASSERT(mid1 >= 0 && mid1 <= 2);
+    SkASSERT(outer1 != mid1 && outer2 != mid1);
+    int mid2 = outer1 ^ outer2 ^ mid1;
+    SkASSERT(mid2 >= 1 && mid2 <= 3);
+    SkASSERT(mid2 != outer1 && mid2 != outer2 && mid2 != mid1);
+    SkASSERT(((1 << outer1) | (1 << outer2) | (1 << mid1) | (1 << mid2)) == 0x0f);
+    SkScalar lineSlop = ptMax * ptMax * 0.00001f;  // this multiplier is pulled out of the air
+    // The cubic is a line only if both interior points lie within slop of the line
+    // through the two outermost points (pt_to_line returns a squared distance).
+    return pt_to_line(cubic[mid1], cubic[outer1], cubic[outer2]) <= lineSlop
+            && pt_to_line(cubic[mid2], cubic[outer1], cubic[outer2]) <= lineSlop;
+}
+
+/* Given quad, see if all three points are in a line.
+    Return true if the inside point is close to a line connecting the outermost points.
+
+    Find the outermost point by looking for the largest difference in X or Y.
+    Since the XOR of the indices is 3  (0 ^ 1 ^ 2)
+    the missing index equals: outer_1 ^ outer_2 ^ 3
+ */
+static bool quad_in_line(const SkPoint quad[3]) {
+    SkScalar ptMax = -1;
+    int outer1 SK_INIT_TO_AVOID_WARNING;
+    int outer2 SK_INIT_TO_AVOID_WARNING;
+    // Find the pair of points with the largest axis-aligned separation.
+    for (int index = 0; index < 2; ++index) {
+        for (int inner = index + 1; inner < 3; ++inner) {
+            SkVector testDiff = quad[inner] - quad[index];
+            SkScalar testMax = std::max(SkScalarAbs(testDiff.fX), SkScalarAbs(testDiff.fY));
+            if (ptMax < testMax) {
+                outer1 = index;
+                outer2 = inner;
+                ptMax = testMax;
+            }
+        }
+    }
+    SkASSERT(outer1 >= 0 && outer1 <= 1);
+    SkASSERT(outer2 >= 1 && outer2 <= 2);
+    SkASSERT(outer1 < outer2);
+    int mid = outer1 ^ outer2 ^ 3;
+    const float kCurvatureSlop = 0.000005f;  // this multiplier is pulled out of the air
+    SkScalar lineSlop =  ptMax * ptMax * kCurvatureSlop;
+    // pt_to_line returns a squared distance, so compare against a squared tolerance.
+    return pt_to_line(quad[mid], quad[outer1], quad[outer2]) <= lineSlop;
+}
+
+// A conic is linear exactly when its control polygon is linear, so reuse the quad test
+// on the conic's three control points (the weight does not affect collinearity).
+static bool conic_in_line(const SkConic& conic) {
+    return quad_in_line(conic.fPts);
+}
+
+// Classify a cubic for stroking: a single point, a line, a true curve (also returning
+// which control point supplies the initial tangent), or a nearly-linear cubic reduced
+// to 1-3 interior points of maximum curvature (kDegenerate..kDegenerate3).
+SkPathStroker::ReductionType SkPathStroker::CheckCubicLinear(const SkPoint cubic[4],
+        SkPoint reduction[3], const SkPoint** tangentPtPtr) {
+    bool degenerateAB = degenerate_vector(cubic[1] - cubic[0]);
+    bool degenerateBC = degenerate_vector(cubic[2] - cubic[1]);
+    bool degenerateCD = degenerate_vector(cubic[3] - cubic[2]);
+    // All three control-polygon edges are degenerate: the cubic collapses to a point.
+    if (degenerateAB & degenerateBC & degenerateCD) {
+        return kPoint_ReductionType;
+    }
+    // Exactly two degenerate edges leave a single non-degenerate segment: a line.
+    if (degenerateAB + degenerateBC + degenerateCD == 2) {
+        return kLine_ReductionType;
+    }
+    if (!cubic_in_line(cubic)) {
+        // A real curve; pick the first non-degenerate control point as the tangent reference.
+        *tangentPtPtr = degenerateAB ? &cubic[2] : &cubic[1];
+        return kQuad_ReductionType;
+    }
+    SkScalar tValues[3];
+    int count = SkFindCubicMaxCurvature(cubic, tValues);
+    int rCount = 0;
+    // Now loop over the t-values, and reject any that evaluate to either end-point
+    for (int index = 0; index < count; ++index) {
+        SkScalar t = tValues[index];
+        if (0 >= t || t >= 1) {
+            continue;
+        }
+        SkEvalCubicAt(cubic, t, &reduction[rCount], nullptr, nullptr);
+        if (reduction[rCount] != cubic[0] && reduction[rCount] != cubic[3]) {
+            ++rCount;
+        }
+    }
+    if (rCount == 0) {
+        return kLine_ReductionType;
+    }
+    // The kDegenerate enum values must follow kQuad so rCount can be added directly.
+    static_assert(kQuad_ReductionType + 1 == kDegenerate_ReductionType, "enum_out_of_whack");
+    static_assert(kQuad_ReductionType + 2 == kDegenerate2_ReductionType, "enum_out_of_whack");
+    static_assert(kQuad_ReductionType + 3 == kDegenerate3_ReductionType, "enum_out_of_whack");
+
+    return (ReductionType) (kQuad_ReductionType + rCount);
+}
+
+// Classify a conic for stroking: a point, a line, a true curve, or a nearly-linear
+// conic reduced to its (approximate) point of maximum curvature.
+SkPathStroker::ReductionType SkPathStroker::CheckConicLinear(const SkConic& conic,
+        SkPoint* reduction) {
+    bool degenerateAB = degenerate_vector(conic.fPts[1] - conic.fPts[0]);
+    bool degenerateBC = degenerate_vector(conic.fPts[2] - conic.fPts[1]);
+    if (degenerateAB & degenerateBC) {
+        return kPoint_ReductionType;
+    }
+    if (degenerateAB | degenerateBC) {
+        return kLine_ReductionType;
+    }
+    if (!conic_in_line(conic)) {
+        return kQuad_ReductionType;
+    }
+    // SkFindConicMaxCurvature would be a better solution, once we know how to
+    // implement it. Quad curvature is a reasonable substitute
+    SkScalar t = SkFindQuadMaxCurvature(conic.fPts);
+    if (0 == t) {
+        return kLine_ReductionType;
+    }
+    conic.evalAt(t, reduction, nullptr);
+    return kDegenerate_ReductionType;
+}
+
+// Classify a quad for stroking: a point, a line, a true curve, or a nearly-linear
+// quad reduced to its point of maximum curvature.
+SkPathStroker::ReductionType SkPathStroker::CheckQuadLinear(const SkPoint quad[3],
+        SkPoint* reduction) {
+    bool degenerateAB = degenerate_vector(quad[1] - quad[0]);
+    bool degenerateBC = degenerate_vector(quad[2] - quad[1]);
+    if (degenerateAB & degenerateBC) {
+        return kPoint_ReductionType;
+    }
+    if (degenerateAB | degenerateBC) {
+        return kLine_ReductionType;
+    }
+    if (!quad_in_line(quad)) {
+        return kQuad_ReductionType;
+    }
+    // Max curvature at an endpoint means the quad has no interior bulge worth keeping.
+    SkScalar t = SkFindQuadMaxCurvature(quad);
+    if (0 == t || 1 == t) {
+        return kLine_ReductionType;
+    }
+    *reduction = SkEvalQuadAt(quad, t);
+    return kDegenerate_ReductionType;
+}
+
+// Stroke a conic segment from fPrevPt through pt1 to pt2 with the given weight.
+// Degenerate conics are reduced to points/lines; real curves are stroked by
+// recursively approximating the outer and inner offsets with quads.
+void SkPathStroker::conicTo(const SkPoint& pt1, const SkPoint& pt2, SkScalar weight) {
+    const SkConic conic(fPrevPt, pt1, pt2, weight);
+    SkPoint reduction;
+    ReductionType reductionType = CheckConicLinear(conic, &reduction);
+    if (kPoint_ReductionType == reductionType) {
+        /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+            as if it were followed by a zero-length line. Lines without length
+            can have square and round end caps. */
+        this->lineTo(pt2);
+        return;
+    }
+    if (kLine_ReductionType == reductionType) {
+        this->lineTo(pt2);
+        return;
+    }
+    if (kDegenerate_ReductionType == reductionType) {
+        // Nearly linear: draw to the max-curvature point, then to the end, joining
+        // the two lines with a round join so the bulge is not visibly clipped.
+        this->lineTo(reduction);
+        SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+        fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+        this->lineTo(pt2);
+        fJoiner = saveJoiner;
+        return;
+    }
+    SkASSERT(kQuad_ReductionType == reductionType);
+    SkVector normalAB, unitAB, normalBC, unitBC;
+    if (!this->preJoinTo(pt1, &normalAB, &unitAB, false)) {
+        this->lineTo(pt2);
+        return;
+    }
+    SkQuadConstruct quadPts;
+    // Stroke the outer and inner offset curves over the full t range [0, 1].
+    this->init(kOuter_StrokeType, &quadPts, 0, 1);
+    (void) this->conicStroke(conic, &quadPts);
+    this->init(kInner_StrokeType, &quadPts, 0, 1);
+    (void) this->conicStroke(conic, &quadPts);
+    this->setConicEndNormal(conic, normalAB, unitAB, &normalBC, &unitBC);
+    this->postJoinTo(pt2, normalBC, unitBC);
+}
+
+// Stroke a quadratic segment from fPrevPt through pt1 to pt2. Mirrors conicTo:
+// degenerate quads become points/lines; real curves are stroked by recursively
+// approximating the outer and inner offsets with quads.
+void SkPathStroker::quadTo(const SkPoint& pt1, const SkPoint& pt2) {
+    const SkPoint quad[3] = { fPrevPt, pt1, pt2 };
+    SkPoint reduction;
+    ReductionType reductionType = CheckQuadLinear(quad, &reduction);
+    if (kPoint_ReductionType == reductionType) {
+        /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+            as if it were followed by a zero-length line. Lines without length
+            can have square and round end caps. */
+        this->lineTo(pt2);
+        return;
+    }
+    if (kLine_ReductionType == reductionType) {
+        this->lineTo(pt2);
+        return;
+    }
+    if (kDegenerate_ReductionType == reductionType) {
+        // Nearly linear: draw through the max-curvature point with a round join.
+        this->lineTo(reduction);
+        SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+        fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+        this->lineTo(pt2);
+        fJoiner = saveJoiner;
+        return;
+    }
+    SkASSERT(kQuad_ReductionType == reductionType);
+    SkVector normalAB, unitAB, normalBC, unitBC;
+    if (!this->preJoinTo(pt1, &normalAB, &unitAB, false)) {
+        this->lineTo(pt2);
+        return;
+    }
+    SkQuadConstruct quadPts;
+    // Stroke the outer and inner offset curves over the full t range [0, 1].
+    this->init(kOuter_StrokeType, &quadPts, 0, 1);
+    (void) this->quadStroke(quad, &quadPts);
+    this->init(kInner_StrokeType, &quadPts, 0, 1);
+    (void) this->quadStroke(quad, &quadPts);
+    this->setQuadEndNormal(quad, normalAB, unitAB, &normalBC, &unitBC);
+
+    this->postJoinTo(pt2, normalBC, unitBC);
+}
+
+// Given a point on the curve and its derivative, scale the derivative by the radius, and
+// compute the perpendicular point and its tangent.
+void SkPathStroker::setRayPts(const SkPoint& tPt, SkVector* dxy, SkPoint* onPt,
+        SkPoint* tangent) const {
+    // If the derivative cannot be normalized (zero length), substitute a horizontal
+    // vector of length radius so the stroke still gets a well-defined offset.
+    if (!dxy->setLength(fRadius)) {
+        dxy->set(fRadius, 0);
+    }
+    SkScalar axisFlip = SkIntToScalar(fStrokeType);  // go opposite ways for outer, inner
+    onPt->fX = tPt.fX + axisFlip * dxy->fY;
+    onPt->fY = tPt.fY - axisFlip * dxy->fX;
+    if (tangent) {
+        tangent->fX = onPt->fX + dxy->fX;
+        tangent->fY = onPt->fY + dxy->fY;
+    }
+}
+
+// Given a conic and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+// If the derivative collapses to zero, fall back to the chord from the first to the last point
+// so the perpendicular is still well defined.
+void SkPathStroker::conicPerpRay(const SkConic& conic, SkScalar t, SkPoint* tPt, SkPoint* onPt,
+        SkPoint* tangent) const {
+    SkVector dxy;
+    conic.evalAt(t, tPt, &dxy);
+    if (dxy.fX == 0 && dxy.fY == 0) {
+        dxy = conic.fPts[2] - conic.fPts[0];
+    }
+    this->setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Given a conic and a t range, find the start and end if they haven't been found already.
+// The offset endpoints and their tangents are cached in quadPts (fStartSet/fEndSet) so
+// repeated subdivision does not recompute them.
+void SkPathStroker::conicQuadEnds(const SkConic& conic, SkQuadConstruct* quadPts) const {
+    if (!quadPts->fStartSet) {
+        SkPoint conicStartPt;
+        this->conicPerpRay(conic, quadPts->fStartT, &conicStartPt, &quadPts->fQuad[0],
+                &quadPts->fTangentStart);
+        quadPts->fStartSet = true;
+    }
+    if (!quadPts->fEndSet) {
+        SkPoint conicEndPt;
+        this->conicPerpRay(conic, quadPts->fEndT, &conicEndPt, &quadPts->fQuad[2],
+                &quadPts->fTangentEnd);
+        quadPts->fEndSet = true;
+    }
+}
+
+
+// Given a cubic and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+// When the derivative is zero (a cusp or degenerate control points), fall back through a
+// cascade of chord approximations until a non-zero direction is found.
+void SkPathStroker::cubicPerpRay(const SkPoint cubic[4], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+        SkPoint* tangent) const {
+    SkVector dxy;
+    SkPoint chopped[7];
+    SkEvalCubicAt(cubic, t, tPt, &dxy, nullptr);
+    if (dxy.fX == 0 && dxy.fY == 0) {
+        const SkPoint* cPts = cubic;
+        if (SkScalarNearlyZero(t)) {
+            // Near the start: use the chord that skips the (coincident) first control point.
+            dxy = cubic[2] - cubic[0];
+        } else if (SkScalarNearlyZero(1 - t)) {
+            // Near the end: use the chord that skips the (coincident) last control point.
+            dxy = cubic[3] - cubic[1];
+        } else {
+            // If the cubic inflection falls on the cusp, subdivide the cubic
+            // to find the tangent at that point.
+            SkChopCubicAt(cubic, chopped, t);
+            dxy = chopped[3] - chopped[2];
+            if (dxy.fX == 0 && dxy.fY == 0) {
+                dxy = chopped[3] - chopped[1];
+                cPts = chopped;
+            }
+        }
+        if (dxy.fX == 0 && dxy.fY == 0) {
+            // Last resort: the overall chord of whichever point set we're using.
+            dxy = cPts[3] - cPts[0];
+        }
+    }
+    setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Given a cubic and a t range, find the start and end if they haven't been found already.
+// The offset endpoints and their tangents are cached in quadPts (fStartSet/fEndSet).
+void SkPathStroker::cubicQuadEnds(const SkPoint cubic[4], SkQuadConstruct* quadPts) {
+    if (!quadPts->fStartSet) {
+        SkPoint cubicStartPt;
+        this->cubicPerpRay(cubic, quadPts->fStartT, &cubicStartPt, &quadPts->fQuad[0],
+                &quadPts->fTangentStart);
+        quadPts->fStartSet = true;
+    }
+    if (!quadPts->fEndSet) {
+        SkPoint cubicEndPt;
+        this->cubicPerpRay(cubic, quadPts->fEndT, &cubicEndPt, &quadPts->fQuad[2],
+                &quadPts->fTangentEnd);
+        quadPts->fEndSet = true;
+    }
+}
+
+// Compute the offset (stroke) point at the t-range midpoint of the cubic.
+void SkPathStroker::cubicQuadMid(const SkPoint cubic[4], const SkQuadConstruct* quadPts,
+        SkPoint* mid) const {
+    SkPoint cubicMidPt;
+    this->cubicPerpRay(cubic, quadPts->fMidT, &cubicMidPt, mid, nullptr);
+}
+
+// Given a quad and t, return the point on curve, its perpendicular, and the perpendicular tangent.
+// A zero derivative falls back to the quad's overall chord for the direction.
+void SkPathStroker::quadPerpRay(const SkPoint quad[3], SkScalar t, SkPoint* tPt, SkPoint* onPt,
+        SkPoint* tangent) const {
+    SkVector dxy;
+    SkEvalQuadAt(quad, t, tPt, &dxy);
+    if (dxy.fX == 0 && dxy.fY == 0) {
+        dxy = quad[2] - quad[0];
+    }
+    setRayPts(*tPt, &dxy, onPt, tangent);
+}
+
+// Find the intersection of the stroke tangents to construct a stroke quad.
+// Return whether the stroke is a degenerate (a line), a quad, or must be split.
+// Optionally compute the quad's control point.
+SkPathStroker::ResultType SkPathStroker::intersectRay(SkQuadConstruct* quadPts,
+        IntersectRayType intersectRayType  STROKER_DEBUG_PARAMS(int depth)) const {
+    const SkPoint& start = quadPts->fQuad[0];
+    const SkPoint& end = quadPts->fQuad[2];
+    SkVector aLen = quadPts->fTangentStart - start;
+    SkVector bLen = quadPts->fTangentEnd - end;
+    /* Slopes match when denom goes to zero:
+                      axLen / ayLen ==                   bxLen / byLen
+    (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+             byLen  * axLen         ==  ayLen          * bxLen
+             byLen  * axLen         -   ayLen          * bxLen         ( == denom )
+     */
+    SkScalar denom = aLen.cross(bLen);
+    if (denom == 0 || !SkScalarIsFinite(denom)) {
+        // Parallel (or non-finite) tangents: no usable intersection.
+        quadPts->fOppositeTangents = aLen.dot(bLen) < 0;
+        return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts, "denom == 0");
+    }
+    quadPts->fOppositeTangents = false;
+    SkVector ab0 = start - end;
+    SkScalar numerA = bLen.cross(ab0);
+    SkScalar numerB = aLen.cross(ab0);
+    if ((numerA >= 0) == (numerB >= 0)) { // if the control point is outside the quad ends
+        // if the perpendicular distances from the quad points to the opposite tangent line
+        // are small, a straight line is good enough
+        SkScalar dist1 = pt_to_line(start, end, quadPts->fTangentEnd);
+        SkScalar dist2 = pt_to_line(end, start, quadPts->fTangentStart);
+        if (std::max(dist1, dist2) <= fInvResScaleSquared) {
+            return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts,
+                    "std::max(dist1=%g, dist2=%g) <= fInvResScaleSquared", dist1, dist2);
+        }
+        return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+                "(numerA=%g >= 0) == (numerB=%g >= 0)", numerA, numerB);
+    }
+    // check to see if the denominator is teeny relative to the numerator
+    // if the offset by one will be lost, the ratio is too large
+    numerA /= denom;
+    bool validDivide = numerA > numerA - 1;
+    if (validDivide) {
+        if (kCtrlPt_RayType == intersectRayType) {
+            SkPoint* ctrlPt = &quadPts->fQuad[1];
+            // the intersection of the tangents need not be on the tangent segment
+            // so 0 <= numerA <= 1 is not necessarily true
+            ctrlPt->fX = start.fX * (1 - numerA) + quadPts->fTangentStart.fX * numerA;
+            ctrlPt->fY = start.fY * (1 - numerA) + quadPts->fTangentStart.fY * numerA;
+        }
+        return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+                "(numerA=%g >= 0) != (numerB=%g >= 0)", numerA, numerB);
+    }
+    quadPts->fOppositeTangents = aLen.dot(bLen) < 0;
+    // if the lines are parallel, straight line is good enough
+    return STROKER_RESULT(kDegenerate_ResultType, depth, quadPts,
+            "SkScalarNearlyZero(denom=%g)", denom);
+}
+
+// Given a cubic and a t-range, determine if the stroke can be described by a quadratic.
+// Computes the offset endpoints (if needed) and classifies the tangent intersection.
+SkPathStroker::ResultType SkPathStroker::tangentsMeet(const SkPoint cubic[4],
+        SkQuadConstruct* quadPts) {
+    this->cubicQuadEnds(cubic, quadPts);
+    return this->intersectRay(quadPts, kResultType_RayType  STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+// Intersect the line with the quad and return the t values on the quad where the line crosses.
+// Each control point is projected onto the line's normal (the cross-product distances r[n]),
+// turning the intersection into a scalar quadratic solved by SkFindUnitQuadRoots.
+static int intersect_quad_ray(const SkPoint line[2], const SkPoint quad[3], SkScalar roots[2]) {
+    SkVector vec = line[1] - line[0];
+    SkScalar r[3];
+    for (int n = 0; n < 3; ++n) {
+        r[n] = (quad[n].fY - line[0].fY) * vec.fX - (quad[n].fX - line[0].fX) * vec.fY;
+    }
+    SkScalar A = r[2];
+    SkScalar B = r[1];
+    SkScalar C = r[0];
+    A += C - 2 * B;  // A = a - 2*b + c
+    B -= C;  // B = -(b - c)
+    return SkFindUnitQuadRoots(A, 2 * B, C, roots);
+}
+
+// Return true if the point is close to the bounds of the quad. This is used as a quick reject.
+// The bounds of the quad's control points are outset by fInvResScale before testing.
+bool SkPathStroker::ptInQuadBounds(const SkPoint quad[3], const SkPoint& pt) const {
+    SkScalar xMin = std::min(std::min(quad[0].fX, quad[1].fX), quad[2].fX);
+    if (pt.fX + fInvResScale < xMin) {
+        return false;
+    }
+    SkScalar xMax = std::max(std::max(quad[0].fX, quad[1].fX), quad[2].fX);
+    if (pt.fX - fInvResScale > xMax) {
+        return false;
+    }
+    SkScalar yMin = std::min(std::min(quad[0].fY, quad[1].fY), quad[2].fY);
+    if (pt.fY + fInvResScale < yMin) {
+        return false;
+    }
+    SkScalar yMax = std::max(std::max(quad[0].fY, quad[1].fY), quad[2].fY);
+    if (pt.fY - fInvResScale > yMax) {
+        return false;
+    }
+    return true;
+}
+
+// Return true if the two points are within 'limit' of each other (squared-distance compare).
+static bool points_within_dist(const SkPoint& nearPt, const SkPoint& farPt, SkScalar limit) {
+    return SkPointPriv::DistanceToSqd(nearPt, farPt) <= limit * limit;
+}
+
+// Return true if the angle at quad[1] is sharp (acute), i.e. the two edges leaving the
+// control point turn back on each other. Lengths are compared squared, and the shorter
+// edge is scaled up so the dot product's sign is computed at comparable magnitudes.
+static bool sharp_angle(const SkPoint quad[3]) {
+    SkVector smaller = quad[1] - quad[0];
+    SkVector larger = quad[1] - quad[2];
+    SkScalar smallerLen = SkPointPriv::LengthSqd(smaller);
+    SkScalar largerLen = SkPointPriv::LengthSqd(larger);
+    if (smallerLen > largerLen) {
+        using std::swap;
+        swap(smaller, larger);
+        largerLen = smallerLen;
+    }
+    if (!smaller.setLength(largerLen)) {
+        return false;
+    }
+    SkScalar dot = smaller.dot(larger);
+    return dot > 0;
+}
+
+// Decide whether the candidate stroke quad is close enough to the true offset curve.
+// 'ray[0]' is the exact offset point at the t-range midpoint; 'ray' is the perpendicular
+// ray from the curve. Returns kQuad (accept), or kSplit (subdivide further).
+SkPathStroker::ResultType SkPathStroker::strokeCloseEnough(const SkPoint stroke[3],
+        const SkPoint ray[2], SkQuadConstruct* quadPts  STROKER_DEBUG_PARAMS(int depth)) const {
+    SkPoint strokeMid = SkEvalQuadAt(stroke, SK_ScalarHalf);
+    // measure the distance from the curve to the quad-stroke midpoint, compare to radius
+    if (points_within_dist(ray[0], strokeMid, fInvResScale)) {  // if the difference is small
+        if (sharp_angle(quadPts->fQuad)) {
+            return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+                    "sharp_angle (1) =%g,%g, %g,%g, %g,%g",
+                    quadPts->fQuad[0].fX, quadPts->fQuad[0].fY,
+                    quadPts->fQuad[1].fX, quadPts->fQuad[1].fY,
+                    quadPts->fQuad[2].fX, quadPts->fQuad[2].fY);
+        }
+        return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+                "points_within_dist(ray[0]=%g,%g, strokeMid=%g,%g, fInvResScale=%g)",
+                ray[0].fX, ray[0].fY, strokeMid.fX, strokeMid.fY, fInvResScale);
+    }
+    // measure the distance to quad's bounds (quick reject)
+    // an alternative : look for point in triangle
+    if (!ptInQuadBounds(stroke, ray[0])) {  // if far, subdivide
+        return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+                "!pt_in_quad_bounds(stroke=(%g,%g %g,%g %g,%g), ray[0]=%g,%g)",
+                stroke[0].fX, stroke[0].fY, stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY,
+                ray[0].fX, ray[0].fY);
+    }
+    // measure the curve ray distance to the quad-stroke
+    SkScalar roots[2];
+    int rootCount = intersect_quad_ray(ray, stroke, roots);
+    if (rootCount != 1) {
+        return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+                "rootCount=%d != 1", rootCount);
+    }
+    SkPoint quadPt = SkEvalQuadAt(stroke, roots[0]);
+    // Allow more error toward the quad's center (the 1 - |t - 0.5| * 2 taper).
+    SkScalar error = fInvResScale * (SK_Scalar1 - SkScalarAbs(roots[0] - 0.5f) * 2);
+    if (points_within_dist(ray[0], quadPt, error)) {  // if the difference is small, we're done
+        if (sharp_angle(quadPts->fQuad)) {
+            return STROKER_RESULT(kSplit_ResultType, depth, quadPts,
+                    "sharp_angle (2) =%g,%g, %g,%g, %g,%g",
+                    quadPts->fQuad[0].fX, quadPts->fQuad[0].fY,
+                    quadPts->fQuad[1].fX, quadPts->fQuad[1].fY,
+                    quadPts->fQuad[2].fX, quadPts->fQuad[2].fY);
+        }
+        return STROKER_RESULT(kQuad_ResultType, depth, quadPts,
+                "points_within_dist(ray[0]=%g,%g, quadPt=%g,%g, error=%g)",
+                ray[0].fX, ray[0].fY, quadPt.fX, quadPt.fY, error);
+    }
+    // otherwise, subdivide
+    return STROKER_RESULT(kSplit_ResultType, depth, quadPts, "%s", "fall through");
+}
+
+// Build the quad approximation of the cubic's offset over the current t range and
+// decide whether it is close enough, must be split, or is degenerate.
+SkPathStroker::ResultType SkPathStroker::compareQuadCubic(const SkPoint cubic[4],
+        SkQuadConstruct* quadPts) {
+    // get the quadratic approximation of the stroke
+    this->cubicQuadEnds(cubic, quadPts);
+    ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+            STROKER_DEBUG_PARAMS(fRecursionDepth) );
+    if (resultType != kQuad_ResultType) {
+        return resultType;
+    }
+    // project a ray from the curve to the stroke
+    SkPoint ray[2];  // points near midpoint on quad, midpoint on cubic
+    this->cubicPerpRay(cubic, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+    return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+            STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+// Build the quad approximation of the conic's offset over the current t range and
+// decide whether it is close enough, must be split, or is degenerate.
+SkPathStroker::ResultType SkPathStroker::compareQuadConic(const SkConic& conic,
+        SkQuadConstruct* quadPts) const {
+    // get the quadratic approximation of the stroke
+    this->conicQuadEnds(conic, quadPts);
+    ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+            STROKER_DEBUG_PARAMS(fRecursionDepth) );
+    if (resultType != kQuad_ResultType) {
+        return resultType;
+    }
+    // project a ray from the curve to the stroke
+    SkPoint ray[2];  // points near midpoint on quad, midpoint on conic
+    this->conicPerpRay(conic, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+    return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+            STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+// Build the quad approximation of the quad's offset over the current t range and
+// decide whether it is close enough, must be split, or is degenerate. Unlike the
+// cubic/conic variants, the endpoint caching is done inline here.
+SkPathStroker::ResultType SkPathStroker::compareQuadQuad(const SkPoint quad[3],
+        SkQuadConstruct* quadPts) {
+    // get the quadratic approximation of the stroke
+    if (!quadPts->fStartSet) {
+        SkPoint quadStartPt;
+        this->quadPerpRay(quad, quadPts->fStartT, &quadStartPt, &quadPts->fQuad[0],
+                &quadPts->fTangentStart);
+        quadPts->fStartSet = true;
+    }
+    if (!quadPts->fEndSet) {
+        SkPoint quadEndPt;
+        this->quadPerpRay(quad, quadPts->fEndT, &quadEndPt, &quadPts->fQuad[2],
+                &quadPts->fTangentEnd);
+        quadPts->fEndSet = true;
+    }
+    ResultType resultType = this->intersectRay(quadPts, kCtrlPt_RayType
+            STROKER_DEBUG_PARAMS(fRecursionDepth));
+    if (resultType != kQuad_ResultType) {
+        return resultType;
+    }
+    // project a ray from the curve to the stroke
+    SkPoint ray[2];
+    this->quadPerpRay(quad, quadPts->fMidT, &ray[1], &ray[0], nullptr);
+    return this->strokeCloseEnough(quadPts->fQuad, ray, quadPts
+            STROKER_DEBUG_PARAMS(fRecursionDepth));
+}
+
+// Emit the current quad's end point as a straight line on the active (outer or inner) side.
+void SkPathStroker::addDegenerateLine(const SkQuadConstruct* quadPts) {
+    const SkPoint* quad = quadPts->fQuad;
+    SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+    path->lineTo(quad[2].fX, quad[2].fY);
+}
+
+// Return true if the stroke's midpoint lies (within tolerance) on the line joining the
+// stroke quad's endpoints, i.e. the degenerate stroke really is a line.
+bool SkPathStroker::cubicMidOnLine(const SkPoint cubic[4], const SkQuadConstruct* quadPts) const {
+    SkPoint strokeMid;
+    this->cubicQuadMid(cubic, quadPts, &strokeMid);
+    SkScalar dist = pt_to_line(strokeMid, quadPts->fQuad[0], quadPts->fQuad[2]);
+    return dist < fInvResScaleSquared;
+}
+
+// Recursively approximate the offset of the cubic over quadPts's t range with quads,
+// splitting at the midpoint until the approximation is close enough or the recursion
+// limit is hit. Returns false to signal the caller that the stroke could not be
+// represented and fallback handling is needed.
+bool SkPathStroker::cubicStroke(const SkPoint cubic[4], SkQuadConstruct* quadPts) {
+    if (!fFoundTangents) {
+        // Until the tangents first intersect, test whether the piece is effectively linear.
+        ResultType resultType = this->tangentsMeet(cubic, quadPts);
+        if (kQuad_ResultType != resultType) {
+            if ((kDegenerate_ResultType == resultType
+                    || points_within_dist(quadPts->fQuad[0], quadPts->fQuad[2],
+                    fInvResScale)) && cubicMidOnLine(cubic, quadPts)) {
+                addDegenerateLine(quadPts);
+                DEBUG_CUBIC_RECURSION_TRACK_DEPTH(fRecursionDepth);
+                return true;
+            }
+        } else {
+            fFoundTangents = true;
+        }
+    }
+    if (fFoundTangents) {
+        ResultType resultType = this->compareQuadCubic(cubic, quadPts);
+        if (kQuad_ResultType == resultType) {
+            // The quad approximation is good enough; emit it on the active side.
+            SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+            const SkPoint* stroke = quadPts->fQuad;
+            path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+            DEBUG_CUBIC_RECURSION_TRACK_DEPTH(fRecursionDepth);
+            return true;
+        }
+        if (kDegenerate_ResultType == resultType) {
+            if (!quadPts->fOppositeTangents) {
+              addDegenerateLine(quadPts);
+              DEBUG_CUBIC_RECURSION_TRACK_DEPTH(fRecursionDepth);
+              return true;
+            }
+        }
+    }
+    if (!SkScalarIsFinite(quadPts->fQuad[2].fX) || !SkScalarIsFinite(quadPts->fQuad[2].fY)) {
+        DEBUG_CUBIC_RECURSION_TRACK_DEPTH(fRecursionDepth);
+        return false;  // just abort if projected quad isn't representable
+    }
+#if QUAD_STROKE_APPROX_EXTENDED_DEBUGGING
+    SkDEBUGCODE(gMaxRecursion[fFoundTangents] = std::max(gMaxRecursion[fFoundTangents],
+            fRecursionDepth + 1));
+#endif
+    if (++fRecursionDepth > kRecursiveLimits[fFoundTangents]) {
+        DEBUG_CUBIC_RECURSION_TRACK_DEPTH(fRecursionDepth);
+        return false;  // just abort if projected quad isn't representable
+    }
+    SkQuadConstruct half;
+    // Subdivide at the midpoint and recurse on each half; if the half range can't be
+    // initialized (degenerate parameters), fall back to a straight line.
+    if (!half.initWithStart(quadPts)) {
+        addDegenerateLine(quadPts);
+        DEBUG_CUBIC_RECURSION_TRACK_DEPTH(fRecursionDepth);
+        --fRecursionDepth;
+        return true;
+    }
+    if (!this->cubicStroke(cubic, &half)) {
+        return false;
+    }
+    if (!half.initWithEnd(quadPts)) {
+        addDegenerateLine(quadPts);
+        DEBUG_CUBIC_RECURSION_TRACK_DEPTH(fRecursionDepth);
+        --fRecursionDepth;
+        return true;
+    }
+    if (!this->cubicStroke(cubic, &half)) {
+        return false;
+    }
+    --fRecursionDepth;
+    return true;
+}
+
+// Recursively approximate the conic's offset over quadPts's t range with quads,
+// splitting at the midpoint until close enough or the recursion limit is hit.
+bool SkPathStroker::conicStroke(const SkConic& conic, SkQuadConstruct* quadPts) {
+    ResultType resultType = this->compareQuadConic(conic, quadPts);
+    if (kQuad_ResultType == resultType) {
+        const SkPoint* stroke = quadPts->fQuad;
+        SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+        path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+        return true;
+    }
+    if (kDegenerate_ResultType == resultType) {
+        addDegenerateLine(quadPts);
+        return true;
+    }
+#if QUAD_STROKE_APPROX_EXTENDED_DEBUGGING
+    SkDEBUGCODE(gMaxRecursion[kConic_RecursiveLimit] = std::max(gMaxRecursion[kConic_RecursiveLimit],
+            fRecursionDepth + 1));
+#endif
+    if (++fRecursionDepth > kRecursiveLimits[kConic_RecursiveLimit]) {
+        return false;  // just abort if projected quad isn't representable
+    }
+    SkQuadConstruct half;
+    // Subdivide at the midpoint and recurse on each half.
+    (void) half.initWithStart(quadPts);
+    if (!this->conicStroke(conic, &half)) {
+        return false;
+    }
+    (void) half.initWithEnd(quadPts);
+    if (!this->conicStroke(conic, &half)) {
+        return false;
+    }
+    --fRecursionDepth;
+    return true;
+}
+
+// Recursively approximate the quad's offset over quadPts's t range with quads,
+// splitting at the midpoint until close enough or the recursion limit is hit.
+bool SkPathStroker::quadStroke(const SkPoint quad[3], SkQuadConstruct* quadPts) {
+    ResultType resultType = this->compareQuadQuad(quad, quadPts);
+    if (kQuad_ResultType == resultType) {
+        const SkPoint* stroke = quadPts->fQuad;
+        SkPath* path = fStrokeType == kOuter_StrokeType ? &fOuter : &fInner;
+        path->quadTo(stroke[1].fX, stroke[1].fY, stroke[2].fX, stroke[2].fY);
+        return true;
+    }
+    if (kDegenerate_ResultType == resultType) {
+        addDegenerateLine(quadPts);
+        return true;
+    }
+#if QUAD_STROKE_APPROX_EXTENDED_DEBUGGING
+    SkDEBUGCODE(gMaxRecursion[kQuad_RecursiveLimit] = std::max(gMaxRecursion[kQuad_RecursiveLimit],
+            fRecursionDepth + 1));
+#endif
+    if (++fRecursionDepth > kRecursiveLimits[kQuad_RecursiveLimit]) {
+        return false;  // just abort if projected quad isn't representable
+    }
+    SkQuadConstruct half;
+    // Subdivide at the midpoint and recurse on each half.
+    (void) half.initWithStart(quadPts);
+    if (!this->quadStroke(quad, &half)) {
+        return false;
+    }
+    (void) half.initWithEnd(quadPts);
+    if (!this->quadStroke(quad, &half)) {
+        return false;
+    }
+    --fRecursionDepth;
+    return true;
+}
+
+// Stroke a cubic segment from fPrevPt through pt1, pt2 to pt3. Degenerate cubics are
+// reduced to points/lines (with up to three max-curvature waypoints); real curves are
+// split at inflections and each span is stroked with recursive quad approximation.
+void SkPathStroker::cubicTo(const SkPoint& pt1, const SkPoint& pt2,
+                            const SkPoint& pt3) {
+    const SkPoint cubic[4] = { fPrevPt, pt1, pt2, pt3 };
+    SkPoint reduction[3];
+    const SkPoint* tangentPt;
+    ReductionType reductionType = CheckCubicLinear(cubic, reduction, &tangentPt);
+    if (kPoint_ReductionType == reductionType) {
+        /* If the stroke consists of a moveTo followed by a degenerate curve, treat it
+            as if it were followed by a zero-length line. Lines without length
+            can have square and round end caps. */
+        this->lineTo(pt3);
+        return;
+    }
+    if (kLine_ReductionType == reductionType) {
+        this->lineTo(pt3);
+        return;
+    }
+    if (kDegenerate_ReductionType <= reductionType && kDegenerate3_ReductionType >= reductionType) {
+        // Nearly linear: connect the 1-3 max-curvature points with lines, using round
+        // joins between them so the near-cusps are not visibly clipped.
+        this->lineTo(reduction[0]);
+        SkStrokerPriv::JoinProc saveJoiner = fJoiner;
+        fJoiner = SkStrokerPriv::JoinFactory(SkPaint::kRound_Join);
+        if (kDegenerate2_ReductionType <= reductionType) {
+            this->lineTo(reduction[1]);
+        }
+        if (kDegenerate3_ReductionType == reductionType) {
+            this->lineTo(reduction[2]);
+        }
+        this->lineTo(pt3);
+        fJoiner = saveJoiner;
+        return;
+    }
+    SkASSERT(kQuad_ReductionType == reductionType);
+    SkVector normalAB, unitAB, normalCD, unitCD;
+    if (!this->preJoinTo(*tangentPt, &normalAB, &unitAB, false)) {
+        this->lineTo(pt3);
+        return;
+    }
+    SkScalar tValues[2];
+    int count = SkFindCubicInflections(cubic, tValues);
+    SkScalar lastT = 0;
+    // Stroke each inflection-free span separately on both sides.
+    for (int index = 0; index <= count; ++index) {
+        SkScalar nextT = index < count ? tValues[index] : 1;
+        SkQuadConstruct quadPts;
+        this->init(kOuter_StrokeType, &quadPts, lastT, nextT);
+        (void) this->cubicStroke(cubic, &quadPts);
+        this->init(kInner_StrokeType, &quadPts, lastT, nextT);
+        (void) this->cubicStroke(cubic, &quadPts);
+        lastT = nextT;
+    }
+    // A cusp gets a full circle of radius fRadius to cover the stroke's turn-back.
+    SkScalar cusp = SkFindCubicCusp(cubic);
+    if (cusp > 0) {
+        SkPoint cuspLoc;
+        SkEvalCubicAt(cubic, cusp, &cuspLoc, nullptr, nullptr);
+        fCusper.addCircle(cuspLoc.fX, cuspLoc.fY, fRadius);
+    }
+    // emit the join even if one stroke succeeded but the last one failed
+    // this avoids reversing an inner stroke with a partial path followed by another moveto
+    this->setCubicEndNormal(cubic, normalAB, unitAB, &normalCD, &unitCD);
+
+    this->postJoinTo(pt3, normalCD, unitCD);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/core/SkPaintDefaults.h"
+
+// Default stroke: width 1, default miter limit, butt cap, miter join, no fill.
+SkStroke::SkStroke() {
+    fWidth      = SK_Scalar1;
+    fMiterLimit = SkPaintDefaults_MiterLimit;
+    fResScale   = 1;
+    fCap        = SkPaint::kDefault_Cap;
+    fJoin       = SkPaint::kDefault_Join;
+    fDoFill     = false;
+}
+
+// Initialize stroke parameters from a paint's stroke settings.
+SkStroke::SkStroke(const SkPaint& p) {
+    fWidth      = p.getStrokeWidth();
+    fMiterLimit = p.getStrokeMiter();
+    fResScale   = 1;
+    fCap        = (uint8_t)p.getStrokeCap();
+    fJoin       = (uint8_t)p.getStrokeJoin();
+    fDoFill     = SkToU8(p.getStyle() == SkPaint::kStrokeAndFill_Style);
+}
+
+// Initialize from a paint, but with an explicit width overriding paint.getStrokeWidth().
+SkStroke::SkStroke(const SkPaint& p, SkScalar width) {
+    fWidth      = width;
+    fMiterLimit = p.getStrokeMiter();
+    fResScale   = 1;
+    fCap        = (uint8_t)p.getStrokeCap();
+    fJoin       = (uint8_t)p.getStrokeJoin();
+    fDoFill     = SkToU8(p.getStyle() == SkPaint::kStrokeAndFill_Style);
+}
+
+// Set the stroke width; must be non-negative (debug-asserted).
+void SkStroke::setWidth(SkScalar width) {
+    SkASSERT(width >= 0);
+    fWidth = width;
+}
+
+// Set the miter limit; must be non-negative (debug-asserted).
+void SkStroke::setMiterLimit(SkScalar miterLimit) {
+    SkASSERT(miterLimit >= 0);
+    fMiterLimit = miterLimit;
+}
+
+// Set the end-cap style; stored narrowed to a uint8_t.
+void SkStroke::setCap(SkPaint::Cap cap) {
+    SkASSERT((unsigned)cap < SkPaint::kCapCount);
+    fCap = SkToU8(cap);
+}
+
+// Set the join style; stored narrowed to a uint8_t.
+void SkStroke::setJoin(SkPaint::Join join) {
+    SkASSERT((unsigned)join < SkPaint::kJoinCount);
+    fJoin = SkToU8(join);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// If src==dst, then we use a tmp path to record the stroke, and then swap
+// its contents with src when we're done.
+class AutoTmpPath {
+public:
+    AutoTmpPath(const SkPath& src, SkPath** dst) : fSrc(src) {
+        if (&src == *dst) {
+            // Redirect the caller's dst pointer at our temporary.
+            *dst = &fTmpDst;
+            fSwapWithSrc = true;
+        } else {
+            (*dst)->reset();
+            fSwapWithSrc = false;
+        }
+    }
+
+    ~AutoTmpPath() {
+        // On scope exit, move the recorded stroke into the original src/dst path.
+        if (fSwapWithSrc) {
+            fTmpDst.swap(*const_cast<SkPath*>(&fSrc));
+        }
+    }
+
+private:
+    SkPath          fTmpDst;
+    const SkPath&   fSrc;
+    bool            fSwapWithSrc;
+};
+
+// Stroke an arbitrary path into dst by iterating its verbs through SkPathStroker.
+// Handles src == dst, the closed-rect fast path, stroke-and-fill center handling,
+// and preserves the inverse-fill state of src.
+void SkStroke::strokePath(const SkPath& src, SkPath* dst) const {
+    SkASSERT(dst);
+
+    SkScalar radius = SkScalarHalf(fWidth);
+
+    // Handles src == dst by recording into a temp path and swapping at scope exit.
+    AutoTmpPath  tmp(src, &dst);
+
+    // A non-positive stroke width produces an empty result (dst was reset above).
+    if (radius <= 0) {
+        return;
+    }
+
+    // If src is really a rect, call our specialty strokeRect() method
+    {
+        SkRect rect;
+        bool isClosed = false;
+        SkPathDirection dir;
+        if (src.isRect(&rect, &isClosed, &dir) && isClosed) {
+            this->strokeRect(rect, dst, dir);
+            // our answer should preserve the inverseness of the src
+            if (src.isInverseFillType()) {
+                SkASSERT(!dst->isInverseFillType());
+                dst->toggleInverseFillType();
+            }
+            return;
+        }
+    }
+
+    // We can always ignore centers for stroke and fill convex line-only paths
+    // TODO: remove the line-only restriction
+    bool ignoreCenter = fDoFill && (src.getSegmentMasks() == SkPath::kLine_SegmentMask) &&
+                        src.isLastContourClosed() && src.isConvex();
+
+    SkPathStroker   stroker(src, radius, fMiterLimit, this->getCap(), this->getJoin(),
+                            fResScale, ignoreCenter);
+    SkPath::Iter    iter(src, false);
+    SkPath::Verb    lastSegment = SkPath::kMove_Verb;
+
+    // Feed each verb to the stroker; lastSegment tracks whether the final segment
+    // was a line, which affects how close() and done() finish the contour.
+    for (;;) {
+        SkPoint  pts[4];
+        switch (iter.next(pts)) {
+            case SkPath::kMove_Verb:
+                stroker.moveTo(pts[0]);
+                break;
+            case SkPath::kLine_Verb:
+                stroker.lineTo(pts[1], &iter);
+                lastSegment = SkPath::kLine_Verb;
+                break;
+            case SkPath::kQuad_Verb:
+                stroker.quadTo(pts[1], pts[2]);
+                lastSegment = SkPath::kQuad_Verb;
+                break;
+            case SkPath::kConic_Verb: {
+                stroker.conicTo(pts[1], pts[2], iter.conicWeight());
+                lastSegment = SkPath::kConic_Verb;
+            } break;
+            case SkPath::kCubic_Verb:
+                stroker.cubicTo(pts[1], pts[2], pts[3]);
+                lastSegment = SkPath::kCubic_Verb;
+                break;
+            case SkPath::kClose_Verb:
+                if (SkPaint::kButt_Cap != this->getCap()) {
+                    /* If the stroke consists of a moveTo followed by a close, treat it
+                       as if it were followed by a zero-length line. Lines without length
+                       can have square and round end caps. */
+                    if (stroker.hasOnlyMoveTo()) {
+                        stroker.lineTo(stroker.moveToPt());
+                        goto ZERO_LENGTH;
+                    }
+                    /* If the stroke consists of a moveTo followed by one or more zero-length
+                       verbs, then followed by a close, treat is as if it were followed by a
+                       zero-length line. Lines without length can have square & round end caps. */
+                    if (stroker.isCurrentContourEmpty()) {
+                ZERO_LENGTH:
+                        lastSegment = SkPath::kLine_Verb;
+                        break;
+                    }
+                }
+                stroker.close(lastSegment == SkPath::kLine_Verb);
+                break;
+            case SkPath::kDone_Verb:
+                goto DONE;
+        }
+    }
+DONE:
+    stroker.done(dst, lastSegment == SkPath::kLine_Verb);
+
+    if (fDoFill && !ignoreCenter) {
+        // For stroke-and-fill, add the original geometry, reversed if it winds CCW so
+        // the fill does not cancel the stroke's winding.
+        if (SkPathPriv::ComputeFirstDirection(src) == SkPathFirstDirection::kCCW) {
+            dst->reverseAddPath(src);
+        } else {
+            dst->addPath(src);
+        }
+    } else {
+        //  Seems like we can assume that a 2-point src would always result in
+        //  a convex stroke, but testing has proved otherwise.
+        // TODO: fix the stroker to make this assumption true (without making
+        // it slower that the work that will be done in computeConvexity())
+#if 0
+        // this test results in a non-convex stroke :(
+        static void test(SkCanvas* canvas) {
+            SkPoint pts[] = { 146.333328,  192.333328, 300.333344, 293.333344 };
+            SkPaint paint;
+            paint.setStrokeWidth(7);
+            paint.setStrokeCap(SkPaint::kRound_Cap);
+            canvas->drawLine(pts[0].fX, pts[0].fY, pts[1].fX, pts[1].fY, paint);
+        }
+#endif
+#if 0
+        if (2 == src.countPoints()) {
+            dst->setIsConvex(true);
+        }
+#endif
+    }
+
+    // our answer should preserve the inverseness of the src
+    if (src.isInverseFillType()) {
+        SkASSERT(!dst->isInverseFillType());
+        dst->toggleInverseFillType();
+    }
+}
+
+// Return the opposite winding direction (kCW <-> kCCW).
+static SkPathDirection reverse_direction(SkPathDirection dir) {
+    static const SkPathDirection gOpposite[] = { SkPathDirection::kCCW, SkPathDirection::kCW };
+    return gOpposite[(int)dir];
+}
+
+// Add the bevel-joined outer outline of a stroked rect: an octagon whose corners are
+// cut where the outset rect 'outer' meets the original rect 'r', wound in 'dir'.
+static void addBevel(SkPath* path, const SkRect& r, const SkRect& outer, SkPathDirection dir) {
+    SkPoint pts[8];
+
+    if (SkPathDirection::kCW == dir) {
+        pts[0].set(r.fLeft, outer.fTop);
+        pts[1].set(r.fRight, outer.fTop);
+        pts[2].set(outer.fRight, r.fTop);
+        pts[3].set(outer.fRight, r.fBottom);
+        pts[4].set(r.fRight, outer.fBottom);
+        pts[5].set(r.fLeft, outer.fBottom);
+        pts[6].set(outer.fLeft, r.fBottom);
+        pts[7].set(outer.fLeft, r.fTop);
+    } else {
+        // Same eight points assigned in reverse order to flip the winding.
+        pts[7].set(r.fLeft, outer.fTop);
+        pts[6].set(r.fRight, outer.fTop);
+        pts[5].set(outer.fRight, r.fTop);
+        pts[4].set(outer.fRight, r.fBottom);
+        pts[3].set(r.fRight, outer.fBottom);
+        pts[2].set(r.fLeft, outer.fBottom);
+        pts[1].set(outer.fLeft, r.fBottom);
+        pts[0].set(outer.fLeft, r.fTop);
+    }
+    path->addPoly(pts, 8, true);
+}
+
+// Fast path for stroking an axis-aligned rect: emit the outer outline per the join
+// style, and (when the stroke doesn't swallow the interior and we're not filling)
+// an inner rect with reversed winding to cut out the hole.
+void SkStroke::strokeRect(const SkRect& origRect, SkPath* dst,
+                          SkPathDirection dir) const {
+    SkASSERT(dst != nullptr);
+    dst->reset();
+
+    SkScalar radius = SkScalarHalf(fWidth);
+    if (radius <= 0) {
+        return;
+    }
+
+    // An inverted rect (negative width xor height) flips the effective winding.
+    SkScalar rw = origRect.width();
+    SkScalar rh = origRect.height();
+    if ((rw < 0) ^ (rh < 0)) {
+        dir = reverse_direction(dir);
+    }
+    SkRect rect(origRect);
+    rect.sort();
+    // reassign these, now that we know they'll be >= 0
+    rw = rect.width();
+    rh = rect.height();
+
+    SkRect r(rect);
+    r.outset(radius, radius);
+
+    SkPaint::Join join = (SkPaint::Join)fJoin;
+    // A miter limit below sqrt(2) cannot produce a 90-degree miter; fall back to bevel.
+    if (SkPaint::kMiter_Join == join && fMiterLimit < SK_ScalarSqrt2) {
+        join = SkPaint::kBevel_Join;
+    }
+
+    switch (join) {
+        case SkPaint::kMiter_Join:
+            dst->addRect(r, dir);
+            break;
+        case SkPaint::kBevel_Join:
+            addBevel(dst, rect, r, dir);
+            break;
+        case SkPaint::kRound_Join:
+            dst->addRoundRect(r, radius, radius, dir);
+            break;
+        default:
+            break;
+    }
+
+    // Only emit the inner hole if the stroke doesn't cover the whole interior.
+    if (fWidth < std::min(rw, rh) && !fDoFill) {
+        r = rect;
+        r.inset(radius, radius);
+        dst->addRect(r, reverse_direction(dir));
+    }
+}
diff --git a/gfx/skia/skia/src/core/SkStroke.h b/gfx/skia/skia/src/core/SkStroke.h
new file mode 100644
index 0000000000..fb7323d827
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStroke.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkStroke_DEFINED
+#define SkStroke_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/private/base/SkTo.h"
+
+#ifdef SK_DEBUG
+extern bool gDebugStrokerErrorSet;
+extern SkScalar gDebugStrokerError;
+extern int gMaxRecursion[];
+#endif
+
+/** \class SkStroke
+ SkStroke is the utility class that constructs paths by stroking
+ geometries (lines, rects, ovals, roundrects, paths). This is
+ invoked when a geometry or text is drawn in a canvas with the
+ kStroke_Mask bit set in the paint.
+*/
+class SkStroke {
+public:
+ SkStroke();
+ SkStroke(const SkPaint&);
+ SkStroke(const SkPaint&, SkScalar width); // width overrides paint.getStrokeWidth()
+
+ SkPaint::Cap getCap() const { return (SkPaint::Cap)fCap; }
+ void setCap(SkPaint::Cap);
+
+ SkPaint::Join getJoin() const { return (SkPaint::Join)fJoin; }
+ void setJoin(SkPaint::Join);
+
+ void setMiterLimit(SkScalar);
+ void setWidth(SkScalar);
+
+ // "do fill" == stroke-and-fill: the interior is kept, so no inner
+ // contour needs to be generated (see strokeRect).
+ bool getDoFill() const { return SkToBool(fDoFill); }
+ void setDoFill(bool doFill) { fDoFill = SkToU8(doFill); }
+
+ /**
+ * ResScale is the "intended" resolution for the output.
+ * Default is 1.0.
+ * Larger values (res > 1) indicate that the result should be more precise, since it will
+ * be zoomed up, and small errors will be magnified.
+ * Smaller values (0 < res < 1) indicate that the result can be less precise, since it will
+ * be zoomed down, and small errors may be invisible.
+ */
+ SkScalar getResScale() const { return fResScale; }
+ void setResScale(SkScalar rs) {
+ SkASSERT(rs > 0 && SkScalarIsFinite(rs));
+ fResScale = rs;
+ }
+
+ /**
+ * Stroke the specified rect, winding it in the specified direction.
+ */
+ void strokeRect(const SkRect& rect, SkPath* result,
+ SkPathDirection = SkPathDirection::kCW) const;
+ void strokePath(const SkPath& path, SkPath*) const;
+
+ ////////////////////////////////////////////////////////////////
+
+private:
+ SkScalar fWidth, fMiterLimit; // full stroke width and miter limit
+ SkScalar fResScale; // see setResScale()
+ uint8_t fCap, fJoin; // SkPaint::Cap / SkPaint::Join, stored narrow
+ bool fDoFill; // true => stroke-and-fill
+
+ friend class SkPaint;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkStrokeRec.cpp b/gfx/skia/skia/src/core/SkStrokeRec.cpp
new file mode 100644
index 0000000000..d63079269f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokeRec.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStrokeRec.h"
+
+#include "src/core/SkPaintDefaults.h"
+#include "src/core/SkStroke.h"
+
+#include <algorithm>
+
+// must be < 0, since ==0 means hairline, and >0 means normal stroke
+#define kStrokeRec_FillStyleWidth (-SK_Scalar1)
+
+// Constructs a rec in its "initial" state: fill (encoded as a negative
+// width) or hairline (width == 0), with paint defaults for the remaining
+// stroke parameters.
+SkStrokeRec::SkStrokeRec(InitStyle s) {
+ fResScale = 1;
+ fWidth = (kFill_InitStyle == s) ? kStrokeRec_FillStyleWidth : 0;
+ fMiterLimit = SkPaintDefaults_MiterLimit;
+ fCap = SkPaint::kDefault_Cap;
+ fJoin = SkPaint::kDefault_Join;
+ fStrokeAndFill = false;
+}
+
+// Captures the paint's own style.
+SkStrokeRec::SkStrokeRec(const SkPaint& paint, SkScalar resScale) {
+ this->init(paint, paint.getStyle(), resScale);
+}
+
+// Captures the paint's stroke parameters but with the caller's style.
+SkStrokeRec::SkStrokeRec(const SkPaint& paint, SkPaint::Style styleOverride, SkScalar resScale) {
+ this->init(paint, styleOverride, resScale);
+}
+
+// Shared constructor body: encodes (style, strokeWidth) into the fWidth /
+// fStrokeAndFill sentinel scheme (fWidth < 0 => fill, == 0 => hairline,
+// > 0 => stroke), then copies the remaining stroke params from the paint.
+void SkStrokeRec::init(const SkPaint& paint, SkPaint::Style style, SkScalar resScale) {
+ fResScale = resScale;
+
+ switch (style) {
+ case SkPaint::kFill_Style:
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ break;
+ case SkPaint::kStroke_Style:
+ fWidth = paint.getStrokeWidth();
+ fStrokeAndFill = false;
+ break;
+ case SkPaint::kStrokeAndFill_Style:
+ if (0 == paint.getStrokeWidth()) {
+ // hairline+fill == fill
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ } else {
+ fWidth = paint.getStrokeWidth();
+ fStrokeAndFill = true;
+ }
+ break;
+ default:
+ SkDEBUGFAIL("unknown paint style");
+ // fall back on just fill
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+ break;
+ }
+
+ // copy these from the paint, regardless of our "style"
+ fMiterLimit = paint.getStrokeMiter();
+ fCap = paint.getStrokeCap();
+ fJoin = paint.getStrokeJoin();
+}
+
+// Decodes the fWidth sentinel: negative => fill, zero => hairline,
+// positive => stroke (optionally stroke-and-fill).
+SkStrokeRec::Style SkStrokeRec::getStyle() const {
+ if (fWidth < 0) {
+ return kFill_Style;
+ } else if (0 == fWidth) {
+ return kHairline_Style;
+ } else {
+ return fStrokeAndFill ? kStrokeAndFill_Style : kStroke_Style;
+ }
+}
+
+void SkStrokeRec::setFillStyle() {
+ fWidth = kStrokeRec_FillStyleWidth;
+ fStrokeAndFill = false;
+}
+
+void SkStrokeRec::setHairlineStyle() {
+ fWidth = 0;
+ fStrokeAndFill = false;
+}
+
+// A zero width with strokeAndFill collapses to plain fill (a hairline adds
+// nothing to a filled interior); otherwise records the stroke as given.
+void SkStrokeRec::setStrokeStyle(SkScalar width, bool strokeAndFill) {
+ if (strokeAndFill && (0 == width)) {
+ // hairline+fill == fill
+ this->setFillStyle();
+ } else {
+ fWidth = width;
+ fStrokeAndFill = strokeAndFill;
+ }
+}
+
+#ifdef SK_DEBUG
+ // enables tweaking these values at runtime from Viewer
+ bool gDebugStrokerErrorSet = false;
+ SkScalar gDebugStrokerError;
+#endif
+
+// Runs the stroker over src, writing the stroked outline to dst. Returns
+// false (leaving dst untouched) for fill and hairline styles, where the
+// caller should use src as-is.
+bool SkStrokeRec::applyToPath(SkPath* dst, const SkPath& src) const {
+ if (fWidth <= 0) { // hairline or fill
+ return false;
+ }
+
+ SkStroke stroker;
+ stroker.setCap((SkPaint::Cap)fCap);
+ stroker.setJoin((SkPaint::Join)fJoin);
+ stroker.setMiterLimit(fMiterLimit);
+ stroker.setWidth(fWidth);
+ stroker.setDoFill(fStrokeAndFill);
+#ifdef SK_DEBUG
+ // Debug builds allow Viewer to override the error tolerance globally.
+ stroker.setResScale(gDebugStrokerErrorSet ? gDebugStrokerError : fResScale);
+#else
+ stroker.setResScale(fResScale);
+#endif
+ stroker.strokePath(src, dst);
+ return true;
+}
+
+// Writes this rec's style and stroke parameters back onto a paint. Note a
+// hairline (fWidth == 0) maps to kStroke_Style with width 0.
+void SkStrokeRec::applyToPaint(SkPaint* paint) const {
+ if (fWidth < 0) { // fill
+ paint->setStyle(SkPaint::kFill_Style);
+ return;
+ }
+
+ paint->setStyle(fStrokeAndFill ? SkPaint::kStrokeAndFill_Style : SkPaint::kStroke_Style);
+ paint->setStrokeWidth(fWidth);
+ paint->setStrokeMiter(fMiterLimit);
+ paint->setStrokeCap((SkPaint::Cap)fCap);
+ paint->setStrokeJoin((SkPaint::Join)fJoin);
+}
+
+SkScalar SkStrokeRec::getInflationRadius() const {
+ return GetInflationRadius((SkPaint::Join)fJoin, fMiterLimit, (SkPaint::Cap)fCap, fWidth);
+}
+
+// Static helper: a fill style is encoded as a negative width before
+// delegating to the four-argument overload below.
+SkScalar SkStrokeRec::GetInflationRadius(const SkPaint& paint, SkPaint::Style style) {
+ SkScalar width = SkPaint::kFill_Style == style ? -SK_Scalar1 : paint.getStrokeWidth();
+ return GetInflationRadius(paint.getStrokeJoin(), paint.getStrokeMiter(), paint.getStrokeCap(),
+ width);
+
+}
+
+// Conservative bound on how far a stroke can extend beyond the source
+// geometry: half the stroke width, scaled up for miter joins (by the miter
+// limit) and for square caps (by sqrt(2), the half-square's diagonal).
+SkScalar SkStrokeRec::GetInflationRadius(SkPaint::Join join, SkScalar miterLimit, SkPaint::Cap cap,
+ SkScalar strokeWidth) {
+ if (strokeWidth < 0) { // fill
+ return 0;
+ } else if (0 == strokeWidth) {
+ // FIXME: We need a "matrixScale" parameter here in order to properly handle hairlines.
+ // Their width is determined in device space, unlike other strokes.
+ // http://skbug.com/8157
+ return SK_Scalar1;
+ }
+
+ // since we're stroked, outset the rect by the radius (and join type, caps)
+ SkScalar multiplier = SK_Scalar1;
+ if (SkPaint::kMiter_Join == join) {
+ multiplier = std::max(multiplier, miterLimit);
+ }
+ if (SkPaint::kSquare_Cap == cap) {
+ multiplier = std::max(multiplier, SK_ScalarSqrt2);
+ }
+ return strokeWidth/2 * multiplier;
+}
+
diff --git a/gfx/skia/skia/src/core/SkStrokerPriv.cpp b/gfx/skia/skia/src/core/SkStrokerPriv.cpp
new file mode 100644
index 0000000000..32cf9ecb4e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokerPriv.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkStrokerPriv.h"
+
+#include <utility>
+
+// Cappers close the gap between the outer and inner stroke contours at a
+// path endpoint. `pivot` is the endpoint on the source path, `normal` the
+// stroke-radius normal there, and `stop` the point the cap must end at.
+
+// Butt cap: connect straight across the end.
+static void ButtCapper(SkPath* path, const SkPoint& pivot, const SkVector& normal,
+ const SkPoint& stop, SkPath*) {
+ path->lineTo(stop.fX, stop.fY);
+}
+
+// Round cap: a half-circle of stroke radius, built from two conic arcs.
+static void RoundCapper(SkPath* path, const SkPoint& pivot, const SkVector& normal,
+ const SkPoint& stop, SkPath*) {
+ SkVector parallel;
+ SkPointPriv::RotateCW(normal, &parallel);
+
+ SkPoint projectedCenter = pivot + parallel;
+
+ path->conicTo(projectedCenter + normal, projectedCenter, SK_ScalarRoot2Over2);
+ path->conicTo(projectedCenter - normal, stop, SK_ScalarRoot2Over2);
+}
+
+// Square cap: extend both edges by the stroke radius before closing. When
+// otherPath is non-null, the last point is adjusted in place instead of
+// adding a new segment.
+static void SquareCapper(SkPath* path, const SkPoint& pivot, const SkVector& normal,
+ const SkPoint& stop, SkPath* otherPath) {
+ SkVector parallel;
+ SkPointPriv::RotateCW(normal, &parallel);
+
+ if (otherPath) {
+ path->setLastPt(pivot.fX + normal.fX + parallel.fX, pivot.fY + normal.fY + parallel.fY);
+ path->lineTo(pivot.fX - normal.fX + parallel.fX, pivot.fY - normal.fY + parallel.fY);
+ } else {
+ path->lineTo(pivot.fX + normal.fX + parallel.fX, pivot.fY + normal.fY + parallel.fY);
+ path->lineTo(pivot.fX - normal.fX + parallel.fX, pivot.fY - normal.fY + parallel.fY);
+ path->lineTo(stop.fX, stop.fY);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+// True when turning from `before` to `after` is a clockwise rotation
+// (2-D cross-product sign test).
+static bool is_clockwise(const SkVector& before, const SkVector& after) {
+ return before.fX * after.fY > before.fY * after.fX;
+}
+
+// Classification of the turn at a join, derived from the dot product of
+// the two unit normals.
+enum AngleType {
+ kNearly180_AngleType, // normals nearly opposite: path doubles back
+ kSharp_AngleType,
+ kShallow_AngleType,
+ kNearlyLine_AngleType // normals nearly equal: effectively straight
+};
+
+static AngleType Dot2AngleType(SkScalar dot) {
+// need more precise fixed normalization
+// SkASSERT(SkScalarAbs(dot) <= SK_Scalar1 + SK_ScalarNearlyZero);
+
+ if (dot >= 0) { // shallow or line
+ return SkScalarNearlyZero(SK_Scalar1 - dot) ? kNearlyLine_AngleType : kShallow_AngleType;
+ } else { // sharp or 180
+ return SkScalarNearlyZero(SK_Scalar1 + dot) ? kNearly180_AngleType : kSharp_AngleType;
+ }
+}
+
+// Connects the inner contour across a join, detouring through the pivot.
+static void HandleInnerJoin(SkPath* inner, const SkPoint& pivot, const SkVector& after) {
+#if 1
+ /* In the degenerate case that the stroke radius is larger than our segments
+ just connecting the two inner segments may "show through" as a funny
+ diagonal. To pseudo-fix this, we go through the pivot point. This adds
+ an extra point/edge, but I can't see a cheap way to know when this is
+ not needed :(
+ */
+ inner->lineTo(pivot.fX, pivot.fY);
+#endif
+
+ inner->lineTo(pivot.fX - after.fX, pivot.fY - after.fY);
+}
+
+// Bevel join: a single straight segment across the outside of the corner.
+// For a counter-clockwise turn, outer/inner swap roles and the offset
+// flips sign so the same code handles both turn directions.
+static void BluntJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit, bool, bool) {
+ SkVector after;
+ afterUnitNormal.scale(radius, &after);
+
+ if (!is_clockwise(beforeUnitNormal, afterUnitNormal)) {
+ using std::swap;
+ swap(outer, inner);
+ after.negate();
+ }
+
+ outer->lineTo(pivot.fX + after.fX, pivot.fY + after.fY);
+ HandleInnerJoin(inner, pivot, after);
+}
+
+// Round join: sweeps a circular arc of stroke radius between the two edge
+// normals, emitted as conic sections via SkConic::BuildUnitArc.
+static void RoundJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit, bool, bool) {
+ SkScalar dotProd = SkPoint::DotProduct(beforeUnitNormal, afterUnitNormal);
+ AngleType angleType = Dot2AngleType(dotProd);
+
+ // Nearly straight: no visible join is needed.
+ if (angleType == kNearlyLine_AngleType)
+ return;
+
+ SkVector before = beforeUnitNormal;
+ SkVector after = afterUnitNormal;
+ SkRotationDirection dir = kCW_SkRotationDirection;
+
+ // Normalize to the clockwise case by swapping contours and negating
+ // the normals for a counter-clockwise turn.
+ if (!is_clockwise(before, after)) {
+ using std::swap;
+ swap(outer, inner);
+ before.negate();
+ after.negate();
+ dir = kCCW_SkRotationDirection;
+ }
+
+ // Build the arc between the unit normals, scaled out to the stroke
+ // radius and translated to the pivot.
+ SkMatrix matrix;
+ matrix.setScale(radius, radius);
+ matrix.postTranslate(pivot.fX, pivot.fY);
+ SkConic conics[SkConic::kMaxConicsForArc];
+ int count = SkConic::BuildUnitArc(before, after, dir, &matrix, conics);
+ if (count > 0) {
+ for (int i = 0; i < count; ++i) {
+ outer->conicTo(conics[i].fPts[1], conics[i].fPts[2], conics[i].fW);
+ }
+ after.scale(radius);
+ HandleInnerJoin(inner, pivot, after);
+ }
+}
+
+#define kOneOverSqrt2 (0.707106781f)
+
+// Miter join: extends the outer edges to their intersection (the miter
+// tip). Falls back to a blunt (bevel) join when the corner is too sharp
+// for the miter limit, or the turn is nearly 180 degrees.
+static void MiterJoiner(SkPath* outer, SkPath* inner, const SkVector& beforeUnitNormal,
+ const SkPoint& pivot, const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit,
+ bool prevIsLine, bool currIsLine) {
+ // dotProd is between the normals, so its sign is opposite that of the
+ // tangents' dot product (see the midLength derivation below)
+ SkScalar dotProd = SkPoint::DotProduct(beforeUnitNormal, afterUnitNormal);
+ AngleType angleType = Dot2AngleType(dotProd);
+ SkVector before = beforeUnitNormal;
+ SkVector after = afterUnitNormal;
+ SkVector mid;
+ SkScalar sinHalfAngle;
+ bool ccw;
+
+ if (angleType == kNearlyLine_AngleType) {
+ return;
+ }
+ // A 180-degree turn has no finite miter tip; emit a blunt join.
+ if (angleType == kNearly180_AngleType) {
+ currIsLine = false;
+ goto DO_BLUNT;
+ }
+
+ // Normalize to the clockwise case (swap contours, flip normals).
+ ccw = !is_clockwise(before, after);
+ if (ccw) {
+ using std::swap;
+ swap(outer, inner);
+ before.negate();
+ after.negate();
+ }
+
+ /* Before we enter the world of square-roots and divides,
+ check if we're trying to join an upright right angle
+ (common case for stroking rectangles). If so, special case
+ that (for speed and accuracy).
+ Note: we only need to check one normal if dot==0
+ */
+ if (0 == dotProd && invMiterLimit <= kOneOverSqrt2) {
+ mid = (before + after) * radius;
+ goto DO_MITER;
+ }
+
+ /* midLength = radius / sinHalfAngle
+ if (midLength > miterLimit * radius) abort
+ if (radius / sinHalf > miterLimit * radius) abort
+ if (1 / sinHalf > miterLimit) abort
+ if (1 / miterLimit > sinHalf) abort
+ My dotProd is opposite sign, since it is built from normals and not tangents
+ hence 1 + dot instead of 1 - dot in the formula
+ */
+ sinHalfAngle = SkScalarSqrt(SkScalarHalf(SK_Scalar1 + dotProd));
+ if (sinHalfAngle < invMiterLimit) {
+ // Miter tip would exceed the limit; degrade to a blunt join.
+ currIsLine = false;
+ goto DO_BLUNT;
+ }
+
+ // choose the most accurate way to form the initial mid-vector
+ if (angleType == kSharp_AngleType) {
+ // For sharp angles the sum of the normals is short; the rotated
+ // difference is the numerically better choice.
+ mid.set(after.fY - before.fY, before.fX - after.fX);
+ if (ccw) {
+ mid.negate();
+ }
+ } else {
+ mid.set(before.fX + after.fX, before.fY + after.fY);
+ }
+
+ mid.setLength(radius / sinHalfAngle);
+DO_MITER:
+ if (prevIsLine) {
+ // The previous line already ends on the outer edge; move its end
+ // point to the miter tip rather than adding a new segment.
+ outer->setLastPt(pivot.fX + mid.fX, pivot.fY + mid.fY);
+ } else {
+ outer->lineTo(pivot.fX + mid.fX, pivot.fY + mid.fY);
+ }
+
+DO_BLUNT:
+ after.scale(radius);
+ if (!currIsLine) {
+ outer->lineTo(pivot.fX + after.fX, pivot.fY + after.fY);
+ }
+ HandleInnerJoin(inner, pivot, after);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+// Maps a cap enum to its capper; array order must match SkPaint::Cap
+// (butt, round, square).
+SkStrokerPriv::CapProc SkStrokerPriv::CapFactory(SkPaint::Cap cap) {
+ const SkStrokerPriv::CapProc gCappers[] = {
+ ButtCapper, RoundCapper, SquareCapper
+ };
+
+ SkASSERT((unsigned)cap < SkPaint::kCapCount);
+ return gCappers[cap];
+}
+
+// Maps a join enum to its joiner; array order must match SkPaint::Join
+// (miter, round, bevel).
+SkStrokerPriv::JoinProc SkStrokerPriv::JoinFactory(SkPaint::Join join) {
+ const SkStrokerPriv::JoinProc gJoiners[] = {
+ MiterJoiner, RoundJoiner, BluntJoiner
+ };
+
+ SkASSERT((unsigned)join < SkPaint::kJoinCount);
+ return gJoiners[join];
+}
diff --git a/gfx/skia/skia/src/core/SkStrokerPriv.h b/gfx/skia/skia/src/core/SkStrokerPriv.h
new file mode 100644
index 0000000000..a7294f7e27
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkStrokerPriv.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkStrokerPriv_DEFINED
+#define SkStrokerPriv_DEFINED
+
+#include "src/core/SkStroke.h"
+
+#define CWX(x, y) (-y)
+#define CWY(x, y) (x)
+#define CCWX(x, y) (y)
+#define CCWY(x, y) (-x)
+
+#define CUBIC_ARC_FACTOR ((SK_ScalarSqrt2 - SK_Scalar1) * 4 / 3)
+
+// this enables a global which is not thread-safe; doing so triggers a TSAN error in Chrome tests.
+#define QUAD_STROKE_APPROX_EXTENDED_DEBUGGING 0 // set to 1 to enable debugging in StrokerTest.cpp
+
+/** Internal helpers for SkStroke: factories returning the function that
+ emits a cap or a join of the requested style while stroking.
+*/
+class SkStrokerPriv {
+public:
+ // Emits a cap around `pivot` ending at `stop` on the given path.
+ typedef void (*CapProc)(SkPath* path,
+ const SkPoint& pivot,
+ const SkVector& normal,
+ const SkPoint& stop,
+ SkPath* otherPath);
+
+ // Emits a join on the outer/inner contours where two segments meet at
+ // `pivot`, given the unit normals of the adjoining segments.
+ typedef void (*JoinProc)(SkPath* outer, SkPath* inner,
+ const SkVector& beforeUnitNormal,
+ const SkPoint& pivot,
+ const SkVector& afterUnitNormal,
+ SkScalar radius, SkScalar invMiterLimit,
+ bool prevIsLine, bool currIsLine);
+
+ static CapProc CapFactory(SkPaint::Cap);
+ static JoinProc JoinFactory(SkPaint::Join);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSurfaceCharacterization.cpp b/gfx/skia/skia/src/core/SkSurfaceCharacterization.cpp
new file mode 100644
index 0000000000..707c55b546
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSurfaceCharacterization.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSurfaceCharacterization.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
+
+#ifdef SK_VULKAN
+#include "include/gpu/vk/GrVkTypes.h"
+#endif
+
+#ifdef SK_DEBUG
+// Debug-only consistency check of the recorded flag combinations: the
+// format/color type must be renderable at the sample count, mipmapped
+// implies textureable, textureable excludes FBO0, FBO0 implies GL, and
+// the secondary-command-buffer / input-attachment flags imply Vulkan and
+// are mutually exclusive.
+void SkSurfaceCharacterization::validate() const {
+ const GrCaps* caps = fContextInfo->priv().caps();
+
+ GrColorType grCT = SkColorTypeToGrColorType(this->colorType());
+ SkASSERT(fSampleCnt && caps->isFormatAsColorTypeRenderable(grCT, fBackendFormat, fSampleCnt));
+
+ SkASSERT(caps->areColorTypeAndFormatCompatible(grCT, fBackendFormat));
+
+ SkASSERT(MipMapped::kNo == fIsMipMapped || Textureable::kYes == fIsTextureable);
+ SkASSERT(Textureable::kNo == fIsTextureable || UsesGLFBO0::kNo == fUsesGLFBO0);
+ auto backend = fBackendFormat.backend();
+ SkASSERT(UsesGLFBO0::kNo == fUsesGLFBO0 || backend == GrBackendApi::kOpenGL);
+ SkASSERT((VulkanSecondaryCBCompatible::kNo == fVulkanSecondaryCBCompatible &&
+ VkRTSupportsInputAttachment::kNo == fVkRTSupportsInputAttachment) ||
+ backend == GrBackendApi::kVulkan);
+ SkASSERT(VulkanSecondaryCBCompatible::kNo == fVulkanSecondaryCBCompatible ||
+ VkRTSupportsInputAttachment::kNo == fVkRTSupportsInputAttachment);
+ SkASSERT(Textureable::kNo == fIsTextureable ||
+ VulkanSecondaryCBCompatible::kNo == fVulkanSecondaryCBCompatible);
+}
+#endif
+
+
+// Equality requires both characterizations to be valid, to come from the
+// same context, and to agree on every recorded property.
+bool SkSurfaceCharacterization::operator==(const SkSurfaceCharacterization& other) const {
+ if (!this->isValid() || !other.isValid()) {
+ return false;
+ }
+
+ if (fContextInfo != other.fContextInfo) {
+ return false;
+ }
+
+ return fCacheMaxResourceBytes == other.fCacheMaxResourceBytes &&
+ fOrigin == other.fOrigin &&
+ fImageInfo == other.fImageInfo &&
+ fBackendFormat == other.fBackendFormat &&
+ fSampleCnt == other.fSampleCnt &&
+ fIsTextureable == other.fIsTextureable &&
+ fIsMipMapped == other.fIsMipMapped &&
+ fUsesGLFBO0 == other.fUsesGLFBO0 &&
+ fVulkanSecondaryCBCompatible == other.fVulkanSecondaryCBCompatible &&
+ fIsProtected == other.fIsProtected &&
+ fSurfaceProps == other.fSurfaceProps;
+}
+
+// Returns a copy of this characterization with new dimensions, or an
+// invalid characterization if the size is non-positive or exceeds the
+// context's maximum render target size.
+SkSurfaceCharacterization SkSurfaceCharacterization::createResized(int width, int height) const {
+ const GrCaps* caps = fContextInfo->priv().caps();
+ if (!caps) {
+ return SkSurfaceCharacterization();
+ }
+
+ if (width <= 0 || height <= 0 || width > caps->maxRenderTargetSize() ||
+ height > caps->maxRenderTargetSize()) {
+ return SkSurfaceCharacterization();
+ }
+
+ return SkSurfaceCharacterization(fContextInfo, fCacheMaxResourceBytes,
+ fImageInfo.makeWH(width, height), fBackendFormat, fOrigin,
+ fSampleCnt, fIsTextureable, fIsMipMapped, fUsesGLFBO0,
+ fVkRTSupportsInputAttachment,
+ fVulkanSecondaryCBCompatible,
+ fIsProtected, fSurfaceProps);
+}
+
+// Returns a copy with the image info's color space replaced; an invalid
+// characterization stays invalid.
+SkSurfaceCharacterization SkSurfaceCharacterization::createColorSpace(
+ sk_sp<SkColorSpace> cs) const {
+ if (!this->isValid()) {
+ return SkSurfaceCharacterization();
+ }
+
+ return SkSurfaceCharacterization(fContextInfo, fCacheMaxResourceBytes,
+ fImageInfo.makeColorSpace(std::move(cs)), fBackendFormat,
+ fOrigin, fSampleCnt, fIsTextureable, fIsMipMapped, fUsesGLFBO0,
+ fVkRTSupportsInputAttachment,
+ fVulkanSecondaryCBCompatible, fIsProtected, fSurfaceProps);
+}
+
+// Returns a copy with a new color type and its matching backend format.
+SkSurfaceCharacterization SkSurfaceCharacterization::createBackendFormat(
+ SkColorType colorType,
+ const GrBackendFormat& backendFormat) const {
+ if (!this->isValid()) {
+ return SkSurfaceCharacterization();
+ }
+
+ SkImageInfo newII = fImageInfo.makeColorType(colorType);
+
+ return SkSurfaceCharacterization(fContextInfo, fCacheMaxResourceBytes, newII, backendFormat,
+ fOrigin, fSampleCnt, fIsTextureable, fIsMipMapped, fUsesGLFBO0,
+ fVkRTSupportsInputAttachment,
+ fVulkanSecondaryCBCompatible, fIsProtected, fSurfaceProps);
+}
+
+// Returns a copy with the GL-FBO0 flag changed, or an invalid
+// characterization when the other recorded flags are incompatible with it.
+SkSurfaceCharacterization SkSurfaceCharacterization::createFBO0(bool usesGLFBO0) const {
+ if (!this->isValid()) {
+ return SkSurfaceCharacterization();
+ }
+
+ // We can't create an FBO0 characterization that is textureable or has any non-gl specific flags
+ if (fIsTextureable == Textureable::kYes ||
+ fVkRTSupportsInputAttachment == VkRTSupportsInputAttachment::kYes ||
+ fVulkanSecondaryCBCompatible == VulkanSecondaryCBCompatible::kYes) {
+ return SkSurfaceCharacterization();
+ }
+
+ return SkSurfaceCharacterization(fContextInfo, fCacheMaxResourceBytes,
+ fImageInfo, fBackendFormat,
+ fOrigin, fSampleCnt, fIsTextureable, fIsMipMapped,
+ usesGLFBO0 ? UsesGLFBO0::kYes : UsesGLFBO0::kNo,
+ fVkRTSupportsInputAttachment,
+ fVulkanSecondaryCBCompatible, fIsProtected, fSurfaceProps);
+}
+
+// Whether the given backend texture could back a surface with this
+// characterization: formats must match, FBO0 / Vulkan-secondary-CB
+// surfaces can never wrap a texture, required input-attachment usage must
+// be present, and size, mipmapping, and protected-ness must line up.
+bool SkSurfaceCharacterization::isCompatible(const GrBackendTexture& backendTex) const {
+ if (!this->isValid() || !backendTex.isValid()) {
+ return false;
+ }
+
+ if (fBackendFormat != backendTex.getBackendFormat()) {
+ return false;
+ }
+
+ if (this->usesGLFBO0()) {
+ // It is a backend texture so can't be wrapping FBO0
+ return false;
+ }
+
+ if (this->vulkanSecondaryCBCompatible()) {
+ return false;
+ }
+
+ if (this->vkRTSupportsInputAttachment()) {
+ if (backendTex.backend() != GrBackendApi::kVulkan) {
+ return false;
+ }
+#ifdef SK_VULKAN
+ GrVkImageInfo vkInfo;
+ if (!backendTex.getVkImageInfo(&vkInfo)) {
+ return false;
+ }
+ if (!SkToBool(vkInfo.fImageUsageFlags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
+ return false;
+ }
+#endif // SK_VULKAN
+ }
+
+ if (this->isMipMapped() && !backendTex.hasMipmaps()) {
+ // Note the asymmetry: the backend texture is allowed to have mipmaps even if the
+ // characterization doesn't require them, but a mipmapped characterization needs a
+ // mipmapped texture.
+ return false;
+ }
+
+ if (this->width() != backendTex.width() || this->height() != backendTex.height()) {
+ return false;
+ }
+
+ if (this->isProtected() != GrProtected(backendTex.isProtected())) {
+ return false;
+ }
+
+ return true;
+}
+
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSurfacePriv.h b/gfx/skia/skia/src/core/SkSurfacePriv.h
new file mode 100644
index 0000000000..26c70644f9
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSurfacePriv.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurfacePriv_DEFINED
+#define SkSurfacePriv_DEFINED
+
+#include "include/core/SkSurfaceProps.h"
+
+struct SkImageInfo;
+
+// Returns *props if supplied, otherwise a default-constructed SkSurfaceProps.
+static inline SkSurfaceProps SkSurfacePropsCopyOrDefault(const SkSurfaceProps* props) {
+ return props ? *props : SkSurfaceProps();
+}
+
+// Sentinel rowBytes value meaning "don't validate rowBytes".
+constexpr size_t kIgnoreRowBytesValue = static_cast<size_t>(~0);
+
+bool SkSurfaceValidateRasterInfo(const SkImageInfo&, size_t rb = kIgnoreRowBytesValue);
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkSwizzle.cpp b/gfx/skia/skia/src/core/SkSwizzle.cpp
new file mode 100644
index 0000000000..301b0184f1
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSwizzle.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSwizzle.h"
+
+#include "src/core/SkOpts.h"
+
+// Swaps the R and B channels of `count` 32-bit pixels (RGBA <-> BGRA),
+// dispatching to the platform-optimized SkOpts implementation.
+void SkSwapRB(uint32_t* dest, const uint32_t* src, int count) {
+ SkOpts::RGBA_to_BGRA(dest, src, count);
+}
diff --git a/gfx/skia/skia/src/core/SkSwizzlePriv.h b/gfx/skia/skia/src/core/SkSwizzlePriv.h
new file mode 100644
index 0000000000..665d59ef37
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkSwizzlePriv.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkColorData.h"
+#include "src/base/SkVx.h"
+
+#include <cstdint>
+
+// Swaps the R and B lanes of a 4-float pixel (lane order 0..3 -> 2,1,0,3).
+static inline skvx::float4 swizzle_rb(const skvx::float4& x) {
+ return skvx::shuffle<2, 1, 0, 3>(x);
+}
+
+// Swaps R/B only when the native premul color order is BGRA; otherwise a
+// no-op, letting callers write color-order-independent code.
+static inline skvx::float4 swizzle_rb_if_bgra(const skvx::float4& x) {
+#if defined(SK_PMCOLOR_IS_BGRA)
+ return swizzle_rb(x);
+#else
+ return x;
+#endif
+}
+
+// Unpacks an 8888 pixel into four floats in [0,1].
+static inline skvx::float4 Sk4f_fromL32(uint32_t px) {
+ return skvx::cast<float>(skvx::byte4::Load(&px)) * (1 / 255.0f);
+}
+
+// Packs four floats (nominally in [0,1]) back into an 8888 pixel.
+static inline uint32_t Sk4f_toL32(const skvx::float4& px) {
+ uint32_t l32;
+ // For the expected positive color values, the +0.5 before the pin and cast effectively rounds
+ // to the nearest int without having to call round() or lrint().
+ skvx::cast<uint8_t>(skvx::pin(px * 255.f + 0.5f, skvx::float4(0.f), skvx::float4(255.f)))
+ .store(&l32);
+ return l32;
+}
diff --git a/gfx/skia/skia/src/core/SkTDynamicHash.h b/gfx/skia/skia/src/core/SkTDynamicHash.h
new file mode 100644
index 0000000000..11176a52ce
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTDynamicHash.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTDynamicHash_DEFINED
+#define SkTDynamicHash_DEFINED
+
+// This is now a simple API wrapper around SkTHashTable<T*>;
+// please just use SkTHash{Map,Set,Table} directly for new code.
+#include "src/core/SkTHash.h"
+
+// Traits requires:
+// static const Key& GetKey(const T&) { ... }
+// static uint32_t Hash(const Key&) { ... }
+// We'll look on T for these by default, or you can pass a custom Traits type.
+template <typename T,
+ typename Key,
+ typename Traits = T>
+class SkTDynamicHash {
+public:
+ SkTDynamicHash() {}
+
+ // It is not safe to call set() or remove() while iterating with either foreach().
+ // If you mutate the entries be very careful not to change the Key.
+
+ template <typename Fn> // f(T*)
+ void foreach(Fn&& fn) {
+ fTable.foreach([&](T** entry) { fn(*entry); });
+ }
+ template <typename Fn> // f(T) or f(const T&)
+ void foreach(Fn&& fn) const {
+ fTable.foreach([&](T* entry) { fn(*entry); });
+ }
+
+ int count() const { return fTable.count(); }
+
+ size_t approxBytesUsed() const { return fTable.approxBytesUsed(); }
+
+ // Returns the entry with this key, or nullptr if not present.
+ T* find(const Key& key) const { return fTable.findOrNull(key); }
+
+ // Stores raw pointers only; the table does not take ownership.
+ void add(T* entry) { fTable.set(entry); }
+ void remove(const Key& key) { fTable.remove(key); }
+
+ // Both names empty the table (rewind kept for legacy callers).
+ void rewind() { fTable.reset(); }
+ void reset () { fTable.reset(); }
+
+private:
+ // Adapts pointer-to-T storage to Traits written against T itself.
+ struct AdaptedTraits {
+ static const Key& GetKey(T* entry) { return Traits::GetKey(*entry); }
+ static uint32_t Hash(const Key& key) { return Traits::Hash(key); }
+ };
+ SkTHashTable<T*, Key, AdaptedTraits> fTable;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTHash.h b/gfx/skia/skia/src/core/SkTHash.h
new file mode 100644
index 0000000000..e40b06652b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTHash.h
@@ -0,0 +1,591 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTHash_DEFINED
+#define SkTHash_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkChecksum.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <initializer_list>
+#include <new>
+#include <utility>
+
+// Before trying to use SkTHashTable, look below to see if SkTHashMap or SkTHashSet works for you.
+// They're easier to use, usually perform the same, and have fewer sharp edges.
+
+// T and K are treated as ordinary copyable C++ types.
+// Traits must have:
+// - static K GetKey(T)
+// - static uint32_t Hash(K)
+// If the key is large and stored inside T, you may want to make K a const&.
+// Similarly, if T is large you might want it to be a pointer.
+// Open-addressing hash table with linear probing. Capacity is always a power
+// of two (grown by doubling from a minimum of 4), which lets `hash & (fCapacity-1)`
+// replace a modulo. Grows at 3/4 load, shrinks at 1/4 load (see set()/remove()).
+template <typename T, typename K, typename Traits = T>
+class SkTHashTable {
+public:
+    SkTHashTable() = default;
+    ~SkTHashTable() = default;
+
+    SkTHashTable(const SkTHashTable&  that) { *this = that; }
+    SkTHashTable(      SkTHashTable&& that) { *this = std::move(that); }
+
+    SkTHashTable& operator=(const SkTHashTable& that) {
+        if (this != &that) {
+            fCount    = that.fCount;
+            fCapacity = that.fCapacity;
+            // Deep copy: slot-by-slot, so each engaged Slot copy-constructs its T.
+            fSlots.reset(that.fCapacity);
+            for (int i = 0; i < fCapacity; i++) {
+                fSlots[i] = that.fSlots[i];
+            }
+        }
+        return *this;
+    }
+
+    SkTHashTable& operator=(SkTHashTable&& that) {
+        if (this != &that) {
+            fCount    = that.fCount;
+            fCapacity = that.fCapacity;
+            fSlots    = std::move(that.fSlots);
+
+            // Leave the source in a valid empty state.
+            that.fCount = that.fCapacity = 0;
+        }
+        return *this;
+    }
+
+    // Clear the table.
+    void reset() { *this = SkTHashTable(); }
+
+    // How many entries are in the table?
+    int count() const { return fCount; }
+
+    // How many slots does the table contain? (Note that unlike an array, hash tables can grow
+    // before reaching 100% capacity.)
+    int capacity() const { return fCapacity; }
+
+    // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+    size_t approxBytesUsed() const { return fCapacity * sizeof(Slot); }
+
+    // !!!!!!!!!!!!!!!!!                 CAUTION                   !!!!!!!!!!!!!!!!!
+    // set(), find() and foreach() all allow mutable access to table entries.
+    // If you change an entry so that it no longer has the same key, all hell
+    // will break loose.  Do not do that!
+    //
+    // Please prefer to use SkTHashMap or SkTHashSet, which do not have this danger.
+
+    // The pointers returned by set() and find() are valid only until the next call to set().
+    // The pointers you receive in foreach() are only valid for its duration.
+
+    // Copy val into the hash table, returning a pointer to the copy now in the table.
+    // If there already is an entry in the table with the same key, we overwrite it.
+    T* set(T val) {
+        // Grow (double, starting at 4) once we'd exceed a 3/4 load factor.
+        if (4 * fCount >= 3 * fCapacity) {
+            this->resize(fCapacity > 0 ? fCapacity * 2 : 4);
+        }
+        return this->uncheckedSet(std::move(val));
+    }
+
+    // If there is an entry in the table with this key, return a pointer to it.  If not, null.
+    T* find(const K& key) const {
+        uint32_t hash = Hash(key);
+        int index = hash & (fCapacity-1);
+        for (int n = 0; n < fCapacity; n++) {
+            Slot& s = fSlots[index];
+            // An empty slot terminates the probe chain: the key is not present.
+            if (s.empty()) {
+                return nullptr;
+            }
+            if (hash == s.fHash && key == Traits::GetKey(*s)) {
+                return &*s;
+            }
+            index = this->next(index);
+        }
+        // Only reachable if every slot is occupied and none matched; the
+        // grow-at-3/4 policy normally makes a completely full table impossible.
+        SkASSERT(fCapacity == fCount);
+        return nullptr;
+    }
+
+    // If there is an entry in the table with this key, return it.  If not, null.
+    // This only works for pointer type T, and cannot be used to find an nullptr entry.
+    T findOrNull(const K& key) const {
+        if (T* p = this->find(key)) {
+            return *p;
+        }
+        return nullptr;
+    }
+
+    // Remove the value with this key from the hash table.
+    void remove(const K& key) {
+        SkASSERT(this->find(key));
+
+        uint32_t hash = Hash(key);
+        int index = hash & (fCapacity-1);
+        for (int n = 0; n < fCapacity; n++) {
+            Slot& s = fSlots[index];
+            SkASSERT(s.has_value());
+            if (hash == s.fHash && key == Traits::GetKey(*s)) {
+                this->removeSlot(index);
+                // Shrink by half once only 1/4 full, but never below 4 slots.
+                if (4 * fCount <= fCapacity && fCapacity > 4) {
+                    this->resize(fCapacity / 2);
+                }
+                return;
+            }
+            index = this->next(index);
+        }
+    }
+
+    // Hash tables will automatically resize themselves when set() and remove() are called, but
+    // resize() can be called to manually grow capacity before a bulk insertion.
+    void resize(int capacity) {
+        SkASSERT(capacity >= fCount);
+        int oldCapacity = fCapacity;
+        SkDEBUGCODE(int oldCount = fCount);
+
+        // Rehash: swap in a fresh slot array, then re-insert every live entry.
+        fCount = 0;
+        fCapacity = capacity;
+        skia_private::AutoTArray<Slot> oldSlots = std::move(fSlots);
+        fSlots = skia_private::AutoTArray<Slot>(capacity);
+
+        for (int i = 0; i < oldCapacity; i++) {
+            Slot& s = oldSlots[i];
+            if (s.has_value()) {
+                this->uncheckedSet(*std::move(s));
+            }
+        }
+        SkASSERT(fCount == oldCount);
+    }
+
+    // Call fn on every entry in the table.  You may mutate the entries, but be very careful.
+    template <typename Fn>  // f(T*)
+    void foreach(Fn&& fn) {
+        for (int i = 0; i < fCapacity; i++) {
+            if (fSlots[i].has_value()) {
+                fn(&*fSlots[i]);
+            }
+        }
+    }
+
+    // Call fn on every entry in the table.  You may not mutate anything.
+    template <typename Fn>  // f(T) or f(const T&)
+    void foreach(Fn&& fn) const {
+        for (int i = 0; i < fCapacity; i++) {
+            if (fSlots[i].has_value()) {
+                fn(*fSlots[i]);
+            }
+        }
+    }
+
+    // A basic iterator-like class which disallows mutation; sufficient for range-based for loops.
+    // Intended for use by SkTHashMap and SkTHashSet via begin() and end().
+    // Adding or removing elements may invalidate all iterators.
+    template <typename SlotVal>
+    class Iter {
+    public:
+        using TTable = SkTHashTable<T, K, Traits>;
+
+        Iter(const TTable* table, int slot) : fTable(table), fSlot(slot) {}
+
+        static Iter MakeBegin(const TTable* table) {
+            return Iter{table, table->firstPopulatedSlot()};
+        }
+
+        static Iter MakeEnd(const TTable* table) {
+            // end() is represented by fSlot == capacity (one past the last slot).
+            return Iter{table, table->capacity()};
+        }
+
+        const SlotVal& operator*() const {
+            return *fTable->slot(fSlot);
+        }
+
+        const SlotVal* operator->() const {
+            return fTable->slot(fSlot);
+        }
+
+        bool operator==(const Iter& that) const {
+            // Iterators from different tables shouldn't be compared against each other.
+            SkASSERT(fTable == that.fTable);
+            return fSlot == that.fSlot;
+        }
+
+        bool operator!=(const Iter& that) const {
+            return !(*this == that);
+        }
+
+        Iter& operator++() {
+            fSlot = fTable->nextPopulatedSlot(fSlot);
+            return *this;
+        }
+
+        Iter operator++(int) {
+            Iter old = *this;
+            this->operator++();
+            return old;
+        }
+
+    protected:
+        const TTable* fTable;
+        int fSlot;
+    };
+
+private:
+    // Finds the first non-empty slot for an iterator.
+    int firstPopulatedSlot() const {
+        for (int i = 0; i < fCapacity; i++) {
+            if (fSlots[i].has_value()) {
+                return i;
+            }
+        }
+        return fCapacity;
+    }
+
+    // Increments an iterator's slot.
+    int nextPopulatedSlot(int currentSlot) const {
+        for (int i = currentSlot + 1; i < fCapacity; i++) {
+            if (fSlots[i].has_value()) {
+                return i;
+            }
+        }
+        return fCapacity;
+    }
+
+    // Reads from an iterator's slot.
+    const T* slot(int i) const {
+        SkASSERT(fSlots[i].has_value());
+        return &*fSlots[i];
+    }
+
+    // Insert without checking the load factor; callers must grow first if needed.
+    T* uncheckedSet(T&& val) {
+        const K& key = Traits::GetKey(val);
+        // A key that doesn't equal itself (NaN-like) could never be found again.
+        SkASSERT(key == key);
+        uint32_t hash = Hash(key);
+        int index = hash & (fCapacity-1);
+        for (int n = 0; n < fCapacity; n++) {
+            Slot& s = fSlots[index];
+            if (s.empty()) {
+                // New entry.
+                s.emplace(std::move(val), hash);
+                fCount++;
+                return &*s;
+            }
+            if (hash == s.fHash && key == Traits::GetKey(*s)) {
+                // Overwrite previous entry.
+                // Note: this triggers extra copies when adding the same value repeatedly.
+                s.emplace(std::move(val), hash);
+                return &*s;
+            }
+
+            index = this->next(index);
+        }
+        SkASSERT(false);
+        return nullptr;
+    }
+
+    void removeSlot(int index) {
+        fCount--;
+
+        // Rearrange elements to restore the invariants for linear probing.
+        for (;;) {
+            Slot& emptySlot = fSlots[index];
+            int emptyIndex = index;
+            int originalIndex;
+            // Look for an element that can be moved into the empty slot.
+            // If the empty slot is in between where an element landed, and its native slot, then
+            // move it to the empty slot. Don't move it if its native slot is in between where
+            // the element landed and the empty slot.
+            // [native] <= [empty] < [candidate] == GOOD, can move candidate to empty slot
+            // [empty] < [native] < [candidate] == BAD, need to leave candidate where it is
+            do {
+                index = this->next(index);
+                Slot& s = fSlots[index];
+                if (s.empty()) {
+                    // We're done shuffling elements around.  Clear the last empty slot.
+                    emptySlot.reset();
+                    return;
+                }
+                originalIndex = s.fHash & (fCapacity - 1);
+            } while ((index <= originalIndex && originalIndex < emptyIndex)
+                     || (originalIndex < emptyIndex && emptyIndex < index)
+                     || (emptyIndex < index && index <= originalIndex));
+            // Move the element to the empty slot.
+            Slot& moveFrom = fSlots[index];
+            emptySlot = std::move(moveFrom);
+        }
+    }
+
+    // Linear probing visits slots in *decreasing* index order, wrapping from 0
+    // back up to fCapacity-1. (Direction is arbitrary; it just must be consistent
+    // with removeSlot()'s invariant checks above.)
+    int next(int index) const {
+        index--;
+        if (index < 0) { index += fCapacity; }
+        return index;
+    }
+
+    static uint32_t Hash(const K& key) {
+        uint32_t hash = Traits::Hash(key) & 0xffffffff;
+        return hash ? hash : 1;  // We reserve hash 0 to mark empty.
+    }
+
+    // A Slot is "maybe a T": fHash == 0 means empty; otherwise fVal.fStorage
+    // holds a live T constructed by placement new and fHash caches its hash
+    // (so probing can reject mismatches without calling Traits::GetKey).
+    class Slot {
+    public:
+        Slot() = default;
+        ~Slot() { this->reset(); }
+
+        Slot(const Slot& that) { *this = that; }
+        Slot& operator=(const Slot& that) {
+            if (this == &that) {
+                return *this;
+            }
+            // Four cases, keyed on whether each side currently holds a value:
+            // engaged <- engaged: T copy-assign; engaged <- empty: destroy ours;
+            // empty <- engaged: placement-new copy; empty <- empty: no-op.
+            if (fHash) {
+                if (that.fHash) {
+                    fVal.fStorage = that.fVal.fStorage;
+                    fHash = that.fHash;
+                } else {
+                    this->reset();
+                }
+            } else {
+                if (that.fHash) {
+                    new (&fVal.fStorage) T(that.fVal.fStorage);
+                    fHash = that.fHash;
+                } else {
+                    // do nothing, no value on either side
+                }
+            }
+            return *this;
+        }
+
+        Slot(Slot&& that) { *this = std::move(that); }
+        Slot& operator=(Slot&& that) {
+            if (this == &that) {
+                return *this;
+            }
+            // Same four cases as copy-assign, but moving the contained T.
+            // Note: the source slot keeps its fHash (moved-from but still engaged).
+            if (fHash) {
+                if (that.fHash) {
+                    fVal.fStorage = std::move(that.fVal.fStorage);
+                    fHash = that.fHash;
+                } else {
+                    this->reset();
+                }
+            } else {
+                if (that.fHash) {
+                    new (&fVal.fStorage) T(std::move(that.fVal.fStorage));
+                    fHash = that.fHash;
+                } else {
+                    // do nothing, no value on either side
+                }
+            }
+            return *this;
+        }
+
+        T& operator*() & { return fVal.fStorage; }
+        const T& operator*() const& { return fVal.fStorage; }
+        T&& operator*() && { return std::move(fVal.fStorage); }
+        const T&& operator*() const&& { return std::move(fVal.fStorage); }
+
+        Slot& emplace(T&& v, uint32_t h) {
+            this->reset();
+            new (&fVal.fStorage) T(std::move(v));
+            fHash = h;
+            return *this;
+        }
+
+        bool has_value() const { return fHash != 0; }
+        explicit operator bool() const { return this->has_value(); }
+        bool empty() const { return !this->has_value(); }
+
+        void reset() {
+            if (fHash) {
+                fVal.fStorage.~T();
+                fHash = 0;
+            }
+        }
+
+        uint32_t fHash = 0;
+
+    private:
+        // Raw, initially-uninitialized storage for T. The union suppresses
+        // automatic construction/destruction; Slot manages the T's lifetime
+        // manually so empty slots hold no T at all.
+        union Storage {
+            T fStorage;
+            Storage() {}
+            ~Storage() {}
+        } fVal;
+    };
+
+    int fCount    = 0,
+        fCapacity = 0;
+    skia_private::AutoTArray<Slot> fSlots;
+};
+
+// Maps K->V. A more user-friendly wrapper around SkTHashTable, suitable for most use cases.
+// K and V are treated as ordinary copyable C++ types, with no assumed relationship between the two.
+// Maps K->V.  A more user-friendly wrapper around SkTHashTable, suitable for most use cases.
+// K and V are treated as ordinary copyable C++ types, with no assumed relationship between the two.
+template <typename K, typename V, typename HashK = SkGoodHash>
+class SkTHashMap {
+public:
+    // Allow default construction and assignment.
+    SkTHashMap() = default;
+
+    SkTHashMap(SkTHashMap<K, V, HashK>&& that) = default;
+    SkTHashMap(const SkTHashMap<K, V, HashK>& that) = default;
+
+    SkTHashMap<K, V, HashK>& operator=(SkTHashMap<K, V, HashK>&& that) = default;
+    SkTHashMap<K, V, HashK>& operator=(const SkTHashMap<K, V, HashK>& that) = default;
+
+    // Construct with an initializer list of key-value pairs.
+    // Pair doubles as the table's Traits type (GetKey/Hash are found on it).
+    struct Pair : public std::pair<K, V> {
+        using std::pair<K, V>::pair;
+        static const K& GetKey(const Pair& p) { return p.first; }
+        static auto Hash(const K& key) { return HashK()(key); }
+    };
+
+    SkTHashMap(std::initializer_list<Pair> pairs) {
+        // Pre-size to 5/3 of the element count so the bulk insert below stays
+        // under the table's 3/4 grow threshold (no rehash mid-construction).
+        fTable.resize(pairs.size() * 5 / 3);
+        for (const Pair& p : pairs) {
+            fTable.set(p);
+        }
+    }
+
+    // Clear the map.
+    void reset() { fTable.reset(); }
+
+    // How many key/value pairs are in the table?
+    int count() const { return fTable.count(); }
+
+    // Is empty?
+    bool empty() const { return fTable.count() == 0; }
+
+    // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+    size_t approxBytesUsed() const { return fTable.approxBytesUsed(); }
+
+    // N.B. The pointers returned by set() and find() are valid only until the next call to set().
+
+    // Set key to val in the table, replacing any previous value with the same key.
+    // We copy both key and val, and return a pointer to the value copy now in the table.
+    V* set(K key, V val) {
+        Pair* out = fTable.set({std::move(key), std::move(val)});
+        return &out->second;
+    }
+
+    // If there is key/value entry in the table with this key, return a pointer to the value.
+    // If not, return null.
+    V* find(const K& key) const {
+        if (Pair* p = fTable.find(key)) {
+            return &p->second;
+        }
+        return nullptr;
+    }
+
+    // Like std::map::operator[]: inserts a value-initialized V{} if the key is absent.
+    V& operator[](const K& key) {
+        if (V* val = this->find(key)) {
+            return *val;
+        }
+        return *this->set(key, V{});
+    }
+
+    // Remove the key/value entry in the table with this key.
+    // Asserts (debug-only) that the key is present.
+    void remove(const K& key) {
+        SkASSERT(this->find(key));
+        fTable.remove(key);
+    }
+
+    // Call fn on every key/value pair in the table.  You may mutate the value but not the key.
+    template <typename Fn>  // f(K, V*) or f(const K&, V*)
+    void foreach(Fn&& fn) {
+        fTable.foreach([&fn](Pair* p){ fn(p->first, &p->second); });
+    }
+
+    // Call fn on every key/value pair in the table.  You may not mutate anything.
+    template <typename Fn>  // f(K, V), f(const K&, V), f(K, const V&) or f(const K&, const V&).
+    void foreach(Fn&& fn) const {
+        fTable.foreach([&fn](const Pair& p){ fn(p.first, p.second); });
+    }
+
+    // Dereferencing an iterator gives back a key-value pair, suitable for structured binding.
+    using Iter = typename SkTHashTable<Pair, K>::template Iter<std::pair<K, V>>;
+
+    Iter begin() const {
+        return Iter::MakeBegin(&fTable);
+    }
+
+    Iter end() const {
+        return Iter::MakeEnd(&fTable);
+    }
+
+private:
+    SkTHashTable<Pair, K> fTable;
+};
+
+// A set of T. T is treated as an ordinary copyable C++ type.
+// A set of T.  T is treated as an ordinary copyable C++ type.
+template <typename T, typename HashT = SkGoodHash>
+class SkTHashSet {
+public:
+    // Allow default construction and assignment.
+    SkTHashSet() = default;
+
+    SkTHashSet(SkTHashSet<T, HashT>&& that) = default;
+    SkTHashSet(const SkTHashSet<T, HashT>& that) = default;
+
+    SkTHashSet<T, HashT>& operator=(SkTHashSet<T, HashT>&& that) = default;
+    SkTHashSet<T, HashT>& operator=(const SkTHashSet<T, HashT>& that) = default;
+
+    // Construct with an initializer list of Ts.
+    SkTHashSet(std::initializer_list<T> vals) {
+        // Pre-size to 5/3 of the element count so the inserts below stay
+        // under the table's 3/4 grow threshold (no rehash mid-construction).
+        fTable.resize(vals.size() * 5 / 3);
+        for (const T& val : vals) {
+            fTable.set(val);
+        }
+    }
+
+    // Clear the set.
+    void reset() { fTable.reset(); }
+
+    // How many items are in the set?
+    int count() const { return fTable.count(); }
+
+    // Is empty?
+    bool empty() const { return fTable.count() == 0; }
+
+    // Approximately how many bytes of memory do we use beyond sizeof(*this)?
+    size_t approxBytesUsed() const { return fTable.approxBytesUsed(); }
+
+    // Copy an item into the set.
+    void add(T item) { fTable.set(std::move(item)); }
+
+    // Is this item in the set?
+    bool contains(const T& item) const { return SkToBool(this->find(item)); }
+
+    // If an item equal to this is in the set, return a pointer to it, otherwise null.
+    // This pointer remains valid until the next call to add().
+    const T* find(const T& item) const { return fTable.find(item); }
+
+    // Remove the item in the set equal to this.
+    // Asserts (debug-only) that the item is present.
+    void remove(const T& item) {
+        SkASSERT(this->contains(item));
+        fTable.remove(item);
+    }
+
+    // Call fn on every item in the set.  You may not mutate anything.
+    template <typename Fn>  // f(T), f(const T&)
+    void foreach (Fn&& fn) const {
+        fTable.foreach(fn);
+    }
+
+private:
+    // The item is its own key: GetKey is identity, Hash defers to HashT.
+    struct Traits {
+        static const T& GetKey(const T& item) { return item; }
+        static auto Hash(const T& item) { return HashT()(item); }
+    };
+
+public:
+    using Iter = typename SkTHashTable<T, T, Traits>::template Iter<T>;
+
+    Iter begin() const {
+        return Iter::MakeBegin(&fTable);
+    }
+
+    Iter end() const {
+        return Iter::MakeEnd(&fTable);
+    }
+
+private:
+    SkTHashTable<T, T, Traits> fTable;
+};
+
+#endif//SkTHash_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTMultiMap.h b/gfx/skia/skia/src/core/SkTMultiMap.h
new file mode 100644
index 0000000000..c4df7c78d7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTMultiMap.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTMultiMap_DEFINED
+#define SkTMultiMap_DEFINED
+
+#include "src/core/SkTDynamicHash.h"
+
+/** A set that contains pointers to instances of T. Instances can be looked up with key Key.
+ * Multiple (possibly same) values can have the same key.
+ */
+/** A set that contains pointers to instances of T. Instances can be looked up with key Key.
+ * Multiple (possibly same) values can have the same key.
+ *
+ * Implemented as a hash of singly-linked ValueList chains, one chain per
+ * distinct key. HashTraits must additionally provide OnFree(T*), which
+ * reset() calls on every stored value (see reset() below).
+ */
+template <typename T,
+          typename Key,
+          typename HashTraits=T>
+class SkTMultiMap {
+    // One node in a per-key chain. The head node doubles as the hash entry,
+    // so GetKey/Hash delegate to the traits of the contained value.
+    struct ValueList {
+        explicit ValueList(T* value) : fValue(value), fNext(nullptr) {}
+
+        static const Key& GetKey(const ValueList& e) { return HashTraits::GetKey(*e.fValue); }
+        static uint32_t Hash(const Key& key) { return HashTraits::Hash(key); }
+        T* fValue;
+        ValueList* fNext;
+    };
+public:
+    SkTMultiMap() : fCount(0) {}
+
+    ~SkTMultiMap() {
+        this->reset();
+    }
+
+    // Frees every chain node and invokes HashTraits::OnFree on each stored value.
+    void reset() {
+        fHash.foreach([&](ValueList* vl) {
+            ValueList* next;
+            for (ValueList* it = vl; it; it = next) {
+                HashTraits::OnFree(it->fValue);
+                next = it->fNext;
+                delete it;
+            }
+        });
+        fHash.reset();
+        fCount = 0;
+    }
+
+    void insert(const Key& key, T* value) {
+        ValueList* list = fHash.find(key);
+        if (list) {
+            // The new ValueList entry is inserted as the second element in the
+            // linked list, and it will contain the value of the first element.
+            ValueList* newEntry = new ValueList(list->fValue);
+            newEntry->fNext = list->fNext;
+            // The existing first ValueList entry is updated to contain the
+            // inserted value.
+            // (This keeps the hash's stored node stable, so no re-hash is needed.)
+            list->fNext = newEntry;
+            list->fValue = value;
+        } else {
+            fHash.add(new ValueList(value));
+        }
+
+        ++fCount;
+    }
+
+    void remove(const Key& key, const T* value) {
+        ValueList* list = fHash.find(key);
+        // Temporarily making this safe for remove entries not in the map because of
+        // crbug.com/877915.
+#if 0
+        // Since we expect the caller to be fully aware of what is stored, just
+        // assert that the caller removes an existing value.
+        SkASSERT(list);
+        ValueList* prev = nullptr;
+        while (list->fValue != value) {
+            prev = list;
+            list = list->fNext;
+        }
+        this->internalRemove(prev, list, key);
+#else
+        ValueList* prev = nullptr;
+        while (list && list->fValue != value) {
+            prev = list;
+            list = list->fNext;
+        }
+        // Crash in Debug since it'd be great to detect a repro of 877915.
+        SkASSERT(list);
+        if (list) {
+            this->internalRemove(prev, list, key);
+        }
+#endif
+    }
+
+    // Returns an arbitrary value with this key (the chain head), or nullptr.
+    T* find(const Key& key) const {
+        ValueList* list = fHash.find(key);
+        if (list) {
+            return list->fValue;
+        }
+        return nullptr;
+    }
+
+    // Returns the first value with this key accepted by predicate f, or nullptr.
+    template<class FindPredicate>
+    T* find(const Key& key, const FindPredicate f) {
+        ValueList* list = fHash.find(key);
+        while (list) {
+            if (f(list->fValue)){
+                return list->fValue;
+            }
+            list = list->fNext;
+        }
+        return nullptr;
+    }
+
+    // Like find(key, f), but also removes the matching entry from the map.
+    template<class FindPredicate>
+    T* findAndRemove(const Key& key, const FindPredicate f) {
+        ValueList* list = fHash.find(key);
+
+        ValueList* prev = nullptr;
+        while (list) {
+            if (f(list->fValue)){
+                T* value = list->fValue;
+                this->internalRemove(prev, list, key);
+                return value;
+            }
+            prev = list;
+            list = list->fNext;
+        }
+        return nullptr;
+    }
+
+    int count() const { return fCount; }
+
+#ifdef SK_DEBUG
+    template <typename Fn>  // f(T) or f(const T&)
+    void foreach(Fn&& fn) const {
+        fHash.foreach([&](const ValueList& vl) {
+            for (const ValueList* it = &vl; it; it = it->fNext) {
+                fn(*it->fValue);
+            }
+        });
+    }
+
+    bool has(const T* value, const Key& key) const {
+        for (ValueList* list = fHash.find(key); list; list = list->fNext) {
+            if (list->fValue == value) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // This is not particularly fast and only used for validation, so debug only.
+    int countForKey(const Key& key) const {
+        int count = 0;
+        ValueList* list = fHash.find(key);
+        while (list) {
+            list = list->fNext;
+            ++count;
+        }
+        return count;
+    }
+#endif
+
+private:
+    SkTDynamicHash<ValueList, Key> fHash;
+    int fCount;
+
+    // Unlink 'elem' (whose predecessor in the chain is 'prev', or null if it
+    // is the head). Three cases: copy the successor into the head node (keeps
+    // the hash entry stable), drop a tail node, or remove the last node for
+    // this key from the hash entirely.
+    void internalRemove(ValueList* prev, ValueList* elem, const Key& key) {
+        if (elem->fNext) {
+            ValueList* next = elem->fNext;
+            elem->fValue = next->fValue;
+            elem->fNext = next->fNext;
+            delete next;
+        } else if (prev) {
+            prev->fNext = nullptr;
+            delete elem;
+        } else {
+            fHash.remove(key);
+            delete elem;
+        }
+
+        --fCount;
+    }
+
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTaskGroup.cpp b/gfx/skia/skia/src/core/SkTaskGroup.cpp
new file mode 100644
index 0000000000..8199a9b975
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTaskGroup.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkExecutor.h"
+#include "src/core/SkTaskGroup.h"
+
+#include <utility>
+
+SkTaskGroup::SkTaskGroup(SkExecutor& executor) : fPending(0), fExecutor(executor) {}
+
+// Queue one task. fPending tracks outstanding tasks: incremented (relaxed is
+// sufficient; the executor's own hand-off orders the increment before the run)
+// before queueing, decremented with release after fn() completes so that
+// done()'s acquire load observes the task's side effects.
+void SkTaskGroup::add(std::function<void(void)> fn) {
+    fPending.fetch_add(+1, std::memory_order_relaxed);
+    fExecutor.add([this, fn{std::move(fn)}] {
+        fn();
+        fPending.fetch_add(-1, std::memory_order_release);
+    });
+}
+
+// Queue N tasks calling fn(0) .. fn(N-1). Same fPending protocol as add(),
+// but the count is bumped once up front for all N.
+void SkTaskGroup::batch(int N, std::function<void(int)> fn) {
+    // TODO: I really thought we had some sort of more clever chunking logic.
+    fPending.fetch_add(+N, std::memory_order_relaxed);
+    for (int i = 0; i < N; i++) {
+        fExecutor.add([=] {
+            fn(i);
+            fPending.fetch_add(-1, std::memory_order_release);
+        });
+    }
+}
+
+// True once every queued task has finished. The acquire load pairs with the
+// release decrements above, making completed tasks' writes visible here.
+bool SkTaskGroup::done() const {
+    return fPending.load(std::memory_order_acquire) == 0;
+}
+
+void SkTaskGroup::wait() {
+    // Actively help the executor do work until our task group is done.
+    // This lets SkTaskGroups nest arbitrarily deep on a single SkExecutor:
+    // no thread ever blocks waiting for others to do its work.
+    // (We may end up doing work that's not part of our task group. That's fine.)
+    while (!this->done()) {
+        fExecutor.borrow();
+    }
+}
+
+// Testing convenience: installs a LIFO thread pool as the default executor.
+// threads == 0 is a no-op (fThreadPool stays null, default executor unchanged).
+SkTaskGroup::Enabler::Enabler(int threads) {
+    if (threads) {
+        fThreadPool = SkExecutor::MakeLIFOThreadPool(threads);
+        SkExecutor::SetDefault(fThreadPool.get());
+    }
+}
diff --git a/gfx/skia/skia/src/core/SkTaskGroup.h b/gfx/skia/skia/src/core/SkTaskGroup.h
new file mode 100644
index 0000000000..36f444617b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTaskGroup.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTaskGroup_DEFINED
+#define SkTaskGroup_DEFINED
+
+#include "include/core/SkExecutor.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkNoncopyable.h"
+#include <atomic>
+#include <functional>
+
+// A group of tasks dispatched to an SkExecutor; wait() blocks (by helping the
+// executor) until all of the group's tasks have run. The destructor waits too,
+// so tasks never outlive the group.
+class SkTaskGroup : SkNoncopyable {
+public:
+    // Tasks added to this SkTaskGroup will run on its executor.
+    explicit SkTaskGroup(SkExecutor& executor = SkExecutor::GetDefault());
+    ~SkTaskGroup() { this->wait(); }
+
+    // Add a task to this SkTaskGroup.
+    void add(std::function<void(void)> fn);
+
+    // Add a batch of N tasks, all calling fn with different arguments.
+    void batch(int N, std::function<void(int)> fn);
+
+    // Returns true if all Tasks previously add()ed to this SkTaskGroup have run.
+    // It is safe to reuse this SkTaskGroup once done().
+    bool done() const;
+
+    // Block until done().
+    void wait();
+
+    // A convenience for testing tools.
+    // Creates and owns a thread pool, and passes it to SkExecutor::SetDefault().
+    struct Enabler {
+        explicit Enabler(int threads = -1);  // -1 -> num_cores, 0 -> noop
+        std::unique_ptr<SkExecutor> fThreadPool;
+    };
+
+private:
+    // Count of queued-but-not-finished tasks; see SkTaskGroup.cpp for the
+    // acquire/release protocol between task completion and done().
+    std::atomic<int32_t> fPending;
+    SkExecutor& fExecutor;
+};
+
+#endif//SkTaskGroup_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTextBlob.cpp b/gfx/skia/skia/src/core/SkTextBlob.cpp
new file mode 100644
index 0000000000..b1dadfdf47
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextBlob.cpp
@@ -0,0 +1,1009 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTextBlob.h"
+
+#include "include/core/SkRSXform.h"
+#include "include/core/SkTypeface.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/text/GlyphRun.h"
+
+#include <atomic>
+#include <limits>
+#include <new>
+
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)
+#include "src/text/gpu/TextBlobRedrawCoordinator.h"
+#endif
+
+using namespace skia_private;
+
+namespace {
+// Layout double of SkFont, used only to static_assert its size stays fixed
+// (RunRecord embeds an SkFont and its serialized layout must not drift).
+struct RunFontStorageEquivalent {
+    SkScalar fSize, fScaleX;
+    void*    fTypeface;
+    SkScalar fSkewX;
+    uint32_t fFlags;
+};
+static_assert(sizeof(SkFont) == sizeof(RunFontStorageEquivalent), "runfont_should_stay_packed");
+}  // namespace
+
+// Total bytes for one run: the RunRecord header, a 4-byte-aligned glyph array,
+// the position scalars, and — for extended runs — a textSize word, a cluster
+// array (one uint32_t per glyph), and the utf8 text itself. All arithmetic
+// goes through SkSafeMath so overflow is detected by the caller via 'safe'.
+size_t SkTextBlob::RunRecord::StorageSize(uint32_t glyphCount, uint32_t textSize,
+                                          SkTextBlob::GlyphPositioning positioning,
+                                          SkSafeMath* safe) {
+    static_assert(SkIsAlign4(sizeof(SkScalar)), "SkScalar size alignment");
+
+    auto glyphSize = safe->mul(glyphCount, sizeof(uint16_t)),
+           posSize = safe->mul(PosCount(glyphCount, positioning, safe), sizeof(SkScalar));
+
+    // RunRecord object + (aligned) glyph buffer + position buffer
+    auto size = sizeof(SkTextBlob::RunRecord);
+    size = safe->add(size, safe->alignUp(glyphSize, 4));
+    size = safe->add(size, posSize);
+
+    if (textSize) {  // Extended run.
+        size = safe->add(size, sizeof(uint32_t));
+        size = safe->add(size, safe->mul(glyphCount, sizeof(uint32_t)));
+        size = safe->add(size, textSize);
+    }
+
+    // Pointer-align so consecutive RunRecords stay aligned.
+    return safe->alignUp(size, sizeof(void*));
+}
+
+const SkTextBlob::RunRecord* SkTextBlob::RunRecord::First(const SkTextBlob* blob) {
+    // The first record (if present) is stored following the blob object.
+    // (aligned up to make the RunRecord aligned too)
+    return reinterpret_cast<const RunRecord*>(SkAlignPtr((uintptr_t)(blob + 1)));
+}
+
+// Returns the following run, or nullptr if 'run' carries kLast_Flag.
+const SkTextBlob::RunRecord* SkTextBlob::RunRecord::Next(const RunRecord* run) {
+    return SkToBool(run->fFlags & kLast_Flag) ? nullptr : NextUnchecked(run);
+}
+
+namespace {
+// Layout double of RunRecord, for the size static_assert in validate().
+struct RunRecordStorageEquivalent {
+    SkFont   fFont;
+    SkPoint  fOffset;
+    uint32_t fCount;
+    uint32_t fFlags;
+    SkDEBUGCODE(unsigned fMagic;)
+};
+}  // namespace
+
+// Debug-only consistency check: every internal buffer must lie within this
+// record's storage, in the order glyphs < positions < (textSize, clusters, text).
+void SkTextBlob::RunRecord::validate(const uint8_t* storageTop) const {
+    SkASSERT(kRunRecordMagic == fMagic);
+    SkASSERT((uint8_t*)NextUnchecked(this) <= storageTop);
+
+    SkASSERT(glyphBuffer() + fCount <= (uint16_t*)posBuffer());
+    SkASSERT(posBuffer() + fCount * ScalarsPerGlyph(positioning())
+             <= (SkScalar*)NextUnchecked(this));
+    if (isExtended()) {
+        SkASSERT(textSize() > 0);
+        SkASSERT(textSizePtr() < (uint32_t*)NextUnchecked(this));
+        SkASSERT(clusterBuffer() < (uint32_t*)NextUnchecked(this));
+        SkASSERT(textBuffer() + textSize() <= (char*)NextUnchecked(this));
+    }
+    static_assert(sizeof(SkTextBlob::RunRecord) == sizeof(RunRecordStorageEquivalent),
+                  "runrecord_should_stay_packed");
+}
+
+// Advance past this run without consulting kLast_Flag; callers must know a
+// next record exists (or only use the address for bounds checks).
+const SkTextBlob::RunRecord* SkTextBlob::RunRecord::NextUnchecked(const RunRecord* run) {
+    SkSafeMath safe;
+    auto res = reinterpret_cast<const RunRecord*>(
+            reinterpret_cast<const uint8_t*>(run)
+            + StorageSize(run->glyphCount(), run->textSize(), run->positioning(), &safe));
+    SkASSERT(safe);
+    return res;
+}
+
+// Number of position scalars stored for 'glyphCount' glyphs (0/1/2/4 per glyph).
+size_t SkTextBlob::RunRecord::PosCount(uint32_t glyphCount,
+                                       SkTextBlob::GlyphPositioning positioning,
+                                       SkSafeMath* safe) {
+    return safe->mul(glyphCount, ScalarsPerGlyph(positioning));
+}
+
+uint32_t* SkTextBlob::RunRecord::textSizePtr() const {
+    // textSize follows the position buffer.
+    SkASSERT(isExtended());
+    SkSafeMath safe;
+    auto res = (uint32_t*)(&this->posBuffer()[PosCount(fCount, positioning(), &safe)]);
+    SkASSERT(safe);
+    return res;
+}
+
+// Extend this run in place by 'count' glyphs. The caller must already have
+// reserved the extra storage; the assert checks the enlarged pos buffer still
+// fits within this record.
+void SkTextBlob::RunRecord::grow(uint32_t count) {
+    SkScalar* initialPosBuffer = posBuffer();
+    uint32_t initialCount = fCount;
+    fCount += count;
+
+    // Move the initial pos scalars to their new location.
+    size_t copySize = initialCount * sizeof(SkScalar) * ScalarsPerGlyph(positioning());
+    SkASSERT((uint8_t*)posBuffer() + copySize <= (uint8_t*)NextUnchecked(this));
+
+    // memmove, as the buffers may overlap
+    memmove(posBuffer(), initialPosBuffer, copySize);
+}
+
+// Process-wide unique blob ID generator. Skips SK_InvalidGenID so that value
+// stays reserved as "no ID", even after the 32-bit counter wraps.
+static int32_t next_id() {
+    static std::atomic<int32_t> nextID{1};
+    int32_t id;
+    do {
+        id = nextID.fetch_add(1, std::memory_order_relaxed);
+    } while (id == SK_InvalidGenID);
+    return id;
+}
+
+SkTextBlob::SkTextBlob(const SkRect& bounds)
+    : fBounds(bounds)
+    , fUniqueID(next_id())
+    , fCacheID(SK_InvalidUniqueID) {}
+
+SkTextBlob::~SkTextBlob() {
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)
+    // If this blob ever made it into the GPU text cache, tell the cache to drop it.
+    if (SK_InvalidUniqueID != fCacheID.load()) {
+        sktext::gpu::TextBlobRedrawCoordinator::PostPurgeBlobMessage(fUniqueID, fCacheID);
+    }
+#endif
+
+    // Runs live inline in this allocation (placement-constructed), so destroy
+    // each one explicitly; grab the next pointer before destroying the current.
+    const auto* run = RunRecord::First(this);
+    do {
+        const auto* nextRun = RunRecord::Next(run);
+        SkDEBUGCODE(run->validate((uint8_t*)this + fStorageSize);)
+        run->~RunRecord();
+        run = nextRun;
+    } while (run);
+}
+
+namespace {
+
+// Packs positioning + extended flags into a single int32 for serialization.
+union PositioningAndExtended {
+    int32_t intValue;
+    struct {
+        uint8_t  positioning;
+        uint8_t  extended;
+        uint16_t padding;
+    };
+};
+
+static_assert(sizeof(PositioningAndExtended) == sizeof(int32_t), "");
+
+}  // namespace
+
+enum SkTextBlob::GlyphPositioning : uint8_t {
+    kDefault_Positioning      = 0, // Default glyph advances -- zero scalars per glyph.
+    kHorizontal_Positioning   = 1, // Horizontal positioning -- one scalar per glyph.
+    kFull_Positioning         = 2, // Point positioning -- two scalars per glyph.
+    kRSXform_Positioning      = 3, // RSXform positioning -- four scalars per glyph.
+};
+
+// Lookup table mirroring the comments on GlyphPositioning above.
+unsigned SkTextBlob::ScalarsPerGlyph(GlyphPositioning pos) {
+    const uint8_t gScalarsPerPositioning[] = {
+        0,  // kDefault_Positioning
+        1,  // kHorizontal_Positioning
+        2,  // kFull_Positioning
+        4,  // kRSXform_Positioning
+    };
+    SkASSERT((unsigned)pos <= 3);
+    return gScalarsPerPositioning[pos];
+}
+
+void SkTextBlob::operator delete(void* p) {
+    sk_free(p);
+}
+
+// Blobs are variable-size and built into a caller-sized allocation, so plain
+// operator new is forbidden; only the placement form below is used.
+void* SkTextBlob::operator new(size_t) {
+    SK_ABORT("All blobs are created by placement new.");
+}
+
+void* SkTextBlob::operator new(size_t, void* p) {
+    return p;
+}
+
+SkTextBlobRunIterator::SkTextBlobRunIterator(const SkTextBlob* blob)
+    : fCurrentRun(SkTextBlob::RunRecord::First(blob)) {
+    // Debug-only: remember the end of the blob's storage for validate().
+    SkDEBUGCODE(fStorageTop = (uint8_t*)blob + blob->fStorageSize;)
+}
+
+void SkTextBlobRunIterator::next() {
+    SkASSERT(!this->done());
+
+    if (!this->done()) {
+        SkDEBUGCODE(fCurrentRun->validate(fStorageTop);)
+        // Next() returns nullptr after the last run, which is our done() state.
+        fCurrentRun = SkTextBlob::RunRecord::Next(fCurrentRun);
+    }
+}
+
+SkTextBlobRunIterator::GlyphPositioning SkTextBlobRunIterator::positioning() const {
+    SkASSERT(!this->done());
+    // The iterator re-declares the positioning enum publicly; these asserts pin
+    // its values to SkTextBlob's private enum so the cast below stays valid.
+    static_assert(static_cast<GlyphPositioning>(SkTextBlob::kDefault_Positioning) ==
+                  kDefault_Positioning, "");
+    static_assert(static_cast<GlyphPositioning>(SkTextBlob::kHorizontal_Positioning) ==
+                  kHorizontal_Positioning, "");
+    static_assert(static_cast<GlyphPositioning>(SkTextBlob::kFull_Positioning) ==
+                  kFull_Positioning, "");
+    static_assert(static_cast<GlyphPositioning>(SkTextBlob::kRSXform_Positioning) ==
+                  kRSXform_Positioning, "");
+
+    return SkTo<GlyphPositioning>(fCurrentRun->positioning());
+}
+
+unsigned SkTextBlobRunIterator::scalarsPerGlyph() const {
+    return SkTextBlob::ScalarsPerGlyph(fCurrentRun->positioning());
+}
+
+// True when the current run's font uses subpixel (LCD) antialiasing.
+bool SkTextBlobRunIterator::isLCD() const {
+    return fCurrentRun->font().getEdging() == SkFont::Edging::kSubpixelAntiAlias;
+}
+
+SkTextBlobBuilder::SkTextBlobBuilder()
+    : fStorageSize(0)
+    , fStorageUsed(0)
+    , fRunCount(0)
+    , fDeferredBounds(false)
+    , fLastRun(0) {
+    fBounds.setEmpty();
+}
+
+SkTextBlobBuilder::~SkTextBlobBuilder() {
+    if (nullptr != fStorage.get()) {
+        // We are abandoning runs and must destruct the associated font data.
+        // The easiest way to accomplish that is to use the blob destructor.
+        this->make();
+    }
+}
+
+// Maps 'rect' through the rotation+scale+translate transform described by
+// 'xform', returning the bounding rect of the transformed quad.
+static SkRect map_quad_to_rect(const SkRSXform& xform, const SkRect& rect) {
+    return SkMatrix().setRSXform(xform).mapRect(rect);
+}
+
+SkRect SkTextBlobBuilder::TightRunBounds(const SkTextBlob::RunRecord& run) {
+ const SkFont& font = run.font();
+ SkRect bounds;
+
+ if (SkTextBlob::kDefault_Positioning == run.positioning()) {
+ font.measureText(run.glyphBuffer(), run.glyphCount() * sizeof(uint16_t),
+ SkTextEncoding::kGlyphID, &bounds);
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+ }
+
+ AutoSTArray<16, SkRect> glyphBounds(run.glyphCount());
+ font.getBounds(run.glyphBuffer(), run.glyphCount(), glyphBounds.get(), nullptr);
+
+ if (SkTextBlob::kRSXform_Positioning == run.positioning()) {
+ bounds.setEmpty();
+ const SkRSXform* xform = run.xformBuffer();
+ SkASSERT((void*)(xform + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+ for (unsigned i = 0; i < run.glyphCount(); ++i) {
+ bounds.join(map_quad_to_rect(xform[i], glyphBounds[i]));
+ }
+ } else {
+ SkASSERT(SkTextBlob::kFull_Positioning == run.positioning() ||
+ SkTextBlob::kHorizontal_Positioning == run.positioning());
+ // kFull_Positioning => [ x, y, x, y... ]
+ // kHorizontal_Positioning => [ x, x, x... ]
+ // (const y applied by runBounds.offset(run->offset()) later)
+ const SkScalar horizontalConstY = 0;
+ const SkScalar* glyphPosX = run.posBuffer();
+ const SkScalar* glyphPosY = (run.positioning() == SkTextBlob::kFull_Positioning) ?
+ glyphPosX + 1 : &horizontalConstY;
+ const unsigned posXInc = SkTextBlob::ScalarsPerGlyph(run.positioning());
+ const unsigned posYInc = (run.positioning() == SkTextBlob::kFull_Positioning) ?
+ posXInc : 0;
+
+ bounds.setEmpty();
+ for (unsigned i = 0; i < run.glyphCount(); ++i) {
+ bounds.join(glyphBounds[i].makeOffset(*glyphPosX, *glyphPosY));
+ glyphPosX += posXInc;
+ glyphPosY += posYInc;
+ }
+
+ SkASSERT((void*)glyphPosX <= SkTextBlob::RunRecord::Next(&run));
+ }
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+}
+
+SkRect SkTextBlobBuilder::ConservativeRunBounds(const SkTextBlob::RunRecord& run) {
+ SkASSERT(run.glyphCount() > 0);
+ SkASSERT(SkTextBlob::kFull_Positioning == run.positioning() ||
+ SkTextBlob::kHorizontal_Positioning == run.positioning() ||
+ SkTextBlob::kRSXform_Positioning == run.positioning());
+
+ const SkRect fontBounds = SkFontPriv::GetFontBounds(run.font());
+ if (fontBounds.isEmpty()) {
+ // Empty font bounds are likely a font bug. TightBounds has a better chance of
+ // producing useful results in this case.
+ return TightRunBounds(run);
+ }
+
+ // Compute the glyph position bbox.
+ SkRect bounds;
+ switch (run.positioning()) {
+ case SkTextBlob::kHorizontal_Positioning: {
+ const SkScalar* glyphPos = run.posBuffer();
+ SkASSERT((void*)(glyphPos + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+
+ SkScalar minX = *glyphPos;
+ SkScalar maxX = *glyphPos;
+ for (unsigned i = 1; i < run.glyphCount(); ++i) {
+ SkScalar x = glyphPos[i];
+ minX = std::min(x, minX);
+ maxX = std::max(x, maxX);
+ }
+
+ bounds.setLTRB(minX, 0, maxX, 0);
+ } break;
+ case SkTextBlob::kFull_Positioning: {
+ const SkPoint* glyphPosPts = run.pointBuffer();
+ SkASSERT((void*)(glyphPosPts + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+
+ bounds.setBounds(glyphPosPts, run.glyphCount());
+ } break;
+ case SkTextBlob::kRSXform_Positioning: {
+ const SkRSXform* xform = run.xformBuffer();
+ SkASSERT((void*)(xform + run.glyphCount()) <= SkTextBlob::RunRecord::Next(&run));
+ bounds.setEmpty();
+ for (unsigned i = 0; i < run.glyphCount(); ++i) {
+ bounds.join(map_quad_to_rect(xform[i], fontBounds));
+ }
+ } break;
+ default:
+ SK_ABORT("unsupported positioning mode");
+ }
+
+ if (run.positioning() != SkTextBlob::kRSXform_Positioning) {
+ // Expand by typeface glyph bounds.
+ bounds.fLeft += fontBounds.left();
+ bounds.fTop += fontBounds.top();
+ bounds.fRight += fontBounds.right();
+ bounds.fBottom += fontBounds.bottom();
+ }
+
+ // Offset by run position.
+ return bounds.makeOffset(run.offset().x(), run.offset().y());
+}
+
+void SkTextBlobBuilder::updateDeferredBounds() {
+ SkASSERT(!fDeferredBounds || fRunCount > 0);
+
+ if (!fDeferredBounds) {
+ return;
+ }
+
+ SkASSERT(fLastRun >= SkAlignPtr(sizeof(SkTextBlob)));
+ SkTextBlob::RunRecord* run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() +
+ fLastRun);
+
+ // FIXME: we should also use conservative bounds for kDefault_Positioning.
+ SkRect runBounds = SkTextBlob::kDefault_Positioning == run->positioning() ?
+ TightRunBounds(*run) : ConservativeRunBounds(*run);
+ fBounds.join(runBounds);
+ fDeferredBounds = false;
+}
+
+void SkTextBlobBuilder::reserve(size_t size) {
+ SkSafeMath safe;
+
+ // We don't currently pre-allocate, but maybe someday...
+ if (safe.add(fStorageUsed, size) <= fStorageSize && safe) {
+ return;
+ }
+
+ if (0 == fRunCount) {
+ SkASSERT(nullptr == fStorage.get());
+ SkASSERT(0 == fStorageSize);
+ SkASSERT(0 == fStorageUsed);
+
+ // the first allocation also includes blob storage
+ // aligned up to a pointer alignment so SkTextBlob::RunRecords after it stay aligned.
+ fStorageUsed = SkAlignPtr(sizeof(SkTextBlob));
+ }
+
+ fStorageSize = safe.add(fStorageUsed, size);
+
+ // FYI: This relies on everything we store being relocatable, particularly SkPaint.
+ // Also, this is counting on the underlying realloc to throw when passed max().
+ fStorage.realloc(safe ? fStorageSize : std::numeric_limits<size_t>::max());
+}
+
+bool SkTextBlobBuilder::mergeRun(const SkFont& font, SkTextBlob::GlyphPositioning positioning,
+ uint32_t count, SkPoint offset) {
+ if (0 == fLastRun) {
+ SkASSERT(0 == fRunCount);
+ return false;
+ }
+
+ SkASSERT(fLastRun >= SkAlignPtr(sizeof(SkTextBlob)));
+ SkTextBlob::RunRecord* run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() +
+ fLastRun);
+ SkASSERT(run->glyphCount() > 0);
+
+ if (run->textSize() != 0) {
+ return false;
+ }
+
+ if (run->positioning() != positioning
+ || run->font() != font
+ || (run->glyphCount() + count < run->glyphCount())) {
+ return false;
+ }
+
+ // we can merge same-font/same-positioning runs in the following cases:
+ // * fully positioned run following another fully positioned run
+ // * horizontally positioned run following another horizontally positioned run with the same
+ // y-offset
+ if (SkTextBlob::kFull_Positioning != positioning
+ && (SkTextBlob::kHorizontal_Positioning != positioning
+ || run->offset().y() != offset.y())) {
+ return false;
+ }
+
+ SkSafeMath safe;
+ size_t sizeDelta =
+ SkTextBlob::RunRecord::StorageSize(run->glyphCount() + count, 0, positioning, &safe) -
+ SkTextBlob::RunRecord::StorageSize(run->glyphCount() , 0, positioning, &safe);
+ if (!safe) {
+ return false;
+ }
+
+ this->reserve(sizeDelta);
+
+ // reserve may have realloced
+ run = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() + fLastRun);
+ uint32_t preMergeCount = run->glyphCount();
+ run->grow(count);
+
+ // Callers expect the buffers to point at the newly added slice, and not at the beginning.
+ fCurrentRunBuffer.glyphs = run->glyphBuffer() + preMergeCount;
+ fCurrentRunBuffer.pos = run->posBuffer()
+ + preMergeCount * SkTextBlob::ScalarsPerGlyph(positioning);
+
+ fStorageUsed += sizeDelta;
+
+ SkASSERT(fStorageUsed <= fStorageSize);
+ run->validate(fStorage.get() + fStorageUsed);
+
+ return true;
+}
+
+void SkTextBlobBuilder::allocInternal(const SkFont& font,
+ SkTextBlob::GlyphPositioning positioning,
+ int count, int textSize, SkPoint offset,
+ const SkRect* bounds) {
+ if (count <= 0 || textSize < 0) {
+ fCurrentRunBuffer = { nullptr, nullptr, nullptr, nullptr };
+ return;
+ }
+
+ if (textSize != 0 || !this->mergeRun(font, positioning, count, offset)) {
+ this->updateDeferredBounds();
+
+ SkSafeMath safe;
+ size_t runSize = SkTextBlob::RunRecord::StorageSize(count, textSize, positioning, &safe);
+ if (!safe) {
+ fCurrentRunBuffer = { nullptr, nullptr, nullptr, nullptr };
+ return;
+ }
+
+ this->reserve(runSize);
+
+ SkASSERT(fStorageUsed >= SkAlignPtr(sizeof(SkTextBlob)));
+ SkASSERT(fStorageUsed + runSize <= fStorageSize);
+
+ SkTextBlob::RunRecord* run = new (fStorage.get() + fStorageUsed)
+ SkTextBlob::RunRecord(count, textSize, offset, font, positioning);
+ fCurrentRunBuffer.glyphs = run->glyphBuffer();
+ fCurrentRunBuffer.pos = run->posBuffer();
+ fCurrentRunBuffer.utf8text = run->textBuffer();
+ fCurrentRunBuffer.clusters = run->clusterBuffer();
+
+ fLastRun = fStorageUsed;
+ fStorageUsed += runSize;
+ fRunCount++;
+
+ SkASSERT(fStorageUsed <= fStorageSize);
+ run->validate(fStorage.get() + fStorageUsed);
+ }
+ SkASSERT(textSize > 0 || nullptr == fCurrentRunBuffer.utf8text);
+ SkASSERT(textSize > 0 || nullptr == fCurrentRunBuffer.clusters);
+ if (!fDeferredBounds) {
+ if (bounds) {
+ fBounds.join(*bounds);
+ } else {
+ fDeferredBounds = true;
+ }
+ }
+}
+
+// SkFont versions
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRun(const SkFont& font, int count,
+ SkScalar x, SkScalar y,
+ const SkRect* bounds) {
+ this->allocInternal(font, SkTextBlob::kDefault_Positioning, count, 0, {x, y}, bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunPosH(const SkFont& font, int count,
+ SkScalar y,
+ const SkRect* bounds) {
+ this->allocInternal(font, SkTextBlob::kHorizontal_Positioning, count, 0, {0, y}, bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunPos(const SkFont& font, int count,
+ const SkRect* bounds) {
+ this->allocInternal(font, SkTextBlob::kFull_Positioning, count, 0, {0, 0}, bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer&
+SkTextBlobBuilder::allocRunRSXform(const SkFont& font, int count) {
+ this->allocInternal(font, SkTextBlob::kRSXform_Positioning, count, 0, {0, 0}, nullptr);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunText(const SkFont& font, int count,
+ SkScalar x, SkScalar y,
+ int textByteCount,
+ const SkRect* bounds) {
+ this->allocInternal(font,
+ SkTextBlob::kDefault_Positioning,
+ count,
+ textByteCount,
+ SkPoint::Make(x, y),
+ bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunTextPosH(const SkFont& font,
+ int count,
+ SkScalar y,
+ int textByteCount,
+ const SkRect* bounds) {
+ this->allocInternal(font,
+ SkTextBlob::kHorizontal_Positioning,
+ count,
+ textByteCount,
+ SkPoint::Make(0, y),
+ bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunTextPos(const SkFont& font,
+ int count,
+ int textByteCount,
+ const SkRect *bounds) {
+ this->allocInternal(font,
+ SkTextBlob::kFull_Positioning,
+ count, textByteCount,
+ SkPoint::Make(0, 0),
+ bounds);
+ return fCurrentRunBuffer;
+}
+
+const SkTextBlobBuilder::RunBuffer& SkTextBlobBuilder::allocRunTextRSXform(const SkFont& font,
+ int count,
+ int textByteCount,
+ const SkRect *bounds) {
+ this->allocInternal(font,
+ SkTextBlob::kRSXform_Positioning,
+ count,
+ textByteCount,
+ {0, 0},
+ bounds);
+ return fCurrentRunBuffer;
+}
+
+sk_sp<SkTextBlob> SkTextBlobBuilder::make() {
+ if (!fRunCount) {
+ // We don't instantiate empty blobs.
+ SkASSERT(!fStorage.get());
+ SkASSERT(fStorageUsed == 0);
+ SkASSERT(fStorageSize == 0);
+ SkASSERT(fLastRun == 0);
+ SkASSERT(fBounds.isEmpty());
+ return nullptr;
+ }
+
+ this->updateDeferredBounds();
+
+ // Tag the last run as such.
+ auto* lastRun = reinterpret_cast<SkTextBlob::RunRecord*>(fStorage.get() + fLastRun);
+ lastRun->fFlags |= SkTextBlob::RunRecord::kLast_Flag;
+
+ SkTextBlob* blob = new (fStorage.release()) SkTextBlob(fBounds);
+ SkDEBUGCODE(const_cast<SkTextBlob*>(blob)->fStorageSize = fStorageSize;)
+
+ SkDEBUGCODE(
+ SkSafeMath safe;
+ size_t validateSize = SkAlignPtr(sizeof(SkTextBlob));
+ for (const auto* run = SkTextBlob::RunRecord::First(blob); run;
+ run = SkTextBlob::RunRecord::Next(run)) {
+ validateSize += SkTextBlob::RunRecord::StorageSize(
+ run->fCount, run->textSize(), run->positioning(), &safe);
+ run->validate(reinterpret_cast<const uint8_t*>(blob) + fStorageUsed);
+ fRunCount--;
+ }
+ SkASSERT(validateSize == fStorageUsed);
+ SkASSERT(fRunCount == 0);
+ SkASSERT(safe);
+ )
+
+ fStorageUsed = 0;
+ fStorageSize = 0;
+ fRunCount = 0;
+ fLastRun = 0;
+ fBounds.setEmpty();
+
+ return sk_sp<SkTextBlob>(blob);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+void SkTextBlobPriv::Flatten(const SkTextBlob& blob, SkWriteBuffer& buffer) {
+ // seems like we could skip this, and just recompute bounds in unflatten, but
+ // some cc_unittests fail if we remove this...
+ buffer.writeRect(blob.bounds());
+
+ SkTextBlobRunIterator it(&blob);
+ while (!it.done()) {
+ SkASSERT(it.glyphCount() > 0);
+
+ buffer.write32(it.glyphCount());
+ PositioningAndExtended pe;
+ pe.intValue = 0;
+ pe.positioning = it.positioning();
+ SkASSERT((int32_t)it.positioning() == pe.intValue); // backwards compat.
+
+ uint32_t textSize = it.textSize();
+ pe.extended = textSize > 0;
+ buffer.write32(pe.intValue);
+ if (pe.extended) {
+ buffer.write32(textSize);
+ }
+ buffer.writePoint(it.offset());
+
+ SkFontPriv::Flatten(it.font(), buffer);
+
+ buffer.writeByteArray(it.glyphs(), it.glyphCount() * sizeof(uint16_t));
+ buffer.writeByteArray(it.pos(),
+ it.glyphCount() * sizeof(SkScalar) *
+ SkTextBlob::ScalarsPerGlyph(
+ SkTo<SkTextBlob::GlyphPositioning>(it.positioning())));
+ if (pe.extended) {
+ buffer.writeByteArray(it.clusters(), sizeof(uint32_t) * it.glyphCount());
+ buffer.writeByteArray(it.text(), it.textSize());
+ }
+
+ it.next();
+ }
+
+ // Marker for the last run (0 is not a valid glyph count).
+ buffer.write32(0);
+}
+
+sk_sp<SkTextBlob> SkTextBlobPriv::MakeFromBuffer(SkReadBuffer& reader) {
+ SkRect bounds;
+ reader.readRect(&bounds);
+
+ SkTextBlobBuilder blobBuilder;
+ SkSafeMath safe;
+ for (;;) {
+ int glyphCount = reader.read32();
+ if (glyphCount == 0) {
+ // End-of-runs marker.
+ break;
+ }
+
+ PositioningAndExtended pe;
+ pe.intValue = reader.read32();
+ const auto pos = SkTo<SkTextBlob::GlyphPositioning>(pe.positioning);
+ if (glyphCount <= 0 || pos > SkTextBlob::kRSXform_Positioning) {
+ return nullptr;
+ }
+ int textSize = pe.extended ? reader.read32() : 0;
+ if (textSize < 0) {
+ return nullptr;
+ }
+
+ SkPoint offset;
+ reader.readPoint(&offset);
+ SkFont font;
+ SkFontPriv::Unflatten(&font, reader);
+
+ // Compute the expected size of the buffer and ensure we have enough to deserialize
+ // a run before allocating it.
+ const size_t glyphSize = safe.mul(glyphCount, sizeof(uint16_t)),
+ posSize =
+ safe.mul(glyphCount, safe.mul(sizeof(SkScalar),
+ SkTextBlob::ScalarsPerGlyph(pos))),
+ clusterSize = pe.extended ? safe.mul(glyphCount, sizeof(uint32_t)) : 0;
+ const size_t totalSize =
+ safe.add(safe.add(glyphSize, posSize), safe.add(clusterSize, textSize));
+
+ if (!reader.isValid() || !safe || totalSize > reader.available()) {
+ return nullptr;
+ }
+
+ const SkTextBlobBuilder::RunBuffer* buf = nullptr;
+ switch (pos) {
+ case SkTextBlob::kDefault_Positioning:
+ buf = &blobBuilder.allocRunText(font, glyphCount, offset.x(), offset.y(),
+ textSize, &bounds);
+ break;
+ case SkTextBlob::kHorizontal_Positioning:
+ buf = &blobBuilder.allocRunTextPosH(font, glyphCount, offset.y(),
+ textSize, &bounds);
+ break;
+ case SkTextBlob::kFull_Positioning:
+ buf = &blobBuilder.allocRunTextPos(font, glyphCount, textSize, &bounds);
+ break;
+ case SkTextBlob::kRSXform_Positioning:
+ buf = &blobBuilder.allocRunTextRSXform(font, glyphCount, textSize, &bounds);
+ break;
+ }
+
+ if (!buf->glyphs ||
+ !buf->pos ||
+ (pe.extended && (!buf->clusters || !buf->utf8text))) {
+ return nullptr;
+ }
+
+ if (!reader.readByteArray(buf->glyphs, glyphSize) ||
+ !reader.readByteArray(buf->pos, posSize)) {
+ return nullptr;
+ }
+
+ if (pe.extended) {
+ if (!reader.readByteArray(buf->clusters, clusterSize) ||
+ !reader.readByteArray(buf->utf8text, textSize)) {
+ return nullptr;
+ }
+ }
+ }
+
+ return blobBuilder.make();
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromText(const void* text, size_t byteLength, const SkFont& font,
+ SkTextEncoding encoding) {
+ // Note: we deliberately promote this to fully positioned blobs, since we'd have to pay the
+ // same cost downstream (i.e. computing bounds), so it's cheaper to pay the cost once now.
+ const int count = font.countText(text, byteLength, encoding);
+ if (count < 1) {
+ return nullptr;
+ }
+ SkTextBlobBuilder builder;
+ auto buffer = builder.allocRunPos(font, count);
+ font.textToGlyphs(text, byteLength, encoding, buffer.glyphs, count);
+ font.getPos(buffer.glyphs, count, buffer.points(), {0, 0});
+ return builder.make();
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromPosText(const void* text, size_t byteLength,
+ const SkPoint pos[], const SkFont& font,
+ SkTextEncoding encoding) {
+ const int count = font.countText(text, byteLength, encoding);
+ if (count < 1) {
+ return nullptr;
+ }
+ SkTextBlobBuilder builder;
+ auto buffer = builder.allocRunPos(font, count);
+ font.textToGlyphs(text, byteLength, encoding, buffer.glyphs, count);
+ memcpy(buffer.points(), pos, count * sizeof(SkPoint));
+ return builder.make();
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromPosTextH(const void* text, size_t byteLength,
+ const SkScalar xpos[], SkScalar constY,
+ const SkFont& font, SkTextEncoding encoding) {
+ const int count = font.countText(text, byteLength, encoding);
+ if (count < 1) {
+ return nullptr;
+ }
+ SkTextBlobBuilder builder;
+ auto buffer = builder.allocRunPosH(font, count, constY);
+ font.textToGlyphs(text, byteLength, encoding, buffer.glyphs, count);
+ memcpy(buffer.pos, xpos, count * sizeof(SkScalar));
+ return builder.make();
+}
+
+sk_sp<SkTextBlob> SkTextBlob::MakeFromRSXform(const void* text, size_t byteLength,
+ const SkRSXform xform[], const SkFont& font,
+ SkTextEncoding encoding) {
+ const int count = font.countText(text, byteLength, encoding);
+ if (count < 1) {
+ return nullptr;
+ }
+ SkTextBlobBuilder builder;
+ auto buffer = builder.allocRunRSXform(font, count);
+ font.textToGlyphs(text, byteLength, encoding, buffer.glyphs, count);
+ memcpy(buffer.xforms(), xform, count * sizeof(SkRSXform));
+ return builder.make();
+}
+
+sk_sp<SkData> SkTextBlob::serialize(const SkSerialProcs& procs) const {
+ SkBinaryWriteBuffer buffer;
+ buffer.setSerialProcs(procs);
+ SkTextBlobPriv::Flatten(*this, buffer);
+
+ size_t total = buffer.bytesWritten();
+ sk_sp<SkData> data = SkData::MakeUninitialized(total);
+ buffer.writeToMemory(data->writable_data());
+ return data;
+}
+
+sk_sp<SkTextBlob> SkTextBlob::Deserialize(const void* data, size_t length,
+ const SkDeserialProcs& procs) {
+ SkReadBuffer buffer(data, length);
+ buffer.setDeserialProcs(procs);
+ return SkTextBlobPriv::MakeFromBuffer(buffer);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+size_t SkTextBlob::serialize(const SkSerialProcs& procs, void* memory, size_t memory_size) const {
+ SkBinaryWriteBuffer buffer(memory, memory_size);
+ buffer.setSerialProcs(procs);
+ SkTextBlobPriv::Flatten(*this, buffer);
+ return buffer.usingInitialStorage() ? buffer.bytesWritten() : 0u;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+int get_glyph_run_intercepts(const sktext::GlyphRun& glyphRun,
+ const SkPaint& paint,
+ const SkScalar bounds[2],
+ SkScalar intervals[],
+ int* intervalCount) {
+ SkScalar scale = SK_Scalar1;
+ SkPaint interceptPaint{paint};
+ SkFont interceptFont{glyphRun.font()};
+
+ interceptPaint.setMaskFilter(nullptr); // don't want this affecting our path-cache lookup
+
+ // can't use our canonical size if we need to apply path effects
+ if (interceptPaint.getPathEffect() == nullptr) {
+ // If the wrong size is going to be used, don't hint anything.
+ interceptFont.setHinting(SkFontHinting::kNone);
+ interceptFont.setSubpixel(true);
+ scale = interceptFont.getSize() / SkFontPriv::kCanonicalTextSizeForPaths;
+ interceptFont.setSize(SkIntToScalar(SkFontPriv::kCanonicalTextSizeForPaths));
+ // Note: scale can be zero here (even if it wasn't before the divide). It can also
+ // be very very small. We call sk_ieee_float_divide below to ensure IEEE divide behavior,
+ // since downstream we will check for the resulting coordinates being non-finite anyway.
+ // Thus we don't need to check for zero here.
+ if (interceptPaint.getStrokeWidth() > 0
+ && interceptPaint.getStyle() != SkPaint::kFill_Style) {
+ interceptPaint.setStrokeWidth(
+ sk_ieee_float_divide(interceptPaint.getStrokeWidth(), scale));
+ }
+ }
+
+ interceptPaint.setStyle(SkPaint::kFill_Style);
+ interceptPaint.setPathEffect(nullptr);
+
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeWithNoDevice(interceptFont, &interceptPaint);
+ SkBulkGlyphMetricsAndPaths metricsAndPaths{strikeSpec};
+
+ const SkPoint* posCursor = glyphRun.positions().begin();
+ for (const SkGlyph* glyph : metricsAndPaths.glyphs(glyphRun.glyphsIDs())) {
+ SkPoint pos = *posCursor++;
+
+ if (glyph->path() != nullptr) {
+ // The typeface is scaled, so un-scale the bounds to be in the space of the typeface.
+ // Also ensure the bounds are properly offset by the vertical positioning of the glyph.
+ SkScalar scaledBounds[2] = {
+ (bounds[0] - pos.y()) / scale,
+ (bounds[1] - pos.y()) / scale
+ };
+ metricsAndPaths.findIntercepts(
+ scaledBounds, scale, pos.x(), glyph, intervals, intervalCount);
+ }
+ }
+ return *intervalCount;
+}
+} // namespace
+
+int SkTextBlob::getIntercepts(const SkScalar bounds[2], SkScalar intervals[],
+ const SkPaint* paint) const {
+ SkTLazy<SkPaint> defaultPaint;
+ if (paint == nullptr) {
+ defaultPaint.init();
+ paint = defaultPaint.get();
+ }
+
+ sktext::GlyphRunBuilder builder;
+ auto glyphRunList = builder.blobToGlyphRunList(*this, {0, 0});
+
+ int intervalCount = 0;
+ for (const sktext::GlyphRun& glyphRun : glyphRunList) {
+ // Ignore RSXForm runs.
+ if (glyphRun.scaledRotations().empty()) {
+ intervalCount = get_glyph_run_intercepts(
+ glyphRun, *paint, bounds, intervals, &intervalCount);
+ }
+ }
+
+ return intervalCount;
+}
+
+std::vector<SkScalar> SkFont::getIntercepts(const SkGlyphID glyphs[], int count,
+ const SkPoint positions[],
+ SkScalar top, SkScalar bottom,
+ const SkPaint* paintPtr) const {
+ if (count <= 0) {
+ return std::vector<SkScalar>();
+ }
+
+ const SkPaint paint(paintPtr ? *paintPtr : SkPaint());
+ const SkScalar bounds[] = {top, bottom};
+ const sktext::GlyphRun run(*this,
+ {positions, size_t(count)}, {glyphs, size_t(count)},
+ {nullptr, 0}, {nullptr, 0}, {nullptr, 0});
+
+ std::vector<SkScalar> result;
+ result.resize(count * 2); // worst case allocation
+ int intervalCount = 0;
+ intervalCount = get_glyph_run_intercepts(run, paint, bounds, result.data(), &intervalCount);
+ result.resize(intervalCount);
+ return result;
+}
+
+////////
+
+SkTextBlob::Iter::Iter(const SkTextBlob& blob) {
+ fRunRecord = RunRecord::First(&blob);
+}
+
+bool SkTextBlob::Iter::next(Run* rec) {
+ if (fRunRecord) {
+ if (rec) {
+ rec->fTypeface = fRunRecord->font().getTypeface();
+ rec->fGlyphCount = fRunRecord->glyphCount();
+ rec->fGlyphIndices = fRunRecord->glyphBuffer();
+#ifdef SK_UNTIL_CRBUG_1187654_IS_FIXED
+ rec->fClusterIndex_forTest = fRunRecord->clusterBuffer();
+ rec->fUtf8Size_forTest = fRunRecord->textSize();
+ rec->fUtf8_forTest = fRunRecord->textBuffer();
+#endif
+ }
+ if (fRunRecord->isLastRun()) {
+ fRunRecord = nullptr;
+ } else {
+ fRunRecord = RunRecord::Next(fRunRecord);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkTextBlob::Iter::experimentalNext(ExperimentalRun* rec) {
+ if (fRunRecord) {
+ if (rec) {
+ rec->font = fRunRecord->font();
+ rec->count = fRunRecord->glyphCount();
+ rec->glyphs = fRunRecord->glyphBuffer();
+ rec->positions = fRunRecord->pointBuffer();
+ }
+ if (fRunRecord->isLastRun()) {
+ fRunRecord = nullptr;
+ } else {
+ fRunRecord = RunRecord::Next(fRunRecord);
+ }
+ return true;
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/core/SkTextBlobPriv.h b/gfx/skia/skia/src/core/SkTextBlobPriv.h
new file mode 100644
index 0000000000..6a4c1531cf
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextBlobPriv.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTextBlobPriv_DEFINED
+#define SkTextBlobPriv_DEFINED
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTextBlob.h"
+#include "include/core/SkTypeface.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkPaintPriv.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+class SkTextBlobPriv {
+public:
+ /**
+ * Serialize to a buffer.
+ */
+ static void Flatten(const SkTextBlob& , SkWriteBuffer&);
+
+ /**
+ * Recreate an SkTextBlob that was serialized into a buffer.
+ *
+ * @param SkReadBuffer Serialized blob data.
+ * @return A new SkTextBlob representing the serialized data, or NULL if the buffer is
+ * invalid.
+ */
+ static sk_sp<SkTextBlob> MakeFromBuffer(SkReadBuffer&);
+
+ static bool HasRSXForm(const SkTextBlob& blob);
+};
+
+//
+// Textblob data is laid out into externally-managed storage as follows:
+//
+// -----------------------------------------------------------------------------
+// | SkTextBlob | RunRecord | Glyphs[] | Pos[] | RunRecord | Glyphs[] | Pos[] | ...
+// -----------------------------------------------------------------------------
+//
+// Each run record describes a text blob run, and can be used to determine the (implicit)
+// location of the following record.
+//
+// Extended Textblob runs have more data after the Pos[] array:
+//
+// -------------------------------------------------------------------------
+// ... | RunRecord | Glyphs[] | Pos[] | TextSize | Clusters[] | Text[] | ...
+// -------------------------------------------------------------------------
+//
+// To determine the length of the extended run data, the TextSize must be read.
+//
+// Extended Textblob runs may be mixed with non-extended runs.
+
+SkDEBUGCODE(static const unsigned kRunRecordMagic = 0xb10bcafe;)
+
+class SkTextBlob::RunRecord {
+public:
+ RunRecord(uint32_t count, uint32_t textSize, const SkPoint& offset, const SkFont& font, GlyphPositioning pos)
+ : fFont(font)
+ , fCount(count)
+ , fOffset(offset)
+ , fFlags(pos) {
+ SkASSERT(static_cast<unsigned>(pos) <= Flags::kPositioning_Mask);
+
+ SkDEBUGCODE(fMagic = kRunRecordMagic);
+ if (textSize > 0) {
+ fFlags |= kExtended_Flag;
+ *this->textSizePtr() = textSize;
+ }
+ }
+
+ uint32_t glyphCount() const {
+ return fCount;
+ }
+
+ const SkPoint& offset() const {
+ return fOffset;
+ }
+
+ const SkFont& font() const {
+ return fFont;
+ }
+
+ GlyphPositioning positioning() const {
+ return static_cast<GlyphPositioning>(fFlags & kPositioning_Mask);
+ }
+
+ uint16_t* glyphBuffer() const {
+ static_assert(SkIsAlignPtr(sizeof(RunRecord)), "");
+ // Glyphs are stored immediately following the record.
+ return reinterpret_cast<uint16_t*>(const_cast<RunRecord*>(this) + 1);
+ }
+
+ // can be aliased with pointBuffer() or xformBuffer()
+ SkScalar* posBuffer() const {
+ // Position scalars follow the (aligned) glyph buffer.
+ return reinterpret_cast<SkScalar*>(reinterpret_cast<uint8_t*>(this->glyphBuffer()) +
+ SkAlign4(fCount * sizeof(uint16_t)));
+ }
+
+ // alias for posBuffer()
+ SkPoint* pointBuffer() const {
+ SkASSERT(this->positioning() == (GlyphPositioning)2);
+ return reinterpret_cast<SkPoint*>(this->posBuffer());
+ }
+
+ // alias for posBuffer()
+ SkRSXform* xformBuffer() const {
+ SkASSERT(this->positioning() == (GlyphPositioning)3);
+ return reinterpret_cast<SkRSXform*>(this->posBuffer());
+ }
+
+ uint32_t textSize() const { return isExtended() ? *this->textSizePtr() : 0; }
+
+ uint32_t* clusterBuffer() const {
+ // clusters follow the textSize.
+ return isExtended() ? 1 + this->textSizePtr() : nullptr;
+ }
+
+ char* textBuffer() const {
+ return isExtended()
+ ? reinterpret_cast<char*>(this->clusterBuffer() + fCount)
+ : nullptr;
+ }
+
+ bool isLastRun() const { return SkToBool(fFlags & kLast_Flag); }
+
+ static size_t StorageSize(uint32_t glyphCount, uint32_t textSize,
+ SkTextBlob::GlyphPositioning positioning,
+ SkSafeMath* safe);
+
+ static const RunRecord* First(const SkTextBlob* blob);
+
+ static const RunRecord* Next(const RunRecord* run);
+
+ void validate(const uint8_t* storageTop) const;
+
+private:
+ friend class SkTextBlobBuilder;
+
+ enum Flags {
+ kPositioning_Mask = 0x03, // bits 0-1 reserved for positioning
+ kLast_Flag = 0x04, // set for the last blob run
+ kExtended_Flag = 0x08, // set for runs with text/cluster info
+ };
+
+ static const RunRecord* NextUnchecked(const RunRecord* run);
+
+ static size_t PosCount(uint32_t glyphCount,
+ SkTextBlob::GlyphPositioning positioning,
+ SkSafeMath* safe);
+
+ uint32_t* textSizePtr() const;
+
+ void grow(uint32_t count);
+
+ bool isExtended() const {
+ return fFlags & kExtended_Flag;
+ }
+
+ SkFont fFont;
+ uint32_t fCount;
+ SkPoint fOffset;
+ uint32_t fFlags;
+
+ SkDEBUGCODE(unsigned fMagic;)
+};
+
+/**
+ * Iterate through all of the text runs of the text blob. For example:
+ * for (SkTextBlobRunIterator it(blob); !it.done(); it.next()) {
+ * .....
+ * }
+ */
+class SkTextBlobRunIterator {
+public:
+ SkTextBlobRunIterator(const SkTextBlob* blob);
+
+ enum GlyphPositioning : uint8_t {
+ kDefault_Positioning = 0, // Default glyph advances -- zero scalars per glyph.
+ kHorizontal_Positioning = 1, // Horizontal positioning -- one scalar per glyph.
+ kFull_Positioning = 2, // Point positioning -- two scalars per glyph.
+ kRSXform_Positioning = 3, // RSXform positioning -- four scalars per glyph.
+ };
+
+ bool done() const {
+ return !fCurrentRun;
+ }
+ void next();
+
+ uint32_t glyphCount() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->glyphCount();
+ }
+ const uint16_t* glyphs() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->glyphBuffer();
+ }
+ const SkScalar* pos() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->posBuffer();
+ }
+ // alias for pos()
+ const SkPoint* points() const {
+ return fCurrentRun->pointBuffer();
+ }
+ // alias for pos()
+ const SkRSXform* xforms() const {
+ return fCurrentRun->xformBuffer();
+ }
+ const SkPoint& offset() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->offset();
+ }
+ const SkFont& font() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->font();
+ }
+ GlyphPositioning positioning() const;
+ unsigned scalarsPerGlyph() const;
+ uint32_t* clusters() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->clusterBuffer();
+ }
+ uint32_t textSize() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->textSize();
+ }
+ char* text() const {
+ SkASSERT(!this->done());
+ return fCurrentRun->textBuffer();
+ }
+
+ bool isLCD() const;
+
+private:
+ const SkTextBlob::RunRecord* fCurrentRun;
+
+ SkDEBUGCODE(uint8_t* fStorageTop;)
+};
+
+inline bool SkTextBlobPriv::HasRSXForm(const SkTextBlob& blob) {
+ for (SkTextBlobRunIterator i{&blob}; !i.done(); i.next()) {
+ if (i.positioning() == SkTextBlobRunIterator::kRSXform_Positioning) {
+ return true;
+ }
+ }
+ return false;
+}
+
+#endif // SkTextBlobPriv_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTextBlobTrace.cpp b/gfx/skia/skia/src/core/SkTextBlobTrace.cpp
new file mode 100644
index 0000000000..0e90c57875
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextBlobTrace.cpp
@@ -0,0 +1,119 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/core/SkTextBlobTrace.h"
+
+#include "include/core/SkTextBlob.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkPtrRecorder.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/core/SkWriteBuffer.h"
+
+std::vector<SkTextBlobTrace::Record> SkTextBlobTrace::CreateBlobTrace(SkStream* stream) {
+ std::vector<SkTextBlobTrace::Record> trace;
+
+ uint32_t typefaceCount;
+ if (!stream->readU32(&typefaceCount)) {
+ return trace;
+ }
+
+ std::vector<sk_sp<SkTypeface>> typefaceArray;
+ for (uint32_t i = 0; i < typefaceCount; i++) {
+ typefaceArray.push_back(SkTypeface::MakeDeserialize(stream));
+ }
+
+ uint32_t restOfFile;
+ if (!stream->readU32(&restOfFile)) {
+ return trace;
+ }
+ sk_sp<SkData> data = SkData::MakeFromStream(stream, restOfFile);
+ SkReadBuffer readBuffer{data->data(), data->size()};
+ readBuffer.setTypefaceArray(typefaceArray.data(), typefaceArray.size());
+
+ while (!readBuffer.eof()) {
+ SkTextBlobTrace::Record record;
+ record.origUniqueID = readBuffer.readUInt();
+ record.paint = readBuffer.readPaint();
+ readBuffer.readPoint(&record.offset);
+ record.blob = SkTextBlobPriv::MakeFromBuffer(readBuffer);
+ trace.push_back(std::move(record));
+ }
+ return trace;
+}
+
+void SkTextBlobTrace::DumpTrace(const std::vector<SkTextBlobTrace::Record>& trace) {
+ for (const SkTextBlobTrace::Record& record : trace) {
+ const SkTextBlob* blob = record.blob.get();
+ const SkPaint& p = record.paint;
+ bool weirdPaint = p.getStyle() != SkPaint::kFill_Style
+ || p.getMaskFilter() != nullptr
+ || p.getPathEffect() != nullptr;
+
+ SkDebugf("Blob %d ( %g %g ) %d\n ",
+ blob->uniqueID(), record.offset.x(), record.offset.y(), weirdPaint);
+ SkTextBlobRunIterator iter(blob);
+ int runNumber = 0;
+ while (!iter.done()) {
+ SkDebugf("Run %d\n ", runNumber);
+ SkFont font = iter.font();
+ SkDebugf("Font %d %g %g %g %d %d %d\n ",
+ font.getTypefaceOrDefault()->uniqueID(),
+ font.getSize(),
+ font.getScaleX(),
+ font.getSkewX(),
+ SkFontPriv::Flags(font),
+ (int)font.getEdging(),
+ (int)font.getHinting());
+ uint32_t glyphCount = iter.glyphCount();
+ const uint16_t* glyphs = iter.glyphs();
+ for (uint32_t i = 0; i < glyphCount; i++) {
+ SkDebugf("%02X ", glyphs[i]);
+ }
+ SkDebugf("\n");
+ runNumber += 1;
+ iter.next();
+ }
+ }
+}
+
+SkTextBlobTrace::Capture::Capture() : fTypefaceSet(new SkRefCntSet) {
+ fWriteBuffer.setTypefaceRecorder(fTypefaceSet);
+}
+
+SkTextBlobTrace::Capture::~Capture() = default;
+
+void SkTextBlobTrace::Capture::capture(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) {
+ const SkTextBlob* blob = glyphRunList.blob();
+ if (blob != nullptr) {
+ fWriteBuffer.writeUInt(blob->uniqueID());
+ fWriteBuffer.writePaint(paint);
+ fWriteBuffer.writePoint(glyphRunList.origin());
+ SkTextBlobPriv::Flatten(*blob, fWriteBuffer);
+ fBlobCount++;
+ }
+}
+
+void SkTextBlobTrace::Capture::dump(SkWStream* dst) const {
+ SkTLazy<SkFILEWStream> fileStream;
+ if (!dst) {
+ uint32_t id = SkChecksum::Mix(reinterpret_cast<uintptr_t>(this));
+ SkString f = SkStringPrintf("diff-canvas-%08x-%04zu.trace", id, fBlobCount);
+ dst = fileStream.init(f.c_str());
+ if (!fileStream->isValid()) {
+ SkDebugf("Error opening '%s'.\n", f.c_str());
+ return;
+ }
+ SkDebugf("Saving trace to '%s'.\n", f.c_str());
+ }
+ SkASSERT(dst);
+ int count = fTypefaceSet->count();
+ dst->write32(count);
+ SkPtrSet::Iter iter(*fTypefaceSet);
+ while (void* ptr = iter.next()) {
+ ((const SkTypeface*)ptr)->serialize(dst, SkTypeface::SerializeBehavior::kDoIncludeData);
+ }
+ dst->write32(fWriteBuffer.bytesWritten());
+ fWriteBuffer.writeToStream(dst);
+}
diff --git a/gfx/skia/skia/src/core/SkTextBlobTrace.h b/gfx/skia/skia/src/core/SkTextBlobTrace.h
new file mode 100644
index 0000000000..8aa776523e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextBlobTrace.h
@@ -0,0 +1,49 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkTextBlobTrace_DEFINED
+#define SkTextBlobTrace_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTextBlob.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/text/GlyphRun.h"
+
+#include <cstdint>
+#include <vector>
+
+namespace SkTextBlobTrace {
+
+struct Record {
+ uint32_t origUniqueID;
+ SkPaint paint;
+ SkPoint offset;
+ sk_sp<SkTextBlob> blob;
+};
+
+std::vector<SkTextBlobTrace::Record> CreateBlobTrace(SkStream* stream);
+
+void DumpTrace(const std::vector<SkTextBlobTrace::Record>&);
+
+class Capture {
+public:
+ Capture();
+ ~Capture();
+ void capture(const sktext::GlyphRunList&, const SkPaint&);
+ // If `dst` is nullptr, write to a file.
+ void dump(SkWStream* dst = nullptr) const;
+
+private:
+ size_t fBlobCount = 0;
+ sk_sp<SkRefCntSet> fTypefaceSet;
+ SkBinaryWriteBuffer fWriteBuffer;
+
+ Capture(const Capture&) = delete;
+ Capture& operator=(const Capture&) = delete;
+};
+
+} // namespace SkTextBlobTrace
+#endif // SkTextBlobTrace_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTextFormatParams.h b/gfx/skia/skia/src/core/SkTextFormatParams.h
new file mode 100644
index 0000000000..00f9fce65c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTextFormatParams.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkTextFormatParams_DEFINES
+#define SkTextFormatParams_DEFINES
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+
+// The fraction of text size to embolden fake bold text scales with text size.
+// At 9 points or below, the stroke width is increased by text size / 24.
+// At 36 points and above, it is increased by text size / 32. In between,
+// it is interpolated between those values.
+static const SkScalar kStdFakeBoldInterpKeys[] = {
+ SK_Scalar1*9,
+ SK_Scalar1*36,
+};
+static const SkScalar kStdFakeBoldInterpValues[] = {
+ SK_Scalar1/24,
+ SK_Scalar1/32,
+};
+static_assert(std::size(kStdFakeBoldInterpKeys) == std::size(kStdFakeBoldInterpValues),
+ "mismatched_array_size");
+static const int kStdFakeBoldInterpLength = std::size(kStdFakeBoldInterpKeys);
+
+#endif //SkTextFormatParams_DEFINES
diff --git a/gfx/skia/skia/src/core/SkTime.cpp b/gfx/skia/skia/src/core/SkTime.cpp
new file mode 100644
index 0000000000..cb2ed3b1c5
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTime.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTime.h"
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkLeanWindows.h"
+
+void SkTime::DateTime::toISO8601(SkString* dst) const {
+ if (dst) {
+ int timeZoneMinutes = SkToInt(fTimeZoneMinutes);
+ char timezoneSign = timeZoneMinutes >= 0 ? '+' : '-';
+ int timeZoneHours = SkTAbs(timeZoneMinutes) / 60;
+ timeZoneMinutes = SkTAbs(timeZoneMinutes) % 60;
+ dst->printf("%04u-%02u-%02uT%02u:%02u:%02u%c%02d:%02d",
+ static_cast<unsigned>(fYear), static_cast<unsigned>(fMonth),
+ static_cast<unsigned>(fDay), static_cast<unsigned>(fHour),
+ static_cast<unsigned>(fMinute),
+ static_cast<unsigned>(fSecond), timezoneSign, timeZoneHours,
+ timeZoneMinutes);
+ }
+}
+
+#ifdef SK_BUILD_FOR_WIN
+
+void SkTime::GetDateTime(DateTime* dt) {
+ if (dt) {
+ SYSTEMTIME st;
+ GetSystemTime(&st);
+ dt->fTimeZoneMinutes = 0;
+ dt->fYear = st.wYear;
+ dt->fMonth = SkToU8(st.wMonth);
+ dt->fDayOfWeek = SkToU8(st.wDayOfWeek);
+ dt->fDay = SkToU8(st.wDay);
+ dt->fHour = SkToU8(st.wHour);
+ dt->fMinute = SkToU8(st.wMinute);
+ dt->fSecond = SkToU8(st.wSecond);
+ }
+}
+
+#else // SK_BUILD_FOR_WIN
+
+#include <time.h>
+void SkTime::GetDateTime(DateTime* dt) {
+ if (dt) {
+ time_t m_time;
+ time(&m_time);
+ struct tm tstruct;
+ gmtime_r(&m_time, &tstruct);
+ dt->fTimeZoneMinutes = 0;
+ dt->fYear = tstruct.tm_year + 1900;
+ dt->fMonth = SkToU8(tstruct.tm_mon + 1);
+ dt->fDayOfWeek = SkToU8(tstruct.tm_wday);
+ dt->fDay = SkToU8(tstruct.tm_mday);
+ dt->fHour = SkToU8(tstruct.tm_hour);
+ dt->fMinute = SkToU8(tstruct.tm_min);
+ dt->fSecond = SkToU8(tstruct.tm_sec);
+ }
+}
+#endif // SK_BUILD_FOR_WIN
+
+#if !defined(__has_feature)
+ #define __has_feature(x) 0
+#endif
+
+#if __has_feature(memory_sanitizer) || defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_ANDROID)
+#include <time.h>
+double SkTime::GetNSecs() {
+ // See skia:6504
+ struct timespec tp;
+ clock_gettime(CLOCK_MONOTONIC, &tp);
+ return tp.tv_sec * 1e9 + tp.tv_nsec;
+}
+#else
+#include <chrono>
+#include <ratio>
+double SkTime::GetNSecs() {
+ auto now = std::chrono::steady_clock::now();
+ std::chrono::duration<double, std::nano> ns = now.time_since_epoch();
+ return ns.count();
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkTraceEvent.h b/gfx/skia/skia/src/core/SkTraceEvent.h
new file mode 100644
index 0000000000..f18e48625f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTraceEvent.h
@@ -0,0 +1,419 @@
+// Copyright (c) 2014 Google Inc.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file defines implementation details of how the trace macros in
+// SkTraceEventCommon.h collect and store trace events. Anything not
+// implementation-specific should go in SkTraceEventCommon.h instead of here.
+
+#ifndef SkTraceEvent_DEFINED
+#define SkTraceEvent_DEFINED
+
+#include "include/utils/SkEventTracer.h"
+#include "src/core/SkTraceEventCommon.h"
+#include <atomic>
+
+#if defined(SK_ANDROID_FRAMEWORK_USE_PERFETTO)
+ #include <string>
+ #include <utility>
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Makes it easier to add traces with a simple TRACE_EVENT0("skia", TRACE_FUNC).
+#if defined(_MSC_VER)
+ #define TRACE_FUNC __FUNCSIG__
+#else
+ #define TRACE_FUNC __PRETTY_FUNCTION__
+#endif
+
+
+#if defined(SK_ANDROID_FRAMEWORK_USE_PERFETTO)
+ // By default, const char* argument values are assumed to have long-lived scope
+ // and will not be copied. Use this macro to force a const char* to be copied.
+ //
+ // TRACE_STR_COPY should be used with short-lived strings that should be copied immediately.
+ // TRACE_STR_STATIC should be used with pointers to string literals with process lifetime.
+ // Neither should be used for string literals known at compile time.
+ //
+ // E.g. TRACE_EVENT0("skia", TRACE_STR_COPY(something.c_str()));
+ #define TRACE_STR_COPY(str) (::perfetto::DynamicString{str})
+
+ // Allows callers to pass static strings that aren't known at compile time to trace functions.
+ //
+ // TRACE_STR_COPY should be used with short-lived strings that should be copied immediately.
+ // TRACE_STR_STATIC should be used with pointers to string literals with process lifetime.
+ // Neither should be used for string literals known at compile time.
+ //
+ // E.g. TRACE_EVENT0("skia", TRACE_STR_STATIC(this->name()));
+ // No-op when Perfetto is disabled, or outside of Android framework.
+ #define TRACE_STR_STATIC(str) (::perfetto::StaticString{str})
+#else // !SK_ANDROID_FRAMEWORK_USE_PERFETTO
+ // By default, const char* argument values are assumed to have long-lived scope
+ // and will not be copied. Use this macro to force a const char* to be copied.
+ //
+ // TRACE_STR_COPY should be used with short-lived strings that should be copied immediately.
+ // TRACE_STR_STATIC should be used with pointers to string literals with process lifetime.
+ // Neither should be used for string literals known at compile time.
+ //
+ // E.g. TRACE_EVENT0("skia", TRACE_STR_COPY(something.c_str()));
+ #define TRACE_STR_COPY(str) (::skia_private::TraceStringWithCopy(str))
+
+ // Allows callers to pass static strings that aren't known at compile time to trace functions.
+ //
+ // TRACE_STR_COPY should be used with short-lived strings that should be copied immediately.
+ // TRACE_STR_STATIC should be used with pointers to string literals with process lifetime.
+ // Neither should be used for string literals known at compile time.
+ //
+ // E.g. TRACE_EVENT0("skia", TRACE_STR_STATIC(this->name()));
+ // No-op when Perfetto is disabled, or outside of Android framework.
+ #define TRACE_STR_STATIC(str) (str)
+#endif // SK_ANDROID_FRAMEWORK_USE_PERFETTO
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags | \
+ SkEventTracer::kEnabledForEventCallback_CategoryGroupEnabledFlags)
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const uint8_t*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ SkEventTracer::GetInstance()->getCategoryGroupEnabled
+
+// Add a trace event to the platform tracing system.
+// SkEventTracer::Handle TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id,
+// int num_args,
+// const char** arg_names,
+// const uint8_t* arg_types,
+// const uint64_t* arg_values,
+// unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ SkEventTracer::GetInstance()->addTraceEvent
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const uint8_t* category_group_enabled,
+// const char* name,
+// SkEventTracer::Handle id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ SkEventTracer::GetInstance()->updateTraceEventDuration
+
+#ifdef SK_ANDROID_FRAMEWORK_USE_PERFETTO
+ #define TRACE_EVENT_API_NEW_TRACE_SECTION(...) do {} while (0)
+#else
+ // Start writing to a new trace output section (file, etc.).
+ // Accepts a label for the new section.
+ // void TRACE_EVENT_API_NEW_TRACE_SECTION(const char* name)
+ #define TRACE_EVENT_API_NEW_TRACE_SECTION \
+ SkEventTracer::GetInstance()->newTracingSection
+#endif
+
+// Defines visibility for classes in trace_event.h
+#define TRACE_EVENT_API_CLASS_EXPORT SK_API
+
+// We prepend this string to all category names, so that ALL Skia trace events are
+// disabled by default when tracing in Chrome.
+#define TRACE_CATEGORY_PREFIX "disabled-by-default-"
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a,b) \
+ trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a,b) \
+ INTERNAL_TRACE_EVENT_UID3(a,b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const uint8_t*>(atomic.load(std::memory_order_relaxed)); \
+ if (!category_group_enabled) { \
+ category_group_enabled = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ atomic.store(reinterpret_cast<intptr_t>(category_group_enabled), \
+ std::memory_order_relaxed); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static std::atomic<intptr_t> INTERNAL_TRACE_EVENT_UID(atomic){0}; \
+ const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ TRACE_CATEGORY_PREFIX category_group, \
+ INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ skia_private::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ skia_private::kNoEventId, flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ skia_private::TraceID trace_event_trace_id( \
+ id, &trace_event_flags); \
+ skia_private::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, trace_event_trace_id.data(), trace_event_flags, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ skia_private::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ do { \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ SkEventTracer::Handle h = skia_private::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, skia_private::kNoEventId, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ } \
+ } while (0)
+
+namespace skia_private {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const uint64_t kNoEventId = 0;
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+public:
+ TraceID(const void* id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(uint64_t id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned int id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned short id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned char id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(long long id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(long id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(int id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(short id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+ TraceID(signed char id, unsigned char* flags)
+ : data_(static_cast<uint64_t>(id)) { (void)flags; }
+
+ uint64_t data() const { return data_; }
+
+private:
+ uint64_t data_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+ bool as_bool;
+ uint64_t as_uint;
+ long long as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : str_(str) {}
+ operator const char* () const { return str_; }
+ private:
+ const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
+ union_member, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ uint64_t* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ uint64_t* value) { \
+ *type = value_type_id; \
+ *value = static_cast<uint64_t>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer, TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string, TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+ TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// These AddTraceEvent and AddTraceEvent template
+// functions are defined here instead of in the macro, because the arg_values
+// could be temporary objects, such as std::string. In order to store
+// pointers to the internal c_str and pass through to the tracing API,
+// the arg_values must live throughout these procedures.
+
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ kZeroNumArgs, nullptr, nullptr, nullptr, flags);
+}
+
+template<class ARG1_TYPE>
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val) {
+ const int num_args = 1;
+ uint8_t arg_types[1];
+ uint64_t arg_values[1];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ num_args, &arg1_name, arg_types, arg_values, flags);
+}
+
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline SkEventTracer::Handle
+AddTraceEvent(
+ char phase,
+ const uint8_t* category_group_enabled,
+ const char* name,
+ uint64_t id,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val,
+ const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = { arg1_name, arg2_name };
+ unsigned char arg_types[2];
+ uint64_t arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(
+ phase, category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values, flags);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
+ public:
+ // Note: members of data_ intentionally left uninitialized. See Initialize.
+ ScopedTracer() : p_data_(nullptr) {}
+
+ ~ScopedTracer() {
+ if (p_data_ && *data_.category_group_enabled)
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+ data_.category_group_enabled, data_.name, data_.event_handle);
+ }
+
+ void Initialize(const uint8_t* category_group_enabled,
+ const char* name,
+ SkEventTracer::Handle event_handle) {
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ data_.event_handle = event_handle;
+ p_data_ = &data_;
+ }
+
+ private:
+ ScopedTracer(const ScopedTracer&) = delete;
+ ScopedTracer& operator=(const ScopedTracer&) = delete;
+
+ // This Data struct workaround is to avoid initializing all the members
+ // in Data during construction of this object, since this object is always
+ // constructed, even when tracing is disabled. If the members of Data were
+ // members of this class instead, compiler warnings occur about potential
+ // uninitialized accesses.
+ struct Data {
+ const uint8_t* category_group_enabled;
+ const char* name;
+ SkEventTracer::Handle event_handle;
+ };
+ Data* p_data_;
+ Data data_;
+};
+
+} // namespace skia_private
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTraceEventCommon.h b/gfx/skia/skia/src/core/SkTraceEventCommon.h
new file mode 100644
index 0000000000..01d2b1876a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTraceEventCommon.h
@@ -0,0 +1,557 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef SkTraceEventCommon_DEFINED
+#define SkTraceEventCommon_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/utils/SkTraceEventPhase.h"
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Duration of scoped regions
+// Instantaneous events
+// Counters
+//
+// The first two arguments to all TRACE macros are the category and name. Both are strings, and
+// must have application lifetime (statics or literals). The same applies to arg_names, and string
+// argument values. However, you can force a copy of a string argument value with TRACE_STR_COPY:
+// TRACE_EVENT1("category", "name", "arg1", "literal string is only referenced");
+// TRACE_EVENT1("category", "name", "arg1", TRACE_STR_COPY("string will be copied"));
+//
+//
+// Categories are used to group events, and
+// can be enabled or disabled by the tracing framework. The trace system will automatically add the
+// process id, thread id, and microsecond timestamp to all events.
+//
+//
+// The TRACE_EVENT[0-2] macros trace the duration of entire scopes:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly", "howMuch", howMuch);
+// ...
+// }
+//
+//
+// Trace event also supports counters, which is a way to track a quantity as it varies over time.
+// Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// The tracing UI will show these counters in a single graph, as a summed area chart.
+
+#if defined(TRACE_EVENT0)
+ #error "Another copy of this file has already been included."
+#endif
+
+// --- Temporary Perfetto migration shim preamble ---
+// Tracing in the Android framework, and tracing with Perfetto, are both in a partially migrated
+// state (but fully functional).
+//
+// See go/skia-perfetto
+//
+// For Android framework:
+// ---
+// 1. If SK_ANDROID_FRAMEWORK_USE_PERFETTO is not defined, then all tracing macros map to no-ops.
+// This is only relevant to host-mode builds, where ATrace isn't supported anyway, and tracing with
+// Perfetto seems unnecessary. Note that SkAndroidFrameworkTraceUtil is still defined (assuming
+// SK_BUILD_FOR_ANDROID_FRAMEWORK is defined) to support HWUI referencing it in host-mode builds.
+//
+// 2. If SK_ANDROID_FRAMEWORK_USE_PERFETTO *is* defined, then the tracing backend can be switched
+// between ATrace and Perfetto at runtime. This is currently *only* supported in Android framework.
+// SkAndroidFrameworkTraceUtil::setEnableTracing(bool) will still control broad tracing overall, but
+// SkAndroidFrameworkTraceUtil::setUsePerfettoTrackEvents(bool) will now determine whether that
+// tracing is done with ATrace (default/false) or Perfetto (true).
+//
+// Note: if setUsePerfettoTrackEvents(true) is called, then Perfetto will remain initialized until
+// the process ends. This means some minimal state overhead will remain even after subsequently
+// switching the process back to ATrace, but individual trace events will be correctly routed to
+// whichever system is active in the moment. However, trace events which have begun but have not yet
+// ended when a switch occurs will likely be corrupted. Thus, it's best to minimize the frequency of
+// switching backend tracing systems at runtime.
+//
+// For Perfetto outside of Android framework (e.g. tools):
+// ---
+// SK_USE_PERFETTO (mutually exclusive with SK_ANDROID_FRAMEWORK_USE_PERFETTO) can be used to unlock
+// SkPerfettoTrace, which can be used for in-process tracing via the standard Skia tracing flow of
+// SkEventTracer::SetInstance(...). This is enabled in tools with the `--trace perfetto` argument.
+// See https://skia.org/docs/dev/tools/tracing/#tracing-with-perfetto for more on SK_USE_PERFETTO.
+
+#ifdef SK_ANDROID_FRAMEWORK_USE_PERFETTO
+
+// PERFETTO_TRACK_EVENT_NAMESPACE must be defined before including Perfetto. This allows Skia to
+// maintain separate "track event" category storage, etc. from codebases linked into the same
+// executable, and avoid symbol duplication errors.
+//
+// NOTE: A side-effect of this is we must use skia::TrackEvent instead of perfetto::TrackEvent.
+#define PERFETTO_TRACK_EVENT_NAMESPACE skia
+#include <perfetto/tracing.h>
+
+#include <cutils/trace.h>
+#include <stdarg.h>
+#include <string_view>
+
+// WARNING: this list must be kept up to date with every category we use for tracing!
+//
+// When adding a new category it's likely best to add both "new_category" and "new_category.always",
+// though not strictly required. "new_category.always" is used internally when "new_category" is
+// given to TRACE_EVENT0_ALWAYS macros, which are used for core events that should always show up in
+// traces for the Android framework. Adding both to begin with will likely reduce churn if/when
+// "new_category" is used across both normal tracing macros and _ALWAYS variants in the future, but
+// it's not a strict requirement.
+//
+// See stages section of go/skia-perfetto for timeline of when this should improve.
+//
+// TODO(b/262718654): make this compilation failure happen sooner than the Skia -> Android roll.
+//
+// Currently kept entirely separate from SkPerfettoTrace for simplicity, which uses dynamic
+// categories and doesn't need these static category definitions.
+PERFETTO_DEFINE_CATEGORIES(
+ perfetto::Category("GM"),
+ perfetto::Category("skia"),
+ perfetto::Category("skia.android"),
+ perfetto::Category("skia.gpu"),
+ perfetto::Category("skia.gpu.cache"),
+ perfetto::Category("skia.objects"),
+ perfetto::Category("skia.shaders"),
+ perfetto::Category("skottie"),
+ perfetto::Category("test"),
+ perfetto::Category("test_cpu"),
+ perfetto::Category("test_ganesh"),
+ perfetto::Category("test_graphite"),
+ // ".always" variants are currently required for any category used in TRACE_EVENT0_ALWAYS.
+ perfetto::Category("GM.always").SetTags("skia.always"),
+ perfetto::Category("skia.always").SetTags("skia.always"),
+ perfetto::Category("skia.android.always").SetTags("skia.always"),
+ perfetto::Category("skia.gpu.always").SetTags("skia.always"),
+ perfetto::Category("skia.gpu.cache.always").SetTags("skia.always"),
+ perfetto::Category("skia.objects.always").SetTags("skia.always"),
+ perfetto::Category("skia.shaders.always").SetTags("skia.always"),
+ perfetto::Category("skottie.always").SetTags("skia.always"),
+ perfetto::Category("test.always").SetTags("skia.always"),
+ perfetto::Category("test_cpu.always").SetTags("skia.always"),
+ perfetto::Category("test_ganesh.always").SetTags("skia.always"),
+ perfetto::Category("test_graphite.always").SetTags("skia.always"),
+);
+
+#endif // SK_ANDROID_FRAMEWORK_USE_PERFETTO
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+#define SK_ANDROID_FRAMEWORK_ATRACE_BUFFER_SIZE 256
+
+class SkAndroidFrameworkTraceUtil {
+public:
+ SkAndroidFrameworkTraceUtil() = delete;
+
+ // Controls whether broad tracing is enabled. Warning: not thread-safe!
+ //
+ // Some key trace events may still be recorded when this is disabled, if a relevant tracing
+ // session is active.
+ //
+ // ATrace is used by default, but can be replaced with Perfetto by calling
+ // setUsePerfettoTrackEvents(true)
+ static void setEnableTracing(bool enableAndroidTracing) {
+ gEnableAndroidTracing = enableAndroidTracing;
+ }
+
+ // Controls whether tracing uses Perfetto instead of ATrace. Warning: not thread-safe!
+ //
+ // Returns true if Skia was built with Perfetto, false otherwise.
+ static bool setUsePerfettoTrackEvents(bool usePerfettoTrackEvents) {
+#ifdef SK_ANDROID_FRAMEWORK_USE_PERFETTO
+ // Ensure Perfetto is initialized if it wasn't already the preferred tracing backend.
+ if (!gUsePerfettoTrackEvents && usePerfettoTrackEvents) {
+ initPerfetto();
+ }
+ gUsePerfettoTrackEvents = usePerfettoTrackEvents;
+ return true;
+#else // !SK_ANDROID_FRAMEWORK_USE_PERFETTO
+ // Note: please reach out to skia-android@google.com if you encounter this unexpectedly.
+ SkDebugf("Tracing Skia with Perfetto is not supported in this environment (host build?)");
+ return false;
+#endif // SK_ANDROID_FRAMEWORK_USE_PERFETTO
+ }
+
+ static bool getEnableTracing() {
+ return gEnableAndroidTracing;
+ }
+
+ static bool getUsePerfettoTrackEvents() {
+ return gUsePerfettoTrackEvents;
+ }
+
+private:
+ static bool gEnableAndroidTracing;
+ static bool gUsePerfettoTrackEvents;
+
+#ifdef SK_ANDROID_FRAMEWORK_USE_PERFETTO
+ // Initializes tracing systems, and establishes a connection to the 'traced' daemon.
+ //
+ // Can be called multiple times.
+ static void initPerfetto() {
+ ::perfetto::TracingInitArgs perfettoArgs;
+ perfettoArgs.backends |= perfetto::kSystemBackend;
+ ::perfetto::Tracing::Initialize(perfettoArgs);
+ ::skia::TrackEvent::Register();
+ }
+#endif // SK_ANDROID_FRAMEWORK_USE_PERFETTO
+};
+#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK
+
+#ifdef SK_DEBUG
+static void skprintf_like_noop(const char format[], ...) SK_PRINTF_LIKE(1, 2);
+static inline void skprintf_like_noop(const char format[], ...) {}
+static inline void sk_noop(...) {}
+#define TRACE_EMPTY(...) do { sk_noop(__VA_ARGS__); } while (0)
+#define TRACE_EMPTY_FMT(fmt, ...) do { skprintf_like_noop(fmt, ##__VA_ARGS__); } while (0)
+#else
+#define TRACE_EMPTY(...) do {} while (0)
+#define TRACE_EMPTY_FMT(fmt, ...) do {} while (0)
+#endif
+
+#if defined(SK_DISABLE_TRACING) || \
+ (defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) && !defined(SK_ANDROID_FRAMEWORK_USE_PERFETTO))
+
+ #define ATRACE_ANDROID_FRAMEWORK(fmt, ...) TRACE_EMPTY_FMT(fmt, ##__VA_ARGS__)
+ #define ATRACE_ANDROID_FRAMEWORK_ALWAYS(fmt, ...) TRACE_EMPTY_FMT(fmt, ##__VA_ARGS__)
+ #define TRACE_EVENT0(cg, n) TRACE_EMPTY(cg, n)
+ #define TRACE_EVENT0_ALWAYS(cg, n) TRACE_EMPTY(cg, n)
+ #define TRACE_EVENT1(cg, n, a1n, a1v) TRACE_EMPTY(cg, n, a1n, a1v)
+ #define TRACE_EVENT2(cg, n, a1n, a1v, a2n, a2v) TRACE_EMPTY(cg, n, a1n, a1v, a2n, a2v)
+ #define TRACE_EVENT_INSTANT0(cg, n, scope) TRACE_EMPTY(cg, n, scope)
+ #define TRACE_EVENT_INSTANT1(cg, n, scope, a1n, a1v) TRACE_EMPTY(cg, n, scope, a1n, a1v)
+ #define TRACE_EVENT_INSTANT2(cg, n, scope, a1n, a1v, a2n, a2v) \
+ TRACE_EMPTY(cg, n, scope, a1n, a1v, a2n, a2v)
+ #define TRACE_COUNTER1(cg, n, value) TRACE_EMPTY(cg, n, value)
+ #define TRACE_COUNTER2(cg, n, v1n, v1v, v2n, v2v) TRACE_EMPTY(cg, n, v1n, v1v, v2n, v2v)
+
+#elif defined(SK_ANDROID_FRAMEWORK_USE_PERFETTO)
+
+namespace skia_private {
+ // ATrace can't accept ::perfetto::DynamicString or ::perfetto::StaticString, so any trace event
+ // names that were wrapped in TRACE_STR_COPY or TRACE_STR_STATIC need to be unboxed back to
+ // char* before being passed to ATrace.
+ inline const char* UnboxPerfettoString(const ::perfetto::DynamicString& str) {
+ return str.value;
+ }
+ inline const char* UnboxPerfettoString(const ::perfetto::StaticString& str) {
+ return str.value;
+ }
+ inline const char* UnboxPerfettoString(const char* str) {
+ return str;
+ }
+
+ constexpr bool StrEndsWithAndLongerThan(const char* str, const char* suffix) {
+ auto strView = std::basic_string_view(str);
+ auto suffixView = std::basic_string_view(suffix);
+ // string_view::ends_with isn't available until C++20
+ return strView.size() > suffixView.size() &&
+ strView.compare(strView.size() - suffixView.size(),
+ std::string_view::npos, suffixView) == 0;
+ }
+}
+
+// Generate a unique variable name with a given prefix.
+// The indirection in this multi-level macro lets __LINE__ expand at the right time/place to get
+// prefix123 instead of prefix__LINE__.
+#define SK_PERFETTO_INTERNAL_CONCAT2(a, b) a##b
+#define SK_PERFETTO_INTERNAL_CONCAT(a, b) SK_PERFETTO_INTERNAL_CONCAT2(a, b)
+#define SK_PERFETTO_UID(prefix) SK_PERFETTO_INTERNAL_CONCAT(prefix, __LINE__)
+
+// Assuming there is an active tracing session, this call will create a trace event if tracing is
+// enabled (with SkAndroidFrameworkTraceUtil::setEnableTracing(true)) or if force_always_trace is
+// true. The event goes through ATrace by default, but can be routed to Perfetto instead by calling
+// SkAndroidFrameworkTraceUtil::setUsePerfettoTrackEvents(true).
+//
+// If force_always_trace = true, then the caller *must* append the ".always" suffix to the provided
+// category. This allows Perfetto tracing sessions to optionally filter to just the "skia.always"
+// category tag. This requirement is enforced at compile time.
+#define TRACE_EVENT_ATRACE_OR_PERFETTO_FORCEABLE(force_always_trace, category, name, ...) \
+ struct SK_PERFETTO_UID(ScopedEvent) { \
+ struct EventFinalizer { \
+ /* The ... parameter slot is an implementation detail. It allows the */ \
+ /* anonymous struct to use aggregate initialization to invoke the */ \
+ /* lambda (which emits the BEGIN event and returns an integer) */ \
+ /* with the proper reference capture for any */ \
+ /* TrackEventArgumentFunction in |__VA_ARGS__|. This is required so */ \
+ /* that the scoped event is exactly ONE line and can't escape the */ \
+ /* scope if used in a single line if statement. */ \
+ EventFinalizer(...) {} \
+ ~EventFinalizer() { \
+ if (force_always_trace || \
+ CC_UNLIKELY(SkAndroidFrameworkTraceUtil::getEnableTracing())) { \
+ if (SkAndroidFrameworkTraceUtil::getUsePerfettoTrackEvents()) { \
+ TRACE_EVENT_END(category); \
+ } else { \
+ ATRACE_END(); \
+ } \
+ } \
+ } \
+ \
+ EventFinalizer(const EventFinalizer&) = delete; \
+ EventFinalizer& operator=(const EventFinalizer&) = delete; \
+ \
+ EventFinalizer(EventFinalizer&&) = default; \
+ EventFinalizer& operator=(EventFinalizer&&) = delete; \
+ } finalizer; \
+ } SK_PERFETTO_UID(scoped_event) { \
+ [&]() { \
+ static_assert(!force_always_trace || \
+ ::skia_private::StrEndsWithAndLongerThan(category, ".always"), \
+ "[force_always_trace == true] requires [category] to end in '.always'"); \
+ if (force_always_trace || \
+ CC_UNLIKELY(SkAndroidFrameworkTraceUtil::getEnableTracing())) { \
+ if (SkAndroidFrameworkTraceUtil::getUsePerfettoTrackEvents()) { \
+ TRACE_EVENT_BEGIN(category, name, ##__VA_ARGS__); \
+ } else { \
+ ATRACE_BEGIN(::skia_private::UnboxPerfettoString(name)); \
+ } \
+ } \
+ return 0; \
+ }() \
+ }
+
+// Records an event with the current tracing backend, if overall Skia tracing is also enabled.
+#define TRACE_EVENT_ATRACE_OR_PERFETTO(category, name, ...) \
+ TRACE_EVENT_ATRACE_OR_PERFETTO_FORCEABLE( \
+ /* force_always_trace = */ false, category, name, ##__VA_ARGS__)
+
+#define ATRACE_ANDROID_FRAMEWORK(fmt, ...) \
+ char SK_PERFETTO_UID(skTraceStrBuf)[SK_ANDROID_FRAMEWORK_ATRACE_BUFFER_SIZE]; \
+ if (SkAndroidFrameworkTraceUtil::getEnableTracing()) { \
+ snprintf(SK_PERFETTO_UID(skTraceStrBuf), SK_ANDROID_FRAMEWORK_ATRACE_BUFFER_SIZE, \
+ fmt, ##__VA_ARGS__); \
+ } \
+ TRACE_EVENT0("skia.android", TRACE_STR_COPY(SK_PERFETTO_UID(skTraceStrBuf)))
+
+#define ATRACE_ANDROID_FRAMEWORK_ALWAYS(fmt, ...) \
+ char SK_PERFETTO_UID(skTraceStrBuf)[SK_ANDROID_FRAMEWORK_ATRACE_BUFFER_SIZE]; \
+ snprintf(SK_PERFETTO_UID(skTraceStrBuf), SK_ANDROID_FRAMEWORK_ATRACE_BUFFER_SIZE, \
+ fmt, ##__VA_ARGS__); \
+ TRACE_EVENT0_ALWAYS("skia.android", TRACE_STR_COPY(SK_PERFETTO_UID(skTraceStrBuf)))
+
+// Records a pair of begin and end events called "name" for the current scope, with 0, 1 or 2
+// associated arguments. Note that ATrace does not support trace arguments, so they are only
+// recorded when Perfetto is set as the current tracing backend.
+#define TRACE_EVENT0(category_group, name) \
+ TRACE_EVENT_ATRACE_OR_PERFETTO(category_group, name)
+// Note: ".always" suffix appended to category_group in TRACE_EVENT0_ALWAYS.
+#define TRACE_EVENT0_ALWAYS(category_group, name) TRACE_EVENT_ATRACE_OR_PERFETTO_FORCEABLE( \
+ /* force_always_trace = */ true, category_group ".always", name)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ TRACE_EVENT_ATRACE_OR_PERFETTO(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ TRACE_EVENT_ATRACE_OR_PERFETTO(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single event called "name" immediately, with 0, 1 or 2 associated arguments.
+// Note that ATrace does not support trace arguments, so they are only recorded when Perfetto is set
+// as the current tracing backend.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ do { TRACE_EVENT_ATRACE_OR_PERFETTO(category_group, name); } while(0)
+
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ do { TRACE_EVENT_ATRACE_OR_PERFETTO(category_group, name, arg1_name, arg1_val); } while(0)
+
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ do { TRACE_EVENT_ATRACE_OR_PERFETTO(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val); } while(0)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+#define TRACE_COUNTER1(category_group, name, value) \
+ if (CC_UNLIKELY(SkAndroidFrameworkTraceUtil::getEnableTracing())) { \
+ if (SkAndroidFrameworkTraceUtil::getUsePerfettoTrackEvents()) { \
+ TRACE_COUNTER(category_group, name, value); \
+ } else { \
+ ATRACE_INT(name, value); \
+ } \
+ }
+
+// Records the values of a multi-parted counter called "name" immediately.
+// In Chrome, this macro produces a stacked bar chart. Perfetto doesn't support
+// that (related: b/242349575), so this just produces two separate counters.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, value2_name, value2_val) \
+ if (CC_UNLIKELY(SkAndroidFrameworkTraceUtil::getEnableTracing())) { \
+ if (SkAndroidFrameworkTraceUtil::getUsePerfettoTrackEvents()) { \
+ TRACE_COUNTER(category_group, name "-" value1_name, value1_val); \
+ TRACE_COUNTER(category_group, name "-" value2_name, value2_val); \
+ } else { \
+ ATRACE_INT(name "-" value1_name, value1_val); \
+ ATRACE_INT(name "-" value2_name, value2_val); \
+ } \
+ }
+
+// ATrace has no object tracking, and would require a legacy shim for Perfetto (which likely no-ops
+// here). Further, these don't appear to currently be used outside of tests.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ TRACE_EMPTY(category_group, name, id)
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, snapshot) \
+ TRACE_EMPTY(category_group, name, id, snapshot)
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ TRACE_EMPTY(category_group, name, id)
+
+// Macro to efficiently determine if a given category group is enabled. Only works with Perfetto.
+// This is only used for some shader text logging that isn't supported in ATrace anyway.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ if (CC_UNLIKELY(SkAndroidFrameworkTraceUtil::getEnableTracing() && \
+                    SkAndroidFrameworkTraceUtil::getUsePerfettoTrackEvents())) {               \
+ *ret = TRACE_EVENT_CATEGORY_ENABLED(category_group); \
+ } else { \
+ *ret = false; \
+ }
+
+#else // Route through SkEventTracer (!SK_DISABLE_TRACING && !SK_ANDROID_FRAMEWORK_USE_PERFETTO)
+
+#define ATRACE_ANDROID_FRAMEWORK(fmt, ...) TRACE_EMPTY_FMT(fmt, ##__VA_ARGS__)
+#define ATRACE_ANDROID_FRAMEWORK_ALWAYS(fmt, ...) TRACE_EMPTY_FMT(fmt, ##__VA_ARGS__)
+
+// Records a pair of begin and end events called "name" for the current scope, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+
+#define TRACE_EVENT0_ALWAYS(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single event called "name" immediately, with 0, 1 or 2 associated arguments. If the
+// category is not enabled, then this does nothing.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope)
+
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+#define TRACE_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+
+#define TRACE_EVENT_ASYNC_BEGIN0(category, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_ASYNC_END0(category, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Macros to track the life time and value of arbitrary client objects.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+ snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
+
+#endif
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK \
+ (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+ TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
+
+#endif // SkTraceEventCommon_DEFINED
diff --git a/gfx/skia/skia/src/core/SkTypeface.cpp b/gfx/skia/skia/src/core/SkTypeface.cpp
new file mode 100644
index 0000000000..14e802e87a
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypeface.cpp
@@ -0,0 +1,578 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkOnce.h"
+#include "include/utils/SkCustomTypeface.h"
+#include "src/base/SkEndian.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+
+#ifdef SK_TYPEFACE_FACTORY_FREETYPE
+#include "src/ports/SkFontHost_FreeType_common.h"
+#endif
+
+#ifdef SK_TYPEFACE_FACTORY_CORETEXT
+#include "src/ports/SkTypeface_mac_ct.h"
+#endif
+
+#ifdef SK_TYPEFACE_FACTORY_DIRECTWRITE
+#include "src/ports/SkTypeface_win_dw.h"
+#endif
+
+using namespace skia_private;
+
+SkTypeface::SkTypeface(const SkFontStyle& style, bool isFixedPitch)
+ : fUniqueID(SkTypefaceCache::NewTypefaceID()), fStyle(style), fIsFixedPitch(isFixedPitch) { }
+
+SkTypeface::~SkTypeface() { }
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+class SkEmptyTypeface : public SkTypeface {
+public:
+ static sk_sp<SkTypeface> Make() { return sk_sp<SkTypeface>(new SkEmptyTypeface); }
+
+ static constexpr SkTypeface::FactoryId FactoryId = SkSetFourByteTag('e','m','t','y');
+ static sk_sp<SkTypeface> MakeFromStream(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments&) {
+ if (stream->getLength() == 0) {
+ return SkEmptyTypeface::Make();
+ }
+ return nullptr;
+ }
+protected:
+ SkEmptyTypeface() : SkTypeface(SkFontStyle(), true) { }
+
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override { return nullptr; }
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ return sk_ref_sp(this);
+ }
+ std::unique_ptr<SkScalerContext> onCreateScalerContext(
+ const SkScalerContextEffects& effects, const SkDescriptor* desc) const override
+ {
+ return SkScalerContext::MakeEmpty(
+ sk_ref_sp(const_cast<SkEmptyTypeface*>(this)), effects, desc);
+ }
+ void onFilterRec(SkScalerContextRec*) const override { }
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override {
+ return nullptr;
+ }
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ desc->setFactoryId(FactoryId);
+ *serialize = false;
+ }
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override {
+ sk_bzero(glyphs, count * sizeof(glyphs[0]));
+ }
+ int onCountGlyphs() const override { return 0; }
+ void getPostScriptGlyphNames(SkString*) const override {}
+ void getGlyphToUnicodeMap(SkUnichar*) const override {}
+ int onGetUPEM() const override { return 0; }
+ class EmptyLocalizedStrings : public SkTypeface::LocalizedStrings {
+ public:
+ bool next(SkTypeface::LocalizedString*) override { return false; }
+ };
+ void onGetFamilyName(SkString* familyName) const override {
+ familyName->reset();
+ }
+ bool onGetPostScriptName(SkString*) const override {
+ return false;
+ }
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override {
+ return new EmptyLocalizedStrings;
+ }
+ bool onGlyphMaskNeedsCurrentColor() const override {
+ return false;
+ }
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override
+ {
+ return 0;
+ }
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override
+ {
+ return 0;
+ }
+ int onGetTableTags(SkFontTableTag tags[]) const override { return 0; }
+ size_t onGetTableData(SkFontTableTag, size_t, size_t, void*) const override {
+ return 0;
+ }
+};
+
+} // namespace
+
+SkFontStyle SkTypeface::FromOldStyle(Style oldStyle) {
+ return SkFontStyle((oldStyle & SkTypeface::kBold) ? SkFontStyle::kBold_Weight
+ : SkFontStyle::kNormal_Weight,
+ SkFontStyle::kNormal_Width,
+ (oldStyle & SkTypeface::kItalic) ? SkFontStyle::kItalic_Slant
+ : SkFontStyle::kUpright_Slant);
+}
+
+SkTypeface* SkTypeface::GetDefaultTypeface(Style style) {
+ static SkOnce once[4];
+ static sk_sp<SkTypeface> defaults[4];
+
+ SkASSERT((int)style < 4);
+ once[style]([style] {
+ sk_sp<SkFontMgr> fm(SkFontMgr::RefDefault());
+ auto t = fm->legacyMakeTypeface(nullptr, FromOldStyle(style));
+ defaults[style] = t ? t : SkEmptyTypeface::Make();
+ });
+ return defaults[style].get();
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeDefault() {
+ return sk_ref_sp(GetDefaultTypeface());
+}
+
+uint32_t SkTypeface::UniqueID(const SkTypeface* face) {
+ if (nullptr == face) {
+ face = GetDefaultTypeface();
+ }
+ return face->uniqueID();
+}
+
+bool SkTypeface::Equal(const SkTypeface* facea, const SkTypeface* faceb) {
+ return facea == faceb || SkTypeface::UniqueID(facea) == SkTypeface::UniqueID(faceb);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+ struct DecoderProc {
+ SkFourByteTag id;
+ sk_sp<SkTypeface> (*makeFromStream)(std::unique_ptr<SkStreamAsset>, const SkFontArguments&);
+ };
+
+ std::vector<DecoderProc>* decoders() {
+ static auto* decoders = new std::vector<DecoderProc> {
+ { SkEmptyTypeface::FactoryId, SkEmptyTypeface::MakeFromStream },
+ { SkCustomTypefaceBuilder::FactoryId, SkCustomTypefaceBuilder::MakeFromStream },
+#ifdef SK_TYPEFACE_FACTORY_CORETEXT
+ { SkTypeface_Mac::FactoryId, SkTypeface_Mac::MakeFromStream },
+#endif
+#ifdef SK_TYPEFACE_FACTORY_DIRECTWRITE
+ { DWriteFontTypeface::FactoryId, DWriteFontTypeface::MakeFromStream },
+#endif
+#ifdef SK_TYPEFACE_FACTORY_FREETYPE
+ { SkTypeface_FreeType::FactoryId, SkTypeface_FreeType::MakeFromStream },
+#endif
+ };
+ return decoders;
+ }
+
+} // namespace
+
+sk_sp<SkTypeface> SkTypeface::MakeFromName(const char name[],
+ SkFontStyle fontStyle) {
+ if (nullptr == name && (fontStyle.slant() == SkFontStyle::kItalic_Slant ||
+ fontStyle.slant() == SkFontStyle::kUpright_Slant) &&
+ (fontStyle.weight() == SkFontStyle::kBold_Weight ||
+ fontStyle.weight() == SkFontStyle::kNormal_Weight)) {
+ return sk_ref_sp(GetDefaultTypeface(static_cast<SkTypeface::Style>(
+ (fontStyle.slant() == SkFontStyle::kItalic_Slant ? SkTypeface::kItalic :
+ SkTypeface::kNormal) |
+ (fontStyle.weight() == SkFontStyle::kBold_Weight ? SkTypeface::kBold :
+ SkTypeface::kNormal))));
+ }
+ return SkFontMgr::RefDefault()->legacyMakeTypeface(name, fontStyle);
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromStream(std::unique_ptr<SkStreamAsset> stream, int index) {
+ if (!stream) {
+ return nullptr;
+ }
+ // TODO: Enable this while updating tests (FontHostStream), expectations, and nonativeFonts.
+#if 0
+ SkFontArguments args;
+ args.setCollectionIndex(index);
+ for (const DecoderProc& proc : *decoders()) {
+ sk_sp<SkTypeface> typeface = proc.makeFromStream(stream->duplicate(), args);
+ if (typeface) {
+ return typeface;
+ }
+ }
+#endif
+ return SkFontMgr::RefDefault()->makeFromStream(std::move(stream), index);
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromData(sk_sp<SkData> data, int index) {
+ if (!data) {
+ return nullptr;
+ }
+ return SkFontMgr::RefDefault()->makeFromData(std::move(data), index);
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeFromFile(const char path[], int index) {
+ return SkFontMgr::RefDefault()->makeFromFile(path, index);
+}
+
+sk_sp<SkTypeface> SkTypeface::makeClone(const SkFontArguments& args) const {
+ return this->onMakeClone(args);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkTypeface::Register(
+ FactoryId id,
+ sk_sp<SkTypeface> (*make)(std::unique_ptr<SkStreamAsset>, const SkFontArguments&)) {
+ decoders()->push_back(DecoderProc{id, make});
+}
+
+void SkTypeface::serialize(SkWStream* wstream, SerializeBehavior behavior) const {
+ bool isLocalData = false;
+ SkFontDescriptor desc;
+ this->onGetFontDescriptor(&desc, &isLocalData);
+ if (desc.getFactoryId() == 0) {
+ SkDEBUGF("Factory was not set for %s.\n", desc.getFamilyName());
+ }
+
+ bool shouldSerializeData = false;
+ switch (behavior) {
+ case SerializeBehavior::kDoIncludeData: shouldSerializeData = true; break;
+ case SerializeBehavior::kDontIncludeData: shouldSerializeData = false; break;
+ case SerializeBehavior::kIncludeDataIfLocal: shouldSerializeData = isLocalData; break;
+ }
+
+ if (shouldSerializeData) {
+ int index;
+ desc.setStream(this->openStream(&index));
+ if (desc.hasStream()) {
+ desc.setCollectionIndex(index);
+ }
+
+ int numAxes = this->getVariationDesignPosition(nullptr, 0);
+ if (0 < numAxes) {
+ numAxes = this->getVariationDesignPosition(desc.setVariationCoordinates(numAxes), numAxes);
+ if (numAxes <= 0) {
+ desc.setVariationCoordinates(0);
+ }
+ }
+ }
+ desc.serialize(wstream);
+}
+
+sk_sp<SkData> SkTypeface::serialize(SerializeBehavior behavior) const {
+ SkDynamicMemoryWStream stream;
+ this->serialize(&stream, behavior);
+ return stream.detachAsData();
+}
+
+sk_sp<SkTypeface> SkTypeface::MakeDeserialize(SkStream* stream) {
+ SkFontDescriptor desc;
+ if (!SkFontDescriptor::Deserialize(stream, &desc)) {
+ return nullptr;
+ }
+
+ if (desc.hasStream()) {
+ for (const DecoderProc& proc : *decoders()) {
+ if (proc.id == desc.getFactoryId()) {
+ return proc.makeFromStream(desc.detachStream(), desc.getFontArguments());
+ }
+ }
+
+ SkDEBUGCODE(FactoryId id = desc.getFactoryId();)
+ SkDEBUGF("Could not find factory %c%c%c%c for %s.\n",
+ (id >> 24) & 0xFF, (id >> 16) & 0xFF, (id >> 8) & 0xFF, (id >> 0) & 0xFF,
+ desc.getFamilyName());
+
+ sk_sp<SkFontMgr> defaultFm = SkFontMgr::RefDefault();
+ sk_sp<SkTypeface> typeface = defaultFm->makeFromStream(desc.detachStream(),
+ desc.getFontArguments());
+ if (typeface) {
+ return typeface;
+ }
+ }
+
+ return SkTypeface::MakeFromName(desc.getFamilyName(), desc.getStyle());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkTypeface::glyphMaskNeedsCurrentColor() const {
+ return this->onGlyphMaskNeedsCurrentColor();
+}
+
+int SkTypeface::getVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount) const
+{
+ return this->onGetVariationDesignPosition(coordinates, coordinateCount);
+}
+
+int SkTypeface::getVariationDesignParameters(
+ SkFontParameters::Variation::Axis parameters[], int parameterCount) const
+{
+ return this->onGetVariationDesignParameters(parameters, parameterCount);
+}
+
+int SkTypeface::countTables() const {
+ return this->onGetTableTags(nullptr);
+}
+
+int SkTypeface::getTableTags(SkFontTableTag tags[]) const {
+ return this->onGetTableTags(tags);
+}
+
+size_t SkTypeface::getTableSize(SkFontTableTag tag) const {
+ return this->onGetTableData(tag, 0, ~0U, nullptr);
+}
+
+size_t SkTypeface::getTableData(SkFontTableTag tag, size_t offset, size_t length,
+ void* data) const {
+ return this->onGetTableData(tag, offset, length, data);
+}
+
+sk_sp<SkData> SkTypeface::copyTableData(SkFontTableTag tag) const {
+ return this->onCopyTableData(tag);
+}
+
+sk_sp<SkData> SkTypeface::onCopyTableData(SkFontTableTag tag) const {
+ size_t size = this->getTableSize(tag);
+ if (size) {
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ (void)this->getTableData(tag, 0, size, data->writable_data());
+ return data;
+ }
+ return nullptr;
+}
+
+std::unique_ptr<SkStreamAsset> SkTypeface::openStream(int* ttcIndex) const {
+ int ttcIndexStorage;
+ if (nullptr == ttcIndex) {
+ // So our subclasses don't need to check for null param
+ ttcIndex = &ttcIndexStorage;
+ }
+ return this->onOpenStream(ttcIndex);
+}
+
+std::unique_ptr<SkStreamAsset> SkTypeface::openExistingStream(int* ttcIndex) const {
+ int ttcIndexStorage;
+ if (nullptr == ttcIndex) {
+ // So our subclasses don't need to check for null param
+ ttcIndex = &ttcIndexStorage;
+ }
+ return this->onOpenExistingStream(ttcIndex);
+}
+
+std::unique_ptr<SkScalerContext> SkTypeface::createScalerContext(
+ const SkScalerContextEffects& effects, const SkDescriptor* desc) const {
+ std::unique_ptr<SkScalerContext> scalerContext = this->onCreateScalerContext(effects, desc);
+ SkASSERT(scalerContext);
+ return scalerContext;
+}
+
+void SkTypeface::unicharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const {
+ if (count > 0 && glyphs && uni) {
+ this->onCharsToGlyphs(uni, count, glyphs);
+ }
+}
+
+SkGlyphID SkTypeface::unicharToGlyph(SkUnichar uni) const {
+ SkGlyphID glyphs[1] = { 0 };
+ this->onCharsToGlyphs(&uni, 1, glyphs);
+ return glyphs[0];
+}
+
+namespace {
+class SkConvertToUTF32 {
+public:
+ SkConvertToUTF32() {}
+
+ const SkUnichar* convert(const void* text, size_t byteLength, SkTextEncoding encoding) {
+ const SkUnichar* uni;
+ switch (encoding) {
+ case SkTextEncoding::kUTF8: {
+ uni = fStorage.reset(byteLength);
+ const char* ptr = (const char*)text;
+ const char* end = ptr + byteLength;
+ for (int i = 0; ptr < end; ++i) {
+ fStorage[i] = SkUTF::NextUTF8(&ptr, end);
+ }
+ } break;
+ case SkTextEncoding::kUTF16: {
+ uni = fStorage.reset(byteLength);
+ const uint16_t* ptr = (const uint16_t*)text;
+ const uint16_t* end = ptr + (byteLength >> 1);
+ for (int i = 0; ptr < end; ++i) {
+ fStorage[i] = SkUTF::NextUTF16(&ptr, end);
+ }
+ } break;
+ case SkTextEncoding::kUTF32:
+ uni = (const SkUnichar*)text;
+ break;
+ default:
+ SK_ABORT("unexpected enum");
+ }
+ return uni;
+ }
+
+private:
+ AutoSTMalloc<256, SkUnichar> fStorage;
+};
+}
+
+int SkTypeface::textToGlyphs(const void* text, size_t byteLength, SkTextEncoding encoding,
+ SkGlyphID glyphs[], int maxGlyphCount) const {
+ if (0 == byteLength) {
+ return 0;
+ }
+
+ SkASSERT(text);
+
+ int count = SkFontPriv::CountTextElements(text, byteLength, encoding);
+ if (!glyphs || count > maxGlyphCount) {
+ return count;
+ }
+
+ if (encoding == SkTextEncoding::kGlyphID) {
+ memcpy(glyphs, text, count << 1);
+ return count;
+ }
+
+ SkConvertToUTF32 storage;
+ const SkUnichar* uni = storage.convert(text, byteLength, encoding);
+
+ this->unicharsToGlyphs(uni, count, glyphs);
+ return count;
+}
+
+int SkTypeface::countGlyphs() const {
+ return this->onCountGlyphs();
+}
+
+int SkTypeface::getUnitsPerEm() const {
+    // TODO: should we try to cache this value in the base class?
+ return this->onGetUPEM();
+}
+
+bool SkTypeface::getKerningPairAdjustments(const uint16_t glyphs[], int count,
+ int32_t adjustments[]) const {
+ SkASSERT(count >= 0);
+    // Check for the only legal way to pass nullptr: everything is 0,
+    // in which case the caller just wants to know whether this face can
+    // possibly support kerning (true) or never can (false).
+ if (nullptr == glyphs || nullptr == adjustments) {
+ SkASSERT(nullptr == glyphs);
+ SkASSERT(0 == count);
+ SkASSERT(nullptr == adjustments);
+ }
+ return this->onGetKerningPairAdjustments(glyphs, count, adjustments);
+}
+
+SkTypeface::LocalizedStrings* SkTypeface::createFamilyNameIterator() const {
+ return this->onCreateFamilyNameIterator();
+}
+
+void SkTypeface::getFamilyName(SkString* name) const {
+ SkASSERT(name);
+ this->onGetFamilyName(name);
+}
+
+bool SkTypeface::getPostScriptName(SkString* name) const {
+ return this->onGetPostScriptName(name);
+}
+
+void SkTypeface::getGlyphToUnicodeMap(SkUnichar* dst) const {
+ sk_bzero(dst, sizeof(SkUnichar) * this->countGlyphs());
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> SkTypeface::getAdvancedMetrics() const {
+ std::unique_ptr<SkAdvancedTypefaceMetrics> result = this->onGetAdvancedMetrics();
+ if (result && result->fPostScriptName.isEmpty()) {
+ result->fPostScriptName = result->fFontName;
+ }
+ if (result && result->fType == SkAdvancedTypefaceMetrics::kTrueType_Font) {
+ SkOTTableOS2::Version::V2::Type::Field fsType;
+ constexpr SkFontTableTag os2Tag = SkTEndian_SwapBE32(SkOTTableOS2::TAG);
+ constexpr size_t fsTypeOffset = offsetof(SkOTTableOS2::Version::V2, fsType);
+ if (this->getTableData(os2Tag, fsTypeOffset, sizeof(fsType), &fsType) == sizeof(fsType)) {
+ if (fsType.Bitmap || (fsType.Restricted && !(fsType.PreviewPrint || fsType.Editable))) {
+ result->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ if (fsType.NoSubsetting) {
+ result->fFlags |= SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag;
+ }
+ }
+ }
+ return result;
+}
+
+bool SkTypeface::onGetKerningPairAdjustments(const uint16_t glyphs[], int count,
+ int32_t adjustments[]) const {
+ return false;
+}
+
+std::unique_ptr<SkStreamAsset> SkTypeface::onOpenExistingStream(int* ttcIndex) const {
+ return this->onOpenStream(ttcIndex);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkPaint.h"
+#include "src/core/SkDescriptor.h"
+
+SkRect SkTypeface::getBounds() const {
+ fBoundsOnce([this] {
+ if (!this->onComputeBounds(&fBounds)) {
+ fBounds.setEmpty();
+ }
+ });
+ return fBounds;
+}
+
+bool SkTypeface::onComputeBounds(SkRect* bounds) const {
+ // we use a big size to ensure lots of significant bits from the scalercontext.
+ // then we scale back down to return our final answer (at 1-pt)
+ const SkScalar textSize = 2048;
+ const SkScalar invTextSize = 1 / textSize;
+
+ SkFont font;
+ font.setTypeface(sk_ref_sp(const_cast<SkTypeface*>(this)));
+ font.setSize(textSize);
+ font.setLinearMetrics(true);
+
+ SkScalerContextRec rec;
+ SkScalerContextEffects effects;
+
+ SkScalerContext::MakeRecAndEffectsFromFont(font, &rec, &effects);
+
+ SkAutoDescriptor ad;
+ SkScalerContextEffects noeffects;
+ SkScalerContext::AutoDescriptorGivenRecAndEffects(rec, noeffects, &ad);
+
+ std::unique_ptr<SkScalerContext> ctx = this->createScalerContext(noeffects, ad.getDesc());
+
+ SkFontMetrics fm;
+ ctx->getFontMetrics(&fm);
+ if (!fm.hasBounds()) {
+ return false;
+ }
+ bounds->setLTRB(fm.fXMin * invTextSize, fm.fTop * invTextSize,
+ fm.fXMax * invTextSize, fm.fBottom * invTextSize);
+ return true;
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> SkTypeface::onGetAdvancedMetrics() const {
+ SkDEBUGFAIL("Typefaces that need to work with PDF backend must override this.");
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkTypefaceCache.cpp b/gfx/skia/skia/src/core/SkTypefaceCache.cpp
new file mode 100644
index 0000000000..d94f2bae5c
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypefaceCache.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkMutex.h"
+#include "src/core/SkTypefaceCache.h"
+#include <atomic>
+
+#define TYPEFACE_CACHE_LIMIT 1024
+
+SkTypefaceCache::SkTypefaceCache() {}
+
+void SkTypefaceCache::add(sk_sp<SkTypeface> face) {
+#ifndef SK_DISABLE_TYPEFACE_CACHE
+ if (fTypefaces.size() >= TYPEFACE_CACHE_LIMIT) {
+ this->purge(TYPEFACE_CACHE_LIMIT >> 2);
+ }
+
+ fTypefaces.emplace_back(std::move(face));
+#endif
+}
+
+sk_sp<SkTypeface> SkTypefaceCache::findByProcAndRef(FindProc proc, void* ctx) const {
+#ifndef SK_DISABLE_TYPEFACE_CACHE
+ for (const sk_sp<SkTypeface>& typeface : fTypefaces) {
+ if (proc(typeface.get(), ctx)) {
+ return typeface;
+ }
+ }
+#endif
+ return nullptr;
+}
+
+void SkTypefaceCache::purge(int numToPurge) {
+ int count = fTypefaces.size();
+ int i = 0;
+ while (i < count) {
+ if (fTypefaces[i]->unique()) {
+ fTypefaces.removeShuffle(i);
+ --count;
+ if (--numToPurge == 0) {
+ return;
+ }
+ } else {
+ ++i;
+ }
+ }
+}
+
+void SkTypefaceCache::purgeAll() {
+ this->purge(fTypefaces.size());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkTypefaceCache& SkTypefaceCache::Get() {
+ static SkTypefaceCache gCache;
+ return gCache;
+}
+
+SkTypefaceID SkTypefaceCache::NewTypefaceID() {
+ static std::atomic<int32_t> nextID{1};
+ return nextID.fetch_add(1, std::memory_order_relaxed);
+}
+
+static SkMutex& typeface_cache_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+void SkTypefaceCache::Add(sk_sp<SkTypeface> face) {
+#ifndef SK_DISABLE_TYPEFACE_CACHE
+ SkAutoMutexExclusive ama(typeface_cache_mutex());
+ Get().add(std::move(face));
+#endif
+}
+
+sk_sp<SkTypeface> SkTypefaceCache::FindByProcAndRef(FindProc proc, void* ctx) {
+#ifndef SK_DISABLE_TYPEFACE_CACHE
+ SkAutoMutexExclusive ama(typeface_cache_mutex());
+ return Get().findByProcAndRef(proc, ctx);
+#else
+ return nullptr;
+#endif
+}
+
+void SkTypefaceCache::PurgeAll() {
+#ifndef SK_DISABLE_TYPEFACE_CACHE
+ SkAutoMutexExclusive ama(typeface_cache_mutex());
+ Get().purgeAll();
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_DEBUG
+static bool DumpProc(SkTypeface* face, void* ctx) {
+ SkString n;
+ face->getFamilyName(&n);
+ SkFontStyle s = face->fontStyle();
+ SkTypefaceID id = face->uniqueID();
+ SkDebugf("SkTypefaceCache: face %p typefaceID %d weight %d width %d style %d name %s\n",
+ face, id, s.weight(), s.width(), s.slant(), n.c_str());
+ return false;
+}
+#endif
+
+void SkTypefaceCache::Dump() {
+#ifdef SK_DEBUG
+ (void)Get().findByProcAndRef(DumpProc, nullptr);
+#endif
+}
diff --git a/gfx/skia/skia/src/core/SkTypefaceCache.h b/gfx/skia/skia/src/core/SkTypefaceCache.h
new file mode 100644
index 0000000000..e11f760d45
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypefaceCache.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef SkTypefaceCache_DEFINED
+#define SkTypefaceCache_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkTArray.h"
+
+class SkTypefaceCache {
+public:
+ SkTypefaceCache();
+
+ /**
+ * Callback for FindByProc. Returns true if the given typeface is a match
+ * for the given context. The passed typeface is owned by the cache and is
+ * not additionally ref()ed. The typeface may be in the disposed state.
+ */
+ typedef bool(*FindProc)(SkTypeface*, void* context);
+
+ /**
+ * Add a typeface to the cache. Later, if we need to purge the cache,
+ * typefaces uniquely owned by the cache will be unref()ed.
+ */
+ void add(sk_sp<SkTypeface>);
+
+ /**
+ * Iterate through the cache, calling proc(typeface, ctx) for each typeface.
+ * If proc returns true, then return that typeface.
+ * If it never returns true, return nullptr.
+ */
+ sk_sp<SkTypeface> findByProcAndRef(FindProc proc, void* ctx) const;
+
+ /**
+ * This will unref all of the typefaces in the cache for which the cache
+ * is the only owner. Normally this is handled automatically as needed.
+ * This function is exposed for clients that explicitly want to purge the
+ * cache (e.g. to look for leaks).
+ */
+ void purgeAll();
+
+ /**
+ * Helper: returns a unique typefaceID to pass to the constructor of
+ * your subclass of SkTypeface
+ */
+ static SkTypefaceID NewTypefaceID();
+
+ // These are static wrappers around a global instance of a cache.
+
+ static void Add(sk_sp<SkTypeface>);
+ static sk_sp<SkTypeface> FindByProcAndRef(FindProc proc, void* ctx);
+ static void PurgeAll();
+
+ /**
+ * Debugging only: dumps the status of the typefaces in the cache
+ */
+ static void Dump();
+
+private:
+ static SkTypefaceCache& Get();
+
+ void purge(int count);
+
+ skia_private::TArray<sk_sp<SkTypeface>> fTypefaces;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkTypeface_remote.cpp b/gfx/skia/skia/src/core/SkTypeface_remote.cpp
new file mode 100644
index 0000000000..f43f7157fc
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypeface_remote.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2018 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPaint.h"
+#include "include/private/chromium/SkChromeRemoteGlyphCache.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/core/SkTypeface_remote.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <optional>
+
+SkScalerContextProxy::SkScalerContextProxy(sk_sp<SkTypeface> tf,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager)
+ : SkScalerContext{std::move(tf), effects, desc}
+ , fDiscardableManager{std::move(manager)} {}
+
+bool SkScalerContextProxy::generateAdvance(SkGlyph* glyph) {
+ return false;
+}
+
+void SkScalerContextProxy::generateMetrics(SkGlyph* glyph, SkArenaAlloc*) {
+ TRACE_EVENT1("skia", "generateMetrics", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generateMetrics looking for glyph: %x\n generateMetrics: %s\n",
+ glyph->getPackedID().value(), this->getRec().dump().c_str());
+ }
+
+ glyph->fMaskFormat = fRec.fMaskFormat;
+ glyph->zeroMetrics();
+ fDiscardableManager->notifyCacheMiss(
+ SkStrikeClient::CacheMissType::kGlyphMetrics, fRec.fTextSize);
+}
+
+void SkScalerContextProxy::generateImage(const SkGlyph& glyph) {
+ TRACE_EVENT1("skia", "generateImage", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generateImage: %s\n", this->getRec().dump().c_str());
+ }
+
+    // There is no desperation search here, because if there were an image to be
+    // found it would have been copied over with the metrics search.
+ fDiscardableManager->notifyCacheMiss(
+ SkStrikeClient::CacheMissType::kGlyphImage, fRec.fTextSize);
+}
+
+bool SkScalerContextProxy::generatePath(const SkGlyph& glyph, SkPath* path) {
+ TRACE_EVENT1("skia", "generatePath", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generatePath: %s\n", this->getRec().dump().c_str());
+ }
+
+ fDiscardableManager->notifyCacheMiss(
+ SkStrikeClient::CacheMissType::kGlyphPath, fRec.fTextSize);
+ return false;
+}
+
+sk_sp<SkDrawable> SkScalerContextProxy::generateDrawable(const SkGlyph&) {
+ TRACE_EVENT1("skia", "generateDrawable", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generateDrawable: %s\n", this->getRec().dump().c_str());
+ }
+
+ fDiscardableManager->notifyCacheMiss(
+ SkStrikeClient::CacheMissType::kGlyphDrawable, fRec.fTextSize);
+ return nullptr;
+}
+
+void SkScalerContextProxy::generateFontMetrics(SkFontMetrics* metrics) {
+ TRACE_EVENT1(
+ "skia", "generateFontMetrics", "rec", TRACE_STR_COPY(this->getRec().dump().c_str()));
+ if (this->getProxyTypeface()->isLogging()) {
+ SkDebugf("GlyphCacheMiss generateFontMetrics: %s\n", this->getRec().dump().c_str());
+ }
+
+    // Font metrics aren't really used for rendering, so just zero out the data and return.
+ fDiscardableManager->notifyCacheMiss(
+ SkStrikeClient::CacheMissType::kFontMetrics, fRec.fTextSize);
+ sk_bzero(metrics, sizeof(*metrics));
+}
+
+std::optional<SkTypefaceProxyPrototype>
+SkTypefaceProxyPrototype::MakeFromBuffer(SkReadBuffer& buffer) {
+ SkASSERT(buffer.isValid());
+ const SkTypefaceID typefaceID = buffer.readUInt();
+ const int glyphCount = buffer.readInt();
+ const int32_t styleValue = buffer.read32();
+ const bool isFixedPitch = buffer.readBool();
+ const bool glyphMaskNeedsCurrentColor = buffer.readBool();
+
+ if (buffer.isValid()) {
+ return SkTypefaceProxyPrototype{
+ typefaceID, glyphCount, styleValue, isFixedPitch, glyphMaskNeedsCurrentColor};
+ }
+
+ return std::nullopt;
+}
+
+SkTypefaceProxyPrototype::SkTypefaceProxyPrototype(const SkTypeface& typeface)
+ : fServerTypefaceID{typeface.uniqueID()}
+ , fGlyphCount{typeface.countGlyphs()}
+ , fStyleValue{typeface.fontStyle().fValue}
+ , fIsFixedPitch{typeface.isFixedPitch()}
+ , fGlyphMaskNeedsCurrentColor{typeface.glyphMaskNeedsCurrentColor()} {}
+
+SkTypefaceProxyPrototype::SkTypefaceProxyPrototype(SkTypefaceID typefaceID, int glyphCount,
+ int32_t styleValue, bool isFixedPitch,
+ bool glyphMaskNeedsCurrentColor)
+ : fServerTypefaceID {typefaceID}
+ , fGlyphCount{glyphCount}
+ , fStyleValue{styleValue}
+ , fIsFixedPitch{isFixedPitch}
+ , fGlyphMaskNeedsCurrentColor{glyphMaskNeedsCurrentColor} {}
+
+void SkTypefaceProxyPrototype::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeUInt(fServerTypefaceID);
+ buffer.writeInt(fGlyphCount);
+ buffer.write32(fStyleValue);
+ buffer.writeBool(fIsFixedPitch);
+ buffer.writeBool(fGlyphMaskNeedsCurrentColor);
+}
+
+
+SkTypefaceProxy::SkTypefaceProxy(const SkTypefaceProxyPrototype& prototype,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager,
+ bool isLogging)
+ : SkTypeface{prototype.style(), prototype.fIsFixedPitch}
+ , fTypefaceID{prototype.fServerTypefaceID}
+ , fGlyphCount{prototype.fGlyphCount}
+ , fIsLogging{isLogging}
+ , fGlyphMaskNeedsCurrentColor{prototype.fGlyphMaskNeedsCurrentColor}
+ , fDiscardableManager{std::move(manager)} {}
+
+SkTypefaceProxy::SkTypefaceProxy(SkTypefaceID typefaceID,
+ int glyphCount,
+ const SkFontStyle& style,
+ bool isFixedPitch,
+ bool glyphMaskNeedsCurrentColor,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager,
+ bool isLogging)
+ : SkTypeface{style, isFixedPitch}
+ , fTypefaceID{typefaceID}
+ , fGlyphCount{glyphCount}
+ , fIsLogging{isLogging}
+ , fGlyphMaskNeedsCurrentColor(glyphMaskNeedsCurrentColor)
+ , fDiscardableManager{std::move(manager)} {}
+
+SkTypefaceProxy* SkScalerContextProxy::getProxyTypeface() const {
+ return (SkTypefaceProxy*)this->getTypeface();
+}
diff --git a/gfx/skia/skia/src/core/SkTypeface_remote.h b/gfx/skia/skia/src/core/SkTypeface_remote.h
new file mode 100644
index 0000000000..0f03d38b90
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkTypeface_remote.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRemoteTypeface_DEFINED
+#define SkRemoteTypeface_DEFINED
+
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/chromium/SkChromeRemoteGlyphCache.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkScalerContext.h"
+
+class SkReadBuffer;
+class SkStrikeCache;
+class SkTypefaceProxy;
+class SkWriteBuffer;
+
+class SkScalerContextProxy : public SkScalerContext {
+public:
+ SkScalerContextProxy(sk_sp<SkTypeface> tf,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager);
+
+protected:
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph, SkArenaAlloc*) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(const SkGlyph& glyphID, SkPath* path) override;
+ sk_sp<SkDrawable> generateDrawable(const SkGlyph&) override;
+ void generateFontMetrics(SkFontMetrics* metrics) override;
+ SkTypefaceProxy* getProxyTypeface() const;
+
+private:
+ sk_sp<SkStrikeClient::DiscardableHandleManager> fDiscardableManager;
+ using INHERITED = SkScalerContext;
+};
+
+// SkTypefaceProxyPrototype is the serialization format for SkTypefaceProxy.
+class SkTypefaceProxyPrototype {
+public:
+ static std::optional<SkTypefaceProxyPrototype> MakeFromBuffer(SkReadBuffer& buffer);
+ explicit SkTypefaceProxyPrototype(const SkTypeface& typeface);
+ SkTypefaceProxyPrototype(SkTypefaceID typefaceID,
+ int glyphCount,
+ int32_t styleValue,
+ bool isFixedPitch,
+ bool glyphMaskNeedsCurrentColor);
+
+    void flatten(SkWriteBuffer& buffer) const;
+ SkTypefaceID serverTypefaceID() const { return fServerTypefaceID; }
+
+private:
+ friend class SkTypefaceProxy;
+ SkFontStyle style() const {
+ SkFontStyle style;
+ style.fValue = fStyleValue;
+ return style;
+ }
+ const SkTypefaceID fServerTypefaceID;
+ const int fGlyphCount;
+ const int32_t fStyleValue;
+ const bool fIsFixedPitch;
+ // Used for COLRv0 or COLRv1 fonts that may need the 0xFFFF special palette
+ // index to represent foreground color. This information needs to be on here
+ // to determine how this typeface can be cached.
+ const bool fGlyphMaskNeedsCurrentColor;
+};
+
+class SkTypefaceProxy : public SkTypeface {
+public:
+ SkTypefaceProxy(const SkTypefaceProxyPrototype& prototype,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager,
+ bool isLogging = true);
+
+ SkTypefaceProxy(SkTypefaceID typefaceID,
+ int glyphCount,
+ const SkFontStyle& style,
+ bool isFixedPitch,
+ bool glyphMaskNeedsCurrentColor,
+ sk_sp<SkStrikeClient::DiscardableHandleManager> manager,
+ bool isLogging = true);
+
+ SkTypefaceID remoteTypefaceID() const {return fTypefaceID;}
+
+ int glyphCount() const {return fGlyphCount;}
+
+ bool isLogging() const {return fIsLogging;}
+
+protected:
+ int onGetUPEM() const override { SK_ABORT("Should never be called."); }
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override {
+ SK_ABORT("Should never be called.");
+ }
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ SK_ABORT("Should never be called.");
+ }
+ bool onGlyphMaskNeedsCurrentColor() const override {
+ return fGlyphMaskNeedsCurrentColor;
+ }
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override {
+ SK_ABORT("Should never be called.");
+ }
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override {
+ SK_ABORT("Should never be called.");
+ }
+ void onGetFamilyName(SkString* familyName) const override {
+ // Used by SkStrikeCache::DumpMemoryStatistics.
+ *familyName = "";
+ }
+ bool onGetPostScriptName(SkString*) const override {
+ SK_ABORT("Should never be called.");
+ }
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override {
+ SK_ABORT("Should never be called.");
+ }
+ int onGetTableTags(SkFontTableTag tags[]) const override {
+ SK_ABORT("Should never be called.");
+ }
+ size_t onGetTableData(SkFontTableTag, size_t offset, size_t length, void* data) const override {
+ SK_ABORT("Should never be called.");
+ }
+ std::unique_ptr<SkScalerContext> onCreateScalerContext(
+ const SkScalerContextEffects& effects, const SkDescriptor* desc) const override
+ {
+ return std::make_unique<SkScalerContextProxy>(
+ sk_ref_sp(const_cast<SkTypefaceProxy*>(this)), effects, desc, fDiscardableManager);
+ }
+ void onFilterRec(SkScalerContextRec* rec) const override {
+ // The rec filtering is already applied by the server when generating
+ // the glyphs.
+ }
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override {
+ SK_ABORT("Should never be called.");
+ }
+ void getGlyphToUnicodeMap(SkUnichar*) const override {
+ SK_ABORT("Should never be called.");
+ }
+
+ void getPostScriptGlyphNames(SkString*) const override {
+ SK_ABORT("Should never be called.");
+ }
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override {
+ SK_ABORT("Should never be called.");
+ }
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override {
+ SK_ABORT("Should never be called.");
+ }
+ int onCountGlyphs() const override {
+ return this->glyphCount();
+ }
+
+ void* onGetCTFontRef() const override {
+ SK_ABORT("Should never be called.");
+ }
+
+private:
+ const SkTypefaceID fTypefaceID;
+ const int fGlyphCount;
+ const bool fIsLogging;
+ const bool fGlyphMaskNeedsCurrentColor;
+ sk_sp<SkStrikeClient::DiscardableHandleManager> fDiscardableManager;
+};
+
+#endif // SkRemoteTypeface_DEFINED
diff --git a/gfx/skia/skia/src/core/SkUnPreMultiply.cpp b/gfx/skia/skia/src/core/SkUnPreMultiply.cpp
new file mode 100644
index 0000000000..2b999190ce
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkUnPreMultiply.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+
+SkColor SkUnPreMultiply::PMColorToColor(SkPMColor c) {
+ const unsigned a = SkGetPackedA32(c);
+ const Scale scale = GetScale(a);
+ return SkColorSetARGB(a,
+ ApplyScale(scale, SkGetPackedR32(c)),
+ ApplyScale(scale, SkGetPackedG32(c)),
+ ApplyScale(scale, SkGetPackedB32(c)));
+}
+
+const uint32_t SkUnPreMultiply::gTable[] = {
+ 0x00000000, 0xFF000000, 0x7F800000, 0x55000000, 0x3FC00000, 0x33000000, 0x2A800000, 0x246DB6DB,
+ 0x1FE00000, 0x1C555555, 0x19800000, 0x172E8BA3, 0x15400000, 0x139D89D9, 0x1236DB6E, 0x11000000,
+ 0x0FF00000, 0x0F000000, 0x0E2AAAAB, 0x0D6BCA1B, 0x0CC00000, 0x0C249249, 0x0B9745D1, 0x0B1642C8,
+ 0x0AA00000, 0x0A333333, 0x09CEC4EC, 0x0971C71C, 0x091B6DB7, 0x08CB08D4, 0x08800000, 0x0839CE74,
+ 0x07F80000, 0x07BA2E8C, 0x07800000, 0x07492492, 0x07155555, 0x06E45307, 0x06B5E50D, 0x0689D89E,
+ 0x06600000, 0x063831F4, 0x06124925, 0x05EE23B9, 0x05CBA2E9, 0x05AAAAAB, 0x058B2164, 0x056CEFA9,
+ 0x05500000, 0x05343EB2, 0x0519999A, 0x05000000, 0x04E76276, 0x04CFB2B8, 0x04B8E38E, 0x04A2E8BA,
+ 0x048DB6DB, 0x0479435E, 0x0465846A, 0x045270D0, 0x04400000, 0x042E29F8, 0x041CE73A, 0x040C30C3,
+ 0x03FC0000, 0x03EC4EC5, 0x03DD1746, 0x03CE540F, 0x03C00000, 0x03B21643, 0x03A49249, 0x03976FC6,
+ 0x038AAAAB, 0x037E3F20, 0x03722983, 0x03666666, 0x035AF287, 0x034FCACE, 0x0344EC4F, 0x033A5441,
+ 0x03300000, 0x0325ED09, 0x031C18FA, 0x0312818B, 0x03092492, 0x03000000, 0x02F711DC, 0x02EE5847,
+ 0x02E5D174, 0x02DD7BAF, 0x02D55555, 0x02CD5CD6, 0x02C590B2, 0x02BDEF7C, 0x02B677D4, 0x02AF286C,
+ 0x02A80000, 0x02A0FD5C, 0x029A1F59, 0x029364D9, 0x028CCCCD, 0x0286562E, 0x02800000, 0x0279C952,
+ 0x0273B13B, 0x026DB6DB, 0x0267D95C, 0x026217ED, 0x025C71C7, 0x0256E62A, 0x0251745D, 0x024C1BAD,
+ 0x0246DB6E, 0x0241B2F9, 0x023CA1AF, 0x0237A6F5, 0x0232C235, 0x022DF2DF, 0x02293868, 0x02249249,
+ 0x02200000, 0x021B810F, 0x021714FC, 0x0212BB51, 0x020E739D, 0x020A3D71, 0x02061862, 0x02020408,
+ 0x01FE0000, 0x01FA0BE8, 0x01F62762, 0x01F25214, 0x01EE8BA3, 0x01EAD3BB, 0x01E72A08, 0x01E38E39,
+ 0x01E00000, 0x01DC7F11, 0x01D90B21, 0x01D5A3EA, 0x01D24925, 0x01CEFA8E, 0x01CBB7E3, 0x01C880E5,
+ 0x01C55555, 0x01C234F7, 0x01BF1F90, 0x01BC14E6, 0x01B914C2, 0x01B61EED, 0x01B33333, 0x01B05161,
+ 0x01AD7943, 0x01AAAAAB, 0x01A7E567, 0x01A5294A, 0x01A27627, 0x019FCBD2, 0x019D2A20, 0x019A90E8,
+ 0x01980000, 0x01957741, 0x0192F685, 0x01907DA5, 0x018E0C7D, 0x018BA2E9, 0x018940C5, 0x0186E5F1,
+ 0x01849249, 0x018245AE, 0x01800000, 0x017DC11F, 0x017B88EE, 0x0179574E, 0x01772C23, 0x01750750,
+ 0x0172E8BA, 0x0170D045, 0x016EBDD8, 0x016CB157, 0x016AAAAB, 0x0168A9B9, 0x0166AE6B, 0x0164B8A8,
+ 0x0162C859, 0x0160DD68, 0x015EF7BE, 0x015D1746, 0x015B3BEA, 0x01596596, 0x01579436, 0x0155C7B5,
+ 0x01540000, 0x01523D04, 0x01507EAE, 0x014EC4EC, 0x014D0FAC, 0x014B5EDD, 0x0149B26D, 0x01480A4B,
+ 0x01466666, 0x0144C6B0, 0x01432B17, 0x0141938C, 0x01400000, 0x013E7064, 0x013CE4A9, 0x013B5CC1,
+ 0x0139D89E, 0x01385831, 0x0136DB6E, 0x01356246, 0x0133ECAE, 0x01327A97, 0x01310BF6, 0x012FA0BF,
+ 0x012E38E4, 0x012CD45A, 0x012B7315, 0x012A150B, 0x0128BA2F, 0x01276276, 0x01260DD6, 0x0124BC45,
+ 0x01236DB7, 0x01222222, 0x0120D97D, 0x011F93BC, 0x011E50D8, 0x011D10C5, 0x011BD37A, 0x011A98EF,
+ 0x0119611A, 0x01182BF3, 0x0116F970, 0x0115C988, 0x01149C34, 0x0113716B, 0x01124925, 0x01112359,
+ 0x01100000, 0x010EDF12, 0x010DC087, 0x010CA458, 0x010B8A7E, 0x010A72F0, 0x01095DA9, 0x01084AA0,
+ 0x010739CE, 0x01062B2E, 0x01051EB8, 0x01041466, 0x01030C31, 0x01020612, 0x01010204, 0x01000000
+};
+
+#ifdef BUILD_DIVIDE_TABLE
+void SkUnPreMultiply_BuildTable() {
+ for (unsigned i = 0; i <= 255; i++) {
+ uint32_t scale;
+
+ if (0 == i) {
+ scale = 0;
+ } else {
+ scale = ((255 << 24) + (i >> 1)) / i;
+ }
+
+ SkDebugf(" 0x%08X,", scale);
+ if ((i & 7) == 7) {
+ SkDebugf("\n");
+ }
+
+ // test the result
+ for (int j = 1; j <= i; j++) {
+ uint32_t test = (j * scale + (1 << 23)) >> 24;
+ uint32_t div = roundf(j * 255.0f / i);
+ int diff = SkAbs32(test - div);
+ SkASSERT(diff <= 1 && test <= 255);
+ }
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkVM.cpp b/gfx/skia/skia/src/core/SkVM.cpp
new file mode 100644
index 0000000000..e83b91632d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVM.cpp
@@ -0,0 +1,4117 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkThreadID.h"
+#include "src/base/SkHalf.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkCpu.h"
+#include "src/core/SkEnumerate.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkStreamPriv.h"
+#include "src/core/SkVM.h"
+#include "src/utils/SkVMVisualizer.h"
+#include <algorithm>
+#include <atomic>
+#include <queue>
+
+#if !defined(SK_BUILD_FOR_WIN)
+#include <unistd.h>
+#endif
+
+bool gSkVMAllowJIT{false};
+bool gSkVMJITViaDylib{false};
+
+#if defined(SKVM_JIT)
+ #if defined(SK_BUILD_FOR_WIN)
+ #include "src/base/SkLeanWindows.h"
+ #include <memoryapi.h>
+
+ static void* alloc_jit_buffer(size_t* len) {
+ return VirtualAlloc(NULL, *len, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ }
+ static void remap_as_executable(void* ptr, size_t len) {
+ DWORD old;
+ VirtualProtect(ptr, len, PAGE_EXECUTE_READ, &old);
+ SkASSERT(old == PAGE_READWRITE);
+ }
+ static void unmap_jit_buffer(void* ptr, size_t len) {
+ VirtualFree(ptr, 0, MEM_RELEASE);
+ }
+ static void close_dylib(void* dylib) {
+ SkASSERT(false); // TODO? For now just assert we never make one.
+ }
+ #else
+ #include <dlfcn.h>
+ #include <sys/mman.h>
+
+ static void* alloc_jit_buffer(size_t* len) {
+ // While mprotect and VirtualAlloc both work at page granularity,
+ // mprotect doesn't round up for you, and instead requires *len is at page granularity.
+ const size_t page = sysconf(_SC_PAGESIZE);
+ *len = ((*len + page - 1) / page) * page;
+ return mmap(nullptr,*len, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1,0);
+ }
+ static void remap_as_executable(void* ptr, size_t len) {
+ mprotect(ptr, len, PROT_READ|PROT_EXEC);
+ __builtin___clear_cache((char*)ptr,
+ (char*)ptr + len);
+ }
+ static void unmap_jit_buffer(void* ptr, size_t len) {
+ munmap(ptr, len);
+ }
+ static void close_dylib(void* dylib) {
+ dlclose(dylib);
+ }
+ #endif
+#endif
+
+// JIT code isn't MSAN-instrumented, so we won't see when it uses
+// uninitialized memory, and we'll not see the writes it makes as properly
+// initializing memory. Instead force the interpreter, which should let
+// MSAN see everything our programs do properly.
+//
+// Similarly, we can't get ASAN's checks unless we let it instrument our interpreter.
+#if defined(__has_feature)
+ #if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
+ #define SKVM_JIT_BUT_IGNORE_IT
+ #endif
+#endif
+
+#if defined(SKSL_STANDALONE)
+ // skslc needs to link against this module (for the VM code generator). This module pulls in
+ // color-space code, but attempting to add those transitive dependencies to skslc gets out of
+ // hand. So we terminate the chain here with stub functions. Note that skslc's usage of SkVM
+ // never cares about color management.
+ skvm::F32 sk_program_transfer_fn(
+ skvm::F32 v, skcms_TFType tf_type,
+ skvm::F32 G, skvm::F32 A, skvm::F32 B, skvm::F32 C, skvm::F32 D, skvm::F32 E, skvm::F32 F) {
+ return v;
+ }
+
+ const skcms_TransferFunction* skcms_sRGB_TransferFunction() { return nullptr; }
+ const skcms_TransferFunction* skcms_sRGB_Inverse_TransferFunction() { return nullptr; }
+#endif
+
+namespace skvm {
+
+ static Features detect_features() {
+ static const bool fma =
+ #if defined(SK_CPU_X86)
+ SkCpu::Supports(SkCpu::HSW);
+ #elif defined(SK_CPU_ARM64)
+ true;
+ #else
+ false;
+ #endif
+
+ static const bool fp16 = false; // TODO
+
+ return { fma, fp16 };
+ }
+
+ Builder::Builder(bool createDuplicates)
+ : fFeatures(detect_features()), fCreateDuplicates(createDuplicates) {}
+ Builder::Builder(Features features, bool createDuplicates)
+ : fFeatures(features ), fCreateDuplicates(createDuplicates) {}
+
+ struct Program::Impl {
+ std::vector<InterpreterInstruction> instructions;
+ int regs = 0;
+ int loop = 0;
+ std::vector<int> strides;
+ std::vector<SkSL::TraceHook*> traceHooks;
+ std::unique_ptr<viz::Visualizer> visualizer;
+
+ std::atomic<void*> jit_entry{nullptr}; // TODO: minimal std::memory_orders
+ size_t jit_size = 0;
+ void* dylib = nullptr;
+ };
+
+ // Debugging tools, mostly for printing various data structures out to a stream.
+
+ namespace {
+ struct V { Val id; };
+ struct R { Reg id; };
+ struct Shift { int bits; };
+ struct Splat { int bits; };
+ struct Hex { int bits; };
+ struct TraceHookID { int bits; };
+ // For op `trace_line`
+ struct Line { int bits; };
+ // For op `trace_var`
+ struct VarSlot { int bits; };
+ // For op `trace_enter`/`trace_exit`
+ struct FnIdx { int bits; };
+
+ static void write(SkWStream* o, const char* s) {
+ o->writeText(s);
+ }
+
+ static const char* name(Op op) {
+ switch (op) {
+ #define M(x) case Op::x: return #x;
+ SKVM_OPS(M)
+ #undef M
+ }
+ return "unknown op";
+ }
+
+ static void write(SkWStream* o, Op op) {
+ o->writeText(name(op));
+ }
+ static void write(SkWStream* o, Ptr p) {
+ write(o, "ptr");
+ o->writeDecAsText(p.ix);
+ }
+ static void write(SkWStream* o, V v) {
+ write(o, "v");
+ o->writeDecAsText(v.id);
+ }
+ static void write(SkWStream* o, R r) {
+ write(o, "r");
+ o->writeDecAsText(r.id);
+ }
+ static void write(SkWStream* o, Shift s) {
+ o->writeDecAsText(s.bits);
+ }
+ static void write(SkWStream* o, Splat s) {
+ float f;
+ memcpy(&f, &s.bits, 4);
+ o->writeHexAsText(s.bits);
+ write(o, " (");
+ o->writeScalarAsText(f);
+ write(o, ")");
+ }
+ static void write(SkWStream* o, Hex h) {
+ o->writeHexAsText(h.bits);
+ }
+ static void write(SkWStream* o, TraceHookID h) {
+ o->writeDecAsText(h.bits);
+ }
+ static void write(SkWStream* o, Line d) {
+ write(o, "L");
+ o->writeDecAsText(d.bits);
+ }
+ static void write(SkWStream* o, VarSlot s) {
+ write(o, "$");
+ o->writeDecAsText(s.bits);
+ }
+ static void write(SkWStream* o, FnIdx s) {
+ write(o, "F");
+ o->writeDecAsText(s.bits);
+ }
+ template <typename T, typename... Ts>
+ static void write(SkWStream* o, T first, Ts... rest) {
+ write(o, first);
+ write(o, " ");
+ write(o, rest...);
+ }
+ } // namespace
+
+ static void write_one_instruction(Val id, const OptimizedInstruction& inst, SkWStream* o) {
+ Op op = inst.op;
+ Val x = inst.x,
+ y = inst.y,
+ z = inst.z,
+ w = inst.w;
+ int immA = inst.immA,
+ immB = inst.immB,
+ immC = inst.immC;
+ switch (op) {
+ case Op::assert_true: write(o, op, V{x}, V{y}); break;
+
+ case Op::trace_line: write(o, op, TraceHookID{immA}, V{x}, V{y}, Line{immB}); break;
+ case Op::trace_var: write(o, op, TraceHookID{immA}, V{x}, V{y},
+ VarSlot{immB}, "=", V{z}); break;
+ case Op::trace_enter: write(o, op, TraceHookID{immA}, V{x}, V{y}, FnIdx{immB}); break;
+ case Op::trace_exit: write(o, op, TraceHookID{immA}, V{x}, V{y}, FnIdx{immB}); break;
+ case Op::trace_scope: write(o, op, TraceHookID{immA}, V{x}, V{y}, Shift{immB}); break;
+
+ case Op::store8: write(o, op, Ptr{immA}, V{x} ); break;
+ case Op::store16: write(o, op, Ptr{immA}, V{x} ); break;
+ case Op::store32: write(o, op, Ptr{immA}, V{x} ); break;
+ case Op::store64: write(o, op, Ptr{immA}, V{x},V{y} ); break;
+ case Op::store128: write(o, op, Ptr{immA}, V{x},V{y},V{z},V{w}); break;
+
+ case Op::index: write(o, V{id}, "=", op); break;
+
+ case Op::load8: write(o, V{id}, "=", op, Ptr{immA}); break;
+ case Op::load16: write(o, V{id}, "=", op, Ptr{immA}); break;
+ case Op::load32: write(o, V{id}, "=", op, Ptr{immA}); break;
+ case Op::load64: write(o, V{id}, "=", op, Ptr{immA}, Hex{immB}); break;
+ case Op::load128: write(o, V{id}, "=", op, Ptr{immA}, Hex{immB}); break;
+
+ case Op::gather8: write(o, V{id}, "=", op, Ptr{immA}, Hex{immB}, V{x}); break;
+ case Op::gather16: write(o, V{id}, "=", op, Ptr{immA}, Hex{immB}, V{x}); break;
+ case Op::gather32: write(o, V{id}, "=", op, Ptr{immA}, Hex{immB}, V{x}); break;
+
+ case Op::uniform32: write(o, V{id}, "=", op, Ptr{immA}, Hex{immB}); break;
+ case Op::array32: write(o, V{id}, "=", op, Ptr{immA}, Hex{immB}, Hex{immC}); break;
+
+ case Op::splat: write(o, V{id}, "=", op, Splat{immA}); break;
+
+ case Op:: add_f32: write(o, V{id}, "=", op, V{x}, V{y} ); break;
+ case Op:: sub_f32: write(o, V{id}, "=", op, V{x}, V{y} ); break;
+ case Op:: mul_f32: write(o, V{id}, "=", op, V{x}, V{y} ); break;
+ case Op:: div_f32: write(o, V{id}, "=", op, V{x}, V{y} ); break;
+ case Op:: min_f32: write(o, V{id}, "=", op, V{x}, V{y} ); break;
+ case Op:: max_f32: write(o, V{id}, "=", op, V{x}, V{y} ); break;
+ case Op:: fma_f32: write(o, V{id}, "=", op, V{x}, V{y}, V{z}); break;
+ case Op:: fms_f32: write(o, V{id}, "=", op, V{x}, V{y}, V{z}); break;
+ case Op::fnma_f32: write(o, V{id}, "=", op, V{x}, V{y}, V{z}); break;
+
+
+ case Op::sqrt_f32: write(o, V{id}, "=", op, V{x}); break;
+
+ case Op:: eq_f32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op::neq_f32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op:: gt_f32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op::gte_f32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+
+
+ case Op::add_i32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op::sub_i32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op::mul_i32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+
+ case Op::shl_i32: write(o, V{id}, "=", op, V{x}, Shift{immA}); break;
+ case Op::shr_i32: write(o, V{id}, "=", op, V{x}, Shift{immA}); break;
+ case Op::sra_i32: write(o, V{id}, "=", op, V{x}, Shift{immA}); break;
+
+ case Op::eq_i32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op::gt_i32: write(o, V{id}, "=", op, V{x}, V{y}); break;
+
+
+ case Op::bit_and : write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op::bit_or : write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op::bit_xor : write(o, V{id}, "=", op, V{x}, V{y}); break;
+ case Op::bit_clear: write(o, V{id}, "=", op, V{x}, V{y}); break;
+
+ case Op::select: write(o, V{id}, "=", op, V{x}, V{y}, V{z}); break;
+
+ case Op::ceil: write(o, V{id}, "=", op, V{x}); break;
+ case Op::floor: write(o, V{id}, "=", op, V{x}); break;
+ case Op::to_f32: write(o, V{id}, "=", op, V{x}); break;
+ case Op::to_fp16: write(o, V{id}, "=", op, V{x}); break;
+ case Op::from_fp16: write(o, V{id}, "=", op, V{x}); break;
+ case Op::trunc: write(o, V{id}, "=", op, V{x}); break;
+ case Op::round: write(o, V{id}, "=", op, V{x}); break;
+
+ case Op::duplicate: write(o, V{id}, "=", op, Hex{immA}); break;
+ }
+
+ write(o, "\n");
+ }
+
+ void Builder::dump(SkWStream* o) const {
+ SkDebugfStream debug;
+ if (!o) { o = &debug; }
+
+ std::vector<OptimizedInstruction> optimized = this->optimize();
+ o->writeDecAsText(optimized.size());
+ o->writeText(" values (originally ");
+ o->writeDecAsText(fProgram.size());
+ o->writeText("):\n");
+ for (Val id = 0; id < (Val)optimized.size(); id++) {
+ const OptimizedInstruction& inst = optimized[id];
+ write(o, inst.can_hoist ? "↑ " : " ");
+ write_one_instruction(id, inst, o);
+ }
+ }
+
+ void Program::visualize(SkWStream* output) const {
+ if (fImpl->visualizer) {
+ fImpl->visualizer->dump(output);
+ }
+ }
+
+ viz::Visualizer* Program::visualizer() { return fImpl->visualizer.get(); }
+ void Program::dump(SkWStream* o) const {
+ SkDebugfStream debug;
+ if (!o) { o = &debug; }
+
+ o->writeDecAsText(fImpl->regs);
+ o->writeText(" registers, ");
+ o->writeDecAsText(fImpl->instructions.size());
+ o->writeText(" instructions:\n");
+ for (Val i = 0; i < (Val)fImpl->instructions.size(); i++) {
+ if (i == fImpl->loop) { write(o, "loop:\n"); }
+ o->writeDecAsText(i);
+ o->writeText("\t");
+ if (i >= fImpl->loop) { write(o, " "); }
+ const InterpreterInstruction& inst = fImpl->instructions[i];
+ Op op = inst.op;
+ Reg d = inst.d,
+ x = inst.x,
+ y = inst.y,
+ z = inst.z,
+ w = inst.w;
+ int immA = inst.immA,
+ immB = inst.immB,
+ immC = inst.immC;
+ switch (op) {
+ case Op::assert_true: write(o, op, R{x}, R{y}); break;
+
+ case Op::trace_line: write(o, op, TraceHookID{immA},
+ R{x}, R{y}, Line{immB}); break;
+ case Op::trace_var: write(o, op, TraceHookID{immA}, R{x}, R{y},
+ VarSlot{immB}, "=", R{z}); break;
+ case Op::trace_enter: write(o, op, TraceHookID{immA},
+ R{x}, R{y}, FnIdx{immB}); break;
+ case Op::trace_exit: write(o, op, TraceHookID{immA},
+ R{x}, R{y}, FnIdx{immB}); break;
+ case Op::trace_scope: write(o, op, TraceHookID{immA},
+ R{x}, R{y}, Shift{immB}); break;
+
+ case Op::store8: write(o, op, Ptr{immA}, R{x} ); break;
+ case Op::store16: write(o, op, Ptr{immA}, R{x} ); break;
+ case Op::store32: write(o, op, Ptr{immA}, R{x} ); break;
+ case Op::store64: write(o, op, Ptr{immA}, R{x}, R{y} ); break;
+ case Op::store128: write(o, op, Ptr{immA}, R{x}, R{y}, R{z}, R{w}); break;
+
+ case Op::index: write(o, R{d}, "=", op); break;
+
+ case Op::load8: write(o, R{d}, "=", op, Ptr{immA}); break;
+ case Op::load16: write(o, R{d}, "=", op, Ptr{immA}); break;
+ case Op::load32: write(o, R{d}, "=", op, Ptr{immA}); break;
+ case Op::load64: write(o, R{d}, "=", op, Ptr{immA}, Hex{immB}); break;
+ case Op::load128: write(o, R{d}, "=", op, Ptr{immA}, Hex{immB}); break;
+
+ case Op::gather8: write(o, R{d}, "=", op, Ptr{immA}, Hex{immB}, R{x}); break;
+ case Op::gather16: write(o, R{d}, "=", op, Ptr{immA}, Hex{immB}, R{x}); break;
+ case Op::gather32: write(o, R{d}, "=", op, Ptr{immA}, Hex{immB}, R{x}); break;
+
+ case Op::uniform32: write(o, R{d}, "=", op, Ptr{immA}, Hex{immB}); break;
+ case Op::array32: write(o, R{d}, "=", op, Ptr{immA}, Hex{immB}, Hex{immC}); break;
+
+ case Op::splat: write(o, R{d}, "=", op, Splat{immA}); break;
+
+ case Op::add_f32: write(o, R{d}, "=", op, R{x}, R{y} ); break;
+ case Op::sub_f32: write(o, R{d}, "=", op, R{x}, R{y} ); break;
+ case Op::mul_f32: write(o, R{d}, "=", op, R{x}, R{y} ); break;
+ case Op::div_f32: write(o, R{d}, "=", op, R{x}, R{y} ); break;
+ case Op::min_f32: write(o, R{d}, "=", op, R{x}, R{y} ); break;
+ case Op::max_f32: write(o, R{d}, "=", op, R{x}, R{y} ); break;
+ case Op::fma_f32: write(o, R{d}, "=", op, R{x}, R{y}, R{z}); break;
+ case Op::fms_f32: write(o, R{d}, "=", op, R{x}, R{y}, R{z}); break;
+ case Op::fnma_f32: write(o, R{d}, "=", op, R{x}, R{y}, R{z}); break;
+
+ case Op::sqrt_f32: write(o, R{d}, "=", op, R{x}); break;
+
+ case Op:: eq_f32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op::neq_f32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op:: gt_f32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op::gte_f32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+
+
+ case Op::add_i32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op::sub_i32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op::mul_i32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+
+ case Op::shl_i32: write(o, R{d}, "=", op, R{x}, Shift{immA}); break;
+ case Op::shr_i32: write(o, R{d}, "=", op, R{x}, Shift{immA}); break;
+ case Op::sra_i32: write(o, R{d}, "=", op, R{x}, Shift{immA}); break;
+
+ case Op::eq_i32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op::gt_i32: write(o, R{d}, "=", op, R{x}, R{y}); break;
+
+ case Op::bit_and : write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op::bit_or : write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op::bit_xor : write(o, R{d}, "=", op, R{x}, R{y}); break;
+ case Op::bit_clear: write(o, R{d}, "=", op, R{x}, R{y}); break;
+
+ case Op::select: write(o, R{d}, "=", op, R{x}, R{y}, R{z}); break;
+
+ case Op::ceil: write(o, R{d}, "=", op, R{x}); break;
+ case Op::floor: write(o, R{d}, "=", op, R{x}); break;
+ case Op::to_f32: write(o, R{d}, "=", op, R{x}); break;
+ case Op::to_fp16: write(o, R{d}, "=", op, R{x}); break;
+ case Op::from_fp16: write(o, R{d}, "=", op, R{x}); break;
+ case Op::trunc: write(o, R{d}, "=", op, R{x}); break;
+ case Op::round: write(o, R{d}, "=", op, R{x}); break;
+
+ case Op::duplicate: write(o, R{d}, "=", op, Hex{immA}); break;
+ }
+ write(o, "\n");
+ }
+ }
+ std::vector<Instruction> eliminate_dead_code(std::vector<Instruction> program,
+ viz::Visualizer* visualizer) {
+ // Determine which Instructions are live by working back from side effects.
+ std::vector<bool> live(program.size(), false);
+ for (Val id = program.size(); id--;) {
+ if (live[id] || has_side_effect(program[id].op)) {
+ live[id] = true;
+ const Instruction& inst = program[id];
+ for (Val arg : {inst.x, inst.y, inst.z, inst.w}) {
+ if (arg != NA) { live[arg] = true; }
+ }
+ }
+ }
+
+ // Rewrite the program with only live Instructions:
+ // - remap IDs in live Instructions to what they'll be once dead Instructions are removed;
+ // - then actually remove the dead Instructions.
+ std::vector<Val> new_id(program.size(), NA);
+ for (Val id = 0, next = 0; id < (Val)program.size(); id++) {
+ if (live[id]) {
+ Instruction& inst = program[id];
+ for (Val* arg : {&inst.x, &inst.y, &inst.z, &inst.w}) {
+ if (*arg != NA) {
+ *arg = new_id[*arg];
+ SkASSERT(*arg != NA);
+ }
+ }
+ new_id[id] = next++;
+ }
+ }
+
+ if (visualizer) {
+ visualizer->addInstructions(program);
+ visualizer->markAsDeadCode(live, new_id);
+ }
+
+ // Eliminate any non-live ops.
+ auto it = std::remove_if(program.begin(), program.end(), [&](const Instruction& inst) {
+ Val id = (Val)(&inst - program.data());
+ return !live[id];
+ });
+ program.erase(it, program.end());
+
+ return program;
+ }
+
+ std::vector<OptimizedInstruction> finalize(const std::vector<Instruction> program,
+ viz::Visualizer* visualizer) {
+ std::vector<OptimizedInstruction> optimized(program.size());
+ for (Val id = 0; id < (Val)program.size(); id++) {
+ Instruction inst = program[id];
+ optimized[id] = {inst.op, inst.x,inst.y,inst.z,inst.w,
+ inst.immA,inst.immB,inst.immC,
+ /*death=*/id, /*can_hoist=*/true};
+ }
+
+ // Each Instruction's inputs need to live at least until that Instruction issues.
+ for (Val id = 0; id < (Val)optimized.size(); id++) {
+ OptimizedInstruction& inst = optimized[id];
+ for (Val arg : {inst.x, inst.y, inst.z, inst.w}) {
+ // (We're walking in order, so this is the same as max()ing with the existing Val.)
+ if (arg != NA) { optimized[arg].death = id; }
+ }
+ }
+
+ // Mark which values don't depend on the loop and can be hoisted.
+ for (OptimizedInstruction& inst : optimized) {
+ // Varying loads (and gathers) and stores cannot be hoisted out of the loop.
+ if (is_always_varying(inst.op) || is_trace(inst.op)) {
+ inst.can_hoist = false;
+ }
+
+ // If any of an instruction's inputs can't be hoisted, it can't be hoisted itself.
+ if (inst.can_hoist) {
+ for (Val arg : {inst.x, inst.y, inst.z, inst.w}) {
+ if (arg != NA) { inst.can_hoist &= optimized[arg].can_hoist; }
+ }
+ }
+ }
+
+ // Extend the lifetime of any hoisted value that's used in the loop to infinity.
+ for (OptimizedInstruction& inst : optimized) {
+ if (!inst.can_hoist /*i.e. we're in the loop, so the arguments are used-in-loop*/) {
+ for (Val arg : {inst.x, inst.y, inst.z, inst.w}) {
+ if (arg != NA && optimized[arg].can_hoist) {
+ optimized[arg].death = (Val)program.size();
+ }
+ }
+ }
+ }
+
+ if (visualizer) {
+ visualizer->finalize(program, optimized);
+ }
+
+ return optimized;
+ }
+
+ std::vector<OptimizedInstruction> Builder::optimize(viz::Visualizer* visualizer) const {
+ std::vector<Instruction> program = this->program();
+ program = eliminate_dead_code(std::move(program), visualizer);
+ return finalize (std::move(program), visualizer);
+ }
+
+ Program Builder::done(const char* debug_name,
+ bool allow_jit) const {
+ return this->done(debug_name, allow_jit, /*visualizer=*/nullptr);
+ }
+
+ Program Builder::done(const char* debug_name,
+ bool allow_jit,
+ std::unique_ptr<viz::Visualizer> visualizer) const {
+ char buf[64] = "skvm-jit-";
+ if (!debug_name) {
+ *SkStrAppendU32(buf+9, this->hash()) = '\0';
+ debug_name = buf;
+ }
+
+ auto optimized = this->optimize(visualizer ? visualizer.get() : nullptr);
+ return {optimized,
+ std::move(visualizer),
+ fStrides,
+ fTraceHooks, debug_name, allow_jit};
+ }
+
+ uint64_t Builder::hash() const {
+ uint32_t lo = SkOpts::hash(fProgram.data(), fProgram.size() * sizeof(Instruction), 0),
+ hi = SkOpts::hash(fProgram.data(), fProgram.size() * sizeof(Instruction), 1);
+ return (uint64_t)lo | (uint64_t)hi << 32;
+ }
+
+ bool operator!=(Ptr a, Ptr b) { return a.ix != b.ix; }
+
+ bool operator==(const Instruction& a, const Instruction& b) {
+ return a.op == b.op
+ && a.x == b.x
+ && a.y == b.y
+ && a.z == b.z
+ && a.w == b.w
+ && a.immA == b.immA
+ && a.immB == b.immB
+ && a.immC == b.immC;
+ }
+
+ uint32_t InstructionHash::operator()(const Instruction& inst, uint32_t seed) const {
+ return SkOpts::hash(&inst, sizeof(inst), seed);
+ }
+
+
+ // Most instructions produce a value and return it by ID,
+ // the value-producing instruction's own index in the program vector.
+ Val Builder::push(Instruction inst) {
+ // Basic common subexpression elimination:
+ // if we've already seen this exact Instruction, use it instead of creating a new one.
+ //
+ // But we never dedup loads or stores: an intervening store could change that memory.
+ // Uniforms and gathers touch only uniform memory, so they're fine to dedup,
+ // and index is varying but doesn't touch memory, so it's fine to dedup too.
+ if (!touches_varying_memory(inst.op) && !is_trace(inst.op)) {
+ if (Val* id = fIndex.find(inst)) {
+ if (fCreateDuplicates) {
+ inst.op = Op::duplicate;
+ inst.immA = *id;
+ fProgram.push_back(inst);
+ }
+ return *id;
+ }
+ }
+
+ Val id = static_cast<Val>(fProgram.size());
+ fProgram.push_back(inst);
+ fIndex.set(inst, id);
+ return id;
+ }
+
+ Ptr Builder::arg(int stride) {
+ int ix = (int)fStrides.size();
+ fStrides.push_back(stride);
+ return {ix};
+ }
+
+ void Builder::assert_true(I32 cond, I32 debug) {
+ #ifdef SK_DEBUG
+ int imm;
+ if (this->allImm(cond.id,&imm)) { SkASSERT(imm); return; }
+ (void)push(Op::assert_true, cond.id, debug.id);
+ #endif
+ }
+
+ int Builder::attachTraceHook(SkSL::TraceHook* hook) {
+ int traceHookID = (int)fTraceHooks.size();
+ fTraceHooks.push_back(hook);
+ return traceHookID;
+ }
+
+ bool Builder::mergeMasks(I32& mask, I32& traceMask) {
+ if (this->isImm(mask.id, 0)) { return false; }
+ if (this->isImm(traceMask.id, 0)) { return false; }
+ if (this->isImm(mask.id, ~0)) { mask = traceMask; }
+ if (this->isImm(traceMask.id,~0)) { traceMask = mask; }
+ return true;
+ }
+
+ void Builder::trace_line(int traceHookID, I32 mask, I32 traceMask, int line) {
+ SkASSERT(traceHookID >= 0);
+ SkASSERT(traceHookID < (int)fTraceHooks.size());
+ if (!this->mergeMasks(mask, traceMask)) { return; }
+ (void)push(Op::trace_line, mask.id,traceMask.id,NA,NA, traceHookID, line);
+ }
+ void Builder::trace_var(int traceHookID, I32 mask, I32 traceMask, int slot, I32 val) {
+ SkASSERT(traceHookID >= 0);
+ SkASSERT(traceHookID < (int)fTraceHooks.size());
+ if (!this->mergeMasks(mask, traceMask)) { return; }
+ (void)push(Op::trace_var, mask.id,traceMask.id,val.id,NA, traceHookID, slot);
+ }
+ void Builder::trace_enter(int traceHookID, I32 mask, I32 traceMask, int fnIdx) {
+ SkASSERT(traceHookID >= 0);
+ SkASSERT(traceHookID < (int)fTraceHooks.size());
+ if (!this->mergeMasks(mask, traceMask)) { return; }
+ (void)push(Op::trace_enter, mask.id,traceMask.id,NA,NA, traceHookID, fnIdx);
+ }
+ void Builder::trace_exit(int traceHookID, I32 mask, I32 traceMask, int fnIdx) {
+ SkASSERT(traceHookID >= 0);
+ SkASSERT(traceHookID < (int)fTraceHooks.size());
+ if (!this->mergeMasks(mask, traceMask)) { return; }
+ (void)push(Op::trace_exit, mask.id,traceMask.id,NA,NA, traceHookID, fnIdx);
+ }
+ void Builder::trace_scope(int traceHookID, I32 mask, I32 traceMask, int delta) {
+ SkASSERT(traceHookID >= 0);
+ SkASSERT(traceHookID < (int)fTraceHooks.size());
+ if (!this->mergeMasks(mask, traceMask)) { return; }
+ (void)push(Op::trace_scope, mask.id,traceMask.id,NA,NA, traceHookID, delta);
+ }
+
+ void Builder::store8 (Ptr ptr, I32 val) { (void)push(Op::store8 , val.id,NA,NA,NA, ptr.ix); }
+ void Builder::store16(Ptr ptr, I32 val) { (void)push(Op::store16, val.id,NA,NA,NA, ptr.ix); }
+ void Builder::store32(Ptr ptr, I32 val) { (void)push(Op::store32, val.id,NA,NA,NA, ptr.ix); }
+ void Builder::store64(Ptr ptr, I32 lo, I32 hi) {
+ (void)push(Op::store64, lo.id,hi.id,NA,NA, ptr.ix);
+ }
+ void Builder::store128(Ptr ptr, I32 x, I32 y, I32 z, I32 w) {
+ (void)push(Op::store128, x.id,y.id,z.id,w.id, ptr.ix);
+ }
+
+ I32 Builder::index() { return {this, push(Op::index)}; }
+
+ I32 Builder::load8 (Ptr ptr) { return {this, push(Op::load8 , NA,NA,NA,NA, ptr.ix) }; }
+ I32 Builder::load16(Ptr ptr) { return {this, push(Op::load16, NA,NA,NA,NA, ptr.ix) }; }
+ I32 Builder::load32(Ptr ptr) { return {this, push(Op::load32, NA,NA,NA,NA, ptr.ix) }; }
+ I32 Builder::load64(Ptr ptr, int lane) {
+ return {this, push(Op::load64 , NA,NA,NA,NA, ptr.ix,lane) };
+ }
+ I32 Builder::load128(Ptr ptr, int lane) {
+ return {this, push(Op::load128, NA,NA,NA,NA, ptr.ix,lane) };
+ }
+
+ I32 Builder::gather8 (UPtr ptr, int offset, I32 index) {
+ return {this, push(Op::gather8 , index.id,NA,NA,NA, ptr.ix,offset)};
+ }
+ I32 Builder::gather16(UPtr ptr, int offset, I32 index) {
+ return {this, push(Op::gather16, index.id,NA,NA,NA, ptr.ix,offset)};
+ }
+ I32 Builder::gather32(UPtr ptr, int offset, I32 index) {
+ return {this, push(Op::gather32, index.id,NA,NA,NA, ptr.ix,offset)};
+ }
+
+ I32 Builder::uniform32(UPtr ptr, int offset) {
+ return {this, push(Op::uniform32, NA,NA,NA,NA, ptr.ix, offset)};
+ }
+
+ // Note: this converts the array index into a byte offset for the op.
+ I32 Builder::array32 (UPtr ptr, int offset, int index) {
+ return {this, push(Op::array32, NA,NA,NA,NA, ptr.ix, offset, index * sizeof(int))};
+ }
+
+ I32 Builder::splat(int n) { return {this, push(Op::splat, NA,NA,NA,NA, n) }; }
+
+ template <typename F32_or_I32>
+ void Builder::canonicalizeIdOrder(F32_or_I32& x, F32_or_I32& y) {
+ bool immX = fProgram[x.id].op == Op::splat;
+ bool immY = fProgram[y.id].op == Op::splat;
+ if (immX != immY) {
+ if (immX) {
+ // Prefer (val, imm) over (imm, val).
+ std::swap(x, y);
+ }
+ return;
+ }
+ if (x.id > y.id) {
+ // Prefer (lower-ID, higher-ID) over (higher-ID, lower-ID).
+ std::swap(x, y);
+ }
+ }
+
+ // Be careful peepholing float math! Transformations you might expect to
+ // be legal can fail in the face of NaN/Inf, e.g. 0*x is not always 0.
+ // Float peepholes must pass this equivalence test for all ~4B floats:
+ //
+ // bool equiv(float x, float y) { return (x == y) || (isnanf(x) && isnanf(y)); }
+ //
+ // unsigned bits = 0;
+ // do {
+ // float f;
+ // memcpy(&f, &bits, 4);
+ // if (!equiv(f, ...)) {
+ // abort();
+ // }
+ // } while (++bits != 0);
+
+ F32 Builder::add(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X+Y); }
+ this->canonicalizeIdOrder(x, y);
+ if (this->isImm(y.id, 0.0f)) { return x; } // x+0 == x
+
+ if (fFeatures.fma) {
+ if (fProgram[x.id].op == Op::mul_f32) {
+ return {this, this->push(Op::fma_f32, fProgram[x.id].x, fProgram[x.id].y, y.id)};
+ }
+ if (fProgram[y.id].op == Op::mul_f32) {
+ return {this, this->push(Op::fma_f32, fProgram[y.id].x, fProgram[y.id].y, x.id)};
+ }
+ }
+ return {this, this->push(Op::add_f32, x.id, y.id)};
+ }
+
+ F32 Builder::sub(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X-Y); }
+ if (this->isImm(y.id, 0.0f)) { return x; } // x-0 == x
+ if (fFeatures.fma) {
+ if (fProgram[x.id].op == Op::mul_f32) {
+ return {this, this->push(Op::fms_f32, fProgram[x.id].x, fProgram[x.id].y, y.id)};
+ }
+ if (fProgram[y.id].op == Op::mul_f32) {
+ return {this, this->push(Op::fnma_f32, fProgram[y.id].x, fProgram[y.id].y, x.id)};
+ }
+ }
+ return {this, this->push(Op::sub_f32, x.id, y.id)};
+ }
+
+ F32 Builder::mul(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X*Y); }
+ this->canonicalizeIdOrder(x, y);
+ if (this->isImm(y.id, 1.0f)) { return x; } // x*1 == x
+ return {this, this->push(Op::mul_f32, x.id, y.id)};
+ }
+
+ F32 Builder::fast_mul(F32 x, F32 y) {
+ if (this->isImm(x.id, 0.0f) || this->isImm(y.id, 0.0f)) { return splat(0.0f); }
+ return mul(x,y);
+ }
+
+ F32 Builder::div(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(sk_ieee_float_divide(X,Y)); }
+ if (this->isImm(y.id, 1.0f)) { return x; } // x/1 == x
+ return {this, this->push(Op::div_f32, x.id, y.id)};
+ }
+
+ F32 Builder::sqrt(F32 x) {
+ if (float X; this->allImm(x.id,&X)) { return splat(std::sqrt(X)); }
+ return {this, this->push(Op::sqrt_f32, x.id)};
+ }
+
+ // See http://www.machinedlearnings.com/2011/06/fast-approximate-logarithm-exponential.html.
+ F32 Builder::approx_log2(F32 x) {
+ // e - 127 is a fair approximation of log2(x) in its own right...
+ F32 e = mul(to_F32(pun_to_I32(x)), splat(1.0f / (1<<23)));
+
+ // ... but using the mantissa to refine its error is _much_ better.
+ F32 m = pun_to_F32(bit_or(bit_and(pun_to_I32(x), 0x007fffff),
+ 0x3f000000));
+ F32 approx = sub(e, 124.225514990f);
+ approx = sub(approx, mul(1.498030302f, m));
+ approx = sub(approx, div(1.725879990f, add(0.3520887068f, m)));
+
+ return approx;
+ }
+
+ F32 Builder::approx_pow2(F32 x) {
+ constexpr float kInfinityBits = 0x7f800000;
+
+ F32 f = fract(x);
+ F32 approx = add(x, 121.274057500f);
+ approx = sub(approx, mul( 1.490129070f, f));
+ approx = add(approx, div(27.728023300f, sub(4.84252568f, f)));
+ approx = mul(1.0f * (1<<23), approx);
+ approx = clamp(approx, 0, kInfinityBits); // guard against underflow/overflow
+
+ return pun_to_F32(round(approx));
+ }
+
+ F32 Builder::approx_powf(F32 x, F32 y) {
+ // TODO: assert this instead? Sometimes x is very slightly negative. See skia:10210.
+ x = max(0.0f, x);
+
+ if (this->isImm(x.id, 1.0f)) { return x; } // 1^y is one
+ if (this->isImm(x.id, 2.0f)) { return this->approx_pow2(y); } // 2^y is pow2(y)
+ if (this->isImm(y.id, 0.5f)) { return this->sqrt(x); } // x^0.5 is sqrt(x)
+ if (this->isImm(y.id, 1.0f)) { return x; } // x^1 is x
+ if (this->isImm(y.id, 2.0f)) { return x * x; } // x^2 is x*x
+
+ auto is_x = bit_or(eq(x, 0.0f),
+ eq(x, 1.0f));
+ return select(is_x, x, approx_pow2(mul(approx_log2(x), y)));
+ }
+
+ // Bhaskara I's sine approximation
+ // 16x(pi - x) / (5*pi^2 - 4x(pi - x)
+ // ... divide by 4
+ // 4x(pi - x) / 5*pi^2/4 - x(pi - x)
+ //
+ // This is a good approximation only for 0 <= x <= pi, so we use symmetries to get
+ // radians into that range first.
+ //
+ F32 Builder::approx_sin(F32 radians) {
+ constexpr float Pi = SK_ScalarPI;
+ // x = radians mod 2pi
+ F32 x = fract(radians * (0.5f/Pi)) * (2*Pi);
+ I32 neg = x > Pi; // are we pi < x < 2pi --> need to negate result
+ x = select(neg, x - Pi, x);
+
+ F32 pair = x * (Pi - x);
+ x = 4.0f * pair / ((5*Pi*Pi/4) - pair);
+ x = select(neg, -x, x);
+ return x;
+ }
+
+ /* "GENERATING ACCURATE VALUES FOR THE TANGENT FUNCTION"
+ https://mae.ufl.edu/~uhk/ACCURATE-TANGENT.pdf
+
+ approx = x + (1/3)x^3 + (2/15)x^5 + (17/315)x^7 + (62/2835)x^9
+
+ Some simplifications:
+ 1. tan(x) is periodic, -PI/2 < x < PI/2
+ 2. tan(x) is odd, so tan(-x) = -tan(x)
+ 3. Our polynomial approximation is best near zero, so we use the following identity
+ tan(x) + tan(y)
+ tan(x + y) = -----------------
+ 1 - tan(x)*tan(y)
+ tan(PI/4) = 1
+
+ So for x > PI/8, we do the following refactor:
+ x' = x - PI/4
+
+ 1 + tan(x')
+ tan(x) = ------------
+ 1 - tan(x')
+ */
+ F32 Builder::approx_tan(F32 x) {
+ constexpr float Pi = SK_ScalarPI;
+ // periodic between -pi/2 ... pi/2
+ // shift to 0...Pi, scale 1/Pi to get into 0...1, then fract, scale-up, shift-back
+ x = fract((1/Pi)*x + 0.5f) * Pi - (Pi/2);
+
+ I32 neg = (x < 0.0f);
+ x = select(neg, -x, x);
+
+ // minimize total error by shifting if x > pi/8
+ I32 use_quotient = (x > (Pi/8));
+ x = select(use_quotient, x - (Pi/4), x);
+
+ // 9th order poly = 4th order(x^2) * x
+ x = poly(x*x, 62/2835.0f, 17/315.0f, 2/15.0f, 1/3.0f, 1.0f) * x;
+ x = select(use_quotient, (1+x)/(1-x), x);
+ x = select(neg, -x, x);
+ return x;
+ }
+
+ // http://mathforum.org/library/drmath/view/54137.html
+ // referencing Handbook of Mathematical Functions,
+ // by Milton Abramowitz and Irene Stegun
+ F32 Builder::approx_asin(F32 x) {
+ I32 neg = (x < 0.0f);
+ x = select(neg, -x, x);
+ x = SK_ScalarPI/2 - sqrt(1-x) * poly(x, -0.0187293f, 0.0742610f, -0.2121144f, 1.5707288f);
+ x = select(neg, -x, x);
+ return x;
+ }
+
+ /* Use 4th order polynomial approximation from https://arachnoid.com/polysolve/
+ * with 129 values of x,atan(x) for x:[0...1]
+ * This only works for 0 <= x <= 1
+ */
+ static F32 approx_atan_unit(F32 x) {
+ // for now we might be given NaN... let that through
+ x->assert_true((x != x) | ((x >= 0) & (x <= 1)));
+ return poly(x, 0.14130025741326729f,
+ -0.34312835980675116f,
+ -0.016172900528248768f,
+ 1.0037696976200385f,
+ -0.00014758242182738969f);
+ }
+
+ /* Use identity atan(x) = pi/2 - atan(1/x) for x > 1
+ */
+ F32 Builder::approx_atan(F32 x) {
+ I32 neg = (x < 0.0f);
+ x = select(neg, -x, x);
+ I32 flip = (x > 1.0f);
+ x = select(flip, 1/x, x);
+ x = approx_atan_unit(x);
+ x = select(flip, SK_ScalarPI/2 - x, x);
+ x = select(neg, -x, x);
+ return x;
+ }
+
+ /* Use identity atan(x) = pi/2 - atan(1/x) for x > 1
+ * By swapping y,x to ensure the ratio is <= 1, we can safely call atan_unit()
+ * which avoids a 2nd divide instruction if we had instead called atan().
+ */
+ F32 Builder::approx_atan2(F32 y0, F32 x0) {
+
+ I32 flip = (abs(y0) > abs(x0));
+ F32 y = select(flip, x0, y0);
+ F32 x = select(flip, y0, x0);
+ F32 arg = y/x;
+
+ I32 neg = (arg < 0.0f);
+ arg = select(neg, -arg, arg);
+
+ F32 r = approx_atan_unit(arg);
+ r = select(flip, SK_ScalarPI/2 - r, r);
+ r = select(neg, -r, r);
+
+ // handle quadrant distinctions
+ r = select((y0 >= 0) & (x0 < 0), r + SK_ScalarPI, r);
+ r = select((y0 < 0) & (x0 <= 0), r - SK_ScalarPI, r);
+ // Note: we don't try to handle 0,0 or infinities
+ return r;
+ }
+
+ F32 Builder::min(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(std::min(X,Y)); }
+ return {this, this->push(Op::min_f32, x.id, y.id)};
+ }
+ F32 Builder::max(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(std::max(X,Y)); }
+ return {this, this->push(Op::max_f32, x.id, y.id)};
+ }
+
+ SK_NO_SANITIZE("signed-integer-overflow")
+ I32 Builder::add(I32 x, I32 y) {
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X+Y); }
+ this->canonicalizeIdOrder(x, y);
+ if (this->isImm(y.id, 0)) { return x; } // x+0 == x
+ return {this, this->push(Op::add_i32, x.id, y.id)};
+ }
+ SK_NO_SANITIZE("signed-integer-overflow")
+ I32 Builder::sub(I32 x, I32 y) {
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X-Y); }
+ if (this->isImm(y.id, 0)) { return x; }
+ return {this, this->push(Op::sub_i32, x.id, y.id)};
+ }
+ SK_NO_SANITIZE("signed-integer-overflow")
+ I32 Builder::mul(I32 x, I32 y) {
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X*Y); }
+ this->canonicalizeIdOrder(x, y);
+ if (this->isImm(y.id, 0)) { return splat(0); } // x*0 == 0
+ if (this->isImm(y.id, 1)) { return x; } // x*1 == x
+ return {this, this->push(Op::mul_i32, x.id, y.id)};
+ }
+
+ SK_NO_SANITIZE("shift")
+ I32 Builder::shl(I32 x, int bits) {
+ if (bits == 0) { return x; }
+ if (int X; this->allImm(x.id,&X)) { return splat(X << bits); }
+ return {this, this->push(Op::shl_i32, x.id,NA,NA,NA, bits)};
+ }
+ I32 Builder::shr(I32 x, int bits) {
+ if (bits == 0) { return x; }
+ if (int X; this->allImm(x.id,&X)) { return splat(unsigned(X) >> bits); }
+ return {this, this->push(Op::shr_i32, x.id,NA,NA,NA, bits)};
+ }
+ I32 Builder::sra(I32 x, int bits) {
+ if (bits == 0) { return x; }
+ if (int X; this->allImm(x.id,&X)) { return splat(X >> bits); }
+ return {this, this->push(Op::sra_i32, x.id,NA,NA,NA, bits)};
+ }
+
+ I32 Builder:: eq(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X==Y ? ~0 : 0); }
+ this->canonicalizeIdOrder(x, y);
+ return {this, this->push(Op::eq_f32, x.id, y.id)};
+ }
+ I32 Builder::neq(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X!=Y ? ~0 : 0); }
+ this->canonicalizeIdOrder(x, y);
+ return {this, this->push(Op::neq_f32, x.id, y.id)};
+ }
+ I32 Builder::lt(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(Y> X ? ~0 : 0); }
+ return {this, this->push(Op::gt_f32, y.id, x.id)};
+ }
+ I32 Builder::lte(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(Y>=X ? ~0 : 0); }
+ return {this, this->push(Op::gte_f32, y.id, x.id)};
+ }
+ I32 Builder::gt(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X> Y ? ~0 : 0); }
+ return {this, this->push(Op::gt_f32, x.id, y.id)};
+ }
+ I32 Builder::gte(F32 x, F32 y) {
+ if (float X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X>=Y ? ~0 : 0); }
+ return {this, this->push(Op::gte_f32, x.id, y.id)};
+ }
+
+ I32 Builder:: eq(I32 x, I32 y) {
+ if (x.id == y.id) { return splat(~0); }
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X==Y ? ~0 : 0); }
+ this->canonicalizeIdOrder(x, y);
+ return {this, this->push(Op:: eq_i32, x.id, y.id)};
+ }
+ I32 Builder::neq(I32 x, I32 y) {
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X!=Y ? ~0 : 0); }
+ return ~(x == y);
+ }
+ I32 Builder:: gt(I32 x, I32 y) {
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X> Y ? ~0 : 0); }
+ return {this, this->push(Op:: gt_i32, x.id, y.id)};
+ }
+ I32 Builder::gte(I32 x, I32 y) {
+ if (x.id == y.id) { return splat(~0); }
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X>=Y ? ~0 : 0); }
+ return ~(x < y);
+ }
+ I32 Builder:: lt(I32 x, I32 y) { return y>x; }
+ I32 Builder::lte(I32 x, I32 y) { return y>=x; }
+
+ Val Builder::holdsBitNot(Val id) {
+ // We represent `~x` as `x ^ ~0`.
+ if (fProgram[id].op == Op::bit_xor && this->isImm(fProgram[id].y, ~0)) {
+ return fProgram[id].x;
+ }
+ return NA;
+ }
+
+ I32 Builder::bit_and(I32 x, I32 y) {
+ if (x.id == y.id) { return x; }
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X&Y); }
+ this->canonicalizeIdOrder(x, y);
+ if (this->isImm(y.id, 0)) { return splat(0); } // (x & false) == false
+ if (this->isImm(y.id,~0)) { return x; } // (x & true) == x
+ if (Val notX = this->holdsBitNot(x.id); notX != NA) { // (~x & y) == bit_clear(y, ~x)
+ return bit_clear(y, {this, notX});
+ }
+ if (Val notY = this->holdsBitNot(y.id); notY != NA) { // (x & ~y) == bit_clear(x, ~y)
+ return bit_clear(x, {this, notY});
+ }
+ return {this, this->push(Op::bit_and, x.id, y.id)};
+ }
+ I32 Builder::bit_or(I32 x, I32 y) {
+ if (x.id == y.id) { return x; }
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X|Y); }
+ this->canonicalizeIdOrder(x, y);
+ if (this->isImm(y.id, 0)) { return x; } // (x | false) == x
+ if (this->isImm(y.id,~0)) { return splat(~0); } // (x | true) == true
+ return {this, this->push(Op::bit_or, x.id, y.id)};
+ }
+ I32 Builder::bit_xor(I32 x, I32 y) {
+ if (x.id == y.id) { return splat(0); }
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X^Y); }
+ this->canonicalizeIdOrder(x, y);
+ if (this->isImm(y.id, 0)) { return x; } // (x ^ false) == x
+ return {this, this->push(Op::bit_xor, x.id, y.id)};
+ }
+
+ I32 Builder::bit_clear(I32 x, I32 y) {
+ if (x.id == y.id) { return splat(0); }
+ if (int X,Y; this->allImm(x.id,&X, y.id,&Y)) { return splat(X&~Y); }
+ if (this->isImm(y.id, 0)) { return x; } // (x & ~false) == x
+ if (this->isImm(y.id,~0)) { return splat(0); } // (x & ~true) == false
+ if (this->isImm(x.id, 0)) { return splat(0); } // (false & ~y) == false
+ return {this, this->push(Op::bit_clear, x.id, y.id)};
+ }
+
+ I32 Builder::select(I32 x, I32 y, I32 z) {
+ if (y.id == z.id) { return y; }
+ if (int X,Y,Z; this->allImm(x.id,&X, y.id,&Y, z.id,&Z)) { return splat(X?Y:Z); }
+ if (this->isImm(x.id,~0)) { return y; } // (true ? y : z) == y
+ if (this->isImm(x.id, 0)) { return z; } // (false ? y : z) == z
+ if (this->isImm(y.id, 0)) { return bit_clear(z,x); } // (x ? 0 : z) == ~x&z
+ if (this->isImm(z.id, 0)) { return bit_and (y,x); } // (x ? y : 0) == x&y
+ if (Val notX = this->holdsBitNot(x.id); notX != NA) { // (!x ? y : z) == (x ? z : y)
+ x.id = notX;
+ std::swap(y, z);
+ }
+ return {this, this->push(Op::select, x.id, y.id, z.id)};
+ }
+
+ I32 Builder::extract(I32 x, int bits, I32 z) {
+ if (unsigned Z; this->allImm(z.id,&Z) && (~0u>>bits) == Z) { return shr(x, bits); }
+ return bit_and(z, shr(x, bits));
+ }
+
+ I32 Builder::pack(I32 x, I32 y, int bits) {
+ return bit_or(x, shl(y, bits));
+ }
+
+ F32 Builder::ceil(F32 x) {
+ if (float X; this->allImm(x.id,&X)) { return splat(ceilf(X)); }
+ return {this, this->push(Op::ceil, x.id)};
+ }
+ F32 Builder::floor(F32 x) {
+ if (float X; this->allImm(x.id,&X)) { return splat(floorf(X)); }
+ return {this, this->push(Op::floor, x.id)};
+ }
+ F32 Builder::to_F32(I32 x) {
+ if (int X; this->allImm(x.id,&X)) { return splat((float)X); }
+ return {this, this->push(Op::to_f32, x.id)};
+ }
+ I32 Builder::trunc(F32 x) {
+ if (float X; this->allImm(x.id,&X)) { return splat((int)X); }
+ return {this, this->push(Op::trunc, x.id)};
+ }
+ I32 Builder::round(F32 x) {
+ if (float X; this->allImm(x.id,&X)) { return splat((int)lrintf(X)); }
+ return {this, this->push(Op::round, x.id)};
+ }
+
+ I32 Builder::to_fp16(F32 x) {
+ if (float X; this->allImm(x.id,&X)) { return splat((int)SkFloatToHalf(X)); }
+ return {this, this->push(Op::to_fp16, x.id)};
+ }
+ F32 Builder::from_fp16(I32 x) {
+ if (int X; this->allImm(x.id,&X)) { return splat(SkHalfToFloat(X)); }
+ return {this, this->push(Op::from_fp16, x.id)};
+ }
+
+ F32 Builder::from_unorm(int bits, I32 x) {
+ F32 limit = splat(1 / ((1<<bits)-1.0f));
+ return mul(to_F32(x), limit);
+ }
+ I32 Builder::to_unorm(int bits, F32 x) {
+ F32 limit = splat((1<<bits)-1.0f);
+ return round(mul(x, limit));
+ }
+
+ PixelFormat SkColorType_to_PixelFormat(SkColorType ct) {
+ auto UNORM = PixelFormat::UNORM,
+ SRGB = PixelFormat::SRGB,
+ FLOAT = PixelFormat::FLOAT,
+ XRNG = PixelFormat::XRNG;
+ switch (ct) {
+ case kUnknown_SkColorType: break;
+
+ case kRGBA_F32_SkColorType: return {FLOAT,32,32,32,32, 0,32,64,96};
+
+ case kRGBA_F16Norm_SkColorType: return {FLOAT,16,16,16,16, 0,16,32,48};
+ case kRGBA_F16_SkColorType: return {FLOAT,16,16,16,16, 0,16,32,48};
+ case kR16G16B16A16_unorm_SkColorType: return {UNORM,16,16,16,16, 0,16,32,48};
+
+ case kA16_float_SkColorType: return {FLOAT, 0, 0,0,16, 0, 0,0,0};
+ case kR16G16_float_SkColorType: return {FLOAT, 16,16,0, 0, 0,16,0,0};
+
+ case kAlpha_8_SkColorType: return {UNORM, 0,0,0,8, 0,0,0,0};
+ case kGray_8_SkColorType: return {UNORM, 8,8,8,0, 0,0,0,0}; // Subtle.
+ case kR8_unorm_SkColorType: return {UNORM, 8,0,0,0, 0,0,0,0};
+
+ case kRGB_565_SkColorType: return {UNORM, 5,6,5,0, 11,5,0,0}; // (BGR)
+ case kARGB_4444_SkColorType: return {UNORM, 4,4,4,4, 12,8,4,0}; // (ABGR)
+
+ case kRGBA_8888_SkColorType: return {UNORM, 8,8,8,8, 0,8,16,24};
+ case kRGB_888x_SkColorType: return {UNORM, 8,8,8,0, 0,8,16,32}; // 32-bit
+ case kBGRA_8888_SkColorType: return {UNORM, 8,8,8,8, 16,8, 0,24};
+ case kSRGBA_8888_SkColorType: return { SRGB, 8,8,8,8, 0,8,16,24};
+
+ case kRGBA_1010102_SkColorType: return {UNORM, 10,10,10,2, 0,10,20,30};
+ case kBGRA_1010102_SkColorType: return {UNORM, 10,10,10,2, 20,10, 0,30};
+ case kRGB_101010x_SkColorType: return {UNORM, 10,10,10,0, 0,10,20, 0};
+ case kBGR_101010x_SkColorType: return {UNORM, 10,10,10,0, 20,10, 0, 0};
+ case kBGR_101010x_XR_SkColorType: return { XRNG, 10,10,10,0, 20,10, 0, 0};
+
+ case kR8G8_unorm_SkColorType: return {UNORM, 8, 8,0, 0, 0, 8,0,0};
+ case kR16G16_unorm_SkColorType: return {UNORM, 16,16,0, 0, 0,16,0,0};
+ case kA16_unorm_SkColorType: return {UNORM, 0, 0,0,16, 0, 0,0,0};
+ }
+ SkASSERT(false);
+ return {UNORM, 0,0,0,0, 0,0,0,0};
+ }
+
+ static int byte_size(PixelFormat f) {
+ // What's the highest bit we read?
+ int bits = std::max(f.r_bits + f.r_shift,
+ std::max(f.g_bits + f.g_shift,
+ std::max(f.b_bits + f.b_shift,
+ f.a_bits + f.a_shift)));
+ // Round up to bytes.
+ return (bits + 7) / 8;
+ }
+
+ static Color unpack(PixelFormat f, I32 x) {
+ SkASSERT(byte_size(f) <= 4);
+
+ auto from_srgb = [](int bits, I32 channel) -> F32 {
+ const skcms_TransferFunction* tf = skcms_sRGB_TransferFunction();
+ F32 v = from_unorm(bits, channel);
+ return sk_program_transfer_fn(v, skcms_TFType_sRGBish,
+ v->splat(tf->g),
+ v->splat(tf->a),
+ v->splat(tf->b),
+ v->splat(tf->c),
+ v->splat(tf->d),
+ v->splat(tf->e),
+ v->splat(tf->f));
+ };
+ auto from_xr = [](int bits, I32 channel) -> F32 {
+ static constexpr float min = -0.752941f;
+ static constexpr float max = 1.25098f;
+ static constexpr float range = max - min;
+ F32 v = from_unorm(bits, channel);
+ return v * range + min;
+ };
+
+ auto unpack_rgb = [=](int bits, int shift) -> F32 {
+ I32 channel = extract(x, shift, (1<<bits)-1);
+ switch (f.encoding) {
+ case PixelFormat::UNORM: return from_unorm(bits, channel);
+ case PixelFormat:: SRGB: return from_srgb (bits, channel);
+ case PixelFormat::FLOAT: return from_fp16 ( channel);
+ case PixelFormat:: XRNG: return from_xr (bits, channel);
+ }
+ SkUNREACHABLE;
+ };
+ auto unpack_alpha = [=](int bits, int shift) -> F32 {
+ I32 channel = extract(x, shift, (1<<bits)-1);
+ switch (f.encoding) {
+ case PixelFormat::UNORM:
+ case PixelFormat:: SRGB: return from_unorm(bits, channel);
+ case PixelFormat::FLOAT: return from_fp16 ( channel);
+ case PixelFormat:: XRNG: return from_xr (bits, channel);
+ }
+ SkUNREACHABLE;
+ };
+ return {
+ f.r_bits ? unpack_rgb (f.r_bits, f.r_shift) : x->splat(0.0f),
+ f.g_bits ? unpack_rgb (f.g_bits, f.g_shift) : x->splat(0.0f),
+ f.b_bits ? unpack_rgb (f.b_bits, f.b_shift) : x->splat(0.0f),
+ f.a_bits ? unpack_alpha(f.a_bits, f.a_shift) : x->splat(1.0f),
+ };
+ }
+
+ static void split_disjoint_8byte_format(PixelFormat f, PixelFormat* lo, PixelFormat* hi) {
+ SkASSERT(byte_size(f) == 8);
+ // We assume some of the channels are in the low 32 bits, some in the high 32 bits.
+ // The assert on byte_size(lo) will trigger if this assumption is violated.
+ *lo = f;
+ if (f.r_shift >= 32) { lo->r_bits = 0; lo->r_shift = 32; }
+ if (f.g_shift >= 32) { lo->g_bits = 0; lo->g_shift = 32; }
+ if (f.b_shift >= 32) { lo->b_bits = 0; lo->b_shift = 32; }
+ if (f.a_shift >= 32) { lo->a_bits = 0; lo->a_shift = 32; }
+ SkASSERT(byte_size(*lo) == 4);
+
+ *hi = f;
+ if (f.r_shift < 32) { hi->r_bits = 0; hi->r_shift = 32; } else { hi->r_shift -= 32; }
+ if (f.g_shift < 32) { hi->g_bits = 0; hi->g_shift = 32; } else { hi->g_shift -= 32; }
+ if (f.b_shift < 32) { hi->b_bits = 0; hi->b_shift = 32; } else { hi->b_shift -= 32; }
+ if (f.a_shift < 32) { hi->a_bits = 0; hi->a_shift = 32; } else { hi->a_shift -= 32; }
+ SkASSERT(byte_size(*hi) == 4);
+ }
+
+ // The only 16-byte format we support today is RGBA F32,
+ // though, TODO, we could generalize that to any swizzle, and to allow UNORM too.
+ static void assert_16byte_is_rgba_f32(PixelFormat f) {
+ #if defined(SK_DEBUG)
+ SkASSERT(byte_size(f) == 16);
+ PixelFormat rgba_f32 = SkColorType_to_PixelFormat(kRGBA_F32_SkColorType);
+
+ SkASSERT(f.encoding == rgba_f32.encoding);
+
+ SkASSERT(f.r_bits == rgba_f32.r_bits);
+ SkASSERT(f.g_bits == rgba_f32.g_bits);
+ SkASSERT(f.b_bits == rgba_f32.b_bits);
+ SkASSERT(f.a_bits == rgba_f32.a_bits);
+
+ SkASSERT(f.r_shift == rgba_f32.r_shift);
+ SkASSERT(f.g_shift == rgba_f32.g_shift);
+ SkASSERT(f.b_shift == rgba_f32.b_shift);
+ SkASSERT(f.a_shift == rgba_f32.a_shift);
+ #endif
+ }
+
+ Color Builder::load(PixelFormat f, Ptr ptr) {
+ switch (byte_size(f)) {
+ case 1: return unpack(f, load8 (ptr));
+ case 2: return unpack(f, load16(ptr));
+ case 4: return unpack(f, load32(ptr));
+ case 8: {
+ PixelFormat lo,hi;
+ split_disjoint_8byte_format(f, &lo,&hi);
+ Color l = unpack(lo, load64(ptr, 0)),
+ h = unpack(hi, load64(ptr, 1));
+ return {
+ lo.r_bits ? l.r : h.r,
+ lo.g_bits ? l.g : h.g,
+ lo.b_bits ? l.b : h.b,
+ lo.a_bits ? l.a : h.a,
+ };
+ }
+ case 16: {
+ assert_16byte_is_rgba_f32(f);
+ return {
+ pun_to_F32(load128(ptr, 0)),
+ pun_to_F32(load128(ptr, 1)),
+ pun_to_F32(load128(ptr, 2)),
+ pun_to_F32(load128(ptr, 3)),
+ };
+ }
+ default: SkUNREACHABLE;
+ }
+ }
+
+ Color Builder::gather(PixelFormat f, UPtr ptr, int offset, I32 index) {
+ switch (byte_size(f)) {
+ case 1: return unpack(f, gather8 (ptr, offset, index));
+ case 2: return unpack(f, gather16(ptr, offset, index));
+ case 4: return unpack(f, gather32(ptr, offset, index));
+ case 8: {
+ PixelFormat lo,hi;
+ split_disjoint_8byte_format(f, &lo,&hi);
+ Color l = unpack(lo, gather32(ptr, offset, (index<<1)+0)),
+ h = unpack(hi, gather32(ptr, offset, (index<<1)+1));
+ return {
+ lo.r_bits ? l.r : h.r,
+ lo.g_bits ? l.g : h.g,
+ lo.b_bits ? l.b : h.b,
+ lo.a_bits ? l.a : h.a,
+ };
+ }
+ case 16: {
+ assert_16byte_is_rgba_f32(f);
+ return {
+ gatherF(ptr, offset, (index<<2)+0),
+ gatherF(ptr, offset, (index<<2)+1),
+ gatherF(ptr, offset, (index<<2)+2),
+ gatherF(ptr, offset, (index<<2)+3),
+ };
+ }
+ default: SkUNREACHABLE;
+ }
+ }
+
+ static I32 pack32(PixelFormat f, Color c) {
+ SkASSERT(byte_size(f) <= 4);
+
+ auto to_srgb = [](int bits, F32 v) {
+ const skcms_TransferFunction* tf = skcms_sRGB_Inverse_TransferFunction();
+ return to_unorm(bits, sk_program_transfer_fn(v, skcms_TFType_sRGBish,
+ v->splat(tf->g),
+ v->splat(tf->a),
+ v->splat(tf->b),
+ v->splat(tf->c),
+ v->splat(tf->d),
+ v->splat(tf->e),
+ v->splat(tf->f)));
+ };
+ auto to_xr = [](int bits, F32 v) {
+ static constexpr float min = -0.752941f;
+ static constexpr float max = 1.25098f;
+ static constexpr float range = max - min;
+ return to_unorm(bits, (v - min) * (1.0f / range));
+ };
+
+ I32 packed = c->splat(0);
+ auto pack_rgb = [&](F32 channel, int bits, int shift) {
+ I32 encoded;
+ switch (f.encoding) {
+ case PixelFormat::UNORM: encoded = to_unorm(bits, channel); break;
+ case PixelFormat:: SRGB: encoded = to_srgb (bits, channel); break;
+ case PixelFormat::FLOAT: encoded = to_fp16 ( channel); break;
+ case PixelFormat:: XRNG: encoded = to_xr (bits, channel); break;
+ }
+ packed = pack(packed, encoded, shift);
+ };
+ auto pack_alpha = [&](F32 channel, int bits, int shift) {
+ I32 encoded;
+ switch (f.encoding) {
+ case PixelFormat::UNORM:
+ case PixelFormat:: SRGB: encoded = to_unorm(bits, channel); break;
+ case PixelFormat::FLOAT: encoded = to_fp16 ( channel); break;
+ case PixelFormat:: XRNG: encoded = to_xr (bits, channel); break;
+ }
+ packed = pack(packed, encoded, shift);
+ };
+ if (f.r_bits) { pack_rgb (c.r, f.r_bits, f.r_shift); }
+ if (f.g_bits) { pack_rgb (c.g, f.g_bits, f.g_shift); }
+ if (f.b_bits) { pack_rgb (c.b, f.b_bits, f.b_shift); }
+ if (f.a_bits) { pack_alpha(c.a, f.a_bits, f.a_shift); }
+ return packed;
+ }
+
+ void Builder::store(PixelFormat f, Ptr ptr, Color c) {
+ // Detect a grayscale PixelFormat: r,g,b bit counts and shifts all equal.
+ if (f.r_bits == f.g_bits && f.g_bits == f.b_bits &&
+ f.r_shift == f.g_shift && f.g_shift == f.b_shift) {
+
+ // TODO: pull these coefficients from an SkColorSpace? This is sRGB luma/luminance.
+ c.r = c.r * 0.2126f
+ + c.g * 0.7152f
+ + c.b * 0.0722f;
+ f.g_bits = f.b_bits = 0;
+ }
+
+ switch (byte_size(f)) {
+ case 1: store8 (ptr, pack32(f,c)); break;
+ case 2: store16(ptr, pack32(f,c)); break;
+ case 4: store32(ptr, pack32(f,c)); break;
+ case 8: {
+ PixelFormat lo,hi;
+ split_disjoint_8byte_format(f, &lo,&hi);
+ store64(ptr, pack32(lo,c)
+ , pack32(hi,c));
+ break;
+ }
+ case 16: {
+ assert_16byte_is_rgba_f32(f);
+ store128(ptr, pun_to_I32(c.r), pun_to_I32(c.g), pun_to_I32(c.b), pun_to_I32(c.a));
+ break;
+ }
+ default: SkUNREACHABLE;
+ }
+ }
+
+ void Builder::unpremul(F32* r, F32* g, F32* b, F32 a) {
+ skvm::F32 invA = 1.0f / a,
+ inf = pun_to_F32(splat(0x7f800000));
+ // If a is 0, so are *r,*g,*b, so set invA to 0 to avoid 0*inf=NaN (instead 0*0 = 0).
+ invA = select(invA < inf, invA
+ , 0.0f);
+ *r *= invA;
+ *g *= invA;
+ *b *= invA;
+ }
+
+ void Builder::premul(F32* r, F32* g, F32* b, F32 a) {
+ *r *= a;
+ *g *= a;
+ *b *= a;
+ }
+
+ Color Builder::uniformColor(SkColor4f color, Uniforms* uniforms) {
+ auto [r,g,b,a] = color;
+ return {
+ uniformF(uniforms->pushF(r)),
+ uniformF(uniforms->pushF(g)),
+ uniformF(uniforms->pushF(b)),
+ uniformF(uniforms->pushF(a)),
+ };
+ }
+
+ F32 Builder::lerp(F32 lo, F32 hi, F32 t) {
+ if (this->isImm(t.id, 0.0f)) { return lo; }
+ if (this->isImm(t.id, 1.0f)) { return hi; }
+ return mad(sub(hi, lo), t, lo);
+ }
+
+ Color Builder::lerp(Color lo, Color hi, F32 t) {
+ return {
+ lerp(lo.r, hi.r, t),
+ lerp(lo.g, hi.g, t),
+ lerp(lo.b, hi.b, t),
+ lerp(lo.a, hi.a, t),
+ };
+ }
+
+ HSLA Builder::to_hsla(Color c) {
+ F32 mx = max(max(c.r,c.g),c.b),
+ mn = min(min(c.r,c.g),c.b),
+ d = mx - mn,
+ invd = 1.0f / d,
+ g_lt_b = select(c.g < c.b, splat(6.0f)
+ , splat(0.0f));
+
+ F32 h = (1/6.0f) * select(mx == mn, 0.0f,
+ select(mx == c.r, invd * (c.g - c.b) + g_lt_b,
+ select(mx == c.g, invd * (c.b - c.r) + 2.0f
+ , invd * (c.r - c.g) + 4.0f)));
+
+ F32 sum = mx + mn,
+ l = sum * 0.5f,
+ s = select(mx == mn, 0.0f
+ , d / select(l > 0.5f, 2.0f - sum
+ , sum));
+ return {h, s, l, c.a};
+ }
+
+ Color Builder::to_rgba(HSLA c) {
+ // See GrRGBToHSLFilterEffect.fp
+
+ auto [h,s,l,a] = c;
+ F32 x = s * (1.0f - abs(l + l - 1.0f));
+
+ auto hue_to_rgb = [&,l=l](auto hue) {
+ auto q = abs(6.0f * fract(hue) - 3.0f) - 1.0f;
+ return x * (clamp01(q) - 0.5f) + l;
+ };
+
+ return {
+ hue_to_rgb(h + 0/3.0f),
+ hue_to_rgb(h + 2/3.0f),
+ hue_to_rgb(h + 1/3.0f),
+ c.a,
+ };
+ }
+
+ // We're basing our implementation of non-separable blend modes on
+ // https://www.w3.org/TR/compositing-1/#blendingnonseparable.
+ // and
+ // https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf
+ // They're equivalent, but ES' math has been better simplified.
+ //
+ // Anything extra we add beyond that is to make the math work with premul inputs.
+
+    // Saturation of an RGB triple: the spread between its largest and
+    // smallest channel (the Sat() function of the W3C compositing spec).
+    static skvm::F32 saturation(skvm::F32 r, skvm::F32 g, skvm::F32 b) {
+        skvm::F32 hi = max(r, max(g, b)),
+                  lo = min(r, min(g, b));
+        return hi - lo;
+    }
+
+    // Weighted luminance of an RGB triple, using the 0.30/0.59/0.11 weights
+    // of the W3C compositing spec's Lum() function.
+    static skvm::F32 luminance(skvm::F32 r, skvm::F32 g, skvm::F32 b) {
+        return r*0.30f + g*0.59f + b*0.11f;
+    }
+
+    // Rescale (r,g,b) in place so its saturation becomes s, preserving the
+    // relative position of the middle channel (SetSat() from the W3C spec).
+    static void set_sat(skvm::F32* r, skvm::F32* g, skvm::F32* b, skvm::F32 s) {
+        F32 mn = min(*r, min(*g, *b)),
+            mx = max(*r, max(*g, *b)),
+            sat = mx - mn;
+
+        // Map min channel to 0, max channel to s, and scale the middle proportionally.
+        auto scale = [&](skvm::F32 c) {
+            auto scaled = ((c - mn) * s) / sat;
+            // When sat == 0 the division yields inf/NaN; those lanes become 0.
+            return select(is_finite(scaled), scaled, 0.0f);
+        };
+        *r = scale(*r);
+        *g = scale(*g);
+        *b = scale(*b);
+    }
+
+    // Shift (r,g,b) in place by a constant so its luminance becomes lu
+    // (SetLum() from the W3C compositing spec, without the clip step).
+    static void set_lum(skvm::F32* r, skvm::F32* g, skvm::F32* b, skvm::F32 lu) {
+        auto diff = lu - luminance(*r, *g, *b);
+        *r += diff;
+        *g += diff;
+        *b += diff;
+    }
+
+    // Clip (r,g,b) back into [0,a] while preserving luminance, by moving each
+    // channel toward the luminance (ClipColor() from the W3C spec, adapted for
+    // premultiplied values where 'a' rather than 1 is the channel ceiling).
+    static void clip_color(skvm::F32* r, skvm::F32* g, skvm::F32* b, skvm::F32 a) {
+        F32 mn = min(*r, min(*g, *b)),
+            mx = max(*r, max(*g, *b)),
+            lu = luminance(*r, *g, *b);
+
+        auto clip = [&](auto c) {
+            c = select(mn < 0 & lu != mn, lu + ((c-lu)*(  lu)) / (lu-mn), c);
+            c = select(mx > a & lu != mx, lu + ((c-lu)*(a-lu)) / (mx-lu), c);
+            return clamp01(c); // May be a little negative, or worse, NaN.
+        };
+        *r = clip(*r);
+        *g = clip(*g);
+        *b = clip(*b);
+    }
+
+    // Blend premultiplied src over premultiplied dst according to mode,
+    // returning the premultiplied result.  Separable modes run the same
+    // formula per channel; the non-separable HSL modes (hue, saturation,
+    // color, luminosity) use the set_sat/set_lum/clip_color helpers above.
+    Color Builder::blend(SkBlendMode mode, Color src, Color dst) {
+        // mma(x,y,z,w) == x*y + z*w, the recurring "cross alpha" term.
+        auto mma = [](skvm::F32 x, skvm::F32 y, skvm::F32 z, skvm::F32 w) {
+            return x*y + z*w;
+        };
+
+        auto two = [](skvm::F32 x) { return x+x; };
+
+        // Apply fn independently to all four channels, alpha included.
+        auto apply_rgba = [&](auto fn) {
+            return Color {
+                fn(src.r, dst.r),
+                fn(src.g, dst.g),
+                fn(src.b, dst.b),
+                fn(src.a, dst.a),
+            };
+        };
+
+        // Apply fn to r,g,b but always srcover the alpha channel.
+        auto apply_rgb_srcover_a = [&](auto fn) {
+            return Color {
+                fn(src.r, dst.r),
+                fn(src.g, dst.g),
+                fn(src.b, dst.b),
+                mad(dst.a, 1-src.a, src.a),   // srcover for alpha
+            };
+        };
+
+        // Shared epilogue for the non-separable modes: add the uncovered
+        // src and dst contributions and srcover the alpha.
+        auto non_sep = [&](auto R, auto G, auto B) {
+            return Color{
+                R + mma(src.r, 1-dst.a, dst.r, 1-src.a),
+                G + mma(src.g, 1-dst.a, dst.g, 1-src.a),
+                B + mma(src.b, 1-dst.a, dst.b, 1-src.a),
+                mad(dst.a, 1-src.a, src.a),   // srcover for alpha
+            };
+        };
+
+        switch (mode) {
+            default:
+                SkASSERT(false);
+                [[fallthrough]]; /*but also, for safety, fallthrough*/
+
+            case SkBlendMode::kClear: return { splat(0.0f), splat(0.0f), splat(0.0f), splat(0.0f) };
+
+            case SkBlendMode::kSrc: return src;
+            case SkBlendMode::kDst: return dst;
+
+            // Several modes are implemented as their mirror image by swapping
+            // src and dst before falling into the common formula.
+            case SkBlendMode::kDstOver: std::swap(src, dst); [[fallthrough]];
+            case SkBlendMode::kSrcOver:
+                return apply_rgba([&](auto s, auto d) {
+                    return mad(d,1-src.a, s);
+                });
+
+            case SkBlendMode::kDstIn: std::swap(src, dst); [[fallthrough]];
+            case SkBlendMode::kSrcIn:
+                return apply_rgba([&](auto s, auto d) {
+                    return s * dst.a;
+                });
+
+            case SkBlendMode::kDstOut: std::swap(src, dst); [[fallthrough]];
+
+            case SkBlendMode::kSrcOut:
+                return apply_rgba([&](auto s, auto d) {
+                    return s * (1-dst.a);
+                });
+
+            case SkBlendMode::kDstATop: std::swap(src, dst); [[fallthrough]];
+            case SkBlendMode::kSrcATop:
+                return apply_rgba([&](auto s, auto d) {
+                    return mma(s, dst.a,  d, 1-src.a);
+                });
+
+            case SkBlendMode::kXor:
+                return apply_rgba([&](auto s, auto d) {
+                    return mma(s, 1-dst.a,  d, 1-src.a);
+                });
+
+            case SkBlendMode::kPlus:
+                return apply_rgba([&](auto s, auto d) {
+                    return min(s+d, 1.0f);
+                });
+
+            case SkBlendMode::kModulate:
+                return apply_rgba([&](auto s, auto d) {
+                    return s * d;
+                });
+
+            case SkBlendMode::kScreen:
+                // (s+d)-(s*d) gave us trouble with our "r,g,b <= after blending" asserts.
+                // It's kind of plausible that s + (d - sd) keeps more precision?
+                return apply_rgba([&](auto s, auto d) {
+                    return s + (d - s*d);
+                });
+
+            case SkBlendMode::kDarken:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    return s + (d - max(s * dst.a,
+                                        d * src.a));
+                });
+
+            case SkBlendMode::kLighten:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    return s + (d - min(s * dst.a,
+                                        d * src.a));
+                });
+
+            case SkBlendMode::kDifference:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    return s + (d - two(min(s * dst.a,
+                                            d * src.a)));
+                });
+
+            case SkBlendMode::kExclusion:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    return s + (d - two(s * d));
+                });
+
+            // Burn and dodge both guard their division with is_finite() and
+            // fall back to the appropriate edge-case formula per lane.
+            case SkBlendMode::kColorBurn:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    auto mn   = min(dst.a,
+                                    src.a * (dst.a - d) / s),
+                         burn = src.a * (dst.a - mn) + mma(s, 1-dst.a, d, 1-src.a);
+                    return select(d == dst.a     , s * (1-dst.a) + d,
+                           select(is_finite(burn), burn
+                                                 , d * (1-src.a) + s));
+                });
+
+            case SkBlendMode::kColorDodge:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    auto dodge = src.a * min(dst.a,
+                                             d * src.a / (src.a - s))
+                                       + mma(s, 1-dst.a, d, 1-src.a);
+                    return select(d == 0.0f       , s * (1-dst.a) + d,
+                           select(is_finite(dodge), dodge
+                                                  , d * (1-src.a) + s));
+                });
+
+            case SkBlendMode::kHardLight:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    return mma(s, 1-dst.a, d, 1-src.a) +
+                           select(two(s) <= src.a,
+                                  two(s * d),
+                                  src.a * dst.a - two((dst.a - d) * (src.a - s)));
+                });
+
+            // Overlay is hard light with the roles of src and dst swapped
+            // inside the select condition.
+            case SkBlendMode::kOverlay:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    return mma(s, 1-dst.a, d, 1-src.a) +
+                           select(two(d) <= dst.a,
+                                  two(s * d),
+                                  src.a * dst.a - two((dst.a - d) * (src.a - s)));
+                });
+
+            case SkBlendMode::kMultiply:
+                return apply_rgba([&](auto s, auto d) {
+                    return mma(s, 1-dst.a, d, 1-src.a) + s * d;
+                });
+
+            case SkBlendMode::kSoftLight:
+                return apply_rgb_srcover_a([&](auto s, auto d) {
+                    auto  m = select(dst.a > 0.0f, d / dst.a
+                                                 , 0.0f),
+                         s2 = two(s),
+                         m4 = 4*m;
+
+                         // The logic forks three ways:
+                         //    1. dark src?
+                         //    2. light src, dark dst?
+                         //    3. light src, light dst?
+
+                         // Used in case 1
+                    auto darkSrc = d * ((s2-src.a) * (1-m) + src.a),
+                         // Used in case 2
+                         darkDst = (m4 * m4 + m4) * (m-1) + 7*m,
+                         // Used in case 3.
+                         liteDst = sqrt(m) - m,
+                         // Used in 2 or 3?
+                         liteSrc = dst.a * (s2 - src.a) * select(4*d <= dst.a, darkDst
+                                                                            , liteDst)
+                                   + d * src.a;
+                    return s * (1-dst.a) + d * (1-src.a) + select(s2 <= src.a, darkSrc
+                                                                             , liteSrc);
+                });
+
+            // The four non-separable modes differ only in which of src/dst
+            // supplies the starting channels, the saturation, and the luminance.
+            case SkBlendMode::kHue: {
+                skvm::F32 R = src.r * src.a,
+                          G = src.g * src.a,
+                          B = src.b * src.a;
+
+                set_sat   (&R, &G, &B, src.a * saturation(dst.r, dst.g, dst.b));
+                set_lum   (&R, &G, &B, src.a * luminance (dst.r, dst.g, dst.b));
+                clip_color(&R, &G, &B, src.a * dst.a);
+
+                return non_sep(R, G, B);
+            }
+
+            case SkBlendMode::kSaturation: {
+                skvm::F32 R = dst.r * src.a,
+                          G = dst.g * src.a,
+                          B = dst.b * src.a;
+
+                set_sat   (&R, &G, &B, dst.a * saturation(src.r, src.g, src.b));
+                set_lum   (&R, &G, &B, src.a * luminance (dst.r, dst.g, dst.b));
+                clip_color(&R, &G, &B, src.a * dst.a);
+
+                return non_sep(R, G, B);
+            }
+
+            case SkBlendMode::kColor: {
+                skvm::F32 R = src.r * dst.a,
+                          G = src.g * dst.a,
+                          B = src.b * dst.a;
+
+                set_lum   (&R, &G, &B, src.a * luminance(dst.r, dst.g, dst.b));
+                clip_color(&R, &G, &B, src.a * dst.a);
+
+                return non_sep(R, G, B);
+            }
+
+            case SkBlendMode::kLuminosity: {
+                skvm::F32 R = dst.r * src.a,
+                          G = dst.g * src.a,
+                          B = dst.b * src.a;
+
+                set_lum   (&R, &G, &B, dst.a * luminance(src.r, src.g, src.b));
+                clip_color(&R, &G, &B, dst.a * src.a);
+
+                return non_sep(R, G, B);
+            }
+        }
+    }
+
+ // ~~~~ Program::eval() and co. ~~~~ //
+
+ // Handy references for x86-64 instruction encoding:
+ // https://wiki.osdev.org/X86-64_Instruction_Encoding
+ // https://www-user.tu-chemnitz.de/~heha/viewchm.php/hs/x86.chm/x64.htm
+ // https://www-user.tu-chemnitz.de/~heha/viewchm.php/hs/x86.chm/x86.htm
+ // http://ref.x86asm.net/coder64.html
+
+ // Used for ModRM / immediate instruction encoding.
+    // Pack three fields of 2, 3, and 3 bits into one byte (high to low).
+    static uint8_t _233(int a, int b, int c) {
+        return (a & 3) << 6
+             | (b & 7) << 3
+             | (c & 7) << 0;
+    }
+
+    // ModRM byte encodes the arguments of an opcode.
+    // Mod's four values map to the 2-bit ModRM mod field in declaration order:
+    // indirect (no displacement), disp8, disp32, and direct register-register.
+    enum class Mod { Indirect, OneByteImm, FourByteImm, Direct };
+    static uint8_t mod_rm(Mod mod, int reg, int rm) {
+        return _233((int)mod, reg, rm);
+    }
+
+    // Choose the smallest ModRM addressing mode able to carry displacement
+    // imm: none at all, one signed byte, or a full four bytes.
+    static Mod mod(int imm) {
+        return imm == 0               ? Mod::Indirect
+             : SkTFitsIn<int8_t>(imm) ? Mod::OneByteImm
+                                      : Mod::FourByteImm;
+    }
+
+    // How many displacement bytes follow a ModRM byte using this mode.
+    // Direct mode has no memory operand, so no displacement is legal.
+    static int imm_bytes(Mod mod) {
+        switch (mod) {
+            case Mod::Indirect:    return 0;
+            case Mod::OneByteImm:  return 1;
+            case Mod::FourByteImm: return 4;
+            case Mod::Direct: SkUNREACHABLE;
+        }
+        SkUNREACHABLE;
+    }
+
+    // SIB byte encodes a memory address, base + (index * scale).
+    static uint8_t sib(Assembler::Scale scale, int index, int base) {
+        return _233((int)scale, index, base);
+    }
+
+    // The REX prefix is used to extend most old 32-bit instructions to 64-bit.
+    static uint8_t rex(bool W,   // If set, operation is 64-bit, otherwise default, usually 32-bit.
+                       bool R,   // Extra top bit to select ModRM reg, registers 8-15.
+                       bool X,   // Extra top bit for SIB index register.
+                       bool B) { // Extra top bit for SIB base or ModRM rm register.
+        return 0b01000000   // Fixed 0100 for top four bits.
+             | (W << 3)
+             | (R << 2)
+             | (X << 1)
+             | (B << 0);
+    }
+
+
+    // The VEX prefix extends SSE operations to AVX.  Used generally, even with XMM.
+    // A VEX prefix is either 2 or 3 bytes long; len says how many of bytes[] are live.
+    struct VEX {
+        int     len;
+        uint8_t bytes[3];
+    };
+
+    // Build a 2- or 3-byte VEX prefix from its logical fields.  Note that R,
+    // X, B and vvvv are stored inverted (one's complement) in the encoding.
+    static VEX vex(bool  WE,   // Like REX W for int operations, or opcode extension for float?
+                   bool   R,   // Same as REX R.  Pass high bit of dst register, dst>>3.
+                   bool   X,   // Same as REX X.
+                   bool   B,   // Same as REX B.  Pass y>>3 for 3-arg ops, x>>3 for 2-arg.
+                   int  map,   // SSE opcode map selector: 0x0f, 0x380f, 0x3a0f.
+                   int vvvv,   // 4-bit second operand register.  Pass our x for 3-arg ops.
+                   bool   L,   // Set for 256-bit ymm operations, off for 128-bit xmm.
+                   int   pp) { // SSE mandatory prefix: 0x66, 0xf3, 0xf2, else none.
+
+        // Pack x86 opcode map selector to 5-bit VEX encoding.
+        map = [map]{
+            switch (map) {
+                case   0x0f: return 0b00001;
+                case 0x380f: return 0b00010;
+                case 0x3a0f: return 0b00011;
+                // Several more cases only used by XOP / TBM.
+            }
+            SkUNREACHABLE;
+        }();
+
+        // Pack mandatory SSE opcode prefix byte to 2-bit VEX encoding.
+        pp = [pp]{
+            switch (pp) {
+                case 0x66: return 0b01;
+                case 0xf3: return 0b10;
+                case 0xf2: return 0b11;
+            }
+            return 0b00;
+        }();
+
+        VEX vex = {0, {0,0,0}};
+        if (X == 0 && B == 0 && WE == 0 && map == 0b00001) {
+            // With these conditions met, we can optionally compress VEX to 2-byte.
+            vex.len = 2;
+            vex.bytes[0] = 0xc5;
+            vex.bytes[1] = (pp      &  3) << 0
+                         | (L       &  1) << 2
+                         | (~vvvv   & 15) << 3
+                         | (~(int)R &  1) << 7;
+        } else {
+            // We could use this 3-byte VEX prefix all the time if we like.
+            vex.len = 3;
+            vex.bytes[0] = 0xc4;
+            vex.bytes[1] = (map     & 31) << 0
+                         | (~(int)B &  1) << 5
+                         | (~(int)X &  1) << 6
+                         | (~(int)R &  1) << 7;
+            vex.bytes[2] = (pp    &  3) << 0
+                         | (L     &  1) << 2
+                         | (~vvvv & 15) << 3
+                         | (WE    &  1) << 7;
+        }
+        return vex;
+    }
+
+    // The Assembler appends machine code into buf; pass nullptr to only
+    // measure how many bytes a program would need (fSize still advances).
+    Assembler::Assembler(void* buf) : fCode((uint8_t*)buf), fSize(0) {}
+
+    size_t Assembler::size() const { return fSize; }
+
+    // Append n raw bytes (skipping the copy in measuring mode).
+    void Assembler::bytes(const void* p, int n) {
+        if (fCode) {
+            memcpy(fCode+fSize, p, n);
+        }
+        fSize += n;
+    }
+
+    void Assembler::byte(uint8_t b) { this->bytes(&b, 1); }
+    void Assembler::word(uint32_t w) { this->bytes(&w, 4); }
+
+    // Pad with zero bytes until the code size is a multiple of mod.
+    void Assembler::align(int mod) {
+        while (this->size() % mod) {
+            this->byte(0x00);
+        }
+    }
+
+    // int3: the 1-byte breakpoint instruction (0xCC).
+    void Assembler::int3() {
+        this->byte(0xcc);
+    }
+
+    // vzeroupper, emitted as its fixed 3-byte VEX encoding; used to avoid
+    // AVX/SSE transition penalties before returning.
+    void Assembler::vzeroupper() {
+        this->byte(0xc5);
+        this->byte(0xf8);
+        this->byte(0x77);
+    }
+    void Assembler::ret() { this->byte(0xc3); }
+
+    // Emit a 64-bit (REX.W) integer instruction with a register operand x and
+    // a register-or-memory operand dst.  Two-byte opcodes (0x0F-prefixed) are
+    // passed as e.g. 0xB60F and emitted little-endian by the bytes() call.
+    void Assembler::op(int opcode, Operand dst, GP64 x) {
+        if (dst.kind == Operand::REG) {
+            this->byte(rex(W1,x>>3,0,dst.reg>>3));
+            this->bytes(&opcode, SkTFitsIn<uint8_t>(opcode) ? 1 : 2);
+            this->byte(mod_rm(Mod::Direct, x, dst.reg&7));
+        } else {
+            SkASSERT(dst.kind == Operand::MEM);
+            // rsp in the ModRM rm slot means "SIB byte follows", so memory
+            // operands based on rsp, or using any index, need an SIB byte.
+            const Mem& m = dst.mem;
+            const bool need_SIB = (m.base&7) == rsp
+                               || m.index != rsp;
+
+            this->byte(rex(W1,x>>3,m.index>>3,m.base>>3));
+            this->bytes(&opcode, SkTFitsIn<uint8_t>(opcode) ? 1 : 2);
+            this->byte(mod_rm(mod(m.disp), x&7, (need_SIB ? rsp : m.base)&7));
+            if (need_SIB) {
+                this->byte(sib(m.scale, m.index&7, m.base&7));
+            }
+            this->bytes(&m.disp, imm_bytes(mod(m.disp)));
+        }
+    }
+
+    // Emit an integer instruction taking an immediate, using the group-opcode
+    // scheme where opcode_ext rides in the ModRM reg slot.  E.g. 0x01 becomes
+    // 0x81 (32-bit imm) or 0x83 (sign-extended 8-bit imm).
+    void Assembler::op(int opcode, int opcode_ext, Operand dst, int imm) {
+        opcode |= 0b1000'0000;   // top bit set for instructions with any immediate
+
+        int imm_bytes = 4;
+        if (SkTFitsIn<int8_t>(imm)) {
+            imm_bytes = 1;
+            opcode |= 0b0000'0010;  // second bit set for 8-bit immediate, else 32-bit.
+        }
+
+        this->op(opcode, dst, (GP64)opcode_ext);
+        this->bytes(&imm, imm_bytes);
+    }
+
+    // Group-1 ALU ops with immediates: the /0, /5, /7 opcode extensions.
+    void Assembler::add(Operand dst, int imm) { this->op(0x01,0b000, dst,imm); }
+    void Assembler::sub(Operand dst, int imm) { this->op(0x01,0b101, dst,imm); }
+    void Assembler::cmp(Operand dst, int imm) { this->op(0x01,0b111, dst,imm); }
+
+    // These don't work quite like the other instructions with immediates:
+    // these immediates are always fixed size at 4 bytes or 1 byte.
+    void Assembler::mov(Operand dst, int imm) {
+        this->op(0xC7,dst,(GP64)0b000);
+        this->word(imm);
+    }
+    void Assembler::movb(Operand dst, int imm) {
+        this->op(0xC6,dst,(GP64)0b000);
+        this->byte(imm);
+    }
+
+    // Register/memory forms.  The first group stores reg x into dst; the
+    // second group (odd opcodes + 2) loads from x into register dst.
+    void Assembler::add (Operand dst, GP64 x) { this->op(0x01, dst,x); }
+    void Assembler::sub (Operand dst, GP64 x) { this->op(0x29, dst,x); }
+    void Assembler::cmp (Operand dst, GP64 x) { this->op(0x39, dst,x); }
+    void Assembler::mov (Operand dst, GP64 x) { this->op(0x89, dst,x); }
+    void Assembler::movb(Operand dst, GP64 x) { this->op(0x88, dst,x); }
+
+    void Assembler::add (GP64 dst, Operand x) { this->op(0x03, x,dst); }
+    void Assembler::sub (GP64 dst, Operand x) { this->op(0x2B, x,dst); }
+    void Assembler::cmp (GP64 dst, Operand x) { this->op(0x3B, x,dst); }
+    void Assembler::mov (GP64 dst, Operand x) { this->op(0x8B, x,dst); }
+    void Assembler::movb(GP64 dst, Operand x) { this->op(0x8A, x,dst); }
+
+    // Zero-extending loads of a byte / word into a 64-bit register (0x0F-map opcodes).
+    void Assembler::movzbq(GP64 dst, Operand x) { this->op(0xB60F, x,dst); }
+    void Assembler::movzwq(GP64 dst, Operand x) { this->op(0xB70F, x,dst); }
+
+    // AVX/AVX2 ALU instructions.  Each wrapper just names its
+    // (mandatory prefix, opcode map, opcode) triple for the VEX op() emitter.
+    void Assembler::vpaddd (Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0xfe, dst,x,y); }
+    void Assembler::vpsubd (Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0xfa, dst,x,y); }
+    void Assembler::vpmulld(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x40, dst,x,y); }
+
+    void Assembler::vpaddw   (Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0xfd, dst,x,y); }
+    void Assembler::vpsubw   (Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0xf9, dst,x,y); }
+    void Assembler::vpmullw  (Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0xd5, dst,x,y); }
+    void Assembler::vpavgw   (Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0xe3, dst,x,y); }
+    void Assembler::vpmulhrsw(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x0b, dst,x,y); }
+    void Assembler::vpminsw  (Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0xea, dst,x,y); }
+    void Assembler::vpmaxsw  (Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0xee, dst,x,y); }
+    void Assembler::vpminuw  (Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x3a, dst,x,y); }
+    void Assembler::vpmaxuw  (Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x3e, dst,x,y); }
+
+    void Assembler::vpabsw(Ymm dst, Operand x) { this->op(0x66,0x380f,0x1d, dst,x); }
+
+
+    void Assembler::vpand (Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0xdb, dst,x,y); }
+    void Assembler::vpor  (Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0xeb, dst,x,y); }
+    void Assembler::vpxor (Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0xef, dst,x,y); }
+    void Assembler::vpandn(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0xdf, dst,x,y); }
+
+    void Assembler::vaddps(Ymm dst, Ymm x, Operand y) { this->op(0,0x0f,0x58, dst,x,y); }
+    void Assembler::vsubps(Ymm dst, Ymm x, Operand y) { this->op(0,0x0f,0x5c, dst,x,y); }
+    void Assembler::vmulps(Ymm dst, Ymm x, Operand y) { this->op(0,0x0f,0x59, dst,x,y); }
+    void Assembler::vdivps(Ymm dst, Ymm x, Operand y) { this->op(0,0x0f,0x5e, dst,x,y); }
+    void Assembler::vminps(Ymm dst, Ymm x, Operand y) { this->op(0,0x0f,0x5d, dst,x,y); }
+    void Assembler::vmaxps(Ymm dst, Ymm x, Operand y) { this->op(0,0x0f,0x5f, dst,x,y); }
+
+    void Assembler::vfmadd132ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x98, dst,x,y); }
+    void Assembler::vfmadd213ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0xa8, dst,x,y); }
+    void Assembler::vfmadd231ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0xb8, dst,x,y); }
+
+    void Assembler::vfmsub132ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x9a, dst,x,y); }
+    void Assembler::vfmsub213ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0xaa, dst,x,y); }
+    void Assembler::vfmsub231ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0xba, dst,x,y); }
+
+    void Assembler::vfnmadd132ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x9c, dst,x,y); }
+    void Assembler::vfnmadd213ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0xac, dst,x,y); }
+    void Assembler::vfnmadd231ps(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0xbc, dst,x,y); }
+
+    void Assembler::vpackusdw(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x2b, dst,x,y); }
+    void Assembler::vpackuswb(Ymm dst, Ymm x, Operand y) { this->op(0x66,  0x0f,0x67, dst,x,y); }
+
+    void Assembler::vpunpckldq(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0x62, dst,x,y); }
+    void Assembler::vpunpckhdq(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0x6a, dst,x,y); }
+
+    void Assembler::vpcmpeqd(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0x76, dst,x,y); }
+    void Assembler::vpcmpeqw(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0x75, dst,x,y); }
+    void Assembler::vpcmpgtd(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0x66, dst,x,y); }
+    void Assembler::vpcmpgtw(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x0f,0x65, dst,x,y); }
+
+
+    // Append a one-byte immediate that follows a memory/label operand.
+    void Assembler::imm_byte_after_operand(const Operand& operand, int imm) {
+        // When we've embedded a label displacement in the middle of an instruction,
+        // we need to tweak it a little so that the resolved displacement starts
+        // from the end of the instruction and not the end of the displacement.
+        // The imm byte we're about to add moves the instruction end 1 byte
+        // later, so the already-written RIP-relative disp shrinks by 1.
+        if (operand.kind == Operand::LABEL && fCode) {
+            int disp;
+            memcpy(&disp, fCode+fSize-4, 4);
+            disp--;
+            memcpy(fCode+fSize-4, &disp, 4);
+        }
+        this->byte(imm);
+    }
+
+    // vcmpps: packed float compare; imm selects the comparison predicate.
+    void Assembler::vcmpps(Ymm dst, Ymm x, Operand y, int imm) {
+        this->op(0,0x0f,0xc2, dst,x,y);
+        this->imm_byte_after_operand(y, imm);
+    }
+
+    // vpblendvb: the mask register z is encoded in the high nibble of the immediate.
+    void Assembler::vpblendvb(Ymm dst, Ymm x, Operand y, Ymm z) {
+        this->op(0x66,0x3a0f,0x4c, dst,x,y);
+        this->imm_byte_after_operand(y, z << 4);
+    }
+
+    // Shift instructions encode their opcode extension as "dst", dst as x, and x as y.
+    // (That's why the opcode extension below is cast to Ymm.)
+    void Assembler::vpslld(Ymm dst, Ymm x, int imm) {
+        this->op(0x66,0x0f,0x72,(Ymm)6, dst,x);
+        this->byte(imm);
+    }
+    void Assembler::vpsrld(Ymm dst, Ymm x, int imm) {
+        this->op(0x66,0x0f,0x72,(Ymm)2, dst,x);
+        this->byte(imm);
+    }
+    void Assembler::vpsrad(Ymm dst, Ymm x, int imm) {
+        this->op(0x66,0x0f,0x72,(Ymm)4, dst,x);
+        this->byte(imm);
+    }
+    void Assembler::vpsllw(Ymm dst, Ymm x, int imm) {
+        this->op(0x66,0x0f,0x71,(Ymm)6, dst,x);
+        this->byte(imm);
+    }
+    void Assembler::vpsrlw(Ymm dst, Ymm x, int imm) {
+        this->op(0x66,0x0f,0x71,(Ymm)2, dst,x);
+        this->byte(imm);
+    }
+    void Assembler::vpsraw(Ymm dst, Ymm x, int imm) {
+        this->op(0x66,0x0f,0x71,(Ymm)4, dst,x);
+        this->byte(imm);
+    }
+
+    void Assembler::vpermq(Ymm dst, Operand x, int imm) {
+        // A bit unusual among the instructions we use, this is 64-bit operation, so we set W.
+        this->op(0x66,0x3a0f,0x00, dst,x,W1);
+        this->imm_byte_after_operand(x, imm);
+    }
+
+    // Shuffle 128-bit lanes of x and y into dst, controlled by imm.
+    void Assembler::vperm2f128(Ymm dst, Ymm x, Operand y, int imm) {
+        this->op(0x66,0x3a0f,0x06, dst,x,y);
+        this->imm_byte_after_operand(y, imm);
+    }
+
+    // Permute the 8 floats of src by the indices in ix.
+    void Assembler::vpermps(Ymm dst, Ymm ix, Operand src) {
+        this->op(0x66,0x380f,0x16, dst,ix,src);
+    }
+
+    // Round packed floats; the Rounding immediate selects the mode.
+    void Assembler::vroundps(Ymm dst, Operand x, Rounding imm) {
+        this->op(0x66,0x3a0f,0x08, dst,x);
+        this->imm_byte_after_operand(x, imm);
+    }
+
+    // Vector loads/stores and float<->int conversions.
+    void Assembler::vmovdqa(Ymm dst, Operand src) { this->op(0x66,0x0f,0x6f, dst,src); }
+    void Assembler::vmovups(Ymm dst, Operand src) { this->op(   0,0x0f,0x10, dst,src); }
+    void Assembler::vmovups(Xmm dst, Operand src) { this->op(   0,0x0f,0x10, dst,src); }
+    void Assembler::vmovups(Operand dst, Ymm src) { this->op(   0,0x0f,0x11, src,dst); }
+    void Assembler::vmovups(Operand dst, Xmm src) { this->op(   0,0x0f,0x11, src,dst); }
+
+    void Assembler::vcvtdq2ps (Ymm dst, Operand x) { this->op(   0,0x0f,0x5b, dst,x); }
+    void Assembler::vcvttps2dq(Ymm dst, Operand x) { this->op(0xf3,0x0f,0x5b, dst,x); }
+    void Assembler::vcvtps2dq (Ymm dst, Operand x) { this->op(0x66,0x0f,0x5b, dst,x); }
+    void Assembler::vsqrtps   (Ymm dst, Operand x) { this->op(   0,0x0f,0x51, dst,x); }
+
+    // F16C half-float conversions.  Note vcvtps2ph stores to memory, so the
+    // register source x takes the ModRM reg slot and dst the rm slot.
+    void Assembler::vcvtps2ph(Operand dst, Ymm x, Rounding imm) {
+        this->op(0x66,0x3a0f,0x1d, x,dst);
+        this->imm_byte_after_operand(dst, imm);
+    }
+    void Assembler::vcvtph2ps(Ymm dst, Operand x) {
+        this->op(0x66,0x380f,0x13, dst,x);
+    }
+
+    // Record a reference to label l here and return its current displacement
+    // as an ARM64 19-bit instruction count; patched later when l is resolved.
+    int Assembler::disp19(Label* l) {
+        SkASSERT(l->kind == Label::NotYetSet ||
+                 l->kind == Label::ARMDisp19);
+        int here = (int)this->size();
+        l->kind = Label::ARMDisp19;
+        l->references.push_back(here);
+        // ARM 19-bit instruction count, from the beginning of this instruction.
+        return (l->offset - here) / 4;
+    }
+
+    // Record a reference to label l here and return its current displacement
+    // as an x86 32-bit byte offset; patched later when l is resolved.
+    int Assembler::disp32(Label* l) {
+        SkASSERT(l->kind == Label::NotYetSet ||
+                 l->kind == Label::X86Disp32);
+        int here = (int)this->size();
+        l->kind = Label::X86Disp32;
+        l->references.push_back(here);
+        // x86 32-bit byte count, from the end of this instruction.
+        return l->offset - (here + 4);
+    }
+
+    // The general VEX-encoded emitter: prefix/map/opcode select the
+    // instruction, dst and x are registers, and y may be a register, a
+    // base+index*scale+disp memory operand, or a RIP-relative label.
+    void Assembler::op(int prefix, int map, int opcode, int dst, int x, Operand y, W w, L l) {
+        switch (y.kind) {
+            case Operand::REG: {
+                VEX v = vex(w, dst>>3, 0, y.reg>>3,
+                            map, x, l, prefix);
+                this->bytes(v.bytes, v.len);
+                this->byte(opcode);
+                this->byte(mod_rm(Mod::Direct, dst&7, y.reg&7));
+            } return;
+
+            case Operand::MEM: {
+                // Passing rsp as the rm argument to mod_rm() signals an SIB byte follows;
+                // without an SIB byte, that's where the base register would usually go.
+                // This means we have to use an SIB byte if we want to use rsp as a base register.
+                const Mem& m = y.mem;
+                const bool need_SIB = m.base  == rsp
+                                   || m.index != rsp;
+
+                VEX v = vex(w, dst>>3, m.index>>3, m.base>>3,
+                            map, x, l, prefix);
+                this->bytes(v.bytes, v.len);
+                this->byte(opcode);
+                this->byte(mod_rm(mod(m.disp), dst&7, (need_SIB ? rsp : m.base)&7));
+                if (need_SIB) {
+                    this->byte(sib(m.scale, m.index&7, m.base&7));
+                }
+                this->bytes(&m.disp, imm_bytes(mod(m.disp)));
+            } return;
+
+            case Operand::LABEL: {
+                // IP-relative addressing uses Mod::Indirect with the R/M encoded as-if rbp or r13.
+                const int rip = rbp;
+
+                VEX v = vex(w, dst>>3, 0, rip>>3,
+                            map, x, l, prefix);
+                this->bytes(v.bytes, v.len);
+                this->byte(opcode);
+                this->byte(mod_rm(Mod::Indirect, dst&7, rip&7));
+                this->word(this->disp32(y.label));
+            } return;
+        }
+    }
+
+    // Byte shuffle, vector test (sets flags), and scalar float broadcast.
+    void Assembler::vpshufb(Ymm dst, Ymm x, Operand y) { this->op(0x66,0x380f,0x00, dst,x,y); }
+
+    void Assembler::vptest(Ymm x, Operand y) { this->op(0x66, 0x380f, 0x17, x,y); }
+
+    void Assembler::vbroadcastss(Ymm dst, Operand y) { this->op(0x66,0x380f,0x18, dst,y); }
+
+    // Emit a conditional jump to label l; condition is the 0x8? second opcode byte.
+    void Assembler::jump(uint8_t condition, Label* l) {
+        // These conditional jumps can be either 2 bytes (short) or 6 bytes (near):
+        //    7?     one-byte-disp
+        //    0F 8?  four-byte-disp
+        // We always use the near displacement to make updating labels simpler (no resizing).
+        this->byte(0x0f);
+        this->byte(condition);
+        this->word(this->disp32(l));
+    }
+    void Assembler::je (Label* l) { this->jump(0x84, l); }
+    void Assembler::jne(Label* l) { this->jump(0x85, l); }
+    void Assembler::jl (Label* l) { this->jump(0x8c, l); }
+    void Assembler::jc (Label* l) { this->jump(0x82, l); }
+
+    // Unconditional jump to label l.
+    void Assembler::jmp(Label* l) {
+        // Like above in jump(), we could use 8-bit displacement here, but always use 32-bit.
+        this->byte(0xe9);
+        this->word(this->disp32(l));
+    }
+
+    // Zero-extending vector loads, and narrow stores of an Xmm's low 8/4 bytes.
+    void Assembler::vpmovzxwd(Ymm dst, Operand src) { this->op(0x66,0x380f,0x33, dst,src); }
+    void Assembler::vpmovzxbd(Ymm dst, Operand src) { this->op(0x66,0x380f,0x31, dst,src); }
+
+    void Assembler::vmovq(Operand dst, Xmm src) { this->op(0x66,0x0f,0xd6, src,dst); }
+
+    void Assembler::vmovd(Operand dst, Xmm src) { this->op(0x66,0x0f,0x7e, src,dst); }
+    void Assembler::vmovd(Xmm dst, Operand src) { this->op(0x66,0x0f,0x6e, dst,src); }
+
+    // Insert a dword/word/byte from y into lane imm of src, writing dst.
+    void Assembler::vpinsrd(Xmm dst, Xmm src, Operand y, int imm) {
+        this->op(0x66,0x3a0f,0x22, dst,src,y);
+        this->imm_byte_after_operand(y, imm);
+    }
+    void Assembler::vpinsrw(Xmm dst, Xmm src, Operand y, int imm) {
+        this->op(0x66,0x0f,0xc4, dst,src,y);
+        this->imm_byte_after_operand(y, imm);
+    }
+    void Assembler::vpinsrb(Xmm dst, Xmm src, Operand y, int imm) {
+        this->op(0x66,0x3a0f,0x20, dst,src,y);
+        this->imm_byte_after_operand(y, imm);
+    }
+
+    // Extract a 128-bit lane / dword / word / byte (lane imm) into dst.
+    // Plain byte() rather than imm_byte_after_operand() is fine here because
+    // label operands are disallowed, as the asserts check.
+    void Assembler::vextracti128(Operand dst, Ymm src, int imm) {
+        this->op(0x66,0x3a0f,0x39, src,dst);
+        SkASSERT(dst.kind != Operand::LABEL);
+        this->byte(imm);
+    }
+    void Assembler::vpextrd(Operand dst, Xmm src, int imm) {
+        this->op(0x66,0x3a0f,0x16, src,dst);
+        SkASSERT(dst.kind != Operand::LABEL);
+        this->byte(imm);
+    }
+    void Assembler::vpextrw(Operand dst, Xmm src, int imm) {
+        this->op(0x66,0x3a0f,0x15, src,dst);
+        SkASSERT(dst.kind != Operand::LABEL);
+        this->byte(imm);
+    }
+    void Assembler::vpextrb(Operand dst, Xmm src, int imm) {
+        this->op(0x66,0x3a0f,0x14, src,dst);
+        SkASSERT(dst.kind != Operand::LABEL);
+        this->byte(imm);
+    }
+
+    // Masked gather of 8 floats from base + ix[i]*scale; mask lanes are
+    // consumed (zeroed) as elements load.  Encoded by hand because the mask
+    // register rides in the VEX vvvv field and an SIB byte is mandatory.
+    void Assembler::vgatherdps(Ymm dst, Scale scale, Ymm ix, GP64 base, Ymm mask) {
+        // Unlike most instructions, no aliasing is permitted here.
+        SkASSERT(dst != ix);
+        SkASSERT(dst != mask);
+        SkASSERT(mask != ix);
+
+        int prefix = 0x66,
+            map    = 0x380f,
+            opcode = 0x92;
+        VEX v = vex(0, dst>>3, ix>>3, base>>3,
+                    map, mask, /*ymm?*/1, prefix);
+        this->bytes(v.bytes, v.len);
+        this->byte(opcode);
+        this->byte(mod_rm(Mod::Indirect, dst&7, rsp/*use SIB*/));
+        this->byte(sib(scale, ix&7, base&7));
+    }
+
+ // https://static.docs.arm.com/ddi0596/a/DDI_0596_ARM_a64_instruction_set_architecture.pdf
+
+    // Low 'bits' bits set.  (Callers here pass at most 22, well under 31.)
+    static int mask(unsigned long long bits) { return (1<<(int)bits)-1; }
+
+    // ARM64 instruction packers.  The first form fills the common
+    // three-register layout (hi | Rm | lo | Rn | Rd); the second takes a
+    // 22-bit top field plus a pre-shifted immediate OR'd straight in.
+    void Assembler::op(uint32_t hi, V m, uint32_t lo, V n, V d) {
+        this->word( (hi & mask(11)) << 21
+                  | (m  & mask(5)) << 16
+                  | (lo & mask(6)) << 10
+                  | (n  & mask(5)) <<  5
+                  | (d  & mask(5)) <<  0);
+    }
+    void Assembler::op(uint32_t op22, V n, V d, int imm) {
+        this->word( (op22 & mask(22)) << 10
+                  | imm   // size and location depends on the instruction
+                  | (n    & mask(5)) <<  5
+                  | (d    & mask(5)) <<  0);
+    }
+
+    // NEON (AArch64 ASIMD) instruction table.  Suffixes name the arrangement:
+    // 16b = 16 bytes, 8h = 8 halfwords, 4s = 4 words/floats.  The digit-quoted
+    // binary literals mirror the field layout in the ARM ARM encoding tables.
+    void Assembler::and16b(V d, V n, V m) { this->op(0b0'1'0'01110'00'1, m, 0b00011'1, n, d); }
+    void Assembler::orr16b(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b00011'1, n, d); }
+    void Assembler::eor16b(V d, V n, V m) { this->op(0b0'1'1'01110'00'1, m, 0b00011'1, n, d); }
+    void Assembler::bic16b(V d, V n, V m) { this->op(0b0'1'0'01110'01'1, m, 0b00011'1, n, d); }
+    void Assembler::bsl16b(V d, V n, V m) { this->op(0b0'1'1'01110'01'1, m, 0b00011'1, n, d); }
+    void Assembler::not16b(V d, V n)      { this->op(0b0'1'1'01110'00'10000'00101'10,  n, d); }
+
+    void Assembler::add4s(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b10000'1, n, d); }
+    void Assembler::sub4s(V d, V n, V m) { this->op(0b0'1'1'01110'10'1, m, 0b10000'1, n, d); }
+    void Assembler::mul4s(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b10011'1, n, d); }
+
+    void Assembler::cmeq4s(V d, V n, V m) { this->op(0b0'1'1'01110'10'1, m, 0b10001'1, n, d); }
+    void Assembler::cmgt4s(V d, V n, V m) { this->op(0b0'1'0'01110'10'1, m, 0b0011'0'1, n, d); }
+
+    void Assembler::sub8h(V d, V n, V m) { this->op(0b0'1'1'01110'01'1, m, 0b10000'1, n, d); }
+    void Assembler::mul8h(V d, V n, V m) { this->op(0b0'1'0'01110'01'1, m, 0b10011'1, n, d); }
+
+    void Assembler::fadd4s(V d, V n, V m) { this->op(0b0'1'0'01110'0'0'1, m, 0b11010'1, n, d); }
+    void Assembler::fsub4s(V d, V n, V m) { this->op(0b0'1'0'01110'1'0'1, m, 0b11010'1, n, d); }
+    void Assembler::fmul4s(V d, V n, V m) { this->op(0b0'1'1'01110'0'0'1, m, 0b11011'1, n, d); }
+    void Assembler::fdiv4s(V d, V n, V m) { this->op(0b0'1'1'01110'0'0'1, m, 0b11111'1, n, d); }
+    void Assembler::fmin4s(V d, V n, V m) { this->op(0b0'1'0'01110'1'0'1, m, 0b11110'1, n, d); }
+    void Assembler::fmax4s(V d, V n, V m) { this->op(0b0'1'0'01110'0'0'1, m, 0b11110'1, n, d); }
+
+    void Assembler::fneg4s (V d, V n) { this->op(0b0'1'1'01110'1'0'10000'01111'10, n,d); }
+    void Assembler::fsqrt4s(V d, V n) { this->op(0b0'1'1'01110'1'0'10000'11111'10, n,d); }
+
+    void Assembler::fcmeq4s(V d, V n, V m) { this->op(0b0'1'0'01110'0'0'1, m, 0b1110'0'1, n, d); }
+    void Assembler::fcmgt4s(V d, V n, V m) { this->op(0b0'1'1'01110'1'0'1, m, 0b1110'0'1, n, d); }
+    void Assembler::fcmge4s(V d, V n, V m) { this->op(0b0'1'1'01110'0'0'1, m, 0b1110'0'1, n, d); }
+
+    void Assembler::fmla4s(V d, V n, V m) { this->op(0b0'1'0'01110'0'0'1, m, 0b11001'1, n, d); }
+    void Assembler::fmls4s(V d, V n, V m) { this->op(0b0'1'0'01110'1'0'1, m, 0b11001'1, n, d); }
+
+    void Assembler::tbl(V d, V n, V m) { this->op(0b0'1'001110'00'0, m, 0b0'00'0'00, n, d); }
+
+    void Assembler::uzp14s(V d, V n, V m) { this->op(0b0'1'001110'10'0, m, 0b0'0'01'10, n, d); }
+    void Assembler::uzp24s(V d, V n, V m) { this->op(0b0'1'001110'10'0, m, 0b0'1'01'10, n, d); }
+    void Assembler::zip14s(V d, V n, V m) { this->op(0b0'1'001110'10'0, m, 0b0'0'11'10, n, d); }
+    void Assembler::zip24s(V d, V n, V m) { this->op(0b0'1'001110'10'0, m, 0b0'1'11'10, n, d); }
+
+    // Immediate shifts.  Right shifts (sshr/ushr) encode the amount as its
+    // negation within the immh:immb field, per the A64 encoding; hence -imm.
+    void Assembler::sli4s(V d, V n, int imm5) {
+        this->op(0b0'1'1'011110'0100'000'01010'1,    n, d, ( imm5 & mask(5))<<16);
+    }
+    void Assembler::shl4s(V d, V n, int imm5) {
+        this->op(0b0'1'0'011110'0100'000'01010'1,    n, d, ( imm5 & mask(5))<<16);
+    }
+    void Assembler::sshr4s(V d, V n, int imm5) {
+        this->op(0b0'1'0'011110'0100'000'00'0'0'0'1, n, d, (-imm5 & mask(5))<<16);
+    }
+    void Assembler::ushr4s(V d, V n, int imm5) {
+        this->op(0b0'1'1'011110'0100'000'00'0'0'0'1, n, d, (-imm5 & mask(5))<<16);
+    }
+    void Assembler::ushr8h(V d, V n, int imm4) {
+        this->op(0b0'1'1'011110'0010'000'00'0'0'0'1, n, d, (-imm4 & mask(4))<<16);
+    }
+
+    // Conversions, rounding, narrowing/widening, and horizontal minimum.
+    void Assembler::scvtf4s (V d, V n) { this->op(0b0'1'0'01110'0'0'10000'11101'10, n,d); }
+    void Assembler::fcvtzs4s(V d, V n) { this->op(0b0'1'0'01110'1'0'10000'1101'1'10, n,d); }
+    void Assembler::fcvtns4s(V d, V n) { this->op(0b0'1'0'01110'0'0'10000'1101'0'10, n,d); }
+    void Assembler::frintp4s(V d, V n) { this->op(0b0'1'0'01110'1'0'10000'1100'0'10, n,d); }
+    void Assembler::frintm4s(V d, V n) { this->op(0b0'1'0'01110'0'0'10000'1100'1'10, n,d); }
+
+    void Assembler::fcvtn(V d, V n) { this->op(0b0'0'0'01110'0'0'10000'10110'10, n,d); }
+    void Assembler::fcvtl(V d, V n) { this->op(0b0'0'0'01110'0'0'10000'10111'10, n,d); }
+
+    void Assembler::xtns2h(V d, V n) { this->op(0b0'0'0'01110'01'10000'10010'10, n,d); }
+    void Assembler::xtnh2b(V d, V n) { this->op(0b0'0'0'01110'00'10000'10010'10, n,d); }
+
+    void Assembler::uxtlb2h(V d, V n) { this->op(0b0'0'1'011110'0001'000'10100'1, n,d); }
+    void Assembler::uxtlh2s(V d, V n) { this->op(0b0'0'1'011110'0010'000'10100'1, n,d); }
+
+    void Assembler::uminv4s(V d, V n) { this->op(0b0'1'1'01110'10'11000'1'1010'10, n,d); }
+
+    // brk: software breakpoint carrying a 16-bit comment immediate.
+    void Assembler::brk(int imm16) {
+        this->op(0b11010100'001'00000000000, (imm16 & mask(16)) << 5);
+    }
+
+    void Assembler::ret(X n) { this->op(0b1101011'0'0'10'11111'0000'0'0, n, (X)0); }
+
+    // 64-bit add/sub with a 12-bit unsigned immediate; subs also sets flags.
+    void Assembler::add(X d, X n, int imm12) {
+        this->op(0b1'0'0'10001'00'000000000000, n,d, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::sub(X d, X n, int imm12) {
+        this->op(0b1'1'0'10001'00'000000000000, n,d, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::subs(X d, X n, int imm12) {
+        this->op(0b1'1'1'10001'00'000000000000, n,d, (imm12 & mask(12)) << 10);
+    }
+
+    // add (shifted register): d = n + (m shifted by imm6).  ROR isn't a legal
+    // shift for this encoding, hence the assert.
+    void Assembler::add(X d, X n, X m, Shift shift, int imm6) {
+        SkASSERT(shift != ROR);
+
+        int imm = (imm6  & mask(6)) << 0
+                | (m     & mask(5)) << 6
+                | (0     & mask(1)) << 11
+                | (shift & mask(2)) << 12;
+        this->op(0b1'0'0'01011'00'0'00000'000000, n,d, imm << 10);
+    }
+
+    // PC-relative branches. disp19() returns the current 19-bit displacement
+    // to the Label (back-patched by label() once the Label is placed). The
+    // (X)0 / (V)cond casts merely route values through op()'s register slots.
+    void Assembler::b(Condition cond, Label* l) {
+        const int imm19 = this->disp19(l);
+        this->op(0b0101010'0'00000000000000, (X)0, (V)cond, (imm19 & mask(19)) << 5);
+    }
+    void Assembler::cbz(X t, Label* l) {   // branch if t == 0
+        const int imm19 = this->disp19(l);
+        this->op(0b1'011010'0'00000000000000, (X)0, t, (imm19 & mask(19)) << 5);
+    }
+    void Assembler::cbnz(X t, Label* l) {  // branch if t != 0
+        const int imm19 = this->disp19(l);
+        this->op(0b1'011010'1'00000000000000, (X)0, t, (imm19 & mask(19)) << 5);
+    }
+
+    // Loads/stores with an unsigned 12-bit scaled offset.
+    // NOTE(review): per the Arm encoding imm12 is in units of the transfer
+    // size (e.g. 16 bytes for q-registers); callers here pass element/slot
+    // indices directly, as seen where ldrq/strq take stack-slot numbers.
+
+    // Loads into a general-purpose register: 64/32/16/8-bit.
+    void Assembler::ldrd(X dst, X src, int imm12) {
+        this->op(0b11'111'0'01'01'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::ldrs(X dst, X src, int imm12) {
+        this->op(0b10'111'0'01'01'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::ldrh(X dst, X src, int imm12) {
+        this->op(0b01'111'0'01'01'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::ldrb(X dst, X src, int imm12) {
+        this->op(0b00'111'0'01'01'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+
+    // Loads into a SIMD register: 128/64/32/16/8-bit.
+    void Assembler::ldrq(V dst, X src, int imm12) {
+        this->op(0b00'111'1'01'11'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::ldrd(V dst, X src, int imm12) {
+        this->op(0b11'111'1'01'01'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::ldrs(V dst, X src, int imm12) {
+        this->op(0b10'111'1'01'01'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::ldrh(V dst, X src, int imm12) {
+        this->op(0b01'111'1'01'01'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::ldrb(V dst, X src, int imm12) {
+        this->op(0b00'111'1'01'01'000000000000, src, dst, (imm12 & mask(12)) << 10);
+    }
+
+    // 32-bit store from a general-purpose register.
+    void Assembler::strs(X src, X dst, int imm12) {
+        this->op(0b10'111'0'01'00'000000000000, dst, src, (imm12 & mask(12)) << 10);
+    }
+
+    // Stores from a SIMD register: 128/64/32/16/8-bit.
+    void Assembler::strq(V src, X dst, int imm12) {
+        this->op(0b00'111'1'01'10'000000000000, dst, src, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::strd(V src, X dst, int imm12) {
+        this->op(0b11'111'1'01'00'000000000000, dst, src, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::strs(V src, X dst, int imm12) {
+        this->op(0b10'111'1'01'00'000000000000, dst, src, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::strh(V src, X dst, int imm12) {
+        this->op(0b01'111'1'01'00'000000000000, dst, src, (imm12 & mask(12)) << 10);
+    }
+    void Assembler::strb(V src, X dst, int imm12) {
+        this->op(0b00'111'1'01'00'000000000000, dst, src, (imm12 & mask(12)) << 10);
+    }
+
+    // Move a 32-bit vector lane into a GP register, and insert a GP register
+    // into a 32-bit vector lane. imm5 = (lane<<3)|0b100 selects a .s (4-byte)
+    // element per the Arm imm5 element-size encoding.
+    void Assembler::movs(X dst, V src, int lane) {
+        int imm5 = (lane << 3) | 0b100;
+        this->op(0b0'0'0'01110000'00000'0'01'1'1'1, src, dst, (imm5 & mask(5)) << 16);
+    }
+    void Assembler::inss(V dst, X src, int lane) {
+        int imm5 = (lane << 3) | 0b100;
+        this->op(0b0'1'0'01110000'00000'0'0011'1, src, dst, (imm5 & mask(5)) << 16);
+    }
+
+
+    // LDR (literal): load a 128-bit register from a PC-relative Label,
+    // typically a constant-pool entry emitted after the code.
+    void Assembler::ldrq(V dst, Label* l) {
+        const int imm19 = this->disp19(l);
+        this->op(0b10'011'1'00'00000000000000, (V)0, dst, (imm19 & mask(19)) << 5);
+    }
+
+    // Broadcast a GP register into all four 32-bit lanes.
+    void Assembler::dup4s(V dst, X src) {
+        this->op(0b0'1'0'01110000'00100'0'0001'1, src, dst);
+    }
+
+    // LD1R: load one element from [src] and replicate it to every lane
+    // (32-bit x4, 16-bit x8, 8-bit x16 variants).
+    void Assembler::ld1r4s(V dst, X src) {
+        this->op(0b0'1'0011010'1'0'00000'110'0'10, src, dst);
+    }
+    void Assembler::ld1r8h(V dst, X src) {
+        this->op(0b0'1'0011010'1'0'00000'110'0'01, src, dst);
+    }
+    void Assembler::ld1r16b(V dst, X src) {
+        this->op(0b0'1'0011010'1'0'00000'110'0'00, src, dst);
+    }
+
+    // LD2/LD4 and ST2/ST4 structure (de)interleaving loads/stores of .4s
+    // vectors; per the Arm ISA these name the first of 2 or 4 consecutive
+    // V registers.
+    void Assembler::ld24s(V dst, X src) { this->op(0b0'1'0011000'1'000000'1000'10, src, dst); }
+    void Assembler::ld44s(V dst, X src) { this->op(0b0'1'0011000'1'000000'0000'10, src, dst); }
+    void Assembler::st24s(V src, X dst) { this->op(0b0'1'0011000'0'000000'1000'10, dst, src); }
+    void Assembler::st44s(V src, X dst) { this->op(0b0'1'0011000'0'000000'0000'10, dst, src); }
+
+    // Single-lane LD2/LD4 of 32-bit elements: the 2-bit lane index is split
+    // into the Q (bit 30) and S (bit 12) fields of the encoding.
+    void Assembler::ld24s(V dst, X src, int lane) {
+        int Q = (lane & 2)>>1,
+            S = (lane & 1);
+                 /*      Q                       S */
+        this->op(0b0'0'0011010'1'1'00000'100'0'00, src, dst, (Q<<30)|(S<<12));
+    }
+    void Assembler::ld44s(V dst, X src, int lane) {
+        int Q = (lane & 2)>>1,
+            S = (lane & 1);
+        this->op(0b0'0'0011010'1'1'00000'101'0'00, src, dst, (Q<<30)|(S<<12));
+    }
+
+    // Resolve Label l to the current write position, back-patching every
+    // instruction that referenced it while its final position was unknown.
+    // Only runs when we're actually writing code (fCode != nullptr); a
+    // size-measuring pass records offsets but has nothing to patch.
+    void Assembler::label(Label* l) {
+        if (fCode) {
+            // The instructions all currently point to l->offset.
+            // We'll want to add a delta to point them to here.
+            int here = (int)this->size();
+            int delta = here - l->offset;
+            l->offset = here;
+
+            if (l->kind == Label::ARMDisp19) {
+                for (int ref : l->references) {
+                    // ref points to a 32-bit instruction with 19-bit displacement in instructions.
+                    uint32_t inst;
+                    memcpy(&inst, fCode + ref, 4);
+
+                    // [ 8 bits to preserve] [ 19 bit signed displacement ] [ 5 bits to preserve ]
+                    // Shift left then arithmetic-shift right to sign-extend the 19-bit field.
+                    int disp = (int)(inst << 8) >> 13;
+
+                    disp += delta/4;  // delta is in bytes, we want instructions.
+
+                    // Put it all back together, preserving the high 8 bits and low 5.
+                    inst = ((disp << 5) &  (mask(19) << 5))
+                         | ((inst     ) & ~(mask(19) << 5));
+                    memcpy(fCode + ref, &inst, 4);
+                }
+            }
+
+            if (l->kind == Label::X86Disp32) {
+                for (int ref : l->references) {
+                    // ref points to a 32-bit displacement in bytes.
+                    int disp;
+                    memcpy(&disp, fCode + ref, 4);
+
+                    disp += delta;
+
+                    memcpy(fCode + ref, &disp, 4);
+                }
+            }
+        }
+    }
+
+    // Run the program over n lanes of the given argument pointers, dispatching
+    // to the JIT entry point when one exists (and JIT is allowed), otherwise
+    // falling back to the portable interpreter.
+    void Program::eval(int n, void* args[]) const {
+    #define SKVM_JIT_STATS 0
+    #if SKVM_JIT_STATS
+        // Cheap global counters so we can report at process exit how often
+        // evaluation actually went through the JIT.
+        static std::atomic<int64_t> calls{0}, jits{0},
+                                    pixels{0}, fast{0};
+        pixels += n;
+        if (0 == calls++) {
+            atexit([]{
+                int64_t num = jits .load(),
+                        den = calls.load();
+                SkDebugf("%.3g%% of %lld eval() calls went through JIT.\n", (100.0 * num)/den, den);
+                num = fast  .load();
+                den = pixels.load();
+                SkDebugf("%.3g%% of %lld pixels went through JIT.\n", (100.0 * num)/den, den);
+            });
+        }
+    #endif
+
+    #if !defined(SKVM_JIT_BUT_IGNORE_IT)
+        const void* jit_entry = fImpl->jit_entry.load();
+        // jit_entry may be null if we can't JIT
+        //
+        // Ordinarily we'd never find ourselves with non-null jit_entry and !gSkVMAllowJIT, but it
+        // can happen during interactive programs like Viewer that toggle gSkVMAllowJIT on and off,
+        // due to timing or program caching.
+        if (jit_entry != nullptr && gSkVMAllowJIT) {
+        #if SKVM_JIT_STATS
+            jits++;
+            fast += n;
+        #endif
+            // The JIT entry point takes (n, one pointer per argument); cast to
+            // the matching arity and tail-call.  Only programs with at most 7
+            // arguments are dispatched here.
+            void** a = args;
+            switch (fImpl->strides.size()) {
+                case 0: return ((void(*)(int                        ))jit_entry)(n                   );
+                case 1: return ((void(*)(int,void*                  ))jit_entry)(n,a[0]              );
+                case 2: return ((void(*)(int,void*,void*            ))jit_entry)(n,a[0],a[1]         );
+                case 3: return ((void(*)(int,void*,void*,void*      ))jit_entry)(n,a[0],a[1],a[2]);
+                case 4: return ((void(*)(int,void*,void*,void*,void*))jit_entry)
+                                (n,a[0],a[1],a[2],a[3]);
+                case 5: return ((void(*)(int,void*,void*,void*,void*,void*))jit_entry)
+                                (n,a[0],a[1],a[2],a[3],a[4]);
+                case 6: return ((void(*)(int,void*,void*,void*,void*,void*,void*))jit_entry)
+                                (n,a[0],a[1],a[2],a[3],a[4],a[5]);
+                case 7: return ((void(*)(int,void*,void*,void*,void*,void*,void*,void*))jit_entry)
+                                (n,a[0],a[1],a[2],a[3],a[4],a[5],a[6]);
+                default: break; //SkASSERT(fImpl->strides.size() <= 7);
+            }
+        }
+    #endif
+
+        // So we'll sometimes use the interpreter here even if later calls will use the JIT.
+        SkOpts::interpret_skvm(fImpl->instructions.data(), (int)fImpl->instructions.size(),
+                               this->nregs(), this->loop(), fImpl->strides.data(),
+                               fImpl->traceHooks.data(), fImpl->traceHooks.size(),
+                               this->nargs(), n, args);
+    }
+
+    bool Program::hasTraceHooks() const {
+        // A program carrying any trace hooks has been instrumented for debugging.
+        return fImpl->traceHooks.size() > 0;
+    }
+
+    bool Program::hasJIT() const {
+        // A non-null entry point means machine code was successfully generated.
+        const void* entry = fImpl->jit_entry.load();
+        return entry != nullptr;
+    }
+
+    // Release any JIT artifacts: either the dylib holding the generated code
+    // or the anonymous executable mapping, then clear the cached entry
+    // point, size, and dylib handle so hasJIT() reports false.
+    void Program::dropJIT() {
+    #if defined(SKVM_JIT)
+        if (fImpl->dylib) {
+            close_dylib(fImpl->dylib);
+        } else if (auto jit_entry = fImpl->jit_entry.load()) {
+            unmap_jit_buffer(jit_entry, fImpl->jit_size);
+        }
+    #else
+        // Without SKVM_JIT there should never be JIT state to drop.
+        SkASSERT(!this->hasJIT());
+    #endif
+
+        fImpl->jit_entry.store(nullptr);
+        fImpl->jit_size  = 0;
+        fImpl->dylib     = nullptr;
+    }
+
+    // Default-construct with a fresh, empty implementation block.
+    Program::Program() {
+        fImpl = std::make_unique<Impl>();
+    }
+
+    Program::~Program() {
+        // Moved-from Programs have fImpl == nullptr and own nothing,
+        // so there is no JIT state left to tear down.
+        if (!fImpl) {
+            return;
+        }
+        this->dropJIT();
+    }
+
+    // Move-construct by stealing the other Program's implementation;
+    // the source is left with fImpl == nullptr.
+    Program::Program(Program&& other) : fImpl(std::move(other.fImpl)) {}
+
+    // Move-assign, releasing whatever this Program previously owned.
+    // NOTE(review): simply overwriting fImpl destroys the old Impl without
+    // unmapping any JIT buffer it references (only ~Program calls dropJIT);
+    // dropping the JIT first avoids leaking that mapping — confirm against
+    // Impl's destructor.
+    Program& Program::operator=(Program&& other) {
+        if (this != &other) {
+            if (fImpl) {
+                this->dropJIT();
+            }
+            fImpl = std::move(other.fImpl);
+        }
+        return *this;
+    }
+
+    // Build a runnable Program from optimized instructions. JIT compilation
+    // is attempted only when both the global switch and the caller allow it;
+    // the interpreter form is always set up as the fallback.
+    Program::Program(const std::vector<OptimizedInstruction>& instructions,
+                     std::unique_ptr<viz::Visualizer> visualizer,
+                     const std::vector<int>& strides,
+                     const std::vector<SkSL::TraceHook*>& traceHooks,
+                     const char* debug_name, bool allow_jit) : Program() {
+        fImpl->visualizer = std::move(visualizer);
+        fImpl->strides = strides;
+        fImpl->traceHooks = traceHooks;
+        if (gSkVMAllowJIT && allow_jit) {
+    #if defined(SKVM_JIT)
+            this->setupJIT(instructions, debug_name);
+    #endif
+        }
+
+        // Always prepare the interpreter; eval() uses it whenever the JIT
+        // is unavailable or disallowed.
+        this->setupInterpreter(instructions);
+    }
+
+    // Simple accessors over the shared Impl state. Note instructions()
+    // returns a copy of the interpreter instruction vector.
+    std::vector<InterpreterInstruction> Program::instructions() const { return fImpl->instructions; }
+    int  Program::nargs() const { return (int)fImpl->strides.size(); }
+    int  Program::nregs() const { return fImpl->regs; }
+    int  Program::loop () const { return fImpl->loop; }
+    bool Program::empty() const { return fImpl->instructions.empty(); }
+
+    // Translate OptimizedInstructions to InterpreterInstructions:
+    // assign a virtual register to every value (recycling dead registers),
+    // then emit hoisted instructions followed by the per-lane loop body.
+    void Program::setupInterpreter(const std::vector<OptimizedInstruction>& instructions) {
+        // Register each instruction is assigned to.
+        std::vector<Reg> reg(instructions.size());
+
+        // This next bit is a bit more complicated than strictly necessary;
+        // we could just assign every instruction to its own register.
+        //
+        // But recycling registers is fairly cheap, and good practice for the
+        // JITs where minimizing register pressure really is important.
+        //
+        // We have effectively infinite registers, so we hoist any value we can.
+        // (The JIT may choose a more complex policy to reduce register pressure.)
+
+        fImpl->regs = 0;
+        std::vector<Reg> avail;
+
+        // Assign this value to a register, recycling them where we can.
+        auto assign_register = [&](Val id) {
+            const OptimizedInstruction& inst = instructions[id];
+
+            // If this is a real input and its lifetime ends at this instruction,
+            // we can recycle the register it's occupying.
+            auto maybe_recycle_register = [&](Val input) {
+                if (input != NA && instructions[input].death == id) {
+                    avail.push_back(reg[input]);
+                }
+            };
+
+            // Take care to not recycle the same register twice.
+            const Val x = inst.x, y = inst.y, z = inst.z, w = inst.w;
+            if (true                      ) { maybe_recycle_register(x); }
+            if (y != x                    ) { maybe_recycle_register(y); }
+            if (z != x && z != y          ) { maybe_recycle_register(z); }
+            if (w != x && w != y && w != z) { maybe_recycle_register(w); }
+
+            // Instructions that die at themselves (stores) don't need a register.
+            if (inst.death != id) {
+                // Allocate a register if we have to, preferring to reuse anything available.
+                if (avail.empty()) {
+                    reg[id] = fImpl->regs++;
+                } else {
+                    reg[id] = avail.back();
+                    avail.pop_back();
+                }
+            }
+        };
+
+        // Assign a register to each hoisted instruction, then each non-hoisted loop instruction.
+        for (Val id = 0; id < (Val)instructions.size(); id++) {
+            if ( instructions[id].can_hoist) { assign_register(id); }
+        }
+        for (Val id = 0; id < (Val)instructions.size(); id++) {
+            if (!instructions[id].can_hoist) { assign_register(id); }
+        }
+
+        // Translate OptimizedInstructions to InterpreterInstructions by mapping values to
+        // registers. This will be two passes, first hoisted instructions, then inside the loop.
+
+        // The loop begins at the fImpl->loop'th Instruction: hoisted
+        // instructions are emitted first and counted into fImpl->loop.
+        fImpl->loop = 0;
+        fImpl->instructions.reserve(instructions.size());
+
+        // Add a mapping for the N/A sentinel Val to any arbitrary register
+        // so lookups don't have to know which arguments are used by which Ops.
+        auto lookup_register = [&](Val id) {
+            return id == NA ? (Reg)0
+                            : reg[id];
+        };
+
+        // Emit one InterpreterInstruction for (id, inst) with all operand
+        // Vals rewritten to their assigned registers.
+        auto push_instruction = [&](Val id, const OptimizedInstruction& inst) {
+            InterpreterInstruction pinst{
+                inst.op,
+                lookup_register(id),
+                lookup_register(inst.x),
+                lookup_register(inst.y),
+                lookup_register(inst.z),
+                lookup_register(inst.w),
+                inst.immA,
+                inst.immB,
+                inst.immC,
+            };
+            fImpl->instructions.push_back(pinst);
+        };
+
+        for (Val id = 0; id < (Val)instructions.size(); id++) {
+            const OptimizedInstruction& inst = instructions[id];
+            if (inst.can_hoist) {
+                push_instruction(id, inst);
+                fImpl->loop++;
+            }
+        }
+        for (Val id = 0; id < (Val)instructions.size(); id++) {
+            const OptimizedInstruction& inst = instructions[id];
+            if (!inst.can_hoist) {
+                push_instruction(id, inst);
+            }
+        }
+    }
+
+#if defined(SKVM_JIT)
+
+    // Select the JIT's vector register type for the target architecture:
+    // 256-bit ymm on x86-64, 128-bit V on AArch64.
+    namespace SkVMJitTypes {
+    #if defined(__x86_64__) || defined(_M_X64)
+        using Reg = Assembler::Ymm;
+    #elif defined(__aarch64__)
+        using Reg = Assembler::V;
+    #endif
+    }  // namespace SkVMJitTypes
+
+ bool Program::jit(const std::vector<OptimizedInstruction>& instructions,
+ int* stack_hint,
+ uint32_t* registers_used,
+ Assembler* a) const {
+ using A = Assembler;
+ using SkVMJitTypes::Reg;
+
+ SkTHashMap<int, A::Label> constants; // Constants (mostly splats) share the same pool.
+ A::Label iota; // Varies per lane, for Op::index.
+ A::Label load64_index; // Used to load low or high half of 64-bit lanes.
+
+ // The `regs` array tracks everything we know about each register's state:
+ // - NA: empty
+ // - RES: reserved by ABI
+ // - TMP: holding a temporary
+ // - id: holding Val id
+ constexpr Val RES = NA-1,
+ TMP = RES-1;
+
+ // Map val -> stack slot.
+ std::vector<int> stack_slot(instructions.size(), NA);
+ int next_stack_slot = 0;
+
+ const int nstack_slots = *stack_hint >= 0 ? *stack_hint
+ : stack_slot.size();
+ #if defined(__x86_64__) || defined(_M_X64)
+ if (!SkCpu::Supports(SkCpu::HSW)) {
+ return false;
+ }
+ const int K = 8;
+ #if defined(_M_X64) // Important to check this first; clang-cl defines both.
+ const A::GP64 N = A::rcx,
+ GP0 = A::rax,
+ GP1 = A::r11,
+ arg[] = { A::rdx, A::r8, A::r9, A::r10, A::rdi, A::rsi };
+
+ // xmm6-15 need are callee-saved.
+ std::array<Val,16> regs = {
+ NA, NA, NA, NA, NA, NA,RES,RES,
+ RES,RES,RES,RES, RES,RES,RES,RES,
+ };
+ const uint32_t incoming_registers_used = *registers_used;
+
+ auto enter = [&]{
+ // rcx,rdx,r8,r9 are all already holding their correct values.
+ // Load caller-saved r10 from rsp+40 if there's a fourth arg.
+ if (fImpl->strides.size() >= 4) {
+ a->mov(A::r10, A::Mem{A::rsp, 40});
+ }
+ // Load callee-saved rdi from rsp+48 if there's a fifth arg,
+ // first saving it to ABI reserved shadow area rsp+8.
+ if (fImpl->strides.size() >= 5) {
+ a->mov(A::Mem{A::rsp, 8}, A::rdi);
+ a->mov(A::rdi, A::Mem{A::rsp, 48});
+ }
+ // Load callee-saved rsi from rsp+56 if there's a sixth arg,
+ // first saving it to ABI reserved shadow area rsp+16.
+ if (fImpl->strides.size() >= 6) {
+ a->mov(A::Mem{A::rsp, 16}, A::rsi);
+ a->mov(A::rsi, A::Mem{A::rsp, 56});
+ }
+
+ // Allocate stack for our values and callee-saved xmm6-15.
+ int stack_needed = nstack_slots*K*4;
+ for (int r = 6; r < 16; r++) {
+ if (incoming_registers_used & (1<<r)) {
+ stack_needed += 16;
+ }
+ }
+ if (stack_needed) { a->sub(A::rsp, stack_needed); }
+
+ int next_saved_xmm = nstack_slots*K*4;
+ for (int r = 6; r < 16; r++) {
+ if (incoming_registers_used & (1<<r)) {
+ a->vmovups(A::Mem{A::rsp, next_saved_xmm}, (A::Xmm)r);
+ next_saved_xmm += 16;
+ regs[r] = NA;
+ }
+ }
+ };
+ auto exit = [&]{
+ // The second pass of jit() shouldn't use any register it didn't in the first pass.
+ SkASSERT((*registers_used & incoming_registers_used) == *registers_used);
+
+ // Restore callee-saved xmm6-15 and the stack pointer.
+ int stack_used = nstack_slots*K*4;
+ for (int r = 6; r < 16; r++) {
+ if (incoming_registers_used & (1<<r)) {
+ a->vmovups((A::Xmm)r, A::Mem{A::rsp, stack_used});
+ stack_used += 16;
+ }
+ }
+ if (stack_used) { a->add(A::rsp, stack_used); }
+
+ // Restore callee-saved rdi/rsi if we used them.
+ if (fImpl->strides.size() >= 5) {
+ a->mov(A::rdi, A::Mem{A::rsp, 8});
+ }
+ if (fImpl->strides.size() >= 6) {
+ a->mov(A::rsi, A::Mem{A::rsp, 16});
+ }
+
+ a->vzeroupper();
+ a->ret();
+ };
+ #elif defined(__x86_64__)
+ const A::GP64 N = A::rdi,
+ GP0 = A::rax,
+ GP1 = A::r11,
+ arg[] = { A::rsi, A::rdx, A::rcx, A::r8, A::r9, A::r10 };
+
+ // All 16 ymm registers are available to use.
+ std::array<Val,16> regs = {
+ NA,NA,NA,NA, NA,NA,NA,NA,
+ NA,NA,NA,NA, NA,NA,NA,NA,
+ };
+
+ auto enter = [&]{
+ // Load caller-saved r10 from rsp+8 if there's a sixth arg.
+ if (fImpl->strides.size() >= 6) {
+ a->mov(A::r10, A::Mem{A::rsp, 8});
+ }
+ if (nstack_slots) { a->sub(A::rsp, nstack_slots*K*4); }
+ };
+ auto exit = [&]{
+ if (nstack_slots) { a->add(A::rsp, nstack_slots*K*4); }
+ a->vzeroupper();
+ a->ret();
+ };
+ #endif
+
+ auto load_from_memory = [&](Reg r, Val v) {
+ if (instructions[v].op == Op::splat) {
+ if (instructions[v].immA == 0) {
+ a->vpxor(r,r,r);
+ } else {
+ a->vmovups(r, constants.find(instructions[v].immA));
+ }
+ } else {
+ SkASSERT(stack_slot[v] != NA);
+ a->vmovups(r, A::Mem{A::rsp, stack_slot[v]*K*4});
+ }
+ };
+ auto store_to_stack = [&](Reg r, Val v) {
+ SkASSERT(next_stack_slot < nstack_slots);
+ stack_slot[v] = next_stack_slot++;
+ a->vmovups(A::Mem{A::rsp, stack_slot[v]*K*4}, r);
+ };
+ #elif defined(__aarch64__)
+ const int K = 4;
+ const A::X N = A::x0,
+ GP0 = A::x8,
+ GP1 = A::x9,
+ arg[] = { A::x1, A::x2, A::x3, A::x4, A::x5, A::x6, A::x7 };
+
+ // We can use v0-v7 and v16-v31 freely; we'd need to preserve v8-v15 in enter/exit.
+ std::array<Val,32> regs = {
+ NA, NA, NA, NA, NA, NA, NA, NA,
+ RES,RES,RES,RES, RES,RES,RES,RES,
+ NA, NA, NA, NA, NA, NA, NA, NA,
+ NA, NA, NA, NA, NA, NA, NA, NA,
+ };
+
+ auto enter = [&]{ if (nstack_slots) { a->sub(A::sp, A::sp, nstack_slots*K*4); } };
+ auto exit = [&]{ if (nstack_slots) { a->add(A::sp, A::sp, nstack_slots*K*4); }
+ a->ret(A::x30); };
+
+ auto load_from_memory = [&](Reg r, Val v) {
+ if (instructions[v].op == Op::splat) {
+ if (instructions[v].immA == 0) {
+ a->eor16b(r,r,r);
+ } else {
+ a->ldrq(r, constants.find(instructions[v].immA));
+ }
+ } else {
+ SkASSERT(stack_slot[v] != NA);
+ a->ldrq(r, A::sp, stack_slot[v]);
+ }
+ };
+ auto store_to_stack = [&](Reg r, Val v) {
+ SkASSERT(next_stack_slot < nstack_slots);
+ stack_slot[v] = next_stack_slot++;
+ a->strq(r, A::sp, stack_slot[v]);
+ };
+ #endif
+
+ *registers_used = 0; // We'll update this as we go.
+
+ if (std::size(arg) < fImpl->strides.size()) {
+ return false;
+ }
+
+ auto emit = [&](Val id, bool scalar) {
+ const int active_lanes = scalar ? 1 : K;
+ const OptimizedInstruction& inst = instructions[id];
+ const Op op = inst.op;
+ const Val x = inst.x,
+ y = inst.y,
+ z = inst.z,
+ w = inst.w;
+ const int immA = inst.immA,
+ immB = inst.immB,
+ immC = inst.immC;
+
+ // alloc_tmp() returns the first of N adjacent temporary registers,
+ // each freed manually with free_tmp() or noted as our result with mark_tmp_as_dst().
+ auto alloc_tmp = [&](int N=1) -> Reg {
+ auto needs_spill = [&](Val v) -> bool {
+ SkASSERT(v >= 0); // {NA,TMP,RES} need to be handled before calling this.
+ return stack_slot[v] == NA // We haven't spilled it already?
+ && instructions[v].op != Op::splat; // No need to spill constants.
+ };
+
+ // We want to find a block of N adjacent registers requiring the fewest spills.
+ int best_block = -1,
+ min_spills = 0x7fff'ffff;
+ for (int block = 0; block+N <= (int)regs.size(); block++) {
+ int spills = 0;
+ for (int r = block; r < block+N; r++) {
+ Val v = regs[r];
+ // Registers holding NA (nothing) are ideal, nothing to spill.
+ if (v == NA) {
+ continue;
+ }
+ // We can't spill anything REServed or that we'll need this instruction.
+ if (v == RES ||
+ v == TMP || v == id || v == x || v == y || v == z || v == w) {
+ spills = 0x7fff'ffff;
+ block = r; // (optimization) continue outer loop at next register.
+ break;
+ }
+ // Usually here we've got a value v that we'd have to spill to the stack
+ // before reusing its register, but sometimes even now we get a freebie.
+ spills += needs_spill(v) ? 1 : 0;
+ }
+
+ // TODO: non-arbitrary tie-breaking?
+ if (min_spills > spills) {
+ min_spills = spills;
+ best_block = block;
+ }
+ if (min_spills == 0) {
+ break; // (optimization) stop early if we find an unbeatable block.
+ }
+ }
+
+ // TODO: our search's success isn't obviously guaranteed... it depends on N
+ // and the number and relative position in regs of any unspillable values.
+ // I think we should be able to get away with N≤2 on x86-64 and N≤4 on arm64;
+ // we'll need to revisit this logic should this assert fire.
+ SkASSERT(min_spills <= N);
+
+ // Spill what needs spilling, and mark the block all as TMP.
+ for (int r = best_block; r < best_block+N; r++) {
+ Val& v = regs[r];
+ *registers_used |= (1<<r);
+
+ SkASSERT(v == NA || v >= 0);
+ if (v >= 0 && needs_spill(v)) {
+ store_to_stack((Reg)r, v);
+ SkASSERT(!needs_spill(v));
+ min_spills--;
+ }
+
+ v = TMP;
+ }
+ SkASSERT(min_spills == 0);
+ return (Reg)best_block;
+ };
+
+ auto free_tmp = [&](Reg r) {
+ SkASSERT(regs[r] == TMP);
+ regs[r] = NA;
+ };
+
+ // Which register holds dst,x,y,z,w for this instruction? NA if none does yet.
+ int rd = NA,
+ rx = NA,
+ ry = NA,
+ rz = NA,
+ rw = NA;
+
+ auto update_regs = [&](Reg r, Val v) {
+ if (v == id) { rd = r; }
+ if (v == x) { rx = r; }
+ if (v == y) { ry = r; }
+ if (v == z) { rz = r; }
+ if (v == w) { rw = r; }
+ return r;
+ };
+
+ auto find_existing_reg = [&](Val v) -> int {
+ // Quick-check our working registers.
+ if (v == id && rd != NA) { return rd; }
+ if (v == x && rx != NA) { return rx; }
+ if (v == y && ry != NA) { return ry; }
+ if (v == z && rz != NA) { return rz; }
+ if (v == w && rw != NA) { return rw; }
+
+ // Search inter-instruction register map.
+ for (auto [r,val] : SkMakeEnumerate(regs)) {
+ if (val == v) {
+ return update_regs((Reg)r, v);
+ }
+ }
+ return NA;
+ };
+
+ // Return a register for Val, holding that value if it already exists.
+ // During this instruction all calls to r(v) will return the same register.
+ auto r = [&](Val v) -> Reg {
+ SkASSERT(v >= 0);
+
+ if (int found = find_existing_reg(v); found != NA) {
+ return (Reg)found;
+ }
+
+ Reg r = alloc_tmp();
+ SkASSERT(regs[r] == TMP);
+
+ SkASSERT(v <= id);
+ if (v < id) {
+ // If v < id, we're loading one of this instruction's inputs.
+ // If v == id we're just allocating its destination register.
+ load_from_memory(r, v);
+ }
+ regs[r] = v;
+ return update_regs(r, v);
+ };
+
+ auto dies_here = [&](Val v) -> bool {
+ SkASSERT(v >= 0);
+ return instructions[v].death == id;
+ };
+
+ // Alias dst() to r(v) if dies_here(v).
+ auto try_alias = [&](Val v) -> bool {
+ SkASSERT(v == x || v == y || v == z || v == w);
+ if (dies_here(v)) {
+ rd = r(v); // Vals v and id share a register for this instruction.
+ regs[rd] = id; // Next instruction, Val id will be in the register, not Val v.
+ return true;
+ }
+ return false;
+ };
+
+ // Generally r(id),
+ // but with a hint, try to alias dst() to r(v) if dies_here(v).
+ auto dst = [&](Val hint1 = NA, Val hint2 = NA) -> Reg {
+ if (hint1 != NA && try_alias(hint1)) { return r(id); }
+ if (hint2 != NA && try_alias(hint2)) { return r(id); }
+ return r(id);
+ };
+
+ #if defined(__aarch64__) // Nothing sneaky, just unused on x86-64.
+ auto mark_tmp_as_dst = [&](Reg tmp) {
+ SkASSERT(regs[tmp] == TMP);
+ rd = tmp;
+ regs[rd] = id;
+ SkASSERT(dst() == tmp);
+ };
+ #endif
+
+ #if defined(__x86_64__) || defined(_M_X64)
+ // On x86 we can work with many values directly from the stack or program constant pool.
+ auto any = [&](Val v) -> A::Operand {
+ SkASSERT(v >= 0);
+ SkASSERT(v < id);
+
+ if (int found = find_existing_reg(v); found != NA) {
+ return (Reg)found;
+ }
+ if (instructions[v].op == Op::splat) {
+ return constants.find(instructions[v].immA);
+ }
+ return A::Mem{A::rsp, stack_slot[v]*K*4};
+ };
+
+ // This is never really worth asking except when any() might be used;
+ // if we need this value in ARM, might as well just call r(v) to get it into a register.
+ auto in_reg = [&](Val v) -> bool {
+ return find_existing_reg(v) != NA;
+ };
+ #endif
+
+ switch (op) {
+ // Make sure splat constants can be found by load_from_memory() or any().
+ case Op::splat:
+ (void)constants[immA];
+ break;
+
+ #if defined(__x86_64__) || defined(_M_X64)
+ case Op::assert_true: {
+ a->vptest (r(x), &constants[0xffffffff]);
+ A::Label all_true;
+ a->jc(&all_true);
+ a->int3();
+ a->label(&all_true);
+ } break;
+
+ case Op::trace_line:
+ case Op::trace_var:
+ case Op::trace_enter:
+ case Op::trace_exit:
+ case Op::trace_scope:
+ /* Force this program to run in the interpreter. */
+ return false;
+
+ case Op::store8:
+ if (scalar) {
+ a->vpextrb(A::Mem{arg[immA]}, (A::Xmm)r(x), 0);
+ } else {
+ a->vpackusdw(dst(x), r(x), r(x));
+ a->vpermq (dst(), dst(), 0xd8);
+ a->vpackuswb(dst(), dst(), dst());
+ a->vmovq (A::Mem{arg[immA]}, (A::Xmm)dst());
+ } break;
+
+ case Op::store16:
+ if (scalar) {
+ a->vpextrw(A::Mem{arg[immA]}, (A::Xmm)r(x), 0);
+ } else {
+ a->vpackusdw(dst(x), r(x), r(x));
+ a->vpermq (dst(), dst(), 0xd8);
+ a->vmovups (A::Mem{arg[immA]}, (A::Xmm)dst());
+ } break;
+
+ case Op::store32: if (scalar) { a->vmovd (A::Mem{arg[immA]}, (A::Xmm)r(x)); }
+ else { a->vmovups(A::Mem{arg[immA]}, r(x)); }
+ break;
+
+ case Op::store64: if (scalar) {
+ a->vmovd(A::Mem{arg[immA],0}, (A::Xmm)r(x));
+ a->vmovd(A::Mem{arg[immA],4}, (A::Xmm)r(y));
+ } else {
+ // r(x) = {a,b,c,d|e,f,g,h}
+ // r(y) = {i,j,k,l|m,n,o,p}
+ // We want to write a,i,b,j,c,k,d,l,e,m...
+ A::Ymm L = alloc_tmp(),
+ H = alloc_tmp();
+ a->vpunpckldq(L, r(x), any(y)); // L = {a,i,b,j|e,m,f,n}
+ a->vpunpckhdq(H, r(x), any(y)); // H = {c,k,d,l|g,o,h,p}
+ a->vperm2f128(dst(), L,H, 0x20); // = {a,i,b,j|c,k,d,l}
+ a->vmovups(A::Mem{arg[immA], 0}, dst());
+ a->vperm2f128(dst(), L,H, 0x31); // = {e,m,f,n|g,o,h,p}
+ a->vmovups(A::Mem{arg[immA],32}, dst());
+ free_tmp(L);
+ free_tmp(H);
+ } break;
+
+ case Op::store128: {
+ // TODO: >32-bit stores
+ a->vmovd (A::Mem{arg[immA], 0*16 + 0}, (A::Xmm)r(x) );
+ a->vmovd (A::Mem{arg[immA], 0*16 + 4}, (A::Xmm)r(y) );
+ a->vmovd (A::Mem{arg[immA], 0*16 + 8}, (A::Xmm)r(z) );
+ a->vmovd (A::Mem{arg[immA], 0*16 + 12}, (A::Xmm)r(w) );
+ if (scalar) { break; }
+
+ a->vpextrd(A::Mem{arg[immA], 1*16 + 0}, (A::Xmm)r(x), 1);
+ a->vpextrd(A::Mem{arg[immA], 1*16 + 4}, (A::Xmm)r(y), 1);
+ a->vpextrd(A::Mem{arg[immA], 1*16 + 8}, (A::Xmm)r(z), 1);
+ a->vpextrd(A::Mem{arg[immA], 1*16 + 12}, (A::Xmm)r(w), 1);
+
+ a->vpextrd(A::Mem{arg[immA], 2*16 + 0}, (A::Xmm)r(x), 2);
+ a->vpextrd(A::Mem{arg[immA], 2*16 + 4}, (A::Xmm)r(y), 2);
+ a->vpextrd(A::Mem{arg[immA], 2*16 + 8}, (A::Xmm)r(z), 2);
+ a->vpextrd(A::Mem{arg[immA], 2*16 + 12}, (A::Xmm)r(w), 2);
+
+ a->vpextrd(A::Mem{arg[immA], 3*16 + 0}, (A::Xmm)r(x), 3);
+ a->vpextrd(A::Mem{arg[immA], 3*16 + 4}, (A::Xmm)r(y), 3);
+ a->vpextrd(A::Mem{arg[immA], 3*16 + 8}, (A::Xmm)r(z), 3);
+ a->vpextrd(A::Mem{arg[immA], 3*16 + 12}, (A::Xmm)r(w), 3);
+ // Now we need to store the upper 128 bits of x,y,z,w.
+ // Storing in this order rather than interlacing minimizes temporaries.
+ a->vextracti128(dst(), r(x), 1);
+ a->vmovd (A::Mem{arg[immA], 4*16 + 0}, (A::Xmm)dst() );
+ a->vpextrd(A::Mem{arg[immA], 5*16 + 0}, (A::Xmm)dst(), 1);
+ a->vpextrd(A::Mem{arg[immA], 6*16 + 0}, (A::Xmm)dst(), 2);
+ a->vpextrd(A::Mem{arg[immA], 7*16 + 0}, (A::Xmm)dst(), 3);
+
+ a->vextracti128(dst(), r(y), 1);
+ a->vmovd (A::Mem{arg[immA], 4*16 + 4}, (A::Xmm)dst() );
+ a->vpextrd(A::Mem{arg[immA], 5*16 + 4}, (A::Xmm)dst(), 1);
+ a->vpextrd(A::Mem{arg[immA], 6*16 + 4}, (A::Xmm)dst(), 2);
+ a->vpextrd(A::Mem{arg[immA], 7*16 + 4}, (A::Xmm)dst(), 3);
+
+ a->vextracti128(dst(), r(z), 1);
+ a->vmovd (A::Mem{arg[immA], 4*16 + 8}, (A::Xmm)dst() );
+ a->vpextrd(A::Mem{arg[immA], 5*16 + 8}, (A::Xmm)dst(), 1);
+ a->vpextrd(A::Mem{arg[immA], 6*16 + 8}, (A::Xmm)dst(), 2);
+ a->vpextrd(A::Mem{arg[immA], 7*16 + 8}, (A::Xmm)dst(), 3);
+
+ a->vextracti128(dst(), r(w), 1);
+ a->vmovd (A::Mem{arg[immA], 4*16 + 12}, (A::Xmm)dst() );
+ a->vpextrd(A::Mem{arg[immA], 5*16 + 12}, (A::Xmm)dst(), 1);
+ a->vpextrd(A::Mem{arg[immA], 6*16 + 12}, (A::Xmm)dst(), 2);
+ a->vpextrd(A::Mem{arg[immA], 7*16 + 12}, (A::Xmm)dst(), 3);
+ } break;
+
+ case Op::load8: if (scalar) {
+ a->vpxor (dst(), dst(), dst());
+ a->vpinsrb((A::Xmm)dst(), (A::Xmm)dst(), A::Mem{arg[immA]}, 0);
+ } else {
+ a->vpmovzxbd(dst(), A::Mem{arg[immA]});
+ } break;
+
+ case Op::load16: if (scalar) {
+ a->vpxor (dst(), dst(), dst());
+ a->vpinsrw((A::Xmm)dst(), (A::Xmm)dst(), A::Mem{arg[immA]}, 0);
+ } else {
+ a->vpmovzxwd(dst(), A::Mem{arg[immA]});
+ } break;
+
+ case Op::load32: if (scalar) { a->vmovd ((A::Xmm)dst(), A::Mem{arg[immA]}); }
+ else { a->vmovups( dst(), A::Mem{arg[immA]}); }
+ break;
+
+ case Op::load64: if (scalar) {
+ a->vmovd((A::Xmm)dst(), A::Mem{arg[immA], 4*immB});
+ } else {
+ A::Ymm tmp = alloc_tmp();
+ a->vmovups(tmp, &load64_index);
+ a->vpermps(dst(), tmp, A::Mem{arg[immA], 0});
+ a->vpermps( tmp, tmp, A::Mem{arg[immA], 32});
+ // Low 128 bits holds immB=0 lanes, high 128 bits holds immB=1.
+ a->vperm2f128(dst(), dst(),tmp, immB ? 0x31 : 0x20);
+ free_tmp(tmp);
+ } break;
+
+ case Op::load128: if (scalar) {
+ a->vmovd((A::Xmm)dst(), A::Mem{arg[immA], 4*immB});
+ } else {
+ // Load 4 low values into xmm tmp,
+ A::Ymm tmp = alloc_tmp();
+ A::Xmm t = (A::Xmm)tmp;
+ a->vmovd (t, A::Mem{arg[immA], 0*16 + 4*immB} );
+ a->vpinsrd(t,t, A::Mem{arg[immA], 1*16 + 4*immB}, 1);
+ a->vpinsrd(t,t, A::Mem{arg[immA], 2*16 + 4*immB}, 2);
+ a->vpinsrd(t,t, A::Mem{arg[immA], 3*16 + 4*immB}, 3);
+
+ // Load 4 high values into xmm dst(),
+ A::Xmm d = (A::Xmm)dst();
+ a->vmovd (d, A::Mem{arg[immA], 4*16 + 4*immB} );
+ a->vpinsrd(d,d, A::Mem{arg[immA], 5*16 + 4*immB}, 1);
+ a->vpinsrd(d,d, A::Mem{arg[immA], 6*16 + 4*immB}, 2);
+ a->vpinsrd(d,d, A::Mem{arg[immA], 7*16 + 4*immB}, 3);
+
+ // Merge the two, ymm dst() = {xmm tmp|xmm dst()}
+ a->vperm2f128(dst(), tmp,dst(), 0x20);
+ free_tmp(tmp);
+ } break;
+
+ case Op::gather8: {
+ // As usual, the gather base pointer is immB bytes off of uniform immA.
+ a->mov(GP0, A::Mem{arg[immA], immB});
+
+ A::Ymm tmp = alloc_tmp();
+ a->vmovups(tmp, any(x));
+
+ for (int i = 0; i < active_lanes; i++) {
+ if (i == 4) {
+ // vpextrd can only pluck indices out from an Xmm register,
+ // so we manually swap over to the top when we're halfway through.
+ a->vextracti128((A::Xmm)tmp, tmp, 1);
+ }
+ a->vpextrd(GP1, (A::Xmm)tmp, i%4);
+ a->vpinsrb((A::Xmm)dst(), (A::Xmm)dst(), A::Mem{GP0,0,GP1,A::ONE}, i);
+ }
+ a->vpmovzxbd(dst(), dst());
+ free_tmp(tmp);
+ } break;
+
+ case Op::gather16: {
+ // Just as gather8 except vpinsrb->vpinsrw, ONE->TWO, and vpmovzxbd->vpmovzxwd.
+ a->mov(GP0, A::Mem{arg[immA], immB});
+
+ A::Ymm tmp = alloc_tmp();
+ a->vmovups(tmp, any(x));
+
+ for (int i = 0; i < active_lanes; i++) {
+ if (i == 4) {
+ a->vextracti128((A::Xmm)tmp, tmp, 1);
+ }
+ a->vpextrd(GP1, (A::Xmm)tmp, i%4);
+ a->vpinsrw((A::Xmm)dst(), (A::Xmm)dst(), A::Mem{GP0,0,GP1,A::TWO}, i);
+ }
+ a->vpmovzxwd(dst(), dst());
+ free_tmp(tmp);
+ } break;
+
+ case Op::gather32:
+ if (scalar) {
+ // Our gather base pointer is immB bytes off of uniform immA.
+ a->mov(GP0, A::Mem{arg[immA], immB});
+
+ // Grab our index from lane 0 of the index argument.
+ a->vmovd(GP1, (A::Xmm)r(x));
+
+ // dst = *(base + 4*index)
+ a->vmovd((A::Xmm)dst(x), A::Mem{GP0, 0, GP1, A::FOUR});
+ } else {
+ a->mov(GP0, A::Mem{arg[immA], immB});
+
+ A::Ymm mask = alloc_tmp();
+ a->vpcmpeqd(mask, mask, mask); // (All lanes enabled.)
+
+ a->vgatherdps(dst(), A::FOUR, r(x), GP0, mask);
+ free_tmp(mask);
+ }
+ break;
+
+ case Op::uniform32: a->vbroadcastss(dst(), A::Mem{arg[immA], immB});
+ break;
+
+ case Op::array32: a->mov(GP0, A::Mem{arg[immA], immB});
+ a->vbroadcastss(dst(), A::Mem{GP0, immC});
+ break;
+
+ case Op::index: a->vmovd((A::Xmm)dst(), N);
+ a->vbroadcastss(dst(), dst());
+ a->vpsubd(dst(), dst(), &iota);
+ break;
+
+ // We can swap the arguments of symmetric instructions to make better use of any().
+ case Op::add_f32:
+ if (in_reg(x)) { a->vaddps(dst(x), r(x), any(y)); }
+ else { a->vaddps(dst(y), r(y), any(x)); }
+ break;
+
+ case Op::mul_f32:
+ if (in_reg(x)) { a->vmulps(dst(x), r(x), any(y)); }
+ else { a->vmulps(dst(y), r(y), any(x)); }
+ break;
+
+ case Op::sub_f32: a->vsubps(dst(x), r(x), any(y)); break;
+ case Op::div_f32: a->vdivps(dst(x), r(x), any(y)); break;
+ case Op::min_f32: a->vminps(dst(y), r(y), any(x)); break; // Order matters,
+ case Op::max_f32: a->vmaxps(dst(y), r(y), any(x)); break; // see test SkVM_min_max.
+
+ case Op::fma_f32:
+ if (try_alias(x)) { a->vfmadd132ps(dst(x), r(z), any(y)); } else
+ if (try_alias(y)) { a->vfmadd213ps(dst(y), r(x), any(z)); } else
+ if (try_alias(z)) { a->vfmadd231ps(dst(z), r(x), any(y)); } else
+ { a->vmovups (dst(), any(x));
+ a->vfmadd132ps(dst(), r(z), any(y)); }
+ break;
+
+ case Op::fms_f32:
+ if (try_alias(x)) { a->vfmsub132ps(dst(x), r(z), any(y)); } else
+ if (try_alias(y)) { a->vfmsub213ps(dst(y), r(x), any(z)); } else
+ if (try_alias(z)) { a->vfmsub231ps(dst(z), r(x), any(y)); } else
+ { a->vmovups (dst(), any(x));
+ a->vfmsub132ps(dst(), r(z), any(y)); }
+ break;
+
+ case Op::fnma_f32:
+ if (try_alias(x)) { a->vfnmadd132ps(dst(x), r(z), any(y)); } else
+ if (try_alias(y)) { a->vfnmadd213ps(dst(y), r(x), any(z)); } else
+ if (try_alias(z)) { a->vfnmadd231ps(dst(z), r(x), any(y)); } else
+ { a->vmovups (dst(), any(x));
+ a->vfnmadd132ps(dst(), r(z), any(y)); }
+ break;
+
+ // In situations like this we want to try aliasing dst(x) when x is
+ // already in a register, but not if we'd have to load it from the stack
+ // just to alias it. That's done better directly into the new register.
+ case Op::sqrt_f32:
+ if (in_reg(x)) { a->vsqrtps(dst(x), r(x)); }
+ else { a->vsqrtps(dst(), any(x)); }
+ break;
+
+ case Op::add_i32:
+ if (in_reg(x)) { a->vpaddd(dst(x), r(x), any(y)); }
+ else { a->vpaddd(dst(y), r(y), any(x)); }
+ break;
+
+ case Op::mul_i32:
+ if (in_reg(x)) { a->vpmulld(dst(x), r(x), any(y)); }
+ else { a->vpmulld(dst(y), r(y), any(x)); }
+ break;
+
+ case Op::sub_i32: a->vpsubd(dst(x), r(x), any(y)); break;
+
+ case Op::bit_and:
+ if (in_reg(x)) { a->vpand(dst(x), r(x), any(y)); }
+ else { a->vpand(dst(y), r(y), any(x)); }
+ break;
+ case Op::bit_or:
+ if (in_reg(x)) { a->vpor(dst(x), r(x), any(y)); }
+ else { a->vpor(dst(y), r(y), any(x)); }
+ break;
+ case Op::bit_xor:
+ if (in_reg(x)) { a->vpxor(dst(x), r(x), any(y)); }
+ else { a->vpxor(dst(y), r(y), any(x)); }
+ break;
+
+ case Op::bit_clear: a->vpandn(dst(y), r(y), any(x)); break; // Notice, y then x.
+
+ case Op::select:
+ if (try_alias(z)) { a->vpblendvb(dst(z), r(z), any(y), r(x)); }
+ else { a->vpblendvb(dst(x), r(z), any(y), r(x)); }
+ break;
+
+ case Op::shl_i32: a->vpslld(dst(x), r(x), immA); break;
+ case Op::shr_i32: a->vpsrld(dst(x), r(x), immA); break;
+ case Op::sra_i32: a->vpsrad(dst(x), r(x), immA); break;
+
+ case Op::eq_i32:
+ if (in_reg(x)) { a->vpcmpeqd(dst(x), r(x), any(y)); }
+ else { a->vpcmpeqd(dst(y), r(y), any(x)); }
+ break;
+
+ case Op::gt_i32: a->vpcmpgtd(dst(), r(x), any(y)); break;
+
+ case Op::eq_f32:
+ if (in_reg(x)) { a->vcmpeqps(dst(x), r(x), any(y)); }
+ else { a->vcmpeqps(dst(y), r(y), any(x)); }
+ break;
+ case Op::neq_f32:
+ if (in_reg(x)) { a->vcmpneqps(dst(x), r(x), any(y)); }
+ else { a->vcmpneqps(dst(y), r(y), any(x)); }
+ break;
+
+ case Op:: gt_f32: a->vcmpltps (dst(y), r(y), any(x)); break;
+ case Op::gte_f32: a->vcmpleps (dst(y), r(y), any(x)); break;
+
+ case Op::ceil:
+ if (in_reg(x)) { a->vroundps(dst(x), r(x), Assembler::CEIL); }
+ else { a->vroundps(dst(), any(x), Assembler::CEIL); }
+ break;
+
+ case Op::floor:
+ if (in_reg(x)) { a->vroundps(dst(x), r(x), Assembler::FLOOR); }
+ else { a->vroundps(dst(), any(x), Assembler::FLOOR); }
+ break;
+
+ case Op::to_f32:
+ if (in_reg(x)) { a->vcvtdq2ps(dst(x), r(x)); }
+ else { a->vcvtdq2ps(dst(), any(x)); }
+ break;
+
+ case Op::trunc:
+ if (in_reg(x)) { a->vcvttps2dq(dst(x), r(x)); }
+ else { a->vcvttps2dq(dst(), any(x)); }
+ break;
+
+ case Op::round:
+ if (in_reg(x)) { a->vcvtps2dq(dst(x), r(x)); }
+ else { a->vcvtps2dq(dst(), any(x)); }
+ break;
+
+ case Op::to_fp16:
+ a->vcvtps2ph(dst(x), r(x), A::CURRENT); // f32 ymm -> f16 xmm
+ a->vpmovzxwd(dst(), dst()); // f16 xmm -> f16 ymm
+ break;
+
+ case Op::from_fp16:
+ a->vpackusdw(dst(x), r(x), r(x)); // f16 ymm -> f16 xmm
+ a->vpermq (dst(), dst(), 0xd8); // swap middle two 64-bit lanes
+ a->vcvtph2ps(dst(), dst()); // f16 xmm -> f32 ymm
+ break;
+
+ case Op::duplicate: break;
+
+ #elif defined(__aarch64__)
+ case Op::assert_true: {
+ a->uminv4s(dst(), r(x)); // uminv acts like an all() across the vector.
+ a->movs(GP0, dst(), 0);
+ A::Label all_true;
+ a->cbnz(GP0, &all_true);
+ a->brk(0);
+ a->label(&all_true);
+ } break;
+
+ case Op::trace_line:
+ case Op::trace_var:
+ case Op::trace_enter:
+ case Op::trace_exit:
+ case Op::trace_scope:
+ /* Force this program to run in the interpreter. */
+ return false;
+
+ case Op::index: {
+ A::V tmp = alloc_tmp();
+ a->ldrq (tmp, &iota);
+ a->dup4s(dst(), N);
+ a->sub4s(dst(), dst(), tmp);
+ free_tmp(tmp);
+ } break;
+
+ case Op::store8: a->xtns2h(dst(x), r(x));
+ a->xtnh2b(dst(), dst());
+ if (scalar) { a->strb (dst(), arg[immA]); }
+ else { a->strs (dst(), arg[immA]); }
+ break;
+
+ case Op::store16: a->xtns2h(dst(x), r(x));
+ if (scalar) { a->strh (dst(), arg[immA]); }
+ else { a->strd (dst(), arg[immA]); }
+ break;
+
+ case Op::store32: if (scalar) { a->strs(r(x), arg[immA]); }
+ else { a->strq(r(x), arg[immA]); }
+ break;
+
+ case Op::store64: if (scalar) {
+ a->strs(r(x), arg[immA], 0);
+ a->strs(r(y), arg[immA], 1);
+ } else if (r(y) == r(x)+1) {
+ a->st24s(r(x), arg[immA]);
+ } else {
+ Reg tmp0 = alloc_tmp(2),
+ tmp1 = (Reg)(tmp0+1);
+ a->orr16b(tmp0, r(x), r(x));
+ a->orr16b(tmp1, r(y), r(y));
+ a-> st24s(tmp0, arg[immA]);
+ free_tmp(tmp0);
+ free_tmp(tmp1);
+ } break;
+
+ case Op::store128:
+ if (scalar) {
+ a->strs(r(x), arg[immA], 0);
+ a->strs(r(y), arg[immA], 1);
+ a->strs(r(z), arg[immA], 2);
+ a->strs(r(w), arg[immA], 3);
+ } else if (r(y) == r(x)+1 &&
+ r(z) == r(x)+2 &&
+ r(w) == r(x)+3) {
+ a->st44s(r(x), arg[immA]);
+ } else {
+ Reg tmp0 = alloc_tmp(4),
+ tmp1 = (Reg)(tmp0+1),
+ tmp2 = (Reg)(tmp0+2),
+ tmp3 = (Reg)(tmp0+3);
+ a->orr16b(tmp0, r(x), r(x));
+ a->orr16b(tmp1, r(y), r(y));
+ a->orr16b(tmp2, r(z), r(z));
+ a->orr16b(tmp3, r(w), r(w));
+ a-> st44s(tmp0, arg[immA]);
+ free_tmp(tmp0);
+ free_tmp(tmp1);
+ free_tmp(tmp2);
+ free_tmp(tmp3);
+ } break;
+
+
+ case Op::load8: if (scalar) { a->ldrb(dst(), arg[immA]); }
+ else { a->ldrs(dst(), arg[immA]); }
+ a->uxtlb2h(dst(), dst());
+ a->uxtlh2s(dst(), dst());
+ break;
+
+ case Op::load16: if (scalar) { a->ldrh(dst(), arg[immA]); }
+ else { a->ldrd(dst(), arg[immA]); }
+ a->uxtlh2s(dst(), dst());
+ break;
+
+ case Op::load32: if (scalar) { a->ldrs(dst(), arg[immA]); }
+ else { a->ldrq(dst(), arg[immA]); }
+ break;
+
+ case Op::load64: if (scalar) {
+ a->ldrs(dst(), arg[immA], immB);
+ } else {
+ Reg tmp0 = alloc_tmp(2),
+ tmp1 = (Reg)(tmp0+1);
+ a->ld24s(tmp0, arg[immA]);
+ // TODO: return both
+ switch (immB) {
+ case 0: mark_tmp_as_dst(tmp0); free_tmp(tmp1); break;
+ case 1: mark_tmp_as_dst(tmp1); free_tmp(tmp0); break;
+ }
+ } break;
+
+ case Op::load128: if (scalar) {
+ a->ldrs(dst(), arg[immA], immB);
+ } else {
+ Reg tmp0 = alloc_tmp(4),
+ tmp1 = (Reg)(tmp0+1),
+ tmp2 = (Reg)(tmp0+2),
+ tmp3 = (Reg)(tmp0+3);
+ a->ld44s(tmp0, arg[immA]);
+ // TODO: return all four
+ switch (immB) {
+ case 0: mark_tmp_as_dst(tmp0); break;
+ case 1: mark_tmp_as_dst(tmp1); break;
+ case 2: mark_tmp_as_dst(tmp2); break;
+ case 3: mark_tmp_as_dst(tmp3); break;
+ }
+ if (immB != 0) { free_tmp(tmp0); }
+ if (immB != 1) { free_tmp(tmp1); }
+ if (immB != 2) { free_tmp(tmp2); }
+ if (immB != 3) { free_tmp(tmp3); }
+ } break;
+
+ case Op::uniform32: a->add(GP0, arg[immA], immB);
+ a->ld1r4s(dst(), GP0);
+ break;
+
+ case Op::array32: a->add(GP0, arg[immA], immB);
+ a->ldrd(GP0, GP0);
+ a->add(GP0, GP0, immC);
+ a->ld1r4s(dst(), GP0);
+ break;
+
+ case Op::gather8: {
+ // As usual, the gather base pointer is immB bytes off of uniform immA.
+ a->add (GP0, arg[immA], immB); // GP0 = &(gather base pointer)
+ a->ldrd(GP0, GP0); // GP0 = gather base pointer
+
+ for (int i = 0; i < active_lanes; i++) {
+ a->movs(GP1, r(x), i); // Extract index lane i into GP1.
+ a->add (GP1, GP0, GP1); // Add the gather base pointer.
+ a->ldrb(GP1, GP1); // Load that byte.
+ a->inss(dst(x), GP1, i); // Insert it into dst() lane i.
+ }
+ } break;
+
+ // See gather8 for general idea; comments here only where gather16 differs.
+ case Op::gather16: {
+ a->add (GP0, arg[immA], immB);
+ a->ldrd(GP0, GP0);
+ for (int i = 0; i < active_lanes; i++) {
+ a->movs(GP1, r(x), i);
+ a->add (GP1, GP0, GP1, A::LSL, 1); // Scale index 2x into a byte offset.
+ a->ldrh(GP1, GP1); // 2-byte load.
+ a->inss(dst(x), GP1, i);
+ }
+ } break;
+
+ // See gather8 for general idea; comments here only where gather32 differs.
+ case Op::gather32: {
+ a->add (GP0, arg[immA], immB);
+ a->ldrd(GP0, GP0);
+ for (int i = 0; i < active_lanes; i++) {
+ a->movs(GP1, r(x), i);
+ a->add (GP1, GP0, GP1, A::LSL, 2); // Scale index 4x into a byte offset.
+ a->ldrs(GP1, GP1); // 4-byte load.
+ a->inss(dst(x), GP1, i);
+ }
+ } break;
+
+ case Op::add_f32: a->fadd4s(dst(x,y), r(x), r(y)); break;
+ case Op::sub_f32: a->fsub4s(dst(x,y), r(x), r(y)); break;
+ case Op::mul_f32: a->fmul4s(dst(x,y), r(x), r(y)); break;
+ case Op::div_f32: a->fdiv4s(dst(x,y), r(x), r(y)); break;
+
+ case Op::sqrt_f32: a->fsqrt4s(dst(x), r(x)); break;
+
+ case Op::fma_f32: // fmla.4s is z += x*y
+ if (try_alias(z)) { a->fmla4s( r(z), r(x), r(y)); }
+ else { a->orr16b(dst(), r(z), r(z));
+ a->fmla4s(dst(), r(x), r(y)); }
+ break;
+
+ case Op::fnma_f32: // fmls.4s is z -= x*y
+ if (try_alias(z)) { a->fmls4s( r(z), r(x), r(y)); }
+ else { a->orr16b(dst(), r(z), r(z));
+ a->fmls4s(dst(), r(x), r(y)); }
+ break;
+
+ case Op::fms_f32: // calculate z - xy, then negate to xy - z
+ if (try_alias(z)) { a->fmls4s( r(z), r(x), r(y)); }
+ else { a->orr16b(dst(), r(z), r(z));
+ a->fmls4s(dst(), r(x), r(y)); }
+ a->fneg4s(dst(), dst());
+ break;
+
+ case Op:: gt_f32: a->fcmgt4s (dst(x,y), r(x), r(y)); break;
+ case Op::gte_f32: a->fcmge4s (dst(x,y), r(x), r(y)); break;
+ case Op:: eq_f32: a->fcmeq4s (dst(x,y), r(x), r(y)); break;
+ case Op::neq_f32: a->fcmeq4s (dst(x,y), r(x), r(y));
+ a->not16b (dst(), dst()); break;
+
+
+ case Op::add_i32: a->add4s(dst(x,y), r(x), r(y)); break;
+ case Op::sub_i32: a->sub4s(dst(x,y), r(x), r(y)); break;
+ case Op::mul_i32: a->mul4s(dst(x,y), r(x), r(y)); break;
+
+ case Op::bit_and : a->and16b(dst(x,y), r(x), r(y)); break;
+ case Op::bit_or : a->orr16b(dst(x,y), r(x), r(y)); break;
+ case Op::bit_xor : a->eor16b(dst(x,y), r(x), r(y)); break;
+ case Op::bit_clear: a->bic16b(dst(x,y), r(x), r(y)); break;
+
+ case Op::select: // bsl16b is x = x ? y : z
+ if (try_alias(x)) { a->bsl16b( r(x), r(y), r(z)); }
+ else { a->orr16b(dst(), r(x), r(x));
+ a->bsl16b(dst(), r(y), r(z)); }
+ break;
+
+ // fmin4s and fmax4s don't work the way we want with NaN,
+ // so we write them the long way:
+ case Op::min_f32: // min(x,y) = y<x ? y : x
+ a->fcmgt4s(dst(), r(x), r(y));
+ a->bsl16b (dst(), r(y), r(x));
+ break;
+
+ case Op::max_f32: // max(x,y) = x<y ? y : x
+ a->fcmgt4s(dst(), r(y), r(x));
+ a->bsl16b (dst(), r(y), r(x));
+ break;
+
+ case Op::shl_i32: a-> shl4s(dst(x), r(x), immA); break;
+ case Op::shr_i32: a->ushr4s(dst(x), r(x), immA); break;
+ case Op::sra_i32: a->sshr4s(dst(x), r(x), immA); break;
+
+ case Op::eq_i32: a->cmeq4s(dst(x,y), r(x), r(y)); break;
+ case Op::gt_i32: a->cmgt4s(dst(x,y), r(x), r(y)); break;
+
+ case Op::to_f32: a->scvtf4s (dst(x), r(x)); break;
+ case Op::trunc: a->fcvtzs4s(dst(x), r(x)); break;
+ case Op::round: a->fcvtns4s(dst(x), r(x)); break;
+ case Op::ceil: a->frintp4s(dst(x), r(x)); break;
+ case Op::floor: a->frintm4s(dst(x), r(x)); break;
+
+ case Op::to_fp16:
+ a->fcvtn (dst(x), r(x)); // 4x f32 -> 4x f16 in bottom four lanes
+ a->uxtlh2s(dst(), dst()); // expand to 4x f16 in even 16-bit lanes
+ break;
+
+ case Op::from_fp16:
+ a->xtns2h(dst(x), r(x)); // pack even 16-bit lanes into bottom four lanes
+ a->fcvtl (dst(), dst()); // 4x f16 -> 4x f32
+ break;
+
+ case Op::duplicate: break;
+ #endif
+ }
+
+ // Proactively free the registers holding any value that dies here.
+ if (rd != NA && dies_here(regs[rd])) { regs[rd] = NA; }
+ if (rx != NA && regs[rx] != NA && dies_here(regs[rx])) { regs[rx] = NA; }
+ if (ry != NA && regs[ry] != NA && dies_here(regs[ry])) { regs[ry] = NA; }
+ if (rz != NA && regs[rz] != NA && dies_here(regs[rz])) { regs[rz] = NA; }
+ if (rw != NA && regs[rw] != NA && dies_here(regs[rw])) { regs[rw] = NA; }
+ return true;
+ };
+
+ #if defined(__x86_64__) || defined(_M_X64)
+ auto jump_if_less = [&](A::Label* l) { a->jl (l); };
+ auto jump = [&](A::Label* l) { a->jmp(l); };
+
+ auto add = [&](A::GP64 gp, int imm) { a->add(gp, imm); };
+ auto sub = [&](A::GP64 gp, int imm) { a->sub(gp, imm); };
+ #elif defined(__aarch64__)
+ auto jump_if_less = [&](A::Label* l) { a->blt(l); };
+ auto jump = [&](A::Label* l) { a->b (l); };
+
+ auto add = [&](A::X gp, int imm) { a->add(gp, gp, imm); };
+ auto sub = [&](A::X gp, int imm) { a->sub(gp, gp, imm); };
+ #endif
+
+ A::Label body,
+ tail,
+ done;
+
+ enter();
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ if (fImpl->visualizer && is_trace(instructions[id].op)) {
+ // Make sure trace commands stay on JIT for visualizer
+ continue;
+ }
+ if (instructions[id].can_hoist && !emit(id, /*scalar=*/false)) {
+ return false;
+ }
+ }
+
+ // This point marks a kind of canonical fixed point for register contents: if loop
+ // code is generated as if these registers are holding these values, the next time
+ // the loop comes around we'd better find those same registers holding those same values.
+ auto restore_incoming_regs = [&,incoming=regs,saved_stack_slot=stack_slot,
+ saved_next_stack_slot=next_stack_slot]{
+ for (int r = 0; r < (int)regs.size(); r++) {
+ if (regs[r] != incoming[r]) {
+ regs[r] = incoming[r];
+ if (regs[r] >= 0) {
+ load_from_memory((Reg)r, regs[r]);
+ }
+ }
+ }
+ *stack_hint = std::max(*stack_hint, next_stack_slot);
+ stack_slot = saved_stack_slot;
+ next_stack_slot = saved_next_stack_slot;
+ };
+
+ a->label(&body);
+ {
+ a->cmp(N, K);
+ jump_if_less(&tail);
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ if (fImpl->visualizer != nullptr && is_trace(instructions[id].op)) {
+ // Make sure trace commands stay on JIT for visualizer
+ continue;
+ }
+ if (!instructions[id].can_hoist && !emit(id, /*scalar=*/false)) {
+ return false;
+ }
+ }
+ restore_incoming_regs();
+ for (int i = 0; i < (int)fImpl->strides.size(); i++) {
+ if (fImpl->strides[i]) {
+ add(arg[i], K*fImpl->strides[i]);
+ }
+ }
+ sub(N, K);
+ jump(&body);
+ }
+
+ a->label(&tail);
+ {
+ a->cmp(N, 1);
+ jump_if_less(&done);
+ for (Val id = 0; id < (Val)instructions.size(); id++) {
+ if (fImpl->visualizer && is_trace(instructions[id].op)) {
+ // Make sure trace commands stay on JIT for visualizer
+ continue;
+ }
+ if (!instructions[id].can_hoist && !emit(id, /*scalar=*/true)) {
+ return false;
+ }
+ }
+ restore_incoming_regs();
+ for (int i = 0; i < (int)fImpl->strides.size(); i++) {
+ if (fImpl->strides[i]) {
+ add(arg[i], 1*fImpl->strides[i]);
+ }
+ }
+ sub(N, 1);
+ jump(&tail);
+ }
+
+ a->label(&done);
+ {
+ exit();
+ }
+
+ // On ARM64, we use immediate offsets to adjust the stack pointer, and those are limited to
+ // 12 bits. If our function is going to require more than 4k of stack, just fail. We could
+ // tweak the code that adjusts `sp`, but then we risk exceeding the (larger) immediate limit
+ // on our sp-relative load and store opcodes.
+ #if defined(__aarch64__)
+ const int stack_bytes = (*stack_hint) * K * 4;
+ if (stack_bytes > mask(12)) {
+ return false;
+ }
+ #endif
+
+ // Except for explicit aligned load and store instructions, AVX allows
+ // memory operands to be unaligned. So even though we're creating 16
+ // byte patterns on ARM or 32-byte patterns on x86, we only need to
+ // align to 4 bytes, the element size and alignment requirement.
+
+ constants.foreach([&](int imm, A::Label* label) {
+ a->align(4);
+ a->label(label);
+ for (int i = 0; i < K; i++) {
+ a->word(imm);
+ }
+ });
+
+ if (!iota.references.empty()) {
+ a->align(4);
+ a->label(&iota); // 0,1,2,3,4,...
+ for (int i = 0; i < K; i++) {
+ a->word(i);
+ }
+ }
+
+ if (!load64_index.references.empty()) {
+ a->align(4);
+ a->label(&load64_index); // {0,2,4,6|1,3,5,7}
+ a->word(0); a->word(2); a->word(4); a->word(6);
+ a->word(1); a->word(3); a->word(5); a->word(7);
+ }
+
+ return true;
+ }
+
+ void Program::setupJIT(const std::vector<OptimizedInstruction>& instructions,
+ const char* debug_name) {
+ // Compile `instructions` to native code and store the entry point in
+ // fImpl->jit_entry. On any failure we return early, leaving
+ // fImpl->jit_entry unset so callers fall back to the interpreter.
+
+ // Assemble with no buffer to determine a.size() (the number of bytes we'll assemble)
+ // and stack_hint/registers_used to feed forward into the next jit() call.
+ Assembler a{nullptr};
+ int stack_hint = -1;
+ uint32_t registers_used = 0xffff'ffff; // Start conservatively with all.
+ if (!this->jit(instructions, &stack_hint, &registers_used, &a)) {
+ return;
+ }
+
+ // Size the executable buffer from the dry run, then record the entry point.
+ fImpl->jit_size = a.size();
+ void* jit_entry = alloc_jit_buffer(&fImpl->jit_size);
+ fImpl->jit_entry.store(jit_entry);
+
+ // Assemble the program for real with stack_hint/registers_used as feedback from first call.
+ a = Assembler{jit_entry};
+ SkAssertResult(this->jit(instructions, &stack_hint, &registers_used, &a));
+ // The second pass must not emit more bytes than the dry run reserved.
+ SkASSERT(a.size() <= fImpl->jit_size);
+
+ // Remap as executable, and flush caches on platforms that need that.
+ remap_as_executable(jit_entry, fImpl->jit_size);
+
+ #if !defined(SK_BUILD_FOR_WIN)
+ // For profiling and debugging, it's helpful to have this code loaded
+ // dynamically rather than just jumping into fImpl->jit_entry.
+ if (gSkVMJITViaDylib) {
+ // Dump the raw program binary.
+ SkString path = SkStringPrintf("/tmp/%s.XXXXXX", debug_name);
+ int fd = mkstemp(path.data());
+ ::write(fd, jit_entry, a.size());
+ close(fd);
+
+ this->dropJIT(); // (unmap and null out fImpl->jit_entry.)
+
+ // Convert it in-place to a dynamic library with a single symbol "skvm_jit":
+ SkString cmd = SkStringPrintf(
+ "echo '.global _skvm_jit\n_skvm_jit: .incbin \"%s\"'"
+ " | clang -x assembler -shared - -o %s",
+ path.c_str(), path.c_str());
+ #if defined(__aarch64__)
+ cmd.append(" -arch arm64");
+ #endif
+ system(cmd.c_str());
+
+ // Load that dynamic library and look up skvm_jit().
+ // Probe both spellings since the exported symbol may or may not carry
+ // a leading underscore depending on platform.
+ fImpl->dylib = dlopen(path.c_str(), RTLD_NOW|RTLD_LOCAL);
+ void* sym = nullptr;
+ for (const char* name : {"skvm_jit", "_skvm_jit"} ) {
+ if (!sym) { sym = dlsym(fImpl->dylib, name); }
+ }
+ fImpl->jit_entry.store(sym);
+ }
+ #endif
+ }
+
+ void Program::disassemble(SkWStream* o) const {
+ #if !defined(SK_BUILD_FOR_WIN)
+ // Write a disassembly of the JIT'd code to `o` (or to a SkDebugf-backed
+ // stream when `o` is null) by dumping the code bytes to a temp file,
+ // wrapping them in a dynamic library, and piping that through objdump.
+ SkDebugfStream debug;
+ if (!o) { o = &debug; }
+
+ const void* jit_entry = fImpl->jit_entry.load();
+ size_t jit_size = fImpl->jit_size;
+
+ // Nothing to disassemble if the program never JIT'd.
+ if (!jit_entry) {
+ o->writeText("Program not JIT'd. Did you pass --jit?\n");
+ return;
+ }
+
+ // Dump the raw code bytes to a unique temp file.
+ char path[] = "/tmp/skvm-jit.XXXXXX";
+ int fd = mkstemp(path);
+ ::write(fd, jit_entry, jit_size);
+ close(fd);
+
+ // Convert it in-place to a dynamic library with a single symbol "skvm_jit":
+ SkString cmd = SkStringPrintf(
+ "echo '.global _skvm_jit\n_skvm_jit: .incbin \"%s\"'"
+ " | clang -x assembler -shared - -o %s",
+ path, path);
+ #if defined(__aarch64__)
+ cmd.append(" -arch arm64");
+ #endif
+ system(cmd.c_str());
+
+ // Now objdump to disassemble our function:
+ // TODO: We could trim this down to just our code using '--disassemble=<symbol name>`,
+ // but the symbol name varies with OS, and that option may be missing from objdump on some
+ // machines? There also appears to be quite a bit of junk after the end of the JIT'd code.
+ // Trimming that would let us pass '--visualize-jumps' and get the loop annotated.
+ // With the junk, we tend to end up with a bunch of stray jumps that pollute the ASCII art.
+ cmd = SkStringPrintf("objdump -D %s", path);
+ #if defined(SK_BUILD_FOR_UNIX)
+ cmd.append(" --section=.text");
+ #endif
+ // Stream objdump's stdout straight through to the caller, line by line.
+ FILE* fp = popen(cmd.c_str(), "r");
+ if (!fp) {
+ o->writeText("objdump failed\n");
+ return;
+ }
+
+ char line[1024];
+ while (fgets(line, sizeof(line), fp)) {
+ o->writeText(line);
+ }
+
+ pclose(fp);
+ #endif
+ }
+
+#endif
+
+} // namespace skvm
diff --git a/gfx/skia/skia/src/core/SkVM.h b/gfx/skia/skia/src/core/SkVM.h
new file mode 100644
index 0000000000..89fe090252
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVM.h
@@ -0,0 +1,1369 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVM_DEFINED
+#define SkVM_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkSpan.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkTHash.h"
+#include "src/core/SkVM_fwd.h"
+#include <vector> // std::vector
+
+class SkWStream;
+
+#if defined(SKVM_JIT_WHEN_POSSIBLE) && !defined(SK_BUILD_FOR_IOS)
+ #if defined(__x86_64__) || defined(_M_X64)
+ #if defined(_WIN32) || defined(__linux) || defined(__APPLE__)
+ #define SKVM_JIT
+ #endif
+ #endif
+ #if defined(__aarch64__)
+ #if defined(__ANDROID__) || defined(__APPLE__)
+ #define SKVM_JIT
+ #endif
+ #endif
+#endif
+
+#if 0
+ #undef SKVM_JIT
+#endif
+
+namespace SkSL {
+class TraceHook;
+}
+
+namespace skvm {
+
+ namespace viz {
+ class Visualizer;
+ }
+
+ class Assembler {
+ public:
+ explicit Assembler(void* buf);
+
+ size_t size() const;
+
+ // Order matters... GP64, Xmm, Ymm values match 4-bit register encoding for each.
+ enum GP64 {
+ rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
+ r8 , r9 , r10, r11, r12, r13, r14, r15,
+ };
+ enum Xmm {
+ xmm0, xmm1, xmm2 , xmm3 , xmm4 , xmm5 , xmm6 , xmm7 ,
+ xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
+ };
+ enum Ymm {
+ ymm0, ymm1, ymm2 , ymm3 , ymm4 , ymm5 , ymm6 , ymm7 ,
+ ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15,
+ };
+
+ // X and V values match 5-bit encoding for each (nothing tricky).
+ enum X {
+ x0 , x1 , x2 , x3 , x4 , x5 , x6 , x7 ,
+ x8 , x9 , x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x19, x20, x21, x22, x23,
+ x24, x25, x26, x27, x28, x29, x30, xzr, sp=xzr,
+ };
+ enum V {
+ v0 , v1 , v2 , v3 , v4 , v5 , v6 , v7 ,
+ v8 , v9 , v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31,
+ };
+
+ void bytes(const void*, int);
+ void byte(uint8_t);
+ void word(uint32_t);
+
+ struct Label {
+ int offset = 0;
+ enum { NotYetSet, ARMDisp19, X86Disp32 } kind = NotYetSet;
+ SkSTArray<2, int> references;
+ };
+
+ // x86-64
+
+ void align(int mod);
+
+ void int3();
+ void vzeroupper();
+ void ret();
+
+ // Mem represents a value at base + disp + scale*index,
+ // or simply at base + disp if index=rsp.
+ enum Scale { ONE, TWO, FOUR, EIGHT };
+ struct Mem {
+ GP64 base;
+ int disp = 0;
+ GP64 index = rsp;
+ Scale scale = ONE;
+ };
+
+ struct Operand {
+ union {
+ int reg;
+ Mem mem;
+ Label* label;
+ };
+ enum { REG, MEM, LABEL } kind;
+
+ Operand(GP64 r) : reg (r), kind(REG ) {}
+ Operand(Xmm r) : reg (r), kind(REG ) {}
+ Operand(Ymm r) : reg (r), kind(REG ) {}
+ Operand(Mem m) : mem (m), kind(MEM ) {}
+ Operand(Label* l) : label(l), kind(LABEL) {}
+ };
+
+ void vpand (Ymm dst, Ymm x, Operand y);
+ void vpandn(Ymm dst, Ymm x, Operand y);
+ void vpor (Ymm dst, Ymm x, Operand y);
+ void vpxor (Ymm dst, Ymm x, Operand y);
+
+ void vpaddd (Ymm dst, Ymm x, Operand y);
+ void vpsubd (Ymm dst, Ymm x, Operand y);
+ void vpmulld(Ymm dst, Ymm x, Operand y);
+
+ void vpaddw (Ymm dst, Ymm x, Operand y);
+ void vpsubw (Ymm dst, Ymm x, Operand y);
+ void vpmullw (Ymm dst, Ymm x, Operand y);
+
+ void vpabsw (Ymm dst, Operand x);
+ void vpavgw (Ymm dst, Ymm x, Operand y); // dst = (x+y+1)>>1, unsigned.
+ void vpmulhrsw(Ymm dst, Ymm x, Operand y); // dst = (x*y + (1<<14)) >> 15, signed.
+ void vpminsw (Ymm dst, Ymm x, Operand y);
+ void vpminuw (Ymm dst, Ymm x, Operand y);
+ void vpmaxsw (Ymm dst, Ymm x, Operand y);
+ void vpmaxuw (Ymm dst, Ymm x, Operand y);
+
+ void vaddps(Ymm dst, Ymm x, Operand y);
+ void vsubps(Ymm dst, Ymm x, Operand y);
+ void vmulps(Ymm dst, Ymm x, Operand y);
+ void vdivps(Ymm dst, Ymm x, Operand y);
+ void vminps(Ymm dst, Ymm x, Operand y);
+ void vmaxps(Ymm dst, Ymm x, Operand y);
+
+ void vsqrtps(Ymm dst, Operand x);
+
+ void vfmadd132ps(Ymm dst, Ymm x, Operand y);
+ void vfmadd213ps(Ymm dst, Ymm x, Operand y);
+ void vfmadd231ps(Ymm dst, Ymm x, Operand y);
+
+ void vfmsub132ps(Ymm dst, Ymm x, Operand y);
+ void vfmsub213ps(Ymm dst, Ymm x, Operand y);
+ void vfmsub231ps(Ymm dst, Ymm x, Operand y);
+
+ void vfnmadd132ps(Ymm dst, Ymm x, Operand y);
+ void vfnmadd213ps(Ymm dst, Ymm x, Operand y);
+ void vfnmadd231ps(Ymm dst, Ymm x, Operand y);
+
+ void vpackusdw(Ymm dst, Ymm x, Operand y);
+ void vpackuswb(Ymm dst, Ymm x, Operand y);
+
+ void vpunpckldq(Ymm dst, Ymm x, Operand y);
+ void vpunpckhdq(Ymm dst, Ymm x, Operand y);
+
+ void vpcmpeqd(Ymm dst, Ymm x, Operand y);
+ void vpcmpgtd(Ymm dst, Ymm x, Operand y);
+ void vpcmpeqw(Ymm dst, Ymm x, Operand y);
+ void vpcmpgtw(Ymm dst, Ymm x, Operand y);
+
+ void vcmpps (Ymm dst, Ymm x, Operand y, int imm);
+ void vcmpeqps (Ymm dst, Ymm x, Operand y) { this->vcmpps(dst,x,y,0); }
+ void vcmpltps (Ymm dst, Ymm x, Operand y) { this->vcmpps(dst,x,y,1); }
+ void vcmpleps (Ymm dst, Ymm x, Operand y) { this->vcmpps(dst,x,y,2); }
+ void vcmpneqps(Ymm dst, Ymm x, Operand y) { this->vcmpps(dst,x,y,4); }
+
+ // Sadly, the x parameter cannot be a general Operand for these shifts.
+ void vpslld(Ymm dst, Ymm x, int imm);
+ void vpsrld(Ymm dst, Ymm x, int imm);
+ void vpsrad(Ymm dst, Ymm x, int imm);
+
+ void vpsllw(Ymm dst, Ymm x, int imm);
+ void vpsrlw(Ymm dst, Ymm x, int imm);
+ void vpsraw(Ymm dst, Ymm x, int imm);
+
+ void vpermq (Ymm dst, Operand x, int imm);
+ void vperm2f128(Ymm dst, Ymm x, Operand y, int imm);
+ void vpermps (Ymm dst, Ymm ix, Operand src); // dst[i] = src[ix[i]]
+
+ enum Rounding { NEAREST, FLOOR, CEIL, TRUNC, CURRENT };
+ void vroundps(Ymm dst, Operand x, Rounding);
+
+ void vmovdqa(Ymm dst, Operand x);
+ void vmovups(Ymm dst, Operand x);
+ void vmovups(Xmm dst, Operand x);
+ void vmovups(Operand dst, Ymm x);
+ void vmovups(Operand dst, Xmm x);
+
+ void vcvtdq2ps (Ymm dst, Operand x);
+ void vcvttps2dq(Ymm dst, Operand x);
+ void vcvtps2dq (Ymm dst, Operand x);
+
+ void vcvtps2ph(Operand dst, Ymm x, Rounding);
+ void vcvtph2ps(Ymm dst, Operand x);
+
+ void vpblendvb(Ymm dst, Ymm x, Operand y, Ymm z);
+
+ void vpshufb(Ymm dst, Ymm x, Operand y);
+
+ void vptest(Ymm x, Operand y);
+
+ void vbroadcastss(Ymm dst, Operand y);
+
+ void vpmovzxwd(Ymm dst, Operand src); // dst = src, 128-bit, uint16_t -> int
+ void vpmovzxbd(Ymm dst, Operand src); // dst = src, 64-bit, uint8_t -> int
+
+ void vmovq(Operand dst, Xmm src); // dst = src, 64-bit
+ void vmovd(Operand dst, Xmm src); // dst = src, 32-bit
+ void vmovd(Xmm dst, Operand src); // dst = src, 32-bit
+
+ void vpinsrd(Xmm dst, Xmm src, Operand y, int imm); // dst = src; dst[imm] = y, 32-bit
+ void vpinsrw(Xmm dst, Xmm src, Operand y, int imm); // dst = src; dst[imm] = y, 16-bit
+ void vpinsrb(Xmm dst, Xmm src, Operand y, int imm); // dst = src; dst[imm] = y, 8-bit
+
+ void vextracti128(Operand dst, Ymm src, int imm); // dst = src[imm], 128-bit
+ void vpextrd (Operand dst, Xmm src, int imm); // dst = src[imm], 32-bit
+ void vpextrw (Operand dst, Xmm src, int imm); // dst = src[imm], 16-bit
+ void vpextrb (Operand dst, Xmm src, int imm); // dst = src[imm], 8-bit
+
+ // if (mask & 0x8000'0000) {
+ // dst = base[scale*ix];
+ // }
+ // mask = 0;
+ void vgatherdps(Ymm dst, Scale scale, Ymm ix, GP64 base, Ymm mask);
+
+
+ void label(Label*);
+
+ void jmp(Label*);
+ void je (Label*);
+ void jne(Label*);
+ void jl (Label*);
+ void jc (Label*);
+
+ void add (Operand dst, int imm);
+ void sub (Operand dst, int imm);
+ void cmp (Operand dst, int imm);
+ void mov (Operand dst, int imm);
+ void movb(Operand dst, int imm);
+
+ void add (Operand dst, GP64 x);
+ void sub (Operand dst, GP64 x);
+ void cmp (Operand dst, GP64 x);
+ void mov (Operand dst, GP64 x);
+ void movb(Operand dst, GP64 x);
+
+ void add (GP64 dst, Operand x);
+ void sub (GP64 dst, Operand x);
+ void cmp (GP64 dst, Operand x);
+ void mov (GP64 dst, Operand x);
+ void movb(GP64 dst, Operand x);
+
+ // Disambiguators... choice is arbitrary (but generates different code!).
+ void add (GP64 dst, GP64 x) { this->add (Operand(dst), x); }
+ void sub (GP64 dst, GP64 x) { this->sub (Operand(dst), x); }
+ void cmp (GP64 dst, GP64 x) { this->cmp (Operand(dst), x); }
+ void mov (GP64 dst, GP64 x) { this->mov (Operand(dst), x); }
+ void movb(GP64 dst, GP64 x) { this->movb(Operand(dst), x); }
+
+ void movzbq(GP64 dst, Operand x); // dst = x, uint8_t -> int
+ void movzwq(GP64 dst, Operand x); // dst = x, uint16_t -> int
+
+ // aarch64
+
+ // d = op(n,m)
+ using DOpNM = void(V d, V n, V m);
+ DOpNM and16b, orr16b, eor16b, bic16b, bsl16b,
+ add4s, sub4s, mul4s,
+ cmeq4s, cmgt4s,
+ sub8h, mul8h,
+ fadd4s, fsub4s, fmul4s, fdiv4s, fmin4s, fmax4s,
+ fcmeq4s, fcmgt4s, fcmge4s,
+ tbl,
+ uzp14s, uzp24s,
+ zip14s, zip24s;
+
+ // TODO: there are also float ==,<,<=,>,>= instructions with an immediate 0.0f,
+ // and the register comparison > and >= can also compare absolute values. Interesting.
+
+ // d += n*m
+ void fmla4s(V d, V n, V m);
+
+ // d -= n*m
+ void fmls4s(V d, V n, V m);
+
+ // d = op(n,imm)
+ using DOpNImm = void(V d, V n, int imm);
+ DOpNImm sli4s,
+ shl4s, sshr4s, ushr4s,
+ ushr8h;
+
+ // d = op(n)
+ using DOpN = void(V d, V n);
+ DOpN not16b, // d = ~n
+ fneg4s, // d = -n
+ fsqrt4s, // d = sqrtf(n)
+ scvtf4s, // int -> float
+ fcvtzs4s, // truncate float -> int
+ fcvtns4s, // round float -> int (nearest even)
+ frintp4s, // round float -> int as float, toward plus infinity (ceil)
+ frintm4s, // round float -> int as float, toward minus infinity (floor)
+ fcvtn, // f32 -> f16 in low half
+ fcvtl, // f16 in low half -> f32
+ xtns2h, // u32 -> u16
+ xtnh2b, // u16 -> u8
+ uxtlb2h, // u8 -> u16 (TODO: this is a special case of ushll.8h)
+ uxtlh2s, // u16 -> u32 (TODO: this is a special case of ushll.4s)
+ uminv4s; // dst[0] = min(n[0],n[1],n[2],n[3]), n as unsigned
+
+ void brk (int imm16);
+ void ret (X);
+ void add (X d, X n, int imm12);
+ void sub (X d, X n, int imm12);
+ void subs(X d, X n, int imm12); // subtract setting condition flags
+
+ enum Shift { LSL,LSR,ASR,ROR };
+ void add (X d, X n, X m, Shift=LSL, int imm6=0); // d=n+Shift(m,imm6), for Shift != ROR.
+
+ // There's another encoding for unconditional branches that can jump further,
+ // but this one encoded as b.al is simple to implement and should be fine.
+ void b (Label* l) { this->b(Condition::al, l); }
+ void bne(Label* l) { this->b(Condition::ne, l); }
+ void blt(Label* l) { this->b(Condition::lt, l); }
+
+ // "cmp ..." is just an assembler mnemonic for "subs xzr, ..."!
+ void cmp(X n, int imm12) { this->subs(xzr, n, imm12); }
+
+ // Compare and branch if zero/non-zero, as if
+ // cmp(t,0)
+ // beq/bne(l)
+ // but without setting condition flags.
+ void cbz (X t, Label* l);
+ void cbnz(X t, Label* l);
+
+ // TODO: there are ldur variants with unscaled imm, useful?
+ void ldrd(X dst, X src, int imm12=0); // 64-bit dst = *(src+imm12*8)
+ void ldrs(X dst, X src, int imm12=0); // 32-bit dst = *(src+imm12*4)
+ void ldrh(X dst, X src, int imm12=0); // 16-bit dst = *(src+imm12*2)
+ void ldrb(X dst, X src, int imm12=0); // 8-bit dst = *(src+imm12)
+
+ void ldrq(V dst, Label*); // 128-bit PC-relative load
+
+ void ldrq(V dst, X src, int imm12=0); // 128-bit dst = *(src+imm12*16)
+ void ldrd(V dst, X src, int imm12=0); // 64-bit dst = *(src+imm12*8)
+ void ldrs(V dst, X src, int imm12=0); // 32-bit dst = *(src+imm12*4)
+ void ldrh(V dst, X src, int imm12=0); // 16-bit dst = *(src+imm12*2)
+ void ldrb(V dst, X src, int imm12=0); // 8-bit dst = *(src+imm12)
+
+ void strs(X src, X dst, int imm12=0); // 32-bit *(dst+imm12*4) = src
+
+ void strq(V src, X dst, int imm12=0); // 128-bit *(dst+imm12*16) = src
+ void strd(V src, X dst, int imm12=0); // 64-bit *(dst+imm12*8) = src
+ void strs(V src, X dst, int imm12=0); // 32-bit *(dst+imm12*4) = src
+ void strh(V src, X dst, int imm12=0); // 16-bit *(dst+imm12*2) = src
+ void strb(V src, X dst, int imm12=0); // 8-bit *(dst+imm12) = src
+
+ void movs(X dst, V src, int lane); // dst = 32-bit src[lane]
+ void inss(V dst, X src, int lane); // dst[lane] = 32-bit src
+
+ void dup4s (V dst, X src); // Each 32-bit lane = src
+
+ void ld1r4s (V dst, X src); // Each 32-bit lane = *src
+ void ld1r8h (V dst, X src); // Each 16-bit lane = *src
+ void ld1r16b(V dst, X src); // Each 8-bit lane = *src
+
+ void ld24s(V dst, X src); // deinterleave(dst,dst+1) = 256-bit *src
+ void ld44s(V dst, X src); // deinterleave(dst,dst+1,dst+2,dst+3) = 512-bit *src
+ void st24s(V src, X dst); // 256-bit *dst = interleave_32bit_lanes(src,src+1)
+ void st44s(V src, X dst); // 512-bit *dst = interleave_32bit_lanes(src,src+1,src+2,src+3)
+
+ void ld24s(V dst, X src, int lane); // Load 2 32-bit values into given lane of dst..dst+1
+ void ld44s(V dst, X src, int lane); // Load 4 32-bit values into given lane of dst..dst+3
+
+ private:
+ uint8_t* fCode;
+ size_t fSize;
+
+ // x86-64
+ enum W { W0, W1 }; // Are the lanes 64-bit (W1) or default (W0)? Intel Vol 2A 2.3.5.5
+ enum L { L128, L256 }; // Is this a 128- or 256-bit operation? Intel Vol 2A 2.3.6.2
+
+ // Helpers for vector instructions.
+ void op(int prefix, int map, int opcode, int dst, int x, Operand y, W,L);
+ void op(int p, int m, int o, Ymm d, Ymm x, Operand y, W w=W0) { op(p,m,o, d,x,y,w,L256); }
+ void op(int p, int m, int o, Ymm d, Operand y, W w=W0) { op(p,m,o, d,0,y,w,L256); }
+ void op(int p, int m, int o, Xmm d, Xmm x, Operand y, W w=W0) { op(p,m,o, d,x,y,w,L128); }
+ void op(int p, int m, int o, Xmm d, Operand y, W w=W0) { op(p,m,o, d,0,y,w,L128); }
+
+ // Helpers for GP64 instructions.
+ void op(int opcode, Operand dst, GP64 x);
+ void op(int opcode, int opcode_ext, Operand dst, int imm);
+
+ void jump(uint8_t condition, Label*);
+ int disp32(Label*);
+ void imm_byte_after_operand(const Operand&, int byte);
+
+ // aarch64
+
+        // Opcode for 3-argument ops is split between hi and lo:
+ // [11 bits hi] [5 bits m] [6 bits lo] [5 bits n] [5 bits d]
+ void op(uint32_t hi, V m, uint32_t lo, V n, V d);
+
+ // 0,1,2-argument ops, with or without an immediate:
+ // [ 22 bits op ] [5 bits n] [5 bits d]
+ // Any immediate falls in the middle somewhere overlapping with either op, n, or both.
+ void op(uint32_t op22, V n, V d, int imm=0);
+ void op(uint32_t op22, X n, V d, int imm=0) { this->op(op22,(V)n, d,imm); }
+ void op(uint32_t op22, V n, X d, int imm=0) { this->op(op22, n,(V)d,imm); }
+ void op(uint32_t op22, X n, X d, int imm=0) { this->op(op22,(V)n,(V)d,imm); }
+ void op(uint32_t op22, int imm=0) { this->op(op22,(V)0,(V)0,imm); }
+ // (1-argument ops don't seem to have a consistent convention of passing as n or d.)
+
+
+ // Order matters... value is 4-bit encoding for condition code.
+ enum class Condition { eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,al };
+ void b(Condition, Label*);
+ int disp19(Label*);
+ };
+
+ // Order matters a little: Ops <=store128 are treated as having side effects.
+ #define SKVM_OPS(M) \
+ M(assert_true) \
+ M(trace_line) M(trace_var) \
+ M(trace_enter) M(trace_exit) M(trace_scope) \
+ M(store8) M(store16) M(store32) M(store64) M(store128) \
+ M(load8) M(load16) M(load32) M(load64) M(load128) \
+ M(index) \
+ M(gather8) M(gather16) M(gather32) \
+ M(uniform32) \
+ M(array32) \
+ M(splat) \
+ M(add_f32) M(add_i32) \
+ M(sub_f32) M(sub_i32) \
+ M(mul_f32) M(mul_i32) \
+ M(div_f32) \
+ M(min_f32) M(max_f32) \
+ M(fma_f32) M(fms_f32) M(fnma_f32) \
+ M(sqrt_f32) \
+ M(shl_i32) M(shr_i32) M(sra_i32) \
+ M(ceil) M(floor) M(trunc) M(round) M(to_fp16) M(from_fp16) \
+ M(to_f32) \
+ M(neq_f32) M(eq_f32) M(eq_i32) \
+ M(gte_f32) M(gt_f32) M(gt_i32) \
+ M(bit_and) M(bit_or) M(bit_xor) M(bit_clear) \
+ M(select) \
+ M(duplicate)
+ // End of SKVM_OPS
+
+ enum class Op : int {
+ #define M(op) op,
+ SKVM_OPS(M)
+ #undef M
+ };
+
+ static inline bool has_side_effect(Op op) {
+ return op <= Op::store128;
+ }
+ static inline bool touches_varying_memory(Op op) {
+ return Op::store8 <= op && op <= Op::load128;
+ }
+ static inline bool is_always_varying(Op op) {
+ return Op::store8 <= op && op <= Op::index;
+ }
+ static inline bool is_trace(Op op) {
+ return Op::trace_line <= op && op <= Op::trace_scope;
+ }
+
+ using Val = int;
+    // We reserve an impossible Val ID as a sentinel:
+    // NA, meaning none, n/a, null, nil, etc.
+ static const Val NA = -1;
+
+ // Ptr and UPtr are an index into the registers args[]. The two styles of using args are
+ // varyings and uniforms. Varyings use Ptr, have a stride associated with them, and are
+    // evaluated every time through the loop. Uniforms use UPtr, don't have a stride, and are
+ // usually hoisted above the loop.
+ struct Ptr { int ix; };
+ struct UPtr : public Ptr {};
+
+ bool operator!=(Ptr a, Ptr b);
+
+ struct I32 {
+ Builder* builder = nullptr;
+ Val id = NA;
+ explicit operator bool() const { return id != NA; }
+ Builder* operator->() const { return builder; }
+ };
+
+ struct F32 {
+ Builder* builder = nullptr;
+ Val id = NA;
+ explicit operator bool() const { return id != NA; }
+ Builder* operator->() const { return builder; }
+ };
+
+ struct Color {
+ F32 r,g,b,a;
+ explicit operator bool() const { return r && g && b && a; }
+ Builder* operator->() const { return a.operator->(); }
+ };
+
+ struct HSLA {
+ F32 h,s,l,a;
+ explicit operator bool() const { return h && s && l && a; }
+ Builder* operator->() const { return a.operator->(); }
+ };
+
+ struct Coord {
+ F32 x,y;
+ explicit operator bool() const { return x && y; }
+ Builder* operator->() const { return x.operator->(); }
+ };
+
+ struct Uniform {
+ UPtr ptr;
+ int offset;
+ };
+ struct Uniforms {
+ UPtr base;
+ std::vector<int> buf;
+
+ Uniforms(UPtr ptr, int init) : base(ptr), buf(init) {}
+
+ Uniform push(int val) {
+ buf.push_back(val);
+ return {base, (int)( sizeof(int)*(buf.size() - 1) )};
+ }
+
+ Uniform pushF(float val) {
+ int bits;
+ memcpy(&bits, &val, sizeof(int));
+ return this->push(bits);
+ }
+
+ Uniform pushPtr(const void* ptr) {
+ // Jam the pointer into 1 or 2 ints.
+ int ints[sizeof(ptr) / sizeof(int)];
+ memcpy(ints, &ptr, sizeof(ptr));
+ for (int bits : ints) {
+ buf.push_back(bits);
+ }
+ return {base, (int)( sizeof(int)*(buf.size() - std::size(ints)) )};
+ }
+
+ Uniform pushArray(int32_t a[]) {
+ return this->pushPtr(a);
+ }
+
+ Uniform pushArrayF(float a[]) {
+ return this->pushPtr(a);
+ }
+ };
+
+ struct PixelFormat {
+ enum { UNORM, SRGB, FLOAT, XRNG } encoding;
+ int r_bits, g_bits, b_bits, a_bits,
+ r_shift, g_shift, b_shift, a_shift;
+ };
+ PixelFormat SkColorType_to_PixelFormat(SkColorType);
+
+ SK_BEGIN_REQUIRE_DENSE
+ struct Instruction {
+ Op op; // v* = op(x,y,z,w,immA,immB), where * == index of this Instruction.
+ Val x,y,z,w; // Enough arguments for Op::store128.
+ int immA,immB,immC; // Immediate bit pattern, shift count, pointer index, byte offset, etc.
+ };
+ SK_END_REQUIRE_DENSE
+
+ bool operator==(const Instruction&, const Instruction&);
+ struct InstructionHash {
+ uint32_t operator()(const Instruction&, uint32_t seed=0) const;
+ };
+
+ struct OptimizedInstruction {
+ Op op;
+ Val x,y,z,w;
+ int immA,immB,immC;
+
+ Val death;
+ bool can_hoist;
+ };
+
+ struct Features {
+ bool fma = false;
+ bool fp16 = false;
+ };
+
+ class Builder {
+ public:
+ Builder(bool createDuplicates = false);
+ Builder(Features, bool createDuplicates = false);
+
+ Program done(const char* debug_name,
+ bool allow_jit,
+ std::unique_ptr<viz::Visualizer> visualizer) const;
+ Program done(const char* debug_name = nullptr,
+ bool allow_jit=true) const;
+
+ // Mostly for debugging, tests, etc.
+ std::vector<Instruction> program() const { return fProgram; }
+ std::vector<OptimizedInstruction> optimize(viz::Visualizer* visualizer = nullptr) const;
+
+ // Returns a trace-hook ID which must be passed to the trace opcodes.
+ int attachTraceHook(SkSL::TraceHook*);
+
+ // Convenience arg() wrappers for most common strides, sizeof(T) and 0.
+ template <typename T>
+ Ptr varying() { return this->arg(sizeof(T)); }
+ Ptr varying(int stride) { SkASSERT(stride > 0); return this->arg(stride); }
+ UPtr uniform() { Ptr p = this->arg(0); return UPtr{{p.ix}}; }
+
+ // TODO: allow uniform (i.e. Ptr) offsets to store* and load*?
+ // TODO: sign extension (signed types) for <32-bit loads?
+ // TODO: unsigned integer operations where relevant (just comparisons?)?
+
+ // Assert cond is true, printing debug when not.
+ void assert_true(I32 cond, I32 debug);
+ void assert_true(I32 cond, F32 debug) { assert_true(cond, pun_to_I32(debug)); }
+ void assert_true(I32 cond) { assert_true(cond, cond); }
+
+ // Insert debug traces into the instruction stream
+ bool mergeMasks(I32& mask, I32& traceMask);
+ void trace_line (int traceHookID, I32 mask, I32 traceMask, int line);
+ void trace_var (int traceHookID, I32 mask, I32 traceMask, int slot, I32 val);
+ void trace_enter(int traceHookID, I32 mask, I32 traceMask, int fnIdx);
+ void trace_exit (int traceHookID, I32 mask, I32 traceMask, int fnIdx);
+ void trace_scope(int traceHookID, I32 mask, I32 traceMask, int delta);
+
+ // Store {8,16,32,64,128}-bit varying.
+ void store8 (Ptr ptr, I32 val);
+ void store16 (Ptr ptr, I32 val);
+ void store32 (Ptr ptr, I32 val);
+ void storeF (Ptr ptr, F32 val) { store32(ptr, pun_to_I32(val)); }
+ void store64 (Ptr ptr, I32 lo, I32 hi); // *ptr = lo|(hi<<32)
+ void store128(Ptr ptr, I32 x, I32 y, I32 z, I32 w); // *ptr = x|(y<<32)|(z<<64)|(w<<96)
+
+ // Returns varying {n, n-1, n-2, ..., 1}, where n is the argument to Program::eval().
+ I32 index();
+
+ // Load {8,16,32,64,128}-bit varying.
+ I32 load8 (Ptr ptr);
+ I32 load16 (Ptr ptr);
+ I32 load32 (Ptr ptr);
+ F32 loadF (Ptr ptr) { return pun_to_F32(load32(ptr)); }
+ I32 load64 (Ptr ptr, int lane); // Load 32-bit lane 0-1 of 64-bit value.
+ I32 load128(Ptr ptr, int lane); // Load 32-bit lane 0-3 of 128-bit value.
+
+ // Load i32/f32 uniform with byte-count offset.
+ I32 uniform32(UPtr ptr, int offset);
+ F32 uniformF (UPtr ptr, int offset) { return pun_to_F32(uniform32(ptr,offset)); }
+
+        // Load i32/f32 uniform with byte-count offset and a C-style array index. The address of
+ // the element is (*(ptr + byte-count offset))[index].
+ I32 array32 (UPtr ptr, int offset, int index);
+ F32 arrayF (UPtr ptr, int offset, int index) {
+ return pun_to_F32(array32(ptr, offset, index));
+ }
+
+ // Push and load this color as a uniform.
+ Color uniformColor(SkColor4f, Uniforms*);
+
+ // Gather u8,u16,i32 with varying element-count index from *(ptr + byte-count offset).
+ I32 gather8 (UPtr ptr, int offset, I32 index);
+ I32 gather16(UPtr ptr, int offset, I32 index);
+ I32 gather32(UPtr ptr, int offset, I32 index);
+ F32 gatherF (UPtr ptr, int offset, I32 index) {
+ return pun_to_F32(gather32(ptr, offset, index));
+ }
+
+ // Convenience methods for working with skvm::Uniform(s).
+ I32 uniform32(Uniform u) { return this->uniform32(u.ptr, u.offset); }
+ F32 uniformF (Uniform u) { return this->uniformF (u.ptr, u.offset); }
+ I32 gather8 (Uniform u, I32 index) { return this->gather8 (u.ptr, u.offset, index); }
+ I32 gather16 (Uniform u, I32 index) { return this->gather16 (u.ptr, u.offset, index); }
+ I32 gather32 (Uniform u, I32 index) { return this->gather32 (u.ptr, u.offset, index); }
+ F32 gatherF (Uniform u, I32 index) { return this->gatherF (u.ptr, u.offset, index); }
+
+ // Convenience methods for working with array pointers in skvm::Uniforms. Index is an
+ // array index and not a byte offset. The array pointer is stored at u.
+ I32 array32 (Uniform a, int index) { return this->array32 (a.ptr, a.offset, index); }
+ F32 arrayF (Uniform a, int index) { return this->arrayF (a.ptr, a.offset, index); }
+
+ // Load an immediate constant.
+ I32 splat(int n);
+ I32 splat(unsigned u) { return splat((int)u); }
+ F32 splat(float f) {
+ int bits;
+ memcpy(&bits, &f, 4);
+ return pun_to_F32(splat(bits));
+ }
+
+ // Some operations make sense with immediate arguments,
+ // so we provide overloads inline to make that seamless.
+ //
+ // We omit overloads that may indicate a bug or performance issue.
+ // In general it does not make sense to pass immediates to unary operations,
+ // and even sometimes not for binary operations, e.g.
+ //
+ // div(x, y) -- normal every day divide
+ // div(3.0f, y) -- yep, makes sense
+ // div(x, 3.0f) -- omitted as a reminder you probably want mul(x, 1/3.0f).
+ //
+ // You can of course always splat() to override these opinions.
+
+ // float math, comparisons, etc.
+ F32 add(F32, F32);
+ F32 add(F32 x, float y) { return add(x, splat(y)); }
+ F32 add(float x, F32 y) { return add(splat(x), y); }
+
+ F32 sub(F32, F32);
+ F32 sub(F32 x, float y) { return sub(x, splat(y)); }
+ F32 sub(float x, F32 y) { return sub(splat(x), y); }
+
+ F32 mul(F32, F32);
+ F32 mul(F32 x, float y) { return mul(x, splat(y)); }
+ F32 mul(float x, F32 y) { return mul(splat(x), y); }
+
+ // mul(), but allowing optimizations not strictly legal under IEEE-754 rules.
+ F32 fast_mul(F32, F32);
+ F32 fast_mul(F32 x, float y) { return fast_mul(x, splat(y)); }
+ F32 fast_mul(float x, F32 y) { return fast_mul(splat(x), y); }
+
+ F32 div(F32, F32);
+ F32 div(float x, F32 y) { return div(splat(x), y); }
+
+ F32 min(F32, F32);
+ F32 min(F32 x, float y) { return min(x, splat(y)); }
+ F32 min(float x, F32 y) { return min(splat(x), y); }
+
+ F32 max(F32, F32);
+ F32 max(F32 x, float y) { return max(x, splat(y)); }
+ F32 max(float x, F32 y) { return max(splat(x), y); }
+
+ // TODO: remove mad()? It's just sugar.
+ F32 mad(F32 x, F32 y, F32 z) { return add(mul(x,y), z); }
+ F32 mad(F32 x, F32 y, float z) { return mad( x , y , splat(z)); }
+ F32 mad(F32 x, float y, F32 z) { return mad( x , splat(y), z ); }
+ F32 mad(F32 x, float y, float z) { return mad( x , splat(y), splat(z)); }
+ F32 mad(float x, F32 y, F32 z) { return mad(splat(x), y , z ); }
+ F32 mad(float x, F32 y, float z) { return mad(splat(x), y , splat(z)); }
+ F32 mad(float x, float y, F32 z) { return mad(splat(x), splat(y), z ); }
+
+ F32 sqrt(F32);
+ F32 approx_log2(F32);
+ F32 approx_pow2(F32);
+ F32 approx_log (F32 x) { return mul(0.69314718f, approx_log2(x)); }
+ F32 approx_exp (F32 x) { return approx_pow2(mul(x, 1.4426950408889634074f)); }
+
+ F32 approx_powf(F32 base, F32 exp);
+ F32 approx_powf(F32 base, float exp) { return approx_powf(base, splat(exp)); }
+ F32 approx_powf(float base, F32 exp) { return approx_powf(splat(base), exp); }
+
+
+ F32 approx_sin(F32 radians);
+ F32 approx_cos(F32 radians) { return approx_sin(add(radians, SK_ScalarPI/2)); }
+ F32 approx_tan(F32 radians);
+
+ F32 approx_asin(F32 x);
+ F32 approx_acos(F32 x) { return sub(SK_ScalarPI/2, approx_asin(x)); }
+ F32 approx_atan(F32 x);
+ F32 approx_atan2(F32 y, F32 x);
+
+ F32 lerp(F32 lo, F32 hi, F32 t);
+ F32 lerp(F32 lo, F32 hi, float t) { return lerp( lo , hi , splat(t)); }
+ F32 lerp(F32 lo, float hi, float t) { return lerp( lo , splat(hi), splat(t)); }
+ F32 lerp(F32 lo, float hi, F32 t) { return lerp( lo , splat(hi), t ); }
+ F32 lerp(float lo, F32 hi, F32 t) { return lerp(splat(lo), hi , t ); }
+ F32 lerp(float lo, F32 hi, float t) { return lerp(splat(lo), hi , splat(t)); }
+ F32 lerp(float lo, float hi, F32 t) { return lerp(splat(lo), splat(hi), t ); }
+
+ F32 clamp(F32 x, F32 lo, F32 hi) { return max(lo, min(x, hi)); }
+ F32 clamp(F32 x, F32 lo, float hi) { return clamp( x , lo , splat(hi)); }
+ F32 clamp(F32 x, float lo, float hi) { return clamp( x , splat(lo), splat(hi)); }
+ F32 clamp(F32 x, float lo, F32 hi) { return clamp( x , splat(lo), hi ); }
+ F32 clamp(float x, F32 lo, F32 hi) { return clamp(splat(x), lo , hi ); }
+ F32 clamp(float x, F32 lo, float hi) { return clamp(splat(x), lo , splat(hi)); }
+ F32 clamp(float x, float lo, F32 hi) { return clamp(splat(x), splat(lo), hi ); }
+
+ F32 clamp01(F32 x) { return clamp(x, 0.0f, 1.0f); }
+
+ F32 abs(F32 x) { return pun_to_F32(bit_and(pun_to_I32(x), 0x7fff'ffff)); }
+ F32 fract(F32 x) { return sub(x, floor(x)); }
+ F32 ceil(F32);
+ F32 floor(F32);
+ I32 is_NaN (F32 x) { return neq(x,x); }
+ I32 is_finite(F32 x) { return lt(bit_and(pun_to_I32(x), 0x7f80'0000), 0x7f80'0000); }
+
+ I32 trunc(F32 x);
+ I32 round(F32 x); // Round to int using current rounding mode (as if lrintf()).
+ I32 pun_to_I32(F32 x) { return {x.builder, x.id}; }
+
+ I32 to_fp16(F32 x);
+ F32 from_fp16(I32 x);
+
+ I32 eq(F32, F32);
+ I32 eq(F32 x, float y) { return eq(x, splat(y)); }
+ I32 eq(float x, F32 y) { return eq(splat(x), y); }
+
+ I32 neq(F32, F32);
+ I32 neq(F32 x, float y) { return neq(x, splat(y)); }
+ I32 neq(float x, F32 y) { return neq(splat(x), y); }
+
+ I32 lt(F32, F32);
+ I32 lt(F32 x, float y) { return lt(x, splat(y)); }
+ I32 lt(float x, F32 y) { return lt(splat(x), y); }
+
+ I32 lte(F32, F32);
+ I32 lte(F32 x, float y) { return lte(x, splat(y)); }
+ I32 lte(float x, F32 y) { return lte(splat(x), y); }
+
+ I32 gt(F32, F32);
+ I32 gt(F32 x, float y) { return gt(x, splat(y)); }
+ I32 gt(float x, F32 y) { return gt(splat(x), y); }
+
+ I32 gte(F32, F32);
+ I32 gte(F32 x, float y) { return gte(x, splat(y)); }
+ I32 gte(float x, F32 y) { return gte(splat(x), y); }
+
+ // int math, comparisons, etc.
+ I32 add(I32, I32);
+ I32 add(I32 x, int y) { return add(x, splat(y)); }
+ I32 add(int x, I32 y) { return add(splat(x), y); }
+
+ I32 sub(I32, I32);
+ I32 sub(I32 x, int y) { return sub(x, splat(y)); }
+ I32 sub(int x, I32 y) { return sub(splat(x), y); }
+
+ I32 mul(I32, I32);
+ I32 mul(I32 x, int y) { return mul(x, splat(y)); }
+ I32 mul(int x, I32 y) { return mul(splat(x), y); }
+
+ I32 shl(I32 x, int bits);
+ I32 shr(I32 x, int bits);
+ I32 sra(I32 x, int bits);
+
+ I32 eq(I32, I32);
+ I32 eq(I32 x, int y) { return eq(x, splat(y)); }
+ I32 eq(int x, I32 y) { return eq(splat(x), y); }
+
+ I32 neq(I32, I32);
+ I32 neq(I32 x, int y) { return neq(x, splat(y)); }
+ I32 neq(int x, I32 y) { return neq(splat(x), y); }
+
+ I32 lt(I32, I32);
+ I32 lt(I32 x, int y) { return lt(x, splat(y)); }
+ I32 lt(int x, I32 y) { return lt(splat(x), y); }
+
+ I32 lte(I32, I32);
+ I32 lte(I32 x, int y) { return lte(x, splat(y)); }
+ I32 lte(int x, I32 y) { return lte(splat(x), y); }
+
+ I32 gt(I32, I32);
+ I32 gt(I32 x, int y) { return gt(x, splat(y)); }
+ I32 gt(int x, I32 y) { return gt(splat(x), y); }
+
+ I32 gte(I32, I32);
+ I32 gte(I32 x, int y) { return gte(x, splat(y)); }
+ I32 gte(int x, I32 y) { return gte(splat(x), y); }
+
+ F32 to_F32(I32 x);
+ F32 pun_to_F32(I32 x) { return {x.builder, x.id}; }
+
+ // Bitwise operations.
+ I32 bit_and(I32, I32);
+ I32 bit_and(I32 x, int y) { return bit_and(x, splat(y)); }
+ I32 bit_and(int x, I32 y) { return bit_and(splat(x), y); }
+
+ I32 bit_or(I32, I32);
+ I32 bit_or(I32 x, int y) { return bit_or(x, splat(y)); }
+ I32 bit_or(int x, I32 y) { return bit_or(splat(x), y); }
+
+ I32 bit_xor(I32, I32);
+ I32 bit_xor(I32 x, int y) { return bit_xor(x, splat(y)); }
+ I32 bit_xor(int x, I32 y) { return bit_xor(splat(x), y); }
+
+ I32 bit_clear(I32, I32);
+ I32 bit_clear(I32 x, int y) { return bit_clear(x, splat(y)); }
+ I32 bit_clear(int x, I32 y) { return bit_clear(splat(x), y); }
+
+ I32 min(I32 x, I32 y) { return select(lte(x,y), x, y); }
+ I32 min(I32 x, int y) { return min(x, splat(y)); }
+ I32 min(int x, I32 y) { return min(splat(x), y); }
+
+ I32 max(I32 x, I32 y) { return select(gte(x,y), x, y); }
+ I32 max(I32 x, int y) { return max(x, splat(y)); }
+ I32 max(int x, I32 y) { return max(splat(x), y); }
+
+ I32 select(I32 cond, I32 t, I32 f); // cond ? t : f
+ I32 select(I32 cond, int t, I32 f) { return select(cond, splat(t), f ); }
+ I32 select(I32 cond, I32 t, int f) { return select(cond, t , splat(f)); }
+ I32 select(I32 cond, int t, int f) { return select(cond, splat(t), splat(f)); }
+
+ F32 select(I32 cond, F32 t, F32 f) {
+ return pun_to_F32(select(cond, pun_to_I32(t)
+ , pun_to_I32(f)));
+ }
+ F32 select(I32 cond, float t, F32 f) { return select(cond, splat(t), f ); }
+ F32 select(I32 cond, F32 t, float f) { return select(cond, t , splat(f)); }
+ F32 select(I32 cond, float t, float f) { return select(cond, splat(t), splat(f)); }
+
+ I32 extract(I32 x, int bits, I32 z); // (x>>bits) & z
+ I32 extract(I32 x, int bits, int z) { return extract(x, bits, splat(z)); }
+ I32 extract(int x, int bits, I32 z) { return extract(splat(x), bits, z); }
+
+ I32 pack(I32 x, I32 y, int bits); // x | (y<<bits)
+ I32 pack(I32 x, int y, int bits) { return pack(x, splat(y), bits); }
+ I32 pack(int x, I32 y, int bits) { return pack(splat(x), y, bits); }
+
+
+ // Common idioms used in several places, worth centralizing for consistency.
+ F32 from_unorm(int bits, I32); // E.g. from_unorm(8, x) -> x * (1/255.0f)
+ I32 to_unorm(int bits, F32); // E.g. to_unorm(8, x) -> round(x * 255)
+
+ Color load(PixelFormat, Ptr ptr);
+ void store(PixelFormat, Ptr ptr, Color);
+ Color gather(PixelFormat, UPtr ptr, int offset, I32 index);
+ Color gather(PixelFormat f, Uniform u, I32 index) {
+ return gather(f, u.ptr, u.offset, index);
+ }
+
+ void premul(F32* r, F32* g, F32* b, F32 a);
+ void unpremul(F32* r, F32* g, F32* b, F32 a);
+
+ Color premul(Color c) { this->premul(&c.r, &c.g, &c.b, c.a); return c; }
+ Color unpremul(Color c) { this->unpremul(&c.r, &c.g, &c.b, c.a); return c; }
+
+ Color lerp(Color lo, Color hi, F32 t);
+ Color blend(SkBlendMode, Color src, Color dst);
+
+ Color clamp01(Color c) {
+ return { clamp01(c.r), clamp01(c.g), clamp01(c.b), clamp01(c.a) };
+ }
+
+ HSLA to_hsla(Color);
+ Color to_rgba(HSLA);
+
+ void dump(SkWStream* = nullptr) const;
+
+ uint64_t hash() const;
+
+ Val push(Instruction);
+
+ bool allImm() const { return true; }
+
+ template <typename T, typename... Rest>
+ bool allImm(Val id, T* imm, Rest... rest) const {
+ if (fProgram[id].op == Op::splat) {
+ static_assert(sizeof(T) == 4);
+ memcpy(imm, &fProgram[id].immA, 4);
+ return this->allImm(rest...);
+ }
+ return false;
+ }
+
+ bool allUniform() const { return true; }
+
+ template <typename... Rest>
+ bool allUniform(Val id, Uniform* uni, Rest... rest) const {
+ if (fProgram[id].op == Op::uniform32) {
+ uni->ptr.ix = fProgram[id].immA;
+ uni->offset = fProgram[id].immB;
+ return this->allUniform(rest...);
+ }
+ return false;
+ }
+
+ private:
+ // Declare an argument with given stride (use stride=0 for uniforms).
+ Ptr arg(int stride);
+
+ Val push(
+ Op op, Val x=NA, Val y=NA, Val z=NA, Val w=NA, int immA=0, int immB=0, int immC=0) {
+ return this->push(Instruction{op, x,y,z,w, immA,immB,immC});
+ }
+
+ template <typename T>
+ bool isImm(Val id, T want) const {
+ T imm = 0;
+ return this->allImm(id, &imm) && imm == want;
+ }
+
+        // `canonicalizeIdOrder` has two rules:
+ // - Immediate values go last; that is, `x + 1` is preferred over `1 + x`.
+ // - If both/neither of x and y are immediate, lower IDs go before higher IDs.
+ // Canonicalizing the IDs helps with opcode deduplication. Putting immediates in a
+ // consistent position makes it easier to detect no-op arithmetic like `x + 0`.
+ template <typename F32_or_I32>
+ void canonicalizeIdOrder(F32_or_I32& x, F32_or_I32& y);
+
+ // If the passed in ID is a bit-not, return the value being bit-notted. Otherwise, NA.
+ Val holdsBitNot(Val id);
+
+ SkTHashMap<Instruction, Val, InstructionHash> fIndex;
+ std::vector<Instruction> fProgram;
+ std::vector<SkSL::TraceHook*> fTraceHooks;
+ std::vector<int> fStrides;
+ const Features fFeatures;
+ bool fCreateDuplicates;
+ };
+
+ // Optimization passes and data structures normally used by Builder::optimize(),
+ // extracted here so they can be unit tested.
+ std::vector<Instruction> eliminate_dead_code(std::vector<Instruction>,
+ viz::Visualizer* visualizer = nullptr);
+ std::vector<OptimizedInstruction> finalize(std::vector<Instruction>,
+ viz::Visualizer* visualizer = nullptr);
+
+ using Reg = int;
+
+ // d = op(x,y,z,w, immA,immB)
+ struct InterpreterInstruction {
+ Op op;
+ Reg d,x,y,z,w;
+ int immA,immB,immC;
+ };
+
+ class Program {
+ public:
+ Program(const std::vector<OptimizedInstruction>& instructions,
+ std::unique_ptr<viz::Visualizer> visualizer,
+ const std::vector<int>& strides,
+ const std::vector<SkSL::TraceHook*>& traceHooks,
+ const char* debug_name, bool allow_jit);
+
+ Program();
+ ~Program();
+
+ Program(Program&&);
+ Program& operator=(Program&&);
+
+ Program(const Program&) = delete;
+ Program& operator=(const Program&) = delete;
+
+ void eval(int n, void* args[]) const;
+
+ template <typename... T>
+ void eval(int n, T*... arg) const {
+ SkASSERT(sizeof...(arg) == this->nargs());
+ // This nullptr isn't important except that it makes args[] non-empty if you pass none.
+ void* args[] = { (void*)arg..., nullptr };
+ this->eval(n, args);
+ }
+
+ std::vector<InterpreterInstruction> instructions() const;
+ int nargs() const;
+ int nregs() const;
+ int loop () const;
+ bool empty() const;
+
+ bool hasJIT() const; // Has this Program been JITted?
+ bool hasTraceHooks() const; // Is this program instrumented for debugging?
+
+ void visualize(SkWStream* output) const;
+ void dump(SkWStream* = nullptr) const;
+ void disassemble(SkWStream* = nullptr) const;
+ viz::Visualizer* visualizer();
+
+ private:
+ void setupInterpreter(const std::vector<OptimizedInstruction>&);
+ void setupJIT (const std::vector<OptimizedInstruction>&, const char* debug_name);
+
+ bool jit(const std::vector<OptimizedInstruction>&,
+ int* stack_hint, uint32_t* registers_used,
+ Assembler*) const;
+
+ void dropJIT();
+
+ struct Impl;
+ std::unique_ptr<Impl> fImpl;
+ };
+
+ // TODO: control flow
+ // TODO: 64-bit values?
+
+#define SI static inline
+
+ SI I32 operator+(I32 x, I32 y) { return x->add(x,y); }
+ SI I32 operator+(I32 x, int y) { return x->add(x,y); }
+ SI I32 operator+(int x, I32 y) { return y->add(x,y); }
+
+ SI I32 operator-(I32 x, I32 y) { return x->sub(x,y); }
+ SI I32 operator-(I32 x, int y) { return x->sub(x,y); }
+ SI I32 operator-(int x, I32 y) { return y->sub(x,y); }
+
+ SI I32 operator*(I32 x, I32 y) { return x->mul(x,y); }
+ SI I32 operator*(I32 x, int y) { return x->mul(x,y); }
+ SI I32 operator*(int x, I32 y) { return y->mul(x,y); }
+
+ SI I32 min(I32 x, I32 y) { return x->min(x,y); }
+ SI I32 min(I32 x, int y) { return x->min(x,y); }
+ SI I32 min(int x, I32 y) { return y->min(x,y); }
+
+ SI I32 max(I32 x, I32 y) { return x->max(x,y); }
+ SI I32 max(I32 x, int y) { return x->max(x,y); }
+ SI I32 max(int x, I32 y) { return y->max(x,y); }
+
+ SI I32 operator==(I32 x, I32 y) { return x->eq(x,y); }
+ SI I32 operator==(I32 x, int y) { return x->eq(x,y); }
+ SI I32 operator==(int x, I32 y) { return y->eq(x,y); }
+
+ SI I32 operator!=(I32 x, I32 y) { return x->neq(x,y); }
+ SI I32 operator!=(I32 x, int y) { return x->neq(x,y); }
+ SI I32 operator!=(int x, I32 y) { return y->neq(x,y); }
+
+ SI I32 operator< (I32 x, I32 y) { return x->lt(x,y); }
+ SI I32 operator< (I32 x, int y) { return x->lt(x,y); }
+ SI I32 operator< (int x, I32 y) { return y->lt(x,y); }
+
+ SI I32 operator<=(I32 x, I32 y) { return x->lte(x,y); }
+ SI I32 operator<=(I32 x, int y) { return x->lte(x,y); }
+ SI I32 operator<=(int x, I32 y) { return y->lte(x,y); }
+
+ SI I32 operator> (I32 x, I32 y) { return x->gt(x,y); }
+ SI I32 operator> (I32 x, int y) { return x->gt(x,y); }
+ SI I32 operator> (int x, I32 y) { return y->gt(x,y); }
+
+ SI I32 operator>=(I32 x, I32 y) { return x->gte(x,y); }
+ SI I32 operator>=(I32 x, int y) { return x->gte(x,y); }
+ SI I32 operator>=(int x, I32 y) { return y->gte(x,y); }
+
+
+ SI F32 operator+(F32 x, F32 y) { return x->add(x,y); }
+ SI F32 operator+(F32 x, float y) { return x->add(x,y); }
+ SI F32 operator+(float x, F32 y) { return y->add(x,y); }
+
+ SI F32 operator-(F32 x, F32 y) { return x->sub(x,y); }
+ SI F32 operator-(F32 x, float y) { return x->sub(x,y); }
+ SI F32 operator-(float x, F32 y) { return y->sub(x,y); }
+
+ SI F32 operator*(F32 x, F32 y) { return x->mul(x,y); }
+ SI F32 operator*(F32 x, float y) { return x->mul(x,y); }
+ SI F32 operator*(float x, F32 y) { return y->mul(x,y); }
+
+ SI F32 fast_mul(F32 x, F32 y) { return x->fast_mul(x,y); }
+ SI F32 fast_mul(F32 x, float y) { return x->fast_mul(x,y); }
+ SI F32 fast_mul(float x, F32 y) { return y->fast_mul(x,y); }
+
+ SI F32 operator/(F32 x, F32 y) { return x->div(x,y); }
+ SI F32 operator/(float x, F32 y) { return y->div(x,y); }
+
+ SI F32 min(F32 x, F32 y) { return x->min(x,y); }
+ SI F32 min(F32 x, float y) { return x->min(x,y); }
+ SI F32 min(float x, F32 y) { return y->min(x,y); }
+
+ SI F32 max(F32 x, F32 y) { return x->max(x,y); }
+ SI F32 max(F32 x, float y) { return x->max(x,y); }
+ SI F32 max(float x, F32 y) { return y->max(x,y); }
+
+ SI I32 operator==(F32 x, F32 y) { return x->eq(x,y); }
+ SI I32 operator==(F32 x, float y) { return x->eq(x,y); }
+ SI I32 operator==(float x, F32 y) { return y->eq(x,y); }
+
+ SI I32 operator!=(F32 x, F32 y) { return x->neq(x,y); }
+ SI I32 operator!=(F32 x, float y) { return x->neq(x,y); }
+ SI I32 operator!=(float x, F32 y) { return y->neq(x,y); }
+
+ SI I32 operator< (F32 x, F32 y) { return x->lt(x,y); }
+ SI I32 operator< (F32 x, float y) { return x->lt(x,y); }
+ SI I32 operator< (float x, F32 y) { return y->lt(x,y); }
+
+ SI I32 operator<=(F32 x, F32 y) { return x->lte(x,y); }
+ SI I32 operator<=(F32 x, float y) { return x->lte(x,y); }
+ SI I32 operator<=(float x, F32 y) { return y->lte(x,y); }
+
+ SI I32 operator> (F32 x, F32 y) { return x->gt(x,y); }
+ SI I32 operator> (F32 x, float y) { return x->gt(x,y); }
+ SI I32 operator> (float x, F32 y) { return y->gt(x,y); }
+
+ SI I32 operator>=(F32 x, F32 y) { return x->gte(x,y); }
+ SI I32 operator>=(F32 x, float y) { return x->gte(x,y); }
+ SI I32 operator>=(float x, F32 y) { return y->gte(x,y); }
+
+ SI I32& operator+=(I32& x, I32 y) { return (x = x + y); }
+ SI I32& operator+=(I32& x, int y) { return (x = x + y); }
+
+ SI I32& operator-=(I32& x, I32 y) { return (x = x - y); }
+ SI I32& operator-=(I32& x, int y) { return (x = x - y); }
+
+ SI I32& operator*=(I32& x, I32 y) { return (x = x * y); }
+ SI I32& operator*=(I32& x, int y) { return (x = x * y); }
+
+ SI F32& operator+=(F32& x, F32 y) { return (x = x + y); }
+ SI F32& operator+=(F32& x, float y) { return (x = x + y); }
+
+ SI F32& operator-=(F32& x, F32 y) { return (x = x - y); }
+ SI F32& operator-=(F32& x, float y) { return (x = x - y); }
+
+ SI F32& operator*=(F32& x, F32 y) { return (x = x * y); }
+ SI F32& operator*=(F32& x, float y) { return (x = x * y); }
+
+ SI F32& operator/=(F32& x, F32 y) { return (x = x / y); }
+
+ SI void assert_true(I32 cond, I32 debug) { cond->assert_true(cond,debug); }
+ SI void assert_true(I32 cond, F32 debug) { cond->assert_true(cond,debug); }
+ SI void assert_true(I32 cond) { cond->assert_true(cond); }
+
+ SI void store8 (Ptr ptr, I32 val) { val->store8 (ptr, val); }
+ SI void store16 (Ptr ptr, I32 val) { val->store16 (ptr, val); }
+ SI void store32 (Ptr ptr, I32 val) { val->store32 (ptr, val); }
+ SI void storeF (Ptr ptr, F32 val) { val->storeF (ptr, val); }
+ SI void store64 (Ptr ptr, I32 lo, I32 hi) { lo ->store64 (ptr, lo,hi); }
+ SI void store128(Ptr ptr, I32 x, I32 y, I32 z, I32 w) { x ->store128(ptr, x,y,z,w); }
+
+ SI I32 gather8 (UPtr ptr, int off, I32 ix) { return ix->gather8 (ptr, off, ix); }
+ SI I32 gather16(UPtr ptr, int off, I32 ix) { return ix->gather16(ptr, off, ix); }
+ SI I32 gather32(UPtr ptr, int off, I32 ix) { return ix->gather32(ptr, off, ix); }
+ SI F32 gatherF (UPtr ptr, int off, I32 ix) { return ix->gatherF (ptr, off, ix); }
+
+ SI I32 gather8 (Uniform u, I32 ix) { return ix->gather8 (u, ix); }
+ SI I32 gather16(Uniform u, I32 ix) { return ix->gather16(u, ix); }
+ SI I32 gather32(Uniform u, I32 ix) { return ix->gather32(u, ix); }
+ SI F32 gatherF (Uniform u, I32 ix) { return ix->gatherF (u, ix); }
+
+ SI F32 sqrt(F32 x) { return x-> sqrt(x); }
+ SI F32 approx_log2(F32 x) { return x->approx_log2(x); }
+ SI F32 approx_pow2(F32 x) { return x->approx_pow2(x); }
+ SI F32 approx_log (F32 x) { return x->approx_log (x); }
+ SI F32 approx_exp (F32 x) { return x->approx_exp (x); }
+
+ SI F32 approx_powf(F32 base, F32 exp) { return base->approx_powf(base, exp); }
+ SI F32 approx_powf(F32 base, float exp) { return base->approx_powf(base, exp); }
+ SI F32 approx_powf(float base, F32 exp) { return exp->approx_powf(base, exp); }
+
+ SI F32 approx_sin(F32 radians) { return radians->approx_sin(radians); }
+ SI F32 approx_cos(F32 radians) { return radians->approx_cos(radians); }
+ SI F32 approx_tan(F32 radians) { return radians->approx_tan(radians); }
+
+ SI F32 approx_asin(F32 x) { return x->approx_asin(x); }
+ SI F32 approx_acos(F32 x) { return x->approx_acos(x); }
+ SI F32 approx_atan(F32 x) { return x->approx_atan(x); }
+ SI F32 approx_atan2(F32 y, F32 x) { return x->approx_atan2(y, x); }
+
+ SI F32 clamp01(F32 x) { return x-> clamp01(x); }
+ SI F32 abs(F32 x) { return x-> abs(x); }
+ SI F32 ceil(F32 x) { return x-> ceil(x); }
+ SI F32 fract(F32 x) { return x-> fract(x); }
+ SI F32 floor(F32 x) { return x-> floor(x); }
+ SI I32 is_NaN(F32 x) { return x-> is_NaN(x); }
+ SI I32 is_finite(F32 x) { return x->is_finite(x); }
+
+ SI I32 trunc(F32 x) { return x-> trunc(x); }
+ SI I32 round(F32 x) { return x-> round(x); }
+ SI I32 pun_to_I32(F32 x) { return x-> pun_to_I32(x); }
+ SI F32 pun_to_F32(I32 x) { return x-> pun_to_F32(x); }
+ SI F32 to_F32(I32 x) { return x-> to_F32(x); }
+ SI I32 to_fp16(F32 x) { return x-> to_fp16(x); }
+ SI F32 from_fp16(I32 x) { return x-> from_fp16(x); }
+
+ SI F32 lerp(F32 lo, F32 hi, F32 t) { return lo->lerp(lo,hi,t); }
+ SI F32 lerp(F32 lo, F32 hi, float t) { return lo->lerp(lo,hi,t); }
+ SI F32 lerp(F32 lo, float hi, F32 t) { return lo->lerp(lo,hi,t); }
+ SI F32 lerp(F32 lo, float hi, float t) { return lo->lerp(lo,hi,t); }
+ SI F32 lerp(float lo, F32 hi, F32 t) { return hi->lerp(lo,hi,t); }
+ SI F32 lerp(float lo, F32 hi, float t) { return hi->lerp(lo,hi,t); }
+ SI F32 lerp(float lo, float hi, F32 t) { return t->lerp(lo,hi,t); }
+
+ SI F32 clamp(F32 x, F32 lo, F32 hi) { return x->clamp(x,lo,hi); }
+ SI F32 clamp(F32 x, F32 lo, float hi) { return x->clamp(x,lo,hi); }
+ SI F32 clamp(F32 x, float lo, F32 hi) { return x->clamp(x,lo,hi); }
+ SI F32 clamp(F32 x, float lo, float hi) { return x->clamp(x,lo,hi); }
+ SI F32 clamp(float x, F32 lo, F32 hi) { return lo->clamp(x,lo,hi); }
+ SI F32 clamp(float x, F32 lo, float hi) { return lo->clamp(x,lo,hi); }
+ SI F32 clamp(float x, float lo, F32 hi) { return hi->clamp(x,lo,hi); }
+
+ SI I32 operator<<(I32 x, int bits) { return x->shl(x, bits); }
+ SI I32 shl(I32 x, int bits) { return x->shl(x, bits); }
+ SI I32 shr(I32 x, int bits) { return x->shr(x, bits); }
+ SI I32 sra(I32 x, int bits) { return x->sra(x, bits); }
+
+ SI I32 operator&(I32 x, I32 y) { return x->bit_and(x,y); }
+ SI I32 operator&(I32 x, int y) { return x->bit_and(x,y); }
+ SI I32 operator&(int x, I32 y) { return y->bit_and(x,y); }
+
+ SI I32 operator|(I32 x, I32 y) { return x->bit_or (x,y); }
+ SI I32 operator|(I32 x, int y) { return x->bit_or (x,y); }
+ SI I32 operator|(int x, I32 y) { return y->bit_or (x,y); }
+
+ SI I32 operator^(I32 x, I32 y) { return x->bit_xor(x,y); }
+ SI I32 operator^(I32 x, int y) { return x->bit_xor(x,y); }
+ SI I32 operator^(int x, I32 y) { return y->bit_xor(x,y); }
+
+ SI I32& operator&=(I32& x, I32 y) { return (x = x & y); }
+ SI I32& operator&=(I32& x, int y) { return (x = x & y); }
+ SI I32& operator|=(I32& x, I32 y) { return (x = x | y); }
+ SI I32& operator|=(I32& x, int y) { return (x = x | y); }
+ SI I32& operator^=(I32& x, I32 y) { return (x = x ^ y); }
+ SI I32& operator^=(I32& x, int y) { return (x = x ^ y); }
+
+ SI I32 bit_clear(I32 x, I32 y) { return x->bit_clear(x,y); }
+ SI I32 bit_clear(I32 x, int y) { return x->bit_clear(x,y); }
+ SI I32 bit_clear(int x, I32 y) { return y->bit_clear(x,y); }
+
+ SI I32 select(I32 c, I32 t, I32 f) { return c->select(c, t , f ); }
+ SI I32 select(I32 c, I32 t, int f) { return c->select(c, t , c->splat(f)); }
+ SI I32 select(I32 c, int t, I32 f) { return c->select(c, c->splat(t), f ); }
+ SI I32 select(I32 c, int t, int f) { return c->select(c, c->splat(t), c->splat(f)); }
+
+ SI F32 select(I32 c, F32 t, F32 f) { return c->select(c, t , f ); }
+ SI F32 select(I32 c, F32 t, float f) { return c->select(c, t , c->splat(f)); }
+ SI F32 select(I32 c, float t, F32 f) { return c->select(c, c->splat(t), f ); }
+ SI F32 select(I32 c, float t, float f) { return c->select(c, c->splat(t), c->splat(f)); }
+
+ SI I32 extract(I32 x, int bits, I32 z) { return x->extract(x,bits,z); }
+ SI I32 extract(I32 x, int bits, int z) { return x->extract(x,bits,z); }
+ SI I32 extract(int x, int bits, I32 z) { return z->extract(x,bits,z); }
+
+ SI I32 pack(I32 x, I32 y, int bits) { return x->pack (x,y,bits); }
+ SI I32 pack(I32 x, int y, int bits) { return x->pack (x,y,bits); }
+ SI I32 pack(int x, I32 y, int bits) { return y->pack (x,y,bits); }
+
+ SI I32 operator~(I32 x) { return ~0 ^ x; }
+ SI I32 operator-(I32 x) { return 0 - x; }
+ SI F32 operator-(F32 x) { return 0.0f - x; }
+
+ SI F32 from_unorm(int bits, I32 x) { return x->from_unorm(bits,x); }
+ SI I32 to_unorm(int bits, F32 x) { return x-> to_unorm(bits,x); }
+
+ SI void store(PixelFormat f, Ptr p, Color c) { return c->store(f,p,c); }
+
+ SI Color gather(PixelFormat f, UPtr p, int off, I32 ix) { return ix->gather(f,p,off,ix); }
+ SI Color gather(PixelFormat f, Uniform u , I32 ix) { return ix->gather(f,u,ix); }
+
+ SI void premul(F32* r, F32* g, F32* b, F32 a) { a-> premul(r,g,b,a); }
+ SI void unpremul(F32* r, F32* g, F32* b, F32 a) { a->unpremul(r,g,b,a); }
+
+ SI Color premul(Color c) { return c-> premul(c); }
+ SI Color unpremul(Color c) { return c->unpremul(c); }
+
+ SI Color lerp(Color lo, Color hi, F32 t) { return t->lerp(lo,hi,t); }
+
+ SI Color blend(SkBlendMode m, Color s, Color d) { return s->blend(m,s,d); }
+
+ SI Color clamp01(Color c) { return c->clamp01(c); }
+
+ SI HSLA to_hsla(Color c) { return c->to_hsla(c); }
+ SI Color to_rgba(HSLA c) { return c->to_rgba(c); }
+
+ // Evaluate polynomials: ax^n + bx^(n-1) + ... for n >= 1
+ template <typename F32_or_float, typename... Rest>
+ SI F32 poly(F32 x, F32_or_float a, float b, Rest... rest) {
+ if constexpr (sizeof...(rest) == 0) {
+ return x*a+b;
+ } else {
+ return poly(x, x*a+b, rest...);
+ }
+ }
+#undef SI
+} // namespace skvm
+
+#endif//SkVM_DEFINED
diff --git a/gfx/skia/skia/src/core/SkVMBlitter.cpp b/gfx/skia/skia/src/core/SkVMBlitter.cpp
new file mode 100644
index 0000000000..4b0702666f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVMBlitter.cpp
@@ -0,0 +1,815 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkMacros.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkCoreBlitters.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkLRUCache.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkVMBlitter.h"
+#include "src/shaders/SkColorFilterShader.h"
+
+#include <cinttypes>
+
+#define SK_BLITTER_TRACE_IS_SKVM
+#include "src/utils/SkBlitterTrace.h"
+
+namespace {
+
+ // Uniforms set by the Blitter itself,
+ // rather than by the Shader, which follow this struct in the skvm::Uniforms buffer.
+ struct BlitterUniforms {
+ int right; // First device x + blit run length n, used to get device x coordinate.
+ int y; // Device y coordinate.
+ };
+ static_assert(SkIsAlign4(sizeof(BlitterUniforms)), "");
+ inline static constexpr int kBlitterUniformsCount = sizeof(BlitterUniforms) / 4;
+
+ static skvm::Coord device_coord(skvm::Builder* p, skvm::Uniforms* uniforms) {
+ skvm::I32 dx = p->uniform32(uniforms->base, offsetof(BlitterUniforms, right))
+ - p->index(),
+ dy = p->uniform32(uniforms->base, offsetof(BlitterUniforms, y));
+ return {
+ to_F32(dx) + 0.5f,
+ to_F32(dy) + 0.5f,
+ };
+ }
+
+ struct NoopColorFilter final : public SkColorFilterBase {
+ skvm::Color onProgram(skvm::Builder*, skvm::Color c,
+ const SkColorInfo&, skvm::Uniforms*, SkArenaAlloc*) const override {
+ return c;
+ }
+
+ bool appendStages(const SkStageRec&, bool) const override { return true; }
+
+ // Only created here, should never be flattened / unflattened.
+ Factory getFactory() const override { return nullptr; }
+ const char* getTypeName() const override { return "NoopColorFilter"; }
+ };
+
+ struct SpriteShader : public SkShaderBase {
+ explicit SpriteShader(SkPixmap sprite) : fSprite(sprite) {}
+
+ SkPixmap fSprite;
+
+ // Only created here temporarily... never serialized.
+ Factory getFactory() const override { return nullptr; }
+ const char* getTypeName() const override { return "SpriteShader"; }
+
+ bool isOpaque() const override { return fSprite.isOpaque(); }
+
+ skvm::Color program(skvm::Builder* p,
+ skvm::Coord /*device*/,
+ skvm::Coord /*local*/,
+ skvm::Color /*paint*/,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc*) const override {
+ const SkColorType ct = fSprite.colorType();
+
+ skvm::PixelFormat fmt = skvm::SkColorType_to_PixelFormat(ct);
+
+ skvm::Color c = p->load(fmt, p->varying(SkColorTypeBytesPerPixel(ct)));
+
+ return SkColorSpaceXformSteps{fSprite, dst}.program(p, uniforms, c);
+ }
+ };
+
+ struct DitherShader : public SkShaderBase {
+ explicit DitherShader(sk_sp<SkShader> shader) : fShader(std::move(shader)) {}
+
+ sk_sp<SkShader> fShader;
+
+ // Only created here temporarily... never serialized.
+ Factory getFactory() const override { return nullptr; }
+ const char* getTypeName() const override { return "DitherShader"; }
+
+ bool isOpaque() const override { return fShader->isOpaque(); }
+
+ skvm::Color program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const override {
+ // Run our wrapped shader.
+ skvm::Color c = as_SB(fShader)->program(p,
+ device,
+ local,
+ paint,
+ mRec,
+ dst,
+ uniforms,
+ alloc);
+ if (!c) {
+ return {};
+ }
+
+ float rate = 0.0f;
+ switch (dst.colorType()) {
+ case kARGB_4444_SkColorType:
+ rate = 1 / 15.0f;
+ break;
+ case kRGB_565_SkColorType:
+ rate = 1 / 63.0f;
+ break;
+ case kGray_8_SkColorType:
+ case kRGB_888x_SkColorType:
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ case kSRGBA_8888_SkColorType:
+ case kR8_unorm_SkColorType:
+ rate = 1 / 255.0f;
+ break;
+ case kRGB_101010x_SkColorType:
+ case kRGBA_1010102_SkColorType:
+ case kBGR_101010x_SkColorType:
+ case kBGRA_1010102_SkColorType:
+ rate = 1 / 1023.0f;
+ break;
+
+ case kUnknown_SkColorType:
+ case kAlpha_8_SkColorType:
+ case kBGR_101010x_XR_SkColorType:
+ case kRGBA_F16_SkColorType:
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F32_SkColorType:
+ case kR8G8_unorm_SkColorType:
+ case kA16_float_SkColorType:
+ case kA16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16B16A16_unorm_SkColorType:
+ return c;
+ }
+
+ // See SkRasterPipeline dither stage.
+ // This is 8x8 ordered dithering. From here we'll only need dx and dx^dy.
+ SkASSERT(local.x.id == device.x.id);
+ SkASSERT(local.y.id == device.y.id);
+ skvm::I32 X = trunc(device.x - 0.5f),
+ Y = X ^ trunc(device.y - 0.5f);
+
+ // If X's low bits are abc and Y's def, M is fcebda,
+ // 6 bits producing all values [0,63] shuffled over an 8x8 grid.
+ skvm::I32 M = shl(Y & 1, 5)
+ | shl(X & 1, 4)
+ | shl(Y & 2, 2)
+ | shl(X & 2, 1)
+ | shr(Y & 4, 1)
+ | shr(X & 4, 2);
+
+ // Scale to [0,1) by /64, then to (-0.5,0.5) using 63/128 (~0.492) as 0.5-ε,
+ // and finally scale all that by rate. We keep dither strength strictly
+ // within ±0.5 to not change exact values like 0 or 1.
+
+ // rate could be a uniform, but since it's based on the destination SkColorType,
+ // we can bake it in without hurting the cache hit rate.
+ float scale = rate * ( 2/128.0f),
+ bias = rate * (-63/128.0f);
+ skvm::F32 dither = to_F32(M) * scale + bias;
+ c.r += dither;
+ c.g += dither;
+ c.b += dither;
+
+ c.r = clamp(c.r, 0.0f, c.a);
+ c.g = clamp(c.g, 0.0f, c.a);
+ c.b = clamp(c.b, 0.0f, c.a);
+ return c;
+ }
+ };
+} // namespace
+
+bool SkVMBlitter::Key::operator==(const Key& that) const {
+ return this->shader == that.shader
+ && this->clip == that.clip
+ && this->blender == that.blender
+ && this->colorSpace == that.colorSpace
+ && this->colorType == that.colorType
+ && this->alphaType == that.alphaType
+ && this->coverage == that.coverage;
+}
+
+SkVMBlitter::Key SkVMBlitter::Key::withCoverage(Coverage c) const {
+ Key k = *this;
+ k.coverage = SkToU8(c);
+ return k;
+}
+
+SkVMBlitter::Params SkVMBlitter::Params::withCoverage(Coverage c) const {
+ Params p = *this;
+ p.coverage = c;
+ return p;
+}
+
+SkVMBlitter::Params SkVMBlitter::EffectiveParams(const SkPixmap& device,
+ const SkPixmap* sprite,
+ SkPaint paint,
+ const SkMatrix& ctm,
+ sk_sp<SkShader> clip) {
+ // Sprites take priority over any shader. (There's rarely one set, and it's meaningless.)
+ if (sprite) {
+ paint.setShader(sk_make_sp<SpriteShader>(*sprite));
+ }
+
+ // Normal blitters will have already folded color filters into their shader,
+ // but we may still need to do that here for SpriteShaders.
+ if (paint.getColorFilter()) {
+ SkPaintPriv::RemoveColorFilter(&paint, device.colorSpace());
+ }
+ SkASSERT(!paint.getColorFilter());
+
+ // If there's no explicit shader, SkColorShader is the shader,
+ // but if there is a shader, it's modulated by the paint alpha.
+ sk_sp<SkShader> shader = paint.refShader();
+ if (!shader) {
+ shader = SkShaders::Color(paint.getColor4f(), nullptr);
+ if (!shader) {
+ // If the paint color is non-finite (possible after RemoveColorFilter), we might not
+ // have a shader. (oss-fuzz:49391)
+ shader = SkShaders::Color(SK_ColorTRANSPARENT);
+ }
+ } else if (paint.getAlphaf() < 1.0f) {
+ shader = sk_make_sp<SkColorFilterShader>(std::move(shader),
+ paint.getAlphaf(),
+ sk_make_sp<NoopColorFilter>());
+ paint.setAlphaf(1.0f);
+ }
+
+ // Add dither to the end of the shader pipeline if requested and needed.
+ if (paint.isDither() && !as_SB(shader)->isConstant()) {
+ shader = sk_make_sp<DitherShader>(std::move(shader));
+ }
+
+ // Add the blender.
+ sk_sp<SkBlender> blender = paint.refBlender();
+ if (!blender) {
+ blender = SkBlender::Mode(SkBlendMode::kSrcOver);
+ }
+
+ // The most common blend mode is SrcOver, and it can be strength-reduced
+ // _greatly_ to Src mode when the shader is opaque.
+ //
+ // In general all the information we use to make decisions here need to
+ // be reflected in Params and Key to make program caching sound, and it
+ // might appear that shader->isOpaque() is a property of the shader's
+ // uniforms than its fundamental program structure and so unsafe to use.
+ //
+ // Opacity is such a powerful property that SkShaderBase::program()
+ // forces opacity for any shader subclass that claims isOpaque(), so
+ // the opaque bit is strongly guaranteed to be part of the program and
+ // not just a property of the uniforms. The shader program hash includes
+ // this information, making it safe to use anywhere in the blitter codegen.
+ if (as_BB(blender)->asBlendMode() == SkBlendMode::kSrcOver && shader->isOpaque()) {
+ blender = SkBlender::Mode(SkBlendMode::kSrc);
+ }
+
+ SkColor4f paintColor = paint.getColor4f();
+ SkColorSpaceXformSteps{sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ device.colorSpace(), kUnpremul_SkAlphaType}
+ .apply(paintColor.vec());
+
+ return {
+ std::move(shader),
+ std::move(clip),
+ std::move(blender),
+ { device.colorType(), device.alphaType(), device.refColorSpace() },
+ Coverage::Full, // Placeholder... withCoverage() will change as needed.
+ paintColor,
+ ctm,
+ };
+}
+
+skvm::Color SkVMBlitter::DstColor(skvm::Builder* p, const Params& params) {
+ skvm::PixelFormat dstFormat = skvm::SkColorType_to_PixelFormat(params.dst.colorType());
+ skvm::Ptr dst_ptr = p->varying(SkColorTypeBytesPerPixel(params.dst.colorType()));
+ return p->load(dstFormat, dst_ptr);
+}
+
+void SkVMBlitter::BuildProgram(skvm::Builder* p, const Params& params,
+ skvm::Uniforms* uniforms, SkArenaAlloc* alloc) {
+ // First two arguments are always uniforms and the destination buffer.
+ uniforms->base = p->uniform();
+ skvm::Ptr dst_ptr = p->varying(SkColorTypeBytesPerPixel(params.dst.colorType()));
+ // A SpriteShader (in this file) may next use one argument as its varying source.
+ // Subsequent arguments depend on params.coverage:
+ // - Full: (no more arguments)
+ // - Mask3D: mul varying, add varying, 8-bit coverage varying
+ // - MaskA8: 8-bit coverage varying
+ // - MaskLCD16: 565 coverage varying
+ // - UniformF: float coverage uniform
+
+ skvm::Coord device = device_coord(p, uniforms);
+ skvm::Color paint = p->uniformColor(params.paint, uniforms);
+
+ // See note about arguments above: a SpriteShader will call p->arg() once during program().
+ skvm::Color src = as_SB(params.shader)->rootProgram(p,
+ device,
+ paint,
+ params.ctm,
+ params.dst,
+ uniforms,
+ alloc);
+ SkASSERT(src);
+ if (params.coverage == Coverage::Mask3D) {
+ skvm::F32 M = from_unorm(8, p->load8(p->varying<uint8_t>())),
+ A = from_unorm(8, p->load8(p->varying<uint8_t>()));
+
+ src.r = min(src.r * M + A, src.a);
+ src.g = min(src.g * M + A, src.a);
+ src.b = min(src.b * M + A, src.a);
+ }
+
+ // GL clamps all its color channels to limits of the format just before the blend step (~here).
+ // TODO: Below, we also clamp after the blend step. If we can prove that none of the work here
+ // (especially blending, for built-in blend modes) will produce colors outside [0, 1] we may be
+ // able to skip the second clamp. For now, we clamp twice.
+ if (SkColorTypeIsNormalized(params.dst.colorType())) {
+ src = clamp01(src);
+ }
+
+ // Load the destination color.
+ skvm::PixelFormat dstFormat = skvm::SkColorType_to_PixelFormat(params.dst.colorType());
+ skvm::Color dst = p->load(dstFormat, dst_ptr);
+ if (params.dst.isOpaque()) {
+ // When a destination is known opaque, we may assume it both starts and stays fully
+ // opaque, ignoring any math that disagrees. This sometimes trims a little work.
+ dst.a = p->splat(1.0f);
+ } else if (params.dst.alphaType() == kUnpremul_SkAlphaType) {
+ // All our blending works in terms of premul.
+ dst = premul(dst);
+ }
+
+ // Load coverage.
+ skvm::Color cov;
+ switch (params.coverage) {
+ case Coverage::Full:
+ cov.r = cov.g = cov.b = cov.a = p->splat(1.0f);
+ break;
+
+ case Coverage::UniformF:
+ cov.r = cov.g = cov.b = cov.a = p->uniformF(p->uniform(), 0);
+ break;
+
+ case Coverage::Mask3D:
+ case Coverage::MaskA8:
+ cov.r = cov.g = cov.b = cov.a = from_unorm(8, p->load8(p->varying<uint8_t>()));
+ break;
+
+ case Coverage::MaskLCD16: {
+ skvm::PixelFormat fmt = skvm::SkColorType_to_PixelFormat(kRGB_565_SkColorType);
+ cov = p->load(fmt, p->varying<uint16_t>());
+ cov.a = select(src.a < dst.a, min(cov.r, min(cov.g, cov.b)),
+ max(cov.r, max(cov.g, cov.b)));
+ } break;
+
+ case Coverage::kCount:
+ SkUNREACHABLE;
+ }
+ if (params.clip) {
+ skvm::Color clip = as_SB(params.clip)->rootProgram(p,
+ device,
+ paint,
+ params.ctm,
+ params.dst,
+ uniforms,
+ alloc);
+ SkAssertResult(clip);
+ cov.r *= clip.a; // We use the alpha channel of clip for all four.
+ cov.g *= clip.a;
+ cov.b *= clip.a;
+ cov.a *= clip.a;
+ }
+
+ const SkBlenderBase* blender = as_BB(params.blender);
+ const auto as_blendmode = blender->asBlendMode();
+
+ // The math for some blend modes lets us fold coverage into src before the blend, which is
+ // simpler than the canonical post-blend lerp().
+ bool applyPostBlendCoverage = true;
+ if (as_blendmode &&
+ SkBlendMode_ShouldPreScaleCoverage(as_blendmode.value(),
+ params.coverage == Coverage::MaskLCD16)) {
+ applyPostBlendCoverage = false;
+ src.r *= cov.r;
+ src.g *= cov.g;
+ src.b *= cov.b;
+ src.a *= cov.a;
+ }
+
+ // Apply our blend function to the computed color.
+ src = blender->program(p, src, dst, params.dst, uniforms, alloc);
+
+ if (applyPostBlendCoverage) {
+ src.r = lerp(dst.r, src.r, cov.r);
+ src.g = lerp(dst.g, src.g, cov.g);
+ src.b = lerp(dst.b, src.b, cov.b);
+ src.a = lerp(dst.a, src.a, cov.a);
+ }
+
+ if (params.dst.isOpaque()) {
+ // (See the note above when loading the destination color.)
+ src.a = p->splat(1.0f);
+ } else if (params.dst.alphaType() == kUnpremul_SkAlphaType) {
+ src = unpremul(src);
+ }
+
+ // Clamp to fit destination color format if needed.
+ if (SkColorTypeIsNormalized(params.dst.colorType())) {
+ src = clamp01(src);
+ }
+
+ // Write it out!
+ store(dstFormat, dst_ptr, src);
+}
+
+// If BuildProgram() can't build this program, CacheKey() sets *ok to false.
+SkVMBlitter::Key SkVMBlitter::CacheKey(
+ const Params& params, skvm::Uniforms* uniforms, SkArenaAlloc* alloc, bool* ok) {
+ // Take care to match buildProgram()'s reuse of the paint color uniforms.
+ skvm::Uniform r = uniforms->pushF(params.paint.fR),
+ g = uniforms->pushF(params.paint.fG),
+ b = uniforms->pushF(params.paint.fB),
+ a = uniforms->pushF(params.paint.fA);
+
+ auto hash_shader = [&](skvm::Builder& p, const sk_sp<SkShader>& shader,
+ skvm::Color* outColor) {
+ const SkShaderBase* sb = as_SB(shader);
+
+ skvm::Coord device = device_coord(&p, uniforms);
+ skvm::Color paint = {
+ p.uniformF(r),
+ p.uniformF(g),
+ p.uniformF(b),
+ p.uniformF(a),
+ };
+
+ uint64_t hash = 0;
+ *outColor = sb->rootProgram(&p,
+ device,
+ paint,
+ params.ctm,
+ params.dst,
+ uniforms,
+ alloc);
+ if (*outColor) {
+ hash = p.hash();
+ // p.hash() folds in all instructions to produce r,g,b,a but does not know
+ // precisely which value we'll treat as which channel. Imagine the shader
+ // called std::swap(*r,*b)... it draws differently, but p.hash() is unchanged.
+ // We'll fold the hash of their IDs in order to disambiguate.
+ const skvm::Val outputs[] = {
+ outColor->r.id,
+ outColor->g.id,
+ outColor->b.id,
+ outColor->a.id
+ };
+ hash ^= SkOpts::hash(outputs, sizeof(outputs));
+ } else {
+ *ok = false;
+ }
+ return hash;
+ };
+
+ // Use this builder for shader, clip and blender, so that color objects that pass
+ // from one to the other all 'make sense' -- i.e. have the same builder and/or have
+ // meaningful values for the hash.
+ //
+ // Question: better if we just pass in mock uniform colors, so we don't need to
+ // explicitly use the output color from one stage as input to another?
+ //
+ skvm::Builder p;
+
+ // Calculate a hash for the color shader.
+ SkASSERT(params.shader);
+ skvm::Color src;
+ uint64_t shaderHash = hash_shader(p, params.shader, &src);
+
+ // Calculate a hash for the clip shader, if one exists.
+ uint64_t clipHash = 0;
+ if (params.clip) {
+ skvm::Color cov;
+ clipHash = hash_shader(p, params.clip, &cov);
+ if (clipHash == 0) {
+ clipHash = 1;
+ }
+ }
+
+ // Calculate a hash for the blender.
+ uint64_t blendHash = 0;
+ if (auto bm = as_BB(params.blender)->asBlendMode()) {
+ blendHash = static_cast<uint8_t>(bm.value());
+ } else if (*ok) {
+ const SkBlenderBase* blender = as_BB(params.blender);
+
+ skvm::Color dst = DstColor(&p, params);
+ skvm::Color outColor = blender->program(&p, src, dst, params.dst, uniforms, alloc);
+ if (outColor) {
+ blendHash = p.hash();
+ // Like in `hash_shader` above, we must fold the color component IDs into our hash.
+ const skvm::Val outputs[] = {
+ outColor.r.id,
+ outColor.g.id,
+ outColor.b.id,
+ outColor.a.id
+ };
+ blendHash ^= SkOpts::hash(outputs, sizeof(outputs));
+ } else {
+ *ok = false;
+ }
+ if (blendHash == 0) {
+ blendHash = 1;
+ }
+ }
+
+ return {
+ shaderHash,
+ clipHash,
+ blendHash,
+ params.dst.colorSpace() ? params.dst.colorSpace()->hash() : 0,
+ SkToU8(params.dst.colorType()),
+ SkToU8(params.dst.alphaType()),
+ SkToU8(params.coverage),
+ };
+}
+
+SkVMBlitter::SkVMBlitter(const SkPixmap& device,
+ const SkPaint& paint,
+ const SkPixmap* sprite,
+ SkIPoint spriteOffset,
+ const SkMatrix& ctm,
+ sk_sp<SkShader> clip,
+ bool* ok)
+ : fDevice(device)
+ , fSprite(sprite ? *sprite : SkPixmap{})
+ , fSpriteOffset(spriteOffset)
+ , fUniforms(skvm::UPtr{{0}}, kBlitterUniformsCount)
+ , fParams(EffectiveParams(device, sprite, paint, ctm, std::move(clip)))
+ , fKey(CacheKey(fParams, &fUniforms, &fAlloc, ok)) {}
+
+SkVMBlitter::~SkVMBlitter() {
+ if (fStoreToCache) {
+ if (SkLRUCache<Key, skvm::Program>* cache = TryAcquireProgramCache()) {
+ auto cache_program = [&](SkTLazy<skvm::Program>& program, Coverage coverage) {
+ if (program.isValid() && !program->hasTraceHooks()) {
+ cache->insert_or_update(fKey.withCoverage(coverage), std::move(*program));
+ }
+ };
+ for (int c = 0; c < Coverage::kCount; c++) {
+ cache_program(fPrograms[c], static_cast<Coverage>(c));
+ }
+
+ ReleaseProgramCache();
+ }
+ }
+}
+
+SkLRUCache<SkVMBlitter::Key, skvm::Program>* SkVMBlitter::TryAcquireProgramCache() {
+#if defined(SKVM_JIT)
+ thread_local static SkLRUCache<Key, skvm::Program> cache{64};
+ return &cache;
+#else
+ // iOS now supports thread_local since iOS 9.
+ // On the other hand, we'll never be able to JIT there anyway.
+ // It's probably fine to not cache any interpreted programs, anywhere.
+ return nullptr;
+#endif
+}
+
+SkString SkVMBlitter::DebugName(const Key& key) {
+ return SkStringPrintf("Shader-%" PRIx64 "_Clip-%" PRIx64 "_Blender-%" PRIx64
+ "_CS-%" PRIx64 "_CT-%d_AT-%d_Cov-%d",
+ key.shader,
+ key.clip,
+ key.blender,
+ key.colorSpace,
+ key.colorType,
+ key.alphaType,
+ key.coverage);
+}
+
+void SkVMBlitter::ReleaseProgramCache() {}
+
+skvm::Program* SkVMBlitter::buildProgram(Coverage coverage) {
+ // eg, blitter re-use...
+ if (fProgramPtrs[coverage]) {
+ return fProgramPtrs[coverage];
+ }
+
+ // Next, cache lookup...
+ Key key = fKey.withCoverage(coverage);
+ {
+ skvm::Program* p = nullptr;
+ if (SkLRUCache<Key, skvm::Program>* cache = TryAcquireProgramCache()) {
+ p = cache->find(key);
+ ReleaseProgramCache();
+ }
+ if (p) {
+ SkASSERT(!p->empty());
+ fProgramPtrs[coverage] = p;
+ return p;
+ }
+ }
+
+ // Okay, let's build it...
+ fStoreToCache = true;
+
+ // We don't really _need_ to rebuild fUniforms here.
+ // It's just more natural to have effects unconditionally emit them,
+ // and more natural to rebuild fUniforms than to emit them into a temporary buffer.
+ // fUniforms should reuse the exact same memory, so this is very cheap.
+ SkDEBUGCODE(size_t prev = fUniforms.buf.size();)
+ fUniforms.buf.resize(kBlitterUniformsCount);
+ skvm::Builder builder;
+ BuildProgram(&builder, fParams.withCoverage(coverage), &fUniforms, &fAlloc);
+ SkASSERTF(fUniforms.buf.size() == prev,
+ "%zu, prev was %zu", fUniforms.buf.size(), prev);
+
+ skvm::Program program = builder.done(DebugName(key).c_str());
+ if ((false)) {
+ static std::atomic<int> missed{0},
+ total{0};
+ if (!program.hasJIT()) {
+ SkDebugf("\ncouldn't JIT %s\n", DebugName(key).c_str());
+ builder.dump();
+ program.dump();
+
+ missed++;
+ }
+ if (0 == total++) {
+ atexit([]{ SkDebugf("SkVMBlitter compiled %d programs, %d without JIT.\n",
+ total.load(), missed.load()); });
+ }
+ }
+ fProgramPtrs[coverage] = fPrograms[coverage].set(std::move(program));
+ return fProgramPtrs[coverage];
+}
+
+void SkVMBlitter::updateUniforms(int right, int y) {
+ BlitterUniforms uniforms{right, y};
+ memcpy(fUniforms.buf.data(), &uniforms, sizeof(BlitterUniforms));
+}
+
+const void* SkVMBlitter::isSprite(int x, int y) const {
+ if (fSprite.colorType() != kUnknown_SkColorType) {
+ return fSprite.addr(x - fSpriteOffset.x(),
+ y - fSpriteOffset.y());
+ }
+ return nullptr;
+}
+
+void SkVMBlitter::blitH(int x, int y, int w) {
+ skvm::Program* blit_h = this->buildProgram(Coverage::Full);
+ this->updateUniforms(x+w, y);
+ if (const void* sprite = this->isSprite(x,y)) {
+ SK_BLITTER_TRACE_STEP(blitH1, true, /*scanlines=*/1, /*pixels=*/w);
+ blit_h->eval(w, fUniforms.buf.data(), fDevice.addr(x,y), sprite);
+ } else {
+ SK_BLITTER_TRACE_STEP(blitH2, true, /*scanlines=*/1, /*pixels=*/w);
+ blit_h->eval(w, fUniforms.buf.data(), fDevice.addr(x,y));
+ }
+}
+
+void SkVMBlitter::blitAntiH(int x, int y, const SkAlpha cov[], const int16_t runs[]) {
+ skvm::Program* blit_anti_h = this->buildProgram(Coverage::UniformF);
+ skvm::Program* blit_h = this->buildProgram(Coverage::Full);
+
+ SK_BLITTER_TRACE_STEP(blitAntiH, true, /*scanlines=*/1ul, /*pixels=*/0ul);
+ for (int16_t run = *runs; run > 0; run = *runs) {
+ SK_BLITTER_TRACE_STEP_ACCUMULATE(blitAntiH, /*pixels=*/run);
+ const SkAlpha coverage = *cov;
+ if (coverage != 0x00) {
+ this->updateUniforms(x+run, y);
+ const void* sprite = this->isSprite(x,y);
+ if (coverage == 0xFF) {
+ if (sprite) {
+ blit_h->eval(run, fUniforms.buf.data(), fDevice.addr(x,y), sprite);
+ } else {
+ blit_h->eval(run, fUniforms.buf.data(), fDevice.addr(x,y));
+ }
+ } else {
+ const float covF = *cov * (1/255.0f);
+ if (sprite) {
+ blit_anti_h->eval(run, fUniforms.buf.data(), fDevice.addr(x,y), sprite, &covF);
+ } else {
+ blit_anti_h->eval(run, fUniforms.buf.data(), fDevice.addr(x,y), &covF);
+ }
+ }
+ }
+ x += run;
+ runs += run;
+ cov += run;
+ }
+}
+
+void SkVMBlitter::blitMask(const SkMask& mask, const SkIRect& clip) {
+ if (mask.fFormat == SkMask::kBW_Format) {
+ return SkBlitter::blitMask(mask, clip);
+ }
+
+ const skvm::Program* program = nullptr;
+ switch (mask.fFormat) {
+ default: SkUNREACHABLE; // ARGB and SDF masks shouldn't make it here.
+
+ case SkMask::k3D_Format:
+ program = this->buildProgram(Coverage::Mask3D);
+ break;
+
+ case SkMask::kA8_Format:
+ program = this->buildProgram(Coverage::MaskA8);
+ break;
+
+ case SkMask::kLCD16_Format:
+ program = this->buildProgram(Coverage::MaskLCD16);
+ break;
+ }
+
+ SkASSERT(program);
+ if (program) {
+ SK_BLITTER_TRACE_STEP(blitMask,
+ true,
+ /*scanlines=*/clip.height(),
+ /*pixels=*/clip.width() * clip.height());
+
+ for (int y = clip.top(); y < clip.bottom(); y++) {
+ int x = clip.left(),
+ w = clip.width();
+ void* dptr = fDevice.writable_addr(x,y);
+ auto mptr = (const uint8_t*)mask.getAddr(x,y);
+ this->updateUniforms(x+w,y);
+
+ if (mask.fFormat == SkMask::k3D_Format) {
+ size_t plane = mask.computeImageSize();
+ if (const void* sprite = this->isSprite(x,y)) {
+ program->eval(w, fUniforms.buf.data(), dptr, sprite, mptr + 1*plane
+ , mptr + 2*plane
+ , mptr + 0*plane);
+ } else {
+ program->eval(w, fUniforms.buf.data(), dptr, mptr + 1*plane
+ , mptr + 2*plane
+ , mptr + 0*plane);
+ }
+ } else {
+ if (const void* sprite = this->isSprite(x,y)) {
+ program->eval(w, fUniforms.buf.data(), dptr, sprite, mptr);
+ } else {
+ program->eval(w, fUniforms.buf.data(), dptr, mptr);
+ }
+ }
+ }
+ }
+}
+
+SkVMBlitter* SkVMBlitter::Make(const SkPixmap& device,
+ const SkPaint& paint,
+ const SkMatrix& ctm,
+ SkArenaAlloc* alloc,
+ sk_sp<SkShader> clip) {
+ bool ok = true;
+ SkVMBlitter* blitter = alloc->make<SkVMBlitter>(device,
+ paint,
+ /*sprite=*/nullptr,
+ SkIPoint{0,0},
+ ctm,
+ std::move(clip),
+ &ok);
+ return ok ? blitter : nullptr;
+}
+
+SkVMBlitter* SkVMBlitter::Make(const SkPixmap& device,
+ const SkPaint& paint,
+ const SkPixmap& sprite,
+ int left, int top,
+ SkArenaAlloc* alloc,
+ sk_sp<SkShader> clip) {
+ if (paint.getMaskFilter()) {
+ // TODO: SkVM support for mask filters? definitely possible!
+ return nullptr;
+ }
+ bool ok = true;
+ auto blitter = alloc->make<SkVMBlitter>(device,
+ paint,
+ &sprite,
+ SkIPoint{left,top},
+ SkMatrix::I(),
+ std::move(clip),
+ &ok);
+ return ok ? blitter : nullptr;
+}
diff --git a/gfx/skia/skia/src/core/SkVMBlitter.h b/gfx/skia/skia/src/core/SkVMBlitter.h
new file mode 100644
index 0000000000..5c49be8a15
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVMBlitter.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVMBlitter_DEFINED
+#define SkVMBlitter_DEFINED
+
+#include "include/core/SkPixmap.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkBlitter.h"
+#include "src/core/SkLRUCache.h"
+#include "src/core/SkVM.h"
+
+class SkVMBlitter final : public SkBlitter {
+public:
+ static SkVMBlitter* Make(const SkPixmap& dst,
+ const SkPaint&,
+ const SkMatrix& ctm,
+ SkArenaAlloc*,
+ sk_sp<SkShader> clipShader);
+
+ static SkVMBlitter* Make(const SkPixmap& dst,
+ const SkPaint&,
+ const SkPixmap& sprite,
+ int left, int top,
+ SkArenaAlloc*,
+ sk_sp<SkShader> clipShader);
+
+ SkVMBlitter(const SkPixmap& device,
+ const SkPaint& paint,
+ const SkPixmap* sprite,
+ SkIPoint spriteOffset,
+ const SkMatrix& ctm,
+ sk_sp<SkShader> clip,
+ bool* ok);
+
+ ~SkVMBlitter() override;
+
+private:
+ enum Coverage { Full, UniformF, MaskA8, MaskLCD16, Mask3D, kCount };
+ struct Key {
+ uint64_t shader,
+ clip,
+ blender,
+ colorSpace;
+ uint8_t colorType,
+ alphaType,
+ coverage;
+ uint8_t padding8{0};
+ uint32_t padding{0};
+ // Params::{paint,quality,matrices} are only passed to {shader,clip}->program(),
+ // not used here by the blitter itself. No need to include them in the key;
+ // they'll be folded into the shader key if used.
+
+ bool operator==(const Key& that) const;
+ Key withCoverage(Coverage c) const;
+ };
+
+ struct Params {
+ sk_sp<SkShader> shader;
+ sk_sp<SkShader> clip;
+ sk_sp<SkBlender> blender; // never null
+ SkColorInfo dst;
+ Coverage coverage;
+ SkColor4f paint;
+ SkMatrix ctm;
+
+ Params withCoverage(Coverage c) const;
+ };
+
+ static Params EffectiveParams(const SkPixmap& device,
+ const SkPixmap* sprite,
+ SkPaint paint,
+ const SkMatrix& ctm,
+ sk_sp<SkShader> clip);
+ static skvm::Color DstColor(skvm::Builder* p, const Params& params);
+ static void BuildProgram(skvm::Builder* p, const Params& params,
+ skvm::Uniforms* uniforms, SkArenaAlloc* alloc);
+ static Key CacheKey(const Params& params,
+ skvm::Uniforms* uniforms, SkArenaAlloc* alloc, bool* ok);
+ static SkLRUCache<Key, skvm::Program>* TryAcquireProgramCache();
+ static SkString DebugName(const Key& key);
+ static void ReleaseProgramCache();
+
+ skvm::Program* buildProgram(Coverage coverage);
+ void updateUniforms(int right, int y);
+ const void* isSprite(int x, int y) const;
+
+ void blitH(int x, int y, int w) override;
+ void blitAntiH(int x, int y, const SkAlpha cov[], const int16_t runs[]) override;
+
+private:
+ void blitMask(const SkMask& mask, const SkIRect& clip) override;
+
+ SkPixmap fDevice;
+ const SkPixmap fSprite; // See isSprite().
+ const SkIPoint fSpriteOffset;
+ skvm::Uniforms fUniforms; // Most data is copied directly into fUniforms,
+ SkArenaAlloc fAlloc{2*sizeof(void*)}; // but a few effects need to ref large content.
+ const Params fParams;
+ const Key fKey;
+ bool fStoreToCache = false;
+
+ skvm::Program* fProgramPtrs[Coverage::kCount] = {nullptr};
+ SkTLazy<skvm::Program> fPrograms[Coverage::kCount];
+
+ friend class Viewer;
+};
+#endif // SkVMBlitter_DEFINED
diff --git a/gfx/skia/skia/src/core/SkVM_fwd.h b/gfx/skia/skia/src/core/SkVM_fwd.h
new file mode 100644
index 0000000000..2c2c044e35
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVM_fwd.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVM_fwd_DEFINED
+#define SkVM_fwd_DEFINED
+
+namespace skvm {
+ class Assembler;
+ class Builder;
+ class Program;
+ struct Ptr;
+ struct I32;
+ struct F32;
+ struct Color;
+ struct Coord;
+ struct Uniforms;
+} // namespace skvm
+
+#endif//SkVM_fwd_DEFINED
diff --git a/gfx/skia/skia/src/core/SkValidationUtils.h b/gfx/skia/skia/src/core/SkValidationUtils.h
new file mode 100644
index 0000000000..16dfdb3199
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkValidationUtils.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkValidationUtils_DEFINED
+#define SkValidationUtils_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "src/core/SkXfermodePriv.h"
+
+/** Returns true if mode's value is in the SkBlendMode enum.
+ */
+static inline bool SkIsValidMode(SkBlendMode mode) {
+ return (unsigned)mode <= (unsigned)SkBlendMode::kLastMode;
+}
+
+/** Returns true if the rect's dimensions are between 0 and SK_MaxS32
+ */
+static inline bool SkIsValidIRect(const SkIRect& rect) {
+ return rect.width() >= 0 && rect.height() >= 0;
+}
+
+/** Returns true if the rect's dimensions are between 0 and SK_ScalarMax
+ */
+static inline bool SkIsValidRect(const SkRect& rect) {
+ return (rect.fLeft <= rect.fRight) &&
+ (rect.fTop <= rect.fBottom) &&
+ SkScalarIsFinite(rect.width()) &&
+ SkScalarIsFinite(rect.height());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkVertState.cpp b/gfx/skia/skia/src/core/SkVertState.cpp
new file mode 100644
index 0000000000..d10a23ddde
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVertState.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkVertState.h"
+
+bool VertState::Triangles(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = index + 0;
+ state->f1 = index + 1;
+ state->f2 = index + 2;
+ state->fCurrIndex = index + 3;
+ return true;
+}
+
+bool VertState::TrianglesX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = indices[index + 0];
+ state->f1 = indices[index + 1];
+ state->f2 = indices[index + 2];
+ state->fCurrIndex = index + 3;
+ return true;
+}
+
+bool VertState::TriangleStrip(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f2 = index + 2;
+ if (index & 1) {
+ state->f0 = index + 1;
+ state->f1 = index + 0;
+ } else {
+ state->f0 = index + 0;
+ state->f1 = index + 1;
+ }
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleStripX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f2 = indices[index + 2];
+ if (index & 1) {
+ state->f0 = indices[index + 1];
+ state->f1 = indices[index + 0];
+ } else {
+ state->f0 = indices[index + 0];
+ state->f1 = indices[index + 1];
+ }
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleFan(VertState* state) {
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = 0;
+ state->f1 = index + 1;
+ state->f2 = index + 2;
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+bool VertState::TriangleFanX(VertState* state) {
+ const uint16_t* indices = state->fIndices;
+ int index = state->fCurrIndex;
+ if (index + 3 > state->fCount) {
+ return false;
+ }
+ state->f0 = indices[0];
+ state->f1 = indices[index + 1];
+ state->f2 = indices[index + 2];
+ state->fCurrIndex = index + 1;
+ return true;
+}
+
+VertState::Proc VertState::chooseProc(SkVertices::VertexMode mode) {
+ switch (mode) {
+ case SkVertices::kTriangles_VertexMode:
+ return fIndices ? TrianglesX : Triangles;
+ case SkVertices::kTriangleStrip_VertexMode:
+ return fIndices ? TriangleStripX : TriangleStrip;
+ case SkVertices::kTriangleFan_VertexMode:
+ return fIndices ? TriangleFanX : TriangleFan;
+ default:
+ return nullptr;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkVertState.h b/gfx/skia/skia/src/core/SkVertState.h
new file mode 100644
index 0000000000..fb981b7c2e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVertState.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVertState_DEFINED
+#define SkVertState_DEFINED
+
+#include "include/core/SkVertices.h"
+
+/** \struct VertState
+ This is a helper for drawVertices(). It is used to iterate over the triangles
+ that are to be rendered based on an SkCanvas::VertexMode and (optionally) an
+ index array. It does not copy the index array and the client must ensure it
+ remains valid for the lifetime of the VertState object.
+*/
+
+struct VertState {
+ int f0, f1, f2;
+
+ /**
+ * Construct a VertState from a vertex count, index array, and index count.
+ * If the vertices are unindexed pass nullptr for indices.
+ */
+ VertState(int vCount, const uint16_t indices[], int indexCount)
+ : fIndices(indices) {
+ fCurrIndex = 0;
+ if (indices) {
+ fCount = indexCount;
+ } else {
+ fCount = vCount;
+ }
+ }
+
+ typedef bool (*Proc)(VertState*);
+
+ /**
+ * Choose an appropriate function to traverse the vertices.
+ * @param mode Specifies the SkCanvas::VertexMode.
+ */
+ Proc chooseProc(SkVertices::VertexMode mode);
+
+private:
+ int fCount;
+ int fCurrIndex;
+ const uint16_t* fIndices;
+
+ static bool Triangles(VertState*);
+ static bool TrianglesX(VertState*);
+ static bool TriangleStrip(VertState*);
+ static bool TriangleStripX(VertState*);
+ static bool TriangleFan(VertState*);
+ static bool TriangleFanX(VertState*);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkVertices.cpp b/gfx/skia/skia/src/core/SkVertices.cpp
new file mode 100644
index 0000000000..47ceaf919f
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVertices.cpp
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkVertices.h"
+
+#include "include/core/SkData.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkCanvasPriv.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSafeRange.h"
+#include "src/core/SkVerticesPriv.h"
+#include "src/core/SkWriteBuffer.h"
+#include <atomic>
+#include <new>
+
+static int32_t next_id() {
+ static std::atomic<int32_t> nextID{1};
+
+ int32_t id;
+ do {
+ id = nextID.fetch_add(1, std::memory_order_relaxed);
+ } while (id == SK_InvalidGenID);
+ return id;
+}
+
+struct SkVertices::Desc {
+ VertexMode fMode;
+ int fVertexCount,
+ fIndexCount;
+ bool fHasTexs,
+ fHasColors;
+};
+
+struct SkVertices::Sizes {
+ Sizes(const Desc& desc) {
+ SkSafeMath safe;
+
+ fVSize = safe.mul(desc.fVertexCount, sizeof(SkPoint));
+ fTSize = desc.fHasTexs ? safe.mul(desc.fVertexCount, sizeof(SkPoint)) : 0;
+ fCSize = desc.fHasColors ? safe.mul(desc.fVertexCount, sizeof(SkColor)) : 0;
+
+ fBuilderTriFanISize = 0;
+ fISize = safe.mul(desc.fIndexCount, sizeof(uint16_t));
+ if (kTriangleFan_VertexMode == desc.fMode) {
+ int numFanTris = 0;
+ if (desc.fIndexCount) {
+ fBuilderTriFanISize = fISize;
+ numFanTris = desc.fIndexCount - 2;
+ } else {
+ numFanTris = desc.fVertexCount - 2;
+ // By forcing this to become indexed we are adding a constraint to the maximum
+ // number of vertices.
+ if (desc.fVertexCount > (SkTo<int>(UINT16_MAX) + 1)) {
+ sk_bzero(this, sizeof(*this));
+ return;
+ }
+ }
+ if (numFanTris <= 0) {
+ sk_bzero(this, sizeof(*this));
+ return;
+ }
+ fISize = safe.mul(numFanTris, 3 * sizeof(uint16_t));
+ }
+
+ fTotal = safe.add(sizeof(SkVertices),
+ safe.add(fVSize,
+ safe.add(fTSize,
+ safe.add(fCSize,
+ fISize))));
+
+ if (safe.ok()) {
+ fArrays = fVSize + fTSize + fCSize + fISize; // just the sum of the arrays
+ } else {
+ sk_bzero(this, sizeof(*this));
+ }
+ }
+
+ bool isValid() const { return fTotal != 0; }
+
+ size_t fTotal = 0; // size of entire SkVertices allocation (obj + arrays)
+    size_t fArrays;           // size of all the data arrays (V + T + C + I)
+ size_t fVSize;
+ size_t fTSize;
+ size_t fCSize;
+ size_t fISize;
+
+    // For indexed tri-fans this is the amount of space for indices needed in the builder
+ // before conversion to indexed triangles (or zero if not indexed or not a triangle fan).
+ size_t fBuilderTriFanISize;
+};
+
+SkVertices::Builder::Builder(VertexMode mode, int vertexCount, int indexCount,
+ uint32_t builderFlags) {
+ bool hasTexs = SkToBool(builderFlags & SkVertices::kHasTexCoords_BuilderFlag);
+ bool hasColors = SkToBool(builderFlags & SkVertices::kHasColors_BuilderFlag);
+ this->init({mode, vertexCount, indexCount, hasTexs, hasColors});
+}
+
+SkVertices::Builder::Builder(const Desc& desc) {
+ this->init(desc);
+}
+
+void SkVertices::Builder::init(const Desc& desc) {
+ Sizes sizes(desc);
+ if (!sizes.isValid()) {
+ SkASSERT(!this->isValid());
+ return;
+ }
+
+ void* storage = ::operator new (sizes.fTotal);
+ if (sizes.fBuilderTriFanISize) {
+ fIntermediateFanIndices.reset(new uint8_t[sizes.fBuilderTriFanISize]);
+ }
+
+ fVertices.reset(new (storage) SkVertices);
+
+ // need to point past the object to store the arrays
+ char* ptr = (char*)storage + sizeof(SkVertices);
+
+ // return the original ptr (or null), but then advance it by size
+ auto advance = [&ptr](size_t size) {
+ char* new_ptr = size ? ptr : nullptr;
+ ptr += size;
+ return new_ptr;
+ };
+
+ fVertices->fPositions = (SkPoint*) advance(sizes.fVSize);
+ fVertices->fTexs = (SkPoint*) advance(sizes.fTSize);
+ fVertices->fColors = (SkColor*) advance(sizes.fCSize);
+ fVertices->fIndices = (uint16_t*)advance(sizes.fISize);
+
+ fVertices->fVertexCount = desc.fVertexCount;
+ fVertices->fIndexCount = desc.fIndexCount;
+ fVertices->fMode = desc.fMode;
+
+ // We defer assigning fBounds and fUniqueID until detach() is called
+}
+
+sk_sp<SkVertices> SkVertices::Builder::detach() {
+ if (fVertices) {
+ fVertices->fBounds.setBounds(fVertices->fPositions, fVertices->fVertexCount);
+ if (fVertices->fMode == kTriangleFan_VertexMode) {
+ if (fIntermediateFanIndices) {
+ SkASSERT(fVertices->fIndexCount);
+ auto tempIndices = this->indices();
+ for (int t = 0; t < fVertices->fIndexCount - 2; ++t) {
+ fVertices->fIndices[3 * t + 0] = tempIndices[0];
+ fVertices->fIndices[3 * t + 1] = tempIndices[t + 1];
+ fVertices->fIndices[3 * t + 2] = tempIndices[t + 2];
+ }
+ fVertices->fIndexCount = 3 * (fVertices->fIndexCount - 2);
+ } else {
+ SkASSERT(!fVertices->fIndexCount);
+ for (int t = 0; t < fVertices->fVertexCount - 2; ++t) {
+ fVertices->fIndices[3 * t + 0] = 0;
+ fVertices->fIndices[3 * t + 1] = SkToU16(t + 1);
+ fVertices->fIndices[3 * t + 2] = SkToU16(t + 2);
+ }
+ fVertices->fIndexCount = 3 * (fVertices->fVertexCount - 2);
+ }
+ fVertices->fMode = kTriangles_VertexMode;
+ }
+ fVertices->fUniqueID = next_id();
+ return std::move(fVertices); // this will null fVertices after the return
+ }
+ return nullptr;
+}
+
+SkPoint* SkVertices::Builder::positions() {
+ return fVertices ? const_cast<SkPoint*>(fVertices->fPositions) : nullptr;
+}
+
+SkPoint* SkVertices::Builder::texCoords() {
+ return fVertices ? const_cast<SkPoint*>(fVertices->fTexs) : nullptr;
+}
+
+SkColor* SkVertices::Builder::colors() {
+ return fVertices ? const_cast<SkColor*>(fVertices->fColors) : nullptr;
+}
+
+uint16_t* SkVertices::Builder::indices() {
+ if (!fVertices) {
+ return nullptr;
+ }
+ if (fIntermediateFanIndices) {
+ return reinterpret_cast<uint16_t*>(fIntermediateFanIndices.get());
+ }
+ return const_cast<uint16_t*>(fVertices->fIndices);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkVertices> SkVertices::MakeCopy(VertexMode mode, int vertexCount,
+ const SkPoint pos[], const SkPoint texs[],
+ const SkColor colors[],
+ int indexCount, const uint16_t indices[]) {
+ auto desc = Desc{mode, vertexCount, indexCount, !!texs, !!colors};
+ Builder builder(desc);
+ if (!builder.isValid()) {
+ return nullptr;
+ }
+
+ Sizes sizes(desc);
+ SkASSERT(sizes.isValid());
+ sk_careful_memcpy(builder.positions(), pos, sizes.fVSize);
+ sk_careful_memcpy(builder.texCoords(), texs, sizes.fTSize);
+ sk_careful_memcpy(builder.colors(), colors, sizes.fCSize);
+ size_t isize = (mode == kTriangleFan_VertexMode) ? sizes.fBuilderTriFanISize : sizes.fISize;
+ sk_careful_memcpy(builder.indices(), indices, isize);
+
+ return builder.detach();
+}
+
+size_t SkVertices::approximateSize() const {
+ return this->getSizes().fTotal;
+}
+
+SkVertices::Sizes SkVertices::getSizes() const {
+ Sizes sizes({fMode, fVertexCount, fIndexCount, !!fTexs, !!fColors});
+ SkASSERT(sizes.isValid());
+ return sizes;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// storage = packed | vertex_count | index_count | attr_count
+// | pos[] | custom[] | texs[] | colors[] | indices[]
+
+#define kMode_Mask 0x0FF
+#define kHasTexs_Mask 0x100
+#define kHasColors_Mask 0x200
+
+void SkVerticesPriv::encode(SkWriteBuffer& buffer) const {
+ // packed has room for additional flags in the future
+ uint32_t packed = static_cast<uint32_t>(fVertices->fMode);
+ SkASSERT((packed & ~kMode_Mask) == 0); // our mode fits in the mask bits
+ if (fVertices->fTexs) {
+ packed |= kHasTexs_Mask;
+ }
+ if (fVertices->fColors) {
+ packed |= kHasColors_Mask;
+ }
+
+ SkVertices::Sizes sizes = fVertices->getSizes();
+ SkASSERT(!sizes.fBuilderTriFanISize);
+
+ // Header
+ buffer.writeUInt(packed);
+ buffer.writeInt(fVertices->fVertexCount);
+ buffer.writeInt(fVertices->fIndexCount);
+
+ // Data arrays
+ buffer.writeByteArray(fVertices->fPositions, sizes.fVSize);
+ buffer.writeByteArray(fVertices->fTexs, sizes.fTSize);
+ buffer.writeByteArray(fVertices->fColors, sizes.fCSize);
+ // if index-count is odd, we won't be 4-bytes aligned, so we call the pad version
+ buffer.writeByteArray(fVertices->fIndices, sizes.fISize);
+}
+
+sk_sp<SkVertices> SkVerticesPriv::Decode(SkReadBuffer& buffer) {
+ auto decode = [](SkReadBuffer& buffer) -> sk_sp<SkVertices> {
+ SkSafeRange safe;
+ bool hasCustomData = buffer.isVersionLT(SkPicturePriv::kVerticesRemoveCustomData_Version);
+
+ const uint32_t packed = buffer.readUInt();
+ const int vertexCount = safe.checkGE(buffer.readInt(), 0);
+ const int indexCount = safe.checkGE(buffer.readInt(), 0);
+ const int attrCount = hasCustomData ? safe.checkGE(buffer.readInt(), 0) : 0;
+ const SkVertices::VertexMode mode = safe.checkLE<SkVertices::VertexMode>(
+ packed & kMode_Mask, SkVertices::kLast_VertexMode);
+ const bool hasTexs = SkToBool(packed & kHasTexs_Mask);
+ const bool hasColors = SkToBool(packed & kHasColors_Mask);
+
+ // Check that the header fields and buffer are valid. If this is data with the experimental
+ // custom attributes feature - we don't support that any more.
+ // We also don't support serialized triangle-fan data. We stopped writing that long ago,
+ // so it should never appear in valid encoded data.
+ if (!safe || !buffer.isValid() || attrCount ||
+ mode == SkVertices::kTriangleFan_VertexMode) {
+ return nullptr;
+ }
+
+ const SkVertices::Desc desc{mode, vertexCount, indexCount, hasTexs, hasColors};
+ SkVertices::Sizes sizes(desc);
+ if (!sizes.isValid() || sizes.fArrays > buffer.available()) {
+ return nullptr;
+ }
+
+ SkVertices::Builder builder(desc);
+ if (!builder.isValid()) {
+ return nullptr;
+ }
+
+ buffer.readByteArray(builder.positions(), sizes.fVSize);
+ if (hasCustomData) {
+ size_t customDataSize = 0;
+ buffer.skipByteArray(&customDataSize);
+ if (customDataSize != 0) {
+ return nullptr;
+ }
+ }
+ buffer.readByteArray(builder.texCoords(), sizes.fTSize);
+ buffer.readByteArray(builder.colors(), sizes.fCSize);
+ buffer.readByteArray(builder.indices(), sizes.fISize);
+
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+
+ if (indexCount > 0) {
+ // validate that the indices are in range
+ const uint16_t* indices = builder.indices();
+ for (int i = 0; i < indexCount; ++i) {
+ if (indices[i] >= (unsigned)vertexCount) {
+ return nullptr;
+ }
+ }
+ }
+
+ return builder.detach();
+ };
+
+ if (auto verts = decode(buffer)) {
+ return verts;
+ }
+ buffer.validate(false);
+ return nullptr;
+}
+
+void SkVertices::operator delete(void* p) {
+ ::operator delete(p);
+}
diff --git a/gfx/skia/skia/src/core/SkVerticesPriv.h b/gfx/skia/skia/src/core/SkVerticesPriv.h
new file mode 100644
index 0000000000..3aa0411ed7
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkVerticesPriv.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkVerticesPriv_DEFINED
+#define SkVerticesPriv_DEFINED
+
+#include "include/core/SkVertices.h"
+
+class SkReadBuffer;
+class SkWriteBuffer;
+
+struct SkVertices_DeprecatedBone { float values[6]; };
+
+/** Class that adds methods to SkVertices that are only intended for use internal to Skia.
+ This class is purely a privileged window into SkVertices. It should never have additional
+ data members or virtual methods. */
+class SkVerticesPriv {
+public:
+ SkVertices::VertexMode mode() const { return fVertices->fMode; }
+
+ bool hasColors() const { return SkToBool(fVertices->fColors); }
+ bool hasTexCoords() const { return SkToBool(fVertices->fTexs); }
+ bool hasIndices() const { return SkToBool(fVertices->fIndices); }
+
+ int vertexCount() const { return fVertices->fVertexCount; }
+ int indexCount() const { return fVertices->fIndexCount; }
+
+ const SkPoint* positions() const { return fVertices->fPositions; }
+ const SkPoint* texCoords() const { return fVertices->fTexs; }
+ const SkColor* colors() const { return fVertices->fColors; }
+ const uint16_t* indices() const { return fVertices->fIndices; }
+
+ // Never called due to RVO in priv(), but must exist for MSVC 2017.
+ SkVerticesPriv(const SkVerticesPriv&) = default;
+
+ void encode(SkWriteBuffer&) const;
+ static sk_sp<SkVertices> Decode(SkReadBuffer&);
+
+private:
+ explicit SkVerticesPriv(SkVertices* vertices) : fVertices(vertices) {}
+ SkVerticesPriv& operator=(const SkVerticesPriv&) = delete;
+
+ // No taking addresses of this type
+ const SkVerticesPriv* operator&() const = delete;
+ SkVerticesPriv* operator&() = delete;
+
+ SkVertices* fVertices;
+
+ friend class SkVertices; // to construct this type
+};
+
+inline SkVerticesPriv SkVertices::priv() { return SkVerticesPriv(this); }
+
+inline const SkVerticesPriv SkVertices::priv() const { // NOLINT(readability-const-return-type)
+ return SkVerticesPriv(const_cast<SkVertices*>(this));
+}
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkWriteBuffer.cpp b/gfx/skia/skia/src/core/SkWriteBuffer.cpp
new file mode 100644
index 0000000000..e02fc6d45b
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriteBuffer.cpp
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkWriteBuffer.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkPtrRecorder.h"
+#include "src/image/SkImage_Base.h"
+
+#include <cstring>
+#include <utility>
+
+class SkMatrix;
+class SkPaint;
+class SkRegion;
+class SkStream;
+class SkWStream;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkBinaryWriteBuffer::SkBinaryWriteBuffer()
+ : fFactorySet(nullptr)
+ , fTFSet(nullptr) {
+}
+
+SkBinaryWriteBuffer::SkBinaryWriteBuffer(void* storage, size_t storageSize)
+ : fFactorySet(nullptr)
+ , fTFSet(nullptr)
+ , fWriter(storage, storageSize)
+{}
+
+SkBinaryWriteBuffer::~SkBinaryWriteBuffer() {}
+
+bool SkBinaryWriteBuffer::usingInitialStorage() const {
+ return fWriter.usingInitialStorage();
+}
+
+void SkBinaryWriteBuffer::writeByteArray(const void* data, size_t size) {
+ fWriter.write32(SkToU32(size));
+ fWriter.writePad(data, size);
+}
+
+void SkBinaryWriteBuffer::writeBool(bool value) {
+ fWriter.writeBool(value);
+}
+
+void SkBinaryWriteBuffer::writeScalar(SkScalar value) {
+ fWriter.writeScalar(value);
+}
+
+void SkBinaryWriteBuffer::writeScalarArray(const SkScalar* value, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(value, count * sizeof(SkScalar));
+}
+
+void SkBinaryWriteBuffer::writeInt(int32_t value) {
+ fWriter.write32(value);
+}
+
+void SkBinaryWriteBuffer::writeIntArray(const int32_t* value, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(value, count * sizeof(int32_t));
+}
+
+void SkBinaryWriteBuffer::writeUInt(uint32_t value) {
+ fWriter.write32(value);
+}
+
+void SkBinaryWriteBuffer::writeString(std::string_view value) {
+ fWriter.writeString(value.data(), value.size());
+}
+
+void SkBinaryWriteBuffer::writeColor(SkColor color) {
+ fWriter.write32(color);
+}
+
+void SkBinaryWriteBuffer::writeColorArray(const SkColor* color, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(color, count * sizeof(SkColor));
+}
+
+void SkBinaryWriteBuffer::writeColor4f(const SkColor4f& color) {
+ fWriter.write(&color, sizeof(SkColor4f));
+}
+
+void SkBinaryWriteBuffer::writeColor4fArray(const SkColor4f* color, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(color, count * sizeof(SkColor4f));
+}
+
+void SkBinaryWriteBuffer::writePoint(const SkPoint& point) {
+ fWriter.writeScalar(point.fX);
+ fWriter.writeScalar(point.fY);
+}
+
+void SkBinaryWriteBuffer::writePoint3(const SkPoint3& point) {
+ this->writePad32(&point, sizeof(SkPoint3));
+}
+
+void SkBinaryWriteBuffer::writePointArray(const SkPoint* point, uint32_t count) {
+ fWriter.write32(count);
+ fWriter.write(point, count * sizeof(SkPoint));
+}
+
+void SkBinaryWriteBuffer::write(const SkM44& matrix) {
+ fWriter.write(SkMatrixPriv::M44ColMajor(matrix), sizeof(float) * 16);
+}
+
+void SkBinaryWriteBuffer::writeMatrix(const SkMatrix& matrix) {
+ fWriter.writeMatrix(matrix);
+}
+
+void SkBinaryWriteBuffer::writeIRect(const SkIRect& rect) {
+ fWriter.write(&rect, sizeof(SkIRect));
+}
+
+void SkBinaryWriteBuffer::writeRect(const SkRect& rect) {
+ fWriter.writeRect(rect);
+}
+
+void SkBinaryWriteBuffer::writeRegion(const SkRegion& region) {
+ fWriter.writeRegion(region);
+}
+
+void SkBinaryWriteBuffer::writeSampling(const SkSamplingOptions& sampling) {
+ fWriter.writeSampling(sampling);
+}
+
+void SkBinaryWriteBuffer::writePath(const SkPath& path) {
+ fWriter.writePath(path);
+}
+
+size_t SkBinaryWriteBuffer::writeStream(SkStream* stream, size_t length) {
+ fWriter.write32(SkToU32(length));
+ size_t bytesWritten = fWriter.readFromStream(stream, length);
+ if (bytesWritten < length) {
+ fWriter.reservePad(length - bytesWritten);
+ }
+ return bytesWritten;
+}
+
+bool SkBinaryWriteBuffer::writeToStream(SkWStream* stream) const {
+ return fWriter.writeToStream(stream);
+}
+
+/* Format:
+ * flags: U32
+ * encoded : size_32 + data[]
+ * [subset: IRect]
+ * [mips] : size_32 + data[]
+ */
+void SkBinaryWriteBuffer::writeImage(const SkImage* image) {
+ uint32_t flags = 0;
+ const SkMipmap* mips = as_IB(image)->onPeekMips();
+ if (mips) {
+ flags |= SkWriteBufferImageFlags::kHasMipmap;
+ }
+ if (image->alphaType() == kUnpremul_SkAlphaType) {
+ flags |= SkWriteBufferImageFlags::kUnpremul;
+ }
+
+ this->write32(flags);
+
+ sk_sp<SkData> data;
+ if (fProcs.fImageProc) {
+ data = fProcs.fImageProc(const_cast<SkImage*>(image), fProcs.fImageCtx);
+ }
+ if (!data) {
+ data = image->encodeToData();
+ }
+ this->writeDataAsByteArray(data.get());
+
+ if (flags & SkWriteBufferImageFlags::kHasMipmap) {
+ this->writeDataAsByteArray(mips->serialize().get());
+ }
+}
+
+void SkBinaryWriteBuffer::writeTypeface(SkTypeface* obj) {
+ // Write 32 bits (signed)
+ // 0 -- default font
+ // >0 -- index
+ // <0 -- custom (serial procs)
+
+ if (obj == nullptr) {
+ fWriter.write32(0);
+ } else if (fProcs.fTypefaceProc) {
+ auto data = fProcs.fTypefaceProc(obj, fProcs.fTypefaceCtx);
+ if (data) {
+ size_t size = data->size();
+ if (!SkTFitsIn<int32_t>(size)) {
+ size = 0; // fall back to default font
+ }
+ int32_t ssize = SkToS32(size);
+ fWriter.write32(-ssize); // negative to signal custom
+ if (size) {
+ this->writePad32(data->data(), size);
+ }
+ return;
+ }
+ // no data means fall through for std behavior
+ }
+ fWriter.write32(fTFSet ? fTFSet->add(obj) : 0);
+}
+
+void SkBinaryWriteBuffer::writePaint(const SkPaint& paint) {
+ SkPaintPriv::Flatten(paint, *this);
+}
+
+void SkBinaryWriteBuffer::setFactoryRecorder(sk_sp<SkFactorySet> rec) {
+ fFactorySet = std::move(rec);
+}
+
+void SkBinaryWriteBuffer::setTypefaceRecorder(sk_sp<SkRefCntSet> rec) {
+ fTFSet = std::move(rec);
+}
+
+void SkBinaryWriteBuffer::writeFlattenable(const SkFlattenable* flattenable) {
+ if (nullptr == flattenable) {
+ this->write32(0);
+ return;
+ }
+
+ /*
+ * We can write 1 of 2 versions of the flattenable:
+ *
+ * 1. index into fFactorySet: This assumes the writer will later resolve the function-ptrs
+ * into strings for its reader. SkPicture does exactly this, by writing a table of names
+ * (matching the indices) up front in its serialized form.
+ *
+ * 2. string name of the flattenable or index into fFlattenableDict: We store the string to
+ * allow the reader to specify its own factories after write time. In order to improve
+ * compression, if we have already written the string, we write its index instead.
+ */
+
+ if (SkFlattenable::Factory factory = flattenable->getFactory(); factory && fFactorySet) {
+ this->write32(fFactorySet->add(factory));
+ } else {
+ const char* name = flattenable->getTypeName();
+ SkASSERT(name);
+ SkASSERT(0 != strcmp("", name));
+
+ if (uint32_t* indexPtr = fFlattenableDict.find(name)) {
+ // We will write the index as a 32-bit int. We want the first byte
+ // that we send to be zero - this will act as a sentinel that we
+ // have an index (not a string). This means that we will send the
+            // index shifted left by 8. The remaining 24-bits should be
+ // plenty to store the index. Note that this strategy depends on
+ // being little endian, and type names being non-empty.
+ SkASSERT(0 == *indexPtr >> 24);
+ this->write32(*indexPtr << 8);
+ } else {
+ this->writeString(name);
+ fFlattenableDict.set(name, fFlattenableDict.count() + 1);
+ }
+ }
+
+ // make room for the size of the flattened object
+ (void)fWriter.reserve(sizeof(uint32_t));
+ // record the current size, so we can subtract after the object writes.
+ size_t offset = fWriter.bytesWritten();
+ // now flatten the object
+ flattenable->flatten(*this);
+ size_t objSize = fWriter.bytesWritten() - offset;
+ // record the obj's size
+ fWriter.overwriteTAt(offset - sizeof(uint32_t), SkToU32(objSize));
+}
diff --git a/gfx/skia/skia/src/core/SkWriteBuffer.h b/gfx/skia/skia/src/core/SkWriteBuffer.h
new file mode 100644
index 0000000000..57f9819d08
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriteBuffer.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWriteBuffer_DEFINED
+#define SkWriteBuffer_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSerialProcs.h"
+#include "src/core/SkTHash.h"
+#include "src/core/SkWriter32.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <string_view>
+
+class SkFactorySet;
+class SkFlattenable;
+class SkImage;
+class SkM44;
+class SkMatrix;
+class SkPaint;
+class SkPath;
+class SkRefCntSet;
+class SkRegion;
+class SkStream;
+class SkTypeface;
+class SkWStream;
+struct SkIRect;
+struct SkPoint3;
+struct SkPoint;
+struct SkRect;
+
+class SkWriteBuffer {
+public:
+ SkWriteBuffer() {}
+ virtual ~SkWriteBuffer() {}
+
+ virtual void writePad32(const void* buffer, size_t bytes) = 0;
+
+ virtual void writeByteArray(const void* data, size_t size) = 0;
+ void writeDataAsByteArray(const SkData* data) {
+ if (!data) {
+ this->write32(0);
+ } else {
+ this->writeByteArray(data->data(), data->size());
+ }
+ }
+
+ virtual void writeBool(bool value) = 0;
+ virtual void writeScalar(SkScalar value) = 0;
+ virtual void writeScalarArray(const SkScalar* value, uint32_t count) = 0;
+ virtual void writeInt(int32_t value) = 0;
+ virtual void writeIntArray(const int32_t* value, uint32_t count) = 0;
+ virtual void writeUInt(uint32_t value) = 0;
+ void write32(int32_t value) {
+ this->writeInt(value);
+ }
+ virtual void writeString(std::string_view value) = 0;
+
+ virtual void writeFlattenable(const SkFlattenable* flattenable) = 0;
+ virtual void writeColor(SkColor color) = 0;
+ virtual void writeColorArray(const SkColor* color, uint32_t count) = 0;
+ virtual void writeColor4f(const SkColor4f& color) = 0;
+ virtual void writeColor4fArray(const SkColor4f* color, uint32_t count) = 0;
+ virtual void writePoint(const SkPoint& point) = 0;
+ virtual void writePointArray(const SkPoint* point, uint32_t count) = 0;
+ virtual void writePoint3(const SkPoint3& point) = 0;
+ virtual void write(const SkM44&) = 0;
+ virtual void writeMatrix(const SkMatrix& matrix) = 0;
+ virtual void writeIRect(const SkIRect& rect) = 0;
+ virtual void writeRect(const SkRect& rect) = 0;
+ virtual void writeRegion(const SkRegion& region) = 0;
+ virtual void writeSampling(const SkSamplingOptions&) = 0;
+ virtual void writePath(const SkPath& path) = 0;
+ virtual size_t writeStream(SkStream* stream, size_t length) = 0;
+ virtual void writeImage(const SkImage*) = 0;
+ virtual void writeTypeface(SkTypeface* typeface) = 0;
+ virtual void writePaint(const SkPaint& paint) = 0;
+
+ void setSerialProcs(const SkSerialProcs& procs) { fProcs = procs; }
+
+protected:
+ SkSerialProcs fProcs;
+
+ friend class SkPicturePriv; // fProcs
+};
+
+/**
+ * Concrete implementation that serializes to a flat binary blob.
+ */
+class SkBinaryWriteBuffer : public SkWriteBuffer {
+public:
+ SkBinaryWriteBuffer();
+ SkBinaryWriteBuffer(void* initialStorage, size_t storageSize);
+ ~SkBinaryWriteBuffer() override;
+
+ void write(const void* buffer, size_t bytes) {
+ fWriter.write(buffer, bytes);
+ }
+ void writePad32(const void* buffer, size_t bytes) override {
+ fWriter.writePad(buffer, bytes);
+ }
+
+ void reset(void* storage = nullptr, size_t storageSize = 0) {
+ fWriter.reset(storage, storageSize);
+ }
+
+ size_t bytesWritten() const { return fWriter.bytesWritten(); }
+
+ // Returns true iff all of the bytes written so far are stored in the initial storage
+ // buffer provided in the constructor or the most recent call to reset.
+ bool usingInitialStorage() const;
+
+ void writeByteArray(const void* data, size_t size) override;
+ void writeBool(bool value) override;
+ void writeScalar(SkScalar value) override;
+ void writeScalarArray(const SkScalar* value, uint32_t count) override;
+ void writeInt(int32_t value) override;
+ void writeIntArray(const int32_t* value, uint32_t count) override;
+ void writeUInt(uint32_t value) override;
+ void writeString(std::string_view value) override;
+
+ void writeFlattenable(const SkFlattenable* flattenable) override;
+ void writeColor(SkColor color) override;
+ void writeColorArray(const SkColor* color, uint32_t count) override;
+ void writeColor4f(const SkColor4f& color) override;
+ void writeColor4fArray(const SkColor4f* color, uint32_t count) override;
+ void writePoint(const SkPoint& point) override;
+ void writePointArray(const SkPoint* point, uint32_t count) override;
+ void writePoint3(const SkPoint3& point) override;
+ void write(const SkM44&) override;
+ void writeMatrix(const SkMatrix& matrix) override;
+ void writeIRect(const SkIRect& rect) override;
+ void writeRect(const SkRect& rect) override;
+ void writeRegion(const SkRegion& region) override;
+ void writeSampling(const SkSamplingOptions&) override;
+ void writePath(const SkPath& path) override;
+ size_t writeStream(SkStream* stream, size_t length) override;
+ void writeImage(const SkImage*) override;
+ void writeTypeface(SkTypeface* typeface) override;
+ void writePaint(const SkPaint& paint) override;
+
+ bool writeToStream(SkWStream*) const;
+ void writeToMemory(void* dst) const { fWriter.flatten(dst); }
+ sk_sp<SkData> snapshotAsData() const { return fWriter.snapshotAsData(); }
+
+ void setFactoryRecorder(sk_sp<SkFactorySet>);
+ void setTypefaceRecorder(sk_sp<SkRefCntSet>);
+
+private:
+ sk_sp<SkFactorySet> fFactorySet;
+ sk_sp<SkRefCntSet> fTFSet;
+
+ SkWriter32 fWriter;
+
+ // Only used if we do not have an fFactorySet
+ SkTHashMap<const char*, uint32_t> fFlattenableDict;
+};
+
+enum SkWriteBufferImageFlags {
+ kVersion_bits = 8,
+ kCurrVersion = 0,
+
+ kHasSubsetRect = 1 << 8,
+ kHasMipmap = 1 << 9,
+ kUnpremul = 1 << 10,
+};
+
+
+#endif // SkWriteBuffer_DEFINED
diff --git a/gfx/skia/skia/src/core/SkWritePixelsRec.cpp b/gfx/skia/skia/src/core/SkWritePixelsRec.cpp
new file mode 100644
index 0000000000..20b2003f59
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWritePixelsRec.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkWritePixelsRec.h"
+
+#include "include/core/SkRect.h"
+
+bool SkWritePixelsRec::trim(int dstWidth, int dstHeight) {
+ if (nullptr == fPixels || fRowBytes < fInfo.minRowBytes()) {
+ return false;
+ }
+ if (0 >= fInfo.width() || 0 >= fInfo.height()) {
+ return false;
+ }
+
+ int x = fX;
+ int y = fY;
+ SkIRect dstR = SkIRect::MakeXYWH(x, y, fInfo.width(), fInfo.height());
+ if (!dstR.intersect({0, 0, dstWidth, dstHeight})) {
+ return false;
+ }
+
+ // if x or y are negative, then we have to adjust pixels
+ if (x > 0) {
+ x = 0;
+ }
+ if (y > 0) {
+ y = 0;
+ }
+ // here x,y are either 0 or negative
+ // we negate and add them so UBSAN (pointer-overflow) doesn't get confused.
+ fPixels = ((const char*)fPixels + -y*fRowBytes + -x*fInfo.bytesPerPixel());
+ // the intersect may have shrunk info's logical size
+ fInfo = fInfo.makeDimensions(dstR.size());
+ fX = dstR.x();
+ fY = dstR.y();
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/core/SkWritePixelsRec.h b/gfx/skia/skia/src/core/SkWritePixelsRec.h
new file mode 100644
index 0000000000..1d191e5c8d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWritePixelsRec.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWritePixelsRec_DEFINED
+#define SkWritePixelsRec_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+
+#include <cstddef>
+
+/**
+ * Helper class to package and trim the parameters passed to writePixels()
+ */
+struct SkWritePixelsRec {
+ SkWritePixelsRec(const SkImageInfo& info, const void* pixels, size_t rowBytes, int x, int y)
+ : fPixels(pixels)
+ , fRowBytes(rowBytes)
+ , fInfo(info)
+ , fX(x)
+ , fY(y)
+ {}
+
+ SkWritePixelsRec(const SkPixmap& pm, int x, int y)
+ : fPixels(pm.addr())
+ , fRowBytes(pm.rowBytes())
+ , fInfo(pm.info())
+ , fX(x)
+ , fY(y)
+ {}
+
+ const void* fPixels;
+ size_t fRowBytes;
+ SkImageInfo fInfo;
+ int fX;
+ int fY;
+
+ /*
+ * On true, may have modified its fields (except fRowBytes) to make it a legal subset
+ * of the specified dst width/height.
+ *
+ * On false, leaves self unchanged, but indicates that it does not overlap dst, or
+ * is not valid (e.g. bad fInfo) for writePixels().
+ */
+ bool trim(int dstWidth, int dstHeight);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkWriter32.cpp b/gfx/skia/skia/src/core/SkWriter32.cpp
new file mode 100644
index 0000000000..50765f3741
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriter32.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkWriter32.h"
+
+#include "include/core/SkSamplingOptions.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkMatrixPriv.h"
+
+void SkWriter32::writeMatrix(const SkMatrix& matrix) {
+ size_t size = SkMatrixPriv::WriteToMemory(matrix, nullptr);
+ SkASSERT(SkAlign4(size) == size);
+ SkMatrixPriv::WriteToMemory(matrix, this->reserve(size));
+}
+
+void SkWriter32::writeSampling(const SkSamplingOptions& sampling) {
+ this->write32(sampling.maxAniso);
+ if (!sampling.isAniso()) {
+ this->writeBool(sampling.useCubic);
+ if (sampling.useCubic) {
+ this->writeScalar(sampling.cubic.B);
+ this->writeScalar(sampling.cubic.C);
+ } else {
+ this->write32((unsigned)sampling.filter);
+ this->write32((unsigned)sampling.mipmap);
+ }
+ }
+}
+
+void SkWriter32::writeString(const char str[], size_t len) {
+ if (nullptr == str) {
+ str = "";
+ len = 0;
+ }
+ if ((long)len < 0) {
+ len = strlen(str);
+ }
+
+ // [ 4 byte len ] [ str ... ] [1 - 4 \0s]
+ uint32_t* ptr = this->reservePad(sizeof(uint32_t) + len + 1);
+ *ptr = SkToU32(len);
+ char* chars = (char*)(ptr + 1);
+ memcpy(chars, str, len);
+ chars[len] = '\0';
+}
+
+size_t SkWriter32::WriteStringSize(const char* str, size_t len) {
+ if ((long)len < 0) {
+ SkASSERT(str);
+ len = strlen(str);
+ }
+ const size_t lenBytes = 4; // we use 4 bytes to record the length
+ // add 1 since we also write a terminating 0
+ return SkAlign4(lenBytes + len + 1);
+}
+
+void SkWriter32::growToAtLeast(size_t size) {
+ const bool wasExternal = (fExternal != nullptr) && (fData == fExternal);
+
+ fCapacity = 4096 + std::max(size, fCapacity + (fCapacity / 2));
+ fInternal.realloc(fCapacity);
+ fData = fInternal.get();
+
+ if (wasExternal) {
+ // we were external, so copy in the data
+ memcpy(fData, fExternal, fUsed);
+ }
+}
+
+sk_sp<SkData> SkWriter32::snapshotAsData() const {
+ return SkData::MakeWithCopy(fData, fUsed);
+}
diff --git a/gfx/skia/skia/src/core/SkWriter32.h b/gfx/skia/skia/src/core/SkWriter32.h
new file mode 100644
index 0000000000..6b6e2c34f4
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkWriter32.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWriter32_DEFINED
+#define SkWriter32_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+
+struct SkSamplingOptions;
+
+class SkWriter32 : SkNoncopyable {
+public:
+ /**
+ * The caller can specify an initial block of storage, which the caller manages.
+ *
+ * SkWriter32 will try to back reserve and write calls with this external storage until the
+ * first time an allocation doesn't fit. From then it will use dynamically allocated storage.
+ * This used to be optional behavior, but pipe now relies on it.
+ */
+ SkWriter32(void* external = nullptr, size_t externalBytes = 0) {
+ this->reset(external, externalBytes);
+ }
+
+ // return the current offset (will always be a multiple of 4)
+ size_t bytesWritten() const { return fUsed; }
+
+ // Returns true iff all of the bytes written so far are stored in the initial storage
+ // buffer provided in the constructor or the most recent call to reset.
+ bool usingInitialStorage() const { return fData == fExternal; }
+
+ void reset(void* external = nullptr, size_t externalBytes = 0) {
+ // we cast this pointer to int* and float* at times, so assert that it is aligned.
+ SkASSERT(SkIsAlign4((uintptr_t)external));
+ // we always write multiples of 4-bytes, so truncate down the size to match that
+ externalBytes &= ~3;
+
+ fData = (uint8_t*)external;
+ fCapacity = externalBytes;
+ fUsed = 0;
+ fExternal = external;
+ }
+
+ // size MUST be multiple of 4
+ uint32_t* reserve(size_t size) {
+ SkASSERT(SkAlign4(size) == size);
+ size_t offset = fUsed;
+ size_t totalRequired = fUsed + size;
+ if (totalRequired > fCapacity) {
+ this->growToAtLeast(totalRequired);
+ }
+ fUsed = totalRequired;
+ return (uint32_t*)(fData + offset);
+ }
+
+ /**
+ * Read a T record at offset, which must be a multiple of 4. Only legal if the record
+ * was written atomically using the write methods below.
+ */
+ template<typename T>
+ const T& readTAt(size_t offset) const {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset < fUsed);
+ return *(T*)(fData + offset);
+ }
+
+ /**
+ * Overwrite a T record at offset, which must be a multiple of 4. Only legal if the record
+ * was written atomically using the write methods below.
+ */
+ template<typename T>
+ void overwriteTAt(size_t offset, const T& value) {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset < fUsed);
+ *(T*)(fData + offset) = value;
+ }
+
+ bool writeBool(bool value) {
+ this->write32(value);
+ return value;
+ }
+
+ void writeInt(int32_t value) {
+ this->write32(value);
+ }
+
+ void write8(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value & 0xFF;
+ }
+
+ void write16(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value & 0xFFFF;
+ }
+
+ void write32(int32_t value) {
+ *(int32_t*)this->reserve(sizeof(value)) = value;
+ }
+
+ void writeScalar(SkScalar value) {
+ *(SkScalar*)this->reserve(sizeof(value)) = value;
+ }
+
+ void writePoint(const SkPoint& pt) {
+ *(SkPoint*)this->reserve(sizeof(pt)) = pt;
+ }
+
+ void writePoint3(const SkPoint3& pt) {
+ *(SkPoint3*)this->reserve(sizeof(pt)) = pt;
+ }
+
+ void writeRect(const SkRect& rect) {
+ *(SkRect*)this->reserve(sizeof(rect)) = rect;
+ }
+
+ void writeIRect(const SkIRect& rect) {
+ *(SkIRect*)this->reserve(sizeof(rect)) = rect;
+ }
+
+ void writeRRect(const SkRRect& rrect) {
+ rrect.writeToMemory(this->reserve(SkRRect::kSizeInMemory));
+ }
+
+ void writePath(const SkPath& path) {
+ size_t size = path.writeToMemory(nullptr);
+ SkASSERT(SkAlign4(size) == size);
+ path.writeToMemory(this->reserve(size));
+ }
+
+ void writeMatrix(const SkMatrix& matrix);
+
+ void writeRegion(const SkRegion& rgn) {
+ size_t size = rgn.writeToMemory(nullptr);
+ SkASSERT(SkAlign4(size) == size);
+ rgn.writeToMemory(this->reserve(size));
+ }
+
+ void writeSampling(const SkSamplingOptions& sampling);
+
+ // write count bytes (must be a multiple of 4)
+ void writeMul4(const void* values, size_t size) {
+ this->write(values, size);
+ }
+
+ /**
+ * Write size bytes from values. size must be a multiple of 4, though
+ * values need not be 4-byte aligned.
+ */
+ void write(const void* values, size_t size) {
+ SkASSERT(SkAlign4(size) == size);
+ sk_careful_memcpy(this->reserve(size), values, size);
+ }
+
+ /**
+ * Reserve size bytes. Does not need to be 4 byte aligned. The remaining space (if any) will be
+ * filled in with zeroes.
+ */
+ uint32_t* reservePad(size_t size) {
+ size_t alignedSize = SkAlign4(size);
+ uint32_t* p = this->reserve(alignedSize);
+ if (alignedSize != size) {
+ SkASSERT(alignedSize >= 4);
+ p[alignedSize / 4 - 1] = 0;
+ }
+ return p;
+ }
+
+ /**
+ * Write size bytes from src, and pad to 4 byte alignment with zeroes.
+ */
+ void writePad(const void* src, size_t size) {
+ sk_careful_memcpy(this->reservePad(size), src, size);
+ }
+
+ /**
+ * Writes a string to the writer, which can be retrieved with SkReadBuffer::readString().
+ * The length can be specified, or if -1 is passed, it will be computed by calling strlen().
+ * The length must be < max size_t.
+ *
+ * If you write NULL, it will be read as "".
+ */
+ void writeString(const char* str, size_t len = (size_t)-1);
+
+ /**
+     * Computes the size (aligned to multiple of 4) needed to write the string
+ * in a call to writeString(). If the length is not specified, it will be
+ * computed by calling strlen().
+ */
+ static size_t WriteStringSize(const char* str, size_t len = (size_t)-1);
+
+ void writeData(const SkData* data) {
+ uint32_t len = data ? SkToU32(data->size()) : 0;
+ this->write32(len);
+ if (data) {
+ this->writePad(data->data(), len);
+ }
+ }
+
+ static size_t WriteDataSize(const SkData* data) {
+ return 4 + SkAlign4(data ? data->size() : 0);
+ }
+
+ /**
+ * Move the cursor back to offset bytes from the beginning.
+ * offset must be a multiple of 4 no greater than size().
+ */
+ void rewindToOffset(size_t offset) {
+ SkASSERT(SkAlign4(offset) == offset);
+ SkASSERT(offset <= bytesWritten());
+ fUsed = offset;
+ }
+
+ // copy into a single buffer (allocated by caller). Must be at least size()
+ void flatten(void* dst) const {
+ memcpy(dst, fData, fUsed);
+ }
+
+ bool writeToStream(SkWStream* stream) const {
+ return stream->write(fData, fUsed);
+ }
+
+ // read from the stream, and write up to length bytes. Return the actual
+ // number of bytes written.
+ size_t readFromStream(SkStream* stream, size_t length) {
+ return stream->read(this->reservePad(length), length);
+ }
+
+ /**
+ * Captures a snapshot of the data as it is right now, and return it.
+ */
+ sk_sp<SkData> snapshotAsData() const;
+private:
+ void growToAtLeast(size_t size);
+
+ uint8_t* fData; // Points to either fInternal or fExternal.
+ size_t fCapacity; // Number of bytes we can write to fData.
+ size_t fUsed; // Number of bytes written.
+ void* fExternal; // Unmanaged memory block.
+ skia_private::AutoTMalloc<uint8_t> fInternal; // Managed memory block.
+};
+
+/**
+ * Helper class to allocate SIZE bytes as part of the writer, and to provide
+ * that storage to the constructor as its initial storage buffer.
+ *
+ * This wrapper ensures proper alignment rules are met for the storage.
+ */
+template <size_t SIZE> class SkSWriter32 : public SkWriter32 {
+public:
+ SkSWriter32() { this->reset(); }
+
+ void reset() {this->INHERITED::reset(fData.fStorage, SIZE); }
+
+private:
+ union {
+ void* fPtrAlignment;
+ double fDoubleAlignment;
+ char fStorage[SIZE];
+ } fData;
+
+ using INHERITED = SkWriter32;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkXfermode.cpp b/gfx/skia/skia/src/core/SkXfermode.cpp
new file mode 100644
index 0000000000..99684db1e8
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermode.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkOnce.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/core/SkXfermodePriv.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/effects/GrCustomXfermode.h"
+#include "src/gpu/ganesh/effects/GrPorterDuffXferProcessor.h"
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkProcCoeffXfermode : public SkXfermode {
+public:
+ SkProcCoeffXfermode(SkBlendMode mode) : fMode(mode) {}
+
+ void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const override {
+ SkASSERT(dst && src && count >= 0);
+
+ SkRasterPipeline_<256> p;
+
+ SkRasterPipeline_MemoryCtx dst_ctx = { (void*)dst, 0 },
+ src_ctx = { (void*)src, 0 },
+ aa_ctx = { (void*)aa, 0 };
+
+ p.append_load (kN32_SkColorType, &src_ctx);
+ p.append_load_dst(kN32_SkColorType, &dst_ctx);
+
+ if (SkBlendMode_ShouldPreScaleCoverage(fMode, /*rgb_coverage=*/false)) {
+ if (aa) {
+ p.append(SkRasterPipelineOp::scale_u8, &aa_ctx);
+ }
+ SkBlendMode_AppendStages(fMode, &p);
+ } else {
+ SkBlendMode_AppendStages(fMode, &p);
+ if (aa) {
+ p.append(SkRasterPipelineOp::lerp_u8, &aa_ctx);
+ }
+ }
+
+ p.append_store(kN32_SkColorType, &dst_ctx);
+ p.run(0, 0, count,1);
+ }
+
+private:
+ const SkBlendMode fMode;
+
+ using INHERITED = SkXfermode;
+};
+
+const char* SkBlendMode_Name(SkBlendMode bm) {
+ switch (bm) {
+ case SkBlendMode::kClear: return "Clear";
+ case SkBlendMode::kSrc: return "Src";
+ case SkBlendMode::kDst: return "Dst";
+ case SkBlendMode::kSrcOver: return "SrcOver";
+ case SkBlendMode::kDstOver: return "DstOver";
+ case SkBlendMode::kSrcIn: return "SrcIn";
+ case SkBlendMode::kDstIn: return "DstIn";
+ case SkBlendMode::kSrcOut: return "SrcOut";
+ case SkBlendMode::kDstOut: return "DstOut";
+ case SkBlendMode::kSrcATop: return "SrcATop";
+ case SkBlendMode::kDstATop: return "DstATop";
+ case SkBlendMode::kXor: return "Xor";
+ case SkBlendMode::kPlus: return "Plus";
+ case SkBlendMode::kModulate: return "Modulate";
+ case SkBlendMode::kScreen: return "Screen";
+
+ case SkBlendMode::kOverlay: return "Overlay";
+ case SkBlendMode::kDarken: return "Darken";
+ case SkBlendMode::kLighten: return "Lighten";
+ case SkBlendMode::kColorDodge: return "ColorDodge";
+ case SkBlendMode::kColorBurn: return "ColorBurn";
+ case SkBlendMode::kHardLight: return "HardLight";
+ case SkBlendMode::kSoftLight: return "SoftLight";
+ case SkBlendMode::kDifference: return "Difference";
+ case SkBlendMode::kExclusion: return "Exclusion";
+ case SkBlendMode::kMultiply: return "Multiply";
+
+ case SkBlendMode::kHue: return "Hue";
+ case SkBlendMode::kSaturation: return "Saturation";
+ case SkBlendMode::kColor: return "Color";
+ case SkBlendMode::kLuminosity: return "Luminosity";
+ }
+ SkUNREACHABLE;
+}
+
+sk_sp<SkXfermode> SkXfermode::Make(SkBlendMode mode) {
+ if ((unsigned)mode > (unsigned)SkBlendMode::kLastMode) {
+ // report error
+ return nullptr;
+ }
+
+ // Skia's "default" mode is srcover. nullptr in SkPaint is interpreted as srcover
+ // so we can just return nullptr from the factory.
+ if (SkBlendMode::kSrcOver == mode) {
+ return nullptr;
+ }
+
+ static SkOnce once[kSkBlendModeCount];
+ static SkXfermode* cached[kSkBlendModeCount];
+
+ once[(int)mode]([mode] {
+ if (auto xfermode = SkOpts::create_xfermode(mode)) {
+ cached[(int)mode] = xfermode;
+ } else {
+ cached[(int)mode] = new SkProcCoeffXfermode(mode);
+ }
+ });
+ return sk_ref_sp(cached[(int)mode]);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkXfermode::IsOpaque(SkBlendMode mode, SrcColorOpacity opacityType) {
+ SkBlendModeCoeff src, dst;
+ if (!SkBlendMode_AsCoeff(mode, &src, &dst)) {
+ return false;
+ }
+
+ switch (src) {
+ case SkBlendModeCoeff::kDA:
+ case SkBlendModeCoeff::kDC:
+ case SkBlendModeCoeff::kIDA:
+ case SkBlendModeCoeff::kIDC:
+ return false;
+ default:
+ break;
+ }
+
+ switch (dst) {
+ case SkBlendModeCoeff::kZero:
+ return true;
+ case SkBlendModeCoeff::kISA:
+ return kOpaque_SrcColorOpacity == opacityType;
+ case SkBlendModeCoeff::kSA:
+ return kTransparentBlack_SrcColorOpacity == opacityType ||
+ kTransparentAlpha_SrcColorOpacity == opacityType;
+ case SkBlendModeCoeff::kSC:
+ return kTransparentBlack_SrcColorOpacity == opacityType;
+ default:
+ return false;
+ }
+}
+
+#if defined(SK_GANESH)
+const GrXPFactory* SkBlendMode_AsXPFactory(SkBlendMode mode) {
+ if (SkBlendMode_AsCoeff(mode, nullptr, nullptr)) {
+ const GrXPFactory* result = GrPorterDuffXPFactory::Get(mode);
+ SkASSERT(result);
+ return result;
+ }
+
+ SkASSERT(GrCustomXfermode::IsSupportedMode(mode));
+ return GrCustomXfermode::Get(mode);
+}
+#endif
diff --git a/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp b/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp
new file mode 100644
index 0000000000..607f4c995d
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodeInterpretation.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkXfermodeInterpretation.h"
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkPaint.h"
+
+static bool just_solid_color(const SkPaint& p) {
+ return SK_AlphaOPAQUE == p.getAlpha() && !p.getColorFilter() && !p.getShader();
+}
+
+SkXfermodeInterpretation SkInterpretXfermode(const SkPaint& paint, bool dstIsOpaque) {
+ const auto bm = paint.asBlendMode();
+ if (!bm) {
+ return kNormal_SkXfermodeInterpretation;
+ }
+ switch (bm.value()) {
+ case SkBlendMode::kSrcOver:
+ return kSrcOver_SkXfermodeInterpretation;
+ case SkBlendMode::kSrc:
+ if (just_solid_color(paint)) {
+ return kSrcOver_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kDst:
+ return kSkipDrawing_SkXfermodeInterpretation;
+ case SkBlendMode::kDstOver:
+ if (dstIsOpaque) {
+ return kSkipDrawing_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kSrcIn:
+ if (dstIsOpaque && just_solid_color(paint)) {
+ return kSrcOver_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ case SkBlendMode::kDstIn:
+ if (just_solid_color(paint)) {
+ return kSkipDrawing_SkXfermodeInterpretation;
+ }
+ return kNormal_SkXfermodeInterpretation;
+ default:
+ return kNormal_SkXfermodeInterpretation;
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkXfermodeInterpretation.h b/gfx/skia/skia/src/core/SkXfermodeInterpretation.h
new file mode 100644
index 0000000000..d0a420f383
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodeInterpretation.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermodeInterpretation_DEFINED
+#define SkXfermodeInterpretation_DEFINED
+
+class SkPaint;
+
+/**
+ * By analyzing the paint, we may decide we can take special
+ * action. This enum lists our possible actions.
+ */
+enum SkXfermodeInterpretation {
+ kNormal_SkXfermodeInterpretation, //< draw normally
+ kSrcOver_SkXfermodeInterpretation, //< draw as if in srcover mode
+ kSkipDrawing_SkXfermodeInterpretation //< draw nothing
+};
+
+/**
+ * Given a paint, determine whether the paint's transfer mode can be
+ * replaced with kSrcOver_Mode or not drawn at all. This is used by
+ * SkBlitter and SkPDFDevice.
+ */
+SkXfermodeInterpretation SkInterpretXfermode(const SkPaint&, bool dstIsOpaque);
+
+#endif // SkXfermodeInterpretation_DEFINED
diff --git a/gfx/skia/skia/src/core/SkXfermodePriv.h b/gfx/skia/skia/src/core/SkXfermodePriv.h
new file mode 100644
index 0000000000..0b7a920d8e
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkXfermodePriv.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkXfermodePriv_DEFINED
+#define SkXfermodePriv_DEFINED
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+
+class GrFragmentProcessor;
+class GrTexture;
+class GrXPFactory;
+class SkRasterPipeline;
+class SkString;
+
+class SkXfermode : public SkRefCnt {
+public:
+ virtual void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
+ const SkAlpha aa[]) const = 0;
+
+ /** Return an SkXfermode object for the specified mode.
+ */
+ static sk_sp<SkXfermode> Make(SkBlendMode);
+
+ /**
+ * Skia maintains global xfermode objects corresponding to each BlendMode. This returns a
+ * ptr to that global xfermode (or null if the mode is srcover). Thus the caller may use
+ * the returned ptr, but it should leave its refcnt untouched.
+ */
+ static SkXfermode* Peek(SkBlendMode mode) {
+ sk_sp<SkXfermode> xfer = Make(mode);
+ if (!xfer) {
+ SkASSERT(SkBlendMode::kSrcOver == mode);
+ return nullptr;
+ }
+ SkASSERT(!xfer->unique());
+ return xfer.get();
+ }
+
+ enum SrcColorOpacity {
+ // The src color is known to be opaque (alpha == 255)
+ kOpaque_SrcColorOpacity = 0,
+ // The src color is known to be fully transparent (color == 0)
+ kTransparentBlack_SrcColorOpacity = 1,
+ // The src alpha is known to be fully transparent (alpha == 0)
+ kTransparentAlpha_SrcColorOpacity = 2,
+ // The src color opacity is unknown
+ kUnknown_SrcColorOpacity = 3
+ };
+
+ static bool IsOpaque(SkBlendMode, SrcColorOpacity);
+
+protected:
+ SkXfermode() {}
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkYUVAInfo.cpp b/gfx/skia/skia/src/core/SkYUVAInfo.cpp
new file mode 100644
index 0000000000..17367ce126
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVAInfo.cpp
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkYUVAInfo.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkYUVAInfoLocation.h"
+
+#include <algorithm>
+
+static bool is_plane_config_compatible_with_subsampling(SkYUVAInfo::PlaneConfig config,  // true iff 'config' can legally carry 'subsampling'
+                                                        SkYUVAInfo::Subsampling subsampling) {
+    if (config == SkYUVAInfo::PlaneConfig::kUnknown ||
+        subsampling == SkYUVAInfo::Subsampling::kUnknown) {
+        return false;
+    }
+    return subsampling == SkYUVAInfo::Subsampling::k444 ||  // non-444 requires chroma to live in its own subsampled plane(s),
+           (config != SkYUVAInfo::PlaneConfig::kYUV &&      // so fully interleaved configs are 444-only
+            config != SkYUVAInfo::PlaneConfig::kYUVA &&
+            config != SkYUVAInfo::PlaneConfig::kUYV &&
+            config != SkYUVAInfo::PlaneConfig::kUYVA);
+}
+
+std::tuple<int, int> SkYUVAInfo::SubsamplingFactors(Subsampling subsampling) {  // -> {horizontal, vertical} chroma divisors
+    switch (subsampling) {
+        case Subsampling::kUnknown: return {0, 0};
+        case Subsampling::k444:     return {1, 1};
+        case Subsampling::k422:     return {2, 1};
+        case Subsampling::k420:     return {2, 2};
+        case Subsampling::k440:     return {1, 2};
+        case Subsampling::k411:     return {4, 1};
+        case Subsampling::k410:     return {4, 2};
+    }
+    SkUNREACHABLE;
+}
+
+std::tuple<int, int> SkYUVAInfo::PlaneSubsamplingFactors(PlaneConfig planeConfig,  // -> {horiz, vert} factors for plane 'planeIdx'
+                                                         Subsampling subsampling,
+                                                         int planeIdx) {
+    if (!is_plane_config_compatible_with_subsampling(planeConfig, subsampling) ||
+        planeIdx < 0 ||
+        planeIdx >= NumPlanes(planeConfig)) {  // fixed off-by-one: '>' accepted the one-past-the-end plane index and returned {1,1}
+        return {0, 0};  // invalid query
+    }
+    bool isSubsampledPlane = false;
+    switch (planeConfig) {
+        case PlaneConfig::kUnknown: SkUNREACHABLE;
+
+        case PlaneConfig::kY_U_V:
+        case PlaneConfig::kY_V_U:
+        case PlaneConfig::kY_U_V_A:
+        case PlaneConfig::kY_V_U_A:
+            isSubsampledPlane = planeIdx == 1 || planeIdx == 2;  // separate U and V planes
+            break;
+
+        case PlaneConfig::kY_UV:
+        case PlaneConfig::kY_VU:
+        case PlaneConfig::kY_UV_A:
+        case PlaneConfig::kY_VU_A:
+            isSubsampledPlane = planeIdx == 1;  // single interleaved UV plane
+            break;
+
+        case PlaneConfig::kYUV:
+        case PlaneConfig::kUYV:
+        case PlaneConfig::kYUVA:
+        case PlaneConfig::kUYVA:
+            break;  // fully interleaved (444-only): no plane is subsampled
+    }
+    return isSubsampledPlane ? SubsamplingFactors(subsampling) : std::make_tuple(1, 1);
+}
+
+int SkYUVAInfo::PlaneDimensions(SkISize imageDimensions,
+                                PlaneConfig planeConfig,
+                                Subsampling subsampling,
+                                SkEncodedOrigin origin,
+                                SkISize planeDimensions[SkYUVAInfo::kMaxPlanes]) {  // fills per-plane sizes, returns plane count (0 on invalid input)
+    std::fill_n(planeDimensions, SkYUVAInfo::kMaxPlanes, SkISize{0, 0});
+    if (!is_plane_config_compatible_with_subsampling(planeConfig, subsampling)) {
+        return 0;
+    }
+
+    int w = imageDimensions.width();
+    int h = imageDimensions.height();
+    if (origin >= kLeftTop_SkEncodedOrigin) {  // these origins transpose x/y, so planes are stored with swapped dimensions
+        using std::swap;
+        swap(w, h);
+    }
+    auto down2 = [](int x) { return (x + 1)/2; };  // ceil(x/2)
+    auto down4 = [](int x) { return (x + 3)/4; };  // ceil(x/4)
+    SkISize uvSize;
+    switch (subsampling) {
+        case Subsampling::kUnknown: SkUNREACHABLE;
+
+        case Subsampling::k444: uvSize = {      w ,       h }; break;
+        case Subsampling::k422: uvSize = {down2(w),       h }; break;
+        case Subsampling::k420: uvSize = {down2(w), down2(h)}; break;
+        case Subsampling::k440: uvSize = {      w , down2(h)}; break;
+        case Subsampling::k411: uvSize = {down4(w),       h }; break;
+        case Subsampling::k410: uvSize = {down4(w), down2(h)}; break;
+    }
+    switch (planeConfig) {
+        case PlaneConfig::kUnknown: SkUNREACHABLE;
+
+        case PlaneConfig::kY_U_V:
+        case PlaneConfig::kY_V_U:
+            planeDimensions[0] = {w, h};
+            planeDimensions[1] = planeDimensions[2] = uvSize;
+            return 3;
+
+        case PlaneConfig::kY_UV:
+        case PlaneConfig::kY_VU:
+            planeDimensions[0] = {w, h};
+            planeDimensions[1] = uvSize;
+            return 2;
+
+        case PlaneConfig::kY_U_V_A:
+        case PlaneConfig::kY_V_U_A:
+            planeDimensions[0] = planeDimensions[3] = {w, h};  // alpha plane is always full resolution
+            planeDimensions[1] = planeDimensions[2] = uvSize;
+            return 4;
+
+        case PlaneConfig::kY_UV_A:
+        case PlaneConfig::kY_VU_A:
+            planeDimensions[0] = planeDimensions[2] = {w, h};
+            planeDimensions[1] = uvSize;
+            return 3;
+
+        case PlaneConfig::kYUV:
+        case PlaneConfig::kUYV:
+        case PlaneConfig::kYUVA:
+        case PlaneConfig::kUYVA:
+            planeDimensions[0] = {w, h};
+            SkASSERT(planeDimensions[0] == uvSize);  // interleaved configs imply 444
+            return 1;
+    }
+    SkUNREACHABLE;
+}
+
+static bool channel_index_to_channel(uint32_t channelFlags,  // maps the idx-th channel present in a plane to its SkColorChannel
+                                     int channelIdx,
+                                     SkColorChannel* channel) {
+    switch (channelFlags) {
+        case kGray_SkColorChannelFlag:  // For gray returning any of R, G, or B for index 0 is ok.
+        case kRed_SkColorChannelFlag:
+            if (channelIdx == 0) {
+                *channel = SkColorChannel::kR;
+                return true;
+            }
+            return false;
+        case kGrayAlpha_SkColorChannelFlags:
+            switch (channelIdx) {
+                case 0: *channel = SkColorChannel::kR; return true;
+                case 1: *channel = SkColorChannel::kA; return true;
+
+                default: return false;
+            }
+        case kAlpha_SkColorChannelFlag:
+            if (channelIdx == 0) {
+                *channel = SkColorChannel::kA;
+                return true;
+            }
+            return false;
+        case kRG_SkColorChannelFlags:
+            if (channelIdx == 0 || channelIdx == 1) {
+                *channel = static_cast<SkColorChannel>(channelIdx);  // relies on kR/kG having enum values 0/1
+                return true;
+            }
+            return false;
+        case kRGB_SkColorChannelFlags:
+            if (channelIdx >= 0 && channelIdx <= 2) {
+                *channel = static_cast<SkColorChannel>(channelIdx);
+                return true;
+            }
+            return false;
+        case kRGBA_SkColorChannelFlags:
+            if (channelIdx >= 0 && channelIdx <= 3) {
+                *channel = static_cast<SkColorChannel>(channelIdx);
+                return true;
+            }
+            return false;
+        default:  // any other flag combination has no defined channel ordering
+            return false;
+    }
+}
+
+SkYUVAInfo::YUVALocations SkYUVAInfo::GetYUVALocations(PlaneConfig config,
+                                                       const uint32_t* planeChannelFlags) {
+    // Like YUVALocation but chanIdx refers to channels by index rather than absolute channel, e.g.
+    // A is the 0th channel of an alpha-only texture. We'll use this plus planeChannelFlags to get
+    // the actual channel.
+    struct PlaneAndIndex {int plane, chanIdx;};
+    const PlaneAndIndex* planesAndIndices = nullptr;  // one entry per Y, U, V, A; {-1, -1} means "no alpha"
+    switch (config) {
+        case PlaneConfig::kUnknown:
+            return {};
+
+        case PlaneConfig::kY_U_V: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {1, 0}, {2, 0}, {-1, -1}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kY_V_U: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {2, 0}, {1, 0}, {-1, -1}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kY_UV: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {1, 0}, {1, 1}, {-1, -1}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kY_VU: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {1, 1}, {1, 0}, {-1, -1}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kYUV: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {0, 1}, {0, 2}, {-1, -1}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kUYV: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 1}, {0, 0}, {0, 2}, {-1, -1}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kY_U_V_A: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {1, 0}, {2, 0}, {3, 0}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kY_V_U_A: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {2, 0}, {1, 0}, {3, 0}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kY_UV_A: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {1, 0}, {1, 1}, {2, 0}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kY_VU_A: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {1, 1}, {1, 0}, {2, 0}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kYUVA: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 0}, {0, 1}, {0, 2}, {0, 3}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+        case PlaneConfig::kUYVA: {
+            static constexpr PlaneAndIndex kPlanesAndIndices[] = {{0, 1}, {0, 0}, {0, 2}, {0, 3}};
+            planesAndIndices = kPlanesAndIndices;
+            break;
+        }
+    }
+    SkASSERT(planesAndIndices);
+    YUVALocations yuvaLocations;
+    for (int i = 0; i < SkYUVAInfo::kYUVAChannelCount; ++i) {
+        auto [plane, chanIdx] = planesAndIndices[i];
+        SkColorChannel channel;
+        if (plane >= 0) {
+            if (!channel_index_to_channel(planeChannelFlags[plane], chanIdx, &channel)) {
+                return {};  // the plane's color type doesn't provide the required channel
+            }
+            yuvaLocations[i] = {plane, channel};
+        } else {
+            SkASSERT(i == 3);  // only the alpha entry may be absent
+            yuvaLocations[i] = {-1, SkColorChannel::kR};
+        }
+    }
+    return yuvaLocations;
+}
+
+bool SkYUVAInfo::HasAlpha(PlaneConfig planeConfig) {  // true iff the plane config carries an alpha channel
+    switch (planeConfig) {
+        case PlaneConfig::kUnknown: return false;
+
+        case PlaneConfig::kY_U_V:   return false;
+        case PlaneConfig::kY_V_U:   return false;
+        case PlaneConfig::kY_UV:    return false;
+        case PlaneConfig::kY_VU:    return false;
+        case PlaneConfig::kYUV:     return false;
+        case PlaneConfig::kUYV:     return false;
+
+        case PlaneConfig::kY_U_V_A: return true;
+        case PlaneConfig::kY_V_U_A: return true;
+        case PlaneConfig::kY_UV_A:  return true;
+        case PlaneConfig::kY_VU_A:  return true;
+        case PlaneConfig::kYUVA:    return true;
+        case PlaneConfig::kUYVA:    return true;
+    }
+    SkUNREACHABLE;
+}
+
+SkYUVAInfo::SkYUVAInfo(SkISize dimensions,
+                       PlaneConfig planeConfig,
+                       Subsampling subsampling,
+                       SkYUVColorSpace yuvColorSpace,
+                       SkEncodedOrigin origin,
+                       Siting sitingX,
+                       Siting sitingY)
+        : fDimensions(dimensions)
+        , fPlaneConfig(planeConfig)
+        , fSubsampling(subsampling)
+        , fYUVColorSpace(yuvColorSpace)
+        , fOrigin(origin)
+        , fSitingX(sitingX)
+        , fSitingY(sitingY) {
+    if (fDimensions.isEmpty() ||
+        !is_plane_config_compatible_with_subsampling(planeConfig, subsampling)) {
+        *this = {};  // reset to the default-constructed (invalid) state on bad input
+        SkASSERT(!this->isValid());
+        return;
+    }
+    SkASSERT(this->isValid());
+}
+
+size_t SkYUVAInfo::computeTotalBytes(const size_t rowBytes[kMaxPlanes],
+                                     size_t planeSizes[kMaxPlanes]) const {  // total byte size of all planes; SIZE_MAX on overflow
+    if (!this->isValid()) {
+        return 0;
+    }
+    SkSafeMath safe;  // tracks overflow across all the mul/add below
+    size_t totalBytes = 0;
+    SkISize planeDimensions[kMaxPlanes];
+    int n = this->planeDimensions(planeDimensions);
+    for (int i = 0; i < n; ++i) {
+        SkASSERT(!planeDimensions[i].isEmpty());
+        SkASSERT(rowBytes[i]);
+        size_t size = safe.mul(rowBytes[i], planeDimensions[i].height());
+        if (planeSizes) {
+            planeSizes[i] = size;
+        }
+        totalBytes = safe.add(totalBytes, size);
+    }
+    if (planeSizes) {
+        if (safe.ok()) {
+            for (int i = n; i < kMaxPlanes; ++i) {  // zero the unused tail entries
+                planeSizes[i] = 0;
+            }
+        } else {
+            for (int i = 0; i < kMaxPlanes; ++i) {  // bug fix: condition read 'n < kMaxPlanes' — infinite loop + OOB writes
+                planeSizes[i] = SIZE_MAX;
+            }
+        }
+    }
+
+    return safe.ok() ? totalBytes : SIZE_MAX;
+}
+
+SkYUVAInfo::YUVALocations SkYUVAInfo::toYUVALocations(const uint32_t* channelFlags) const {  // channelFlags: per-plane SkColorChannelFlags
+    return GetYUVALocations(fPlaneConfig, channelFlags);
+}
+
+SkYUVAInfo SkYUVAInfo::makeSubsampling(SkYUVAInfo::Subsampling subsampling) const {  // copy with different subsampling
+    return {fDimensions, fPlaneConfig, subsampling, fYUVColorSpace, fOrigin, fSitingX, fSitingY};
+}
+
+SkYUVAInfo SkYUVAInfo::makeDimensions(SkISize dimensions) const {  // copy with different dimensions
+    return {dimensions, fPlaneConfig, fSubsampling, fYUVColorSpace, fOrigin, fSitingX, fSitingY};
+}
+
+bool SkYUVAInfo::operator==(const SkYUVAInfo& that) const {
+    return fPlaneConfig == that.fPlaneConfig &&
+           fSubsampling == that.fSubsampling &&
+           fYUVColorSpace == that.fYUVColorSpace &&
+           fDimensions == that.fDimensions &&
+           fSitingX == that.fSitingX &&
+           fSitingY == that.fSitingY &&
+           fOrigin == that.fOrigin;
+}
diff --git a/gfx/skia/skia/src/core/SkYUVAInfoLocation.h b/gfx/skia/skia/src/core/SkYUVAInfoLocation.h
new file mode 100644
index 0000000000..5e52eb7400
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVAInfoLocation.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVAInfoLocation_DEFINED
+#define SkYUVAInfoLocation_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkYUVAInfo.h"
+
+#include <algorithm>
+
+/**
+ * The location of Y, U, V, or A values within the planes described by SkYUVAInfo. Computed from a
+ * SkYUVAInfo and the set of channels present in a set of pixmaps/textures.
+ */
+struct SkYUVAInfo::YUVALocation {
+    /** The index of the plane where the Y, U, V, or A value is to be found. */
+    int fPlane = -1;
+    /** The channel in the plane that contains the Y, U, V, or A value. */
+    SkColorChannel fChannel = SkColorChannel::kA;
+
+    bool operator==(const YUVALocation& that) const {
+        return fPlane == that.fPlane && fChannel == that.fChannel;
+    }
+    bool operator!=(const YUVALocation& that) const { return !(*this == that); }
+
+    static bool AreValidLocations(const SkYUVAInfo::YUVALocations& locations,
+                                  int* numPlanes = nullptr) {
+        int maxSlotUsed = -1;
+        bool used[SkYUVAInfo::kMaxPlanes] = {};
+        bool valid = true;
+        for (int i = 0; i < SkYUVAInfo::kYUVAChannelCount; ++i) {
+            if (locations[i].fPlane < 0) {
+                if (i != SkYUVAInfo::YUVAChannels::kA) {
+                    valid = false;  // only the 'A' plane can be omitted
+                }
+            } else if (locations[i].fPlane >= SkYUVAInfo::kMaxPlanes) {
+                valid = false;  // A maximum of four input textures is allowed
+            } else {
+                maxSlotUsed = std::max(locations[i].fPlane, maxSlotUsed);
+                used[locations[i].fPlane] = true;  // bug fix: was 'used[i]', marking channel indices instead of plane slots
+            }
+        }
+
+        // All the used slots should be packed starting at 0 with no gaps
+        for (int i = 0; i <= maxSlotUsed; ++i) {
+            if (!used[i]) {
+                valid = false;
+            }
+        }
+
+        if (numPlanes) {
+            *numPlanes = valid ? maxSlotUsed + 1 : 0;
+        }
+        return valid;
+    }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkYUVAPixmaps.cpp b/gfx/skia/skia/src/core/SkYUVAPixmaps.cpp
new file mode 100644
index 0000000000..aed0aea289
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVAPixmaps.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkYUVAPixmaps.h"
+
+#include "src/base/SkRectMemcpy.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkYUVAInfoLocation.h"
+
+#if defined(SK_GANESH)
+#include "include/private/gpu/ganesh/GrImageContext.h"
+#endif
+
+
+SkYUVAPixmapInfo::SupportedDataTypes::SupportedDataTypes(const GrImageContext& context) {  // probe GPU backend support per data type x channel count
+#if defined(SK_GANESH)
+    for (int n = 1; n <= 4; ++n) {
+        if (context.defaultBackendFormat(DefaultColorTypeForDataType(DataType::kUnorm8, n),
+                                         GrRenderable::kNo).isValid()) {
+            this->enableDataType(DataType::kUnorm8, n);
+        }
+        if (context.defaultBackendFormat(DefaultColorTypeForDataType(DataType::kUnorm16, n),
+                                         GrRenderable::kNo).isValid()) {
+            this->enableDataType(DataType::kUnorm16, n);
+        }
+        if (context.defaultBackendFormat(DefaultColorTypeForDataType(DataType::kFloat16, n),
+                                         GrRenderable::kNo).isValid()) {
+            this->enableDataType(DataType::kFloat16, n);
+        }
+        if (context.defaultBackendFormat(DefaultColorTypeForDataType(DataType::kUnorm10_Unorm2, n),
+                                         GrRenderable::kNo).isValid()) {
+            this->enableDataType(DataType::kUnorm10_Unorm2, n);
+        }
+    }
+#endif
+}
+
+void SkYUVAPixmapInfo::SupportedDataTypes::enableDataType(DataType type, int numChannels) {  // record support for (type, numChannels)
+    if (numChannels < 1 || numChannels > 4) {
+        return;  // out-of-range channel counts are silently ignored
+    }
+    fDataTypeSupport[static_cast<size_t>(type) + (numChannels - 1)*kDataTypeCnt] = true;  // bitset indexed as [numChannels-1][type]
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+std::tuple<int, SkYUVAPixmapInfo::DataType> SkYUVAPixmapInfo::NumChannelsAndDataType(  // -> {channel count, data type}; {0, kUnorm8} if unsupported
+        SkColorType ct) {
+    // We could allow BGR[A] color types, but then we'd have to decide whether B should be the 0th
+    // or 2nd channel. Our docs currently say channel order is always R=0, G=1, B=2[, A=3].
+    switch (ct) {
+        case kAlpha_8_SkColorType:
+        case kGray_8_SkColorType:             return {1, DataType::kUnorm8 };
+        case kA16_unorm_SkColorType:          return {1, DataType::kUnorm16};
+        case kA16_float_SkColorType:          return {1, DataType::kFloat16};
+
+        case kR8G8_unorm_SkColorType:         return {2, DataType::kUnorm8 };
+        case kR16G16_unorm_SkColorType:       return {2, DataType::kUnorm16 };
+        case kR16G16_float_SkColorType:       return {2, DataType::kFloat16 };
+
+        case kRGB_888x_SkColorType:           return {3, DataType::kUnorm8 };
+        case kRGB_101010x_SkColorType:        return {3, DataType::kUnorm10_Unorm2 };
+
+        case kRGBA_8888_SkColorType:          return {4, DataType::kUnorm8 };
+        case kR16G16B16A16_unorm_SkColorType: return {4, DataType::kUnorm16 };
+        case kRGBA_F16_SkColorType:           return {4, DataType::kFloat16 };
+        case kRGBA_F16Norm_SkColorType:       return {4, DataType::kFloat16 };
+        case kRGBA_1010102_SkColorType:       return {4, DataType::kUnorm10_Unorm2 };
+
+        default:                              return {0, DataType::kUnorm8 };
+    }
+}
+
+SkYUVAPixmapInfo::SkYUVAPixmapInfo(const SkYUVAInfo& yuvaInfo,
+                                   const SkColorType colorTypes[kMaxPlanes],
+                                   const size_t rowBytes[kMaxPlanes])  // rowBytes may be null -> tightly packed rows
+        : fYUVAInfo(yuvaInfo) {
+    if (!yuvaInfo.isValid()) {
+        *this = {};  // reset to the default-constructed (invalid) state
+        SkASSERT(!this->isValid());
+        return;
+    }
+    SkISize planeDimensions[4];
+    int n = yuvaInfo.planeDimensions(planeDimensions);
+    size_t tempRowBytes[kMaxPlanes];
+    if (!rowBytes) {
+        for (int i = 0; i < n; ++i) {
+            tempRowBytes[i] = SkColorTypeBytesPerPixel(colorTypes[i]) * planeDimensions[i].width();
+        }
+        rowBytes = tempRowBytes;
+    }
+    bool ok = true;
+    for (size_t i = 0; i < static_cast<size_t>(n); ++i) {
+        fRowBytes[i] = rowBytes[i];
+        // Use kUnpremul so that we never multiply alpha when copying data in.
+        fPlaneInfos[i] = SkImageInfo::Make(planeDimensions[i],
+                                           colorTypes[i],
+                                           kUnpremul_SkAlphaType);
+        int numRequiredChannels = yuvaInfo.numChannelsInPlane(i);
+        SkASSERT(numRequiredChannels > 0);
+        auto [numColorTypeChannels, colorTypeDataType] = NumChannelsAndDataType(colorTypes[i]);
+        ok &= i == 0 || colorTypeDataType == fDataType;  // all planes must share one data type
+        ok &= numColorTypeChannels >= numRequiredChannels;
+        ok &= fPlaneInfos[i].validRowBytes(fRowBytes[i]);
+        fDataType = colorTypeDataType;
+    }
+    if (!ok) {
+        *this = {};
+        SkASSERT(!this->isValid());
+    } else {
+        SkASSERT(this->isValid());
+    }
+}
+
+SkYUVAPixmapInfo::SkYUVAPixmapInfo(const SkYUVAInfo& yuvaInfo,
+                                   DataType dataType,
+                                   const size_t rowBytes[kMaxPlanes]) {  // picks a default color type per plane for 'dataType'
+    SkColorType colorTypes[kMaxPlanes] = {};
+    int numPlanes = yuvaInfo.numPlanes();
+    for (int i = 0; i < numPlanes; ++i) {
+        int numChannels = yuvaInfo.numChannelsInPlane(i);
+        colorTypes[i] = DefaultColorTypeForDataType(dataType, numChannels);
+    }
+    *this = SkYUVAPixmapInfo(yuvaInfo, colorTypes, rowBytes);  // delegate to the validating constructor
+}
+
+bool SkYUVAPixmapInfo::operator==(const SkYUVAPixmapInfo& that) const {
+    bool result = fYUVAInfo   == that.fYUVAInfo &&
+                  fPlaneInfos == that.fPlaneInfos &&
+                  fRowBytes   == that.fRowBytes;
+    SkASSERT(!result || fDataType == that.fDataType);  // data type is derived from the plane infos
+    return result;
+}
+
+size_t SkYUVAPixmapInfo::computeTotalBytes(size_t planeSizes[kMaxPlanes]) const {  // 0 (and zeroed planeSizes) when invalid
+    if (!this->isValid()) {
+        if (planeSizes) {
+            std::fill_n(planeSizes, kMaxPlanes, 0);
+        }
+        return 0;
+    }
+    return fYUVAInfo.computeTotalBytes(fRowBytes.data(), planeSizes);
+}
+
+bool SkYUVAPixmapInfo::initPixmapsFromSingleAllocation(void* memory,  // 'memory' must hold computeTotalBytes() bytes
+                                                       SkPixmap pixmaps[kMaxPlanes]) const {
+    if (!this->isValid()) {
+        return false;
+    }
+    SkASSERT(pixmaps);
+    char* addr = static_cast<char*>(memory);
+    int n = this->numPlanes();
+    for (int i = 0; i < n; ++i) {
+        SkASSERT(fPlaneInfos[i].validRowBytes(fRowBytes[i]));
+        pixmaps[i].reset(fPlaneInfos[i], addr, fRowBytes[i]);
+        size_t planeSize = pixmaps[i].rowBytes()*pixmaps[i].height();
+        SkASSERT(planeSize);
+        addr += planeSize;  // planes are laid out back-to-back in the allocation
+    }
+    for (int i = n; i < kMaxPlanes; ++i) {
+        pixmaps[i] = {};  // clear unused tail entries
+    }
+    return true;
+}
+
+bool SkYUVAPixmapInfo::isSupported(const SupportedDataTypes& supportedDataTypes) const {  // can the GPU backend handle this config?
+    if (!this->isValid()) {
+        return false;
+    }
+    return supportedDataTypes.supported(fYUVAInfo.planeConfig(), fDataType);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+SkColorType SkYUVAPixmaps::RecommendedRGBAColorType(DataType dataType) {  // RGBA color type best matching a plane data type
+    switch (dataType) {
+        case DataType::kUnorm8:         return kRGBA_8888_SkColorType;
+        // F16 has better GPU support than 16 bit unorm. Often "16" bit unorm values are actually
+        // lower precision.
+        case DataType::kUnorm16:        return kRGBA_F16_SkColorType;
+        case DataType::kFloat16:        return kRGBA_F16_SkColorType;
+        case DataType::kUnorm10_Unorm2: return kRGBA_1010102_SkColorType;
+    }
+    SkUNREACHABLE;
+}
+
+SkYUVAPixmaps SkYUVAPixmaps::Allocate(const SkYUVAPixmapInfo& yuvaPixmapInfo) {  // allocates backing storage for all planes
+    if (!yuvaPixmapInfo.isValid()) {
+        return {};
+    }
+    return SkYUVAPixmaps(yuvaPixmapInfo,
+                         SkData::MakeUninitialized(yuvaPixmapInfo.computeTotalBytes()));
+}
+
+SkYUVAPixmaps SkYUVAPixmaps::FromData(const SkYUVAPixmapInfo& yuvaPixmapInfo, sk_sp<SkData> data) {  // wraps caller-provided data
+    if (!yuvaPixmapInfo.isValid()) {
+        return {};
+    }
+    if (yuvaPixmapInfo.computeTotalBytes() > data->size()) {  // NOTE(review): assumes 'data' is non-null — confirm callers guarantee this
+        return {};
+    }
+    return SkYUVAPixmaps(yuvaPixmapInfo, std::move(data));
+}
+
+SkYUVAPixmaps SkYUVAPixmaps::MakeCopy(const SkYUVAPixmaps& src) {  // deep copy of all plane data
+    if (!src.isValid()) {
+        return {};
+    }
+    SkYUVAPixmaps result = Allocate(src.pixmapsInfo());
+    int n = result.numPlanes();
+    for (int i = 0; i < n; ++i) {
+        // We use SkRectMemCpy rather than readPixels to ensure that we don't do any alpha type
+        // conversion.
+        const SkPixmap& s = src.plane(i);
+        const SkPixmap& d = result.plane(i);
+        SkRectMemcpy(d.writable_addr(),
+                     d.rowBytes(),
+                     s.addr(),
+                     s.rowBytes(),
+                     s.info().minRowBytes(),  // copy only the meaningful bytes of each row
+                     s.height());
+    }
+    return result;
+}
+
+SkYUVAPixmaps SkYUVAPixmaps::FromExternalMemory(const SkYUVAPixmapInfo& yuvaPixmapInfo,  // wraps memory the caller keeps alive
+                                                void* memory) {
+    if (!yuvaPixmapInfo.isValid()) {
+        return {};
+    }
+    SkPixmap pixmaps[kMaxPlanes];
+    yuvaPixmapInfo.initPixmapsFromSingleAllocation(memory, pixmaps);
+    return SkYUVAPixmaps(yuvaPixmapInfo.yuvaInfo(), yuvaPixmapInfo.dataType(), pixmaps);
+}
+
+SkYUVAPixmaps SkYUVAPixmaps::FromExternalPixmaps(const SkYUVAInfo& yuvaInfo,  // wraps caller-owned pixmaps after validating them
+                                                 const SkPixmap pixmaps[kMaxPlanes]) {
+    SkColorType colorTypes[kMaxPlanes] = {};
+    size_t rowBytes[kMaxPlanes] = {};
+    int numPlanes = yuvaInfo.numPlanes();
+    for (int i = 0; i < numPlanes; ++i) {
+        colorTypes[i] = pixmaps[i].colorType();
+        rowBytes[i] = pixmaps[i].rowBytes();
+    }
+    SkYUVAPixmapInfo yuvaPixmapInfo(yuvaInfo, colorTypes, rowBytes);  // re-validate the combination
+    if (!yuvaPixmapInfo.isValid()) {
+        return {};
+    }
+    return SkYUVAPixmaps(yuvaInfo, yuvaPixmapInfo.dataType(), pixmaps);
+}
+
+SkYUVAPixmaps::SkYUVAPixmaps(const SkYUVAPixmapInfo& yuvaPixmapInfo, sk_sp<SkData> data)  // takes ownership of 'data'
+        : fData(std::move(data))
+        , fYUVAInfo(yuvaPixmapInfo.yuvaInfo())
+        , fDataType(yuvaPixmapInfo.dataType()) {
+    SkASSERT(yuvaPixmapInfo.isValid());
+    SkASSERT(yuvaPixmapInfo.computeTotalBytes() <= fData->size());
+    SkAssertResult(yuvaPixmapInfo.initPixmapsFromSingleAllocation(fData->writable_data(),
+                                                                  fPlanes.data()));
+}
+
+SkYUVAPixmaps::SkYUVAPixmaps(const SkYUVAInfo& yuvaInfo,  // non-owning: pixmaps reference external storage
+                             DataType dataType,
+                             const SkPixmap pixmaps[kMaxPlanes])
+        : fYUVAInfo(yuvaInfo), fDataType(dataType) {
+    std::copy_n(pixmaps, yuvaInfo.numPlanes(), fPlanes.data());
+}
+
+SkYUVAPixmapInfo SkYUVAPixmaps::pixmapsInfo() const {  // reconstruct the SkYUVAPixmapInfo describing these planes
+    if (!this->isValid()) {
+        return {};
+    }
+    SkColorType colorTypes[kMaxPlanes] = {};
+    size_t rowBytes[kMaxPlanes] = {};
+    int numPlanes = this->numPlanes();
+    for (int i = 0; i < numPlanes; ++i) {
+        colorTypes[i] = fPlanes[i].colorType();
+        rowBytes[i] = fPlanes[i].rowBytes();
+    }
+    return {fYUVAInfo, colorTypes, rowBytes};
+}
+
+SkYUVAInfo::YUVALocations SkYUVAPixmaps::toYUVALocations() const {  // map Y/U/V/A to concrete plane+channel for these pixmaps
+    uint32_t channelFlags[] = {SkColorTypeChannelFlags(fPlanes[0].colorType()),
+                               SkColorTypeChannelFlags(fPlanes[1].colorType()),
+                               SkColorTypeChannelFlags(fPlanes[2].colorType()),
+                               SkColorTypeChannelFlags(fPlanes[3].colorType())};
+    auto result = fYUVAInfo.toYUVALocations(channelFlags);
+    SkDEBUGCODE(int numPlanes;)  // debug-only; the asserts below compile out in release builds
+    SkASSERT(SkYUVAInfo::YUVALocation::AreValidLocations(result, &numPlanes));
+    SkASSERT(numPlanes == this->numPlanes());
+    return result;
+}
diff --git a/gfx/skia/skia/src/core/SkYUVMath.cpp b/gfx/skia/skia/src/core/SkYUVMath.cpp
new file mode 100644
index 0000000000..2debe75925
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVMath.cpp
@@ -0,0 +1,339 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkM44.h"
+#include "src/core/SkYUVMath.h"
+
+// in SkColorMatrix order (row-major)
+// Created by running SkColorMatrix_DumpYUVMatrixTables()
+const float JPEG_full_rgb_to_yuv[] = {
+ 0.299000f, 0.587000f, 0.114000f, 0.000000f, 0.000000f,
+ -0.168736f, -0.331264f, 0.500000f, 0.000000f, 0.501961f,
+ 0.500000f, -0.418688f, -0.081312f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float JPEG_full_yuv_to_rgb[] = {
+ 1.000000f, -0.000000f, 1.402000f, 0.000000f, -0.703749f,
+ 1.000000f, -0.344136f, -0.714136f, 0.000000f, 0.531211f,
+ 1.000000f, 1.772000f, 0.000000f, 0.000000f, -0.889475f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec601_limited_rgb_to_yuv[] = {
+ 0.256788f, 0.504129f, 0.097906f, 0.000000f, 0.062745f,
+ -0.148223f, -0.290993f, 0.439216f, 0.000000f, 0.501961f,
+ 0.439216f, -0.367788f, -0.071427f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec601_limited_yuv_to_rgb[] = {
+ 1.164384f, -0.000000f, 1.596027f, 0.000000f, -0.874202f,
+ 1.164384f, -0.391762f, -0.812968f, 0.000000f, 0.531668f,
+ 1.164384f, 2.017232f, 0.000000f, 0.000000f, -1.085631f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec709_full_rgb_to_yuv[] = {
+ 0.212600f, 0.715200f, 0.072200f, 0.000000f, 0.000000f,
+ -0.114572f, -0.385428f, 0.500000f, 0.000000f, 0.501961f,
+ 0.500000f, -0.454153f, -0.045847f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec709_full_yuv_to_rgb[] = {
+ 1.000000f, -0.000000f, 1.574800f, 0.000000f, -0.790488f,
+ 1.000000f, -0.187324f, -0.468124f, 0.000000f, 0.329010f,
+ 1.000000f, 1.855600f, -0.000000f, 0.000000f, -0.931439f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec709_limited_rgb_to_yuv[] = {
+ 0.182586f, 0.614231f, 0.062007f, 0.000000f, 0.062745f,
+ -0.100644f, -0.338572f, 0.439216f, 0.000000f, 0.501961f,
+ 0.439216f, -0.398942f, -0.040274f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float Rec709_limited_yuv_to_rgb[] = {
+ 1.164384f, -0.000000f, 1.792741f, 0.000000f, -0.972945f,
+ 1.164384f, -0.213249f, -0.532909f, 0.000000f, 0.301483f,
+ 1.164384f, 2.112402f, -0.000000f, 0.000000f, -1.133402f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_8bit_full_rgb_to_yuv[] = {
+ 0.262700f, 0.678000f, 0.059300f, 0.000000f, 0.000000f,
+ -0.139630f, -0.360370f, 0.500000f, 0.000000f, 0.501961f,
+ 0.500000f, -0.459786f, -0.040214f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_8bit_full_yuv_to_rgb[] = {
+ 1.000000f, -0.000000f, 1.474600f, 0.000000f, -0.740191f,
+ 1.000000f, -0.164553f, -0.571353f, 0.000000f, 0.369396f,
+ 1.000000f, 1.881400f, -0.000000f, 0.000000f, -0.944389f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_8bit_limited_rgb_to_yuv[] = {
+ 0.225613f, 0.582282f, 0.050928f, 0.000000f, 0.062745f,
+ -0.122655f, -0.316560f, 0.439216f, 0.000000f, 0.501961f,
+ 0.439216f, -0.403890f, -0.035326f, 0.000000f, 0.501961f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_8bit_limited_yuv_to_rgb[] = {
+ 1.164384f, -0.000000f, 1.678674f, 0.000000f, -0.915688f,
+ 1.164384f, -0.187326f, -0.650424f, 0.000000f, 0.347458f,
+ 1.164384f, 2.141772f, -0.000000f, 0.000000f, -1.148145f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_10bit_full_rgb_to_yuv[] = {
+ 0.262700f, 0.678000f, 0.059300f, 0.000000f, 0.000000f,
+ -0.139630f, -0.360370f, 0.500000f, 0.000000f, 0.500489f,
+ 0.500000f, -0.459786f, -0.040214f, 0.000000f, 0.500489f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_10bit_full_yuv_to_rgb[] = {
+ 1.000000f, -0.000000f, 1.474600f, 0.000000f, -0.738021f,
+ 1.000000f, -0.164553f, -0.571353f, 0.000000f, 0.368313f,
+ 1.000000f, 1.881400f, -0.000000f, 0.000000f, -0.941620f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_10bit_limited_rgb_to_yuv[] = {
+ 0.224951f, 0.580575f, 0.050779f, 0.000000f, 0.062561f,
+ -0.122296f, -0.315632f, 0.437928f, 0.000000f, 0.500489f,
+ 0.437928f, -0.402706f, -0.035222f, 0.000000f, 0.500489f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_10bit_limited_yuv_to_rgb[] = {
+ 1.167808f, -0.000000f, 1.683611f, 0.000000f, -0.915688f,
+ 1.167808f, -0.187877f, -0.652337f, 0.000000f, 0.347458f,
+ 1.167808f, 2.148072f, -0.000000f, 0.000000f, -1.148145f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_12bit_full_rgb_to_yuv[] = {
+ 0.262700f, 0.678000f, 0.059300f, 0.000000f, 0.000000f,
+ -0.139630f, -0.360370f, 0.500000f, 0.000000f, 0.500122f,
+ 0.500000f, -0.459786f, -0.040214f, 0.000000f, 0.500122f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_12bit_full_yuv_to_rgb[] = {
+ 1.000000f, -0.000000f, 1.474600f, 0.000000f, -0.737480f,
+ 1.000000f, -0.164553f, -0.571353f, 0.000000f, 0.368043f,
+ 1.000000f, 1.881400f, -0.000000f, 0.000000f, -0.940930f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_12bit_limited_rgb_to_yuv[] = {
+ 0.224787f, 0.580149f, 0.050742f, 0.000000f, 0.062515f,
+ -0.122206f, -0.315401f, 0.437607f, 0.000000f, 0.500122f,
+ 0.437607f, -0.402411f, -0.035196f, 0.000000f, 0.500122f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+const float BT2020_12bit_limited_yuv_to_rgb[] = {
+ 1.168664f, -0.000000f, 1.684846f, 0.000000f, -0.915688f,
+ 1.168664f, -0.188015f, -0.652816f, 0.000000f, 0.347458f,
+ 1.168664f, 2.149647f, -0.000000f, 0.000000f, -1.148145f,
+ 0.000000f, 0.000000f, 0.000000f, 1.000000f, 0.000000f,
+};
+
+static_assert(kJPEG_Full_SkYUVColorSpace == 0, "");  // the table order below must match the SkYUVColorSpace enum values
+static_assert(kRec601_Limited_SkYUVColorSpace == 1, "");
+static_assert(kRec709_Full_SkYUVColorSpace == 2, "");
+static_assert(kRec709_Limited_SkYUVColorSpace == 3, "");
+static_assert(kBT2020_8bit_Full_SkYUVColorSpace == 4, "");
+static_assert(kBT2020_8bit_Limited_SkYUVColorSpace == 5, "");
+static_assert(kBT2020_10bit_Full_SkYUVColorSpace == 6, "");
+static_assert(kBT2020_10bit_Limited_SkYUVColorSpace == 7, "");
+static_assert(kBT2020_12bit_Full_SkYUVColorSpace == 8, "");
+static_assert(kBT2020_12bit_Limited_SkYUVColorSpace == 9, "");
+
+const float* yuv_to_rgb_array[] = {  // indexed by SkYUVColorSpace (identity handled separately in the lookup functions)
+    JPEG_full_yuv_to_rgb,
+    Rec601_limited_yuv_to_rgb,
+    Rec709_full_yuv_to_rgb,
+    Rec709_limited_yuv_to_rgb,
+    BT2020_8bit_full_yuv_to_rgb,
+    BT2020_8bit_limited_yuv_to_rgb,
+    BT2020_10bit_full_yuv_to_rgb,
+    BT2020_10bit_limited_yuv_to_rgb,
+    BT2020_12bit_full_yuv_to_rgb,
+    BT2020_12bit_limited_yuv_to_rgb,
+};
+
+const float* rgb_to_yuv_array[] = {  // indexed by SkYUVColorSpace (identity handled separately in the lookup functions)
+    JPEG_full_rgb_to_yuv,
+    Rec601_limited_rgb_to_yuv,
+    Rec709_full_rgb_to_yuv,
+    Rec709_limited_rgb_to_yuv,
+    BT2020_8bit_full_rgb_to_yuv,
+    BT2020_8bit_limited_rgb_to_yuv,
+    BT2020_10bit_full_rgb_to_yuv,
+    BT2020_10bit_limited_rgb_to_yuv,
+    BT2020_12bit_full_rgb_to_yuv,
+    BT2020_12bit_limited_rgb_to_yuv,
+};
+
+constexpr size_t kSizeOfColorMatrix = 20 * sizeof(float);  // an SkColorMatrix is a 4x5 row-major float array
+
+void SkColorMatrix_RGB2YUV(SkYUVColorSpace cs, float m[20]) {  // writes the RGB->YUV color matrix for 'cs' into m
+    if ((unsigned)cs < (unsigned)kIdentity_SkYUVColorSpace) {
+        memcpy(m, rgb_to_yuv_array[(unsigned)cs], kSizeOfColorMatrix);
+    } else {
+        memset(m, 0, kSizeOfColorMatrix);
+        m[0] = m[6] = m[12] = m[18] = 1;  // identity matrix for kIdentity (and any out-of-range value)
+    }
+}
+
+void SkColorMatrix_YUV2RGB(SkYUVColorSpace cs, float m[20]) {  // writes the YUV->RGB color matrix for 'cs' into m
+    if ((unsigned)cs < (unsigned)kIdentity_SkYUVColorSpace) {
+        memcpy(m, yuv_to_rgb_array[(unsigned)cs], kSizeOfColorMatrix);
+    } else {
+        memset(m, 0, kSizeOfColorMatrix);
+        m[0] = m[6] = m[12] = m[18] = 1;  // identity
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// we just drop the alpha row/col from the colormatrix
+// output is | tr |
+// | 3x3 tg |
+// | tb |
+// | 0 0 0 1 |
+static void colormatrix_to_matrix44(const float src[20], SkM44* dst) {
+    *dst = SkM44(src[ 0], src[ 1], src[ 2], src[ 4],  // alpha column (index 3) dropped; translate column (index 4) kept
+                 src[ 5], src[ 6], src[ 7], src[ 9],
+                 src[10], src[11], src[12], src[14],
+                 0,       0,       0,       1);
+}
+
+// input: ignore the bottom row
+// output: inject identity row/column for alpha
+static void matrix44_to_colormatrix(const SkM44& src, float dst[20]) {
+    dst[0] = src.rc(0,0);
+    dst[1] = src.rc(0,1);
+    dst[2] = src.rc(0,2);
+    dst[3] = 0;  // alpha has no effect on red
+    dst[4] = src.rc(0,3); // tx
+
+    dst[5] = src.rc(1,0);
+    dst[6] = src.rc(1,1);
+    dst[7] = src.rc(1,2);
+    dst[8] = 0;
+    dst[9] = src.rc(1,3); // ty
+
+    dst[10] = src.rc(2,0);
+    dst[11] = src.rc(2,1);
+    dst[12] = src.rc(2,2);
+    dst[13] = 0;
+    dst[14] = src.rc(2,3); // tz
+
+    dst[15] = dst[16] = dst[17] = dst[19] = 0;  // alpha row: identity, no translate
+    dst[18] = 1;
+}
+
+static void scale3(float m[], float s) {  // scales the first three entries of a matrix row in place
+    for (int i = 0; i < 3; ++i) {
+        m[i] *= s;
+    }
+}
+
+namespace {
+enum Range { kFull, kLimited };
+struct YUVCoeff {
+ float Kr, Kb;
+ int bits;
+ Range range;
+};
+
+const YUVCoeff gCoeff[] = {
+ { 0.2990f, 0.1140f, 8, kFull }, // kJPEG_Full_SkYUVColorSpace
+ { 0.2990f, 0.1140f, 8, kLimited }, // kRec601_Limited_SkYUVColorSpace
+ { 0.2126f, 0.0722f, 8, kFull }, // kRec709_Full_SkYUVColorSpace
+ { 0.2126f, 0.0722f, 8, kLimited }, // kRec709_Limited_SkYUVColorSpace
+ { 0.2627f, 0.0593f, 8, kFull }, // kBT2020_8bit_Full_SkYUVColorSpace
+ { 0.2627f, 0.0593f, 8, kLimited }, // kBT2020_8bit_Limited_SkYUVColorSpace
+ { 0.2627f, 0.0593f, 10, kFull }, // kBT2020_10bit_Full_SkYUVColorSpace
+ { 0.2627f, 0.0593f, 10, kLimited }, // kBT2020_10bit_Limited_SkYUVColorSpace
+ { 0.2627f, 0.0593f, 12, kFull }, // kBT2020_12bit_Full_SkYUVColorSpace
+ { 0.2627f, 0.0593f, 12, kLimited }, // kBT2020_12bit_Limited_SkYUVColorSpace
+};
+} // namespace
+
+static void make_rgb_to_yuv_matrix(float mx[20], const YUVCoeff& c) {
+ SkASSERT(c.bits >= 8);
+ const float Kr = c.Kr;
+ const float Kb = c.Kb;
+ const float Kg = 1.0f - Kr - Kb;
+ const float Cr = 0.5f / (1.0f - Kb);
+ const float Cb = 0.5f / (1.0f - Kr);
+
+ const int shift = c.bits - 8;
+
+ const float denom = static_cast<float>((1 << c.bits) - 1);
+ float scaleY = 1.0f,
+ addY = 0.0f,
+ scaleUV = 1.0f,
+ addUV = (128 << shift) / denom;
+
+ if (c.range == kLimited) {
+ scaleY = (219 << shift) / denom;
+ addY = ( 16 << shift) / denom;
+ scaleUV = (224 << shift) / denom;
+ }
+
+ float m[20] = {
+ Kr, Kg, Kb, 0, addY,
+ -Kr, -Kg, 1-Kb, 0, addUV,
+ 1-Kr, -Kg, -Kb, 0, addUV,
+ 0, 0, 0, 1, 0,
+ };
+ memcpy(mx, m, sizeof(m));
+ scale3(mx + 0, scaleY );
+ scale3(mx + 5, Cr * scaleUV);
+ scale3(mx + 10, Cb * scaleUV);
+}
+
+static void dump(const float m[20], SkYUVColorSpace cs, bool rgb2yuv) {
+ const char* names[] = {
+ "JPEG_full",
+ "Rec601_limited",
+ "Rec709_full",
+ "Rec709_limited",
+ "BT2020_8bit_full",
+ "BT2020_8bit_limited",
+ "BT2020_10bit_full",
+ "BT2020_10bit_limited",
+ "BT2020_12bit_full",
+ "BT2020_12bit_limited",
+ };
+ const char* dirnames[] = {
+ "yuv_to_rgb", "rgb_to_yuv",
+ };
+ SkDebugf("const float %s_%s[] = {\n", names[cs], dirnames[rgb2yuv]);
+ for (int i = 0; i < 4; ++i) {
+ SkDebugf(" ");
+ for (int j = 0; j < 5; ++j) {
+ SkDebugf(" %9.6ff,", m[i * 5 + j]);
+ }
+ SkDebugf("\n");
+ }
+ SkDebugf("};\n");
+}
+
+// Used to create the prebuilt tables for each colorspace.
+// Don't remove this function, in case we want to recompute those tables in the future.
+void SkColorMatrix_DumpYUVMatrixTables() {
+ for (int i = 0; i < kLastEnum_SkYUVColorSpace; ++i) {
+ SkYUVColorSpace cs = static_cast<SkYUVColorSpace>(i);
+ float m[20];
+ make_rgb_to_yuv_matrix(m, gCoeff[(unsigned)cs]);
+ dump(m, cs, true);
+ SkM44 m44, im44;
+ colormatrix_to_matrix44(m, &m44);
+ float im[20];
+#ifdef SK_DEBUG
+    // be sure our conversion between matrix44 and colormatrix is perfect
+ matrix44_to_colormatrix(m44, im);
+ SkASSERT(memcmp(m, im, sizeof(im)) == 0);
+#endif
+ SkAssertResult(m44.invert(&im44));
+ matrix44_to_colormatrix(im44, im);
+ dump(im, cs, false);
+ }
+}
diff --git a/gfx/skia/skia/src/core/SkYUVMath.h b/gfx/skia/skia/src/core/SkYUVMath.h
new file mode 100644
index 0000000000..9ecd2c8366
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVMath.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVMath_DEFINED
+#define SkYUVMath_DEFINED
+
+#include "include/core/SkImageInfo.h"
+
+void SkColorMatrix_RGB2YUV(SkYUVColorSpace, float m[20]);
+void SkColorMatrix_YUV2RGB(SkYUVColorSpace, float m[20]);
+
+// Used to create the pre-compiled tables in SkYUVMath.cpp
+void SkColorMatrix_DumpYUVMatrixTables();
+
+#endif
diff --git a/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp b/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp
new file mode 100644
index 0000000000..3f33fce699
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVPlanesCache.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkYUVPlanesCache.h"
+
+#include "include/core/SkYUVAPixmaps.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkResourceCache.h"
+
+#define CHECK_LOCAL(localCache, localName, globalName, ...) \
+ ((localCache) ? localCache->localName(__VA_ARGS__) : SkResourceCache::globalName(__VA_ARGS__))
+
+namespace {
+static unsigned gYUVPlanesKeyNamespaceLabel;
+
+struct YUVValue {
+ SkYUVAPixmaps fPixmaps;
+ SkCachedData* fData;
+};
+
+struct YUVPlanesKey : public SkResourceCache::Key {
+ YUVPlanesKey(uint32_t genID)
+ : fGenID(genID)
+ {
+ this->init(&gYUVPlanesKeyNamespaceLabel, SkMakeResourceCacheSharedIDForBitmap(genID),
+ sizeof(genID));
+ }
+
+ uint32_t fGenID;
+};
+
+struct YUVPlanesRec : public SkResourceCache::Rec {
+ YUVPlanesRec(YUVPlanesKey key, SkCachedData* data, const SkYUVAPixmaps& pixmaps)
+ : fKey(key)
+ {
+ fValue.fData = data;
+ fValue.fPixmaps = pixmaps;
+ fValue.fData->attachToCacheAndRef();
+ }
+ ~YUVPlanesRec() override {
+ fValue.fData->detachFromCacheAndUnref();
+ }
+
+ YUVPlanesKey fKey;
+ YUVValue fValue;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override { return sizeof(*this) + fValue.fData->size(); }
+ const char* getCategory() const override { return "yuv-planes"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override {
+ return fValue.fData->diagnostic_only_getDiscardable();
+ }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextData) {
+ const YUVPlanesRec& rec = static_cast<const YUVPlanesRec&>(baseRec);
+ YUVValue* result = static_cast<YUVValue*>(contextData);
+
+ SkCachedData* tmpData = rec.fValue.fData;
+ tmpData->ref();
+ if (nullptr == tmpData->data()) {
+ tmpData->unref();
+ return false;
+ }
+ result->fData = tmpData;
+ result->fPixmaps = rec.fValue.fPixmaps;
+ return true;
+ }
+};
+} // namespace
+
+SkCachedData* SkYUVPlanesCache::FindAndRef(uint32_t genID,
+ SkYUVAPixmaps* pixmaps,
+ SkResourceCache* localCache) {
+ YUVValue result;
+ YUVPlanesKey key(genID);
+ if (!CHECK_LOCAL(localCache, find, Find, key, YUVPlanesRec::Visitor, &result)) {
+ return nullptr;
+ }
+
+ *pixmaps = result.fPixmaps;
+ return result.fData;
+}
+
+void SkYUVPlanesCache::Add(uint32_t genID, SkCachedData* data, const SkYUVAPixmaps& pixmaps,
+ SkResourceCache* localCache) {
+ YUVPlanesKey key(genID);
+ return CHECK_LOCAL(localCache, add, Add, new YUVPlanesRec(key, data, pixmaps));
+}
diff --git a/gfx/skia/skia/src/core/SkYUVPlanesCache.h b/gfx/skia/skia/src/core/SkYUVPlanesCache.h
new file mode 100644
index 0000000000..dfe535f679
--- /dev/null
+++ b/gfx/skia/skia/src/core/SkYUVPlanesCache.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkYUVPlanesCache_DEFINED
+#define SkYUVPlanesCache_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkCachedData;
+class SkResourceCache;
+class SkYUVAPixmaps;
+
+class SkYUVPlanesCache {
+public:
+ /**
+ * On success, return a ref to the SkCachedData that holds the pixel data. The SkYUVAPixmaps
+ * contains a description of the YUVA data and has a SkPixmap for each plane that points
+ * into the SkCachedData.
+ *
+ * On failure, return nullptr.
+ */
+ static SkCachedData* FindAndRef(uint32_t genID,
+ SkYUVAPixmaps* pixmaps,
+ SkResourceCache* localCache = nullptr);
+
+ /**
+ * Add a pixelRef ID and its YUV planes data to the cache. The SkYUVAPixmaps should contain
+ * SkPixmaps that store their pixel data in the SkCachedData.
+ */
+ static void Add(uint32_t genID, SkCachedData* data, const SkYUVAPixmaps& pixmaps,
+ SkResourceCache* localCache = nullptr);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp b/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp
new file mode 100644
index 0000000000..98da0d4c8e
--- /dev/null
+++ b/gfx/skia/skia/src/effects/Sk1DPathEffect.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/Sk1DPathEffect.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkPathEffectBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+struct SkRect;
+
+// Since we are stepping by a float, the do/while loop might go on forever (or nearly so).
+// Put in a governor to limit crash values from looping too long (and allocating too much ram).
+#define MAX_REASONABLE_ITERATIONS 100000
+
+class Sk1DPathEffect : public SkPathEffectBase {
+public:
+protected:
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const override {
+ SkPathMeasure meas(src, false);
+ do {
+ int governor = MAX_REASONABLE_ITERATIONS;
+ SkScalar length = meas.getLength();
+ SkScalar distance = this->begin(length);
+ while (distance < length && --governor >= 0) {
+ SkScalar delta = this->next(dst, distance, meas);
+ if (delta <= 0) {
+ break;
+ }
+ distance += delta;
+ }
+ if (governor < 0) {
+ return false;
+ }
+ } while (meas.nextContour());
+ return true;
+ }
+
+ /** Called at the start of each contour, returns the initial offset
+ into that contour.
+ */
+ virtual SkScalar begin(SkScalar contourLength) const = 0;
+ /** Called with the current distance along the path, with the current matrix
+ for the point/tangent at the specified distance.
+ Return the distance to travel for the next call. If return <= 0, then that
+ contour is done.
+ */
+ virtual SkScalar next(SkPath* dst, SkScalar dist, SkPathMeasure&) const = 0;
+
+private:
+ // For simplicity, assume fast bounds cannot be computed
+ bool computeFastBounds(SkRect*) const override { return false; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkPath1DPathEffectImpl : public Sk1DPathEffect {
+public:
+ SkPath1DPathEffectImpl(const SkPath& path, SkScalar advance, SkScalar phase,
+ SkPath1DPathEffect::Style style) : fPath(path) {
+ SkASSERT(advance > 0 && !path.isEmpty());
+
+ // Make the path thread-safe.
+ fPath.updateBoundsCache();
+ (void)fPath.getGenerationID();
+
+ // cleanup their phase parameter, inverting it so that it becomes an
+ // offset along the path (to match the interpretation in PostScript)
+ if (phase < 0) {
+ phase = -phase;
+ if (phase > advance) {
+ phase = SkScalarMod(phase, advance);
+ }
+ } else {
+ if (phase > advance) {
+ phase = SkScalarMod(phase, advance);
+ }
+ phase = advance - phase;
+ }
+ // now catch the edge case where phase == advance (within epsilon)
+ if (phase >= advance) {
+ phase = 0;
+ }
+ SkASSERT(phase >= 0);
+
+ fAdvance = advance;
+ fInitialOffset = phase;
+ fStyle = style;
+ }
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkMatrix& ctm) const override {
+ rec->setFillStyle();
+ return this->INHERITED::onFilterPath(dst, src, rec, cullRect, ctm);
+ }
+
+ SkScalar begin(SkScalar contourLength) const override {
+ return fInitialOffset;
+ }
+
+ SkScalar next(SkPath*, SkScalar, SkPathMeasure&) const override;
+
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer& buffer) {
+ SkScalar advance = buffer.readScalar();
+ SkPath path;
+ buffer.readPath(&path);
+ SkScalar phase = buffer.readScalar();
+ SkPath1DPathEffect::Style style = buffer.read32LE(SkPath1DPathEffect::kLastEnum_Style);
+ return buffer.isValid() ? SkPath1DPathEffect::Make(path, advance, phase, style) : nullptr;
+ }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeScalar(fAdvance);
+ buffer.writePath(fPath);
+ buffer.writeScalar(fInitialOffset);
+ buffer.writeUInt(fStyle);
+ }
+
+ Factory getFactory() const override { return CreateProc; }
+ const char* getTypeName() const override { return "SkPath1DPathEffect"; }
+
+private:
+ SkPath fPath; // copied from constructor
+ SkScalar fAdvance; // copied from constructor
+ SkScalar fInitialOffset; // computed from phase
+ SkPath1DPathEffect::Style fStyle; // copied from constructor
+
+ using INHERITED = Sk1DPathEffect;
+};
+
+static bool morphpoints(SkPoint dst[], const SkPoint src[], int count,
+ SkPathMeasure& meas, SkScalar dist) {
+ for (int i = 0; i < count; i++) {
+ SkPoint pos;
+ SkVector tangent;
+
+ SkScalar sx = src[i].fX;
+ SkScalar sy = src[i].fY;
+
+ if (!meas.getPosTan(dist + sx, &pos, &tangent)) {
+ return false;
+ }
+
+ SkMatrix matrix;
+ SkPoint pt;
+
+ pt.set(sx, sy);
+ matrix.setSinCos(tangent.fY, tangent.fX, 0, 0);
+ matrix.preTranslate(-sx, 0);
+ matrix.postTranslate(pos.fX, pos.fY);
+ matrix.mapPoints(&dst[i], &pt, 1);
+ }
+ return true;
+}
+
+/* TODO
+
+Need differentially more subdivisions when the follow-path is curvy. Not sure how to
+determine that, but we need it. I guess a cheap answer is let the caller tell us,
+but that seems like a cop-out. Another answer is to get Rob Johnson to figure it out.
+*/
+static void morphpath(SkPath* dst, const SkPath& src, SkPathMeasure& meas,
+ SkScalar dist) {
+ SkPath::Iter iter(src, false);
+ SkPoint srcP[4], dstP[3];
+ SkPath::Verb verb;
+
+ while ((verb = iter.next(srcP)) != SkPath::kDone_Verb) {
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (morphpoints(dstP, srcP, 1, meas, dist)) {
+ dst->moveTo(dstP[0]);
+ }
+ break;
+ case SkPath::kLine_Verb:
+ srcP[2] = srcP[1];
+ srcP[1].set(SkScalarAve(srcP[0].fX, srcP[2].fX),
+ SkScalarAve(srcP[0].fY, srcP[2].fY));
+ [[fallthrough]];
+ case SkPath::kQuad_Verb:
+ if (morphpoints(dstP, &srcP[1], 2, meas, dist)) {
+ dst->quadTo(dstP[0], dstP[1]);
+ }
+ break;
+ case SkPath::kConic_Verb:
+ if (morphpoints(dstP, &srcP[1], 2, meas, dist)) {
+ dst->conicTo(dstP[0], dstP[1], iter.conicWeight());
+ }
+ break;
+ case SkPath::kCubic_Verb:
+ if (morphpoints(dstP, &srcP[1], 3, meas, dist)) {
+ dst->cubicTo(dstP[0], dstP[1], dstP[2]);
+ }
+ break;
+ case SkPath::kClose_Verb:
+ dst->close();
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ break;
+ }
+ }
+}
+
+SkScalar SkPath1DPathEffectImpl::next(SkPath* dst, SkScalar distance,
+ SkPathMeasure& meas) const {
+#if defined(SK_BUILD_FOR_FUZZER)
+ if (dst->countPoints() > 100000) {
+ return fAdvance;
+ }
+#endif
+ switch (fStyle) {
+ case SkPath1DPathEffect::kTranslate_Style: {
+ SkPoint pos;
+ if (meas.getPosTan(distance, &pos, nullptr)) {
+ dst->addPath(fPath, pos.fX, pos.fY);
+ }
+ } break;
+ case SkPath1DPathEffect::kRotate_Style: {
+ SkMatrix matrix;
+ if (meas.getMatrix(distance, &matrix)) {
+ dst->addPath(fPath, matrix);
+ }
+ } break;
+ case SkPath1DPathEffect::kMorph_Style:
+ morphpath(dst, fPath, meas, distance);
+ break;
+ }
+ return fAdvance;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkPath1DPathEffect::Make(const SkPath& path, SkScalar advance, SkScalar phase,
+ Style style) {
+ if (advance <= 0 || !SkScalarIsFinite(advance) || !SkScalarIsFinite(phase) || path.isEmpty()) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkPath1DPathEffectImpl(path, advance, phase, style));
+}
+
+void SkPath1DPathEffect::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkPath1DPathEffectImpl);
+}
diff --git a/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp b/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp
new file mode 100644
index 0000000000..5cb41549d3
--- /dev/null
+++ b/gfx/skia/skia/src/effects/Sk2DPathEffect.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/Sk2DPathEffect.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkPathEffectBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+class Sk2DPathEffect : public SkPathEffectBase {
+public:
+ Sk2DPathEffect(const SkMatrix& mat) : fMatrix(mat) {
+ // Calling invert will set the type mask on both matrices, making them thread safe.
+ fMatrixIsInvertible = fMatrix.invert(&fInverse);
+ }
+
+protected:
+ /** New virtual, to be overridden by subclasses.
+ This is called once from filterPath, and provides the
+ uv parameter bounds for the path. Subsequent calls to
+ next() will receive u and v values within these bounds,
+ and then a call to end() will signal the end of processing.
+ */
+ virtual void begin(const SkIRect& uvBounds, SkPath* dst) const {}
+ virtual void next(const SkPoint& loc, int u, int v, SkPath* dst) const {}
+ virtual void end(SkPath* dst) const {}
+
+ /** Low-level virtual called per span of locations in the u-direction.
+ The default implementation calls next() repeatedly with each
+ location.
+ */
+ virtual void nextSpan(int x, int y, int ucount, SkPath* path) const {
+ if (!fMatrixIsInvertible) {
+ return;
+ }
+ #if defined(SK_BUILD_FOR_FUZZER)
+ if (ucount > 100) {
+ return;
+ }
+ #endif
+
+ const SkMatrix& mat = this->getMatrix();
+ SkPoint src, dst;
+
+ src.set(SkIntToScalar(x) + SK_ScalarHalf, SkIntToScalar(y) + SK_ScalarHalf);
+ do {
+ mat.mapPoints(&dst, &src, 1);
+ this->next(dst, x++, y, path);
+ src.fX += SK_Scalar1;
+ } while (--ucount > 0);
+ }
+
+ const SkMatrix& getMatrix() const { return fMatrix; }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeMatrix(fMatrix);
+ }
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkMatrix&) const override {
+ if (!fMatrixIsInvertible) {
+ return false;
+ }
+
+ SkPath tmp;
+ SkIRect ir;
+
+ src.transform(fInverse, &tmp);
+ tmp.getBounds().round(&ir);
+ if (!ir.isEmpty()) {
+ this->begin(ir, dst);
+
+ SkRegion rgn;
+ rgn.setPath(tmp, SkRegion(ir));
+ SkRegion::Iterator iter(rgn);
+ for (; !iter.done(); iter.next()) {
+ const SkIRect& rect = iter.rect();
+#if defined(SK_BUILD_FOR_FUZZER)
+ if (rect.height() > 100) {
+ continue;
+ }
+#endif
+ for (int y = rect.fTop; y < rect.fBottom; ++y) {
+ this->nextSpan(rect.fLeft, y, rect.width(), dst);
+ }
+ }
+
+ this->end(dst);
+ }
+ return true;
+ }
+
+private:
+ SkMatrix fMatrix, fInverse;
+ bool fMatrixIsInvertible;
+
+ // For simplicity, assume fast bounds cannot be computed
+ bool computeFastBounds(SkRect*) const override { return false; }
+
+ friend class Sk2DPathEffectBlitter;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkLine2DPathEffectImpl : public Sk2DPathEffect {
+public:
+ SkLine2DPathEffectImpl(SkScalar width, const SkMatrix& matrix)
+ : Sk2DPathEffect(matrix)
+ , fWidth(width)
+ {
+ SkASSERT(width >= 0);
+ }
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkMatrix& ctm) const override {
+ if (this->INHERITED::onFilterPath(dst, src, rec, cullRect, ctm)) {
+ rec->setStrokeStyle(fWidth);
+ return true;
+ }
+ return false;
+ }
+
+ void nextSpan(int u, int v, int ucount, SkPath* dst) const override {
+ if (ucount > 1) {
+ SkPoint src[2], dstP[2];
+
+ src[0].set(SkIntToScalar(u) + SK_ScalarHalf, SkIntToScalar(v) + SK_ScalarHalf);
+ src[1].set(SkIntToScalar(u+ucount) + SK_ScalarHalf, SkIntToScalar(v) + SK_ScalarHalf);
+ this->getMatrix().mapPoints(dstP, src, 2);
+
+ dst->moveTo(dstP[0]);
+ dst->lineTo(dstP[1]);
+ }
+ }
+
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer& buffer) {
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+ SkScalar width = buffer.readScalar();
+ return SkLine2DPathEffect::Make(width, matrix);
+ }
+
+ void flatten(SkWriteBuffer &buffer) const override {
+ buffer.writeMatrix(this->getMatrix());
+ buffer.writeScalar(fWidth);
+ }
+
+ Factory getFactory() const override { return CreateProc; }
+ const char* getTypeName() const override { return "SkLine2DPathEffect"; }
+
+private:
+ SkScalar fWidth;
+
+ using INHERITED = Sk2DPathEffect;
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SK_API SkPath2DPathEffectImpl : public Sk2DPathEffect {
+public:
+ SkPath2DPathEffectImpl(const SkMatrix& m, const SkPath& p) : INHERITED(m), fPath(p) {}
+
+ void next(const SkPoint& loc, int u, int v, SkPath* dst) const override {
+ dst->addPath(fPath, loc.fX, loc.fY);
+ }
+
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer& buffer) {
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+ SkPath path;
+ buffer.readPath(&path);
+ return SkPath2DPathEffect::Make(matrix, path);
+ }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeMatrix(this->getMatrix());
+ buffer.writePath(fPath);
+ }
+
+ Factory getFactory() const override { return CreateProc; }
+ const char* getTypeName() const override { return "SkPath2DPathEffect"; }
+
+private:
+ SkPath fPath;
+
+ using INHERITED = Sk2DPathEffect;
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkLine2DPathEffect::Make(SkScalar width, const SkMatrix& matrix) {
+ if (!(width >= 0)) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkLine2DPathEffectImpl(width, matrix));
+}
+
+sk_sp<SkPathEffect> SkPath2DPathEffect::Make(const SkMatrix& matrix, const SkPath& path) {
+ return sk_sp<SkPathEffect>(new SkPath2DPathEffectImpl(matrix, path));
+}
+
+void SkLine2DPathEffect::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkLine2DPathEffectImpl);
+}
+
+void SkPath2DPathEffect::RegisterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkPath2DPathEffectImpl);
+}
diff --git a/gfx/skia/skia/src/effects/SkBlenders.cpp b/gfx/skia/skia/src/effects/SkBlenders.cpp
new file mode 100644
index 0000000000..ebd43d05b8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkBlenders.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBlender.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkBlenders.h"
+
+#ifdef SK_ENABLE_SKSL
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkData.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/effects/SkRuntimeEffect.h"
+
+sk_sp<SkBlender> SkBlenders::Arithmetic(float k1, float k2, float k3, float k4,
+ bool enforcePremul) {
+ if (!SkScalarIsFinite(k1) ||
+ !SkScalarIsFinite(k2) ||
+ !SkScalarIsFinite(k3) ||
+ !SkScalarIsFinite(k4)) {
+ return nullptr;
+ }
+
+ // Are we nearly a SkBlendMode?
+ const struct {
+ float k1, k2, k3, k4;
+ SkBlendMode mode;
+ } table[] = {
+ { 0, 1, 0, 0, SkBlendMode::kSrc },
+ { 0, 0, 1, 0, SkBlendMode::kDst },
+ { 0, 0, 0, 0, SkBlendMode::kClear },
+ };
+ for (const auto& t : table) {
+ if (SkScalarNearlyEqual(k1, t.k1) &&
+ SkScalarNearlyEqual(k2, t.k2) &&
+ SkScalarNearlyEqual(k3, t.k3) &&
+ SkScalarNearlyEqual(k4, t.k4)) {
+ return SkBlender::Mode(t.mode);
+ }
+ }
+
+ // If we get here, we need the actual blender effect.
+
+ static SkRuntimeEffect* gArithmeticEffect = []{
+ const char prog[] =
+ "uniform half4 k;"
+ "uniform half pmClamp;"
+
+ "half4 main(half4 src, half4 dst) {"
+ "half4 c = k.x * src * dst + k.y * src + k.z * dst + k.w;"
+ "c.rgb = min(c.rgb, max(c.a, pmClamp));"
+ // rely on skia to saturate our alpha
+ "return c;"
+ "}"
+ ;
+ auto result = SkRuntimeEffect::MakeForBlender(SkString(prog));
+ SkASSERTF(result.effect, "SkBlenders::Arithmetic: %s", result.errorText.c_str());
+ return result.effect.release();
+ }();
+
+ const float array[] = {
+ k1, k2, k3, k4,
+ enforcePremul ? 0.0f : 1.0f,
+ };
+ return gArithmeticEffect->makeBlender(SkData::MakeWithCopy(array, sizeof(array)));
+}
+#else // SK_ENABLE_SKSL
+sk_sp<SkBlender> SkBlenders::Arithmetic(float k1, float k2, float k3, float k4,
+ bool enforcePremul) {
+ // TODO(skia:12197)
+ return nullptr;
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkColorMatrix.cpp b/gfx/skia/skia/src/effects/SkColorMatrix.cpp
new file mode 100644
index 0000000000..e9ed2a509a
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkColorMatrix.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkColorMatrix.h"
+#include "src/core/SkYUVMath.h"
+
+enum SkYUVColorSpace : int;
+
+SkColorMatrix SkColorMatrix::RGBtoYUV(SkYUVColorSpace cs) {
+ SkColorMatrix m;
+ SkColorMatrix_RGB2YUV(cs, m.fMat.data());
+ return m;
+}
+
+SkColorMatrix SkColorMatrix::YUVtoRGB(SkYUVColorSpace cs) {
+ SkColorMatrix m;
+ SkColorMatrix_YUV2RGB(cs, m.fMat.data());
+ return m;
+}
+
+enum {
+ kR_Scale = 0,
+ kG_Scale = 6,
+ kB_Scale = 12,
+ kA_Scale = 18,
+
+ kR_Trans = 4,
+ kG_Trans = 9,
+ kB_Trans = 14,
+ kA_Trans = 19,
+};
+
+static void set_concat(float result[20], const float outer[20], const float inner[20]) {
+ float tmp[20];
+ float* target;
+
+ if (outer == result || inner == result) {
+ target = tmp; // will memcpy answer when we're done into result
+ } else {
+ target = result;
+ }
+
+ int index = 0;
+ for (int j = 0; j < 20; j += 5) {
+ for (int i = 0; i < 4; i++) {
+ target[index++] = outer[j + 0] * inner[i + 0] +
+ outer[j + 1] * inner[i + 5] +
+ outer[j + 2] * inner[i + 10] +
+ outer[j + 3] * inner[i + 15];
+ }
+ target[index++] = outer[j + 0] * inner[4] +
+ outer[j + 1] * inner[9] +
+ outer[j + 2] * inner[14] +
+ outer[j + 3] * inner[19] +
+ outer[j + 4];
+ }
+
+ if (target != result) {
+ std::copy_n(target, 20, result);
+ }
+}
+
+void SkColorMatrix::setIdentity() {
+ fMat.fill(0.0f);
+ fMat[kR_Scale] = fMat[kG_Scale] = fMat[kB_Scale] = fMat[kA_Scale] = 1;
+}
+
+void SkColorMatrix::setScale(float rScale, float gScale, float bScale, float aScale) {
+ fMat.fill(0.0f);
+ fMat[kR_Scale] = rScale;
+ fMat[kG_Scale] = gScale;
+ fMat[kB_Scale] = bScale;
+ fMat[kA_Scale] = aScale;
+}
+
+void SkColorMatrix::postTranslate(float dr, float dg, float db, float da) {
+ fMat[kR_Trans] += dr;
+ fMat[kG_Trans] += dg;
+ fMat[kB_Trans] += db;
+ fMat[kA_Trans] += da;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkColorMatrix::setConcat(const SkColorMatrix& matA, const SkColorMatrix& matB) {
+ set_concat(fMat.data(), matA.fMat.data(), matB.fMat.data());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void setrow(float row[], float r, float g, float b) {
+ row[0] = r;
+ row[1] = g;
+ row[2] = b;
+}
+
+static const float kHueR = 0.213f;
+static const float kHueG = 0.715f;
+static const float kHueB = 0.072f;
+
+void SkColorMatrix::setSaturation(float sat) {
+ fMat.fill(0.0f);
+
+ const float R = kHueR * (1 - sat);
+ const float G = kHueG * (1 - sat);
+ const float B = kHueB * (1 - sat);
+
+ setrow(fMat.data() + 0, R + sat, G, B);
+ setrow(fMat.data() + 5, R, G + sat, B);
+ setrow(fMat.data() + 10, R, G, B + sat);
+ fMat[kA_Scale] = 1;
+}
diff --git a/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp b/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp
new file mode 100644
index 0000000000..ea2f47c4a5
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkColorMatrixFilter.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkColorMatrix.h"
+#include "include/private/base/SkCPUTypes.h"
+
+static SkScalar byte_to_unit_float(U8CPU byte) {
+ if (0xFF == byte) {
+ // want to get this exact
+ return 1;
+ } else {
+ return byte * 0.00392156862745f;
+ }
+}
+
+sk_sp<SkColorFilter> SkColorFilters::Lighting(SkColor mul, SkColor add) {
+ const SkColor opaqueAlphaMask = SK_ColorBLACK;
+ // omit the alpha and compare only the RGB values
+ if (0 == (add & ~opaqueAlphaMask)) {
+ return SkColorFilters::Blend(mul | opaqueAlphaMask, SkBlendMode::kModulate);
+ }
+
+ SkColorMatrix matrix;
+ matrix.setScale(byte_to_unit_float(SkColorGetR(mul)),
+ byte_to_unit_float(SkColorGetG(mul)),
+ byte_to_unit_float(SkColorGetB(mul)),
+ 1);
+ matrix.postTranslate(byte_to_unit_float(SkColorGetR(add)),
+ byte_to_unit_float(SkColorGetG(add)),
+ byte_to_unit_float(SkColorGetB(add)),
+ 0);
+ return SkColorFilters::Matrix(matrix);
+}
diff --git a/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp b/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp
new file mode 100644
index 0000000000..e19c6c6f74
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkCornerPathEffect.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkCornerPathEffect.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkPathEffectBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+class SkMatrix;
+class SkStrokeRec;
+struct SkRect;
+
+static bool ComputeStep(const SkPoint& a, const SkPoint& b, SkScalar radius,
+ SkPoint* step) {
+ SkScalar dist = SkPoint::Distance(a, b);
+
+ *step = b - a;
+ if (dist <= radius * 2) {
+ *step *= SK_ScalarHalf;
+ return false;
+ } else {
+ *step *= radius / dist;
+ return true;
+ }
+}
+
+class SkCornerPathEffectImpl : public SkPathEffectBase {
+public:
+ explicit SkCornerPathEffectImpl(SkScalar radius) : fRadius(radius) {
+ SkASSERT(radius > 0);
+ }
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const override {
+ if (fRadius <= 0) {
+ return false;
+ }
+
+ SkPath::Iter iter(src, false);
+ SkPath::Verb verb, prevVerb = SkPath::kDone_Verb;
+ SkPoint pts[4];
+
+ bool closed;
+ SkPoint moveTo, lastCorner;
+ SkVector firstStep, step;
+ bool prevIsValid = true;
+
+ // to avoid warnings
+ step.set(0, 0);
+ moveTo.set(0, 0);
+ firstStep.set(0, 0);
+ lastCorner.set(0, 0);
+
+ for (;;) {
+ switch (verb = iter.next(pts)) {
+ case SkPath::kMove_Verb:
+ // close out the previous (open) contour
+ if (SkPath::kLine_Verb == prevVerb) {
+ dst->lineTo(lastCorner);
+ }
+ closed = iter.isClosedContour();
+ if (closed) {
+ moveTo = pts[0];
+ prevIsValid = false;
+ } else {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ break;
+ case SkPath::kLine_Verb: {
+ bool drawSegment = ComputeStep(pts[0], pts[1], fRadius, &step);
+ // prev corner
+ if (!prevIsValid) {
+ dst->moveTo(moveTo + step);
+ prevIsValid = true;
+ } else {
+ dst->quadTo(pts[0].fX, pts[0].fY, pts[0].fX + step.fX,
+ pts[0].fY + step.fY);
+ }
+ if (drawSegment) {
+ dst->lineTo(pts[1].fX - step.fX, pts[1].fY - step.fY);
+ }
+ lastCorner = pts[1];
+ prevIsValid = true;
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ // TBD - just replicate the curve for now
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ dst->quadTo(pts[1], pts[2]);
+ lastCorner = pts[2];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kConic_Verb:
+ // TBD - just replicate the curve for now
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ dst->conicTo(pts[1], pts[2], iter.conicWeight());
+ lastCorner = pts[2];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kCubic_Verb:
+ if (!prevIsValid) {
+ dst->moveTo(pts[0]);
+ prevIsValid = true;
+ }
+ // TBD - just replicate the curve for now
+ dst->cubicTo(pts[1], pts[2], pts[3]);
+ lastCorner = pts[3];
+ firstStep.set(0, 0);
+ break;
+ case SkPath::kClose_Verb:
+ if (firstStep.fX || firstStep.fY) {
+ dst->quadTo(lastCorner.fX, lastCorner.fY,
+ lastCorner.fX + firstStep.fX,
+ lastCorner.fY + firstStep.fY);
+ }
+ dst->close();
+ prevIsValid = false;
+ break;
+ case SkPath::kDone_Verb:
+ if (prevIsValid) {
+ dst->lineTo(lastCorner);
+ }
+ return true;
+ default:
+ SkDEBUGFAIL("default should not be reached");
+ return false;
+ }
+
+ if (SkPath::kMove_Verb == prevVerb) {
+ firstStep = step;
+ }
+ prevVerb = verb;
+ }
+ }
+
+ bool computeFastBounds(SkRect*) const override {
+ // Rounding sharp corners within a path produces a new path that is still contained within
+ // the original's bounds, so leave 'bounds' unmodified.
+ return true;
+ }
+
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer& buffer) {
+ return SkCornerPathEffect::Make(buffer.readScalar());
+ }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeScalar(fRadius);
+ }
+
+ Factory getFactory() const override { return CreateProc; }
+ const char* getTypeName() const override { return "SkCornerPathEffect"; }
+
+private:
+ const SkScalar fRadius;
+
+ using INHERITED = SkPathEffectBase;
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkCornerPathEffect::Make(SkScalar radius) {
+ return SkScalarIsFinite(radius) && (radius > 0) ?
+ sk_sp<SkPathEffect>(new SkCornerPathEffectImpl(radius)) : nullptr;
+}
+
+void SkCornerPathEffect::RegisterFlattenables() {
+ SkFlattenable::Register("SkCornerPathEffect", SkCornerPathEffectImpl::CreateProc);
+}
diff --git a/gfx/skia/skia/src/effects/SkDashImpl.h b/gfx/skia/skia/src/effects/SkDashImpl.h
new file mode 100644
index 0000000000..8439ca0f19
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDashImpl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDashImpl_DEFINED
+#define SkDashImpl_DEFINED
+
+#include "src/core/SkPathEffectBase.h"
+
+class SkDashImpl : public SkPathEffectBase {
+public:
+ SkDashImpl(const SkScalar intervals[], int count, SkScalar phase);
+
+protected:
+ ~SkDashImpl() override;
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const override;
+
+ bool onAsPoints(PointData* results, const SkPath& src, const SkStrokeRec&, const SkMatrix&,
+ const SkRect*) const override;
+
+ DashType onAsADash(DashInfo* info) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkDashImpl)
+
+ bool computeFastBounds(SkRect* bounds) const override {
+ // Dashing a path returns a subset of the input path so just return true and leave
+ // bounds unmodified
+ return true;
+ }
+
+ SkScalar* fIntervals;
+ int32_t fCount;
+ SkScalar fPhase;
+ // computed from phase
+
+ SkScalar fInitialDashLength;
+ int32_t fInitialDashIndex;
+ SkScalar fIntervalLength;
+
+ using INHERITED = SkPathEffectBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkDashPathEffect.cpp b/gfx/skia/skia/src/effects/SkDashPathEffect.cpp
new file mode 100644
index 0000000000..49ed5fea1c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDashPathEffect.cpp
@@ -0,0 +1,413 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkDashPathEffect.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/SkDashImpl.h"
+#include "src/utils/SkDashPathPriv.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+
+using namespace skia_private;
+
+SkDashImpl::SkDashImpl(const SkScalar intervals[], int count, SkScalar phase)
+ : fPhase(0)
+ , fInitialDashLength(-1)
+ , fInitialDashIndex(0)
+ , fIntervalLength(0) {
+ SkASSERT(intervals);
+ SkASSERT(count > 1 && SkIsAlign2(count));
+
+ fIntervals = (SkScalar*)sk_malloc_throw(sizeof(SkScalar) * count);
+ fCount = count;
+ for (int i = 0; i < count; i++) {
+ fIntervals[i] = intervals[i];
+ }
+
+ // set the internal data members
+ SkDashPath::CalcDashParameters(phase, fIntervals, fCount,
+ &fInitialDashLength, &fInitialDashIndex, &fIntervalLength, &fPhase);
+}
+
+SkDashImpl::~SkDashImpl() {
+ sk_free(fIntervals);
+}
+
+bool SkDashImpl::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkMatrix&) const {
+ return SkDashPath::InternalFilter(dst, src, rec, cullRect, fIntervals, fCount,
+ fInitialDashLength, fInitialDashIndex, fIntervalLength,
+ fPhase);
+}
+
+static void outset_for_stroke(SkRect* rect, const SkStrokeRec& rec) {
+ SkScalar radius = SkScalarHalf(rec.getWidth());
+ if (0 == radius) {
+ radius = SK_Scalar1; // hairlines
+ }
+ if (SkPaint::kMiter_Join == rec.getJoin()) {
+ radius *= rec.getMiter();
+ }
+ rect->outset(radius, radius);
+}
+
+// Attempt to trim the line to minimally cover the cull rect (currently
+// only works for horizontal and vertical lines).
+// Return true if processing should continue; false otherwise.
+static bool cull_line(SkPoint* pts, const SkStrokeRec& rec,
+ const SkMatrix& ctm, const SkRect* cullRect,
+ const SkScalar intervalLength) {
+ if (nullptr == cullRect) {
+ SkASSERT(false); // Shouldn't ever occur in practice
+ return false;
+ }
+
+ SkScalar dx = pts[1].x() - pts[0].x();
+ SkScalar dy = pts[1].y() - pts[0].y();
+
+ if ((dx && dy) || (!dx && !dy)) {
+ return false;
+ }
+
+ SkRect bounds = *cullRect;
+ outset_for_stroke(&bounds, rec);
+
+ // cullRect is in device space while pts are in the local coordinate system
+ // defined by the ctm. We want our answer in the local coordinate system.
+
+ SkASSERT(ctm.rectStaysRect());
+ SkMatrix inv;
+ if (!ctm.invert(&inv)) {
+ return false;
+ }
+
+ inv.mapRect(&bounds);
+
+ if (dx) {
+ SkASSERT(dx && !dy);
+ SkScalar minX = pts[0].fX;
+ SkScalar maxX = pts[1].fX;
+
+ if (dx < 0) {
+ using std::swap;
+ swap(minX, maxX);
+ }
+
+ SkASSERT(minX < maxX);
+ if (maxX <= bounds.fLeft || minX >= bounds.fRight) {
+ return false;
+ }
+
+ // Now we actually perform the chop, removing the excess to the left and
+ // right of the bounds (keeping our new line "in phase" with the dash,
+ // hence the (mod intervalLength).
+
+ if (minX < bounds.fLeft) {
+ minX = bounds.fLeft - SkScalarMod(bounds.fLeft - minX, intervalLength);
+ }
+ if (maxX > bounds.fRight) {
+ maxX = bounds.fRight + SkScalarMod(maxX - bounds.fRight, intervalLength);
+ }
+
+ SkASSERT(maxX > minX);
+ if (dx < 0) {
+ using std::swap;
+ swap(minX, maxX);
+ }
+ pts[0].fX = minX;
+ pts[1].fX = maxX;
+ } else {
+ SkASSERT(dy && !dx);
+ SkScalar minY = pts[0].fY;
+ SkScalar maxY = pts[1].fY;
+
+ if (dy < 0) {
+ using std::swap;
+ swap(minY, maxY);
+ }
+
+ SkASSERT(minY < maxY);
+ if (maxY <= bounds.fTop || minY >= bounds.fBottom) {
+ return false;
+ }
+
+ // Now we actually perform the chop, removing the excess to the top and
+ // bottom of the bounds (keeping our new line "in phase" with the dash,
+ // hence the (mod intervalLength).
+
+ if (minY < bounds.fTop) {
+ minY = bounds.fTop - SkScalarMod(bounds.fTop - minY, intervalLength);
+ }
+ if (maxY > bounds.fBottom) {
+ maxY = bounds.fBottom + SkScalarMod(maxY - bounds.fBottom, intervalLength);
+ }
+
+ SkASSERT(maxY > minY);
+ if (dy < 0) {
+ using std::swap;
+ swap(minY, maxY);
+ }
+ pts[0].fY = minY;
+ pts[1].fY = maxY;
+ }
+
+ return true;
+}
+
+// Currently asPoints is more restrictive then it needs to be. In the future
+// we need to:
+// allow kRound_Cap capping (could allow rotations in the matrix with this)
+// allow paths to be returned
+bool SkDashImpl::onAsPoints(PointData* results, const SkPath& src, const SkStrokeRec& rec,
+ const SkMatrix& matrix, const SkRect* cullRect) const {
+ // width < 0 -> fill && width == 0 -> hairline so requiring width > 0 rules both out
+ if (0 >= rec.getWidth()) {
+ return false;
+ }
+
+ // TODO: this next test could be eased up. We could allow any number of
+ // intervals as long as all the ons match and all the offs match.
+ // Additionally, they do not necessarily need to be integers.
+ // We cannot allow arbitrary intervals since we want the returned points
+ // to be uniformly sized.
+ if (fCount != 2 ||
+ !SkScalarNearlyEqual(fIntervals[0], fIntervals[1]) ||
+ !SkScalarIsInt(fIntervals[0]) ||
+ !SkScalarIsInt(fIntervals[1])) {
+ return false;
+ }
+
+ SkPoint pts[2];
+
+ if (!src.isLine(pts)) {
+ return false;
+ }
+
+ // TODO: this test could be eased up to allow circles
+ if (SkPaint::kButt_Cap != rec.getCap()) {
+ return false;
+ }
+
+ // TODO: this test could be eased up for circles. Rotations could be allowed.
+ if (!matrix.rectStaysRect()) {
+ return false;
+ }
+
+ // See if the line can be limited to something plausible.
+ if (!cull_line(pts, rec, matrix, cullRect, fIntervalLength)) {
+ return false;
+ }
+
+ SkScalar length = SkPoint::Distance(pts[1], pts[0]);
+
+ SkVector tangent = pts[1] - pts[0];
+ if (tangent.isZero()) {
+ return false;
+ }
+
+ tangent.scale(SkScalarInvert(length));
+
+ // TODO: make this test for horizontal & vertical lines more robust
+ bool isXAxis = true;
+ if (SkScalarNearlyEqual(SK_Scalar1, tangent.fX) ||
+ SkScalarNearlyEqual(-SK_Scalar1, tangent.fX)) {
+ results->fSize.set(SkScalarHalf(fIntervals[0]), SkScalarHalf(rec.getWidth()));
+ } else if (SkScalarNearlyEqual(SK_Scalar1, tangent.fY) ||
+ SkScalarNearlyEqual(-SK_Scalar1, tangent.fY)) {
+ results->fSize.set(SkScalarHalf(rec.getWidth()), SkScalarHalf(fIntervals[0]));
+ isXAxis = false;
+ } else if (SkPaint::kRound_Cap != rec.getCap()) {
+ // Angled lines don't have axis-aligned boxes.
+ return false;
+ }
+
+ if (results) {
+ results->fFlags = 0;
+ SkScalar clampedInitialDashLength = std::min(length, fInitialDashLength);
+
+ if (SkPaint::kRound_Cap == rec.getCap()) {
+ results->fFlags |= PointData::kCircles_PointFlag;
+ }
+
+ results->fNumPoints = 0;
+ SkScalar len2 = length;
+ if (clampedInitialDashLength > 0 || 0 == fInitialDashIndex) {
+ SkASSERT(len2 >= clampedInitialDashLength);
+ if (0 == fInitialDashIndex) {
+ if (clampedInitialDashLength > 0) {
+ if (clampedInitialDashLength >= fIntervals[0]) {
+ ++results->fNumPoints; // partial first dash
+ }
+ len2 -= clampedInitialDashLength;
+ }
+ len2 -= fIntervals[1]; // also skip first space
+ if (len2 < 0) {
+ len2 = 0;
+ }
+ } else {
+ len2 -= clampedInitialDashLength; // skip initial partial empty
+ }
+ }
+ // Too many midpoints can cause results->fNumPoints to overflow or
+ // otherwise cause the results->fPoints allocation below to OOM.
+ // Cap it to a sane value.
+ SkScalar numIntervals = len2 / fIntervalLength;
+ if (!SkScalarIsFinite(numIntervals) || numIntervals > SkDashPath::kMaxDashCount) {
+ return false;
+ }
+ int numMidPoints = SkScalarFloorToInt(numIntervals);
+ results->fNumPoints += numMidPoints;
+ len2 -= numMidPoints * fIntervalLength;
+ bool partialLast = false;
+ if (len2 > 0) {
+ if (len2 < fIntervals[0]) {
+ partialLast = true;
+ } else {
+ ++numMidPoints;
+ ++results->fNumPoints;
+ }
+ }
+
+ results->fPoints = new SkPoint[results->fNumPoints];
+
+ SkScalar distance = 0;
+ int curPt = 0;
+
+ if (clampedInitialDashLength > 0 || 0 == fInitialDashIndex) {
+ SkASSERT(clampedInitialDashLength <= length);
+
+ if (0 == fInitialDashIndex) {
+ if (clampedInitialDashLength > 0) {
+ // partial first block
+ SkASSERT(SkPaint::kRound_Cap != rec.getCap()); // can't handle partial circles
+ SkScalar x = pts[0].fX + tangent.fX * SkScalarHalf(clampedInitialDashLength);
+ SkScalar y = pts[0].fY + tangent.fY * SkScalarHalf(clampedInitialDashLength);
+ SkScalar halfWidth, halfHeight;
+ if (isXAxis) {
+ halfWidth = SkScalarHalf(clampedInitialDashLength);
+ halfHeight = SkScalarHalf(rec.getWidth());
+ } else {
+ halfWidth = SkScalarHalf(rec.getWidth());
+ halfHeight = SkScalarHalf(clampedInitialDashLength);
+ }
+ if (clampedInitialDashLength < fIntervals[0]) {
+ // This one will not be like the others
+ results->fFirst.addRect(x - halfWidth, y - halfHeight,
+ x + halfWidth, y + halfHeight);
+ } else {
+ SkASSERT(curPt < results->fNumPoints);
+ results->fPoints[curPt].set(x, y);
+ ++curPt;
+ }
+
+ distance += clampedInitialDashLength;
+ }
+
+ distance += fIntervals[1]; // skip over the next blank block too
+ } else {
+ distance += clampedInitialDashLength;
+ }
+ }
+
+ if (0 != numMidPoints) {
+ distance += SkScalarHalf(fIntervals[0]);
+
+ for (int i = 0; i < numMidPoints; ++i) {
+ SkScalar x = pts[0].fX + tangent.fX * distance;
+ SkScalar y = pts[0].fY + tangent.fY * distance;
+
+ SkASSERT(curPt < results->fNumPoints);
+ results->fPoints[curPt].set(x, y);
+ ++curPt;
+
+ distance += fIntervalLength;
+ }
+
+ distance -= SkScalarHalf(fIntervals[0]);
+ }
+
+ if (partialLast) {
+ // partial final block
+ SkASSERT(SkPaint::kRound_Cap != rec.getCap()); // can't handle partial circles
+ SkScalar temp = length - distance;
+ SkASSERT(temp < fIntervals[0]);
+ SkScalar x = pts[0].fX + tangent.fX * (distance + SkScalarHalf(temp));
+ SkScalar y = pts[0].fY + tangent.fY * (distance + SkScalarHalf(temp));
+ SkScalar halfWidth, halfHeight;
+ if (isXAxis) {
+ halfWidth = SkScalarHalf(temp);
+ halfHeight = SkScalarHalf(rec.getWidth());
+ } else {
+ halfWidth = SkScalarHalf(rec.getWidth());
+ halfHeight = SkScalarHalf(temp);
+ }
+ results->fLast.addRect(x - halfWidth, y - halfHeight,
+ x + halfWidth, y + halfHeight);
+ }
+
+ SkASSERT(curPt == results->fNumPoints);
+ }
+
+ return true;
+}
+
+SkPathEffect::DashType SkDashImpl::onAsADash(DashInfo* info) const {
+ if (info) {
+ if (info->fCount >= fCount && info->fIntervals) {
+ memcpy(info->fIntervals, fIntervals, fCount * sizeof(SkScalar));
+ }
+ info->fCount = fCount;
+ info->fPhase = fPhase;
+ }
+ return kDash_DashType;
+}
+
+void SkDashImpl::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fPhase);
+ buffer.writeScalarArray(fIntervals, fCount);
+}
+
+sk_sp<SkFlattenable> SkDashImpl::CreateProc(SkReadBuffer& buffer) {
+ const SkScalar phase = buffer.readScalar();
+ uint32_t count = buffer.getArrayCount();
+
+ // Don't allocate gigantic buffers if there's not data for them.
+ if (!buffer.validateCanReadN<SkScalar>(count)) {
+ return nullptr;
+ }
+
+ AutoSTArray<32, SkScalar> intervals(count);
+ if (buffer.readScalarArray(intervals.get(), count)) {
+ return SkDashPathEffect::Make(intervals.get(), SkToInt(count), phase);
+ }
+ return nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkDashPathEffect::Make(const SkScalar intervals[], int count, SkScalar phase) {
+ if (!SkDashPath::ValidDashPath(phase, intervals, count)) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkDashImpl(intervals, count, phase));
+}
diff --git a/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp b/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp
new file mode 100644
index 0000000000..1453d5d7c8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkDiscretePathEffect.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkDiscretePathEffect.h"
+#include "include/private/base/SkFixed.h"
+#include "src/core/SkPathEffectBase.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <algorithm>
+#include <cstdint>
+
+class SkMatrix;
+
+/** \class LCGRandom
+
+ Utility class that implements pseudo random 32bit numbers using a fast
+ linear equation. Unlike rand(), this class holds its own seed (initially
+ set to 0), so that multiple instances can be used with no side-effects.
+
+ Copied from the original implementation of SkRandom. Only contains the
+ methods used by SkDiscretePathEffect::filterPath, with methods that were
+ not called directly moved to private.
+*/
+class LCGRandom {
+public:
+ LCGRandom(uint32_t seed) : fSeed(seed) {}
+
+ /** Return the next pseudo random number expressed as a SkScalar
+ in the range [-SK_Scalar1..SK_Scalar1).
+ */
+ SkScalar nextSScalar1() { return SkFixedToScalar(this->nextSFixed1()); }
+
+private:
+ /** Return the next pseudo random number as an unsigned 32bit value.
+ */
+ uint32_t nextU() { uint32_t r = fSeed * kMul + kAdd; fSeed = r; return r; }
+
+ /** Return the next pseudo random number as a signed 32bit value.
+ */
+ int32_t nextS() { return (int32_t)this->nextU(); }
+
+ /** Return the next pseudo random number expressed as a signed SkFixed
+ in the range [-SK_Fixed1..SK_Fixed1).
+ */
+ SkFixed nextSFixed1() { return this->nextS() >> 15; }
+
+ // See "Numerical Recipes in C", 1992 page 284 for these constants
+ enum {
+ kMul = 1664525,
+ kAdd = 1013904223
+ };
+ uint32_t fSeed;
+};
+
+static void Perterb(SkPoint* p, const SkVector& tangent, SkScalar scale) {
+ SkVector normal = tangent;
+ SkPointPriv::RotateCCW(&normal);
+ normal.setLength(scale);
+ *p += normal;
+}
+
+class SK_API SkDiscretePathEffectImpl : public SkPathEffectBase {
+public:
+ SkDiscretePathEffectImpl(SkScalar segLength, SkScalar deviation, uint32_t seedAssist)
+ : fSegLength(segLength), fPerterb(deviation), fSeedAssist(seedAssist)
+ {
+ SkASSERT(SkScalarIsFinite(segLength));
+ SkASSERT(SkScalarIsFinite(deviation));
+ SkASSERT(segLength > SK_ScalarNearlyZero);
+ }
+
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect*, const SkMatrix&) const override {
+ bool doFill = rec->isFillStyle();
+
+ SkPathMeasure meas(src, doFill);
+
+ /* Caller may supply their own seed assist, which by default is 0 */
+ uint32_t seed = fSeedAssist ^ SkScalarRoundToInt(meas.getLength());
+
+ LCGRandom rand(seed ^ ((seed << 16) | (seed >> 16)));
+ SkScalar scale = fPerterb;
+ SkPoint p;
+ SkVector v;
+
+ do {
+ SkScalar length = meas.getLength();
+ #if defined(SK_BUILD_FOR_FUZZER)
+ if (length > 1000) {
+ return false;
+ }
+ #endif
+
+ if (fSegLength * (2 + doFill) > length) {
+ meas.getSegment(0, length, dst, true); // to short for us to mangle
+ } else {
+ int n = SkScalarRoundToInt(length / fSegLength);
+ constexpr int kMaxReasonableIterations = 100000;
+ n = std::min(n, kMaxReasonableIterations);
+ SkScalar delta = length / n;
+ SkScalar distance = 0;
+
+ if (meas.isClosed()) {
+ n -= 1;
+ distance += delta/2;
+ }
+
+ if (meas.getPosTan(distance, &p, &v)) {
+ Perterb(&p, v, rand.nextSScalar1() * scale);
+ dst->moveTo(p);
+ }
+ while (--n >= 0) {
+ distance += delta;
+ if (meas.getPosTan(distance, &p, &v)) {
+ Perterb(&p, v, rand.nextSScalar1() * scale);
+ dst->lineTo(p);
+ }
+ }
+ if (meas.isClosed()) {
+ dst->close();
+ }
+ }
+ } while (meas.nextContour());
+ return true;
+ }
+
+ bool computeFastBounds(SkRect* bounds) const override {
+ if (bounds) {
+ SkScalar maxOutset = SkScalarAbs(fPerterb);
+ bounds->outset(maxOutset, maxOutset);
+ }
+ return true;
+ }
+
+ static sk_sp<SkFlattenable> CreateProc(SkReadBuffer& buffer) {
+ SkScalar segLength = buffer.readScalar();
+ SkScalar perterb = buffer.readScalar();
+ uint32_t seed = buffer.readUInt();
+ return SkDiscretePathEffect::Make(segLength, perterb, seed);
+ }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeScalar(fSegLength);
+ buffer.writeScalar(fPerterb);
+ buffer.writeUInt(fSeedAssist);
+ }
+
+ Factory getFactory() const override { return CreateProc; }
+ const char* getTypeName() const override { return "SkDiscretePathEffect"; }
+
+private:
+ const SkScalar fSegLength,
+ fPerterb;
+ /* Caller-supplied 32 bit seed assist */
+ const uint32_t fSeedAssist;
+
+ using INHERITED = SkPathEffectBase;
+};
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkDiscretePathEffect::Make(SkScalar segLength, SkScalar deviation,
+ uint32_t seedAssist) {
+ if (!SkScalarIsFinite(segLength) || !SkScalarIsFinite(deviation)) {
+ return nullptr;
+ }
+ if (segLength <= SK_ScalarNearlyZero) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkDiscretePathEffectImpl(segLength, deviation, seedAssist));
+}
+
+void SkDiscretePathEffect::RegisterFlattenables() {
+ SkFlattenable::Register("SkDiscretePathEffect", SkDiscretePathEffectImpl::CreateProc);
+}
diff --git a/gfx/skia/skia/src/effects/SkEmbossMask.cpp b/gfx/skia/skia/src/effects/SkEmbossMask.cpp
new file mode 100644
index 0000000000..c2f9a9d18b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMask.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/effects/SkEmbossMask.h"
+
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkMask.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+
+static inline int nonzero_to_one(int x) {
+#if 0
+ return x != 0;
+#else
+ return ((unsigned)(x | -x)) >> 31;
+#endif
+}
+
+static inline int neq_to_one(int x, int max) {
+#if 0
+ return x != max;
+#else
+ SkASSERT(x >= 0 && x <= max);
+ return ((unsigned)(x - max)) >> 31;
+#endif
+}
+
+static inline int neq_to_mask(int x, int max) {
+#if 0
+ return -(x != max);
+#else
+ SkASSERT(x >= 0 && x <= max);
+ return (x - max) >> 31;
+#endif
+}
+
+static inline unsigned div255(unsigned x) {
+ SkASSERT(x <= (255*255));
+ return x * ((1 << 24) / 255) >> 24;
+}
+
+#define kDelta 32 // small enough to show off angle differences
+
+void SkEmbossMask::Emboss(SkMask* mask, const SkEmbossMaskFilter::Light& light) {
+ SkASSERT(mask->fFormat == SkMask::k3D_Format);
+
+ int specular = light.fSpecular;
+ int ambient = light.fAmbient;
+ SkFixed lx = SkScalarToFixed(light.fDirection[0]);
+ SkFixed ly = SkScalarToFixed(light.fDirection[1]);
+ SkFixed lz = SkScalarToFixed(light.fDirection[2]);
+ SkFixed lz_dot_nz = lz * kDelta;
+ int lz_dot8 = lz >> 8;
+
+ size_t planeSize = mask->computeImageSize();
+ uint8_t* alpha = mask->fImage;
+ uint8_t* multiply = (uint8_t*)alpha + planeSize;
+ uint8_t* additive = multiply + planeSize;
+
+ int rowBytes = mask->fRowBytes;
+ int maxy = mask->fBounds.height() - 1;
+ int maxx = mask->fBounds.width() - 1;
+
+ int prev_row = 0;
+ for (int y = 0; y <= maxy; y++) {
+ int next_row = neq_to_mask(y, maxy) & rowBytes;
+
+ for (int x = 0; x <= maxx; x++) {
+ int nx = alpha[x + neq_to_one(x, maxx)] - alpha[x - nonzero_to_one(x)];
+ int ny = alpha[x + next_row] - alpha[x - prev_row];
+
+ SkFixed numer = lx * nx + ly * ny + lz_dot_nz;
+ int mul = ambient;
+ int add = 0;
+
+ if (numer > 0) { // preflight when numer/denom will be <= 0
+ int denom = SkSqrt32(nx * nx + ny * ny + kDelta*kDelta);
+ SkFixed dot = numer / denom;
+ dot >>= 8; // now dot is 2^8 instead of 2^16
+ mul = std::min(mul + dot, 255);
+
+ // now for the reflection
+
+ // R = 2 (Light * Normal) Normal - Light
+ // hilite = R * Eye(0, 0, 1)
+
+ int hilite = (2 * dot - lz_dot8) * lz_dot8 >> 8;
+ if (hilite > 0) {
+ // pin hilite to 255, since our fast math is also a little sloppy
+ hilite = std::min(hilite, 255);
+
+ // specular is 4.4
+ // would really like to compute the fractional part of this
+ // and then possibly cache a 256 table for a given specular
+ // value in the light, and just pass that in to this function.
+ add = hilite;
+ for (int i = specular >> 4; i > 0; --i) {
+ add = div255(add * hilite);
+ }
+ }
+ }
+ multiply[x] = SkToU8(mul);
+ additive[x] = SkToU8(add);
+ }
+ alpha += rowBytes;
+ multiply += rowBytes;
+ additive += rowBytes;
+ prev_row = rowBytes;
+ }
+}
diff --git a/gfx/skia/skia/src/effects/SkEmbossMask.h b/gfx/skia/skia/src/effects/SkEmbossMask.h
new file mode 100644
index 0000000000..9731732e0c
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMask.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkEmbossMask_DEFINED
+#define SkEmbossMask_DEFINED
+
+#include "src/effects/SkEmbossMaskFilter.h"
+
+struct SkMask;
+
+class SkEmbossMask {
+public:
+ static void Emboss(SkMask* mask, const SkEmbossMaskFilter::Light&);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp
new file mode 100644
index 0000000000..a42cf083fa
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/effects/SkEmbossMaskFilter.h"
+
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkBlurMask.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/SkEmbossMask.h"
+
+#if defined(SK_SUPPORT_LEGACY_EMBOSSMASKFILTER)
+#include "include/effects/SkBlurMaskFilter.h"
+#endif
+
+#include <cstring>
+
+static void normalize3(SkScalar dst[3], const SkScalar src[3]) {
+ SkScalar mag = SkScalarSquare(src[0]) + SkScalarSquare(src[1]) + SkScalarSquare(src[2]);
+ SkScalar scale = SkScalarInvert(SkScalarSqrt(mag));
+
+ for (int i = 0; i < 3; i++) {
+ dst[i] = src[i] * scale;
+ }
+}
+
+sk_sp<SkMaskFilter> SkEmbossMaskFilter::Make(SkScalar blurSigma, const Light& light) {
+ if (!SkScalarIsFinite(blurSigma) || blurSigma <= 0) {
+ return nullptr;
+ }
+
+ Light newLight = light;
+ normalize3(newLight.fDirection, light.fDirection);
+ if (!SkScalarsAreFinite(newLight.fDirection, 3)) {
+ return nullptr;
+ }
+
+ return sk_sp<SkMaskFilter>(new SkEmbossMaskFilter(blurSigma, newLight));
+}
+
+#ifdef SK_SUPPORT_LEGACY_EMBOSSMASKFILTER
+sk_sp<SkMaskFilter> SkBlurMaskFilter::MakeEmboss(SkScalar blurSigma, const SkScalar direction[3],
+ SkScalar ambient, SkScalar specular) {
+ if (direction == nullptr) {
+ return nullptr;
+ }
+
+ SkEmbossMaskFilter::Light light;
+
+ memcpy(light.fDirection, direction, sizeof(light.fDirection));
+ // ambient should be 0...1 as a scalar
+ light.fAmbient = SkUnitScalarClampToByte(ambient);
+ // specular should be 0..15.99 as a scalar
+ static const SkScalar kSpecularMultiplier = SkIntToScalar(255) / 16;
+ light.fSpecular = static_cast<U8CPU>(SkTPin(specular, 0.0f, 16.0f) * kSpecularMultiplier + 0.5);
+
+ return SkEmbossMaskFilter::Make(blurSigma, light);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkEmbossMaskFilter::SkEmbossMaskFilter(SkScalar blurSigma, const Light& light)
+ : fLight(light), fBlurSigma(blurSigma)
+{
+ SkASSERT(fBlurSigma > 0);
+ SkASSERT(SkScalarsAreFinite(fLight.fDirection, 3));
+}
+
+SkMask::Format SkEmbossMaskFilter::getFormat() const {
+ return SkMask::k3D_Format;
+}
+
+bool SkEmbossMaskFilter::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix& matrix, SkIPoint* margin) const {
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ SkScalar sigma = matrix.mapRadius(fBlurSigma);
+
+ if (!SkBlurMask::BoxBlur(dst, src, sigma, kInner_SkBlurStyle)) {
+ return false;
+ }
+
+ dst->fFormat = SkMask::k3D_Format;
+ if (margin) {
+ margin->set(SkScalarCeilToInt(3*sigma), SkScalarCeilToInt(3*sigma));
+ }
+
+ if (src.fImage == nullptr) {
+ return true;
+ }
+
+ // create a larger buffer for the other two channels (should force fBlur to do this for us)
+
+ {
+ uint8_t* alphaPlane = dst->fImage;
+ size_t planeSize = dst->computeImageSize();
+ if (0 == planeSize) {
+ return false; // too big to allocate, abort
+ }
+ dst->fImage = SkMask::AllocImage(planeSize * 3);
+ memcpy(dst->fImage, alphaPlane, planeSize);
+ SkMask::FreeImage(alphaPlane);
+ }
+
+ // run the light direction through the matrix...
+ Light light = fLight;
+ matrix.mapVectors((SkVector*)(void*)light.fDirection,
+ (SkVector*)(void*)fLight.fDirection, 1);
+
+ // now restore the length of the XY component
+ // cast to SkVector so we can call setLength (this double cast silences alias warnings)
+ SkVector* vec = (SkVector*)(void*)light.fDirection;
+ vec->setLength(light.fDirection[0],
+ light.fDirection[1],
+ SkPoint::Length(fLight.fDirection[0], fLight.fDirection[1]));
+
+ SkEmbossMask::Emboss(dst, light);
+
+ // restore original alpha
+ memcpy(dst->fImage, src.fImage, src.computeImageSize());
+
+ return true;
+}
+
+sk_sp<SkFlattenable> SkEmbossMaskFilter::CreateProc(SkReadBuffer& buffer) {
+ Light light;
+ if (buffer.readByteArray(&light, sizeof(Light))) {
+ light.fPad = 0; // for the font-cache lookup to be clean
+ const SkScalar sigma = buffer.readScalar();
+ return Make(sigma, light);
+ }
+ return nullptr;
+}
+
+void SkEmbossMaskFilter::flatten(SkWriteBuffer& buffer) const {
+ Light tmpLight = fLight;
+ tmpLight.fPad = 0; // for the font-cache lookup to be clean
+ buffer.writeByteArray(&tmpLight, sizeof(tmpLight));
+ buffer.writeScalar(fBlurSigma);
+}
diff --git a/gfx/skia/skia/src/effects/SkEmbossMaskFilter.h b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.h
new file mode 100644
index 0000000000..aff67d9140
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkEmbossMaskFilter.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkEmbossMaskFilter_DEFINED
+#define SkEmbossMaskFilter_DEFINED
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+
+#include <cstdint>
+
+class SkMatrix;
+class SkReadBuffer;
+class SkWriteBuffer;
+struct SkIPoint;
+
+/** \class SkEmbossMaskFilter
+
+ This mask filter creates a 3D emboss look, by specifying a light and blur amount.
+*/
+class SkEmbossMaskFilter : public SkMaskFilterBase {
+public:
+ struct Light {
+ SkScalar fDirection[3]; // x,y,z
+ uint16_t fPad;
+ uint8_t fAmbient;
+ uint8_t fSpecular; // exponent, 4.4 right now
+ };
+
+ static sk_sp<SkMaskFilter> Make(SkScalar blurSigma, const Light& light);
+
+ // overrides from SkMaskFilter
+ // This method is not exported to java.
+ SkMask::Format getFormat() const override;
+ // This method is not exported to java.
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const override;
+
+protected:
+ SkEmbossMaskFilter(SkScalar blurSigma, const Light& light);
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkEmbossMaskFilter)
+
+ Light fLight;
+ SkScalar fBlurSigma;
+
+ using INHERITED = SkMaskFilter;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkHighContrastFilter.cpp b/gfx/skia/skia/src/effects/SkHighContrastFilter.cpp
new file mode 100644
index 0000000000..b663839d22
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkHighContrastFilter.cpp
@@ -0,0 +1,104 @@
+/*
+* Copyright 2017 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/effects/SkHighContrastFilter.h"
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+#ifdef SK_ENABLE_SKSL
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/core/SkString.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/base/SkTPin.h"
+#include "modules/skcms/skcms.h"
+#include "src/core/SkColorFilterPriv.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+
+#include <cfloat>
+
+sk_sp<SkColorFilter> SkHighContrastFilter::Make(const SkHighContrastConfig& config) {
+ if (!config.isValid()) {
+ return nullptr;
+ }
+
+ struct Uniforms { float grayscale, invertStyle, contrast; };
+
+ static constexpr char kHighContrastFilterCode[] =
+ "uniform half grayscale, invertStyle, contrast;"
+
+ // TODO(skia:13540): Investigate using $rgb_to_hsl from sksl_shared instead.
+ "half3 rgb_to_hsl(half3 c) {"
+ "half mx = max(max(c.r,c.g),c.b),"
+ "mn = min(min(c.r,c.g),c.b),"
+ "d = mx-mn,"
+ "invd = 1.0 / d,"
+ "g_lt_b = c.g < c.b ? 6.0 : 0.0;"
+
+ // We'd prefer to write these tests like `mx == c.r`, but on some GPUs max(x,y) is
+ // not always equal to either x or y. So we use long form, c.r >= c.g && c.r >= c.b.
+ "half h = (1/6.0) * (mx == mn ? 0.0 :"
+ /*mx==c.r*/ "c.r >= c.g && c.r >= c.b ? invd * (c.g - c.b) + g_lt_b :"
+ /*mx==c.g*/ "c.g >= c.b ? invd * (c.b - c.r) + 2.0"
+ /*mx==c.b*/ ": invd * (c.r - c.g) + 4.0);"
+ "half sum = mx+mn,"
+ "l = sum * 0.5,"
+ "s = mx == mn ? 0.0"
+ ": d / (l > 0.5 ? 2.0 - sum : sum);"
+ "return half3(h,s,l);"
+ "}"
+ "half4 main(half4 inColor) {"
+ "half4 c = inColor;" // linear unpremul RGBA in dst gamut
+ "if (grayscale == 1) {"
+ "c.rgb = dot(half3(0.2126, 0.7152, 0.0722), c.rgb).rrr;"
+ "}"
+ "if (invertStyle == 1) {" // brightness
+ "c.rgb = 1 - c.rgb;"
+ "} else if (invertStyle == 2) {" // lightness
+ "c.rgb = rgb_to_hsl(c.rgb);"
+ "c.b = 1 - c.b;"
+ "c.rgb = $hsl_to_rgb(c.rgb);"
+ "}"
+ "c.rgb = mix(half3(0.5), c.rgb, contrast);"
+ "return half4(saturate(c.rgb), c.a);"
+ "}";
+
+ static const SkRuntimeEffect* effect = SkMakeCachedRuntimeEffect(
+ SkRuntimeEffect::MakeForColorFilter,
+ SkString(kHighContrastFilterCode)
+ ).release();
+
+ SkASSERT(effect);
+
+ // A contrast setting of exactly +1 would divide by zero (1+c)/(1-c), so pull in to +1-ε.
+ // I'm not exactly sure why we've historically pinned -1 up to -1+ε, maybe just symmetry?
+ float c = SkTPin(config.fContrast,
+ -1.0f + FLT_EPSILON,
+ +1.0f - FLT_EPSILON);
+
+ Uniforms uniforms = {
+ config.fGrayscale ? 1.0f : 0.0f,
+ (float)config.fInvertStyle, // 0.0f for none, 1.0f for brightness, 2.0f for lightness
+ (1+c)/(1-c),
+ };
+
+ skcms_TransferFunction linear = SkNamedTransferFn::kLinear;
+ SkAlphaType unpremul = kUnpremul_SkAlphaType;
+ return SkColorFilterPriv::WithWorkingFormat(
+ effect->makeColorFilter(SkData::MakeWithCopy(&uniforms,sizeof(uniforms))),
+ &linear, nullptr/*use dst gamut*/, &unpremul);
+}
+#else // SK_ENABLE_SKSL
+sk_sp<SkColorFilter> SkHighContrastFilter::Make(const SkHighContrastConfig& config) {
+ // TODO(skia:12197)
+ return nullptr;
+}
+#endif
+
diff --git a/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp b/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp
new file mode 100644
index 0000000000..3fafaf00e3
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkLayerDrawLooper.cpp
@@ -0,0 +1,339 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_SUPPORT_LEGACY_DRAWLOOPER
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkString.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/effects/SkBlurDrawLooper.h"
+#include "include/effects/SkLayerDrawLooper.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkStringUtils.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/core/SkXfermodePriv.h"
+
+SkLayerDrawLooper::LayerInfo::LayerInfo() {
+ fPaintBits = 0; // ignore our paint fields
+ fColorMode = SkBlendMode::kDst; // ignore our color
+ fOffset.set(0, 0);
+ fPostTranslate = false;
+}
+
+SkLayerDrawLooper::SkLayerDrawLooper()
+ : fRecs(nullptr),
+ fCount(0) {
+}
+
+SkLayerDrawLooper::~SkLayerDrawLooper() {
+ Rec* rec = fRecs;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+SkLayerDrawLooper::Context*
+SkLayerDrawLooper::makeContext(SkArenaAlloc* alloc) const {
+ return alloc->make<LayerDrawLooperContext>(this);
+}
+
+static SkColor4f xferColor(const SkColor4f& src, const SkColor4f& dst, SkBlendMode mode) {
+ switch (mode) {
+ case SkBlendMode::kSrc:
+ return src;
+ case SkBlendMode::kDst:
+ return dst;
+ default: {
+ SkPMColor4f pmS = src.premul();
+ SkPMColor4f pmD = dst.premul();
+ return SkBlendMode_Apply(mode, pmS, pmD).unpremul();
+ }
+ }
+}
+
+// Even with kEntirePaint_Bits, we always ensure that the base paint's
+// text-encoding is respected, since that controls how we interpret the
+// text/length parameters of a draw[Pos]Text call.
+void SkLayerDrawLooper::LayerDrawLooperContext::ApplyInfo(
+ SkPaint* dst, const SkPaint& src, const LayerInfo& info) {
+ SkColor4f srcColor = src.getColor4f();
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // The framework may respect the alpha value on the original paint.
+ // Match this legacy behavior.
+ if (src.getAlpha() == 255) {
+ srcColor.fA = dst->getColor4f().fA;
+ }
+#endif
+ dst->setColor4f(xferColor(srcColor, dst->getColor4f(), (SkBlendMode)info.fColorMode),
+ sk_srgb_singleton());
+
+ BitFlags bits = info.fPaintBits;
+
+ if (0 == bits) {
+ return;
+ }
+ if (kEntirePaint_Bits == bits) {
+ // we've already computed these, so save it from the assignment
+ bool aa = dst->isAntiAlias();
+ bool di = dst->isDither();
+ SkColor4f c = dst->getColor4f();
+ *dst = src;
+ dst->setAntiAlias(aa);
+ dst->setDither(di);
+ dst->setColor4f(c, sk_srgb_singleton());
+ return;
+ }
+
+ if (bits & kStyle_Bit) {
+ dst->setStyle(src.getStyle());
+ dst->setStrokeWidth(src.getStrokeWidth());
+ dst->setStrokeMiter(src.getStrokeMiter());
+ dst->setStrokeCap(src.getStrokeCap());
+ dst->setStrokeJoin(src.getStrokeJoin());
+ }
+
+ if (bits & kPathEffect_Bit) {
+ dst->setPathEffect(src.refPathEffect());
+ }
+ if (bits & kMaskFilter_Bit) {
+ dst->setMaskFilter(src.refMaskFilter());
+ }
+ if (bits & kShader_Bit) {
+ dst->setShader(src.refShader());
+ }
+ if (bits & kColorFilter_Bit) {
+ dst->setColorFilter(src.refColorFilter());
+ }
+ if (bits & kXfermode_Bit) {
+ dst->setBlender(src.refBlender());
+ }
+
+ // we don't override these
+#if 0
+ dst->setTypeface(src.getTypeface());
+ dst->setTextSize(src.getTextSize());
+ dst->setTextScaleX(src.getTextScaleX());
+ dst->setRasterizer(src.getRasterizer());
+ dst->setLooper(src.getLooper());
+ dst->setTextEncoding(src.getTextEncoding());
+ dst->setHinting(src.getHinting());
+#endif
+}
+
+SkLayerDrawLooper::LayerDrawLooperContext::LayerDrawLooperContext(
+ const SkLayerDrawLooper* looper) : fCurrRec(looper->fRecs) {}
+
+bool SkLayerDrawLooper::LayerDrawLooperContext::next(Info* info, SkPaint* paint) {
+ if (nullptr == fCurrRec) {
+ return false;
+ }
+
+ ApplyInfo(paint, fCurrRec->fPaint, fCurrRec->fInfo);
+
+ if (info) {
+ info->fTranslate = fCurrRec->fInfo.fOffset;
+ info->fApplyPostCTM = fCurrRec->fInfo.fPostTranslate;
+ }
+ fCurrRec = fCurrRec->fNext;
+ return true;
+}
+
+bool SkLayerDrawLooper::asABlurShadow(BlurShadowRec* bsRec) const {
+ if (fCount != 2) {
+ return false;
+ }
+ const Rec* rec = fRecs;
+
+ // bottom layer needs to be just blur(maskfilter)
+ if ((rec->fInfo.fPaintBits & ~kMaskFilter_Bit)) {
+ return false;
+ }
+ if (SkBlendMode::kSrc != (SkBlendMode)rec->fInfo.fColorMode) {
+ return false;
+ }
+ const SkMaskFilter* mf = rec->fPaint.getMaskFilter();
+ if (nullptr == mf) {
+ return false;
+ }
+ SkMaskFilterBase::BlurRec maskBlur;
+ if (!as_MFB(mf)->asABlur(&maskBlur)) {
+ return false;
+ }
+
+ rec = rec->fNext;
+ // top layer needs to be "plain"
+ if (rec->fInfo.fPaintBits) {
+ return false;
+ }
+ if (SkBlendMode::kDst != (SkBlendMode)rec->fInfo.fColorMode) {
+ return false;
+ }
+ if (!rec->fInfo.fOffset.equals(0, 0)) {
+ return false;
+ }
+
+ if (bsRec) {
+ bsRec->fSigma = maskBlur.fSigma;
+ bsRec->fOffset = fRecs->fInfo.fOffset;
+ // TODO: Update BlurShadowRec to use SkColor4f?
+ bsRec->fColor = fRecs->fPaint.getColor();
+ bsRec->fStyle = maskBlur.fStyle;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkLayerDrawLooper::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeInt(fCount);
+
+ Rec* rec = fRecs;
+ for (int i = 0; i < fCount; i++) {
+ // Legacy "flagsmask" field -- now ignored, remove when we bump version
+ buffer.writeInt(0);
+
+ buffer.writeInt(rec->fInfo.fPaintBits);
+ buffer.writeInt((int)rec->fInfo.fColorMode);
+ buffer.writePoint(rec->fInfo.fOffset);
+ buffer.writeBool(rec->fInfo.fPostTranslate);
+ buffer.writePaint(rec->fPaint);
+ rec = rec->fNext;
+ }
+}
+
+sk_sp<SkFlattenable> SkLayerDrawLooper::CreateProc(SkReadBuffer& buffer) {
+ int count = buffer.readInt();
+
+#if defined(SK_BUILD_FOR_FUZZER)
+ if (count > 100) {
+ count = 100;
+ }
+#endif
+ Builder builder;
+ for (int i = 0; i < count; i++) {
+ LayerInfo info;
+ // Legacy "flagsmask" field -- now ignored, remove when we bump version
+ (void)buffer.readInt();
+
+ info.fPaintBits = buffer.readInt();
+ info.fColorMode = (SkBlendMode)buffer.readInt();
+ buffer.readPoint(&info.fOffset);
+ info.fPostTranslate = buffer.readBool();
+ *builder.addLayerOnTop(info) = buffer.readPaint();
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ }
+ return builder.detach();
+}
+
+SkLayerDrawLooper::Builder::Builder()
+ : fRecs(nullptr),
+ fTopRec(nullptr),
+ fCount(0) {
+}
+
+SkLayerDrawLooper::Builder::~Builder() {
+ Rec* rec = fRecs;
+ while (rec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+SkPaint* SkLayerDrawLooper::Builder::addLayer(const LayerInfo& info) {
+ fCount += 1;
+
+ Rec* rec = new Rec;
+ rec->fNext = fRecs;
+ rec->fInfo = info;
+ fRecs = rec;
+ if (nullptr == fTopRec) {
+ fTopRec = rec;
+ }
+
+ return &rec->fPaint;
+}
+
+void SkLayerDrawLooper::Builder::addLayer(SkScalar dx, SkScalar dy) {
+ LayerInfo info;
+
+ info.fOffset.set(dx, dy);
+ (void)this->addLayer(info);
+}
+
+SkPaint* SkLayerDrawLooper::Builder::addLayerOnTop(const LayerInfo& info) {
+ fCount += 1;
+
+ Rec* rec = new Rec;
+ rec->fNext = nullptr;
+ rec->fInfo = info;
+ if (nullptr == fRecs) {
+ fRecs = rec;
+ } else {
+ SkASSERT(fTopRec);
+ fTopRec->fNext = rec;
+ }
+ fTopRec = rec;
+
+ return &rec->fPaint;
+}
+
+sk_sp<SkDrawLooper> SkLayerDrawLooper::Builder::detach() {
+ SkLayerDrawLooper* looper = new SkLayerDrawLooper;
+ looper->fCount = fCount;
+ looper->fRecs = fRecs;
+
+ fCount = 0;
+ fRecs = nullptr;
+ fTopRec = nullptr;
+
+ return sk_sp<SkDrawLooper>(looper);
+}
+
+sk_sp<SkDrawLooper> SkBlurDrawLooper::Make(SkColor color, SkScalar sigma, SkScalar dx, SkScalar dy)
+{
+ return Make(SkColor4f::FromColor(color), sk_srgb_singleton(), sigma, dx, dy);
+}
+
+sk_sp<SkDrawLooper> SkBlurDrawLooper::Make(SkColor4f color, SkColorSpace* cs,
+ SkScalar sigma, SkScalar dx, SkScalar dy)
+{
+ sk_sp<SkMaskFilter> blur = nullptr;
+ if (sigma > 0.0f) {
+ blur = SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, sigma, true);
+ }
+
+ SkLayerDrawLooper::Builder builder;
+
+ // First layer
+ SkLayerDrawLooper::LayerInfo defaultLayer;
+ builder.addLayer(defaultLayer);
+
+ // Blur layer
+ SkLayerDrawLooper::LayerInfo blurInfo;
+ blurInfo.fColorMode = SkBlendMode::kSrc;
+ blurInfo.fPaintBits = SkLayerDrawLooper::kMaskFilter_Bit;
+ blurInfo.fOffset = SkVector::Make(dx, dy);
+ SkPaint* paint = builder.addLayer(blurInfo);
+ paint->setMaskFilter(std::move(blur));
+ paint->setColor4f(color, cs);
+
+ return builder.detach();
+}
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp b/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp
new file mode 100644
index 0000000000..045200f263
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkLumaColorFilter.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/effects/SkLumaColorFilter.h"
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkColorFilter;
+
+#ifdef SK_ENABLE_SKSL
+#include "include/core/SkData.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+
+sk_sp<SkColorFilter> SkLumaColorFilter::Make() {
+
+ static const SkRuntimeEffect* effect = SkMakeCachedRuntimeEffect(
+ SkRuntimeEffect::MakeForColorFilter,
+ "half4 main(half4 inColor) {"
+ "return saturate(dot(half3(0.2126, 0.7152, 0.0722), inColor.rgb)).000r;"
+ "}"
+ ).release();
+ SkASSERT(effect);
+
+ return effect->makeColorFilter(SkData::MakeEmpty());
+}
+#else // SK_ENABLE_SKSL
+sk_sp<SkColorFilter> SkLumaColorFilter::Make() {
+ // TODO(skia:12197)
+ return nullptr;
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkOpPE.h b/gfx/skia/skia/src/effects/SkOpPE.h
new file mode 100644
index 0000000000..11c968fc6d
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkOpPE.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOpPE_DEFINED
+#define SkOpPE_DEFINED
+
+#include "include/pathops/SkPathOps.h"
+#include "src/core/SkPathEffectBase.h"
+
+class SkOpPE : public SkPathEffectBase {
+public:
+ SkOpPE(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two, SkPathOp op);
+
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkOpPE)
+
+ bool computeFastBounds(SkRect* bounds) const override;
+
+ sk_sp<SkPathEffect> fOne;
+ sk_sp<SkPathEffect> fTwo;
+ SkPathOp fOp;
+
+ using INHERITED = SkPathEffectBase;
+};
+
+class SkMatrixPE : public SkPathEffectBase {
+public:
+ SkMatrixPE(const SkMatrix&);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkMatrixPE)
+
+ bool computeFastBounds(SkRect* bounds) const override {
+ if (bounds) {
+ fMatrix.mapRect(bounds);
+ }
+ return true;
+ }
+
+ SkMatrix fMatrix;
+
+ using INHERITED = SkPathEffectBase;
+};
+
+class SkStrokePE : public SkPathEffectBase {
+public:
+ SkStrokePE(SkScalar width, SkPaint::Join, SkPaint::Cap, SkScalar miter);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkStrokePE)
+
+ bool computeFastBounds(SkRect* bounds) const override;
+
+ SkScalar fWidth,
+ fMiter;
+ SkPaint::Join fJoin;
+ SkPaint::Cap fCap;
+
+ using INHERITED = SkPathEffectBase;
+};
+
+class SkStrokeAndFillPE : public SkPathEffectBase {
+public:
+ SkStrokeAndFillPE() {}
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkStrokeAndFillPE)
+
+ bool computeFastBounds(SkRect* bounds) const override {
+ // The effect's bounds depend on the StrokeRect that is not yet available
+ return false;
+ }
+
+ using INHERITED = SkPathEffectBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkOpPathEffect.cpp b/gfx/skia/skia/src/effects/SkOpPathEffect.cpp
new file mode 100644
index 0000000000..722f1401ae
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkOpPathEffect.cpp
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkOpPathEffect.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkPathEnums.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkPathEffectBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/SkOpPE.h"
+
+#include <utility>
+
+sk_sp<SkPathEffect> SkMergePathEffect::Make(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two,
+ SkPathOp op) {
+ return sk_sp<SkPathEffect>(new SkOpPE(std::move(one), std::move(two), op));
+}
+
+SkOpPE::SkOpPE(sk_sp<SkPathEffect> one, sk_sp<SkPathEffect> two, SkPathOp op)
+ : fOne(std::move(one)), fTwo(std::move(two)), fOp(op) {}
+
+bool SkOpPE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cull, const SkMatrix& ctm) const {
+ SkPath one, two;
+ if (fOne) {
+ if (!fOne->filterPath(&one, src, rec, cull, ctm)) {
+ return false;
+ }
+ } else {
+ one = src;
+ }
+ if (fTwo) {
+ if (!fTwo->filterPath(&two, src, rec, cull, ctm)) {
+ return false;
+ }
+ } else {
+ two = src;
+ }
+ return Op(one, two, fOp, dst);
+}
+
+bool SkOpPE::computeFastBounds(SkRect* bounds) const {
+ if (!bounds) {
+ return (!SkToBool(fOne) || as_PEB(fOne)->computeFastBounds(nullptr)) &&
+ (!SkToBool(fTwo) || as_PEB(fTwo)->computeFastBounds(nullptr));
+ }
+
+ // bounds will hold the result of the fOne while b2 holds the result of fTwo's fast bounds
+ SkRect b2 = *bounds;
+ if (fOne && !as_PEB(fOne)->computeFastBounds(bounds)) {
+ return false;
+ }
+ if (fTwo && !as_PEB(fTwo)->computeFastBounds(&b2)) {
+ return false;
+ }
+
+ switch (fOp) {
+ case SkPathOp::kIntersect_SkPathOp:
+ if (!bounds->intersect(b2)) {
+ bounds->setEmpty();
+ }
+ break;
+ case SkPathOp::kDifference_SkPathOp:
+ // (one - two) conservatively leaves one's bounds unmodified
+ break;
+ case SkPathOp::kReverseDifference_SkPathOp:
+ // (two - one) conservatively leaves two's bounds unmodified
+ *bounds = b2;
+ break;
+ case SkPathOp::kXOR_SkPathOp:
+ // fall through to union since XOR computes a subset of regular OR
+ case SkPathOp::kUnion_SkPathOp:
+ bounds->join(b2);
+ break;
+ }
+
+ return true;
+}
+
+void SkOpPE::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fOne.get());
+ buffer.writeFlattenable(fTwo.get());
+ buffer.write32(fOp);
+}
+
+sk_sp<SkFlattenable> SkOpPE::CreateProc(SkReadBuffer& buffer) {
+ auto one = buffer.readPathEffect();
+ auto two = buffer.readPathEffect();
+ SkPathOp op = buffer.read32LE(kReverseDifference_SkPathOp);
+ return buffer.isValid() ? SkMergePathEffect::Make(std::move(one), std::move(two), op) : nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkMatrixPathEffect::MakeTranslate(SkScalar dx, SkScalar dy) {
+ if (!SkScalarsAreFinite(dx, dy)) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkMatrixPE(SkMatrix::Translate(dx, dy)));
+}
+
+sk_sp<SkPathEffect> SkMatrixPathEffect::Make(const SkMatrix& matrix) {
+ if (!matrix.isFinite()) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkMatrixPE(matrix));
+}
+
+SkMatrixPE::SkMatrixPE(const SkMatrix& matrix) : fMatrix(matrix) {
+ SkASSERT(matrix.isFinite());
+}
+
+bool SkMatrixPE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const {
+ src.transform(fMatrix, dst);
+ return true;
+}
+
+void SkMatrixPE::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(fMatrix);
+}
+
+sk_sp<SkFlattenable> SkMatrixPE::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix mx;
+ buffer.readMatrix(&mx);
+ return buffer.isValid() ? SkMatrixPathEffect::Make(mx) : nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkStrokePathEffect::Make(SkScalar width, SkPaint::Join join, SkPaint::Cap cap,
+ SkScalar miter) {
+ if (!SkScalarsAreFinite(width, miter) || width < 0 || miter < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkPathEffect>(new SkStrokePE(width, join, cap, miter));
+}
+
+SkStrokePE::SkStrokePE(SkScalar width, SkPaint::Join join, SkPaint::Cap cap, SkScalar miter)
+ : fWidth(width), fMiter(miter), fJoin(join), fCap(cap) {}
+
+bool SkStrokePE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const {
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+ rec.setStrokeStyle(fWidth);
+ rec.setStrokeParams(fCap, fJoin, fMiter);
+ return rec.applyToPath(dst, src);
+}
+
+bool SkStrokePE::computeFastBounds(SkRect* bounds) const {
+ if (bounds) {
+ SkStrokeRec rec(SkStrokeRec::kFill_InitStyle);
+ rec.setStrokeStyle(fWidth);
+ rec.setStrokeParams(fCap, fJoin, fMiter);
+ bounds->outset(rec.getInflationRadius(), rec.getInflationRadius());
+ }
+ return true;
+}
+
+void SkStrokePE::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fWidth);
+ buffer.writeScalar(fMiter);
+ buffer.write32(fJoin);
+ buffer.write32(fCap);
+}
+
+sk_sp<SkFlattenable> SkStrokePE::CreateProc(SkReadBuffer& buffer) {
+ SkScalar width = buffer.readScalar();
+ SkScalar miter = buffer.readScalar();
+ SkPaint::Join join = buffer.read32LE(SkPaint::kLast_Join);
+ SkPaint::Cap cap = buffer.read32LE(SkPaint::kLast_Cap);
+ return buffer.isValid() ? SkStrokePathEffect::Make(width, join, cap, miter) : nullptr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include "include/effects/SkStrokeAndFillPathEffect.h"
+#include "src/core/SkPathPriv.h"
+
+sk_sp<SkPathEffect> SkStrokeAndFillPathEffect::Make() {
+ static SkPathEffect* strokeAndFill = new SkStrokeAndFillPE;
+ return sk_ref_sp(strokeAndFill);
+}
+
+void SkStrokeAndFillPE::flatten(SkWriteBuffer&) const {}
+
+static bool known_to_be_opposite_directions(const SkPath& a, const SkPath& b) {
+ auto a_dir = SkPathPriv::ComputeFirstDirection(a),
+ b_dir = SkPathPriv::ComputeFirstDirection(b);
+
+ return (a_dir == SkPathFirstDirection::kCCW &&
+ b_dir == SkPathFirstDirection::kCW)
+ ||
+ (a_dir == SkPathFirstDirection::kCW &&
+ b_dir == SkPathFirstDirection::kCCW);
+}
+
+bool SkStrokeAndFillPE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect*, const SkMatrix&) const {
+ // This one is weird, since we exist to allow this paint-style to go away. If we see it,
+ // just let the normal machine run its course.
+ if (rec->getStyle() == SkStrokeRec::kStrokeAndFill_Style) {
+ *dst = src;
+ return true;
+ }
+
+ if (rec->getStyle() == SkStrokeRec::kStroke_Style) {
+ if (!rec->applyToPath(dst, src)) {
+ return false;
+ }
+
+ if (known_to_be_opposite_directions(src, *dst)) {
+ dst->reverseAddPath(src);
+ } else {
+ dst->addPath(src);
+ }
+ } else {
+ *dst = src;
+ }
+ rec->setFillStyle();
+ return true;
+}
+
+sk_sp<SkFlattenable> SkStrokeAndFillPE::CreateProc(SkReadBuffer& buffer) {
+ return SkStrokeAndFillPathEffect::Make();
+}
diff --git a/gfx/skia/skia/src/effects/SkOverdrawColorFilter.cpp b/gfx/skia/skia/src/effects/SkOverdrawColorFilter.cpp
new file mode 100644
index 0000000000..5968ebbcf4
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkOverdrawColorFilter.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkOverdrawColorFilter.h"
+
+#include "include/core/SkColorFilter.h"
+
+#ifdef SK_ENABLE_SKSL
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkData.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+
+#include <utility>
+
+sk_sp<SkColorFilter> SkOverdrawColorFilter::MakeWithSkColors(const SkColor colors[kNumColors]) {
+ static const SkRuntimeEffect* effect = SkMakeCachedRuntimeEffect(
+ SkRuntimeEffect::MakeForColorFilter,
+ "uniform half4 color0;"
+ "uniform half4 color1;"
+ "uniform half4 color2;"
+ "uniform half4 color3;"
+ "uniform half4 color4;"
+ "uniform half4 color5;"
+
+ "half4 main(half4 color) {"
+ "half alpha = 255.0 * color.a;"
+ "color = alpha < 0.5 ? color0"
+ ": alpha < 1.5 ? color1"
+ ": alpha < 2.5 ? color2"
+ ": alpha < 3.5 ? color3"
+ ": alpha < 4.5 ? color4 : color5;"
+ "return color;"
+ "}"
+ ).release();
+
+ if (effect) {
+ auto data = SkData::MakeUninitialized(kNumColors * sizeof(SkPMColor4f));
+ SkPMColor4f* premul = (SkPMColor4f*)data->writable_data();
+ for (int i = 0; i < kNumColors; ++i) {
+ premul[i] = SkColor4f::FromColor(colors[i]).premul();
+ }
+ return effect->makeColorFilter(std::move(data));
+ }
+ return nullptr;
+}
+#else // SK_ENABLE_SKSL
+sk_sp<SkColorFilter> SkOverdrawColorFilter::MakeWithSkColors(const SkColor colors[kNumColors]) {
+ // TODO(skia:12197)
+ return nullptr;
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/SkShaderMaskFilter.cpp b/gfx/skia/skia/src/effects/SkShaderMaskFilter.cpp
new file mode 100644
index 0000000000..d4b34f0b22
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkShaderMaskFilter.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkShaderMaskFilter.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <utility>
+
+class SkMatrix;
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+struct GrFPArgs;
+#endif
+
+class SkShaderMF : public SkMaskFilterBase {
+public:
+ SkShaderMF(sk_sp<SkShader> shader) : fShader(std::move(shader)) {}
+
+ SkMask::Format getFormat() const override { return SkMask::kA8_Format; }
+
+ bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
+ SkIPoint* margin) const override;
+
+ void computeFastBounds(const SkRect& src, SkRect* dst) const override {
+ *dst = src;
+ }
+
+ bool asABlur(BlurRec*) const override { return false; }
+
+protected:
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+ bool onHasFragmentProcessor() const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkShaderMF)
+
+ sk_sp<SkShader> fShader;
+
+ SkShaderMF(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+
+ friend class SkShaderMaskFilter;
+
+ using INHERITED = SkMaskFilter;
+};
+
+sk_sp<SkFlattenable> SkShaderMF::CreateProc(SkReadBuffer& buffer) {
+ return SkShaderMaskFilter::Make(buffer.readShader());
+}
+
+void SkShaderMF::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fShader.get());
+}
+
+static void rect_memcpy(void* dst, size_t dstRB, const void* src, size_t srcRB,
+ size_t copyBytes, int rows) {
+ for (int i = 0; i < rows; ++i) {
+ memcpy(dst, src, copyBytes);
+ dst = (char*)dst + dstRB;
+ src = (const char*)src + srcRB;
+ }
+}
+
+bool SkShaderMF::filterMask(SkMask* dst, const SkMask& src, const SkMatrix& ctm,
+ SkIPoint* margin) const {
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ if (margin) {
+ margin->set(0, 0);
+ }
+ dst->fBounds = src.fBounds;
+ dst->fRowBytes = src.fBounds.width(); // need alignment?
+ dst->fFormat = SkMask::kA8_Format;
+
+ if (src.fImage == nullptr) {
+ dst->fImage = nullptr;
+ return true;
+ }
+ size_t size = dst->computeImageSize();
+ if (0 == size) {
+ return false; // too big to allocate, abort
+ }
+
+ // Allocate and initialize dst image with a copy of the src image
+ dst->fImage = SkMask::AllocImage(size);
+ rect_memcpy(dst->fImage, dst->fRowBytes, src.fImage, src.fRowBytes,
+ src.fBounds.width() * sizeof(uint8_t), src.fBounds.height());
+
+ // Now we have a dst-mask, just need to setup a canvas and draw into it
+ SkBitmap bitmap;
+ if (!bitmap.installMaskPixels(*dst)) {
+ return false;
+ }
+
+ SkPaint paint;
+ paint.setShader(fShader);
+ // this blendmode is the trick: we only draw the shader where the mask is
+ paint.setBlendMode(SkBlendMode::kSrcIn);
+
+ SkCanvas canvas(bitmap);
+ canvas.translate(-SkIntToScalar(dst->fBounds.fLeft), -SkIntToScalar(dst->fBounds.fTop));
+ canvas.concat(ctm);
+ canvas.drawPaint(paint);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#if defined(SK_GANESH)
+
+std::unique_ptr<GrFragmentProcessor>
+SkShaderMF::onAsFragmentProcessor(const GrFPArgs& args, const MatrixRec& mRec) const {
+ auto fp = as_SB(fShader)->asFragmentProcessor(args, mRec);
+ return GrFragmentProcessor::MulInputByChildAlpha(std::move(fp));
+}
+
+bool SkShaderMF::onHasFragmentProcessor() const {
+ return true;
+}
+
+#endif
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkMaskFilter> SkShaderMaskFilter::Make(sk_sp<SkShader> shader) {
+ return shader ? sk_sp<SkMaskFilter>(new SkShaderMF(std::move(shader))) : nullptr;
+}
+
+void SkShaderMaskFilter::RegisterFlattenables() { SK_REGISTER_FLATTENABLE(SkShaderMF); }
diff --git a/gfx/skia/skia/src/effects/SkTableColorFilter.cpp b/gfx/skia/skia/src/effects/SkTableColorFilter.cpp
new file mode 100644
index 0000000000..caab146aee
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTableColorFilter.cpp
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLSampleUsage.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkRasterPipelineOpContexts.h"
+#include "src/core/SkRasterPipelineOpList.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <cstdint>
+#include <memory>
+#include <tuple>
+#include <utility>
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Image_Graphite.h"
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/Log.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#include "src/gpu/graphite/RecorderPriv.h"
+
+namespace skgpu::graphite {
+class PipelineDataGatherer;
+}
+#endif
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrProcessorUnitTest.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
+
+class GrRecordingContext;
+struct GrShaderCaps;
+namespace skgpu { class KeyBuilder; }
+#endif
+
+#if GR_TEST_UTILS
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/base/SkRandom.h"
+#include "src/gpu/ganesh/GrTestUtils.h"
+#else
+class SkSurfaceProps;
+#endif
+
+#if defined(SK_ENABLE_SKSL)
+#include "src/core/SkVM.h"
+#endif
+
+class SkTable_ColorFilter final : public SkColorFilterBase {
+public:
+ SkTable_ColorFilter(const uint8_t tableA[], const uint8_t tableR[],
+ const uint8_t tableG[], const uint8_t tableB[]) {
+ fBitmap.allocPixels(SkImageInfo::MakeA8(256, 4));
+ uint8_t *a = fBitmap.getAddr8(0,0),
+ *r = fBitmap.getAddr8(0,1),
+ *g = fBitmap.getAddr8(0,2),
+ *b = fBitmap.getAddr8(0,3);
+ for (int i = 0; i < 256; i++) {
+ a[i] = tableA ? tableA[i] : i;
+ r[i] = tableR ? tableR[i] : i;
+ g[i] = tableG ? tableG[i] : i;
+ b[i] = tableB ? tableB[i] : i;
+ }
+ fBitmap.setImmutable();
+ }
+
+#if defined(SK_GANESH)
+ GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext*, const GrColorInfo&,
+ const SkSurfaceProps&) const override;
+#endif
+
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+ bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ SkRasterPipeline* p = rec.fPipeline;
+ if (!shaderIsOpaque) {
+ p->append(SkRasterPipelineOp::unpremul);
+ }
+
+ SkRasterPipeline_TablesCtx* tables = rec.fAlloc->make<SkRasterPipeline_TablesCtx>();
+ tables->a = fBitmap.getAddr8(0, 0);
+ tables->r = fBitmap.getAddr8(0, 1);
+ tables->g = fBitmap.getAddr8(0, 2);
+ tables->b = fBitmap.getAddr8(0, 3);
+ p->append(SkRasterPipelineOp::byte_tables, tables);
+
+ bool definitelyOpaque = shaderIsOpaque && tables->a[0xff] == 0xff;
+ if (!definitelyOpaque) {
+ p->append(SkRasterPipelineOp::premul);
+ }
+ return true;
+ }
+
+ skvm::Color onProgram(skvm::Builder* p, skvm::Color c,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms, SkArenaAlloc*) const override {
+
+ auto apply_table_to_component = [&](skvm::F32 c, const uint8_t* bytePtr) -> skvm::F32 {
+ skvm::I32 index = to_unorm(8, clamp01(c));
+ skvm::Uniform table = uniforms->pushPtr(bytePtr);
+ return from_unorm(8, gather8(table, index));
+ };
+
+ c = unpremul(c);
+ c.a = apply_table_to_component(c.a, fBitmap.getAddr8(0,0));
+ c.r = apply_table_to_component(c.r, fBitmap.getAddr8(0,1));
+ c.g = apply_table_to_component(c.g, fBitmap.getAddr8(0,2));
+ c.b = apply_table_to_component(c.b, fBitmap.getAddr8(0,3));
+ return premul(c);
+ }
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ buffer.writeByteArray(fBitmap.getAddr8(0,0), 4*256);
+ }
+
+private:
+ friend void ::SkRegisterTableColorFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkTable_ColorFilter)
+
+ SkBitmap fBitmap;
+};
+
+sk_sp<SkFlattenable> SkTable_ColorFilter::CreateProc(SkReadBuffer& buffer) {
+ uint8_t argb[4*256];
+ if (buffer.readByteArray(argb, sizeof(argb))) {
+ return SkColorFilters::TableARGB(argb+0*256, argb+1*256, argb+2*256, argb+3*256);
+ }
+ return nullptr;
+}
+
+#if defined(SK_GANESH)
+
+class ColorTableEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context,
+ const SkBitmap& bitmap);
+
+ ~ColorTableEffect() override {}
+
+ const char* name() const override { return "ColorTableEffect"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new ColorTableEffect(*this));
+ }
+
+ inline static constexpr int kTexEffectFPIndex = 0;
+ inline static constexpr int kInputFPIndex = 1;
+
+private:
+ std::unique_ptr<ProgramImpl> onMakeProgramImpl() const override;
+
+ void onAddToKey(const GrShaderCaps&, skgpu::KeyBuilder*) const override {}
+
+ bool onIsEqual(const GrFragmentProcessor&) const override { return true; }
+
+ ColorTableEffect(std::unique_ptr<GrFragmentProcessor> inputFP, GrSurfaceProxyView view);
+
+ explicit ColorTableEffect(const ColorTableEffect& that);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ using INHERITED = GrFragmentProcessor;
+};
+
+ColorTableEffect::ColorTableEffect(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrSurfaceProxyView view)
+ // Not bothering with table-specific optimizations.
+ : INHERITED(kColorTableEffect_ClassID, kNone_OptimizationFlags) {
+ this->registerChild(GrTextureEffect::Make(std::move(view), kUnknown_SkAlphaType),
+ SkSL::SampleUsage::Explicit());
+ this->registerChild(std::move(inputFP));
+}
+
+ColorTableEffect::ColorTableEffect(const ColorTableEffect& that)
+ : INHERITED(that) {}
+
+std::unique_ptr<GrFragmentProcessor::ProgramImpl> ColorTableEffect::onMakeProgramImpl() const {
+ class Impl : public ProgramImpl {
+ public:
+ void emitCode(EmitArgs& args) override {
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString inputColor = this->invokeChild(kInputFPIndex, args);
+ SkString a = this->invokeChild(kTexEffectFPIndex, args, "half2(coord.a, 0.5)");
+ SkString r = this->invokeChild(kTexEffectFPIndex, args, "half2(coord.r, 1.5)");
+ SkString g = this->invokeChild(kTexEffectFPIndex, args, "half2(coord.g, 2.5)");
+ SkString b = this->invokeChild(kTexEffectFPIndex, args, "half2(coord.b, 3.5)");
+ fragBuilder->codeAppendf(
+ "half4 coord = 255 * unpremul(%s) + 0.5;\n"
+ "half4 color = half4(%s.a, %s.a, %s.a, 1);\n"
+ "return color * %s.a;\n",
+ inputColor.c_str(), r.c_str(), g.c_str(), b.c_str(), a.c_str());
+ }
+ };
+
+ return std::make_unique<Impl>();
+}
+
+std::unique_ptr<GrFragmentProcessor> ColorTableEffect::Make(
+ std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context, const SkBitmap& bitmap) {
+ SkASSERT(kPremul_SkAlphaType == bitmap.alphaType());
+ SkASSERT(bitmap.isImmutable());
+
+ auto view = std::get<0>(GrMakeCachedBitmapProxyView(context,
+ bitmap,
+ /*label=*/"MakeColorTableEffect",
+ GrMipmapped::kNo));
+ if (!view) {
+ return nullptr;
+ }
+
+ return std::unique_ptr<GrFragmentProcessor>(new ColorTableEffect(std::move(inputFP),
+ std::move(view)));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ColorTableEffect)
+
+#if GR_TEST_UTILS
+
+
+std::unique_ptr<GrFragmentProcessor> ColorTableEffect::TestCreate(GrProcessorTestData* d) {
+ int flags = 0;
+ uint8_t luts[256][4];
+ do {
+ for (int i = 0; i < 4; ++i) {
+ flags |= d->fRandom->nextBool() ? (1 << i): 0;
+ }
+ } while (!flags);
+ for (int i = 0; i < 4; ++i) {
+ if (flags & (1 << i)) {
+ for (int j = 0; j < 256; ++j) {
+ luts[j][i] = SkToU8(d->fRandom->nextBits(8));
+ }
+ }
+ }
+ auto filter(SkColorFilters::TableARGB(
+ (flags & (1 << 0)) ? luts[0] : nullptr,
+ (flags & (1 << 1)) ? luts[1] : nullptr,
+ (flags & (1 << 2)) ? luts[2] : nullptr,
+ (flags & (1 << 3)) ? luts[3] : nullptr
+ ));
+ sk_sp<SkColorSpace> colorSpace = GrTest::TestColorSpace(d->fRandom);
+ SkSurfaceProps props; // default props for testing
+ auto [success, fp] = as_CFB(filter)->asFragmentProcessor(
+ d->inputFP(), d->context(),
+ GrColorInfo(GrColorType::kRGBA_8888, kUnknown_SkAlphaType, std::move(colorSpace)),
+ props);
+ SkASSERT(success);
+ return std::move(fp);
+}
+#endif
+
+GrFPResult SkTable_ColorFilter::asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext* context,
+ const GrColorInfo&,
+ const SkSurfaceProps&) const {
+ auto cte = ColorTableEffect::Make(std::move(inputFP), context, fBitmap);
+ return cte ? GrFPSuccess(std::move(cte)) : GrFPFailure(nullptr);
+}
+
+#endif // defined(SK_GANESH)
+
+#if defined(SK_GRAPHITE)
+
+void SkTable_ColorFilter::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ sk_sp<SkImage> image = RecorderPriv::CreateCachedImage(keyContext.recorder(), fBitmap);
+ if (!image) {
+ SKGPU_LOG_W("Couldn't create TableColorFilter's table");
+
+ // Return the input color as-is.
+ PassthroughShaderBlock::BeginBlock(keyContext, builder, gatherer);
+ builder->endBlock();
+ return;
+ }
+
+ TableColorFilterBlock::TableColorFilterData data;
+
+ auto [view, _] = as_IB(image)->asView(keyContext.recorder(), skgpu::Mipmapped::kNo);
+ data.fTextureProxy = view.refProxy();
+
+ TableColorFilterBlock::BeginBlock(keyContext, builder, gatherer, data);
+ builder->endBlock();
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkColorFilter> SkColorFilters::Table(const uint8_t table[256]) {
+ return sk_make_sp<SkTable_ColorFilter>(table, table, table, table);
+}
+
+sk_sp<SkColorFilter> SkColorFilters::TableARGB(const uint8_t tableA[256],
+ const uint8_t tableR[256],
+ const uint8_t tableG[256],
+ const uint8_t tableB[256]) {
+ if (!tableA && !tableR && !tableG && !tableB) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkTable_ColorFilter>(tableA, tableR, tableG, tableB);
+}
+
+void SkRegisterTableColorFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkTable_ColorFilter);
+}
diff --git a/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp b/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp
new file mode 100644
index 0000000000..36f5dac777
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTableMaskFilter.cpp
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkTableMaskFilter.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTPin.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+
+class SkMatrix;
+
+class SkTableMaskFilterImpl : public SkMaskFilterBase {
+public:
+ explicit SkTableMaskFilterImpl(const uint8_t table[256]);
+
+ SkMask::Format getFormat() const override;
+ bool filterMask(SkMask*, const SkMask&, const SkMatrix&, SkIPoint*) const override;
+
+protected:
+ ~SkTableMaskFilterImpl() override;
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkTableMaskFilterImpl)
+
+ SkTableMaskFilterImpl();
+
+ uint8_t fTable[256];
+
+ using INHERITED = SkMaskFilter;
+};
+
+SkTableMaskFilterImpl::SkTableMaskFilterImpl() {
+ for (int i = 0; i < 256; i++) {
+ fTable[i] = i;
+ }
+}
+
+SkTableMaskFilterImpl::SkTableMaskFilterImpl(const uint8_t table[256]) {
+ memcpy(fTable, table, sizeof(fTable));
+}
+
+SkTableMaskFilterImpl::~SkTableMaskFilterImpl() {}
+
+bool SkTableMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
+ const SkMatrix&, SkIPoint* margin) const {
+ if (src.fFormat != SkMask::kA8_Format) {
+ return false;
+ }
+
+ dst->fBounds = src.fBounds;
+ dst->fRowBytes = SkAlign4(dst->fBounds.width());
+ dst->fFormat = SkMask::kA8_Format;
+ dst->fImage = nullptr;
+
+ if (src.fImage) {
+ dst->fImage = SkMask::AllocImage(dst->computeImageSize());
+
+ const uint8_t* srcP = src.fImage;
+ uint8_t* dstP = dst->fImage;
+ const uint8_t* table = fTable;
+ int dstWidth = dst->fBounds.width();
+ int extraZeros = dst->fRowBytes - dstWidth;
+
+ for (int y = dst->fBounds.height() - 1; y >= 0; --y) {
+ for (int x = dstWidth - 1; x >= 0; --x) {
+ dstP[x] = table[srcP[x]];
+ }
+ srcP += src.fRowBytes;
+ // we can't just inc dstP by rowbytes, because if it has any
+ // padding between its width and its rowbytes, we need to zero those
+ // so that the bitters can read those safely if that is faster for
+ // them
+ dstP += dstWidth;
+ for (int i = extraZeros - 1; i >= 0; --i) {
+ *dstP++ = 0;
+ }
+ }
+ }
+
+ if (margin) {
+ margin->set(0, 0);
+ }
+ return true;
+}
+
+SkMask::Format SkTableMaskFilterImpl::getFormat() const {
+ return SkMask::kA8_Format;
+}
+
+void SkTableMaskFilterImpl::flatten(SkWriteBuffer& wb) const {
+ wb.writeByteArray(fTable, 256);
+}
+
+sk_sp<SkFlattenable> SkTableMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
+ uint8_t table[256];
+ if (!buffer.readByteArray(table, 256)) {
+ return nullptr;
+ }
+ return sk_sp<SkFlattenable>(SkTableMaskFilter::Create(table));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkMaskFilter* SkTableMaskFilter::Create(const uint8_t table[256]) {
+ return new SkTableMaskFilterImpl(table);
+}
+
+SkMaskFilter* SkTableMaskFilter::CreateGamma(SkScalar gamma) {
+ uint8_t table[256];
+ MakeGammaTable(table, gamma);
+ return new SkTableMaskFilterImpl(table);
+}
+
+SkMaskFilter* SkTableMaskFilter::CreateClip(uint8_t min, uint8_t max) {
+ uint8_t table[256];
+ MakeClipTable(table, min, max);
+ return new SkTableMaskFilterImpl(table);
+}
+
+void SkTableMaskFilter::MakeGammaTable(uint8_t table[256], SkScalar gamma) {
+ const float dx = 1 / 255.0f;
+ const float g = SkScalarToFloat(gamma);
+
+ float x = 0;
+ for (int i = 0; i < 256; i++) {
+ // float ee = powf(x, g) * 255;
+ table[i] = SkTPin(sk_float_round2int(powf(x, g) * 255), 0, 255);
+ x += dx;
+ }
+}
+
+void SkTableMaskFilter::MakeClipTable(uint8_t table[256], uint8_t min,
+ uint8_t max) {
+ if (0 == max) {
+ max = 1;
+ }
+ if (min >= max) {
+ min = max - 1;
+ }
+ SkASSERT(min < max);
+
+ SkFixed scale = (1 << 16) * 255 / (max - min);
+ memset(table, 0, min + 1);
+ for (int i = min + 1; i < max; i++) {
+ int value = SkFixedRoundToInt(scale * (i - min));
+ SkASSERT(value <= 255);
+ table[i] = value;
+ }
+ memset(table + max, 255, 256 - max);
+
+#if 0
+ int j;
+ for (j = 0; j < 256; j++) {
+ if (table[j]) {
+ break;
+ }
+ }
+ SkDebugf("%d %d start [%d]", min, max, j);
+ for (; j < 256; j++) {
+ SkDebugf(" %d", table[j]);
+ }
+ SkDebugf("\n\n");
+#endif
+}
diff --git a/gfx/skia/skia/src/effects/SkTrimPE.h b/gfx/skia/skia/src/effects/SkTrimPE.h
new file mode 100644
index 0000000000..4aa9420b99
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTrimPE.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTrimImpl_DEFINED
+#define SkTrimImpl_DEFINED
+
+#include "include/effects/SkTrimPathEffect.h"
+#include "src/core/SkPathEffectBase.h"
+
+class SkTrimPE : public SkPathEffectBase {
+public:
+ SkTrimPE(SkScalar startT, SkScalar stopT, SkTrimPathEffect::Mode);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ bool onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkTrimPE)
+
+ bool computeFastBounds(SkRect* bounds) const override {
+ // Trimming a path returns a subset of the input path so just return true and leave bounds
+ // unmodified
+ return true;
+ }
+
+ const SkScalar fStartT,
+ fStopT;
+ const SkTrimPathEffect::Mode fMode;
+
+ using INHERITED = SkPathEffectBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/effects/SkTrimPathEffect.cpp b/gfx/skia/skia/src/effects/SkTrimPathEffect.cpp
new file mode 100644
index 0000000000..abd25224c8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/SkTrimPathEffect.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkTrimPathEffect.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTPin.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/SkTrimPE.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class SkMatrix;
+class SkStrokeRec;
+struct SkRect;
+
+namespace {
+
+// Returns the number of contours iterated to satisfy the request.
+static size_t add_segments(const SkPath& src, SkScalar start, SkScalar stop, SkPath* dst,
+ bool requires_moveto = true) {
+ SkASSERT(start < stop);
+
+ SkPathMeasure measure(src, false);
+
+ SkScalar current_segment_offset = 0;
+ size_t contour_count = 1;
+
+ do {
+ const auto next_offset = current_segment_offset + measure.getLength();
+
+ if (start < next_offset) {
+ measure.getSegment(start - current_segment_offset,
+ stop - current_segment_offset,
+ dst, requires_moveto);
+
+ if (stop <= next_offset)
+ break;
+ }
+
+ contour_count++;
+ current_segment_offset = next_offset;
+ } while (measure.nextContour());
+
+ return contour_count;
+}
+
+} // namespace
+
+SkTrimPE::SkTrimPE(SkScalar startT, SkScalar stopT, SkTrimPathEffect::Mode mode)
+ : fStartT(startT), fStopT(stopT), fMode(mode) {}
+
+bool SkTrimPE::onFilterPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkMatrix&) const {
+ if (fStartT >= fStopT) {
+ SkASSERT(fMode == SkTrimPathEffect::Mode::kNormal);
+ return true;
+ }
+
+ // First pass: compute the total len.
+ SkScalar len = 0;
+ SkPathMeasure meas(src, false);
+ do {
+ len += meas.getLength();
+ } while (meas.nextContour());
+
+ const auto arcStart = len * fStartT,
+ arcStop = len * fStopT;
+
+ // Second pass: actually add segments.
+ if (fMode == SkTrimPathEffect::Mode::kNormal) {
+ // Normal mode -> one span.
+ if (arcStart < arcStop) {
+ add_segments(src, arcStart, arcStop, dst);
+ }
+ } else {
+ // Inverted mode -> one logical span which wraps around at the end -> two actual spans.
+ // In order to preserve closed path continuity:
+ //
+ // 1) add the second/tail span first
+ //
+ // 2) skip the head span move-to for single-closed-contour paths
+
+ bool requires_moveto = true;
+ if (arcStop < len) {
+ // since we're adding the "tail" first, this is the total number of contours
+ const auto contour_count = add_segments(src, arcStop, len, dst);
+
+ // if the path consists of a single closed contour, we don't want to disconnect
+ // the two parts with a moveto.
+ if (contour_count == 1 && src.isLastContourClosed()) {
+ requires_moveto = false;
+ }
+ }
+ if (0 < arcStart) {
+ add_segments(src, 0, arcStart, dst, requires_moveto);
+ }
+ }
+
+ return true;
+}
+
+void SkTrimPE::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeScalar(fStartT);
+ buffer.writeScalar(fStopT);
+ buffer.writeUInt(static_cast<uint32_t>(fMode));
+}
+
+sk_sp<SkFlattenable> SkTrimPE::CreateProc(SkReadBuffer& buffer) {
+ const auto start = buffer.readScalar(),
+ stop = buffer.readScalar();
+ const auto mode = buffer.readUInt();
+
+ return SkTrimPathEffect::Make(start, stop,
+ (mode & 1) ? SkTrimPathEffect::Mode::kInverted : SkTrimPathEffect::Mode::kNormal);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkPathEffect> SkTrimPathEffect::Make(SkScalar startT, SkScalar stopT, Mode mode) {
+ if (!SkScalarsAreFinite(startT, stopT)) {
+ return nullptr;
+ }
+
+ if (startT <= 0 && stopT >= 1 && mode == Mode::kNormal) {
+ return nullptr;
+ }
+
+ startT = SkTPin(startT, 0.f, 1.f);
+ stopT = SkTPin(stopT, 0.f, 1.f);
+
+ if (startT >= stopT && mode == Mode::kInverted) {
+ return nullptr;
+ }
+
+ return sk_sp<SkPathEffect>(new SkTrimPE(startT, stopT, mode));
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkAlphaThresholdImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkAlphaThresholdImageFilter.cpp
new file mode 100644
index 0000000000..f6dd4814e8
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkAlphaThresholdImageFilter.cpp
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkCPUTypes.h"
+#include "include/private/base/SkTPin.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrPaint.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SurfaceDrawContext.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+
+class GrRecordingContext;
+class SkSurfaceProps;
+enum GrSurfaceOrigin : int;
+namespace skgpu { enum class Protected : bool; }
+
+#endif // defined(SK_GANESH)
+
+namespace {
+
+class SkAlphaThresholdImageFilter final : public SkImageFilter_Base {
+public:
+ SkAlphaThresholdImageFilter(const SkRegion& region, SkScalar innerThreshold,
+ SkScalar outerThreshold, sk_sp<SkImageFilter> input,
+ const SkRect* cropRect = nullptr)
+ : INHERITED(&input, 1, cropRect)
+ , fRegion(region)
+ , fInnerThreshold(innerThreshold)
+ , fOuterThreshold(outerThreshold) {}
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+#if defined(SK_GANESH)
+ GrSurfaceProxyView createMaskTexture(GrRecordingContext*,
+ const SkMatrix&,
+ const SkIRect& bounds,
+ const SkSurfaceProps&) const;
+#endif
+
+private:
+ friend void ::SkRegisterAlphaThresholdImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkAlphaThresholdImageFilter)
+
+ SkRegion fRegion;
+ SkScalar fInnerThreshold;
+ SkScalar fOuterThreshold;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::AlphaThreshold(
+ const SkRegion& region, SkScalar innerMin, SkScalar outerMax, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect) {
+ innerMin = SkTPin(innerMin, 0.f, 1.f);
+ outerMax = SkTPin(outerMax, 0.f, 1.f);
+ if (!SkScalarIsFinite(innerMin) || !SkScalarIsFinite(outerMax)) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkAlphaThresholdImageFilter(
+ region, innerMin, outerMax, std::move(input), cropRect));
+}
+
+void SkRegisterAlphaThresholdImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkAlphaThresholdImageFilter);
+ SkFlattenable::Register("SkAlphaThresholdFilterImpl", SkAlphaThresholdImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkAlphaThresholdImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkScalar inner = buffer.readScalar();
+ SkScalar outer = buffer.readScalar();
+ SkRegion rgn;
+ buffer.readRegion(&rgn);
+ return SkImageFilters::AlphaThreshold(rgn, inner, outer, common.getInput(0), common.cropRect());
+}
+
+void SkAlphaThresholdImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fInnerThreshold);
+ buffer.writeScalar(fOuterThreshold);
+ buffer.writeRegion(fRegion);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+GrSurfaceProxyView SkAlphaThresholdImageFilter::createMaskTexture(
+ GrRecordingContext* rContext,
+ const SkMatrix& inMatrix,
+ const SkIRect& bounds,
+ const SkSurfaceProps& surfaceProps) const {
+ auto sdc = skgpu::ganesh::SurfaceDrawContext::MakeWithFallback(rContext,
+ GrColorType::kAlpha_8,
+ nullptr,
+ SkBackingFit::kApprox,
+ bounds.size(),
+ surfaceProps);
+ if (!sdc) {
+ return {};
+ }
+
+ SkRegion::Iterator iter(fRegion);
+ sdc->clear(SK_PMColor4fTRANSPARENT);
+
+ while (!iter.done()) {
+ GrPaint paint;
+ paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
+
+ SkRect rect = SkRect::Make(iter.rect());
+
+ sdc->drawRect(nullptr, std::move(paint), GrAA::kNo, inMatrix, rect);
+
+ iter.next();
+ }
+
+ return sdc->readSurfaceView();
+}
+
+static std::unique_ptr<GrFragmentProcessor> make_alpha_threshold_fp(
+ std::unique_ptr<GrFragmentProcessor> inputFP,
+ std::unique_ptr<GrFragmentProcessor> maskFP,
+ float innerThreshold,
+ float outerThreshold) {
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform shader maskFP;"
+ "uniform shader inputFP;"
+ "uniform half innerThreshold;"
+ "uniform half outerThreshold;"
+
+ "half4 main(float2 xy) {"
+ "half4 color = inputFP.eval(xy);"
+ "half4 mask_color = maskFP.eval(xy);"
+ "if (mask_color.a < 0.5) {"
+ "if (color.a > outerThreshold) {"
+ "half scale = outerThreshold / color.a;"
+ "color.rgb *= scale;"
+ "color.a = outerThreshold;"
+ "}"
+ "} else if (color.a < innerThreshold) {"
+ "half scale = innerThreshold / max(0.001, color.a);"
+ "color.rgb *= scale;"
+ "color.a = innerThreshold;"
+ "}"
+ "return color;"
+ "}"
+ );
+
+ return GrSkSLFP::Make(effect, "AlphaThreshold", /*inputFP=*/nullptr,
+ (outerThreshold >= 1.0f) ? GrSkSLFP::OptFlags::kPreservesOpaqueInput
+ : GrSkSLFP::OptFlags::kNone,
+ "maskFP", GrSkSLFP::IgnoreOptFlags(std::move(maskFP)),
+ "inputFP", std::move(inputFP),
+ "innerThreshold", innerThreshold,
+ "outerThreshold", outerThreshold);
+}
+#endif // defined(SK_GANESH)
+
+sk_sp<SkSpecialImage> SkAlphaThresholdImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ auto context = ctx.getContext();
+
+ GrSurfaceProxyView inputView = (input->view(context));
+ SkASSERT(inputView.asTextureProxy());
+ const skgpu::Protected isProtected = inputView.proxy()->isProtected();
+ const GrSurfaceOrigin origin = inputView.origin();
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+ bounds.offset(-inputOffset);
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-offset->fX), SkIntToScalar(-offset->fY));
+
+ GrSurfaceProxyView maskView = this->createMaskTexture(context, matrix, bounds,
+ ctx.surfaceProps());
+ if (!maskView) {
+ return nullptr;
+ }
+ auto maskFP = GrTextureEffect::Make(std::move(maskView), kPremul_SkAlphaType,
+ SkMatrix::Translate(-bounds.x(), -bounds.y()));
+
+ auto textureFP = GrTextureEffect::Make(
+ std::move(inputView), input->alphaType(),
+ SkMatrix::Translate(input->subset().x(), input->subset().y()));
+ textureFP = GrColorSpaceXformEffect::Make(std::move(textureFP),
+ input->getColorSpace(), input->alphaType(),
+ ctx.colorSpace(), kPremul_SkAlphaType);
+ if (!textureFP) {
+ return nullptr;
+ }
+
+ auto thresholdFP = make_alpha_threshold_fp(
+ std::move(textureFP), std::move(maskFP), fInnerThreshold, fOuterThreshold);
+ if (!thresholdFP) {
+ return nullptr;
+ }
+
+ return DrawWithFP(context, std::move(thresholdFP), bounds, ctx.colorType(),
+ ctx.colorSpace(), ctx.surfaceProps(), origin, isProtected);
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!inputBM.getPixels() || inputBM.width() <= 0 || inputBM.height() <= 0) {
+ return nullptr;
+ }
+
+
+ SkMatrix localInverse;
+ if (!ctx.ctm().invert(&localInverse)) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::MakeN32(bounds.width(), bounds.height(),
+ kPremul_SkAlphaType);
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ U8CPU innerThreshold = (U8CPU)(fInnerThreshold * 0xFF);
+ U8CPU outerThreshold = (U8CPU)(fOuterThreshold * 0xFF);
+ SkColor* dptr = dst.getAddr32(0, 0);
+ int dstWidth = dst.width(), dstHeight = dst.height();
+ SkIPoint srcOffset = { bounds.fLeft - inputOffset.fX, bounds.fTop - inputOffset.fY };
+ for (int y = 0; y < dstHeight; ++y) {
+ const SkColor* sptr = inputBM.getAddr32(srcOffset.fX, srcOffset.fY+y);
+
+ for (int x = 0; x < dstWidth; ++x) {
+ const SkColor& source = sptr[x];
+ SkColor outputColor(source);
+ SkPoint position;
+ localInverse.mapXY((SkScalar)x + bounds.fLeft, (SkScalar)y + bounds.fTop, &position);
+ if (fRegion.contains((int32_t)position.x(), (int32_t)position.y())) {
+ if (SkColorGetA(source) < innerThreshold) {
+ U8CPU alpha = SkColorGetA(source);
+ if (alpha == 0) {
+ alpha = 1;
+ }
+ float scale = (float)innerThreshold / alpha;
+ outputColor = SkColorSetARGB(innerThreshold,
+ (U8CPU)(SkColorGetR(source) * scale),
+ (U8CPU)(SkColorGetG(source) * scale),
+ (U8CPU)(SkColorGetB(source) * scale));
+ }
+ } else {
+ if (SkColorGetA(source) > outerThreshold) {
+ float scale = (float)outerThreshold / SkColorGetA(source);
+ outputColor = SkColorSetARGB(outerThreshold,
+ (U8CPU)(SkColorGetR(source) * scale),
+ (U8CPU)(SkColorGetG(source) * scale),
+ (U8CPU)(SkColorGetB(source) * scale));
+ }
+ }
+ dptr[y * dstWidth + x] = outputColor;
+ }
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst, ctx.surfaceProps());
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkArithmeticImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkArithmeticImageFilter.cpp
new file mode 100644
index 0000000000..c7fcb0662b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkArithmeticImageFilter.cpp
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkM44.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/SkColorData.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSamplerState.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#endif
+
+namespace {
+
+class SkArithmeticImageFilter final : public SkImageFilter_Base {
+public:
+ SkArithmeticImageFilter(float k1, float k2, float k3, float k4, bool enforcePMColor,
+ sk_sp<SkImageFilter> inputs[2], const SkRect* cropRect)
+ : INHERITED(inputs, 2, cropRect)
+ , fK{k1, k2, k3, k4}
+ , fEnforcePMColor(enforcePMColor) {}
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+ SkIRect onFilterBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+#if defined(SK_GANESH)
+ sk_sp<SkSpecialImage> filterImageGPU(const Context& ctx,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds) const;
+#endif
+
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void drawForeground(SkCanvas* canvas, SkSpecialImage*, const SkIRect&) const;
+
+private:
+ friend void ::SkRegisterArithmeticImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkArithmeticImageFilter)
+
+ bool onAffectsTransparentBlack() const override { return !SkScalarNearlyZero(fK[3]); }
+
+ SkV4 fK;
+ bool fEnforcePMColor;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::Arithmetic(
+ SkScalar k1, SkScalar k2, SkScalar k3, SkScalar k4, bool enforcePMColor,
+ sk_sp<SkImageFilter> background, sk_sp<SkImageFilter> foreground,
+ const CropRect& cropRect) {
+ if (!SkScalarIsFinite(k1) || !SkScalarIsFinite(k2) || !SkScalarIsFinite(k3) ||
+ !SkScalarIsFinite(k4)) {
+ return nullptr;
+ }
+
+ // are we nearly some other "std" mode?
+ int mode = -1; // illegal mode
+ if (SkScalarNearlyZero(k1) && SkScalarNearlyEqual(k2, SK_Scalar1) && SkScalarNearlyZero(k3) &&
+ SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kSrc;
+ } else if (SkScalarNearlyZero(k1) && SkScalarNearlyZero(k2) &&
+ SkScalarNearlyEqual(k3, SK_Scalar1) && SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kDst;
+ } else if (SkScalarNearlyZero(k1) && SkScalarNearlyZero(k2) && SkScalarNearlyZero(k3) &&
+ SkScalarNearlyZero(k4)) {
+ mode = (int)SkBlendMode::kClear;
+ }
+ if (mode >= 0) {
+ return SkImageFilters::Blend((SkBlendMode)mode, std::move(background),
+ std::move(foreground), cropRect);
+ }
+
+ sk_sp<SkImageFilter> inputs[2] = {std::move(background), std::move(foreground)};
+ return sk_sp<SkImageFilter>(
+ new SkArithmeticImageFilter(k1, k2, k3, k4, enforcePMColor, inputs, cropRect));
+}
+
+void SkRegisterArithmeticImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkArithmeticImageFilter);
+ SkFlattenable::Register("ArithmeticImageFilterImpl", SkArithmeticImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkArithmeticImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+ float k[4];
+ for (int i = 0; i < 4; ++i) {
+ k[i] = buffer.readScalar();
+ }
+ const bool enforcePMColor = buffer.readBool();
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ return SkImageFilters::Arithmetic(k[0], k[1], k[2], k[3], enforcePMColor, common.getInput(0),
+ common.getInput(1), common.cropRect());
+}
+
+void SkArithmeticImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ for (int i = 0; i < 4; ++i) {
+ buffer.writeScalar(fK[i]);
+ }
+ buffer.writeBool(fEnforcePMColor);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <bool EnforcePMColor>
+void arith_span(const SkV4& k, SkPMColor dst[], const SkPMColor src[], int count) {
+ const skvx::float4 k1 = k[0] * (1/255.0f),
+ k2 = k[1],
+ k3 = k[2],
+ k4 = k[3] * 255.0f + 0.5f;
+
+ for (int i = 0; i < count; i++) {
+ skvx::float4 s = skvx::cast<float>(skvx::byte4::Load(src+i)),
+ d = skvx::cast<float>(skvx::byte4::Load(dst+i)),
+ r = pin(k1*s*d + k2*s + k3*d + k4, skvx::float4(0.f), skvx::float4(255.f));
+ if (EnforcePMColor) {
+ auto a = skvx::shuffle<3,3,3,3>(r);
+ r = min(a, r);
+ }
+ skvx::cast<uint8_t>(r).store(dst+i);
+ }
+}
+
+// apply mode to src==transparent (0)
+template<bool EnforcePMColor> void arith_transparent(const SkV4& k, SkPMColor dst[], int count) {
+ const skvx::float4 k3 = k[2],
+ k4 = k[3] * 255.0f + 0.5f;
+
+ for (int i = 0; i < count; i++) {
+ skvx::float4 d = skvx::cast<float>(skvx::byte4::Load(dst+i)),
+ r = pin(k3*d + k4, skvx::float4(0.f), skvx::float4(255.f));
+ if (EnforcePMColor) {
+ auto a = skvx::shuffle<3,3,3,3>(r);
+ r = min(a, r);
+ }
+ skvx::cast<uint8_t>(r).store(dst+i);
+ }
+}
+
+static bool intersect(SkPixmap* dst, SkPixmap* src, int srcDx, int srcDy) {
+ SkIRect dstR = SkIRect::MakeWH(dst->width(), dst->height());
+ SkIRect srcR = SkIRect::MakeXYWH(srcDx, srcDy, src->width(), src->height());
+ SkIRect sect;
+ if (!sect.intersect(dstR, srcR)) {
+ return false;
+ }
+ *dst = SkPixmap(dst->info().makeDimensions(sect.size()),
+ dst->addr(sect.fLeft, sect.fTop),
+ dst->rowBytes());
+ *src = SkPixmap(src->info().makeDimensions(sect.size()),
+ src->addr(std::max(0, -srcDx), std::max(0, -srcDy)),
+ src->rowBytes());
+ return true;
+}
+
+sk_sp<SkSpecialImage> SkArithmeticImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint backgroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> background(this->filterInput(0, ctx, &backgroundOffset));
+
+ SkIPoint foregroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> foreground(this->filterInput(1, ctx, &foregroundOffset));
+
+ SkIRect foregroundBounds = SkIRect::MakeEmpty();
+ if (foreground) {
+ foregroundBounds = SkIRect::MakeXYWH(foregroundOffset.x(), foregroundOffset.y(),
+ foreground->width(), foreground->height());
+ }
+
+ SkIRect srcBounds = SkIRect::MakeEmpty();
+ if (background) {
+ srcBounds = SkIRect::MakeXYWH(backgroundOffset.x(), backgroundOffset.y(),
+ background->width(), background->height());
+ }
+
+ srcBounds.join(foregroundBounds);
+ if (srcBounds.isEmpty()) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ return this->filterImageGPU(ctx, background, backgroundOffset, foreground,
+ foregroundOffset, bounds);
+ }
+#endif
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0); // can't count on background to fully clear the background
+ canvas->translate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+
+ if (background) {
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ background->draw(canvas, SkIntToScalar(backgroundOffset.fX),
+ SkIntToScalar(backgroundOffset.fY), SkSamplingOptions(), &paint);
+ }
+
+ this->drawForeground(canvas, foreground.get(), foregroundBounds);
+
+ return surf->makeImageSnapshot();
+}
+
+SkIRect SkArithmeticImageFilter::onFilterBounds(const SkIRect& src,
+ const SkMatrix& ctm,
+ MapDirection dir,
+ const SkIRect* inputRect) const {
+ if (kReverse_MapDirection == dir) {
+ return INHERITED::onFilterBounds(src, ctm, dir, inputRect);
+ }
+
+ SkASSERT(2 == this->countInputs());
+
+ // result(i1,i2) = k1*i1*i2 + k2*i1 + k3*i2 + k4
+ // Note that background (getInput(0)) is i2, and foreground (getInput(1)) is i1.
+ auto i2 = this->getInput(0) ? this->getInput(0)->filterBounds(src, ctm, dir, nullptr) : src;
+ auto i1 = this->getInput(1) ? this->getInput(1)->filterBounds(src, ctm, dir, nullptr) : src;
+
+ // Arithmetic with non-zero k4 may influence the complete filter primitive
+ // region. [k4 > 0 => result(0,0) = k4 => result(i1,i2) >= k4]
+ if (!SkScalarNearlyZero(fK[3])) {
+ i1.join(i2);
+ return i1;
+ }
+
+    // If both k2 and k3 are non-zero, both i1 and i2 appear.
+ if (!SkScalarNearlyZero(fK[1]) && !SkScalarNearlyZero(fK[2])) {
+ i1.join(i2);
+ return i1;
+ }
+
+ // If k2 is non-zero, output can be produced whenever i1 is non-transparent.
+ // [k3 = k4 = 0 => result(i1,i2) = k1*i1*i2 + k2*i1 = (k1*i2 + k2)*i1]
+ if (!SkScalarNearlyZero(fK[1])) {
+ return i1;
+ }
+
+ // If k3 is non-zero, output can be produced whenever i2 is non-transparent.
+ // [k2 = k4 = 0 => result(i1,i2) = k1*i1*i2 + k3*i2 = (k1*i1 + k3)*i2]
+ if (!SkScalarNearlyZero(fK[2])) {
+ return i2;
+ }
+
+    // If just k1 is non-zero, output will only be produced where both inputs
+ // are non-transparent. Use intersection.
+ // [k1 > 0 and k2 = k3 = k4 = 0 => result(i1,i2) = k1*i1*i2]
+ if (!SkScalarNearlyZero(fK[0])) {
+ if (!i1.intersect(i2)) {
+ return SkIRect::MakeEmpty();
+ }
+ return i1;
+ }
+
+ // [k1 = k2 = k3 = k4 = 0 => result(i1,i2) = 0]
+ return SkIRect::MakeEmpty();
+}
+
+#if defined(SK_GANESH)
+
+std::unique_ptr<GrFragmentProcessor> make_arithmetic_fp(
+ std::unique_ptr<GrFragmentProcessor> srcFP,
+ std::unique_ptr<GrFragmentProcessor> dstFP,
+ const SkV4& k,
+ bool enforcePMColor) {
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform shader srcFP;"
+ "uniform shader dstFP;"
+ "uniform half4 k;"
+ "uniform half pmClamp;"
+ "half4 main(float2 xy) {"
+ "half4 src = srcFP.eval(xy);"
+ "half4 dst = dstFP.eval(xy);"
+ "half4 color = saturate(k.x * src * dst +"
+ "k.y * src +"
+ "k.z * dst +"
+ "k.w);"
+ "color.rgb = min(color.rgb, max(color.a, pmClamp));"
+ "return color;"
+ "}"
+ );
+ return GrSkSLFP::Make(effect, "arithmetic_fp", /*inputFP=*/nullptr, GrSkSLFP::OptFlags::kNone,
+ "srcFP", std::move(srcFP),
+ "dstFP", std::move(dstFP),
+ "k", k,
+ "pmClamp", enforcePMColor ? 0.0f : 1.0f);
+}
+
+sk_sp<SkSpecialImage> SkArithmeticImageFilter::filterImageGPU(
+ const Context& ctx,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds) const {
+ SkASSERT(ctx.gpuBacked());
+
+ auto rContext = ctx.getContext();
+
+ GrSurfaceProxyView backgroundView, foregroundView;
+
+ GrProtected isProtected = GrProtected::kNo;
+ if (background) {
+ backgroundView = background->view(rContext);
+ SkASSERT(backgroundView.proxy());
+ isProtected = backgroundView.proxy()->isProtected();
+ }
+
+ if (foreground) {
+ foregroundView = foreground->view(rContext);
+ SkASSERT(foregroundView.proxy());
+ isProtected = foregroundView.proxy()->isProtected();
+ }
+
+ std::unique_ptr<GrFragmentProcessor> fp;
+ const auto& caps = *ctx.getContext()->priv().caps();
+ GrSamplerState sampler(GrSamplerState::WrapMode::kClampToBorder,
+ GrSamplerState::Filter::kNearest);
+
+ if (background) {
+ SkRect bgSubset = SkRect::Make(background->subset());
+ SkMatrix backgroundMatrix = SkMatrix::Translate(
+ SkIntToScalar(bgSubset.left() - backgroundOffset.fX),
+ SkIntToScalar(bgSubset.top() - backgroundOffset.fY));
+ fp = GrTextureEffect::MakeSubset(std::move(backgroundView),
+ background->alphaType(),
+ backgroundMatrix,
+ sampler,
+ bgSubset,
+ caps);
+ fp = GrColorSpaceXformEffect::Make(std::move(fp),
+ background->getColorSpace(),
+ background->alphaType(),
+ ctx.colorSpace(),
+ kPremul_SkAlphaType);
+ } else {
+ fp = GrFragmentProcessor::MakeColor(SK_PMColor4fTRANSPARENT);
+ }
+
+ if (foreground) {
+ SkRect fgSubset = SkRect::Make(foreground->subset());
+ SkMatrix foregroundMatrix = SkMatrix::Translate(
+ SkIntToScalar(fgSubset.left() - foregroundOffset.fX),
+ SkIntToScalar(fgSubset.top() - foregroundOffset.fY));
+ auto fgFP = GrTextureEffect::MakeSubset(std::move(foregroundView),
+ foreground->alphaType(),
+ foregroundMatrix,
+ sampler,
+ fgSubset,
+ caps);
+ fgFP = GrColorSpaceXformEffect::Make(std::move(fgFP),
+ foreground->getColorSpace(),
+ foreground->alphaType(),
+ ctx.colorSpace(),
+ kPremul_SkAlphaType);
+ fp = make_arithmetic_fp(std::move(fgFP), std::move(fp), fK, fEnforcePMColor);
+ }
+
+ GrImageInfo info(ctx.grColorType(), kPremul_SkAlphaType, ctx.refColorSpace(), bounds.size());
+ auto sfc = rContext->priv().makeSFC(info,
+ "ArithmeticImageFilter_FilterImageGPU",
+ SkBackingFit::kApprox,
+ 1,
+ GrMipmapped::kNo,
+ isProtected,
+ kBottomLeft_GrSurfaceOrigin);
+ if (!sfc) {
+ return nullptr;
+ }
+
+ sfc->fillRectToRectWithFP(bounds, SkIRect::MakeSize(bounds.size()), std::move(fp));
+
+ return SkSpecialImage::MakeDeferredFromGpu(rContext,
+ SkIRect::MakeWH(bounds.width(), bounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ sfc->readSurfaceView(),
+ sfc->colorInfo(),
+ ctx.surfaceProps());
+}
+#endif
+
+void SkArithmeticImageFilter::drawForeground(SkCanvas* canvas, SkSpecialImage* img,
+ const SkIRect& fgBounds) const {
+ SkPixmap dst;
+ if (!canvas->peekPixels(&dst)) {
+ return;
+ }
+
+ const SkMatrix& ctm = canvas->getTotalMatrix();
+ SkASSERT(ctm.getType() <= SkMatrix::kTranslate_Mask);
+ const int dx = SkScalarRoundToInt(ctm.getTranslateX());
+ const int dy = SkScalarRoundToInt(ctm.getTranslateY());
+ // be sure to perform this offset using SkIRect, since it saturates to avoid overflows
+ const SkIRect fgoffset = fgBounds.makeOffset(dx, dy);
+
+ if (img) {
+ SkBitmap srcBM;
+ SkPixmap src;
+ if (!img->getROPixels(&srcBM)) {
+ return;
+ }
+ if (!srcBM.peekPixels(&src)) {
+ return;
+ }
+
+ auto proc = fEnforcePMColor ? arith_span<true> : arith_span<false>;
+ SkPixmap tmpDst = dst;
+ if (intersect(&tmpDst, &src, fgoffset.fLeft, fgoffset.fTop)) {
+ for (int y = 0; y < tmpDst.height(); ++y) {
+ proc(fK, tmpDst.writable_addr32(0, y), src.addr32(0, y), tmpDst.width());
+ }
+ }
+ }
+
+ // Now apply the mode with transparent-color to the outside of the fg image
+ SkRegion outside(SkIRect::MakeWH(dst.width(), dst.height()));
+ outside.op(fgoffset, SkRegion::kDifference_Op);
+ auto proc = fEnforcePMColor ? arith_transparent<true> : arith_transparent<false>;
+ for (SkRegion::Iterator iter(outside); !iter.done(); iter.next()) {
+ const SkIRect r = iter.rect();
+ for (int y = r.fTop; y < r.fBottom; ++y) {
+ proc(fK, dst.writable_addr32(r.fLeft, y), r.width());
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkBlendImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkBlendImageFilter.cpp
new file mode 100644
index 0000000000..23080e26a0
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkBlendImageFilter.cpp
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkBlender.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkClipOp.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/private/SkColorData.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <cstdint>
+#include <memory>
+#include <optional>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSamplerState.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#endif
+
+namespace {
+
+class SkBlendImageFilter : public SkImageFilter_Base {
+public:
+ SkBlendImageFilter(sk_sp<SkBlender> blender, sk_sp<SkImageFilter> inputs[2],
+ const SkRect* cropRect)
+ : INHERITED(inputs, 2, cropRect)
+ , fBlender(std::move(blender))
+ {
+ SkASSERT(fBlender);
+ }
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+ SkIRect onFilterBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+#if defined(SK_GANESH)
+ sk_sp<SkSpecialImage> filterImageGPU(const Context& ctx,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds) const;
+#endif
+
+ void flatten(SkWriteBuffer&) const override;
+
+ void drawForeground(SkCanvas* canvas, SkSpecialImage*, const SkIRect&) const;
+
+private:
+ friend void ::SkRegisterBlendImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkBlendImageFilter)
+
+ sk_sp<SkBlender> fBlender;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::Blend(SkBlendMode mode,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const CropRect& cropRect) {
+ sk_sp<SkImageFilter> inputs[2] = { std::move(background), std::move(foreground) };
+ return sk_sp<SkImageFilter>(new SkBlendImageFilter(SkBlender::Mode(mode), inputs, cropRect));
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Blend(sk_sp<SkBlender> blender,
+ sk_sp<SkImageFilter> background,
+ sk_sp<SkImageFilter> foreground,
+ const CropRect& cropRect) {
+ if (!blender) {
+ blender = SkBlender::Mode(SkBlendMode::kSrcOver);
+ }
+ sk_sp<SkImageFilter> inputs[2] = { std::move(background), std::move(foreground) };
+ return sk_sp<SkImageFilter>(new SkBlendImageFilter(blender, inputs, cropRect));
+}
+
+void SkRegisterBlendImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkBlendImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkXfermodeImageFilter_Base", SkBlendImageFilter::CreateProc);
+ SkFlattenable::Register("SkXfermodeImageFilterImpl", SkBlendImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkBlendImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+
+ sk_sp<SkBlender> blender;
+ const uint32_t mode = buffer.read32();
+ if (mode == kCustom_SkBlendMode) {
+ blender = buffer.readBlender();
+ } else {
+ if (mode > (unsigned)SkBlendMode::kLastMode) {
+ buffer.validate(false);
+ return nullptr;
+ }
+ blender = SkBlender::Mode((SkBlendMode)mode);
+ }
+ return SkImageFilters::Blend(std::move(blender), common.getInput(0), common.getInput(1),
+ common.cropRect());
+}
+
+void SkBlendImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ if (auto bm = as_BB(fBlender)->asBlendMode()) {
+ buffer.write32((unsigned)bm.value());
+ } else {
+ buffer.write32(kCustom_SkBlendMode);
+ buffer.writeFlattenable(fBlender.get());
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSpecialImage> SkBlendImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint backgroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> background(this->filterInput(0, ctx, &backgroundOffset));
+
+ SkIPoint foregroundOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> foreground(this->filterInput(1, ctx, &foregroundOffset));
+
+ SkIRect foregroundBounds = SkIRect::MakeEmpty();
+ if (foreground) {
+ foregroundBounds = SkIRect::MakeXYWH(foregroundOffset.x(), foregroundOffset.y(),
+ foreground->width(), foreground->height());
+ }
+
+ SkIRect srcBounds = SkIRect::MakeEmpty();
+ if (background) {
+ srcBounds = SkIRect::MakeXYWH(backgroundOffset.x(), backgroundOffset.y(),
+ background->width(), background->height());
+ }
+
+ srcBounds.join(foregroundBounds);
+ if (srcBounds.isEmpty()) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ return this->filterImageGPU(ctx, background, backgroundOffset,
+ foreground, foregroundOffset, bounds);
+ }
+#endif
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0); // can't count on background to fully clear the background
+ canvas->translate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+
+ if (background) {
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ background->draw(canvas,
+ SkIntToScalar(backgroundOffset.fX), SkIntToScalar(backgroundOffset.fY),
+ SkSamplingOptions(), &paint);
+ }
+
+ this->drawForeground(canvas, foreground.get(), foregroundBounds);
+
+ return surf->makeImageSnapshot();
+}
+
+SkIRect SkBlendImageFilter::onFilterBounds(const SkIRect& src,
+ const SkMatrix& ctm,
+ MapDirection dir,
+ const SkIRect* inputRect) const {
+ if (kReverse_MapDirection == dir) {
+ return INHERITED::onFilterBounds(src, ctm, dir, inputRect);
+ }
+
+ SkASSERT(!inputRect);
+ SkASSERT(2 == this->countInputs());
+ auto getBackground = [&]() {
+ return this->getInput(0) ? this->getInput(0)->filterBounds(src, ctm, dir, inputRect) : src;
+ };
+ auto getForeground = [&]() {
+ return this->getInput(1) ? this->getInput(1)->filterBounds(src, ctm, dir, inputRect) : src;
+ };
+ if (auto bm = as_BB(fBlender)->asBlendMode()) {
+ switch (bm.value()) {
+ case SkBlendMode::kClear:
+ return SkIRect::MakeEmpty();
+
+ case SkBlendMode::kSrc:
+ case SkBlendMode::kDstATop:
+ return getForeground();
+
+ case SkBlendMode::kDst:
+ case SkBlendMode::kSrcATop:
+ return getBackground();
+
+ case SkBlendMode::kSrcIn:
+ case SkBlendMode::kDstIn: {
+ auto result = getBackground();
+ if (!result.intersect(getForeground())) {
+ return SkIRect::MakeEmpty();
+ }
+ return result;
+ }
+ default: break;
+ }
+ }
+ auto result = getBackground();
+ result.join(getForeground());
+ return result;
+}
+
+void SkBlendImageFilter::drawForeground(SkCanvas* canvas, SkSpecialImage* img,
+ const SkIRect& fgBounds) const {
+ SkPaint paint;
+ paint.setBlender(fBlender);
+ if (img) {
+ img->draw(canvas, SkIntToScalar(fgBounds.fLeft), SkIntToScalar(fgBounds.fTop),
+ SkSamplingOptions(), &paint);
+ }
+
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->clipRect(SkRect::Make(fgBounds), SkClipOp::kDifference);
+ paint.setColor(0);
+ canvas->drawPaint(paint);
+}
+
+#if defined(SK_GANESH)
+
+sk_sp<SkSpecialImage> SkBlendImageFilter::filterImageGPU(const Context& ctx,
+ sk_sp<SkSpecialImage> background,
+ const SkIPoint& backgroundOffset,
+ sk_sp<SkSpecialImage> foreground,
+ const SkIPoint& foregroundOffset,
+ const SkIRect& bounds) const {
+ SkASSERT(ctx.gpuBacked());
+
+ auto rContext = ctx.getContext();
+
+ GrSurfaceProxyView backgroundView, foregroundView;
+
+ if (background) {
+ backgroundView = background->view(rContext);
+ }
+
+ if (foreground) {
+ foregroundView = foreground->view(rContext);
+ }
+
+ std::unique_ptr<GrFragmentProcessor> fp;
+ const auto& caps = *ctx.getContext()->priv().caps();
+ GrSamplerState sampler(GrSamplerState::WrapMode::kClampToBorder,
+ GrSamplerState::Filter::kNearest);
+
+ if (backgroundView.asTextureProxy()) {
+ SkRect bgSubset = SkRect::Make(background->subset());
+ SkMatrix bgMatrix = SkMatrix::Translate(
+ SkIntToScalar(bgSubset.left() - backgroundOffset.fX),
+ SkIntToScalar(bgSubset.top() - backgroundOffset.fY));
+ fp = GrTextureEffect::MakeSubset(std::move(backgroundView), background->alphaType(),
+ bgMatrix, sampler, bgSubset, caps);
+ fp = GrColorSpaceXformEffect::Make(std::move(fp), background->getColorSpace(),
+ background->alphaType(), ctx.colorSpace(),
+ kPremul_SkAlphaType);
+ } else {
+ fp = GrFragmentProcessor::MakeColor(SK_PMColor4fTRANSPARENT);
+ }
+
+ GrImageInfo info(ctx.grColorType(), kPremul_SkAlphaType, ctx.refColorSpace(), bounds.size());
+
+ if (foregroundView.asTextureProxy()) {
+ SkRect fgSubset = SkRect::Make(foreground->subset());
+ SkMatrix fgMatrix = SkMatrix::Translate(
+ SkIntToScalar(fgSubset.left() - foregroundOffset.fX),
+ SkIntToScalar(fgSubset.top() - foregroundOffset.fY));
+ auto fgFP = GrTextureEffect::MakeSubset(std::move(foregroundView), foreground->alphaType(),
+ fgMatrix, sampler, fgSubset, caps);
+ fgFP = GrColorSpaceXformEffect::Make(std::move(fgFP), foreground->getColorSpace(),
+ foreground->alphaType(), ctx.colorSpace(),
+ kPremul_SkAlphaType);
+
+ SkSurfaceProps props{}; // default OK; blend-image filters don't render text
+ GrFPArgs args(rContext, &info.colorInfo(), props);
+
+ fp = as_BB(fBlender)->asFragmentProcessor(std::move(fgFP), std::move(fp), args);
+ }
+
+ auto sfc = rContext->priv().makeSFC(
+ info, "BlendImageFilter_FilterImageGPU", SkBackingFit::kApprox);
+ if (!sfc) {
+ return nullptr;
+ }
+
+ sfc->fillRectToRectWithFP(bounds, SkIRect::MakeSize(bounds.size()), std::move(fp));
+
+ return SkSpecialImage::MakeDeferredFromGpu(rContext,
+ SkIRect::MakeWH(bounds.width(), bounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ sfc->readSurfaceView(),
+ sfc->colorInfo(),
+ ctx.surfaceProps());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkBlurImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkBlurImageFilter.cpp
new file mode 100644
index 0000000000..d8c94d7e5d
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkBlurImageFilter.cpp
@@ -0,0 +1,1038 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkMalloc.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkGpuBlurUtils.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SurfaceDrawContext.h"
+#endif // defined(SK_GANESH)
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
+ #include <xmmintrin.h>
+ #define SK_PREFETCH(ptr) _mm_prefetch(reinterpret_cast<const char*>(ptr), _MM_HINT_T0)
+#elif defined(__GNUC__)
+ #define SK_PREFETCH(ptr) __builtin_prefetch(ptr)
+#else
+ #define SK_PREFETCH(ptr)
+#endif
+
+namespace {
+
+class SkBlurImageFilter final : public SkImageFilter_Base {
+public:
+ SkBlurImageFilter(SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode,
+ sk_sp<SkImageFilter> input, const SkRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fSigma{sigmaX, sigmaY}
+ , fTileMode(tileMode) {}
+
+ SkRect computeFastBounds(const SkRect&) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+private:
+ friend void ::SkRegisterBlurImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkBlurImageFilter)
+
+#if defined(SK_GANESH)
+ sk_sp<SkSpecialImage> gpuFilter(
+ const Context& ctx, SkVector sigma,
+ const sk_sp<SkSpecialImage> &input,
+ SkIRect inputBounds, SkIRect dstBounds, SkIPoint inputOffset, SkIPoint* offset) const;
+#endif
+
+ SkSize fSigma;
+ SkTileMode fTileMode;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::Blur(
+ SkScalar sigmaX, SkScalar sigmaY, SkTileMode tileMode, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect) {
+ if (sigmaX < SK_ScalarNearlyZero && sigmaY < SK_ScalarNearlyZero && !cropRect) {
+ return input;
+ }
+ return sk_sp<SkImageFilter>(
+ new SkBlurImageFilter(sigmaX, sigmaY, tileMode, input, cropRect));
+}
+
+void SkRegisterBlurImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkBlurImageFilter);
+ SkFlattenable::Register("SkBlurImageFilterImpl", SkBlurImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkBlurImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkScalar sigmaX = buffer.readScalar();
+ SkScalar sigmaY = buffer.readScalar();
+ SkTileMode tileMode = buffer.read32LE(SkTileMode::kLastTileMode);
+ return SkImageFilters::Blur(
+ sigmaX, sigmaY, tileMode, common.getInput(0), common.cropRect());
+}
+
+void SkBlurImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fSigma.fWidth);
+ buffer.writeScalar(fSigma.fHeight);
+
+ SkASSERT(fTileMode <= SkTileMode::kLastTileMode);
+ buffer.writeInt(static_cast<int>(fTileMode));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+// This is defined by the SVG spec:
+// https://drafts.fxtf.org/filter-effects/#feGaussianBlurElement
+int calculate_window(double sigma) {
+ auto possibleWindow = static_cast<int>(floor(sigma * 3 * sqrt(2 * SK_DoublePI) / 4 + 0.5));
+ return std::max(1, possibleWindow);
+}
+
+// This rather arbitrary-looking value results in a maximum box blur kernel size
+// of 1000 pixels on the raster path, which matches the WebKit and Firefox
+// implementations. Since the GPU path does not compute a box blur, putting
+// the limit on sigma ensures consistent behaviour between the GPU and
+// raster paths.
+static constexpr SkScalar kMaxSigma = 532.f;
+
+static SkVector map_sigma(const SkSize& localSigma, const SkMatrix& ctm) {
+ SkVector sigma = SkVector::Make(localSigma.width(), localSigma.height());
+ ctm.mapVectors(&sigma, 1);
+ sigma.fX = std::min(SkScalarAbs(sigma.fX), kMaxSigma);
+ sigma.fY = std::min(SkScalarAbs(sigma.fY), kMaxSigma);
+ // Disable blurring on axes that were never finite, or became non-finite after mapping by ctm.
+ if (!SkScalarIsFinite(sigma.fX)) {
+ sigma.fX = 0.f;
+ }
+ if (!SkScalarIsFinite(sigma.fY)) {
+ sigma.fY = 0.f;
+ }
+ return sigma;
+}
+
+
+class Pass {
+public:
+ explicit Pass(int border) : fBorder(border) {}
+ virtual ~Pass() = default;
+
+ void blur(int srcLeft, int srcRight, int dstRight,
+ const uint32_t* src, int srcStride,
+ uint32_t* dst, int dstStride) {
+ this->startBlur();
+
+ auto srcStart = srcLeft - fBorder,
+ srcEnd = srcRight - fBorder,
+ dstEnd = dstRight,
+ srcIdx = srcStart,
+ dstIdx = 0;
+
+ const uint32_t* srcCursor = src;
+ uint32_t* dstCursor = dst;
+
+ if (dstIdx < srcIdx) {
+ // The destination pixels are not affected by the src pixels,
+ // change to zero as per the spec.
+ // https://drafts.fxtf.org/filter-effects/#FilterPrimitivesOverviewIntro
+ while (dstIdx < srcIdx) {
+ *dstCursor = 0;
+ dstCursor += dstStride;
+ SK_PREFETCH(dstCursor);
+ dstIdx++;
+ }
+ } else if (srcIdx < dstIdx) {
+ // The edge of the source is before the edge of the destination. Calculate the sums for
+ // the pixels before the start of the destination.
+ if (int commonEnd = std::min(dstIdx, srcEnd); srcIdx < commonEnd) {
+ // Preload the blur with values from src before dst is entered.
+ int n = commonEnd - srcIdx;
+ this->blurSegment(n, srcCursor, srcStride, nullptr, 0);
+ srcIdx += n;
+ srcCursor += n * srcStride;
+ }
+ if (srcIdx < dstIdx) {
+ // The weird case where src is out of pixels before dst is even started.
+ int n = dstIdx - srcIdx;
+ this->blurSegment(n, nullptr, 0, nullptr, 0);
+ srcIdx += n;
+ }
+ }
+
+ // Both srcIdx and dstIdx are in sync now, and can run in a 1:1 fashion. This is the
+ // normal mode of operation.
+ SkASSERT(srcIdx == dstIdx);
+ if (int commonEnd = std::min(dstEnd, srcEnd); dstIdx < commonEnd) {
+ int n = commonEnd - dstIdx;
+ this->blurSegment(n, srcCursor, srcStride, dstCursor, dstStride);
+ srcCursor += n * srcStride;
+ dstCursor += n * dstStride;
+ dstIdx += n;
+ srcIdx += n;
+ }
+
+ // Drain the remaining blur values into dst assuming 0's for the leading edge.
+ if (dstIdx < dstEnd) {
+ int n = dstEnd - dstIdx;
+ this->blurSegment(n, nullptr, 0, dstCursor, dstStride);
+ }
+ }
+
+protected:
+ virtual void startBlur() = 0;
+ virtual void blurSegment(
+ int n, const uint32_t* src, int srcStride, uint32_t* dst, int dstStride) = 0;
+
+private:
+ const int fBorder;
+};
+
+class PassMaker {
+public:
+ explicit PassMaker(int window) : fWindow{window} {}
+ virtual ~PassMaker() = default;
+ virtual Pass* makePass(void* buffer, SkArenaAlloc* alloc) const = 0;
+ virtual size_t bufferSizeBytes() const = 0;
+ int window() const {return fWindow;}
+
+private:
+ const int fWindow;
+};
+
+// Implement a scanline processor that uses a three-box filter to approximate a Gaussian blur.
+// The GaussPass is limited to processing sigmas < 135.
+class GaussPass final : public Pass {
+public:
+ // NB 136 is the largest sigma that will not cause a buffer full of 255 mask values to overflow
+ // using the Gauss filter. It also limits the size of buffers used to hold intermediate values.
+ // Explanation of maximums:
+ // sum0 = window * 255
+ // sum1 = window * sum0 -> window * window * 255
+ // sum2 = window * sum1 -> window * window * window * 255 -> window^3 * 255
+ //
+ // The value window^3 * 255 must fit in a uint32_t. So,
+ // window^3 < 2^32. window = 255.
+ //
+ // window = floor(sigma * 3 * sqrt(2 * kPi) / 4 + 0.5)
+ // For window <= 255, the largest value for sigma is 136.
+ static PassMaker* MakeMaker(double sigma, SkArenaAlloc* alloc) {
+ SkASSERT(0 <= sigma);
+ int window = calculate_window(sigma);
+ if (255 <= window) {
+ return nullptr;
+ }
+
+ class Maker : public PassMaker {
+ public:
+ explicit Maker(int window) : PassMaker{window} {}
+ Pass* makePass(void* buffer, SkArenaAlloc* alloc) const override {
+ return GaussPass::Make(this->window(), buffer, alloc);
+ }
+
+ size_t bufferSizeBytes() const override {
+ int window = this->window();
+ size_t onePassSize = window - 1;
+ // If the window is odd, then there is an obvious middle element. For even sizes
+ // 2 passes are shifted, and the last pass has an extra element. Like this:
+ // S
+ // aaaAaa
+ // bbBbbb
+ // cccCccc
+ // D
+ size_t bufferCount = (window & 1) == 1 ? 3 * onePassSize : 3 * onePassSize + 1;
+ return bufferCount * sizeof(skvx::Vec<4, uint32_t>);
+ }
+ };
+
+ return alloc->make<Maker>(window);
+ }
+
+ static GaussPass* Make(int window, void* buffers, SkArenaAlloc* alloc) {
+ // We don't need to store the trailing edge pixel in the buffer;
+ int passSize = window - 1;
+ skvx::Vec<4, uint32_t>* buffer0 = static_cast<skvx::Vec<4, uint32_t>*>(buffers);
+ skvx::Vec<4, uint32_t>* buffer1 = buffer0 + passSize;
+ skvx::Vec<4, uint32_t>* buffer2 = buffer1 + passSize;
+ // If the window is odd just one buffer is needed, but if it's even, then there is one
+ // more element on that pass.
+ skvx::Vec<4, uint32_t>* buffersEnd = buffer2 + ((window & 1) ? passSize : passSize + 1);
+
+ // Calculating the border is tricky. The border is the distance in pixels between the first
+ // dst pixel and the first src pixel (or the last src pixel and the last dst pixel).
+ // I will go through the odd case which is simpler, and then through the even case. Given a
+ // stack of filters seven wide for the odd case of three passes.
+ //
+ // S
+ // aaaAaaa
+ // bbbBbbb
+ // cccCccc
+ // D
+ //
+ // The furthest changed pixel is when the filters are in the following configuration.
+ //
+ // S
+ // aaaAaaa
+ // bbbBbbb
+ // cccCccc
+ // D
+ //
+ // The A pixel is calculated using the value S, the B uses A, and the C uses B, and
+ // finally D is C. So, with a window size of seven the border is nine. In the odd case, the
+ // border is 3*((window - 1)/2).
+ //
+ // For even cases the filter stack is more complicated. The spec specifies two passes
+ // of even filters and a final pass of odd filters. A stack for a width of six looks like
+ // this.
+ //
+ // S
+ // aaaAaa
+ // bbBbbb
+ // cccCccc
+ // D
+ //
+ // The furthest pixel looks like this.
+ //
+ // S
+ // aaaAaa
+ // bbBbbb
+ // cccCccc
+ // D
+ //
+ // For a window of six, the border value is eight. In the even case the border is 3 *
+ // (window/2) - 1.
+ int border = (window & 1) == 1 ? 3 * ((window - 1) / 2) : 3 * (window / 2) - 1;
+
+ // If the window is odd then the divisor is just window ^ 3 otherwise,
+ // it is window * window * (window + 1) = window ^ 3 + window ^ 2;
+ int window2 = window * window;
+ int window3 = window2 * window;
+ int divisor = (window & 1) == 1 ? window3 : window3 + window2;
+ return alloc->make<GaussPass>(buffer0, buffer1, buffer2, buffersEnd, border, divisor);
+ }
+
+ GaussPass(skvx::Vec<4, uint32_t>* buffer0,
+ skvx::Vec<4, uint32_t>* buffer1,
+ skvx::Vec<4, uint32_t>* buffer2,
+ skvx::Vec<4, uint32_t>* buffersEnd,
+ int border,
+ int divisor)
+ : Pass{border}
+ , fBuffer0{buffer0}
+ , fBuffer1{buffer1}
+ , fBuffer2{buffer2}
+ , fBuffersEnd{buffersEnd}
+ , fDivider(divisor) {}
+
+private:
+ void startBlur() override {
+ skvx::Vec<4, uint32_t> zero = {0u, 0u, 0u, 0u};
+ zero.store(fSum0);
+ zero.store(fSum1);
+ auto half = fDivider.half();
+ skvx::Vec<4, uint32_t>{half, half, half, half}.store(fSum2);
+ sk_bzero(fBuffer0, (fBuffersEnd - fBuffer0) * sizeof(skvx::Vec<4, uint32_t>));
+
+ fBuffer0Cursor = fBuffer0;
+ fBuffer1Cursor = fBuffer1;
+ fBuffer2Cursor = fBuffer2;
+ }
+
+ // GaussPass implements the common three pass box filter approximation of Gaussian blur,
+ // but combines all three passes into a single pass. This approach is facilitated by three
+ // circular buffers the width of the window which track values for trailing edges of each of
+ // the three passes. This allows the algorithm to use more precision in the calculation
+ // because the values are not rounded each pass. And this implementation also avoids a trap
+ // that's easy to fall into resulting in blending in too many zeroes near the edge.
+ //
+ // In general, a window sum has the form:
+ // sum_n+1 = sum_n + leading_edge - trailing_edge.
+ // If instead we do the subtraction at the end of the previous iteration, we can just
+ // calculate the sums instead of having to do the subtractions too.
+ //
+ // In previous iteration:
+ // sum_n+1 = sum_n - trailing_edge.
+ //
+ // In this iteration:
+ // sum_n+1 = sum_n + leading_edge.
+ //
+ // Now we can stack all three sums and do them at once. Sum0 gets its leading edge from the
+ // actual data. Sum1's leading edge is just Sum0, and Sum2's leading edge is Sum1. So, doing the
+ // three passes at the same time has the form:
+ //
+ // sum0_n+1 = sum0_n + leading edge
+ // sum1_n+1 = sum1_n + sum0_n+1
+ // sum2_n+1 = sum2_n + sum1_n+1
+ //
+ // sum2_n+1 / window^3 is the new value of the destination pixel.
+ //
+ // Reduce the sums by the trailing edges which were stored in the circular buffers for the
+ // next go around. This is the case for odd sized windows; for even windows the third
+ // circular buffer is one larger than the first two circular buffers.
+ //
+ // sum2_n+2 = sum2_n+1 - buffer2[i];
+ // buffer2[i] = sum1;
+ // sum1_n+2 = sum1_n+1 - buffer1[i];
+ // buffer1[i] = sum0;
+ // sum0_n+2 = sum0_n+1 - buffer0[i];
+ // buffer0[i] = leading edge
+ void blurSegment(
+ int n, const uint32_t* src, int srcStride, uint32_t* dst, int dstStride) override {
+ skvx::Vec<4, uint32_t>* buffer0Cursor = fBuffer0Cursor;
+ skvx::Vec<4, uint32_t>* buffer1Cursor = fBuffer1Cursor;
+ skvx::Vec<4, uint32_t>* buffer2Cursor = fBuffer2Cursor;
+ skvx::Vec<4, uint32_t> sum0 = skvx::Vec<4, uint32_t>::Load(fSum0);
+ skvx::Vec<4, uint32_t> sum1 = skvx::Vec<4, uint32_t>::Load(fSum1);
+ skvx::Vec<4, uint32_t> sum2 = skvx::Vec<4, uint32_t>::Load(fSum2);
+
+ // Given an expanded input pixel, move the window ahead using the leadingEdge value.
+ auto processValue = [&](const skvx::Vec<4, uint32_t>& leadingEdge) {
+ sum0 += leadingEdge;
+ sum1 += sum0;
+ sum2 += sum1;
+
+ skvx::Vec<4, uint32_t> blurred = fDivider.divide(sum2);
+
+ sum2 -= *buffer2Cursor;
+ *buffer2Cursor = sum1;
+ buffer2Cursor = (buffer2Cursor + 1) < fBuffersEnd ? buffer2Cursor + 1 : fBuffer2;
+ sum1 -= *buffer1Cursor;
+ *buffer1Cursor = sum0;
+ buffer1Cursor = (buffer1Cursor + 1) < fBuffer2 ? buffer1Cursor + 1 : fBuffer1;
+ sum0 -= *buffer0Cursor;
+ *buffer0Cursor = leadingEdge;
+ buffer0Cursor = (buffer0Cursor + 1) < fBuffer1 ? buffer0Cursor + 1 : fBuffer0;
+
+ return skvx::cast<uint8_t>(blurred);
+ };
+
+ auto loadEdge = [&](const uint32_t* srcCursor) {
+ return skvx::cast<uint32_t>(skvx::Vec<4, uint8_t>::Load(srcCursor));
+ };
+
+ if (!src && !dst) {
+ while (n --> 0) {
+ (void)processValue(0);
+ }
+ } else if (src && !dst) {
+ while (n --> 0) {
+ (void)processValue(loadEdge(src));
+ src += srcStride;
+ }
+ } else if (!src && dst) {
+ while (n --> 0) {
+ processValue(0u).store(dst);
+ dst += dstStride;
+ }
+ } else if (src && dst) {
+ while (n --> 0) {
+ processValue(loadEdge(src)).store(dst);
+ src += srcStride;
+ dst += dstStride;
+ }
+ }
+
+ // Store the state
+ fBuffer0Cursor = buffer0Cursor;
+ fBuffer1Cursor = buffer1Cursor;
+ fBuffer2Cursor = buffer2Cursor;
+
+ sum0.store(fSum0);
+ sum1.store(fSum1);
+ sum2.store(fSum2);
+ }
+
+ skvx::Vec<4, uint32_t>* const fBuffer0;
+ skvx::Vec<4, uint32_t>* const fBuffer1;
+ skvx::Vec<4, uint32_t>* const fBuffer2;
+ skvx::Vec<4, uint32_t>* const fBuffersEnd;
+ const skvx::ScaledDividerU32 fDivider;
+
+ // blur state
+ char fSum0[sizeof(skvx::Vec<4, uint32_t>)];
+ char fSum1[sizeof(skvx::Vec<4, uint32_t>)];
+ char fSum2[sizeof(skvx::Vec<4, uint32_t>)];
+ skvx::Vec<4, uint32_t>* fBuffer0Cursor;
+ skvx::Vec<4, uint32_t>* fBuffer1Cursor;
+ skvx::Vec<4, uint32_t>* fBuffer2Cursor;
+};
+
+// Implement a scanline processor that uses a two-box filter to approximate a Tent filter.
+// The TentPass is limited to processing sigmas < 2183.
+class TentPass final : public Pass {
+public:
+ // NB 2183 is the largest sigma that will not cause a buffer full of 255 mask values to overflow
+ // using the Tent filter. It also limits the size of buffers used to hold intermediate values.
+ // Explanation of maximums:
+ // sum0 = window * 255
+ // sum1 = window * sum0 -> window * window * 255
+ //
+ // The value window^2 * 255 must fit in a uint32_t. So,
+ // window^2 < 2^32. window = 4104.
+ //
+ // window = floor(sigma * 3 * sqrt(2 * kPi) / 4 + 0.5)
+ // For window <= 4104, the largest value for sigma is 2183.
+ static PassMaker* MakeMaker(double sigma, SkArenaAlloc* alloc) {
+ SkASSERT(0 <= sigma);
+ int gaussianWindow = calculate_window(sigma);
+ // This is a naive method of using the window size for the Gaussian blur to calculate the
+ // window size for the Tent blur. This seems to work well in practice.
+ //
+ // We can use a single pixel to generate the effective blur area given a window size. For
+ // the Gaussian blur this is 3 * window size. For the Tent filter this is 2 * window size.
+ int tentWindow = 3 * gaussianWindow / 2;
+ if (tentWindow >= 4104) {
+ return nullptr;
+ }
+
+ class Maker : public PassMaker {
+ public:
+ explicit Maker(int window) : PassMaker{window} {}
+ Pass* makePass(void* buffer, SkArenaAlloc* alloc) const override {
+ return TentPass::Make(this->window(), buffer, alloc);
+ }
+
+ size_t bufferSizeBytes() const override {
+ size_t onePassSize = this->window() - 1;
+ // If the window is odd, then there is an obvious middle element. For even sizes 2
+ // passes are shifted, and the last pass has an extra element. Like this:
+ // S
+ // aaaAaa
+ // bbBbbb
+ // D
+ size_t bufferCount = 2 * onePassSize;
+ return bufferCount * sizeof(skvx::Vec<4, uint32_t>);
+ }
+ };
+
+ return alloc->make<Maker>(tentWindow);
+ }
+
+ static TentPass* Make(int window, void* buffers, SkArenaAlloc* alloc) {
+ if (window > 4104) {
+ return nullptr;
+ }
+
+ // We don't need to store the trailing edge pixel in the buffer;
+ int passSize = window - 1;
+ skvx::Vec<4, uint32_t>* buffer0 = static_cast<skvx::Vec<4, uint32_t>*>(buffers);
+ skvx::Vec<4, uint32_t>* buffer1 = buffer0 + passSize;
+ skvx::Vec<4, uint32_t>* buffersEnd = buffer1 + passSize;
+
+ // Calculating the border is tricky. The border is the distance in pixels between the first
+ // dst pixel and the first src pixel (or the last src pixel and the last dst pixel).
+ // I will go through the odd case which is simpler, and then through the even case. Given a
+ // stack of filters seven wide for the odd case of three passes.
+ //
+ // S
+ // aaaAaaa
+ // bbbBbbb
+ // D
+ //
+ // The furthest changed pixel is when the filters are in the following configuration.
+ //
+ // S
+ // aaaAaaa
+ // bbbBbbb
+ // D
+ //
+ // The A pixel is calculated using the value S, the B uses A, and the D uses B.
+ // So, with a window size of seven the border is six. In the odd case, the border is
+ // window - 1.
+ //
+ // For even cases the filter stack is more complicated. It uses two passes
+ // of even filters offset from each other. A stack for a width of six looks like
+ // this.
+ //
+ // S
+ // aaaAaa
+ // bbBbbb
+ // D
+ //
+ // The furthest pixel looks like this.
+ //
+ // S
+ // aaaAaa
+ // bbBbbb
+ // D
+ //
+ // For a window of six, the border value is 5. In the even case the border is
+ // window - 1.
+ int border = window - 1;
+
+ int divisor = window * window;
+ return alloc->make<TentPass>(buffer0, buffer1, buffersEnd, border, divisor);
+ }
+
+ TentPass(skvx::Vec<4, uint32_t>* buffer0,
+ skvx::Vec<4, uint32_t>* buffer1,
+ skvx::Vec<4, uint32_t>* buffersEnd,
+ int border,
+ int divisor)
+ : Pass{border}
+ , fBuffer0{buffer0}
+ , fBuffer1{buffer1}
+ , fBuffersEnd{buffersEnd}
+ , fDivider(divisor) {}
+
+private:
+ void startBlur() override {
+ skvx::Vec<4, uint32_t>{0u, 0u, 0u, 0u}.store(fSum0);
+ auto half = fDivider.half();
+ skvx::Vec<4, uint32_t>{half, half, half, half}.store(fSum1);
+ sk_bzero(fBuffer0, (fBuffersEnd - fBuffer0) * sizeof(skvx::Vec<4, uint32_t>));
+
+ fBuffer0Cursor = fBuffer0;
+ fBuffer1Cursor = fBuffer1;
+ }
+
+ // TentPass implements the common two pass box filter approximation of Tent filter,
+ // but combines both passes into a single pass. This approach is facilitated by two
+ // circular buffers the width of the window which track values for trailing edges of each of
+ // both passes. This allows the algorithm to use more precision in the calculation
+ // because the values are not rounded each pass. And this implementation also avoids a trap
+ // that's easy to fall into resulting in blending in too many zeroes near the edge.
+ //
+ // In general, a window sum has the form:
+ // sum_n+1 = sum_n + leading_edge - trailing_edge.
+ // If instead we do the subtraction at the end of the previous iteration, we can just
+ // calculate the sums instead of having to do the subtractions too.
+ //
+ // In previous iteration:
+ // sum_n+1 = sum_n - trailing_edge.
+ //
+ // In this iteration:
+ // sum_n+1 = sum_n + leading_edge.
+ //
+ // Now we can stack both sums and do them at once. Sum0 gets its leading edge from the
+ // actual data, and Sum1's leading edge is just Sum0. So, doing
+ // both passes at the same time has the form:
+ //
+ // sum0_n+1 = sum0_n + leading edge
+ // sum1_n+1 = sum1_n + sum0_n+1
+ //
+ // sum1_n+1 / window^2 is the new value of the destination pixel.
+ //
+ // Reduce the sums by the trailing edges which were stored in the circular buffers for the
+ // next go around.
+ //
+ // sum1_n+2 = sum1_n+1 - buffer1[i];
+ // buffer1[i] = sum0;
+ // sum0_n+2 = sum0_n+1 - buffer0[i];
+ // buffer0[i] = leading edge
+ void blurSegment(
+ int n, const uint32_t* src, int srcStride, uint32_t* dst, int dstStride) override {
+ skvx::Vec<4, uint32_t>* buffer0Cursor = fBuffer0Cursor;
+ skvx::Vec<4, uint32_t>* buffer1Cursor = fBuffer1Cursor;
+ skvx::Vec<4, uint32_t> sum0 = skvx::Vec<4, uint32_t>::Load(fSum0);
+ skvx::Vec<4, uint32_t> sum1 = skvx::Vec<4, uint32_t>::Load(fSum1);
+
+ // Given an expanded input pixel, move the window ahead using the leadingEdge value.
+ auto processValue = [&](const skvx::Vec<4, uint32_t>& leadingEdge) {
+ sum0 += leadingEdge;
+ sum1 += sum0;
+
+ skvx::Vec<4, uint32_t> blurred = fDivider.divide(sum1);
+
+ sum1 -= *buffer1Cursor;
+ *buffer1Cursor = sum0;
+ buffer1Cursor = (buffer1Cursor + 1) < fBuffersEnd ? buffer1Cursor + 1 : fBuffer1;
+ sum0 -= *buffer0Cursor;
+ *buffer0Cursor = leadingEdge;
+ buffer0Cursor = (buffer0Cursor + 1) < fBuffer1 ? buffer0Cursor + 1 : fBuffer0;
+
+ return skvx::cast<uint8_t>(blurred);
+ };
+
+ auto loadEdge = [&](const uint32_t* srcCursor) {
+ return skvx::cast<uint32_t>(skvx::Vec<4, uint8_t>::Load(srcCursor));
+ };
+
+ if (!src && !dst) {
+ while (n --> 0) {
+ (void)processValue(0);
+ }
+ } else if (src && !dst) {
+ while (n --> 0) {
+ (void)processValue(loadEdge(src));
+ src += srcStride;
+ }
+ } else if (!src && dst) {
+ while (n --> 0) {
+ processValue(0u).store(dst);
+ dst += dstStride;
+ }
+ } else if (src && dst) {
+ while (n --> 0) {
+ processValue(loadEdge(src)).store(dst);
+ src += srcStride;
+ dst += dstStride;
+ }
+ }
+
+ // Store the state
+ fBuffer0Cursor = buffer0Cursor;
+ fBuffer1Cursor = buffer1Cursor;
+ sum0.store(fSum0);
+ sum1.store(fSum1);
+ }
+
+ skvx::Vec<4, uint32_t>* const fBuffer0;
+ skvx::Vec<4, uint32_t>* const fBuffer1;
+ skvx::Vec<4, uint32_t>* const fBuffersEnd;
+ const skvx::ScaledDividerU32 fDivider;
+
+ // blur state
+ char fSum0[sizeof(skvx::Vec<4, uint32_t>)];
+ char fSum1[sizeof(skvx::Vec<4, uint32_t>)];
+ skvx::Vec<4, uint32_t>* fBuffer0Cursor;
+ skvx::Vec<4, uint32_t>* fBuffer1Cursor;
+};
+
+sk_sp<SkSpecialImage> copy_image_with_bounds(
+ const SkImageFilter_Base::Context& ctx, const sk_sp<SkSpecialImage> &input,
+ SkIRect srcBounds, SkIRect dstBounds) {
+ SkBitmap inputBM;
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkBitmap src;
+ inputBM.extractSubset(&src, srcBounds);
+
+ // Make everything relative to the destination bounds.
+ srcBounds.offset(-dstBounds.x(), -dstBounds.y());
+ dstBounds.offset(-dstBounds.x(), -dstBounds.y());
+
+ auto srcW = srcBounds.width(),
+ dstW = dstBounds.width(),
+ dstH = dstBounds.height();
+
+ SkImageInfo dstInfo = SkImageInfo::Make(dstW, dstH, inputBM.colorType(), inputBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(dstInfo)) {
+ return nullptr;
+ }
+
+ // There is no blurring to do, but we still need to copy the source while accounting for the
+ // dstBounds. Remember that the src was intersected with the dst.
+ int y = 0;
+ size_t dstWBytes = dstW * sizeof(uint32_t);
+ for (;y < srcBounds.top(); y++) {
+ sk_bzero(dst.getAddr32(0, y), dstWBytes);
+ }
+
+ for (;y < srcBounds.bottom(); y++) {
+ int x = 0;
+ uint32_t* dstPtr = dst.getAddr32(0, y);
+ for (;x < srcBounds.left(); x++) {
+ *dstPtr++ = 0;
+ }
+
+ memcpy(dstPtr, src.getAddr32(x - srcBounds.left(), y - srcBounds.top()),
+ srcW * sizeof(uint32_t));
+
+ dstPtr += srcW;
+ x += srcW;
+
+ for (;x < dstBounds.right(); x++) {
+ *dstPtr++ = 0;
+ }
+ }
+
+ for (;y < dstBounds.bottom(); y++) {
+ sk_bzero(dst.getAddr32(0, y), dstWBytes);
+ }
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(dstBounds.width(),
+ dstBounds.height()),
+ dst, ctx.surfaceProps());
+}
+
+// TODO: Implement CPU backend for different fTileMode.
+sk_sp<SkSpecialImage> cpu_blur(
+ const SkImageFilter_Base::Context& ctx,
+ SkVector sigma, const sk_sp<SkSpecialImage> &input,
+ SkIRect srcBounds, SkIRect dstBounds) {
+ // map_sigma limits sigma to 532 to match 1000px box filter limit of WebKit and Firefox.
+ // Since this does not exceed the limits of the TentPass (2183), there won't be overflow when
+ // computing a kernel over a pixel window filled with 255.
+ static_assert(kMaxSigma <= 2183.0f);
+
+ SkSTArenaAlloc<1024> alloc;
+ auto makeMaker = [&](double sigma) -> PassMaker* {
+ SkASSERT(0 <= sigma && sigma <= 2183); // should be guaranteed after map_sigma
+ if (PassMaker* maker = GaussPass::MakeMaker(sigma, &alloc)) {
+ return maker;
+ }
+ if (PassMaker* maker = TentPass::MakeMaker(sigma, &alloc)) {
+ return maker;
+ }
+ SK_ABORT("Sigma is out of range.");
+ };
+
+ PassMaker* makerX = makeMaker(sigma.x());
+ PassMaker* makerY = makeMaker(sigma.y());
+
+ if (makerX->window() <= 1 && makerY->window() <= 1) {
+ return copy_image_with_bounds(ctx, input, srcBounds, dstBounds);
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkBitmap src;
+ inputBM.extractSubset(&src, srcBounds);
+
+ // Make everything relative to the destination bounds.
+ srcBounds.offset(-dstBounds.x(), -dstBounds.y());
+ dstBounds.offset(-dstBounds.x(), -dstBounds.y());
+
+ auto srcW = srcBounds.width(),
+ srcH = srcBounds.height(),
+ dstW = dstBounds.width(),
+ dstH = dstBounds.height();
+
+ SkImageInfo dstInfo = inputBM.info().makeWH(dstW, dstH);
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(dstInfo)) {
+ return nullptr;
+ }
+
+ size_t bufferSizeBytes = std::max(makerX->bufferSizeBytes(), makerY->bufferSizeBytes());
+ auto buffer = alloc.makeBytesAlignedTo(bufferSizeBytes, alignof(skvx::Vec<4, uint32_t>));
+
+ // Basic Plan: The three cases to handle
+ // * Horizontal and Vertical - blur horizontally while copying values from the source to
+ // the destination. Then, do an in-place vertical blur.
+ // * Horizontal only - blur horizontally copying values from the source to the destination.
+ // * Vertical only - blur vertically copying values from the source to the destination.
+
+ // Default to vertical only blur case. If a horizontal blur is needed, then these values
+ // will be adjusted while doing the horizontal blur.
+ auto intermediateSrc = static_cast<uint32_t *>(src.getPixels());
+ auto intermediateRowBytesAsPixels = src.rowBytesAsPixels();
+ auto intermediateWidth = srcW;
+
+ // Because the border is calculated before the fork of the GPU/CPU path, the border is
+ // the maximum of the two rendering methods. In the case where sigma is zero, the
+ // src and dst left values are the same. If sigma is small resulting in a window size of
+ // 1, then border calculations add some pixels which will always be zero. Inset the
+ // destination by those zero pixels. This case is very rare.
+ auto intermediateDst = dst.getAddr32(srcBounds.left(), 0);
+
+ // The following code is executed very rarely, I have never seen it in a real web
+ // page. If sigma is small but not zero then shared GPU/CPU border calculation
+ // code adds extra pixels for the border. Just clear everything to clear those pixels.
+ // This solution is overkill, but very simple.
+ if (makerX->window() == 1 || makerY->window() == 1) {
+ dst.eraseColor(0);
+ }
+
+ if (makerX->window() > 1) {
+ Pass* pass = makerX->makePass(buffer, &alloc);
+ // Make int64 to avoid overflow in multiplication below.
+ int64_t shift = srcBounds.top() - dstBounds.top();
+
+ // For the horizontal blur, starts part way down in anticipation of the vertical blur.
+ // For a vertical sigma of zero shift should be zero. But, for small sigma,
+ // shift may be > 0 but the vertical window could be 1.
+ intermediateSrc = static_cast<uint32_t *>(dst.getPixels())
+ + (shift > 0 ? shift * dst.rowBytesAsPixels() : 0);
+ intermediateRowBytesAsPixels = dst.rowBytesAsPixels();
+ intermediateWidth = dstW;
+ intermediateDst = static_cast<uint32_t *>(dst.getPixels());
+
+ const uint32_t* srcCursor = static_cast<uint32_t*>(src.getPixels());
+ uint32_t* dstCursor = intermediateSrc;
+ for (auto y = 0; y < srcH; y++) {
+ pass->blur(srcBounds.left(), srcBounds.right(), dstBounds.right(),
+ srcCursor, 1, dstCursor, 1);
+ srcCursor += src.rowBytesAsPixels();
+ dstCursor += intermediateRowBytesAsPixels;
+ }
+ }
+
+ if (makerY->window() > 1) {
+ Pass* pass = makerY->makePass(buffer, &alloc);
+ const uint32_t* srcCursor = intermediateSrc;
+ uint32_t* dstCursor = intermediateDst;
+ for (auto x = 0; x < intermediateWidth; x++) {
+ pass->blur(srcBounds.top(), srcBounds.bottom(), dstBounds.bottom(),
+ srcCursor, intermediateRowBytesAsPixels,
+ dstCursor, dst.rowBytesAsPixels());
+ srcCursor += 1;
+ dstCursor += 1;
+ }
+ }
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(dstBounds.width(),
+ dstBounds.height()),
+ dst, ctx.surfaceProps());
+}
+} // namespace
+
+sk_sp<SkSpecialImage> SkBlurImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.fX, inputOffset.fY,
+ input->width(), input->height());
+
+ // Calculate the destination bounds.
+ SkIRect dstBounds;
+ if (!this->applyCropRect(this->mapContext(ctx), inputBounds, &dstBounds)) {
+ return nullptr;
+ }
+ if (!inputBounds.intersect(dstBounds)) {
+ return nullptr;
+ }
+
+ // Save the offset in preparation to make all rectangles relative to the inputOffset.
+ SkIPoint resultOffset = SkIPoint::Make(dstBounds.fLeft, dstBounds.fTop);
+
+ // Make all bounds relative to the inputOffset.
+ inputBounds.offset(-inputOffset);
+ dstBounds.offset(-inputOffset);
+
+ SkVector sigma = map_sigma(fSigma, ctx.ctm());
+ SkASSERT(SkScalarIsFinite(sigma.x()) && sigma.x() >= 0.f && sigma.x() <= kMaxSigma &&
+ SkScalarIsFinite(sigma.y()) && sigma.y() >= 0.f && sigma.y() <= kMaxSigma);
+
+ sk_sp<SkSpecialImage> result;
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ // Ensure the input is in the destination's gamut. This saves us from having to do the
+ // xform during the filter itself.
+ input = ImageToColorSpace(input.get(), ctx.colorType(), ctx.colorSpace(),
+ ctx.surfaceProps());
+ result = this->gpuFilter(ctx, sigma, input, inputBounds, dstBounds, inputOffset,
+ &resultOffset);
+ } else
+#endif
+ {
+ result = cpu_blur(ctx, sigma, input, inputBounds, dstBounds);
+ }
+
+ // Return the resultOffset if the blur succeeded.
+ if (result != nullptr) {
+ *offset = resultOffset;
+ }
+ return result;
+}
+
+#if defined(SK_GANESH)
+sk_sp<SkSpecialImage> SkBlurImageFilter::gpuFilter(
+ const Context& ctx, SkVector sigma, const sk_sp<SkSpecialImage> &input, SkIRect inputBounds,
+ SkIRect dstBounds, SkIPoint inputOffset, SkIPoint* offset) const {
+ if (SkGpuBlurUtils::IsEffectivelyZeroSigma(sigma.x()) &&
+ SkGpuBlurUtils::IsEffectivelyZeroSigma(sigma.y())) {
+ offset->fX = inputBounds.x() + inputOffset.fX;
+ offset->fY = inputBounds.y() + inputOffset.fY;
+ return input->makeSubset(inputBounds);
+ }
+
+ auto context = ctx.getContext();
+
+ GrSurfaceProxyView inputView = input->view(context);
+ if (!inputView.proxy()) {
+ return nullptr;
+ }
+ SkASSERT(inputView.asTextureProxy());
+
+ dstBounds.offset(input->subset().topLeft());
+ inputBounds.offset(input->subset().topLeft());
+ auto sdc = SkGpuBlurUtils::GaussianBlur(
+ context,
+ std::move(inputView),
+ SkColorTypeToGrColorType(input->colorType()),
+ input->alphaType(),
+ ctx.refColorSpace(),
+ dstBounds,
+ inputBounds,
+ sigma.x(),
+ sigma.y(),
+ fTileMode);
+ if (!sdc) {
+ return nullptr;
+ }
+
+ return SkSpecialImage::MakeDeferredFromGpu(context,
+ SkIRect::MakeSize(dstBounds.size()),
+ kNeedNewImageUniqueID_SpecialImage,
+ sdc->readSurfaceView(),
+ sdc->colorInfo(),
+ ctx.surfaceProps());
+}
+#endif
+
+SkRect SkBlurImageFilter::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ bounds.outset(fSigma.width() * 3, fSigma.height() * 3);
+ return bounds;
+}
+
+SkIRect SkBlurImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const {
+ SkVector sigma = map_sigma(fSigma, ctm);
+ return src.makeOutset(SkScalarCeilToInt(sigma.x() * 3), SkScalarCeilToInt(sigma.y() * 3));
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkColorFilterImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkColorFilterImageFilter.cpp
new file mode 100644
index 0000000000..1b8fdf6bfc
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkColorFilterImageFilter.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <utility>
+
+namespace {
+
+class SkColorFilterImageFilter final : public SkImageFilter_Base {
+public:
+ SkColorFilterImageFilter(sk_sp<SkColorFilter> cf, sk_sp<SkImageFilter> input,
+ const SkRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fColorFilter(std::move(cf)) {}
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ bool onIsColorFilterNode(SkColorFilter**) const override;
+ MatrixCapability onGetCTMCapability() const override { return MatrixCapability::kComplex; }
+ bool onAffectsTransparentBlack() const override;
+
+private:
+ friend void ::SkRegisterColorFilterImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkColorFilterImageFilter)
+
+ sk_sp<SkColorFilter> fColorFilter;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::ColorFilter(
+ sk_sp<SkColorFilter> cf, sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+ if (!cf) {
+ // The color filter is the identity, but 'cropRect' and 'input' may perform actions in the
+ // image filter graph.
+ const SkRect* crop = cropRect;
+ if (crop) {
+ // Wrap 'input' in an offset filter with (0,0) and the crop rect.
+ // TODO(michaelludwig): Replace this with SkCropImageFilter when that's ready for use.
+ return SkImageFilters::Offset(0.f, 0.f, std::move(input), cropRect);
+ } else {
+ // Just forward 'input' on
+ return input;
+ }
+ }
+
+ SkColorFilter* inputCF;
+ if (input && input->isColorFilterNode(&inputCF)) {
+ // This is an optimization, as it collapses the hierarchy by just combining the two
+ // colorfilters into a single one, which the new imagefilter will wrap.
+ sk_sp<SkColorFilter> newCF = cf->makeComposed(sk_sp<SkColorFilter>(inputCF));
+ if (newCF) {
+ return sk_sp<SkImageFilter>(new SkColorFilterImageFilter(
+ std::move(newCF), sk_ref_sp(input->getInput(0)), cropRect));
+ }
+ }
+
+ return sk_sp<SkImageFilter>(new SkColorFilterImageFilter(
+ std::move(cf), std::move(input), cropRect));
+}
+
+void SkRegisterColorFilterImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkColorFilterImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkColorFilterImageFilterImpl", SkColorFilterImageFilter::CreateProc);
+}
+
+
+sk_sp<SkFlattenable> SkColorFilterImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ sk_sp<SkColorFilter> cf(buffer.readColorFilter());
+ return SkImageFilters::ColorFilter(std::move(cf), common.getInput(0), common.cropRect());
+}
+
+void SkColorFilterImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeFlattenable(fColorFilter.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSpecialImage> SkColorFilterImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+
+ SkIRect inputBounds;
+ if (as_CFB(fColorFilter)->affectsTransparentBlack()) {
+ // If the color filter affects transparent black, the bounds are the entire clip.
+ inputBounds = ctx.clipBounds();
+ } else if (!input) {
+ return nullptr;
+ } else {
+ inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ }
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setColorFilter(fColorFilter);
+
+ // TODO: it may not be necessary to clear or drawPaint inside the input bounds
+ // (see skbug.com/5075)
+ if (as_CFB(fColorFilter)->affectsTransparentBlack()) {
+ // The subsequent input->draw() call may not fill the entire canvas. For filters which
+ // affect transparent black, ensure that the filter is applied everywhere.
+ paint.setColor(SK_ColorTRANSPARENT);
+ canvas->drawPaint(paint);
+ paint.setColor(SK_ColorBLACK);
+ } else {
+ canvas->clear(0x0);
+ }
+
+ if (input) {
+ input->draw(canvas,
+ SkIntToScalar(inputOffset.fX - bounds.fLeft),
+ SkIntToScalar(inputOffset.fY - bounds.fTop),
+ SkSamplingOptions(), &paint);
+ }
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+bool SkColorFilterImageFilter::onIsColorFilterNode(SkColorFilter** filter) const {
+ SkASSERT(1 == this->countInputs());
+ if (!this->cropRectIsSet()) {
+ if (filter) {
+ *filter = SkRef(fColorFilter.get());
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SkColorFilterImageFilter::onAffectsTransparentBlack() const {
+ return as_CFB(fColorFilter)->affectsTransparentBlack();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkComposeImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkComposeImageFilter.cpp
new file mode 100644
index 0000000000..b5692071f3
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkComposeImageFilter.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "src/core/SkImageFilterTypes.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkSpecialImage.h"
+
+#include <utility>
+
+class SkReadBuffer;
+
+namespace {
+
+class SkComposeImageFilter final : public SkImageFilter_Base {
+public:
+ explicit SkComposeImageFilter(sk_sp<SkImageFilter> inputs[2])
+ : INHERITED(inputs, 2, nullptr) {
+ SkASSERT(inputs[0].get());
+ SkASSERT(inputs[1].get());
+ }
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ MatrixCapability onGetCTMCapability() const override { return MatrixCapability::kComplex; }
+
+private:
+ friend void ::SkRegisterComposeImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkComposeImageFilter)
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::Compose(sk_sp<SkImageFilter> outer,
+ sk_sp<SkImageFilter> inner) {
+ if (!outer) {
+ return inner;
+ }
+ if (!inner) {
+ return outer;
+ }
+ sk_sp<SkImageFilter> inputs[2] = { std::move(outer), std::move(inner) };
+ return sk_sp<SkImageFilter>(new SkComposeImageFilter(inputs));
+}
+
+void SkRegisterComposeImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkComposeImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkComposeImageFilterImpl", SkComposeImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkComposeImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+ return SkImageFilters::Compose(common.getInput(0), common.getInput(1));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkRect SkComposeImageFilter::computeFastBounds(const SkRect& src) const {
+ const SkImageFilter* outer = this->getInput(0);
+ const SkImageFilter* inner = this->getInput(1);
+
+ return outer->computeFastBounds(inner->computeFastBounds(src));
+}
+
+sk_sp<SkSpecialImage> SkComposeImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ // The bounds passed to the inner filter must be filtered by the outer
+ // filter, so that the inner filter produces the pixels that the outer
+ // filter requires as input. This matters if the outer filter moves pixels.
+ SkIRect innerClipBounds;
+ innerClipBounds = this->getInput(0)->filterBounds(ctx.clipBounds(), ctx.ctm(),
+ kReverse_MapDirection, &ctx.clipBounds());
+ Context innerContext = ctx.withNewDesiredOutput(skif::LayerSpace<SkIRect>(innerClipBounds));
+ SkIPoint innerOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> inner(this->filterInput(1, innerContext, &innerOffset));
+ if (!inner) {
+ return nullptr;
+ }
+
+ // TODO (michaelludwig) - Once all filters are updated to process coordinate spaces more
+ // robustly, we can allow source images to have non-(0,0) origins, which will mean that the
+ // CTM/clipBounds modifications for the outerContext can go away.
+ SkMatrix outerMatrix(ctx.ctm());
+ outerMatrix.postTranslate(SkIntToScalar(-innerOffset.x()), SkIntToScalar(-innerOffset.y()));
+ SkIRect clipBounds = ctx.clipBounds();
+ clipBounds.offset(-innerOffset.x(), -innerOffset.y());
+ // NOTE: This is the only spot in image filtering where the source image of the context
+ // is not constant for the entire DAG evaluation. Given that the inner and outer DAG branches
+ // were already created, there's no alternative way for the leaf nodes of the outer DAG to
+ // get the results of the inner DAG. Overriding the source image of the context has the correct
+ // effect, but means that the source image is not fixed for the entire filter process.
+ Context outerContext(outerMatrix, clipBounds, ctx.cache(), ctx.colorType(), ctx.colorSpace(),
+ inner.get());
+
+ SkIPoint outerOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> outer(this->filterInput(0, outerContext, &outerOffset));
+ if (!outer) {
+ return nullptr;
+ }
+
+ *offset = innerOffset + outerOffset;
+ return outer;
+}
+
+SkIRect SkComposeImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection dir, const SkIRect* inputRect) const {
+ const SkImageFilter* outer = this->getInput(0);
+ const SkImageFilter* inner = this->getInput(1);
+
+ if (dir == kReverse_MapDirection) {
+ // The output 'src' is processed by the outer filter, producing its required input bounds,
+ // which is then the output bounds required of the inner filter. We pass the inputRect to
+ // outer and not inner to match the default recursion logic of onGetInputLayerBounds
+ const SkIRect outerRect = outer->filterBounds(src, ctm, dir, inputRect);
+ return inner->filterBounds(outerRect, ctm, dir);
+ } else {
+ // The input 'src' is processed by the inner filter, producing the input bounds for the
+ // outer filter of the composition, which then produces the final forward output bounds
+ const SkIRect innerRect = inner->filterBounds(src, ctm, dir);
+ return outer->filterBounds(innerRect, ctm, dir);
+ }
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkCropImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkCropImageFilter.cpp
new file mode 100644
index 0000000000..c4ad58d6ee
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkCropImageFilter.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/effects/imagefilters/SkCropImageFilter.h"
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkRect.h"
+#include "src/core/SkImageFilterTypes.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <utility>
+
+namespace {
+
+class SkCropImageFilter final : public SkImageFilter_Base {
+public:
+ SkCropImageFilter(const SkRect& cropRect, sk_sp<SkImageFilter> input)
+ : SkImageFilter_Base(&input, 1, /*cropRect=*/nullptr)
+ , fCropRect(cropRect) {
+ SkASSERT(cropRect.isFinite());
+ SkASSERT(cropRect.isSorted());
+ }
+
+ SkRect computeFastBounds(const SkRect& bounds) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ friend void ::SkRegisterCropImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkCropImageFilter)
+
+ skif::FilterResult onFilterImage(const skif::Context& context) const override;
+
+ skif::LayerSpace<SkIRect> onGetInputLayerBounds(
+ const skif::Mapping& mapping,
+ const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds,
+ VisitChildren recurse) const override;
+
+ skif::LayerSpace<SkIRect> onGetOutputLayerBounds(
+ const skif::Mapping& mapping,
+ const skif::LayerSpace<SkIRect>& contentBounds) const override;
+
+ // The crop rect is specified in floating point to allow cropping to partial local pixels,
+ // that could become whole pixels in the layer-space image if the canvas is scaled.
+ // For now it's always rounded to integer pixels as if it were non-AA.
+ skif::LayerSpace<SkIRect> cropRect(const skif::Mapping& mapping) const {
+ return mapping.paramToLayer(fCropRect).roundOut();
+ }
+
+ skif::ParameterSpace<SkRect> fCropRect;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkMakeCropImageFilter(const SkRect& rect, sk_sp<SkImageFilter> input) {
+ if (!rect.isFinite()) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkCropImageFilter(rect, std::move(input)));
+}
+
+void SkRegisterCropImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkCropImageFilter);
+}
+
+sk_sp<SkFlattenable> SkCropImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkRect cropRect = buffer.readRect();
+ if (!buffer.isValid() || !buffer.validate(SkIsValidRect(cropRect))) {
+ return nullptr;
+ }
+ return SkMakeCropImageFilter(cropRect, common.getInput(0));
+}
+
+void SkCropImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->SkImageFilter_Base::flatten(buffer);
+ buffer.writeRect(SkRect(fCropRect));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+skif::FilterResult SkCropImageFilter::onFilterImage(const skif::Context& context) const {
+ skif::LayerSpace<SkIRect> cropBounds = this->cropRect(context.mapping());
+ if (cropBounds.isEmpty()) {
+ // Don't bother evaluating the input filter if the crop wouldn't show anything
+ return {};
+ }
+
+ skif::FilterResult childOutput = this->filterInput(0, context);
+ // While filterInput() adjusts the context passed to our child filter to account for the
+ // crop rect and desired output, 'childOutput' does not necessarily fit that exactly. Calling
+ // applyCrop() ensures this is true, optimally avoiding rendering a new image if possible.
+ return childOutput.applyCrop(context, cropBounds);
+}
+
+// TODO(michaelludwig) - onGetInputLayerBounds() and onGetOutputLayerBounds() are tightly coupled
+// to both each other's behavior and to onFilterImage(). If onFilterImage() had a concept of a
+// dry-run (e.g. FilterResult had null images but tracked the bounds the images would be) then
+// onGetInputLayerBounds() is the union of all requested inputs at the leaf nodes of the DAG, and
+// onGetOutputLayerBounds() is the bounds of the dry-run result. This might have more overhead, but
+// would reduce the complexity of implementations by quite a bit.
+skif::LayerSpace<SkIRect> SkCropImageFilter::onGetInputLayerBounds(
+ const skif::Mapping& mapping,
+ const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds,
+ VisitChildren recurse) const {
+ // Assuming unbounded desired output, this filter only needs to process an image that's at most
+ // sized to our crop rect.
+ skif::LayerSpace<SkIRect> requiredInput = this->cropRect(mapping);
+ // But we can restrict the crop rect to just what's requested, since anything beyond that won't
+ // be rendered.
+ if (!requiredInput.intersect(desiredOutput)) {
+ // We wouldn't draw anything when filtering, so return empty bounds now to skip a layer.
+ return skif::LayerSpace<SkIRect>::Empty();
+ }
+
+ if (recurse == VisitChildren::kNo) {
+ return requiredInput;
+ } else {
+ // Our required input is the desired output for our child image filter.
+ return this->visitInputLayerBounds(mapping, requiredInput, contentBounds);
+ }
+}
+
+skif::LayerSpace<SkIRect> SkCropImageFilter::onGetOutputLayerBounds(
+ const skif::Mapping& mapping,
+ const skif::LayerSpace<SkIRect>& contentBounds) const {
+ // Assuming unbounded child content, our output is a decal-tiled image sized to our crop rect.
+ skif::LayerSpace<SkIRect> output = this->cropRect(mapping);
+ // But the child output image is drawn into our output surface with its own decal tiling, which
+ // may allow the output dimensions to be reduced.
+ skif::LayerSpace<SkIRect> childOutput = this->visitOutputLayerBounds(mapping, contentBounds);
+
+ if (output.intersect(childOutput)) {
+ return output;
+ } else {
+ // Nothing would be drawn into our crop rect, so nothing would be output.
+ return skif::LayerSpace<SkIRect>::Empty();
+ }
+}
+
+SkRect SkCropImageFilter::computeFastBounds(const SkRect& bounds) const {
+ // TODO(michaelludwig) - This is conceptually very similar to calling onGetOutputLayerBounds()
+ // with an identity skif::Mapping (hence why fCropRect can be used directly), but it also does
+ // not involve any rounding to pixels for both the content bounds or the output.
+ // FIXME(michaelludwig) - There is a limitation in the current system for "fast bounds", since
+ // there's no way for the crop image filter to hide the fact that a child affects transparent
+ // black, so the entire DAG still is treated as if it cannot compute fast bounds. If we migrate
+ // getOutputLayerBounds() to operate on float rects, and to report infinite bounds for
+ // nodes that affect transparent black, then fastBounds() and onAffectsTransparentBlack() impls
+ // can go away entirely. That's not feasible until everything else is migrated onto the new crop
+ // rect filter and the new APIs.
+ if (this->getInput(0) && !this->getInput(0)->canComputeFastBounds()) {
+ // The input bounds to the crop are effectively infinite so the output fills the crop rect.
+ return SkRect(fCropRect);
+ }
+
+ SkRect inputBounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(bounds) : bounds;
+ if (!inputBounds.intersect(SkRect(fCropRect))) {
+ return SkRect::MakeEmpty();
+ }
+ return inputBounds;
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkCropImageFilter.h b/gfx/skia/skia/src/effects/imagefilters/SkCropImageFilter.h
new file mode 100644
index 0000000000..8901a677b2
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkCropImageFilter.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCropImageFilter_DEFINED
+#define SkCropImageFilter_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkImageFilter;
+struct SkRect;
+
+// TODO (michaelludwig): Move to SkImageFilters::Crop when ready to expose to the public
+SK_API sk_sp<SkImageFilter> SkMakeCropImageFilter(const SkRect& rect, sk_sp<SkImageFilter> input);
+
+#endif
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkDisplacementMapImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkDisplacementMapImageFilter.cpp
new file mode 100644
index 0000000000..5a8d10e816
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkDisplacementMapImageFilter.cpp
@@ -0,0 +1,600 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/private/SkSLSampleUsage.h"
+#include "include/private/base/SkSafe32.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkSLTypeShared.h"
+#include "src/gpu/KeyBuilder.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrProcessorUnitTest.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSamplerState.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
+
+struct GrShaderCaps;
+#endif
+
+#if GR_TEST_UTILS
+#include "src/base/SkRandom.h"
+#endif
+
+namespace {
+
+class SkDisplacementMapImageFilter final : public SkImageFilter_Base {
+public:
+ SkDisplacementMapImageFilter(SkColorChannel xChannelSelector, SkColorChannel yChannelSelector,
+ SkScalar scale, sk_sp<SkImageFilter> inputs[2],
+ const SkRect* cropRect)
+ : INHERITED(inputs, 2, cropRect)
+ , fXChannelSelector(xChannelSelector)
+ , fYChannelSelector(yChannelSelector)
+ , fScale(scale) {}
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+ SkIRect onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ friend void ::SkRegisterDisplacementMapImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkDisplacementMapImageFilter)
+
+ SkColorChannel fXChannelSelector;
+ SkColorChannel fYChannelSelector;
+ SkScalar fScale;
+
+ const SkImageFilter* getDisplacementInput() const { return getInput(0); }
+ const SkImageFilter* getColorInput() const { return getInput(1); }
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+// Shift values to extract channels from an SkColor (SkColorGetR, SkColorGetG, etc)
+const uint8_t gChannelTypeToShift[] = {
+ 16, // R
+ 8, // G
+ 0, // B
+ 24, // A
+};
+struct Extractor {
+ Extractor(SkColorChannel typeX,
+ SkColorChannel typeY)
+ : fShiftX(gChannelTypeToShift[static_cast<int>(typeX)])
+ , fShiftY(gChannelTypeToShift[static_cast<int>(typeY)])
+ {}
+
+ unsigned fShiftX, fShiftY;
+
+ unsigned getX(SkColor c) const { return (c >> fShiftX) & 0xFF; }
+ unsigned getY(SkColor c) const { return (c >> fShiftY) & 0xFF; }
+};
+
+static bool channel_selector_type_is_valid(SkColorChannel cst) {
+ switch (cst) {
+ case SkColorChannel::kR:
+ case SkColorChannel::kG:
+ case SkColorChannel::kB:
+ case SkColorChannel::kA:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImageFilter> SkImageFilters::DisplacementMap(
+ SkColorChannel xChannelSelector, SkColorChannel yChannelSelector, SkScalar scale,
+ sk_sp<SkImageFilter> displacement, sk_sp<SkImageFilter> color, const CropRect& cropRect) {
+ if (!channel_selector_type_is_valid(xChannelSelector) ||
+ !channel_selector_type_is_valid(yChannelSelector)) {
+ return nullptr;
+ }
+
+ sk_sp<SkImageFilter> inputs[2] = { std::move(displacement), std::move(color) };
+ return sk_sp<SkImageFilter>(new SkDisplacementMapImageFilter(xChannelSelector, yChannelSelector,
+ scale, inputs, cropRect));
+}
+
+void SkRegisterDisplacementMapImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkDisplacementMapImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkDisplacementMapEffect", SkDisplacementMapImageFilter::CreateProc);
+ SkFlattenable::Register("SkDisplacementMapEffectImpl",
+ SkDisplacementMapImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkDisplacementMapImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 2);
+
+ SkColorChannel xsel = buffer.read32LE(SkColorChannel::kLastEnum);
+ SkColorChannel ysel = buffer.read32LE(SkColorChannel::kLastEnum);
+ SkScalar scale = buffer.readScalar();
+
+ return SkImageFilters::DisplacementMap(xsel, ysel, scale, common.getInput(0),
+ common.getInput(1), common.cropRect());
+}
+
+void SkDisplacementMapImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt((int) fXChannelSelector);
+ buffer.writeInt((int) fYChannelSelector);
+ buffer.writeScalar(fScale);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
+namespace {
+
+class GrDisplacementMapEffect : public GrFragmentProcessor {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(SkColorChannel xChannelSelector,
+ SkColorChannel yChannelSelector,
+ SkVector scale,
+ GrSurfaceProxyView displacement,
+ const SkIRect& displSubset,
+ const SkMatrix& offsetMatrix,
+ GrSurfaceProxyView color,
+ const SkIRect& colorSubset,
+ const GrCaps&);
+
+ ~GrDisplacementMapEffect() override;
+
+ const char* name() const override { return "DisplacementMap"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override;
+
+private:
+ class Impl;
+
+ explicit GrDisplacementMapEffect(const GrDisplacementMapEffect&);
+
+ std::unique_ptr<ProgramImpl> onMakeProgramImpl() const override;
+
+ void onAddToKey(const GrShaderCaps&, skgpu::KeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ GrDisplacementMapEffect(SkColorChannel xChannelSelector,
+ SkColorChannel yChannelSelector,
+ const SkVector& scale,
+ std::unique_ptr<GrFragmentProcessor> displacement,
+ std::unique_ptr<GrFragmentProcessor> color);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ SkColorChannel fXChannelSelector;
+ SkColorChannel fYChannelSelector;
+ SkVector fScale;
+
+ using INHERITED = GrFragmentProcessor;
+};
+
+} // anonymous namespace
+#endif
+
+static void compute_displacement(Extractor ex, const SkVector& scale, SkBitmap* dst,
+ const SkBitmap& displ, const SkIPoint& offset,
+ const SkBitmap& src,
+ const SkIRect& bounds) {
+ static const SkScalar Inv8bit = SkScalarInvert(255);
+ const int srcW = src.width();
+ const int srcH = src.height();
+ const SkVector scaleForColor = SkVector::Make(scale.fX * Inv8bit, scale.fY * Inv8bit);
+ const SkVector scaleAdj = SkVector::Make(SK_ScalarHalf - scale.fX * SK_ScalarHalf,
+ SK_ScalarHalf - scale.fY * SK_ScalarHalf);
+ SkPMColor* dstPtr = dst->getAddr32(0, 0);
+ for (int y = bounds.top(); y < bounds.bottom(); ++y) {
+ const SkPMColor* displPtr = displ.getAddr32(bounds.left() + offset.fX, y + offset.fY);
+ for (int x = bounds.left(); x < bounds.right(); ++x, ++displPtr) {
+ SkColor c = SkUnPreMultiply::PMColorToColor(*displPtr);
+
+ SkScalar displX = scaleForColor.fX * ex.getX(c) + scaleAdj.fX;
+ SkScalar displY = scaleForColor.fY * ex.getY(c) + scaleAdj.fY;
+ // Truncate the displacement values
+ const int32_t srcX = Sk32_sat_add(x, SkScalarTruncToInt(displX));
+ const int32_t srcY = Sk32_sat_add(y, SkScalarTruncToInt(displY));
+ *dstPtr++ = ((srcX < 0) || (srcX >= srcW) || (srcY < 0) || (srcY >= srcH)) ?
+ 0 : *(src.getAddr32(srcX, srcY));
+ }
+ }
+}
+
+sk_sp<SkSpecialImage> SkDisplacementMapImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint colorOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> color(this->filterInput(1, ctx, &colorOffset));
+ if (!color) {
+ return nullptr;
+ }
+
+ SkIPoint displOffset = SkIPoint::Make(0, 0);
+ // Creation of the displacement map should happen in a non-colorspace aware context. This
+ // texture is a purely mathematical construct, so we want to just operate on the stored
+ // values. Consider:
+ // User supplies an sRGB displacement map. If we're rendering to a wider gamut, then we could
+ // end up filtering the displacement map into that gamut, which has the effect of reducing
+ // the amount of displacement that it represents (as encoded values move away from the
+ // primaries).
+ // With a more complex DAG attached to this input, it's not clear that working in ANY specific
+ // color space makes sense, so we ignore color spaces (and gamma) entirely. This may not be
+ // ideal, but it's at least consistent and predictable.
+ Context displContext(ctx.mapping(), ctx.desiredOutput(), ctx.cache(),
+ kN32_SkColorType, nullptr, ctx.source());
+ sk_sp<SkSpecialImage> displ(this->filterInput(0, displContext, &displOffset));
+ if (!displ) {
+ return nullptr;
+ }
+
+ const SkIRect srcBounds = SkIRect::MakeXYWH(colorOffset.x(), colorOffset.y(),
+ color->width(), color->height());
+
+ // Both paths do bounds checking on color pixel access, we don't need to
+ // pad the color bitmap to bounds here.
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ SkIRect displBounds;
+ displ = this->applyCropRectAndPad(ctx, displ.get(), &displOffset, &displBounds);
+ if (!displ) {
+ return nullptr;
+ }
+
+ if (!bounds.intersect(displBounds)) {
+ return nullptr;
+ }
+
+ const SkIRect colorBounds = bounds.makeOffset(-colorOffset);
+ // If the offset overflowed (saturated) then we have to abort, as we need their
+ // dimensions to be equal. See https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=7209
+ if (colorBounds.size() != bounds.size()) {
+ return nullptr;
+ }
+
+ SkVector scale = SkVector::Make(fScale, fScale);
+ ctx.ctm().mapVectors(&scale, 1);
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ auto rContext = ctx.getContext();
+
+ GrSurfaceProxyView colorView = color->view(rContext);
+ GrSurfaceProxyView displView = displ->view(rContext);
+ if (!colorView.proxy() || !displView.proxy()) {
+ return nullptr;
+ }
+ const auto isProtected = colorView.proxy()->isProtected();
+
+ SkMatrix offsetMatrix = SkMatrix::Translate(SkIntToScalar(colorOffset.fX - displOffset.fX),
+ SkIntToScalar(colorOffset.fY - displOffset.fY));
+
+ std::unique_ptr<GrFragmentProcessor> fp =
+ GrDisplacementMapEffect::Make(fXChannelSelector,
+ fYChannelSelector,
+ scale,
+ std::move(displView),
+ displ->subset(),
+ offsetMatrix,
+ std::move(colorView),
+ color->subset(),
+ *rContext->priv().caps());
+ fp = GrColorSpaceXformEffect::Make(std::move(fp),
+ color->getColorSpace(), color->alphaType(),
+ ctx.colorSpace(), kPremul_SkAlphaType);
+ GrImageInfo info(ctx.grColorType(),
+ kPremul_SkAlphaType,
+ ctx.refColorSpace(),
+ bounds.size());
+ auto sfc = rContext->priv().makeSFC(info,
+ "DisplacementMapImageFilter_FilterImage",
+ SkBackingFit::kApprox,
+ 1,
+ GrMipmapped::kNo,
+ isProtected,
+ kBottomLeft_GrSurfaceOrigin);
+ if (!sfc) {
+ return nullptr;
+ }
+
+ sfc->fillRectToRectWithFP(colorBounds,
+ SkIRect::MakeSize(colorBounds.size()),
+ std::move(fp));
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeDeferredFromGpu(rContext,
+ SkIRect::MakeWH(bounds.width(), bounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ sfc->readSurfaceView(),
+ sfc->colorInfo(),
+ ctx.surfaceProps());
+ }
+#endif
+
+ SkBitmap colorBM, displBM;
+
+ if (!color->getROPixels(&colorBM) || !displ->getROPixels(&displBM)) {
+ return nullptr;
+ }
+
+ if ((colorBM.colorType() != kN32_SkColorType) ||
+ (displBM.colorType() != kN32_SkColorType)) {
+ return nullptr;
+ }
+
+ if (!colorBM.getPixels() || !displBM.getPixels()) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::MakeN32(bounds.width(), bounds.height(),
+ colorBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ compute_displacement(Extractor(fXChannelSelector, fYChannelSelector), scale, &dst,
+ displBM, colorOffset - displOffset, colorBM, colorBounds);
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst, ctx.surfaceProps());
+}
+
+// Conservative bounds: the color input's bounds outset by half the (unmapped)
+// displacement scale on each side, since a displaced sample can pull color from
+// at most scale/2 away. The displacement input alters sampling, not geometry.
+SkRect SkDisplacementMapImageFilter::computeFastBounds(const SkRect& src) const {
+    SkRect bounds = this->getColorInput() ? this->getColorInput()->computeFastBounds(src) : src;
+    bounds.outset(SkScalarAbs(fScale) * SK_ScalarHalf, SkScalarAbs(fScale) * SK_ScalarHalf);
+    return bounds;
+}
+
+// Device-space variant of computeFastBounds: maps the scale vector through the
+// CTM, then outsets by half the absolute mapped components (rounded up).
+SkIRect SkDisplacementMapImageFilter::onFilterNodeBounds(
+        const SkIRect& src, const SkMatrix& ctm, MapDirection, const SkIRect* inputRect) const {
+    SkVector scale = SkVector::Make(fScale, fScale);
+    ctm.mapVectors(&scale, 1);
+    return src.makeOutset(SkScalarCeilToInt(SkScalarAbs(scale.fX) * SK_ScalarHalf),
+                          SkScalarCeilToInt(SkScalarAbs(scale.fY) * SK_ScalarHalf));
+}
+
+// Reverse mapping falls back to the base class; the forward direction only
+// recurses into the color input, because the displacement input supplies
+// per-pixel offsets rather than contributing its own geometry.
+SkIRect SkDisplacementMapImageFilter::onFilterBounds(
+        const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+    if (kReverse_MapDirection == dir) {
+        return INHERITED::onFilterBounds(src, ctm, dir, inputRect);
+    }
+    // Recurse only into color input.
+    if (this->getColorInput()) {
+        return this->getColorInput()->filterBounds(src, ctm, dir, inputRect);
+    }
+    return src;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+// Shader program implementation for GrDisplacementMapEffect: emitCode generates
+// the fragment code, onSetData re-uploads the scale uniform when the processor
+// is bound with new values.
+class GrDisplacementMapEffect::Impl : public ProgramImpl {
+public:
+    void emitCode(EmitArgs&) override;
+
+private:
+    void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+    typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+    // Handle to the half2 "Scale" uniform (written in onSetData from fScale).
+    UniformHandle fScaleUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Builds the displacement-map fragment processor from two texture views:
+// 'color' is restricted to colorSubset with clamp-to-border/nearest sampling
+// (reads outside the subset yield border color), and 'displacement' is sampled
+// through the subset translation composed with offsetMatrix.
+std::unique_ptr<GrFragmentProcessor> GrDisplacementMapEffect::Make(SkColorChannel xChannelSelector,
+                                                                   SkColorChannel yChannelSelector,
+                                                                   SkVector scale,
+                                                                   GrSurfaceProxyView displacement,
+                                                                   const SkIRect& displSubset,
+                                                                   const SkMatrix& offsetMatrix,
+                                                                   GrSurfaceProxyView color,
+                                                                   const SkIRect& colorSubset,
+                                                                   const GrCaps& caps) {
+    static constexpr GrSamplerState kColorSampler(GrSamplerState::WrapMode::kClampToBorder,
+                                                  GrSamplerState::Filter::kNearest);
+    auto colorEffect = GrTextureEffect::MakeSubset(std::move(color),
+                                                   kPremul_SkAlphaType,
+                                                   SkMatrix::Translate(colorSubset.topLeft()),
+                                                   kColorSampler,
+                                                   SkRect::Make(colorSubset),
+                                                   caps);
+
+    // Fold the displacement subset's origin into the offset matrix.
+    auto dispM = SkMatrix::Concat(SkMatrix::Translate(displSubset.topLeft()), offsetMatrix);
+    auto dispEffect = GrTextureEffect::Make(std::move(displacement),
+                                            kPremul_SkAlphaType,
+                                            dispM,
+                                            GrSamplerState::Filter::kNearest);
+
+    return std::unique_ptr<GrFragmentProcessor>(
+            new GrDisplacementMapEffect(xChannelSelector,
+                                        yChannelSelector,
+                                        scale,
+                                        std::move(dispEffect),
+                                        std::move(colorEffect)));
+}
+
+// Factory for the shader-program implementation declared above.
+std::unique_ptr<GrFragmentProcessor::ProgramImpl>
+GrDisplacementMapEffect::onMakeProgramImpl() const {
+    return std::make_unique<Impl>();
+}
+
+// Program key: both channel selectors packed into the low four bits
+// (two bits each), since they are baked into the generated shader.
+void GrDisplacementMapEffect::onAddToKey(const GrShaderCaps& caps, skgpu::KeyBuilder* b) const {
+    static constexpr int kChannelSelectorKeyBits = 2; // Max value is 3, so 2 bits are required
+
+    uint32_t xKey = static_cast<uint32_t>(fXChannelSelector);
+    uint32_t yKey = static_cast<uint32_t>(fYChannelSelector) << kChannelSelectorKeyBits;
+
+    b->add32(xKey | yKey);
+}
+
+// Child 0 (displacement) is sampled at the pass-through coordinate; child 1
+// (color) is registered with explicit sampling because emitCode samples it at
+// the displaced coordinates computed in the shader.
+GrDisplacementMapEffect::GrDisplacementMapEffect(SkColorChannel xChannelSelector,
+                                                 SkColorChannel yChannelSelector,
+                                                 const SkVector& scale,
+                                                 std::unique_ptr<GrFragmentProcessor> displacement,
+                                                 std::unique_ptr<GrFragmentProcessor> color)
+        : INHERITED(kGrDisplacementMapEffect_ClassID, GrFragmentProcessor::kNone_OptimizationFlags)
+        , fXChannelSelector(xChannelSelector)
+        , fYChannelSelector(yChannelSelector)
+        , fScale(scale) {
+    this->registerChild(std::move(displacement));
+    this->registerChild(std::move(color), SkSL::SampleUsage::Explicit());
+    this->setUsesSampleCoordsDirectly();
+}
+
+// Copy constructor used by clone(); the INHERITED(that) call duplicates the
+// registered children, so only the scalar members are copied here.
+GrDisplacementMapEffect::GrDisplacementMapEffect(const GrDisplacementMapEffect& that)
+        : INHERITED(that)
+        , fXChannelSelector(that.fXChannelSelector)
+        , fYChannelSelector(that.fYChannelSelector)
+        , fScale(that.fScale) {}
+
+GrDisplacementMapEffect::~GrDisplacementMapEffect() {}
+
+std::unique_ptr<GrFragmentProcessor> GrDisplacementMapEffect::clone() const {
+    return std::unique_ptr<GrFragmentProcessor>(new GrDisplacementMapEffect(*this));
+}
+
+// Processor equality: compares only the local fields; child processors are
+// compared by the base class machinery.
+bool GrDisplacementMapEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+    const GrDisplacementMapEffect& s = sBase.cast<GrDisplacementMapEffect>();
+    return fXChannelSelector == s.fXChannelSelector &&
+           fYChannelSelector == s.fYChannelSelector &&
+           fScale == s.fScale;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrDisplacementMapEffect)
+
+#if GR_TEST_UTILS
+// Fuzz-test factory: picks random channel selectors (range starts at 1, i.e.
+// skips SkColorChannel::kR == 0 only if kR is 0 — NOTE(review): confirm the
+// intended lower bound), random scales in [0, 100], and a random color subset
+// within the randomly created views.
+std::unique_ptr<GrFragmentProcessor> GrDisplacementMapEffect::TestCreate(GrProcessorTestData* d) {
+    auto [dispView, ct1, at1] = d->randomView();
+    auto [colorView, ct2, at2] = d->randomView();
+    static const int kMaxComponent = static_cast<int>(SkColorChannel::kLastEnum);
+    SkColorChannel xChannelSelector =
+        static_cast<SkColorChannel>(d->fRandom->nextRangeU(1, kMaxComponent));
+    SkColorChannel yChannelSelector =
+        static_cast<SkColorChannel>(d->fRandom->nextRangeU(1, kMaxComponent));
+    SkVector scale;
+    scale.fX = d->fRandom->nextRangeScalar(0, 100.0f);
+    scale.fY = d->fRandom->nextRangeScalar(0, 100.0f);
+    SkISize colorDimensions;
+    colorDimensions.fWidth = d->fRandom->nextRangeU(0, colorView.width());
+    colorDimensions.fHeight = d->fRandom->nextRangeU(0, colorView.height());
+    SkIRect dispRect = SkIRect::MakeSize(dispView.dimensions());
+
+    return GrDisplacementMapEffect::Make(xChannelSelector,
+                                         yChannelSelector,
+                                         scale,
+                                         std::move(dispView),
+                                         dispRect,
+                                         SkMatrix::I(),
+                                         std::move(colorView),
+                                         SkIRect::MakeSize(colorDimensions),
+                                         *d->caps());
+}
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Emits the fragment shader: unpremultiplies the displacement sample, forms
+// displaced coordinates cCoords = sampleCoord + scale * (channels - 0.5), and
+// samples the color child at those coordinates.
+void GrDisplacementMapEffect::Impl::emitCode(EmitArgs& args) {
+    const GrDisplacementMapEffect& displacementMap = args.fFp.cast<GrDisplacementMapEffect>();
+
+    fScaleUni = args.fUniformHandler->addUniform(&displacementMap, kFragment_GrShaderFlag,
+                                                 SkSLType::kHalf2, "Scale");
+    const char* scaleUni = args.fUniformHandler->getUniformCStr(fScaleUni);
+
+    GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+    SkString displacementSample = this->invokeChild(/*childIndex=*/0, args);
+    fragBuilder->codeAppendf("half4 dColor = unpremul(%s);", displacementSample.c_str());
+
+    // Maps the selected color channel to its shader swizzle character.
+    auto chanChar = [](SkColorChannel c) {
+        switch(c) {
+            case SkColorChannel::kR: return 'r';
+            case SkColorChannel::kG: return 'g';
+            case SkColorChannel::kB: return 'b';
+            case SkColorChannel::kA: return 'a';
+            default: SkUNREACHABLE;
+        }
+    };
+    fragBuilder->codeAppendf("float2 cCoords = %s + %s * (dColor.%c%c - half2(0.5));",
+                             args.fSampleCoord,
+                             scaleUni,
+                             chanChar(displacementMap.fXChannelSelector),
+                             chanChar(displacementMap.fYChannelSelector));
+
+    SkString colorSample = this->invokeChild(/*childIndex=*/1, args, "cCoords");
+
+    fragBuilder->codeAppendf("return %s;", colorSample.c_str());
+}
+
+// Uploads the CTM-mapped displacement scale into the "Scale" uniform.
+void GrDisplacementMapEffect::Impl::onSetData(const GrGLSLProgramDataManager& pdman,
+                                              const GrFragmentProcessor& proc) {
+    const auto& displacementMap = proc.cast<GrDisplacementMapEffect>();
+    pdman.set2f(fScaleUni, displacementMap.fScale.x(), displacementMap.fScale.y());
+}
+#endif
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkDropShadowImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkDropShadowImageFilter.cpp
new file mode 100644
index 0000000000..3f9842017e
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkDropShadowImageFilter.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <utility>
+
+namespace {
+
+// Image filter that draws a blurred, color-tinted, offset copy of its input
+// (the shadow), optionally composited beneath the original input.
+class SkDropShadowImageFilter final : public SkImageFilter_Base {
+public:
+    // dx/dy: shadow offset; sigmaX/sigmaY: blur radii; shadowOnly: when true,
+    // only the shadow is produced (DropShadowOnly factory).
+    SkDropShadowImageFilter(SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY,
+                            SkColor color, bool shadowOnly, sk_sp<SkImageFilter> input,
+                            const SkRect* cropRect)
+            : INHERITED(&input, 1, cropRect)
+            , fDx(dx)
+            , fDy(dy)
+            , fSigmaX(sigmaX)
+            , fSigmaY(sigmaY)
+            , fColor(color)
+            , fShadowOnly(shadowOnly) {}
+
+    static sk_sp<SkImageFilter> Make(SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY,
+                                     SkColor color, bool shadowOnly, sk_sp<SkImageFilter> input,
+                                     const SkRect* cropRect) {
+        return sk_sp<SkImageFilter>(new SkDropShadowImageFilter(
+                dx, dy, sigmaX, sigmaY, color, shadowOnly, std::move(input), cropRect));
+    }
+
+    SkRect computeFastBounds(const SkRect&) const override;
+
+protected:
+    void flatten(SkWriteBuffer&) const override;
+    sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+    SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+                               MapDirection, const SkIRect* inputRect) const override;
+
+private:
+    friend void ::SkRegisterDropShadowImageFilterFlattenable();
+    SK_FLATTENABLE_HOOKS(SkDropShadowImageFilter)
+
+    SkScalar fDx, fDy, fSigmaX, fSigmaY;
+    SkColor fColor;
+    bool fShadowOnly;
+
+    using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+// Public factory: shadow drawn beneath the input (shadowOnly = false).
+sk_sp<SkImageFilter> SkImageFilters::DropShadow(
+        SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY, SkColor color,
+        sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+    return SkDropShadowImageFilter::Make(dx, dy, sigmaX, sigmaY, color, /* shadowOnly */ false,
+                                         std::move(input), cropRect);
+}
+
+// Public factory: only the shadow is produced (shadowOnly = true).
+sk_sp<SkImageFilter> SkImageFilters::DropShadowOnly(
+        SkScalar dx, SkScalar dy, SkScalar sigmaX, SkScalar sigmaY, SkColor color,
+        sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+    return SkDropShadowImageFilter::Make(dx, dy, sigmaX, sigmaY, color, /* shadowOnly */ true,
+                                         std::move(input), cropRect);
+}
+
+// Registers this filter for deserialization, under both the current name and
+// the legacy serialized name.
+void SkRegisterDropShadowImageFilterFlattenable() {
+    SK_REGISTER_FLATTENABLE(SkDropShadowImageFilter);
+    // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+    SkFlattenable::Register("SkDropShadowImageFilterImpl", SkDropShadowImageFilter::CreateProc);
+}
+
+// Deserialization: reads the scalars/color written by flatten(), then the
+// legacy shadow-mode enum (validated to be <= 1) as the shadow-only flag.
+sk_sp<SkFlattenable> SkDropShadowImageFilter::CreateProc(SkReadBuffer& buffer) {
+    SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+    SkScalar dx = buffer.readScalar();
+    SkScalar dy = buffer.readScalar();
+    SkScalar sigmaX = buffer.readScalar();
+    SkScalar sigmaY = buffer.readScalar();
+    SkColor color = buffer.readColor();
+
+    // For backwards compatibility, the shadow mode had been saved as an enum cast to a 32LE int,
+    // where shadow-and-foreground was 0 and shadow-only was 1. Other than the number of bits, this
+    // is equivalent to the bool that SkDropShadowImageFilter now uses.
+    bool shadowOnly = SkToBool(buffer.read32LE(1));
+    return SkDropShadowImageFilter::Make(dx, dy, sigmaX, sigmaY, color, shadowOnly,
+                                         common.getInput(0), common.cropRect());
+}
+
+// Serialization counterpart of CreateProc; field order must stay in sync.
+void SkDropShadowImageFilter::flatten(SkWriteBuffer& buffer) const {
+    this->INHERITED::flatten(buffer);
+    buffer.writeScalar(fDx);
+    buffer.writeScalar(fDy);
+    buffer.writeScalar(fSigmaX);
+    buffer.writeScalar(fSigmaY);
+    buffer.writeColor(fColor);
+    // See CreateProc, but we save the bool as an int to match previous enum serialization.
+    buffer.writeInt(fShadowOnly);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Renders the drop shadow: draws the input through a blur filter + SrcIn color
+// filter (tinting everything to fColor) at the CTM-mapped offset, then draws
+// the untouched input on top unless fShadowOnly is set.
+sk_sp<SkSpecialImage> SkDropShadowImageFilter::onFilterImage(const Context& ctx,
+                                                             SkIPoint* offset) const {
+    SkIPoint inputOffset = SkIPoint::Make(0, 0);
+    sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+    if (!input) {
+        return nullptr;
+    }
+
+    const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+                                                  input->width(), input->height());
+    SkIRect bounds;
+    if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+        return nullptr;
+    }
+
+    sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+    if (!surf) {
+        return nullptr;
+    }
+
+    SkCanvas* canvas = surf->getCanvas();
+    SkASSERT(canvas);
+
+    canvas->clear(0x0);
+
+    // Blur radii and shadow offset are specified in local space; map them
+    // through the CTM so the result matches the device-space output.
+    SkVector sigma = SkVector::Make(fSigmaX, fSigmaY);
+    ctx.ctm().mapVectors(&sigma, 1);
+    sigma.fX = SkScalarAbs(sigma.fX);
+    sigma.fY = SkScalarAbs(sigma.fY);
+
+    SkPaint paint;
+    paint.setAntiAlias(true);
+    paint.setImageFilter(SkImageFilters::Blur(sigma.fX, sigma.fY, nullptr));
+    paint.setColorFilter(SkColorFilters::Blend(fColor, SkBlendMode::kSrcIn));
+
+    SkVector offsetVec = SkVector::Make(fDx, fDy);
+    ctx.ctm().mapVectors(&offsetVec, 1);
+
+    canvas->translate(SkIntToScalar(inputOffset.fX) - SkIntToScalar(bounds.fLeft),
+                      SkIntToScalar(inputOffset.fY) - SkIntToScalar(bounds.fTop));
+    // Shadow pass (blurred + tinted), offset by the mapped dx/dy.
+    input->draw(canvas, offsetVec.fX, offsetVec.fY, SkSamplingOptions(), &paint);
+
+    // Foreground pass: original input on top, unless shadow-only was requested.
+    if (!fShadowOnly) {
+        input->draw(canvas, 0, 0);
+    }
+    offset->fX = bounds.fLeft;
+    offset->fY = bounds.fTop;
+    return surf->makeImageSnapshot();
+}
+
+// Fast bounds: shadow bounds are the input bounds offset by (dx, dy) and
+// outset by 3 sigma per axis; unioned with the input bounds unless shadow-only.
+SkRect SkDropShadowImageFilter::computeFastBounds(const SkRect& src) const {
+    SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+    SkRect shadowBounds = bounds;
+    shadowBounds.offset(fDx, fDy);
+    shadowBounds.outset(fSigmaX * 3, fSigmaY * 3);
+    if (!fShadowOnly) {
+        bounds.join(shadowBounds);
+    } else {
+        bounds = shadowBounds;
+    }
+    return bounds;
+}
+
+// Device-space bounds: CTM-mapped offset (negated for reverse mapping) plus a
+// 3-sigma outset; joined with the source rect unless shadow-only.
+SkIRect SkDropShadowImageFilter::onFilterNodeBounds(
+        const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+    SkVector offsetVec = SkVector::Make(fDx, fDy);
+    if (kReverse_MapDirection == dir) {
+        offsetVec.negate();
+    }
+    ctm.mapVectors(&offsetVec, 1);
+    SkIRect dst = src.makeOffset(SkScalarCeilToInt(offsetVec.x()),
+                                 SkScalarCeilToInt(offsetVec.y()));
+    SkVector sigma = SkVector::Make(fSigmaX, fSigmaY);
+    ctm.mapVectors(&sigma, 1);
+    dst.outset(
+        SkScalarCeilToInt(SkScalarAbs(sigma.x() * 3)),
+        SkScalarCeilToInt(SkScalarAbs(sigma.y() * 3)));
+    if (!fShadowOnly) {
+        dst.join(src);
+    }
+    return dst;
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkImageImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkImageImageFilter.cpp
new file mode 100644
index 0000000000..66ecf66052
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkImageImageFilter.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/effects/SkImageFilters.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSamplingPriv.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <utility>
+
+namespace {
+
+// Leaf image filter (no inputs) that produces an SkImage, drawing fSrcRect of
+// the image into fDstRect with the given sampling options.
+class SkImageImageFilter final : public SkImageFilter_Base {
+public:
+    SkImageImageFilter(sk_sp<SkImage> image, const SkRect& srcRect, const SkRect& dstRect,
+                       const SkSamplingOptions& sampling)
+            : INHERITED(nullptr, 0, nullptr)
+            , fImage(std::move(image))
+            , fSrcRect(srcRect)
+            , fDstRect(dstRect)
+            , fSampling(sampling) {}
+
+    SkRect computeFastBounds(const SkRect& src) const override;
+
+protected:
+    void flatten(SkWriteBuffer&) const override;
+
+    sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+    SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+                               MapDirection, const SkIRect* inputRect) const override;
+
+    // Handles arbitrary CTMs itself (the dst rect is mapped in onFilterImage).
+    MatrixCapability onGetCTMCapability() const override { return MatrixCapability::kComplex; }
+
+private:
+    friend void ::SkRegisterImageImageFilterFlattenable();
+    SK_FLATTENABLE_HOOKS(SkImageImageFilter)
+
+    sk_sp<SkImage> fImage;
+    SkRect fSrcRect, fDstRect;
+    SkSamplingOptions fSampling;
+
+    using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+// Public factory; rejects a null image or an empty source rect (width/height
+// <= 0), returning nullptr in those cases.
+sk_sp<SkImageFilter> SkImageFilters::Image(sk_sp<SkImage> image,
+                                           const SkRect& srcRect,
+                                           const SkRect& dstRect,
+                                           const SkSamplingOptions& sampling) {
+    if (!image || srcRect.width() <= 0.0f || srcRect.height() <= 0.0f) {
+        return nullptr;
+    }
+
+    return sk_sp<SkImageFilter>(new SkImageImageFilter(
+            std::move(image), srcRect, dstRect, sampling));
+}
+
+// Registers this filter for deserialization, including the legacy
+// "SkImageSourceImpl" serialized name.
+void SkRegisterImageImageFilterFlattenable() {
+    SK_REGISTER_FLATTENABLE(SkImageImageFilter);
+    // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+    SkFlattenable::Register("SkImageSourceImpl", SkImageImageFilter::CreateProc);
+}
+
+// Deserialization: older pictures stored a filter-quality enum, which is
+// converted to SkSamplingOptions; newer ones store the sampling directly.
+sk_sp<SkFlattenable> SkImageImageFilter::CreateProc(SkReadBuffer& buffer) {
+    SkSamplingOptions sampling;
+    if (buffer.isVersionLT(SkPicturePriv::kImageFilterImageSampling_Version)) {
+        sampling = SkSamplingPriv::FromFQ(buffer.checkFilterQuality(), kLinear_SkMediumAs);
+    } else {
+        sampling = buffer.readSampling();
+    }
+
+    SkRect src, dst;
+    buffer.readRect(&src);
+    buffer.readRect(&dst);
+
+    sk_sp<SkImage> image(buffer.readImage());
+    if (!image) {
+        return nullptr;
+    }
+
+    // Route through the factory so its validation (null image / empty src) applies.
+    return SkImageFilters::Image(std::move(image), src, dst, sampling);
+}
+
+// Serialization counterpart of CreateProc; field order must stay in sync.
+void SkImageImageFilter::flatten(SkWriteBuffer& buffer) const {
+    buffer.writeSampling(fSampling);
+    buffer.writeRect(fSrcRect);
+    buffer.writeRect(fDstRect);
+    buffer.writeImage(fImage.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Produces the output image. When the CTM is a positive integer-translate
+// scale-translate and src/dst line up on integer coordinates with equal
+// dimensions, the image is wrapped directly; otherwise it is drawn into a new
+// surface with the requested sampling.
+sk_sp<SkSpecialImage> SkImageImageFilter::onFilterImage(const Context& ctx,
+                                                        SkIPoint* offset) const {
+    const SkRect dstBounds = ctx.ctm().mapRect(fDstRect);
+    const SkIRect dstIBounds = dstBounds.roundOut();
+
+    // Quick check to see if we can return the image directly, which can be done if the transform
+    // ends up being an integer translate and sampling would have no effect on the output.
+    // TODO: This currently means cubic sampling can be skipped, even though it would change results
+    // for integer translation draws.
+    // TODO: This is prone to false negatives due to the floating point math; we could probably
+    // get away with dimensions and translates being epsilon close to integers.
+    const bool passthroughTransform = ctx.ctm().isScaleTranslate() &&
+                                      ctx.ctm().getScaleX() > 0.f &&
+                                      ctx.ctm().getScaleY() > 0.f;
+    const bool passthroughSrcOffsets = SkScalarIsInt(fSrcRect.fLeft) &&
+                                       SkScalarIsInt(fSrcRect.fTop);
+    const bool passthroughDstOffsets = SkScalarIsInt(dstBounds.fLeft) &&
+                                       SkScalarIsInt(dstBounds.fTop);
+    const bool passthroughDims =
+            SkScalarIsInt(fSrcRect.width()) && fSrcRect.width() == dstBounds.width() &&
+            SkScalarIsInt(fSrcRect.height()) && fSrcRect.height() == dstBounds.height();
+
+    if (passthroughTransform && passthroughSrcOffsets && passthroughDstOffsets && passthroughDims) {
+        // Can pass through fImage directly, applying the dst's location to 'offset'. If fSrcRect
+        // extends outside of the image, we adjust dst to match since those areas would have been
+        // transparent black anyways.
+        SkIRect srcIBounds = fSrcRect.roundOut();
+        SkIPoint srcOffset = srcIBounds.topLeft();
+        if (!srcIBounds.intersect(SkIRect::MakeWH(fImage->width(), fImage->height()))) {
+            return nullptr;
+        }
+
+        *offset = dstIBounds.topLeft() + srcIBounds.topLeft() - srcOffset;
+        return SkSpecialImage::MakeFromImage(ctx.getContext(), srcIBounds, fImage,
+                                             ctx.surfaceProps());
+    }
+
+    // Slow path: render src -> dst through a canvas with the context's CTM.
+    sk_sp<SkSpecialSurface> surf(ctx.makeSurface(dstIBounds.size()));
+    if (!surf) {
+        return nullptr;
+    }
+
+    SkCanvas* canvas = surf->getCanvas();
+    // Subtract off the integer component of the translation (will be applied in offset, below).
+    canvas->translate(-dstIBounds.fLeft, -dstIBounds.fTop);
+    canvas->concat(ctx.ctm());
+    // TODO(skbug.com/5075): Canvases from GPU special surfaces come with uninitialized content
+    canvas->clear(SK_ColorTRANSPARENT);
+    canvas->drawImageRect(fImage.get(), fSrcRect, fDstRect, fSampling, nullptr,
+                          SkCanvas::kStrict_SrcRectConstraint);
+
+    *offset = dstIBounds.topLeft();
+    return surf->makeImageSnapshot();
+}
+
+// A leaf filter's output is exactly its destination rect, independent of src.
+SkRect SkImageImageFilter::computeFastBounds(const SkRect& src) const {
+    return fDstRect;
+}
+
+// Forward mapping yields the CTM-mapped dst rect; reverse mapping defers to
+// the base class.
+SkIRect SkImageImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+                                               MapDirection direction,
+                                               const SkIRect* inputRect) const {
+    if (kReverse_MapDirection == direction) {
+        return INHERITED::onFilterNodeBounds(src, ctm, direction, inputRect);
+    }
+
+    SkRect dstRect = fDstRect;
+    ctm.mapRect(&dstRect);
+    return dstRect.roundOut();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkLightingImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkLightingImageFilter.cpp
new file mode 100644
index 0000000000..990efdb2b7
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkLightingImageFilter.cpp
@@ -0,0 +1,2190 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTPin.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <array>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/SkSLSampleUsage.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkSLTypeShared.h"
+#include "src/gpu/KeyBuilder.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrProcessorUnitTest.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSamplerState.h"
+#include "src/gpu/ganesh/GrShaderVar.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
+
+struct GrShaderCaps;
+
+// For brevity
+typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+#endif
+
+#if GR_TEST_UTILS
+#include "src/base/SkRandom.h"
+#endif
+
+// Fractional weights used by the Sobel-based surface-normal helpers below.
+const SkScalar gOneThird = SkIntToScalar(1) / 3;
+const SkScalar gTwoThirds = SkIntToScalar(2) / 3;
+const SkScalar gOneHalf = 0.5f;
+const SkScalar gOneQuarter = 0.25f;
+
+#if defined(SK_GANESH)
+// Uploads an SkPoint3 as a vec3 uniform. Relies on SkPoint3 being exactly
+// three contiguous floats (enforced by the static_assert).
+static void setUniformPoint3(const GrGLSLProgramDataManager& pdman, UniformHandle uni,
+                             const SkPoint3& point) {
+    static_assert(sizeof(SkPoint3) == 3 * sizeof(float));
+    pdman.set3fv(uni, 1, &point.fX);
+}
+
+// Normals share the same upload layout as points.
+static void setUniformNormal3(const GrGLSLProgramDataManager& pdman, UniformHandle uni,
+                              const SkPoint3& point) {
+    setUniformPoint3(pdman, uni, point);
+}
+
+// Shift matrix components to the left, as we advance pixels to the right.
+// Shift matrix components to the left, as we advance pixels to the right.
+// (Each column of the 3x3 window moves one slot left; the rightmost column is
+// refilled by the caller.)
+static inline void shiftMatrixLeft(int m[9]) {
+    m[0] = m[1];
+    m[3] = m[4];
+    m[6] = m[7];
+    m[1] = m[2];
+    m[4] = m[5];
+    m[7] = m[8];
+}
+
+// Normalizes the vector in place using a fast reciprocal square root.
+static inline void fast_normalize(SkPoint3* vector) {
+    // add a tiny bit so we don't have to worry about divide-by-zero
+    SkScalar magSq = vector->dot(*vector) + SK_ScalarNearlyZero;
+#if defined(_MSC_VER) && _MSC_VER >= 1920
+    // Visual Studio 2019 has some kind of code-generation bug in release builds involving the
+    // lighting math in this file. Using the portable rsqrt avoids the issue. This issue appears
+    // to be specific to the collection of (inline) functions in this file that call into this
+    // function, not with sk_float_rsqrt itself.
+    SkScalar scale = sk_float_rsqrt_portable(magSq);
+#else
+    SkScalar scale = sk_float_rsqrt(magSq);
+#endif
+    vector->fX *= scale;
+    vector->fY *= scale;
+    vector->fZ *= scale;
+}
+
+// Reads an SkPoint3 from the buffer and marks the buffer invalid if any
+// component is non-finite (deserialization hardening).
+static SkPoint3 read_point3(SkReadBuffer& buffer) {
+    SkPoint3 point;
+    point.fX = buffer.readScalar();
+    point.fY = buffer.readScalar();
+    point.fZ = buffer.readScalar();
+    buffer.validate(SkScalarIsFinite(point.fX) &&
+                    SkScalarIsFinite(point.fY) &&
+                    SkScalarIsFinite(point.fZ));
+    return point;
+}
+
+// Serialization counterpart of read_point3.
+static void write_point3(const SkPoint3& point, SkWriteBuffer& buffer) {
+    buffer.writeScalar(point.fX);
+    buffer.writeScalar(point.fY);
+    buffer.writeScalar(point.fZ);
+}
+
+namespace {
+class GpuLight;
+// Abstract light source for the lighting image filters. Stores its color as an
+// SkPoint3 of 0..255 channel values (see the SkColor constructor) and knows how
+// to serialize itself, transform by a matrix, and evaluate per-pixel lighting
+// direction and color.
+class SkImageFilterLight : public SkRefCnt {
+public:
+    enum LightType {
+        kDistant_LightType,
+        kPoint_LightType,
+        kSpot_LightType,
+
+        kLast_LightType = kSpot_LightType
+    };
+    virtual LightType type() const = 0;
+    const SkPoint3& color() const { return fColor; }
+    // Creates the GPU-side representation of this light.
+    virtual std::unique_ptr<GpuLight> createGpuLight() const = 0;
+    // Base equality compares only color; subclasses extend with their geometry.
+    virtual bool isEqual(const SkImageFilterLight& other) const {
+        return fColor == other.fColor;
+    }
+    // Returns a new light with its geometry mapped through 'matrix'.
+    virtual SkImageFilterLight* transform(const SkMatrix& matrix) const = 0;
+
+    // Defined below SkLight's subclasses.
+    void flattenLight(SkWriteBuffer& buffer) const;
+    static SkImageFilterLight* UnflattenLight(SkReadBuffer& buffer);
+
+    // Unit vector from the surface point (x, y, z*surfaceScale) toward the light.
+    virtual SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const = 0;
+    // Light color as seen along the given surface-to-light direction.
+    virtual SkPoint3 lightColor(const SkPoint3& surfaceToLight) const = 0;
+
+protected:
+    SkImageFilterLight(SkColor color) {
+        fColor = SkPoint3::Make(SkIntToScalar(SkColorGetR(color)),
+                                SkIntToScalar(SkColorGetG(color)),
+                                SkIntToScalar(SkColorGetB(color)));
+    }
+    SkImageFilterLight(const SkPoint3& color) : fColor(color) {}
+
+    SkImageFilterLight(SkReadBuffer& buffer) {
+        fColor = read_point3(buffer);
+    }
+
+    virtual void onFlattenLight(SkWriteBuffer& buffer) const = 0;
+
+
+private:
+    using INHERITED = SkRefCnt;
+    SkPoint3 fColor;
+};
+
+// Strategy interface for the CPU lighting computation: given a surface normal,
+// the surface-to-light vector, and the light color, produce the shaded pixel.
+class BaseLightingType {
+public:
+    BaseLightingType() {}
+    virtual ~BaseLightingType() {}
+
+    virtual SkPMColor light(const SkPoint3& normal, const SkPoint3& surfaceTolight,
+                            const SkPoint3& lightColor) const= 0;
+};
+
+// Diffuse shading: color = kd * (N . L) * lightColor, clamped per channel to
+// [0, 255], with opaque alpha (matches the SVG feDiffuseLighting formula).
+class DiffuseLightingType : public BaseLightingType {
+public:
+    DiffuseLightingType(SkScalar kd)
+        : fKD(kd) {}
+    SkPMColor light(const SkPoint3& normal, const SkPoint3& surfaceTolight,
+                    const SkPoint3& lightColor) const override {
+        SkScalar colorScale = fKD * normal.dot(surfaceTolight);
+        SkPoint3 color = lightColor.makeScale(colorScale);
+        return SkPackARGB32(255,
+                            SkTPin(SkScalarRoundToInt(color.fX), 0, 255),
+                            SkTPin(SkScalarRoundToInt(color.fY), 0, 255),
+                            SkTPin(SkScalarRoundToInt(color.fZ), 0, 255));
+    }
+private:
+    // Diffuse reflection constant.
+    SkScalar fKD;
+};
+
+// Largest of the three components; used as the alpha of a specular highlight.
+static SkScalar max_component(const SkPoint3& p) {
+    return p.x() > p.y() ? (p.x() > p.z() ? p.x() : p.z()) : (p.y() > p.z() ? p.y() : p.z());
+}
+
+// Specular shading: color = ks * (N . H)^shininess * lightColor, where H is the
+// normalized half-vector between the light and the viewer at (0, 0, 1). Alpha
+// is the max color component (matches the SVG feSpecularLighting formula).
+class SpecularLightingType : public BaseLightingType {
+public:
+    SpecularLightingType(SkScalar ks, SkScalar shininess)
+        : fKS(ks), fShininess(shininess) {}
+    SkPMColor light(const SkPoint3& normal, const SkPoint3& surfaceTolight,
+                    const SkPoint3& lightColor) const override {
+        SkPoint3 halfDir(surfaceTolight);
+        halfDir.fZ += SK_Scalar1;        // eye position is always (0, 0, 1)
+        fast_normalize(&halfDir);
+        SkScalar colorScale = fKS * SkScalarPow(normal.dot(halfDir), fShininess);
+        SkPoint3 color = lightColor.makeScale(colorScale);
+        return SkPackARGB32(SkTPin(SkScalarRoundToInt(max_component(color)), 0, 255),
+                            SkTPin(SkScalarRoundToInt(color.fX), 0, 255),
+                            SkTPin(SkScalarRoundToInt(color.fY), 0, 255),
+                            SkTPin(SkScalarRoundToInt(color.fZ), 0, 255));
+    }
+private:
+    // Specular reflection constant and specular exponent.
+    SkScalar fKS;
+    SkScalar fShininess;
+};
+} // anonymous namespace
+
+// Weighted six-tap difference used for the Sobel gradient estimates below.
+static inline SkScalar sobel(int a, int b, int c, int d, int e, int f, SkScalar scale) {
+    return (-a + b - 2 * c + 2 * d -e + f) * scale;
+}
+
+// Converts the two surface gradients into a normalized surface normal,
+// scaled by surfaceScale and pointing out of the surface (+Z).
+static inline SkPoint3 pointToNormal(SkScalar x, SkScalar y, SkScalar surfaceScale) {
+    SkPoint3 vector = SkPoint3::Make(-x * surfaceScale, -y * surfaceScale, 1);
+    fast_normalize(&vector);
+    return vector;
+}
+
+// The nine helpers below compute the surface normal from a 3x3 alpha window
+// m[0..8] (row-major). One helper per position relative to the image edge:
+// taps that fall outside the image are passed as zero and the sobel weights
+// are adjusted accordingly; interiorNormal handles the full-support case.
+static inline SkPoint3 topLeftNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel(0, 0, m[4], m[5], m[7], m[8], gTwoThirds),
+                         sobel(0, 0, m[4], m[7], m[5], m[8], gTwoThirds),
+                         surfaceScale);
+}
+
+static inline SkPoint3 topNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel( 0, 0, m[3], m[5], m[6], m[8], gOneThird),
+                         sobel(m[3], m[6], m[4], m[7], m[5], m[8], gOneHalf),
+                         surfaceScale);
+}
+
+static inline SkPoint3 topRightNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel( 0, 0, m[3], m[4], m[6], m[7], gTwoThirds),
+                         sobel(m[3], m[6], m[4], m[7], 0, 0, gTwoThirds),
+                         surfaceScale);
+}
+
+static inline SkPoint3 leftNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel(m[1], m[2], m[4], m[5], m[7], m[8], gOneHalf),
+                         sobel( 0, 0, m[1], m[7], m[2], m[8], gOneThird),
+                         surfaceScale);
+}
+
+
+static inline SkPoint3 interiorNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel(m[0], m[2], m[3], m[5], m[6], m[8], gOneQuarter),
+                         sobel(m[0], m[6], m[1], m[7], m[2], m[8], gOneQuarter),
+                         surfaceScale);
+}
+
+static inline SkPoint3 rightNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel(m[0], m[1], m[3], m[4], m[6], m[7], gOneHalf),
+                         sobel(m[0], m[6], m[1], m[7], 0, 0, gOneThird),
+                         surfaceScale);
+}
+
+static inline SkPoint3 bottomLeftNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel(m[1], m[2], m[4], m[5], 0, 0, gTwoThirds),
+                         sobel( 0, 0, m[1], m[4], m[2], m[5], gTwoThirds),
+                         surfaceScale);
+}
+
+static inline SkPoint3 bottomNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel(m[0], m[2], m[3], m[5], 0, 0, gOneThird),
+                         sobel(m[0], m[3], m[1], m[4], m[2], m[5], gOneHalf),
+                         surfaceScale);
+}
+
+static inline SkPoint3 bottomRightNormal(int m[9], SkScalar surfaceScale) {
+    return pointToNormal(sobel(m[0], m[1], m[3], m[4], 0, 0, gTwoThirds),
+                         sobel(m[0], m[3], m[1], m[4], 0, 0, gTwoThirds),
+                         surfaceScale);
+}
+
+namespace {
+class UncheckedPixelFetcher {
+public:
+ static inline uint32_t Fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ return SkGetPackedA32(*src.getAddr32(x, y));
+ }
+};
+
+// The DecalPixelFetcher is used when the destination crop rect exceeds the input bitmap bounds.
+class DecalPixelFetcher {
+public:
+ static inline uint32_t Fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ if (x < bounds.fLeft || x >= bounds.fRight || y < bounds.fTop || y >= bounds.fBottom) {
+ return 0;
+ } else {
+ return SkGetPackedA32(*src.getAddr32(x, y));
+ }
+ }
+};
+} // anonymous namespace
+
+template <class PixelFetcher>
+static void lightBitmap(const BaseLightingType& lightingType,
+ const SkImageFilterLight* l,
+ const SkBitmap& src,
+ SkBitmap* dst,
+ SkScalar surfaceScale,
+ const SkIRect& bounds) {
+ SkASSERT(dst->width() == bounds.width() && dst->height() == bounds.height());
+ int left = bounds.left(), right = bounds.right();
+ int bottom = bounds.bottom();
+ int y = bounds.top();
+ SkIRect srcBounds = src.bounds();
+ SkPMColor* dptr = dst->getAddr32(0, 0);
+ {
+ int x = left;
+ int m[9];
+ m[4] = PixelFetcher::Fetch(src, x, y, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[7] = PixelFetcher::Fetch(src, x, y + 1, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topLeftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ for (++x; x < right - 1; ++x)
+ {
+ shiftMatrixLeft(m);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(topRightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+
+ for (++y; y < bottom - 1; ++y) {
+ int x = left;
+ int m[9];
+ m[1] = PixelFetcher::Fetch(src, x, y - 1, srcBounds);
+ m[2] = PixelFetcher::Fetch(src, x + 1, y - 1, srcBounds);
+ m[4] = PixelFetcher::Fetch(src, x, y, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[7] = PixelFetcher::Fetch(src, x, y + 1, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(leftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ for (++x; x < right - 1; ++x) {
+ shiftMatrixLeft(m);
+ m[2] = PixelFetcher::Fetch(src, x + 1, y - 1, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, y, srcBounds);
+ m[8] = PixelFetcher::Fetch(src, x + 1, y + 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(interiorNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(rightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+
+ {
+ int x = left;
+ int m[9];
+ m[1] = PixelFetcher::Fetch(src, x, bottom - 2, srcBounds);
+ m[2] = PixelFetcher::Fetch(src, x + 1, bottom - 2, srcBounds);
+ m[4] = PixelFetcher::Fetch(src, x, bottom - 1, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, bottom - 1, srcBounds);
+ SkPoint3 surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomLeftNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ for (++x; x < right - 1; ++x)
+ {
+ shiftMatrixLeft(m);
+ m[2] = PixelFetcher::Fetch(src, x + 1, bottom - 2, srcBounds);
+ m[5] = PixelFetcher::Fetch(src, x + 1, bottom - 1, srcBounds);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+ shiftMatrixLeft(m);
+ surfaceToLight = l->surfaceToLight(x, y, m[4], surfaceScale);
+ *dptr++ = lightingType.light(bottomRightNormal(m, surfaceScale), surfaceToLight,
+ l->lightColor(surfaceToLight));
+ }
+}
+
+static void lightBitmap(const BaseLightingType& lightingType,
+ const SkImageFilterLight* light,
+ const SkBitmap& src,
+ SkBitmap* dst,
+ SkScalar surfaceScale,
+ const SkIRect& bounds) {
+ if (src.bounds().contains(bounds)) {
+ lightBitmap<UncheckedPixelFetcher>(
+ lightingType, light, src, dst, surfaceScale, bounds);
+ } else {
+ lightBitmap<DecalPixelFetcher>(
+ lightingType, light, src, dst, surfaceScale, bounds);
+ }
+}
+
+namespace {
+enum BoundaryMode {
+ kTopLeft_BoundaryMode,
+ kTop_BoundaryMode,
+ kTopRight_BoundaryMode,
+ kLeft_BoundaryMode,
+ kInterior_BoundaryMode,
+ kRight_BoundaryMode,
+ kBottomLeft_BoundaryMode,
+ kBottom_BoundaryMode,
+ kBottomRight_BoundaryMode,
+
+ kBoundaryModeCount,
+};
+
+class SkLightingImageFilterInternal : public SkImageFilter_Base {
+protected:
+ SkLightingImageFilterInternal(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ sk_sp<SkImageFilter> input,
+ const SkRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fLight(std::move(light))
+ , fSurfaceScale(surfaceScale / 255) {}
+
+ void flatten(SkWriteBuffer& buffer) const override {
+ this->INHERITED::flatten(buffer);
+ fLight->flattenLight(buffer);
+ buffer.writeScalar(fSurfaceScale * 255);
+ }
+
+ bool onAffectsTransparentBlack() const override { return true; }
+
+ const SkImageFilterLight* light() const { return fLight.get(); }
+ inline sk_sp<const SkImageFilterLight> refLight() const { return fLight; }
+ SkScalar surfaceScale() const { return fSurfaceScale; }
+
+#if defined(SK_GANESH)
+ sk_sp<SkSpecialImage> filterImageGPU(const Context& ctx,
+ SkSpecialImage* input,
+ const SkIRect& bounds,
+ const SkMatrix& matrix) const;
+ virtual std::unique_ptr<GrFragmentProcessor> makeFragmentProcessor(GrSurfaceProxyView,
+ const SkIPoint& viewOffset,
+ const SkMatrix&,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode,
+ const GrCaps&) const = 0;
+#endif
+
+private:
+#if defined(SK_GANESH)
+ void drawRect(skgpu::ganesh::SurfaceFillContext*,
+ GrSurfaceProxyView srcView,
+ const SkIPoint& viewOffset,
+ const SkMatrix& matrix,
+ const SkIRect& dstRect,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const SkIRect& bounds) const;
+#endif
+
+ sk_sp<SkImageFilterLight> fLight;
+ SkScalar fSurfaceScale;
+
+ using INHERITED = SkImageFilter_Base;
+};
+} // anonymous namespace
+
+#if defined(SK_GANESH)
+void SkLightingImageFilterInternal::drawRect(skgpu::ganesh::SurfaceFillContext* sfc,
+ GrSurfaceProxyView srcView,
+ const SkIPoint& viewOffset,
+ const SkMatrix& matrix,
+ const SkIRect& dstRect,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const SkIRect& bounds) const {
+ SkIRect srcRect = dstRect.makeOffset(bounds.topLeft());
+ auto fp = this->makeFragmentProcessor(std::move(srcView), viewOffset, matrix, srcBounds,
+ boundaryMode, *sfc->caps());
+ sfc->fillRectToRectWithFP(srcRect, dstRect, std::move(fp));
+}
+
+sk_sp<SkSpecialImage> SkLightingImageFilterInternal::filterImageGPU(
+ const Context& ctx,
+ SkSpecialImage* input,
+ const SkIRect& offsetBounds,
+ const SkMatrix& matrix) const {
+ SkASSERT(ctx.gpuBacked());
+
+ auto rContext = ctx.getContext();
+
+ GrSurfaceProxyView inputView = input->view(rContext);
+ SkASSERT(inputView.asTextureProxy());
+
+ GrImageInfo info(ctx.grColorType(),
+ kPremul_SkAlphaType,
+ ctx.refColorSpace(),
+ offsetBounds.size());
+ auto sfc = rContext->priv().makeSFC(info,
+ "LightingImageFilterInternal_FilterImageGPU",
+ SkBackingFit::kApprox,
+ 1,
+ skgpu::Mipmapped::kNo,
+ inputView.proxy()->isProtected(),
+ kBottomLeft_GrSurfaceOrigin);
+ if (!sfc) {
+ return nullptr;
+ }
+
+ SkIRect dstRect = SkIRect::MakeWH(offsetBounds.width(), offsetBounds.height());
+
+ const SkIRect inputBounds = SkIRect::MakeWH(input->width(), input->height());
+ SkIRect topLeft = SkIRect::MakeXYWH(0, 0, 1, 1);
+ SkIRect top = SkIRect::MakeXYWH(1, 0, dstRect.width() - 2, 1);
+ SkIRect topRight = SkIRect::MakeXYWH(dstRect.width() - 1, 0, 1, 1);
+ SkIRect left = SkIRect::MakeXYWH(0, 1, 1, dstRect.height() - 2);
+ SkIRect interior = dstRect.makeInset(1, 1);
+ SkIRect right = SkIRect::MakeXYWH(dstRect.width() - 1, 1, 1, dstRect.height() - 2);
+ SkIRect bottomLeft = SkIRect::MakeXYWH(0, dstRect.height() - 1, 1, 1);
+ SkIRect bottom = SkIRect::MakeXYWH(1, dstRect.height() - 1, dstRect.width() - 2, 1);
+ SkIRect bottomRight = SkIRect::MakeXYWH(dstRect.width() - 1, dstRect.height() - 1, 1, 1);
+
+ const SkIRect* pSrcBounds = inputBounds.contains(offsetBounds) ? nullptr : &inputBounds;
+ const SkIPoint inputViewOffset = input->subset().topLeft();
+
+ this->drawRect(sfc.get(), inputView, inputViewOffset, matrix, topLeft,
+ kTopLeft_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(sfc.get(), inputView, inputViewOffset, matrix, top,
+ kTop_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(sfc.get(), inputView, inputViewOffset, matrix, topRight,
+ kTopRight_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(sfc.get(), inputView, inputViewOffset, matrix, left,
+ kLeft_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(sfc.get(), inputView, inputViewOffset, matrix, interior,
+ kInterior_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(sfc.get(), inputView, inputViewOffset, matrix, right,
+ kRight_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(sfc.get(), inputView, inputViewOffset, matrix, bottomLeft,
+ kBottomLeft_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(sfc.get(), inputView, inputViewOffset, matrix, bottom,
+ kBottom_BoundaryMode, pSrcBounds, offsetBounds);
+ this->drawRect(sfc.get(), std::move(inputView), inputViewOffset, matrix, bottomRight,
+ kBottomRight_BoundaryMode, pSrcBounds, offsetBounds);
+
+ return SkSpecialImage::MakeDeferredFromGpu(
+ rContext,
+ SkIRect::MakeWH(offsetBounds.width(), offsetBounds.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ sfc->readSurfaceView(),
+ sfc->colorInfo(),
+ ctx.surfaceProps());
+}
+#endif
+
+namespace {
+class SkDiffuseLightingImageFilter : public SkLightingImageFilterInternal {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter>,
+ const SkRect*);
+
+ SkScalar kd() const { return fKD; }
+
+protected:
+ SkDiffuseLightingImageFilter(sk_sp<SkImageFilterLight> light, SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input, const SkRect* cropRect);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> makeFragmentProcessor(GrSurfaceProxyView,
+ const SkIPoint& viewOffset,
+ const SkMatrix&,
+ const SkIRect* bounds,
+ BoundaryMode,
+ const GrCaps&) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkDiffuseLightingImageFilter)
+ friend void ::SkRegisterLightingImageFilterFlattenables();
+ SkScalar fKD;
+
+ using INHERITED = SkLightingImageFilterInternal;
+};
+
+class SkSpecularLightingImageFilter : public SkLightingImageFilterInternal {
+public:
+ static sk_sp<SkImageFilter> Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks, SkScalar shininess,
+ sk_sp<SkImageFilter>, const SkRect*);
+
+ SkScalar ks() const { return fKS; }
+ SkScalar shininess() const { return fShininess; }
+
+protected:
+ SkSpecularLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input, const SkRect*);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> makeFragmentProcessor(GrSurfaceProxyView,
+ const SkIPoint& viewOffset,
+ const SkMatrix&,
+ const SkIRect* bounds,
+ BoundaryMode,
+ const GrCaps&) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkSpecularLightingImageFilter)
+ friend void ::SkRegisterLightingImageFilterFlattenables();
+
+ SkScalar fKS;
+ SkScalar fShininess;
+
+ using INHERITED = SkLightingImageFilterInternal;
+};
+
+#if defined(SK_GANESH)
+
+class LightingEffect : public GrFragmentProcessor {
+public:
+ const SkImageFilterLight* light() const { return fLight.get(); }
+ SkScalar surfaceScale() const { return fSurfaceScale; }
+ const SkMatrix& filterMatrix() const { return fFilterMatrix; }
+ BoundaryMode boundaryMode() const { return fBoundaryMode; }
+
+protected:
+ class ImplBase;
+
+ LightingEffect(ClassID classID,
+ GrSurfaceProxyView,
+ const SkIPoint& viewOffset,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const GrCaps& caps);
+
+ explicit LightingEffect(const LightingEffect& that);
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+private:
+ void onAddToKey(const GrShaderCaps& caps, skgpu::KeyBuilder* b) const override {
+ b->add32(fBoundaryMode << 2 | fLight->type());
+ }
+
+ sk_sp<const SkImageFilterLight> fLight;
+ SkScalar fSurfaceScale;
+ SkMatrix fFilterMatrix;
+ BoundaryMode fBoundaryMode;
+
+ using INHERITED = GrFragmentProcessor;
+};
+
+class DiffuseLightingEffect : public LightingEffect {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(GrSurfaceProxyView view,
+ const SkIPoint& viewOffset,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const GrCaps& caps) {
+ return std::unique_ptr<GrFragmentProcessor>(new DiffuseLightingEffect(std::move(view),
+ viewOffset,
+ std::move(light),
+ surfaceScale,
+ matrix,
+ kd,
+ boundaryMode,
+ srcBounds,
+ caps));
+ }
+
+ const char* name() const override { return "DiffuseLighting"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new DiffuseLightingEffect(*this));
+ }
+
+private:
+ class Impl;
+
+ std::unique_ptr<ProgramImpl> onMakeProgramImpl() const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ DiffuseLightingEffect(GrSurfaceProxyView view,
+ const SkIPoint& viewOffset,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const GrCaps& caps);
+
+ explicit DiffuseLightingEffect(const DiffuseLightingEffect& that);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ SkScalar fKD;
+
+ using INHERITED = LightingEffect;
+};
+
+class SpecularLightingEffect : public LightingEffect {
+public:
+ static std::unique_ptr<GrFragmentProcessor> Make(GrSurfaceProxyView view,
+ const SkIPoint& viewOffset,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const GrCaps& caps) {
+ return std::unique_ptr<GrFragmentProcessor>(new SpecularLightingEffect(std::move(view),
+ viewOffset,
+ std::move(light),
+ surfaceScale,
+ matrix,
+ ks,
+ shininess,
+ boundaryMode,
+ srcBounds,
+ caps));
+ }
+
+ const char* name() const override { return "SpecularLighting"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new SpecularLightingEffect(*this));
+ }
+
+ std::unique_ptr<ProgramImpl> onMakeProgramImpl() const override;
+
+private:
+ class Impl;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+
+ SpecularLightingEffect(GrSurfaceProxyView,
+ const SkIPoint& viewOffset,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const GrCaps&);
+
+ explicit SpecularLightingEffect(const SpecularLightingEffect&);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+ SkScalar fKS;
+ SkScalar fShininess;
+
+ using INHERITED = LightingEffect;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GpuLight {
+public:
+ virtual ~GpuLight() = default;
+
+ /**
+ * This is called by GrGLLightingEffect::emitCode() before either of the two virtual functions
+ * below. It adds a half3 uniform visible in the FS that represents the constant light color.
+ */
+ void emitLightColorUniform(const GrFragmentProcessor*, GrGLSLUniformHandler*);
+
+ /**
+ * These two functions are called from GrGLLightingEffect's emitCode() function.
+ * emitSurfaceToLight places an expression in param out that is the vector from the surface to
+ * the light. The expression will be used in the FS. emitLightColor writes an expression into
+ * the FS that is the color of the light. Either function may add functions and/or uniforms to
+ * the FS. The default of emitLightColor appends the name of the constant light color uniform
+ * and so this function only needs to be overridden if the light color varies spatially.
+ */
+ virtual void emitSurfaceToLight(const GrFragmentProcessor*,
+ GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char* z) = 0;
+ virtual void emitLightColor(const GrFragmentProcessor*,
+ GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char *surfaceToLight);
+
+ // This is called from GrGLLightingEffect's setData(). Subclasses of GrGLLight must call
+ // INHERITED::setData().
+ virtual void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const;
+
+protected:
+ /**
+ * Gets the constant light color uniform. Subclasses can use this in their emitLightColor
+ * function.
+ */
+ UniformHandle lightColorUni() const { return fColorUni; }
+
+private:
+ UniformHandle fColorUni;
+
+ using INHERITED = SkRefCnt;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GpuDistantLight : public GpuLight {
+public:
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(const GrFragmentProcessor*, GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*, const char* z) override;
+
+private:
+ using INHERITED = GpuLight;
+ UniformHandle fDirectionUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GpuPointLight : public GpuLight {
+public:
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(const GrFragmentProcessor*, GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*, const char* z) override;
+
+private:
+ using INHERITED = GpuLight;
+ UniformHandle fLocationUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GpuSpotLight : public GpuLight {
+public:
+ void setData(const GrGLSLProgramDataManager&, const SkImageFilterLight* light) const override;
+ void emitSurfaceToLight(const GrFragmentProcessor*, GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*, const char* z) override;
+ void emitLightColor(const GrFragmentProcessor*,
+ GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ const char *surfaceToLight) override;
+
+private:
+ using INHERITED = GpuLight;
+
+ SkString fLightColorFunc;
+ UniformHandle fLocationUni;
+ UniformHandle fExponentUni;
+ UniformHandle fCosOuterConeAngleUni;
+ UniformHandle fCosInnerConeAngleUni;
+ UniformHandle fConeScaleUni;
+ UniformHandle fSUni;
+};
+
+#else
+
+class GpuLight {};
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkDistantLight : public SkImageFilterLight {
+public:
+ SkDistantLight(const SkPoint3& direction, SkColor color)
+ : INHERITED(color), fDirection(direction) {
+ }
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const override {
+ return fDirection;
+ }
+ SkPoint3 lightColor(const SkPoint3&) const override { return this->color(); }
+ LightType type() const override { return kDistant_LightType; }
+ const SkPoint3& direction() const { return fDirection; }
+ std::unique_ptr<GpuLight> createGpuLight() const override {
+#if defined(SK_GANESH)
+ return std::make_unique<GpuDistantLight>();
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kDistant_LightType) {
+ return false;
+ }
+
+ const SkDistantLight& o = static_cast<const SkDistantLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fDirection == o.fDirection;
+ }
+
+ SkDistantLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fDirection = read_point3(buffer);
+ }
+
+protected:
+ SkDistantLight(const SkPoint3& direction, const SkPoint3& color)
+ : INHERITED(color), fDirection(direction) {
+ }
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ return new SkDistantLight(direction(), color());
+ }
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ write_point3(fDirection, buffer);
+ }
+
+private:
+ SkPoint3 fDirection;
+
+ using INHERITED = SkImageFilterLight;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkPointLight : public SkImageFilterLight {
+public:
+ SkPointLight(const SkPoint3& location, SkColor color)
+ : INHERITED(color), fLocation(location) {}
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const override {
+ SkPoint3 direction = SkPoint3::Make(fLocation.fX - SkIntToScalar(x),
+ fLocation.fY - SkIntToScalar(y),
+ fLocation.fZ - SkIntToScalar(z) * surfaceScale);
+ fast_normalize(&direction);
+ return direction;
+ }
+ SkPoint3 lightColor(const SkPoint3&) const override { return this->color(); }
+ LightType type() const override { return kPoint_LightType; }
+ const SkPoint3& location() const { return fLocation; }
+ std::unique_ptr<GpuLight> createGpuLight() const override {
+#if defined(SK_GANESH)
+ return std::make_unique<GpuPointLight>();
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kPoint_LightType) {
+ return false;
+ }
+ const SkPointLight& o = static_cast<const SkPointLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fLocation == o.fLocation;
+ }
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ SkPoint location2 = SkPoint::Make(fLocation.fX, fLocation.fY);
+ matrix.mapPoints(&location2, 1);
+ // Use X scale and Y scale on Z and average the result
+ SkPoint locationZ = SkPoint::Make(fLocation.fZ, fLocation.fZ);
+ matrix.mapVectors(&locationZ, 1);
+ SkPoint3 location = SkPoint3::Make(location2.fX,
+ location2.fY,
+ SkScalarAve(locationZ.fX, locationZ.fY));
+ return new SkPointLight(location, color());
+ }
+
+ SkPointLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fLocation = read_point3(buffer);
+ }
+
+protected:
+ SkPointLight(const SkPoint3& location, const SkPoint3& color)
+ : INHERITED(color), fLocation(location) {}
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ write_point3(fLocation, buffer);
+ }
+
+private:
+ SkPoint3 fLocation;
+
+ using INHERITED = SkImageFilterLight;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkSpotLight : public SkImageFilterLight {
+public:
+ SkSpotLight(const SkPoint3& location,
+ const SkPoint3& target,
+ SkScalar specularExponent,
+ SkScalar cutoffAngle,
+ SkColor color)
+ : INHERITED(color),
+ fLocation(location),
+ fTarget(target),
+ fSpecularExponent(specularExponent)
+ {
+ fS = target - location;
+ fast_normalize(&fS);
+ fCosOuterConeAngle = SkScalarCos(SkDegreesToRadians(cutoffAngle));
+ const SkScalar antiAliasThreshold = 0.016f;
+ fCosInnerConeAngle = fCosOuterConeAngle + antiAliasThreshold;
+ fConeScale = SkScalarInvert(antiAliasThreshold);
+ }
+
+ SkImageFilterLight* transform(const SkMatrix& matrix) const override {
+ SkPoint location2 = SkPoint::Make(fLocation.fX, fLocation.fY);
+ matrix.mapPoints(&location2, 1);
+ // Use X scale and Y scale on Z and average the result
+ SkPoint locationZ = SkPoint::Make(fLocation.fZ, fLocation.fZ);
+ matrix.mapVectors(&locationZ, 1);
+ SkPoint3 location = SkPoint3::Make(location2.fX, location2.fY,
+ SkScalarAve(locationZ.fX, locationZ.fY));
+ SkPoint target2 = SkPoint::Make(fTarget.fX, fTarget.fY);
+ matrix.mapPoints(&target2, 1);
+ SkPoint targetZ = SkPoint::Make(fTarget.fZ, fTarget.fZ);
+ matrix.mapVectors(&targetZ, 1);
+ SkPoint3 target = SkPoint3::Make(target2.fX, target2.fY,
+ SkScalarAve(targetZ.fX, targetZ.fY));
+ SkPoint3 s = target - location;
+ fast_normalize(&s);
+ return new SkSpotLight(location,
+ target,
+ fSpecularExponent,
+ fCosOuterConeAngle,
+ fCosInnerConeAngle,
+ fConeScale,
+ s,
+ color());
+ }
+
+ SkPoint3 surfaceToLight(int x, int y, int z, SkScalar surfaceScale) const override {
+ SkPoint3 direction = SkPoint3::Make(fLocation.fX - SkIntToScalar(x),
+ fLocation.fY - SkIntToScalar(y),
+ fLocation.fZ - SkIntToScalar(z) * surfaceScale);
+ fast_normalize(&direction);
+ return direction;
+ }
+ SkPoint3 lightColor(const SkPoint3& surfaceToLight) const override {
+ SkScalar cosAngle = -surfaceToLight.dot(fS);
+ SkScalar scale = 0;
+ if (cosAngle >= fCosOuterConeAngle) {
+ scale = SkScalarPow(cosAngle, fSpecularExponent);
+ if (cosAngle < fCosInnerConeAngle) {
+ scale *= (cosAngle - fCosOuterConeAngle) * fConeScale;
+ }
+ }
+ return this->color().makeScale(scale);
+ }
+ std::unique_ptr<GpuLight> createGpuLight() const override {
+#if defined(SK_GANESH)
+ return std::make_unique<GpuSpotLight>();
+#else
+ SkDEBUGFAIL("Should not call in GPU-less build");
+ return nullptr;
+#endif
+ }
+ LightType type() const override { return kSpot_LightType; }
+ const SkPoint3& location() const { return fLocation; }
+ const SkPoint3& target() const { return fTarget; }
+ SkScalar specularExponent() const { return fSpecularExponent; }
+ SkScalar cosInnerConeAngle() const { return fCosInnerConeAngle; }
+ SkScalar cosOuterConeAngle() const { return fCosOuterConeAngle; }
+ SkScalar coneScale() const { return fConeScale; }
+ const SkPoint3& s() const { return fS; }
+
+ SkSpotLight(SkReadBuffer& buffer) : INHERITED(buffer) {
+ fLocation = read_point3(buffer);
+ fTarget = read_point3(buffer);
+ fSpecularExponent = buffer.readScalar();
+ fCosOuterConeAngle = buffer.readScalar();
+ fCosInnerConeAngle = buffer.readScalar();
+ fConeScale = buffer.readScalar();
+ fS = read_point3(buffer);
+ buffer.validate(SkScalarIsFinite(fSpecularExponent) &&
+ SkScalarIsFinite(fCosOuterConeAngle) &&
+ SkScalarIsFinite(fCosInnerConeAngle) &&
+ SkScalarIsFinite(fConeScale));
+ }
+protected:
+ SkSpotLight(const SkPoint3& location,
+ const SkPoint3& target,
+ SkScalar specularExponent,
+ SkScalar cosOuterConeAngle,
+ SkScalar cosInnerConeAngle,
+ SkScalar coneScale,
+ const SkPoint3& s,
+ const SkPoint3& color)
+ : INHERITED(color),
+ fLocation(location),
+ fTarget(target),
+ fSpecularExponent(specularExponent),
+ fCosOuterConeAngle(cosOuterConeAngle),
+ fCosInnerConeAngle(cosInnerConeAngle),
+ fConeScale(coneScale),
+ fS(s)
+ {
+ }
+ void onFlattenLight(SkWriteBuffer& buffer) const override {
+ write_point3(fLocation, buffer);
+ write_point3(fTarget, buffer);
+ buffer.writeScalar(fSpecularExponent);
+ buffer.writeScalar(fCosOuterConeAngle);
+ buffer.writeScalar(fCosInnerConeAngle);
+ buffer.writeScalar(fConeScale);
+ write_point3(fS, buffer);
+ }
+
+ bool isEqual(const SkImageFilterLight& other) const override {
+ if (other.type() != kSpot_LightType) {
+ return false;
+ }
+
+ const SkSpotLight& o = static_cast<const SkSpotLight&>(other);
+ return INHERITED::isEqual(other) &&
+ fLocation == o.fLocation &&
+ fTarget == o.fTarget &&
+ fSpecularExponent == o.fSpecularExponent &&
+ fCosOuterConeAngle == o.fCosOuterConeAngle;
+ }
+
+private:
+ SkPoint3 fLocation;
+ SkPoint3 fTarget;
+ SkScalar fSpecularExponent;
+ SkScalar fCosOuterConeAngle;
+ SkScalar fCosInnerConeAngle;
+ SkScalar fConeScale;
+ SkPoint3 fS;
+
+ using INHERITED = SkImageFilterLight;
+};
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkImageFilterLight::flattenLight(SkWriteBuffer& buffer) const {
+ // Write type first, then baseclass, then subclass.
+ buffer.writeInt(this->type());
+ write_point3(fColor, buffer);
+ this->onFlattenLight(buffer);
+}
+
+/*static*/ SkImageFilterLight* SkImageFilterLight::UnflattenLight(SkReadBuffer& buffer) {
+ SkImageFilterLight::LightType type = buffer.read32LE(SkImageFilterLight::kLast_LightType);
+
+ switch (type) {
+ // Each of these constructors must first call SkLight's, so we'll read the baseclass
+ // then subclass, same order as flattenLight.
+ case SkImageFilterLight::kDistant_LightType:
+ return new SkDistantLight(buffer);
+ case SkImageFilterLight::kPoint_LightType:
+ return new SkPointLight(buffer);
+ case SkImageFilterLight::kSpot_LightType:
+ return new SkSpotLight(buffer);
+ default:
+ // Should never get here due to prior check of SkSafeRange
+ SkDEBUGFAIL("Unknown LightType.");
+ return nullptr;
+ }
+}
+///////////////////////////////////////////////////////////////////////////////
+
+// Public SkImageFilters entry points for the six SVG-style lighting filters:
+// {Distant, Point, Spot} light x {Diffuse, Specular} shading. Each one wraps
+// its arguments in the matching SkImageFilterLight subclass and forwards to
+// the corresponding filter's Make(), which performs all validation.
+sk_sp<SkImageFilter> SkImageFilters::DistantLitDiffuse(
+ const SkPoint3& direction, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkDistantLight(direction, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::PointLitDiffuse(
+ const SkPoint3& location, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkPointLight(location, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::SpotLitDiffuse(
+ const SkPoint3& location, const SkPoint3& target, SkScalar falloffExponent,
+ SkScalar cutoffAngle, SkColor lightColor, SkScalar surfaceScale, SkScalar kd,
+ sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkSpotLight(location, target, falloffExponent,
+ cutoffAngle, lightColor));
+ return SkDiffuseLightingImageFilter::Make(std::move(light), surfaceScale, kd,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::DistantLitSpecular(
+ const SkPoint3& direction, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkDistantLight(direction, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shininess,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::PointLitSpecular(
+ const SkPoint3& location, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkPointLight(location, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shininess,
+ std::move(input), cropRect);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::SpotLitSpecular(
+ const SkPoint3& location, const SkPoint3& target, SkScalar falloffExponent,
+ SkScalar cutoffAngle, SkColor lightColor, SkScalar surfaceScale, SkScalar ks,
+ SkScalar shininess, sk_sp<SkImageFilter> input, const CropRect& cropRect) {
+ sk_sp<SkImageFilterLight> light(new SkSpotLight(location, target, falloffExponent,
+ cutoffAngle, lightColor));
+ return SkSpecularLightingImageFilter::Make(std::move(light), surfaceScale, ks, shininess,
+ std::move(input), cropRect);
+}
+
+// Registers both lighting filters with the SkFlattenable factory registry so
+// deserialization can find their CreateProc functions by class name.
+void SkRegisterLightingImageFilterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkDiffuseLightingImageFilter);
+ SK_REGISTER_FLATTENABLE(SkSpecularLightingImageFilter);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Validating factory for the diffuse lighting filter. Returns nullptr for a
+// null light, non-finite surfaceScale/kd, or a negative kd.
+sk_sp<SkImageFilter> SkDiffuseLightingImageFilter::Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const SkRect* cropRect) {
+ if (!light) {
+ return nullptr;
+ }
+ if (!SkScalarIsFinite(surfaceScale) || !SkScalarIsFinite(kd)) {
+ return nullptr;
+ }
+ // According to the spec, kd can be any non-negative number :
+ // http://www.w3.org/TR/SVG/filters.html#feDiffuseLightingElement
+ if (kd < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkDiffuseLightingImageFilter(std::move(light), surfaceScale,
+ kd, std::move(input), cropRect));
+}
+
+// Private ctor; all argument validation happens in Make() above. The light,
+// surfaceScale, input and cropRect are stored by the base lighting class.
+SkDiffuseLightingImageFilter::SkDiffuseLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar kd,
+ sk_sp<SkImageFilter> input,
+ const SkRect* cropRect)
+ : INHERITED(std::move(light), surfaceScale, std::move(input), cropRect)
+ , fKD(kd) {
+}
+
+// Deserialization: reads the common image-filter fields (1 input + crop),
+// then the light, surfaceScale and kd in the order flatten() wrote them.
+// Routing through Make() re-applies all parameter validation.
+sk_sp<SkFlattenable> SkDiffuseLightingImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+
+ sk_sp<SkImageFilterLight> light(SkImageFilterLight::UnflattenLight(buffer));
+ SkScalar surfaceScale = buffer.readScalar();
+ SkScalar kd = buffer.readScalar();
+
+ return Make(std::move(light), surfaceScale, kd, common.getInput(0), common.cropRect());
+}
+
+// Serialization: base class writes the light and surfaceScale; only kd is
+// added here. Must stay in sync with CreateProc().
+void SkDiffuseLightingImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fKD);
+}
+
+// Applies diffuse lighting to the filter input. Computes the cropped bounds,
+// dispatches to the GPU path when available, otherwise runs the CPU
+// lightBitmap() pass over an N32 raster copy of the input.
+sk_sp<SkSpecialImage> SkDiffuseLightingImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ // Report the result's position in the output space, then shift bounds into
+ // the input image's local coordinate space for the pixel pass below.
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-offset->fX), SkIntToScalar(-offset->fY));
+
+ return this->filterImageGPU(ctx, input.get(), bounds, matrix);
+ }
+#endif
+
+ // The CPU path reads a 3x3 neighborhood, so it needs at least 2x2 pixels.
+ if (bounds.width() < 2 || bounds.height() < 2) {
+ return nullptr;
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ // The raster implementation only handles N32 premul pixels.
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ // Transform the light into the input's coordinate space before shading.
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-inputOffset.x()), SkIntToScalar(-inputOffset.y()));
+
+ sk_sp<SkImageFilterLight> transformedLight(light()->transform(matrix));
+
+ DiffuseLightingType lightingType(fKD);
+ lightBitmap(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst, ctx.surfaceProps());
+}
+
+#if defined(SK_GANESH)
+// GPU path: builds the DiffuseLightingEffect fragment processor for one
+// boundary-mode tile. surfaceScale is premultiplied by 255 because the shader
+// works on normalized (0..1) alpha rather than 0..255 heights.
+std::unique_ptr<GrFragmentProcessor> SkDiffuseLightingImageFilter::makeFragmentProcessor(
+ GrSurfaceProxyView view,
+ const SkIPoint& viewOffset,
+ const SkMatrix& matrix,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode,
+ const GrCaps& caps) const {
+ SkScalar scale = this->surfaceScale() * 255;
+ return DiffuseLightingEffect::Make(std::move(view),
+ viewOffset,
+ this->refLight(),
+ scale,
+ matrix,
+ this->kd(),
+ boundaryMode,
+ srcBounds,
+ caps);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Validating factory for the specular lighting filter. Returns nullptr for a
+// null light, non-finite surfaceScale/ks/shininess, or a negative ks.
+sk_sp<SkImageFilter> SkSpecularLightingImageFilter::Make(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input,
+ const SkRect* cropRect) {
+ if (!light) {
+ return nullptr;
+ }
+ if (!SkScalarIsFinite(surfaceScale) || !SkScalarIsFinite(ks) || !SkScalarIsFinite(shininess)) {
+ return nullptr;
+ }
+ // According to the spec, ks can be any non-negative number :
+ // http://www.w3.org/TR/SVG/filters.html#feSpecularLightingElement
+ if (ks < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkSpecularLightingImageFilter(std::move(light), surfaceScale,
+ ks, shininess,
+ std::move(input), cropRect));
+}
+
+// Private ctor; all argument validation happens in Make() above.
+SkSpecularLightingImageFilter::SkSpecularLightingImageFilter(sk_sp<SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ SkScalar ks,
+ SkScalar shininess,
+ sk_sp<SkImageFilter> input,
+ const SkRect* cropRect)
+ : INHERITED(std::move(light), surfaceScale, std::move(input), cropRect)
+ , fKS(ks)
+ , fShininess(shininess) {
+}
+
+// Deserialization: common fields, then light, surfaceScale, ks, shininess in
+// the order flatten() wrote them. Make() re-validates all parameters.
+sk_sp<SkFlattenable> SkSpecularLightingImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ sk_sp<SkImageFilterLight> light(SkImageFilterLight::UnflattenLight(buffer));
+ SkScalar surfaceScale = buffer.readScalar();
+ SkScalar ks = buffer.readScalar();
+ SkScalar shine = buffer.readScalar();
+
+ return Make(std::move(light), surfaceScale, ks, shine, common.getInput(0),
+ common.cropRect());
+}
+
+// Serialization: base class writes light and surfaceScale; ks and shininess
+// are appended here. Must stay in sync with CreateProc().
+void SkSpecularLightingImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fKS);
+ buffer.writeScalar(fShininess);
+}
+
+// Applies specular lighting to the filter input. Mirrors the diffuse
+// onFilterImage(): crop, GPU dispatch when available, otherwise a CPU
+// lightBitmap() pass using SpecularLightingType(ks, shininess).
+sk_sp<SkSpecialImage> SkSpecularLightingImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ // Report the result's position in the output space, then shift bounds into
+ // the input image's local coordinate space for the pixel pass below.
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-offset->fX), SkIntToScalar(-offset->fY));
+
+ return this->filterImageGPU(ctx, input.get(), bounds, matrix);
+ }
+#endif
+
+ // The CPU path reads a 3x3 neighborhood, so it needs at least 2x2 pixels.
+ if (bounds.width() < 2 || bounds.height() < 2) {
+ return nullptr;
+ }
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ // The raster implementation only handles N32 premul pixels.
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SpecularLightingType lightingType(fKS, fShininess);
+
+ // Transform the light into the input's coordinate space before shading.
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-inputOffset.x()), SkIntToScalar(-inputOffset.y()));
+
+ sk_sp<SkImageFilterLight> transformedLight(light()->transform(matrix));
+
+ lightBitmap(lightingType,
+ transformedLight.get(),
+ inputBM,
+ &dst,
+ surfaceScale(),
+ bounds);
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()), dst,
+ ctx.surfaceProps());
+}
+
+#if defined(SK_GANESH)
+// GPU path: builds the SpecularLightingEffect fragment processor for one
+// boundary-mode tile. surfaceScale is premultiplied by 255 because the shader
+// works on normalized (0..1) alpha rather than 0..255 heights.
+std::unique_ptr<GrFragmentProcessor> SkSpecularLightingImageFilter::makeFragmentProcessor(
+ GrSurfaceProxyView view,
+ const SkIPoint& viewOffset,
+ const SkMatrix& matrix,
+ const SkIRect* srcBounds,
+ BoundaryMode boundaryMode,
+ const GrCaps& caps) const {
+ SkScalar scale = this->surfaceScale() * 255;
+ return SpecularLightingEffect::Make(std::move(view),
+ viewOffset,
+ this->refLight(),
+ scale,
+ matrix,
+ this->ks(),
+ this->shininess(),
+ boundaryMode,
+ srcBounds,
+ caps);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
+// Returns the SkSL body of the per-tile normal() function for the given
+// boundary mode. 'm' is the 3x3 alpha neighborhood (row-major m[0..8]);
+// missing samples outside the image are replaced with 0.0 and the sobel
+// weights (gOneThird/gOneHalf/gTwoThirds/gOneQuarter) are adjusted per the
+// SVG lighting-filter edge rules. The first sobel call computes the X
+// gradient, the second the Y gradient.
+static SkString emitNormalFunc(BoundaryMode mode,
+ const char* pointToNormalName,
+ const char* sobelFuncName) {
+ SkString result;
+ switch (mode) {
+ case kTopLeft_BoundaryMode:
+ result.printf("return %s(%s(0.0, 0.0, m[4], m[5], m[7], m[8], %g),"
+ " %s(0.0, 0.0, m[4], m[7], m[5], m[8], %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kTop_BoundaryMode:
+ result.printf("return %s(%s(0.0, 0.0, m[3], m[5], m[6], m[8], %g),"
+ " %s(0.0, 0.0, m[4], m[7], m[5], m[8], %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gOneThird,
+ sobelFuncName, gOneHalf);
+ break;
+ case kTopRight_BoundaryMode:
+ result.printf("return %s(%s( 0.0, 0.0, m[3], m[4], m[6], m[7], %g),"
+ " %s(m[3], m[6], m[4], m[7], 0.0, 0.0, %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kLeft_BoundaryMode:
+ result.printf("return %s(%s(m[1], m[2], m[4], m[5], m[7], m[8], %g),"
+ " %s( 0.0, 0.0, m[1], m[7], m[2], m[8], %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gOneHalf,
+ sobelFuncName, gOneThird);
+ break;
+ case kInterior_BoundaryMode:
+ result.printf("return %s(%s(m[0], m[2], m[3], m[5], m[6], m[8], %g),"
+ " %s(m[0], m[6], m[1], m[7], m[2], m[8], %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gOneQuarter,
+ sobelFuncName, gOneQuarter);
+ break;
+ case kRight_BoundaryMode:
+ result.printf("return %s(%s(m[0], m[1], m[3], m[4], m[6], m[7], %g),"
+ " %s(m[0], m[6], m[1], m[7], 0.0, 0.0, %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gOneHalf,
+ sobelFuncName, gOneThird);
+ break;
+ case kBottomLeft_BoundaryMode:
+ result.printf("return %s(%s(m[1], m[2], m[4], m[5], 0.0, 0.0, %g),"
+ " %s( 0.0, 0.0, m[1], m[4], m[2], m[5], %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ case kBottom_BoundaryMode:
+ result.printf("return %s(%s(m[0], m[2], m[3], m[5], 0.0, 0.0, %g),"
+ " %s(m[0], m[3], m[1], m[4], m[2], m[5], %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gOneThird,
+ sobelFuncName, gOneHalf);
+ break;
+ case kBottomRight_BoundaryMode:
+ result.printf("return %s(%s(m[0], m[1], m[3], m[4], 0.0, 0.0, %g),"
+ " %s(m[0], m[3], m[1], m[4], 0.0, 0.0, %g),"
+ " surfaceScale);",
+ pointToNormalName, sobelFuncName, gTwoThirds,
+ sobelFuncName, gTwoThirds);
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ return result;
+}
+
+namespace {
+// Shared shader-program implementation for both lighting effects. emitCode()
+// builds the common sobel/normal SkSL scaffolding; subclasses contribute only
+// the light() function via emitLightFunc().
+class LightingEffect::ImplBase : public ProgramImpl {
+public:
+ void emitCode(EmitArgs&) override;
+
+protected:
+ /**
+ * Subclasses of LightingImpl must call INHERITED::onSetData();
+ */
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+ virtual void emitLightFunc(const GrFragmentProcessor*,
+ GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ SkString* funcName) = 0;
+
+private:
+ UniformHandle fSurfaceScaleUni;
+ std::unique_ptr<GpuLight> fLight; // lazily created from the effect's light
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Diffuse variant: adds the kd uniform and the diffuse light() function.
+class DiffuseLightingEffect::Impl : public ImplBase {
+public:
+ void emitLightFunc(const GrFragmentProcessor*,
+ GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ SkString* funcName) override;
+
+private:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+ using INHERITED = ImplBase;
+
+ UniformHandle fKDUni;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Specular variant: adds the ks and shininess uniforms and the specular
+// light() function.
+class SpecularLightingEffect::Impl : public ImplBase {
+public:
+ void emitLightFunc(const GrFragmentProcessor*,
+ GrGLSLUniformHandler*,
+ GrGLSLFPFragmentBuilder*,
+ SkString* funcName) override;
+
+private:
+ void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;
+
+ using INHERITED = ImplBase;
+
+ UniformHandle fKSUni;
+ UniformHandle fShininessUni;
+};
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Base fragment processor shared by the diffuse and specular effects. Wraps
+// the input texture in a clamp-to-border, nearest-filtered child (optionally
+// restricted to srcBounds) so out-of-bounds 3x3 samples read transparent
+// black, matching the CPU boundary handling.
+LightingEffect::LightingEffect(ClassID classID,
+ GrSurfaceProxyView view,
+ const SkIPoint& viewOffset,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const GrCaps& caps)
+ // Perhaps this could advertise the opaque or coverage-as-alpha optimizations?
+ : INHERITED(classID, kNone_OptimizationFlags)
+ , fLight(std::move(light))
+ , fSurfaceScale(surfaceScale)
+ , fFilterMatrix(matrix)
+ , fBoundaryMode(boundaryMode) {
+ static constexpr GrSamplerState kSampler(GrSamplerState::WrapMode::kClampToBorder,
+ GrSamplerState::Filter::kNearest);
+ std::unique_ptr<GrFragmentProcessor> child;
+ if (srcBounds) {
+ // Shift the subset into the view's space to match the translate below.
+ SkRect offsetSrcBounds = SkRect::Make(*srcBounds);
+ offsetSrcBounds.offset(viewOffset.fX, viewOffset.fY);
+ child = GrTextureEffect::MakeSubset(std::move(view), kPremul_SkAlphaType,
+ SkMatrix::Translate(viewOffset.fX, viewOffset.fY),
+ kSampler, offsetSrcBounds, caps);
+ } else {
+ child = GrTextureEffect::Make(std::move(view), kPremul_SkAlphaType,
+ SkMatrix::Translate(viewOffset.fX, viewOffset.fY),
+ kSampler, caps);
+ }
+ this->registerChild(std::move(child), SkSL::SampleUsage::Explicit());
+ this->setUsesSampleCoordsDirectly();
+}
+
+// Copy ctor used by clone(); shares the (immutable) light.
+LightingEffect::LightingEffect(const LightingEffect& that)
+ : INHERITED(that)
+ , fLight(that.fLight)
+ , fSurfaceScale(that.fSurfaceScale)
+ , fFilterMatrix(that.fFilterMatrix)
+ , fBoundaryMode(that.fBoundaryMode) {}
+
+// Equality over the fields that affect generated/uniform shader state.
+// NOTE(review): fFilterMatrix is intentionally not compared here — it only
+// feeds light transformation at setData time.
+bool LightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const LightingEffect& s = sBase.cast<LightingEffect>();
+ return fLight->isEqual(*s.fLight) &&
+ fSurfaceScale == s.fSurfaceScale &&
+ fBoundaryMode == s.fBoundaryMode;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Diffuse lighting fragment processor: LightingEffect plus the kd constant.
+DiffuseLightingEffect::DiffuseLightingEffect(GrSurfaceProxyView view,
+ const SkIPoint& viewOffset,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar kd,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const GrCaps& caps)
+ : INHERITED(kGrDiffuseLightingEffect_ClassID,
+ std::move(view),
+ viewOffset,
+ std::move(light),
+ surfaceScale,
+ matrix,
+ boundaryMode,
+ srcBounds,
+ caps)
+ , fKD(kd) {}
+
+DiffuseLightingEffect::DiffuseLightingEffect(const DiffuseLightingEffect& that)
+ : INHERITED(that), fKD(that.fKD) {}
+
+bool DiffuseLightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const DiffuseLightingEffect& s = sBase.cast<DiffuseLightingEffect>();
+ return INHERITED::onIsEqual(sBase) && fKD == s.fKD;
+}
+
+std::unique_ptr<GrFragmentProcessor::ProgramImpl> DiffuseLightingEffect::onMakeProgramImpl() const {
+ return std::make_unique<Impl>();
+}
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(DiffuseLightingEffect)
+
+#if GR_TEST_UTILS
+
+// Test-only helper: a point with each component uniform in [-1, 1].
+static SkPoint3 random_point3(SkRandom* random) {
+ return SkPoint3::Make(SkScalarToFloat(random->nextSScalar1()),
+ SkScalarToFloat(random->nextSScalar1()),
+ SkScalarToFloat(random->nextSScalar1()));
+}
+
+// Test-only helper: one of the three light types with random parameters.
+// Caller takes ownership of the returned light.
+static SkImageFilterLight* create_random_light(SkRandom* random) {
+ int type = random->nextULessThan(3);
+ switch (type) {
+ case 0: {
+ return new SkDistantLight(random_point3(random), random->nextU());
+ }
+ case 1: {
+ return new SkPointLight(random_point3(random), random->nextU());
+ }
+ case 2: {
+ return new SkSpotLight(random_point3(random), random_point3(random),
+ random->nextUScalar1(), random->nextUScalar1(), random->nextU());
+ }
+ default:
+ SK_ABORT("Unexpected value.");
+ }
+}
+
+// Fuzz entry point for processor unit tests: random light, matrix, kd,
+// boundary mode and (possibly empty/out-of-range) src bounds.
+std::unique_ptr<GrFragmentProcessor> DiffuseLightingEffect::TestCreate(GrProcessorTestData* d) {
+ auto [view, ct, at] = d->randomView();
+ SkScalar surfaceScale = d->fRandom->nextSScalar1();
+ SkScalar kd = d->fRandom->nextUScalar1();
+ sk_sp<SkImageFilterLight> light(create_random_light(d->fRandom));
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix[i] = d->fRandom->nextUScalar1();
+ }
+
+ uint32_t boundsX = d->fRandom->nextRangeU(0, view.width());
+ uint32_t boundsY = d->fRandom->nextRangeU(0, view.height());
+ uint32_t boundsW = d->fRandom->nextRangeU(0, view.width());
+ uint32_t boundsH = d->fRandom->nextRangeU(0, view.height());
+ SkIRect srcBounds = SkIRect::MakeXYWH(boundsX, boundsY, boundsW, boundsH);
+ BoundaryMode mode = static_cast<BoundaryMode>(d->fRandom->nextU() % kBoundaryModeCount);
+
+ return DiffuseLightingEffect::Make(std::move(view),
+ SkIPoint(),
+ std::move(light),
+ surfaceScale,
+ matrix,
+ kd,
+ mode,
+ &srcBounds,
+ *d->caps());
+}
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Emits the shared lighting shader: helper functions (sobel, pointToNormal,
+// the boundary-mode-specific normal, and the subclass's light), then a 3x3
+// alpha neighborhood fetch around the sample coord, and finally the call
+// light(normal, surfaceToLight, lightColor).
+void LightingEffect::ImplBase::emitCode(EmitArgs& args) {
+ const LightingEffect& le = args.fFp.cast<LightingEffect>();
+ // Lazily create the GPU light wrapper on first use.
+ if (!fLight) {
+ fLight = le.light()->createGpuLight();
+ }
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fSurfaceScaleUni = uniformHandler->addUniform(&le,
+ kFragment_GrShaderFlag,
+ SkSLType::kHalf, "SurfaceScale");
+ fLight->emitLightColorUniform(&le, uniformHandler);
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+ SkString lightFunc;
+ this->emitLightFunc(&le, uniformHandler, fragBuilder, &lightFunc);
+ const GrShaderVar gSobelArgs[] = {
+ GrShaderVar("a", SkSLType::kHalf),
+ GrShaderVar("b", SkSLType::kHalf),
+ GrShaderVar("c", SkSLType::kHalf),
+ GrShaderVar("d", SkSLType::kHalf),
+ GrShaderVar("e", SkSLType::kHalf),
+ GrShaderVar("f", SkSLType::kHalf),
+ GrShaderVar("scale", SkSLType::kHalf),
+ };
+
+ SkString sobelFuncName = fragBuilder->getMangledFunctionName("sobel");
+ fragBuilder->emitFunction(SkSLType::kHalf,
+ sobelFuncName.c_str(),
+ {gSobelArgs, std::size(gSobelArgs)},
+ "return (-a + b - 2.0 * c + 2.0 * d -e + f) * scale;");
+ const GrShaderVar gPointToNormalArgs[] = {
+ GrShaderVar("x", SkSLType::kHalf),
+ GrShaderVar("y", SkSLType::kHalf),
+ GrShaderVar("scale", SkSLType::kHalf),
+ };
+ SkString pointToNormalName = fragBuilder->getMangledFunctionName("pointToNormal");
+ fragBuilder->emitFunction(SkSLType::kHalf3,
+ pointToNormalName.c_str(),
+ {gPointToNormalArgs, std::size(gPointToNormalArgs)},
+ "return normalize(half3(-x * scale, -y * scale, 1));");
+
+ const GrShaderVar gInteriorNormalArgs[] = {
+ GrShaderVar("m", SkSLType::kHalf, 9),
+ GrShaderVar("surfaceScale", SkSLType::kHalf),
+ };
+ // Body depends on this effect's boundary mode (see emitNormalFunc).
+ SkString normalBody = emitNormalFunc(le.boundaryMode(),
+ pointToNormalName.c_str(),
+ sobelFuncName.c_str());
+ SkString normalName = fragBuilder->getMangledFunctionName("normal");
+ fragBuilder->emitFunction(SkSLType::kHalf3,
+ normalName.c_str(),
+ {gInteriorNormalArgs, std::size(gInteriorNormalArgs)},
+ normalBody.c_str());
+
+ fragBuilder->codeAppendf("float2 coord = %s;", args.fSampleCoord);
+ fragBuilder->codeAppend("half m[9];");
+
+ const char* surfScale = uniformHandler->getUniformCStr(fSurfaceScaleUni);
+
+ // Fetch the 3x3 alpha neighborhood, row-major from (-1,-1) to (1,1).
+ int index = 0;
+ for (int dy = -1; dy <= 1; ++dy) {
+ for (int dx = -1; dx <= 1; ++dx) {
+ SkString texCoords;
+ texCoords.appendf("coord + half2(%d, %d)", dx, dy);
+ auto sample = this->invokeChild(0, args, texCoords.c_str());
+ fragBuilder->codeAppendf("m[%d] = %s.a;", index, sample.c_str());
+ index++;
+ }
+ }
+ // m[4] is the center sample; surfaceScale * alpha gives the surface height
+ // (z) used to compute the surface-to-light vector.
+ fragBuilder->codeAppend("half3 surfaceToLight = ");
+ SkString arg;
+ arg.appendf("%s * m[4]", surfScale);
+ fLight->emitSurfaceToLight(&le, uniformHandler, fragBuilder, arg.c_str());
+ fragBuilder->codeAppend(";");
+ fragBuilder->codeAppendf("return %s(%s(m, %s), surfaceToLight, ",
+ lightFunc.c_str(), normalName.c_str(), surfScale);
+ fLight->emitLightColor(&le, uniformHandler, fragBuilder, "surfaceToLight");
+ fragBuilder->codeAppend(");");
+}
+
+// Uploads the shared uniforms: surface scale plus the light's uniforms, with
+// the light first transformed by the filter matrix (same as the CPU path).
+void LightingEffect::ImplBase::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) {
+ const LightingEffect& lighting = proc.cast<LightingEffect>();
+ if (!fLight) {
+ fLight = lighting.light()->createGpuLight();
+ }
+
+ pdman.set1f(fSurfaceScaleUni, lighting.surfaceScale());
+ sk_sp<SkImageFilterLight> transformedLight(
+ lighting.light()->transform(lighting.filterMatrix()));
+ fLight->setData(pdman, transformedLight.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Emits the diffuse light() SkSL function:
+// result = saturate(lightColor * kd * dot(N, L)) with alpha forced to 1,
+// per the SVG feDiffuseLighting model.
+void DiffuseLightingEffect::Impl::emitLightFunc(const GrFragmentProcessor* owner,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ SkString* funcName) {
+ const char* kd;
+ fKDUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf, "KD", &kd);
+
+ const GrShaderVar gLightArgs[] = {
+ GrShaderVar("normal", SkSLType::kHalf3),
+ GrShaderVar("surfaceToLight", SkSLType::kHalf3),
+ GrShaderVar("lightColor", SkSLType::kHalf3)
+ };
+ SkString lightBody;
+ lightBody.appendf("half colorScale = %s * dot(normal, surfaceToLight);", kd);
+ lightBody.appendf("return half4(saturate(lightColor * colorScale), 1.0);");
+ *funcName = fragBuilder->getMangledFunctionName("light");
+ fragBuilder->emitFunction(SkSLType::kHalf4,
+ funcName->c_str(),
+ {gLightArgs, std::size(gLightArgs)},
+ lightBody.c_str());
+}
+
+// Uploads kd after the base class uploads the shared uniforms.
+void DiffuseLightingEffect::Impl::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) {
+ INHERITED::onSetData(pdman, proc);
+ const DiffuseLightingEffect& diffuse = proc.cast<DiffuseLightingEffect>();
+ pdman.set1f(fKDUni, diffuse.fKD);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Specular lighting fragment processor: LightingEffect plus ks and shininess.
+SpecularLightingEffect::SpecularLightingEffect(GrSurfaceProxyView view,
+ const SkIPoint& viewOffset,
+ sk_sp<const SkImageFilterLight> light,
+ SkScalar surfaceScale,
+ const SkMatrix& matrix,
+ SkScalar ks,
+ SkScalar shininess,
+ BoundaryMode boundaryMode,
+ const SkIRect* srcBounds,
+ const GrCaps& caps)
+ : INHERITED(kGrSpecularLightingEffect_ClassID,
+ std::move(view),
+ viewOffset,
+ std::move(light),
+ surfaceScale,
+ matrix,
+ boundaryMode,
+ srcBounds,
+ caps)
+ , fKS(ks)
+ , fShininess(shininess) {}
+
+SpecularLightingEffect::SpecularLightingEffect(const SpecularLightingEffect& that)
+ : INHERITED(that), fKS(that.fKS), fShininess(that.fShininess) {}
+
+bool SpecularLightingEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const SpecularLightingEffect& s = sBase.cast<SpecularLightingEffect>();
+ return INHERITED::onIsEqual(sBase) && this->fKS == s.fKS && this->fShininess == s.fShininess;
+}
+
+std::unique_ptr<GrFragmentProcessor::ProgramImpl>
+SpecularLightingEffect::onMakeProgramImpl() const { return std::make_unique<Impl>(); }
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(SpecularLightingEffect)
+
+#if GR_TEST_UTILS
+// Fuzz entry point for processor unit tests: random light, matrix, ks,
+// shininess, boundary mode and (possibly empty/out-of-range) src bounds.
+std::unique_ptr<GrFragmentProcessor> SpecularLightingEffect::TestCreate(GrProcessorTestData* d) {
+ auto [view, ct, at] = d->randomView();
+ SkScalar surfaceScale = d->fRandom->nextSScalar1();
+ SkScalar ks = d->fRandom->nextUScalar1();
+ SkScalar shininess = d->fRandom->nextUScalar1();
+ sk_sp<SkImageFilterLight> light(create_random_light(d->fRandom));
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix[i] = d->fRandom->nextUScalar1();
+ }
+ BoundaryMode mode = static_cast<BoundaryMode>(d->fRandom->nextU() % kBoundaryModeCount);
+
+ uint32_t boundsX = d->fRandom->nextRangeU(0, view.width());
+ uint32_t boundsY = d->fRandom->nextRangeU(0, view.height());
+ uint32_t boundsW = d->fRandom->nextRangeU(0, view.width());
+ uint32_t boundsH = d->fRandom->nextRangeU(0, view.height());
+ SkIRect srcBounds = SkIRect::MakeXYWH(boundsX, boundsY, boundsW, boundsH);
+
+ return SpecularLightingEffect::Make(std::move(view),
+ SkIPoint(),
+ std::move(light),
+ surfaceScale,
+ matrix,
+ ks,
+ shininess,
+ mode,
+ &srcBounds,
+ *d->caps());
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Emits the specular light() SkSL function (Blinn-Phong half-vector form):
+// color = saturate(lightColor * ks * pow(dot(N, H), shininess)), with the
+// result alpha set to max(r, g, b), per the SVG feSpecularLighting model.
+void SpecularLightingEffect::Impl::emitLightFunc(const GrFragmentProcessor* owner,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ SkString* funcName) {
+ const char* ks;
+ const char* shininess;
+
+ fKSUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf, "KS", &ks);
+ fShininessUni = uniformHandler->addUniform(owner,
+ kFragment_GrShaderFlag,
+ SkSLType::kHalf,
+ "Shininess",
+ &shininess);
+
+ const GrShaderVar gLightArgs[] = {
+ GrShaderVar("normal", SkSLType::kHalf3),
+ GrShaderVar("surfaceToLight", SkSLType::kHalf3),
+ GrShaderVar("lightColor", SkSLType::kHalf3)
+ };
+ SkString lightBody;
+ // The half-vector is between the light direction and the (0,0,1) eye vector.
+ lightBody.appendf("half3 halfDir = half3(normalize(surfaceToLight + half3(0, 0, 1)));");
+ lightBody.appendf("half colorScale = half(%s * pow(dot(normal, halfDir), %s));",
+ ks, shininess);
+ lightBody.appendf("half3 color = saturate(lightColor * colorScale);");
+ lightBody.appendf("return half4(color, max(max(color.r, color.g), color.b));");
+ *funcName = fragBuilder->getMangledFunctionName("light");
+ fragBuilder->emitFunction(SkSLType::kHalf4,
+ funcName->c_str(),
+ {gLightArgs, std::size(gLightArgs)},
+ lightBody.c_str());
+}
+
+// Uploads ks and shininess after the base class uploads the shared uniforms.
+void SpecularLightingEffect::Impl::onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& effect) {
+ INHERITED::onSetData(pdman, effect);
+ const SpecularLightingEffect& spec = effect.cast<SpecularLightingEffect>();
+ pdman.set1f(fKSUni, spec.fKS);
+ pdman.set1f(fShininessUni, spec.fShininess);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Base GPU light: owns the LightColor uniform shared by all light types.
+void GpuLight::emitLightColorUniform(const GrFragmentProcessor* owner,
+ GrGLSLUniformHandler* uniformHandler) {
+ fColorUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf3,
+ "LightColor");
+}
+
+// Default light-color expression: just the uniform (spot lights override
+// this to apply the cone attenuation).
+void GpuLight::emitLightColor(const GrFragmentProcessor* owner,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* surfaceToLight) {
+ fragBuilder->codeAppend(uniformHandler->getUniformCStr(this->lightColorUni()));
+}
+
+// Uploads the light color normalized from 0..255 to 0..1 for the shader.
+void GpuLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ setUniformPoint3(pdman, fColorUni,
+ light->color().makeScale(SkScalarInvert(SkIntToScalar(255))));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Distant light: surface-to-light is a constant direction uniform, so the
+// shader expression is simply the uniform itself.
+void GpuDistantLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kDistant_LightType);
+ const SkDistantLight* distantLight = static_cast<const SkDistantLight*>(light);
+ setUniformNormal3(pdman, fDirectionUni, distantLight->direction());
+}
+
+void GpuDistantLight::emitSurfaceToLight(const GrFragmentProcessor* owner,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* dir;
+ fDirectionUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf3,
+ "LightDirection", &dir);
+ // z (the surface height) is unused: the direction is position-independent.
+ fragBuilder->codeAppend(dir);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Point light: surface-to-light is the normalized vector from the surface
+// point (fragment xy, height z) to the light's location uniform.
+void GpuPointLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kPoint_LightType);
+ const SkPointLight* pointLight = static_cast<const SkPointLight*>(light);
+ setUniformPoint3(pdman, fLocationUni, pointLight->location());
+}
+
+void GpuPointLight::emitSurfaceToLight(const GrFragmentProcessor* owner,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* loc;
+ fLocationUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf3,
+ "LightLocation", &loc);
+ fragBuilder->codeAppendf("normalize(%s - half3(sk_FragCoord.xy, %s))",
+ loc, z);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Spot light: like a point light for the surface-to-light vector, but the
+// light color is attenuated by the spot cone (see emitLightColor below).
+void GpuSpotLight::setData(const GrGLSLProgramDataManager& pdman,
+ const SkImageFilterLight* light) const {
+ INHERITED::setData(pdman, light);
+ SkASSERT(light->type() == SkImageFilterLight::kSpot_LightType);
+ const SkSpotLight* spotLight = static_cast<const SkSpotLight *>(light);
+ setUniformPoint3(pdman, fLocationUni, spotLight->location());
+ pdman.set1f(fExponentUni, spotLight->specularExponent());
+ pdman.set1f(fCosInnerConeAngleUni, spotLight->cosInnerConeAngle());
+ pdman.set1f(fCosOuterConeAngleUni, spotLight->cosOuterConeAngle());
+ pdman.set1f(fConeScaleUni, spotLight->coneScale());
+ setUniformNormal3(pdman, fSUni, spotLight->s());
+}
+
+void GpuSpotLight::emitSurfaceToLight(const GrFragmentProcessor* owner,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* z) {
+ const char* location;
+ fLocationUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf3,
+ "LightLocation", &location);
+
+ fragBuilder->codeAppendf("normalize(%s - half3(sk_FragCoord.xy, %s))",
+ location, z);
+}
+
+// Emits a lightColor(surfaceToLight) helper implementing the spot cone:
+// zero outside the outer cone, exponent-scaled color inside the inner cone,
+// and a linear coneScale falloff between the two angles.
+void GpuSpotLight::emitLightColor(const GrFragmentProcessor* owner,
+ GrGLSLUniformHandler* uniformHandler,
+ GrGLSLFPFragmentBuilder* fragBuilder,
+ const char* surfaceToLight) {
+ const char* color = uniformHandler->getUniformCStr(this->lightColorUni()); // created by parent class.
+
+ const char* exponent;
+ const char* cosInner;
+ const char* cosOuter;
+ const char* coneScale;
+ const char* s;
+ fExponentUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf,
+ "Exponent", &exponent);
+ fCosInnerConeAngleUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag,
+ SkSLType::kHalf, "CosInnerConeAngle",
+ &cosInner);
+ fCosOuterConeAngleUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag,
+ SkSLType::kHalf, "CosOuterConeAngle",
+ &cosOuter);
+ fConeScaleUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf,
+ "ConeScale", &coneScale);
+ fSUni = uniformHandler->addUniform(owner, kFragment_GrShaderFlag, SkSLType::kHalf3, "S", &s);
+
+ const GrShaderVar gLightColorArgs[] = {
+ GrShaderVar("surfaceToLight", SkSLType::kHalf3)
+ };
+ SkString lightColorBody;
+ lightColorBody.appendf("half cosAngle = -dot(surfaceToLight, %s);", s);
+ lightColorBody.appendf("if (cosAngle < %s) {", cosOuter);
+ lightColorBody.appendf("return half3(0);");
+ lightColorBody.appendf("}");
+ lightColorBody.appendf("half scale = pow(cosAngle, %s);", exponent);
+ lightColorBody.appendf("if (cosAngle < %s) {", cosInner);
+ lightColorBody.appendf("return %s * scale * (cosAngle - %s) * %s;",
+ color, cosOuter, coneScale);
+ lightColorBody.appendf("}");
+ lightColorBody.appendf("return %s * scale;", color);
+ fLightColorFunc = fragBuilder->getMangledFunctionName("lightColor");
+ fragBuilder->emitFunction(SkSLType::kHalf3,
+ fLightColorFunc.c_str(),
+ {gLightColorArgs, std::size(gLightColorArgs)},
+ lightColorBody.c_str());
+
+ fragBuilder->codeAppendf("%s(%s)", fLightColorFunc.c_str(), surfaceToLight);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMagnifierImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMagnifierImageFilter.cpp
new file mode 100644
index 0000000000..6220434ec1
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMagnifierImageFilter.cpp
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkM44.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/base/SkTPin.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#endif
+
+namespace {
+
+class SkMagnifierImageFilter final : public SkImageFilter_Base {
+public:
+ SkMagnifierImageFilter(const SkRect& srcRect, SkScalar inset, sk_sp<SkImageFilter> input,
+ const SkRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fSrcRect(srcRect)
+ , fInset(inset) {
+ SkASSERT(srcRect.left() >= 0 && srcRect.top() >= 0 && inset >= 0);
+ }
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+private:
+ friend void ::SkRegisterMagnifierImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkMagnifierImageFilter)
+
+ SkRect fSrcRect;
+ SkScalar fInset;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::Magnifier(
+ const SkRect& srcRect, SkScalar inset, sk_sp<SkImageFilter> input,
+ const CropRect& cropRect) {
+ if (!SkScalarIsFinite(inset) || !SkIsValidRect(srcRect)) {
+ return nullptr;
+ }
+ if (inset < 0) {
+ return nullptr;
+ }
+ // Negative numbers in src rect are not supported
+ if (srcRect.fLeft < 0 || srcRect.fTop < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMagnifierImageFilter(srcRect, inset, std::move(input),
+ cropRect));
+}
+
+void SkRegisterMagnifierImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkMagnifierImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkMagnifierImageFilterImpl", SkMagnifierImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkMagnifierImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkRect src;
+ buffer.readRect(&src);
+ return SkImageFilters::Magnifier(src, buffer.readScalar(), common.getInput(0),
+ common.cropRect());
+}
+
+void SkMagnifierImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeRect(fSrcRect);
+ buffer.writeScalar(fInset);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+static std::unique_ptr<GrFragmentProcessor> make_magnifier_fp(
+ std::unique_ptr<GrFragmentProcessor> input,
+ SkIRect bounds,
+ SkRect srcRect,
+ float xInvZoom,
+ float yInvZoom,
+ float xInvInset,
+ float yInvInset) {
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform shader src;"
+ "uniform float4 boundsUniform;"
+ "uniform float xInvZoom;"
+ "uniform float yInvZoom;"
+ "uniform float xInvInset;"
+ "uniform float yInvInset;"
+ "uniform half2 offset;"
+
+ "half4 main(float2 coord) {"
+ "float2 zoom_coord = offset + coord * float2(xInvZoom, yInvZoom);"
+ "float2 delta = (coord - boundsUniform.xy) * boundsUniform.zw;"
+ "delta = min(delta, float2(1.0) - delta);"
+ "delta *= float2(xInvInset, yInvInset);"
+
+ "float weight = 0.0;"
+ "if (delta.s < 2.0 && delta.t < 2.0) {"
+ "delta = float2(2.0) - delta;"
+ "float dist = length(delta);"
+ "dist = max(2.0 - dist, 0.0);"
+ "weight = min(dist * dist, 1.0);"
+ "} else {"
+ "float2 delta_squared = delta * delta;"
+ "weight = min(min(delta_squared.x, delta_squared.y), 1.0);"
+ "}"
+
+ "return src.eval(mix(coord, zoom_coord, weight));"
+ "}"
+ );
+
+ SkV4 boundsUniform = {static_cast<float>(bounds.x()),
+ static_cast<float>(bounds.y()),
+ 1.f / bounds.width(),
+ 1.f / bounds.height()};
+
+ return GrSkSLFP::Make(effect, "magnifier_fp", /*inputFP=*/nullptr, GrSkSLFP::OptFlags::kNone,
+ "src", std::move(input),
+ "boundsUniform", boundsUniform,
+ "xInvZoom", xInvZoom,
+ "yInvZoom", yInvZoom,
+ "xInvInset", xInvInset,
+ "yInvInset", yInvInset,
+ "offset", SkV2{srcRect.x(), srcRect.y()});
+}
+#endif
+
+sk_sp<SkSpecialImage> SkMagnifierImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect inputBounds = SkIRect::MakeXYWH(inputOffset.x(), inputOffset.y(),
+ input->width(), input->height());
+
+ SkIRect bounds;
+ if (!this->applyCropRect(ctx, inputBounds, &bounds)) {
+ return nullptr;
+ }
+
+ SkScalar invInset = fInset > 0 ? SkScalarInvert(fInset) : SK_Scalar1;
+
+ SkScalar invXZoom = fSrcRect.width() / bounds.width();
+ SkScalar invYZoom = fSrcRect.height() / bounds.height();
+
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ auto context = ctx.getContext();
+
+ GrSurfaceProxyView inputView = input->view(context);
+ SkASSERT(inputView.asTextureProxy());
+
+ const auto isProtected = inputView.proxy()->isProtected();
+ const auto origin = inputView.origin();
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ bounds.offset(-inputOffset);
+
+ // Map bounds and srcRect into the proxy space. Due to the zoom effect,
+ // it's not just an offset for fSrcRect.
+ bounds.offset(input->subset().x(), input->subset().y());
+ SkRect srcRect = fSrcRect.makeOffset((1.f - invXZoom) * input->subset().x(),
+ (1.f - invYZoom) * input->subset().y());
+ auto inputFP = GrTextureEffect::Make(std::move(inputView), kPremul_SkAlphaType);
+
+ auto fp = make_magnifier_fp(std::move(inputFP),
+ bounds,
+ srcRect,
+ invXZoom,
+ invYZoom,
+ bounds.width() * invInset,
+ bounds.height() * invInset);
+
+ fp = GrColorSpaceXformEffect::Make(std::move(fp),
+ input->getColorSpace(), input->alphaType(),
+ ctx.colorSpace(), kPremul_SkAlphaType);
+ if (!fp) {
+ return nullptr;
+ }
+
+ return DrawWithFP(context, std::move(fp), bounds, ctx.colorType(), ctx.colorSpace(),
+ ctx.surfaceProps(), origin, isProtected);
+ }
+#endif
+
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if ((inputBM.colorType() != kN32_SkColorType) ||
+ (fSrcRect.width() >= inputBM.width()) || (fSrcRect.height() >= inputBM.height())) {
+ return nullptr;
+ }
+
+ SkASSERT(inputBM.getPixels());
+ if (!inputBM.getPixels() || inputBM.width() <= 0 || inputBM.height() <= 0) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32Premul(bounds.width(), bounds.height());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkColor* dptr = dst.getAddr32(0, 0);
+ int dstWidth = dst.width(), dstHeight = dst.height();
+ for (int y = 0; y < dstHeight; ++y) {
+ for (int x = 0; x < dstWidth; ++x) {
+ SkScalar x_dist = std::min(x, dstWidth - x - 1) * invInset;
+ SkScalar y_dist = std::min(y, dstHeight - y - 1) * invInset;
+ SkScalar weight = 0;
+
+ static const SkScalar kScalar2 = SkScalar(2);
+
+ // To create a smooth curve at the corners, we need to work on
+ // a square twice the size of the inset.
+ if (x_dist < kScalar2 && y_dist < kScalar2) {
+ x_dist = kScalar2 - x_dist;
+ y_dist = kScalar2 - y_dist;
+
+ SkScalar dist = SkScalarSqrt(SkScalarSquare(x_dist) +
+ SkScalarSquare(y_dist));
+ dist = std::max(kScalar2 - dist, 0.0f);
+ // SkTPin rather than std::max to handle potential NaN
+ weight = SkTPin(SkScalarSquare(dist), 0.0f, SK_Scalar1);
+ } else {
+ SkScalar sqDist = std::min(SkScalarSquare(x_dist),
+ SkScalarSquare(y_dist));
+ // SkTPin rather than std::max to handle potential NaN
+ weight = SkTPin(sqDist, 0.0f, SK_Scalar1);
+ }
+
+ SkScalar x_interp = weight * (fSrcRect.x() + x * invXZoom) + (1 - weight) * x;
+ SkScalar y_interp = weight * (fSrcRect.y() + y * invYZoom) + (1 - weight) * y;
+
+ int x_val = SkTPin(bounds.x() + SkScalarFloorToInt(x_interp), 0, inputBM.width() - 1);
+ int y_val = SkTPin(bounds.y() + SkScalarFloorToInt(y_interp), 0, inputBM.height() - 1);
+
+ *dptr = *inputBM.getAddr32(x_val, y_val);
+ dptr++;
+ }
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst, ctx.surfaceProps());
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMatrixConvolutionImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMatrixConvolutionImageFilter.cpp
new file mode 100644
index 0000000000..90833e1052
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMatrixConvolutionImageFilter.cpp
@@ -0,0 +1,529 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <utility>
+class SkMatrix;
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/effects/GrMatrixConvolutionEffect.h"
+#endif
+
+using namespace skia_private;
+
+namespace {
+
+class SkMatrixConvolutionImageFilter final : public SkImageFilter_Base {
+public:
+ SkMatrixConvolutionImageFilter(const SkISize& kernelSize, const SkScalar* kernel,
+ SkScalar gain, SkScalar bias, const SkIPoint& kernelOffset,
+ SkTileMode tileMode, bool convolveAlpha,
+ sk_sp<SkImageFilter> input, const SkRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fKernelSize(kernelSize)
+ , fGain(gain)
+ , fBias(bias)
+ , fKernelOffset(kernelOffset)
+ , fTileMode(tileMode)
+ , fConvolveAlpha(convolveAlpha) {
+ size_t size = (size_t) sk_64_mul(fKernelSize.width(), fKernelSize.height());
+ fKernel = new SkScalar[size];
+ memcpy(fKernel, kernel, size * sizeof(SkScalar));
+ SkASSERT(kernelSize.fWidth >= 1 && kernelSize.fHeight >= 1);
+ SkASSERT(kernelOffset.fX >= 0 && kernelOffset.fX < kernelSize.fWidth);
+ SkASSERT(kernelOffset.fY >= 0 && kernelOffset.fY < kernelSize.fHeight);
+ }
+
+ ~SkMatrixConvolutionImageFilter() override {
+ delete[] fKernel;
+ }
+
+protected:
+
+ void flatten(SkWriteBuffer&) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ bool onAffectsTransparentBlack() const override;
+
+private:
+ friend void ::SkRegisterMatrixConvolutionImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkMatrixConvolutionImageFilter)
+
+ SkISize fKernelSize;
+ SkScalar* fKernel;
+ SkScalar fGain;
+ SkScalar fBias;
+ SkIPoint fKernelOffset;
+ SkTileMode fTileMode;
+ bool fConvolveAlpha;
+
+ template <class PixelFetcher, bool convolveAlpha>
+ void filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ SkIRect rect,
+ const SkIRect& bounds) const;
+ template <class PixelFetcher>
+ void filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+ void filterInteriorPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+ void filterBorderPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+class UncheckedPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ return *src.getAddr32(x, y);
+ }
+};
+
+class ClampPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ x = SkTPin(x, bounds.fLeft, bounds.fRight - 1);
+ y = SkTPin(y, bounds.fTop, bounds.fBottom - 1);
+ return *src.getAddr32(x, y);
+ }
+};
+
+class RepeatPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ x = (x - bounds.left()) % bounds.width() + bounds.left();
+ y = (y - bounds.top()) % bounds.height() + bounds.top();
+ if (x < bounds.left()) {
+ x += bounds.width();
+ }
+ if (y < bounds.top()) {
+ y += bounds.height();
+ }
+ return *src.getAddr32(x, y);
+ }
+};
+
+class ClampToBlackPixelFetcher {
+public:
+ static inline SkPMColor fetch(const SkBitmap& src, int x, int y, const SkIRect& bounds) {
+ if (x < bounds.fLeft || x >= bounds.fRight || y < bounds.fTop || y >= bounds.fBottom) {
+ return 0;
+ } else {
+ return *src.getAddr32(x, y);
+ }
+ }
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::MatrixConvolution(const SkISize& kernelSize,
+ const SkScalar kernel[],
+ SkScalar gain,
+ SkScalar bias,
+ const SkIPoint& kernelOffset,
+ SkTileMode tileMode,
+ bool convolveAlpha,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect) {
+ // We need to be able to read at most SK_MaxS32 bytes, so divide that
+ // by the size of a scalar to know how many scalars we can read.
+ static constexpr int32_t kMaxKernelSize = SK_MaxS32 / sizeof(SkScalar);
+
+ if (kernelSize.width() < 1 || kernelSize.height() < 1) {
+ return nullptr;
+ }
+ if (kMaxKernelSize / kernelSize.fWidth < kernelSize.fHeight) {
+ return nullptr;
+ }
+ if (!kernel) {
+ return nullptr;
+ }
+ if ((kernelOffset.fX < 0) || (kernelOffset.fX >= kernelSize.fWidth) ||
+ (kernelOffset.fY < 0) || (kernelOffset.fY >= kernelSize.fHeight)) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMatrixConvolutionImageFilter(
+ kernelSize, kernel, gain, bias, kernelOffset, tileMode, convolveAlpha,
+ std::move(input), cropRect));
+}
+
+void SkRegisterMatrixConvolutionImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkMatrixConvolutionImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkMatrixConvolutionImageFilterImpl",
+ SkMatrixConvolutionImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkMatrixConvolutionImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+
+ SkISize kernelSize;
+ kernelSize.fWidth = buffer.readInt();
+ kernelSize.fHeight = buffer.readInt();
+ const int count = buffer.getArrayCount();
+
+ const int64_t kernelArea = sk_64_mul(kernelSize.width(), kernelSize.height());
+ if (!buffer.validate(kernelArea == count)) {
+ return nullptr;
+ }
+ if (!buffer.validateCanReadN<SkScalar>(count)) {
+ return nullptr;
+ }
+ AutoSTArray<16, SkScalar> kernel(count);
+ if (!buffer.readScalarArray(kernel.get(), count)) {
+ return nullptr;
+ }
+ SkScalar gain = buffer.readScalar();
+ SkScalar bias = buffer.readScalar();
+ SkIPoint kernelOffset;
+ kernelOffset.fX = buffer.readInt();
+ kernelOffset.fY = buffer.readInt();
+
+ SkTileMode tileMode = buffer.read32LE(SkTileMode::kLastTileMode);
+ bool convolveAlpha = buffer.readBool();
+
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ return SkImageFilters::MatrixConvolution(
+ kernelSize, kernel.get(), gain, bias, kernelOffset, tileMode,
+ convolveAlpha, common.getInput(0), common.cropRect());
+}
+
+void SkMatrixConvolutionImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeInt(fKernelSize.fWidth);
+ buffer.writeInt(fKernelSize.fHeight);
+ buffer.writeScalarArray(fKernel, fKernelSize.fWidth * fKernelSize.fHeight);
+ buffer.writeScalar(fGain);
+ buffer.writeScalar(fBias);
+ buffer.writeInt(fKernelOffset.fX);
+ buffer.writeInt(fKernelOffset.fY);
+ buffer.writeInt((int) fTileMode);
+ buffer.writeBool(fConvolveAlpha);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class PixelFetcher, bool convolveAlpha>
+void SkMatrixConvolutionImageFilter::filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ SkIRect rect,
+ const SkIRect& bounds) const {
+ if (!rect.intersect(bounds)) {
+ return;
+ }
+ for (int y = rect.fTop; y < rect.fBottom; ++y) {
+ SkPMColor* dptr = result->getAddr32(rect.fLeft - offset.fX, y - offset.fY);
+ for (int x = rect.fLeft; x < rect.fRight; ++x) {
+ SkScalar sumA = 0, sumR = 0, sumG = 0, sumB = 0;
+ for (int cy = 0; cy < fKernelSize.fHeight; cy++) {
+ for (int cx = 0; cx < fKernelSize.fWidth; cx++) {
+ SkPMColor s = PixelFetcher::fetch(src,
+ x + cx - fKernelOffset.fX,
+ y + cy - fKernelOffset.fY,
+ bounds);
+ SkScalar k = fKernel[cy * fKernelSize.fWidth + cx];
+ if (convolveAlpha) {
+ sumA += SkGetPackedA32(s) * k;
+ }
+ sumR += SkGetPackedR32(s) * k;
+ sumG += SkGetPackedG32(s) * k;
+ sumB += SkGetPackedB32(s) * k;
+ }
+ }
+ int a = convolveAlpha
+ ? SkTPin(SkScalarFloorToInt(sumA * fGain + fBias), 0, 255)
+ : 255;
+ int r = SkTPin(SkScalarFloorToInt(sumR * fGain + fBias), 0, a);
+ int g = SkTPin(SkScalarFloorToInt(sumG * fGain + fBias), 0, a);
+ int b = SkTPin(SkScalarFloorToInt(sumB * fGain + fBias), 0, a);
+ if (!convolveAlpha) {
+ a = SkGetPackedA32(PixelFetcher::fetch(src, x, y, bounds));
+ *dptr++ = SkPreMultiplyARGB(a, r, g, b);
+ } else {
+ *dptr++ = SkPackARGB32(a, r, g, b);
+ }
+ }
+ }
+}
+
+template<class PixelFetcher>
+void SkMatrixConvolutionImageFilter::filterPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const {
+ if (fConvolveAlpha) {
+ filterPixels<PixelFetcher, true>(src, result, offset, rect, bounds);
+ } else {
+ filterPixels<PixelFetcher, false>(src, result, offset, rect, bounds);
+ }
+}
+
+void SkMatrixConvolutionImageFilter::filterInteriorPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& bounds) const {
+ switch (fTileMode) {
+ case SkTileMode::kMirror:
+ // TODO (michaelludwig) - Implement mirror tiling, treat as repeat for now.
+ case SkTileMode::kRepeat:
+ // In repeat mode, we still need to wrap the samples around the src
+ filterPixels<RepeatPixelFetcher>(src, result, offset, rect, bounds);
+ break;
+ case SkTileMode::kClamp:
+ // Fall through
+ case SkTileMode::kDecal:
+ filterPixels<UncheckedPixelFetcher>(src, result, offset, rect, bounds);
+ break;
+ }
+}
+
+void SkMatrixConvolutionImageFilter::filterBorderPixels(const SkBitmap& src,
+ SkBitmap* result,
+ SkIVector& offset,
+ const SkIRect& rect,
+ const SkIRect& srcBounds) const {
+ switch (fTileMode) {
+ case SkTileMode::kClamp:
+ filterPixels<ClampPixelFetcher>(src, result, offset, rect, srcBounds);
+ break;
+ case SkTileMode::kMirror:
+ // TODO (michaelludwig) - Implement mirror tiling, treat as repeat for now.
+ case SkTileMode::kRepeat:
+ filterPixels<RepeatPixelFetcher>(src, result, offset, rect, srcBounds);
+ break;
+ case SkTileMode::kDecal:
+ filterPixels<ClampToBlackPixelFetcher>(src, result, offset, rect, srcBounds);
+ break;
+ }
+}
+
+sk_sp<SkSpecialImage> SkMatrixConvolutionImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect dstBounds;
+ input = this->applyCropRectAndPad(this->mapContext(ctx), input.get(), &inputOffset, &dstBounds);
+ if (!input) {
+ return nullptr;
+ }
+
+ const SkIRect originalSrcBounds = SkIRect::MakeXYWH(inputOffset.fX, inputOffset.fY,
+ input->width(), input->height());
+
+ SkIRect srcBounds = this->onFilterNodeBounds(dstBounds, ctx.ctm(), kReverse_MapDirection,
+ &originalSrcBounds);
+
+ if (SkTileMode::kRepeat == fTileMode || SkTileMode::kMirror == fTileMode) {
+ srcBounds = DetermineRepeatedSrcBound(srcBounds, fKernelOffset,
+ fKernelSize, originalSrcBounds);
+ } else {
+ if (!srcBounds.intersect(dstBounds)) {
+ return nullptr;
+ }
+ }
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ auto context = ctx.getContext();
+
+ // Ensure the input is in the destination color space. Typically applyCropRect will have
+ // called pad_image to account for our dilation of bounds, so the result will already be
+ // moved to the destination color space. If a filter DAG avoids that, then we use this
+ // fall-back, which saves us from having to do the xform during the filter itself.
+ input = ImageToColorSpace(input.get(), ctx.colorType(), ctx.colorSpace(),
+ ctx.surfaceProps());
+
+ GrSurfaceProxyView inputView = input->view(context);
+ SkASSERT(inputView.asTextureProxy());
+
+ const auto isProtected = inputView.proxy()->isProtected();
+ const auto origin = inputView.origin();
+
+ offset->fX = dstBounds.left();
+ offset->fY = dstBounds.top();
+ dstBounds.offset(-inputOffset);
+ srcBounds.offset(-inputOffset);
+ // Map srcBounds from input's logical image domain to that of the proxy
+ srcBounds.offset(input->subset().x(), input->subset().y());
+
+ auto fp = GrMatrixConvolutionEffect::Make(context,
+ std::move(inputView),
+ srcBounds,
+ fKernelSize,
+ fKernel,
+ fGain,
+ fBias,
+ fKernelOffset,
+ SkTileModeToWrapMode(fTileMode),
+ fConvolveAlpha,
+ *ctx.getContext()->priv().caps());
+ if (!fp) {
+ return nullptr;
+ }
+
+ // FIXME (michaelludwig) - Clean this up as part of the imagefilter refactor, some filters
+ // instead require a coord transform on the FP. At very least, be consistent, at best make
+ // it so that filter impls don't need to worry about the subset origin.
+
+ // Must also map the dstBounds since it is used as the src rect in DrawWithFP when
+ // evaluating the FP, and the dst rect just uses the size of dstBounds.
+ dstBounds.offset(input->subset().x(), input->subset().y());
+ return DrawWithFP(context, std::move(fp), dstBounds, ctx.colorType(), ctx.colorSpace(),
+ ctx.surfaceProps(), origin, isProtected);
+ }
+#endif
+
+ SkBitmap inputBM;
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ if (!fConvolveAlpha && !inputBM.isOpaque()) {
+ // This leaves the bitmap tagged as premul, which seems weird to me,
+ // but is consistent with old behavior.
+ inputBM.readPixels(inputBM.info().makeAlphaType(kUnpremul_SkAlphaType),
+ inputBM.getPixels(), inputBM.rowBytes(), 0,0);
+ }
+
+ if (!inputBM.getPixels()) {
+ return nullptr;
+ }
+
+ const SkImageInfo info = SkImageInfo::MakeN32(dstBounds.width(), dstBounds.height(),
+ inputBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ offset->fX = dstBounds.fLeft;
+ offset->fY = dstBounds.fTop;
+ dstBounds.offset(-inputOffset);
+ srcBounds.offset(-inputOffset);
+
+ SkIRect interior;
+ if (SkTileMode::kRepeat == fTileMode || SkTileMode::kMirror == fTileMode) {
+ // In repeat mode, the filterPixels calls will wrap around
+ // so we just need to render 'dstBounds'
+ interior = dstBounds;
+ } else {
+ interior = SkIRect::MakeXYWH(dstBounds.left() + fKernelOffset.fX,
+ dstBounds.top() + fKernelOffset.fY,
+ dstBounds.width() - fKernelSize.fWidth + 1,
+ dstBounds.height() - fKernelSize.fHeight + 1);
+ }
+
+ SkIRect top = SkIRect::MakeLTRB(dstBounds.left(), dstBounds.top(),
+ dstBounds.right(), interior.top());
+ SkIRect bottom = SkIRect::MakeLTRB(dstBounds.left(), interior.bottom(),
+ dstBounds.right(), dstBounds.bottom());
+ SkIRect left = SkIRect::MakeLTRB(dstBounds.left(), interior.top(),
+ interior.left(), interior.bottom());
+ SkIRect right = SkIRect::MakeLTRB(interior.right(), interior.top(),
+ dstBounds.right(), interior.bottom());
+
+ SkIVector dstContentOffset = { offset->fX - inputOffset.fX, offset->fY - inputOffset.fY };
+
+ this->filterBorderPixels(inputBM, &dst, dstContentOffset, top, srcBounds);
+ this->filterBorderPixels(inputBM, &dst, dstContentOffset, left, srcBounds);
+ this->filterInteriorPixels(inputBM, &dst, dstContentOffset, interior, srcBounds);
+ this->filterBorderPixels(inputBM, &dst, dstContentOffset, right, srcBounds);
+ this->filterBorderPixels(inputBM, &dst, dstContentOffset, bottom, srcBounds);
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(dstBounds.width(), dstBounds.height()),
+ dst, ctx.surfaceProps());
+}
+
+SkIRect SkMatrixConvolutionImageFilter::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+ if (kReverse_MapDirection == dir && inputRect &&
+ (SkTileMode::kRepeat == fTileMode || SkTileMode::kMirror == fTileMode)) {
+ SkASSERT(inputRect);
+ return DetermineRepeatedSrcBound(src, fKernelOffset, fKernelSize, *inputRect);
+ }
+
+ SkIRect dst = src;
+ int w = fKernelSize.width() - 1, h = fKernelSize.height() - 1;
+
+ if (kReverse_MapDirection == dir) {
+ dst.adjust(-fKernelOffset.fX, -fKernelOffset.fY,
+ w - fKernelOffset.fX, h - fKernelOffset.fY);
+ } else {
+ dst.adjust(fKernelOffset.fX - w, fKernelOffset.fY - h, fKernelOffset.fX, fKernelOffset.fY);
+ }
+ return dst;
+}
+
+bool SkMatrixConvolutionImageFilter::onAffectsTransparentBlack() const {
+ // It seems that the only rational way for repeat sample mode to work is if the caller
+ // explicitly restricts the input in which case the input range is explicitly known and
+ // specified.
+ // TODO: is seems that this should be true for clamp mode too.
+
+ // For the other modes, because the kernel is applied in device-space, we have no idea what
+ // pixels it will affect in object-space.
+ return SkTileMode::kRepeat != fTileMode && SkTileMode::kMirror != fTileMode;
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMatrixTransformImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMatrixTransformImageFilter.cpp
new file mode 100644
index 0000000000..7064906b29
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMatrixTransformImageFilter.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "src/core/SkImageFilterTypes.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSamplingPriv.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/effects/imagefilters/SkCropImageFilter.h"
+
+#include <utility>
+
+struct SkISize;
+
+namespace {
+
+class SkMatrixTransformImageFilter final : public SkImageFilter_Base {
+public:
+ // TODO(michaelludwig): Update this to use SkM44.
+ SkMatrixTransformImageFilter(const SkMatrix& transform,
+ const SkSamplingOptions& sampling,
+ sk_sp<SkImageFilter> input)
+ : SkImageFilter_Base(&input, 1, nullptr)
+ , fTransform(transform)
+ , fSampling(sampling) {
+ // Pre-cache so future calls to fTransform.getType() are threadsafe.
+ (void) static_cast<const SkMatrix&>(fTransform).getType();
+ }
+
+ SkRect computeFastBounds(const SkRect&) const override;
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+private:
+ friend void ::SkRegisterMatrixTransformImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkMatrixTransformImageFilter)
+ static sk_sp<SkFlattenable> LegacyOffsetCreateProc(SkReadBuffer& buffer);
+
+ MatrixCapability onGetCTMCapability() const override { return MatrixCapability::kComplex; }
+
+ skif::FilterResult onFilterImage(const skif::Context& context) const override;
+
+ skif::LayerSpace<SkIRect> onGetInputLayerBounds(
+ const skif::Mapping& mapping,
+ const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds,
+ VisitChildren recurse) const override;
+
+ skif::LayerSpace<SkIRect> onGetOutputLayerBounds(
+ const skif::Mapping& mapping,
+ const skif::LayerSpace<SkIRect>& contentBounds) const override;
+
+ skif::ParameterSpace<SkMatrix> fTransform;
+ SkSamplingOptions fSampling;
+};
+
+} // namespace
+
+sk_sp<SkImageFilter> SkImageFilters::MatrixTransform(const SkMatrix& transform,
+ const SkSamplingOptions& sampling,
+ sk_sp<SkImageFilter> input) {
+ return sk_sp<SkImageFilter>(new SkMatrixTransformImageFilter(transform,
+ sampling,
+ std::move(input)));
+}
+
+sk_sp<SkImageFilter> SkImageFilters::Offset(SkScalar dx, SkScalar dy,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect) {
+ // The legacy ::Offset() implementation rounded its offset vector to layer-space pixels, which
+ // is roughly equivalent to using nearest-neighbor sampling with the translation matrix.
+ sk_sp<SkImageFilter> offset = SkImageFilters::MatrixTransform(
+ SkMatrix::Translate(dx, dy),
+ SkSamplingOptions{SkFilterMode::kNearest},
+ std::move(input));
+ // The legacy 'cropRect' applies only to the output of the offset filter.
+ if (cropRect) {
+ offset = SkMakeCropImageFilter(*cropRect, std::move(offset));
+ }
+ return offset;
+}
+
+void SkRegisterMatrixTransformImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkMatrixTransformImageFilter);
+ // TODO(michaelludwig): Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkMatrixImageFilter", SkMatrixTransformImageFilter::CreateProc);
+ // TODO(michaelludwig): Remove after grace period for SKPs to stop using old serialization
+ SkFlattenable::Register("SkOffsetImageFilter",
+ SkMatrixTransformImageFilter::LegacyOffsetCreateProc);
+ SkFlattenable::Register("SkOffsetImageFilterImpl",
+ SkMatrixTransformImageFilter::LegacyOffsetCreateProc);
+}
+
+sk_sp<SkFlattenable> SkMatrixTransformImageFilter::LegacyOffsetCreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkPoint offset;
+ buffer.readPoint(&offset);
+ return SkImageFilters::Offset(offset.x(), offset.y(), common.getInput(0), common.cropRect());
+}
+
+sk_sp<SkFlattenable> SkMatrixTransformImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkMatrix matrix;
+ buffer.readMatrix(&matrix);
+
+ auto sampling = [&]() {
+ if (buffer.isVersionLT(SkPicturePriv::kMatrixImageFilterSampling_Version)) {
+ return SkSamplingPriv::FromFQ(buffer.read32LE(kLast_SkLegacyFQ), kLinear_SkMediumAs);
+ } else {
+ return buffer.readSampling();
+ }
+ }();
+ return SkImageFilters::MatrixTransform(matrix, sampling, common.getInput(0));
+}
+
+void SkMatrixTransformImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->SkImageFilter_Base::flatten(buffer);
+ buffer.writeMatrix(SkMatrix(fTransform));
+ buffer.writeSampling(fSampling);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+skif::FilterResult SkMatrixTransformImageFilter::onFilterImage(const skif::Context& context) const {
+ skif::FilterResult childOutput = this->filterInput(0, context);
+ skif::LayerSpace<SkMatrix> transform = context.mapping().paramToLayer(fTransform);
+ return childOutput.applyTransform(context, transform, fSampling);
+}
+
+SkRect SkMatrixTransformImageFilter::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ return static_cast<const SkMatrix&>(fTransform).mapRect(bounds);
+}
+
+skif::LayerSpace<SkIRect> SkMatrixTransformImageFilter::onGetInputLayerBounds(
+ const skif::Mapping& mapping,
+ const skif::LayerSpace<SkIRect>& desiredOutput,
+ const skif::LayerSpace<SkIRect>& contentBounds,
+ VisitChildren recurse) const {
+ // The required input for this filter to cover 'desiredOutput' is the smallest rectangle such
+ // that after being transformed by the layer-space adjusted 'fTransform', it contains the output
+ skif::LayerSpace<SkMatrix> inverse;
+ if (!mapping.paramToLayer(fTransform).invert(&inverse)) {
+ return skif::LayerSpace<SkIRect>::Empty();
+ }
+ skif::LayerSpace<SkIRect> requiredInput = inverse.mapRect(desiredOutput);
+
+ // Additionally if there is any filtering beyond nearest neighbor, we request an extra buffer of
+ // pixels so that the content is available to the bilerp/bicubic kernel.
+ if (fSampling != SkSamplingOptions()) {
+ requiredInput.outset(skif::LayerSpace<SkISize>({1, 1}));
+ }
+
+ if (recurse == VisitChildren::kNo) {
+ return requiredInput;
+ } else {
+ // Our required input is the desired output for our child image filter.
+ return this->visitInputLayerBounds(mapping, requiredInput, contentBounds);
+ }
+}
+
+skif::LayerSpace<SkIRect> SkMatrixTransformImageFilter::onGetOutputLayerBounds(
+ const skif::Mapping& mapping,
+ const skif::LayerSpace<SkIRect>& contentBounds) const {
+ // The output of this filter is the transformed bounds of its child's output.
+ skif::LayerSpace<SkIRect> childOutput = this->visitOutputLayerBounds(mapping, contentBounds);
+ return mapping.paramToLayer(fTransform).mapRect(childOutput);
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMergeImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMergeImageFilter.cpp
new file mode 100644
index 0000000000..ff7bf0b85a
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMergeImageFilter.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+
+#include <memory>
+
+namespace {
+
+// Image filter that draws the results of all of its inputs on top of each
+// other (in input order) into a single surface sized to the union of their
+// bounds, optionally restricted by a crop rect.
+class SkMergeImageFilter final : public SkImageFilter_Base {
+public:
+ SkMergeImageFilter(sk_sp<SkImageFilter>* const filters, int count,
+ const SkRect* cropRect)
+ : INHERITED(filters, count, cropRect) {
+ SkASSERT(count >= 0);
+ }
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ // Merging is positionally independent of the CTM, so any matrix is supported.
+ MatrixCapability onGetCTMCapability() const override { return MatrixCapability::kComplex; }
+
+private:
+ friend void ::SkRegisterMergeImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkMergeImageFilter)
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+// Public factory: wraps 'count' input filters in a merge filter.
+sk_sp<SkImageFilter> SkImageFilters::Merge(sk_sp<SkImageFilter>* const filters, int count,
+ const CropRect& cropRect) {
+ return sk_sp<SkImageFilter>(new SkMergeImageFilter(filters, count, cropRect));
+}
+
+// Registers the merge filter for (de)serialization, including its legacy name.
+void SkRegisterMergeImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkMergeImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkMergeImageFilterImpl", SkMergeImageFilter::CreateProc);
+}
+
+// Deserializes a merge filter from 'buffer'. The -1 passed to unflatten appears
+// to allow an arbitrary input count -- NOTE(review): confirm against
+// SkImageFilter_Base::Common::unflatten's contract.
+sk_sp<SkFlattenable> SkMergeImageFilter::CreateProc(SkReadBuffer& buffer) {
+ Common common;
+ if (!common.unflatten(buffer, -1) || !buffer.isValid()) {
+ return nullptr;
+ }
+ return SkImageFilters::Merge(common.inputs(), common.inputCount(), common.cropRect());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Evaluates every input filter, unions their bounds, applies the crop rect and
+// clip, then composites each input image at its offset into a fresh surface.
+// Returns null when there are no inputs or the merged bounds end up empty.
+sk_sp<SkSpecialImage> SkMergeImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ int inputCount = this->countInputs();
+ if (inputCount < 1) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ bounds.setEmpty();
+
+ std::unique_ptr<sk_sp<SkSpecialImage>[]> inputs(new sk_sp<SkSpecialImage>[inputCount]);
+ std::unique_ptr<SkIPoint[]> offsets(new SkIPoint[inputCount]);
+
+ // Filter all of the inputs.
+ for (int i = 0; i < inputCount; ++i) {
+ offsets[i] = { 0, 0 };
+ inputs[i] = this->filterInput(i, ctx, &offsets[i]);
+ if (!inputs[i]) {
+ // A null input contributes nothing; it is skipped again when drawing below.
+ continue;
+ }
+ const SkIRect inputBounds = SkIRect::MakeXYWH(offsets[i].fX, offsets[i].fY,
+ inputs[i]->width(), inputs[i]->height());
+ bounds.join(inputBounds);
+ }
+ if (bounds.isEmpty()) {
+ return nullptr;
+ }
+
+ // Apply the crop rect to the union of the inputs' bounds.
+ // Note that the crop rect can only reduce the bounds, since this
+ // filter does not affect transparent black.
+ bool embiggen = false;
+ this->getCropRect().applyTo(bounds, ctx.ctm(), embiggen, &bounds);
+ if (!bounds.intersect(ctx.clipBounds())) {
+ return nullptr;
+ }
+
+ // Top-left of the merged output; input offsets are rebased against this below.
+ const int x0 = bounds.left();
+ const int y0 = bounds.top();
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ // Start from transparent so skipped/transparent regions stay transparent.
+ canvas->clear(0x0);
+
+ // Composite all of the filter inputs.
+ for (int i = 0; i < inputCount; ++i) {
+ if (!inputs[i]) {
+ continue;
+ }
+
+ inputs[i]->draw(canvas,
+ SkIntToScalar(offsets[i].x()) - x0, SkIntToScalar(offsets[i].y()) - y0);
+ }
+
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return surf->makeImageSnapshot();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkMorphologyImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkMorphologyImageFilter.cpp
new file mode 100644
index 0000000000..c11dd5c613
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkMorphologyImageFilter.cpp
@@ -0,0 +1,768 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkSLSampleUsage.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkSLTypeShared.h"
+#include "src/gpu/KeyBuilder.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrProcessorUnitTest.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
+
+struct GrShaderCaps;
+#endif
+
+#if GR_TEST_UTILS
+#include "src/base/SkRandom.h"
+#endif
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <emmintrin.h>
+#endif
+
+#if defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+namespace {
+
+// Which morphology operation to apply: erode takes the per-channel minimum over
+// the kernel, dilate the per-channel maximum.
+enum class MorphType {
+ kErode,
+ kDilate,
+ kLastType = kDilate
+};
+
+// Axis along which a single 1D morphology pass runs.
+enum class MorphDirection { kX, kY };
+
+// Image filter implementing SkImageFilters::Dilate/Erode. The 2D morphology is
+// applied as two separable 1D passes (X then Y) using either the GPU effect or
+// the CPU procs defined later in this file.
+class SkMorphologyImageFilter final : public SkImageFilter_Base {
+public:
+ SkMorphologyImageFilter(MorphType type, SkScalar radiusX, SkScalar radiusY,
+ sk_sp<SkImageFilter> input, const SkRect* cropRect)
+ : INHERITED(&input, 1, cropRect)
+ , fType(type)
+ , fRadius(SkSize::Make(radiusX, radiusY)) {}
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+ SkIRect onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+ /**
+ * All morphology procs have the same signature: src is the source buffer, dst the
+ * destination buffer, radius is the morphology radius, width and height are the bounds
+ * of the destination buffer (in pixels), and srcStride and dstStride are the
+ * number of pixels per row in each buffer. All buffers are 8888.
+ */
+
+ typedef void (*Proc)(const SkPMColor* src, SkPMColor* dst, int radius,
+ int width, int height, int srcStride, int dstStride);
+
+protected:
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+ void flatten(SkWriteBuffer&) const override;
+
+ // Maps the parameter-space radius vector through 'ctm' and returns its
+ // absolute value, i.e. the radius expressed in device/layer space.
+ SkSize mappedRadius(const SkMatrix& ctm) const {
+ SkVector radiusVector = SkVector::Make(fRadius.width(), fRadius.height());
+ ctm.mapVectors(&radiusVector, 1);
+ radiusVector.setAbs(radiusVector);
+ return SkSize::Make(radiusVector.x(), radiusVector.y());
+ }
+
+private:
+ friend void ::SkRegisterMorphologyImageFilterFlattenables();
+
+ SK_FLATTENABLE_HOOKS(SkMorphologyImageFilter)
+
+ MorphType fType;
+ SkSize fRadius; // parameter-space radii (x, y), both non-negative per the factories
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+// Public factory for a dilate (per-channel max) filter; rejects negative radii.
+sk_sp<SkImageFilter> SkImageFilters::Dilate(SkScalar radiusX, SkScalar radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect) {
+ if (radiusX < 0 || radiusY < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMorphologyImageFilter(
+ MorphType::kDilate, radiusX, radiusY, std::move(input), cropRect));
+}
+
+// Public factory for an erode (per-channel min) filter; rejects negative radii.
+sk_sp<SkImageFilter> SkImageFilters::Erode(SkScalar radiusX, SkScalar radiusY,
+ sk_sp<SkImageFilter> input,
+ const CropRect& cropRect) {
+ if (radiusX < 0 || radiusY < 0) {
+ return nullptr;
+ }
+ return sk_sp<SkImageFilter>(new SkMorphologyImageFilter(
+ MorphType::kErode, radiusX, radiusY, std::move(input), cropRect));
+}
+
+// Registers the morphology filter for (de)serialization, including its legacy name.
+void SkRegisterMorphologyImageFilterFlattenables() {
+ SK_REGISTER_FLATTENABLE(SkMorphologyImageFilter);
+ // TODO (michaelludwig): Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkMorphologyImageFilterImpl", SkMorphologyImageFilter::CreateProc);
+}
+
+// Deserializes width/height radii and the morph type (validated against
+// kLastType by read32LE), then rebuilds the filter through the public factories
+// so their negative-radius validation is reapplied.
+sk_sp<SkFlattenable> SkMorphologyImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+
+ SkScalar width = buffer.readScalar();
+ SkScalar height = buffer.readScalar();
+ MorphType filterType = buffer.read32LE(MorphType::kLastType);
+
+ if (filterType == MorphType::kDilate) {
+ return SkImageFilters::Dilate(width, height, common.getInput(0), common.cropRect());
+ } else if (filterType == MorphType::kErode) {
+ return SkImageFilters::Erode(width, height, common.getInput(0), common.cropRect());
+ } else {
+ return nullptr;
+ }
+}
+
+// Serialization mirror of CreateProc: base data, then radii, then the type.
+void SkMorphologyImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeScalar(fRadius.fWidth);
+ buffer.writeScalar(fRadius.fHeight);
+ buffer.writeInt(static_cast<int>(fType));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Runs a horizontal morphology pass: reads 'src' starting at bounds' top-left,
+// writes the full 'dst' bitmap.
+static void call_proc_X(SkMorphologyImageFilter::Proc procX,
+ const SkBitmap& src, SkBitmap* dst,
+ int radiusX, const SkIRect& bounds) {
+ procX(src.getAddr32(bounds.left(), bounds.top()), dst->getAddr32(0, 0),
+ radiusX, bounds.width(), bounds.height(),
+ src.rowBytesAsPixels(), dst->rowBytesAsPixels());
+}
+
+// Runs a vertical morphology pass. Note width/height are passed swapped
+// relative to call_proc_X, matching the proc's transposed iteration for kY.
+static void call_proc_Y(SkMorphologyImageFilter::Proc procY,
+ const SkPMColor* src, int srcRowBytesAsPixels, SkBitmap* dst,
+ int radiusY, const SkIRect& bounds) {
+ procY(src, dst->getAddr32(0, 0),
+ radiusY, bounds.height(), bounds.width(),
+ srcRowBytesAsPixels, dst->rowBytesAsPixels());
+}
+
+// Conservative bounds: the child's bounds grown by the parameter-space radii.
+SkRect SkMorphologyImageFilter::computeFastBounds(const SkRect& src) const {
+ SkRect bounds = this->getInput(0) ? this->getInput(0)->computeFastBounds(src) : src;
+ bounds.outset(fRadius.width(), fRadius.height());
+ return bounds;
+}
+
+// Outsets 'src' by the CTM-mapped radius, rounded up; the same growth applies
+// in both map directions since dilate/erode are symmetric in bounds terms.
+SkIRect SkMorphologyImageFilter::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection, const SkIRect* inputRect) const {
+ SkSize radius = mappedRadius(ctm);
+ return src.makeOutset(SkScalarCeilToInt(radius.width()), SkScalarCeilToInt(radius.height()));
+}
+
+#if defined(SK_GANESH)
+
+///////////////////////////////////////////////////////////////////////////////
+/**
+ * Morphology effects. Depending upon the type of morphology, either the
+ * component-wise min (Erode_Type) or max (Dilate_Type) of all pixels in the
+ * kernel is selected as the new color. The new color is modulated by the input
+ * color.
+ */
+class GrMorphologyEffect : public GrFragmentProcessor {
+public:
+ // Unbounded variant: the kernel samples freely along the morphology axis.
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ std::unique_ptr<GrFragmentProcessor> inputFP, GrSurfaceProxyView view,
+ SkAlphaType srcAlphaType, MorphDirection dir, int radius, MorphType type) {
+ return std::unique_ptr<GrFragmentProcessor>(
+ new GrMorphologyEffect(std::move(inputFP), std::move(view), srcAlphaType, dir,
+ radius, type, /*range=*/nullptr));
+ }
+
+ // Bounded variant: sample coordinates along the axis are clamped to
+ // range[0]..range[1] so the kernel does not read outside the source rect.
+ static std::unique_ptr<GrFragmentProcessor> Make(
+ std::unique_ptr<GrFragmentProcessor> inputFP, GrSurfaceProxyView view,
+ SkAlphaType srcAlphaType, MorphDirection dir, int radius, MorphType type,
+ const float range[2]) {
+ return std::unique_ptr<GrFragmentProcessor>(new GrMorphologyEffect(
+ std::move(inputFP), std::move(view), srcAlphaType, dir, radius, type, range));
+ }
+
+ const char* name() const override { return "Morphology"; }
+
+ std::unique_ptr<GrFragmentProcessor> clone() const override {
+ return std::unique_ptr<GrFragmentProcessor>(new GrMorphologyEffect(*this));
+ }
+
+private:
+ MorphDirection fDirection;
+ int fRadius;
+ MorphType fType;
+ bool fUseRange; // true when fRange holds valid clamp bounds
+ float fRange[2]; // only initialized when fUseRange is set
+
+ std::unique_ptr<ProgramImpl> onMakeProgramImpl() const override;
+
+ void onAddToKey(const GrShaderCaps&, skgpu::KeyBuilder*) const override;
+
+ bool onIsEqual(const GrFragmentProcessor&) const override;
+ GrMorphologyEffect(std::unique_ptr<GrFragmentProcessor> inputFP, GrSurfaceProxyView,
+ SkAlphaType srcAlphaType, MorphDirection, int radius, MorphType,
+ const float range[2]);
+ explicit GrMorphologyEffect(const GrMorphologyEffect&);
+
+ GR_DECLARE_FRAGMENT_PROCESSOR_TEST
+
+ using INHERITED = GrFragmentProcessor;
+};
+
+// Emits the SkSL for a 1D morphology pass: walks 2*radius+1 samples along the
+// chosen axis, folding each into 'color' with min (erode) or max (dilate),
+// optionally clamping the sample coordinate to the 'Range' uniform, and finally
+// modulates by the input FP's color.
+std::unique_ptr<GrFragmentProcessor::ProgramImpl> GrMorphologyEffect::onMakeProgramImpl() const {
+ class Impl : public ProgramImpl {
+ public:
+ void emitCode(EmitArgs& args) override {
+ // Child slot order established in the GrMorphologyEffect constructor.
+ constexpr int kInputFPIndex = 0;
+ constexpr int kTexEffectIndex = 1;
+
+ const GrMorphologyEffect& me = args.fFp.cast<GrMorphologyEffect>();
+
+ GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;
+ fRangeUni = uniformHandler->addUniform(&me, kFragment_GrShaderFlag, SkSLType::kFloat2,
+ "Range");
+ const char* range = uniformHandler->getUniformCStr(fRangeUni);
+
+ GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
+
+ const char* func = me.fType == MorphType::kErode ? "min" : "max";
+
+ // Identity value for the fold: 1 for min (erode), 0 for max (dilate).
+ char initialValue = me.fType == MorphType::kErode ? '1' : '0';
+ fragBuilder->codeAppendf("half4 color = half4(%c);", initialValue);
+
+ char dir = me.fDirection == MorphDirection::kX ? 'x' : 'y';
+
+ int width = 2 * me.fRadius + 1;
+
+ // float2 coord = coord2D;
+ fragBuilder->codeAppendf("float2 coord = %s;", args.fSampleCoord);
+ // coord.x -= radius;
+ fragBuilder->codeAppendf("coord.%c -= %d;", dir, me.fRadius);
+ if (me.fUseRange) {
+ // highBound = min(highBound, coord.x + (width-1));
+ fragBuilder->codeAppendf("float highBound = min(%s.y, coord.%c + %f);", range, dir,
+ float(width - 1));
+ // coord.x = max(lowBound, coord.x);
+ fragBuilder->codeAppendf("coord.%c = max(%s.x, coord.%c);", dir, range, dir);
+ }
+ fragBuilder->codeAppendf("for (int i = 0; i < %d; i++) {", width);
+ SkString sample = this->invokeChild(kTexEffectIndex, args, "coord");
+ fragBuilder->codeAppendf(" color = %s(color, %s);", func, sample.c_str());
+ // coord.x += 1;
+ fragBuilder->codeAppendf(" coord.%c += 1;", dir);
+ if (me.fUseRange) {
+ // coord.x = min(highBound, coord.x);
+ fragBuilder->codeAppendf(" coord.%c = min(highBound, coord.%c);", dir, dir);
+ }
+ fragBuilder->codeAppend("}");
+
+ SkString inputColor = this->invokeChild(kInputFPIndex, args);
+ fragBuilder->codeAppendf("return color * %s;", inputColor.c_str());
+ }
+
+ private:
+ // Uploads the clamp range each frame; skipped entirely when unused.
+ void onSetData(const GrGLSLProgramDataManager& pdman,
+ const GrFragmentProcessor& proc) override {
+ const GrMorphologyEffect& m = proc.cast<GrMorphologyEffect>();
+ if (m.fUseRange) {
+ pdman.set2f(fRangeUni, m.fRange[0], m.fRange[1]);
+ }
+ }
+
+ GrGLSLProgramDataManager::UniformHandle fRangeUni;
+ };
+
+ return std::make_unique<Impl>();
+}
+
+// Shader key layout: bits 0-7 radius, bit 8 type, bit 9 direction, bit 10
+// whether range clamping is compiled in.
+void GrMorphologyEffect::onAddToKey(const GrShaderCaps& caps, skgpu::KeyBuilder* b) const {
+ uint32_t key = static_cast<uint32_t>(fRadius);
+ key |= (static_cast<uint32_t>(fType) << 8);
+ key |= (static_cast<uint32_t>(fDirection) << 9);
+ if (fUseRange) {
+ key |= 1 << 10;
+ }
+ b->add32(key);
+}
+
+// Registers the optional input FP as child 0 and the texture effect as child 1
+// (explicitly sampled), matching the kInputFPIndex/kTexEffectIndex constants in
+// onMakeProgramImpl. 'range' may be null; fRange is only filled when present.
+GrMorphologyEffect::GrMorphologyEffect(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrSurfaceProxyView view,
+ SkAlphaType srcAlphaType,
+ MorphDirection direction,
+ int radius,
+ MorphType type,
+ const float range[2])
+ : INHERITED(kGrMorphologyEffect_ClassID, ModulateForClampedSamplerOptFlags(srcAlphaType))
+ , fDirection(direction)
+ , fRadius(radius)
+ , fType(type)
+ , fUseRange(SkToBool(range)) {
+ this->setUsesSampleCoordsDirectly();
+ this->registerChild(std::move(inputFP));
+ this->registerChild(GrTextureEffect::Make(std::move(view), srcAlphaType),
+ SkSL::SampleUsage::Explicit());
+ if (fUseRange) {
+ fRange[0] = range[0];
+ fRange[1] = range[1];
+ }
+}
+
+// Clone constructor; copies fRange only when it was populated in the source.
+GrMorphologyEffect::GrMorphologyEffect(const GrMorphologyEffect& that)
+ : INHERITED(that)
+ , fDirection(that.fDirection)
+ , fRadius(that.fRadius)
+ , fType(that.fType)
+ , fUseRange(that.fUseRange) {
+ if (that.fUseRange) {
+ fRange[0] = that.fRange[0];
+ fRange[1] = that.fRange[1];
+ }
+}
+
+// Equality over the key-affecting fields; fRange values themselves are not
+// compared (they are uniforms, uploaded per-draw, not baked into the program).
+bool GrMorphologyEffect::onIsEqual(const GrFragmentProcessor& sBase) const {
+ const GrMorphologyEffect& s = sBase.cast<GrMorphologyEffect>();
+ return this->fRadius == s.fRadius &&
+ this->fDirection == s.fDirection &&
+ this->fUseRange == s.fUseRange &&
+ this->fType == s.fType;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrMorphologyEffect)
+
+#if GR_TEST_UTILS
+// Fuzz-test factory: random direction, radius in [1, 10], and morph type,
+// always using the unbounded (no-range) variant.
+std::unique_ptr<GrFragmentProcessor> GrMorphologyEffect::TestCreate(GrProcessorTestData* d) {
+ auto [view, ct, at] = d->randomView();
+
+ MorphDirection dir = d->fRandom->nextBool() ? MorphDirection::kX : MorphDirection::kY;
+ static const int kMaxRadius = 10;
+ int radius = d->fRandom->nextRangeU(1, kMaxRadius);
+ MorphType type = d->fRandom->nextBool() ? MorphType::kErode : MorphType::kDilate;
+ return GrMorphologyEffect::Make(d->inputFP(), std::move(view), at, dir, radius, type);
+}
+#endif
+
+// Fills 'dstRect' by sampling 'srcRect' through a range-clamped morphology FP.
+static void apply_morphology_rect(skgpu::ganesh::SurfaceFillContext* sfc,
+ GrSurfaceProxyView view,
+ SkAlphaType srcAlphaType,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ MorphType morphType,
+ const float range[2],
+ MorphDirection direction) {
+ auto fp = GrMorphologyEffect::Make(/*inputFP=*/nullptr,
+ std::move(view),
+ srcAlphaType,
+ direction,
+ radius,
+ morphType,
+ range);
+ sfc->fillRectToRectWithFP(srcRect, dstRect, std::move(fp));
+}
+
+// Same as apply_morphology_rect but without coordinate clamping, for regions
+// whose kernel footprint is known to stay inside the source.
+static void apply_morphology_rect_no_bounds(skgpu::ganesh::SurfaceFillContext* sfc,
+ GrSurfaceProxyView view,
+ SkAlphaType srcAlphaType,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ MorphType morphType,
+ MorphDirection direction) {
+ auto fp = GrMorphologyEffect::Make(
+ /*inputFP=*/nullptr, std::move(view), srcAlphaType, direction, radius, morphType);
+ sfc->fillRectToRectWithFP(srcRect, dstRect, std::move(fp));
+}
+
+// One full 1D GPU morphology pass. Splits the destination into lower/middle/
+// upper strips along 'direction': the two edge strips (within 'radius' of the
+// source edge) use the clamped effect so the kernel never reads outside
+// srcRect, while the interior strip uses the cheaper unclamped effect.
+static void apply_morphology_pass(skgpu::ganesh::SurfaceFillContext* sfc,
+ GrSurfaceProxyView view,
+ SkAlphaType srcAlphaType,
+ const SkIRect& srcRect,
+ const SkIRect& dstRect,
+ int radius,
+ MorphType morphType,
+ MorphDirection direction) {
+ // Half-pixel insets keep clamped samples at texel centers on the edge rows/cols.
+ float bounds[2] = { 0.0f, 1.0f };
+ SkIRect lowerSrcRect = srcRect, lowerDstRect = dstRect;
+ SkIRect middleSrcRect = srcRect, middleDstRect = dstRect;
+ SkIRect upperSrcRect = srcRect, upperDstRect = dstRect;
+ if (direction == MorphDirection::kX) {
+ bounds[0] = SkIntToScalar(srcRect.left()) + 0.5f;
+ bounds[1] = SkIntToScalar(srcRect.right()) - 0.5f;
+ lowerSrcRect.fRight = srcRect.left() + radius;
+ lowerDstRect.fRight = dstRect.left() + radius;
+ upperSrcRect.fLeft = srcRect.right() - radius;
+ upperDstRect.fLeft = dstRect.right() - radius;
+ middleSrcRect.inset(radius, 0);
+ middleDstRect.inset(radius, 0);
+ } else {
+ bounds[0] = SkIntToScalar(srcRect.top()) + 0.5f;
+ bounds[1] = SkIntToScalar(srcRect.bottom()) - 0.5f;
+ lowerSrcRect.fBottom = srcRect.top() + radius;
+ lowerDstRect.fBottom = dstRect.top() + radius;
+ upperSrcRect.fTop = srcRect.bottom() - radius;
+ upperDstRect.fTop = dstRect.bottom() - radius;
+ middleSrcRect.inset(0, radius);
+ middleDstRect.inset(0, radius);
+ }
+ if (middleSrcRect.width() <= 0) {
+ // radius covers srcRect; use bounds over entire draw
+ apply_morphology_rect(sfc, std::move(view), srcAlphaType, srcRect,
+ dstRect, radius, morphType, bounds, direction);
+ } else {
+ // Draw upper and lower margins with bounds; middle without.
+ apply_morphology_rect(sfc, view, srcAlphaType, lowerSrcRect,
+ lowerDstRect, radius, morphType, bounds, direction);
+ apply_morphology_rect(sfc, view, srcAlphaType, upperSrcRect,
+ upperDstRect, radius, morphType, bounds, direction);
+ apply_morphology_rect_no_bounds(sfc, std::move(view), srcAlphaType,
+ middleSrcRect, middleDstRect, radius, morphType, direction);
+ }
+}
+
+// GPU path: applies a 2D morphology as a separable X pass then Y pass, each
+// into a fresh fill context, and wraps the final view in a deferred
+// SkSpecialImage. Requires at least one positive radius component.
+static sk_sp<SkSpecialImage> apply_morphology(
+ GrRecordingContext* rContext, SkSpecialImage* input, const SkIRect& rect,
+ MorphType morphType, SkISize radius, const SkImageFilter_Base::Context& ctx) {
+ GrSurfaceProxyView srcView = input->view(rContext);
+ SkAlphaType srcAlphaType = input->alphaType();
+ SkASSERT(srcView.asTextureProxy());
+
+ GrSurfaceProxy* proxy = srcView.proxy();
+
+ const SkIRect dstRect = SkIRect::MakeWH(rect.width(), rect.height());
+ SkIRect srcRect = rect;
+ // Map into proxy space
+ srcRect.offset(input->subset().x(), input->subset().y());
+ SkASSERT(radius.width() > 0 || radius.height() > 0);
+
+ GrImageInfo info(ctx.grColorType(), kPremul_SkAlphaType, ctx.refColorSpace(), rect.size());
+
+ if (radius.fWidth > 0) {
+ auto dstFillContext =
+ rContext->priv().makeSFC(info,
+ "SpecialImage_ApplyMorphology_Width",
+ SkBackingFit::kApprox,
+ 1,
+ GrMipmapped::kNo,
+ proxy->isProtected(),
+ kBottomLeft_GrSurfaceOrigin);
+ if (!dstFillContext) {
+ return nullptr;
+ }
+
+ apply_morphology_pass(dstFillContext.get(), std::move(srcView), srcAlphaType,
+ srcRect, dstRect, radius.fWidth, morphType, MorphDirection::kX);
+ // Clear the band below dstRect that the Y pass will read. NOTE(review):
+ // white for erode / transparent for dilate appears to be the identity
+ // value for the subsequent min/max pass -- confirm against upstream Skia.
+ SkIRect clearRect = SkIRect::MakeXYWH(dstRect.fLeft, dstRect.fBottom,
+ dstRect.width(), radius.fHeight);
+ SkPMColor4f clearColor = MorphType::kErode == morphType
+ ? SK_PMColor4fWHITE : SK_PMColor4fTRANSPARENT;
+ dstFillContext->clear(clearRect, clearColor);
+
+ // The X-pass result becomes the source for the Y pass.
+ srcView = dstFillContext->readSurfaceView();
+ srcAlphaType = dstFillContext->colorInfo().alphaType();
+ srcRect = dstRect;
+ }
+ if (radius.fHeight > 0) {
+ auto dstFillContext =
+ rContext->priv().makeSFC(info,
+ "SpecialImage_ApplyMorphology_Height",
+ SkBackingFit::kApprox,
+ 1,
+ GrMipmapped::kNo,
+ srcView.proxy()->isProtected(),
+ kBottomLeft_GrSurfaceOrigin);
+ if (!dstFillContext) {
+ return nullptr;
+ }
+
+ apply_morphology_pass(dstFillContext.get(), std::move(srcView), srcAlphaType,
+ srcRect, dstRect, radius.fHeight, morphType, MorphDirection::kY);
+
+ srcView = dstFillContext->readSurfaceView();
+ }
+
+ return SkSpecialImage::MakeDeferredFromGpu(rContext,
+ SkIRect::MakeWH(rect.width(), rect.height()),
+ kNeedNewImageUniqueID_SpecialImage,
+ std::move(srcView),
+ info.colorInfo(),
+ input->props());
+}
+#endif
+
+namespace {
+
+// CPU 1D morphology kernels, one implementation chosen at compile time:
+// SSE2, NEON, or portable scalar. All three share the same structure -- for
+// each output position x, fold the (up to) 2*radius+1 source pixels between
+// 'lp' and 'up' with a per-channel min (erode) or max (dilate). For kY the
+// roles of the stride pairs are swapped so the same loop walks columns.
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ template<MorphType type, MorphDirection direction>
+ static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ // Clamp the radius so the initial window never runs past the buffer.
+ radius = std::min(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+ // Fold identity: 0 for max (dilate), 0xFF per byte for min (erode).
+ __m128i extreme = (type == MorphType::kDilate) ? _mm_setzero_si128()
+ : _mm_set1_epi32(0xFFFFFFFF);
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ __m128i src_pixel = _mm_cvtsi32_si128(*p);
+ extreme = (type == MorphType::kDilate) ? _mm_max_epu8(src_pixel, extreme)
+ : _mm_min_epu8(src_pixel, extreme);
+ }
+ *dptr = _mm_cvtsi128_si32(extreme);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ // Slide the window: the lower edge starts moving once x passes the
+ // radius, the upper edge stops moving near the far boundary.
+ if (x >= radius) { src += srcStrideX; }
+ if (x + radius < width - 1) { upperSrc += srcStrideX; }
+ dst += dstStrideX;
+ }
+ }
+
+#elif defined(SK_ARM_HAS_NEON)
+ template<MorphType type, MorphDirection direction>
+ static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ radius = std::min(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+ // Fold identity per byte lane: 0 for dilate, 255 for erode.
+ uint8x8_t extreme = vdup_n_u8(type == MorphType::kDilate ? 0 : 255);
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ uint8x8_t src_pixel = vreinterpret_u8_u32(vdup_n_u32(*p));
+ extreme = (type == MorphType::kDilate) ? vmax_u8(src_pixel, extreme)
+ : vmin_u8(src_pixel, extreme);
+ }
+ *dptr = vget_lane_u32(vreinterpret_u32_u8(extreme), 0);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ if (x >= radius) src += srcStrideX;
+ if (x + radius < width - 1) upperSrc += srcStrideX;
+ dst += dstStrideX;
+ }
+ }
+
+#else
+ template<MorphType type, MorphDirection direction>
+ static void morph(const SkPMColor* src, SkPMColor* dst,
+ int radius, int width, int height, int srcStride, int dstStride) {
+ const int srcStrideX = direction == MorphDirection::kX ? 1 : srcStride;
+ const int dstStrideX = direction == MorphDirection::kX ? 1 : dstStride;
+ const int srcStrideY = direction == MorphDirection::kX ? srcStride : 1;
+ const int dstStrideY = direction == MorphDirection::kX ? dstStride : 1;
+ radius = std::min(radius, width - 1);
+ const SkPMColor* upperSrc = src + radius * srcStrideX;
+ for (int x = 0; x < width; ++x) {
+ const SkPMColor* lp = src;
+ const SkPMColor* up = upperSrc;
+ SkPMColor* dptr = dst;
+ for (int y = 0; y < height; ++y) {
+ // If we're max-ing (dilate), start from 0; if min-ing (erode), start from 255.
+ const int start = (type == MorphType::kDilate) ? 0 : 255;
+ int B = start, G = start, R = start, A = start;
+ for (const SkPMColor* p = lp; p <= up; p += srcStrideX) {
+ int b = SkGetPackedB32(*p),
+ g = SkGetPackedG32(*p),
+ r = SkGetPackedR32(*p),
+ a = SkGetPackedA32(*p);
+ if (type == MorphType::kDilate) {
+ B = std::max(b, B);
+ G = std::max(g, G);
+ R = std::max(r, R);
+ A = std::max(a, A);
+ } else {
+ B = std::min(b, B);
+ G = std::min(g, G);
+ R = std::min(r, R);
+ A = std::min(a, A);
+ }
+ }
+ *dptr = SkPackARGB32(A, R, G, B);
+ dptr += dstStrideY;
+ lp += srcStrideY;
+ up += srcStrideY;
+ }
+ if (x >= radius) { src += srcStrideX; }
+ if (x + radius < width - 1) { upperSrc += srcStrideX; }
+ dst += dstStrideX;
+ }
+ }
+#endif
+} // namespace
+
+// Evaluates the morphology: filters the input, applies crop/pad, rounds the
+// CTM-mapped radii to integers (rejecting anything over kMaxRadius), then runs
+// either the GPU path (apply_morphology) or the CPU procs as separable X/Y
+// passes. A zero radius in both axes degenerates to a subset of the input.
+sk_sp<SkSpecialImage> SkMorphologyImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkIRect bounds;
+ input = this->applyCropRectAndPad(this->mapContext(ctx), input.get(), &inputOffset, &bounds);
+ if (!input) {
+ return nullptr;
+ }
+
+ SkSize radius = mappedRadius(ctx.ctm());
+ int width = SkScalarRoundToInt(radius.width());
+ int height = SkScalarRoundToInt(radius.height());
+
+ // Width (or height) must fit in a signed 32-bit int to avoid UBSAN issues (crbug.com/1018190)
+ // Further, we limit the radius to something much smaller, to avoid extremely slow draw calls:
+ // (crbug.com/1123035):
+ constexpr int kMaxRadius = 100; // (std::numeric_limits<int>::max() - 1) / 2;
+
+ if (width < 0 || height < 0 || width > kMaxRadius || height > kMaxRadius) {
+ return nullptr;
+ }
+
+ // Translate bounds into the input image's coordinate space.
+ SkIRect srcBounds = bounds;
+ srcBounds.offset(-inputOffset);
+
+ if (0 == width && 0 == height) {
+ // Zero radius: the filter is an identity over the cropped region.
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ return input->makeSubset(srcBounds);
+ }
+
+#if defined(SK_GANESH)
+ if (ctx.gpuBacked()) {
+ auto context = ctx.getContext();
+
+ // Ensure the input is in the destination color space. Typically applyCropRect will have
+ // called pad_image to account for our dilation of bounds, so the result will already be
+ // moved to the destination color space. If a filter DAG avoids that, then we use this
+ // fall-back, which saves us from having to do the xform during the filter itself.
+ input = ImageToColorSpace(input.get(), ctx.colorType(), ctx.colorSpace(),
+ ctx.surfaceProps());
+
+ sk_sp<SkSpecialImage> result(apply_morphology(context, input.get(), srcBounds, fType,
+ SkISize::Make(width, height), ctx));
+ if (result) {
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+ }
+ return result;
+ }
+#endif
+
+ // CPU fallback: the procs below only handle 8888 pixels.
+ SkBitmap inputBM;
+
+ if (!input->getROPixels(&inputBM)) {
+ return nullptr;
+ }
+
+ if (inputBM.colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+
+ SkImageInfo info = SkImageInfo::Make(bounds.size(), inputBM.colorType(), inputBM.alphaType());
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ SkMorphologyImageFilter::Proc procX, procY;
+
+ if (MorphType::kDilate == fType) {
+ procX = &morph<MorphType::kDilate, MorphDirection::kX>;
+ procY = &morph<MorphType::kDilate, MorphDirection::kY>;
+ } else {
+ procX = &morph<MorphType::kErode, MorphDirection::kX>;
+ procY = &morph<MorphType::kErode, MorphDirection::kY>;
+ }
+
+ if (width > 0 && height > 0) {
+ // Separable 2D case: X pass into a temp bitmap, then Y pass into dst.
+ SkBitmap tmp;
+ if (!tmp.tryAllocPixels(info)) {
+ return nullptr;
+ }
+
+ call_proc_X(procX, inputBM, &tmp, width, srcBounds);
+ SkIRect tmpBounds = SkIRect::MakeWH(srcBounds.width(), srcBounds.height());
+ call_proc_Y(procY,
+ tmp.getAddr32(tmpBounds.left(), tmpBounds.top()), tmp.rowBytesAsPixels(),
+ &dst, height, tmpBounds);
+ } else if (width > 0) {
+ call_proc_X(procX, inputBM, &dst, width, srcBounds);
+ } else if (height > 0) {
+ call_proc_Y(procY,
+ inputBM.getAddr32(srcBounds.left(), srcBounds.top()),
+ inputBM.rowBytesAsPixels(),
+ &dst, height, srcBounds);
+ }
+ offset->fX = bounds.left();
+ offset->fY = bounds.top();
+
+ return SkSpecialImage::MakeFromRaster(SkIRect::MakeWH(bounds.width(), bounds.height()),
+ dst, ctx.surfaceProps());
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkPictureImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkPictureImageFilter.cpp
new file mode 100644
index 0000000000..17fc66b1b9
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkPictureImageFilter.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <utility>
+
+namespace {
+
+class SkPictureImageFilter final : public SkImageFilter_Base {
+public:
+ SkPictureImageFilter(sk_sp<SkPicture> picture, const SkRect& cropRect)
+ : INHERITED(nullptr, 0, nullptr)
+ , fPicture(std::move(picture))
+ , fCropRect(cropRect) {}
+
+protected:
+ /* Constructs an SkPictureImageFilter object from an SkReadBuffer.
+ * Note: If the SkPictureImageFilter object construction requires bitmap
+ * decoding, the decoder must be set on the SkReadBuffer parameter by calling
+ * SkReadBuffer::setBitmapDecoder() before calling this constructor.
+ * @param SkReadBuffer Serialized picture data.
+ */
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+ SkRect computeFastBounds(const SkRect& src) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+
+private:
+ friend void ::SkRegisterPictureImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkPictureImageFilter)
+
+ sk_sp<SkPicture> fPicture;
+ SkRect fCropRect;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::Picture(sk_sp<SkPicture> pic, const SkRect& targetRect) {
+ return sk_sp<SkImageFilter>(new SkPictureImageFilter(std::move(pic), targetRect));
+}
+
+void SkRegisterPictureImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkPictureImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkPictureImageFilterImpl", SkPictureImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkPictureImageFilter::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkPicture> picture;
+ SkRect cropRect;
+
+ if (buffer.readBool()) {
+ picture = SkPicturePriv::MakeFromBuffer(buffer);
+ }
+ buffer.readRect(&cropRect);
+
+ return SkImageFilters::Picture(std::move(picture), cropRect);
+}
+
+void SkPictureImageFilter::flatten(SkWriteBuffer& buffer) const {
+ bool hasPicture = (fPicture != nullptr);
+ buffer.writeBool(hasPicture);
+ if (hasPicture) {
+ SkPicturePriv::Flatten(fPicture, buffer);
+ }
+ buffer.writeRect(fCropRect);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSpecialImage> SkPictureImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ if (!fPicture) {
+ return nullptr;
+ }
+
+ SkRect floatBounds;
+ ctx.ctm().mapRect(&floatBounds, fCropRect);
+ SkIRect bounds = floatBounds.roundOut();
+ if (!bounds.intersect(ctx.clipBounds())) {
+ return nullptr;
+ }
+
+ SkASSERT(!bounds.isEmpty());
+
+ // Given the standard usage of the picture image filter (i.e., to render content at a fixed
+ // resolution that, most likely, differs from the screen's) disable LCD text by removing any
+ // knowledge of the pixel geometry.
+ SkSurfaceProps props = ctx.surfaceProps().cloneWithPixelGeometry(kUnknown_SkPixelGeometry);
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size(), &props));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkASSERT(kUnknown_SkPixelGeometry == surf->props().pixelGeometry());
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+ canvas->clear(0x0);
+
+ canvas->translate(-SkIntToScalar(bounds.fLeft), -SkIntToScalar(bounds.fTop));
+ canvas->concat(ctx.ctm());
+ canvas->drawPicture(fPicture);
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkRect SkPictureImageFilter::computeFastBounds(const SkRect& src) const {
+ return fCropRect;
+}
+
+SkIRect SkPictureImageFilter::onFilterNodeBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection direction,
+ const SkIRect* inputRect) const {
+ if (kReverse_MapDirection == direction) {
+ return INHERITED::onFilterNodeBounds(src, ctm, direction, inputRect);
+ }
+
+ SkRect dstRect = fCropRect;
+ ctm.mapRect(&dstRect);
+ return dstRect.roundOut();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkRuntimeImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkRuntimeImageFilter.cpp
new file mode 100644
index 0000000000..2f113cda3b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkRuntimeImageFilter.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/effects/imagefilters/SkRuntimeImageFilter.h"
+
+#ifdef SK_ENABLE_SKSL
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkString.h"
+#include "include/effects/SkImageFilters.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/SkSpinlock.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkImageFilterTypes.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <cstddef>
+#include <string>
+#include <string_view>
+#include <utility>
+
+class SkRuntimeImageFilter final : public SkImageFilter_Base {
+public:
+ SkRuntimeImageFilter(sk_sp<SkRuntimeEffect> effect,
+ sk_sp<SkData> uniforms,
+ sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, /*cropRect=*/nullptr)
+ , fShaderBuilder(std::move(effect), std::move(uniforms)) {
+ std::string_view childName = fShaderBuilder.effect()->children().front().name;
+ fChildShaderNames.push_back(SkString(childName));
+ }
+ SkRuntimeImageFilter(const SkRuntimeShaderBuilder& builder,
+ std::string_view childShaderNames[],
+ const sk_sp<SkImageFilter> inputs[],
+ int inputCount)
+ : INHERITED(inputs, inputCount, /*cropRect=*/nullptr)
+ , fShaderBuilder(builder) {
+ fChildShaderNames.reserve_back(inputCount);
+ for (int i = 0; i < inputCount; i++) {
+ fChildShaderNames.push_back(SkString(childShaderNames[i]));
+ }
+ }
+
+ bool onAffectsTransparentBlack() const override { return true; }
+ MatrixCapability onGetCTMCapability() const override { return MatrixCapability::kTranslate; }
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+private:
+ friend void ::SkRegisterRuntimeImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkRuntimeImageFilter)
+
+ mutable SkSpinlock fShaderBuilderLock;
+ mutable SkRuntimeShaderBuilder fShaderBuilder;
+ SkSTArray<1, SkString> fChildShaderNames;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+sk_sp<SkImageFilter> SkMakeRuntimeImageFilter(sk_sp<SkRuntimeEffect> effect,
+ sk_sp<SkData> uniforms,
+ sk_sp<SkImageFilter> input) {
+ // Rather than replicate all of the checks from makeShader here, just try to create a shader
+ // once, to determine if everything is valid.
+ sk_sp<SkShader> child = nullptr;
+ auto shader = effect->makeShader(uniforms, &child, 1);
+ if (!shader) {
+ // Could be wrong signature, wrong uniform block size, wrong number/type of children, etc...
+ return nullptr;
+ }
+
+ return sk_sp<SkImageFilter>(
+ new SkRuntimeImageFilter(std::move(effect), std::move(uniforms), std::move(input)));
+}
+
+void SkRegisterRuntimeImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkRuntimeImageFilter);
+}
+
+sk_sp<SkFlattenable> SkRuntimeImageFilter::CreateProc(SkReadBuffer& buffer) {
+ // We don't know how many inputs to expect yet. Passing -1 allows any number of children.
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, -1);
+ if (common.cropRect()) {
+ return nullptr;
+ }
+
+ // Read the SkSL string and convert it into a runtime effect
+ SkString sksl;
+ buffer.readString(&sksl);
+ auto effect = SkMakeCachedRuntimeEffect(SkRuntimeEffect::MakeForShader, std::move(sksl));
+ if (!buffer.validate(effect != nullptr)) {
+ return nullptr;
+ }
+
+ // Read the uniform data and make sure it matches the size from the runtime effect
+ sk_sp<SkData> uniforms = buffer.readByteArrayAsData();
+ if (!buffer.validate(uniforms->size() == effect->uniformSize())) {
+ return nullptr;
+ }
+
+ // Read the child shader names
+ SkSTArray<4, std::string_view> childShaderNames;
+ SkSTArray<4, SkString> childShaderNameStrings;
+ childShaderNames.resize(common.inputCount());
+ childShaderNameStrings.resize(common.inputCount());
+ for (int i = 0; i < common.inputCount(); i++) {
+ buffer.readString(&childShaderNameStrings[i]);
+ childShaderNames[i] = childShaderNameStrings[i].c_str();
+ }
+
+ SkRuntimeShaderBuilder builder(std::move(effect), std::move(uniforms));
+
+ // Populate the builder with the corresponding children
+ for (const SkRuntimeEffect::Child& child : builder.effect()->children()) {
+ std::string_view name = child.name;
+ switch (child.type) {
+ case SkRuntimeEffect::ChildType::kBlender: {
+ builder.child(name) = buffer.readBlender();
+ break;
+ }
+ case SkRuntimeEffect::ChildType::kColorFilter: {
+ builder.child(name) = buffer.readColorFilter();
+ break;
+ }
+ case SkRuntimeEffect::ChildType::kShader: {
+ builder.child(name) = buffer.readShader();
+ break;
+ }
+ }
+ }
+
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+
+ return SkImageFilters::RuntimeShader(builder, childShaderNames.data(),
+ common.inputs(), common.inputCount());
+}
+
+void SkRuntimeImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ fShaderBuilderLock.acquire();
+ buffer.writeString(fShaderBuilder.effect()->source().c_str());
+ buffer.writeDataAsByteArray(fShaderBuilder.uniforms().get());
+ for (const SkString& name : fChildShaderNames) {
+ buffer.writeString(name.c_str());
+ }
+ for (size_t x = 0; x < fShaderBuilder.children().size(); x++) {
+ buffer.writeFlattenable(fShaderBuilder.children()[x].flattenable());
+ }
+ fShaderBuilderLock.release();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSpecialImage> SkRuntimeImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIRect outputBounds = SkIRect(ctx.desiredOutput());
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(outputBounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkMatrix ctm = ctx.ctm();
+ SkMatrix inverse;
+ SkAssertResult(ctm.invert(&inverse));
+
+ const int inputCount = this->countInputs();
+ SkASSERT(inputCount == fChildShaderNames.size());
+
+ SkSTArray<1, sk_sp<SkShader>> inputShaders;
+ for (int i = 0; i < inputCount; i++) {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(i, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkMatrix localM = inverse * SkMatrix::Translate(inputOffset);
+ sk_sp<SkShader> inputShader =
+ input->asShader(SkSamplingOptions(SkFilterMode::kLinear), localM);
+ SkASSERT(inputShader);
+ inputShaders.push_back(std::move(inputShader));
+ }
+
+ // lock the mutation of the builder and creation of the shader so that the builder's state is
+ // const and is safe for multi-threaded access.
+ fShaderBuilderLock.acquire();
+ for (int i = 0; i < inputCount; i++) {
+ fShaderBuilder.child(fChildShaderNames[i].c_str()) = inputShaders[i];
+ }
+ sk_sp<SkShader> shader = fShaderBuilder.makeShader();
+ // Remove the inputs from the builder to avoid unnecessarily prolonging the shader's lifetime
+ for (int i = 0; i < inputCount; i++) {
+ fShaderBuilder.child(fChildShaderNames[i].c_str()) = nullptr;
+ }
+ fShaderBuilderLock.release();
+
+ SkASSERT(shader.get());
+
+ SkPaint paint;
+ paint.setShader(std::move(shader));
+ paint.setBlendMode(SkBlendMode::kSrc);
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ // Translate from layer space into surf's image space
+ canvas->translate(-outputBounds.fLeft, -outputBounds.fTop);
+ // Ensure shader parameters are relative to parameter space, not layer space
+ canvas->concat(ctx.ctm());
+
+ canvas->drawPaint(paint);
+
+ *offset = outputBounds.topLeft();
+ return surf->makeImageSnapshot();
+}
+
+static bool child_is_shader(const SkRuntimeEffect::Child* child) {
+ return child && child->type == SkRuntimeEffect::ChildType::kShader;
+}
+
+sk_sp<SkImageFilter> SkImageFilters::RuntimeShader(const SkRuntimeShaderBuilder& builder,
+ std::string_view childShaderName,
+ sk_sp<SkImageFilter> input) {
+ // If no childShaderName is provided, check to see if we can implicitly assign it to the only
+ // child in the effect.
+ if (childShaderName.empty()) {
+ auto children = builder.effect()->children();
+ if (children.size() != 1) {
+ return nullptr;
+ }
+ childShaderName = children.front().name;
+ }
+
+ return SkImageFilters::RuntimeShader(builder, &childShaderName, &input, 1);
+}
+
+sk_sp<SkImageFilter> SkImageFilters::RuntimeShader(const SkRuntimeShaderBuilder& builder,
+ std::string_view childShaderNames[],
+ const sk_sp<SkImageFilter> inputs[],
+ int inputCount) {
+ for (int i = 0; i < inputCount; i++) {
+ std::string_view name = childShaderNames[i];
+ // All names must be non-empty, and present as a child shader in the effect:
+ if (name.empty() || !child_is_shader(builder.effect()->findChild(name))) {
+ return nullptr;
+ }
+
+ // We don't allow duplicates, either:
+ for (int j = 0; j < i; j++) {
+ if (name == childShaderNames[j]) {
+ return nullptr;
+ }
+ }
+ }
+
+ return sk_sp<SkImageFilter>(new SkRuntimeImageFilter(builder, childShaderNames,
+ inputs, inputCount));
+}
+
+#endif // SK_ENABLE_SKSL
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkRuntimeImageFilter.h b/gfx/skia/skia/src/effects/imagefilters/SkRuntimeImageFilter.h
new file mode 100644
index 0000000000..e66a61c44b
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkRuntimeImageFilter.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRuntimeImageFilter_DEFINED
+#define SkRuntimeImageFilter_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+
+class SkData;
+class SkImageFilter;
+class SkRuntimeEffect;
+
+SK_API sk_sp<SkImageFilter> SkMakeRuntimeImageFilter(sk_sp<SkRuntimeEffect> effect,
+ sk_sp<SkData> uniforms,
+ sk_sp<SkImageFilter> input);
+
+#endif
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkShaderImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkShaderImageFilter.cpp
new file mode 100644
index 0000000000..a32eca4d46
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkShaderImageFilter.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <utility>
+
+namespace {
+
+class SkShaderImageFilter final : public SkImageFilter_Base {
+public:
+ SkShaderImageFilter(sk_sp<SkShader> shader, SkImageFilters::Dither dither, const SkRect* rect)
+ : INHERITED(nullptr, 0, rect)
+ , fShader(std::move(shader))
+ , fDither(dither) {}
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+private:
+ friend void ::SkRegisterShaderImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkShaderImageFilter)
+
+ bool onAffectsTransparentBlack() const override { return true; }
+
+ sk_sp<SkShader> fShader;
+ SkImageFilters::Dither fDither;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+sk_sp<SkImageFilter> SkImageFilters::Shader(sk_sp<SkShader> shader,
+ Dither dither,
+ const CropRect& cropRect) {
+ return sk_sp<SkImageFilter>(new SkShaderImageFilter(std::move(shader), dither, cropRect));
+}
+
+void SkRegisterShaderImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkShaderImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkPaintImageFilter", SkShaderImageFilter::CreateProc);
+ SkFlattenable::Register("SkPaintImageFilterImpl", SkShaderImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkShaderImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 0);
+ sk_sp<SkShader> shader;
+ bool dither;
+ if (buffer.isVersionLT(SkPicturePriv::kShaderImageFilterSerializeShader)) {
+ // The old implementation stored an entire SkPaint, but we only need the SkShader and dither
+ // boolean. We could fail if the paint stores more effects than that, but this is simpler.
+ SkPaint paint = buffer.readPaint();
+ shader = paint.getShader() ? paint.refShader()
+ : SkShaders::Color(paint.getColor4f(), nullptr);
+ dither = paint.isDither();
+ } else {
+ shader = buffer.readShader();
+ dither = buffer.readBool();
+ }
+ return SkImageFilters::Shader(std::move(shader),
+ SkImageFilters::Dither(dither),
+ common.cropRect());
+}
+
+void SkShaderImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeFlattenable(fShader.get());
+ buffer.writeBool(fDither == SkImageFilters::Dither::kYes);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSpecialImage> SkShaderImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIRect bounds;
+ const SkIRect srcBounds = SkIRect::MakeWH(ctx.sourceImage()->width(),
+ ctx.sourceImage()->height());
+ if (!this->applyCropRect(ctx, srcBounds, &bounds)) {
+ return nullptr;
+ }
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(bounds.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ canvas->clear(0x0);
+
+ SkMatrix matrix(ctx.ctm());
+ matrix.postTranslate(SkIntToScalar(-bounds.left()), SkIntToScalar(-bounds.top()));
+ SkRect rect = SkRect::MakeIWH(bounds.width(), bounds.height());
+ SkMatrix inverse;
+ if (matrix.invert(&inverse)) {
+ inverse.mapRect(&rect);
+ }
+ canvas->setMatrix(matrix);
+ if (rect.isFinite()) {
+ SkPaint paint;
+ paint.setShader(fShader);
+ paint.setDither(fDither == SkImageFilters::Dither::kYes);
+ canvas->drawRect(rect, paint);
+ }
+
+ offset->fX = bounds.fLeft;
+ offset->fY = bounds.fTop;
+ return surf->makeImageSnapshot();
+}
diff --git a/gfx/skia/skia/src/effects/imagefilters/SkTileImageFilter.cpp b/gfx/skia/skia/src/effects/imagefilters/SkTileImageFilter.cpp
new file mode 100644
index 0000000000..0a1d49ddf4
--- /dev/null
+++ b/gfx/skia/skia/src/effects/imagefilters/SkTileImageFilter.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+#include "include/effects/SkImageFilters.h"
+#include "src/core/SkImageFilterTypes.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/core/SkSpecialSurface.h"
+#include "src/core/SkValidationUtils.h"
+#include "src/core/SkWriteBuffer.h"
+
+#include <utility>
+
+namespace {
+
+class SkTileImageFilter final : public SkImageFilter_Base {
+public:
+ SkTileImageFilter(const SkRect& srcRect, const SkRect& dstRect, sk_sp<SkImageFilter> input)
+ : INHERITED(&input, 1, nullptr)
+ , fSrcRect(srcRect)
+ , fDstRect(dstRect) {}
+
+ SkIRect onFilterBounds(const SkIRect& src, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ SkIRect onFilterNodeBounds(const SkIRect&, const SkMatrix& ctm,
+ MapDirection, const SkIRect* inputRect) const override;
+ SkRect computeFastBounds(const SkRect& src) const override;
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ sk_sp<SkSpecialImage> onFilterImage(const Context&, SkIPoint* offset) const override;
+
+private:
+ friend void ::SkRegisterTileImageFilterFlattenable();
+ SK_FLATTENABLE_HOOKS(SkTileImageFilter)
+
+ SkRect fSrcRect;
+ SkRect fDstRect;
+
+ using INHERITED = SkImageFilter_Base;
+};
+
+} // end namespace
+
+
+sk_sp<SkImageFilter> SkImageFilters::Tile(const SkRect& src,
+ const SkRect& dst,
+ sk_sp<SkImageFilter> input) {
+ if (!SkIsValidRect(src) || !SkIsValidRect(dst)) {
+ return nullptr;
+ }
+ if (src.width() == dst.width() && src.height() == dst.height()) {
+ SkRect ir = dst;
+ if (!ir.intersect(src)) {
+ return input;
+ }
+ return SkImageFilters::Offset(dst.x() - src.x(), dst.y() - src.y(),
+ std::move(input), &ir);
+ }
+ return sk_sp<SkImageFilter>(new SkTileImageFilter(src, dst, std::move(input)));
+}
+
+void SkRegisterTileImageFilterFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkTileImageFilter);
+ // TODO (michaelludwig) - Remove after grace period for SKPs to stop using old name
+ SkFlattenable::Register("SkTileImageFilterImpl", SkTileImageFilter::CreateProc);
+}
+
+sk_sp<SkFlattenable> SkTileImageFilter::CreateProc(SkReadBuffer& buffer) {
+ SK_IMAGEFILTER_UNFLATTEN_COMMON(common, 1);
+ SkRect src, dst;
+ buffer.readRect(&src);
+ buffer.readRect(&dst);
+ return SkImageFilters::Tile(src, dst, common.getInput(0));
+}
+
+void SkTileImageFilter::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writeRect(fSrcRect);
+ buffer.writeRect(fDstRect);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSpecialImage> SkTileImageFilter::onFilterImage(const Context& ctx,
+ SkIPoint* offset) const {
+ SkIPoint inputOffset = SkIPoint::Make(0, 0);
+ sk_sp<SkSpecialImage> input(this->filterInput(0, ctx, &inputOffset));
+ if (!input) {
+ return nullptr;
+ }
+
+ SkRect dstRect;
+ ctx.ctm().mapRect(&dstRect, fDstRect);
+ if (!dstRect.intersect(SkRect::Make(ctx.clipBounds()))) {
+ return nullptr;
+ }
+
+ const SkIRect dstIRect = skif::RoundOut(dstRect);
+ if (!fSrcRect.width() || !fSrcRect.height() || !dstIRect.width() || !dstIRect.height()) {
+ return nullptr;
+ }
+
+ SkRect srcRect;
+ ctx.ctm().mapRect(&srcRect, fSrcRect);
+ SkIRect srcIRect = skif::RoundOut(srcRect);
+ srcIRect.offset(-inputOffset);
+ const SkIRect inputBounds = SkIRect::MakeWH(input->width(), input->height());
+
+ if (!SkIRect::Intersects(srcIRect, inputBounds)) {
+ return nullptr;
+ }
+
+ // We create an SkImage here b.c. it needs to be a tight fit for the tiling
+ sk_sp<SkImage> subset;
+ if (inputBounds.contains(srcIRect)) {
+ subset = input->asImage(&srcIRect);
+ } else {
+ sk_sp<SkSurface> surf(input->makeTightSurface(ctx.colorType(), ctx.colorSpace(),
+ srcIRect.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+
+ input->draw(canvas,
+ SkIntToScalar(inputOffset.x()), SkIntToScalar(inputOffset.y()),
+ SkSamplingOptions(), &paint);
+
+ subset = surf->makeImageSnapshot();
+ }
+ if (!subset) {
+ return nullptr;
+ }
+ SkASSERT(subset->width() == srcIRect.width());
+ SkASSERT(subset->height() == srcIRect.height());
+
+ sk_sp<SkSpecialSurface> surf(ctx.makeSurface(dstIRect.size()));
+ if (!surf) {
+ return nullptr;
+ }
+
+ SkCanvas* canvas = surf->getCanvas();
+ SkASSERT(canvas);
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ paint.setShader(subset->makeShader(SkTileMode::kRepeat, SkTileMode::kRepeat,
+ SkSamplingOptions()));
+ canvas->translate(-dstRect.fLeft, -dstRect.fTop);
+ canvas->drawRect(dstRect, paint);
+ offset->fX = dstIRect.fLeft;
+ offset->fY = dstIRect.fTop;
+ return surf->makeImageSnapshot();
+}
+
+SkIRect SkTileImageFilter::onFilterNodeBounds(
+ const SkIRect& src, const SkMatrix& ctm, MapDirection dir, const SkIRect* inputRect) const {
+ SkRect rect = kReverse_MapDirection == dir ? fSrcRect : fDstRect;
+ ctm.mapRect(&rect);
+ return rect.roundOut();
+}
+
+SkIRect SkTileImageFilter::onFilterBounds(const SkIRect& src, const SkMatrix&,
+ MapDirection, const SkIRect* inputRect) const {
+ // Don't recurse into inputs.
+ return src;
+}
+
+SkRect SkTileImageFilter::computeFastBounds(const SkRect& src) const {
+ return fDstRect;
+}
diff --git a/gfx/skia/skia/src/encode/SkEncoder.cpp b/gfx/skia/skia/src/encode/SkEncoder.cpp
new file mode 100644
index 0000000000..a2d6f05b46
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkEncoder.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/encode/SkEncoder.h"
+
+#include "include/private/base/SkAssert.h"
+
+bool SkEncoder::encodeRows(int numRows) {
+ SkASSERT(numRows > 0 && fCurrRow < fSrc.height());
+ if (numRows <= 0 || fCurrRow >= fSrc.height()) {
+ return false;
+ }
+
+ if (fCurrRow + numRows > fSrc.height()) {
+ numRows = fSrc.height() - fCurrRow;
+ }
+
+ if (!this->onEncodeRows(numRows)) {
+ // If we fail, short circuit any future calls.
+ fCurrRow = fSrc.height();
+ return false;
+ }
+
+ return true;
+}
diff --git a/gfx/skia/skia/src/encode/SkICC.cpp b/gfx/skia/skia/src/encode/SkICC.cpp
new file mode 100644
index 0000000000..7163563d61
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkICC.cpp
@@ -0,0 +1,762 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/encode/SkICC.h"
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "modules/skcms/skcms.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/base/SkEndian.h"
+#include "src/base/SkUtils.h"
+#include "src/core/SkMD5.h"
+#include "src/encode/SkICCPriv.h"
+
+#include <cmath>
+#include <cstring>
+#include <string>
+#include <utility>
+#include <vector>
+
// The number of input and output channels.
static constexpr size_t kNumChannels = 3;

// The D50 illuminant (the white point of the ICC profile connection space),
// used both for the header illuminant and the wtpt tag below.
constexpr float kD50_x = 0.9642f;
constexpr float kD50_y = 1.0000f;
constexpr float kD50_z = 0.8249f;
+
// This is like SkFloatToFixed, but rounds to nearest, preserving as much accuracy as possible
// when going float -> fixed -> float (it has the same accuracy when going fixed -> float -> fixed).
// The use of double is necessary to accommodate the full potential 32-bit mantissa of the 16.16
// SkFixed value, and so avoiding rounding problems with float. Also, see the comment in SkFixed.h.
// Out-of-range inputs saturate rather than overflow (via sk_float_saturate2int).
static SkFixed float_round_to_fixed(float x) {
    return sk_float_saturate2int((float)floor((double)x * SK_Fixed1 + 0.5));
}
+
// Map a [0, 1] float to a 16-bit unsigned-normalized value, rounding to
// nearest and clamping out-of-range inputs to the representable extremes.
static uint16_t float_round_to_unorm16(float x) {
    // Scale into the 16-bit range and round to nearest.
    const float scaled = x * 65535.f + 0.5;
    if (scaled < 0) {
        return 0;
    }
    if (scaled > 65535) {
        return 65535;
    }
    return static_cast<uint16_t>(scaled);
}
+
// In-memory image of the ICC profile header (plus the tag count that
// immediately follows it). All multi-byte fields are stored big-endian,
// hence the SkEndian_SwapBE* conversions on the default values; the struct
// is memcpy'd directly into the output profile.
struct ICCHeader {
    // Size of the profile (computed)
    uint32_t size;

    // Preferred CMM type (ignored)
    uint32_t cmm_type = 0;

    // Version 4.3 or 4.4 if CICP is included.
    uint32_t version = SkEndian_SwapBE32(0x04300000);

    // Display device profile
    uint32_t profile_class = SkEndian_SwapBE32(kDisplay_Profile);

    // RGB input color space;
    uint32_t data_color_space = SkEndian_SwapBE32(kRGB_ColorSpace);

    // Profile connection space.
    uint32_t pcs = SkEndian_SwapBE32(kXYZ_PCSSpace);

    // Date and time (ignored)
    uint16_t creation_date_year = SkEndian_SwapBE16(2016);
    uint16_t creation_date_month = SkEndian_SwapBE16(1);  // 1-12
    uint16_t creation_date_day = SkEndian_SwapBE16(1);  // 1-31
    uint16_t creation_date_hours = 0;  // 0-23
    uint16_t creation_date_minutes = 0;  // 0-59
    uint16_t creation_date_seconds = 0;  // 0-59

    // Profile signature
    uint32_t signature = SkEndian_SwapBE32(kACSP_Signature);

    // Platform target (ignored)
    uint32_t platform = 0;

    // Flags: not embedded, can be used independently
    uint32_t flags = 0x00000000;

    // Device manufacturer (ignored)
    uint32_t device_manufacturer = 0;

    // Device model (ignored)
    uint32_t device_model = 0;

    // Device attributes (ignored)
    uint8_t device_attributes[8] = {0};

    // Relative colorimetric rendering intent
    uint32_t rendering_intent = SkEndian_SwapBE32(1);

    // D50 standard illuminant (X, Y, Z)
    uint32_t illuminant_X = SkEndian_SwapBE32(float_round_to_fixed(kD50_x));
    uint32_t illuminant_Y = SkEndian_SwapBE32(float_round_to_fixed(kD50_y));
    uint32_t illuminant_Z = SkEndian_SwapBE32(float_round_to_fixed(kD50_z));

    // Profile creator (ignored)
    uint32_t creator = 0;

    // Profile id checksum (ignored)
    uint8_t profile_id[16] = {0};

    // Reserved (ignored)
    uint8_t reserved[28] = {0};

    // Technically not part of header, but required
    uint32_t tag_count = 0;
};
+
+static sk_sp<SkData> write_xyz_tag(float x, float y, float z) {
+ uint32_t data[] = {
+ SkEndian_SwapBE32(kXYZ_PCSSpace),
+ 0,
+ SkEndian_SwapBE32(float_round_to_fixed(x)),
+ SkEndian_SwapBE32(float_round_to_fixed(y)),
+ SkEndian_SwapBE32(float_round_to_fixed(z)),
+ };
+ return SkData::MakeWithCopy(data, sizeof(data));
+}
+
// Absolute-tolerance float comparison.
//
// A note on why this tolerance was chosen: transfer_fn_almost_equal() uses
// 0.001f, which is not tight enough to distinguish similar transfer functions
// (e.g. gamma 2.2 vs sRGB), while 0.0f treats two encodings of the same
// colorspace as different. Experimentation with example files led to 2^-11.
static bool nearly_equal(float x, float y) {
    static constexpr float kTolerance = 1.0f / (1 << 11);
    const float delta = ::fabsf(x - y);
    return delta <= kTolerance;
}
+
+static bool nearly_equal(const skcms_TransferFunction& u,
+ const skcms_TransferFunction& v) {
+ return nearly_equal(u.g, v.g)
+ && nearly_equal(u.a, v.a)
+ && nearly_equal(u.b, v.b)
+ && nearly_equal(u.c, v.c)
+ && nearly_equal(u.d, v.d)
+ && nearly_equal(u.e, v.e)
+ && nearly_equal(u.f, v.f);
+}
+
+static bool nearly_equal(const skcms_Matrix3x3& u, const skcms_Matrix3x3& v) {
+ for (int r = 0; r < 3; r++) {
+ for (int c = 0; c < 3; c++) {
+ if (!nearly_equal(u.vals[r][c], v.vals[r][c])) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
// CICP ColourPrimaries code points for the gamuts recognized by
// get_cicp_primaries() below (presumably the ITU-T H.273 values — confirm
// against the spec): 1 = sRGB/BT.709, 12 = Display P3, 9 = Rec2020.
static constexpr uint32_t kCICPPrimariesSRGB = 1;
static constexpr uint32_t kCICPPrimariesP3 = 12;
static constexpr uint32_t kCICPPrimariesRec2020 = 9;
+
+static uint32_t get_cicp_primaries(const skcms_Matrix3x3& toXYZD50) {
+ if (nearly_equal(toXYZD50, SkNamedGamut::kSRGB)) {
+ return kCICPPrimariesSRGB;
+ } else if (nearly_equal(toXYZD50, SkNamedGamut::kDisplayP3)) {
+ return kCICPPrimariesP3;
+ } else if (nearly_equal(toXYZD50, SkNamedGamut::kRec2020)) {
+ return kCICPPrimariesRec2020;
+ }
+ return 0;
+}
+
// CICP TransferCharacteristics code points for the transfer functions
// recognized by get_cicp_trfn() below (presumably ITU-T H.273 values —
// confirm against the spec).
static constexpr uint32_t kCICPTrfnSRGB = 1;
static constexpr uint32_t kCICPTrfn2Dot2 = 4;
static constexpr uint32_t kCICPTrfnLinear = 8;
static constexpr uint32_t kCICPTrfnPQ = 16;
static constexpr uint32_t kCICPTrfnHLG = 18;
+
+static uint32_t get_cicp_trfn(const skcms_TransferFunction& fn) {
+ switch (skcms_TransferFunction_getType(&fn)) {
+ case skcms_TFType_Invalid:
+ return 0;
+ case skcms_TFType_sRGBish:
+ if (nearly_equal(fn, SkNamedTransferFn::kSRGB)) {
+ return kCICPTrfnSRGB;
+ } else if (nearly_equal(fn, SkNamedTransferFn::k2Dot2)) {
+ return kCICPTrfn2Dot2;
+ } else if (nearly_equal(fn, SkNamedTransferFn::kLinear)) {
+ return kCICPTrfnLinear;
+ }
+ break;
+ case skcms_TFType_PQish:
+ // All PQ transfer functions are mapped to the single PQ value,
+ // ignoring their SDR white level.
+ return kCICPTrfnPQ;
+ case skcms_TFType_HLGish:
+ // All HLG transfer functions are mapped to the single HLG value.
+ return kCICPTrfnHLG;
+ case skcms_TFType_HLGinvish:
+ return 0;
+ }
+ return 0;
+}
+
+static std::string get_desc_string(const skcms_TransferFunction& fn,
+ const skcms_Matrix3x3& toXYZD50) {
+ const uint32_t cicp_trfn = get_cicp_trfn(fn);
+ const uint32_t cicp_primaries = get_cicp_primaries(toXYZD50);
+
+ // Use a unique string for sRGB.
+ if (cicp_trfn == kCICPPrimariesSRGB && cicp_primaries == kCICPTrfnSRGB) {
+ return "sRGB";
+ }
+
+ // If available, use the named CICP primaries and transfer function.
+ if (cicp_primaries && cicp_trfn) {
+ std::string result;
+ switch (cicp_primaries) {
+ case kCICPPrimariesSRGB:
+ result += "sRGB";
+ break;
+ case kCICPPrimariesP3:
+ result += "Display P3";
+ break;
+ case kCICPPrimariesRec2020:
+ result += "Rec2020";
+ break;
+ default:
+ result += "Unknown";
+ break;
+ }
+ result += " Gamut with ";
+ switch (cicp_trfn) {
+ case kCICPTrfnSRGB:
+ result += "sRGB";
+ break;
+ case kCICPTrfnLinear:
+ result += "Linear";
+ break;
+ case kCICPTrfn2Dot2:
+ result += "2.2";
+ break;
+ case kCICPTrfnPQ:
+ result += "PQ";
+ break;
+ case kCICPTrfnHLG:
+ result += "HLG";
+ break;
+ default:
+ result += "Unknown";
+ break;
+ }
+ result += " Transfer";
+ return result;
+ }
+
+ // Fall back to a prefix plus md5 hash.
+ SkMD5 md5;
+ md5.write(&toXYZD50, sizeof(toXYZD50));
+ md5.write(&fn, sizeof(fn));
+ SkMD5::Digest digest = md5.finish();
+ std::string md5_hexstring(2 * sizeof(SkMD5::Digest), ' ');
+ for (unsigned i = 0; i < sizeof(SkMD5::Digest); ++i) {
+ uint8_t byte = digest.data[i];
+ md5_hexstring[2 * i + 0] = SkHexadecimalDigits::gUpper[byte >> 4];
+ md5_hexstring[2 * i + 1] = SkHexadecimalDigits::gUpper[byte & 0xF];
+ }
+ return "Google/Skia/" + md5_hexstring;
+}
+
+static sk_sp<SkData> write_text_tag(const char* text) {
+ uint32_t text_length = strlen(text);
+ uint32_t header[] = {
+ SkEndian_SwapBE32(kTAG_TextType), // Type signature
+ 0, // Reserved
+ SkEndian_SwapBE32(1), // Number of records
+ SkEndian_SwapBE32(12), // Record size (must be 12)
+ SkEndian_SwapBE32(SkSetFourByteTag('e', 'n', 'U', 'S')), // English USA
+ SkEndian_SwapBE32(2 * text_length), // Length of string in bytes
+ SkEndian_SwapBE32(28), // Offset of string
+ };
+ SkDynamicMemoryWStream s;
+ s.write(header, sizeof(header));
+ for (size_t i = 0; i < text_length; i++) {
+ // Convert ASCII to big-endian UTF-16.
+ s.write8(0);
+ s.write8(text[i]);
+ }
+ s.padToAlign4();
+ return s.detachAsData();
+}
+
// Write a CICP tag: the four one-byte code points after the standard 8-byte
// (type + reserved) tag header. This tag type was introduced with ICC v4.4
// (the caller bumps the profile version accordingly).
static sk_sp<SkData> write_cicp_tag(const skcms_CICP& cicp) {
    SkDynamicMemoryWStream s;
    s.write32(SkEndian_SwapBE32(kTAG_cicp));  // Type signature
    s.write32(0);                             // Reserved
    s.write8(cicp.color_primaries);           // Color primaries
    s.write8(cicp.transfer_characteristics);  // Transfer characteristics
    s.write8(cicp.matrix_coefficients);       // RGB matrix
    s.write8(cicp.video_full_range_flag);     // Full range
    return s.detachAsData();
}
+
+// Perform a matrix-vector multiplication. Overwrite the input vector with the result.
+static void skcms_Matrix3x3_apply(const skcms_Matrix3x3* m, float* x) {
+ float y0 = x[0] * m->vals[0][0] + x[1] * m->vals[0][1] + x[2] * m->vals[0][2];
+ float y1 = x[0] * m->vals[1][0] + x[1] * m->vals[1][1] + x[2] * m->vals[1][2];
+ float y2 = x[0] * m->vals[2][0] + x[1] * m->vals[2][1] + x[2] * m->vals[2][2];
+ x[0] = y0;
+ x[1] = y1;
+ x[2] = y2;
+}
+
// Convert an XYZD50 color to the 16-bit Lab encoding used for CLUT grid
// entries: three big-endian uint16 values written to |grid16_lab|.
void SkICCFloatXYZD50ToGrid16Lab(const float* xyz_float, uint8_t* grid16_lab) {
    // Normalize by the D50 white point.
    float v[3] = {
        xyz_float[0] / kD50_x,
        xyz_float[1] / kD50_y,
        xyz_float[2] / kD50_z,
    };
    // CIE Lab "f" function: cube root above the linear threshold, linear
    // segment below it.
    for (size_t i = 0; i < 3; ++i) {
        v[i] = v[i] > 0.008856f ? cbrtf(v[i]) : v[i] * 7.787f + (16 / 116.0f);
    }
    const float L = v[1] * 116.0f - 16.0f;
    const float a = (v[0] - v[1]) * 500.0f;
    const float b = (v[1] - v[2]) * 200.0f;
    // Normalize L from [0, 100] and a/b from [-128, 127] into [0, 1].
    const float Lab_unorm[3] = {
        L * (1 / 100.f),
        (a + 128.0f) * (1 / 255.0f),
        (b + 128.0f) * (1 / 255.0f),
    };
    // This will encode L=1 as 0xFFFF. This matches how skcms will interpret the
    // table, but the spec appears to indicate that the value should be 0xFF00.
    // https://crbug.com/skia/13807
    for (size_t i = 0; i < 3; ++i) {
        reinterpret_cast<uint16_t*>(grid16_lab)[i] =
                SkEndian_SwapBE16(float_round_to_unorm16(Lab_unorm[i]));
    }
}
+
// Encode a [0, 1] float as a big-endian unsigned 16-bit value at |table_16|.
void SkICCFloatToTable16(const float f, uint8_t* table_16) {
    *reinterpret_cast<uint16_t*>(table_16) = SkEndian_SwapBE16(float_round_to_unorm16(f));
}
+
// Compute the tone mapping gain for luminance value L. The gain should be
// applied after the transfer function is applied. Non-HDR (neither PQ-ish nor
// HLG-ish) transfer functions get a gain of 1.
float compute_tone_map_gain(const skcms_TransferFunction& fn, float L) {
    if (L <= 0.f) {
        return 1.f;
    }
    if (skcms_TransferFunction_isPQish(&fn)) {
        // The PQ transfer function will map to the range [0, 1]. Linearly scale
        // it up to the range [0, 10,000/203]. We will then tone map that back
        // down to [0, 1].
        constexpr float kInputMaxLuminance = 10000 / 203.f;
        constexpr float kOutputMaxLuminance = 1.0;
        L *= kInputMaxLuminance;

        // Compute the tone map gain which will tone map from 10,000/203 to 1.0.
        constexpr float kToneMapA = kOutputMaxLuminance / (kInputMaxLuminance * kInputMaxLuminance);
        constexpr float kToneMapB = 1.f / kOutputMaxLuminance;
        return kInputMaxLuminance * (1.f + kToneMapA * L) / (1.f + kToneMapB * L);
    }
    if (skcms_TransferFunction_isHLGish(&fn)) {
        // Let Lw be the brightness of the display in nits.
        // NOTE(review): this applies the HLG system gamma (1.2 at Lw = 1000
        // nits, adjusted for Lw = 203) as a luminance-dependent gain.
        constexpr float Lw = 203.f;
        const float gamma = 1.2f + 0.42f * std::log(Lw / 1000.f) / std::log(10.f);
        return std::pow(L, gamma - 1.f);
    }
    return 1.f;
}
+
// Write a lookup table based curve, potentially including tone mapping.
// Emits a 'curv' (sampled table) tag when table entries are present, and a
// 'para' (parametric) tag otherwise; parametric curves must be sRGB-ish.
static sk_sp<SkData> write_trc_tag(const skcms_Curve& trc) {
    SkDynamicMemoryWStream s;
    if (trc.table_entries) {
        s.write32(SkEndian_SwapBE32(kTAG_CurveType)); // Type
        s.write32(0); // Reserved
        s.write32(SkEndian_SwapBE32(trc.table_entries)); // Value count
        // NOTE(review): the 16-bit entries appear to be stored big-endian
        // already (see SkICCFloatToTable16) and are copied through without
        // another byte swap — confirm.
        for (uint32_t i = 0; i < trc.table_entries; ++i) {
            uint16_t value = reinterpret_cast<const uint16_t*>(trc.table_16)[i];
            s.write16(value);
        }
    } else {
        s.write32(SkEndian_SwapBE32(kTAG_ParaCurveType)); // Type
        s.write32(0); // Reserved
        const auto& fn = trc.parametric;
        SkASSERT(skcms_TransferFunction_isSRGBish(&fn));
        // NOTE(review): writing a BE16 function type through write32 emits the
        // two type bytes followed by two zero (reserved) bytes on
        // little-endian hosts; confirm the intent for big-endian targets.
        if (fn.a == 1.f && fn.b == 0.f && fn.c == 0.f && fn.d == 0.f && fn.e == 0.f &&
            fn.f == 0.f) {
            // Pure power curve: single-parameter (g only) form.
            s.write32(SkEndian_SwapBE16(kExponential_ParaCurveType));
            s.write32(SkEndian_SwapBE32(float_round_to_fixed(fn.g)));
        } else {
            // Full seven-parameter (g, a, b, c, d, e, f) form.
            s.write32(SkEndian_SwapBE16(kGABCDEF_ParaCurveType));
            s.write32(SkEndian_SwapBE32(float_round_to_fixed(fn.g)));
            s.write32(SkEndian_SwapBE32(float_round_to_fixed(fn.a)));
            s.write32(SkEndian_SwapBE32(float_round_to_fixed(fn.b)));
            s.write32(SkEndian_SwapBE32(float_round_to_fixed(fn.c)));
            s.write32(SkEndian_SwapBE32(float_round_to_fixed(fn.d)));
            s.write32(SkEndian_SwapBE32(float_round_to_fixed(fn.e)));
            s.write32(SkEndian_SwapBE32(float_round_to_fixed(fn.f)));
        }
    }
    s.padToAlign4();
    return s.detachAsData();
}
+
// Compute one CLUT entry in place: |rgb| holds a PQ-encoded RGB value in the
// gamut described by |src_to_XYZD50| on entry, and a tone-mapped, linear
// XYZD50 value on exit.
void compute_lut_entry(const skcms_Matrix3x3& src_to_XYZD50, float rgb[3]) {
    // Compute the matrices to convert from source to Rec2020, and from Rec2020 to XYZD50.
    skcms_Matrix3x3 src_to_rec2020;
    const skcms_Matrix3x3 rec2020_to_XYZD50 = SkNamedGamut::kRec2020;
    {
        skcms_Matrix3x3 XYZD50_to_rec2020;
        skcms_Matrix3x3_invert(&rec2020_to_XYZD50, &XYZD50_to_rec2020);
        src_to_rec2020 = skcms_Matrix3x3_concat(&XYZD50_to_rec2020, &src_to_XYZD50);
    }

    // Convert the source signal to linear.
    for (size_t i = 0; i < kNumChannels; ++i) {
        rgb[i] = skcms_TransferFunction_eval(&SkNamedTransferFn::kPQ, rgb[i]);
    }

    // Convert source gamut to Rec2020.
    skcms_Matrix3x3_apply(&src_to_rec2020, rgb);

    // Compute the luminance of the signal.
    // NOTE(review): these weights match the Rec2020 luma coefficients.
    constexpr float kLr = 0.2627f;
    constexpr float kLg = 0.6780f;
    constexpr float kLb = 0.0593f;
    float L = rgb[0] * kLr + rgb[1] * kLg + rgb[2] * kLb;

    // Compute the tone map gain based on the luminance.
    float tone_map_gain = compute_tone_map_gain(SkNamedTransferFn::kPQ, L);

    // Apply the tone map gain.
    for (size_t i = 0; i < kNumChannels; ++i) {
        rgb[i] *= tone_map_gain;
    }

    // Convert from Rec2020-linear to XYZD50.
    skcms_Matrix3x3_apply(&rec2020_to_XYZD50, rgb);
}
+
// Serialize the CLUT portion of an mAB/mBA tag: 16 grid-size bytes (unused
// channels zero), the entry byte width, three reserved bytes, then one 16-bit
// value per channel for every grid point.
sk_sp<SkData> write_clut(const uint8_t* grid_points, const uint8_t* grid_16) {
    SkDynamicMemoryWStream s;
    for (size_t i = 0; i < 16; ++i) {
        s.write8(i < kNumChannels ? grid_points[i] : 0);  // Grid size
    }
    s.write8(2);  // Grid byte width (always 16-bit)
    s.write8(0);  // Reserved
    s.write8(0);  // Reserved
    s.write8(0);  // Reserved

    // Total entries = channels * product of the per-channel grid sizes.
    uint32_t value_count = kNumChannels;
    for (uint32_t i = 0; i < kNumChannels; ++i) {
        value_count *= grid_points[i];
    }
    // NOTE(review): entries appear to be stored big-endian already (see
    // SkICCFloatXYZD50ToGrid16Lab) and are copied through without a swap.
    for (uint32_t i = 0; i < value_count; ++i) {
        uint16_t value = reinterpret_cast<const uint16_t*>(grid_16)[i];
        s.write16(value);
    }
    s.padToAlign4();
    return s.detachAsData();
}
+
// Write an A2B or B2A tag. Layout: a 32-byte header, the three "B" curves,
// then (optionally) the CLUT and the three "A" curves. Offsets in the header
// are relative to the start of the tag. "M" curves and the matrix are not
// supported and must be null.
sk_sp<SkData> write_mAB_or_mBA_tag(uint32_t type,
                                   const skcms_Curve* b_curves,
                                   const skcms_Curve* a_curves,
                                   const uint8_t* grid_points,
                                   const uint8_t* grid_16,
                                   const skcms_Curve* m_curves,
                                   const skcms_Matrix3x4* matrix) {
    // The fixed header is 32 bytes, so the B curves always start there.
    const size_t b_curves_offset = 32;
    sk_sp<SkData> b_curves_data[kNumChannels];
    size_t clut_offset = 0;
    sk_sp<SkData> clut;
    size_t a_curves_offset = 0;
    sk_sp<SkData> a_curves_data[kNumChannels];

    // The "B" curve is required.
    SkASSERT(b_curves);
    for (size_t i = 0; i < kNumChannels; ++i) {
        b_curves_data[i] = write_trc_tag(b_curves[i]);
        SkASSERT(b_curves_data[i]);
    }

    // The "A" curve and CLUT are optional.
    if (a_curves) {
        SkASSERT(grid_points);
        SkASSERT(grid_16);

        // The CLUT immediately follows the B curves ...
        clut_offset = b_curves_offset;
        for (size_t i = 0; i < kNumChannels; ++i) {
            clut_offset += b_curves_data[i]->size();
        }
        clut = write_clut(grid_points, grid_16);
        SkASSERT(clut);

        // ... and the A curves immediately follow the CLUT.
        a_curves_offset = clut_offset + clut->size();
        for (size_t i = 0; i < kNumChannels; ++i) {
            a_curves_data[i] = write_trc_tag(a_curves[i]);
            SkASSERT(a_curves_data[i]);
        }
    }

    // The "M" curves and matrix are not supported yet.
    SkASSERT(!m_curves);
    SkASSERT(!matrix);

    SkDynamicMemoryWStream s;
    s.write32(SkEndian_SwapBE32(type));             // Type signature
    s.write32(0);                                   // Reserved
    s.write8(kNumChannels);                         // Input channels
    s.write8(kNumChannels);                         // Output channels
    s.write16(0);                                   // Reserved
    s.write32(SkEndian_SwapBE32(b_curves_offset));  // B curve offset
    s.write32(SkEndian_SwapBE32(0));                // Matrix offset (ignored)
    s.write32(SkEndian_SwapBE32(0));                // M curve offset (ignored)
    s.write32(SkEndian_SwapBE32(clut_offset));      // CLUT offset
    s.write32(SkEndian_SwapBE32(a_curves_offset));  // A curve offset
    SkASSERT(s.bytesWritten() == b_curves_offset);
    for (size_t i = 0; i < kNumChannels; ++i) {
        s.write(b_curves_data[i]->data(), b_curves_data[i]->size());
    }
    if (a_curves) {
        SkASSERT(s.bytesWritten() == clut_offset);
        s.write(clut->data(), clut->size());
        SkASSERT(s.bytesWritten() == a_curves_offset);
        for (size_t i = 0; i < kNumChannels; ++i) {
            s.write(a_curves_data[i]->data(), a_curves_data[i]->size());
        }
    }
    return s.detachAsData();
}
+
+sk_sp<SkData> SkWriteICCProfile(const skcms_ICCProfile* profile, const char* desc) {
+ ICCHeader header;
+
+ std::vector<std::pair<uint32_t, sk_sp<SkData>>> tags;
+
+ // Compute profile description tag
+ tags.emplace_back(kTAG_desc, write_text_tag(desc));
+
+ // Compute primaries.
+ if (profile->has_toXYZD50) {
+ const auto& m = profile->toXYZD50;
+ tags.emplace_back(kTAG_rXYZ, write_xyz_tag(m.vals[0][0], m.vals[1][0], m.vals[2][0]));
+ tags.emplace_back(kTAG_gXYZ, write_xyz_tag(m.vals[0][1], m.vals[1][1], m.vals[2][1]));
+ tags.emplace_back(kTAG_bXYZ, write_xyz_tag(m.vals[0][2], m.vals[1][2], m.vals[2][2]));
+ }
+
+ // Compute white point tag (must be D50)
+ tags.emplace_back(kTAG_wtpt, write_xyz_tag(kD50_x, kD50_y, kD50_z));
+
+ // Compute transfer curves.
+ if (profile->has_trc) {
+ tags.emplace_back(kTAG_rTRC, write_trc_tag(profile->trc[0]));
+
+ // Use empty data to indicate that the entry should use the previous tag's
+ // data.
+ if (!memcmp(&profile->trc[1], &profile->trc[0], sizeof(profile->trc[0]))) {
+ tags.emplace_back(kTAG_gTRC, SkData::MakeEmpty());
+ } else {
+ tags.emplace_back(kTAG_gTRC, write_trc_tag(profile->trc[1]));
+ }
+
+ if (!memcmp(&profile->trc[2], &profile->trc[1], sizeof(profile->trc[1]))) {
+ tags.emplace_back(kTAG_bTRC, SkData::MakeEmpty());
+ } else {
+ tags.emplace_back(kTAG_bTRC, write_trc_tag(profile->trc[2]));
+ }
+ }
+
+ // Compute CICP.
+ if (profile->has_CICP) {
+ // The CICP tag is present in ICC 4.4, so update the header's version.
+ header.version = SkEndian_SwapBE32(0x04400000);
+ tags.emplace_back(kTAG_cicp, write_cicp_tag(profile->CICP));
+ }
+
+ // Compute A2B0.
+ if (profile->has_A2B) {
+ const auto& a2b = profile->A2B;
+ SkASSERT(a2b.output_channels == kNumChannels);
+ auto a2b_data = write_mAB_or_mBA_tag(kTAG_mABType,
+ a2b.output_curves,
+ a2b.input_channels ? a2b.input_curves : nullptr,
+ a2b.input_channels ? a2b.grid_points : nullptr,
+ a2b.input_channels ? a2b.grid_16 : nullptr,
+ a2b.matrix_channels ? a2b.matrix_curves : nullptr,
+ a2b.matrix_channels ? &a2b.matrix : nullptr);
+ tags.emplace_back(kTAG_A2B0, std::move(a2b_data));
+ }
+
+ // Compute B2A0.
+ if (profile->has_B2A) {
+ const auto& b2a = profile->B2A;
+ SkASSERT(b2a.input_channels == kNumChannels);
+ auto b2a_data = write_mAB_or_mBA_tag(kTAG_mBAType,
+ b2a.input_curves,
+ b2a.output_channels ? b2a.input_curves : nullptr,
+ b2a.output_channels ? b2a.grid_points : nullptr,
+ b2a.output_channels ? b2a.grid_16 : nullptr,
+ b2a.matrix_channels ? b2a.matrix_curves : nullptr,
+ b2a.matrix_channels ? &b2a.matrix : nullptr);
+ tags.emplace_back(kTAG_B2A0, std::move(b2a_data));
+ }
+
+ // Compute copyright tag
+ tags.emplace_back(kTAG_cprt, write_text_tag("Google Inc. 2016"));
+
+ // Compute the size of the profile.
+ size_t tag_data_size = 0;
+ for (const auto& tag : tags) {
+ tag_data_size += tag.second->size();
+ }
+ size_t tag_table_size = kICCTagTableEntrySize * tags.size();
+ size_t profile_size = kICCHeaderSize + tag_table_size + tag_data_size;
+
+ // Write the header.
+ header.data_color_space = SkEndian_SwapBE32(profile->data_color_space);
+ header.pcs = SkEndian_SwapBE32(profile->pcs);
+ header.size = SkEndian_SwapBE32(profile_size);
+ header.tag_count = SkEndian_SwapBE32(tags.size());
+
+ SkAutoMalloc profile_data(profile_size);
+ uint8_t* ptr = (uint8_t*)profile_data.get();
+ memcpy(ptr, &header, sizeof(header));
+ ptr += sizeof(header);
+
+ // Write the tag table. Track the offset and size of the previous tag to
+ // compute each tag's offset. An empty SkData indicates that the previous
+ // tag is to be reused.
+ size_t last_tag_offset = sizeof(header) + tag_table_size;
+ size_t last_tag_size = 0;
+ for (const auto& tag : tags) {
+ if (!tag.second->isEmpty()) {
+ last_tag_offset = last_tag_offset + last_tag_size;
+ last_tag_size = tag.second->size();
+ }
+ uint32_t tag_table_entry[3] = {
+ SkEndian_SwapBE32(tag.first),
+ SkEndian_SwapBE32(last_tag_offset),
+ SkEndian_SwapBE32(last_tag_size),
+ };
+ memcpy(ptr, tag_table_entry, sizeof(tag_table_entry));
+ ptr += sizeof(tag_table_entry);
+ }
+
+ // Write the tags.
+ for (const auto& tag : tags) {
+ if (tag.second->isEmpty()) continue;
+ memcpy(ptr, tag.second->data(), tag.second->size());
+ ptr += tag.second->size();
+ }
+
+ SkASSERT(profile_size == static_cast<size_t>(ptr - (uint8_t*)profile_data.get()));
+ return SkData::MakeFromMalloc(profile_data.release(), profile_size);
+}
+
+sk_sp<SkData> SkWriteICCProfile(const skcms_TransferFunction& fn, const skcms_Matrix3x3& toXYZD50) {
+ skcms_ICCProfile profile;
+ memset(&profile, 0, sizeof(profile));
+ std::vector<uint8_t> trc_table;
+ std::vector<uint8_t> a2b_grid;
+
+ profile.data_color_space = skcms_Signature_RGB;
+ profile.pcs = skcms_Signature_XYZ;
+
+ // Populate toXYZD50.
+ {
+ profile.has_toXYZD50 = true;
+ profile.toXYZD50 = toXYZD50;
+ }
+
+ // Populate TRC (except for PQ).
+ if (!skcms_TransferFunction_isPQish(&fn)) {
+ profile.has_trc = true;
+ if (skcms_TransferFunction_isSRGBish(&fn)) {
+ profile.trc[0].table_entries = 0;
+ profile.trc[0].parametric = fn;
+ } else if (skcms_TransferFunction_isHLGish(&fn)) {
+ skcms_TransferFunction scaled_hlg = SkNamedTransferFn::kHLG;
+ scaled_hlg.f = 1 / 12.f - 1.f;
+ constexpr uint32_t kTrcTableSize = 65;
+ trc_table.resize(kTrcTableSize * 2);
+ for (uint32_t i = 0; i < kTrcTableSize; ++i) {
+ float x = i / (kTrcTableSize - 1.f);
+ float y = skcms_TransferFunction_eval(&scaled_hlg, x);
+ y *= compute_tone_map_gain(scaled_hlg, y);
+ SkICCFloatToTable16(y, &trc_table[2 * i]);
+ }
+
+ profile.trc[0].table_entries = kTrcTableSize;
+ profile.trc[0].table_16 = reinterpret_cast<uint8_t*>(trc_table.data());
+ }
+ memcpy(&profile.trc[1], &profile.trc[0], sizeof(profile.trc[0]));
+ memcpy(&profile.trc[2], &profile.trc[0], sizeof(profile.trc[0]));
+ }
+
+ // Populate A2B (PQ only).
+ if (skcms_TransferFunction_isPQish(&fn)) {
+ profile.pcs = skcms_Signature_Lab;
+
+ constexpr uint32_t kGridSize = 17;
+ profile.has_A2B = true;
+ profile.A2B.input_channels = kNumChannels;
+ profile.A2B.output_channels = kNumChannels;
+ for (size_t i = 0; i < 3; ++i) {
+ profile.A2B.input_curves[i].parametric = SkNamedTransferFn::kLinear;
+ profile.A2B.output_curves[i].parametric = SkNamedTransferFn::kLinear;
+ profile.A2B.grid_points[i] = kGridSize;
+ }
+
+ a2b_grid.resize(kGridSize * kGridSize * kGridSize * kNumChannels * 2);
+ size_t a2b_grid_index = 0;
+ for (uint32_t r_index = 0; r_index < kGridSize; ++r_index) {
+ for (uint32_t g_index = 0; g_index < kGridSize; ++g_index) {
+ for (uint32_t b_index = 0; b_index < kGridSize; ++b_index) {
+ float rgb[3] = {
+ r_index / (kGridSize - 1.f),
+ g_index / (kGridSize - 1.f),
+ b_index / (kGridSize - 1.f),
+ };
+ compute_lut_entry(toXYZD50, rgb);
+ SkICCFloatXYZD50ToGrid16Lab(rgb, &a2b_grid[a2b_grid_index]);
+ a2b_grid_index += 6;
+ }
+ }
+ }
+ for (size_t i = 0; i < kNumChannels; ++i) {
+ profile.A2B.grid_points[i] = kGridSize;
+ }
+ profile.A2B.grid_16 = reinterpret_cast<const uint8_t*>(a2b_grid.data());
+
+ profile.has_B2A = true;
+ profile.B2A.input_channels = kNumChannels;
+ for (size_t i = 0; i < 3; ++i) {
+ profile.B2A.input_curves[i].parametric = SkNamedTransferFn::kLinear;
+ }
+ }
+
+ // Populate CICP.
+ if (skcms_TransferFunction_isHLGish(&fn) || skcms_TransferFunction_isPQish(&fn)) {
+ profile.has_CICP = true;
+ profile.CICP.color_primaries = get_cicp_primaries(toXYZD50);
+ profile.CICP.transfer_characteristics = get_cicp_trfn(fn);
+ profile.CICP.matrix_coefficients = 0;
+ profile.CICP.video_full_range_flag = 1;
+ SkASSERT(profile.CICP.color_primaries);
+ SkASSERT(profile.CICP.transfer_characteristics);
+ }
+
+ std::string description = get_desc_string(fn, toXYZD50);
+ return SkWriteICCProfile(&profile, description.c_str());
+}
diff --git a/gfx/skia/skia/src/encode/SkICCPriv.h b/gfx/skia/skia/src/encode/SkICCPriv.h
new file mode 100644
index 0000000000..18757705ea
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkICCPriv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkICCPriv_DEFINED
+#define SkICCPriv_DEFINED
+
+#include "include/core/SkTypes.h" // SkSetFourByteTag
+
+#include <cstddef>
+#include <cstdint>
+
// This is equal to the header size according to the ICC specification (128)
// plus the size of the tag count (4). We include the tag count since we
// always require it to be present anyway.
static constexpr size_t kICCHeaderSize = 132;

// Contains a signature (4), offset (4), and size (4).
static constexpr size_t kICCTagTableEntrySize = 12;

// Data color space and profile class signatures (ICC header fields).
static constexpr uint32_t kRGB_ColorSpace = SkSetFourByteTag('R', 'G', 'B', ' ');
static constexpr uint32_t kCMYK_ColorSpace = SkSetFourByteTag('C', 'M', 'Y', 'K');
static constexpr uint32_t kGray_ColorSpace = SkSetFourByteTag('G', 'R', 'A', 'Y');
static constexpr uint32_t kDisplay_Profile = SkSetFourByteTag('m', 'n', 't', 'r');
static constexpr uint32_t kInput_Profile = SkSetFourByteTag('s', 'c', 'n', 'r');
static constexpr uint32_t kOutput_Profile = SkSetFourByteTag('p', 'r', 't', 'r');
static constexpr uint32_t kColorSpace_Profile = SkSetFourByteTag('s', 'p', 'a', 'c');
// Profile connection space signatures, and the mandatory 'acsp' file
// signature.
static constexpr uint32_t kXYZ_PCSSpace = SkSetFourByteTag('X', 'Y', 'Z', ' ');
static constexpr uint32_t kLAB_PCSSpace = SkSetFourByteTag('L', 'a', 'b', ' ');
static constexpr uint32_t kACSP_Signature = SkSetFourByteTag('a', 'c', 's', 'p');

// Tag signatures (entries in the tag table).
static constexpr uint32_t kTAG_rXYZ = SkSetFourByteTag('r', 'X', 'Y', 'Z');
static constexpr uint32_t kTAG_gXYZ = SkSetFourByteTag('g', 'X', 'Y', 'Z');
static constexpr uint32_t kTAG_bXYZ = SkSetFourByteTag('b', 'X', 'Y', 'Z');
static constexpr uint32_t kTAG_rTRC = SkSetFourByteTag('r', 'T', 'R', 'C');
static constexpr uint32_t kTAG_gTRC = SkSetFourByteTag('g', 'T', 'R', 'C');
static constexpr uint32_t kTAG_bTRC = SkSetFourByteTag('b', 'T', 'R', 'C');
static constexpr uint32_t kTAG_kTRC = SkSetFourByteTag('k', 'T', 'R', 'C');
static constexpr uint32_t kTAG_A2B0 = SkSetFourByteTag('A', '2', 'B', '0');
static constexpr uint32_t kTAG_B2A0 = SkSetFourByteTag('B', '2', 'A', '0');
static constexpr uint32_t kTAG_desc = SkSetFourByteTag('d', 'e', 's', 'c');
static constexpr uint32_t kTAG_cicp = SkSetFourByteTag('c', 'i', 'c', 'p');
static constexpr uint32_t kTAG_wtpt = SkSetFourByteTag('w', 't', 'p', 't');
static constexpr uint32_t kTAG_cprt = SkSetFourByteTag('c', 'p', 'r', 't');

// Tag *type* signatures (the first four bytes of a tag's data).
static constexpr uint32_t kTAG_CurveType = SkSetFourByteTag('c', 'u', 'r', 'v');
static constexpr uint32_t kTAG_ParaCurveType = SkSetFourByteTag('p', 'a', 'r', 'a');
static constexpr uint32_t kTAG_TextType = SkSetFourByteTag('m', 'l', 'u', 'c');
static constexpr uint32_t kTAG_mABType = SkSetFourByteTag('m', 'A', 'B', ' ');
static constexpr uint32_t kTAG_mBAType = SkSetFourByteTag('m', 'B', 'A', ' ');

// Function type codes for 'para' (parametric curve) tag data.
enum ParaCurveType {
    kExponential_ParaCurveType = 0,
    kGAB_ParaCurveType = 1,
    kGABC_ParaCurveType = 2,
    kGABDE_ParaCurveType = 3,
    kGABCDEF_ParaCurveType = 4,
};
+
+#endif // SkICCPriv_DEFINED
diff --git a/gfx/skia/skia/src/encode/SkImageEncoder.cpp b/gfx/skia/skia/src/encode/SkImageEncoder.cpp
new file mode 100644
index 0000000000..72c4cf8b28
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkImageEncoder.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2009 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkImageEncoder.h"
+
+#include "include/codec/SkEncodedImageFormat.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "include/encode/SkJpegEncoder.h"
+#include "include/encode/SkPngEncoder.h"
+#include "include/encode/SkWebpEncoder.h"
+
+#if SK_ENABLE_NDK_IMAGES || SK_USE_CG_ENCODER || SK_USE_WIC_ENCODER
+#include "src/encode/SkImageEncoderPriv.h"
+#endif
+
+#if !defined(SK_ENCODE_JPEG)|| !defined(SK_ENCODE_PNG) || !defined(SK_ENCODE_WEBP)
+#include <memory>
+
+class SkEncoder;
+#endif
+
#if !defined(SK_ENCODE_JPEG)
// JPEG encoding compiled out: stubs fail gracefully instead of linking the
// encoder.
bool SkJpegEncoder::Encode(SkWStream*, const SkPixmap&, const Options&) { return false; }
std::unique_ptr<SkEncoder> SkJpegEncoder::Make(SkWStream*, const SkPixmap&, const Options&) {
    return nullptr;
}
#endif

#if !defined(SK_ENCODE_PNG)
// PNG encoding compiled out.
bool SkPngEncoder::Encode(SkWStream*, const SkPixmap&, const Options&) { return false; }
std::unique_ptr<SkEncoder> SkPngEncoder::Make(SkWStream*, const SkPixmap&, const Options&) {
    return nullptr;
}
#endif

#if !defined(SK_ENCODE_WEBP)
// WebP encoding compiled out.
bool SkWebpEncoder::Encode(SkWStream*, const SkPixmap&, const Options&) { return false; }
#endif
+
+bool SkEncodeImage(SkWStream* dst, const SkBitmap& src, SkEncodedImageFormat f, int q) {
+ SkPixmap pixmap;
+ return src.peekPixels(&pixmap) && SkEncodeImage(dst, pixmap, f, q);
+}
+
// Encode |src| into |dst| in the requested format. On platforms built with a
// system encoder (CoreGraphics, WIC, or NDK) the call is forwarded there;
// otherwise it dispatches to the Skia JPEG/PNG/WebP encoders. Returns false
// for unsupported formats or encoder failure.
bool SkEncodeImage(SkWStream* dst, const SkPixmap& src,
                   SkEncodedImageFormat format, int quality) {
    #ifdef SK_USE_CG_ENCODER
        (void)quality;
        return SkEncodeImageWithCG(dst, src, format);
    #elif SK_USE_WIC_ENCODER
        return SkEncodeImageWithWIC(dst, src, format, quality);
    #elif SK_ENABLE_NDK_IMAGES
        return SkEncodeImageWithNDK(dst, src, format, quality);
    #else
        switch(format) {
            case SkEncodedImageFormat::kJPEG: {
                SkJpegEncoder::Options opts;
                opts.fQuality = quality;
                return SkJpegEncoder::Encode(dst, src, opts);
            }
            case SkEncodedImageFormat::kPNG: {
                // PNG is lossless; the quality parameter is ignored.
                SkPngEncoder::Options opts;
                return SkPngEncoder::Encode(dst, src, opts);
            }
            case SkEncodedImageFormat::kWEBP: {
                SkWebpEncoder::Options opts;
                if (quality == 100) {
                    opts.fCompression = SkWebpEncoder::Compression::kLossless;
                    // Note: SkEncodeImage treats 0 quality as the lowest quality
                    // (greatest compression) and 100 as the highest quality (least
                    // compression). For kLossy, this matches libwebp's
                    // interpretation, so it is passed directly to libwebp. But
                    // with kLossless, libwebp always creates the highest quality
                    // image. In this case, fQuality is reinterpreted as how much
                    // effort (time) to put into making a smaller file. This API
                    // does not provide a way to specify this value (though it can
                    // be specified by using SkWebpEncoder::Encode) so we have to
                    // pick one arbitrarily. This value matches that chosen by
                    // blink::ImageEncoder::ComputeWebpOptions as well
                    // WebPConfigInit.
                    opts.fQuality = 75;
                } else {
                    opts.fCompression = SkWebpEncoder::Compression::kLossy;
                    opts.fQuality = quality;
                }
                return SkWebpEncoder::Encode(dst, src, opts);
            }
            default:
                return false;
        }
    #endif
}
+
+sk_sp<SkData> SkEncodePixmap(const SkPixmap& src, SkEncodedImageFormat format, int quality) {
+ SkDynamicMemoryWStream stream;
+ return SkEncodeImage(&stream, src, format, quality) ? stream.detachAsData() : nullptr;
+}
+
+sk_sp<SkData> SkEncodeBitmap(const SkBitmap& src, SkEncodedImageFormat format, int quality) {
+ SkPixmap pixmap;
+ return src.peekPixels(&pixmap) ? SkEncodePixmap(pixmap, format, quality) : nullptr;
+}
diff --git a/gfx/skia/skia/src/encode/SkImageEncoderFns.h b/gfx/skia/skia/src/encode/SkImageEncoderFns.h
new file mode 100644
index 0000000000..c567716280
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkImageEncoderFns.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2012 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageEncoderFns_DEFINED
+#define SkImageEncoderFns_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "include/encode/SkICC.h"
+#include "modules/skcms/skcms.h"
+
+#include <cstring>
+
+typedef void (*transform_scanline_proc)(char* dst, const char* src, int width, int bpp);
+
+static inline void transform_scanline_memcpy(char* dst, const char* src, int width, int bpp) {
+ memcpy(dst, src, width * bpp);
+}
+
+static inline void transform_scanline_A8_to_GrayAlpha(char* dst, const char* src, int width, int) {
+ for (int i = 0; i < width; i++) {
+ *dst++ = 0;
+ *dst++ = *src++;
+ }
+}
+
+
+static void skcms(char* dst, const char* src, int n,
+ skcms_PixelFormat srcFmt, skcms_AlphaFormat srcAlpha,
+ skcms_PixelFormat dstFmt, skcms_AlphaFormat dstAlpha) {
+ SkAssertResult(skcms_Transform(src, srcFmt, srcAlpha, nullptr,
+ dst, dstFmt, dstAlpha, nullptr, n));
+}
+
+static inline void transform_scanline_gray(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_G_8, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_565(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGR_565, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_RGBX(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888 , skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_BGRX(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_8888, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888 , skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_444(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_ABGR_4444, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_888 , skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_rgbA(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_bgrA(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_8888, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_to_premul_legacy(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_PremulAsEncoded);
+}
+
+static inline void transform_scanline_BGRA(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_8888, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_4444(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_ABGR_4444, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_101010x(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_1010102, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_1010102(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_1010102, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_1010102_premul(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_1010102, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_bgr_101010x(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_1010102, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGB_161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_bgra_1010102(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_1010102, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_bgra_1010102_premul(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_BGRA_1010102, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16_premul(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16_to_8888(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16_premul_to_8888(char* dst,
+ const char* src,
+ int width,
+ int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F16_to_premul_8888(char* dst,
+ const char* src,
+ int width,
+ int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_hhhh, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_8888, skcms_AlphaFormat_PremulAsEncoded);
+}
+
+static inline void transform_scanline_F32(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_ffff, skcms_AlphaFormat_Unpremul,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline void transform_scanline_F32_premul(char* dst, const char* src, int width, int) {
+ skcms(dst, src, width,
+ skcms_PixelFormat_RGBA_ffff, skcms_AlphaFormat_PremulAsEncoded,
+ skcms_PixelFormat_RGBA_16161616BE, skcms_AlphaFormat_Unpremul);
+}
+
+static inline sk_sp<SkData> icc_from_color_space(const SkColorSpace* cs,
+ const skcms_ICCProfile* profile,
+ const char* profile_description) {
+ // TODO(ccameron): Remove this check.
+ if (!cs) {
+ return nullptr;
+ }
+
+ if (profile) {
+ return SkWriteICCProfile(profile, profile_description);
+ }
+
+ skcms_Matrix3x3 toXYZD50;
+ if (cs->toXYZD50(&toXYZD50)) {
+ skcms_TransferFunction fn;
+ cs->transferFn(&fn);
+ return SkWriteICCProfile(fn, toXYZD50);
+ }
+ return nullptr;
+}
+
+static inline sk_sp<SkData> icc_from_color_space(const SkImageInfo& info,
+ const skcms_ICCProfile* profile,
+ const char* profile_description) {
+ return icc_from_color_space(info.colorSpace(), profile, profile_description);
+}
+
+#endif // SkImageEncoderFns_DEFINED
diff --git a/gfx/skia/skia/src/encode/SkImageEncoderPriv.h b/gfx/skia/skia/src/encode/SkImageEncoderPriv.h
new file mode 100644
index 0000000000..9fedae51f6
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkImageEncoderPriv.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageEncoderPriv_DEFINED
+#define SkImageEncoderPriv_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "src/core/SkImageInfoPriv.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS) || \
+ defined(SK_BUILD_FOR_WIN) || defined(SK_ENABLE_NDK_IMAGES)
+#include "include/codec/SkEncodedImageFormat.h"
+class SkWStream;
+#endif
+
+static inline bool SkPixmapIsValid(const SkPixmap& src) {
+ if (!SkImageInfoIsValid(src.info())) {
+ return false;
+ }
+
+ if (!src.addr() || src.rowBytes() < src.info().minRowBytes()) {
+ return false;
+ }
+
+ return true;
+}
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+ bool SkEncodeImageWithCG(SkWStream*, const SkPixmap&, SkEncodedImageFormat);
+#else
+ #define SkEncodeImageWithCG(...) false
+#endif
+
+#ifdef SK_BUILD_FOR_WIN
+ bool SkEncodeImageWithWIC(SkWStream*, const SkPixmap&, SkEncodedImageFormat, int quality);
+#else
+ #define SkEncodeImageWithWIC(...) false
+#endif
+
+#ifdef SK_ENABLE_NDK_IMAGES
+ bool SkEncodeImageWithNDK(SkWStream*, const SkPixmap&, SkEncodedImageFormat, int quality);
+#else
+ #define SkEncodeImageWithNDK(...) false
+#endif
+
+#endif // SkImageEncoderPriv_DEFINED
diff --git a/gfx/skia/skia/src/encode/SkJPEGWriteUtility.cpp b/gfx/skia/skia/src/encode/SkJPEGWriteUtility.cpp
new file mode 100644
index 0000000000..024242d545
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkJPEGWriteUtility.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/encode/SkJPEGWriteUtility.h"
+
+#include "include/core/SkStream.h"
+#include "include/private/base/SkTArray.h"
+#include "src/codec/SkJpegPriv.h"
+
+#include <csetjmp>
+#include <cstddef>
+
+extern "C" {
+ #include "jerror.h"
+ #include "jmorecfg.h"
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void sk_init_destination(j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+ dest->next_output_byte = dest->fBuffer;
+ dest->free_in_buffer = skjpeg_destination_mgr::kBufferSize;
+}
+
+static boolean sk_empty_output_buffer(j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+// if (!dest->fStream->write(dest->fBuffer, skjpeg_destination_mgr::kBufferSize - dest->free_in_buffer))
+ if (!dest->fStream->write(dest->fBuffer,
+ skjpeg_destination_mgr::kBufferSize)) {
+ ERREXIT(cinfo, JERR_FILE_WRITE);
+ return FALSE;
+ }
+
+ dest->next_output_byte = dest->fBuffer;
+ dest->free_in_buffer = skjpeg_destination_mgr::kBufferSize;
+ return TRUE;
+}
+
+static void sk_term_destination (j_compress_ptr cinfo) {
+ skjpeg_destination_mgr* dest = (skjpeg_destination_mgr*)cinfo->dest;
+
+ size_t size = skjpeg_destination_mgr::kBufferSize - dest->free_in_buffer;
+ if (size > 0) {
+ if (!dest->fStream->write(dest->fBuffer, size)) {
+ ERREXIT(cinfo, JERR_FILE_WRITE);
+ return;
+ }
+ }
+
+ dest->fStream->flush();
+}
+
+skjpeg_destination_mgr::skjpeg_destination_mgr(SkWStream* stream) : fStream(stream) {
+ this->init_destination = sk_init_destination;
+ this->empty_output_buffer = sk_empty_output_buffer;
+ this->term_destination = sk_term_destination;
+}
+
+void skjpeg_error_exit(j_common_ptr cinfo) {
+ skjpeg_error_mgr* error = (skjpeg_error_mgr*)cinfo->err;
+
+ (*error->output_message) (cinfo);
+
+ /* Let the memory manager delete any temp files before we die */
+ jpeg_destroy(cinfo);
+
+ if (error->fJmpBufStack.empty()) {
+ SK_ABORT("JPEG error with no jmp_buf set.");
+ }
+ longjmp(*error->fJmpBufStack.back(), -1);
+}
diff --git a/gfx/skia/skia/src/encode/SkJPEGWriteUtility.h b/gfx/skia/skia/src/encode/SkJPEGWriteUtility.h
new file mode 100644
index 0000000000..c534bbf6c1
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkJPEGWriteUtility.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkJpegUtility_DEFINED
+#define SkJpegUtility_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include <cstdint>
+
+extern "C" {
+ // We need to include stdio.h before jpeg because jpeg does not include it, but uses FILE
+ // See https://github.com/libjpeg-turbo/libjpeg-turbo/issues/17
+ #include <stdio.h> // IWYU pragma: keep
+ #include "jpeglib.h"
+}
+
+class SkWStream;
+
+void skjpeg_error_exit(j_common_ptr cinfo);
+
+/////////////////////////////////////////////////////////////////////////////
+/* Our destination struct for directing decompressed pixels to our stream
+ * object.
+ */
+struct SK_SPI skjpeg_destination_mgr : jpeg_destination_mgr {
+ skjpeg_destination_mgr(SkWStream* stream);
+
+ SkWStream* const fStream;
+
+ enum {
+ kBufferSize = 1024
+ };
+ uint8_t fBuffer[kBufferSize];
+};
+
+#endif
diff --git a/gfx/skia/skia/src/encode/SkJpegEncoder.cpp b/gfx/skia/skia/src/encode/SkJpegEncoder.cpp
new file mode 100644
index 0000000000..d764a52ebc
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkJpegEncoder.cpp
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2007 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_ENCODE_JPEG
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkYUVAInfo.h"
+#include "include/core/SkYUVAPixmaps.h"
+#include "include/encode/SkEncoder.h"
+#include "include/encode/SkJpegEncoder.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkMSAN.h"
+#include "src/codec/SkJpegConstants.h"
+#include "src/codec/SkJpegPriv.h"
+#include "src/encode/SkImageEncoderFns.h"
+#include "src/encode/SkImageEncoderPriv.h"
+#include "src/encode/SkJPEGWriteUtility.h"
+
+#include <csetjmp>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <utility>
+
+class SkColorSpace;
+
+extern "C" {
+ #include "jpeglib.h"
+ #include "jmorecfg.h"
+}
+
+class SkJpegEncoderMgr final : SkNoncopyable {
+public:
+ /*
+ * Create the decode manager
+ * Does not take ownership of stream.
+ */
+ static std::unique_ptr<SkJpegEncoderMgr> Make(SkWStream* stream) {
+ return std::unique_ptr<SkJpegEncoderMgr>(new SkJpegEncoderMgr(stream));
+ }
+
+ bool setParams(const SkImageInfo& srcInfo, const SkJpegEncoder::Options& options);
+ bool setParams(const SkYUVAPixmapInfo& srcInfo, const SkJpegEncoder::Options& options);
+
+ jpeg_compress_struct* cinfo() { return &fCInfo; }
+
+ skjpeg_error_mgr* errorMgr() { return &fErrMgr; }
+
+ transform_scanline_proc proc() const { return fProc; }
+
+ ~SkJpegEncoderMgr() {
+ jpeg_destroy_compress(&fCInfo);
+ }
+
+private:
+ SkJpegEncoderMgr(SkWStream* stream) : fDstMgr(stream), fProc(nullptr) {
+ fCInfo.err = jpeg_std_error(&fErrMgr);
+ fErrMgr.error_exit = skjpeg_error_exit;
+ jpeg_create_compress(&fCInfo);
+ fCInfo.dest = &fDstMgr;
+ }
+
+ jpeg_compress_struct fCInfo;
+ skjpeg_error_mgr fErrMgr;
+ skjpeg_destination_mgr fDstMgr;
+ transform_scanline_proc fProc;
+};
+
+bool SkJpegEncoderMgr::setParams(const SkImageInfo& srcInfo, const SkJpegEncoder::Options& options)
+{
+ auto chooseProc8888 = [&]() {
+ if (kUnpremul_SkAlphaType == srcInfo.alphaType() &&
+ options.fAlphaOption == SkJpegEncoder::AlphaOption::kBlendOnBlack) {
+ return transform_scanline_to_premul_legacy;
+ }
+ return (transform_scanline_proc) nullptr;
+ };
+
+ J_COLOR_SPACE jpegColorType = JCS_EXT_RGBA;
+ int numComponents = 0;
+ switch (srcInfo.colorType()) {
+ case kRGBA_8888_SkColorType:
+ fProc = chooseProc8888();
+ jpegColorType = JCS_EXT_RGBA;
+ numComponents = 4;
+ break;
+ case kBGRA_8888_SkColorType:
+ fProc = chooseProc8888();
+ jpegColorType = JCS_EXT_BGRA;
+ numComponents = 4;
+ break;
+ case kRGB_565_SkColorType:
+ fProc = transform_scanline_565;
+ jpegColorType = JCS_RGB;
+ numComponents = 3;
+ break;
+ case kARGB_4444_SkColorType:
+ if (SkJpegEncoder::AlphaOption::kBlendOnBlack == options.fAlphaOption) {
+ return false;
+ }
+
+ fProc = transform_scanline_444;
+ jpegColorType = JCS_RGB;
+ numComponents = 3;
+ break;
+ case kGray_8_SkColorType:
+ case kAlpha_8_SkColorType:
+ case kR8_unorm_SkColorType:
+ jpegColorType = JCS_GRAYSCALE;
+ numComponents = 1;
+ break;
+ case kRGBA_F16_SkColorType:
+ if (kUnpremul_SkAlphaType == srcInfo.alphaType() &&
+ options.fAlphaOption == SkJpegEncoder::AlphaOption::kBlendOnBlack) {
+ fProc = transform_scanline_F16_to_premul_8888;
+ } else {
+ fProc = transform_scanline_F16_to_8888;
+ }
+ jpegColorType = JCS_EXT_RGBA;
+ numComponents = 4;
+ break;
+ default:
+ return false;
+ }
+
+ fCInfo.image_width = srcInfo.width();
+ fCInfo.image_height = srcInfo.height();
+ fCInfo.in_color_space = jpegColorType;
+ fCInfo.input_components = numComponents;
+ jpeg_set_defaults(&fCInfo);
+
+ if (numComponents != 1) {
+ switch (options.fDownsample) {
+ case SkJpegEncoder::Downsample::k420:
+ SkASSERT(2 == fCInfo.comp_info[0].h_samp_factor);
+ SkASSERT(2 == fCInfo.comp_info[0].v_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[1].h_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[1].v_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[2].h_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[2].v_samp_factor);
+ break;
+ case SkJpegEncoder::Downsample::k422:
+ fCInfo.comp_info[0].h_samp_factor = 2;
+ fCInfo.comp_info[0].v_samp_factor = 1;
+ SkASSERT(1 == fCInfo.comp_info[1].h_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[1].v_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[2].h_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[2].v_samp_factor);
+ break;
+ case SkJpegEncoder::Downsample::k444:
+ fCInfo.comp_info[0].h_samp_factor = 1;
+ fCInfo.comp_info[0].v_samp_factor = 1;
+ SkASSERT(1 == fCInfo.comp_info[1].h_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[1].v_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[2].h_samp_factor);
+ SkASSERT(1 == fCInfo.comp_info[2].v_samp_factor);
+ break;
+ }
+ }
+
+ // Tells libjpeg-turbo to compute optimal Huffman coding tables
+ // for the image. This improves compression at the cost of
+ // slower encode performance.
+ fCInfo.optimize_coding = TRUE;
+ return true;
+}
+
+// Convert a row of an SkYUVAPixmaps to a row of Y,U,V triples.
+// TODO(ccameron): This is horribly inefficient.
+static void yuva_copy_row(const SkYUVAPixmaps* src, int row, uint8_t* dst) {
+ int width = src->plane(0).width();
+ switch (src->yuvaInfo().planeConfig()) {
+ case SkYUVAInfo::PlaneConfig::kY_U_V: {
+ auto [ssWidthU, ssHeightU] = src->yuvaInfo().planeSubsamplingFactors(1);
+ auto [ssWidthV, ssHeightV] = src->yuvaInfo().planeSubsamplingFactors(2);
+ const uint8_t* srcY = reinterpret_cast<const uint8_t*>(src->plane(0).addr(0, row));
+ const uint8_t* srcU =
+ reinterpret_cast<const uint8_t*>(src->plane(1).addr(0, row / ssHeightU));
+ const uint8_t* srcV =
+ reinterpret_cast<const uint8_t*>(src->plane(2).addr(0, row / ssHeightV));
+ for (int col = 0; col < width; ++col) {
+ dst[3 * col + 0] = srcY[col];
+ dst[3 * col + 1] = srcU[col / ssWidthU];
+ dst[3 * col + 2] = srcV[col / ssWidthV];
+ }
+ break;
+ }
+ case SkYUVAInfo::PlaneConfig::kY_UV: {
+ auto [ssWidthUV, ssHeightUV] = src->yuvaInfo().planeSubsamplingFactors(1);
+ const uint8_t* srcY = reinterpret_cast<const uint8_t*>(src->plane(0).addr(0, row));
+ const uint8_t* srcUV =
+ reinterpret_cast<const uint8_t*>(src->plane(1).addr(0, row / ssHeightUV));
+ for (int col = 0; col < width; ++col) {
+ dst[3 * col + 0] = srcY[col];
+ dst[3 * col + 1] = srcUV[2 * (col / ssWidthUV) + 0];
+ dst[3 * col + 2] = srcUV[2 * (col / ssWidthUV) + 1];
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+bool SkJpegEncoderMgr::setParams(const SkYUVAPixmapInfo& srcInfo,
+ const SkJpegEncoder::Options& options) {
+ fCInfo.image_width = srcInfo.yuvaInfo().width();
+ fCInfo.image_height = srcInfo.yuvaInfo().height();
+ fCInfo.in_color_space = JCS_YCbCr;
+ fCInfo.input_components = 3;
+ jpeg_set_defaults(&fCInfo);
+
+ // Support no color space conversion.
+ if (srcInfo.yuvColorSpace() != kJPEG_Full_SkYUVColorSpace) {
+ return false;
+ }
+
+ // Support only 8-bit data.
+ switch (srcInfo.dataType()) {
+ case SkYUVAPixmapInfo::DataType::kUnorm8:
+ break;
+ default:
+ return false;
+ }
+
+ // Support only Y,U,V and Y,UV configurations (they are the only ones supported by
+ // yuva_copy_row).
+ switch (srcInfo.yuvaInfo().planeConfig()) {
+ case SkYUVAInfo::PlaneConfig::kY_U_V:
+ case SkYUVAInfo::PlaneConfig::kY_UV:
+ break;
+ default:
+ return false;
+ }
+
+ // Specify to the encoder to use the same subsampling as the input image. The U and V planes
+ // always have a sampling factor of 1.
+ auto [ssHoriz, ssVert] = SkYUVAInfo::SubsamplingFactors(srcInfo.yuvaInfo().subsampling());
+ fCInfo.comp_info[0].h_samp_factor = ssHoriz;
+ fCInfo.comp_info[0].v_samp_factor = ssVert;
+
+ fCInfo.optimize_coding = TRUE;
+ return true;
+}
+
+std::unique_ptr<SkEncoder> SkJpegEncoder::Make(SkWStream* dst,
+ const SkPixmap& src,
+ const Options& options) {
+ return Make(dst, &src, nullptr, nullptr, options);
+}
+
+std::unique_ptr<SkEncoder> SkJpegEncoder::Make(SkWStream* dst,
+ const SkYUVAPixmaps& src,
+ const SkColorSpace* srcColorSpace,
+ const Options& options) {
+ return Make(dst, nullptr, &src, srcColorSpace, options);
+}
+
+std::unique_ptr<SkEncoder> SkJpegEncoder::Make(SkWStream* dst,
+ const SkPixmap* src,
+ const SkYUVAPixmaps* srcYUVA,
+ const SkColorSpace* srcYUVAColorSpace,
+ const Options& options) {
+ // Exactly one of |src| or |srcYUVA| should be specified.
+ if (srcYUVA) {
+ SkASSERT(!src);
+ if (!srcYUVA->isValid()) {
+ return nullptr;
+ }
+ } else {
+ SkASSERT(src);
+ if (!src || !SkPixmapIsValid(*src)) {
+ return nullptr;
+ }
+ }
+
+ std::unique_ptr<SkJpegEncoderMgr> encoderMgr = SkJpegEncoderMgr::Make(dst);
+
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(encoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return nullptr;
+ }
+
+ if (srcYUVA) {
+ if (!encoderMgr->setParams(srcYUVA->pixmapsInfo(), options)) {
+ return nullptr;
+ }
+ } else {
+ if (!encoderMgr->setParams(src->info(), options)) {
+ return nullptr;
+ }
+ }
+
+ jpeg_set_quality(encoderMgr->cinfo(), options.fQuality, TRUE);
+ jpeg_start_compress(encoderMgr->cinfo(), TRUE);
+
+ // Write XMP metadata. This will only write the standard XMP segment.
+ // TODO(ccameron): Split this into a standard and extended XMP segment if needed.
+ if (options.xmpMetadata) {
+ SkDynamicMemoryWStream s;
+ s.write(kXMPStandardSig, sizeof(kXMPStandardSig));
+ s.write(options.xmpMetadata->data(), options.xmpMetadata->size());
+ auto data = s.detachAsData();
+ jpeg_write_marker(encoderMgr->cinfo(), kXMPMarker, data->bytes(), data->size());
+ }
+
+ // Write the ICC profile.
+ // TODO(ccameron): This limits ICC profile size to a single segment's parameters (less than
+ // 64k). Split larger profiles into more segments.
+ sk_sp<SkData> icc = icc_from_color_space(srcYUVA ? srcYUVAColorSpace : src->colorSpace(),
+ options.fICCProfile,
+ options.fICCProfileDescription);
+ if (icc) {
+ // Create a contiguous block of memory with the icc signature followed by the profile.
+ sk_sp<SkData> markerData =
+ SkData::MakeUninitialized(kICCMarkerHeaderSize + icc->size());
+ uint8_t* ptr = (uint8_t*) markerData->writable_data();
+ memcpy(ptr, kICCSig, sizeof(kICCSig));
+ ptr += sizeof(kICCSig);
+ *ptr++ = 1; // This is the first marker.
+ *ptr++ = 1; // Out of one total markers.
+ memcpy(ptr, icc->data(), icc->size());
+
+ jpeg_write_marker(encoderMgr->cinfo(), kICCMarker, markerData->bytes(), markerData->size());
+ }
+
+ if (srcYUVA) {
+ return std::unique_ptr<SkJpegEncoder>(new SkJpegEncoder(std::move(encoderMgr), srcYUVA));
+ }
+ return std::unique_ptr<SkJpegEncoder>(new SkJpegEncoder(std::move(encoderMgr), *src));
+}
+
+SkJpegEncoder::SkJpegEncoder(std::unique_ptr<SkJpegEncoderMgr> encoderMgr, const SkPixmap& src)
+ : INHERITED(src,
+ encoderMgr->proc() ? encoderMgr->cinfo()->input_components * src.width() : 0)
+ , fEncoderMgr(std::move(encoderMgr)) {}
+
+SkJpegEncoder::SkJpegEncoder(std::unique_ptr<SkJpegEncoderMgr> encoderMgr, const SkYUVAPixmaps* src)
+ : INHERITED(src->plane(0), encoderMgr->cinfo()->input_components * src->yuvaInfo().width())
+ , fEncoderMgr(std::move(encoderMgr))
+ , fSrcYUVA(src) {}
+
+SkJpegEncoder::~SkJpegEncoder() {}
+
+bool SkJpegEncoder::onEncodeRows(int numRows) {
+ skjpeg_error_mgr::AutoPushJmpBuf jmp(fEncoderMgr->errorMgr());
+ if (setjmp(jmp)) {
+ return false;
+ }
+
+ if (fSrcYUVA) {
+ // TODO(ccameron): Consider using jpeg_write_raw_data, to avoid having to re-pack the data.
+ for (int i = 0; i < numRows; i++) {
+ yuva_copy_row(fSrcYUVA, fCurrRow + i, fStorage.get());
+ JSAMPLE* jpegSrcRow = fStorage.get();
+ jpeg_write_scanlines(fEncoderMgr->cinfo(), &jpegSrcRow, 1);
+ }
+ } else {
+ const size_t srcBytes = SkColorTypeBytesPerPixel(fSrc.colorType()) * fSrc.width();
+ const size_t jpegSrcBytes = fEncoderMgr->cinfo()->input_components * fSrc.width();
+ const void* srcRow = fSrc.addr(0, fCurrRow);
+ for (int i = 0; i < numRows; i++) {
+ JSAMPLE* jpegSrcRow = (JSAMPLE*)srcRow;
+ if (fEncoderMgr->proc()) {
+ sk_msan_assert_initialized(srcRow, SkTAddOffset<const void>(srcRow, srcBytes));
+ fEncoderMgr->proc()((char*)fStorage.get(),
+ (const char*)srcRow,
+ fSrc.width(),
+ fEncoderMgr->cinfo()->input_components);
+ jpegSrcRow = fStorage.get();
+ sk_msan_assert_initialized(jpegSrcRow,
+ SkTAddOffset<const void>(jpegSrcRow, jpegSrcBytes));
+ } else {
+ // Same as above, but this repetition allows determining whether a
+ // proc was used when msan asserts.
+ sk_msan_assert_initialized(jpegSrcRow,
+ SkTAddOffset<const void>(jpegSrcRow, jpegSrcBytes));
+ }
+
+ jpeg_write_scanlines(fEncoderMgr->cinfo(), &jpegSrcRow, 1);
+ srcRow = SkTAddOffset<const void>(srcRow, fSrc.rowBytes());
+ }
+ }
+
+ fCurrRow += numRows;
+ if (fCurrRow == fSrc.height()) {
+ jpeg_finish_compress(fEncoderMgr->cinfo());
+ }
+
+ return true;
+}
+
+bool SkJpegEncoder::Encode(SkWStream* dst, const SkPixmap& src, const Options& options) {
+ auto encoder = SkJpegEncoder::Make(dst, src, options);
+ return encoder.get() && encoder->encodeRows(src.height());
+}
+
+bool SkJpegEncoder::Encode(SkWStream* dst,
+ const SkYUVAPixmaps& src,
+ const SkColorSpace* srcColorSpace,
+ const Options& options) {
+ auto encoder = SkJpegEncoder::Make(dst, src, srcColorSpace, options);
+ return encoder.get() && encoder->encodeRows(src.yuvaInfo().height());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/encode/SkJpegGainmapEncoder.cpp b/gfx/skia/skia/src/encode/SkJpegGainmapEncoder.cpp
new file mode 100644
index 0000000000..80709def8c
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkJpegGainmapEncoder.cpp
@@ -0,0 +1,413 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkJpegGainmapEncoder.h"
+
+#ifdef SK_ENCODE_JPEG
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkStream.h"
+#include "include/encode/SkJpegEncoder.h"
+#include "include/private/SkGainmapInfo.h"
+#include "src/codec/SkCodecPriv.h"
+#include "src/codec/SkJpegConstants.h"
+#include "src/codec/SkJpegMultiPicture.h"
+#include "src/codec/SkJpegPriv.h"
+#include "src/codec/SkJpegSegmentScan.h"
+
+#include <vector>
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// XMP helpers
+
+void xmp_write_prefix(SkDynamicMemoryWStream& s, const std::string& ns, const std::string& attrib) {
+ s.writeText(ns.c_str());
+ s.writeText(":");
+ s.writeText(attrib.c_str());
+ s.writeText("=\"");
+}
+
+void xmp_write_suffix(SkDynamicMemoryWStream& s, bool newLine) {
+ s.writeText("\"");
+ if (newLine) {
+ s.writeText("\n");
+ }
+}
+
+void xmp_write_per_channel_attr(SkDynamicMemoryWStream& s,
+ const std::string& ns,
+ const std::string& attrib,
+ SkScalar r,
+ SkScalar g,
+ SkScalar b,
+ bool newLine = true) {
+ xmp_write_prefix(s, ns, attrib);
+ if (r == g && r == b) {
+ s.writeScalarAsText(r);
+ } else {
+ s.writeScalarAsText(r);
+ s.writeText(",");
+ s.writeScalarAsText(g);
+ s.writeText(",");
+ s.writeScalarAsText(b);
+ }
+ xmp_write_suffix(s, newLine);
+}
+
+void xmp_write_scalar_attr(SkDynamicMemoryWStream& s,
+ const std::string& ns,
+ const std::string& attrib,
+ SkScalar value,
+ bool newLine = true) {
+ xmp_write_prefix(s, ns, attrib);
+ s.writeScalarAsText(value);
+ xmp_write_suffix(s, newLine);
+}
+
+void xmp_write_decimal_attr(SkDynamicMemoryWStream& s,
+ const std::string& ns,
+ const std::string& attrib,
+ int32_t value,
+ bool newLine = true) {
+ xmp_write_prefix(s, ns, attrib);
+ s.writeDecAsText(value);
+ xmp_write_suffix(s, newLine);
+}
+
+void xmp_write_string_attr(SkDynamicMemoryWStream& s,
+ const std::string& ns,
+ const std::string& attrib,
+ const std::string& value,
+ bool newLine = true) {
+ xmp_write_prefix(s, ns, attrib);
+ s.writeText(value.c_str());
+ xmp_write_suffix(s, newLine);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// JpegR encoding
+
+bool SkJpegGainmapEncoder::EncodeJpegR(SkWStream* dst,
+ const SkPixmap& base,
+ const SkJpegEncoder::Options& baseOptions,
+ const SkPixmap& gainmap,
+ const SkJpegEncoder::Options& gainmapOptions,
+ const SkGainmapInfo& gainmapInfo) {
+ return EncodeHDRGM(dst, base, baseOptions, gainmap, gainmapOptions, gainmapInfo);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// HDRGM encoding
+
+// Generate the XMP metadata for an HDRGM file.
+sk_sp<SkData> get_hdrgm_xmp_data(const SkGainmapInfo& gainmapInfo) {
+ const float kLog2 = sk_float_log(2.f);
+ SkDynamicMemoryWStream s;
+ s.writeText(
+ "<x:xmpmeta xmlns:x=\"adobe:ns:meta/\" x:xmptk=\"XMP Core 5.5.0\">\n"
+ " <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\n"
+ " <rdf:Description rdf:about=\"\"\n"
+ " xmlns:hdrgm=\"http://ns.adobe.com/hdr-gain-map/1.0/\"\n");
+ const std::string hdrgmPrefix = " hdrgm";
+ xmp_write_string_attr(s, hdrgmPrefix, "Version", "1.0");
+ xmp_write_per_channel_attr(s,
+ hdrgmPrefix,
+ "GainMapMin",
+ sk_float_log(gainmapInfo.fGainmapRatioMin.fR) / kLog2,
+ sk_float_log(gainmapInfo.fGainmapRatioMin.fG) / kLog2,
+ sk_float_log(gainmapInfo.fGainmapRatioMin.fB) / kLog2);
+ xmp_write_per_channel_attr(s,
+ hdrgmPrefix,
+ "GainMapMax",
+ sk_float_log(gainmapInfo.fGainmapRatioMax.fR) / kLog2,
+ sk_float_log(gainmapInfo.fGainmapRatioMax.fG) / kLog2,
+ sk_float_log(gainmapInfo.fGainmapRatioMax.fB) / kLog2);
+ xmp_write_per_channel_attr(s,
+ hdrgmPrefix,
+ "Gamma",
+ gainmapInfo.fGainmapGamma.fR,
+ gainmapInfo.fGainmapGamma.fG,
+ gainmapInfo.fGainmapGamma.fB);
+ xmp_write_per_channel_attr(s,
+ hdrgmPrefix,
+ "OffsetSDR",
+ gainmapInfo.fEpsilonSdr.fR,
+ gainmapInfo.fEpsilonSdr.fG,
+ gainmapInfo.fEpsilonSdr.fB);
+ xmp_write_per_channel_attr(s,
+ hdrgmPrefix,
+ "OffsetHDR",
+ gainmapInfo.fEpsilonHdr.fR,
+ gainmapInfo.fEpsilonHdr.fG,
+ gainmapInfo.fEpsilonHdr.fB);
+ xmp_write_scalar_attr(
+ s, hdrgmPrefix, "HDRCapacityMin", sk_float_log(gainmapInfo.fDisplayRatioSdr) / kLog2);
+ xmp_write_scalar_attr(
+ s, hdrgmPrefix, "HDRCapacityMax", sk_float_log(gainmapInfo.fDisplayRatioHdr) / kLog2);
+ switch (gainmapInfo.fBaseImageType) {
+ case SkGainmapInfo::BaseImageType::kSDR:
+ xmp_write_string_attr(s, hdrgmPrefix, "BaseRendition", "SDR", /*newLine=*/false);
+ break;
+ case SkGainmapInfo::BaseImageType::kHDR:
+ xmp_write_string_attr(s, hdrgmPrefix, "BaseRendition", "HDR", /*newLine=*/false);
+ break;
+ }
+ s.writeText(
+ "/>\n"
+ " </rdf:RDF>\n"
+ "</x:xmpmeta>");
+ return s.detachAsData();
+}
+
+// Generate the GContainer metadata for an image with a JPEG gainmap.
+static sk_sp<SkData> get_gcontainer_xmp_data(size_t gainmapItemLength) {
+ SkDynamicMemoryWStream s;
+ s.writeText(
+ "<x:xmpmeta xmlns:x=\"adobe:ns:meta/\" x:xmptk=\"Adobe XMP Core 5.1.2\">\n"
+ " <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\n"
+ " <rdf:Description\n"
+ " xmlns:Container=\"http://ns.google.com/photos/1.0/container/\"\n"
+ " xmlns:Item=\"http://ns.google.com/photos/1.0/container/item/\">\n"
+ " <Container:Directory>\n"
+ " <rdf:Seq>\n"
+ " <rdf:li>\n"
+ " <Container:Item\n"
+ " Item:Semantic=\"Primary\"\n"
+ " Item:Mime=\"image/jpeg\"/>\n"
+ " </rdf:li>\n"
+ " <rdf:li>\n"
+ " <Container:Item\n"
+ " Item:Semantic=\"RecoveryMap\"\n"
+ " Item:Mime=\"image/jpeg\"\n"
+ " ");
+ xmp_write_decimal_attr(s, "Item", "Length", gainmapItemLength, /*newLine=*/false);
+ s.writeText(
+ "/>\n"
+ " </rdf:li>\n"
+ " </rdf:Seq>\n"
+ " </Container:Directory>\n"
+ " </rdf:Description>\n"
+ " </rdf:RDF>\n"
+ "</x:xmpmeta>\n");
+ return s.detachAsData();
+}
+
+// Split an SkData into segments.
+std::vector<sk_sp<SkData>> get_hdrgm_image_segments(sk_sp<SkData> image,
+ size_t segmentMaxDataSize) {
+ // Compute the total size of the header to a gainmap image segment (not including the 2 bytes
+ // for the segment size, which the encoder is responsible for writing).
+ constexpr size_t kGainmapHeaderSize = sizeof(kGainmapSig) + 2 * kGainmapMarkerIndexSize;
+
+ // Compute the payload size for each segment.
+ const size_t kGainmapPayloadSize = segmentMaxDataSize - kGainmapHeaderSize;
+
+ // Compute the number of segments we'll need.
+ const size_t segmentCount = (image->size() + kGainmapPayloadSize - 1) / kGainmapPayloadSize;
+ std::vector<sk_sp<SkData>> result;
+ result.reserve(segmentCount);
+
+ // Move |imageData| through |image| until it hits |imageDataEnd|.
+ const uint8_t* imageData = image->bytes();
+ const uint8_t* imageDataEnd = image->bytes() + image->size();
+ while (imageData < imageDataEnd) {
+ SkDynamicMemoryWStream segmentStream;
+
+ // Write the signature.
+ segmentStream.write(kGainmapSig, sizeof(kGainmapSig));
+
+ // Write the segment index as big-endian.
+ size_t segmentIndex = result.size() + 1;
+ uint8_t segmentIndexBytes[2] = {
+ static_cast<uint8_t>(segmentIndex / 256u),
+ static_cast<uint8_t>(segmentIndex % 256u),
+ };
+ segmentStream.write(segmentIndexBytes, sizeof(segmentIndexBytes));
+
+ // Write the segment count as big-endian.
+ uint8_t segmentCountBytes[2] = {
+ static_cast<uint8_t>(segmentCount / 256u),
+ static_cast<uint8_t>(segmentCount % 256u),
+ };
+ segmentStream.write(segmentCountBytes, sizeof(segmentCountBytes));
+
+ // Verify that our header size math is correct.
+ SkASSERT(segmentStream.bytesWritten() == kGainmapHeaderSize);
+
+ // Write the rest of the segment.
+ size_t bytesToWrite =
+ std::min(imageDataEnd - imageData, static_cast<intptr_t>(kGainmapPayloadSize));
+ segmentStream.write(imageData, bytesToWrite);
+ imageData += bytesToWrite;
+
+ // Verify that our data size math is correct.
+ if (segmentIndex == segmentCount) {
+ SkASSERT(segmentStream.bytesWritten() <= segmentMaxDataSize);
+ } else {
+ SkASSERT(segmentStream.bytesWritten() == segmentMaxDataSize);
+ }
+ result.push_back(segmentStream.detachAsData());
+ }
+
+ // Verify that our segment count math was correct.
+ SkASSERT(imageData == imageDataEnd);
+ SkASSERT(result.size() == segmentCount);
+ return result;
+}
+
+static sk_sp<SkData> encode_to_data(const SkPixmap& pm,
+ const SkJpegEncoder::Options& options,
+ SkData* xmpMetadata) {
+ SkJpegEncoder::Options optionsWithXmp = options;
+ optionsWithXmp.xmpMetadata = xmpMetadata;
+ SkDynamicMemoryWStream encodeStream;
+ auto encoder = SkJpegEncoder::Make(&encodeStream, pm, optionsWithXmp);
+ if (!encoder || !encoder->encodeRows(pm.height())) {
+ return nullptr;
+ }
+ return encodeStream.detachAsData();
+}
+
+static sk_sp<SkData> get_mpf_segment(const SkJpegMultiPictureParameters& mpParams) {
+ SkDynamicMemoryWStream s;
+ auto segmentParameters = mpParams.serialize();
+ const size_t mpParameterLength = kJpegSegmentParameterLengthSize + segmentParameters->size();
+ s.write8(0xFF);
+ s.write8(kMpfMarker);
+ s.write8(mpParameterLength / 256);
+ s.write8(mpParameterLength % 256);
+ s.write(segmentParameters->data(), segmentParameters->size());
+ return s.detachAsData();
+}
+
+bool SkJpegGainmapEncoder::EncodeHDRGM(SkWStream* dst,
+ const SkPixmap& base,
+ const SkJpegEncoder::Options& baseOptions,
+ const SkPixmap& gainmap,
+ const SkJpegEncoder::Options& gainmapOptions,
+ const SkGainmapInfo& gainmapInfo) {
+ // Encode the gainmap image with the HDRGM XMP metadata.
+ sk_sp<SkData> gainmapData;
+ {
+ // We will include the HDRGM XMP metadata in the gainmap image.
+ auto hdrgmXmp = get_hdrgm_xmp_data(gainmapInfo);
+ gainmapData = encode_to_data(gainmap, gainmapOptions, hdrgmXmp.get());
+ if (!gainmapData) {
+ SkCodecPrintf("Failed to encode gainmap image.\n");
+ return false;
+ }
+ }
+
+ // Encode the base image with the Container XMP metadata.
+ sk_sp<SkData> baseData;
+ {
+ auto containerXmp = get_gcontainer_xmp_data(static_cast<int32_t>(gainmapData->size()));
+ baseData = encode_to_data(base, baseOptions, containerXmp.get());
+ if (!baseData) {
+ SkCodecPrintf("Failed to encode base image.\n");
+ return false;
+ }
+ }
+
+ // Combine them into an MPF.
+ const SkData* images[] = {
+ baseData.get(),
+ gainmapData.get(),
+ };
+ return MakeMPF(dst, images, 2);
+}
+
+bool SkJpegGainmapEncoder::MakeMPF(SkWStream* dst, const SkData** images, size_t imageCount) {
+ if (imageCount < 1) {
+ return true;
+ }
+
+ // Create a scan of the primary image.
+ SkJpegSegmentScanner primaryScan;
+ primaryScan.onBytes(images[0]->data(), images[0]->size());
+ if (!primaryScan.isDone()) {
+ SkCodecPrintf("Failed to scan encoded primary image header.\n");
+ return false;
+ }
+
+ // Copy the primary image up to its StartOfScan, then insert the MPF segment, then copy the rest
+ // of the primary image, and all other images.
+ size_t bytesRead = 0;
+ size_t bytesWritten = 0;
+ for (const auto& segment : primaryScan.getSegments()) {
+ // Write all ECD before this segment.
+ {
+ size_t ecdBytesToWrite = segment.offset - bytesRead;
+ if (!dst->write(images[0]->bytes() + bytesRead, ecdBytesToWrite)) {
+ SkCodecPrintf("Failed to write entropy coded data.\n");
+ return false;
+ }
+ bytesWritten += ecdBytesToWrite;
+ bytesRead = segment.offset;
+ }
+
+ // If this isn't a StartOfScan, write just the segment.
+ if (segment.marker != kJpegMarkerStartOfScan) {
+ const size_t bytesToWrite = kJpegMarkerCodeSize + segment.parameterLength;
+ if (!dst->write(images[0]->bytes() + bytesRead, bytesToWrite)) {
+ SkCodecPrintf("Failed to copy segment.\n");
+ return false;
+ }
+ bytesWritten += bytesToWrite;
+ bytesRead += bytesToWrite;
+ continue;
+ }
+
+ // We're now at the StartOfScan.
+ const size_t bytesRemaining = images[0]->size() - bytesRead;
+
+ // Compute the MPF offsets for the images.
+ SkJpegMultiPictureParameters mpParams;
+ {
+ mpParams.images.resize(imageCount);
+ const size_t mpSegmentSize = kJpegMarkerCodeSize + kJpegSegmentParameterLengthSize +
+ mpParams.serialize()->size();
+ mpParams.images[0].size =
+ static_cast<uint32_t>(bytesWritten + mpSegmentSize + bytesRemaining);
+ uint32_t offset =
+ static_cast<uint32_t>(bytesRemaining + mpSegmentSize - kJpegMarkerCodeSize -
+ kJpegSegmentParameterLengthSize - sizeof(kMpfSig));
+ for (size_t i = 0; i < imageCount; ++i) {
+ mpParams.images[i].dataOffset = offset;
+ mpParams.images[i].size = static_cast<uint32_t>(images[i]->size());
+ offset += mpParams.images[i].size;
+ }
+ }
+
+ // Write the MPF segment.
+ auto mpfSegment = get_mpf_segment(mpParams);
+ if (!dst->write(mpfSegment->data(), mpfSegment->size())) {
+ SkCodecPrintf("Failed to write MPF segment.\n");
+ return false;
+ }
+
+ // Write the rest of the primary file.
+ if (!dst->write(images[0]->bytes() + bytesRead, bytesRemaining)) {
+ SkCodecPrintf("Failed to write remainder of primary image.\n");
+ return false;
+ }
+ bytesRead += bytesRemaining;
+ SkASSERT(bytesRead == images[0]->size());
+ break;
+ }
+
+ // Write the remaining files.
+ for (size_t i = 1; i < imageCount; ++i) {
+ if (!dst->write(images[i]->data(), images[i]->size())) {
+ SkCodecPrintf("Failed to write auxiliary image.\n");
+ }
+ }
+ return true;
+}
+
+#endif // SK_ENCODE_JPEG
diff --git a/gfx/skia/skia/src/encode/SkPngEncoder.cpp b/gfx/skia/skia/src/encode/SkPngEncoder.cpp
new file mode 100644
index 0000000000..55ca9f5239
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkPngEncoder.cpp
@@ -0,0 +1,493 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_ENCODE_PNG
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDataTable.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/encode/SkEncoder.h"
+#include "include/encode/SkPngEncoder.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTemplates.h"
+#include "modules/skcms/skcms.h"
+#include "src/base/SkMSAN.h"
+#include "src/codec/SkPngPriv.h"
+#include "src/encode/SkImageEncoderFns.h"
+#include "src/encode/SkImageEncoderPriv.h"
+
+#include <algorithm>
+#include <csetjmp>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <png.h>
+#include <pngconf.h>
+
+static_assert(PNG_FILTER_NONE == (int)SkPngEncoder::FilterFlag::kNone, "Skia libpng filter err.");
+static_assert(PNG_FILTER_SUB == (int)SkPngEncoder::FilterFlag::kSub, "Skia libpng filter err.");
+static_assert(PNG_FILTER_UP == (int)SkPngEncoder::FilterFlag::kUp, "Skia libpng filter err.");
+static_assert(PNG_FILTER_AVG == (int)SkPngEncoder::FilterFlag::kAvg, "Skia libpng filter err.");
+static_assert(PNG_FILTER_PAETH == (int)SkPngEncoder::FilterFlag::kPaeth, "Skia libpng filter err.");
+static_assert(PNG_ALL_FILTERS == (int)SkPngEncoder::FilterFlag::kAll, "Skia libpng filter err.");
+
+static constexpr bool kSuppressPngEncodeWarnings = true;
+
+static void sk_error_fn(png_structp png_ptr, png_const_charp msg) {
+ if (!kSuppressPngEncodeWarnings) {
+ SkDebugf("libpng encode error: %s\n", msg);
+ }
+
+ longjmp(png_jmpbuf(png_ptr), 1);
+}
+
+static void sk_write_fn(png_structp png_ptr, png_bytep data, png_size_t len) {
+ SkWStream* stream = (SkWStream*)png_get_io_ptr(png_ptr);
+ if (!stream->write(data, len)) {
+ png_error(png_ptr, "sk_write_fn cannot write to stream");
+ }
+}
+
+class SkPngEncoderMgr final : SkNoncopyable {
+public:
+
+ /*
+ * Create the decode manager
+ * Does not take ownership of stream
+ */
+ static std::unique_ptr<SkPngEncoderMgr> Make(SkWStream* stream);
+
+ bool setHeader(const SkImageInfo& srcInfo, const SkPngEncoder::Options& options);
+ bool setColorSpace(const SkImageInfo& info, const SkPngEncoder::Options& options);
+ bool writeInfo(const SkImageInfo& srcInfo);
+ void chooseProc(const SkImageInfo& srcInfo);
+
+ png_structp pngPtr() { return fPngPtr; }
+ png_infop infoPtr() { return fInfoPtr; }
+ int pngBytesPerPixel() const { return fPngBytesPerPixel; }
+ transform_scanline_proc proc() const { return fProc; }
+
+ ~SkPngEncoderMgr() {
+ png_destroy_write_struct(&fPngPtr, &fInfoPtr);
+ }
+
+private:
+
+ SkPngEncoderMgr(png_structp pngPtr, png_infop infoPtr)
+ : fPngPtr(pngPtr)
+ , fInfoPtr(infoPtr)
+ {}
+
+ png_structp fPngPtr;
+ png_infop fInfoPtr;
+ int fPngBytesPerPixel;
+ transform_scanline_proc fProc;
+};
+
+std::unique_ptr<SkPngEncoderMgr> SkPngEncoderMgr::Make(SkWStream* stream) {
+ png_structp pngPtr =
+ png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, sk_error_fn, nullptr);
+ if (!pngPtr) {
+ return nullptr;
+ }
+
+ png_infop infoPtr = png_create_info_struct(pngPtr);
+ if (!infoPtr) {
+ png_destroy_write_struct(&pngPtr, nullptr);
+ return nullptr;
+ }
+
+ png_set_write_fn(pngPtr, (void*)stream, sk_write_fn, nullptr);
+ return std::unique_ptr<SkPngEncoderMgr>(new SkPngEncoderMgr(pngPtr, infoPtr));
+}
+
+bool SkPngEncoderMgr::setHeader(const SkImageInfo& srcInfo, const SkPngEncoder::Options& options) {
+ if (setjmp(png_jmpbuf(fPngPtr))) {
+ return false;
+ }
+
+ int pngColorType;
+ png_color_8 sigBit;
+ int bitDepth = 8;
+ switch (srcInfo.colorType()) {
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType:
+ case kRGBA_F32_SkColorType:
+ sigBit.red = 16;
+ sigBit.green = 16;
+ sigBit.blue = 16;
+ sigBit.alpha = 16;
+ bitDepth = 16;
+ pngColorType = srcInfo.isOpaque() ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ fPngBytesPerPixel = 8;
+ break;
+ case kGray_8_SkColorType:
+ sigBit.gray = 8;
+ pngColorType = PNG_COLOR_TYPE_GRAY;
+ fPngBytesPerPixel = 1;
+ SkASSERT(srcInfo.isOpaque());
+ break;
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ sigBit.red = 8;
+ sigBit.green = 8;
+ sigBit.blue = 8;
+ sigBit.alpha = 8;
+ pngColorType = srcInfo.isOpaque() ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ fPngBytesPerPixel = srcInfo.isOpaque() ? 3 : 4;
+ break;
+ case kRGB_888x_SkColorType:
+ sigBit.red = 8;
+ sigBit.green = 8;
+ sigBit.blue = 8;
+ pngColorType = PNG_COLOR_TYPE_RGB;
+ fPngBytesPerPixel = 3;
+ SkASSERT(srcInfo.isOpaque());
+ break;
+ case kARGB_4444_SkColorType:
+ if (kUnpremul_SkAlphaType == srcInfo.alphaType()) {
+ return false;
+ }
+
+ sigBit.red = 4;
+ sigBit.green = 4;
+ sigBit.blue = 4;
+ sigBit.alpha = 4;
+ pngColorType = srcInfo.isOpaque() ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ fPngBytesPerPixel = srcInfo.isOpaque() ? 3 : 4;
+ break;
+ case kRGB_565_SkColorType:
+ sigBit.red = 5;
+ sigBit.green = 6;
+ sigBit.blue = 5;
+ pngColorType = PNG_COLOR_TYPE_RGB;
+ fPngBytesPerPixel = 3;
+ SkASSERT(srcInfo.isOpaque());
+ break;
+ case kAlpha_8_SkColorType: // store as gray+alpha, but ignore gray
+ sigBit.gray = kGraySigBit_GrayAlphaIsJustAlpha;
+ sigBit.alpha = 8;
+ pngColorType = PNG_COLOR_TYPE_GRAY_ALPHA;
+ fPngBytesPerPixel = 2;
+ break;
+ case kRGBA_1010102_SkColorType:
+ bitDepth = 16;
+ sigBit.red = 10;
+ sigBit.green = 10;
+ sigBit.blue = 10;
+ sigBit.alpha = 2;
+ pngColorType = srcInfo.isOpaque() ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGB_ALPHA;
+ fPngBytesPerPixel = 8;
+ break;
+ case kRGB_101010x_SkColorType:
+ bitDepth = 16;
+ sigBit.red = 10;
+ sigBit.green = 10;
+ sigBit.blue = 10;
+ pngColorType = PNG_COLOR_TYPE_RGB;
+ fPngBytesPerPixel = 6;
+ break;
+ default:
+ return false;
+ }
+
+ png_set_IHDR(fPngPtr, fInfoPtr, srcInfo.width(), srcInfo.height(),
+ bitDepth, pngColorType,
+ PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE,
+ PNG_FILTER_TYPE_BASE);
+ png_set_sBIT(fPngPtr, fInfoPtr, &sigBit);
+
+ int filters = (int)options.fFilterFlags & (int)SkPngEncoder::FilterFlag::kAll;
+ SkASSERT(filters == (int)options.fFilterFlags);
+ png_set_filter(fPngPtr, PNG_FILTER_TYPE_BASE, filters);
+
+ int zlibLevel = std::min(std::max(0, options.fZLibLevel), 9);
+ SkASSERT(zlibLevel == options.fZLibLevel);
+ png_set_compression_level(fPngPtr, zlibLevel);
+
+ // Set comments in tEXt chunk
+ const sk_sp<SkDataTable>& comments = options.fComments;
+ if (comments != nullptr) {
+ std::vector<png_text> png_texts(comments->count());
+ std::vector<SkString> clippedKeys;
+ for (int i = 0; i < comments->count() / 2; ++i) {
+ const char* keyword;
+ const char* originalKeyword = comments->atStr(2 * i);
+ const char* text = comments->atStr(2 * i + 1);
+ if (strlen(originalKeyword) <= PNG_KEYWORD_MAX_LENGTH) {
+ keyword = originalKeyword;
+ } else {
+ SkDEBUGFAILF("PNG tEXt keyword should be no longer than %d.",
+ PNG_KEYWORD_MAX_LENGTH);
+ clippedKeys.emplace_back(originalKeyword, PNG_KEYWORD_MAX_LENGTH);
+ keyword = clippedKeys.back().c_str();
+ }
+ // It seems safe to convert png_const_charp to png_charp for key/text,
+ // and we don't have to provide text_length and other fields as we're providing
+ // 0-terminated c_str with PNG_TEXT_COMPRESSION_NONE (no compression, no itxt).
+ png_texts[i].compression = PNG_TEXT_COMPRESSION_NONE;
+ png_texts[i].key = (png_charp)keyword;
+ png_texts[i].text = (png_charp)text;
+ }
+ png_set_text(fPngPtr, fInfoPtr, png_texts.data(), png_texts.size());
+ }
+
+ return true;
+}
+
+static transform_scanline_proc choose_proc(const SkImageInfo& info) {
+ switch (info.colorType()) {
+ case kUnknown_SkColorType:
+ break;
+
+ // TODO: I don't think this can just use kRGBA's procs.
+ // kPremul is especially tricky here, since it's presumably TF⁻¹(rgb * a),
+ // so to get at unpremul rgb we'd need to undo the transfer function first.
+ case kSRGBA_8888_SkColorType: return nullptr;
+
+ case kRGBA_8888_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_RGBX;
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_memcpy;
+ case kPremul_SkAlphaType:
+ return transform_scanline_rgbA;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kBGRA_8888_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_BGRX;
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_BGRA;
+ case kPremul_SkAlphaType:
+ return transform_scanline_bgrA;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kRGB_565_SkColorType:
+ return transform_scanline_565;
+ case kRGB_888x_SkColorType:
+ return transform_scanline_RGBX;
+ case kARGB_4444_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ return transform_scanline_444;
+ case kPremul_SkAlphaType:
+ return transform_scanline_4444;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kGray_8_SkColorType:
+ return transform_scanline_memcpy;
+
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_F16;
+ case kPremul_SkAlphaType:
+ return transform_scanline_F16_premul;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kRGBA_F32_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_F32;
+ case kPremul_SkAlphaType:
+ return transform_scanline_F32_premul;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kRGBA_1010102_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_1010102;
+ case kPremul_SkAlphaType:
+ return transform_scanline_1010102_premul;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kBGRA_1010102_SkColorType:
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ case kUnpremul_SkAlphaType:
+ return transform_scanline_bgra_1010102;
+ case kPremul_SkAlphaType:
+ return transform_scanline_bgra_1010102_premul;
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+ case kRGB_101010x_SkColorType: return transform_scanline_101010x;
+ case kBGR_101010x_SkColorType: return transform_scanline_bgr_101010x;
+ case kBGR_101010x_XR_SkColorType: SkASSERT(false); return nullptr;
+
+ case kAlpha_8_SkColorType:
+ return transform_scanline_A8_to_GrayAlpha;
+ case kR8G8_unorm_SkColorType:
+ case kR16G16_unorm_SkColorType:
+ case kR16G16_float_SkColorType:
+ case kA16_unorm_SkColorType:
+ case kA16_float_SkColorType:
+ case kR16G16B16A16_unorm_SkColorType:
+ case kR8_unorm_SkColorType:
+ return nullptr;
+ }
+ SkASSERT(false);
+ return nullptr;
+}
+
+static void set_icc(png_structp png_ptr,
+ png_infop info_ptr,
+ const SkImageInfo& info,
+ const skcms_ICCProfile* profile,
+ const char* profile_description) {
+ sk_sp<SkData> icc = icc_from_color_space(info, profile, profile_description);
+ if (!icc) {
+ return;
+ }
+
+#if PNG_LIBPNG_VER_MAJOR > 1 || (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR >= 5)
+ const char* name = "Skia";
+ png_const_bytep iccPtr = icc->bytes();
+#else
+ SkString str("Skia");
+ char* name = str.data();
+ png_charp iccPtr = (png_charp) icc->writable_data();
+#endif
+ png_set_iCCP(png_ptr, info_ptr, name, 0, iccPtr, icc->size());
+}
+
+bool SkPngEncoderMgr::setColorSpace(const SkImageInfo& info, const SkPngEncoder::Options& options) {
+ if (setjmp(png_jmpbuf(fPngPtr))) {
+ return false;
+ }
+
+ if (info.colorSpace() && info.colorSpace()->isSRGB()) {
+ png_set_sRGB(fPngPtr, fInfoPtr, PNG_sRGB_INTENT_PERCEPTUAL);
+ } else {
+ set_icc(fPngPtr, fInfoPtr, info, options.fICCProfile, options.fICCProfileDescription);
+ }
+
+ return true;
+}
+
+bool SkPngEncoderMgr::writeInfo(const SkImageInfo& srcInfo) {
+ if (setjmp(png_jmpbuf(fPngPtr))) {
+ return false;
+ }
+
+ png_write_info(fPngPtr, fInfoPtr);
+ if (kRGBA_F16_SkColorType == srcInfo.colorType() &&
+ kOpaque_SkAlphaType == srcInfo.alphaType())
+ {
+ // For kOpaque, kRGBA_F16, we will keep the row as RGBA and tell libpng
+ // to skip the alpha channel.
+ png_set_filler(fPngPtr, 0, PNG_FILLER_AFTER);
+ }
+
+ return true;
+}
+
+void SkPngEncoderMgr::chooseProc(const SkImageInfo& srcInfo) {
+ fProc = choose_proc(srcInfo);
+}
+
+std::unique_ptr<SkEncoder> SkPngEncoder::Make(SkWStream* dst, const SkPixmap& src,
+ const Options& options) {
+ if (!SkPixmapIsValid(src)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkPngEncoderMgr> encoderMgr = SkPngEncoderMgr::Make(dst);
+ if (!encoderMgr) {
+ return nullptr;
+ }
+
+ if (!encoderMgr->setHeader(src.info(), options)) {
+ return nullptr;
+ }
+
+ if (!encoderMgr->setColorSpace(src.info(), options)) {
+ return nullptr;
+ }
+
+ if (!encoderMgr->writeInfo(src.info())) {
+ return nullptr;
+ }
+
+ encoderMgr->chooseProc(src.info());
+
+ return std::unique_ptr<SkPngEncoder>(new SkPngEncoder(std::move(encoderMgr), src));
+}
+
+SkPngEncoder::SkPngEncoder(std::unique_ptr<SkPngEncoderMgr> encoderMgr, const SkPixmap& src)
+ : INHERITED(src, encoderMgr->pngBytesPerPixel() * src.width())
+ , fEncoderMgr(std::move(encoderMgr))
+{}
+
+SkPngEncoder::~SkPngEncoder() {}
+
+bool SkPngEncoder::onEncodeRows(int numRows) {
+ if (setjmp(png_jmpbuf(fEncoderMgr->pngPtr()))) {
+ return false;
+ }
+
+ const void* srcRow = fSrc.addr(0, fCurrRow);
+ for (int y = 0; y < numRows; y++) {
+ sk_msan_assert_initialized(srcRow,
+ (const uint8_t*)srcRow + (fSrc.width() << fSrc.shiftPerPixel()));
+ fEncoderMgr->proc()((char*)fStorage.get(),
+ (const char*)srcRow,
+ fSrc.width(),
+ SkColorTypeBytesPerPixel(fSrc.colorType()));
+
+ png_bytep rowPtr = (png_bytep) fStorage.get();
+ png_write_rows(fEncoderMgr->pngPtr(), &rowPtr, 1);
+ srcRow = SkTAddOffset<const void>(srcRow, fSrc.rowBytes());
+ }
+
+ fCurrRow += numRows;
+ if (fCurrRow == fSrc.height()) {
+ png_write_end(fEncoderMgr->pngPtr(), fEncoderMgr->infoPtr());
+ }
+
+ return true;
+}
+
+bool SkPngEncoder::Encode(SkWStream* dst, const SkPixmap& src, const Options& options) {
+ auto encoder = SkPngEncoder::Make(dst, src, options);
+ return encoder.get() && encoder->encodeRows(src.height());
+}
+
+#endif
diff --git a/gfx/skia/skia/src/encode/SkWebpEncoder.cpp b/gfx/skia/skia/src/encode/SkWebpEncoder.cpp
new file mode 100644
index 0000000000..2189b807a4
--- /dev/null
+++ b/gfx/skia/skia/src/encode/SkWebpEncoder.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_ENCODE_WEBP
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkStream.h"
+#include "include/encode/SkEncoder.h"
+#include "include/encode/SkWebpEncoder.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/encode/SkImageEncoderFns.h"
+#include "src/encode/SkImageEncoderPriv.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+
+// A WebP encoder only, on top of (subset of) libwebp
+// For more information on WebP image format, and libwebp library, see:
+// http://code.google.com/speed/webp/
+// http://www.webmproject.org/code/#libwebp_webp_image_decoder_library
+// http://review.webmproject.org/gitweb?p=libwebp.git
+
+extern "C" {
+// If moving libwebp out of skia source tree, path for webp headers must be
+// updated accordingly. Here, we enforce using local copy in webp sub-directory.
+#include "webp/encode.h"
+#include "webp/mux.h"
+#include "webp/mux_types.h"
+}
+
+// libwebp write callback: forwards encoded bytes to the SkWStream stashed in
+// picture->custom_ptr. Returns 1 on success, 0 on failure (libwebp convention).
+static int stream_writer(const uint8_t* data, size_t data_size,
+ const WebPPicture* const picture) {
+ SkWStream* const stream = (SkWStream*)picture->custom_ptr;
+ return stream->write(data, data_size) ? 1 : 0;
+}
+
+// Signature shared by the WebPPictureImportRGBX/RGBA/BGRA importers.
+using WebPPictureImportProc = int (*) (WebPPicture* picture, const uint8_t* pixels, int stride);
+
+// Validates |pixmap|, configures |pic|/|webp_config| for the requested
+// compression mode, and imports the pixels into |pic|. Pixel data that is not
+// directly importable (premultiplied, or an unsupported color type) is first
+// converted to unpremultiplied RGBA_8888 via a temporary bitmap.
+// Returns false on invalid input or allocation/conversion failure.
+static bool preprocess_webp_picture(WebPPicture* pic,
+ WebPConfig* webp_config,
+ const SkPixmap& pixmap,
+ const SkWebpEncoder::Options& opts) {
+ if (!SkPixmapIsValid(pixmap)) {
+ return false;
+ }
+
+ if (SkColorTypeIsAlphaOnly(pixmap.colorType())) {
+ // Maintain the existing behavior of not supporting encoding alpha-only images.
+ // TODO: Support encoding alpha only to an image with alpha but no color?
+ return false;
+ }
+
+ if (nullptr == pixmap.addr()) {
+ return false;
+ }
+
+ pic->width = pixmap.width();
+ pic->height = pixmap.height();
+
+ // Set compression, method, and pixel format.
+ // libwebp recommends using BGRA for lossless and YUV for lossy.
+ // The choices of |webp_config.method| currently just match Chrome's defaults. We
+ // could potentially expose this decision to the client.
+ if (SkWebpEncoder::Compression::kLossy == opts.fCompression) {
+ webp_config->lossless = 0;
+#ifndef SK_WEBP_ENCODER_USE_DEFAULT_METHOD
+ webp_config->method = 3;
+#endif
+ pic->use_argb = 0;
+ } else {
+ webp_config->lossless = 1;
+ webp_config->method = 0;
+ pic->use_argb = 1;
+ }
+
+ {
+ const SkColorType ct = pixmap.colorType();
+ const bool premul = pixmap.alphaType() == kPremul_SkAlphaType;
+
+ SkBitmap tmpBm;
+ WebPPictureImportProc importProc = nullptr;
+ const SkPixmap* src = &pixmap;
+ // Fast paths: formats libwebp can import directly (unpremultiplied only).
+ if (ct == kRGB_888x_SkColorType) {
+ importProc = WebPPictureImportRGBX;
+ } else if (!premul && ct == kRGBA_8888_SkColorType) {
+ importProc = WebPPictureImportRGBA;
+ }
+#ifdef WebPPictureImportBGRA
+ else if (!premul && ct == kBGRA_8888_SkColorType) {
+ importProc = WebPPictureImportBGRA;
+ }
+#endif
+ else {
+ // Slow path: convert to unpremul RGBA_8888 into a temporary bitmap.
+ importProc = WebPPictureImportRGBA;
+ auto info = pixmap.info()
+ .makeColorType(kRGBA_8888_SkColorType)
+ .makeAlphaType(kUnpremul_SkAlphaType);
+ if (!tmpBm.tryAllocPixels(info) ||
+ !pixmap.readPixels(tmpBm.info(), tmpBm.getPixels(), tmpBm.rowBytes())) {
+ return false;
+ }
+ src = &tmpBm.pixmap();
+ }
+
+ if (!importProc(pic, reinterpret_cast<const uint8_t*>(src->addr()), src->rowBytes())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Encodes |pixmap| as a (still) WebP to |stream|. If an ICC profile is
+// required, the image is first encoded to a temporary buffer and then
+// re-assembled with the profile chunk via the libwebp mux API, since libwebp
+// only allows attaching metadata to an already-encoded image.
+bool SkWebpEncoder::Encode(SkWStream* stream, const SkPixmap& pixmap, const Options& opts) {
+ if (!stream) {
+ return false;
+ }
+
+ WebPConfig webp_config;
+ if (!WebPConfigPreset(&webp_config, WEBP_PRESET_DEFAULT, opts.fQuality)) {
+ return false;
+ }
+
+ WebPPicture pic;
+ WebPPictureInit(&pic);
+ // Ensure WebPPictureFree runs on every exit path.
+ SkAutoTCallVProc<WebPPicture, WebPPictureFree> autoPic(&pic);
+
+ if (!preprocess_webp_picture(&pic, &webp_config, pixmap, opts)) {
+ return false;
+ }
+
+ // If there is no need to embed an ICC profile, we write directly to the input stream.
+ // Otherwise, we will first encode to |tmp| and use a mux to add the ICC chunk. libwebp
+ // forces us to have an encoded image before we can add a profile.
+ sk_sp<SkData> icc =
+ icc_from_color_space(pixmap.info(), opts.fICCProfile, opts.fICCProfileDescription);
+ SkDynamicMemoryWStream tmp;
+ pic.custom_ptr = icc ? (void*)&tmp : (void*)stream;
+ pic.writer = stream_writer;
+
+ if (!WebPEncode(&webp_config, &pic)) {
+ return false;
+ }
+
+ if (icc) {
+ // Re-wrap the encoded image together with the ICC profile chunk.
+ sk_sp<SkData> encodedData = tmp.detachAsData();
+ WebPData encoded = { encodedData->bytes(), encodedData->size() };
+ WebPData iccChunk = { icc->bytes(), icc->size() };
+
+ SkAutoTCallVProc<WebPMux, WebPMuxDelete> mux(WebPMuxNew());
+ if (WEBP_MUX_OK != WebPMuxSetImage(mux, &encoded, 0)) {
+ return false;
+ }
+
+ // "ICCP" is the WebP container's fourcc for the ICC profile chunk.
+ if (WEBP_MUX_OK != WebPMuxSetChunk(mux, "ICCP", &iccChunk, 0)) {
+ return false;
+ }
+
+ WebPData assembled;
+ if (WEBP_MUX_OK != WebPMuxAssemble(mux, &assembled)) {
+ return false;
+ }
+
+ stream->write(assembled.bytes, assembled.size);
+ WebPDataClear(&assembled);
+ }
+
+ return true;
+}
+
+// Encodes |frames| as an animated WebP. All frames must share the canvas size
+// of the first frame. Frame timestamps are the running sum of each frame's
++// duration (milliseconds per SkEncoder::Frame — confirm against the header).
+bool SkWebpEncoder::EncodeAnimated(SkWStream* stream,
+ SkSpan<const SkEncoder::Frame> frames,
+ const Options& opts) {
+ if (!stream || !frames.size()) {
+ return false;
+ }
+
+ const int canvasWidth = frames.front().pixmap.width();
+ const int canvasHeight = frames.front().pixmap.height();
+ int timestamp = 0;
+
+ std::unique_ptr<WebPAnimEncoder, void (*)(WebPAnimEncoder*)> enc(
+ WebPAnimEncoderNew(canvasWidth, canvasHeight, nullptr), WebPAnimEncoderDelete);
+ if (!enc) {
+ return false;
+ }
+
+ for (const auto& frame : frames) {
+ const auto& pixmap = frame.pixmap;
+
+ // Mismatched frame dimensions are not supported.
+ if (pixmap.width() != canvasWidth || pixmap.height() != canvasHeight) {
+ return false;
+ }
+
+ WebPConfig webp_config;
+ if (!WebPConfigPreset(&webp_config, WEBP_PRESET_DEFAULT, opts.fQuality)) {
+ return false;
+ }
+
+ WebPPicture pic;
+ WebPPictureInit(&pic);
+ SkAutoTCallVProc<WebPPicture, WebPPictureFree> autoPic(&pic);
+
+ if (!preprocess_webp_picture(&pic, &webp_config, pixmap, opts)) {
+ return false;
+ }
+
+ if (!WebPEncode(&webp_config, &pic)) {
+ return false;
+ }
+
+ if (!WebPAnimEncoderAdd(enc.get(), &pic, timestamp, &webp_config)) {
+ return false;
+ }
+
+ timestamp += frame.duration;
+ }
+
+ // Add a last fake frame to signal the last duration.
+ if (!WebPAnimEncoderAdd(enc.get(), nullptr, timestamp, nullptr)) {
+ return false;
+ }
+
+ WebPData assembled;
+ SkAutoTCallVProc<WebPData, WebPDataClear> autoWebPData(&assembled);
+ if (!WebPAnimEncoderAssemble(enc.get(), &assembled)) {
+ return false;
+ }
+
+ // Release the encoder before writing; |assembled| owns its own bytes.
+ enc.reset();
+
+ return stream->write(assembled.bytes, assembled.size);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp b/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp
new file mode 100644
index 0000000000..98c9663684
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkFontMgr_indirect.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/ports/SkFontMgr_indirect.h"
+#include "include/ports/SkRemotableFontMgr.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTemplates.h"
+
+class SkData;
+
+// Style set backed by a remote font identity list. Each entry resolves to a
+// typeface lazily through the owning SkFontMgr_Indirect's proxy.
+class SkStyleSet_Indirect : public SkFontStyleSet {
+public:
+ /** Takes ownership of the SkRemotableFontIdentitySet. */
+ SkStyleSet_Indirect(const SkFontMgr_Indirect* owner, int familyIndex,
+ SkRemotableFontIdentitySet* data)
+ : fOwner(SkRef(owner)), fFamilyIndex(familyIndex), fData(data)
+ { }
+
+ int count() override { return fData->count(); }
+
+ void getStyle(int index, SkFontStyle* fs, SkString* style) override {
+ if (fs) {
+ *fs = fData->at(index).fFontStyle;
+ }
+ if (style) {
+ // TODO: is this useful? Current locale?
+ style->reset();
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ return fOwner->createTypefaceFromFontId(fData->at(index));
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ // A non-negative family index means the proxy can do the match remotely;
+ // otherwise fall back to the local CSS3 matching heuristic.
+ if (fFamilyIndex >= 0) {
+ SkFontIdentity id = fOwner->fProxy->matchIndexStyle(fFamilyIndex, pattern);
+ return fOwner->createTypefaceFromFontId(id);
+ }
+
+ return this->matchStyleCSS3(pattern);
+ }
+private:
+ sk_sp<const SkFontMgr_Indirect> fOwner; // keeps the owning manager alive
+ int fFamilyIndex; // -1 when built from a name match
+ sk_sp<SkRemotableFontIdentitySet> fData; // remote identities for this family
+};
+
+// Family enumeration is not supported by the indirect manager: it reports
+// zero families and aborts on index-based access.
+int SkFontMgr_Indirect::onCountFamilies() const {
+ return 0;
+}
+
+void SkFontMgr_Indirect::onGetFamilyName(int index, SkString* familyName) const {
+ SK_ABORT("Not implemented");
+}
+
+SkFontStyleSet* SkFontMgr_Indirect::onCreateStyleSet(int index) const {
+ SK_ABORT("Not implemented");
+}
+
+// Name-based lookup is delegated to the remote proxy; familyIndex -1 marks
+// the style set as name-matched (see SkStyleSet_Indirect::matchStyle).
+SkFontStyleSet* SkFontMgr_Indirect::onMatchFamily(const char familyName[]) const {
+ return new SkStyleSet_Indirect(this, -1, fProxy->matchName(familyName));
+}
+
+// Resolves a remote font identity to a typeface, consulting (and maintaining)
+// a weak-reference cache keyed by data id. Cache entries hold weak refs so a
+// typeface dropped by all strong owners can be purged here lazily.
+// Returns a strong (+1) reference, or nullptr on failure.
+SkTypeface* SkFontMgr_Indirect::createTypefaceFromFontId(const SkFontIdentity& id) const {
+ if (id.fDataId == SkFontIdentity::kInvalidDataId) {
+ return nullptr;
+ }
+
+ SkAutoMutexExclusive ama(fDataCacheMutex);
+
+ sk_sp<SkTypeface> dataTypeface;
+ int dataTypefaceIndex = 0;
+ for (int i = 0; i < fDataCache.size(); ++i) {
+ const DataEntry& entry = fDataCache[i];
+ if (entry.fDataId == id.fDataId) {
+ // Exact hit: same data blob and same ttc index, still alive.
+ if (entry.fTtcIndex == id.fTtcIndex &&
+ !entry.fTypeface->weak_expired() && entry.fTypeface->try_ref())
+ {
+ return entry.fTypeface;
+ }
+ // Same data blob but different ttc index: remember it so we can
+ // reuse its stream instead of re-fetching the data from the proxy.
+ if (dataTypeface.get() == nullptr &&
+ !entry.fTypeface->weak_expired() && entry.fTypeface->try_ref())
+ {
+ dataTypeface.reset(entry.fTypeface);
+ dataTypefaceIndex = entry.fTtcIndex;
+ }
+ }
+
+ // Opportunistically purge entries whose typeface has died.
+ if (entry.fTypeface->weak_expired()) {
+ fDataCache.removeShuffle(i);
+ --i;
+ }
+ }
+
+ // No exact match, but did find a data match.
+ if (dataTypeface.get() != nullptr) {
+ std::unique_ptr<SkStreamAsset> stream(dataTypeface->openStream(nullptr));
+ if (stream.get() != nullptr) {
+ return fImpl->makeFromStream(std::move(stream), dataTypefaceIndex).release();
+ }
+ }
+
+ // No data match, request data and add entry.
+ std::unique_ptr<SkStreamAsset> stream(fProxy->getData(id.fDataId));
+ if (stream.get() == nullptr) {
+ return nullptr;
+ }
+
+ sk_sp<SkTypeface> typeface(fImpl->makeFromStream(std::move(stream), id.fTtcIndex));
+ if (typeface.get() == nullptr) {
+ return nullptr;
+ }
+
+ DataEntry& newEntry = fDataCache.push_back();
+ typeface->weak_ref();
+ newEntry.fDataId = id.fDataId;
+ newEntry.fTtcIndex = id.fTtcIndex;
+ newEntry.fTypeface = typeface.get(); // weak reference passed to new entry.
+
+ return typeface.release();
+}
+
+// Style matching delegates to the remote proxy, then resolves the returned
+// identity through the typeface cache.
+SkTypeface* SkFontMgr_Indirect::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const {
+ SkFontIdentity id = fProxy->matchNameStyle(familyName, fontStyle);
+ return this->createTypefaceFromFontId(id);
+}
+
+SkTypeface* SkFontMgr_Indirect::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const {
+ SkFontIdentity id = fProxy->matchNameStyleCharacter(familyName, style, bcp47,
+ bcp47Count, character);
+ return this->createTypefaceFromFontId(id);
+}
+
+// Typeface construction from raw data/streams/files bypasses the proxy and
+// goes straight to the local implementation manager (fImpl).
+sk_sp<SkTypeface> SkFontMgr_Indirect::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const {
+ return fImpl->makeFromStream(std::move(stream), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Indirect::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const {
+ return fImpl->makeFromStream(std::move(stream), args);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Indirect::onMakeFromFile(const char path[], int ttcIndex) const {
+ return fImpl->makeFromFile(path, ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Indirect::onMakeFromData(sk_sp<SkData> data, int ttcIndex) const {
+ return fImpl->makeFromData(std::move(data), ttcIndex);
+}
+
+// Legacy lookup with graceful fallback: try the requested family, then the
+// default family (nullptr name), then the proxy's family at index 0.
+sk_sp<SkTypeface> SkFontMgr_Indirect::onLegacyMakeTypeface(const char familyName[],
+ SkFontStyle style) const {
+ sk_sp<SkTypeface> face(this->matchFamilyStyle(familyName, style));
+
+ if (nullptr == face.get()) {
+ face.reset(this->matchFamilyStyle(nullptr, style));
+ }
+
+ if (nullptr == face.get()) {
+ SkFontIdentity fontId = this->fProxy->matchIndexStyle(0, style);
+ face.reset(this->createTypefaceFromFontId(fontId));
+ }
+
+ return face;
+}
diff --git a/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp b/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp
new file mode 100644
index 0000000000..d8c904a566
--- /dev/null
+++ b/gfx/skia/skia/src/fonts/SkRemotableFontMgr.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/ports/SkRemotableFontMgr.h"
+#include "include/private/base/SkOnce.h"
+
+// Allocates storage for |count| identities and hands the caller a pointer to
+// the internal array (via the out-parameter |data|) so it can be filled in.
+SkRemotableFontIdentitySet::SkRemotableFontIdentitySet(int count, SkFontIdentity** data)
+ : fCount(count), fData(count)
+{
+ SkASSERT(data);
+ *data = fData.get();
+}
+
+// Returns a shared, lazily-created singleton empty set. The caller receives
+// a +1 reference (SkRef); the singleton itself is never destroyed.
+SkRemotableFontIdentitySet* SkRemotableFontIdentitySet::NewEmpty() {
+ static SkOnce once;
+ static SkRemotableFontIdentitySet* empty;
+ once([]{ empty = new SkRemotableFontIdentitySet; });
+ return SkRef(empty);
+}
diff --git a/gfx/skia/skia/src/image/SkImage.cpp b/gfx/skia/skia/src/image/SkImage.cpp
new file mode 100644
index 0000000000..6787044445
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage.cpp
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImage.h"
+
+#include "include/codec/SkEncodedImageFormat.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTileMode.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilterTypes.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMipmap.h"
+#include "src/core/SkNextID.h"
+#include "src/core/SkSpecialImage.h"
+#include "src/image/SkImage_Base.h"
+#include "src/shaders/SkImageShader.h"
+
+#include <utility>
+
+class SkShader;
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/private/gpu/ganesh/GrImageContext.h"
+#include "src/gpu/ganesh/GrImageContextPriv.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Image_Graphite.h"
+#include "src/gpu/graphite/Log.h"
+#endif
+
+// Assigns a fresh unique id when the caller passes kNeedNewImageUniqueID;
+// otherwise adopts the supplied id. Zero-sized images are disallowed.
+SkImage::SkImage(const SkImageInfo& info, uint32_t uniqueID)
+ : fInfo(info)
+ , fUniqueID(kNeedNewImageUniqueID == uniqueID ? SkNextID::ImageID() : uniqueID) {
+ SkASSERT(info.width() > 0);
+ SkASSERT(info.height() > 0);
+}
+
+// Peeks at the image's pixels without copying; |pm| may be null when the
+// caller only wants the yes/no answer.
+bool SkImage::peekPixels(SkPixmap* pm) const {
+ SkPixmap tmp;
+ if (!pm) {
+ pm = &tmp;
+ }
+ return as_IB(this)->onPeekPixels(pm);
+}
+
+bool SkImage::readPixels(GrDirectContext* dContext, const SkImageInfo& dstInfo, void* dstPixels,
+ size_t dstRowBytes, int srcX, int srcY, CachingHint chint) const {
+ return as_IB(this)->onReadPixels(dContext, dstInfo, dstPixels, dstRowBytes, srcX, srcY, chint);
+}
+
+// Legacy context-less overload: recovers the direct context from the image.
+#ifndef SK_IMAGE_READ_PIXELS_DISABLE_LEGACY_API
+bool SkImage::readPixels(const SkImageInfo& dstInfo, void* dstPixels,
+ size_t dstRowBytes, int srcX, int srcY, CachingHint chint) const {
+ auto dContext = as_IB(this)->directContext();
+ return this->readPixels(dContext, dstInfo, dstPixels, dstRowBytes, srcX, srcY, chint);
+}
+#endif
+
+// Async readback with rescale. Invalid srcRect/info fail immediately by
+// invoking the callback with a null result.
+void SkImage::asyncRescaleAndReadPixels(const SkImageInfo& info,
+ const SkIRect& srcRect,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) const {
+ if (!SkIRect::MakeWH(this->width(), this->height()).contains(srcRect) ||
+ !SkImageInfoIsValid(info)) {
+ callback(context, nullptr);
+ return;
+ }
+ as_IB(this)->onAsyncRescaleAndReadPixels(
+ info, srcRect, rescaleGamma, rescaleMode, callback, context);
+}
+
+// YUV420 variant: additionally requires even destination dimensions (the
+// chroma planes are subsampled 2x in each direction).
+void SkImage::asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) const {
+ if (!SkIRect::MakeWH(this->width(), this->height()).contains(srcRect) || dstSize.isZero() ||
+ (dstSize.width() & 0b1) || (dstSize.height() & 0b1)) {
+ callback(context, nullptr);
+ return;
+ }
+ as_IB(this)->onAsyncRescaleAndReadPixelsYUV420(yuvColorSpace,
+ std::move(dstColorSpace),
+ srcRect,
+ dstSize,
+ rescaleGamma,
+ rescaleMode,
+ callback,
+ context);
+}
+
+// Reads pixels into |dst|, scaling with |sampling| when the sizes differ.
+// Same-size requests take the plain readPixels path.
+bool SkImage::scalePixels(const SkPixmap& dst, const SkSamplingOptions& sampling,
+ CachingHint chint) const {
+ // Context TODO: Elevate GrDirectContext requirement to public API.
+ auto dContext = as_IB(this)->directContext();
+ if (this->width() == dst.width() && this->height() == dst.height()) {
+ return this->readPixels(dContext, dst, 0, 0, chint);
+ }
+
+ // Idea: If/when SkImageGenerator supports a native-scaling API (where the generator itself
+ // can scale more efficiently) we should take advantage of it here.
+ //
+ SkBitmap bm;
+ if (as_IB(this)->getROPixels(dContext, &bm, chint)) {
+ SkPixmap pmap;
+ // Note: By calling the pixmap scaler, we never cache the final result, so the chint
+ // is (currently) only being applied to the getROPixels. If we get a request to
+ // also attempt to cache the final (scaled) result, we would add that logic here.
+ //
+ return bm.peekPixels(&pmap) && pmap.scalePixels(dst, sampling);
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Trivial accessors forwarding to the image's SkImageInfo.
+SkColorType SkImage::colorType() const { return fInfo.colorType(); }
+
+SkAlphaType SkImage::alphaType() const { return fInfo.alphaType(); }
+
+SkColorSpace* SkImage::colorSpace() const { return fInfo.colorSpace(); }
+
+sk_sp<SkColorSpace> SkImage::refColorSpace() const { return fInfo.refColorSpace(); }
+
+// Shader factories. The overloads without tile modes default to clamp/clamp.
+// The "Raw" variants use SkImageShader::MakeRaw instead of Make (same
+// argument shapes; see SkImageShader for the distinction).
+sk_sp<SkShader> SkImage::makeShader(const SkSamplingOptions& sampling, const SkMatrix& lm) const {
+ return SkImageShader::Make(sk_ref_sp(const_cast<SkImage*>(this)),
+ SkTileMode::kClamp, SkTileMode::kClamp,
+ sampling, &lm);
+}
+
+sk_sp<SkShader> SkImage::makeShader(const SkSamplingOptions& sampling, const SkMatrix* lm) const {
+ return SkImageShader::Make(sk_ref_sp(const_cast<SkImage*>(this)),
+ SkTileMode::kClamp, SkTileMode::kClamp,
+ sampling, lm);
+}
+
+sk_sp<SkShader> SkImage::makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const {
+ return SkImageShader::Make(sk_ref_sp(const_cast<SkImage*>(this)), tmx, tmy,
+ sampling, &lm);
+}
+
+sk_sp<SkShader> SkImage::makeShader(SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& sampling,
+ const SkMatrix* localMatrix) const {
+ return SkImageShader::Make(sk_ref_sp(const_cast<SkImage*>(this)), tmx, tmy,
+ sampling, localMatrix);
+}
+
+sk_sp<SkShader> SkImage::makeRawShader(SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const {
+ return SkImageShader::MakeRaw(sk_ref_sp(const_cast<SkImage*>(this)), tmx, tmy,
+ sampling, &lm);
+}
+
+sk_sp<SkShader> SkImage::makeRawShader(const SkSamplingOptions& sampling,
+ const SkMatrix& lm) const {
+ return SkImageShader::MakeRaw(sk_ref_sp(const_cast<SkImage*>(this)),
+ SkTileMode::kClamp, SkTileMode::kClamp,
+ sampling, &lm);
+}
+
+sk_sp<SkShader> SkImage::makeRawShader(const SkSamplingOptions& sampling,
+ const SkMatrix* localMatrix) const {
+ return SkImageShader::MakeRaw(sk_ref_sp(const_cast<SkImage*>(this)),
+ SkTileMode::kClamp, SkTileMode::kClamp,
+ sampling, localMatrix);
+}
+
+sk_sp<SkShader> SkImage::makeRawShader(SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& sampling,
+ const SkMatrix* localMatrix) const {
+ return SkImageShader::MakeRaw(sk_ref_sp(const_cast<SkImage*>(this)), tmx, tmy,
+ sampling, localMatrix);
+}
+
+// Encodes the image by fetching read-only pixels and running the bitmap
+// encoder for |type| at |quality|. Returns nullptr if pixels are unavailable.
+sk_sp<SkData> SkImage::encodeToData(GrDirectContext* context, SkEncodedImageFormat type,
+ int quality) const {
+ SkBitmap bm;
+ if (as_IB(this)->getROPixels(context, &bm)) {
+ return SkEncodeBitmap(bm, type, quality);
+ }
+ return nullptr;
+}
+
+// Prefers the original encoded bytes if the image still has them; otherwise
+// re-encodes as PNG at quality 100.
+sk_sp<SkData> SkImage::encodeToData(GrDirectContext* context) const {
+ if (auto encoded = this->refEncodedData()) {
+ return encoded;
+ }
+
+ return this->encodeToData(context, SkEncodedImageFormat::kPNG, 100);
+}
+
+// Legacy context-less overloads.
+#ifndef SK_IMAGE_READ_PIXELS_DISABLE_LEGACY_API
+sk_sp<SkData> SkImage::encodeToData(SkEncodedImageFormat type, int quality) const {
+ auto dContext = as_IB(this)->directContext();
+ return this->encodeToData(dContext, type, quality);
+}
+
+sk_sp<SkData> SkImage::encodeToData() const {
+ auto dContext = as_IB(this)->directContext();
+ return this->encodeToData(dContext);
+}
+#endif
+
+sk_sp<SkData> SkImage::refEncodedData() const {
+ return sk_sp<SkData>(as_IB(this)->onRefEncoded());
+}
+
+// Wraps encoded bytes in a lazily-decoded image via an image generator.
+// Empty or null data yields nullptr.
+sk_sp<SkImage> SkImage::MakeFromEncoded(sk_sp<SkData> encoded,
+ std::optional<SkAlphaType> alphaType) {
+ if (nullptr == encoded || 0 == encoded->size()) {
+ return nullptr;
+ }
+ return SkImage::MakeFromGenerator(
+ SkImageGenerator::MakeFromEncoded(std::move(encoded), alphaType));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Returns an image restricted to |subset|, or this image itself when the
+// subset equals the full bounds. Rejects empty or out-of-bounds subsets, and
+// (under Ganesh) mismatched GPU contexts.
+sk_sp<SkImage> SkImage::makeSubset(const SkIRect& subset, GrDirectContext* direct) const {
+ if (subset.isEmpty()) {
+ return nullptr;
+ }
+
+ const SkIRect bounds = SkIRect::MakeWH(this->width(), this->height());
+ if (!bounds.contains(subset)) {
+ return nullptr;
+ }
+
+#if defined(SK_GANESH)
+ auto myContext = as_IB(this)->context();
+ // This check is also performed in the subclass, but we do it here for the short-circuit below.
+ if (myContext && !myContext->priv().matches(direct)) {
+ return nullptr;
+ }
+#endif
+
+ // optimization : return self if the subset == our bounds
+ if (bounds == subset) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ return as_IB(this)->onMakeSubset(subset, direct);
+}
+
+// GPU-dependent entry points: real implementations under SK_GANESH, and
+// CPU-only stubs otherwise (no texture backing, context always invalid).
+#if defined(SK_GANESH)
+
+bool SkImage::isTextureBacked() const {
+ return as_IB(this)->isGaneshBacked() || as_IB(this)->isGraphiteBacked();
+}
+
+size_t SkImage::textureSize() const { return as_IB(this)->onTextureSize(); }
+
+GrBackendTexture SkImage::getBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const {
+ return as_IB(this)->onGetBackendTexture(flushPendingGrContextIO, origin);
+}
+
+bool SkImage::isValid(GrRecordingContext* rContext) const {
+ // An abandoned context invalidates everything it owned.
+ if (rContext && rContext->abandoned()) {
+ return false;
+ }
+ return as_IB(this)->onIsValid(rContext);
+}
+
+GrSemaphoresSubmitted SkImage::flush(GrDirectContext* dContext,
+ const GrFlushInfo& flushInfo) const {
+ return as_IB(this)->onFlush(dContext, flushInfo);
+}
+
+void SkImage::flushAndSubmit(GrDirectContext* dContext) const {
+ this->flush(dContext, {});
+ dContext->submit();
+}
+
+#else
+
+bool SkImage::isTextureBacked() const { return false; }
+
+bool SkImage::isValid(GrRecordingContext* rContext) const {
+ // Without GPU support, only a null (raster) context can be valid.
+ if (rContext) {
+ return false;
+ }
+ return as_IB(this)->onIsValid(nullptr);
+}
+
+#endif
+
+// Pixmap-destination convenience overloads: unpack the pixmap's info, address
+// and row bytes and forward to the main readPixels.
+bool SkImage::readPixels(GrDirectContext* dContext, const SkPixmap& pmap, int srcX, int srcY,
+ CachingHint chint) const {
+ return this->readPixels(dContext, pmap.info(), pmap.writable_addr(), pmap.rowBytes(), srcX,
+ srcY, chint);
+}
+
+#ifndef SK_IMAGE_READ_PIXELS_DISABLE_LEGACY_API
+bool SkImage::readPixels(const SkPixmap& pmap, int srcX, int srcY, CachingHint chint) const {
+ auto dContext = as_IB(this)->directContext();
+ return this->readPixels(dContext, pmap, srcX, srcY, chint);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Wraps a bitmap as an image; the bitmap's pixels are copied only if the
+// bitmap is mutable (kIfMutable_SkCopyPixelsMode).
+sk_sp<SkImage> SkImage::MakeFromBitmap(const SkBitmap& bm) {
+ if (!bm.pixelRef()) {
+ return nullptr;
+ }
+
+ return SkMakeImageFromRasterBitmap(bm, kIfMutable_SkCopyPixelsMode);
+}
+
+// Note: the LegacyBitmapMode parameter is intentionally ignored.
+bool SkImage::asLegacyBitmap(SkBitmap* bitmap, LegacyBitmapMode ) const {
+ // Context TODO: Elevate GrDirectContext requirement to public API.
+ auto dContext = as_IB(this)->directContext();
+ return as_IB(this)->onAsLegacyBitmap(dContext, bitmap);
+}
+
+// Picture-backed image factories; the props-less overload forwards with
+// default SkSurfaceProps.
+sk_sp<SkImage> SkImage::MakeFromPicture(sk_sp<SkPicture> picture, const SkISize& dimensions,
+ const SkMatrix* matrix, const SkPaint* paint,
+ BitDepth bitDepth, sk_sp<SkColorSpace> colorSpace) {
+ return SkImage::MakeFromPicture(picture, dimensions, matrix, paint, bitDepth, colorSpace, {});
+}
+
+sk_sp<SkImage> SkImage::MakeFromPicture(sk_sp<SkPicture> picture, const SkISize& dimensions,
+ const SkMatrix* matrix, const SkPaint* paint,
+ BitDepth bitDepth, sk_sp<SkColorSpace> colorSpace,
+ SkSurfaceProps props) {
+ return MakeFromGenerator(SkImageGenerator::MakeFromPicture(dimensions, std::move(picture),
+ matrix, paint, bitDepth,
+ std::move(colorSpace), props));
+}
+
+// Applies |filter| to the |subset| of this image, clipped to |clipBounds|.
+// On success, *outSubset is the valid region of the returned image and
+// *offset is where to draw it relative to the original coordinate system.
+// Returns nullptr on invalid arguments, context mismatch, or filter failure.
+sk_sp<SkImage> SkImage::makeWithFilter(GrRecordingContext* rContext, const SkImageFilter* filter,
+ const SkIRect& subset, const SkIRect& clipBounds,
+ SkIRect* outSubset, SkIPoint* offset) const {
+
+ if (!filter || !outSubset || !offset || !this->bounds().contains(subset)) {
+ return nullptr;
+ }
+ sk_sp<SkSpecialImage> srcSpecialImage;
+#if defined(SK_GANESH)
+ auto myContext = as_IB(this)->context();
+ if (myContext && !myContext->priv().matches(rContext)) {
+ return nullptr;
+ }
+ srcSpecialImage = SkSpecialImage::MakeFromImage(rContext, subset,
+ sk_ref_sp(const_cast<SkImage*>(this)),
+ SkSurfaceProps());
+#else
+ srcSpecialImage = SkSpecialImage::MakeFromImage(nullptr, subset,
+ sk_ref_sp(const_cast<SkImage*>(this)),
+ SkSurfaceProps());
+#endif
+ if (!srcSpecialImage) {
+ return nullptr;
+ }
+
+ sk_sp<SkImageFilterCache> cache(
+ SkImageFilterCache::Create(SkImageFilterCache::kDefaultTransientSize));
+
+ // The filters operate in the local space of the src image, where (0,0) corresponds to the
+ // subset's top left corner. But the clip bounds and any crop rects on the filters are in the
+ // original coordinate system, so configure the CTM to correct crop rects and explicitly adjust
+ // the clip bounds (since it is assumed to already be in image space).
+ SkImageFilter_Base::Context context(SkMatrix::Translate(-subset.x(), -subset.y()),
+ clipBounds.makeOffset(-subset.topLeft()),
+ cache.get(), fInfo.colorType(), fInfo.colorSpace(),
+ srcSpecialImage.get());
+
+ sk_sp<SkSpecialImage> result = as_IFB(filter)->filterImage(context).imageAndOffset(offset);
+ if (!result) {
+ return nullptr;
+ }
+
+ // The output image and offset are relative to the subset rectangle, so the offset needs to
+ // be shifted to put it in the correct spot with respect to the original coordinate system
+ offset->fX += subset.x();
+ offset->fY += subset.y();
+
+ // Final clip against the exact clipBounds (the clip provided in the context gets adjusted
+ // to account for pixel-moving filters so doesn't always exactly match when finished). The
+ // clipBounds are translated into the clippedDstRect coordinate space, including the
+ // result->subset() ensures that the result's image pixel origin does not affect results.
+ SkIRect dstRect = result->subset();
+ SkIRect clippedDstRect = dstRect;
+ if (!clippedDstRect.intersect(clipBounds.makeOffset(result->subset().topLeft() - *offset))) {
+ return nullptr;
+ }
+
+ // Adjust the geometric offset if the top-left corner moved as well
+ offset->fX += (clippedDstRect.x() - dstRect.x());
+ offset->fY += (clippedDstRect.y() - dstRect.y());
+ *outSubset = clippedDstRect;
+ return result->asImage();
+}
+
+bool SkImage::isLazyGenerated() const {
+ return as_IB(this)->onIsLazyGenerated();
+}
+
+bool SkImage::isAlphaOnly() const { return SkColorTypeIsAlphaOnly(fInfo.colorType()); }
+
+// Color-space-only conversion keeps the current color type.
+sk_sp<SkImage> SkImage::makeColorSpace(sk_sp<SkColorSpace> target, GrDirectContext* direct) const {
+ return this->makeColorTypeAndColorSpace(this->colorType(), std::move(target), direct);
+}
+
+// Converts to |targetColorType| / |targetColorSpace|. A null source color
+// space is treated as sRGB. Returns this image unchanged when nothing would
+// change (same type and equal/irrelevant color space), nullptr on bad args.
+sk_sp<SkImage> SkImage::makeColorTypeAndColorSpace(SkColorType targetColorType,
+ sk_sp<SkColorSpace> targetColorSpace,
+ GrDirectContext* dContext) const {
+ if (kUnknown_SkColorType == targetColorType || !targetColorSpace) {
+ return nullptr;
+ }
+
+#if defined(SK_GANESH)
+ auto myContext = as_IB(this)->context();
+ // This check is also performed in the subclass, but we do it here for the short-circuit below.
+ if (myContext && !myContext->priv().matches(dContext)) {
+ return nullptr;
+ }
+#endif
+
+ SkColorType colorType = this->colorType();
+ SkColorSpace* colorSpace = this->colorSpace();
+ if (!colorSpace) {
+ colorSpace = sk_srgb_singleton();
+ }
+ // Alpha-only images carry no color, so the color space comparison is moot.
+ if (colorType == targetColorType &&
+ (SkColorSpace::Equals(colorSpace, targetColorSpace.get()) || this->isAlphaOnly())) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ return as_IB(this)->onMakeColorTypeAndColorSpace(targetColorType,
+ std::move(targetColorSpace), dContext);
+}
+
+// Retags the image with |target| without converting pixel values.
+sk_sp<SkImage> SkImage::reinterpretColorSpace(sk_sp<SkColorSpace> target) const {
+ if (!target) {
+ return nullptr;
+ }
+
+ // No need to create a new image if:
+ // (1) The color spaces are equal.
+ // (2) The color type is kAlpha8.
+ SkColorSpace* colorSpace = this->colorSpace();
+ if (!colorSpace) {
+ colorSpace = sk_srgb_singleton();
+ }
+ if (SkColorSpace::Equals(colorSpace, target.get()) || this->isAlphaOnly()) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ return as_IB(this)->onReinterpretColorSpace(std::move(target));
+}
+
+// Returns a CPU-backed equivalent of this image (self if already raster).
+sk_sp<SkImage> SkImage::makeNonTextureImage() const {
+ if (!this->isTextureBacked()) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+ return this->makeRasterImage();
+}
+
+// Materializes the pixels into a new raster image. Short-circuits when the
+// pixels are already directly peekable; fails on byte-size overflow or
+// readback failure.
+sk_sp<SkImage> SkImage::makeRasterImage(CachingHint chint) const {
+ SkPixmap pm;
+ if (this->peekPixels(&pm)) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ const size_t rowBytes = fInfo.minRowBytes();
+ size_t size = fInfo.computeByteSize(rowBytes);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return nullptr;
+ }
+
+ // Context TODO: Elevate GrDirectContext requirement to public API.
+ auto dContext = as_IB(this)->directContext();
+ sk_sp<SkData> data = SkData::MakeUninitialized(size);
+ // Read with a null color space to avoid any conversion during readback.
+ pm = {fInfo.makeColorSpace(nullptr), data->writable_data(), fInfo.minRowBytes()};
+ if (!this->readPixels(dContext, pm, 0, 0, chint)) {
+ return nullptr;
+ }
+
+ return SkImage::MakeRasterData(fInfo, std::move(data), rowBytes);
+}
+
+// Free-function pin/unpin helpers; both arguments are required (asserted).
+bool SkImage_pinAsTexture(const SkImage* image, GrRecordingContext* rContext) {
+ SkASSERT(image);
+ SkASSERT(rContext);
+ return as_IB(image)->onPinAsTexture(rContext);
+}
+
+void SkImage_unpinAsTexture(const SkImage* image, GrRecordingContext* rContext) {
+ SkASSERT(image);
+ SkASSERT(rContext);
+ as_IB(image)->onUnpinAsTexture(rContext);
+}
+
+bool SkImage::hasMipmaps() const { return as_IB(this)->onHasMipmaps(); }
+
+// Attaches |mips| (nullptr requests default generation in the subclass).
+// Falls back to returning this image if the subclass cannot comply.
+sk_sp<SkImage> SkImage::withMipmaps(sk_sp<SkMipmap> mips) const {
+ if (mips == nullptr || mips->validForRootLevel(this->imageInfo())) {
+ if (auto result = as_IB(this)->onMakeWithMipmaps(std::move(mips))) {
+ return result;
+ }
+ }
+ return sk_ref_sp((const_cast<SkImage*>(this)));
+}
+
+sk_sp<SkImage> SkImage::withDefaultMipmaps() const {
+ return this->withMipmaps(nullptr);
+}
diff --git a/gfx/skia/skia/src/image/SkImage_AndroidFactories.cpp b/gfx/skia/skia/src/image/SkImage_AndroidFactories.cpp
new file mode 100644
index 0000000000..9a03a0ae20
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_AndroidFactories.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+
+#include "include/android/SkImageAndroid.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkSurface.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/gpu/ganesh/GrImageContext.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/gpu/RefCntedCallback.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrAHardwareBufferImageGenerator.h"
+#include "src/gpu/ganesh/GrAHardwareBufferUtils_impl.h"
+#include "src/gpu/ganesh/GrBackendTextureImageGenerator.h"
+#include "src/gpu/ganesh/GrBackendUtils.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrGpu.h"
+#include "src/gpu/ganesh/GrGpuResourcePriv.h"
+#include "src/gpu/ganesh/GrImageContextPriv.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrRenderTask.h"
+#include "src/gpu/ganesh/GrSemaphore.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrTexture.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/SurfaceContext.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkImage_Gpu.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <utility>
+
+namespace sk_image_factory {
+
+sk_sp<SkImage> MakeFromAHardwareBuffer(AHardwareBuffer* graphicBuffer, SkAlphaType at) {
+ auto gen = GrAHardwareBufferImageGenerator::Make(graphicBuffer, at, nullptr,
+ kTopLeft_GrSurfaceOrigin);
+ return SkImage::MakeFromGenerator(std::move(gen));
+}
+
+sk_sp<SkImage> MakeFromAHardwareBuffer(AHardwareBuffer* graphicBuffer, SkAlphaType at,
+ sk_sp<SkColorSpace> cs,
+ GrSurfaceOrigin surfaceOrigin) {
+ auto gen = GrAHardwareBufferImageGenerator::Make(graphicBuffer, at, cs, surfaceOrigin);
+ return SkImage::MakeFromGenerator(std::move(gen));
+}
+
+sk_sp<SkImage> MakeFromAHardwareBufferWithData(GrDirectContext* dContext,
+ const SkPixmap& pixmap,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin surfaceOrigin) {
+ AHardwareBuffer_Desc bufferDesc;
+ AHardwareBuffer_describe(hardwareBuffer, &bufferDesc);
+
+ if (!SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE)) {
+ return nullptr;
+ }
+
+
+ GrBackendFormat backendFormat = GrAHardwareBufferUtils::GetBackendFormat(dContext,
+ hardwareBuffer,
+ bufferDesc.format,
+ true);
+
+ if (!backendFormat.isValid()) {
+ return nullptr;
+ }
+
+ GrAHardwareBufferUtils::DeleteImageProc deleteImageProc = nullptr;
+ GrAHardwareBufferUtils::UpdateImageProc updateImageProc = nullptr;
+ GrAHardwareBufferUtils::TexImageCtx deleteImageCtx = nullptr;
+
+ const bool isRenderable = SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER);
+
+ GrBackendTexture backendTexture =
+ GrAHardwareBufferUtils::MakeBackendTexture(dContext, hardwareBuffer,
+ bufferDesc.width, bufferDesc.height,
+ &deleteImageProc, &updateImageProc,
+ &deleteImageCtx, false, backendFormat,
+ isRenderable);
+ if (!backendTexture.isValid()) {
+ return nullptr;
+ }
+ SkASSERT(deleteImageProc);
+
+ auto releaseHelper = skgpu::RefCntedCallback::Make(deleteImageProc, deleteImageCtx);
+
+ SkColorType colorType =
+ GrAHardwareBufferUtils::GetSkColorTypeFromBufferFormat(bufferDesc.format);
+
+ GrColorType grColorType = SkColorTypeToGrColorType(colorType);
+
+ GrProxyProvider* proxyProvider = dContext->priv().proxyProvider();
+ if (!proxyProvider) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxy = proxyProvider->wrapBackendTexture(
+ backendTexture, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo, kRW_GrIOType,
+ std::move(releaseHelper));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ skgpu::Swizzle swizzle = dContext->priv().caps()->getReadSwizzle(backendFormat, grColorType);
+ GrSurfaceProxyView framebufferView(std::move(proxy), surfaceOrigin, swizzle);
+ SkColorInfo colorInfo = pixmap.info().colorInfo().makeColorType(colorType);
+ sk_sp<SkImage> image = sk_make_sp<SkImage_Gpu>(sk_ref_sp(dContext),
+ kNeedNewImageUniqueID,
+ framebufferView,
+ std::move(colorInfo));
+ if (!image) {
+ return nullptr;
+ }
+
+ GrDrawingManager* drawingManager = dContext->priv().drawingManager();
+ if (!drawingManager) {
+ return nullptr;
+ }
+
+ skgpu::ganesh::SurfaceContext surfaceContext(
+ dContext, std::move(framebufferView), image->imageInfo().colorInfo());
+
+ surfaceContext.writePixels(dContext, pixmap, {0, 0});
+
+ GrSurfaceProxy* p[1] = {surfaceContext.asSurfaceProxy()};
+ drawingManager->flush(p, SkSurface::BackendSurfaceAccess::kNoAccess, {}, nullptr);
+
+ return image;
+}
+
+} // namespace sk_image_factory
+
+#if !defined(SK_DISABLE_LEGACY_IMAGE_FACTORIES)
+
+sk_sp<SkImage> SkImage::MakeFromAHardwareBuffer(
+ AHardwareBuffer* hardwareBuffer,
+ SkAlphaType alphaType) {
+ return sk_image_factory::MakeFromAHardwareBuffer(hardwareBuffer, alphaType);
+}
+
+sk_sp<SkImage> SkImage::MakeFromAHardwareBuffer(
+ AHardwareBuffer* hardwareBuffer,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ GrSurfaceOrigin surfaceOrigin) {
+ return sk_image_factory::MakeFromAHardwareBuffer(hardwareBuffer, alphaType,
+ colorSpace, surfaceOrigin);
+}
+
+sk_sp<SkImage> SkImage::MakeFromAHardwareBufferWithData(
+ GrDirectContext* context,
+ const SkPixmap& pixmap,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin surfaceOrigin) {
+ return sk_image_factory::MakeFromAHardwareBufferWithData(context,
+ pixmap,
+ hardwareBuffer,
+ surfaceOrigin);
+}
+
+#endif // SK_DISABLE_LEGACY_IMAGE_FACTORIES
+
+#endif // defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
diff --git a/gfx/skia/skia/src/image/SkImage_Base.cpp b/gfx/skia/skia/src/image/SkImage_Base.cpp
new file mode 100644
index 0000000000..f8d766a64f
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Base.cpp
@@ -0,0 +1,367 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkImage_Base.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkSamplingPriv.h"
+#include "src/image/SkRescaleAndReadPixels.h"
+
+#include <atomic>
+#include <string_view>
+#include <tuple>
+#include <utility>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/private/gpu/ganesh/GrImageContext.h"
+#include "src/gpu/ResourceKey.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSamplerState.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/effects/GrBicubicEffect.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+enum class GrColorType;
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/core/SkColorSpacePriv.h"
+#include "src/gpu/graphite/Image_Graphite.h"
+#include "src/gpu/graphite/Log.h"
+#endif
+
+SkImage_Base::SkImage_Base(const SkImageInfo& info, uint32_t uniqueID)
+ : SkImage(info, uniqueID), fAddedToRasterCache(false) {}
+
+SkImage_Base::~SkImage_Base() {
+ if (fAddedToRasterCache.load()) {
+ SkNotifyBitmapGenIDIsStale(this->uniqueID());
+ }
+}
+
+void SkImage_Base::onAsyncRescaleAndReadPixels(const SkImageInfo& info,
+ SkIRect origSrcRect,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) const {
+ SkBitmap src;
+ SkPixmap peek;
+ SkIRect srcRect;
+ if (this->peekPixels(&peek)) {
+ src.installPixels(peek);
+ srcRect = origSrcRect;
+ } else {
+ // Context TODO: Elevate GrDirectContext requirement to public API.
+ auto dContext = as_IB(this)->directContext();
+ src.setInfo(this->imageInfo().makeDimensions(origSrcRect.size()));
+ src.allocPixels();
+ if (!this->readPixels(dContext, src.pixmap(), origSrcRect.x(), origSrcRect.y())) {
+ callback(context, nullptr);
+ return;
+ }
+ srcRect = SkIRect::MakeSize(src.dimensions());
+ }
+ return SkRescaleAndReadPixels(src, info, srcRect, rescaleGamma, rescaleMode, callback, context);
+}
+
+void SkImage_Base::onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ SkIRect srcRect,
+ SkISize dstSize,
+ RescaleGamma,
+ RescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) const {
+ // TODO: Call non-YUV asyncRescaleAndReadPixels and then make our callback convert to YUV and
+ // call client's callback.
+ callback(context, nullptr);
+}
+
+#if defined(SK_GANESH)
+std::tuple<GrSurfaceProxyView, GrColorType> SkImage_Base::asView(GrRecordingContext* context,
+ GrMipmapped mipmapped,
+ GrImageTexGenPolicy policy) const {
+ if (!context) {
+ return {};
+ }
+ if (!context->priv().caps()->mipmapSupport() || this->dimensions().area() <= 1) {
+ mipmapped = GrMipmapped::kNo;
+ }
+ return this->onAsView(context, mipmapped, policy);
+}
+
+std::unique_ptr<GrFragmentProcessor> SkImage_Base::asFragmentProcessor(
+ GrRecordingContext* rContext,
+ SkSamplingOptions sampling,
+ const SkTileMode tileModes[2],
+ const SkMatrix& m,
+ const SkRect* subset,
+ const SkRect* domain) const {
+ if (!rContext) {
+ return {};
+ }
+ if (sampling.useCubic && !GrValidCubicResampler(sampling.cubic)) {
+ return {};
+ }
+ if (sampling.mipmap != SkMipmapMode::kNone &&
+ (!rContext->priv().caps()->mipmapSupport() || this->dimensions().area() <= 1)) {
+ sampling = SkSamplingOptions(sampling.filter);
+ }
+ return this->onAsFragmentProcessor(rContext, sampling, tileModes, m, subset, domain);
+}
+
+std::unique_ptr<GrFragmentProcessor> SkImage_Base::MakeFragmentProcessorFromView(
+ GrRecordingContext* rContext,
+ GrSurfaceProxyView view,
+ SkAlphaType at,
+ SkSamplingOptions sampling,
+ const SkTileMode tileModes[2],
+ const SkMatrix& m,
+ const SkRect* subset,
+ const SkRect* domain) {
+ if (!view) {
+ return nullptr;
+ }
+ const GrCaps& caps = *rContext->priv().caps();
+ auto wmx = SkTileModeToWrapMode(tileModes[0]);
+ auto wmy = SkTileModeToWrapMode(tileModes[1]);
+ if (sampling.useCubic) {
+ if (subset) {
+ if (domain) {
+ return GrBicubicEffect::MakeSubset(std::move(view),
+ at,
+ m,
+ wmx,
+ wmy,
+ *subset,
+ *domain,
+ sampling.cubic,
+ GrBicubicEffect::Direction::kXY,
+ *rContext->priv().caps());
+ }
+ return GrBicubicEffect::MakeSubset(std::move(view),
+ at,
+ m,
+ wmx,
+ wmy,
+ *subset,
+ sampling.cubic,
+ GrBicubicEffect::Direction::kXY,
+ *rContext->priv().caps());
+ }
+ return GrBicubicEffect::Make(std::move(view),
+ at,
+ m,
+ wmx,
+ wmy,
+ sampling.cubic,
+ GrBicubicEffect::Direction::kXY,
+ *rContext->priv().caps());
+ }
+ if (sampling.isAniso()) {
+ if (!rContext->priv().caps()->anisoSupport()) {
+ // Fallback to linear
+ sampling = SkSamplingPriv::AnisoFallback(view.mipmapped() == GrMipmapped::kYes);
+ }
+ } else if (view.mipmapped() == GrMipmapped::kNo) {
+ sampling = SkSamplingOptions(sampling.filter);
+ }
+ GrSamplerState sampler;
+ if (sampling.isAniso()) {
+ sampler = GrSamplerState::Aniso(wmx, wmy, sampling.maxAniso, view.mipmapped());
+ } else {
+ sampler = GrSamplerState(wmx, wmy, sampling.filter, sampling.mipmap);
+ }
+ if (subset) {
+ if (domain) {
+ return GrTextureEffect::MakeSubset(std::move(view),
+ at,
+ m,
+ sampler,
+ *subset,
+ *domain,
+ caps);
+ }
+ return GrTextureEffect::MakeSubset(std::move(view),
+ at,
+ m,
+ sampler,
+ *subset,
+ caps);
+ } else {
+ return GrTextureEffect::Make(std::move(view), at, m, sampler, caps);
+ }
+}
+
+GrSurfaceProxyView SkImage_Base::FindOrMakeCachedMipmappedView(GrRecordingContext* rContext,
+ GrSurfaceProxyView view,
+ uint32_t imageUniqueID) {
+ SkASSERT(rContext);
+ SkASSERT(imageUniqueID != SK_InvalidUniqueID);
+
+ if (!view || view.proxy()->asTextureProxy()->mipmapped() == GrMipmapped::kYes) {
+ return view;
+ }
+ GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
+
+ skgpu::UniqueKey baseKey;
+ GrMakeKeyFromImageID(&baseKey, imageUniqueID, SkIRect::MakeSize(view.dimensions()));
+ SkASSERT(baseKey.isValid());
+ skgpu::UniqueKey mipmappedKey;
+ static const skgpu::UniqueKey::Domain kMipmappedDomain = skgpu::UniqueKey::GenerateDomain();
+ { // No extra values beyond the domain are required. Must name the var to please
+ // clang-tidy.
+ skgpu::UniqueKey::Builder b(&mipmappedKey, baseKey, kMipmappedDomain, 0);
+ }
+ SkASSERT(mipmappedKey.isValid());
+ if (sk_sp<GrTextureProxy> cachedMippedView =
+ proxyProvider->findOrCreateProxyByUniqueKey(mipmappedKey)) {
+ return {std::move(cachedMippedView), view.origin(), view.swizzle()};
+ }
+
+ auto copy = GrCopyBaseMipMapToView(rContext, view);
+ if (!copy) {
+ return view;
+ }
+ // TODO: If we move listeners up from SkImage_Lazy to SkImage_Base then add one here.
+ proxyProvider->assignUniqueKeyToProxy(mipmappedKey, copy.asTextureProxy());
+ return copy;
+}
+
+GrBackendTexture SkImage_Base::onGetBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const {
+ return GrBackendTexture(); // invalid
+}
+
+GrSurfaceProxyView SkImage_Base::CopyView(GrRecordingContext* context,
+ GrSurfaceProxyView src,
+ GrMipmapped mipmapped,
+ GrImageTexGenPolicy policy,
+ std::string_view label) {
+ skgpu::Budgeted budgeted = policy == GrImageTexGenPolicy::kNew_Uncached_Budgeted
+ ? skgpu::Budgeted::kYes
+ : skgpu::Budgeted::kNo;
+ return GrSurfaceProxyView::Copy(context,
+ std::move(src),
+ mipmapped,
+ SkBackingFit::kExact,
+ budgeted,
+ /*label=*/label);
+}
+
+#endif // defined(SK_GANESH)
+
+#if defined(SK_GRAPHITE)
+std::tuple<skgpu::graphite::TextureProxyView, SkColorType> SkImage_Base::asView(
+ skgpu::graphite::Recorder* recorder,
+ skgpu::Mipmapped mipmapped) const {
+ if (!recorder) {
+ return {};
+ }
+
+ if (!as_IB(this)->isGraphiteBacked()) {
+ return {};
+ }
+ // TODO(b/238756380): YUVA not supported yet
+ if (as_IB(this)->isYUVA()) {
+ return {};
+ }
+
+ auto image = reinterpret_cast<const skgpu::graphite::Image*>(this);
+
+ if (this->dimensions().area() <= 1) {
+ mipmapped = skgpu::Mipmapped::kNo;
+ }
+
+ if (mipmapped == skgpu::Mipmapped::kYes &&
+ image->textureProxyView().proxy()->mipmapped() != skgpu::Mipmapped::kYes) {
+ SKGPU_LOG_W("Graphite does not auto-generate mipmap levels");
+ return {};
+ }
+
+ SkColorType ct = this->colorType();
+ return { image->textureProxyView(), ct };
+}
+
+sk_sp<SkImage> SkImage::makeColorSpace(sk_sp<SkColorSpace> targetColorSpace,
+ skgpu::graphite::Recorder* recorder,
+ RequiredImageProperties requiredProps) const {
+ return this->makeColorTypeAndColorSpace(this->colorType(), std::move(targetColorSpace),
+ recorder, requiredProps);
+}
+
+sk_sp<SkImage> SkImage::makeColorTypeAndColorSpace(SkColorType targetColorType,
+ sk_sp<SkColorSpace> targetColorSpace,
+ skgpu::graphite::Recorder* recorder,
+ RequiredImageProperties requiredProps) const {
+ if (kUnknown_SkColorType == targetColorType || !targetColorSpace) {
+ return nullptr;
+ }
+
+ SkColorType colorType = this->colorType();
+ SkColorSpace* colorSpace = this->colorSpace();
+ if (!colorSpace) {
+ colorSpace = sk_srgb_singleton();
+ }
+ if (colorType == targetColorType &&
+ (SkColorSpace::Equals(colorSpace, targetColorSpace.get()) || this->isAlphaOnly())) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+
+ return as_IB(this)->onMakeColorTypeAndColorSpace(targetColorType,
+ std::move(targetColorSpace),
+ recorder,
+ requiredProps);
+}
+
+#endif // SK_GRAPHITE
+
+GrDirectContext* SkImage_Base::directContext() const {
+#if defined(SK_GANESH)
+ return GrAsDirectContext(this->context());
+#else
+ return nullptr;
+#endif
+}
+
+bool SkImage_Base::onAsLegacyBitmap(GrDirectContext* dContext, SkBitmap* bitmap) const {
+ // As the base-class, all we can do is make a copy (regardless of mode).
+ // Subclasses that want to be more optimal should override.
+ SkImageInfo info = fInfo.makeColorType(kN32_SkColorType).makeColorSpace(nullptr);
+ if (!bitmap->tryAllocPixels(info)) {
+ return false;
+ }
+
+ if (!this->readPixels(dContext, bitmap->info(), bitmap->getPixels(), bitmap->rowBytes(),
+ 0, 0)) {
+ bitmap->reset();
+ return false;
+ }
+
+ bitmap->setImmutable();
+ return true;
+}
diff --git a/gfx/skia/skia/src/image/SkImage_Base.h b/gfx/skia/skia/src/image/SkImage_Base.h
new file mode 100644
index 0000000000..f5fc008523
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Base.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Base_DEFINED
+#define SkImage_Base_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkMipmap.h"
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string_view>
+#include <tuple>
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrTypes.h"
+#include "src/gpu/ganesh/SkGr.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+namespace skgpu {
+namespace graphite {
+class TextureProxyView;
+}
+}
+#endif
+
+class GrBackendTexture;
+class GrDirectContext;
+class GrFragmentProcessor;
+class GrImageContext;
+class GrRecordingContext;
+class GrSurfaceProxyView;
+class SkBitmap;
+class SkColorSpace;
+class SkMatrix;
+class SkPixmap;
+enum SkAlphaType : int;
+enum SkColorType : int;
+enum SkYUVColorSpace : int;
+enum class GrColorType;
+enum class SkTileMode;
+struct SkIRect;
+struct SkISize;
+struct SkImageInfo;
+struct SkRect;
+
+enum {
+ kNeedNewImageUniqueID = 0
+};
+
+class SkImage_Base : public SkImage {
+public:
+ ~SkImage_Base() override;
+
+ virtual bool onPeekPixels(SkPixmap*) const { return false; }
+
+ virtual const SkBitmap* onPeekBitmap() const { return nullptr; }
+
+ virtual bool onReadPixels(GrDirectContext*,
+ const SkImageInfo& dstInfo,
+ void* dstPixels,
+ size_t dstRowBytes,
+ int srcX,
+ int srcY,
+ CachingHint) const = 0;
+
+ virtual bool onHasMipmaps() const = 0;
+
+ virtual SkMipmap* onPeekMips() const { return nullptr; }
+
+ sk_sp<SkMipmap> refMips() const {
+ return sk_ref_sp(this->onPeekMips());
+ }
+
+ /**
+ * Default implementation does a rescale/read and then calls the callback.
+ */
+ virtual void onAsyncRescaleAndReadPixels(const SkImageInfo&,
+ SkIRect srcRect,
+ RescaleGamma,
+ RescaleMode,
+ ReadPixelsCallback,
+ ReadPixelsContext) const;
+ /**
+ * Default implementation does a rescale/read/yuv conversion and then calls the callback.
+ */
+ virtual void onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ SkIRect srcRect,
+ SkISize dstSize,
+ RescaleGamma,
+ RescaleMode,
+ ReadPixelsCallback,
+ ReadPixelsContext) const;
+
+ virtual GrImageContext* context() const { return nullptr; }
+
+ /** this->context() try-casted to GrDirectContext. Useful for migrations – avoid otherwise! */
+ GrDirectContext* directContext() const;
+
+#if defined(SK_GANESH)
+ virtual GrSemaphoresSubmitted onFlush(GrDirectContext*, const GrFlushInfo&) const {
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ // Returns a GrSurfaceProxyView representation of the image, if possible. This also returns
+ // a color type. This may be different than the image's color type when the image is not
+ // texture-backed and the capabilities of the GPU require a data type conversion to put
+ // the data in a texture.
+ std::tuple<GrSurfaceProxyView, GrColorType> asView(
+ GrRecordingContext* context,
+ GrMipmapped mipmapped,
+ GrImageTexGenPolicy policy = GrImageTexGenPolicy::kDraw) const;
+
+ /**
+ * Returns a GrFragmentProcessor that can be used with the passed GrRecordingContext to
+ * draw the image. SkSamplingOptions indicates the filter and SkTileMode[] indicates the x and
+ * y tile modes. The passed matrix is applied to the coordinates before sampling the image.
+ * Optional 'subset' indicates whether the tile modes should be applied to a subset of the image
+ * Optional 'domain' is a bound on the coordinates of the image that will be required and can be
+ * used to optimize the shader if 'subset' is also specified.
+ */
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(GrRecordingContext*,
+ SkSamplingOptions,
+ const SkTileMode[2],
+ const SkMatrix&,
+ const SkRect* subset = nullptr,
+ const SkRect* domain = nullptr) const;
+
+ // If this image is the current cached image snapshot of a surface then this is called when the
+ // surface is destroyed to indicate no further writes may happen to surface backing store.
+ virtual void generatingSurfaceIsDeleted() {}
+
+ virtual GrBackendTexture onGetBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const;
+#endif
+#if defined(SK_GRAPHITE)
+ // Returns a TextureProxyView representation of the image, if possible. This also returns
+ // a color type. This may be different than the image's color type when the image is not
+ // texture-backed and the capabilities of the GPU require a data type conversion to put
+ // the data in a texture.
+ std::tuple<skgpu::graphite::TextureProxyView, SkColorType> asView(
+ skgpu::graphite::Recorder*,
+ skgpu::Mipmapped) const;
+
+#endif
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)
+ bool isYUVA() const {
+ return this->type() == Type::kGaneshYUVA || this->type() == Type::kGraphiteYUVA;
+ }
+#endif
+
+ virtual bool onPinAsTexture(GrRecordingContext*) const { return false; }
+ virtual void onUnpinAsTexture(GrRecordingContext*) const {}
+ virtual bool isPinnedOnContext(GrRecordingContext*) const { return false; }
+
+ // return a read-only copy of the pixels. We promise to not modify them,
+ // but only inspect them (or encode them).
+ virtual bool getROPixels(GrDirectContext*, SkBitmap*,
+ CachingHint = kAllow_CachingHint) const = 0;
+
+ virtual sk_sp<SkImage> onMakeSubset(const SkIRect&, GrDirectContext*) const = 0;
+
+ virtual sk_sp<SkData> onRefEncoded() const { return nullptr; }
+
+ virtual bool onAsLegacyBitmap(GrDirectContext*, SkBitmap*) const;
+
+ enum class Type {
+ kUnknown,
+ kRaster,
+ kRasterPinnable,
+ kLazy,
+ kGanesh,
+ kGaneshYUVA,
+ kGraphite,
+ kGraphiteYUVA,
+ };
+
+ virtual Type type() const { return Type::kUnknown; }
+
+ // True for picture-backed and codec-backed
+ bool onIsLazyGenerated() const { return this->type() == Type::kLazy; }
+
+ // True for images instantiated by Ganesh in GPU memory
+ bool isGaneshBacked() const {
+ return this->type() == Type::kGanesh || this->type() == Type::kGaneshYUVA;
+ }
+
+ // True for images instantiated by Graphite in GPU memory
+ bool isGraphiteBacked() const {
+ return this->type() == Type::kGraphite || this->type() == Type::kGraphiteYUVA;
+ }
+
+ // Amount of texture memory used by texture-backed images.
+ virtual size_t onTextureSize() const { return 0; }
+
+ // Call when this image is part of the key to a resourcecache entry. This allows the cache
+ // to know automatically those entries can be purged when this SkImage deleted.
+ virtual void notifyAddedToRasterCache() const {
+ fAddedToRasterCache.store(true);
+ }
+
+ virtual bool onIsValid(GrRecordingContext*) const = 0;
+
+ virtual sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType, sk_sp<SkColorSpace>,
+ GrDirectContext*) const = 0;
+
+ virtual sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const = 0;
+
+ // on failure, returns nullptr
+ virtual sk_sp<SkImage> onMakeWithMipmaps(sk_sp<SkMipmap>) const {
+ return nullptr;
+ }
+
+#if defined(SK_GRAPHITE)
+ virtual sk_sp<SkImage> onMakeTextureImage(skgpu::graphite::Recorder*,
+ RequiredImageProperties) const = 0;
+ virtual sk_sp<SkImage> onMakeSubset(const SkIRect&,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const = 0;
+ virtual sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType,
+ sk_sp<SkColorSpace>,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const = 0;
+#endif
+
+protected:
+ SkImage_Base(const SkImageInfo& info, uint32_t uniqueID);
+
+#if defined(SK_GANESH)
+ // Utility for making a copy of an existing view when the GrImageTexGenPolicy is not kDraw.
+ static GrSurfaceProxyView CopyView(GrRecordingContext*,
+ GrSurfaceProxyView src,
+ GrMipmapped,
+ GrImageTexGenPolicy,
+ std::string_view label);
+
+ static std::unique_ptr<GrFragmentProcessor> MakeFragmentProcessorFromView(GrRecordingContext*,
+ GrSurfaceProxyView,
+ SkAlphaType,
+ SkSamplingOptions,
+ const SkTileMode[2],
+ const SkMatrix&,
+ const SkRect* subset,
+ const SkRect* domain);
+
+ /**
+ * Returns input view if it is already mipmapped. Otherwise, attempts to make a mipmapped view
+ * with the same contents. If the mipmapped copy is successfully created it will be cached
+ * using the image unique ID. A subsequent call with the same unique ID will return the cached
+ * view if it has not been purged. The view is cached with a key domain specific to this
+ * function.
+ */
+ static GrSurfaceProxyView FindOrMakeCachedMipmappedView(GrRecordingContext*,
+ GrSurfaceProxyView,
+ uint32_t imageUniqueID);
+#endif
+
+private:
+#if defined(SK_GANESH)
+ virtual std::tuple<GrSurfaceProxyView, GrColorType> onAsView(
+ GrRecordingContext*,
+ GrMipmapped,
+ GrImageTexGenPolicy policy) const = 0;
+
+ virtual std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(
+ GrRecordingContext*,
+ SkSamplingOptions,
+ const SkTileMode[2],
+ const SkMatrix&,
+ const SkRect* subset,
+ const SkRect* domain) const = 0;
+#endif
+
+ // Set true by caches when they cache content that's derived from the current pixels.
+ mutable std::atomic<bool> fAddedToRasterCache;
+};
+
+static inline SkImage_Base* as_IB(SkImage* image) {
+ return static_cast<SkImage_Base*>(image);
+}
+
+static inline SkImage_Base* as_IB(const sk_sp<SkImage>& image) {
+ return static_cast<SkImage_Base*>(image.get());
+}
+
+static inline const SkImage_Base* as_IB(const SkImage* image) {
+ return static_cast<const SkImage_Base*>(image);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Gpu.cpp b/gfx/skia/skia/src/image/SkImage_Gpu.cpp
new file mode 100644
index 0000000000..a8d9818b79
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Gpu.cpp
@@ -0,0 +1,821 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkImage_Gpu.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkSurface.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/gpu/ganesh/GrImageContext.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkAutoPixmapStorage.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/gpu/RefCntedCallback.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrBackendTextureImageGenerator.h"
+#include "src/gpu/ganesh/GrBackendUtils.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrGpu.h"
+#include "src/gpu/ganesh/GrGpuResourcePriv.h"
+#include "src/gpu/ganesh/GrImageContextPriv.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrRenderTask.h"
+#include "src/gpu/ganesh/GrSemaphore.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrTexture.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/SurfaceContext.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/image/SkImage_Base.h"
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+#include "src/gpu/ganesh/GrAHardwareBufferImageGenerator.h"
+#include "src/gpu/ganesh/GrAHardwareBufferUtils_impl.h"
+#endif
+
+#include <algorithm>
+#include <cstddef>
+#include <utility>
+
+class SkMatrix;
+enum SkColorType : int;
+enum class SkTextureCompressionType;
+enum class SkTileMode;
+
+inline SkImage_Gpu::ProxyChooser::ProxyChooser(sk_sp<GrSurfaceProxy> stableProxy)
+ : fStableProxy(std::move(stableProxy)) {
+ SkASSERT(fStableProxy);
+}
+
+inline SkImage_Gpu::ProxyChooser::ProxyChooser(sk_sp<GrSurfaceProxy> stableProxy,
+ sk_sp<GrSurfaceProxy> volatileProxy,
+ sk_sp<GrRenderTask> copyTask,
+ int volatileProxyTargetCount)
+ : fStableProxy(std::move(stableProxy))
+ , fVolatileProxy(std::move(volatileProxy))
+ , fVolatileToStableCopyTask(std::move(copyTask))
+ , fVolatileProxyTargetCount(volatileProxyTargetCount) {
+ SkASSERT(fStableProxy);
+ SkASSERT(fVolatileProxy);
+ SkASSERT(fVolatileToStableCopyTask);
+}
+
+inline SkImage_Gpu::ProxyChooser::~ProxyChooser() {
+ // The image is being destroyed. If there is a stable copy proxy but we've been able to use
+ // the volatile proxy for all requests then we can skip the copy.
+ if (fVolatileToStableCopyTask) {
+ fVolatileToStableCopyTask->makeSkippable();
+ }
+}
+
+inline sk_sp<GrSurfaceProxy> SkImage_Gpu::ProxyChooser::chooseProxy(GrRecordingContext* context) {
+ SkAutoSpinlock hold(fLock);
+ if (fVolatileProxy) {
+ SkASSERT(fVolatileProxyTargetCount <= fVolatileProxy->getTaskTargetCount());
+ // If this image is used off the direct context it originated on, i.e. on a recording-only
+ // context, we don't know how the recording context's actions are ordered WRT direct context
+ // actions until the recording context's DAG is imported into the direct context.
+ if (context->asDirectContext() &&
+ fVolatileProxyTargetCount == fVolatileProxy->getTaskTargetCount()) {
+ return fVolatileProxy;
+ }
+ fVolatileProxy.reset();
+ fVolatileToStableCopyTask.reset();
+ return fStableProxy;
+ }
+ return fStableProxy;
+}
+
+inline sk_sp<GrSurfaceProxy> SkImage_Gpu::ProxyChooser::switchToStableProxy() {
+ SkAutoSpinlock hold(fLock);
+ fVolatileProxy.reset();
+ fVolatileToStableCopyTask.reset();
+ return fStableProxy;
+}
+
+inline sk_sp<GrSurfaceProxy> SkImage_Gpu::ProxyChooser::makeVolatileProxyStable() {
+ SkAutoSpinlock hold(fLock);
+ if (fVolatileProxy) {
+ fStableProxy = std::move(fVolatileProxy);
+ fVolatileToStableCopyTask->makeSkippable();
+ fVolatileToStableCopyTask.reset();
+ }
+ return fStableProxy;
+}
+
+inline bool SkImage_Gpu::ProxyChooser::surfaceMustCopyOnWrite(GrSurfaceProxy* surfaceProxy) const {
+ SkAutoSpinlock hold(fLock);
+ return surfaceProxy->underlyingUniqueID() == fStableProxy->underlyingUniqueID();
+}
+
+inline size_t SkImage_Gpu::ProxyChooser::gpuMemorySize() const {
+ SkAutoSpinlock hold(fLock);
+ size_t size = fStableProxy->gpuMemorySize();
+ if (fVolatileProxy) {
+ SkASSERT(fVolatileProxy->gpuMemorySize() == size);
+ }
+ return size;
+}
+
+inline GrMipmapped SkImage_Gpu::ProxyChooser::mipmapped() const {
+ SkAutoSpinlock hold(fLock);
+ GrMipmapped mipmapped = fStableProxy->asTextureProxy()->mipmapped();
+ if (fVolatileProxy) {
+ SkASSERT(fVolatileProxy->asTextureProxy()->mipmapped() == mipmapped);
+ }
+ return mipmapped;
+}
+
+#ifdef SK_DEBUG
+inline GrBackendFormat SkImage_Gpu::ProxyChooser::backendFormat() {
+ SkAutoSpinlock hold(fLock);
+ if (fVolatileProxy) {
+ SkASSERT(fVolatileProxy->backendFormat() == fStableProxy->backendFormat());
+ }
+ return fStableProxy->backendFormat();
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+
+SkImage_Gpu::SkImage_Gpu(sk_sp<GrImageContext> context,
+ uint32_t uniqueID,
+ GrSurfaceProxyView view,
+ SkColorInfo info)
+ : INHERITED(std::move(context),
+ SkImageInfo::Make(view.proxy()->backingStoreDimensions(), std::move(info)),
+ uniqueID)
+ , fChooser(view.detachProxy())
+ , fSwizzle(view.swizzle())
+ , fOrigin(view.origin()) {
+#ifdef SK_DEBUG
+ const GrBackendFormat& format = fChooser.backendFormat();
+ const GrCaps* caps = this->context()->priv().caps();
+ GrColorType grCT = SkColorTypeToGrColorType(this->colorType());
+ SkASSERT(caps->isFormatCompressed(format) ||
+ caps->areColorTypeAndFormatCompatible(grCT, format));
+#endif
+}
+
+SkImage_Gpu::SkImage_Gpu(sk_sp<GrDirectContext> dContext,
+ GrSurfaceProxyView volatileSrc,
+ sk_sp<GrSurfaceProxy> stableCopy,
+ sk_sp<GrRenderTask> copyTask,
+ int volatileSrcTargetCount,
+ SkColorInfo info)
+ : INHERITED(std::move(dContext),
+ SkImageInfo::Make(volatileSrc.proxy()->backingStoreDimensions(),
+ std::move(info)),
+ kNeedNewImageUniqueID)
+ , fChooser(std::move(stableCopy),
+ volatileSrc.detachProxy(),
+ std::move(copyTask),
+ volatileSrcTargetCount)
+ , fSwizzle(volatileSrc.swizzle())
+ , fOrigin(volatileSrc.origin()) {
+#ifdef SK_DEBUG
+ const GrBackendFormat& format = fChooser.backendFormat();
+ const GrCaps* caps = this->context()->priv().caps();
+ GrColorType grCT = SkColorTypeToGrColorType(this->colorType());
+ SkASSERT(caps->isFormatCompressed(format) ||
+ caps->areColorTypeAndFormatCompatible(grCT, format));
+#endif
+}
+
+sk_sp<SkImage> SkImage_Gpu::MakeWithVolatileSrc(sk_sp<GrRecordingContext> rContext,
+ GrSurfaceProxyView volatileSrc,
+ SkColorInfo colorInfo) {
+ SkASSERT(rContext);
+ SkASSERT(volatileSrc);
+ SkASSERT(volatileSrc.proxy()->asTextureProxy());
+ GrMipmapped mm = volatileSrc.proxy()->asTextureProxy()->mipmapped();
+ sk_sp<GrRenderTask> copyTask;
+ auto copy = GrSurfaceProxy::Copy(rContext.get(),
+ volatileSrc.refProxy(),
+ volatileSrc.origin(),
+ mm,
+ SkBackingFit::kExact,
+ skgpu::Budgeted::kYes,
+ /*label=*/"ImageGpu_MakeWithVolatileSrc",
+ &copyTask);
+ if (!copy) {
+ return nullptr;
+ }
+ // We only attempt to make a dual-proxy image on a direct context. This optimziation requires
+ // knowing how things are ordered and recording-only contexts are not well ordered WRT other
+ // recording contexts.
+ if (auto direct = sk_ref_sp(rContext->asDirectContext())) {
+ int targetCount = volatileSrc.proxy()->getTaskTargetCount();
+ return sk_sp<SkImage>(new SkImage_Gpu(std::move(direct),
+ std::move(volatileSrc),
+ std::move(copy),
+ std::move(copyTask),
+ targetCount,
+ std::move(colorInfo)));
+ }
+ GrSurfaceProxyView copyView(std::move(copy), volatileSrc.origin(), volatileSrc.swizzle());
+ return sk_make_sp<SkImage_Gpu>(std::move(rContext),
+ kNeedNewImageUniqueID,
+ std::move(copyView),
+ std::move(colorInfo));
+}
+
+SkImage_Gpu::~SkImage_Gpu() = default;
+
+bool SkImage_Gpu::surfaceMustCopyOnWrite(GrSurfaceProxy* surfaceProxy) const {
+ return fChooser.surfaceMustCopyOnWrite(surfaceProxy);
+}
+
+bool SkImage_Gpu::onHasMipmaps() const { return fChooser.mipmapped() == GrMipmapped::kYes; }
+
+GrSemaphoresSubmitted SkImage_Gpu::onFlush(GrDirectContext* dContext,
+ const GrFlushInfo& info) const {
+ if (!fContext->priv().matches(dContext) || dContext->abandoned()) {
+ if (info.fSubmittedProc) {
+ info.fSubmittedProc(info.fSubmittedContext, false);
+ }
+ if (info.fFinishedProc) {
+ info.fFinishedProc(info.fFinishedContext);
+ }
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ sk_sp<GrSurfaceProxy> proxy = fChooser.chooseProxy(dContext);
+ return dContext->priv().flushSurface(proxy.get(),
+ SkSurface::BackendSurfaceAccess::kNoAccess,
+ info);
+}
+
+GrBackendTexture SkImage_Gpu::onGetBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const {
+ auto direct = fContext->asDirectContext();
+ if (!direct) {
+ // This image was created with a DDL context and cannot be instantiated.
+ return GrBackendTexture(); // invalid
+ }
+ if (direct->abandoned()) {
+ return GrBackendTexture(); // invalid;
+ }
+
+ // We don't know how client's use of the texture will be ordered WRT Skia's. Ensure the
+ // texture seen by the client won't be mutated by a SkSurface.
+ sk_sp<GrSurfaceProxy> proxy = fChooser.switchToStableProxy();
+
+ if (!proxy->isInstantiated()) {
+ auto resourceProvider = direct->priv().resourceProvider();
+
+ if (!proxy->instantiate(resourceProvider)) {
+ return GrBackendTexture(); // invalid
+ }
+ }
+
+ GrTexture* texture = proxy->peekTexture();
+ if (texture) {
+ if (flushPendingGrContextIO) {
+ direct->priv().flushSurface(proxy.get());
+ }
+ if (origin) {
+ *origin = fOrigin;
+ }
+ return texture->getBackendTexture();
+ }
+ return GrBackendTexture(); // invalid
+}
+
+size_t SkImage_Gpu::onTextureSize() const { return fChooser.gpuMemorySize(); }
+
+sk_sp<SkImage> SkImage_Gpu::onMakeColorTypeAndColorSpace(SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS,
+ GrDirectContext* dContext) const {
+ SkColorInfo info(targetCT, this->alphaType(), std::move(targetCS));
+ if (!fContext->priv().matches(dContext)) {
+ return nullptr;
+ }
+
+ auto sfc = dContext->priv().makeSFCWithFallback(GrImageInfo(info, this->dimensions()),
+ SkBackingFit::kExact);
+ if (!sfc) {
+ return nullptr;
+ }
+ // We respecify info's CT because we called MakeWithFallback.
+ auto ct = GrColorTypeToSkColorType(sfc->colorInfo().colorType());
+ info = info.makeColorType(ct);
+
+ // Draw this image's texture into the SFC.
+ auto [view, _] = this->asView(dContext, GrMipmapped(this->hasMipmaps()));
+ auto texFP = GrTextureEffect::Make(std::move(view), this->alphaType());
+ auto colorFP = GrColorSpaceXformEffect::Make(std::move(texFP),
+ this->imageInfo().colorInfo(),
+ info);
+ sfc->fillWithFP(std::move(colorFP));
+
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(dContext),
+ kNeedNewImageUniqueID,
+ sfc->readSurfaceView(),
+ std::move(info));
+}
+
+sk_sp<SkImage> SkImage_Gpu::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
+ // It doesn't seem worth the complexity of trying to share the ProxyChooser among multiple
+ // images. Just fall back to the stable copy.
+ GrSurfaceProxyView view(fChooser.switchToStableProxy(), fOrigin, fSwizzle);
+ return sk_make_sp<SkImage_Gpu>(fContext,
+ kNeedNewImageUniqueID,
+ std::move(view),
+ this->imageInfo().colorInfo().makeColorSpace(std::move(newCS)));
+}
+
+void SkImage_Gpu::onAsyncRescaleAndReadPixels(const SkImageInfo& info,
+ SkIRect srcRect,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) const {
+ auto dContext = fContext->asDirectContext();
+ if (!dContext) {
+ // DDL TODO: buffer up the readback so it occurs when the DDL is drawn?
+ callback(context, nullptr);
+ return;
+ }
+ auto ctx = dContext->priv().makeSC(this->makeView(dContext), this->imageInfo().colorInfo());
+ if (!ctx) {
+ callback(context, nullptr);
+ return;
+ }
+ ctx->asyncRescaleAndReadPixels(dContext, info, srcRect, rescaleGamma, rescaleMode,
+ callback, context);
+}
+
+void SkImage_Gpu::onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ SkIRect srcRect,
+ SkISize dstSize,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) const {
+ auto dContext = fContext->asDirectContext();
+ if (!dContext) {
+ // DDL TODO: buffer up the readback so it occurs when the DDL is drawn?
+ callback(context, nullptr);
+ return;
+ }
+ auto ctx = dContext->priv().makeSC(this->makeView(dContext), this->imageInfo().colorInfo());
+ if (!ctx) {
+ callback(context, nullptr);
+ return;
+ }
+ ctx->asyncRescaleAndReadPixelsYUV420(dContext,
+ yuvColorSpace,
+ std::move(dstColorSpace),
+ srcRect,
+ dstSize,
+ rescaleGamma,
+ rescaleMode,
+ callback,
+ context);
+}
+
+void SkImage_Gpu::generatingSurfaceIsDeleted() { fChooser.makeVolatileProxyStable(); }
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static sk_sp<SkImage> new_wrapped_texture_common(GrRecordingContext* rContext,
+ const GrBackendTexture& backendTex,
+ GrColorType colorType,
+ GrSurfaceOrigin origin,
+ SkAlphaType at,
+ sk_sp<SkColorSpace> colorSpace,
+ GrWrapOwnership ownership,
+ sk_sp<skgpu::RefCntedCallback> releaseHelper) {
+ if (!backendTex.isValid() || backendTex.width() <= 0 || backendTex.height() <= 0) {
+ return nullptr;
+ }
+
+ GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
+ sk_sp<GrTextureProxy> proxy = proxyProvider->wrapBackendTexture(
+ backendTex, ownership, GrWrapCacheable::kNo, kRead_GrIOType, std::move(releaseHelper));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ skgpu::Swizzle swizzle = rContext->priv().caps()->getReadSwizzle(proxy->backendFormat(),
+ colorType);
+ GrSurfaceProxyView view(std::move(proxy), origin, swizzle);
+ SkColorInfo info(GrColorTypeToSkColorType(colorType), at, std::move(colorSpace));
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(rContext),
+ kNeedNewImageUniqueID,
+ std::move(view),
+ std::move(info));
+}
+
+sk_sp<SkImage> SkImage::MakeFromCompressedTexture(GrRecordingContext* rContext,
+ const GrBackendTexture& tex,
+ GrSurfaceOrigin origin,
+ SkAlphaType at,
+ sk_sp<SkColorSpace> cs,
+ TextureReleaseProc releaseP,
+ ReleaseContext releaseC) {
+ auto releaseHelper = skgpu::RefCntedCallback::Make(releaseP, releaseC);
+
+ if (!rContext) {
+ return nullptr;
+ }
+
+ const GrCaps* caps = rContext->priv().caps();
+
+ if (!SkImage_GpuBase::ValidateCompressedBackendTexture(caps, tex, at)) {
+ return nullptr;
+ }
+
+ GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
+ sk_sp<GrTextureProxy> proxy = proxyProvider->wrapCompressedBackendTexture(
+ tex, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo, std::move(releaseHelper));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ SkTextureCompressionType type = GrBackendFormatToCompressionType(tex.getBackendFormat());
+ SkColorType ct = GrCompressionTypeToSkColorType(type);
+
+ GrSurfaceProxyView view(std::move(proxy), origin, skgpu::Swizzle::RGBA());
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(rContext),
+ kNeedNewImageUniqueID,
+ std::move(view),
+ SkColorInfo(ct, at, std::move(cs)));
+}
+
+sk_sp<SkImage> SkImage::MakeFromTexture(GrRecordingContext* rContext,
+ const GrBackendTexture& tex, GrSurfaceOrigin origin,
+ SkColorType ct, SkAlphaType at, sk_sp<SkColorSpace> cs,
+ TextureReleaseProc releaseP, ReleaseContext releaseC) {
+ auto releaseHelper = skgpu::RefCntedCallback::Make(releaseP, releaseC);
+
+ if (!rContext) {
+ return nullptr;
+ }
+
+ const GrCaps* caps = rContext->priv().caps();
+
+ GrColorType grColorType = SkColorTypeToGrColorType(ct);
+ if (GrColorType::kUnknown == grColorType) {
+ return nullptr;
+ }
+
+ if (!SkImage_GpuBase::ValidateBackendTexture(caps, tex, grColorType, ct, at, cs)) {
+ return nullptr;
+ }
+
+ return new_wrapped_texture_common(rContext, tex, grColorType, origin, at, std::move(cs),
+ kBorrow_GrWrapOwnership, std::move(releaseHelper));
+}
+
+sk_sp<SkImage> SkImage::MakeFromAdoptedTexture(GrRecordingContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin textureOrigin,
+ SkColorType colorType) {
+ return SkImage::MakeFromAdoptedTexture(context, backendTexture, textureOrigin,
+ colorType, kPremul_SkAlphaType, nullptr);
+}
+sk_sp<SkImage> SkImage::MakeFromAdoptedTexture(GrRecordingContext* context,
+ const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin textureOrigin,
+ SkColorType colorType,
+ SkAlphaType alphaType) {
+ return SkImage::MakeFromAdoptedTexture(context, backendTexture, textureOrigin,
+ colorType, alphaType, nullptr);
+}
+
+sk_sp<SkImage> SkImage::MakeFromAdoptedTexture(GrRecordingContext* rContext,
+ const GrBackendTexture& tex, GrSurfaceOrigin origin,
+ SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs) {
+ auto dContext = GrAsDirectContext(rContext);
+ if (!dContext) {
+ // We have a DDL context and we don't support adopted textures for them.
+ return nullptr;
+ }
+
+ const GrCaps* caps = dContext->priv().caps();
+
+ GrColorType grColorType = SkColorTypeToGrColorType(ct);
+ if (GrColorType::kUnknown == grColorType) {
+ return nullptr;
+ }
+
+ if (!SkImage_GpuBase::ValidateBackendTexture(caps, tex, grColorType, ct, at, cs)) {
+ return nullptr;
+ }
+
+ return new_wrapped_texture_common(dContext, tex, grColorType, origin, at, std::move(cs),
+ kAdopt_GrWrapOwnership, nullptr);
+}
+
+sk_sp<SkImage> SkImage::MakeTextureFromCompressed(GrDirectContext* direct, sk_sp<SkData> data,
+ int width, int height, SkTextureCompressionType type,
+ GrMipmapped mipmapped,
+ GrProtected isProtected) {
+ if (!direct || !data) {
+ return nullptr;
+ }
+
+ GrBackendFormat beFormat = direct->compressedBackendFormat(type);
+ if (!beFormat.isValid()) {
+ sk_sp<SkImage> tmp = MakeRasterFromCompressed(std::move(data), width, height, type);
+ if (!tmp) {
+ return nullptr;
+ }
+ return tmp->makeTextureImage(direct, mipmapped);
+ }
+
+ GrProxyProvider* proxyProvider = direct->priv().proxyProvider();
+ sk_sp<GrTextureProxy> proxy = proxyProvider->createCompressedTextureProxy(
+ {width, height}, skgpu::Budgeted::kYes, mipmapped, isProtected, type, std::move(data));
+ if (!proxy) {
+ return nullptr;
+ }
+ GrSurfaceProxyView view(std::move(proxy));
+
+ SkColorType colorType = GrCompressionTypeToSkColorType(type);
+
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(direct),
+ kNeedNewImageUniqueID,
+ std::move(view),
+ SkColorInfo(colorType, kOpaque_SkAlphaType, nullptr));
+}
+
+sk_sp<SkImage> SkImage::makeTextureImage(GrDirectContext* dContext,
+ GrMipmapped mipmapped,
+ skgpu::Budgeted budgeted) const {
+ if (!dContext) {
+ return nullptr;
+ }
+ if (!dContext->priv().caps()->mipmapSupport() || this->dimensions().area() <= 1) {
+ mipmapped = GrMipmapped::kNo;
+ }
+
+ if (as_IB(this)->isGaneshBacked()) {
+ if (!as_IB(this)->context()->priv().matches(dContext)) {
+ return nullptr;
+ }
+
+ if (mipmapped == GrMipmapped::kNo || this->hasMipmaps()) {
+ return sk_ref_sp(const_cast<SkImage*>(this));
+ }
+ }
+ GrImageTexGenPolicy policy = budgeted == skgpu::Budgeted::kYes
+ ? GrImageTexGenPolicy::kNew_Uncached_Budgeted
+ : GrImageTexGenPolicy::kNew_Uncached_Unbudgeted;
+ // TODO: Don't flatten YUVA images here. Add mips to the planes instead.
+ auto [view, ct] = as_IB(this)->asView(dContext, mipmapped, policy);
+ if (!view) {
+ return nullptr;
+ }
+ SkASSERT(view.asTextureProxy());
+ SkASSERT(mipmapped == GrMipmapped::kNo ||
+ view.asTextureProxy()->mipmapped() == GrMipmapped::kYes);
+ SkColorInfo colorInfo(GrColorTypeToSkColorType(ct), this->alphaType(), this->refColorSpace());
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(dContext),
+ this->uniqueID(),
+ std::move(view),
+ std::move(colorInfo));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakePromiseTexture(sk_sp<GrContextThreadSafeProxy> threadSafeProxy,
+ const GrBackendFormat& backendFormat,
+ SkISize dimensions,
+ GrMipmapped mipmapped,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ SkAlphaType alphaType,
+ sk_sp<SkColorSpace> colorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureContext textureContext) {
+ // Our contract is that we will always call the release proc even on failure.
+ // We use the helper to convey the context, so we need to ensure make doesn't fail.
+ textureReleaseProc = textureReleaseProc ? textureReleaseProc : [](void*) {};
+ auto releaseHelper = skgpu::RefCntedCallback::Make(textureReleaseProc, textureContext);
+ SkImageInfo info = SkImageInfo::Make(dimensions, colorType, alphaType, colorSpace);
+ if (!SkImageInfoIsValid(info)) {
+ return nullptr;
+ }
+
+ if (!threadSafeProxy) {
+ return nullptr;
+ }
+
+ if (dimensions.isEmpty()) {
+ return nullptr;
+ }
+
+ GrColorType grColorType = SkColorTypeToGrColorType(colorType);
+ if (GrColorType::kUnknown == grColorType) {
+ return nullptr;
+ }
+
+ if (!threadSafeProxy->priv().caps()->areColorTypeAndFormatCompatible(grColorType,
+ backendFormat)) {
+ return nullptr;
+ }
+
+ auto proxy = SkImage_GpuBase::MakePromiseImageLazyProxy(threadSafeProxy.get(),
+ dimensions,
+ backendFormat,
+ mipmapped,
+ textureFulfillProc,
+ std::move(releaseHelper));
+ if (!proxy) {
+ return nullptr;
+ }
+ skgpu::Swizzle swizzle = threadSafeProxy->priv().caps()->getReadSwizzle(backendFormat,
+ grColorType);
+ GrSurfaceProxyView view(std::move(proxy), origin, swizzle);
+ sk_sp<GrImageContext> ctx(GrImageContextPriv::MakeForPromiseImage(std::move(threadSafeProxy)));
+ return sk_make_sp<SkImage_Gpu>(std::move(ctx),
+ kNeedNewImageUniqueID,
+ std::move(view),
+ SkColorInfo(colorType, alphaType, std::move(colorSpace)));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakeCrossContextFromPixmap(GrDirectContext* dContext,
+ const SkPixmap& originalPixmap, bool buildMips,
+ bool limitToMaxTextureSize) {
+ // Some backends or drivers don't support (safely) moving resources between contexts
+ if (!dContext || !dContext->priv().caps()->crossContextTextureSupport()) {
+ return SkImage::MakeRasterCopy(originalPixmap);
+ }
+
+ // If non-power-of-two mipmapping isn't supported, ignore the client's request
+ if (!dContext->priv().caps()->mipmapSupport()) {
+ buildMips = false;
+ }
+
+ const SkPixmap* pixmap = &originalPixmap;
+ SkAutoPixmapStorage resized;
+ int maxTextureSize = dContext->priv().caps()->maxTextureSize();
+ int maxDim = std::max(originalPixmap.width(), originalPixmap.height());
+ if (limitToMaxTextureSize && maxDim > maxTextureSize) {
+ float scale = static_cast<float>(maxTextureSize) / maxDim;
+ int newWidth = std::min(static_cast<int>(originalPixmap.width() * scale), maxTextureSize);
+ int newHeight = std::min(static_cast<int>(originalPixmap.height() * scale), maxTextureSize);
+ SkImageInfo info = originalPixmap.info().makeWH(newWidth, newHeight);
+ SkSamplingOptions sampling(SkFilterMode::kLinear);
+ if (!resized.tryAlloc(info) || !originalPixmap.scalePixels(resized, sampling)) {
+ return nullptr;
+ }
+ pixmap = &resized;
+ }
+ // Turn the pixmap into a GrTextureProxy
+ SkBitmap bmp;
+ bmp.installPixels(*pixmap);
+ GrMipmapped mipmapped = buildMips ? GrMipmapped::kYes : GrMipmapped::kNo;
+ auto [view, ct] = GrMakeUncachedBitmapProxyView(dContext, bmp, mipmapped);
+ if (!view) {
+ return SkImage::MakeRasterCopy(*pixmap);
+ }
+
+ sk_sp<GrTexture> texture = sk_ref_sp(view.proxy()->peekTexture());
+
+ // Flush any writes or uploads
+ dContext->priv().flushSurface(view.proxy());
+ GrGpu* gpu = dContext->priv().getGpu();
+
+ std::unique_ptr<GrSemaphore> sema = gpu->prepareTextureForCrossContextUsage(texture.get());
+
+ SkColorType skCT = GrColorTypeToSkColorType(ct);
+ auto gen = GrBackendTextureImageGenerator::Make(std::move(texture), view.origin(),
+ std::move(sema), skCT,
+ pixmap->alphaType(),
+ pixmap->info().refColorSpace());
+ return SkImage::MakeFromGenerator(std::move(gen));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage::MakeBackendTextureFromSkImage(GrDirectContext* direct,
+ sk_sp<SkImage> image,
+ GrBackendTexture* backendTexture,
+ BackendTextureReleaseProc* releaseProc) {
+ if (!image || !backendTexture || !releaseProc) {
+ return false;
+ }
+
+ auto [view, ct] = as_IB(image)->asView(direct, GrMipmapped::kNo);
+
+ if (!view) {
+ return false;
+ }
+
+ // Flush any pending IO on the texture.
+ direct->priv().flushSurface(view.proxy());
+
+ GrTexture* texture = view.asTextureProxy()->peekTexture();
+ if (!texture) {
+ return false;
+ }
+ // We must make a copy of the image if the image is not unique, if the GrTexture owned by the
+ // image is not unique, or if the texture wraps an external object.
+ if (!image->unique() || !texture->unique() ||
+ texture->resourcePriv().refsWrappedObjects()) {
+ // onMakeSubset will always copy the image.
+ image = as_IB(image)->onMakeSubset(image->bounds(), direct);
+ if (!image) {
+ return false;
+ }
+ return MakeBackendTextureFromSkImage(direct, std::move(image), backendTexture, releaseProc);
+ }
+
+ SkASSERT(!texture->resourcePriv().refsWrappedObjects());
+ SkASSERT(texture->unique());
+ SkASSERT(image->unique());
+
+ // Take a reference to the GrTexture and release the image.
+ sk_sp<GrTexture> textureRef = sk_ref_sp(texture);
+ view.reset();
+ image = nullptr;
+ SkASSERT(textureRef->unique());
+
+ // Steal the backend texture from the GrTexture, releasing the GrTexture in the process.
+ return GrTexture::StealBackendTexture(std::move(textureRef), backendTexture, releaseProc);
+}
+
+std::tuple<GrSurfaceProxyView, GrColorType> SkImage_Gpu::onAsView(
+ GrRecordingContext* recordingContext,
+ GrMipmapped mipmapped,
+ GrImageTexGenPolicy policy) const {
+ if (!fContext->priv().matches(recordingContext)) {
+ return {};
+ }
+ if (policy != GrImageTexGenPolicy::kDraw) {
+ return {CopyView(recordingContext,
+ this->makeView(recordingContext),
+ mipmapped,
+ policy,
+ /*label=*/"SkImageGpu_AsView"),
+ SkColorTypeToGrColorType(this->colorType())};
+ }
+ GrSurfaceProxyView view = this->makeView(recordingContext);
+ GrColorType ct = SkColorTypeToGrColorType(this->colorType());
+ if (mipmapped == GrMipmapped::kYes) {
+ view = FindOrMakeCachedMipmappedView(recordingContext, std::move(view), this->uniqueID());
+ }
+ return {std::move(view), ct};
+}
+
+std::unique_ptr<GrFragmentProcessor> SkImage_Gpu::onAsFragmentProcessor(
+ GrRecordingContext* rContext,
+ SkSamplingOptions sampling,
+ const SkTileMode tileModes[2],
+ const SkMatrix& m,
+ const SkRect* subset,
+ const SkRect* domain) const {
+ if (!fContext->priv().matches(rContext)) {
+ return {};
+ }
+ auto mm = sampling.mipmap == SkMipmapMode::kNone ? GrMipmapped::kNo : GrMipmapped::kYes;
+ return MakeFragmentProcessorFromView(rContext,
+ std::get<0>(this->asView(rContext, mm)),
+ this->alphaType(),
+ sampling,
+ tileModes,
+ m,
+ subset,
+ domain);
+}
+
+GrSurfaceProxyView SkImage_Gpu::makeView(GrRecordingContext* rContext) const {
+ return {fChooser.chooseProxy(rContext), fOrigin, fSwizzle};
+}
diff --git a/gfx/skia/skia/src/image/SkImage_Gpu.h b/gfx/skia/skia/src/image/SkImage_Gpu.h
new file mode 100644
index 0000000000..71cebb0eb1
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Gpu.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Gpu_DEFINED
+#define SkImage_Gpu_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/private/SkSpinlock.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include "src/gpu/Swizzle.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkImage_GpuBase.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <tuple>
+
+class GrDirectContext;
+class GrFragmentProcessor;
+class GrImageContext;
+class GrRecordingContext;
+class GrRenderTask;
+class GrSurfaceProxy;
+class SkColorInfo;
+class SkColorSpace;
+class SkImage;
+class SkMatrix;
+enum GrSurfaceOrigin : int;
+enum SkColorType : int;
+enum SkYUVColorSpace : int;
+enum class GrColorType;
+enum class GrImageTexGenPolicy : int;
+enum class GrSemaphoresSubmitted : bool;
+enum class SkTileMode;
+struct GrFlushInfo;
+struct SkIRect;
+struct SkISize;
+struct SkImageInfo;
+struct SkRect;
+
+namespace skgpu {
+enum class Mipmapped : bool;
+}
+
+class SkImage_Gpu final : public SkImage_GpuBase {
+public:
+ SkImage_Gpu(sk_sp<GrImageContext> context,
+ uint32_t uniqueID,
+ GrSurfaceProxyView view,
+ SkColorInfo info);
+
+ static sk_sp<SkImage> MakeWithVolatileSrc(sk_sp<GrRecordingContext> rContext,
+ GrSurfaceProxyView volatileSrc,
+ SkColorInfo colorInfo);
+
+ ~SkImage_Gpu() override;
+
+ // If this is image is a cached SkSurface snapshot then this method is called by the SkSurface
+ // before a write to check if the surface must make a copy to avoid modifying the image's
+ // contents.
+ bool surfaceMustCopyOnWrite(GrSurfaceProxy* surfaceProxy) const;
+
+ bool onHasMipmaps() const override;
+
+ GrSemaphoresSubmitted onFlush(GrDirectContext*, const GrFlushInfo&) const override;
+
+ GrBackendTexture onGetBackendTexture(bool flushPendingGrContextIO,
+ GrSurfaceOrigin* origin) const final;
+
+ SkImage_Base::Type type() const override { return SkImage_Base::Type::kGanesh; }
+
+ size_t onTextureSize() const override;
+
+ using SkImage_GpuBase::onMakeColorTypeAndColorSpace;
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType,
+ sk_sp<SkColorSpace>,
+ GrDirectContext*) const final;
+
+ sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const final;
+
+ void onAsyncRescaleAndReadPixels(const SkImageInfo&,
+ SkIRect srcRect,
+ RescaleGamma,
+ RescaleMode,
+ ReadPixelsCallback,
+ ReadPixelsContext) const override;
+
+ void onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace,
+ sk_sp<SkColorSpace>,
+ SkIRect srcRect,
+ SkISize dstSize,
+ RescaleGamma,
+ RescaleMode,
+ ReadPixelsCallback,
+ ReadPixelsContext) const override;
+
+ void generatingSurfaceIsDeleted() override;
+
+private:
+ SkImage_Gpu(sk_sp<GrDirectContext>,
+ GrSurfaceProxyView volatileSrc,
+ sk_sp<GrSurfaceProxy> stableCopy,
+ sk_sp<GrRenderTask> copyTask,
+ int volatileSrcTargetCount,
+ SkColorInfo);
+
+ std::tuple<GrSurfaceProxyView, GrColorType> onAsView(GrRecordingContext*,
+ skgpu::Mipmapped,
+ GrImageTexGenPolicy) const override;
+
+ std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(GrRecordingContext*,
+ SkSamplingOptions,
+ const SkTileMode[2],
+ const SkMatrix&,
+ const SkRect*,
+ const SkRect*) const override;
+
+ GrSurfaceProxyView makeView(GrRecordingContext*) const;
+
+ // Thread-safe wrapper around the proxies backing this image. Handles dynamically switching
+ // from a "volatile" proxy that may be overwritten (by an SkSurface that this image was snapped
+ // from) to a "stable" proxy that is a copy of the volatile proxy. It allows the image to cancel
+ // the copy if the stable proxy is never required because the contents of the volatile proxy
+ // were never mutated by the SkSurface during the image lifetime.
+ class ProxyChooser {
+ public:
+ ProxyChooser(sk_sp<GrSurfaceProxy> stableProxy,
+ sk_sp<GrSurfaceProxy> volatileProxy,
+ sk_sp<GrRenderTask> copyTask,
+ int volatileProxyTargetCount);
+
+ ProxyChooser(sk_sp<GrSurfaceProxy> stableProxy);
+
+ ~ProxyChooser();
+
+ // Checks if there is a volatile proxy that is safe to use. If so returns it, otherwise
+ // returns the stable proxy (and drops the volatile one if it exists).
+ sk_sp<GrSurfaceProxy> chooseProxy(GrRecordingContext* context) SK_EXCLUDES(fLock);
+ // Call when it is known copy is necessary.
+ sk_sp<GrSurfaceProxy> switchToStableProxy() SK_EXCLUDES(fLock);
+ // Call when it is known for sure copy won't be necessary.
+ sk_sp<GrSurfaceProxy> makeVolatileProxyStable() SK_EXCLUDES(fLock);
+
+ bool surfaceMustCopyOnWrite(GrSurfaceProxy* surfaceProxy) const SK_EXCLUDES(fLock);
+
+ // Queries that should be independent of which proxy is in use.
+ size_t gpuMemorySize() const SK_EXCLUDES(fLock);
+ skgpu::Mipmapped mipmapped() const SK_EXCLUDES(fLock);
+#ifdef SK_DEBUG
+ GrBackendFormat backendFormat() SK_EXCLUDES(fLock);
+#endif
+
+ private:
+ mutable SkSpinlock fLock;
+ sk_sp<GrSurfaceProxy> fStableProxy SK_GUARDED_BY(fLock);
+ sk_sp<GrSurfaceProxy> fVolatileProxy SK_GUARDED_BY(fLock);
+ sk_sp<GrRenderTask> fVolatileToStableCopyTask;
+ // The number of GrRenderTasks targeting the volatile proxy at creation time. If the
+ // proxy's target count increases it indicates additional writes and we must switch
+ // to using the stable copy.
+ const int fVolatileProxyTargetCount = 0;
+ };
+
+ mutable ProxyChooser fChooser;
+ skgpu::Swizzle fSwizzle;
+ GrSurfaceOrigin fOrigin;
+
+ using INHERITED = SkImage_GpuBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_GpuBase.cpp b/gfx/skia/skia/src/image/SkImage_GpuBase.cpp
new file mode 100644
index 0000000000..903b03c838
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_GpuBase.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkImage_GpuBase.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPromiseImageTexture.h"
+#include "include/core/SkSize.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/gpu/RefCntedCallback.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrImageContextPriv.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrResourceCache.h"
+#include "src/gpu/ganesh/GrResourceProvider.h"
+#include "src/gpu/ganesh/GrSurface.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/GrTexture.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/gpu/ganesh/SurfaceContext.h"
+#include "src/image/SkImage_Gpu.h"
+
+#include <functional>
+#include <memory>
+#include <utility>
+
+class GrContextThreadSafeProxy;
+class SkImage;
+enum SkColorType : int;
+struct SkIRect;
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Log.h"
+#endif
+
+SkImage_GpuBase::SkImage_GpuBase(sk_sp<GrImageContext> context, SkImageInfo info, uint32_t uniqueID)
+ : SkImage_Base(std::move(info), uniqueID)
+ , fContext(std::move(context)) {}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage_GpuBase::ValidateBackendTexture(const GrCaps* caps, const GrBackendTexture& tex,
+ GrColorType grCT, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs) {
+ if (!tex.isValid()) {
+ return false;
+ }
+ SkColorInfo info(ct, at, cs);
+ if (!SkColorInfoIsValid(info)) {
+ return false;
+ }
+ GrBackendFormat backendFormat = tex.getBackendFormat();
+ if (!backendFormat.isValid()) {
+ return false;
+ }
+
+ return caps->areColorTypeAndFormatCompatible(grCT, backendFormat);
+}
+
+bool SkImage_GpuBase::ValidateCompressedBackendTexture(const GrCaps* caps,
+ const GrBackendTexture& tex,
+ SkAlphaType at) {
+ if (!tex.isValid() || tex.width() <= 0 || tex.height() <= 0) {
+ return false;
+ }
+
+ if (tex.width() > caps->maxTextureSize() || tex.height() > caps->maxTextureSize()) {
+ return false;
+ }
+
+ if (at == kUnknown_SkAlphaType) {
+ return false;
+ }
+
+ GrBackendFormat backendFormat = tex.getBackendFormat();
+ if (!backendFormat.isValid()) {
+ return false;
+ }
+
+ if (!caps->isFormatCompressed(backendFormat)) {
+ return false;
+ }
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage_GpuBase::getROPixels(GrDirectContext* dContext,
+ SkBitmap* dst,
+ CachingHint chint) const {
+ if (!fContext->priv().matches(dContext)) {
+ return false;
+ }
+
+ const auto desc = SkBitmapCacheDesc::Make(this);
+ if (SkBitmapCache::Find(desc, dst)) {
+ SkASSERT(dst->isImmutable());
+ SkASSERT(dst->getPixels());
+ return true;
+ }
+
+ SkBitmapCache::RecPtr rec = nullptr;
+ SkPixmap pmap;
+ if (kAllow_CachingHint == chint) {
+ rec = SkBitmapCache::Alloc(desc, this->imageInfo(), &pmap);
+ if (!rec) {
+ return false;
+ }
+ } else {
+ if (!dst->tryAllocPixels(this->imageInfo()) || !dst->peekPixels(&pmap)) {
+ return false;
+ }
+ }
+
+ auto [view, ct] = this->asView(dContext, skgpu::Mipmapped::kNo);
+ if (!view) {
+ return false;
+ }
+
+ GrColorInfo colorInfo(ct, this->alphaType(), this->refColorSpace());
+ auto sContext = dContext->priv().makeSC(std::move(view), std::move(colorInfo));
+ if (!sContext) {
+ return false;
+ }
+
+ if (!sContext->readPixels(dContext, pmap, {0, 0})) {
+ return false;
+ }
+
+ if (rec) {
+ SkBitmapCache::Add(std::move(rec), dst);
+ this->notifyAddedToRasterCache();
+ }
+ return true;
+}
+
+sk_sp<SkImage> SkImage_GpuBase::onMakeSubset(const SkIRect& subset,
+ GrDirectContext* direct) const {
+ if (!fContext->priv().matches(direct)) {
+ return nullptr;
+ }
+
+ auto [view, ct] = this->asView(direct, skgpu::Mipmapped::kNo);
+ SkASSERT(view);
+ SkASSERT(ct == SkColorTypeToGrColorType(this->colorType()));
+
+ skgpu::Budgeted isBudgeted = view.proxy()->isBudgeted();
+ auto copyView = GrSurfaceProxyView::Copy(direct,
+ std::move(view),
+ skgpu::Mipmapped::kNo,
+ subset,
+ SkBackingFit::kExact,
+ isBudgeted,
+ /*label=*/"ImageGpuBase_MakeSubset");
+
+ if (!copyView) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(direct),
+ kNeedNewImageUniqueID,
+ std::move(copyView),
+ this->imageInfo().colorInfo());
+}
+
+#if defined(SK_GRAPHITE)
+sk_sp<SkImage> SkImage_GpuBase::onMakeTextureImage(skgpu::graphite::Recorder*,
+ SkImage::RequiredImageProperties) const {
+ SKGPU_LOG_W("Cannot convert Ganesh-backed image to Graphite");
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage_GpuBase::onMakeSubset(const SkIRect&,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const {
+ SKGPU_LOG_W("Cannot convert Ganesh-backed image to Graphite");
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage_GpuBase::onMakeColorTypeAndColorSpace(SkColorType,
+ sk_sp<SkColorSpace>,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const {
+ SKGPU_LOG_W("Cannot convert Ganesh-backed image to Graphite");
+ return nullptr;
+}
+#endif
+
+bool SkImage_GpuBase::onReadPixels(GrDirectContext* dContext,
+ const SkImageInfo& dstInfo,
+ void* dstPixels,
+ size_t dstRB,
+ int srcX,
+ int srcY,
+ CachingHint) const {
+ if (!fContext->priv().matches(dContext) ||
+ !SkImageInfoValidConversion(dstInfo, this->imageInfo())) {
+ return false;
+ }
+
+ auto [view, ct] = this->asView(dContext, skgpu::Mipmapped::kNo);
+ SkASSERT(view);
+
+ GrColorInfo colorInfo(ct, this->alphaType(), this->refColorSpace());
+ auto sContext = dContext->priv().makeSC(std::move(view), colorInfo);
+ if (!sContext) {
+ return false;
+ }
+
+ return sContext->readPixels(dContext, {dstInfo, dstPixels, dstRB}, {srcX, srcY});
+}
+
+bool SkImage_GpuBase::onIsValid(GrRecordingContext* context) const {
+ // The base class has already checked that 'context' isn't abandoned (if it's not nullptr)
+ if (fContext->priv().abandoned()) {
+ return false;
+ }
+
+ if (context && !fContext->priv().matches(context)) {
+ return false;
+ }
+
+ return true;
+}
+
+sk_sp<GrTextureProxy> SkImage_GpuBase::MakePromiseImageLazyProxy(
+ GrContextThreadSafeProxy* tsp,
+ SkISize dimensions,
+ GrBackendFormat backendFormat,
+ skgpu::Mipmapped mipmapped,
+ PromiseImageTextureFulfillProc fulfillProc,
+ sk_sp<skgpu::RefCntedCallback> releaseHelper) {
+ SkASSERT(tsp);
+ SkASSERT(!dimensions.isEmpty());
+ SkASSERT(releaseHelper);
+
+ if (!fulfillProc) {
+ return nullptr;
+ }
+
+ if (mipmapped == skgpu::Mipmapped::kYes &&
+ GrTextureTypeHasRestrictedSampling(backendFormat.textureType())) {
+ // It is invalid to have a GL_TEXTURE_EXTERNAL or GL_TEXTURE_RECTANGLE and have mips as
+ // well.
+ return nullptr;
+ }
+
+ /**
+ * This class is the lazy instantiation callback for promise images. It manages calling the
+ * client's Fulfill and Release procs. It attempts to reuse a GrTexture instance in
+ * cases where the client provides the same SkPromiseImageTexture as Fulfill results for
+ * multiple SkImages. The created GrTexture is given a key based on a unique ID associated with
+ * the SkPromiseImageTexture.
+ *
+ * A key invalidation message is installed on the SkPromiseImageTexture so that the GrTexture
+ * is deleted once it can no longer be used to instantiate a proxy.
+ */
+ class PromiseLazyInstantiateCallback {
+ public:
+ PromiseLazyInstantiateCallback(PromiseImageTextureFulfillProc fulfillProc,
+ sk_sp<skgpu::RefCntedCallback> releaseHelper)
+ : fFulfillProc(fulfillProc), fReleaseHelper(std::move(releaseHelper)) {}
+ PromiseLazyInstantiateCallback(PromiseLazyInstantiateCallback&&) = default;
+ PromiseLazyInstantiateCallback(const PromiseLazyInstantiateCallback&) {
+ // Because we get wrapped in std::function we must be copyable. But we should never
+ // be copied.
+ SkASSERT(false);
+ }
+ PromiseLazyInstantiateCallback& operator=(PromiseLazyInstantiateCallback&&) = default;
+ PromiseLazyInstantiateCallback& operator=(const PromiseLazyInstantiateCallback&) {
+ SkASSERT(false);
+ return *this;
+ }
+
+ ~PromiseLazyInstantiateCallback() {
+ // Our destructor can run on any thread. We trigger the unref of fTexture by message.
+ if (fTexture) {
+ GrResourceCache::ReturnResourceFromThread(std::move(fTexture), fTextureContextID);
+ }
+ }
+
+ GrSurfaceProxy::LazyCallbackResult operator()(GrResourceProvider* resourceProvider,
+ const GrSurfaceProxy::LazySurfaceDesc&) {
+ // We use the unique key in a way that is unrelated to the SkImage-based key that the
+ // proxy may receive, hence kUnsynced.
+ static constexpr auto kKeySyncMode =
+ GrSurfaceProxy::LazyInstantiationKeyMode::kUnsynced;
+
+ // In order to make the SkImage "thread safe" we rely on holding an extra ref to the
+ // texture in the callback and signalling the unref via a message to the resource cache.
+ // We need to extend the callback's lifetime to that of the proxy.
+ static constexpr auto kReleaseCallbackOnInstantiation = false;
+
+ // Our proxy is getting instantiated for the second+ time. We are only allowed to call
+ // Fulfill once. So return our cached result.
+ if (fTexture) {
+ return {fTexture, kReleaseCallbackOnInstantiation, kKeySyncMode};
+ } else if (fFulfillProcFailed) {
+ // We've already called fulfill and it failed. Our contract says that we should only
+ // call each callback once.
+ return {};
+ }
+
+ PromiseImageTextureContext textureContext = fReleaseHelper->context();
+ sk_sp<SkPromiseImageTexture> promiseTexture = fFulfillProc(textureContext);
+
+ if (!promiseTexture) {
+ fFulfillProcFailed = true;
+ return {};
+ }
+
+ const GrBackendTexture& backendTexture = promiseTexture->backendTexture();
+ if (!backendTexture.isValid()) {
+ return {};
+ }
+
+ fTexture = resourceProvider->wrapBackendTexture(backendTexture,
+ kBorrow_GrWrapOwnership,
+ GrWrapCacheable::kNo,
+ kRead_GrIOType);
+ if (!fTexture) {
+ return {};
+ }
+ fTexture->setRelease(fReleaseHelper);
+ auto dContext = fTexture->getContext();
+ fTextureContextID = dContext->directContextID();
+ return {fTexture, kReleaseCallbackOnInstantiation, kKeySyncMode};
+ }
+
+ private:
+ PromiseImageTextureFulfillProc fFulfillProc;
+ sk_sp<skgpu::RefCntedCallback> fReleaseHelper;
+ sk_sp<GrTexture> fTexture;
+ GrDirectContext::DirectContextID fTextureContextID;
+ bool fFulfillProcFailed = false;
+ } callback(fulfillProc, std::move(releaseHelper));
+
+ return GrProxyProvider::CreatePromiseProxy(tsp, std::move(callback), backendFormat, dimensions,
+ mipmapped);
+}
diff --git a/gfx/skia/skia/src/image/SkImage_GpuBase.h b/gfx/skia/skia/src/image/SkImage_GpuBase.h
new file mode 100644
index 0000000000..657ec4cf2c
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_GpuBase.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_GpuBase_DEFINED
+#define SkImage_GpuBase_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/gpu/ganesh/GrImageContext.h"
+#include "src/image/SkImage_Base.h"
+
+#include <cstddef>
+#include <cstdint>
+
+class GrBackendFormat;
+class GrBackendTexture;
+class GrCaps;
+class GrContextThreadSafeProxy;
+class GrDirectContext;
+class GrRecordingContext;
+class GrTextureProxy;
+class SkBitmap;
+class SkColorSpace;
+class SkImage;
+enum SkAlphaType : int;
+enum SkColorType : int;
+enum class GrColorType;
+struct SkIRect;
+struct SkISize;
+struct SkImageInfo;
+namespace skgpu {
+enum class Mipmapped : bool;
+class RefCntedCallback;
+}
+
+class SkImage_GpuBase : public SkImage_Base {
+public:
+ GrImageContext* context() const final { return fContext.get(); }
+
+ bool getROPixels(GrDirectContext*, SkBitmap*, CachingHint) const final;
+ sk_sp<SkImage> onMakeSubset(const SkIRect& subset, GrDirectContext*) const final;
+
+ bool onReadPixels(GrDirectContext *dContext,
+ const SkImageInfo& dstInfo,
+ void* dstPixels,
+ size_t dstRB,
+ int srcX,
+ int srcY,
+ CachingHint) const override;
+
+ bool onIsValid(GrRecordingContext*) const final;
+
+ static bool ValidateBackendTexture(const GrCaps*, const GrBackendTexture& tex,
+ GrColorType grCT, SkColorType ct, SkAlphaType at,
+ sk_sp<SkColorSpace> cs);
+ static bool ValidateCompressedBackendTexture(const GrCaps*, const GrBackendTexture& tex,
+ SkAlphaType);
+
+ // Helper for making a lazy proxy for a promise image.
+ // PromiseImageTextureFulfillProc must not be null.
+ static sk_sp<GrTextureProxy> MakePromiseImageLazyProxy(
+ GrContextThreadSafeProxy*,
+ SkISize dimensions,
+ GrBackendFormat,
+ skgpu::Mipmapped,
+ PromiseImageTextureFulfillProc,
+ sk_sp<skgpu::RefCntedCallback> releaseHelper);
+
+protected:
+ SkImage_GpuBase(sk_sp<GrImageContext>, SkImageInfo, uint32_t uniqueID);
+
+ sk_sp<GrImageContext> fContext;
+
+#if defined(SK_GRAPHITE)
+ sk_sp<SkImage> onMakeTextureImage(skgpu::graphite::Recorder*,
+ RequiredImageProperties) const final;
+ sk_sp<SkImage> onMakeSubset(const SkIRect& subset,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const final;
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType,
+ sk_sp<SkColorSpace>,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const final;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_GpuYUVA.cpp b/gfx/skia/skia/src/image/SkImage_GpuYUVA.cpp
new file mode 100644
index 0000000000..a069c6a5a5
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_GpuYUVA.cpp
@@ -0,0 +1,440 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkImage_GpuYUVA.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkYUVAInfo.h"
+#include "include/core/SkYUVAPixmaps.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrBackendSurface.h" // IWYU pragma: keep
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/gpu/GrYUVABackendTextures.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/gpu/ganesh/GrImageContext.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkSamplingPriv.h"
+#include "src/gpu/RefCntedCallback.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/Swizzle.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrImageContextPriv.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSamplerState.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrBicubicEffect.h"
+#include "src/gpu/ganesh/effects/GrYUVtoRGBEffect.h"
+#include "src/image/SkImage_Base.h"
+
+#include <algorithm>
+#include <utility>
+
+enum class SkTileMode;
+struct SkRect;
+
+static constexpr auto kAssumedColorType = kRGBA_8888_SkColorType;
+
+SkImage_GpuYUVA::SkImage_GpuYUVA(sk_sp<GrImageContext> context,
+ uint32_t uniqueID,
+ GrYUVATextureProxies proxies,
+ sk_sp<SkColorSpace> imageColorSpace)
+ : INHERITED(std::move(context),
+ SkImageInfo::Make(proxies.yuvaInfo().dimensions(),
+ kAssumedColorType,
+ // If an alpha channel is present we always use kPremul. This
+ // is because, although the planar data is always un-premul,
+ // the final interleaved RGBA sample produced in the shader
+ // is premul (and similar if flattened via asView).
+ proxies.yuvaInfo().hasAlpha() ? kPremul_SkAlphaType
+ : kOpaque_SkAlphaType,
+ std::move(imageColorSpace)),
+ uniqueID)
+ , fYUVAProxies(std::move(proxies)) {
+ // The caller should have checked this, just verifying.
+ SkASSERT(fYUVAProxies.isValid());
+}
+
+// For onMakeColorTypeAndColorSpace() / onReinterpretColorSpace()
+SkImage_GpuYUVA::SkImage_GpuYUVA(sk_sp<GrImageContext> context,
+ const SkImage_GpuYUVA* image,
+ sk_sp<SkColorSpace> targetCS,
+ ColorSpaceMode csMode)
+ : INHERITED(std::move(context),
+ image->imageInfo().makeColorSpace(std::move(targetCS)),
+ kNeedNewImageUniqueID)
+ , fYUVAProxies(image->fYUVAProxies)
+ // If we're *reinterpreting* in a new color space, leave fFromColorSpace null.
+ // If we're *converting* to a new color space, it must be non-null, so turn null into sRGB.
+ , fFromColorSpace(csMode == ColorSpaceMode::kReinterpret
+ ? nullptr
+ : (image->colorSpace() ? image->refColorSpace()
+ : SkColorSpace::MakeSRGB())) {}
+
+bool SkImage_GpuYUVA::setupMipmapsForPlanes(GrRecordingContext* context) const {
+ if (!context || !fContext->priv().matches(context)) {
+ return false;
+ }
+ if (!context->priv().caps()->mipmapSupport()) {
+ // We succeed in this case by doing nothing.
+ return true;
+ }
+ int n = fYUVAProxies.yuvaInfo().numPlanes();
+ sk_sp<GrSurfaceProxy> newProxies[4];
+ for (int i = 0; i < n; ++i) {
+ auto* t = fYUVAProxies.proxy(i)->asTextureProxy();
+ if (t->mipmapped() == GrMipmapped::kNo && (t->width() > 1 || t->height() > 1)) {
+ auto newView = GrCopyBaseMipMapToView(context, fYUVAProxies.makeView(i));
+ if (!newView) {
+ return false;
+ }
+ SkASSERT(newView.swizzle() == fYUVAProxies.makeView(i).swizzle());
+ newProxies[i] = newView.detachProxy();
+ } else {
+ newProxies[i] = fYUVAProxies.refProxy(i);
+ }
+ }
+ fYUVAProxies = GrYUVATextureProxies(fYUVAProxies.yuvaInfo(),
+ newProxies,
+ fYUVAProxies.textureOrigin());
+ SkASSERT(fYUVAProxies.isValid());
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+GrSemaphoresSubmitted SkImage_GpuYUVA::onFlush(GrDirectContext* dContext,
+ const GrFlushInfo& info) const {
+ if (!fContext->priv().matches(dContext) || dContext->abandoned()) {
+ if (info.fSubmittedProc) {
+ info.fSubmittedProc(info.fSubmittedContext, false);
+ }
+ if (info.fFinishedProc) {
+ info.fFinishedProc(info.fFinishedContext);
+ }
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ GrSurfaceProxy* proxies[SkYUVAInfo::kMaxPlanes] = {};
+ size_t numProxies = fYUVAProxies.numPlanes();
+ for (size_t i = 0; i < numProxies; ++i) {
+ proxies[i] = fYUVAProxies.proxy(i);
+ }
+ return dContext->priv().flushSurfaces({proxies, numProxies},
+ SkSurface::BackendSurfaceAccess::kNoAccess,
+ info);
+}
+
+bool SkImage_GpuYUVA::onHasMipmaps() const { return fYUVAProxies.mipmapped() == GrMipmapped::kYes; }
+
+size_t SkImage_GpuYUVA::onTextureSize() const {
+ size_t size = 0;
+ for (int i = 0; i < fYUVAProxies.numPlanes(); ++i) {
+ size += fYUVAProxies.proxy(i)->gpuMemorySize();
+ }
+ return size;
+}
+
+sk_sp<SkImage> SkImage_GpuYUVA::onMakeColorTypeAndColorSpace(SkColorType,
+ sk_sp<SkColorSpace> targetCS,
+ GrDirectContext* direct) const {
+ // We explicitly ignore color type changes, for now.
+
+ // we may need a mutex here but for now we expect usage to be in a single thread
+ if (fOnMakeColorSpaceTarget &&
+ SkColorSpace::Equals(targetCS.get(), fOnMakeColorSpaceTarget.get())) {
+ return fOnMakeColorSpaceResult;
+ }
+ sk_sp<SkImage> result = sk_sp<SkImage>(
+ new SkImage_GpuYUVA(sk_ref_sp(direct), this, targetCS, ColorSpaceMode::kConvert));
+ if (result) {
+ fOnMakeColorSpaceTarget = targetCS;
+ fOnMakeColorSpaceResult = result;
+ }
+ return result;
+}
+
+sk_sp<SkImage> SkImage_GpuYUVA::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
+ return sk_sp<SkImage>(
+ new SkImage_GpuYUVA(fContext, this, std::move(newCS), ColorSpaceMode::kReinterpret));
+}
+
+std::tuple<GrSurfaceProxyView, GrColorType> SkImage_GpuYUVA::onAsView(
+ GrRecordingContext* rContext,
+ GrMipmapped mipmapped,
+ GrImageTexGenPolicy) const {
+ if (!fContext->priv().matches(rContext)) {
+ return {};
+ }
+ auto sfc = rContext->priv().makeSFC(this->imageInfo(),
+ "Image_GpuYUVA_ReinterpretColorSpace",
+ SkBackingFit::kExact,
+ /*sample count*/ 1,
+ mipmapped,
+ GrProtected::kNo,
+ kTopLeft_GrSurfaceOrigin,
+ skgpu::Budgeted::kYes);
+ if (!sfc) {
+ return {};
+ }
+
+ const GrCaps& caps = *rContext->priv().caps();
+ auto fp = GrYUVtoRGBEffect::Make(fYUVAProxies, GrSamplerState::Filter::kNearest, caps);
+ if (fFromColorSpace) {
+ fp = GrColorSpaceXformEffect::Make(std::move(fp),
+ fFromColorSpace.get(), this->alphaType(),
+ this->colorSpace() , this->alphaType());
+ }
+ sfc->fillWithFP(std::move(fp));
+
+ return {sfc->readSurfaceView(), sfc->colorInfo().colorType()};
+}
+
+std::unique_ptr<GrFragmentProcessor> SkImage_GpuYUVA::onAsFragmentProcessor(
+ GrRecordingContext* context,
+ SkSamplingOptions sampling,
+ const SkTileMode tileModes[2],
+ const SkMatrix& m,
+ const SkRect* subset,
+ const SkRect* domain) const {
+ if (!fContext->priv().matches(context)) {
+ return {};
+ }
+ // At least for now we do not attempt aniso filtering on YUVA images.
+ if (sampling.isAniso()) {
+ sampling = SkSamplingPriv::AnisoFallback(fYUVAProxies.mipmapped() == GrMipmapped::kYes);
+ }
+
+ auto wmx = SkTileModeToWrapMode(tileModes[0]);
+ auto wmy = SkTileModeToWrapMode(tileModes[1]);
+ GrSamplerState sampler(wmx, wmy, sampling.filter, sampling.mipmap);
+ if (sampler.mipmapped() == GrMipmapped::kYes && !this->setupMipmapsForPlanes(context)) {
+ sampler = GrSamplerState(sampler.wrapModeX(),
+ sampler.wrapModeY(),
+ sampler.filter(),
+ GrSamplerState::MipmapMode::kNone);
+ }
+
+ const auto& yuvM = sampling.useCubic ? SkMatrix::I() : m;
+ auto fp = GrYUVtoRGBEffect::Make(fYUVAProxies,
+ sampler,
+ *context->priv().caps(),
+ yuvM,
+ subset,
+ domain);
+ if (sampling.useCubic) {
+ fp = GrBicubicEffect::Make(std::move(fp),
+ this->alphaType(),
+ m,
+ sampling.cubic,
+ GrBicubicEffect::Direction::kXY);
+ }
+ if (fFromColorSpace) {
+ fp = GrColorSpaceXformEffect::Make(std::move(fp),
+ fFromColorSpace.get(), this->alphaType(),
+ this->colorSpace() , this->alphaType());
+ }
+ return fp;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+sk_sp<SkImage> SkImage::MakeFromYUVATextures(GrRecordingContext* context,
+ const GrYUVABackendTextures& yuvaTextures) {
+ return SkImage::MakeFromYUVATextures(context, yuvaTextures, nullptr, nullptr, nullptr);
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVATextures(GrRecordingContext* context,
+ const GrYUVABackendTextures& yuvaTextures,
+ sk_sp<SkColorSpace> imageColorSpace,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext) {
+ auto releaseHelper = skgpu::RefCntedCallback::Make(textureReleaseProc, releaseContext);
+
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ int numPlanes = yuvaTextures.yuvaInfo().numPlanes();
+ sk_sp<GrSurfaceProxy> proxies[SkYUVAInfo::kMaxPlanes];
+ for (int plane = 0; plane < numPlanes; ++plane) {
+ proxies[plane] = proxyProvider->wrapBackendTexture(yuvaTextures.texture(plane),
+ kBorrow_GrWrapOwnership,
+ GrWrapCacheable::kNo,
+ kRead_GrIOType,
+ releaseHelper);
+ if (!proxies[plane]) {
+ return {};
+ }
+ }
+ GrYUVATextureProxies yuvaProxies(yuvaTextures.yuvaInfo(),
+ proxies,
+ yuvaTextures.textureOrigin());
+
+ if (!yuvaProxies.isValid()) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkImage_GpuYUVA>(sk_ref_sp(context),
+ kNeedNewImageUniqueID,
+ yuvaProxies,
+ imageColorSpace);
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVAPixmaps(GrRecordingContext* context,
+ const SkYUVAPixmaps& pixmaps,
+ GrMipmapped buildMips,
+ bool limitToMaxTextureSize) {
+ return SkImage::MakeFromYUVAPixmaps(context, pixmaps, buildMips, limitToMaxTextureSize,
+ nullptr);
+}
+
+sk_sp<SkImage> SkImage::MakeFromYUVAPixmaps(GrRecordingContext* context,
+ const SkYUVAPixmaps& pixmaps,
+ GrMipmapped buildMips,
+ bool limitToMaxTextureSize,
+ sk_sp<SkColorSpace> imageColorSpace) {
+ if (!context) {
+ return nullptr; // until we impl this for raster backend
+ }
+
+ if (!pixmaps.isValid()) {
+ return nullptr;
+ }
+
+ if (!context->priv().caps()->mipmapSupport()) {
+ buildMips = GrMipmapped::kNo;
+ }
+
+ // Resize the pixmaps if necessary.
+ int numPlanes = pixmaps.numPlanes();
+ int maxTextureSize = context->priv().caps()->maxTextureSize();
+ int maxDim = std::max(pixmaps.yuvaInfo().width(), pixmaps.yuvaInfo().height());
+
+ SkYUVAPixmaps tempPixmaps;
+ const SkYUVAPixmaps* pixmapsToUpload = &pixmaps;
+ // We assume no plane is larger than the image size (and at least one plane is as big).
+ if (maxDim > maxTextureSize) {
+ if (!limitToMaxTextureSize) {
+ return nullptr;
+ }
+ float scale = static_cast<float>(maxTextureSize)/maxDim;
+ SkISize newDimensions = {
+ std::min(static_cast<int>(pixmaps.yuvaInfo().width() *scale), maxTextureSize),
+ std::min(static_cast<int>(pixmaps.yuvaInfo().height()*scale), maxTextureSize)
+ };
+ SkYUVAInfo newInfo = pixmaps.yuvaInfo().makeDimensions(newDimensions);
+ SkYUVAPixmapInfo newPixmapInfo(newInfo, pixmaps.dataType(), /*row bytes*/ nullptr);
+ tempPixmaps = SkYUVAPixmaps::Allocate(newPixmapInfo);
+ SkSamplingOptions sampling(SkFilterMode::kLinear);
+ if (!tempPixmaps.isValid()) {
+ return nullptr;
+ }
+ for (int i = 0; i < numPlanes; ++i) {
+ if (!pixmaps.plane(i).scalePixels(tempPixmaps.plane(i), sampling)) {
+ return nullptr;
+ }
+ }
+ pixmapsToUpload = &tempPixmaps;
+ }
+
+ // Convert to texture proxies.
+ GrSurfaceProxyView views[SkYUVAInfo::kMaxPlanes];
+ GrColorType pixmapColorTypes[SkYUVAInfo::kMaxPlanes];
+ for (int i = 0; i < numPlanes; ++i) {
+ // Turn the pixmap into a GrTextureProxy
+ SkBitmap bmp;
+ bmp.installPixels(pixmapsToUpload->plane(i));
+ std::tie(views[i], std::ignore) = GrMakeUncachedBitmapProxyView(context, bmp, buildMips);
+ if (!views[i]) {
+ return nullptr;
+ }
+ pixmapColorTypes[i] = SkColorTypeToGrColorType(bmp.colorType());
+ }
+
+ GrYUVATextureProxies yuvaProxies(pixmapsToUpload->yuvaInfo(), views, pixmapColorTypes);
+ SkASSERT(yuvaProxies.isValid());
+ return sk_make_sp<SkImage_GpuYUVA>(sk_ref_sp(context),
+ kNeedNewImageUniqueID,
+ std::move(yuvaProxies),
+ std::move(imageColorSpace));
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage::MakePromiseYUVATexture(sk_sp<GrContextThreadSafeProxy> threadSafeProxy,
+ const GrYUVABackendTextureInfo& backendTextureInfo,
+ sk_sp<SkColorSpace> imageColorSpace,
+ PromiseImageTextureFulfillProc textureFulfillProc,
+ PromiseImageTextureReleaseProc textureReleaseProc,
+ PromiseImageTextureContext textureContexts[]) {
+ if (!backendTextureInfo.isValid()) {
+ return nullptr;
+ }
+
+ SkISize planeDimensions[SkYUVAInfo::kMaxPlanes];
+ int n = backendTextureInfo.yuvaInfo().planeDimensions(planeDimensions);
+
+ // Our contract is that we will always call the release proc even on failure.
+ // We use the helper to convey the context, so we need to ensure make doesn't fail.
+ textureReleaseProc = textureReleaseProc ? textureReleaseProc : [](void*) {};
+ sk_sp<skgpu::RefCntedCallback> releaseHelpers[4];
+ for (int i = 0; i < n; ++i) {
+ releaseHelpers[i] = skgpu::RefCntedCallback::Make(textureReleaseProc, textureContexts[i]);
+ }
+
+ if (!threadSafeProxy) {
+ return nullptr;
+ }
+
+ SkAlphaType at = backendTextureInfo.yuvaInfo().hasAlpha() ? kPremul_SkAlphaType
+ : kOpaque_SkAlphaType;
+ SkImageInfo info = SkImageInfo::Make(backendTextureInfo.yuvaInfo().dimensions(),
+ kAssumedColorType, at, imageColorSpace);
+ if (!SkImageInfoIsValid(info)) {
+ return nullptr;
+ }
+
+ // Make a lazy proxy for each plane and wrap in a view.
+ sk_sp<GrSurfaceProxy> proxies[4];
+ for (int i = 0; i < n; ++i) {
+ proxies[i] = SkImage_GpuBase::MakePromiseImageLazyProxy(threadSafeProxy.get(),
+ planeDimensions[i],
+ backendTextureInfo.planeFormat(i),
+ GrMipmapped::kNo,
+ textureFulfillProc,
+ std::move(releaseHelpers[i]));
+ if (!proxies[i]) {
+ return nullptr;
+ }
+ }
+ GrYUVATextureProxies yuvaTextureProxies(backendTextureInfo.yuvaInfo(),
+ proxies,
+ backendTextureInfo.textureOrigin());
+ SkASSERT(yuvaTextureProxies.isValid());
+ sk_sp<GrImageContext> ctx(GrImageContextPriv::MakeForPromiseImage(std::move(threadSafeProxy)));
+ return sk_make_sp<SkImage_GpuYUVA>(std::move(ctx),
+ kNeedNewImageUniqueID,
+ std::move(yuvaTextureProxies),
+ std::move(imageColorSpace));
+}
diff --git a/gfx/skia/skia/src/image/SkImage_GpuYUVA.h b/gfx/skia/skia/src/image/SkImage_GpuYUVA.h
new file mode 100644
index 0000000000..c71a8b5a0e
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_GpuYUVA.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_GpuYUVA_DEFINED
+#define SkImage_GpuYUVA_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "src/gpu/ganesh/GrYUVATextureProxies.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkImage_GpuBase.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <tuple>
+
+class GrDirectContext;
+class GrFragmentProcessor;
+class GrImageContext;
+class GrRecordingContext;
+class GrSurfaceProxyView;
+class SkMatrix;
+enum SkColorType : int;
+enum class GrColorType;
+enum class GrImageTexGenPolicy : int;
+enum class GrSemaphoresSubmitted : bool;
+enum class SkTileMode;
+struct GrFlushInfo;
+struct SkRect;
+
+namespace skgpu {
+enum class Mipmapped : bool;
+}
+
+// Wraps the 1 to 4 planes of a YUVA image for consumption by the GPU.
+// Initially any direct rendering will be done by passing the individual planes to a shader.
+// Once any method requests a flattened image (e.g., onReadPixels), the flattened RGB
+// proxy will be stored and used for any future rendering.
+class SkImage_GpuYUVA final : public SkImage_GpuBase {
+public:
+ SkImage_GpuYUVA(sk_sp<GrImageContext>,
+ uint32_t uniqueID,
+ GrYUVATextureProxies proxies,
+ sk_sp<SkColorSpace>);
+
+ bool onHasMipmaps() const override;
+
+ GrSemaphoresSubmitted onFlush(GrDirectContext*, const GrFlushInfo&) const override;
+
+ SkImage_Base::Type type() const override { return SkImage_Base::Type::kGaneshYUVA; }
+
+ size_t onTextureSize() const override;
+
+ using SkImage_GpuBase::onMakeColorTypeAndColorSpace;
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType, sk_sp<SkColorSpace>,
+ GrDirectContext*) const final;
+
+ sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const final;
+
+ bool setupMipmapsForPlanes(GrRecordingContext*) const;
+
+private:
+ enum class ColorSpaceMode {
+ kConvert,
+ kReinterpret,
+ };
+ SkImage_GpuYUVA(sk_sp<GrImageContext>,
+ const SkImage_GpuYUVA* image,
+ sk_sp<SkColorSpace> targetCS,
+ ColorSpaceMode csMode);
+
+ std::tuple<GrSurfaceProxyView, GrColorType> onAsView(GrRecordingContext*,
+ skgpu::Mipmapped,
+ GrImageTexGenPolicy) const override;
+
+ std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(GrRecordingContext*,
+ SkSamplingOptions,
+ const SkTileMode[2],
+ const SkMatrix&,
+ const SkRect*,
+ const SkRect*) const override;
+
+ mutable GrYUVATextureProxies fYUVAProxies;
+
+ // If this is non-null then the planar data should be converted from fFromColorSpace to
+ // this->colorSpace(). Otherwise we assume the planar data (post YUV->RGB conversion) is already
+ // in this->colorSpace().
+ const sk_sp<SkColorSpace> fFromColorSpace;
+
+ // Repeated calls to onMakeColorSpace will result in a proliferation of unique IDs and
+ // SkImage_GpuYUVA instances. Cache the result of the last successful onMakeColorSpace call.
+ mutable sk_sp<SkColorSpace> fOnMakeColorSpaceTarget;
+ mutable sk_sp<SkImage> fOnMakeColorSpaceResult;
+
+ using INHERITED = SkImage_GpuBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Lazy.cpp b/gfx/skia/skia/src/image/SkImage_Lazy.cpp
new file mode 100644
index 0000000000..6597795462
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Lazy.cpp
@@ -0,0 +1,689 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkImage_Lazy.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkYUVAInfo.h"
+#include "src/core/SkBitmapCache.h"
+#include "src/core/SkCachedData.h"
+#include "src/core/SkNextID.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkYUVPlanesCache.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/gpu/ResourceKey.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/Swizzle.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrImageInfo.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrSamplerState.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/gpu/ganesh/GrYUVATextureProxies.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/SurfaceContext.h"
+#include "src/gpu/ganesh/SurfaceFillContext.h"
+#include "src/gpu/ganesh/effects/GrYUVtoRGBEffect.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/TextureUtils.h"
+#endif
+
+#include <utility>
+
+class SkMatrix;
+enum SkColorType : int;
+enum class SkTileMode;
+
+// Ref-counted tuple(SkImageGenerator, SkMutex) which allows sharing one generator among N images
+class SharedGenerator final : public SkNVRefCnt<SharedGenerator> {
+public:
+ static sk_sp<SharedGenerator> Make(std::unique_ptr<SkImageGenerator> gen) {
+ return gen ? sk_sp<SharedGenerator>(new SharedGenerator(std::move(gen))) : nullptr;
+ }
+
+ // This is thread safe. It is a const field set in the constructor.
+ const SkImageInfo& getInfo() { return fGenerator->getInfo(); }
+
+private:
+ explicit SharedGenerator(std::unique_ptr<SkImageGenerator> gen)
+ : fGenerator(std::move(gen)) {
+ SkASSERT(fGenerator);
+ }
+
+ friend class ScopedGenerator;
+ friend class SkImage_Lazy;
+
+ std::unique_ptr<SkImageGenerator> fGenerator;
+ SkMutex fMutex;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkImage_Lazy::Validator::Validator(sk_sp<SharedGenerator> gen, const SkColorType* colorType,
+ sk_sp<SkColorSpace> colorSpace)
+ : fSharedGenerator(std::move(gen)) {
+ if (!fSharedGenerator) {
+ return;
+ }
+
+ // The following generator accessors are safe without acquiring the mutex (const getters).
+ // TODO: refactor to use a ScopedGenerator instead, for clarity.
+ fInfo = fSharedGenerator->fGenerator->getInfo();
+ if (fInfo.isEmpty()) {
+ fSharedGenerator.reset();
+ return;
+ }
+
+ fUniqueID = fSharedGenerator->fGenerator->uniqueID();
+
+ if (colorType && (*colorType == fInfo.colorType())) {
+ colorType = nullptr;
+ }
+
+ if (colorType || colorSpace) {
+ if (colorType) {
+ fInfo = fInfo.makeColorType(*colorType);
+ }
+ if (colorSpace) {
+ fInfo = fInfo.makeColorSpace(colorSpace);
+ }
+ fUniqueID = SkNextID::ImageID();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Helper for exclusive access to a shared generator.
+class SkImage_Lazy::ScopedGenerator {
+public:
+ ScopedGenerator(const sk_sp<SharedGenerator>& gen)
+ : fSharedGenerator(gen)
+ , fAutoAquire(gen->fMutex) {}
+
+ SkImageGenerator* operator->() const {
+ fSharedGenerator->fMutex.assertHeld();
+ return fSharedGenerator->fGenerator.get();
+ }
+
+ operator SkImageGenerator*() const {
+ fSharedGenerator->fMutex.assertHeld();
+ return fSharedGenerator->fGenerator.get();
+ }
+
+private:
+ const sk_sp<SharedGenerator>& fSharedGenerator;
+ SkAutoMutexExclusive fAutoAquire;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkImage_Lazy::SkImage_Lazy(Validator* validator)
+ : SkImage_Base(validator->fInfo, validator->fUniqueID)
+ , fSharedGenerator(std::move(validator->fSharedGenerator))
+{
+ SkASSERT(fSharedGenerator);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage_Lazy::getROPixels(GrDirectContext* ctx, SkBitmap* bitmap,
+ SkImage::CachingHint chint) const {
+ auto check_output_bitmap = [bitmap]() {
+ SkASSERT(bitmap->isImmutable());
+ SkASSERT(bitmap->getPixels());
+ (void)bitmap;
+ };
+
+ auto desc = SkBitmapCacheDesc::Make(this);
+ if (SkBitmapCache::Find(desc, bitmap)) {
+ check_output_bitmap();
+ return true;
+ }
+
+ if (SkImage::kAllow_CachingHint == chint) {
+ SkPixmap pmap;
+ SkBitmapCache::RecPtr cacheRec = SkBitmapCache::Alloc(desc, this->imageInfo(), &pmap);
+ if (!cacheRec) {
+ return false;
+ }
+ bool success = false;
+ { // make sure ScopedGenerator goes out of scope before we try readPixelsProxy
+ success = ScopedGenerator(fSharedGenerator)->getPixels(pmap);
+ }
+ if (!success && !this->readPixelsProxy(ctx, pmap)) {
+ return false;
+ }
+ SkBitmapCache::Add(std::move(cacheRec), bitmap);
+ this->notifyAddedToRasterCache();
+ } else {
+ if (!bitmap->tryAllocPixels(this->imageInfo())) {
+ return false;
+ }
+ bool success = false;
+ { // make sure ScopedGenerator goes out of scope before we try readPixelsProxy
+ success = ScopedGenerator(fSharedGenerator)->getPixels(bitmap->pixmap());
+ }
+ if (!success && !this->readPixelsProxy(ctx, bitmap->pixmap())) {
+ return false;
+ }
+ bitmap->setImmutable();
+ }
+ check_output_bitmap();
+ return true;
+}
+
+bool SkImage_Lazy::readPixelsProxy(GrDirectContext* ctx, const SkPixmap& pixmap) const {
+#if defined(SK_GANESH)
+ if (!ctx) {
+ return false;
+ }
+ GrSurfaceProxyView view = this->lockTextureProxyView(ctx,
+ GrImageTexGenPolicy::kDraw,
+ GrMipmapped::kNo);
+
+ if (!view) {
+ return false;
+ }
+
+ GrColorType ct = this->colorTypeOfLockTextureProxy(ctx->priv().caps());
+ GrColorInfo colorInfo(ct, this->alphaType(), this->refColorSpace());
+ auto sContext = ctx->priv().makeSC(std::move(view), colorInfo);
+ if (!sContext) {
+ return false;
+ }
+ size_t rowBytes = this->imageInfo().minRowBytes();
+ return sContext->readPixels(ctx, {this->imageInfo(), pixmap.writable_addr(), rowBytes}, {0, 0});
+#else
+ return false;
+#endif // defined(SK_GANESH)
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool SkImage_Lazy::onReadPixels(GrDirectContext* dContext,
+ const SkImageInfo& dstInfo,
+ void* dstPixels,
+ size_t dstRB,
+ int srcX,
+ int srcY,
+ CachingHint chint) const {
+ SkBitmap bm;
+ if (this->getROPixels(dContext, &bm, chint)) {
+ return bm.readPixels(dstInfo, dstPixels, dstRB, srcX, srcY);
+ }
+ return false;
+}
+
+sk_sp<SkData> SkImage_Lazy::onRefEncoded() const {
+ // check that we aren't a subset or colortype/etc modification of the original
+ if (fSharedGenerator->fGenerator->uniqueID() == this->uniqueID()) {
+ ScopedGenerator generator(fSharedGenerator);
+ return generator->refEncodedData();
+ }
+ return nullptr;
+}
+
+bool SkImage_Lazy::onIsValid(GrRecordingContext* context) const {
+ ScopedGenerator generator(fSharedGenerator);
+ return generator->isValid(context);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage_Lazy::onMakeSubset(const SkIRect& subset, GrDirectContext* direct) const {
+ // TODO: can we do this more efficiently, by telling the generator we want to
+ // "realize" a subset?
+
+#if defined(SK_GANESH)
+ auto pixels = direct ? this->makeTextureImage(direct)
+ : this->makeRasterImage();
+#else
+ auto pixels = this->makeRasterImage();
+#endif
+ return pixels ? pixels->makeSubset(subset, direct) : nullptr;
+}
+
+#if defined(SK_GRAPHITE)
+
+sk_sp<SkImage> SkImage_Lazy::onMakeSubset(const SkIRect& subset,
+ skgpu::graphite::Recorder* recorder,
+ RequiredImageProperties requiredProperties) const {
+ // TODO: can we do this more efficiently, by telling the generator we want to
+ // "realize" a subset?
+
+ sk_sp<SkImage> nonLazyImg = recorder ? this->makeTextureImage(recorder, requiredProperties)
+ : this->makeRasterImage();
+
+ return nonLazyImg ? nonLazyImg->makeSubset(subset, recorder, requiredProperties) : nullptr;
+}
+
+#endif // SK_GRAPHITE
+
+sk_sp<SkImage> SkImage_Lazy::onMakeColorTypeAndColorSpace(SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS,
+ GrDirectContext*) const {
+ SkAutoMutexExclusive autoAquire(fOnMakeColorTypeAndSpaceMutex);
+ if (fOnMakeColorTypeAndSpaceResult &&
+ targetCT == fOnMakeColorTypeAndSpaceResult->colorType() &&
+ SkColorSpace::Equals(targetCS.get(), fOnMakeColorTypeAndSpaceResult->colorSpace())) {
+ return fOnMakeColorTypeAndSpaceResult;
+ }
+ Validator validator(fSharedGenerator, &targetCT, targetCS);
+ sk_sp<SkImage> result = validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
+ if (result) {
+ fOnMakeColorTypeAndSpaceResult = result;
+ }
+ return result;
+}
+
+sk_sp<SkImage> SkImage_Lazy::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
+ // TODO: The correct thing is to clone the generator, and modify its color space. That's hard,
+ // because we don't have a clone method, and generator is public (and derived-from by clients).
+ // So do the simple/inefficient thing here, and fallback to raster when this is called.
+
+ // We allocate the bitmap with the new color space, then generate the image using the original.
+ SkBitmap bitmap;
+ if (bitmap.tryAllocPixels(this->imageInfo().makeColorSpace(std::move(newCS)))) {
+ SkPixmap pixmap = bitmap.pixmap();
+ pixmap.setColorSpace(this->refColorSpace());
+ if (ScopedGenerator(fSharedGenerator)->getPixels(pixmap)) {
+ bitmap.setImmutable();
+ return bitmap.asImage();
+ }
+ }
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage::MakeFromGenerator(std::unique_ptr<SkImageGenerator> generator) {
+ SkImage_Lazy::Validator
+ validator(SharedGenerator::Make(std::move(generator)), nullptr, nullptr);
+
+ return validator ? sk_make_sp<SkImage_Lazy>(&validator) : nullptr;
+}
+
+#if defined(SK_GANESH)
+
+std::tuple<GrSurfaceProxyView, GrColorType> SkImage_Lazy::onAsView(
+ GrRecordingContext* context,
+ GrMipmapped mipmapped,
+ GrImageTexGenPolicy policy) const {
+ GrColorType ct = this->colorTypeOfLockTextureProxy(context->priv().caps());
+ return {this->lockTextureProxyView(context, policy, mipmapped), ct};
+}
+
+std::unique_ptr<GrFragmentProcessor> SkImage_Lazy::onAsFragmentProcessor(
+ GrRecordingContext* rContext,
+ SkSamplingOptions sampling,
+ const SkTileMode tileModes[2],
+ const SkMatrix& m,
+ const SkRect* subset,
+ const SkRect* domain) const {
+ // TODO: If the CPU data is extracted as planes return a FP that reconstructs the image from
+ // the planes.
+ auto mm = sampling.mipmap == SkMipmapMode::kNone ? GrMipmapped::kNo : GrMipmapped::kYes;
+ return MakeFragmentProcessorFromView(rContext,
+ std::get<0>(this->asView(rContext, mm)),
+ this->alphaType(),
+ sampling,
+ tileModes,
+ m,
+ subset,
+ domain);
+}
+
+GrSurfaceProxyView SkImage_Lazy::textureProxyViewFromPlanes(GrRecordingContext* ctx,
+ skgpu::Budgeted budgeted) const {
+ SkYUVAPixmapInfo::SupportedDataTypes supportedDataTypes(*ctx);
+ SkYUVAPixmaps yuvaPixmaps;
+ sk_sp<SkCachedData> dataStorage = this->getPlanes(supportedDataTypes, &yuvaPixmaps);
+ if (!dataStorage) {
+ return {};
+ }
+
+ GrSurfaceProxyView views[SkYUVAInfo::kMaxPlanes];
+ GrColorType pixmapColorTypes[SkYUVAInfo::kMaxPlanes];
+ for (int i = 0; i < yuvaPixmaps.numPlanes(); ++i) {
+ // If the sizes of the components are not all the same we choose to create exact-match
+ // textures for the smaller ones rather than add a texture domain to the draw.
+ // TODO: revisit this decision to improve texture reuse?
+ SkBackingFit fit = yuvaPixmaps.plane(i).dimensions() == this->dimensions()
+ ? SkBackingFit::kApprox
+ : SkBackingFit::kExact;
+
+ // We grab a ref to cached yuv data. When the SkBitmap we create below goes away it will
+ // call releaseProc which will release this ref.
+ // DDL TODO: Currently we end up creating a lazy proxy that will hold onto a ref to the
+ // SkImage in its lambda. This means that we'll keep the ref on the YUV data around for the
+ // life time of the proxy and not just upload. For non-DDL draws we should look into
+ // releasing this SkImage after uploads (by deleting the lambda after instantiation).
+ auto releaseProc = [](void*, void* data) {
+ auto cachedData = static_cast<SkCachedData*>(data);
+ SkASSERT(cachedData);
+ cachedData->unref();
+ };
+ SkBitmap bitmap;
+ bitmap.installPixels(yuvaPixmaps.plane(i).info(),
+ yuvaPixmaps.plane(i).writable_addr(),
+ yuvaPixmaps.plane(i).rowBytes(),
+ releaseProc,
+ SkRef(dataStorage.get()));
+ bitmap.setImmutable();
+
+ std::tie(views[i], std::ignore) = GrMakeUncachedBitmapProxyView(ctx,
+ bitmap,
+ GrMipmapped::kNo,
+ fit);
+ if (!views[i]) {
+ return {};
+ }
+ pixmapColorTypes[i] = SkColorTypeToGrColorType(bitmap.colorType());
+ }
+
+ // TODO: investigate preallocating mip maps here
+ GrImageInfo info(SkColorTypeToGrColorType(this->colorType()),
+ kPremul_SkAlphaType,
+ /*color space*/ nullptr,
+ this->dimensions());
+
+ auto sfc = ctx->priv().makeSFC(info,
+ "ImageLazy_TextureProxyViewFromPlanes",
+ SkBackingFit::kExact,
+ 1,
+ GrMipmapped::kNo,
+ GrProtected::kNo,
+ kTopLeft_GrSurfaceOrigin,
+ budgeted);
+ if (!sfc) {
+ return {};
+ }
+
+ GrYUVATextureProxies yuvaProxies(yuvaPixmaps.yuvaInfo(), views, pixmapColorTypes);
+ SkAssertResult(yuvaProxies.isValid());
+
+ std::unique_ptr<GrFragmentProcessor> fp = GrYUVtoRGBEffect::Make(
+ yuvaProxies,
+ GrSamplerState::Filter::kNearest,
+ *ctx->priv().caps());
+
+ // The pixels after yuv->rgb will be in the generator's color space.
+ // If onMakeColorTypeAndColorSpace has been called then this will not match this image's
+ // color space. To correct this, apply a color space conversion from the generator's color
+ // space to this image's color space.
+ SkColorSpace* srcColorSpace;
+ {
+ ScopedGenerator generator(fSharedGenerator);
+ srcColorSpace = generator->getInfo().colorSpace();
+ }
+ SkColorSpace* dstColorSpace = this->colorSpace();
+
+ // If the caller expects the pixels in a different color space than the one from the image,
+ // apply a color conversion to do this.
+ fp = GrColorSpaceXformEffect::Make(std::move(fp),
+ srcColorSpace, kOpaque_SkAlphaType,
+ dstColorSpace, kOpaque_SkAlphaType);
+ sfc->fillWithFP(std::move(fp));
+
+ return sfc->readSurfaceView();
+}
+
+sk_sp<SkCachedData> SkImage_Lazy::getPlanes(
+ const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes,
+ SkYUVAPixmaps* yuvaPixmaps) const {
+ ScopedGenerator generator(fSharedGenerator);
+
+ sk_sp<SkCachedData> data(SkYUVPlanesCache::FindAndRef(generator->uniqueID(), yuvaPixmaps));
+
+ if (data) {
+ SkASSERT(yuvaPixmaps->isValid());
+ SkASSERT(yuvaPixmaps->yuvaInfo().dimensions() == this->dimensions());
+ return data;
+ }
+ SkYUVAPixmapInfo yuvaPixmapInfo;
+ if (!generator->queryYUVAInfo(supportedDataTypes, &yuvaPixmapInfo) ||
+ yuvaPixmapInfo.yuvaInfo().dimensions() != this->dimensions()) {
+ return nullptr;
+ }
+ data.reset(SkResourceCache::NewCachedData(yuvaPixmapInfo.computeTotalBytes()));
+ SkYUVAPixmaps tempPixmaps = SkYUVAPixmaps::FromExternalMemory(yuvaPixmapInfo,
+ data->writable_data());
+ SkASSERT(tempPixmaps.isValid());
+ if (!generator->getYUVAPlanes(tempPixmaps)) {
+ return nullptr;
+ }
+ // Decoding is done, cache the resulting YUV planes
+ *yuvaPixmaps = tempPixmaps;
+ SkYUVPlanesCache::Add(this->uniqueID(), data.get(), *yuvaPixmaps);
+ return data;
+}
+
+/*
+ * We have 4 ways to try to return a texture (in sorted order)
+ *
+ * 1. Check the cache for a pre-existing one
+ * 2. Ask the generator to natively create one
+ * 3. Ask the generator to return YUV planes, which the GPU can convert
+ * 4. Ask the generator to return RGB(A) data, which the GPU can convert
+ */
+GrSurfaceProxyView SkImage_Lazy::lockTextureProxyView(GrRecordingContext* rContext,
+ GrImageTexGenPolicy texGenPolicy,
+ GrMipmapped mipmapped) const {
+ // Values representing the various texture lock paths we can take. Used for logging the path
+ // taken to a histogram.
+ enum LockTexturePath {
+ kFailure_LockTexturePath,
+ kPreExisting_LockTexturePath,
+ kNative_LockTexturePath,
+ kCompressed_LockTexturePath, // Deprecated
+ kYUV_LockTexturePath,
+ kRGBA_LockTexturePath,
+ };
+
+ enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };
+
+ skgpu::UniqueKey key;
+ if (texGenPolicy == GrImageTexGenPolicy::kDraw) {
+ GrMakeKeyFromImageID(&key, this->uniqueID(), SkIRect::MakeSize(this->dimensions()));
+ }
+
+ const GrCaps* caps = rContext->priv().caps();
+ GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
+
+ auto installKey = [&](const GrSurfaceProxyView& view) {
+ SkASSERT(view && view.asTextureProxy());
+ if (key.isValid()) {
+ auto listener = GrMakeUniqueKeyInvalidationListener(&key, rContext->priv().contextID());
+ this->addUniqueIDListener(std::move(listener));
+ proxyProvider->assignUniqueKeyToProxy(key, view.asTextureProxy());
+ }
+ };
+
+ auto ct = this->colorTypeOfLockTextureProxy(caps);
+
+ // 1. Check the cache for a pre-existing one.
+ if (key.isValid()) {
+ auto proxy = proxyProvider->findOrCreateProxyByUniqueKey(key);
+ if (proxy) {
+ skgpu::Swizzle swizzle = caps->getReadSwizzle(proxy->backendFormat(), ct);
+ GrSurfaceOrigin origin = ScopedGenerator(fSharedGenerator)->origin();
+ GrSurfaceProxyView view(std::move(proxy), origin, swizzle);
+ if (mipmapped == GrMipmapped::kNo ||
+ view.asTextureProxy()->mipmapped() == GrMipmapped::kYes) {
+ return view;
+ } else {
+ // We need a mipped proxy, but we found a cached proxy that wasn't mipped. Thus we
+ // generate a new mipped surface and copy the original proxy into the base layer. We
+ // will then let the gpu generate the rest of the mips.
+ auto mippedView = GrCopyBaseMipMapToView(rContext, view);
+ if (!mippedView) {
+ // We failed to make a mipped proxy with the base copied into it. This could
+ // have been from failure to make the proxy or failure to do the copy. Thus we
+ // will fall back to just using the non mipped proxy; See skbug.com/7094.
+ return view;
+ }
+ proxyProvider->removeUniqueKeyFromProxy(view.asTextureProxy());
+ installKey(mippedView);
+ return mippedView;
+ }
+ }
+ }
+
+ // 2. Ask the generator to natively create one.
+ {
+ ScopedGenerator generator(fSharedGenerator);
+ if (auto view = generator->generateTexture(rContext,
+ this->imageInfo(),
+ mipmapped,
+ texGenPolicy)) {
+ installKey(view);
+ return view;
+ }
+ }
+
+ // 3. Ask the generator to return YUV planes, which the GPU can convert. If we will be mipping
+    //    the texture we skip this step so the CPU generates non-planar MIP maps for us.
+ if (mipmapped == GrMipmapped::kNo && !rContext->priv().options().fDisableGpuYUVConversion) {
+ // TODO: Update to create the mipped surface in the textureProxyViewFromPlanes generator and
+ // draw the base layer directly into the mipped surface.
+ skgpu::Budgeted budgeted = texGenPolicy == GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
+ ? skgpu::Budgeted::kNo
+ : skgpu::Budgeted::kYes;
+ auto view = this->textureProxyViewFromPlanes(rContext, budgeted);
+ if (view) {
+ installKey(view);
+ return view;
+ }
+ }
+
+ // 4. Ask the generator to return a bitmap, which the GPU can convert.
+ auto hint = texGenPolicy == GrImageTexGenPolicy::kDraw ? CachingHint::kAllow_CachingHint
+ : CachingHint::kDisallow_CachingHint;
+ if (SkBitmap bitmap; this->getROPixels(nullptr, &bitmap, hint)) {
+ // We always make an uncached bitmap here because we will cache it based on passed in policy
+ // with *our* key, not a key derived from bitmap. We're just making the proxy here.
+ auto budgeted = texGenPolicy == GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
+ ? skgpu::Budgeted::kNo
+ : skgpu::Budgeted::kYes;
+ auto view = std::get<0>(GrMakeUncachedBitmapProxyView(rContext,
+ bitmap,
+ mipmapped,
+ SkBackingFit::kExact,
+ budgeted));
+ if (view) {
+ installKey(view);
+ return view;
+ }
+ }
+
+ return {};
+}
+
+GrColorType SkImage_Lazy::colorTypeOfLockTextureProxy(const GrCaps* caps) const {
+ GrColorType ct = SkColorTypeToGrColorType(this->colorType());
+ GrBackendFormat format = caps->getDefaultBackendFormat(ct, GrRenderable::kNo);
+ if (!format.isValid()) {
+ ct = GrColorType::kRGBA_8888;
+ }
+ return ct;
+}
+
+void SkImage_Lazy::addUniqueIDListener(sk_sp<SkIDChangeListener> listener) const {
+ fUniqueIDListeners.add(std::move(listener));
+}
+#endif // defined(SK_GANESH)
+
+#if defined(SK_GRAPHITE)
+
+/*
+ * We only have 2 ways to create a Graphite-backed image.
+ *
+ * 1. Ask the generator to natively create one
+ * 2. Ask the generator to return RGB(A) data, which the GPU can convert
+ */
+sk_sp<SkImage> SkImage_Lazy::onMakeTextureImage(skgpu::graphite::Recorder* recorder,
+ RequiredImageProperties requiredProps) const {
+ using namespace skgpu::graphite;
+
+ // 1. Ask the generator to natively create one.
+ {
+ // Disable mipmaps here bc Graphite doesn't currently support mipmap regeneration
+ // In this case, we would allocate the mipmaps and fill in the base layer but the mipmap
+ // levels would never be filled out - yielding incorrect draws. Please see: b/238754357.
+ requiredProps.fMipmapped = skgpu::Mipmapped::kNo;
+
+ ScopedGenerator generator(fSharedGenerator);
+ sk_sp<SkImage> newImage = generator->makeTextureImage(recorder,
+ this->imageInfo(),
+ requiredProps.fMipmapped);
+ if (newImage) {
+ SkASSERT(as_IB(newImage)->isGraphiteBacked());
+ return newImage;
+ }
+ }
+
+ // 2. Ask the generator to return a bitmap, which the GPU can convert.
+ if (SkBitmap bitmap; this->getROPixels(nullptr, &bitmap, CachingHint::kDisallow_CachingHint)) {
+ return skgpu::graphite::MakeFromBitmap(recorder,
+ this->imageInfo().colorInfo(),
+ bitmap,
+ nullptr,
+ skgpu::Budgeted::kNo,
+ requiredProps);
+ }
+
+ return nullptr;
+}
+
+sk_sp<SkImage> SkImage_Lazy::onMakeColorTypeAndColorSpace(
+ SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS,
+ skgpu::graphite::Recorder* recorder,
+ RequiredImageProperties requiredProps) const {
+ SkAutoMutexExclusive autoAquire(fOnMakeColorTypeAndSpaceMutex);
+ if (fOnMakeColorTypeAndSpaceResult &&
+ targetCT == fOnMakeColorTypeAndSpaceResult->colorType() &&
+ SkColorSpace::Equals(targetCS.get(), fOnMakeColorTypeAndSpaceResult->colorSpace())) {
+ return fOnMakeColorTypeAndSpaceResult;
+ }
+ Validator validator(fSharedGenerator, &targetCT, targetCS);
+ sk_sp<SkImage> result = validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
+ if (result) {
+ fOnMakeColorTypeAndSpaceResult = result;
+ }
+
+ if (recorder) {
+ return result->makeTextureImage(recorder, requiredProps);
+ } else {
+ return result;
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Lazy.h b/gfx/skia/skia/src/image/SkImage_Lazy.h
new file mode 100644
index 0000000000..f380d40801
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Lazy.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Lazy_DEFINED
+#define SkImage_Lazy_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkIDChangeListener.h"
+#include "include/private/base/SkMutex.h"
+#include "src/image/SkImage_Base.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <tuple>
+
+#if defined(SK_GANESH)
+#include "include/core/SkYUVAPixmaps.h"
+class GrCaps;
+class GrDirectContext;
+class GrFragmentProcessor;
+class GrRecordingContext;
+class GrSurfaceProxyView;
+#endif
+
+class SharedGenerator;
+class SkBitmap;
+class SkCachedData;
+class SkData;
+class SkMatrix;
+class SkPixmap;
+enum SkColorType : int;
+enum class GrColorType;
+enum class GrImageTexGenPolicy : int;
+enum class SkTileMode;
+struct SkIRect;
+struct SkRect;
+
+namespace skgpu {
+enum class Budgeted : bool;
+enum class Mipmapped : bool;
+}
+
+class SkImage_Lazy : public SkImage_Base {
+public:
+ struct Validator {
+ Validator(sk_sp<SharedGenerator>, const SkColorType*, sk_sp<SkColorSpace>);
+
+ explicit operator bool() const { return fSharedGenerator.get(); }
+
+ sk_sp<SharedGenerator> fSharedGenerator;
+ SkImageInfo fInfo;
+ sk_sp<SkColorSpace> fColorSpace;
+ uint32_t fUniqueID;
+ };
+
+ SkImage_Lazy(Validator* validator);
+
+ bool onHasMipmaps() const override {
+ // TODO: Should we defer to the generator? The generator interface currently doesn't have
+ // a way to provide content for levels other than via SkImageGenerator::generateTexture().
+ return false;
+ }
+
+ bool onReadPixels(GrDirectContext*, const SkImageInfo&, void*, size_t, int srcX, int srcY,
+ CachingHint) const override;
+ sk_sp<SkData> onRefEncoded() const override;
+ sk_sp<SkImage> onMakeSubset(const SkIRect&, GrDirectContext*) const override;
+#if defined(SK_GRAPHITE)
+ sk_sp<SkImage> onMakeSubset(const SkIRect&,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const override;
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const override;
+#endif
+ bool getROPixels(GrDirectContext*, SkBitmap*, CachingHint) const override;
+ SkImage_Base::Type type() const override { return SkImage_Base::Type::kLazy; }
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType, sk_sp<SkColorSpace>,
+ GrDirectContext*) const override;
+ sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const final;
+
+ bool onIsValid(GrRecordingContext*) const override;
+
+#if defined(SK_GANESH)
+ // Returns the texture proxy. CachingHint refers to whether the generator's output should be
+ // cached in CPU memory. We will always cache the generated texture on success.
+ GrSurfaceProxyView lockTextureProxyView(GrRecordingContext*,
+ GrImageTexGenPolicy,
+ skgpu::Mipmapped) const;
+
+ // Returns the GrColorType to use with the GrTextureProxy returned from lockTextureProxy. This
+    // may be different from the color type on the image in the case where we need to upload CPU
+ // data to a texture but the GPU doesn't support the format of CPU data. In this case we convert
+ // the data to RGBA_8888 unorm on the CPU then upload that.
+ GrColorType colorTypeOfLockTextureProxy(const GrCaps* caps) const;
+#endif
+
+private:
+ void addUniqueIDListener(sk_sp<SkIDChangeListener>) const;
+ bool readPixelsProxy(GrDirectContext*, const SkPixmap&) const;
+#if defined(SK_GANESH)
+ std::tuple<GrSurfaceProxyView, GrColorType> onAsView(GrRecordingContext*,
+ skgpu::Mipmapped,
+ GrImageTexGenPolicy) const override;
+ std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(GrRecordingContext*,
+ SkSamplingOptions,
+ const SkTileMode[2],
+ const SkMatrix&,
+ const SkRect*,
+ const SkRect*) const override;
+
+ GrSurfaceProxyView textureProxyViewFromPlanes(GrRecordingContext*, skgpu::Budgeted) const;
+ sk_sp<SkCachedData> getPlanes(const SkYUVAPixmapInfo::SupportedDataTypes& supportedDataTypes,
+ SkYUVAPixmaps* pixmaps) const;
+#endif
+
+#if defined(SK_GRAPHITE)
+ sk_sp<SkImage> onMakeTextureImage(skgpu::graphite::Recorder*,
+ RequiredImageProperties) const override;
+#endif
+
+ class ScopedGenerator;
+
+ // Note that this->imageInfo() is not necessarily the info from the generator. It may be
+ // cropped by onMakeSubset and its color type/space may be changed by
+ // onMakeColorTypeAndColorSpace.
+ sk_sp<SharedGenerator> fSharedGenerator;
+
+ // Repeated calls to onMakeColorTypeAndColorSpace will result in a proliferation of unique IDs
+ // and SkImage_Lazy instances. Cache the result of the last successful call.
+ mutable SkMutex fOnMakeColorTypeAndSpaceMutex;
+ mutable sk_sp<SkImage> fOnMakeColorTypeAndSpaceResult;
+
+#if defined(SK_GANESH)
+ // When the SkImage_Lazy goes away, we will iterate over all the listeners to inform them
+ // of the unique ID's demise. This is used to remove cached textures from GrContext.
+ mutable SkIDChangeListener::List fUniqueIDListeners;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Raster.cpp b/gfx/skia/skia/src/image/SkImage_Raster.cpp
new file mode 100644
index 0000000000..d082ae39a9
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Raster.cpp
@@ -0,0 +1,467 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkImage_Raster.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMath.h"
+#include "src/base/SkRectMemcpy.h"
+#include "src/core/SkCompressedDataUtils.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkImagePriv.h"
+#include "src/image/SkImage_Base.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <tuple>
+#include <utility>
+
+class GrDirectContext;
+class SkMatrix;
+enum class SkTextureCompressionType;
+enum class SkTileMode;
+
+#if defined(SK_GANESH)
+#include "include/gpu/GpuTypes.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h" // IWYU pragma: keep
+#include "src/gpu/ganesh/GrSurfaceProxyView.h" // IWYU pragma: keep
+#include "src/gpu/ganesh/SkGr.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/Recorder.h"
+#include "src/gpu/graphite/Buffer.h"
+#include "src/gpu/graphite/Caps.h"
+#include "src/gpu/graphite/CommandBuffer.h"
+#include "src/gpu/graphite/RecorderPriv.h"
+#include "src/gpu/graphite/TextureUtils.h"
+#include "src/gpu/graphite/UploadTask.h"
+#endif
+
+static void release_data(void* addr, void* context) {
+ SkData* data = static_cast<SkData*>(context);
+ data->unref();
+}
+
+SkImage_Raster::SkImage_Raster(const SkImageInfo& info, sk_sp<SkData> data, size_t rowBytes,
+ uint32_t id)
+ : SkImage_Base(info, id) {
+ void* addr = const_cast<void*>(data->data());
+
+ fBitmap.installPixels(info, addr, rowBytes, release_data, data.release());
+ fBitmap.setImmutable();
+}
+
+// fixes https://bug.skia.org/5096
+static bool is_not_subset(const SkBitmap& bm) {
+ SkASSERT(bm.pixelRef());
+ SkISize dim = SkISize::Make(bm.pixelRef()->width(), bm.pixelRef()->height());
+ SkASSERT(dim != bm.dimensions() || bm.pixelRefOrigin().isZero());
+ return dim == bm.dimensions();
+}
+
+SkImage_Raster::SkImage_Raster(const SkBitmap& bm, bool bitmapMayBeMutable)
+ : SkImage_Base(bm.info(),
+ is_not_subset(bm) ? bm.getGenerationID() : (uint32_t)kNeedNewImageUniqueID)
+ , fBitmap(bm) {
+ SkASSERT(bitmapMayBeMutable || fBitmap.isImmutable());
+}
+
+SkImage_Raster::~SkImage_Raster() {}
+
+bool SkImage_Raster::onReadPixels(GrDirectContext*,
+ const SkImageInfo& dstInfo,
+ void* dstPixels,
+ size_t dstRowBytes,
+ int srcX,
+ int srcY,
+ CachingHint) const {
+ SkBitmap shallowCopy(fBitmap);
+ return shallowCopy.readPixels(dstInfo, dstPixels, dstRowBytes, srcX, srcY);
+}
+
+bool SkImage_Raster::onPeekPixels(SkPixmap* pm) const {
+ return fBitmap.peekPixels(pm);
+}
+
+bool SkImage_Raster::getROPixels(GrDirectContext*, SkBitmap* dst, CachingHint) const {
+ *dst = fBitmap;
+ return true;
+}
+
+static SkBitmap copy_bitmap_subset(const SkBitmap& orig, const SkIRect& subset) {
+ SkImageInfo info = orig.info().makeDimensions(subset.size());
+ SkBitmap bitmap;
+ if (!bitmap.tryAllocPixels(info)) {
+ return {};
+ }
+
+ void* dst = bitmap.getPixels();
+ void* src = orig.getAddr(subset.x(), subset.y());
+ if (!dst || !src) {
+ SkDEBUGFAIL("SkImage_Raster::onMakeSubset with nullptr src or dst");
+ return {};
+ }
+
+ SkRectMemcpy(dst, bitmap.rowBytes(), src, orig.rowBytes(), bitmap.rowBytes(),
+ subset.height());
+
+ bitmap.setImmutable();
+ return bitmap;
+}
+
+sk_sp<SkImage> SkImage_Raster::onMakeSubset(const SkIRect& subset, GrDirectContext*) const {
+ SkBitmap copy = copy_bitmap_subset(fBitmap, subset);
+ if (copy.isNull()) {
+ return nullptr;
+ } else {
+ return copy.asImage();
+ }
+}
+
+#if defined(SK_GRAPHITE)
+static sk_sp<SkMipmap> copy_mipmaps(const SkBitmap& src, SkMipmap* srcMips) {
+ if (!srcMips) {
+ return nullptr;
+ }
+
+ sk_sp<SkMipmap> dst;
+ dst.reset(SkMipmap::Build(src.pixmap(), nullptr, /* computeContents= */ false));
+ for (int i = 0; i < dst->countLevels(); ++i) {
+ SkMipmap::Level srcLevel, dstLevel;
+ srcMips->getLevel(i, &srcLevel);
+ dst->getLevel(i, &dstLevel);
+ srcLevel.fPixmap.readPixels(dstLevel.fPixmap);
+ }
+
+ return dst;
+}
+
+sk_sp<SkImage> SkImage_Raster::onMakeSubset(const SkIRect& subset,
+ skgpu::graphite::Recorder* recorder,
+ RequiredImageProperties requiredProperties) const {
+ sk_sp<SkImage> img;
+
+ if (requiredProperties.fMipmapped == skgpu::Mipmapped::kYes) {
+ bool fullCopy = subset == SkIRect::MakeSize(fBitmap.dimensions());
+
+ sk_sp<SkMipmap> mips = fullCopy ? copy_mipmaps(fBitmap, fBitmap.fMips.get()) : nullptr;
+
+ // SkImage::withMipmaps will always make a copy for us so we can temporarily share
+ // the pixel ref with fBitmap
+ SkBitmap tmpSubset;
+ if (!fBitmap.extractSubset(&tmpSubset, subset)) {
+ return nullptr;
+ }
+
+ sk_sp<SkImage> tmp(new SkImage_Raster(tmpSubset, /* bitmapMayBeMutable= */ true));
+
+ // withMipmaps will auto generate the mipmaps if a nullptr is passed in
+ SkASSERT(!mips || mips->validForRootLevel(tmp->imageInfo()));
+ img = tmp->withMipmaps(std::move(mips));
+ } else {
+ SkBitmap copy = copy_bitmap_subset(fBitmap, subset);
+ if (!copy.isNull()) {
+ img = copy.asImage();
+ }
+ }
+
+ if (!img) {
+ return nullptr;
+ }
+
+ if (recorder) {
+ return img->makeTextureImage(recorder, requiredProperties);
+ } else {
+ return img;
+ }
+}
+#endif // SK_GRAPHITE
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool valid_args(const SkImageInfo& info, size_t rowBytes, size_t* minSize) {
+ const int maxDimension = SK_MaxS32 >> 2;
+
+ // TODO(mtklein): eliminate anything here that setInfo() has already checked.
+ SkBitmap b;
+ if (!b.setInfo(info, rowBytes)) {
+ return false;
+ }
+
+ if (info.width() <= 0 || info.height() <= 0) {
+ return false;
+ }
+ if (info.width() > maxDimension || info.height() > maxDimension) {
+ return false;
+ }
+ if ((unsigned)info.colorType() > (unsigned)kLastEnum_SkColorType) {
+ return false;
+ }
+ if ((unsigned)info.alphaType() > (unsigned)kLastEnum_SkAlphaType) {
+ return false;
+ }
+
+ if (kUnknown_SkColorType == info.colorType()) {
+ return false;
+ }
+ if (!info.validRowBytes(rowBytes)) {
+ return false;
+ }
+
+ size_t size = info.computeByteSize(rowBytes);
+ if (SkImageInfo::ByteSizeOverflowed(size)) {
+ return false;
+ }
+
+ if (minSize) {
+ *minSize = size;
+ }
+ return true;
+}
+
+sk_sp<SkImage> MakeRasterCopyPriv(const SkPixmap& pmap, uint32_t id) {
+ size_t size;
+ if (!valid_args(pmap.info(), pmap.rowBytes(), &size) || !pmap.addr()) {
+ return nullptr;
+ }
+
+ // Here we actually make a copy of the caller's pixel data
+ sk_sp<SkData> data(SkData::MakeWithCopy(pmap.addr(), size));
+ return sk_make_sp<SkImage_Raster>(pmap.info(), std::move(data), pmap.rowBytes(), id);
+}
+
+sk_sp<SkImage> SkImage::MakeRasterCopy(const SkPixmap& pmap) {
+ return MakeRasterCopyPriv(pmap, kNeedNewImageUniqueID);
+}
+
+sk_sp<SkImage> SkImage::MakeRasterData(const SkImageInfo& info, sk_sp<SkData> data,
+ size_t rowBytes) {
+ size_t size;
+ if (!valid_args(info, rowBytes, &size) || !data) {
+ return nullptr;
+ }
+
+ // did they give us enough data?
+ if (data->size() < size) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkImage_Raster>(info, std::move(data), rowBytes);
+}
+
+// TODO: this could be improved to decode and make use of the mipmap
+// levels potentially present in the compressed data. For now, any
+// mipmap levels are discarded.
+sk_sp<SkImage> SkImage::MakeRasterFromCompressed(sk_sp<SkData> data,
+ int width, int height,
+ SkTextureCompressionType type) {
+ size_t expectedSize = SkCompressedFormatDataSize(type, { width, height }, false);
+ if (!data || data->size() < expectedSize) {
+ return nullptr;
+ }
+
+ SkAlphaType at = SkTextureCompressionTypeIsOpaque(type) ? kOpaque_SkAlphaType
+ : kPremul_SkAlphaType;
+
+ SkImageInfo ii = SkImageInfo::MakeN32(width, height, at);
+
+ if (!valid_args(ii, ii.minRowBytes(), nullptr)) {
+ return nullptr;
+ }
+
+ SkBitmap bitmap;
+ if (!bitmap.tryAllocPixels(ii)) {
+ return nullptr;
+ }
+
+ if (!SkDecompress(std::move(data), { width, height }, type, &bitmap)) {
+ return nullptr;
+ }
+
+ bitmap.setImmutable();
+ return MakeFromBitmap(bitmap);
+}
+
+sk_sp<SkImage> SkImage::MakeFromRaster(const SkPixmap& pmap, RasterReleaseProc proc,
+ ReleaseContext ctx) {
+ size_t size;
+ if (!valid_args(pmap.info(), pmap.rowBytes(), &size) || !pmap.addr()) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data(SkData::MakeWithProc(pmap.addr(), size, proc, ctx));
+ return sk_make_sp<SkImage_Raster>(pmap.info(), std::move(data), pmap.rowBytes());
+}
+
+sk_sp<SkImage> SkMakeImageFromRasterBitmapPriv(const SkBitmap& bm, SkCopyPixelsMode cpm,
+ uint32_t idForCopy) {
+ if (kAlways_SkCopyPixelsMode == cpm || (!bm.isImmutable() && kNever_SkCopyPixelsMode != cpm)) {
+ SkPixmap pmap;
+ if (bm.peekPixels(&pmap)) {
+ return MakeRasterCopyPriv(pmap, idForCopy);
+ } else {
+ return sk_sp<SkImage>();
+ }
+ }
+
+ return sk_make_sp<SkImage_Raster>(bm, kNever_SkCopyPixelsMode == cpm);
+}
+
+sk_sp<SkImage> SkMakeImageFromRasterBitmap(const SkBitmap& bm, SkCopyPixelsMode cpm) {
+ if (!SkImageInfoIsValid(bm.info()) || bm.rowBytes() < bm.info().minRowBytes()) {
+ return nullptr;
+ }
+
+ return SkMakeImageFromRasterBitmapPriv(bm, cpm, kNeedNewImageUniqueID);
+}
+
+const SkPixelRef* SkBitmapImageGetPixelRef(const SkImage* image) {
+ return ((const SkImage_Raster*)image)->getPixelRef();
+}
+
+bool SkImage_Raster::onAsLegacyBitmap(GrDirectContext*, SkBitmap* bitmap) const {
+    // When we're a snapshot from a surface, our bitmap may not be marked immutable
+    // even though logically we always are, but in that case we can't physically share our
+ // pixelref since the caller might call setImmutable() themselves
+ // (thus changing our state).
+ if (fBitmap.isImmutable()) {
+ SkIPoint origin = fBitmap.pixelRefOrigin();
+ bitmap->setInfo(fBitmap.info(), fBitmap.rowBytes());
+ bitmap->setPixelRef(sk_ref_sp(fBitmap.pixelRef()), origin.x(), origin.y());
+ return true;
+ }
+ return this->SkImage_Base::onAsLegacyBitmap(nullptr, bitmap);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkImage> SkImage_Raster::onMakeColorTypeAndColorSpace(SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS,
+ GrDirectContext*) const {
+ SkPixmap src;
+ SkAssertResult(fBitmap.peekPixels(&src));
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(fBitmap.info().makeColorType(targetCT).makeColorSpace(targetCS))) {
+ return nullptr;
+ }
+
+ SkAssertResult(dst.writePixels(src));
+ dst.setImmutable();
+ return dst.asImage();
+}
+
+sk_sp<SkImage> SkImage_Raster::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
+ // TODO: If our bitmap is immutable, then we could theoretically create another image sharing
+ // our pixelRef. That doesn't work (without more invasive logic), because the image gets its
+ // gen ID from the bitmap, which gets it from the pixelRef.
+ SkPixmap pixmap = fBitmap.pixmap();
+ pixmap.setColorSpace(std::move(newCS));
+ return SkImage::MakeRasterCopy(pixmap);
+}
+
+#if defined(SK_GANESH)
+std::tuple<GrSurfaceProxyView, GrColorType> SkImage_Raster::onAsView(
+ GrRecordingContext* rContext,
+ GrMipmapped mipmapped,
+ GrImageTexGenPolicy policy) const {
+ if (policy == GrImageTexGenPolicy::kDraw) {
+ // If the draw doesn't require mipmaps but this SkImage has them go ahead and make a
+        // mipmapped texture. There are two reasons for this:
+ // 1) Avoiding another texture creation if a later draw requires mipmaps.
+ // 2) Ensuring we upload the bitmap's levels instead of generating on the GPU from the base.
+ if (this->hasMipmaps()) {
+ mipmapped = GrMipmapped::kYes;
+ }
+ return GrMakeCachedBitmapProxyView(rContext,
+ fBitmap,
+ /*label=*/"TextureForImageRasterWithPolicyEqualKDraw",
+ mipmapped);
+ }
+ auto budgeted = (policy == GrImageTexGenPolicy::kNew_Uncached_Unbudgeted)
+ ? skgpu::Budgeted::kNo
+ : skgpu::Budgeted::kYes;
+ return GrMakeUncachedBitmapProxyView(rContext,
+ fBitmap,
+ mipmapped,
+ SkBackingFit::kExact,
+ budgeted);
+}
+
+std::unique_ptr<GrFragmentProcessor> SkImage_Raster::onAsFragmentProcessor(
+ GrRecordingContext* rContext,
+ SkSamplingOptions sampling,
+ const SkTileMode tileModes[2],
+ const SkMatrix& m,
+ const SkRect* subset,
+ const SkRect* domain) const {
+ auto mm = sampling.mipmap == SkMipmapMode::kNone ? GrMipmapped::kNo : GrMipmapped::kYes;
+ return MakeFragmentProcessorFromView(rContext,
+ std::get<0>(this->asView(rContext, mm)),
+ this->alphaType(),
+ sampling,
+ tileModes,
+ m,
+ subset,
+ domain);
+}
+#endif
+
+#if defined(SK_GRAPHITE)
+sk_sp<SkImage> SkImage_Raster::onMakeTextureImage(skgpu::graphite::Recorder* recorder,
+ RequiredImageProperties requiredProps) const {
+ return skgpu::graphite::MakeFromBitmap(recorder,
+ this->imageInfo().colorInfo(),
+ fBitmap,
+ this->refMips(),
+ skgpu::Budgeted::kNo,
+ requiredProps);
+}
+
+sk_sp<SkImage> SkImage_Raster::onMakeColorTypeAndColorSpace(
+ SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS,
+ skgpu::graphite::Recorder* recorder,
+ RequiredImageProperties requiredProps) const {
+ SkPixmap src;
+ SkAssertResult(fBitmap.peekPixels(&src));
+
+ SkBitmap dst;
+ if (!dst.tryAllocPixels(fBitmap.info().makeColorType(targetCT).makeColorSpace(targetCS))) {
+ return nullptr;
+ }
+
+ SkAssertResult(dst.writePixels(src));
+ dst.setImmutable();
+
+ sk_sp<SkImage> tmp = dst.asImage();
+ if (recorder) {
+ return tmp->makeTextureImage(recorder, requiredProps);
+ } else {
+ return tmp;
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkImage_Raster.h b/gfx/skia/skia/src/image/SkImage_Raster.h
new file mode 100644
index 0000000000..0fbbc0a5c1
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkImage_Raster.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImage_Raster_DEFINED
+#define SkImage_Raster_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMipmap.h"
+#include "src/image/SkImage_Base.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <tuple>
+#include <utility>
+
+class GrDirectContext;
+class GrFragmentProcessor;
+class GrRecordingContext;
+class GrSurfaceProxyView;
+class SkColorSpace;
+class SkData;
+class SkMatrix;
+class SkPixmap;
+enum SkColorType : int;
+enum class GrColorType;
+enum class GrImageTexGenPolicy : int;
+enum class SkTileMode;
+struct SkIRect;
+struct SkImageInfo;
+struct SkRect;
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrTypes.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "include/gpu/graphite/GraphiteTypes.h"
+#include "include/gpu/graphite/Recorder.h"
+#include "src/gpu/graphite/Buffer.h"
+#include "src/gpu/graphite/Caps.h"
+#include "src/gpu/graphite/CommandBuffer.h"
+#include "src/gpu/graphite/RecorderPriv.h"
+#include "src/gpu/graphite/TextureUtils.h"
+#include "src/gpu/graphite/UploadTask.h"
+#endif
+
+class SkImage_Raster : public SkImage_Base {
+public:
+ SkImage_Raster(const SkImageInfo&, sk_sp<SkData>, size_t rb,
+ uint32_t id = kNeedNewImageUniqueID);
+ SkImage_Raster(const SkBitmap& bm, bool bitmapMayBeMutable = false);
+ ~SkImage_Raster() override;
+
+ bool onReadPixels(GrDirectContext*, const SkImageInfo&, void*, size_t, int srcX, int srcY,
+ CachingHint) const override;
+ bool onPeekPixels(SkPixmap*) const override;
+ const SkBitmap* onPeekBitmap() const override { return &fBitmap; }
+
+ bool getROPixels(GrDirectContext*, SkBitmap*, CachingHint) const override;
+ sk_sp<SkImage> onMakeSubset(const SkIRect&, GrDirectContext*) const override;
+#if defined(SK_GRAPHITE)
+ sk_sp<SkImage> onMakeSubset(const SkIRect&,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const override;
+#endif
+
+ SkPixelRef* getPixelRef() const { return fBitmap.pixelRef(); }
+
+ bool onAsLegacyBitmap(GrDirectContext*, SkBitmap*) const override;
+
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType, sk_sp<SkColorSpace>,
+ GrDirectContext*) const override;
+
+ sk_sp<SkImage> onReinterpretColorSpace(sk_sp<SkColorSpace>) const override;
+
+ bool onIsValid(GrRecordingContext* context) const override { return true; }
+ void notifyAddedToRasterCache() const override {
+ // We explicitly DON'T want to call INHERITED::notifyAddedToRasterCache. That ties the
+ // lifetime of derived/cached resources to the image. In this case, we only want cached
+ // data (eg mips) tied to the lifetime of the underlying pixelRef.
+ SkASSERT(fBitmap.pixelRef());
+ fBitmap.pixelRef()->notifyAddedToCache();
+ }
+
+ SkImage_Base::Type type() const override { return SkImage_Base::Type::kRaster; }
+
+ bool onHasMipmaps() const override { return SkToBool(fBitmap.fMips); }
+
+ SkMipmap* onPeekMips() const override { return fBitmap.fMips.get(); }
+
+ sk_sp<SkImage> onMakeWithMipmaps(sk_sp<SkMipmap> mips) const override {
+ // It's dangerous to have two SkBitmaps that share a SkPixelRef but have different SkMipmaps
+ // since various caches key on SkPixelRef's generation ID. Also, SkPixelRefs that back
+ // SkSurfaces are marked "temporarily immutable" and making an image that uses the same
+ // SkPixelRef can interact badly with SkSurface/SkImage copy-on-write. So we just always
+ // make a copy with a new ID.
+ static auto constexpr kCopyMode = SkCopyPixelsMode::kAlways_SkCopyPixelsMode;
+ sk_sp<SkImage> img = SkMakeImageFromRasterBitmap(fBitmap, kCopyMode);
+ auto imgRaster = static_cast<SkImage_Raster*>(img.get());
+ if (mips) {
+ imgRaster->fBitmap.fMips = std::move(mips);
+ } else {
+ imgRaster->fBitmap.fMips.reset(SkMipmap::Build(fBitmap.pixmap(), nullptr));
+ }
+ return img;
+ }
+
+protected:
+ SkBitmap fBitmap;
+
+private:
+#if defined(SK_GANESH)
+ std::tuple<GrSurfaceProxyView, GrColorType> onAsView(GrRecordingContext*,
+ GrMipmapped,
+ GrImageTexGenPolicy) const override;
+
+ std::unique_ptr<GrFragmentProcessor> onAsFragmentProcessor(GrRecordingContext*,
+ SkSamplingOptions,
+ const SkTileMode[2],
+ const SkMatrix&,
+ const SkRect*,
+ const SkRect*) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ sk_sp<SkImage> onMakeTextureImage(skgpu::graphite::Recorder*,
+ RequiredImageProperties) const override;
+ sk_sp<SkImage> onMakeColorTypeAndColorSpace(SkColorType targetCT,
+ sk_sp<SkColorSpace> targetCS,
+ skgpu::graphite::Recorder*,
+ RequiredImageProperties) const override;
+#endif
+
+};
+
+#endif // SkImage_Raster_DEFINED
diff --git a/gfx/skia/skia/src/image/SkRescaleAndReadPixels.cpp b/gfx/skia/skia/src/image/SkRescaleAndReadPixels.cpp
new file mode 100644
index 0000000000..85747e859e
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkRescaleAndReadPixels.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkRescaleAndReadPixels.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkSurface.h"
+
+#include <cmath>
+#include <cstddef>
+#include <memory>
+#include <utility>
+
+void SkRescaleAndReadPixels(SkBitmap bmp,
+ const SkImageInfo& resultInfo,
+ const SkIRect& srcRect,
+ SkImage::RescaleGamma rescaleGamma,
+ SkImage::RescaleMode rescaleMode,
+ SkImage::ReadPixelsCallback callback,
+ SkImage::ReadPixelsContext context) {
+ int srcW = srcRect.width();
+ int srcH = srcRect.height();
+
+ float sx = (float)resultInfo.width() / srcW;
+ float sy = (float)resultInfo.height() / srcH;
+ // How many bilerp/bicubic steps to do in X and Y. + means upscaling, - means downscaling.
+ int stepsX;
+ int stepsY;
+ if (rescaleMode != SkImage::RescaleMode::kNearest) {
+ stepsX = static_cast<int>((sx > 1.f) ? std::ceil(std::log2f(sx))
+ : std::floor(std::log2f(sx)));
+ stepsY = static_cast<int>((sy > 1.f) ? std::ceil(std::log2f(sy))
+ : std::floor(std::log2f(sy)));
+ } else {
+ stepsX = sx != 1.f;
+ stepsY = sy != 1.f;
+ }
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ if (stepsX < 0 || stepsY < 0) {
+ // Don't trigger MIP generation. We don't currently have a way to trigger bicubic for
+ // downscaling draws.
+
+ // TODO: should we trigger cubic now that we can?
+ if (rescaleMode != SkImage::RescaleMode::kNearest) {
+ rescaleMode = SkImage::RescaleMode::kRepeatedLinear;
+ }
+ }
+
+ auto rescaling_to_sampling = [](SkImage::RescaleMode rescaleMode) {
+ SkSamplingOptions sampling;
+ if (rescaleMode == SkImage::RescaleMode::kRepeatedLinear) {
+ sampling = SkSamplingOptions(SkFilterMode::kLinear);
+ } else if (rescaleMode == SkImage::RescaleMode::kRepeatedCubic) {
+ sampling = SkSamplingOptions({1.0f/3, 1.0f/3});
+ }
+ return sampling;
+ };
+ SkSamplingOptions sampling = rescaling_to_sampling(rescaleMode);
+
+ sk_sp<SkSurface> tempSurf;
+ sk_sp<SkImage> srcImage;
+ int srcX = srcRect.fLeft;
+ int srcY = srcRect.fTop;
+ SkCanvas::SrcRectConstraint constraint = SkCanvas::kStrict_SrcRectConstraint;
+ // Assume we should ignore the rescale linear request if the surface has no color space since
+ // it's unclear how we'd linearize from an unknown color space.
+ if (rescaleGamma == SkSurface::RescaleGamma::kLinear && bmp.info().colorSpace() &&
+ !bmp.info().colorSpace()->gammaIsLinear()) {
+ auto cs = bmp.info().colorSpace()->makeLinearGamma();
+ // Promote to F16 color type to preserve precision.
+ auto ii = SkImageInfo::Make(srcW, srcH, kRGBA_F16_SkColorType, bmp.info().alphaType(),
+ std::move(cs));
+ auto linearSurf = SkSurface::MakeRaster(ii);
+ if (!linearSurf) {
+ callback(context, nullptr);
+ return;
+ }
+ linearSurf->getCanvas()->drawImage(bmp.asImage().get(), -srcX, -srcY, sampling, &paint);
+ tempSurf = std::move(linearSurf);
+ srcImage = tempSurf->makeImageSnapshot();
+ srcX = 0;
+ srcY = 0;
+ constraint = SkCanvas::kFast_SrcRectConstraint;
+ } else {
+ // MakeFromBitmap would trigger a copy if bmp is mutable.
+ srcImage = SkImage::MakeFromRaster(bmp.pixmap(), nullptr, nullptr);
+ }
+ while (stepsX || stepsY) {
+ int nextW = resultInfo.width();
+ int nextH = resultInfo.height();
+ if (stepsX < 0) {
+ nextW = resultInfo.width() << (-stepsX - 1);
+ stepsX++;
+ } else if (stepsX != 0) {
+ if (stepsX > 1) {
+ nextW = srcW * 2;
+ }
+ --stepsX;
+ }
+ if (stepsY < 0) {
+ nextH = resultInfo.height() << (-stepsY - 1);
+ stepsY++;
+ } else if (stepsY != 0) {
+ if (stepsY > 1) {
+ nextH = srcH * 2;
+ }
+ --stepsY;
+ }
+ auto ii = srcImage->imageInfo().makeWH(nextW, nextH);
+ if (!stepsX && !stepsY) {
+ // Might as well fold conversion to final info in the last step.
+ ii = resultInfo;
+ }
+ auto next = SkSurface::MakeRaster(ii);
+ if (!next) {
+ callback(context, nullptr);
+ return;
+ }
+ next->getCanvas()->drawImageRect(
+ srcImage.get(), SkRect::Make(SkIRect::MakeXYWH(srcX, srcY, srcW, srcH)),
+ SkRect::MakeIWH(nextW, nextH), sampling, &paint, constraint);
+ tempSurf = std::move(next);
+ srcImage = tempSurf->makeImageSnapshot();
+ srcX = srcY = 0;
+ srcW = nextW;
+ srcH = nextH;
+ constraint = SkCanvas::kFast_SrcRectConstraint;
+ }
+
+ size_t rowBytes = resultInfo.minRowBytes();
+ std::unique_ptr<char[]> data(new char[resultInfo.height() * rowBytes]);
+ SkPixmap pm(resultInfo, data.get(), rowBytes);
+ if (srcImage->readPixels(nullptr, pm, srcX, srcY)) {
+ class Result : public SkImage::AsyncReadResult {
+ public:
+ Result(std::unique_ptr<const char[]> data, size_t rowBytes)
+ : fData(std::move(data)), fRowBytes(rowBytes) {}
+ int count() const override { return 1; }
+ const void* data(int i) const override { return fData.get(); }
+ size_t rowBytes(int i) const override { return fRowBytes; }
+
+ private:
+ std::unique_ptr<const char[]> fData;
+ size_t fRowBytes;
+ };
+ callback(context, std::make_unique<Result>(std::move(data), rowBytes));
+ } else {
+ callback(context, nullptr);
+ }
+}
diff --git a/gfx/skia/skia/src/image/SkRescaleAndReadPixels.h b/gfx/skia/skia/src/image/SkRescaleAndReadPixels.h
new file mode 100644
index 0000000000..ab555d4316
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkRescaleAndReadPixels.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImage.h"
+
+class SkBitmap;
+struct SkIRect;
+struct SkImageInfo;
+
+/** Generic/synchronous implementation for SkImage:: and SkSurface::asyncRescaleAndReadPixels. */
+void SkRescaleAndReadPixels(SkBitmap src,
+ const SkImageInfo& resultInfo,
+ const SkIRect& srcRect,
+ SkImage::RescaleGamma,
+ SkImage::RescaleMode,
+ SkImage::ReadPixelsCallback,
+ SkImage::ReadPixelsContext);
diff --git a/gfx/skia/skia/src/image/SkSurface.cpp b/gfx/skia/skia/src/image/SkSurface.cpp
new file mode 100644
index 0000000000..8a2940edd6
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface.cpp
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSurface.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkCapabilities.h" // IWYU pragma: keep
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkDeferredDisplayList.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/image/SkSurface_Base.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+class GrBackendSemaphore;
+class GrRecordingContext;
+class SkPaint;
+class SkSurfaceCharacterization;
+namespace skgpu { class MutableTextureState; }
+namespace skgpu { namespace graphite { class Recorder; } }
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrBackendSurface.h"
+#endif
+
+SkSurfaceProps::SkSurfaceProps() : fFlags(0), fPixelGeometry(kUnknown_SkPixelGeometry) {}
+
+SkSurfaceProps::SkSurfaceProps(uint32_t flags, SkPixelGeometry pg)
+ : fFlags(flags), fPixelGeometry(pg)
+{}
+
+SkSurfaceProps::SkSurfaceProps(const SkSurfaceProps&) = default;
+SkSurfaceProps& SkSurfaceProps::operator=(const SkSurfaceProps&) = default;
+
+SkSurface::SkSurface(int width, int height, const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props)), fWidth(width), fHeight(height)
+{
+ SkASSERT(fWidth > 0);
+ SkASSERT(fHeight > 0);
+ fGenerationID = 0;
+}
+
+SkSurface::SkSurface(const SkImageInfo& info, const SkSurfaceProps* props)
+ : fProps(SkSurfacePropsCopyOrDefault(props)), fWidth(info.width()), fHeight(info.height())
+{
+ SkASSERT(fWidth > 0);
+ SkASSERT(fHeight > 0);
+ fGenerationID = 0;
+}
+
+uint32_t SkSurface::generationID() {
+ if (0 == fGenerationID) {
+ fGenerationID = asSB(this)->newGenerationID();
+ }
+ return fGenerationID;
+}
+
+void SkSurface::notifyContentWillChange(ContentChangeMode mode) {
+ sk_ignore_unused_variable(asSB(this)->aboutToDraw(mode));
+}
+
+SkCanvas* SkSurface::getCanvas() {
+ return asSB(this)->getCachedCanvas();
+}
+
+sk_sp<const SkCapabilities> SkSurface::capabilities() {
+ return asSB(this)->onCapabilities();
+}
+
+sk_sp<SkImage> SkSurface::makeImageSnapshot() {
+ return asSB(this)->refCachedImage();
+}
+
+sk_sp<SkImage> SkSurface::makeImageSnapshot(const SkIRect& srcBounds) {
+ const SkIRect surfBounds = { 0, 0, fWidth, fHeight };
+ SkIRect bounds = srcBounds;
+ if (!bounds.intersect(surfBounds)) {
+ return nullptr;
+ }
+ SkASSERT(!bounds.isEmpty());
+ if (bounds == surfBounds) {
+ return this->makeImageSnapshot();
+ } else {
+ return asSB(this)->onNewImageSnapshot(&bounds);
+ }
+}
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Log.h"
+
+sk_sp<SkImage> SkSurface::asImage() {
+ if (asSB(this)->fCachedImage) {
+ SKGPU_LOG_W("Intermingling makeImageSnapshot and asImage calls may produce "
+ "unexpected results. Please use either the old _or_ new API.");
+ }
+
+ return asSB(this)->onAsImage();
+}
+
+sk_sp<SkImage> SkSurface::makeImageCopy(const SkIRect* subset,
+ skgpu::Mipmapped mipmapped) {
+ if (asSB(this)->fCachedImage) {
+ SKGPU_LOG_W("Intermingling makeImageSnapshot and makeImageCopy calls may produce "
+ "unexpected results. Please use either the old _or_ new API.");
+ }
+
+ return asSB(this)->onMakeImageCopy(subset, mipmapped);
+}
+#endif
+
+sk_sp<SkSurface> SkSurface::makeSurface(const SkImageInfo& info) {
+ return asSB(this)->onNewSurface(info);
+}
+
+sk_sp<SkSurface> SkSurface::makeSurface(int width, int height) {
+ return this->makeSurface(this->imageInfo().makeWH(width, height));
+}
+
+void SkSurface::draw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkSamplingOptions& sampling,
+ const SkPaint* paint) {
+ asSB(this)->onDraw(canvas, x, y, sampling, paint);
+}
+
+bool SkSurface::peekPixels(SkPixmap* pmap) {
+ return this->getCanvas()->peekPixels(pmap);
+}
+
+bool SkSurface::readPixels(const SkPixmap& pm, int srcX, int srcY) {
+ return this->getCanvas()->readPixels(pm, srcX, srcY);
+}
+
+bool SkSurface::readPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRowBytes,
+ int srcX, int srcY) {
+ return this->readPixels({dstInfo, dstPixels, dstRowBytes}, srcX, srcY);
+}
+
+bool SkSurface::readPixels(const SkBitmap& bitmap, int srcX, int srcY) {
+ SkPixmap pm;
+ return bitmap.peekPixels(&pm) && this->readPixels(pm, srcX, srcY);
+}
+
+void SkSurface::asyncRescaleAndReadPixels(const SkImageInfo& info,
+ const SkIRect& srcRect,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ if (!SkIRect::MakeWH(this->width(), this->height()).contains(srcRect) ||
+ !SkImageInfoIsValid(info)) {
+ callback(context, nullptr);
+ return;
+ }
+ asSB(this)->onAsyncRescaleAndReadPixels(
+ info, srcRect, rescaleGamma, rescaleMode, callback, context);
+}
+
+void SkSurface::asyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ const SkIRect& srcRect,
+ const SkISize& dstSize,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ if (!SkIRect::MakeWH(this->width(), this->height()).contains(srcRect) || dstSize.isZero() ||
+ (dstSize.width() & 0b1) || (dstSize.height() & 0b1)) {
+ callback(context, nullptr);
+ return;
+ }
+ asSB(this)->onAsyncRescaleAndReadPixelsYUV420(yuvColorSpace,
+ std::move(dstColorSpace),
+ srcRect,
+ dstSize,
+ rescaleGamma,
+ rescaleMode,
+ callback,
+ context);
+}
+
+void SkSurface::writePixels(const SkPixmap& pmap, int x, int y) {
+ if (pmap.addr() == nullptr || pmap.width() <= 0 || pmap.height() <= 0) {
+ return;
+ }
+
+ const SkIRect srcR = SkIRect::MakeXYWH(x, y, pmap.width(), pmap.height());
+ const SkIRect dstR = SkIRect::MakeWH(this->width(), this->height());
+ if (SkIRect::Intersects(srcR, dstR)) {
+ ContentChangeMode mode = kRetain_ContentChangeMode;
+ if (srcR.contains(dstR)) {
+ mode = kDiscard_ContentChangeMode;
+ }
+ if (!asSB(this)->aboutToDraw(mode)) {
+ return;
+ }
+ asSB(this)->onWritePixels(pmap, x, y);
+ }
+}
+
+void SkSurface::writePixels(const SkBitmap& src, int x, int y) {
+ SkPixmap pm;
+ if (src.peekPixels(&pm)) {
+ this->writePixels(pm, x, y);
+ }
+}
+
+GrRecordingContext* SkSurface::recordingContext() {
+ return asSB(this)->onGetRecordingContext();
+}
+
+skgpu::graphite::Recorder* SkSurface::recorder() {
+ return asSB(this)->onGetRecorder();
+}
+
+bool SkSurface::wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
+ bool deleteSemaphoresAfterWait) {
+ return asSB(this)->onWait(numSemaphores, waitSemaphores, deleteSemaphoresAfterWait);
+}
+
+bool SkSurface::characterize(SkSurfaceCharacterization* characterization) const {
+ return asConstSB(this)->onCharacterize(characterization);
+}
+
+bool SkSurface::isCompatible(const SkSurfaceCharacterization& characterization) const {
+ return asConstSB(this)->onIsCompatible(characterization);
+}
+
+bool SkSurface::draw(sk_sp<const SkDeferredDisplayList> ddl, int xOffset, int yOffset) {
+ if (xOffset != 0 || yOffset != 0) {
+ return false; // the offsets currently aren't supported
+ }
+
+ return asSB(this)->onDraw(std::move(ddl), { xOffset, yOffset });
+}
+
+#if defined(SK_GANESH)
+GrBackendTexture SkSurface::getBackendTexture(BackendHandleAccess access) {
+ return asSB(this)->onGetBackendTexture(access);
+}
+
+GrBackendRenderTarget SkSurface::getBackendRenderTarget(BackendHandleAccess access) {
+ return asSB(this)->onGetBackendRenderTarget(access);
+}
+
+bool SkSurface::replaceBackendTexture(const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin, ContentChangeMode mode,
+ TextureReleaseProc textureReleaseProc,
+ ReleaseContext releaseContext) {
+ return asSB(this)->onReplaceBackendTexture(backendTexture, origin, mode, textureReleaseProc,
+ releaseContext);
+}
+
+void SkSurface::resolveMSAA() {
+ asSB(this)->onResolveMSAA();
+}
+
+GrSemaphoresSubmitted SkSurface::flush(BackendSurfaceAccess access, const GrFlushInfo& flushInfo) {
+ return asSB(this)->onFlush(access, flushInfo, nullptr);
+}
+
+GrSemaphoresSubmitted SkSurface::flush(const GrFlushInfo& info,
+ const skgpu::MutableTextureState* newState) {
+ return asSB(this)->onFlush(BackendSurfaceAccess::kNoAccess, info, newState);
+}
+
+void SkSurface::flush() {
+ this->flush({});
+}
+#else
+void SkSurface::flush() {} // Flush is a no-op for CPU surfaces
+
+void SkSurface::flushAndSubmit(bool syncCpu) {}
+
+// TODO(kjlubick, scroggo) Remove this once Android is updated.
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrRecordingContext*,
+ skgpu::Budgeted,
+ const SkImageInfo&,
+ int,
+ GrSurfaceOrigin,
+ const SkSurfaceProps*,
+ bool) {
+ return nullptr;
+}
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Base.cpp b/gfx/skia/skia/src/image/SkSurface_Base.cpp
new file mode 100644
index 0000000000..b5b993b7c1
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Base.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkSurface_Base.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkCapabilities.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkRescaleAndReadPixels.h"
+
+#include <atomic>
+#include <cstdint>
+#include <memory>
+
+class GrRecordingContext;
+class SkColorSpace;
+class SkPaint;
+class SkSurfaceProps;
+namespace skgpu { namespace graphite { class Recorder; } }
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrBackendSurface.h"
+#endif
+
+
+SkSurface_Base::SkSurface_Base(int width, int height, const SkSurfaceProps* props)
+ : INHERITED(width, height, props) {
+}
+
+SkSurface_Base::SkSurface_Base(const SkImageInfo& info, const SkSurfaceProps* props)
+ : INHERITED(info, props) {
+}
+
+SkSurface_Base::~SkSurface_Base() {
+ // in case the canvas outlives us, we null the callback
+ if (fCachedCanvas) {
+ fCachedCanvas->setSurfaceBase(nullptr);
+ }
+#if defined(SK_GANESH)
+ if (fCachedImage) {
+ as_IB(fCachedImage.get())->generatingSurfaceIsDeleted();
+ }
+#endif
+}
+
+GrRecordingContext* SkSurface_Base::onGetRecordingContext() {
+ return nullptr;
+}
+
+skgpu::graphite::Recorder* SkSurface_Base::onGetRecorder() {
+ return nullptr;
+}
+
+#if defined(SK_GANESH)
+GrBackendTexture SkSurface_Base::onGetBackendTexture(BackendHandleAccess) {
+ return GrBackendTexture(); // invalid
+}
+
+GrBackendRenderTarget SkSurface_Base::onGetBackendRenderTarget(BackendHandleAccess) {
+ return GrBackendRenderTarget(); // invalid
+}
+
+bool SkSurface_Base::onReplaceBackendTexture(const GrBackendTexture&,
+ GrSurfaceOrigin, ContentChangeMode,
+ TextureReleaseProc,
+ ReleaseContext) {
+ return false;
+}
+#endif
+
+void SkSurface_Base::onDraw(SkCanvas* canvas, SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ auto image = this->makeImageSnapshot();
+ if (image) {
+ canvas->drawImage(image.get(), x, y, sampling, paint);
+ }
+}
+
+void SkSurface_Base::onAsyncRescaleAndReadPixels(const SkImageInfo& info,
+ SkIRect origSrcRect,
+ SkSurface::RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ SkSurface::ReadPixelsCallback callback,
+ SkSurface::ReadPixelsContext context) {
+ SkBitmap src;
+ SkPixmap peek;
+ SkIRect srcRect;
+ if (this->peekPixels(&peek)) {
+ src.installPixels(peek);
+ srcRect = origSrcRect;
+ } else {
+ src.setInfo(this->imageInfo().makeDimensions(origSrcRect.size()));
+ src.allocPixels();
+ if (!this->readPixels(src, origSrcRect.x(), origSrcRect.y())) {
+ callback(context, nullptr);
+ return;
+ }
+ srcRect = SkIRect::MakeSize(src.dimensions());
+ }
+ return SkRescaleAndReadPixels(src, info, srcRect, rescaleGamma, rescaleMode, callback,
+ context);
+}
+
+void SkSurface_Base::onAsyncRescaleAndReadPixelsYUV420(
+ SkYUVColorSpace yuvColorSpace, sk_sp<SkColorSpace> dstColorSpace, SkIRect srcRect,
+ SkISize dstSize, RescaleGamma rescaleGamma, RescaleMode,
+ ReadPixelsCallback callback, ReadPixelsContext context) {
+ // TODO: Call non-YUV asyncRescaleAndReadPixels and then make our callback convert to YUV and
+ // call client's callback.
+ callback(context, nullptr);
+}
+
+bool SkSurface_Base::outstandingImageSnapshot() const {
+ return fCachedImage && !fCachedImage->unique();
+}
+
+bool SkSurface_Base::aboutToDraw(ContentChangeMode mode) {
+ this->dirtyGenerationID();
+
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+
+ if (fCachedImage) {
+ // the surface may need to fork its backend, if it's sharing it with
+ // the cached image. Note: we only call if there is an outstanding owner
+ // on the image (besides us).
+ bool unique = fCachedImage->unique();
+ if (!unique) {
+ if (!this->onCopyOnWrite(mode)) {
+ return false;
+ }
+ }
+
+ // regardless of copy-on-write, we must drop our cached image now, so
+ // that the next request will get our new contents.
+ fCachedImage.reset();
+
+ if (unique) {
+ // Our content isn't held by any image now, so we can consider that content mutable.
+ // Raster surfaces need to be told it's safe to consider its pixels mutable again.
+ // We make this call after the ->unref() so the subclass can assert there are no images.
+ this->onRestoreBackingMutability();
+ }
+ } else if (kDiscard_ContentChangeMode == mode) {
+ this->onDiscard();
+ }
+ return true;
+}
+
+uint32_t SkSurface_Base::newGenerationID() {
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+ static std::atomic<uint32_t> nextID{1};
+ return nextID.fetch_add(1, std::memory_order_relaxed);
+}
+
+sk_sp<const SkCapabilities> SkSurface_Base::onCapabilities() {
+ return SkCapabilities::RasterBackend();
+}
diff --git a/gfx/skia/skia/src/image/SkSurface_Base.h b/gfx/skia/skia/src/image/SkSurface_Base.h
new file mode 100644
index 0000000000..13634b2010
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Base.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_Base_DEFINED
+#define SkSurface_Base_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkDeferredDisplayList.h" // IWYU pragma: keep
+#include "include/core/SkImage.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTypes.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrTypes.h"
+#endif
+
+#include <cstdint>
+#include <memory>
+
+class GrBackendSemaphore;
+class GrRecordingContext;
+class SkCapabilities;
+class SkColorSpace;
+class SkPaint;
+class SkPixmap;
+class SkSurfaceCharacterization;
+class SkSurfaceProps;
+enum SkYUVColorSpace : int;
+namespace skgpu { class MutableTextureState; }
+namespace skgpu { namespace graphite { class Recorder; } }
+struct SkIRect;
+struct SkISize;
+struct SkImageInfo;
+
+class SkSurface_Base : public SkSurface {
+public:
+ SkSurface_Base(int width, int height, const SkSurfaceProps*);
+ SkSurface_Base(const SkImageInfo&, const SkSurfaceProps*);
+ ~SkSurface_Base() override;
+
+ virtual GrRecordingContext* onGetRecordingContext();
+ virtual skgpu::graphite::Recorder* onGetRecorder();
+
+#if defined(SK_GANESH)
+ virtual GrBackendTexture onGetBackendTexture(BackendHandleAccess);
+ virtual GrBackendRenderTarget onGetBackendRenderTarget(BackendHandleAccess);
+ virtual bool onReplaceBackendTexture(const GrBackendTexture&,
+ GrSurfaceOrigin,
+ ContentChangeMode,
+ TextureReleaseProc,
+ ReleaseContext);
+
+ virtual void onResolveMSAA() {}
+
+ /**
+ * Issue any pending surface IO to the current backend 3D API and resolve any surface MSAA.
+ * Inserts the requested number of semaphores for the gpu to signal when work is complete on the
+ * gpu and inits the array of GrBackendSemaphores with the signaled semaphores.
+ */
+ virtual GrSemaphoresSubmitted onFlush(BackendSurfaceAccess access, const GrFlushInfo&,
+ const skgpu::MutableTextureState*) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+#endif
+
+ /**
+ * Allocate a canvas that will draw into this surface. We will cache this
+ * canvas, to return the same object to the caller multiple times. We
+ * take ownership, and will call unref() on the canvas when we go out of
+ * scope.
+ */
+ virtual SkCanvas* onNewCanvas() = 0;
+
+ virtual sk_sp<SkSurface> onNewSurface(const SkImageInfo&) = 0;
+
+ /**
+ * Allocate an SkImage that represents the current contents of the surface.
+ * This needs to be able to outlive the surface itself (if need be), and
+ * must faithfully represent the current contents, even if the surface
+ * is changed after this called (e.g. it is drawn to via its canvas).
+ *
+ * If a subset is specified, the impl must make a copy, rather than try to wait
+ * on copy-on-write.
+ */
+ virtual sk_sp<SkImage> onNewImageSnapshot(const SkIRect* subset = nullptr) { return nullptr; }
+
+#if defined(SK_GRAPHITE)
+ virtual sk_sp<SkImage> onAsImage() { return nullptr; }
+
+ virtual sk_sp<SkImage> onMakeImageCopy(const SkIRect* /* subset */,
+ skgpu::Mipmapped) {
+ return nullptr;
+ }
+#endif
+
+ virtual void onWritePixels(const SkPixmap&, int x, int y) = 0;
+
+ /**
+ * Default implementation does a rescale/read and then calls the callback.
+ */
+ virtual void onAsyncRescaleAndReadPixels(const SkImageInfo&,
+ const SkIRect srcRect,
+ RescaleGamma,
+ RescaleMode,
+ ReadPixelsCallback,
+ ReadPixelsContext);
+ /**
+ * Default implementation does a rescale/read/yuv conversion and then calls the callback.
+ */
+ virtual void onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ SkIRect srcRect,
+ SkISize dstSize,
+ RescaleGamma,
+ RescaleMode,
+ ReadPixelsCallback,
+ ReadPixelsContext);
+
+ /**
+ * Default implementation:
+ *
+ * image = this->newImageSnapshot();
+ * if (image) {
+ * image->draw(canvas, ...);
+ * image->unref();
+ * }
+ */
+ virtual void onDraw(SkCanvas*, SkScalar x, SkScalar y, const SkSamplingOptions&,const SkPaint*);
+
+ /**
+ * Called as a performance hint when the Surface is allowed to make its contents
+ * undefined.
+ */
+ virtual void onDiscard() {}
+
+ /**
+ * If the surface is about to change, we call this so that our subclass
+ * can optionally fork their backend (copy-on-write) in case it was
+ * being shared with the cachedImage.
+ *
+ * Returns false if the backing cannot be un-shared.
+ */
+ virtual bool SK_WARN_UNUSED_RESULT onCopyOnWrite(ContentChangeMode) = 0;
+
+ /**
+ * Signal the surface to remind its backing store that it's mutable again.
+ * Called only when we _didn't_ copy-on-write; we assume the copies start mutable.
+ */
+ virtual void onRestoreBackingMutability() {}
+
+ /**
+ * Causes the current backend 3D API to wait on the passed-in semaphores before executing new
+ * commands on the gpu. Any previously submitted commands will not be blocked by these
+ * semaphores.
+ */
+ virtual bool onWait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
+ bool deleteSemaphoresAfterWait) {
+ return false;
+ }
+
+ virtual bool onCharacterize(SkSurfaceCharacterization*) const { return false; }
+ virtual bool onIsCompatible(const SkSurfaceCharacterization&) const { return false; }
+ virtual bool onDraw(sk_sp<const SkDeferredDisplayList>, SkIPoint offset) {
+ return false;
+ }
+
+ // TODO: Remove this (make it pure virtual) after updating Android (which has a class derived
+ // from SkSurface_Base).
+ virtual sk_sp<const SkCapabilities> onCapabilities();
+
+ // True for surfaces instantiated by Graphite in GPU memory
+ virtual bool isGraphiteBacked() const { return false; }
+
+ inline SkCanvas* getCachedCanvas();
+ inline sk_sp<SkImage> refCachedImage();
+
+ bool hasCachedImage() const { return fCachedImage != nullptr; }
+
+ // called by SkSurface to compute a new genID
+ uint32_t newGenerationID();
+
+private:
+ std::unique_ptr<SkCanvas> fCachedCanvas;
+ sk_sp<SkImage> fCachedImage;
+
+ // Returns false if drawing should not take place (allocation failure).
+ bool SK_WARN_UNUSED_RESULT aboutToDraw(ContentChangeMode mode);
+
+ // Returns true if there is an outstanding image-snapshot, indicating that a call to aboutToDraw
+ // would trigger a copy-on-write.
+ bool outstandingImageSnapshot() const;
+
+ friend class SkCanvas;
+ friend class SkSurface;
+
+ using INHERITED = SkSurface;
+};
+
+SkCanvas* SkSurface_Base::getCachedCanvas() {
+ if (nullptr == fCachedCanvas) {
+ fCachedCanvas = std::unique_ptr<SkCanvas>(this->onNewCanvas());
+ if (fCachedCanvas) {
+ fCachedCanvas->setSurfaceBase(this);
+ }
+ }
+ return fCachedCanvas.get();
+}
+
+sk_sp<SkImage> SkSurface_Base::refCachedImage() {
+ if (fCachedImage) {
+ return fCachedImage;
+ }
+
+ fCachedImage = this->onNewImageSnapshot();
+
+ SkASSERT(!fCachedCanvas || fCachedCanvas->getSurfaceBase() == this);
+ return fCachedImage;
+}
+
+static inline SkSurface_Base* asSB(SkSurface* surface) {
+ return static_cast<SkSurface_Base*>(surface);
+}
+
+static inline const SkSurface_Base* asConstSB(const SkSurface* surface) {
+ return static_cast<const SkSurface_Base*>(surface);
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Gpu.cpp b/gfx/skia/skia/src/image/SkSurface_Gpu.cpp
new file mode 100644
index 0000000000..0a3c52071a
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Gpu.cpp
@@ -0,0 +1,813 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkSurface_Gpu.h"
+
+#if defined(SK_GANESH)
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkDeferredDisplayList.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkSurfaceCharacterization.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/gpu/GpuTypes.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/GrContextThreadSafeProxy.h"
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "include/gpu/GrTypes.h"
+#include "include/private/base/SkTo.h"
+#include "include/private/gpu/ganesh/GrTypesPriv.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkSurfacePriv.h"
+#include "src/gpu/RefCntedCallback.h"
+#include "src/gpu/SkBackingFit.h"
+#include "src/gpu/SkRenderEngineAbortf.h"
+#include "src/gpu/ganesh/Device_v1.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
+#include "src/gpu/ganesh/GrDirectContextPriv.h"
+#include "src/gpu/ganesh/GrGpuResourcePriv.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrRenderTarget.h"
+#include "src/gpu/ganesh/GrRenderTargetProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxy.h"
+#include "src/gpu/ganesh/GrSurfaceProxyPriv.h"
+#include "src/gpu/ganesh/GrSurfaceProxyView.h"
+#include "src/gpu/ganesh/GrTexture.h"
+#include "src/gpu/ganesh/GrTextureProxy.h"
+#include "src/image/SkImage_Base.h"
+#include "src/image/SkImage_Gpu.h"
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+#include "src/gpu/ganesh/GrAHardwareBufferUtils_impl.h"
+#endif
+
+#include <algorithm>
+#include <cstddef>
+#include <utility>
+
+class GrBackendSemaphore;
+class SkCapabilities;
+class SkPaint;
+class SkPixmap;
+namespace skgpu { class MutableTextureState; }
+
+SkSurface_Gpu::SkSurface_Gpu(sk_sp<skgpu::ganesh::Device> device)
+ : INHERITED(device->width(), device->height(), &device->surfaceProps())
+ , fDevice(std::move(device)) {
+ SkASSERT(fDevice->targetProxy()->priv().isExact());
+}
+
+SkSurface_Gpu::~SkSurface_Gpu() {
+}
+
+GrRecordingContext* SkSurface_Gpu::onGetRecordingContext() {
+ return fDevice->recordingContext();
+}
+
+skgpu::ganesh::Device* SkSurface_Gpu::getDevice() { return fDevice.get(); }
+
+SkImageInfo SkSurface_Gpu::imageInfo() const {
+ return fDevice->imageInfo();
+}
+
+static GrRenderTarget* prepare_rt_for_external_access(SkSurface_Gpu* surface,
+ SkSurface::BackendHandleAccess access) {
+ auto dContext = surface->recordingContext()->asDirectContext();
+ if (!dContext) {
+ return nullptr;
+ }
+ if (dContext->abandoned()) {
+ return nullptr;
+ }
+
+ switch (access) {
+ case SkSurface::kFlushRead_BackendHandleAccess:
+ break;
+ case SkSurface::kFlushWrite_BackendHandleAccess:
+ case SkSurface::kDiscardWrite_BackendHandleAccess:
+ // for now we don't special-case on Discard, but we may in the future.
+ surface->notifyContentWillChange(SkSurface::kRetain_ContentChangeMode);
+ break;
+ }
+
+ dContext->priv().flushSurface(surface->getDevice()->targetProxy());
+
+ // Grab the render target *after* firing notifications, as it may get switched if CoW kicks in.
+ return surface->getDevice()->targetProxy()->peekRenderTarget();
+}
+
+GrBackendTexture SkSurface_Gpu::onGetBackendTexture(BackendHandleAccess access) {
+ GrRenderTarget* rt = prepare_rt_for_external_access(this, access);
+ if (!rt) {
+ return GrBackendTexture(); // invalid
+ }
+ GrTexture* texture = rt->asTexture();
+ if (texture) {
+ return texture->getBackendTexture();
+ }
+ return GrBackendTexture(); // invalid
+}
+
+GrBackendRenderTarget SkSurface_Gpu::onGetBackendRenderTarget(BackendHandleAccess access) {
+ GrRenderTarget* rt = prepare_rt_for_external_access(this, access);
+ if (!rt) {
+ return GrBackendRenderTarget(); // invalid
+ }
+
+ return rt->getBackendRenderTarget();
+}
+
+SkCanvas* SkSurface_Gpu::onNewCanvas() { return new SkCanvas(fDevice); }
+
+sk_sp<SkSurface> SkSurface_Gpu::onNewSurface(const SkImageInfo& info) {
+ GrSurfaceProxyView targetView = fDevice->readSurfaceView();
+ int sampleCount = targetView.asRenderTargetProxy()->numSamples();
+ GrSurfaceOrigin origin = targetView.origin();
+ // TODO: Make caller specify this (change virtual signature of onNewSurface).
+ static const skgpu::Budgeted kBudgeted = skgpu::Budgeted::kNo;
+ return SkSurface::MakeRenderTarget(fDevice->recordingContext(), kBudgeted, info, sampleCount,
+ origin, &this->props());
+}
+
+sk_sp<SkImage> SkSurface_Gpu::onNewImageSnapshot(const SkIRect* subset) {
+ GrRenderTargetProxy* rtp = fDevice->targetProxy();
+ if (!rtp) {
+ return nullptr;
+ }
+
+ auto rContext = fDevice->recordingContext();
+
+ GrSurfaceProxyView srcView = fDevice->readSurfaceView();
+
+ skgpu::Budgeted budgeted = rtp->isBudgeted();
+
+ if (subset || !srcView.asTextureProxy() || rtp->refsWrappedObjects()) {
+ // If the original render target is a buffer originally created by the client, then we don't
+ // want to ever retarget the SkSurface at another buffer we create. If the source is a
+ // texture (and the image is not subsetted) we make a dual-proxied SkImage that will
+ // attempt to share the backing store until the surface writes to the shared backing store
+ // at which point it uses a copy.
+ if (!subset && srcView.asTextureProxy()) {
+ return SkImage_Gpu::MakeWithVolatileSrc(sk_ref_sp(rContext),
+ srcView,
+ fDevice->imageInfo().colorInfo());
+ }
+ auto rect = subset ? *subset : SkIRect::MakeSize(srcView.dimensions());
+ GrMipmapped mipmapped = srcView.mipmapped();
+ srcView = GrSurfaceProxyView::Copy(rContext,
+ std::move(srcView),
+ mipmapped,
+ rect,
+ SkBackingFit::kExact,
+ budgeted,
+ /*label=*/"SurfaceGpu_NewImageSnapshot");
+ }
+
+ const SkImageInfo info = fDevice->imageInfo();
+ if (!srcView.asTextureProxy()) {
+ return nullptr;
+ }
+ // The surfaceDrawContext coming out of SkGpuDevice should always be exact and the
+ // above copy creates a kExact surfaceContext.
+ SkASSERT(srcView.proxy()->priv().isExact());
+ return sk_make_sp<SkImage_Gpu>(sk_ref_sp(rContext),
+ kNeedNewImageUniqueID,
+ std::move(srcView),
+ info.colorInfo());
+}
+
+void SkSurface_Gpu::onWritePixels(const SkPixmap& src, int x, int y) {
+ fDevice->writePixels(src, x, y);
+}
+
+void SkSurface_Gpu::onAsyncRescaleAndReadPixels(const SkImageInfo& info,
+ SkIRect srcRect,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ fDevice->asyncRescaleAndReadPixels(info,
+ srcRect,
+ rescaleGamma,
+ rescaleMode,
+ callback,
+ context);
+}
+
+void SkSurface_Gpu::onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ SkIRect srcRect,
+ SkISize dstSize,
+ RescaleGamma rescaleGamma,
+ RescaleMode rescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) {
+ fDevice->asyncRescaleAndReadPixelsYUV420(yuvColorSpace,
+ std::move(dstColorSpace),
+ srcRect,
+ dstSize,
+ rescaleGamma,
+ rescaleMode,
+ callback,
+ context);
+}
+
+// Create a new render target and, if necessary, copy the contents of the old
+// render target into it. Note that this flushes the SkGpuDevice but
+// doesn't force an OpenGL flush.
+bool SkSurface_Gpu::onCopyOnWrite(ContentChangeMode mode) {
+ GrSurfaceProxyView readSurfaceView = fDevice->readSurfaceView();
+
+ // are we sharing our backing proxy with the image? Note this call should never create a new
+ // image because onCopyOnWrite is only called when there is a cached image.
+ sk_sp<SkImage> image = this->refCachedImage();
+ SkASSERT(image);
+
+ if (static_cast<SkImage_Gpu*>(image.get())->surfaceMustCopyOnWrite(readSurfaceView.proxy())) {
+ if (!fDevice->replaceBackingProxy(mode)) {
+ return false;
+ }
+ } else if (kDiscard_ContentChangeMode == mode) {
+ this->SkSurface_Gpu::onDiscard();
+ }
+ return true;
+}
+
+void SkSurface_Gpu::onDiscard() { fDevice->discard(); }
+
+void SkSurface_Gpu::onResolveMSAA() { fDevice->resolveMSAA(); }
+
+GrSemaphoresSubmitted SkSurface_Gpu::onFlush(BackendSurfaceAccess access, const GrFlushInfo& info,
+ const skgpu::MutableTextureState* newState) {
+
+ auto dContext = fDevice->recordingContext()->asDirectContext();
+ if (!dContext) {
+ return GrSemaphoresSubmitted::kNo;
+ }
+
+ GrRenderTargetProxy* rtp = fDevice->targetProxy();
+
+ return dContext->priv().flushSurface(rtp, access, info, newState);
+}
+
+bool SkSurface_Gpu::onWait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
+ bool deleteSemaphoresAfterWait) {
+ return fDevice->wait(numSemaphores, waitSemaphores, deleteSemaphoresAfterWait);
+}
+
+bool SkSurface_Gpu::onCharacterize(SkSurfaceCharacterization* characterization) const {
+ auto direct = fDevice->recordingContext()->asDirectContext();
+ if (!direct) {
+ return false;
+ }
+
+ SkImageInfo ii = fDevice->imageInfo();
+ if (ii.colorType() == kUnknown_SkColorType) {
+ return false;
+ }
+
+ GrSurfaceProxyView readSurfaceView = fDevice->readSurfaceView();
+ size_t maxResourceBytes = direct->getResourceCacheLimit();
+
+ bool mipmapped = readSurfaceView.asTextureProxy()
+ ? GrMipmapped::kYes == readSurfaceView.asTextureProxy()->mipmapped()
+ : false;
+
+ bool usesGLFBO0 = readSurfaceView.asRenderTargetProxy()->glRTFBOIDIs0();
+ // We should never get in the situation where we have a texture render target that is also
+ // backed by FBO 0.
+ SkASSERT(!usesGLFBO0 || !SkToBool(readSurfaceView.asTextureProxy()));
+
+ bool vkRTSupportsInputAttachment =
+ readSurfaceView.asRenderTargetProxy()->supportsVkInputAttachment();
+
+ GrBackendFormat format = readSurfaceView.proxy()->backendFormat();
+ int numSamples = readSurfaceView.asRenderTargetProxy()->numSamples();
+ GrProtected isProtected = readSurfaceView.asRenderTargetProxy()->isProtected();
+
+ characterization->set(
+ direct->threadSafeProxy(),
+ maxResourceBytes,
+ ii,
+ format,
+ readSurfaceView.origin(),
+ numSamples,
+ SkSurfaceCharacterization::Textureable(SkToBool(readSurfaceView.asTextureProxy())),
+ SkSurfaceCharacterization::MipMapped(mipmapped),
+ SkSurfaceCharacterization::UsesGLFBO0(usesGLFBO0),
+ SkSurfaceCharacterization::VkRTSupportsInputAttachment(vkRTSupportsInputAttachment),
+ SkSurfaceCharacterization::VulkanSecondaryCBCompatible(false),
+ isProtected,
+ this->props());
+ return true;
+}
+
+void SkSurface_Gpu::onDraw(SkCanvas* canvas, SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ // If the dst is also GPU we try to not force a new image snapshot (by calling the base class
+ // onDraw) since that may not always perform the copy-on-write optimization.
+ auto tryDraw = [&] {
+ auto surfaceContext = fDevice->recordingContext();
+ auto canvasContext = GrAsDirectContext(canvas->recordingContext());
+ if (!canvasContext) {
+ return false;
+ }
+ if (canvasContext->priv().contextID() != surfaceContext->priv().contextID()) {
+ return false;
+ }
+ GrSurfaceProxyView srcView = fDevice->readSurfaceView();
+ if (!srcView.asTextureProxyRef()) {
+ return false;
+ }
+ // Possibly we could skip making an image here if SkGpuDevice exposed a lower level way
+ // of drawing a texture proxy.
+ const SkImageInfo info = fDevice->imageInfo();
+ sk_sp<SkImage> image = sk_make_sp<SkImage_Gpu>(sk_ref_sp(canvasContext),
+ kNeedNewImageUniqueID,
+ std::move(srcView),
+ info.colorInfo());
+ canvas->drawImage(image.get(), x, y, sampling, paint);
+ return true;
+ };
+ if (!tryDraw()) {
+ INHERITED::onDraw(canvas, x, y, sampling, paint);
+ }
+}
+
+bool SkSurface_Gpu::onIsCompatible(const SkSurfaceCharacterization& characterization) const {
+ auto direct = fDevice->recordingContext()->asDirectContext();
+ if (!direct) {
+ return false;
+ }
+
+ if (!characterization.isValid()) {
+ return false;
+ }
+
+ if (characterization.vulkanSecondaryCBCompatible()) {
+ return false;
+ }
+
+ SkImageInfo ii = fDevice->imageInfo();
+ if (ii.colorType() == kUnknown_SkColorType) {
+ return false;
+ }
+
+ GrSurfaceProxyView targetView = fDevice->readSurfaceView();
+ // As long as the current state of the context allows for greater or equal resources,
+ // we allow the DDL to be replayed.
+ // DDL TODO: should we just remove the resource check and ignore the cache limits on playback?
+ size_t maxResourceBytes = direct->getResourceCacheLimit();
+
+ if (characterization.isTextureable()) {
+ if (!targetView.asTextureProxy()) {
+ // If the characterization was textureable we require the replay dest to also be
+ // textureable. If the characterized surface wasn't textureable we allow the replay
+ // dest to be textureable.
+ return false;
+ }
+
+ if (characterization.isMipMapped() &&
+ GrMipmapped::kNo == targetView.asTextureProxy()->mipmapped()) {
+ // Fail if the DDL's surface was mipmapped but the replay surface is not.
+ // Allow drawing to proceed if the DDL was not mipmapped but the replay surface is.
+ return false;
+ }
+ }
+
+ if (characterization.usesGLFBO0() != targetView.asRenderTargetProxy()->glRTFBOIDIs0()) {
+ // FBO0-ness effects how MSAA and window rectangles work. If the characterization was
+ // tagged as FBO0 it would never have been allowed to use window rectangles. If MSAA
+ // was also never used then a DDL recorded with this characterization should be replayable
+ // on a non-FBO0 surface.
+ if (!characterization.usesGLFBO0() || characterization.sampleCount() > 1) {
+ return false;
+ }
+ }
+
+ GrBackendFormat format = targetView.asRenderTargetProxy()->backendFormat();
+ int numSamples = targetView.asRenderTargetProxy()->numSamples();
+ GrProtected isProtected = targetView.proxy()->isProtected();
+
+ return characterization.contextInfo() &&
+ characterization.contextInfo()->priv().matches(direct) &&
+ characterization.cacheMaxResourceBytes() <= maxResourceBytes &&
+ characterization.origin() == targetView.origin() &&
+ characterization.backendFormat() == format &&
+ characterization.width() == ii.width() &&
+ characterization.height() == ii.height() &&
+ characterization.colorType() == ii.colorType() &&
+ characterization.sampleCount() == numSamples &&
+ SkColorSpace::Equals(characterization.colorSpace(), ii.colorInfo().colorSpace()) &&
+ characterization.isProtected() == isProtected &&
+ characterization.surfaceProps() == fDevice->surfaceProps();
+}
+
+bool SkSurface_Gpu::onDraw(sk_sp<const SkDeferredDisplayList> ddl, SkIPoint offset) {
+ if (!ddl || !this->isCompatible(ddl->characterization())) {
+ return false;
+ }
+
+ auto direct = fDevice->recordingContext()->asDirectContext();
+ if (!direct || direct->abandoned()) {
+ return false;
+ }
+
+ GrSurfaceProxyView view = fDevice->readSurfaceView();
+
+ direct->priv().createDDLTask(std::move(ddl), view.asRenderTargetProxyRef(), offset);
+ return true;
+}
+
+sk_sp<const SkCapabilities> SkSurface_Gpu::onCapabilities() {
+ return fDevice->recordingContext()->skCapabilities();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrRecordingContext* rContext,
+ const SkSurfaceCharacterization& c,
+ skgpu::Budgeted budgeted) {
+ if (!rContext || !c.isValid()) {
+ return nullptr;
+ }
+
+ if (c.usesGLFBO0()) {
+ // If we are making the surface we will never use FBO0.
+ return nullptr;
+ }
+
+ if (c.vulkanSecondaryCBCompatible()) {
+ return nullptr;
+ }
+
+ auto device = rContext->priv().createDevice(budgeted,
+ c.imageInfo(),
+ SkBackingFit::kExact,
+ c.sampleCount(),
+ GrMipmapped(c.isMipMapped()),
+ c.isProtected(),
+ c.origin(),
+ c.surfaceProps(),
+ skgpu::ganesh::Device::InitContents::kClear);
+ if (!device) {
+ return nullptr;
+ }
+
+ sk_sp<SkSurface> result = sk_make_sp<SkSurface_Gpu>(std::move(device));
+#ifdef SK_DEBUG
+ if (result) {
+ SkASSERT(result->isCompatible(c));
+ }
+#endif
+
+ return result;
+}
+
+static bool validate_backend_texture(const GrCaps* caps, const GrBackendTexture& tex,
+ int sampleCnt, GrColorType grCT,
+ bool texturable) {
+ if (!tex.isValid()) {
+ return false;
+ }
+
+ GrBackendFormat backendFormat = tex.getBackendFormat();
+ if (!backendFormat.isValid()) {
+ RENDERENGINE_ABORTF("%s failed due to an invalid format", __func__);
+ return false;
+ }
+
+ if (!caps->areColorTypeAndFormatCompatible(grCT, backendFormat)) {
+ RENDERENGINE_ABORTF("%s failed due to an invalid format and colorType combination",
+ __func__);
+ return false;
+ }
+
+ if (!caps->isFormatAsColorTypeRenderable(grCT, backendFormat, sampleCnt)) {
+ RENDERENGINE_ABORTF(
+ "%s failed due to no supported rendering path for the selected "
+ "format and colorType",
+ __func__);
+ return false;
+ }
+
+ if (texturable && !caps->isFormatTexturable(backendFormat, tex.textureType())) {
+ RENDERENGINE_ABORTF(
+ "%s failed due to no texturing support for the selected format and "
+ "colorType",
+ __func__);
+ return false;
+ }
+
+ return true;
+}
+
+sk_sp<SkSurface> SkSurface::MakeRenderTarget(GrRecordingContext* rContext,
+ skgpu::Budgeted budgeted,
+ const SkImageInfo& info,
+ int sampleCount,
+ GrSurfaceOrigin origin,
+ const SkSurfaceProps* props,
+ bool shouldCreateWithMips) {
+ if (!rContext) {
+ return nullptr;
+ }
+ sampleCount = std::max(1, sampleCount);
+ GrMipmapped mipmapped = shouldCreateWithMips ? GrMipmapped::kYes : GrMipmapped::kNo;
+
+ if (!rContext->priv().caps()->mipmapSupport()) {
+ mipmapped = GrMipmapped::kNo;
+ }
+
+ auto device = rContext->priv().createDevice(budgeted,
+ info,
+ SkBackingFit::kExact,
+ sampleCount,
+ mipmapped,
+ GrProtected::kNo,
+ origin,
+ SkSurfacePropsCopyOrDefault(props),
+ skgpu::ganesh::Device::InitContents::kClear);
+ if (!device) {
+ return nullptr;
+ }
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendTexture(GrRecordingContext* rContext,
+ const GrBackendTexture& tex,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props,
+ SkSurface::TextureReleaseProc textureReleaseProc,
+ SkSurface::ReleaseContext releaseContext) {
+ auto releaseHelper = skgpu::RefCntedCallback::Make(textureReleaseProc, releaseContext);
+
+ if (!rContext) {
+ RENDERENGINE_ABORTF("%s failed due to a null context ", __func__);
+ return nullptr;
+ }
+ sampleCnt = std::max(1, sampleCnt);
+
+ GrColorType grColorType = SkColorTypeToGrColorType(colorType);
+ if (grColorType == GrColorType::kUnknown) {
+ RENDERENGINE_ABORTF(
+ "%s failed due to an unsupported colorType %d", __func__, colorType);
+ return nullptr;
+ }
+
+ if (!validate_backend_texture(rContext->priv().caps(), tex, sampleCnt, grColorType, true)) {
+ return nullptr;
+ }
+
+ sk_sp<GrTextureProxy> proxy(rContext->priv().proxyProvider()->wrapRenderableBackendTexture(
+ tex, sampleCnt, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
+ std::move(releaseHelper)));
+ if (!proxy) {
+#ifdef SK_IN_RENDERENGINE
+ GrGLTextureInfo textureInfo;
+ bool retrievedTextureInfo = tex.getGLTextureInfo(&textureInfo);
+ RENDERENGINE_ABORTF("%s failed to wrap the texture into a renderable target "
+ "\n\tGrBackendTexture: (%i x %i) hasMipmaps: %i isProtected: %i texType: %i"
+ "\n\t\tGrGLTextureInfo: success: %i fTarget: %u fFormat: %u"
+ "\n\tmaxRenderTargetSize: %d",
+ __func__, tex.width(), tex.height(), tex.hasMipmaps(),
+ tex.isProtected(), static_cast<int>(tex.textureType()),
+ retrievedTextureInfo, textureInfo.fTarget, textureInfo.fFormat,
+ rContext->priv().caps()->maxRenderTargetSize());
+#endif
+ return nullptr;
+ }
+
+ auto device = rContext->priv().createDevice(grColorType,
+ std::move(proxy),
+ std::move(colorSpace),
+ origin,
+ SkSurfacePropsCopyOrDefault(props),
+ skgpu::ganesh::Device::InitContents::kUninit);
+ if (!device) {
+ RENDERENGINE_ABORTF("%s failed to wrap the renderTarget into a surface", __func__);
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+bool SkSurface_Gpu::onReplaceBackendTexture(const GrBackendTexture& backendTexture,
+ GrSurfaceOrigin origin,
+ ContentChangeMode mode,
+ TextureReleaseProc releaseProc,
+ ReleaseContext releaseContext) {
+ auto releaseHelper = skgpu::RefCntedCallback::Make(releaseProc, releaseContext);
+
+ auto rContext = fDevice->recordingContext();
+ if (rContext->abandoned()) {
+ return false;
+ }
+ if (!backendTexture.isValid()) {
+ return false;
+ }
+ if (backendTexture.width() != this->width() || backendTexture.height() != this->height()) {
+ return false;
+ }
+ auto* oldRTP = fDevice->targetProxy();
+ auto oldProxy = sk_ref_sp(oldRTP->asTextureProxy());
+ if (!oldProxy) {
+ return false;
+ }
+ auto* oldTexture = oldProxy->peekTexture();
+ if (!oldTexture) {
+ return false;
+ }
+ if (!oldTexture->resourcePriv().refsWrappedObjects()) {
+ return false;
+ }
+ if (oldTexture->backendFormat() != backendTexture.getBackendFormat()) {
+ return false;
+ }
+ if (oldTexture->getBackendTexture().isSameTexture(backendTexture)) {
+ return false;
+ }
+ SkASSERT(oldTexture->asRenderTarget());
+ int sampleCnt = oldTexture->asRenderTarget()->numSamples();
+ GrColorType grColorType = SkColorTypeToGrColorType(this->getCanvas()->imageInfo().colorType());
+ if (!validate_backend_texture(rContext->priv().caps(), backendTexture,
+ sampleCnt, grColorType, true)) {
+ return false;
+ }
+
+ sk_sp<SkColorSpace> colorSpace = fDevice->imageInfo().refColorSpace();
+
+ SkASSERT(sampleCnt > 0);
+ sk_sp<GrTextureProxy> proxy(rContext->priv().proxyProvider()->wrapRenderableBackendTexture(
+ backendTexture, sampleCnt, kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
+ std::move(releaseHelper)));
+ if (!proxy) {
+ return false;
+ }
+
+ return fDevice->replaceBackingProxy(mode, sk_ref_sp(proxy->asRenderTargetProxy()), grColorType,
+ std::move(colorSpace), origin, this->props());
+}
+
+bool validate_backend_render_target(const GrCaps* caps, const GrBackendRenderTarget& rt,
+ GrColorType grCT) {
+ if (!caps->areColorTypeAndFormatCompatible(grCT, rt.getBackendFormat())) {
+ return false;
+ }
+
+ if (!caps->isFormatAsColorTypeRenderable(grCT, rt.getBackendFormat(), rt.sampleCnt())) {
+ return false;
+ }
+
+ // We require the stencil bits to be either 0, 8, or 16.
+ int stencilBits = rt.stencilBits();
+ if (stencilBits != 0 && stencilBits != 8 && stencilBits != 16) {
+ return false;
+ }
+
+ return true;
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromBackendRenderTarget(GrRecordingContext* rContext,
+ const GrBackendRenderTarget& rt,
+ GrSurfaceOrigin origin,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* props,
+ SkSurface::RenderTargetReleaseProc relProc,
+ SkSurface::ReleaseContext releaseContext) {
+ auto releaseHelper = skgpu::RefCntedCallback::Make(relProc, releaseContext);
+
+ if (!rContext) {
+ return nullptr;
+ }
+
+ GrColorType grColorType = SkColorTypeToGrColorType(colorType);
+ if (grColorType == GrColorType::kUnknown) {
+ return nullptr;
+ }
+
+ if (!validate_backend_render_target(rContext->priv().caps(), rt, grColorType)) {
+ return nullptr;
+ }
+
+ auto proxyProvider = rContext->priv().proxyProvider();
+ auto proxy = proxyProvider->wrapBackendRenderTarget(rt, std::move(releaseHelper));
+ if (!proxy) {
+ return nullptr;
+ }
+
+ auto device = rContext->priv().createDevice(grColorType,
+ std::move(proxy),
+ std::move(colorSpace),
+ origin,
+ SkSurfacePropsCopyOrDefault(props),
+ skgpu::ganesh::Device::InitContents::kUninit);
+ if (!device) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+#if defined(SK_BUILD_FOR_ANDROID) && __ANDROID_API__ >= 26
+sk_sp<SkSurface> SkSurface::MakeFromAHardwareBuffer(GrDirectContext* dContext,
+ AHardwareBuffer* hardwareBuffer,
+ GrSurfaceOrigin origin,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ , bool fromWindow
+#endif
+ ) {
+ AHardwareBuffer_Desc bufferDesc;
+ AHardwareBuffer_describe(hardwareBuffer, &bufferDesc);
+
+ if (!SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT)) {
+ return nullptr;
+ }
+
+ bool isTextureable = SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE);
+
+ GrBackendFormat backendFormat = GrAHardwareBufferUtils::GetBackendFormat(dContext,
+ hardwareBuffer,
+ bufferDesc.format,
+ true);
+ if (!backendFormat.isValid()) {
+ return nullptr;
+ }
+
+ if (isTextureable) {
+ GrAHardwareBufferUtils::DeleteImageProc deleteImageProc = nullptr;
+ GrAHardwareBufferUtils::UpdateImageProc updateImageProc = nullptr;
+ GrAHardwareBufferUtils::TexImageCtx deleteImageCtx = nullptr;
+
+ bool isProtectedContent =
+ SkToBool(bufferDesc.usage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT);
+
+ bool fromWindowLocal = false;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ fromWindowLocal = fromWindow;
+#endif
+
+ GrBackendTexture backendTexture =
+ GrAHardwareBufferUtils::MakeBackendTexture(dContext,
+ hardwareBuffer,
+ bufferDesc.width,
+ bufferDesc.height,
+ &deleteImageProc,
+ &updateImageProc,
+ &deleteImageCtx,
+ isProtectedContent,
+ backendFormat,
+ true,
+ fromWindowLocal);
+ if (!backendTexture.isValid()) {
+ return nullptr;
+ }
+
+ SkColorType colorType =
+ GrAHardwareBufferUtils::GetSkColorTypeFromBufferFormat(bufferDesc.format);
+
+ sk_sp<SkSurface> surface = SkSurface::MakeFromBackendTexture(dContext, backendTexture,
+ origin, 0, colorType, std::move(colorSpace), surfaceProps, deleteImageProc,
+ deleteImageCtx);
+
+ if (!surface) {
+ SkASSERT(deleteImageProc);
+ deleteImageProc(deleteImageCtx);
+ }
+
+ return surface;
+ } else {
+ return nullptr;
+ }
+}
+#endif
+
+void SkSurface::flushAndSubmit(bool syncCpu) {
+ this->flush(BackendSurfaceAccess::kNoAccess, GrFlushInfo());
+
+ auto direct = GrAsDirectContext(this->recordingContext());
+ if (direct) {
+ direct->submit(syncCpu);
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Gpu.h b/gfx/skia/skia/src/image/SkSurface_Gpu.h
new file mode 100644
index 0000000000..71bc2c5983
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Gpu.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_Gpu_DEFINED
+#define SkSurface_Gpu_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_GANESH)
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "src/image/SkSurface_Base.h"
+
+class GrBackendSemaphore;
+class GrRecordingContext;
+class SkCanvas;
+class SkCapabilities;
+class SkColorSpace;
+class SkDeferredDisplayList;
+class SkImage;
+class SkPaint;
+class SkPixmap;
+class SkSurface;
+class SkSurfaceCharacterization;
+enum GrSurfaceOrigin : int;
+enum class GrSemaphoresSubmitted : bool;
+namespace skgpu { class MutableTextureState; }
+namespace skgpu {
+namespace ganesh {
+class Device;
+}
+} // namespace skgpu
+struct GrFlushInfo;
+struct SkIPoint;
+struct SkIRect;
+struct SkISize;
+
+class SkSurface_Gpu : public SkSurface_Base {
+public:
+ SkSurface_Gpu(sk_sp<skgpu::ganesh::Device>);
+ ~SkSurface_Gpu() override;
+
+ SkImageInfo imageInfo() const override;
+
+ GrRecordingContext* onGetRecordingContext() override;
+
+ GrBackendTexture onGetBackendTexture(BackendHandleAccess) override;
+ GrBackendRenderTarget onGetBackendRenderTarget(BackendHandleAccess) override;
+ bool onReplaceBackendTexture(const GrBackendTexture&, GrSurfaceOrigin, ContentChangeMode, TextureReleaseProc,
+ ReleaseContext) override;
+
+ SkCanvas* onNewCanvas() override;
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&) override;
+ sk_sp<SkImage> onNewImageSnapshot(const SkIRect* subset) override;
+ void onWritePixels(const SkPixmap&, int x, int y) override;
+ void onAsyncRescaleAndReadPixels(const SkImageInfo& info, SkIRect srcRect,
+ RescaleGamma rescaleGamma, RescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) override;
+ void onAsyncRescaleAndReadPixelsYUV420(SkYUVColorSpace yuvColorSpace,
+ sk_sp<SkColorSpace> dstColorSpace,
+ SkIRect srcRect,
+ SkISize dstSize,
+ RescaleGamma rescaleGamma,
+ RescaleMode,
+ ReadPixelsCallback callback,
+ ReadPixelsContext context) override;
+ bool onCopyOnWrite(ContentChangeMode) override;
+ void onDiscard() override;
+ void onResolveMSAA() override;
+ GrSemaphoresSubmitted onFlush(BackendSurfaceAccess access, const GrFlushInfo& info,
+ const skgpu::MutableTextureState*) override;
+ bool onWait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
+ bool deleteSemaphoresAfterWait) override;
+ bool onCharacterize(SkSurfaceCharacterization*) const override;
+ bool onIsCompatible(const SkSurfaceCharacterization&) const override;
+ void onDraw(SkCanvas* canvas, SkScalar x, SkScalar y, const SkSamplingOptions&,
+ const SkPaint* paint) override;
+ bool onDraw(sk_sp<const SkDeferredDisplayList>, SkIPoint offset) override;
+
+ sk_sp<const SkCapabilities> onCapabilities() override;
+ skgpu::ganesh::Device* getDevice();
+
+private:
+ sk_sp<skgpu::ganesh::Device> fDevice;
+
+ using INHERITED = SkSurface_Base;
+};
+
+#endif // defined(SK_GANESH)
+
+#endif // SkSurface_Gpu_DEFINED
diff --git a/gfx/skia/skia/src/image/SkSurface_GpuMtl.mm b/gfx/skia/skia/src/image/SkSurface_GpuMtl.mm
new file mode 100644
index 0000000000..cad448ae80
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_GpuMtl.mm
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSurface.h"
+#include "include/gpu/GrBackendSurface.h"
+#include "include/gpu/mtl/GrMtlTypes.h"
+#include "src/gpu/ganesh/GrProxyProvider.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/GrResourceProvider.h"
+#include "src/gpu/ganesh/GrResourceProviderPriv.h"
+#include "src/gpu/ganesh/SurfaceDrawContext.h"
+#include "src/image/SkSurface_Gpu.h"
+
+#if defined(SK_GANESH)
+
+#include "src/gpu/ganesh/GrSurface.h"
+#include "src/gpu/ganesh/mtl/GrMtlTextureRenderTarget.h"
+
+#ifdef SK_METAL
+#import <Metal/Metal.h>
+#import <QuartzCore/CAMetalLayer.h>
+#import <MetalKit/MetalKit.h>
+
+sk_sp<SkSurface> SkSurface::MakeFromCAMetalLayer(GrRecordingContext* rContext,
+ GrMTLHandle layer,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps,
+ GrMTLHandle* drawable) {
+ GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
+
+ CAMetalLayer* metalLayer = (__bridge CAMetalLayer*)layer;
+ GrBackendFormat backendFormat = GrBackendFormat::MakeMtl(metalLayer.pixelFormat);
+
+ GrColorType grColorType = SkColorTypeToGrColorType(colorType);
+
+ SkISize dims = {(int)metalLayer.drawableSize.width, (int)metalLayer.drawableSize.height};
+
+ GrProxyProvider::TextureInfo texInfo;
+ texInfo.fMipmapped = GrMipmapped::kNo;
+ texInfo.fTextureType = GrTextureType::k2D;
+
+ sk_sp<GrRenderTargetProxy> proxy = proxyProvider->createLazyRenderTargetProxy(
+ [layer, drawable](GrResourceProvider* resourceProvider,
+ const GrSurfaceProxy::LazySurfaceDesc& desc) {
+ CAMetalLayer* metalLayer = (__bridge CAMetalLayer*)layer;
+ id<CAMetalDrawable> currentDrawable = [metalLayer nextDrawable];
+
+ GrMtlGpu* mtlGpu = (GrMtlGpu*) resourceProvider->priv().gpu();
+ sk_sp<GrRenderTarget> surface;
+ if (metalLayer.framebufferOnly) {
+ surface = GrMtlRenderTarget::MakeWrappedRenderTarget(
+ mtlGpu, desc.fDimensions, desc.fSampleCnt, currentDrawable.texture);
+ } else {
+ surface = GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
+ mtlGpu, desc.fDimensions, desc.fSampleCnt, currentDrawable.texture,
+ GrWrapCacheable::kNo);
+ }
+ if (surface && desc.fSampleCnt > 1) {
+ surface->setRequiresManualMSAAResolve();
+ }
+
+ *drawable = (__bridge_retained GrMTLHandle) currentDrawable;
+ return GrSurfaceProxy::LazyCallbackResult(std::move(surface));
+ },
+ backendFormat,
+ dims,
+ sampleCnt,
+ sampleCnt > 1 ? GrInternalSurfaceFlags::kRequiresManualMSAAResolve
+ : GrInternalSurfaceFlags::kNone,
+ metalLayer.framebufferOnly ? nullptr : &texInfo,
+ GrMipmapStatus::kNotAllocated,
+ SkBackingFit::kExact,
+ skgpu::Budgeted::kYes,
+ GrProtected::kNo,
+ false,
+ GrSurfaceProxy::UseAllocator::kYes);
+
+ auto device = rContext->priv().createDevice(grColorType,
+ std::move(proxy),
+ std::move(colorSpace),
+ origin,
+ SkSurfacePropsCopyOrDefault(surfaceProps),
+ skgpu::ganesh::Device::InitContents::kUninit);
+ if (!device) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+sk_sp<SkSurface> SkSurface::MakeFromMTKView(GrRecordingContext* rContext,
+ GrMTLHandle view,
+ GrSurfaceOrigin origin,
+ int sampleCnt,
+ SkColorType colorType,
+ sk_sp<SkColorSpace> colorSpace,
+ const SkSurfaceProps* surfaceProps) {
+ GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
+
+ MTKView* mtkView = (__bridge MTKView*)view;
+ GrBackendFormat backendFormat = GrBackendFormat::MakeMtl(mtkView.colorPixelFormat);
+
+ GrColorType grColorType = SkColorTypeToGrColorType(colorType);
+
+ SkISize dims = {(int)mtkView.drawableSize.width, (int)mtkView.drawableSize.height};
+
+ GrProxyProvider::TextureInfo texInfo;
+ texInfo.fMipmapped = GrMipmapped::kNo;
+ texInfo.fTextureType = GrTextureType::k2D;
+
+ sk_sp<GrRenderTargetProxy> proxy = proxyProvider->createLazyRenderTargetProxy(
+ [view](GrResourceProvider* resourceProvider,
+ const GrSurfaceProxy::LazySurfaceDesc& desc) {
+ MTKView* mtkView = (__bridge MTKView*)view;
+ id<CAMetalDrawable> currentDrawable = [mtkView currentDrawable];
+
+ GrMtlGpu* mtlGpu = (GrMtlGpu*) resourceProvider->priv().gpu();
+ sk_sp<GrRenderTarget> surface;
+ if (mtkView.framebufferOnly) {
+ surface = GrMtlRenderTarget::MakeWrappedRenderTarget(
+ mtlGpu, desc.fDimensions, desc.fSampleCnt, currentDrawable.texture);
+ } else {
+ surface = GrMtlTextureRenderTarget::MakeWrappedTextureRenderTarget(
+ mtlGpu, desc.fDimensions, desc.fSampleCnt, currentDrawable.texture,
+ GrWrapCacheable::kNo);
+ }
+ if (surface && desc.fSampleCnt > 1) {
+ surface->setRequiresManualMSAAResolve();
+ }
+
+ return GrSurfaceProxy::LazyCallbackResult(std::move(surface));
+ },
+ backendFormat,
+ dims,
+ sampleCnt,
+ sampleCnt > 1 ? GrInternalSurfaceFlags::kRequiresManualMSAAResolve
+ : GrInternalSurfaceFlags::kNone,
+ mtkView.framebufferOnly ? nullptr : &texInfo,
+ GrMipmapStatus::kNotAllocated,
+ SkBackingFit::kExact,
+ skgpu::Budgeted::kYes,
+ GrProtected::kNo,
+ false,
+ GrSurfaceProxy::UseAllocator::kYes);
+
+ auto device = rContext->priv().createDevice(grColorType,
+ std::move(proxy),
+ std::move(colorSpace),
+ origin,
+ SkSurfacePropsCopyOrDefault(surfaceProps),
+ skgpu::ganesh::Device::InitContents::kUninit);
+ if (!device) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Gpu>(std::move(device));
+}
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/image/SkSurface_Null.cpp b/gfx/skia/skia/src/image/SkSurface_Null.cpp
new file mode 100644
index 0000000000..099a1c86bc
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Null.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCapabilities.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSurface.h"
+#include "include/utils/SkNoDrawCanvas.h"
+#include "src/image/SkSurface_Base.h"
+
+class SkCanvas;
+class SkPaint;
+class SkPixmap;
+struct SkIRect;
+struct SkSamplingOptions;
+
+class SkNullSurface : public SkSurface_Base {
+public:
+ SkNullSurface(int width, int height) : SkSurface_Base(width, height, nullptr) {}
+
+protected:
+ SkCanvas* onNewCanvas() override {
+ return new SkNoDrawCanvas(this->width(), this->height());
+ }
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo& info) override {
+ return MakeNull(info.width(), info.height());
+ }
+ sk_sp<SkImage> onNewImageSnapshot(const SkIRect* subsetOrNull) override { return nullptr; }
+ void onWritePixels(const SkPixmap&, int x, int y) override {}
+ void onDraw(SkCanvas*, SkScalar, SkScalar, const SkSamplingOptions&, const SkPaint*) override {}
+ bool onCopyOnWrite(ContentChangeMode) override { return true; }
+ sk_sp<const SkCapabilities> onCapabilities() override {
+ // Not really, but we have to return *something*
+ return SkCapabilities::RasterBackend();
+ }
+ SkImageInfo imageInfo() const override {
+ return SkImageInfo::MakeUnknown(this->width(), this->height());
+ }
+};
+
+sk_sp<SkSurface> SkSurface::MakeNull(int width, int height) {
+ if (width < 1 || height < 1) {
+ return nullptr;
+ }
+ return sk_sp<SkSurface>(new SkNullSurface(width, height));
+}
diff --git a/gfx/skia/skia/src/image/SkSurface_Raster.cpp b/gfx/skia/skia/src/image/SkSurface_Raster.cpp
new file mode 100644
index 0000000000..c8a844f994
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Raster.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/image/SkSurface_Raster.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkCapabilities.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMallocPixelRef.h"
+#include "include/core/SkPixelRef.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSurface.h"
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkMath.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkSurfacePriv.h"
+
+#include <cstdint>
+#include <cstring>
+#include <utility>
+
+class SkImage;
+class SkPaint;
+class SkPixmap;
+class SkSurfaceProps;
+
+bool SkSurfaceValidateRasterInfo(const SkImageInfo& info, size_t rowBytes) {
+ if (!SkImageInfoIsValid(info)) {
+ return false;
+ }
+
+ if (kIgnoreRowBytesValue == rowBytes) {
+ return true;
+ }
+
+ if (!info.validRowBytes(rowBytes)) {
+ return false;
+ }
+
+ uint64_t size = sk_64_mul(info.height(), rowBytes);
+ static const size_t kMaxTotalSize = SK_MaxS32;
+ if (size > kMaxTotalSize) {
+ return false;
+ }
+
+ return true;
+}
+
+SkSurface_Raster::SkSurface_Raster(const SkImageInfo& info, void* pixels, size_t rb,
+ void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps* props)
+ : INHERITED(info, props)
+{
+ fBitmap.installPixels(info, pixels, rb, releaseProc, context);
+ fWeOwnThePixels = false; // We are "Direct"
+}
+
+SkSurface_Raster::SkSurface_Raster(const SkImageInfo& info, sk_sp<SkPixelRef> pr,
+ const SkSurfaceProps* props)
+ : INHERITED(pr->width(), pr->height(), props)
+{
+ fBitmap.setInfo(info, pr->rowBytes());
+ fBitmap.setPixelRef(std::move(pr), 0, 0);
+ fWeOwnThePixels = true;
+}
+
+SkCanvas* SkSurface_Raster::onNewCanvas() { return new SkCanvas(fBitmap, this->props()); }
+
+sk_sp<SkSurface> SkSurface_Raster::onNewSurface(const SkImageInfo& info) {
+ return SkSurface::MakeRaster(info, &this->props());
+}
+
+void SkSurface_Raster::onDraw(SkCanvas* canvas, SkScalar x, SkScalar y,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ canvas->drawImage(fBitmap.asImage().get(), x, y, sampling, paint);
+}
+
+sk_sp<SkImage> SkSurface_Raster::onNewImageSnapshot(const SkIRect* subset) {
+ if (subset) {
+ SkASSERT(SkIRect::MakeWH(fBitmap.width(), fBitmap.height()).contains(*subset));
+ SkBitmap dst;
+ dst.allocPixels(fBitmap.info().makeDimensions(subset->size()));
+ SkAssertResult(fBitmap.readPixels(dst.pixmap(), subset->left(), subset->top()));
+ dst.setImmutable(); // key, so MakeFromBitmap doesn't make a copy of the buffer
+ return dst.asImage();
+ }
+
+ SkCopyPixelsMode cpm = kIfMutable_SkCopyPixelsMode;
+ if (fWeOwnThePixels) {
+ // SkImage_raster requires these pixels are immutable for its full lifetime.
+ // We'll undo this via onRestoreBackingMutability() if we can avoid the COW.
+ if (SkPixelRef* pr = fBitmap.pixelRef()) {
+ pr->setTemporarilyImmutable();
+ }
+ } else {
+ cpm = kAlways_SkCopyPixelsMode;
+ }
+
+ // Our pixels are in memory, so read access on the snapshot SkImage could be cheap.
+ // Lock the shared pixel ref to ensure peekPixels() is usable.
+ return SkMakeImageFromRasterBitmap(fBitmap, cpm);
+}
+
+void SkSurface_Raster::onWritePixels(const SkPixmap& src, int x, int y) {
+ fBitmap.writePixels(src, x, y);
+}
+
+void SkSurface_Raster::onRestoreBackingMutability() {
+ SkASSERT(!this->hasCachedImage()); // Shouldn't be any snapshots out there.
+ if (SkPixelRef* pr = fBitmap.pixelRef()) {
+ pr->restoreMutability();
+ }
+}
+
+bool SkSurface_Raster::onCopyOnWrite(ContentChangeMode mode) {
+ // are we sharing pixelrefs with the image?
+ sk_sp<SkImage> cached(this->refCachedImage());
+ SkASSERT(cached);
+ if (SkBitmapImageGetPixelRef(cached.get()) == fBitmap.pixelRef()) {
+ SkASSERT(fWeOwnThePixels);
+ if (kDiscard_ContentChangeMode == mode) {
+ if (!fBitmap.tryAllocPixels()) {
+ return false;
+ }
+ } else {
+ SkBitmap prev(fBitmap);
+ if (!fBitmap.tryAllocPixels()) {
+ return false;
+ }
+ SkASSERT(prev.info() == fBitmap.info());
+ SkASSERT(prev.rowBytes() == fBitmap.rowBytes());
+ memcpy(fBitmap.getPixels(), prev.getPixels(), fBitmap.computeByteSize());
+ }
+
+ // Now fBitmap is a deep copy of itself (and therefore different from
+ // what is being used by the image. Next we update the canvas to use
+ // this as its backend, so we can't modify the image's pixels anymore.
+ SkASSERT(this->getCachedCanvas());
+ this->getCachedCanvas()->baseDevice()->replaceBitmapBackendForRasterSurface(fBitmap);
+ }
+ return true;
+}
+
+sk_sp<const SkCapabilities> SkSurface_Raster::onCapabilities() {
+ return SkCapabilities::RasterBackend();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkSurface> SkSurface::MakeRasterDirectReleaseProc(const SkImageInfo& info, void* pixels,
+ size_t rb, void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps* props) {
+ if (nullptr == releaseProc) {
+ context = nullptr;
+ }
+ if (!SkSurfaceValidateRasterInfo(info, rb)) {
+ return nullptr;
+ }
+ if (nullptr == pixels) {
+ return nullptr;
+ }
+
+ return sk_make_sp<SkSurface_Raster>(info, pixels, rb, releaseProc, context, props);
+}
+
+sk_sp<SkSurface> SkSurface::MakeRasterDirect(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const SkSurfaceProps* props) {
+ return MakeRasterDirectReleaseProc(info, pixels, rowBytes, nullptr, nullptr, props);
+}
+
+sk_sp<SkSurface> SkSurface::MakeRaster(const SkImageInfo& info, size_t rowBytes,
+ const SkSurfaceProps* props) {
+ if (!SkSurfaceValidateRasterInfo(info)) {
+ return nullptr;
+ }
+
+ sk_sp<SkPixelRef> pr = SkMallocPixelRef::MakeAllocate(info, rowBytes);
+ if (!pr) {
+ return nullptr;
+ }
+ if (rowBytes) {
+ SkASSERT(pr->rowBytes() == rowBytes);
+ }
+ return sk_make_sp<SkSurface_Raster>(info, std::move(pr), props);
+}
+
+sk_sp<SkSurface> SkSurface::MakeRasterN32Premul(int width, int height,
+ const SkSurfaceProps* surfaceProps) {
+ return MakeRaster(SkImageInfo::MakeN32Premul(width, height), surfaceProps);
+}
diff --git a/gfx/skia/skia/src/image/SkSurface_Raster.h b/gfx/skia/skia/src/image/SkSurface_Raster.h
new file mode 100644
index 0000000000..14ca2aaafd
--- /dev/null
+++ b/gfx/skia/skia/src/image/SkSurface_Raster.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSurface_Raster_DEFINED
+#define SkSurface_Raster_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkScalar.h"
+#include "src/image/SkSurface_Base.h"
+
+#include <cstring>
+
+class SkCanvas;
+class SkCapabilities;
+class SkImage;
+class SkPaint;
+class SkPixelRef;
+class SkPixmap;
+class SkSurface;
+class SkSurfaceProps;
+struct SkIRect;
+
+class SkSurface_Raster : public SkSurface_Base {
+public:
+ SkSurface_Raster(const SkImageInfo&, void*, size_t rb,
+ void (*releaseProc)(void* pixels, void* context), void* context,
+ const SkSurfaceProps*);
+ SkSurface_Raster(const SkImageInfo& info, sk_sp<SkPixelRef>, const SkSurfaceProps*);
+
+ SkImageInfo imageInfo() const override { return fBitmap.info(); }
+
+ SkCanvas* onNewCanvas() override;
+ sk_sp<SkSurface> onNewSurface(const SkImageInfo&) override;
+ sk_sp<SkImage> onNewImageSnapshot(const SkIRect* subset) override;
+ void onWritePixels(const SkPixmap&, int x, int y) override;
+ void onDraw(SkCanvas*, SkScalar, SkScalar, const SkSamplingOptions&, const SkPaint*) override;
+ bool onCopyOnWrite(ContentChangeMode) override;
+ void onRestoreBackingMutability() override;
+ sk_sp<const SkCapabilities> onCapabilities() override;
+
+private:
+ SkBitmap fBitmap;
+ bool fWeOwnThePixels;
+
+ using INHERITED = SkSurface_Base;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp
new file mode 100644
index 0000000000..9184851b13
--- /dev/null
+++ b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.cpp
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/chromium/SkDiscardableMemory.h"
+#include "src/base/SkTInternalLList.h"
+#include "src/lazy/SkDiscardableMemoryPool.h"
+
+using namespace skia_private;
+
+// Note:
+// A PoolDiscardableMemory is memory that is counted in a pool.
+// A DiscardableMemoryPool is a pool of PoolDiscardableMemorys.
+
+namespace {
+
+class PoolDiscardableMemory;
+
+/**
+ * This non-global pool can be used for unit tests to verify that the
+ * pool works.
+ */
+class DiscardableMemoryPool : public SkDiscardableMemoryPool {
+public:
+ DiscardableMemoryPool(size_t budget);
+ ~DiscardableMemoryPool() override;
+
+ std::unique_ptr<SkDiscardableMemory> make(size_t bytes);
+ SkDiscardableMemory* create(size_t bytes) override {
+ return this->make(bytes).release(); // TODO: change API
+ }
+
+ size_t getRAMUsed() override;
+ void setRAMBudget(size_t budget) override;
+ size_t getRAMBudget() override { return fBudget; }
+
+ /** purges all unlocked DMs */
+ void dumpPool() override;
+
+ #if SK_LAZY_CACHE_STATS // Defined in SkDiscardableMemoryPool.h
+ int getCacheHits() override { return fCacheHits; }
+ int getCacheMisses() override { return fCacheMisses; }
+ void resetCacheHitsAndMisses() override {
+ fCacheHits = fCacheMisses = 0;
+ }
+ int fCacheHits;
+ int fCacheMisses;
+ #endif // SK_LAZY_CACHE_STATS
+
+private:
+ SkMutex fMutex;
+ size_t fBudget;
+ size_t fUsed;
+ SkTInternalLList<PoolDiscardableMemory> fList;
+
+ /** Function called to free memory if needed */
+ void dumpDownTo(size_t budget);
+ /** called by DiscardableMemoryPool upon destruction */
+ void removeFromPool(PoolDiscardableMemory* dm);
+ /** called by DiscardableMemoryPool::lock() */
+ bool lock(PoolDiscardableMemory* dm);
+ /** called by DiscardableMemoryPool::unlock() */
+ void unlock(PoolDiscardableMemory* dm);
+
+ friend class PoolDiscardableMemory;
+
+ using INHERITED = SkDiscardableMemory::Factory;
+};
+
+/**
+ * A PoolDiscardableMemory is a SkDiscardableMemory that relies on
+ * a DiscardableMemoryPool object to manage the memory.
+ */
+class PoolDiscardableMemory : public SkDiscardableMemory {
+public:
+ PoolDiscardableMemory(sk_sp<DiscardableMemoryPool> pool, UniqueVoidPtr pointer, size_t bytes);
+ ~PoolDiscardableMemory() override;
+ bool lock() override;
+ void* data() override;
+ void unlock() override;
+ friend class DiscardableMemoryPool;
+private:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(PoolDiscardableMemory);
+ sk_sp<DiscardableMemoryPool> fPool;
+ bool fLocked;
+ UniqueVoidPtr fPointer;
+ const size_t fBytes;
+};
+
+PoolDiscardableMemory::PoolDiscardableMemory(sk_sp<DiscardableMemoryPool> pool,
+ UniqueVoidPtr pointer,
+ size_t bytes)
+ : fPool(std::move(pool)), fLocked(true), fPointer(std::move(pointer)), fBytes(bytes) {
+ SkASSERT(fPool != nullptr);
+ SkASSERT(fPointer != nullptr);
+ SkASSERT(fBytes > 0);
+}
+
+PoolDiscardableMemory::~PoolDiscardableMemory() {
+ SkASSERT(!fLocked); // contract for SkDiscardableMemory
+ fPool->removeFromPool(this);
+}
+
+bool PoolDiscardableMemory::lock() {
+ SkASSERT(!fLocked); // contract for SkDiscardableMemory
+ return fPool->lock(this);
+}
+
+void* PoolDiscardableMemory::data() {
+ SkASSERT(fLocked); // contract for SkDiscardableMemory
+ return fPointer.get();
+}
+
+void PoolDiscardableMemory::unlock() {
+ SkASSERT(fLocked); // contract for SkDiscardableMemory
+ fPool->unlock(this);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DiscardableMemoryPool::DiscardableMemoryPool(size_t budget)
+ : fBudget(budget)
+ , fUsed(0) {
+ #if SK_LAZY_CACHE_STATS
+ fCacheHits = 0;
+ fCacheMisses = 0;
+ #endif // SK_LAZY_CACHE_STATS
+}
+DiscardableMemoryPool::~DiscardableMemoryPool() {
+ // PoolDiscardableMemory objects that belong to this pool are
+ // always deleted before deleting this pool since each one has a
+ // ref to the pool.
+ SkASSERT(fList.isEmpty());
+}
+
+void DiscardableMemoryPool::dumpDownTo(size_t budget) {
+ fMutex.assertHeld();
+ if (fUsed <= budget) {
+ return;
+ }
+ using Iter = SkTInternalLList<PoolDiscardableMemory>::Iter;
+ Iter iter;
+ PoolDiscardableMemory* cur = iter.init(fList, Iter::kTail_IterStart);
+ while ((fUsed > budget) && (cur)) {
+ if (!cur->fLocked) {
+ PoolDiscardableMemory* dm = cur;
+ SkASSERT(dm->fPointer != nullptr);
+ dm->fPointer = nullptr;
+ SkASSERT(fUsed >= dm->fBytes);
+ fUsed -= dm->fBytes;
+ cur = iter.prev();
+ // Purged DMs are taken out of the list. This saves times
+ // looking them up. Purged DMs are NOT deleted.
+ fList.remove(dm);
+ } else {
+ cur = iter.prev();
+ }
+ }
+}
+
+std::unique_ptr<SkDiscardableMemory> DiscardableMemoryPool::make(size_t bytes) {
+ UniqueVoidPtr addr(sk_malloc_canfail(bytes));
+ if (nullptr == addr) {
+ return nullptr;
+ }
+ auto dm = std::make_unique<PoolDiscardableMemory>(sk_ref_sp(this), std::move(addr), bytes);
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ fList.addToHead(dm.get());
+ fUsed += bytes;
+ this->dumpDownTo(fBudget);
+ return std::move(dm);
+}
+
+void DiscardableMemoryPool::removeFromPool(PoolDiscardableMemory* dm) {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ // This is called by dm's destructor.
+ if (dm->fPointer != nullptr) {
+ SkASSERT(fUsed >= dm->fBytes);
+ fUsed -= dm->fBytes;
+ fList.remove(dm);
+ } else {
+ SkASSERT(!fList.isInList(dm));
+ }
+}
+
+bool DiscardableMemoryPool::lock(PoolDiscardableMemory* dm) {
+ SkASSERT(dm != nullptr);
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ if (nullptr == dm->fPointer) {
+ // May have been purged while waiting for lock.
+ #if SK_LAZY_CACHE_STATS
+ ++fCacheMisses;
+ #endif // SK_LAZY_CACHE_STATS
+ return false;
+ }
+ dm->fLocked = true;
+ fList.remove(dm);
+ fList.addToHead(dm);
+ #if SK_LAZY_CACHE_STATS
+ ++fCacheHits;
+ #endif // SK_LAZY_CACHE_STATS
+ return true;
+}
+
+void DiscardableMemoryPool::unlock(PoolDiscardableMemory* dm) {
+ SkASSERT(dm != nullptr);
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ dm->fLocked = false;
+ this->dumpDownTo(fBudget);
+}
+
+size_t DiscardableMemoryPool::getRAMUsed() {
+ return fUsed;
+}
+void DiscardableMemoryPool::setRAMBudget(size_t budget) {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ fBudget = budget;
+ this->dumpDownTo(fBudget);
+}
+void DiscardableMemoryPool::dumpPool() {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ this->dumpDownTo(0);
+}
+
+} // namespace
+
+sk_sp<SkDiscardableMemoryPool> SkDiscardableMemoryPool::Make(size_t size) {
+ return sk_make_sp<DiscardableMemoryPool>(size);
+}
+
+SkDiscardableMemoryPool* SkGetGlobalDiscardableMemoryPool() {
+ // Intentionally leak this global pool.
+ static SkDiscardableMemoryPool* global =
+ new DiscardableMemoryPool(SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE);
+ return global;
+}
diff --git a/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h
new file mode 100644
index 0000000000..61aedc279f
--- /dev/null
+++ b/gfx/skia/skia/src/lazy/SkDiscardableMemoryPool.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDiscardableMemoryPool_DEFINED
+#define SkDiscardableMemoryPool_DEFINED
+
+#include "include/private/base/SkMutex.h"
+#include "include/private/chromium/SkDiscardableMemory.h"
+
+#ifndef SK_LAZY_CACHE_STATS
+ #ifdef SK_DEBUG
+ #define SK_LAZY_CACHE_STATS 1
+ #else
+ #define SK_LAZY_CACHE_STATS 0
+ #endif
+#endif
+
+/**
+ * An implementation of Discardable Memory that manages a fixed-size
+ * budget of memory. When the allocated memory exceeds this size,
+ * unlocked blocks of memory are purged. If all memory is locked, it
+ * can exceed the memory-use budget.
+ */
/**
 *  An implementation of Discardable Memory that manages a fixed-size
 *  budget of memory. When the allocated memory exceeds this size,
 *  unlocked blocks of memory are purged. If all memory is locked, it
 *  can exceed the memory-use budget.
 */
class SkDiscardableMemoryPool : public SkDiscardableMemory::Factory {
public:
    /** Bytes currently allocated by this pool's live blocks. */
    virtual size_t getRAMUsed() = 0;
    /** Installs a new budget; implementations may purge unlocked blocks to fit. */
    virtual void setRAMBudget(size_t budget) = 0;
    /** Current byte budget (a soft limit — see the class comment). */
    virtual size_t getRAMBudget() = 0;

    /** purges all unlocked DMs */
    virtual void dumpPool() = 0;

    #if SK_LAZY_CACHE_STATS
    /**
     *  These two values are a count of the number of successful and
     *  failed calls to SkDiscardableMemory::lock() for all DMs managed
     *  by this pool.
     */
    virtual int getCacheHits() = 0;
    virtual int getCacheMisses() = 0;
    virtual void resetCacheHitsAndMisses() = 0;
    #endif

    /**
     *  This non-global pool can be used for unit tests to verify that
     *  the pool works.
     */
    static sk_sp<SkDiscardableMemoryPool> Make(size_t size);
};
+
+/**
+ * Returns (and creates if needed) a threadsafe global
+ * SkDiscardableMemoryPool.
+ */
+SkDiscardableMemoryPool* SkGetGlobalDiscardableMemoryPool();
+
+#if !defined(SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE)
+#define SK_DEFAULT_GLOBAL_DISCARDABLE_MEMORY_POOL_SIZE (128 * 1024 * 1024)
+#endif
+
+#endif // SkDiscardableMemoryPool_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkBitmapProcState_opts.h b/gfx/skia/skia/src/opts/SkBitmapProcState_opts.h
new file mode 100644
index 0000000000..f2b04ab45c
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBitmapProcState_opts.h
@@ -0,0 +1,545 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitmapProcState_opts_DEFINED
+#define SkBitmapProcState_opts_DEFINED
+
+#include "src/base/SkMSAN.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkBitmapProcState.h"
+
+// SkBitmapProcState optimized Shader, Sample, or Matrix procs.
+//
+// Only S32_alpha_D32_filter_DX exploits instructions beyond
+// our common baseline SSE2/NEON instruction sets, so that's
+// all that lives here.
+//
+// The rest are scattershot at the moment but I want to get them
+// all migrated to be normal code inside SkBitmapProcState.cpp.
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <immintrin.h>
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+// This same basic packing scheme is used throughout the file.
// Unpacks one 32-bit sampler entry into two integer coordinates and a
// 4-bit lerp weight. Bit layout, MSB to LSB: [ v0:14 | w:4 | v1:14 ].
// The weight w applies to v1; v0's weight is 16-w. Works element-wise for
// vector types (e.g. skvx::Vec) as well as scalars.
template <typename U32, typename Out>
static void decode_packed_coordinates_and_weight(U32 packed, Out* v0, Out* v1, Out* w) {
    *w  = (packed >> 14) & 0xf;  // Lerp weight for v1; weight for v0 is 16-w.
    *v1 = packed & 0x3fff;       // Integer coordinate x1 or y1.
    *v0 = packed >> 18;          // Integer coordinate x0 or y0.
}
+
+#if 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    // AVX2 variant of the bilinear _DX sampler: produces 8 output pixels per
    // pass with vpgatherdd loads and 16-bit fixed-point lerps. All weights
    // are in [0,16], so the combined x*y weight never exceeds 16*16 = 256.
    /*not static*/ inline
    void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
                                 const uint32_t* xy, int count, uint32_t* colors) {
        SkASSERT(count > 0 && colors != nullptr);
        SkASSERT(s.fBilerp);
        SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
        SkASSERT(s.fAlphaScale <= 256);

        // In a _DX variant only X varies; all samples share y0/y1 coordinates and wy weight.
        int y0, y1, wy;
        decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);

        const uint32_t* row0 = s.fPixmap.addr32(0,y0);
        const uint32_t* row1 = s.fPixmap.addr32(0,y1);

        // Computes 8 bilerped output pixels from 8 packed x-coordinate entries.
        auto bilerp = [&](skvx::Vec<8,uint32_t> packed_x_coordinates) -> skvx::Vec<8,uint32_t> {
            // Decode up to 8 output pixels' x-coordinates and weights.
            skvx::Vec<8,uint32_t> x0,x1,wx;
            decode_packed_coordinates_and_weight(packed_x_coordinates, &x0, &x1, &wx);

            // Splat wx to each color channel.
            wx = (wx <<  0)
               | (wx <<  8)
               | (wx << 16)
               | (wx << 24);

            auto gather = [](const uint32_t* ptr, skvx::Vec<8,uint32_t> ix) {
            #if 1
                // Drop into AVX2 intrinsics for vpgatherdd.
                return skvx::bit_pun<skvx::Vec<8,uint32_t>>(
                        _mm256_i32gather_epi32((const int*)ptr, skvx::bit_pun<__m256i>(ix), 4));
            #else
                // Portable version... sometimes I don't trust vpgatherdd.
                return skvx::Vec<8,uint32_t>{
                    ptr[ix[0]], ptr[ix[1]], ptr[ix[2]], ptr[ix[3]],
                    ptr[ix[4]], ptr[ix[5]], ptr[ix[6]], ptr[ix[7]],
                };
            #endif
            };

            // Gather the 32 32-bit pixels that we'll bilerp into our 8 output pixels.
            skvx::Vec<8,uint32_t> tl = gather(row0, x0), tr = gather(row0, x1),
                                  bl = gather(row1, x0), br = gather(row1, x1);

        #if 1
            // We'll use _mm256_maddubs_epi16() to lerp much like in the SSSE3 code.
            auto lerp_x = [&](skvx::Vec<8,uint32_t> L, skvx::Vec<8,uint32_t> R) {
                __m256i l = skvx::bit_pun<__m256i>(L),
                        r = skvx::bit_pun<__m256i>(R),
                       wr = skvx::bit_pun<__m256i>(wx),
                       wl = _mm256_sub_epi8(_mm256_set1_epi8(16), wr);

                // Interlace l,r bytewise and line them up with their weights, then lerp.
                __m256i lo = _mm256_maddubs_epi16(_mm256_unpacklo_epi8( l, r),
                                                  _mm256_unpacklo_epi8(wl,wr));
                __m256i hi = _mm256_maddubs_epi16(_mm256_unpackhi_epi8( l, r),
                                                  _mm256_unpackhi_epi8(wl,wr));

                // Those _mm256_unpack??_epi8() calls left us in a bit of an odd order:
                //
                //    if   l = a b c d | e f g h
                //   and   r = A B C D | E F G H
                //
                //  then lo = a A b B | e E f F   (low  half of each input)
                //   and hi = c C d D | g G h H   (high half of each input)
                //
                // To get everything back in original order we need to transpose that.
                __m256i abcd = _mm256_permute2x128_si256(lo, hi, 0x20),
                        efgh = _mm256_permute2x128_si256(lo, hi, 0x31);

                return skvx::join(skvx::bit_pun<skvx::Vec<16,uint16_t>>(abcd),
                                  skvx::bit_pun<skvx::Vec<16,uint16_t>>(efgh));
            };

            skvx::Vec<32, uint16_t> top = lerp_x(tl, tr),
                                    bot = lerp_x(bl, br),
                                    sum = 16*top + (bot-top)*wy;
        #else
            // Treat 32-bit pixels as 4 8-bit values, and expand to 16-bit for room to multiply.
            auto to_16x4 = [](auto v) -> skvx::Vec<32, uint16_t> {
                return skvx::cast<uint16_t>(skvx::bit_pun<skvx::Vec<32, uint8_t>>(v));
            };

            // Sum up weighted sample pixels.  The naive, redundant math would be,
            //
            //   sum = tl * (16-wy) * (16-wx)
            //       + bl * (   wy) * (16-wx)
            //       + tr * (16-wy) * (   wx)
            //       + br * (   wy) * (   wx)
            //
            // But we refactor to eliminate a bunch of those common factors.
            auto lerp = [](auto lo, auto hi, auto w) {
                return 16*lo + (hi-lo)*w;
            };
            skvx::Vec<32, uint16_t> sum = lerp(lerp(to_16x4(tl), to_16x4(bl), wy),
                                               lerp(to_16x4(tr), to_16x4(br), wy), to_16x4(wx));
        #endif

            // Get back to [0,255] by dividing by maximum weight 16x16 = 256.
            sum >>= 8;

            // Scale by alpha if needed.
            if(s.fAlphaScale < 256) {
                sum *= s.fAlphaScale;
                sum >>= 8;
            }

            // Pack back to 8-bit channels, undoing to_16x4().
            return skvx::bit_pun<skvx::Vec<8,uint32_t>>(skvx::cast<uint8_t>(sum));
        };

        while (count >= 8) {
            bilerp(skvx::Vec<8,uint32_t>::Load(xy)).store(colors);
            xy     += 8;
            colors += 8;
            count  -= 8;
        }
        if (count > 0) {
            // Masked tail: only the first `count` lanes are loaded and stored.
            __m256i active = skvx::bit_pun<__m256i>( count > skvx::Vec<8,int>{0,1,2,3, 4,5,6,7} ),
                    coords = _mm256_maskload_epi32((const int*)xy, active),
                    pixels;

            bilerp(skvx::bit_pun<skvx::Vec<8,uint32_t>>(coords)).store(&pixels);
            _mm256_maskstore_epi32((int*)colors, active, pixels);

            sk_msan_mark_initialized(colors, colors+count,
                                     "MSAN still doesn't understand AVX2 mask loads and stores.");
        }
    }
+
+#elif 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+
    // SSSE3 variant of the bilinear _DX sampler: 4 output pixels per main-loop
    // pass, built around _mm_maddubs_epi16() as a fused interlace-and-lerp.
    /*not static*/ inline
    void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
                                 const uint32_t* xy, int count, uint32_t* colors) {
        SkASSERT(count > 0 && colors != nullptr);
        SkASSERT(s.fBilerp);
        SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
        SkASSERT(s.fAlphaScale <= 256);

        // interpolate_in_x() is the crux of the SSSE3 implementation,
        // interpolating in X for up to two output pixels (A and B) using _mm_maddubs_epi16().
        auto interpolate_in_x = [](uint32_t A0, uint32_t A1,
                                   uint32_t B0, uint32_t B1,
                                   __m128i interlaced_x_weights) {
            // _mm_maddubs_epi16() is a little idiosyncratic, but great as the core of a lerp.
            //
            // It takes two arguments interlaced byte-wise:
            //    - first  arg: [ l,r, ... 7 more pairs of unsigned 8-bit values ...]
            //    - second arg: [ w,W, ... 7 more pairs of signed   8-bit values ...]
            // and returns 8 signed 16-bit values: [ l*w + r*W, ... 7 more ... ].
            //
            // That's why we go to all this trouble to make interlaced_x_weights,
            // and here we're about to interlace A0 with A1 and B0 with B1 to match.
            //
            // Our interlaced_x_weights are all in [0,16], and so we need not worry about
            // the signedness of that input nor about the signedness of the output.

            __m128i interlaced_A = _mm_unpacklo_epi8(_mm_cvtsi32_si128(A0), _mm_cvtsi32_si128(A1)),
                    interlaced_B = _mm_unpacklo_epi8(_mm_cvtsi32_si128(B0), _mm_cvtsi32_si128(B1));

            return _mm_maddubs_epi16(_mm_unpacklo_epi64(interlaced_A, interlaced_B),
                                     interlaced_x_weights);
        };

        // Interpolate {A0..A3} --> output pixel A, and {B0..B3} --> output pixel B.
        // Returns two pixels, with each color channel in a 16-bit lane of the __m128i.
        auto interpolate_in_x_and_y = [&](uint32_t A0, uint32_t A1,
                                          uint32_t A2, uint32_t A3,
                                          uint32_t B0, uint32_t B1,
                                          uint32_t B2, uint32_t B3,
                                          __m128i interlaced_x_weights,
                                          int wy) {
            // Interpolate each row in X, leaving 16-bit lanes scaled by interlaced_x_weights.
            __m128i top = interpolate_in_x(A0,A1, B0,B1, interlaced_x_weights),
                    bot = interpolate_in_x(A2,A3, B2,B3, interlaced_x_weights);

            // Interpolate in Y.  As in the SSE2 code, we calculate top*(16-wy) + bot*wy
            // as 16*top + (bot-top)*wy to save a multiply.
            __m128i px = _mm_add_epi16(_mm_slli_epi16(top, 4),
                                       _mm_mullo_epi16(_mm_sub_epi16(bot, top),
                                                       _mm_set1_epi16(wy)));

            // Scale down by total max weight 16x16 = 256.
            px = _mm_srli_epi16(px, 8);

            // Scale by alpha if needed.
            if (s.fAlphaScale < 256) {
                px = _mm_srli_epi16(_mm_mullo_epi16(px, _mm_set1_epi16(s.fAlphaScale)), 8);
            }
            return px;
        };

        // We're in _DX mode here, so we're only varying in X.
        // That means the first entry of xy is our constant pair of Y coordinates and weight in Y.
        // All the other entries in xy will be pairs of X coordinates and the X weight.
        int y0, y1, wy;
        decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);

        auto row0 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes()),
             row1 = (const uint32_t*)((const uint8_t*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes());

        while (count >= 4) {
            // We can really get going, loading 4 X-pairs at a time to produce 4 output pixels.
            int x0[4],
                x1[4];
            __m128i wx;

            // decode_packed_coordinates_and_weight(), 4x.
            __m128i packed = _mm_loadu_si128((const __m128i*)xy);
            _mm_storeu_si128((__m128i*)x0, _mm_srli_epi32(packed, 18));
            _mm_storeu_si128((__m128i*)x1, _mm_and_si128 (packed, _mm_set1_epi32(0x3fff)));
            wx = _mm_and_si128(_mm_srli_epi32(packed, 14), _mm_set1_epi32(0xf));  // [0,15]

            // Splat each x weight 4x (for each color channel) as wr for pixels on the right at x1,
            // and sixteen minus that as wl for pixels on the left at x0.
            __m128i wr = _mm_shuffle_epi8(wx, _mm_setr_epi8(0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12)),
                    wl = _mm_sub_epi8(_mm_set1_epi8(16), wr);

            // We need to interlace wl and wr for _mm_maddubs_epi16().
            __m128i interlaced_x_weights_AB = _mm_unpacklo_epi8(wl,wr),
                    interlaced_x_weights_CD = _mm_unpackhi_epi8(wl,wr);

            enum { A,B,C,D };

            // interpolate_in_x_and_y() can produce two output pixels (A and B) at a time
            // from eight input pixels {A0..A3} and {B0..B3}, arranged in a 2x2 grid for each.
            __m128i AB = interpolate_in_x_and_y(row0[x0[A]], row0[x1[A]],
                                                row1[x0[A]], row1[x1[A]],
                                                row0[x0[B]], row0[x1[B]],
                                                row1[x0[B]], row1[x1[B]],
                                                interlaced_x_weights_AB, wy);

            // Once more with the other half of the x-weights for two more pixels C,D.
            __m128i CD = interpolate_in_x_and_y(row0[x0[C]], row0[x1[C]],
                                                row1[x0[C]], row1[x1[C]],
                                                row0[x0[D]], row0[x1[D]],
                                                row1[x0[D]], row1[x1[D]],
                                                interlaced_x_weights_CD, wy);

            // Scale by alpha, pack back together to 8-bit lanes, and write out four pixels!
            _mm_storeu_si128((__m128i*)colors, _mm_packus_epi16(AB, CD));
            xy     += 4;
            colors += 4;
            count  -= 4;
        }

        while (count --> 0) {
            // This is exactly the same flow as the count >= 4 loop above, but writing one pixel.
            int x0, x1, wx;
            decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);

            // As above, splat out wx four times as wr, and sixteen minus that as wl.
            __m128i wr = _mm_set1_epi8(wx),     // This splats it out 16 times, but that's fine.
                    wl = _mm_sub_epi8(_mm_set1_epi8(16), wr);

            __m128i interlaced_x_weights = _mm_unpacklo_epi8(wl, wr);

            // The B pixel's inputs are zeroed; only pixel A is stored below.
            __m128i A = interpolate_in_x_and_y(row0[x0], row0[x1],
                                               row1[x0], row1[x1],
                                                      0,        0,
                                                      0,        0,
                                               interlaced_x_weights, wy);

            *colors++ = _mm_cvtsi128_si32(_mm_packus_epi16(A, _mm_setzero_si128()));
        }
    }
+
+
+#elif 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+
    // SSE2 baseline variant of the bilinear _DX sampler: one output pixel per
    // iteration. Both y-weights are packed into one register (allY) so a
    // single 16-bit multiply applies them to the bottom/top pixel halves.
    /*not static*/ inline
    void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
                                 const uint32_t* xy, int count, uint32_t* colors) {
        SkASSERT(count > 0 && colors != nullptr);
        SkASSERT(s.fBilerp);
        SkASSERT(kN32_SkColorType == s.fPixmap.colorType());
        SkASSERT(s.fAlphaScale <= 256);

        int y0, y1, wy;
        decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);

        auto row0 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes() ),
             row1 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes() );

        // We'll put one pixel in the low 4 16-bit lanes to line up with wy,
        // and another in the upper 4 16-bit lanes to line up with 16 - wy.
        const __m128i allY = _mm_unpacklo_epi64(_mm_set1_epi16(   wy),   // Bottom pixel goes here.
                                                _mm_set1_epi16(16-wy));  // Top pixel goes here.

        while (count --> 0) {
            int x0, x1, wx;
            decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);

            // Load the 4 pixels we're interpolating, in this grid:
            //    | tl  tr |
            //    | bl  br |
            const __m128i tl = _mm_cvtsi32_si128(row0[x0]), tr = _mm_cvtsi32_si128(row0[x1]),
                          bl = _mm_cvtsi32_si128(row1[x0]), br = _mm_cvtsi32_si128(row1[x1]);

            // We want to calculate a sum of 4 pixels weighted in two directions:
            //
            //  sum = tl * (16-wy) * (16-wx)
            //      + bl * (   wy) * (16-wx)
            //      + tr * (16-wy) * (   wx)
            //      + br * (   wy) * (   wx)
            //
            // (Notice top --> 16-wy, bottom --> wy, left --> 16-wx, right --> wx.)
            //
            // We've already prepared allY as a vector containing [wy, 16-wy] as a way
            // to apply those y-direction weights.  So we'll start on the x-direction
            // first, grouping into left and right halves, lined up with allY:
            //
            //     L = [bl, tl]
            //     R = [br, tr]
            //
            //   sum = horizontalSum( allY * (L*(16-wx) + R*wx) )
            //
            // Rewriting that one more step, we can replace a multiply with a shift:
            //
            //   sum = horizontalSum( allY * (16*L + (R-L)*wx) )
            //
            // That's how we'll actually do this math.

            __m128i L = _mm_unpacklo_epi8(_mm_unpacklo_epi32(bl, tl), _mm_setzero_si128()),
                    R = _mm_unpacklo_epi8(_mm_unpacklo_epi32(br, tr), _mm_setzero_si128());

            __m128i inner = _mm_add_epi16(_mm_slli_epi16(L, 4),
                                          _mm_mullo_epi16(_mm_sub_epi16(R,L), _mm_set1_epi16(wx)));

            __m128i sum_in_x = _mm_mullo_epi16(inner, allY);

            // sum = horizontalSum( ... )
            __m128i sum = _mm_add_epi16(sum_in_x, _mm_srli_si128(sum_in_x, 8));

            // Get back to [0,255] by dividing by maximum weight 16x16 = 256.
            sum = _mm_srli_epi16(sum, 8);

            if (s.fAlphaScale < 256) {
                // Scale by alpha, which is in [0,256].
                sum = _mm_mullo_epi16(sum, _mm_set1_epi16(s.fAlphaScale));
                sum = _mm_srli_epi16(sum, 8);
            }

            // Pack back into 8-bit values and store.
            *colors++ = _mm_cvtsi128_si32(_mm_packus_epi16(sum, _mm_setzero_si128()));
        }
    }
+
+#else
+
+ // The NEON code only actually differs from the portable code in the
+ // filtering step after we've loaded all four pixels we want to bilerp.
+
+ #if defined(SK_ARM_HAS_NEON)
    // NEON bilerp of one output pixel from four samples a00..a11, where x and
    // y are 4-bit lerp weights (total weight 16*16 = 256), then an optional
    // multiply by `scale` in [0,256]. Writes the result through dst.
    static void filter_and_scale_by_alpha(unsigned x, unsigned y,
                                          SkPMColor a00, SkPMColor a01,
                                          SkPMColor a10, SkPMColor a11,
                                          SkPMColor *dst,
                                          uint16_t scale) {
        uint8x8_t vy, vconst16_8, v16_y, vres;
        uint16x4_t vx, vconst16_16, v16_x, tmp, vscale;
        uint32x2_t va0, va1;
        uint16x8_t tmp1, tmp2;

        vy = vdup_n_u8(y);                // duplicate y into vy
        vconst16_8 = vmov_n_u8(16);       // set up constant in vconst16_8
        v16_y = vsub_u8(vconst16_8, vy);  // v16_y = 16-y

        va0 = vdup_n_u32(a00);            // duplicate a00
        va1 = vdup_n_u32(a10);            // duplicate a10
        va0 = vset_lane_u32(a01, va0, 1); // set top to a01
        va1 = vset_lane_u32(a11, va1, 1); // set top to a11

        tmp1 = vmull_u8(vreinterpret_u8_u32(va0), v16_y); // tmp1 = [a01|a00] * (16-y)
        tmp2 = vmull_u8(vreinterpret_u8_u32(va1), vy);    // tmp2 = [a11|a10] * y

        vx = vdup_n_u16(x);                // duplicate x into vx
        vconst16_16 = vmov_n_u16(16);      // set up constant in vconst16_16
        v16_x = vsub_u16(vconst16_16, vx); // v16_x = 16-x

        tmp = vmul_u16(vget_high_u16(tmp1), vx);        // tmp  = a01 * x
        tmp = vmla_u16(tmp, vget_high_u16(tmp2), vx);   // tmp += a11 * x
        tmp = vmla_u16(tmp, vget_low_u16(tmp1), v16_x); // tmp += a00 * (16-x)
        tmp = vmla_u16(tmp, vget_low_u16(tmp2), v16_x); // tmp += a10 * (16-x)

        if (scale < 256) {
            // Fold in the alpha scale: ((tmp >> 8) * scale) is shifted down
            // by the final vshrn below, matching the unscaled path's >> 8.
            vscale = vdup_n_u16(scale);        // duplicate scale
            tmp = vshr_n_u16(tmp, 8);          // shift down result by 8
            tmp = vmul_u16(tmp, vscale);       // multiply result by scale
        }

        vres = vshrn_n_u16(vcombine_u16(tmp, vcreate_u16((uint64_t)0)), 8); // shift down result by 8
        vst1_lane_u32(dst, vreinterpret_u32_u8(vres), 0);                   // store result
    }
+ #else
    // Portable bilerp of one output pixel from four samples a00..a11.
    // x and y are 4-bit lerp weights (the four per-sample weights sum to
    // 16*16 = 256). Uses the classic packed trick: `lo` carries the R and B
    // channels (masked by 0x00FF00FF) and `hi` carries A and G, each channel
    // getting 8 bits of headroom for the weighted sums.
    static void filter_and_scale_by_alpha(unsigned x, unsigned y,
                                          SkPMColor a00, SkPMColor a01,
                                          SkPMColor a10, SkPMColor a11,
                                          SkPMColor* dstColor,
                                          unsigned alphaScale) {
        SkASSERT((unsigned)x <= 0xF);
        SkASSERT((unsigned)y <= 0xF);
        SkASSERT(alphaScale <= 256);

        int xy = x * y;
        const uint32_t mask = 0xFF00FF;

        // Per-sample weights: (16-x)(16-y), x(16-y), (16-x)y, xy — expanded.
        int scale = 256 - 16*y - 16*x + xy;
        uint32_t lo = (a00 & mask) * scale;
        uint32_t hi = ((a00 >> 8) & mask) * scale;

        scale = 16*x - xy;
        lo += (a01 & mask) * scale;
        hi += ((a01 >> 8) & mask) * scale;

        scale = 16*y - xy;
        lo += (a10 & mask) * scale;
        hi += ((a10 >> 8) & mask) * scale;

        lo += (a11 & mask) * xy;
        hi += ((a11 >> 8) & mask) * xy;

        if (alphaScale < 256) {
            // Divide out the weight (>> 8), then apply the alpha scale.
            lo = ((lo >> 8) & mask) * alphaScale;
            hi = ((hi >> 8) & mask) * alphaScale;
        }

        // Final >> 8 removes the remaining weight/scale; reassemble channels.
        *dstColor = ((lo >> 8) & mask) | (hi & ~mask);
    }
+ #endif
+
+
+ /*not static*/ inline
+ void S32_alpha_D32_filter_DX(const SkBitmapProcState& s,
+ const uint32_t* xy, int count, SkPMColor* colors) {
+ SkASSERT(count > 0 && colors != nullptr);
+ SkASSERT(s.fBilerp);
+ SkASSERT(4 == s.fPixmap.info().bytesPerPixel());
+ SkASSERT(s.fAlphaScale <= 256);
+
+ int y0, y1, wy;
+ decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);
+
+ auto row0 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y0 * s.fPixmap.rowBytes() ),
+ row1 = (const uint32_t*)( (const char*)s.fPixmap.addr() + y1 * s.fPixmap.rowBytes() );
+
+ while (count --> 0) {
+ int x0, x1, wx;
+ decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);
+
+ filter_and_scale_by_alpha(wx, wy,
+ row0[x0], row0[x1],
+ row1[x0], row1[x1],
+ colors++,
+ s.fAlphaScale);
+ }
+ }
+
+#endif
+
+#if defined(SK_ARM_HAS_NEON)
    /*not static*/ inline
    // _DXDY sampler: unlike _DX, every output pixel carries its own Y entry,
    // so each iteration consumes two packed entries (Y then X) and recomputes
    // its row pointers before delegating to filter_and_scale_by_alpha().
    void S32_alpha_D32_filter_DXDY(const SkBitmapProcState& s,
                                   const uint32_t* xy, int count, SkPMColor* colors) {
        SkASSERT(count > 0 && colors != nullptr);
        SkASSERT(s.fBilerp);
        SkASSERT(4 == s.fPixmap.info().bytesPerPixel());
        SkASSERT(s.fAlphaScale <= 256);

        auto src = (const char*)s.fPixmap.addr();
        size_t rb = s.fPixmap.rowBytes();

        while (count --> 0) {
            int y0, y1, wy,
                x0, x1, wx;
            decode_packed_coordinates_and_weight(*xy++, &y0, &y1, &wy);
            decode_packed_coordinates_and_weight(*xy++, &x0, &x1, &wx);

            auto row0 = (const uint32_t*)(src + y0*rb),
                 row1 = (const uint32_t*)(src + y1*rb);

            filter_and_scale_by_alpha(wx, wy,
                                      row0[x0], row0[x1],
                                      row1[x0], row1[x1],
                                      colors++,
                                      s.fAlphaScale);
        }
    }
+#else
+ // It's not yet clear whether it's worthwhile specializing for SSE2/SSSE3/AVX2.
+ constexpr static void (*S32_alpha_D32_filter_DXDY)(const SkBitmapProcState&,
+ const uint32_t*, int, SkPMColor*) = nullptr;
+#endif
+
+} // namespace SK_OPTS_NS
+
namespace sktests {
    // Test-visible re-export: forwards to whichever SK_OPTS_NS variant of the
    // decoder was compiled into this translation unit, so unit tests need not
    // know the opts namespace name.
    template <typename U32, typename Out>
    void decode_packed_coordinates_and_weight(U32 packed, Out* v0, Out* v1, Out* w) {
        SK_OPTS_NS::decode_packed_coordinates_and_weight<U32, Out>(packed, v0, v1, w);
    }
}
+
+#endif
diff --git a/gfx/skia/skia/src/opts/SkBlitMask_opts.h b/gfx/skia/skia/src/opts/SkBlitMask_opts.h
new file mode 100644
index 0000000000..8e673a9728
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitMask_opts.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitMask_opts_DEFINED
+#define SkBlitMask_opts_DEFINED
+
+#include "include/private/base/SkFeatures.h"
+#include "src/core/Sk4px.h"
+
+#if defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+namespace SK_OPTS_NS {
+
+#if defined(SK_ARM_HAS_NEON)
+ // The Sk4px versions below will work fine with NEON, but we have had many indications
+ // that it doesn't perform as well as this NEON-specific code. TODO(mtklein): why?
+
+ #define NEON_A (SK_A32_SHIFT / 8)
+ #define NEON_R (SK_R32_SHIFT / 8)
+ #define NEON_G (SK_G32_SHIFT / 8)
+ #define NEON_B (SK_B32_SHIFT / 8)
+
    // Widens eight 8-bit alphas in [0,255] to 16-bit scales in [1,256]
    // (alpha + 1), the NEON analogue of SkAlpha255To256().
    static inline uint16x8_t SkAlpha255To256_neon8(uint8x8_t alpha) {
        return vaddw_u8(vdupq_n_u16(1), alpha);
    }
+
    // Per-lane (color * scale) >> 8 with scale in [0,256], narrowing the
    // 16-bit products back to 8 bits — the NEON analogue of SkAlphaMul().
    static inline uint8x8_t SkAlphaMul_neon8(uint8x8_t color, uint16x8_t scale) {
        return vshrn_n_u16(vmovl_u8(color) * scale, 8);
    }
+
    // Applies SkAlphaMul_neon8 to all four planar channels of eight pixels.
    static inline uint8x8x4_t SkAlphaMulQ_neon8(uint8x8x4_t color, uint16x8_t scale) {
        uint8x8x4_t ret;

        ret.val[0] = SkAlphaMul_neon8(color.val[0], scale);
        ret.val[1] = SkAlphaMul_neon8(color.val[1], scale);
        ret.val[2] = SkAlphaMul_neon8(color.val[2], scale);
        ret.val[3] = SkAlphaMul_neon8(color.val[3], scale);

        return ret;
    }
+
+
    // Blends a solid color into a D32 device through an A8 coverage mask,
    // 8 pixels per NEON pass plus a scalar tail. The isColor template flag
    // selects the general (translucent color) blend; false selects the
    // simplified opaque-color blend. Iterates height rows; maskRB may be 0
    // (nine-patch) to reuse one mask row for every device row.
    template <bool isColor>
    static void D32_A8_Opaque_Color_neon(void* SK_RESTRICT dst, size_t dstRB,
                                         const void* SK_RESTRICT maskPtr, size_t maskRB,
                                         SkColor color, int width, int height) {
        SkPMColor pmc = SkPreMultiplyColor(color);
        SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
        const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
        uint8x8x4_t vpmc;

        // Nine patch may set maskRB to 0 to blit the same row repeatedly.
        ptrdiff_t mask_adjust = (ptrdiff_t)maskRB - width;
        dstRB -= (width << 2);

        if (width >= 8) {
            // Splat the premultiplied color once; reused by every vector pass.
            vpmc.val[NEON_A] = vdup_n_u8(SkGetPackedA32(pmc));
            vpmc.val[NEON_R] = vdup_n_u8(SkGetPackedR32(pmc));
            vpmc.val[NEON_G] = vdup_n_u8(SkGetPackedG32(pmc));
            vpmc.val[NEON_B] = vdup_n_u8(SkGetPackedB32(pmc));
        }
        do {
            int w = width;
            while (w >= 8) {
                uint8x8_t vmask = vld1_u8(mask);
                uint16x8_t vscale, vmask256 = SkAlpha255To256_neon8(vmask);
                if (isColor) {
                    // dst scale = 256 - srcA*aa (src-over with translucent color).
                    vscale = vsubw_u8(vdupq_n_u16(256),
                                      SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256));
                } else {
                    // Opaque color: dst scale is simply 256 - aa.
                    vscale = vsubw_u8(vdupq_n_u16(256), vmask);
                }
                uint8x8x4_t vdev = vld4_u8((uint8_t*)device);

                vdev.val[NEON_A] = SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256)
                                   + SkAlphaMul_neon8(vdev.val[NEON_A], vscale);
                vdev.val[NEON_R] = SkAlphaMul_neon8(vpmc.val[NEON_R], vmask256)
                                   + SkAlphaMul_neon8(vdev.val[NEON_R], vscale);
                vdev.val[NEON_G] = SkAlphaMul_neon8(vpmc.val[NEON_G], vmask256)
                                   + SkAlphaMul_neon8(vdev.val[NEON_G], vscale);
                vdev.val[NEON_B] = SkAlphaMul_neon8(vpmc.val[NEON_B], vmask256)
                                   + SkAlphaMul_neon8(vdev.val[NEON_B], vscale);

                vst4_u8((uint8_t*)device, vdev);

                mask += 8;
                device += 8;
                w -= 8;
            }

            // Scalar tail for the trailing 0-7 pixels of the row.
            while (w--) {
                unsigned aa = *mask++;
                if (isColor) {
                    *device = SkBlendARGB32(pmc, *device, aa);
                } else {
                    *device = SkAlphaMulQ(pmc, SkAlpha255To256(aa))
                              + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
                }
                device += 1;
            }

            device = (uint32_t*)((char*)device + dstRB);
            mask += mask_adjust;

        } while (--height != 0);
    }
+
    // General case (possibly translucent color): thin wrapper selecting the
    // isColor=true instantiation of D32_A8_Opaque_Color_neon.
    static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
                                         const SkAlpha* mask, size_t maskRB,
                                         SkColor color, int w, int h) {
        D32_A8_Opaque_Color_neon<true>(dst, dstRB, mask, maskRB, color, w, h);
    }
+
+ // As above, but made slightly simpler by requiring that color is opaque.
+ static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ D32_A8_Opaque_Color_neon<false>(dst, dstRB, mask, maskRB, color, w, h);
+ }
+
+ // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
+ static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* maskPtr, size_t maskRB,
+ int width, int height) {
+ SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
+ const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
+
+ // Nine patch may set maskRB to 0 to blit the same row repeatedly.
+ ptrdiff_t mask_adjust = (ptrdiff_t)maskRB - width;
+ dstRB -= (width << 2);
+ do {
+ int w = width;
+ while (w >= 8) {
+ uint8x8_t vmask = vld1_u8(mask);
+ uint16x8_t vscale = vsubw_u8(vdupq_n_u16(256), vmask);
+ uint8x8x4_t vdevice = vld4_u8((uint8_t*)device);
+
+ vdevice = SkAlphaMulQ_neon8(vdevice, vscale);
+ vdevice.val[NEON_A] += vmask;
+
+ vst4_u8((uint8_t*)device, vdevice);
+
+ mask += 8;
+ device += 8;
+ w -= 8;
+ }
+ while (w-- > 0) {
+ unsigned aa = *mask++;
+ *device = (aa << SK_A32_SHIFT)
+ + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
+ device += 1;
+ }
+ device = (uint32_t*)((char*)device + dstRB);
+ mask += mask_adjust;
+ } while (--height != 0);
+ }
+
+#else
    // Portable (Sk4px) general blit: src-over of a premultiplied color through
    // an A8 coverage mask, one row at a time via MapDstAlpha.
    static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
                                         const SkAlpha* mask, size_t maskRB,
                                         SkColor color, int w, int h) {
        auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
        auto fn = [&](const Sk4px& d, const Sk4px& aa) {
            //  = (s + d(1-sa))aa + d(1-aa)
            //  = s*aa + d(1-sa*aa)
            auto left  = s.approxMulDiv255(aa),
                 right = d.approxMulDiv255(left.alphas().inv());
            return left + right;  // This does not overflow (exhaustively checked).
        };
        while (h --> 0) {
            Sk4px::MapDstAlpha(w, dst, mask, fn);
            // Row strides are in bytes; convert to element counts.
            dst  +=  dstRB / sizeof(*dst);
            mask += maskRB / sizeof(*mask);
        }
    }
+
+ // As above, but made slightly simpler by requiring that color is opaque.
+ static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ SkASSERT(SkColorGetA(color) == 0xFF);
+ auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
+ auto fn = [&](const Sk4px& d, const Sk4px& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ // ~~~>
+ // = s*aa + d(1-aa)
+ return s.approxMulDiv255(aa) + d.approxMulDiv255(aa.inv());
+ };
+ while (h --> 0) {
+ Sk4px::MapDstAlpha(w, dst, mask, fn);
+ dst += dstRB / sizeof(*dst);
+ mask += maskRB / sizeof(*mask);
+ }
+ }
+
+ // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
+ static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ int w, int h) {
+ auto fn = [](const Sk4px& d, const Sk4px& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ // ~~~>
+ // a = 1*aa + d(1-1*aa) = aa + d(1-aa)
+ // c = 0*aa + d(1-1*aa) = d(1-aa)
+ return (aa & Sk4px(skvx::byte16{0,0,0,255, 0,0,0,255, 0,0,0,255, 0,0,0,255}))
+ + d.approxMulDiv255(aa.inv());
+ };
+ while (h --> 0) {
+ Sk4px::MapDstAlpha(w, dst, mask, fn);
+ dst += dstRB / sizeof(*dst);
+ mask += maskRB / sizeof(*mask);
+ }
+ }
+#endif
+
+/*not static*/ inline void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ if (color == SK_ColorBLACK) {
+ blit_mask_d32_a8_black(dst, dstRB, mask, maskRB, w, h);
+ } else if (SkColorGetA(color) == 0xFF) {
+ blit_mask_d32_a8_opaque(dst, dstRB, mask, maskRB, color, w, h);
+ } else {
+ blit_mask_d32_a8_general(dst, dstRB, mask, maskRB, color, w, h);
+ }
+}
+
+} // namespace SK_OPTS_NS
+
+#endif//SkBlitMask_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkBlitRow_opts.h b/gfx/skia/skia/src/opts/SkBlitRow_opts.h
new file mode 100644
index 0000000000..36c5c396be
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkBlitRow_opts.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitRow_opts_DEFINED
+#define SkBlitRow_opts_DEFINED
+
+#include "include/private/SkColorData.h"
+#include "src/base/SkMSAN.h"
+#include "src/base/SkVx.h"
+
+// Helpers for blit_row_s32a_opaque(),
+// then blit_row_s32a_opaque() itself,
+// then unrelated blit_row_color32() at the bottom.
+//
+// To keep Skia resistant to timing attacks, it's important not to branch on pixel data.
+// In particular, don't be tempted to [v]ptest, pmovmskb, etc. to branch on the source alpha.
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX
+ #include <immintrin.h>
+
+    static inline __m512i SkPMSrcOver_SKX(const __m512i& src, const __m512i& dst) {
+        // Detailed explanations in SkPMSrcOver_AVX2
+        // b = s + (d*(256-srcA)) >> 8
+
+        // Shuffle each pixel's srcA to the low byte of each 16-bit half of the pixel.
+        const uint8_t _ = -1;       // high bit set, so vpshufb fills a literal 0 byte.
+        const uint8_t mask[64] = { 3, _,3, _, 7, _,7, _, 11,_,11,_, 15,_,15,_,
+                                   19,_,19,_, 23,_,23,_, 27,_,27,_, 31,_,31,_,
+                                   35,_,35,_, 39,_,39,_, 43,_,43,_, 47,_,47,_,
+                                   51,_,51,_, 55,_,55,_, 59,_,59,_, 63,_,63,_ };
+        __m512i srcA_x2 = _mm512_shuffle_epi8(src, _mm512_loadu_si512(mask));
+        __m512i scale_x2 = _mm512_sub_epi16(_mm512_set1_epi16(256),   // scale = 256 - srcA per 16-bit lane
+                                            srcA_x2);
+
+        // Scale red and blue, leaving results in the low byte of each 16-bit lane.
+        __m512i rb = _mm512_and_si512(_mm512_set1_epi32(0x00ff00ff), dst);
+        rb = _mm512_mullo_epi16(rb, scale_x2);
+        rb = _mm512_srli_epi16(rb, 8);                                // (d*(256-srcA)) >> 8
+
+        // Scale green and alpha, leaving results in the high byte, masking off the low bits.
+        __m512i ga = _mm512_srli_epi16(dst, 8);
+        ga = _mm512_mullo_epi16(ga, scale_x2);
+        ga = _mm512_andnot_si512(_mm512_set1_epi32(0x00ff00ff), ga);
+
+        return _mm512_adds_epu8(src, _mm512_or_si512(rb, ga));        // saturating s + scaled d
+    }
+#endif
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ #include <immintrin.h>
+
+    static inline __m256i SkPMSrcOver_AVX2(const __m256i& src, const __m256i& dst) {
+        // Abstractly srcover is
+        //     b = s + d*(1-srcA)
+        //
+        // In terms of unorm8 bytes, that works out to
+        //     b = s + (d*(255-srcA) + 127) / 255
+        //
+        // But we approximate that to within a bit with
+        //     b = s + (d*(255-srcA) + d) / 256
+        // a.k.a
+        //     b = s + (d*(256-srcA)) >> 8
+
+        // The bottleneck of this math is the multiply, and we want to do it as
+        // narrowly as possible, here getting inputs into 16-bit lanes and
+        // using 16-bit multiplies.  We can do twice as many multiplies at once
+        // as using naive 32-bit multiplies, and on top of that, the 16-bit multiplies
+        // are themselves a couple cycles quicker.  Win-win.
+
+        // We'll get everything in 16-bit lanes for two multiplies, one
+        // handling dst red and blue, the other green and alpha.  (They're
+        // conveniently 16-bits apart, you see.)  We don't need the individual
+        // src channels beyond alpha until the very end when we do the "s + "
+        // add, and we don't even need to unpack them; the adds cannot overflow.
+
+        // Shuffle each pixel's srcA to the low byte of each 16-bit half of the pixel.
+        const int _ = -1;   // negative index makes vpshufb fill a literal 0 byte.
+        __m256i srcA_x2 = _mm256_shuffle_epi8(src,
+                _mm256_setr_epi8(3,_,3,_, 7,_,7,_, 11,_,11,_, 15,_,15,_,
+                                 3,_,3,_, 7,_,7,_, 11,_,11,_, 15,_,15,_));
+        __m256i scale_x2 = _mm256_sub_epi16(_mm256_set1_epi16(256),   // scale = 256 - srcA per 16-bit lane
+                                            srcA_x2);
+
+        // Scale red and blue, leaving results in the low byte of each 16-bit lane.
+        __m256i rb = _mm256_and_si256(_mm256_set1_epi32(0x00ff00ff), dst);
+        rb = _mm256_mullo_epi16(rb, scale_x2);
+        rb = _mm256_srli_epi16 (rb, 8);                               // (d*(256-srcA)) >> 8
+
+        // Scale green and alpha, leaving results in the high byte, masking off the low bits.
+        __m256i ga = _mm256_srli_epi16(dst, 8);
+        ga = _mm256_mullo_epi16(ga, scale_x2);
+        ga = _mm256_andnot_si256(_mm256_set1_epi32(0x00ff00ff), ga);
+
+        return _mm256_adds_epu8(src, _mm256_or_si256(rb, ga));        // saturating s + scaled d
+    }
+#endif
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include <immintrin.h>
+
+    static inline __m128i SkPMSrcOver_SSE2(const __m128i& src, const __m128i& dst) {
+        __m128i scale = _mm_sub_epi32(_mm_set1_epi32(256),            // scale = 256 - srcA
+                                      _mm_srli_epi32(src, 24));       // srcA is the top byte of each pixel
+        __m128i scale_x2 = _mm_or_si128(_mm_slli_epi32(scale, 16), scale); // copy scale into both 16-bit halves
+
+        __m128i rb = _mm_and_si128(_mm_set1_epi32(0x00ff00ff), dst);  // red/blue in low bytes of 16-bit lanes
+        rb = _mm_mullo_epi16(rb, scale_x2);
+        rb = _mm_srli_epi16(rb, 8);                                   // (d*(256-srcA)) >> 8
+
+        __m128i ga = _mm_srli_epi16(dst, 8);                          // green/alpha down to low bytes
+        ga = _mm_mullo_epi16(ga, scale_x2);
+        ga = _mm_andnot_si128(_mm_set1_epi32(0x00ff00ff), ga);        // keep the product's high byte in place
+
+        return _mm_adds_epu8(src, _mm_or_si128(rb, ga));              // saturating s + scaled d
+    }
+#endif
+
+#if defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+
+    // SkMulDiv255Round() applied to each lane.
+    static inline uint8x8_t SkMulDiv255Round_neon8(uint8x8_t x, uint8x8_t y) {
+        uint16x8_t prod = vmull_u8(x, y);                   // widening multiply: 8 lanes of x*y in 16 bits
+        return vraddhn_u16(prod, vrshrq_n_u16(prod, 8));    // rounding narrow of prod + prod>>8 ~= prod/255
+    }
+
+    static inline uint8x8x4_t SkPMSrcOver_neon8(uint8x8x4_t dst, uint8x8x4_t src) {
+        uint8x8_t nalphas = vmvn_u8(src.val[3]);  // bitwise NOT: 255 - srcA per lane
+        return {
+            vqadd_u8(src.val[0], SkMulDiv255Round_neon8(nalphas, dst.val[0])),  // r: s + d*(255-sa)/255
+            vqadd_u8(src.val[1], SkMulDiv255Round_neon8(nalphas, dst.val[1])),  // g
+            vqadd_u8(src.val[2], SkMulDiv255Round_neon8(nalphas, dst.val[2])),  // b
+            vqadd_u8(src.val[3], SkMulDiv255Round_neon8(nalphas, dst.val[3])),  // a
+        };
+    }
+
+    // Variant assuming dst and src contain the color components of two consecutive pixels.
+    static inline uint8x8_t SkPMSrcOver_neon2(uint8x8_t dst, uint8x8_t src) {
+        const uint8x8_t alpha_indices = vcreate_u8(0x0707070703030303);  // table: broadcast each pixel's alpha byte
+        uint8x8_t nalphas = vmvn_u8(vtbl1_u8(src, alpha_indices));       // 255 - srcA, replicated per channel
+        return vqadd_u8(src, SkMulDiv255Round_neon8(nalphas, dst));      // saturating s + d*(255-sa)/255
+    }
+
+
+#endif
+
+namespace SK_OPTS_NS {
+
+/*not static*/
+inline void blit_row_s32a_opaque(SkPMColor* dst, const SkPMColor* src, int len, U8CPU alpha) {
+    SkASSERT(alpha == 0xFF);                    // this blitter only handles full global alpha
+    sk_msan_assert_initialized(src, src+len);
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX
+    while (len >= 16) {                         // 16 pixels per 512-bit vector
+        _mm512_storeu_si512((__m512*)dst,
+                            SkPMSrcOver_SKX(_mm512_loadu_si512((const __m512i*)src),
+                                            _mm512_loadu_si512((const __m512i*)dst)));
+        src += 16;
+        dst += 16;
+        len -= 16;
+    }
+#endif
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+    while (len >= 8) {                          // 8 pixels per 256-bit vector
+        _mm256_storeu_si256((__m256i*)dst,
+                            SkPMSrcOver_AVX2(_mm256_loadu_si256((const __m256i*)src),
+                                             _mm256_loadu_si256((const __m256i*)dst)));
+        src += 8;
+        dst += 8;
+        len -= 8;
+    }
+#endif
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+    while (len >= 4) {                          // 4 pixels per 128-bit vector
+        _mm_storeu_si128((__m128i*)dst, SkPMSrcOver_SSE2(_mm_loadu_si128((const __m128i*)src),
+                                                         _mm_loadu_si128((const __m128i*)dst)));
+        src += 4;
+        dst += 4;
+        len -= 4;
+    }
+#endif
+
+#if defined(SK_ARM_HAS_NEON)
+    while (len >= 8) {                          // 8 pixels, deinterleaved into planar r,g,b,a
+        vst4_u8((uint8_t*)dst, SkPMSrcOver_neon8(vld4_u8((const uint8_t*)dst),
+                                                 vld4_u8((const uint8_t*)src)));
+        src += 8;
+        dst += 8;
+        len -= 8;
+    }
+
+    while (len >= 2) {                          // 2 interleaved pixels at a time
+        vst1_u8((uint8_t*)dst, SkPMSrcOver_neon2(vld1_u8((const uint8_t*)dst),
+                                                 vld1_u8((const uint8_t*)src)));
+        src += 2;
+        dst += 2;
+        len -= 2;
+    }
+
+    if (len != 0) {                             // at most one leftover pixel
+        uint8x8_t result = SkPMSrcOver_neon2(vcreate_u8((uint64_t)*dst),
+                                             vcreate_u8((uint64_t)*src));
+        vst1_lane_u32(dst, vreinterpret_u32_u8(result), 0);  // store only the low 32-bit lane
+    }
+    return;                                     // NEON path fully handles the row, skip scalar tail
+#endif
+
+    while (len --> 0) {                         // portable scalar tail
+        *dst = SkPMSrcOver(*src, *dst);
+        src++;
+        dst++;
+    }
+}
+
+// Blend constant color over count src pixels, writing into dst.
+/*not static*/
+inline void blit_row_color32(SkPMColor* dst, const SkPMColor* src, int count, SkPMColor color) {
+    constexpr int N = 4;  // 8, 16 also reasonable choices
+    using U32 = skvx::Vec< N, uint32_t>;        // N whole pixels
+    using U16 = skvx::Vec<4*N, uint16_t>;       // 4N channels, widened for the multiply
+    using U8  = skvx::Vec<4*N, uint8_t>;        // 4N channels, packed
+
+    auto kernel = [color](U32 src) {
+        unsigned invA = 255 - SkGetPackedA32(color);
+        invA += invA >> 7;                      // bias so that >>8 below approximates /255
+        SkASSERT(0 < invA && invA < 256);  // We handle alpha == 0 or alpha == 255 specially.
+
+        // (src * invA + (color << 8) + 128) >> 8
+        // Should all fit in 16 bits.
+        U8 s = skvx::bit_pun<U8>(src),
+           a = U8(invA);
+        U16 c = skvx::cast<uint16_t>(skvx::bit_pun<U8>(U32(color))),
+            d = (mull(s,a) + (c << 8) + 128)>>8;    // widening multiply, then rounded scale-down
+        return skvx::bit_pun<U32>(skvx::cast<uint8_t>(d));
+    };
+
+    while (count >= N) {                        // full N-pixel vectors
+        kernel(U32::Load(src)).store(dst);
+        src += N;
+        dst += N;
+        count -= N;
+    }
+    while (count --> 0) {                       // scalar tail, one pixel through the same kernel
+        *dst++ = kernel(U32{*src++})[0];
+    }
+}
+
+} // namespace SK_OPTS_NS
+
+#endif//SkBlitRow_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkChecksum_opts.h b/gfx/skia/skia/src/opts/SkChecksum_opts.h
new file mode 100644
index 0000000000..53c7edf373
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkChecksum_opts.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkChecksum_opts_DEFINED
+#define SkChecksum_opts_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkChecksum.h"
+#include "src/base/SkUtils.h" // sk_unaligned_load
+
+// This function is designed primarily to deliver consistent results no matter the platform,
+// but then also is optimized for speed on modern machines with CRC32c instructions.
+// (ARM supports both CRC32 and CRC32c, but Intel only CRC32c, so we use CRC32c.)
+
+#if 1 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE42
+ #include <immintrin.h>
+ static uint32_t crc32c_1(uint32_t seed, uint8_t v) { return _mm_crc32_u8(seed, v); }
+ static uint32_t crc32c_4(uint32_t seed, uint32_t v) { return _mm_crc32_u32(seed, v); }
+    static uint32_t crc32c_8(uint32_t seed, uint64_t v) {
+    #if 1 && (defined(__x86_64__) || defined(_M_X64))
+        return _mm_crc32_u64(seed, v);                   // 64-bit CRC32c instruction (x86-64 only)
+    #else
+        seed = _mm_crc32_u32(seed, (uint32_t)(v      )); // 32-bit builds: low half first,
+        return _mm_crc32_u32(seed, (uint32_t)(v >> 32)); // then high half
+    #endif
+    }
+#elif 1 && defined(SK_ARM_HAS_CRC32)
+ #include <arm_acle.h>
+ static uint32_t crc32c_1(uint32_t seed, uint8_t v) { return __crc32cb(seed, v); }
+ static uint32_t crc32c_4(uint32_t seed, uint32_t v) { return __crc32cw(seed, v); }
+ static uint32_t crc32c_8(uint32_t seed, uint64_t v) { return __crc32cd(seed, v); }
+#else
+ // See https://www.w3.org/TR/PNG/#D-CRCAppendix,
+ // but this is CRC32c, so built with 0x82f63b78, not 0xedb88320 like you'll see there.
+ #if 0
+ static std::array<uint32_t, 256> table = []{
+ std::array<uint32_t, 256> t;
+ for (int i = 0; i < 256; i++) {
+ t[i] = i;
+ for (int bits = 8; bits --> 0; ) {
+ t[i] = (t[i] & 1) ? (t[i] >> 1) ^ 0x82f63b78
+ : (t[i] >> 1);
+ }
+ printf("0x%08x,%s", t[i], (i+1) % 8 ? "" : "\n");
+ }
+ return t;
+ }();
+ #endif
+ static constexpr uint32_t crc32c_table[256] = {
+ 0x00000000,0xf26b8303,0xe13b70f7,0x1350f3f4, 0xc79a971f,0x35f1141c,0x26a1e7e8,0xd4ca64eb,
+ 0x8ad958cf,0x78b2dbcc,0x6be22838,0x9989ab3b, 0x4d43cfd0,0xbf284cd3,0xac78bf27,0x5e133c24,
+ 0x105ec76f,0xe235446c,0xf165b798,0x030e349b, 0xd7c45070,0x25afd373,0x36ff2087,0xc494a384,
+ 0x9a879fa0,0x68ec1ca3,0x7bbcef57,0x89d76c54, 0x5d1d08bf,0xaf768bbc,0xbc267848,0x4e4dfb4b,
+ 0x20bd8ede,0xd2d60ddd,0xc186fe29,0x33ed7d2a, 0xe72719c1,0x154c9ac2,0x061c6936,0xf477ea35,
+ 0xaa64d611,0x580f5512,0x4b5fa6e6,0xb93425e5, 0x6dfe410e,0x9f95c20d,0x8cc531f9,0x7eaeb2fa,
+ 0x30e349b1,0xc288cab2,0xd1d83946,0x23b3ba45, 0xf779deae,0x05125dad,0x1642ae59,0xe4292d5a,
+ 0xba3a117e,0x4851927d,0x5b016189,0xa96ae28a, 0x7da08661,0x8fcb0562,0x9c9bf696,0x6ef07595,
+ 0x417b1dbc,0xb3109ebf,0xa0406d4b,0x522bee48, 0x86e18aa3,0x748a09a0,0x67dafa54,0x95b17957,
+ 0xcba24573,0x39c9c670,0x2a993584,0xd8f2b687, 0x0c38d26c,0xfe53516f,0xed03a29b,0x1f682198,
+ 0x5125dad3,0xa34e59d0,0xb01eaa24,0x42752927, 0x96bf4dcc,0x64d4cecf,0x77843d3b,0x85efbe38,
+ 0xdbfc821c,0x2997011f,0x3ac7f2eb,0xc8ac71e8, 0x1c661503,0xee0d9600,0xfd5d65f4,0x0f36e6f7,
+ 0x61c69362,0x93ad1061,0x80fde395,0x72966096, 0xa65c047d,0x5437877e,0x4767748a,0xb50cf789,
+ 0xeb1fcbad,0x197448ae,0x0a24bb5a,0xf84f3859, 0x2c855cb2,0xdeeedfb1,0xcdbe2c45,0x3fd5af46,
+ 0x7198540d,0x83f3d70e,0x90a324fa,0x62c8a7f9, 0xb602c312,0x44694011,0x5739b3e5,0xa55230e6,
+ 0xfb410cc2,0x092a8fc1,0x1a7a7c35,0xe811ff36, 0x3cdb9bdd,0xceb018de,0xdde0eb2a,0x2f8b6829,
+
+ 0x82f63b78,0x709db87b,0x63cd4b8f,0x91a6c88c, 0x456cac67,0xb7072f64,0xa457dc90,0x563c5f93,
+ 0x082f63b7,0xfa44e0b4,0xe9141340,0x1b7f9043, 0xcfb5f4a8,0x3dde77ab,0x2e8e845f,0xdce5075c,
+ 0x92a8fc17,0x60c37f14,0x73938ce0,0x81f80fe3, 0x55326b08,0xa759e80b,0xb4091bff,0x466298fc,
+ 0x1871a4d8,0xea1a27db,0xf94ad42f,0x0b21572c, 0xdfeb33c7,0x2d80b0c4,0x3ed04330,0xccbbc033,
+ 0xa24bb5a6,0x502036a5,0x4370c551,0xb11b4652, 0x65d122b9,0x97baa1ba,0x84ea524e,0x7681d14d,
+ 0x2892ed69,0xdaf96e6a,0xc9a99d9e,0x3bc21e9d, 0xef087a76,0x1d63f975,0x0e330a81,0xfc588982,
+ 0xb21572c9,0x407ef1ca,0x532e023e,0xa145813d, 0x758fe5d6,0x87e466d5,0x94b49521,0x66df1622,
+ 0x38cc2a06,0xcaa7a905,0xd9f75af1,0x2b9cd9f2, 0xff56bd19,0x0d3d3e1a,0x1e6dcdee,0xec064eed,
+ 0xc38d26c4,0x31e6a5c7,0x22b65633,0xd0ddd530, 0x0417b1db,0xf67c32d8,0xe52cc12c,0x1747422f,
+ 0x49547e0b,0xbb3ffd08,0xa86f0efc,0x5a048dff, 0x8ecee914,0x7ca56a17,0x6ff599e3,0x9d9e1ae0,
+ 0xd3d3e1ab,0x21b862a8,0x32e8915c,0xc083125f, 0x144976b4,0xe622f5b7,0xf5720643,0x07198540,
+ 0x590ab964,0xab613a67,0xb831c993,0x4a5a4a90, 0x9e902e7b,0x6cfbad78,0x7fab5e8c,0x8dc0dd8f,
+ 0xe330a81a,0x115b2b19,0x020bd8ed,0xf0605bee, 0x24aa3f05,0xd6c1bc06,0xc5914ff2,0x37faccf1,
+ 0x69e9f0d5,0x9b8273d6,0x88d28022,0x7ab90321, 0xae7367ca,0x5c18e4c9,0x4f48173d,0xbd23943e,
+ 0xf36e6f75,0x0105ec76,0x12551f82,0xe03e9c81, 0x34f4f86a,0xc69f7b69,0xd5cf889d,0x27a40b9e,
+ 0x79b737ba,0x8bdcb4b9,0x988c474d,0x6ae7c44e, 0xbe2da0a5,0x4c4623a6,0x5f16d052,0xad7d5351,
+ };
+    static uint32_t crc32c_1(uint32_t seed, uint8_t v) {
+        return crc32c_table[(seed ^ v) & 0xff]  // standard table-driven CRC update, one byte per lookup
+             ^ (seed >> 8);
+    }
+    static uint32_t crc32c_4(uint32_t seed, uint32_t v) {
+        // Nothing special... just crc32c_1() each byte.
+        for (int i = 0; i < 4; i++) {
+            seed = crc32c_1(seed, (uint8_t)v);  // low byte first (little-endian byte order)
+            v >>= 8;
+        }
+        return seed;
+    }
+    static uint32_t crc32c_8(uint32_t seed, uint64_t v) {
+        // Nothing special... just crc32c_1() each byte.
+        for (int i = 0; i < 8; i++) {
+            seed = crc32c_1(seed, (uint8_t)v);  // low byte first (little-endian byte order)
+            v >>= 8;
+        }
+        return seed;
+    }
+#endif
+
+namespace SK_OPTS_NS {
+
+    inline uint32_t hash_fn(const void* data, size_t len, uint32_t seed) {
+        auto ptr = (const uint8_t*)data;
+
+        // Handle the bulk with a few data-parallel independent hashes,
+        // taking advantage of pipelining and superscalar execution.
+        if (len >= 24) {
+            uint32_t a = seed,
+                     b = seed,
+                     c = seed;
+            while (len >= 24) {                 // three independent 8-byte CRC streams per iteration
+                a = crc32c_8(a, sk_unaligned_load<uint64_t>(ptr + 0));
+                b = crc32c_8(b, sk_unaligned_load<uint64_t>(ptr + 8));
+                c = crc32c_8(c, sk_unaligned_load<uint64_t>(ptr + 16));
+                ptr += 24;
+                len -= 24;
+            }
+            seed = crc32c_4(a, crc32c_4(b,c));  // fold the three lanes back into one seed
+        }
+        while (len >= 8) {                      // remaining whole 8-byte words
+            seed = crc32c_8(seed, sk_unaligned_load<uint64_t>(ptr));
+            ptr += 8;
+            len -= 8;
+        }
+        while (len >= 1) {                      // byte-at-a-time tail
+            seed = crc32c_1(seed, sk_unaligned_load<uint8_t >(ptr));
+            ptr += 1;
+            len -= 1;
+        }
+        return seed;
+    }
+
+} // namespace SK_OPTS_NS
+
+#endif//SkChecksum_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkOpts_avx.cpp b/gfx/skia/skia/src/opts/SkOpts_avx.cpp
new file mode 100644
index 0000000000..bceb3e115b
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_avx.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+#define SK_OPTS_NS avx
+#include "src/opts/SkUtils_opts.h"
+
+namespace SkOpts {
+    void Init_avx() {
+        memset16 = SK_OPTS_NS::memset16;            // install AVX-compiled memset specializations
+        memset32 = SK_OPTS_NS::memset32;
+        memset64 = SK_OPTS_NS::memset64;
+
+        rect_memset16 = SK_OPTS_NS::rect_memset16;  // and the 2-D (rect) variants
+        rect_memset32 = SK_OPTS_NS::rect_memset32;
+        rect_memset64 = SK_OPTS_NS::rect_memset64;
+    }
+} // namespace SkOpts
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
diff --git a/gfx/skia/skia/src/opts/SkOpts_crc32.cpp b/gfx/skia/skia/src/opts/SkOpts_crc32.cpp
new file mode 100644
index 0000000000..5de8c39a69
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_crc32.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+#define SK_OPTS_NS crc32
+#include "src/opts/SkChecksum_opts.h"
+
+namespace SkOpts {
+    void Init_crc32() {
+        hash_fn = crc32::hash_fn;   // CRC32-accelerated hash from SkChecksum_opts.h
+    }
+}
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
diff --git a/gfx/skia/skia/src/opts/SkOpts_hsw.cpp b/gfx/skia/skia/src/opts/SkOpts_hsw.cpp
new file mode 100644
index 0000000000..34f2ccda61
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_hsw.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+#define SK_OPTS_NS hsw
+#include "src/core/SkCubicSolver.h"
+#include "src/opts/SkBitmapProcState_opts.h"
+#include "src/opts/SkBlitRow_opts.h"
+#include "src/opts/SkRasterPipeline_opts.h"
+#include "src/opts/SkSwizzler_opts.h"
+#include "src/opts/SkUtils_opts.h"
+#include "src/opts/SkVM_opts.h"
+
+namespace SkOpts {
+    void Init_hsw() {
+        blit_row_color32     = hsw::blit_row_color32;           // AVX2 ("Haswell") row blitters
+        blit_row_s32a_opaque = hsw::blit_row_s32a_opaque;
+
+        S32_alpha_D32_filter_DX = hsw::S32_alpha_D32_filter_DX;
+
+        cubic_solver = SK_OPTS_NS::cubic_solver;
+
+        RGBA_to_BGRA          = SK_OPTS_NS::RGBA_to_BGRA;       // swizzle/premultiply converters
+        RGBA_to_rgbA          = SK_OPTS_NS::RGBA_to_rgbA;
+        RGBA_to_bgrA          = SK_OPTS_NS::RGBA_to_bgrA;
+        gray_to_RGB1          = SK_OPTS_NS::gray_to_RGB1;
+        grayA_to_RGBA         = SK_OPTS_NS::grayA_to_RGBA;
+        grayA_to_rgbA         = SK_OPTS_NS::grayA_to_rgbA;
+        inverted_CMYK_to_RGB1 = SK_OPTS_NS::inverted_CMYK_to_RGB1;
+        inverted_CMYK_to_BGR1 = SK_OPTS_NS::inverted_CMYK_to_BGR1;
+
+        raster_pipeline_lowp_stride  = SK_OPTS_NS::raster_pipeline_lowp_stride();
+        raster_pipeline_highp_stride = SK_OPTS_NS::raster_pipeline_highp_stride();
+
+    #define M(st) ops_highp[(int)SkRasterPipelineOp::st] = (StageFn)SK_OPTS_NS::st;
+        SK_RASTER_PIPELINE_OPS_ALL(M)                           // register every float (highp) stage
+        just_return_highp = (StageFn)SK_OPTS_NS::just_return;
+        start_pipeline_highp = SK_OPTS_NS::start_pipeline;
+    #undef M
+
+    #define M(st) ops_lowp[(int)SkRasterPipelineOp::st] = (StageFn)SK_OPTS_NS::lowp::st;
+        SK_RASTER_PIPELINE_OPS_LOWP(M)                          // register the 16-bit (lowp) stage subset
+        just_return_lowp = (StageFn)SK_OPTS_NS::lowp::just_return;
+        start_pipeline_lowp = SK_OPTS_NS::lowp::start_pipeline;
+    #undef M
+
+        interpret_skvm = SK_OPTS_NS::interpret_skvm;
+    }
+} // namespace SkOpts
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
diff --git a/gfx/skia/skia/src/opts/SkOpts_skx.cpp b/gfx/skia/skia/src/opts/SkOpts_skx.cpp
new file mode 100644
index 0000000000..7e8ff2df55
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_skx.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+#define SK_OPTS_NS skx
+#include "src/opts/SkVM_opts.h"
+
+namespace SkOpts {
+    void Init_skx() {
+        interpret_skvm = SK_OPTS_NS::interpret_skvm;    // AVX-512 ("Skylake-X") SkVM interpreter
+    }
+} // namespace SkOpts
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
diff --git a/gfx/skia/skia/src/opts/SkOpts_sse42.cpp b/gfx/skia/skia/src/opts/SkOpts_sse42.cpp
new file mode 100644
index 0000000000..aa210014d0
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_sse42.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+#define SK_OPTS_NS sse42
+#include "src/opts/SkChecksum_opts.h"
+
+namespace SkOpts {
+    void Init_sse42() {
+        hash_fn = sse42::hash_fn;   // SSE4.2 CRC32c-instruction-backed hash
+    }
+} // namespace SkOpts
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
diff --git a/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp b/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp
new file mode 100644
index 0000000000..15196ecf43
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkOpts_ssse3.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkOpts.h"
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+#define SK_OPTS_NS ssse3
+#include "src/opts/SkBitmapProcState_opts.h"
+#include "src/opts/SkBlitMask_opts.h"
+#include "src/opts/SkSwizzler_opts.h"
+#include "src/opts/SkXfermode_opts.h"
+
+namespace SkOpts {
+    void Init_ssse3() {
+        create_xfermode  = ssse3::create_xfermode;      // SSSE3 builds replace the portable defaults
+        blit_mask_d32_a8 = ssse3::blit_mask_d32_a8;
+
+        RGBA_to_BGRA          = ssse3::RGBA_to_BGRA;    // swizzle/premultiply converters
+        RGBA_to_rgbA          = ssse3::RGBA_to_rgbA;
+        RGBA_to_bgrA          = ssse3::RGBA_to_bgrA;
+        RGB_to_RGB1           = ssse3::RGB_to_RGB1;
+        RGB_to_BGR1           = ssse3::RGB_to_BGR1;
+        gray_to_RGB1          = ssse3::gray_to_RGB1;
+        grayA_to_RGBA         = ssse3::grayA_to_RGBA;
+        grayA_to_rgbA         = ssse3::grayA_to_rgbA;
+        inverted_CMYK_to_RGB1 = ssse3::inverted_CMYK_to_RGB1;
+        inverted_CMYK_to_BGR1 = ssse3::inverted_CMYK_to_BGR1;
+
+        S32_alpha_D32_filter_DX = ssse3::S32_alpha_D32_filter_DX;
+    }
+} // namespace SkOpts
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
diff --git a/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h b/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h
new file mode 100644
index 0000000000..fa47902e47
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkRasterPipeline_opts.h
@@ -0,0 +1,5666 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkRasterPipeline_opts_DEFINED
+#define SkRasterPipeline_opts_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMalloc.h"
+#include "modules/skcms/skcms.h"
+#include "src/base/SkUtils.h" // unaligned_{load,store}
+#include "src/core/SkRasterPipeline.h"
+#include <cstdint>
+
+// Every function in this file should be marked static and inline using SI.
+#if defined(__clang__) || defined(__GNUC__)
+ #define SI __attribute__((always_inline)) static inline
+#else
+ #define SI static inline
+#endif
+
+template <typename Dst, typename Src>
+SI Dst widen_cast(const Src& src) {
+    static_assert(sizeof(Dst) > sizeof(Src));               // strictly widening: Dst must be larger than Src
+    static_assert(std::is_trivially_copyable<Dst>::value);  // memcpy-based punning requires trivial types
+    static_assert(std::is_trivially_copyable<Src>::value);
+    Dst dst;                                                // upper bytes intentionally left uninitialized
+    memcpy(&dst, &src, sizeof(Src));                        // bit-copy src into the low bytes of dst
+    return dst;
+}
+
+struct Ctx {
+    SkRasterPipelineStage* fStage;  // the pipeline stage whose ctx pointer this wraps
+
+    template <typename T>
+    operator T*() {                 // implicit conversion hands out the stage's context as any pointer type
+        return (T*)fStage->ctx;
+    }
+};
+
+using NoCtx = const void*;
+
+#if defined(SK_ARM_HAS_NEON)
+ #define JUMPER_IS_NEON
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX
+ #define JUMPER_IS_SKX
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ #define JUMPER_IS_HSW
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ #define JUMPER_IS_AVX
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
+ #define JUMPER_IS_SSE41
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #define JUMPER_IS_SSE2
+#else
+ #define JUMPER_IS_SCALAR
+#endif
+
+// Older Clangs seem to crash when generating non-optimized NEON code for ARMv7.
+#if defined(__clang__) && !defined(__OPTIMIZE__) && defined(SK_CPU_ARM32)
+ // Apple Clang 9 and vanilla Clang 5 are fine, and may even be conservative.
+ #if defined(__apple_build_version__) && __clang_major__ < 9
+ #define JUMPER_IS_SCALAR
+ #elif __clang_major__ < 5
+ #define JUMPER_IS_SCALAR
+ #endif
+
+ #if defined(JUMPER_IS_NEON) && defined(JUMPER_IS_SCALAR)
+ #undef JUMPER_IS_NEON
+ #endif
+#endif
+
+#if defined(JUMPER_IS_SCALAR)
+ #include <math.h>
+#elif defined(JUMPER_IS_NEON)
+ #include <arm_neon.h>
+#else
+ #include <immintrin.h>
+#endif
+
+#if !defined(__clang__) && !defined(JUMPER_IS_SCALAR)
+#include "src/base/SkVx.h"
+#endif
+
+#ifdef __clang__
+#define SK_ASSUME(cond) __builtin_assume(cond)
+#elif defined(__GNUC__)
+#define SK_ASSUME(cond) ((cond) ? (void)0 : __builtin_unreachable())
+#elif defined(_MSC_VER)
+#define SK_ASSUME(cond) __assume(cond)
+#else
+#define SK_ASSUME(cond) ((void)0)
+#endif
+
+#if defined(__clang__) || defined(__GNUC__)
+#define SK_EXPECT(exp, p) __builtin_expect(exp, p)
+#else
+#define SK_EXPECT(exp, p) (exp)
+#endif
+
+#ifdef __clang__
+#define SK_VECTORTYPE(type, size) type __attribute__((ext_vector_type(size)))
+#else
+#define SK_VECTORTYPE(type, size) skvx::Vec<size, type>
+#endif
+
+#if defined(JUMPER_IS_SCALAR)
+#define SK_CONVERTVECTOR(vec, type) ((type)(vec))
+#elif defined(__clang__)
+#define SK_CONVERTVECTOR(vec, type) __builtin_convertvector(vec, type)
+#else
+#define SK_CONVERTVECTOR(vec, type) skvx::cast<typename type::elem_type>(vec)
+#endif
+
+// Notes:
+// * rcp_fast and rcp_precise both produce a reciprocal, but rcp_fast is an estimate with at least
+// 12 bits of precision while rcp_precise should be accurate for float size. For ARM rcp_precise
+// requires 2 Newton-Raphson refinement steps because its estimate has 8 bit precision, and for
+// Intel this requires one additional step because its estimate has 12 bit precision.
+
+namespace SK_OPTS_NS {
+#if defined(JUMPER_IS_SCALAR)
+ // This path should lead to portable scalar code.
+ using F = float ;
+ using I32 = int32_t;
+ using U64 = uint64_t;
+ using U32 = uint32_t;
+ using U16 = uint16_t;
+ using U8 = uint8_t ;
+
+ SI F min(F a, F b) { return fminf(a,b); }
+ SI I32 min(I32 a, I32 b) { return a < b ? a : b; }
+ SI U32 min(U32 a, U32 b) { return a < b ? a : b; }
+ SI F max(F a, F b) { return fmaxf(a,b); }
+ SI I32 max(I32 a, I32 b) { return a > b ? a : b; }
+ SI U32 max(U32 a, U32 b) { return a > b ? a : b; }
+
+ SI F mad(F f, F m, F a) { return f*m+a; }
+ SI F abs_ (F v) { return fabsf(v); }
+ SI I32 abs_ (I32 v) { return v < 0 ? -v : v; }
+ SI F floor_(F v) { return floorf(v); }
+ SI F ceil_(F v) { return ceilf(v); }
+ SI F rcp_fast(F v) { return 1.0f / v; }
+ SI F rsqrt (F v) { return 1.0f / sqrtf(v); }
+ SI F sqrt_ (F v) { return sqrtf(v); }
+ SI F rcp_precise (F v) { return 1.0f / v; }
+
+ SI U32 round (F v, F scale) { return (uint32_t)(v*scale + 0.5f); }
+ SI U16 pack(U32 v) { return (U16)v; }
+ SI U8 pack(U16 v) { return (U8)v; }
+
+ SI F if_then_else(I32 c, F t, F e) { return c ? t : e; }
+ SI bool any(I32 c) { return c != 0; }
+ SI bool all(I32 c) { return c != 0; }
+
+ template <typename T>
+ SI T gather(const T* p, U32 ix) { return p[ix]; }
+
+ template <typename T>
+ SI void scatter_masked(T src, T* dst, U32 ix, I32 mask) {
+ dst[ix] = mask ? src : dst[ix];
+ }
+
+ SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+ *r = ptr[0];
+ *g = ptr[1];
+ }
+ SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+ ptr[0] = r;
+ ptr[1] = g;
+ }
+ SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+ *r = ptr[0];
+ *g = ptr[1];
+ *b = ptr[2];
+ }
+ SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+ *r = ptr[0];
+ *g = ptr[1];
+ *b = ptr[2];
+ *a = ptr[3];
+ }
+ SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+ ptr[0] = r;
+ ptr[1] = g;
+ ptr[2] = b;
+ ptr[3] = a;
+ }
+
+ SI void load2(const float* ptr, size_t tail, F* r, F* g) {
+ *r = ptr[0];
+ *g = ptr[1];
+ }
+ SI void store2(float* ptr, size_t tail, F r, F g) {
+ ptr[0] = r;
+ ptr[1] = g;
+ }
+ SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
+ *r = ptr[0];
+ *g = ptr[1];
+ *b = ptr[2];
+ *a = ptr[3];
+ }
+ SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
+ ptr[0] = r;
+ ptr[1] = g;
+ ptr[2] = b;
+ ptr[3] = a;
+ }
+
+#elif defined(JUMPER_IS_NEON)
+ // Since we know we're using Clang, we can use its vector extensions.
+ template <typename T> using V = SK_VECTORTYPE(T, 4);
+ using F = V<float >;
+ using I32 = V< int32_t>;
+ using U64 = V<uint64_t>;
+ using U32 = V<uint32_t>;
+ using U16 = V<uint16_t>;
+ using U8 = V<uint8_t >;
+
+ // We polyfill a few routines that Clang doesn't build into ext_vector_types.
+ SI F min(F a, F b) { return vminq_f32(a,b); }
+ SI I32 min(I32 a, I32 b) { return vminq_s32(a,b); }
+ SI U32 min(U32 a, U32 b) { return vminq_u32(a,b); }
+ SI F max(F a, F b) { return vmaxq_f32(a,b); }
+ SI I32 max(I32 a, I32 b) { return vmaxq_s32(a,b); }
+ SI U32 max(U32 a, U32 b) { return vmaxq_u32(a,b); }
+
+ SI F abs_ (F v) { return vabsq_f32(v); }
+ SI I32 abs_ (I32 v) { return vabsq_s32(v); }
+ SI F rcp_fast(F v) { auto e = vrecpeq_f32 (v); return vrecpsq_f32 (v,e ) * e; }
+ SI F rcp_precise (F v) { float32x4_t e = rcp_fast(v); return vrecpsq_f32(v,e) * e; }
+ SI F rsqrt (F v) { auto e = vrsqrteq_f32(v); return vrsqrtsq_f32(v,e*e) * e; }
+
+ SI U16 pack(U32 v) { return SK_CONVERTVECTOR(v, U16); }
+ SI U8 pack(U16 v) { return SK_CONVERTVECTOR(v, U8); }
+
+ SI F if_then_else(I32 c, F t, F e) { return vbslq_f32(vreinterpretq_u32_s32(c),t,e); }
+
+ #if defined(SK_CPU_ARM64)
+ SI bool any(I32 c) { return vmaxvq_u32(vreinterpretq_u32_s32(c)) != 0; }
+ SI bool all(I32 c) { return vminvq_u32(vreinterpretq_u32_s32(c)) != 0; }
+
+ SI F mad(F f, F m, F a) { return vfmaq_f32(a,f,m); }
+ SI F floor_(F v) { return vrndmq_f32(v); }
+ SI F ceil_(F v) { return vrndpq_f32(v); }
+ SI F sqrt_(F v) { return vsqrtq_f32(v); }
+ SI U32 round(F v, F scale) { return vcvtnq_u32_f32(v*scale); }
+ #else
+ SI bool any(I32 c) { return c[0] | c[1] | c[2] | c[3]; }
+ SI bool all(I32 c) { return c[0] & c[1] & c[2] & c[3]; }
+
+ SI F mad(F f, F m, F a) { return vmlaq_f32(a,f,m); }
+ SI F floor_(F v) {
+ F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
+ return roundtrip - if_then_else(roundtrip > v, 1, 0);
+ }
+
+ SI F ceil_(F v) {
+ F roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
+ return roundtrip + if_then_else(roundtrip < v, 1, 0);
+ }
+
+ SI F sqrt_(F v) {
+ auto e = vrsqrteq_f32(v); // Estimate and two refinement steps for e = rsqrt(v).
+ e *= vrsqrtsq_f32(v,e*e);
+ e *= vrsqrtsq_f32(v,e*e);
+ return v*F(e); // sqrt(v) == v*rsqrt(v).
+ }
+
+ SI U32 round(F v, F scale) {
+ return vcvtq_u32_f32(mad(v,scale,0.5f));
+ }
+ #endif
+
+ template <typename T>
+ SI V<T> gather(const T* p, U32 ix) {
+ return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
+ }
+ template <typename V, typename S>
+ SI void scatter_masked(V src, S* dst, U32 ix, I32 mask) {
+ V before = gather(dst, ix);
+ V after = if_then_else(mask, src, before);
+ dst[ix[0]] = after[0];
+ dst[ix[1]] = after[1];
+ dst[ix[2]] = after[2];
+ dst[ix[3]] = after[3];
+ }
+ SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+ uint16x4x2_t rg;
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { rg = vld2_lane_u16(ptr + 0, rg, 0); }
+ if (tail > 1) { rg = vld2_lane_u16(ptr + 2, rg, 1); }
+ if (tail > 2) { rg = vld2_lane_u16(ptr + 4, rg, 2); }
+ } else {
+ rg = vld2_u16(ptr);
+ }
+ *r = rg.val[0];
+ *g = rg.val[1];
+ }
+ SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { vst2_lane_u16(ptr + 0, (uint16x4x2_t{{r,g}}), 0); }
+ if (tail > 1) { vst2_lane_u16(ptr + 2, (uint16x4x2_t{{r,g}}), 1); }
+ if (tail > 2) { vst2_lane_u16(ptr + 4, (uint16x4x2_t{{r,g}}), 2); }
+ } else {
+ vst2_u16(ptr, (uint16x4x2_t{{r,g}}));
+ }
+ }
+ SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+ uint16x4x3_t rgb;
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { rgb = vld3_lane_u16(ptr + 0, rgb, 0); }
+ if (tail > 1) { rgb = vld3_lane_u16(ptr + 3, rgb, 1); }
+ if (tail > 2) { rgb = vld3_lane_u16(ptr + 6, rgb, 2); }
+ } else {
+ rgb = vld3_u16(ptr);
+ }
+ *r = rgb.val[0];
+ *g = rgb.val[1];
+ *b = rgb.val[2];
+ }
+ SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+ uint16x4x4_t rgba;
+ if (SK_EXPECT(tail,0)) {
+ if ( true ) { rgba = vld4_lane_u16(ptr + 0, rgba, 0); }
+ if (tail > 1) { rgba = vld4_lane_u16(ptr + 4, rgba, 1); }
+ if (tail > 2) { rgba = vld4_lane_u16(ptr + 8, rgba, 2); }
+ } else {
+ rgba = vld4_u16(ptr);
+ }
+ *r = rgba.val[0];
+ *g = rgba.val[1];
+ *b = rgba.val[2];
+ *a = rgba.val[3];
+ }
+
+    // Store 4 pixels of uint16 r,g,b,a, re-interleaving the four planes;
+    // with a tail, only the first `tail` pixels are written (vst4_lane).
+    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+        if (SK_EXPECT(tail,0)) {
+            if ( true ) { vst4_lane_u16(ptr + 0, (uint16x4x4_t{{r,g,b,a}}), 0); }
+            if (tail > 1) { vst4_lane_u16(ptr + 4, (uint16x4x4_t{{r,g,b,a}}), 1); }
+            if (tail > 2) { vst4_lane_u16(ptr + 8, (uint16x4x4_t{{r,g,b,a}}), 2); }
+        } else {
+            vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
+        }
+    }
+    // Float variants of load2/store2: 4 pixels of interleaved float r,g pairs,
+    // using the q-register (128-bit) forms.  Same tail convention as above.
+    SI void load2(const float* ptr, size_t tail, F* r, F* g) {
+        float32x4x2_t rg;
+        if (SK_EXPECT(tail,0)) {
+            if ( true ) { rg = vld2q_lane_f32(ptr + 0, rg, 0); }
+            if (tail > 1) { rg = vld2q_lane_f32(ptr + 2, rg, 1); }
+            if (tail > 2) { rg = vld2q_lane_f32(ptr + 4, rg, 2); }
+        } else {
+            rg = vld2q_f32(ptr);
+        }
+        *r = rg.val[0];
+        *g = rg.val[1];
+    }
+    SI void store2(float* ptr, size_t tail, F r, F g) {
+        if (SK_EXPECT(tail,0)) {
+            if ( true ) { vst2q_lane_f32(ptr + 0, (float32x4x2_t{{r,g}}), 0); }
+            if (tail > 1) { vst2q_lane_f32(ptr + 2, (float32x4x2_t{{r,g}}), 1); }
+            if (tail > 2) { vst2q_lane_f32(ptr + 4, (float32x4x2_t{{r,g}}), 2); }
+        } else {
+            vst2q_f32(ptr, (float32x4x2_t{{r,g}}));
+        }
+    }
+    // Float variants of load4/store4: 4 pixels of interleaved float r,g,b,a,
+    // de/re-interleaved with vld4q/vst4q; lane forms handle the tail pixels.
+    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
+        float32x4x4_t rgba;
+        if (SK_EXPECT(tail,0)) {
+            if ( true ) { rgba = vld4q_lane_f32(ptr + 0, rgba, 0); }
+            if (tail > 1) { rgba = vld4q_lane_f32(ptr + 4, rgba, 1); }
+            if (tail > 2) { rgba = vld4q_lane_f32(ptr + 8, rgba, 2); }
+        } else {
+            rgba = vld4q_f32(ptr);
+        }
+        *r = rgba.val[0];
+        *g = rgba.val[1];
+        *b = rgba.val[2];
+        *a = rgba.val[3];
+    }
+    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
+        if (SK_EXPECT(tail,0)) {
+            if ( true ) { vst4q_lane_f32(ptr + 0, (float32x4x4_t{{r,g,b,a}}), 0); }
+            if (tail > 1) { vst4q_lane_f32(ptr + 4, (float32x4x4_t{{r,g,b,a}}), 1); }
+            if (tail > 2) { vst4q_lane_f32(ptr + 8, (float32x4x4_t{{r,g,b,a}}), 2); }
+        } else {
+            vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
+        }
+    }
+
+#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+    // 8-wide AVX2/FMA path (Haswell and Skylake-X targets).
+    // These are __m256 and __m256i, but friendlier and strongly-typed.
+    template <typename T> using V = SK_VECTORTYPE(T, 8);
+    using F   = V<float   >;
+    using I32 = V< int32_t>;
+    using U64 = V<uint64_t>;
+    using U32 = V<uint32_t>;
+    using U16 = V<uint16_t>;
+    using U8  = V<uint8_t >;
+
+    // Fused multiply-add: f*m + a in one rounding step.
+    SI F mad(F f, F m, F a)  { return _mm256_fmadd_ps(f, m, a);  }
+
+    SI F   min(F a, F b)     { return _mm256_min_ps(a,b);    }
+    SI I32 min(I32 a, I32 b) { return _mm256_min_epi32(a,b); }
+    SI U32 min(U32 a, U32 b) { return _mm256_min_epu32(a,b); }
+    SI F   max(F a, F b)     { return _mm256_max_ps(a,b);    }
+    SI I32 max(I32 a, I32 b) { return _mm256_max_epi32(a,b); }
+    SI U32 max(U32 a, U32 b) { return _mm256_max_epu32(a,b); }
+
+    // v and 0-v differ only in the sign bit, so ANDing them clears it: |v|.
+    SI F   abs_  (F v)       { return _mm256_and_ps(v, 0-v); }
+    SI I32 abs_  (I32 v)     { return _mm256_abs_epi32(v);   }
+    SI F   floor_(F v)       { return _mm256_floor_ps(v);    }
+    SI F   ceil_(F v)        { return _mm256_ceil_ps(v);     }
+    SI F   rcp_fast(F v)     { return _mm256_rcp_ps  (v);    }
+    SI F   rsqrt (F v)       { return _mm256_rsqrt_ps(v);    }
+    SI F   sqrt_ (F v)       { return _mm256_sqrt_ps (v);    }
+    // One Newton-Raphson refinement of the fast reciprocal: e * (2 - v*e).
+    SI F rcp_precise (F v) {
+        F e = rcp_fast(v);
+        return _mm256_mul_ps(_mm256_fnmadd_ps(v, e, _mm256_set1_ps(2.0f)), e);
+    }
+
+    SI U32 round (F v, F scale) { return _mm256_cvtps_epi32(v*scale); }
+    // Narrow 8x u32 -> 8x u16 with unsigned saturation.
+    SI U16 pack(U32 v) {
+        return _mm_packus_epi32(_mm256_extractf128_si256(v, 0),
+                                _mm256_extractf128_si256(v, 1));
+    }
+    // Narrow 8x u16 -> 8x u8 with unsigned saturation (lower half of the pack).
+    SI U8 pack(U16 v) {
+        auto r = _mm_packus_epi16(v,v);
+        return sk_unaligned_load<U8>(&r);
+    }
+
+    SI F if_then_else(I32 c, F t, F e) { return _mm256_blendv_ps(e, t, _mm256_castsi256_ps(c)); }
+    // NOTE: This version of 'all' only works with mask values (true == all bits set)
+    SI bool any(I32 c) { return !_mm256_testz_si256(c, _mm256_set1_epi32(-1)); }
+    SI bool all(I32 c) { return  _mm256_testc_si256(c, _mm256_set1_epi32(-1)); }
+
+    // Generic 8-lane gather: scalar loads per lane.  Specializations below use
+    // hardware AVX2 gather instructions for float/u32/u64 element types.
+    template <typename T>
+    SI V<T> gather(const T* p, U32 ix) {
+        return { p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]],
+                 p[ix[4]], p[ix[5]], p[ix[6]], p[ix[7]], };
+    }
+    SI F   gather(const float*    p, U32 ix) { return _mm256_i32gather_ps   (p, ix, 4); }
+    SI U32 gather(const uint32_t* p, U32 ix) { return _mm256_i32gather_epi32((const int*)p, ix, 4); }
+    // 64-bit gather: split the 8 indices into two halves, gather 4x u64 each.
+    SI U64 gather(const uint64_t* p, U32 ix) {
+        __m256i parts[] = {
+            _mm256_i32gather_epi64((const long long int*)p, _mm256_extracti128_si256(ix,0), 8),
+            _mm256_i32gather_epi64((const long long int*)p, _mm256_extracti128_si256(ix,1), 8),
+        };
+        return sk_bit_cast<U64>(parts);
+    }
+    // 8-lane masked scatter via gather/blend/write-back (see 4-lane note above:
+    // masked-off slots are rewritten with their just-gathered values).
+    template <typename V, typename S>
+    SI void scatter_masked(V src, S* dst, U32 ix, I32 mask) {
+        V before = gather(dst, ix);
+        V after = if_then_else(mask, src, before);
+        dst[ix[0]] = after[0];
+        dst[ix[1]] = after[1];
+        dst[ix[2]] = after[2];
+        dst[ix[3]] = after[3];
+        dst[ix[4]] = after[4];
+        dst[ix[5]] = after[5];
+        dst[ix[6]] = after[6];
+        dst[ix[7]] = after[7];
+    }
+
+    // Load 8 pixels of interleaved uint16 r,g pairs into two 128-bit halves,
+    // then de-interleave: shift/shift-arith isolates even (r) and odd (g)
+    // 16-bit lanes, and packs_epi32 narrows them back to 16 bits (the values
+    // were sign-extended first, so the signed pack is lossless).
+    // Tail path consumes pixels in chunks of 4, then 2, then 1.
+    SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+        U16 _0123, _4567;
+        if (SK_EXPECT(tail,0)) {
+            _0123 = _4567 = _mm_setzero_si128();
+            auto* d = &_0123;
+            if (tail > 3) {
+                *d = _mm_loadu_si128(((__m128i*)ptr) + 0);
+                tail -= 4;
+                ptr += 8;
+                d = &_4567;
+            }
+            bool high = false;
+            if (tail > 1) {
+                *d = _mm_loadl_epi64((__m128i*)ptr);
+                tail -= 2;
+                ptr += 4;
+                high = true;
+            }
+            if (tail > 0) {
+                // Last odd pixel goes into lanes 4,5 if the low 64 bits were
+                // already filled by the 2-pixel chunk, else into lanes 0,1.
+                (*d)[high ? 4 : 0] = *(ptr + 0);
+                (*d)[high ? 5 : 1] = *(ptr + 1);
+            }
+        } else {
+            _0123 = _mm_loadu_si128(((__m128i*)ptr) + 0);
+            _4567 = _mm_loadu_si128(((__m128i*)ptr) + 1);
+        }
+        *r = _mm_packs_epi32(_mm_srai_epi32(_mm_slli_epi32(_0123, 16), 16),
+                             _mm_srai_epi32(_mm_slli_epi32(_4567, 16), 16));
+        *g = _mm_packs_epi32(_mm_srai_epi32(_0123, 16),
+                             _mm_srai_epi32(_4567, 16));
+    }
+    // Store 8 pixels of uint16 r,g pairs; unpacklo/hi re-interleave the planes.
+    // Tail path mirrors load2: 4-pixel, 2-pixel, then 1-pixel stores.
+    SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+        auto _0123 = _mm_unpacklo_epi16(r, g),
+             _4567 = _mm_unpackhi_epi16(r, g);
+        if (SK_EXPECT(tail,0)) {
+            const auto* s = &_0123;
+            if (tail > 3) {
+                _mm_storeu_si128((__m128i*)ptr, *s);
+                s = &_4567;
+                tail -= 4;
+                ptr += 8;
+            }
+            bool high = false;
+            if (tail > 1) {
+                _mm_storel_epi64((__m128i*)ptr, *s);
+                ptr += 4;
+                tail -= 2;
+                high = true;
+            }
+            if (tail > 0) {
+                if (high) {
+                    // One pixel left and it lives in 32-bit lane 2 of *s.
+                    *(int32_t*)ptr = _mm_extract_epi32(*s, 2);
+                } else {
+                    *(int32_t*)ptr = _mm_cvtsi128_si32(*s);
+                }
+            }
+        } else {
+            _mm_storeu_si128((__m128i*)ptr + 0, _0123);
+            _mm_storeu_si128((__m128i*)ptr + 1, _4567);
+        }
+    }
+
+    // Load 8 pixels of interleaved uint16 r,g,b triples (stride 3) and split
+    // into planes.  Each _N ends up holding pixel N's r,g,b in its low 3 lanes;
+    // the unpack ladder below transposes those into r/g/b vectors.
+    SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+        __m128i _0,_1,_2,_3,_4,_5,_6,_7;
+        if (SK_EXPECT(tail,0)) {
+            // Load a single rgb triple without reading past its 6 bytes.
+            auto load_rgb = [](const uint16_t* src) {
+                auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
+                return _mm_insert_epi16(v, src[2], 2);
+            };
+            _1 = _2 = _3 = _4 = _5 = _6 = _7 = _mm_setzero_si128();
+            if (  true  ) { _0 = load_rgb(ptr +  0); }
+            if (tail > 1) { _1 = load_rgb(ptr +  3); }
+            if (tail > 2) { _2 = load_rgb(ptr +  6); }
+            if (tail > 3) { _3 = load_rgb(ptr +  9); }
+            if (tail > 4) { _4 = load_rgb(ptr + 12); }
+            if (tail > 5) { _5 = load_rgb(ptr + 15); }
+            if (tail > 6) { _6 = load_rgb(ptr + 18); }
+        } else {
+            // Load 0+1, 2+3, 4+5 normally, and 6+7 backed up 4 bytes so we don't run over.
+            auto _01 =                _mm_loadu_si128((const __m128i*)(ptr +  0))    ;
+            auto _23 =                _mm_loadu_si128((const __m128i*)(ptr +  6))    ;
+            auto _45 =                _mm_loadu_si128((const __m128i*)(ptr + 12))    ;
+            auto _67 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 16)), 4);
+            _0 = _01; _1 = _mm_srli_si128(_01, 6);
+            _2 = _23; _3 = _mm_srli_si128(_23, 6);
+            _4 = _45; _5 = _mm_srli_si128(_45, 6);
+            _6 = _67; _7 = _mm_srli_si128(_67, 6);
+        }
+
+        auto _02 = _mm_unpacklo_epi16(_0, _2),  // r0 r2 g0 g2 b0 b2 xx xx
+             _13 = _mm_unpacklo_epi16(_1, _3),
+             _46 = _mm_unpacklo_epi16(_4, _6),
+             _57 = _mm_unpacklo_epi16(_5, _7);
+
+        auto rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
+             bx0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 xx xx xx xx
+             rg4567 = _mm_unpacklo_epi16(_46, _57),
+             bx4567 = _mm_unpackhi_epi16(_46, _57);
+
+        *r = _mm_unpacklo_epi64(rg0123, rg4567);
+        *g = _mm_unpackhi_epi64(rg0123, rg4567);
+        *b = _mm_unpacklo_epi64(bx0123, bx4567);
+    }
+    // Load 8 pixels of interleaved uint16 r,g,b,a (64 bits per pixel) and split
+    // into planes.  Tail path loads one 64-bit pixel at a time via loadl/loadh.
+    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+        __m128i _01, _23, _45, _67;
+        if (SK_EXPECT(tail,0)) {
+            auto src = (const double*)ptr;
+            _01 = _23 = _45 = _67 = _mm_setzero_si128();
+            if (tail > 0) { _01 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_01), src+0)); }
+            if (tail > 1) { _01 = _mm_castpd_si128(_mm_loadh_pd(_mm_castsi128_pd(_01), src+1)); }
+            if (tail > 2) { _23 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_23), src+2)); }
+            if (tail > 3) { _23 = _mm_castpd_si128(_mm_loadh_pd(_mm_castsi128_pd(_23), src+3)); }
+            if (tail > 4) { _45 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_45), src+4)); }
+            if (tail > 5) { _45 = _mm_castpd_si128(_mm_loadh_pd(_mm_castsi128_pd(_45), src+5)); }
+            if (tail > 6) { _67 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_67), src+6)); }
+        } else {
+            _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);
+            _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);
+            _45 = _mm_loadu_si128(((__m128i*)ptr) + 2);
+            _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
+        }
+
+        auto _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
+             _13 = _mm_unpackhi_epi16(_01, _23),  // r1 r3 g1 g3 b1 b3 a1 a3
+             _46 = _mm_unpacklo_epi16(_45, _67),
+             _57 = _mm_unpackhi_epi16(_45, _67);
+
+        auto rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
+             ba0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 a0 a1 a2 a3
+             rg4567 = _mm_unpacklo_epi16(_46, _57),
+             ba4567 = _mm_unpackhi_epi16(_46, _57);
+
+        *r = _mm_unpacklo_epi64(rg0123, rg4567);
+        *g = _mm_unpackhi_epi64(rg0123, rg4567);
+        *b = _mm_unpacklo_epi64(ba0123, ba4567);
+        *a = _mm_unpackhi_epi64(ba0123, ba4567);
+    }
+    // Store 8 pixels of uint16 r,g,b,a: two unpack rounds interleave the planes
+    // back into per-pixel 64-bit groups, written whole or one pixel at a time.
+    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+        auto rg0123 = _mm_unpacklo_epi16(r, g),  // r0 g0 r1 g1 r2 g2 r3 g3
+             rg4567 = _mm_unpackhi_epi16(r, g),  // r4 g4 r5 g5 r6 g6 r7 g7
+             ba0123 = _mm_unpacklo_epi16(b, a),
+             ba4567 = _mm_unpackhi_epi16(b, a);
+
+        auto _01 = _mm_unpacklo_epi32(rg0123, ba0123),
+             _23 = _mm_unpackhi_epi32(rg0123, ba0123),
+             _45 = _mm_unpacklo_epi32(rg4567, ba4567),
+             _67 = _mm_unpackhi_epi32(rg4567, ba4567);
+
+        if (SK_EXPECT(tail,0)) {
+            auto dst = (double*)ptr;
+            if (tail > 0) { _mm_storel_pd(dst+0, _mm_castsi128_pd(_01)); }
+            if (tail > 1) { _mm_storeh_pd(dst+1, _mm_castsi128_pd(_01)); }
+            if (tail > 2) { _mm_storel_pd(dst+2, _mm_castsi128_pd(_23)); }
+            if (tail > 3) { _mm_storeh_pd(dst+3, _mm_castsi128_pd(_23)); }
+            if (tail > 4) { _mm_storel_pd(dst+4, _mm_castsi128_pd(_45)); }
+            if (tail > 5) { _mm_storeh_pd(dst+5, _mm_castsi128_pd(_45)); }
+            if (tail > 6) { _mm_storel_pd(dst+6, _mm_castsi128_pd(_67)); }
+        } else {
+            _mm_storeu_si128((__m128i*)ptr + 0, _01);
+            _mm_storeu_si128((__m128i*)ptr + 1, _23);
+            _mm_storeu_si128((__m128i*)ptr + 2, _45);
+            _mm_storeu_si128((__m128i*)ptr + 3, _67);
+        }
+    }
+
+    // Load 8 pixels of interleaved float r,g pairs; permute2f128 + shuffle_ps
+    // de-interleave even (r) and odd (g) lanes.  Tail consumes 4/2/1 pixels.
+    // Lanes beyond `tail` end up unspecified; callers only read valid lanes.
+    SI void load2(const float* ptr, size_t tail, F* r, F* g) {
+        F _0123, _4567;
+        if (SK_EXPECT(tail, 0)) {
+            _0123 = _4567 = _mm256_setzero_ps();
+            F* d = &_0123;
+            if (tail > 3) {
+                *d = _mm256_loadu_ps(ptr);
+                ptr += 8;
+                tail -= 4;
+                d = &_4567;
+            }
+            bool high = false;
+            if (tail > 1) {
+                // NOTE(review): castps128_ps256 leaves the upper 128 bits
+                // undefined — presumably harmless because only the first
+                // `tail` de-interleaved lanes are consumed; confirm.
+                *d = _mm256_castps128_ps256(_mm_loadu_ps(ptr));
+                ptr += 4;
+                tail -= 2;
+                high = true;
+            }
+            if (tail > 0) {
+                *d = high ? _mm256_insertf128_ps(*d, _mm_castsi128_ps(_mm_loadl_epi64((__m128i*)ptr)), 1)
+                          : _mm256_insertf128_ps(*d, _mm_castsi128_ps(_mm_loadl_epi64((__m128i*)ptr)), 0);
+            }
+        } else {
+            _0123 = _mm256_loadu_ps(ptr + 0);
+            _4567 = _mm256_loadu_ps(ptr + 8);
+        }
+
+        F _0145 = _mm256_castpd_ps(_mm256_permute2f128_pd(_mm256_castps_pd(_0123), _mm256_castps_pd(_4567), 0x20)),
+          _2367 = _mm256_castpd_ps(_mm256_permute2f128_pd(_mm256_castps_pd(_0123), _mm256_castps_pd(_4567), 0x31));
+
+        *r = _mm256_shuffle_ps(_0145, _2367, 0x88);
+        *g = _mm256_shuffle_ps(_0145, _2367, 0xDD);
+    }
+    // Store 8 pixels of float r,g pairs: unpack interleaves, permute2f128
+    // restores pixel order, then full/4/2/1-pixel stores.
+    SI void store2(float* ptr, size_t tail, F r, F g) {
+        F _0145 = _mm256_unpacklo_ps(r, g),
+          _2367 = _mm256_unpackhi_ps(r, g);
+        F _0123 = _mm256_castpd_ps(_mm256_permute2f128_pd(_mm256_castps_pd(_0145), _mm256_castps_pd(_2367), 0x20)),
+          _4567 = _mm256_castpd_ps(_mm256_permute2f128_pd(_mm256_castps_pd(_0145), _mm256_castps_pd(_2367), 0x31));
+
+        if (SK_EXPECT(tail, 0)) {
+            const __m256* s = (__m256*)&_0123;
+            if (tail > 3) {
+                _mm256_storeu_ps(ptr, *s);
+                s = (__m256*)&_4567;
+                tail -= 4;
+                ptr += 8;
+            }
+            bool high = false;
+            if (tail > 1) {
+                _mm_storeu_ps(ptr, _mm256_extractf128_ps(*s, 0));
+                ptr += 4;
+                tail -= 2;
+                high = true;
+            }
+            if (tail > 0) {
+                // One pixel left: lanes 4,5 if the low half was just stored.
+                *(ptr + 0) = (*s)[ high ? 4 : 0];
+                *(ptr + 1) = (*s)[ high ? 5 : 1];
+            }
+        } else {
+            _mm256_storeu_ps(ptr + 0, _0123);
+            _mm256_storeu_ps(ptr + 8, _4567);
+        }
+    }
+
+    // Load 8 pixels of interleaved float r,g,b,a.  Pixels land in the vectors
+    // so that _NM holds pixel N in its low 128 bits and pixel M in its high
+    // 128 bits; the switch falls through so tail==0 loads all 8 pixels and
+    // tail==k loads exactly the first k (unloaded lanes stay zero).
+    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
+        F _04, _15, _26, _37;
+        _04 = _15 = _26 = _37 = 0;
+        switch (tail) {
+            case 0: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+28), 1); [[fallthrough]];
+            case 7: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+24), 1); [[fallthrough]];
+            case 6: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+20), 1); [[fallthrough]];
+            case 5: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+16), 1); [[fallthrough]];
+            case 4: _37 = _mm256_insertf128_ps(_37, _mm_loadu_ps(ptr+12), 0); [[fallthrough]];
+            case 3: _26 = _mm256_insertf128_ps(_26, _mm_loadu_ps(ptr+ 8), 0); [[fallthrough]];
+            case 2: _15 = _mm256_insertf128_ps(_15, _mm_loadu_ps(ptr+ 4), 0); [[fallthrough]];
+            case 1: _04 = _mm256_insertf128_ps(_04, _mm_loadu_ps(ptr+ 0), 0);
+        }
+
+        F rg0145 = _mm256_unpacklo_ps(_04,_15),  // r0 r1 g0 g1 | r4 r5 g4 g5
+          ba0145 = _mm256_unpackhi_ps(_04,_15),
+          rg2367 = _mm256_unpacklo_ps(_26,_37),
+          ba2367 = _mm256_unpackhi_ps(_26,_37);
+
+        *r = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(rg0145), _mm256_castps_pd(rg2367)));
+        *g = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(rg0145), _mm256_castps_pd(rg2367)));
+        *b = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(ba0145), _mm256_castps_pd(ba2367)));
+        *a = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(ba0145), _mm256_castps_pd(ba2367)));
+    }
+    // Store 8 pixels of float r,g,b,a: transpose planes back into per-pixel
+    // 128-bit groups, then store whole (permuted into order) or per pixel.
+    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
+        F rg0145 = _mm256_unpacklo_ps(r, g),  // r0 g0 r1 g1 | r4 g4 r5 g5
+          rg2367 = _mm256_unpackhi_ps(r, g),  // r2 ...      | r6 ...
+          ba0145 = _mm256_unpacklo_ps(b, a),  // b0 a0 b1 a1 | b4 a4 b5 a5
+          ba2367 = _mm256_unpackhi_ps(b, a);  // b2 ...      | b6 ...
+
+        F _04 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(rg0145), _mm256_castps_pd(ba0145))),  // r0 g0 b0 a0 | r4 g4 b4 a4
+          _15 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(rg0145), _mm256_castps_pd(ba0145))),  // r1 ...      | r5 ...
+          _26 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(rg2367), _mm256_castps_pd(ba2367))),  // r2 ...      | r6 ...
+          _37 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(rg2367), _mm256_castps_pd(ba2367)));  // r3 ...      | r7 ...
+
+        if (SK_EXPECT(tail, 0)) {
+            if (tail > 0) { _mm_storeu_ps(ptr+ 0, _mm256_extractf128_ps(_04, 0)); }
+            if (tail > 1) { _mm_storeu_ps(ptr+ 4, _mm256_extractf128_ps(_15, 0)); }
+            if (tail > 2) { _mm_storeu_ps(ptr+ 8, _mm256_extractf128_ps(_26, 0)); }
+            if (tail > 3) { _mm_storeu_ps(ptr+12, _mm256_extractf128_ps(_37, 0)); }
+            if (tail > 4) { _mm_storeu_ps(ptr+16, _mm256_extractf128_ps(_04, 1)); }
+            if (tail > 5) { _mm_storeu_ps(ptr+20, _mm256_extractf128_ps(_15, 1)); }
+            if (tail > 6) { _mm_storeu_ps(ptr+24, _mm256_extractf128_ps(_26, 1)); }
+        } else {
+            F _01 = _mm256_permute2f128_ps(_04, _15, 32),  // 32 == 0010 0000 == lo, lo
+              _23 = _mm256_permute2f128_ps(_26, _37, 32),
+              _45 = _mm256_permute2f128_ps(_04, _15, 49),  // 49 == 0011 0001 == hi, hi
+              _67 = _mm256_permute2f128_ps(_26, _37, 49);
+            _mm256_storeu_ps(ptr+ 0, _01);
+            _mm256_storeu_ps(ptr+ 8, _23);
+            _mm256_storeu_ps(ptr+16, _45);
+            _mm256_storeu_ps(ptr+24, _67);
+        }
+    }
+
+#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+// 4-wide SSE path (AVX here still runs 4-wide, using 128-bit registers).
+template <typename T> using V = SK_VECTORTYPE(T, 4);
+    using F   = V<float   >;
+    using I32 = V< int32_t>;
+    using U64 = V<uint64_t>;
+    using U32 = V<uint32_t>;
+    using U16 = V<uint16_t>;
+    using U8  = V<uint8_t >;
+
+    // Bitwise select: (c & t) | (~c & e).  c must be an all-bits mask per lane.
+    SI F if_then_else(I32 c, F t, F e) {
+        return _mm_or_ps(_mm_and_ps(_mm_castsi128_ps(c), t), _mm_andnot_ps(_mm_castsi128_ps(c), e));
+    }
+
+    SI F min(F a, F b) { return _mm_min_ps(a,b); }
+    SI F max(F a, F b) { return _mm_max_ps(a,b); }
+#if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+    SI I32 min(I32 a, I32 b) { return _mm_min_epi32(a,b); }
+    SI U32 min(U32 a, U32 b) { return _mm_min_epu32(a,b); }
+    SI I32 max(I32 a, I32 b) { return _mm_max_epi32(a,b); }
+    SI U32 max(U32 a, U32 b) { return _mm_max_epu32(a,b); }
+#else
+    // SSE2 lacks packed 32-bit integer min/max; emulate with a compare mask
+    // and a bitwise blend (the float bit-casts are just register plumbing).
+    SI I32 min(I32 a, I32 b) {
+        return sk_bit_cast<I32>(if_then_else(sk_bit_cast<I32>(a < b), sk_bit_cast<F>(a), sk_bit_cast<F>(b)));
+    }
+    SI U32 min(U32 a, U32 b) {
+        return sk_bit_cast<U32>(if_then_else(sk_bit_cast<I32>(a < b), sk_bit_cast<F>(a), sk_bit_cast<F>(b)));
+    }
+    SI I32 max(I32 a, I32 b) {
+        return sk_bit_cast<I32>(if_then_else(sk_bit_cast<I32>(a > b), sk_bit_cast<F>(a), sk_bit_cast<F>(b)));
+    }
+    SI U32 max(U32 a, U32 b) {
+        return sk_bit_cast<U32>(if_then_else(sk_bit_cast<I32>(a > b), sk_bit_cast<F>(a), sk_bit_cast<F>(b)));
+    }
+#endif
+
+    // No FMA guaranteed here: mad is a plain multiply-add.
+    SI F mad(F f, F m, F a)  { return f*m+a;              }
+    // v and 0-v differ only in the sign bit, so ANDing them clears it: |v|.
+    SI F  abs_(F v)          { return _mm_and_ps(v, 0-v); }
+#if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+    SI I32 abs_(I32 v)       { return _mm_abs_epi32(v); }
+#else
+    SI I32 abs_(I32 v)       { return max(v, -v); }
+#endif
+    SI F rcp_fast(F v)       { return _mm_rcp_ps  (v);  }
+    // One Newton-Raphson step refines the fast reciprocal estimate.
+    SI F rcp_precise (F v)   { F e = rcp_fast(v); return e * (2.0f - v * e); }
+    SI F rsqrt (F v)         { return _mm_rsqrt_ps(v);  }
+    SI F  sqrt_(F v)         { return _mm_sqrt_ps (v);  }
+
+    SI U32 round(F v, F scale) { return _mm_cvtps_epi32(v*scale); }
+
+    SI U16 pack(U32 v) {
+    #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+        auto p = _mm_packus_epi32(v,v);
+    #else
+        // Sign extend so that _mm_packs_epi32() does the pack we want.
+        auto p = _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);
+        p = _mm_packs_epi32(p,p);
+    #endif
+        return sk_unaligned_load<U16>(&p);  // We have two copies.  Return (the lower) one.
+    }
+    SI U8 pack(U16 v) {
+        auto r = widen_cast<__m128i>(v);
+        r = _mm_packus_epi16(r,r);
+        return sk_unaligned_load<U8>(&r);
+    }
+
+    // NOTE: This only checks the top bit of each lane, and is incorrect with non-mask values.
+    SI bool any(I32 c) { return _mm_movemask_ps(_mm_castsi128_ps(c)) != 0b0000; }
+    SI bool all(I32 c) { return _mm_movemask_ps(_mm_castsi128_ps(c)) == 0b1111; }
+
+    SI F floor_(F v) {
+    #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+        return _mm_floor_ps(v);
+    #else
+        // SSE2 fallback: truncate toward zero, then correct the lanes where
+        // truncation rounded up (i.e. negative non-integers).
+        F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
+        return roundtrip - if_then_else(roundtrip > v, 1, 0);
+    #endif
+    }
+
+    SI F ceil_(F v) {
+    #if defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+        return _mm_ceil_ps(v);
+    #else
+        // Mirror of floor_: bump lanes where truncation rounded down.
+        F roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
+        return roundtrip + if_then_else(roundtrip < v, 1, 0);
+    #endif
+    }
+
+    // 4-lane gather via scalar loads (no hardware gather below AVX2).
+    template <typename T>
+    SI V<T> gather(const T* p, U32 ix) {
+        return {p[ix[0]], p[ix[1]], p[ix[2]], p[ix[3]]};
+    }
+    // Masked scatter via gather/blend/write-back; masked-off slots are
+    // rewritten with the values just gathered from them.
+    template <typename V, typename S>
+    SI void scatter_masked(V src, S* dst, U32 ix, I32 mask) {
+        V before = gather(dst, ix);
+        V after = if_then_else(mask, src, before);
+        dst[ix[0]] = after[0];
+        dst[ix[1]] = after[1];
+        dst[ix[2]] = after[2];
+        dst[ix[3]] = after[3];
+    }
+    // Load 4 pixels of interleaved uint16 r,g pairs; shufflelo/hi (0xD8 swaps
+    // the middle two 16-bit lanes of each half) groups r's and g's so that
+    // 32-bit shuffles 0x88/0xDD can pull out the r and g planes.
+    SI void load2(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
+        __m128i _01;
+        if (SK_EXPECT(tail,0)) {
+            _01 = _mm_setzero_si128();
+            if (tail > 1) {
+                _01 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_01), (const double*)ptr));  // r0 g0 r1 g1 00 00 00 00
+                if (tail > 2) {
+                    _01 = _mm_insert_epi16(_01, *(ptr+4), 4);  // r0 g0 r1 g1 r2 00 00 00
+                    _01 = _mm_insert_epi16(_01, *(ptr+5), 5);  // r0 g0 r1 g1 r2 g2 00 00
+                }
+            } else {
+                _01 = _mm_cvtsi32_si128(*(const uint32_t*)ptr);  // r0 g0 00 00 00 00 00 00
+            }
+        } else {
+            _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);  // r0 g0 r1 g1 r2 g2 r3 g3
+        }
+        auto rg01_23 = _mm_shufflelo_epi16(_01, 0xD8);      // r0 r1 g0 g1 r2 g2 r3 g3
+        auto rg      = _mm_shufflehi_epi16(rg01_23, 0xD8);  // r0 r1 g0 g1 r2 r3 g2 g3
+
+        auto R = _mm_shuffle_epi32(rg, 0x88);  // r0 r1 r2 r3 r0 r1 r2 r3
+        auto G = _mm_shuffle_epi32(rg, 0xDD);  // g0 g1 g2 g3 g0 g1 g2 g3
+        *r = sk_unaligned_load<U16>(&R);
+        *g = sk_unaligned_load<U16>(&G);
+    }
+    // Store 4 pixels of uint16 r,g pairs; tail writes 2 pixels then 1, or just 1.
+    SI void store2(uint16_t* ptr, size_t tail, U16 r, U16 g) {
+        U32 rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g));
+        if (SK_EXPECT(tail, 0)) {
+            if (tail > 1) {
+                _mm_storel_epi64((__m128i*)ptr, rg);
+                if (tail > 2) {
+                    int32_t rgpair = rg[2];
+                    memcpy(ptr + 4, &rgpair, sizeof(rgpair));
+                }
+            } else {
+                int32_t rgpair = rg[0];
+                memcpy(ptr, &rgpair, sizeof(rgpair));
+            }
+        } else {
+            _mm_storeu_si128((__m128i*)ptr + 0, rg);
+        }
+    }
+
+    // Load 4 pixels of interleaved uint16 r,g,b triples (stride 3) and split
+    // into planes with an unpack transpose.
+    SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+        __m128i _0, _1, _2, _3;
+        if (SK_EXPECT(tail,0)) {
+            _1 = _2 = _3 = _mm_setzero_si128();
+            // Load a single rgb triple without reading past its 6 bytes.
+            auto load_rgb = [](const uint16_t* src) {
+                auto v = _mm_cvtsi32_si128(*(const uint32_t*)src);
+                return _mm_insert_epi16(v, src[2], 2);
+            };
+            if (  true  ) { _0 = load_rgb(ptr + 0); }
+            if (tail > 1) { _1 = load_rgb(ptr + 3); }
+            if (tail > 2) { _2 = load_rgb(ptr + 6); }
+        } else {
+            // Load slightly weirdly to make sure we don't load past the end of 4x48 bits.
+            auto _01 =                _mm_loadu_si128((const __m128i*)(ptr + 0))    ,
+                 _23 = _mm_srli_si128(_mm_loadu_si128((const __m128i*)(ptr + 4)), 4);
+
+            // Each _N holds R,G,B for pixel N in its lower 3 lanes (upper 5 are ignored).
+            _0 = _01;
+            _1 = _mm_srli_si128(_01, 6);
+            _2 = _23;
+            _3 = _mm_srli_si128(_23, 6);
+        }
+
+        // De-interlace to R,G,B.
+        auto _02 = _mm_unpacklo_epi16(_0, _2),  // r0 r2 g0 g2 b0 b2 xx xx
+             _13 = _mm_unpacklo_epi16(_1, _3);  // r1 r3 g1 g3 b1 b3 xx xx
+
+        auto R = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
+             G = _mm_srli_si128(R, 8),
+             B = _mm_unpackhi_epi16(_02, _13);  // b0 b1 b2 b3 xx xx xx xx
+
+        *r = sk_unaligned_load<U16>(&R);
+        *g = sk_unaligned_load<U16>(&G);
+        *b = sk_unaligned_load<U16>(&B);
+    }
+
+    // Load 4 pixels of interleaved uint16 r,g,b,a (64 bits per pixel) and split
+    // into planes; tail loads one 64-bit pixel at a time via loadl/loadh.
+    SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+        __m128i _01, _23;
+        if (SK_EXPECT(tail,0)) {
+            _01 = _23 = _mm_setzero_si128();
+            auto src = (const double*)ptr;
+            if (  true  ) { _01 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_01), src + 0)); } // r0 g0 b0 a0 00 00 00 00
+            if (tail > 1) { _01 = _mm_castpd_si128(_mm_loadh_pd(_mm_castsi128_pd(_01), src + 1)); } // r0 g0 b0 a0 r1 g1 b1 a1
+            if (tail > 2) { _23 = _mm_castpd_si128(_mm_loadl_pd(_mm_castsi128_pd(_23), src + 2)); } // r2 g2 b2 a2 00 00 00 00
+        } else {
+            _01 = _mm_loadu_si128(((__m128i*)ptr) + 0);  // r0 g0 b0 a0 r1 g1 b1 a1
+            _23 = _mm_loadu_si128(((__m128i*)ptr) + 1);  // r2 g2 b2 a2 r3 g3 b3 a3
+        }
+
+        auto _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
+             _13 = _mm_unpackhi_epi16(_01, _23);  // r1 r3 g1 g3 b1 b3 a1 a3
+
+        auto rg = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
+             ba = _mm_unpackhi_epi16(_02, _13);  // b0 b1 b2 b3 a0 a1 a2 a3
+
+        *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0);
+        *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4);
+        *b = sk_unaligned_load<U16>((uint16_t*)&ba + 0);
+        *a = sk_unaligned_load<U16>((uint16_t*)&ba + 4);
+    }
+
+    // Store 4 pixels of uint16 r,g,b,a; tail writes one 64-bit pixel at a time.
+    SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+        auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)),
+             ba = _mm_unpacklo_epi16(widen_cast<__m128i>(b), widen_cast<__m128i>(a));
+
+        if (SK_EXPECT(tail, 0)) {
+            auto dst = (double*)ptr;
+            if (  true  ) { _mm_storel_pd(dst + 0, _mm_castsi128_pd(_mm_unpacklo_epi32(rg, ba))); }
+            if (tail > 1) { _mm_storeh_pd(dst + 1, _mm_castsi128_pd(_mm_unpacklo_epi32(rg, ba))); }
+            if (tail > 2) { _mm_storel_pd(dst + 2, _mm_castsi128_pd(_mm_unpackhi_epi32(rg, ba))); }
+        } else {
+            _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));
+            _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));
+        }
+    }
+
+    // Load 4 pixels of interleaved float r,g pairs; shuffle 0x88/0xDD pulls out
+    // the even (r) and odd (g) lanes.  Tail loads 64 bits (one pixel) at a time.
+    SI void load2(const float* ptr, size_t tail, F* r, F* g) {
+        F _01, _23;
+        if (SK_EXPECT(tail, 0)) {
+            _01 = _23 = _mm_setzero_ps();
+            if (  true  ) { _01 = _mm_loadl_pi(_01, (__m64 const*)(ptr + 0)); }
+            if (tail > 1) { _01 = _mm_loadh_pi(_01, (__m64 const*)(ptr + 2)); }
+            if (tail > 2) { _23 = _mm_loadl_pi(_23, (__m64 const*)(ptr + 4)); }
+        } else {
+            _01 = _mm_loadu_ps(ptr + 0);
+            _23 = _mm_loadu_ps(ptr + 4);
+        }
+        *r = _mm_shuffle_ps(_01, _23, 0x88);
+        *g = _mm_shuffle_ps(_01, _23, 0xDD);
+    }
+    // Store 4 pixels of float r,g pairs, re-interleaved; tail stores 64 bits
+    // (one pixel) at a time.
+    SI void store2(float* ptr, size_t tail, F r, F g) {
+        F _01 = _mm_unpacklo_ps(r, g),
+          _23 = _mm_unpackhi_ps(r, g);
+        if (SK_EXPECT(tail, 0)) {
+            if (  true  ) { _mm_storel_pi((__m64*)(ptr + 0), _01); }
+            if (tail > 1) { _mm_storeh_pi((__m64*)(ptr + 2), _01); }
+            if (tail > 2) { _mm_storel_pi((__m64*)(ptr + 4), _23); }
+        } else {
+            _mm_storeu_ps(ptr + 0, _01);
+            _mm_storeu_ps(ptr + 4, _23);
+        }
+    }
+
+    // Load 4 pixels of interleaved float r,g,b,a (one full __m128 per pixel)
+    // and transpose pixels -> planes with _MM_TRANSPOSE4_PS.
+    SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
+        F _0, _1, _2, _3;
+        if (SK_EXPECT(tail, 0)) {
+            _1 = _2 = _3 = _mm_setzero_ps();
+            if (  true  ) { _0 = _mm_loadu_ps(ptr + 0); }
+            if (tail > 1) { _1 = _mm_loadu_ps(ptr + 4); }
+            if (tail > 2) { _2 = _mm_loadu_ps(ptr + 8); }
+        } else {
+            _0 = _mm_loadu_ps(ptr + 0);
+            _1 = _mm_loadu_ps(ptr + 4);
+            _2 = _mm_loadu_ps(ptr + 8);
+            _3 = _mm_loadu_ps(ptr +12);
+        }
+        _MM_TRANSPOSE4_PS(_0,_1,_2,_3);
+        *r = _0;
+        *g = _1;
+        *b = _2;
+        *a = _3;
+    }
+
+    // Store 4 pixels of float r,g,b,a: transpose planes -> pixels, then store
+    // one full pixel per register (tail stores only the first `tail`).
+    SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
+        _MM_TRANSPOSE4_PS(r,g,b,a);
+        if (SK_EXPECT(tail, 0)) {
+            if (  true  ) { _mm_storeu_ps(ptr + 0, r); }
+            if (tail > 1) { _mm_storeu_ps(ptr + 4, g); }
+            if (tail > 2) { _mm_storeu_ps(ptr + 8, b); }
+        } else {
+            _mm_storeu_ps(ptr + 0, r);
+            _mm_storeu_ps(ptr + 4, g);
+            _mm_storeu_ps(ptr + 8, b);
+            _mm_storeu_ps(ptr +12, a);
+        }
+    }
+#endif
+
+// We need to be careful with casts.
+// (F)x means cast x to float in the portable path, but bit_cast x to float in the others.
+// These named casts and bit_cast() are always what they seem to be.
+#if defined(JUMPER_IS_SCALAR)
+    SI F   cast  (U32 v) { return   (F)v; }
+    SI F   cast64(U64 v) { return   (F)v; }
+    SI U32 trunc_(F   v) { return (U32)v; }
+    SI U32 expand(U16 v) { return (U32)v; }
+    SI U32 expand(U8  v) { return (U32)v; }
+#else
+    // Vector paths convert U32 through I32 — assumes values fit in int32
+    // range (TODO confirm callers keep inputs below 2^31).
+    SI F   cast  (U32 v) { return SK_CONVERTVECTOR(sk_bit_cast<I32>(v), F); }
+    SI F   cast64(U64 v) { return SK_CONVERTVECTOR(                 v, F); }
+    SI U32 trunc_(F   v) { return sk_bit_cast<U32>(SK_CONVERTVECTOR(v, I32)); }
+    SI U32 expand(U16 v) { return SK_CONVERTVECTOR(                 v, U32); }
+    SI U32 expand(U8  v) { return SK_CONVERTVECTOR(                 v, U32); }
+#endif
+
+// Integer blends route through the float if_then_else; the bit_casts preserve
+// bit patterns exactly, so this is a pure bitwise select for any lane type.
+SI U32 if_then_else(I32 c, U32 t, U32 e) {
+    return sk_bit_cast<U32>(if_then_else(c, sk_bit_cast<F>(t), sk_bit_cast<F>(e)));
+}
+
+SI I32 if_then_else(I32 c, I32 t, I32 e) {
+    return sk_bit_cast<I32>(if_then_else(c, sk_bit_cast<F>(t), sk_bit_cast<F>(e)));
+}
+
+// Byte-swap each 16-bit lane (e.g. 565/4444 endian fixups).
+SI U16 bswap(U16 x) {
+#if defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41)
+    // Somewhat inexplicably Clang decides to do (x<<8) | (x>>8) in 32-bit lanes
+    // when generating code for SSE2 and SSE4.1.  We'll do it manually...
+    auto v = widen_cast<__m128i>(x);
+    v = _mm_slli_epi16(v,8) | _mm_srli_epi16(v,8);
+    return sk_unaligned_load<U16>(&v);
+#else
+    // Portable form; all other paths (incl. AVX, NEON) lower this well.
+    return (x<<8) | (x>>8);
+#endif
+}
+
+// Fractional part: v - floor(v), always in [0,1).
+SI F fract(F v) { return v - floor_(v); }
+
+// See http://www.machinedlearnings.com/2011/06/fast-approximate-logarithm-exponential.html
+SI F approx_log2(F x) {
+    // e - 127 is a fair approximation of log2(x) in its own right...
+    F e = cast(sk_bit_cast<U32>(x)) * (1.0f / (1<<23));
+
+    // ... but using the mantissa to refine its error is _much_ better.
+    // (Mask keeps the 23 mantissa bits; OR-ing 0x3f000000 scales m into [0.5,1).)
+    F m = sk_bit_cast<F>((sk_bit_cast<U32>(x) & 0x007fffff) | 0x3f000000);
+    return e
+         - 124.225514990f
+         -   1.498030302f * m
+         -   1.725879990f / (0.3520887068f + m);
+}
+
+// Natural log built on the fast base-2 approximation: ln(x) = ln(2) * log2(x).
+SI F approx_log(F x) {
+    constexpr float kLn2 = 0.69314718f;
+    return approx_log2(x) * kLn2;
+}
+
+// Approximate 2^x: the polynomial in the fraction corrects the exponent-bias
+// trick, and round(..., 2^23) builds the float's bit pattern directly
+// (exponent in the high bits), which we then bit-cast back to float.
+SI F approx_pow2(F x) {
+    F f = fract(x);
+    return sk_bit_cast<F>(round(1.0f * (1<<23),
+                                x + 121.274057500f
+                                  -   1.490129070f * f
+                                  +  27.728023300f / (4.84252568f - f)));
+}
+
+// e^x via the base-2 approximation: e^x = 2^(x * log2(e)).
+SI F approx_exp(F x) {
+    constexpr float kLog2E = 1.4426950408889634074f;
+    return approx_pow2(x * kLog2E);
+}
+
+// Approximate x^y as 2^(log2(x)*y), special-casing x==0 and x==1 where the
+// log-based path would be wrong or needlessly inexact (returns x itself).
+SI F approx_powf(F x, F y) {
+    return if_then_else((x == 0)|(x == 1), x
+                                         , approx_pow2(approx_log2(x) * y));
+}
+
+// Convert half-precision (f16) lanes to float, using hardware conversions
+// where available (ARM64 NEON, AVX2 F16C) and bit manipulation elsewhere.
+SI F from_half(U16 h) {
+#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
+    && !defined(SK_BUILD_FOR_GOOGLE3)  // Temporary workaround for some Google3 builds.
+    return vcvt_f32_f16(sk_bit_cast<float16x4_t>(h));
+
+#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+    return _mm256_cvtph_ps(h);
+
+#else
+    // Remember, a half is 1-5-10 (sign-exponent-mantissa) with 15 exponent bias.
+    U32 sem = expand(h),
+        s   = sem & 0x8000,
+         em = sem ^ s;
+
+    // Convert to 1-8-23 float with 127 bias, flushing denorm halfs (including zero) to zero.
+    auto denorm = sk_bit_cast<I32>(em) < 0x0400;      // I32 comparison is often quicker, and always safe here.
+    return if_then_else(denorm, F(0)
+                              , sk_bit_cast<F>( (s<<16) + (em<<13) + ((127-15)<<23) ));
+#endif
+}
+
+// Convert float lanes to half-precision (f16); inverse of from_half above,
+// with the same hardware fast paths and denorm-flush behavior.
+SI U16 to_half(F f) {
+#if defined(JUMPER_IS_NEON) && defined(SK_CPU_ARM64) \
+    && !defined(SK_BUILD_FOR_GOOGLE3)  // Temporary workaround for some Google3 builds.
+    return sk_bit_cast<U16>(vcvt_f16_f32(f));
+
+#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+    return _mm256_cvtps_ph(f, _MM_FROUND_CUR_DIRECTION);
+
+#else
+    // Remember, a float is 1-8-23 (sign-exponent-mantissa) with 127 exponent bias.
+    U32 sem = sk_bit_cast<U32>(f),
+        s   = sem & 0x80000000,
+         em = sem ^ s;
+
+    // Convert to 1-5-10 half with 15 bias, flushing denorm halfs (including zero) to zero.
+    auto denorm = sk_bit_cast<I32>(em) < 0x38800000;  // I32 comparison is often quicker, and always safe here.
+    return pack(if_then_else(denorm, U32(0)
+                                   , (s>>16) + (em>>13) - ((127-15)<<10)));
+#endif
+}
+
+// Our fundamental vector depth is our pixel stride.
+static constexpr size_t N = sizeof(F) / sizeof(float);
+
+// We're finally going to get to what a Stage function looks like!
+//    tail == 0 ~~> work on a full N pixels
+//    tail != 0 ~~> work on only the first tail pixels
+// tail is always < N.
+
+// Any custom ABI to use for all (non-externally-facing) stage functions?
+// Also decide here whether to use narrow (compromise) or wide (ideal) stages.
+#if defined(SK_CPU_ARM32) && defined(JUMPER_IS_NEON)
+    // This lets us pass vectors more efficiently on 32-bit ARM.
+    // We can still only pass 16 floats, so best as 4x {r,g,b,a}.
+    #define ABI __attribute__((pcs("aapcs-vfp")))
+    #define JUMPER_NARROW_STAGES 1
+#elif defined(_MSC_VER)
+    // Even if not vectorized, this lets us pass {r,g,b,a} as registers,
+    // instead of {b,a} on the stack.  Narrow stages work best for __vectorcall.
+    #define ABI __vectorcall
+    #define JUMPER_NARROW_STAGES 1
+#elif defined(__x86_64__) || defined(SK_CPU_ARM64)
+    // These platforms are ideal for wider stages, and their default ABI is ideal.
+    #define ABI
+    #define JUMPER_NARROW_STAGES 0
+#else
+    // 32-bit or unknown... shunt them down the narrow path.
+    // Odds are these have few registers and are better off there.
+    #define ABI
+    #define JUMPER_NARROW_STAGES 1
+#endif
+
+#if JUMPER_NARROW_STAGES
+    // Narrow stages pass only src r,g,b,a in registers; position, tail, and
+    // the dst color registers travel in this struct instead.
+    struct Params {
+        size_t dx, dy, tail;
+        F dr,dg,db,da;
+    };
+    using Stage = void(ABI*)(Params*, SkRasterPipelineStage* program, F r, F g, F b, F a);
+#else
+    // Wide stages pass all eight color registers (src + dst) directly.
+    using Stage = void(ABI*)(size_t tail, SkRasterPipelineStage* program, size_t dx, size_t dy,
+                             F,F,F,F, F,F,F,F);
+#endif
+
+// Drive the pipeline over the rectangle [dx,xlimit) x [dy,ylimit): for each
+// row, run the first stage on full N-pixel batches, then once more with
+// tail = leftover pixel count (0 < tail < N) if the width isn't a multiple
+// of N.  Color registers start zeroed each invocation.
+static void start_pipeline(size_t dx, size_t dy,
+                           size_t xlimit, size_t ylimit,
+                           SkRasterPipelineStage* program) {
+    auto start = (Stage)program->fn;
+    const size_t x0 = dx;
+    for (; dy < ylimit; dy++) {
+    #if JUMPER_NARROW_STAGES
+        Params params = { x0,dy,0, 0,0,0,0 };
+        while (params.dx + N <= xlimit) {
+            start(&params,program, 0,0,0,0);
+            params.dx += N;
+        }
+        if (size_t tail = xlimit - params.dx) {
+            params.tail = tail;
+            start(&params,program, 0,0,0,0);
+        }
+    #else
+        dx = x0;
+        while (dx + N <= xlimit) {
+            start(0,program,dx,dy,    0,0,0,0, 0,0,0,0);
+            dx += N;
+        }
+        if (size_t tail = xlimit - dx) {
+            start(tail,program,dx,dy, 0,0,0,0, 0,0,0,0);
+        }
+    #endif
+    }
+}
+
+#if SK_HAS_MUSTTAIL
+    #define JUMPER_MUSTTAIL [[clang::musttail]]
+#else
+    #define JUMPER_MUSTTAIL
+#endif
+
+// DECLARE_STAGE generates, for each stage `name`:
+//   - a forward declaration of the user-written kernel name##_k, and
+//   - the ABI wrapper `name` that unpacks position/tail/color state, calls
+//     the kernel, advances `program` (INC), and chains to the next stage's
+//     fn pointer (optionally as a guaranteed tail call via MUSTTAIL).
+// OFFSET is prepended to the kernel call (used by branch stages to consume
+// the kernel's return value); STAGE_RET is the kernel's return type.
+#if JUMPER_NARROW_STAGES
+    #define DECLARE_STAGE(name, ARG, STAGE_RET, INC, OFFSET, MUSTTAIL)                     \
+        SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, size_t tail,                      \
+                              F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da);         \
+        static void ABI name(Params* params, SkRasterPipelineStage* program,               \
+                             F r, F g, F b, F a) {                                         \
+            OFFSET name##_k(Ctx{program},params->dx,params->dy,params->tail, r,g,b,a,      \
+                            params->dr, params->dg, params->db, params->da);               \
+            INC;                                                                           \
+            auto fn = (Stage)program->fn;                                                  \
+            MUSTTAIL return fn(params, program, r,g,b,a);                                  \
+        }                                                                                  \
+        SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, size_t tail,                      \
+                              F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
+#else
+    #define DECLARE_STAGE(name, ARG, STAGE_RET, INC, OFFSET, MUSTTAIL)                     \
+        SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, size_t tail,                      \
+                              F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da);         \
+        static void ABI name(size_t tail, SkRasterPipelineStage* program, size_t dx, size_t dy, \
+                             F r, F g, F b, F a, F dr, F dg, F db, F da) {                 \
+            OFFSET name##_k(Ctx{program},dx,dy,tail, r,g,b,a, dr,dg,db,da);                \
+            INC;                                                                           \
+            auto fn = (Stage)program->fn;                                                  \
+            MUSTTAIL return fn(tail, program, dx,dy, r,g,b,a, dr,dg,db,da);                \
+        }                                                                                  \
+        SI STAGE_RET name##_k(ARG, size_t dx, size_t dy, size_t tail,                      \
+                              F& r, F& g, F& b, F& a, F& dr, F& dg, F& db, F& da)
+#endif
+
+// A typical stage returns void, always increments the program counter by 1, and lets the optimizer
+// decide whether or not tail-calling is appropriate.
+#define STAGE(name, arg) \
+    DECLARE_STAGE(name, arg, void, ++program, /*no offset*/, /*no musttail*/)
+
+// A tail stage returns void, always increments the program counter by 1, and uses tail-calling.
+// Tail-calling is necessary in SkSL-generated programs, which can be thousands of ops long, and
+// could overflow the stack (particularly in debug).
+#define STAGE_TAIL(name, arg) \
+    DECLARE_STAGE(name, arg, void, ++program, /*no offset*/, JUMPER_MUSTTAIL)
+
+// A branch stage returns an integer, which is added directly to the program counter, and tailcalls.
+#define STAGE_BRANCH(name, arg) \
+ DECLARE_STAGE(name, arg, int, /*no increment*/, program +=, JUMPER_MUSTTAIL)
+
+// just_return() is a simple no-op stage that only exists to end the chain,
+// returning back up to start_pipeline(), and from there to the caller.
+#if JUMPER_NARROW_STAGES
+ static void ABI just_return(Params*, SkRasterPipelineStage*, F,F,F,F) {}
+#else
+ static void ABI just_return(size_t, SkRasterPipelineStage*, size_t,size_t, F,F,F,F, F,F,F,F) {}
+#endif
+
+// Note that in release builds, most stages consume no stack (thanks to tail call optimization).
+// However: certain builds (especially with non-clang compilers) may fail to optimize tail
+// calls, resulting in actual stack frames being generated.
+//
+// stack_checkpoint() and stack_rewind() are special stages that can be used to manage stack growth.
+// If a pipeline contains a stack_checkpoint, followed by any number of stack_rewind (at any point),
+// the C++ stack will be reset to the state it was at when the stack_checkpoint was initially hit.
+//
+// All instances of stack_rewind (as well as the one instance of stack_checkpoint near the start of
+// a pipeline) share a single context (of type SkRasterPipeline_RewindCtx). That context holds the
+// full state of the mutable registers that are normally passed to the next stage in the program.
+//
+// stack_rewind is the only stage other than just_return that actually returns (rather than jumping
+// to the next stage in the program). Before it does so, it stashes all of the registers in the
+// context. This includes the updated `program` pointer. Unlike stages that tail call exactly once,
+// stack_checkpoint calls the next stage in the program repeatedly, as long as the `program` in the
+// context is overwritten (i.e., as long as a stack_rewind was the reason the pipeline returned,
+// rather than a just_return).
+//
+// Normally, just_return is the only stage that returns, and no other stage does anything after a
+// subsequent (called) stage returns, so the stack just unwinds all the way to start_pipeline.
+// With stack_checkpoint on the stack, any stack_rewind stages will return all the way up to the
+// stack_checkpoint. That grabs the values that would have been passed to the next stage (from the
+// context), and continues the linear execution of stages, but has reclaimed all of the stack frames
+// pushed before the stack_rewind before doing so.
+#if JUMPER_NARROW_STAGES
+ static void ABI stack_checkpoint(Params* params, SkRasterPipelineStage* program,
+ F r, F g, F b, F a) {
+ SkRasterPipeline_RewindCtx* ctx = Ctx{program};
+ while (program) {
+ auto next = (Stage)(++program)->fn;
+
+ // A null `stage` after the call means the pipeline ended via just_return.
+ ctx->stage = nullptr;
+ next(params, program, r, g, b, a);
+ program = ctx->stage;
+
+ if (program) {
+ // A stack_rewind fired: restore all registers from the context and resume.
+ r = sk_unaligned_load<F>(ctx->r );
+ g = sk_unaligned_load<F>(ctx->g );
+ b = sk_unaligned_load<F>(ctx->b );
+ a = sk_unaligned_load<F>(ctx->a );
+ params->dr = sk_unaligned_load<F>(ctx->dr);
+ params->dg = sk_unaligned_load<F>(ctx->dg);
+ params->db = sk_unaligned_load<F>(ctx->db);
+ params->da = sk_unaligned_load<F>(ctx->da);
+ }
+ }
+ }
+ static void ABI stack_rewind(Params* params, SkRasterPipelineStage* program,
+ F r, F g, F b, F a) {
+ // Stash every mutable register (and the program counter) and return,
+ // unwinding back to the nearest stack_checkpoint.
+ SkRasterPipeline_RewindCtx* ctx = Ctx{program};
+ sk_unaligned_store(ctx->r , r );
+ sk_unaligned_store(ctx->g , g );
+ sk_unaligned_store(ctx->b , b );
+ sk_unaligned_store(ctx->a , a );
+ sk_unaligned_store(ctx->dr, params->dr);
+ sk_unaligned_store(ctx->dg, params->dg);
+ sk_unaligned_store(ctx->db, params->db);
+ sk_unaligned_store(ctx->da, params->da);
+ ctx->stage = program;
+ }
+#else
+ static void ABI stack_checkpoint(size_t tail, SkRasterPipelineStage* program,
+ size_t dx, size_t dy,
+ F r, F g, F b, F a, F dr, F dg, F db, F da) {
+ SkRasterPipeline_RewindCtx* ctx = Ctx{program};
+ while (program) {
+ auto next = (Stage)(++program)->fn;
+
+ // A null `stage` after the call means the pipeline ended via just_return.
+ ctx->stage = nullptr;
+ next(tail, program, dx, dy, r, g, b, a, dr, dg, db, da);
+ program = ctx->stage;
+
+ if (program) {
+ // A stack_rewind fired: restore all registers from the context and resume.
+ r = sk_unaligned_load<F>(ctx->r );
+ g = sk_unaligned_load<F>(ctx->g );
+ b = sk_unaligned_load<F>(ctx->b );
+ a = sk_unaligned_load<F>(ctx->a );
+ dr = sk_unaligned_load<F>(ctx->dr);
+ dg = sk_unaligned_load<F>(ctx->dg);
+ db = sk_unaligned_load<F>(ctx->db);
+ da = sk_unaligned_load<F>(ctx->da);
+ }
+ }
+ }
+ static void ABI stack_rewind(size_t tail, SkRasterPipelineStage* program,
+ size_t dx, size_t dy,
+ F r, F g, F b, F a, F dr, F dg, F db, F da) {
+ // Stash every mutable register (and the program counter) and return,
+ // unwinding back to the nearest stack_checkpoint.
+ SkRasterPipeline_RewindCtx* ctx = Ctx{program};
+ sk_unaligned_store(ctx->r , r );
+ sk_unaligned_store(ctx->g , g );
+ sk_unaligned_store(ctx->b , b );
+ sk_unaligned_store(ctx->a , a );
+ sk_unaligned_store(ctx->dr, dr);
+ sk_unaligned_store(ctx->dg, dg);
+ sk_unaligned_store(ctx->db, db);
+ sk_unaligned_store(ctx->da, da);
+ ctx->stage = program;
+ }
+#endif
+
+
+// We could start defining normal Stages now. But first, some helper functions.
+
+// These load() and store() methods are tail-aware,
+// but focus mainly on keeping the at-stride tail==0 case fast.
+
+// Load up to N lanes of T from src: a full unaligned vector load when tail==0,
+// otherwise only the first `tail` lanes (remaining lanes are zeroed).
+template <typename V, typename T>
+SI V load(const T* src, size_t tail) {
+#if !defined(JUMPER_IS_SCALAR)
+ SK_ASSUME(tail < N);
+ if (SK_EXPECT(tail, 0)) {
+ V v{}; // Any inactive lanes are zeroed.
+ // The switch falls through: each case copies one lane, then a memcpy
+ // handles the largest power-of-two-sized prefix in one shot.
+ switch (tail) {
+ case 7: v[6] = src[6]; [[fallthrough]];
+ case 6: v[5] = src[5]; [[fallthrough]];
+ case 5: v[4] = src[4]; [[fallthrough]];
+ case 4: memcpy(&v, src, 4*sizeof(T)); break;
+ case 3: v[2] = src[2]; [[fallthrough]];
+ case 2: memcpy(&v, src, 2*sizeof(T)); break;
+ case 1: memcpy(&v, src, 1*sizeof(T)); break;
+ }
+ return v;
+ }
+#endif
+ return sk_unaligned_load<V>(src);
+}
+
+// Mirror of load(): store all N lanes when tail==0, else only the first `tail`.
+template <typename V, typename T>
+SI void store(T* dst, V v, size_t tail) {
+#if !defined(JUMPER_IS_SCALAR)
+ SK_ASSUME(tail < N);
+ if (SK_EXPECT(tail, 0)) {
+ switch (tail) {
+ case 7: dst[6] = v[6]; [[fallthrough]];
+ case 6: dst[5] = v[5]; [[fallthrough]];
+ case 5: dst[4] = v[4]; [[fallthrough]];
+ case 4: memcpy(dst, &v, 4*sizeof(T)); break;
+ case 3: dst[2] = v[2]; [[fallthrough]];
+ case 2: memcpy(dst, &v, 2*sizeof(T)); break;
+ case 1: memcpy(dst, &v, 1*sizeof(T)); break;
+ }
+ return;
+ }
+#endif
+ sk_unaligned_store(dst, v);
+}
+
+// The from_*() helpers unpack fixed-point pixel formats into normalized floats,
+// scaling each channel by the reciprocal of its maximum value.
+SI F from_byte(U8 b) {
+ return cast(expand(b)) * (1/255.0f);
+}
+SI F from_short(U16 s) {
+ return cast(expand(s)) * (1/65535.0f);
+}
+SI void from_565(U16 _565, F* r, F* g, F* b) {
+ U32 wide = expand(_565);
+ // Masking in place (no shift) and folding the shift into the scale saves work.
+ *r = cast(wide & (31<<11)) * (1.0f / (31<<11));
+ *g = cast(wide & (63<< 5)) * (1.0f / (63<< 5));
+ *b = cast(wide & (31<< 0)) * (1.0f / (31<< 0));
+}
+SI void from_4444(U16 _4444, F* r, F* g, F* b, F* a) {
+ U32 wide = expand(_4444);
+ *r = cast(wide & (15<<12)) * (1.0f / (15<<12));
+ *g = cast(wide & (15<< 8)) * (1.0f / (15<< 8));
+ *b = cast(wide & (15<< 4)) * (1.0f / (15<< 4));
+ *a = cast(wide & (15<< 0)) * (1.0f / (15<< 0));
+}
+SI void from_8888(U32 _8888, F* r, F* g, F* b, F* a) {
+ *r = cast((_8888 ) & 0xff) * (1/255.0f);
+ *g = cast((_8888 >> 8) & 0xff) * (1/255.0f);
+ *b = cast((_8888 >> 16) & 0xff) * (1/255.0f);
+ *a = cast((_8888 >> 24) ) * (1/255.0f);
+}
+SI void from_88(U16 _88, F* r, F* g) {
+ U32 wide = expand(_88);
+ *r = cast((wide ) & 0xff) * (1/255.0f);
+ *g = cast((wide >> 8) & 0xff) * (1/255.0f);
+}
+SI void from_1010102(U32 rgba, F* r, F* g, F* b, F* a) {
+ *r = cast((rgba ) & 0x3ff) * (1/1023.0f);
+ *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f);
+ *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f);
+ *a = cast((rgba >> 30) ) * (1/ 3.0f);
+}
+SI void from_1010102_xr(U32 rgba, F* r, F* g, F* b, F* a) {
+ // Extended-range variant: RGB map to [min,max] rather than [0,1].
+ static constexpr float min = -0.752941f;
+ static constexpr float max = 1.25098f;
+ static constexpr float range = max - min;
+ *r = cast((rgba ) & 0x3ff) * (1/1023.0f) * range + min;
+ *g = cast((rgba >> 10) & 0x3ff) * (1/1023.0f) * range + min;
+ *b = cast((rgba >> 20) & 0x3ff) * (1/1023.0f) * range + min;
+ *a = cast((rgba >> 30) ) * (1/ 3.0f);
+}
+SI void from_1616(U32 _1616, F* r, F* g) {
+ *r = cast((_1616 ) & 0xffff) * (1/65535.0f);
+ *g = cast((_1616 >> 16) & 0xffff) * (1/65535.0f);
+}
+SI void from_16161616(U64 _16161616, F* r, F* g, F* b, F* a) {
+ *r = cast64((_16161616 ) & 0xffff) * (1/65535.0f);
+ *g = cast64((_16161616 >> 16) & 0xffff) * (1/65535.0f);
+ *b = cast64((_16161616 >> 32) & 0xffff) * (1/65535.0f);
+ *a = cast64((_16161616 >> 48) & 0xffff) * (1/65535.0f);
+}
+
+// Used by load_ and store_ stages to get to the right (dx,dy) starting point of contiguous memory.
+template <typename T>
+SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
+ return (T*)ctx->pixels + dy*ctx->stride + dx;
+}
+
+// clamp v to [0,limit).
+SI F clamp(F v, F limit) {
+ // Subtracting 1 ULP from `limit` turns the exclusive bound into an inclusive one.
+ F inclusive = sk_bit_cast<F>( sk_bit_cast<U32>(limit) - 1 ); // Exclusive -> inclusive.
+ return min(max(0.0f, v), inclusive);
+}
+
+// clamp to (0,limit).
+SI F clamp_ex(F v, F limit) {
+ // The lower bound is the smallest positive normal float, keeping the result strictly > 0.
+ const F inclusiveZ = std::numeric_limits<float>::min(),
+ inclusiveL = sk_bit_cast<F>( sk_bit_cast<U32>(limit) - 1 );
+ return min(max(inclusiveZ, v), inclusiveL);
+}
+
+// Bhaskara I's sine approximation
+// 16x(pi - x) / (5*pi^2 - 4x(pi - x))
+// ... divide by 4
+// 4x(pi - x) / (5*pi^2/4 - x(pi - x))
+//
+// This is a good approximation only for 0 <= x <= pi, so we use symmetries to get
+// radians into that range first.
+SI F sin_(F v) {
+ constexpr float Pi = SK_ScalarPI;
+ // Reduce v to [0, 2*Pi), then fold [Pi, 2*Pi) onto [0, Pi) and negate the result.
+ F x = fract(v * (0.5f/Pi)) * (2*Pi);
+ I32 neg = x > Pi;
+ x = if_then_else(neg, x - Pi, x);
+
+ F pair = x * (Pi - x);
+ x = 4.0f * pair / ((5*Pi*Pi/4) - pair);
+ x = if_then_else(neg, -x, x);
+ return x;
+}
+
+// cos(x) = sin(x + pi/2).
+SI F cos_(F v) {
+ return sin_(v + (SK_ScalarPI/2));
+}
+
+/* "GENERATING ACCURATE VALUES FOR THE TANGENT FUNCTION"
+ https://mae.ufl.edu/~uhk/ACCURATE-TANGENT.pdf
+
+ approx = x + (1/3)x^3 + (2/15)x^5 + (17/315)x^7 + (62/2835)x^9
+
+ Some simplifications:
+ 1. tan(x) is periodic, -PI/2 < x < PI/2
+ 2. tan(x) is odd, so tan(-x) = -tan(x)
+ 3. Our polynomial approximation is best near zero, so we use the following identity
+ tan(x) + tan(y)
+ tan(x + y) = -----------------
+ 1 - tan(x)*tan(y)
+ tan(PI/4) = 1
+
+ So for x > PI/8, we do the following refactor:
+ x' = x - PI/4
+
+ 1 + tan(x')
+ tan(x) = ------------
+ 1 - tan(x')
+ */
+SI F tan_(F x) {
+ constexpr float Pi = SK_ScalarPI;
+ // periodic between -pi/2 ... pi/2
+ // shift to 0...Pi, scale 1/Pi to get into 0...1, then fract, scale-up, shift-back
+ x = fract((1/Pi)*x + 0.5f) * Pi - (Pi/2);
+
+ I32 neg = (x < 0.0f);
+ x = if_then_else(neg, -x, x);
+
+ // minimize total error by shifting if x > pi/8
+ I32 use_quotient = (x > (Pi/8));
+ x = if_then_else(use_quotient, x - (Pi/4), x);
+
+ // 9th order poly = 4th order(x^2) * x
+ const float c4 = 62 / 2835.0f;
+ const float c3 = 17 / 315.0f;
+ const float c2 = 2 / 15.0f;
+ const float c1 = 1 / 3.0f;
+ const float c0 = 1.0f;
+ F x2 = x * x;
+ x *= mad(x2, mad(x2, mad(x2, mad(x2, c4, c3), c2), c1), c0);
+ // Undo the pi/4 shift (via the tangent addition identity) and the sign fold.
+ x = if_then_else(use_quotient, (1+x)/(1-x), x);
+ x = if_then_else(neg, -x, x);
+ return x;
+}
+
+/* Use 4th order polynomial approximation from https://arachnoid.com/polysolve/
+ with 129 values of x,atan(x) for x:[0...1]
+ This only works for 0 <= x <= 1
+ */
+SI F approx_atan_unit(F x) {
+ // y = 0.14130025741326729 x⁴
+ // - 0.34312835980675116 x³
+ // - 0.016172900528248768 x²
+ // + 1.00376969762003850 x
+ // - 0.00014758242182738969
+ const float c4 = 0.14130025741326729f;
+ const float c3 = -0.34312835980675116f;
+ const float c2 = -0.016172900528248768f;
+ const float c1 = 1.0037696976200385f;
+ const float c0 = -0.00014758242182738969f;
+ // Horner evaluation via fused multiply-add.
+ return mad(x, mad(x, mad(x, mad(x, c4, c3), c2), c1), c0);
+}
+
+// Use identity atan(x) = pi/2 - atan(1/x) for x > 1
+SI F atan_(F x) {
+ // Fold sign and magnitude into the [0,1] domain approx_atan_unit() needs,
+ // then undo each fold with its matching identity.
+ I32 neg = (x < 0.0f);
+ x = if_then_else(neg, -x, x);
+ I32 flip = (x > 1.0f);
+ x = if_then_else(flip, 1/x, x);
+ x = approx_atan_unit(x);
+ x = if_then_else(flip, SK_ScalarPI/2 - x, x);
+ x = if_then_else(neg, -x, x);
+ return x;
+}
+
+// Handbook of Mathematical Functions, by Milton Abramowitz and Irene Stegun:
+// https://books.google.com/books/content?id=ZboM5tOFWtsC&pg=PA81&img=1&zoom=3&hl=en&bul=1&sig=ACfU3U2M75tG_iGVOS92eQspr14LTq02Nw&ci=0%2C15%2C999%2C1279&edge=0
+// http://screen/8YGJxUGFQ49bVX6
+SI F asin_(F x) {
+ I32 neg = (x < 0.0f);
+ x = if_then_else(neg, -x, x);
+ // Abramowitz & Stegun 4.4.45 polynomial coefficients.
+ const float c3 = -0.0187293f;
+ const float c2 = 0.0742610f;
+ const float c1 = -0.2121144f;
+ const float c0 = 1.5707288f;
+ F poly = mad(x, mad(x, mad(x, c3, c2), c1), c0);
+ x = SK_ScalarPI/2 - sqrt_(1 - x) * poly;
+ x = if_then_else(neg, -x, x);
+ return x;
+}
+
+// acos(x) = pi/2 - asin(x).
+SI F acos_(F x) {
+ return SK_ScalarPI/2 - asin_(x);
+}
+
+/* Use identity atan(x) = pi/2 - atan(1/x) for x > 1
+ By swapping y,x to ensure the ratio is <= 1, we can safely call atan_unit()
+ which avoids a 2nd divide instruction if we had instead called atan().
+ */
+SI F atan2_(F y0, F x0) {
+ I32 flip = (abs_(y0) > abs_(x0));
+ F y = if_then_else(flip, x0, y0);
+ F x = if_then_else(flip, y0, x0);
+ F arg = y/x;
+
+ I32 neg = (arg < 0.0f);
+ arg = if_then_else(neg, -arg, arg);
+
+ F r = approx_atan_unit(arg);
+ r = if_then_else(flip, SK_ScalarPI/2 - r, r);
+ r = if_then_else(neg, -r, r);
+
+ // handle quadrant distinctions
+ r = if_then_else((y0 >= 0) & (x0 < 0), r + SK_ScalarPI, r);
+ r = if_then_else((y0 < 0) & (x0 <= 0), r - SK_ScalarPI, r);
+ // Note: we don't try to handle 0,0 or infinities
+ return r;
+}
+
+// Used by gather_ stages to calculate the base pointer and a vector of indices to load.
+template <typename T>
+SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
+ // We use exclusive clamp so that our min value is > 0 because ULP subtraction using U32 would
+ // produce a NaN if applied to +0.f.
+ x = clamp_ex(x, ctx->width );
+ y = clamp_ex(y, ctx->height);
+ // Optionally bias coordinates down one ULP so exact integers round down.
+ x = sk_bit_cast<F>(sk_bit_cast<U32>(x) - (uint32_t)ctx->roundDownAtInteger);
+ y = sk_bit_cast<F>(sk_bit_cast<U32>(y) - (uint32_t)ctx->roundDownAtInteger);
+ *ptr = (const T*)ctx->pixels;
+ return trunc_(y)*ctx->stride + trunc_(x);
+}
+
+// We often have a nominally [0,1] float value we need to scale and convert to an integer,
+// whether for a table lookup or to pack back down into bytes for storage.
+//
+// In practice, especially when dealing with interesting color spaces, that notionally
+// [0,1] float may be out of [0,1] range. Unorms cannot represent that, so we must clamp.
+//
+// You can adjust the expected input to [0,bias] by tweaking that parameter.
+SI U32 to_unorm(F v, F scale, F bias = 1.0f) {
+ // Any time we use round() we probably want to use to_unorm().
+ return round(min(max(0.0f, v), bias), scale);
+}
+
+// Normalize a comparison result to an all-bits mask, regardless of backend.
+SI I32 cond_to_mask(I32 cond) {
+#if defined(JUMPER_IS_SCALAR)
+ // In scalar mode, conditions are bools (0 or 1), but we want to store and operate on masks
+ // (eg, using bitwise operations to select values).
+ return if_then_else(cond, I32(~0), I32(0));
+#else
+ // In SIMD mode, our various instruction sets already represent conditions as masks.
+ return cond;
+#endif
+}
+
+SI I32 cond_to_mask(U32 cond) {
+ return cond_to_mask(sk_bit_cast<I32>(cond));
+}
+
+// Now finally, normal Stages!
+
+// Seed r,g with pixel-center device coordinates for this run; b=1 (w for matrix
+// multiplies), a=0.
+STAGE(seed_shader, NoCtx) {
+ static constexpr float iota[] = {
+ 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
+ 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
+ };
+ // It's important for speed to explicitly cast(dx) and cast(dy),
+ // which has the effect of splatting them to vectors before converting to floats.
+ // On Intel this breaks a data dependency on previous loop iterations' registers.
+ r = cast(dx) + sk_unaligned_load<F>(iota);
+ g = cast(dy) + 0.5f;
+ b = 1.0f; // This is w=1 for matrix multiplies by the device coords.
+ a = 0;
+}
+
+STAGE(store_device_xy01, F* dst) {
+ // This is very similar to `seed_shader + store_src`, but b/a are backwards.
+ // (sk_FragCoord actually puts w=1 in the w slot.)
+ static constexpr float iota[] = {
+ 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
+ 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
+ };
+ dst[0] = cast(dx) + sk_unaligned_load<F>(iota);
+ dst[1] = cast(dy) + 0.5f;
+ dst[2] = 0.0f;
+ dst[3] = 1.0f;
+}
+
+// Apply 8x8 ordered dithering to r,g,b, scaled by *rate, then clamp to [0,a].
+STAGE(dither, const float* rate) {
+ // Get [(dx,dy), (dx+1,dy), (dx+2,dy), ...] loaded up in integer vectors.
+ uint32_t iota[] = {0,1,2,3,4,5,6,7};
+ U32 X = dx + sk_unaligned_load<U32>(iota),
+ Y = dy;
+
+ // We're doing 8x8 ordered dithering, see https://en.wikipedia.org/wiki/Ordered_dithering.
+ // In this case n=8 and we're using the matrix that looks like 1/64 x [ 0 48 12 60 ... ].
+
+ // We only need X and X^Y from here on, so it's easier to just think of that as "Y".
+ Y ^= X;
+
+ // We'll mix the bottom 3 bits of each of X and Y to make 6 bits,
+ // for 2^6 == 64 == 8x8 matrix values. If X=abc and Y=def, we make fcebda.
+ U32 M = (Y & 1) << 5 | (X & 1) << 4
+ | (Y & 2) << 2 | (X & 2) << 1
+ | (Y & 4) >> 1 | (X & 4) >> 2;
+
+ // Scale that dither to [0,1), then (-0.5,+0.5), here using 63/128 = 0.4921875 as 0.5-epsilon.
+ // We want to make sure our dither is less than 0.5 in either direction to keep exact values
+ // like 0 and 1 unchanged after rounding.
+ F dither = cast(M) * (2/128.0f) - (63/128.0f);
+
+ r += *rate*dither;
+ g += *rate*dither;
+ b += *rate*dither;
+
+ // Keep the result premul-valid: each channel in [0, a].
+ r = max(0.0f, min(r, a));
+ g = max(0.0f, min(g, a));
+ b = max(0.0f, min(b, a));
+}
+
+// load 4 floats from memory, and splat them into r,g,b,a
+STAGE(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
+ r = c->r;
+ g = c->g;
+ b = c->b;
+ a = c->a;
+}
+// Same as uniform_color; the separate name signals the color may be out of [0,1].
+STAGE(unbounded_uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
+ r = c->r;
+ g = c->g;
+ b = c->b;
+ a = c->a;
+}
+// load 4 floats from memory, and splat them into dr,dg,db,da
+STAGE(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
+ dr = c->r;
+ dg = c->g;
+ db = c->b;
+ da = c->a;
+}
+
+// splats opaque-black into r,g,b,a
+STAGE(black_color, NoCtx) {
+ r = g = b = 0.0f;
+ a = 1.0f;
+}
+
+// splats opaque-white into r,g,b,a
+STAGE(white_color, NoCtx) {
+ r = g = b = a = 1.0f;
+}
+
+// load registers r,g,b,a from context (mirrors store_src)
+// The buffer is laid out as four consecutive N-lane vectors: r, g, b, a.
+STAGE(load_src, const float* ptr) {
+ r = sk_unaligned_load<F>(ptr + 0*N);
+ g = sk_unaligned_load<F>(ptr + 1*N);
+ b = sk_unaligned_load<F>(ptr + 2*N);
+ a = sk_unaligned_load<F>(ptr + 3*N);
+}
+
+// store registers r,g,b,a into context (mirrors load_src)
+STAGE(store_src, float* ptr) {
+ sk_unaligned_store(ptr + 0*N, r);
+ sk_unaligned_store(ptr + 1*N, g);
+ sk_unaligned_store(ptr + 2*N, b);
+ sk_unaligned_store(ptr + 3*N, a);
+}
+// store registers r,g into context
+STAGE(store_src_rg, float* ptr) {
+ sk_unaligned_store(ptr + 0*N, r);
+ sk_unaligned_store(ptr + 1*N, g);
+}
+// load registers r,g from context
+STAGE(load_src_rg, float* ptr) {
+ r = sk_unaligned_load<F>(ptr + 0*N);
+ g = sk_unaligned_load<F>(ptr + 1*N);
+}
+// store register a into context
+STAGE(store_src_a, float* ptr) {
+ sk_unaligned_store(ptr, a);
+}
+
+// load registers dr,dg,db,da from context (mirrors store_dst)
+STAGE(load_dst, const float* ptr) {
+ dr = sk_unaligned_load<F>(ptr + 0*N);
+ dg = sk_unaligned_load<F>(ptr + 1*N);
+ db = sk_unaligned_load<F>(ptr + 2*N);
+ da = sk_unaligned_load<F>(ptr + 3*N);
+}
+
+// store registers dr,dg,db,da into context (mirrors load_dst)
+STAGE(store_dst, float* ptr) {
+ sk_unaligned_store(ptr + 0*N, dr);
+ sk_unaligned_store(ptr + 1*N, dg);
+ sk_unaligned_store(ptr + 2*N, db);
+ sk_unaligned_store(ptr + 3*N, da);
+}
+
+// Most blend modes apply the same logic to each channel.
+// BLEND_MODE declares a per-channel function name##_channel(s, d, sa, da) and a
+// STAGE that applies it to r/g/b/a against dr/dg/db/da. All math is premultiplied.
+#define BLEND_MODE(name) \
+ SI F name##_channel(F s, F d, F sa, F da); \
+ STAGE(name, NoCtx) { \
+ r = name##_channel(r,dr,a,da); \
+ g = name##_channel(g,dg,a,da); \
+ b = name##_channel(b,db,a,da); \
+ a = name##_channel(a,da,a,da); \
+ } \
+ SI F name##_channel(F s, F d, F sa, F da)
+
+SI F inv(F x) { return 1.0f - x; }
+SI F two(F x) { return x + x; }
+
+
+// Porter-Duff compositing operators, expressed per premultiplied channel.
+BLEND_MODE(clear) { return 0; }
+BLEND_MODE(srcatop) { return s*da + d*inv(sa); }
+BLEND_MODE(dstatop) { return d*sa + s*inv(da); }
+BLEND_MODE(srcin) { return s * da; }
+BLEND_MODE(dstin) { return d * sa; }
+BLEND_MODE(srcout) { return s * inv(da); }
+BLEND_MODE(dstout) { return d * inv(sa); }
+BLEND_MODE(srcover) { return mad(d, inv(sa), s); }
+BLEND_MODE(dstover) { return mad(s, inv(da), d); }
+
+BLEND_MODE(modulate) { return s*d; }
+BLEND_MODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
+BLEND_MODE(plus_) { return min(s + d, 1.0f); } // We can clamp to either 1 or sa.
+BLEND_MODE(screen) { return s + d - s*d; }
+BLEND_MODE(xor_) { return s*inv(da) + d*inv(sa); }
+#undef BLEND_MODE
+
+// Most other blend modes apply the same logic to colors, and srcover to alpha.
+#define BLEND_MODE(name) \
+ SI F name##_channel(F s, F d, F sa, F da); \
+ STAGE(name, NoCtx) { \
+ r = name##_channel(r,dr,a,da); \
+ g = name##_channel(g,dg,a,da); \
+ b = name##_channel(b,db,a,da); \
+ a = mad(da, inv(a), a); \
+ } \
+ SI F name##_channel(F s, F d, F sa, F da)
+
+BLEND_MODE(darken) { return s + d - max(s*da, d*sa) ; }
+BLEND_MODE(lighten) { return s + d - min(s*da, d*sa) ; }
+BLEND_MODE(difference) { return s + d - two(min(s*da, d*sa)); }
+BLEND_MODE(exclusion) { return s + d - two(s*d); }
+
+// colorburn/colordodge special-case the values that would otherwise divide by zero.
+BLEND_MODE(colorburn) {
+ return if_then_else(d == da, d + s*inv(da),
+ if_then_else(s == 0, /* s + */ d*inv(sa),
+ sa*(da - min(da, (da-d)*sa*rcp_fast(s))) + s*inv(da) + d*inv(sa)));
+}
+BLEND_MODE(colordodge) {
+ return if_then_else(d == 0, /* d + */ s*inv(da),
+ if_then_else(s == sa, s + d*inv(sa),
+ sa*min(da, (d*sa)*rcp_fast(sa - s)) + s*inv(da) + d*inv(sa)));
+}
+BLEND_MODE(hardlight) {
+ return s*inv(da) + d*inv(sa)
+ + if_then_else(two(s) <= sa, two(s*d), sa*da - two((da-d)*(sa-s)));
+}
+// overlay is hardlight with source and destination swapped in the threshold test.
+BLEND_MODE(overlay) {
+ return s*inv(da) + d*inv(sa)
+ + if_then_else(two(d) <= da, two(s*d), sa*da - two((da-d)*(sa-s)));
+}
+
+BLEND_MODE(softlight) {
+ F m = if_then_else(da > 0, d / da, F(0)),
+ s2 = two(s),
+ m4 = two(two(m));
+
+ // The logic forks three ways:
+ // 1. dark src?
+ // 2. light src, dark dst?
+ // 3. light src, light dst?
+ F darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
+ darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
+ liteDst = sqrt_(m) - m,
+ liteSrc = d*sa + da*(s2 - sa) * if_then_else(two(two(d)) <= da, darkDst, liteDst); // 2 or 3?
+ return s*inv(da) + d*inv(sa) + if_then_else(s2 <= sa, darkSrc, liteSrc); // 1 or (2 or 3)?
+}
+#undef BLEND_MODE
+
+// We're basing our implemenation of non-separable blend modes on
+// https://www.w3.org/TR/compositing-1/#blendingnonseparable.
+// and
+// https://www.khronos.org/registry/OpenGL/specs/es/3.2/es_spec_3.2.pdf
+// They're equivalent, but ES' math has been better simplified.
+//
+// Anything extra we add beyond that is to make the math work with premul inputs.
+
+// Saturation of an RGB triple: max channel minus min channel.
+SI F sat(F r, F g, F b) { return max(r, max(g,b)) - min(r, min(g,b)); }
+
+// Luminance of an RGB triple (Rec. 601-style weights).
+#if defined(SK_USE_LEGACY_RP_LUMINANCE)
+SI F lum(F r, F g, F b) { return r*0.30f + g*0.59f + b*0.11f; }
+#else
+SI F lum(F r, F g, F b) { return mad(r, 0.30f, mad(g, 0.59f, b*0.11f)); }
+#endif
+
+// Rescale *r,*g,*b so their saturation becomes s, preserving channel order.
+SI void set_sat(F* r, F* g, F* b, F s) {
+ F mn = min(*r, min(*g,*b)),
+ mx = max(*r, max(*g,*b)),
+ sat = mx - mn;
+
+ // Map min channel to 0, max channel to s, and scale the middle proportionally.
+ auto scale = [=](F c) {
+ return if_then_else(sat == 0, F(0), (c - mn) * s / sat);
+ };
+ *r = scale(*r);
+ *g = scale(*g);
+ *b = scale(*b);
+}
+// Shift *r,*g,*b uniformly so their luminance becomes l.
+SI void set_lum(F* r, F* g, F* b, F l) {
+ F diff = l - lum(*r, *g, *b);
+ *r += diff;
+ *g += diff;
+ *b += diff;
+}
+// Clip the color back into the [0,a] gamut while preserving its luminance.
+SI void clip_color(F* r, F* g, F* b, F a) {
+ F mn = min(*r, min(*g, *b)),
+ mx = max(*r, max(*g, *b)),
+ l = lum(*r, *g, *b);
+
+ // Scale each channel's distance from l so the extremes land on 0 and a.
+ auto clip = [=](F c) {
+ c = if_then_else(mn < 0 && l != mn, l + (c - l) * ( l) / (l - mn), c);
+ c = if_then_else(mx > a && l != mx, l + (c - l) * (a - l) / (mx - l), c);
+ c = max(c, 0.0f); // Sometimes without this we may dip just a little negative.
+ return c;
+ };
+ *r = clip(*r);
+ *g = clip(*g);
+ *b = clip(*b);
+}
+
+// The four non-separable blend modes. Each builds a candidate color in R,G,B
+// (premul-scaled), adjusts sat/lum per the spec, clips to gamut, then composites
+// with the usual srcover-style alpha: a + da - a*da.
+STAGE(hue, NoCtx) {
+ F R = r*a,
+ G = g*a,
+ B = b*a;
+
+ set_sat(&R, &G, &B, sat(dr,dg,db)*a);
+ set_lum(&R, &G, &B, lum(dr,dg,db)*a);
+ clip_color(&R,&G,&B, a*da);
+
+ r = r*inv(da) + dr*inv(a) + R;
+ g = g*inv(da) + dg*inv(a) + G;
+ b = b*inv(da) + db*inv(a) + B;
+ a = a + da - a*da;
+}
+STAGE(saturation, NoCtx) {
+ F R = dr*a,
+ G = dg*a,
+ B = db*a;
+
+ set_sat(&R, &G, &B, sat( r, g, b)*da);
+ set_lum(&R, &G, &B, lum(dr,dg,db)* a); // (This is not redundant.)
+ clip_color(&R,&G,&B, a*da);
+
+ r = r*inv(da) + dr*inv(a) + R;
+ g = g*inv(da) + dg*inv(a) + G;
+ b = b*inv(da) + db*inv(a) + B;
+ a = a + da - a*da;
+}
+STAGE(color, NoCtx) {
+ F R = r*da,
+ G = g*da,
+ B = b*da;
+
+ set_lum(&R, &G, &B, lum(dr,dg,db)*a);
+ clip_color(&R,&G,&B, a*da);
+
+ r = r*inv(da) + dr*inv(a) + R;
+ g = g*inv(da) + dg*inv(a) + G;
+ b = b*inv(da) + db*inv(a) + B;
+ a = a + da - a*da;
+}
+STAGE(luminosity, NoCtx) {
+ F R = dr*a,
+ G = dg*a,
+ B = db*a;
+
+ set_lum(&R, &G, &B, lum(r,g,b)*da);
+ clip_color(&R,&G,&B, a*da);
+
+ r = r*inv(da) + dr*inv(a) + R;
+ g = g*inv(da) + dg*inv(a) + G;
+ b = b*inv(da) + db*inv(a) + B;
+ a = a + da - a*da;
+}
+
+// Fused load + srcover + store for RGBA 8888 destinations, working in a
+// [0,255]-biased space to skip normalizing the destination.
+STAGE(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+ U32 dst = load<U32>(ptr, tail);
+ dr = cast((dst ) & 0xff);
+ dg = cast((dst >> 8) & 0xff);
+ db = cast((dst >> 16) & 0xff);
+ da = cast((dst >> 24) );
+ // {dr,dg,db,da} are in [0,255]
+ // { r, g, b, a} are in [0, 1] (but may be out of gamut)
+
+ r = mad(dr, inv(a), r*255.0f);
+ g = mad(dg, inv(a), g*255.0f);
+ b = mad(db, inv(a), b*255.0f);
+ a = mad(da, inv(a), a*255.0f);
+ // { r, g, b, a} are now in [0,255] (but may be out of gamut)
+
+ // to_unorm() clamps back to gamut. Scaling by 1 since we're already 255-biased.
+ dst = to_unorm(r, 1, 255)
+ | to_unorm(g, 1, 255) << 8
+ | to_unorm(b, 1, 255) << 16
+ | to_unorm(a, 1, 255) << 24;
+ store(ptr, dst, tail);
+}
+
+// Clamp a single value to [0,1].
+SI F clamp_01_(F v) { return min(max(0.0f, v), 1.0f); }
+
+STAGE(clamp_01, NoCtx) {
+ r = clamp_01_(r);
+ g = clamp_01_(g);
+ b = clamp_01_(b);
+ a = clamp_01_(a);
+}
+
+// Clamp to valid premul: a in [0,1], each color channel in [0,a].
+STAGE(clamp_gamut, NoCtx) {
+ a = min(max(a, 0.0f), 1.0f);
+ r = min(max(r, 0.0f), a);
+ g = min(max(g, 0.0f), a);
+ b = min(max(b, 0.0f), a);
+}
+
+// Overwrite r,g,b from a 3-float context (alpha untouched).
+STAGE(set_rgb, const float* rgb) {
+ r = rgb[0];
+ g = rgb[1];
+ b = rgb[2];
+}
+
+// Same as set_rgb; the separate name signals the values may be out of [0,1].
+STAGE(unbounded_set_rgb, const float* rgb) {
+ r = rgb[0];
+ g = rgb[1];
+ b = rgb[2];
+}
+
+// Exchange red and blue (RGBA <-> BGRA) in the src registers.
+STAGE(swap_rb, NoCtx) {
+ auto tmp = r;
+ r = b;
+ b = tmp;
+}
+// Exchange red and blue in the dst registers.
+STAGE(swap_rb_dst, NoCtx) {
+ auto tmp = dr;
+ dr = db;
+ db = tmp;
+}
+
+STAGE(move_src_dst, NoCtx) {
+ dr = r;
+ dg = g;
+ db = b;
+ da = a;
+}
+STAGE(move_dst_src, NoCtx) {
+ r = dr;
+ g = dg;
+ b = db;
+ a = da;
+}
+STAGE(swap_src_dst, NoCtx) {
+ std::swap(r, dr);
+ std::swap(g, dg);
+ std::swap(b, db);
+ std::swap(a, da);
+}
+
+// Convert unpremultiplied src to premultiplied.
+STAGE(premul, NoCtx) {
+ r = r * a;
+ g = g * a;
+ b = b * a;
+}
+STAGE(premul_dst, NoCtx) {
+ dr = dr * da;
+ dg = dg * da;
+ db = db * da;
+}
+// Convert premultiplied src to unpremultiplied; a == 0 maps to scale 0 (not inf/NaN).
+STAGE(unpremul, NoCtx) {
+ float inf = sk_bit_cast<float>(0x7f800000);
+ auto scale = if_then_else(1.0f/a < inf, 1.0f/a, F(0));
+ r *= scale;
+ g *= scale;
+ b *= scale;
+}
+// Unpremul for polar color spaces: r holds hue, which is not alpha-scaled.
+STAGE(unpremul_polar, NoCtx) {
+ float inf = sk_bit_cast<float>(0x7f800000);
+ auto scale = if_then_else(1.0f/a < inf, 1.0f/a, F(0));
+ g *= scale;
+ b *= scale;
+}
+
+STAGE(force_opaque , NoCtx) { a = 1; }
+STAGE(force_opaque_dst, NoCtx) { da = 1; }
+
+// Convert r,g,b in [0,1] to h,s,l (all in [0,1]); alpha is untouched.
+STAGE(rgb_to_hsl, NoCtx) {
+ F mx = max(r, max(g,b)),
+ mn = min(r, min(g,b)),
+ d = mx - mn,
+ d_rcp = 1.0f / d;
+
+ // Hue depends on which channel is the max; each branch keeps h in [0,6) before scaling.
+ F h = (1/6.0f) *
+ if_then_else(mx == mn, F(0),
+ if_then_else(mx == r, (g-b)*d_rcp + if_then_else(g < b, F(6.0f), F(0)),
+ if_then_else(mx == g, (b-r)*d_rcp + 2.0f,
+ (r-g)*d_rcp + 4.0f)));
+
+ F l = (mx + mn) * 0.5f;
+ F s = if_then_else(mx == mn, F(0),
+ d / if_then_else(l > 0.5f, 2.0f-mx-mn, mx+mn));
+
+ r = h;
+ g = s;
+ b = l;
+}
+// Inverse of rgb_to_hsl: h,s,l in r,g,b back to RGB.
+STAGE(hsl_to_rgb, NoCtx) {
+ // See GrRGBToHSLFilterEffect.fp
+
+ F h = r,
+ s = g,
+ l = b,
+ c = (1.0f - abs_(2.0f * l - 1)) * s; // chroma
+
+ auto hue_to_rgb = [&](F hue) {
+ F q = clamp_01_(abs_(fract(hue) * 6.0f - 3.0f) - 1.0f);
+ return (q - 0.5f) * c + l;
+ };
+
+ r = hue_to_rgb(h + 0.0f/3.0f);
+ g = hue_to_rgb(h + 2.0f/3.0f);
+ b = hue_to_rgb(h + 1.0f/3.0f);
+}
+
+// Color conversion functions used in gradient interpolation, based on
+// https://www.w3.org/TR/css-color-4/#color-conversion-code
+// Input channels arrive as r=L, g=a, b=b; output is XYZ adapted to D50 white.
+STAGE(css_lab_to_xyz, NoCtx) {
+ constexpr float k = 24389 / 27.0f;
+ constexpr float e = 216 / 24389.0f;
+
+ F f[3];
+ f[1] = (r + 16) * (1 / 116.0f);
+ f[0] = (g * (1 / 500.0f)) + f[1];
+ f[2] = f[1] - (b * (1 / 200.0f));
+
+ F f_cubed[3] = { f[0]*f[0]*f[0], f[1]*f[1]*f[1], f[2]*f[2]*f[2] };
+
+ F xyz[3] = {
+ if_then_else(f_cubed[0] > e, f_cubed[0], (116 * f[0] - 16) * (1 / k)),
+ if_then_else(r > k * e, f_cubed[1], r * (1 / k)),
+ if_then_else(f_cubed[2] > e, f_cubed[2], (116 * f[2] - 16) * (1 / k))
+ };
+
+ constexpr float D50[3] = { 0.3457f / 0.3585f, 1.0f, (1.0f - 0.3457f - 0.3585f) / 0.3585f };
+ r = xyz[0]*D50[0];
+ g = xyz[1]*D50[1];
+ b = xyz[2]*D50[2];
+}
+
+// OKLab (r=L, g=a, b=b) to linear sRGB, per the CSS Color 4 reference matrices.
+STAGE(css_oklab_to_linear_srgb, NoCtx) {
+ F l_ = r + 0.3963377774f * g + 0.2158037573f * b,
+ m_ = r - 0.1055613458f * g - 0.0638541728f * b,
+ s_ = r - 0.0894841775f * g - 1.2914855480f * b;
+
+ F l = l_*l_*l_,
+ m = m_*m_*m_,
+ s = s_*s_*s_;
+
+ r = +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s;
+ g = -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s;
+ b = -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s;
+}
+
+// Skia stores all polar colors with hue in the first component, so this "LCH -> Lab" transform
+// actually takes "HCL". This is also used to do the same polar transform for OkHCL to OkLAB.
+// See similar comments & logic in SkGradientShaderBase.cpp.
+STAGE(css_hcl_to_lab, NoCtx) {
+ F H = r,
+ C = g,
+ L = b;
+
+ F hueRadians = H * (SK_FloatPI / 180);
+
+ r = L;
+ g = C * cos_(hueRadians);
+ b = C * sin_(hueRadians);
+}
+
+// Floating-point modulus with a scalar divisor; result has the sign of y (here y > 0).
+SI F mod_(F x, float y) {
+ return x - y * floor_(x * (1 / y));
+}
+
+struct RGB { F r, g, b; };
+
+SI RGB css_hsl_to_srgb_(F h, F s, F l) {
+ h = mod_(h, 360);
+
+ s *= 0.01f;
+ l *= 0.01f;
+
+ F k[3] = {
+ mod_(0 + h * (1 / 30.0f), 12),
+ mod_(8 + h * (1 / 30.0f), 12),
+ mod_(4 + h * (1 / 30.0f), 12)
+ };
+ F a = s * min(l, 1 - l);
+ return {
+ l - a * max(-1.0f, min(min(k[0] - 3.0f, 9.0f - k[0]), 1.0f)),
+ l - a * max(-1.0f, min(min(k[1] - 3.0f, 9.0f - k[1]), 1.0f)),
+ l - a * max(-1.0f, min(min(k[2] - 3.0f, 9.0f - k[2]), 1.0f))
+ };
+}
+
+// HSL -> sRGB stage: r=hue (degrees), g=saturation (%), b=lightness (%).
+STAGE(css_hsl_to_srgb, NoCtx) {
+    RGB rgb = css_hsl_to_srgb_(r, g, b);
+    r = rgb.r;
+    g = rgb.g;
+    b = rgb.b;
+}
+
+// HWB -> sRGB stage: r=hue (degrees), g=whiteness (%), b=blackness (%).
+STAGE(css_hwb_to_srgb, NoCtx) {
+    g *= 0.01f;
+    b *= 0.01f;
+
+    // If whiteness + blackness >= 1 the result is an achromatic gray at
+    // this normalized level (computed up front for the select below).
+    F gray = g / (g + b);
+
+    // Otherwise start from the fully saturated hue and mix in white/black.
+    RGB rgb = css_hsl_to_srgb_(r, 100.0f, 50.0f);
+    rgb.r = rgb.r * (1 - g - b) + g;
+    rgb.g = rgb.g * (1 - g - b) + g;
+    rgb.b = rgb.b * (1 - g - b) + g;
+
+    auto isGray = (g + b) >= 1;
+
+    r = if_then_else(isGray, gray, rgb.r);
+    g = if_then_else(isGray, gray, rgb.g);
+    b = if_then_else(isGray, gray, rgb.b);
+}
+
+// Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
+SI F alpha_coverage_from_rgb_coverage(F a, F da, F cr, F cg, F cb) {
+    return if_then_else(a < da, min(cr, min(cg,cb))
+                              , max(cr, max(cg,cb)));
+}
+
+// Scale all four channels by one uniform coverage value.
+STAGE(scale_1_float, const float* c) {
+    r = r * *c;
+    g = g * *c;
+    b = b * *c;
+    a = a * *c;
+}
+// Scale all four channels by per-pixel 8-bit coverage loaded from memory.
+STAGE(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
+
+    auto scales = load<U8>(ptr, tail);
+    auto c = from_byte(scales);
+
+    r = r * c;
+    g = g * c;
+    b = b * c;
+    a = a * c;
+}
+// Scale with per-channel 565 (e.g. LCD subpixel) coverage; since 565 has no
+// alpha plane, alpha coverage is derived from the rgb coverage above.
+STAGE(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+    F cr,cg,cb;
+    from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
+
+    F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
+
+    r = r * cr;
+    g = g * cg;
+    b = b * cb;
+    a = a * ca;
+}
+
+// Linear interpolation: from + (to-from)*t, written as a fused multiply-add.
+SI F lerp(F from, F to, F t) {
+    return mad(to-from, t, from);
+}
+
+// Lerp src toward dst by one uniform coverage value.
+STAGE(lerp_1_float, const float* c) {
+    r = lerp(dr, r, *c);
+    g = lerp(dg, g, *c);
+    b = lerp(db, b, *c);
+    a = lerp(da, a, *c);
+}
+// Scale by per-lane coverage already in native (float) form.
+STAGE(scale_native, const float scales[]) {
+    auto c = sk_unaligned_load<F>(scales);
+    r = r * c;
+    g = g * c;
+    b = b * c;
+    a = a * c;
+}
+// Lerp by per-lane coverage already in native (float) form.
+STAGE(lerp_native, const float scales[]) {
+    auto c = sk_unaligned_load<F>(scales);
+    r = lerp(dr, r, c);
+    g = lerp(dg, g, c);
+    b = lerp(db, b, c);
+    a = lerp(da, a, c);
+}
+// Lerp by per-pixel 8-bit coverage loaded from memory.
+STAGE(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
+
+    auto scales = load<U8>(ptr, tail);
+    auto c = from_byte(scales);
+
+    r = lerp(dr, r, c);
+    g = lerp(dg, g, c);
+    b = lerp(db, b, c);
+    a = lerp(da, a, c);
+}
+// Lerp with per-channel 565 coverage; alpha coverage is derived since 565
+// carries no alpha plane.
+STAGE(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+    F cr,cg,cb;
+    from_565(load<U16>(ptr, tail), &cr, &cg, &cb);
+
+    F ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);
+
+    r = lerp(dr, r, cr);
+    g = lerp(dg, g, cg);
+    b = lerp(db, b, cb);
+    a = lerp(da, a, ca);
+}
+
+// Emboss: rgb = rgb*mul + add with per-pixel 8-bit mul/add planes.
+// Alpha is left untouched.
+STAGE(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
+    auto mptr = ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy),
+         aptr = ptr_at_xy<const uint8_t>(&ctx->add, dx,dy);
+
+    F mul = from_byte(load<U8>(mptr, tail)),
+      add = from_byte(load<U8>(aptr, tail));
+
+    r = mad(r, mul, add);
+    g = mad(g, mul, add);
+    b = mad(b, mul, add);
+}
+
+// Remap each channel through its own 256-entry byte lookup table
+// (quantize to 8 bits, gather, convert back to float).
+STAGE(byte_tables, const SkRasterPipeline_TablesCtx* tables) {
+    r = from_byte(gather(tables->r, to_unorm(r, 255)));
+    g = from_byte(gather(tables->g, to_unorm(g, 255)));
+    b = from_byte(gather(tables->b, to_unorm(b, 255)));
+    a = from_byte(gather(tables->a, to_unorm(a, 255)));
+}
+
+// Split off the sign bit, returning |x| and stashing the sign. Transfer
+// curves below are applied to the magnitude and the sign re-applied, making
+// them odd functions over negative inputs.
+SI F strip_sign(F x, U32* sign) {
+    U32 bits = sk_bit_cast<U32>(x);
+    *sign = bits & 0x80000000;
+    return sk_bit_cast<F>(bits ^ *sign);
+}
+
+// Re-apply a sign bit previously captured by strip_sign().
+SI F apply_sign(F x, U32 sign) {
+    return sk_bit_cast<F>(sign | sk_bit_cast<U32>(x));
+}
+
+// skcms 7-parameter piecewise transfer function:
+//   v <= d ? c*v + f : (a*v + b)^g + e,  mirrored for negative inputs.
+STAGE(parametric, const skcms_TransferFunction* ctx) {
+    auto fn = [&](F v) {
+        U32 sign;
+        v = strip_sign(v, &sign);
+
+        F r = if_then_else(v <= ctx->d, mad(ctx->c, v, ctx->f)
+                                      , approx_powf(mad(ctx->a, v, ctx->b), ctx->g) + ctx->e);
+        return apply_sign(r, sign);
+    };
+    r = fn(r);
+    g = fn(g);
+    b = fn(b);
+}
+
+// Pure power-law curve v^G, sign-mirrored; alpha is not touched.
+STAGE(gamma_, const float* G) {
+    auto fn = [&](F v) {
+        U32 sign;
+        v = strip_sign(v, &sign);
+        return apply_sign(approx_powf(v, *G), sign);
+    };
+    r = fn(r);
+    g = fn(g);
+    b = fn(b);
+}
+
+// PQ-style (SMPTE ST 2084-like) transfer curve, parameterized via the
+// skcms_TransferFunction fields: ((max(b*v^c + a, 0)) / (e*v^c + d))^f.
+STAGE(PQish, const skcms_TransferFunction* ctx) {
+    auto fn = [&](F v) {
+        U32 sign;
+        v = strip_sign(v, &sign);
+
+        F r = approx_powf(max(mad(ctx->b, approx_powf(v, ctx->c), ctx->a), 0.0f)
+                              / (mad(ctx->e, approx_powf(v, ctx->c), ctx->d)),
+                          ctx->f);
+
+        return apply_sign(r, sign);
+    };
+    r = fn(r);
+    g = fn(g);
+    b = fn(b);
+}
+
+// HLG-style (BT.2100 hybrid log-gamma-like) curve: power segment below the
+// knee (v*R <= 1), exponential segment above, then scaled by K = f + 1.
+STAGE(HLGish, const skcms_TransferFunction* ctx) {
+    auto fn = [&](F v) {
+        U32 sign;
+        v = strip_sign(v, &sign);
+
+        const float R = ctx->a, G = ctx->b,
+                    a = ctx->c, b = ctx->d, c = ctx->e,
+                    K = ctx->f + 1.0f;
+
+        F r = if_then_else(v*R <= 1, approx_powf(v*R, G)
+                                   , approx_exp((v-c)*a) + b);
+
+        return K * apply_sign(r, sign);
+    };
+    r = fn(r);
+    g = fn(g);
+    b = fn(b);
+}
+
+// Inverse of HLGish: divide out K, then invert each segment (power below the
+// knee, logarithm above).
+STAGE(HLGinvish, const skcms_TransferFunction* ctx) {
+    auto fn = [&](F v) {
+        U32 sign;
+        v = strip_sign(v, &sign);
+
+        const float R = ctx->a, G = ctx->b,
+                    a = ctx->c, b = ctx->d, c = ctx->e,
+                    K = ctx->f + 1.0f;
+
+        v /= K;
+        F r = if_then_else(v <= 1, R * approx_powf(v, G)
+                                 , a * approx_log(v - b) + c);
+
+        return apply_sign(r, sign);
+    };
+    r = fn(r);
+    g = fn(g);
+    b = fn(b);
+}
+
+// Alpha-only 8-bit format: load/store/gather. rgb is forced to 0 on load.
+STAGE(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
+
+    r = g = b = 0.0f;
+    a = from_byte(load<U8>(ptr, tail));
+}
+STAGE(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint8_t>(ctx, dx,dy);
+
+    dr = dg = db = 0.0f;
+    da = from_byte(load<U8>(ptr, tail));
+}
+STAGE(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint8_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+    r = g = b = 0.0f;
+    a = from_byte(gather(ptr, ix));
+}
+STAGE(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
+
+    U8 packed = pack(pack(to_unorm(a, 255)));
+    store(ptr, packed, tail);
+}
+// Like store_a8 but writes the red channel (R8 format).
+STAGE(store_r8, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint8_t>(ctx, dx,dy);
+
+    U8 packed = pack(pack(to_unorm(r, 255)));
+    store(ptr, packed, tail);
+}
+
+// RGB 565 format: alpha is implicitly opaque (1.0) on load.
+STAGE(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+    from_565(load<U16>(ptr, tail), &r,&g,&b);
+    a = 1.0f;
+}
+STAGE(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+    from_565(load<U16>(ptr, tail), &dr,&dg,&db);
+    da = 1.0f;
+}
+STAGE(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint16_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+    from_565(gather(ptr, ix), &r,&g,&b);
+    a = 1.0f;
+}
+STAGE(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
+
+    // Pack as R:5 (bits 11-15), G:6 (bits 5-10), B:5 (bits 0-4).
+    U16 px = pack( to_unorm(r, 31) << 11
+                 | to_unorm(g, 63) << 5
+                 | to_unorm(b, 31) );
+    store(ptr, px, tail);
+}
+
+// ARGB 4444 format: 4 bits per channel in a 16-bit pixel.
+STAGE(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+    from_4444(load<U16>(ptr, tail), &r,&g,&b,&a);
+}
+STAGE(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+    from_4444(load<U16>(ptr, tail), &dr,&dg,&db,&da);
+}
+STAGE(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint16_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+    from_4444(gather(ptr, ix), &r,&g,&b,&a);
+}
+STAGE(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
+    // Pack as R:bits 12-15, G:8-11, B:4-7, A:0-3.
+    U16 px = pack( to_unorm(r, 15) << 12
+                 | to_unorm(g, 15) << 8
+                 | to_unorm(b, 15) << 4
+                 | to_unorm(a, 15) );
+    store(ptr, px, tail);
+}
+
+// 8888 format: 8 bits per channel, r in the low byte.
+STAGE(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+    from_8888(load<U32>(ptr, tail), &r,&g,&b,&a);
+}
+STAGE(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+    from_8888(load<U32>(ptr, tail), &dr,&dg,&db,&da);
+}
+STAGE(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint32_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+    from_8888(gather(ptr, ix), &r,&g,&b,&a);
+}
+STAGE(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+    U32 px = to_unorm(r, 255)
+           | to_unorm(g, 255) << 8
+           | to_unorm(b, 255) << 16
+           | to_unorm(a, 255) << 24;
+    store(ptr, px, tail);
+}
+
+// Two-channel RG88 format: b forced to 0, alpha to opaque on load.
+STAGE(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
+    from_88(load<U16>(ptr, tail), &r, &g);
+    b = 0;
+    a = 1;
+}
+STAGE(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
+    from_88(load<U16>(ptr, tail), &dr, &dg);
+    db = 0;
+    da = 1;
+}
+STAGE(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint16_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+    from_88(gather(ptr, ix), &r, &g);
+    b = 0;
+    a = 1;
+}
+STAGE(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint16_t>(ctx, dx, dy);
+    U16 px = pack( to_unorm(r, 255) | to_unorm(g, 255) << 8 );
+    store(ptr, px, tail);
+}
+
+// Alpha-only 16-bit (A16 unorm) format.
+STAGE(load_a16, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+    r = g = b = 0;
+    a = from_short(load<U16>(ptr, tail));
+}
+STAGE(load_a16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
+    dr = dg = db = 0.0f;
+    da = from_short(load<U16>(ptr, tail));
+}
+STAGE(gather_a16, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint16_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+    r = g = b = 0.0f;
+    a = from_short(gather(ptr, ix));
+}
+STAGE(store_a16, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
+
+    U16 px = pack(to_unorm(a, 65535));
+    store(ptr, px, tail);
+}
+
+// Two-channel RG 16-bit unorm format.
+STAGE(load_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
+    b = 0; a = 1;
+    from_1616(load<U32>(ptr, tail), &r,&g);
+}
+STAGE(load_rg1616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
+    from_1616(load<U32>(ptr, tail), &dr, &dg);
+    db = 0;
+    da = 1;
+}
+STAGE(gather_rg1616, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint32_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+    from_1616(gather(ptr, ix), &r, &g);
+    b = 0;
+    a = 1;
+}
+STAGE(store_rg1616, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+    U32 px = to_unorm(r, 65535)
+           | to_unorm(g, 65535) << 16;
+    store(ptr, px, tail);
+}
+
+// Four-channel 16-bit unorm (RGBA 16161616) format.
+STAGE(load_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
+    from_16161616(load<U64>(ptr, tail), &r,&g, &b, &a);
+}
+STAGE(load_16161616_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint64_t>(ctx, dx, dy);
+    from_16161616(load<U64>(ptr, tail), &dr, &dg, &db, &da);
+}
+STAGE(gather_16161616, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint64_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+    from_16161616(gather(ptr, ix), &r, &g, &b, &a);
+}
+STAGE(store_16161616, const SkRasterPipeline_MemoryCtx* ctx) {
+    // Addressed as uint16_t, so x/y are scaled by 4 channels per pixel.
+    auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,4*dy);
+
+    U16 R = pack(to_unorm(r, 65535)),
+        G = pack(to_unorm(g, 65535)),
+        B = pack(to_unorm(b, 65535)),
+        A = pack(to_unorm(a, 65535));
+
+    store4(ptr,tail, R,G,B,A);
+}
+
+
+// 1010102 format: 10-bit RGB with a 2-bit alpha.
+STAGE(load_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+    from_1010102(load<U32>(ptr, tail), &r,&g,&b,&a);
+}
+STAGE(load_1010102_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+    from_1010102(load<U32>(ptr, tail), &dr,&dg,&db,&da);
+}
+// "_xr" variants use the extended-range encoding (see store_1010102_xr for
+// the value range the 10 bits span).
+STAGE(load_1010102_xr, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+    from_1010102_xr(load<U32>(ptr, tail), &r,&g,&b,&a);
+}
+STAGE(load_1010102_xr_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx,dy);
+    from_1010102_xr(load<U32>(ptr, tail), &dr,&dg,&db,&da);
+}
+STAGE(gather_1010102, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint32_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+    from_1010102(gather(ptr, ix), &r,&g,&b,&a);
+}
+STAGE(store_1010102, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+
+    U32 px = to_unorm(r, 1023)
+           | to_unorm(g, 1023) << 10
+           | to_unorm(b, 1023) << 20
+           | to_unorm(a, 3) << 30;
+    store(ptr, px, tail);
+}
+STAGE(store_1010102_xr, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);
+    // Extended-range 10-bit encoding maps [min, max] onto [0, 1023].
+    static constexpr float min = -0.752941f;
+    static constexpr float max = 1.25098f;
+    static constexpr float range = max - min;
+    U32 px = to_unorm((r - min) / range, 1023)
+           | to_unorm((g - min) / range, 1023) << 10
+           | to_unorm((b - min) / range, 1023) << 20
+           | to_unorm(a, 3) << 30;
+    store(ptr, px, tail);
+}
+
+// RGBA half-float (F16) format: interleaved load, then half -> float.
+STAGE(load_f16, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
+
+    U16 R,G,B,A;
+    load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
+    r = from_half(R);
+    g = from_half(G);
+    b = from_half(B);
+    a = from_half(A);
+}
+STAGE(load_f16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint64_t>(ctx, dx,dy);
+
+    U16 R,G,B,A;
+    load4((const uint16_t*)ptr,tail, &R,&G,&B,&A);
+    dr = from_half(R);
+    dg = from_half(G);
+    db = from_half(B);
+    da = from_half(A);
+}
+STAGE(gather_f16, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint64_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+    auto px = gather(ptr, ix);
+
+    // Deinterleave from the gathered 64-bit pixels; tail=0 since px is full.
+    U16 R,G,B,A;
+    load4((const uint16_t*)&px,0, &R,&G,&B,&A);
+    r = from_half(R);
+    g = from_half(G);
+    b = from_half(B);
+    a = from_half(A);
+}
+STAGE(store_f16, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint64_t>(ctx, dx,dy);
+    store4((uint16_t*)ptr,tail, to_half(r)
+                              , to_half(g)
+                              , to_half(b)
+                              , to_half(a));
+}
+
+// RGBA 16-bit unorm, big-endian (e.g. PNG-style 16-bit output).
+STAGE(store_u16_be, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint16_t>(ctx, 4*dx,dy);
+
+    U16 R = bswap(pack(to_unorm(r, 65535))),
+        G = bswap(pack(to_unorm(g, 65535))),
+        B = bswap(pack(to_unorm(b, 65535))),
+        A = bswap(pack(to_unorm(a, 65535)));
+
+    store4(ptr,tail, R,G,B,A);
+}
+
+// Alpha-only half-float (A16 float) format.
+STAGE(load_af16, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx,dy);
+
+    U16 A = load<U16>((const uint16_t*)ptr, tail);
+    r = 0;
+    g = 0;
+    b = 0;
+    a = from_half(A);
+}
+STAGE(load_af16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint16_t>(ctx, dx, dy);
+
+    U16 A = load<U16>((const uint16_t*)ptr, tail);
+    dr = dg = db = 0.0f;
+    da = from_half(A);
+}
+STAGE(gather_af16, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint16_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+    r = g = b = 0.0f;
+    a = from_half(gather(ptr, ix));
+}
+STAGE(store_af16, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint16_t>(ctx, dx,dy);
+    store(ptr, to_half(a), tail);
+}
+
+// Two-channel half-float (RG F16) format: b=0, alpha opaque on load.
+STAGE(load_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
+
+    U16 R,G;
+    load2((const uint16_t*)ptr, tail, &R, &G);
+    r = from_half(R);
+    g = from_half(G);
+    b = 0;
+    a = 1;
+}
+STAGE(load_rgf16_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const uint32_t>(ctx, dx, dy);
+
+    U16 R,G;
+    load2((const uint16_t*)ptr, tail, &R, &G);
+    dr = from_half(R);
+    dg = from_half(G);
+    db = 0;
+    da = 1;
+}
+STAGE(gather_rgf16, const SkRasterPipeline_GatherCtx* ctx) {
+    const uint32_t* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r, g);
+    auto px = gather(ptr, ix);
+
+    // Deinterleave the gathered 32-bit pixels; tail=0 since px is full.
+    U16 R,G;
+    load2((const uint16_t*)&px, 0, &R, &G);
+    r = from_half(R);
+    g = from_half(G);
+    b = 0;
+    a = 1;
+}
+STAGE(store_rgf16, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<uint32_t>(ctx, dx, dy);
+    store2((uint16_t*)ptr, tail, to_half(r)
+                               , to_half(g));
+}
+
+// RGBA float32 format; addressed as float, so x/y scale by 4 floats/pixel.
+STAGE(load_f32, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
+    load4(ptr,tail, &r,&g,&b,&a);
+}
+STAGE(load_f32_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const float>(ctx, 4*dx,4*dy);
+    load4(ptr,tail, &dr,&dg,&db,&da);
+}
+STAGE(gather_f32, const SkRasterPipeline_GatherCtx* ctx) {
+    const float* ptr;
+    U32 ix = ix_and_ptr(&ptr, ctx, r,g);
+    // Gather each channel individually from the interleaved pixels.
+    r = gather(ptr, 4*ix + 0);
+    g = gather(ptr, 4*ix + 1);
+    b = gather(ptr, 4*ix + 2);
+    a = gather(ptr, 4*ix + 3);
+}
+STAGE(store_f32, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<float>(ctx, 4*dx,4*dy);
+    store4(ptr,tail, r,g,b,a);
+}
+
+// Two-channel float32 format (RG F32): b=0, alpha opaque on load.
+STAGE(load_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<const float>(ctx, 2*dx,2*dy);
+    load2(ptr, tail, &r, &g);
+    b = 0;
+    a = 1;
+}
+STAGE(store_rgf32, const SkRasterPipeline_MemoryCtx* ctx) {
+    auto ptr = ptr_at_xy<float>(ctx, 2*dx,2*dy);
+    store2(ptr, tail, r, g);
+}
+
+// Repeat v into [0, scale): v - floor(v/scale)*scale.
+SI F exclusive_repeat(F v, const SkRasterPipeline_TileCtx* ctx) {
+    return v - floor_(v*ctx->invScale)*ctx->scale;
+}
+// Mirror v into [0, limit), reflecting on every odd period.
+SI F exclusive_mirror(F v, const SkRasterPipeline_TileCtx* ctx) {
+    auto limit = ctx->scale;
+    auto invLimit = ctx->invScale;
+
+    // This is "repeat" over the range 0..2*limit
+    auto u = v - floor_(v*invLimit*0.5f)*2*limit;
+    // s will be 0 when moving forward (e.g. [0, limit)) and 1 when moving backward (e.g.
+    // [limit, 2*limit)).
+    auto s = floor_(u*invLimit);
+    // This is the mirror result.
+    auto m = u - 2*s*(u - limit);
+    // Apply a bias to m if moving backwards so that we snap consistently at exact integer coords in
+    // the logical infinite image. This is tested by mirror_tile GM. Note that all values
+    // that have a non-zero bias applied are > 0.
+    auto biasInUlps = trunc_(s);
+    return sk_bit_cast<F>(sk_bit_cast<U32>(m) + ctx->mirrorBiasDir*biasInUlps);
+}
+// Tile x or y to [0,limit) == [0,limit - 1 ulp] (think, sampling from images).
+// The gather stages will hard clamp the output of these stages to [0,limit)...
+// we just need to do the basic repeat or mirroring.
+STAGE(repeat_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_repeat(r, ctx); }
+STAGE(repeat_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_repeat(g, ctx); }
+STAGE(mirror_x, const SkRasterPipeline_TileCtx* ctx) { r = exclusive_mirror(r, ctx); }
+STAGE(mirror_y, const SkRasterPipeline_TileCtx* ctx) { g = exclusive_mirror(g, ctx); }
+
+// Unit-interval variants used for normalized coordinates.
+STAGE( clamp_x_1, NoCtx) { r = clamp_01_(r); }
+STAGE(repeat_x_1, NoCtx) { r = clamp_01_(r - floor_(r)); }
+STAGE(mirror_x_1, NoCtx) { r = clamp_01_(abs_( (r-1.0f) - two(floor_((r-1.0f)*0.5f)) - 1.0f )); }
+
+// Clamp both coordinates to a caller-provided rectangle.
+STAGE(clamp_x_and_y, const SkRasterPipeline_CoordClampCtx* ctx) {
+    r = min(ctx->max_x, max(ctx->min_x, r));
+    g = min(ctx->max_y, max(ctx->min_y, g));
+}
+
+// Decal stores a 32bit mask after checking the coordinate (x and/or y) against its domain:
+// mask == 0x00000000 if the coordinate(s) are out of bounds
+// mask == 0xFFFFFFFF if the coordinate(s) are in bounds
+// After the gather stage, the r,g,b,a values are AND'd with this mask, setting them to 0
+// if either of the coordinates were out of bounds.
+
+STAGE(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
+    auto w = ctx->limit_x;
+    auto e = ctx->inclusiveEdge_x;
+    // In bounds when strictly inside (0, w), or exactly on the inclusive edge.
+    auto cond = ((0 < r) & (r < w)) | (r == e);
+    sk_unaligned_store(ctx->mask, cond_to_mask(cond));
+}
+STAGE(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
+    auto h = ctx->limit_y;
+    auto e = ctx->inclusiveEdge_y;
+    auto cond = ((0 < g) & (g < h)) | (g == e);
+    sk_unaligned_store(ctx->mask, cond_to_mask(cond));
+}
+STAGE(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
+    auto w = ctx->limit_x;
+    auto h = ctx->limit_y;
+    auto ex = ctx->inclusiveEdge_x;
+    auto ey = ctx->inclusiveEdge_y;
+    auto cond = (((0 < r) & (r < w)) | (r == ex))
+              & (((0 < g) & (g < h)) | (g == ey));
+    sk_unaligned_store(ctx->mask, cond_to_mask(cond));
+}
+// Apply the stored decal mask: zero out all channels of out-of-bounds lanes.
+STAGE(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
+    auto mask = sk_unaligned_load<U32>(ctx->mask);
+    r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
+    g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
+    b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
+    a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
+}
+
+// Broadcast alpha into rgb as an opaque gray.
+STAGE(alpha_to_gray, NoCtx) {
+    r = g = b = a;
+    a = 1;
+}
+STAGE(alpha_to_gray_dst, NoCtx) {
+    dr = dg = db = da;
+    da = 1;
+}
+// Move alpha into the red channel, leaving the result opaque.
+STAGE(alpha_to_red, NoCtx) {
+    r = a;
+    a = 1;
+}
+STAGE(alpha_to_red_dst, NoCtx) {
+    dr = da;
+    da = 1;
+}
+
+// BT.709 luma coefficients (0.2126, 0.7152, 0.0722): collapse rgb to a
+// single luminance value, stored in alpha (rgb zeroed) ...
+STAGE(bt709_luminance_or_luma_to_alpha, NoCtx) {
+    a = r*0.2126f + g*0.7152f + b*0.0722f;
+    r = g = b = 0;
+}
+// ... or broadcast back into all three color channels (alpha untouched).
+STAGE(bt709_luminance_or_luma_to_rgb, NoCtx) {
+    r = g = b = r*0.2126f + g*0.7152f + b*0.0722f;
+}
+
+// Coordinate/color matrix stages. Unless noted, matrices are column-major
+// and operate on (r,g[,b[,a]]) as a vector.
+STAGE(matrix_translate, const float* m) {
+    r += m[0];
+    g += m[1];
+}
+STAGE(matrix_scale_translate, const float* m) {
+    r = mad(r,m[0], m[2]);
+    g = mad(g,m[1], m[3]);
+}
+// 2x3 affine transform of (x,y) = (r,g).
+STAGE(matrix_2x3, const float* m) {
+    auto R = mad(r,m[0], mad(g,m[1], m[2])),
+         G = mad(r,m[3], mad(g,m[4], m[5]));
+    r = R;
+    g = G;
+}
+// 3x3 linear transform of (r,g,b).
+STAGE(matrix_3x3, const float* m) {
+    auto R = mad(r,m[0], mad(g,m[3], b*m[6])),
+         G = mad(r,m[1], mad(g,m[4], b*m[7])),
+         B = mad(r,m[2], mad(g,m[5], b*m[8]));
+    r = R;
+    g = G;
+    b = B;
+}
+// 3x4 affine transform of (r,g,b) (last column is the translation).
+STAGE(matrix_3x4, const float* m) {
+    auto R = mad(r,m[0], mad(g,m[3], mad(b,m[6], m[ 9]))),
+         G = mad(r,m[1], mad(g,m[4], mad(b,m[7], m[10]))),
+         B = mad(r,m[2], mad(g,m[5], mad(b,m[8], m[11])));
+    r = R;
+    g = G;
+    b = B;
+}
+// 4x5 color matrix over (r,g,b,a) plus a translation column.
+STAGE(matrix_4x5, const float* m) {
+    auto R = mad(r,m[ 0], mad(g,m[ 1], mad(b,m[ 2], mad(a,m[ 3], m[ 4])))),
+         G = mad(r,m[ 5], mad(g,m[ 6], mad(b,m[ 7], mad(a,m[ 8], m[ 9])))),
+         B = mad(r,m[10], mad(g,m[11], mad(b,m[12], mad(a,m[13], m[14])))),
+         A = mad(r,m[15], mad(g,m[16], mad(b,m[17], mad(a,m[18], m[19]))));
+    r = R;
+    g = G;
+    b = B;
+    a = A;
+}
+// 4x3: expand the 2-D coordinate (r,g) into four output channels.
+STAGE(matrix_4x3, const float* m) {
+    auto X = r,
+         Y = g;
+
+    r = mad(X, m[0], mad(Y, m[4], m[ 8]));
+    g = mad(X, m[1], mad(Y, m[5], m[ 9]));
+    b = mad(X, m[2], mad(Y, m[6], m[10]));
+    a = mad(X, m[3], mad(Y, m[7], m[11]));
+}
+STAGE(matrix_perspective, const float* m) {
+    // N.B. Unlike the other matrix_ stages, this matrix is row-major.
+    auto R = mad(r,m[0], mad(g,m[1], m[2])),
+         G = mad(r,m[3], mad(g,m[4], m[5])),
+         Z = mad(r,m[6], mad(g,m[7], m[8]));
+    // Divide through by w (Z) to project back to 2-D.
+    r = R * rcp_precise(Z);
+    g = G * rcp_precise(Z);
+}
+
+// Evaluate one gradient segment: with per-lane stop index idx, compute
+// channel = t*f + b from the per-stop factor/bias tables in the context.
+SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
+                        F* r, F* g, F* b, F* a) {
+    F fr, br, fg, bg, fb, bb, fa, ba;
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+    // With <= 8 stops the whole table fits in one 8-wide register, so an
+    // in-register permute is cheaper than a memory gather.
+    if (c->stopCount <=8) {
+        fr = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), idx);
+        br = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), idx);
+        fg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), idx);
+        bg = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), idx);
+        fb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), idx);
+        bb = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), idx);
+        fa = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), idx);
+        ba = _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), idx);
+    } else
+#endif
+    {
+        fr = gather(c->fs[0], idx);
+        br = gather(c->bs[0], idx);
+        fg = gather(c->fs[1], idx);
+        bg = gather(c->bs[1], idx);
+        fb = gather(c->fs[2], idx);
+        bb = gather(c->bs[2], idx);
+        fa = gather(c->fs[3], idx);
+        ba = gather(c->bs[3], idx);
+    }
+
+    *r = mad(t, fr, br);
+    *g = mad(t, fg, bg);
+    *b = mad(t, fb, bb);
+    *a = mad(t, fa, ba);
+}
+
+// Uniformly spaced stops: the segment index is just trunc(t * (n-1)).
+STAGE(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
+    auto t = r;
+    auto idx = trunc_(t * (c->stopCount-1));
+    gradient_lookup(c, idx, t, &r, &g, &b, &a);
+}
+
+// General stops: count how many stop positions t has passed (linear scan,
+// branch-free per lane).
+STAGE(gradient, const SkRasterPipeline_GradientCtx* c) {
+    auto t = r;
+    U32 idx = 0;
+
+    // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
+    for (size_t i = 1; i < c->stopCount; i++) {
+        idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
+    }
+
+    gradient_lookup(c, idx, t, &r, &g, &b, &a);
+}
+
+// Fast path for exactly two stops: a single factor/bias pair per channel.
+STAGE(evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx* c) {
+    auto t = r;
+    r = mad(t, c->f[0], c->b[0]);
+    g = mad(t, c->f[1], c->b[1]);
+    b = mad(t, c->f[2], c->b[2]);
+    a = mad(t, c->f[3], c->b[3]);
+}
+
+// Map (x,y) = (r,g) to its angle around the origin, normalized to [0,1)
+// turns; result lands in r. Used by sweep gradients.
+STAGE(xy_to_unit_angle, NoCtx) {
+    F X = r,
+      Y = g;
+    F xabs = abs_(X),
+      yabs = abs_(Y);
+
+    // First-octant slope in [0,1]; quadrant fix-ups come afterwards.
+    F slope = min(xabs, yabs)/max(xabs, yabs);
+    F s = slope * slope;
+
+    // Use a 7th degree polynomial to approximate atan.
+    // This was generated using sollya.gforge.inria.fr.
+    // A float optimized polynomial was generated using the following command.
+    // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
+    F phi = slope
+             * (0.15912117063999176025390625f + s
+             * (-5.185396969318389892578125e-2f + s
+             * (2.476101927459239959716796875e-2f + s
+             * (-7.0547382347285747528076171875e-3f))));
+
+    phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
+    phi = if_then_else(X < 0.0f   , 1.0f/2.0f - phi, phi);
+    phi = if_then_else(Y < 0.0f   , 1.0f - phi     , phi);
+    phi = if_then_else(phi != phi , F(0)           , phi);  // Check for NaN.
+    r = phi;
+}
+
+// Map (x,y) = (r,g) to its distance from the origin; result lands in r.
+STAGE(xy_to_radius, NoCtx) {
+    F X2 = r * r,
+      Y2 = g * g;
+    r = sqrt_(X2 + Y2);
+}
+
+// Please see https://skia.org/dev/design/conical for how our 2pt conical shader works.
+// Each stage computes the gradient parameter t from (x,y) = (r,g), leaving t in r.
+
+STAGE(negate_x, NoCtx) { r = -r; }
+
+STAGE(xy_to_2pt_conical_strip, const SkRasterPipeline_2PtConicalCtx* ctx) {
+    F x = r, y = g, &t = r;
+    t = x + sqrt_(ctx->fP0 - y*y); // ctx->fP0 = r0 * r0
+}
+
+STAGE(xy_to_2pt_conical_focal_on_circle, NoCtx) {
+    F x = r, y = g, &t = r;
+    t = x + y*y / x; // (x^2 + y^2) / x
+}
+
+STAGE(xy_to_2pt_conical_well_behaved, const SkRasterPipeline_2PtConicalCtx* ctx) {
+    F x = r, y = g, &t = r;
+    t = sqrt_(x*x + y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
+}
+
+STAGE(xy_to_2pt_conical_greater, const SkRasterPipeline_2PtConicalCtx* ctx) {
+    F x = r, y = g, &t = r;
+    t = sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
+}
+
+STAGE(xy_to_2pt_conical_smaller, const SkRasterPipeline_2PtConicalCtx* ctx) {
+    F x = r, y = g, &t = r;
+    t = -sqrt_(x*x - y*y) - x * ctx->fP0; // ctx->fP0 = 1/r1
+}
+
+STAGE(alter_2pt_conical_compensate_focal, const SkRasterPipeline_2PtConicalCtx* ctx) {
+    F& t = r;
+    t = t + ctx->fP1; // ctx->fP1 = f
+}
+
+STAGE(alter_2pt_conical_unswap, NoCtx) {
+    F& t = r;
+    t = 1 - t;
+}
+
+// Replace NaN t values with 0 and record a validity mask in the context.
+STAGE(mask_2pt_conical_nan, SkRasterPipeline_2PtConicalCtx* c) {
+    F& t = r;
+    auto is_degenerate = (t != t); // NaN
+    t = if_then_else(is_degenerate, F(0), t);
+    sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
+}
+
+// Also treat t <= 0 as degenerate (in addition to NaN).
+STAGE(mask_2pt_conical_degenerates, SkRasterPipeline_2PtConicalCtx* c) {
+    F& t = r;
+    auto is_degenerate = (t <= 0) | (t != t);
+    t = if_then_else(is_degenerate, F(0), t);
+    sk_unaligned_store(&c->fMask, cond_to_mask(!is_degenerate));
+}
+
+// AND all four channels with a per-lane mask (e.g. the conical validity mask),
+// zeroing out masked-off lanes.
+STAGE(apply_vector_mask, const uint32_t* ctx) {
+    const U32 mask = sk_unaligned_load<U32>(ctx);
+    r = sk_bit_cast<F>(sk_bit_cast<U32>(r) & mask);
+    g = sk_bit_cast<F>(sk_bit_cast<U32>(g) & mask);
+    b = sk_bit_cast<F>(sk_bit_cast<U32>(b) & mask);
+    a = sk_bit_cast<F>(sk_bit_cast<U32>(a) & mask);
+}
+
+SI void save_xy(F* r, F* g, SkRasterPipeline_SamplerCtx* c) {
+    // Whether bilinear or bicubic, all sample points are at the same fractional offset (fx,fy).
+    // They're either the 4 corners of a logical 1x1 pixel or the 16 corners of a 3x3 grid
+    // surrounding (x,y) at (0.5,0.5) off-center.
+    F fx = fract(*r + 0.5f),
+      fy = fract(*g + 0.5f);
+
+    // Samplers will need to load x and fx, or y and fy.
+    sk_unaligned_store(c->x,  *r);
+    sk_unaligned_store(c->y,  *g);
+    sk_unaligned_store(c->fx, fx);
+    sk_unaligned_store(c->fy, fy);
+}
+
+STAGE(accumulate, const SkRasterPipeline_SamplerCtx* c) {
+    // Bilinear and bicubic filters are both separable, so we produce independent contributions
+    // from x and y, multiplying them together here to get each pixel's total scale factor.
+    auto scale = sk_unaligned_load<F>(c->scalex)
+               * sk_unaligned_load<F>(c->scaley);
+    dr = mad(scale, r, dr);
+    dg = mad(scale, g, dg);
+    db = mad(scale, b, db);
+    da = mad(scale, a, da);
+}
+
+// In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
+// are combined in direct proportion to their area overlapping that logical query pixel.
+// At positive offsets, the x-axis contribution to that rectangle is fx, or (1-fx) at negative x.
+// The y-axis is symmetric.
+
+// kScale is +/-1, selecting the sample at +0.5 or -0.5 from the center;
+// writes the offset x coordinate and stores the matching x weight.
+template <int kScale>
+SI void bilinear_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
+    *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
+    F fx = sk_unaligned_load<F>(ctx->fx);
+
+    F scalex;
+    if (kScale == -1) { scalex = 1.0f - fx; }
+    if (kScale == +1) { scalex = fx; }
+    sk_unaligned_store(ctx->scalex, scalex);
+}
+template <int kScale>
+SI void bilinear_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
+    *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
+    F fy = sk_unaligned_load<F>(ctx->fy);
+
+    F scaley;
+    if (kScale == -1) { scaley = 1.0f - fy; }
+    if (kScale == +1) { scaley = fy; }
+    sk_unaligned_store(ctx->scaley, scaley);
+}
+
+STAGE(bilinear_setup, SkRasterPipeline_SamplerCtx* ctx) {
+    save_xy(&r, &g, ctx);
+    // Init for accumulate
+    dr = dg = db = da = 0;
+}
+
+STAGE(bilinear_nx, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<-1>(ctx, &r); }
+STAGE(bilinear_px, SkRasterPipeline_SamplerCtx* ctx) { bilinear_x<+1>(ctx, &r); }
+STAGE(bilinear_ny, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<-1>(ctx, &g); }
+STAGE(bilinear_py, SkRasterPipeline_SamplerCtx* ctx) { bilinear_y<+1>(ctx, &g); }
+
+
+// In bicubic interpolation, the 16 pixels and +/- 0.5 and +/- 1.5 offsets from the sample
+// pixel center are combined with a non-uniform cubic filter, with higher values near the center.
+//
+// This helper computes the total weight along one axis (our bicubic filter is separable), given one
+// column of the sampling matrix, and a fractional pixel offset. See SkCubicResampler for details.
+
+SI F bicubic_wts(F t, float A, float B, float C, float D) {
+ return mad(t, mad(t, mad(t, D, C), B), A);
+}
+
+template <int kScale>
+SI void bicubic_x(SkRasterPipeline_SamplerCtx* ctx, F* x) {
+ *x = sk_unaligned_load<F>(ctx->x) + (kScale * 0.5f);
+
+ F scalex;
+ if (kScale == -3) { scalex = sk_unaligned_load<F>(ctx->wx[0]); }
+ if (kScale == -1) { scalex = sk_unaligned_load<F>(ctx->wx[1]); }
+ if (kScale == +1) { scalex = sk_unaligned_load<F>(ctx->wx[2]); }
+ if (kScale == +3) { scalex = sk_unaligned_load<F>(ctx->wx[3]); }
+ sk_unaligned_store(ctx->scalex, scalex);
+}
+template <int kScale>
+SI void bicubic_y(SkRasterPipeline_SamplerCtx* ctx, F* y) {
+ *y = sk_unaligned_load<F>(ctx->y) + (kScale * 0.5f);
+
+ F scaley;
+ if (kScale == -3) { scaley = sk_unaligned_load<F>(ctx->wy[0]); }
+ if (kScale == -1) { scaley = sk_unaligned_load<F>(ctx->wy[1]); }
+ if (kScale == +1) { scaley = sk_unaligned_load<F>(ctx->wy[2]); }
+ if (kScale == +3) { scaley = sk_unaligned_load<F>(ctx->wy[3]); }
+ sk_unaligned_store(ctx->scaley, scaley);
+}
+
+STAGE(bicubic_setup, SkRasterPipeline_SamplerCtx* ctx) {
+ save_xy(&r, &g, ctx);
+
+ const float* w = ctx->weights;
+
+ F fx = sk_unaligned_load<F>(ctx->fx);
+ sk_unaligned_store(ctx->wx[0], bicubic_wts(fx, w[0], w[4], w[ 8], w[12]));
+ sk_unaligned_store(ctx->wx[1], bicubic_wts(fx, w[1], w[5], w[ 9], w[13]));
+ sk_unaligned_store(ctx->wx[2], bicubic_wts(fx, w[2], w[6], w[10], w[14]));
+ sk_unaligned_store(ctx->wx[3], bicubic_wts(fx, w[3], w[7], w[11], w[15]));
+
+ F fy = sk_unaligned_load<F>(ctx->fy);
+ sk_unaligned_store(ctx->wy[0], bicubic_wts(fy, w[0], w[4], w[ 8], w[12]));
+ sk_unaligned_store(ctx->wy[1], bicubic_wts(fy, w[1], w[5], w[ 9], w[13]));
+ sk_unaligned_store(ctx->wy[2], bicubic_wts(fy, w[2], w[6], w[10], w[14]));
+ sk_unaligned_store(ctx->wy[3], bicubic_wts(fy, w[3], w[7], w[11], w[15]));
+
+ // Init for accumulate
+ dr = dg = db = da = 0;
+}
+
+STAGE(bicubic_n3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-3>(ctx, &r); }
+STAGE(bicubic_n1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<-1>(ctx, &r); }
+STAGE(bicubic_p1x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+1>(ctx, &r); }
+STAGE(bicubic_p3x, SkRasterPipeline_SamplerCtx* ctx) { bicubic_x<+3>(ctx, &r); }
+
+STAGE(bicubic_n3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-3>(ctx, &g); }
+STAGE(bicubic_n1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<-1>(ctx, &g); }
+STAGE(bicubic_p1y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+1>(ctx, &g); }
+STAGE(bicubic_p3y, SkRasterPipeline_SamplerCtx* ctx) { bicubic_y<+3>(ctx, &g); }
+
+STAGE(mipmap_linear_init, SkRasterPipeline_MipmapCtx* ctx) {
+ sk_unaligned_store(ctx->x, r);
+ sk_unaligned_store(ctx->y, g);
+}
+
+STAGE(mipmap_linear_update, SkRasterPipeline_MipmapCtx* ctx) {
+ sk_unaligned_store(ctx->r, r);
+ sk_unaligned_store(ctx->g, g);
+ sk_unaligned_store(ctx->b, b);
+ sk_unaligned_store(ctx->a, a);
+
+ r = sk_unaligned_load<F>(ctx->x) * ctx->scaleX;
+ g = sk_unaligned_load<F>(ctx->y) * ctx->scaleY;
+}
+
+STAGE(mipmap_linear_finish, SkRasterPipeline_MipmapCtx* ctx) {
+ r = lerp(sk_unaligned_load<F>(ctx->r), r, ctx->lowerWeight);
+ g = lerp(sk_unaligned_load<F>(ctx->g), g, ctx->lowerWeight);
+ b = lerp(sk_unaligned_load<F>(ctx->b), b, ctx->lowerWeight);
+ a = lerp(sk_unaligned_load<F>(ctx->a), a, ctx->lowerWeight);
+}
+
+STAGE(callback, SkRasterPipeline_CallbackCtx* c) {
+ store4(c->rgba,0, r,g,b,a);
+ c->fn(c, tail ? tail : N);
+ load4(c->read_from,0, &r,&g,&b,&a);
+}
+
+// All control flow stages used by SkSL maintain some state in the common registers:
+// dr: condition mask
+// dg: loop mask
+// db: return mask
+// da: execution mask (intersection of all three masks)
+// After updating dr/dg/db, you must invoke update_execution_mask().
+#define execution_mask() sk_bit_cast<I32>(da)
+#define update_execution_mask() da = sk_bit_cast<F>(sk_bit_cast<I32>(dr) & \
+ sk_bit_cast<I32>(dg) & \
+ sk_bit_cast<I32>(db))
+
+STAGE_TAIL(init_lane_masks, NoCtx) {
+ uint32_t iota[] = {0,1,2,3,4,5,6,7};
+ I32 mask = tail ? cond_to_mask(sk_unaligned_load<U32>(iota) < tail) : I32(~0);
+ dr = dg = db = da = sk_bit_cast<F>(mask);
+}
+
+STAGE_TAIL(load_condition_mask, F* ctx) {
+ dr = sk_unaligned_load<F>(ctx);
+ update_execution_mask();
+}
+
+STAGE_TAIL(store_condition_mask, F* ctx) {
+ sk_unaligned_store(ctx, dr);
+}
+
+STAGE_TAIL(merge_condition_mask, I32* ptr) {
+ // Set the condition-mask to the intersection of two adjacent masks at the pointer.
+ dr = sk_bit_cast<F>(ptr[0] & ptr[1]);
+ update_execution_mask();
+}
+
+STAGE_TAIL(load_loop_mask, F* ctx) {
+ dg = sk_unaligned_load<F>(ctx);
+ update_execution_mask();
+}
+
+STAGE_TAIL(store_loop_mask, F* ctx) {
+ sk_unaligned_store(ctx, dg);
+}
+
+STAGE_TAIL(mask_off_loop_mask, NoCtx) {
+ // We encountered a break statement. If a lane was active, it should be masked off now, and stay
+ // masked-off until the termination of the loop.
+ dg = sk_bit_cast<F>(sk_bit_cast<I32>(dg) & ~execution_mask());
+ update_execution_mask();
+}
+
+STAGE_TAIL(reenable_loop_mask, I32* ptr) {
+ // Set the loop-mask to the union of the current loop-mask with the mask at the pointer.
+ dg = sk_bit_cast<F>(sk_bit_cast<I32>(dg) | ptr[0]);
+ update_execution_mask();
+}
+
+STAGE_TAIL(merge_loop_mask, I32* ptr) {
+ // Set the loop-mask to the intersection of the current loop-mask with the mask at the pointer.
+ // (Note: this behavior subtly differs from merge_condition_mask!)
+ dg = sk_bit_cast<F>(sk_bit_cast<I32>(dg) & ptr[0]);
+ update_execution_mask();
+}
+
+STAGE_TAIL(case_op, SkRasterPipeline_CaseOpCtx* ctx) {
+ // Check each lane to see if the case value matches the expectation.
+ I32* actualValue = (I32*)ctx->ptr;
+ I32 caseMatches = cond_to_mask(*actualValue == ctx->expectedValue);
+
+ // In lanes where we found a match, enable the loop mask...
+ dg = sk_bit_cast<F>(sk_bit_cast<I32>(dg) | caseMatches);
+ update_execution_mask();
+
+ // ... and clear the default-case mask.
+ I32* defaultMask = actualValue + 1;
+ *defaultMask &= ~caseMatches;
+}
+
+STAGE_TAIL(load_return_mask, F* ctx) {
+ db = sk_unaligned_load<F>(ctx);
+ update_execution_mask();
+}
+
+STAGE_TAIL(store_return_mask, F* ctx) {
+ sk_unaligned_store(ctx, db);
+}
+
+STAGE_TAIL(mask_off_return_mask, NoCtx) {
+ // We encountered a return statement. If a lane was active, it should be masked off now, and
+ // stay masked-off until the end of the function.
+ db = sk_bit_cast<F>(sk_bit_cast<I32>(db) & ~execution_mask());
+ update_execution_mask();
+}
+
+STAGE_BRANCH(branch_if_all_lanes_active, SkRasterPipeline_BranchCtx* ctx) {
+ if (tail) {
+ uint32_t iota[] = {0,1,2,3,4,5,6,7};
+ I32 tailLanes = cond_to_mask(tail <= sk_unaligned_load<U32>(iota));
+ return all(execution_mask() | tailLanes) ? ctx->offset : 1;
+ } else {
+ return all(execution_mask()) ? ctx->offset : 1;
+ }
+}
+
+STAGE_BRANCH(branch_if_any_lanes_active, SkRasterPipeline_BranchCtx* ctx) {
+ return any(execution_mask()) ? ctx->offset : 1;
+}
+
+STAGE_BRANCH(branch_if_no_lanes_active, SkRasterPipeline_BranchCtx* ctx) {
+ return any(execution_mask()) ? 1 : ctx->offset;
+}
+
+STAGE_BRANCH(jump, SkRasterPipeline_BranchCtx* ctx) {
+ return ctx->offset;
+}
+
+STAGE_BRANCH(branch_if_no_active_lanes_eq, SkRasterPipeline_BranchIfEqualCtx* ctx) {
+ // Compare each lane against the expected value...
+ I32 match = cond_to_mask(*(I32*)ctx->ptr == ctx->value);
+ // ... but mask off lanes that aren't executing.
+ match &= execution_mask();
+ // If any lanes matched, don't take the branch.
+ return any(match) ? 1 : ctx->offset;
+}
+
+STAGE_TAIL(zero_slot_unmasked, F* dst) {
+ // We don't even bother masking off the tail; we're filling slots, not the destination surface.
+ sk_bzero(dst, sizeof(F) * 1);
+}
+STAGE_TAIL(zero_2_slots_unmasked, F* dst) {
+ sk_bzero(dst, sizeof(F) * 2);
+}
+STAGE_TAIL(zero_3_slots_unmasked, F* dst) {
+ sk_bzero(dst, sizeof(F) * 3);
+}
+STAGE_TAIL(zero_4_slots_unmasked, F* dst) {
+ sk_bzero(dst, sizeof(F) * 4);
+}
+
+STAGE_TAIL(copy_constant, SkRasterPipeline_BinaryOpCtx* ctx) {
+ const float* src = ctx->src;
+ F* dst = (F*)ctx->dst;
+ dst[0] = src[0];
+}
+STAGE_TAIL(copy_2_constants, SkRasterPipeline_BinaryOpCtx* ctx) {
+ const float* src = ctx->src;
+ F* dst = (F*)ctx->dst;
+ dst[0] = src[0];
+ dst[1] = src[1];
+}
+STAGE_TAIL(copy_3_constants, SkRasterPipeline_BinaryOpCtx* ctx) {
+ const float* src = ctx->src;
+ F* dst = (F*)ctx->dst;
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+}
+STAGE_TAIL(copy_4_constants, SkRasterPipeline_BinaryOpCtx* ctx) {
+ const float* src = ctx->src;
+ F* dst = (F*)ctx->dst;
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+}
+
+STAGE_TAIL(copy_slot_unmasked, SkRasterPipeline_BinaryOpCtx* ctx) {
+ // We don't even bother masking off the tail; we're filling slots, not the destination surface.
+ memcpy(ctx->dst, ctx->src, sizeof(F) * 1);
+}
+STAGE_TAIL(copy_2_slots_unmasked, SkRasterPipeline_BinaryOpCtx* ctx) {
+ memcpy(ctx->dst, ctx->src, sizeof(F) * 2);
+}
+STAGE_TAIL(copy_3_slots_unmasked, SkRasterPipeline_BinaryOpCtx* ctx) {
+ memcpy(ctx->dst, ctx->src, sizeof(F) * 3);
+}
+STAGE_TAIL(copy_4_slots_unmasked, SkRasterPipeline_BinaryOpCtx* ctx) {
+ memcpy(ctx->dst, ctx->src, sizeof(F) * 4);
+}
+
+template <int NumSlots>
+SI void copy_n_slots_masked_fn(SkRasterPipeline_BinaryOpCtx* ctx, I32 mask) {
+ if (any(mask)) {
+ // Get pointers to our slots.
+ F* dst = (F*)ctx->dst;
+ F* src = (F*)ctx->src;
+
+ // Mask off and copy slots.
+ for (int count = 0; count < NumSlots; ++count) {
+ *dst = if_then_else(mask, *src, *dst);
+ dst += 1;
+ src += 1;
+ }
+ }
+}
+
+STAGE_TAIL(copy_slot_masked, SkRasterPipeline_BinaryOpCtx* ctx) {
+ copy_n_slots_masked_fn<1>(ctx, execution_mask());
+}
+STAGE_TAIL(copy_2_slots_masked, SkRasterPipeline_BinaryOpCtx* ctx) {
+ copy_n_slots_masked_fn<2>(ctx, execution_mask());
+}
+STAGE_TAIL(copy_3_slots_masked, SkRasterPipeline_BinaryOpCtx* ctx) {
+ copy_n_slots_masked_fn<3>(ctx, execution_mask());
+}
+STAGE_TAIL(copy_4_slots_masked, SkRasterPipeline_BinaryOpCtx* ctx) {
+ copy_n_slots_masked_fn<4>(ctx, execution_mask());
+}
+
+template <int LoopCount>
+SI void shuffle_fn(F* dst, uint16_t* offsets, int numSlots) {
+ F scratch[16];
+ std::byte* src = (std::byte*)dst;
+ for (int count = 0; count < LoopCount; ++count) {
+ scratch[count] = *(F*)(src + offsets[count]);
+ }
+ // Surprisingly, this switch generates significantly better code than a memcpy (on x86-64) when
+ // the number of slots is unknown at compile time, and generates roughly identical code when the
+ // number of slots is hardcoded. Using a switch allows `scratch` to live in ymm0-ymm15 instead
+ // of being written out to the stack and then read back in. Also, the intrinsic memcpy assumes
+ // that `numSlots` could be arbitrarily large, and so it emits more code than we need.
+ switch (numSlots) {
+ case 16: dst[15] = scratch[15]; [[fallthrough]];
+ case 15: dst[14] = scratch[14]; [[fallthrough]];
+ case 14: dst[13] = scratch[13]; [[fallthrough]];
+ case 13: dst[12] = scratch[12]; [[fallthrough]];
+ case 12: dst[11] = scratch[11]; [[fallthrough]];
+ case 11: dst[10] = scratch[10]; [[fallthrough]];
+ case 10: dst[ 9] = scratch[ 9]; [[fallthrough]];
+ case 9: dst[ 8] = scratch[ 8]; [[fallthrough]];
+ case 8: dst[ 7] = scratch[ 7]; [[fallthrough]];
+ case 7: dst[ 6] = scratch[ 6]; [[fallthrough]];
+ case 6: dst[ 5] = scratch[ 5]; [[fallthrough]];
+ case 5: dst[ 4] = scratch[ 4]; [[fallthrough]];
+ case 4: dst[ 3] = scratch[ 3]; [[fallthrough]];
+ case 3: dst[ 2] = scratch[ 2]; [[fallthrough]];
+ case 2: dst[ 1] = scratch[ 1]; [[fallthrough]];
+ case 1: dst[ 0] = scratch[ 0];
+ }
+}
+
+STAGE_TAIL(swizzle_1, SkRasterPipeline_SwizzleCtx* ctx) {
+ shuffle_fn<1>((F*)ctx->ptr, ctx->offsets, 1);
+}
+STAGE_TAIL(swizzle_2, SkRasterPipeline_SwizzleCtx* ctx) {
+ shuffle_fn<2>((F*)ctx->ptr, ctx->offsets, 2);
+}
+STAGE_TAIL(swizzle_3, SkRasterPipeline_SwizzleCtx* ctx) {
+ shuffle_fn<3>((F*)ctx->ptr, ctx->offsets, 3);
+}
+STAGE_TAIL(swizzle_4, SkRasterPipeline_SwizzleCtx* ctx) {
+ shuffle_fn<4>((F*)ctx->ptr, ctx->offsets, 4);
+}
+STAGE_TAIL(shuffle, SkRasterPipeline_ShuffleCtx* ctx) {
+ shuffle_fn<16>((F*)ctx->ptr, ctx->offsets, ctx->count);
+}
+
+template <int NumSlots>
+SI void swizzle_copy_masked_fn(F* dst, const F* src, uint16_t* offsets, I32 mask) {
+ std::byte* dstB = (std::byte*)dst;
+ for (int count = 0; count < NumSlots; ++count) {
+ F* dstS = (F*)(dstB + *offsets);
+ *dstS = if_then_else(mask, *src, *dstS);
+ offsets += 1;
+ src += 1;
+ }
+}
+
+STAGE_TAIL(swizzle_copy_slot_masked, SkRasterPipeline_SwizzleCopyCtx* ctx) {
+ swizzle_copy_masked_fn<1>((F*)ctx->dst, (F*)ctx->src, ctx->offsets, execution_mask());
+}
+STAGE_TAIL(swizzle_copy_2_slots_masked, SkRasterPipeline_SwizzleCopyCtx* ctx) {
+ swizzle_copy_masked_fn<2>((F*)ctx->dst, (F*)ctx->src, ctx->offsets, execution_mask());
+}
+STAGE_TAIL(swizzle_copy_3_slots_masked, SkRasterPipeline_SwizzleCopyCtx* ctx) {
+ swizzle_copy_masked_fn<3>((F*)ctx->dst, (F*)ctx->src, ctx->offsets, execution_mask());
+}
+STAGE_TAIL(swizzle_copy_4_slots_masked, SkRasterPipeline_SwizzleCopyCtx* ctx) {
+ swizzle_copy_masked_fn<4>((F*)ctx->dst, (F*)ctx->src, ctx->offsets, execution_mask());
+}
+
+STAGE_TAIL(copy_from_indirect_unmasked, SkRasterPipeline_CopyIndirectCtx* ctx) {
+ // Clamp the indirect offsets to stay within the limit.
+ U32 offsets = *(U32*)ctx->indirectOffset;
+ offsets = min(offsets, ctx->indirectLimit);
+
+ // Scale up the offsets to account for the N lanes per value.
+ offsets *= N;
+
+ // Adjust the offsets forward so that they fetch from the correct lane.
+ static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ offsets += sk_unaligned_load<U32>(iota);
+
+ // Use gather to perform indirect lookups; write the results into `dst`.
+ const float* src = ctx->src;
+ F* dst = (F*)ctx->dst;
+ F* end = dst + ctx->slots;
+ do {
+ *dst = gather(src, offsets);
+ dst += 1;
+ src += N;
+ } while (dst != end);
+}
+
+STAGE_TAIL(copy_from_indirect_uniform_unmasked, SkRasterPipeline_CopyIndirectCtx* ctx) {
+ // Clamp the indirect offsets to stay within the limit.
+ U32 offsets = *(U32*)ctx->indirectOffset;
+ offsets = min(offsets, ctx->indirectLimit);
+
+ // Use gather to perform indirect lookups; write the results into `dst`.
+ const float* src = ctx->src;
+ F* dst = (F*)ctx->dst;
+ F* end = dst + ctx->slots;
+ do {
+ *dst = gather(src, offsets);
+ dst += 1;
+ src += 1;
+ } while (dst != end);
+}
+
+STAGE_TAIL(copy_to_indirect_masked, SkRasterPipeline_CopyIndirectCtx* ctx) {
+ // Clamp the indirect offsets to stay within the limit.
+ U32 offsets = *(U32*)ctx->indirectOffset;
+ offsets = min(offsets, ctx->indirectLimit);
+
+ // Scale up the offsets to account for the N lanes per value.
+ offsets *= N;
+
+ // Adjust the offsets forward so that they store into the correct lane.
+ static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ offsets += sk_unaligned_load<U32>(iota);
+
+ // Perform indirect, masked writes into `dst`.
+ const F* src = (F*)ctx->src;
+ const F* end = src + ctx->slots;
+ float* dst = ctx->dst;
+ I32 mask = execution_mask();
+ do {
+ scatter_masked(*src, dst, offsets, mask);
+ dst += N;
+ src += 1;
+ } while (src != end);
+}
+
+STAGE_TAIL(swizzle_copy_to_indirect_masked, SkRasterPipeline_SwizzleCopyIndirectCtx* ctx) {
+ // Clamp the indirect offsets to stay within the limit.
+ U32 offsets = *(U32*)ctx->indirectOffset;
+ offsets = min(offsets, ctx->indirectLimit);
+
+ // Scale up the offsets to account for the N lanes per value.
+ offsets *= N;
+
+ // Adjust the offsets forward so that they store into the correct lane.
+ static constexpr uint32_t iota[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ offsets += sk_unaligned_load<U32>(iota);
+
+ // Perform indirect, masked, swizzled writes into `dst`.
+ const F* src = (F*)ctx->src;
+ const F* end = src + ctx->slots;
+ std::byte* dstB = (std::byte*)ctx->dst;
+ const uint16_t* swizzle = ctx->offsets;
+ I32 mask = execution_mask();
+ do {
+ float* dst = (float*)(dstB + *swizzle);
+ scatter_masked(*src, dst, offsets, mask);
+ swizzle += 1;
+ src += 1;
+ } while (src != end);
+}
+
+// Unary operations take a single input, and overwrite it with their output.
+// Unlike binary or ternary operations, we provide variations of 1-4 slots, but don't provide
+// an arbitrary-width "n-slot" variation; the Builder can chain together longer sequences manually.
+template <typename T, void (*ApplyFn)(T*)>
+SI void apply_adjacent_unary(T* dst, T* end) {
+ do {
+ ApplyFn(dst);
+ dst += 1;
+ } while (dst != end);
+}
+
+SI void bitwise_not_fn(I32* dst) {
+ *dst = ~*dst;
+}
+
+#if defined(JUMPER_IS_SCALAR)
+template <typename T>
+SI void cast_to_float_from_fn(T* dst) {
+ *dst = sk_bit_cast<T>((F)*dst);
+}
+SI void cast_to_int_from_fn(F* dst) {
+ *dst = sk_bit_cast<F>((I32)*dst);
+}
+SI void cast_to_uint_from_fn(F* dst) {
+ *dst = sk_bit_cast<F>((U32)*dst);
+}
+#else
+template <typename T>
+SI void cast_to_float_from_fn(T* dst) {
+ *dst = sk_bit_cast<T>(SK_CONVERTVECTOR(*dst, F));
+}
+SI void cast_to_int_from_fn(F* dst) {
+ *dst = sk_bit_cast<F>(SK_CONVERTVECTOR(*dst, I32));
+}
+SI void cast_to_uint_from_fn(F* dst) {
+ *dst = sk_bit_cast<F>(SK_CONVERTVECTOR(*dst, U32));
+}
+#endif
+
+template <typename T>
+SI void abs_fn(T* dst) {
+ *dst = abs_(*dst);
+}
+
+SI void floor_fn(F* dst) {
+ *dst = floor_(*dst);
+}
+
+SI void ceil_fn(F* dst) {
+ *dst = ceil_(*dst);
+}
+
+SI void invsqrt_fn(F* dst) {
+ *dst = rsqrt(*dst);
+}
+
+#define DECLARE_UNARY_FLOAT(name) \
+ STAGE_TAIL(name##_float, F* dst) { apply_adjacent_unary<F, &name##_fn>(dst, dst + 1); } \
+ STAGE_TAIL(name##_2_floats, F* dst) { apply_adjacent_unary<F, &name##_fn>(dst, dst + 2); } \
+ STAGE_TAIL(name##_3_floats, F* dst) { apply_adjacent_unary<F, &name##_fn>(dst, dst + 3); } \
+ STAGE_TAIL(name##_4_floats, F* dst) { apply_adjacent_unary<F, &name##_fn>(dst, dst + 4); }
+
+#define DECLARE_UNARY_INT(name) \
+ STAGE_TAIL(name##_int, I32* dst) { apply_adjacent_unary<I32, &name##_fn>(dst, dst + 1); } \
+ STAGE_TAIL(name##_2_ints, I32* dst) { apply_adjacent_unary<I32, &name##_fn>(dst, dst + 2); } \
+ STAGE_TAIL(name##_3_ints, I32* dst) { apply_adjacent_unary<I32, &name##_fn>(dst, dst + 3); } \
+ STAGE_TAIL(name##_4_ints, I32* dst) { apply_adjacent_unary<I32, &name##_fn>(dst, dst + 4); }
+
+#define DECLARE_UNARY_UINT(name) \
+ STAGE_TAIL(name##_uint, U32* dst) { apply_adjacent_unary<U32, &name##_fn>(dst, dst + 1); } \
+ STAGE_TAIL(name##_2_uints, U32* dst) { apply_adjacent_unary<U32, &name##_fn>(dst, dst + 2); } \
+ STAGE_TAIL(name##_3_uints, U32* dst) { apply_adjacent_unary<U32, &name##_fn>(dst, dst + 3); } \
+ STAGE_TAIL(name##_4_uints, U32* dst) { apply_adjacent_unary<U32, &name##_fn>(dst, dst + 4); }
+
+DECLARE_UNARY_INT(bitwise_not)
+DECLARE_UNARY_INT(cast_to_float_from) DECLARE_UNARY_UINT(cast_to_float_from)
+DECLARE_UNARY_FLOAT(cast_to_int_from)
+DECLARE_UNARY_FLOAT(cast_to_uint_from)
+DECLARE_UNARY_FLOAT(abs) DECLARE_UNARY_INT(abs)
+DECLARE_UNARY_FLOAT(floor)
+DECLARE_UNARY_FLOAT(ceil)
+DECLARE_UNARY_FLOAT(invsqrt)
+
+#undef DECLARE_UNARY_FLOAT
+#undef DECLARE_UNARY_INT
+#undef DECLARE_UNARY_UINT
+
+// For complex unary ops, we only provide a 1-slot version to reduce code bloat.
+STAGE_TAIL(sin_float, F* dst) { *dst = sin_(*dst); }
+STAGE_TAIL(cos_float, F* dst) { *dst = cos_(*dst); }
+STAGE_TAIL(tan_float, F* dst) { *dst = tan_(*dst); }
+STAGE_TAIL(asin_float, F* dst) { *dst = asin_(*dst); }
+STAGE_TAIL(acos_float, F* dst) { *dst = acos_(*dst); }
+STAGE_TAIL(atan_float, F* dst) { *dst = atan_(*dst); }
+STAGE_TAIL(sqrt_float, F* dst) { *dst = sqrt_(*dst); }
+STAGE_TAIL(exp_float, F* dst) { *dst = approx_exp(*dst); }
+STAGE_TAIL(exp2_float, F* dst) { *dst = approx_pow2(*dst); }
+STAGE_TAIL(log_float, F* dst) { *dst = approx_log(*dst); }
+STAGE_TAIL(log2_float, F* dst) { *dst = approx_log2(*dst); }
+
+STAGE_TAIL(inverse_mat2, F* dst) {
+ F a00 = dst[0], a01 = dst[1],
+ a10 = dst[2], a11 = dst[3];
+ F det = mad(a00, a11, -a01 * a10),
+ invdet = rcp_precise(det);
+ dst[0] = invdet * a11;
+ dst[1] = -invdet * a01;
+ dst[2] = -invdet * a10;
+ dst[3] = invdet * a00;
+}
+
+STAGE_TAIL(inverse_mat3, F* dst) {
+ F a00 = dst[0], a01 = dst[1], a02 = dst[2],
+ a10 = dst[3], a11 = dst[4], a12 = dst[5],
+ a20 = dst[6], a21 = dst[7], a22 = dst[8];
+ F b01 = mad(a22, a11, -a12 * a21),
+ b11 = mad(a12, a20, -a22 * a10),
+ b21 = mad(a21, a10, -a11 * a20);
+ F det = mad(a00, b01, mad(a01, b11, a02 * b21)),
+ invdet = rcp_precise(det);
+ dst[0] = invdet * b01;
+ dst[1] = invdet * mad(a02, a21, -a22 * a01);
+ dst[2] = invdet * mad(a12, a01, -a02 * a11);
+ dst[3] = invdet * b11;
+ dst[4] = invdet * mad(a22, a00, -a02 * a20);
+ dst[5] = invdet * mad(a02, a10, -a12 * a00);
+ dst[6] = invdet * b21;
+ dst[7] = invdet * mad(a01, a20, -a21 * a00);
+ dst[8] = invdet * mad(a11, a00, -a01 * a10);
+}
+
+STAGE_TAIL(inverse_mat4, F* dst) {
+ F a00 = dst[0], a01 = dst[1], a02 = dst[2], a03 = dst[3],
+ a10 = dst[4], a11 = dst[5], a12 = dst[6], a13 = dst[7],
+ a20 = dst[8], a21 = dst[9], a22 = dst[10], a23 = dst[11],
+ a30 = dst[12], a31 = dst[13], a32 = dst[14], a33 = dst[15];
+ F b00 = mad(a00, a11, -a01 * a10),
+ b01 = mad(a00, a12, -a02 * a10),
+ b02 = mad(a00, a13, -a03 * a10),
+ b03 = mad(a01, a12, -a02 * a11),
+ b04 = mad(a01, a13, -a03 * a11),
+ b05 = mad(a02, a13, -a03 * a12),
+ b06 = mad(a20, a31, -a21 * a30),
+ b07 = mad(a20, a32, -a22 * a30),
+ b08 = mad(a20, a33, -a23 * a30),
+ b09 = mad(a21, a32, -a22 * a31),
+ b10 = mad(a21, a33, -a23 * a31),
+ b11 = mad(a22, a33, -a23 * a32),
+ det = mad(b00, b11, b05 * b06) + mad(b02, b09, b03 * b08) - mad(b01, b10, b04 * b07),
+ invdet = rcp_precise(det);
+ b00 *= invdet;
+ b01 *= invdet;
+ b02 *= invdet;
+ b03 *= invdet;
+ b04 *= invdet;
+ b05 *= invdet;
+ b06 *= invdet;
+ b07 *= invdet;
+ b08 *= invdet;
+ b09 *= invdet;
+ b10 *= invdet;
+ b11 *= invdet;
+ dst[0] = mad(a11, b11, a13*b09) - a12*b10;
+ dst[1] = a02*b10 - mad(a01, b11, a03*b09);
+ dst[2] = mad(a31, b05, a33*b03) - a32*b04;
+ dst[3] = a22*b04 - mad(a21, b05, a23*b03);
+ dst[4] = a12*b08 - mad(a10, b11, a13*b07);
+ dst[5] = mad(a00, b11, a03*b07) - a02*b08;
+ dst[6] = a32*b02 - mad(a30, b05, a33*b01);
+ dst[7] = mad(a20, b05, a23*b01) - a22*b02;
+ dst[8] = mad(a10, b10, a13*b06) - a11*b08;
+ dst[9] = a01*b08 - mad(a00, b10, a03*b06);
+ dst[10] = mad(a30, b04, a33*b00) - a31*b02;
+ dst[11] = a21*b02 - mad(a20, b04, a23*b00);
+ dst[12] = a11*b07 - mad(a10, b09, a12*b06);
+ dst[13] = mad(a00, b09, a02*b06) - a01*b07;
+ dst[14] = a31*b01 - mad(a30, b03, a32*b00);
+ dst[15] = mad(a20, b03, a22*b00) - a21*b01;
+}
+
+// Binary operations take two adjacent inputs, and write their output in the first position.
+template <typename T, void (*ApplyFn)(T*, T*)>
+SI void apply_adjacent_binary(T* dst, T* src) {
+ T* end = src;
+ do {
+ ApplyFn(dst, src);
+ dst += 1;
+ src += 1;
+ } while (dst != end);
+}
+
+template <typename T>
+SI void add_fn(T* dst, T* src) {
+ *dst += *src;
+}
+
+template <typename T>
+SI void sub_fn(T* dst, T* src) {
+ *dst -= *src;
+}
+
+template <typename T>
+SI void mul_fn(T* dst, T* src) {
+ *dst *= *src;
+}
+
+template <typename T>
+SI void div_fn(T* dst, T* src) {
+ T divisor = *src;
+ if constexpr (!std::is_same_v<T, F>) {
+ // We will crash if we integer-divide against zero. Convert 0 to ~0 to avoid this.
+ divisor |= sk_bit_cast<T>(cond_to_mask(divisor == 0));
+ }
+ *dst /= divisor;
+}
+
+SI void bitwise_and_fn(I32* dst, I32* src) {
+ *dst &= *src;
+}
+
+SI void bitwise_or_fn(I32* dst, I32* src) {
+ *dst |= *src;
+}
+
+SI void bitwise_xor_fn(I32* dst, I32* src) {
+ *dst ^= *src;
+}
+
+template <typename T>
+SI void max_fn(T* dst, T* src) {
+ *dst = max(*dst, *src);
+}
+
+template <typename T>
+SI void min_fn(T* dst, T* src) {
+ *dst = min(*dst, *src);
+}
+
+template <typename T>
+SI void cmplt_fn(T* dst, T* src) {
+ static_assert(sizeof(T) == sizeof(I32));
+ I32 result = cond_to_mask(*dst < *src);
+ memcpy(dst, &result, sizeof(I32));
+}
+
+template <typename T>
+SI void cmple_fn(T* dst, T* src) {
+ static_assert(sizeof(T) == sizeof(I32));
+ I32 result = cond_to_mask(*dst <= *src);
+ memcpy(dst, &result, sizeof(I32));
+}
+
+template <typename T>
+SI void cmpeq_fn(T* dst, T* src) {
+ static_assert(sizeof(T) == sizeof(I32));
+ I32 result = cond_to_mask(*dst == *src);
+ memcpy(dst, &result, sizeof(I32));
+}
+
+template <typename T>
+SI void cmpne_fn(T* dst, T* src) {
+ static_assert(sizeof(T) == sizeof(I32));
+ I32 result = cond_to_mask(*dst != *src);
+ memcpy(dst, &result, sizeof(I32));
+}
+
+SI void atan2_fn(F* dst, F* src) {
+ *dst = atan2_(*dst, *src);
+}
+
+SI void pow_fn(F* dst, F* src) {
+ *dst = approx_powf(*dst, *src);
+}
+
+SI void mod_fn(F* dst, F* src) {
+ *dst = *dst - *src * floor_(*dst / *src);
+}
+
+#define DECLARE_N_WAY_BINARY_FLOAT(name) \
+ STAGE_TAIL(name##_n_floats, SkRasterPipeline_BinaryOpCtx* ctx) { \
+ apply_adjacent_binary<F, &name##_fn>((F*)ctx->dst, (F*)ctx->src); \
+ }
+
+#define DECLARE_BINARY_FLOAT(name) \
+ STAGE_TAIL(name##_float, F* dst) { apply_adjacent_binary<F, &name##_fn>(dst, dst + 1); } \
+ STAGE_TAIL(name##_2_floats, F* dst) { apply_adjacent_binary<F, &name##_fn>(dst, dst + 2); } \
+ STAGE_TAIL(name##_3_floats, F* dst) { apply_adjacent_binary<F, &name##_fn>(dst, dst + 3); } \
+ STAGE_TAIL(name##_4_floats, F* dst) { apply_adjacent_binary<F, &name##_fn>(dst, dst + 4); } \
+ DECLARE_N_WAY_BINARY_FLOAT(name)
+
+#define DECLARE_N_WAY_BINARY_INT(name) \
+ STAGE_TAIL(name##_n_ints, SkRasterPipeline_BinaryOpCtx* ctx) { \
+ apply_adjacent_binary<I32, &name##_fn>((I32*)ctx->dst, (I32*)ctx->src); \
+ }
+
+#define DECLARE_BINARY_INT(name) \
+ STAGE_TAIL(name##_int, I32* dst) { apply_adjacent_binary<I32, &name##_fn>(dst, dst + 1); } \
+ STAGE_TAIL(name##_2_ints, I32* dst) { apply_adjacent_binary<I32, &name##_fn>(dst, dst + 2); } \
+ STAGE_TAIL(name##_3_ints, I32* dst) { apply_adjacent_binary<I32, &name##_fn>(dst, dst + 3); } \
+ STAGE_TAIL(name##_4_ints, I32* dst) { apply_adjacent_binary<I32, &name##_fn>(dst, dst + 4); } \
+ DECLARE_N_WAY_BINARY_INT(name)
+
+#define DECLARE_N_WAY_BINARY_UINT(name) \
+ STAGE_TAIL(name##_n_uints, SkRasterPipeline_BinaryOpCtx* ctx) { \
+ apply_adjacent_binary<U32, &name##_fn>((U32*)ctx->dst, (U32*)ctx->src); \
+ }
+
+#define DECLARE_BINARY_UINT(name) \
+ STAGE_TAIL(name##_uint, U32* dst) { apply_adjacent_binary<U32, &name##_fn>(dst, dst + 1); } \
+ STAGE_TAIL(name##_2_uints, U32* dst) { apply_adjacent_binary<U32, &name##_fn>(dst, dst + 2); } \
+ STAGE_TAIL(name##_3_uints, U32* dst) { apply_adjacent_binary<U32, &name##_fn>(dst, dst + 3); } \
+ STAGE_TAIL(name##_4_uints, U32* dst) { apply_adjacent_binary<U32, &name##_fn>(dst, dst + 4); } \
+ DECLARE_N_WAY_BINARY_UINT(name)
+
+// Many ops reuse the int stages when performing uint arithmetic, since they're equivalent on a
+// two's-complement machine. (Even multiplication is equivalent in the lower 32 bits.)
+DECLARE_BINARY_FLOAT(add) DECLARE_BINARY_INT(add)
+DECLARE_BINARY_FLOAT(sub) DECLARE_BINARY_INT(sub)
+DECLARE_BINARY_FLOAT(mul) DECLARE_BINARY_INT(mul)
+DECLARE_BINARY_FLOAT(div) DECLARE_BINARY_INT(div) DECLARE_BINARY_UINT(div)
+ DECLARE_BINARY_INT(bitwise_and)
+ DECLARE_BINARY_INT(bitwise_or)
+ DECLARE_BINARY_INT(bitwise_xor)
+DECLARE_BINARY_FLOAT(mod)
+DECLARE_BINARY_FLOAT(min) DECLARE_BINARY_INT(min) DECLARE_BINARY_UINT(min)
+DECLARE_BINARY_FLOAT(max) DECLARE_BINARY_INT(max) DECLARE_BINARY_UINT(max)
+DECLARE_BINARY_FLOAT(cmplt) DECLARE_BINARY_INT(cmplt) DECLARE_BINARY_UINT(cmplt)
+DECLARE_BINARY_FLOAT(cmple) DECLARE_BINARY_INT(cmple) DECLARE_BINARY_UINT(cmple)
+DECLARE_BINARY_FLOAT(cmpeq) DECLARE_BINARY_INT(cmpeq)
+DECLARE_BINARY_FLOAT(cmpne) DECLARE_BINARY_INT(cmpne)
+
+// Sufficiently complex ops only provide an N-way version, to avoid code bloat from the dedicated
+// 1-4 slot versions.
+DECLARE_N_WAY_BINARY_FLOAT(atan2)
+DECLARE_N_WAY_BINARY_FLOAT(pow)
+
+#undef DECLARE_BINARY_FLOAT
+#undef DECLARE_BINARY_INT
+#undef DECLARE_BINARY_UINT
+#undef DECLARE_N_WAY_BINARY_FLOAT
+#undef DECLARE_N_WAY_BINARY_INT
+#undef DECLARE_N_WAY_BINARY_UINT
+
+// Dots can be represented with multiply and add ops, but they are so foundational that it's worth
+// having dedicated ops.
+STAGE_TAIL(dot_2_floats, F* dst) {
+ dst[0] = mad(dst[0], dst[2],
+ dst[1] * dst[3]);
+}
+
+STAGE_TAIL(dot_3_floats, F* dst) {
+ dst[0] = mad(dst[0], dst[3],
+ mad(dst[1], dst[4],
+ dst[2] * dst[5]));
+}
+
+STAGE_TAIL(dot_4_floats, F* dst) {
+ dst[0] = mad(dst[0], dst[4],
+ mad(dst[1], dst[5],
+ mad(dst[2], dst[6],
+ dst[3] * dst[7])));
+}
+
+// Refract always operates on 4-wide incident and normal vectors; for narrower inputs, the code
+// generator fills in the input columns with zero, and discards the extra output columns.
+STAGE_TAIL(refract_4_floats, F* dst) {
+ // Algorithm adapted from https://registry.khronos.org/OpenGL-Refpages/gl4/html/refract.xhtml
+ F *incident = dst + 0;
+ F *normal = dst + 4;
+ F eta = dst[8];
+
+ F dotNI = mad(normal[0], incident[0],
+ mad(normal[1], incident[1],
+ mad(normal[2], incident[2],
+ normal[3] * incident[3])));
+
+ F k = 1.0 - eta * eta * (1.0 - dotNI * dotNI);
+ F sqrt_k = sqrt_(k);
+
+ for (int idx = 0; idx < 4; ++idx) {
+ dst[idx] = if_then_else(k >= 0,
+ eta * incident[idx] - (eta * dotNI + sqrt_k) * normal[idx],
+ F(0));
+ }
+}
+
+// Ternary operations work like binary ops (see immediately above) but take two source inputs.
+template <typename T, void (*ApplyFn)(T*, T*, T*)>
+SI void apply_adjacent_ternary(T* dst, T* src0, T* src1) {
+ T* end = src0;
+ do {
+ ApplyFn(dst, src0, src1);
+ dst += 1;
+ src0 += 1;
+ src1 += 1;
+ } while (dst != end);
+}
+
+SI void mix_fn(F* a, F* x, F* y) {
+ // We reorder the arguments here to match lerp's GLSL-style order (interpolation point last).
+ *a = lerp(*x, *y, *a);
+}
+
+SI void mix_fn(I32* a, I32* x, I32* y) {
+ // We reorder the arguments here to match if_then_else's expected order (y before x).
+ *a = if_then_else(*a, *y, *x);
+}
+
+SI void smoothstep_fn(F* edge0, F* edge1, F* x) {
+ F t = clamp_01_((*x - *edge0) / (*edge1 - *edge0));
+ *edge0 = t * t * (3.0 - 2.0 * t);
+}
+
+#define DECLARE_N_WAY_TERNARY_FLOAT(name) \
+ STAGE_TAIL(name##_n_floats, SkRasterPipeline_TernaryOpCtx* ctx) { \
+ apply_adjacent_ternary<F, &name##_fn>((F*)ctx->dst, (F*)ctx->src0, (F*)ctx->src1); \
+ }
+
+#define DECLARE_TERNARY_FLOAT(name) \
+ STAGE_TAIL(name##_float, F* p) { apply_adjacent_ternary<F, &name##_fn>(p, p+1, p+2); } \
+ STAGE_TAIL(name##_2_floats, F* p) { apply_adjacent_ternary<F, &name##_fn>(p, p+2, p+4); } \
+ STAGE_TAIL(name##_3_floats, F* p) { apply_adjacent_ternary<F, &name##_fn>(p, p+3, p+6); } \
+ STAGE_TAIL(name##_4_floats, F* p) { apply_adjacent_ternary<F, &name##_fn>(p, p+4, p+8); } \
+ DECLARE_N_WAY_TERNARY_FLOAT(name)
+
+#define DECLARE_TERNARY_INT(name) \
+ STAGE_TAIL(name##_int, I32* p) { apply_adjacent_ternary<I32, &name##_fn>(p, p+1, p+2); } \
+ STAGE_TAIL(name##_2_ints, I32* p) { apply_adjacent_ternary<I32, &name##_fn>(p, p+2, p+4); } \
+ STAGE_TAIL(name##_3_ints, I32* p) { apply_adjacent_ternary<I32, &name##_fn>(p, p+3, p+6); } \
+ STAGE_TAIL(name##_4_ints, I32* p) { apply_adjacent_ternary<I32, &name##_fn>(p, p+4, p+8); } \
+ STAGE_TAIL(name##_n_ints, SkRasterPipeline_TernaryOpCtx* ctx) { \
+ apply_adjacent_ternary<I32, &name##_fn>((I32*)ctx->dst, (I32*)ctx->src0, (I32*)ctx->src1); \
+ }
+
+DECLARE_N_WAY_TERNARY_FLOAT(smoothstep)
+DECLARE_TERNARY_FLOAT(mix)
+DECLARE_TERNARY_INT(mix)
+
+#undef DECLARE_N_WAY_TERNARY_FLOAT
+#undef DECLARE_TERNARY_FLOAT
+#undef DECLARE_TERNARY_INT
+
+STAGE(gauss_a_to_rgba, NoCtx) {
+ // x = 1 - x;
+ // exp(-x * x * 4) - 0.018f;
+ // ... now approximate with quartic
+ //
+ const float c4 = -2.26661229133605957031f;
+ const float c3 = 2.89795351028442382812f;
+ const float c2 = 0.21345567703247070312f;
+ const float c1 = 0.15489584207534790039f;
+ const float c0 = 0.00030726194381713867f;
+ a = mad(a, mad(a, mad(a, mad(a, c4, c3), c2), c1), c0);
+ r = a;
+ g = a;
+ b = a;
+}
+
+// A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
+STAGE(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
+ // (cx,cy) are the center of our sample.
+ F cx = r,
+ cy = g;
+
+ // All sample points are at the same fractional offset (fx,fy).
+ // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
+ F fx = fract(cx + 0.5f),
+ fy = fract(cy + 0.5f);
+
+ // We'll accumulate the color of all four samples into {r,g,b,a} directly.
+ r = g = b = a = 0;
+
+ for (float py = -0.5f; py <= +0.5f; py += 1.0f)
+ for (float px = -0.5f; px <= +0.5f; px += 1.0f) {
+ // (x,y) are the coordinates of this sample point.
+ F x = cx + px,
+ y = cy + py;
+
+ // ix_and_ptr() will clamp to the image's bounds for us.
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+
+ F sr,sg,sb,sa;
+ from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
+
+ // In bilinear interpolation, the 4 pixels at +/- 0.5 offsets from the sample pixel center
+ // are combined in direct proportion to their area overlapping that logical query pixel.
+ // At positive offsets, the x-axis contribution to that rectangle is fx,
+ // or (1-fx) at negative x. Same deal for y.
+ F sx = (px > 0) ? fx : 1.0f - fx,
+ sy = (py > 0) ? fy : 1.0f - fy,
+ area = sx * sy;
+
+ r += sr * area;
+ g += sg * area;
+ b += sb * area;
+ a += sa * area;
+ }
+}
+
+// A specialized fused image shader for clamp-x, clamp-y, non-sRGB sampling.
+STAGE(bicubic_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
+ // (cx,cy) are the center of our sample.
+ F cx = r,
+ cy = g;
+
+ // All sample points are at the same fractional offset (fx,fy).
+ // They're the 4 corners of a logical 1x1 pixel surrounding (x,y) at (0.5,0.5) offsets.
+ F fx = fract(cx + 0.5f),
+ fy = fract(cy + 0.5f);
+
+ // We'll accumulate the color of all four samples into {r,g,b,a} directly.
+ r = g = b = a = 0;
+
+ const float* w = ctx->weights;
+ const F scaley[4] = {bicubic_wts(fy, w[0], w[4], w[ 8], w[12]),
+ bicubic_wts(fy, w[1], w[5], w[ 9], w[13]),
+ bicubic_wts(fy, w[2], w[6], w[10], w[14]),
+ bicubic_wts(fy, w[3], w[7], w[11], w[15])};
+ const F scalex[4] = {bicubic_wts(fx, w[0], w[4], w[ 8], w[12]),
+ bicubic_wts(fx, w[1], w[5], w[ 9], w[13]),
+ bicubic_wts(fx, w[2], w[6], w[10], w[14]),
+ bicubic_wts(fx, w[3], w[7], w[11], w[15])};
+
+ F sample_y = cy - 1.5f;
+ for (int yy = 0; yy <= 3; ++yy) {
+ F sample_x = cx - 1.5f;
+ for (int xx = 0; xx <= 3; ++xx) {
+ F scale = scalex[xx] * scaley[yy];
+
+ // ix_and_ptr() will clamp to the image's bounds for us.
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, sample_x, sample_y);
+
+ F sr,sg,sb,sa;
+ from_8888(gather(ptr, ix), &sr,&sg,&sb,&sa);
+
+ r = mad(scale, sr, r);
+ g = mad(scale, sg, g);
+ b = mad(scale, sb, b);
+ a = mad(scale, sa, a);
+
+ sample_x += 1;
+ }
+ sample_y += 1;
+ }
+}
+
+// ~~~~~~ skgpu::Swizzle stage ~~~~~~ //
+
+STAGE(swizzle, void* ctx) {
+ auto ir = r, ig = g, ib = b, ia = a;
+ F* o[] = {&r, &g, &b, &a};
+ char swiz[4];
+ memcpy(swiz, &ctx, sizeof(swiz));
+
+ for (int i = 0; i < 4; ++i) {
+ switch (swiz[i]) {
+ case 'r': *o[i] = ir; break;
+ case 'g': *o[i] = ig; break;
+ case 'b': *o[i] = ib; break;
+ case 'a': *o[i] = ia; break;
+ case '0': *o[i] = F(0); break;
+ case '1': *o[i] = F(1); break;
+ default: break;
+ }
+ }
+}
+
+namespace lowp {
+#if defined(JUMPER_IS_SCALAR) || defined(SK_DISABLE_LOWP_RASTER_PIPELINE)
+ // If we're not compiled by Clang, or otherwise switched into scalar mode (old Clang, manually),
+ // we don't generate lowp stages. All these nullptrs will tell SkJumper.cpp to always use the
+ // highp float pipeline.
+ #define M(st) static void (*st)(void) = nullptr;
+ SK_RASTER_PIPELINE_OPS_LOWP(M)
+ #undef M
+ static void (*just_return)(void) = nullptr;
+
+ static void start_pipeline(size_t,size_t,size_t,size_t, SkRasterPipelineStage*) {}
+
+#else // We are compiling vector code with Clang... let's make some lowp stages!
+
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ using U8 = SK_VECTORTYPE(uint8_t, 16);
+ using U16 = SK_VECTORTYPE(uint16_t, 16);
+ using I16 = SK_VECTORTYPE(int16_t, 16);
+ using I32 = SK_VECTORTYPE(int32_t, 16);
+ using U32 = SK_VECTORTYPE(uint32_t, 16);
+ using I64 = SK_VECTORTYPE(int64_t, 16);
+ using U64 = SK_VECTORTYPE(uint64_t, 16);
+ using F = SK_VECTORTYPE(float, 16);
+#else
+ using U8 = SK_VECTORTYPE(uint8_t, 8);
+ using U16 = SK_VECTORTYPE(uint16_t, 8);
+ using I16 = SK_VECTORTYPE(int16_t, 8);
+ using I32 = SK_VECTORTYPE(int32_t, 8);
+ using U32 = SK_VECTORTYPE(uint32_t, 8);
+ using I64 = SK_VECTORTYPE(int64_t, 8);
+ using U64 = SK_VECTORTYPE(uint64_t, 8);
+ using F = SK_VECTORTYPE(float, 8);
+#endif
+
+static constexpr size_t N = sizeof(U16) / sizeof(uint16_t);
+
+// Once again, some platforms benefit from a restricted Stage calling convention,
+// but others can pass tons and tons of registers and we're happy to exploit that.
+// It's exactly the same decision and implementation strategy as the F stages above.
+#if JUMPER_NARROW_STAGES
+ struct Params {
+ size_t dx, dy, tail;
+ U16 dr,dg,db,da;
+ };
+ using Stage = void (ABI*)(Params*, SkRasterPipelineStage* program, U16 r, U16 g, U16 b, U16 a);
+#else
+ using Stage = void (ABI*)(size_t tail, SkRasterPipelineStage* program,
+ size_t dx, size_t dy,
+ U16 r, U16 g, U16 b, U16 a,
+ U16 dr, U16 dg, U16 db, U16 da);
+#endif
+
+static void start_pipeline(const size_t x0, const size_t y0,
+ const size_t xlimit, const size_t ylimit,
+ SkRasterPipelineStage* program) {
+ auto start = (Stage)program->fn;
+ for (size_t dy = y0; dy < ylimit; dy++) {
+ #if JUMPER_NARROW_STAGES
+ Params params = { x0,dy,0, 0,0,0,0 };
+ for (; params.dx + N <= xlimit; params.dx += N) {
+ start(&params, program, 0,0,0,0);
+ }
+ if (size_t tail = xlimit - params.dx) {
+ params.tail = tail;
+ start(&params, program, 0,0,0,0);
+ }
+ #else
+ size_t dx = x0;
+ for (; dx + N <= xlimit; dx += N) {
+ start( 0, program, dx,dy, 0,0,0,0, 0,0,0,0);
+ }
+ if (size_t tail = xlimit - dx) {
+ start(tail, program, dx,dy, 0,0,0,0, 0,0,0,0);
+ }
+ #endif
+ }
+}
+
+#if JUMPER_NARROW_STAGES
+ static void ABI just_return(Params*, SkRasterPipelineStage*, U16,U16,U16,U16) {}
+#else
+ static void ABI just_return(size_t, SkRasterPipelineStage*,size_t,size_t,
+ U16,U16,U16,U16, U16,U16,U16,U16) {}
+#endif
+
+// All stages use the same function call ABI to chain into each other, but there are three types:
+// GG: geometry in, geometry out -- think, a matrix
+// GP: geometry in, pixels out. -- think, a memory gather
+// PP: pixels in, pixels out. -- think, a blend mode
+//
+// (Some stages ignore their inputs or produce no logical output. That's perfectly fine.)
+//
+// These three STAGE_ macros let you define each type of stage,
+// and will have (x,y) geometry and/or (r,g,b,a, dr,dg,db,da) pixel arguments as appropriate.
+
+#if JUMPER_NARROW_STAGES
+ #define STAGE_GG(name, ARG) \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F& x, F& y); \
+ static void ABI name(Params* params, SkRasterPipelineStage* program, \
+ U16 r, U16 g, U16 b, U16 a) { \
+ auto x = join<F>(r,g), \
+ y = join<F>(b,a); \
+ name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y); \
+ split(x, &r,&g); \
+ split(y, &b,&a); \
+ auto fn = (Stage)(++program)->fn; \
+ fn(params, program, r,g,b,a); \
+ } \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F& x, F& y)
+
+ #define STAGE_GP(name, ARG) \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F x, F y, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da); \
+ static void ABI name(Params* params, SkRasterPipelineStage* program, \
+ U16 r, U16 g, U16 b, U16 a) { \
+ auto x = join<F>(r,g), \
+ y = join<F>(b,a); \
+ name##_k(Ctx{program}, params->dx,params->dy,params->tail, x,y, r,g,b,a, \
+ params->dr,params->dg,params->db,params->da); \
+ auto fn = (Stage)(++program)->fn; \
+ fn(params, program, r,g,b,a); \
+ } \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F x, F y, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da)
+
+ #define STAGE_PP(name, ARG) \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da); \
+ static void ABI name(Params* params, SkRasterPipelineStage* program, \
+ U16 r, U16 g, U16 b, U16 a) { \
+ name##_k(Ctx{program}, params->dx,params->dy,params->tail, r,g,b,a, \
+ params->dr,params->dg,params->db,params->da); \
+ auto fn = (Stage)(++program)->fn; \
+ fn(params, program, r,g,b,a); \
+ } \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da)
+#else
+ #define STAGE_GG(name, ARG) \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F& x, F& y); \
+ static void ABI name(size_t tail, SkRasterPipelineStage* program, \
+ size_t dx, size_t dy, \
+ U16 r, U16 g, U16 b, U16 a, \
+ U16 dr, U16 dg, U16 db, U16 da) { \
+ auto x = join<F>(r,g), \
+ y = join<F>(b,a); \
+ name##_k(Ctx{program}, dx,dy,tail, x,y); \
+ split(x, &r,&g); \
+ split(y, &b,&a); \
+ auto fn = (Stage)(++program)->fn; \
+ fn(tail, program, dx,dy, r,g,b,a, dr,dg,db,da); \
+ } \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F& x, F& y)
+
+ #define STAGE_GP(name, ARG) \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F x, F y, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da); \
+ static void ABI name(size_t tail, SkRasterPipelineStage* program, \
+ size_t dx, size_t dy, \
+ U16 r, U16 g, U16 b, U16 a, \
+ U16 dr, U16 dg, U16 db, U16 da) { \
+ auto x = join<F>(r,g), \
+ y = join<F>(b,a); \
+ name##_k(Ctx{program}, dx,dy,tail, x,y, r,g,b,a, dr,dg,db,da); \
+ auto fn = (Stage)(++program)->fn; \
+ fn(tail, program, dx,dy, r,g,b,a, dr,dg,db,da); \
+ } \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, F x, F y, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da)
+
+ #define STAGE_PP(name, ARG) \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da); \
+ static void ABI name(size_t tail, SkRasterPipelineStage* program, \
+ size_t dx, size_t dy, \
+ U16 r, U16 g, U16 b, U16 a, \
+ U16 dr, U16 dg, U16 db, U16 da) { \
+ name##_k(Ctx{program}, dx,dy,tail, r,g,b,a, dr,dg,db,da); \
+ auto fn = (Stage)(++program)->fn; \
+ fn(tail, program, dx,dy, r,g,b,a, dr,dg,db,da); \
+ } \
+ SI void name##_k(ARG, size_t dx, size_t dy, size_t tail, \
+ U16& r, U16& g, U16& b, U16& a, \
+ U16& dr, U16& dg, U16& db, U16& da)
+#endif
+
+// ~~~~~~ Commonly used helper functions ~~~~~~ //
+
+/**
+ * Helpers to do properly rounded division (by 255). The ideal answer we want to compute is slow,
+ * thanks to a division by a non-power of two:
+ * [1] (v + 127) / 255
+ *
+ * There is a two-step process that computes the correct answer for all inputs:
+ * [2] (v + 128 + ((v + 128) >> 8)) >> 8
+ *
+ * There is also a single iteration approximation, but it's wrong (+-1) ~25% of the time:
+ * [3] (v + 255) >> 8;
+ *
+ * We offer two different implementations here, depending on the requirements of the calling stage.
+ */
+
+/**
+ * div255 favors speed over accuracy. It uses formula [2] on NEON (where we can compute it as fast
+ * as [3]), and uses [3] elsewhere.
+ */
+SI U16 div255(U16 v) {
+#if defined(JUMPER_IS_NEON)
+ // With NEON we can compute [2] just as fast as [3], so let's be correct.
+ // First we compute v + ((v+128)>>8), then one more round of (...+128)>>8 to finish up:
+ return vrshrq_n_u16(vrsraq_n_u16(v, v, 8), 8);
+#else
+ // Otherwise, use [3], which is never wrong by more than 1:
+ return (v+255)/256;
+#endif
+}
+
+/**
+ * div255_accurate guarantees the right answer on all platforms, at the expense of performance.
+ */
+SI U16 div255_accurate(U16 v) {
+#if defined(JUMPER_IS_NEON)
+ // Our NEON implementation of div255 is already correct for all inputs:
+ return div255(v);
+#else
+ // This is [2] (the same formulation as NEON), but written without the benefit of intrinsics:
+ v += 128;
+ return (v+(v/256))/256;
+#endif
+}
+
+SI U16 inv(U16 v) { return 255-v; }
+
+SI U16 if_then_else(I16 c, U16 t, U16 e) { return (t & sk_bit_cast<U16>(c)) | (e & ~sk_bit_cast<U16>(c)); }
+SI U32 if_then_else(I32 c, U32 t, U32 e) { return (t & sk_bit_cast<U32>(c)) | (e & ~sk_bit_cast<U32>(c)); }
+
+SI U16 max(U16 x, U16 y) { return if_then_else(x < y, y, x); }
+SI U16 min(U16 x, U16 y) { return if_then_else(x < y, x, y); }
+
+SI U16 from_float(float f) { return f * 255.0f + 0.5f; }
+
+SI U16 lerp(U16 from, U16 to, U16 t) { return div255( from*inv(t) + to*t ); }
+
+template <typename D, typename S>
+SI D convert(S src) {
+ return SK_CONVERTVECTOR(src, D);
+}
+
+#define cast convert
+
+template <typename D, typename S>
+SI void split(S v, D* lo, D* hi) {
+ static_assert(2*sizeof(D) == sizeof(S), "");
+ memcpy(lo, (const char*)&v + 0*sizeof(D), sizeof(D));
+ memcpy(hi, (const char*)&v + 1*sizeof(D), sizeof(D));
+}
+template <typename D, typename S>
+SI D join(S lo, S hi) {
+ static_assert(sizeof(D) == 2*sizeof(S), "");
+ D v;
+ memcpy((char*)&v + 0*sizeof(S), &lo, sizeof(S));
+ memcpy((char*)&v + 1*sizeof(S), &hi, sizeof(S));
+ return v;
+}
+
+SI F if_then_else(I32 c, F t, F e) {
+ return sk_bit_cast<F>( (sk_bit_cast<I32>(t) & c) | (sk_bit_cast<I32>(e) & ~c) );
+}
+SI F max(F x, F y) { return if_then_else(x < y, y, x); }
+SI F min(F x, F y) { return if_then_else(x < y, x, y); }
+
+SI I32 if_then_else(I32 c, I32 t, I32 e) {
+ return (t & c) | (e & ~c);
+}
+SI I32 max(I32 x, I32 y) { return if_then_else(x < y, y, x); }
+SI I32 min(I32 x, I32 y) { return if_then_else(x < y, x, y); }
+
+SI F mad(F f, F m, F a) { return f*m+a; }
+SI U32 trunc_(F x) { return cast<U32>(cast<I32>(x)); }
+
+// Use approximate instructions and one Newton-Raphson step to calculate 1/x.
+SI F rcp_precise(F x) {
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ __m256 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(SK_OPTS_NS::rcp_precise(lo), SK_OPTS_NS::rcp_precise(hi));
+#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+ __m128 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(SK_OPTS_NS::rcp_precise(lo), SK_OPTS_NS::rcp_precise(hi));
+#elif defined(JUMPER_IS_NEON)
+ float32x4_t lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(SK_OPTS_NS::rcp_precise(lo), SK_OPTS_NS::rcp_precise(hi));
+#else
+ return 1.0f / x;
+#endif
+}
+SI F sqrt_(F x) {
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ __m256 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm256_sqrt_ps(lo), _mm256_sqrt_ps(hi));
+#elif defined(JUMPER_IS_SSE2) || defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+ __m128 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm_sqrt_ps(lo), _mm_sqrt_ps(hi));
+#elif defined(SK_CPU_ARM64)
+ float32x4_t lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(vsqrtq_f32(lo), vsqrtq_f32(hi));
+#elif defined(JUMPER_IS_NEON)
+ auto sqrt = [](float32x4_t v) {
+ auto est = vrsqrteq_f32(v); // Estimate and two refinement steps for est = rsqrt(v).
+ est *= vrsqrtsq_f32(v,est*est);
+ est *= vrsqrtsq_f32(v,est*est);
+ return v*est; // sqrt(v) == v*rsqrt(v).
+ };
+ float32x4_t lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(sqrt(lo), sqrt(hi));
+#else
+ return F{
+ sqrtf(x[0]), sqrtf(x[1]), sqrtf(x[2]), sqrtf(x[3]),
+ sqrtf(x[4]), sqrtf(x[5]), sqrtf(x[6]), sqrtf(x[7]),
+ };
+#endif
+}
+
+SI F floor_(F x) {
+#if defined(SK_CPU_ARM64)
+ float32x4_t lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(vrndmq_f32(lo), vrndmq_f32(hi));
+#elif defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ __m256 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm256_floor_ps(lo), _mm256_floor_ps(hi));
+#elif defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+ __m128 lo,hi;
+ split(x, &lo,&hi);
+ return join<F>(_mm_floor_ps(lo), _mm_floor_ps(hi));
+#else
+ F roundtrip = cast<F>(cast<I32>(x));
+ return roundtrip - if_then_else(roundtrip > x, F(1), F(0));
+#endif
+}
+
+// scaled_mult interprets a and b as numbers on [-1, 1) in Q15 format. Functionally
+// this multiply is:
+// (2 * a * b + (1 << 15)) >> 16
+// The result is a number on [-1, 1).
+// Note: on NEON this is a saturating multiply while the others are not.
+SI I16 scaled_mult(I16 a, I16 b) {
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ return _mm256_mulhrs_epi16(a, b);
+#elif defined(JUMPER_IS_SSE41) || defined(JUMPER_IS_AVX)
+ return _mm_mulhrs_epi16(a, b);
+#elif defined(SK_CPU_ARM64)
+ return vqrdmulhq_s16(a, b);
+#elif defined(JUMPER_IS_NEON)
+ return vqrdmulhq_s16(a, b);
+#else
+ const I32 roundingTerm = 1 << 14;
+ return cast<I16>((cast<I32>(a) * cast<I32>(b) + roundingTerm) >> 15);
+#endif
+}
+
+// This sum is to support lerp where the result will always be a positive number. In general,
+// a sum like this would require an additional bit, but because we know the range of the result
+// we know that the extra bit will always be zero.
+SI U16 constrained_add(I16 a, U16 b) {
+ #if defined(SK_DEBUG)
+ for (size_t i = 0; i < N; i++) {
+ // Ensure that a + b is on the interval [0, UINT16_MAX]
+ int ia = a[i],
+ ib = b[i];
+ // Use 65535 here because fuchsia's compiler evaluates UINT16_MAX - ib, which is
+ // 65535U - ib, as a uint32_t instead of an int32_t. This was forcing ia to be
+ // interpreted as a uint32_t.
+ SkASSERT(-ib <= ia && ia <= 65535 - ib);
+ }
+ #endif
+ return b + cast<U16>(a);
+}
+
+SI F fract(F x) { return x - floor_(x); }
+SI F abs_(F x) { return sk_bit_cast<F>( sk_bit_cast<I32>(x) & 0x7fffffff ); }
+
+// ~~~~~~ Basic / misc. stages ~~~~~~ //
+
+STAGE_GG(seed_shader, NoCtx) {
+ static constexpr float iota[] = {
+ 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f,
+ 8.5f, 9.5f,10.5f,11.5f,12.5f,13.5f,14.5f,15.5f,
+ };
+ x = cast<F>(I32(dx)) + sk_unaligned_load<F>(iota);
+ y = cast<F>(I32(dy)) + 0.5f;
+}
+
+STAGE_GG(matrix_translate, const float* m) {
+ x += m[0];
+ y += m[1];
+}
+STAGE_GG(matrix_scale_translate, const float* m) {
+ x = mad(x,m[0], m[2]);
+ y = mad(y,m[1], m[3]);
+}
+STAGE_GG(matrix_2x3, const float* m) {
+ auto X = mad(x,m[0], mad(y,m[1], m[2])),
+ Y = mad(x,m[3], mad(y,m[4], m[5]));
+ x = X;
+ y = Y;
+}
+STAGE_GG(matrix_perspective, const float* m) {
+ // N.B. Unlike the other matrix_ stages, this matrix is row-major.
+ auto X = mad(x,m[0], mad(y,m[1], m[2])),
+ Y = mad(x,m[3], mad(y,m[4], m[5])),
+ Z = mad(x,m[6], mad(y,m[7], m[8]));
+ x = X * rcp_precise(Z);
+ y = Y * rcp_precise(Z);
+}
+
+STAGE_PP(uniform_color, const SkRasterPipeline_UniformColorCtx* c) {
+ r = c->rgba[0];
+ g = c->rgba[1];
+ b = c->rgba[2];
+ a = c->rgba[3];
+}
+STAGE_PP(uniform_color_dst, const SkRasterPipeline_UniformColorCtx* c) {
+ dr = c->rgba[0];
+ dg = c->rgba[1];
+ db = c->rgba[2];
+ da = c->rgba[3];
+}
+STAGE_PP(black_color, NoCtx) { r = g = b = 0; a = 255; }
+STAGE_PP(white_color, NoCtx) { r = g = b = 255; a = 255; }
+
+STAGE_PP(set_rgb, const float rgb[3]) {
+ r = from_float(rgb[0]);
+ g = from_float(rgb[1]);
+ b = from_float(rgb[2]);
+}
+
+// No need to clamp against 0 here (values are unsigned)
+STAGE_PP(clamp_01, NoCtx) {
+ r = min(r, 255);
+ g = min(g, 255);
+ b = min(b, 255);
+ a = min(a, 255);
+}
+
+STAGE_PP(clamp_gamut, NoCtx) {
+ a = min(a, 255);
+ r = min(r, a);
+ g = min(g, a);
+ b = min(b, a);
+}
+
+STAGE_PP(premul, NoCtx) {
+ r = div255_accurate(r * a);
+ g = div255_accurate(g * a);
+ b = div255_accurate(b * a);
+}
+STAGE_PP(premul_dst, NoCtx) {
+ dr = div255_accurate(dr * da);
+ dg = div255_accurate(dg * da);
+ db = div255_accurate(db * da);
+}
+
+STAGE_PP(force_opaque , NoCtx) { a = 255; }
+STAGE_PP(force_opaque_dst, NoCtx) { da = 255; }
+
+STAGE_PP(swap_rb, NoCtx) {
+ auto tmp = r;
+ r = b;
+ b = tmp;
+}
+STAGE_PP(swap_rb_dst, NoCtx) {
+ auto tmp = dr;
+ dr = db;
+ db = tmp;
+}
+
+STAGE_PP(move_src_dst, NoCtx) {
+ dr = r;
+ dg = g;
+ db = b;
+ da = a;
+}
+
+STAGE_PP(move_dst_src, NoCtx) {
+ r = dr;
+ g = dg;
+ b = db;
+ a = da;
+}
+
+STAGE_PP(swap_src_dst, NoCtx) {
+ std::swap(r, dr);
+ std::swap(g, dg);
+ std::swap(b, db);
+ std::swap(a, da);
+}
+
+// ~~~~~~ Blend modes ~~~~~~ //
+
+// The same logic applied to all 4 channels.
+#define BLEND_MODE(name) \
+ SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
+ STAGE_PP(name, NoCtx) { \
+ r = name##_channel(r,dr,a,da); \
+ g = name##_channel(g,dg,a,da); \
+ b = name##_channel(b,db,a,da); \
+ a = name##_channel(a,da,a,da); \
+ } \
+ SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
+
+ BLEND_MODE(clear) { return 0; }
+ BLEND_MODE(srcatop) { return div255( s*da + d*inv(sa) ); }
+ BLEND_MODE(dstatop) { return div255( d*sa + s*inv(da) ); }
+ BLEND_MODE(srcin) { return div255( s*da ); }
+ BLEND_MODE(dstin) { return div255( d*sa ); }
+ BLEND_MODE(srcout) { return div255( s*inv(da) ); }
+ BLEND_MODE(dstout) { return div255( d*inv(sa) ); }
+ BLEND_MODE(srcover) { return s + div255( d*inv(sa) ); }
+ BLEND_MODE(dstover) { return d + div255( s*inv(da) ); }
+ BLEND_MODE(modulate) { return div255( s*d ); }
+ BLEND_MODE(multiply) { return div255( s*inv(da) + d*inv(sa) + s*d ); }
+ BLEND_MODE(plus_) { return min(s+d, 255); }
+ BLEND_MODE(screen) { return s + d - div255( s*d ); }
+ BLEND_MODE(xor_) { return div255( s*inv(da) + d*inv(sa) ); }
+#undef BLEND_MODE
+
+// The same logic applied to color, and srcover for alpha.
+#define BLEND_MODE(name) \
+ SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da); \
+ STAGE_PP(name, NoCtx) { \
+ r = name##_channel(r,dr,a,da); \
+ g = name##_channel(g,dg,a,da); \
+ b = name##_channel(b,db,a,da); \
+ a = a + div255( da*inv(a) ); \
+ } \
+ SI U16 name##_channel(U16 s, U16 d, U16 sa, U16 da)
+
+ BLEND_MODE(darken) { return s + d - div255( max(s*da, d*sa) ); }
+ BLEND_MODE(lighten) { return s + d - div255( min(s*da, d*sa) ); }
+ BLEND_MODE(difference) { return s + d - 2*div255( min(s*da, d*sa) ); }
+ BLEND_MODE(exclusion) { return s + d - 2*div255( s*d ); }
+
+ BLEND_MODE(hardlight) {
+ return div255( s*inv(da) + d*inv(sa) +
+ if_then_else(2*s <= sa, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
+ }
+ BLEND_MODE(overlay) {
+ return div255( s*inv(da) + d*inv(sa) +
+ if_then_else(2*d <= da, 2*s*d, sa*da - 2*(sa-s)*(da-d)) );
+ }
+#undef BLEND_MODE
+
+// ~~~~~~ Helpers for interacting with memory ~~~~~~ //
+
+template <typename T>
+SI T* ptr_at_xy(const SkRasterPipeline_MemoryCtx* ctx, size_t dx, size_t dy) {
+ return (T*)ctx->pixels + dy*ctx->stride + dx;
+}
+
+template <typename T>
+SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, F x, F y) {
+ // Exclusive -> inclusive.
+ const F w = sk_bit_cast<float>( sk_bit_cast<uint32_t>(ctx->width ) - 1),
+ h = sk_bit_cast<float>( sk_bit_cast<uint32_t>(ctx->height) - 1);
+
+ const F z = std::numeric_limits<float>::min();
+
+ x = min(max(z, x), w);
+ y = min(max(z, y), h);
+
+ x = sk_bit_cast<F>(sk_bit_cast<U32>(x) - (uint32_t)ctx->roundDownAtInteger);
+ y = sk_bit_cast<F>(sk_bit_cast<U32>(y) - (uint32_t)ctx->roundDownAtInteger);
+
+ *ptr = (const T*)ctx->pixels;
+ return trunc_(y)*ctx->stride + trunc_(x);
+}
+
+template <typename T>
+SI U32 ix_and_ptr(T** ptr, const SkRasterPipeline_GatherCtx* ctx, I32 x, I32 y) {
+ // This flag doesn't make sense when the coords are integers.
+ SkASSERT(ctx->roundDownAtInteger == 0);
+ // Exclusive -> inclusive.
+ const I32 w = ctx->width - 1,
+ h = ctx->height - 1;
+
+ U32 ax = cast<U32>(min(max(0, x), w)),
+ ay = cast<U32>(min(max(0, y), h));
+
+ *ptr = (const T*)ctx->pixels;
+ return ay * ctx->stride + ax;
+}
+
+template <typename V, typename T>
+SI V load(const T* ptr, size_t tail) {
+ V v = 0;
+ switch (tail & (N-1)) {
+ case 0: memcpy(&v, ptr, sizeof(v)); break;
+ #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ case 15: v[14] = ptr[14]; [[fallthrough]];
+ case 14: v[13] = ptr[13]; [[fallthrough]];
+ case 13: v[12] = ptr[12]; [[fallthrough]];
+ case 12: memcpy(&v, ptr, 12*sizeof(T)); break;
+ case 11: v[10] = ptr[10]; [[fallthrough]];
+ case 10: v[ 9] = ptr[ 9]; [[fallthrough]];
+ case 9: v[ 8] = ptr[ 8]; [[fallthrough]];
+ case 8: memcpy(&v, ptr, 8*sizeof(T)); break;
+ #endif
+ case 7: v[ 6] = ptr[ 6]; [[fallthrough]];
+ case 6: v[ 5] = ptr[ 5]; [[fallthrough]];
+ case 5: v[ 4] = ptr[ 4]; [[fallthrough]];
+ case 4: memcpy(&v, ptr, 4*sizeof(T)); break;
+ case 3: v[ 2] = ptr[ 2]; [[fallthrough]];
+ case 2: memcpy(&v, ptr, 2*sizeof(T)); break;
+ case 1: v[ 0] = ptr[ 0];
+ }
+ return v;
+}
+template <typename V, typename T>
+SI void store(T* ptr, size_t tail, V v) {
+ switch (tail & (N-1)) {
+ case 0: memcpy(ptr, &v, sizeof(v)); break;
+ #if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ case 15: ptr[14] = v[14]; [[fallthrough]];
+ case 14: ptr[13] = v[13]; [[fallthrough]];
+ case 13: ptr[12] = v[12]; [[fallthrough]];
+ case 12: memcpy(ptr, &v, 12*sizeof(T)); break;
+ case 11: ptr[10] = v[10]; [[fallthrough]];
+ case 10: ptr[ 9] = v[ 9]; [[fallthrough]];
+ case 9: ptr[ 8] = v[ 8]; [[fallthrough]];
+ case 8: memcpy(ptr, &v, 8*sizeof(T)); break;
+ #endif
+ case 7: ptr[ 6] = v[ 6]; [[fallthrough]];
+ case 6: ptr[ 5] = v[ 5]; [[fallthrough]];
+ case 5: ptr[ 4] = v[ 4]; [[fallthrough]];
+ case 4: memcpy(ptr, &v, 4*sizeof(T)); break;
+ case 3: ptr[ 2] = v[ 2]; [[fallthrough]];
+ case 2: memcpy(ptr, &v, 2*sizeof(T)); break;
+ case 1: ptr[ 0] = v[ 0];
+ }
+}
+
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ template <typename V, typename T>
+ SI V gather(const T* ptr, U32 ix) {
+ return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
+ ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]],
+ ptr[ix[ 8]], ptr[ix[ 9]], ptr[ix[10]], ptr[ix[11]],
+ ptr[ix[12]], ptr[ix[13]], ptr[ix[14]], ptr[ix[15]], };
+ }
+
+ template<>
+ F gather(const float* ptr, U32 ix) {
+ __m256i lo, hi;
+ split(ix, &lo, &hi);
+
+ return join<F>(_mm256_i32gather_ps(ptr, lo, 4),
+ _mm256_i32gather_ps(ptr, hi, 4));
+ }
+
+ template<>
+ U32 gather(const uint32_t* ptr, U32 ix) {
+ __m256i lo, hi;
+ split(ix, &lo, &hi);
+
+ return join<U32>(_mm256_i32gather_epi32((const int*)ptr, lo, 4),
+ _mm256_i32gather_epi32((const int*)ptr, hi, 4));
+ }
+#else
+ template <typename V, typename T>
+ SI V gather(const T* ptr, U32 ix) {
+ return V{ ptr[ix[ 0]], ptr[ix[ 1]], ptr[ix[ 2]], ptr[ix[ 3]],
+ ptr[ix[ 4]], ptr[ix[ 5]], ptr[ix[ 6]], ptr[ix[ 7]], };
+ }
+#endif
+
+
+// ~~~~~~ 32-bit memory loads and stores ~~~~~~ //
+
+SI void from_8888(U32 rgba, U16* r, U16* g, U16* b, U16* a) {
+#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
+ // Swap the middle 128-bit lanes to make _mm256_packus_epi32() in cast_U16() work out nicely.
+ __m256i _01,_23;
+ split(rgba, &_01, &_23);
+ __m256i _02 = _mm256_permute2x128_si256(_01,_23, 0x20),
+ _13 = _mm256_permute2x128_si256(_01,_23, 0x31);
+ rgba = join<U32>(_02, _13);
+
+ auto cast_U16 = [](U32 v) -> U16 {
+ __m256i _02,_13;
+ split(v, &_02,&_13);
+ return _mm256_packus_epi32(_02,_13);
+ };
+#else
+ auto cast_U16 = [](U32 v) -> U16 {
+ return cast<U16>(v);
+ };
+#endif
+ *r = cast_U16(rgba & 65535) & 255;
+ *g = cast_U16(rgba & 65535) >> 8;
+ *b = cast_U16(rgba >> 16) & 255;
+ *a = cast_U16(rgba >> 16) >> 8;
+}
+
+SI void load_8888_(const uint32_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+#if 1 && defined(JUMPER_IS_NEON)
+ uint8x8x4_t rgba;
+ switch (tail & (N-1)) {
+ case 0: rgba = vld4_u8 ((const uint8_t*)(ptr+0) ); break;
+ case 7: rgba = vld4_lane_u8((const uint8_t*)(ptr+6), rgba, 6); [[fallthrough]];
+ case 6: rgba = vld4_lane_u8((const uint8_t*)(ptr+5), rgba, 5); [[fallthrough]];
+ case 5: rgba = vld4_lane_u8((const uint8_t*)(ptr+4), rgba, 4); [[fallthrough]];
+ case 4: rgba = vld4_lane_u8((const uint8_t*)(ptr+3), rgba, 3); [[fallthrough]];
+ case 3: rgba = vld4_lane_u8((const uint8_t*)(ptr+2), rgba, 2); [[fallthrough]];
+ case 2: rgba = vld4_lane_u8((const uint8_t*)(ptr+1), rgba, 1); [[fallthrough]];
+ case 1: rgba = vld4_lane_u8((const uint8_t*)(ptr+0), rgba, 0);
+ }
+ *r = cast<U16>(sk_bit_cast<U8>(rgba.val[0]));
+ *g = cast<U16>(sk_bit_cast<U8>(rgba.val[1]));
+ *b = cast<U16>(sk_bit_cast<U8>(rgba.val[2]));
+ *a = cast<U16>(sk_bit_cast<U8>(rgba.val[3]));
+#else
+ from_8888(load<U32>(ptr, tail), r,g,b,a);
+#endif
+}
+SI void store_8888_(uint32_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+ r = min(r, 255);
+ g = min(g, 255);
+ b = min(b, 255);
+ a = min(a, 255);
+
+#if 1 && defined(JUMPER_IS_NEON)
+ uint8x8x4_t rgba = {{
+ cast<U8>(r),
+ cast<U8>(g),
+ cast<U8>(b),
+ cast<U8>(a),
+ }};
+ switch (tail & (N-1)) {
+ case 0: vst4_u8 ((uint8_t*)(ptr+0), rgba ); break;
+ case 7: vst4_lane_u8((uint8_t*)(ptr+6), rgba, 6); [[fallthrough]];
+ case 6: vst4_lane_u8((uint8_t*)(ptr+5), rgba, 5); [[fallthrough]];
+ case 5: vst4_lane_u8((uint8_t*)(ptr+4), rgba, 4); [[fallthrough]];
+ case 4: vst4_lane_u8((uint8_t*)(ptr+3), rgba, 3); [[fallthrough]];
+ case 3: vst4_lane_u8((uint8_t*)(ptr+2), rgba, 2); [[fallthrough]];
+ case 2: vst4_lane_u8((uint8_t*)(ptr+1), rgba, 1); [[fallthrough]];
+ case 1: vst4_lane_u8((uint8_t*)(ptr+0), rgba, 0);
+ }
+#else
+ store(ptr, tail, cast<U32>(r | (g<<8)) << 0
+ | cast<U32>(b | (a<<8)) << 16);
+#endif
+}
+
+STAGE_PP(load_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
+}
+STAGE_PP(load_8888_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_8888_(ptr_at_xy<const uint32_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
+}
+STAGE_PP(store_8888, const SkRasterPipeline_MemoryCtx* ctx) {
+ store_8888_(ptr_at_xy<uint32_t>(ctx, dx,dy), tail, r,g,b,a);
+}
+STAGE_GP(gather_8888, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint32_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ from_8888(gather<U32>(ptr, ix), &r, &g, &b, &a);
+}
+
+// ~~~~~~ 16-bit memory loads and stores ~~~~~~ //
+
+SI void from_565(U16 rgb, U16* r, U16* g, U16* b) {
+ // Format for 565 buffers: 15|rrrrr gggggg bbbbb|0
+ U16 R = (rgb >> 11) & 31,
+ G = (rgb >> 5) & 63,
+ B = (rgb >> 0) & 31;
+
+ // These bit replications are the same as multiplying by 255/31 or 255/63 to scale to 8-bit.
+ *r = (R << 3) | (R >> 2);
+ *g = (G << 2) | (G >> 4);
+ *b = (B << 3) | (B >> 2);
+}
+SI void load_565_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
+ from_565(load<U16>(ptr, tail), r,g,b);
+}
+SI void store_565_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b) {
+ r = min(r, 255);
+ g = min(g, 255);
+ b = min(b, 255);
+
+ // Round from [0,255] to [0,31] or [0,63], as if x * (31/255.0f) + 0.5f.
+ // (Don't feel like you need to find some fundamental truth in these...
+ // they were brute-force searched.)
+ U16 R = (r * 9 + 36) / 74, // 9/74 ≈ 31/255, plus 36/74, about half.
+ G = (g * 21 + 42) / 85, // 21/85 = 63/255 exactly.
+ B = (b * 9 + 36) / 74;
+ // Pack them back into 15|rrrrr gggggg bbbbb|0.
+ store(ptr, tail, R << 11
+ | G << 5
+ | B << 0);
+}
+
+STAGE_PP(load_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b);
+ a = 255;
+}
+STAGE_PP(load_565_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db);
+ da = 255;
+}
+STAGE_PP(store_565, const SkRasterPipeline_MemoryCtx* ctx) {
+ store_565_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b);
+}
+STAGE_GP(gather_565, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ from_565(gather<U16>(ptr, ix), &r, &g, &b);
+ a = 255;
+}
+
+SI void from_4444(U16 rgba, U16* r, U16* g, U16* b, U16* a) {
+ // Format for 4444 buffers: 15|rrrr gggg bbbb aaaa|0.
+ U16 R = (rgba >> 12) & 15,
+ G = (rgba >> 8) & 15,
+ B = (rgba >> 4) & 15,
+ A = (rgba >> 0) & 15;
+
+ // Scale [0,15] to [0,255].
+ *r = (R << 4) | R;
+ *g = (G << 4) | G;
+ *b = (B << 4) | B;
+ *a = (A << 4) | A;
+}
+SI void load_4444_(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
+ from_4444(load<U16>(ptr, tail), r,g,b,a);
+}
+SI void store_4444_(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
+ r = min(r, 255);
+ g = min(g, 255);
+ b = min(b, 255);
+ a = min(a, 255);
+
+ // Round from [0,255] to [0,15], producing the same value as (x*(15/255.0f) + 0.5f).
+ U16 R = (r + 8) / 17,
+ G = (g + 8) / 17,
+ B = (b + 8) / 17,
+ A = (a + 8) / 17;
+ // Pack them back into 15|rrrr gggg bbbb aaaa|0.
+ store(ptr, tail, R << 12
+ | G << 8
+ | B << 4
+ | A << 0);
+}
+
+STAGE_PP(load_4444, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &r,&g,&b,&a);
+}
+STAGE_PP(load_4444_dst, const SkRasterPipeline_MemoryCtx* ctx) {
+ load_4444_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &dr,&dg,&db,&da);
+}
+STAGE_PP(store_4444, const SkRasterPipeline_MemoryCtx* ctx) {
+ store_4444_(ptr_at_xy<uint16_t>(ctx, dx,dy), tail, r,g,b,a);
+}
+STAGE_GP(gather_4444, const SkRasterPipeline_GatherCtx* ctx) {
+ const uint16_t* ptr;
+ U32 ix = ix_and_ptr(&ptr, ctx, x,y);
+ from_4444(gather<U16>(ptr, ix), &r,&g,&b,&a);
+}
+
// Unpack one interleaved rg88 value: r is the low byte, g the high byte.
SI void from_88(U16 rg, U16* r, U16* g) {
    *r = (rg & 0xFF);
    *g = (rg >> 8);
}

// De-interleave rg88 pixels from memory into separate r and g lanes.
// tail == 0 means a full batch of N pixels; otherwise exactly `tail` pixels.
SI void load_88_(const uint16_t* ptr, size_t tail, U16* r, U16* g) {
#if 1 && defined(JUMPER_IS_NEON)
    // vld2 de-interleaves in hardware; for partial batches the lane loads fall
    // through, filling lanes tail-1 down to 0.
    // NOTE(review): lanes at or above `tail` are left uninitialized here —
    // presumably masked off by later stages; confirm before relying on them.
    uint8x8x2_t rg;
    switch (tail & (N-1)) {
        case 0: rg = vld2_u8 ((const uint8_t*)(ptr+0) ); break;
        case 7: rg = vld2_lane_u8((const uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
        case 6: rg = vld2_lane_u8((const uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
        case 5: rg = vld2_lane_u8((const uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
        case 4: rg = vld2_lane_u8((const uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
        case 3: rg = vld2_lane_u8((const uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
        case 2: rg = vld2_lane_u8((const uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
        case 1: rg = vld2_lane_u8((const uint8_t*)(ptr+0), rg, 0);
    }
    *r = cast<U16>(U8(rg.val[0]));
    *g = cast<U16>(U8(rg.val[1]));
#else
    from_88(load<U16>(ptr, tail), r,g);
#endif
}

// Interleave r and g lanes back into rg88 pixels in memory.
// Channels are clamped to [0,255] first since lowp math can overshoot 255.
SI void store_88_(uint16_t* ptr, size_t tail, U16 r, U16 g) {
    r = min(r, 255);
    g = min(g, 255);

#if 1 && defined(JUMPER_IS_NEON)
    // vst2 interleaves in hardware; lane stores handle partial batches,
    // mirroring the fallthrough ladder in load_88_.
    uint8x8x2_t rg = {{
        cast<U8>(r),
        cast<U8>(g),
    }};
    switch (tail & (N-1)) {
        case 0: vst2_u8 ((uint8_t*)(ptr+0), rg ); break;
        case 7: vst2_lane_u8((uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
        case 6: vst2_lane_u8((uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
        case 5: vst2_lane_u8((uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
        case 4: vst2_lane_u8((uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
        case 3: vst2_lane_u8((uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
        case 2: vst2_lane_u8((uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
        case 1: vst2_lane_u8((uint8_t*)(ptr+0), rg, 0);
    }
#else
    store(ptr, tail, cast<U16>(r | (g<<8)) << 0);
#endif
}
+
// RG88: two 8-bit channels; b and a are synthesized as 0 and fully opaque.
STAGE_PP(load_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
    load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &r, &g);
    b = 0;
    a = 255;
}
STAGE_PP(load_rg88_dst, const SkRasterPipeline_MemoryCtx* ctx) {
    load_88_(ptr_at_xy<const uint16_t>(ctx, dx, dy), tail, &dr, &dg);
    db = 0;
    da = 255;
}
// Only r and g are written back; b and a are dropped.
STAGE_PP(store_rg88, const SkRasterPipeline_MemoryCtx* ctx) {
    store_88_(ptr_at_xy<uint16_t>(ctx, dx, dy), tail, r, g);
}
// Gather rg88 pixels at arbitrary sample coordinates.
STAGE_GP(gather_rg88, const SkRasterPipeline_GatherCtx* ctx) {
    const uint16_t* ptr;
    U32 ix = ix_and_ptr(&ptr, ctx, x, y);
    from_88(gather<U16>(ptr, ix), &r, &g);
    b = 0;
    a = 255;
}
+
// ~~~~~~ 8-bit memory loads and stores ~~~~~~ //

// Widen 8-bit bytes to 16-bit lanes.
SI U16 load_8(const uint8_t* ptr, size_t tail) {
    return cast<U16>(load<U8>(ptr, tail));
}
// Clamp 16-bit lanes to [0,255] and narrow back to bytes.
SI void store_8(uint8_t* ptr, size_t tail, U16 v) {
    v = min(v, 255);
    store(ptr, tail, cast<U8>(v));
}

// A8: alpha-only format. Color channels become 0 (premultiplied black).
STAGE_PP(load_a8, const SkRasterPipeline_MemoryCtx* ctx) {
    r = g = b = 0;
    a = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
}
STAGE_PP(load_a8_dst, const SkRasterPipeline_MemoryCtx* ctx) {
    dr = dg = db = 0;
    da = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
}
STAGE_PP(store_a8, const SkRasterPipeline_MemoryCtx* ctx) {
    store_8(ptr_at_xy<uint8_t>(ctx, dx,dy), tail, a);
}
STAGE_GP(gather_a8, const SkRasterPipeline_GatherCtx* ctx) {
    const uint8_t* ptr;
    U32 ix = ix_and_ptr(&ptr, ctx, x,y);
    r = g = b = 0;
    a = cast<U16>(gather<U8>(ptr, ix));
}
// R8: store only the red channel.
STAGE_PP(store_r8, const SkRasterPipeline_MemoryCtx* ctx) {
    store_8(ptr_at_xy<uint8_t>(ctx, dx,dy), tail, r);
}
+
// Broadcast alpha into the color channels and make the pixel opaque.
STAGE_PP(alpha_to_gray, NoCtx) {
    r = g = b = a;
    a = 255;
}
STAGE_PP(alpha_to_gray_dst, NoCtx) {
    dr = dg = db = da;
    da = 255;
}
// Move alpha into red only, then make the pixel opaque.
STAGE_PP(alpha_to_red, NoCtx) {
    r = a;
    a = 255;
}
STAGE_PP(alpha_to_red_dst, NoCtx) {
    dr = da;
    da = 255;
}

// BT.709 luma in /256 fixed point: a = (54 r + 183 g + 19 b) / 256.
STAGE_PP(bt709_luminance_or_luma_to_alpha, NoCtx) {
    a = (r*54 + g*183 + b*19)/256;  // 0.2126, 0.7152, 0.0722 with 256 denominator.
    r = g = b = 0;
}
// Same luma, but broadcast into the color channels (alpha untouched).
STAGE_PP(bt709_luminance_or_luma_to_rgb, NoCtx) {
    r = g = b =(r*54 + g*183 + b*19)/256;  // 0.2126, 0.7152, 0.0722 with 256 denominator.
}
+
// ~~~~~~ Save / restore the src and dst registers ~~~~~~ //
+
// These stages spill and reload the pipeline's src/dst registers through a
// caller-provided scratch buffer laid out as four consecutive N-lane planes
// (r, g, b, a at offsets 0*N, 1*N, 2*N, 3*N).
STAGE_PP(load_src, const uint16_t* ptr) {
    r = sk_unaligned_load<U16>(ptr + 0*N);
    g = sk_unaligned_load<U16>(ptr + 1*N);
    b = sk_unaligned_load<U16>(ptr + 2*N);
    a = sk_unaligned_load<U16>(ptr + 3*N);
}
STAGE_PP(store_src, uint16_t* ptr) {
    sk_unaligned_store(ptr + 0*N, r);
    sk_unaligned_store(ptr + 1*N, g);
    sk_unaligned_store(ptr + 2*N, b);
    sk_unaligned_store(ptr + 3*N, a);
}
// Spill only src alpha (single N-lane plane at offset 0).
STAGE_PP(store_src_a, uint16_t* ptr) {
    sk_unaligned_store(ptr, a);
}
STAGE_PP(load_dst, const uint16_t* ptr) {
    dr = sk_unaligned_load<U16>(ptr + 0*N);
    dg = sk_unaligned_load<U16>(ptr + 1*N);
    db = sk_unaligned_load<U16>(ptr + 2*N);
    da = sk_unaligned_load<U16>(ptr + 3*N);
}
STAGE_PP(store_dst, uint16_t* ptr) {
    sk_unaligned_store(ptr + 0*N, dr);
    sk_unaligned_store(ptr + 1*N, dg);
    sk_unaligned_store(ptr + 2*N, db);
    sk_unaligned_store(ptr + 3*N, da);
}
+
+// ~~~~~~ Coverage scales / lerps ~~~~~~ //
+
// Scale every channel by a single [0,1] float coverage value.
STAGE_PP(scale_1_float, const float* f) {
    U16 c = from_float(*f);
    r = div255( r * c );
    g = div255( g * c );
    b = div255( b * c );
    a = div255( a * c );
}
// Blend between the dst and src registers using one [0,1] float coverage value.
STAGE_PP(lerp_1_float, const float* f) {
    U16 c = from_float(*f);
    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
}
// Per-lane coverage already in the pipeline's native 16-bit layout.
STAGE_PP(scale_native, const uint16_t scales[]) {
    auto c = sk_unaligned_load<U16>(scales);
    r = div255( r * c );
    g = div255( g * c );
    b = div255( b * c );
    a = div255( a * c );
}

STAGE_PP(lerp_native, const uint16_t scales[]) {
    auto c = sk_unaligned_load<U16>(scales);
    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
}

// Per-pixel 8-bit coverage read from memory (typical mask blits).
STAGE_PP(scale_u8, const SkRasterPipeline_MemoryCtx* ctx) {
    U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
    r = div255( r * c );
    g = div255( g * c );
    b = div255( b * c );
    a = div255( a * c );
}
STAGE_PP(lerp_u8, const SkRasterPipeline_MemoryCtx* ctx) {
    U16 c = load_8(ptr_at_xy<const uint8_t>(ctx, dx,dy), tail);
    r = lerp(dr, r, c);
    g = lerp(dg, g, c);
    b = lerp(db, b, c);
    a = lerp(da, a, c);
}
+
// Derive alpha's coverage from rgb coverage and the values of src and dst alpha.
// Picks the min channel coverage when src alpha is below dst alpha, else the max.
SI U16 alpha_coverage_from_rgb_coverage(U16 a, U16 da, U16 cr, U16 cg, U16 cb) {
    return if_then_else(a < da, min(cr, min(cg,cb))
                              , max(cr, max(cg,cb)));
}
// 565 coverage (per-channel masks, e.g. LCD subpixel text): each channel gets
// its own coverage; alpha coverage is derived above.
STAGE_PP(scale_565, const SkRasterPipeline_MemoryCtx* ctx) {
    U16 cr,cg,cb;
    load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
    U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);

    r = div255( r * cr );
    g = div255( g * cg );
    b = div255( b * cb );
    a = div255( a * ca );
}
STAGE_PP(lerp_565, const SkRasterPipeline_MemoryCtx* ctx) {
    U16 cr,cg,cb;
    load_565_(ptr_at_xy<const uint16_t>(ctx, dx,dy), tail, &cr,&cg,&cb);
    U16 ca = alpha_coverage_from_rgb_coverage(a,da, cr,cg,cb);

    r = lerp(dr, r, cr);
    g = lerp(dg, g, cg);
    b = lerp(db, b, cb);
    a = lerp(da, a, ca);
}

// Emboss: color' = color*mul/255 + add, clamped to alpha so the
// premultiplied-color invariant (channel <= alpha) is preserved.
STAGE_PP(emboss, const SkRasterPipeline_EmbossCtx* ctx) {
    U16 mul = load_8(ptr_at_xy<const uint8_t>(&ctx->mul, dx,dy), tail),
        add = load_8(ptr_at_xy<const uint8_t>(&ctx->add, dx,dy), tail);

    r = min(div255(r*mul) + add, a);
    g = min(div255(g*mul) + add, a);
    b = min(div255(b*mul) + add, a);
}
+
+
// ~~~~~~ Gradient stages ~~~~~~ //

// Clamp x to [0,1], both sides inclusive (think, gradients).
// Even repeat and mirror funnel through a clamp to handle bad inputs like +Inf, NaN.
SI F clamp_01_(F v) { return min(max(0, v), 1); }

STAGE_GG(clamp_x_1 , NoCtx) { x = clamp_01_(x); }
STAGE_GG(repeat_x_1, NoCtx) { x = clamp_01_(x - floor_(x)); }  // tile by the fractional part
STAGE_GG(mirror_x_1, NoCtx) {
    auto two = [](F x){ return x+x; };
    // Reflect with period 2 around x = 1: abs((x-1) - 2*floor((x-1)/2) - 1).
    x = clamp_01_(abs_( (x-1.0f) - two(floor_((x-1.0f)*0.5f)) - 1.0f ));
}
+
// Narrow a 32-bit comparison mask to 16-bit lanes for the lowp pipeline.
SI I16 cond_to_mask_16(I32 cond) { return cast<I16>(cond); }

// Decal tiling: record an all-ones/all-zeros in-bounds mask per lane so that
// check_decal_mask can later zero out any out-of-bounds samples.
STAGE_GG(decal_x, SkRasterPipeline_DecalTileCtx* ctx) {
    auto w = ctx->limit_x;
    sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w)));
}
STAGE_GG(decal_y, SkRasterPipeline_DecalTileCtx* ctx) {
    auto h = ctx->limit_y;
    sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= y) & (y < h)));
}
STAGE_GG(decal_x_and_y, SkRasterPipeline_DecalTileCtx* ctx) {
    auto w = ctx->limit_x;
    auto h = ctx->limit_y;
    sk_unaligned_store(ctx->mask, cond_to_mask_16((0 <= x) & (x < w) & (0 <= y) & (y < h)));
}
// Clamp both coordinates into the ctx-provided rectangle.
STAGE_GG(clamp_x_and_y, SkRasterPipeline_CoordClampCtx* ctx) {
    x = min(ctx->max_x, max(ctx->min_x, x));
    y = min(ctx->max_y, max(ctx->min_y, y));
}
// Zero out every channel of lanes whose coordinates failed the decal test.
STAGE_PP(check_decal_mask, SkRasterPipeline_DecalTileCtx* ctx) {
    auto mask = sk_unaligned_load<U16>(ctx->mask);
    r = r & mask;
    g = g & mask;
    b = b & mask;
    a = a & mask;
}
+
// Convert [0,1] float channels to rounded 0..255 16-bit lanes.
// r,g,b are clamped; alpha is trusted to already be in range.
SI void round_F_to_U16(F R, F G, F B, F A, U16* r, U16* g, U16* b, U16* a) {
    auto round = [](F x) { return cast<U16>(x * 255.0f + 0.5f); };

    *r = round(min(max(0, R), 1));
    *g = round(min(max(0, G), 1));
    *b = round(min(max(0, B), 1));
    *a = round(A);  // we assume alpha is already in [0,1].
}

// Evaluate each channel as f*t + b for the gradient interval selected by idx,
// where c->fs/c->bs hold the per-interval slope and intercept tables.
SI void gradient_lookup(const SkRasterPipeline_GradientCtx* c, U32 idx, F t,
                        U16* r, U16* g, U16* b, U16* a) {

    F fr, fg, fb, fa, br, bg, bb, ba;
#if defined(JUMPER_IS_HSW) || defined(JUMPER_IS_SKX)
    // With at most 8 stops the whole f/b table fits in one AVX register, so a
    // lane permute is cheaper than a hardware gather.
    if (c->stopCount <=8) {
        __m256i lo, hi;
        split(idx, &lo, &hi);

        fr = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[0]), hi));
        br = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[0]), hi));
        fg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[1]), hi));
        bg = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[1]), hi));
        fb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[2]), hi));
        bb = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[2]), hi));
        fa = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->fs[3]), hi));
        ba = join<F>(_mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), lo),
                     _mm256_permutevar8x32_ps(_mm256_loadu_ps(c->bs[3]), hi));
    } else
#endif
    {
        fr = gather<F>(c->fs[0], idx);
        fg = gather<F>(c->fs[1], idx);
        fb = gather<F>(c->fs[2], idx);
        fa = gather<F>(c->fs[3], idx);
        br = gather<F>(c->bs[0], idx);
        bg = gather<F>(c->bs[1], idx);
        bb = gather<F>(c->bs[2], idx);
        ba = gather<F>(c->bs[3], idx);
    }
    round_F_to_U16(mad(t, fr, br),
                   mad(t, fg, bg),
                   mad(t, fb, bb),
                   mad(t, fa, ba),
                   r,g,b,a);
}
+
// General gradient: linearly scan the stop positions, counting how many stops
// lie at or below t to get the interval index.
STAGE_GP(gradient, const SkRasterPipeline_GradientCtx* c) {
    auto t = x;
    U32 idx = 0;

    // N.B. The loop starts at 1 because idx 0 is the color to use before the first stop.
    for (size_t i = 1; i < c->stopCount; i++) {
        idx += if_then_else(t >= c->ts[i], U32(1), U32(0));
    }

    gradient_lookup(c, idx, t, &r, &g, &b, &a);
}

// Evenly spaced stops: the interval index is computable directly from t.
STAGE_GP(evenly_spaced_gradient, const SkRasterPipeline_GradientCtx* c) {
    auto t = x;
    auto idx = trunc_(t * (c->stopCount-1));
    gradient_lookup(c, idx, t, &r, &g, &b, &a);
}

// Two stops: a single interval, so each channel is just f*t + b.
STAGE_GP(evenly_spaced_2_stop_gradient, const SkRasterPipeline_EvenlySpaced2StopGradientCtx* c) {
    auto t = x;
    round_F_to_U16(mad(t, c->f[0], c->b[0]),
                   mad(t, c->f[1], c->b[1]),
                   mad(t, c->f[2], c->b[2]),
                   mad(t, c->f[3], c->b[3]),
                   &r,&g,&b,&a);
}
+
// Bilinear sampling of an 8888 texture with clamped coordinates, computed
// entirely in 16-bit fixed point (see the derivation comments below).
STAGE_GP(bilerp_clamp_8888, const SkRasterPipeline_GatherCtx* ctx) {
    // Quantize sample point and transform into lerp coordinates converting them to 16.16 fixed
    // point number.
    I32 qx = cast<I32>(floor_(65536.0f * x + 0.5f)) - 32768,
        qy = cast<I32>(floor_(65536.0f * y + 0.5f)) - 32768;

    // Calculate screen coordinates sx & sy by flooring qx and qy.
    I32 sx = qx >> 16,
        sy = qy >> 16;

    // We are going to perform a change of parameters for qx on [0, 1) to tx on [-1, 1).
    // This will put tx in Q15 format for use with q_mult.
    // Calculate tx and ty on the interval of [-1, 1). Give {qx} and {qy} are on the interval
    // [0, 1), where {v} is fract(v), we can transform to tx in the following manner ty follows
    // the same math:
    //     tx = 2 * {qx} - 1, so
    //     {qx} = (tx + 1) / 2.
    // Calculate {qx} - 1 and {qy} - 1 where the {} operation is handled by the cast, and the - 1
    // is handled by the ^ 0x8000, dividing by 2 is deferred and handled in lerpX and lerpY in
    // order to use the full 16-bit resolution.
    I16 tx = cast<I16>(qx ^ 0x8000),
        ty = cast<I16>(qy ^ 0x8000);

    // Substituting the {qx} by the equation for tx from above into the lerp equation where v is
    // the lerped value:
    //         v = {qx}*(R - L) + L,
    //         v = 1/2*(tx + 1)*(R - L) + L
    //     2 * v = (tx + 1)*(R - L) + 2*L
    //           = tx*R - tx*L + R - L + 2*L
    //           = tx*(R - L) + (R + L).
    // Since R and L are on [0, 255] we need them on the interval [0, 1/2] to get them into form
    // for Q15_mult. If L and R where in 16.16 format, this would be done by dividing by 2^9. In
    // code, we can multiply by 2^7 to get the value directly.
    //            2 * v = tx*(R - L) + (R + L)
    //     2^-9 * 2 * v = tx*(R - L)*2^-9 + (R + L)*2^-9
    //         2^-8 * v = 2^-9 * (tx*(R - L) + (R + L))
    //                v = 1/2 * (tx*(R - L) + (R + L))
    auto lerpX = [&](U16 left, U16 right) -> U16 {
        I16 width  = cast<I16>(right - left) << 7;
        U16 middle = (right + left) << 7;
        // The constrained_add is the most subtle part of lerp. The first term is on the interval
        // [-1, 1), and the second term is on the interval is on the interval [0, 1) because
        // both terms are too high by a factor of 2 which will be handled below. (Both R and L are
        // on [0, 1/2), but the sum R + L is on the interval [0, 1).) Generally, the sum below
        // should overflow, but because we know that sum produces an output on the
        // interval [0, 1) we know that the extra bit that would be needed will always be 0. So
        // we need to be careful to treat this sum as an unsigned positive number in the divide
        // by 2 below. Add +1 for rounding.
        U16 v2  = constrained_add(scaled_mult(tx, width), middle) + 1;
        // Divide by 2 to calculate v and at the same time bring the intermediate value onto the
        // interval [0, 1/2] to set up for the lerpY.
        return v2 >> 1;
    };

    const uint32_t* ptr;
    U32 ix = ix_and_ptr(&ptr, ctx, sx, sy);
    U16 leftR, leftG, leftB, leftA;
    from_8888(gather<U32>(ptr, ix), &leftR,&leftG,&leftB,&leftA);

    ix = ix_and_ptr(&ptr, ctx, sx+1, sy);
    U16 rightR, rightG, rightB, rightA;
    from_8888(gather<U32>(ptr, ix), &rightR,&rightG,&rightB,&rightA);

    U16 topR = lerpX(leftR, rightR),
        topG = lerpX(leftG, rightG),
        topB = lerpX(leftB, rightB),
        topA = lerpX(leftA, rightA);

    ix = ix_and_ptr(&ptr, ctx, sx, sy+1);
    from_8888(gather<U32>(ptr, ix), &leftR,&leftG,&leftB,&leftA);

    ix = ix_and_ptr(&ptr, ctx, sx+1, sy+1);
    from_8888(gather<U32>(ptr, ix), &rightR,&rightG,&rightB,&rightA);

    U16 bottomR = lerpX(leftR, rightR),
        bottomG = lerpX(leftG, rightG),
        bottomB = lerpX(leftB, rightB),
        bottomA = lerpX(leftA, rightA);

    // lerpY plays the same mathematical tricks as lerpX, but the final divide is by 256 resulting
    // in a value on [0, 255].
    auto lerpY = [&](U16 top, U16 bottom) -> U16 {
        I16 width  = cast<I16>(bottom - top);
        U16 middle = bottom + top;
        // Add + 0x80 for rounding.
        U16 blend  = constrained_add(scaled_mult(ty, width), middle) + 0x80;

        return blend >> 8;
    };

    r = lerpY(topR, bottomR);
    g = lerpY(topG, bottomG);
    b = lerpY(topB, bottomB);
    a = lerpY(topA, bottomA);
}
+
// Map (x,y) to its polar angle divided by 2*pi, i.e. a value in [0,1).
// atan is approximated on the first octant and then reflected into place.
STAGE_GG(xy_to_unit_angle, NoCtx) {
    F xabs = abs_(x),
      yabs = abs_(y);

    F slope = min(xabs, yabs)/max(xabs, yabs);
    F s = slope * slope;

    // Use a 7th degree polynomial to approximate atan.
    // This was generated using sollya.gforge.inria.fr.
    // A float optimized polynomial was generated using the following command.
    // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
    F phi = slope
             * (0.15912117063999176025390625f     + s
             * (-5.185396969318389892578125e-2f   + s
             * (2.476101927459239959716796875e-2f + s
             * (-7.0547382347285747528076171875e-3f))));

    // Reflect the first-octant result into the correct quadrant.
    phi = if_then_else(xabs < yabs, 1.0f/4.0f - phi, phi);
    phi = if_then_else(x < 0.0f   , 1.0f/2.0f - phi, phi);
    phi = if_then_else(y < 0.0f   , 1.0f - phi     , phi);
    phi = if_then_else(phi != phi , F(0)           , phi);  // Check for NaN.
    x = phi;
}
// Replace x with the distance from the origin (radial gradients).
STAGE_GG(xy_to_radius, NoCtx) {
    x = sqrt_(x*x + y*y);
}
+
// ~~~~~~ Compound stages ~~~~~~ //

// Fused load + src-over + store against an 8888 destination buffer:
// dst' = src + dst*(1 - src_alpha), written back in place.
STAGE_PP(srcover_rgba_8888, const SkRasterPipeline_MemoryCtx* ctx) {
    auto ptr = ptr_at_xy<uint32_t>(ctx, dx,dy);

    load_8888_(ptr, tail, &dr,&dg,&db,&da);
    r = r + div255( dr*inv(a) );
    g = g + div255( dg*inv(a) );
    b = b + div255( db*inv(a) );
    a = a + div255( da*inv(a) );
    store_8888_(ptr, tail, r,g,b,a);
}
+
// ~~~~~~ skgpu::Swizzle stage ~~~~~~ //

// Reorder channels by a 4-character swizzle key ('r','g','b','a','0','1').
// The key is packed into the bits of the context pointer value itself (the
// caller stored 4 bytes with a matching memcpy), so we memcpy it back out of
// &ctx rather than dereference ctx.
STAGE_PP(swizzle, void* ctx) {
    auto ir = r, ig = g, ib = b, ia = a;  // snapshot inputs before overwriting
    U16* o[] = {&r, &g, &b, &a};
    char swiz[4];
    memcpy(swiz, &ctx, sizeof(swiz));

    for (int i = 0; i < 4; ++i) {
        switch (swiz[i]) {
            case 'r': *o[i] = ir;       break;
            case 'g': *o[i] = ig;       break;
            case 'b': *o[i] = ib;       break;
            case 'a': *o[i] = ia;       break;
            case '0': *o[i] = U16(0);   break;
            case '1': *o[i] = U16(255); break;
            default:                    break;
        }
    }
}
+
+#undef cast
+
+#endif//defined(JUMPER_IS_SCALAR) controlling whether we build lowp stages
+} // namespace lowp
+
+/* This gives us SK_OPTS::lowp::N if lowp::N has been set, or SK_OPTS::N if it hasn't. */
+namespace lowp { static constexpr size_t lowp_N = N; }
+
+/** Allow outside code to access the Raster Pipeline pixel stride. */
+constexpr size_t raster_pipeline_lowp_stride() { return lowp::lowp_N; }
+constexpr size_t raster_pipeline_highp_stride() { return N; }
+
+} // namespace SK_OPTS_NS
+
+#undef SI
+
+#endif//SkRasterPipeline_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkSwizzler_opts.h b/gfx/skia/skia/src/opts/SkSwizzler_opts.h
new file mode 100644
index 0000000000..1c7b3833e9
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkSwizzler_opts.h
@@ -0,0 +1,1389 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSwizzler_opts_DEFINED
+#define SkSwizzler_opts_DEFINED
+
+#include "include/private/SkColorData.h"
+#include "src/base/SkVx.h"
+#include <utility>
+
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ #include <immintrin.h>
+#elif defined(SK_ARM_HAS_NEON)
+ #include <arm_neon.h>
+#endif
+
+namespace SK_OPTS_NS {
+
// Premultiply each RGBA pixel by its alpha, leaving the channel order alone.
// Layout (little-endian bytes): a in bits 24..31, b in 16..23, g in 8..15, r in 0..7.
static void RGBA_to_rgbA_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        const uint32_t px = src[i];
        const uint32_t a = px >> 24;
        // (x*a + 127) / 255 is the round-half-up premultiply used throughout Skia.
        auto premul = [a](uint32_t x) -> uint32_t { return (x * a + 127) / 255; };
        const uint32_t b = premul((px >> 16) & 0xFF);
        const uint32_t g = premul((px >>  8) & 0xFF);
        const uint32_t r = premul((px >>  0) & 0xFF);
        dst[i] = (a << 24) | (b << 16) | (g << 8) | r;
    }
}
+
// Premultiply by alpha and swap R<->B: output word is a<<24 | r<<16 | g<<8 | b.
static void RGBA_to_bgrA_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        const uint32_t px = src[i];
        const uint32_t a = px >> 24;
        // Same rounded premultiply as RGBA_to_rgbA_portable.
        auto premul = [a](uint32_t x) -> uint32_t { return (x * a + 127) / 255; };
        const uint32_t b = premul((px >> 16) & 0xFF);
        const uint32_t g = premul((px >>  8) & 0xFF);
        const uint32_t r = premul((px >>  0) & 0xFF);
        dst[i] = (a << 24) | (r << 16) | (g << 8) | b;
    }
}
+
// Pure channel swap (no premultiply): exchange the r and b bytes, keep a and g.
static void RGBA_to_BGRA_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        const uint32_t px = src[i];
        dst[i] = (px & 0xFF00FF00u)           // a (byte 3) and g (byte 1) stay put
               | ((px & 0x00FF0000u) >> 16)   // b moves down to byte 0
               | ((px & 0x000000FFu) << 16);  // r moves up to byte 2
    }
}
+
// Expand interleaved gray+alpha byte pairs into RGBA words, replicating the
// gray value into r, g, and b (no premultiplication).
static void grayA_to_RGBA_portable(uint32_t dst[], const uint8_t* src, int count) {
    for (int i = 0; i < count; i++) {
        const uint32_t g = src[2*i + 0];
        const uint32_t a = src[2*i + 1];
        dst[i] = (a << 24) | (g << 16) | (g << 8) | g;
    }
}
+
// Expand interleaved gray+alpha byte pairs into premultiplied RGBA words.
static void grayA_to_rgbA_portable(uint32_t dst[], const uint8_t* src, int count) {
    for (int i = 0; i < count; i++) {
        const uint32_t a = src[2*i + 1];
        // Rounded premultiply of the gray value by alpha.
        const uint32_t g = (src[2*i + 0] * a + 127) / 255;
        dst[i] = (a << 24) | (g << 16) | (g << 8) | g;
    }
}
+
// Convert inverted CMYK (k in the high byte, then y, m, c) to opaque RGBA.
// See comments in SkSwizzler.cpp for details on the conversion formula.
static void inverted_CMYK_to_RGB1_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        const uint32_t px = src[i];
        const uint32_t k = px >> 24;
        // Each color channel is the inverted ink value scaled by k, rounded.
        auto mul = [k](uint32_t x) -> uint32_t { return (x * k + 127) / 255; };
        const uint32_t b = mul((px >> 16) & 0xFF);
        const uint32_t g = mul((px >>  8) & 0xFF);
        const uint32_t r = mul((px >>  0) & 0xFF);
        dst[i] = 0xFF000000u | (b << 16) | (g << 8) | r;
    }
}
+
// Same conversion as inverted_CMYK_to_RGB1_portable, but packs BGRA order
// (r in bits 16..23, b in bits 0..7).
static void inverted_CMYK_to_BGR1_portable(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        const uint32_t px = src[i];
        const uint32_t k = px >> 24;
        auto mul = [k](uint32_t x) -> uint32_t { return (x * k + 127) / 255; };
        const uint32_t b = mul((px >> 16) & 0xFF);
        const uint32_t g = mul((px >>  8) & 0xFF);
        const uint32_t r = mul((px >>  0) & 0xFF);
        dst[i] = 0xFF000000u | (r << 16) | (g << 8) | b;
    }
}
+
+#if defined(SK_ARM_HAS_NEON)
+
// Rounded divide by 255, (x + 127) / 255
static uint8x8_t div255_round(uint16x8_t x) {
    // result = (x + 127) / 255
    // result = (x + 127) / 256 + error1
    //
    // error1 = (x + 127) / (255 * 256)
    // error1 = (x + 127) / (256 * 256) + error2
    //
    // error2 = (x + 127) / (255 * 256 * 256)
    //
    // The maximum value of error2 is too small to matter.  Thus:
    // result = (x + 127) / 256 + (x + 127) / (256 * 256)
    // result = ((x + 127) / 256 + x + 127) / 256
    // result = ((x + 127) >> 8 + x + 127) >> 8
    //
    // Use >>> to represent "rounded right shift" which, conveniently,
    // NEON supports in one instruction.
    // result = ((x >>> 8) + x) >>> 8
    //
    // Note that the second right shift is actually performed as an
    // "add, round, and narrow back to 8-bits" instruction.
    return vraddhn_u16(x, vrshrq_n_u16(x, 8));
}

// Scale a byte by another, (x * y + 127) / 255.
// The widening multiply is exact: 255*255 fits in 16 bits.
static uint8x8_t scale(uint8x8_t x, uint8x8_t y) {
    return div255_round(vmull_u8(x, y));
}
+
// Premultiply 8 pixels at a time using vld4/vst4 de-interleaving, optionally
// swapping R and B while repacking.  kSwapRB is constant at each call site.
static void premul_should_swapRB(bool kSwapRB, uint32_t* dst, const uint32_t* src, int count) {
    while (count >= 8) {
        // Load 8 pixels.
        uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);

        uint8x8_t a = rgba.val[3],
                  b = rgba.val[2],
                  g = rgba.val[1],
                  r = rgba.val[0];

        // Premultiply.
        b = scale(b, a);
        g = scale(g, a);
        r = scale(r, a);

        // Store 8 premultiplied pixels.
        if (kSwapRB) {
            rgba.val[2] = r;
            rgba.val[1] = g;
            rgba.val[0] = b;
        } else {
            rgba.val[2] = b;
            rgba.val[1] = g;
            rgba.val[0] = r;
        }
        vst4_u8((uint8_t*) dst, rgba);
        src += 8;
        dst += 8;
        count -= 8;
    }

    // Call portable code to finish up the tail of [0,8) pixels.
    auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
    proc(dst, src, count);
}

// Public entry points: premultiply keeping order, or premultiply + R/B swap.
/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
    premul_should_swapRB(false, dst, src, count);
}

/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
    premul_should_swapRB(true, dst, src, count);
}
+
// Swap R and B in 16- then 8-pixel chunks; vld4/vst4 give planar access to the
// channels, so the swap is a register move.  Tail handled by portable code.
/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
    using std::swap;
    while (count >= 16) {
        // Load 16 pixels.
        uint8x16x4_t rgba = vld4q_u8((const uint8_t*) src);

        // Swap r and b.
        swap(rgba.val[0], rgba.val[2]);

        // Store 16 pixels.
        vst4q_u8((uint8_t*) dst, rgba);
        src += 16;
        dst += 16;
        count -= 16;
    }

    if (count >= 8) {
        // Load 8 pixels.
        uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);

        // Swap r and b.
        swap(rgba.val[0], rgba.val[2]);

        // Store 8 pixels.
        vst4_u8((uint8_t*) dst, rgba);
        src += 8;
        dst += 8;
        count -= 8;
    }

    RGBA_to_BGRA_portable(dst, src, count);
}
+
// Expand gray+alpha to RGBA in 16- then 8-pixel chunks, optionally
// premultiplying gray by alpha.  kPremul is constant at each call site.
static void expand_grayA(bool kPremul, uint32_t dst[], const uint8_t* src, int count) {
    while (count >= 16) {
        // Load 16 pixels.
        uint8x16x2_t ga = vld2q_u8(src);

        // Premultiply if requested.
        if (kPremul) {
            ga.val[0] = vcombine_u8(
                    scale(vget_low_u8(ga.val[0]),  vget_low_u8(ga.val[1])),
                    scale(vget_high_u8(ga.val[0]), vget_high_u8(ga.val[1])));
        }

        // Set each of the color channels.
        uint8x16x4_t rgba;
        rgba.val[0] = ga.val[0];
        rgba.val[1] = ga.val[0];
        rgba.val[2] = ga.val[0];
        rgba.val[3] = ga.val[1];

        // Store 16 pixels.
        vst4q_u8((uint8_t*) dst, rgba);
        src += 16*2;
        dst += 16;
        count -= 16;
    }

    if (count >= 8) {
        // Load 8 pixels.
        uint8x8x2_t ga = vld2_u8(src);

        // Premultiply if requested.
        if (kPremul) {
            ga.val[0] = scale(ga.val[0], ga.val[1]);
        }

        // Set each of the color channels.
        uint8x8x4_t rgba;
        rgba.val[0] = ga.val[0];
        rgba.val[1] = ga.val[0];
        rgba.val[2] = ga.val[0];
        rgba.val[3] = ga.val[1];

        // Store 8 pixels.
        vst4_u8((uint8_t*) dst, rgba);
        src += 8*2;
        dst += 8;
        count -= 8;
    }

    // Portable tail of [0,8) pixels.
    auto proc = kPremul ? grayA_to_rgbA_portable : grayA_to_RGBA_portable;
    proc(dst, src, count);
}

/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
    expand_grayA(false, dst, src, count);
}

/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
    expand_grayA(true, dst, src, count);
}
+
enum Format { kRGB1, kBGR1 };
// Convert inverted CMYK to opaque RGB1/BGR1 8 pixels at a time:
// r = c*k/255, g = m*k/255, b = y*k/255 (see SkSwizzler.cpp for the formula).
static void inverted_cmyk_to(Format format, uint32_t* dst, const uint32_t* src, int count) {
    while (count >= 8) {
        // Load 8 cmyk pixels.
        uint8x8x4_t pixels = vld4_u8((const uint8_t*) src);

        uint8x8_t k = pixels.val[3],
                  y = pixels.val[2],
                  m = pixels.val[1],
                  c = pixels.val[0];

        // Scale to r, g, b.
        uint8x8_t b = scale(y, k);
        uint8x8_t g = scale(m, k);
        uint8x8_t r = scale(c, k);

        // Store 8 rgba pixels.
        if (kBGR1 == format) {
            pixels.val[3] = vdup_n_u8(0xFF);
            pixels.val[2] = r;
            pixels.val[1] = g;
            pixels.val[0] = b;
        } else {
            pixels.val[3] = vdup_n_u8(0xFF);
            pixels.val[2] = b;
            pixels.val[1] = g;
            pixels.val[0] = r;
        }
        vst4_u8((uint8_t*) dst, pixels);
        src += 8;
        dst += 8;
        count -= 8;
    }

    // Portable tail of [0,8) pixels.
    auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
    proc(dst, src, count);
}

/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
    inverted_cmyk_to(kRGB1, dst, src, count);
}

/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
    inverted_cmyk_to(kBGR1, dst, src, count);
}
+
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SKX
// Scale a byte by another.
// Inputs are stored in 16-bit lanes, but are not larger than 8-bits.
// Same rounding as the portable (x*y + 127) / 255.
static __m512i scale(__m512i x, __m512i y) {
    const __m512i _128 = _mm512_set1_epi16(128);
    const __m512i _257 = _mm512_set1_epi16(257);

    // (x+127)/255 == ((x+128)*257)>>16 for 0 <= x <= 255*255.
    return _mm512_mulhi_epu16(_mm512_add_epi16(_mm512_mullo_epi16(x, y), _128), _257);
}
+
// Premultiply 32 (then 16) pixels per loop.  premul8 shuffles two registers'
// worth of pixels into 8-bit planes, widens to 16-bit, scales by alpha, and
// repacks; the shuffle mask also performs the optional R/B swap.
static void premul_should_swapRB(bool kSwapRB, uint32_t* dst, const uint32_t* src, int count) {

    auto premul8 = [=](__m512i* lo, __m512i* hi) {
        const __m512i zeros = _mm512_setzero_si512();
        skvx::Vec<64, uint8_t> mask;
        if (kSwapRB) {
            mask = { 2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15,
                     2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15,
                     2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15,
                     2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15 };
        } else {
            mask = { 0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15,
                     0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15,
                     0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15,
                     0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15 };
        }
        __m512i planar = skvx::bit_pun<__m512i>(mask);

        // Swizzle the pixels to 8-bit planar.
        *lo = _mm512_shuffle_epi8(*lo, planar);
        *hi = _mm512_shuffle_epi8(*hi, planar);
        __m512i rg = _mm512_unpacklo_epi32(*lo, *hi),
                ba = _mm512_unpackhi_epi32(*lo, *hi);

        // Unpack to 16-bit planar.
        __m512i r = _mm512_unpacklo_epi8(rg, zeros),
                g = _mm512_unpackhi_epi8(rg, zeros),
                b = _mm512_unpacklo_epi8(ba, zeros),
                a = _mm512_unpackhi_epi8(ba, zeros);

        // Premultiply!
        r = scale(r, a);
        g = scale(g, a);
        b = scale(b, a);

        // Repack into interlaced pixels.
        rg = _mm512_or_si512(r, _mm512_slli_epi16(g, 8));
        ba = _mm512_or_si512(b, _mm512_slli_epi16(a, 8));
        *lo = _mm512_unpacklo_epi16(rg, ba);
        *hi = _mm512_unpackhi_epi16(rg, ba);
    };

    while (count >= 32) {
        __m512i lo = _mm512_loadu_si512((const __m512i*) (src + 0)),
                hi = _mm512_loadu_si512((const __m512i*) (src + 16));

        premul8(&lo, &hi);

        _mm512_storeu_si512((__m512i*) (dst + 0), lo);
        _mm512_storeu_si512((__m512i*) (dst + 16), hi);

        src += 32;
        dst += 32;
        count -= 32;
    }

    if (count >= 16) {
        // Half batch: run premul8 with a zeroed hi register and store only lo.
        __m512i lo = _mm512_loadu_si512((const __m512i*) src),
                hi = _mm512_setzero_si512();

        premul8(&lo, &hi);

        _mm512_storeu_si512((__m512i*) dst, lo);

        src += 16;
        dst += 16;
        count -= 16;
    }

    // Call portable code to finish up the tail of [0,16) pixels.
    auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
    proc(dst, src, count);
}

/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
    premul_should_swapRB(false, dst, src, count);
}

/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
    premul_should_swapRB(true, dst, src, count);
}
+
// Swap R and B 16 pixels at a time with a per-128-bit-lane byte shuffle.
/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
    const uint8_t mask[64] = { 2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15,
                               2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15,
                               2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15,
                               2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15 };
    const __m512i swapRB = _mm512_loadu_si512(mask);

    while (count >= 16) {
        __m512i rgba = _mm512_loadu_si512((const __m512i*) src);
        __m512i bgra = _mm512_shuffle_epi8(rgba, swapRB);
        _mm512_storeu_si512((__m512i*) dst, bgra);

        src += 16;
        dst += 16;
        count -= 16;
    }

    RGBA_to_BGRA_portable(dst, src, count);
}
+
// Use SSSE3 impl as AVX2 / AVX-512 impl regresses performance for RGB_to_RGB1 / RGB_to_BGR1.

// Use AVX2 impl as AVX-512 impl regresses performance for gray_to_RGB1.

// Expand 32 gray+alpha pairs per loop into RGBA (no premultiply); the two
// _mm512_shuffle_i32x4 passes restore pixel order after the 16-bit unpacks.
/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
    while (count >= 32) {
        __m512i ga = _mm512_loadu_si512((const __m512i*) src);

        // Duplicate gray into both bytes of each 16-bit lane.
        __m512i gg = _mm512_or_si512(_mm512_and_si512(ga, _mm512_set1_epi16(0x00FF)),
                                     _mm512_slli_epi16(ga, 8));

        __m512i ggga_lo = _mm512_unpacklo_epi16(gg, ga);
        __m512i ggga_hi = _mm512_unpackhi_epi16(gg, ga);

        // 1st shuffle for pixel reorder.
        // Note. 'p' stands for 'ggga'
        // Before 1st shuffle:
        //     ggga_lo = p0 p1 p2 p3 | p8 p9 p10 p11 | p16 p17 p18 p19 | p24 p25 p26 p27
        //     ggga_hi = p4 p5 p6 p7 | p12 p13 p14 p15 | p20 p21 p22 p23 | p28 p29 p30 p31
        //
        // After 1st shuffle:
        //     ggga_lo_shuffle_1 =
        //         p0 p1 p2 p3 | p8 p9 p10 p11 | p4 p5 p6 p7 | p12 p13 p14 p15
        //     ggga_hi_shuffle_1 =
        //         p16 p17 p18 p19 | p24 p25 p26 p27 | p20 p21 p22 p23 | p28 p29 p30 p31
        __m512i ggga_lo_shuffle_1 = _mm512_shuffle_i32x4(ggga_lo, ggga_hi, 0x44),
                ggga_hi_shuffle_1 = _mm512_shuffle_i32x4(ggga_lo, ggga_hi, 0xee);

        // 2nd shuffle for pixel reorder.
        // After the 2nd shuffle:
        //     ggga_lo_shuffle_2 =
        //         p0 p1 p2 p3 | p4 p5 p6 p7 | p8 p9 p10 p11 | p12 p13 p14 p15
        //     ggga_hi_shuffle_2 =
        //         p16 p17 p18 p19 | p20 p21 p22 p23 | p24 p25 p26 p27 | p28 p29 p30 p31
        __m512i ggga_lo_shuffle_2 = _mm512_shuffle_i32x4(ggga_lo_shuffle_1,
                                                         ggga_lo_shuffle_1, 0xd8),
                ggga_hi_shuffle_2 = _mm512_shuffle_i32x4(ggga_hi_shuffle_1,
                                                         ggga_hi_shuffle_1, 0xd8);

        _mm512_storeu_si512((__m512i*) (dst + 0), ggga_lo_shuffle_2);
        _mm512_storeu_si512((__m512i*) (dst + 16), ggga_hi_shuffle_2);

        src += 32*2;
        dst += 32;
        count -= 32;
    }

    grayA_to_RGBA_portable(dst, src, count);
}
+
// Same expansion as grayA_to_RGBA, but gray is premultiplied by alpha first.
/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
    while (count >= 32) {
        __m512i grayA = _mm512_loadu_si512((const __m512i*) src);

        // Split into 16-bit gray and alpha planes.
        __m512i g0 = _mm512_and_si512(grayA, _mm512_set1_epi16(0x00FF));
        __m512i a0 = _mm512_srli_epi16(grayA, 8);

        // Premultiply
        g0 = scale(g0, a0);

        __m512i gg = _mm512_or_si512(g0, _mm512_slli_epi16(g0, 8));
        __m512i ga = _mm512_or_si512(g0, _mm512_slli_epi16(a0, 8));

        __m512i ggga_lo = _mm512_unpacklo_epi16(gg, ga);
        __m512i ggga_hi = _mm512_unpackhi_epi16(gg, ga);

        // 1st shuffle for pixel reorder, same as grayA_to_RGBA.
        __m512i ggga_lo_shuffle_1 = _mm512_shuffle_i32x4(ggga_lo, ggga_hi, 0x44),
                ggga_hi_shuffle_1 = _mm512_shuffle_i32x4(ggga_lo, ggga_hi, 0xee);

        // 2nd shuffle for pixel reorder, same as grayA_to_RGBA.
        __m512i ggga_lo_shuffle_2 = _mm512_shuffle_i32x4(ggga_lo_shuffle_1,
                                                         ggga_lo_shuffle_1, 0xd8),
                ggga_hi_shuffle_2 = _mm512_shuffle_i32x4(ggga_hi_shuffle_1,
                                                         ggga_hi_shuffle_1, 0xd8);

        _mm512_storeu_si512((__m512i*) (dst + 0), ggga_lo_shuffle_2);
        _mm512_storeu_si512((__m512i*) (dst + 16), ggga_hi_shuffle_2);

        src += 32*2;
        dst += 32;
        count -= 32;
    }

    grayA_to_rgbA_portable(dst, src, count);
}
+
enum Format { kRGB1, kBGR1 };
// Convert inverted CMYK to opaque RGB1/BGR1, 32 (then 16) pixels per loop.
// Mirrors premul_should_swapRB's planar shuffle structure, scaling c/m/y by k
// and forcing alpha to 0xFF while repacking.
static void inverted_cmyk_to(Format format, uint32_t* dst, const uint32_t* src, int count) {
    auto convert8 = [=](__m512i* lo, __m512i* hi) {
        const __m512i zeros = _mm512_setzero_si512();
        skvx::Vec<64, uint8_t> mask;
        if (kBGR1 == format) {
            mask = { 2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15,
                     2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15,
                     2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15,
                     2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15 };
        } else {
            mask = { 0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15,
                     0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15,
                     0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15,
                     0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15 };
        }
        __m512i planar = skvx::bit_pun<__m512i>(mask);

        // Swizzle the pixels to 8-bit planar.
        *lo = _mm512_shuffle_epi8(*lo, planar);
        *hi = _mm512_shuffle_epi8(*hi, planar);
        __m512i cm = _mm512_unpacklo_epi32(*lo, *hi),
                yk = _mm512_unpackhi_epi32(*lo, *hi);

        // Unpack to 16-bit planar.
        __m512i c = _mm512_unpacklo_epi8(cm, zeros),
                m = _mm512_unpackhi_epi8(cm, zeros),
                y = _mm512_unpacklo_epi8(yk, zeros),
                k = _mm512_unpackhi_epi8(yk, zeros);

        // Scale to r, g, b.
        __m512i r = scale(c, k),
                g = scale(m, k),
                b = scale(y, k);

        // Repack into interlaced pixels, alpha forced to 0xFF.
        __m512i rg = _mm512_or_si512(r, _mm512_slli_epi16(g, 8)),
                ba = _mm512_or_si512(b, _mm512_set1_epi16((uint16_t) 0xFF00));
        *lo = _mm512_unpacklo_epi16(rg, ba);
        *hi = _mm512_unpackhi_epi16(rg, ba);
    };

    while (count >= 32) {
        __m512i lo = _mm512_loadu_si512((const __m512i*) (src + 0)),
                hi = _mm512_loadu_si512((const __m512i*) (src + 16));

        convert8(&lo, &hi);

        _mm512_storeu_si512((__m512i*) (dst + 0), lo);
        _mm512_storeu_si512((__m512i*) (dst + 16), hi);

        src += 32;
        dst += 32;
        count -= 32;
    }

    if (count >= 16) {
        // Half batch: zeroed hi register, store only lo.
        __m512i lo = _mm512_loadu_si512((const __m512i*) src),
                hi = _mm512_setzero_si512();

        convert8(&lo, &hi);

        _mm512_storeu_si512((__m512i*) dst, lo);

        src += 16;
        dst += 16;
        count -= 16;
    }

    // Portable tail of [0,16) pixels.
    auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
    proc(dst, src, count);
}
+
+/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to(kRGB1, dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to(kBGR1, dst, src, count);
+}
+
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+
+// Scale a byte by another.
+// Inputs are stored in 16-bit lanes, but are not larger than 8-bits.
+static __m256i scale(__m256i x, __m256i y) {
+ const __m256i _128 = _mm256_set1_epi16(128);
+ const __m256i _257 = _mm256_set1_epi16(257);
+
+ // (x+127)/255 == ((x+128)*257)>>16 for 0 <= x <= 255*255.
+ return _mm256_mulhi_epu16(_mm256_add_epi16(_mm256_mullo_epi16(x, y), _128), _257);
+}
+
+static void premul_should_swapRB(bool kSwapRB, uint32_t* dst, const uint32_t* src, int count) {
+
+ auto premul8 = [=](__m256i* lo, __m256i* hi) {
+ const __m256i zeros = _mm256_setzero_si256();
+ __m256i planar;
+ if (kSwapRB) {
+ planar = _mm256_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15,
+ 2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
+ } else {
+ planar = _mm256_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15,
+ 0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
+ }
+
+ // Swizzle the pixels to 8-bit planar.
+ *lo = _mm256_shuffle_epi8(*lo, planar); // rrrrgggg bbbbaaaa rrrrgggg bbbbaaaa
+ *hi = _mm256_shuffle_epi8(*hi, planar); // RRRRGGGG BBBBAAAA RRRRGGGG BBBBAAAA
+ __m256i rg = _mm256_unpacklo_epi32(*lo, *hi), // rrrrRRRR ggggGGGG rrrrRRRR ggggGGGG
+ ba = _mm256_unpackhi_epi32(*lo, *hi); // bbbbBBBB aaaaAAAA bbbbBBBB aaaaAAAA
+
+ // Unpack to 16-bit planar.
+ __m256i r = _mm256_unpacklo_epi8(rg, zeros), // r_r_r_r_ R_R_R_R_ r_r_r_r_ R_R_R_R_
+ g = _mm256_unpackhi_epi8(rg, zeros), // g_g_g_g_ G_G_G_G_ g_g_g_g_ G_G_G_G_
+ b = _mm256_unpacklo_epi8(ba, zeros), // b_b_b_b_ B_B_B_B_ b_b_b_b_ B_B_B_B_
+ a = _mm256_unpackhi_epi8(ba, zeros); // a_a_a_a_ A_A_A_A_ a_a_a_a_ A_A_A_A_
+
+ // Premultiply!
+ r = scale(r, a);
+ g = scale(g, a);
+ b = scale(b, a);
+
+ // Repack into interlaced pixels.
+ rg = _mm256_or_si256(r, _mm256_slli_epi16(g, 8)); // rgrgrgrg RGRGRGRG rgrgrgrg RGRGRGRG
+ ba = _mm256_or_si256(b, _mm256_slli_epi16(a, 8)); // babababa BABABABA babababa BABABABA
+ *lo = _mm256_unpacklo_epi16(rg, ba); // rgbargba rgbargba rgbargba rgbargba
+ *hi = _mm256_unpackhi_epi16(rg, ba); // RGBARGBA RGBARGBA RGBARGBA RGBARGBA
+ };
+
+ while (count >= 16) {
+ __m256i lo = _mm256_loadu_si256((const __m256i*) (src + 0)),
+ hi = _mm256_loadu_si256((const __m256i*) (src + 8));
+
+ premul8(&lo, &hi);
+
+ _mm256_storeu_si256((__m256i*) (dst + 0), lo);
+ _mm256_storeu_si256((__m256i*) (dst + 8), hi);
+
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ __m256i lo = _mm256_loadu_si256((const __m256i*) src),
+ hi = _mm256_setzero_si256();
+
+ premul8(&lo, &hi);
+
+ _mm256_storeu_si256((__m256i*) dst, lo);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ // Call portable code to finish up the tail of [0,8) pixels.
+ auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
+ premul_should_swapRB(false, dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
+ premul_should_swapRB(true, dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
+ const __m256i swapRB = _mm256_setr_epi8(2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15,
+ 2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15);
+
+ while (count >= 8) {
+ __m256i rgba = _mm256_loadu_si256((const __m256i*) src);
+ __m256i bgra = _mm256_shuffle_epi8(rgba, swapRB);
+ _mm256_storeu_si256((__m256i*) dst, bgra);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 16) {
+ __m256i ga = _mm256_loadu_si256((const __m256i*) src);
+
+ __m256i gg = _mm256_or_si256(_mm256_and_si256(ga, _mm256_set1_epi16(0x00FF)),
+ _mm256_slli_epi16(ga, 8));
+
+ __m256i ggga_lo = _mm256_unpacklo_epi16(gg, ga);
+ __m256i ggga_hi = _mm256_unpackhi_epi16(gg, ga);
+
+ // Shuffle for pixel reorder
+ // Note. 'p' stands for 'ggga'
+ // Before shuffle:
+ // ggga_lo = p0 p1 p2 p3 | p8 p9 p10 p11
+ // ggga_hi = p4 p5 p6 p7 | p12 p13 p14 p15
+ //
+ // After shuffle:
+ // ggga_lo_shuffle = p0 p1 p2 p3 | p4 p5 p6 p7
+ // ggga_hi_shuffle = p8 p9 p10 p11 | p12 p13 p14 p15
+ __m256i ggga_lo_shuffle = _mm256_permute2x128_si256(ggga_lo, ggga_hi, 0x20),
+ ggga_hi_shuffle = _mm256_permute2x128_si256(ggga_lo, ggga_hi, 0x31);
+
+ _mm256_storeu_si256((__m256i*) (dst + 0), ggga_lo_shuffle);
+ _mm256_storeu_si256((__m256i*) (dst + 8), ggga_hi_shuffle);
+
+ src += 16*2;
+ dst += 16;
+ count -= 16;
+ }
+
+ grayA_to_RGBA_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 16) {
+ __m256i grayA = _mm256_loadu_si256((const __m256i*) src);
+
+ __m256i g0 = _mm256_and_si256(grayA, _mm256_set1_epi16(0x00FF));
+ __m256i a0 = _mm256_srli_epi16(grayA, 8);
+
+ // Premultiply
+ g0 = scale(g0, a0);
+
+ __m256i gg = _mm256_or_si256(g0, _mm256_slli_epi16(g0, 8));
+ __m256i ga = _mm256_or_si256(g0, _mm256_slli_epi16(a0, 8));
+
+ __m256i ggga_lo = _mm256_unpacklo_epi16(gg, ga);
+ __m256i ggga_hi = _mm256_unpackhi_epi16(gg, ga);
+
+ // Shuffle for pixel reorder, similar as grayA_to_RGBA
+ __m256i ggga_lo_shuffle = _mm256_permute2x128_si256(ggga_lo, ggga_hi, 0x20),
+ ggga_hi_shuffle = _mm256_permute2x128_si256(ggga_lo, ggga_hi, 0x31);
+
+ _mm256_storeu_si256((__m256i*) (dst + 0), ggga_lo_shuffle);
+ _mm256_storeu_si256((__m256i*) (dst + 8), ggga_hi_shuffle);
+
+ src += 16*2;
+ dst += 16;
+ count -= 16;
+ }
+
+ grayA_to_rgbA_portable(dst, src, count);
+}
+
+enum Format { kRGB1, kBGR1 };
+static void inverted_cmyk_to(Format format, uint32_t* dst, const uint32_t* src, int count) {
+ auto convert8 = [=](__m256i* lo, __m256i* hi) {
+ const __m256i zeros = _mm256_setzero_si256();
+ __m256i planar;
+ if (kBGR1 == format) {
+ planar = _mm256_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15,
+ 2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
+ } else {
+ planar = _mm256_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15,
+ 0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
+ }
+
+ // Swizzle the pixels to 8-bit planar.
+ *lo = _mm256_shuffle_epi8(*lo, planar); // ccccmmmm yyyykkkk ccccmmmm yyyykkkk
+ *hi = _mm256_shuffle_epi8(*hi, planar); // CCCCMMMM YYYYKKKK CCCCMMMM YYYYKKKK
+ __m256i cm = _mm256_unpacklo_epi32(*lo, *hi), // ccccCCCC mmmmMMMM ccccCCCC mmmmMMMM
+ yk = _mm256_unpackhi_epi32(*lo, *hi); // yyyyYYYY kkkkKKKK yyyyYYYY kkkkKKKK
+
+ // Unpack to 16-bit planar.
+ __m256i c = _mm256_unpacklo_epi8(cm, zeros), // c_c_c_c_ C_C_C_C_ c_c_c_c_ C_C_C_C_
+ m = _mm256_unpackhi_epi8(cm, zeros), // m_m_m_m_ M_M_M_M_ m_m_m_m_ M_M_M_M_
+ y = _mm256_unpacklo_epi8(yk, zeros), // y_y_y_y_ Y_Y_Y_Y_ y_y_y_y_ Y_Y_Y_Y_
+ k = _mm256_unpackhi_epi8(yk, zeros); // k_k_k_k_ K_K_K_K_ k_k_k_k_ K_K_K_K_
+
+ // Scale to r, g, b.
+ __m256i r = scale(c, k),
+ g = scale(m, k),
+ b = scale(y, k);
+
+ // Repack into interlaced pixels:
+ // rg = rgrgrgrg RGRGRGRG rgrgrgrg RGRGRGRG
+ // ba = b1b1b1b1 B1B1B1B1 b1b1b1b1 B1B1B1B1
+ __m256i rg = _mm256_or_si256(r, _mm256_slli_epi16(g, 8)),
+ ba = _mm256_or_si256(b, _mm256_set1_epi16((uint16_t) 0xFF00));
+ *lo = _mm256_unpacklo_epi16(rg, ba); // rgb1rgb1 rgb1rgb1 rgb1rgb1 rgb1rgb1
+ *hi = _mm256_unpackhi_epi16(rg, ba); // RGB1RGB1 RGB1RGB1 RGB1RGB1 RGB1RGB1
+ };
+
+ while (count >= 16) {
+ __m256i lo = _mm256_loadu_si256((const __m256i*) (src + 0)),
+ hi = _mm256_loadu_si256((const __m256i*) (src + 8));
+
+ convert8(&lo, &hi);
+
+ _mm256_storeu_si256((__m256i*) (dst + 0), lo);
+ _mm256_storeu_si256((__m256i*) (dst + 8), hi);
+
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ __m256i lo = _mm256_loadu_si256((const __m256i*) src),
+ hi = _mm256_setzero_si256();
+
+ convert8(&lo, &hi);
+
+ _mm256_storeu_si256((__m256i*) dst, lo);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to(kRGB1, dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to(kBGR1, dst, src, count);
+}
+
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+
+// Scale a byte by another.
+// Inputs are stored in 16-bit lanes, but are not larger than 8-bits.
+static __m128i scale(__m128i x, __m128i y) {
+ const __m128i _128 = _mm_set1_epi16(128);
+ const __m128i _257 = _mm_set1_epi16(257);
+
+ // (x+127)/255 == ((x+128)*257)>>16 for 0 <= x <= 255*255.
+ return _mm_mulhi_epu16(_mm_add_epi16(_mm_mullo_epi16(x, y), _128), _257);
+}
+
+static void premul_should_swapRB(bool kSwapRB, uint32_t* dst, const uint32_t* src, int count) {
+
+ auto premul8 = [=](__m128i* lo, __m128i* hi) {
+ const __m128i zeros = _mm_setzero_si128();
+ __m128i planar;
+ if (kSwapRB) {
+ planar = _mm_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
+ } else {
+ planar = _mm_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
+ }
+
+ // Swizzle the pixels to 8-bit planar.
+ *lo = _mm_shuffle_epi8(*lo, planar); // rrrrgggg bbbbaaaa
+ *hi = _mm_shuffle_epi8(*hi, planar); // RRRRGGGG BBBBAAAA
+ __m128i rg = _mm_unpacklo_epi32(*lo, *hi), // rrrrRRRR ggggGGGG
+ ba = _mm_unpackhi_epi32(*lo, *hi); // bbbbBBBB aaaaAAAA
+
+ // Unpack to 16-bit planar.
+ __m128i r = _mm_unpacklo_epi8(rg, zeros), // r_r_r_r_ R_R_R_R_
+ g = _mm_unpackhi_epi8(rg, zeros), // g_g_g_g_ G_G_G_G_
+ b = _mm_unpacklo_epi8(ba, zeros), // b_b_b_b_ B_B_B_B_
+ a = _mm_unpackhi_epi8(ba, zeros); // a_a_a_a_ A_A_A_A_
+
+ // Premultiply!
+ r = scale(r, a);
+ g = scale(g, a);
+ b = scale(b, a);
+
+ // Repack into interlaced pixels.
+ rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)); // rgrgrgrg RGRGRGRG
+ ba = _mm_or_si128(b, _mm_slli_epi16(a, 8)); // babababa BABABABA
+ *lo = _mm_unpacklo_epi16(rg, ba); // rgbargba rgbargba
+ *hi = _mm_unpackhi_epi16(rg, ba); // RGBARGBA RGBARGBA
+ };
+
+ while (count >= 8) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) (src + 0)),
+ hi = _mm_loadu_si128((const __m128i*) (src + 4));
+
+ premul8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), hi);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ if (count >= 4) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) src),
+ hi = _mm_setzero_si128();
+
+ premul8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) dst, lo);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ // Call portable code to finish up the tail of [0,4) pixels.
+ auto proc = kSwapRB ? RGBA_to_bgrA_portable : RGBA_to_rgbA_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
+ premul_should_swapRB(false, dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
+ premul_should_swapRB(true, dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
+ const __m128i swapRB = _mm_setr_epi8(2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15);
+
+ while (count >= 4) {
+ __m128i rgba = _mm_loadu_si128((const __m128i*) src);
+ __m128i bgra = _mm_shuffle_epi8(rgba, swapRB);
+ _mm_storeu_si128((__m128i*) dst, bgra);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 8) {
+ __m128i ga = _mm_loadu_si128((const __m128i*) src);
+
+ __m128i gg = _mm_or_si128(_mm_and_si128(ga, _mm_set1_epi16(0x00FF)),
+ _mm_slli_epi16(ga, 8));
+
+ __m128i ggga_lo = _mm_unpacklo_epi16(gg, ga);
+ __m128i ggga_hi = _mm_unpackhi_epi16(gg, ga);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga_lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga_hi);
+
+ src += 8*2;
+ dst += 8;
+ count -= 8;
+ }
+
+ grayA_to_RGBA_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 8) {
+ __m128i grayA = _mm_loadu_si128((const __m128i*) src);
+
+ __m128i g0 = _mm_and_si128(grayA, _mm_set1_epi16(0x00FF));
+ __m128i a0 = _mm_srli_epi16(grayA, 8);
+
+ // Premultiply
+ g0 = scale(g0, a0);
+
+ __m128i gg = _mm_or_si128(g0, _mm_slli_epi16(g0, 8));
+ __m128i ga = _mm_or_si128(g0, _mm_slli_epi16(a0, 8));
+
+
+ __m128i ggga_lo = _mm_unpacklo_epi16(gg, ga);
+ __m128i ggga_hi = _mm_unpackhi_epi16(gg, ga);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga_lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga_hi);
+
+ src += 8*2;
+ dst += 8;
+ count -= 8;
+ }
+
+ grayA_to_rgbA_portable(dst, src, count);
+}
+
+enum Format { kRGB1, kBGR1 };
+static void inverted_cmyk_to(Format format, uint32_t* dst, const uint32_t* src, int count) {
+ auto convert8 = [=](__m128i* lo, __m128i* hi) {
+ const __m128i zeros = _mm_setzero_si128();
+ __m128i planar;
+ if (kBGR1 == format) {
+ planar = _mm_setr_epi8(2,6,10,14, 1,5,9,13, 0,4,8,12, 3,7,11,15);
+ } else {
+ planar = _mm_setr_epi8(0,4,8,12, 1,5,9,13, 2,6,10,14, 3,7,11,15);
+ }
+
+ // Swizzle the pixels to 8-bit planar.
+ *lo = _mm_shuffle_epi8(*lo, planar); // ccccmmmm yyyykkkk
+ *hi = _mm_shuffle_epi8(*hi, planar); // CCCCMMMM YYYYKKKK
+ __m128i cm = _mm_unpacklo_epi32(*lo, *hi), // ccccCCCC mmmmMMMM
+ yk = _mm_unpackhi_epi32(*lo, *hi); // yyyyYYYY kkkkKKKK
+
+ // Unpack to 16-bit planar.
+ __m128i c = _mm_unpacklo_epi8(cm, zeros), // c_c_c_c_ C_C_C_C_
+ m = _mm_unpackhi_epi8(cm, zeros), // m_m_m_m_ M_M_M_M_
+ y = _mm_unpacklo_epi8(yk, zeros), // y_y_y_y_ Y_Y_Y_Y_
+ k = _mm_unpackhi_epi8(yk, zeros); // k_k_k_k_ K_K_K_K_
+
+ // Scale to r, g, b.
+ __m128i r = scale(c, k),
+ g = scale(m, k),
+ b = scale(y, k);
+
+ // Repack into interlaced pixels.
+ __m128i rg = _mm_or_si128(r, _mm_slli_epi16(g, 8)), // rgrgrgrg RGRGRGRG
+ ba = _mm_or_si128(b, _mm_set1_epi16((uint16_t) 0xFF00)); // b1b1b1b1 B1B1B1B1
+        *lo = _mm_unpacklo_epi16(rg, ba);                             // rgb1rgb1 rgb1rgb1
+ *hi = _mm_unpackhi_epi16(rg, ba); // RGB1RGB1 RGB1RGB1
+ };
+
+ while (count >= 8) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) (src + 0)),
+ hi = _mm_loadu_si128((const __m128i*) (src + 4));
+
+ convert8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), lo);
+ _mm_storeu_si128((__m128i*) (dst + 4), hi);
+
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+
+ if (count >= 4) {
+ __m128i lo = _mm_loadu_si128((const __m128i*) src),
+ hi = _mm_setzero_si128();
+
+ convert8(&lo, &hi);
+
+ _mm_storeu_si128((__m128i*) dst, lo);
+
+ src += 4;
+ dst += 4;
+ count -= 4;
+ }
+
+ auto proc = (kBGR1 == format) ? inverted_CMYK_to_BGR1_portable : inverted_CMYK_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to(kRGB1, dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_cmyk_to(kBGR1, dst, src, count);
+}
+
+#else
+
+/*not static*/ inline void RGBA_to_rgbA(uint32_t* dst, const uint32_t* src, int count) {
+ RGBA_to_rgbA_portable(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_bgrA(uint32_t* dst, const uint32_t* src, int count) {
+ RGBA_to_bgrA_portable(dst, src, count);
+}
+
+/*not static*/ inline void RGBA_to_BGRA(uint32_t* dst, const uint32_t* src, int count) {
+ RGBA_to_BGRA_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_RGBA(uint32_t dst[], const uint8_t* src, int count) {
+ grayA_to_RGBA_portable(dst, src, count);
+}
+
+/*not static*/ inline void grayA_to_rgbA(uint32_t dst[], const uint8_t* src, int count) {
+ grayA_to_rgbA_portable(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_RGB1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_CMYK_to_RGB1_portable(dst, src, count);
+}
+
+/*not static*/ inline void inverted_CMYK_to_BGR1(uint32_t dst[], const uint32_t* src, int count) {
+ inverted_CMYK_to_BGR1_portable(dst, src, count);
+}
+
+#endif
+
+// Basically as above, but we found no benefit from AVX-512 for gray_to_RGB1.
+static void gray_to_RGB1_portable(uint32_t dst[], const uint8_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)src[i] << 16
+ | (uint32_t)src[i] << 8
+ | (uint32_t)src[i] << 0;
+ }
+}
+#if defined(SK_ARM_HAS_NEON)
+ /*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16_t gray = vld1q_u8(src);
+
+ // Set each of the color channels.
+ uint8x16x4_t rgba;
+ rgba.val[0] = gray;
+ rgba.val[1] = gray;
+ rgba.val[2] = gray;
+ rgba.val[3] = vdupq_n_u8(0xFF);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8_t gray = vld1_u8(src);
+
+ // Set each of the color channels.
+ uint8x8x4_t rgba;
+ rgba.val[0] = gray;
+ rgba.val[1] = gray;
+ rgba.val[2] = gray;
+ rgba.val[3] = vdup_n_u8(0xFF);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8;
+ dst += 8;
+ count -= 8;
+ }
+ gray_to_RGB1_portable(dst, src, count);
+ }
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ /*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ const __m256i alphas = _mm256_set1_epi8((uint8_t) 0xFF);
+ while (count >= 32) {
+ __m256i grays = _mm256_loadu_si256((const __m256i*) src);
+
+ __m256i gg_lo = _mm256_unpacklo_epi8(grays, grays);
+ __m256i gg_hi = _mm256_unpackhi_epi8(grays, grays);
+ __m256i ga_lo = _mm256_unpacklo_epi8(grays, alphas);
+ __m256i ga_hi = _mm256_unpackhi_epi8(grays, alphas);
+
+ __m256i ggga0 = _mm256_unpacklo_epi16(gg_lo, ga_lo);
+ __m256i ggga1 = _mm256_unpackhi_epi16(gg_lo, ga_lo);
+ __m256i ggga2 = _mm256_unpacklo_epi16(gg_hi, ga_hi);
+ __m256i ggga3 = _mm256_unpackhi_epi16(gg_hi, ga_hi);
+
+ // Shuffle for pixel reorder.
+ // Note. 'p' stands for 'ggga'
+ // Before shuffle:
+ // ggga0 = p0 p1 p2 p3 | p16 p17 p18 p19
+ // ggga1 = p4 p5 p6 p7 | p20 p21 p22 p23
+ // ggga2 = p8 p9 p10 p11 | p24 p25 p26 p27
+ // ggga3 = p12 p13 p14 p15 | p28 p29 p30 p31
+ //
+ // After shuffle:
+ // ggga0_shuffle = p0 p1 p2 p3 | p4 p5 p6 p7
+ // ggga1_shuffle = p8 p9 p10 p11 | p12 p13 p14 p15
+ // ggga2_shuffle = p16 p17 p18 p19 | p20 p21 p22 p23
+ // ggga3_shuffle = p24 p25 p26 p27 | p28 p29 p30 p31
+ __m256i ggga0_shuffle = _mm256_permute2x128_si256(ggga0, ggga1, 0x20),
+ ggga1_shuffle = _mm256_permute2x128_si256(ggga2, ggga3, 0x20),
+ ggga2_shuffle = _mm256_permute2x128_si256(ggga0, ggga1, 0x31),
+ ggga3_shuffle = _mm256_permute2x128_si256(ggga2, ggga3, 0x31);
+
+ _mm256_storeu_si256((__m256i*) (dst + 0), ggga0_shuffle);
+ _mm256_storeu_si256((__m256i*) (dst + 8), ggga1_shuffle);
+ _mm256_storeu_si256((__m256i*) (dst + 16), ggga2_shuffle);
+ _mm256_storeu_si256((__m256i*) (dst + 24), ggga3_shuffle);
+
+ src += 32;
+ dst += 32;
+ count -= 32;
+ }
+ gray_to_RGB1_portable(dst, src, count);
+ }
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 // TODO: just check >= SSE2?
+ /*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ const __m128i alphas = _mm_set1_epi8((uint8_t) 0xFF);
+ while (count >= 16) {
+ __m128i grays = _mm_loadu_si128((const __m128i*) src);
+
+ __m128i gg_lo = _mm_unpacklo_epi8(grays, grays);
+ __m128i gg_hi = _mm_unpackhi_epi8(grays, grays);
+ __m128i ga_lo = _mm_unpacklo_epi8(grays, alphas);
+ __m128i ga_hi = _mm_unpackhi_epi8(grays, alphas);
+
+ __m128i ggga0 = _mm_unpacklo_epi16(gg_lo, ga_lo);
+ __m128i ggga1 = _mm_unpackhi_epi16(gg_lo, ga_lo);
+ __m128i ggga2 = _mm_unpacklo_epi16(gg_hi, ga_hi);
+ __m128i ggga3 = _mm_unpackhi_epi16(gg_hi, ga_hi);
+
+ _mm_storeu_si128((__m128i*) (dst + 0), ggga0);
+ _mm_storeu_si128((__m128i*) (dst + 4), ggga1);
+ _mm_storeu_si128((__m128i*) (dst + 8), ggga2);
+ _mm_storeu_si128((__m128i*) (dst + 12), ggga3);
+
+ src += 16;
+ dst += 16;
+ count -= 16;
+ }
+ gray_to_RGB1_portable(dst, src, count);
+ }
+#else
+ /*not static*/ inline void gray_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ gray_to_RGB1_portable(dst, src, count);
+ }
+#endif
+
+// Again as above, this time not even finding benefit from AVX2 for RGB_to_{RGB,BGR}1.
+static void RGB_to_RGB1_portable(uint32_t dst[], const uint8_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t r = src[0],
+ g = src[1],
+ b = src[2];
+ src += 3;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)b << 16
+ | (uint32_t)g << 8
+ | (uint32_t)r << 0;
+ }
+}
+static void RGB_to_BGR1_portable(uint32_t dst[], const uint8_t* src, int count) {
+ for (int i = 0; i < count; i++) {
+ uint8_t r = src[0],
+ g = src[1],
+ b = src[2];
+ src += 3;
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)r << 16
+ | (uint32_t)g << 8
+ | (uint32_t)b << 0;
+ }
+}
+#if defined(SK_ARM_HAS_NEON)
+ static void insert_alpha_should_swaprb(bool kSwapRB,
+ uint32_t dst[], const uint8_t* src, int count) {
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16x3_t rgb = vld3q_u8(src);
+
+ // Insert an opaque alpha channel and swap if needed.
+ uint8x16x4_t rgba;
+ if (kSwapRB) {
+ rgba.val[0] = rgb.val[2];
+ rgba.val[2] = rgb.val[0];
+ } else {
+ rgba.val[0] = rgb.val[0];
+ rgba.val[2] = rgb.val[2];
+ }
+ rgba.val[1] = rgb.val[1];
+ rgba.val[3] = vdupq_n_u8(0xFF);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16*3;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x3_t rgb = vld3_u8(src);
+
+ // Insert an opaque alpha channel and swap if needed.
+ uint8x8x4_t rgba;
+ if (kSwapRB) {
+ rgba.val[0] = rgb.val[2];
+ rgba.val[2] = rgb.val[0];
+ } else {
+ rgba.val[0] = rgb.val[0];
+ rgba.val[2] = rgb.val[2];
+ }
+ rgba.val[1] = rgb.val[1];
+ rgba.val[3] = vdup_n_u8(0xFF);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8*3;
+ dst += 8;
+ count -= 8;
+ }
+
+ // Call portable code to finish up the tail of [0,8) pixels.
+ auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
+ proc(dst, src, count);
+ }
+
+ /*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ insert_alpha_should_swaprb(false, dst, src, count);
+ }
+ /*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
+ insert_alpha_should_swaprb(true, dst, src, count);
+ }
+#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ static void insert_alpha_should_swaprb(bool kSwapRB,
+ uint32_t dst[], const uint8_t* src, int count) {
+ const __m128i alphaMask = _mm_set1_epi32(0xFF000000);
+ __m128i expand;
+        const uint8_t X = 0xFF; // Used as a placeholder. The value of X is irrelevant.
+ if (kSwapRB) {
+ expand = _mm_setr_epi8(2,1,0,X, 5,4,3,X, 8,7,6,X, 11,10,9,X);
+ } else {
+ expand = _mm_setr_epi8(0,1,2,X, 3,4,5,X, 6,7,8,X, 9,10,11,X);
+ }
+
+ while (count >= 6) {
+ // Load a vector. While this actually contains 5 pixels plus an
+ // extra component, we will discard all but the first four pixels on
+ // this iteration.
+ __m128i rgb = _mm_loadu_si128((const __m128i*) src);
+
+ // Expand the first four pixels to RGBX and then mask to RGB(FF).
+ __m128i rgba = _mm_or_si128(_mm_shuffle_epi8(rgb, expand), alphaMask);
+
+ // Store 4 pixels.
+ _mm_storeu_si128((__m128i*) dst, rgba);
+
+ src += 4*3;
+ dst += 4;
+ count -= 4;
+ }
+
+ // Call portable code to finish up the tail of [0,4) pixels.
+ auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
+ proc(dst, src, count);
+ }
+
+ /*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ insert_alpha_should_swaprb(false, dst, src, count);
+ }
+ /*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
+ insert_alpha_should_swaprb(true, dst, src, count);
+ }
+#else
+ /*not static*/ inline void RGB_to_RGB1(uint32_t dst[], const uint8_t* src, int count) {
+ RGB_to_RGB1_portable(dst, src, count);
+ }
+ /*not static*/ inline void RGB_to_BGR1(uint32_t dst[], const uint8_t* src, int count) {
+ RGB_to_BGR1_portable(dst, src, count);
+ }
+#endif
+
+} // namespace SK_OPTS_NS
+
+#endif // SkSwizzler_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkUtils_opts.h b/gfx/skia/skia/src/opts/SkUtils_opts.h
new file mode 100644
index 0000000000..2ec42285c8
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkUtils_opts.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUtils_opts_DEFINED
+#define SkUtils_opts_DEFINED
+
+#include <stdint.h>
+#include "src/base/SkVx.h"
+
+namespace SK_OPTS_NS {
+
+ template <typename T>
+ static void memsetT(T buffer[], T value, int count) {
+ #if defined(SK_CPU_SSE_LEVEL) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
+ static constexpr int N = 32 / sizeof(T);
+ #else
+ static constexpr int N = 16 / sizeof(T);
+ #endif
+ static_assert(N > 0, "T is too big for memsetT");
+ // Create an N-wide version of value
+ skvx::Vec<N,T> wideValue(value);
+ while (count >= N) {
+ // N at a time, copy the values into the destination buffer
+ wideValue.store(buffer);
+ buffer += N;
+ count -= N;
+ }
+ // If count was not an even multiple of N, take care of the last few.
+ while (count --> 0) {
+ *buffer++ = value;
+ }
+ }
+
+ /*not static*/ inline void memset16(uint16_t buffer[], uint16_t value, int count) {
+ memsetT(buffer, value, count);
+ }
+ /*not static*/ inline void memset32(uint32_t buffer[], uint32_t value, int count) {
+ memsetT(buffer, value, count);
+ }
+ /*not static*/ inline void memset64(uint64_t buffer[], uint64_t value, int count) {
+ memsetT(buffer, value, count);
+ }
+
+ template <typename T>
+ static void rect_memsetT(T buffer[], T value, int count, size_t rowBytes, int height) {
+ while (height --> 0) {
+ memsetT(buffer, value, count);
+ buffer = (T*)((char*)buffer + rowBytes);
+ }
+ }
+
+ /*not static*/ inline void rect_memset16(uint16_t buffer[], uint16_t value, int count,
+ size_t rowBytes, int height) {
+ rect_memsetT(buffer, value, count, rowBytes, height);
+ }
+ /*not static*/ inline void rect_memset32(uint32_t buffer[], uint32_t value, int count,
+ size_t rowBytes, int height) {
+ rect_memsetT(buffer, value, count, rowBytes, height);
+ }
+ /*not static*/ inline void rect_memset64(uint64_t buffer[], uint64_t value, int count,
+ size_t rowBytes, int height) {
+ rect_memsetT(buffer, value, count, rowBytes, height);
+ }
+
+} // namespace SK_OPTS_NS
+
+#endif//SkUtils_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkVM_opts.h b/gfx/skia/skia/src/opts/SkVM_opts.h
new file mode 100644
index 0000000000..8acb53ef15
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkVM_opts.h
@@ -0,0 +1,351 @@
+// Copyright 2020 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#ifndef SkVM_opts_DEFINED
+#define SkVM_opts_DEFINED
+
+#include "src/base/SkVx.h"
+#include "src/core/SkVM.h"
+#include "src/sksl/tracing/SkSLTraceHook.h"
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ #include <immintrin.h>
+#endif
+
+template <int N>
+static inline skvx::Vec<N,int> gather32(const int* ptr, const skvx::Vec<N,int>& ix) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ if constexpr (N == 8) {
+ return skvx::bit_pun<skvx::Vec<N,int>>(
+ _mm256_i32gather_epi32(ptr, skvx::bit_pun<__m256i>(ix), 4));
+ }
+#endif
+ // Try to recurse on specializations, falling back on standard scalar map()-based impl.
+ if constexpr (N > 8) {
+ return join(gather32(ptr, ix.lo),
+ gather32(ptr, ix.hi));
+ }
+ return map([&](int i) { return ptr[i]; }, ix);
+}
+
+namespace SK_OPTS_NS {
+
+namespace SkVMInterpreterTypes {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
+ constexpr inline int K = 32; // 1024-bit: 4 ymm or 2 zmm at a time
+#else
+ constexpr inline int K = 8; // 256-bit: 2 xmm, 2 v-registers, etc.
+#endif
+ using I32 = skvx::Vec<K, int>;
+ using I16 = skvx::Vec<K, int16_t>;
+ using F32 = skvx::Vec<K, float>;
+ using U64 = skvx::Vec<K, uint64_t>;
+ using U32 = skvx::Vec<K, uint32_t>;
+ using U16 = skvx::Vec<K, uint16_t>;
+ using U8 = skvx::Vec<K, uint8_t>;
+ union Slot {
+ F32 f32;
+ I32 i32;
+ U32 u32;
+ I16 i16;
+ U16 u16;
+ };
+} // namespace SkVMInterpreterTypes
+
+ inline void interpret_skvm(const skvm::InterpreterInstruction insts[], const int ninsts,
+ const int nregs, const int loop,
+ const int strides[],
+ SkSL::TraceHook* traceHooks[], const int nTraceHooks,
+ const int nargs, int n, void* args[]) {
+ using namespace skvm;
+
+ using SkVMInterpreterTypes::K;
+ using SkVMInterpreterTypes::I32;
+ using SkVMInterpreterTypes::I16;
+ using SkVMInterpreterTypes::F32;
+ using SkVMInterpreterTypes::U64;
+ using SkVMInterpreterTypes::U32;
+ using SkVMInterpreterTypes::U16;
+ using SkVMInterpreterTypes::U8;
+ using SkVMInterpreterTypes::Slot;
+
+ // We'll operate in SIMT style, knocking off K-size chunks from n while possible.
+
+ Slot few_regs[16];
+ std::unique_ptr<char[]> many_regs;
+
+ Slot* r = few_regs;
+
+ if (nregs > (int)std::size(few_regs)) {
+ // Annoyingly we can't trust that malloc() or new will work with Slot because
+ // the skvx::Vec types may have alignment greater than what they provide.
+ // We'll overallocate one extra register so we can align manually.
+ many_regs.reset(new char[ sizeof(Slot) * (nregs + 1) ]);
+
+ uintptr_t addr = (uintptr_t)many_regs.get();
+ addr += alignof(Slot) -
+ (addr & (alignof(Slot) - 1));
+ SkASSERT((addr & (alignof(Slot) - 1)) == 0);
+ r = (Slot*)addr;
+ }
+
+ const auto should_trace = [&](int stride, int immA, Reg x, Reg y) -> bool {
+ if (immA < 0 || immA >= nTraceHooks) {
+ return false;
+ }
+ // When stride == K, all lanes are used.
+ if (stride == K) {
+ return any(r[x].i32 & r[y].i32);
+ }
+ // When stride == 1, only the first lane is used; the rest are not meaningful.
+ return r[x].i32[0] & r[y].i32[0];
+ };
+
+ // Step each argument pointer ahead by its stride a number of times.
+ auto step_args = [&](int times) {
+ for (int i = 0; i < nargs; i++) {
+ args[i] = (void*)( (char*)args[i] + times * strides[i] );
+ }
+ };
+
+ int start = 0,
+ stride;
+ for ( ; n > 0; start = loop, n -= stride, step_args(stride)) {
+ stride = n >= K ? K : 1;
+
+ for (int instIdx = start; instIdx < ninsts; instIdx++) {
+ InterpreterInstruction inst = insts[instIdx];
+
+ // d = op(x,y,z,w, immA,immB)
+ Reg d = inst.d,
+ x = inst.x,
+ y = inst.y,
+ z = inst.z,
+ w = inst.w;
+ int immA = inst.immA,
+ immB = inst.immB,
+ immC = inst.immC;
+
+ // Ops that interact with memory need to know whether we're stride=1 or K,
+ // but all non-memory ops can run the same code no matter the stride.
+ switch (2*(int)inst.op + (stride == K ? 1 : 0)) {
+ default: SkUNREACHABLE;
+
+ #define STRIDE_1(op) case 2*(int)op
+ #define STRIDE_K(op) case 2*(int)op + 1
+ STRIDE_1(Op::store8 ): memcpy(args[immA], &r[x].i32, 1); break;
+ STRIDE_1(Op::store16): memcpy(args[immA], &r[x].i32, 2); break;
+ STRIDE_1(Op::store32): memcpy(args[immA], &r[x].i32, 4); break;
+ STRIDE_1(Op::store64): memcpy((char*)args[immA]+0, &r[x].i32, 4);
+ memcpy((char*)args[immA]+4, &r[y].i32, 4); break;
+
+ STRIDE_K(Op::store8 ): skvx::cast<uint8_t> (r[x].i32).store(args[immA]); break;
+ STRIDE_K(Op::store16): skvx::cast<uint16_t>(r[x].i32).store(args[immA]); break;
+ STRIDE_K(Op::store32): (r[x].i32).store(args[immA]); break;
+ STRIDE_K(Op::store64): (skvx::cast<uint64_t>(r[x].u32) << 0 |
+ skvx::cast<uint64_t>(r[y].u32) << 32).store(args[immA]);
+ break;
+
+ STRIDE_1(Op::load8 ): r[d].i32 = 0; memcpy(&r[d].i32, args[immA], 1); break;
+ STRIDE_1(Op::load16): r[d].i32 = 0; memcpy(&r[d].i32, args[immA], 2); break;
+ STRIDE_1(Op::load32): r[d].i32 = 0; memcpy(&r[d].i32, args[immA], 4); break;
+ STRIDE_1(Op::load64):
+ r[d].i32 = 0; memcpy(&r[d].i32, (char*)args[immA] + 4*immB, 4); break;
+
+ STRIDE_K(Op::load8 ): r[d].i32= skvx::cast<int>(U8 ::Load(args[immA])); break;
+ STRIDE_K(Op::load16): r[d].i32= skvx::cast<int>(U16::Load(args[immA])); break;
+ STRIDE_K(Op::load32): r[d].i32= I32::Load(args[immA]) ; break;
+ STRIDE_K(Op::load64):
+ // Low 32 bits if immB=0, or high 32 bits if immB=1.
+ r[d].i32 = skvx::cast<int>(U64::Load(args[immA]) >> (32*immB)); break;
+
+ // The pointer we base our gather on is loaded indirectly from a uniform:
+ // - args[immA] is the uniform holding our gather base pointer somewhere;
+ // - (const uint8_t*)args[immA] + immB points to the gather base pointer;
+                    //   - memcpy() loads the gather base into a pointer of the right type.
+ // After all that we have an ordinary (uniform) pointer `ptr` to load from,
+ // and we then gather from it using the varying indices in r[x].
+ STRIDE_1(Op::gather8): {
+ const uint8_t* ptr;
+ memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
+ r[d].i32 = ptr[ r[x].i32[0] ];
+ } break;
+ STRIDE_1(Op::gather16): {
+ const uint16_t* ptr;
+ memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
+ r[d].i32 = ptr[ r[x].i32[0] ];
+ } break;
+ STRIDE_1(Op::gather32): {
+ const int* ptr;
+ memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
+ r[d].i32 = ptr[ r[x].i32[0] ];
+ } break;
+
+ STRIDE_K(Op::gather8): {
+ const uint8_t* ptr;
+ memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
+ r[d].i32 = map([&](int ix) { return (int)ptr[ix]; }, r[x].i32);
+ } break;
+ STRIDE_K(Op::gather16): {
+ const uint16_t* ptr;
+ memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
+ r[d].i32 = map([&](int ix) { return (int)ptr[ix]; }, r[x].i32);
+ } break;
+ STRIDE_K(Op::gather32): {
+ const int* ptr;
+ memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
+ r[d].i32 = gather32(ptr, r[x].i32);
+ } break;
+
+ #undef STRIDE_1
+ #undef STRIDE_K
+
+ // Ops that don't interact with memory should never care about the stride.
+ #define CASE(op) case 2*(int)op: /*fallthrough*/ case 2*(int)op+1
+
+ // These 128-bit ops are implemented serially for simplicity.
+ CASE(Op::store128): {
+ U64 lo = (skvx::cast<uint64_t>(r[x].u32) << 0 |
+ skvx::cast<uint64_t>(r[y].u32) << 32),
+ hi = (skvx::cast<uint64_t>(r[z].u32) << 0 |
+ skvx::cast<uint64_t>(r[w].u32) << 32);
+ for (int i = 0; i < stride; i++) {
+ memcpy((char*)args[immA] + 16*i + 0, &lo[i], 8);
+ memcpy((char*)args[immA] + 16*i + 8, &hi[i], 8);
+ }
+ } break;
+
+ CASE(Op::load128):
+ r[d].i32 = 0;
+ for (int i = 0; i < stride; i++) {
+ memcpy(&r[d].i32[i], (const char*)args[immA] + 16*i+ 4*immB, 4);
+ } break;
+
+ CASE(Op::assert_true):
+ #ifdef SK_DEBUG
+ if (!all(r[x].i32)) {
+ SkDebugf("inst %d, register %d\n", instIdx, y);
+ for (int i = 0; i < K; i++) {
+ SkDebugf("\t%2d: %08x (%g)\n",
+ instIdx, r[y].i32[instIdx], r[y].f32[instIdx]);
+ }
+ SkASSERT(false);
+ }
+ #endif
+ break;
+
+ CASE(Op::trace_line):
+ if (should_trace(stride, immA, x, y)) {
+ traceHooks[immA]->line(immB);
+ }
+ break;
+
+ CASE(Op::trace_var):
+ if (should_trace(stride, immA, x, y)) {
+ for (int i = 0; i < K; ++i) {
+ if (r[x].i32[i] & r[y].i32[i]) {
+ traceHooks[immA]->var(immB, r[z].i32[i]);
+ break;
+ }
+ }
+ }
+ break;
+
+ CASE(Op::trace_enter):
+ if (should_trace(stride, immA, x, y)) {
+ traceHooks[immA]->enter(immB);
+ }
+ break;
+
+ CASE(Op::trace_exit):
+ if (should_trace(stride, immA, x, y)) {
+ traceHooks[immA]->exit(immB);
+ }
+ break;
+
+ CASE(Op::trace_scope):
+ if (should_trace(stride, immA, x, y)) {
+ traceHooks[immA]->scope(immB);
+ }
+ break;
+
+ CASE(Op::index): {
+ const int iota[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
+ 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
+ 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63 };
+ static_assert(K <= std::size(iota), "");
+
+ r[d].i32 = n - I32::Load(iota);
+ } break;
+
+ CASE(Op::uniform32):
+ r[d].i32 = *(const int*)( (const char*)args[immA] + immB );
+ break;
+
+ CASE(Op::array32):
+ const int* ptr;
+ memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
+ r[d].i32 = ptr[immC/sizeof(int)];
+ break;
+
+ CASE(Op::splat): r[d].i32 = immA; break;
+
+ CASE(Op::add_f32): r[d].f32 = r[x].f32 + r[y].f32; break;
+ CASE(Op::sub_f32): r[d].f32 = r[x].f32 - r[y].f32; break;
+ CASE(Op::mul_f32): r[d].f32 = r[x].f32 * r[y].f32; break;
+ CASE(Op::div_f32): r[d].f32 = r[x].f32 / r[y].f32; break;
+ CASE(Op::min_f32): r[d].f32 = min(r[x].f32, r[y].f32); break;
+ CASE(Op::max_f32): r[d].f32 = max(r[x].f32, r[y].f32); break;
+
+ CASE(Op::fma_f32): r[d].f32 = fma( r[x].f32, r[y].f32, r[z].f32); break;
+ CASE(Op::fms_f32): r[d].f32 = fma( r[x].f32, r[y].f32, -r[z].f32); break;
+ CASE(Op::fnma_f32): r[d].f32 = fma(-r[x].f32, r[y].f32, r[z].f32); break;
+
+ CASE(Op::sqrt_f32): r[d].f32 = sqrt(r[x].f32); break;
+
+ CASE(Op::add_i32): r[d].i32 = r[x].i32 + r[y].i32; break;
+ CASE(Op::sub_i32): r[d].i32 = r[x].i32 - r[y].i32; break;
+ CASE(Op::mul_i32): r[d].i32 = r[x].i32 * r[y].i32; break;
+
+ CASE(Op::shl_i32): r[d].i32 = r[x].i32 << immA; break;
+ CASE(Op::sra_i32): r[d].i32 = r[x].i32 >> immA; break;
+ CASE(Op::shr_i32): r[d].u32 = r[x].u32 >> immA; break;
+
+ CASE(Op:: eq_f32): r[d].i32 = r[x].f32 == r[y].f32; break;
+ CASE(Op::neq_f32): r[d].i32 = r[x].f32 != r[y].f32; break;
+ CASE(Op:: gt_f32): r[d].i32 = r[x].f32 > r[y].f32; break;
+ CASE(Op::gte_f32): r[d].i32 = r[x].f32 >= r[y].f32; break;
+
+ CASE(Op:: eq_i32): r[d].i32 = r[x].i32 == r[y].i32; break;
+ CASE(Op:: gt_i32): r[d].i32 = r[x].i32 > r[y].i32; break;
+
+ CASE(Op::bit_and ): r[d].i32 = r[x].i32 & r[y].i32; break;
+ CASE(Op::bit_or ): r[d].i32 = r[x].i32 | r[y].i32; break;
+ CASE(Op::bit_xor ): r[d].i32 = r[x].i32 ^ r[y].i32; break;
+ CASE(Op::bit_clear): r[d].i32 = r[x].i32 & ~r[y].i32; break;
+
+ CASE(Op::select): r[d].i32 = skvx::if_then_else(r[x].i32, r[y].i32, r[z].i32);
+ break;
+
+ CASE(Op::ceil): r[d].f32 = skvx::ceil(r[x].f32) ; break;
+ CASE(Op::floor): r[d].f32 = skvx::floor(r[x].f32) ; break;
+ CASE(Op::to_f32): r[d].f32 = skvx::cast<float>( r[x].i32 ); break;
+ CASE(Op::trunc): r[d].i32 = skvx::cast<int> ( r[x].f32 ); break;
+ CASE(Op::round): r[d].i32 = skvx::cast<int> (skvx::lrint(r[x].f32)); break;
+
+ CASE(Op::to_fp16):
+ r[d].i32 = skvx::cast<int>(skvx::to_half(r[x].f32));
+ break;
+ CASE(Op::from_fp16):
+ r[d].f32 = skvx::from_half(skvx::cast<uint16_t>(r[x].i32));
+ break;
+
+ #undef CASE
+ }
+ }
+ }
+ }
+
+} // namespace SK_OPTS_NS
+
+#endif//SkVM_opts_DEFINED
diff --git a/gfx/skia/skia/src/opts/SkXfermode_opts.h b/gfx/skia/skia/src/opts/SkXfermode_opts.h
new file mode 100644
index 0000000000..15714791a3
--- /dev/null
+++ b/gfx/skia/skia/src/opts/SkXfermode_opts.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4pxXfermode_DEFINED
+#define Sk4pxXfermode_DEFINED
+
+#include "src/base/SkMSAN.h"
+#include "src/core/Sk4px.h"
+#include "src/core/SkXfermodePriv.h"
+
+#ifdef SK_FORCE_RASTER_PIPELINE_BLITTER
+
+namespace SK_OPTS_NS {
+ /*not static*/ inline SkXfermode* create_xfermode(SkBlendMode) { return nullptr; }
+}
+
+#else
+
+namespace { // NOLINT(google-build-namespaces)
+
+// Most xfermodes can be done most efficiently 4 pixels at a time in 8 or 16-bit fixed point.
+#define XFERMODE(Xfermode) \
+ struct Xfermode { Sk4px operator()(const Sk4px&, const Sk4px&) const; }; \
+ inline Sk4px Xfermode::operator()(const Sk4px& d, const Sk4px& s) const
+
+XFERMODE(Clear) { return Sk4px::DupPMColor(0); }
+XFERMODE(Src) { return s; }
+XFERMODE(Dst) { return d; }
+XFERMODE(SrcIn) { return s.approxMulDiv255(d.alphas() ); }
+XFERMODE(SrcOut) { return s.approxMulDiv255(d.alphas().inv()); }
+XFERMODE(SrcOver) { return s + d.approxMulDiv255(s.alphas().inv()); }
+XFERMODE(DstIn) { return SrcIn ()(s,d); }
+XFERMODE(DstOut) { return SrcOut ()(s,d); }
+XFERMODE(DstOver) { return SrcOver()(s,d); }
+
+// [ S * Da + (1 - Sa) * D ]
+XFERMODE(SrcATop) { return (s * d.alphas() + d * s.alphas().inv()).div255(); }
+XFERMODE(DstATop) { return SrcATop()(s,d); }
+// [ S * (1 - Da) + (1 - Sa) * D ]
+XFERMODE(Xor) { return (s * d.alphas().inv() + d * s.alphas().inv()).div255(); }
+// [S + D ]
+XFERMODE(Plus) { return s.saturatedAdd(d); }
+// [S * D ]
+XFERMODE(Modulate) { return s.approxMulDiv255(d); }
+// [S + D - S * D]
+XFERMODE(Screen) {
+ // Doing the math as S + (1-S)*D or S + (D - S*D) means the add and subtract can be done
+ // in 8-bit space without overflow. S + (1-S)*D is a touch faster because inv() is cheap.
+ return s + d.approxMulDiv255(s.inv());
+}
+
+#undef XFERMODE
+
+// A reasonable fallback mode for doing AA is to simply apply the transfermode first,
+// then linearly interpolate the AA.
+template <typename Xfermode>
+static Sk4px xfer_aa(const Sk4px& d, const Sk4px& s, const Sk4px& aa) {
+ Sk4px bw = Xfermode()(d, s);
+ return (bw * aa + d * aa.inv()).div255();
+}
+
+// For some transfermodes we specialize AA, either for correctness or performance.
+#define XFERMODE_AA(Xfermode) \
+ template <> inline Sk4px xfer_aa<Xfermode>(const Sk4px& d, const Sk4px& s, const Sk4px& aa)
+
+// Plus' clamp needs to happen after AA. skia:3852
+XFERMODE_AA(Plus) { // [ clamp( (1-AA)D + (AA)(S+D) ) == clamp(D + AA*S) ]
+ return d.saturatedAdd(s.approxMulDiv255(aa));
+}
+
+#undef XFERMODE_AA
+
+// Src and Clear modes are safe to use with uninitialized dst buffers,
+// even if the implementation branches based on bytes from dst (e.g. asserts in Debug mode).
+// For those modes, just lie to MSAN that dst is always initialized.
+template <typename Xfermode> static void mark_dst_initialized_if_safe(void*, void*) {}
+template <> inline void mark_dst_initialized_if_safe<Src>(void* dst, void* end) {
+ sk_msan_mark_initialized(dst, end, "Src doesn't read dst.");
+}
+template <> inline void mark_dst_initialized_if_safe<Clear>(void* dst, void* end) {
+ sk_msan_mark_initialized(dst, end, "Clear doesn't read dst.");
+}
+
+template <typename Xfermode>
+class Sk4pxXfermode : public SkXfermode {
+public:
+ Sk4pxXfermode() {}
+
+ void xfer32(SkPMColor dst[], const SkPMColor src[], int n, const SkAlpha aa[]) const override {
+ mark_dst_initialized_if_safe<Xfermode>(dst, dst+n);
+ if (nullptr == aa) {
+ Sk4px::MapDstSrc(n, dst, src, Xfermode());
+ } else {
+ Sk4px::MapDstSrcAlpha(n, dst, src, aa, xfer_aa<Xfermode>);
+ }
+ }
+};
+
+} // namespace
+
+namespace SK_OPTS_NS {
+
+/*not static*/ inline SkXfermode* create_xfermode(SkBlendMode mode) {
+ switch (mode) {
+#define CASE(Xfermode) \
+ case SkBlendMode::k##Xfermode: return new Sk4pxXfermode<Xfermode>()
+ CASE(Clear);
+ CASE(Src);
+ CASE(Dst);
+ CASE(SrcOver);
+ CASE(DstOver);
+ CASE(SrcIn);
+ CASE(DstIn);
+ CASE(SrcOut);
+ CASE(DstOut);
+ CASE(SrcATop);
+ CASE(DstATop);
+ CASE(Xor);
+ CASE(Plus);
+ CASE(Modulate);
+ CASE(Screen);
+ #undef CASE
+
+ default: break;
+ }
+ return nullptr;
+}
+
+} // namespace SK_OPTS_NS
+
+#endif // #ifdef SK_FORCE_RASTER_PIPELINE_BLITTER
+
+#endif//Sk4pxXfermode_DEFINED
diff --git a/gfx/skia/skia/src/pathops/SkAddIntersections.cpp b/gfx/skia/skia/src/pathops/SkAddIntersections.cpp
new file mode 100644
index 0000000000..913db87230
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkAddIntersections.cpp
@@ -0,0 +1,595 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pathops/SkAddIntersections.h"
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "src/pathops/SkIntersectionHelper.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cmath>
+#include <utility>
+
+#if DEBUG_ADD_INTERSECTING_TS
+
+static void debugShowLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " LINE_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, LINE_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " LINE_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], LINE_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ if (pts == 2) {
+ SkDebugf(" " T_DEBUG_STR(wtTs, 1) " " PT_DEBUG_STR, i[0][1], PT_DEBUG_DATA(i, 1));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ if (pts == 2) {
+ SkDebugf(" " T_DEBUG_STR(wnTs, 1), i[1][1]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowQuadLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn,
+ const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " QUAD_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, QUAD_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " QUAD_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], QUAD_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " QUAD_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, QUAD_DEBUG_DATA(wt.pts()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " QUAD_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], QUAD_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowConicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CONIC_DEBUG_STR " " CONIC_DEBUG_STR "\n",
+ __FUNCTION__, CONIC_DEBUG_DATA(wt.pts(), wt.weight()),
+ CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CONIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CONIC_DEBUG_DATA(wt.pts(), wt.weight()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CONIC_DEBUG_STR, i[1][0], CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicLineIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " LINE_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), LINE_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " LINE_DEBUG_STR, i[1][0], LINE_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicQuadIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " QUAD_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), QUAD_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " QUAD_DEBUG_STR, i[1][0], QUAD_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicConicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " CONIC_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CONIC_DEBUG_STR, i[1][0], CONIC_DEBUG_DATA(wn.pts(), wn.weight()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+static void debugShowCubicIntersection(int pts, const SkIntersectionHelper& wt,
+ const SkIntersectionHelper& wn, const SkIntersections& i) {
+ SkASSERT(i.used() == pts);
+ if (!pts) {
+ SkDebugf("%s no intersect " CUBIC_DEBUG_STR " " CUBIC_DEBUG_STR "\n",
+ __FUNCTION__, CUBIC_DEBUG_DATA(wt.pts()), CUBIC_DEBUG_DATA(wn.pts()));
+ return;
+ }
+ SkDebugf("%s " T_DEBUG_STR(wtTs, 0) " " CUBIC_DEBUG_STR " " PT_DEBUG_STR, __FUNCTION__,
+ i[0][0], CUBIC_DEBUG_DATA(wt.pts()), PT_DEBUG_DATA(i, 0));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wtTs) " " PT_DEBUG_STR, n, i[0][n], PT_DEBUG_DATA(i, n));
+ }
+ SkDebugf(" wnTs[0]=%g " CUBIC_DEBUG_STR, i[1][0], CUBIC_DEBUG_DATA(wn.pts()));
+ for (int n = 1; n < pts; ++n) {
+ SkDebugf(" " TX_DEBUG_STR(wnTs), n, i[1][n]);
+ }
+ SkDebugf("\n");
+}
+
+#else
+static void debugShowLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowQuadLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowConicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicLineIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicQuadIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicConicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+
+static void debugShowCubicIntersection(int , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& , const SkIntersections& ) {
+}
+#endif
+
+bool AddIntersectTs(SkOpContour* test, SkOpContour* next, SkOpCoincidence* coincidence) {
+ if (test != next) {
+ if (AlmostLessUlps(test->bounds().fBottom, next->bounds().fTop)) {
+ return false;
+ }
+ // OPTIMIZATION: outset contour bounds a smidgen instead?
+ if (!SkPathOpsBounds::Intersects(test->bounds(), next->bounds())) {
+ return true;
+ }
+ }
+ SkIntersectionHelper wt;
+ wt.init(test);
+ do {
+ SkIntersectionHelper wn;
+ wn.init(next);
+ test->debugValidate();
+ next->debugValidate();
+ if (test == next && !wn.startAfter(wt)) {
+ continue;
+ }
+ do {
+ if (!SkPathOpsBounds::Intersects(wt.bounds(), wn.bounds())) {
+ continue;
+ }
+ int pts = 0;
+ SkIntersections ts { SkDEBUGCODE(test->globalState()) };
+ bool swap = false;
+ SkDQuad quad1, quad2;
+ SkDConic conic1, conic2;
+ SkDCubic cubic1, cubic2;
+ switch (wt.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ swap = true;
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.lineHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ pts = ts.quadHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ pts = ts.conicHorizontal(wn.pts(), wn.weight(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ pts = ts.cubicHorizontal(wn.pts(), wt.left(),
+ wt.right(), wt.y(), wt.xFlipped());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ swap = true;
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ case SkIntersectionHelper::kLine_Segment: {
+ pts = ts.lineVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.quadVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.conicVertical(wn.pts(), wn.weight(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ pts = ts.cubicVertical(wn.pts(), wt.top(),
+ wt.bottom(), wt.x(), wt.yFlipped());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.lineHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.lineVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.lineLine(wt.pts(), wn.pts());
+ debugShowLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ swap = true;
+ pts = ts.quadLine(wn.pts(), wt.pts());
+ debugShowQuadLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ swap = true;
+ pts = ts.conicLine(wn.pts(), wn.weight(), wt.pts());
+ debugShowConicLineIntersection(pts, wn, wt, ts);
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ swap = true;
+ pts = ts.cubicLine(wn.pts(), wt.pts());
+ debugShowCubicLineIntersection(pts, wn, wt, ts);
+ break;
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kQuad_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.quadHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.quadVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.quadLine(wt.pts(), wn.pts());
+ debugShowQuadLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(quad1.set(wt.pts()), quad2.set(wn.pts()));
+ debugShowQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ swap = true;
+ pts = ts.intersect(conic2.set(wn.pts(), wn.weight()),
+ quad1.set(wt.pts()));
+ debugShowConicQuadIntersection(pts, wn, wt, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ swap = true;
+ pts = ts.intersect(cubic2.set(wn.pts()), quad1.set(wt.pts()));
+ debugShowCubicQuadIntersection(pts, wn, wt, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ case SkIntersectionHelper::kConic_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.conicHorizontal(wt.pts(), wt.weight(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.conicVertical(wt.pts(), wt.weight(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.conicLine(wt.pts(), wt.weight(), wn.pts());
+ debugShowConicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(conic1.set(wt.pts(), wt.weight()),
+ quad2.set(wn.pts()));
+ debugShowConicQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.intersect(conic1.set(wt.pts(), wt.weight()),
+ conic2.set(wn.pts(), wn.weight()));
+ debugShowConicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ swap = true;
+ pts = ts.intersect(cubic2.set(wn.pts()
+ SkDEBUGPARAMS(ts.globalState())),
+ conic1.set(wt.pts(), wt.weight()
+ SkDEBUGPARAMS(ts.globalState())));
+ debugShowCubicConicIntersection(pts, wn, wt, ts);
+ break;
+ }
+ }
+ break;
+ case SkIntersectionHelper::kCubic_Segment:
+ switch (wn.segmentType()) {
+ case SkIntersectionHelper::kHorizontalLine_Segment:
+ pts = ts.cubicHorizontal(wt.pts(), wn.left(),
+ wn.right(), wn.y(), wn.xFlipped());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kVerticalLine_Segment:
+ pts = ts.cubicVertical(wt.pts(), wn.top(),
+ wn.bottom(), wn.x(), wn.yFlipped());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kLine_Segment:
+ pts = ts.cubicLine(wt.pts(), wn.pts());
+ debugShowCubicLineIntersection(pts, wt, wn, ts);
+ break;
+ case SkIntersectionHelper::kQuad_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()), quad2.set(wn.pts()));
+ debugShowCubicQuadIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kConic_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()
+ SkDEBUGPARAMS(ts.globalState())),
+ conic2.set(wn.pts(), wn.weight()
+ SkDEBUGPARAMS(ts.globalState())));
+ debugShowCubicConicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ case SkIntersectionHelper::kCubic_Segment: {
+ pts = ts.intersect(cubic1.set(wt.pts()), cubic2.set(wn.pts()));
+ debugShowCubicIntersection(pts, wt, wn, ts);
+ break;
+ }
+ default:
+ SkASSERT(0);
+ }
+ break;
+ default:
+ SkASSERT(0);
+ }
+#if DEBUG_T_SECT_LOOP_COUNT
+ test->globalState()->debugAddLoopCount(&ts, wt, wn);
+#endif
+ int coinIndex = -1;
+ SkOpPtT* coinPtT[2];
+ for (int pt = 0; pt < pts; ++pt) {
+ SkASSERT(ts[0][pt] >= 0 && ts[0][pt] <= 1);
+ SkASSERT(ts[1][pt] >= 0 && ts[1][pt] <= 1);
+ wt.segment()->debugValidate();
+ // if t value is used to compute pt in addT, error may creep in and
+ // rect intersections may result in non-rects. if pt value from intersection
+ // is passed in, current tests break. As a workaround, pass in pt
+ // value from intersection only if pt.x and pt.y is integral
+ SkPoint iPt = ts.pt(pt).asSkPoint();
+ bool iPtIsIntegral = iPt.fX == floor(iPt.fX) && iPt.fY == floor(iPt.fY);
+ SkOpPtT* testTAt = iPtIsIntegral ? wt.segment()->addT(ts[swap][pt], iPt)
+ : wt.segment()->addT(ts[swap][pt]);
+ wn.segment()->debugValidate();
+ SkOpPtT* nextTAt = iPtIsIntegral ? wn.segment()->addT(ts[!swap][pt], iPt)
+ : wn.segment()->addT(ts[!swap][pt]);
+ if (!testTAt->contains(nextTAt)) {
+ SkOpPtT* oppPrev = testTAt->oppPrev(nextTAt); // Returns nullptr if pair
+ if (oppPrev) { // already share a pt-t loop.
+ testTAt->span()->mergeMatches(nextTAt->span());
+ testTAt->addOpp(nextTAt, oppPrev);
+ }
+ if (testTAt->fPt != nextTAt->fPt) {
+ testTAt->span()->unaligned();
+ nextTAt->span()->unaligned();
+ }
+ wt.segment()->debugValidate();
+ wn.segment()->debugValidate();
+ }
+ if (!ts.isCoincident(pt)) {
+ continue;
+ }
+ if (coinIndex < 0) {
+ coinPtT[0] = testTAt;
+ coinPtT[1] = nextTAt;
+ coinIndex = pt;
+ continue;
+ }
+ if (coinPtT[0]->span() == testTAt->span()) {
+ coinIndex = -1;
+ continue;
+ }
+ if (coinPtT[1]->span() == nextTAt->span()) {
+ coinIndex = -1; // coincidence span collapsed
+ continue;
+ }
+ if (swap) {
+ using std::swap;
+ swap(coinPtT[0], coinPtT[1]);
+ swap(testTAt, nextTAt);
+ }
+ SkASSERT(coincidence->globalState()->debugSkipAssert()
+ || coinPtT[0]->span()->t() < testTAt->span()->t());
+ if (coinPtT[0]->span()->deleted()) {
+ coinIndex = -1;
+ continue;
+ }
+ if (testTAt->span()->deleted()) {
+ coinIndex = -1;
+ continue;
+ }
+ coincidence->add(coinPtT[0], testTAt, coinPtT[1], nextTAt);
+ wt.segment()->debugValidate();
+ wn.segment()->debugValidate();
+ coinIndex = -1;
+ }
+ SkOPOBJASSERT(coincidence, coinIndex < 0); // expect coincidence to be paired
+ } while (wn.advance());
+ } while (wt.advance());
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkAddIntersections.h b/gfx/skia/skia/src/pathops/SkAddIntersections.h
new file mode 100644
index 0000000000..d9a11df254
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkAddIntersections.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkAddIntersections_DEFINED
+#define SkAddIntersections_DEFINED
+
+class SkOpCoincidence;
+class SkOpContour;
+
+bool AddIntersectTs(SkOpContour* test, SkOpContour* next, SkOpCoincidence* coincidence);
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp
new file mode 100644
index 0000000000..54c8178f7b
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDConicLineIntersection.cpp
@@ -0,0 +1,396 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+#include <cmath>
+
+class LineConicIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineConicIntersections(const SkDConic& c, const SkDLine& l, SkIntersections* i)
+ : fConic(c)
+ , fLine(&l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(4); // allow short partial coincidence plus discrete intersection
+ }
+
+ LineConicIntersections(const SkDConic& c)
+ : fConic(c)
+ SkDEBUGPARAMS(fLine(nullptr))
+ SkDEBUGPARAMS(fIntersections(nullptr))
+ SkDEBUGPARAMS(fAllowNear(false)) {
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double conicMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint conicMidPt = fConic.ptAtT(conicMidT);
+ double t = fLine->nearPoint(conicMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+#ifdef SK_DEBUG
+ static bool close_to(double a, double b, const double c[3]) {
+ double max = std::max(-std::min(std::min(c[0], c[1]), c[2]), std::max(std::max(c[0], c[1]), c[2]));
+ return approximately_zero_when_compared_to(a - b, max);
+ }
+#endif
+ int horizontalIntersect(double axisIntercept, double roots[2]) {
+ double conicVals[] = { fConic[0].fY, fConic[1].fY, fConic[2].fY };
+ return this->validT(conicVals, axisIntercept, roots);
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ this->addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ this->addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double roots[2];
+ int count = this->horizontalIntersect(axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double conicT = roots[index];
+ SkDPoint pt = fConic.ptAtT(conicT);
+ SkDEBUGCODE(double conicVals[] = { fConic[0].fY, fConic[1].fY, fConic[2].fY });
+ SkOPOBJASSERT(fIntersections, close_to(pt.fY, axisIntercept, conicVals));
+ double lineT = (pt.fX - left) / (right - left);
+ if (this->pinTs(&conicT, &lineT, &pt, kPointInitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+ int intersect() {
+ this->addExactEndPoints();
+ if (fAllowNear) {
+ this->addNearEndPoints();
+ }
+ double rootVals[2];
+ int roots = this->intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double conicT = rootVals[index];
+ double lineT = this->findLineT(conicT);
+#ifdef SK_DEBUG
+ if (!fIntersections->globalState()
+ || !fIntersections->globalState()->debugSkipAssert()) {
+ SkDEBUGCODE(SkDPoint conicPt = fConic.ptAtT(conicT));
+ SkDEBUGCODE(SkDPoint linePt = fLine->ptAtT(lineT));
+ SkASSERT(conicPt.approximatelyDEqual(linePt));
+ }
+#endif
+ SkDPoint pt;
+ if (this->pinTs(&conicT, &lineT, &pt, kPointUninitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+ int intersectRay(double roots[2]) {
+ double adj = (*fLine)[1].fX - (*fLine)[0].fX;
+ double opp = (*fLine)[1].fY - (*fLine)[0].fY;
+ double r[3];
+ for (int n = 0; n < 3; ++n) {
+ r[n] = (fConic[n].fY - (*fLine)[0].fY) * adj - (fConic[n].fX - (*fLine)[0].fX) * opp;
+ }
+ return this->validT(r, 0, roots);
+ }
+
+ int validT(double r[3], double axisIntercept, double roots[2]) {
+ double A = r[2];
+ double B = r[1] * fConic.fWeight - axisIntercept * fConic.fWeight + axisIntercept;
+ double C = r[0];
+ A += C - 2 * B; // A = a + c - 2*(b*w - xCept*w + xCept)
+ B -= C; // B = b*w - w * xCept + xCept - a
+ C -= axisIntercept;
+ return SkDQuad::RootsValidT(A, 2 * B, C, roots);
+ }
+
+ int verticalIntersect(double axisIntercept, double roots[2]) {
+ double conicVals[] = { fConic[0].fX, fConic[1].fX, fConic[2].fX };
+ return this->validT(conicVals, axisIntercept, roots);
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ this->addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ this->addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double roots[2];
+ int count = this->verticalIntersect(axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double conicT = roots[index];
+ SkDPoint pt = fConic.ptAtT(conicT);
+ SkDEBUGCODE(double conicVals[] = { fConic[0].fX, fConic[1].fX, fConic[2].fX });
+ SkOPOBJASSERT(fIntersections, close_to(pt.fX, axisIntercept, conicVals));
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (this->pinTs(&conicT, &lineT, &pt, kPointInitialized)
+ && this->uniqueAnswer(conicT, pt)) {
+ fIntersections->insert(conicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ this->checkCoincident();
+ return fIntersections->used();
+ }
+
+protected:
+// OPTIMIZE: Functions of the form add .. points are identical to the conic routines.
+ // add endpoints first to get zero and one t values exactly
+ void addExactEndPoints() {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = fLine->exactPoint(fConic[cIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearEndPoints() {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = fLine->nearPoint(fConic[cIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double conicT = ((SkDCurve*) &fConic)->nearPoint(SkPath::kConic_Verb,
+ (*fLine)[lIndex], (*fLine)[!lIndex]);
+ if (conicT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, (*fLine)[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = SkDLine::ExactPointH(fConic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fConic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double lineT = SkDLine::ExactPointV(fConic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double conicT = (double) (cIndex >> 1);
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < SkDConic::kPointCount; cIndex += SkDConic::kPointLast) {
+ double conicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(conicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fConic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(conicT, lineT, fConic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fConic.ptAtT(t);
+ double dx = (*fLine)[1].fX - (*fLine)[0].fX;
+ double dy = (*fLine)[1].fY - (*fLine)[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - (*fLine)[0].fX) / dx;
+ }
+ return (xy.fY - (*fLine)[0].fY) / dy;
+ }
+
+ bool pinTs(double* conicT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less_double(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more_double(*lineT)) {
+ return false;
+ }
+ double qT = *conicT = SkPinT(*conicT);
+ double lT = *lineT = SkPinT(*lineT);
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && qT != 0 && qT != 1)) {
+ *pt = (*fLine).ptAtT(lT);
+ } else if (ptSet == kPointUninitialized) {
+ *pt = fConic.ptAtT(qT);
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[0].asSkPoint())) {
+ *pt = (*fLine)[0];
+ *lineT = 0;
+ } else if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[1].asSkPoint())) {
+ *pt = (*fLine)[1];
+ *lineT = 1;
+ }
+ if (fIntersections->used() > 0 && approximately_equal((*fIntersections)[1][0], *lineT)) {
+ return false;
+ }
+ if (gridPt == fConic[0].asSkPoint()) {
+ *pt = fConic[0];
+ *conicT = 0;
+ } else if (gridPt == fConic[2].asSkPoint()) {
+ *pt = fConic[2];
+ *conicT = 1;
+ }
+ return true;
+ }
+
+ bool uniqueAnswer(double conicT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingConicT = (*fIntersections)[0][inner];
+ if (conicT == existingConicT) {
+ return false;
+ }
+ // check if midway on conic is also same point. If so, discard this
+ double conicMidT = (existingConicT + conicT) / 2;
+ SkDPoint conicMidPt = fConic.ptAtT(conicMidT);
+ if (conicMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint qPt = fConic.ptAtT(conicT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ qPt.fX, qPt.fY);
+#endif
+ return true;
+ }
+
+private:
+ const SkDConic& fConic;
+ const SkDLine* fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDConic& conic, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineConicIntersections c(conic, line, this);
+ return c.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDConic& conic, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineConicIntersections c(conic, line, this);
+ return c.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDConic& conic, const SkDLine& line) {
+ LineConicIntersections c(conic, line, this);
+ c.allowNear(fAllowNear);
+ return c.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDConic& conic, const SkDLine& line) {
+ LineConicIntersections c(conic, line, this);
+ fUsed = c.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = conic.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+int SkIntersections::HorizontalIntercept(const SkDConic& conic, SkScalar y, double* roots) {
+ LineConicIntersections c(conic);
+ return c.horizontalIntersect(y, roots);
+}
+
+int SkIntersections::VerticalIntercept(const SkDConic& conic, SkScalar x, double* roots) {
+ LineConicIntersections c(conic);
+ return c.verticalIntersect(x, roots);
+}
diff --git a/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp
new file mode 100644
index 0000000000..e6c286058f
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDCubicLineIntersection.cpp
@@ -0,0 +1,464 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cmath>
+
+/*
+Find the intersection of a line and cubic by solving for valid t values.
+
+Analogous to line-quadratic intersection, solve line-cubic intersection by
+representing the cubic as:
+ x = a(1-t)^3 + 3b(1-t)^2t + 3c(1-t)t^2 + dt^3
+ y = e(1-t)^3 + 3f(1-t)^2t + 3g(1-t)t^2 + ht^3
+and the line as:
+ y = i*x + j (if the line is more horizontal)
+or:
+ x = i*y + j (if the line is more vertical)
+
+Then using Mathematica, solve for the values of t where the cubic intersects the
+line:
+
+ (in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - x,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - i*x - j, x]
+ (out) -e + j +
+ 3 e t - 3 f t -
+ 3 e t^2 + 6 f t^2 - 3 g t^2 +
+ e t^3 - 3 f t^3 + 3 g t^3 - h t^3 +
+ i ( a -
+ 3 a t + 3 b t +
+ 3 a t^2 - 6 b t^2 + 3 c t^2 -
+ a t^3 + 3 b t^3 - 3 c t^3 + d t^3 )
+
+if i goes to infinity, we can rewrite the line in terms of x. Mathematica:
+
+ (in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - i*y - j,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - y, y]
+ (out) a - j -
+ 3 a t + 3 b t +
+ 3 a t^2 - 6 b t^2 + 3 c t^2 -
+ a t^3 + 3 b t^3 - 3 c t^3 + d t^3 -
+ i ( e -
+ 3 e t + 3 f t +
+ 3 e t^2 - 6 f t^2 + 3 g t^2 -
+ e t^3 + 3 f t^3 - 3 g t^3 + h t^3 )
+
+Solving this with Mathematica produces an expression with hundreds of terms;
+instead, use Numeric Solutions recipe to solve the cubic.
+
+The near-horizontal case, in terms of: Ax^3 + Bx^2 + Cx + D == 0
+ A = (-(-e + 3*f - 3*g + h) + i*(-a + 3*b - 3*c + d) )
+ B = 3*(-( e - 2*f + g ) + i*( a - 2*b + c ) )
+ C = 3*(-(-e + f ) + i*(-a + b ) )
+ D = (-( e ) + i*( a ) + j )
+
+The near-vertical case, in terms of: Ax^3 + Bx^2 + Cx + D == 0
+ A = ( (-a + 3*b - 3*c + d) - i*(-e + 3*f - 3*g + h) )
+ B = 3*( ( a - 2*b + c ) - i*( e - 2*f + g ) )
+ C = 3*( (-a + b ) - i*(-e + f ) )
+ D = ( ( a ) - i*( e ) - j )
+
+For horizontal lines:
+(in) Resultant[
+ a*(1 - t)^3 + 3*b*(1 - t)^2*t + 3*c*(1 - t)*t^2 + d*t^3 - j,
+ e*(1 - t)^3 + 3*f*(1 - t)^2*t + 3*g*(1 - t)*t^2 + h*t^3 - y, y]
+(out) e - j -
+ 3 e t + 3 f t +
+ 3 e t^2 - 6 f t^2 + 3 g t^2 -
+ e t^3 + 3 f t^3 - 3 g t^3 + h t^3
+ */
+
+class LineCubicIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineCubicIntersections(const SkDCubic& c, const SkDLine& l, SkIntersections* i)
+ : fCubic(c)
+ , fLine(l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(4);
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double cubicMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint cubicMidPt = fCubic.ptAtT(cubicMidT);
+ double t = fLine.nearPoint(cubicMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+ // see parallel routine in line quadratic intersections
+ int intersectRay(double roots[3]) {
+ double adj = fLine[1].fX - fLine[0].fX;
+ double opp = fLine[1].fY - fLine[0].fY;
+ SkDCubic c;
+ SkDEBUGCODE(c.fDebugGlobalState = fIntersections->globalState());
+ for (int n = 0; n < 4; ++n) {
+ c[n].fX = (fCubic[n].fY - fLine[0].fY) * adj - (fCubic[n].fX - fLine[0].fX) * opp;
+ }
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fX, &A, &B, &C, &D);
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_zero(calcPt.fX)) {
+ for (int n = 0; n < 4; ++n) {
+ c[n].fY = (fCubic[n].fY - fLine[0].fY) * opp
+ + (fCubic[n].fX - fLine[0].fX) * adj;
+ }
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fX, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, 0, SkDCubic::kXAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int intersect() {
+ addExactEndPoints();
+ if (fAllowNear) {
+ addNearEndPoints();
+ }
+ double rootVals[3];
+ int roots = intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double cubicT = rootVals[index];
+ double lineT = findLineT(cubicT);
+ SkDPoint pt;
+ if (pinTs(&cubicT, &lineT, &pt, kPointUninitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ static int HorizontalIntersect(const SkDCubic& c, double axisIntercept, double roots[3]) {
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fY, &A, &B, &C, &D);
+ D -= axisIntercept;
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_equal(calcPt.fY, axisIntercept)) {
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fY, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, axisIntercept, SkDCubic::kYAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double roots[3];
+ int count = HorizontalIntersect(fCubic, axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double cubicT = roots[index];
+ SkDPoint pt = { fCubic.ptAtT(cubicT).fX, axisIntercept };
+ double lineT = (pt.fX - left) / (right - left);
+ if (pinTs(&cubicT, &lineT, &pt, kPointInitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ bool uniqueAnswer(double cubicT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingCubicT = (*fIntersections)[0][inner];
+ if (cubicT == existingCubicT) {
+ return false;
+ }
+ // check if midway on cubic is also same point. If so, discard this
+ double cubicMidT = (existingCubicT + cubicT) / 2;
+ SkDPoint cubicMidPt = fCubic.ptAtT(cubicMidT);
+ if (cubicMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint cPt = fCubic.ptAtT(cubicT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ cPt.fX, cPt.fY);
+#endif
+ return true;
+ }
+
+ static int VerticalIntersect(const SkDCubic& c, double axisIntercept, double roots[3]) {
+ double A, B, C, D;
+ SkDCubic::Coefficients(&c[0].fX, &A, &B, &C, &D);
+ D -= axisIntercept;
+ int count = SkDCubic::RootsValidT(A, B, C, D, roots);
+ for (int index = 0; index < count; ++index) {
+ SkDPoint calcPt = c.ptAtT(roots[index]);
+ if (!approximately_equal(calcPt.fX, axisIntercept)) {
+ double extremeTs[6];
+ int extrema = SkDCubic::FindExtrema(&c[0].fX, extremeTs);
+ count = c.searchRoots(extremeTs, extrema, axisIntercept, SkDCubic::kXAxis, roots);
+ break;
+ }
+ }
+ return count;
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double roots[3];
+ int count = VerticalIntersect(fCubic, axisIntercept, roots);
+ for (int index = 0; index < count; ++index) {
+ double cubicT = roots[index];
+ SkDPoint pt = { axisIntercept, fCubic.ptAtT(cubicT).fY };
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (pinTs(&cubicT, &lineT, &pt, kPointInitialized) && uniqueAnswer(cubicT, pt)) {
+ fIntersections->insert(cubicT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ protected:
+
+ void addExactEndPoints() {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = fLine.exactPoint(fCubic[cIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ /* Note that this does not look for endpoints of the line that are near the cubic.
+ These points are found later when check ends looks for missing points */
+ void addNearEndPoints() {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = fLine.nearPoint(fCubic[cIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double cubicT = ((SkDCurve*) &fCubic)->nearPoint(SkPath::kCubic_Verb,
+ fLine[lIndex], fLine[!lIndex]);
+ if (cubicT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fLine[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = SkDLine::ExactPointH(fCubic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fCubic[cIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double lineT = SkDLine::ExactPointV(fCubic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double cubicT = (double) (cIndex >> 1);
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int cIndex = 0; cIndex < 4; cIndex += 3) {
+ double cubicT = (double) (cIndex >> 1);
+ if (fIntersections->hasT(cubicT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fCubic[cIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(cubicT, lineT, fCubic[cIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fCubic.ptAtT(t);
+ double dx = fLine[1].fX - fLine[0].fX;
+ double dy = fLine[1].fY - fLine[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - fLine[0].fX) / dx;
+ }
+ return (xy.fY - fLine[0].fY) / dy;
+ }
+
+ bool pinTs(double* cubicT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more(*lineT)) {
+ return false;
+ }
+ double cT = *cubicT = SkPinT(*cubicT);
+ double lT = *lineT = SkPinT(*lineT);
+ SkDPoint lPt = fLine.ptAtT(lT);
+ SkDPoint cPt = fCubic.ptAtT(cT);
+ if (!lPt.roughlyEqual(cPt)) {
+ return false;
+ }
+ // FIXME: if points are roughly equal but not approximately equal, need to do
+ // a binary search like quad/quad intersection to find more precise t values
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && cT != 0 && cT != 1)) {
+ *pt = lPt;
+ } else if (ptSet == kPointUninitialized) {
+ *pt = cPt;
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (gridPt == fLine[0].asSkPoint()) {
+ *lineT = 0;
+ } else if (gridPt == fLine[1].asSkPoint()) {
+ *lineT = 1;
+ }
+ if (gridPt == fCubic[0].asSkPoint() && approximately_equal(*cubicT, 0)) {
+ *cubicT = 0;
+ } else if (gridPt == fCubic[3].asSkPoint() && approximately_equal(*cubicT, 1)) {
+ *cubicT = 1;
+ }
+ return true;
+ }
+
+private:
+ const SkDCubic& fCubic;
+ const SkDLine& fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDCubic& cubic, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineCubicIntersections c(cubic, line, this);
+ return c.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDCubic& cubic, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineCubicIntersections c(cubic, line, this);
+ return c.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDCubic& cubic, const SkDLine& line) {
+ LineCubicIntersections c(cubic, line, this);
+ c.allowNear(fAllowNear);
+ return c.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDCubic& cubic, const SkDLine& line) {
+ LineCubicIntersections c(cubic, line, this);
+ fUsed = c.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = cubic.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+// SkDCubic accessors to Intersection utilities
+
+int SkDCubic::horizontalIntersect(double yIntercept, double roots[3]) const {
+ return LineCubicIntersections::HorizontalIntersect(*this, yIntercept, roots);
+}
+
+int SkDCubic::verticalIntersect(double xIntercept, double roots[3]) const {
+ return LineCubicIntersections::VerticalIntersect(*this, xIntercept, roots);
+}
diff --git a/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp b/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp
new file mode 100644
index 0000000000..c7c7944580
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDCubicToQuads.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+http://stackoverflow.com/questions/2009160/how-do-i-convert-the-2-control-points-of-a-cubic-curve-to-the-single-control-poi
+*/
+
+/*
+Let's call the control points of the cubic Q0..Q3 and the control points of the quadratic P0..P2.
+Then for degree elevation, the equations are:
+
+Q0 = P0
+Q1 = 1/3 P0 + 2/3 P1
+Q2 = 2/3 P1 + 1/3 P2
+Q3 = P2
+In your case you have Q0..Q3 and you're solving for P0..P2. There are two ways to compute P1 from
+ the equations above:
+
+P1 = 3/2 Q1 - 1/2 Q0
+P1 = 3/2 Q2 - 1/2 Q3
+If this is a degree-elevated cubic, then both equations will give the same answer for P1. Since
+ it's likely not, your best bet is to average them. So,
+
+P1 = -1/4 Q0 + 3/4 Q1 + 3/4 Q2 - 1/4 Q3
+*/
+
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+// used for testing only
+SkDQuad SkDCubic::toQuad() const {
+ SkDQuad quad;
+ quad[0] = fPts[0];
+ const SkDPoint fromC1 = {(3 * fPts[1].fX - fPts[0].fX) / 2, (3 * fPts[1].fY - fPts[0].fY) / 2};
+ const SkDPoint fromC2 = {(3 * fPts[2].fX - fPts[3].fX) / 2, (3 * fPts[2].fY - fPts[3].fY) / 2};
+ quad[1].fX = (fromC1.fX + fromC2.fX) / 2;
+ quad[1].fY = (fromC1.fY + fromC2.fY) / 2;
+ quad[2] = fPts[3];
+ return quad;
+}
diff --git a/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp
new file mode 100644
index 0000000000..2660786f9c
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDLineIntersection.cpp
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cmath>
+#include <cstdint>
+#include <utility>
+
+void SkIntersections::cleanUpParallelLines(bool parallel) {
+ while (fUsed > 2) {
+ removeOne(1);
+ }
+ if (fUsed == 2 && !parallel) {
+ bool startMatch = fT[0][0] == 0 || zero_or_one(fT[1][0]);
+ bool endMatch = fT[0][1] == 1 || zero_or_one(fT[1][1]);
+ if ((!startMatch && !endMatch) || approximately_equal(fT[0][0], fT[0][1])) {
+ SkASSERT(startMatch || endMatch);
+ if (startMatch && endMatch && (fT[0][0] != 0 || !zero_or_one(fT[1][0]))
+ && fT[0][1] == 1 && zero_or_one(fT[1][1])) {
+ removeOne(0);
+ } else {
+ removeOne(endMatch);
+ }
+ }
+ }
+ if (fUsed == 2) {
+ fIsCoincident[0] = fIsCoincident[1] = 0x03;
+ }
+}
+
+void SkIntersections::computePoints(const SkDLine& line, int used) {
+ fPt[0] = line.ptAtT(fT[0][0]);
+ if ((fUsed = used) == 2) {
+ fPt[1] = line.ptAtT(fT[0][1]);
+ }
+}
+
+int SkIntersections::intersectRay(const SkDLine& a, const SkDLine& b) {
+ fMax = 2;
+ SkDVector aLen = a[1] - a[0];
+ SkDVector bLen = b[1] - b[0];
+ /* Slopes match when denom goes to zero:
+ axLen / ayLen == bxLen / byLen
+ (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+ byLen * axLen == ayLen * bxLen
+ byLen * axLen - ayLen * bxLen == 0 ( == denom )
+ */
+ double denom = bLen.fY * aLen.fX - aLen.fY * bLen.fX;
+ int used;
+ if (!approximately_zero(denom)) {
+ SkDVector ab0 = a[0] - b[0];
+ double numerA = ab0.fY * bLen.fX - bLen.fY * ab0.fX;
+ double numerB = ab0.fY * aLen.fX - aLen.fY * ab0.fX;
+ numerA /= denom;
+ numerB /= denom;
+ fT[0][0] = numerA;
+ fT[1][0] = numerB;
+ used = 1;
+ } else {
+ /* See if the axis intercepts match:
+ ay - ax * ayLen / axLen == by - bx * ayLen / axLen
+ axLen * (ay - ax * ayLen / axLen) == axLen * (by - bx * ayLen / axLen)
+ axLen * ay - ax * ayLen == axLen * by - bx * ayLen
+ */
+ if (!AlmostEqualUlps(aLen.fX * a[0].fY - aLen.fY * a[0].fX,
+ aLen.fX * b[0].fY - aLen.fY * b[0].fX)) {
+ return fUsed = 0;
+ }
+ // there's no great answer for intersection points for coincident rays, but return something
+ fT[0][0] = fT[1][0] = 0;
+ fT[0][1] = fT[1][1] = 1; // fix: was fT[1][0], which clobbered the line above and left fT[0][1] unset
+ used = 2;
+ }
+ computePoints(a, used);
+ return fUsed;
+}
+
+// note that this only works if both lines are neither horizontal nor vertical
+int SkIntersections::intersect(const SkDLine& a, const SkDLine& b) {
+ fMax = 3; // note that we clean up so that there is no more than two in the end
+ // see if end points intersect the opposite line
+ double t;
+ for (int iA = 0; iA < 2; ++iA) {
+ if ((t = b.exactPoint(a[iA])) >= 0) {
+ insert(iA, t, a[iA]);
+ }
+ }
+ for (int iB = 0; iB < 2; ++iB) {
+ if ((t = a.exactPoint(b[iB])) >= 0) {
+ insert(t, iB, b[iB]);
+ }
+ }
+ /* Determine the intersection point of two line segments
+ Return FALSE if the lines don't intersect
+ from: http://paulbourke.net/geometry/lineline2d/ */
+ double axLen = a[1].fX - a[0].fX;
+ double ayLen = a[1].fY - a[0].fY;
+ double bxLen = b[1].fX - b[0].fX;
+ double byLen = b[1].fY - b[0].fY;
+ /* Slopes match when denom goes to zero:
+ axLen / ayLen == bxLen / byLen
+ (ayLen * byLen) * axLen / ayLen == (ayLen * byLen) * bxLen / byLen
+ byLen * axLen == ayLen * bxLen
+ byLen * axLen - ayLen * bxLen == 0 ( == denom )
+ */
+ double axByLen = axLen * byLen;
+ double ayBxLen = ayLen * bxLen;
+ // detect parallel lines the same way here and in SkOpAngle operator <
+ // so that non-parallel means they are also sortable
+ bool unparallel = fAllowNear ? NotAlmostEqualUlps_Pin(axByLen, ayBxLen)
+ : NotAlmostDequalUlps(axByLen, ayBxLen);
+ if (unparallel && fUsed == 0) {
+ double ab0y = a[0].fY - b[0].fY;
+ double ab0x = a[0].fX - b[0].fX;
+ double numerA = ab0y * bxLen - byLen * ab0x;
+ double numerB = ab0y * axLen - ayLen * ab0x;
+ double denom = axByLen - ayBxLen;
+ if (between(0, numerA, denom) && between(0, numerB, denom)) {
+ fT[0][0] = numerA / denom;
+ fT[1][0] = numerB / denom;
+ computePoints(a, 1);
+ }
+ }
+/* Allow tracking that both sets of end points are near each other -- the lines are entirely
+ coincident -- even when the end points are not exactly the same.
+ Mark this as a 'wild card' for the end points, so that either point is considered totally
+ coincident. Then, avoid folding the lines over each other, but allow either end to mate
+ to the next set of lines.
+ */
+ if (fAllowNear || !unparallel) {
+ double aNearB[2];
+ double bNearA[2];
+ bool aNotB[2] = {false, false};
+ bool bNotA[2] = {false, false};
+ int nearCount = 0;
+ for (int index = 0; index < 2; ++index) {
+ aNearB[index] = t = b.nearPoint(a[index], &aNotB[index]);
+ nearCount += t >= 0;
+ bNearA[index] = t = a.nearPoint(b[index], &bNotA[index]);
+ nearCount += t >= 0;
+ }
+ if (nearCount > 0) {
+ // Skip if each segment contributes to one end point.
+ if (nearCount != 2 || aNotB[0] == aNotB[1]) {
+ for (int iA = 0; iA < 2; ++iA) {
+ if (!aNotB[iA]) {
+ continue;
+ }
+ int nearer = aNearB[iA] > 0.5;
+ if (!bNotA[nearer]) {
+ continue;
+ }
+ SkASSERT(a[iA] != b[nearer]);
+ SkOPASSERT(iA == (bNearA[nearer] > 0.5));
+ insertNear(iA, nearer, a[iA], b[nearer]);
+ aNearB[iA] = -1;
+ bNearA[nearer] = -1;
+ nearCount -= 2;
+ }
+ }
+ if (nearCount > 0) {
+ for (int iA = 0; iA < 2; ++iA) {
+ if (aNearB[iA] >= 0) {
+ insert(iA, aNearB[iA], a[iA]);
+ }
+ }
+ for (int iB = 0; iB < 2; ++iB) {
+ if (bNearA[iB] >= 0) {
+ insert(bNearA[iB], iB, b[iB]);
+ }
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(!unparallel);
+ SkASSERT(fUsed <= 2);
+ return fUsed;
+}
+
+static int horizontal_coincident(const SkDLine& line, double y) {
+ double min = line[0].fY;
+ double max = line[1].fY;
+ if (min > max) {
+ using std::swap;
+ swap(min, max);
+ }
+ if (min > y || max < y) {
+ return 0;
+ }
+ if (AlmostEqualUlps(min, max) && max - min < fabs(line[0].fX - line[1].fX)) {
+ return 2;
+ }
+ return 1;
+}
+
+double SkIntersections::HorizontalIntercept(const SkDLine& line, double y) {
+ SkASSERT(line[1].fY != line[0].fY);
+ return SkPinT((y - line[0].fY) / (line[1].fY - line[0].fY));
+}
+
+int SkIntersections::horizontal(const SkDLine& line, double left, double right,
+ double y, bool flipped) {
+ fMax = 3; // clean up parallel at the end will limit the result to 2 at the most
+ // see if end points intersect the opposite line
+ double t;
+ const SkDPoint leftPt = { left, y };
+ if ((t = line.exactPoint(leftPt)) >= 0) {
+ insert(t, (double) flipped, leftPt);
+ }
+ if (left != right) {
+ const SkDPoint rightPt = { right, y };
+ if ((t = line.exactPoint(rightPt)) >= 0) {
+ insert(t, (double) !flipped, rightPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::ExactPointH(line[index], left, right, y)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ int result = horizontal_coincident(line, y);
+ if (result == 1 && fUsed == 0) {
+ fT[0][0] = HorizontalIntercept(line, y);
+ double xIntercept = line[0].fX + fT[0][0] * (line[1].fX - line[0].fX);
+ if (between(left, xIntercept, right)) {
+ fT[1][0] = (xIntercept - left) / (right - left);
+ if (flipped) {
+ // OPTIMIZATION: ? instead of swapping, pass original line, use [1].fX - [0].fX
+ for (int index = 0; index < result; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+ }
+ fPt[0].fX = xIntercept;
+ fPt[0].fY = y;
+ fUsed = 1;
+ }
+ }
+ if (fAllowNear || result == 2) {
+ if ((t = line.nearPoint(leftPt, nullptr)) >= 0) {
+ insert(t, (double) flipped, leftPt);
+ }
+ if (left != right) {
+ const SkDPoint rightPt = { right, y };
+ if ((t = line.nearPoint(rightPt, nullptr)) >= 0) {
+ insert(t, (double) !flipped, rightPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::NearPointH(line[index], left, right, y)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(result == 2);
+ return fUsed;
+}
+
+static int vertical_coincident(const SkDLine& line, double x) {
+ double min = line[0].fX;
+ double max = line[1].fX;
+ if (min > max) {
+ using std::swap;
+ swap(min, max);
+ }
+ if (!precisely_between(min, x, max)) {
+ return 0;
+ }
+ if (AlmostEqualUlps(min, max)) {
+ return 2;
+ }
+ return 1;
+}
+
+double SkIntersections::VerticalIntercept(const SkDLine& line, double x) {
+ SkASSERT(line[1].fX != line[0].fX);
+ return SkPinT((x - line[0].fX) / (line[1].fX - line[0].fX));
+}
+
+int SkIntersections::vertical(const SkDLine& line, double top, double bottom,
+ double x, bool flipped) {
+ fMax = 3; // cleanup parallel lines will bring this back line
+ // see if end points intersect the opposite line
+ double t;
+ SkDPoint topPt = { x, top };
+ if ((t = line.exactPoint(topPt)) >= 0) {
+ insert(t, (double) flipped, topPt);
+ }
+ if (top != bottom) {
+ SkDPoint bottomPt = { x, bottom };
+ if ((t = line.exactPoint(bottomPt)) >= 0) {
+ insert(t, (double) !flipped, bottomPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::ExactPointV(line[index], top, bottom, x)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ int result = vertical_coincident(line, x);
+ if (result == 1 && fUsed == 0) {
+ fT[0][0] = VerticalIntercept(line, x);
+ double yIntercept = line[0].fY + fT[0][0] * (line[1].fY - line[0].fY);
+ if (between(top, yIntercept, bottom)) {
+ fT[1][0] = (yIntercept - top) / (bottom - top);
+ if (flipped) {
+ // OPTIMIZATION: instead of swapping, pass original line, use [1].fY - [0].fY
+ for (int index = 0; index < result; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+ }
+ fPt[0].fX = x;
+ fPt[0].fY = yIntercept;
+ fUsed = 1;
+ }
+ }
+ if (fAllowNear || result == 2) {
+ if ((t = line.nearPoint(topPt, nullptr)) >= 0) {
+ insert(t, (double) flipped, topPt);
+ }
+ if (top != bottom) {
+ SkDPoint bottomPt = { x, bottom };
+ if ((t = line.nearPoint(bottomPt, nullptr)) >= 0) {
+ insert(t, (double) !flipped, bottomPt);
+ }
+ for (int index = 0; index < 2; ++index) {
+ if ((t = SkDLine::NearPointV(line[index], top, bottom, x)) >= 0) {
+ insert((double) index, flipped ? 1 - t : t, line[index]);
+ }
+ }
+ }
+ }
+ cleanUpParallelLines(result == 2);
+ SkASSERT(fUsed <= 2);
+ return fUsed;
+}
+
diff --git a/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp b/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp
new file mode 100644
index 0000000000..2caeaeb8d7
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkDQuadLineIntersection.cpp
@@ -0,0 +1,478 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cmath>
+
+/*
+Find the intersection of a line and quadratic by solving for valid t values.
+
+From http://stackoverflow.com/questions/1853637/how-to-find-the-mathematical-function-defining-a-bezier-curve
+
+"A Bezier curve is a parametric function. A quadratic Bezier curve (i.e. three
+control points) can be expressed as: F(t) = A(1 - t)^2 + B(1 - t)t + Ct^2 where
+A, B and C are points and t goes from zero to one.
+
+This will give you two equations:
+
+ x = a(1 - t)^2 + b(1 - t)t + ct^2
+ y = d(1 - t)^2 + e(1 - t)t + ft^2
+
+If you add for instance the line equation (y = kx + m) to that, you'll end up
+with three equations and three unknowns (x, y and t)."
+
+Similar to above, the quadratic is represented as
+ x = a(1-t)^2 + 2b(1-t)t + ct^2
+ y = d(1-t)^2 + 2e(1-t)t + ft^2
+and the line as
+ y = g*x + h
+
+Using Mathematica, solve for the values of t where the quadratic intersects the
+line:
+
+ (in) t1 = Resultant[a*(1 - t)^2 + 2*b*(1 - t)*t + c*t^2 - x,
+ d*(1 - t)^2 + 2*e*(1 - t)*t + f*t^2 - g*x - h, x]
+ (out) -d + h + 2 d t - 2 e t - d t^2 + 2 e t^2 - f t^2 +
+ g (a - 2 a t + 2 b t + a t^2 - 2 b t^2 + c t^2)
+ (in) Solve[t1 == 0, t]
+ (out) {
+ {t -> (-2 d + 2 e + 2 a g - 2 b g -
+ Sqrt[(2 d - 2 e - 2 a g + 2 b g)^2 -
+ 4 (-d + 2 e - f + a g - 2 b g + c g) (-d + a g + h)]) /
+ (2 (-d + 2 e - f + a g - 2 b g + c g))
+ },
+ {t -> (-2 d + 2 e + 2 a g - 2 b g +
+ Sqrt[(2 d - 2 e - 2 a g + 2 b g)^2 -
+ 4 (-d + 2 e - f + a g - 2 b g + c g) (-d + a g + h)]) /
+ (2 (-d + 2 e - f + a g - 2 b g + c g))
+ }
+ }
+
+Using the results above (when the line tends towards horizontal)
+ A = (-(d - 2*e + f) + g*(a - 2*b + c) )
+ B = 2*( (d - e ) - g*(a - b ) )
+ C = (-(d ) + g*(a ) + h )
+
+If g goes to infinity, we can rewrite the line in terms of x.
+ x = g'*y + h'
+
+And solve accordingly in Mathematica:
+
+ (in) t2 = Resultant[a*(1 - t)^2 + 2*b*(1 - t)*t + c*t^2 - g'*y - h',
+ d*(1 - t)^2 + 2*e*(1 - t)*t + f*t^2 - y, y]
+ (out) a - h' - 2 a t + 2 b t + a t^2 - 2 b t^2 + c t^2 -
+ g' (d - 2 d t + 2 e t + d t^2 - 2 e t^2 + f t^2)
+ (in) Solve[t2 == 0, t]
+ (out) {
+ {t -> (2 a - 2 b - 2 d g' + 2 e g' -
+ Sqrt[(-2 a + 2 b + 2 d g' - 2 e g')^2 -
+ 4 (a - 2 b + c - d g' + 2 e g' - f g') (a - d g' - h')]) /
+ (2 (a - 2 b + c - d g' + 2 e g' - f g'))
+ },
+ {t -> (2 a - 2 b - 2 d g' + 2 e g' +
+ Sqrt[(-2 a + 2 b + 2 d g' - 2 e g')^2 -
+ 4 (a - 2 b + c - d g' + 2 e g' - f g') (a - d g' - h')])/
+ (2 (a - 2 b + c - d g' + 2 e g' - f g'))
+ }
+ }
+
+Thus, if the slope of the line tends towards vertical, we use:
+ A = ( (a - 2*b + c) - g'*(d - 2*e + f) )
+ B = 2*(-(a - b ) + g'*(d - e ) )
+ C = ( (a ) - g'*(d ) - h' )
+ */
+
+class LineQuadraticIntersections {
+public:
+ enum PinTPoint {
+ kPointUninitialized,
+ kPointInitialized
+ };
+
+ LineQuadraticIntersections(const SkDQuad& q, const SkDLine& l, SkIntersections* i)
+ : fQuad(q)
+ , fLine(&l)
+ , fIntersections(i)
+ , fAllowNear(true) {
+ i->setMax(5); // allow short partial coincidence plus discrete intersections
+ }
+
+ LineQuadraticIntersections(const SkDQuad& q)
+ : fQuad(q)
+ SkDEBUGPARAMS(fLine(nullptr))
+ SkDEBUGPARAMS(fIntersections(nullptr))
+ SkDEBUGPARAMS(fAllowNear(false)) {
+ }
+
+ void allowNear(bool allow) {
+ fAllowNear = allow;
+ }
+
+ void checkCoincident() {
+ int last = fIntersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ double quadMidT = ((*fIntersections)[0][index] + (*fIntersections)[0][index + 1]) / 2;
+ SkDPoint quadMidPt = fQuad.ptAtT(quadMidT);
+ double t = fLine->nearPoint(quadMidPt, nullptr);
+ if (t < 0) {
+ ++index;
+ continue;
+ }
+ if (fIntersections->isCoincident(index)) {
+ fIntersections->removeOne(index);
+ --last;
+ } else if (fIntersections->isCoincident(index + 1)) {
+ fIntersections->removeOne(index + 1);
+ --last;
+ } else {
+ fIntersections->setCoincident(index++);
+ }
+ fIntersections->setCoincident(index);
+ }
+ }
+
+ int intersectRay(double roots[2]) {
+ /*
+ solve by rotating line+quad so line is horizontal, then finding the roots
+ set up matrix to rotate quad to x-axis
+ |cos(a) -sin(a)|
+ |sin(a) cos(a)|
+ note that cos(a) = A(djacent) / Hypotenuse
+ sin(a) = O(pposite) / Hypotenuse
+ since we are computing Ts, we can ignore hypotenuse, the scale factor:
+ | A -O |
+ | O A |
+ A = line[1].fX - line[0].fX (adjacent side of the right triangle)
+ O = line[1].fY - line[0].fY (opposite side of the right triangle)
+ for each of the three points (e.g. n = 0 to 2)
+ quad[n].fY' = (quad[n].fY - line[0].fY) * A - (quad[n].fX - line[0].fX) * O
+ */
+ double adj = (*fLine)[1].fX - (*fLine)[0].fX;
+ double opp = (*fLine)[1].fY - (*fLine)[0].fY;
+ double r[3];
+ for (int n = 0; n < 3; ++n) {
+ r[n] = (fQuad[n].fY - (*fLine)[0].fY) * adj - (fQuad[n].fX - (*fLine)[0].fX) * opp;
+ }
+ double A = r[2];
+ double B = r[1];
+ double C = r[0];
+ A += C - 2 * B; // A = a - 2*b + c
+ B -= C; // B = -(b - c)
+ return SkDQuad::RootsValidT(A, 2 * B, C, roots);
+ }
+
+ int intersect() {
+ addExactEndPoints();
+ if (fAllowNear) {
+ addNearEndPoints();
+ }
+ double rootVals[2];
+ int roots = intersectRay(rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ double lineT = findLineT(quadT);
+ SkDPoint pt;
+ if (pinTs(&quadT, &lineT, &pt, kPointUninitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ int horizontalIntersect(double axisIntercept, double roots[2]) {
+ double D = fQuad[2].fY; // f
+ double E = fQuad[1].fY; // e
+ double F = fQuad[0].fY; // d
+ D += F - 2 * E; // D = d - 2*e + f
+ E -= F; // E = -(d - e)
+ F -= axisIntercept;
+ return SkDQuad::RootsValidT(D, 2 * E, F, roots);
+ }
+
+ int horizontalIntersect(double axisIntercept, double left, double right, bool flipped) {
+ addExactHorizontalEndPoints(left, right, axisIntercept);
+ if (fAllowNear) {
+ addNearHorizontalEndPoints(left, right, axisIntercept);
+ }
+ double rootVals[2];
+ int roots = horizontalIntersect(axisIntercept, rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ SkDPoint pt = fQuad.ptAtT(quadT);
+ double lineT = (pt.fX - left) / (right - left);
+ if (pinTs(&quadT, &lineT, &pt, kPointInitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+ bool uniqueAnswer(double quadT, const SkDPoint& pt) {
+ for (int inner = 0; inner < fIntersections->used(); ++inner) {
+ if (fIntersections->pt(inner) != pt) {
+ continue;
+ }
+ double existingQuadT = (*fIntersections)[0][inner];
+ if (quadT == existingQuadT) {
+ return false;
+ }
+ // check if midway on quad is also same point. If so, discard this
+ double quadMidT = (existingQuadT + quadT) / 2;
+ SkDPoint quadMidPt = fQuad.ptAtT(quadMidT);
+ if (quadMidPt.approximatelyEqual(pt)) {
+ return false;
+ }
+ }
+#if ONE_OFF_DEBUG
+ SkDPoint qPt = fQuad.ptAtT(quadT);
+ SkDebugf("%s pt=(%1.9g,%1.9g) cPt=(%1.9g,%1.9g)\n", __FUNCTION__, pt.fX, pt.fY,
+ qPt.fX, qPt.fY);
+#endif
+ return true;
+ }
+
+ int verticalIntersect(double axisIntercept, double roots[2]) {
+ double D = fQuad[2].fX; // f
+ double E = fQuad[1].fX; // e
+ double F = fQuad[0].fX; // d
+ D += F - 2 * E; // D = d - 2*e + f
+ E -= F; // E = -(d - e)
+ F -= axisIntercept;
+ return SkDQuad::RootsValidT(D, 2 * E, F, roots);
+ }
+
+ int verticalIntersect(double axisIntercept, double top, double bottom, bool flipped) {
+ addExactVerticalEndPoints(top, bottom, axisIntercept);
+ if (fAllowNear) {
+ addNearVerticalEndPoints(top, bottom, axisIntercept);
+ }
+ double rootVals[2];
+ int roots = verticalIntersect(axisIntercept, rootVals);
+ for (int index = 0; index < roots; ++index) {
+ double quadT = rootVals[index];
+ SkDPoint pt = fQuad.ptAtT(quadT);
+ double lineT = (pt.fY - top) / (bottom - top);
+ if (pinTs(&quadT, &lineT, &pt, kPointInitialized) && uniqueAnswer(quadT, pt)) {
+ fIntersections->insert(quadT, lineT, pt);
+ }
+ }
+ if (flipped) {
+ fIntersections->flip();
+ }
+ checkCoincident();
+ return fIntersections->used();
+ }
+
+protected:
+ // add endpoints first to get zero and one t values exactly
+ void addExactEndPoints() {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = fLine->exactPoint(fQuad[qIndex]);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearEndPoints() {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = fLine->nearPoint(fQuad[qIndex], nullptr);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addLineNearEndPoints() {
+ for (int lIndex = 0; lIndex < 2; ++lIndex) {
+ double lineT = (double) lIndex;
+ if (fIntersections->hasOppT(lineT)) {
+ continue;
+ }
+ double quadT = ((SkDCurve*) &fQuad)->nearPoint(SkPath::kQuad_Verb,
+ (*fLine)[lIndex], (*fLine)[!lIndex]);
+ if (quadT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, (*fLine)[lIndex]);
+ }
+ }
+
+ void addExactHorizontalEndPoints(double left, double right, double y) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = SkDLine::ExactPointH(fQuad[qIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearHorizontalEndPoints(double left, double right, double y) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointH(fQuad[qIndex], left, right, y);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ void addExactVerticalEndPoints(double top, double bottom, double x) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double lineT = SkDLine::ExactPointV(fQuad[qIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ double quadT = (double) (qIndex >> 1);
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ }
+
+ void addNearVerticalEndPoints(double top, double bottom, double x) {
+ for (int qIndex = 0; qIndex < 3; qIndex += 2) {
+ double quadT = (double) (qIndex >> 1);
+ if (fIntersections->hasT(quadT)) {
+ continue;
+ }
+ double lineT = SkDLine::NearPointV(fQuad[qIndex], top, bottom, x);
+ if (lineT < 0) {
+ continue;
+ }
+ fIntersections->insert(quadT, lineT, fQuad[qIndex]);
+ }
+ this->addLineNearEndPoints();
+ }
+
+ double findLineT(double t) {
+ SkDPoint xy = fQuad.ptAtT(t);
+ double dx = (*fLine)[1].fX - (*fLine)[0].fX;
+ double dy = (*fLine)[1].fY - (*fLine)[0].fY;
+ if (fabs(dx) > fabs(dy)) {
+ return (xy.fX - (*fLine)[0].fX) / dx;
+ }
+ return (xy.fY - (*fLine)[0].fY) / dy;
+ }
+
+ bool pinTs(double* quadT, double* lineT, SkDPoint* pt, PinTPoint ptSet) {
+ if (!approximately_one_or_less_double(*lineT)) {
+ return false;
+ }
+ if (!approximately_zero_or_more_double(*lineT)) {
+ return false;
+ }
+ double qT = *quadT = SkPinT(*quadT);
+ double lT = *lineT = SkPinT(*lineT);
+ if (lT == 0 || lT == 1 || (ptSet == kPointUninitialized && qT != 0 && qT != 1)) {
+ *pt = (*fLine).ptAtT(lT);
+ } else if (ptSet == kPointUninitialized) {
+ *pt = fQuad.ptAtT(qT);
+ }
+ SkPoint gridPt = pt->asSkPoint();
+ if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[0].asSkPoint())) {
+ *pt = (*fLine)[0];
+ *lineT = 0;
+ } else if (SkDPoint::ApproximatelyEqual(gridPt, (*fLine)[1].asSkPoint())) {
+ *pt = (*fLine)[1];
+ *lineT = 1;
+ }
+ if (fIntersections->used() > 0 && approximately_equal((*fIntersections)[1][0], *lineT)) {
+ return false;
+ }
+ if (gridPt == fQuad[0].asSkPoint()) {
+ *pt = fQuad[0];
+ *quadT = 0;
+ } else if (gridPt == fQuad[2].asSkPoint()) {
+ *pt = fQuad[2];
+ *quadT = 1;
+ }
+ return true;
+ }
+
+private:
+ const SkDQuad& fQuad;
+ const SkDLine* fLine;
+ SkIntersections* fIntersections;
+ bool fAllowNear;
+};
+
+int SkIntersections::horizontal(const SkDQuad& quad, double left, double right, double y,
+ bool flipped) {
+ SkDLine line = {{{ left, y }, { right, y }}};
+ LineQuadraticIntersections q(quad, line, this);
+ return q.horizontalIntersect(y, left, right, flipped);
+}
+
+int SkIntersections::vertical(const SkDQuad& quad, double top, double bottom, double x,
+ bool flipped) {
+ SkDLine line = {{{ x, top }, { x, bottom }}};
+ LineQuadraticIntersections q(quad, line, this);
+ return q.verticalIntersect(x, top, bottom, flipped);
+}
+
+int SkIntersections::intersect(const SkDQuad& quad, const SkDLine& line) {
+ LineQuadraticIntersections q(quad, line, this);
+ q.allowNear(fAllowNear);
+ return q.intersect();
+}
+
+int SkIntersections::intersectRay(const SkDQuad& quad, const SkDLine& line) {
+ LineQuadraticIntersections q(quad, line, this);
+ fUsed = q.intersectRay(fT[0]);
+ for (int index = 0; index < fUsed; ++index) {
+ fPt[index] = quad.ptAtT(fT[0][index]);
+ }
+ return fUsed;
+}
+
+int SkIntersections::HorizontalIntercept(const SkDQuad& quad, SkScalar y, double* roots) {
+ LineQuadraticIntersections q(quad);
+ return q.horizontalIntersect(y, roots);
+}
+
+int SkIntersections::VerticalIntercept(const SkDQuad& quad, SkScalar x, double* roots) {
+ LineQuadraticIntersections q(quad);
+ return q.verticalIntersect(x, roots);
+}
+
+// SkDQuad accessors to Intersection utilities
+
+int SkDQuad::horizontalIntersect(double yIntercept, double roots[2]) const {
+ return SkIntersections::HorizontalIntercept(*this, yIntercept, roots);
+}
+
+int SkDQuad::verticalIntersect(double xIntercept, double roots[2]) const {
+ return SkIntersections::VerticalIntercept(*this, xIntercept, roots);
+}
diff --git a/gfx/skia/skia/src/pathops/SkIntersectionHelper.h b/gfx/skia/skia/src/pathops/SkIntersectionHelper.h
new file mode 100644
index 0000000000..9eb7cbf807
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersectionHelper.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkIntersectionHelper_DEFINED
+#define SkIntersectionHelper_DEFINED
+
+#include "include/core/SkPath.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+
+#ifdef SK_DEBUG
+#include "src/pathops/SkPathOpsPoint.h"
+#endif
+
+class SkIntersectionHelper {
+public:
+ enum SegmentType {
+ kHorizontalLine_Segment = -1,
+ kVerticalLine_Segment = 0,
+ kLine_Segment = SkPath::kLine_Verb,
+ kQuad_Segment = SkPath::kQuad_Verb,
+ kConic_Segment = SkPath::kConic_Verb,
+ kCubic_Segment = SkPath::kCubic_Verb,
+ };
+
+ bool advance() {
+ fSegment = fSegment->next();
+ return fSegment != nullptr;
+ }
+
+ SkScalar bottom() const {
+ return bounds().fBottom;
+ }
+
+ const SkPathOpsBounds& bounds() const {
+ return fSegment->bounds();
+ }
+
+ SkOpContour* contour() const {
+ return fSegment->contour();
+ }
+
+ void init(SkOpContour* contour) {
+ fSegment = contour->first();
+ }
+
+ SkScalar left() const {
+ return bounds().fLeft;
+ }
+
+ const SkPoint* pts() const {
+ return fSegment->pts();
+ }
+
+ SkScalar right() const {
+ return bounds().fRight;
+ }
+
+ SkOpSegment* segment() const {
+ return fSegment;
+ }
+
+ SegmentType segmentType() const {
+ SegmentType type = (SegmentType) fSegment->verb();
+ if (type != kLine_Segment) {
+ return type;
+ }
+ if (fSegment->isHorizontal()) {
+ return kHorizontalLine_Segment;
+ }
+ if (fSegment->isVertical()) {
+ return kVerticalLine_Segment;
+ }
+ return kLine_Segment;
+ }
+
+ bool startAfter(const SkIntersectionHelper& after) {
+ fSegment = after.fSegment->next();
+ return fSegment != nullptr;
+ }
+
+ SkScalar top() const {
+ return bounds().fTop;
+ }
+
+ SkScalar weight() const {
+ return fSegment->weight();
+ }
+
+ SkScalar x() const {
+ return bounds().fLeft;
+ }
+
+ bool xFlipped() const {
+ return x() != pts()[0].fX;
+ }
+
+ SkScalar y() const {
+ return bounds().fTop;
+ }
+
+ bool yFlipped() const {
+ return y() != pts()[0].fY;
+ }
+
+private:
+ SkOpSegment* fSegment;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkIntersections.cpp b/gfx/skia/skia/src/pathops/SkIntersections.cpp
new file mode 100644
index 0000000000..4e49ee21e8
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersections.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pathops/SkIntersections.h"
+
+#include <string>
+
+int SkIntersections::closestTo(double rangeStart, double rangeEnd, const SkDPoint& testPt,
+ double* closestDist) const {
+ int closest = -1;
+ *closestDist = SK_ScalarMax;
+ for (int index = 0; index < fUsed; ++index) {
+ if (!between(rangeStart, fT[0][index], rangeEnd)) {
+ continue;
+ }
+ const SkDPoint& iPt = fPt[index];
+ double dist = testPt.distanceSquared(iPt);
+ if (*closestDist > dist) {
+ *closestDist = dist;
+ closest = index;
+ }
+ }
+ return closest;
+}
+
+void SkIntersections::flip() {
+ for (int index = 0; index < fUsed; ++index) {
+ fT[1][index] = 1 - fT[1][index];
+ }
+}
+
+int SkIntersections::insert(double one, double two, const SkDPoint& pt) {
+ if (fIsCoincident[0] == 3 && between(fT[0][0], one, fT[0][1])) {
+ // For now, don't allow a mix of coincident and non-coincident intersections
+ return -1;
+ }
+ SkASSERT(fUsed <= 1 || fT[0][0] <= fT[0][1]);
+ int index;
+ for (index = 0; index < fUsed; ++index) {
+ double oldOne = fT[0][index];
+ double oldTwo = fT[1][index];
+ if (one == oldOne && two == oldTwo) {
+ return -1;
+ }
+ if (more_roughly_equal(oldOne, one) && more_roughly_equal(oldTwo, two)) {
+ if ((!precisely_zero(one) || precisely_zero(oldOne))
+ && (!precisely_equal(one, 1) || precisely_equal(oldOne, 1))
+ && (!precisely_zero(two) || precisely_zero(oldTwo))
+ && (!precisely_equal(two, 1) || precisely_equal(oldTwo, 1))) {
+ return -1;
+ }
+ SkASSERT(one >= 0 && one <= 1);
+ SkASSERT(two >= 0 && two <= 1);
+ // remove this and reinsert below in case replacing would make list unsorted
+ int remaining = fUsed - index - 1;
+ memmove(&fPt[index], &fPt[index + 1], sizeof(fPt[0]) * remaining);
+ memmove(&fT[0][index], &fT[0][index + 1], sizeof(fT[0][0]) * remaining);
+ memmove(&fT[1][index], &fT[1][index + 1], sizeof(fT[1][0]) * remaining);
+ int clearMask = ~((1 << index) - 1);
+ fIsCoincident[0] -= (fIsCoincident[0] >> 1) & clearMask;
+ fIsCoincident[1] -= (fIsCoincident[1] >> 1) & clearMask;
+ --fUsed;
+ break;
+ }
+ #if ONE_OFF_DEBUG
+ if (pt.roughlyEqual(fPt[index])) {
+ SkDebugf("%s t=%1.9g pts roughly equal\n", __FUNCTION__, one);
+ }
+ #endif
+ }
+ for (index = 0; index < fUsed; ++index) {
+ if (fT[0][index] > one) {
+ break;
+ }
+ }
+ if (fUsed >= fMax) {
+ SkOPASSERT(0); // FIXME : this error, if it is to be handled at runtime in release, must
+ // be propagated all the way back down to the caller, and return failure.
+ fUsed = 0;
+ return 0;
+ }
+ int remaining = fUsed - index;
+ if (remaining > 0) {
+ memmove(&fPt[index + 1], &fPt[index], sizeof(fPt[0]) * remaining);
+ memmove(&fT[0][index + 1], &fT[0][index], sizeof(fT[0][0]) * remaining);
+ memmove(&fT[1][index + 1], &fT[1][index], sizeof(fT[1][0]) * remaining);
+ int clearMask = ~((1 << index) - 1);
+ fIsCoincident[0] += fIsCoincident[0] & clearMask;
+ fIsCoincident[1] += fIsCoincident[1] & clearMask;
+ }
+ fPt[index] = pt;
+ if (one < 0 || one > 1) {
+ return -1;
+ }
+ if (two < 0 || two > 1) {
+ return -1;
+ }
+ fT[0][index] = one;
+ fT[1][index] = two;
+ ++fUsed;
+ SkASSERT(fUsed <= std::size(fPt));
+ return index;
+}
+
+void SkIntersections::insertNear(double one, double two, const SkDPoint& pt1, const SkDPoint& pt2) {
+ SkASSERT(one == 0 || one == 1);
+ SkASSERT(two == 0 || two == 1);
+ SkASSERT(pt1 != pt2);
+ fNearlySame[one ? 1 : 0] = true;
+ (void) insert(one, two, pt1);
+ fPt2[one ? 1 : 0] = pt2;
+}
+
+int SkIntersections::insertCoincident(double one, double two, const SkDPoint& pt) {
+ int index = insertSwap(one, two, pt);
+ if (index >= 0) {
+ setCoincident(index);
+ }
+ return index;
+}
+
+void SkIntersections::setCoincident(int index) {
+ SkASSERT(index >= 0);
+ int bit = 1 << index;
+ fIsCoincident[0] |= bit;
+ fIsCoincident[1] |= bit;
+}
+
+void SkIntersections::merge(const SkIntersections& a, int aIndex, const SkIntersections& b,
+ int bIndex) {
+ this->reset();
+ fT[0][0] = a.fT[0][aIndex];
+ fT[1][0] = b.fT[0][bIndex];
+ fPt[0] = a.fPt[aIndex];
+ fPt2[0] = b.fPt[bIndex];
+ fUsed = 1;
+}
+
+int SkIntersections::mostOutside(double rangeStart, double rangeEnd, const SkDPoint& origin) const {
+ int result = -1;
+ for (int index = 0; index < fUsed; ++index) {
+ if (!between(rangeStart, fT[0][index], rangeEnd)) {
+ continue;
+ }
+ if (result < 0) {
+ result = index;
+ continue;
+ }
+ SkDVector best = fPt[result] - origin;
+ SkDVector test = fPt[index] - origin;
+ if (test.crossCheck(best) < 0) {
+ result = index;
+ }
+ }
+ return result;
+}
+
+void SkIntersections::removeOne(int index) {
+ int remaining = --fUsed - index;
+ if (remaining <= 0) {
+ return;
+ }
+ memmove(&fPt[index], &fPt[index + 1], sizeof(fPt[0]) * remaining);
+ memmove(&fT[0][index], &fT[0][index + 1], sizeof(fT[0][0]) * remaining);
+ memmove(&fT[1][index], &fT[1][index + 1], sizeof(fT[1][0]) * remaining);
+// SkASSERT(fIsCoincident[0] == 0);
+ int coBit = fIsCoincident[0] & (1 << index);
+ fIsCoincident[0] -= ((fIsCoincident[0] >> 1) & ~((1 << index) - 1)) + coBit;
+ SkASSERT(!(coBit ^ (fIsCoincident[1] & (1 << index))));
+ fIsCoincident[1] -= ((fIsCoincident[1] >> 1) & ~((1 << index) - 1)) + coBit;
+}
diff --git a/gfx/skia/skia/src/pathops/SkIntersections.h b/gfx/skia/skia/src/pathops/SkIntersections.h
new file mode 100644
index 0000000000..ff4c63debd
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkIntersections.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkIntersections_DEFINE
+#define SkIntersections_DEFINE
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <array>
+#include <cstdint>
+
+struct SkDRect;
+
+class SkIntersections {
+public:
+ SkIntersections(SkDEBUGCODE(SkOpGlobalState* globalState = nullptr))
+ : fSwap(0)
+#ifdef SK_DEBUG
+ SkDEBUGPARAMS(fDebugGlobalState(globalState))
+ , fDepth(0)
+#endif
+ {
+ sk_bzero(fPt, sizeof(fPt));
+ sk_bzero(fPt2, sizeof(fPt2));
+ sk_bzero(fT, sizeof(fT));
+ sk_bzero(fNearlySame, sizeof(fNearlySame));
+#if DEBUG_T_SECT_LOOP_COUNT
+ sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+#endif
+ reset();
+ fMax = 0; // require that the caller set the max
+ }
+
+ class TArray {
+ public:
+ explicit TArray(const double ts[10]) : fTArray(ts) {}
+ double operator[](int n) const {
+ return fTArray[n];
+ }
+ const double* fTArray;
+ };
+ TArray operator[](int n) const { return TArray(fT[n]); }
+
+ void allowNear(bool nearAllowed) {
+ fAllowNear = nearAllowed;
+ }
+
+ void clearCoincidence(int index) {
+ SkASSERT(index >= 0);
+ int bit = 1 << index;
+ fIsCoincident[0] &= ~bit;
+ fIsCoincident[1] &= ~bit;
+ }
+
+ int conicHorizontal(const SkPoint a[3], SkScalar weight, SkScalar left, SkScalar right,
+ SkScalar y, bool flipped) {
+ SkDConic conic;
+ conic.set(a, weight);
+ fMax = 2;
+ return horizontal(conic, left, right, y, flipped);
+ }
+
+ int conicVertical(const SkPoint a[3], SkScalar weight, SkScalar top, SkScalar bottom,
+ SkScalar x, bool flipped) {
+ SkDConic conic;
+ conic.set(a, weight);
+ fMax = 2;
+ return vertical(conic, top, bottom, x, flipped);
+ }
+
+ int conicLine(const SkPoint a[3], SkScalar weight, const SkPoint b[2]) {
+ SkDConic conic;
+ conic.set(a, weight);
+ SkDLine line;
+ line.set(b);
+ fMax = 3; // 2; permit small coincident segment + non-coincident intersection
+ return intersect(conic, line);
+ }
+
+ int cubicHorizontal(const SkPoint a[4], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDCubic cubic;
+ cubic.set(a);
+ fMax = 3;
+ return horizontal(cubic, left, right, y, flipped);
+ }
+
+ int cubicVertical(const SkPoint a[4], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDCubic cubic;
+ cubic.set(a);
+ fMax = 3;
+ return vertical(cubic, top, bottom, x, flipped);
+ }
+
+ int cubicLine(const SkPoint a[4], const SkPoint b[2]) {
+ SkDCubic cubic;
+ cubic.set(a);
+ SkDLine line;
+ line.set(b);
+ fMax = 3;
+ return intersect(cubic, line);
+ }
+
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const { return fDebugGlobalState; }
+#endif
+
+ bool hasT(double t) const {
+ SkASSERT(t == 0 || t == 1);
+ return fUsed > 0 && (t == 0 ? fT[0][0] == 0 : fT[0][fUsed - 1] == 1);
+ }
+
+ bool hasOppT(double t) const {
+ SkASSERT(t == 0 || t == 1);
+ return fUsed > 0 && (fT[1][0] == t || fT[1][fUsed - 1] == t);
+ }
+
+ int insertSwap(double one, double two, const SkDPoint& pt) {
+ if (fSwap) {
+ return insert(two, one, pt);
+ } else {
+ return insert(one, two, pt);
+ }
+ }
+
+ bool isCoincident(int index) {
+ return (fIsCoincident[0] & 1 << index) != 0;
+ }
+
+ int lineHorizontal(const SkPoint a[2], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDLine line;
+ line.set(a);
+ fMax = 2;
+ return horizontal(line, left, right, y, flipped);
+ }
+
+ int lineVertical(const SkPoint a[2], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDLine line;
+ line.set(a);
+ fMax = 2;
+ return vertical(line, top, bottom, x, flipped);
+ }
+
+ int lineLine(const SkPoint a[2], const SkPoint b[2]) {
+ SkDLine aLine, bLine;
+ aLine.set(a);
+ bLine.set(b);
+ fMax = 2;
+ return intersect(aLine, bLine);
+ }
+
+ bool nearlySame(int index) const {
+ SkASSERT(index == 0 || index == 1);
+ return fNearlySame[index];
+ }
+
+ const SkDPoint& pt(int index) const {
+ return fPt[index];
+ }
+
+ const SkDPoint& pt2(int index) const {
+ return fPt2[index];
+ }
+
+ int quadHorizontal(const SkPoint a[3], SkScalar left, SkScalar right, SkScalar y,
+ bool flipped) {
+ SkDQuad quad;
+ quad.set(a);
+ fMax = 2;
+ return horizontal(quad, left, right, y, flipped);
+ }
+
+ int quadVertical(const SkPoint a[3], SkScalar top, SkScalar bottom, SkScalar x, bool flipped) {
+ SkDQuad quad;
+ quad.set(a);
+ fMax = 2;
+ return vertical(quad, top, bottom, x, flipped);
+ }
+
+ int quadLine(const SkPoint a[3], const SkPoint b[2]) {
+ SkDQuad quad;
+ quad.set(a);
+ SkDLine line;
+ line.set(b);
+ return intersect(quad, line);
+ }
+
+ // leaves swap, max alone
+ void reset() {
+ fAllowNear = true;
+ fUsed = 0;
+ sk_bzero(fIsCoincident, sizeof(fIsCoincident));
+ }
+
+ void set(bool swap, int tIndex, double t) {
+ fT[(int) swap][tIndex] = t;
+ }
+
+ void setMax(int max) {
+ SkASSERT(max <= (int) std::size(fPt));
+ fMax = max;
+ }
+
+ void swap() {
+ fSwap ^= true;
+ }
+
+ bool swapped() const {
+ return fSwap;
+ }
+
+ int used() const {
+ return fUsed;
+ }
+
+ void downDepth() {
+ SkASSERT(--fDepth >= 0);
+ }
+
+ bool unBumpT(int index) {
+ SkASSERT(fUsed == 1);
+ fT[0][index] = fT[0][index] * (1 + BUMP_EPSILON * 2) - BUMP_EPSILON;
+ if (!between(0, fT[0][index], 1)) {
+ fUsed = 0;
+ return false;
+ }
+ return true;
+ }
+
+ void upDepth() {
+ SkASSERT(++fDepth < 16);
+ }
+
+ void alignQuadPts(const SkPoint a[3], const SkPoint b[3]);
+ int cleanUpCoincidence();
+ int closestTo(double rangeStart, double rangeEnd, const SkDPoint& testPt, double* dist) const;
+ void cubicInsert(double one, double two, const SkDPoint& pt, const SkDCubic& c1,
+ const SkDCubic& c2);
+ void flip();
+ int horizontal(const SkDLine&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDQuad&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDQuad&, double left, double right, double y, double tRange[2]);
+ int horizontal(const SkDCubic&, double y, double tRange[3]);
+ int horizontal(const SkDConic&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDCubic&, double left, double right, double y, bool flipped);
+ int horizontal(const SkDCubic&, double left, double right, double y, double tRange[3]);
+ static double HorizontalIntercept(const SkDLine& line, double y);
+ static int HorizontalIntercept(const SkDQuad& quad, SkScalar y, double* roots);
+ static int HorizontalIntercept(const SkDConic& conic, SkScalar y, double* roots);
+ // FIXME : does not respect swap
+ int insert(double one, double two, const SkDPoint& pt);
+ void insertNear(double one, double two, const SkDPoint& pt1, const SkDPoint& pt2);
+ // start if index == 0 : end if index == 1
+ int insertCoincident(double one, double two, const SkDPoint& pt);
+ int intersect(const SkDLine&, const SkDLine&);
+ int intersect(const SkDQuad&, const SkDLine&);
+ int intersect(const SkDQuad&, const SkDQuad&);
+ int intersect(const SkDConic&, const SkDLine&);
+ int intersect(const SkDConic&, const SkDQuad&);
+ int intersect(const SkDConic&, const SkDConic&);
+ int intersect(const SkDCubic&, const SkDLine&);
+ int intersect(const SkDCubic&, const SkDQuad&);
+ int intersect(const SkDCubic&, const SkDConic&);
+ int intersect(const SkDCubic&, const SkDCubic&);
+ int intersectRay(const SkDLine&, const SkDLine&);
+ int intersectRay(const SkDQuad&, const SkDLine&);
+ int intersectRay(const SkDConic&, const SkDLine&);
+ int intersectRay(const SkDCubic&, const SkDLine&);
+ int intersectRay(const SkTCurve& tCurve, const SkDLine& line) {
+ return tCurve.intersectRay(this, line);
+ }
+
+ void merge(const SkIntersections& , int , const SkIntersections& , int );
+ int mostOutside(double rangeStart, double rangeEnd, const SkDPoint& origin) const;
+ void removeOne(int index);
+ void setCoincident(int index);
+ int vertical(const SkDLine&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDQuad&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDConic&, double top, double bottom, double x, bool flipped);
+ int vertical(const SkDCubic&, double top, double bottom, double x, bool flipped);
+ static double VerticalIntercept(const SkDLine& line, double x);
+ static int VerticalIntercept(const SkDQuad& quad, SkScalar x, double* roots);
+ static int VerticalIntercept(const SkDConic& conic, SkScalar x, double* roots);
+
+ int depth() const {
+#ifdef SK_DEBUG
+ return fDepth;
+#else
+ return 0;
+#endif
+ }
+
+ enum DebugLoop {
+ kIterations_DebugLoop,
+ kCoinCheck_DebugLoop,
+ kComputePerp_DebugLoop,
+ };
+
+ void debugBumpLoopCount(DebugLoop );
+ int debugCoincidentUsed() const;
+ int debugLoopCount(DebugLoop ) const;
+ void debugResetLoopCount();
+ void dump() const; // implemented for testing only
+
+private:
+ bool cubicCheckCoincidence(const SkDCubic& c1, const SkDCubic& c2);
+ bool cubicExactEnd(const SkDCubic& cubic1, bool start, const SkDCubic& cubic2);
+ void cubicNearEnd(const SkDCubic& cubic1, bool start, const SkDCubic& cubic2, const SkDRect& );
+ void cleanUpParallelLines(bool parallel);
+ void computePoints(const SkDLine& line, int used);
+
+ SkDPoint fPt[13]; // FIXME: since scans store points as SkPoint, this should also
+ SkDPoint fPt2[2]; // used by nearly same to store alternate intersection point
+ double fT[2][13];
+ uint16_t fIsCoincident[2]; // bit set for each curve's coincident T
+ bool fNearlySame[2]; // true if end points nearly match
+ unsigned char fUsed;
+ unsigned char fMax;
+ bool fAllowNear;
+ bool fSwap;
+#ifdef SK_DEBUG
+ SkOpGlobalState* fDebugGlobalState;
+ int fDepth;
+#endif
+#if DEBUG_T_SECT_LOOP_COUNT
+ int fDebugLoopCount[3];
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkLineParameters.h b/gfx/skia/skia/src/pathops/SkLineParameters.h
new file mode 100644
index 0000000000..45d1ed4ed6
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkLineParameters.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLineParameters_DEFINED
+#define SkLineParameters_DEFINED
+
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+// Sources
+// computer-aided design - volume 22 number 9 november 1990 pp 538 - 549
+// online at http://cagd.cs.byu.edu/~tom/papers/bezclip.pdf
+
+// This turns a line segment into a parameterized line, of the form
+// ax + by + c = 0
+// When a^2 + b^2 == 1, the line is normalized.
+// The distance to the line for (x, y) is d(x,y) = ax + by + c
+//
+// Note that the distances below are not necessarily normalized. To get the true
+// distance, it's necessary to either call normalize() after xxxEndPoints(), or
+// divide the result of xxxDistance() by sqrt(normalSquared())
+
+class SkLineParameters {
+public:
+
+ bool cubicEndPoints(const SkDCubic& pts) {
+ int endIndex = 1;
+ cubicEndPoints(pts, 0, endIndex);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ cubicEndPoints(pts, 0, ++endIndex);
+ SkASSERT(endIndex == 2);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ cubicEndPoints(pts, 0, ++endIndex); // line
+ SkASSERT(endIndex == 3);
+ return false;
+ }
+ }
+ // FIXME: after switching to round sort, remove bumping fA
+ if (dx() < 0) { // only worry about y bias when breaking cw/ccw tie
+ return true;
+ }
+ // if cubic tangent is on x axis, look at next control point to break tie
+ // control point may be approximate, so it must move significantly to account for error
+ if (NotAlmostEqualUlps(pts[0].fY, pts[++endIndex].fY)) {
+ if (pts[0].fY > pts[endIndex].fY) {
+ fA = DBL_EPSILON; // push it from 0 to slightly negative (y() returns -a)
+ }
+ return true;
+ }
+ if (endIndex == 3) {
+ return true;
+ }
+ SkASSERT(endIndex == 2);
+ if (pts[0].fY > pts[3].fY) {
+ fA = DBL_EPSILON; // push it from 0 to slightly negative (y() returns -a)
+ }
+ return true;
+ }
+
+ void cubicEndPoints(const SkDCubic& pts, int s, int e) {
+ fA = pts[s].fY - pts[e].fY;
+ fB = pts[e].fX - pts[s].fX;
+ fC = pts[s].fX * pts[e].fY - pts[e].fX * pts[s].fY;
+ }
+
+ double cubicPart(const SkDCubic& part) {
+ cubicEndPoints(part);
+ if (part[0] == part[1] || ((const SkDLine& ) part[0]).nearRay(part[2])) {
+ return pointDistance(part[3]);
+ }
+ return pointDistance(part[2]);
+ }
+
+ void lineEndPoints(const SkDLine& pts) {
+ fA = pts[0].fY - pts[1].fY;
+ fB = pts[1].fX - pts[0].fX;
+ fC = pts[0].fX * pts[1].fY - pts[1].fX * pts[0].fY;
+ }
+
+ bool quadEndPoints(const SkDQuad& pts) {
+ quadEndPoints(pts, 0, 1);
+ if (dy() != 0) {
+ return true;
+ }
+ if (dx() == 0) {
+ quadEndPoints(pts, 0, 2);
+ return false;
+ }
+ if (dx() < 0) { // only worry about y bias when breaking cw/ccw tie
+ return true;
+ }
+ // FIXME: after switching to round sort, remove this
+ if (pts[0].fY > pts[2].fY) {
+ fA = DBL_EPSILON;
+ }
+ return true;
+ }
+
+ void quadEndPoints(const SkDQuad& pts, int s, int e) {
+ fA = pts[s].fY - pts[e].fY;
+ fB = pts[e].fX - pts[s].fX;
+ fC = pts[s].fX * pts[e].fY - pts[e].fX * pts[s].fY;
+ }
+
+ double quadPart(const SkDQuad& part) {
+ quadEndPoints(part);
+ return pointDistance(part[2]);
+ }
+
+ double normalSquared() const {
+ return fA * fA + fB * fB;
+ }
+
+ bool normalize() {
+ double normal = sqrt(normalSquared());
+ if (approximately_zero(normal)) {
+ fA = fB = fC = 0;
+ return false;
+ }
+ double reciprocal = 1 / normal;
+ fA *= reciprocal;
+ fB *= reciprocal;
+ fC *= reciprocal;
+ return true;
+ }
+
+ void cubicDistanceY(const SkDCubic& pts, SkDCubic& distance) const {
+ double oneThird = 1 / 3.0;
+ for (int index = 0; index < 4; ++index) {
+ distance[index].fX = index * oneThird;
+ distance[index].fY = fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+ }
+
+ void quadDistanceY(const SkDQuad& pts, SkDQuad& distance) const {
+ double oneHalf = 1 / 2.0;
+ for (int index = 0; index < 3; ++index) {
+ distance[index].fX = index * oneHalf;
+ distance[index].fY = fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+ }
+
+ double controlPtDistance(const SkDCubic& pts, int index) const {
+ SkASSERT(index == 1 || index == 2);
+ return fA * pts[index].fX + fB * pts[index].fY + fC;
+ }
+
+ double controlPtDistance(const SkDQuad& pts) const {
+ return fA * pts[1].fX + fB * pts[1].fY + fC;
+ }
+
+ double pointDistance(const SkDPoint& pt) const {
+ return fA * pt.fX + fB * pt.fY + fC;
+ }
+
+ double dx() const {
+ return fB;
+ }
+
+ double dy() const {
+ return -fA;
+ }
+
+private:
+ double fA;
+ double fB;
+ double fC;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpAngle.cpp b/gfx/skia/skia/src/pathops/SkOpAngle.cpp
new file mode 100644
index 0000000000..d36b3ec7fc
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpAngle.cpp
@@ -0,0 +1,1156 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkOpAngle.h"
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkTSort.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+
+#include <algorithm>
+#include <cmath>
+
+/* Angles are sorted counterclockwise. The smallest angle has a positive x and the smallest
+ positive y. The largest angle has a positive x and a zero y. */
+
+#if DEBUG_ANGLE
+ static bool CompareResult(const char* func, SkString* bugOut, SkString* bugPart, int append,
+ bool compare) {
+ SkDebugf("%s %c %d\n", bugOut->c_str(), compare ? 'T' : 'F', append);
+ SkDebugf("%sPart %s\n", func, bugPart[0].c_str());
+ SkDebugf("%sPart %s\n", func, bugPart[1].c_str());
+ SkDebugf("%sPart %s\n", func, bugPart[2].c_str());
+ return compare;
+ }
+
+ #define COMPARE_RESULT(append, compare) CompareResult(__FUNCTION__, &bugOut, bugPart, append, \
+ compare)
+#else
+ #define COMPARE_RESULT(append, compare) compare
+#endif
+
+/* quarter angle values for sector
+
+31 x > 0, y == 0 horizontal line (to the right)
+0 x > 0, y == epsilon quad/cubic horizontal tangent eventually going +y
+1 x > 0, y > 0, x > y nearer horizontal angle
+2 x + e == y quad/cubic 45 going horiz
+3 x > 0, y > 0, x == y 45 angle
+4 x == y + e quad/cubic 45 going vert
+5 x > 0, y > 0, x < y nearer vertical angle
+6 x == epsilon, y > 0 quad/cubic vertical tangent eventually going +x
+7 x == 0, y > 0 vertical line (to the top)
+
+ 8 7 6
+ 9 | 5
+ 10 | 4
+ 11 | 3
+ 12 \ | / 2
+ 13 | 1
+ 14 | 0
+ 15 --------------+------------- 31
+ 16 | 30
+ 17 | 29
+ 18 / | \ 28
+ 19 | 27
+ 20 | 26
+ 21 | 25
+ 22 23 24
+*/
+
+// return true if lh < this < rh
+bool SkOpAngle::after(SkOpAngle* test) {
+ SkOpAngle* lh = test;
+ SkOpAngle* rh = lh->fNext;
+ SkASSERT(lh != rh);
+ fPart.fCurve = fOriginalCurvePart;
+ // Adjust lh and rh to share the same origin (floating point error in intersections can mean
+ // they aren't exactly the same).
+ lh->fPart.fCurve = lh->fOriginalCurvePart;
+ lh->fPart.fCurve[0] = fPart.fCurve[0];
+ rh->fPart.fCurve = rh->fOriginalCurvePart;
+ rh->fPart.fCurve[0] = fPart.fCurve[0];
+
+#if DEBUG_ANGLE
+ SkString bugOut;
+ bugOut.printf("%s [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g ", __FUNCTION__,
+ lh->segment()->debugID(), lh->debugID(), lh->fSectorStart, lh->fSectorEnd,
+ lh->fStart->t(), lh->fEnd->t(),
+ segment()->debugID(), debugID(), fSectorStart, fSectorEnd, fStart->t(), fEnd->t(),
+ rh->segment()->debugID(), rh->debugID(), rh->fSectorStart, rh->fSectorEnd,
+ rh->fStart->t(), rh->fEnd->t());
+ SkString bugPart[3] = { lh->debugPart(), this->debugPart(), rh->debugPart() };
+#endif
+ if (lh->fComputeSector && !lh->computeSector()) {
+ return COMPARE_RESULT(1, true);
+ }
+ if (fComputeSector && !this->computeSector()) {
+ return COMPARE_RESULT(2, true);
+ }
+ if (rh->fComputeSector && !rh->computeSector()) {
+ return COMPARE_RESULT(3, true);
+ }
+#if DEBUG_ANGLE // reset bugOut with computed sectors
+ bugOut.printf("%s [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g"
+ " < [%d/%d] %d/%d tStart=%1.9g tEnd=%1.9g ", __FUNCTION__,
+ lh->segment()->debugID(), lh->debugID(), lh->fSectorStart, lh->fSectorEnd,
+ lh->fStart->t(), lh->fEnd->t(),
+ segment()->debugID(), debugID(), fSectorStart, fSectorEnd, fStart->t(), fEnd->t(),
+ rh->segment()->debugID(), rh->debugID(), rh->fSectorStart, rh->fSectorEnd,
+ rh->fStart->t(), rh->fEnd->t());
+#endif
+ bool ltrOverlap = (lh->fSectorMask | rh->fSectorMask) & fSectorMask;
+ bool lrOverlap = lh->fSectorMask & rh->fSectorMask;
+ int lrOrder; // set to -1 if either order works
+ if (!lrOverlap) { // no lh/rh sector overlap
+ if (!ltrOverlap) { // no lh/this/rh sector overlap
+ return COMPARE_RESULT(4, (lh->fSectorEnd > rh->fSectorStart)
+ ^ (fSectorStart > lh->fSectorEnd) ^ (fSectorStart > rh->fSectorStart));
+ }
+ int lrGap = (rh->fSectorStart - lh->fSectorStart + 32) & 0x1f;
+ /* A tiny change can move the start +/- 4. The order can only be determined if
+ lr gap is not 12 to 20 or -12 to -20.
+ -31 ..-21 1
+ -20 ..-12 -1
+ -11 .. -1 0
+ 0 shouldn't get here
+ 11 .. 1 1
+ 12 .. 20 -1
+ 21 .. 31 0
+ */
+ lrOrder = lrGap > 20 ? 0 : lrGap > 11 ? -1 : 1;
+ } else {
+ lrOrder = lh->orderable(rh);
+ if (!ltrOverlap && lrOrder >= 0) {
+ return COMPARE_RESULT(5, !lrOrder);
+ }
+ }
+ int ltOrder;
+ SkASSERT((lh->fSectorMask & fSectorMask) || (rh->fSectorMask & fSectorMask) || -1 == lrOrder);
+ if (lh->fSectorMask & fSectorMask) {
+ ltOrder = lh->orderable(this);
+ } else {
+ int ltGap = (fSectorStart - lh->fSectorStart + 32) & 0x1f;
+ ltOrder = ltGap > 20 ? 0 : ltGap > 11 ? -1 : 1;
+ }
+ int trOrder;
+ if (rh->fSectorMask & fSectorMask) {
+ trOrder = this->orderable(rh);
+ } else {
+ int trGap = (rh->fSectorStart - fSectorStart + 32) & 0x1f;
+ trOrder = trGap > 20 ? 0 : trGap > 11 ? -1 : 1;
+ }
+ this->alignmentSameSide(lh, &ltOrder);
+ this->alignmentSameSide(rh, &trOrder);
+ if (lrOrder >= 0 && ltOrder >= 0 && trOrder >= 0) {
+ return COMPARE_RESULT(7, lrOrder ? (ltOrder & trOrder) : (ltOrder | trOrder));
+ }
+// SkASSERT(lrOrder >= 0 || ltOrder >= 0 || trOrder >= 0);
+// There's not enough information to sort. Get the pairs of angles in opposite planes.
+// If an order is < 0, the pair is already in an opposite plane. Check the remaining pairs.
+ // FIXME : once all variants are understood, rewrite this more simply
+ if (ltOrder == 0 && lrOrder == 0) {
+ SkASSERT(trOrder < 0);
+ // FIXME : once this is verified to work, remove one opposite angle call
+ SkDEBUGCODE(bool lrOpposite = lh->oppositePlanes(rh));
+ bool ltOpposite = lh->oppositePlanes(this);
+ SkOPASSERT(lrOpposite != ltOpposite);
+ return COMPARE_RESULT(8, ltOpposite);
+ } else if (ltOrder == 1 && trOrder == 0) {
+ SkASSERT(lrOrder < 0);
+ bool trOpposite = oppositePlanes(rh);
+ return COMPARE_RESULT(9, trOpposite);
+ } else if (lrOrder == 1 && trOrder == 1) {
+ SkASSERT(ltOrder < 0);
+// SkDEBUGCODE(bool trOpposite = oppositePlanes(rh));
+ bool lrOpposite = lh->oppositePlanes(rh);
+// SkASSERT(lrOpposite != trOpposite);
+ return COMPARE_RESULT(10, lrOpposite);
+ }
+ // If a pair couldn't be ordered, there's not enough information to determine the sort.
+ // Refer to: https://docs.google.com/drawings/d/1KV-8SJTedku9fj4K6fd1SB-8divuV_uivHVsSgwXICQ
+ if (fUnorderable || lh->fUnorderable || rh->fUnorderable) {
+ // limit to lines; should work with curves, but wait for a failing test to verify
+ if (!fPart.isCurve() && !lh->fPart.isCurve() && !rh->fPart.isCurve()) {
+ // see if original raw data is orderable
+ // if two share a point, check if third has both points in same half plane
+ int ltShare = lh->fOriginalCurvePart[0] == fOriginalCurvePart[0];
+ int lrShare = lh->fOriginalCurvePart[0] == rh->fOriginalCurvePart[0];
+ int trShare = fOriginalCurvePart[0] == rh->fOriginalCurvePart[0];
+ // if only one pair are the same, the third point touches neither of the pair
+ if (ltShare + lrShare + trShare == 1) {
+ if (lrShare) {
+ int ltOOrder = lh->linesOnOriginalSide(this);
+ int rtOOrder = rh->linesOnOriginalSide(this);
+ if ((rtOOrder ^ ltOOrder) == 1) {
+ return ltOOrder;
+ }
+ } else if (trShare) {
+ int tlOOrder = this->linesOnOriginalSide(lh);
+ int rlOOrder = rh->linesOnOriginalSide(lh);
+ if ((tlOOrder ^ rlOOrder) == 1) {
+ return rlOOrder;
+ }
+ } else {
+ SkASSERT(ltShare);
+ int trOOrder = rh->linesOnOriginalSide(this);
+ int lrOOrder = lh->linesOnOriginalSide(rh);
+ // result must be 0 and 1 or 1 and 0 to be valid
+ if ((lrOOrder ^ trOOrder) == 1) {
+ return trOOrder;
+ }
+ }
+ }
+ }
+ }
+ if (lrOrder < 0) {
+ if (ltOrder < 0) {
+ return COMPARE_RESULT(11, trOrder);
+ }
+ return COMPARE_RESULT(12, ltOrder);
+ }
+ return COMPARE_RESULT(13, !lrOrder);
+}
+
+int SkOpAngle::lineOnOneSide(const SkDPoint& origin, const SkDVector& line, const SkOpAngle* test,
+ bool useOriginal) const {
+ double crosses[3];
+ SkPath::Verb testVerb = test->segment()->verb();
+ int iMax = SkPathOpsVerbToPoints(testVerb);
+// SkASSERT(origin == test.fCurveHalf[0]);
+ const SkDCurve& testCurve = useOriginal ? test->fOriginalCurvePart : test->fPart.fCurve;
+ for (int index = 1; index <= iMax; ++index) {
+ double xy1 = line.fX * (testCurve[index].fY - origin.fY);
+ double xy2 = line.fY * (testCurve[index].fX - origin.fX);
+ crosses[index - 1] = AlmostBequalUlps(xy1, xy2) ? 0 : xy1 - xy2;
+ }
+ if (crosses[0] * crosses[1] < 0) {
+ return -1;
+ }
+ if (SkPath::kCubic_Verb == testVerb) {
+ if (crosses[0] * crosses[2] < 0 || crosses[1] * crosses[2] < 0) {
+ return -1;
+ }
+ }
+ if (crosses[0]) {
+ return crosses[0] < 0;
+ }
+ if (crosses[1]) {
+ return crosses[1] < 0;
+ }
+ if (SkPath::kCubic_Verb == testVerb && crosses[2]) {
+ return crosses[2] < 0;
+ }
+ return -2;
+}
+
+// given a line, see if the opposite curve's convex hull is all on one side
+// returns -1=not on one side 0=this CW of test 1=this CCW of test
+int SkOpAngle::lineOnOneSide(const SkOpAngle* test, bool useOriginal) {
+ SkASSERT(!fPart.isCurve());
+ SkASSERT(test->fPart.isCurve());
+ SkDPoint origin = fPart.fCurve[0];
+ SkDVector line = fPart.fCurve[1] - origin;
+ int result = this->lineOnOneSide(origin, line, test, useOriginal);
+ if (-2 == result) {
+ fUnorderable = true;
+ result = -1;
+ }
+ return result;
+}
+
+// experiment works only with lines for now
+int SkOpAngle::linesOnOriginalSide(const SkOpAngle* test) {
+ SkASSERT(!fPart.isCurve());
+ SkASSERT(!test->fPart.isCurve());
+ SkDPoint origin = fOriginalCurvePart[0];
+ SkDVector line = fOriginalCurvePart[1] - origin;
+ double dots[2];
+ double crosses[2];
+ const SkDCurve& testCurve = test->fOriginalCurvePart;
+ for (int index = 0; index < 2; ++index) {
+ SkDVector testLine = testCurve[index] - origin;
+ double xy1 = line.fX * testLine.fY;
+ double xy2 = line.fY * testLine.fX;
+ dots[index] = line.fX * testLine.fX + line.fY * testLine.fY;
+ crosses[index] = AlmostBequalUlps(xy1, xy2) ? 0 : xy1 - xy2;
+ }
+ if (crosses[0] * crosses[1] < 0) {
+ return -1;
+ }
+ if (crosses[0]) {
+ return crosses[0] < 0;
+ }
+ if (crosses[1]) {
+ return crosses[1] < 0;
+ }
+ if ((!dots[0] && dots[1] < 0) || (dots[0] < 0 && !dots[1])) {
+ return 2; // 180 degrees apart
+ }
+ fUnorderable = true;
+ return -1;
+}
+
+// To sort the angles, all curves are translated to have the same starting point.
+// If the curve's control point in its original position is on one side of a compared line,
+// and translated is on the opposite side, reverse the previously computed order.
+void SkOpAngle::alignmentSameSide(const SkOpAngle* test, int* order) const {
+    if (*order < 0) {
+        // order is unknown/unorderable; nothing to adjust
+        return;
+    }
+    if (fPart.isCurve()) {
+        // This should support all curve types, but only bug that requires this has lines
+        // Turning on for curves causes existing tests to fail
+        return;
+    }
+    if (test->fPart.isCurve()) {
+        return;
+    }
+    const SkDPoint& xOrigin = test->fPart.fCurve.fLine[0];
+    const SkDPoint& oOrigin = test->fOriginalCurvePart.fLine[0];
+    if (xOrigin == oOrigin) {
+        // translated and original lines coincide; the order cannot change
+        return;
+    }
+    int iMax = SkPathOpsVerbToPoints(this->segment()->verb());
+    SkDVector xLine = test->fPart.fCurve.fLine[1] - xOrigin;
+    SkDVector oLine = test->fOriginalCurvePart.fLine[1] - oOrigin;
+    // compare each of this curve's points against both the translated and the
+    // original line; if any point lands on opposite sides of the two, flip
+    // the computed order
+    for (int index = 1; index <= iMax; ++index) {
+        const SkDPoint& testPt = fPart.fCurve[index];
+        double xCross = oLine.crossCheck(testPt - xOrigin);
+        double oCross = xLine.crossCheck(testPt - oOrigin);
+        if (oCross * xCross < 0) {
+            *order ^= 1;
+            break;
+        }
+    }
+}
+
+// Returns true if the span from fSectorStart to fSectorEnd covers more than
+// half of the 32-sector circle, i.e. the sweep wraps past the zero sector.
+bool SkOpAngle::checkCrossesZero() const {
+    const int lo = std::min(fSectorStart, fSectorEnd);
+    const int hi = std::max(fSectorStart, fSectorEnd);
+    return hi - lo > 16;
+}
+
+// Orders two angles whose initial tangents are too close to parallel to be
+// compared directly. The boolean result is the sort order consumed by the
+// caller (see orderable()); as a last resort both angles are marked
+// unorderable.
+bool SkOpAngle::checkParallel(SkOpAngle* rh) {
+    SkDVector scratch[2];
+    const SkDVector* sweep, * tweep;
+    // use the precomputed hull sweep when the curve part is well ordered;
+    // otherwise fall back to the chord from curve point 0 to point 1
+    if (this->fPart.isOrdered()) {
+        sweep = this->fPart.fSweep;
+    } else {
+        scratch[0] = this->fPart.fCurve[1] - this->fPart.fCurve[0];
+        sweep = &scratch[0];
+    }
+    if (rh->fPart.isOrdered()) {
+        tweep = rh->fPart.fSweep;
+    } else {
+        scratch[1] = rh->fPart.fCurve[1] - rh->fPart.fCurve[0];
+        tweep = &scratch[1];
+    }
+    double s0xt0 = sweep->crossCheck(*tweep);
+    if (tangentsDiverge(rh, s0xt0)) {
+        // tangents separate cleanly; the cross product sign decides the order
+        return s0xt0 < 0;
+    }
+    // compute the perpendicular to the endpoints and see where it intersects the opposite curve
+    // if the intersections within the t range, do a cross check on those
+    bool inside;
+    if (!fEnd->contains(rh->fEnd)) {
+        if (this->endToSide(rh, &inside)) {
+            return inside;
+        }
+        if (rh->endToSide(this, &inside)) {
+            return !inside;
+        }
+    }
+    if (this->midToSide(rh, &inside)) {
+        return inside;
+    }
+    if (rh->midToSide(this, &inside)) {
+        return !inside;
+    }
+    // compute the cross check from the mid T values (last resort)
+    SkDVector m0 = segment()->dPtAtT(this->midT()) - this->fPart.fCurve[0];
+    SkDVector m1 = rh->segment()->dPtAtT(rh->midT()) - rh->fPart.fCurve[0];
+    double m0xm1 = m0.crossCheck(m1);
+    if (m0xm1 == 0) {
+        // midpoint vectors are collinear; give up on ordering this pair
+        this->fUnorderable = true;
+        rh->fUnorderable = true;
+        return true;
+    }
+    return m0xm1 < 0;
+}
+
+// the original angle is too short to get meaningful sector information
+// lengthen it until it is long enough to be meaningful or leave it unset if lengthening it
+// would cause it to intersect one of the adjacent angles
+// Returns true if the angle ends up orderable.
+bool SkOpAngle::computeSector() {
+    if (fComputedSector) {
+        // already attempted; report the cached outcome
+        return !fUnorderable;
+    }
+    fComputedSector = true;
+    bool stepUp = fStart->t() < fEnd->t();
+    SkOpSpanBase* checkEnd = fEnd;
+    if (checkEnd->final() && stepUp) {
+        // cannot lengthen past the final span
+        fUnorderable = true;
+        return false;
+    }
+    do {
+// advance end
+        // look for a span belonging to this angle's segment whose t nearly
+        // coincides with checkEnd; lengthening must stop there
+        const SkOpSegment* other = checkEnd->segment();
+        const SkOpSpanBase* oSpan = other->head();
+        do {
+            if (oSpan->segment() != segment()) {
+                continue;
+            }
+            if (oSpan == checkEnd) {
+                continue;
+            }
+            if (!approximately_equal(oSpan->t(), checkEnd->t())) {
+                continue;
+            }
+            goto recomputeSector;
+        } while (!oSpan->final() && (oSpan = oSpan->upCast()->next()));
+        checkEnd = stepUp ? !checkEnd->final()
+                ? checkEnd->upCast()->next() : nullptr
+                : checkEnd->prev();
+    } while (checkEnd);
+recomputeSector:
+    // back off one span from where the walk stopped (or fall back to the
+    // segment's head/tail when the walk ran off the end)
+    SkOpSpanBase* computedEnd = stepUp ? checkEnd ? checkEnd->prev() : fEnd->segment()->head()
+            : checkEnd ? checkEnd->upCast()->next() : fEnd->segment()->tail();
+    if (checkEnd == fEnd || computedEnd == fEnd || computedEnd == fStart) {
+        fUnorderable = true;
+        return false;
+    }
+    if (stepUp != (fStart->t() < computedEnd->t())) {
+        // the lengthened end reversed direction; reject it
+        fUnorderable = true;
+        return false;
+    }
+    // temporarily swap in the lengthened end to recompute spans and sector,
+    // then restore the true end
+    SkOpSpanBase* saveEnd = fEnd;
+    fComputedEnd = fEnd = computedEnd;
+    setSpans();
+    setSector();
+    fEnd = saveEnd;
+    return !fUnorderable;
+}
+
+// Compares the convex hull sweep vectors of two angles.
+// Returns -1 when the hulls overlap (the pair cannot be ordered by hulls
+// alone), otherwise 0 or 1 giving the sort order.
+int SkOpAngle::convexHullOverlaps(const SkOpAngle* rh) {
+    const SkDVector* sweep = this->fPart.fSweep;
+    const SkDVector* tweep = rh->fPart.fSweep;
+    double s0xs1 = sweep[0].crossCheck(sweep[1]);
+    double s0xt0 = sweep[0].crossCheck(tweep[0]);
+    double s1xt0 = sweep[1].crossCheck(tweep[0]);
+    // does either of rh's sweep vectors fall between this angle's sweeps?
+    bool tBetweenS = s0xs1 > 0 ? s0xt0 > 0 && s1xt0 < 0 : s0xt0 < 0 && s1xt0 > 0;
+    double s0xt1 = sweep[0].crossCheck(tweep[1]);
+    double s1xt1 = sweep[1].crossCheck(tweep[1]);
+    tBetweenS |= s0xs1 > 0 ? s0xt1 > 0 && s1xt1 < 0 : s0xt1 < 0 && s1xt1 > 0;
+    double t0xt1 = tweep[0].crossCheck(tweep[1]);
+    if (tBetweenS) {
+        return -1;
+    }
+    if ((s0xt0 == 0 && s1xt1 == 0) || (s1xt0 == 0 && s0xt1 == 0)) {  // s0 to s1 equals t0 to t1
+        return -1;
+    }
+    // symmetric check: does either of this angle's sweeps fall between rh's?
+    bool sBetweenT = t0xt1 > 0 ? s0xt0 < 0 && s0xt1 > 0 : s0xt0 > 0 && s0xt1 < 0;
+    sBetweenT |= t0xt1 > 0 ? s1xt0 < 0 && s1xt1 > 0 : s1xt0 > 0 && s1xt1 < 0;
+    if (sBetweenT) {
+        return -1;
+    }
+    // if all of the sweeps are in the same half plane, then the order of any pair is enough
+    if (s0xt0 >= 0 && s0xt1 >= 0 && s1xt0 >= 0 && s1xt1 >= 0) {
+        return 0;
+    }
+    if (s0xt0 <= 0 && s0xt1 <= 0 && s1xt0 <= 0 && s1xt1 <= 0) {
+        return 1;
+    }
+    // if the outside sweeps are greater than 180 degress:
+    // first assume the inital tangents are the ordering
+    // if the midpoint direction matches the inital order, that is enough
+    SkDVector m0 = this->segment()->dPtAtT(this->midT()) - this->fPart.fCurve[0];
+    SkDVector m1 = rh->segment()->dPtAtT(rh->midT()) - rh->fPart.fCurve[0];
+    double m0xm1 = m0.crossCheck(m1);
+    if (s0xt0 > 0 && m0xm1 > 0) {
+        return 0;
+    }
+    if (s0xt0 < 0 && m0xm1 < 0) {
+        return 1;
+    }
+    if (tangentsDiverge(rh, s0xt0)) {
+        return s0xt0 < 0;
+    }
+    // tangents and midpoints disagree; let the midpoint cross product decide
+    return m0xm1 < 0;
+}
+
+// OPTIMIZATION: longest can all be either lazily computed here or precomputed in setup
+// Returns the ratio of the segment's longest control-polygon chord to dist.
+// Used by tangentsDiverge() to scale a displacement against the curve's
+// overall extent.
+double SkOpAngle::distEndRatio(double dist) const {
+    double longest = 0;
+    const SkOpSegment& segment = *this->segment();
+    int ptCount = SkPathOpsVerbToPoints(segment.verb());
+    const SkPoint* pts = segment.pts();
+    // find the longest squared distance between any pair of curve points;
+    // idx2 always starts past idx1, so no pair is ever degenerate (the
+    // original code carried an unreachable idx1 == idx2 guard here)
+    for (int idx1 = 0; idx1 < ptCount; ++idx1) {
+        for (int idx2 = idx1 + 1; idx2 <= ptCount; ++idx2) {
+            SkDVector v;
+            v.set(pts[idx2] - pts[idx1]);
+            longest = std::max(longest, v.lengthSquared());
+        }
+    }
+    return sqrt(longest) / dist;
+}
+
+// Decides the order of two angles by casting rays through their curve ends
+// and checking where those rays re-intersect the opposite curve.
+// Falls back to checkParallel() when no usable intersection is found.
+bool SkOpAngle::endsIntersect(SkOpAngle* rh) {
+    SkPath::Verb lVerb = this->segment()->verb();
+    SkPath::Verb rVerb = rh->segment()->verb();
+    int lPts = SkPathOpsVerbToPoints(lVerb);
+    int rPts = SkPathOpsVerbToPoints(rVerb);
+    // rays[0]: from this curve's start to rh's end; rays[1]: this curve's own chord
+    SkDLine rays[] = {{{this->fPart.fCurve[0], rh->fPart.fCurve[rPts]}},
+            {{this->fPart.fCurve[0], this->fPart.fCurve[lPts]}}};
+    if (this->fEnd->contains(rh->fEnd)) {
+        return checkParallel(rh);
+    }
+    double smallTs[2] = {-1, -1};
+    bool limited[2] = {false, false};
+    // intersect each (non-line) curve with the ray aimed at the other curve's
+    // end, keeping the intersection closest to the far end of the t range
+    for (int index = 0; index < 2; ++index) {
+        SkPath::Verb cVerb = index ? rVerb : lVerb;
+        // if the curve is a line, then the line and the ray intersect only at their crossing
+        if (cVerb == SkPath::kLine_Verb) {
+            continue;
+        }
+        const SkOpSegment& segment = index ? *rh->segment() : *this->segment();
+        SkIntersections i;
+        (*CurveIntersectRay[cVerb])(segment.pts(), segment.weight(), rays[index], &i);
+        double tStart = index ? rh->fStart->t() : this->fStart->t();
+        double tEnd = index ? rh->fComputedEnd->t() : this->fComputedEnd->t();
+        bool testAscends = tStart < (index ? rh->fComputedEnd->t() : this->fComputedEnd->t());
+        double t = testAscends ? 0 : 1;
+        for (int idx2 = 0; idx2 < i.used(); ++idx2) {
+            double testT = i[0][idx2];
+            if (!approximately_between_orderable(tStart, testT, tEnd)) {
+                continue;
+            }
+            if (approximately_equal_orderable(tStart, testT)) {
+                continue;
+            }
+            smallTs[index] = t = testAscends ? std::max(t, testT) : std::min(t, testT);
+            limited[index] = approximately_equal_orderable(t, tEnd);
+        }
+    }
+    bool sRayLonger = false;
+    SkDVector sCept = {0, 0};
+    double sCeptT = -1;
+    int sIndex = -1;
+    bool useIntersect = false;
+    for (int index = 0; index < 2; ++index) {
+        if (smallTs[index] < 0) {
+            continue;
+        }
+        const SkOpSegment& segment = index ? *rh->segment() : *this->segment();
+        const SkDPoint& dPt = segment.dPtAtT(smallTs[index]);
+        SkDVector cept = dPt - rays[index][0];
+        // If this point is on the curve, it should have been detected earlier by ordinary
+        // curve intersection. This may be hard to determine in general, but for lines,
+        // the point could be close to or equal to its end, but shouldn't be near the start.
+        if ((index ? lPts : rPts) == 1) {
+            SkDVector total = rays[index][1] - rays[index][0];
+            if (cept.lengthSquared() * 2 < total.lengthSquared()) {
+                continue;
+            }
+        }
+        SkDVector end = rays[index][1] - rays[index][0];
+        if (cept.fX * end.fX < 0 || cept.fY * end.fY < 0) {
+            // the intersection lies behind the ray's origin; ignore it
+            continue;
+        }
+        double rayDist = cept.length();
+        double endDist = end.length();
+        bool rayLonger = rayDist > endDist;
+        if (limited[0] && limited[1] && rayLonger) {
+            useIntersect = true;
+            sRayLonger = rayLonger;
+            sCept = cept;
+            sCeptT = smallTs[index];
+            sIndex = index;
+            break;
+        }
+        double delta = fabs(rayDist - endDist);
+        double minX, minY, maxX, maxY;
+        minX = minY = SK_ScalarInfinity;
+        maxX = maxY = -SK_ScalarInfinity;
+        // normalize delta by the curve's bounding-box extent so the magic
+        // thresholds below are scale-independent
+        const SkDCurve& curve = index ? rh->fPart.fCurve : this->fPart.fCurve;
+        int ptCount = index ? rPts : lPts;
+        for (int idx2 = 0; idx2 <= ptCount; ++idx2) {
+            minX = std::min(minX, curve[idx2].fX);
+            minY = std::min(minY, curve[idx2].fY);
+            maxX = std::max(maxX, curve[idx2].fX);
+            maxY = std::max(maxY, curve[idx2].fY);
+        }
+        double maxWidth = std::max(maxX - minX, maxY - minY);
+        delta = sk_ieee_double_divide(delta, maxWidth);
+        // FIXME: move these magic numbers
+        // This fixes skbug.com/8380
+        // Larger changes (like changing the constant in the next block) cause other
+        // tests to fail as documented in the bug.
+        // This could probably become a more general test: e.g., if translating the
+        // curve causes the cross product of any control point or end point to change
+        // sign with regard to the opposite curve's hull, treat the curves as parallel.
+
+        // Moreso, this points to the general fragility of this approach of assigning
+        // winding by sorting the angles of curves sharing a common point, as mentioned
+        // in the bug.
+        if (delta < 4e-3 && delta > 1e-3 && !useIntersect && fPart.isCurve()
+                && rh->fPart.isCurve() && fOriginalCurvePart[0] != fPart.fCurve.fLine[0]) {
+            // see if original curve is on one side of hull; translated is on the other
+            const SkDPoint& origin = rh->fOriginalCurvePart[0];
+            int count = SkPathOpsVerbToPoints(rh->segment()->verb());
+            const SkDVector line = rh->fOriginalCurvePart[count] - origin;
+            int originalSide = rh->lineOnOneSide(origin, line, this, true);
+            if (originalSide >= 0) {
+                int translatedSide = rh->lineOnOneSide(origin, line, this, false);
+                if (originalSide != translatedSide) {
+                    continue;
+                }
+            }
+        }
+        if (delta > 1e-3 && (useIntersect ^= true)) {
+            sRayLonger = rayLonger;
+            sCept = cept;
+            sCeptT = smallTs[index];
+            sIndex = index;
+        }
+    }
+    if (useIntersect) {
+        // orient the result by the curve's direction at the midpoint between
+        // its start and the saved intersection
+        const SkDCurve& curve = sIndex ? rh->fPart.fCurve : this->fPart.fCurve;
+        const SkOpSegment& segment = sIndex ? *rh->segment() : *this->segment();
+        double tStart = sIndex ? rh->fStart->t() : fStart->t();
+        SkDVector mid = segment.dPtAtT(tStart + (sCeptT - tStart) / 2) - curve[0];
+        double septDir = mid.crossCheck(sCept);
+        if (!septDir) {
+            return checkParallel(rh);
+        }
+        return sRayLonger ^ (sIndex == 0) ^ (septDir < 0);
+    } else {
+        return checkParallel(rh);
+    }
+}
+
+// Casts a ray perpendicular to this curve's tangent at its end point and
+// finds where it crosses the opposite curve. On success sets *inside to the
+// side (relative to this angle's start) the crossing lies on and returns
+// true; returns false when there is no usable crossing.
+bool SkOpAngle::endToSide(const SkOpAngle* rh, bool* inside) const {
+    const SkOpSegment* segment = this->segment();
+    SkPath::Verb verb = segment->verb();
+    SkDLine rayEnd;
+    rayEnd[0].set(this->fEnd->pt());
+    rayEnd[1] = rayEnd[0];
+    SkDVector slopeAtEnd = (*CurveDSlopeAtT[verb])(segment->pts(), segment->weight(),
+            this->fEnd->t());
+    // rotate the tangent 90 degrees to form the perpendicular ray
+    rayEnd[1].fX += slopeAtEnd.fY;
+    rayEnd[1].fY -= slopeAtEnd.fX;
+    SkIntersections iEnd;
+    const SkOpSegment* oppSegment = rh->segment();
+    SkPath::Verb oppVerb = oppSegment->verb();
+    (*CurveIntersectRay[oppVerb])(oppSegment->pts(), oppSegment->weight(), rayEnd, &iEnd);
+    double endDist;
+    int closestEnd = iEnd.closestTo(rh->fStart->t(), rh->fEnd->t(), rayEnd[0], &endDist);
+    if (closestEnd < 0) {
+        return false;
+    }
+    if (!endDist) {
+        // crossing coincides with the ray origin; no side information
+        return false;
+    }
+    SkDPoint start;
+    start.set(this->fStart->pt());
+    // OPTIMIZATION: multiple times in the code we find the max scalar
+    double minX, minY, maxX, maxY;
+    minX = minY = SK_ScalarInfinity;
+    maxX = maxY = -SK_ScalarInfinity;
+    const SkDCurve& curve = rh->fPart.fCurve;
+    int oppPts = SkPathOpsVerbToPoints(oppVerb);
+    for (int idx2 = 0; idx2 <= oppPts; ++idx2) {
+        minX = std::min(minX, curve[idx2].fX);
+        minY = std::min(minY, curve[idx2].fY);
+        maxX = std::max(maxX, curve[idx2].fX);
+        maxY = std::max(maxY, curve[idx2].fY);
+    }
+    // reject crossings that are negligibly close relative to the opposite
+    // curve's extent
+    double maxWidth = std::max(maxX - minX, maxY - minY);
+    endDist = sk_ieee_double_divide(endDist, maxWidth);
+    if (!(endDist >= 5e-12)) {  // empirically found
+        return false;  // ! above catches NaN
+    }
+    const SkDPoint* endPt = &rayEnd[0];
+    SkDPoint oppPt = iEnd.pt(closestEnd);
+    SkDVector vLeft = *endPt - start;
+    SkDVector vRight = oppPt - start;
+    double dir = vLeft.crossNoNormalCheck(vRight);
+    if (!dir) {
+        return false;
+    }
+    *inside = dir < 0;
+    return true;
+}
+
+/* y<0 y==0 y>0 x<0 x==0 x>0 xy<0 xy==0 xy>0
+ 0 x x x
+ 1 x x x
+ 2 x x x
+ 3 x x x
+ 4 x x x
+ 5 x x x
+ 6 x x x
+ 7 x x x
+ 8 x x x
+ 9 x x x
+ 10 x x x
+ 11 x x x
+ 12 x x x
+ 13 x x x
+ 14 x x x
+ 15 x x x
+*/
+// Maps a direction vector (x, y) to one of 32 sectors numbered per the table
+// above. Returns -1 when the direction is degenerate (a -1 table entry maps
+// through sector * 2 + 1 back to -1).
+int SkOpAngle::findSector(SkPath::Verb verb, double x, double y) const {
+    double absX = fabs(x);
+    double absY = fabs(y);
+    // xy distinguishes abs(x) <, ==, > abs(y); for lines an exact comparison
+    // is used, otherwise near-equal magnitudes count as equal
+    double xy = SkPath::kLine_Verb == verb || !AlmostEqualUlps(absX, absY) ? absX - absY : 0;
+    // If there are four quadrants and eight octants, and since the Latin for sixteen is sedecim,
+    // one could coin the term sedecimant for a space divided into 16 sections.
+    // http://english.stackexchange.com/questions/133688/word-for-something-partitioned-into-16-parts
+    static const int sedecimant[3][3][3] = {
+    //       y<0           y==0          y>0
+    //   x<0 x==0 x>0  x<0 x==0 x>0  x<0 x==0 x>0
+        {{ 4,  3,  2}, { 7, -1, 15}, {10, 11, 12}},  // abs(x) <  abs(y)
+        {{ 5, -1,  1}, {-1, -1, -1}, { 9, -1, 13}},  // abs(x) == abs(y)
+        {{ 6,  3,  0}, { 7, -1, 15}, { 8, 11, 14}},  // abs(x) >  abs(y)
+    };
+    // each sign expression maps <0, ==0, >0 to indices 0, 1, 2
+    int sector = sedecimant[(xy >= 0) + (xy > 0)][(y >= 0) + (y > 0)][(x >= 0) + (x > 0)] * 2 + 1;
+//    SkASSERT(SkPath::kLine_Verb == verb || sector >= 0);
+    return sector;
+}
+
+// Fetches the global state shared through this angle's starting segment.
+SkOpGlobalState* SkOpAngle::globalState() const {
+    return fStart->segment()->globalState();
+}
+
+
+// OPTIMIZE: if this loops to only one other angle, after first compare fails, insert on other side
+// OPTIMIZE: return where insertion succeeded. Then, start next insertion on opposite side
+// Inserts angle into this angle's sorted circular list.
+// Returns false only via FAIL_IF when an ambiguous sort cannot be resolved.
+bool SkOpAngle::insert(SkOpAngle* angle) {
+    if (angle->fNext) {
+        // angle already belongs to a loop; merge the two loops, letting the
+        // larger loop absorb the smaller one
+        if (loopCount() >= angle->loopCount()) {
+            if (!merge(angle)) {
+                return true;
+            }
+        } else if (fNext) {
+            if (!angle->merge(this)) {
+                return true;
+            }
+        } else {
+            angle->insert(this);
+        }
+        return true;
+    }
+    bool singleton = nullptr == fNext;
+    if (singleton) {
+        // start a one-element circular list
+        fNext = this;
+    }
+    SkOpAngle* next = fNext;
+    if (next->fNext == this) {
+        // two-element list: place angle before or after this per the sort
+        if (singleton || angle->after(this)) {
+            this->fNext = angle;
+            angle->fNext = next;
+        } else {
+            next->fNext = angle;
+            angle->fNext = this;
+        }
+        debugValidateNext();
+        return true;
+    }
+    SkOpAngle* last = this;
+    bool flipAmbiguity = false;
+    do {
+        SkASSERT(last->fNext == next);
+        if (angle->after(last) ^ (angle->tangentsAmbiguous() & flipAmbiguity)) {
+            last->fNext = angle;
+            angle->fNext = next;
+            debugValidateNext();
+            break;
+        }
+        last = next;
+        if (last == this) {
+            FAIL_IF(flipAmbiguity);
+            // We're in a loop. If a sort was ambiguous, flip it to end the loop.
+            flipAmbiguity = true;
+        }
+        next = next->fNext;
+    } while (true);
+    return true;
+}
+
+// Returns the last marked span, or null if none is set or it was already
+// chased. Marks the span as chased so it is only handed out once.
+SkOpSpanBase* SkOpAngle::lastMarked() const {
+    if (!fLastMarked) {
+        return nullptr;
+    }
+    if (fLastMarked->chased()) {
+        return nullptr;
+    }
+    fLastMarked->setChased(true);
+    return fLastMarked;
+}
+
+// Returns true if some angle in this circular list covers the same segment
+// span as angle, but traversed in the opposite direction (its start t equals
+// angle's end t and its end t equals angle's start t).
+bool SkOpAngle::loopContains(const SkOpAngle* angle) const {
+    if (!fNext) {
+        return false;
+    }
+    const SkOpSegment* tSegment = angle->fStart->segment();
+    const double tStart = angle->fStart->t();
+    const double tEnd = angle->fEnd->t();
+    const SkOpAngle* first = this;
+    const SkOpAngle* loop = this;
+    do {
+        const SkOpSegment* lSegment = loop->fStart->segment();
+        if (lSegment == tSegment
+                && loop->fStart->t() == tEnd
+                && loop->fEnd->t() == tStart) {
+            return true;
+        }
+    } while ((loop = loop->fNext) != first);
+    return false;
+}
+
+// Counts the angles reachable through fNext, stopping when the walk returns
+// to this angle or falls off the end of an unclosed list.
+int SkOpAngle::loopCount() const {
+    int total = 0;
+    const SkOpAngle* walker = this;
+    do {
+        walker = walker->fNext;
+        ++total;
+    } while (walker && walker != this);
+    return total;
+}
+
+// Moves every angle from angle's loop into this loop via insert().
+// Returns false without merging when this angle is already a member of
+// angle's loop.
+bool SkOpAngle::merge(SkOpAngle* angle) {
+    SkASSERT(fNext);
+    SkASSERT(angle->fNext);
+    SkOpAngle* working = angle;
+    do {
+        if (this == working) {
+            return false;
+        }
+        working = working->fNext;
+    } while (working != angle);
+    // detach each member of angle's loop in turn and re-insert it here
+    do {
+        SkOpAngle* next = working->fNext;
+        working->fNext = nullptr;
+        insert(working);
+        working = next;
+    } while (working != angle);
+    // it's likely that a pair of the angles are unorderable
+    debugValidateNext();
+    return true;
+}
+
+// Returns the parametric midpoint of this angle's span.
+double SkOpAngle::midT() const {
+    return 0.5 * (fStart->t() + fEnd->t());
+}
+
+// Casts a ray perpendicular to the chord between this angle's start and end
+// through the chord's midpoint, and compares where it most-outside-intersects
+// this curve versus the opposite curve. On success sets *inside and returns
+// true; returns false when either intersection is missing or the sides tie.
+bool SkOpAngle::midToSide(const SkOpAngle* rh, bool* inside) const {
+    const SkOpSegment* segment = this->segment();
+    SkPath::Verb verb = segment->verb();
+    const SkPoint& startPt = this->fStart->pt();
+    const SkPoint& endPt = this->fEnd->pt();
+    SkDPoint dStartPt;
+    dStartPt.set(startPt);
+    SkDLine rayMid;
+    // ray origin: midpoint of the start-to-end chord
+    rayMid[0].fX = (startPt.fX + endPt.fX) / 2;
+    rayMid[0].fY = (startPt.fY + endPt.fY) / 2;
+    // ray direction: the chord rotated 90 degrees
+    rayMid[1].fX = rayMid[0].fX + (endPt.fY - startPt.fY);
+    rayMid[1].fY = rayMid[0].fY - (endPt.fX - startPt.fX);
+    SkIntersections iMid;
+    (*CurveIntersectRay[verb])(segment->pts(), segment->weight(), rayMid, &iMid);
+    int iOutside = iMid.mostOutside(this->fStart->t(), this->fEnd->t(), dStartPt);
+    if (iOutside < 0) {
+        return false;
+    }
+    const SkOpSegment* oppSegment = rh->segment();
+    SkPath::Verb oppVerb = oppSegment->verb();
+    SkIntersections oppMid;
+    (*CurveIntersectRay[oppVerb])(oppSegment->pts(), oppSegment->weight(), rayMid, &oppMid);
+    int oppOutside = oppMid.mostOutside(rh->fStart->t(), rh->fEnd->t(), dStartPt);
+    if (oppOutside < 0) {
+        return false;
+    }
+    SkDVector iSide = iMid.pt(iOutside) - dStartPt;
+    SkDVector oppSide = oppMid.pt(oppOutside) - dStartPt;
+    double dir = iSide.crossCheck(oppSide);
+    if (!dir) {
+        return false;
+    }
+    *inside = dir < 0;
+    return true;
+}
+
+// Returns true when the two angles' starting sectors are at least 8 of the
+// 32 sectors apart.
+bool SkOpAngle::oppositePlanes(const SkOpAngle* rh) const {
+    const int separation = SkTAbs(rh->fSectorStart - fSectorStart);
+    return separation >= 8;
+}
+
+// Determines the sort order of this angle relative to rh.
+// Returns 0 (this sorts before rh), 1 (this sorts after rh), or -1 when the
+// pair is unorderable (both angles are then flagged fUnorderable); see the
+// declaration comment in SkOpAngle.h.
+int SkOpAngle::orderable(SkOpAngle* rh) {
+    int result;
+    if (!fPart.isCurve()) {
+        if (!rh->fPart.isCurve()) {
+            // both are lines (or line-like); compare tangent slopes directly
+            double leftX = fTangentHalf.dx();
+            double leftY = fTangentHalf.dy();
+            double rightX = rh->fTangentHalf.dx();
+            double rightY = rh->fTangentHalf.dy();
+            double x_ry = leftX * rightY;
+            double rx_y = rightX * leftY;
+            if (x_ry == rx_y) {
+                if (leftX * rightX < 0 || leftY * rightY < 0) {
+                    return 1;  // exactly 180 degrees apart
+                }
+                goto unorderable;
+            }
+            SkASSERT(x_ry != rx_y); // indicates an undetected coincidence -- worth finding earlier
+            return x_ry < rx_y ? 1 : 0;
+        }
+        // this is a line, rh is a curve: test rh's hull against this line
+        if ((result = this->lineOnOneSide(rh, false)) >= 0) {
+            return result;
+        }
+        if (fUnorderable || approximately_zero(rh->fSide)) {
+            goto unorderable;
+        }
+    } else if (!rh->fPart.isCurve()) {
+        // mirror case: rh is the line; invert its answer
+        if ((result = rh->lineOnOneSide(this, false)) >= 0) {
+            return result ? 0 : 1;
+        }
+        if (rh->fUnorderable || approximately_zero(fSide)) {
+            goto unorderable;
+        }
+    } else if ((result = this->convexHullOverlaps(rh)) >= 0) {
+        return result;
+    }
+    return this->endsIntersect(rh) ? 1 : 0;
+unorderable:
+    fUnorderable = true;
+    rh->fUnorderable = true;
+    return -1;
+}
+
+// OPTIMIZE: if this shows up in a profile, add a previous pointer
+// as is, this should be rarely called
+// Walks the circular fNext list until it finds the angle whose next pointer
+// refers back to this one.
+SkOpAngle* SkOpAngle::previous() const {
+    SkOpAngle* walker = fNext;
+    while (walker->fNext != this) {
+        walker = walker->fNext;
+    }
+    return walker;
+}
+
+// Returns the segment that owns this angle's starting span.
+SkOpSegment* SkOpAngle::segment() const {
+    return fStart->segment();
+}
+
+// (Re)initializes this angle for the span from start to end, clearing loop
+// membership and flags, then computing the span curve data and sector.
+void SkOpAngle::set(SkOpSpanBase* start, SkOpSpanBase* end) {
+    fStart = start;
+    fComputedEnd = fEnd = end;
+    SkASSERT(start != end);
+    fNext = nullptr;
+    fComputeSector = fComputedSector = fCheckCoincidence = fTangentsAmbiguous = false;
+    setSpans();
+    setSector();
+    // debug-only id assigned from the global state for diagnostics
+    SkDEBUGCODE(fID = start ? start->globalState()->nextAngleID() : -1);
+}
+
+// Computes fPart (the subdivided curve for this span), fOriginalCurvePart,
+// the hull sweep, and the fSide / fTangentHalf data used to sort this angle.
+void SkOpAngle::setSpans() {
+    fUnorderable = false;
+    fLastMarked = nullptr;
+    if (!fStart) {
+        fUnorderable = true;
+        return;
+    }
+    const SkOpSegment* segment = fStart->segment();
+    const SkPoint* pts = segment->pts();
+    SkDEBUGCODE(fPart.fCurve.fVerb = SkPath::kCubic_Verb);  // required for SkDCurve debug check
+    SkDEBUGCODE(fPart.fCurve[2].fX = fPart.fCurve[2].fY = fPart.fCurve[3].fX = fPart.fCurve[3].fY
+            = SK_ScalarNaN);  //  make the non-line part uninitialized
+    SkDEBUGCODE(fPart.fCurve.fVerb = segment->verb());  // set the curve type for real
+    segment->subDivide(fStart, fEnd, &fPart.fCurve);  // set at least the line part if not more
+    fOriginalCurvePart = fPart.fCurve;
+    const SkPath::Verb verb = segment->verb();
+    fPart.setCurveHullSweep(verb);
+    if (SkPath::kLine_Verb != verb && !fPart.isCurve()) {
+        // a degenerate (line-like) curve: collapse it to its end points and
+        // treat it as a line for sorting purposes
+        SkDLine lineHalf;
+        fPart.fCurve[1] = fPart.fCurve[SkPathOpsVerbToPoints(verb)];
+        fOriginalCurvePart[1] = fPart.fCurve[1];
+        lineHalf[0].set(fPart.fCurve[0].asSkPoint());
+        lineHalf[1].set(fPart.fCurve[1].asSkPoint());
+        fTangentHalf.lineEndPoints(lineHalf);
+        fSide = 0;
+    }
+    switch (verb) {
+    case SkPath::kLine_Verb: {
+        SkASSERT(fStart != fEnd);
+        // pick the point the span heads toward based on t direction
+        const SkPoint& cP1 = pts[fStart->t() < fEnd->t()];
+        SkDLine lineHalf;
+        lineHalf[0].set(fStart->pt());
+        lineHalf[1].set(cP1);
+        fTangentHalf.lineEndPoints(lineHalf);
+        fSide = 0;
+        } return;
+    case SkPath::kQuad_Verb:
+    case SkPath::kConic_Verb: {
+        SkLineParameters tangentPart;
+        (void) tangentPart.quadEndPoints(fPart.fCurve.fQuad);
+        fSide = -tangentPart.pointDistance(fPart.fCurve[2]);  // not normalized -- compare sign only
+        } break;
+    case SkPath::kCubic_Verb: {
+        SkLineParameters tangentPart;
+        (void) tangentPart.cubicPart(fPart.fCurve.fCubic);
+        fSide = -tangentPart.pointDistance(fPart.fCurve[3]);
+        double testTs[4];
+        // OPTIMIZATION: keep inflections precomputed with cubic segment?
+        int testCount = SkDCubic::FindInflections(pts, testTs);
+        double startT = fStart->t();
+        double endT = fEnd->t();
+        double limitT = endT;
+        int index;
+        // discard inflections outside this span's t range
+        for (index = 0; index < testCount; ++index) {
+            if (!::between(startT, testTs[index], limitT)) {
+                testTs[index] = -1;
+            }
+        }
+        testTs[testCount++] = startT;
+        testTs[testCount++] = endT;
+        SkTQSort<double>(testTs, testTs + testCount);
+        double bestSide = 0;
+        // sample the curve at each kept t and at midpoints between adjacent
+        // ts, keeping the sample farthest from the tangent line
+        int testCases = (testCount << 1) - 1;
+        index = 0;
+        while (testTs[index] < 0) {
+            ++index;
+        }
+        index <<= 1;
+        for (; index < testCases; ++index) {
+            int testIndex = index >> 1;
+            double testT = testTs[testIndex];
+            if (index & 1) {
+                testT = (testT + testTs[testIndex + 1]) / 2;
+            }
+            // OPTIMIZE: could avoid call for t == startT, endT
+            SkDPoint pt = dcubic_xy_at_t(pts, segment->weight(), testT);
+            SkLineParameters testPart;
+            testPart.cubicEndPoints(fPart.fCurve.fCubic);
+            double testSide = testPart.pointDistance(pt);
+            if (fabs(bestSide) < fabs(testSide)) {
+                bestSide = testSide;
+            }
+        }
+        fSide = -bestSide;  // compare sign only
+        } break;
+    default:
+        SkASSERT(0);
+    }
+}
+
+// Maps the hull sweep vectors to sectors (32nds of a circle) and builds
+// fSectorMask. When a sweep direction is degenerate (findSector returns -1),
+// defers by setting fComputeSector so computeSector() can retry later.
+void SkOpAngle::setSector() {
+    if (!fStart) {
+        fUnorderable = true;
+        return;
+    }
+    const SkOpSegment* segment = fStart->segment();
+    SkPath::Verb verb = segment->verb();
+    fSectorStart = this->findSector(verb, fPart.fSweep[0].fX, fPart.fSweep[0].fY);
+    if (fSectorStart < 0) {
+        goto deferTilLater;
+    }
+    if (!fPart.isCurve()) {  // if it's a line or line-like, note that both sectors are the same
+        SkASSERT(fSectorStart >= 0);
+        fSectorEnd = fSectorStart;
+        fSectorMask = 1 << fSectorStart;
+        return;
+    }
+    SkASSERT(SkPath::kLine_Verb != verb);
+    fSectorEnd = this->findSector(verb, fPart.fSweep[1].fX, fPart.fSweep[1].fY);
+    if (fSectorEnd < 0) {
+deferTilLater:
+        fSectorStart = fSectorEnd = -1;
+        fSectorMask = 0;
+        fComputeSector = true;  // can't determine sector until segment length can be found
+        return;
+    }
+    if (fSectorEnd == fSectorStart
+            && (fSectorStart & 3) != 3) {  // if the sector has no span, it can't be an exact angle
+        fSectorMask = 1 << fSectorStart;
+        return;
+    }
+    bool crossesZero = this->checkCrossesZero();
+    int start = std::min(fSectorStart, fSectorEnd);
+    bool curveBendsCCW = (fSectorStart == start) ^ crossesZero;
+    // bump the start and end of the sector span if they are on exact compass points
+    if ((fSectorStart & 3) == 3) {
+        fSectorStart = (fSectorStart + (curveBendsCCW ? 1 : 31)) & 0x1f;
+    }
+    if ((fSectorEnd & 3) == 3) {
+        fSectorEnd = (fSectorEnd + (curveBendsCCW ? 31 : 1)) & 0x1f;
+    }
+    // recompute after the bump, then set one mask bit per covered sector,
+    // splitting the run into two pieces when it wraps past zero
+    crossesZero = this->checkCrossesZero();
+    start = std::min(fSectorStart, fSectorEnd);
+    int end = std::max(fSectorStart, fSectorEnd);
+    if (!crossesZero) {
+        fSectorMask = (unsigned) -1 >> (31 - end + start) << start;
+    } else {
+        fSectorMask = (unsigned) -1 >> (31 - start) | ((unsigned) -1 << end);
+    }
+}
+
+// Delegates to SkOpSpanBase::starter to select the starting span of the
+// fStart/fEnd pair.
+SkOpSpan* SkOpAngle::starter() {
+    return fStart->starter(fEnd);
+}
+
+// Returns true when the two angles' initial sweep vectors separate strongly
+// enough that the cross product s0xt0 can be trusted to order them.
+// Also records fTangentsAmbiguous for results just past the cutoff.
+bool SkOpAngle::tangentsDiverge(const SkOpAngle* rh, double s0xt0) {
+    if (s0xt0 == 0) {
+        return false;
+    }
+    // if the ctrl tangents are not nearly parallel, use them
+    // solve for opposite direction displacement scale factor == m
+    // initial dir = v1.cross(v2) == v2.x * v1.y - v2.y * v1.x
+    // displacement of q1[1] : dq1 = { -m * v1.y, m * v1.x } + q1[1]
+    // straight angle when : v2.x * (dq1.y - q1[0].y) == v2.y * (dq1.x - q1[0].x)
+    //                       v2.x * (m * v1.x + v1.y) == v2.y * (-m * v1.y + v1.x)
+    // - m * (v2.x * v1.x + v2.y * v1.y) == v2.x * v1.y - v2.y * v1.x
+    // m = (v2.y * v1.x - v2.x * v1.y) / (v2.x * v1.x + v2.y * v1.y)
+    // m = v1.cross(v2) / v1.dot(v2)
+    const SkDVector* sweep = fPart.fSweep;
+    const SkDVector* tweep = rh->fPart.fSweep;
+    double s0dt0 = sweep[0].dot(tweep[0]);
+    if (!s0dt0) {
+        // perpendicular tangents always diverge
+        return true;
+    }
+    SkASSERT(s0dt0 != 0);
+    double m = s0xt0 / s0dt0;
+    double sDist = sweep[0].length() * m;
+    double tDist = tweep[0].length() * m;
+    // scale the smaller displacement by its curve's extent (distEndRatio)
+    bool useS = fabs(sDist) < fabs(tDist);
+    double mFactor = fabs(useS ? this->distEndRatio(sDist) : rh->distEndRatio(tDist));
+    // results in [50, 200) are treated as divergent but flagged ambiguous so
+    // insert() may flip the comparison to resolve a sort loop
+    fTangentsAmbiguous = mFactor >= 50 && mFactor < 200;
+    return mFactor < 50;   // empirically found limit
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpAngle.h b/gfx/skia/skia/src/pathops/SkOpAngle.h
new file mode 100644
index 0000000000..eeff91d09f
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpAngle.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpAngle_DEFINED
+#define SkOpAngle_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "src/pathops/SkLineParameters.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#if DEBUG_ANGLE
+#include "include/core/SkString.h"
+#endif
+
+#include <cstdint>
+
+class SkOpCoincidence;
+class SkOpContour;
+class SkOpPtT;
+class SkOpSegment;
+class SkOpSpan;
+class SkOpSpanBase;
+struct SkDPoint;
+struct SkDVector;
+
+// An SkOpAngle describes the direction a segment span leaves (or enters) a
+// shared point. Angles that meet at a point are linked into a circular list
+// through fNext and kept sorted via insert()/after() so the path-ops code can
+// reason about winding order around the point.
+class SkOpAngle {
+public:
+    enum IncludeType {
+        kUnaryWinding,
+        kUnaryXor,
+        kBinarySingle,
+        kBinaryOpp,
+    };
+
+    const SkOpAngle* debugAngle(int id) const;
+    const SkOpCoincidence* debugCoincidence() const;
+    SkOpContour* debugContour(int id) const;
+
+    int debugID() const {
+        return SkDEBUGRELEASE(fID, -1);
+    }
+
+#if DEBUG_SORT
+    void debugLoop() const;
+#endif
+
+#if DEBUG_ANGLE
+    bool debugCheckCoincidence() const { return fCheckCoincidence; }
+    void debugCheckNearCoincidence() const;
+    SkString debugPart() const;
+#endif
+    const SkOpPtT* debugPtT(int id) const;
+    const SkOpSegment* debugSegment(int id) const;
+    int debugSign() const;
+    const SkOpSpanBase* debugSpan(int id) const;
+    void debugValidate() const;
+    void debugValidateNext() const;  // in debug builds, verify that angle loop is uncorrupted
+    double distEndRatio(double dist) const;
+    // available to testing only
+    void dump() const;
+    void dumpCurves() const;
+    void dumpLoop() const;
+    void dumpOne(bool functionHeader) const;
+    void dumpTo(const SkOpSegment* fromSeg, const SkOpAngle* ) const;
+    void dumpTest() const;
+
+    SkOpSpanBase* end() const {
+        return fEnd;
+    }
+
+    bool insert(SkOpAngle* );
+    SkOpSpanBase* lastMarked() const;
+    bool loopContains(const SkOpAngle* ) const;
+    int loopCount() const;
+
+    SkOpAngle* next() const {
+        return fNext;
+    }
+
+    SkOpAngle* previous() const;
+    SkOpSegment* segment() const;
+    void set(SkOpSpanBase* start, SkOpSpanBase* end);
+
+    void setLastMarked(SkOpSpanBase* marked) {
+        fLastMarked = marked;
+    }
+
+    SkOpSpanBase* start() const {
+        return fStart;
+    }
+
+    SkOpSpan* starter();
+
+    bool tangentsAmbiguous() const {
+        return fTangentsAmbiguous;
+    }
+
+    bool unorderable() const {
+        return fUnorderable;
+    }
+
+private:
+    bool after(SkOpAngle* test);
+    void alignmentSameSide(const SkOpAngle* test, int* order) const;
+    bool checkCrossesZero() const;
+    bool checkParallel(SkOpAngle* );
+    bool computeSector();
+    int convexHullOverlaps(const SkOpAngle* );
+    bool endToSide(const SkOpAngle* rh, bool* inside) const;
+    bool endsIntersect(SkOpAngle* );
+    int findSector(SkPath::Verb verb, double x, double y) const;
+    SkOpGlobalState* globalState() const;
+    int lineOnOneSide(const SkDPoint& origin, const SkDVector& line, const SkOpAngle* test,
+                      bool useOriginal) const;
+    int lineOnOneSide(const SkOpAngle* test, bool useOriginal);
+    int linesOnOriginalSide(const SkOpAngle* test);
+    bool merge(SkOpAngle* );
+    double midT() const;
+    bool midToSide(const SkOpAngle* rh, bool* inside) const;
+    bool oppositePlanes(const SkOpAngle* rh) const;
+    int orderable(SkOpAngle* rh);  // false == this < rh ; true == this > rh; -1 == unorderable
+    void setSector();
+    void setSpans();
+    bool tangentsDiverge(const SkOpAngle* rh, double s0xt0);
+
+    SkDCurve fOriginalCurvePart;  // the curve from start to end
+    SkDCurveSweep fPart;  // the curve from start to end offset as needed
+    double fSide;
+    SkLineParameters fTangentHalf;  // used only to sort a pair of lines or line-like sections
+    SkOpAngle* fNext;  // circular list of angles sharing a point
+    SkOpSpanBase* fLastMarked;
+    SkOpSpanBase* fStart;
+    SkOpSpanBase* fEnd;
+    SkOpSpanBase* fComputedEnd;  // possibly-lengthened end (see computeSector)
+    int fSectorMask;  // one bit per 32nd-of-a-circle sector covered
+    int8_t fSectorStart;  // in 32nds of a circle
+    int8_t fSectorEnd;
+    bool fUnorderable;
+    bool fComputeSector;
+    bool fComputedSector;
+    bool fCheckCoincidence;
+    bool fTangentsAmbiguous;
+    SkDEBUGCODE(int fID);
+
+    friend class PathOpsAngleTester;
+};
+
+
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpBuilder.cpp b/gfx/skia/skia/src/pathops/SkOpBuilder.cpp
new file mode 100644
index 0000000000..57752e3a57
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpBuilder.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkPathEnums.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkPathPriv.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsCommon.h"
+#include "src/pathops/SkPathOpsTypes.h"
+#include "src/pathops/SkPathWriter.h"
+
+#include <cstdint>
+
+static bool one_contour(const SkPath& path) {
+ SkSTArenaAlloc<256> allocator;
+ int verbCount = path.countVerbs();
+ uint8_t* verbs = (uint8_t*) allocator.makeArrayDefault<uint8_t>(verbCount);
+ (void) path.getVerbs(verbs, verbCount);
+ for (int index = 1; index < verbCount; ++index) {
+ if (verbs[index] == SkPath::kMove_Verb) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void SkOpBuilder::ReversePath(SkPath* path) {
+ SkPath temp;
+ SkPoint lastPt;
+ SkAssertResult(path->getLastPt(&lastPt));
+ temp.moveTo(lastPt);
+ temp.reversePathTo(*path);
+ temp.close();
+ *path = temp;
+}
+
+bool SkOpBuilder::FixWinding(SkPath* path) {
+ SkPathFillType fillType = path->getFillType();
+ if (fillType == SkPathFillType::kInverseEvenOdd) {
+ fillType = SkPathFillType::kInverseWinding;
+ } else if (fillType == SkPathFillType::kEvenOdd) {
+ fillType = SkPathFillType::kWinding;
+ }
+ if (one_contour(*path)) {
+ SkPathFirstDirection dir = SkPathPriv::ComputeFirstDirection(*path);
+ if (dir != SkPathFirstDirection::kUnknown) {
+ if (dir == SkPathFirstDirection::kCW) {
+ ReversePath(path);
+ }
+ path->setFillType(fillType);
+ return true;
+ }
+ }
+ SkSTArenaAlloc<4096> allocator;
+ SkOpContourHead contourHead;
+ SkOpGlobalState globalState(&contourHead, &allocator SkDEBUGPARAMS(false)
+ SkDEBUGPARAMS(nullptr));
+ SkOpEdgeBuilder builder(*path, &contourHead, &globalState);
+ if (builder.unparseable() || !builder.finish()) {
+ return false;
+ }
+ if (!contourHead.count()) {
+ return true;
+ }
+ if (!contourHead.next()) {
+ return false;
+ }
+ contourHead.joinAllSegments();
+ contourHead.resetReverse();
+ bool writePath = false;
+ SkOpSpan* topSpan;
+ globalState.setPhase(SkOpPhase::kFixWinding);
+ while ((topSpan = FindSortableTop(&contourHead))) {
+ SkOpSegment* topSegment = topSpan->segment();
+ SkOpContour* topContour = topSegment->contour();
+ SkASSERT(topContour->isCcw() >= 0);
+#if DEBUG_WINDING
+ SkDebugf("%s id=%d nested=%d ccw=%d\n", __FUNCTION__,
+ topSegment->debugID(), globalState.nested(), topContour->isCcw());
+#endif
+ if ((globalState.nested() & 1) != SkToBool(topContour->isCcw())) {
+ topContour->setReverse();
+ writePath = true;
+ }
+ topContour->markAllDone();
+ globalState.clearNested();
+ }
+ if (!writePath) {
+ path->setFillType(fillType);
+ return true;
+ }
+ SkPath empty;
+ SkPathWriter woundPath(empty);
+ SkOpContour* test = &contourHead;
+ do {
+ if (!test->count()) {
+ continue;
+ }
+ if (test->reversed()) {
+ test->toReversePath(&woundPath);
+ } else {
+ test->toPath(&woundPath);
+ }
+ } while ((test = test->next()));
+ *path = *woundPath.nativePath();
+ path->setFillType(fillType);
+ return true;
+}
+
+void SkOpBuilder::add(const SkPath& path, SkPathOp op) {
+ if (fOps.empty() && op != kUnion_SkPathOp) {
+ fPathRefs.push_back() = SkPath();
+ *fOps.append() = kUnion_SkPathOp;
+ }
+ fPathRefs.push_back() = path;
+ *fOps.append() = op;
+}
+
+void SkOpBuilder::reset() {
+ fPathRefs.clear();
+ fOps.reset();
+}
+
+/* OPTIMIZATION: Union doesn't need to be all-or-nothing. A run of three or more convex
+ paths with union ops could be locally resolved and still improve over doing the
+ ops one at a time. */
+bool SkOpBuilder::resolve(SkPath* result) {
+ SkPath original = *result;
+ int count = fOps.size();
+ bool allUnion = true;
+ SkPathFirstDirection firstDir = SkPathFirstDirection::kUnknown;
+ for (int index = 0; index < count; ++index) {
+ SkPath* test = &fPathRefs[index];
+ if (kUnion_SkPathOp != fOps[index] || test->isInverseFillType()) {
+ allUnion = false;
+ break;
+ }
+ // If all paths are convex, track direction, reversing as needed.
+ if (test->isConvex()) {
+ SkPathFirstDirection dir = SkPathPriv::ComputeFirstDirection(*test);
+ if (dir == SkPathFirstDirection::kUnknown) {
+ allUnion = false;
+ break;
+ }
+ if (firstDir == SkPathFirstDirection::kUnknown) {
+ firstDir = dir;
+ } else if (firstDir != dir) {
+ ReversePath(test);
+ }
+ continue;
+ }
+ // If the path is not convex but its bounds do not intersect the others, simplify is enough.
+ const SkRect& testBounds = test->getBounds();
+ for (int inner = 0; inner < index; ++inner) {
+ // OPTIMIZE: check to see if the contour bounds do not intersect other contour bounds?
+ if (SkRect::Intersects(fPathRefs[inner].getBounds(), testBounds)) {
+ allUnion = false;
+ break;
+ }
+ }
+ }
+ if (!allUnion) {
+ *result = fPathRefs[0];
+ for (int index = 1; index < count; ++index) {
+ if (!Op(*result, fPathRefs[index], fOps[index], result)) {
+ reset();
+ *result = original;
+ return false;
+ }
+ }
+ reset();
+ return true;
+ }
+ SkPath sum;
+ for (int index = 0; index < count; ++index) {
+ if (!Simplify(fPathRefs[index], &fPathRefs[index])) {
+ reset();
+ *result = original;
+ return false;
+ }
+ if (!fPathRefs[index].isEmpty()) {
+ // convert the even odd result back to winding form before accumulating it
+ if (!FixWinding(&fPathRefs[index])) {
+ *result = original;
+ return false;
+ }
+ sum.addPath(fPathRefs[index]);
+ }
+ }
+ reset();
+ bool success = Simplify(sum, result);
+ if (!success) {
+ *result = original;
+ }
+ return success;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp b/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp
new file mode 100644
index 0000000000..4a8bcec1d8
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCoincidence.cpp
@@ -0,0 +1,1456 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkOpCoincidence.h"
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+
+#include <algorithm>
+
+// returns true if coincident span's start and end are the same
+bool SkCoincidentSpans::collapsed(const SkOpPtT* test) const {
+ return (fCoinPtTStart == test && fCoinPtTEnd->contains(test))
+ || (fCoinPtTEnd == test && fCoinPtTStart->contains(test))
+ || (fOppPtTStart == test && fOppPtTEnd->contains(test))
+ || (fOppPtTEnd == test && fOppPtTStart->contains(test));
+}
+
+// out of line since this function is referenced by address
+const SkOpPtT* SkCoincidentSpans::coinPtTEnd() const {
+ return fCoinPtTEnd;
+}
+
+// out of line since this function is referenced by address
+const SkOpPtT* SkCoincidentSpans::coinPtTStart() const {
+ return fCoinPtTStart;
+}
+
+// sets the span's end to the ptT referenced by the previous-next
+void SkCoincidentSpans::correctOneEnd(
+ const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::*setEnd)(const SkOpPtT* ptT) ) {
+ const SkOpPtT* origPtT = (this->*getEnd)();
+ const SkOpSpanBase* origSpan = origPtT->span();
+ const SkOpSpan* prev = origSpan->prev();
+ const SkOpPtT* testPtT = prev ? prev->next()->ptT()
+ : origSpan->upCast()->next()->prev()->ptT();
+ if (origPtT != testPtT) {
+ (this->*setEnd)(testPtT);
+ }
+}
+
+/* Please keep this in sync with debugCorrectEnds */
+// FIXME: member pointers have fallen out of favor and can be replaced with
+// an alternative approach.
+// makes all span ends agree with the segment's spans that define them
+void SkCoincidentSpans::correctEnds() {
+ this->correctOneEnd(&SkCoincidentSpans::coinPtTStart, &SkCoincidentSpans::setCoinPtTStart);
+ this->correctOneEnd(&SkCoincidentSpans::coinPtTEnd, &SkCoincidentSpans::setCoinPtTEnd);
+ this->correctOneEnd(&SkCoincidentSpans::oppPtTStart, &SkCoincidentSpans::setOppPtTStart);
+ this->correctOneEnd(&SkCoincidentSpans::oppPtTEnd, &SkCoincidentSpans::setOppPtTEnd);
+}
+
+/* Please keep this in sync with debugExpand */
+// expand the range by checking adjacent spans for coincidence
+bool SkCoincidentSpans::expand() {
+ bool expanded = false;
+ const SkOpSegment* segment = coinPtTStart()->segment();
+ const SkOpSegment* oppSegment = oppPtTStart()->segment();
+ do {
+ const SkOpSpan* start = coinPtTStart()->span()->upCast();
+ const SkOpSpan* prev = start->prev();
+ const SkOpPtT* oppPtT;
+ if (!prev || !(oppPtT = prev->contains(oppSegment))) {
+ break;
+ }
+ double midT = (prev->t() + start->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ setStarts(prev->ptT(), oppPtT);
+ expanded = true;
+ } while (true);
+ do {
+ const SkOpSpanBase* end = coinPtTEnd()->span();
+ SkOpSpanBase* next = end->final() ? nullptr : end->upCast()->next();
+ if (next && next->deleted()) {
+ break;
+ }
+ const SkOpPtT* oppPtT;
+ if (!next || !(oppPtT = next->contains(oppSegment))) {
+ break;
+ }
+ double midT = (end->t() + next->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ setEnds(next->ptT(), oppPtT);
+ expanded = true;
+ } while (true);
+ return expanded;
+}
+
+// increase the range of this span
+bool SkCoincidentSpans::extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ bool result = false;
+ if (fCoinPtTStart->fT > coinPtTStart->fT || (this->flipped()
+ ? fOppPtTStart->fT < oppPtTStart->fT : fOppPtTStart->fT > oppPtTStart->fT)) {
+ this->setStarts(coinPtTStart, oppPtTStart);
+ result = true;
+ }
+ if (fCoinPtTEnd->fT < coinPtTEnd->fT || (this->flipped()
+ ? fOppPtTEnd->fT > oppPtTEnd->fT : fOppPtTEnd->fT < oppPtTEnd->fT)) {
+ this->setEnds(coinPtTEnd, oppPtTEnd);
+ result = true;
+ }
+ return result;
+}
+
+// set the range of this span
+void SkCoincidentSpans::set(SkCoincidentSpans* next, const SkOpPtT* coinPtTStart,
+ const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ SkASSERT(SkOpCoincidence::Ordered(coinPtTStart, oppPtTStart));
+ fNext = next;
+ this->setStarts(coinPtTStart, oppPtTStart);
+ this->setEnds(coinPtTEnd, oppPtTEnd);
+}
+
+// returns true if both points are inside this
+bool SkCoincidentSpans::contains(const SkOpPtT* s, const SkOpPtT* e) const {
+ if (s->fT > e->fT) {
+ using std::swap;
+ swap(s, e);
+ }
+ if (s->segment() == fCoinPtTStart->segment()) {
+ return fCoinPtTStart->fT <= s->fT && e->fT <= fCoinPtTEnd->fT;
+ } else {
+ SkASSERT(s->segment() == fOppPtTStart->segment());
+ double oppTs = fOppPtTStart->fT;
+ double oppTe = fOppPtTEnd->fT;
+ if (oppTs > oppTe) {
+ using std::swap;
+ swap(oppTs, oppTe);
+ }
+ return oppTs <= s->fT && e->fT <= oppTe;
+ }
+}
+
+// out of line since this function is referenced by address
+const SkOpPtT* SkCoincidentSpans::oppPtTStart() const {
+ return fOppPtTStart;
+}
+
+// out of line since this function is referenced by address
+const SkOpPtT* SkCoincidentSpans::oppPtTEnd() const {
+ return fOppPtTEnd;
+}
+
+// A coincident span is unordered if the pairs of points in the main and opposite curves'
+// t values do not ascend or descend. For instance, if a tightly arced quadratic is
+// coincident with another curve, it may intersect it out of order.
+bool SkCoincidentSpans::ordered(bool* result) const {
+ const SkOpSpanBase* start = this->coinPtTStart()->span();
+ const SkOpSpanBase* end = this->coinPtTEnd()->span();
+ const SkOpSpanBase* next = start->upCast()->next();
+ if (next == end) {
+ *result = true;
+ return true;
+ }
+ bool flipped = this->flipped();
+ const SkOpSegment* oppSeg = this->oppPtTStart()->segment();
+ double oppLastT = fOppPtTStart->fT;
+ do {
+ const SkOpPtT* opp = next->contains(oppSeg);
+ if (!opp) {
+// SkOPOBJASSERT(start, 0); // may assert if coincident span isn't fully processed
+ return false;
+ }
+ if ((oppLastT > opp->fT) != flipped) {
+ *result = false;
+ return true;
+ }
+ oppLastT = opp->fT;
+ if (next == end) {
+ break;
+ }
+ if (!next->upCastable()) {
+ *result = false;
+ return true;
+ }
+ next = next->upCast()->next();
+ } while (true);
+ *result = true;
+ return true;
+}
+
+// if there is an existing pair that overlaps the addition, extend it
+bool SkOpCoincidence::extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+ const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+ SkCoincidentSpans* test = fHead;
+ if (!test) {
+ return false;
+ }
+ const SkOpSegment* coinSeg = coinPtTStart->segment();
+ const SkOpSegment* oppSeg = oppPtTStart->segment();
+ if (!Ordered(coinPtTStart, oppPtTStart)) {
+ using std::swap;
+ swap(coinSeg, oppSeg);
+ swap(coinPtTStart, oppPtTStart);
+ swap(coinPtTEnd, oppPtTEnd);
+ if (coinPtTStart->fT > coinPtTEnd->fT) {
+ swap(coinPtTStart, coinPtTEnd);
+ swap(oppPtTStart, oppPtTEnd);
+ }
+ }
+ double oppMinT = std::min(oppPtTStart->fT, oppPtTEnd->fT);
+ SkDEBUGCODE(double oppMaxT = std::max(oppPtTStart->fT, oppPtTEnd->fT));
+ do {
+ if (coinSeg != test->coinPtTStart()->segment()) {
+ continue;
+ }
+ if (oppSeg != test->oppPtTStart()->segment()) {
+ continue;
+ }
+ double oTestMinT = std::min(test->oppPtTStart()->fT, test->oppPtTEnd()->fT);
+ double oTestMaxT = std::max(test->oppPtTStart()->fT, test->oppPtTEnd()->fT);
+ // if debug check triggers, caller failed to check if extended already exists
+ SkASSERT(test->coinPtTStart()->fT > coinPtTStart->fT
+ || coinPtTEnd->fT > test->coinPtTEnd()->fT
+ || oTestMinT > oppMinT || oppMaxT > oTestMaxT);
+ if ((test->coinPtTStart()->fT <= coinPtTEnd->fT
+ && coinPtTStart->fT <= test->coinPtTEnd()->fT)
+ || (oTestMinT <= oTestMaxT && oppMinT <= oTestMaxT)) {
+ test->extend(coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ return true;
+ }
+ } while ((test = test->next()));
+ return false;
+}
+
+// verifies that the coincidence hasn't already been added
+static void DebugCheckAdd(const SkCoincidentSpans* check, const SkOpPtT* coinPtTStart,
+ const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) {
+#if DEBUG_COINCIDENCE
+ while (check) {
+ SkASSERT(check->coinPtTStart() != coinPtTStart || check->coinPtTEnd() != coinPtTEnd
+ || check->oppPtTStart() != oppPtTStart || check->oppPtTEnd() != oppPtTEnd);
+ SkASSERT(check->coinPtTStart() != oppPtTStart || check->coinPtTEnd() != oppPtTEnd
+ || check->oppPtTStart() != coinPtTStart || check->oppPtTEnd() != coinPtTEnd);
+ check = check->next();
+ }
+#endif
+}
+
+// adds a new coincident pair
+void SkOpCoincidence::add(SkOpPtT* coinPtTStart, SkOpPtT* coinPtTEnd, SkOpPtT* oppPtTStart,
+ SkOpPtT* oppPtTEnd) {
+ // OPTIMIZE: caller should have already sorted
+ if (!Ordered(coinPtTStart, oppPtTStart)) {
+ if (oppPtTStart->fT < oppPtTEnd->fT) {
+ this->add(oppPtTStart, oppPtTEnd, coinPtTStart, coinPtTEnd);
+ } else {
+ this->add(oppPtTEnd, oppPtTStart, coinPtTEnd, coinPtTStart);
+ }
+ return;
+ }
+ SkASSERT(Ordered(coinPtTStart, oppPtTStart));
+ // choose the ptT at the front of the list to track
+ coinPtTStart = coinPtTStart->span()->ptT();
+ coinPtTEnd = coinPtTEnd->span()->ptT();
+ oppPtTStart = oppPtTStart->span()->ptT();
+ oppPtTEnd = oppPtTEnd->span()->ptT();
+ SkOPASSERT(coinPtTStart->fT < coinPtTEnd->fT);
+ SkOPASSERT(oppPtTStart->fT != oppPtTEnd->fT);
+ SkOPASSERT(!coinPtTStart->deleted());
+ SkOPASSERT(!coinPtTEnd->deleted());
+ SkOPASSERT(!oppPtTStart->deleted());
+ SkOPASSERT(!oppPtTEnd->deleted());
+ DebugCheckAdd(fHead, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ DebugCheckAdd(fTop, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ SkCoincidentSpans* coinRec = this->globalState()->allocator()->make<SkCoincidentSpans>();
+ coinRec->init(SkDEBUGCODE(fGlobalState));
+ coinRec->set(this->fHead, coinPtTStart, coinPtTEnd, oppPtTStart, oppPtTEnd);
+ fHead = coinRec;
+}
+
+// description below
+bool SkOpCoincidence::addEndMovedSpans(const SkOpSpan* base, const SkOpSpanBase* testSpan) {
+ const SkOpPtT* testPtT = testSpan->ptT();
+ const SkOpPtT* stopPtT = testPtT;
+ const SkOpSegment* baseSeg = base->segment();
+ int escapeHatch = 100000; // this is 100 times larger than the debugLoopLimit test
+ while ((testPtT = testPtT->next()) != stopPtT) {
+ if (--escapeHatch <= 0) {
+ return false; // if triggered (likely by a fuzz-generated test) too complex to succeed
+ }
+ const SkOpSegment* testSeg = testPtT->segment();
+ if (testPtT->deleted()) {
+ continue;
+ }
+ if (testSeg == baseSeg) {
+ continue;
+ }
+ if (testPtT->span()->ptT() != testPtT) {
+ continue;
+ }
+ if (this->contains(baseSeg, testSeg, testPtT->fT)) {
+ continue;
+ }
+ // intersect perp with base->ptT() with testPtT->segment()
+ SkDVector dxdy = baseSeg->dSlopeAtT(base->t());
+ const SkPoint& pt = base->pt();
+ SkDLine ray = {{{pt.fX, pt.fY}, {pt.fX + dxdy.fY, pt.fY - dxdy.fX}}};
+ SkIntersections i SkDEBUGCODE((this->globalState()));
+ (*CurveIntersectRay[testSeg->verb()])(testSeg->pts(), testSeg->weight(), ray, &i);
+ for (int index = 0; index < i.used(); ++index) {
+ double t = i[0][index];
+ if (!between(0, t, 1)) {
+ continue;
+ }
+ SkDPoint oppPt = i.pt(index);
+ if (!oppPt.approximatelyEqual(pt)) {
+ continue;
+ }
+ SkOpSegment* writableSeg = const_cast<SkOpSegment*>(testSeg);
+ SkOpPtT* oppStart = writableSeg->addT(t);
+ if (oppStart == testPtT) {
+ continue;
+ }
+ SkOpSpan* writableBase = const_cast<SkOpSpan*>(base);
+ oppStart->span()->addOpp(writableBase);
+ if (oppStart->deleted()) {
+ continue;
+ }
+ SkOpSegment* coinSeg = base->segment();
+ SkOpSegment* oppSeg = oppStart->segment();
+ double coinTs, coinTe, oppTs, oppTe;
+ if (Ordered(coinSeg, oppSeg)) {
+ coinTs = base->t();
+ coinTe = testSpan->t();
+ oppTs = oppStart->fT;
+ oppTe = testPtT->fT;
+ } else {
+ using std::swap;
+ swap(coinSeg, oppSeg);
+ coinTs = oppStart->fT;
+ coinTe = testPtT->fT;
+ oppTs = base->t();
+ oppTe = testSpan->t();
+ }
+ if (coinTs > coinTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ bool added;
+ FAIL_IF(!this->addOrOverlap(coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &added));
+ }
+ }
+ return true;
+}
+
+// description below
+bool SkOpCoincidence::addEndMovedSpans(const SkOpPtT* ptT) {
+ FAIL_IF(!ptT->span()->upCastable());
+ const SkOpSpan* base = ptT->span()->upCast();
+ const SkOpSpan* prev = base->prev();
+ FAIL_IF(!prev);
+ if (!prev->isCanceled()) {
+ if (!this->addEndMovedSpans(base, base->prev())) {
+ return false;
+ }
+ }
+ if (!base->isCanceled()) {
+ if (!this->addEndMovedSpans(base, base->next())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* If A is coincident with B and B includes an endpoint, and A's matching point
+ is not the endpoint (i.e., there's an implied line connecting B-end and A)
+ then assume that the same implied line may intersect another curve close to B.
+ Since we only care about coincidence that was undetected, look at the
+ ptT list on B-segment adjacent to the B-end/A ptT loop (not in the loop, but
+ next door) and see if the A matching point is close enough to form another
+ coincident pair. If so, check for a new coincident span between B-end/A ptT loop
+ and the adjacent ptT loop.
+*/
+bool SkOpCoincidence::addEndMovedSpans(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* span = fHead;
+ if (!span) {
+ return true;
+ }
+ fTop = span;
+ fHead = nullptr;
+ do {
+ if (span->coinPtTStart()->fPt != span->oppPtTStart()->fPt) {
+ FAIL_IF(1 == span->coinPtTStart()->fT);
+ bool onEnd = span->coinPtTStart()->fT == 0;
+ bool oOnEnd = zero_or_one(span->oppPtTStart()->fT);
+ if (onEnd) {
+ if (!oOnEnd) { // if both are on end, any nearby intersect was already found
+ if (!this->addEndMovedSpans(span->oppPtTStart())) {
+ return false;
+ }
+ }
+ } else if (oOnEnd) {
+ if (!this->addEndMovedSpans(span->coinPtTStart())) {
+ return false;
+ }
+ }
+ }
+ if (span->coinPtTEnd()->fPt != span->oppPtTEnd()->fPt) {
+ bool onEnd = span->coinPtTEnd()->fT == 1;
+ bool oOnEnd = zero_or_one(span->oppPtTEnd()->fT);
+ if (onEnd) {
+ if (!oOnEnd) {
+ if (!this->addEndMovedSpans(span->oppPtTEnd())) {
+ return false;
+ }
+ }
+ } else if (oOnEnd) {
+ if (!this->addEndMovedSpans(span->coinPtTEnd())) {
+ return false;
+ }
+ }
+ }
+ } while ((span = span->next()));
+ this->restoreHead();
+ return true;
+}
+
+/* Please keep this in sync with debugAddExpanded */
+// for each coincident pair, match the spans
+// if the spans don't match, add the missing pt to the segment and loop it in the opposite span
+bool SkOpCoincidence::addExpanded(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+ DEBUG_SET_PHASE();
+ SkCoincidentSpans* coin = this->fHead;
+ if (!coin) {
+ return true;
+ }
+ do {
+ const SkOpPtT* startPtT = coin->coinPtTStart();
+ const SkOpPtT* oStartPtT = coin->oppPtTStart();
+ double priorT = startPtT->fT;
+ double oPriorT = oStartPtT->fT;
+ FAIL_IF(!startPtT->contains(oStartPtT));
+ SkOPASSERT(coin->coinPtTEnd()->contains(coin->oppPtTEnd()));
+ const SkOpSpanBase* start = startPtT->span();
+ const SkOpSpanBase* oStart = oStartPtT->span();
+ const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+ const SkOpSpanBase* oEnd = coin->oppPtTEnd()->span();
+ FAIL_IF(oEnd->deleted());
+ FAIL_IF(!start->upCastable());
+ const SkOpSpanBase* test = start->upCast()->next();
+ FAIL_IF(!coin->flipped() && !oStart->upCastable());
+ const SkOpSpanBase* oTest = coin->flipped() ? oStart->prev() : oStart->upCast()->next();
+ FAIL_IF(!oTest);
+ SkOpSegment* seg = start->segment();
+ SkOpSegment* oSeg = oStart->segment();
+ while (test != end || oTest != oEnd) {
+ const SkOpPtT* containedOpp = test->ptT()->contains(oSeg);
+ const SkOpPtT* containedThis = oTest->ptT()->contains(seg);
+ if (!containedOpp || !containedThis) {
+ // choose the ends, or the first common pt-t list shared by both
+ double nextT, oNextT;
+ if (containedOpp) {
+ nextT = test->t();
+ oNextT = containedOpp->fT;
+ } else if (containedThis) {
+ nextT = containedThis->fT;
+ oNextT = oTest->t();
+ } else {
+ // iterate through until a pt-t list found that contains the other
+ const SkOpSpanBase* walk = test;
+ const SkOpPtT* walkOpp;
+ do {
+ FAIL_IF(!walk->upCastable());
+ walk = walk->upCast()->next();
+ } while (!(walkOpp = walk->ptT()->contains(oSeg))
+ && walk != coin->coinPtTEnd()->span());
+ FAIL_IF(!walkOpp);
+ nextT = walk->t();
+ oNextT = walkOpp->fT;
+ }
+ // use t ranges to guess which one is missing
+ double startRange = nextT - priorT;
+ FAIL_IF(!startRange);
+ double startPart = (test->t() - priorT) / startRange;
+ double oStartRange = oNextT - oPriorT;
+ FAIL_IF(!oStartRange);
+ double oStartPart = (oTest->t() - oPriorT) / oStartRange;
+ FAIL_IF(startPart == oStartPart);
+ bool addToOpp = !containedOpp && !containedThis ? startPart < oStartPart
+ : !!containedThis;
+ bool startOver = false;
+ bool success = addToOpp ? oSeg->addExpanded(
+ oPriorT + oStartRange * startPart, test, &startOver)
+ : seg->addExpanded(
+ priorT + startRange * oStartPart, oTest, &startOver);
+ FAIL_IF(!success);
+ if (startOver) {
+ test = start;
+ oTest = oStart;
+ }
+ end = coin->coinPtTEnd()->span();
+ oEnd = coin->oppPtTEnd()->span();
+ }
+ if (test != end) {
+ FAIL_IF(!test->upCastable());
+ priorT = test->t();
+ test = test->upCast()->next();
+ }
+ if (oTest != oEnd) {
+ oPriorT = oTest->t();
+ if (coin->flipped()) {
+ oTest = oTest->prev();
+ } else {
+ FAIL_IF(!oTest->upCastable());
+ oTest = oTest->upCast()->next();
+ }
+ FAIL_IF(!oTest);
+ }
+
+ }
+ } while ((coin = coin->next()));
+ return true;
+}
+
+// given a t span, map the same range on the coincident span
+/*
+the curves may not scale linearly, so interpolation may only happen within known points
+remap over1s, over1e, coinPtTStart, coinPtTEnd to smallest range that captures over1s
+then repeat to capture over1e
+*/
+double SkOpCoincidence::TRange(const SkOpPtT* overS, double t,
+ const SkOpSegment* coinSeg SkDEBUGPARAMS(const SkOpPtT* overE)) {
+ const SkOpSpanBase* work = overS->span();
+ const SkOpPtT* foundStart = nullptr;
+ const SkOpPtT* foundEnd = nullptr;
+ const SkOpPtT* coinStart = nullptr;
+ const SkOpPtT* coinEnd = nullptr;
+ do {
+ const SkOpPtT* contained = work->contains(coinSeg);
+ if (!contained) {
+ if (work->final()) {
+ break;
+ }
+ continue;
+ }
+ if (work->t() <= t) {
+ coinStart = contained;
+ foundStart = work->ptT();
+ }
+ if (work->t() >= t) {
+ coinEnd = contained;
+ foundEnd = work->ptT();
+ break;
+ }
+ SkASSERT(work->ptT() != overE);
+ } while ((work = work->upCast()->next()));
+ if (!coinStart || !coinEnd) {
+ return 1;
+ }
+    // walked while overS->fT <= t and overS contained coinSeg; interpolate within the found pair
+ double denom = foundEnd->fT - foundStart->fT;
+ double sRatio = denom ? (t - foundStart->fT) / denom : 1;
+ return coinStart->fT + (coinEnd->fT - coinStart->fT) * sRatio;
+}
+
+// return true if span overlaps existing and needs to adjust the coincident list
+bool SkOpCoincidence::checkOverlap(SkCoincidentSpans* check,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe,
+ SkTDArray<SkCoincidentSpans*>* overlaps) const {
+ if (!Ordered(coinSeg, oppSeg)) {
+ if (oppTs < oppTe) {
+ return this->checkOverlap(check, oppSeg, coinSeg, oppTs, oppTe, coinTs, coinTe,
+ overlaps);
+ }
+ return this->checkOverlap(check, oppSeg, coinSeg, oppTe, oppTs, coinTe, coinTs, overlaps);
+ }
+ bool swapOpp = oppTs > oppTe;
+ if (swapOpp) {
+ using std::swap;
+ swap(oppTs, oppTe);
+ }
+ do {
+ if (check->coinPtTStart()->segment() != coinSeg) {
+ continue;
+ }
+ if (check->oppPtTStart()->segment() != oppSeg) {
+ continue;
+ }
+ double checkTs = check->coinPtTStart()->fT;
+ double checkTe = check->coinPtTEnd()->fT;
+ bool coinOutside = coinTe < checkTs || coinTs > checkTe;
+ double oCheckTs = check->oppPtTStart()->fT;
+ double oCheckTe = check->oppPtTEnd()->fT;
+ if (swapOpp) {
+ if (oCheckTs <= oCheckTe) {
+ return false;
+ }
+ using std::swap;
+ swap(oCheckTs, oCheckTe);
+ }
+ bool oppOutside = oppTe < oCheckTs || oppTs > oCheckTe;
+ if (coinOutside && oppOutside) {
+ continue;
+ }
+ bool coinInside = coinTe <= checkTe && coinTs >= checkTs;
+ bool oppInside = oppTe <= oCheckTe && oppTs >= oCheckTs;
+ if (coinInside && oppInside) { // already included, do nothing
+ return false;
+ }
+ *overlaps->append() = check; // partial overlap, extend existing entry
+ } while ((check = check->next()));
+ return true;
+}
+
+/* Please keep this in sync with debugAddIfMissing() */
+// note that over1s, over1e, over2s, over2e are ordered
+bool SkOpCoincidence::addIfMissing(const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd, SkOpSegment* coinSeg, SkOpSegment* oppSeg, bool* added
+ SkDEBUGPARAMS(const SkOpPtT* over1e) SkDEBUGPARAMS(const SkOpPtT* over2e)) {
+ SkASSERT(tStart < tEnd);
+ SkASSERT(over1s->fT < over1e->fT);
+ SkASSERT(between(over1s->fT, tStart, over1e->fT));
+ SkASSERT(between(over1s->fT, tEnd, over1e->fT));
+ SkASSERT(over2s->fT < over2e->fT);
+ SkASSERT(between(over2s->fT, tStart, over2e->fT));
+ SkASSERT(between(over2s->fT, tEnd, over2e->fT));
+ SkASSERT(over1s->segment() == over1e->segment());
+ SkASSERT(over2s->segment() == over2e->segment());
+ SkASSERT(over1s->segment() == over2s->segment());
+ SkASSERT(over1s->segment() != coinSeg);
+ SkASSERT(over1s->segment() != oppSeg);
+ SkASSERT(coinSeg != oppSeg);
+ double coinTs, coinTe, oppTs, oppTe;
+ coinTs = TRange(over1s, tStart, coinSeg SkDEBUGPARAMS(over1e));
+ coinTe = TRange(over1s, tEnd, coinSeg SkDEBUGPARAMS(over1e));
+ SkOpSpanBase::Collapsed result = coinSeg->collapsed(coinTs, coinTe);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return SkOpSpanBase::Collapsed::kYes == result;
+ }
+ oppTs = TRange(over2s, tStart, oppSeg SkDEBUGPARAMS(over2e));
+ oppTe = TRange(over2s, tEnd, oppSeg SkDEBUGPARAMS(over2e));
+ result = oppSeg->collapsed(oppTs, oppTe);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return SkOpSpanBase::Collapsed::kYes == result;
+ }
+ if (coinTs > coinTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ (void) this->addOrOverlap(coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, added);
+ return true;
+}
+
+/* Please keep this in sync with debugAddOrOverlap() */
+// If this is called by addEndMovedSpans(), a returned false propagates out to an abort.
+// If this is called by addIfMissing(), a returned false indicates there was nothing to add
+bool SkOpCoincidence::addOrOverlap(SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe, bool* added) {
+ SkTDArray<SkCoincidentSpans*> overlaps;
+ FAIL_IF(!fTop);
+ if (!this->checkOverlap(fTop, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &overlaps)) {
+ return true;
+ }
+ if (fHead && !this->checkOverlap(fHead, coinSeg, oppSeg, coinTs,
+ coinTe, oppTs, oppTe, &overlaps)) {
+ return true;
+ }
+ SkCoincidentSpans* overlap = !overlaps.empty() ? overlaps[0] : nullptr;
+ for (int index = 1; index < overlaps.size(); ++index) { // combine overlaps before continuing
+ SkCoincidentSpans* test = overlaps[index];
+ if (overlap->coinPtTStart()->fT > test->coinPtTStart()->fT) {
+ overlap->setCoinPtTStart(test->coinPtTStart());
+ }
+ if (overlap->coinPtTEnd()->fT < test->coinPtTEnd()->fT) {
+ overlap->setCoinPtTEnd(test->coinPtTEnd());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTStart()->fT < test->oppPtTStart()->fT
+ : overlap->oppPtTStart()->fT > test->oppPtTStart()->fT) {
+ overlap->setOppPtTStart(test->oppPtTStart());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTEnd()->fT > test->oppPtTEnd()->fT
+ : overlap->oppPtTEnd()->fT < test->oppPtTEnd()->fT) {
+ overlap->setOppPtTEnd(test->oppPtTEnd());
+ }
+ if (!fHead || !this->release(fHead, test)) {
+ SkAssertResult(this->release(fTop, test));
+ }
+ }
+ const SkOpPtT* cs = coinSeg->existing(coinTs, oppSeg);
+ const SkOpPtT* ce = coinSeg->existing(coinTe, oppSeg);
+ if (overlap && cs && ce && overlap->contains(cs, ce)) {
+ return true;
+ }
+ FAIL_IF(cs == ce && cs);
+ const SkOpPtT* os = oppSeg->existing(oppTs, coinSeg);
+ const SkOpPtT* oe = oppSeg->existing(oppTe, coinSeg);
+ if (overlap && os && oe && overlap->contains(os, oe)) {
+ return true;
+ }
+ FAIL_IF(cs && cs->deleted());
+ FAIL_IF(os && os->deleted());
+ FAIL_IF(ce && ce->deleted());
+ FAIL_IF(oe && oe->deleted());
+ const SkOpPtT* csExisting = !cs ? coinSeg->existing(coinTs, nullptr) : nullptr;
+ const SkOpPtT* ceExisting = !ce ? coinSeg->existing(coinTe, nullptr) : nullptr;
+ FAIL_IF(csExisting && csExisting == ceExisting);
+// FAIL_IF(csExisting && (csExisting == ce ||
+// csExisting->contains(ceExisting ? ceExisting : ce)));
+ FAIL_IF(ceExisting && (ceExisting == cs ||
+ ceExisting->contains(csExisting ? csExisting : cs)));
+ const SkOpPtT* osExisting = !os ? oppSeg->existing(oppTs, nullptr) : nullptr;
+ const SkOpPtT* oeExisting = !oe ? oppSeg->existing(oppTe, nullptr) : nullptr;
+ FAIL_IF(osExisting && osExisting == oeExisting);
+ FAIL_IF(osExisting && (osExisting == oe ||
+ osExisting->contains(oeExisting ? oeExisting : oe)));
+ FAIL_IF(oeExisting && (oeExisting == os ||
+ oeExisting->contains(osExisting ? osExisting : os)));
+ // extra line in debug code
+ this->debugValidate();
+ if (!cs || !os) {
+ SkOpPtT* csWritable = cs ? const_cast<SkOpPtT*>(cs)
+ : coinSeg->addT(coinTs);
+ if (csWritable == ce) {
+ return true;
+ }
+ SkOpPtT* osWritable = os ? const_cast<SkOpPtT*>(os)
+ : oppSeg->addT(oppTs);
+ FAIL_IF(!csWritable || !osWritable);
+ csWritable->span()->addOpp(osWritable->span());
+ cs = csWritable;
+ os = osWritable->active();
+ FAIL_IF(!os);
+ FAIL_IF((ce && ce->deleted()) || (oe && oe->deleted()));
+ }
+ if (!ce || !oe) {
+ SkOpPtT* ceWritable = ce ? const_cast<SkOpPtT*>(ce)
+ : coinSeg->addT(coinTe);
+ SkOpPtT* oeWritable = oe ? const_cast<SkOpPtT*>(oe)
+ : oppSeg->addT(oppTe);
+ FAIL_IF(!ceWritable->span()->addOpp(oeWritable->span()));
+ ce = ceWritable;
+ oe = oeWritable;
+ }
+ this->debugValidate();
+ FAIL_IF(cs->deleted());
+ FAIL_IF(os->deleted());
+ FAIL_IF(ce->deleted());
+ FAIL_IF(oe->deleted());
+ FAIL_IF(cs->contains(ce) || os->contains(oe));
+ bool result = true;
+ if (overlap) {
+ if (overlap->coinPtTStart()->segment() == coinSeg) {
+ result = overlap->extend(cs, ce, os, oe);
+ } else {
+ if (os->fT > oe->fT) {
+ using std::swap;
+ swap(cs, ce);
+ swap(os, oe);
+ }
+ result = overlap->extend(os, oe, cs, ce);
+ }
+#if DEBUG_COINCIDENCE_VERBOSE
+ if (result) {
+ overlaps[0]->debugShow();
+ }
+#endif
+ } else {
+ this->add(cs, ce, os, oe);
+#if DEBUG_COINCIDENCE_VERBOSE
+ fHead->debugShow();
+#endif
+ }
+ this->debugValidate();
+ if (result) {
+ *added = true;
+ }
+ return true;
+}
+
+// Please keep this in sync with debugAddMissing()
+/* detects overlaps of different coincident runs on same segment */
+/* does not detect overlaps for pairs without any segments in common */
+// returns true if caller should loop again
+// Walks every ordered pair of coincident spans; whenever two runs share a
+// segment and their t-ranges overlap, records the overlap on the two
+// non-shared segments via addIfMissing(). *added is set if anything new
+// was recorded. Returns false only on unrecoverable inconsistency (FAIL_IF).
+bool SkOpCoincidence::addMissing(bool* added DEBUG_COIN_DECLARE_PARAMS()) {
+    SkCoincidentSpans* outer = fHead;
+    *added = false;
+    if (!outer) {
+        return true;
+    }
+    // Park the current list in fTop so addIfMissing can grow a fresh fHead
+    // without perturbing this iteration; restoreHead() splices them back.
+    fTop = outer;
+    fHead = nullptr;
+    do {
+        // addifmissing can modify the list that this is walking
+        // save head so that walker can iterate over old data unperturbed
+        // addifmissing adds to head freely then add saved head in the end
+        const SkOpPtT* ocs = outer->coinPtTStart();
+        FAIL_IF(ocs->deleted());
+        const SkOpSegment* outerCoin = ocs->segment();
+        FAIL_IF(outerCoin->done());
+        const SkOpPtT* oos = outer->oppPtTStart();
+        if (oos->deleted()) {
+            return true;
+        }
+        const SkOpSegment* outerOpp = oos->segment();
+        SkOPASSERT(!outerOpp->done());
+        SkOpSegment* outerCoinWritable = const_cast<SkOpSegment*>(outerCoin);
+        SkOpSegment* outerOppWritable = const_cast<SkOpSegment*>(outerOpp);
+        SkCoincidentSpans* inner = outer;
+#ifdef SK_BUILD_FOR_FUZZER
+        int safetyNet = 1000;
+#endif
+        while ((inner = inner->next())) {
+#ifdef SK_BUILD_FOR_FUZZER
+            if (!--safetyNet) {
+                return false;
+            }
+#endif
+            this->debugValidate();
+            double overS, overE;
+            const SkOpPtT* ics = inner->coinPtTStart();
+            FAIL_IF(ics->deleted());
+            const SkOpSegment* innerCoin = ics->segment();
+            FAIL_IF(innerCoin->done());
+            const SkOpPtT* ios = inner->oppPtTStart();
+            FAIL_IF(ios->deleted());
+            const SkOpSegment* innerOpp = ios->segment();
+            SkOPASSERT(!innerOpp->done());
+            SkOpSegment* innerCoinWritable = const_cast<SkOpSegment*>(innerCoin);
+            SkOpSegment* innerOppWritable = const_cast<SkOpSegment*>(innerOpp);
+            // Four cases below: which of outer's two segments matches which
+            // of inner's two. The overlap is computed on the shared segment;
+            // the missing coincidence is added to the two unshared segments.
+            if (outerCoin == innerCoin) {
+                const SkOpPtT* oce = outer->coinPtTEnd();
+                if (oce->deleted()) {
+                    return true;
+                }
+                const SkOpPtT* ice = inner->coinPtTEnd();
+                FAIL_IF(ice->deleted());
+                if (outerOpp != innerOpp && this->overlap(ocs, oce, ics, ice, &overS, &overE)) {
+                    FAIL_IF(!this->addIfMissing(ocs->starter(oce), ics->starter(ice),
+                            overS, overE, outerOppWritable, innerOppWritable, added
+                            SkDEBUGPARAMS(ocs->debugEnder(oce))
+                            SkDEBUGPARAMS(ics->debugEnder(ice))));
+                }
+            } else if (outerCoin == innerOpp) {
+                const SkOpPtT* oce = outer->coinPtTEnd();
+                FAIL_IF(oce->deleted());
+                const SkOpPtT* ioe = inner->oppPtTEnd();
+                FAIL_IF(ioe->deleted());
+                if (outerOpp != innerCoin && this->overlap(ocs, oce, ios, ioe, &overS, &overE)) {
+                    FAIL_IF(!this->addIfMissing(ocs->starter(oce), ios->starter(ioe),
+                            overS, overE, outerOppWritable, innerCoinWritable, added
+                            SkDEBUGPARAMS(ocs->debugEnder(oce))
+                            SkDEBUGPARAMS(ios->debugEnder(ioe))));
+                }
+            } else if (outerOpp == innerCoin) {
+                const SkOpPtT* ooe = outer->oppPtTEnd();
+                FAIL_IF(ooe->deleted());
+                const SkOpPtT* ice = inner->coinPtTEnd();
+                FAIL_IF(ice->deleted());
+                SkASSERT(outerCoin != innerOpp);
+                if (this->overlap(oos, ooe, ics, ice, &overS, &overE)) {
+                    FAIL_IF(!this->addIfMissing(oos->starter(ooe), ics->starter(ice),
+                            overS, overE, outerCoinWritable, innerOppWritable, added
+                            SkDEBUGPARAMS(oos->debugEnder(ooe))
+                            SkDEBUGPARAMS(ics->debugEnder(ice))));
+                }
+            } else if (outerOpp == innerOpp) {
+                const SkOpPtT* ooe = outer->oppPtTEnd();
+                FAIL_IF(ooe->deleted());
+                const SkOpPtT* ioe = inner->oppPtTEnd();
+                if (ioe->deleted()) {
+                    return true;
+                }
+                SkASSERT(outerCoin != innerCoin);
+                if (this->overlap(oos, ooe, ios, ioe, &overS, &overE)) {
+                    FAIL_IF(!this->addIfMissing(oos->starter(ooe), ios->starter(ioe),
+                            overS, overE, outerCoinWritable, innerCoinWritable, added
+                            SkDEBUGPARAMS(oos->debugEnder(ooe))
+                            SkDEBUGPARAMS(ios->debugEnder(ioe))));
+                }
+            }
+            this->debugValidate();
+        }
+    } while ((outer = outer->next()));
+    this->restoreHead();
+    return true;
+}
+
+// Records a coincident overlap between the two segment pairs, preferring the
+// member of each pair that still carries wind value. Falls back to the "o"
+// (opposite) segment when the primary's starter span has zero windValue;
+// returns true (no-op) if both candidates are wound out or the two sides
+// resolve to the same segment. Returns false only when a required SkOpPtT
+// lookup fails (FAIL_IF).
+bool SkOpCoincidence::addOverlap(const SkOpSegment* seg1, const SkOpSegment* seg1o,
+        const SkOpSegment* seg2, const SkOpSegment* seg2o,
+        const SkOpPtT* overS, const SkOpPtT* overE) {
+    const SkOpPtT* s1 = overS->find(seg1);
+    const SkOpPtT* e1 = overE->find(seg1);
+    FAIL_IF(!s1);
+    FAIL_IF(!e1);
+    if (!s1->starter(e1)->span()->upCast()->windValue()) {
+        // seg1 contributes no winding here; try its paired opposite instead
+        s1 = overS->find(seg1o);
+        e1 = overE->find(seg1o);
+        FAIL_IF(!s1);
+        FAIL_IF(!e1);
+        if (!s1->starter(e1)->span()->upCast()->windValue()) {
+            return true;
+        }
+    }
+    const SkOpPtT* s2 = overS->find(seg2);
+    const SkOpPtT* e2 = overE->find(seg2);
+    FAIL_IF(!s2);
+    FAIL_IF(!e2);
+    if (!s2->starter(e2)->span()->upCast()->windValue()) {
+        s2 = overS->find(seg2o);
+        e2 = overE->find(seg2o);
+        FAIL_IF(!s2);
+        FAIL_IF(!e2);
+        if (!s2->starter(e2)->span()->upCast()->windValue()) {
+            return true;
+        }
+    }
+    if (s1->segment() == s2->segment()) {
+        return true;
+    }
+    // normalize direction so the first range runs with increasing t
+    if (s1->fT > e1->fT) {
+        using std::swap;
+        swap(s1, e1);
+        swap(s2, e2);
+    }
+    this->add(s1, e1, s2, e2);
+    return true;
+}
+
+// True if any recorded coincidence pairs seg with opp at opposite-t oppT,
+// checking both the active list (fHead) and the saved list (fTop).
+bool SkOpCoincidence::contains(const SkOpSegment* seg, const SkOpSegment* opp, double oppT) const {
+    if (this->contains(fHead, seg, opp, oppT)) {
+        return true;
+    }
+    if (this->contains(fTop, seg, opp, oppT)) {
+        return true;
+    }
+    return false;
+}
+
+// Scans one linked list of coincident spans for an entry pairing seg/opp
+// whose opposite t-range brackets oppT; the segment pair may appear in
+// either orientation (coin/opp or opp/coin).
+bool SkOpCoincidence::contains(const SkCoincidentSpans* coin, const SkOpSegment* seg,
+        const SkOpSegment* opp, double oppT) const {
+    if (!coin) {
+        return false;
+    }
+    do {
+        if (coin->coinPtTStart()->segment() == seg && coin->oppPtTStart()->segment() == opp
+                && between(coin->oppPtTStart()->fT, oppT, coin->oppPtTEnd()->fT)) {
+            return true;
+        }
+        // also match with the roles reversed
+        if (coin->oppPtTStart()->segment() == seg && coin->coinPtTStart()->segment() == opp
+                && between(coin->coinPtTStart()->fT, oppT, coin->coinPtTEnd()->fT)) {
+            return true;
+        }
+    } while ((coin = coin->next()));
+    return false;
+}
+
+// True if an existing entry in fHead fully contains the given coincident
+// range. Inputs are first normalized into canonical order (Ordered) so the
+// comparison matches how entries were stored.
+bool SkOpCoincidence::contains(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+        const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) const {
+    const SkCoincidentSpans* test = fHead;
+    if (!test) {
+        return false;
+    }
+    const SkOpSegment* coinSeg = coinPtTStart->segment();
+    const SkOpSegment* oppSeg = oppPtTStart->segment();
+    if (!Ordered(coinPtTStart, oppPtTStart)) {
+        // swap roles so coinSeg is the canonical "first" segment
+        using std::swap;
+        swap(coinSeg, oppSeg);
+        swap(coinPtTStart, oppPtTStart);
+        swap(coinPtTEnd, oppPtTEnd);
+        if (coinPtTStart->fT > coinPtTEnd->fT) {
+            swap(coinPtTStart, coinPtTEnd);
+            swap(oppPtTStart, oppPtTEnd);
+        }
+    }
+    // opposite side may run backwards; compare by min/max of its t-range
+    double oppMinT = std::min(oppPtTStart->fT, oppPtTEnd->fT);
+    double oppMaxT = std::max(oppPtTStart->fT, oppPtTEnd->fT);
+    do {
+        if (coinSeg != test->coinPtTStart()->segment()) {
+            continue;
+        }
+        if (coinPtTStart->fT < test->coinPtTStart()->fT) {
+            continue;
+        }
+        if (coinPtTEnd->fT > test->coinPtTEnd()->fT) {
+            continue;
+        }
+        if (oppSeg != test->oppPtTStart()->segment()) {
+            continue;
+        }
+        if (oppMinT < std::min(test->oppPtTStart()->fT, test->oppPtTEnd()->fT)) {
+            continue;
+        }
+        if (oppMaxT > std::max(test->oppPtTStart()->fT, test->oppPtTEnd()->fT)) {
+            continue;
+        }
+        return true;
+    } while ((test = test->next()));
+    return false;
+}
+
+// Asks each coincident span in the active list to re-anchor its four
+// endpoint SkOpPtT pointers (see SkCoincidentSpans::correctEnds).
+void SkOpCoincidence::correctEnds(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+    DEBUG_SET_PHASE();
+    SkCoincidentSpans* coin = fHead;
+    if (!coin) {
+        return;
+    }
+    do {
+        coin->correctEnds();
+    } while ((coin = coin->next()));
+}
+
+// walk span sets in parallel, moving winding from one to the other
+// For each coincident pair, winding is consolidated onto one side (chosen
+// per-span by the addToStart heuristic) and zeroed on the other; spans whose
+// wind and opp values both reach zero are marked done. Returns false on
+// structural failure (FAIL_IF), true otherwise.
+bool SkOpCoincidence::apply(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+    DEBUG_SET_PHASE();
+    SkCoincidentSpans* coin = fHead;
+    if (!coin) {
+        return true;
+    }
+    do {
+        SkOpSpanBase* startSpan = coin->coinPtTStartWritable()->span();
+        FAIL_IF(!startSpan->upCastable());
+        SkOpSpan* start = startSpan->upCast();
+        if (start->deleted()) {
+            continue;
+        }
+        const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+        FAIL_IF(start != start->starter(end));
+        bool flipped = coin->flipped();
+        // when flipped, the opposite run goes in the other direction, so its
+        // "start" is the recorded end
+        SkOpSpanBase* oStartBase = (flipped ? coin->oppPtTEndWritable()
+                : coin->oppPtTStartWritable())->span();
+        FAIL_IF(!oStartBase->upCastable());
+        SkOpSpan* oStart = oStartBase->upCast();
+        if (oStart->deleted()) {
+            continue;
+        }
+        const SkOpSpanBase* oEnd = (flipped ? coin->oppPtTStart() : coin->oppPtTEnd())->span();
+        SkASSERT(oStart == oStart->starter(oEnd));
+        SkOpSegment* segment = start->segment();
+        SkOpSegment* oSegment = oStart->segment();
+        bool operandSwap = segment->operand() != oSegment->operand();
+        if (flipped) {
+            if (oEnd->deleted()) {
+                continue;
+            }
+            // walk oStart forward to the span just before oEnd, since the
+            // flipped traversal below moves backwards via prev()
+            do {
+                SkOpSpanBase* oNext = oStart->next();
+                if (oNext == oEnd) {
+                    break;
+                }
+                FAIL_IF(!oNext->upCastable());
+                oStart = oNext->upCast();
+            } while (true);
+        }
+        do {
+            int windValue = start->windValue();
+            int oppValue = start->oppValue();
+            int oWindValue = oStart->windValue();
+            int oOppValue = oStart->oppValue();
+            // winding values are added or subtracted depending on direction and wind type
+            // same or opposite values are summed depending on the operand value
+            int windDiff = operandSwap ? oOppValue : oWindValue;
+            int oWindDiff = operandSwap ? oppValue : windValue;
+            if (!flipped) {
+                windDiff = -windDiff;
+                oWindDiff = -oWindDiff;
+            }
+            bool addToStart = windValue && (windValue > windDiff || (windValue == windDiff
+                    && oWindValue <= oWindDiff));
+            if (addToStart ? start->done() : oStart->done()) {
+                // never accumulate onto a span that is already done
+                addToStart ^= true;
+            }
+            if (addToStart) {
+                if (operandSwap) {
+                    using std::swap;
+                    swap(oWindValue, oOppValue);
+                }
+                if (flipped) {
+                    windValue -= oWindValue;
+                    oppValue -= oOppValue;
+                } else {
+                    windValue += oWindValue;
+                    oppValue += oOppValue;
+                }
+                if (segment->isXor()) {
+                    windValue &= 1;
+                }
+                if (segment->oppXor()) {
+                    oppValue &= 1;
+                }
+                oWindValue = oOppValue = 0;
+            } else {
+                if (operandSwap) {
+                    using std::swap;
+                    swap(windValue, oppValue);
+                }
+                if (flipped) {
+                    oWindValue -= windValue;
+                    oOppValue -= oppValue;
+                } else {
+                    oWindValue += windValue;
+                    oOppValue += oppValue;
+                }
+                if (oSegment->isXor()) {
+                    oWindValue &= 1;
+                }
+                if (oSegment->oppXor()) {
+                    oOppValue &= 1;
+                }
+                windValue = oppValue = 0;
+            }
+#if 0 && DEBUG_COINCIDENCE
+            SkDebugf("seg=%d span=%d windValue=%d oppValue=%d\n", segment->debugID(),
+                    start->debugID(), windValue, oppValue);
+            SkDebugf("seg=%d span=%d windValue=%d oppValue=%d\n", oSegment->debugID(),
+                    oStart->debugID(), oWindValue, oOppValue);
+#endif
+            FAIL_IF(windValue <= -1);
+            start->setWindValue(windValue);
+            start->setOppValue(oppValue);
+            FAIL_IF(oWindValue <= -1);
+            oStart->setWindValue(oWindValue);
+            oStart->setOppValue(oOppValue);
+            if (!windValue && !oppValue) {
+                segment->markDone(start);
+            }
+            if (!oWindValue && !oOppValue) {
+                oSegment->markDone(oStart);
+            }
+            SkOpSpanBase* next = start->next();
+            SkOpSpanBase* oNext = flipped ? oStart->prev() : oStart->next();
+            if (next == end) {
+                break;
+            }
+            FAIL_IF(!next->upCastable());
+            start = next->upCast();
+            // if the opposite ran out too soon, just reuse the last span
+            if (!oNext || !oNext->upCastable()) {
+                oNext = oStart;
+            }
+            oStart = oNext->upCast();
+        } while (true);
+    } while ((coin = coin->next()));
+    return true;
+}
+
+// Please keep this in sync with debugRelease()
+// Unlinks 'remove' from the list starting at 'coin' (either fHead or fTop),
+// patching the appropriate head pointer if 'remove' is first. Returns true
+// if 'remove' was found and unlinked.
+bool SkOpCoincidence::release(SkCoincidentSpans* coin, SkCoincidentSpans* remove) {
+    SkCoincidentSpans* head = coin;
+    SkCoincidentSpans* prev = nullptr;
+    SkCoincidentSpans* next;
+    do {
+        next = coin->next();
+        if (coin == remove) {
+            if (prev) {
+                prev->setNext(next);
+            } else if (head == fHead) {
+                fHead = next;
+            } else {
+                fTop = next;
+            }
+            break;
+        }
+        prev = coin;
+    } while ((coin = next));
+    // non-null iff the loop broke at 'remove'
+    return coin != nullptr;
+}
+
+// Removes every entry in the given list whose coincident start point has
+// been deleted, fixing up fHead/fTop as needed. Asserts that the matching
+// opposite endpoint (end when flipped, start otherwise) agrees.
+void SkOpCoincidence::releaseDeleted(SkCoincidentSpans* coin) {
+    if (!coin) {
+        return;
+    }
+    SkCoincidentSpans* head = coin;
+    SkCoincidentSpans* prev = nullptr;
+    SkCoincidentSpans* next;
+    do {
+        next = coin->next();
+        if (coin->coinPtTStart()->deleted()) {
+            SkOPASSERT(coin->flipped() ? coin->oppPtTEnd()->deleted() :
+                    coin->oppPtTStart()->deleted());
+            if (prev) {
+                prev->setNext(next);
+            } else if (head == fHead) {
+                fHead = next;
+            } else {
+                fTop = next;
+            }
+        } else {
+            SkOPASSERT(coin->flipped() ? !coin->oppPtTEnd()->deleted() :
+                    !coin->oppPtTStart()->deleted());
+            prev = coin;
+        }
+    } while ((coin = next));
+}
+
+// Purges deleted entries from both the active and saved lists.
+void SkOpCoincidence::releaseDeleted() {
+    this->releaseDeleted(fHead);
+    this->releaseDeleted(fTop);
+}
+
+// Re-appends the saved list (fTop) to the end of the active list (fHead),
+// then prunes entries that reference segments that became done in the
+// meantime. Counterpart to the fHead/fTop split performed in addMissing().
+void SkOpCoincidence::restoreHead() {
+    SkCoincidentSpans** headPtr = &fHead;
+    while (*headPtr) {
+        headPtr = (*headPtr)->nextPtr();
+    }
+    *headPtr = fTop;
+    fTop = nullptr;
+    // segments may have collapsed in the meantime; remove empty referenced segments
+    headPtr = &fHead;
+    while (*headPtr) {
+        SkCoincidentSpans* test = *headPtr;
+        if (test->coinPtTStart()->segment()->done() || test->oppPtTStart()->segment()->done()) {
+            *headPtr = test->next();
+            continue;
+        }
+        headPtr = (*headPtr)->nextPtr();
+    }
+}
+
+// Please keep this in sync with debugExpand()
+// expand the range by checking adjacent spans for coincidence
+// Returns true if any entry's range grew; entries that become duplicates of
+// one another after expansion are released.
+bool SkOpCoincidence::expand(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+    DEBUG_SET_PHASE();
+    SkCoincidentSpans* coin = fHead;
+    if (!coin) {
+        return false;
+    }
+    bool expanded = false;
+    do {
+        if (coin->expand()) {
+            // check to see if multiple spans expanded so they are now identical
+            SkCoincidentSpans* test = fHead;
+            do {
+                if (coin == test) {
+                    continue;
+                }
+                if (coin->coinPtTStart() == test->coinPtTStart()
+                        && coin->oppPtTStart() == test->oppPtTStart()) {
+                    this->release(fHead, test);
+                    break;
+                }
+            } while ((test = test->next()));
+            expanded = true;
+        }
+    } while ((coin = coin->next()));
+    return expanded;
+}
+
+// Builds a separate coincidence list of pairwise overlaps between entries
+// that share a segment (in any of the three role combinations other than
+// coin/coin, which adds nothing new). Returns false if addOverlap fails.
+bool SkOpCoincidence::findOverlaps(SkOpCoincidence* overlaps DEBUG_COIN_DECLARE_PARAMS()) const {
+    DEBUG_SET_PHASE();
+    overlaps->fHead = overlaps->fTop = nullptr;
+    SkCoincidentSpans* outer = fHead;
+    while (outer) {
+        const SkOpSegment* outerCoin = outer->coinPtTStart()->segment();
+        const SkOpSegment* outerOpp = outer->oppPtTStart()->segment();
+        SkCoincidentSpans* inner = outer;
+        while ((inner = inner->next())) {
+            const SkOpSegment* innerCoin = inner->coinPtTStart()->segment();
+            if (outerCoin == innerCoin) {
+                continue;  // both winners are the same segment, so there's no additional overlap
+            }
+            const SkOpSegment* innerOpp = inner->oppPtTStart()->segment();
+            const SkOpPtT* overlapS;
+            const SkOpPtT* overlapE;
+            if ((outerOpp == innerCoin && SkOpPtT::Overlaps(outer->oppPtTStart(),
+                    outer->oppPtTEnd(),inner->coinPtTStart(), inner->coinPtTEnd(), &overlapS,
+                    &overlapE))
+                    || (outerCoin == innerOpp && SkOpPtT::Overlaps(outer->coinPtTStart(),
+                    outer->coinPtTEnd(), inner->oppPtTStart(), inner->oppPtTEnd(),
+                    &overlapS, &overlapE))
+                    || (outerOpp == innerOpp && SkOpPtT::Overlaps(outer->oppPtTStart(),
+                    outer->oppPtTEnd(), inner->oppPtTStart(), inner->oppPtTEnd(),
+                    &overlapS, &overlapE))) {
+                if (!overlaps->addOverlap(outerCoin, outerOpp, innerCoin, innerOpp,
+                        overlapS, overlapE)) {
+                    return false;
+                }
+            }
+        }
+        outer = outer->next();
+    }
+    return true;
+}
+
+// After a point merge, redirects every reference to 'deleted' in both lists
+// to 'kept' (see the list overload below).
+void SkOpCoincidence::fixUp(SkOpPtT* deleted, const SkOpPtT* kept) {
+    SkOPASSERT(deleted != kept);
+    if (fHead) {
+        this->fixUp(fHead, deleted, kept);
+    }
+    if (fTop) {
+        this->fixUp(fTop, deleted, kept);
+    }
+}
+
+// Replaces each of the four endpoint pointers equal to 'deleted' with
+// 'kept'; if the substitution would collapse an entry onto a single span
+// (start and end in the same span), the entry is released instead.
+void SkOpCoincidence::fixUp(SkCoincidentSpans* coin, SkOpPtT* deleted, const SkOpPtT* kept) {
+    SkCoincidentSpans* head = coin;
+    do {
+        if (coin->coinPtTStart() == deleted) {
+            if (coin->coinPtTEnd()->span() == kept->span()) {
+                this->release(head, coin);
+                continue;
+            }
+            coin->setCoinPtTStart(kept);
+        }
+        if (coin->coinPtTEnd() == deleted) {
+            if (coin->coinPtTStart()->span() == kept->span()) {
+                this->release(head, coin);
+                continue;
+            }
+            coin->setCoinPtTEnd(kept);
+        }
+        if (coin->oppPtTStart() == deleted) {
+            if (coin->oppPtTEnd()->span() == kept->span()) {
+                this->release(head, coin);
+                continue;
+            }
+            coin->setOppPtTStart(kept);
+        }
+        if (coin->oppPtTEnd() == deleted) {
+            if (coin->oppPtTStart()->span() == kept->span()) {
+                this->release(head, coin);
+                continue;
+            }
+            coin->setOppPtTEnd(kept);
+        }
+    } while ((coin = coin->next()));
+}
+
+// Please keep this in sync with debugMark()
+/* this sets up the coincidence links in the segments when the coincidence crosses multiple spans */
+// Returns false on structural failure (FAIL_IF), true otherwise.
+bool SkOpCoincidence::mark(DEBUG_COIN_DECLARE_ONLY_PARAMS()) {
+    DEBUG_SET_PHASE();
+    SkCoincidentSpans* coin = fHead;
+    if (!coin) {
+        return true;
+    }
+    do {
+        SkOpSpanBase* startBase = coin->coinPtTStartWritable()->span();
+        FAIL_IF(!startBase->upCastable());
+        SkOpSpan* start = startBase->upCast();
+        FAIL_IF(start->deleted());
+        SkOpSpanBase* end = coin->coinPtTEndWritable()->span();
+        SkOPASSERT(!end->deleted());
+        SkOpSpanBase* oStart = coin->oppPtTStartWritable()->span();
+        SkOPASSERT(!oStart->deleted());
+        SkOpSpanBase* oEnd = coin->oppPtTEndWritable()->span();
+        FAIL_IF(oEnd->deleted());
+        bool flipped = coin->flipped();
+        if (flipped) {
+            // reorient the opposite run so both traversals go forward
+            using std::swap;
+            swap(oStart, oEnd);
+        }
+        /* coin and opp spans may not match up. Mark the ends, and then let the interior
+           get marked as many times as the spans allow */
+        FAIL_IF(!oStart->upCastable());
+        start->insertCoincidence(oStart->upCast());
+        end->insertCoinEnd(oEnd);
+        const SkOpSegment* segment = start->segment();
+        const SkOpSegment* oSegment = oStart->segment();
+        SkOpSpanBase* next = start;
+        SkOpSpanBase* oNext = oStart;
+        bool ordered;
+        FAIL_IF(!coin->ordered(&ordered));
+        // mark interior spans on each side independently
+        while ((next = next->upCast()->next()) != end) {
+            FAIL_IF(!next->upCastable());
+            FAIL_IF(!next->upCast()->insertCoincidence(oSegment, flipped, ordered));
+        }
+        while ((oNext = oNext->upCast()->next()) != oEnd) {
+            FAIL_IF(!oNext->upCastable());
+            FAIL_IF(!oNext->upCast()->insertCoincidence(segment, flipped, ordered));
+        }
+    } while ((coin = coin->next()));
+    return true;
+}
+
+// Please keep in sync with debugMarkCollapsed()
+// Releases entries collapsed onto 'test'; if a collapsed entry spanned a
+// whole side (both endpoint t values are 0 or 1), that segment is marked
+// entirely done first.
+void SkOpCoincidence::markCollapsed(SkCoincidentSpans* coin, SkOpPtT* test) {
+    SkCoincidentSpans* head = coin;
+    while (coin) {
+        if (coin->collapsed(test)) {
+            if (zero_or_one(coin->coinPtTStart()->fT) && zero_or_one(coin->coinPtTEnd()->fT)) {
+                coin->coinPtTStartWritable()->segment()->markAllDone();
+            }
+            if (zero_or_one(coin->oppPtTStart()->fT) && zero_or_one(coin->oppPtTEnd()->fT)) {
+                coin->oppPtTStartWritable()->segment()->markAllDone();
+            }
+            this->release(head, coin);
+        }
+        coin = coin->next();
+    }
+}
+
+// Please keep in sync with debugMarkCollapsed()
+// Applies markCollapsed to both the active and saved lists.
+void SkOpCoincidence::markCollapsed(SkOpPtT* test) {
+    markCollapsed(fHead, test);
+    markCollapsed(fTop, test);
+}
+
+// Defines a canonical order for a segment pair: first by verb, then by
+// lexicographic comparison of the control-point coordinates. Returns true
+// when coinSeg sorts at or before oppSeg (equal segments return true).
+bool SkOpCoincidence::Ordered(const SkOpSegment* coinSeg, const SkOpSegment* oppSeg) {
+    if (coinSeg->verb() < oppSeg->verb()) {
+        return true;
+    }
+    if (coinSeg->verb() > oppSeg->verb()) {
+        return false;
+    }
+    // compare x/y pairs as a flat scalar array
+    int count = (SkPathOpsVerbToPoints(coinSeg->verb()) + 1) * 2;
+    const SkScalar* cPt = &coinSeg->pts()[0].fX;
+    const SkScalar* oPt = &oppSeg->pts()[0].fX;
+    for (int index = 0; index < count; ++index) {
+        if (*cPt < *oPt) {
+            return true;
+        }
+        if (*cPt > *oPt) {
+            return false;
+        }
+        ++cPt;
+        ++oPt;
+    }
+    return true;
+}
+
+// Computes the intersection of two t-ranges on the same segment; writes the
+// overlap bounds to *overS/*overE and returns true only if it is non-empty.
+bool SkOpCoincidence::overlap(const SkOpPtT* coin1s, const SkOpPtT* coin1e,
+        const SkOpPtT* coin2s, const SkOpPtT* coin2e, double* overS, double* overE) const {
+    SkASSERT(coin1s->segment() == coin2s->segment());
+    *overS = std::max(std::min(coin1s->fT, coin1e->fT), std::min(coin2s->fT, coin2e->fT));
+    *overE = std::min(std::max(coin1s->fT, coin1e->fT), std::max(coin2s->fT, coin2e->fT));
+    return *overS < *overE;
+}
+
+// Commented-out lines keep this in sync with debugRelease()
+// Removes every active-list entry that references the deleted segment at
+// any of its four endpoints.
+void SkOpCoincidence::release(const SkOpSegment* deleted) {
+    SkCoincidentSpans* coin = fHead;
+    if (!coin) {
+        return;
+    }
+    do {
+        if (coin->coinPtTStart()->segment() == deleted
+                || coin->coinPtTEnd()->segment() == deleted
+                || coin->oppPtTStart()->segment() == deleted
+                || coin->oppPtTEnd()->segment() == deleted) {
+            this->release(fHead, coin);
+        }
+    } while ((coin = coin->next()));
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpCoincidence.h b/gfx/skia/skia/src/pathops/SkOpCoincidence.h
new file mode 100644
index 0000000000..7db9369050
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCoincidence.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpCoincidence_DEFINED
+#define SkOpCoincidence_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+class SkOpAngle;
+class SkOpContour;
+class SkOpSegment;
+
+template <typename T> class SkTDArray;
+
+// One node in a singly linked list describing a coincident run: a t-range on
+// a "coin" segment paired with the matching t-range on an "opp" segment.
+// The run is flipped when the opposite range decreases in t.
+class SkCoincidentSpans {
+public:
+    const SkOpPtT* coinPtTEnd() const;
+    const SkOpPtT* coinPtTStart() const;
+
+    // These return non-const pointers so that, as copies, they can be added
+    // to a new span pair
+    SkOpPtT* coinPtTEndWritable() const { return const_cast<SkOpPtT*>(fCoinPtTEnd); }
+    SkOpPtT* coinPtTStartWritable() const { return const_cast<SkOpPtT*>(fCoinPtTStart); }
+
+    bool collapsed(const SkOpPtT* ) const;
+    bool contains(const SkOpPtT* s, const SkOpPtT* e) const;
+    void correctEnds();
+    void correctOneEnd(const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+                       void (SkCoincidentSpans::* setEnd)(const SkOpPtT* ptT) );
+
+#if DEBUG_COIN
+    void debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const;
+    void debugCorrectOneEnd(SkPathOpsDebug::GlitchLog* log,
+                            const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+                            void (SkCoincidentSpans::* setEnd)(const SkOpPtT* ptT) const) const;
+    bool debugExpand(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+    const char* debugID() const {
+#if DEBUG_COIN
+        return fGlobalState->debugCoinDictEntry().fFunctionName;
+#else
+        return nullptr;
+#endif
+    }
+
+    void debugShow() const;
+#ifdef SK_DEBUG
+    void debugStartCheck(const SkOpSpanBase* outer, const SkOpSpanBase* over,
+            const SkOpGlobalState* debugState) const;
+#endif
+    void dump() const;
+    bool expand();
+    bool extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+                const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd);
+    // the opposite run decreases in t when its recorded start t exceeds its end t
+    bool flipped() const { return fOppPtTStart->fT > fOppPtTEnd->fT; }
+    SkDEBUGCODE(SkOpGlobalState* globalState() { return fGlobalState; })
+
+    void init(SkDEBUGCODE(SkOpGlobalState* globalState)) {
+        sk_bzero(this, sizeof(*this));
+        SkDEBUGCODE(fGlobalState = globalState);
+    }
+
+    SkCoincidentSpans* next() { return fNext; }
+    const SkCoincidentSpans* next() const { return fNext; }
+    SkCoincidentSpans** nextPtr() { return &fNext; }
+    const SkOpPtT* oppPtTStart() const;
+    const SkOpPtT* oppPtTEnd() const;
+    // These return non-const pointers so that, as copies, they can be added
+    // to a new span pair
+    SkOpPtT* oppPtTStartWritable() const { return const_cast<SkOpPtT*>(fOppPtTStart); }
+    SkOpPtT* oppPtTEndWritable() const { return const_cast<SkOpPtT*>(fOppPtTEnd); }
+    bool ordered(bool* result) const;
+
+    void set(SkCoincidentSpans* next, const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+             const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd);
+
+    // each setter checks the endpoint against its partner and flags the point
+    // as coincident
+    void setCoinPtTEnd(const SkOpPtT* ptT) {
+        SkOPASSERT(ptT == ptT->span()->ptT());
+        SkOPASSERT(!fCoinPtTStart || ptT->fT != fCoinPtTStart->fT);
+        SkASSERT(!fCoinPtTStart || fCoinPtTStart->segment() == ptT->segment());
+        fCoinPtTEnd = ptT;
+        ptT->setCoincident();
+    }
+
+    void setCoinPtTStart(const SkOpPtT* ptT) {
+        SkOPASSERT(ptT == ptT->span()->ptT());
+        SkOPASSERT(!fCoinPtTEnd || ptT->fT != fCoinPtTEnd->fT);
+        SkASSERT(!fCoinPtTEnd || fCoinPtTEnd->segment() == ptT->segment());
+        fCoinPtTStart = ptT;
+        ptT->setCoincident();
+    }
+
+    void setEnds(const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTEnd) {
+        this->setCoinPtTEnd(coinPtTEnd);
+        this->setOppPtTEnd(oppPtTEnd);
+    }
+
+    void setOppPtTEnd(const SkOpPtT* ptT) {
+        SkOPASSERT(ptT == ptT->span()->ptT());
+        SkOPASSERT(!fOppPtTStart || ptT->fT != fOppPtTStart->fT);
+        SkASSERT(!fOppPtTStart || fOppPtTStart->segment() == ptT->segment());
+        fOppPtTEnd = ptT;
+        ptT->setCoincident();
+    }
+
+    void setOppPtTStart(const SkOpPtT* ptT) {
+        SkOPASSERT(ptT == ptT->span()->ptT());
+        SkOPASSERT(!fOppPtTEnd || ptT->fT != fOppPtTEnd->fT);
+        SkASSERT(!fOppPtTEnd || fOppPtTEnd->segment() == ptT->segment());
+        fOppPtTStart = ptT;
+        ptT->setCoincident();
+    }
+
+    void setStarts(const SkOpPtT* coinPtTStart, const SkOpPtT* oppPtTStart) {
+        this->setCoinPtTStart(coinPtTStart);
+        this->setOppPtTStart(oppPtTStart);
+    }
+
+    void setNext(SkCoincidentSpans* next) { fNext = next; }
+
+private:
+    SkCoincidentSpans* fNext;
+    const SkOpPtT* fCoinPtTStart;
+    const SkOpPtT* fCoinPtTEnd;
+    const SkOpPtT* fOppPtTStart;
+    const SkOpPtT* fOppPtTEnd;
+    SkDEBUGCODE(SkOpGlobalState* fGlobalState);
+};
+
+// Owns the coincidence bookkeeping for a path-ops operation: two linked
+// lists of SkCoincidentSpans (fHead, the active list, and fTop, a list
+// temporarily set aside while the active list is rebuilt).
+class SkOpCoincidence {
+public:
+    SkOpCoincidence(SkOpGlobalState* globalState)
+        : fHead(nullptr)
+        , fTop(nullptr)
+        , fGlobalState(globalState)
+        , fContinue(false)
+        , fSpanDeleted(false)
+        , fPtAllocated(false)
+        , fCoinExtended(false)
+        , fSpanMerged(false) {
+        globalState->setCoincidence(this);
+    }
+
+    void add(SkOpPtT* coinPtTStart, SkOpPtT* coinPtTEnd, SkOpPtT* oppPtTStart,
+             SkOpPtT* oppPtTEnd);
+    bool addEndMovedSpans(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+    bool addExpanded(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+    bool addMissing(bool* added  DEBUG_COIN_DECLARE_PARAMS());
+    bool apply(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+    bool contains(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd,
+                  const SkOpPtT* oppPtTStart, const SkOpPtT* oppPtTEnd) const;
+    void correctEnds(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+
+#if DEBUG_COIN
+    void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log) const;
+    void debugAddExpanded(SkPathOpsDebug::GlitchLog* ) const;
+    void debugAddMissing(SkPathOpsDebug::GlitchLog* , bool* added) const;
+    void debugAddOrOverlap(SkPathOpsDebug::GlitchLog* log,
+                           const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+                           double coinTs, double coinTe, double oppTs, double oppTe,
+                           bool* added) const;
+#endif
+
+    const SkOpAngle* debugAngle(int id) const {
+        return SkDEBUGRELEASE(fGlobalState->debugAngle(id), nullptr);
+    }
+
+    void debugCheckBetween() const;
+
+#if DEBUG_COIN
+    void debugCheckValid(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+    SkOpContour* debugContour(int id) const {
+        return SkDEBUGRELEASE(fGlobalState->debugContour(id), nullptr);
+    }
+
+#if DEBUG_COIN
+    void debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const;
+    bool debugExpand(SkPathOpsDebug::GlitchLog* ) const;
+    void debugMark(SkPathOpsDebug::GlitchLog* ) const;
+    void debugMarkCollapsed(SkPathOpsDebug::GlitchLog* ,
+                            const SkCoincidentSpans* coin, const SkOpPtT* test) const;
+    void debugMarkCollapsed(SkPathOpsDebug::GlitchLog* , const SkOpPtT* test) const;
+#endif
+
+    const SkOpPtT* debugPtT(int id) const {
+        return SkDEBUGRELEASE(fGlobalState->debugPtT(id), nullptr);
+    }
+
+    const SkOpSegment* debugSegment(int id) const {
+        return SkDEBUGRELEASE(fGlobalState->debugSegment(id), nullptr);
+    }
+
+#if DEBUG_COIN
+    void debugRelease(SkPathOpsDebug::GlitchLog* , const SkCoincidentSpans* ,
+                      const SkCoincidentSpans* ) const;
+    void debugRelease(SkPathOpsDebug::GlitchLog* , const SkOpSegment* ) const;
+#endif
+    void debugShowCoincidence() const;
+
+    const SkOpSpanBase* debugSpan(int id) const {
+        return SkDEBUGRELEASE(fGlobalState->debugSpan(id), nullptr);
+    }
+
+    void debugValidate() const;
+    void dump() const;
+    bool expand(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+    bool extend(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart,
+                const SkOpPtT* oppPtTEnd);
+    bool findOverlaps(SkOpCoincidence*  DEBUG_COIN_DECLARE_PARAMS()) const;
+    void fixUp(SkOpPtT* deleted, const SkOpPtT* kept);
+
+    SkOpGlobalState* globalState() {
+        return fGlobalState;
+    }
+
+    const SkOpGlobalState* globalState() const {
+        return fGlobalState;
+    }
+
+    bool isEmpty() const {
+        return !fHead && !fTop;
+    }
+
+    bool mark(DEBUG_COIN_DECLARE_ONLY_PARAMS());
+    void markCollapsed(SkOpPtT* );
+
+    static bool Ordered(const SkOpPtT* coinPtTStart, const SkOpPtT* oppPtTStart) {
+        return Ordered(coinPtTStart->segment(), oppPtTStart->segment());
+    }
+
+    static bool Ordered(const SkOpSegment* coin, const SkOpSegment* opp);
+    void release(const SkOpSegment* );
+    void releaseDeleted();
+
+private:
+    void add(const SkOpPtT* coinPtTStart, const SkOpPtT* coinPtTEnd, const SkOpPtT* oppPtTStart,
+             const SkOpPtT* oppPtTEnd) {
+        this->add(const_cast<SkOpPtT*>(coinPtTStart), const_cast<SkOpPtT*>(coinPtTEnd),
+                  const_cast<SkOpPtT*>(oppPtTStart), const_cast<SkOpPtT*>(oppPtTEnd));
+    }
+
+    bool addEndMovedSpans(const SkOpSpan* base, const SkOpSpanBase* testSpan);
+    bool addEndMovedSpans(const SkOpPtT* ptT);
+
+    bool addIfMissing(const SkOpPtT* over1s, const SkOpPtT* over2s,
+                      double tStart, double tEnd, SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+                      bool* added
+                      SkDEBUGPARAMS(const SkOpPtT* over1e) SkDEBUGPARAMS(const SkOpPtT* over2e));
+    bool addOrOverlap(SkOpSegment* coinSeg, SkOpSegment* oppSeg,
+                      double coinTs, double coinTe, double oppTs, double oppTe, bool* added);
+    bool addOverlap(const SkOpSegment* seg1, const SkOpSegment* seg1o,
+                    const SkOpSegment* seg2, const SkOpSegment* seg2o,
+                    const SkOpPtT* overS, const SkOpPtT* overE);
+    bool checkOverlap(SkCoincidentSpans* check,
+                      const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+                      double coinTs, double coinTe, double oppTs, double oppTe,
+                      SkTDArray<SkCoincidentSpans*>* overlaps) const;
+    bool contains(const SkOpSegment* seg, const SkOpSegment* opp, double oppT) const;
+    bool contains(const SkCoincidentSpans* coin, const SkOpSegment* seg,
+                  const SkOpSegment* opp, double oppT) const;
+#if DEBUG_COIN
+    void debugAddIfMissing(SkPathOpsDebug::GlitchLog* ,
+                           const SkCoincidentSpans* outer, const SkOpPtT* over1s,
+                           const SkOpPtT* over1e) const;
+    void debugAddIfMissing(SkPathOpsDebug::GlitchLog* ,
+                           const SkOpPtT* over1s, const SkOpPtT* over2s,
+                           double tStart, double tEnd,
+                           const SkOpSegment* coinSeg, const SkOpSegment* oppSeg, bool* added,
+                           const SkOpPtT* over1e, const SkOpPtT* over2e) const;
+    void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* ,
+                               const SkOpSpan* base, const SkOpSpanBase* testSpan) const;
+    void debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* ,
+                               const SkOpPtT* ptT) const;
+#endif
+    void fixUp(SkCoincidentSpans* coin, SkOpPtT* deleted, const SkOpPtT* kept);
+    void markCollapsed(SkCoincidentSpans* head, SkOpPtT* test);
+    bool overlap(const SkOpPtT* coinStart1, const SkOpPtT* coinEnd1,
+                 const SkOpPtT* coinStart2, const SkOpPtT* coinEnd2,
+                 double* overS, double* overE) const;
+    bool release(SkCoincidentSpans* coin, SkCoincidentSpans* );
+    void releaseDeleted(SkCoincidentSpans* );
+    void restoreHead();
+    // return coinPtT->segment()->t mapped from overS->fT <= t <= overE->fT
+    static double TRange(const SkOpPtT* overS, double t, const SkOpSegment* coinPtT
+                         SkDEBUGPARAMS(const SkOpPtT* overE));
+
+    SkCoincidentSpans* fHead;
+    SkCoincidentSpans* fTop;
+    SkOpGlobalState* fGlobalState;
+    bool fContinue;
+    bool fSpanDeleted;
+    bool fPtAllocated;
+    bool fCoinExtended;
+    bool fSpanMerged;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpContour.cpp b/gfx/skia/skia/src/pathops/SkOpContour.cpp
new file mode 100644
index 0000000000..433dbcaebd
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpContour.cpp
@@ -0,0 +1,110 @@
+/*
+* Copyright 2013 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+#include "src/pathops/SkOpContour.h"
+
+#include "src/pathops/SkPathWriter.h"
+
+#include <cstring>
+
+void SkOpContour::toPath(SkPathWriter* path) const {
+ if (!this->count()) {
+ return;
+ }
+ const SkOpSegment* segment = &fHead;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->head(), segment->tail(), path));
+ } while ((segment = segment->next()));
+ path->finishContour();
+ path->assemble();
+}
+
+void SkOpContour::toReversePath(SkPathWriter* path) const {
+ const SkOpSegment* segment = fTail;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->tail(), segment->head(), path));
+ } while ((segment = segment->prev()));
+ path->finishContour();
+ path->assemble();
+}
+
+SkOpSpan* SkOpContour::undoneSpan() {
+ SkOpSegment* testSegment = &fHead;
+ do {
+ if (testSegment->done()) {
+ continue;
+ }
+ return testSegment->undoneSpan();
+ } while ((testSegment = testSegment->next()));
+ fDone = true;
+ return nullptr;
+}
+
+void SkOpContourBuilder::addConic(SkPoint pts[3], SkScalar weight) {
+ this->flush();
+ fContour->addConic(pts, weight);
+}
+
+void SkOpContourBuilder::addCubic(SkPoint pts[4]) {
+ this->flush();
+ fContour->addCubic(pts);
+}
+
+void SkOpContourBuilder::addCurve(SkPath::Verb verb, const SkPoint pts[4], SkScalar weight) {
+ if (SkPath::kLine_Verb == verb) {
+ this->addLine(pts);
+ return;
+ }
+ SkArenaAlloc* allocator = fContour->globalState()->allocator();
+ switch (verb) {
+ case SkPath::kQuad_Verb: {
+ SkPoint* ptStorage = allocator->makeArrayDefault<SkPoint>(3);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 3);
+ this->addQuad(ptStorage);
+ } break;
+ case SkPath::kConic_Verb: {
+ SkPoint* ptStorage = allocator->makeArrayDefault<SkPoint>(3);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 3);
+ this->addConic(ptStorage, weight);
+ } break;
+ case SkPath::kCubic_Verb: {
+ SkPoint* ptStorage = allocator->makeArrayDefault<SkPoint>(4);
+ memcpy(ptStorage, pts, sizeof(SkPoint) * 4);
+ this->addCubic(ptStorage);
+ } break;
+ default:
+ SkASSERT(0);
+ }
+}
+
+void SkOpContourBuilder::addLine(const SkPoint pts[2]) {
+ // if the previous line added is the exact opposite, eliminate both
+ if (fLastIsLine) {
+ if (fLastLine[0] == pts[1] && fLastLine[1] == pts[0]) {
+ fLastIsLine = false;
+ return;
+ } else {
+ flush();
+ }
+ }
+ memcpy(fLastLine, pts, sizeof(fLastLine));
+ fLastIsLine = true;
+}
+
+void SkOpContourBuilder::addQuad(SkPoint pts[3]) {
+ this->flush();
+ fContour->addQuad(pts);
+}
+
+void SkOpContourBuilder::flush() {
+ if (!fLastIsLine)
+ return;
+ SkArenaAlloc* allocator = fContour->globalState()->allocator();
+ SkPoint* ptStorage = allocator->makeArrayDefault<SkPoint>(2);
+ memcpy(ptStorage, fLastLine, sizeof(fLastLine));
+ (void) fContour->addLine(ptStorage);
+ fLastIsLine = false;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpContour.h b/gfx/skia/skia/src/pathops/SkOpContour.h
new file mode 100644
index 0000000000..2d55c50082
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpContour.h
@@ -0,0 +1,464 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpContour_DEFINED
+#define SkOpContour_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkDebug.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+class SkOpAngle;
+class SkOpCoincidence;
+class SkPathWriter;
+enum class SkOpRayDir;
+struct SkOpRayHit;
+
+class SkOpContour {
+public:
+ SkOpContour() {
+ reset();
+ }
+
+ bool operator<(const SkOpContour& rh) const {
+ return fBounds.fTop == rh.fBounds.fTop
+ ? fBounds.fLeft < rh.fBounds.fLeft
+ : fBounds.fTop < rh.fBounds.fTop;
+ }
+
+ void addConic(SkPoint pts[3], SkScalar weight) {
+ appendSegment().addConic(pts, weight, this);
+ }
+
+ void addCubic(SkPoint pts[4]) {
+ appendSegment().addCubic(pts, this);
+ }
+
+ SkOpSegment* addLine(SkPoint pts[2]) {
+ SkASSERT(pts[0] != pts[1]);
+ return appendSegment().addLine(pts, this);
+ }
+
+ void addQuad(SkPoint pts[3]) {
+ appendSegment().addQuad(pts, this);
+ }
+
+ SkOpSegment& appendSegment() {
+ SkOpSegment* result = fCount++ ? this->globalState()->allocator()->make<SkOpSegment>()
+ : &fHead;
+ result->setPrev(fTail);
+ if (fTail) {
+ fTail->setNext(result);
+ }
+ fTail = result;
+ return *result;
+ }
+
+ const SkPathOpsBounds& bounds() const {
+ return fBounds;
+ }
+
+ void calcAngles() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->calcAngles();
+ } while ((segment = segment->next()));
+ }
+
+ void complete() {
+ setBounds();
+ }
+
+ int count() const {
+ return fCount;
+ }
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+ int debugIndent() const {
+ return SkDEBUGRELEASE(fDebugIndent, 0);
+ }
+
+
+ const SkOpAngle* debugAngle(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugAngle(id), nullptr);
+ }
+
+ const SkOpCoincidence* debugCoincidence() const {
+ return this->globalState()->coincidence();
+ }
+
+#if DEBUG_COIN
+ void debugCheckHealth(SkPathOpsDebug::GlitchLog* ) const;
+#endif
+
+ SkOpContour* debugContour(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugContour(id), nullptr);
+ }
+
+#if DEBUG_COIN
+ void debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const;
+ void debugMoveMultiples(SkPathOpsDebug::GlitchLog* ) const;
+ void debugMoveNearby(SkPathOpsDebug::GlitchLog* log) const;
+#endif
+
+ const SkOpPtT* debugPtT(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugPtT(id), nullptr);
+ }
+
+ const SkOpSegment* debugSegment(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugSegment(id), nullptr);
+ }
+
+#if DEBUG_ACTIVE_SPANS
+ void debugShowActiveSpans(SkString* str) {
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->debugShowActiveSpans(str);
+ } while ((segment = segment->next()));
+ }
+#endif
+
+ const SkOpSpanBase* debugSpan(int id) const {
+ return SkDEBUGRELEASE(this->globalState()->debugSpan(id), nullptr);
+ }
+
+ SkOpGlobalState* globalState() const {
+ return fState;
+ }
+
+ void debugValidate() const {
+#if DEBUG_VALIDATE
+ const SkOpSegment* segment = &fHead;
+ const SkOpSegment* prior = nullptr;
+ do {
+ segment->debugValidate();
+ SkASSERT(segment->prev() == prior);
+ prior = segment;
+ } while ((segment = segment->next()));
+ SkASSERT(prior == fTail);
+#endif
+ }
+
+ bool done() const {
+ return fDone;
+ }
+
+ void dump() const;
+ void dumpAll() const;
+ void dumpAngles() const;
+ void dumpContours() const;
+ void dumpContoursAll() const;
+ void dumpContoursAngles() const;
+ void dumpContoursPts() const;
+ void dumpContoursPt(int segmentID) const;
+ void dumpContoursSegment(int segmentID) const;
+ void dumpContoursSpan(int segmentID) const;
+ void dumpContoursSpans() const;
+ void dumpPt(int ) const;
+ void dumpPts(const char* prefix = "seg") const;
+ void dumpPtsX(const char* prefix) const;
+ void dumpSegment(int ) const;
+ void dumpSegments(const char* prefix = "seg", SkPathOp op = (SkPathOp) -1) const;
+ void dumpSpan(int ) const;
+ void dumpSpans() const;
+
+ const SkPoint& end() const {
+ return fTail->pts()[SkPathOpsVerbToPoints(fTail->verb())];
+ }
+
+ SkOpSpan* findSortableTop(SkOpContour* );
+
+ SkOpSegment* first() {
+ SkASSERT(fCount > 0);
+ return &fHead;
+ }
+
+ const SkOpSegment* first() const {
+ SkASSERT(fCount > 0);
+ return &fHead;
+ }
+
+ void indentDump() const {
+ SkDEBUGCODE(fDebugIndent += 2);
+ }
+
+ void init(SkOpGlobalState* globalState, bool operand, bool isXor) {
+ fState = globalState;
+ fOperand = operand;
+ fXor = isXor;
+ SkDEBUGCODE(fID = globalState->nextContourID());
+ }
+
+ int isCcw() const {
+ return fCcw;
+ }
+
+ bool isXor() const {
+ return fXor;
+ }
+
+ void joinSegments() {
+ SkOpSegment* segment = &fHead;
+ SkOpSegment* next;
+ do {
+ next = segment->next();
+ segment->joinEnds(next ? next : &fHead);
+ } while ((segment = next));
+ }
+
+ void markAllDone() {
+ SkOpSegment* segment = &fHead;
+ do {
+ segment->markAllDone();
+ } while ((segment = segment->next()));
+ }
+
+ // Please keep this aligned with debugMissingCoincidence()
+ bool missingCoincidence() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ bool result = false;
+ do {
+ if (segment->missingCoincidence()) {
+ result = true;
+ }
+ segment = segment->next();
+ } while (segment);
+ return result;
+ }
+
+ bool moveMultiples() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ if (!segment->moveMultiples()) {
+ return false;
+ }
+ } while ((segment = segment->next()));
+ return true;
+ }
+
+ bool moveNearby() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ if (!segment->moveNearby()) {
+ return false;
+ }
+ } while ((segment = segment->next()));
+ return true;
+ }
+
+ SkOpContour* next() {
+ return fNext;
+ }
+
+ const SkOpContour* next() const {
+ return fNext;
+ }
+
+ bool operand() const {
+ return fOperand;
+ }
+
+ bool oppXor() const {
+ return fOppXor;
+ }
+
+ void outdentDump() const {
+ SkDEBUGCODE(fDebugIndent -= 2);
+ }
+
+ void rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits, SkArenaAlloc*);
+
+ void reset() {
+ fTail = nullptr;
+ fNext = nullptr;
+ fCount = 0;
+ fDone = false;
+ SkDEBUGCODE(fBounds.setLTRB(SK_ScalarMax, SK_ScalarMax, SK_ScalarMin, SK_ScalarMin));
+ SkDEBUGCODE(fFirstSorted = -1);
+ SkDEBUGCODE(fDebugIndent = 0);
+ }
+
+ void resetReverse() {
+ SkOpContour* next = this;
+ do {
+ if (!next->count()) {
+ continue;
+ }
+ next->fCcw = -1;
+ next->fReverse = false;
+ } while ((next = next->next()));
+ }
+
+ bool reversed() const {
+ return fReverse;
+ }
+
+ void setBounds() {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ fBounds = segment->bounds();
+ while ((segment = segment->next())) {
+ fBounds.add(segment->bounds());
+ }
+ }
+
+ void setCcw(int ccw) {
+ fCcw = ccw;
+ }
+
+ void setGlobalState(SkOpGlobalState* state) {
+ fState = state;
+ }
+
+ void setNext(SkOpContour* contour) {
+// SkASSERT(!fNext == !!contour);
+ fNext = contour;
+ }
+
+ void setOperand(bool isOp) {
+ fOperand = isOp;
+ }
+
+ void setOppXor(bool isOppXor) {
+ fOppXor = isOppXor;
+ }
+
+ void setReverse() {
+ fReverse = true;
+ }
+
+ void setXor(bool isXor) {
+ fXor = isXor;
+ }
+
+ bool sortAngles() {
+ SkASSERT(fCount > 0);
+ SkOpSegment* segment = &fHead;
+ do {
+ FAIL_IF(!segment->sortAngles());
+ } while ((segment = segment->next()));
+ return true;
+ }
+
+ const SkPoint& start() const {
+ return fHead.pts()[0];
+ }
+
+ void toPartialBackward(SkPathWriter* path) const {
+ const SkOpSegment* segment = fTail;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->tail(), segment->head(), path));
+ } while ((segment = segment->prev()));
+ }
+
+ void toPartialForward(SkPathWriter* path) const {
+ const SkOpSegment* segment = &fHead;
+ do {
+ SkAssertResult(segment->addCurveTo(segment->head(), segment->tail(), path));
+ } while ((segment = segment->next()));
+ }
+
+ void toReversePath(SkPathWriter* path) const;
+ void toPath(SkPathWriter* path) const;
+ SkOpSpan* undoneSpan();
+
+protected:
+ SkOpGlobalState* fState;
+ SkOpSegment fHead;
+ SkOpSegment* fTail;
+ SkOpContour* fNext;
+ SkPathOpsBounds fBounds;
+ int fCcw;
+ int fCount;
+ int fFirstSorted;
+ bool fDone; // set by find top segment
+ bool fOperand; // true for the second argument to a binary operator
+ bool fReverse; // true if contour should be reverse written to path (used only by fix winding)
+ bool fXor; // set if original path had even-odd fill
+ bool fOppXor; // set if opposite path had even-odd fill
+ SkDEBUGCODE(int fID);
+ SkDEBUGCODE(mutable int fDebugIndent);
+};
+
+class SkOpContourHead : public SkOpContour {
+public:
+ SkOpContour* appendContour() {
+ SkOpContour* contour = this->globalState()->allocator()->make<SkOpContour>();
+ contour->setNext(nullptr);
+ SkOpContour* prev = this;
+ SkOpContour* next;
+ while ((next = prev->next())) {
+ prev = next;
+ }
+ prev->setNext(contour);
+ return contour;
+ }
+
+ void joinAllSegments() {
+ SkOpContour* next = this;
+ do {
+ if (!next->count()) {
+ continue;
+ }
+ next->joinSegments();
+ } while ((next = next->next()));
+ }
+
+ void remove(SkOpContour* contour) {
+ if (contour == this) {
+ SkASSERT(this->count() == 0);
+ return;
+ }
+ SkASSERT(contour->next() == nullptr);
+ SkOpContour* prev = this;
+ SkOpContour* next;
+ while ((next = prev->next()) != contour) {
+ SkASSERT(next);
+ prev = next;
+ }
+ SkASSERT(prev);
+ prev->setNext(nullptr);
+ }
+
+};
+
+class SkOpContourBuilder {
+public:
+ SkOpContourBuilder(SkOpContour* contour)
+ : fContour(contour)
+ , fLastIsLine(false) {
+ }
+
+ void addConic(SkPoint pts[3], SkScalar weight);
+ void addCubic(SkPoint pts[4]);
+ void addCurve(SkPath::Verb verb, const SkPoint pts[4], SkScalar weight = 1);
+ void addLine(const SkPoint pts[2]);
+ void addQuad(SkPoint pts[3]);
+ void flush();
+ SkOpContour* contour() { return fContour; }
+ void setContour(SkOpContour* contour) { flush(); fContour = contour; }
+protected:
+ SkOpContour* fContour;
+ SkPoint fLastLine[2];
+ bool fLastIsLine;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp b/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp
new file mode 100644
index 0000000000..39c77abe8b
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpCubicHull.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+#include <cstddef>
+
+static bool rotate(const SkDCubic& cubic, int zero, int index, SkDCubic& rotPath) {
+ double dy = cubic[index].fY - cubic[zero].fY;
+ double dx = cubic[index].fX - cubic[zero].fX;
+ if (approximately_zero(dy)) {
+ if (approximately_zero(dx)) {
+ return false;
+ }
+ rotPath = cubic;
+ if (dy) {
+ rotPath[index].fY = cubic[zero].fY;
+ int mask = other_two(index, zero);
+ int side1 = index ^ mask;
+ int side2 = zero ^ mask;
+ if (approximately_equal(cubic[side1].fY, cubic[zero].fY)) {
+ rotPath[side1].fY = cubic[zero].fY;
+ }
+ if (approximately_equal(cubic[side2].fY, cubic[zero].fY)) {
+ rotPath[side2].fY = cubic[zero].fY;
+ }
+ }
+ return true;
+ }
+ for (int i = 0; i < 4; ++i) {
+ rotPath[i].fX = cubic[i].fX * dx + cubic[i].fY * dy;
+ rotPath[i].fY = cubic[i].fY * dx - cubic[i].fX * dy;
+ }
+ return true;
+}
+
+
+// Returns 0 if negative, 1 if zero, 2 if positive
+static int side(double x) {
+ return (x > 0) + (x >= 0);
+}
+
+/* Given a cubic, find the convex hull described by the end and control points.
+ The hull may have 3 or 4 points. Cubics that degenerate into a point or line
+ are not considered.
+
+ The hull is computed by assuming that three points, if unique and non-linear,
+ form a triangle. The fourth point may replace one of the first three, may be
+ discarded if in the triangle or on an edge, or may be inserted between any of
+ the three to form a convex quadralateral.
+
+ The indices returned in order describe the convex hull.
+*/
+int SkDCubic::convexHull(char order[4]) const {
+ size_t index;
+ // find top point
+ size_t yMin = 0;
+ for (index = 1; index < 4; ++index) {
+ if (fPts[yMin].fY > fPts[index].fY || (fPts[yMin].fY == fPts[index].fY
+ && fPts[yMin].fX > fPts[index].fX)) {
+ yMin = index;
+ }
+ }
+ order[0] = yMin;
+ int midX = -1;
+ int backupYMin = -1;
+ for (int pass = 0; pass < 2; ++pass) {
+ for (index = 0; index < 4; ++index) {
+ if (index == yMin) {
+ continue;
+ }
+ // rotate line from (yMin, index) to axis
+ // see if remaining two points are both above or below
+ // use this to find mid
+ int mask = other_two(yMin, index);
+ int side1 = yMin ^ mask;
+ int side2 = index ^ mask;
+ SkDCubic rotPath;
+ if (!rotate(*this, yMin, index, rotPath)) { // ! if cbc[yMin]==cbc[idx]
+ order[1] = side1;
+ order[2] = side2;
+ return 3;
+ }
+ int sides = side(rotPath[side1].fY - rotPath[yMin].fY);
+ sides ^= side(rotPath[side2].fY - rotPath[yMin].fY);
+ if (sides == 2) { // '2' means one remaining point <0, one >0
+ if (midX >= 0) {
+ // one of the control points is equal to an end point
+ order[0] = 0;
+ order[1] = 3;
+ if (fPts[1] == fPts[0] || fPts[1] == fPts[3]) {
+ order[2] = 2;
+ return 3;
+ }
+ if (fPts[2] == fPts[0] || fPts[2] == fPts[3]) {
+ order[2] = 1;
+ return 3;
+ }
+ // one of the control points may be very nearly but not exactly equal --
+ double dist1_0 = fPts[1].distanceSquared(fPts[0]);
+ double dist1_3 = fPts[1].distanceSquared(fPts[3]);
+ double dist2_0 = fPts[2].distanceSquared(fPts[0]);
+ double dist2_3 = fPts[2].distanceSquared(fPts[3]);
+ double smallest1distSq = std::min(dist1_0, dist1_3);
+ double smallest2distSq = std::min(dist2_0, dist2_3);
+ if (approximately_zero(std::min(smallest1distSq, smallest2distSq))) {
+ order[2] = smallest1distSq < smallest2distSq ? 2 : 1;
+ return 3;
+ }
+ }
+ midX = index;
+ } else if (sides == 0) { // '0' means both to one side or the other
+ backupYMin = index;
+ }
+ }
+ if (midX >= 0) {
+ break;
+ }
+ if (backupYMin < 0) {
+ break;
+ }
+ yMin = backupYMin;
+ backupYMin = -1;
+ }
+ if (midX < 0) {
+ midX = yMin ^ 3; // choose any other point
+ }
+ int mask = other_two(yMin, midX);
+ int least = yMin ^ mask;
+ int most = midX ^ mask;
+ order[0] = yMin;
+ order[1] = least;
+
+ // see if mid value is on same side of line (least, most) as yMin
+ SkDCubic midPath;
+ if (!rotate(*this, least, most, midPath)) { // ! if cbc[least]==cbc[most]
+ order[2] = midX;
+ return 3;
+ }
+ int midSides = side(midPath[yMin].fY - midPath[least].fY);
+ midSides ^= side(midPath[midX].fY - midPath[least].fY);
+ if (midSides != 2) { // if mid point is not between
+ order[2] = most;
+ return 3; // result is a triangle
+ }
+ order[2] = midX;
+ order[3] = most;
+ return 4; // result is a quadralateral
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp
new file mode 100644
index 0000000000..7078f2d67c
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkOpEdgeBuilder.h"
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "src/base/SkTSort.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkReduceOrder.h"
+
+#include <algorithm>
+#include <array>
+
+void SkOpEdgeBuilder::init() {
+ fOperand = false;
+ fXorMask[0] = fXorMask[1] = ((int)fPath->getFillType() & 1) ? kEvenOdd_PathOpsMask
+ : kWinding_PathOpsMask;
+ fUnparseable = false;
+ fSecondHalf = preFetch();
+}
+
+// very tiny points cause numerical instability : don't allow them
+static SkPoint force_small_to_zero(const SkPoint& pt) {
+ SkPoint ret = pt;
+ if (SkScalarAbs(ret.fX) < FLT_EPSILON_ORDERABLE_ERR) {
+ ret.fX = 0;
+ }
+ if (SkScalarAbs(ret.fY) < FLT_EPSILON_ORDERABLE_ERR) {
+ ret.fY = 0;
+ }
+ return ret;
+}
+
+static bool can_add_curve(SkPath::Verb verb, SkPoint* curve) {
+ if (SkPath::kMove_Verb == verb) {
+ return false;
+ }
+ for (int index = 0; index <= SkPathOpsVerbToPoints(verb); ++index) {
+ curve[index] = force_small_to_zero(curve[index]);
+ }
+ return SkPath::kLine_Verb != verb || !SkDPoint::ApproximatelyEqual(curve[0], curve[1]);
+}
+
+void SkOpEdgeBuilder::addOperand(const SkPath& path) {
+ SkASSERT(!fPathVerbs.empty() && fPathVerbs.back() == SkPath::kDone_Verb);
+ fPathVerbs.pop_back();
+ fPath = &path;
+ fXorMask[1] = ((int)fPath->getFillType() & 1) ? kEvenOdd_PathOpsMask
+ : kWinding_PathOpsMask;
+ preFetch();
+}
+
+bool SkOpEdgeBuilder::finish() {
+ fOperand = false;
+ if (fUnparseable || !walk()) {
+ return false;
+ }
+ complete();
+ SkOpContour* contour = fContourBuilder.contour();
+ if (contour && !contour->count()) {
+ fContoursHead->remove(contour);
+ }
+ return true;
+}
+
+void SkOpEdgeBuilder::closeContour(const SkPoint& curveEnd, const SkPoint& curveStart) {
+ if (!SkDPoint::ApproximatelyEqual(curveEnd, curveStart)) {
+ *fPathVerbs.append() = SkPath::kLine_Verb;
+ *fPathPts.append() = curveStart;
+ } else {
+ int verbCount = fPathVerbs.size();
+ int ptsCount = fPathPts.size();
+ if (SkPath::kLine_Verb == fPathVerbs[verbCount - 1]
+ && fPathPts[ptsCount - 2] == curveStart) {
+ fPathVerbs.pop_back();
+ fPathPts.pop_back();
+ } else {
+ fPathPts[ptsCount - 1] = curveStart;
+ }
+ }
+ *fPathVerbs.append() = SkPath::kClose_Verb;
+}
+
+int SkOpEdgeBuilder::preFetch() {
+ if (!fPath->isFinite()) {
+ fUnparseable = true;
+ return 0;
+ }
+ SkPoint curveStart;
+ SkPoint curve[4];
+ bool lastCurve = false;
+ for (auto [pathVerb, pts, w] : SkPathPriv::Iterate(*fPath)) {
+ auto verb = static_cast<SkPath::Verb>(pathVerb);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (!fAllowOpenContours && lastCurve) {
+ closeContour(curve[0], curveStart);
+ }
+ *fPathVerbs.append() = verb;
+ curve[0] = force_small_to_zero(pts[0]);
+ *fPathPts.append() = curve[0];
+ curveStart = curve[0];
+ lastCurve = false;
+ continue;
+ case SkPath::kLine_Verb:
+ curve[1] = force_small_to_zero(pts[1]);
+ if (SkDPoint::ApproximatelyEqual(curve[0], curve[1])) {
+ uint8_t lastVerb = fPathVerbs.back();
+ if (lastVerb != SkPath::kLine_Verb && lastVerb != SkPath::kMove_Verb) {
+ fPathPts.back() = curve[0] = curve[1];
+ }
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kQuad_Verb:
+ curve[1] = force_small_to_zero(pts[1]);
+ curve[2] = force_small_to_zero(pts[2]);
+ verb = SkReduceOrder::Quad(curve, curve);
+ if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kConic_Verb:
+ curve[1] = force_small_to_zero(pts[1]);
+ curve[2] = force_small_to_zero(pts[2]);
+ verb = SkReduceOrder::Quad(curve, curve);
+ if (SkPath::kQuad_Verb == verb && 1 != *w) {
+ verb = SkPath::kConic_Verb;
+ } else if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kCubic_Verb:
+ curve[1] = force_small_to_zero(pts[1]);
+ curve[2] = force_small_to_zero(pts[2]);
+ curve[3] = force_small_to_zero(pts[3]);
+ verb = SkReduceOrder::Cubic(curve, curve);
+ if (verb == SkPath::kMove_Verb) {
+ continue; // skip degenerate points
+ }
+ break;
+ case SkPath::kClose_Verb:
+ closeContour(curve[0], curveStart);
+ lastCurve = false;
+ continue;
+ case SkPath::kDone_Verb:
+ continue;
+ }
+ *fPathVerbs.append() = verb;
+ int ptCount = SkPathOpsVerbToPoints(verb);
+ fPathPts.append(ptCount, &curve[1]);
+ if (verb == SkPath::kConic_Verb) {
+ *fWeights.append() = *w;
+ }
+ curve[0] = curve[ptCount];
+ lastCurve = true;
+ }
+ if (!fAllowOpenContours && lastCurve) {
+ closeContour(curve[0], curveStart);
+ }
+ *fPathVerbs.append() = SkPath::kDone_Verb;
+ return fPathVerbs.size() - 1;
+}
+
+bool SkOpEdgeBuilder::close() {
+ complete();
+ return true;
+}
+
+bool SkOpEdgeBuilder::walk() {
+ uint8_t* verbPtr = fPathVerbs.begin();
+ uint8_t* endOfFirstHalf = &verbPtr[fSecondHalf];
+ SkPoint* pointsPtr = fPathPts.begin();
+ SkScalar* weightPtr = fWeights.begin();
+ SkPath::Verb verb;
+ SkOpContour* contour = fContourBuilder.contour();
+ int moveToPtrBump = 0;
+ while ((verb = (SkPath::Verb) *verbPtr) != SkPath::kDone_Verb) {
+ if (verbPtr == endOfFirstHalf) {
+ fOperand = true;
+ }
+ verbPtr++;
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ if (contour && contour->count()) {
+ if (fAllowOpenContours) {
+ complete();
+ } else if (!close()) {
+ return false;
+ }
+ }
+ if (!contour) {
+ fContourBuilder.setContour(contour = fContoursHead->appendContour());
+ }
+ contour->init(fGlobalState, fOperand,
+ fXorMask[fOperand] == kEvenOdd_PathOpsMask);
+ pointsPtr += moveToPtrBump;
+ moveToPtrBump = 1;
+ continue;
+ case SkPath::kLine_Verb:
+ fContourBuilder.addLine(pointsPtr);
+ break;
+ case SkPath::kQuad_Verb:
+ {
+ SkVector vec1 = pointsPtr[1] - pointsPtr[0];
+ SkVector vec2 = pointsPtr[2] - pointsPtr[1];
+ if (vec1.dot(vec2) < 0) {
+ SkPoint pair[5];
+ if (SkChopQuadAtMaxCurvature(pointsPtr, pair) == 1) {
+ goto addOneQuad;
+ }
+ if (!SkScalarsAreFinite(&pair[0].fX, std::size(pair) * 2)) {
+ return false;
+ }
+ for (unsigned index = 0; index < std::size(pair); ++index) {
+ pair[index] = force_small_to_zero(pair[index]);
+ }
+ SkPoint cStorage[2][2];
+ SkPath::Verb v1 = SkReduceOrder::Quad(&pair[0], cStorage[0]);
+ SkPath::Verb v2 = SkReduceOrder::Quad(&pair[2], cStorage[1]);
+ SkPoint* curve1 = v1 != SkPath::kLine_Verb ? &pair[0] : cStorage[0];
+ SkPoint* curve2 = v2 != SkPath::kLine_Verb ? &pair[2] : cStorage[1];
+ if (can_add_curve(v1, curve1) && can_add_curve(v2, curve2)) {
+ fContourBuilder.addCurve(v1, curve1);
+ fContourBuilder.addCurve(v2, curve2);
+ break;
+ }
+ }
+ }
+ addOneQuad:
+ fContourBuilder.addQuad(pointsPtr);
+ break;
+ case SkPath::kConic_Verb: {
+ SkVector vec1 = pointsPtr[1] - pointsPtr[0];
+ SkVector vec2 = pointsPtr[2] - pointsPtr[1];
+ SkScalar weight = *weightPtr++;
+ if (vec1.dot(vec2) < 0) {
+ // FIXME: max curvature for conics hasn't been implemented; use placeholder
+ SkScalar maxCurvature = SkFindQuadMaxCurvature(pointsPtr);
+ if (0 < maxCurvature && maxCurvature < 1) {
+ SkConic conic(pointsPtr, weight);
+ SkConic pair[2];
+ if (!conic.chopAt(maxCurvature, pair)) {
+ // if result can't be computed, use original
+ fContourBuilder.addConic(pointsPtr, weight);
+ break;
+ }
+ SkPoint cStorage[2][3];
+ SkPath::Verb v1 = SkReduceOrder::Conic(pair[0], cStorage[0]);
+ SkPath::Verb v2 = SkReduceOrder::Conic(pair[1], cStorage[1]);
+ SkPoint* curve1 = v1 != SkPath::kLine_Verb ? pair[0].fPts : cStorage[0];
+ SkPoint* curve2 = v2 != SkPath::kLine_Verb ? pair[1].fPts : cStorage[1];
+ if (can_add_curve(v1, curve1) && can_add_curve(v2, curve2)) {
+ fContourBuilder.addCurve(v1, curve1, pair[0].fW);
+ fContourBuilder.addCurve(v2, curve2, pair[1].fW);
+ break;
+ }
+ }
+ }
+ fContourBuilder.addConic(pointsPtr, weight);
+ } break;
+ case SkPath::kCubic_Verb:
+ {
+ // Split complex cubics (such as self-intersecting curves or
+ // ones with difficult curvature) in two before proceeding.
+ // This can be required for intersection to succeed.
+ SkScalar splitT[3];
+ int breaks = SkDCubic::ComplexBreak(pointsPtr, splitT);
+ if (!breaks) {
+ fContourBuilder.addCubic(pointsPtr);
+ break;
+ }
+ SkASSERT(breaks <= (int) std::size(splitT));
+ struct Splitsville {
+ double fT[2];
+ SkPoint fPts[4];
+ SkPoint fReduced[4];
+ SkPath::Verb fVerb;
+ bool fCanAdd;
+ } splits[4];
+ SkASSERT(std::size(splits) == std::size(splitT) + 1);
+ SkTQSort(splitT, splitT + breaks);
+ for (int index = 0; index <= breaks; ++index) {
+ Splitsville* split = &splits[index];
+ split->fT[0] = index ? splitT[index - 1] : 0;
+ split->fT[1] = index < breaks ? splitT[index] : 1;
+ SkDCubic part = SkDCubic::SubDivide(pointsPtr, split->fT[0], split->fT[1]);
+ if (!part.toFloatPoints(split->fPts)) {
+ return false;
+ }
+ split->fVerb = SkReduceOrder::Cubic(split->fPts, split->fReduced);
+ SkPoint* curve = SkPath::kCubic_Verb == split->fVerb
+ ? split->fPts : split->fReduced;
+ split->fCanAdd = can_add_curve(split->fVerb, curve);
+ }
+ for (int index = 0; index <= breaks; ++index) {
+ Splitsville* split = &splits[index];
+ if (!split->fCanAdd) {
+ continue;
+ }
+ int prior = index;
+ while (prior > 0 && !splits[prior - 1].fCanAdd) {
+ --prior;
+ }
+ if (prior < index) {
+ split->fT[0] = splits[prior].fT[0];
+ split->fPts[0] = splits[prior].fPts[0];
+ }
+ int next = index;
+ int breakLimit = std::min(breaks, (int) std::size(splits) - 1);
+ while (next < breakLimit && !splits[next + 1].fCanAdd) {
+ ++next;
+ }
+ if (next > index) {
+ split->fT[1] = splits[next].fT[1];
+ split->fPts[3] = splits[next].fPts[3];
+ }
+ if (prior < index || next > index) {
+ split->fVerb = SkReduceOrder::Cubic(split->fPts, split->fReduced);
+ }
+ SkPoint* curve = SkPath::kCubic_Verb == split->fVerb
+ ? split->fPts : split->fReduced;
+ if (!can_add_curve(split->fVerb, curve)) {
+ return false;
+ }
+ fContourBuilder.addCurve(split->fVerb, curve);
+ }
+ }
+ break;
+ case SkPath::kClose_Verb:
+ SkASSERT(contour);
+ if (!close()) {
+ return false;
+ }
+ contour = nullptr;
+ continue;
+ default:
+ SkDEBUGFAIL("bad verb");
+ return false;
+ }
+ SkASSERT(contour);
+ if (contour->count()) {
+ contour->debugValidate();
+ }
+ pointsPtr += SkPathOpsVerbToPoints(verb);
+ }
+ fContourBuilder.flush();
+ if (contour && contour->count() &&!fAllowOpenContours && !close()) {
+ return false;
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h
new file mode 100644
index 0000000000..7c01756226
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpEdgeBuilder.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpEdgeBuilder_DEFINED
+#define SkOpEdgeBuilder_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkPathOpsTypes.h"
+#include "src/pathops/SkPathWriter.h"
+
+#include <cstdint>
+
+class SkPath;
+
+class SkOpEdgeBuilder {
+public:
+ SkOpEdgeBuilder(const SkPathWriter& path, SkOpContourHead* contours2,
+ SkOpGlobalState* globalState)
+ : fGlobalState(globalState)
+ , fPath(path.nativePath())
+ , fContourBuilder(contours2)
+ , fContoursHead(contours2)
+ , fAllowOpenContours(true) {
+ init();
+ }
+
+ SkOpEdgeBuilder(const SkPath& path, SkOpContourHead* contours2, SkOpGlobalState* globalState)
+ : fGlobalState(globalState)
+ , fPath(&path)
+ , fContourBuilder(contours2)
+ , fContoursHead(contours2)
+ , fAllowOpenContours(false) {
+ init();
+ }
+
+ void addOperand(const SkPath& path);
+
+ void complete() {
+ fContourBuilder.flush();
+ SkOpContour* contour = fContourBuilder.contour();
+ if (contour && contour->count()) {
+ contour->complete();
+ fContourBuilder.setContour(nullptr);
+ }
+ }
+
+ bool finish();
+
+ const SkOpContour* head() const {
+ return fContoursHead;
+ }
+
+ void init();
+ bool unparseable() const { return fUnparseable; }
+ SkPathOpsMask xorMask() const { return fXorMask[fOperand]; }
+
+private:
+ void closeContour(const SkPoint& curveEnd, const SkPoint& curveStart);
+ bool close();
+ int preFetch();
+ bool walk();
+
+ SkOpGlobalState* fGlobalState;
+ const SkPath* fPath;
+ SkTDArray<SkPoint> fPathPts;
+ SkTDArray<SkScalar> fWeights;
+ SkTDArray<uint8_t> fPathVerbs;
+ SkOpContourBuilder fContourBuilder;
+ SkOpContourHead* fContoursHead;
+ SkPathOpsMask fXorMask[2];
+ int fSecondHalf;
+ bool fOperand;
+ bool fAllowOpenContours;
+ bool fUnparseable;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpSegment.cpp b/gfx/skia/skia/src/pathops/SkOpSegment.cpp
new file mode 100644
index 0000000000..6a1d406c07
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSegment.cpp
@@ -0,0 +1,1787 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkOpSegment.h"
+
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkPointPriv.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathWriter.h"
+
+#include <algorithm>
+#include <cfloat>
+
+/*
+After computing raw intersections, post process all segments to:
+- find small collections of points that can be collapsed to a single point
+- find missing intersections to resolve differences caused by different algorithms
+
+Consider segments containing tiny or small intervals. Consider coincident segments
+because coincidence finds intersections through distance measurement that non-coincident
+intersection tests cannot.
+ */
+
+#define F (false) // discard the edge
+#define T (true) // keep the edge
+
+static const bool gUnaryActiveEdge[2][2] = {
+// from=0 from=1
+// to=0,1 to=0,1
+ {F, T}, {T, F},
+};
+
+static const bool gActiveEdge[kXOR_SkPathOp + 1][2][2][2][2] = {
+// miFrom=0 miFrom=1
+// miTo=0 miTo=1 miTo=0 miTo=1
+// suFrom=0 1 suFrom=0 1 suFrom=0 1 suFrom=0 1
+// suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1 suTo=0,1
+ {{{{F, F}, {F, F}}, {{T, F}, {T, F}}}, {{{T, T}, {F, F}}, {{F, T}, {T, F}}}}, // mi - su
+ {{{{F, F}, {F, F}}, {{F, T}, {F, T}}}, {{{F, F}, {T, T}}, {{F, T}, {T, F}}}}, // mi & su
+ {{{{F, T}, {T, F}}, {{T, T}, {F, F}}}, {{{T, F}, {T, F}}, {{F, F}, {F, F}}}}, // mi | su
+ {{{{F, T}, {T, F}}, {{T, F}, {F, T}}}, {{{T, F}, {F, T}}, {{F, T}, {T, F}}}}, // mi ^ su
+};
+
+#undef F
+#undef T
+
+SkOpAngle* SkOpSegment::activeAngle(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ if (SkOpAngle* result = activeAngleInner(start, startPtr, endPtr, done)) {
+ return result;
+ }
+ if (SkOpAngle* result = activeAngleOther(start, startPtr, endPtr, done)) {
+ return result;
+ }
+ return nullptr;
+}
+
+SkOpAngle* SkOpSegment::activeAngleInner(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ SkOpSpan* upSpan = start->upCastable();
+ if (upSpan) {
+ if (upSpan->windValue() || upSpan->oppValue()) {
+ SkOpSpanBase* next = upSpan->next();
+ if (!*endPtr) {
+ *startPtr = start;
+ *endPtr = next;
+ }
+ if (!upSpan->done()) {
+ if (upSpan->windSum() != SK_MinS32) {
+ return spanToAngle(start, next);
+ }
+ *done = false;
+ }
+ } else {
+ SkASSERT(upSpan->done());
+ }
+ }
+ SkOpSpan* downSpan = start->prev();
+ // edge leading into junction
+ if (downSpan) {
+ if (downSpan->windValue() || downSpan->oppValue()) {
+ if (!*endPtr) {
+ *startPtr = start;
+ *endPtr = downSpan;
+ }
+ if (!downSpan->done()) {
+ if (downSpan->windSum() != SK_MinS32) {
+ return spanToAngle(start, downSpan);
+ }
+ *done = false;
+ }
+ } else {
+ SkASSERT(downSpan->done());
+ }
+ }
+ return nullptr;
+}
+
+SkOpAngle* SkOpSegment::activeAngleOther(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done) {
+ SkOpPtT* oPtT = start->ptT()->next();
+ SkOpSegment* other = oPtT->segment();
+ SkOpSpanBase* oSpan = oPtT->span();
+ return other->activeAngleInner(oSpan, startPtr, endPtr, done);
+}
+
+bool SkOpSegment::activeOp(SkOpSpanBase* start, SkOpSpanBase* end, int xorMiMask, int xorSuMask,
+ SkPathOp op) {
+ int sumMiWinding = this->updateWinding(end, start);
+ int sumSuWinding = this->updateOppWinding(end, start);
+#if DEBUG_LIMIT_WIND_SUM
+ SkASSERT(abs(sumMiWinding) <= DEBUG_LIMIT_WIND_SUM);
+ SkASSERT(abs(sumSuWinding) <= DEBUG_LIMIT_WIND_SUM);
+#endif
+ if (this->operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ return this->activeOp(xorMiMask, xorSuMask, start, end, op, &sumMiWinding, &sumSuWinding);
+}
+
+bool SkOpSegment::activeOp(int xorMiMask, int xorSuMask, SkOpSpanBase* start, SkOpSpanBase* end,
+ SkPathOp op, int* sumMiWinding, int* sumSuWinding) {
+ int maxWinding, sumWinding, oppMaxWinding, oppSumWinding;
+ this->setUpWindings(start, end, sumMiWinding, sumSuWinding,
+ &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ bool miFrom;
+ bool miTo;
+ bool suFrom;
+ bool suTo;
+ if (operand()) {
+ miFrom = (oppMaxWinding & xorMiMask) != 0;
+ miTo = (oppSumWinding & xorMiMask) != 0;
+ suFrom = (maxWinding & xorSuMask) != 0;
+ suTo = (sumWinding & xorSuMask) != 0;
+ } else {
+ miFrom = (maxWinding & xorMiMask) != 0;
+ miTo = (sumWinding & xorMiMask) != 0;
+ suFrom = (oppMaxWinding & xorSuMask) != 0;
+ suTo = (oppSumWinding & xorSuMask) != 0;
+ }
+ bool result = gActiveEdge[op][miFrom][miTo][suFrom][suTo];
+#if DEBUG_ACTIVE_OP
+ SkDebugf("%s id=%d t=%1.9g tEnd=%1.9g op=%s miFrom=%d miTo=%d suFrom=%d suTo=%d result=%d\n",
+ __FUNCTION__, debugID(), start->t(), end->t(),
+ SkPathOpsDebug::kPathOpStr[op], miFrom, miTo, suFrom, suTo, result);
+#endif
+ return result;
+}
+
+bool SkOpSegment::activeWinding(SkOpSpanBase* start, SkOpSpanBase* end) {
+ int sumWinding = updateWinding(end, start);
+ return activeWinding(start, end, &sumWinding);
+}
+
+bool SkOpSegment::activeWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* sumWinding) {
+ int maxWinding;
+ setUpWinding(start, end, &maxWinding, sumWinding);
+ bool from = maxWinding != 0;
+ bool to = *sumWinding != 0;
+ bool result = gUnaryActiveEdge[from][to];
+ return result;
+}
+
+bool SkOpSegment::addCurveTo(const SkOpSpanBase* start, const SkOpSpanBase* end,
+ SkPathWriter* path) const {
+ const SkOpSpan* spanStart = start->starter(end);
+ FAIL_IF(spanStart->alreadyAdded());
+ const_cast<SkOpSpan*>(spanStart)->markAdded();
+ SkDCurveSweep curvePart;
+ start->segment()->subDivide(start, end, &curvePart.fCurve);
+ curvePart.setCurveHullSweep(fVerb);
+ SkPath::Verb verb = curvePart.isCurve() ? fVerb : SkPath::kLine_Verb;
+ path->deferredMove(start->ptT());
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ FAIL_IF(!path->deferredLine(end->ptT()));
+ break;
+ case SkPath::kQuad_Verb:
+ path->quadTo(curvePart.fCurve.fQuad[1].asSkPoint(), end->ptT());
+ break;
+ case SkPath::kConic_Verb:
+ path->conicTo(curvePart.fCurve.fConic[1].asSkPoint(), end->ptT(),
+ curvePart.fCurve.fConic.fWeight);
+ break;
+ case SkPath::kCubic_Verb:
+ path->cubicTo(curvePart.fCurve.fCubic[1].asSkPoint(),
+ curvePart.fCurve.fCubic[2].asSkPoint(), end->ptT());
+ break;
+ default:
+ SkASSERT(0);
+ }
+ return true;
+}
+
+const SkOpPtT* SkOpSegment::existing(double t, const SkOpSegment* opp) const {
+ const SkOpSpanBase* test = &fHead;
+ const SkOpPtT* testPtT;
+ SkPoint pt = this->ptAtT(t);
+ do {
+ testPtT = test->ptT();
+ if (testPtT->fT == t) {
+ break;
+ }
+ if (!this->match(testPtT, this, t, pt)) {
+ if (t < testPtT->fT) {
+ return nullptr;
+ }
+ continue;
+ }
+ if (!opp) {
+ return testPtT;
+ }
+ const SkOpPtT* loop = testPtT->next();
+ while (loop != testPtT) {
+ if (loop->segment() == this && loop->fT == t && loop->fPt == pt) {
+ goto foundMatch;
+ }
+ loop = loop->next();
+ }
+ return nullptr;
+ } while ((test = test->upCast()->next()));
+foundMatch:
+ return opp && !test->contains(opp) ? nullptr : testPtT;
+}
+
+// break the span so that the coincident part does not change the angle of the remainder
+bool SkOpSegment::addExpanded(double newT, const SkOpSpanBase* test, bool* startOver) {
+ if (this->contains(newT)) {
+ return true;
+ }
+ this->globalState()->resetAllocatedOpSpan();
+ FAIL_IF(!between(0, newT, 1));
+ SkOpPtT* newPtT = this->addT(newT);
+ *startOver |= this->globalState()->allocatedOpSpan();
+ if (!newPtT) {
+ return false;
+ }
+ newPtT->fPt = this->ptAtT(newT);
+ SkOpPtT* oppPrev = test->ptT()->oppPrev(newPtT);
+ if (oppPrev) {
+ // const cast away to change linked list; pt/t values stays unchanged
+ SkOpSpanBase* writableTest = const_cast<SkOpSpanBase*>(test);
+ writableTest->mergeMatches(newPtT->span());
+ writableTest->ptT()->addOpp(newPtT, oppPrev);
+ writableTest->checkForCollapsedCoincidence();
+ }
+ return true;
+}
+
+// Please keep this in sync with debugAddT()
+SkOpPtT* SkOpSegment::addT(double t, const SkPoint& pt) {
+ debugValidate();
+ SkOpSpanBase* spanBase = &fHead;
+ do {
+ SkOpPtT* result = spanBase->ptT();
+ if (t == result->fT || (!zero_or_one(t) && this->match(result, this, t, pt))) {
+ spanBase->bumpSpanAdds();
+ return result;
+ }
+ if (t < result->fT) {
+ SkOpSpan* prev = result->span()->prev();
+ FAIL_WITH_NULL_IF(!prev);
+ // marks in global state that new op span has been allocated
+ SkOpSpan* span = this->insert(prev);
+ span->init(this, prev, t, pt);
+ this->debugValidate();
+#if DEBUG_ADD_T
+ SkDebugf("%s insert t=%1.9g segID=%d spanID=%d\n", __FUNCTION__, t,
+ span->segment()->debugID(), span->debugID());
+#endif
+ span->bumpSpanAdds();
+ return span->ptT();
+ }
+ FAIL_WITH_NULL_IF(spanBase == &fTail);
+ } while ((spanBase = spanBase->upCast()->next()));
+ SkASSERT(0);
+ return nullptr; // we never get here, but need this to satisfy compiler
+}
+
+SkOpPtT* SkOpSegment::addT(double t) {
+ return addT(t, this->ptAtT(t));
+}
+
+void SkOpSegment::calcAngles() {
+ bool activePrior = !fHead.isCanceled();
+ if (activePrior && !fHead.simple()) {
+ addStartSpan();
+ }
+ SkOpSpan* prior = &fHead;
+ SkOpSpanBase* spanBase = fHead.next();
+ while (spanBase != &fTail) {
+ if (activePrior) {
+ SkOpAngle* priorAngle = this->globalState()->allocator()->make<SkOpAngle>();
+ priorAngle->set(spanBase, prior);
+ spanBase->setFromAngle(priorAngle);
+ }
+ SkOpSpan* span = spanBase->upCast();
+ bool active = !span->isCanceled();
+ SkOpSpanBase* next = span->next();
+ if (active) {
+ SkOpAngle* angle = this->globalState()->allocator()->make<SkOpAngle>();
+ angle->set(span, next);
+ span->setToAngle(angle);
+ }
+ activePrior = active;
+ prior = span;
+ spanBase = next;
+ }
+ if (activePrior && !fTail.simple()) {
+ addEndSpan();
+ }
+}
+
+// Please keep this in sync with debugClearAll()
+void SkOpSegment::clearAll() {
+ SkOpSpan* span = &fHead;
+ do {
+ this->clearOne(span);
+ } while ((span = span->next()->upCastable()));
+ this->globalState()->coincidence()->release(this);
+}
+
+// Please keep this in sync with debugClearOne()
+void SkOpSegment::clearOne(SkOpSpan* span) {
+ span->setWindValue(0);
+ span->setOppValue(0);
+ this->markDone(span);
+}
+
+SkOpSpanBase::Collapsed SkOpSegment::collapsed(double s, double e) const {
+ const SkOpSpanBase* span = &fHead;
+ do {
+ SkOpSpanBase::Collapsed result = span->collapsed(s, e);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return result;
+ }
+ } while (span->upCastable() && (span = span->upCast()->next()));
+ return SkOpSpanBase::Collapsed::kNo;
+}
+
+bool SkOpSegment::ComputeOneSum(const SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType includeType) {
+ SkOpSegment* baseSegment = baseAngle->segment();
+ int sumMiWinding = baseSegment->updateWindingReverse(baseAngle);
+ int sumSuWinding;
+ bool binary = includeType >= SkOpAngle::kBinarySingle;
+ if (binary) {
+ sumSuWinding = baseSegment->updateOppWindingReverse(baseAngle);
+ if (baseSegment->operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* nextSegment = nextAngle->segment();
+ int maxWinding, sumWinding;
+ SkOpSpanBase* last = nullptr;
+ if (binary) {
+ int oppMaxWinding, oppSumWinding;
+ nextSegment->setUpWindings(nextAngle->start(), nextAngle->end(), &sumMiWinding,
+ &sumSuWinding, &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ if (!nextSegment->markAngle(maxWinding, sumWinding, oppMaxWinding, oppSumWinding,
+ nextAngle, &last)) {
+ return false;
+ }
+ } else {
+ nextSegment->setUpWindings(nextAngle->start(), nextAngle->end(), &sumMiWinding,
+ &maxWinding, &sumWinding);
+ if (!nextSegment->markAngle(maxWinding, sumWinding, nextAngle, &last)) {
+ return false;
+ }
+ }
+ nextAngle->setLastMarked(last);
+ return true;
+}
+
+bool SkOpSegment::ComputeOneSumReverse(SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType includeType) {
+ SkOpSegment* baseSegment = baseAngle->segment();
+ int sumMiWinding = baseSegment->updateWinding(baseAngle);
+ int sumSuWinding;
+ bool binary = includeType >= SkOpAngle::kBinarySingle;
+ if (binary) {
+ sumSuWinding = baseSegment->updateOppWinding(baseAngle);
+ if (baseSegment->operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* nextSegment = nextAngle->segment();
+ int maxWinding, sumWinding;
+ SkOpSpanBase* last = nullptr;
+ if (binary) {
+ int oppMaxWinding, oppSumWinding;
+ nextSegment->setUpWindings(nextAngle->end(), nextAngle->start(), &sumMiWinding,
+ &sumSuWinding, &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ if (!nextSegment->markAngle(maxWinding, sumWinding, oppMaxWinding, oppSumWinding,
+ nextAngle, &last)) {
+ return false;
+ }
+ } else {
+ nextSegment->setUpWindings(nextAngle->end(), nextAngle->start(), &sumMiWinding,
+ &maxWinding, &sumWinding);
+ if (!nextSegment->markAngle(maxWinding, sumWinding, nextAngle, &last)) {
+ return false;
+ }
+ }
+ nextAngle->setLastMarked(last);
+ return true;
+}
+
+// at this point, the span is already ordered, or unorderable
+int SkOpSegment::computeSum(SkOpSpanBase* start, SkOpSpanBase* end,
+ SkOpAngle::IncludeType includeType) {
+ SkASSERT(includeType != SkOpAngle::kUnaryXor);
+ SkOpAngle* firstAngle = this->spanToAngle(end, start);
+ if (nullptr == firstAngle || nullptr == firstAngle->next()) {
+ return SK_NaN32;
+ }
+ // if all angles have a computed winding,
+ // or if no adjacent angles are orderable,
+ // or if adjacent orderable angles have no computed winding,
+ // there's nothing to do
+ // if two orderable angles are adjacent, and both are next to orderable angles,
+ // and one has winding computed, transfer to the other
+ SkOpAngle* baseAngle = nullptr;
+ bool tryReverse = false;
+ // look for counterclockwise transfers
+ SkOpAngle* angle = firstAngle->previous();
+ SkOpAngle* next = angle->next();
+ firstAngle = next;
+ do {
+ SkOpAngle* prior = angle;
+ angle = next;
+ next = angle->next();
+ SkASSERT(prior->next() == angle);
+ SkASSERT(angle->next() == next);
+ if (prior->unorderable() || angle->unorderable() || next->unorderable()) {
+ baseAngle = nullptr;
+ continue;
+ }
+ int testWinding = angle->starter()->windSum();
+ if (SK_MinS32 != testWinding) {
+ baseAngle = angle;
+ tryReverse = true;
+ continue;
+ }
+ if (baseAngle) {
+ ComputeOneSum(baseAngle, angle, includeType);
+ baseAngle = SK_MinS32 != angle->starter()->windSum() ? angle : nullptr;
+ }
+ } while (next != firstAngle);
+ if (baseAngle && SK_MinS32 == firstAngle->starter()->windSum()) {
+ firstAngle = baseAngle;
+ tryReverse = true;
+ }
+ if (tryReverse) {
+ baseAngle = nullptr;
+ SkOpAngle* prior = firstAngle;
+ do {
+ angle = prior;
+ prior = angle->previous();
+ SkASSERT(prior->next() == angle);
+ next = angle->next();
+ if (prior->unorderable() || angle->unorderable() || next->unorderable()) {
+ baseAngle = nullptr;
+ continue;
+ }
+ int testWinding = angle->starter()->windSum();
+ if (SK_MinS32 != testWinding) {
+ baseAngle = angle;
+ continue;
+ }
+ if (baseAngle) {
+ ComputeOneSumReverse(baseAngle, angle, includeType);
+ baseAngle = SK_MinS32 != angle->starter()->windSum() ? angle : nullptr;
+ }
+ } while (prior != firstAngle);
+ }
+ return start->starter(end)->windSum();
+}
+
+bool SkOpSegment::contains(double newT) const {
+ const SkOpSpanBase* spanBase = &fHead;
+ do {
+ if (spanBase->ptT()->contains(this, newT)) {
+ return true;
+ }
+ if (spanBase == &fTail) {
+ break;
+ }
+ spanBase = spanBase->upCast()->next();
+ } while (true);
+ return false;
+}
+
+void SkOpSegment::release(const SkOpSpan* span) {
+ if (span->done()) {
+ --fDoneCount;
+ }
+ --fCount;
+ SkOPASSERT(fCount >= fDoneCount);
+}
+
+#if DEBUG_ANGLE
+// called only by debugCheckNearCoincidence
+double SkOpSegment::distSq(double t, const SkOpAngle* oppAngle) const {
+ SkDPoint testPt = this->dPtAtT(t);
+ SkDLine testPerp = {{ testPt, testPt }};
+ SkDVector slope = this->dSlopeAtT(t);
+ testPerp[1].fX += slope.fY;
+ testPerp[1].fY -= slope.fX;
+ SkIntersections i;
+ const SkOpSegment* oppSegment = oppAngle->segment();
+ (*CurveIntersectRay[oppSegment->verb()])(oppSegment->pts(), oppSegment->weight(), testPerp, &i);
+ double closestDistSq = SK_ScalarInfinity;
+ for (int index = 0; index < i.used(); ++index) {
+ if (!between(oppAngle->start()->t(), i[0][index], oppAngle->end()->t())) {
+ continue;
+ }
+ double testDistSq = testPt.distanceSquared(i.pt(index));
+ if (closestDistSq > testDistSq) {
+ closestDistSq = testDistSq;
+ }
+ }
+ return closestDistSq;
+}
+#endif
+
+/*
+ The M and S variable name parts stand for the operators.
+ Mi stands for Minuend (see wiki subtraction, analogous to difference)
+ Su stands for Subtrahend
+ The Opp variable name part designates that the value is for the Opposite operator.
+ Opposite values result from combining coincident spans.
+ */
+SkOpSegment* SkOpSegment::findNextOp(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable, bool* simple,
+ SkPathOp op, int xorMiMask, int xorSuMask) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if ((*simple = other)) {
+ // mark the smaller of startIndex, endIndex done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ // more than one viable candidate -- measure angles to find best
+ int calcWinding = computeSum(start, endNear, SkOpAngle::kBinaryOpp);
+ bool sortable = calcWinding != SK_NaN32;
+ if (!sortable) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ int sumMiWinding = updateWinding(end, start);
+ if (sumMiWinding == SK_MinS32) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ int sumSuWinding = updateOppWinding(end, start);
+ if (operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ nextSegment = nextAngle->segment();
+ bool activeAngle = nextSegment->activeOp(xorMiMask, xorSuMask, nextAngle->start(),
+ nextAngle->end(), op, &sumMiWinding, &sumSuWinding);
+ if (activeAngle) {
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ foundDone = nextSegment->done(nextAngle);
+ }
+ }
+ if (nextSegment->done()) {
+ continue;
+ }
+ if (!activeAngle) {
+ (void) nextSegment->markAndChaseDone(nextAngle->start(), nextAngle->end(), nullptr);
+ }
+ SkOpSpanBase* last = nextAngle->lastMarked();
+ if (last) {
+ SkASSERT(!SkPathOpsDebug::ChaseContains(*chase, last));
+ *chase->append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ } while ((nextAngle = nextAngle->next()) != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%p end=%p\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+ #endif
+ return nextSegment;
+}
+
+SkOpSegment* SkOpSegment::findNextWinding(SkTDArray<SkOpSpanBase*>* chase,
+ SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd, bool* unsortable) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if (other) {
+ // mark the smaller of startIndex, endIndex done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ // more than one viable candidate -- measure angles to find best
+ int calcWinding = computeSum(start, endNear, SkOpAngle::kUnaryWinding);
+ bool sortable = calcWinding != SK_NaN32;
+ if (!sortable) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ int sumWinding = updateWinding(end, start);
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ nextSegment = nextAngle->segment();
+ bool activeAngle = nextSegment->activeWinding(nextAngle->start(), nextAngle->end(),
+ &sumWinding);
+ if (activeAngle) {
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ foundDone = nextSegment->done(nextAngle);
+ }
+ }
+ if (nextSegment->done()) {
+ continue;
+ }
+ if (!activeAngle) {
+ (void) nextSegment->markAndChaseDone(nextAngle->start(), nextAngle->end(), nullptr);
+ }
+ SkOpSpanBase* last = nextAngle->lastMarked();
+ if (last) {
+ SkASSERT(!SkPathOpsDebug::ChaseContains(*chase, last));
+ *chase->append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ } while ((nextAngle = nextAngle->next()) != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%p end=%p\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+ #endif
+ return nextSegment;
+}
+
+SkOpSegment* SkOpSegment::findNextXor(SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd,
+ bool* unsortable) {
+ SkOpSpanBase* start = *nextStart;
+ SkOpSpanBase* end = *nextEnd;
+ SkASSERT(start != end);
+ int step = start->step(end);
+ SkOpSegment* other = this->isSimple(nextStart, &step); // advances nextStart
+ if (other) {
+ // mark the smaller of startIndex, endIndex done, and all adjacent
+ // spans with the same T value (but not 'other' spans)
+#if DEBUG_WINDING
+ SkDebugf("%s simple\n", __FUNCTION__);
+#endif
+ SkOpSpan* startSpan = start->starter(end);
+ if (startSpan->done()) {
+ return nullptr;
+ }
+ markDone(startSpan);
+ *nextEnd = step > 0 ? (*nextStart)->upCast()->next() : (*nextStart)->prev();
+ return other;
+ }
+ SkDEBUGCODE(SkOpSpanBase* endNear = step > 0 ? (*nextStart)->upCast()->next() \
+ : (*nextStart)->prev());
+ SkASSERT(endNear == end); // is this ever not end?
+ SkASSERT(endNear);
+ SkASSERT(start != endNear);
+ SkASSERT((start->t() < endNear->t()) ^ (step < 0));
+ SkOpAngle* angle = this->spanToAngle(end, start);
+ if (!angle || angle->unorderable()) {
+ *unsortable = true;
+ markDone(start->starter(end));
+ return nullptr;
+ }
+#if DEBUG_SORT
+ SkDebugf("%s\n", __FUNCTION__);
+ angle->debugLoop();
+#endif
+ SkOpAngle* nextAngle = angle->next();
+ const SkOpAngle* foundAngle = nullptr;
+ bool foundDone = false;
+ // iterate through the angle, and compute everyone's winding
+ SkOpSegment* nextSegment;
+ int activeCount = 0;
+ do {
+ if (!nextAngle) {
+ return nullptr;
+ }
+ nextSegment = nextAngle->segment();
+ ++activeCount;
+ if (!foundAngle || (foundDone && activeCount & 1)) {
+ foundAngle = nextAngle;
+ if (!(foundDone = nextSegment->done(nextAngle))) {
+ break;
+ }
+ }
+ nextAngle = nextAngle->next();
+ } while (nextAngle != angle);
+ start->segment()->markDone(start->starter(end));
+ if (!foundAngle) {
+ return nullptr;
+ }
+ *nextStart = foundAngle->start();
+ *nextEnd = foundAngle->end();
+ nextSegment = foundAngle->segment();
+#if DEBUG_WINDING
+ SkDebugf("%s from:[%d] to:[%d] start=%p end=%p\n",
+ __FUNCTION__, debugID(), nextSegment->debugID(), *nextStart, *nextEnd);
+ #endif
+ return nextSegment;
+}
+
+SkOpGlobalState* SkOpSegment::globalState() const {
+ return contour()->globalState();
+}
+
+void SkOpSegment::init(SkPoint pts[], SkScalar weight, SkOpContour* contour, SkPath::Verb verb) {
+ fContour = contour;
+ fNext = nullptr;
+ fPts = pts;
+ fWeight = weight;
+ fVerb = verb;
+ fCount = 0;
+ fDoneCount = 0;
+ fVisited = false;
+ SkOpSpan* zeroSpan = &fHead;
+ zeroSpan->init(this, nullptr, 0, fPts[0]);
+ SkOpSpanBase* oneSpan = &fTail;
+ zeroSpan->setNext(oneSpan);
+ oneSpan->initBase(this, zeroSpan, 1, fPts[SkPathOpsVerbToPoints(fVerb)]);
+ SkDEBUGCODE(fID = globalState()->nextSegmentID());
+}
+
+bool SkOpSegment::isClose(double t, const SkOpSegment* opp) const {
+ SkDPoint cPt = this->dPtAtT(t);
+ SkDVector dxdy = (*CurveDSlopeAtT[this->verb()])(this->pts(), this->weight(), t);
+ SkDLine perp = {{ cPt, {cPt.fX + dxdy.fY, cPt.fY - dxdy.fX} }};
+ SkIntersections i;
+ (*CurveIntersectRay[opp->verb()])(opp->pts(), opp->weight(), perp, &i);
+ int used = i.used();
+ for (int index = 0; index < used; ++index) {
+ if (cPt.roughlyEqual(i.pt(index))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SkOpSegment::isXor() const {
+ return fContour->isXor();
+}
+
+void SkOpSegment::markAllDone() {
+ SkOpSpan* span = this->head();
+ do {
+ this->markDone(span);
+ } while ((span = span->next()->upCastable()));
+}
+
+ bool SkOpSegment::markAndChaseDone(SkOpSpanBase* start, SkOpSpanBase* end, SkOpSpanBase** found) {
+ int step = start->step(end);
+ SkOpSpan* minSpan = start->starter(end);
+ markDone(minSpan);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ SkOpSpan* priorDone = nullptr;
+ SkOpSpan* lastDone = nullptr;
+ int safetyNet = 100000;
+ while ((other = other->nextChase(&start, &step, &minSpan, &last))) {
+ if (!--safetyNet) {
+ return false;
+ }
+ if (other->done()) {
+ SkASSERT(!last);
+ break;
+ }
+ if (lastDone == minSpan || priorDone == minSpan) {
+ if (found) {
+ *found = nullptr;
+ }
+ return true;
+ }
+ other->markDone(minSpan);
+ priorDone = lastDone;
+ lastDone = minSpan;
+ }
+ if (found) {
+ *found = last;
+ }
+ return true;
+}
+
+bool SkOpSegment::markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+ SkOpSpanBase** lastPtr) {
+ SkOpSpan* spanStart = start->starter(end);
+ int step = start->step(end);
+ bool success = markWinding(spanStart, winding);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ int safetyNet = 100000;
+ while ((other = other->nextChase(&start, &step, &spanStart, &last))) {
+ if (!--safetyNet) {
+ return false;
+ }
+ if (spanStart->windSum() != SK_MinS32) {
+// SkASSERT(spanStart->windSum() == winding); // FIXME: is this assert too aggressive?
+ SkASSERT(!last);
+ break;
+ }
+ (void) other->markWinding(spanStart, winding);
+ }
+ if (lastPtr) {
+ *lastPtr = last;
+ }
+ return success;
+}
+
+bool SkOpSegment::markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end,
+ int winding, int oppWinding, SkOpSpanBase** lastPtr) {
+ SkOpSpan* spanStart = start->starter(end);
+ int step = start->step(end);
+ bool success = markWinding(spanStart, winding, oppWinding);
+ SkOpSpanBase* last = nullptr;
+ SkOpSegment* other = this;
+ int safetyNet = 100000;
+ while ((other = other->nextChase(&start, &step, &spanStart, &last))) {
+ if (!--safetyNet) {
+ return false;
+ }
+ if (spanStart->windSum() != SK_MinS32) {
+ if (this->operand() == other->operand()) {
+ if (spanStart->windSum() != winding || spanStart->oppSum() != oppWinding) {
+ this->globalState()->setWindingFailed();
+ return true; // ... but let it succeed anyway
+ }
+ } else {
+ FAIL_IF(spanStart->windSum() != oppWinding);
+ FAIL_IF(spanStart->oppSum() != winding);
+ }
+ SkASSERT(!last);
+ break;
+ }
+ if (this->operand() == other->operand()) {
+ (void) other->markWinding(spanStart, winding, oppWinding);
+ } else {
+ (void) other->markWinding(spanStart, oppWinding, winding);
+ }
+ }
+ if (lastPtr) {
+ *lastPtr = last;
+ }
+ return success;
+}
+
+bool SkOpSegment::markAngle(int maxWinding, int sumWinding, const SkOpAngle* angle,
+ SkOpSpanBase** result) {
+ SkASSERT(angle->segment() == this);
+ if (UseInnerWinding(maxWinding, sumWinding)) {
+ maxWinding = sumWinding;
+ }
+ if (!markAndChaseWinding(angle->start(), angle->end(), maxWinding, result)) {
+ return false;
+ }
+#if DEBUG_WINDING
+ SkOpSpanBase* last = *result;
+ if (last) {
+ SkDebugf("%s last seg=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=");
+ SkPathOpsDebug::WindingPrintf(last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+ }
+#endif
+ return true;
+}
+
+bool SkOpSegment::markAngle(int maxWinding, int sumWinding, int oppMaxWinding,
+ int oppSumWinding, const SkOpAngle* angle, SkOpSpanBase** result) {
+ SkASSERT(angle->segment() == this);
+ if (UseInnerWinding(maxWinding, sumWinding)) {
+ maxWinding = sumWinding;
+ }
+ if (oppMaxWinding != oppSumWinding && UseInnerWinding(oppMaxWinding, oppSumWinding)) {
+ oppMaxWinding = oppSumWinding;
+ }
+ // caller doesn't require that this marks anything
+ if (!markAndChaseWinding(angle->start(), angle->end(), maxWinding, oppMaxWinding, result)) {
+ return false;
+ }
+#if DEBUG_WINDING
+ if (result) {
+ SkOpSpanBase* last = *result;
+ if (last) {
+ SkDebugf("%s last segment=%d span=%d", __FUNCTION__,
+ last->segment()->debugID(), last->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=");
+ SkPathOpsDebug::WindingPrintf(last->upCast()->windSum());
+ }
+ SkDebugf(" \n");
+ }
+ }
+#endif
+ return true;
+}
+
+void SkOpSegment::markDone(SkOpSpan* span) {
+ SkASSERT(this == span->segment());
+ if (span->done()) {
+ return;
+ }
+#if DEBUG_MARK_DONE
+ debugShowNewWinding(__FUNCTION__, span, span->windSum(), span->oppSum());
+#endif
+ span->setDone(true);
+ ++fDoneCount;
+ debugValidate();
+}
+
+bool SkOpSegment::markWinding(SkOpSpan* span, int winding) {
+ SkASSERT(this == span->segment());
+ SkASSERT(winding);
+ if (span->done()) {
+ return false;
+ }
+#if DEBUG_MARK_DONE
+ debugShowNewWinding(__FUNCTION__, span, winding);
+#endif
+ span->setWindSum(winding);
+ debugValidate();
+ return true;
+}
+
+// Binary-op overload: records both this operand's wind sum and the opposite
+// operand's wind sum on the span. Returns false if the span is already done.
+bool SkOpSegment::markWinding(SkOpSpan* span, int winding, int oppWinding) {
+    SkASSERT(this == span->segment());
+    SkASSERT(winding || oppWinding);  // at least one operand must contribute
+    if (span->done()) {
+        return false;
+    }
+#if DEBUG_MARK_DONE
+    debugShowNewWinding(__FUNCTION__, span, winding, oppWinding);
+#endif
+    span->setWindSum(winding);
+    span->setOppSum(oppWinding);
+    debugValidate();
+    return true;
+}
+
+// Returns true if (testT, testPt) on testParent names the same point as base
+// does on this segment: either the t values match precisely on the same
+// segment, or the points are approximately equal and not provably disjoint.
+bool SkOpSegment::match(const SkOpPtT* base, const SkOpSegment* testParent, double testT,
+        const SkPoint& testPt) const {
+    SkASSERT(this == base->segment());
+    if (this == testParent) {
+        if (precisely_equal(base->fT, testT)) {
+            return true;
+        }
+    }
+    if (!SkDPoint::ApproximatelyEqual(testPt, base->fPt)) {
+        return false;
+    }
+    // on the same segment, reject coincident points whose t values are far
+    // apart (e.g. a curve that loops back near itself); see ptsDisjoint()
+    return this != testParent || !this->ptsDisjoint(base->fT, base->fPt, testT, testPt);
+}
+
+// Helper for nextChase(): optionally reports the span where the chase stopped,
+// and always returns nullptr so callers can write 'return set_last(...)'.
+static SkOpSegment* set_last(SkOpSpanBase** last, SkOpSpanBase* endSpan) {
+    if (last) {
+        *last = endSpan;
+    }
+    return nullptr;
+}
+
+// Advances a winding chase from *startPtr in the direction *stepPtr to the
+// next connected segment, updating *startPtr/*stepPtr (and *minPtr if given)
+// on success. Returns nullptr when the chase must stop; *last (if non-null)
+// then receives the span where it stopped.
+SkOpSegment* SkOpSegment::nextChase(SkOpSpanBase** startPtr, int* stepPtr, SkOpSpan** minPtr,
+        SkOpSpanBase** last) const {
+    SkOpSpanBase* origStart = *startPtr;
+    int step = *stepPtr;  // positive walks toward larger t, negative toward smaller t
+    SkOpSpanBase* endSpan = step > 0 ? origStart->upCast()->next() : origStart->prev();
+    SkASSERT(endSpan);
+    SkOpAngle* angle = step > 0 ? endSpan->fromAngle() : endSpan->upCast()->toAngle();
+    SkOpSpanBase* foundSpan;
+    SkOpSpanBase* otherEnd;
+    SkOpSegment* other;
+    if (angle == nullptr) {
+        // no angle at the junction: only continue across a segment endpoint
+        if (endSpan->t() != 0 && endSpan->t() != 1) {
+            return nullptr;
+        }
+        SkOpPtT* otherPtT = endSpan->ptT()->next();
+        other = otherPtT->segment();
+        foundSpan = otherPtT->span();
+        otherEnd = step > 0
+                ? foundSpan->upCastable() ? foundSpan->upCast()->next() : nullptr
+                : foundSpan->prev();
+    } else {
+        int loopCount = angle->loopCount();
+        if (loopCount > 2) {
+            // more than two edges meet here: ambiguous continuation, stop
+            return set_last(last, endSpan);
+        }
+        const SkOpAngle* next = angle->next();
+        if (nullptr == next) {
+            return nullptr;
+        }
+#if DEBUG_WINDING
+        if (angle->debugSign() != next->debugSign() && !angle->segment()->contour()->isXor()
+                && !next->segment()->contour()->isXor()) {
+            SkDebugf("%s mismatched signs\n", __FUNCTION__);
+        }
+#endif
+        other = next->segment();
+        foundSpan = endSpan = next->start();
+        otherEnd = next->end();
+    }
+    if (!otherEnd) {
+        return nullptr;
+    }
+    int foundStep = foundSpan->step(otherEnd);
+    if (*stepPtr != foundStep) {
+        // direction flips across the junction: stop the chase here
+        return set_last(last, endSpan);
+    }
+    SkASSERT(*startPtr);
+// SkASSERT(otherEnd >= 0);
+    SkOpSpan* origMin = step < 0 ? origStart->prev() : origStart->upCast();
+    SkOpSpan* foundMin = foundSpan->starter(otherEnd);
+    if (foundMin->windValue() != origMin->windValue()
+            || foundMin->oppValue() != origMin->oppValue()) {
+        // wind values differ: the found span is not a simple continuation
+        return set_last(last, endSpan);
+    }
+    *startPtr = foundSpan;  // commit: continue the chase on the found segment
+    *stepPtr = foundStep;
+    if (minPtr) {
+        *minPtr = foundMin;
+    }
+    return other;
+}
+
+// Please keep this in sync with DebugClearVisited()
+// Clears the visited flag on every segment that shares a point with any span
+// in the chain starting at 'span' (used after missingCoincidence()).
+void SkOpSegment::ClearVisited(SkOpSpanBase* span) {
+    // reset visited flag back to false
+    do {
+        SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+        while ((ptT = ptT->next()) != stopPtT) {
+            SkOpSegment* opp = ptT->segment();
+            opp->resetVisited();
+        }
+    } while (!span->final() && (span = span->upCast()->next()));
+}
+
+// Please keep this in sync with debugMissingCoincidence()
+// look for pairs of undetected coincident curves
+// assumes that segments going in have visited flag clear
+// Even though pairs of curves correctly detect coincident runs, a run may be missed
+// if the coincidence is a product of multiple intersections. For instance, given
+// curves A, B, and C:
+// A-B intersect at a point 1; A-C and B-C intersect at point 2, so near
+// the end of C that the intersection is replaced with the end of C.
+// Even though A-B correctly do not detect an intersection at point 2,
+// the resulting run from point 1 to point 2 is coincident on A and B.
+// Returns true if any new coincidence was recorded.
+bool SkOpSegment::missingCoincidence() {
+    if (this->done()) {
+        return false;
+    }
+    SkOpSpan* prior = nullptr;
+    SkOpSpanBase* spanBase = &fHead;
+    bool result = false;
+    int safetyNet = 100000;  // bail out of pathological point-chain loops
+    do {
+        SkOpPtT* ptT = spanBase->ptT(), * spanStopPtT = ptT;
+        SkOPASSERT(ptT->span() == spanBase);
+        while ((ptT = ptT->next()) != spanStopPtT) {
+            if (!--safetyNet) {
+                return false;
+            }
+            if (ptT->deleted()) {
+                continue;
+            }
+            SkOpSegment* opp = ptT->span()->segment();
+            if (opp->done()) {
+                continue;
+            }
+            // when opp is encountered the 1st time, continue; on 2nd encounter, look for coincidence
+            if (!opp->visited()) {
+                continue;
+            }
+            if (spanBase == &fHead) {
+                continue;  // need a prior span, so the head cannot end a run
+            }
+            if (ptT->segment() == this) {
+                continue;
+            }
+            SkOpSpan* span = spanBase->upCastable();
+            // FIXME?: this assumes that if the opposite segment is coincident then no more
+            // coincidence needs to be detected. This may not be true.
+            if (span && span->containsCoincidence(opp)) {
+                continue;
+            }
+            if (spanBase->containsCoinEnd(opp)) {
+                continue;
+            }
+            SkOpPtT* priorPtT = nullptr, * priorStopPtT;
+            // find prior span containing opp segment
+            SkOpSegment* priorOpp = nullptr;
+            SkOpSpan* priorTest = spanBase->prev();
+            while (!priorOpp && priorTest) {
+                priorStopPtT = priorPtT = priorTest->ptT();
+                while ((priorPtT = priorPtT->next()) != priorStopPtT) {
+                    if (priorPtT->deleted()) {
+                        continue;
+                    }
+                    SkOpSegment* segment = priorPtT->span()->segment();
+                    if (segment == opp) {
+                        prior = priorTest;
+                        priorOpp = opp;
+                        break;
+                    }
+                }
+                priorTest = priorTest->prev();
+            }
+            if (!priorOpp) {
+                continue;
+            }
+            if (priorPtT == ptT) {
+                continue;  // degenerate: run would start and end at the same pt
+            }
+            SkOpPtT* oppStart = prior->ptT();
+            SkOpPtT* oppEnd = spanBase->ptT();
+            bool swapped = priorPtT->fT > ptT->fT;
+            if (swapped) {
+                // normalize so priorPtT->fT <= ptT->fT; undone at swapBack
+                using std::swap;
+                swap(priorPtT, ptT);
+                swap(oppStart, oppEnd);
+            }
+            SkOpCoincidence* coincidences = this->globalState()->coincidence();
+            SkOpPtT* rootPriorPtT = priorPtT->span()->ptT();
+            SkOpPtT* rootPtT = ptT->span()->ptT();
+            SkOpPtT* rootOppStart = oppStart->span()->ptT();
+            SkOpPtT* rootOppEnd = oppEnd->span()->ptT();
+            if (coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+                goto swapBack;  // already known; skip the expensive test
+            }
+            if (this->testForCoincidence(rootPriorPtT, rootPtT, prior, spanBase, opp)) {
+            // mark coincidence
+#if DEBUG_COINCIDENCE_VERBOSE
+                SkDebugf("%s coinSpan=%d endSpan=%d oppSpan=%d oppEndSpan=%d\n", __FUNCTION__,
+                        rootPriorPtT->debugID(), rootPtT->debugID(), rootOppStart->debugID(),
+                        rootOppEnd->debugID());
+#endif
+                if (!coincidences->extend(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+                    coincidences->add(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+                }
+#if DEBUG_COINCIDENCE
+                SkASSERT(coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd));
+#endif
+                result = true;
+            }
+    swapBack:
+            if (swapped) {
+                using std::swap;
+                swap(priorPtT, ptT);
+            }
+        }
+    } while ((spanBase = spanBase->final() ? nullptr : spanBase->upCast()->next()));
+    ClearVisited(&fHead);
+    return result;
+}
+
+// please keep this in sync with debugMoveMultiples()
+// if a span has more than one intersection, merge the other segments' span as needed
+// Returns false only if the safety hatch trips (suspected runaway loop).
+bool SkOpSegment::moveMultiples() {
+    debugValidate();
+    SkOpSpanBase* test = &fHead;
+    do {
+        int addCount = test->spanAddsCount();
+// FAIL_IF(addCount < 1);
+        if (addCount <= 1) {
+            continue;  // a single add has nothing to merge with
+        }
+        SkOpPtT* startPtT = test->ptT();
+        SkOpPtT* testPtT = startPtT;
+        int safetyHatch = 1000000;
+        do { // iterate through all spans associated with start
+            if (!--safetyHatch) {
+                return false;
+            }
+            SkOpSpanBase* oppSpan = testPtT->span();
+            if (oppSpan->spanAddsCount() == addCount) {
+                continue;  // counts already agree; nothing missing on opp
+            }
+            if (oppSpan->deleted()) {
+                continue;
+            }
+            SkOpSegment* oppSegment = oppSpan->segment();
+            if (oppSegment == this) {
+                continue;
+            }
+            // find range of spans to consider merging
+            SkOpSpanBase* oppPrev = oppSpan;
+            SkOpSpanBase* oppFirst = oppSpan;
+            while ((oppPrev = oppPrev->prev())) {
+                if (!roughly_equal(oppPrev->t(), oppSpan->t())) {
+                    break;  // only spans with nearly identical t are candidates
+                }
+                if (oppPrev->spanAddsCount() == addCount) {
+                    continue;
+                }
+                if (oppPrev->deleted()) {
+                    continue;
+                }
+                oppFirst = oppPrev;
+            }
+            SkOpSpanBase* oppNext = oppSpan;
+            SkOpSpanBase* oppLast = oppSpan;
+            while ((oppNext = oppNext->final() ? nullptr : oppNext->upCast()->next())) {
+                if (!roughly_equal(oppNext->t(), oppSpan->t())) {
+                    break;
+                }
+                if (oppNext->spanAddsCount() == addCount) {
+                    continue;
+                }
+                if (oppNext->deleted()) {
+                    continue;
+                }
+                oppLast = oppNext;
+            }
+            if (oppFirst == oppLast) {
+                continue;  // no range found; nothing to merge
+            }
+            SkOpSpanBase* oppTest = oppFirst;
+            do {
+                if (oppTest == oppSpan) {
+                    continue;
+                }
+                // check to see if the candidate meets specific criteria:
+                // it contains spans of segments in test's loop but not including 'this'
+                SkOpPtT* oppStartPtT = oppTest->ptT();
+                SkOpPtT* oppPtT = oppStartPtT;
+                while ((oppPtT = oppPtT->next()) != oppStartPtT) {
+                    SkOpSegment* oppPtTSegment = oppPtT->segment();
+                    if (oppPtTSegment == this) {
+                        goto tryNextSpan;
+                    }
+                    SkOpPtT* matchPtT = startPtT;
+                    do {
+                        if (matchPtT->segment() == oppPtTSegment) {
+                            goto foundMatch;
+                        }
+                    } while ((matchPtT = matchPtT->next()) != startPtT);
+                    goto tryNextSpan;
+            foundMatch: // merge oppTest and oppSpan
+                    oppSegment->debugValidate();
+                    oppTest->mergeMatches(oppSpan);
+                    oppTest->addOpp(oppSpan);
+                    oppSegment->debugValidate();
+                    goto checkNextSpan;
+                }
+        tryNextSpan:
+                ;
+            } while (oppTest != oppLast && (oppTest = oppTest->upCast()->next()));
+        } while ((testPtT = testPtT->next()) != startPtT);
+checkNextSpan:
+        ;
+    } while ((test = test->final() ? nullptr : test->upCast()->next()));
+    debugValidate();
+    return true;
+}
+
+// adjacent spans may have points close by
+// Sets *found to true if the closest unique point pair between refSpan and
+// checkSpan matches (see match()). Returns false only if the escape hatch
+// trips (suspected infinite loop).
+bool SkOpSegment::spansNearby(const SkOpSpanBase* refSpan, const SkOpSpanBase* checkSpan,
+        bool* found) const {
+    const SkOpPtT* refHead = refSpan->ptT();
+    const SkOpPtT* checkHead = checkSpan->ptT();
+// if the first pt pair from adjacent spans are far apart, assume that all are far enough apart
+    if (!SkDPoint::WayRoughlyEqual(refHead->fPt, checkHead->fPt)) {
+#if DEBUG_COINCIDENCE
+        // verify that no combination of points are close
+        const SkOpPtT* dBugRef = refHead;
+        do {
+            const SkOpPtT* dBugCheck = checkHead;
+            do {
+                SkOPASSERT(!SkDPoint::ApproximatelyEqual(dBugRef->fPt, dBugCheck->fPt));
+                dBugCheck = dBugCheck->next();
+            } while (dBugCheck != checkHead);
+            dBugRef = dBugRef->next();
+        } while (dBugRef != refHead);
+#endif
+        *found = false;
+        return true;
+    }
+    // check only unique points
+    SkScalar distSqBest = SK_ScalarMax;
+    const SkOpPtT* refBest = nullptr;
+    const SkOpPtT* checkBest = nullptr;
+    const SkOpPtT* ref = refHead;
+    do {
+        if (ref->deleted()) {
+            continue;
+        }
+        while (ref->ptAlreadySeen(refHead)) {
+            ref = ref->next();  // skip duplicate points earlier in the chain
+            if (ref == refHead) {
+                goto doneCheckingDistance;
+            }
+        }
+        const SkOpPtT* check = checkHead;
+        const SkOpSegment* refSeg = ref->segment();
+        int escapeHatch = 100000; // defend against infinite loops
+        do {
+            if (check->deleted()) {
+                continue;
+            }
+            while (check->ptAlreadySeen(checkHead)) {
+                check = check->next();
+                if (check == checkHead) {
+                    goto nextRef;
+                }
+            }
+            SkScalar distSq = SkPointPriv::DistanceToSqd(ref->fPt, check->fPt);
+            // track the closest pair, ignoring pairs on the same segment whose
+            // t values are provably disjoint (curve looping back on itself)
+            if (distSqBest > distSq && (refSeg != check->segment()
+                    || !refSeg->ptsDisjoint(*ref, *check))) {
+                distSqBest = distSq;
+                refBest = ref;
+                checkBest = check;
+            }
+            if (--escapeHatch <= 0) {
+                return false;
+            }
+        } while ((check = check->next()) != checkHead);
+    nextRef:
+        ;
+    } while ((ref = ref->next()) != refHead);
+doneCheckingDistance:
+    *found = checkBest && refBest->segment()->match(refBest, checkBest->segment(), checkBest->fT,
+            checkBest->fPt);
+    return true;
+}
+
+// Please keep this function in sync with debugMoveNearby()
+// Move nearby t values and pts so they all hang off the same span. Alignment happens later.
+// Returns false only when a safety limit trips; returns true after clearAll()
+// if merging collapses the segment entirely.
+bool SkOpSegment::moveNearby() {
+    debugValidate();
+    // release undeleted spans pointing to this seg that are linked to the primary span
+    SkOpSpanBase* spanBase = &fHead;
+    int escapeHatch = 9999; // the largest count for a regular test is 50; for a fuzzer, 500
+    do {
+        SkOpPtT* ptT = spanBase->ptT();
+        const SkOpPtT* headPtT = ptT;
+        while ((ptT = ptT->next()) != headPtT) {
+            if (!--escapeHatch) {
+                return false;
+            }
+            SkOpSpanBase* test = ptT->span();
+            if (ptT->segment() == this && !ptT->deleted() && test != spanBase
+                    && test->ptT() == ptT) {
+                if (test->final()) {
+                    if (spanBase == &fHead) {
+                        this->clearAll();  // head and tail collapsed: segment is empty
+                        return true;
+                    }
+                    spanBase->upCast()->release(ptT);
+                } else if (test->prev()) {
+                    test->upCast()->release(headPtT);
+                }
+                break;  // at most one release per primary span, then move on
+            }
+        }
+        spanBase = spanBase->upCast()->next();
+    } while (!spanBase->final());
+    // This loop looks for adjacent spans which are near by
+    spanBase = &fHead;
+    do { // iterate through all spans associated with start
+        SkOpSpanBase* test = spanBase->upCast()->next();
+        bool found;
+        if (!this->spansNearby(spanBase, test, &found)) {
+            return false;
+        }
+        if (found) {
+            if (test->final()) {
+                if (spanBase->prev()) {
+                    test->merge(spanBase->upCast());
+                } else {
+                    this->clearAll();  // only two spans and they merged away
+                    return true;
+                }
+            } else {
+                spanBase->merge(test->upCast());
+            }
+        }
+        spanBase = test;
+    } while (!spanBase->final());
+    debugValidate();
+    return true;
+}
+
+// Forwards to the owning contour: which operand of the binary op this segment belongs to.
+bool SkOpSegment::operand() const {
+    return fContour->operand();
+}
+
+// Forwards to the owning contour: whether the opposite operand uses even-odd (xor) fill.
+bool SkOpSegment::oppXor() const {
+    return fContour->oppXor();
+}
+
+// Returns true when two (t, pt) pairs on this curve, although their points may
+// nearly coincide, are separated along the curve: the point at the midpoint t
+// is much farther from both endpoints than the endpoints are from each other.
+// Always false for lines, which cannot loop back.
+bool SkOpSegment::ptsDisjoint(double t1, const SkPoint& pt1, double t2, const SkPoint& pt2) const {
+    if (fVerb == SkPath::kLine_Verb) {
+        return false;
+    }
+    // quads (and cubics) can loop back to nearly a line so that an opposite curve
+    // hits in two places with very different t values.
+    // OPTIMIZATION: curves could be preflighted so that, for example, something like
+    // 'controls contained by ends' could avoid this check for common curves
+    // 'ends are extremes in x or y' is cheaper to compute and real-world common
+    // on the other hand, the below check is relatively inexpensive
+    double midT = (t1 + t2) / 2;
+    SkPoint midPt = this->ptAtT(midT);
+    double seDistSq = std::max(SkPointPriv::DistanceToSqd(pt1, pt2) * 2, FLT_EPSILON * 2);
+    return SkPointPriv::DistanceToSqd(midPt, pt1) > seDistSq ||
+            SkPointPriv::DistanceToSqd(midPt, pt2) > seDistSq;
+}
+
+// Winding-rule (single operand) case: computes the max/sum windings for the
+// start..end span and updates the running winding accumulator in place.
+void SkOpSegment::setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+        int* maxWinding, int* sumWinding) {
+    int deltaSum = SpanSign(start, end);
+    *maxWinding = *sumMiWinding;
+    *sumWinding = *sumMiWinding -= deltaSum;  // note: also advances the accumulator
+    SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*sumWinding) <= DEBUG_LIMIT_WIND_SUM);
+}
+
+// Binary-op case: computes main and opposite max/sum windings for the
+// start..end span. Which accumulator (minuend vs. subtrahend) plays the "main"
+// role depends on which operand this segment belongs to.
+void SkOpSegment::setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+        int* sumSuWinding, int* maxWinding, int* sumWinding, int* oppMaxWinding,
+        int* oppSumWinding) {
+    int deltaSum = SpanSign(start, end);
+    int oppDeltaSum = OppSign(start, end);
+    if (operand()) {
+        *maxWinding = *sumSuWinding;
+        *sumWinding = *sumSuWinding -= deltaSum;
+        *oppMaxWinding = *sumMiWinding;
+        *oppSumWinding = *sumMiWinding -= oppDeltaSum;
+    } else {
+        *maxWinding = *sumMiWinding;
+        *sumWinding = *sumMiWinding -= deltaSum;
+        *oppMaxWinding = *sumSuWinding;
+        *oppSumWinding = *sumSuWinding -= oppDeltaSum;
+    }
+    SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*sumWinding) <= DEBUG_LIMIT_WIND_SUM);
+    SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(*oppSumWinding) <= DEBUG_LIMIT_WIND_SUM);
+}
+
+// For every span on this segment, gathers the angles of all segments meeting
+// at that span's point and inserts them into one sorted angle loop. A loop of
+// one angle is discarded. Returns false if a safety limit or insert fails.
+bool SkOpSegment::sortAngles() {
+    SkOpSpanBase* span = &this->fHead;
+    do {
+        SkOpAngle* fromAngle = span->fromAngle();
+        SkOpAngle* toAngle = span->final() ? nullptr : span->upCast()->toAngle();
+        if (!fromAngle && !toAngle) {
+            continue;  // no angles meet at this span
+        }
+#if DEBUG_ANGLE
+        bool wroteAfterHeader = false;
+#endif
+        SkOpAngle* baseAngle = fromAngle;
+        if (fromAngle && toAngle) {
+#if DEBUG_ANGLE
+            SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(), span->t(),
+                    span->debugID());
+            wroteAfterHeader = true;
+#endif
+            FAIL_IF(!fromAngle->insert(toAngle));
+        } else if (!fromAngle) {
+            baseAngle = toAngle;
+        }
+        SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+        int safetyNet = 1000000;
+        do {
+            if (!--safetyNet) {
+                return false;
+            }
+            SkOpSpanBase* oSpan = ptT->span();
+            if (oSpan == span) {
+                continue;  // skip this segment's own span
+            }
+            SkOpAngle* oAngle = oSpan->fromAngle();
+            if (oAngle) {
+#if DEBUG_ANGLE
+                if (!wroteAfterHeader) {
+                    SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(),
+                            span->t(), span->debugID());
+                    wroteAfterHeader = true;
+                }
+#endif
+                if (!oAngle->loopContains(baseAngle)) {
+                    baseAngle->insert(oAngle);
+                }
+            }
+            if (!oSpan->final()) {
+                oAngle = oSpan->upCast()->toAngle();
+                if (oAngle) {
+#if DEBUG_ANGLE
+                    if (!wroteAfterHeader) {
+                        SkDebugf("%s [%d] tStart=%1.9g [%d]\n", __FUNCTION__, debugID(),
+                                span->t(), span->debugID());
+                        wroteAfterHeader = true;
+                    }
+#endif
+                    if (!oAngle->loopContains(baseAngle)) {
+                        baseAngle->insert(oAngle);
+                    }
+                }
+            }
+        } while ((ptT = ptT->next()) != stopPtT);
+        if (baseAngle->loopCount() == 1) {
+            // a lone angle sorts against nothing; drop it
+            span->setFromAngle(nullptr);
+            if (toAngle) {
+                span->upCast()->setToAngle(nullptr);
+            }
+            baseAngle = nullptr;
+        }
+#if DEBUG_SORT
+        SkASSERT(!baseAngle || baseAngle->loopCount() > 1);
+#endif
+    } while (!span->final() && (span = span->upCast()->next()));
+    return true;
+}
+
+// Extracts the portion of this curve between two spans into 'edge'. Returns
+// true only when new interior control points had to be computed; returns
+// false for lines and for spans covering the whole curve (controls copied).
+// NOTE(review): mixed writes to fCubic/fLine/fConic/fQuad assume the SkDCurve
+// members alias the same storage — confirm against SkPathOpsCurve.h.
+bool SkOpSegment::subDivide(const SkOpSpanBase* start, const SkOpSpanBase* end,
+        SkDCurve* edge) const {
+    SkASSERT(start != end);
+    const SkOpPtT& startPtT = *start->ptT();
+    const SkOpPtT& endPtT = *end->ptT();
+    SkDEBUGCODE(edge->fVerb = fVerb);
+    edge->fCubic[0].set(startPtT.fPt);
+    int points = SkPathOpsVerbToPoints(fVerb);
+    edge->fCubic[points].set(endPtT.fPt);
+    if (fVerb == SkPath::kLine_Verb) {
+        return false;
+    }
+    double startT = startPtT.fT;
+    double endT = endPtT.fT;
+    if ((startT == 0 || endT == 0) && (startT == 1 || endT == 1)) {
+        // don't compute midpoints if we already have them
+        if (fVerb == SkPath::kQuad_Verb) {
+            edge->fLine[1].set(fPts[1]);
+            return false;
+        }
+        if (fVerb == SkPath::kConic_Verb) {
+            edge->fConic[1].set(fPts[1]);
+            edge->fConic.fWeight = fWeight;
+            return false;
+        }
+        SkASSERT(fVerb == SkPath::kCubic_Verb);
+        if (startT == 0) {
+            edge->fCubic[1].set(fPts[1]);
+            edge->fCubic[2].set(fPts[2]);
+            return false;
+        }
+        // reversed span (startT == 1): controls are copied in reverse order
+        edge->fCubic[1].set(fPts[2]);
+        edge->fCubic[2].set(fPts[1]);
+        return false;
+    }
+    if (fVerb == SkPath::kQuad_Verb) {
+        edge->fQuad[1] = SkDQuad::SubDivide(fPts, edge->fQuad[0], edge->fQuad[2], startT, endT);
+    } else if (fVerb == SkPath::kConic_Verb) {
+        edge->fConic[1] = SkDConic::SubDivide(fPts, fWeight, edge->fQuad[0], edge->fQuad[2],
+                startT, endT, &edge->fConic.fWeight);
+    } else {
+        SkASSERT(fVerb == SkPath::kCubic_Verb);
+        SkDCubic::SubDivide(fPts, edge->fCubic[0], edge->fCubic[3], startT, endT, &edge->fCubic[1]);
+    }
+    return true;
+}
+
+// Tests whether this segment (between prior and spanBase) runs coincident
+// with opp (between priorPtT's and ptT's spans): shoots a ray perpendicular
+// to this curve's midpoint through opp and checks that opp passes near it.
+bool SkOpSegment::testForCoincidence(const SkOpPtT* priorPtT, const SkOpPtT* ptT,
+        const SkOpSpanBase* prior, const SkOpSpanBase* spanBase, const SkOpSegment* opp) const {
+    // average t, find mid pt
+    double midT = (prior->t() + spanBase->t()) / 2;
+    SkPoint midPt = this->ptAtT(midT);
+    bool coincident = true;
+    // if the mid pt is not near either end pt, project perpendicular through opp seg
+    if (!SkDPoint::ApproximatelyEqual(priorPtT->fPt, midPt)
+            && !SkDPoint::ApproximatelyEqual(ptT->fPt, midPt)) {
+        if (priorPtT->span() == ptT->span()) {
+            return false;  // degenerate range: both ends on the same span
+        }
+        coincident = false;
+        SkIntersections i;
+        SkDCurve curvePart;
+        this->subDivide(prior, spanBase, &curvePart);
+        SkDVector dxdy = (*CurveDDSlopeAtT[fVerb])(curvePart, 0.5f);
+        SkDPoint partMidPt = (*CurveDDPointAtT[fVerb])(curvePart, 0.5f);
+        // perpendicular ray: direction (dy, -dx) rotates the tangent 90 degrees
+        SkDLine ray = {{{midPt.fX, midPt.fY}, {partMidPt.fX + dxdy.fY, partMidPt.fY - dxdy.fX}}};
+        SkDCurve oppPart;
+        opp->subDivide(priorPtT->span(), ptT->span(), &oppPart);
+        (*CurveDIntersectRay[opp->verb()])(oppPart, ray, &i);
+        // measure distance and see if it's small enough to denote coincidence
+        for (int index = 0; index < i.used(); ++index) {
+            if (!between(0, i[0][index], 1)) {
+                continue;  // intersection lies outside the subdivided range
+            }
+            SkDPoint oppPt = i.pt(index);
+            if (oppPt.approximatelyDEqual(midPt)) {
+                // the coincidence can occur at almost any angle
+                coincident = true;
+            }
+        }
+    }
+    return coincident;
+}
+
+// Returns the first span on this segment not yet marked done, or nullptr if
+// every span has been processed.
+SkOpSpan* SkOpSegment::undoneSpan() {
+    SkOpSpan* span = &fHead;
+    SkOpSpanBase* next;
+    do {
+        next = span->next();
+        if (!span->done()) {
+            return span;
+        }
+    } while (!next->final() && (span = next->upCast()));
+    return nullptr;
+}
+
+// Computes the opposite operand's winding outside the start..end span by
+// removing this span's own opp contribution from the stored opp sum.
+int SkOpSegment::updateOppWinding(const SkOpSpanBase* start, const SkOpSpanBase* end) const {
+    const SkOpSpan* lesser = start->starter(end);
+    int oppWinding = lesser->oppSum();
+    int oppSpanWinding = SkOpSegment::OppSign(start, end);
+    if (oppSpanWinding && UseInnerWinding(oppWinding - oppSpanWinding, oppWinding)
+            && oppWinding != SK_MaxS32) {  // SK_MaxS32 flags an unknown sum; leave it alone
+        oppWinding -= oppSpanWinding;
+    }
+    return oppWinding;
+}
+
+// Angle convenience: evaluates opp winding with the angle's span order reversed.
+int SkOpSegment::updateOppWinding(const SkOpAngle* angle) const {
+    const SkOpSpanBase* startSpan = angle->start();
+    const SkOpSpanBase* endSpan = angle->end();
+    return updateOppWinding(endSpan, startSpan);
+}
+
+// Angle convenience: evaluates opp winding in the angle's own span order.
+int SkOpSegment::updateOppWindingReverse(const SkOpAngle* angle) const {
+    const SkOpSpanBase* startSpan = angle->start();
+    const SkOpSpanBase* endSpan = angle->end();
+    return updateOppWinding(startSpan, endSpan);
+}
+
+// Computes the winding outside the start..end span by removing the span's own
+// contribution from its stored (or lazily computed) wind sum. Returns
+// SK_MinS32 when the sum cannot be determined.
+int SkOpSegment::updateWinding(SkOpSpanBase* start, SkOpSpanBase* end) {
+    SkOpSpan* lesser = start->starter(end);
+    int winding = lesser->windSum();
+    if (winding == SK_MinS32) {
+        winding = lesser->computeWindSum();  // not yet cached; compute on demand
+    }
+    if (winding == SK_MinS32) {
+        return winding;
+    }
+    int spanWinding = SkOpSegment::SpanSign(start, end);
+    if (winding && UseInnerWinding(winding - spanWinding, winding)
+            && winding != SK_MaxS32) {  // SK_MaxS32 flags an unknown sum; leave it alone
+        winding -= spanWinding;
+    }
+    return winding;
+}
+
+// Angle convenience: evaluates winding with the angle's span order reversed.
+int SkOpSegment::updateWinding(SkOpAngle* angle) {
+    SkOpSpanBase* startSpan = angle->start();
+    SkOpSpanBase* endSpan = angle->end();
+    return updateWinding(endSpan, startSpan);
+}
+
+// Angle convenience: evaluates winding in the angle's own span order.
+int SkOpSegment::updateWindingReverse(const SkOpAngle* angle) {
+    SkOpSpanBase* startSpan = angle->start();
+    SkOpSpanBase* endSpan = angle->end();
+    return updateWinding(startSpan, endSpan);
+}
+
+// OPTIMIZATION: does the following also work, and is it any faster?
+// return outerWinding * innerWinding > 0
+//      || ((outerWinding + innerWinding < 0) ^ ((outerWinding - innerWinding) < 0)))
+// Chooses between the winding on the two sides of a span: true selects the
+// inner winding — the one with the larger magnitude (ties broken by the sign
+// of the outer winding).
+bool SkOpSegment::UseInnerWinding(int outerWinding, int innerWinding) {
+    SkASSERT(outerWinding != SK_MaxS32);
+    SkASSERT(innerWinding != SK_MaxS32);
+    int absOut = SkTAbs(outerWinding);
+    int absIn = SkTAbs(innerWinding);
+    bool result = absOut == absIn ? outerWinding < 0 : absOut < absIn;
+    return result;
+}
+
+// Returns the stored wind sum of the span where the angle's range begins.
+int SkOpSegment::windSum(const SkOpAngle* angle) const {
+    const SkOpSpan* minSpan = angle->start()->starter(angle->end());
+    return minSpan->windSum();
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpSegment.h b/gfx/skia/skia/src/pathops/SkOpSegment.h
new file mode 100644
index 0000000000..4da3c5a51f
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSegment.h
@@ -0,0 +1,466 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpSegment_DEFINE
+#define SkOpSegment_DEFINE
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMath.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkOpAngle.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+enum class SkOpRayDir;
+class SkOpCoincidence;
+class SkOpContour;
+class SkPathWriter;
+struct SkOpRayHit;
+template <typename T> class SkTDArray;
+
+class SkOpSegment {
+public:
+ bool operator<(const SkOpSegment& rh) const {
+ return fBounds.fTop < rh.fBounds.fTop;
+ }
+
+ SkOpAngle* activeAngle(SkOpSpanBase* start, SkOpSpanBase** startPtr, SkOpSpanBase** endPtr,
+ bool* done);
+ SkOpAngle* activeAngleInner(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done);
+ SkOpAngle* activeAngleOther(SkOpSpanBase* start, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, bool* done);
+ bool activeOp(SkOpSpanBase* start, SkOpSpanBase* end, int xorMiMask, int xorSuMask,
+ SkPathOp op);
+ bool activeOp(int xorMiMask, int xorSuMask, SkOpSpanBase* start, SkOpSpanBase* end, SkPathOp op,
+ int* sumMiWinding, int* sumSuWinding);
+
+ bool activeWinding(SkOpSpanBase* start, SkOpSpanBase* end);
+ bool activeWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* sumWinding);
+
+ SkOpSegment* addConic(SkPoint pts[3], SkScalar weight, SkOpContour* parent) {
+ init(pts, weight, parent, SkPath::kConic_Verb);
+ SkDCurve curve;
+ curve.fConic.set(pts, weight);
+ curve.setConicBounds(pts, weight, 0, 1, &fBounds);
+ return this;
+ }
+
+ SkOpSegment* addCubic(SkPoint pts[4], SkOpContour* parent) {
+ init(pts, 1, parent, SkPath::kCubic_Verb);
+ SkDCurve curve;
+ curve.fCubic.set(pts);
+ curve.setCubicBounds(pts, 1, 0, 1, &fBounds);
+ return this;
+ }
+
+ bool addCurveTo(const SkOpSpanBase* start, const SkOpSpanBase* end, SkPathWriter* path) const;
+
+ SkOpAngle* addEndSpan() {
+ SkOpAngle* angle = this->globalState()->allocator()->make<SkOpAngle>();
+ angle->set(&fTail, fTail.prev());
+ fTail.setFromAngle(angle);
+ return angle;
+ }
+
+ bool addExpanded(double newT, const SkOpSpanBase* test, bool* startOver);
+
+ SkOpSegment* addLine(SkPoint pts[2], SkOpContour* parent) {
+ SkASSERT(pts[0] != pts[1]);
+ init(pts, 1, parent, SkPath::kLine_Verb);
+ fBounds.setBounds(pts, 2);
+ return this;
+ }
+
+ SkOpPtT* addMissing(double t, SkOpSegment* opp, bool* allExist);
+
+ SkOpAngle* addStartSpan() {
+ SkOpAngle* angle = this->globalState()->allocator()->make<SkOpAngle>();
+ angle->set(&fHead, fHead.next());
+ fHead.setToAngle(angle);
+ return angle;
+ }
+
+ SkOpSegment* addQuad(SkPoint pts[3], SkOpContour* parent) {
+ init(pts, 1, parent, SkPath::kQuad_Verb);
+ SkDCurve curve;
+ curve.fQuad.set(pts);
+ curve.setQuadBounds(pts, 1, 0, 1, &fBounds);
+ return this;
+ }
+
+ SkOpPtT* addT(double t);
+ SkOpPtT* addT(double t, const SkPoint& pt);
+
+ const SkPathOpsBounds& bounds() const {
+ return fBounds;
+ }
+
+ void bumpCount() {
+ ++fCount;
+ }
+
+ void calcAngles();
+ SkOpSpanBase::Collapsed collapsed(double startT, double endT) const;
+ static bool ComputeOneSum(const SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType );
+ static bool ComputeOneSumReverse(SkOpAngle* baseAngle, SkOpAngle* nextAngle,
+ SkOpAngle::IncludeType );
+ int computeSum(SkOpSpanBase* start, SkOpSpanBase* end, SkOpAngle::IncludeType includeType);
+
+ void clearAll();
+ void clearOne(SkOpSpan* span);
+ static void ClearVisited(SkOpSpanBase* span);
+ bool contains(double t) const;
+
+ SkOpContour* contour() const {
+ return fContour;
+ }
+
+ int count() const {
+ return fCount;
+ }
+
+ void debugAddAngle(double startT, double endT);
+#if DEBUG_COIN
+ const SkOpPtT* debugAddT(double t, SkPathOpsDebug::GlitchLog* ) const;
+#endif
+ const SkOpAngle* debugAngle(int id) const;
+#if DEBUG_ANGLE
+ void debugCheckAngleCoin() const;
+#endif
+#if DEBUG_COIN
+ void debugCheckHealth(SkPathOpsDebug::GlitchLog* ) const;
+ void debugClearAll(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugClearOne(const SkOpSpan* span, SkPathOpsDebug::GlitchLog* glitches) const;
+#endif
+ const SkOpCoincidence* debugCoincidence() const;
+ SkOpContour* debugContour(int id) const;
+
+ int debugID() const {
+ return SkDEBUGRELEASE(fID, -1);
+ }
+
+ SkOpAngle* debugLastAngle();
+#if DEBUG_COIN
+ void debugMissingCoincidence(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugMoveMultiples(SkPathOpsDebug::GlitchLog* glitches) const;
+ void debugMoveNearby(SkPathOpsDebug::GlitchLog* glitches) const;
+#endif
+ const SkOpPtT* debugPtT(int id) const;
+ void debugReset();
+ const SkOpSegment* debugSegment(int id) const;
+
+#if DEBUG_ACTIVE_SPANS
+ void debugShowActiveSpans(SkString* str) const;
+#endif
+#if DEBUG_MARK_DONE
+ void debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding);
+ void debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding, int oppWinding);
+#endif
+
+ const SkOpSpanBase* debugSpan(int id) const;
+ void debugValidate() const;
+
+#if DEBUG_COINCIDENCE_ORDER
+ void debugResetCoinT() const;
+ void debugSetCoinT(int, SkScalar ) const;
+#endif
+
+#if DEBUG_COIN
+ static void DebugClearVisited(const SkOpSpanBase* span);
+
+ bool debugVisited() const {
+ if (!fDebugVisited) {
+ fDebugVisited = true;
+ return false;
+ }
+ return true;
+ }
+#endif
+
+#if DEBUG_ANGLE
+ double distSq(double t, const SkOpAngle* opp) const;
+#endif
+
+ bool done() const {
+ SkOPASSERT(fDoneCount <= fCount);
+ return fDoneCount == fCount;
+ }
+
+ bool done(const SkOpAngle* angle) const {
+ return angle->start()->starter(angle->end())->done();
+ }
+
+ SkDPoint dPtAtT(double mid) const {
+ return (*CurveDPointAtT[fVerb])(fPts, fWeight, mid);
+ }
+
+ SkDVector dSlopeAtT(double mid) const {
+ return (*CurveDSlopeAtT[fVerb])(fPts, fWeight, mid);
+ }
+
+ void dump() const;
+ void dumpAll() const;
+ void dumpAngles() const;
+ void dumpCoin() const;
+ void dumpPts(const char* prefix = "seg") const;
+ void dumpPtsInner(const char* prefix = "seg") const;
+
+ const SkOpPtT* existing(double t, const SkOpSegment* opp) const;
+ SkOpSegment* findNextOp(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable, bool* simple,
+ SkPathOp op, int xorMiMask, int xorSuMask);
+ SkOpSegment* findNextWinding(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** nextStart,
+ SkOpSpanBase** nextEnd, bool* unsortable);
+ SkOpSegment* findNextXor(SkOpSpanBase** nextStart, SkOpSpanBase** nextEnd, bool* unsortable);
+ SkOpSpan* findSortableTop(SkOpContour* );
+ SkOpGlobalState* globalState() const;
+
+ const SkOpSpan* head() const {
+ return &fHead;
+ }
+
+ SkOpSpan* head() {
+ return &fHead;
+ }
+
+    // Set up this segment's geometry: point storage, conic weight, owning
+    // contour, and path verb. Defined out of line in SkOpSegment.cpp.
+    void init(SkPoint pts[], SkScalar weight, SkOpContour* parent, SkPath::Verb verb);
+
+    // Allocate a fresh SkOpSpan from the global arena and splice it into the
+    // doubly-linked span list directly after 'prev'. Returns the new span.
+    SkOpSpan* insert(SkOpSpan* prev) {
+        SkOpGlobalState* globalState = this->globalState();
+        globalState->setAllocatedOpSpan();
+        SkOpSpan* result = globalState->allocator()->make<SkOpSpan>();
+        SkOpSpanBase* next = prev->next();
+        result->setPrev(prev);
+        prev->setNext(result);
+        SkDEBUGCODE(result->ptT()->fT = 0);
+        result->setNext(next);
+        if (next) {
+            next->setPrev(result);
+        }
+        return result;
+    }
+
+    bool isClose(double t, const SkOpSegment* opp) const;
+
+    // A segment is horizontal/vertical when its tight bounds collapse in y/x.
+    bool isHorizontal() const {
+        return fBounds.fTop == fBounds.fBottom;
+    }
+
+    // Returns the single adjoining segment when chasing needs no angle sort;
+    // thin wrapper over nextChase() with no span/last out-params.
+    SkOpSegment* isSimple(SkOpSpanBase** end, int* step) const {
+        return nextChase(end, step, nullptr, nullptr);
+    }
+
+    bool isVertical() const {
+        return fBounds.fLeft == fBounds.fRight;
+    }
+
+    // True when the curve restricted to [start->t(), end->t()] is vertical,
+    // per the verb-indexed CurveIsVertical function table.
+    bool isVertical(SkOpSpanBase* start, SkOpSpanBase* end) const {
+        return (*CurveIsVertical[fVerb])(fPts, fWeight, start->t(), end->t());
+    }
+
+    bool isXor() const;
+
+    // Join this segment's tail pt-t loop to 'start's head pt-t loop, linking
+    // the end of one segment to the start of the next in the contour.
+    void joinEnds(SkOpSegment* start) {
+        fTail.ptT()->addOpp(start->fHead.ptT(), start->fHead.ptT());
+    }
+
+    // Final point of the curve; SkPathOpsVerbToPoints maps verb -> last index.
+    const SkPoint& lastPt() const {
+        return fPts[SkPathOpsVerbToPoints(fVerb)];
+    }
+
+    // Winding mark-and-chase bookkeeping; implementations in SkOpSegment.cpp.
+    void markAllDone();
+    bool markAndChaseDone(SkOpSpanBase* start, SkOpSpanBase* end, SkOpSpanBase** found);
+    bool markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+            SkOpSpanBase** lastPtr);
+    bool markAndChaseWinding(SkOpSpanBase* start, SkOpSpanBase* end, int winding,
+            int oppWinding, SkOpSpanBase** lastPtr);
+    bool markAngle(int maxWinding, int sumWinding, const SkOpAngle* angle, SkOpSpanBase** result);
+    bool markAngle(int maxWinding, int sumWinding, int oppMaxWinding, int oppSumWinding,
+            const SkOpAngle* angle, SkOpSpanBase** result);
+    void markDone(SkOpSpan* );
+    bool markWinding(SkOpSpan* , int winding);
+    bool markWinding(SkOpSpan* , int winding, int oppWinding);
+    bool match(const SkOpPtT* span, const SkOpSegment* parent, double t, const SkPoint& pt) const;
+    bool missingCoincidence();
+    bool moveMultiples();
+    bool moveNearby();
+
+    // Next segment in the contour's forward-only linked list.
+    SkOpSegment* next() const {
+        return fNext;
+    }
+
+    SkOpSegment* nextChase(SkOpSpanBase** , int* step, SkOpSpan** , SkOpSpanBase** last) const;
+    bool operand() const;
+
+    // Opposite-winding contribution for the span between start and end; the
+    // sign depends on which endpoint has the smaller t.
+    static int OppSign(const SkOpSpanBase* start, const SkOpSpanBase* end) {
+        int result = start->t() < end->t() ? -start->upCast()->oppValue()
+                : end->upCast()->oppValue();
+        return result;
+    }
+
+    bool oppXor() const;
+
+    const SkOpSegment* prev() const {
+        return fPrev;
+    }
+
+    // Point on the curve at parameter 'mid', via the verb-indexed table.
+    SkPoint ptAtT(double mid) const {
+        return (*CurvePointAtT[fVerb])(fPts, fWeight, mid);
+    }
+
+    const SkPoint* pts() const {
+        return fPts;
+    }
+
+    // True when two pt-t pairs on this segment are distinct in both t and
+    // point space; overloads forward to the (t1, pt1, t2, pt2) form.
+    bool ptsDisjoint(const SkOpPtT& span, const SkOpPtT& test) const {
+        SkASSERT(this == span.segment());
+        SkASSERT(this == test.segment());
+        return ptsDisjoint(span.fT, span.fPt, test.fT, test.fPt);
+    }
+
+    bool ptsDisjoint(const SkOpPtT& span, double t, const SkPoint& pt) const {
+        SkASSERT(this == span.segment());
+        return ptsDisjoint(span.fT, span.fPt, t, pt);
+    }
+
+    bool ptsDisjoint(double t1, const SkPoint& pt1, double t2, const SkPoint& pt2) const;
+
+    void rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits, SkArenaAlloc*);
+    void release(const SkOpSpan* );
+
+#if DEBUG_COIN
+    void resetDebugVisited() const {
+        fDebugVisited = false;
+    }
+#endif
+
+    void resetVisited() {
+        fVisited = false;
+    }
+
+    void setContour(SkOpContour* contour) {
+        fContour = contour;
+    }
+
+    void setNext(SkOpSegment* next) {
+        fNext = next;
+    }
+
+    void setPrev(SkOpSegment* prev) {
+        fPrev = prev;
+    }
+
+    // Derive max/sum winding across the start..end span. Leaves sumWinding
+    // untouched when it is the SK_MinS32 'uncomputed' sentinel.
+    void setUpWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* maxWinding, int* sumWinding) {
+        int deltaSum = SpanSign(start, end);
+        *maxWinding = *sumWinding;
+        if (*sumWinding == SK_MinS32) {
+            return;
+        }
+        *sumWinding -= deltaSum;
+    }
+
+    void setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding,
+            int* maxWinding, int* sumWinding);
+    void setUpWindings(SkOpSpanBase* start, SkOpSpanBase* end, int* sumMiWinding, int* sumSuWinding,
+            int* maxWinding, int* sumWinding, int* oppMaxWinding, int* oppSumWinding);
+    bool sortAngles();
+    bool spansNearby(const SkOpSpanBase* ref, const SkOpSpanBase* check, bool* found) const;
+
+    // Winding contribution for the span between start and end; mirror of
+    // OppSign() but for this segment's own wind value.
+    static int SpanSign(const SkOpSpanBase* start, const SkOpSpanBase* end) {
+        int result = start->t() < end->t() ? -start->upCast()->windValue()
+                : end->upCast()->windValue();
+        return result;
+    }
+
+    // The angle leaving 'start' toward 'end': the to-angle when walking in
+    // increasing t, otherwise the from-angle.
+    SkOpAngle* spanToAngle(SkOpSpanBase* start, SkOpSpanBase* end) {
+        SkASSERT(start != end);
+        return start->t() < end->t() ? start->upCast()->toAngle() : start->fromAngle();
+    }
+
+    bool subDivide(const SkOpSpanBase* start, const SkOpSpanBase* end, SkDCurve* result) const;
+
+    const SkOpSpanBase* tail() const {
+        return &fTail;
+    }
+
+    SkOpSpanBase* tail() {
+        return &fTail;
+    }
+
+    bool testForCoincidence(const SkOpPtT* priorPtT, const SkOpPtT* ptT, const SkOpSpanBase* prior,
+            const SkOpSpanBase* spanBase, const SkOpSegment* opp) const;
+
+    SkOpSpan* undoneSpan();
+    int updateOppWinding(const SkOpSpanBase* start, const SkOpSpanBase* end) const;
+    int updateOppWinding(const SkOpAngle* angle) const;
+    int updateOppWindingReverse(const SkOpAngle* angle) const;
+    int updateWinding(SkOpSpanBase* start, SkOpSpanBase* end);
+    int updateWinding(SkOpAngle* angle);
+    int updateWindingReverse(const SkOpAngle* angle);
+
+    static bool UseInnerWinding(int outerWinding, int innerWinding);
+
+    SkPath::Verb verb() const {
+        return fVerb;
+    }
+
+    // look for two different spans that point to the same opposite segment
+    // Returns the prior visited state, marking the segment visited as a side
+    // effect (first call returns false, subsequent calls return true).
+    bool visited() {
+        if (!fVisited) {
+            fVisited = true;
+            return false;
+        }
+        return true;
+    }
+
+    SkScalar weight() const {
+        return fWeight;
+    }
+
+    SkOpSpan* windingSpanAtT(double tHit);
+    int windSum(const SkOpAngle* angle) const;
+
+private:
+    SkOpSpan fHead;  // the head span always has its t set to zero
+    SkOpSpanBase fTail;  // the tail span always has its t set to one
+    SkOpContour* fContour;
+    SkOpSegment* fNext;  // forward-only linked list used by contour to walk the segments
+    const SkOpSegment* fPrev;
+    SkPoint* fPts;  // pointer into array of points owned by edge builder that may be tweaked
+    SkPathOpsBounds fBounds;  // tight bounds
+    SkScalar fWeight;
+    int fCount;  // number of spans (one for a non-intersecting segment)
+    int fDoneCount;  // number of processed spans (zero initially)
+    SkPath::Verb fVerb;
+    bool fVisited;  // used by missing coincidence check
+#if DEBUG_COIN
+    mutable bool fDebugVisited;  // used by debug missing coincidence check
+#endif
+#if DEBUG_COINCIDENCE_ORDER
+    mutable int fDebugBaseIndex;
+    mutable SkScalar fDebugBaseMin;  // if > 0, the 1st t value in this seg vis-a-vis the ref seg
+    mutable SkScalar fDebugBaseMax;
+    mutable int fDebugLastIndex;
+    mutable SkScalar fDebugLastMin;  // if > 0, the last t -- next t val - base has same sign
+    mutable SkScalar fDebugLastMax;
+#endif
+    SkDEBUGCODE(int fID);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkOpSpan.cpp b/gfx/skia/skia/src/pathops/SkOpSpan.cpp
new file mode 100644
index 0000000000..a7e898915a
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSpan.cpp
@@ -0,0 +1,490 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+
+// True when this pt-t is not the canonical entry its span points back to,
+// i.e. it was merged into the span's circular pt-t loop as an alias.
+bool SkOpPtT::alias() const {
+    return this->span()->ptT() != this;
+}
+
+// Returns this pt-t if it is live; otherwise walks the circular fNext loop
+// for a non-deleted pt-t on the same span. Returns nullptr when all
+// candidates are deleted; the caller must treat that as a failure.
+const SkOpPtT* SkOpPtT::active() const {
+    if (!fDeleted) {
+        return this;
+    }
+    const SkOpPtT* ptT = this;
+    const SkOpPtT* stopPtT = ptT;
+    while ((ptT = ptT->next()) != stopPtT) {
+        if (ptT->fSpan == fSpan && !ptT->fDeleted) {
+            return ptT;
+        }
+    }
+    return nullptr; // should never return deleted; caller must abort
+}
+
+// True if 'check' appears in this pt-t's circular loop (this itself excluded).
+bool SkOpPtT::contains(const SkOpPtT* check) const {
+    SkOPASSERT(this != check);
+    const SkOpPtT* ptT = this;
+    const SkOpPtT* stopPtT = ptT;
+    while ((ptT = ptT->next()) != stopPtT) {
+        if (ptT == check) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// True if the loop holds an entry on 'segment' with the given point value.
+bool SkOpPtT::contains(const SkOpSegment* segment, const SkPoint& pt) const {
+    SkASSERT(this->segment() != segment);
+    const SkOpPtT* ptT = this;
+    const SkOpPtT* stopPtT = ptT;
+    while ((ptT = ptT->next()) != stopPtT) {
+        if (ptT->fPt == pt && ptT->segment() == segment) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// True if the loop holds an entry on 'segment' with the given t value.
+bool SkOpPtT::contains(const SkOpSegment* segment, double t) const {
+    const SkOpPtT* ptT = this;
+    const SkOpPtT* stopPtT = ptT;
+    while ((ptT = ptT->next()) != stopPtT) {
+        if (ptT->fT == t && ptT->segment() == segment) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Returns the first live (non-deleted) loop entry belonging to 'check',
+// or nullptr; unlike find(), 'this' itself is not considered.
+const SkOpPtT* SkOpPtT::contains(const SkOpSegment* check) const {
+    SkASSERT(this->segment() != check);
+    const SkOpPtT* ptT = this;
+    const SkOpPtT* stopPtT = ptT;
+    while ((ptT = ptT->next()) != stopPtT) {
+        if (ptT->segment() == check && !ptT->deleted()) {
+            return ptT;
+        }
+    }
+    return nullptr;
+}
+
+SkOpContour* SkOpPtT::contour() const {
+    return segment()->contour();
+}
+
+// Like contains(const SkOpSegment*) but includes 'this' in the search
+// (do/while rather than while), so it can return this pt-t itself.
+const SkOpPtT* SkOpPtT::find(const SkOpSegment* segment) const {
+    const SkOpPtT* ptT = this;
+    const SkOpPtT* stopPtT = ptT;
+    do {
+        if (ptT->segment() == segment && !ptT->deleted()) {
+            return ptT;
+        }
+        ptT = ptT->fNext;
+    } while (stopPtT != ptT);
+//    SkASSERT(0);
+    return nullptr;
+}
+
+SkOpGlobalState* SkOpPtT::globalState() const {
+    return contour()->globalState();
+}
+
+// Initialize a pt-t pair: the fNext loop starts out self-referential and
+// grows as intersections are merged in.
+void SkOpPtT::init(SkOpSpanBase* span, double t, const SkPoint& pt, bool duplicate) {
+    fT = t;
+    fPt = pt;
+    fSpan = span;
+    fNext = this;
+    fDuplicatePt = duplicate;
+    fDeleted = false;
+    fCoincident = false;
+    SkDEBUGCODE(fID = span->globalState()->nextPtTID());
+}
+
+// True when this is the canonical pt-t of its segment's head or tail span.
+bool SkOpPtT::onEnd() const {
+    const SkOpSpanBase* span = this->span();
+    if (span->ptT() != this) {
+        return false;
+    }
+    const SkOpSegment* segment = this->segment();
+    return span == segment->head() || span == segment->tail();
+}
+
+// Walks forward from 'check' to this, reporting whether any entry in between
+// shares this pt-t's point value (caller supplies the loop head).
+bool SkOpPtT::ptAlreadySeen(const SkOpPtT* check) const {
+    while (this != check) {
+        if (this->fPt == check->fPt) {
+            return true;
+        }
+        check = check->fNext;
+    }
+    return false;
+}
+
+// The loop is singly linked, so finding the predecessor requires a full walk.
+SkOpPtT* SkOpPtT::prev() {
+    SkOpPtT* result = this;
+    SkOpPtT* next = this;
+    while ((next = next->fNext) != this) {
+        result = next;
+    }
+    SkASSERT(result->fNext == this);
+    return result;
+}
+
+const SkOpSegment* SkOpPtT::segment() const {
+    return span()->segment();
+}
+
+SkOpSegment* SkOpPtT::segment() {
+    return span()->segment();
+}
+
+// Mark deleted; only legal for aliases or pt-ts whose span is already
+// marked deleted (debug-checked), and only once.
+void SkOpPtT::setDeleted() {
+    SkASSERT(this->span()->debugDeleted() || this->span()->ptT() != this);
+    SkOPASSERT(!fDeleted);
+    fDeleted = true;
+}
+
+// Merge the opposite span's pt-t loop into this one. Returns true on
+// success, including the no-op case where opp is already in this loop
+// (oppPrev == nullptr); FAIL_IF propagates a mergeMatches() failure.
+bool SkOpSpanBase::addOpp(SkOpSpanBase* opp) {
+    SkOpPtT* oppPrev = this->ptT()->oppPrev(opp->ptT());
+    if (!oppPrev) {
+        return true;
+    }
+    FAIL_IF(!this->mergeMatches(opp));
+    this->ptT()->addOpp(opp->ptT(), oppPrev);
+    this->checkForCollapsedCoincidence();
+    return true;
+}
+
+// Reports whether the t range [s, e] lies inside the min/max t of this
+// span's same-segment pt-t entries (i.e. the span has collapsed onto it).
+// A safety net and a repeated-node check guard against malformed loops.
+SkOpSpanBase::Collapsed SkOpSpanBase::collapsed(double s, double e) const {
+    const SkOpPtT* start = &fPtT;
+    const SkOpPtT* startNext = nullptr;
+    const SkOpPtT* walk = start;
+    double min = walk->fT;
+    double max = min;
+    const SkOpSegment* segment = this->segment();
+    int safetyNet = 100000;
+    while ((walk = walk->next()) != start) {
+        if (!--safetyNet) {
+            return Collapsed::kError;
+        }
+        if (walk == startNext) {
+            return Collapsed::kError;
+        }
+        if (walk->segment() != segment) {
+            continue;
+        }
+        min = std::min(min, walk->fT);
+        max = std::max(max, walk->fT);
+        if (between(min, s, max) && between(min, e, max)) {
+            return Collapsed::kYes;
+        }
+        startNext = start->next();
+    }
+    return Collapsed::kNo;
+}
+
+// True when 'span's canonical pt-t appears in this span's pt-t loop.
+bool SkOpSpanBase::contains(const SkOpSpanBase* span) const {
+    const SkOpPtT* start = &fPtT;
+    const SkOpPtT* check = &span->fPtT;
+    SkOPASSERT(start != check);
+    const SkOpPtT* walk = start;
+    while ((walk = walk->next()) != start) {
+        if (walk == check) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Returns the live canonical pt-t on 'segment' linked into this span's
+// loop, or nullptr if none qualifies.
+const SkOpPtT* SkOpSpanBase::contains(const SkOpSegment* segment) const {
+    const SkOpPtT* start = &fPtT;
+    const SkOpPtT* walk = start;
+    while ((walk = walk->next()) != start) {
+        if (walk->deleted()) {
+            continue;
+        }
+        if (walk->segment() == segment && walk->span()->ptT() == walk) {
+            return walk;
+        }
+    }
+    return nullptr;
+}
+
+// True when the coincident-end loop holds a span on 'segment'.
+bool SkOpSpanBase::containsCoinEnd(const SkOpSegment* segment) const {
+    SkASSERT(this->segment() != segment);
+    const SkOpSpanBase* next = this;
+    while ((next = next->fCoinEnd) != this) {
+        if (next->segment() == segment) {
+            return true;
+        }
+    }
+    return false;
+}
+
+SkOpContour* SkOpSpanBase::contour() const {
+    return segment()->contour();
+}
+
+SkOpGlobalState* SkOpSpanBase::globalState() const {
+    return contour()->globalState();
+}
+
+// Shared initialization for spans and span bases: self-referential coincident
+// end loop, embedded pt-t at (t, pt), and cleared flags.
+void SkOpSpanBase::initBase(SkOpSegment* segment, SkOpSpan* prev, double t, const SkPoint& pt) {
+    fSegment = segment;
+    fPtT.init(this, t, pt, false);
+    fCoinEnd = this;
+    fFromAngle = nullptr;
+    fPrev = prev;
+    fSpanAdds = 0;
+    fAligned = true;
+    fChased = false;
+    SkDEBUGCODE(fCount = 1);
+    SkDEBUGCODE(fID = globalState()->nextSpanID());
+    SkDEBUGCODE(fDebugDeleted = false);
+}
+
+// this pair of spans share a common t value or point; merge them and eliminate duplicates
+// this does not compute the best t or pt value; this merely moves all data into a single list
+void SkOpSpanBase::merge(SkOpSpan* span) {
+    SkOpPtT* spanPtT = span->ptT();
+    SkASSERT(this->t() != spanPtT->fT);
+    SkASSERT(!zero_or_one(spanPtT->fT));
+    span->release(this->ptT());
+    if (this->contains(span)) {
+        SkOPASSERT(0); // check to see if this ever happens -- should have been found earlier
+        return; // merge is already in the ptT loop
+    }
+    SkOpPtT* remainder = spanPtT->next();
+    this->ptT()->insert(spanPtT);
+    // Walk the absorbed loop; skip entries that duplicate an existing
+    // (span, t) pair, insert the rest.
+    while (remainder != spanPtT) {
+        SkOpPtT* next = remainder->next();
+        SkOpPtT* compare = spanPtT->next();
+        while (compare != spanPtT) {
+            SkOpPtT* nextC = compare->next();
+            if (nextC->span() == remainder->span() && nextC->fT == remainder->fT) {
+                goto tryNextRemainder;
+            }
+            compare = nextC;
+        }
+        spanPtT->insert(remainder);
+tryNextRemainder:
+        remainder = next;
+    }
+    fSpanAdds += span->fSpanAdds;
+}
+
+// please keep in sync with debugCheckForCollapsedCoincidence()
+void SkOpSpanBase::checkForCollapsedCoincidence() {
+    SkOpCoincidence* coins = this->globalState()->coincidence();
+    if (coins->isEmpty()) {
+        return;
+    }
+// the insert above may have put both ends of a coincident run in the same span
+// for each coincident ptT in loop; see if its opposite is also in the loop
+// this implementation is the motivation for marking that a ptT is referenced by a coincident span
+    SkOpPtT* head = this->ptT();
+    SkOpPtT* test = head;
+    do {
+        if (!test->coincident()) {
+            continue;
+        }
+        coins->markCollapsed(test);
+    } while ((test = test->next()) != head);
+    coins->releaseDeleted();
+}
+
+// please keep in sync with debugMergeMatches()
+// Look to see if pt-t linked list contains same segment more than once
+// if so, and if each pt-t is directly pointed to by spans in that segment,
+// merge them
+// keep the points, but remove spans so that the segment doesn't have 2 or more
+// spans pointing to the same pt-t loop at different loop elements
+bool SkOpSpanBase::mergeMatches(SkOpSpanBase* opp) {
+    SkOpPtT* test = &fPtT;
+    SkOpPtT* testNext;
+    const SkOpPtT* stop = test;
+    int safetyHatch = 1000000;  // bail out of a malformed (non-terminating) loop
+    do {
+        if (!--safetyHatch) {
+            return false;
+        }
+        testNext = test->next();
+        if (test->deleted()) {
+            continue;
+        }
+        SkOpSpanBase* testBase = test->span();
+        SkASSERT(testBase->ptT() == test);
+        SkOpSegment* segment = test->segment();
+        if (segment->done()) {
+            continue;
+        }
+        SkOpPtT* inner = opp->ptT();
+        const SkOpPtT* innerStop = inner;
+        do {
+            if (inner->segment() != segment) {
+                continue;
+            }
+            if (inner->deleted()) {
+                continue;
+            }
+            SkOpSpanBase* innerBase = inner->span();
+            SkASSERT(innerBase->ptT() == inner);
+            // when the intersection is first detected, the span base is marked if there are
+            // more than one point in the intersection.
+            if (!zero_or_one(inner->fT)) {
+                innerBase->upCast()->release(test);
+            } else {
+                SkOPASSERT(inner->fT != test->fT);
+                if (!zero_or_one(test->fT)) {
+                    testBase->upCast()->release(inner);
+                } else {
+                    segment->markAllDone();  // mark segment as collapsed
+                    SkDEBUGCODE(testBase->debugSetDeleted());
+                    test->setDeleted();
+                    SkDEBUGCODE(innerBase->debugSetDeleted());
+                    inner->setDeleted();
+                }
+            }
+#ifdef SK_DEBUG   // assert if another undeleted entry points to segment
+            const SkOpPtT* debugInner = inner;
+            while ((debugInner = debugInner->next()) != innerStop) {
+                if (debugInner->segment() != segment) {
+                    continue;
+                }
+                if (debugInner->deleted()) {
+                    continue;
+                }
+                SkOPASSERT(0);
+            }
+#endif
+            break;
+        } while ((inner = inner->next()) != innerStop);
+    } while ((test = testNext) != stop);
+    this->checkForCollapsedCoincidence();
+    return true;
+}
+
+// Repeatedly attempt sortableTop() until it succeeds or the retry budget
+// (kMaxWindingTries) is spent, then report whatever wind sum resulted.
+int SkOpSpan::computeWindSum() {
+    SkOpGlobalState* globals = this->globalState();
+    SkOpContour* contourHead = globals->contourHead();
+    int windTry = 0;
+    while (!this->sortableTop(contourHead) && ++windTry < SkOpGlobalState::kMaxWindingTries) {
+    }
+    return this->windSum();
+}
+
+// True when the circular coincident-span loop holds a span on 'segment'.
+bool SkOpSpan::containsCoincidence(const SkOpSegment* segment) const {
+    SkASSERT(this->segment() != segment);
+    const SkOpSpan* next = fCoincident;
+    do {
+        if (next->segment() == segment) {
+            return true;
+        }
+    } while ((next = next->fCoincident) != this);
+    return false;
+}
+
+// Full span initialization (t must be < 1; t == 1 spans are SkOpSpanBase
+// tails). Winding sums start at the SK_MinS32 'uncomputed' sentinel.
+void SkOpSpan::init(SkOpSegment* segment, SkOpSpan* prev, double t, const SkPoint& pt) {
+    SkASSERT(t != 1);
+    initBase(segment, prev, t, pt);
+    fCoincident = this;
+    fToAngle = nullptr;
+    fWindSum = fOppSum = SK_MinS32;
+    fWindValue = 1;
+    fOppValue = 0;
+    fTopTTry = 0;
+    fChased = fDone = false;
+    segment->bumpCount();
+    fAlreadyAdded = false;
+}
+
+// Please keep this in sync with debugInsertCoincidence()
+// Locate the span on 'segment' that matches this span via the pt-t loop and
+// link it into the coincident loop. 'flipped' selects the previous span;
+// 'ordered' false falls back to picking the starter of the pt-t pair.
+bool SkOpSpan::insertCoincidence(const SkOpSegment* segment, bool flipped, bool ordered) {
+    if (this->containsCoincidence(segment)) {
+        return true;
+    }
+    SkOpPtT* next = &fPtT;
+    while ((next = next->next()) != &fPtT) {
+        if (next->segment() == segment) {
+            SkOpSpan* span;
+            SkOpSpanBase* base = next->span();
+            if (!ordered) {
+                const SkOpPtT* spanEndPtT = fNext->contains(segment);
+                FAIL_IF(!spanEndPtT);
+                const SkOpSpanBase* spanEnd = spanEndPtT->span();
+                const SkOpPtT* start = base->ptT()->starter(spanEnd->ptT());
+                FAIL_IF(!start->span()->upCastable());
+                span = const_cast<SkOpSpan*>(start->span()->upCast());
+            } else if (flipped) {
+                span = base->prev();
+                FAIL_IF(!span);
+            } else {
+                FAIL_IF(!base->upCastable());
+                span = base->upCast();
+            }
+            this->insertCoincidence(span);
+            return true;
+        }
+    }
+#if DEBUG_COINCIDENCE
+    SkASSERT(0); // FIXME? if we get here, the span is missing its opposite segment...
+#endif
+    return true;
+}
+
+// Unlink this span from the segment's span list, fix up coincidence records,
+// and repoint every pt-t in the loop that referenced this span at the span
+// being kept.
+void SkOpSpan::release(const SkOpPtT* kept) {
+    SkDEBUGCODE(fDebugDeleted = true);
+    SkOPASSERT(kept->span() != this);
+    SkASSERT(!final());
+    SkOpSpan* prev = this->prev();
+    SkASSERT(prev);
+    SkOpSpanBase* next = this->next();
+    SkASSERT(next);
+    prev->setNext(next);
+    next->setPrev(prev);
+    this->segment()->release(this);
+    SkOpCoincidence* coincidence = this->globalState()->coincidence();
+    if (coincidence) {
+        coincidence->fixUp(this->ptT(), kept);
+    }
+    this->ptT()->setDeleted();
+    SkOpPtT* stopPtT = this->ptT();
+    SkOpPtT* testPtT = stopPtT;
+    const SkOpSpanBase* keptSpan = kept->span();
+    do {
+        if (this == testPtT->span()) {
+            testPtT->setSpan(keptSpan);
+        }
+    } while ((testPtT = testPtT->next()) != stopPtT);
+}
+
+// Record the opposite winding sum; a conflicting prior value marks the whole
+// operation as a winding failure rather than asserting.
+void SkOpSpan::setOppSum(int oppSum) {
+    SkASSERT(!final());
+    if (fOppSum != SK_MinS32 && fOppSum != oppSum) {
+        this->globalState()->setWindingFailed();
+        return;
+    }
+    SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(oppSum) <= DEBUG_LIMIT_WIND_SUM);
+    fOppSum = oppSum;
+}
+
+// Record the winding sum; mirrors setOppSum() for this segment's own winding.
+void SkOpSpan::setWindSum(int windSum) {
+    SkASSERT(!final());
+    if (fWindSum != SK_MinS32 && fWindSum != windSum) {
+        this->globalState()->setWindingFailed();
+        return;
+    }
+    SkASSERT(!DEBUG_LIMIT_WIND_SUM || SkTAbs(windSum) <= DEBUG_LIMIT_WIND_SUM);
+    fWindSum = windSum;
+}
diff --git a/gfx/skia/skia/src/pathops/SkOpSpan.h b/gfx/skia/skia/src/pathops/SkOpSpan.h
new file mode 100644
index 0000000000..7323ed38aa
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkOpSpan.h
@@ -0,0 +1,578 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkOpSpan_DEFINED
+#define SkOpSpan_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMath.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+class SkOpAngle;
+class SkOpCoincidence;
+class SkOpContour;
+class SkOpSegment;
+class SkOpSpan;
+class SkOpSpanBase;
+
+// subset of op span used by terminal span (when t is equal to one)
+// A point/parameter pair on a segment. Pt-ts that represent the same
+// geometric point across segments are linked into a circular singly-linked
+// list via fNext.
+class SkOpPtT {
+public:
+    enum {
+        kIsAlias = 1,
+        kIsDuplicate = 1
+    };
+
+    const SkOpPtT* active() const;
+
+    // please keep in sync with debugAddOpp()
+    // Splice the opposite loop into this one: this's tail is handed to
+    // oppPrev and opp becomes this's next, joining the two circular lists.
+    void addOpp(SkOpPtT* opp, SkOpPtT* oppPrev) {
+        SkOpPtT* oldNext = this->fNext;
+        SkASSERT(this != opp);
+        this->fNext = opp;
+        SkASSERT(oppPrev != oldNext);
+        oppPrev->fNext = oldNext;
+    }
+
+    bool alias() const;
+    bool coincident() const { return fCoincident; }
+    bool contains(const SkOpPtT* ) const;
+    bool contains(const SkOpSegment*, const SkPoint& ) const;
+    bool contains(const SkOpSegment*, double t) const;
+    const SkOpPtT* contains(const SkOpSegment* ) const;
+    SkOpContour* contour() const;
+
+    int debugID() const {
+        return SkDEBUGRELEASE(fID, -1);
+    }
+
+    void debugAddOpp(const SkOpPtT* opp, const SkOpPtT* oppPrev) const;
+    const SkOpAngle* debugAngle(int id) const;
+    const SkOpCoincidence* debugCoincidence() const;
+    bool debugContains(const SkOpPtT* ) const;
+    const SkOpPtT* debugContains(const SkOpSegment* check) const;
+    SkOpContour* debugContour(int id) const;
+    const SkOpPtT* debugEnder(const SkOpPtT* end) const;
+    int debugLoopLimit(bool report) const;
+    bool debugMatchID(int id) const;
+    const SkOpPtT* debugOppPrev(const SkOpPtT* opp) const;
+    const SkOpPtT* debugPtT(int id) const;
+    void debugResetCoinT() const;
+    const SkOpSegment* debugSegment(int id) const;
+    void debugSetCoinT(int ) const;
+    const SkOpSpanBase* debugSpan(int id) const;
+    void debugValidate() const;
+
+    bool deleted() const {
+        return fDeleted;
+    }
+
+    bool duplicate() const {
+        return fDuplicatePt;
+    }
+
+    void dump() const;  // available to testing only
+    void dumpAll() const;
+    void dumpBase() const;
+
+    const SkOpPtT* find(const SkOpSegment* ) const;
+    SkOpGlobalState* globalState() const;
+    void init(SkOpSpanBase* , double t, const SkPoint& , bool dup);
+
+    // Insert 'span' into the circular loop immediately after this entry.
+    void insert(SkOpPtT* span) {
+        SkASSERT(span != this);
+        span->fNext = fNext;
+        fNext = span;
+    }
+
+    const SkOpPtT* next() const {
+        return fNext;
+    }
+
+    SkOpPtT* next() {
+        return fNext;
+    }
+
+    bool onEnd() const;
+
+    // returns nullptr if this is already in the opp ptT loop
+    // Otherwise returns the entry in opp's loop whose fNext points at opp
+    // (the predecessor addOpp() needs for splicing).
+    SkOpPtT* oppPrev(const SkOpPtT* opp) const {
+        // find the fOpp ptr to opp
+        SkOpPtT* oppPrev = opp->fNext;
+        if (oppPrev == this) {
+            return nullptr;
+        }
+        while (oppPrev->fNext != opp) {
+            oppPrev = oppPrev->fNext;
+            if (oppPrev == this) {
+                return nullptr;
+            }
+        }
+        return oppPrev;
+    }
+
+    // Intersect the t ranges [s1, e1] and [s2, e2]; on success writes the
+    // overlapping start/end pt-ts and returns true. Returns false when the
+    // ranges merely touch at a point or do not overlap.
+    static bool Overlaps(const SkOpPtT* s1, const SkOpPtT* e1, const SkOpPtT* s2,
+            const SkOpPtT* e2, const SkOpPtT** sOut, const SkOpPtT** eOut) {
+        const SkOpPtT* start1 = s1->fT < e1->fT ? s1 : e1;
+        const SkOpPtT* start2 = s2->fT < e2->fT ? s2 : e2;
+        *sOut = between(s1->fT, start2->fT, e1->fT) ? start2
+                : between(s2->fT, start1->fT, e2->fT) ? start1 : nullptr;
+        const SkOpPtT* end1 = s1->fT < e1->fT ? e1 : s1;
+        const SkOpPtT* end2 = s2->fT < e2->fT ? e2 : s2;
+        *eOut = between(s1->fT, end2->fT, e1->fT) ? end2
+                : between(s2->fT, end1->fT, e2->fT) ? end1 : nullptr;
+        if (*sOut == *eOut) {
+            SkOPOBJASSERT(s1, start1->fT >= end2->fT || start2->fT >= end1->fT);
+            return false;
+        }
+        SkASSERT(!*sOut || *sOut != *eOut);
+        return *sOut && *eOut;
+    }
+
+    bool ptAlreadySeen(const SkOpPtT* head) const;
+    SkOpPtT* prev();
+
+    const SkOpSegment* segment() const;
+    SkOpSegment* segment();
+
+    // const with a mutable member so coincidence code can mark const pt-ts.
+    void setCoincident() const {
+        SkOPASSERT(!fDeleted);
+        fCoincident = true;
+    }
+
+    void setDeleted();
+
+    void setSpan(const SkOpSpanBase* span) {
+        fSpan = const_cast<SkOpSpanBase*>(span);
+    }
+
+    const SkOpSpanBase* span() const {
+        return fSpan;
+    }
+
+    SkOpSpanBase* span() {
+        return fSpan;
+    }
+
+    // Whichever of this and 'end' has the smaller t.
+    const SkOpPtT* starter(const SkOpPtT* end) const {
+        return fT < end->fT ? this : end;
+    }
+
+    double fT;
+    SkPoint fPt;   // cache of point value at this t
+protected:
+    SkOpSpanBase* fSpan;  // contains winding data
+    SkOpPtT* fNext;  // intersection on opposite curve or alias on this curve
+    bool fDeleted;  // set if removed from span list
+    bool fDuplicatePt;  // set if identical pt is somewhere in the next loop
+    // below mutable since referrer is otherwise always const
+    mutable bool fCoincident;  // set if at some point a coincident span pointed here
+    SkDEBUGCODE(int fID);
+};
+
+// The base of a span: carries the pt-t at the span's start, links to the
+// owning segment, the previous span, and the coincident-end loop. A terminal
+// span (t == 1) is only ever an SkOpSpanBase; interior spans are SkOpSpan.
+class SkOpSpanBase {
+public:
+    enum class Collapsed {
+        kNo,
+        kYes,
+        kError,  // walked loop appears malformed; abort the operation
+    };
+
+    bool addOpp(SkOpSpanBase* opp);
+
+    void bumpSpanAdds() {
+        ++fSpanAdds;
+    }
+
+    bool chased() const {
+        return fChased;
+    }
+
+    void checkForCollapsedCoincidence();
+
+    const SkOpSpanBase* coinEnd() const {
+        return fCoinEnd;
+    }
+
+    Collapsed collapsed(double s, double e) const;
+    bool contains(const SkOpSpanBase* ) const;
+    const SkOpPtT* contains(const SkOpSegment* ) const;
+
+    // True when 'coin' is linked into this span's circular coincident-end loop.
+    bool containsCoinEnd(const SkOpSpanBase* coin) const {
+        SkASSERT(this != coin);
+        const SkOpSpanBase* next = this;
+        while ((next = next->fCoinEnd) != this) {
+            if (next == coin) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    bool containsCoinEnd(const SkOpSegment* ) const;
+    SkOpContour* contour() const;
+
+    int debugBumpCount() {
+        return SkDEBUGRELEASE(++fCount, -1);
+    }
+
+    int debugID() const {
+        return SkDEBUGRELEASE(fID, -1);
+    }
+
+#if DEBUG_COIN
+    void debugAddOpp(SkPathOpsDebug::GlitchLog* , const SkOpSpanBase* opp) const;
+#endif
+    bool debugAlignedEnd(double t, const SkPoint& pt) const;
+    bool debugAlignedInner() const;
+    const SkOpAngle* debugAngle(int id) const;
+#if DEBUG_COIN
+    void debugCheckForCollapsedCoincidence(SkPathOpsDebug::GlitchLog* ) const;
+#endif
+    const SkOpCoincidence* debugCoincidence() const;
+    bool debugCoinEndLoopCheck() const;
+    SkOpContour* debugContour(int id) const;
+#ifdef SK_DEBUG
+    bool debugDeleted() const { return fDebugDeleted; }
+#endif
+#if DEBUG_COIN
+    void debugInsertCoinEnd(SkPathOpsDebug::GlitchLog* ,
+                            const SkOpSpanBase* ) const;
+    void debugMergeMatches(SkPathOpsDebug::GlitchLog* log,
+                           const SkOpSpanBase* opp) const;
+#endif
+    const SkOpPtT* debugPtT(int id) const;
+    void debugResetCoinT() const;
+    const SkOpSegment* debugSegment(int id) const;
+    void debugSetCoinT(int ) const;
+#ifdef SK_DEBUG
+    void debugSetDeleted() { fDebugDeleted = true; }
+#endif
+    const SkOpSpanBase* debugSpan(int id) const;
+    const SkOpSpan* debugStarter(SkOpSpanBase const** endPtr) const;
+    SkOpGlobalState* globalState() const;
+    void debugValidate() const;
+
+    bool deleted() const {
+        return fPtT.deleted();
+    }
+
+    void dump() const;  // available to testing only
+    void dumpCoin() const;
+    void dumpAll() const;
+    void dumpBase() const;
+    void dumpHead() const;
+
+    // A final span (t == 1) is a bare SkOpSpanBase; upCast() is illegal on it.
+    bool final() const {
+        return fPtT.fT == 1;
+    }
+
+    SkOpAngle* fromAngle() const {
+        return fFromAngle;
+    }
+
+    void initBase(SkOpSegment* parent, SkOpSpan* prev, double t, const SkPoint& pt);
+
+    // Please keep this in sync with debugInsertCoinEnd()
+    // Merge the two circular coincident-end loops by exchanging next links.
+    void insertCoinEnd(SkOpSpanBase* coin) {
+        if (containsCoinEnd(coin)) {
+            SkASSERT(coin->containsCoinEnd(this));
+            return;
+        }
+        debugValidate();
+        SkASSERT(this != coin);
+        SkOpSpanBase* coinNext = coin->fCoinEnd;
+        coin->fCoinEnd = this->fCoinEnd;
+        this->fCoinEnd = coinNext;
+        debugValidate();
+    }
+
+    void merge(SkOpSpan* span);
+    bool mergeMatches(SkOpSpanBase* opp);
+
+    const SkOpSpan* prev() const {
+        return fPrev;
+    }
+
+    SkOpSpan* prev() {
+        return fPrev;
+    }
+
+    const SkPoint& pt() const {
+        return fPtT.fPt;
+    }
+
+    const SkOpPtT* ptT() const {
+        return &fPtT;
+    }
+
+    SkOpPtT* ptT() {
+        return &fPtT;
+    }
+
+    SkOpSegment* segment() const {
+        return fSegment;
+    }
+
+    void setAligned() {
+        fAligned = true;
+    }
+
+    void setChased(bool chased) {
+        fChased = chased;
+    }
+
+    void setFromAngle(SkOpAngle* angle) {
+        fFromAngle = angle;
+    }
+
+    void setPrev(SkOpSpan* prev) {
+        fPrev = prev;
+    }
+
+    // True when the pt-t loop has exactly two entries (this and one other).
+    bool simple() const {
+        fPtT.debugValidate();
+        return fPtT.next()->next() == &fPtT;
+    }
+
+    int spanAddsCount() const {
+        return fSpanAdds;
+    }
+
+    // Of this and 'end', the one with the smaller t, upcast to a full span
+    // (the smaller-t endpoint of a pair is never the final span).
+    const SkOpSpan* starter(const SkOpSpanBase* end) const {
+        const SkOpSpanBase* result = t() < end->t() ? this : end;
+        return result->upCast();
+    }
+
+    SkOpSpan* starter(SkOpSpanBase* end) {
+        SkASSERT(this->segment() == end->segment());
+        SkOpSpanBase* result = t() < end->t() ? this : end;
+        return result->upCast();
+    }
+
+    // As above, but swaps *endPtr to this when this turns out to be the end.
+    SkOpSpan* starter(SkOpSpanBase** endPtr) {
+        SkOpSpanBase* end = *endPtr;
+        SkASSERT(this->segment() == end->segment());
+        SkOpSpanBase* result;
+        if (t() < end->t()) {
+            result = this;
+        } else {
+            result = end;
+            *endPtr = this;
+        }
+        return result->upCast();
+    }
+
+    // +1 when walking toward 'end' increases t, -1 otherwise.
+    int step(const SkOpSpanBase* end) const {
+        return t() < end->t() ? 1 : -1;
+    }
+
+    double t() const {
+        return fPtT.fT;
+    }
+
+    void unaligned() {
+        fAligned = false;
+    }
+
+    // Downcast-style accessors: only valid on non-final spans (asserted).
+    SkOpSpan* upCast() {
+        SkASSERT(!final());
+        return (SkOpSpan*) this;
+    }
+
+    const SkOpSpan* upCast() const {
+        SkOPASSERT(!final());
+        return (const SkOpSpan*) this;
+    }
+
+    SkOpSpan* upCastable() {
+        return final() ? nullptr : upCast();
+    }
+
+    const SkOpSpan* upCastable() const {
+        return final() ? nullptr : upCast();
+    }
+
+private:
+    void alignInner();
+
+protected:  // no direct access to internals to avoid treating a span base as a span
+    SkOpPtT fPtT;  // list of points and t values associated with the start of this span
+    SkOpSegment* fSegment;  // segment that contains this span
+    SkOpSpanBase* fCoinEnd;  // linked list of coincident spans that end here (may point to itself)
+    SkOpAngle* fFromAngle;  // points to next angle from span start to end
+    SkOpSpan* fPrev;  // previous intersection point
+    int fSpanAdds;  // number of times intersections have been added to span
+    bool fAligned;
+    bool fChased;  // set after span has been added to chase array
+    SkDEBUGCODE(int fCount);  // number of pt/t pairs added
+    SkDEBUGCODE(int fID);
+    SkDEBUGCODE(bool fDebugDeleted);  // set when span was merged with another span
+};
+
+// A full interior span (t < 1): adds winding state, the forward link to the
+// next span, and the coincident-span loop on top of SkOpSpanBase.
+class SkOpSpan : public SkOpSpanBase {
+public:
+    bool alreadyAdded() const {
+        if (fAlreadyAdded) {
+            return true;
+        }
+        return false;
+    }
+
+    // Detach this span from its coincident loop by making the loop link
+    // self-referential; returns false if it was already detached.
+    bool clearCoincident() {
+        SkASSERT(!final());
+        if (fCoincident == this) {
+            return false;
+        }
+        fCoincident = this;
+        return true;
+    }
+
+    int computeWindSum();
+    bool containsCoincidence(const SkOpSegment* ) const;
+
+    // True when 'coin' is linked into this span's circular coincident loop.
+    bool containsCoincidence(const SkOpSpan* coin) const {
+        SkASSERT(this != coin);
+        const SkOpSpan* next = this;
+        while ((next = next->fCoincident) != this) {
+            if (next == coin) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    bool debugCoinLoopCheck() const;
+#if DEBUG_COIN
+    void debugInsertCoincidence(SkPathOpsDebug::GlitchLog* , const SkOpSpan* ) const;
+    void debugInsertCoincidence(SkPathOpsDebug::GlitchLog* ,
+                                const SkOpSegment* , bool flipped, bool ordered) const;
+#endif
+    void dumpCoin() const;
+    bool dumpSpan() const;
+
+    bool done() const {
+        SkASSERT(!final());
+        return fDone;
+    }
+
+    void init(SkOpSegment* parent, SkOpSpan* prev, double t, const SkPoint& pt);
+    bool insertCoincidence(const SkOpSegment* , bool flipped, bool ordered);
+
+    // Please keep this in sync with debugInsertCoincidence()
+    // Merge the two circular coincident loops by exchanging next links.
+    void insertCoincidence(SkOpSpan* coin) {
+        if (containsCoincidence(coin)) {
+            SkASSERT(coin->containsCoincidence(this));
+            return;
+        }
+        debugValidate();
+        SkASSERT(this != coin);
+        SkOpSpan* coinNext = coin->fCoincident;
+        coin->fCoincident = this->fCoincident;
+        this->fCoincident = coinNext;
+        debugValidate();
+    }
+
+    // Canceled: both winding contributions zeroed by coincident cancellation.
+    bool isCanceled() const {
+        SkASSERT(!final());
+        return fWindValue == 0 && fOppValue == 0;
+    }
+
+    bool isCoincident() const {
+        SkASSERT(!final());
+        return fCoincident != this;
+    }
+
+    void markAdded() {
+        fAlreadyAdded = true;
+    }
+
+    SkOpSpanBase* next() const {
+        SkASSERT(!final());
+        return fNext;
+    }
+
+    int oppSum() const {
+        SkASSERT(!final());
+        return fOppSum;
+    }
+
+    int oppValue() const {
+        SkASSERT(!final());
+        return fOppValue;
+    }
+
+    void release(const SkOpPtT* );
+
+    SkOpPtT* setCoinStart(SkOpSpan* oldCoinStart, SkOpSegment* oppSegment);
+
+    void setDone(bool done) {
+        SkASSERT(!final());
+        fDone = done;
+    }
+
+    void setNext(SkOpSpanBase* nextT) {
+        SkASSERT(!final());
+        fNext = nextT;
+    }
+
+    void setOppSum(int oppSum);
+
+    // Setters assert the corresponding sum is still uncomputed (SK_MinS32)
+    // and that a done span never receives a nonzero value.
+    void setOppValue(int oppValue) {
+        SkASSERT(!final());
+        SkASSERT(fOppSum == SK_MinS32);
+        SkOPASSERT(!oppValue || !fDone);
+        fOppValue = oppValue;
+    }
+
+    void setToAngle(SkOpAngle* angle) {
+        SkASSERT(!final());
+        fToAngle = angle;
+    }
+
+    void setWindSum(int windSum);
+
+    void setWindValue(int windValue) {
+        SkASSERT(!final());
+        SkASSERT(windValue >= 0);
+        SkASSERT(fWindSum == SK_MinS32);
+        SkOPASSERT(!windValue || !fDone);
+        fWindValue = windValue;
+    }
+
+    bool sortableTop(SkOpContour* );
+
+    SkOpAngle* toAngle() const {
+        SkASSERT(!final());
+        return fToAngle;
+    }
+
+    int windSum() const {
+        SkASSERT(!final());
+        return fWindSum;
+    }
+
+    int windValue() const {
+        SkOPASSERT(!final());
+        return fWindValue;
+    }
+
+private:  // no direct access to internals to avoid treating a span base as a span
+    SkOpSpan* fCoincident;  // linked list of spans coincident with this one (may point to itself)
+    SkOpAngle* fToAngle;  // points to next angle from span start to end
+    SkOpSpanBase* fNext;  // next intersection point
+    int fWindSum;  // accumulated from contours surrounding this one.
+    int fOppSum;  // for binary operators: the opposite winding sum
+    int fWindValue;  // 0 == canceled; 1 == normal; >1 == coincident
+    int fOppValue;  // normally 0 -- when binary coincident edges combine, opp value goes here
+    int fTopTTry;  // specifies direction and t value to try next
+    bool fDone;  // if set, this span to next higher T has been processed
+    bool fAlreadyAdded;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsAsWinding.cpp b/gfx/skia/skia/src/pathops/SkPathOpsAsWinding.cpp
new file mode 100644
index 0000000000..7e2912ea03
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsAsWinding.cpp
@@ -0,0 +1,457 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkPath.h"
+#include "include/core/SkPathBuilder.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkMacros.h"
+#include "src/core/SkPathPriv.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+#include <vector>
+
+using std::vector;
+
+// One contour of the source path: its bounding box, its verb range within the
+// path, and its place in the containment tree built by OpAsWinding.
+struct Contour {
+    enum class Direction { // SkPathDirection doesn't have 'none' state
+        kCCW = -1,
+        kNone,
+        kCW,
+    };
+
+    Contour(const SkRect& bounds, int lastStart, int verbStart)
+        : fBounds(bounds)
+        , fVerbStart(lastStart)
+        , fVerbEnd(verbStart) {
+    }
+
+    vector<Contour*> fChildren;  // contours whose bounds nest inside this one
+    const SkRect fBounds;  // bounding box of this contour's verbs
+    SkPoint fMinXY{SK_ScalarMax, SK_ScalarMax};  // leftmost on-curve point found so far
+    const int fVerbStart;  // index of this contour's first verb in the path
+    const int fVerbEnd;  // one past this contour's last verb
+    Direction fDirection{Direction::kNone};  // winding direction, once computed
+    bool fContained{false};  // set when outer contour edges wind around this contour's min point
+    bool fReverse{false};  // set when this contour must be emitted reversed
+};
+
+// Per-verb point counts and first-point indices, indexed by SkPath::Verb
+// (move, line, quad, conic, cubic, close).
+static const int kPtCount[] = { 1, 1, 2, 2, 3, 0 };
+static const int kPtIndex[] = { 0, 1, 1, 1, 1, 0 };
+
+// Map a first-derivative dy to a direction: positive slope is kCCW (-1),
+// negative is kCW (+1), zero (horizontal) is kNone.
+static Contour::Direction to_direction(SkScalar dy) {
+    return dy > 0 ? Contour::Direction::kCCW : dy < 0 ? Contour::Direction::kCW :
+            Contour::Direction::kNone;
+}
+
+// Sum the signed crossings of the curve (pts/verb/weight) with the horizontal
+// ray extending left from 'edge'. Each crossing contributes the direction's
+// integer value (kCCW = -1, kCW = +1) per the curve's vertical slope at the
+// intersection. The sum is used for containment testing, not direction.
+static int contains_edge(SkPoint pts[4], SkPath::Verb verb, SkScalar weight, const SkPoint& edge) {
+    SkRect bounds;
+    bounds.setBounds(pts, kPtCount[verb] + 1);
+    if (bounds.fTop > edge.fY) {
+        return 0;  // curve entirely below the ray
+    }
+    if (bounds.fBottom <= edge.fY) {  // check to see if y is at line end to avoid double counting
+        return 0;
+    }
+    if (bounds.fLeft >= edge.fX) {
+        return 0;  // curve entirely to the right of edge
+    }
+    int winding = 0;
+    double tVals[3];
+    Contour::Direction directions[3];
+    // must intersect horz ray with curve in case it intersects more than once
+    int count = (*CurveIntercept[verb * 2])(pts, weight, edge.fY, tVals);
+    SkASSERT(between(0, count, 3));
+    // remove results to the right of edge
+    for (int index = 0; index < count; ) {
+        SkScalar intersectX = (*CurvePointAtT[verb])(pts, weight, tVals[index]).fX;
+        if (intersectX < edge.fX) {
+            ++index;
+            continue;
+        }
+        if (intersectX > edge.fX) {
+            tVals[index] = tVals[--count];  // swap-remove
+            continue;
+        }
+        // if intersect x equals edge x, we need to determine if pts is to the left or right of edge
+        if (pts[0].fX < edge.fX && pts[kPtCount[verb]].fX < edge.fX) {
+            ++index;
+            continue;
+        }
+        // TODO : other cases need discriminating. need op angle code to figure it out
+        // example: edge ends 45 degree diagonal going up. If pts is to the left of edge, keep.
+        // if pts is to the right of edge, discard. With code as is, can't distiguish the two cases.
+        tVals[index] = tVals[--count];
+    }
+    // use first derivative to determine if intersection is contributing +1 or -1 to winding
+    for (int index = 0; index < count; ++index) {
+        directions[index] = to_direction((*CurveSlopeAtT[verb])(pts, weight, tVals[index]).fY);
+    }
+    for (int index = 0; index < count; ++index) {
+        // skip intersections that end at edge and go up
+        if (zero_or_one(tVals[index]) && Contour::Direction::kCCW != directions[index]) {
+            continue;
+        }
+        winding += (int) directions[index];
+    }
+    return winding;  // note winding indicates containership, not contour direction
+}
+
+// Return the iterator's conic weight for conic verbs; 1 (identity weight) otherwise.
+static SkScalar conic_weight(const SkPath::Iter& iter, SkPath::Verb verb) {
+    return SkPath::kConic_Verb == verb ? iter.conicWeight() : 1;
+}
+
+// Return the leftmost (minimum-x) on-curve point of the curve pts/verb/weight.
+// For quads, conics and cubics that are not monotonic in x, the interior
+// x-extremum is evaluated; otherwise the leftmost endpoint is used.
+static SkPoint left_edge(SkPoint pts[4], SkPath::Verb verb, SkScalar weight) {
+    SkASSERT(SkPath::kLine_Verb <= verb && verb <= SkPath::kCubic_Verb);
+    SkPoint result;
+    double t SK_INIT_TO_AVOID_WARNING;
+    int roots = 0;
+    if (SkPath::kLine_Verb == verb) {
+        result = pts[0].fX < pts[1].fX ? pts[0] : pts[1];
+    } else if (SkPath::kQuad_Verb == verb) {
+        SkDQuad quad;
+        quad.set(pts);
+        if (!quad.monotonicInX()) {
+            roots = SkDQuad::FindExtrema(&quad[0].fX, &t);
+        }
+        if (roots) {
+            result = quad.ptAtT(t).asSkPoint();
+        } else {
+            result = pts[0].fX < pts[2].fX ? pts[0] : pts[2];
+        }
+    } else if (SkPath::kConic_Verb == verb) {
+        SkDConic conic;
+        conic.set(pts, weight);
+        if (!conic.monotonicInX()) {
+            roots = SkDConic::FindExtrema(&conic[0].fX, weight, &t);
+        }
+        if (roots) {
+            result = conic.ptAtT(t).asSkPoint();
+        } else {
+            result = pts[0].fX < pts[2].fX ? pts[0] : pts[2];
+        }
+    } else {
+        SkASSERT(SkPath::kCubic_Verb == verb);
+        SkDCubic cubic;
+        cubic.set(pts);
+        if (!cubic.monotonicInX()) {
+            double tValues[2];
+            roots = SkDCubic::FindExtrema(&cubic[0].fX, tValues);
+            SkASSERT(roots <= 2);
+            // track the minimum-x point across all x-extrema
+            for (int index = 0; index < roots; ++index) {
+                SkPoint temp = cubic.ptAtT(tValues[index]).asSkPoint();
+                if (0 == index || result.fX > temp.fX) {
+                    result = temp;
+                }
+            }
+        }
+        // Bug fix: the loop above already stored the leftmost extremum in
+        // 'result'. The previous code overwrote it with cubic.ptAtT(t), but 't'
+        // is never assigned on the cubic branch (only the quad/conic branches
+        // write it), so that read an indeterminate value and discarded the
+        // loop's answer. Only fall back to the endpoints when no extrema exist.
+        if (!roots) {
+            result = pts[0].fX < pts[3].fX ? pts[0] : pts[3];
+        }
+    }
+    return result;
+}
+
+// Helper that rewrites an even-odd filled path so that a winding fill produces
+// the same coverage: it records each contour's bounds and verb range, builds a
+// bounding-box containment tree, computes directions, and marks nested
+// contours that must be reversed.
+class OpAsWinding {
+public:
+    enum class Edge {
+        kInitial,   // first pass: find the contour's leftmost edge point
+        kCompare,   // second pass: sum crossings against another contour's point
+    };
+
+    OpAsWinding(const SkPath& path)
+        : fPath(path) {
+    }
+
+    // Record the bounding box and [fVerbStart, fVerbEnd) verb range of each
+    // contour of fPath into 'containers'.
+    void contourBounds(vector<Contour>* containers) {
+        SkRect bounds;
+        bounds.setEmpty();
+        int lastStart = 0;
+        int verbStart = 0;
+        for (auto [verb, pts, w] : SkPathPriv::Iterate(fPath)) {
+            if (SkPathVerb::kMove == verb) {
+                if (!bounds.isEmpty()) {
+                    containers->emplace_back(bounds, lastStart, verbStart);
+                    lastStart = verbStart;
+                }
+                bounds.setBounds(&pts[kPtIndex[SkPath::kMove_Verb]], kPtCount[SkPath::kMove_Verb]);
+            }
+            if (SkPathVerb::kLine <= verb && verb <= SkPathVerb::kCubic) {
+                SkRect verbBounds;
+                verbBounds.setBounds(&pts[kPtIndex[(int)verb]], kPtCount[(int)verb]);
+                bounds.joinPossiblyEmptyRect(verbBounds);
+            }
+            ++verbStart;
+        }
+        if (!bounds.isEmpty()) {
+            containers->emplace_back(bounds, lastStart, ++verbStart);
+        }
+    }
+
+    // Compute the contour's direction from twice its signed area (shoelace sum
+    // over each verb's endpoints): negative area is kCCW, otherwise kCW.
+    Contour::Direction getDirection(Contour& contour) {
+        SkPath::Iter iter(fPath, true);
+        int verbCount = -1;
+        SkPath::Verb verb;
+        SkPoint pts[4];
+
+        SkScalar total_signed_area = 0;
+        do {
+            verb = iter.next(pts);
+            if (++verbCount < contour.fVerbStart) {
+                continue;  // before this contour's range
+            }
+            if (verbCount >= contour.fVerbEnd) {
+                continue;  // past this contour's range
+            }
+            if (SkPath::kLine_Verb > verb || verb > SkPath::kCubic_Verb) {
+                continue;  // skip moves and closes
+            }
+
+            switch (verb) {
+                case SkPath::kLine_Verb:
+                    total_signed_area += (pts[0].fY - pts[1].fY) * (pts[0].fX + pts[1].fX);
+                    break;
+                case SkPath::kQuad_Verb:
+                case SkPath::kConic_Verb:
+                    total_signed_area += (pts[0].fY - pts[2].fY) * (pts[0].fX + pts[2].fX);
+                    break;
+                case SkPath::kCubic_Verb:
+                    total_signed_area += (pts[0].fY - pts[3].fY) * (pts[0].fX + pts[3].fX);
+                    break;
+                default:
+                    break;
+            }
+        } while (SkPath::kDone_Verb != verb);
+
+        return total_signed_area < 0 ? Contour::Direction::kCCW : Contour::Direction::kCW;
+    }
+
+    // For Edge::kInitial, find the contour's leftmost non-horizontal edge point
+    // and store it in contour.fMinXY (return value is 0). For Edge::kCompare,
+    // sum the crossings of the contour's edges against fMinXY and return the
+    // winding total.
+    int nextEdge(Contour& contour, Edge edge) {
+        SkPath::Iter iter(fPath, true);
+        SkPoint pts[4];
+        SkPath::Verb verb;
+        int verbCount = -1;
+        int winding = 0;
+        do {
+            verb = iter.next(pts);
+            if (++verbCount < contour.fVerbStart) {
+                continue;
+            }
+            if (verbCount >= contour.fVerbEnd) {
+                continue;
+            }
+            if (SkPath::kLine_Verb > verb || verb > SkPath::kCubic_Verb) {
+                continue;
+            }
+            bool horizontal = true;
+            for (int index = 1; index <= kPtCount[verb]; ++index) {
+                if (pts[0].fY != pts[index].fY) {
+                    horizontal = false;
+                    break;
+                }
+            }
+            if (horizontal) {
+                continue;  // horizontal edges never cross a horizontal ray
+            }
+            if (edge == Edge::kCompare) {
+                winding += contains_edge(pts, verb, conic_weight(iter, verb), contour.fMinXY);
+                continue;
+            }
+            SkASSERT(edge == Edge::kInitial);
+            SkPoint minXY = left_edge(pts, verb, conic_weight(iter, verb));
+            if (minXY.fX > contour.fMinXY.fX) {
+                continue;
+            }
+            if (minXY.fX == contour.fMinXY.fX) {
+                if (minXY.fY != contour.fMinXY.fY) {
+                    continue;
+                }
+            }
+            contour.fMinXY = minXY;
+        } while (SkPath::kDone_Verb != verb);
+        return winding;
+    }
+
+    // Determine whether 'contour' winds around 'test''s leftmost point; sets
+    // test.fContained. Returns false when the winding sum is out of [-1, 1],
+    // which the caller treats as a failed conversion.
+    bool containerContains(Contour& contour, Contour& test) {
+        // find outside point on lesser contour
+        // arbitrarily, choose non-horizontal edge where point <= bounds left
+        // note that if leftmost point is control point, may need tight bounds
+        // to find edge with minimum-x
+        if (SK_ScalarMax == test.fMinXY.fX) {
+            this->nextEdge(test, Edge::kInitial);
+        }
+        // find all edges on greater equal or to the left of one on lesser
+        contour.fMinXY = test.fMinXY;
+        int winding = this->nextEdge(contour, Edge::kCompare);
+        // if edge is up, mark contour cw, otherwise, ccw
+        // sum of greater edges direction should be cw, 0, ccw
+        test.fContained = winding != 0;
+        return -1 <= winding && winding <= 1;
+    }
+
+    // Insert 'contour' into the bounding-box containment tree rooted at
+    // 'parent', adopting any of parent's children that fit inside it.
+    void inParent(Contour& contour, Contour& parent) {
+        // move contour into sibling list contained by parent
+        for (auto test : parent.fChildren) {
+            if (test->fBounds.contains(contour.fBounds)) {
+                inParent(contour, *test);
+                return;
+            }
+        }
+        // move parent's children into contour's children if contained by contour
+        for (auto iter = parent.fChildren.begin(); iter != parent.fChildren.end(); ) {
+            if (contour.fBounds.contains((*iter)->fBounds)) {
+                contour.fChildren.push_back(*iter);
+                iter = parent.fChildren.erase(iter);
+                continue;
+            }
+            ++iter;
+        }
+        parent.fChildren.push_back(&contour);
+    }
+
+    // Recursively verify each child is properly contained by its parent;
+    // false if any containment check fails.
+    bool checkContainerChildren(Contour* parent, Contour* child) {
+        for (auto grandChild : child->fChildren) {
+            if (!checkContainerChildren(child, grandChild)) {
+                return false;
+            }
+        }
+        if (parent) {
+            if (!containerContains(*parent, *child)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    // Mark contours whose direction matches their parent's for reversal;
+    // returns true if this contour or any descendant was marked.
+    bool markReverse(Contour* parent, Contour* child) {
+        bool reversed = false;
+        for (auto grandChild : child->fChildren) {
+            reversed |= markReverse(grandChild->fContained ? child : parent, grandChild);
+        }
+
+        child->fDirection = getDirection(*child);
+        if (parent && parent->fDirection == child->fDirection) {
+            child->fReverse = true;
+            child->fDirection = (Contour::Direction) -(int) child->fDirection;
+            return true;
+        }
+        return reversed;
+    }
+
+    // Re-emit fPath with the given fill type, reversing the contours marked by
+    // markReverse().
+    SkPath reverseMarkedContours(vector<Contour>& contours, SkPathFillType fillType) {
+        SkPathPriv::Iterate iterate(fPath);
+        auto iter = iterate.begin();
+        int verbCount = 0;
+
+        SkPathBuilder result;
+        result.setFillType(fillType);
+        for (const Contour& contour : contours) {
+            SkPathBuilder reverse;
+            // reversed contours are built separately, then re-added backwards
+            SkPathBuilder* temp = contour.fReverse ? &reverse : &result;
+            for (; iter != iterate.end() && verbCount < contour.fVerbEnd; ++iter, ++verbCount) {
+                auto [verb, pts, w] = *iter;
+                switch (verb) {
+                    case SkPathVerb::kMove:
+                        temp->moveTo(pts[0]);
+                        break;
+                    case SkPathVerb::kLine:
+                        temp->lineTo(pts[1]);
+                        break;
+                    case SkPathVerb::kQuad:
+                        temp->quadTo(pts[1], pts[2]);
+                        break;
+                    case SkPathVerb::kConic:
+                        temp->conicTo(pts[1], pts[2], *w);
+                        break;
+                    case SkPathVerb::kCubic:
+                        temp->cubicTo(pts[1], pts[2], pts[3]);
+                        break;
+                    case SkPathVerb::kClose:
+                        temp->close();
+                        break;
+                }
+            }
+            if (contour.fReverse) {
+                SkASSERT(temp == &reverse);
+                SkPathPriv::ReverseAddPath(&result, reverse.detach());
+            }
+        }
+        return result.detach();
+    }
+
+private:
+    const SkPath& fPath;
+};
+
+// Copy 'path' into 'result' with the given fill type; always returns true so
+// callers can write 'return set_result_path(...)'.
+static bool set_result_path(SkPath* result, const SkPath& path, SkPathFillType fillType) {
+    *result = path;
+    result->setFillType(fillType);
+    return true;
+}
+
+// Rewrite an even-odd (or inverse even-odd) filled path into an equivalent
+// winding-filled path by reversing nested contours whose direction matches
+// their parent's. Returns false if the path is not finite or a containment
+// check fails; otherwise *result receives the converted path.
+bool AsWinding(const SkPath& path, SkPath* result) {
+    if (!path.isFinite()) {
+        return false;
+    }
+    SkPathFillType fillType = path.getFillType();
+    if (fillType == SkPathFillType::kWinding
+            || fillType == SkPathFillType::kInverseWinding ) {
+        return set_result_path(result, path, fillType);  // already winding: nothing to do
+    }
+    fillType = path.isInverseFillType() ? SkPathFillType::kInverseWinding :
+            SkPathFillType::kWinding;
+    if (path.isEmpty() || path.isConvex()) {
+        return set_result_path(result, path, fillType);  // fill rules agree here
+    }
+    // count contours
+    vector<Contour> contours;  // one per contour
+    OpAsWinding winder(path);
+    winder.contourBounds(&contours);
+    if (contours.size() <= 1) {
+        return set_result_path(result, path, fillType);
+    }
+    // create contour bounding box tree
+    Contour sorted(SkRect(), 0, 0);
+    for (auto& contour : contours) {
+        winder.inParent(contour, sorted);
+    }
+    // if sorted has no grandchildren, no child has to fix its children's winding
+    if (std::all_of(sorted.fChildren.begin(), sorted.fChildren.end(),
+            [](const Contour* contour) -> bool { return !contour->fChildren.size(); } )) {
+        return set_result_path(result, path, fillType);
+    }
+    // starting with outermost and moving inward, see if one path contains another
+    for (auto contour : sorted.fChildren) {
+        winder.nextEdge(*contour, OpAsWinding::Edge::kInitial);
+        contour->fDirection = winder.getDirection(*contour);
+        if (!winder.checkContainerChildren(nullptr, contour)) {
+            return false;
+        }
+    }
+    // starting with outermost and moving inward, mark paths to reverse
+    bool reversed = false;
+    for (auto contour : sorted.fChildren) {
+        reversed |= winder.markReverse(nullptr, contour);
+    }
+    if (!reversed) {
+        return set_result_path(result, path, fillType);
+    }
+    *result = winder.reverseMarkedContours(contours, fillType);
+    return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsBounds.h b/gfx/skia/skia/src/pathops/SkPathOpsBounds.h
new file mode 100644
index 0000000000..bcf578a3a9
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsBounds.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpBounds_DEFINED
+#define SkPathOpBounds_DEFINED
+
+#include "include/core/SkRect.h"
+#include "src/pathops/SkPathOpsRect.h"
+
+// SkPathOpsBounds, unlike SkRect, does not consider a line to be empty.
+struct SkPathOpsBounds : public SkRect {
+    // True when the two bounds touch or overlap, compared with ULP slop.
+    static bool Intersects(const SkPathOpsBounds& a, const SkPathOpsBounds& b) {
+        return AlmostLessOrEqualUlps(a.fLeft, b.fRight)
+                && AlmostLessOrEqualUlps(b.fLeft, a.fRight)
+                && AlmostLessOrEqualUlps(a.fTop, b.fBottom)
+                && AlmostLessOrEqualUlps(b.fTop, a.fBottom);
+    }
+
+    // Note that add(), unlike SkRect::join() or SkRect::growToInclude()
+    // does not treat the bounds of horizontal and vertical lines as
+    // empty rectangles.
+    void add(SkScalar left, SkScalar top, SkScalar right, SkScalar bottom) {
+        if (left < fLeft) fLeft = left;
+        if (top < fTop) fTop = top;
+        if (right > fRight) fRight = right;
+        if (bottom > fBottom) fBottom = bottom;
+    }
+
+    void add(const SkPathOpsBounds& toAdd) {
+        add(toAdd.fLeft, toAdd.fTop, toAdd.fRight, toAdd.fBottom);
+    }
+
+    // Extend the bounds to include the point.
+    void add(const SkPoint& pt) {
+        if (pt.fX < fLeft) fLeft = pt.fX;
+        if (pt.fY < fTop) fTop = pt.fY;
+        if (pt.fX > fRight) fRight = pt.fX;
+        if (pt.fY > fBottom) fBottom = pt.fY;
+    }
+
+    // Extend the bounds to include the double-precision point (narrowed to scalar).
+    void add(const SkDPoint& pt) {
+        if (pt.fX < fLeft) fLeft = SkDoubleToScalar(pt.fX);
+        if (pt.fY < fTop) fTop = SkDoubleToScalar(pt.fY);
+        if (pt.fX > fRight) fRight = SkDoubleToScalar(pt.fX);
+        if (pt.fY > fBottom) fBottom = SkDoubleToScalar(pt.fY);
+    }
+
+    // Containment with ULP slop on every edge.
+    bool almostContains(const SkPoint& pt) const {
+        return AlmostLessOrEqualUlps(fLeft, pt.fX)
+                && AlmostLessOrEqualUlps(pt.fX, fRight)
+                && AlmostLessOrEqualUlps(fTop, pt.fY)
+                && AlmostLessOrEqualUlps(pt.fY, fBottom);
+    }
+
+    // Exact, closed-interval containment.
+    bool contains(const SkPoint& pt) const {
+        return fLeft <= pt.fX && fTop <= pt.fY &&
+               fRight >= pt.fX && fBottom >= pt.fY;
+    }
+
+    using INHERITED = SkRect;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp
new file mode 100644
index 0000000000..18a5005d7e
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCommon.cpp
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pathops/SkPathOpsCommon.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/base/SkTSort.h"
+#include "src/pathops/SkOpAngle.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+
+// Walk the angle loop containing the span (start, end), looking for a segment
+// whose wind sum is already known. *windingPtr receives the winding (SK_MinS32
+// if none was found) and *sortablePtr is cleared when the loop holds an
+// unorderable angle, in which case the winding is recomputed span by span.
+const SkOpAngle* AngleWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* windingPtr,
+        bool* sortablePtr) {
+    // find first angle, initialize winding to computed fWindSum
+    SkOpSegment* segment = start->segment();
+    const SkOpAngle* angle = segment->spanToAngle(start, end);
+    if (!angle) {
+        *windingPtr = SK_MinS32;
+        return nullptr;
+    }
+    bool computeWinding = false;
+    const SkOpAngle* firstAngle = angle;
+    bool loop = false;
+    bool unorderable = false;
+    int winding = SK_MinS32;
+    do {
+        angle = angle->next();
+        if (!angle) {
+            return nullptr;
+        }
+        unorderable |= angle->unorderable();
+        if ((computeWinding = unorderable || (angle == firstAngle && loop))) {
+            break;  // if we get here, there's no winding, loop is unorderable
+        }
+        loop |= angle == firstAngle;
+        segment = angle->segment();
+        winding = segment->windSum(angle);
+    } while (winding == SK_MinS32);
+    // if the angle loop contains an unorderable span, the angle order may be useless
+    // directly compute the winding in this case for each span
+    if (computeWinding) {
+        firstAngle = angle;
+        winding = SK_MinS32;
+        do {
+            SkOpSpanBase* startSpan = angle->start();
+            SkOpSpanBase* endSpan = angle->end();
+            SkOpSpan* lesser = startSpan->starter(endSpan);
+            int testWinding = lesser->windSum();
+            if (testWinding == SK_MinS32) {
+                testWinding = lesser->computeWindSum();
+            }
+            if (testWinding != SK_MinS32) {
+                segment = angle->segment();
+                winding = testWinding;
+            }
+            angle = angle->next();
+        } while (angle != firstAngle);
+    }
+    *sortablePtr = !unorderable;
+    *windingPtr = winding;
+    return angle;
+}
+
+// Return the first not-yet-processed span among all contours, or nullptr when
+// every contour is done.
+SkOpSpan* FindUndone(SkOpContourHead* contourHead) {
+    SkOpContour* contour = contourHead;
+    do {
+        if (contour->done()) {
+            continue;
+        }
+        SkOpSpan* result = contour->undoneSpan();
+        if (result) {
+            return result;
+        }
+    } while ((contour = contour->next()));
+    return nullptr;
+}
+
+// Pop spans off the chase stack until one yields a segment with an active
+// angle, or a not-done segment whose winding can be established; the chosen
+// span is re-pushed and its segment returned, with *startPtr/*endPtr set to
+// the span pair to walk next. Returns nullptr when the chase is exhausted or
+// an angle loop is broken.
+SkOpSegment* FindChase(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** startPtr,
+        SkOpSpanBase** endPtr) {
+    while (!chase->empty()) {
+        SkOpSpanBase* span = chase->back();
+        chase->pop_back();
+        SkOpSegment* segment = span->segment();
+        *startPtr = span->ptT()->next()->span();
+        bool done = true;
+        *endPtr = nullptr;
+        if (SkOpAngle* last = segment->activeAngle(*startPtr, startPtr, endPtr, &done)) {
+            *startPtr = last->start();
+            *endPtr = last->end();
+    #if TRY_ROTATE
+            *chase->insert(0) = span;
+    #else
+            *chase->append() = span;
+    #endif
+            return last->segment();
+        }
+        if (done) {
+            continue;
+        }
+        // find first angle, initialize winding to computed wind sum
+        int winding;
+        bool sortable;
+        const SkOpAngle* angle = AngleWinding(*startPtr, *endPtr, &winding, &sortable);
+        if (!angle) {
+            return nullptr;
+        }
+        if (winding == SK_MinS32) {
+            continue;
+        }
+        int sumWinding SK_INIT_TO_AVOID_WARNING;
+        if (sortable) {
+            segment = angle->segment();
+            sumWinding = segment->updateWindingReverse(angle);
+        }
+        SkOpSegment* first = nullptr;
+        const SkOpAngle* firstAngle = angle;
+        while ((angle = angle->next()) != firstAngle) {
+            segment = angle->segment();
+            SkOpSpanBase* start = angle->start();
+            SkOpSpanBase* end = angle->end();
+            int maxWinding SK_INIT_TO_AVOID_WARNING;
+            if (sortable) {
+                segment->setUpWinding(start, end, &maxWinding, &sumWinding);
+            }
+            if (!segment->done(angle)) {
+                if (!first && (sortable || start->starter(end)->windSum() != SK_MinS32)) {
+                    first = segment;
+                    *startPtr = start;
+                    *endPtr = end;
+                }
+                // OPTIMIZATION: should this also add to the chase?
+                if (sortable) {
+                    // TODO: add error handling
+                    SkAssertResult(segment->markAngle(maxWinding, sumWinding, angle, nullptr));
+                }
+            }
+        }
+        if (first) {
+    #if TRY_ROTATE
+            *chase->insert(0) = span;
+    #else
+            *chase->append() = span;
+    #endif
+            return first;
+        }
+    }
+    return nullptr;
+}
+
+// Collect the non-empty contours, sort them when there is more than one,
+// relink them into a list headed by *contourList, and record the head in the
+// global state. Returns false when every contour is empty.
+bool SortContourList(SkOpContourHead** contourList, bool evenOdd, bool oppEvenOdd) {
+    SkTDArray<SkOpContour* > list;
+    SkOpContour* contour = *contourList;
+    do {
+        if (contour->count()) {
+            contour->setOppXor(contour->operand() ? evenOdd : oppEvenOdd);
+            *list.append() = contour;
+        }
+    } while ((contour = contour->next()));
+    int count = list.size();
+    if (!count) {
+        return false;
+    }
+    if (count > 1) {
+        SkTQSort<SkOpContour>(list.begin(), list.end());
+    }
+    contour = list[0];
+    SkOpContourHead* contourHead = static_cast<SkOpContourHead*>(contour);
+    contour->globalState()->setContourHead(contourHead);
+    *contourList = contourHead;
+    // rebuild the next links in sorted order
+    for (int index = 1; index < count; ++index) {
+        SkOpContour* next = list[index];
+        contour->setNext(next);
+        contour = next;
+    }
+    contour->setNext(nullptr);
+    return true;
+}
+
+// Compute angles for every contour in the list.
+static void calc_angles(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+    DEBUG_STATIC_SET_PHASE(contourList);
+    SkOpContour* contour = contourList;
+    do {
+        contour->calcAngles();
+    } while ((contour = contour->next()));
+}
+
+// Ask each contour to detect coincidence missed by intersection; returns true
+// if any contour reported a find.
+static bool missing_coincidence(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+    DEBUG_STATIC_SET_PHASE(contourList);
+    SkOpContour* contour = contourList;
+    bool result = false;
+    do {
+        result |= contour->missingCoincidence();
+    } while ((contour = contour->next()));
+    return result;
+}
+
+// Apply SkOpContour::moveMultiples to each contour; false if any contour fails.
+static bool move_multiples(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+    DEBUG_STATIC_SET_PHASE(contourList);
+    SkOpContour* contour = contourList;
+    do {
+        if (!contour->moveMultiples()) {
+            return false;
+        }
+    } while ((contour = contour->next()));
+    return true;
+}
+
+// Apply SkOpContour::moveNearby to each contour; false if any contour fails.
+static bool move_nearby(SkOpContourHead* contourList DEBUG_COIN_DECLARE_PARAMS()) {
+    DEBUG_STATIC_SET_PHASE(contourList);
+    SkOpContour* contour = contourList;
+    do {
+        if (!contour->moveNearby()) {
+            return false;
+        }
+    } while ((contour = contour->next()));
+    return true;
+}
+
+// Sort each contour's angles; false if any contour fails to sort.
+static bool sort_angles(SkOpContourHead* contourList) {
+    SkOpContour* contour = contourList;
+    do {
+        if (!contour->sortAngles()) {
+            return false;
+        }
+    } while ((contour = contour->next()));
+    return true;
+}
+
+// Resolve coincident runs discovered during intersection: expand and merge
+// coincident spans, detect coincidence the intersection pass missed, apply the
+// coincident edges' effect on winding, then compute and sort angles. Returns
+// false if any step fails or a safety limit is exceeded.
+bool HandleCoincidence(SkOpContourHead* contourList, SkOpCoincidence* coincidence) {
+    SkOpGlobalState* globalState = contourList->globalState();
+    // match up points within the coincident runs
+    if (!coincidence->addExpanded(DEBUG_PHASE_ONLY_PARAMS(kIntersecting))) {
+        return false;
+    }
+    // combine t values when multiple intersections occur on some segments but not others
+    if (!move_multiples(contourList DEBUG_PHASE_PARAMS(kWalking))) {
+        return false;
+    }
+    // move t values and points together to eliminate small/tiny gaps
+    if (!move_nearby(contourList DEBUG_COIN_PARAMS())) {
+        return false;
+    }
+    // add coincidence formed by pairing on curve points and endpoints
+    coincidence->correctEnds(DEBUG_PHASE_ONLY_PARAMS(kIntersecting));
+    if (!coincidence->addEndMovedSpans(DEBUG_COIN_ONLY_PARAMS())) {
+        return false;
+    }
+    const int SAFETY_COUNT = 3;  // iteration cap in case coincidence keeps growing
+    int safetyHatch = SAFETY_COUNT;
+    // look for coincidence present in A-B and A-C but missing in B-C
+    do {
+        bool added;
+        if (!coincidence->addMissing(&added DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch))) {
+            return false;
+        }
+        if (!added) {
+            break;
+        }
+        if (!--safetyHatch) {
+            SkASSERT(globalState->debugSkipAssert());
+            return false;
+        }
+        move_nearby(contourList DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch - 1));
+    } while (true);
+    // check to see if, loosely, coincident ranges may be expanded
+    if (coincidence->expand(DEBUG_COIN_ONLY_PARAMS())) {
+        bool added;
+        if (!coincidence->addMissing(&added DEBUG_COIN_PARAMS())) {
+            return false;
+        }
+        if (!coincidence->addExpanded(DEBUG_COIN_ONLY_PARAMS())) {
+            return false;
+        }
+        if (!move_multiples(contourList DEBUG_COIN_PARAMS())) {
+            return false;
+        }
+        move_nearby(contourList DEBUG_COIN_PARAMS());
+    }
+    // the expanded ranges may not align -- add the missing spans
+    if (!coincidence->addExpanded(DEBUG_PHASE_ONLY_PARAMS(kWalking))) {
+        return false;
+    }
+    // mark spans of coincident segments as coincident
+    coincidence->mark(DEBUG_COIN_ONLY_PARAMS());
+    // look for coincidence lines and curves undetected by intersection
+    if (missing_coincidence(contourList DEBUG_COIN_PARAMS())) {
+        (void) coincidence->expand(DEBUG_PHASE_ONLY_PARAMS(kIntersecting));
+        if (!coincidence->addExpanded(DEBUG_COIN_ONLY_PARAMS())) {
+            return false;
+        }
+        if (!coincidence->mark(DEBUG_PHASE_ONLY_PARAMS(kWalking))) {
+            return false;
+        }
+    } else {
+        (void) coincidence->expand(DEBUG_COIN_ONLY_PARAMS());
+    }
+    (void) coincidence->expand(DEBUG_COIN_ONLY_PARAMS());
+
+    SkOpCoincidence overlaps(globalState);
+    safetyHatch = SAFETY_COUNT;
+    do {
+        SkOpCoincidence* pairs = overlaps.isEmpty() ? coincidence : &overlaps;
+        // adjust the winding value to account for coincident edges
+        if (!pairs->apply(DEBUG_ITER_ONLY_PARAMS(SAFETY_COUNT - safetyHatch))) {
+            return false;
+        }
+        // For each coincident pair that overlaps another, when the receivers (the 1st of the pair)
+        // are different, construct a new pair to resolve their mutual span
+        if (!pairs->findOverlaps(&overlaps DEBUG_ITER_PARAMS(SAFETY_COUNT - safetyHatch))) {
+            return false;
+        }
+        if (!--safetyHatch) {
+            SkASSERT(globalState->debugSkipAssert());
+            return false;
+        }
+    } while (!overlaps.isEmpty());
+    calc_angles(contourList DEBUG_COIN_PARAMS());
+    if (!sort_angles(contourList)) {
+        return false;
+    }
+#if DEBUG_COINCIDENCE_VERBOSE
+    coincidence->debugShowCoincidence();
+#endif
+#if DEBUG_COINCIDENCE
+    coincidence->debugValidate();
+#endif
+    SkPathOpsDebug::ShowActiveSpans(contourList);
+    return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCommon.h b/gfx/skia/skia/src/pathops/SkPathOpsCommon.h
new file mode 100644
index 0000000000..cca3b40421
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCommon.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsCommon_DEFINED
+#define SkPathOpsCommon_DEFINED
+
+#include "include/pathops/SkPathOps.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+class SkOpAngle;
+class SkOpCoincidence;
+class SkOpContourHead;
+class SkOpSegment;
+class SkOpSpan;
+class SkOpSpanBase;
+class SkPath;
+
+template <typename T> class SkTDArray;
+
+// Shared entry points implemented in SkPathOpsCommon.cpp and used by the
+// path-ops drivers.
+const SkOpAngle* AngleWinding(SkOpSpanBase* start, SkOpSpanBase* end, int* windingPtr,
+                              bool* sortable);
+SkOpSegment* FindChase(SkTDArray<SkOpSpanBase*>* chase, SkOpSpanBase** startPtr,
+                       SkOpSpanBase** endPtr);
+SkOpSpan* FindSortableTop(SkOpContourHead* );
+SkOpSpan* FindUndone(SkOpContourHead* );
+bool FixWinding(SkPath* path);
+bool SortContourList(SkOpContourHead** , bool evenOdd, bool oppEvenOdd);
+bool HandleCoincidence(SkOpContourHead* , SkOpCoincidence* );
+bool OpDebug(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result
+             SkDEBUGPARAMS(bool skipAssert)
+             SkDEBUGPARAMS(const char* testName));
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp b/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp
new file mode 100644
index 0000000000..98b7c50d68
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsConic.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsConic.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsRect.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cmath>
+
+struct SkDLine;
+
+// cribbed from the float version in SkGeometry.cpp
+// Quadratic coefficients of the conic's derivative numerator in one
+// coordinate; evaluated by conic_eval_tan below.
+static void conic_deriv_coeff(const double src[],
+                              SkScalar w,
+                              double coeff[3]) {
+    const double P20 = src[4] - src[0];
+    const double P10 = src[2] - src[0];
+    const double wP10 = w * P10;
+    coeff[0] = w * P20 - P20;
+    coeff[1] = P20 - 2 * wP10;
+    coeff[2] = wP10;
+}
+
+// Evaluate the derivative quadratic from conic_deriv_coeff at t.
+static double conic_eval_tan(const double coord[], SkScalar w, double t) {
+    double coeff[3];
+    conic_deriv_coeff(coord, w, coeff);
+    return t * (t * coeff[0] + coeff[1]) + coeff[2];
+}
+
+// Find the t where the conic's coordinate extremum occurs; writes at most one
+// value to t[] and returns the count (0 or 1).
+int SkDConic::FindExtrema(const double src[], SkScalar w, double t[1]) {
+    double coeff[3];
+    conic_deriv_coeff(src, w, coeff);
+
+    double tValues[2];
+    int roots = SkDQuad::RootsValidT(coeff[0], coeff[1], coeff[2], tValues);
+    // In extreme cases, the number of roots returned can be 2. Pathops
+    // will fail later on, so there's no advantage to plumbing in an error
+    // return here.
+    // SkASSERT(0 == roots || 1 == roots);
+
+    if (1 == roots) {
+        t[0] = tValues[0];
+        return 1;
+    }
+    return 0;
+}
+
+// Tangent vector at t. When the derivative degenerates to zero at an endpoint,
+// fall back to the chord between the conic's end points.
+SkDVector SkDConic::dxdyAtT(double t) const {
+    SkDVector result = {
+        conic_eval_tan(&fPts[0].fX, fWeight, t),
+        conic_eval_tan(&fPts[0].fY, fWeight, t)
+    };
+    if (result.fX == 0 && result.fY == 0) {
+        if (zero_or_one(t)) {
+            result = fPts[2] - fPts[0];
+        } else {
+            // incomplete
+            SkDebugf("!k");
+        }
+    }
+    return result;
+}
+
+// Numerator of the rational conic in one coordinate at t.
+static double conic_eval_numerator(const double src[], SkScalar w, double t) {
+    SkASSERT(src);
+    SkASSERT(t >= 0 && t <= 1);
+    double src2w = src[2] * w;
+    double C = src[0];
+    double A = src[4] - 2 * src2w + C;
+    double B = 2 * (src2w - C);
+    return (A * t + B) * t + C;
+}
+
+// Shared denominator of the rational conic at t.
+static double conic_eval_denominator(SkScalar w, double t) {
+    double B = 2 * (w - 1);
+    double C = 1;
+    double A = -B;
+    return (A * t + B) * t + C;
+}
+
+bool SkDConic::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+    return cubic.hullIntersects(*this, isLinear);
+}
+
+// Point on the conic at t; endpoints are returned exactly to avoid rounding.
+SkDPoint SkDConic::ptAtT(double t) const {
+    if (t == 0) {
+        return fPts[0];
+    }
+    if (t == 1) {
+        return fPts[2];
+    }
+    double denominator = conic_eval_denominator(fWeight, t);
+    SkDPoint result = {
+        sk_ieee_double_divide(conic_eval_numerator(&fPts[0].fX, fWeight, t), denominator),
+        sk_ieee_double_divide(conic_eval_numerator(&fPts[0].fY, fWeight, t), denominator)
+    };
+    return result;
+}
+
+/* see quad subdivide for point rationale */
+/* w rationale : the mid point between t1 and t2 could be determined from the computed a/b/c
+ values if the computed w was known. Since we know the mid point at (t1+t2)/2, we'll assume
+ that it is the same as the point on the new curve t==(0+1)/2.
+
+ d / dz == conic_poly(dst, unknownW, .5) / conic_weight(unknownW, .5);
+
+ conic_poly(dst, unknownW, .5)
+ = a / 4 + (b * unknownW) / 2 + c / 4
+ = (a + c) / 4 + (bx * unknownW) / 2
+
+ conic_weight(unknownW, .5)
+ = unknownW / 2 + 1 / 2
+
+ d / dz == ((a + c) / 2 + b * unknownW) / (unknownW + 1)
+ d / dz * (unknownW + 1) == (a + c) / 2 + b * unknownW
+ unknownW = ((a + c) / 2 - d / dz) / (d / dz - b)
+
+ Thus, w is the ratio of the distance from the mid of end points to the on-curve point, and the
+ distance of the on-curve point to the control point.
+ */
+// Subdivide the conic over [t1, t2]; the new weight is solved from the
+// mid-point as derived in the block comment above.
+SkDConic SkDConic::subDivide(double t1, double t2) const {
+    double ax, ay, az;
+    // start point (homogeneous): endpoints are taken exactly
+    if (t1 == 0) {
+        ax = fPts[0].fX;
+        ay = fPts[0].fY;
+        az = 1;
+    } else if (t1 != 1) {
+        ax = conic_eval_numerator(&fPts[0].fX, fWeight, t1);
+        ay = conic_eval_numerator(&fPts[0].fY, fWeight, t1);
+        az = conic_eval_denominator(fWeight, t1);
+    } else {
+        ax = fPts[2].fX;
+        ay = fPts[2].fY;
+        az = 1;
+    }
+    // on-curve point at the middle of the sub-range
+    double midT = (t1 + t2) / 2;
+    double dx = conic_eval_numerator(&fPts[0].fX, fWeight, midT);
+    double dy = conic_eval_numerator(&fPts[0].fY, fWeight, midT);
+    double dz = conic_eval_denominator(fWeight, midT);
+    // end point (homogeneous)
+    double cx, cy, cz;
+    if (t2 == 1) {
+        cx = fPts[2].fX;
+        cy = fPts[2].fY;
+        cz = 1;
+    } else if (t2 != 0) {
+        cx = conic_eval_numerator(&fPts[0].fX, fWeight, t2);
+        cy = conic_eval_numerator(&fPts[0].fY, fWeight, t2);
+        cz = conic_eval_denominator(fWeight, t2);
+    } else {
+        cx = fPts[0].fX;
+        cy = fPts[0].fY;
+        cz = 1;
+    }
+    // solve for the control point from the mid point (see comment above)
+    double bx = 2 * dx - (ax + cx) / 2;
+    double by = 2 * dy - (ay + cy) / 2;
+    double bz = 2 * dz - (az + cz) / 2;
+    if (!bz) {
+        bz = 1; // if bz is 0, weight is 0, control point has no effect: any value will do
+    }
+    SkDConic dst = {{{{ax / az, ay / az}, {bx / bz, by / bz}, {cx / cz, cy / cz}}
+            SkDEBUGPARAMS(fPts.fDebugGlobalState) },
+            SkDoubleToScalar(bz / sqrt(az * cz)) };
+    return dst;
+}
+
+// Subdivide over [t1, t2] and return just the new control point, with the new
+// weight written through 'weight'; 'a' and 'c' are the precomputed end points
+// (not consulted by this implementation).
+SkDPoint SkDConic::subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2,
+        SkScalar* weight) const {
+    SkDConic chopped = this->subDivide(t1, t2);
+    *weight = chopped.fWeight;
+    return chopped[1];
+}
+
+// SkTConic: forward the TCurve interface to the wrapped SkDConic.
+int SkTConic::intersectRay(SkIntersections* i, const SkDLine& line) const {
+    return i->intersectRay(fConic, line);
+}
+
+bool SkTConic::hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+    return quad.hullIntersects(fConic, isLinear);
+}
+
+bool SkTConic::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+    return cubic.hullIntersects(fConic, isLinear);
+}
+
+void SkTConic::setBounds(SkDRect* rect) const {
+    rect->setBounds(fConic);
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsConic.h b/gfx/skia/skia/src/pathops/SkPathOpsConic.h
new file mode 100644
index 0000000000..334dbebb60
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsConic.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsConic_DEFINED
+#define SkPathOpsConic_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkDebug.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+
+class SkIntersections;
+class SkOpGlobalState;
+struct SkDCubic;
+struct SkDLine;
+struct SkDRect;
+
+struct SkDConic {
+ static const int kPointCount = 3;
+ static const int kPointLast = kPointCount - 1;
+ static const int kMaxIntersections = 4;
+
+ SkDQuad fPts;
+ SkScalar fWeight;
+
+ bool collapsed() const {
+ return fPts.collapsed();
+ }
+
+ bool controlsInside() const {
+ return fPts.controlsInside();
+ }
+
+ void debugInit() {
+ fPts.debugInit();
+ fWeight = 0;
+ }
+
+ void debugSet(const SkDPoint* pts, SkScalar weight);
+
+ SkDConic flip() const {
+ SkDConic result = {{{fPts[2], fPts[1], fPts[0]}
+ SkDEBUGPARAMS(fPts.fDebugGlobalState) }, fWeight};
+ return result;
+ }
+
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const { return fPts.globalState(); }
+#endif
+
+ static bool IsConic() { return true; }
+
+ const SkDConic& set(const SkPoint pts[kPointCount], SkScalar weight
+ SkDEBUGPARAMS(SkOpGlobalState* state = nullptr)) {
+ fPts.set(pts SkDEBUGPARAMS(state));
+ fWeight = weight;
+ return *this;
+ }
+
+ const SkDPoint& operator[](int n) const { return fPts[n]; }
+ SkDPoint& operator[](int n) { return fPts[n]; }
+
+ static int AddValidTs(double s[], int realRoots, double* t) {
+ return SkDQuad::AddValidTs(s, realRoots, t);
+ }
+
+ void align(int endIndex, SkDPoint* dstPt) const {
+ fPts.align(endIndex, dstPt);
+ }
+
+ SkDVector dxdyAtT(double t) const;
+ static int FindExtrema(const double src[], SkScalar weight, double tValue[1]);
+
+ bool hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return fPts.hullIntersects(quad, isLinear);
+ }
+
+ bool hullIntersects(const SkDConic& conic, bool* isLinear) const {
+ return fPts.hullIntersects(conic.fPts, isLinear);
+ }
+
+ bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const;
+
+ bool isLinear(int startIndex, int endIndex) const {
+ return fPts.isLinear(startIndex, endIndex);
+ }
+
+ static int maxIntersections() { return kMaxIntersections; }
+
+ bool monotonicInX() const {
+ return fPts.monotonicInX();
+ }
+
+ bool monotonicInY() const {
+ return fPts.monotonicInY();
+ }
+
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const {
+ fPts.otherPts(oddMan, endPt);
+ }
+
+ static int pointCount() { return kPointCount; }
+ static int pointLast() { return kPointLast; }
+ SkDPoint ptAtT(double t) const;
+
+ static int RootsReal(double A, double B, double C, double t[2]) {
+ return SkDQuad::RootsReal(A, B, C, t);
+ }
+
+ static int RootsValidT(const double A, const double B, const double C, double s[2]) {
+ return SkDQuad::RootsValidT(A, B, C, s);
+ }
+
+ SkDConic subDivide(double t1, double t2) const;
+ void subDivide(double t1, double t2, SkDConic* c) const { *c = this->subDivide(t1, t2); }
+
+ static SkDConic SubDivide(const SkPoint a[kPointCount], SkScalar weight, double t1, double t2) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.subDivide(t1, t2);
+ }
+
+ SkDPoint subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2,
+ SkScalar* weight) const;
+
+ static SkDPoint SubDivide(const SkPoint pts[kPointCount], SkScalar weight,
+ const SkDPoint& a, const SkDPoint& c,
+ double t1, double t2, SkScalar* newWeight) {
+ SkDConic conic;
+ conic.set(pts, weight);
+ return conic.subDivide(a, c, t1, t2, newWeight);
+ }
+
+ // utilities callable by the user from the debugger when the implementation code is linked in
+ void dump() const;
+ void dumpID(int id) const;
+ void dumpInner() const;
+
+};
+
+class SkTConic : public SkTCurve {
+public:
+ SkDConic fConic;
+
+ SkTConic() {}
+
+ SkTConic(const SkDConic& c)
+ : fConic(c) {
+ }
+
+ ~SkTConic() override {}
+
+ const SkDPoint& operator[](int n) const override { return fConic[n]; }
+ SkDPoint& operator[](int n) override { return fConic[n]; }
+
+ bool collapsed() const override { return fConic.collapsed(); }
+ bool controlsInside() const override { return fConic.controlsInside(); }
+ void debugInit() override { return fConic.debugInit(); }
+#if DEBUG_T_SECT
+ void dumpID(int id) const override { return fConic.dumpID(id); }
+#endif
+ SkDVector dxdyAtT(double t) const override { return fConic.dxdyAtT(t); }
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const override { return fConic.globalState(); }
+#endif
+ bool hullIntersects(const SkDQuad& quad, bool* isLinear) const override;
+
+ bool hullIntersects(const SkDConic& conic, bool* isLinear) const override {
+ return conic.hullIntersects(fConic, isLinear);
+ }
+
+ bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const override;
+
+ bool hullIntersects(const SkTCurve& curve, bool* isLinear) const override {
+ return curve.hullIntersects(fConic, isLinear);
+ }
+
+ int intersectRay(SkIntersections* i, const SkDLine& line) const override;
+ bool IsConic() const override { return true; }
+ SkTCurve* make(SkArenaAlloc& heap) const override { return heap.make<SkTConic>(); }
+
+ int maxIntersections() const override { return SkDConic::kMaxIntersections; }
+
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const override {
+ fConic.otherPts(oddMan, endPt);
+ }
+
+ int pointCount() const override { return SkDConic::kPointCount; }
+ int pointLast() const override { return SkDConic::kPointLast; }
+ SkDPoint ptAtT(double t) const override { return fConic.ptAtT(t); }
+ void setBounds(SkDRect* ) const override;
+
+ void subDivide(double t1, double t2, SkTCurve* curve) const override {
+ ((SkTConic*) curve)->fConic = fConic.subDivide(t1, t2);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp
new file mode 100644
index 0000000000..138072dd72
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCubic.cpp
@@ -0,0 +1,763 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsCubic.h"
+
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkTSort.h"
+#include "src/core/SkGeometry.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkLineParameters.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsRect.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+#include <cmath>
+
+struct SkDLine;
+
+const int SkDCubic::gPrecisionUnit = 256; // FIXME: test different values in test framework
+
+void SkDCubic::align(int endIndex, int ctrlIndex, SkDPoint* dstPt) const {
+ if (fPts[endIndex].fX == fPts[ctrlIndex].fX) {
+ dstPt->fX = fPts[endIndex].fX;
+ }
+ if (fPts[endIndex].fY == fPts[ctrlIndex].fY) {
+ dstPt->fY = fPts[endIndex].fY;
+ }
+}
+
+// give up when changing t no longer moves point
+// also, copy point rather than recompute it when it does change
+double SkDCubic::binarySearch(double min, double max, double axisIntercept,
+ SearchAxis xAxis) const {
+ double t = (min + max) / 2;
+ double step = (t - min) / 2;
+ SkDPoint cubicAtT = ptAtT(t);
+ double calcPos = (&cubicAtT.fX)[xAxis];
+ double calcDist = calcPos - axisIntercept;
+ do {
+ double priorT = std::max(min, t - step);
+ SkDPoint lessPt = ptAtT(priorT);
+ if (approximately_equal_half(lessPt.fX, cubicAtT.fX)
+ && approximately_equal_half(lessPt.fY, cubicAtT.fY)) {
+ return -1; // binary search found no point at this axis intercept
+ }
+ double lessDist = (&lessPt.fX)[xAxis] - axisIntercept;
+#if DEBUG_CUBIC_BINARY_SEARCH
+ SkDebugf("t=%1.9g calc=%1.9g dist=%1.9g step=%1.9g less=%1.9g\n", t, calcPos, calcDist,
+ step, lessDist);
+#endif
+ double lastStep = step;
+ step /= 2;
+ if (calcDist > 0 ? calcDist > lessDist : calcDist < lessDist) {
+ t = priorT;
+ } else {
+ double nextT = t + lastStep;
+ if (nextT > max) {
+ return -1;
+ }
+ SkDPoint morePt = ptAtT(nextT);
+ if (approximately_equal_half(morePt.fX, cubicAtT.fX)
+ && approximately_equal_half(morePt.fY, cubicAtT.fY)) {
+ return -1; // binary search found no point at this axis intercept
+ }
+ double moreDist = (&morePt.fX)[xAxis] - axisIntercept;
+ if (calcDist > 0 ? calcDist <= moreDist : calcDist >= moreDist) {
+ continue;
+ }
+ t = nextT;
+ }
+ SkDPoint testAtT = ptAtT(t);
+ cubicAtT = testAtT;
+ calcPos = (&cubicAtT.fX)[xAxis];
+ calcDist = calcPos - axisIntercept;
+ } while (!approximately_equal(calcPos, axisIntercept));
+ return t;
+}
+
+// get the rough scale of the cubic; used to determine if curvature is extreme
+double SkDCubic::calcPrecision() const {
+ return ((fPts[1] - fPts[0]).length()
+ + (fPts[2] - fPts[1]).length()
+ + (fPts[3] - fPts[2]).length()) / gPrecisionUnit;
+}
+
+/* classic one t subdivision */
+static void interp_cubic_coords(const double* src, double* dst, double t) {
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ double cd = SkDInterp(src[4], src[6], t);
+ double abc = SkDInterp(ab, bc, t);
+ double bcd = SkDInterp(bc, cd, t);
+ double abcd = SkDInterp(abc, bcd, t);
+
+ dst[0] = src[0];
+ dst[2] = ab;
+ dst[4] = abc;
+ dst[6] = abcd;
+ dst[8] = bcd;
+ dst[10] = cd;
+ dst[12] = src[6];
+}
+
+SkDCubicPair SkDCubic::chopAt(double t) const {
+ SkDCubicPair dst;
+ if (t == 0.5) {
+ dst.pts[0] = fPts[0];
+ dst.pts[1].fX = (fPts[0].fX + fPts[1].fX) / 2;
+ dst.pts[1].fY = (fPts[0].fY + fPts[1].fY) / 2;
+ dst.pts[2].fX = (fPts[0].fX + 2 * fPts[1].fX + fPts[2].fX) / 4;
+ dst.pts[2].fY = (fPts[0].fY + 2 * fPts[1].fY + fPts[2].fY) / 4;
+ dst.pts[3].fX = (fPts[0].fX + 3 * (fPts[1].fX + fPts[2].fX) + fPts[3].fX) / 8;
+ dst.pts[3].fY = (fPts[0].fY + 3 * (fPts[1].fY + fPts[2].fY) + fPts[3].fY) / 8;
+ dst.pts[4].fX = (fPts[1].fX + 2 * fPts[2].fX + fPts[3].fX) / 4;
+ dst.pts[4].fY = (fPts[1].fY + 2 * fPts[2].fY + fPts[3].fY) / 4;
+ dst.pts[5].fX = (fPts[2].fX + fPts[3].fX) / 2;
+ dst.pts[5].fY = (fPts[2].fY + fPts[3].fY) / 2;
+ dst.pts[6] = fPts[3];
+ return dst;
+ }
+ interp_cubic_coords(&fPts[0].fX, &dst.pts[0].fX, t);
+ interp_cubic_coords(&fPts[0].fY, &dst.pts[0].fY, t);
+ return dst;
+}
+
+// TODO(skbug.com/14063) deduplicate this with SkBezierCubic::ConvertToPolynomial
+void SkDCubic::Coefficients(const double* src, double* A, double* B, double* C, double* D) {
+ *A = src[6]; // d
+ *B = src[4] * 3; // 3*c
+ *C = src[2] * 3; // 3*b
+ *D = src[0]; // a
+ *A -= *D - *C + *B; // A = -a + 3*b - 3*c + d
+ *B += 3 * *D - 2 * *C; // B = 3*a - 6*b + 3*c
+ *C -= 3 * *D; // C = -3*a + 3*b
+}
+
+bool SkDCubic::endsAreExtremaInXOrY() const {
+ return (between(fPts[0].fX, fPts[1].fX, fPts[3].fX)
+ && between(fPts[0].fX, fPts[2].fX, fPts[3].fX))
+ || (between(fPts[0].fY, fPts[1].fY, fPts[3].fY)
+ && between(fPts[0].fY, fPts[2].fY, fPts[3].fY));
+}
+
+// Do a quick reject by rotating all points relative to a line formed by
+// a pair of one cubic's points. If the 2nd cubic's points
+// are on the line or on the opposite side from the 1st cubic's 'odd man', the
+// curves at most intersect at the endpoints.
+/* if returning true, check contains true if cubic's hull collapsed, making the cubic linear
+ if returning false, check contains true if the the cubic pair have only the end point in common
+*/
+bool SkDCubic::hullIntersects(const SkDPoint* pts, int ptCount, bool* isLinear) const {
+ bool linear = true;
+ char hullOrder[4];
+ int hullCount = convexHull(hullOrder);
+ int end1 = hullOrder[0];
+ int hullIndex = 0;
+ const SkDPoint* endPt[2];
+ endPt[0] = &fPts[end1];
+ do {
+ hullIndex = (hullIndex + 1) % hullCount;
+ int end2 = hullOrder[hullIndex];
+ endPt[1] = &fPts[end2];
+ double origX = endPt[0]->fX;
+ double origY = endPt[0]->fY;
+ double adj = endPt[1]->fX - origX;
+ double opp = endPt[1]->fY - origY;
+ int oddManMask = other_two(end1, end2);
+ int oddMan = end1 ^ oddManMask;
+ double sign = (fPts[oddMan].fY - origY) * adj - (fPts[oddMan].fX - origX) * opp;
+ int oddMan2 = end2 ^ oddManMask;
+ double sign2 = (fPts[oddMan2].fY - origY) * adj - (fPts[oddMan2].fX - origX) * opp;
+ if (sign * sign2 < 0) {
+ continue;
+ }
+ if (approximately_zero(sign)) {
+ sign = sign2;
+ if (approximately_zero(sign)) {
+ continue;
+ }
+ }
+ linear = false;
+ bool foundOutlier = false;
+ for (int n = 0; n < ptCount; ++n) {
+ double test = (pts[n].fY - origY) * adj - (pts[n].fX - origX) * opp;
+ if (test * sign > 0 && !precisely_zero(test)) {
+ foundOutlier = true;
+ break;
+ }
+ }
+ if (!foundOutlier) {
+ return false;
+ }
+ endPt[0] = endPt[1];
+ end1 = end2;
+ } while (hullIndex);
+ *isLinear = linear;
+ return true;
+}
+
+bool SkDCubic::hullIntersects(const SkDCubic& c2, bool* isLinear) const {
+ return hullIntersects(c2.fPts, SkDCubic::kPointCount, isLinear);
+}
+
+bool SkDCubic::hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return hullIntersects(quad.fPts, SkDQuad::kPointCount, isLinear);
+}
+
+bool SkDCubic::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+
+ return hullIntersects(conic.fPts, isLinear);
+}
+
+bool SkDCubic::isLinear(int startIndex, int endIndex) const {
+ if (fPts[0].approximatelyDEqual(fPts[3])) {
+ return ((const SkDQuad *) this)->isLinear(0, 2);
+ }
+ SkLineParameters lineParameters;
+ lineParameters.cubicEndPoints(*this, startIndex, endIndex);
+ // FIXME: maybe it's possible to avoid this and compare non-normalized
+ lineParameters.normalize();
+ double tiniest = std::min(std::min(std::min(std::min(std::min(std::min(std::min(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY), fPts[3].fX), fPts[3].fY);
+ double largest = std::max(std::max(std::max(std::max(std::max(std::max(std::max(fPts[0].fX, fPts[0].fY),
+ fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY), fPts[3].fX), fPts[3].fY);
+ largest = std::max(largest, -tiniest);
+ double distance = lineParameters.controlPtDistance(*this, 1);
+ if (!approximately_zero_when_compared_to(distance, largest)) {
+ return false;
+ }
+ distance = lineParameters.controlPtDistance(*this, 2);
+ return approximately_zero_when_compared_to(distance, largest);
+}
+
+// from http://www.cs.sunysb.edu/~qin/courses/geometry/4.pdf
+// c(t) = a(1-t)^3 + 3bt(1-t)^2 + 3c(1-t)t^2 + dt^3
+// c'(t) = -3a(1-t)^2 + 3b((1-t)^2 - 2t(1-t)) + 3c(2t(1-t) - t^2) + 3dt^2
+// = 3(b-a)(1-t)^2 + 6(c-b)t(1-t) + 3(d-c)t^2
+static double derivative_at_t(const double* src, double t) {
+ double one_t = 1 - t;
+ double a = src[0];
+ double b = src[2];
+ double c = src[4];
+ double d = src[6];
+ return 3 * ((b - a) * one_t * one_t + 2 * (c - b) * t * one_t + (d - c) * t * t);
+}
+
+int SkDCubic::ComplexBreak(const SkPoint pointsPtr[4], SkScalar* t) {
+ SkDCubic cubic;
+ cubic.set(pointsPtr);
+ if (cubic.monotonicInX() && cubic.monotonicInY()) {
+ return 0;
+ }
+ double tt[2], ss[2];
+ SkCubicType cubicType = SkClassifyCubic(pointsPtr, tt, ss);
+ switch (cubicType) {
+ case SkCubicType::kLoop: {
+ const double &td = tt[0], &te = tt[1], &sd = ss[0], &se = ss[1];
+ if (roughly_between(0, td, sd) && roughly_between(0, te, se)) {
+ t[0] = static_cast<SkScalar>((td * se + te * sd) / (2 * sd * se));
+ return (int) (t[0] > 0 && t[0] < 1);
+ }
+ }
+ [[fallthrough]]; // fall through if no t value found
+ case SkCubicType::kSerpentine:
+ case SkCubicType::kLocalCusp:
+ case SkCubicType::kCuspAtInfinity: {
+ double inflectionTs[2];
+ int infTCount = cubic.findInflections(inflectionTs);
+ double maxCurvature[3];
+ int roots = cubic.findMaxCurvature(maxCurvature);
+ #if DEBUG_CUBIC_SPLIT
+ SkDebugf("%s\n", __FUNCTION__);
+ cubic.dump();
+ for (int index = 0; index < infTCount; ++index) {
+ SkDebugf("inflectionsTs[%d]=%1.9g ", index, inflectionTs[index]);
+ SkDPoint pt = cubic.ptAtT(inflectionTs[index]);
+ SkDVector dPt = cubic.dxdyAtT(inflectionTs[index]);
+ SkDLine perp = {{pt - dPt, pt + dPt}};
+ perp.dump();
+ }
+ for (int index = 0; index < roots; ++index) {
+ SkDebugf("maxCurvature[%d]=%1.9g ", index, maxCurvature[index]);
+ SkDPoint pt = cubic.ptAtT(maxCurvature[index]);
+ SkDVector dPt = cubic.dxdyAtT(maxCurvature[index]);
+ SkDLine perp = {{pt - dPt, pt + dPt}};
+ perp.dump();
+ }
+ #endif
+ if (infTCount == 2) {
+ for (int index = 0; index < roots; ++index) {
+ if (between(inflectionTs[0], maxCurvature[index], inflectionTs[1])) {
+ t[0] = maxCurvature[index];
+ return (int) (t[0] > 0 && t[0] < 1);
+ }
+ }
+ } else {
+ int resultCount = 0;
+ // FIXME: constant found through experimentation -- maybe there's a better way....
+ double precision = cubic.calcPrecision() * 2;
+ for (int index = 0; index < roots; ++index) {
+ double testT = maxCurvature[index];
+ if (0 >= testT || testT >= 1) {
+ continue;
+ }
+ // don't call dxdyAtT since we want (0,0) results
+ SkDVector dPt = { derivative_at_t(&cubic.fPts[0].fX, testT),
+ derivative_at_t(&cubic.fPts[0].fY, testT) };
+ double dPtLen = dPt.length();
+ if (dPtLen < precision) {
+ t[resultCount++] = testT;
+ }
+ }
+ if (!resultCount && infTCount == 1) {
+ t[0] = inflectionTs[0];
+ resultCount = (int) (t[0] > 0 && t[0] < 1);
+ }
+ return resultCount;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+bool SkDCubic::monotonicInX() const {
+ return precisely_between(fPts[0].fX, fPts[1].fX, fPts[3].fX)
+ && precisely_between(fPts[0].fX, fPts[2].fX, fPts[3].fX);
+}
+
+bool SkDCubic::monotonicInY() const {
+ return precisely_between(fPts[0].fY, fPts[1].fY, fPts[3].fY)
+ && precisely_between(fPts[0].fY, fPts[2].fY, fPts[3].fY);
+}
+
+void SkDCubic::otherPts(int index, const SkDPoint* o1Pts[kPointCount - 1]) const {
+ int offset = (int) !SkToBool(index);
+ o1Pts[0] = &fPts[offset];
+ o1Pts[1] = &fPts[++offset];
+ o1Pts[2] = &fPts[++offset];
+}
+
+int SkDCubic::searchRoots(double extremeTs[6], int extrema, double axisIntercept,
+ SearchAxis xAxis, double* validRoots) const {
+ extrema += findInflections(&extremeTs[extrema]);
+ extremeTs[extrema++] = 0;
+ extremeTs[extrema] = 1;
+ SkASSERT(extrema < 6);
+ SkTQSort(extremeTs, extremeTs + extrema + 1);
+ int validCount = 0;
+ for (int index = 0; index < extrema; ) {
+ double min = extremeTs[index];
+ double max = extremeTs[++index];
+ if (min == max) {
+ continue;
+ }
+ double newT = binarySearch(min, max, axisIntercept, xAxis);
+ if (newT >= 0) {
+ if (validCount >= 3) {
+ return 0;
+ }
+ validRoots[validCount++] = newT;
+ }
+ }
+ return validCount;
+}
+
+// cubic roots
+
+static const double PI = 3.141592653589793;
+
+// from SkGeometry.cpp (and Numeric Solutions, 5.6)
+// // TODO(skbug.com/14063) Deduplicate with SkCubics::RootsValidT
+int SkDCubic::RootsValidT(double A, double B, double C, double D, double t[3]) {
+ double s[3];
+ int realRoots = RootsReal(A, B, C, D, s);
+ int foundRoots = SkDQuad::AddValidTs(s, realRoots, t);
+ for (int index = 0; index < realRoots; ++index) {
+ double tValue = s[index];
+ if (!approximately_one_or_less(tValue) && between(1, tValue, 1.00005)) {
+ for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+ if (approximately_equal(t[idx2], 1)) {
+ goto nextRoot;
+ }
+ }
+ SkASSERT(foundRoots < 3);
+ t[foundRoots++] = 1;
+ } else if (!approximately_zero_or_more(tValue) && between(-0.00005, tValue, 0)) {
+ for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+ if (approximately_equal(t[idx2], 0)) {
+ goto nextRoot;
+ }
+ }
+ SkASSERT(foundRoots < 3);
+ t[foundRoots++] = 0;
+ }
+nextRoot:
+ ;
+ }
+ return foundRoots;
+}
+
+// TODO(skbug.com/14063) Deduplicate with SkCubics::RootsReal
+int SkDCubic::RootsReal(double A, double B, double C, double D, double s[3]) {
+#ifdef SK_DEBUG
+ #if ONE_OFF_DEBUG && ONE_OFF_DEBUG_MATHEMATICA
+ // create a string mathematica understands
+ // GDB set print repe 15 # if repeated digits is a bother
+ // set print elements 400 # if line doesn't fit
+ char str[1024];
+ sk_bzero(str, sizeof(str));
+ snprintf(str, sizeof(str), "Solve[%1.19g x^3 + %1.19g x^2 + %1.19g x + %1.19g == 0, x]",
+ A, B, C, D);
+ SkPathOpsDebug::MathematicaIze(str, sizeof(str));
+ SkDebugf("%s\n", str);
+ #endif
+#endif
+ if (approximately_zero(A)
+ && approximately_zero_when_compared_to(A, B)
+ && approximately_zero_when_compared_to(A, C)
+ && approximately_zero_when_compared_to(A, D)) { // we're just a quadratic
+ return SkDQuad::RootsReal(B, C, D, s);
+ }
+ if (approximately_zero_when_compared_to(D, A)
+ && approximately_zero_when_compared_to(D, B)
+ && approximately_zero_when_compared_to(D, C)) { // 0 is one root
+ int num = SkDQuad::RootsReal(A, B, C, s);
+ for (int i = 0; i < num; ++i) {
+ if (approximately_zero(s[i])) {
+ return num;
+ }
+ }
+ s[num++] = 0;
+ return num;
+ }
+ if (approximately_zero(A + B + C + D)) { // 1 is one root
+ int num = SkDQuad::RootsReal(A, A + B, -D, s);
+ for (int i = 0; i < num; ++i) {
+ if (AlmostDequalUlps(s[i], 1)) {
+ return num;
+ }
+ }
+ s[num++] = 1;
+ return num;
+ }
+ double a, b, c;
+ {
+ double invA = 1 / A;
+ a = B * invA;
+ b = C * invA;
+ c = D * invA;
+ }
+ double a2 = a * a;
+ double Q = (a2 - b * 3) / 9;
+ double R = (2 * a2 * a - 9 * a * b + 27 * c) / 54;
+ double R2 = R * R;
+ double Q3 = Q * Q * Q;
+ double R2MinusQ3 = R2 - Q3;
+ double adiv3 = a / 3;
+ double r;
+ double* roots = s;
+ if (R2MinusQ3 < 0) { // we have 3 real roots
+ // the divide/root can, due to finite precisions, be slightly outside of -1...1
+ double theta = acos(SkTPin(R / sqrt(Q3), -1., 1.));
+ double neg2RootQ = -2 * sqrt(Q);
+
+ r = neg2RootQ * cos(theta / 3) - adiv3;
+ *roots++ = r;
+
+ r = neg2RootQ * cos((theta + 2 * PI) / 3) - adiv3;
+ if (!AlmostDequalUlps(s[0], r)) {
+ *roots++ = r;
+ }
+ r = neg2RootQ * cos((theta - 2 * PI) / 3) - adiv3;
+ if (!AlmostDequalUlps(s[0], r) && (roots - s == 1 || !AlmostDequalUlps(s[1], r))) {
+ *roots++ = r;
+ }
+ } else { // we have 1 real root
+ double sqrtR2MinusQ3 = sqrt(R2MinusQ3);
+ A = fabs(R) + sqrtR2MinusQ3;
+ A = std::cbrt(A); // cube root
+ if (R > 0) {
+ A = -A;
+ }
+ if (A != 0) {
+ A += Q / A;
+ }
+ r = A - adiv3;
+ *roots++ = r;
+ if (AlmostDequalUlps((double) R2, (double) Q3)) {
+ r = -A / 2 - adiv3;
+ if (!AlmostDequalUlps(s[0], r)) {
+ *roots++ = r;
+ }
+ }
+ }
+ return static_cast<int>(roots - s);
+}
+
+// OPTIMIZE? compute t^2, t(1-t), and (1-t)^2 and pass them to another version of derivative at t?
+SkDVector SkDCubic::dxdyAtT(double t) const {
+ SkDVector result = { derivative_at_t(&fPts[0].fX, t), derivative_at_t(&fPts[0].fY, t) };
+ if (result.fX == 0 && result.fY == 0) {
+ if (t == 0) {
+ result = fPts[2] - fPts[0];
+ } else if (t == 1) {
+ result = fPts[3] - fPts[1];
+ } else {
+ // incomplete
+ SkDebugf("!c");
+ }
+ if (result.fX == 0 && result.fY == 0 && zero_or_one(t)) {
+ result = fPts[3] - fPts[0];
+ }
+ }
+ return result;
+}
+
+// OPTIMIZE? share code with formulate_F1DotF2
+// e.g. https://stackoverflow.com/a/35927917
+int SkDCubic::findInflections(double tValues[2]) const {
+ double Ax = fPts[1].fX - fPts[0].fX;
+ double Ay = fPts[1].fY - fPts[0].fY;
+ double Bx = fPts[2].fX - 2 * fPts[1].fX + fPts[0].fX;
+ double By = fPts[2].fY - 2 * fPts[1].fY + fPts[0].fY;
+ double Cx = fPts[3].fX + 3 * (fPts[1].fX - fPts[2].fX) - fPts[0].fX;
+ double Cy = fPts[3].fY + 3 * (fPts[1].fY - fPts[2].fY) - fPts[0].fY;
+ return SkDQuad::RootsValidT(Bx * Cy - By * Cx, Ax * Cy - Ay * Cx, Ax * By - Ay * Bx, tValues);
+}
+
+static void formulate_F1DotF2(const double src[], double coeff[4]) {
+ double a = src[2] - src[0];
+ double b = src[4] - 2 * src[2] + src[0];
+ double c = src[6] + 3 * (src[2] - src[4]) - src[0];
+ coeff[0] = c * c;
+ coeff[1] = 3 * b * c;
+ coeff[2] = 2 * b * b + c * a;
+ coeff[3] = a * b;
+}
+
+/** SkDCubic'(t) = At^2 + Bt + C, where
+ A = 3(-a + 3(b - c) + d)
+ B = 6(a - 2b + c)
+ C = 3(b - a)
+ Solve for t, keeping only those that fit between 0 < t < 1
+*/
+int SkDCubic::FindExtrema(const double src[], double tValues[2]) {
+ // we divide A,B,C by 3 to simplify
+ double a = src[0];
+ double b = src[2];
+ double c = src[4];
+ double d = src[6];
+ double A = d - a + 3 * (b - c);
+ double B = 2 * (a - b - b + c);
+ double C = b - a;
+
+ return SkDQuad::RootsValidT(A, B, C, tValues);
+}
+
+/* from SkGeometry.cpp
+ Looking for F' dot F'' == 0
+
+ A = b - a
+ B = c - 2b + a
+ C = d - 3c + 3b - a
+
+ F' = 3Ct^2 + 6Bt + 3A
+ F'' = 6Ct + 6B
+
+ F' dot F'' -> CCt^3 + 3BCt^2 + (2BB + CA)t + AB
+*/
+int SkDCubic::findMaxCurvature(double tValues[]) const {
+ double coeffX[4], coeffY[4];
+ int i;
+ formulate_F1DotF2(&fPts[0].fX, coeffX);
+ formulate_F1DotF2(&fPts[0].fY, coeffY);
+ for (i = 0; i < 4; i++) {
+ coeffX[i] = coeffX[i] + coeffY[i];
+ }
+ return RootsValidT(coeffX[0], coeffX[1], coeffX[2], coeffX[3], tValues);
+}
+
+SkDPoint SkDCubic::ptAtT(double t) const {
+ if (0 == t) {
+ return fPts[0];
+ }
+ if (1 == t) {
+ return fPts[3];
+ }
+ double one_t = 1 - t;
+ double one_t2 = one_t * one_t;
+ double a = one_t2 * one_t;
+ double b = 3 * one_t2 * t;
+ double t2 = t * t;
+ double c = 3 * one_t * t2;
+ double d = t2 * t;
+ SkDPoint result = {a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX + d * fPts[3].fX,
+ a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY + d * fPts[3].fY};
+ return result;
+}
+
+/*
+ Given a cubic c, t1, and t2, find a small cubic segment.
+
+ The new cubic is defined as points A, B, C, and D, where
+ s1 = 1 - t1
+ s2 = 1 - t2
+ A = c[0]*s1*s1*s1 + 3*c[1]*s1*s1*t1 + 3*c[2]*s1*t1*t1 + c[3]*t1*t1*t1
+ D = c[0]*s2*s2*s2 + 3*c[1]*s2*s2*t2 + 3*c[2]*s2*t2*t2 + c[3]*t2*t2*t2
+
+ We don't have B or C. So We define two equations to isolate them.
+ First, compute two reference T values 1/3 and 2/3 from t1 to t2:
+
+ c(at (2*t1 + t2)/3) == E
+ c(at (t1 + 2*t2)/3) == F
+
+ Next, compute where those values must be if we know the values of B and C:
+
+ _12 = A*2/3 + B*1/3
+ 12_ = A*1/3 + B*2/3
+ _23 = B*2/3 + C*1/3
+ 23_ = B*1/3 + C*2/3
+ _34 = C*2/3 + D*1/3
+ 34_ = C*1/3 + D*2/3
+ _123 = (A*2/3 + B*1/3)*2/3 + (B*2/3 + C*1/3)*1/3 = A*4/9 + B*4/9 + C*1/9
+ 123_ = (A*1/3 + B*2/3)*1/3 + (B*1/3 + C*2/3)*2/3 = A*1/9 + B*4/9 + C*4/9
+ _234 = (B*2/3 + C*1/3)*2/3 + (C*2/3 + D*1/3)*1/3 = B*4/9 + C*4/9 + D*1/9
+ 234_ = (B*1/3 + C*2/3)*1/3 + (C*1/3 + D*2/3)*2/3 = B*1/9 + C*4/9 + D*4/9
+ _1234 = (A*4/9 + B*4/9 + C*1/9)*2/3 + (B*4/9 + C*4/9 + D*1/9)*1/3
+ = A*8/27 + B*12/27 + C*6/27 + D*1/27
+ = E
+ 1234_ = (A*1/9 + B*4/9 + C*4/9)*1/3 + (B*1/9 + C*4/9 + D*4/9)*2/3
+ = A*1/27 + B*6/27 + C*12/27 + D*8/27
+ = F
+ E*27 = A*8 + B*12 + C*6 + D
+ F*27 = A + B*6 + C*12 + D*8
+
+Group the known values on one side:
+
+ M = E*27 - A*8 - D = B*12 + C* 6
+ N = F*27 - A - D*8 = B* 6 + C*12
+ M*2 - N = B*18
+ N*2 - M = C*18
+ B = (M*2 - N)/18
+ C = (N*2 - M)/18
+ */
+
+static double interp_cubic_coords(const double* src, double t) {
+ double ab = SkDInterp(src[0], src[2], t);
+ double bc = SkDInterp(src[2], src[4], t);
+ double cd = SkDInterp(src[4], src[6], t);
+ double abc = SkDInterp(ab, bc, t);
+ double bcd = SkDInterp(bc, cd, t);
+ double abcd = SkDInterp(abc, bcd, t);
+ return abcd;
+}
+
+SkDCubic SkDCubic::subDivide(double t1, double t2) const {
+ if (t1 == 0 || t2 == 1) {
+ if (t1 == 0 && t2 == 1) {
+ return *this;
+ }
+ SkDCubicPair pair = chopAt(t1 == 0 ? t2 : t1);
+ SkDCubic dst = t1 == 0 ? pair.first() : pair.second();
+ return dst;
+ }
+ SkDCubic dst;
+ double ax = dst[0].fX = interp_cubic_coords(&fPts[0].fX, t1);
+ double ay = dst[0].fY = interp_cubic_coords(&fPts[0].fY, t1);
+ double ex = interp_cubic_coords(&fPts[0].fX, (t1*2+t2)/3);
+ double ey = interp_cubic_coords(&fPts[0].fY, (t1*2+t2)/3);
+ double fx = interp_cubic_coords(&fPts[0].fX, (t1+t2*2)/3);
+ double fy = interp_cubic_coords(&fPts[0].fY, (t1+t2*2)/3);
+ double dx = dst[3].fX = interp_cubic_coords(&fPts[0].fX, t2);
+ double dy = dst[3].fY = interp_cubic_coords(&fPts[0].fY, t2);
+ double mx = ex * 27 - ax * 8 - dx;
+ double my = ey * 27 - ay * 8 - dy;
+ double nx = fx * 27 - ax - dx * 8;
+ double ny = fy * 27 - ay - dy * 8;
+ /* bx = */ dst[1].fX = (mx * 2 - nx) / 18;
+ /* by = */ dst[1].fY = (my * 2 - ny) / 18;
+ /* cx = */ dst[2].fX = (nx * 2 - mx) / 18;
+ /* cy = */ dst[2].fY = (ny * 2 - my) / 18;
+ // FIXME: call align() ?
+ return dst;
+}
+
+void SkDCubic::subDivide(const SkDPoint& a, const SkDPoint& d,
+ double t1, double t2, SkDPoint dst[2]) const {
+ SkASSERT(t1 != t2);
+ // this approach assumes that the control points computed directly are accurate enough
+ SkDCubic sub = subDivide(t1, t2);
+ dst[0] = sub[1] + (a - sub[0]);
+ dst[1] = sub[2] + (d - sub[3]);
+ if (t1 == 0 || t2 == 0) {
+ align(0, 1, t1 == 0 ? &dst[0] : &dst[1]);
+ }
+ if (t1 == 1 || t2 == 1) {
+ align(3, 2, t1 == 1 ? &dst[0] : &dst[1]);
+ }
+ if (AlmostBequalUlps(dst[0].fX, a.fX)) {
+ dst[0].fX = a.fX;
+ }
+ if (AlmostBequalUlps(dst[0].fY, a.fY)) {
+ dst[0].fY = a.fY;
+ }
+ if (AlmostBequalUlps(dst[1].fX, d.fX)) {
+ dst[1].fX = d.fX;
+ }
+ if (AlmostBequalUlps(dst[1].fY, d.fY)) {
+ dst[1].fY = d.fY;
+ }
+}
+
+bool SkDCubic::toFloatPoints(SkPoint* pts) const {
+ const double* dCubic = &fPts[0].fX;
+ SkScalar* cubic = &pts[0].fX;
+ for (int index = 0; index < kPointCount * 2; ++index) {
+ cubic[index] = SkDoubleToScalar(dCubic[index]);
+ if (SkScalarAbs(cubic[index]) < FLT_EPSILON_ORDERABLE_ERR) {
+ cubic[index] = 0;
+ }
+ }
+ return SkScalarsAreFinite(&pts->fX, kPointCount * 2);
+}
+
+double SkDCubic::top(const SkDCubic& dCurve, double startT, double endT, SkDPoint*topPt) const {
+ double extremeTs[2];
+ double topT = -1;
+ int roots = SkDCubic::FindExtrema(&fPts[0].fY, extremeTs);
+ for (int index = 0; index < roots; ++index) {
+ double t = startT + (endT - startT) * extremeTs[index];
+ SkDPoint mid = dCurve.ptAtT(t);
+ if (topPt->fY > mid.fY || (topPt->fY == mid.fY && topPt->fX > mid.fX)) {
+ topT = t;
+ *topPt = mid;
+ }
+ }
+ return topT;
+}
+
+int SkTCubic::intersectRay(SkIntersections* i, const SkDLine& line) const {
+ return i->intersectRay(fCubic, line);
+}
+
+bool SkTCubic::hullIntersects(const SkDQuad& quad, bool* isLinear) const {
+ return quad.hullIntersects(fCubic, isLinear);
+}
+
+bool SkTCubic::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+ return conic.hullIntersects(fCubic, isLinear);
+}
+
+void SkTCubic::setBounds(SkDRect* rect) const {
+ rect->setBounds(fCubic);
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCubic.h b/gfx/skia/skia/src/pathops/SkPathOpsCubic.h
new file mode 100644
index 0000000000..242ca34bdc
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCubic.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsCubic_DEFINED
+#define SkPathOpsCubic_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+
+class SkIntersections;
+class SkOpGlobalState;
+struct SkDConic;
+struct SkDCubicPair;
+struct SkDLine;
+struct SkDQuad;
+struct SkDRect;
+
+struct SkDCubic {
+ static const int kPointCount = 4;
+ static const int kPointLast = kPointCount - 1;
+ static const int kMaxIntersections = 9;
+
+ enum SearchAxis {
+ kXAxis,
+ kYAxis
+ };
+
+ bool collapsed() const {
+ return fPts[0].approximatelyEqual(fPts[1]) && fPts[0].approximatelyEqual(fPts[2])
+ && fPts[0].approximatelyEqual(fPts[3]);
+ }
+
+ bool controlsInside() const {
+ SkDVector v01 = fPts[0] - fPts[1];
+ SkDVector v02 = fPts[0] - fPts[2];
+ SkDVector v03 = fPts[0] - fPts[3];
+ SkDVector v13 = fPts[1] - fPts[3];
+ SkDVector v23 = fPts[2] - fPts[3];
+ return v03.dot(v01) > 0 && v03.dot(v02) > 0 && v03.dot(v13) > 0 && v03.dot(v23) > 0;
+ }
+
+ static bool IsConic() { return false; }
+
+ const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+ SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+
+ void align(int endIndex, int ctrlIndex, SkDPoint* dstPt) const;
+ double binarySearch(double min, double max, double axisIntercept, SearchAxis xAxis) const;
+ double calcPrecision() const;
+ SkDCubicPair chopAt(double t) const;
+ static void Coefficients(const double* cubic, double* A, double* B, double* C, double* D);
+ static int ComplexBreak(const SkPoint pts[4], SkScalar* t);
+ int convexHull(char order[kPointCount]) const;
+
+ void debugInit() {
+ sk_bzero(fPts, sizeof(fPts));
+ }
+
+ void debugSet(const SkDPoint* pts);
+
+ void dump() const; // callable from the debugger when the implementation code is linked in
+ void dumpID(int id) const;
+ void dumpInner() const;
+ SkDVector dxdyAtT(double t) const;
+ bool endsAreExtremaInXOrY() const;
+ static int FindExtrema(const double src[], double tValue[2]);
+ int findInflections(double tValues[2]) const;
+
+ static int FindInflections(const SkPoint a[kPointCount], double tValues[2]) {
+ SkDCubic cubic;
+ return cubic.set(a).findInflections(tValues);
+ }
+
+ int findMaxCurvature(double tValues[]) const;
+
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const { return fDebugGlobalState; }
+#endif
+
+ bool hullIntersects(const SkDCubic& c2, bool* isLinear) const;
+ bool hullIntersects(const SkDConic& c, bool* isLinear) const;
+ bool hullIntersects(const SkDQuad& c2, bool* isLinear) const;
+ bool hullIntersects(const SkDPoint* pts, int ptCount, bool* isLinear) const;
+ bool isLinear(int startIndex, int endIndex) const;
+ static int maxIntersections() { return kMaxIntersections; }
+ bool monotonicInX() const;
+ bool monotonicInY() const;
+ void otherPts(int index, const SkDPoint* o1Pts[kPointCount - 1]) const;
+ static int pointCount() { return kPointCount; }
+ static int pointLast() { return kPointLast; }
+ SkDPoint ptAtT(double t) const;
+ static int RootsReal(double A, double B, double C, double D, double t[3]);
+ static int RootsValidT(const double A, const double B, const double C, double D, double s[3]);
+
+ int searchRoots(double extremes[6], int extrema, double axisIntercept,
+ SearchAxis xAxis, double* validRoots) const;
+
+ bool toFloatPoints(SkPoint* ) const;
+ /**
+ * Return the number of valid roots (0 < root < 1) for this cubic intersecting the
+ * specified horizontal line.
+ */
+ int horizontalIntersect(double yIntercept, double roots[3]) const;
+ /**
+ * Return the number of valid roots (0 < root < 1) for this cubic intersecting the
+ * specified vertical line.
+ */
+ int verticalIntersect(double xIntercept, double roots[3]) const;
+
+// add debug only global pointer so asserts can be skipped by fuzzers
+ const SkDCubic& set(const SkPoint pts[kPointCount]
+ SkDEBUGPARAMS(SkOpGlobalState* state = nullptr)) {
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ fPts[2] = pts[2];
+ fPts[3] = pts[3];
+ SkDEBUGCODE(fDebugGlobalState = state);
+ return *this;
+ }
+
+ SkDCubic subDivide(double t1, double t2) const;
+ void subDivide(double t1, double t2, SkDCubic* c) const { *c = this->subDivide(t1, t2); }
+
+ static SkDCubic SubDivide(const SkPoint a[kPointCount], double t1, double t2) {
+ SkDCubic cubic;
+ return cubic.set(a).subDivide(t1, t2);
+ }
+
+ void subDivide(const SkDPoint& a, const SkDPoint& d, double t1, double t2, SkDPoint p[2]) const;
+
+ static void SubDivide(const SkPoint pts[kPointCount], const SkDPoint& a, const SkDPoint& d, double t1,
+ double t2, SkDPoint p[2]) {
+ SkDCubic cubic;
+ cubic.set(pts).subDivide(a, d, t1, t2, p);
+ }
+
+ double top(const SkDCubic& dCurve, double startT, double endT, SkDPoint*topPt) const;
+ SkDQuad toQuad() const;
+
+ static const int gPrecisionUnit;
+ SkDPoint fPts[kPointCount];
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+};
+
+/* Given the set [0, 1, 2, 3], and two of the four members, compute an XOR mask
+ that computes the other two. Note that:
+
+ one ^ two == 3 for (0, 3), (1, 2)
+ one ^ two < 3 for (0, 1), (0, 2), (1, 3), (2, 3)
+ 3 - (one ^ two) is either 0, 1, or 2
+ 1 >> (3 - (one ^ two)) is either 0 or 1
+thus:
+ returned == 2 for (0, 3), (1, 2)
+ returned == 3 for (0, 1), (0, 2), (1, 3), (2, 3)
+given that:
+ (0, 3) ^ 2 -> (2, 1) (1, 2) ^ 2 -> (3, 0)
+ (0, 1) ^ 3 -> (3, 2) (0, 2) ^ 3 -> (3, 1) (1, 3) ^ 3 -> (2, 0) (2, 3) ^ 3 -> (1, 0)
+*/
+inline int other_two(int one, int two) {
+ return 1 >> (3 - (one ^ two)) ^ 3;
+}
+
+struct SkDCubicPair {
+ SkDCubic first() const {
+#ifdef SK_DEBUG
+ SkDCubic result;
+ result.debugSet(&pts[0]);
+ return result;
+#else
+ return (const SkDCubic&) pts[0];
+#endif
+ }
+ SkDCubic second() const {
+#ifdef SK_DEBUG
+ SkDCubic result;
+ result.debugSet(&pts[3]);
+ return result;
+#else
+ return (const SkDCubic&) pts[3];
+#endif
+ }
+ SkDPoint pts[7];
+};
+
+class SkTCubic : public SkTCurve {
+public:
+ SkDCubic fCubic;
+
+ SkTCubic() {}
+
+ SkTCubic(const SkDCubic& c)
+ : fCubic(c) {
+ }
+
+ ~SkTCubic() override {}
+
+ const SkDPoint& operator[](int n) const override { return fCubic[n]; }
+ SkDPoint& operator[](int n) override { return fCubic[n]; }
+
+ bool collapsed() const override { return fCubic.collapsed(); }
+ bool controlsInside() const override { return fCubic.controlsInside(); }
+ void debugInit() override { return fCubic.debugInit(); }
+#if DEBUG_T_SECT
+ void dumpID(int id) const override { return fCubic.dumpID(id); }
+#endif
+ SkDVector dxdyAtT(double t) const override { return fCubic.dxdyAtT(t); }
+#ifdef SK_DEBUG
+ SkOpGlobalState* globalState() const override { return fCubic.globalState(); }
+#endif
+ bool hullIntersects(const SkDQuad& quad, bool* isLinear) const override;
+ bool hullIntersects(const SkDConic& conic, bool* isLinear) const override;
+
+ bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const override {
+ return cubic.hullIntersects(fCubic, isLinear);
+ }
+
+ bool hullIntersects(const SkTCurve& curve, bool* isLinear) const override {
+ return curve.hullIntersects(fCubic, isLinear);
+ }
+
+ int intersectRay(SkIntersections* i, const SkDLine& line) const override;
+ bool IsConic() const override { return false; }
+ SkTCurve* make(SkArenaAlloc& heap) const override { return heap.make<SkTCubic>(); }
+
+ int maxIntersections() const override { return SkDCubic::kMaxIntersections; }
+
+ void otherPts(int oddMan, const SkDPoint* endPt[2]) const override {
+ fCubic.otherPts(oddMan, endPt);
+ }
+
+ int pointCount() const override { return SkDCubic::kPointCount; }
+ int pointLast() const override { return SkDCubic::kPointLast; }
+ SkDPoint ptAtT(double t) const override { return fCubic.ptAtT(t); }
+ void setBounds(SkDRect* ) const override;
+
+ void subDivide(double t1, double t2, SkTCurve* curve) const override {
+ ((SkTCubic*) curve)->fCubic = fCubic.subDivide(t1, t2);
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp b/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp
new file mode 100644
index 0000000000..ad02d61116
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCurve.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsCurve.h"
+
+#include "include/private/base/SkTemplates.h"
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsRect.h"
+
+#include <algorithm>
+#include <cfloat>
+
+ // this cheats and assumes that the perpendicular to the point is the closest ray to the curve
+ // this case (where the line and the curve are nearly coincident) may be the only case that counts
+double SkDCurve::nearPoint(SkPath::Verb verb, const SkDPoint& xy, const SkDPoint& opp) const {
+ int count = SkPathOpsVerbToPoints(verb);
+ double minX = fCubic.fPts[0].fX;
+ double maxX = minX;
+ for (int index = 1; index <= count; ++index) {
+ minX = std::min(minX, fCubic.fPts[index].fX);
+ maxX = std::max(maxX, fCubic.fPts[index].fX);
+ }
+ if (!AlmostBetweenUlps(minX, xy.fX, maxX)) {
+ return -1;
+ }
+ double minY = fCubic.fPts[0].fY;
+ double maxY = minY;
+ for (int index = 1; index <= count; ++index) {
+ minY = std::min(minY, fCubic.fPts[index].fY);
+ maxY = std::max(maxY, fCubic.fPts[index].fY);
+ }
+ if (!AlmostBetweenUlps(minY, xy.fY, maxY)) {
+ return -1;
+ }
+ SkIntersections i;
+ SkDLine perp = {{ xy, { xy.fX + opp.fY - xy.fY, xy.fY + xy.fX - opp.fX }}};
+ (*CurveDIntersectRay[verb])(*this, perp, &i);
+ int minIndex = -1;
+ double minDist = FLT_MAX;
+ for (int index = 0; index < i.used(); ++index) {
+ double dist = xy.distance(i.pt(index));
+ if (minDist > dist) {
+ minDist = dist;
+ minIndex = index;
+ }
+ }
+ if (minIndex < 0) {
+ return -1;
+ }
+ double largest = std::max(std::max(maxX, maxY), -std::min(minX, minY));
+ if (!AlmostEqualUlps_Pin(largest, largest + minDist)) { // is distance within ULPS tolerance?
+ return -1;
+ }
+ return SkPinT(i[0][minIndex]);
+}
+
+void SkDCurve::setConicBounds(const SkPoint curve[3], SkScalar curveWeight,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDConic dCurve;
+ dCurve.set(curve, curveWeight);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fConic, tStart, tEnd);
+ bounds->setLTRB(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurve::setCubicBounds(const SkPoint curve[4], SkScalar ,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDCubic dCurve;
+ dCurve.set(curve);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fCubic, tStart, tEnd);
+ bounds->setLTRB(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurve::setQuadBounds(const SkPoint curve[3], SkScalar ,
+ double tStart, double tEnd, SkPathOpsBounds* bounds) {
+ SkDQuad dCurve;
+ dCurve.set(curve);
+ SkDRect dRect;
+ dRect.setBounds(dCurve, fQuad, tStart, tEnd);
+ bounds->setLTRB(SkDoubleToScalar(dRect.fLeft), SkDoubleToScalar(dRect.fTop),
+ SkDoubleToScalar(dRect.fRight), SkDoubleToScalar(dRect.fBottom));
+}
+
+void SkDCurveSweep::setCurveHullSweep(SkPath::Verb verb) {
+ fOrdered = true;
+ fSweep[0] = fCurve[1] - fCurve[0];
+ if (SkPath::kLine_Verb == verb) {
+ fSweep[1] = fSweep[0];
+ fIsCurve = false;
+ return;
+ }
+ fSweep[1] = fCurve[2] - fCurve[0];
+ // OPTIMIZE: I do the following float check a lot -- probably need a
+ // central place for this val-is-small-compared-to-curve check
+ double maxVal = 0;
+ for (int index = 0; index <= SkPathOpsVerbToPoints(verb); ++index) {
+ maxVal = std::max(maxVal, std::max(SkTAbs(fCurve[index].fX),
+ SkTAbs(fCurve[index].fY)));
+ }
+ {
+ if (SkPath::kCubic_Verb != verb) {
+ if (roughly_zero_when_compared_to(fSweep[0].fX, maxVal)
+ && roughly_zero_when_compared_to(fSweep[0].fY, maxVal)) {
+ fSweep[0] = fSweep[1];
+ }
+ goto setIsCurve;
+ }
+ SkDVector thirdSweep = fCurve[3] - fCurve[0];
+ if (fSweep[0].fX == 0 && fSweep[0].fY == 0) {
+ fSweep[0] = fSweep[1];
+ fSweep[1] = thirdSweep;
+ if (roughly_zero_when_compared_to(fSweep[0].fX, maxVal)
+ && roughly_zero_when_compared_to(fSweep[0].fY, maxVal)) {
+ fSweep[0] = fSweep[1];
+ fCurve[1] = fCurve[3];
+ }
+ goto setIsCurve;
+ }
+ double s1x3 = fSweep[0].crossCheck(thirdSweep);
+ double s3x2 = thirdSweep.crossCheck(fSweep[1]);
+ if (s1x3 * s3x2 >= 0) { // if third vector is on or between first two vectors
+ goto setIsCurve;
+ }
+ double s2x1 = fSweep[1].crossCheck(fSweep[0]);
+ // FIXME: If the sweep of the cubic is greater than 180 degrees, we're in trouble
+ // probably such wide sweeps should be artificially subdivided earlier so that never happens
+ SkASSERT(s1x3 * s2x1 < 0 || s1x3 * s3x2 < 0);
+ if (s3x2 * s2x1 < 0) {
+ SkASSERT(s2x1 * s1x3 > 0);
+ fSweep[0] = fSweep[1];
+ fOrdered = false;
+ }
+ fSweep[1] = thirdSweep;
+ }
+setIsCurve:
+ fIsCurve = fSweep[0].crossCheck(fSweep[1]) != 0;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsCurve.h b/gfx/skia/skia/src/pathops/SkPathOpsCurve.h
new file mode 100644
index 0000000000..1729dc27ab
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsCurve.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsCurve_DEFINE
+#define SkPathOpsCurve_DEFINE
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+struct SkPathOpsBounds;
+
+struct SkOpCurve {
+ SkPoint fPts[4];
+ SkScalar fWeight;
+ SkDEBUGCODE(SkPath::Verb fVerb);
+
+ const SkPoint& operator[](int n) const {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fPts[n];
+ }
+
+ void dump() const;
+
+ void set(const SkDQuad& quad) {
+ for (int index = 0; index < SkDQuad::kPointCount; ++index) {
+ fPts[index] = quad[index].asSkPoint();
+ }
+ SkDEBUGCODE(fWeight = 1);
+ SkDEBUGCODE(fVerb = SkPath::kQuad_Verb);
+ }
+
+ void set(const SkDCubic& cubic) {
+ for (int index = 0; index < SkDCubic::kPointCount; ++index) {
+ fPts[index] = cubic[index].asSkPoint();
+ }
+ SkDEBUGCODE(fWeight = 1);
+ SkDEBUGCODE(fVerb = SkPath::kCubic_Verb);
+ }
+
+};
+
+struct SkDCurve {
+ union {
+ SkDLine fLine;
+ SkDQuad fQuad;
+ SkDConic fConic;
+ SkDCubic fCubic;
+ };
+ SkDEBUGCODE(SkPath::Verb fVerb);
+
+ const SkDPoint& operator[](int n) const {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fCubic[n];
+ }
+
+ SkDPoint& operator[](int n) {
+ SkASSERT(n >= 0 && n <= SkPathOpsVerbToPoints(fVerb));
+ return fCubic[n];
+ }
+
+ SkDPoint conicTop(const SkPoint curve[3], SkScalar curveWeight,
+ double s, double e, double* topT);
+ SkDPoint cubicTop(const SkPoint curve[4], SkScalar , double s, double e, double* topT);
+ void dump() const;
+ void dumpID(int ) const;
+ SkDPoint lineTop(const SkPoint[2], SkScalar , double , double , double* topT);
+ double nearPoint(SkPath::Verb verb, const SkDPoint& xy, const SkDPoint& opp) const;
+ SkDPoint quadTop(const SkPoint curve[3], SkScalar , double s, double e, double* topT);
+
+ void setConicBounds(const SkPoint curve[3], SkScalar curveWeight,
+ double s, double e, SkPathOpsBounds* );
+ void setCubicBounds(const SkPoint curve[4], SkScalar ,
+ double s, double e, SkPathOpsBounds* );
+ void setQuadBounds(const SkPoint curve[3], SkScalar ,
+ double s, double e, SkPathOpsBounds*);
+};
+
+class SkDCurveSweep {
+public:
+ bool isCurve() const { return fIsCurve; }
+ bool isOrdered() const { return fOrdered; }
+ void setCurveHullSweep(SkPath::Verb verb);
+
+ SkDCurve fCurve;
+ SkDVector fSweep[2];
+private:
+ bool fIsCurve;
+ bool fOrdered; // cleared when a cubic's control point isn't between the sweep vectors
+
+};
+
+extern SkDPoint (SkDCurve::* const Top[])(const SkPoint curve[], SkScalar cWeight,
+ double tStart, double tEnd, double* topT);
+
+static SkDPoint dline_xy_at_t(const SkPoint a[2], SkScalar , double t) {
+ SkDLine line;
+ line.set(a);
+ return line.ptAtT(t);
+}
+
+static SkDPoint dquad_xy_at_t(const SkPoint a[3], SkScalar , double t) {
+ SkDQuad quad;
+ quad.set(a);
+ return quad.ptAtT(t);
+}
+
+static SkDPoint dconic_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.ptAtT(t);
+}
+
+static SkDPoint dcubic_xy_at_t(const SkPoint a[4], SkScalar , double t) {
+ SkDCubic cubic;
+ cubic.set(a);
+ return cubic.ptAtT(t);
+}
+
+static SkDPoint (* const CurveDPointAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ dline_xy_at_t,
+ dquad_xy_at_t,
+ dconic_xy_at_t,
+ dcubic_xy_at_t
+};
+
+static SkDPoint ddline_xy_at_t(const SkDCurve& c, double t) {
+ return c.fLine.ptAtT(t);
+}
+
+static SkDPoint ddquad_xy_at_t(const SkDCurve& c, double t) {
+ return c.fQuad.ptAtT(t);
+}
+
+static SkDPoint ddconic_xy_at_t(const SkDCurve& c, double t) {
+ return c.fConic.ptAtT(t);
+}
+
+static SkDPoint ddcubic_xy_at_t(const SkDCurve& c, double t) {
+ return c.fCubic.ptAtT(t);
+}
+
+static SkDPoint (* const CurveDDPointAtT[])(const SkDCurve& , double ) = {
+ nullptr,
+ ddline_xy_at_t,
+ ddquad_xy_at_t,
+ ddconic_xy_at_t,
+ ddcubic_xy_at_t
+};
+
+static SkPoint fline_xy_at_t(const SkPoint a[2], SkScalar weight, double t) {
+ return dline_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fquad_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dquad_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fconic_xy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dconic_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint fcubic_xy_at_t(const SkPoint a[4], SkScalar weight, double t) {
+ return dcubic_xy_at_t(a, weight, t).asSkPoint();
+}
+
+static SkPoint (* const CurvePointAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ fline_xy_at_t,
+ fquad_xy_at_t,
+ fconic_xy_at_t,
+ fcubic_xy_at_t
+};
+
+static SkDVector dline_dxdy_at_t(const SkPoint a[2], SkScalar , double ) {
+ SkDLine line;
+ line.set(a);
+ return line[1] - line[0];
+}
+
+static SkDVector dquad_dxdy_at_t(const SkPoint a[3], SkScalar , double t) {
+ SkDQuad quad;
+ quad.set(a);
+ return quad.dxdyAtT(t);
+}
+
+static SkDVector dconic_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ SkDConic conic;
+ conic.set(a, weight);
+ return conic.dxdyAtT(t);
+}
+
+static SkDVector dcubic_dxdy_at_t(const SkPoint a[4], SkScalar , double t) {
+ SkDCubic cubic;
+ cubic.set(a);
+ return cubic.dxdyAtT(t);
+}
+
+static SkDVector (* const CurveDSlopeAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ dline_dxdy_at_t,
+ dquad_dxdy_at_t,
+ dconic_dxdy_at_t,
+ dcubic_dxdy_at_t
+};
+
+static SkDVector ddline_dxdy_at_t(const SkDCurve& c, double ) {
+ return c.fLine.fPts[1] - c.fLine.fPts[0];
+}
+
+static SkDVector ddquad_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fQuad.dxdyAtT(t);
+}
+
+static SkDVector ddconic_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fConic.dxdyAtT(t);
+}
+
+static SkDVector ddcubic_dxdy_at_t(const SkDCurve& c, double t) {
+ return c.fCubic.dxdyAtT(t);
+}
+
+static SkDVector (* const CurveDDSlopeAtT[])(const SkDCurve& , double ) = {
+ nullptr,
+ ddline_dxdy_at_t,
+ ddquad_dxdy_at_t,
+ ddconic_dxdy_at_t,
+ ddcubic_dxdy_at_t
+};
+
+static SkVector fline_dxdy_at_t(const SkPoint a[2], SkScalar , double ) {
+ return a[1] - a[0];
+}
+
+static SkVector fquad_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dquad_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector fconic_dxdy_at_t(const SkPoint a[3], SkScalar weight, double t) {
+ return dconic_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector fcubic_dxdy_at_t(const SkPoint a[4], SkScalar weight, double t) {
+ return dcubic_dxdy_at_t(a, weight, t).asSkVector();
+}
+
+static SkVector (* const CurveSlopeAtT[])(const SkPoint[], SkScalar , double ) = {
+ nullptr,
+ fline_dxdy_at_t,
+ fquad_dxdy_at_t,
+ fconic_dxdy_at_t,
+ fcubic_dxdy_at_t
+};
+
+static bool line_is_vertical(const SkPoint a[2], SkScalar , double startT, double endT) {
+ SkDLine line;
+ line.set(a);
+ SkDPoint dst[2] = { line.ptAtT(startT), line.ptAtT(endT) };
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX);
+}
+
+static bool quad_is_vertical(const SkPoint a[3], SkScalar , double startT, double endT) {
+ SkDQuad quad;
+ quad.set(a);
+ SkDQuad dst = quad.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX);
+}
+
+static bool conic_is_vertical(const SkPoint a[3], SkScalar weight, double startT, double endT) {
+ SkDConic conic;
+ conic.set(a, weight);
+ SkDConic dst = conic.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX);
+}
+
+static bool cubic_is_vertical(const SkPoint a[4], SkScalar , double startT, double endT) {
+ SkDCubic cubic;
+ cubic.set(a);
+ SkDCubic dst = cubic.subDivide(startT, endT);
+ return AlmostEqualUlps(dst[0].fX, dst[1].fX) && AlmostEqualUlps(dst[1].fX, dst[2].fX)
+ && AlmostEqualUlps(dst[2].fX, dst[3].fX);
+}
+
+static bool (* const CurveIsVertical[])(const SkPoint[], SkScalar , double , double) = {
+ nullptr,
+ line_is_vertical,
+ quad_is_vertical,
+ conic_is_vertical,
+ cubic_is_vertical
+};
+
+static void line_intersect_ray(const SkPoint a[2], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDLine line;
+ line.set(a);
+ i->intersectRay(line, ray);
+}
+
+static void quad_intersect_ray(const SkPoint a[3], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDQuad quad;
+ quad.set(a);
+ i->intersectRay(quad, ray);
+}
+
+static void conic_intersect_ray(const SkPoint a[3], SkScalar weight, const SkDLine& ray,
+ SkIntersections* i) {
+ SkDConic conic;
+ conic.set(a, weight);
+ i->intersectRay(conic, ray);
+}
+
+static void cubic_intersect_ray(const SkPoint a[4], SkScalar , const SkDLine& ray,
+ SkIntersections* i) {
+ SkDCubic cubic;
+ cubic.set(a);
+ i->intersectRay(cubic, ray);
+}
+
+static void (* const CurveIntersectRay[])(const SkPoint[] , SkScalar , const SkDLine& ,
+ SkIntersections* ) = {
+ nullptr,
+ line_intersect_ray,
+ quad_intersect_ray,
+ conic_intersect_ray,
+ cubic_intersect_ray
+};
+
+static void dline_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fLine, ray);
+}
+
+static void dquad_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fQuad, ray);
+}
+
+static void dconic_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fConic, ray);
+}
+
+static void dcubic_intersect_ray(const SkDCurve& c, const SkDLine& ray, SkIntersections* i) {
+ i->intersectRay(c.fCubic, ray);
+}
+
+static void (* const CurveDIntersectRay[])(const SkDCurve& , const SkDLine& , SkIntersections* ) = {
+ nullptr,
+ dline_intersect_ray,
+ dquad_intersect_ray,
+ dconic_intersect_ray,
+ dcubic_intersect_ray
+};
+
+static int line_intercept_h(const SkPoint a[2], SkScalar , SkScalar y, double* roots) {
+ if (a[0].fY == a[1].fY) {
+ return false;
+ }
+ SkDLine line;
+ roots[0] = SkIntersections::HorizontalIntercept(line.set(a), y);
+ return between(0, roots[0], 1);
+}
+
+static int line_intercept_v(const SkPoint a[2], SkScalar , SkScalar x, double* roots) {
+ if (a[0].fX == a[1].fX) {
+ return false;
+ }
+ SkDLine line;
+ roots[0] = SkIntersections::VerticalIntercept(line.set(a), x);
+ return between(0, roots[0], 1);
+}
+
+static int quad_intercept_h(const SkPoint a[2], SkScalar , SkScalar y, double* roots) {
+ SkDQuad quad;
+ return SkIntersections::HorizontalIntercept(quad.set(a), y, roots);
+}
+
+static int quad_intercept_v(const SkPoint a[2], SkScalar , SkScalar x, double* roots) {
+ SkDQuad quad;
+ return SkIntersections::VerticalIntercept(quad.set(a), x, roots);
+}
+
+static int conic_intercept_h(const SkPoint a[2], SkScalar w, SkScalar y, double* roots) {
+ SkDConic conic;
+ return SkIntersections::HorizontalIntercept(conic.set(a, w), y, roots);
+}
+
+static int conic_intercept_v(const SkPoint a[2], SkScalar w, SkScalar x, double* roots) {
+ SkDConic conic;
+ return SkIntersections::VerticalIntercept(conic.set(a, w), x, roots);
+}
+
+static int cubic_intercept_h(const SkPoint a[3], SkScalar , SkScalar y, double* roots) {
+ SkDCubic cubic;
+ return cubic.set(a).horizontalIntersect(y, roots);
+}
+
+static int cubic_intercept_v(const SkPoint a[3], SkScalar , SkScalar x, double* roots) {
+ SkDCubic cubic;
+ return cubic.set(a).verticalIntersect(x, roots);
+}
+
+static int (* const CurveIntercept[])(const SkPoint[] , SkScalar , SkScalar , double* ) = {
+ nullptr,
+ nullptr,
+ line_intercept_h,
+ line_intercept_v,
+ quad_intercept_h,
+ quad_intercept_v,
+ conic_intercept_h,
+ conic_intercept_v,
+ cubic_intercept_h,
+ cubic_intercept_v,
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp b/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp
new file mode 100644
index 0000000000..9213df0c3f
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsDebug.cpp
@@ -0,0 +1,3096 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pathops/SkPathOpsDebug.h"
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkString.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkMutex.h"
+#include "src/core/SkPathPriv.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkOpAngle.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsRect.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cstdint>
+#include <cstring>
+
+#if DEBUG_DUMP_VERIFY
+bool SkPathOpsDebug::gDumpOp; // set to true to write op to file before a crash
+bool SkPathOpsDebug::gVerifyOp; // set to true to compare result against regions
+#endif
+
+bool SkPathOpsDebug::gRunFail; // set to true to check for success on tests known to fail
+bool SkPathOpsDebug::gVeryVerbose; // set to true to run extensive checking tests
+
+#define FAIL_IF_COIN(cond, coin) \
+ do { if (cond) log->record(SkPathOpsDebug::kFail_Glitch, coin); } while (false)
+
+#undef FAIL_WITH_NULL_IF
+#define FAIL_WITH_NULL_IF(cond, span) \
+ do { if (cond) log->record(SkPathOpsDebug::kFail_Glitch, span); } while (false)
+
+#define RETURN_FALSE_IF(cond, span) \
+ do { if (cond) log->record(SkPathOpsDebug::kReturnFalse_Glitch, span); \
+ } while (false)
+
+#if DEBUG_SORT
+int SkPathOpsDebug::gSortCountDefault = SK_MaxS32;
+int SkPathOpsDebug::gSortCount;
+#endif
+
+#if DEBUG_ACTIVE_OP
+const char* SkPathOpsDebug::kPathOpStr[] = {"diff", "sect", "union", "xor", "rdiff"};
+#endif
+
+#if defined SK_DEBUG || !FORCE_RELEASE
+
+int SkPathOpsDebug::gContourID = 0;
+int SkPathOpsDebug::gSegmentID = 0;
+
+bool SkPathOpsDebug::ChaseContains(const SkTDArray<SkOpSpanBase* >& chaseArray,
+ const SkOpSpanBase* span) {
+ for (int index = 0; index < chaseArray.size(); ++index) {
+ const SkOpSpanBase* entry = chaseArray[index];
+ if (entry == span) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+#if DEBUG_ACTIVE_SPANS
+SkString SkPathOpsDebug::gActiveSpans;
+#endif
+
+#if DEBUG_COIN
+class SkCoincidentSpans;
+
+SkPathOpsDebug::CoinDict SkPathOpsDebug::gCoinSumChangedDict;
+SkPathOpsDebug::CoinDict SkPathOpsDebug::gCoinSumVisitedDict;
+
+static const int kGlitchType_Count = SkPathOpsDebug::kUnalignedTail_Glitch + 1;
+
+struct SpanGlitch {
+ const SkOpSpanBase* fBase;
+ const SkOpSpanBase* fSuspect;
+ const SkOpSegment* fSegment;
+ const SkOpSegment* fOppSegment;
+ const SkOpPtT* fCoinSpan;
+ const SkOpPtT* fEndSpan;
+ const SkOpPtT* fOppSpan;
+ const SkOpPtT* fOppEndSpan;
+ double fStartT;
+ double fEndT;
+ double fOppStartT;
+ double fOppEndT;
+ SkPoint fPt;
+ SkPathOpsDebug::GlitchType fType;
+
+ void dumpType() const;
+};
+
+struct SkPathOpsDebug::GlitchLog {
+ void init(const SkOpGlobalState* state) {
+ fGlobalState = state;
+ }
+
+ SpanGlitch* recordCommon(GlitchType type) {
+ SpanGlitch* glitch = fGlitches.push();
+ glitch->fBase = nullptr;
+ glitch->fSuspect = nullptr;
+ glitch->fSegment = nullptr;
+ glitch->fOppSegment = nullptr;
+ glitch->fCoinSpan = nullptr;
+ glitch->fEndSpan = nullptr;
+ glitch->fOppSpan = nullptr;
+ glitch->fOppEndSpan = nullptr;
+ glitch->fStartT = SK_ScalarNaN;
+ glitch->fEndT = SK_ScalarNaN;
+ glitch->fOppStartT = SK_ScalarNaN;
+ glitch->fOppEndT = SK_ScalarNaN;
+ glitch->fPt = { SK_ScalarNaN, SK_ScalarNaN };
+ glitch->fType = type;
+ return glitch;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpSpanBase* suspect = NULL) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fSuspect = suspect;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpPtT* ptT) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fCoinSpan = ptT;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkCoincidentSpans* opp = NULL) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = coin->coinPtTEnd();
+ if (opp) {
+ glitch->fOppSpan = opp->coinPtTStart();
+ glitch->fOppEndSpan = opp->coinPtTEnd();
+ }
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base,
+ const SkOpSegment* seg, double t, SkPoint pt) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fSegment = seg;
+ glitch->fStartT = t;
+ glitch->fPt = pt;
+ }
+
+ void record(GlitchType type, const SkOpSpanBase* base, double t,
+ SkPoint pt) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fStartT = t;
+ glitch->fPt = pt;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkOpPtT* coinSpan, const SkOpPtT* endSpan) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = coin->coinPtTEnd();
+ glitch->fEndSpan = endSpan;
+ glitch->fOppSpan = coinSpan;
+ glitch->fOppEndSpan = endSpan;
+ }
+
+ void record(GlitchType type, const SkCoincidentSpans* coin,
+ const SkOpSpanBase* base) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fBase = base;
+ glitch->fCoinSpan = coin->coinPtTStart();
+ glitch->fEndSpan = coin->coinPtTEnd();
+ }
+
+ void record(GlitchType type, const SkOpPtT* ptTS, const SkOpPtT* ptTE,
+ const SkOpPtT* oPtTS, const SkOpPtT* oPtTE) {
+ SpanGlitch* glitch = recordCommon(type);
+ glitch->fCoinSpan = ptTS;
+ glitch->fEndSpan = ptTE;
+ glitch->fOppSpan = oPtTS;
+ glitch->fOppEndSpan = oPtTE;
+ }
+
+    // Record a glitch described by two segments and their t ranges.
+    void record(GlitchType type, const SkOpSegment* seg, double startT,
+            double endT, const SkOpSegment* oppSeg, double oppStartT, double oppEndT) {
+        SpanGlitch* glitch = recordCommon(type);
+        glitch->fSegment = seg;
+        glitch->fStartT = startT;
+        glitch->fEndT = endT;
+        glitch->fOppSegment = oppSeg;
+        glitch->fOppStartT = oppStartT;
+        glitch->fOppEndT = oppEndT;
+    }
+
+    // Record a glitch for a segment and one of its spans.
+    void record(GlitchType type, const SkOpSegment* seg,
+            const SkOpSpan* span) {
+        SpanGlitch* glitch = recordCommon(type);
+        glitch->fSegment = seg;
+        glitch->fBase = span;
+    }
+
+    // Record a glitch for a t value at a span base.
+    void record(GlitchType type, double t, const SkOpSpanBase* span) {
+        SpanGlitch* glitch = recordCommon(type);
+        glitch->fStartT = t;
+        glitch->fBase = span;
+    }
+
+    // Record a glitch involving a whole segment.
+    void record(GlitchType type, const SkOpSegment* seg) {
+        SpanGlitch* glitch = recordCommon(type);
+        glitch->fSegment = seg;
+    }
+
+    // Record a glitch for a coincident pair whose end is an explicit ptT.
+    void record(GlitchType type, const SkCoincidentSpans* coin,
+            const SkOpPtT* ptT) {
+        SpanGlitch* glitch = recordCommon(type);
+        glitch->fCoinSpan = coin->coinPtTStart();
+        glitch->fEndSpan = ptT;
+    }
+
+ SkTDArray<SpanGlitch> fGlitches;
+ const SkOpGlobalState* fGlobalState;
+};
+
+
+// Merge every entry of 'dict' into this dictionary (deduplicated by the
+// single-entry add() below).
+void SkPathOpsDebug::CoinDict::add(const SkPathOpsDebug::CoinDict& dict) {
+    int count = dict.fDict.count();
+    for (int index = 0; index < count; ++index) {
+        this->add(dict.fDict[index]);
+    }
+}
+
+// Add a coin-dictionary entry, deduplicating on (iteration, line number).
+// An existing uninitialized entry has its glitch type upgraded in place;
+// otherwise a duplicate key is a no-op and a new key is appended.
+void SkPathOpsDebug::CoinDict::add(const CoinDictEntry& key) {
+    int count = fDict.count();
+    for (int index = 0; index < count; ++index) {
+        CoinDictEntry* entry = &fDict[index];
+        if (entry->fIteration == key.fIteration && entry->fLineNumber == key.fLineNumber) {
+            // same call site implies same function name
+            SkASSERT(!strcmp(entry->fFunctionName, key.fFunctionName));
+            if (entry->fGlitchType == kUninitialized_Glitch) {
+                entry->fGlitchType = key.fGlitchType;
+            }
+            return;
+        }
+    }
+    *fDict.append() = key;
+}
+
+#endif
+
+#if DEBUG_COIN
+// Walk every contour and log any undetected coincidence into 'glitches'.
+static void missing_coincidence(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+    const SkOpContour* contour = contourList;
+    // bool result = false;
+    do {
+        /* result |= */ contour->debugMissingCoincidence(glitches);
+    } while ((contour = contour->next()));
+    return;
+}
+
+// Walk every contour and log spans that would be merged by moveMultiples().
+static void move_multiples(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+    const SkOpContour* contour = contourList;
+    do {
+        contour->debugMoveMultiples(glitches);
+    } while ((contour = contour->next()));
+    return;
+}
+
+// Walk every contour and log spans that would be coalesced by moveNearby().
+static void move_nearby(SkPathOpsDebug::GlitchLog* glitches, const SkOpContourHead* contourList) {
+    const SkOpContour* contour = contourList;
+    do {
+        contour->debugMoveNearby(glitches);
+    } while ((contour = contour->next()));
+}
+
+
+#endif
+
+#if DEBUG_COIN
+// Before the next coincidence pass runs, replay its debug counterpart
+// (selected by the function name stored in fCoinDictEntry) and, if it
+// would change anything, record the first resulting glitch type in the
+// changed-coincidence dictionary.
+void SkOpGlobalState::debugAddToCoinChangedDict() {
+
+#if DEBUG_COINCIDENCE
+    SkPathOpsDebug::CheckHealth(fContourHead);
+#endif
+    // see if next coincident operation makes a change; if so, record it
+    SkPathOpsDebug::GlitchLog glitches;
+    const char* funcName = fCoinDictEntry.fFunctionName;
+    if (!strcmp("calc_angles", funcName)) {
+        //
+    } else if (!strcmp("missing_coincidence", funcName)) {
+        missing_coincidence(&glitches, fContourHead);
+    } else if (!strcmp("move_multiples", funcName)) {
+        move_multiples(&glitches, fContourHead);
+    } else if (!strcmp("move_nearby", funcName)) {
+        move_nearby(&glitches, fContourHead);
+    } else if (!strcmp("addExpanded", funcName)) {
+        fCoincidence->debugAddExpanded(&glitches);
+    } else if (!strcmp("addMissing", funcName)) {
+        bool added;
+        fCoincidence->debugAddMissing(&glitches, &added);
+    } else if (!strcmp("addEndMovedSpans", funcName)) {
+        fCoincidence->debugAddEndMovedSpans(&glitches);
+    } else if (!strcmp("correctEnds", funcName)) {
+        fCoincidence->debugCorrectEnds(&glitches);
+    } else if (!strcmp("expand", funcName)) {
+        fCoincidence->debugExpand(&glitches);
+    } else if (!strcmp("findOverlaps", funcName)) {
+        //
+    } else if (!strcmp("mark", funcName)) {
+        fCoincidence->debugMark(&glitches);
+    } else if (!strcmp("apply", funcName)) {
+        //
+    } else {
+        SkASSERT(0);   // add missing case
+    }
+    if (glitches.fGlitches.count()) {
+        // only the first glitch is remembered for this call site
+        fCoinDictEntry.fGlitchType = glitches.fGlitches[0].fType;
+    }
+    fCoinChangedDict.add(fCoinDictEntry);
+}
+#endif
+
+// Dump the active (not-done) spans of every contour, but only when the
+// output differs from the previous dump -- the last dump is cached in
+// gActiveSpans to suppress repeats.
+void SkPathOpsDebug::ShowActiveSpans(SkOpContourHead* contourList) {
+#if DEBUG_ACTIVE_SPANS
+    SkString str;
+    SkOpContour* contour = contourList;
+    do {
+        contour->debugShowActiveSpans(&str);
+    } while ((contour = contour->next()));
+    if (!gActiveSpans.equals(str)) {
+        // print line by line so very long dumps aren't truncated by SkDebugf
+        const char* s = str.c_str();
+        const char* end;
+        while ((end = strchr(s, '\n'))) {
+            SkDebugf("%.*s", (int) (end - s + 1), s);
+            s = end + 1;
+        }
+        gActiveSpans.set(str);
+    }
+#endif
+}
+
+#if DEBUG_COINCIDENCE || DEBUG_COIN
+// Run every coincidence debug check over the contour list, then print a
+// one-line summary mask (one column per glitch type) followed by a detail
+// line for each recorded glitch.
+void SkPathOpsDebug::CheckHealth(SkOpContourHead* contourList) {
+#if DEBUG_COINCIDENCE
+    contourList->globalState()->debugSetCheckHealth(true);
+#endif
+#if DEBUG_COIN
+    GlitchLog glitches;
+    const SkOpContour* contour = contourList;
+    const SkOpCoincidence* coincidence = contour->globalState()->coincidence();
+    coincidence->debugCheckValid(&glitches); // don't call validate; spans may be inconsistent
+    do {
+        contour->debugCheckHealth(&glitches);
+        contour->debugMissingCoincidence(&glitches);
+    } while ((contour = contour->next()));
+    bool added;
+    coincidence->debugAddMissing(&glitches, &added);
+    coincidence->debugExpand(&glitches);
+    coincidence->debugAddExpanded(&glitches);
+    coincidence->debugMark(&glitches);
+    // build a bit mask of which glitch types were seen at all
+    unsigned mask = 0;
+    for (int index = 0; index < glitches.fGlitches.count(); ++index) {
+        const SpanGlitch& glitch = glitches.fGlitches[index];
+        mask |= 1 << glitch.fType;
+    }
+    // summary row: 'x' for a seen glitch type, '-' otherwise
+    for (int index = 0; index < kGlitchType_Count; ++index) {
+        SkDebugf(mask & (1 << index) ? "x" : "-");
+    }
+    SkDebugf("  %s\n", contourList->globalState()->debugCoinDictEntry().fFunctionName);
+    // detail rows: print only the fields each glitch actually filled in
+    for (int index = 0; index < glitches.fGlitches.count(); ++index) {
+        const SpanGlitch& glitch = glitches.fGlitches[index];
+        SkDebugf("%02d: ", index);
+        if (glitch.fBase) {
+            SkDebugf(" seg/base=%d/%d", glitch.fBase->segment()->debugID(),
+                    glitch.fBase->debugID());
+        }
+        if (glitch.fSuspect) {
+            SkDebugf(" seg/base=%d/%d", glitch.fSuspect->segment()->debugID(),
+                    glitch.fSuspect->debugID());
+        }
+        if (glitch.fSegment) {
+            SkDebugf(" segment=%d", glitch.fSegment->debugID());
+        }
+        if (glitch.fCoinSpan) {
+            SkDebugf(" coinSeg/Span/PtT=%d/%d/%d", glitch.fCoinSpan->segment()->debugID(),
+                    glitch.fCoinSpan->span()->debugID(), glitch.fCoinSpan->debugID());
+        }
+        if (glitch.fEndSpan) {
+            SkDebugf(" endSpan=%d", glitch.fEndSpan->debugID());
+        }
+        if (glitch.fOppSpan) {
+            SkDebugf(" oppSeg/Span/PtT=%d/%d/%d", glitch.fOppSpan->segment()->debugID(),
+                    glitch.fOppSpan->span()->debugID(), glitch.fOppSpan->debugID());
+        }
+        if (glitch.fOppEndSpan) {
+            SkDebugf(" oppEndSpan=%d", glitch.fOppEndSpan->debugID());
+        }
+        // NaN marks "not set" for the t and point fields
+        if (!SkScalarIsNaN(glitch.fStartT)) {
+            SkDebugf(" startT=%g", glitch.fStartT);
+        }
+        if (!SkScalarIsNaN(glitch.fEndT)) {
+            SkDebugf(" endT=%g", glitch.fEndT);
+        }
+        if (glitch.fOppSegment) {
+            SkDebugf(" segment=%d", glitch.fOppSegment->debugID());
+        }
+        if (!SkScalarIsNaN(glitch.fOppStartT)) {
+            SkDebugf(" oppStartT=%g", glitch.fOppStartT);
+        }
+        if (!SkScalarIsNaN(glitch.fOppEndT)) {
+            SkDebugf(" oppEndT=%g", glitch.fOppEndT);
+        }
+        if (!SkScalarIsNaN(glitch.fPt.fX) || !SkScalarIsNaN(glitch.fPt.fY)) {
+            SkDebugf(" pt=%g,%g", glitch.fPt.fX, glitch.fPt.fY);
+        }
+        DumpGlitchType(glitch.fType);
+        SkDebugf("\n");
+    }
+#if DEBUG_COINCIDENCE
+    contourList->globalState()->debugSetCheckHealth(false);
+#endif
+#if 01 && DEBUG_ACTIVE_SPANS
+//    SkDebugf("active after %s:\n", id);
+    ShowActiveSpans(contourList);
+#endif
+#endif
+}
+#endif
+
+#if DEBUG_COIN
+// Print a human-readable name for a glitch type (no trailing newline).
+// NOTE(review): " AAddOrOverlap" has a doubled 'A' -- this matches the
+// upstream Skia string; left as-is since output text is behavior.
+void SkPathOpsDebug::DumpGlitchType(GlitchType glitchType) {
+    switch (glitchType) {
+        case kAddCorruptCoin_Glitch: SkDebugf(" AddCorruptCoin"); break;
+        case kAddExpandedCoin_Glitch: SkDebugf(" AddExpandedCoin"); break;
+        case kAddExpandedFail_Glitch: SkDebugf(" AddExpandedFail"); break;
+        case kAddIfCollapsed_Glitch: SkDebugf(" AddIfCollapsed"); break;
+        case kAddIfMissingCoin_Glitch: SkDebugf(" AddIfMissingCoin"); break;
+        case kAddMissingCoin_Glitch: SkDebugf(" AddMissingCoin"); break;
+        case kAddMissingExtend_Glitch: SkDebugf(" AddMissingExtend"); break;
+        case kAddOrOverlap_Glitch: SkDebugf(" AAddOrOverlap"); break;
+        case kCollapsedCoin_Glitch: SkDebugf(" CollapsedCoin"); break;
+        case kCollapsedDone_Glitch: SkDebugf(" CollapsedDone"); break;
+        case kCollapsedOppValue_Glitch: SkDebugf(" CollapsedOppValue"); break;
+        case kCollapsedSpan_Glitch: SkDebugf(" CollapsedSpan"); break;
+        case kCollapsedWindValue_Glitch: SkDebugf(" CollapsedWindValue"); break;
+        case kCorrectEnd_Glitch: SkDebugf(" CorrectEnd"); break;
+        case kDeletedCoin_Glitch: SkDebugf(" DeletedCoin"); break;
+        case kExpandCoin_Glitch: SkDebugf(" ExpandCoin"); break;
+        case kFail_Glitch: SkDebugf(" Fail"); break;
+        case kMarkCoinEnd_Glitch: SkDebugf(" MarkCoinEnd"); break;
+        case kMarkCoinInsert_Glitch: SkDebugf(" MarkCoinInsert"); break;
+        case kMarkCoinMissing_Glitch: SkDebugf(" MarkCoinMissing"); break;
+        case kMarkCoinStart_Glitch: SkDebugf(" MarkCoinStart"); break;
+        case kMergeMatches_Glitch: SkDebugf(" MergeMatches"); break;
+        case kMissingCoin_Glitch: SkDebugf(" MissingCoin"); break;
+        case kMissingDone_Glitch: SkDebugf(" MissingDone"); break;
+        case kMissingIntersection_Glitch: SkDebugf(" MissingIntersection"); break;
+        case kMoveMultiple_Glitch: SkDebugf(" MoveMultiple"); break;
+        case kMoveNearbyClearAll_Glitch: SkDebugf(" MoveNearbyClearAll"); break;
+        case kMoveNearbyClearAll2_Glitch: SkDebugf(" MoveNearbyClearAll2"); break;
+        case kMoveNearbyMerge_Glitch: SkDebugf(" MoveNearbyMerge"); break;
+        case kMoveNearbyMergeFinal_Glitch: SkDebugf(" MoveNearbyMergeFinal"); break;
+        case kMoveNearbyRelease_Glitch: SkDebugf(" MoveNearbyRelease"); break;
+        case kMoveNearbyReleaseFinal_Glitch: SkDebugf(" MoveNearbyReleaseFinal"); break;
+        case kReleasedSpan_Glitch: SkDebugf(" ReleasedSpan"); break;
+        case kReturnFalse_Glitch: SkDebugf(" ReturnFalse"); break;
+        case kUnaligned_Glitch: SkDebugf(" Unaligned"); break;
+        case kUnalignedHead_Glitch: SkDebugf(" UnalignedHead"); break;
+        case kUnalignedTail_Glitch: SkDebugf(" UnalignedTail"); break;
+        case kUninitialized_Glitch: break;
+        default: SkASSERT(0);
+    }
+}
+#endif
+
+#if defined SK_DEBUG || !FORCE_RELEASE
+// Rewrite C-style scientific notation in place so Mathematica can read it:
+// a digit followed by 'e' (e.g. "1e-5") becomes "1*^-5". Each rewrite
+// inserts two characters; the edit is skipped if it would overflow the
+// buffer (nul terminator included in the memmove).
+void SkPathOpsDebug::MathematicaIze(char* str, size_t bufferLen) {
+    size_t len = strlen(str);
+    bool num = false;
+    for (size_t idx = 0; idx < len; ++idx) {
+        if (num && str[idx] == 'e') {
+            if (len + 2 >= bufferLen) {
+                return;
+            }
+            // shift the tail right by one to make room for the second char
+            memmove(&str[idx + 2], &str[idx + 1], len - idx);
+            str[idx] = '*';
+            str[idx + 1] = '^';
+            ++len;
+        }
+        // 'e' only counts as an exponent if preceded by a digit
+        num = str[idx] >= '0' && str[idx] <= '9';
+    }
+}
+
+// A winding value is considered valid if it is well away from the int
+// extremes (SK_MinS32 doubles as the "unknown" sentinel elsewhere).
+bool SkPathOpsDebug::ValidWind(int wind) {
+    return wind > SK_MinS32 + 0xFFFF && wind < SK_MaxS32 - 0xFFFF;
+}
+
+// Print a winding value; the SK_MinS32 sentinel prints as '?'.
+void SkPathOpsDebug::WindingPrintf(int wind) {
+    if (wind == SK_MinS32) {
+        SkDebugf("?");
+    } else {
+        SkDebugf("%d", wind);
+    }
+}
+#endif // defined SK_DEBUG || !FORCE_RELEASE
+
+
+// Emit the opening of a generated regression-test function. The
+// "skphealth_com76" comparison appears to be a leftover debugging hook
+// for one specific test -- confirm before removing.
+static void show_function_header(const char* functionName) {
+    SkDebugf("\nstatic void %s(skiatest::Reporter* reporter, const char* filename) {\n", functionName);
+    if (strcmp("skphealth_com76", functionName) == 0) {
+        SkDebugf("found it\n");
+    }
+}
+
+// Textual names for SkPathOp values, indexed by the enum's numeric value.
+// NOTE(review): "kXOR_PathOp" lacks the "Sk" the other entries carry; this
+// matches upstream Skia's generated-test output -- confirm before changing.
+static const char* gOpStrs[] = {
+    "kDifference_SkPathOp",
+    "kIntersect_SkPathOp",
+    "kUnion_SkPathOp",
+    "kXOR_PathOp",
+    "kReverseDifference_SkPathOp",
+};
+
+// Map a path op to its printable name (no bounds check; op must be valid).
+const char* SkPathOpsDebug::OpStr(SkPathOp op) {
+    return gOpStrs[op];
+}
+
+// Emit the testPathOp() call and the closing brace of a generated test.
+static void show_op(SkPathOp op, const char* pathOne, const char* pathTwo) {
+    SkDebugf("    testPathOp(reporter, %s, %s, %s, filename);\n", pathOne, pathTwo, gOpStrs[op]);
+    SkDebugf("}\n");
+}
+
+// Dump a complete, self-contained regression test for one path op. The
+// function-local static mutex keeps concurrent dumps from interleaving.
+void SkPathOpsDebug::ShowPath(const SkPath& a, const SkPath& b, SkPathOp shapeOp,
+        const char* testName) {
+    static SkMutex& mutex = *(new SkMutex);
+
+    SkAutoMutexExclusive ac(mutex);
+    show_function_header(testName);
+    ShowOnePath(a, "path", true);
+    ShowOnePath(b, "pathB", true);
+    show_op(shapeOp, "path", "pathB");
+}
+
+#if DEBUG_COIN
+
+// Fold this op's per-run coin dictionaries into the process-wide totals,
+// serialized by a static mutex since the globals are shared across threads.
+void SkOpGlobalState::debugAddToGlobalCoinDicts() {
+    static SkMutex& mutex = *(new SkMutex);
+    SkAutoMutexExclusive ac(mutex);
+    SkPathOpsDebug::gCoinSumChangedDict.add(fCoinChangedDict);
+    SkPathOpsDebug::gCoinSumVisitedDict.add(fCoinVisitedDict);
+}
+
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+// If this intersection ran more loop iterations than the recorded worst
+// case for any loop category, remember its verbs, points, and weights as
+// the new worst case, then reset the intersection's counters.
+void SkOpGlobalState::debugAddLoopCount(SkIntersections* i, const SkIntersectionHelper& wt,
+        const SkIntersectionHelper& wn) {
+    for (int index = 0; index < (int) std::size(fDebugLoopCount); ++index) {
+        SkIntersections::DebugLoop looper = (SkIntersections::DebugLoop) index;
+        if (fDebugLoopCount[index] >= i->debugLoopCount(looper)) {
+            continue;
+        }
+        fDebugLoopCount[index] = i->debugLoopCount(looper);
+        // each category stores a curve pair: slots [2n] and [2n+1]
+        fDebugWorstVerb[index * 2] = wt.segment()->verb();
+        fDebugWorstVerb[index * 2 + 1] = wn.segment()->verb();
+        // clear first; a curve may use fewer than its 4 point slots
+        sk_bzero(&fDebugWorstPts[index * 8], sizeof(SkPoint) * 8);
+        memcpy(&fDebugWorstPts[index * 2 * 4], wt.pts(),
+                (SkPathOpsVerbToPoints(wt.segment()->verb()) + 1) * sizeof(SkPoint));
+        memcpy(&fDebugWorstPts[(index * 2 + 1) * 4], wn.pts(),
+                (SkPathOpsVerbToPoints(wn.segment()->verb()) + 1) * sizeof(SkPoint));
+        fDebugWorstWeight[index * 2] = wt.weight();
+        fDebugWorstWeight[index * 2 + 1] = wn.weight();
+    }
+    i->debugResetLoopCount();
+}
+
+// Merge another state's worst-case loop records into this one, keeping
+// whichever has the higher count per category, then reset 'local'.
+void SkOpGlobalState::debugDoYourWorst(SkOpGlobalState* local) {
+    for (int index = 0; index < (int) std::size(fDebugLoopCount); ++index) {
+        if (fDebugLoopCount[index] >= local->fDebugLoopCount[index]) {
+            continue;
+        }
+        fDebugLoopCount[index] = local->fDebugLoopCount[index];
+        fDebugWorstVerb[index * 2] = local->fDebugWorstVerb[index * 2];
+        fDebugWorstVerb[index * 2 + 1] = local->fDebugWorstVerb[index * 2 + 1];
+        memcpy(&fDebugWorstPts[index * 2 * 4], &local->fDebugWorstPts[index * 2 * 4],
+                sizeof(SkPoint) * 8);
+        fDebugWorstWeight[index * 2] = local->fDebugWorstWeight[index * 2];
+        fDebugWorstWeight[index * 2 + 1] = local->fDebugWorstWeight[index * 2 + 1];
+    }
+    local->debugResetLoopCounts();
+}
+
+// Print one recorded worst-case curve ('pts' is the first of an array of
+// points). A zero verb means the slot is empty and nothing is printed.
+static void dump_curve(SkPath::Verb verb, const SkPoint& pts, float weight) {
+    if (!verb) {
+        return;
+    }
+    const char* verbs[] = { "", "line", "quad", "conic", "cubic" };
+    SkDebugf("%s: {{", verbs[verb]);
+    int ptCount = SkPathOpsVerbToPoints(verb);
+    for (int index = 0; index <= ptCount; ++index) {
+        SkDPoint::Dump((&pts)[index]);
+        // NOTE(review): separator stops at ptCount - 2, so the last two
+        // points print unseparated; matches upstream -- confirm intent.
+        if (index < ptCount - 1) {
+            SkDebugf(", ");
+        }
+    }
+    SkDebugf("}");
+    if (weight != 1) {
+        SkDebugf(", ");
+        if (weight == floorf(weight)) {
+            SkDebugf("%.0f", weight);
+        } else {
+            SkDebugf("%1.9gf", weight);
+        }
+    }
+    SkDebugf("}\n");
+}
+
+// Print each loop category's count and its recorded worst-case curve pair.
+void SkOpGlobalState::debugLoopReport() {
+    const char* loops[] = { "iterations", "coinChecks", "perpCalcs" };
+    SkDebugf("\n");
+    for (int index = 0; index < (int) std::size(fDebugLoopCount); ++index) {
+        SkDebugf("%s: %d\n", loops[index], fDebugLoopCount[index]);
+        dump_curve(fDebugWorstVerb[index * 2], fDebugWorstPts[index * 2 * 4],
+                fDebugWorstWeight[index * 2]);
+        dump_curve(fDebugWorstVerb[index * 2 + 1], fDebugWorstPts[(index * 2 + 1) * 4],
+                fDebugWorstWeight[index * 2 + 1]);
+    }
+}
+
+// Zero all loop-count bookkeeping arrays.
+void SkOpGlobalState::debugResetLoopCounts() {
+    sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+    sk_bzero(fDebugWorstVerb, sizeof(fDebugWorstVerb));
+    sk_bzero(fDebugWorstPts, sizeof(fDebugWorstPts));
+    sk_bzero(fDebugWorstWeight, sizeof(fDebugWorstWeight));
+}
+#endif
+
+// Report whether the global "expect this run to fail" debug flag is set.
+bool SkOpGlobalState::DebugRunFail() {
+    return SkPathOpsDebug::gRunFail;
+}
+
+// this is const so it can be called by const methods that otherwise don't alter state
+#if DEBUG_VALIDATE || DEBUG_COIN
+// Record entry into a new pathops phase: update the validation phase
+// (DEBUG_VALIDATE) and/or the coin dictionary entry (DEBUG_COIN) with the
+// caller's function name, iteration, and line (via DEBUG_COIN_DECLARE_PARAMS).
+// const_cast is deliberate -- see the comment above this function.
+void SkOpGlobalState::debugSetPhase(const char* funcName  DEBUG_COIN_DECLARE_PARAMS()) const {
+    auto writable = const_cast<SkOpGlobalState*>(this);
+#if DEBUG_VALIDATE
+    writable->setPhase(phase);
+#endif
+#if DEBUG_COIN
+    SkPathOpsDebug::CoinDictEntry* entry = &writable->fCoinDictEntry;
+    writable->fPreviousFuncName = entry->fFunctionName;
+    entry->fIteration = iteration;
+    entry->fLineNumber = lineNo;
+    entry->fGlitchType = SkPathOpsDebug::kUninitialized_Glitch;
+    entry->fFunctionName = funcName;
+    writable->fCoinVisitedDict.add(*entry);
+    writable->debugAddToCoinChangedDict();
+#endif
+}
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+// Increment the loop counter for one debug-loop category.
+void SkIntersections::debugBumpLoopCount(DebugLoop index) {
+    fDebugLoopCount[index]++;
+}
+
+// Read the loop counter for one debug-loop category.
+int SkIntersections::debugLoopCount(DebugLoop index) const {
+    return fDebugLoopCount[index];
+}
+
+// Zero all per-intersection loop counters.
+void SkIntersections::debugResetLoopCount() {
+    sk_bzero(fDebugLoopCount, sizeof(fDebugLoopCount));
+}
+#endif
+
+// Degree-elevate this quad to an equivalent cubic. Note the ordering:
+// cubic[2] temporarily holds the quad's middle control point (fPts[1]),
+// is read while computing cubic[1], then is recomputed in place.
+SkDCubic SkDQuad::debugToCubic() const {
+    SkDCubic cubic;
+    cubic[0] = fPts[0];
+    cubic[2] = fPts[1];
+    cubic[3] = fPts[2];
+    cubic[1].fX = (cubic[0].fX + cubic[2].fX * 2) / 3;
+    cubic[1].fY = (cubic[0].fY + cubic[2].fY * 2) / 3;
+    cubic[2].fX = (cubic[3].fX + cubic[2].fX * 2) / 3;
+    cubic[2].fY = (cubic[3].fY + cubic[2].fY * 2) / 3;
+    return cubic;
+}
+
+// Set the quad's points directly, bypassing the global-state bookkeeping
+// that the non-debug setter performs.
+void SkDQuad::debugSet(const SkDPoint* pts) {
+    memcpy(fPts, pts, sizeof(fPts));
+    SkDEBUGCODE(fDebugGlobalState = nullptr);
+}
+
+// Set the cubic's points directly; clears the debug global-state pointer.
+void SkDCubic::debugSet(const SkDPoint* pts) {
+    memcpy(fPts, pts, sizeof(fPts));
+    SkDEBUGCODE(fDebugGlobalState = nullptr);
+}
+
+// Set the conic's underlying quad points and its weight.
+void SkDConic::debugSet(const SkDPoint* pts, SkScalar weight) {
+    fPts.debugSet(pts);
+    fWeight = weight;
+}
+
+// Poison all four edges with NaN so use-before-set is detectable.
+void SkDRect::debugInit() {
+    fLeft = fTop = fRight = fBottom = SK_ScalarNaN;
+}
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with addT()
+// Side-effect-free mirror of addT(): walk the span list looking for an
+// existing ptT matching t/point; return it if found, or nullptr where the
+// real addT() would insert a new span (noted via setAllocatedOpSpan()).
+// Commented-out lines are kept to stay line-for-line with addT().
+ const SkOpPtT* SkOpSegment::debugAddT(double t, SkPathOpsDebug::GlitchLog* log) const {
+    debugValidate();
+    SkPoint pt = this->ptAtT(t);
+    const SkOpSpanBase* span = &fHead;
+    do {
+        const SkOpPtT* result = span->ptT();
+        if (t == result->fT || this->match(result, this, t, pt)) {
+//             span->bumpSpanAdds();
+            return result;
+        }
+        if (t < result->fT) {
+            const SkOpSpan* prev = result->span()->prev();
+            FAIL_WITH_NULL_IF(!prev, span);
+            // marks in global state that new op span has been allocated
+            this->globalState()->setAllocatedOpSpan();
+//             span->init(this, prev, t, pt);
+            this->debugValidate();
+// #if DEBUG_ADD_T
+//             SkDebugf("%s insert t=%1.9g segID=%d spanID=%d\n", __FUNCTION__, t,
+//                     span->segment()->debugID(), span->debugID());
+// #endif
+//             span->bumpSpanAdds();
+            return nullptr;
+        }
+        FAIL_WITH_NULL_IF(span != &fTail, span);
+    } while ((span = span->upCast()->next()));
+    SkASSERT(0);
+    return nullptr;  // we never get here, but need this to satisfy compiler
+}
+#endif
+
+#if DEBUG_ANGLE
+// Walk every span in this segment and, for each from/to angle that claims
+// coincidence, run the (expensive) near-coincidence check on it.
+void SkOpSegment::debugCheckAngleCoin() const {
+    const SkOpSpanBase* base = &fHead;
+    const SkOpSpan* span;
+    do {
+        const SkOpAngle* angle = base->fromAngle();
+        if (angle && angle->debugCheckCoincidence()) {
+            angle->debugCheckNearCoincidence();
+        }
+        if (base->final()) {
+            break;
+        }
+        span = base->upCast();
+        angle = span->toAngle();
+        if (angle && angle->debugCheckCoincidence()) {
+            angle->debugCheckNearCoincidence();
+        }
+    } while ((base = span->next()));
+}
+#endif
+
+#if DEBUG_COIN
+// this mimics the order of the checks in handle coincidence
+// Run the per-segment checks in the same order handle-coincidence uses.
+void SkOpSegment::debugCheckHealth(SkPathOpsDebug::GlitchLog* glitches) const {
+    debugMoveMultiples(glitches);
+    debugMoveNearby(glitches);
+    debugMissingCoincidence(glitches);
+}
+
+// commented-out lines keep this in sync with clearAll()
+// Mirror of clearAll(): log what clearing every span (and releasing the
+// segment's coincidences) would report, without mutating anything.
+void SkOpSegment::debugClearAll(SkPathOpsDebug::GlitchLog* glitches) const {
+    const SkOpSpan* span = &fHead;
+    do {
+        this->debugClearOne(span, glitches);
+    } while ((span = span->next()->upCastable()));
+    this->globalState()->coincidence()->debugRelease(glitches, this);
+}
+
+// commented-out lines keep this in sync with clearOne()
+// Mirror of clearOne(): log each field that clearing the span would change.
+void SkOpSegment::debugClearOne(const SkOpSpan* span, SkPathOpsDebug::GlitchLog* glitches) const {
+    if (span->windValue()) glitches->record(SkPathOpsDebug::kCollapsedWindValue_Glitch, span);
+    if (span->oppValue()) glitches->record(SkPathOpsDebug::kCollapsedOppValue_Glitch, span);
+    if (!span->done()) glitches->record(SkPathOpsDebug::kCollapsedDone_Glitch, span);
+}
+#endif
+
+// Return the single to-angle on this segment; asserts that exactly one
+// span carries one.
+SkOpAngle* SkOpSegment::debugLastAngle() {
+    SkOpAngle* result = nullptr;
+    SkOpSpan* span = this->head();
+    do {
+        if (span->toAngle()) {
+            SkASSERT(!result);
+            result = span->toAngle();
+        }
+    } while ((span = span->next()->upCastable()));
+    SkASSERT(result);
+    return result;
+}
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with ClearVisited
+// Mirror of ClearVisited: clear the debug-visited flag on every segment
+// reachable through the ptT rings starting at 'span'.
+void SkOpSegment::DebugClearVisited(const SkOpSpanBase* span) {
+    // reset visited flag back to false
+    do {
+        const SkOpPtT* ptT = span->ptT(), * stopPtT = ptT;
+        while ((ptT = ptT->next()) != stopPtT) {
+            const SkOpSegment* opp = ptT->segment();
+            opp->resetDebugVisited();
+        }
+    } while (!span->final() && (span = span->upCast()->next()));
+}
+#endif
+
+#if DEBUG_COIN
+// commented-out lines keep this in sync with missingCoincidence()
+// look for pairs of undetected coincident curves
+// assumes that segments going in have visited flag clear
+// Even though pairs of curves correct detect coincident runs, a run may be missed
+// if the coincidence is a product of multiple intersections. For instance, given
+// curves A, B, and C:
+// A-B intersect at a point 1; A-C and B-C intersect at point 2, so near
+// the end of C that the intersection is replaced with the end of C.
+// Even though A-B correctly do not detect an intersection at point 2,
+// the resulting run from point 1 to point 2 is coincident on A and B.
+// Mirror of missingCoincidence(): scan this segment's spans for an opposite
+// segment seen twice (once earlier, once here) whose shared run is not yet
+// registered as coincident, and log it. Does not mutate; commented-out
+// lines keep it line-for-line with the real implementation.
+void SkOpSegment::debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+    if (this->done()) {
+        return;
+    }
+    const SkOpSpan* prior = nullptr;
+    const SkOpSpanBase* spanBase = &fHead;
+//    bool result = false;
+    do {
+        const SkOpPtT* ptT = spanBase->ptT(), * spanStopPtT = ptT;
+        SkASSERT(ptT->span() == spanBase);
+        while ((ptT = ptT->next()) != spanStopPtT) {
+            if (ptT->deleted()) {
+                continue;
+            }
+            const SkOpSegment* opp = ptT->span()->segment();
+            if (opp->done()) {
+                continue;
+            }
+            // when opp is encountered the 1st time, continue; on 2nd encounter, look for coincidence
+            if (!opp->debugVisited()) {
+                continue;
+            }
+            if (spanBase == &fHead) {
+                continue;
+            }
+            if (ptT->segment() == this) {
+                continue;
+            }
+            const SkOpSpan* span = spanBase->upCastable();
+            // FIXME?: this assumes that if the opposite segment is coincident then no more
+            // coincidence needs to be detected. This may not be true.
+            if (span && span->segment() != opp && span->containsCoincidence(opp)) {  // debug has additional condition since it may be called before inner duplicate points have been deleted
+                continue;
+            }
+            if (spanBase->segment() != opp && spanBase->containsCoinEnd(opp)) {  // debug has additional condition since it may be called before inner duplicate points have been deleted
+                continue;
+            }
+            const SkOpPtT* priorPtT = nullptr, * priorStopPtT;
+            // find prior span containing opp segment
+            const SkOpSegment* priorOpp = nullptr;
+            const SkOpSpan* priorTest = spanBase->prev();
+            while (!priorOpp && priorTest) {
+                priorStopPtT = priorPtT = priorTest->ptT();
+                while ((priorPtT = priorPtT->next()) != priorStopPtT) {
+                    if (priorPtT->deleted()) {
+                        continue;
+                    }
+                    const SkOpSegment* segment = priorPtT->span()->segment();
+                    if (segment == opp) {
+                        prior = priorTest;
+                        priorOpp = opp;
+                        break;
+                    }
+                }
+                priorTest = priorTest->prev();
+            }
+            if (!priorOpp) {
+                continue;
+            }
+            if (priorPtT == ptT) {
+                continue;
+            }
+            const SkOpPtT* oppStart = prior->ptT();
+            const SkOpPtT* oppEnd = spanBase->ptT();
+            // normalize so priorPtT->fT <= ptT->fT for the checks below
+            bool swapped = priorPtT->fT > ptT->fT;
+            if (swapped) {
+                using std::swap;
+                swap(priorPtT, ptT);
+                swap(oppStart, oppEnd);
+            }
+            const SkOpCoincidence* coincidence = this->globalState()->coincidence();
+            const SkOpPtT* rootPriorPtT = priorPtT->span()->ptT();
+            const SkOpPtT* rootPtT = ptT->span()->ptT();
+            const SkOpPtT* rootOppStart = oppStart->span()->ptT();
+            const SkOpPtT* rootOppEnd = oppEnd->span()->ptT();
+            if (coincidence->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd)) {
+                goto swapBack;
+            }
+            if (testForCoincidence(rootPriorPtT, rootPtT, prior, spanBase, opp)) {
+            // mark coincidence
+#if DEBUG_COINCIDENCE_VERBOSE
+//                 SkDebugf("%s coinSpan=%d endSpan=%d oppSpan=%d oppEndSpan=%d\n", __FUNCTION__,
+//                         rootPriorPtT->debugID(), rootPtT->debugID(), rootOppStart->debugID(),
+//                         rootOppEnd->debugID());
+#endif
+                log->record(SkPathOpsDebug::kMissingCoin_Glitch, priorPtT, ptT, oppStart, oppEnd);
+                //   coincidences->add(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+                // }
+#if DEBUG_COINCIDENCE
+//                SkASSERT(coincidences->contains(rootPriorPtT, rootPtT, rootOppStart, rootOppEnd);
+#endif
+                // result = true;
+            }
+    swapBack:
+            if (swapped) {
+                using std::swap;
+                swap(priorPtT, ptT);
+            }
+        }
+    } while ((spanBase = spanBase->final() ? nullptr : spanBase->upCast()->next()));
+    DebugClearVisited(&fHead);
+    return;
+}
+
+// commented-out lines keep this in sync with moveMultiples()
+// if a span has more than one intersection, merge the other segments' span as needed
+// Mirror of moveMultiples(): for spans with multiple intersections, find
+// neighboring spans on other segments that should be merged and log the
+// would-be merges. The goto labels mirror the real implementation;
+// 'checkNextSpan' sits after both inner loops and is reached only by goto.
+void SkOpSegment::debugMoveMultiples(SkPathOpsDebug::GlitchLog* glitches) const {
+    debugValidate();
+    const SkOpSpanBase* test = &fHead;
+    do {
+        int addCount = test->spanAddsCount();
+//        SkASSERT(addCount >= 1);
+        if (addCount <= 1) {
+            continue;
+        }
+        const SkOpPtT* startPtT = test->ptT();
+        const SkOpPtT* testPtT = startPtT;
+        do {  // iterate through all spans associated with start
+            const SkOpSpanBase* oppSpan = testPtT->span();
+            if (oppSpan->spanAddsCount() == addCount) {
+                continue;
+            }
+            if (oppSpan->deleted()) {
+                continue;
+            }
+            const SkOpSegment* oppSegment = oppSpan->segment();
+            if (oppSegment == this) {
+                continue;
+            }
+            // find range of spans to consider merging
+            const SkOpSpanBase* oppPrev = oppSpan;
+            const SkOpSpanBase* oppFirst = oppSpan;
+            while ((oppPrev = oppPrev->prev())) {
+                if (!roughly_equal(oppPrev->t(), oppSpan->t())) {
+                    break;
+                }
+                if (oppPrev->spanAddsCount() == addCount) {
+                    continue;
+                }
+                if (oppPrev->deleted()) {
+                    continue;
+                }
+                oppFirst = oppPrev;
+            }
+            const SkOpSpanBase* oppNext = oppSpan;
+            const SkOpSpanBase* oppLast = oppSpan;
+            while ((oppNext = oppNext->final() ? nullptr : oppNext->upCast()->next())) {
+                if (!roughly_equal(oppNext->t(), oppSpan->t())) {
+                    break;
+                }
+                if (oppNext->spanAddsCount() == addCount) {
+                    continue;
+                }
+                if (oppNext->deleted()) {
+                    continue;
+                }
+                oppLast = oppNext;
+            }
+            if (oppFirst == oppLast) {
+                continue;
+            }
+            const SkOpSpanBase* oppTest = oppFirst;
+            do {
+                if (oppTest == oppSpan) {
+                    continue;
+                }
+                // check to see if the candidate meets specific criteria:
+                // it contains spans of segments in test's loop but not including 'this'
+                const SkOpPtT* oppStartPtT = oppTest->ptT();
+                const SkOpPtT* oppPtT = oppStartPtT;
+                while ((oppPtT = oppPtT->next()) != oppStartPtT) {
+                    const SkOpSegment* oppPtTSegment = oppPtT->segment();
+                    if (oppPtTSegment == this) {
+                        goto tryNextSpan;
+                    }
+                    const SkOpPtT* matchPtT = startPtT;
+                    do {
+                        if (matchPtT->segment() == oppPtTSegment) {
+                            goto foundMatch;
+                        }
+                    } while ((matchPtT = matchPtT->next()) != startPtT);
+                    goto tryNextSpan;
+            foundMatch:  // merge oppTest and oppSpan
+                    oppSegment->debugValidate();
+                    oppTest->debugMergeMatches(glitches, oppSpan);
+                    oppTest->debugAddOpp(glitches, oppSpan);
+                    oppSegment->debugValidate();
+                    goto checkNextSpan;
+                }
+        tryNextSpan:
+                ;
+            } while (oppTest != oppLast && (oppTest = oppTest->upCast()->next()));
+        } while ((testPtT = testPtT->next()) != startPtT);
+checkNextSpan:
+        ;
+    } while ((test = test->final() ? nullptr : test->upCast()->next()));
+    debugValidate();
+    return;
+}
+
+// commented-out lines keep this in sync with moveNearby()
+// Move nearby t values and pts so they all hang off the same span. Alignment happens later.
+// Mirror of moveNearby(): first log spans that would be released because a
+// duplicate ptT on this segment points back to them; then log adjacent
+// span pairs close enough that they would be merged. Does not mutate.
+void SkOpSegment::debugMoveNearby(SkPathOpsDebug::GlitchLog* glitches) const {
+    debugValidate();
+    // release undeleted spans pointing to this seg that are linked to the primary span
+    const SkOpSpanBase* spanBase = &fHead;
+    do {
+        const SkOpPtT* ptT = spanBase->ptT();
+        const SkOpPtT* headPtT = ptT;
+        while ((ptT = ptT->next()) != headPtT) {
+            const SkOpSpanBase* test = ptT->span();
+            if (ptT->segment() == this && !ptT->deleted() && test != spanBase
+                    && test->ptT() == ptT) {
+                if (test->final()) {
+                    if (spanBase == &fHead) {
+                        glitches->record(SkPathOpsDebug::kMoveNearbyClearAll_Glitch, this);
+//                        return;
+                    }
+                    glitches->record(SkPathOpsDebug::kMoveNearbyReleaseFinal_Glitch, spanBase, ptT);
+                } else if (test->prev()) {
+                    glitches->record(SkPathOpsDebug::kMoveNearbyRelease_Glitch, test, headPtT);
+                }
+//                break;
+            }
+        }
+        spanBase = spanBase->upCast()->next();
+    } while (!spanBase->final());
+
+    // This loop looks for adjacent spans which are near by
+    spanBase = &fHead;
+    do {  // iterate through all spans associated with start
+        const SkOpSpanBase* test = spanBase->upCast()->next();
+        bool found;
+        if (!this->spansNearby(spanBase, test, &found)) {
+            glitches->record(SkPathOpsDebug::kMoveNearbyMergeFinal_Glitch, test);
+        }
+        if (found) {
+            if (test->final()) {
+                if (spanBase->prev()) {
+                    glitches->record(SkPathOpsDebug::kMoveNearbyMergeFinal_Glitch, test);
+                } else {
+                    glitches->record(SkPathOpsDebug::kMoveNearbyClearAll2_Glitch, this);
+                    // return
+                }
+            } else {
+                glitches->record(SkPathOpsDebug::kMoveNearbyMerge_Glitch, spanBase);
+            }
+        }
+        spanBase = test;
+    } while (!spanBase->final());
+    debugValidate();
+}
+#endif
+
+// Re-run init() with the segment's current data, restoring a pristine state.
+void SkOpSegment::debugReset() {
+    this->init(this->fPts, this->fWeight, this->contour(), this->verb());
+}
+
+#if DEBUG_COINCIDENCE_ORDER
+// Track the t-ranges of (at most) two coincidence runs on this segment,
+// asserting that the runs do not overlap and keep a consistent ordering.
+// A negative max marks an unused slot.
+void SkOpSegment::debugSetCoinT(int index, SkScalar t) const {
+    if (fDebugBaseMax < 0 || fDebugBaseIndex == index) {
+        fDebugBaseIndex = index;
+        fDebugBaseMin = std::min(t, fDebugBaseMin);
+        fDebugBaseMax = std::max(t, fDebugBaseMax);
+        return;
+    }
+    SkASSERT(fDebugBaseMin >= t || t >= fDebugBaseMax);
+    if (fDebugLastMax < 0 || fDebugLastIndex == index) {
+        fDebugLastIndex = index;
+        fDebugLastMin = std::min(t, fDebugLastMin);
+        fDebugLastMax = std::max(t, fDebugLastMax);
+        return;
+    }
+    SkASSERT(fDebugLastMin >= t || t >= fDebugLastMax);
+    // both runs must sit on the same side of the base range
+    SkASSERT((t - fDebugBaseMin > 0) == (fDebugLastMin - fDebugBaseMin > 0));
+}
+#endif
+
+#if DEBUG_ACTIVE_SPANS
+// Append a line per not-done span describing its computed curve, t range,
+// and winding state; '?' marks an unknown (SK_MinS32) wind sum.
+void SkOpSegment::debugShowActiveSpans(SkString* str) const {
+    debugValidate();
+    if (done()) {
+        return;
+    }
+    int lastId = -1;
+    double lastT = -1;
+    const SkOpSpan* span = &fHead;
+    do {
+        if (span->done()) {
+            continue;
+        }
+        // skip consecutive duplicates (same segment id and t)
+        if (lastId == this->debugID() && lastT == span->t()) {
+            continue;
+        }
+        lastId = this->debugID();
+        lastT = span->t();
+        str->appendf("%s id=%d", __FUNCTION__, this->debugID());
+        // since endpoints may have be adjusted, show actual computed curves
+        SkDCurve curvePart;
+        this->subDivide(span, span->next(), &curvePart);
+        const SkDPoint* pts = curvePart.fCubic.fPts;
+        str->appendf(" (%1.9g,%1.9g", pts[0].fX, pts[0].fY);
+        for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+            str->appendf(" %1.9g,%1.9g", pts[vIndex].fX, pts[vIndex].fY);
+        }
+        if (SkPath::kConic_Verb == fVerb) {
+            str->appendf(" %1.9gf", curvePart.fConic.fWeight);
+        }
+        str->appendf(") t=%1.9g tEnd=%1.9g", span->t(), span->next()->t());
+        if (span->windSum() == SK_MinS32) {
+            str->appendf(" windSum=?");
+        } else {
+            str->appendf(" windSum=%d", span->windSum());
+        }
+        if (span->oppValue() && span->oppSum() == SK_MinS32) {
+            str->appendf(" oppSum=?");
+        } else if (span->oppValue() || span->oppSum() != SK_MinS32) {
+            str->appendf(" oppSum=%d", span->oppSum());
+        }
+        str->appendf(" windValue=%d", span->windValue());
+        if (span->oppValue() || span->oppSum() != SK_MinS32) {
+            str->appendf(" oppValue=%d", span->oppValue());
+        }
+        str->appendf("\n");
+    } while ((span = span->next()->upCastable()));
+}
+#endif
+
+#if DEBUG_MARK_DONE
+// Print the span's curve points and the winding about to be assigned;
+// '?' marks the SK_MinS32 unknown sentinel.
+void SkOpSegment::debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding) {
+    const SkPoint& pt = span->ptT()->fPt;
+    SkDebugf("%s id=%d", fun, this->debugID());
+    SkDebugf(" (%1.9g,%1.9g", fPts[0].fX, fPts[0].fY);
+    for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+        SkDebugf(" %1.9g,%1.9g", fPts[vIndex].fX, fPts[vIndex].fY);
+    }
+    SkDebugf(") t=%1.9g [%d] (%1.9g,%1.9g) tEnd=%1.9g newWindSum=",
+            span->t(), span->debugID(), pt.fX, pt.fY, span->next()->t());
+    if (winding == SK_MinS32) {
+        SkDebugf("?");
+    } else {
+        SkDebugf("%d", winding);
+    }
+    SkDebugf(" windSum=");
+    if (span->windSum() == SK_MinS32) {
+        SkDebugf("?");
+    } else {
+        SkDebugf("%d", span->windSum());
+    }
+    SkDebugf(" windValue=%d\n", span->windValue());
+}
+
+void SkOpSegment::debugShowNewWinding(const char* fun, const SkOpSpan* span, int winding,
+ int oppWinding) {
+ const SkPoint& pt = span->ptT()->fPt;
+ SkDebugf("%s id=%d", fun, this->debugID());
+ SkDebugf(" (%1.9g,%1.9g", fPts[0].fX, fPts[0].fY);
+ for (int vIndex = 1; vIndex <= SkPathOpsVerbToPoints(fVerb); ++vIndex) {
+ SkDebugf(" %1.9g,%1.9g", fPts[vIndex].fX, fPts[vIndex].fY);
+ }
+ SkDebugf(") t=%1.9g [%d] (%1.9g,%1.9g) tEnd=%1.9g newWindSum=",
+ span->t(), span->debugID(), pt.fX, pt.fY, span->next()->t());
+ if (winding == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", winding);
+ }
+ SkDebugf(" newOppSum=");
+ if (oppWinding == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", oppWinding);
+ }
+ SkDebugf(" oppSum=");
+ if (span->oppSum() == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", span->oppSum());
+ }
+ SkDebugf(" windSum=");
+ if (span->windSum() == SK_MinS32) {
+ SkDebugf("?");
+ } else {
+ SkDebugf("%d", span->windSum());
+ }
+ SkDebugf(" windValue=%d oppValue=%d\n", span->windValue(), span->oppValue());
+}
+
+#endif
+
+// loop looking for a pair of angle parts that are too close to be sorted
+/* This is called after other more simple intersection and angle sorting tests have been exhausted.
+ This should be rarely called -- the test below is thorough and time consuming.
+ This checks the distance between start points; it also logs the mid-point and
+ end-point distances from each angle's segment to every other angle in the loop.
+*/
+#if DEBUG_ANGLE
+// Purely diagnostic: walks the circular fNext list of angles and, for every
+// ordered pair, prints squared distances (start/mid/end) via SkDebugf.
+// Does not mutate any state and computes no return value.
+void SkOpAngle::debugCheckNearCoincidence() const {
+ const SkOpAngle* test = this;
+ do {
+ const SkOpSegment* testSegment = test->segment();
+ double testStartT = test->start()->t();
+ SkDPoint testStartPt = testSegment->dPtAtT(testStartT);
+ double testEndT = test->end()->t();
+ SkDPoint testEndPt = testSegment->dPtAtT(testEndT);
+ double testLenSq = testStartPt.distanceSquared(testEndPt);
+ SkDebugf("%s testLenSq=%1.9g id=%d\n", __FUNCTION__, testLenSq, testSegment->debugID());
+ double testMidT = (testStartT + testEndT) / 2;
+ const SkOpAngle* next = test;
+ // Inner loop visits every other angle in the same circular list.
+ while ((next = next->fNext) != this) {
+ SkOpSegment* nextSegment = next->segment();
+ double testMidDistSq = testSegment->distSq(testMidT, next);
+ double testEndDistSq = testSegment->distSq(testEndT, next);
+ double nextStartT = next->start()->t();
+ SkDPoint nextStartPt = nextSegment->dPtAtT(nextStartT);
+ double distSq = testStartPt.distanceSquared(nextStartPt);
+ double nextEndT = next->end()->t();
+ double nextMidT = (nextStartT + nextEndT) / 2;
+ double nextMidDistSq = nextSegment->distSq(nextMidT, test);
+ double nextEndDistSq = nextSegment->distSq(nextEndT, test);
+ SkDebugf("%s distSq=%1.9g testId=%d nextId=%d\n", __FUNCTION__, distSq,
+ testSegment->debugID(), nextSegment->debugID());
+ SkDebugf("%s testMidDistSq=%1.9g\n", __FUNCTION__, testMidDistSq);
+ SkDebugf("%s testEndDistSq=%1.9g\n", __FUNCTION__, testEndDistSq);
+ SkDebugf("%s nextMidDistSq=%1.9g\n", __FUNCTION__, nextMidDistSq);
+ SkDebugf("%s nextEndDistSq=%1.9g\n", __FUNCTION__, nextEndDistSq);
+ SkDPoint nextEndPt = nextSegment->dPtAtT(nextEndT);
+ double nextLenSq = nextStartPt.distanceSquared(nextEndPt);
+ SkDebugf("%s nextLenSq=%1.9g\n", __FUNCTION__, nextLenSq);
+ SkDebugf("\n");
+ }
+ test = test->fNext;
+ } while (test->fNext != this);
+}
+#endif
+
+#if DEBUG_ANGLE
+// Formats this angle's curve part (line/quad/conic/cubic, chosen by the owning
+// segment's verb) plus the segment's debug id into a printable SkString.
+// The *_DEBUG_STR / *_DEBUG_DATA macros supply the per-verb format and points.
+SkString SkOpAngle::debugPart() const {
+ SkString result;
+ switch (this->segment()->verb()) {
+ case SkPath::kLine_Verb:
+ result.printf(LINE_DEBUG_STR " id=%d", LINE_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ case SkPath::kQuad_Verb:
+ result.printf(QUAD_DEBUG_STR " id=%d", QUAD_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ case SkPath::kConic_Verb:
+ // Conics also carry a weight, appended after the control points.
+ result.printf(CONIC_DEBUG_STR " id=%d",
+ CONIC_DEBUG_DATA(fPart.fCurve, fPart.fCurve.fConic.fWeight),
+ this->segment()->debugID());
+ break;
+ case SkPath::kCubic_Verb:
+ result.printf(CUBIC_DEBUG_STR " id=%d", CUBIC_DEBUG_DATA(fPart.fCurve),
+ this->segment()->debugID());
+ break;
+ default:
+ SkASSERT(0); // unexpected verb (e.g. move/close) — should never reach here
+ }
+ return result;
+}
+#endif
+
+#if DEBUG_SORT
+// Dumps every angle in this circular fNext list (first pass), then validates
+// each one (second pass). The null check on next tolerates a broken list.
+void SkOpAngle::debugLoop() const {
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = this;
+ do {
+ next->dumpOne(true);
+ SkDebugf("\n");
+ next = next->fNext;
+ } while (next && next != first);
+ next = first;
+ do {
+ next->debugValidate();
+ next = next->fNext;
+ } while (next && next != first);
+}
+#endif
+
+// Consistency check over the circular angle list: sums the signed wind and
+// opposite-wind contributions of every angle and asserts both cancel to zero.
+// Bails out early for unorderable angles or uncomputed wind values, since the
+// invariant only holds once winding is fully resolved.
+void SkOpAngle::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ // Skip validation while a global coincidence health check is in progress.
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = this;
+ int wind = 0;
+ int opp = 0;
+ // lastXor/lastOppXor track that every angle in the loop agrees on the
+ // xor-fill setting; -1 means "not yet seen".
+ int lastXor = -1;
+ int lastOppXor = -1;
+ do {
+ if (next->unorderable()) {
+ return;
+ }
+ const SkOpSpan* minSpan = next->start()->starter(next->end());
+ if (minSpan->windValue() == SK_MinS32) {
+ return;
+ }
+ bool op = next->segment()->operand();
+ bool isXor = next->segment()->isXor();
+ bool oppXor = next->segment()->oppXor();
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM || between(0, minSpan->windValue(), DEBUG_LIMIT_WIND_SUM));
+ SkASSERT(!DEBUG_LIMIT_WIND_SUM
+ || between(-DEBUG_LIMIT_WIND_SUM, minSpan->oppValue(), DEBUG_LIMIT_WIND_SUM));
+ // Which xor flag applies depends on whether the segment is the operand.
+ bool useXor = op ? oppXor : isXor;
+ SkASSERT(lastXor == -1 || lastXor == (int) useXor);
+ lastXor = (int) useXor;
+ wind += next->debugSign() * (op ? minSpan->oppValue() : minSpan->windValue());
+ if (useXor) {
+ wind &= 1; // xor fill reduces winding to parity
+ }
+ useXor = op ? isXor : oppXor;
+ SkASSERT(lastOppXor == -1 || lastOppXor == (int) useXor);
+ lastOppXor = (int) useXor;
+ opp += next->debugSign() * (op ? minSpan->windValue() : minSpan->oppValue());
+ if (useXor) {
+ opp &= 1;
+ }
+ next = next->fNext;
+ } while (next && next != first);
+ // gRunFail suppresses the assert when a failing run is expected.
+ SkASSERT(wind == 0 || !SkPathOpsDebug::gRunFail);
+ SkASSERT(opp == 0 || !SkPathOpsDebug::gRunFail);
+#endif
+}
+
+// Walks the next() chain collecting each angle and asserts no angle repeats
+// before the walk wraps back to the start — i.e. the list is a clean single
+// loop with no sub-cycles. A null next() terminates the walk without failure.
+void SkOpAngle::debugValidateNext() const {
+#if !FORCE_RELEASE
+ const SkOpAngle* first = this;
+ const SkOpAngle* next = first;
+ SkTDArray<const SkOpAngle*> angles;
+ do {
+// SkASSERT_RELEASE(next->fSegment->debugContains(next));
+ angles.push_back(next);
+ next = next->next();
+ if (next == first) {
+ break;
+ }
+ // Seeing an already-visited angle that isn't `first` means a malformed cycle.
+ SkASSERT_RELEASE(!angles.contains(next));
+ if (!next) {
+ return;
+ }
+ } while (true);
+#endif
+}
+
+#ifdef SK_DEBUG
+// Asserts that this coincident pair's end point-Ts reference the expected
+// spans: the coincident end must sit on `over` and the opposite end on
+// `outer`. DebugRunFail() downgrades the assert when a failure is expected.
+// debugState is unused here; it keeps the signature in sync with callers.
+void SkCoincidentSpans::debugStartCheck(const SkOpSpanBase* outer, const SkOpSpanBase* over,
+ const SkOpGlobalState* debugState) const {
+ SkASSERT(coinPtTEnd()->span() == over || !SkOpGlobalState::DebugRunFail());
+ SkASSERT(oppPtTEnd()->span() == outer || !SkOpGlobalState::DebugRunFail());
+}
+#endif
+
+#if DEBUG_COIN
+// sets the span's end to the ptT referenced by the previous-next
+// Debug mirror of correctOneEnd(): instead of writing through setEnd (passed
+// as nullptr by debugCorrectEnds and never invoked here), it only records a
+// glitch when the stored end ptT disagrees with the one the span chain implies.
+// getEnd selects which of the four end accessors to check.
+void SkCoincidentSpans::debugCorrectOneEnd(SkPathOpsDebug::GlitchLog* log,
+ const SkOpPtT* (SkCoincidentSpans::* getEnd)() const,
+ void (SkCoincidentSpans::*setEnd)(const SkOpPtT* ptT) const ) const {
+ const SkOpPtT* origPtT = (this->*getEnd)();
+ const SkOpSpanBase* origSpan = origPtT->span();
+ const SkOpSpan* prev = origSpan->prev();
+ // Derive the canonical ptT from the neighboring span links; a head span
+ // (no prev) is reached via next()->prev() instead.
+ const SkOpPtT* testPtT = prev ? prev->next()->ptT()
+ : origSpan->upCast()->next()->prev()->ptT();
+ if (origPtT != testPtT) {
+ log->record(SkPathOpsDebug::kCorrectEnd_Glitch, this, origPtT, testPtT);
+ }
+}
+
+
+/* Commented-out lines keep this in sync with correctEnds */
+// FIXME: member pointers have fallen out of favor and can be replaced with
+// an alternative approach.
+// makes all span ends agree with the segment's spans that define them
+// Checks all four coincident-run endpoints (coin start/end, opp start/end);
+// the nullptr setter means this debug variant only logs, never mutates.
+void SkCoincidentSpans::debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const {
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::coinPtTStart, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::coinPtTEnd, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::oppPtTStart, nullptr);
+ this->debugCorrectOneEnd(log, &SkCoincidentSpans::oppPtTEnd, nullptr);
+}
+
+/* Commented-out lines keep this in sync with expand */
+// expand the range by checking adjacent spans for coincidence
+// Debug mirror of expand(): probes one step backward from the coincident start
+// and one step forward from the coincident end. Where the real expand() would
+// grow the run, this only records a kExpandCoin glitch (when log is non-null).
+// Returns true if either direction could have been expanded.
+bool SkCoincidentSpans::debugExpand(SkPathOpsDebug::GlitchLog* log) const {
+ bool expanded = false;
+ const SkOpSegment* segment = coinPtTStart()->segment();
+ const SkOpSegment* oppSegment = oppPtTStart()->segment();
+ do {
+ const SkOpSpan* start = coinPtTStart()->span()->upCast();
+ const SkOpSpan* prev = start->prev();
+ const SkOpPtT* oppPtT;
+ if (!prev || !(oppPtT = prev->contains(oppSegment))) {
+ break;
+ }
+ // Confirm coincidence at the midpoint of the candidate extension.
+ double midT = (prev->t() + start->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, this, prev->ptT(), oppPtT);
+ expanded = true;
+ } while (false); // actual continues while expansion is possible
+ do {
+ const SkOpSpanBase* end = coinPtTEnd()->span();
+ SkOpSpanBase* next = end->final() ? nullptr : end->upCast()->next();
+ if (next && next->deleted()) {
+ break;
+ }
+ const SkOpPtT* oppPtT;
+ if (!next || !(oppPtT = next->contains(oppSegment))) {
+ break;
+ }
+ double midT = (end->t() + next->t()) / 2;
+ if (!segment->isClose(midT, oppSegment)) {
+ break;
+ }
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, this, next->ptT(), oppPtT);
+ expanded = true;
+ } while (false); // actual continues while expansion is possible
+ return expanded;
+}
+
+// description below
+// Debug mirror of addEndMovedSpans(base, testSpan): walks testSpan's ptT loop
+// and, for each candidate on another segment, intersects a ray perpendicular
+// to `base` with that segment. Matching intersections are reported through
+// debugAddOrOverlap() rather than committed. See the block comment on the
+// log-only overload below for the motivating scenario.
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log, const SkOpSpan* base, const SkOpSpanBase* testSpan) const {
+ const SkOpPtT* testPtT = testSpan->ptT();
+ const SkOpPtT* stopPtT = testPtT;
+ const SkOpSegment* baseSeg = base->segment();
+ while ((testPtT = testPtT->next()) != stopPtT) {
+ const SkOpSegment* testSeg = testPtT->segment();
+ if (testPtT->deleted()) {
+ continue;
+ }
+ if (testSeg == baseSeg) {
+ continue;
+ }
+ if (testPtT->span()->ptT() != testPtT) {
+ continue;
+ }
+ // Skip pairs already known to be coincident at this t.
+ if (this->contains(baseSeg, testSeg, testPtT->fT)) {
+ continue;
+ }
+ // intersect perp with base->ptT() with testPtT->segment()
+ SkDVector dxdy = baseSeg->dSlopeAtT(base->t());
+ const SkPoint& pt = base->pt();
+ // (fY, -fX) rotates the tangent 90 degrees to form the perpendicular ray.
+ SkDLine ray = {{{pt.fX, pt.fY}, {pt.fX + dxdy.fY, pt.fY - dxdy.fX}}};
+ SkIntersections i;
+ (*CurveIntersectRay[testSeg->verb()])(testSeg->pts(), testSeg->weight(), ray, &i);
+ for (int index = 0; index < i.used(); ++index) {
+ double t = i[0][index];
+ if (!between(0, t, 1)) {
+ continue;
+ }
+ SkDPoint oppPt = i.pt(index);
+ if (!oppPt.approximatelyEqual(pt)) {
+ continue;
+ }
+ // NOTE(review): addT() mutates testSeg even in this debug path — TODO confirm intended.
+ SkOpSegment* writableSeg = const_cast<SkOpSegment*>(testSeg);
+ SkOpPtT* oppStart = writableSeg->addT(t);
+ if (oppStart == testPtT) {
+ continue;
+ }
+ SkOpSpan* writableBase = const_cast<SkOpSpan*>(base);
+ oppStart->span()->addOpp(writableBase);
+ if (oppStart->deleted()) {
+ continue;
+ }
+ SkOpSegment* coinSeg = base->segment();
+ SkOpSegment* oppSeg = oppStart->segment();
+ double coinTs, coinTe, oppTs, oppTe;
+ // Normalize so the pair is in canonical (Ordered) segment order.
+ if (Ordered(coinSeg, oppSeg)) {
+ coinTs = base->t();
+ coinTe = testSpan->t();
+ oppTs = oppStart->fT;
+ oppTe = testPtT->fT;
+ } else {
+ using std::swap;
+ swap(coinSeg, oppSeg);
+ coinTs = oppStart->fT;
+ coinTe = testPtT->fT;
+ oppTs = base->t();
+ oppTe = testSpan->t();
+ }
+ if (coinTs > coinTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ bool added;
+ this->debugAddOrOverlap(log, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, &added);
+ }
+ }
+ return;
+}
+
+// description below
+// Dispatch helper: checks the spans on either side of ptT's span, skipping
+// canceled neighbors, and forwards each surviving direction to the
+// (base, testSpan) overload above. FAIL_IF_COIN aborts on malformed spans.
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* ptT) const {
+ FAIL_IF_COIN(!ptT->span()->upCastable(), ptT->span());
+ const SkOpSpan* base = ptT->span()->upCast();
+ const SkOpSpan* prev = base->prev();
+ FAIL_IF_COIN(!prev, ptT->span());
+ if (!prev->isCanceled()) {
+ this->debugAddEndMovedSpans(log, base, base->prev());
+ }
+ if (!base->isCanceled()) {
+ this->debugAddEndMovedSpans(log, base, base->next());
+ }
+ return;
+}
+
+/* If A is coincident with B and B includes an endpoint, and A's matching point
+ is not the endpoint (i.e., there's an implied line connecting B-end and A)
+ then assume that the same implied line may intersect another curve close to B.
+ Since we only care about coincidence that was undetected, look at the
+ ptT list on B-segment adjacent to the B-end/A ptT loop (not in the loop, but
+ next door) and see if the A matching point is close enough to form another
+ coincident pair. If so, check for a new coincident span between B-end/A ptT loop
+ and the adjacent ptT loop.
+*/
+// Top-level debug entry: scans every coincident run for a start or end where
+// one side sits on a segment endpoint (t == 0 or 1) while the other does not,
+// and probes the non-endpoint side for missed coincidence via the overloads above.
+void SkOpCoincidence::debugAddEndMovedSpans(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* span = fHead;
+ if (!span) {
+ return;
+ }
+// fTop = span;
+// fHead = nullptr;
+ do {
+ if (span->coinPtTStart()->fPt != span->oppPtTStart()->fPt) {
+ // A coincident *start* at t == 1 is malformed; abort via FAIL_IF_COIN.
+ FAIL_IF_COIN(1 == span->coinPtTStart()->fT, span);
+ bool onEnd = span->coinPtTStart()->fT == 0;
+ bool oOnEnd = zero_or_one(span->oppPtTStart()->fT);
+ if (onEnd) {
+ if (!oOnEnd) { // if both are on end, any nearby intersect was already found
+ this->debugAddEndMovedSpans(log, span->oppPtTStart());
+ }
+ } else if (oOnEnd) {
+ this->debugAddEndMovedSpans(log, span->coinPtTStart());
+ }
+ }
+ if (span->coinPtTEnd()->fPt != span->oppPtTEnd()->fPt) {
+ bool onEnd = span->coinPtTEnd()->fT == 1;
+ bool oOnEnd = zero_or_one(span->oppPtTEnd()->fT);
+ if (onEnd) {
+ if (!oOnEnd) {
+ this->debugAddEndMovedSpans(log, span->oppPtTEnd());
+ }
+ } else if (oOnEnd) {
+ this->debugAddEndMovedSpans(log, span->coinPtTEnd());
+ }
+ }
+ } while ((span = span->next()));
+// this->restoreHead();
+ return;
+}
+
+/* Commented-out lines keep this in sync with addExpanded */
+// for each coincident pair, match the spans
+// if the spans don't match, add the missing pt to the segment and loop it in the opposite span
+// Debug mirror of addExpanded(): walks each coincident run's two span chains in
+// lockstep and, wherever one chain has a span the other lacks, records a
+// kAddExpandedCoin glitch with the t where the real code would insert a point.
+void SkOpCoincidence::debugAddExpanded(SkPathOpsDebug::GlitchLog* log) const {
+// DEBUG_SET_PHASE();
+ const SkCoincidentSpans* coin = this->fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ const SkOpPtT* startPtT = coin->coinPtTStart();
+ const SkOpPtT* oStartPtT = coin->oppPtTStart();
+ double priorT = startPtT->fT;
+ double oPriorT = oStartPtT->fT;
+ FAIL_IF_COIN(!startPtT->contains(oStartPtT), coin);
+ SkOPASSERT(coin->coinPtTEnd()->contains(coin->oppPtTEnd()));
+ const SkOpSpanBase* start = startPtT->span();
+ const SkOpSpanBase* oStart = oStartPtT->span();
+ const SkOpSpanBase* end = coin->coinPtTEnd()->span();
+ const SkOpSpanBase* oEnd = coin->oppPtTEnd()->span();
+ FAIL_IF_COIN(oEnd->deleted(), coin);
+ FAIL_IF_COIN(!start->upCastable(), coin);
+ const SkOpSpanBase* test = start->upCast()->next();
+ FAIL_IF_COIN(!coin->flipped() && !oStart->upCastable(), coin);
+ // A flipped run walks the opposite chain backward (prev) instead of forward.
+ const SkOpSpanBase* oTest = coin->flipped() ? oStart->prev() : oStart->upCast()->next();
+ FAIL_IF_COIN(!oTest, coin);
+ const SkOpSegment* seg = start->segment();
+ const SkOpSegment* oSeg = oStart->segment();
+ while (test != end || oTest != oEnd) {
+ const SkOpPtT* containedOpp = test->ptT()->contains(oSeg);
+ const SkOpPtT* containedThis = oTest->ptT()->contains(seg);
+ if (!containedOpp || !containedThis) {
+ // choose the ends, or the first common pt-t list shared by both
+ double nextT, oNextT;
+ if (containedOpp) {
+ nextT = test->t();
+ oNextT = containedOpp->fT;
+ } else if (containedThis) {
+ nextT = containedThis->fT;
+ oNextT = oTest->t();
+ } else {
+ // iterate through until a pt-t list found that contains the other
+ const SkOpSpanBase* walk = test;
+ const SkOpPtT* walkOpp;
+ do {
+ FAIL_IF_COIN(!walk->upCastable(), coin);
+ walk = walk->upCast()->next();
+ } while (!(walkOpp = walk->ptT()->contains(oSeg))
+ && walk != coin->coinPtTEnd()->span());
+ FAIL_IF_COIN(!walkOpp, coin);
+ nextT = walk->t();
+ oNextT = walkOpp->fT;
+ }
+ // use t ranges to guess which one is missing
+ double startRange = nextT - priorT;
+ FAIL_IF_COIN(!startRange, coin);
+ double startPart = (test->t() - priorT) / startRange;
+ double oStartRange = oNextT - oPriorT;
+ FAIL_IF_COIN(!oStartRange, coin);
+ double oStartPart = (oTest->t() - oStartPtT->fT) / oStartRange;
+ FAIL_IF_COIN(startPart == oStartPart, coin);
+ // The side with the smaller fractional progress is assumed to be
+ // the one missing the point (mirrors addExpanded's insertion logic).
+ bool addToOpp = !containedOpp && !containedThis ? startPart < oStartPart
+ : !!containedThis;
+ bool startOver = false;
+ addToOpp ? log->record(SkPathOpsDebug::kAddExpandedCoin_Glitch,
+ oPriorT + oStartRange * startPart, test)
+ : log->record(SkPathOpsDebug::kAddExpandedCoin_Glitch,
+ priorT + startRange * oStartPart, oTest);
+ // FAIL_IF_COIN(!success, coin);
+ if (startOver) {
+ test = start;
+ oTest = oStart;
+ }
+ // Re-read the ends in case the lists changed underneath us.
+ end = coin->coinPtTEnd()->span();
+ oEnd = coin->oppPtTEnd()->span();
+ }
+ if (test != end) {
+ FAIL_IF_COIN(!test->upCastable(), coin);
+ priorT = test->t();
+ test = test->upCast()->next();
+ }
+ if (oTest != oEnd) {
+ oPriorT = oTest->t();
+ oTest = coin->flipped() ? oTest->prev() : oTest->upCast()->next();
+ FAIL_IF_COIN(!oTest, coin);
+ }
+ }
+ } while ((coin = coin->next()));
+ return;
+}
+
+/* Commented-out lines keep this in sync addIfMissing() */
+// note that over1s, over1e, over2s, over2e are ordered
+// Debug mirror of addIfMissing(): maps the overlap t-range [tStart, tEnd] from
+// the shared overlap segment onto coinSeg and oppSeg via TRange(), rejects
+// collapsed ranges (logging kAddIfCollapsed), then defers to debugAddOrOverlap.
+// The asserts encode the precondition that both over ranges are ordered and
+// bracket [tStart, tEnd], and that all three segments are distinct.
+void SkOpCoincidence::debugAddIfMissing(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* over1s, const SkOpPtT* over2s,
+ double tStart, double tEnd, const SkOpSegment* coinSeg, const SkOpSegment* oppSeg, bool* added,
+ const SkOpPtT* over1e, const SkOpPtT* over2e) const {
+ SkASSERT(tStart < tEnd);
+ SkASSERT(over1s->fT < over1e->fT);
+ SkASSERT(between(over1s->fT, tStart, over1e->fT));
+ SkASSERT(between(over1s->fT, tEnd, over1e->fT));
+ SkASSERT(over2s->fT < over2e->fT);
+ SkASSERT(between(over2s->fT, tStart, over2e->fT));
+ SkASSERT(between(over2s->fT, tEnd, over2e->fT));
+ SkASSERT(over1s->segment() == over1e->segment());
+ SkASSERT(over2s->segment() == over2e->segment());
+ SkASSERT(over1s->segment() == over2s->segment());
+ SkASSERT(over1s->segment() != coinSeg);
+ SkASSERT(over1s->segment() != oppSeg);
+ SkASSERT(coinSeg != oppSeg);
+ double coinTs, coinTe, oppTs, oppTe;
+ coinTs = TRange(over1s, tStart, coinSeg SkDEBUGPARAMS(over1e));
+ coinTe = TRange(over1s, tEnd, coinSeg SkDEBUGPARAMS(over1e));
+ SkOpSpanBase::Collapsed result = coinSeg->collapsed(coinTs, coinTe);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return log->record(SkPathOpsDebug::kAddIfCollapsed_Glitch, coinSeg);
+ }
+ oppTs = TRange(over2s, tStart, oppSeg SkDEBUGPARAMS(over2e));
+ oppTe = TRange(over2s, tEnd, oppSeg SkDEBUGPARAMS(over2e));
+ result = oppSeg->collapsed(oppTs, oppTe);
+ if (SkOpSpanBase::Collapsed::kNo != result) {
+ return log->record(SkPathOpsDebug::kAddIfCollapsed_Glitch, oppSeg);
+ }
+ // Canonicalize so the coincident t-range runs forward; the opposite range
+ // is swapped in tandem to preserve the pairing.
+ if (coinTs > coinTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ this->debugAddOrOverlap(log, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe, added);
+ return;
+}
+
+/* Commented-out lines keep this in sync addOrOverlap() */
+// If this is called by addEndMovedSpans(), a returned false propagates out to an abort.
+// If this is called by AddIfMissing(), a returned false indicates there was nothing to add
+// Debug mirror of addOrOverlap(): finds existing overlapping coincident runs,
+// logs how the real code would merge/extend them, verifies (and logs, rather
+// than performs) the insertion of missing span points, and records whether a
+// new coincident run or an extension would be created. RETURN_FALSE_IF exits
+// early on the same conditions that make the real code bail.
+void SkOpCoincidence::debugAddOrOverlap(SkPathOpsDebug::GlitchLog* log,
+ const SkOpSegment* coinSeg, const SkOpSegment* oppSeg,
+ double coinTs, double coinTe, double oppTs, double oppTe, bool* added) const {
+ SkTDArray<SkCoincidentSpans*> overlaps;
+ SkOPASSERT(!fTop); // this is (correctly) reversed in addifMissing()
+ if (fTop && !this->checkOverlap(fTop, coinSeg, oppSeg, coinTs, coinTe, oppTs, oppTe,
+ &overlaps)) {
+ return;
+ }
+ if (fHead && !this->checkOverlap(fHead, coinSeg, oppSeg, coinTs,
+ coinTe, oppTs, oppTe, &overlaps)) {
+ return;
+ }
+ const SkCoincidentSpans* overlap = overlaps.count() ? overlaps[0] : nullptr;
+ for (int index = 1; index < overlaps.count(); ++index) { // combine overlaps before continuing
+ const SkCoincidentSpans* test = overlaps[index];
+ // Each comparison logs where the surviving overlap's range would widen.
+ if (overlap->coinPtTStart()->fT > test->coinPtTStart()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->coinPtTStart());
+ }
+ if (overlap->coinPtTEnd()->fT < test->coinPtTEnd()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->coinPtTEnd());
+ }
+ // Flipped runs compare the opposite range in reverse order.
+ if (overlap->flipped()
+ ? overlap->oppPtTStart()->fT < test->oppPtTStart()->fT
+ : overlap->oppPtTStart()->fT > test->oppPtTStart()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->oppPtTStart());
+ }
+ if (overlap->flipped()
+ ? overlap->oppPtTEnd()->fT > test->oppPtTEnd()->fT
+ : overlap->oppPtTEnd()->fT < test->oppPtTEnd()->fT) {
+ log->record(SkPathOpsDebug::kAddOrOverlap_Glitch, overlap, test->oppPtTEnd());
+ }
+ if (!fHead) { this->debugRelease(log, fHead, test);
+ this->debugRelease(log, fTop, test);
+ }
+ }
+ const SkOpPtT* cs = coinSeg->existing(coinTs, oppSeg);
+ const SkOpPtT* ce = coinSeg->existing(coinTe, oppSeg);
+ RETURN_FALSE_IF(overlap && cs && ce && overlap->contains(cs, ce), coinSeg);
+ RETURN_FALSE_IF(cs != ce || !cs, coinSeg);
+ const SkOpPtT* os = oppSeg->existing(oppTs, coinSeg);
+ const SkOpPtT* oe = oppSeg->existing(oppTe, coinSeg);
+ RETURN_FALSE_IF(overlap && os && oe && overlap->contains(os, oe), oppSeg);
+ // `true ||` deliberately disables these checks while keeping them readable.
+ SkASSERT(true || !cs || !cs->deleted());
+ SkASSERT(true || !os || !os->deleted());
+ SkASSERT(true || !ce || !ce->deleted());
+ SkASSERT(true || !oe || !oe->deleted());
+ const SkOpPtT* csExisting = !cs ? coinSeg->existing(coinTs, nullptr) : nullptr;
+ const SkOpPtT* ceExisting = !ce ? coinSeg->existing(coinTe, nullptr) : nullptr;
+ RETURN_FALSE_IF(csExisting && csExisting == ceExisting, coinSeg);
+ RETURN_FALSE_IF(csExisting && (csExisting == ce ||
+ csExisting->contains(ceExisting ? ceExisting : ce)), coinSeg);
+ RETURN_FALSE_IF(ceExisting && (ceExisting == cs ||
+ ceExisting->contains(csExisting ? csExisting : cs)), coinSeg);
+ const SkOpPtT* osExisting = !os ? oppSeg->existing(oppTs, nullptr) : nullptr;
+ const SkOpPtT* oeExisting = !oe ? oppSeg->existing(oppTe, nullptr) : nullptr;
+ RETURN_FALSE_IF(osExisting && osExisting == oeExisting, oppSeg);
+ RETURN_FALSE_IF(osExisting && (osExisting == oe ||
+ osExisting->contains(oeExisting ? oeExisting : oe)), oppSeg);
+ RETURN_FALSE_IF(oeExisting && (oeExisting == os ||
+ oeExisting->contains(osExisting ? osExisting : os)), oppSeg);
+ bool csDeleted = false, osDeleted = false, ceDeleted = false, oeDeleted = false;
+ this->debugValidate();
+ if (!cs || !os) {
+ if (!cs)
+ cs = coinSeg->debugAddT(coinTs, log);
+ if (!os)
+ os = oppSeg->debugAddT(oppTs, log);
+// RETURN_FALSE_IF(callerAborts, !csWritable || !osWritable);
+ if (cs && os) cs->span()->debugAddOpp(log, os->span());
+// cs = csWritable;
+// os = osWritable->active();
+ RETURN_FALSE_IF((ce && ce->deleted()) || (oe && oe->deleted()), coinSeg);
+ }
+ if (!ce || !oe) {
+ if (!ce)
+ ce = coinSeg->debugAddT(coinTe, log);
+ if (!oe)
+ oe = oppSeg->debugAddT(oppTe, log);
+ if (ce && oe) ce->span()->debugAddOpp(log, oe->span());
+// ce = ceWritable;
+// oe = oeWritable;
+ }
+ this->debugValidate();
+ RETURN_FALSE_IF(csDeleted, coinSeg);
+ RETURN_FALSE_IF(osDeleted, oppSeg);
+ RETURN_FALSE_IF(ceDeleted, coinSeg);
+ RETURN_FALSE_IF(oeDeleted, oppSeg);
+ RETURN_FALSE_IF(!cs || !ce || cs == ce || cs->contains(ce) || !os || !oe || os == oe || os->contains(oe), coinSeg);
+ bool result = true;
+ if (overlap) {
+ // An existing overlap would be extended; log which side anchors the extension.
+ if (overlap->coinPtTStart()->segment() == coinSeg) {
+ log->record(SkPathOpsDebug::kAddMissingExtend_Glitch, coinSeg, coinTs, coinTe, oppSeg, oppTs, oppTe);
+ } else {
+ if (oppTs > oppTe) {
+ using std::swap;
+ swap(coinTs, coinTe);
+ swap(oppTs, oppTe);
+ }
+ log->record(SkPathOpsDebug::kAddMissingExtend_Glitch, oppSeg, oppTs, oppTe, coinSeg, coinTs, coinTe);
+ }
+#if 0 && DEBUG_COINCIDENCE_VERBOSE
+ if (result) {
+ overlap->debugShow();
+ }
+#endif
+ } else {
+ // No overlap: the real code would create a brand-new coincident run here.
+ log->record(SkPathOpsDebug::kAddMissingCoin_Glitch, coinSeg, coinTs, coinTe, oppSeg, oppTs, oppTe);
+#if 0 && DEBUG_COINCIDENCE_VERBOSE
+ fHead->debugShow();
+#endif
+ }
+ this->debugValidate();
+ return (void) result;
+}
+
+// Extra commented-out lines keep this in sync with addMissing()
+/* detects overlaps of different coincident runs on same segment */
+/* does not detect overlaps for pairs without any segments in common */
+// returns true if caller should loop again
+// Debug mirror of addMissing(): compares every pair of coincident runs that
+// share a segment (in any of the four coin/opp role combinations) and, when
+// their t-ranges overlap, forwards the overlap to debugAddIfMissing().
+// *added is set false here and may be updated by the callees.
+void SkOpCoincidence::debugAddMissing(SkPathOpsDebug::GlitchLog* log, bool* added) const {
+ const SkCoincidentSpans* outer = fHead;
+ *added = false;
+ if (!outer) {
+ return;
+ }
+ // fTop = outer;
+ // fHead = nullptr;
+ do {
+ // addifmissing can modify the list that this is walking
+ // save head so that walker can iterate over old data unperturbed
+ // addifmissing adds to head freely then add saved head in the end
+ const SkOpPtT* ocs = outer->coinPtTStart();
+ SkASSERT(!ocs->deleted());
+ const SkOpSegment* outerCoin = ocs->segment();
+ SkASSERT(!outerCoin->done()); // if it's done, should have already been removed from list
+ const SkOpPtT* oos = outer->oppPtTStart();
+ if (oos->deleted()) {
+ return;
+ }
+ const SkOpSegment* outerOpp = oos->segment();
+ SkASSERT(!outerOpp->done());
+// SkOpSegment* outerCoinWritable = const_cast<SkOpSegment*>(outerCoin);
+// SkOpSegment* outerOppWritable = const_cast<SkOpSegment*>(outerOpp);
+ const SkCoincidentSpans* inner = outer;
+ // Each later run is compared against the outer run exactly once.
+ while ((inner = inner->next())) {
+ this->debugValidate();
+ double overS, overE;
+ const SkOpPtT* ics = inner->coinPtTStart();
+ SkASSERT(!ics->deleted());
+ const SkOpSegment* innerCoin = ics->segment();
+ SkASSERT(!innerCoin->done());
+ const SkOpPtT* ios = inner->oppPtTStart();
+ SkASSERT(!ios->deleted());
+ const SkOpSegment* innerOpp = ios->segment();
+ SkASSERT(!innerOpp->done());
+// SkOpSegment* innerCoinWritable = const_cast<SkOpSegment*>(innerCoin);
+// SkOpSegment* innerOppWritable = const_cast<SkOpSegment*>(innerOpp);
+ // The four branches below cover each way the two runs can share a segment.
+ if (outerCoin == innerCoin) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ if (oce->deleted()) {
+ return;
+ }
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ SkASSERT(!ice->deleted());
+ if (outerOpp != innerOpp && this->overlap(ocs, oce, ics, ice, &overS, &overE)) {
+ this->debugAddIfMissing(log, ocs->starter(oce), ics->starter(ice),
+ overS, overE, outerOpp, innerOpp, added,
+ ocs->debugEnder(oce),
+ ics->debugEnder(ice));
+ }
+ } else if (outerCoin == innerOpp) {
+ const SkOpPtT* oce = outer->coinPtTEnd();
+ SkASSERT(!oce->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ SkASSERT(!ioe->deleted());
+ if (outerOpp != innerCoin && this->overlap(ocs, oce, ios, ioe, &overS, &overE)) {
+ this->debugAddIfMissing(log, ocs->starter(oce), ios->starter(ioe),
+ overS, overE, outerOpp, innerCoin, added,
+ ocs->debugEnder(oce),
+ ios->debugEnder(ioe));
+ }
+ } else if (outerOpp == innerCoin) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ SkASSERT(!ooe->deleted());
+ const SkOpPtT* ice = inner->coinPtTEnd();
+ SkASSERT(!ice->deleted());
+ SkASSERT(outerCoin != innerOpp);
+ if (this->overlap(oos, ooe, ics, ice, &overS, &overE)) {
+ this->debugAddIfMissing(log, oos->starter(ooe), ics->starter(ice),
+ overS, overE, outerCoin, innerOpp, added,
+ oos->debugEnder(ooe),
+ ics->debugEnder(ice));
+ }
+ } else if (outerOpp == innerOpp) {
+ const SkOpPtT* ooe = outer->oppPtTEnd();
+ SkASSERT(!ooe->deleted());
+ const SkOpPtT* ioe = inner->oppPtTEnd();
+ if (ioe->deleted()) {
+ return;
+ }
+ SkASSERT(outerCoin != innerCoin);
+ if (this->overlap(oos, ooe, ios, ioe, &overS, &overE)) {
+ this->debugAddIfMissing(log, oos->starter(ooe), ios->starter(ioe),
+ overS, overE, outerCoin, innerCoin, added,
+ oos->debugEnder(ooe),
+ ios->debugEnder(ioe));
+ }
+ }
+ this->debugValidate();
+ }
+ } while ((outer = outer->next()));
+ // this->restoreHead();
+ return;
+}
+
+// Commented-out lines keep this in sync with release()
+// Debug mirror of release(): walks the list starting at `coin` and, where the
+// real code would unlink `remove`, only records a kReleasedSpan glitch.
+// The commented-out setNext/fHead/fTop lines show the mutation it mirrors.
+void SkOpCoincidence::debugRelease(SkPathOpsDebug::GlitchLog* log, const SkCoincidentSpans* coin, const SkCoincidentSpans* remove) const {
+ const SkCoincidentSpans* head = coin;
+ const SkCoincidentSpans* prev = nullptr;
+ const SkCoincidentSpans* next;
+ do {
+ next = coin->next();
+ if (coin == remove) {
+ if (prev) {
+// prev->setNext(next);
+ } else if (head == fHead) {
+// fHead = next;
+ } else {
+// fTop = next;
+ }
+ log->record(SkPathOpsDebug::kReleasedSpan_Glitch, coin);
+ }
+ prev = coin;
+ } while ((coin = next));
+ return;
+}
+
+// Debug counterpart of releasing by segment: logs a kReleasedSpan glitch for
+// every coincident run that references the `deleted` segment at any of its
+// four endpoints, without modifying the list.
+void SkOpCoincidence::debugRelease(SkPathOpsDebug::GlitchLog* log, const SkOpSegment* deleted) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ if (coin->coinPtTStart()->segment() == deleted
+ || coin->coinPtTEnd()->segment() == deleted
+ || coin->oppPtTStart()->segment() == deleted
+ || coin->oppPtTEnd()->segment() == deleted) {
+ log->record(SkPathOpsDebug::kReleasedSpan_Glitch, coin);
+ }
+ } while ((coin = coin->next()));
+}
+
+// Commented-out lines keep this in sync with expand()
+// expand the range by checking adjacent spans for coincidence
+// Debug mirror of expand(): asks each coincident run whether it could grow
+// (SkCoincidentSpans::debugExpand), then checks whether any two runs would
+// become identical after expansion, logging a kExpandCoin glitch if so.
+// Returns true if any run reported a possible expansion.
+bool SkOpCoincidence::debugExpand(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return false;
+ }
+ bool expanded = false;
+ do {
+ if (coin->debugExpand(log)) {
+ // check to see if multiple spans expanded so they are now identical
+ const SkCoincidentSpans* test = fHead;
+ do {
+ if (coin == test) {
+ continue;
+ }
+ if (coin->coinPtTStart() == test->coinPtTStart()
+ && coin->oppPtTStart() == test->oppPtTStart()) {
+ if (log) log->record(SkPathOpsDebug::kExpandCoin_Glitch, fHead, test->coinPtTStart());
+ break;
+ }
+ } while ((test = test->next()));
+ expanded = true;
+ }
+ } while ((coin = coin->next()));
+ return expanded;
+}
+
+// Commented-out lines keep this in sync with mark()
+/* this sets up the coincidence links in the segments when the coincidence crosses multiple spans */
+// Debug mirror of mark(): for each coincident run, records (via the span-level
+// debugInsertCoincidence/debugInsertCoinEnd helpers) the coincidence links the
+// real code would install on the end spans and on every interior span.
+void SkOpCoincidence::debugMark(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ FAIL_IF_COIN(!coin->coinPtTStartWritable()->span()->upCastable(), coin);
+ const SkOpSpan* start = coin->coinPtTStartWritable()->span()->upCast();
+// SkASSERT(start->deleted());
+ const SkOpSpanBase* end = coin->coinPtTEndWritable()->span();
+// SkASSERT(end->deleted());
+ const SkOpSpanBase* oStart = coin->oppPtTStartWritable()->span();
+// SkASSERT(oStart->deleted());
+ const SkOpSpanBase* oEnd = coin->oppPtTEndWritable()->span();
+// SkASSERT(oEnd->deleted());
+ bool flipped = coin->flipped();
+ // A flipped run traverses the opposite chain in reverse, so swap its ends.
+ if (flipped) {
+ using std::swap;
+ swap(oStart, oEnd);
+ }
+ /* coin and opp spans may not match up. Mark the ends, and then let the interior
+ get marked as many times as the spans allow */
+ start->debugInsertCoincidence(log, oStart->upCast());
+ end->debugInsertCoinEnd(log, oEnd);
+ const SkOpSegment* segment = start->segment();
+ const SkOpSegment* oSegment = oStart->segment();
+ const SkOpSpanBase* next = start;
+ const SkOpSpanBase* oNext = oStart;
+ bool ordered;
+ FAIL_IF_COIN(!coin->ordered(&ordered), coin);
+ while ((next = next->upCast()->next()) != end) {
+ FAIL_IF_COIN(!next->upCastable(), coin);
+ next->upCast()->debugInsertCoincidence(log, oSegment, flipped, ordered);
+ }
+ while ((oNext = oNext->upCast()->next()) != oEnd) {
+ FAIL_IF_COIN(!oNext->upCastable(), coin);
+ oNext->upCast()->debugInsertCoincidence(log, segment, flipped, ordered);
+ }
+ } while ((coin = coin->next()));
+ return;
+}
+#endif // DEBUG_COIN
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with markCollapsed()
+// Debug mirror of markCollapsed(coin, test): for each run collapsed onto
+// `test`, logs a kCollapsedCoin glitch when either side spans a full segment
+// (both ends at t == 0 or 1), then logs the run's would-be release.
+void SkOpCoincidence::debugMarkCollapsed(SkPathOpsDebug::GlitchLog* log, const SkCoincidentSpans* coin, const SkOpPtT* test) const {
+ const SkCoincidentSpans* head = coin;
+ while (coin) {
+ if (coin->collapsed(test)) {
+ if (zero_or_one(coin->coinPtTStart()->fT) && zero_or_one(coin->coinPtTEnd()->fT)) {
+ log->record(SkPathOpsDebug::kCollapsedCoin_Glitch, coin);
+ }
+ if (zero_or_one(coin->oppPtTStart()->fT) && zero_or_one(coin->oppPtTEnd()->fT)) {
+ log->record(SkPathOpsDebug::kCollapsedCoin_Glitch, coin);
+ }
+ this->debugRelease(log, head, coin);
+ }
+ coin = coin->next();
+ }
+}
+
+// Commented-out lines keep this in sync with markCollapsed()
+// Convenience overload: checks both coincidence lists (fHead and fTop).
+void SkOpCoincidence::debugMarkCollapsed(SkPathOpsDebug::GlitchLog* log, const SkOpPtT* test) const {
+ this->debugMarkCollapsed(log, fHead, test);
+ this->debugMarkCollapsed(log, fTop, test);
+}
+#endif // DEBUG_COIN
+
+void SkCoincidentSpans::debugShow() const {
+ SkDebugf("coinSpan - id=%d t=%1.9g tEnd=%1.9g\n", coinPtTStart()->segment()->debugID(),
+ coinPtTStart()->fT, coinPtTEnd()->fT);
+ SkDebugf("coinSpan + id=%d t=%1.9g tEnd=%1.9g\n", oppPtTStart()->segment()->debugID(),
+ oppPtTStart()->fT, oppPtTEnd()->fT);
+}
+
+void SkOpCoincidence::debugShowCoincidence() const {
+#if DEBUG_COINCIDENCE
+ const SkCoincidentSpans* span = fHead;
+ while (span) {
+ span->debugShow();
+ span = span->next();
+ }
+#endif // DEBUG_COINCIDENCE
+}
+
+#if DEBUG_COIN
+static void DebugCheckBetween(const SkOpSpanBase* next, const SkOpSpanBase* end,
+ double oStart, double oEnd, const SkOpSegment* oSegment,
+ SkPathOpsDebug::GlitchLog* log) {
+ SkASSERT(next != end);
+ SkASSERT(!next->contains(end) || log);
+ if (next->t() > end->t()) {
+ using std::swap;
+ swap(next, end);
+ }
+ do {
+ const SkOpPtT* ptT = next->ptT();
+ int index = 0;
+ bool somethingBetween = false;
+ do {
+ ++index;
+ ptT = ptT->next();
+ const SkOpPtT* checkPtT = next->ptT();
+ if (ptT == checkPtT) {
+ break;
+ }
+ bool looped = false;
+ for (int check = 0; check < index; ++check) {
+ if ((looped = checkPtT == ptT)) {
+ break;
+ }
+ checkPtT = checkPtT->next();
+ }
+ if (looped) {
+ SkASSERT(0);
+ break;
+ }
+ if (ptT->deleted()) {
+ continue;
+ }
+ if (ptT->segment() != oSegment) {
+ continue;
+ }
+ somethingBetween |= between(oStart, ptT->fT, oEnd);
+ } while (true);
+ SkASSERT(somethingBetween);
+ } while (next != end && (next = next->upCast()->next()));
+}
+
+static void DebugCheckOverlap(const SkCoincidentSpans* test, const SkCoincidentSpans* list,
+ SkPathOpsDebug::GlitchLog* log) {
+ if (!list) {
+ return;
+ }
+ const SkOpSegment* coinSeg = test->coinPtTStart()->segment();
+ SkASSERT(coinSeg == test->coinPtTEnd()->segment());
+ const SkOpSegment* oppSeg = test->oppPtTStart()->segment();
+ SkASSERT(oppSeg == test->oppPtTEnd()->segment());
+ SkASSERT(coinSeg != test->oppPtTStart()->segment());
+ SkDEBUGCODE(double tcs = test->coinPtTStart()->fT);
+ SkASSERT(between(0, tcs, 1));
+ SkDEBUGCODE(double tce = test->coinPtTEnd()->fT);
+ SkASSERT(between(0, tce, 1));
+ SkASSERT(tcs < tce);
+ double tos = test->oppPtTStart()->fT;
+ SkASSERT(between(0, tos, 1));
+ double toe = test->oppPtTEnd()->fT;
+ SkASSERT(between(0, toe, 1));
+ SkASSERT(tos != toe);
+ if (tos > toe) {
+ using std::swap;
+ swap(tos, toe);
+ }
+ do {
+ double lcs, lce, los, loe;
+ if (coinSeg == list->coinPtTStart()->segment()) {
+ if (oppSeg != list->oppPtTStart()->segment()) {
+ continue;
+ }
+ lcs = list->coinPtTStart()->fT;
+ lce = list->coinPtTEnd()->fT;
+ los = list->oppPtTStart()->fT;
+ loe = list->oppPtTEnd()->fT;
+ if (los > loe) {
+ using std::swap;
+ swap(los, loe);
+ }
+ } else if (coinSeg == list->oppPtTStart()->segment()) {
+ if (oppSeg != list->coinPtTStart()->segment()) {
+ continue;
+ }
+ lcs = list->oppPtTStart()->fT;
+ lce = list->oppPtTEnd()->fT;
+ if (lcs > lce) {
+ using std::swap;
+ swap(lcs, lce);
+ }
+ los = list->coinPtTStart()->fT;
+ loe = list->coinPtTEnd()->fT;
+ } else {
+ continue;
+ }
+ SkASSERT(tce < lcs || lce < tcs);
+ SkASSERT(toe < los || loe < tos);
+ } while ((list = list->next()));
+}
+
+
+static void DebugCheckOverlapTop(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // check for overlapping coincident spans
+ const SkCoincidentSpans* test = head;
+ while (test) {
+ const SkCoincidentSpans* next = test->next();
+ DebugCheckOverlap(test, next, log);
+ DebugCheckOverlap(test, opt, log);
+ test = next;
+ }
+}
+
+static void DebugValidate(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // look for pts inside coincident spans that are not inside the opposite spans
+ const SkCoincidentSpans* coin = head;
+ while (coin) {
+ SkASSERT(SkOpCoincidence::Ordered(coin->coinPtTStart()->segment(),
+ coin->oppPtTStart()->segment()));
+ SkASSERT(coin->coinPtTStart()->span()->ptT() == coin->coinPtTStart());
+ SkASSERT(coin->coinPtTEnd()->span()->ptT() == coin->coinPtTEnd());
+ SkASSERT(coin->oppPtTStart()->span()->ptT() == coin->oppPtTStart());
+ SkASSERT(coin->oppPtTEnd()->span()->ptT() == coin->oppPtTEnd());
+ coin = coin->next();
+ }
+ DebugCheckOverlapTop(head, opt, log);
+}
+#endif // DEBUG_COIN
+
+void SkOpCoincidence::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ DebugValidate(fHead, fTop, nullptr);
+ DebugValidate(fTop, nullptr, nullptr);
+#endif
+}
+
+#if DEBUG_COIN
+static void DebugCheckBetween(const SkCoincidentSpans* head, const SkCoincidentSpans* opt,
+ SkPathOpsDebug::GlitchLog* log) {
+ // look for pts inside coincident spans that are not inside the opposite spans
+ const SkCoincidentSpans* coin = head;
+ while (coin) {
+ DebugCheckBetween(coin->coinPtTStart()->span(), coin->coinPtTEnd()->span(),
+ coin->oppPtTStart()->fT, coin->oppPtTEnd()->fT, coin->oppPtTStart()->segment(),
+ log);
+ DebugCheckBetween(coin->oppPtTStart()->span(), coin->oppPtTEnd()->span(),
+ coin->coinPtTStart()->fT, coin->coinPtTEnd()->fT, coin->coinPtTStart()->segment(),
+ log);
+ coin = coin->next();
+ }
+ DebugCheckOverlapTop(head, opt, log);
+}
+#endif
+
+void SkOpCoincidence::debugCheckBetween() const {
+#if DEBUG_COINCIDENCE
+ if (fGlobalState->debugCheckHealth()) {
+ return;
+ }
+ DebugCheckBetween(fHead, fTop, nullptr);
+ DebugCheckBetween(fTop, nullptr, nullptr);
+#endif
+}
+
+#if DEBUG_COIN
+void SkOpContour::debugCheckHealth(SkPathOpsDebug::GlitchLog* log) const {
+ const SkOpSegment* segment = &fHead;
+ do {
+ segment->debugCheckHealth(log);
+ } while ((segment = segment->next()));
+}
+
+void SkOpCoincidence::debugCheckValid(SkPathOpsDebug::GlitchLog* log) const {
+#if DEBUG_VALIDATE
+ DebugValidate(fHead, fTop, log);
+ DebugValidate(fTop, nullptr, log);
+#endif
+}
+
+void SkOpCoincidence::debugCorrectEnds(SkPathOpsDebug::GlitchLog* log) const {
+ const SkCoincidentSpans* coin = fHead;
+ if (!coin) {
+ return;
+ }
+ do {
+ coin->debugCorrectEnds(log);
+ } while ((coin = coin->next()));
+}
+
+// commmented-out lines keep this aligned with missingCoincidence()
+void SkOpContour::debugMissingCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+// SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+// bool result = false;
+ do {
+ segment->debugMissingCoincidence(log);
+ segment = segment->next();
+ } while (segment);
+ return;
+}
+
+void SkOpContour::debugMoveMultiples(SkPathOpsDebug::GlitchLog* log) const {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ do {
+ segment->debugMoveMultiples(log);
+ } while ((segment = segment->next()));
+ return;
+}
+
+void SkOpContour::debugMoveNearby(SkPathOpsDebug::GlitchLog* log) const {
+ SkASSERT(fCount > 0);
+ const SkOpSegment* segment = &fHead;
+ do {
+ segment->debugMoveNearby(log);
+ } while ((segment = segment->next()));
+}
+#endif
+
+#if DEBUG_COINCIDENCE_ORDER
+void SkOpSegment::debugResetCoinT() const {
+ fDebugBaseIndex = -1;
+ fDebugBaseMin = 1;
+ fDebugBaseMax = -1;
+ fDebugLastIndex = -1;
+ fDebugLastMin = 1;
+ fDebugLastMax = -1;
+}
+#endif
+
+void SkOpSegment::debugValidate() const {
+#if DEBUG_COINCIDENCE_ORDER
+ {
+ const SkOpSpanBase* span = &fHead;
+ do {
+ span->debugResetCoinT();
+ } while (!span->final() && (span = span->upCast()->next()));
+ span = &fHead;
+ int index = 0;
+ do {
+ span->debugSetCoinT(index++);
+ } while (!span->final() && (span = span->upCast()->next()));
+ }
+#endif
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpSpanBase* span = &fHead;
+ double lastT = -1;
+ const SkOpSpanBase* prev = nullptr;
+ int count = 0;
+ int done = 0;
+ do {
+ if (!span->final()) {
+ ++count;
+ done += span->upCast()->done() ? 1 : 0;
+ }
+ SkASSERT(span->segment() == this);
+ SkASSERT(!prev || prev->upCast()->next() == span);
+ SkASSERT(!prev || prev == span->prev());
+ prev = span;
+ double t = span->ptT()->fT;
+ SkASSERT(lastT < t);
+ lastT = t;
+ span->debugValidate();
+ } while (!span->final() && (span = span->upCast()->next()));
+ SkASSERT(count == fCount);
+ SkASSERT(done == fDoneCount);
+ SkASSERT(count >= fDoneCount);
+ SkASSERT(span->final());
+ span->debugValidate();
+#endif
+}
+
+#if DEBUG_COIN
+
+// Commented-out lines keep this in sync with addOpp()
+void SkOpSpanBase::debugAddOpp(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* opp) const {
+ const SkOpPtT* oppPrev = this->ptT()->oppPrev(opp->ptT());
+ if (!oppPrev) {
+ return;
+ }
+ this->debugMergeMatches(log, opp);
+ this->ptT()->debugAddOpp(opp->ptT(), oppPrev);
+ this->debugCheckForCollapsedCoincidence(log);
+}
+
+// Commented-out lines keep this in sync with checkForCollapsedCoincidence()
+void SkOpSpanBase::debugCheckForCollapsedCoincidence(SkPathOpsDebug::GlitchLog* log) const {
+ const SkOpCoincidence* coins = this->globalState()->coincidence();
+ if (coins->isEmpty()) {
+ return;
+ }
+// the insert above may have put both ends of a coincident run in the same span
+// for each coincident ptT in loop; see if its opposite in is also in the loop
+// this implementation is the motivation for marking that a ptT is referenced by a coincident span
+ const SkOpPtT* head = this->ptT();
+ const SkOpPtT* test = head;
+ do {
+ if (!test->coincident()) {
+ continue;
+ }
+ coins->debugMarkCollapsed(log, test);
+ } while ((test = test->next()) != head);
+}
+#endif
+
+bool SkOpSpanBase::debugCoinEndLoopCheck() const {
+ int loop = 0;
+ const SkOpSpanBase* next = this;
+ SkOpSpanBase* nextCoin;
+ do {
+ nextCoin = next->fCoinEnd;
+ SkASSERT(nextCoin == this || nextCoin->fCoinEnd != nextCoin);
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpSpanBase* checkCoin = this->fCoinEnd;
+ const SkOpSpanBase* innerCoin = checkCoin;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerCoin = innerCoin->fCoinEnd;
+ if (checkCoin == innerCoin) {
+ SkDebugf("*** bad coincident end loop ***\n");
+ return false;
+ }
+ }
+ }
+ ++loop;
+ } while ((next = nextCoin) && next != this);
+ return true;
+}
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with insertCoinEnd()
+void SkOpSpanBase::debugInsertCoinEnd(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* coin) const {
+ if (containsCoinEnd(coin)) {
+// SkASSERT(coin->containsCoinEnd(this));
+ return;
+ }
+ debugValidate();
+// SkASSERT(this != coin);
+ log->record(SkPathOpsDebug::kMarkCoinEnd_Glitch, this, coin);
+// coin->fCoinEnd = this->fCoinEnd;
+// this->fCoinEnd = coinNext;
+ debugValidate();
+}
+
+// Commented-out lines keep this in sync with mergeMatches()
+// Look to see if pt-t linked list contains same segment more than once
+// if so, and if each pt-t is directly pointed to by spans in that segment,
+// merge them
+// keep the points, but remove spans so that the segment doesn't have 2 or more
+// spans pointing to the same pt-t loop at different loop elements
+void SkOpSpanBase::debugMergeMatches(SkPathOpsDebug::GlitchLog* log, const SkOpSpanBase* opp) const {
+ const SkOpPtT* test = &fPtT;
+ const SkOpPtT* testNext;
+ const SkOpPtT* stop = test;
+ do {
+ testNext = test->next();
+ if (test->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* testBase = test->span();
+ SkASSERT(testBase->ptT() == test);
+ const SkOpSegment* segment = test->segment();
+ if (segment->done()) {
+ continue;
+ }
+ const SkOpPtT* inner = opp->ptT();
+ const SkOpPtT* innerStop = inner;
+ do {
+ if (inner->segment() != segment) {
+ continue;
+ }
+ if (inner->deleted()) {
+ continue;
+ }
+ const SkOpSpanBase* innerBase = inner->span();
+ SkASSERT(innerBase->ptT() == inner);
+ // when the intersection is first detected, the span base is marked if there are
+ // more than one point in the intersection.
+// if (!innerBase->hasMultipleHint() && !testBase->hasMultipleHint()) {
+ if (!zero_or_one(inner->fT)) {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, innerBase, test);
+ } else {
+ SkASSERT(inner->fT != test->fT);
+ if (!zero_or_one(test->fT)) {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, testBase, inner);
+ } else {
+ log->record(SkPathOpsDebug::kMergeMatches_Glitch, segment);
+// SkDEBUGCODE(testBase->debugSetDeleted());
+// test->setDeleted();
+// SkDEBUGCODE(innerBase->debugSetDeleted());
+// inner->setDeleted();
+ }
+ }
+#ifdef SK_DEBUG // assert if another undeleted entry points to segment
+ const SkOpPtT* debugInner = inner;
+ while ((debugInner = debugInner->next()) != innerStop) {
+ if (debugInner->segment() != segment) {
+ continue;
+ }
+ if (debugInner->deleted()) {
+ continue;
+ }
+ SkOPASSERT(0);
+ }
+#endif
+ break;
+// }
+ break;
+ } while ((inner = inner->next()) != innerStop);
+ } while ((test = testNext) != stop);
+ this->debugCheckForCollapsedCoincidence(log);
+}
+
+#endif
+
+void SkOpSpanBase::debugResetCoinT() const {
+#if DEBUG_COINCIDENCE_ORDER
+ const SkOpPtT* ptT = &fPtT;
+ do {
+ ptT->debugResetCoinT();
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+#endif
+}
+
+void SkOpSpanBase::debugSetCoinT(int index) const {
+#if DEBUG_COINCIDENCE_ORDER
+ const SkOpPtT* ptT = &fPtT;
+ do {
+ if (!ptT->deleted()) {
+ ptT->debugSetCoinT(index);
+ }
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+#endif
+}
+
+const SkOpSpan* SkOpSpanBase::debugStarter(SkOpSpanBase const** endPtr) const {
+ const SkOpSpanBase* end = *endPtr;
+ SkASSERT(this->segment() == end->segment());
+ const SkOpSpanBase* result;
+ if (t() < end->t()) {
+ result = this;
+ } else {
+ result = end;
+ *endPtr = this;
+ }
+ return result->upCast();
+}
+
+void SkOpSpanBase::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ const SkOpPtT* ptT = &fPtT;
+ SkASSERT(ptT->span() == this);
+ do {
+// SkASSERT(SkDPoint::RoughlyEqual(fPtT.fPt, ptT->fPt));
+ ptT->debugValidate();
+ ptT = ptT->next();
+ } while (ptT != &fPtT);
+ SkASSERT(this->debugCoinEndLoopCheck());
+ if (!this->final()) {
+ SkASSERT(this->upCast()->debugCoinLoopCheck());
+ }
+ if (fFromAngle) {
+ fFromAngle->debugValidate();
+ }
+ if (!this->final() && this->upCast()->toAngle()) {
+ this->upCast()->toAngle()->debugValidate();
+ }
+#endif
+}
+
+bool SkOpSpan::debugCoinLoopCheck() const {
+ int loop = 0;
+ const SkOpSpan* next = this;
+ SkOpSpan* nextCoin;
+ do {
+ nextCoin = next->fCoincident;
+ SkASSERT(nextCoin == this || nextCoin->fCoincident != nextCoin);
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpSpan* checkCoin = this->fCoincident;
+ const SkOpSpan* innerCoin = checkCoin;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerCoin = innerCoin->fCoincident;
+ if (checkCoin == innerCoin) {
+ SkDebugf("*** bad coincident loop ***\n");
+ return false;
+ }
+ }
+ }
+ ++loop;
+ } while ((next = nextCoin) && next != this);
+ return true;
+}
+
+#if DEBUG_COIN
+// Commented-out lines keep this in sync with insertCoincidence() in header
+void SkOpSpan::debugInsertCoincidence(SkPathOpsDebug::GlitchLog* log, const SkOpSpan* coin) const {
+ if (containsCoincidence(coin)) {
+// SkASSERT(coin->containsCoincidence(this));
+ return;
+ }
+ debugValidate();
+// SkASSERT(this != coin);
+ log->record(SkPathOpsDebug::kMarkCoinStart_Glitch, this, coin);
+// coin->fCoincident = this->fCoincident;
+// this->fCoincident = coinNext;
+ debugValidate();
+}
+
+// Commented-out lines keep this in sync with insertCoincidence()
+void SkOpSpan::debugInsertCoincidence(SkPathOpsDebug::GlitchLog* log, const SkOpSegment* segment, bool flipped, bool ordered) const {
+ if (this->containsCoincidence(segment)) {
+ return;
+ }
+ const SkOpPtT* next = &fPtT;
+ while ((next = next->next()) != &fPtT) {
+ if (next->segment() == segment) {
+ const SkOpSpan* span;
+ const SkOpSpanBase* base = next->span();
+ if (!ordered) {
+ const SkOpSpanBase* spanEnd = fNext->contains(segment)->span();
+ const SkOpPtT* start = base->ptT()->starter(spanEnd->ptT());
+ FAIL_IF_COIN(!start->span()->upCastable(), this);
+ span = const_cast<SkOpSpan*>(start->span()->upCast());
+ }
+ else if (flipped) {
+ span = base->prev();
+ FAIL_IF_COIN(!span, this);
+ }
+ else {
+ FAIL_IF_COIN(!base->upCastable(), this);
+ span = base->upCast();
+ }
+ log->record(SkPathOpsDebug::kMarkCoinInsert_Glitch, span);
+ return;
+ }
+ }
+ log->record(SkPathOpsDebug::kMarkCoinMissing_Glitch, segment, this);
+ return;
+}
+#endif // DEBUG_COIN
+
+// called only by test code
+int SkIntersections::debugCoincidentUsed() const {
+ if (!fIsCoincident[0]) {
+ SkASSERT(!fIsCoincident[1]);
+ return 0;
+ }
+ int count = 0;
+ SkDEBUGCODE(int count2 = 0;)
+ for (int index = 0; index < fUsed; ++index) {
+ if (fIsCoincident[0] & (1 << index)) {
+ ++count;
+ }
+#ifdef SK_DEBUG
+ if (fIsCoincident[1] & (1 << index)) {
+ ++count2;
+ }
+#endif
+ }
+ SkASSERT(count == count2);
+ return count;
+}
+
+// Commented-out lines keep this in sync with addOpp()
+void SkOpPtT::debugAddOpp(const SkOpPtT* opp, const SkOpPtT* oppPrev) const {
+ SkDEBUGCODE(const SkOpPtT* oldNext = this->fNext);
+ SkASSERT(this != opp);
+// this->fNext = opp;
+ SkASSERT(oppPrev != oldNext);
+// oppPrev->fNext = oldNext;
+}
+
+bool SkOpPtT::debugContains(const SkOpPtT* check) const {
+ SkASSERT(this != check);
+ const SkOpPtT* ptT = this;
+ int links = 0;
+ do {
+ ptT = ptT->next();
+ if (ptT == check) {
+ return true;
+ }
+ ++links;
+ const SkOpPtT* test = this;
+ for (int index = 0; index < links; ++index) {
+ if (ptT == test) {
+ return false;
+ }
+ test = test->next();
+ }
+ } while (true);
+}
+
+const SkOpPtT* SkOpPtT::debugContains(const SkOpSegment* check) const {
+ SkASSERT(this->segment() != check);
+ const SkOpPtT* ptT = this;
+ int links = 0;
+ do {
+ ptT = ptT->next();
+ if (ptT->segment() == check) {
+ return ptT;
+ }
+ ++links;
+ const SkOpPtT* test = this;
+ for (int index = 0; index < links; ++index) {
+ if (ptT == test) {
+ return nullptr;
+ }
+ test = test->next();
+ }
+ } while (true);
+}
+
+const SkOpPtT* SkOpPtT::debugEnder(const SkOpPtT* end) const {
+ return fT < end->fT ? end : this;
+}
+
+int SkOpPtT::debugLoopLimit(bool report) const {
+ int loop = 0;
+ const SkOpPtT* next = this;
+ do {
+ for (int check = 1; check < loop - 1; ++check) {
+ const SkOpPtT* checkPtT = this->fNext;
+ const SkOpPtT* innerPtT = checkPtT;
+ for (int inner = check + 1; inner < loop; ++inner) {
+ innerPtT = innerPtT->fNext;
+ if (checkPtT == innerPtT) {
+ if (report) {
+ SkDebugf("*** bad ptT loop ***\n");
+ }
+ return loop;
+ }
+ }
+ }
+ // there's nothing wrong with extremely large loop counts -- but this may appear to hang
+ // by taking a very long time to figure out that no loop entry is a duplicate
+ // -- and it's likely that a large loop count is indicative of a bug somewhere
+ if (++loop > 1000) {
+ SkDebugf("*** loop count exceeds 1000 ***\n");
+ return 1000;
+ }
+ } while ((next = next->fNext) && next != this);
+ return 0;
+}
+
+const SkOpPtT* SkOpPtT::debugOppPrev(const SkOpPtT* opp) const {
+ return this->oppPrev(const_cast<SkOpPtT*>(opp));
+}
+
+void SkOpPtT::debugResetCoinT() const {
+#if DEBUG_COINCIDENCE_ORDER
+ this->segment()->debugResetCoinT();
+#endif
+}
+
+void SkOpPtT::debugSetCoinT(int index) const {
+#if DEBUG_COINCIDENCE_ORDER
+ this->segment()->debugSetCoinT(index, fT);
+#endif
+}
+
+void SkOpPtT::debugValidate() const {
+#if DEBUG_COINCIDENCE
+ if (this->globalState()->debugCheckHealth()) {
+ return;
+ }
+#endif
+#if DEBUG_VALIDATE
+ SkOpPhase phase = contour()->globalState()->phase();
+ if (phase == SkOpPhase::kIntersecting || phase == SkOpPhase::kFixWinding) {
+ return;
+ }
+ SkASSERT(fNext);
+ SkASSERT(fNext != this);
+ SkASSERT(fNext->fNext);
+ SkASSERT(debugLoopLimit(false) == 0);
+#endif
+}
+
+static void output_scalar(SkScalar num) {
+ if (num == (int) num) {
+ SkDebugf("%d", (int) num);
+ } else {
+ SkString str;
+ str.printf("%1.9g", num);
+ int width = (int) str.size();
+ const char* cStr = str.c_str();
+ while (cStr[width - 1] == '0') {
+ --width;
+ }
+ str.resize(width);
+ SkDebugf("%sf", str.c_str());
+ }
+}
+
+static void output_points(const SkPoint* pts, int count) {
+ for (int index = 0; index < count; ++index) {
+ output_scalar(pts[index].fX);
+ SkDebugf(", ");
+ output_scalar(pts[index].fY);
+ if (index + 1 < count) {
+ SkDebugf(", ");
+ }
+ }
+}
+
+static void showPathContours(const SkPath& path, const char* pathName) {
+ for (auto [verb, pts, w] : SkPathPriv::Iterate(path)) {
+ switch (verb) {
+ case SkPathVerb::kMove:
+ SkDebugf(" %s.moveTo(", pathName);
+ output_points(&pts[0], 1);
+ SkDebugf(");\n");
+ continue;
+ case SkPathVerb::kLine:
+ SkDebugf(" %s.lineTo(", pathName);
+ output_points(&pts[1], 1);
+ SkDebugf(");\n");
+ break;
+ case SkPathVerb::kQuad:
+ SkDebugf(" %s.quadTo(", pathName);
+ output_points(&pts[1], 2);
+ SkDebugf(");\n");
+ break;
+ case SkPathVerb::kConic:
+ SkDebugf(" %s.conicTo(", pathName);
+ output_points(&pts[1], 2);
+ SkDebugf(", %1.9gf);\n", *w);
+ break;
+ case SkPathVerb::kCubic:
+ SkDebugf(" %s.cubicTo(", pathName);
+ output_points(&pts[1], 3);
+ SkDebugf(");\n");
+ break;
+ case SkPathVerb::kClose:
+ SkDebugf(" %s.close();\n", pathName);
+ break;
+ default:
+ SkDEBUGFAIL("bad verb");
+ return;
+ }
+ }
+}
+
+static const char* gFillTypeStr[] = {
+ "kWinding",
+ "kEvenOdd",
+ "kInverseWinding",
+ "kInverseEvenOdd"
+};
+
+void SkPathOpsDebug::ShowOnePath(const SkPath& path, const char* name, bool includeDeclaration) {
+#define SUPPORT_RECT_CONTOUR_DETECTION 0
+#if SUPPORT_RECT_CONTOUR_DETECTION
+ int rectCount = path.isRectContours() ? path.rectContours(nullptr, nullptr) : 0;
+ if (rectCount > 0) {
+ SkTDArray<SkRect> rects;
+ SkTDArray<SkPathDirection> directions;
+ rects.setCount(rectCount);
+ directions.setCount(rectCount);
+ path.rectContours(rects.begin(), directions.begin());
+ for (int contour = 0; contour < rectCount; ++contour) {
+ const SkRect& rect = rects[contour];
+ SkDebugf("path.addRect(%1.9g, %1.9g, %1.9g, %1.9g, %s);\n", rect.fLeft, rect.fTop,
+ rect.fRight, rect.fBottom, directions[contour] == SkPathDirection::kCCW
+ ? "SkPathDirection::kCCW" : "SkPathDirection::kCW");
+ }
+ return;
+ }
+#endif
+ SkPathFillType fillType = path.getFillType();
+ SkASSERT(fillType >= SkPathFillType::kWinding && fillType <= SkPathFillType::kInverseEvenOdd);
+ if (includeDeclaration) {
+ SkDebugf(" SkPath %s;\n", name);
+ }
+ SkDebugf(" %s.setFillType(SkPath::%s);\n", name, gFillTypeStr[(int)fillType]);
+ showPathContours(path, name);
+}
+
+#if DEBUG_DUMP_VERIFY
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+
+static void dump_path(FILE* file, const SkPath& path, bool dumpAsHex) {
+ SkDynamicMemoryWStream wStream;
+ path.dump(&wStream, dumpAsHex);
+ sk_sp<SkData> data(wStream.detachAsData());
+ fprintf(file, "%.*s\n", (int) data->size(), (char*) data->data());
+}
+
+static int dumpID = 0;
+
+void DumpOp(const SkPath& one, const SkPath& two, SkPathOp op,
+ const char* testName) {
+ FILE* file = sk_fopen("op_dump.txt", kWrite_SkFILE_Flag);
+ DumpOp(file, one, two, op, testName);
+}
+
+void DumpOp(FILE* file, const SkPath& one, const SkPath& two, SkPathOp op,
+ const char* testName) {
+ const char* name = testName ? testName : "op";
+ fprintf(file,
+ "\nstatic void %s_%d(skiatest::Reporter* reporter, const char* filename) {\n",
+ name, ++dumpID);
+ fprintf(file, " SkPath path;\n");
+ fprintf(file, " path.setFillType((SkPath::FillType) %d);\n", one.getFillType());
+ dump_path(file, one, true);
+ fprintf(file, " SkPath path1(path);\n");
+ fprintf(file, " path.reset();\n");
+ fprintf(file, " path.setFillType((SkPath::FillType) %d);\n", two.getFillType());
+ dump_path(file, two, true);
+ fprintf(file, " SkPath path2(path);\n");
+ fprintf(file, " testPathOp(reporter, path1, path2, (SkPathOp) %d, filename);\n", op);
+ fprintf(file, "}\n\n");
+ fclose(file);
+}
+
+void DumpSimplify(const SkPath& path, const char* testName) {
+ FILE* file = sk_fopen("simplify_dump.txt", kWrite_SkFILE_Flag);
+ DumpSimplify(file, path, testName);
+}
+
+void DumpSimplify(FILE* file, const SkPath& path, const char* testName) {
+ const char* name = testName ? testName : "simplify";
+ fprintf(file,
+ "\nstatic void %s_%d(skiatest::Reporter* reporter, const char* filename) {\n",
+ name, ++dumpID);
+ fprintf(file, " SkPath path;\n");
+ fprintf(file, " path.setFillType((SkPath::FillType) %d);\n", path.getFillType());
+ dump_path(file, path, true);
+ fprintf(file, " testSimplify(reporter, path, filename);\n");
+ fprintf(file, "}\n\n");
+ fclose(file);
+}
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRegion.h"
+
+const int bitWidth = 64;
+const int bitHeight = 64;
+
+static void debug_scale_matrix(const SkPath& one, const SkPath* two, SkMatrix& scale) {
+ SkRect larger = one.getBounds();
+ if (two) {
+ larger.join(two->getBounds());
+ }
+ SkScalar largerWidth = larger.width();
+ if (largerWidth < 4) {
+ largerWidth = 4;
+ }
+ SkScalar largerHeight = larger.height();
+ if (largerHeight < 4) {
+ largerHeight = 4;
+ }
+ SkScalar hScale = (bitWidth - 2) / largerWidth;
+ SkScalar vScale = (bitHeight - 2) / largerHeight;
+ scale.reset();
+ scale.preScale(hScale, vScale);
+ larger.fLeft *= hScale;
+ larger.fRight *= hScale;
+ larger.fTop *= vScale;
+ larger.fBottom *= vScale;
+ SkScalar dx = -16000 > larger.fLeft ? -16000 - larger.fLeft
+ : 16000 < larger.fRight ? 16000 - larger.fRight : 0;
+ SkScalar dy = -16000 > larger.fTop ? -16000 - larger.fTop
+ : 16000 < larger.fBottom ? 16000 - larger.fBottom : 0;
+ scale.preTranslate(dx, dy);
+}
+
+static int debug_paths_draw_the_same(const SkPath& one, const SkPath& two, SkBitmap& bits) {
+ if (bits.width() == 0) {
+ bits.allocN32Pixels(bitWidth * 2, bitHeight);
+ }
+ SkCanvas canvas(bits);
+ canvas.drawColor(SK_ColorWHITE);
+ SkPaint paint;
+ canvas.save();
+ const SkRect& bounds1 = one.getBounds();
+ canvas.translate(-bounds1.fLeft + 1, -bounds1.fTop + 1);
+ canvas.drawPath(one, paint);
+ canvas.restore();
+ canvas.save();
+ canvas.translate(-bounds1.fLeft + 1 + bitWidth, -bounds1.fTop + 1);
+ canvas.drawPath(two, paint);
+ canvas.restore();
+ int errors = 0;
+ for (int y = 0; y < bitHeight - 1; ++y) {
+ uint32_t* addr1 = bits.getAddr32(0, y);
+ uint32_t* addr2 = bits.getAddr32(0, y + 1);
+ uint32_t* addr3 = bits.getAddr32(bitWidth, y);
+ uint32_t* addr4 = bits.getAddr32(bitWidth, y + 1);
+ for (int x = 0; x < bitWidth - 1; ++x) {
+ // count 2x2 blocks
+ bool err = addr1[x] != addr3[x];
+ if (err) {
+ errors += addr1[x + 1] != addr3[x + 1]
+ && addr2[x] != addr4[x] && addr2[x + 1] != addr4[x + 1];
+ }
+ }
+ }
+ return errors;
+}
+
+void ReportOpFail(const SkPath& one, const SkPath& two, SkPathOp op) {
+ SkDebugf("// Op did not expect failure\n");
+ DumpOp(stderr, one, two, op, "opTest");
+ fflush(stderr);
+}
+
+void VerifyOp(const SkPath& one, const SkPath& two, SkPathOp op,
+ const SkPath& result) {
+ SkPath pathOut, scaledPathOut;
+ SkRegion rgnA, rgnB, openClip, rgnOut;
+ openClip.setRect({-16000, -16000, 16000, 16000});
+ rgnA.setPath(one, openClip);
+ rgnB.setPath(two, openClip);
+ rgnOut.op(rgnA, rgnB, (SkRegion::Op) op);
+ rgnOut.getBoundaryPath(&pathOut);
+ SkMatrix scale;
+ debug_scale_matrix(one, &two, scale);
+ SkRegion scaledRgnA, scaledRgnB, scaledRgnOut;
+ SkPath scaledA, scaledB;
+ scaledA.addPath(one, scale);
+ scaledA.setFillType(one.getFillType());
+ scaledB.addPath(two, scale);
+ scaledB.setFillType(two.getFillType());
+ scaledRgnA.setPath(scaledA, openClip);
+ scaledRgnB.setPath(scaledB, openClip);
+ scaledRgnOut.op(scaledRgnA, scaledRgnB, (SkRegion::Op) op);
+ scaledRgnOut.getBoundaryPath(&scaledPathOut);
+ SkBitmap bitmap;
+ SkPath scaledOut;
+ scaledOut.addPath(result, scale);
+ scaledOut.setFillType(result.getFillType());
+ int errors = debug_paths_draw_the_same(scaledPathOut, scaledOut, bitmap);
+ const int MAX_ERRORS = 9;
+ if (errors > MAX_ERRORS) {
+ fprintf(stderr, "// Op did not expect errors=%d\n", errors);
+ DumpOp(stderr, one, two, op, "opTest");
+ fflush(stderr);
+ }
+}
+
+void ReportSimplifyFail(const SkPath& path) {
+ SkDebugf("// Simplify did not expect failure\n");
+ DumpSimplify(stderr, path, "simplifyTest");
+ fflush(stderr);
+}
+
+void VerifySimplify(const SkPath& path, const SkPath& result) {
+ SkPath pathOut, scaledPathOut;
+ SkRegion rgnA, openClip, rgnOut;
+ openClip.setRect({-16000, -16000, 16000, 16000});
+ rgnA.setPath(path, openClip);
+ rgnOut.getBoundaryPath(&pathOut);
+ SkMatrix scale;
+ debug_scale_matrix(path, nullptr, scale);
+ SkRegion scaledRgnA;
+ SkPath scaledA;
+ scaledA.addPath(path, scale);
+ scaledA.setFillType(path.getFillType());
+ scaledRgnA.setPath(scaledA, openClip);
+ scaledRgnA.getBoundaryPath(&scaledPathOut);
+ SkBitmap bitmap;
+ SkPath scaledOut;
+ scaledOut.addPath(result, scale);
+ scaledOut.setFillType(result.getFillType());
+ int errors = debug_paths_draw_the_same(scaledPathOut, scaledOut, bitmap);
+ const int MAX_ERRORS = 9;
+ if (errors > MAX_ERRORS) {
+ fprintf(stderr, "// Simplify did not expect errors=%d\n", errors);
+ DumpSimplify(stderr, path, "simplifyTest");
+ fflush(stderr);
+ }
+}
+#endif // DEBUG_DUMP_VERIFY
+
+// global path dumps for msvs Visual Studio 17 to use from Immediate Window
+void Dump(const SkPath& path) {
+ path.dump();
+}
+
+void DumpHex(const SkPath& path) {
+ path.dumpHex();
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsDebug.h b/gfx/skia/skia/src/pathops/SkPathOpsDebug.h
new file mode 100644
index 0000000000..ef0a233d6c
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsDebug.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsDebug_DEFINED
+#define SkPathOpsDebug_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkTDArray.h"
+
+#include <cstddef>
+
+class SkOpAngle;
+class SkOpCoincidence;
+class SkOpContour;
+class SkOpContourHead;
+class SkOpPtT;
+class SkOpSegment;
+class SkOpSpan;
+class SkOpSpanBase;
+class SkPath;
+struct SkDConic;
+struct SkDCubic;
+struct SkDLine;
+struct SkDPoint;
+struct SkDQuad;
+
+// define this when running fuzz
+// #define SK_BUILD_FOR_FUZZER
+
+#ifdef SK_RELEASE
+#define FORCE_RELEASE 1
+#else
+#define FORCE_RELEASE 1 // set force release to 1 for multiple thread -- no debugging
+#endif
+
+#define DEBUG_UNDER_DEVELOPMENT 0
+
+#define ONE_OFF_DEBUG 0
+#define ONE_OFF_DEBUG_MATHEMATICA 0
+
+#if defined(SK_BUILD_FOR_WIN) || defined(SK_BUILD_FOR_ANDROID)
+ #define SK_RAND(seed) rand()
+#else
+ #define SK_RAND(seed) rand_r(&seed)
+#endif
+
+#define WIND_AS_STRING(x) char x##Str[12]; \
+ if (!SkPathOpsDebug::ValidWind(x)) strcpy(x##Str, "?"); \
+ else snprintf(x##Str, sizeof(x##Str), "%d", x)
+
+#if FORCE_RELEASE
+
+#define DEBUG_ACTIVE_OP 0
+#define DEBUG_ACTIVE_SPANS 0
+#define DEBUG_ADD_INTERSECTING_TS 0
+#define DEBUG_ADD_T 0
+#define DEBUG_ALIGNMENT 0
+#define DEBUG_ANGLE 0
+#define DEBUG_ASSEMBLE 0
+#define DEBUG_COINCIDENCE 0
+#define DEBUG_COINCIDENCE_DUMP 0 // accumulate and dump which algorithms fired
+#define DEBUG_COINCIDENCE_ORDER 0 // for well behaved curves, check if pairs match up in t-order
+#define DEBUG_COINCIDENCE_VERBOSE 0 // usually whether the next function generates coincidence
+#define DEBUG_CUBIC_BINARY_SEARCH 0
+#define DEBUG_CUBIC_SPLIT 0
+#define DEBUG_DUMP_SEGMENTS 0
+#define DEBUG_DUMP_VERIFY 0
+#define DEBUG_FLOW 0
+#define DEBUG_LIMIT_WIND_SUM 0
+#define DEBUG_MARK_DONE 0
+#define DEBUG_PATH_CONSTRUCTION 0
+#define DEBUG_PERP 0
+#define DEBUG_SORT 0
+#define DEBUG_T_SECT 0
+#define DEBUG_T_SECT_DUMP 0
+#define DEBUG_T_SECT_LOOP_COUNT 0
+#define DEBUG_VALIDATE 0
+#define DEBUG_WINDING 0
+#define DEBUG_WINDING_AT_T 0
+
+#else
+
+#define DEBUG_ACTIVE_OP 1
+#define DEBUG_ACTIVE_SPANS 1
+#define DEBUG_ADD_INTERSECTING_TS 1
+#define DEBUG_ADD_T 1
+#define DEBUG_ALIGNMENT 0
+#define DEBUG_ANGLE 1
+#define DEBUG_ASSEMBLE 1
+#define DEBUG_COINCIDENCE 1
+#define DEBUG_COINCIDENCE_DUMP 1
+#define DEBUG_COINCIDENCE_ORDER 1 // tight arc quads may generate out-of-order coincidence spans
+#define DEBUG_COINCIDENCE_VERBOSE 1
+#define DEBUG_CUBIC_BINARY_SEARCH 0
+#define DEBUG_CUBIC_SPLIT 1
+#define DEBUG_DUMP_VERIFY 1
+#define DEBUG_DUMP_SEGMENTS 1
+#define DEBUG_FLOW 1
+#define DEBUG_LIMIT_WIND_SUM 15
+#define DEBUG_MARK_DONE 1
+#define DEBUG_PATH_CONSTRUCTION 1
+#define DEBUG_PERP 1
+#define DEBUG_SORT 1
+#define DEBUG_T_SECT 0 // enabling may trigger validate asserts even though op does not fail
+#define DEBUG_T_SECT_DUMP 0 // Use 1 normally. Use 2 to number segments, 3 for script output
+#define DEBUG_T_SECT_LOOP_COUNT 0
+#define DEBUG_VALIDATE 1
+#define DEBUG_WINDING 1
+#define DEBUG_WINDING_AT_T 1
+
+#endif
+
+#ifdef SK_RELEASE
+ #define SkDEBUGRELEASE(a, b) b
+ #define SkDEBUGPARAMS(...)
+#else
+ #define SkDEBUGRELEASE(a, b) a
+ #define SkDEBUGPARAMS(...) , __VA_ARGS__
+#endif
+
+#if DEBUG_VALIDATE == 0
+ #define PATH_OPS_DEBUG_VALIDATE_PARAMS(...)
+#else
+ #define PATH_OPS_DEBUG_VALIDATE_PARAMS(...) , __VA_ARGS__
+#endif
+
+#if DEBUG_T_SECT == 0
+ #define PATH_OPS_DEBUG_T_SECT_RELEASE(a, b) b
+ #define PATH_OPS_DEBUG_T_SECT_PARAMS(...)
+ #define PATH_OPS_DEBUG_T_SECT_CODE(...)
+#else
+ #define PATH_OPS_DEBUG_T_SECT_RELEASE(a, b) a
+ #define PATH_OPS_DEBUG_T_SECT_PARAMS(...) , __VA_ARGS__
+ #define PATH_OPS_DEBUG_T_SECT_CODE(...) __VA_ARGS__
+#endif
+
+#if DEBUG_T_SECT_DUMP > 1
+ extern int gDumpTSectNum;
+#endif
+
+#if DEBUG_COINCIDENCE || DEBUG_COINCIDENCE_DUMP
+ #define DEBUG_COIN 1
+#else
+ #define DEBUG_COIN 0
+#endif
+
+#if DEBUG_COIN
+enum class SkOpPhase : char;
+
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS() \
+ int lineNo, SkOpPhase phase, int iteration
+ #define DEBUG_COIN_DECLARE_PARAMS() \
+ , DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS() \
+ __LINE__, SkOpPhase::kNoChange, 0
+ #define DEBUG_COIN_PARAMS() \
+ , DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration) \
+ __LINE__, SkOpPhase::kNoChange, iteration
+ #define DEBUG_ITER_PARAMS(iteration) \
+ , DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase) \
+ __LINE__, SkOpPhase::phase, 0
+ #define DEBUG_PHASE_PARAMS(phase) \
+ , DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_SET_PHASE() \
+ this->globalState()->debugSetPhase(__func__, lineNo, phase, iteration)
+ #define DEBUG_STATIC_SET_PHASE(obj) \
+ obj->globalState()->debugSetPhase(__func__, lineNo, phase, iteration)
+#elif DEBUG_VALIDATE
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS() \
+ SkOpPhase phase
+ #define DEBUG_COIN_DECLARE_PARAMS() \
+ , DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS() \
+ SkOpPhase::kNoChange
+ #define DEBUG_COIN_PARAMS() \
+ , DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration) \
+ SkOpPhase::kNoChange
+ #define DEBUG_ITER_PARAMS(iteration) \
+ , DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase) \
+ SkOpPhase::phase
+ #define DEBUG_PHASE_PARAMS(phase) \
+ , DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_SET_PHASE() \
+ this->globalState()->debugSetPhase(phase)
+ #define DEBUG_STATIC_SET_PHASE(obj) \
+ obj->globalState()->debugSetPhase(phase)
+#else
+ #define DEBUG_COIN_DECLARE_ONLY_PARAMS()
+ #define DEBUG_COIN_DECLARE_PARAMS()
+ #define DEBUG_COIN_ONLY_PARAMS()
+ #define DEBUG_COIN_PARAMS()
+ #define DEBUG_ITER_ONLY_PARAMS(iteration)
+ #define DEBUG_ITER_PARAMS(iteration)
+ #define DEBUG_PHASE_ONLY_PARAMS(phase)
+ #define DEBUG_PHASE_PARAMS(phase)
+ #define DEBUG_SET_PHASE()
+ #define DEBUG_STATIC_SET_PHASE(obj)
+#endif
+
+#define CUBIC_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define CONIC_DEBUG_STR "{{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}, %1.9g}"
+#define QUAD_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define LINE_DEBUG_STR "{{{%1.9g,%1.9g}, {%1.9g,%1.9g}}}"
+#define PT_DEBUG_STR "{{%1.9g,%1.9g}}"
+
+#define T_DEBUG_STR(t, n) #t "[" #n "]=%1.9g"
+#define TX_DEBUG_STR(t) #t "[%d]=%1.9g"
+#define CUBIC_DEBUG_DATA(c) c[0].fX, c[0].fY, c[1].fX, c[1].fY, c[2].fX, c[2].fY, c[3].fX, c[3].fY
+#define CONIC_DEBUG_DATA(c, w) c[0].fX, c[0].fY, c[1].fX, c[1].fY, c[2].fX, c[2].fY, w
+#define QUAD_DEBUG_DATA(q) q[0].fX, q[0].fY, q[1].fX, q[1].fY, q[2].fX, q[2].fY
+#define LINE_DEBUG_DATA(l) l[0].fX, l[0].fY, l[1].fX, l[1].fY
+#define PT_DEBUG_DATA(i, n) i.pt(n).asSkPoint().fX, i.pt(n).asSkPoint().fY
+
+#ifndef DEBUG_TEST
+#define DEBUG_TEST 0
+#endif
+
+// Tests with extreme numbers may fail, but all other tests should never fail.
+#define FAIL_IF(cond) \
+ do { bool fail = (cond); SkOPASSERT(!fail); if (fail) return false; } while (false)
+
+#define FAIL_WITH_NULL_IF(cond) \
+ do { bool fail = (cond); SkOPASSERT(!fail); if (fail) return nullptr; } while (false)
+
+class SkPathOpsDebug {
+public:
+#if DEBUG_COIN
+ struct GlitchLog;
+
+ enum GlitchType {
+ kUninitialized_Glitch,
+ kAddCorruptCoin_Glitch,
+ kAddExpandedCoin_Glitch,
+ kAddExpandedFail_Glitch,
+ kAddIfCollapsed_Glitch,
+ kAddIfMissingCoin_Glitch,
+ kAddMissingCoin_Glitch,
+ kAddMissingExtend_Glitch,
+ kAddOrOverlap_Glitch,
+ kCollapsedCoin_Glitch,
+ kCollapsedDone_Glitch,
+ kCollapsedOppValue_Glitch,
+ kCollapsedSpan_Glitch,
+ kCollapsedWindValue_Glitch,
+ kCorrectEnd_Glitch,
+ kDeletedCoin_Glitch,
+ kExpandCoin_Glitch,
+ kFail_Glitch,
+ kMarkCoinEnd_Glitch,
+ kMarkCoinInsert_Glitch,
+ kMarkCoinMissing_Glitch,
+ kMarkCoinStart_Glitch,
+ kMergeMatches_Glitch,
+ kMissingCoin_Glitch,
+ kMissingDone_Glitch,
+ kMissingIntersection_Glitch,
+ kMoveMultiple_Glitch,
+ kMoveNearbyClearAll_Glitch,
+ kMoveNearbyClearAll2_Glitch,
+ kMoveNearbyMerge_Glitch,
+ kMoveNearbyMergeFinal_Glitch,
+ kMoveNearbyRelease_Glitch,
+ kMoveNearbyReleaseFinal_Glitch,
+ kReleasedSpan_Glitch,
+ kReturnFalse_Glitch,
+ kUnaligned_Glitch,
+ kUnalignedHead_Glitch,
+ kUnalignedTail_Glitch,
+ };
+
+ struct CoinDictEntry {
+ int fIteration;
+ int fLineNumber;
+ GlitchType fGlitchType;
+ const char* fFunctionName;
+ };
+
+ struct CoinDict {
+ void add(const CoinDictEntry& key);
+ void add(const CoinDict& dict);
+ void dump(const char* str, bool visitCheck) const;
+ SkTDArray<CoinDictEntry> fDict;
+ };
+
+ static CoinDict gCoinSumChangedDict;
+ static CoinDict gCoinSumVisitedDict;
+ static CoinDict gCoinVistedDict;
+#endif
+
+#if defined(SK_DEBUG) || !FORCE_RELEASE
+ static int gContourID;
+ static int gSegmentID;
+#endif
+
+#if DEBUG_SORT
+ static int gSortCountDefault;
+ static int gSortCount;
+#endif
+
+#if DEBUG_ACTIVE_OP
+ static const char* kPathOpStr[];
+#endif
+ static bool gRunFail;
+ static bool gVeryVerbose;
+
+#if DEBUG_ACTIVE_SPANS
+ static SkString gActiveSpans;
+#endif
+#if DEBUG_DUMP_VERIFY
+ static bool gDumpOp;
+ static bool gVerifyOp;
+#endif
+
+ static const char* OpStr(SkPathOp );
+ static void MathematicaIze(char* str, size_t bufferSize);
+ static bool ValidWind(int winding);
+ static void WindingPrintf(int winding);
+
+ static void ShowActiveSpans(SkOpContourHead* contourList);
+ static void ShowOnePath(const SkPath& path, const char* name, bool includeDeclaration);
+ static void ShowPath(const SkPath& one, const SkPath& two, SkPathOp op, const char* name);
+
+ static bool ChaseContains(const SkTDArray<SkOpSpanBase*>& , const SkOpSpanBase* );
+
+ static void CheckHealth(class SkOpContourHead* contourList);
+
+#if DEBUG_COIN
+ static void DumpCoinDict();
+ static void DumpGlitchType(GlitchType );
+#endif
+
+};
+
+// Visual Studio 2017 does not permit calling member functions from the Immediate Window.
+// Global functions work fine, however. Use globals rather than static members inside a class.
+const SkOpAngle* AngleAngle(const SkOpAngle*, int id);
+SkOpContour* AngleContour(SkOpAngle*, int id);
+const SkOpPtT* AnglePtT(const SkOpAngle*, int id);
+const SkOpSegment* AngleSegment(const SkOpAngle*, int id);
+const SkOpSpanBase* AngleSpan(const SkOpAngle*, int id);
+
+const SkOpAngle* ContourAngle(SkOpContour*, int id);
+SkOpContour* ContourContour(SkOpContour*, int id);
+const SkOpPtT* ContourPtT(SkOpContour*, int id);
+const SkOpSegment* ContourSegment(SkOpContour*, int id);
+const SkOpSpanBase* ContourSpan(SkOpContour*, int id);
+
+const SkOpAngle* CoincidenceAngle(SkOpCoincidence*, int id);
+SkOpContour* CoincidenceContour(SkOpCoincidence*, int id);
+const SkOpPtT* CoincidencePtT(SkOpCoincidence*, int id);
+const SkOpSegment* CoincidenceSegment(SkOpCoincidence*, int id);
+const SkOpSpanBase* CoincidenceSpan(SkOpCoincidence*, int id);
+
+const SkOpAngle* PtTAngle(const SkOpPtT*, int id);
+SkOpContour* PtTContour(SkOpPtT*, int id);
+const SkOpPtT* PtTPtT(const SkOpPtT*, int id);
+const SkOpSegment* PtTSegment(const SkOpPtT*, int id);
+const SkOpSpanBase* PtTSpan(const SkOpPtT*, int id);
+
+const SkOpAngle* SegmentAngle(const SkOpSegment*, int id);
+SkOpContour* SegmentContour(SkOpSegment*, int id);
+const SkOpPtT* SegmentPtT(const SkOpSegment*, int id);
+const SkOpSegment* SegmentSegment(const SkOpSegment*, int id);
+const SkOpSpanBase* SegmentSpan(const SkOpSegment*, int id);
+
+const SkOpAngle* SpanAngle(const SkOpSpanBase*, int id);
+SkOpContour* SpanContour(SkOpSpanBase*, int id);
+const SkOpPtT* SpanPtT(const SkOpSpanBase*, int id);
+const SkOpSegment* SpanSegment(const SkOpSpanBase*, int id);
+const SkOpSpanBase* SpanSpan(const SkOpSpanBase*, int id);
+
+#if DEBUG_DUMP_VERIFY
+void DumpOp(const SkPath& one, const SkPath& two, SkPathOp op,
+ const char* testName);
+void DumpOp(FILE* file, const SkPath& one, const SkPath& two, SkPathOp op,
+ const char* testName);
+void DumpSimplify(const SkPath& path, const char* testName);
+void DumpSimplify(FILE* file, const SkPath& path, const char* testName);
+void ReportOpFail(const SkPath& one, const SkPath& two, SkPathOp op);
+void ReportSimplifyFail(const SkPath& path);
+void VerifyOp(const SkPath& one, const SkPath& two, SkPathOp op,
+ const SkPath& result);
+void VerifySimplify(const SkPath& path, const SkPath& result);
+#endif
+
+// global path dumps for msvs Visual Studio 17 to use from Immediate Window
+void Dump(const SkOpContour& );
+void DumpAll(const SkOpContour& );
+void DumpAngles(const SkOpContour& );
+void DumpContours(const SkOpContour& );
+void DumpContoursAll(const SkOpContour& );
+void DumpContoursAngles(const SkOpContour& );
+void DumpContoursPts(const SkOpContour& );
+void DumpContoursPt(const SkOpContour& , int segmentID);
+void DumpContoursSegment(const SkOpContour& , int segmentID);
+void DumpContoursSpan(const SkOpContour& , int segmentID);
+void DumpContoursSpans(const SkOpContour& );
+void DumpPt(const SkOpContour& , int );
+void DumpPts(const SkOpContour& , const char* prefix = "seg");
+void DumpSegment(const SkOpContour& , int );
+void DumpSegments(const SkOpContour& , const char* prefix = "seg", SkPathOp op = (SkPathOp) -1);
+void DumpSpan(const SkOpContour& , int );
+void DumpSpans(const SkOpContour& );
+
+void Dump(const SkOpSegment& );
+void DumpAll(const SkOpSegment& );
+void DumpAngles(const SkOpSegment& );
+void DumpCoin(const SkOpSegment& );
+void DumpPts(const SkOpSegment& , const char* prefix = "seg");
+
+void Dump(const SkOpPtT& );
+void DumpAll(const SkOpPtT& );
+
+void Dump(const SkOpSpanBase& );
+void DumpCoin(const SkOpSpanBase& );
+void DumpAll(const SkOpSpanBase& );
+
+void DumpCoin(const SkOpSpan& );
+bool DumpSpan(const SkOpSpan& );
+
+void Dump(const SkDConic& );
+void DumpID(const SkDConic& , int id);
+
+void Dump(const SkDCubic& );
+void DumpID(const SkDCubic& , int id);
+
+void Dump(const SkDLine& );
+void DumpID(const SkDLine& , int id);
+
+void Dump(const SkDQuad& );
+void DumpID(const SkDQuad& , int id);
+
+void Dump(const SkDPoint& );
+
+void Dump(const SkOpAngle& );
+
+// generates tools/path_sorter.htm and path_visualizer.htm compatible data
+void DumpQ(const SkDQuad& quad1, const SkDQuad& quad2, int testNo);
+void DumpT(const SkDQuad& quad, double t);
+
+// global path dumps for msvs Visual Studio 17 to use from Immediate Window
+void Dump(const SkPath& path);
+void DumpHex(const SkPath& path);
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp b/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp
new file mode 100644
index 0000000000..253f95b770
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsLine.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsLine.h"
+
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cmath>
+#include <algorithm>
+
+SkDPoint SkDLine::ptAtT(double t) const {
+ if (0 == t) {
+ return fPts[0];
+ }
+ if (1 == t) {
+ return fPts[1];
+ }
+ double one_t = 1 - t;
+ SkDPoint result = { one_t * fPts[0].fX + t * fPts[1].fX, one_t * fPts[0].fY + t * fPts[1].fY };
+ return result;
+}
+
+double SkDLine::exactPoint(const SkDPoint& xy) const {
+ if (xy == fPts[0]) { // do cheapest test first
+ return 0;
+ }
+ if (xy == fPts[1]) {
+ return 1;
+ }
+ return -1;
+}
+
+double SkDLine::nearPoint(const SkDPoint& xy, bool* unequal) const {
+ if (!AlmostBetweenUlps(fPts[0].fX, xy.fX, fPts[1].fX)
+ || !AlmostBetweenUlps(fPts[0].fY, xy.fY, fPts[1].fY)) {
+ return -1;
+ }
+ // project a perpendicular ray from the point to the line; find the T on the line
+ SkDVector len = fPts[1] - fPts[0]; // the x/y magnitudes of the line
+ double denom = len.fX * len.fX + len.fY * len.fY; // see DLine intersectRay
+ SkDVector ab0 = xy - fPts[0];
+ double numer = len.fX * ab0.fX + ab0.fY * len.fY;
+ if (!between(0, numer, denom)) {
+ return -1;
+ }
+ if (!denom) {
+ return 0;
+ }
+ double t = numer / denom;
+ SkDPoint realPt = ptAtT(t);
+ double dist = realPt.distance(xy); // OPTIMIZATION: can we compare against distSq instead ?
+ // find the ordinal in the original line with the largest unsigned exponent
+ double tiniest = std::min(std::min(std::min(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ double largest = std::max(std::max(std::max(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ largest = std::max(largest, -tiniest);
+ if (!AlmostEqualUlps_Pin(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ if (unequal) {
+ *unequal = (float) largest != (float) (largest + dist);
+ }
+ t = SkPinT(t); // a looser pin breaks skpwww_lptemp_com_3
+ SkASSERT(between(0, t, 1));
+ return t;
+}
+
+bool SkDLine::nearRay(const SkDPoint& xy) const {
+ // project a perpendicular ray from the point to the line; find the T on the line
+ SkDVector len = fPts[1] - fPts[0]; // the x/y magnitudes of the line
+ double denom = len.fX * len.fX + len.fY * len.fY; // see DLine intersectRay
+ SkDVector ab0 = xy - fPts[0];
+ double numer = len.fX * ab0.fX + ab0.fY * len.fY;
+ double t = numer / denom;
+ SkDPoint realPt = ptAtT(t);
+ double dist = realPt.distance(xy); // OPTIMIZATION: can we compare against distSq instead ?
+ // find the ordinal in the original line with the largest unsigned exponent
+ double tiniest = std::min(std::min(std::min(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ double largest = std::max(std::max(std::max(fPts[0].fX, fPts[0].fY), fPts[1].fX), fPts[1].fY);
+ largest = std::max(largest, -tiniest);
+ return RoughlyEqualUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+}
+
+double SkDLine::ExactPointH(const SkDPoint& xy, double left, double right, double y) {
+ if (xy.fY == y) {
+ if (xy.fX == left) {
+ return 0;
+ }
+ if (xy.fX == right) {
+ return 1;
+ }
+ }
+ return -1;
+}
+
+double SkDLine::NearPointH(const SkDPoint& xy, double left, double right, double y) {
+ if (!AlmostBequalUlps(xy.fY, y)) {
+ return -1;
+ }
+ if (!AlmostBetweenUlps(left, xy.fX, right)) {
+ return -1;
+ }
+ double t = (xy.fX - left) / (right - left);
+ t = SkPinT(t);
+ SkASSERT(between(0, t, 1));
+ double realPtX = (1 - t) * left + t * right;
+ SkDVector distU = {xy.fY - y, xy.fX - realPtX};
+ double distSq = distU.fX * distU.fX + distU.fY * distU.fY;
+ double dist = sqrt(distSq); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = std::min(std::min(y, left), right);
+ double largest = std::max(std::max(y, left), right);
+ largest = std::max(largest, -tiniest);
+ if (!AlmostEqualUlps(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ return t;
+}
+
+double SkDLine::ExactPointV(const SkDPoint& xy, double top, double bottom, double x) {
+ if (xy.fX == x) {
+ if (xy.fY == top) {
+ return 0;
+ }
+ if (xy.fY == bottom) {
+ return 1;
+ }
+ }
+ return -1;
+}
+
+double SkDLine::NearPointV(const SkDPoint& xy, double top, double bottom, double x) {
+ if (!AlmostBequalUlps(xy.fX, x)) {
+ return -1;
+ }
+ if (!AlmostBetweenUlps(top, xy.fY, bottom)) {
+ return -1;
+ }
+ double t = (xy.fY - top) / (bottom - top);
+ t = SkPinT(t);
+ SkASSERT(between(0, t, 1));
+ double realPtY = (1 - t) * top + t * bottom;
+ SkDVector distU = {xy.fX - x, xy.fY - realPtY};
+ double distSq = distU.fX * distU.fX + distU.fY * distU.fY;
+ double dist = sqrt(distSq); // OPTIMIZATION: can we compare against distSq instead ?
+ double tiniest = std::min(std::min(x, top), bottom);
+ double largest = std::max(std::max(x, top), bottom);
+ largest = std::max(largest, -tiniest);
+ if (!AlmostEqualUlps(largest, largest + dist)) { // is the dist within ULPS tolerance?
+ return -1;
+ }
+ return t;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsLine.h b/gfx/skia/skia/src/pathops/SkPathOpsLine.h
new file mode 100644
index 0000000000..ff5354d3ae
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsLine.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsLine_DEFINED
+#define SkPathOpsLine_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "src/pathops/SkPathOpsPoint.h"
+
+struct SkDLine {
+ SkDPoint fPts[2];
+
+ const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < 2); return fPts[n]; }
+ SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < 2); return fPts[n]; }
+
+ const SkDLine& set(const SkPoint pts[2]) {
+ fPts[0] = pts[0];
+ fPts[1] = pts[1];
+ return *this;
+ }
+
+ double exactPoint(const SkDPoint& xy) const;
+ static double ExactPointH(const SkDPoint& xy, double left, double right, double y);
+ static double ExactPointV(const SkDPoint& xy, double top, double bottom, double x);
+
+ double nearPoint(const SkDPoint& xy, bool* unequal) const;
+ bool nearRay(const SkDPoint& xy) const;
+ static double NearPointH(const SkDPoint& xy, double left, double right, double y);
+ static double NearPointV(const SkDPoint& xy, double top, double bottom, double x);
+ SkDPoint ptAtT(double t) const;
+
+ void dump() const;
+ void dumpID(int ) const;
+ void dumpInner() const;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp b/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp
new file mode 100644
index 0000000000..ad7665fe70
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsOp.cpp
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkPath.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkAddIntersections.h"
+#include "src/pathops/SkOpAngle.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsCommon.h"
+#include "src/pathops/SkPathOpsTypes.h"
+#include "src/pathops/SkPathWriter.h"
+
+#include <utility>
+
+static bool findChaseOp(SkTDArray<SkOpSpanBase*>& chase, SkOpSpanBase** startPtr,
+ SkOpSpanBase** endPtr, SkOpSegment** result) {
+ while (!chase.empty()) {
+ SkOpSpanBase* span = chase.back();
+ chase.pop_back();
+ // OPTIMIZE: prev makes this compatible with old code -- but is it necessary?
+ *startPtr = span->ptT()->prev()->span();
+ SkOpSegment* segment = (*startPtr)->segment();
+ bool done = true;
+ *endPtr = nullptr;
+ if (SkOpAngle* last = segment->activeAngle(*startPtr, startPtr, endPtr, &done)) {
+ *startPtr = last->start();
+ *endPtr = last->end();
+ #if TRY_ROTATE
+ *chase.insert(0) = span;
+ #else
+ *chase.append() = span;
+ #endif
+ *result = last->segment();
+ return true;
+ }
+ if (done) {
+ continue;
+ }
+ int winding;
+ bool sortable;
+ const SkOpAngle* angle = AngleWinding(*startPtr, *endPtr, &winding, &sortable);
+ if (!angle) {
+ *result = nullptr;
+ return true;
+ }
+ if (winding == SK_MinS32) {
+ continue;
+ }
+ int sumMiWinding, sumSuWinding;
+ if (sortable) {
+ segment = angle->segment();
+ sumMiWinding = segment->updateWindingReverse(angle);
+ if (sumMiWinding == SK_MinS32) {
+ SkASSERT(segment->globalState()->debugSkipAssert());
+ *result = nullptr;
+ return true;
+ }
+ sumSuWinding = segment->updateOppWindingReverse(angle);
+ if (sumSuWinding == SK_MinS32) {
+ SkASSERT(segment->globalState()->debugSkipAssert());
+ *result = nullptr;
+ return true;
+ }
+ if (segment->operand()) {
+ using std::swap;
+ swap(sumMiWinding, sumSuWinding);
+ }
+ }
+ SkOpSegment* first = nullptr;
+ const SkOpAngle* firstAngle = angle;
+ while ((angle = angle->next()) != firstAngle) {
+ segment = angle->segment();
+ SkOpSpanBase* start = angle->start();
+ SkOpSpanBase* end = angle->end();
+ int maxWinding = 0, sumWinding = 0, oppMaxWinding = 0, oppSumWinding = 0;
+ if (sortable) {
+ segment->setUpWindings(start, end, &sumMiWinding, &sumSuWinding,
+ &maxWinding, &sumWinding, &oppMaxWinding, &oppSumWinding);
+ }
+ if (!segment->done(angle)) {
+ if (!first && (sortable || start->starter(end)->windSum() != SK_MinS32)) {
+ first = segment;
+ *startPtr = start;
+ *endPtr = end;
+ }
+ // OPTIMIZATION: should this also add to the chase?
+ if (sortable) {
+ if (!segment->markAngle(maxWinding, sumWinding, oppMaxWinding,
+ oppSumWinding, angle, nullptr)) {
+ return false;
+ }
+ }
+ }
+ }
+ if (first) {
+ #if TRY_ROTATE
+ *chase.insert(0) = span;
+ #else
+ *chase.append() = span;
+ #endif
+ *result = first;
+ return true;
+ }
+ }
+ *result = nullptr;
+ return true;
+}
+
+static bool bridgeOp(SkOpContourHead* contourList, const SkPathOp op,
+ const int xorMask, const int xorOpMask, SkPathWriter* writer) {
+ bool unsortable = false;
+ bool lastSimple = false;
+ bool simple = false;
+ do {
+ SkOpSpan* span = FindSortableTop(contourList);
+ if (!span) {
+ break;
+ }
+ SkOpSegment* current = span->segment();
+ SkOpSpanBase* start = span->next();
+ SkOpSpanBase* end = span;
+ SkTDArray<SkOpSpanBase*> chase;
+ do {
+ if (current->activeOp(start, end, xorMask, xorOpMask, op)) {
+ do {
+ if (!unsortable && current->done()) {
+ break;
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ lastSimple = simple;
+ SkOpSegment* next = current->findNextOp(&chase, &nextStart, &nextEnd,
+ &unsortable, &simple, op, xorMask, xorOpMask);
+ if (!next) {
+ if (!unsortable && writer->hasMove()
+ && current->verb() != SkPath::kLine_Verb
+ && !writer->isClosed()) {
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ if (!writer->isClosed()) {
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ }
+ } else if (lastSimple) {
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ }
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!writer->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (current->activeWinding(start, end) && !writer->isClosed()) {
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current->markDone(spanStart);
+ }
+ }
+ writer->finishContour();
+ } else {
+ SkOpSpanBase* last;
+ if (!current->markAndChaseDone(start, end, &last)) {
+ return false;
+ }
+ if (last && !last->chased()) {
+ last->setChased(true);
+ SkASSERT(!SkPathOpsDebug::ChaseContains(chase, last));
+ *chase.append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append id=%d", __FUNCTION__, last->segment()->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ }
+ if (!findChaseOp(chase, &start, &end, &current)) {
+ return false;
+ }
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ if (!current) {
+ break;
+ }
+ } while (true);
+ } while (true);
+ return true;
+}
+
+// diagram of why this simplification is possible is here:
+// https://skia.org/dev/present/pathops link at bottom of the page
+// https://drive.google.com/file/d/0BwoLUwz9PYkHLWpsaXd0UDdaN00/view?usp=sharing
+static const SkPathOp gOpInverse[kReverseDifference_SkPathOp + 1][2][2] = {
+// inside minuend outside minuend
+// inside subtrahend outside subtrahend inside subtrahend outside subtrahend
+{{ kDifference_SkPathOp, kIntersect_SkPathOp }, { kUnion_SkPathOp, kReverseDifference_SkPathOp }},
+{{ kIntersect_SkPathOp, kDifference_SkPathOp }, { kReverseDifference_SkPathOp, kUnion_SkPathOp }},
+{{ kUnion_SkPathOp, kReverseDifference_SkPathOp }, { kDifference_SkPathOp, kIntersect_SkPathOp }},
+{{ kXOR_SkPathOp, kXOR_SkPathOp }, { kXOR_SkPathOp, kXOR_SkPathOp }},
+{{ kReverseDifference_SkPathOp, kUnion_SkPathOp }, { kIntersect_SkPathOp, kDifference_SkPathOp }},
+};
+
+static const bool gOutInverse[kReverseDifference_SkPathOp + 1][2][2] = {
+ {{ false, false }, { true, false }}, // diff
+ {{ false, false }, { false, true }}, // sect
+ {{ false, true }, { true, true }}, // union
+ {{ false, true }, { true, false }}, // xor
+ {{ false, true }, { false, false }}, // rev diff
+};
+
+#if DEBUG_T_SECT_LOOP_COUNT
+
+#include "include/private/base/SkMutex.h"
+
+SkOpGlobalState debugWorstState(nullptr, nullptr SkDEBUGPARAMS(false) SkDEBUGPARAMS(nullptr));
+
+void ReportPathOpsDebugging() {
+ debugWorstState.debugLoopReport();
+}
+
+extern void (*gVerboseFinalize)();
+
+#endif
+
+bool OpDebug(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result
+ SkDEBUGPARAMS(bool skipAssert) SkDEBUGPARAMS(const char* testName)) {
+#if DEBUG_DUMP_VERIFY
+#ifndef SK_DEBUG
+ const char* testName = "release";
+#endif
+ if (SkPathOpsDebug::gDumpOp) {
+ DumpOp(one, two, op, testName);
+ }
+#endif
+ op = gOpInverse[op][one.isInverseFillType()][two.isInverseFillType()];
+ bool inverseFill = gOutInverse[op][one.isInverseFillType()][two.isInverseFillType()];
+ SkPathFillType fillType = inverseFill ? SkPathFillType::kInverseEvenOdd :
+ SkPathFillType::kEvenOdd;
+ SkRect rect1, rect2;
+ if (kIntersect_SkPathOp == op && one.isRect(&rect1) && two.isRect(&rect2)) {
+ result->reset();
+ result->setFillType(fillType);
+ if (rect1.intersect(rect2)) {
+ result->addRect(rect1);
+ }
+ return true;
+ }
+ if (one.isEmpty() || two.isEmpty()) {
+ SkPath work;
+ switch (op) {
+ case kIntersect_SkPathOp:
+ break;
+ case kUnion_SkPathOp:
+ case kXOR_SkPathOp:
+ work = one.isEmpty() ? two : one;
+ break;
+ case kDifference_SkPathOp:
+ if (!one.isEmpty()) {
+ work = one;
+ }
+ break;
+ case kReverseDifference_SkPathOp:
+ if (!two.isEmpty()) {
+ work = two;
+ }
+ break;
+ default:
+ SkASSERT(0); // unhandled case
+ }
+ if (inverseFill != work.isInverseFillType()) {
+ work.toggleInverseFillType();
+ }
+ return Simplify(work, result);
+ }
+ SkSTArenaAlloc<4096> allocator; // FIXME: add a constant expression here, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator
+ SkDEBUGPARAMS(skipAssert) SkDEBUGPARAMS(testName));
+ SkOpCoincidence coincidence(&globalState);
+ const SkPath* minuend = &one;
+ const SkPath* subtrahend = &two;
+ if (op == kReverseDifference_SkPathOp) {
+ using std::swap;
+ swap(minuend, subtrahend);
+ op = kDifference_SkPathOp;
+ }
+#if DEBUG_SORT
+ SkPathOpsDebug::gSortCount = SkPathOpsDebug::gSortCountDefault;
+#endif
+ // turn path into list of segments
+ SkOpEdgeBuilder builder(*minuend, contourList, &globalState);
+ if (builder.unparseable()) {
+ return false;
+ }
+ const int xorMask = builder.xorMask();
+ builder.addOperand(*subtrahend);
+ if (!builder.finish()) {
+ return false;
+ }
+#if DEBUG_DUMP_SEGMENTS
+ contourList->dumpSegments("seg", op);
+#endif
+
+ const int xorOpMask = builder.xorMask();
+ if (!SortContourList(&contourList, xorMask == kEvenOdd_PathOpsMask,
+ xorOpMask == kEvenOdd_PathOpsMask)) {
+ result->reset();
+ result->setFillType(fillType);
+ return true;
+ }
+ // find all intersections between segments
+ SkOpContour* current = contourList;
+ do {
+ SkOpContour* next = current;
+ while (AddIntersectTs(current, next, &coincidence)
+ && (next = next->next()))
+ ;
+ } while ((current = current->next()));
+#if DEBUG_VALIDATE
+ globalState.setPhase(SkOpPhase::kWalking);
+#endif
+ bool success = HandleCoincidence(contourList, &coincidence);
+#if DEBUG_COIN
+ globalState.debugAddToGlobalCoinDicts();
+#endif
+ if (!success) {
+ return false;
+ }
+#if DEBUG_ALIGNMENT
+ contourList->dumpSegments("aligned");
+#endif
+ // construct closed contours
+ SkPath original = *result;
+ result->reset();
+ result->setFillType(fillType);
+ SkPathWriter wrapper(*result);
+ if (!bridgeOp(contourList, op, xorMask, xorOpMask, &wrapper)) {
+ *result = original;
+ return false;
+ }
+ wrapper.assemble(); // if some edges could not be resolved, assemble remaining
+#if DEBUG_T_SECT_LOOP_COUNT
+ static SkMutex& debugWorstLoop = *(new SkMutex);
+ {
+ SkAutoMutexExclusive autoM(debugWorstLoop);
+ if (!gVerboseFinalize) {
+ gVerboseFinalize = &ReportPathOpsDebugging;
+ }
+ debugWorstState.debugDoYourWorst(&globalState);
+ }
+#endif
+ return true;
+}
+
+bool Op(const SkPath& one, const SkPath& two, SkPathOp op, SkPath* result) {
+#if DEBUG_DUMP_VERIFY
+ if (SkPathOpsDebug::gVerifyOp) {
+ if (!OpDebug(one, two, op, result SkDEBUGPARAMS(false) SkDEBUGPARAMS(nullptr))) {
+ ReportOpFail(one, two, op);
+ return false;
+ }
+ VerifyOp(one, two, op, *result);
+ return true;
+ }
+#endif
+ return OpDebug(one, two, op, result SkDEBUGPARAMS(true) SkDEBUGPARAMS(nullptr));
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsPoint.h b/gfx/skia/skia/src/pathops/SkPathOpsPoint.h
new file mode 100644
index 0000000000..9d70df0870
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsPoint.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsPoint_DEFINED
+#define SkPathOpsPoint_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+// True when both coordinates of the two points compare equal under the
+// ULPS-based scalar tolerance used throughout pathops.
+inline bool AlmostEqualUlps(const SkPoint& pt1, const SkPoint& pt2) {
+    return AlmostEqualUlps(pt1.fX, pt2.fX) && AlmostEqualUlps(pt1.fY, pt2.fY);
+}
+
+// A 2-D vector with double-precision components; the pathops counterpart of
+// SkVector. Provides the dot/cross/length primitives the intersection code
+// relies on, including tolerance-aware cross products.
+struct SkDVector {
+    double fX;
+    double fY;
+
+    // Copies a single-precision SkVector into this double vector.
+    SkDVector& set(const SkVector& pt) {
+        fX = pt.fX;
+        fY = pt.fY;
+        return *this;
+    }
+
+    // only used by testing
+    void operator+=(const SkDVector& v) {
+        fX += v.fX;
+        fY += v.fY;
+    }
+
+    // only called by nearestT, which is currently only used by testing
+    void operator-=(const SkDVector& v) {
+        fX -= v.fX;
+        fY -= v.fY;
+    }
+
+    // only used by testing
+    void operator/=(const double s) {
+        fX /= s;
+        fY /= s;
+    }
+
+    // only used by testing
+    void operator*=(const double s) {
+        fX *= s;
+        fY *= s;
+    }
+
+    // Narrows back to a single-precision SkVector.
+    SkVector asSkVector() const {
+        SkVector v = {SkDoubleToScalar(fX), SkDoubleToScalar(fY)};
+        return v;
+    }
+
+    // only used by testing
+    double cross(const SkDVector& a) const {
+        return fX * a.fY - fY * a.fX;
+    }
+
+    // similar to cross, this bastardization considers nearly coincident to be zero
+    // uses ulps epsilon == 16
+    double crossCheck(const SkDVector& a) const {
+        double xy = fX * a.fY;
+        double yx = fY * a.fX;
+        return AlmostEqualUlps(xy, yx) ? 0 : xy - yx;
+    }
+
+    // allow tinier numbers
+    double crossNoNormalCheck(const SkDVector& a) const {
+        double xy = fX * a.fY;
+        double yx = fY * a.fX;
+        return AlmostEqualUlpsNoNormalCheck(xy, yx) ? 0 : xy - yx;
+    }
+
+    // Standard dot product.
+    double dot(const SkDVector& a) const {
+        return fX * a.fX + fY * a.fY;
+    }
+
+    double length() const {
+        return sqrt(lengthSquared());
+    }
+
+    double lengthSquared() const {
+        return fX * fX + fY * fY;
+    }
+
+    // Scales this to unit length. Uses sk_ieee_double_divide so a zero-length
+    // vector yields infinities rather than trapping; callers can then detect
+    // that via isFinite().
+    SkDVector& normalize() {
+        double inverseLength = sk_ieee_double_divide(1, this->length());
+        fX *= inverseLength;
+        fY *= inverseLength;
+        return *this;
+    }
+
+    bool isFinite() const {
+        return std::isfinite(fX) && std::isfinite(fY);
+    }
+};
+
+// A 2-D point with double-precision coordinates; the pathops counterpart of
+// SkPoint. The family of approximately*/roughly* comparisons differ only in
+// which ULPS tolerance they apply (Dequal vs Pequal vs RoughlyEqual); each
+// first tries the cheap per-coordinate check, then falls back to comparing
+// the inter-point distance against the magnitude of the coordinates.
+struct SkDPoint {
+    double fX;
+    double fY;
+
+    void set(const SkPoint& pt) {
+        fX = pt.fX;
+        fY = pt.fY;
+    }
+
+    friend SkDVector operator-(const SkDPoint& a, const SkDPoint& b) {
+        return { a.fX - b.fX, a.fY - b.fY };
+    }
+
+    friend bool operator==(const SkDPoint& a, const SkDPoint& b) {
+        return a.fX == b.fX && a.fY == b.fY;
+    }
+
+    friend bool operator!=(const SkDPoint& a, const SkDPoint& b) {
+        return a.fX != b.fX || a.fY != b.fY;
+    }
+
+    void operator=(const SkPoint& pt) {
+        fX = pt.fX;
+        fY = pt.fY;
+    }
+
+    // only used by testing
+    void operator+=(const SkDVector& v) {
+        fX += v.fX;
+        fY += v.fY;
+    }
+
+    // only used by testing
+    void operator-=(const SkDVector& v) {
+        fX -= v.fX;
+        fY -= v.fY;
+    }
+
+    // only used by testing
+    SkDPoint operator+(const SkDVector& v) {
+        SkDPoint result = *this;
+        result += v;
+        return result;
+    }
+
+    // only used by testing
+    SkDPoint operator-(const SkDVector& v) {
+        SkDPoint result = *this;
+        result -= v;
+        return result;
+    }
+
+    // note: this can not be implemented with
+    // return approximately_equal(a.fY, fY) && approximately_equal(a.fX, fX);
+    // because that will not take the magnitude of the values into account
+    bool approximatelyDEqual(const SkDPoint& a) const {
+        if (approximately_equal(fX, a.fX) && approximately_equal(fY, a.fY)) {
+            return true;
+        }
+        if (!RoughlyEqualUlps(fX, a.fX) || !RoughlyEqualUlps(fY, a.fY)) {
+            return false;
+        }
+        double dist = distance(a);  // OPTIMIZATION: can we compare against distSq instead ?
+        double tiniest = std::min(std::min(std::min(fX, a.fX), fY), a.fY);
+        double largest = std::max(std::max(std::max(fX, a.fX), fY), a.fY);
+        largest = std::max(largest, -tiniest);
+        return AlmostDequalUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+    }
+
+    // Convenience overload: widens a to double precision first.
+    bool approximatelyDEqual(const SkPoint& a) const {
+        SkDPoint dA;
+        dA.set(a);
+        return approximatelyDEqual(dA);
+    }
+
+    // Same shape as approximatelyDEqual but uses the Pequal (point) tolerance
+    // for the final distance test.
+    bool approximatelyEqual(const SkDPoint& a) const {
+        if (approximately_equal(fX, a.fX) && approximately_equal(fY, a.fY)) {
+            return true;
+        }
+        if (!RoughlyEqualUlps(fX, a.fX) || !RoughlyEqualUlps(fY, a.fY)) {
+            return false;
+        }
+        double dist = distance(a);  // OPTIMIZATION: can we compare against distSq instead ?
+        double tiniest = std::min(std::min(std::min(fX, a.fX), fY), a.fY);
+        double largest = std::max(std::max(std::max(fX, a.fX), fY), a.fY);
+        largest = std::max(largest, -tiniest);
+        return AlmostPequalUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+    }
+
+    // Convenience overload: widens a to double precision first.
+    bool approximatelyEqual(const SkPoint& a) const {
+        SkDPoint dA;
+        dA.set(a);
+        return approximatelyEqual(dA);
+    }
+
+    // Static single-precision variant of approximatelyEqual (Dequal tolerance).
+    static bool ApproximatelyEqual(const SkPoint& a, const SkPoint& b) {
+        if (approximately_equal(a.fX, b.fX) && approximately_equal(a.fY, b.fY)) {
+            return true;
+        }
+        if (!RoughlyEqualUlps(a.fX, b.fX) || !RoughlyEqualUlps(a.fY, b.fY)) {
+            return false;
+        }
+        SkDPoint dA, dB;
+        dA.set(a);
+        dB.set(b);
+        double dist = dA.distance(dB);  // OPTIMIZATION: can we compare against distSq instead ?
+        float tiniest = std::min(std::min(std::min(a.fX, b.fX), a.fY), b.fY);
+        float largest = std::max(std::max(std::max(a.fX, b.fX), a.fY), b.fY);
+        largest = std::max(largest, -tiniest);
+        return AlmostDequalUlps((double) largest, largest + dist); // is dist within ULPS tolerance?
+    }
+
+    // only used by testing
+    bool approximatelyZero() const {
+        return approximately_zero(fX) && approximately_zero(fY);
+    }
+
+    // Narrows back to a single-precision SkPoint.
+    SkPoint asSkPoint() const {
+        SkPoint pt = {SkDoubleToScalar(fX), SkDoubleToScalar(fY)};
+        return pt;
+    }
+
+    double distance(const SkDPoint& a) const {
+        SkDVector temp = *this - a;
+        return temp.length();
+    }
+
+    double distanceSquared(const SkDPoint& a) const {
+        SkDVector temp = *this - a;
+        return temp.lengthSquared();
+    }
+
+    // Midpoint of a and b.
+    static SkDPoint Mid(const SkDPoint& a, const SkDPoint& b) {
+        SkDPoint result;
+        result.fX = (a.fX + b.fX) / 2;
+        result.fY = (a.fY + b.fY) / 2;
+        return result;
+    }
+
+    // Loosest member comparison: roughly_equal per coordinate, then the
+    // distance-vs-magnitude fallback with the RoughlyEqualUlps tolerance.
+    bool roughlyEqual(const SkDPoint& a) const {
+        if (roughly_equal(fX, a.fX) && roughly_equal(fY, a.fY)) {
+            return true;
+        }
+        double dist = distance(a);  // OPTIMIZATION: can we compare against distSq instead ?
+        double tiniest = std::min(std::min(std::min(fX, a.fX), fY), a.fY);
+        double largest = std::max(std::max(std::max(fX, a.fX), fY), a.fY);
+        largest = std::max(largest, -tiniest);
+        return RoughlyEqualUlps(largest, largest + dist); // is the dist within ULPS tolerance?
+    }
+
+    // NOTE(review): unlike the member comparisons above, this early-out uses
+    // && (both coordinates must differ) rather than || — presumably
+    // intentional for a looser filter, but worth confirming upstream.
+    static bool RoughlyEqual(const SkPoint& a, const SkPoint& b) {
+        if (!RoughlyEqualUlps(a.fX, b.fX) && !RoughlyEqualUlps(a.fY, b.fY)) {
+            return false;
+        }
+        SkDPoint dA, dB;
+        dA.set(a);
+        dB.set(b);
+        double dist = dA.distance(dB);  // OPTIMIZATION: can we compare against distSq instead ?
+        float tiniest = std::min(std::min(std::min(a.fX, b.fX), a.fY), b.fY);
+        float largest = std::max(std::max(std::max(a.fX, b.fX), a.fY), b.fY);
+        largest = std::max(largest, -tiniest);
+        return RoughlyEqualUlps((double) largest, largest + dist); // is dist within ULPS tolerance?
+    }
+
+    // very light weight check, should only be used for inequality check
+    // NOTE(review): the diff components are not taken by absolute value, so a
+    // negative component can be masked by std::max — acceptable only because
+    // callers use this as a cheap reject, per the comment above.
+    static bool WayRoughlyEqual(const SkPoint& a, const SkPoint& b) {
+        float largestNumber = std::max(SkTAbs(a.fX), std::max(SkTAbs(a.fY),
+                std::max(SkTAbs(b.fX), SkTAbs(b.fY))));
+        SkVector diffs = a - b;
+        float largestDiff = std::max(diffs.fX, diffs.fY);
+        return roughly_zero_when_compared_to(largestDiff, largestNumber);
+    }
+
+    // utilities callable by the user from the debugger when the implementation code is linked in
+    void dump() const;
+    static void Dump(const SkPoint& pt);
+    static void DumpHex(const SkPoint& pt);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp b/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp
new file mode 100644
index 0000000000..74578734aa
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsQuad.cpp
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsQuad.h"
+
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkLineParameters.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsRect.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+#include <cmath>
+
+// from blackpawn.com/texts/pointinpoly
+// Barycentric point-in-triangle test. The denominator is kept unnormalized
+// (and may be negative for a clockwise triangle), so the sign of denom
+// selects which comparison direction is valid.
+static bool pointInTriangle(const SkDPoint fPts[3], const SkDPoint& test) {
+    SkDVector v0 = fPts[2] - fPts[0];
+    SkDVector v1 = fPts[1] - fPts[0];
+    SkDVector v2 = test - fPts[0];
+    double dot00 = v0.dot(v0);
+    double dot01 = v0.dot(v1);
+    double dot02 = v0.dot(v2);
+    double dot11 = v1.dot(v1);
+    double dot12 = v1.dot(v2);
+    // Compute barycentric coordinates
+    double denom = dot00 * dot11 - dot01 * dot01;
+    double u = dot11 * dot02 - dot01 * dot12;
+    double v = dot00 * dot12 - dot01 * dot02;
+    // Check if point is in triangle
+    if (denom >= 0) {
+        return u >= 0 && v >= 0 && u + v < denom;
+    }
+    return u <= 0 && v <= 0 && u + v > denom;
+}
+
+// True if test exactly matches either end point of the quad's point array.
+static bool matchesEnd(const SkDPoint fPts[3], const SkDPoint& test) {
+    return fPts[0] == test || fPts[2] == test;
+}
+
+/* started with at_most_end_pts_in_common from SkDQuadIntersection.cpp */
+// Do a quick reject by rotating all points relative to a line formed by
+// a pair of one quad's points. If the 2nd quad's points
+// are on the line or on the opposite side from the 1st quad's 'odd man', the
+// curves at most intersect at the endpoints.
+/* if returning true, check contains true if quad's hull collapsed, making the cubic linear
+   if returning false, check contains true if the quad pair has only the end point in common
+*/
+// See the block comment above: quick hull-based reject. For each 'odd man'
+// control point, form the line through the other two points; if all of q2's
+// points lie on the line or on the opposite side from the odd man, the curves
+// can intersect at most at their end points and false is returned.
+bool SkDQuad::hullIntersects(const SkDQuad& q2, bool* isLinear) const {
+    bool linear = true;
+    for (int oddMan = 0; oddMan < kPointCount; ++oddMan) {
+        const SkDPoint* endPt[2];
+        this->otherPts(oddMan, endPt);
+        double origX = endPt[0]->fX;
+        double origY = endPt[0]->fY;
+        double adj = endPt[1]->fX - origX;
+        double opp = endPt[1]->fY - origY;
+        // Cross product sign tells which side of the line the odd man is on.
+        double sign = (fPts[oddMan].fY - origY) * adj - (fPts[oddMan].fX - origX) * opp;
+        if (approximately_zero(sign)) {
+            // Odd man lies (nearly) on the line: contributes no side info.
+            continue;
+        }
+        linear = false;
+        bool foundOutlier = false;
+        for (int n = 0; n < kPointCount; ++n) {
+            double test = (q2[n].fY - origY) * adj - (q2[n].fX - origX) * opp;
+            if (test * sign > 0 && !precisely_zero(test)) {
+                // q2 has a point on the same side as the odd man: no reject.
+                foundOutlier = true;
+                break;
+            }
+        }
+        if (!foundOutlier) {
+            return false;
+        }
+    }
+    if (linear && !matchesEnd(fPts, q2.fPts[0]) && !matchesEnd(fPts, q2.fPts[2])) {
+        // if the end point of the opposite quad is inside the hull that is nearly a line,
+        // then representing the quad as a line may cause the intersection to be missed.
+        // Check to see if the endpoint is in the triangle.
+        if (pointInTriangle(fPts, q2.fPts[0]) || pointInTriangle(fPts, q2.fPts[2])) {
+            linear = false;
+        }
+    }
+    *isLinear = linear;
+    return true;
+}
+
+// Cross-type hull tests simply forward to the higher-order curve's overload.
+bool SkDQuad::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+    return conic.hullIntersects(*this, isLinear);
+}
+
+bool SkDQuad::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+    return cubic.hullIntersects(*this, isLinear);
+}
+
+/* bit twiddling for finding the off curve index (x&~m is the pair in [0,1,2] excluding oddMan)
+oddMan opp x=oddMan^opp x=x-oddMan m=x>>2 x&~m
+ 0 1 1 1 0 1
+ 2 2 2 0 2
+ 1 1 0 -1 -1 0
+ 2 3 2 0 2
+ 2 1 3 1 0 1
+ 2 0 -2 -1 0
+*/
+// Fills endPt with pointers to the two control points other than fPts[oddMan],
+// using the branch-free bit twiddling documented in the table above.
+void SkDQuad::otherPts(int oddMan, const SkDPoint* endPt[2]) const {
+    for (int opp = 1; opp < kPointCount; ++opp) {
+        int end = (oddMan ^ opp) - oddMan;  // choose a value not equal to oddMan
+        end &= ~(end >> 2);  // if the value went negative, set it to zero
+        endPt[opp - 1] = &fPts[end];
+    }
+}
+
+// Copies the real roots in s[0..realRoots) that fall (approximately) inside
+// [0, 1] into t, clamping near-boundary values to exactly 0 or 1 and skipping
+// approximate duplicates. Returns the number of roots stored in t.
+int SkDQuad::AddValidTs(double s[], int realRoots, double* t) {
+    int foundRoots = 0;
+    for (int index = 0; index < realRoots; ++index) {
+        double tValue = s[index];
+        if (approximately_zero_or_more(tValue) && approximately_one_or_less(tValue)) {
+            // Snap roots just outside the unit interval onto its boundary.
+            if (approximately_less_than_zero(tValue)) {
+                tValue = 0;
+            } else if (approximately_greater_than_one(tValue)) {
+                tValue = 1;
+            }
+            // Reject roots approximately equal to one already recorded.
+            for (int idx2 = 0; idx2 < foundRoots; ++idx2) {
+                if (approximately_equal(t[idx2], tValue)) {
+                    goto nextRoot;
+                }
+            }
+            t[foundRoots++] = tValue;
+        }
+nextRoot:
+        {}
+    }
+    return foundRoots;
+}
+
+// note: caller expects multiple results to be sorted smaller first
+// note: http://en.wikipedia.org/wiki/Loss_of_significance has an interesting
+// analysis of the quadratic equation, suggesting why the following looks at
+// the sign of B -- and further suggesting that the greatest loss of precision
+// is in b squared less two a c
+// Solves A*t^2 + B*t + C == 0 and returns only the roots in [0, 1] via t.
+int SkDQuad::RootsValidT(double A, double B, double C, double t[2]) {
+    double s[2];
+    int realRoots = RootsReal(A, B, C, s);
+    int foundRoots = AddValidTs(s, realRoots, t);
+    return foundRoots;
+}
+
+// Degenerate (A == 0) case of the quadratic: solve B*x + C == 0.
+// When B is also nearly zero, reports the single root 0 only if C is exactly 0.
+static int handle_zero(const double B, const double C, double s[2]) {
+    if (approximately_zero(B)) {
+        s[0] = 0;
+        return C == 0;  // 1 root if the equation is identically zero, else 0
+    }
+    s[0] = -C / B;
+    return 1;
+}
+
+/*
+Numeric Solutions (5.6) suggests to solve the quadratic by computing
+       Q = -1/2(B + sgn(B)Sqrt(B^2 - 4 A C))
+and using the roots
+      t1 = Q / A
+      t2 = C / Q
+*/
+// this does not discard real roots <= 0 or >= 1
+// TODO(skbug.com/14063) Deduplicate with SkQuads::RootsReal
+// Returns the number of distinct real roots (0, 1, or 2) written to s.
+int SkDQuad::RootsReal(const double A, const double B, const double C, double s[2]) {
+    if (!A) {
+        return handle_zero(B, C, s);
+    }
+    const double p = B / (2 * A);
+    const double q = C / A;
+    // Treat a vanishingly small A as linear when dividing by it blew up.
+    if (approximately_zero(A) && (approximately_zero_inverse(p) || approximately_zero_inverse(q))) {
+        return handle_zero(B, C, s);
+    }
+    /* normal form: x^2 + px + q = 0 */
+    const double p2 = p * p;
+    if (!AlmostDequalUlps(p2, q) && p2 < q) {
+        return 0;  // negative discriminant: no real roots
+    }
+    double sqrt_D = 0;
+    if (p2 > q) {
+        sqrt_D = sqrt(p2 - q);
+    }
+    s[0] = sqrt_D - p;
+    s[1] = -sqrt_D - p;
+    // Collapse to a single root when the two are equal within tolerance.
+    return 1 + !AlmostDequalUlps(s[0], s[1]);
+}
+
+// True if the quad is effectively a straight line: the control point's
+// distance from the line through the end points is negligible compared to the
+// largest coordinate magnitude involved.
+bool SkDQuad::isLinear(int startIndex, int endIndex) const {
+    SkLineParameters lineParameters;
+    lineParameters.quadEndPoints(*this, startIndex, endIndex);
+    // FIXME: maybe it's possible to avoid this and compare non-normalized
+    lineParameters.normalize();
+    double distance = lineParameters.controlPtDistance(*this);
+    double tiniest = std::min(std::min(std::min(std::min(std::min(fPts[0].fX, fPts[0].fY),
+            fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY);
+    double largest = std::max(std::max(std::max(std::max(std::max(fPts[0].fX, fPts[0].fY),
+            fPts[1].fX), fPts[1].fY), fPts[2].fX), fPts[2].fY);
+    largest = std::max(largest, -tiniest);
+    return approximately_zero_when_compared_to(distance, largest);
+}
+
+// Returns a tangent vector at parameter t, proportional to the derivative
+// (the derivative's constant factor of 2 is dropped: the true derivative is
+// 2*[(t-1)*P0 + (1-2t)*P1 + t*P2]). If the result degenerates to zero at an
+// end point, falls back to the chord direction P2 - P0.
+SkDVector SkDQuad::dxdyAtT(double t) const {
+    double a = t - 1;
+    double b = 1 - 2 * t;
+    double c = t;
+    SkDVector result = { a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX,
+            a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY };
+    if (result.fX == 0 && result.fY == 0) {
+        if (zero_or_one(t)) {
+            result = fPts[2] - fPts[0];
+        } else {
+            // incomplete
+            SkDebugf("!q");
+        }
+    }
+    return result;
+}
+
+// OPTIMIZE: assert if caller passes in t == 0 / t == 1 ?
+// Evaluates the quad at parameter t using the Bernstein basis
+// (1-t)^2, 2t(1-t), t^2; t == 0 and t == 1 short-circuit to the end points.
+SkDPoint SkDQuad::ptAtT(double t) const {
+    if (0 == t) {
+        return fPts[0];
+    }
+    if (1 == t) {
+        return fPts[2];
+    }
+    double one_t = 1 - t;
+    double a = one_t * one_t;
+    double b = 2 * one_t * t;
+    double c = t * t;
+    SkDPoint result = { a * fPts[0].fX + b * fPts[1].fX + c * fPts[2].fX,
+            a * fPts[0].fY + b * fPts[1].fY + c * fPts[2].fY };
+    return result;
+}
+
+// Evaluates one coordinate of the quad at t by de Casteljau interpolation.
+// src points at interleaved coordinates with stride 2 (x0,y0,x1,y1,x2,y2),
+// so src[0], src[2], src[4] are the three values of one axis.
+static double interp_quad_coords(const double* src, double t) {
+    if (0 == t) {
+        return src[0];
+    }
+    if (1 == t) {
+        return src[4];
+    }
+    double ab = SkDInterp(src[0], src[2], t);
+    double bc = SkDInterp(src[2], src[4], t);
+    double abc = SkDInterp(ab, bc, t);
+    return abc;
+}
+
+// True if x is monotonic in t: the control x lies between the end xs.
+bool SkDQuad::monotonicInX() const {
+    return between(fPts[0].fX, fPts[1].fX, fPts[2].fX);
+}
+
+// True if y is monotonic in t: the control y lies between the end ys.
+bool SkDQuad::monotonicInY() const {
+    return between(fPts[0].fY, fPts[1].fY, fPts[2].fY);
+}
+
+/*
+Given a quadratic q, t1, and t2, find a small quadratic segment.
+
+The new quadratic is defined by A, B, and C, where
+ A = c[0]*(1 - t1)*(1 - t1) + 2*c[1]*t1*(1 - t1) + c[2]*t1*t1
+ C = c[3]*(1 - t1)*(1 - t1) + 2*c[2]*t1*(1 - t1) + c[1]*t1*t1
+
+To find B, compute the point halfway between t1 and t2:
+
+q(at (t1 + t2)/2) == D
+
+Next, compute where D must be if we know the value of B:
+
+_12 = A/2 + B/2
+12_ = B/2 + C/2
+123 = A/4 + B/2 + C/4
+ = D
+
+Group the known values on one side:
+
+B = D*2 - A/2 - C/2
+*/
+
+// OPTIMIZE? : special case t1 = 1 && t2 = 0
+// Extracts the sub-quad over [t1, t2] using the derivation in the comment
+// block above: the new end points A and C are evaluated directly, and the new
+// control point B is recovered from the midpoint D via B = 2*D - A/2 - C/2.
+SkDQuad SkDQuad::subDivide(double t1, double t2) const {
+    if (0 == t1 && 1 == t2) {
+        return *this;
+    }
+    SkDQuad dst;
+    double ax = dst[0].fX = interp_quad_coords(&fPts[0].fX, t1);
+    double ay = dst[0].fY = interp_quad_coords(&fPts[0].fY, t1);
+    double dx = interp_quad_coords(&fPts[0].fX, (t1 + t2) / 2);
+    double dy = interp_quad_coords(&fPts[0].fY, (t1 + t2) / 2);
+    double cx = dst[2].fX = interp_quad_coords(&fPts[0].fX, t2);
+    double cy = dst[2].fY = interp_quad_coords(&fPts[0].fY, t2);
+    /* bx = */ dst[1].fX = 2 * dx - (ax + cx) / 2;
+    /* by = */ dst[1].fY = 2 * dy - (ay + cy) / 2;
+    return dst;
+}
+
+// Snaps each coordinate of *dstPt to the end point's coordinate when that
+// coordinate matches the control point exactly (keeps axis-aligned sub-curves
+// exactly axis-aligned after subdivision).
+void SkDQuad::align(int endIndex, SkDPoint* dstPt) const {
+    if (fPts[endIndex].fX == fPts[1].fX) {
+        dstPt->fX = fPts[endIndex].fX;
+    }
+    if (fPts[endIndex].fY == fPts[1].fY) {
+        dstPt->fY = fPts[endIndex].fY;
+    }
+}
+
+// Computes the control point of the sub-quad over [t1, t2] given its already
+// known end points a and c. The control point is found by intersecting the
+// tangent rays at a and c (built from the freshly subdivided quad), then
+// snapped to the original end points / to a and c when nearly coincident.
+SkDPoint SkDQuad::subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2) const {
+    SkASSERT(t1 != t2);
+    SkDPoint b;
+    SkDQuad sub = subDivide(t1, t2);
+    // Tangent rays through a and c, parallel to the sub-quad's hull edges.
+    SkDLine b0 = {{a, sub[1] + (a - sub[0])}};
+    SkDLine b1 = {{c, sub[1] + (c - sub[2])}};
+    SkIntersections i;
+    i.intersectRay(b0, b1);
+    if (i.used() == 1 && i[0][0] >= 0 && i[1][0] >= 0) {
+        b = i.pt(0);
+    } else {
+        // Rays are parallel or degenerate; fall back to the hull midpoint.
+        SkASSERT(i.used() <= 2);
+        return SkDPoint::Mid(b0[1], b1[1]);
+    }
+    if (t1 == 0 || t2 == 0) {
+        align(0, &b);
+    }
+    if (t1 == 1 || t2 == 1) {
+        align(2, &b);
+    }
+    // Snap the computed control point onto a or c when nearly equal.
+    if (AlmostBequalUlps(b.fX, a.fX)) {
+        b.fX = a.fX;
+    } else if (AlmostBequalUlps(b.fX, c.fX)) {
+        b.fX = c.fX;
+    }
+    if (AlmostBequalUlps(b.fY, a.fY)) {
+        b.fY = a.fY;
+    } else if (AlmostBequalUlps(b.fY, c.fY)) {
+        b.fY = c.fY;
+    }
+    return b;
+}
+
+/* classic one t subdivision */
+// Writes one axis of both halves of the chop into dst: dst holds five values
+// with stride 2 — the shared middle point (de Casteljau apex) lands at dst[4].
+static void interp_quad_coords(const double* src, double* dst, double t) {
+    double ab = SkDInterp(src[0], src[2], t);
+    double bc = SkDInterp(src[2], src[4], t);
+    dst[0] = src[0];
+    dst[2] = ab;
+    dst[4] = SkDInterp(ab, bc, t);
+    dst[6] = bc;
+    dst[8] = src[4];
+}
+
+// Splits the quad at t into two quads sharing their middle point; the pair is
+// packed into five points (see SkDQuadPair::first/second).
+SkDQuadPair SkDQuad::chopAt(double t) const
+{
+    SkDQuadPair dst;
+    interp_quad_coords(&fPts[0].fX, &dst.pts[0].fX, t);
+    interp_quad_coords(&fPts[0].fY, &dst.pts[0].fY, t);
+    return dst;
+}
+
+// Computes numer/denom into *ratio, but only when the quotient lies strictly
+// inside (0, 1); returns the number of valid results (0 or 1).
+static int valid_unit_divide(double numer, double denom, double* ratio)
+{
+    // Normalize signs so numer >= 0; a valid unit quotient then needs denom > numer.
+    if (numer < 0) {
+        numer = -numer;
+        denom = -denom;
+    }
+    if (denom == 0 || numer == 0 || numer >= denom) {
+        return 0;
+    }
+    double r = numer / denom;
+    if (r == 0) { // catch underflow if numer <<<< denom
+        return 0;
+    }
+    *ratio = r;
+    return 1;
+}
+
+/** Quad'(t) = At + B, where
+    A = 2(a - 2b + c)
+    B = 2(b - a)
+    Solve for t, only if it fits between 0 < t < 1
+*/
+// src points at one axis of the control points with stride 2 (see the
+// interleaved SkDPoint layout); at most one extremum exists for a quad.
+int SkDQuad::FindExtrema(const double src[], double tValue[1]) {
+    /* At + B == 0
+       t =  -B / A
+    */
+    double a = src[0];
+    double b = src[2];
+    double c = src[4];
+    return valid_unit_divide(a - b, a - b - b + c, tValue);
+}
+
+/* Parameterization form, given A*t*t + 2*B*t*(1-t) + C*(1-t)*(1-t)
+ *
+ * a = A - 2*B +   C
+ * b =     2*B - 2*C
+ * c =             C
+ */
+// Converts one axis of control points (stride 2) into power-basis
+// coefficients a*t^2 + b*t + c, computed incrementally as annotated below.
+void SkDQuad::SetABC(const double* quad, double* a, double* b, double* c) {
+    *a = quad[0];     // a = A
+    *b = 2 * quad[2]; // b = 2*B
+    *c = quad[4];     // c = C
+    *b -= *c;         // b = 2*B - C
+    *a -= *b;         // a = A - 2*B + C
+    *b -= *c;         // b = 2*B - 2*C
+}
+
+// Out-of-line SkTQuad overrides; each forwards to the wrapped SkDQuad or to
+// the SkIntersections / SkDRect implementation.
+int SkTQuad::intersectRay(SkIntersections* i, const SkDLine& line) const {
+    return i->intersectRay(fQuad, line);
+}
+
+bool SkTQuad::hullIntersects(const SkDConic& conic, bool* isLinear) const {
+    return conic.hullIntersects(fQuad, isLinear);
+}
+
+bool SkTQuad::hullIntersects(const SkDCubic& cubic, bool* isLinear) const {
+    return cubic.hullIntersects(fQuad, isLinear);
+}
+
+void SkTQuad::setBounds(SkDRect* rect) const {
+    rect->setBounds(fQuad);
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsQuad.h b/gfx/skia/skia/src/pathops/SkPathOpsQuad.h
new file mode 100644
index 0000000000..076e0a7039
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsQuad.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsQuad_DEFINED
+#define SkPathOpsQuad_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+
+class SkIntersections;
+class SkOpGlobalState;
+struct SkDConic;
+struct SkDLine;
+struct SkDQuad;
+struct SkDRect;
+
+// Result of chopping a quad at one t: five points packed so that the two
+// halves overlap at pts[2], the shared middle point.
+struct SkDQuadPair {
+    const SkDQuad& first() const { return (const SkDQuad&) pts[0]; }
+    const SkDQuad& second() const { return (const SkDQuad&) pts[2]; }
+    SkDPoint pts[5];
+};
+
+// A quadratic Bezier with double-precision control points, plus the
+// subdivision, root-finding, and hull utilities used by the pathops
+// intersection code.
+struct SkDQuad {
+    static const int kPointCount = 3;
+    static const int kPointLast = kPointCount - 1;
+    static const int kMaxIntersections = 4;
+
+    SkDPoint fPts[kPointCount];
+
+    // True when all three control points are approximately coincident.
+    bool collapsed() const {
+        return fPts[0].approximatelyEqual(fPts[1]) && fPts[0].approximatelyEqual(fPts[2]);
+    }
+
+    // True when the off-curve control point projects onto the interior of the
+    // chord between the end points (the hull does not fold back).
+    bool controlsInside() const {
+        SkDVector v01 = fPts[0] - fPts[1];
+        SkDVector v02 = fPts[0] - fPts[2];
+        SkDVector v12 = fPts[1] - fPts[2];
+        return v02.dot(v01) > 0 && v02.dot(v12) > 0;
+    }
+
+    void debugInit() {
+        sk_bzero(fPts, sizeof(fPts));
+    }
+
+    void debugSet(const SkDPoint* pts);
+
+    // Returns the same curve with the direction of travel reversed.
+    SkDQuad flip() const {
+        SkDQuad result = {{fPts[2], fPts[1], fPts[0]}  SkDEBUGPARAMS(fDebugGlobalState) };
+        return result;
+    }
+
+    static bool IsConic() { return false; }
+
+    // Widens single-precision points into this quad.
+    const SkDQuad& set(const SkPoint pts[kPointCount]
+            SkDEBUGPARAMS(SkOpGlobalState* state = nullptr)) {
+        fPts[0] = pts[0];
+        fPts[1] = pts[1];
+        fPts[2] = pts[2];
+        SkDEBUGCODE(fDebugGlobalState = state);
+        return *this;
+    }
+
+    const SkDPoint& operator[](int n) const { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+    SkDPoint& operator[](int n) { SkASSERT(n >= 0 && n < kPointCount); return fPts[n]; }
+
+    static int AddValidTs(double s[], int realRoots, double* t);
+    void align(int endIndex, SkDPoint* dstPt) const;
+    SkDQuadPair chopAt(double t) const;
+    SkDVector dxdyAtT(double t) const;
+    static int FindExtrema(const double src[], double tValue[1]);
+
+#ifdef SK_DEBUG
+    SkOpGlobalState* globalState() const { return fDebugGlobalState; }
+#endif
+
+    /**
+     *  Return the number of valid roots (0 < root < 1) for this quad
+     *  intersecting the specified horizontal line.
+     */
+    int horizontalIntersect(double yIntercept, double roots[2]) const;
+
+    bool hullIntersects(const SkDQuad& , bool* isLinear) const;
+    bool hullIntersects(const SkDConic& , bool* isLinear) const;
+    bool hullIntersects(const SkDCubic& , bool* isLinear) const;
+    bool isLinear(int startIndex, int endIndex) const;
+    static int maxIntersections() { return kMaxIntersections; }
+    bool monotonicInX() const;
+    bool monotonicInY() const;
+    void otherPts(int oddMan, const SkDPoint* endPt[2]) const;
+    static int pointCount() { return kPointCount; }
+    static int pointLast() { return kPointLast; }
+    SkDPoint ptAtT(double t) const;
+    static int RootsReal(double A, double B, double C, double t[2]);
+    static int RootsValidT(const double A, const double B, const double C, double s[2]);
+    static void SetABC(const double* quad, double* a, double* b, double* c);
+    SkDQuad subDivide(double t1, double t2) const;
+    void subDivide(double t1, double t2, SkDQuad* quad) const { *quad = this->subDivide(t1, t2); }
+
+    // Convenience: build a quad from SkPoints and subdivide it.
+    static SkDQuad SubDivide(const SkPoint a[kPointCount], double t1, double t2) {
+        SkDQuad quad;
+        quad.set(a);
+        return quad.subDivide(t1, t2);
+    }
+    SkDPoint subDivide(const SkDPoint& a, const SkDPoint& c, double t1, double t2) const;
+    static SkDPoint SubDivide(const SkPoint pts[kPointCount], const SkDPoint& a, const SkDPoint& c,
+                              double t1, double t2) {
+        SkDQuad quad;
+        quad.set(pts);
+        return quad.subDivide(a, c, t1, t2);
+    }
+
+    /**
+     *  Return the number of valid roots (0 < root < 1) for this quad
+     *  intersecting the specified vertical line.
+     */
+    int verticalIntersect(double xIntercept, double roots[2]) const;
+
+    SkDCubic debugToCubic() const;
+    // utilities callable by the user from the debugger when the implementation code is linked in
+    void dump() const;
+    void dumpID(int id) const;
+    void dumpInner() const;
+
+    SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+};
+
+
+// SkTCurve adapter wrapping an SkDQuad so the T-sect intersection machinery
+// can operate on quads through the generic curve interface; every override
+// forwards to the wrapped fQuad.
+class SkTQuad : public SkTCurve {
+public:
+    SkDQuad fQuad;
+
+    SkTQuad() {}
+
+    SkTQuad(const SkDQuad& q)
+        : fQuad(q) {
+    }
+
+    ~SkTQuad() override {}
+
+    const SkDPoint& operator[](int n) const override { return fQuad[n]; }
+    SkDPoint& operator[](int n) override { return fQuad[n]; }
+
+    bool collapsed() const override { return fQuad.collapsed(); }
+    bool controlsInside() const override { return fQuad.controlsInside(); }
+    void debugInit() override { return fQuad.debugInit(); }
+#if DEBUG_T_SECT
+    void dumpID(int id) const override { return fQuad.dumpID(id); }
+#endif
+    SkDVector dxdyAtT(double t) const override { return fQuad.dxdyAtT(t); }
+#ifdef SK_DEBUG
+    SkOpGlobalState* globalState() const override { return fQuad.globalState(); }
+#endif
+
+    bool hullIntersects(const SkDQuad& quad, bool* isLinear) const override {
+        return quad.hullIntersects(fQuad, isLinear);
+    }
+
+    // Defined out of line (conic/cubic types are incomplete here).
+    bool hullIntersects(const SkDConic& conic, bool* isLinear) const override;
+    bool hullIntersects(const SkDCubic& cubic, bool* isLinear) const override;
+
+    // Double dispatch: let the other curve test against this quad.
+    bool hullIntersects(const SkTCurve& curve, bool* isLinear) const override {
+        return curve.hullIntersects(fQuad, isLinear);
+    }
+
+    int intersectRay(SkIntersections* i, const SkDLine& line) const override;
+    bool IsConic() const override { return false; }
+    SkTCurve* make(SkArenaAlloc& heap) const override { return heap.make<SkTQuad>(); }
+
+    int maxIntersections() const override { return SkDQuad::kMaxIntersections; }
+
+    void otherPts(int oddMan, const SkDPoint* endPt[2]) const override {
+        fQuad.otherPts(oddMan, endPt);
+    }
+
+    int pointCount() const override { return SkDQuad::kPointCount; }
+    int pointLast() const override { return SkDQuad::kPointLast; }
+    SkDPoint ptAtT(double t) const override { return fQuad.ptAtT(t); }
+    void setBounds(SkDRect* ) const override;
+
+    void subDivide(double t1, double t2, SkTCurve* curve) const override {
+        ((SkTQuad*) curve)->fQuad = fQuad.subDivide(t1, t2);
+    }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp b/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp
new file mode 100644
index 0000000000..eb0a9b6b76
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsRect.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsRect.h"
+
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsQuad.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+
+// Sets this rect to the tight bounds of 'sub', the portion of 'curve' over
+// [startT, endT]. Starts from the sub-curve's end points, then adds any
+// interior x/y extrema, evaluated on the original curve at the remapped t.
+void SkDRect::setBounds(const SkDQuad& curve, const SkDQuad& sub, double startT, double endT) {
+    set(sub[0]);
+    add(sub[2]);
+    double tValues[2];
+    int roots = 0;
+    if (!sub.monotonicInX()) {
+        roots = SkDQuad::FindExtrema(&sub[0].fX, tValues);
+    }
+    if (!sub.monotonicInY()) {
+        roots += SkDQuad::FindExtrema(&sub[0].fY, &tValues[roots]);
+    }
+    for (int index = 0; index < roots; ++index) {
+        // Map the sub-curve parameter back onto the original curve.
+        double t = startT + (endT - startT) * tValues[index];
+        add(curve.ptAtT(t));
+    }
+}
+
+// Conic variant of setBounds: identical structure to the quad version, but
+// extrema depend on the conic's weight as well.
+void SkDRect::setBounds(const SkDConic& curve, const SkDConic& sub, double startT, double endT) {
+    set(sub[0]);
+    add(sub[2]);
+    double tValues[2];
+    int roots = 0;
+    if (!sub.monotonicInX()) {
+        roots = SkDConic::FindExtrema(&sub[0].fX, sub.fWeight, tValues);
+    }
+    if (!sub.monotonicInY()) {
+        roots += SkDConic::FindExtrema(&sub[0].fY, sub.fWeight, &tValues[roots]);
+    }
+    for (int index = 0; index < roots; ++index) {
+        // Map the sub-curve parameter back onto the original curve.
+        double t = startT + (endT - startT) * tValues[index];
+        add(curve.ptAtT(t));
+    }
+}
+
+// Cubic variant of setBounds: a cubic can have up to two extrema per axis,
+// hence the four-entry tValues array and end point sub[3].
+void SkDRect::setBounds(const SkDCubic& curve, const SkDCubic& sub, double startT, double endT) {
+    set(sub[0]);
+    add(sub[3]);
+    double tValues[4];
+    int roots = 0;
+    if (!sub.monotonicInX()) {
+        roots = SkDCubic::FindExtrema(&sub[0].fX, tValues);
+    }
+    if (!sub.monotonicInY()) {
+        roots += SkDCubic::FindExtrema(&sub[0].fY, &tValues[roots]);
+    }
+    for (int index = 0; index < roots; ++index) {
+        // Map the sub-curve parameter back onto the original curve.
+        double t = startT + (endT - startT) * tValues[index];
+        add(curve.ptAtT(t));
+    }
+}
+
+// Generic curve variant: double dispatch through the SkTCurve interface.
+void SkDRect::setBounds(const SkTCurve& curve) {
+    curve.setBounds(this);
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsRect.h b/gfx/skia/skia/src/pathops/SkPathOpsRect.h
new file mode 100644
index 0000000000..4abd50d705
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsRect.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsRect_DEFINED
+#define SkPathOpsRect_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+
+class SkTCurve;
+struct SkDConic;
+struct SkDCubic;
+struct SkDQuad;
+
+// Axis-aligned bounding rectangle with double-precision edges, used to bound
+// pathops curves. Callers initialize via set() (or a setBounds overload)
+// before growing the rect with add().
+struct SkDRect {
+    double fLeft, fTop, fRight, fBottom;
+
+    // Grows the rect just enough to include pt; assumes set() was called first.
+    void add(const SkDPoint& pt) {
+        fLeft = std::min(fLeft, pt.fX);
+        fTop = std::min(fTop, pt.fY);
+        fRight = std::max(fRight, pt.fX);
+        fBottom = std::max(fBottom, pt.fY);
+    }
+
+    // Tolerant containment test (uses approximately_between on each axis).
+    bool contains(const SkDPoint& pt) const {
+        return approximately_between(fLeft, pt.fX, fRight)
+                && approximately_between(fTop, pt.fY, fBottom);
+    }
+
+    void debugInit();
+
+    // Exact (non-tolerant) overlap test; both rects must be well-ordered.
+    bool intersects(const SkDRect& r) const {
+        SkASSERT(fLeft <= fRight);
+        SkASSERT(fTop <= fBottom);
+        SkASSERT(r.fLeft <= r.fRight);
+        SkASSERT(r.fTop <= r.fBottom);
+        return r.fLeft <= fRight && fLeft <= r.fRight && r.fTop <= fBottom && fTop <= r.fBottom;
+    }
+
+    // Collapses the rect to a single point, ready for subsequent add() calls.
+    void set(const SkDPoint& pt) {
+        fLeft = fRight = pt.fX;
+        fTop = fBottom = pt.fY;
+    }
+
+    double width() const {
+        return fRight - fLeft;
+    }
+
+    double height() const {
+        return fBottom - fTop;
+    }
+
+    // Full-curve bounds: delegate to the sub-curve overload over [0, 1].
+    void setBounds(const SkDConic& curve) {
+        setBounds(curve, curve, 0, 1);
+    }
+
+    void setBounds(const SkDConic& curve, const SkDConic& sub, double tStart, double tEnd);
+
+    void setBounds(const SkDCubic& curve) {
+        setBounds(curve, curve, 0, 1);
+    }
+
+    void setBounds(const SkDCubic& curve, const SkDCubic& sub, double tStart, double tEnd);
+
+    void setBounds(const SkDQuad& curve) {
+        setBounds(curve, curve, 0, 1);
+    }
+
+    void setBounds(const SkDQuad& curve, const SkDQuad& sub, double tStart, double tEnd);
+
+    void setBounds(const SkTCurve& curve);
+
+    bool valid() const {
+        return fLeft <= fRight && fTop <= fBottom;
+    }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp b/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp
new file mode 100644
index 0000000000..d9c4c46101
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsSimplify.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkPath.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkAddIntersections.h"
+#include "src/pathops/SkOpCoincidence.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsCommon.h"
+#include "src/pathops/SkPathOpsTypes.h"
+#include "src/pathops/SkPathWriter.h"
+
+static bool bridgeWinding(SkOpContourHead* contourList, SkPathWriter* writer) {
+ bool unsortable = false;
+ do {
+ SkOpSpan* span = FindSortableTop(contourList);
+ if (!span) {
+ break;
+ }
+ SkOpSegment* current = span->segment();
+ SkOpSpanBase* start = span->next();
+ SkOpSpanBase* end = span;
+ SkTDArray<SkOpSpanBase*> chase;
+ do {
+ if (current->activeWinding(start, end)) {
+ do {
+ if (!unsortable && current->done()) {
+ break;
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ SkOpSegment* next = current->findNextWinding(&chase, &nextStart, &nextEnd,
+ &unsortable);
+ if (!next) {
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!writer->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (current->activeWinding(start, end) && !writer->isClosed()) {
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current->markDone(spanStart);
+ }
+ }
+ writer->finishContour();
+ } else {
+ SkOpSpanBase* last;
+ if (!current->markAndChaseDone(start, end, &last)) {
+ return false;
+ }
+ if (last && !last->chased()) {
+ last->setChased(true);
+ SkASSERT(!SkPathOpsDebug::ChaseContains(chase, last));
+ *chase.append() = last;
+#if DEBUG_WINDING
+ SkDebugf("%s chase.append id=%d", __FUNCTION__, last->segment()->debugID());
+ if (!last->final()) {
+ SkDebugf(" windSum=%d", last->upCast()->windSum());
+ }
+ SkDebugf("\n");
+#endif
+ }
+ }
+ current = FindChase(&chase, &start, &end);
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ if (!current) {
+ break;
+ }
+ } while (true);
+ } while (true);
+ return true;
+}
+
+// returns true if all edges were processed
+static bool bridgeXor(SkOpContourHead* contourList, SkPathWriter* writer) {
+ bool unsortable = false;
+ int safetyNet = 1000000;
+ do {
+ SkOpSpan* span = FindUndone(contourList);
+ if (!span) {
+ break;
+ }
+ SkOpSegment* current = span->segment();
+ SkOpSpanBase* start = span->next();
+ SkOpSpanBase* end = span;
+ do {
+ if (--safetyNet < 0) {
+ return false;
+ }
+ if (!unsortable && current->done()) {
+ break;
+ }
+ SkASSERT(unsortable || !current->done());
+ SkOpSpanBase* nextStart = start;
+ SkOpSpanBase* nextEnd = end;
+ SkOpSegment* next = current->findNextXor(&nextStart, &nextEnd,
+ &unsortable);
+ if (!next) {
+ break;
+ }
+ #if DEBUG_FLOW
+ SkDebugf("%s current id=%d from=(%1.9g,%1.9g) to=(%1.9g,%1.9g)\n", __FUNCTION__,
+ current->debugID(), start->pt().fX, start->pt().fY,
+ end->pt().fX, end->pt().fY);
+ #endif
+ if (!current->addCurveTo(start, end, writer)) {
+ return false;
+ }
+ current = next;
+ start = nextStart;
+ end = nextEnd;
+ } while (!writer->isClosed() && (!unsortable || !start->starter(end)->done()));
+ if (!writer->isClosed()) {
+ SkOpSpan* spanStart = start->starter(end);
+ if (!spanStart->done()) {
+ return false;
+ }
+ }
+ writer->finishContour();
+ SkPathOpsDebug::ShowActiveSpans(contourList);
+ } while (true);
+ return true;
+}
+
+// FIXME: add this as a member of SkPath
+bool SimplifyDebug(const SkPath& path, SkPath* result
+ SkDEBUGPARAMS(bool skipAssert) SkDEBUGPARAMS(const char* testName)) {
+ // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
+ SkPathFillType fillType = path.isInverseFillType() ? SkPathFillType::kInverseEvenOdd
+ : SkPathFillType::kEvenOdd;
+ if (path.isConvex()) {
+ if (result != &path) {
+ *result = path;
+ }
+ result->setFillType(fillType);
+ return true;
+ }
+ // turn path into list of segments
+ SkSTArenaAlloc<4096> allocator; // FIXME: constant-ize, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator
+ SkDEBUGPARAMS(skipAssert) SkDEBUGPARAMS(testName));
+ SkOpCoincidence coincidence(&globalState);
+#if DEBUG_DUMP_VERIFY
+#ifndef SK_DEBUG
+ const char* testName = "release";
+#endif
+ if (SkPathOpsDebug::gDumpOp) {
+ DumpSimplify(path, testName);
+ }
+#endif
+#if DEBUG_SORT
+ SkPathOpsDebug::gSortCount = SkPathOpsDebug::gSortCountDefault;
+#endif
+ SkOpEdgeBuilder builder(path, contourList, &globalState);
+ if (!builder.finish()) {
+ return false;
+ }
+#if DEBUG_DUMP_SEGMENTS
+ contour.dumpSegments();
+#endif
+ if (!SortContourList(&contourList, false, false)) {
+ result->reset();
+ result->setFillType(fillType);
+ return true;
+ }
+ // find all intersections between segments
+ SkOpContour* current = contourList;
+ do {
+ SkOpContour* next = current;
+ while (AddIntersectTs(current, next, &coincidence)
+ && (next = next->next()));
+ } while ((current = current->next()));
+#if DEBUG_VALIDATE
+ globalState.setPhase(SkOpPhase::kWalking);
+#endif
+ bool success = HandleCoincidence(contourList, &coincidence);
+#if DEBUG_COIN
+ globalState.debugAddToGlobalCoinDicts();
+#endif
+ if (!success) {
+ return false;
+ }
+#if DEBUG_DUMP_ALIGNMENT
+ contour.dumpSegments("aligned");
+#endif
+ // construct closed contours
+ result->reset();
+ result->setFillType(fillType);
+ SkPathWriter wrapper(*result);
+ if (builder.xorMask() == kWinding_PathOpsMask ? !bridgeWinding(contourList, &wrapper)
+ : !bridgeXor(contourList, &wrapper)) {
+ return false;
+ }
+ wrapper.assemble(); // if some edges could not be resolved, assemble remaining
+ return true;
+}
+
+bool Simplify(const SkPath& path, SkPath* result) {
+#if DEBUG_DUMP_VERIFY
+ if (SkPathOpsDebug::gVerifyOp) {
+ if (!SimplifyDebug(path, result SkDEBUGPARAMS(false) SkDEBUGPARAMS(nullptr))) {
+ ReportSimplifyFail(path);
+ return false;
+ }
+ VerifySimplify(path, *result);
+ return true;
+ }
+#endif
+ return SimplifyDebug(path, result SkDEBUGPARAMS(true) SkDEBUGPARAMS(nullptr));
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTCurve.h b/gfx/skia/skia/src/pathops/SkPathOpsTCurve.h
new file mode 100644
index 0000000000..1f9030e275
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTCurve.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPathOpsTCurve_DEFINED
+#define SkPathOpsTCurve_DEFINED
+
+#include "src/pathops/SkPathOpsPoint.h"
+
+class SkArenaAlloc;
+class SkIntersections;
+struct SkDRect;
+
+class SkTCurve {
+public:
+ virtual ~SkTCurve() {}
+ virtual const SkDPoint& operator[](int n) const = 0;
+ virtual SkDPoint& operator[](int n) = 0;
+
+ virtual bool collapsed() const = 0;
+ virtual bool controlsInside() const = 0;
+ virtual void debugInit() = 0;
+#if DEBUG_T_SECT
+ virtual void dumpID(int id) const = 0;
+#endif
+ virtual SkDVector dxdyAtT(double t) const = 0;
+ virtual bool hullIntersects(const SkDQuad& , bool* isLinear) const = 0;
+ virtual bool hullIntersects(const SkDConic& , bool* isLinear) const = 0;
+ virtual bool hullIntersects(const SkDCubic& , bool* isLinear) const = 0;
+ virtual bool hullIntersects(const SkTCurve& , bool* isLinear) const = 0;
+ virtual int intersectRay(SkIntersections* i, const SkDLine& line) const = 0;
+ virtual bool IsConic() const = 0;
+ virtual SkTCurve* make(SkArenaAlloc& ) const = 0;
+ virtual int maxIntersections() const = 0;
+ virtual void otherPts(int oddMan, const SkDPoint* endPt[2]) const = 0;
+ virtual int pointCount() const = 0;
+ virtual int pointLast() const = 0;
+ virtual SkDPoint ptAtT(double t) const = 0;
+ virtual void setBounds(SkDRect* ) const = 0;
+ virtual void subDivide(double t1, double t2, SkTCurve* curve) const = 0;
+#ifdef SK_DEBUG
+ virtual SkOpGlobalState* globalState() const = 0;
+#endif
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp
new file mode 100644
index 0000000000..7c49330fdd
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTSect.cpp
@@ -0,0 +1,2149 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pathops/SkPathOpsTSect.h"
+
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTArray.h"
+#include "src/base/SkTSort.h"
+#include "src/pathops/SkIntersections.h"
+#include "src/pathops/SkPathOpsConic.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+#include <cfloat>
+#include <algorithm>
+#include <array>
+#include <cmath>
+
+#define COINCIDENT_SPAN_COUNT 9
+
+void SkTCoincident::setPerp(const SkTCurve& c1, double t,
+ const SkDPoint& cPt, const SkTCurve& c2) {
+ SkDVector dxdy = c1.dxdyAtT(t);
+ SkDLine perp = {{ cPt, {cPt.fX + dxdy.fY, cPt.fY - dxdy.fX} }};
+ SkIntersections i SkDEBUGCODE((c1.globalState()));
+ int used = i.intersectRay(c2, perp);
+ // only keep closest
+ if (used == 0 || used == 3) {
+ this->init();
+ return;
+ }
+ fPerpT = i[0][0];
+ fPerpPt = i.pt(0);
+ SkASSERT(used <= 2);
+ if (used == 2) {
+ double distSq = (fPerpPt - cPt).lengthSquared();
+ double dist2Sq = (i.pt(1) - cPt).lengthSquared();
+ if (dist2Sq < distSq) {
+ fPerpT = i[0][1];
+ fPerpPt = i.pt(1);
+ }
+ }
+#if DEBUG_T_SECT
+ SkDebugf("setPerp t=%1.9g cPt=(%1.9g,%1.9g) %s oppT=%1.9g fPerpPt=(%1.9g,%1.9g)\n",
+ t, cPt.fX, cPt.fY,
+ cPt.approximatelyEqual(fPerpPt) ? "==" : "!=", fPerpT, fPerpPt.fX, fPerpPt.fY);
+#endif
+ fMatch = cPt.approximatelyEqual(fPerpPt);
+#if DEBUG_T_SECT
+ if (fMatch) {
+ SkDebugf("%s", ""); // allow setting breakpoint
+ }
+#endif
+}
+
+void SkTSpan::addBounded(SkTSpan* span, SkArenaAlloc* heap) {
+ SkTSpanBounded* bounded = heap->make<SkTSpanBounded>();
+ bounded->fBounded = span;
+ bounded->fNext = fBounded;
+ fBounded = bounded;
+}
+
+SkTSpan* SkTSect::addFollowing(
+ SkTSpan* prior) {
+ SkTSpan* result = this->addOne();
+ SkDEBUGCODE(result->debugSetGlobalState(this->globalState()));
+ result->fStartT = prior ? prior->fEndT : 0;
+ SkTSpan* next = prior ? prior->fNext : fHead;
+ result->fEndT = next ? next->fStartT : 1;
+ result->fPrev = prior;
+ result->fNext = next;
+ if (prior) {
+ prior->fNext = result;
+ } else {
+ fHead = result;
+ }
+ if (next) {
+ next->fPrev = result;
+ }
+ result->resetBounds(fCurve);
+ // world may not be consistent to call validate here
+ result->validate();
+ return result;
+}
+
+void SkTSect::addForPerp(SkTSpan* span, double t) {
+ if (!span->hasOppT(t)) {
+ SkTSpan* priorSpan;
+ SkTSpan* opp = this->spanAtT(t, &priorSpan);
+ if (!opp) {
+ opp = this->addFollowing(priorSpan);
+#if DEBUG_PERP
+ SkDebugf("%s priorSpan=%d t=%1.9g opp=%d\n", __FUNCTION__, priorSpan ?
+ priorSpan->debugID() : -1, t, opp->debugID());
+#endif
+ }
+#if DEBUG_PERP
+ opp->dump(); SkDebugf("\n");
+ SkDebugf("%s addBounded span=%d opp=%d\n", __FUNCTION__, priorSpan ?
+ priorSpan->debugID() : -1, opp->debugID());
+#endif
+ opp->addBounded(span, &fHeap);
+ span->addBounded(opp, &fHeap);
+ }
+ this->validate();
+#if DEBUG_T_SECT
+ span->validatePerpT(t);
+#endif
+}
+
+double SkTSpan::closestBoundedT(const SkDPoint& pt) const {
+ double result = -1;
+ double closest = DBL_MAX;
+ const SkTSpanBounded* testBounded = fBounded;
+ while (testBounded) {
+ const SkTSpan* test = testBounded->fBounded;
+ double startDist = test->pointFirst().distanceSquared(pt);
+ if (closest > startDist) {
+ closest = startDist;
+ result = test->fStartT;
+ }
+ double endDist = test->pointLast().distanceSquared(pt);
+ if (closest > endDist) {
+ closest = endDist;
+ result = test->fEndT;
+ }
+ testBounded = testBounded->fNext;
+ }
+ SkASSERT(between(0, result, 1));
+ return result;
+}
+
+#ifdef SK_DEBUG
+
+bool SkTSpan::debugIsBefore(const SkTSpan* span) const {
+ const SkTSpan* work = this;
+ do {
+ if (span == work) {
+ return true;
+ }
+ } while ((work = work->fNext));
+ return false;
+}
+#endif
+
+bool SkTSpan::contains(double t) const {
+ const SkTSpan* work = this;
+ do {
+ if (between(work->fStartT, t, work->fEndT)) {
+ return true;
+ }
+ } while ((work = work->fNext));
+ return false;
+}
+
+const SkTSect* SkTSpan::debugOpp() const {
+ return SkDEBUGRELEASE(fDebugSect->debugOpp(), nullptr);
+}
+
+SkTSpan* SkTSpan::findOppSpan(
+ const SkTSpan* opp) const {
+ SkTSpanBounded* bounded = fBounded;
+ while (bounded) {
+ SkTSpan* test = bounded->fBounded;
+ if (opp == test) {
+ return test;
+ }
+ bounded = bounded->fNext;
+ }
+ return nullptr;
+}
+
+// returns 0 if no hull intersection
+// 1 if hulls intersect
+// 2 if hulls only share a common endpoint
+// -1 if linear and further checking is required
+
+int SkTSpan::hullCheck(const SkTSpan* opp,
+ bool* start, bool* oppStart) {
+ if (fIsLinear) {
+ return -1;
+ }
+ bool ptsInCommon;
+ if (onlyEndPointsInCommon(opp, start, oppStart, &ptsInCommon)) {
+ SkASSERT(ptsInCommon);
+ return 2;
+ }
+ bool linear;
+ if (fPart->hullIntersects(*opp->fPart, &linear)) {
+ if (!linear) { // check set true if linear
+ return 1;
+ }
+ fIsLinear = true;
+ fIsLine = fPart->controlsInside();
+ return ptsInCommon ? 1 : -1;
+ }
+ // hull is not linear; check set true if intersected at the end points
+ return ((int) ptsInCommon) << 1; // 0 or 2
+}
+
+// OPTIMIZE ? If at_most_end_pts_in_common detects that one quad is near linear,
+// use line intersection to guess a better split than 0.5
+// OPTIMIZE Once at_most_end_pts_in_common detects linear, mark span so all future splits are linear
+
+int SkTSpan::hullsIntersect(SkTSpan* opp,
+ bool* start, bool* oppStart) {
+ if (!fBounds.intersects(opp->fBounds)) {
+ return 0;
+ }
+ int hullSect = this->hullCheck(opp, start, oppStart);
+ if (hullSect >= 0) {
+ return hullSect;
+ }
+ hullSect = opp->hullCheck(this, oppStart, start);
+ if (hullSect >= 0) {
+ return hullSect;
+ }
+ return -1;
+}
+
+void SkTSpan::init(const SkTCurve& c) {
+ fPrev = fNext = nullptr;
+ fStartT = 0;
+ fEndT = 1;
+ fBounded = nullptr;
+ resetBounds(c);
+}
+
+bool SkTSpan::initBounds(const SkTCurve& c) {
+ if (SkDoubleIsNaN(fStartT) || SkDoubleIsNaN(fEndT)) {
+ return false;
+ }
+ c.subDivide(fStartT, fEndT, fPart);
+ fBounds.setBounds(*fPart);
+ fCoinStart.init();
+ fCoinEnd.init();
+ fBoundsMax = std::max(fBounds.width(), fBounds.height());
+ fCollapsed = fPart->collapsed();
+ fHasPerp = false;
+ fDeleted = false;
+#if DEBUG_T_SECT
+ if (fCollapsed) {
+ SkDebugf("%s", ""); // for convenient breakpoints
+ }
+#endif
+ return fBounds.valid();
+}
+
+bool SkTSpan::linearsIntersect(SkTSpan* span) {
+ int result = this->linearIntersects(*span->fPart);
+ if (result <= 1) {
+ return SkToBool(result);
+ }
+ SkASSERT(span->fIsLinear);
+ result = span->linearIntersects(*fPart);
+// SkASSERT(result <= 1);
+ return SkToBool(result);
+}
+
+double SkTSpan::linearT(const SkDPoint& pt) const {
+ SkDVector len = this->pointLast() - this->pointFirst();
+ return fabs(len.fX) > fabs(len.fY)
+ ? (pt.fX - this->pointFirst().fX) / len.fX
+ : (pt.fY - this->pointFirst().fY) / len.fY;
+}
+
+int SkTSpan::linearIntersects(const SkTCurve& q2) const {
+ // looks like q1 is near-linear
+ int start = 0, end = fPart->pointLast(); // the outside points are usually the extremes
+ if (!fPart->controlsInside()) {
+ double dist = 0; // if there's any question, compute distance to find best outsiders
+ for (int outer = 0; outer < this->pointCount() - 1; ++outer) {
+ for (int inner = outer + 1; inner < this->pointCount(); ++inner) {
+ double test = ((*fPart)[outer] - (*fPart)[inner]).lengthSquared();
+ if (dist > test) {
+ continue;
+ }
+ dist = test;
+ start = outer;
+ end = inner;
+ }
+ }
+ }
+ // see if q2 is on one side of the line formed by the extreme points
+ double origX = (*fPart)[start].fX;
+ double origY = (*fPart)[start].fY;
+ double adj = (*fPart)[end].fX - origX;
+ double opp = (*fPart)[end].fY - origY;
+ double maxPart = std::max(fabs(adj), fabs(opp));
+ double sign = 0; // initialization to shut up warning in release build
+ for (int n = 0; n < q2.pointCount(); ++n) {
+ double dx = q2[n].fY - origY;
+ double dy = q2[n].fX - origX;
+ double maxVal = std::max(maxPart, std::max(fabs(dx), fabs(dy)));
+ double test = (q2[n].fY - origY) * adj - (q2[n].fX - origX) * opp;
+ if (precisely_zero_when_compared_to(test, maxVal)) {
+ return 1;
+ }
+ if (approximately_zero_when_compared_to(test, maxVal)) {
+ return 3;
+ }
+ if (n == 0) {
+ sign = test;
+ continue;
+ }
+ if (test * sign < 0) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+bool SkTSpan::onlyEndPointsInCommon(const SkTSpan* opp,
+ bool* start, bool* oppStart, bool* ptsInCommon) {
+ if (opp->pointFirst() == this->pointFirst()) {
+ *start = *oppStart = true;
+ } else if (opp->pointFirst() == this->pointLast()) {
+ *start = false;
+ *oppStart = true;
+ } else if (opp->pointLast() == this->pointFirst()) {
+ *start = true;
+ *oppStart = false;
+ } else if (opp->pointLast() == this->pointLast()) {
+ *start = *oppStart = false;
+ } else {
+ *ptsInCommon = false;
+ return false;
+ }
+ *ptsInCommon = true;
+ const SkDPoint* otherPts[4], * oppOtherPts[4];
+// const SkDPoint* otherPts[this->pointCount() - 1], * oppOtherPts[opp->pointCount() - 1];
+ int baseIndex = *start ? 0 : fPart->pointLast();
+ fPart->otherPts(baseIndex, otherPts);
+ opp->fPart->otherPts(*oppStart ? 0 : opp->fPart->pointLast(), oppOtherPts);
+ const SkDPoint& base = (*fPart)[baseIndex];
+ for (int o1 = 0; o1 < this->pointCount() - 1; ++o1) {
+ SkDVector v1 = *otherPts[o1] - base;
+ for (int o2 = 0; o2 < opp->pointCount() - 1; ++o2) {
+ SkDVector v2 = *oppOtherPts[o2] - base;
+ if (v2.dot(v1) >= 0) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+SkTSpan* SkTSpan::oppT(double t) const {
+ SkTSpanBounded* bounded = fBounded;
+ while (bounded) {
+ SkTSpan* test = bounded->fBounded;
+ if (between(test->fStartT, t, test->fEndT)) {
+ return test;
+ }
+ bounded = bounded->fNext;
+ }
+ return nullptr;
+}
+
+bool SkTSpan::removeAllBounded() {
+ bool deleteSpan = false;
+ SkTSpanBounded* bounded = fBounded;
+ while (bounded) {
+ SkTSpan* opp = bounded->fBounded;
+ deleteSpan |= opp->removeBounded(this);
+ bounded = bounded->fNext;
+ }
+ return deleteSpan;
+}
+
+bool SkTSpan::removeBounded(const SkTSpan* opp) {
+ if (fHasPerp) {
+ bool foundStart = false;
+ bool foundEnd = false;
+ SkTSpanBounded* bounded = fBounded;
+ while (bounded) {
+ SkTSpan* test = bounded->fBounded;
+ if (opp != test) {
+ foundStart |= between(test->fStartT, fCoinStart.perpT(), test->fEndT);
+ foundEnd |= between(test->fStartT, fCoinEnd.perpT(), test->fEndT);
+ }
+ bounded = bounded->fNext;
+ }
+ if (!foundStart || !foundEnd) {
+ fHasPerp = false;
+ fCoinStart.init();
+ fCoinEnd.init();
+ }
+ }
+ SkTSpanBounded* bounded = fBounded;
+ SkTSpanBounded* prev = nullptr;
+ while (bounded) {
+ SkTSpanBounded* boundedNext = bounded->fNext;
+ if (opp == bounded->fBounded) {
+ if (prev) {
+ prev->fNext = boundedNext;
+ return false;
+ } else {
+ fBounded = boundedNext;
+ return fBounded == nullptr;
+ }
+ }
+ prev = bounded;
+ bounded = boundedNext;
+ }
+ SkOPASSERT(0);
+ return false;
+}
+
+bool SkTSpan::splitAt(SkTSpan* work, double t, SkArenaAlloc* heap) {
+ fStartT = t;
+ fEndT = work->fEndT;
+ if (fStartT == fEndT) {
+ fCollapsed = true;
+ return false;
+ }
+ work->fEndT = t;
+ if (work->fStartT == work->fEndT) {
+ work->fCollapsed = true;
+ return false;
+ }
+ fPrev = work;
+ fNext = work->fNext;
+ fIsLinear = work->fIsLinear;
+ fIsLine = work->fIsLine;
+
+ work->fNext = this;
+ if (fNext) {
+ fNext->fPrev = this;
+ }
+ this->validate();
+ SkTSpanBounded* bounded = work->fBounded;
+ fBounded = nullptr;
+ while (bounded) {
+ this->addBounded(bounded->fBounded, heap);
+ bounded = bounded->fNext;
+ }
+ bounded = fBounded;
+ while (bounded) {
+ bounded->fBounded->addBounded(this, heap);
+ bounded = bounded->fNext;
+ }
+ return true;
+}
+
+void SkTSpan::validate() const {
+#if DEBUG_VALIDATE
+ SkASSERT(this != fPrev);
+ SkASSERT(this != fNext);
+ SkASSERT(fNext == nullptr || fNext != fPrev);
+ SkASSERT(fNext == nullptr || this == fNext->fPrev);
+ SkASSERT(fPrev == nullptr || this == fPrev->fNext);
+ this->validateBounded();
+#endif
+#if DEBUG_T_SECT
+ SkASSERT(fBounds.width() || fBounds.height() || fCollapsed);
+ SkASSERT(fBoundsMax == std::max(fBounds.width(), fBounds.height()) || fCollapsed == 0xFF);
+ SkASSERT(0 <= fStartT);
+ SkASSERT(fEndT <= 1);
+ SkASSERT(fStartT <= fEndT);
+ SkASSERT(fBounded || fCollapsed == 0xFF);
+ if (fHasPerp) {
+ if (fCoinStart.isMatch()) {
+ validatePerpT(fCoinStart.perpT());
+ validatePerpPt(fCoinStart.perpT(), fCoinStart.perpPt());
+ }
+ if (fCoinEnd.isMatch()) {
+ validatePerpT(fCoinEnd.perpT());
+ validatePerpPt(fCoinEnd.perpT(), fCoinEnd.perpPt());
+ }
+ }
+#endif
+}
+
+void SkTSpan::validateBounded() const {
+#if DEBUG_VALIDATE
+ const SkTSpanBounded* testBounded = fBounded;
+ while (testBounded) {
+ SkDEBUGCODE(const SkTSpan* overlap = testBounded->fBounded);
+ SkASSERT(!overlap->fDeleted);
+#if DEBUG_T_SECT
+ SkASSERT(((this->debugID() ^ overlap->debugID()) & 1) == 1);
+ SkASSERT(overlap->findOppSpan(this));
+#endif
+ testBounded = testBounded->fNext;
+ }
+#endif
+}
+
+void SkTSpan::validatePerpT(double oppT) const {
+ const SkTSpanBounded* testBounded = fBounded;
+ while (testBounded) {
+ const SkTSpan* overlap = testBounded->fBounded;
+ if (precisely_between(overlap->fStartT, oppT, overlap->fEndT)) {
+ return;
+ }
+ testBounded = testBounded->fNext;
+ }
+ SkASSERT(0);
+}
+
+void SkTSpan::validatePerpPt(double t, const SkDPoint& pt) const {
+ SkASSERT(fDebugSect->fOppSect->fCurve.ptAtT(t) == pt);
+}
+
+SkTSect::SkTSect(const SkTCurve& c
+ SkDEBUGPARAMS(SkOpGlobalState* debugGlobalState)
+ PATH_OPS_DEBUG_T_SECT_PARAMS(int id))
+ : fCurve(c)
+ , fHeap(sizeof(SkTSpan) * 4)
+ , fCoincident(nullptr)
+ , fDeleted(nullptr)
+ , fActiveCount(0)
+ , fHung(false)
+ SkDEBUGPARAMS(fDebugGlobalState(debugGlobalState))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fID(id))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fDebugCount(0))
+ PATH_OPS_DEBUG_T_SECT_PARAMS(fDebugAllocatedCount(0))
+{
+ this->resetRemovedEnds();
+ fHead = this->addOne();
+ SkDEBUGCODE(fHead->debugSetGlobalState(debugGlobalState));
+ fHead->init(c);
+}
+
+SkTSpan* SkTSect::addOne() {
+ SkTSpan* result;
+ if (fDeleted) {
+ result = fDeleted;
+ fDeleted = result->fNext;
+ } else {
+ result = fHeap.make<SkTSpan>(fCurve, fHeap);
+#if DEBUG_T_SECT
+ ++fDebugAllocatedCount;
+#endif
+ }
+ result->reset();
+ result->fHasPerp = false;
+ result->fDeleted = false;
+ ++fActiveCount;
+ PATH_OPS_DEBUG_T_SECT_CODE(result->fID = fDebugCount++ * 2 + fID);
+ SkDEBUGCODE(result->fDebugSect = this);
+#ifdef SK_DEBUG
+ result->debugInit(fCurve, fHeap);
+ result->fCoinStart.debugInit();
+ result->fCoinEnd.debugInit();
+ result->fPrev = result->fNext = nullptr;
+ result->fBounds.debugInit();
+ result->fStartT = result->fEndT = result->fBoundsMax = SK_ScalarNaN;
+ result->fCollapsed = result->fIsLinear = result->fIsLine = 0xFF;
+#endif
+ return result;
+}
+
+bool SkTSect::binarySearchCoin(SkTSect* sect2, double tStart,
+ double tStep, double* resultT, double* oppT, SkTSpan** oppFirst) {
+ SkTSpan work(fCurve, fHeap);
+ double result = work.fStartT = work.fEndT = tStart;
+ SkDEBUGCODE(work.fDebugSect = this);
+ SkDPoint last = fCurve.ptAtT(tStart);
+ SkDPoint oppPt;
+ bool flip = false;
+ bool contained = false;
+ bool down = tStep < 0;
+ const SkTCurve& opp = sect2->fCurve;
+ do {
+ tStep *= 0.5;
+ work.fStartT += tStep;
+ if (flip) {
+ tStep = -tStep;
+ flip = false;
+ }
+ work.initBounds(fCurve);
+ if (work.fCollapsed) {
+ return false;
+ }
+ if (last.approximatelyEqual(work.pointFirst())) {
+ break;
+ }
+ last = work.pointFirst();
+ work.fCoinStart.setPerp(fCurve, work.fStartT, last, opp);
+ if (work.fCoinStart.isMatch()) {
+#if DEBUG_T_SECT
+ work.validatePerpPt(work.fCoinStart.perpT(), work.fCoinStart.perpPt());
+#endif
+ double oppTTest = work.fCoinStart.perpT();
+ if (sect2->fHead->contains(oppTTest)) {
+ *oppT = oppTTest;
+ oppPt = work.fCoinStart.perpPt();
+ contained = true;
+ if (down ? result <= work.fStartT : result >= work.fStartT) {
+ *oppFirst = nullptr; // signal caller to fail
+ return false;
+ }
+ result = work.fStartT;
+ continue;
+ }
+ }
+ tStep = -tStep;
+ flip = true;
+ } while (true);
+ if (!contained) {
+ return false;
+ }
+ if (last.approximatelyEqual(fCurve[0])) {
+ result = 0;
+ } else if (last.approximatelyEqual(this->pointLast())) {
+ result = 1;
+ }
+ if (oppPt.approximatelyEqual(opp[0])) {
+ *oppT = 0;
+ } else if (oppPt.approximatelyEqual(sect2->pointLast())) {
+ *oppT = 1;
+ }
+ *resultT = result;
+ return true;
+}
+
+// OPTIMIZE ? keep a sorted list of sizes in the form of a doubly-linked list in quad span
+// so that each quad sect has a pointer to the largest, and can update it as spans
+// are split
+
+SkTSpan* SkTSect::boundsMax() {
+ SkTSpan* test = fHead;
+ SkTSpan* largest = fHead;
+ bool lCollapsed = largest->fCollapsed;
+ int safetyNet = 10000;
+ while ((test = test->fNext)) {
+ if (!--safetyNet) {
+ fHung = true;
+ return nullptr;
+ }
+ bool tCollapsed = test->fCollapsed;
+ if ((lCollapsed && !tCollapsed) || (lCollapsed == tCollapsed &&
+ largest->fBoundsMax < test->fBoundsMax)) {
+ largest = test;
+ lCollapsed = test->fCollapsed;
+ }
+ }
+ return largest;
+}
+
+bool SkTSect::coincidentCheck(SkTSect* sect2) {
+ SkTSpan* first = fHead;
+ if (!first) {
+ return false;
+ }
+ SkTSpan* last, * next;
+ do {
+ int consecutive = this->countConsecutiveSpans(first, &last);
+ next = last->fNext;
+ if (consecutive < COINCIDENT_SPAN_COUNT) {
+ continue;
+ }
+ this->validate();
+ sect2->validate();
+ this->computePerpendiculars(sect2, first, last);
+ this->validate();
+ sect2->validate();
+ // check to see if a range of points are on the curve
+ SkTSpan* coinStart = first;
+ do {
+ bool success = this->extractCoincident(sect2, coinStart, last, &coinStart);
+ if (!success) {
+ return false;
+ }
+ } while (coinStart && !last->fDeleted);
+ if (!fHead || !sect2->fHead) {
+ break;
+ }
+ if (!next || next->fDeleted) {
+ break;
+ }
+ } while ((first = next));
+ return true;
+}
+
+void SkTSect::coincidentForce(SkTSect* sect2,
+ double start1s, double start1e) {
+ SkTSpan* first = fHead;
+ SkTSpan* last = this->tail();
+ SkTSpan* oppFirst = sect2->fHead;
+ SkTSpan* oppLast = sect2->tail();
+ if (!last || !oppLast) {
+ return;
+ }
+ bool deleteEmptySpans = this->updateBounded(first, last, oppFirst);
+ deleteEmptySpans |= sect2->updateBounded(oppFirst, oppLast, first);
+ this->removeSpanRange(first, last);
+ sect2->removeSpanRange(oppFirst, oppLast);
+ first->fStartT = start1s;
+ first->fEndT = start1e;
+ first->resetBounds(fCurve);
+ first->fCoinStart.setPerp(fCurve, start1s, fCurve[0], sect2->fCurve);
+ first->fCoinEnd.setPerp(fCurve, start1e, this->pointLast(), sect2->fCurve);
+ bool oppMatched = first->fCoinStart.perpT() < first->fCoinEnd.perpT();
+ double oppStartT = first->fCoinStart.perpT() == -1 ? 0 : std::max(0., first->fCoinStart.perpT());
+ double oppEndT = first->fCoinEnd.perpT() == -1 ? 1 : std::min(1., first->fCoinEnd.perpT());
+ if (!oppMatched) {
+ using std::swap;
+ swap(oppStartT, oppEndT);
+ }
+ oppFirst->fStartT = oppStartT;
+ oppFirst->fEndT = oppEndT;
+ oppFirst->resetBounds(sect2->fCurve);
+ this->removeCoincident(first, false);
+ sect2->removeCoincident(oppFirst, true);
+ if (deleteEmptySpans) {
+ this->deleteEmptySpans();
+ sect2->deleteEmptySpans();
+ }
+}
+
+bool SkTSect::coincidentHasT(double t) {
+ SkTSpan* test = fCoincident;
+ while (test) {
+ if (between(test->fStartT, t, test->fEndT)) {
+ return true;
+ }
+ test = test->fNext;
+ }
+ return false;
+}
+
+int SkTSect::collapsed() const {
+ int result = 0;
+ const SkTSpan* test = fHead;
+ while (test) {
+ if (test->fCollapsed) {
+ ++result;
+ }
+ test = test->next();
+ }
+ return result;
+}
+
+void SkTSect::computePerpendiculars(SkTSect* sect2,
+ SkTSpan* first, SkTSpan* last) {
+ if (!last) {
+ return;
+ }
+ const SkTCurve& opp = sect2->fCurve;
+ SkTSpan* work = first;
+ SkTSpan* prior = nullptr;
+ do {
+ if (!work->fHasPerp && !work->fCollapsed) {
+ if (prior) {
+ work->fCoinStart = prior->fCoinEnd;
+ } else {
+ work->fCoinStart.setPerp(fCurve, work->fStartT, work->pointFirst(), opp);
+ }
+ if (work->fCoinStart.isMatch()) {
+ double perpT = work->fCoinStart.perpT();
+ if (sect2->coincidentHasT(perpT)) {
+ work->fCoinStart.init();
+ } else {
+ sect2->addForPerp(work, perpT);
+ }
+ }
+ work->fCoinEnd.setPerp(fCurve, work->fEndT, work->pointLast(), opp);
+ if (work->fCoinEnd.isMatch()) {
+ double perpT = work->fCoinEnd.perpT();
+ if (sect2->coincidentHasT(perpT)) {
+ work->fCoinEnd.init();
+ } else {
+ sect2->addForPerp(work, perpT);
+ }
+ }
+ work->fHasPerp = true;
+ }
+ if (work == last) {
+ break;
+ }
+ prior = work;
+ work = work->fNext;
+ SkASSERT(work);
+ } while (true);
+}
+
+int SkTSect::countConsecutiveSpans(SkTSpan* first,
+ SkTSpan** lastPtr) const {
+ int consecutive = 1;
+ SkTSpan* last = first;
+ do {
+ SkTSpan* next = last->fNext;
+ if (!next) {
+ break;
+ }
+ if (next->fStartT > last->fEndT) {
+ break;
+ }
+ ++consecutive;
+ last = next;
+ } while (true);
+ *lastPtr = last;
+ return consecutive;
+}
+
+bool SkTSect::hasBounded(const SkTSpan* span) const {
+ const SkTSpan* test = fHead;
+ if (!test) {
+ return false;
+ }
+ do {
+ if (test->findOppSpan(span)) {
+ return true;
+ }
+ } while ((test = test->next()));
+ return false;
+}
+
+bool SkTSect::deleteEmptySpans() {
+ SkTSpan* test;
+ SkTSpan* next = fHead;
+ int safetyHatch = 1000;
+ while ((test = next)) {
+ next = test->fNext;
+ if (!test->fBounded) {
+ if (!this->removeSpan(test)) {
+ return false;
+ }
+ }
+ if (--safetyHatch < 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Given a candidate coincident range [first..last], narrow it to the actual
+// coincident run, try to extend its start backwards into the previous span
+// by binary search, then collapse the whole run (and its image on sect2)
+// into single spans and retire them to the coincident lists.
+// On success *result is the span following the removed run, or nullptr when
+// nothing remains to process; returns false on internal inconsistency.
+bool SkTSect::extractCoincident(
+        SkTSect* sect2,
+        SkTSpan* first, SkTSpan* last,
+        SkTSpan** result) {
+    first = findCoincidentRun(first, &last);
+    if (!first || !last) {
+        *result = nullptr;
+        return true;
+    }
+    // march outwards to find limit of coincidence from here to previous and next spans
+    double startT = first->fStartT;
+    double oppStartT SK_INIT_TO_AVOID_WARNING;
+    double oppEndT SK_INIT_TO_AVOID_WARNING;
+    SkTSpan* prev = first->fPrev;
+    SkASSERT(first->fCoinStart.isMatch());
+    SkTSpan* oppFirst = first->findOppT(first->fCoinStart.perpT());
+    SkOPASSERT(last->fCoinEnd.isMatch());
+    // oppMatched: the opposite curve runs in the same t direction as this one
+    bool oppMatched = first->fCoinStart.perpT() < first->fCoinEnd.perpT();
+    double coinStart;
+    SkDEBUGCODE(double coinEnd);
+    SkTSpan* cutFirst;
+    // if the previous span abuts, binary-search it for the true start of
+    // coincidence and split both curves there
+    if (prev && prev->fEndT == startT
+            && this->binarySearchCoin(sect2, startT, prev->fStartT - startT, &coinStart,
+                                      &oppStartT, &oppFirst)
+            && prev->fStartT < coinStart && coinStart < startT
+            && (cutFirst = prev->oppT(oppStartT))) {
+        oppFirst = cutFirst;
+        first = this->addSplitAt(prev, coinStart);
+        first->markCoincident();
+        prev->fCoinEnd.markCoincident();
+        if (oppFirst->fStartT < oppStartT && oppStartT < oppFirst->fEndT) {
+            SkTSpan* oppHalf = sect2->addSplitAt(oppFirst, oppStartT);
+            if (oppMatched) {
+                oppFirst->fCoinEnd.markCoincident();
+                oppHalf->markCoincident();
+                oppFirst = oppHalf;
+            } else {
+                oppFirst->markCoincident();
+                oppHalf->fCoinStart.markCoincident();
+            }
+        }
+    } else {
+        if (!oppFirst) {
+            return false;
+        }
+        SkDEBUGCODE(coinStart = first->fStartT);
+        SkDEBUGCODE(oppStartT = oppMatched ? oppFirst->fStartT : oppFirst->fEndT);
+    }
+    // FIXME: incomplete : if we're not at the end, find end of coin
+    SkTSpan* oppLast;
+    SkOPASSERT(last->fCoinEnd.isMatch());
+    oppLast = last->findOppT(last->fCoinEnd.perpT());
+    SkDEBUGCODE(coinEnd = last->fEndT);
+#ifdef SK_DEBUG
+    if (!this->globalState() || !this->globalState()->debugSkipAssert()) {
+        oppEndT = oppMatched ? oppLast->fEndT : oppLast->fStartT;
+    }
+#endif
+    if (!oppMatched) {
+        using std::swap;
+        swap(oppFirst, oppLast);
+        swap(oppStartT, oppEndT);
+    }
+    SkOPASSERT(oppStartT < oppEndT);
+    SkASSERT(coinStart == first->fStartT);
+    SkASSERT(coinEnd == last->fEndT);
+    if (!oppFirst) {
+        *result = nullptr;
+        return true;
+    }
+    SkOPASSERT(oppStartT == oppFirst->fStartT);
+    if (!oppLast) {
+        *result = nullptr;
+        return true;
+    }
+    SkOPASSERT(oppEndT == oppLast->fEndT);
+    // reduce coincident runs to single entries
+    this->validate();
+    sect2->validate();
+    bool deleteEmptySpans = this->updateBounded(first, last, oppFirst);
+    deleteEmptySpans |= sect2->updateBounded(oppFirst, oppLast, first);
+    this->removeSpanRange(first, last);
+    sect2->removeSpanRange(oppFirst, oppLast);
+    // stretch 'first' over the whole run and recompute its bounds and
+    // perpendicular feet on the opposite curve
+    first->fEndT = last->fEndT;
+    first->resetBounds(this->fCurve);
+    first->fCoinStart.setPerp(fCurve, first->fStartT, first->pointFirst(), sect2->fCurve);
+    first->fCoinEnd.setPerp(fCurve, first->fEndT, first->pointLast(), sect2->fCurve);
+    oppStartT = first->fCoinStart.perpT();
+    oppEndT = first->fCoinEnd.perpT();
+    if (between(0, oppStartT, 1) && between(0, oppEndT, 1)) {
+        if (!oppMatched) {
+            using std::swap;
+            swap(oppStartT, oppEndT);
+        }
+        oppFirst->fStartT = oppStartT;
+        oppFirst->fEndT = oppEndT;
+        oppFirst->resetBounds(sect2->fCurve);
+    }
+    this->validateBounded();
+    sect2->validateBounded();
+    last = first->fNext;
+    if (!this->removeCoincident(first, false)) {
+        return false;
+    }
+    if (!sect2->removeCoincident(oppFirst, true)) {
+        return false;
+    }
+    if (deleteEmptySpans) {
+        if (!this->deleteEmptySpans() || !sect2->deleteEmptySpans()) {
+            *result = nullptr;
+            return false;
+        }
+    }
+    this->validate();
+    sect2->validate();
+    *result = last && !last->fDeleted && fHead && sect2->fHead ? last : nullptr;
+    return true;
+}
+
+// Scan from 'first' through '*lastPtr' for a run of spans whose start and
+// end are both coincident with the opposite curve.  Returns the start of
+// the run (or nullptr if the list ends first) and trims *lastPtr back to
+// the last fully coincident span found.
+SkTSpan* SkTSect::findCoincidentRun(
+        SkTSpan* first, SkTSpan** lastPtr) {
+    SkTSpan* work = first;
+    SkTSpan* lastCandidate = nullptr;
+    first = nullptr;
+    // find the first fully coincident span
+    do {
+        if (work->fCoinStart.isMatch()) {
+#if DEBUG_T_SECT
+            work->validatePerpT(work->fCoinStart.perpT());
+            work->validatePerpPt(work->fCoinStart.perpT(), work->fCoinStart.perpPt());
+#endif
+            SkOPASSERT(work->hasOppT(work->fCoinStart.perpT()));
+            if (!work->fCoinEnd.isMatch()) {
+                break;
+            }
+            lastCandidate = work;
+            if (!first) {
+                first = work;
+            }
+        } else if (first && work->fCollapsed) {
+            // a collapsed span ends the run early
+            *lastPtr = lastCandidate;
+            return first;
+        } else {
+            lastCandidate = nullptr;
+            SkOPASSERT(!first);
+        }
+        if (work == *lastPtr) {
+            return first;
+        }
+        work = work->fNext;
+        if (!work) {
+            return nullptr;
+        }
+    } while (true);
+    if (lastCandidate) {
+        *lastPtr = lastCandidate;
+    }
+    return first;
+}
+
+// Classify how 'span' and 'oppSpan' intersect, primarily via their convex
+// hulls.  Writes 1 or 2 to *oppResult (2 when the opposite span was
+// collapsed to a single point) and returns the hull result, possibly
+// downgraded from 2 to 1 when this span still has multiple bounds.  When
+// both spans are lines it intersects them directly and may return -1 to
+// signal no intersection at all.
+int SkTSect::intersects(SkTSpan* span,
+        SkTSect* opp,
+        SkTSpan* oppSpan, int* oppResult) {
+    bool spanStart, oppStart;
+    int hullResult = span->hullsIntersect(oppSpan, &spanStart, &oppStart);
+    if (hullResult >= 0) {
+        if (hullResult == 2) {  // hulls have one point in common
+            if (!span->fBounded || !span->fBounded->fNext) {
+                // this span only bounds one opposite span: collapse it to
+                // the shared point
+                SkASSERT(!span->fBounded || span->fBounded->fBounded == oppSpan);
+                if (spanStart) {
+                    span->fEndT = span->fStartT;
+                } else {
+                    span->fStartT = span->fEndT;
+                }
+            } else {
+                hullResult = 1;
+            }
+            if (!oppSpan->fBounded || !oppSpan->fBounded->fNext) {
+                if (oppSpan->fBounded && oppSpan->fBounded->fBounded != span) {
+                    return 0;
+                }
+                if (oppStart) {
+                    oppSpan->fEndT = oppSpan->fStartT;
+                } else {
+                    oppSpan->fStartT = oppSpan->fEndT;
+                }
+                *oppResult = 2;
+            } else {
+                *oppResult = 1;
+            }
+        } else {
+            *oppResult = 1;
+        }
+        return hullResult;
+    }
+    // both spans are effectively lines: intersect the lines directly
+    if (span->fIsLine && oppSpan->fIsLine) {
+        SkIntersections i;
+        int sects = this->linesIntersect(span, opp, oppSpan, &i);
+        if (sects == 2) {
+            return *oppResult = 1;
+        }
+        if (!sects) {
+            return -1;
+        }
+        // single crossing: pin both spans to the intersection t values
+        this->removedEndCheck(span);
+        span->fStartT = span->fEndT = i[0][0];
+        opp->removedEndCheck(oppSpan);
+        oppSpan->fStartT = oppSpan->fEndT = i[1][0];
+        return *oppResult = 2;
+    }
+    // only one side is linear; defer to the span's linear intersection test
+    if (span->fIsLinear || oppSpan->fIsLinear) {
+        return *oppResult = (int) span->linearsIntersect(oppSpan);
+    }
+    return *oppResult = 1;
+}
+
+// Decide whether 'thisLine' runs parallel to the opposite curve (conics
+// only) by dropping a perpendicular from each end of the line and checking
+// that the perpendicular ray strikes the opposite curve back at that same
+// endpoint.  Parallel if both endpoints check out.
+template<typename SkTCurve>
+static bool is_parallel(const SkDLine& thisLine, const SkTCurve& opp) {
+    if (!opp.IsConic()) {
+        return false; // FIXME : breaks a lot of stuff now
+    }
+    const SkDPoint& lineStart = thisLine.fPts[0];
+    const SkDPoint& lineEnd = thisLine.fPts[1];
+    int matches = 0;
+    SkDLine perp;
+    SkIntersections perpRayI;
+    // perpendicular through the line's second endpoint
+    perp.fPts[0].fX = lineEnd.fX + (lineEnd.fY - lineStart.fY);
+    perp.fPts[0].fY = lineEnd.fY + (lineStart.fX - lineEnd.fX);
+    perp.fPts[1] = lineEnd;
+    perpRayI.intersectRay(opp, perp);
+    for (int index = 0; index < perpRayI.used(); ++index) {
+        matches += perpRayI.pt(index).approximatelyEqual(perp.fPts[1]);
+    }
+    // perpendicular through the line's first endpoint
+    perp.fPts[1].fX = lineStart.fX + (lineEnd.fY - lineStart.fY);
+    perp.fPts[1].fY = lineStart.fY + (lineStart.fX - lineEnd.fX);
+    perp.fPts[0] = lineStart;
+    perpRayI.intersectRay(opp, perp);
+    for (int index = 0; index < perpRayI.used(); ++index) {
+        matches += perpRayI.pt(index).approximatelyEqual(perp.fPts[0]);
+    }
+    return matches >= 2;
+}
+
+// while the intersection points are sufficiently far apart:
+// construct the tangent lines from the intersections
+// find the point where the tangent line intersects the opposite curve
+
+// Intersect the chords of two nearly-linear spans.  First checks whether
+// the chords are coincident (both endpoints of one chord lie on the other
+// curve, or the chords are parallel).  Otherwise it iteratively re-aims
+// each chord along the opposite curve's tangent at the closest candidate
+// until the candidate points converge (a bounded number of passes).  If
+// convergence fails, probes for near-coincidence with perpendiculars and
+// binary-searches for a single crossing.  Returns the count recorded in
+// 'i' -- 2 signals the spans are effectively coincident -- or 0 for none.
+int SkTSect::linesIntersect(SkTSpan* span,
+        SkTSect* opp,
+        SkTSpan* oppSpan, SkIntersections* i) {
+    SkIntersections thisRayI SkDEBUGCODE((span->fDebugGlobalState));
+    SkIntersections oppRayI SkDEBUGCODE((span->fDebugGlobalState));
+    SkDLine thisLine = {{ span->pointFirst(), span->pointLast() }};
+    SkDLine oppLine = {{ oppSpan->pointFirst(), oppSpan->pointLast() }};
+    int loopCount = 0;
+    double bestDistSq = DBL_MAX;
+    if (!thisRayI.intersectRay(opp->fCurve, thisLine)) {
+        return 0;
+    }
+    if (!oppRayI.intersectRay(this->fCurve, oppLine)) {
+        return 0;
+    }
+    // if the ends of each line intersect the opposite curve, the lines are coincident
+    if (thisRayI.used() > 1) {
+        int ptMatches = 0;
+        for (int tIndex = 0; tIndex < thisRayI.used(); ++tIndex) {
+            for (int lIndex = 0; lIndex < (int) std::size(thisLine.fPts); ++lIndex) {
+                ptMatches += thisRayI.pt(tIndex).approximatelyEqual(thisLine.fPts[lIndex]);
+            }
+        }
+        if (ptMatches == 2 || is_parallel(thisLine, opp->fCurve)) {
+            return 2;
+        }
+    }
+    if (oppRayI.used() > 1) {
+        int ptMatches = 0;
+        for (int oIndex = 0; oIndex < oppRayI.used(); ++oIndex) {
+            for (int lIndex = 0; lIndex < (int) std::size(oppLine.fPts); ++lIndex) {
+                ptMatches += oppRayI.pt(oIndex).approximatelyEqual(oppLine.fPts[lIndex]);
+            }
+        }
+        if (ptMatches == 2|| is_parallel(oppLine, this->fCurve)) {
+            return 2;
+        }
+    }
+    // convergence loop: refine the candidate intersection
+    do {
+        // pick the closest pair of points
+        double closest = DBL_MAX;
+        int closeIndex SK_INIT_TO_AVOID_WARNING;
+        int oppCloseIndex SK_INIT_TO_AVOID_WARNING;
+        for (int index = 0; index < oppRayI.used(); ++index) {
+            if (!roughly_between(span->fStartT, oppRayI[0][index], span->fEndT)) {
+                continue;
+            }
+            for (int oIndex = 0; oIndex < thisRayI.used(); ++oIndex) {
+                if (!roughly_between(oppSpan->fStartT, thisRayI[0][oIndex], oppSpan->fEndT)) {
+                    continue;
+                }
+                double distSq = thisRayI.pt(index).distanceSquared(oppRayI.pt(oIndex));
+                if (closest > distSq) {
+                    closest = distSq;
+                    closeIndex = index;
+                    oppCloseIndex = oIndex;
+                }
+            }
+        }
+        if (closest == DBL_MAX) {
+            break;
+        }
+        const SkDPoint& oppIPt = thisRayI.pt(oppCloseIndex);
+        const SkDPoint& iPt = oppRayI.pt(closeIndex);
+        // converged: both candidates are in range and approximately equal
+        if (between(span->fStartT, oppRayI[0][closeIndex], span->fEndT)
+                && between(oppSpan->fStartT, thisRayI[0][oppCloseIndex], oppSpan->fEndT)
+                && oppIPt.approximatelyEqual(iPt)) {
+            i->merge(oppRayI, closeIndex, thisRayI, oppCloseIndex);
+            return i->used();
+        }
+        double distSq = oppIPt.distanceSquared(iPt);
+        // bail if the candidates stop getting closer or we loop too long
+        if (bestDistSq < distSq || ++loopCount > 5) {
+            return 0;
+        }
+        bestDistSq = distSq;
+        // re-aim this chord along the tangent at the candidate t
+        double oppStart = oppRayI[0][closeIndex];
+        thisLine[0] = fCurve.ptAtT(oppStart);
+        thisLine[1] = thisLine[0] + fCurve.dxdyAtT(oppStart);
+        if (!thisRayI.intersectRay(opp->fCurve, thisLine)) {
+            break;
+        }
+        double start = thisRayI[0][oppCloseIndex];
+        oppLine[0] = opp->fCurve.ptAtT(start);
+        oppLine[1] = oppLine[0] + opp->fCurve.dxdyAtT(start);
+        if (!oppRayI.intersectRay(this->fCurve, oppLine)) {
+            break;
+        }
+    } while (true);
+    // convergence may fail if the curves are nearly coincident
+    SkTCoincident oCoinS, oCoinE;
+    oCoinS.setPerp(opp->fCurve, oppSpan->fStartT, oppSpan->pointFirst(), fCurve);
+    oCoinE.setPerp(opp->fCurve, oppSpan->fEndT, oppSpan->pointLast(), fCurve);
+    double tStart = oCoinS.perpT();
+    double tEnd = oCoinE.perpT();
+    bool swap = tStart > tEnd;
+    if (swap) {
+        using std::swap;
+        swap(tStart, tEnd);
+    }
+    tStart = std::max(tStart, span->fStartT);
+    tEnd = std::min(tEnd, span->fEndT);
+    if (tStart > tEnd) {
+        return 0;
+    }
+    // perpendicular vectors at the clamped interval ends; opposite signs
+    // (negative dot) mean the curves cross somewhere inside
+    SkDVector perpS, perpE;
+    if (tStart == span->fStartT) {
+        SkTCoincident coinS;
+        coinS.setPerp(fCurve, span->fStartT, span->pointFirst(), opp->fCurve);
+        perpS = span->pointFirst() - coinS.perpPt();
+    } else if (swap) {
+        perpS = oCoinE.perpPt() - oppSpan->pointLast();
+    } else {
+        perpS = oCoinS.perpPt() - oppSpan->pointFirst();
+    }
+    if (tEnd == span->fEndT) {
+        SkTCoincident coinE;
+        coinE.setPerp(fCurve, span->fEndT, span->pointLast(), opp->fCurve);
+        perpE = span->pointLast() - coinE.perpPt();
+    } else if (swap) {
+        perpE = oCoinS.perpPt() - oppSpan->pointFirst();
+    } else {
+        perpE = oCoinE.perpPt() - oppSpan->pointLast();
+    }
+    if (perpS.dot(perpE) >= 0) {
+        return 0;
+    }
+    // binary-search the interval for the crossing point
+    SkTCoincident coinW;
+    double workT = tStart;
+    double tStep = tEnd - tStart;
+    SkDPoint workPt;
+    do {
+        tStep *= 0.5;
+        if (precisely_zero(tStep)) {
+            return 0;
+        }
+        workT += tStep;
+        workPt = fCurve.ptAtT(workT);
+        coinW.setPerp(fCurve, workT, workPt, opp->fCurve);
+        double perpT = coinW.perpT();
+        if (coinW.isMatch() ? !between(oppSpan->fStartT, perpT, oppSpan->fEndT) : perpT < 0) {
+            continue;
+        }
+        SkDVector perpW = workPt - coinW.perpPt();
+        if ((perpS.dot(perpW) >= 0) == (tStep < 0)) {
+            tStep = -tStep;
+        }
+        if (workPt.approximatelyEqual(coinW.perpPt())) {
+            break;
+        }
+    } while (true);
+    double oppTTest = coinW.perpT();
+    if (!opp->fHead->contains(oppTTest)) {
+        return 0;
+    }
+    i->setMax(1);
+    i->insert(workT, oppTTest, workPt);
+    return 1;
+}
+
+// Retire an already-unlinked span: decrement the active count and push the
+// span onto the deleted list.  Returns false if the active count would go
+// negative (internal bookkeeping error).
+bool SkTSect::markSpanGone(SkTSpan* span) {
+    fActiveCount -= 1;
+    if (fActiveCount < 0) {
+        return false;
+    }
+    SkOPASSERT(!span->fDeleted);
+    span->fNext = fDeleted;
+    fDeleted = span;
+    span->fDeleted = true;
+    return true;
+}
+
+// True when the two curves point in the same general direction at the
+// given t values, i.e. their tangents' dot product is non-negative.
+bool SkTSect::matchedDirection(double t, const SkTSect* sect2,
+        double t2) const {
+    const SkDVector tangent1 = this->fCurve.dxdyAtT(t);
+    const SkDVector tangent2 = sect2->fCurve.dxdyAtT(t2);
+    return tangent1.dot(tangent2) >= 0;
+}
+
+// Compute the direction match once and cache it through *oppMatched /
+// *calcMatched; on later calls only debug-assert that it hasn't changed.
+void SkTSect::matchedDirCheck(double t, const SkTSect* sect2,
+        double t2, bool* calcMatched, bool* oppMatched) const {
+    if (!*calcMatched) {
+        *oppMatched = this->matchedDirection(t, sect2, t2);
+        *calcMatched = true;
+    } else {
+        SkASSERT(*oppMatched == this->matchedDirection(t, sect2, t2));
+    }
+}
+
+// Join abutting entries on this curve's coincident list: repeatedly pick
+// the smallest unprocessed coincident span, find the nearest span above
+// it, and merge the two when the midpoint of the gap between them is also
+// coincident with sect2's curve.
+void SkTSect::mergeCoincidence(SkTSect* sect2) {
+    double smallLimit = 0;
+    do {
+        // find the smallest unprocessed span
+        SkTSpan* smaller = nullptr;
+        SkTSpan* test = fCoincident;
+        do {
+            if (!test) {
+                return;
+            }
+            if (test->fStartT < smallLimit) {
+                continue;
+            }
+            if (smaller && smaller->fEndT < test->fStartT) {
+                continue;
+            }
+            smaller = test;
+        } while ((test = test->fNext));
+        if (!smaller) {
+            return;
+        }
+        smallLimit = smaller->fEndT;
+        // find next larger span
+        SkTSpan* prior = nullptr;
+        SkTSpan* larger = nullptr;
+        SkTSpan* largerPrior = nullptr;
+        test = fCoincident;
+        do {
+            if (test->fStartT < smaller->fEndT) {
+                continue;
+            }
+            SkOPASSERT(test->fStartT != smaller->fEndT);
+            if (larger && larger->fStartT < test->fStartT) {
+                continue;
+            }
+            largerPrior = prior;
+            larger = test;
+            // the comma operator keeps 'prior' tracking the previous node
+        } while ((void) (prior = test), (test = test->fNext));
+        if (!larger) {
+            continue;
+        }
+        // check middle t value to see if it is coincident as well
+        double midT = (smaller->fEndT + larger->fStartT) / 2;
+        SkDPoint midPt = fCurve.ptAtT(midT);
+        SkTCoincident coin;
+        coin.setPerp(fCurve, midT, midPt, sect2->fCurve);
+        if (coin.isMatch()) {
+            // absorb 'larger' into 'smaller' and unlink it
+            smaller->fEndT = larger->fEndT;
+            smaller->fCoinEnd = larger->fCoinEnd;
+            if (largerPrior) {
+                largerPrior->fNext = larger->fNext;
+                largerPrior->validate();
+            } else {
+                fCoincident = larger->fNext;
+            }
+        }
+    } while (true);
+}
+
+// Return the span preceding 'span' in the active list, or nullptr when
+// 'span' is the head.  Asserts (debug) if 'span' is not in the list.
+SkTSpan* SkTSect::prev(
+        SkTSpan* span) const {
+    SkTSpan* trailing = nullptr;
+    SkTSpan* walker = fHead;
+    while (walker != span) {
+        trailing = walker;
+        walker = walker->fNext;
+        SkASSERT(walker);
+    }
+    return trailing;
+}
+
+// Re-insert collapsed spans from the deleted list back into the active
+// list, in fStartT order, so their t values can still contribute to the
+// final result.
+void SkTSect::recoverCollapsed() {
+    SkTSpan* deleted = fDeleted;
+    while (deleted) {
+        SkTSpan* delNext = deleted->fNext;
+        if (deleted->fCollapsed) {
+            // find the insertion point that keeps the list sorted by t
+            SkTSpan** spanPtr = &fHead;
+            while (*spanPtr && (*spanPtr)->fEndT <= deleted->fStartT) {
+                spanPtr = &(*spanPtr)->fNext;
+            }
+            deleted->fNext = *spanPtr;
+            *spanPtr = deleted;
+        }
+        deleted = delNext;
+    }
+}
+
+// Drop all of span's bounded links except the one to 'keep'; any opposite
+// span that thereby loses its last bound is removed from opp's list.
+void SkTSect::removeAllBut(const SkTSpan* keep,
+        SkTSpan* span, SkTSect* opp) {
+    const SkTSpanBounded* testBounded = span->fBounded;
+    while (testBounded) {
+        SkTSpan* bounded = testBounded->fBounded;
+        const SkTSpanBounded* next = testBounded->fNext;
+        // may have been deleted when opp did 'remove all but'
+        if (bounded != keep && !bounded->fDeleted) {
+            // in debug builds, assert removeBounded() returns false here,
+            // i.e. 'span' still has at least one bound left ('keep')
+            SkAssertResult(SkDEBUGCODE(!) span->removeBounded(bounded));
+            if (bounded->removeBounded(span)) {
+                opp->removeSpan(bounded);
+            }
+        }
+        testBounded = next;
+    }
+    SkASSERT(!span->fDeleted);
+    SkASSERT(span->findOppSpan(keep));
+    SkASSERT(keep->findOppSpan(span));
+}
+
+// Remove spans whose perpendicular feet lie on the same side of the
+// opposite curve at both ends (dot of the two perpendicular vectors is
+// positive) -- presumably such a span cannot cross the opposite curve.
+// NOTE(review): derefs fHead without a null check; assumes callers only
+// invoke this with a non-empty active list -- confirm at call sites.
+bool SkTSect::removeByPerpendicular(SkTSect* opp) {
+    SkTSpan* test = fHead;
+    SkTSpan* next;
+    do {
+        next = test->fNext;
+        // skip spans whose perpendiculars never hit the opposite curve
+        if (test->fCoinStart.perpT() < 0 || test->fCoinEnd.perpT() < 0) {
+            continue;
+        }
+        SkDVector startV = test->fCoinStart.perpPt() - test->pointFirst();
+        SkDVector endV = test->fCoinEnd.perpPt() - test->pointLast();
+#if DEBUG_T_SECT
+        SkDebugf("%s startV=(%1.9g,%1.9g) endV=(%1.9g,%1.9g) dot=%1.9g\n", __FUNCTION__,
+                startV.fX, startV.fY, endV.fX, endV.fY, startV.dot(endV));
+#endif
+        if (startV.dot(endV) <= 0) {
+            continue;
+        }
+        if (!this->removeSpans(test, opp)) {
+            return false;
+        }
+    } while ((test = next));
+    return true;
+}
+
+// Unlink 'span' from the active list.  If the caller vouches for it
+// (isBetween) or its start perpendicular lands on the opposite curve, keep
+// it on the coincident list; otherwise retire it outright.  Returns false
+// only if the unlink fails.
+bool SkTSect::removeCoincident(SkTSpan* span, bool isBetween) {
+    if (!this->unlinkSpan(span)) {
+        return false;
+    }
+    const bool keepAsCoincident = isBetween || between(0, span->fCoinStart.perpT(), 1);
+    if (!keepAsCoincident) {
+        this->markSpanGone(span);
+    } else {
+        --fActiveCount;
+        span->fNext = fCoincident;
+        fCoincident = span;
+    }
+    return true;
+}
+
+// Note when a span touching an end of the curve (t == 0 or t == 1) is
+// being removed, so the ends can be re-examined later.
+void SkTSect::removedEndCheck(SkTSpan* span) {
+    if (span->fStartT == 0) {
+        fRemovedStartT = true;
+    }
+    if (span->fEndT == 1) {
+        fRemovedEndT = true;
+    }
+}
+
+// Remove 'span' from the active list: record whether a curve end (t == 0
+// or t == 1) is going away, unlink it, then retire it to the deleted list.
+// Returns false if the span cannot be unlinked or the active count
+// underflows.
+// (A stray line-continuation backslash after the opening brace was removed;
+// it spliced the brace onto the next statement for no reason.)
+bool SkTSect::removeSpan(SkTSpan* span) {
+    this->removedEndCheck(span);
+    if (!this->unlinkSpan(span)) {
+        return false;
+    }
+    return this->markSpanGone(span);
+}
+
+// Remove the spans after 'first' through 'last' (inclusive) from the
+// active list, relinking 'first' directly to last's successor.  The caller
+// is responsible for widening 'first' to cover the removed t range.
+void SkTSect::removeSpanRange(SkTSpan* first,
+        SkTSpan* last) {
+    if (first == last) {
+        return;
+    }
+    SkTSpan* span = first;
+    SkASSERT(span);
+    SkTSpan* final = last->fNext;
+    SkTSpan* next = span->fNext;
+    // mark every span in (first, last] gone without unlinking one by one
+    while ((span = next) && span != final) {
+        next = span->fNext;
+        this->markSpanGone(span);
+    }
+    if (final) {
+        final->fPrev = first;
+    }
+    first->fNext = final;
+    // world may not be ready for validation here
+    first->validate();
+}
+
+// Sever every bounded link between 'span' and the opposite spans it
+// intersects, removing either side from its active list once it loses its
+// last bound.  Returns false if 'span' was deleted while the opposite sect
+// still holds a reference to it (inconsistent state).
+bool SkTSect::removeSpans(SkTSpan* span,
+        SkTSect* opp) {
+    SkTSpanBounded* bounded = span->fBounded;
+    while (bounded) {
+        SkTSpan* spanBounded = bounded->fBounded;
+        SkTSpanBounded* next = bounded->fNext;
+        if (span->removeBounded(spanBounded)) {  // shuffles last into position 0
+            this->removeSpan(span);
+        }
+        if (spanBounded->removeBounded(span)) {
+            opp->removeSpan(spanBounded);
+        }
+        if (span->fDeleted && opp->hasBounded(span)) {
+            return false;
+        }
+        bounded = next;
+    }
+    return true;
+}
+
+// Locate the active span containing t.  *priorSpan receives the span just
+// before it (or nullptr); returns nullptr when no span's range covers t.
+SkTSpan* SkTSect::spanAtT(double t,
+        SkTSpan** priorSpan) {
+    SkTSpan* before = nullptr;
+    SkTSpan* walk = fHead;
+    for (; walk && walk->fEndT < t; walk = walk->fNext) {
+        before = walk;
+    }
+    *priorSpan = before;
+    if (walk && walk->fStartT <= t) {
+        return walk;
+    }
+    return nullptr;
+}
+
+// Return the active span with the largest fEndT, or nullptr if the walk
+// exceeds the safety net (corrupted/cyclic list guard).
+SkTSpan* SkTSect::tail() {
+    SkTSpan* best = fHead;
+    int safetyNet = 100000;
+    for (SkTSpan* walk = fHead->fNext; walk; walk = walk->fNext) {
+        if (!--safetyNet) {
+            return nullptr;
+        }
+        if (walk->fEndT > best->fEndT) {
+            best = walk;
+        }
+    }
+    return best;
+}
+
+/* Each span has a range of opposite spans it intersects. After the span is split in two,
+ adjust the range to its new size */
+
+// Recompute span's bounds, then re-test it against every opposite span it
+// is bounded by.  Pairs that no longer intersect have their links severed
+// (removing spans that lose their last bound); a full containment result
+// (sects == 2) re-bounds both sides exclusively to each other.
+// Returns false via FAIL_IF when the bounds cannot be initialized.
+bool SkTSect::trim(SkTSpan* span,
+        SkTSect* opp) {
+    FAIL_IF(!span->initBounds(fCurve));
+    const SkTSpanBounded* testBounded = span->fBounded;
+    while (testBounded) {
+        SkTSpan* test = testBounded->fBounded;
+        const SkTSpanBounded* next = testBounded->fNext;
+        int oppSects, sects = this->intersects(span, opp, test, &oppSects);
+        if (sects >= 1) {
+            if (oppSects == 2) {
+                // the opposite span collapsed: bind it to 'span' alone
+                test->initBounds(opp->fCurve);
+                opp->removeAllBut(span, test, this);
+            }
+            if (sects == 2) {
+                span->initBounds(fCurve);
+                this->removeAllBut(test, span, opp);
+                return true;
+            }
+        } else {
+            // no intersection: sever the pair's links in both directions
+            if (span->removeBounded(test)) {
+                this->removeSpan(span);
+            }
+            if (test->removeBounded(span)) {
+                opp->removeSpan(test);
+            }
+        }
+        testBounded = next;
+    }
+    return true;
+}
+
+// Splice 'span' out of the doubly linked active list.  Returns false when
+// the successor's t range is inverted (start > end), signaling corruption.
+bool SkTSect::unlinkSpan(SkTSpan* span) {
+    SkTSpan* before = span->fPrev;
+    SkTSpan* after = span->fNext;
+    if (!before) {
+        // removing the head
+        fHead = after;
+        if (after) {
+            after->fPrev = nullptr;
+        }
+        return true;
+    }
+    before->fNext = after;
+    if (after) {
+        after->fPrev = before;
+        if (after->fStartT > after->fEndT) {
+            return false;
+        }
+        // world may not be ready for validate here
+        after->validate();
+    }
+    return true;
+}
+
+// Strip all bounded links from the spans in [first..last], then bind
+// 'first' exclusively to 'oppFirst'.  Returns true if any opposite span
+// lost its last bound (so the caller should delete empty spans).
+bool SkTSect::updateBounded(SkTSpan* first,
+        SkTSpan* last, SkTSpan* oppFirst) {
+    const SkTSpan* stop = last->next();
+    bool removedLastBound = false;
+    SkTSpan* walk = first;
+    do {
+        removedLastBound |= walk->removeAllBounded();
+        walk = walk->fNext;
+    } while (walk && walk != stop);
+    first->fBounded = nullptr;
+    first->addBounded(oppFirst, &fHeap);
+    // cannot call validate until remove span range is called
+    return removedLastBound;
+}
+
+// Debug-only consistency checks: the active list is well formed and sorted
+// by t, the active count matches the list length, and (DEBUG_T_SECT) the
+// active + deleted + coincident spans account for every allocation.
+void SkTSect::validate() const {
+#if DEBUG_VALIDATE
+    int count = 0;
+    double last = 0;
+    if (fHead) {
+        const SkTSpan* span = fHead;
+        SkASSERT(!span->fPrev);
+        const SkTSpan* next;
+        do {
+            span->validate();
+            SkASSERT(span->fStartT >= last);
+            last = span->fEndT;
+            ++count;
+            next = span->fNext;
+            SkASSERT(next != span);
+        } while ((span = next) != nullptr);
+    }
+    SkASSERT(count == fActiveCount);
+#endif
+#if DEBUG_T_SECT
+    SkASSERT(fActiveCount <= fDebugAllocatedCount);
+    int deletedCount = 0;
+    const SkTSpan* deleted = fDeleted;
+    while (deleted) {
+        ++deletedCount;
+        deleted = deleted->fNext;
+    }
+    const SkTSpan* coincident = fCoincident;
+    while (coincident) {
+        ++deletedCount;
+        coincident = coincident->fNext;
+    }
+    SkASSERT(fActiveCount + deletedCount == fDebugAllocatedCount);
+#endif
+}
+
+// Debug-only: run the bounded-list consistency check on every active span.
+void SkTSect::validateBounded() const {
+#if DEBUG_VALIDATE
+    for (const SkTSpan* span = fHead; span; span = span->fNext) {
+        span->validateBounded();
+    }
+#endif
+}
+
+// Record intersections wherever the two curves share endpoints -- exact
+// matches first, then approximate ones for any end not already matched --
+// and return a bitmask (kZeroS1Set / kOneS1Set / kZeroS2Set / kOneS2Set)
+// of which curve ends (t == 0 / t == 1) were matched.
+int SkTSect::EndsEqual(const SkTSect* sect1,
+        const SkTSect* sect2, SkIntersections* intersections) {
+    int zeroOneSet = 0;
+    if (sect1->fCurve[0] == sect2->fCurve[0]) {
+        zeroOneSet |= kZeroS1Set | kZeroS2Set;
+        intersections->insert(0, 0, sect1->fCurve[0]);
+    }
+    if (sect1->fCurve[0] == sect2->pointLast()) {
+        zeroOneSet |= kZeroS1Set | kOneS2Set;
+        intersections->insert(0, 1, sect1->fCurve[0]);
+    }
+    if (sect1->pointLast() == sect2->fCurve[0]) {
+        zeroOneSet |= kOneS1Set | kZeroS2Set;
+        intersections->insert(1, 0, sect1->pointLast());
+    }
+    if (sect1->pointLast() == sect2->pointLast()) {
+        zeroOneSet |= kOneS1Set | kOneS2Set;
+        intersections->insert(1, 1, sect1->pointLast());
+    }
+    // check for zero
+    if (!(zeroOneSet & (kZeroS1Set | kZeroS2Set))
+            && sect1->fCurve[0].approximatelyEqual(sect2->fCurve[0])) {
+        zeroOneSet |= kZeroS1Set | kZeroS2Set;
+        intersections->insertNear(0, 0, sect1->fCurve[0], sect2->fCurve[0]);
+    }
+    if (!(zeroOneSet & (kZeroS1Set | kOneS2Set))
+            && sect1->fCurve[0].approximatelyEqual(sect2->pointLast())) {
+        zeroOneSet |= kZeroS1Set | kOneS2Set;
+        intersections->insertNear(0, 1, sect1->fCurve[0], sect2->pointLast());
+    }
+    // check for one
+    if (!(zeroOneSet & (kOneS1Set | kZeroS2Set))
+            && sect1->pointLast().approximatelyEqual(sect2->fCurve[0])) {
+        zeroOneSet |= kOneS1Set | kZeroS2Set;
+        intersections->insertNear(1, 0, sect1->pointLast(), sect2->fCurve[0]);
+    }
+    if (!(zeroOneSet & (kOneS1Set | kOneS2Set))
+            && sect1->pointLast().approximatelyEqual(sect2->pointLast())) {
+        zeroOneSet |= kOneS1Set | kOneS2Set;
+        intersections->insertNear(1, 1, sect1->pointLast(), sect2->pointLast());
+    }
+    return zeroOneSet;
+}
+
+// Tracks the closest pair of span endpoints between the two curves; after
+// the subdivision loop ends, surviving records are turned into point
+// intersections (see SkClosestSect below).
+struct SkClosestRecord {
+    // order records by squared endpoint distance
+    bool operator<(const SkClosestRecord& rh) const {
+        return fClosest < rh.fClosest;
+    }
+
+    // emit the stored endpoint pair as an intersection; an index of 0
+    // selects the span's start t, nonzero selects its end t
+    void addIntersection(SkIntersections* intersections) const {
+        double r1t = fC1Index ? fC1Span->endT() : fC1Span->startT();
+        double r2t = fC2Index ? fC2Span->endT() : fC2Span->startT();
+        intersections->insert(r1t, r2t, fC1Span->part()[fC1Index]);
+    }
+
+    // consider the endpoint pair (c1Index of span1, c2Index of span2);
+    // record it if the points approximately coincide and are at least as
+    // close as the current best
+    void findEnd(const SkTSpan* span1, const SkTSpan* span2,
+            int c1Index, int c2Index) {
+        const SkTCurve& c1 = span1->part();
+        const SkTCurve& c2 = span2->part();
+        if (!c1[c1Index].approximatelyEqual(c2[c2Index])) {
+            return;
+        }
+        double dist = c1[c1Index].distanceSquared(c2[c2Index]);
+        if (fClosest < dist) {
+            return;
+        }
+        fC1Span = span1;
+        fC2Span = span2;
+        fC1StartT = span1->startT();
+        fC1EndT = span1->endT();
+        fC2StartT = span2->startT();
+        fC2EndT = span2->endT();
+        fC1Index = c1Index;
+        fC2Index = c2Index;
+        fClosest = dist;
+    }
+
+    // true when this record and 'mate' share or abut a span on either
+    // curve; debug-asserts the spans do not properly overlap
+    bool matesWith(const SkClosestRecord& mate  SkDEBUGPARAMS(SkIntersections* i)) const {
+        SkOPOBJASSERT(i, fC1Span == mate.fC1Span || fC1Span->endT() <= mate.fC1Span->startT()
+                || mate.fC1Span->endT() <= fC1Span->startT());
+        SkOPOBJASSERT(i, fC2Span == mate.fC2Span || fC2Span->endT() <= mate.fC2Span->startT()
+                || mate.fC2Span->endT() <= fC2Span->startT());
+        return fC1Span == mate.fC1Span || fC1Span->endT() == mate.fC1Span->startT()
+                || fC1Span->startT() == mate.fC1Span->endT()
+                || fC2Span == mate.fC2Span
+                || fC2Span->endT() == mate.fC2Span->startT()
+                || fC2Span->startT() == mate.fC2Span->endT();
+    }
+
+    // adopt the mate's (closer) endpoint pair
+    void merge(const SkClosestRecord& mate) {
+        fC1Span = mate.fC1Span;
+        fC2Span = mate.fC2Span;
+        fClosest = mate.fClosest;
+        fC1Index = mate.fC1Index;
+        fC2Index = mate.fC2Index;
+    }
+
+    void reset() {
+        fClosest = FLT_MAX;
+        SkDEBUGCODE(fC1Span = nullptr);
+        SkDEBUGCODE(fC2Span = nullptr);
+        SkDEBUGCODE(fC1Index = fC2Index = -1);
+    }
+
+    // widen the tracked t ranges to cover the mate's spans as well
+    void update(const SkClosestRecord& mate) {
+        fC1StartT = std::min(fC1StartT, mate.fC1StartT);
+        fC1EndT = std::max(fC1EndT, mate.fC1EndT);
+        fC2StartT = std::min(fC2StartT, mate.fC2StartT);
+        fC2EndT = std::max(fC2EndT, mate.fC2EndT);
+    }
+
+    const SkTSpan* fC1Span;   // best span on curve 1
+    const SkTSpan* fC2Span;   // best span on curve 2
+    double fC1StartT;         // accumulated t range on curve 1
+    double fC1EndT;
+    double fC2StartT;         // accumulated t range on curve 2
+    double fC2EndT;
+    double fClosest;          // squared distance of the best endpoint pair
+    int fC1Index;             // 0 = span start point, else span end point
+    int fC2Index;
+};
+
+// Collects SkClosestRecords across all remaining span pairs, merging
+// records whose spans abut, then emits the survivors (sorted by distance)
+// as intersections.
+struct SkClosestSect {
+    SkClosestSect()
+        : fUsed(0) {
+        // keep one spare record at the end as the working slot
+        fClosest.push_back().reset();
+    }
+
+    // probe the four endpoint pairings of span1 x span2; returns true when
+    // a new record was kept, false when nothing matched or the result was
+    // merged into an existing (mating) record
+    bool find(const SkTSpan* span1, const SkTSpan* span2
+            SkDEBUGPARAMS(SkIntersections* i)) {
+        SkClosestRecord* record = &fClosest[fUsed];
+        record->findEnd(span1, span2, 0, 0);
+        record->findEnd(span1, span2, 0, span2->part().pointLast());
+        record->findEnd(span1, span2, span1->part().pointLast(), 0);
+        record->findEnd(span1, span2, span1->part().pointLast(), span2->part().pointLast());
+        if (record->fClosest == FLT_MAX) {
+            return false;
+        }
+        for (int index = 0; index < fUsed; ++index) {
+            SkClosestRecord* test = &fClosest[index];
+            if (test->matesWith(*record  SkDEBUGPARAMS(i))) {
+                if (test->fClosest > record->fClosest) {
+                    test->merge(*record);
+                }
+                test->update(*record);
+                record->reset();
+                return false;
+            }
+        }
+        ++fUsed;
+        fClosest.push_back().reset();
+        return true;
+    }
+
+    // sort the kept records by distance and add each as an intersection
+    void finish(SkIntersections* intersections) const {
+        SkSTArray<SkDCubic::kMaxIntersections * 3,
+                const SkClosestRecord*, true> closestPtrs;
+        for (int index = 0; index < fUsed; ++index) {
+            closestPtrs.push_back(&fClosest[index]);
+        }
+        SkTQSort<const SkClosestRecord>(closestPtrs.begin(), closestPtrs.end());
+        for (int index = 0; index < fUsed; ++index) {
+            const SkClosestRecord* test = closestPtrs[index];
+            test->addIntersection(intersections);
+        }
+    }
+
+    // this is oversized so that an extra records can merge into final one
+    SkSTArray<SkDCubic::kMaxIntersections * 2, SkClosestRecord, true> fClosest;
+    int fUsed;  // count of kept records (excludes the spare working slot)
+};
+
+// entry point: intersect the two curves, accumulating results in 'intersections'
+
+void SkTSect::BinarySearch(SkTSect* sect1,
+ SkTSect* sect2, SkIntersections* intersections) {
+#if DEBUG_T_SECT_DUMP > 1
+ gDumpTSectNum = 0;
+#endif
+ SkDEBUGCODE(sect1->fOppSect = sect2);
+ SkDEBUGCODE(sect2->fOppSect = sect1);
+ intersections->reset();
+ intersections->setMax(sect1->fCurve.maxIntersections() + 4); // give extra for slop
+ SkTSpan* span1 = sect1->fHead;
+ SkTSpan* span2 = sect2->fHead;
+ int oppSect, sect = sect1->intersects(span1, sect2, span2, &oppSect);
+// SkASSERT(between(0, sect, 2));
+ if (!sect) {
+ return;
+ }
+ if (sect == 2 && oppSect == 2) {
+ (void) EndsEqual(sect1, sect2, intersections);
+ return;
+ }
+ span1->addBounded(span2, &sect1->fHeap);
+ span2->addBounded(span1, &sect2->fHeap);
+ const int kMaxCoinLoopCount = 8;
+ int coinLoopCount = kMaxCoinLoopCount;
+ double start1s SK_INIT_TO_AVOID_WARNING;
+ double start1e SK_INIT_TO_AVOID_WARNING;
+ do {
+ // find the largest bounds
+ SkTSpan* largest1 = sect1->boundsMax();
+ if (!largest1) {
+ if (sect1->fHung) {
+ return;
+ }
+ break;
+ }
+ SkTSpan* largest2 = sect2->boundsMax();
+ // split it
+ if (!largest2 || (largest1 && (largest1->fBoundsMax > largest2->fBoundsMax
+ || (!largest1->fCollapsed && largest2->fCollapsed)))) {
+ if (sect2->fHung) {
+ return;
+ }
+ if (largest1->fCollapsed) {
+ break;
+ }
+ sect1->resetRemovedEnds();
+ sect2->resetRemovedEnds();
+ // trim parts that don't intersect the opposite
+ SkTSpan* half1 = sect1->addOne();
+ SkDEBUGCODE(half1->debugSetGlobalState(sect1->globalState()));
+ if (!half1->split(largest1, &sect1->fHeap)) {
+ break;
+ }
+ if (!sect1->trim(largest1, sect2)) {
+ SkOPOBJASSERT(intersections, 0);
+ return;
+ }
+ if (!sect1->trim(half1, sect2)) {
+ SkOPOBJASSERT(intersections, 0);
+ return;
+ }
+ } else {
+ if (largest2->fCollapsed) {
+ break;
+ }
+ sect1->resetRemovedEnds();
+ sect2->resetRemovedEnds();
+ // trim parts that don't intersect the opposite
+ SkTSpan* half2 = sect2->addOne();
+ SkDEBUGCODE(half2->debugSetGlobalState(sect2->globalState()));
+ if (!half2->split(largest2, &sect2->fHeap)) {
+ break;
+ }
+ if (!sect2->trim(largest2, sect1)) {
+ SkOPOBJASSERT(intersections, 0);
+ return;
+ }
+ if (!sect2->trim(half2, sect1)) {
+ SkOPOBJASSERT(intersections, 0);
+ return;
+ }
+ }
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kIterations_DebugLoop);
+#endif
+ // if there are 9 or more continuous spans on both sects, suspect coincidence
+ if (sect1->fActiveCount >= COINCIDENT_SPAN_COUNT
+ && sect2->fActiveCount >= COINCIDENT_SPAN_COUNT) {
+ if (coinLoopCount == kMaxCoinLoopCount) {
+ start1s = sect1->fHead->fStartT;
+ start1e = sect1->tail()->fEndT;
+ }
+ if (!sect1->coincidentCheck(sect2)) {
+ return;
+ }
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kCoinCheck_DebugLoop);
+#endif
+ if (!--coinLoopCount && sect1->fHead && sect2->fHead) {
+ /* All known working cases resolve in two tries. Sadly, cubicConicTests[0]
+ gets stuck in a loop. It adds an extension to allow a coincident end
+ perpendicular to track its intersection in the opposite curve. However,
+ the bounding box of the extension does not intersect the original curve,
+ so the extension is discarded, only to be added again the next time around. */
+ sect1->coincidentForce(sect2, start1s, start1e);
+ sect1->validate();
+ sect2->validate();
+ }
+ }
+ if (sect1->fActiveCount >= COINCIDENT_SPAN_COUNT
+ && sect2->fActiveCount >= COINCIDENT_SPAN_COUNT) {
+ if (!sect1->fHead) {
+ return;
+ }
+ sect1->computePerpendiculars(sect2, sect1->fHead, sect1->tail());
+ if (!sect2->fHead) {
+ return;
+ }
+ sect2->computePerpendiculars(sect1, sect2->fHead, sect2->tail());
+ if (!sect1->removeByPerpendicular(sect2)) {
+ return;
+ }
+ sect1->validate();
+ sect2->validate();
+#if DEBUG_T_SECT_LOOP_COUNT
+ intersections->debugBumpLoopCount(SkIntersections::kComputePerp_DebugLoop);
+#endif
+ if (sect1->collapsed() > sect1->fCurve.maxIntersections()) {
+ break;
+ }
+ }
+#if DEBUG_T_SECT_DUMP
+ sect1->dumpBoth(sect2);
+#endif
+ if (!sect1->fHead || !sect2->fHead) {
+ break;
+ }
+ } while (true);
+ SkTSpan* coincident = sect1->fCoincident;
+ if (coincident) {
+ // if there is more than one coincident span, check loosely to see if they should be joined
+ if (coincident->fNext) {
+ sect1->mergeCoincidence(sect2);
+ coincident = sect1->fCoincident;
+ }
+ SkASSERT(sect2->fCoincident); // courtesy check : coincidence only looks at sect 1
+ do {
+ if (!coincident) {
+ return;
+ }
+ if (!coincident->fCoinStart.isMatch()) {
+ continue;
+ }
+ if (!coincident->fCoinEnd.isMatch()) {
+ continue;
+ }
+ double perpT = coincident->fCoinStart.perpT();
+ if (perpT < 0) {
+ return;
+ }
+ int index = intersections->insertCoincident(coincident->fStartT,
+ perpT, coincident->pointFirst());
+ if ((intersections->insertCoincident(coincident->fEndT,
+ coincident->fCoinEnd.perpT(),
+ coincident->pointLast()) < 0) && index >= 0) {
+ intersections->clearCoincidence(index);
+ }
+ } while ((coincident = coincident->fNext));
+ }
+ int zeroOneSet = EndsEqual(sect1, sect2, intersections);
+// if (!sect1->fHead || !sect2->fHead) {
+ // if the final iteration contains an end (0 or 1),
+ if (sect1->fRemovedStartT && !(zeroOneSet & kZeroS1Set)) {
+ SkTCoincident perp; // intersect perpendicular with opposite curve
+ perp.setPerp(sect1->fCurve, 0, sect1->fCurve[0], sect2->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(0, perp.perpT(), perp.perpPt());
+ }
+ }
+ if (sect1->fRemovedEndT && !(zeroOneSet & kOneS1Set)) {
+ SkTCoincident perp;
+ perp.setPerp(sect1->fCurve, 1, sect1->pointLast(), sect2->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(1, perp.perpT(), perp.perpPt());
+ }
+ }
+ if (sect2->fRemovedStartT && !(zeroOneSet & kZeroS2Set)) {
+ SkTCoincident perp;
+ perp.setPerp(sect2->fCurve, 0, sect2->fCurve[0], sect1->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(perp.perpT(), 0, perp.perpPt());
+ }
+ }
+ if (sect2->fRemovedEndT && !(zeroOneSet & kOneS2Set)) {
+ SkTCoincident perp;
+ perp.setPerp(sect2->fCurve, 1, sect2->pointLast(), sect1->fCurve);
+ if (perp.isMatch()) {
+ intersections->insert(perp.perpT(), 1, perp.perpPt());
+ }
+ }
+// }
+ if (!sect1->fHead || !sect2->fHead) {
+ return;
+ }
+ sect1->recoverCollapsed();
+ sect2->recoverCollapsed();
+ SkTSpan* result1 = sect1->fHead;
+ // check heads and tails for zero and ones and insert them if we haven't already done so
+ const SkTSpan* head1 = result1;
+ if (!(zeroOneSet & kZeroS1Set) && approximately_less_than_zero(head1->fStartT)) {
+ const SkDPoint& start1 = sect1->fCurve[0];
+ if (head1->isBounded()) {
+ double t = head1->closestBoundedT(start1);
+ if (sect2->fCurve.ptAtT(t).approximatelyEqual(start1)) {
+ intersections->insert(0, t, start1);
+ }
+ }
+ }
+ const SkTSpan* head2 = sect2->fHead;
+ if (!(zeroOneSet & kZeroS2Set) && approximately_less_than_zero(head2->fStartT)) {
+ const SkDPoint& start2 = sect2->fCurve[0];
+ if (head2->isBounded()) {
+ double t = head2->closestBoundedT(start2);
+ if (sect1->fCurve.ptAtT(t).approximatelyEqual(start2)) {
+ intersections->insert(t, 0, start2);
+ }
+ }
+ }
+ if (!(zeroOneSet & kOneS1Set)) {
+ const SkTSpan* tail1 = sect1->tail();
+ if (!tail1) {
+ return;
+ }
+ if (approximately_greater_than_one(tail1->fEndT)) {
+ const SkDPoint& end1 = sect1->pointLast();
+ if (tail1->isBounded()) {
+ double t = tail1->closestBoundedT(end1);
+ if (sect2->fCurve.ptAtT(t).approximatelyEqual(end1)) {
+ intersections->insert(1, t, end1);
+ }
+ }
+ }
+ }
+ if (!(zeroOneSet & kOneS2Set)) {
+ const SkTSpan* tail2 = sect2->tail();
+ if (!tail2) {
+ return;
+ }
+ if (approximately_greater_than_one(tail2->fEndT)) {
+ const SkDPoint& end2 = sect2->pointLast();
+ if (tail2->isBounded()) {
+ double t = tail2->closestBoundedT(end2);
+ if (sect1->fCurve.ptAtT(t).approximatelyEqual(end2)) {
+ intersections->insert(t, 1, end2);
+ }
+ }
+ }
+ }
+ SkClosestSect closest;
+ do {
+ while (result1 && result1->fCoinStart.isMatch() && result1->fCoinEnd.isMatch()) {
+ result1 = result1->fNext;
+ }
+ if (!result1) {
+ break;
+ }
+ SkTSpan* result2 = sect2->fHead;
+ while (result2) {
+ closest.find(result1, result2 SkDEBUGPARAMS(intersections));
+ result2 = result2->fNext;
+ }
+ } while ((result1 = result1->fNext));
+ closest.finish(intersections);
+ // if there is more than one intersection and it isn't already coincident, check
+ int last = intersections->used() - 1;
+ for (int index = 0; index < last; ) {
+ if (intersections->isCoincident(index) && intersections->isCoincident(index + 1)) {
+ ++index;
+ continue;
+ }
+ double midT = ((*intersections)[0][index] + (*intersections)[0][index + 1]) / 2;
+ SkDPoint midPt = sect1->fCurve.ptAtT(midT);
+ // intersect perpendicular with opposite curve
+ SkTCoincident perp;
+ perp.setPerp(sect1->fCurve, midT, midPt, sect2->fCurve);
+ if (!perp.isMatch()) {
+ ++index;
+ continue;
+ }
+ if (intersections->isCoincident(index)) {
+ intersections->removeOne(index);
+ --last;
+ } else if (intersections->isCoincident(index + 1)) {
+ intersections->removeOne(index + 1);
+ --last;
+ } else {
+ intersections->setCoincident(index++);
+ }
+ intersections->setCoincident(index);
+ }
+ SkOPOBJASSERT(intersections, intersections->used() <= sect1->fCurve.maxIntersections());
+}
+
+int SkIntersections::intersect(const SkDQuad& q1, const SkDQuad& q2) {
+ SkTQuad quad1(q1);
+ SkTQuad quad2(q2);
+ SkTSect sect1(quad1 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(quad2 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDConic& c, const SkDQuad& q) {
+ SkTConic conic(c);
+ SkTQuad quad(q);
+ SkTSect sect1(conic SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(quad SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDConic& c1, const SkDConic& c2) {
+ SkTConic conic1(c1);
+ SkTConic conic2(c2);
+ SkTSect sect1(conic1 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(conic2 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDCubic& c, const SkDQuad& q) {
+ SkTCubic cubic(c);
+ SkTQuad quad(q);
+ SkTSect sect1(cubic SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(quad SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
+
+int SkIntersections::intersect(const SkDCubic& cu, const SkDConic& co) {
+ SkTCubic cubic(cu);
+ SkTConic conic(co);
+ SkTSect sect1(cubic SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(conic SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+
+}
+
+int SkIntersections::intersect(const SkDCubic& c1, const SkDCubic& c2) {
+ SkTCubic cubic1(c1);
+ SkTCubic cubic2(c2);
+ SkTSect sect1(cubic1 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(1));
+ SkTSect sect2(cubic2 SkDEBUGPARAMS(globalState()) PATH_OPS_DEBUG_T_SECT_PARAMS(2));
+ SkTSect::BinarySearch(&sect1, &sect2, this);
+ return used();
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTSect.h b/gfx/skia/skia/src/pathops/SkPathOpsTSect.h
new file mode 100644
index 0000000000..1929a8d616
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTSect.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsTSect_DEFINED
+#define SkPathOpsTSect_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsRect.h"
+#include "src/pathops/SkPathOpsTCurve.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cstdint>
+
+class SkIntersections;
+class SkTSect;
+class SkTSpan;
+struct SkDLine;
+
+#ifdef SK_DEBUG
+typedef uint8_t SkOpDebugBool;
+#else
+typedef bool SkOpDebugBool;
+#endif
+
+#define SkDoubleIsNaN sk_double_isnan
+
+class SkTCoincident {
+public:
+ SkTCoincident() {
+ this->init();
+ }
+
+ void debugInit() {
+#ifdef SK_DEBUG
+ this->fPerpPt.fX = this->fPerpPt.fY = SK_ScalarNaN;
+ this->fPerpT = SK_ScalarNaN;
+ this->fMatch = 0xFF;
+#endif
+ }
+
+ char dumpIsCoincidentStr() const;
+ void dump() const;
+
+ bool isMatch() const {
+ SkASSERT(!!fMatch == fMatch);
+ return SkToBool(fMatch);
+ }
+
+ void init() {
+ fPerpT = -1;
+ fMatch = false;
+ fPerpPt.fX = fPerpPt.fY = SK_ScalarNaN;
+ }
+
+ void markCoincident() {
+ if (!fMatch) {
+ fPerpT = -1;
+ }
+ fMatch = true;
+ }
+
+ const SkDPoint& perpPt() const {
+ return fPerpPt;
+ }
+
+ double perpT() const {
+ return fPerpT;
+ }
+
+ void setPerp(const SkTCurve& c1, double t, const SkDPoint& cPt, const SkTCurve& );
+
+private:
+ SkDPoint fPerpPt;
+ double fPerpT; // perpendicular intersection on opposite curve
+ SkOpDebugBool fMatch;
+};
+
+struct SkTSpanBounded {
+ SkTSpan* fBounded;
+ SkTSpanBounded* fNext;
+};
+
+class SkTSpan {
+public:
+ SkTSpan(const SkTCurve& curve, SkArenaAlloc& heap) {
+ fPart = curve.make(heap);
+ }
+
+ void addBounded(SkTSpan* , SkArenaAlloc* );
+ double closestBoundedT(const SkDPoint& pt) const;
+ bool contains(double t) const;
+
+ void debugInit(const SkTCurve& curve, SkArenaAlloc& heap) {
+#ifdef SK_DEBUG
+ SkTCurve* fake = curve.make(heap);
+ fake->debugInit();
+ init(*fake);
+ initBounds(*fake);
+ fCoinStart.init();
+ fCoinEnd.init();
+#endif
+ }
+
+ const SkTSect* debugOpp() const;
+
+#ifdef SK_DEBUG
+ void debugSetGlobalState(SkOpGlobalState* state) {
+ fDebugGlobalState = state;
+ }
+
+ const SkTSpan* debugSpan(int ) const;
+ const SkTSpan* debugT(double t) const;
+ bool debugIsBefore(const SkTSpan* span) const;
+#endif
+ void dump() const;
+ void dumpAll() const;
+ void dumpBounded(int id) const;
+ void dumpBounds() const;
+ void dumpCoin() const;
+
+ double endT() const {
+ return fEndT;
+ }
+
+ SkTSpan* findOppSpan(const SkTSpan* opp) const;
+
+ SkTSpan* findOppT(double t) const {
+ SkTSpan* result = oppT(t);
+ SkOPASSERT(result);
+ return result;
+ }
+
+ SkDEBUGCODE(SkOpGlobalState* globalState() const { return fDebugGlobalState; })
+
+ bool hasOppT(double t) const {
+ return SkToBool(oppT(t));
+ }
+
+ int hullsIntersect(SkTSpan* span, bool* start, bool* oppStart);
+ void init(const SkTCurve& );
+ bool initBounds(const SkTCurve& );
+
+ bool isBounded() const {
+ return fBounded != nullptr;
+ }
+
+ bool linearsIntersect(SkTSpan* span);
+ double linearT(const SkDPoint& ) const;
+
+ void markCoincident() {
+ fCoinStart.markCoincident();
+ fCoinEnd.markCoincident();
+ }
+
+ const SkTSpan* next() const {
+ return fNext;
+ }
+
+ bool onlyEndPointsInCommon(const SkTSpan* opp, bool* start,
+ bool* oppStart, bool* ptsInCommon);
+
+ const SkTCurve& part() const {
+ return *fPart;
+ }
+
+ int pointCount() const {
+ return fPart->pointCount();
+ }
+
+ const SkDPoint& pointFirst() const {
+ return (*fPart)[0];
+ }
+
+ const SkDPoint& pointLast() const {
+ return (*fPart)[fPart->pointLast()];
+ }
+
+ bool removeAllBounded();
+ bool removeBounded(const SkTSpan* opp);
+
+ void reset() {
+ fBounded = nullptr;
+ }
+
+ void resetBounds(const SkTCurve& curve) {
+ fIsLinear = fIsLine = false;
+ initBounds(curve);
+ }
+
+ bool split(SkTSpan* work, SkArenaAlloc* heap) {
+ return splitAt(work, (work->fStartT + work->fEndT) * 0.5, heap);
+ }
+
+ bool splitAt(SkTSpan* work, double t, SkArenaAlloc* heap);
+
+ double startT() const {
+ return fStartT;
+ }
+
+private:
+
+ // implementation is for testing only
+ int debugID() const {
+ return PATH_OPS_DEBUG_T_SECT_RELEASE(fID, -1);
+ }
+
+ void dumpID() const;
+
+ int hullCheck(const SkTSpan* opp, bool* start, bool* oppStart);
+ int linearIntersects(const SkTCurve& ) const;
+ SkTSpan* oppT(double t) const;
+
+ void validate() const;
+ void validateBounded() const;
+ void validatePerpT(double oppT) const;
+ void validatePerpPt(double t, const SkDPoint& ) const;
+
+ SkTCurve* fPart;
+ SkTCoincident fCoinStart;
+ SkTCoincident fCoinEnd;
+ SkTSpanBounded* fBounded;
+ SkTSpan* fPrev;
+ SkTSpan* fNext;
+ SkDRect fBounds;
+ double fStartT;
+ double fEndT;
+ double fBoundsMax;
+ SkOpDebugBool fCollapsed;
+ SkOpDebugBool fHasPerp;
+ SkOpDebugBool fIsLinear;
+ SkOpDebugBool fIsLine;
+ SkOpDebugBool fDeleted;
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+ SkDEBUGCODE(SkTSect* fDebugSect);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fID);
+ friend class SkTSect;
+};
+
+class SkTSect {
+public:
+ SkTSect(const SkTCurve& c
+ SkDEBUGPARAMS(SkOpGlobalState* ) PATH_OPS_DEBUG_T_SECT_PARAMS(int id));
+ static void BinarySearch(SkTSect* sect1, SkTSect* sect2,
+ SkIntersections* intersections);
+
+ SkDEBUGCODE(SkOpGlobalState* globalState() { return fDebugGlobalState; })
+ bool hasBounded(const SkTSpan* ) const;
+
+ const SkTSect* debugOpp() const {
+ return SkDEBUGRELEASE(fOppSect, nullptr);
+ }
+
+#ifdef SK_DEBUG
+ const SkTSpan* debugSpan(int id) const;
+ const SkTSpan* debugT(double t) const;
+#endif
+ void dump() const;
+ void dumpBoth(SkTSect* ) const;
+ void dumpBounded(int id) const;
+ void dumpBounds() const;
+ void dumpCoin() const;
+ void dumpCoinCurves() const;
+ void dumpCurves() const;
+
+private:
+ enum {
+ kZeroS1Set = 1,
+ kOneS1Set = 2,
+ kZeroS2Set = 4,
+ kOneS2Set = 8
+ };
+
+ SkTSpan* addFollowing(SkTSpan* prior);
+ void addForPerp(SkTSpan* span, double t);
+ SkTSpan* addOne();
+
+ SkTSpan* addSplitAt(SkTSpan* span, double t) {
+ SkTSpan* result = this->addOne();
+ SkDEBUGCODE(result->debugSetGlobalState(this->globalState()));
+ result->splitAt(span, t, &fHeap);
+ result->initBounds(fCurve);
+ span->initBounds(fCurve);
+ return result;
+ }
+
+ bool binarySearchCoin(SkTSect* , double tStart, double tStep, double* t,
+ double* oppT, SkTSpan** oppFirst);
+ SkTSpan* boundsMax();
+ bool coincidentCheck(SkTSect* sect2);
+ void coincidentForce(SkTSect* sect2, double start1s, double start1e);
+ bool coincidentHasT(double t);
+ int collapsed() const;
+ void computePerpendiculars(SkTSect* sect2, SkTSpan* first,
+ SkTSpan* last);
+ int countConsecutiveSpans(SkTSpan* first,
+ SkTSpan** last) const;
+
+ int debugID() const {
+ return PATH_OPS_DEBUG_T_SECT_RELEASE(fID, -1);
+ }
+
+ bool deleteEmptySpans();
+ void dumpCommon(const SkTSpan* ) const;
+ void dumpCommonCurves(const SkTSpan* ) const;
+ static int EndsEqual(const SkTSect* sect1, const SkTSect* sect2,
+ SkIntersections* );
+ bool extractCoincident(SkTSect* sect2, SkTSpan* first,
+ SkTSpan* last, SkTSpan** result);
+ SkTSpan* findCoincidentRun(SkTSpan* first, SkTSpan** lastPtr);
+ int intersects(SkTSpan* span, SkTSect* opp,
+ SkTSpan* oppSpan, int* oppResult);
+ bool isParallel(const SkDLine& thisLine, const SkTSect* opp) const;
+ int linesIntersect(SkTSpan* span, SkTSect* opp,
+ SkTSpan* oppSpan, SkIntersections* );
+ bool markSpanGone(SkTSpan* span);
+ bool matchedDirection(double t, const SkTSect* sect2, double t2) const;
+ void matchedDirCheck(double t, const SkTSect* sect2, double t2,
+ bool* calcMatched, bool* oppMatched) const;
+ void mergeCoincidence(SkTSect* sect2);
+
+ const SkDPoint& pointLast() const {
+ return fCurve[fCurve.pointLast()];
+ }
+
+ SkTSpan* prev(SkTSpan* ) const;
+ bool removeByPerpendicular(SkTSect* opp);
+ void recoverCollapsed();
+ bool removeCoincident(SkTSpan* span, bool isBetween);
+ void removeAllBut(const SkTSpan* keep, SkTSpan* span,
+ SkTSect* opp);
+ bool removeSpan(SkTSpan* span);
+ void removeSpanRange(SkTSpan* first, SkTSpan* last);
+ bool removeSpans(SkTSpan* span, SkTSect* opp);
+ void removedEndCheck(SkTSpan* span);
+
+ void resetRemovedEnds() {
+ fRemovedStartT = fRemovedEndT = false;
+ }
+
+ SkTSpan* spanAtT(double t, SkTSpan** priorSpan);
+ SkTSpan* tail();
+ bool trim(SkTSpan* span, SkTSect* opp);
+ bool unlinkSpan(SkTSpan* span);
+ bool updateBounded(SkTSpan* first, SkTSpan* last,
+ SkTSpan* oppFirst);
+ void validate() const;
+ void validateBounded() const;
+
+ const SkTCurve& fCurve;
+ SkSTArenaAlloc<1024> fHeap;
+ SkTSpan* fHead;
+ SkTSpan* fCoincident;
+ SkTSpan* fDeleted;
+ int fActiveCount;
+ bool fRemovedStartT;
+ bool fRemovedEndT;
+ bool fHung;
+ SkDEBUGCODE(SkOpGlobalState* fDebugGlobalState);
+ SkDEBUGCODE(SkTSect* fOppSect);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fID);
+ PATH_OPS_DEBUG_T_SECT_CODE(int fDebugCount);
+#if DEBUG_T_SECT
+ int fDebugAllocatedCount;
+#endif
+ friend class SkTSpan;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp
new file mode 100644
index 0000000000..a079512ddf
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTightBounds.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkPath.h"
+#include "include/core/SkPathTypes.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/pathops/SkPathOps.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkPathPriv.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpEdgeBuilder.h"
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsCommon.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+
+bool TightBounds(const SkPath& path, SkRect* result) {
+ SkRect moveBounds = { SK_ScalarMax, SK_ScalarMax, SK_ScalarMin, SK_ScalarMin };
+ bool wellBehaved = true;
+ for (auto [verb, pts, w] : SkPathPriv::Iterate(path)) {
+ switch (verb) {
+ case SkPathVerb::kMove:
+ moveBounds.fLeft = std::min(moveBounds.fLeft, pts[0].fX);
+ moveBounds.fTop = std::min(moveBounds.fTop, pts[0].fY);
+ moveBounds.fRight = std::max(moveBounds.fRight, pts[0].fX);
+ moveBounds.fBottom = std::max(moveBounds.fBottom, pts[0].fY);
+ break;
+ case SkPathVerb::kQuad:
+ case SkPathVerb::kConic:
+ if (!wellBehaved) {
+ break;
+ }
+ wellBehaved &= between(pts[0].fX, pts[1].fX, pts[2].fX);
+ wellBehaved &= between(pts[0].fY, pts[1].fY, pts[2].fY);
+ break;
+ case SkPathVerb::kCubic:
+ if (!wellBehaved) {
+ break;
+ }
+ wellBehaved &= between(pts[0].fX, pts[1].fX, pts[3].fX);
+ wellBehaved &= between(pts[0].fY, pts[1].fY, pts[3].fY);
+ wellBehaved &= between(pts[0].fX, pts[2].fX, pts[3].fX);
+ wellBehaved &= between(pts[0].fY, pts[2].fY, pts[3].fY);
+ break;
+ default:
+ break;
+ }
+ }
+ if (wellBehaved) {
+ *result = path.getBounds();
+ return true;
+ }
+ SkSTArenaAlloc<4096> allocator; // FIXME: constant-ize, tune
+ SkOpContour contour;
+ SkOpContourHead* contourList = static_cast<SkOpContourHead*>(&contour);
+ SkOpGlobalState globalState(contourList, &allocator SkDEBUGPARAMS(false)
+ SkDEBUGPARAMS(nullptr));
+ // turn path into list of segments
+ SkOpEdgeBuilder builder(path, contourList, &globalState);
+ if (!builder.finish()) {
+ return false;
+ }
+ if (!SortContourList(&contourList, false, false)) {
+ *result = moveBounds;
+ return true;
+ }
+ SkOpContour* current = contourList;
+ SkPathOpsBounds bounds = current->bounds();
+ while ((current = current->next())) {
+ bounds.add(current->bounds());
+ }
+ *result = bounds;
+ if (!moveBounds.isEmpty()) {
+ result->join(moveBounds);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp b/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp
new file mode 100644
index 0000000000..d325d86a83
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTypes.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include "include/private/base/SkFloatBits.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <algorithm>
+#include <cstdint>
+
+static bool arguments_denormalized(float a, float b, int epsilon) {
+ float denormalizedCheck = FLT_EPSILON * epsilon / 2;
+ return fabsf(a) <= denormalizedCheck && fabsf(b) <= denormalizedCheck;
+}
+
+// from http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+// FIXME: move to SkFloatBits.h
+static bool equal_ulps(float a, float b, int epsilon, int depsilon) {
+ if (arguments_denormalized(a, b, depsilon)) {
+ return true;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool equal_ulps_no_normal_check(float a, float b, int epsilon, int depsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool equal_ulps_pin(float a, float b, int epsilon, int depsilon) {
+ if (!SkScalarIsFinite(a) || !SkScalarIsFinite(b)) {
+ return false;
+ }
+ if (arguments_denormalized(a, b, depsilon)) {
+ return true;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool d_equal_ulps(float a, float b, int epsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon && bBits < aBits + epsilon;
+}
+
+static bool not_equal_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return false;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool not_equal_ulps_pin(float a, float b, int epsilon) {
+ if (!SkScalarIsFinite(a) || !SkScalarIsFinite(b)) {
+ return false;
+ }
+ if (arguments_denormalized(a, b, epsilon)) {
+ return false;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool d_not_equal_ulps(float a, float b, int epsilon) {
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits >= bBits + epsilon || bBits >= aBits + epsilon;
+}
+
+static bool less_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return a <= b - FLT_EPSILON * epsilon;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits <= bBits - epsilon;
+}
+
+static bool less_or_equal_ulps(float a, float b, int epsilon) {
+ if (arguments_denormalized(a, b, epsilon)) {
+ return a < b + FLT_EPSILON * epsilon;
+ }
+ int aBits = SkFloatAs2sCompliment(a);
+ int bBits = SkFloatAs2sCompliment(b);
+ // Find the difference in ULPs.
+ return aBits < bBits + epsilon;
+}
+
+// equality using the same error term as between
+bool AlmostBequalUlps(float a, float b) {
+ const int UlpsEpsilon = 2;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostPequalUlps(float a, float b) {
+ const int UlpsEpsilon = 8;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostDequalUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return d_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool AlmostDequalUlps(double a, double b) {
+ if (fabs(a) < SK_ScalarMax && fabs(b) < SK_ScalarMax) {
+ return AlmostDequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+ }
+ // We allow divide-by-zero here. It only happens if one of a,b is zero, and the other is NaN.
+ // (Otherwise, we'd hit the condition above). Thus, if std::max returns 0, we compute NaN / 0,
+ // which will produce NaN. The comparison will return false, which is the correct answer.
+ return sk_ieee_double_divide(fabs(a - b), std::max(fabs(a), fabs(b))) < FLT_EPSILON * 16;
+}
+
+bool AlmostEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostEqualUlpsNoNormalCheck(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps_no_normal_check(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool AlmostEqualUlps_Pin(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return equal_ulps_pin(a, b, UlpsEpsilon, UlpsEpsilon);
+}
+
+bool NotAlmostEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return not_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool NotAlmostEqualUlps_Pin(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return not_equal_ulps_pin(a, b, UlpsEpsilon);
+}
+
+bool NotAlmostDequalUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return d_not_equal_ulps(a, b, UlpsEpsilon);
+}
+
+bool RoughlyEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 256;
+ const int DUlpsEpsilon = 1024;
+ return equal_ulps(a, b, UlpsEpsilon, DUlpsEpsilon);
+}
+
+bool AlmostBetweenUlps(float a, float b, float c) {
+ const int UlpsEpsilon = 2;
+ return a <= c ? less_or_equal_ulps(a, b, UlpsEpsilon) && less_or_equal_ulps(b, c, UlpsEpsilon)
+ : less_or_equal_ulps(b, a, UlpsEpsilon) && less_or_equal_ulps(c, b, UlpsEpsilon);
+}
+
+bool AlmostLessUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return less_ulps(a, b, UlpsEpsilon);
+}
+
+bool AlmostLessOrEqualUlps(float a, float b) {
+ const int UlpsEpsilon = 16;
+ return less_or_equal_ulps(a, b, UlpsEpsilon);
+}
+
+int UlpsDistance(float a, float b) {
+ SkFloatIntUnion floatIntA, floatIntB;
+ floatIntA.fFloat = a;
+ floatIntB.fFloat = b;
+ // Different signs means they do not match.
+ if ((floatIntA.fSignBitInt < 0) != (floatIntB.fSignBitInt < 0)) {
+ // Check for equality to make sure +0 == -0
+ return a == b ? 0 : SK_MaxS32;
+ }
+ // Find the difference in ULPs.
+ return SkTAbs(floatIntA.fSignBitInt - floatIntB.fSignBitInt);
+}
+
+SkOpGlobalState::SkOpGlobalState(SkOpContourHead* head,
+ SkArenaAlloc* allocator
+ SkDEBUGPARAMS(bool debugSkipAssert)
+ SkDEBUGPARAMS(const char* testName))
+ : fAllocator(allocator)
+ , fCoincidence(nullptr)
+ , fContourHead(head)
+ , fNested(0)
+ , fWindingFailed(false)
+ , fPhase(SkOpPhase::kIntersecting)
+ SkDEBUGPARAMS(fDebugTestName(testName))
+ SkDEBUGPARAMS(fAngleID(0))
+ SkDEBUGPARAMS(fCoinID(0))
+ SkDEBUGPARAMS(fContourID(0))
+ SkDEBUGPARAMS(fPtTID(0))
+ SkDEBUGPARAMS(fSegmentID(0))
+ SkDEBUGPARAMS(fSpanID(0))
+ SkDEBUGPARAMS(fDebugSkipAssert(debugSkipAssert)) {
+#if DEBUG_T_SECT_LOOP_COUNT
+ debugResetLoopCounts();
+#endif
+#if DEBUG_COIN
+ fPreviousFuncName = nullptr;
+#endif
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsTypes.h b/gfx/skia/skia/src/pathops/SkPathOpsTypes.h
new file mode 100644
index 0000000000..c05ac7c7d5
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsTypes.h
@@ -0,0 +1,607 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathOpsTypes_DEFINED
+#define SkPathOpsTypes_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "src/pathops/SkPathOpsDebug.h"
+
+#include <cfloat>
+#include <cmath>
+
+enum SkPathOpsMask {
+ kWinding_PathOpsMask = -1,
+ kNo_PathOpsMask = 0,
+ kEvenOdd_PathOpsMask = 1
+};
+
+class SkArenaAlloc;
+class SkOpCoincidence;
+class SkOpContour;
+class SkOpContourHead;
+
+enum class SkOpPhase : char {
+ kNoChange,
+ kIntersecting,
+ kWalking,
+ kFixWinding,
+};
+
+class SkOpGlobalState {
+public:
+ SkOpGlobalState(SkOpContourHead* head,
+ SkArenaAlloc* allocator SkDEBUGPARAMS(bool debugSkipAssert)
+ SkDEBUGPARAMS(const char* testName));
+
+ enum {
+ kMaxWindingTries = 10
+ };
+
+ bool allocatedOpSpan() const {
+ return fAllocatedOpSpan;
+ }
+
+ SkArenaAlloc* allocator() {
+ return fAllocator;
+ }
+
+ void bumpNested() {
+ ++fNested;
+ }
+
+ void clearNested() {
+ fNested = 0;
+ }
+
+ SkOpCoincidence* coincidence() {
+ return fCoincidence;
+ }
+
+ SkOpContourHead* contourHead() {
+ return fContourHead;
+ }
+
+#ifdef SK_DEBUG
+ const class SkOpAngle* debugAngle(int id) const;
+ const SkOpCoincidence* debugCoincidence() const;
+ SkOpContour* debugContour(int id) const;
+ const class SkOpPtT* debugPtT(int id) const;
+#endif
+
+ static bool DebugRunFail();
+
+#ifdef SK_DEBUG
+ const class SkOpSegment* debugSegment(int id) const;
+ bool debugSkipAssert() const { return fDebugSkipAssert; }
+ const class SkOpSpanBase* debugSpan(int id) const;
+ const char* debugTestName() const { return fDebugTestName; }
+#endif
+
+#if DEBUG_T_SECT_LOOP_COUNT
+ void debugAddLoopCount(SkIntersections* , const SkIntersectionHelper& ,
+ const SkIntersectionHelper& );
+ void debugDoYourWorst(SkOpGlobalState* );
+ void debugLoopReport();
+ void debugResetLoopCounts();
+#endif
+
+#if DEBUG_COINCIDENCE
+ void debugSetCheckHealth(bool check) { fDebugCheckHealth = check; }
+ bool debugCheckHealth() const { return fDebugCheckHealth; }
+#endif
+
+#if DEBUG_VALIDATE || DEBUG_COIN
+ void debugSetPhase(const char* funcName DEBUG_COIN_DECLARE_PARAMS()) const;
+#endif
+
+#if DEBUG_COIN
+ void debugAddToCoinChangedDict();
+ void debugAddToGlobalCoinDicts();
+ SkPathOpsDebug::CoinDict* debugCoinChangedDict() { return &fCoinChangedDict; }
+ const SkPathOpsDebug::CoinDictEntry& debugCoinDictEntry() const { return fCoinDictEntry; }
+
+ static void DumpCoinDict();
+#endif
+
+
+ int nested() const {
+ return fNested;
+ }
+
+#ifdef SK_DEBUG
+ int nextAngleID() {
+ return ++fAngleID;
+ }
+
+ int nextCoinID() {
+ return ++fCoinID;
+ }
+
+ int nextContourID() {
+ return ++fContourID;
+ }
+
+ int nextPtTID() {
+ return ++fPtTID;
+ }
+
+ int nextSegmentID() {
+ return ++fSegmentID;
+ }
+
+ int nextSpanID() {
+ return ++fSpanID;
+ }
+#endif
+
+ SkOpPhase phase() const {
+ return fPhase;
+ }
+
+ void resetAllocatedOpSpan() {
+ fAllocatedOpSpan = false;
+ }
+
+ void setAllocatedOpSpan() {
+ fAllocatedOpSpan = true;
+ }
+
+ void setCoincidence(SkOpCoincidence* coincidence) {
+ fCoincidence = coincidence;
+ }
+
+ void setContourHead(SkOpContourHead* contourHead) {
+ fContourHead = contourHead;
+ }
+
+ void setPhase(SkOpPhase phase) {
+ if (SkOpPhase::kNoChange == phase) {
+ return;
+ }
+ SkASSERT(fPhase != phase);
+ fPhase = phase;
+ }
+
+    // called in very rare cases where angles are sorted incorrectly -- signifies op will fail
+ void setWindingFailed() {
+ fWindingFailed = true;
+ }
+
+ bool windingFailed() const {
+ return fWindingFailed;
+ }
+
+private:
+ SkArenaAlloc* fAllocator;
+ SkOpCoincidence* fCoincidence;
+ SkOpContourHead* fContourHead;
+ int fNested;
+ bool fAllocatedOpSpan;
+ bool fWindingFailed;
+ SkOpPhase fPhase;
+#ifdef SK_DEBUG
+ const char* fDebugTestName;
+ void* fDebugReporter;
+ int fAngleID;
+ int fCoinID;
+ int fContourID;
+ int fPtTID;
+ int fSegmentID;
+ int fSpanID;
+ bool fDebugSkipAssert;
+#endif
+#if DEBUG_T_SECT_LOOP_COUNT
+ int fDebugLoopCount[3];
+ SkPath::Verb fDebugWorstVerb[6];
+ SkPoint fDebugWorstPts[24];
+ float fDebugWorstWeight[6];
+#endif
+#if DEBUG_COIN
+ SkPathOpsDebug::CoinDict fCoinChangedDict;
+ SkPathOpsDebug::CoinDict fCoinVisitedDict;
+ SkPathOpsDebug::CoinDictEntry fCoinDictEntry;
+ const char* fPreviousFuncName;
+#endif
+#if DEBUG_COINCIDENCE
+ bool fDebugCheckHealth;
+#endif
+};
+
+#ifdef SK_DEBUG
+#if DEBUG_COINCIDENCE
+#define SkOPASSERT(cond) SkASSERT((this->globalState() && \
+ (this->globalState()->debugCheckHealth() || \
+ this->globalState()->debugSkipAssert())) || (cond))
+#else
+#define SkOPASSERT(cond) SkASSERT((this->globalState() && \
+ this->globalState()->debugSkipAssert()) || (cond))
+#endif
+#define SkOPOBJASSERT(obj, cond) SkASSERT((obj->globalState() && \
+ obj->globalState()->debugSkipAssert()) || (cond))
+#else
+#define SkOPASSERT(cond)
+#define SkOPOBJASSERT(obj, cond)
+#endif
+
+// Use Almost Equal when comparing coordinates. Use epsilon to compare T values.
+bool AlmostEqualUlps(float a, float b);
+inline bool AlmostEqualUlps(double a, double b) {
+ return AlmostEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostEqualUlpsNoNormalCheck(float a, float b);
+inline bool AlmostEqualUlpsNoNormalCheck(double a, double b) {
+ return AlmostEqualUlpsNoNormalCheck(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostEqualUlps_Pin(float a, float b);
+inline bool AlmostEqualUlps_Pin(double a, double b) {
+ return AlmostEqualUlps_Pin(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// Use Almost Dequal when the comparison should not special-case denormalized values.
+bool AlmostDequalUlps(float a, float b);
+bool AlmostDequalUlps(double a, double b);
+
+bool NotAlmostEqualUlps(float a, float b);
+inline bool NotAlmostEqualUlps(double a, double b) {
+ return NotAlmostEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool NotAlmostEqualUlps_Pin(float a, float b);
+inline bool NotAlmostEqualUlps_Pin(double a, double b) {
+ return NotAlmostEqualUlps_Pin(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool NotAlmostDequalUlps(float a, float b);
+inline bool NotAlmostDequalUlps(double a, double b) {
+ return NotAlmostDequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// Use Almost Bequal when comparing coordinates in conjunction with between.
+bool AlmostBequalUlps(float a, float b);
+inline bool AlmostBequalUlps(double a, double b) {
+ return AlmostBequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostPequalUlps(float a, float b);
+inline bool AlmostPequalUlps(double a, double b) {
+ return AlmostPequalUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool RoughlyEqualUlps(float a, float b);
+inline bool RoughlyEqualUlps(double a, double b) {
+ return RoughlyEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostLessUlps(float a, float b);
+inline bool AlmostLessUlps(double a, double b) {
+ return AlmostLessUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostLessOrEqualUlps(float a, float b);
+inline bool AlmostLessOrEqualUlps(double a, double b) {
+ return AlmostLessOrEqualUlps(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+bool AlmostBetweenUlps(float a, float b, float c);
+inline bool AlmostBetweenUlps(double a, double b, double c) {
+ return AlmostBetweenUlps(SkDoubleToScalar(a), SkDoubleToScalar(b), SkDoubleToScalar(c));
+}
+
+int UlpsDistance(float a, float b);
+inline int UlpsDistance(double a, double b) {
+ return UlpsDistance(SkDoubleToScalar(a), SkDoubleToScalar(b));
+}
+
+// FLT_EPSILON == 1.19209290E-07 == 1 / (2 ^ 23)
+// DBL_EPSILON == 2.22045e-16
+const double FLT_EPSILON_CUBED = FLT_EPSILON * FLT_EPSILON * FLT_EPSILON;
+const double FLT_EPSILON_HALF = FLT_EPSILON / 2;
+const double FLT_EPSILON_DOUBLE = FLT_EPSILON * 2;
+const double FLT_EPSILON_ORDERABLE_ERR = FLT_EPSILON * 16;
+const double FLT_EPSILON_SQUARED = FLT_EPSILON * FLT_EPSILON;
+// Use a compile-time constant for FLT_EPSILON_SQRT to avoid initializers.
+// A 17 digit constant guarantees exact results.
+const double FLT_EPSILON_SQRT = 0.00034526697709225118; // sqrt(FLT_EPSILON);
+const double FLT_EPSILON_INVERSE = 1 / FLT_EPSILON;
+const double DBL_EPSILON_ERR = DBL_EPSILON * 4; // FIXME: tune -- allow a few bits of error
+const double DBL_EPSILON_SUBDIVIDE_ERR = DBL_EPSILON * 16;
+const double ROUGH_EPSILON = FLT_EPSILON * 64;
+const double MORE_ROUGH_EPSILON = FLT_EPSILON * 256;
+const double WAY_ROUGH_EPSILON = FLT_EPSILON * 2048;
+const double BUMP_EPSILON = FLT_EPSILON * 4096;
+
+const SkScalar INVERSE_NUMBER_RANGE = FLT_EPSILON_ORDERABLE_ERR;
+
+inline bool zero_or_one(double x) {
+ return x == 0 || x == 1;
+}
+
+inline bool approximately_zero(double x) {
+ return fabs(x) < FLT_EPSILON;
+}
+
+inline bool precisely_zero(double x) {
+ return fabs(x) < DBL_EPSILON_ERR;
+}
+
+inline bool precisely_subdivide_zero(double x) {
+ return fabs(x) < DBL_EPSILON_SUBDIVIDE_ERR;
+}
+
+inline bool approximately_zero(float x) {
+ return fabs(x) < FLT_EPSILON;
+}
+
+inline bool approximately_zero_half(double x) {
+ return fabs(x) < FLT_EPSILON_HALF;
+}
+
+inline bool approximately_zero_double(double x) {
+ return fabs(x) < FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_zero_orderable(double x) {
+ return fabs(x) < FLT_EPSILON_ORDERABLE_ERR;
+}
+
+inline bool approximately_zero_squared(double x) {
+ return fabs(x) < FLT_EPSILON_SQUARED;
+}
+
+inline bool approximately_zero_sqrt(double x) {
+ return fabs(x) < FLT_EPSILON_SQRT;
+}
+
+inline bool roughly_zero(double x) {
+ return fabs(x) < ROUGH_EPSILON;
+}
+
+inline bool approximately_zero_inverse(double x) {
+ return fabs(x) > FLT_EPSILON_INVERSE;
+}
+
+inline bool approximately_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * FLT_EPSILON);
+}
+
+inline bool precisely_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * DBL_EPSILON);
+}
+
+inline bool roughly_zero_when_compared_to(double x, double y) {
+ return x == 0 || fabs(x) < fabs(y * ROUGH_EPSILON);
+}
+
+// Use this for comparing Ts in the range of 0 to 1. For general numbers (larger and smaller) use
+// AlmostEqualUlps instead.
+inline bool approximately_equal(double x, double y) {
+ return approximately_zero(x - y);
+}
+
+inline bool precisely_equal(double x, double y) {
+ return precisely_zero(x - y);
+}
+
+inline bool precisely_subdivide_equal(double x, double y) {
+ return precisely_subdivide_zero(x - y);
+}
+
+inline bool approximately_equal_half(double x, double y) {
+ return approximately_zero_half(x - y);
+}
+
+inline bool approximately_equal_double(double x, double y) {
+ return approximately_zero_double(x - y);
+}
+
+inline bool approximately_equal_orderable(double x, double y) {
+ return approximately_zero_orderable(x - y);
+}
+
+inline bool approximately_equal_squared(double x, double y) {
+ return approximately_equal(x, y);
+}
+
+inline bool approximately_greater(double x, double y) {
+ return x - FLT_EPSILON >= y;
+}
+
+inline bool approximately_greater_double(double x, double y) {
+ return x - FLT_EPSILON_DOUBLE >= y;
+}
+
+inline bool approximately_greater_orderable(double x, double y) {
+ return x - FLT_EPSILON_ORDERABLE_ERR >= y;
+}
+
+inline bool approximately_greater_or_equal(double x, double y) {
+ return x + FLT_EPSILON > y;
+}
+
+inline bool approximately_greater_or_equal_double(double x, double y) {
+ return x + FLT_EPSILON_DOUBLE > y;
+}
+
+inline bool approximately_greater_or_equal_orderable(double x, double y) {
+ return x + FLT_EPSILON_ORDERABLE_ERR > y;
+}
+
+inline bool approximately_lesser(double x, double y) {
+ return x + FLT_EPSILON <= y;
+}
+
+inline bool approximately_lesser_double(double x, double y) {
+ return x + FLT_EPSILON_DOUBLE <= y;
+}
+
+inline bool approximately_lesser_orderable(double x, double y) {
+ return x + FLT_EPSILON_ORDERABLE_ERR <= y;
+}
+
+inline bool approximately_lesser_or_equal(double x, double y) {
+ return x - FLT_EPSILON < y;
+}
+
+inline bool approximately_lesser_or_equal_double(double x, double y) {
+ return x - FLT_EPSILON_DOUBLE < y;
+}
+
+inline bool approximately_lesser_or_equal_orderable(double x, double y) {
+ return x - FLT_EPSILON_ORDERABLE_ERR < y;
+}
+
+inline bool approximately_greater_than_one(double x) {
+ return x > 1 - FLT_EPSILON;
+}
+
+inline bool precisely_greater_than_one(double x) {
+ return x > 1 - DBL_EPSILON_ERR;
+}
+
+inline bool approximately_less_than_zero(double x) {
+ return x < FLT_EPSILON;
+}
+
+inline bool precisely_less_than_zero(double x) {
+ return x < DBL_EPSILON_ERR;
+}
+
+inline bool approximately_negative(double x) {
+ return x < FLT_EPSILON;
+}
+
+inline bool approximately_negative_orderable(double x) {
+ return x < FLT_EPSILON_ORDERABLE_ERR;
+}
+
+inline bool precisely_negative(double x) {
+ return x < DBL_EPSILON_ERR;
+}
+
+inline bool approximately_one_or_less(double x) {
+ return x < 1 + FLT_EPSILON;
+}
+
+inline bool approximately_one_or_less_double(double x) {
+ return x < 1 + FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_positive(double x) {
+ return x > -FLT_EPSILON;
+}
+
+inline bool approximately_positive_squared(double x) {
+ return x > -(FLT_EPSILON_SQUARED);
+}
+
+inline bool approximately_zero_or_more(double x) {
+ return x > -FLT_EPSILON;
+}
+
+inline bool approximately_zero_or_more_double(double x) {
+ return x > -FLT_EPSILON_DOUBLE;
+}
+
+inline bool approximately_between_orderable(double a, double b, double c) {
+ return a <= c
+ ? approximately_negative_orderable(a - b) && approximately_negative_orderable(b - c)
+ : approximately_negative_orderable(b - a) && approximately_negative_orderable(c - b);
+}
+
+inline bool approximately_between(double a, double b, double c) {
+ return a <= c ? approximately_negative(a - b) && approximately_negative(b - c)
+ : approximately_negative(b - a) && approximately_negative(c - b);
+}
+
+inline bool precisely_between(double a, double b, double c) {
+ return a <= c ? precisely_negative(a - b) && precisely_negative(b - c)
+ : precisely_negative(b - a) && precisely_negative(c - b);
+}
+
+// returns true if (a <= b <= c) || (a >= b >= c)
+inline bool between(double a, double b, double c) {
+ SkASSERT(((a <= b && b <= c) || (a >= b && b >= c)) == ((a - b) * (c - b) <= 0)
+ || (precisely_zero(a) && precisely_zero(b) && precisely_zero(c)));
+ return (a - b) * (c - b) <= 0;
+}
+
+inline bool roughly_equal(double x, double y) {
+ return fabs(x - y) < ROUGH_EPSILON;
+}
+
+inline bool roughly_negative(double x) {
+ return x < ROUGH_EPSILON;
+}
+
+inline bool roughly_between(double a, double b, double c) {
+ return a <= c ? roughly_negative(a - b) && roughly_negative(b - c)
+ : roughly_negative(b - a) && roughly_negative(c - b);
+}
+
+inline bool more_roughly_equal(double x, double y) {
+ return fabs(x - y) < MORE_ROUGH_EPSILON;
+}
+
+inline SkPath::Verb SkPathOpsPointsToVerb(int points) {
+ int verb = (1 << points) >> 1;
+#ifdef SK_DEBUG
+ switch (points) {
+ case 0: SkASSERT(SkPath::kMove_Verb == verb); break;
+ case 1: SkASSERT(SkPath::kLine_Verb == verb); break;
+ case 2: SkASSERT(SkPath::kQuad_Verb == verb); break;
+ case 3: SkASSERT(SkPath::kCubic_Verb == verb); break;
+ default: SkDEBUGFAIL("should not be here");
+ }
+#endif
+ return (SkPath::Verb)verb;
+}
+
+inline int SkPathOpsVerbToPoints(SkPath::Verb verb) {
+ int points = (int) verb - (((int) verb + 1) >> 2);
+#ifdef SK_DEBUG
+ switch (verb) {
+ case SkPath::kLine_Verb: SkASSERT(1 == points); break;
+ case SkPath::kQuad_Verb: SkASSERT(2 == points); break;
+ case SkPath::kConic_Verb: SkASSERT(2 == points); break;
+ case SkPath::kCubic_Verb: SkASSERT(3 == points); break;
+ default: SkDEBUGFAIL("should not get here");
+ }
+#endif
+ return points;
+}
+
+inline double SkDInterp(double A, double B, double t) {
+ return A + (B - A) * t;
+}
+
+/* Returns -1 if negative, 0 if zero, 1 if positive
+*/
+inline int SkDSign(double x) {
+ return (x > 0) - (x < 0);
+}
+
+/* Returns 0 if negative, 1 if zero, 2 if positive
+*/
+inline int SKDSide(double x) {
+ return (x > 0) + (x >= 0);
+}
+
+/* Returns 1 if negative, 2 if zero, 4 if positive
+*/
+inline int SkDSideBit(double x) {
+ return 1 << SKDSide(x);
+}
+
+inline double SkPinT(double t) {
+ return precisely_less_than_zero(t) ? 0 : precisely_greater_than_one(t) ? 1 : t;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp b/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp
new file mode 100644
index 0000000000..16517445af
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathOpsWinding.cpp
@@ -0,0 +1,441 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// given a prospective edge, compute its initial winding by projecting a ray
+// if the ray hits another edge
+ // if the edge doesn't have a winding yet, hop up to that edge and start over
+ // concern : check for hops forming a loop
+ // if the edge is unsortable, or
+ // the intersection is nearly at the ends, or
+ // the tangent at the intersection is nearly coincident to the ray,
+ // choose a different ray and try again
+ // concern : if it is unable to succeed after N tries, try another edge? direction?
+// if no edge is hit, compute the winding directly
+
+// given the top span, project the most perpendicular ray and look for intersections
+ // let's try up and then down. What the hey
+
+// bestXY is initialized by caller with basePt
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTArray.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkTSort.h"
+#include "src/pathops/SkOpContour.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsBounds.h"
+#include "src/pathops/SkPathOpsCurve.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <cmath>
+#include <utility>
+
+enum class SkOpRayDir {
+ kLeft,
+ kTop,
+ kRight,
+ kBottom,
+};
+
+#if DEBUG_WINDING
+const char* gDebugRayDirName[] = {
+ "kLeft",
+ "kTop",
+ "kRight",
+ "kBottom"
+};
+#endif
+
+static int xy_index(SkOpRayDir dir) {
+ return static_cast<int>(dir) & 1;
+}
+
+static SkScalar pt_xy(const SkPoint& pt, SkOpRayDir dir) {
+ return (&pt.fX)[xy_index(dir)];
+}
+
+static SkScalar pt_yx(const SkPoint& pt, SkOpRayDir dir) {
+ return (&pt.fX)[!xy_index(dir)];
+}
+
+static double pt_dxdy(const SkDVector& v, SkOpRayDir dir) {
+ return (&v.fX)[xy_index(dir)];
+}
+
+static double pt_dydx(const SkDVector& v, SkOpRayDir dir) {
+ return (&v.fX)[!xy_index(dir)];
+}
+
+static SkScalar rect_side(const SkRect& r, SkOpRayDir dir) {
+ return (&r.fLeft)[static_cast<int>(dir)];
+}
+
+static bool sideways_overlap(const SkRect& rect, const SkPoint& pt, SkOpRayDir dir) {
+ int i = !xy_index(dir);
+ return approximately_between((&rect.fLeft)[i], (&pt.fX)[i], (&rect.fRight)[i]);
+}
+
+static bool less_than(SkOpRayDir dir) {
+ return static_cast<bool>((static_cast<int>(dir) & 2) == 0);
+}
+
+static bool ccw_dxdy(const SkDVector& v, SkOpRayDir dir) {
+ bool vPartPos = pt_dydx(v, dir) > 0;
+ bool leftBottom = ((static_cast<int>(dir) + 1) & 2) != 0;
+ return vPartPos == leftBottom;
+}
+
+struct SkOpRayHit {
+ SkOpRayDir makeTestBase(SkOpSpan* span, double t) {
+ fNext = nullptr;
+ fSpan = span;
+ fT = span->t() * (1 - t) + span->next()->t() * t;
+ SkOpSegment* segment = span->segment();
+ fSlope = segment->dSlopeAtT(fT);
+ fPt = segment->ptAtT(fT);
+ fValid = true;
+ return fabs(fSlope.fX) < fabs(fSlope.fY) ? SkOpRayDir::kLeft : SkOpRayDir::kTop;
+ }
+
+ SkOpRayHit* fNext;
+ SkOpSpan* fSpan;
+ SkPoint fPt;
+ double fT;
+ SkDVector fSlope;
+ bool fValid;
+};
+
+void SkOpContour::rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits,
+ SkArenaAlloc* allocator) {
+ // if the bounds extreme is outside the best, we're done
+ SkScalar baseXY = pt_xy(base.fPt, dir);
+ SkScalar boundsXY = rect_side(fBounds, dir);
+ bool checkLessThan = less_than(dir);
+ if (!approximately_equal(baseXY, boundsXY) && (baseXY < boundsXY) == checkLessThan) {
+ return;
+ }
+ SkOpSegment* testSegment = &fHead;
+ do {
+ testSegment->rayCheck(base, dir, hits, allocator);
+ } while ((testSegment = testSegment->next()));
+}
+
+void SkOpSegment::rayCheck(const SkOpRayHit& base, SkOpRayDir dir, SkOpRayHit** hits,
+ SkArenaAlloc* allocator) {
+ if (!sideways_overlap(fBounds, base.fPt, dir)) {
+ return;
+ }
+ SkScalar baseXY = pt_xy(base.fPt, dir);
+ SkScalar boundsXY = rect_side(fBounds, dir);
+ bool checkLessThan = less_than(dir);
+ if (!approximately_equal(baseXY, boundsXY) && (baseXY < boundsXY) == checkLessThan) {
+ return;
+ }
+ double tVals[3];
+ SkScalar baseYX = pt_yx(base.fPt, dir);
+ int roots = (*CurveIntercept[fVerb * 2 + xy_index(dir)])(fPts, fWeight, baseYX, tVals);
+ for (int index = 0; index < roots; ++index) {
+ double t = tVals[index];
+ if (base.fSpan->segment() == this && approximately_equal(base.fT, t)) {
+ continue;
+ }
+ SkDVector slope;
+ SkPoint pt;
+ SkDEBUGCODE(sk_bzero(&slope, sizeof(slope)));
+ bool valid = false;
+ if (approximately_zero(t)) {
+ pt = fPts[0];
+ } else if (approximately_equal(t, 1)) {
+ pt = fPts[SkPathOpsVerbToPoints(fVerb)];
+ } else {
+ SkASSERT(between(0, t, 1));
+ pt = this->ptAtT(t);
+ if (SkDPoint::ApproximatelyEqual(pt, base.fPt)) {
+ if (base.fSpan->segment() == this) {
+ continue;
+ }
+ } else {
+ SkScalar ptXY = pt_xy(pt, dir);
+ if (!approximately_equal(baseXY, ptXY) && (baseXY < ptXY) == checkLessThan) {
+ continue;
+ }
+ slope = this->dSlopeAtT(t);
+ if (fVerb == SkPath::kCubic_Verb && base.fSpan->segment() == this
+ && roughly_equal(base.fT, t)
+ && SkDPoint::RoughlyEqual(pt, base.fPt)) {
+ #if DEBUG_WINDING
+ SkDebugf("%s (rarely expect this)\n", __FUNCTION__);
+ #endif
+ continue;
+ }
+ if (fabs(pt_dydx(slope, dir) * 10000) > fabs(pt_dxdy(slope, dir))) {
+ valid = true;
+ }
+ }
+ }
+ SkOpSpan* span = this->windingSpanAtT(t);
+ if (!span) {
+ valid = false;
+ } else if (!span->windValue() && !span->oppValue()) {
+ continue;
+ }
+ SkOpRayHit* newHit = allocator->make<SkOpRayHit>();
+ newHit->fNext = *hits;
+ newHit->fPt = pt;
+ newHit->fSlope = slope;
+ newHit->fSpan = span;
+ newHit->fT = t;
+ newHit->fValid = valid;
+ *hits = newHit;
+ }
+}
+
+SkOpSpan* SkOpSegment::windingSpanAtT(double tHit) {
+ SkOpSpan* span = &fHead;
+ SkOpSpanBase* next;
+ do {
+ next = span->next();
+ if (approximately_equal(tHit, next->t())) {
+ return nullptr;
+ }
+ if (tHit < next->t()) {
+ return span;
+ }
+ } while (!next->final() && (span = next->upCast()));
+ return nullptr;
+}
+
+static bool hit_compare_x(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return a->fPt.fX < b->fPt.fX;
+}
+
+static bool reverse_hit_compare_x(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return b->fPt.fX < a->fPt.fX;
+}
+
+static bool hit_compare_y(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return a->fPt.fY < b->fPt.fY;
+}
+
+static bool reverse_hit_compare_y(const SkOpRayHit* a, const SkOpRayHit* b) {
+ return b->fPt.fY < a->fPt.fY;
+}
+
+static double get_t_guess(int tTry, int* dirOffset) {
+ double t = 0.5;
+ *dirOffset = tTry & 1;
+ int tBase = tTry >> 1;
+ int tBits = 0;
+ while (tTry >>= 1) {
+ t /= 2;
+ ++tBits;
+ }
+ if (tBits) {
+ int tIndex = (tBase - 1) & ((1 << tBits) - 1);
+ t += t * 2 * tIndex;
+ }
+ return t;
+}
+
+bool SkOpSpan::sortableTop(SkOpContour* contourHead) {
+ SkSTArenaAlloc<1024> allocator;
+ int dirOffset;
+ double t = get_t_guess(fTopTTry++, &dirOffset);
+ SkOpRayHit hitBase;
+ SkOpRayDir dir = hitBase.makeTestBase(this, t);
+ if (hitBase.fSlope.fX == 0 && hitBase.fSlope.fY == 0) {
+ return false;
+ }
+ SkOpRayHit* hitHead = &hitBase;
+ dir = static_cast<SkOpRayDir>(static_cast<int>(dir) + dirOffset);
+ if (hitBase.fSpan && hitBase.fSpan->segment()->verb() > SkPath::kLine_Verb
+ && !pt_dydx(hitBase.fSlope, dir)) {
+ return false;
+ }
+ SkOpContour* contour = contourHead;
+ do {
+ if (!contour->count()) {
+ continue;
+ }
+ contour->rayCheck(hitBase, dir, &hitHead, &allocator);
+ } while ((contour = contour->next()));
+ // sort hits
+ SkSTArray<1, SkOpRayHit*> sorted;
+ SkOpRayHit* hit = hitHead;
+ while (hit) {
+ sorted.push_back(hit);
+ hit = hit->fNext;
+ }
+ int count = sorted.size();
+ SkTQSort(sorted.begin(), sorted.end(),
+ xy_index(dir) ? less_than(dir) ? hit_compare_y : reverse_hit_compare_y
+ : less_than(dir) ? hit_compare_x : reverse_hit_compare_x);
+ // verify windings
+#if DEBUG_WINDING
+ SkDebugf("%s dir=%s seg=%d t=%1.9g pt=(%1.9g,%1.9g)\n", __FUNCTION__,
+ gDebugRayDirName[static_cast<int>(dir)], hitBase.fSpan->segment()->debugID(),
+ hitBase.fT, hitBase.fPt.fX, hitBase.fPt.fY);
+ for (int index = 0; index < count; ++index) {
+ hit = sorted[index];
+ SkOpSpan* span = hit->fSpan;
+ SkOpSegment* hitSegment = span ? span->segment() : nullptr;
+ bool operand = span ? hitSegment->operand() : false;
+ bool ccw = ccw_dxdy(hit->fSlope, dir);
+ SkDebugf("%s [%d] valid=%d operand=%d span=%d ccw=%d ", __FUNCTION__, index,
+ hit->fValid, operand, span ? span->debugID() : -1, ccw);
+ if (span) {
+ hitSegment->dumpPtsInner();
+ }
+ SkDebugf(" t=%1.9g pt=(%1.9g,%1.9g) slope=(%1.9g,%1.9g)\n", hit->fT,
+ hit->fPt.fX, hit->fPt.fY, hit->fSlope.fX, hit->fSlope.fY);
+ }
+#endif
+ const SkPoint* last = nullptr;
+ int wind = 0;
+ int oppWind = 0;
+ for (int index = 0; index < count; ++index) {
+ hit = sorted[index];
+ if (!hit->fValid) {
+ return false;
+ }
+ bool ccw = ccw_dxdy(hit->fSlope, dir);
+// SkASSERT(!approximately_zero(hit->fT) || !hit->fValid);
+ SkOpSpan* span = hit->fSpan;
+ if (!span) {
+ return false;
+ }
+ SkOpSegment* hitSegment = span->segment();
+ if (span->windValue() == 0 && span->oppValue() == 0) {
+ continue;
+ }
+ if (last && SkDPoint::ApproximatelyEqual(*last, hit->fPt)) {
+ return false;
+ }
+ if (index < count - 1) {
+ const SkPoint& next = sorted[index + 1]->fPt;
+ if (SkDPoint::ApproximatelyEqual(next, hit->fPt)) {
+ return false;
+ }
+ }
+ bool operand = hitSegment->operand();
+ if (operand) {
+ using std::swap;
+ swap(wind, oppWind);
+ }
+ int lastWind = wind;
+ int lastOpp = oppWind;
+ int windValue = ccw ? -span->windValue() : span->windValue();
+ int oppValue = ccw ? -span->oppValue() : span->oppValue();
+ wind += windValue;
+ oppWind += oppValue;
+ bool sumSet = false;
+ int spanSum = span->windSum();
+ int windSum = SkOpSegment::UseInnerWinding(lastWind, wind) ? wind : lastWind;
+ if (spanSum == SK_MinS32) {
+ span->setWindSum(windSum);
+ sumSet = true;
+ } else {
+ // the need for this condition suggests that UseInnerWinding is flawed
+ // happened when last = 1 wind = -1
+#if 0
+ SkASSERT((hitSegment->isXor() ? (windSum & 1) == (spanSum & 1) : windSum == spanSum)
+ || (abs(wind) == abs(lastWind)
+ && (windSum ^ wind ^ lastWind) == spanSum));
+#endif
+ }
+ int oSpanSum = span->oppSum();
+ int oppSum = SkOpSegment::UseInnerWinding(lastOpp, oppWind) ? oppWind : lastOpp;
+ if (oSpanSum == SK_MinS32) {
+ span->setOppSum(oppSum);
+ } else {
+#if 0
+ SkASSERT(hitSegment->oppXor() ? (oppSum & 1) == (oSpanSum & 1) : oppSum == oSpanSum
+ || (abs(oppWind) == abs(lastOpp)
+ && (oppSum ^ oppWind ^ lastOpp) == oSpanSum));
+#endif
+ }
+ if (sumSet) {
+ if (this->globalState()->phase() == SkOpPhase::kFixWinding) {
+ hitSegment->contour()->setCcw(ccw);
+ } else {
+ (void) hitSegment->markAndChaseWinding(span, span->next(), windSum, oppSum, nullptr);
+ (void) hitSegment->markAndChaseWinding(span->next(), span, windSum, oppSum, nullptr);
+ }
+ }
+ if (operand) {
+ using std::swap;
+ swap(wind, oppWind);
+ }
+ last = &hit->fPt;
+ this->globalState()->bumpNested();
+ }
+ return true;
+}
+
+SkOpSpan* SkOpSegment::findSortableTop(SkOpContour* contourHead) {
+ SkOpSpan* span = &fHead;
+ SkOpSpanBase* next;
+ do {
+ next = span->next();
+ if (span->done()) {
+ continue;
+ }
+ if (span->windSum() != SK_MinS32) {
+ return span;
+ }
+ if (span->sortableTop(contourHead)) {
+ return span;
+ }
+ } while (!next->final() && (span = next->upCast()));
+ return nullptr;
+}
+
+SkOpSpan* SkOpContour::findSortableTop(SkOpContour* contourHead) {
+ bool allDone = true;
+ if (fCount) {
+ SkOpSegment* testSegment = &fHead;
+ do {
+ if (testSegment->done()) {
+ continue;
+ }
+ allDone = false;
+ SkOpSpan* result = testSegment->findSortableTop(contourHead);
+ if (result) {
+ return result;
+ }
+ } while ((testSegment = testSegment->next()));
+ }
+ if (allDone) {
+ fDone = true;
+ }
+ return nullptr;
+}
+
+SkOpSpan* FindSortableTop(SkOpContourHead* contourHead) {
+ for (int index = 0; index < SkOpGlobalState::kMaxWindingTries; ++index) {
+ SkOpContour* contour = contourHead;
+ do {
+ if (contour->done()) {
+ continue;
+ }
+ SkOpSpan* result = contour->findSortableTop(contourHead);
+ if (result) {
+ return result;
+ }
+ } while ((contour = contour->next()));
+ }
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathWriter.cpp b/gfx/skia/skia/src/pathops/SkPathWriter.cpp
new file mode 100644
index 0000000000..9ded489834
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathWriter.cpp
@@ -0,0 +1,434 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkPathWriter.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMath.h"
+#include "src/base/SkTSort.h"
+#include "src/pathops/SkOpSegment.h"
+#include "src/pathops/SkOpSpan.h"
+#include "src/pathops/SkPathOpsDebug.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+// wrap path to keep track of whether the contour is initialized and non-empty
+SkPathWriter::SkPathWriter(SkPath& path)
+ : fPathPtr(&path)
+{
+ init();
+}
+
+void SkPathWriter::close() {
+ if (fCurrent.isEmpty()) {
+ return;
+ }
+ SkASSERT(this->isClosed());
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.close();\n");
+#endif
+ fCurrent.close();
+ fPathPtr->addPath(fCurrent);
+ fCurrent.reset();
+ init();
+}
+
+void SkPathWriter::conicTo(const SkPoint& pt1, const SkOpPtT* pt2, SkScalar weight) {
+ SkPoint pt2pt = this->update(pt2);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.conicTo(%1.9g,%1.9g, %1.9g,%1.9g, %1.9g);\n",
+ pt1.fX, pt1.fY, pt2pt.fX, pt2pt.fY, weight);
+#endif
+ fCurrent.conicTo(pt1, pt2pt, weight);
+}
+
+void SkPathWriter::cubicTo(const SkPoint& pt1, const SkPoint& pt2, const SkOpPtT* pt3) {
+ SkPoint pt3pt = this->update(pt3);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.cubicTo(%1.9g,%1.9g, %1.9g,%1.9g, %1.9g,%1.9g);\n",
+ pt1.fX, pt1.fY, pt2.fX, pt2.fY, pt3pt.fX, pt3pt.fY);
+#endif
+ fCurrent.cubicTo(pt1, pt2, pt3pt);
+}
+
+bool SkPathWriter::deferredLine(const SkOpPtT* pt) {
+ SkASSERT(fFirstPtT);
+ SkASSERT(fDefer[0]);
+ if (fDefer[0] == pt) {
+ // FIXME: why we're adding a degenerate line? Caller should have preflighted this.
+ return true;
+ }
+ if (pt->contains(fDefer[0])) {
+ // FIXME: why we're adding a degenerate line?
+ return true;
+ }
+ if (this->matchedLast(pt)) {
+ return false;
+ }
+ if (fDefer[1] && this->changedSlopes(pt)) {
+ this->lineTo();
+ fDefer[0] = fDefer[1];
+ }
+ fDefer[1] = pt;
+ return true;
+}
+
+void SkPathWriter::deferredMove(const SkOpPtT* pt) {
+ if (!fDefer[1]) {
+ fFirstPtT = fDefer[0] = pt;
+ return;
+ }
+ SkASSERT(fDefer[0]);
+ if (!this->matchedLast(pt)) {
+ this->finishContour();
+ fFirstPtT = fDefer[0] = pt;
+ }
+}
+
+void SkPathWriter::finishContour() {
+ if (!this->matchedLast(fDefer[0])) {
+ if (!fDefer[1]) {
+ return;
+ }
+ this->lineTo();
+ }
+ if (fCurrent.isEmpty()) {
+ return;
+ }
+ if (this->isClosed()) {
+ this->close();
+ } else {
+ SkASSERT(fDefer[1]);
+ fEndPtTs.push_back(fFirstPtT);
+ fEndPtTs.push_back(fDefer[1]);
+ fPartials.push_back(fCurrent);
+ this->init();
+ }
+}
+
+void SkPathWriter::init() {
+ fCurrent.reset();
+ fFirstPtT = fDefer[0] = fDefer[1] = nullptr;
+}
+
+bool SkPathWriter::isClosed() const {
+ return this->matchedLast(fFirstPtT);
+}
+
+void SkPathWriter::lineTo() {
+ if (fCurrent.isEmpty()) {
+ this->moveTo();
+ }
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.lineTo(%1.9g,%1.9g);\n", fDefer[1]->fPt.fX, fDefer[1]->fPt.fY);
+#endif
+ fCurrent.lineTo(fDefer[1]->fPt);
+}
+
+bool SkPathWriter::matchedLast(const SkOpPtT* test) const {
+ if (test == fDefer[1]) {
+ return true;
+ }
+ if (!test) {
+ return false;
+ }
+ if (!fDefer[1]) {
+ return false;
+ }
+ return test->contains(fDefer[1]);
+}
+
+void SkPathWriter::moveTo() {
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.moveTo(%1.9g,%1.9g);\n", fFirstPtT->fPt.fX, fFirstPtT->fPt.fY);
+#endif
+ fCurrent.moveTo(fFirstPtT->fPt);
+}
+
+void SkPathWriter::quadTo(const SkPoint& pt1, const SkOpPtT* pt2) {
+ SkPoint pt2pt = this->update(pt2);
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("path.quadTo(%1.9g,%1.9g, %1.9g,%1.9g);\n",
+ pt1.fX, pt1.fY, pt2pt.fX, pt2pt.fY);
+#endif
+ fCurrent.quadTo(pt1, pt2pt);
+}
+
+// if last point to be written matches the current path's first point, alter the
+// last to avoid writing a degenerate lineTo when the path is closed
+SkPoint SkPathWriter::update(const SkOpPtT* pt) {
+ if (!fDefer[1]) {
+ this->moveTo();
+ } else if (!this->matchedLast(fDefer[0])) {
+ this->lineTo();
+ }
+ SkPoint result = pt->fPt;
+ if (fFirstPtT && result != fFirstPtT->fPt && fFirstPtT->contains(pt)) {
+ result = fFirstPtT->fPt;
+ }
+ fDefer[0] = fDefer[1] = pt; // set both to know that there is not a pending deferred line
+ return result;
+}
+
+bool SkPathWriter::someAssemblyRequired() {
+ this->finishContour();
+ return !fEndPtTs.empty();
+}
+
+bool SkPathWriter::changedSlopes(const SkOpPtT* ptT) const {
+ if (matchedLast(fDefer[0])) {
+ return false;
+ }
+ SkVector deferDxdy = fDefer[1]->fPt - fDefer[0]->fPt;
+ SkVector lineDxdy = ptT->fPt - fDefer[1]->fPt;
+ return deferDxdy.fX * lineDxdy.fY != deferDxdy.fY * lineDxdy.fX;
+}
+
+class DistanceLessThan {
+public:
+ DistanceLessThan(double* distances) : fDistances(distances) { }
+ double* fDistances;
+ bool operator()(const int one, const int two) const {
+ return fDistances[one] < fDistances[two];
+ }
+};
+
+ /*
+ check start and end of each contour
+ if not the same, record them
+ match them up
+ connect closest
+ reassemble contour pieces into new path
+ */
+void SkPathWriter::assemble() {
+ if (!this->someAssemblyRequired()) {
+ return;
+ }
+#if DEBUG_PATH_CONSTRUCTION
+ SkDebugf("%s\n", __FUNCTION__);
+#endif
+ SkOpPtT const* const* runs = fEndPtTs.begin(); // starts, ends of partial contours
+ int endCount = fEndPtTs.size(); // all starts and ends
+ SkASSERT(endCount > 0);
+ SkASSERT(endCount == fPartials.size() * 2);
+#if DEBUG_ASSEMBLE
+ for (int index = 0; index < endCount; index += 2) {
+ const SkOpPtT* eStart = runs[index];
+ const SkOpPtT* eEnd = runs[index + 1];
+ SkASSERT(eStart != eEnd);
+ SkASSERT(!eStart->contains(eEnd));
+ SkDebugf("%s contour start=(%1.9g,%1.9g) end=(%1.9g,%1.9g)\n", __FUNCTION__,
+ eStart->fPt.fX, eStart->fPt.fY, eEnd->fPt.fX, eEnd->fPt.fY);
+ }
+#endif
+ // lengthen any partial contour adjacent to a simple segment
+ for (int pIndex = 0; pIndex < endCount; pIndex++) {
+ SkOpPtT* opPtT = const_cast<SkOpPtT*>(runs[pIndex]);
+ SkPath p;
+ SkPathWriter partWriter(p);
+ do {
+ if (!zero_or_one(opPtT->fT)) {
+ break;
+ }
+ SkOpSpanBase* opSpanBase = opPtT->span();
+ SkOpSpanBase* start = opPtT->fT ? opSpanBase->prev() : opSpanBase->upCast()->next();
+ int step = opPtT->fT ? 1 : -1;
+ const SkOpSegment* opSegment = opSpanBase->segment();
+ const SkOpSegment* nextSegment = opSegment->isSimple(&start, &step);
+ if (!nextSegment) {
+ break;
+ }
+ SkOpSpanBase* opSpanEnd = start->t() ? start->prev() : start->upCast()->next();
+ if (start->starter(opSpanEnd)->alreadyAdded()) {
+ break;
+ }
+ nextSegment->addCurveTo(start, opSpanEnd, &partWriter);
+ opPtT = opSpanEnd->ptT();
+ SkOpPtT** runsPtr = const_cast<SkOpPtT**>(&runs[pIndex]);
+ *runsPtr = opPtT;
+ } while (true);
+ partWriter.finishContour();
+ const SkTArray<SkPath>& partPartials = partWriter.partials();
+ if (partPartials.empty()) {
+ continue;
+ }
+ // if pIndex is even, reverse and prepend to fPartials; otherwise, append
+ SkPath& partial = const_cast<SkPath&>(fPartials[pIndex >> 1]);
+ const SkPath& part = partPartials[0];
+ if (pIndex & 1) {
+ partial.addPath(part, SkPath::kExtend_AddPathMode);
+ } else {
+ SkPath reverse;
+ reverse.reverseAddPath(part);
+ reverse.addPath(partial, SkPath::kExtend_AddPathMode);
+ partial = reverse;
+ }
+ }
+ SkTDArray<int> sLink, eLink;
+ int linkCount = endCount / 2; // number of partial contours
+ sLink.append(linkCount);
+ eLink.append(linkCount);
+ int rIndex, iIndex;
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ sLink[rIndex] = eLink[rIndex] = SK_MaxS32;
+ }
+ const int entries = endCount * (endCount - 1) / 2; // folded triangle
+ SkSTArray<8, double, true> distances(entries);
+ SkSTArray<8, int, true> sortedDist(entries);
+ SkSTArray<8, int, true> distLookup(entries);
+ int rRow = 0;
+ int dIndex = 0;
+ for (rIndex = 0; rIndex < endCount - 1; ++rIndex) {
+ const SkOpPtT* oPtT = runs[rIndex];
+ for (iIndex = rIndex + 1; iIndex < endCount; ++iIndex) {
+ const SkOpPtT* iPtT = runs[iIndex];
+ double dx = iPtT->fPt.fX - oPtT->fPt.fX;
+ double dy = iPtT->fPt.fY - oPtT->fPt.fY;
+ double dist = dx * dx + dy * dy;
+ distLookup.push_back(rRow + iIndex);
+ distances.push_back(dist); // oStart distance from iStart
+ sortedDist.push_back(dIndex++);
+ }
+ rRow += endCount;
+ }
+ SkASSERT(dIndex == entries);
+ SkTQSort<int>(sortedDist.begin(), sortedDist.end(), DistanceLessThan(distances.begin()));
+ int remaining = linkCount; // number of start/end pairs
+ for (rIndex = 0; rIndex < entries; ++rIndex) {
+ int pair = sortedDist[rIndex];
+ pair = distLookup[pair];
+ int row = pair / endCount;
+ int col = pair - row * endCount;
+ int ndxOne = row >> 1;
+ bool endOne = row & 1;
+ int* linkOne = endOne ? eLink.begin() : sLink.begin();
+ if (linkOne[ndxOne] != SK_MaxS32) {
+ continue;
+ }
+ int ndxTwo = col >> 1;
+ bool endTwo = col & 1;
+ int* linkTwo = endTwo ? eLink.begin() : sLink.begin();
+ if (linkTwo[ndxTwo] != SK_MaxS32) {
+ continue;
+ }
+ SkASSERT(&linkOne[ndxOne] != &linkTwo[ndxTwo]);
+ bool flip = endOne == endTwo;
+ linkOne[ndxOne] = flip ? ~ndxTwo : ndxTwo;
+ linkTwo[ndxTwo] = flip ? ~ndxOne : ndxOne;
+ if (!--remaining) {
+ break;
+ }
+ }
+ SkASSERT(!remaining);
+#if DEBUG_ASSEMBLE
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ int s = sLink[rIndex];
+ int e = eLink[rIndex];
+ SkDebugf("%s %c%d <- s%d - e%d -> %c%d\n", __FUNCTION__, s < 0 ? 's' : 'e',
+ s < 0 ? ~s : s, rIndex, rIndex, e < 0 ? 'e' : 's', e < 0 ? ~e : e);
+ }
+#endif
+ rIndex = 0;
+ do {
+ bool forward = true;
+ bool first = true;
+ int sIndex = sLink[rIndex];
+ SkASSERT(sIndex != SK_MaxS32);
+ sLink[rIndex] = SK_MaxS32;
+ int eIndex;
+ if (sIndex < 0) {
+ eIndex = sLink[~sIndex];
+ sLink[~sIndex] = SK_MaxS32;
+ } else {
+ eIndex = eLink[sIndex];
+ eLink[sIndex] = SK_MaxS32;
+ }
+ SkASSERT(eIndex != SK_MaxS32);
+#if DEBUG_ASSEMBLE
+ SkDebugf("%s sIndex=%c%d eIndex=%c%d\n", __FUNCTION__, sIndex < 0 ? 's' : 'e',
+ sIndex < 0 ? ~sIndex : sIndex, eIndex < 0 ? 's' : 'e',
+ eIndex < 0 ? ~eIndex : eIndex);
+#endif
+ do {
+ const SkPath& contour = fPartials[rIndex];
+ if (!first) {
+ SkPoint prior, next;
+ if (!fPathPtr->getLastPt(&prior)) {
+ return;
+ }
+ if (forward) {
+ next = contour.getPoint(0);
+ } else {
+ SkAssertResult(contour.getLastPt(&next));
+ }
+ if (prior != next) {
+ /* TODO: if there is a gap between open path written so far and path to come,
+ connect by following segments from one to the other, rather than introducing
+ a diagonal to connect the two.
+ */
+ }
+ }
+ if (forward) {
+ fPathPtr->addPath(contour,
+ first ? SkPath::kAppend_AddPathMode : SkPath::kExtend_AddPathMode);
+ } else {
+ SkASSERT(!first);
+ fPathPtr->reversePathTo(contour);
+ }
+ if (first) {
+ first = false;
+ }
+#if DEBUG_ASSEMBLE
+ SkDebugf("%s rIndex=%d eIndex=%s%d close=%d\n", __FUNCTION__, rIndex,
+ eIndex < 0 ? "~" : "", eIndex < 0 ? ~eIndex : eIndex,
+ sIndex == ((rIndex != eIndex) ^ forward ? eIndex : ~eIndex));
+#endif
+ if (sIndex == ((rIndex != eIndex) ^ forward ? eIndex : ~eIndex)) {
+ fPathPtr->close();
+ break;
+ }
+ if (forward) {
+ eIndex = eLink[rIndex];
+ SkASSERT(eIndex != SK_MaxS32);
+ eLink[rIndex] = SK_MaxS32;
+ if (eIndex >= 0) {
+ SkASSERT(sLink[eIndex] == rIndex);
+ sLink[eIndex] = SK_MaxS32;
+ } else {
+ SkASSERT(eLink[~eIndex] == ~rIndex);
+ eLink[~eIndex] = SK_MaxS32;
+ }
+ } else {
+ eIndex = sLink[rIndex];
+ SkASSERT(eIndex != SK_MaxS32);
+ sLink[rIndex] = SK_MaxS32;
+ if (eIndex >= 0) {
+ SkASSERT(eLink[eIndex] == rIndex);
+ eLink[eIndex] = SK_MaxS32;
+ } else {
+ SkASSERT(sLink[~eIndex] == ~rIndex);
+ sLink[~eIndex] = SK_MaxS32;
+ }
+ }
+ rIndex = eIndex;
+ if (rIndex < 0) {
+ forward ^= 1;
+ rIndex = ~rIndex;
+ }
+ } while (true);
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ if (sLink[rIndex] != SK_MaxS32) {
+ break;
+ }
+ }
+ } while (rIndex < linkCount);
+#if DEBUG_ASSEMBLE
+ for (rIndex = 0; rIndex < linkCount; ++rIndex) {
+ SkASSERT(sLink[rIndex] == SK_MaxS32);
+ SkASSERT(eLink[rIndex] == SK_MaxS32);
+ }
+#endif
+ return;
+}
diff --git a/gfx/skia/skia/src/pathops/SkPathWriter.h b/gfx/skia/skia/src/pathops/SkPathWriter.h
new file mode 100644
index 0000000000..130301989e
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkPathWriter.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPathWriter_DEFINED
+#define SkPathWriter_DEFINED
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+
+class SkOpPtT;
+
+// Construct the path one contour at a time.
+// If the contour is closed, copy it to the final output.
+// Otherwise, keep the partial contour for later assembly.
+
+class SkPathWriter {
+public:
+ SkPathWriter(SkPath& path);
+ void assemble();
+ void conicTo(const SkPoint& pt1, const SkOpPtT* pt2, SkScalar weight);
+ void cubicTo(const SkPoint& pt1, const SkPoint& pt2, const SkOpPtT* pt3);
+ bool deferredLine(const SkOpPtT* pt);
+ void deferredMove(const SkOpPtT* pt);
+ void finishContour();
+ bool hasMove() const { return !fFirstPtT; }
+ void init();
+ bool isClosed() const;
+ const SkPath* nativePath() const { return fPathPtr; }
+ void quadTo(const SkPoint& pt1, const SkOpPtT* pt2);
+
+private:
+ bool changedSlopes(const SkOpPtT* pt) const;
+ void close();
+ const SkTDArray<const SkOpPtT*>& endPtTs() const { return fEndPtTs; }
+ void lineTo();
+ bool matchedLast(const SkOpPtT*) const;
+ void moveTo();
+ const SkTArray<SkPath>& partials() const { return fPartials; }
+ bool someAssemblyRequired();
+ SkPoint update(const SkOpPtT* pt);
+
+ SkPath fCurrent; // contour under construction
+ SkTArray<SkPath> fPartials; // contours with mismatched starts and ends
+ SkTDArray<const SkOpPtT*> fEndPtTs; // possible pt values for partial starts and ends
+ SkPath* fPathPtr; // closed contours are written here
+ const SkOpPtT* fDefer[2]; // [0] deferred move, [1] deferred line
+ const SkOpPtT* fFirstPtT; // first in current contour
+};
+
+#endif /* SkPathWriter_DEFINED */
diff --git a/gfx/skia/skia/src/pathops/SkReduceOrder.cpp b/gfx/skia/skia/src/pathops/SkReduceOrder.cpp
new file mode 100644
index 0000000000..fbde6be9c6
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkReduceOrder.cpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/pathops/SkReduceOrder.h"
+
+#include "include/core/SkPoint.h"
+#include "src/core/SkGeometry.h"
+#include "src/pathops/SkPathOpsPoint.h"
+#include "src/pathops/SkPathOpsTypes.h"
+
+#include <algorithm>
+#include <cmath>
+
+int SkReduceOrder::reduce(const SkDLine& line) {
+ fLine[0] = line[0];
+ int different = line[0] != line[1];
+ fLine[1] = line[different];
+ return 1 + different;
+}
+
+static int coincident_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = reduction[1] = quad[0];
+ return 1;
+}
+
+static int reductionLineCount(const SkDQuad& reduction) {
+ return 1 + !reduction[0].approximatelyEqual(reduction[1]);
+}
+
+static int vertical_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+static int horizontal_line(const SkDQuad& quad, SkDQuad& reduction) {
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+static int check_linear(const SkDQuad& quad,
+ int minX, int maxX, int minY, int maxY, SkDQuad& reduction) {
+ if (!quad.isLinear(0, 2)) {
+ return 0;
+ }
+    // all three points are collinear: return line formed by outside points
+ reduction[0] = quad[0];
+ reduction[1] = quad[2];
+ return reductionLineCount(reduction);
+}
+
+// reduce to a quadratic or smaller
+// look for identical points
+// look for all three points in a line
+ // note that three points in a line doesn't simplify a cubic
+// look for approximation with single quadratic
+ // save approximation with multiple quadratics for later
+int SkReduceOrder::reduce(const SkDQuad& quad) {
+ int index, minX, maxX, minY, maxY;
+ int minXSet, minYSet;
+ minX = maxX = minY = maxY = 0;
+ minXSet = minYSet = 0;
+ for (index = 1; index < 3; ++index) {
+ if (quad[minX].fX > quad[index].fX) {
+ minX = index;
+ }
+ if (quad[minY].fY > quad[index].fY) {
+ minY = index;
+ }
+ if (quad[maxX].fX < quad[index].fX) {
+ maxX = index;
+ }
+ if (quad[maxY].fY < quad[index].fY) {
+ maxY = index;
+ }
+ }
+ for (index = 0; index < 3; ++index) {
+ if (AlmostEqualUlps(quad[index].fX, quad[minX].fX)) {
+ minXSet |= 1 << index;
+ }
+ if (AlmostEqualUlps(quad[index].fY, quad[minY].fY)) {
+ minYSet |= 1 << index;
+ }
+ }
+ if ((minXSet & 0x05) == 0x5 && (minYSet & 0x05) == 0x5) { // test for degenerate
+ // this quad starts and ends at the same place, so never contributes
+ // to the fill
+ return coincident_line(quad, fQuad);
+ }
+ if (minXSet == 0x7) { // test for vertical line
+ return vertical_line(quad, fQuad);
+ }
+ if (minYSet == 0x7) { // test for horizontal line
+ return horizontal_line(quad, fQuad);
+ }
+ int result = check_linear(quad, minX, maxX, minY, maxY, fQuad);
+ if (result) {
+ return result;
+ }
+ fQuad = quad;
+ return 3;
+}
+
+////////////////////////////////////////////////////////////////////////////////////
+
+static int coincident_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = reduction[1] = cubic[0];
+ return 1;
+}
+
+static int reductionLineCount(const SkDCubic& reduction) {
+ return 1 + !reduction[0].approximatelyEqual(reduction[1]);
+}
+
+static int vertical_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+static int horizontal_line(const SkDCubic& cubic, SkDCubic& reduction) {
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+// check to see if it is a quadratic or a line
+static int check_quadratic(const SkDCubic& cubic, SkDCubic& reduction) {
+ double dx10 = cubic[1].fX - cubic[0].fX;
+ double dx23 = cubic[2].fX - cubic[3].fX;
+ double midX = cubic[0].fX + dx10 * 3 / 2;
+ double sideAx = midX - cubic[3].fX;
+ double sideBx = dx23 * 3 / 2;
+ if (approximately_zero(sideAx) ? !approximately_equal(sideAx, sideBx)
+ : !AlmostEqualUlps_Pin(sideAx, sideBx)) {
+ return 0;
+ }
+ double dy10 = cubic[1].fY - cubic[0].fY;
+ double dy23 = cubic[2].fY - cubic[3].fY;
+ double midY = cubic[0].fY + dy10 * 3 / 2;
+ double sideAy = midY - cubic[3].fY;
+ double sideBy = dy23 * 3 / 2;
+ if (approximately_zero(sideAy) ? !approximately_equal(sideAy, sideBy)
+ : !AlmostEqualUlps_Pin(sideAy, sideBy)) {
+ return 0;
+ }
+ reduction[0] = cubic[0];
+ reduction[1].fX = midX;
+ reduction[1].fY = midY;
+ reduction[2] = cubic[3];
+ return 3;
+}
+
+static int check_linear(const SkDCubic& cubic,
+ int minX, int maxX, int minY, int maxY, SkDCubic& reduction) {
+ if (!cubic.isLinear(0, 3)) {
+ return 0;
+ }
+    // all four points are collinear: return line formed by outside points
+ reduction[0] = cubic[0];
+ reduction[1] = cubic[3];
+ return reductionLineCount(reduction);
+}
+
+/* food for thought:
+http://objectmix.com/graphics/132906-fast-precision-driven-cubic-quadratic-piecewise-degree-reduction-algos-2-a.html
+
+Given points c1, c2, c3 and c4 of a cubic Bezier, the points of the
+corresponding quadratic Bezier are (given in convex combinations of
+points):
+
+q1 = (11/13)c1 + (3/13)c2 -(3/13)c3 + (2/13)c4
+q2 = -c1 + (3/2)c2 + (3/2)c3 - c4
+q3 = (2/13)c1 - (3/13)c2 + (3/13)c3 + (11/13)c4
+
+Of course, this curve does not interpolate the end-points, but it would
+be interesting to see the behaviour of such a curve in an applet.
+
+--
+Kalle Rutanen
+http://kaba.hilvi.org
+
+*/
+
+// reduce to a quadratic or smaller
+// look for identical points
+// look for all four points in a line
+ // note that three points in a line doesn't simplify a cubic
+// look for approximation with single quadratic
+ // save approximation with multiple quadratics for later
+int SkReduceOrder::reduce(const SkDCubic& cubic, Quadratics allowQuadratics) {
+ int index, minX, maxX, minY, maxY;
+ int minXSet, minYSet;
+ minX = maxX = minY = maxY = 0;
+ minXSet = minYSet = 0;
+ for (index = 1; index < 4; ++index) {
+ if (cubic[minX].fX > cubic[index].fX) {
+ minX = index;
+ }
+ if (cubic[minY].fY > cubic[index].fY) {
+ minY = index;
+ }
+ if (cubic[maxX].fX < cubic[index].fX) {
+ maxX = index;
+ }
+ if (cubic[maxY].fY < cubic[index].fY) {
+ maxY = index;
+ }
+ }
+ for (index = 0; index < 4; ++index) {
+ double cx = cubic[index].fX;
+ double cy = cubic[index].fY;
+ double denom = std::max(fabs(cx), std::max(fabs(cy),
+ std::max(fabs(cubic[minX].fX), fabs(cubic[minY].fY))));
+ if (denom == 0) {
+ minXSet |= 1 << index;
+ minYSet |= 1 << index;
+ continue;
+ }
+ double inv = 1 / denom;
+ if (approximately_equal_half(cx * inv, cubic[minX].fX * inv)) {
+ minXSet |= 1 << index;
+ }
+ if (approximately_equal_half(cy * inv, cubic[minY].fY * inv)) {
+ minYSet |= 1 << index;
+ }
+ }
+ if (minXSet == 0xF) { // test for vertical line
+ if (minYSet == 0xF) { // return 1 if all four are coincident
+ return coincident_line(cubic, fCubic);
+ }
+ return vertical_line(cubic, fCubic);
+ }
+ if (minYSet == 0xF) { // test for horizontal line
+ return horizontal_line(cubic, fCubic);
+ }
+ int result = check_linear(cubic, minX, maxX, minY, maxY, fCubic);
+ if (result) {
+ return result;
+ }
+ if (allowQuadratics == SkReduceOrder::kAllow_Quadratics
+ && (result = check_quadratic(cubic, fCubic))) {
+ return result;
+ }
+ fCubic = cubic;
+ return 4;
+}
+
+SkPath::Verb SkReduceOrder::Quad(const SkPoint a[3], SkPoint* reducePts) {
+ SkDQuad quad;
+ quad.set(a);
+ SkReduceOrder reducer;
+ int order = reducer.reduce(quad);
+ if (order == 2) { // quad became line
+ for (int index = 0; index < order; ++index) {
+ *reducePts++ = reducer.fLine[index].asSkPoint();
+ }
+ }
+ return SkPathOpsPointsToVerb(order - 1);
+}
+
+SkPath::Verb SkReduceOrder::Conic(const SkConic& c, SkPoint* reducePts) {
+ SkPath::Verb verb = SkReduceOrder::Quad(c.fPts, reducePts);
+ if (verb > SkPath::kLine_Verb && c.fW == 1) {
+ return SkPath::kQuad_Verb;
+ }
+ return verb == SkPath::kQuad_Verb ? SkPath::kConic_Verb : verb;
+}
+
+SkPath::Verb SkReduceOrder::Cubic(const SkPoint a[4], SkPoint* reducePts) {
+ if (SkDPoint::ApproximatelyEqual(a[0], a[1]) && SkDPoint::ApproximatelyEqual(a[0], a[2])
+ && SkDPoint::ApproximatelyEqual(a[0], a[3])) {
+ reducePts[0] = a[0];
+ return SkPath::kMove_Verb;
+ }
+ SkDCubic cubic;
+ cubic.set(a);
+ SkReduceOrder reducer;
+ int order = reducer.reduce(cubic, kAllow_Quadratics);
+ if (order == 2 || order == 3) { // cubic became line or quad
+ for (int index = 0; index < order; ++index) {
+ *reducePts++ = reducer.fQuad[index].asSkPoint();
+ }
+ }
+ return SkPathOpsPointsToVerb(order - 1);
+}
diff --git a/gfx/skia/skia/src/pathops/SkReduceOrder.h b/gfx/skia/skia/src/pathops/SkReduceOrder.h
new file mode 100644
index 0000000000..17acc8d78e
--- /dev/null
+++ b/gfx/skia/skia/src/pathops/SkReduceOrder.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkReduceOrder_DEFINED
+#define SkReduceOrder_DEFINED
+
+#include "include/core/SkPath.h"
+#include "src/pathops/SkPathOpsCubic.h"
+#include "src/pathops/SkPathOpsLine.h"
+#include "src/pathops/SkPathOpsQuad.h"
+
+struct SkConic;
+struct SkPoint;
+
+union SkReduceOrder {
+ enum Quadratics {
+ kNo_Quadratics,
+ kAllow_Quadratics
+ };
+
+ int reduce(const SkDCubic& cubic, Quadratics);
+ int reduce(const SkDLine& line);
+ int reduce(const SkDQuad& quad);
+
+ static SkPath::Verb Conic(const SkConic& conic, SkPoint* reducePts);
+ static SkPath::Verb Cubic(const SkPoint pts[4], SkPoint* reducePts);
+ static SkPath::Verb Quad(const SkPoint pts[3], SkPoint* reducePts);
+
+ SkDLine fLine;
+ SkDQuad fQuad;
+ SkDCubic fCubic;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkBitmapKey.h b/gfx/skia/skia/src/pdf/SkBitmapKey.h
new file mode 100644
index 0000000000..72df0c7abe
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkBitmapKey.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBitmapKey_DEFINED
+#define SkBitmapKey_DEFINED
+
+#include "include/core/SkRect.h"
+
+struct SkBitmapKey {
+ SkIRect fSubset;
+ uint32_t fID;
+ bool operator==(const SkBitmapKey& rhs) const {
+ return fID == rhs.fID && fSubset == rhs.fSubset;
+ }
+ bool operator!=(const SkBitmapKey& rhs) const { return !(*this == rhs); }
+};
+
+
+#endif // SkBitmapKey_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkClusterator.cpp b/gfx/skia/skia/src/pdf/SkClusterator.cpp
new file mode 100644
index 0000000000..5eaed752da
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkClusterator.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkClusterator.h"
+
+#include "include/private/base/SkTo.h"
+#include "src/base/SkUTF.h"
+#include "src/text/GlyphRun.h"
+
+static bool is_reversed(const uint32_t* clusters, uint32_t count) {
+ // "ReversedChars" is how PDF deals with RTL text.
+    // return true if more than one cluster and monotonically decreasing to zero.
+ if (count < 2 || clusters[0] == 0 || clusters[count - 1] != 0) {
+ return false;
+ }
+ for (uint32_t i = 0; i + 1 < count; ++i) {
+ if (clusters[i + 1] > clusters[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+SkClusterator::SkClusterator(const sktext::GlyphRun& run)
+ : fClusters(run.clusters().data())
+ , fUtf8Text(run.text().data())
+ , fGlyphCount(SkToU32(run.glyphsIDs().size()))
+ , fTextByteLength(SkToU32(run.text().size()))
+ , fReversedChars(fClusters ? is_reversed(fClusters, fGlyphCount) : false)
+{
+ if (fClusters) {
+ SkASSERT(fUtf8Text && fTextByteLength > 0 && fGlyphCount > 0);
+ } else {
+ SkASSERT(!fUtf8Text && fTextByteLength == 0);
+ }
+}
+
+SkClusterator::Cluster SkClusterator::next() {
+ if (fCurrentGlyphIndex >= fGlyphCount) {
+ return Cluster{nullptr, 0, 0, 0};
+ }
+ if (!fClusters || !fUtf8Text) {
+ return Cluster{nullptr, 0, fCurrentGlyphIndex++, 1};
+ }
+ uint32_t clusterGlyphIndex = fCurrentGlyphIndex;
+ uint32_t cluster = fClusters[clusterGlyphIndex];
+ do {
+ ++fCurrentGlyphIndex;
+ } while (fCurrentGlyphIndex < fGlyphCount && cluster == fClusters[fCurrentGlyphIndex]);
+ uint32_t clusterGlyphCount = fCurrentGlyphIndex - clusterGlyphIndex;
+ uint32_t clusterEnd = fTextByteLength;
+ for (unsigned i = 0; i < fGlyphCount; ++i) {
+ uint32_t c = fClusters[i];
+ if (c > cluster && c < clusterEnd) {
+ clusterEnd = c;
+ }
+ }
+ uint32_t clusterLen = clusterEnd - cluster;
+ return Cluster{fUtf8Text + cluster, clusterLen, clusterGlyphIndex, clusterGlyphCount};
+}
diff --git a/gfx/skia/skia/src/pdf/SkClusterator.h b/gfx/skia/skia/src/pdf/SkClusterator.h
new file mode 100644
index 0000000000..86fd6cdfdf
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkClusterator.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkClusterator_DEFINED
+#define SkClusterator_DEFINED
+
+#include <cstdint>
+#include <vector>
+
+namespace sktext {
+class GlyphRun;
+}
+
+/** Given the m-to-n glyph-to-character mapping data (as returned by
+ harfbuzz), iterate over the clusters. */
+class SkClusterator {
+public:
+ SkClusterator(const sktext::GlyphRun& run);
+ uint32_t glyphCount() const { return fGlyphCount; }
+ bool reversedChars() const { return fReversedChars; }
+ struct Cluster {
+ const char* fUtf8Text;
+ uint32_t fTextByteLength;
+ uint32_t fGlyphIndex;
+ uint32_t fGlyphCount;
+ explicit operator bool() const { return fGlyphCount != 0; }
+ bool operator==(const SkClusterator::Cluster& o) {
+ return fUtf8Text == o.fUtf8Text
+ && fTextByteLength == o.fTextByteLength
+ && fGlyphIndex == o.fGlyphIndex
+ && fGlyphCount == o.fGlyphCount;
+ }
+ };
+ Cluster next();
+
+private:
+ uint32_t const * const fClusters;
+ char const * const fUtf8Text;
+ uint32_t const fGlyphCount;
+ uint32_t const fTextByteLength;
+ bool const fReversedChars;
+ uint32_t fCurrentGlyphIndex = 0;
+};
+#endif // SkClusterator_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkDeflate.cpp b/gfx/skia/skia/src/pdf/SkDeflate.cpp
new file mode 100644
index 0000000000..f044c140fa
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDeflate.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkDeflate.h"
+
+#include "include/core/SkData.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkTraceEvent.h"
+
+#include "zlib.h"
+
+#include <algorithm>
+
+namespace {
+
+// Different zlib implementations use different T.
+// We've seen size_t and unsigned.
+template <typename T> void* skia_alloc_func(void*, T items, T size) {
+ return sk_calloc_throw(SkToSizeT(items) * SkToSizeT(size));
+}
+
+void skia_free_func(void*, void* address) { sk_free(address); }
+
+} // namespace
+
+#define SKDEFLATEWSTREAM_INPUT_BUFFER_SIZE 4096
+#define SKDEFLATEWSTREAM_OUTPUT_BUFFER_SIZE 4224 // 4096 + 128, usually big
+ // enough to always do a
+ // single loop.
+
+// called by both write() and finalize()
+static void do_deflate(int flush,
+ z_stream* zStream,
+ SkWStream* out,
+ unsigned char* inBuffer,
+ size_t inBufferSize) {
+ zStream->next_in = inBuffer;
+ zStream->avail_in = SkToInt(inBufferSize);
+ unsigned char outBuffer[SKDEFLATEWSTREAM_OUTPUT_BUFFER_SIZE];
+ SkDEBUGCODE(int returnValue;)
+ do {
+ zStream->next_out = outBuffer;
+ zStream->avail_out = sizeof(outBuffer);
+ SkDEBUGCODE(returnValue =) deflate(zStream, flush);
+ SkASSERT(!zStream->msg);
+
+ out->write(outBuffer, sizeof(outBuffer) - zStream->avail_out);
+ } while (zStream->avail_in || !zStream->avail_out);
+ SkASSERT(flush == Z_FINISH
+ ? returnValue == Z_STREAM_END
+ : returnValue == Z_OK);
+}
+
+// Hide all zlib impl details.
+struct SkDeflateWStream::Impl {
+ SkWStream* fOut;
+ unsigned char fInBuffer[SKDEFLATEWSTREAM_INPUT_BUFFER_SIZE];
+ size_t fInBufferIndex;
+ z_stream fZStream;
+};
+
+SkDeflateWStream::SkDeflateWStream(SkWStream* out,
+ int compressionLevel,
+ bool gzip)
+ : fImpl(std::make_unique<SkDeflateWStream::Impl>()) {
+
+ // There has existed at some point at least one zlib implementation which thought it was being
+ // clever by randomizing the compression level. This is actually not entirely incorrect, except
+ // for the no-compression level which should always be deterministically pass-through.
+ // Users should instead consider the zero compression level broken and handle it themselves.
+ SkASSERT(compressionLevel != 0);
+
+ fImpl->fOut = out;
+ fImpl->fInBufferIndex = 0;
+ if (!fImpl->fOut) {
+ return;
+ }
+ fImpl->fZStream.next_in = nullptr;
+ fImpl->fZStream.zalloc = &skia_alloc_func;
+ fImpl->fZStream.zfree = &skia_free_func;
+ fImpl->fZStream.opaque = nullptr;
+ SkASSERT(compressionLevel <= 9 && compressionLevel >= -1);
+ SkDEBUGCODE(int r =) deflateInit2(&fImpl->fZStream, compressionLevel,
+ Z_DEFLATED, gzip ? 0x1F : 0x0F,
+ 8, Z_DEFAULT_STRATEGY);
+ SkASSERT(Z_OK == r);
+}
+
+SkDeflateWStream::~SkDeflateWStream() { this->finalize(); }
+
+void SkDeflateWStream::finalize() {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (!fImpl->fOut) {
+ return;
+ }
+ do_deflate(Z_FINISH, &fImpl->fZStream, fImpl->fOut, fImpl->fInBuffer,
+ fImpl->fInBufferIndex);
+ (void)deflateEnd(&fImpl->fZStream);
+ fImpl->fOut = nullptr;
+}
+
+bool SkDeflateWStream::write(const void* void_buffer, size_t len) {
+ TRACE_EVENT0("skia", TRACE_FUNC);
+ if (!fImpl->fOut) {
+ return false;
+ }
+ const char* buffer = (const char*)void_buffer;
+ while (len > 0) {
+ size_t tocopy =
+ std::min(len, sizeof(fImpl->fInBuffer) - fImpl->fInBufferIndex);
+ memcpy(fImpl->fInBuffer + fImpl->fInBufferIndex, buffer, tocopy);
+ len -= tocopy;
+ buffer += tocopy;
+ fImpl->fInBufferIndex += tocopy;
+ SkASSERT(fImpl->fInBufferIndex <= sizeof(fImpl->fInBuffer));
+
+ // if the buffer isn't filled, don't call into zlib yet.
+ if (sizeof(fImpl->fInBuffer) == fImpl->fInBufferIndex) {
+ do_deflate(Z_NO_FLUSH, &fImpl->fZStream, fImpl->fOut,
+ fImpl->fInBuffer, fImpl->fInBufferIndex);
+ fImpl->fInBufferIndex = 0;
+ }
+ }
+ return true;
+}
+
+size_t SkDeflateWStream::bytesWritten() const {
+ return fImpl->fZStream.total_in + fImpl->fInBufferIndex;
+}
diff --git a/gfx/skia/skia/src/pdf/SkDeflate.h b/gfx/skia/skia/src/pdf/SkDeflate.h
new file mode 100644
index 0000000000..fdffd01380
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDeflate.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkFlate_DEFINED
+#define SkFlate_DEFINED
+
+#include "include/core/SkStream.h"
+
+/**
+ * Wrap a stream in this class to compress the information written to
+ * this stream using the Deflate algorithm.
+ *
+ * See http://en.wikipedia.org/wiki/DEFLATE
+ */
+class SkDeflateWStream final : public SkWStream {
+public:
+ /** Does not take ownership of the stream.
+
+ @param compressionLevel 1 is best speed; 9 is best compression.
+ The default, -1, is to use zlib's Z_DEFAULT_COMPRESSION level.
+ 0 would be no compression, but due to broken zlibs, users should handle that themselves.
+
+ @param gzip iff true, output a gzip file. "The gzip format is
+ a wrapper, documented in RFC 1952, around a deflate stream."
+ gzip adds a header with a magic number to the beginning of the
+ stream, allowing a client to identify a gzip file.
+ */
+ SkDeflateWStream(SkWStream*,
+ int compressionLevel,
+ bool gzip = false);
+
+ /** The destructor calls finalize(). */
+ ~SkDeflateWStream() override;
+
+ /** Write the end of the compressed stream. All subsequent calls to
+ write() will fail. Subsequent calls to finalize() do nothing. */
+ void finalize();
+
+ // The SkWStream interface:
+ bool write(const void*, size_t) override;
+ size_t bytesWritten() const override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> fImpl;
+};
+
+#endif // SkFlate_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp b/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp
new file mode 100644
index 0000000000..9593a5b52c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkDocument_PDF_None.cpp
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/docs/SkPDFDocument.h"
+
+class SkPDFArray {};
+
+sk_sp<SkDocument> SkPDF::MakeDocument(SkWStream*, const SkPDF::Metadata&) { return nullptr; }
+
+void SkPDF::SetNodeId(SkCanvas* c, int n) {
+ c->drawAnnotation({0, 0, 0, 0}, "PDF_Node_Key", SkData::MakeWithCopy(&n, sizeof(n)).get());
+}
+
+SkPDF::AttributeList::AttributeList() = default;
+
+SkPDF::AttributeList::~AttributeList() = default;
diff --git a/gfx/skia/skia/src/pdf/SkJpegInfo.cpp b/gfx/skia/skia/src/pdf/SkJpegInfo.cpp
new file mode 100644
index 0000000000..b0c72d011c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkJpegInfo.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkJpegInfo.h"
+
+#include "include/private/base/SkTo.h"
+
+#ifndef SK_CODEC_DECODES_JPEG
+
+namespace {
+class JpegSegment {
+public:
+ JpegSegment(const void* data, size_t size)
+ : fData(static_cast<const char*>(data))
+ , fSize(size)
+ , fOffset(0)
+ , fLength(0) {}
+ bool read() {
+ if (!this->readBigendianUint16(&fMarker)) {
+ return false;
+ }
+ if (JpegSegment::StandAloneMarker(fMarker)) {
+ fLength = 0;
+ fBuffer = nullptr;
+ return true;
+ }
+ if (!this->readBigendianUint16(&fLength) || fLength < 2) {
+ return false;
+ }
+ fLength -= 2; // Length includes itself for some reason.
+ if (fOffset + fLength > fSize) {
+ return false; // Segment too long.
+ }
+ fBuffer = &fData[fOffset];
+ fOffset += fLength;
+ return true;
+ }
+
+ bool isSOF() {
+ return (fMarker & 0xFFF0) == 0xFFC0 && fMarker != 0xFFC4 &&
+ fMarker != 0xFFC8 && fMarker != 0xFFCC;
+ }
+ uint16_t marker() { return fMarker; }
+ uint16_t length() { return fLength; }
+ const char* data() { return fBuffer; }
+
+ static uint16_t GetBigendianUint16(const char* ptr) {
+ // "the most significant byte shall come first"
+ return (static_cast<uint8_t>(ptr[0]) << 8) |
+ static_cast<uint8_t>(ptr[1]);
+ }
+
+private:
+ const char* const fData;
+ const size_t fSize;
+ size_t fOffset;
+ const char* fBuffer;
+ uint16_t fMarker;
+ uint16_t fLength;
+
+ bool readBigendianUint16(uint16_t* value) {
+ if (fOffset + 2 > fSize) {
+ return false;
+ }
+ *value = JpegSegment::GetBigendianUint16(&fData[fOffset]);
+ fOffset += 2;
+ return true;
+ }
+ static bool StandAloneMarker(uint16_t marker) {
+ // RST[m] markers or SOI, EOI, TEM
+ return (marker & 0xFFF8) == 0xFFD0 || marker == 0xFFD8 ||
+ marker == 0xFFD9 || marker == 0xFF01;
+ }
+};
+} // namespace
+
+bool SkGetJpegInfo(const void* data, size_t len,
+ SkISize* size,
+ SkEncodedInfo::Color* colorType,
+ SkEncodedOrigin* orientation) {
+ static const uint16_t kSOI = 0xFFD8;
+ static const uint16_t kAPP0 = 0xFFE0;
+ JpegSegment segment(data, len);
+ if (!segment.read() || segment.marker() != kSOI) {
+ return false; // not a JPEG
+ }
+ if (!segment.read() || segment.marker() != kAPP0) {
+ return false; // not an APP0 segment
+ }
+ static const char kJfif[] = {'J', 'F', 'I', 'F', '\0'};
+ SkASSERT(segment.data());
+ if (SkToSizeT(segment.length()) < sizeof(kJfif) ||
+ 0 != memcmp(segment.data(), kJfif, sizeof(kJfif))) {
+ return false; // Not JFIF JPEG
+ }
+ do {
+ if (!segment.read()) {
+ return false; // malformed JPEG
+ }
+ } while (!segment.isSOF());
+ if (segment.length() < 6) {
+ return false; // SOF segment is short
+ }
+ if (8 != segment.data()[0]) {
+ return false; // Only support 8-bit precision
+ }
+ int numberOfComponents = segment.data()[5];
+ if (numberOfComponents != 1 && numberOfComponents != 3) {
+ return false; // Invalid JFIF
+ }
+ if (size) {
+ *size = {JpegSegment::GetBigendianUint16(&segment.data()[3]),
+ JpegSegment::GetBigendianUint16(&segment.data()[1])};
+ }
+ if (colorType) {
+ *colorType = numberOfComponents == 3 ? SkEncodedInfo::kYUV_Color
+ : SkEncodedInfo::kGray_Color;
+ }
+ if (orientation) {
+ *orientation = kTopLeft_SkEncodedOrigin;
+ }
+ return true;
+}
+#endif // SK_CODEC_DECODES_JPEG
diff --git a/gfx/skia/skia/src/pdf/SkJpegInfo.h b/gfx/skia/skia/src/pdf/SkJpegInfo.h
new file mode 100644
index 0000000000..82a8a736fd
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkJpegInfo.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkJpegInfo_DEFINED
+#define SkJpegInfo_DEFINED
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkSize.h"
+#include "include/private/SkEncodedInfo.h"
+
+/** Returns true if the data seems to be a valid JPEG image with a known colorType.
+
+ @param [out] size Image size in pixels
+ @param [out] colorType Encoded color type (kGray_Color, kYUV_Color, several others).
+ @param [out] orientation EXIF Orientation of the image.
+*/
+bool SkGetJpegInfo(const void* data, size_t len,
+ SkISize* size,
+ SkEncodedInfo::Color* colorType,
+ SkEncodedOrigin* orientation);
+
+#endif // SkJpegInfo_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkKeyedImage.cpp b/gfx/skia/skia/src/pdf/SkKeyedImage.cpp
new file mode 100644
index 0000000000..7b733d072a
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkKeyedImage.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkKeyedImage.h"
+
+#include "src/image/SkImage_Base.h"
+
+SkBitmapKey SkBitmapKeyFromImage(const SkImage* image) {
+ if (!image) {
+ return {{0, 0, 0, 0}, 0};
+ }
+ if (const SkBitmap* bm = as_IB(image)->onPeekBitmap()) {
+ SkIPoint o = bm->pixelRefOrigin();
+ return {image->bounds().makeOffset(o), bm->getGenerationID()};
+ }
+ return {image->bounds(), image->uniqueID()};
+}
+
+SkKeyedImage::SkKeyedImage(sk_sp<SkImage> i) : fImage(std::move(i)) {
+ fKey = SkBitmapKeyFromImage(fImage.get());
+}
+
+SkKeyedImage::SkKeyedImage(const SkBitmap& bm) : fImage(bm.asImage()) {
+ if (fImage) {
+ fKey = {bm.getSubset(), bm.getGenerationID()};
+ }
+}
+
+SkKeyedImage SkKeyedImage::subset(SkIRect subset) const {
+ SkKeyedImage img;
+ if (fImage && subset.intersect(fImage->bounds())) {
+ img.fImage = fImage->makeSubset(subset);
+ if (img.fImage) {
+ img.fKey = {subset.makeOffset(fKey.fSubset.topLeft()), fKey.fID};
+ }
+ }
+ return img;
+}
+
+sk_sp<SkImage> SkKeyedImage::release() {
+ sk_sp<SkImage> image = std::move(fImage);
+ SkASSERT(nullptr == fImage);
+ fKey = {{0, 0, 0, 0}, 0};
+ return image;
+}
diff --git a/gfx/skia/skia/src/pdf/SkKeyedImage.h b/gfx/skia/skia/src/pdf/SkKeyedImage.h
new file mode 100644
index 0000000000..db7b09d7b6
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkKeyedImage.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkKeyedImage_DEFINED
+#define SkKeyedImage_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImage.h"
+#include "src/pdf/SkBitmapKey.h"
+
+/**
+ This class has all the advantages of SkBitmaps and SkImages.
+
+ The SkImage holds on to encoded data. The SkBitmapKey properly de-dups subsets.
+ */
+class SkKeyedImage {
+public:
+ SkKeyedImage() {}
+ SkKeyedImage(sk_sp<SkImage>);
+ SkKeyedImage(const SkBitmap&);
+ SkKeyedImage(SkKeyedImage&&) = default;
+ SkKeyedImage(const SkKeyedImage&) = default;
+
+ SkKeyedImage& operator=(SkKeyedImage&&) = default;
+ SkKeyedImage& operator=(const SkKeyedImage&) = default;
+
+ explicit operator bool() const { return fImage != nullptr; }
+ const SkBitmapKey& key() const { return fKey; }
+ const sk_sp<SkImage>& image() const { return fImage; }
+ sk_sp<SkImage> release();
+ SkKeyedImage subset(SkIRect subset) const;
+
+private:
+ sk_sp<SkImage> fImage;
+ SkBitmapKey fKey = {{0, 0, 0, 0}, 0};
+};
+
+/**
+ * Given an Image, return the Bitmap Key that corresponds to it. If the Image
+ * wraps a Bitmap, use that Bitmap's key.
+ */
+SkBitmapKey SkBitmapKeyFromImage(const SkImage*);
+#endif // SkKeyedImage_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp b/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp
new file mode 100644
index 0000000000..888c09729e
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFBitmap.cpp
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFBitmap.h"
+
+#include "include/codec/SkEncodedImageFormat.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkExecutor.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkStream.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/pdf/SkDeflate.h"
+#include "src/pdf/SkJpegInfo.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkPDFUtils.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+// write a single byte to a stream n times.
+static void fill_stream(SkWStream* out, char value, size_t n) {
+ char buffer[4096];
+ memset(buffer, value, sizeof(buffer));
+ for (size_t i = 0; i < n / sizeof(buffer); ++i) {
+ out->write(buffer, sizeof(buffer));
+ }
+ out->write(buffer, n % sizeof(buffer));
+}
+
/* It is necessary to average the color component of transparent
   pixels with their surrounding neighbors since the PDF renderer may
   separately re-sample the alpha and color channels when the image is
   not displayed at its native resolution. Since an alpha of zero
   gives no information about the color component, the pathological
   case is a white image with sharp transparency bounds - the color
   channel goes to black, and the should-be-transparent pixels are
   rendered as grey because of the separate soft mask and color
   resizing. e.g.: gm/bitmappremul.cpp */
// Average the RGB of the up-to-8 non-transparent neighbors of (xOrig, yOrig)
// (plus the pixel itself); returns SK_ColorTRANSPARENT when every pixel in
// the 3x3 window is fully transparent.
static SkColor get_neighbor_avg_color(const SkPixmap& bm, int xOrig, int yOrig) {
    SkASSERT(kBGRA_8888_SkColorType == bm.colorType());
    unsigned r = 0, g = 0, b = 0, n = 0;
    // Clamp the range to the edge of the bitmap.
    int ymin = std::max(0, yOrig - 1);
    int ymax = std::min(yOrig + 1, bm.height() - 1);
    int xmin = std::max(0, xOrig - 1);
    int xmax = std::min(xOrig + 1, bm.width() - 1);
    for (int y = ymin; y <= ymax; ++y) {
        const SkColor* scanline = bm.addr32(0, y);
        for (int x = xmin; x <= xmax; ++x) {
            SkColor color = scanline[x];
            if (color != SK_ColorTRANSPARENT) {
                r += SkColorGetR(color);
                g += SkColorGetG(color);
                b += SkColorGetB(color);
                n++;
            }
        }
    }
    return n > 0 ? SkColorSetRGB(SkToU8(r / n), SkToU8(g / n), SkToU8(b / n))
                 : SK_ColorTRANSPARENT;
}

namespace {
// How the image data in a PDF stream is encoded (selects the /Filter entry).
enum class SkPDFStreamFormat { DCT, Flate, Uncompressed };
}
+
// Emit an Image XObject stream dictionary plus its data at `ref`.
// `writeStream` is a callable that writes the (already encoded) image bytes
// to an SkWStream; `length` is the byte count it will produce; `format`
// selects the matching /Filter entry.
template <typename T>
static void emit_image_stream(SkPDFDocument* doc,
                              SkPDFIndirectReference ref,
                              T writeStream,
                              SkISize size,
                              const char* colorSpace,
                              SkPDFIndirectReference sMask,
                              int length,
                              SkPDFStreamFormat format) {
    SkPDFDict pdfDict("XObject");
    pdfDict.insertName("Subtype", "Image");
    pdfDict.insertInt("Width", size.width());
    pdfDict.insertInt("Height", size.height());
    pdfDict.insertName("ColorSpace", colorSpace);
    if (sMask) {
        // Soft mask (alpha channel) stored as a separate image object.
        pdfDict.insertRef("SMask", sMask);
    }
    pdfDict.insertInt("BitsPerComponent", 8);
    #ifdef SK_PDF_BASE85_BINARY
    // Base85 mode wraps the data filter in an ASCII85 layer.
    auto filters = SkPDFMakeArray();
    filters->appendName("ASCII85Decode");
    switch (format) {
        case SkPDFStreamFormat::DCT: filters->appendName("DCTDecode"); break;
        case SkPDFStreamFormat::Flate: filters->appendName("FlateDecode"); break;
        case SkPDFStreamFormat::Uncompressed: break;
    }
    pdfDict.insertObject("Filter", std::move(filters));
    #else
    switch (format) {
        case SkPDFStreamFormat::DCT: pdfDict.insertName("Filter", "DCTDecode"); break;
        case SkPDFStreamFormat::Flate: pdfDict.insertName("Filter", "FlateDecode"); break;
        case SkPDFStreamFormat::Uncompressed: break;
    }
    #endif
    if (format == SkPDFStreamFormat::DCT) {
        pdfDict.insertInt("ColorTransform", 0);
    }
    pdfDict.insertInt("Length", length);
    doc->emitStream(pdfDict, std::move(writeStream), ref);
}
+
// Emit the alpha channel of `pm` as a DeviceGray image at `ref` (used as the
// /SMask of a color image). Flate-compresses unless the document's
// compression level is None.
static void do_deflated_alpha(const SkPixmap& pm, SkPDFDocument* doc, SkPDFIndirectReference ref) {
    SkPDF::Metadata::CompressionLevel compressionLevel = doc->metadata().fCompressionLevel;
    SkPDFStreamFormat format = compressionLevel == SkPDF::Metadata::CompressionLevel::None
                             ? SkPDFStreamFormat::Uncompressed
                             : SkPDFStreamFormat::Flate;
    SkDynamicMemoryWStream buffer;
    SkWStream* stream = &buffer;
    std::optional<SkDeflateWStream> deflateWStream;
    if (format == SkPDFStreamFormat::Flate) {
        deflateWStream.emplace(&buffer, SkToInt(compressionLevel));
        stream = &*deflateWStream;
    }
    if (kAlpha_8_SkColorType == pm.colorType()) {
        // A8 pixels ARE the alpha channel; write them straight through.
        SkASSERT(pm.rowBytes() == (size_t)pm.width());
        stream->write(pm.addr8(), pm.width() * pm.height());
    } else {
        SkASSERT(pm.alphaType() == kUnpremul_SkAlphaType);
        SkASSERT(pm.colorType() == kBGRA_8888_SkColorType);
        SkASSERT(pm.rowBytes() == (size_t)pm.width() * 4);
        const uint32_t* ptr = pm.addr32();
        const uint32_t* stop = ptr + pm.height() * pm.width();

        // Extract the A byte of each BGRA pixel, batched through a small buffer.
        uint8_t byteBuffer[4092];
        uint8_t* bufferStop = byteBuffer + std::size(byteBuffer);
        uint8_t* dst = byteBuffer;
        while (ptr != stop) {
            *dst++ = 0xFF & ((*ptr++) >> SK_BGRA_A32_SHIFT);
            if (dst == bufferStop) {
                stream->write(byteBuffer, sizeof(byteBuffer));
                dst = byteBuffer;
            }
        }
        stream->write(byteBuffer, dst - byteBuffer);
    }
    if (deflateWStream) {
        // Flush the zlib stream before measuring the buffer.
        deflateWStream->finalize();
    }

    #ifdef SK_PDF_BASE85_BINARY
    SkPDFUtils::Base85Encode(buffer.detachAsStream(), &buffer);
    #endif
    int length = SkToInt(buffer.bytesWritten());
    emit_image_stream(doc, ref, [&buffer](SkWStream* stream) { buffer.writeToAndReset(stream); },
                      pm.info().dimensions(), "DeviceGray", SkPDFIndirectReference(),
                      length, format);
}
+
+static void do_deflated_image(const SkPixmap& pm,
+ SkPDFDocument* doc,
+ bool isOpaque,
+ SkPDFIndirectReference ref) {
+ SkPDFIndirectReference sMask;
+ if (!isOpaque) {
+ sMask = doc->reserveRef();
+ }
+ SkPDF::Metadata::CompressionLevel compressionLevel = doc->metadata().fCompressionLevel;
+ SkPDFStreamFormat format = compressionLevel == SkPDF::Metadata::CompressionLevel::None
+ ? SkPDFStreamFormat::Uncompressed
+ : SkPDFStreamFormat::Flate;
+ SkDynamicMemoryWStream buffer;
+ SkWStream* stream = &buffer;
+ std::optional<SkDeflateWStream> deflateWStream;
+ if (format == SkPDFStreamFormat::Flate) {
+ deflateWStream.emplace(&buffer, SkToInt(compressionLevel));
+ stream = &*deflateWStream;
+ }
+ const char* colorSpace = "DeviceGray";
+ switch (pm.colorType()) {
+ case kAlpha_8_SkColorType:
+ fill_stream(stream, '\x00', pm.width() * pm.height());
+ break;
+ case kGray_8_SkColorType:
+ SkASSERT(sMask.fValue = -1);
+ SkASSERT(pm.rowBytes() == (size_t)pm.width());
+ stream->write(pm.addr8(), pm.width() * pm.height());
+ break;
+ default:
+ colorSpace = "DeviceRGB";
+ SkASSERT(pm.alphaType() == kUnpremul_SkAlphaType);
+ SkASSERT(pm.colorType() == kBGRA_8888_SkColorType);
+ SkASSERT(pm.rowBytes() == (size_t)pm.width() * 4);
+ uint8_t byteBuffer[3072];
+ static_assert(std::size(byteBuffer) % 3 == 0, "");
+ uint8_t* bufferStop = byteBuffer + std::size(byteBuffer);
+ uint8_t* dst = byteBuffer;
+ for (int y = 0; y < pm.height(); ++y) {
+ const SkColor* src = pm.addr32(0, y);
+ for (int x = 0; x < pm.width(); ++x) {
+ SkColor color = *src++;
+ if (SkColorGetA(color) == SK_AlphaTRANSPARENT) {
+ color = get_neighbor_avg_color(pm, x, y);
+ }
+ *dst++ = SkColorGetR(color);
+ *dst++ = SkColorGetG(color);
+ *dst++ = SkColorGetB(color);
+ if (dst == bufferStop) {
+ stream->write(byteBuffer, sizeof(byteBuffer));
+ dst = byteBuffer;
+ }
+ }
+ }
+ stream->write(byteBuffer, dst - byteBuffer);
+ }
+ if (deflateWStream) {
+ deflateWStream->finalize();
+ }
+ #ifdef SK_PDF_BASE85_BINARY
+ SkPDFUtils::Base85Encode(buffer.detachAsStream(), &buffer);
+ #endif
+ int length = SkToInt(buffer.bytesWritten());
+ emit_image_stream(doc, ref, [&buffer](SkWStream* stream) { buffer.writeToAndReset(stream); },
+ pm.info().dimensions(), colorSpace, sMask, length, format);
+ if (!isOpaque) {
+ do_deflated_alpha(pm, doc, sMask);
+ }
+}
+
// If `data` is a JPEG that can be embedded as-is (dimensions match `size`,
// color is YUV or grayscale, orientation is top-left), emit it at `ref` with
// a DCTDecode filter and return true; otherwise return false so the caller
// can fall back to re-encoding.
static bool do_jpeg(sk_sp<SkData> data, SkPDFDocument* doc, SkISize size,
                    SkPDFIndirectReference ref) {
    SkISize jpegSize;
    SkEncodedInfo::Color jpegColorType;
    SkEncodedOrigin exifOrientation;
    if (!SkGetJpegInfo(data->data(), data->size(), &jpegSize,
                       &jpegColorType, &exifOrientation)) {
        return false;
    }
    bool yuv = jpegColorType == SkEncodedInfo::kYUV_Color;
    bool goodColorType = yuv || jpegColorType == SkEncodedInfo::kGray_Color;
    if (jpegSize != size  // Safety check.
            || !goodColorType
            || kTopLeft_SkEncodedOrigin != exifOrientation) {
        return false;
    }
    #ifdef SK_PDF_BASE85_BINARY
    SkDynamicMemoryWStream buffer;
    SkPDFUtils::Base85Encode(SkMemoryStream::MakeDirect(data->data(), data->size()), &buffer);
    data = buffer.detachAsData();
    #endif

    emit_image_stream(doc, ref,
                      [&data](SkWStream* dst) { dst->write(data->data(), data->size()); },
                      jpegSize, yuv ? "DeviceRGB" : "DeviceGray",
                      SkPDFIndirectReference(), SkToInt(data->size()), SkPDFStreamFormat::DCT);
    return true;
}
+
// Read `image` into a raster SkBitmap in one of the color types the PDF
// writer handles (A8, Gray8, or unpremul BGRA). On read failure the bitmap
// is filled with opaque black rather than left uninitialized.
static SkBitmap to_pixels(const SkImage* image) {
    SkBitmap bm;
    int w = image->width(),
        h = image->height();
    switch (image->colorType()) {
        case kAlpha_8_SkColorType:
            bm.allocPixels(SkImageInfo::MakeA8(w, h));
            break;
        case kGray_8_SkColorType:
            bm.allocPixels(SkImageInfo::Make(w, h, kGray_8_SkColorType, kOpaque_SkAlphaType));
            break;
        default: {
            // TODO: makeColorSpace(sRGB) or actually tag the images
            // NOTE(review): `bm` is still empty here, so bm.isOpaque() is
            // always false and `at` is always kUnpremul_SkAlphaType — which
            // is what do_deflated_image() asserts for BGRA. If opaque-source
            // detection was intended, this would need image->isOpaque();
            // confirm before changing.
            SkAlphaType at = bm.isOpaque() ? kOpaque_SkAlphaType : kUnpremul_SkAlphaType;
            bm.allocPixels(SkImageInfo::Make(w, h, kBGRA_8888_SkColorType, at));
        }
    }
    // TODO: support GPU images in PDFs
    if (!image->readPixels(nullptr, bm.pixmap(), 0, 0)) {
        bm.eraseColor(SkColorSetARGB(0xFF, 0, 0, 0));
    }
    return bm;
}
+
// Serialize `img` as an Image XObject at `ref`. Tries, in order:
// 1) embedding already-encoded JPEG bytes directly,
// 2) re-encoding as JPEG at `encodingQuality` (opaque images only; a
//    quality > 100 means "lossless" and skips this step),
// 3) lossless deflated output.
void serialize_image(const SkImage* img,
                     int encodingQuality,
                     SkPDFDocument* doc,
                     SkPDFIndirectReference ref) {
    SkASSERT(img);
    SkASSERT(doc);
    SkASSERT(encodingQuality >= 0);
    SkISize dimensions = img->dimensions();
    if (sk_sp<SkData> data = img->refEncodedData()) {
        if (do_jpeg(std::move(data), doc, dimensions, ref)) {
            return;
        }
    }
    SkBitmap bm = to_pixels(img);
    const SkPixmap& pm = bm.pixmap();
    bool isOpaque = pm.isOpaque() || pm.computeIsOpaque();
    if (encodingQuality <= 100 && isOpaque) {
        if (sk_sp<SkData> data = img->encodeToData(SkEncodedImageFormat::kJPEG, encodingQuality)) {
            if (do_jpeg(std::move(data), doc, dimensions, ref)) {
                return;
            }
        }
    }
    do_deflated_image(pm, doc, isOpaque, ref);
}

// Reserve a reference for `img` and serialize it — on the document's executor
// when one is configured (keeping `img` alive and signalling job completion),
// otherwise synchronously on the calling thread.
SkPDFIndirectReference SkPDFSerializeImage(const SkImage* img,
                                           SkPDFDocument* doc,
                                           int encodingQuality) {
    SkASSERT(img);
    SkASSERT(doc);
    SkPDFIndirectReference ref = doc->reserveRef();
    if (SkExecutor* executor = doc->executor()) {
        SkRef(img);  // balanced by SkSafeUnref inside the task
        doc->incrementJobCount();
        executor->add([img, encodingQuality, doc, ref]() {
            serialize_image(img, encodingQuality, doc, ref);
            SkSafeUnref(img);
            doc->signalJobComplete();
        });
        return ref;
    }
    serialize_image(img, encodingQuality, doc, ref);
    return ref;
}
diff --git a/gfx/skia/skia/src/pdf/SkPDFBitmap.h b/gfx/skia/skia/src/pdf/SkPDFBitmap.h
new file mode 100644
index 0000000000..bc2c57bd3b
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFBitmap.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFBitmap_DEFINED
+#define SkPDFBitmap_DEFINED
+
+class SkImage;
+class SkPDFDocument;
+struct SkPDFIndirectReference;
+
+/**
+ * Serialize a SkImage as an Image Xobject.
+ * quality > 100 means lossless
+ */
+SkPDFIndirectReference SkPDFSerializeImage(const SkImage* img,
+ SkPDFDocument* doc,
+ int encodingQuality = 101);
+
+#endif // SkPDFBitmap_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFDevice.cpp b/gfx/skia/skia/src/pdf/SkPDFDevice.cpp
new file mode 100644
index 0000000000..50c828ff39
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDevice.cpp
@@ -0,0 +1,1761 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFDevice.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPathUtils.h"
+#include "include/core/SkRRect.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurface.h"
+#include "include/core/SkTextBlob.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/encode/SkJpegEncoder.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkScopeExit.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkAnnotationKeys.h"
+#include "src/core/SkBitmapDevice.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkImageFilterCache.h"
+#include "src/core/SkImageFilter_Base.h"
+#include "src/core/SkMaskFilterBase.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTextFormatParams.h"
+#include "src/core/SkXfermodeInterpretation.h"
+#include "src/pdf/SkBitmapKey.h"
+#include "src/pdf/SkClusterator.h"
+#include "src/pdf/SkPDFBitmap.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFont.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFGraphicState.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFShader.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkPDFUtils.h"
+#include "src/text/GlyphRun.h"
+#include "src/utils/SkClipStackUtils.h"
+
+#include <vector>
+
+#ifndef SK_PDF_MASK_QUALITY
+ // If MASK_QUALITY is in [0,100], will be used for JpegEncoder.
+ // Otherwise, just encode masks losslessly.
+ #define SK_PDF_MASK_QUALITY 50
+ // Since these masks are used for blurry shadows, we shouldn't need
+ // high quality. Raise this value if your shadows have visible JPEG
+ // artifacts.
+ // If SkJpegEncoder::Encode fails, we will fall back to the lossless
+ // encoding.
+#endif
+
+namespace {
+
+// If nodeId is not zero, outputs the tags to begin a marked-content sequence
+// for the given node ID, and then closes those tags when this object goes
+// out of scope.
class ScopedOutputMarkedContentTags {
public:
    // Opens a /P <</MCID n>> BDC sequence for `nodeId` (when non-zero and the
    // document hands out a mark id for it); the destructor emits the matching
    // EMC.
    ScopedOutputMarkedContentTags(int nodeId, SkPDFDocument* document, SkDynamicMemoryWStream* out)
        : fOut(out)
        , fMarkId(-1) {
        if (nodeId) {
            fMarkId = document->createMarkIdForNodeId(nodeId);
        }

        if (fMarkId != -1) {
            fOut->writeText("/P <</MCID ");
            fOut->writeDecAsText(fMarkId);
            fOut->writeText(" >>BDC\n");
        }
    }

    ~ScopedOutputMarkedContentTags() {
        if (fMarkId != -1) {
            fOut->writeText("EMC\n");
        }
    }

private:
    SkDynamicMemoryWStream* fOut;  // not owned; must outlive this object
    int fMarkId;                   // -1 means "no marked-content sequence open"
};
+
+} // namespace
+
+// Utility functions
+
// This function destroys the mask and either frees or takes the pixels.
// Converts an 8-bit mask to a Gray8 SkImage, preferring a JPEG-compressed
// image (SK_PDF_MASK_QUALITY) and falling back to wrapping the raw pixels.
sk_sp<SkImage> mask_to_greyscale_image(SkMask* mask) {
    sk_sp<SkImage> img;
    SkPixmap pm(SkImageInfo::Make(mask->fBounds.width(), mask->fBounds.height(),
                                  kGray_8_SkColorType, kOpaque_SkAlphaType),
                mask->fImage, mask->fRowBytes);
    const int imgQuality = SK_PDF_MASK_QUALITY;
    if (imgQuality <= 100 && imgQuality >= 0) {
        SkDynamicMemoryWStream buffer;
        SkJpegEncoder::Options jpegOptions;
        jpegOptions.fQuality = imgQuality;
        if (SkJpegEncoder::Encode(&buffer, pm, jpegOptions)) {
            img = SkImage::MakeFromEncoded(buffer.detachAsData());
            SkASSERT(img);
            if (img) {
                // Encoded copy taken; the raw mask pixels can be freed now.
                SkMask::FreeImage(mask->fImage);
            }
        }
    }
    if (!img) {
        // Wrap the mask pixels directly; the release proc frees them when the
        // image is destroyed.
        img = SkImage::MakeFromRaster(pm, [](const void* p, void*) { SkMask::FreeImage((void*)p); },
                                      nullptr);
    }
    *mask = SkMask();  // destructive;
    return img;
}

// Copy the alpha channel of `mask` into an opaque Gray8 image; returns
// nullptr if the pixels cannot be read (e.g. GPU-backed images — see TODO).
sk_sp<SkImage> alpha_image_to_greyscale_image(const SkImage* mask) {
    int w = mask->width(), h = mask->height();
    SkBitmap greyBitmap;
    greyBitmap.allocPixels(SkImageInfo::Make(w, h, kGray_8_SkColorType, kOpaque_SkAlphaType));
    // TODO: support gpu images in pdf
    if (!mask->readPixels(nullptr, SkImageInfo::MakeA8(w, h),
                          greyBitmap.getPixels(), greyBitmap.rowBytes(), 0, 0)) {
        return nullptr;
    }
    greyBitmap.setImmutable();
    return greyBitmap.asImage();
}
+
+static int add_resource(SkTHashSet<SkPDFIndirectReference>& resources, SkPDFIndirectReference ref) {
+ resources.add(ref);
+ return ref.fValue;
+}
+
// Rasterize a point-mode draw through SkDraw, which decomposes it into
// drawPath calls back on `device`. Used when the PDF path cannot express the
// draw directly (path effects, perspective).
static void draw_points(SkCanvas::PointMode mode,
                        size_t count,
                        const SkPoint* points,
                        const SkPaint& paint,
                        const SkIRect& bounds,
                        SkBaseDevice* device) {
    SkRasterClip rc(bounds);
    SkDraw draw;
    // No real pixels: only the geometry/clip plumbing of SkDraw is used.
    draw.fDst = SkPixmap(SkImageInfo::MakeUnknown(bounds.right(), bounds.bottom()), nullptr, 0);
    draw.fMatrixProvider = device;
    draw.fRC = &rc;
    draw.drawPoints(mode, count, points, paint, device);
}

// Rewrite the paint's shader so it draws identically with an identity CTM
// (the PDF content is emitted in device space).
static void transform_shader(SkPaint* paint, const SkMatrix& ctm) {
    SkASSERT(!ctm.isIdentity());
#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
    // A shader's matrix is: CTM x LocalMatrix x WrappingLocalMatrix. We want to
    // switch to device space, where CTM = I, while keeping the original behavior.
    //
    //               I * LocalMatrix * NewWrappingMatrix = CTM * LocalMatrix
    //                   LocalMatrix * NewWrappingMatrix = CTM * LocalMatrix
    //  InvLocalMatrix * LocalMatrix * NewWrappingMatrix = InvLocalMatrix * CTM * LocalMatrix
    //                                NewWrappingMatrix = InvLocalMatrix * CTM * LocalMatrix
    //
    SkMatrix lm = SkPDFUtils::GetShaderLocalMatrix(paint->getShader());
    SkMatrix lmInv;
    if (lm.invert(&lmInv)) {
        SkMatrix m = SkMatrix::Concat(SkMatrix::Concat(lmInv, ctm), lm);
        paint->setShader(paint->getShader()->makeWithLocalMatrix(m));
    }
    return;
#endif
    paint->setShader(paint->getShader()->makeWithLocalMatrix(ctm));
}


// Normalize a paint for PDF output: demote effectively-src-over blends and
// bake any color filter into the paint (copy-on-write, so an untouched paint
// is returned by reference).
static SkTCopyOnFirstWrite<SkPaint> clean_paint(const SkPaint& srcPaint) {
    SkTCopyOnFirstWrite<SkPaint> paint(srcPaint);
    // If the paint will definitely draw opaquely, replace kSrc with
    // kSrcOver.  http://crbug.com/473572
    if (!paint->isSrcOver() &&
        kSrcOver_SkXfermodeInterpretation == SkInterpretXfermode(*paint, false))
    {
        paint.writable()->setBlendMode(SkBlendMode::kSrcOver);
    }
    if (paint->getColorFilter()) {
        // We assume here that PDFs all draw in sRGB.
        SkPaintPriv::RemoveColorFilter(paint.writable(), sk_srgb_singleton());
    }
    SkASSERT(!paint->getColorFilter());
    return paint;
}
+
+static void set_style(SkTCopyOnFirstWrite<SkPaint>* paint, SkPaint::Style style) {
+ if (paint->get()->getStyle() != style) {
+ paint->writable()->setStyle(style);
+ }
+}
+
+/* Calculate an inverted path's equivalent non-inverted path, given the
+ * canvas bounds.
+ * outPath may alias with invPath (since this is supported by PathOps).
+ */
+static bool calculate_inverse_path(const SkRect& bounds, const SkPath& invPath,
+ SkPath* outPath) {
+ SkASSERT(invPath.isInverseFillType());
+ return Op(SkPath::Rect(bounds), invPath, kIntersect_SkPathOp, outPath);
+}
+
// Create the device backing a saveLayer. Layers with image/color filters get
// a raster device (PDF cannot express them); everything else gets another
// SkPDFDevice.
SkBaseDevice* SkPDFDevice::onCreateDevice(const CreateInfo& cinfo, const SkPaint* layerPaint) {
    // PDF does not support image filters, so render them on CPU.
    // Note that this rendering is done at "screen" resolution (100dpi), not
    // printer resolution.

    // TODO: It may be possible to express some filters natively using PDF
    // to improve quality and file size (https://bug.skia.org/3043)
    if (layerPaint && (layerPaint->getImageFilter() || layerPaint->getColorFilter())) {
        // need to return a raster device, which we will detect in drawDevice()
        return SkBitmapDevice::Create(cinfo.fInfo, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
    }
    return new SkPDFDevice(cinfo.fInfo.dimensions(), fDocument);
}
+
// A helper class to automatically finish a ContentEntry at the end of a
// drawing method and maintain the state needed between set up and finish.
class ScopedContentEntry {
public:
    ScopedContentEntry(SkPDFDevice* device,
                       const SkClipStack* clipStack,
                       const SkMatrix& matrix,
                       const SkPaint& paint,
                       SkScalar textScale = 0)
        : fDevice(device)
        , fBlendMode(SkBlendMode::kSrcOver)
        , fClipStack(clipStack)
    {
        // Perspective is not representable in a PDF content stream; bail out
        // leaving fContentStream null (operator bool reports failure).
        if (matrix.hasPerspective()) {
            NOT_IMPLEMENTED(!matrix.hasPerspective(), false);
            return;
        }
        fBlendMode = paint.getBlendMode_or(SkBlendMode::kSrcOver);
        fContentStream =
            fDevice->setUpContentEntry(clipStack, matrix, paint, textScale, &fDstFormXObject);
    }
    // Convenience overload using the device's own clip stack and CTM.
    ScopedContentEntry(SkPDFDevice* dev, const SkPaint& paint, SkScalar textScale = 0)
        : ScopedContentEntry(dev, &dev->cs(), dev->localToDevice(), paint, textScale) {}

    ~ScopedContentEntry() {
        if (fContentStream) {
            SkPath* shape = &fShape;
            if (shape->isEmpty()) {
                shape = nullptr;
            }
            fDevice->finishContentEntry(fClipStack, fBlendMode, fDstFormXObject, shape);
        }
    }

    // False when the entry could not be set up (e.g. perspective matrix).
    explicit operator bool() const { return fContentStream != nullptr; }
    SkDynamicMemoryWStream* stream() { return fContentStream; }

    /* Returns true when we explicitly need the shape of the drawing. */
    bool needShape() {
        switch (fBlendMode) {
            case SkBlendMode::kClear:
            case SkBlendMode::kSrc:
            case SkBlendMode::kSrcIn:
            case SkBlendMode::kSrcOut:
            case SkBlendMode::kDstIn:
            case SkBlendMode::kDstOut:
            case SkBlendMode::kSrcATop:
            case SkBlendMode::kDstATop:
            case SkBlendMode::kModulate:
                return true;
            default:
                return false;
        }
    }

    /* Returns true unless we only need the shape of the drawing. */
    bool needSource() {
        if (fBlendMode == SkBlendMode::kClear) {
            return false;
        }
        return true;
    }

    /* If the shape is different than the alpha component of the content, then
     * setShape should be called with the shape.  In particular, images and
     * devices have rectangular shape.
     */
    void setShape(const SkPath& shape) {
        fShape = shape;
    }

private:
    SkPDFDevice* fDevice = nullptr;
    SkDynamicMemoryWStream* fContentStream = nullptr;  // null => setup failed
    SkBlendMode fBlendMode;
    SkPDFIndirectReference fDstFormXObject;
    SkPath fShape;  // empty path means "no explicit shape"
    const SkClipStack* fClipStack;
};
+
+////////////////////////////////////////////////////////////////////////////////
+
// Construct a page-sized device; `transform` is the initial transform applied
// when the page content is emitted.
SkPDFDevice::SkPDFDevice(SkISize pageSize, SkPDFDocument* doc, const SkMatrix& transform)
    : INHERITED(SkImageInfo::MakeUnknown(pageSize.width(), pageSize.height()),
                SkSurfaceProps(0, kUnknown_SkPixelGeometry))
    , fInitialTransform(transform)
    , fNodeId(0)
    , fDocument(doc)
{
    SkASSERT(!pageSize.isEmpty());
}

SkPDFDevice::~SkPDFDevice() = default;

// Discard all accumulated content and resource references, returning the
// device to a freshly-constructed state.
void SkPDFDevice::reset() {
    fGraphicStateResources.reset();
    fXObjectResources.reset();
    fShaderResources.reset();
    fFontResources.reset();
    fContent.reset();
    fActiveStackState = SkPDFGraphicStackState();
}
+
// Handle canvas annotations: node-id tagging and named destinations (empty
// rect), or URL / named-destination links (non-empty rect, clipped and
// converted to a page-space rectangle).
void SkPDFDevice::drawAnnotation(const SkRect& rect, const char key[], SkData* value) {
    if (!value) {
        return;
    }
    // Annotations are specified in absolute coordinates, so the page xform maps from device space
    // to the global space, and applies the document transform.
    SkMatrix pageXform = this->deviceToGlobal().asM33();
    pageXform.postConcat(fDocument->currentPageTransform());
    if (rect.isEmpty()) {
        if (!strcmp(key, SkPDFGetNodeIdKey())) {
            int nodeID;
            if (value->size() != sizeof(nodeID)) { return; }
            memcpy(&nodeID, value->data(), sizeof(nodeID));
            fNodeId = nodeID;
            return;
        }
        if (!strcmp(SkAnnotationKeys::Define_Named_Dest_Key(), key)) {
            SkPoint p = this->localToDevice().mapXY(rect.x(), rect.y());
            pageXform.mapPoints(&p, 1);
            auto pg = fDocument->currentPage();
            fDocument->fNamedDestinations.push_back(SkPDFNamedDestination{sk_ref_sp(value), p, pg});
        }
        return;
    }
    // Convert to path to handle non-90-degree rotations.
    SkPath path = SkPath::Rect(rect).makeTransform(this->localToDevice());
    SkPath clip;
    SkClipStack_AsPath(this->cs(), &clip);
    Op(clip, path, kIntersect_SkPathOp, &path);
    // PDF wants a rectangle only.
    SkRect transformedRect = pageXform.mapRect(path.getBounds());
    if (transformedRect.isEmpty()) {
        return;
    }

    SkPDFLink::Type linkType = SkPDFLink::Type::kNone;
    if (!strcmp(SkAnnotationKeys::URL_Key(), key)) {
        linkType = SkPDFLink::Type::kUrl;
    } else if (!strcmp(SkAnnotationKeys::Link_Named_Dest_Key(), key)) {
        linkType = SkPDFLink::Type::kNamedDestination;
    }

    if (linkType != SkPDFLink::Type::kNone) {
        std::unique_ptr<SkPDFLink> link = std::make_unique<SkPDFLink>(
            linkType, value, transformedRect, fNodeId);
        fDocument->fCurrentPageLinks.push_back(std::move(link));
    }
}

// Fill the clip-limited device bounds, mapped back into local space so the
// normal drawRect path handles the transform.
void SkPDFDevice::drawPaint(const SkPaint& srcPaint) {
    SkMatrix inverse;
    if (!this->localToDevice().invert(&inverse)) {
        return;
    }
    SkRect bbox = this->cs().bounds(this->bounds());
    inverse.mapRect(&bbox);
    bbox.roundOut(&bbox);
    if (this->hasEmptyClip()) {
        return;
    }
    SkPaint newPaint = srcPaint;
    newPaint.setStyle(SkPaint::kFill_Style);
    this->drawRect(bbox, newPaint);
}

// Draw points/lines/polygons directly as PDF path operators where possible;
// fall back to SkDraw decomposition for path effects or perspective.
void SkPDFDevice::drawPoints(SkCanvas::PointMode mode,
                             size_t count,
                             const SkPoint* points,
                             const SkPaint& srcPaint) {
    if (this->hasEmptyClip()) {
        return;
    }
    if (count == 0) {
        return;
    }
    SkTCopyOnFirstWrite<SkPaint> paint(clean_paint(srcPaint));



    if (SkCanvas::kPoints_PointMode != mode) {
        set_style(&paint, SkPaint::kStroke_Style);
    }

    // SkDraw::drawPoints converts to multiple calls to fDevice->drawPath.
    // We only use this when there's a path effect or perspective because of the overhead
    // of multiple calls to setUpContentEntry it causes.
    if (paint->getPathEffect() || this->localToDevice().hasPerspective()) {
        draw_points(mode, count, points, *paint, this->devClipBounds(), this);
        return;
    }


    if (mode == SkCanvas::kPoints_PointMode && paint->getStrokeCap() != SkPaint::kRound_Cap) {
        if (paint->getStrokeWidth()) {
            // PDF won't draw a single point with square/butt caps because the
            // orientation is ambiguous.  Draw a rectangle instead.
            set_style(&paint, SkPaint::kFill_Style);
            SkScalar strokeWidth = paint->getStrokeWidth();
            SkScalar halfStroke = SkScalarHalf(strokeWidth);
            for (size_t i = 0; i < count; i++) {
                SkRect r = SkRect::MakeXYWH(points[i].fX, points[i].fY, 0, 0);
                r.inset(-halfStroke, -halfStroke);
                this->drawRect(r, *paint);
            }
            return;
        } else {
            // Hairline points need round caps so they produce visible marks.
            if (paint->getStrokeCap() != SkPaint::kRound_Cap) {
                paint.writable()->setStrokeCap(SkPaint::kRound_Cap);
            }
        }
    }

    ScopedContentEntry content(this, *paint);
    if (!content) {
        return;
    }
    SkDynamicMemoryWStream* contentStream = content.stream();
    switch (mode) {
        case SkCanvas::kPolygon_PointMode:
            SkPDFUtils::MoveTo(points[0].fX, points[0].fY, contentStream);
            for (size_t i = 1; i < count; i++) {
                SkPDFUtils::AppendLine(points[i].fX, points[i].fY, contentStream);
            }
            SkPDFUtils::StrokePath(contentStream);
            break;
        case SkCanvas::kLines_PointMode:
            for (size_t i = 0; i < count/2; i++) {
                SkPDFUtils::MoveTo(points[i * 2].fX, points[i * 2].fY, contentStream);
                SkPDFUtils::AppendLine(points[i * 2 + 1].fX, points[i * 2 + 1].fY, contentStream);
                SkPDFUtils::StrokePath(contentStream);
            }
            break;
        case SkCanvas::kPoints_PointMode:
            // Each point becomes a degenerate (move+close) stroked subpath;
            // the round cap renders it as a dot.
            SkASSERT(paint->getStrokeCap() == SkPaint::kRound_Cap);
            for (size_t i = 0; i < count; i++) {
                SkPDFUtils::MoveTo(points[i].fX, points[i].fY, contentStream);
                SkPDFUtils::ClosePath(contentStream);
                SkPDFUtils::StrokePath(contentStream);
            }
            break;
        default:
            SkASSERT(false);
    }
}
+
+void SkPDFDevice::drawRect(const SkRect& rect, const SkPaint& paint) {
+ SkRect r = rect;
+ r.sort();
+ this->internalDrawPath(this->cs(), this->localToDevice(), SkPath::Rect(r), paint, true);
+}
+
// Rounded rects, ovals, and paths all funnel into internalDrawPath.
void SkPDFDevice::drawRRect(const SkRRect& rrect, const SkPaint& paint) {
    this->internalDrawPath(this->cs(), this->localToDevice(), SkPath::RRect(rrect), paint, true);
}

void SkPDFDevice::drawOval(const SkRect& oval, const SkPaint& paint) {
    this->internalDrawPath(this->cs(), this->localToDevice(), SkPath::Oval(oval), paint, true);
}

void SkPDFDevice::drawPath(const SkPath& path, const SkPaint& paint, bool pathIsMutable) {
    this->internalDrawPath(this->cs(), this->localToDevice(), path, paint, pathIsMutable);
}
+
// Draw a path whose paint carries a mask filter: render the filtered mask to
// a Form XObject and use it as a luminosity soft mask over the fill.
void SkPDFDevice::internalDrawPathWithFilter(const SkClipStack& clipStack,
                                             const SkMatrix& ctm,
                                             const SkPath& origPath,
                                             const SkPaint& origPaint) {
    SkASSERT(origPaint.getMaskFilter());
    SkPath path(origPath);
    SkTCopyOnFirstWrite<SkPaint> paint(origPaint);

    SkStrokeRec::InitStyle initStyle = skpathutils::FillPathWithPaint(path, *paint, &path)
                                     ? SkStrokeRec::kFill_InitStyle
                                     : SkStrokeRec::kHairline_InitStyle;
    // Work in device space from here on (mask generation uses identity CTM).
    path.transform(ctm, &path);

    SkIRect bounds = clipStack.bounds(this->bounds()).roundOut();
    SkMask sourceMask;
    if (!SkDraw::DrawToMask(path, bounds, paint->getMaskFilter(), &SkMatrix::I(),
                            &sourceMask, SkMask::kComputeBoundsAndRenderImage_CreateMode,
                            initStyle)) {
        return;
    }
    SkAutoMaskFreeImage srcAutoMaskFreeImage(sourceMask.fImage);
    SkMask dstMask;
    SkIPoint margin;
    if (!as_MFB(paint->getMaskFilter())->filterMask(&dstMask, sourceMask, ctm, &margin)) {
        return;
    }
    SkIRect dstMaskBounds = dstMask.fBounds;
    sk_sp<SkImage> mask = mask_to_greyscale_image(&dstMask);
    // PDF doesn't seem to allow masking vector graphics with an Image XObject.
    // Must mask with a Form XObject.
    sk_sp<SkPDFDevice> maskDevice = this->makeCongruentDevice();
    {
        SkCanvas canvas(maskDevice);
        canvas.drawImage(mask, dstMaskBounds.x(), dstMaskBounds.y());
    }
    if (!ctm.isIdentity() && paint->getShader()) {
        transform_shader(paint.writable(), ctm); // Since we are using identity matrix.
    }
    ScopedContentEntry content(this, &clipStack, SkMatrix::I(), *paint);
    if (!content) {
        return;
    }
    this->setGraphicState(SkPDFGraphicState::GetSMaskGraphicState(
            maskDevice->makeFormXObjectFromDevice(dstMaskBounds, true), false,
            SkPDFGraphicState::kLuminosity_SMaskMode, fDocument), content.stream());
    // Paint the mask bounds as a filled rect; the soft mask supplies the shape.
    SkPDFUtils::AppendRectangle(SkRect::Make(dstMaskBounds), content.stream());
    SkPDFUtils::PaintPath(SkPaint::kFill_Style, path.getFillType(), content.stream());
    this->clearMaskOnGraphicState(content.stream());
}

// Emit a graphic-state change, registering the state in this device's
// resource set.
void SkPDFDevice::setGraphicState(SkPDFIndirectReference gs, SkDynamicMemoryWStream* content) {
    SkPDFUtils::ApplyGraphicState(add_resource(fGraphicStateResources, gs), content);
}

void SkPDFDevice::clearMaskOnGraphicState(SkDynamicMemoryWStream* contentStream) {
    // The no-softmask graphic state is used to "turn off" the mask for later draw calls.
    SkPDFIndirectReference& noSMaskGS = fDocument->fNoSmaskGraphicState;
    if (!noSMaskGS) {
        // Lazily create and cache the shared /SMask None state on the document.
        SkPDFDict tmp("ExtGState");
        tmp.insertName("SMask", "None");
        noSMaskGS = fDocument->emit(tmp);
    }
    this->setGraphicState(noSMaskGS, contentStream);
}

// Core path-drawing routine: normalizes the paint, resolves mask filters,
// path effects, inverse fills, and perspective, then emits PDF path
// operators.
void SkPDFDevice::internalDrawPath(const SkClipStack& clipStack,
                                   const SkMatrix& ctm,
                                   const SkPath& origPath,
                                   const SkPaint& srcPaint,
                                   bool pathIsMutable) {
    if (clipStack.isEmpty(this->bounds())) {
        return;
    }
    SkTCopyOnFirstWrite<SkPaint> paint(clean_paint(srcPaint));
    SkPath modifiedPath;
    SkPath* pathPtr = const_cast<SkPath*>(&origPath);

    if (paint->getMaskFilter()) {
        this->internalDrawPathWithFilter(clipStack, ctm, origPath, *paint);
        return;
    }

    SkMatrix matrix = ctm;

    if (paint->getPathEffect()) {
        if (clipStack.isEmpty(this->bounds())) {
            return;
        }
        if (!pathIsMutable) {
            modifiedPath = origPath;
            pathPtr = &modifiedPath;
            pathIsMutable = true;
        }
        // Bake the path effect into the geometry, then drop it from the paint.
        if (skpathutils::FillPathWithPaint(*pathPtr, *paint, pathPtr)) {
            set_style(&paint, SkPaint::kFill_Style);
        } else {
            set_style(&paint, SkPaint::kStroke_Style);
            if (paint->getStrokeWidth() != 0) {
                paint.writable()->setStrokeWidth(0);
            }
        }
        paint.writable()->setPathEffect(nullptr);
    }

    if (this->handleInversePath(*pathPtr, *paint, pathIsMutable)) {
        return;
    }
    if (matrix.getType() & SkMatrix::kPerspective_Mask) {
        // ScopedContentEntry rejects perspective, so pre-transform the path
        // (and shader) and emit with an identity matrix instead.
        if (!pathIsMutable) {
            modifiedPath = origPath;
            pathPtr = &modifiedPath;
            pathIsMutable = true;
        }
        pathPtr->transform(matrix);
        if (paint->getShader()) {
            transform_shader(paint.writable(), matrix);
        }
        matrix = SkMatrix::I();
    }

    ScopedContentEntry content(this, &clipStack, matrix, *paint);
    if (!content) {
        return;
    }
    constexpr SkScalar kToleranceScale = 0.0625f;  // smaller = better conics (circles).
    SkScalar matrixScale = matrix.mapRadius(1.0f);
    SkScalar tolerance = matrixScale > 0.0f ? kToleranceScale / matrixScale : kToleranceScale;
    bool consumeDegeratePathSegments =
           paint->getStyle() == SkPaint::kFill_Style ||
           (paint->getStrokeCap() != SkPaint::kRound_Cap &&
            paint->getStrokeCap() != SkPaint::kSquare_Cap);
    SkPDFUtils::EmitPath(*pathPtr, paint->getStyle(), consumeDegeratePathSegments, content.stream(),
                         tolerance);
    SkPDFUtils::PaintPath(paint->getStyle(), pathPtr->getFillType(), content.stream());
}
+
+////////////////////////////////////////////////////////////////////////////////
+
+ // SkDevice override: forwards to internalDrawImageRect with the image
+ // wrapped in a SkKeyedImage and the device's current local-to-device matrix.
+ // The SrcRectConstraint parameter is ignored.
+ void SkPDFDevice::drawImageRect(const SkImage* image,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkSamplingOptions& sampling,
+ const SkPaint& paint,
+ SkCanvas::SrcRectConstraint) {
+ SkASSERT(image);
+ this->internalDrawImageRect(SkKeyedImage(sk_ref_sp(const_cast<SkImage*>(image))),
+ src, dst, sampling, paint, this->localToDevice());
+ }
+
+ // Draw |bm| at device coordinates (x, y) ignoring the CTM (identity matrix),
+ // matching sprite semantics; no filtering (default SkSamplingOptions).
+ void SkPDFDevice::drawSprite(const SkBitmap& bm, int x, int y, const SkPaint& paint) {
+ SkASSERT(!bm.drawsNothing());
+ auto r = SkRect::MakeXYWH(x, y, bm.width(), bm.height());
+ this->internalDrawImageRect(SkKeyedImage(bm), nullptr, r, SkSamplingOptions(), paint,
+ SkMatrix::I());
+ }
+
+////////////////////////////////////////////////////////////////////////////////
+
+ namespace {
+ // Streams PDF text-showing operators (Tm/Td/Tj) for a run of glyphs,
+ // batching consecutive glyphs into a single hex string ("<...> Tj") as long
+ // as each glyph lands exactly where the previous glyph's advance predicts.
+ // The destructor flushes any open string. The fViewersAgree* flags work
+ // around PDF viewers that disagree with us about Type3 glyph advances.
+ class GlyphPositioner {
+ public:
+ GlyphPositioner(SkDynamicMemoryWStream* content,
+ SkScalar textSkewX,
+ SkPoint origin)
+ : fContent(content)
+ , fCurrentMatrixOrigin(origin)
+ , fTextSkewX(textSkewX) {
+ }
+ ~GlyphPositioner() { this->flush(); }
+ // Close the currently open hex string, if any, with "> Tj".
+ void flush() {
+ if (fInText) {
+ fContent->writeText("> Tj\n");
+ fInText = false;
+ }
+ }
+ // Switch to |pdfFont|; must be called before the first writeGlyph().
+ void setFont(SkPDFFont* pdfFont) {
+ this->flush();
+ fPDFFont = pdfFont;
+ // Reader 2020.013.20064 incorrectly advances some Type3 fonts https://crbug.com/1226960
+ bool convertedToType3 = fPDFFont->getType() == SkAdvancedTypefaceMetrics::kOther_Font;
+ bool thousandEM = fPDFFont->typeface()->getUnitsPerEm() == 1000;
+ fViewersAgreeOnAdvancesInFont = thousandEM || !convertedToType3;
+ }
+ // Emit |glyph| at |xy|; |advanceWidth| is the glyph's horizontal advance
+ // used to predict the next glyph's position for batching.
+ void writeGlyph(uint16_t glyph, SkScalar advanceWidth, SkPoint xy) {
+ SkASSERT(fPDFFont);
+ if (!fInitialized) {
+ // Flip the text about the x-axis to account for origin swap and include
+ // the passed parameters.
+ fContent->writeText("1 0 ");
+ SkPDFUtils::AppendScalar(-fTextSkewX, fContent);
+ fContent->writeText(" -1 ");
+ SkPDFUtils::AppendScalar(fCurrentMatrixOrigin.x(), fContent);
+ fContent->writeText(" ");
+ SkPDFUtils::AppendScalar(fCurrentMatrixOrigin.y(), fContent);
+ fContent->writeText(" Tm\n");
+ fCurrentMatrixOrigin.set(0.0f, 0.0f);
+ fInitialized = true;
+ }
+ SkPoint position = xy - fCurrentMatrixOrigin;
+ // If the glyph is not where the accumulated advances predict, emit an
+ // explicit "Td" reposition and restart the batch.
+ if (!fViewersAgreeOnXAdvance || position != SkPoint{fXAdvance, 0}) {
+ this->flush();
+ SkPDFUtils::AppendScalar(position.x() - position.y() * fTextSkewX, fContent);
+ fContent->writeText(" ");
+ SkPDFUtils::AppendScalar(-position.y(), fContent);
+ fContent->writeText(" Td ");
+ fCurrentMatrixOrigin = xy;
+ fXAdvance = 0;
+ fViewersAgreeOnXAdvance = true;
+ }
+ fXAdvance += advanceWidth;
+ if (!fViewersAgreeOnAdvancesInFont) {
+ fViewersAgreeOnXAdvance = false;
+ }
+ if (!fInText) {
+ fContent->writeText("<");
+ fInText = true;
+ }
+ // Multi-byte fonts encode glyphs as big-endian 16-bit; single-byte
+ // fonts must have already remapped glyphs into 0..255.
+ if (fPDFFont->multiByteGlyphs()) {
+ SkPDFUtils::WriteUInt16BE(fContent, glyph);
+ } else {
+ SkASSERT(0 == glyph >> 8);
+ SkPDFUtils::WriteUInt8(fContent, static_cast<uint8_t>(glyph));
+ }
+ }
+
+ private:
+ SkDynamicMemoryWStream* fContent;
+ SkPDFFont* fPDFFont = nullptr;
+ SkPoint fCurrentMatrixOrigin;
+ SkScalar fXAdvance = 0.0f;
+ bool fViewersAgreeOnAdvancesInFont = true;
+ bool fViewersAgreeOnXAdvance = true;
+ SkScalar fTextSkewX;
+ bool fInText = false;
+ bool fInitialized = false;
+ };
+ } // namespace
+
+ // Map |glyph| to its Unicode code point via the font's glyph->unicode table;
+ // returns -1 when the glyph id is out of the table's range.
+ static SkUnichar map_glyph(const std::vector<SkUnichar>& glyphToUnicode, SkGlyphID glyph) {
+ return glyph < glyphToUnicode.size() ? glyphToUnicode[SkToInt(glyph)] : -1;
+ }
+
+ namespace {
+ // A glyph id paired with its position. (No uses are visible in this chunk;
+ // presumably consumed by glyph-drawing code elsewhere in the file.)
+ struct PositionedGlyph {
+ SkPoint fPos;
+ SkGlyphID fGlyph;
+ };
+ } // namespace
+
+ // Return |glyph|'s bounds scaled by (xScale, yScale), translated to |xy|,
+ // and then mapped by |ctm| into device space (a conservative bound).
+ static SkRect get_glyph_bounds_device_space(const SkGlyph* glyph,
+ SkScalar xScale, SkScalar yScale,
+ SkPoint xy, const SkMatrix& ctm) {
+ SkRect glyphBounds = SkMatrix::Scale(xScale, yScale).mapRect(glyph->rect());
+ glyphBounds.offset(xy);
+ ctm.mapRect(&glyphBounds); // now in dev space.
+ return glyphBounds;
+ }
+
+ // Closed-interval containment test: unlike SkRect::contains(), points on the
+ // right and bottom edges count as inside.
+ static bool contains(const SkRect& r, SkPoint p) {
+ return r.left() <= p.x() && p.x() <= r.right() &&
+ r.top() <= p.y() && p.y() <= r.bottom();
+ }
+
+ // Render |glyphRun| as filled path geometry (used when real PDF text can't
+ // be used, e.g. stroked/emboldened text or perspective), then overlay the
+ // same run as fully transparent text so copy/paste and search still work.
+ void SkPDFDevice::drawGlyphRunAsPath(
+ const sktext::GlyphRun& glyphRun, SkPoint offset, const SkPaint& runPaint) {
+ const SkFont& font = glyphRun.font();
+ SkPath path;
+
+ // Context threaded through the getPaths() callback below.
+ struct Rec {
+ SkPath* fPath;
+ SkPoint fOffset;
+ const SkPoint* fPos;
+ } rec = {&path, offset, glyphRun.positions().data()};
+
+ // Accumulate every glyph outline, translated to its position, into |path|.
+ font.getPaths(glyphRun.glyphsIDs().data(), glyphRun.glyphsIDs().size(),
+ [](const SkPath* path, const SkMatrix& mx, void* ctx) {
+ Rec* rec = reinterpret_cast<Rec*>(ctx);
+ if (path) {
+ SkMatrix total = mx;
+ total.postTranslate(rec->fPos->fX + rec->fOffset.fX,
+ rec->fPos->fY + rec->fOffset.fY);
+ rec->fPath->addPath(*path, total);
+ }
+ rec->fPos += 1; // move to the next glyph's position
+ }, &rec);
+ this->internalDrawPath(this->cs(), this->localToDevice(), path, runPaint, true);
+
+ SkFont transparentFont = glyphRun.font();
+ transparentFont.setEmbolden(false); // Stop Recursion
+ sktext::GlyphRun tmpGlyphRun(glyphRun, transparentFont);
+
+ SkPaint transparent;
+ transparent.setColor(SK_ColorTRANSPARENT);
+
+ // Under perspective the transparent text layer is emitted with an identity
+ // transform (restored by |adr| on scope exit); otherwise draw as-is.
+ if (this->localToDevice().hasPerspective()) {
+ SkAutoDeviceTransformRestore adr(this, SkMatrix::I());
+ this->internalDrawGlyphRun(tmpGlyphRun, offset, transparent);
+ } else {
+ this->internalDrawGlyphRun(tmpGlyphRun, offset, transparent);
+ }
+ }
+
+ // Decide whether |glyph| requires switching to a different SkPDFFont:
+ // either no font is active / the font lacks the glyph, or the glyph's
+ // outline availability disagrees with the font's Type3 conversion status.
+ static bool needs_new_font(SkPDFFont* font, const SkGlyph* glyph,
+ SkAdvancedTypefaceMetrics::FontType fontType) {
+ if (!font || !font->hasGlyph(glyph->getGlyphID())) {
+ return true;
+ }
+ // kOther_Font typefaces are always Type3: no outline-based switching.
+ if (fontType == SkAdvancedTypefaceMetrics::kOther_Font) {
+ return false;
+ }
+ // Empty glyphs render the same either way.
+ if (glyph->isEmpty()) {
+ return false;
+ }
+
+ // A bitmap-only glyph needs a Type3 font; an outline glyph needs a
+ // non-Type3 font. Switch when the current font is the wrong kind.
+ bool bitmapOnly = nullptr == glyph->path();
+ bool convertedToType3 = (font->getType() == SkAdvancedTypefaceMetrics::kOther_Font);
+ return convertedToType3 != bitmapOnly;
+ }
+
+ // Emit |glyphRun| as real PDF text (BT/ET with Tj strings). Falls back to
+ // drawGlyphRunAsPath() for styles PDF text can't express. Emits /ActualText
+ // spans when the glyph<->unicode mapping alone would not reproduce the
+ // original text, and rejects glyphs outside the clip bounds glyph-by-glyph.
+ void SkPDFDevice::internalDrawGlyphRun(
+ const sktext::GlyphRun& glyphRun, SkPoint offset, const SkPaint& runPaint) {
+
+ const SkGlyphID* glyphIDs = glyphRun.glyphsIDs().data();
+ uint32_t glyphCount = SkToU32(glyphRun.glyphsIDs().size());
+ const SkFont& glyphRunFont = glyphRun.font();
+
+ if (!glyphCount || !glyphIDs || glyphRunFont.getSize() <= 0 || this->hasEmptyClip()) {
+ return;
+ }
+ if (runPaint.getPathEffect()
+ || runPaint.getMaskFilter()
+ || glyphRunFont.isEmbolden()
+ || this->localToDevice().hasPerspective()
+ || SkPaint::kFill_Style != runPaint.getStyle()) {
+ // Stroked Text doesn't work well with Type3 fonts.
+ this->drawGlyphRunAsPath(glyphRun, offset, runPaint);
+ return;
+ }
+ SkTypeface* typeface = glyphRunFont.getTypefaceOrDefault();
+ if (!typeface) {
+ SkDebugf("SkPDF: SkTypeface::MakeDefault() returned nullptr.\n");
+ return;
+ }
+
+ const SkAdvancedTypefaceMetrics* metrics = SkPDFFont::GetMetrics(typeface, fDocument);
+ if (!metrics) {
+ return;
+ }
+ SkAdvancedTypefaceMetrics::FontType fontType = SkPDFFont::FontType(*typeface, *metrics);
+
+ const std::vector<SkUnichar>& glyphToUnicode = SkPDFFont::GetUnicodeMap(typeface, fDocument);
+
+ SkClusterator clusterator(glyphRun);
+
+ int emSize;
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePDFVector(*typeface, &emSize);
+
+ // Scale factors from font units (emSize) to text space.
+ SkScalar textSize = glyphRunFont.getSize();
+ SkScalar advanceScale = textSize * glyphRunFont.getScaleX() / emSize;
+
+ // textScaleX and textScaleY are used to get a conservative bounding box for glyphs.
+ SkScalar textScaleY = textSize / emSize;
+ SkScalar textScaleX = advanceScale + glyphRunFont.getSkewX() * textScaleY;
+
+ SkRect clipStackBounds = this->cs().bounds(this->bounds());
+
+ SkTCopyOnFirstWrite<SkPaint> paint(clean_paint(runPaint));
+ ScopedContentEntry content(this, *paint, glyphRunFont.getScaleX());
+ if (!content) {
+ return;
+ }
+ SkDynamicMemoryWStream* out = content.stream();
+
+ // Open a PDF text object; closed on every exit path by the scope guard.
+ out->writeText("BT\n");
+ SK_AT_SCOPE_EXIT(out->writeText("ET\n"));
+
+ ScopedOutputMarkedContentTags mark(fNodeId, fDocument, out);
+
+ const int numGlyphs = typeface->countGlyphs();
+
+ if (clusterator.reversedChars()) {
+ out->writeText("/ReversedChars BMC\n");
+ }
+ SK_AT_SCOPE_EXIT(if (clusterator.reversedChars()) { out->writeText("EMC\n"); } );
+ GlyphPositioner glyphPositioner(out, glyphRunFont.getSkewX(), offset);
+ SkPDFFont* font = nullptr;
+
+ SkBulkGlyphMetricsAndPaths paths{strikeSpec};
+ auto glyphs = paths.glyphs(glyphRun.glyphsIDs());
+
+ // Process one text cluster (one or more glyphs mapping to one or more
+ // code points) per iteration.
+ while (SkClusterator::Cluster c = clusterator.next()) {
+ int index = c.fGlyphIndex;
+ int glyphLimit = index + c.fGlyphCount;
+
+ bool actualText = false;
+ SK_AT_SCOPE_EXIT(if (actualText) {
+ glyphPositioner.flush();
+ out->writeText("EMC\n");
+ });
+ if (c.fUtf8Text) { // real cluster
+ // Check if `/ActualText` needed.
+ const char* textPtr = c.fUtf8Text;
+ const char* textEnd = c.fUtf8Text + c.fTextByteLength;
+ SkUnichar unichar = SkUTF::NextUTF8(&textPtr, textEnd);
+ if (unichar < 0) {
+ return;
+ }
+ if (textPtr < textEnd || // >1 code points in cluster
+ c.fGlyphCount > 1 || // >1 glyphs in cluster
+ unichar != map_glyph(glyphToUnicode, glyphIDs[index])) // 1:1 but wrong mapping
+ {
+ glyphPositioner.flush();
+ out->writeText("/Span<</ActualText ");
+ SkPDFWriteTextString(out, c.fUtf8Text, c.fTextByteLength);
+ out->writeText(" >> BDC\n"); // begin marked-content sequence
+ // with an associated property list.
+ actualText = true;
+ }
+ }
+ for (; index < glyphLimit; ++index) {
+ SkGlyphID gid = glyphIDs[index];
+ if (numGlyphs <= gid) {
+ continue;
+ }
+ SkPoint xy = glyphRun.positions()[index];
+ // Do a glyph-by-glyph bounds-reject if positions are absolute.
+ SkRect glyphBounds = get_glyph_bounds_device_space(
+ glyphs[index], textScaleX, textScaleY,
+ xy + offset, this->localToDevice());
+ if (glyphBounds.isEmpty()) {
+ if (!contains(clipStackBounds, {glyphBounds.x(), glyphBounds.y()})) {
+ continue;
+ }
+ } else {
+ if (!clipStackBounds.intersects(glyphBounds)) {
+ continue; // reject glyphs as out of bounds
+ }
+ }
+ if (needs_new_font(font, glyphs[index], fontType)) {
+ // Not yet specified font or need to switch font.
+ font = SkPDFFont::GetFontResource(fDocument, glyphs[index], typeface);
+ SkASSERT(font); // All preconditions for SkPDFFont::GetFontResource are met.
+ glyphPositioner.setFont(font);
+ SkPDFWriteResourceName(out, SkPDFResourceType::kFont,
+ add_resource(fFontResources, font->indirectReference()));
+ out->writeText(" ");
+ SkPDFUtils::AppendScalar(textSize, out);
+ out->writeText(" Tf\n");
+
+ }
+ font->noteGlyphUsage(gid);
+ SkGlyphID encodedGlyph = font->glyphToPDFFontEncoding(gid);
+ SkScalar advance = advanceScale * glyphs[index]->advanceX();
+ glyphPositioner.writeGlyph(encodedGlyph, advance, xy);
+ }
+ }
+ }
+
+ // SkDevice override: draw each run in |glyphRunList| with |drawingPaint|.
+ // RSXForm glyph runs are not supported (asserted); |initialPaint| is unused
+ // here.
+ void SkPDFDevice::onDrawGlyphRunList(SkCanvas*,
+ const sktext::GlyphRunList& glyphRunList,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint) {
+ SkASSERT(!glyphRunList.hasRSXForm());
+ for (const sktext::GlyphRun& glyphRun : glyphRunList) {
+ this->internalDrawGlyphRun(glyphRun, glyphRunList.origin(), drawingPaint);
+ }
+ }
+
+ // SkDevice override: vertex meshes are not yet supported in PDF output;
+ // currently a no-op.
+ void SkPDFDevice::drawVertices(const SkVertices*, sk_sp<SkBlender>, const SkPaint&, bool) {
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ // TODO: implement drawVertices
+ }
+
+ #ifdef SK_ENABLE_SKSL
+ // SkDevice override: SkMesh drawing is not yet supported in PDF output;
+ // currently a no-op.
+ void SkPDFDevice::drawMesh(const SkMesh&, sk_sp<SkBlender>, const SkPaint&) {
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ // TODO: implement drawMesh
+ }
+ #endif
+
+ // Register |xObject| in this device's XObject resources and emit a "Do"
+ // operator invoking it, wrapped in marked-content tags when tagging is on.
+ void SkPDFDevice::drawFormXObject(SkPDFIndirectReference xObject, SkDynamicMemoryWStream* content) {
+ ScopedOutputMarkedContentTags mark(fNodeId, fDocument, content);
+
+ SkASSERT(xObject);
+ SkPDFWriteResourceName(content, SkPDFResourceType::kXObject,
+ add_resource(fXObjectResources, xObject));
+ content->writeText(" Do\n");
+ }
+
+ // SkDevice override: intermediate surfaces for a PDF device are plain
+ // raster surfaces.
+ sk_sp<SkSurface> SkPDFDevice::makeSurface(const SkImageInfo& info, const SkSurfaceProps& props) {
+ return SkSurface::MakeRaster(info, &props);
+ }
+
+ // Copy |src| into a vector ordered by object number, so resource dicts are
+ // emitted deterministically regardless of hash-set iteration order.
+ static std::vector<SkPDFIndirectReference> sort(const SkTHashSet<SkPDFIndirectReference>& src) {
+ std::vector<SkPDFIndirectReference> dst;
+ dst.reserve(src.count());
+ for (SkPDFIndirectReference ref : src) {
+ dst.push_back(ref);
+ }
+ std::sort(dst.begin(), dst.end(),
+ [](SkPDFIndirectReference a, SkPDFIndirectReference b) { return a.fValue < b.fValue; });
+ return dst;
+ }
+
+ // Build the /Resources dictionary for this device's content stream from the
+ // four accumulated resource sets, each sorted for deterministic output.
+ std::unique_ptr<SkPDFDict> SkPDFDevice::makeResourceDict() {
+ return SkPDFMakeResourceDict(sort(fGraphicStateResources),
+ sort(fShaderResources),
+ sort(fXObjectResources),
+ sort(fFontResources));
+ }
+
+ // Finalize and return this device's accumulated content stream. Drains any
+ // pending graphics-stack state, prepends the initial (DPI) transform, and
+ // wraps the body in q/Q when an extra save was flagged. Resets fContent and
+ // fNeedsExtraSave as a side effect.
+ std::unique_ptr<SkStreamAsset> SkPDFDevice::content() {
+ if (fActiveStackState.fContentStream) {
+ fActiveStackState.drainStack();
+ fActiveStackState = SkPDFGraphicStackState();
+ }
+ if (fContent.bytesWritten() == 0) {
+ return std::make_unique<SkMemoryStream>();
+ }
+ SkDynamicMemoryWStream buffer;
+ if (fInitialTransform.getType() != SkMatrix::kIdentity_Mask) {
+ SkPDFUtils::AppendTransform(fInitialTransform, &buffer);
+ }
+ if (fNeedsExtraSave) {
+ buffer.writeText("q\n");
+ }
+ fContent.writeToAndReset(&buffer);
+ if (fNeedsExtraSave) {
+ buffer.writeText("Q\n");
+ }
+ fNeedsExtraSave = false;
+ return std::unique_ptr<SkStreamAsset>(buffer.detachAsStream());
+ }
+
+ /* Draws an inverse filled path by using Path Ops to compute the positive
+ * inverse using the current clip as the inverse bounds.
+ * Return true if this was an inverse path and was properly handled,
+ * otherwise returns false and the normal drawing routine should continue,
+ * either as a (incorrect) fallback or because the path was not inverse
+ * in the first place.
+ */
+ bool SkPDFDevice::handleInversePath(const SkPath& origPath,
+ const SkPaint& paint,
+ bool pathIsMutable) {
+ if (!origPath.isInverseFillType()) {
+ return false;
+ }
+
+ if (this->hasEmptyClip()) {
+ return false;
+ }
+
+ SkPath modifiedPath;
+ SkPath* pathPtr = const_cast<SkPath*>(&origPath);
+ SkPaint noInversePaint(paint);
+
+ // Merge stroking operations into final path.
+ if (SkPaint::kStroke_Style == paint.getStyle() ||
+ SkPaint::kStrokeAndFill_Style == paint.getStyle()) {
+ bool doFillPath = skpathutils::FillPathWithPaint(origPath, paint, &modifiedPath);
+ if (doFillPath) {
+ noInversePaint.setStyle(SkPaint::kFill_Style);
+ noInversePaint.setStrokeWidth(0);
+ pathPtr = &modifiedPath;
+ } else {
+ // To be consistent with the raster output, hairline strokes
+ // are rendered as non-inverted.
+ modifiedPath.toggleInverseFillType();
+ this->internalDrawPath(this->cs(), this->localToDevice(), modifiedPath, paint, true);
+ return true;
+ }
+ }
+
+ // Get bounds of clip in current transform space
+ // (clip bounds are given in device space).
+ SkMatrix transformInverse;
+ SkMatrix totalMatrix = this->localToDevice();
+
+ if (!totalMatrix.invert(&transformInverse)) {
+ return false;
+ }
+ SkRect bounds = this->cs().bounds(this->bounds());
+ transformInverse.mapRect(&bounds);
+
+ // Extend the bounds by the line width (plus some padding)
+ // so the edge doesn't cause a visible stroke.
+ bounds.outset(paint.getStrokeWidth() + SK_Scalar1,
+ paint.getStrokeWidth() + SK_Scalar1);
+
+ // Subtract the path from the padded clip bounds; failure means the
+ // inverse could not be computed and the caller must fall back.
+ if (!calculate_inverse_path(bounds, *pathPtr, &modifiedPath)) {
+ return false;
+ }
+
+ this->internalDrawPath(this->cs(), this->localToDevice(), modifiedPath, noInversePaint, true);
+ return true;
+ }
+
+ // Convert this device's accumulated content into a Form XObject covering
+ // |bounds| and emit it into the document. |alpha| selects a DeviceGray
+ // color space (for soft masks). Resets the device afterwards; font usage is
+ // preserved since the xobject is always drawn back into this device.
+ SkPDFIndirectReference SkPDFDevice::makeFormXObjectFromDevice(SkIRect bounds, bool alpha) {
+ SkMatrix inverseTransform = SkMatrix::I();
+ if (!fInitialTransform.isIdentity()) {
+ if (!fInitialTransform.invert(&inverseTransform)) {
+ SkDEBUGFAIL("Layer initial transform should be invertible.");
+ inverseTransform.reset();
+ }
+ }
+ const char* colorSpace = alpha ? "DeviceGray" : nullptr;
+
+ SkPDFIndirectReference xobject =
+ SkPDFMakeFormXObject(fDocument, this->content(),
+ SkPDFMakeArray(bounds.left(), bounds.top(),
+ bounds.right(), bounds.bottom()),
+ this->makeResourceDict(), inverseTransform, colorSpace);
+ // We always draw the form xobjects that we create back into the device, so
+ // we simply preserve the font usage instead of pulling it out and merging
+ // it back in later.
+ this->reset();
+ return xobject;
+ }
+
+ // Convenience overload: form xobject covering the whole device.
+ SkPDFIndirectReference SkPDFDevice::makeFormXObjectFromDevice(bool alpha) {
+ return this->makeFormXObjectFromDevice(SkIRect{0, 0, this->width(), this->height()}, alpha);
+ }
+
+ // Draw |xObject| with |sMask| applied as an alpha soft mask under blend
+ // |mode|; |invertClip| inverts the mask. The soft mask is cleared again
+ // before the content entry closes.
+ void SkPDFDevice::drawFormXObjectWithMask(SkPDFIndirectReference xObject,
+ SkPDFIndirectReference sMask,
+ SkBlendMode mode,
+ bool invertClip) {
+ SkASSERT(sMask);
+ SkPaint paint;
+ paint.setBlendMode(mode);
+ ScopedContentEntry content(this, nullptr, SkMatrix::I(), paint);
+ if (!content) {
+ return;
+ }
+ this->setGraphicState(SkPDFGraphicState::GetSMaskGraphicState(
+ sMask, invertClip, SkPDFGraphicState::kAlpha_SMaskMode,
+ fDocument), content.stream());
+ this->drawFormXObject(xObject, content.stream());
+ this->clearMaskOnGraphicState(content.stream());
+ }
+
+
+ // True when |blendMode| maps directly to a named PDF blend mode and needs
+ // no src/dst form-xobject compositing tricks.
+ static bool treat_as_regular_pdf_blend_mode(SkBlendMode blendMode) {
+ return nullptr != SkPDFUtils::BlendModeName(blendMode);
+ }
+
+ // Translate |paint| (color or shader, alpha, blend mode) plus |matrix| and
+ // |clipStack| into a graphic-stack |entry|, creating and registering any
+ // needed PDF shader/graphic-state objects in the given resource sets.
+ static void populate_graphic_state_entry_from_paint(
+ SkPDFDocument* doc,
+ const SkMatrix& matrix,
+ const SkClipStack* clipStack,
+ SkIRect deviceBounds,
+ const SkPaint& paint,
+ const SkMatrix& initialTransform,
+ SkScalar textScale,
+ SkPDFGraphicStackState::Entry* entry,
+ SkTHashSet<SkPDFIndirectReference>* shaderResources,
+ SkTHashSet<SkPDFIndirectReference>* graphicStateResources) {
+ NOT_IMPLEMENTED(paint.getPathEffect() != nullptr, false);
+ NOT_IMPLEMENTED(paint.getMaskFilter() != nullptr, false);
+ NOT_IMPLEMENTED(paint.getColorFilter() != nullptr, false);
+
+ entry->fMatrix = matrix;
+ entry->fClipStackGenID = clipStack ? clipStack->getTopmostGenID()
+ : SkClipStack::kWideOpenGenID;
+ SkColor4f color = paint.getColor4f();
+ entry->fColor = {color.fR, color.fG, color.fB, 1};
+ entry->fShaderIndex = -1;
+
+ // PDF treats a shader as a color, so we only set one or the other.
+ SkShader* shader = paint.getShader();
+ if (shader) {
+ // note: we always present the alpha as 1 for the shader, knowing that it will be
+ // accounted for when we create our newGraphicsState (below)
+ if (as_SB(shader)->asGradient() == SkShaderBase::GradientType::kColor) {
+ // We don't have to set a shader just for a color.
+ SkShaderBase::GradientInfo gradientInfo;
+ SkColor gradientColor = SK_ColorBLACK;
+ gradientInfo.fColors = &gradientColor;
+ gradientInfo.fColorOffsets = nullptr;
+ gradientInfo.fColorCount = 1;
+ SkAssertResult(as_SB(shader)->asGradient(&gradientInfo) ==
+ SkShaderBase::GradientType::kColor);
+ color = SkColor4f::FromColor(gradientColor);
+ entry->fColor ={color.fR, color.fG, color.fB, 1};
+
+ } else {
+ // PDF positions patterns relative to the initial transform, so
+ // we need to apply the current transform to the shader parameters.
+ SkMatrix transform = matrix;
+ transform.postConcat(initialTransform);
+
+ // PDF doesn't support kClamp_TileMode, so we simulate it by making
+ // a pattern the size of the current clip.
+ SkRect clipStackBounds = clipStack ? clipStack->bounds(deviceBounds)
+ : SkRect::Make(deviceBounds);
+
+ // We need to apply the initial transform to bounds in order to get
+ // bounds in a consistent coordinate system.
+ initialTransform.mapRect(&clipStackBounds);
+ SkIRect bounds;
+ clipStackBounds.roundOut(&bounds);
+
+ auto c = paint.getColor4f();
+ SkPDFIndirectReference pdfShader = SkPDFMakeShader(doc, shader, transform, bounds,
+ {c.fR, c.fG, c.fB, 1.0f});
+
+ if (pdfShader) {
+ // pdfShader has been canonicalized so we can directly compare pointers.
+ entry->fShaderIndex = add_resource(*shaderResources, pdfShader);
+ }
+ }
+ }
+
+ // If the shader reduced to a single color above, the graphic state must be
+ // built from a paint carrying that color instead of the original.
+ SkPDFIndirectReference newGraphicState;
+ if (color == paint.getColor4f()) {
+ newGraphicState = SkPDFGraphicState::GetGraphicStateForPaint(doc, paint);
+ } else {
+ SkPaint newPaint = paint;
+ newPaint.setColor4f(color, nullptr);
+ newGraphicState = SkPDFGraphicState::GetGraphicStateForPaint(doc, newPaint);
+ }
+ entry->fGraphicStateIndex = add_resource(*graphicStateResources, newGraphicState);
+ entry->fTextScaleX = textScale;
+ }
+
+ // Begin a content entry for a draw with |paint| under |clipStack| and
+ // |matrix|. Regular PDF blend modes write into fContent directly; advanced
+ // modes snapshot existing content into |*dst| (a form xobject) and redirect
+ // drawing into fContentBuffer for later compositing in finishContentEntry().
+ // Returns the stream to draw into, or nullptr when the draw is a no-op.
+ SkDynamicMemoryWStream* SkPDFDevice::setUpContentEntry(const SkClipStack* clipStack,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ SkScalar textScale,
+ SkPDFIndirectReference* dst) {
+ SkASSERT(!*dst);
+ SkBlendMode blendMode = paint.getBlendMode_or(SkBlendMode::kSrcOver);
+
+ // Dst xfer mode doesn't draw source at all.
+ if (blendMode == SkBlendMode::kDst) {
+ return nullptr;
+ }
+
+ // For the following modes, we want to handle source and destination
+ // separately, so make an object of what's already there.
+ if (!treat_as_regular_pdf_blend_mode(blendMode) && blendMode != SkBlendMode::kDstOver) {
+ if (!isContentEmpty()) {
+ *dst = this->makeFormXObjectFromDevice();
+ SkASSERT(isContentEmpty());
+ } else if (blendMode != SkBlendMode::kSrc &&
+ blendMode != SkBlendMode::kSrcOut) {
+ // Except for Src and SrcOut, if there isn't anything already there,
+ // then we're done.
+ return nullptr;
+ }
+ }
+ // TODO(vandebo): Figure out how/if we can handle the following modes:
+ // Xor, Plus. For now, we treat them as SrcOver/Normal.
+
+ if (treat_as_regular_pdf_blend_mode(blendMode)) {
+ if (!fActiveStackState.fContentStream) {
+ if (fContent.bytesWritten() != 0) {
+ fContent.writeText("Q\nq\n");
+ fNeedsExtraSave = true;
+ }
+ fActiveStackState = SkPDFGraphicStackState(&fContent);
+ } else {
+ // Was `=` (assignment) — a side-effecting assert that only ran in
+ // debug builds; the intent is clearly an equality check.
+ SkASSERT(fActiveStackState.fContentStream == &fContent);
+ }
+ } else {
+ fActiveStackState.drainStack();
+ fActiveStackState = SkPDFGraphicStackState(&fContentBuffer);
+ }
+ SkASSERT(fActiveStackState.fContentStream);
+ SkPDFGraphicStackState::Entry entry;
+ populate_graphic_state_entry_from_paint(
+ fDocument,
+ matrix,
+ clipStack,
+ this->bounds(),
+ paint,
+ fInitialTransform,
+ textScale,
+ &entry,
+ &fShaderResources,
+ &fGraphicStateResources);
+ fActiveStackState.updateClip(clipStack, this->bounds());
+ fActiveStackState.updateMatrix(entry.fMatrix);
+ fActiveStackState.updateDrawingState(entry);
+
+ return fActiveStackState.fContentStream;
+ }
+
+ // Close the content entry opened by setUpContentEntry(). For regular PDF
+ // blend modes this is a no-op; for advanced modes it composites the source
+ // (buffered in fContentBuffer / |srcFormXObject|) against the destination
+ // snapshot |dst| using masked form-xobject draws. |shape| (optional) is the
+ // outline of what was drawn, needed to emulate some modes correctly.
+ void SkPDFDevice::finishContentEntry(const SkClipStack* clipStack,
+ SkBlendMode blendMode,
+ SkPDFIndirectReference dst,
+ SkPath* shape) {
+ SkASSERT(blendMode != SkBlendMode::kDst);
+ if (treat_as_regular_pdf_blend_mode(blendMode)) {
+ SkASSERT(!dst);
+ return;
+ }
+
+ SkASSERT(fActiveStackState.fContentStream);
+
+ fActiveStackState.drainStack();
+ fActiveStackState = SkPDFGraphicStackState();
+
+ // DstOver: buffered source content simply goes underneath (prepended to)
+ // the existing content.
+ if (blendMode == SkBlendMode::kDstOver) {
+ SkASSERT(!dst);
+ if (fContentBuffer.bytesWritten() != 0) {
+ if (fContent.bytesWritten() != 0) {
+ fContentBuffer.writeText("Q\nq\n");
+ fNeedsExtraSave = true;
+ }
+ fContentBuffer.prependToAndReset(&fContent);
+ SkASSERT(fContentBuffer.bytesWritten() == 0);
+ }
+ return;
+ }
+ // Otherwise append the buffered source content to the main stream.
+ if (fContentBuffer.bytesWritten() != 0) {
+ if (fContent.bytesWritten() != 0) {
+ fContent.writeText("Q\nq\n");
+ fNeedsExtraSave = true;
+ }
+ fContentBuffer.writeToAndReset(&fContent);
+ SkASSERT(fContentBuffer.bytesWritten() == 0);
+ }
+
+ if (!dst) {
+ SkASSERT(blendMode == SkBlendMode::kSrc ||
+ blendMode == SkBlendMode::kSrcOut);
+ return;
+ }
+
+ SkASSERT(dst);
+ // Changing the current content into a form-xobject will destroy the clip
+ // objects which is fine since the xobject will already be clipped. However
+ // if source has shape, we need to clip it too, so a copy of the clip is
+ // saved.
+
+ SkPaint stockPaint;
+
+ SkPDFIndirectReference srcFormXObject;
+ if (this->isContentEmpty()) {
+ // If nothing was drawn and there's no shape, then the draw was a
+ // no-op, but dst needs to be restored for that to be true.
+ // If there is shape, then an empty source with Src, SrcIn, SrcOut,
+ // DstIn, DstAtop or Modulate reduces to Clear and DstOut or SrcAtop
+ // reduces to Dst.
+ if (shape == nullptr || blendMode == SkBlendMode::kDstOut ||
+ blendMode == SkBlendMode::kSrcATop) {
+ ScopedContentEntry content(this, nullptr, SkMatrix::I(), stockPaint);
+ this->drawFormXObject(dst, content.stream());
+ return;
+ } else {
+ blendMode = SkBlendMode::kClear;
+ }
+ } else {
+ srcFormXObject = this->makeFormXObjectFromDevice();
+ }
+
+ // TODO(vandebo) srcFormXObject may contain alpha, but here we want it
+ // without alpha.
+ if (blendMode == SkBlendMode::kSrcATop) {
+ // TODO(vandebo): In order to properly support SrcATop we have to track
+ // the shape of what's been drawn at all times. It's the intersection of
+ // the non-transparent parts of the device and the outlines (shape) of
+ // all images and devices drawn.
+ this->drawFormXObjectWithMask(srcFormXObject, dst, SkBlendMode::kSrcOver, true);
+ } else {
+ if (shape != nullptr) {
+ // Draw shape into a form-xobject.
+ SkPaint filledPaint;
+ filledPaint.setColor(SK_ColorBLACK);
+ filledPaint.setStyle(SkPaint::kFill_Style);
+ SkClipStack empty;
+ SkPDFDevice shapeDev(this->size(), fDocument, fInitialTransform);
+ shapeDev.internalDrawPath(clipStack ? *clipStack : empty,
+ SkMatrix::I(), *shape, filledPaint, true);
+ this->drawFormXObjectWithMask(dst, shapeDev.makeFormXObjectFromDevice(),
+ SkBlendMode::kSrcOver, true);
+ } else {
+ this->drawFormXObjectWithMask(dst, srcFormXObject, SkBlendMode::kSrcOver, true);
+ }
+ }
+
+ if (blendMode == SkBlendMode::kClear) {
+ return;
+ } else if (blendMode == SkBlendMode::kSrc ||
+ blendMode == SkBlendMode::kDstATop) {
+ ScopedContentEntry content(this, nullptr, SkMatrix::I(), stockPaint);
+ if (content) {
+ this->drawFormXObject(srcFormXObject, content.stream());
+ }
+ if (blendMode == SkBlendMode::kSrc) {
+ return;
+ }
+ } else if (blendMode == SkBlendMode::kSrcATop) {
+ ScopedContentEntry content(this, nullptr, SkMatrix::I(), stockPaint);
+ if (content) {
+ this->drawFormXObject(dst, content.stream());
+ }
+ }
+
+ SkASSERT(blendMode == SkBlendMode::kSrcIn ||
+ blendMode == SkBlendMode::kDstIn ||
+ blendMode == SkBlendMode::kSrcOut ||
+ blendMode == SkBlendMode::kDstOut ||
+ blendMode == SkBlendMode::kSrcATop ||
+ blendMode == SkBlendMode::kDstATop ||
+ blendMode == SkBlendMode::kModulate);
+
+ if (blendMode == SkBlendMode::kSrcIn ||
+ blendMode == SkBlendMode::kSrcOut ||
+ blendMode == SkBlendMode::kSrcATop) {
+ this->drawFormXObjectWithMask(srcFormXObject, dst, SkBlendMode::kSrcOver,
+ blendMode == SkBlendMode::kSrcOut);
+ return;
+ } else {
+ SkBlendMode mode = SkBlendMode::kSrcOver;
+ if (blendMode == SkBlendMode::kModulate) {
+ this->drawFormXObjectWithMask(srcFormXObject, dst, SkBlendMode::kSrcOver, false);
+ mode = SkBlendMode::kMultiply;
+ }
+ this->drawFormXObjectWithMask(dst, srcFormXObject, mode, blendMode == SkBlendMode::kDstOut);
+ return;
+ }
+ }
+
+ // True when neither the main content stream nor the side buffer used by
+ // advanced blend modes has been written to.
+ bool SkPDFDevice::isContentEmpty() {
+ return fContent.bytesWritten() == 0 && fContentBuffer.bytesWritten() == 0;
+ }
+
+ // Width/height of |r| as an SkSize.
+ static SkSize rect_to_size(const SkRect& r) { return {r.width(), r.height()}; }
+
+ // Return a copy of |image| with |colorFilter| baked in, by rasterizing it
+ // onto a same-sized N32 premul surface.
+ static sk_sp<SkImage> color_filter(const SkImage* image,
+ SkColorFilter* colorFilter) {
+ auto surface =
+ SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(image->dimensions()));
+ SkASSERT(surface);
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorTRANSPARENT);
+ SkPaint paint;
+ paint.setColorFilter(sk_ref_sp(colorFilter));
+ canvas->drawImage(image, 0, 0, SkSamplingOptions(), &paint);
+ return surface->makeImageSnapshot();
+ }
+
+////////////////////////////////////////////////////////////////////////////////
+
+ // True when |x| has no fractional part.
+ static bool is_integer(SkScalar x) {
+ return x == SkScalarTruncToScalar(x);
+ }
+
+ // True when all four edges of |r| lie on integer coordinates (no sub-pixel
+ // clipping needed).
+ static bool is_integral(const SkRect& r) {
+ return is_integer(r.left()) &&
+ is_integer(r.top()) &&
+ is_integer(r.right()) &&
+ is_integer(r.bottom());
+ }
+
+void SkPDFDevice::internalDrawImageRect(SkKeyedImage imageSubset,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkSamplingOptions& sampling,
+ const SkPaint& srcPaint,
+ const SkMatrix& ctm) {
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ if (!imageSubset) {
+ return;
+ }
+
+ // First, figure out the src->dst transform and subset the image if needed.
+ SkIRect bounds = imageSubset.image()->bounds();
+ SkRect srcRect = src ? *src : SkRect::Make(bounds);
+ SkMatrix transform = SkMatrix::RectToRect(srcRect, dst);
+ if (src && *src != SkRect::Make(bounds)) {
+ if (!srcRect.intersect(SkRect::Make(bounds))) {
+ return;
+ }
+ srcRect.roundOut(&bounds);
+ transform.preTranslate(SkIntToScalar(bounds.x()),
+ SkIntToScalar(bounds.y()));
+ if (bounds != imageSubset.image()->bounds()) {
+ imageSubset = imageSubset.subset(bounds);
+ }
+ if (!imageSubset) {
+ return;
+ }
+ }
+
+ // If the image is opaque and the paint's alpha is too, replace
+ // kSrc blendmode with kSrcOver. http://crbug.com/473572
+ SkTCopyOnFirstWrite<SkPaint> paint(srcPaint);
+ if (!paint->isSrcOver() &&
+ imageSubset.image()->isOpaque() &&
+ kSrcOver_SkXfermodeInterpretation == SkInterpretXfermode(*paint, false))
+ {
+ paint.writable()->setBlendMode(SkBlendMode::kSrcOver);
+ }
+
+ // Alpha-only images need to get their color from the shader, before
+ // applying the colorfilter.
+ if (imageSubset.image()->isAlphaOnly() && paint->getColorFilter()) {
+ // must blend alpha image and shader before applying colorfilter.
+ auto surface =
+ SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(imageSubset.image()->dimensions()));
+ SkCanvas* canvas = surface->getCanvas();
+ SkPaint tmpPaint;
+ // In the case of alpha images with shaders, the shader's coordinate
+ // system is the image's coordiantes.
+ tmpPaint.setShader(sk_ref_sp(paint->getShader()));
+ tmpPaint.setColor4f(paint->getColor4f(), nullptr);
+ canvas->clear(0x00000000);
+ canvas->drawImage(imageSubset.image().get(), 0, 0, sampling, &tmpPaint);
+ if (paint->getShader() != nullptr) {
+ paint.writable()->setShader(nullptr);
+ }
+ imageSubset = SkKeyedImage(surface->makeImageSnapshot());
+ SkASSERT(!imageSubset.image()->isAlphaOnly());
+ }
+
+ if (imageSubset.image()->isAlphaOnly()) {
+ // The ColorFilter applies to the paint color/shader, not the alpha layer.
+ SkASSERT(nullptr == paint->getColorFilter());
+
+ sk_sp<SkImage> mask = alpha_image_to_greyscale_image(imageSubset.image().get());
+ if (!mask) {
+ return;
+ }
+ // PDF doesn't seem to allow masking vector graphics with an Image XObject.
+ // Must mask with a Form XObject.
+ sk_sp<SkPDFDevice> maskDevice = this->makeCongruentDevice();
+ {
+ SkCanvas canvas(maskDevice);
+ // This clip prevents the mask image shader from covering
+ // entire device if unnecessary.
+ canvas.clipRect(this->cs().bounds(this->bounds()));
+ canvas.concat(ctm);
+ if (paint->getMaskFilter()) {
+ SkPaint tmpPaint;
+ tmpPaint.setShader(mask->makeShader(SkSamplingOptions(), transform));
+ tmpPaint.setMaskFilter(sk_ref_sp(paint->getMaskFilter()));
+ canvas.drawRect(dst, tmpPaint);
+ } else {
+ if (src && !is_integral(*src)) {
+ canvas.clipRect(dst);
+ }
+ canvas.concat(transform);
+ canvas.drawImage(mask, 0, 0);
+ }
+ }
+ SkIRect maskDeviceBounds = maskDevice->cs().bounds(maskDevice->bounds()).roundOut();
+ if (!ctm.isIdentity() && paint->getShader()) {
+ transform_shader(paint.writable(), ctm); // Since we are using identity matrix.
+ }
+ ScopedContentEntry content(this, &this->cs(), SkMatrix::I(), *paint);
+ if (!content) {
+ return;
+ }
+ this->setGraphicState(SkPDFGraphicState::GetSMaskGraphicState(
+ maskDevice->makeFormXObjectFromDevice(maskDeviceBounds, true), false,
+ SkPDFGraphicState::kLuminosity_SMaskMode, fDocument), content.stream());
+ SkPDFUtils::AppendRectangle(SkRect::Make(this->size()), content.stream());
+ SkPDFUtils::PaintPath(SkPaint::kFill_Style, SkPathFillType::kWinding, content.stream());
+ this->clearMaskOnGraphicState(content.stream());
+ return;
+ }
+ if (paint->getMaskFilter()) {
+ paint.writable()->setShader(imageSubset.image()->makeShader(SkSamplingOptions(),
+ transform));
+ SkPath path = SkPath::Rect(dst); // handles non-integral clipping.
+ this->internalDrawPath(this->cs(), this->localToDevice(), path, *paint, true);
+ return;
+ }
+ transform.postConcat(ctm);
+
+ bool needToRestore = false;
+ if (src && !is_integral(*src)) {
+ // Need sub-pixel clipping to fix https://bug.skia.org/4374
+ this->cs().save();
+ this->cs().clipRect(dst, ctm, SkClipOp::kIntersect, true);
+ needToRestore = true;
+ }
+ SK_AT_SCOPE_EXIT(if (needToRestore) { this->cs().restore(); });
+
+ SkMatrix matrix = transform;
+
+ // Rasterize the bitmap using perspective in a new bitmap.
+ if (transform.hasPerspective()) {
+ // Transform the bitmap in the new space, without taking into
+ // account the initial transform.
+ SkRect imageBounds = SkRect::Make(imageSubset.image()->bounds());
+ SkPath perspectiveOutline = SkPath::Rect(imageBounds).makeTransform(transform);
+
+ // Retrieve the bounds of the new shape.
+ SkRect outlineBounds = perspectiveOutline.getBounds();
+ if (!outlineBounds.intersect(SkRect::Make(this->devClipBounds()))) {
+ return;
+ }
+
+ // Transform the bitmap in the new space to the final space, to account for DPI
+ SkRect physicalBounds = fInitialTransform.mapRect(outlineBounds);
+ SkScalar scaleX = physicalBounds.width() / outlineBounds.width();
+ SkScalar scaleY = physicalBounds.height() / outlineBounds.height();
+
+ // TODO(edisonn): A better approach would be to use a bitmap shader
+ // (in clamp mode) and draw a rect over the entire bounding box. Then
+ // intersect perspectiveOutline to the clip. That will avoid introducing
+ // alpha to the image while still giving good behavior at the edge of
+ // the image. Avoiding alpha will reduce the pdf size and generation
+ // CPU time some.
+
+ SkISize wh = rect_to_size(physicalBounds).toCeil();
+
+ auto surface = SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(wh));
+ if (!surface) {
+ return;
+ }
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorTRANSPARENT);
+
+ SkScalar deltaX = outlineBounds.left();
+ SkScalar deltaY = outlineBounds.top();
+
+ SkMatrix offsetMatrix = transform;
+ offsetMatrix.postTranslate(-deltaX, -deltaY);
+ offsetMatrix.postScale(scaleX, scaleY);
+
+ // Translate the draw in the new canvas, so we perfectly fit the
+ // shape in the bitmap.
+ canvas->setMatrix(offsetMatrix);
+ canvas->drawImage(imageSubset.image(), 0, 0);
+ // Make sure the final bits are in the bitmap.
+ surface->flushAndSubmit();
+
+ // In the new space, we use the identity matrix translated
+ // and scaled to reflect DPI.
+ matrix.setScale(1 / scaleX, 1 / scaleY);
+ matrix.postTranslate(deltaX, deltaY);
+
+ imageSubset = SkKeyedImage(surface->makeImageSnapshot());
+ if (!imageSubset) {
+ return;
+ }
+ }
+
+ SkMatrix scaled;
+ // Adjust for origin flip.
+ scaled.setScale(SK_Scalar1, -SK_Scalar1);
+ scaled.postTranslate(0, SK_Scalar1);
+ // Scale the image up from 1x1 to WxH.
+ SkIRect subset = imageSubset.image()->bounds();
+ scaled.postScale(SkIntToScalar(subset.width()),
+ SkIntToScalar(subset.height()));
+ scaled.postConcat(matrix);
+ ScopedContentEntry content(this, &this->cs(), scaled, *paint);
+ if (!content) {
+ return;
+ }
+ if (content.needShape()) {
+ SkPath shape = SkPath::Rect(SkRect::Make(subset)).makeTransform(matrix);
+ content.setShape(shape);
+ }
+ if (!content.needSource()) {
+ return;
+ }
+
+ if (SkColorFilter* colorFilter = paint->getColorFilter()) {
+ sk_sp<SkImage> img = color_filter(imageSubset.image().get(), colorFilter);
+ imageSubset = SkKeyedImage(std::move(img));
+ if (!imageSubset) {
+ return;
+ }
+ // TODO(halcanary): de-dupe this by caching filtered images.
+ // (maybe in the resource cache?)
+ }
+
+ SkBitmapKey key = imageSubset.key();
+ SkPDFIndirectReference* pdfimagePtr = fDocument->fPDFBitmapMap.find(key);
+ SkPDFIndirectReference pdfimage = pdfimagePtr ? *pdfimagePtr : SkPDFIndirectReference();
+ if (!pdfimagePtr) {
+ SkASSERT(imageSubset);
+ pdfimage = SkPDFSerializeImage(imageSubset.image().get(), fDocument,
+ fDocument->metadata().fEncodingQuality);
+ SkASSERT((key != SkBitmapKey{{0, 0, 0, 0}, 0}));
+ fDocument->fPDFBitmapMap.set(key, pdfimage);
+ }
+ SkASSERT(pdfimage != SkPDFIndirectReference());
+ this->drawFormXObject(pdfimage, content.stream());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+
+void SkPDFDevice::drawDevice(SkBaseDevice* device, const SkSamplingOptions& sampling,
+ const SkPaint& paint) {
+ SkASSERT(!paint.getImageFilter());
+ SkASSERT(!paint.getMaskFilter());
+
+ // Check if the source device is really a bitmapdevice (because that's what we returned
+ // from createDevice (an image filter would go through drawSpecial, but createDevice uses
+ // a raster device to apply color filters, too).
+ SkPixmap pmap;
+ if (device->peekPixels(&pmap)) {
+ this->INHERITED::drawDevice(device, sampling, paint);
+ return;
+ }
+
+ // our onCreateCompatibleDevice() always creates SkPDFDevice subclasses.
+ SkPDFDevice* pdfDevice = static_cast<SkPDFDevice*>(device);
+
+ if (pdfDevice->isContentEmpty()) {
+ return;
+ }
+
+ SkMatrix matrix = device->getRelativeTransform(*this);
+ ScopedContentEntry content(this, &this->cs(), matrix, paint);
+ if (!content) {
+ return;
+ }
+ if (content.needShape()) {
+ SkPath shape = SkPath::Rect(SkRect::Make(device->imageInfo().dimensions()));
+ shape.transform(matrix);
+ content.setShape(shape);
+ }
+ if (!content.needSource()) {
+ return;
+ }
+ this->drawFormXObject(pdfDevice->makeFormXObjectFromDevice(), content.stream());
+}
+
+void SkPDFDevice::drawSpecial(SkSpecialImage* srcImg, const SkMatrix& localToDevice,
+ const SkSamplingOptions& sampling, const SkPaint& paint) {
+ if (this->hasEmptyClip()) {
+ return;
+ }
+ SkASSERT(!srcImg->isTextureBacked());
+ SkASSERT(!paint.getMaskFilter() && !paint.getImageFilter());
+
+ SkBitmap resultBM;
+ if (srcImg->getROPixels(&resultBM)) {
+ auto r = SkRect::MakeWH(resultBM.width(), resultBM.height());
+ this->internalDrawImageRect(SkKeyedImage(resultBM), nullptr, r, sampling, paint,
+ localToDevice);
+ }
+}
+
+sk_sp<SkSpecialImage> SkPDFDevice::makeSpecial(const SkBitmap& bitmap) {
+ return SkSpecialImage::MakeFromRaster(bitmap.bounds(), bitmap, this->surfaceProps());
+}
+
+sk_sp<SkSpecialImage> SkPDFDevice::makeSpecial(const SkImage* image) {
+ return SkSpecialImage::MakeFromImage(nullptr, image->bounds(), image->makeNonTextureImage(),
+ this->surfaceProps());
+}
+
+SkImageFilterCache* SkPDFDevice::getImageFilterCache() {
+ // We always return a transient cache, so it is freed after each
+ // filter traversal.
+ return SkImageFilterCache::Create(SkImageFilterCache::kDefaultTransientSize);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFDevice.h b/gfx/skia/skia/src/pdf/SkPDFDevice.h
new file mode 100644
index 0000000000..28ca1ca919
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDevice.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFDevice_DEFINED
+#define SkPDFDevice_DEFINED
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "src/core/SkClipStack.h"
+#include "src/core/SkClipStackDevice.h"
+#include "src/core/SkTHash.h"
+#include "src/core/SkTextBlobPriv.h"
+#include "src/pdf/SkKeyedImage.h"
+#include "src/pdf/SkPDFGraphicStackState.h"
+#include "src/pdf/SkPDFTypes.h"
+
+#include <vector>
+
+namespace sktext {
+class GlyphRun;
+class GlyphRunList;
+}
+
+class SkKeyedImage;
+class SkPDFArray;
+class SkPDFDevice;
+class SkPDFDict;
+class SkPDFDocument;
+class SkPDFFont;
+class SkPDFObject;
+class SkPath;
+class SkRRect;
+struct SkPDFIndirectReference;
+
+/**
+ * \class SkPDFDevice
+ *
+ * An SkPDFDevice is the drawing context for a page or layer of PDF
+ * content.
+ */
+class SkPDFDevice final : public SkClipStackDevice {
+public:
+ /**
+ * @param pageSize Page size in point units.
+ * 1 point == 127/360 mm == 1/72 inch
+ * @param document A non-null pointer back to the
+ * PDFDocument object. The document is responsible for
+ * de-duplicating across pages (via the SkPDFDocument) and
+ * for early serializing of large immutable objects, such
+ * as images (via SkPDFDocument::serialize()).
+ * @param initialTransform Transform to be applied to the entire page.
+ */
+ SkPDFDevice(SkISize pageSize, SkPDFDocument* document,
+ const SkMatrix& initialTransform = SkMatrix::I());
+
+ sk_sp<SkPDFDevice> makeCongruentDevice() {
+ return sk_make_sp<SkPDFDevice>(this->size(), fDocument);
+ }
+
+ ~SkPDFDevice() override;
+
+ /**
+ * These are called inside the per-device-layer loop for each draw call.
+ * When these are called, we have already applied any saveLayer
+ * operations, and are handling any looping from the paint.
+ */
+ void drawPaint(const SkPaint& paint) override;
+ void drawPoints(SkCanvas::PointMode mode,
+ size_t count, const SkPoint[],
+ const SkPaint& paint) override;
+ void drawRect(const SkRect& r, const SkPaint& paint) override;
+ void drawOval(const SkRect& oval, const SkPaint& paint) override;
+ void drawRRect(const SkRRect& rr, const SkPaint& paint) override;
+ void drawPath(const SkPath& origpath, const SkPaint& paint, bool pathIsMutable) override;
+
+ void drawImageRect(const SkImage*,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkSamplingOptions&,
+ const SkPaint&,
+ SkCanvas::SrcRectConstraint) override;
+ void onDrawGlyphRunList(SkCanvas*,
+ const sktext::GlyphRunList&,
+ const SkPaint& initialPaint,
+ const SkPaint& drawingPaint) override;
+ void drawVertices(const SkVertices*, sk_sp<SkBlender>, const SkPaint&, bool) override;
+#ifdef SK_ENABLE_SKSL
+ void drawMesh(const SkMesh&, sk_sp<SkBlender>, const SkPaint&) override;
+#endif
+
+ // PDF specific methods.
+ void drawSprite(const SkBitmap& bitmap, int x, int y,
+ const SkPaint& paint);
+
+ /** Create the resource dictionary for this device. Destructive. */
+ std::unique_ptr<SkPDFDict> makeResourceDict();
+
+ /** Returns a SkStream with the page contents.
+ */
+ std::unique_ptr<SkStreamAsset> content();
+
+ SkISize size() const { return this->imageInfo().dimensions(); }
+ SkIRect bounds() const { return this->imageInfo().bounds(); }
+
+ const SkMatrix& initialTransform() const { return fInitialTransform; }
+
+protected:
+ sk_sp<SkSurface> makeSurface(const SkImageInfo&, const SkSurfaceProps&) override;
+
+ void drawAnnotation(const SkRect&, const char key[], SkData* value) override;
+
+ void drawDevice(SkBaseDevice*, const SkSamplingOptions&, const SkPaint&) override;
+ void drawSpecial(SkSpecialImage*, const SkMatrix&, const SkSamplingOptions&,
+ const SkPaint&) override;
+
+ sk_sp<SkSpecialImage> makeSpecial(const SkBitmap&) override;
+ sk_sp<SkSpecialImage> makeSpecial(const SkImage*) override;
+ SkImageFilterCache* getImageFilterCache() override;
+
+private:
+ // TODO(vandebo): push most of SkPDFDevice's state into a core object in
+ // order to get the right access levels without using friend.
+ friend class ScopedContentEntry;
+
+ SkMatrix fInitialTransform;
+
+ SkTHashSet<SkPDFIndirectReference> fGraphicStateResources;
+ SkTHashSet<SkPDFIndirectReference> fXObjectResources;
+ SkTHashSet<SkPDFIndirectReference> fShaderResources;
+ SkTHashSet<SkPDFIndirectReference> fFontResources;
+ int fNodeId;
+
+ SkDynamicMemoryWStream fContent;
+ SkDynamicMemoryWStream fContentBuffer;
+ bool fNeedsExtraSave = false;
+ SkPDFGraphicStackState fActiveStackState;
+ SkPDFDocument* fDocument;
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ SkBaseDevice* onCreateDevice(const CreateInfo&, const SkPaint*) override;
+
+ // Set alpha to true if making a transparency group form x-objects.
+ SkPDFIndirectReference makeFormXObjectFromDevice(bool alpha = false);
+ SkPDFIndirectReference makeFormXObjectFromDevice(SkIRect bbox, bool alpha = false);
+
+ void drawFormXObjectWithMask(SkPDFIndirectReference xObject,
+ SkPDFIndirectReference sMask,
+ SkBlendMode,
+ bool invertClip);
+
+ // If the paint or clip is such that we shouldn't draw anything, this
+ // returns nullptr and does not create a content entry.
+ // setUpContentEntry and finishContentEntry can be used directly, but
+ // the preferred method is to use the ScopedContentEntry helper class.
+ SkDynamicMemoryWStream* setUpContentEntry(const SkClipStack* clipStack,
+ const SkMatrix& matrix,
+ const SkPaint& paint,
+ SkScalar,
+ SkPDFIndirectReference* dst);
+ void finishContentEntry(const SkClipStack*, SkBlendMode, SkPDFIndirectReference, SkPath*);
+ bool isContentEmpty();
+
+ void internalDrawGlyphRun(
+ const sktext::GlyphRun& glyphRun, SkPoint offset, const SkPaint& runPaint);
+ void drawGlyphRunAsPath(
+ const sktext::GlyphRun& glyphRun, SkPoint offset, const SkPaint& runPaint);
+
+ void internalDrawImageRect(SkKeyedImage,
+ const SkRect* src,
+ const SkRect& dst,
+ const SkSamplingOptions&,
+ const SkPaint&,
+ const SkMatrix& canvasTransformationMatrix);
+
+ void internalDrawPath(const SkClipStack&,
+ const SkMatrix&,
+ const SkPath&,
+ const SkPaint&,
+ bool pathIsMutable);
+
+ void internalDrawPathWithFilter(const SkClipStack& clipStack,
+ const SkMatrix& ctm,
+ const SkPath& origPath,
+ const SkPaint& paint);
+
+ bool handleInversePath(const SkPath& origPath, const SkPaint& paint, bool pathIsMutable);
+
+ void clearMaskOnGraphicState(SkDynamicMemoryWStream*);
+ void setGraphicState(SkPDFIndirectReference gs, SkDynamicMemoryWStream*);
+ void drawFormXObject(SkPDFIndirectReference xObject, SkDynamicMemoryWStream*);
+
+ bool hasEmptyClip() const { return this->cs().isEmpty(this->bounds()); }
+
+ void reset();
+
+ using INHERITED = SkClipStackDevice;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFDocument.cpp b/gfx/skia/skia/src/pdf/SkPDFDocument.cpp
new file mode 100644
index 0000000000..3946f6054a
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDocument.cpp
@@ -0,0 +1,641 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/docs/SkPDFDocument.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+
+#include "include/core/SkStream.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkUTF.h"
+#include "src/pdf/SkPDFDevice.h"
+#include "src/pdf/SkPDFFont.h"
+#include "src/pdf/SkPDFGradientShader.h"
+#include "src/pdf/SkPDFGraphicState.h"
+#include "src/pdf/SkPDFShader.h"
+#include "src/pdf/SkPDFTag.h"
+#include "src/pdf/SkPDFUtils.h"
+
+#include <utility>
+
+// For use in SkCanvas::drawAnnotation
+const char* SkPDFGetNodeIdKey() {
+ static constexpr char key[] = "PDF_Node_Key";
+ return key;
+}
+
+static SkString ToValidUtf8String(const SkData& d) {
+ if (d.size() == 0) {
+ SkDEBUGFAIL("Not a valid string, data length is zero.");
+ return SkString();
+ }
+
+ const char* c_str = static_cast<const char*>(d.data());
+ if (c_str[d.size() - 1] != 0) {
+ SkDEBUGFAIL("Not a valid string, not null-terminated.");
+ return SkString();
+ }
+
+ // CountUTF8 returns -1 if there's an invalid UTF-8 byte sequence.
+ int valid_utf8_chars_count = SkUTF::CountUTF8(c_str, d.size() - 1);
+ if (valid_utf8_chars_count == -1) {
+ SkDEBUGFAIL("Not a valid UTF-8 string.");
+ return SkString();
+ }
+
+ return SkString(c_str, d.size() - 1);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkPDFOffsetMap::markStartOfDocument(const SkWStream* s) { fBaseOffset = s->bytesWritten(); }
+
+static size_t difference(size_t minuend, size_t subtrahend) {
+ return SkASSERT(minuend >= subtrahend), minuend - subtrahend;
+}
+
+void SkPDFOffsetMap::markStartOfObject(int referenceNumber, const SkWStream* s) {
+ SkASSERT(referenceNumber > 0);
+ size_t index = SkToSizeT(referenceNumber - 1);
+ if (index >= fOffsets.size()) {
+ fOffsets.resize(index + 1);
+ }
+ fOffsets[index] = SkToInt(difference(s->bytesWritten(), fBaseOffset));
+}
+
+int SkPDFOffsetMap::objectCount() const {
+ return SkToInt(fOffsets.size() + 1); // Include the special zeroth object in the count.
+}
+
+int SkPDFOffsetMap::emitCrossReferenceTable(SkWStream* s) const {
+ int xRefFileOffset = SkToInt(difference(s->bytesWritten(), fBaseOffset));
+ s->writeText("xref\n0 ");
+ s->writeDecAsText(this->objectCount());
+ s->writeText("\n0000000000 65535 f \n");
+ for (int offset : fOffsets) {
+ SkASSERT(offset > 0); // Offset was set.
+ s->writeBigDecAsText(offset, 10);
+ s->writeText(" 00000 n \n");
+ }
+ return xRefFileOffset;
+}
+//
+////////////////////////////////////////////////////////////////////////////////
+
+#define SKPDF_MAGIC "\xD3\xEB\xE9\xE1"
+#ifndef SK_BUILD_FOR_WIN
+static_assert((SKPDF_MAGIC[0] & 0x7F) == "Skia"[0], "");
+static_assert((SKPDF_MAGIC[1] & 0x7F) == "Skia"[1], "");
+static_assert((SKPDF_MAGIC[2] & 0x7F) == "Skia"[2], "");
+static_assert((SKPDF_MAGIC[3] & 0x7F) == "Skia"[3], "");
+#endif
+static void serializeHeader(SkPDFOffsetMap* offsetMap, SkWStream* wStream) {
+ offsetMap->markStartOfDocument(wStream);
+ wStream->writeText("%PDF-1.4\n%" SKPDF_MAGIC "\n");
+ // The PDF spec recommends including a comment with four
+ // bytes, all with their high bits set. "\xD3\xEB\xE9\xE1" is
+ // "Skia" with the high bits set.
+}
+#undef SKPDF_MAGIC
+
+static void begin_indirect_object(SkPDFOffsetMap* offsetMap,
+ SkPDFIndirectReference ref,
+ SkWStream* s) {
+ offsetMap->markStartOfObject(ref.fValue, s);
+ s->writeDecAsText(ref.fValue);
+ s->writeText(" 0 obj\n"); // Generation number is always 0.
+}
+
+static void end_indirect_object(SkWStream* s) { s->writeText("\nendobj\n"); }
+
+// Xref table and footer
+static void serialize_footer(const SkPDFOffsetMap& offsetMap,
+ SkWStream* wStream,
+ SkPDFIndirectReference infoDict,
+ SkPDFIndirectReference docCatalog,
+ SkUUID uuid) {
+ int xRefFileOffset = offsetMap.emitCrossReferenceTable(wStream);
+ SkPDFDict trailerDict;
+ trailerDict.insertInt("Size", offsetMap.objectCount());
+ SkASSERT(docCatalog != SkPDFIndirectReference());
+ trailerDict.insertRef("Root", docCatalog);
+ SkASSERT(infoDict != SkPDFIndirectReference());
+ trailerDict.insertRef("Info", infoDict);
+ if (SkUUID() != uuid) {
+ trailerDict.insertObject("ID", SkPDFMetadata::MakePdfId(uuid, uuid));
+ }
+ wStream->writeText("trailer\n");
+ trailerDict.emitObject(wStream);
+ wStream->writeText("\nstartxref\n");
+ wStream->writeBigDecAsText(xRefFileOffset);
+ wStream->writeText("\n%%EOF");
+}
+
+static SkPDFIndirectReference generate_page_tree(
+ SkPDFDocument* doc,
+ std::vector<std::unique_ptr<SkPDFDict>> pages,
+ const std::vector<SkPDFIndirectReference>& pageRefs) {
+ // PDF wants a tree describing all the pages in the document. We arbitrarily
+ // choose 8 (kNodeSize) as the number of allowed children. The internal
+ // nodes have type "Pages" with an array of children, a parent pointer, and
+ // the number of leaves below the node as "Count." The leaves are passed
+ // into the method, have type "Page" and need a parent pointer. This method
+ // builds the tree bottom up, skipping internal nodes that would have only
+ // one child.
+ SkASSERT(pages.size() > 0);
+ struct PageTreeNode {
+ std::unique_ptr<SkPDFDict> fNode;
+ SkPDFIndirectReference fReservedRef;
+ int fPageObjectDescendantCount;
+
+ static std::vector<PageTreeNode> Layer(std::vector<PageTreeNode> vec, SkPDFDocument* doc) {
+ std::vector<PageTreeNode> result;
+ static constexpr size_t kMaxNodeSize = 8;
+ const size_t n = vec.size();
+ SkASSERT(n >= 1);
+ const size_t result_len = (n - 1) / kMaxNodeSize + 1;
+ SkASSERT(result_len >= 1);
+ SkASSERT(n == 1 || result_len < n);
+ result.reserve(result_len);
+ size_t index = 0;
+ for (size_t i = 0; i < result_len; ++i) {
+ if (n != 1 && index + 1 == n) { // No need to create a new node.
+ result.push_back(std::move(vec[index++]));
+ continue;
+ }
+ SkPDFIndirectReference parent = doc->reserveRef();
+ auto kids_list = SkPDFMakeArray();
+ int descendantCount = 0;
+ for (size_t j = 0; j < kMaxNodeSize && index < n; ++j) {
+ PageTreeNode& node = vec[index++];
+ node.fNode->insertRef("Parent", parent);
+ kids_list->appendRef(doc->emit(*node.fNode, node.fReservedRef));
+ descendantCount += node.fPageObjectDescendantCount;
+ }
+ auto next = SkPDFMakeDict("Pages");
+ next->insertInt("Count", descendantCount);
+ next->insertObject("Kids", std::move(kids_list));
+ result.push_back(PageTreeNode{std::move(next), parent, descendantCount});
+ }
+ return result;
+ }
+ };
+ std::vector<PageTreeNode> currentLayer;
+ currentLayer.reserve(pages.size());
+ SkASSERT(pages.size() == pageRefs.size());
+ for (size_t i = 0; i < pages.size(); ++i) {
+ currentLayer.push_back(PageTreeNode{std::move(pages[i]), pageRefs[i], 1});
+ }
+ currentLayer = PageTreeNode::Layer(std::move(currentLayer), doc);
+ while (currentLayer.size() > 1) {
+ currentLayer = PageTreeNode::Layer(std::move(currentLayer), doc);
+ }
+ SkASSERT(currentLayer.size() == 1);
+ const PageTreeNode& root = currentLayer[0];
+ return doc->emit(*root.fNode, root.fReservedRef);
+}
+
+template<typename T, typename... Args>
+static void reset_object(T* dst, Args&&... args) {
+ dst->~T();
+ new (dst) T(std::forward<Args>(args)...);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFDocument::SkPDFDocument(SkWStream* stream,
+ SkPDF::Metadata metadata)
+ : SkDocument(stream)
+ , fMetadata(std::move(metadata)) {
+ constexpr float kDpiForRasterScaleOne = 72.0f;
+ if (fMetadata.fRasterDPI != kDpiForRasterScaleOne) {
+ fInverseRasterScale = kDpiForRasterScaleOne / fMetadata.fRasterDPI;
+ fRasterScale = fMetadata.fRasterDPI / kDpiForRasterScaleOne;
+ }
+ if (fMetadata.fStructureElementTreeRoot) {
+ fTagTree.init(fMetadata.fStructureElementTreeRoot);
+ }
+ fExecutor = fMetadata.fExecutor;
+}
+
+SkPDFDocument::~SkPDFDocument() {
+ // subclasses of SkDocument must call close() in their destructors.
+ this->close();
+}
+
+SkPDFIndirectReference SkPDFDocument::emit(const SkPDFObject& object, SkPDFIndirectReference ref){
+ SkAutoMutexExclusive lock(fMutex);
+ object.emitObject(this->beginObject(ref));
+ this->endObject();
+ return ref;
+}
+
+SkWStream* SkPDFDocument::beginObject(SkPDFIndirectReference ref) SK_REQUIRES(fMutex) {
+ begin_indirect_object(&fOffsetMap, ref, this->getStream());
+ return this->getStream();
+}
+
+void SkPDFDocument::endObject() SK_REQUIRES(fMutex) {
+ end_indirect_object(this->getStream());
+}
+
+static SkSize operator*(SkISize u, SkScalar s) { return SkSize{u.width() * s, u.height() * s}; }
+static SkSize operator*(SkSize u, SkScalar s) { return SkSize{u.width() * s, u.height() * s}; }
+
+SkCanvas* SkPDFDocument::onBeginPage(SkScalar width, SkScalar height) {
+ SkASSERT(fCanvas.imageInfo().dimensions().isZero());
+ if (fPages.empty()) {
+ // if this is the first page of the document.
+ {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ serializeHeader(&fOffsetMap, this->getStream());
+
+ }
+
+ fInfoDict = this->emit(*SkPDFMetadata::MakeDocumentInformationDict(fMetadata));
+ if (fMetadata.fPDFA) {
+ fUUID = SkPDFMetadata::CreateUUID(fMetadata);
+ // We use the same UUID for Document ID and Instance ID since this
+ // is the first revision of this document (and Skia does not
+ // support revising existing PDF documents).
+ // If we are not in PDF/A mode, don't use a UUID since testing
+ // works best with reproducible outputs.
+ fXMP = SkPDFMetadata::MakeXMPObject(fMetadata, fUUID, fUUID, this);
+ }
+ }
+ // By scaling the page at the device level, we will create bitmap layer
+ // devices at the rasterized scale, not the 72dpi scale. Bitmap layer
+ // devices are created when saveLayer is called with an ImageFilter; see
+ // SkPDFDevice::onCreateDevice().
+ SkISize pageSize = (SkSize{width, height} * fRasterScale).toRound();
+ SkMatrix initialTransform;
+ // Skia uses the top left as the origin but PDF natively has the origin at the
+ // bottom left. This matrix corrects for that, as well as the raster scale.
+ initialTransform.setScaleTranslate(fInverseRasterScale, -fInverseRasterScale,
+ 0, fInverseRasterScale * pageSize.height());
+ fPageDevice = sk_make_sp<SkPDFDevice>(pageSize, this, initialTransform);
+ reset_object(&fCanvas, fPageDevice);
+ fCanvas.scale(fRasterScale, fRasterScale);
+ fPageRefs.push_back(this->reserveRef());
+ return &fCanvas;
+}
+
+static void populate_link_annotation(SkPDFDict* annotation, const SkRect& r) {
+ annotation->insertName("Subtype", "Link");
+ annotation->insertInt("F", 4); // required by ISO 19005
+ // Border: 0 = Horizontal corner radius.
+ // 0 = Vertical corner radius.
+ // 0 = Width, 0 = no border.
+ annotation->insertObject("Border", SkPDFMakeArray(0, 0, 0));
+ annotation->insertObject("Rect", SkPDFMakeArray(r.fLeft, r.fTop, r.fRight, r.fBottom));
+}
+
+static SkPDFIndirectReference append_destinations(
+ SkPDFDocument* doc,
+ const std::vector<SkPDFNamedDestination>& namedDestinations)
+{
+ SkPDFDict destinations;
+ for (const SkPDFNamedDestination& dest : namedDestinations) {
+ auto pdfDest = SkPDFMakeArray();
+ pdfDest->reserve(5);
+ pdfDest->appendRef(dest.fPage);
+ pdfDest->appendName("XYZ");
+ pdfDest->appendScalar(dest.fPoint.x());
+ pdfDest->appendScalar(dest.fPoint.y());
+ pdfDest->appendInt(0); // Leave zoom unchanged
+ destinations.insertObject(ToValidUtf8String(*dest.fName), std::move(pdfDest));
+ }
+ return doc->emit(destinations);
+}
+
+std::unique_ptr<SkPDFArray> SkPDFDocument::getAnnotations() {
+ std::unique_ptr<SkPDFArray> array;
+ size_t count = fCurrentPageLinks.size();
+ if (0 == count) {
+ return array; // is nullptr
+ }
+ array = SkPDFMakeArray();
+ array->reserve(count);
+ for (const auto& link : fCurrentPageLinks) {
+ SkPDFDict annotation("Annot");
+ populate_link_annotation(&annotation, link->fRect);
+ if (link->fType == SkPDFLink::Type::kUrl) {
+ std::unique_ptr<SkPDFDict> action = SkPDFMakeDict("Action");
+ action->insertName("S", "URI");
+ // This is documented to be a 7 bit ASCII (byte) string.
+ action->insertByteString("URI", ToValidUtf8String(*link->fData));
+ annotation.insertObject("A", std::move(action));
+ } else if (link->fType == SkPDFLink::Type::kNamedDestination) {
+ annotation.insertName("Dest", ToValidUtf8String(*link->fData));
+ } else {
+ SkDEBUGFAIL("Unknown link type.");
+ }
+
+ if (link->fNodeId) {
+ int structParentKey = createStructParentKeyForNodeId(link->fNodeId);
+ if (structParentKey != -1) {
+ annotation.insertInt("StructParent", structParentKey);
+ }
+ }
+
+ SkPDFIndirectReference annotationRef = emit(annotation);
+ array->appendRef(annotationRef);
+ if (link->fNodeId) {
+ fTagTree.addNodeAnnotation(link->fNodeId, annotationRef, SkToUInt(this->currentPageIndex()));
+ }
+ }
+ return array;
+}
+
+void SkPDFDocument::onEndPage() {
+ SkASSERT(!fCanvas.imageInfo().dimensions().isZero());
+ reset_object(&fCanvas);
+ SkASSERT(fPageDevice);
+
+ auto page = SkPDFMakeDict("Page");
+
+ SkSize mediaSize = fPageDevice->imageInfo().dimensions() * fInverseRasterScale;
+ std::unique_ptr<SkStreamAsset> pageContent = fPageDevice->content();
+ auto resourceDict = fPageDevice->makeResourceDict();
+ SkASSERT(fPageRefs.size() > 0);
+ fPageDevice = nullptr;
+
+ page->insertObject("Resources", std::move(resourceDict));
+ page->insertObject("MediaBox", SkPDFUtils::RectToArray(SkRect::MakeSize(mediaSize)));
+
+ if (std::unique_ptr<SkPDFArray> annotations = getAnnotations()) {
+ page->insertObject("Annots", std::move(annotations));
+ fCurrentPageLinks.clear();
+ }
+
+ page->insertRef("Contents", SkPDFStreamOut(nullptr, std::move(pageContent), this));
+ // The StructParents unique identifier for each page is just its
+ // 0-based page index.
+ page->insertInt("StructParents", SkToInt(this->currentPageIndex()));
+ fPages.emplace_back(std::move(page));
+}
+
+void SkPDFDocument::onAbort() {
+ this->waitForJobs();
+}
+
+static sk_sp<SkData> SkSrgbIcm() {
+ // Source: http://www.argyllcms.com/icclibsrc.html
+ static const char kProfile[] =
+ "\0\0\14\214argl\2 \0\0mntrRGB XYZ \7\336\0\1\0\6\0\26\0\17\0:acspM"
+ "SFT\0\0\0\0IEC sRGB\0\0\0\0\0\0\0\0\0\0\0\0\0\0\366\326\0\1\0\0\0\0"
+ "\323-argl\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\21desc\0\0\1P\0\0\0\231cprt\0"
+ "\0\1\354\0\0\0gdmnd\0\0\2T\0\0\0pdmdd\0\0\2\304\0\0\0\210tech\0\0\3"
+ "L\0\0\0\14vued\0\0\3X\0\0\0gview\0\0\3\300\0\0\0$lumi\0\0\3\344\0\0"
+ "\0\24meas\0\0\3\370\0\0\0$wtpt\0\0\4\34\0\0\0\24bkpt\0\0\0040\0\0\0"
+ "\24rXYZ\0\0\4D\0\0\0\24gXYZ\0\0\4X\0\0\0\24bXYZ\0\0\4l\0\0\0\24rTR"
+ "C\0\0\4\200\0\0\10\14gTRC\0\0\4\200\0\0\10\14bTRC\0\0\4\200\0\0\10"
+ "\14desc\0\0\0\0\0\0\0?sRGB IEC61966-2.1 (Equivalent to www.srgb.co"
+ "m 1998 HP profile)\0\0\0\0\0\0\0\0\0\0\0?sRGB IEC61966-2.1 (Equiva"
+ "lent to www.srgb.com 1998 HP profile)\0\0\0\0\0\0\0\0text\0\0\0\0C"
+ "reated by Graeme W. Gill. Released into the public domain. No Warr"
+ "anty, Use at your own risk.\0\0desc\0\0\0\0\0\0\0\26IEC http://www"
+ ".iec.ch\0\0\0\0\0\0\0\0\0\0\0\26IEC http://www.iec.ch\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0desc\0\0\0\0\0\0\0.IEC 61966-2.1 Default RGB colour sp"
+ "ace - sRGB\0\0\0\0\0\0\0\0\0\0\0.IEC 61966-2.1 Default RGB colour "
+ "space - sRGB\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0sig \0\0\0"
+ "\0CRT desc\0\0\0\0\0\0\0\rIEC61966-2.1\0\0\0\0\0\0\0\0\0\0\0\rIEC6"
+ "1966-2.1\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0view\0\0\0\0"
+ "\0\23\244|\0\24_0\0\20\316\2\0\3\355\262\0\4\23\n\0\3\\g\0\0\0\1XY"
+ "Z \0\0\0\0\0L\n=\0P\0\0\0W\36\270meas\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\2\217\0\0\0\2XYZ \0\0\0\0\0\0\363Q\0\1\0\0\0"
+ "\1\26\314XYZ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0XYZ \0\0\0\0\0\0o\240"
+ "\0\0008\365\0\0\3\220XYZ \0\0\0\0\0\0b\227\0\0\267\207\0\0\30\331X"
+ "YZ \0\0\0\0\0\0$\237\0\0\17\204\0\0\266\304curv\0\0\0\0\0\0\4\0\0\0"
+ "\0\5\0\n\0\17\0\24\0\31\0\36\0#\0(\0-\0002\0007\0;\0@\0E\0J\0O\0T\0"
+ "Y\0^\0c\0h\0m\0r\0w\0|\0\201\0\206\0\213\0\220\0\225\0\232\0\237\0"
+ "\244\0\251\0\256\0\262\0\267\0\274\0\301\0\306\0\313\0\320\0\325\0"
+ "\333\0\340\0\345\0\353\0\360\0\366\0\373\1\1\1\7\1\r\1\23\1\31\1\37"
+ "\1%\1+\0012\0018\1>\1E\1L\1R\1Y\1`\1g\1n\1u\1|\1\203\1\213\1\222\1"
+ "\232\1\241\1\251\1\261\1\271\1\301\1\311\1\321\1\331\1\341\1\351\1"
+ "\362\1\372\2\3\2\14\2\24\2\35\2&\2/\0028\2A\2K\2T\2]\2g\2q\2z\2\204"
+ "\2\216\2\230\2\242\2\254\2\266\2\301\2\313\2\325\2\340\2\353\2\365"
+ "\3\0\3\13\3\26\3!\3-\0038\3C\3O\3Z\3f\3r\3~\3\212\3\226\3\242\3\256"
+ "\3\272\3\307\3\323\3\340\3\354\3\371\4\6\4\23\4 \4-\4;\4H\4U\4c\4q"
+ "\4~\4\214\4\232\4\250\4\266\4\304\4\323\4\341\4\360\4\376\5\r\5\34"
+ "\5+\5:\5I\5X\5g\5w\5\206\5\226\5\246\5\265\5\305\5\325\5\345\5\366"
+ "\6\6\6\26\6'\0067\6H\6Y\6j\6{\6\214\6\235\6\257\6\300\6\321\6\343\6"
+ "\365\7\7\7\31\7+\7=\7O\7a\7t\7\206\7\231\7\254\7\277\7\322\7\345\7"
+ "\370\10\13\10\37\0102\10F\10Z\10n\10\202\10\226\10\252\10\276\10\322"
+ "\10\347\10\373\t\20\t%\t:\tO\td\ty\t\217\t\244\t\272\t\317\t\345\t"
+ "\373\n\21\n'\n=\nT\nj\n\201\n\230\n\256\n\305\n\334\n\363\13\13\13"
+ "\"\0139\13Q\13i\13\200\13\230\13\260\13\310\13\341\13\371\14\22\14"
+ "*\14C\14\\\14u\14\216\14\247\14\300\14\331\14\363\r\r\r&\r@\rZ\rt\r"
+ "\216\r\251\r\303\r\336\r\370\16\23\16.\16I\16d\16\177\16\233\16\266"
+ "\16\322\16\356\17\t\17%\17A\17^\17z\17\226\17\263\17\317\17\354\20"
+ "\t\20&\20C\20a\20~\20\233\20\271\20\327\20\365\21\23\0211\21O\21m\21"
+ "\214\21\252\21\311\21\350\22\7\22&\22E\22d\22\204\22\243\22\303\22"
+ "\343\23\3\23#\23C\23c\23\203\23\244\23\305\23\345\24\6\24'\24I\24j"
+ "\24\213\24\255\24\316\24\360\25\22\0254\25V\25x\25\233\25\275\25\340"
+ "\26\3\26&\26I\26l\26\217\26\262\26\326\26\372\27\35\27A\27e\27\211"
+ "\27\256\27\322\27\367\30\33\30@\30e\30\212\30\257\30\325\30\372\31"
+ " \31E\31k\31\221\31\267\31\335\32\4\32*\32Q\32w\32\236\32\305\32\354"
+ "\33\24\33;\33c\33\212\33\262\33\332\34\2\34*\34R\34{\34\243\34\314"
+ "\34\365\35\36\35G\35p\35\231\35\303\35\354\36\26\36@\36j\36\224\36"
+ "\276\36\351\37\23\37>\37i\37\224\37\277\37\352 \25 A l \230 \304 \360"
+ "!\34!H!u!\241!\316!\373\"'\"U\"\202\"\257\"\335#\n#8#f#\224#\302#\360"
+ "$\37$M$|$\253$\332%\t%8%h%\227%\307%\367&'&W&\207&\267&\350'\30'I'"
+ "z'\253'\334(\r(?(q(\242(\324)\6)8)k)\235)\320*\2*5*h*\233*\317+\2+"
+ "6+i+\235+\321,\5,9,n,\242,\327-\14-A-v-\253-\341.\26.L.\202.\267.\356"
+ "/$/Z/\221/\307/\376050l0\2440\3331\0221J1\2021\2721\3622*2c2\2332\324"
+ "3\r3F3\1773\2703\3614+4e4\2364\3305\0235M5\2075\3025\375676r6\2566"
+ "\3517$7`7\2347\3278\0248P8\2148\3109\0059B9\1779\2749\371:6:t:\262"
+ ":\357;-;k;\252;\350<'<e<\244<\343=\"=a=\241=\340> >`>\240>\340?!?a"
+ "?\242?\342@#@d@\246@\347A)AjA\254A\356B0BrB\265B\367C:C}C\300D\3DG"
+ "D\212D\316E\22EUE\232E\336F\"FgF\253F\360G5G{G\300H\5HKH\221H\327I"
+ "\35IcI\251I\360J7J}J\304K\14KSK\232K\342L*LrL\272M\2MJM\223M\334N%"
+ "NnN\267O\0OIO\223O\335P'PqP\273Q\6QPQ\233Q\346R1R|R\307S\23S_S\252"
+ "S\366TBT\217T\333U(UuU\302V\17V\\V\251V\367WDW\222W\340X/X}X\313Y\32"
+ "YiY\270Z\7ZVZ\246Z\365[E[\225[\345\\5\\\206\\\326]']x]\311^\32^l^\275"
+ "_\17_a_\263`\5`W`\252`\374aOa\242a\365bIb\234b\360cCc\227c\353d@d\224"
+ "d\351e=e\222e\347f=f\222f\350g=g\223g\351h?h\226h\354iCi\232i\361j"
+ "Hj\237j\367kOk\247k\377lWl\257m\10m`m\271n\22nkn\304o\36oxo\321p+p"
+ "\206p\340q:q\225q\360rKr\246s\1s]s\270t\24tpt\314u(u\205u\341v>v\233"
+ "v\370wVw\263x\21xnx\314y*y\211y\347zFz\245{\4{c{\302|!|\201|\341}A"
+ "}\241~\1~b~\302\177#\177\204\177\345\200G\200\250\201\n\201k\201\315"
+ "\2020\202\222\202\364\203W\203\272\204\35\204\200\204\343\205G\205"
+ "\253\206\16\206r\206\327\207;\207\237\210\4\210i\210\316\2113\211\231"
+ "\211\376\212d\212\312\2130\213\226\213\374\214c\214\312\2151\215\230"
+ "\215\377\216f\216\316\2176\217\236\220\6\220n\220\326\221?\221\250"
+ "\222\21\222z\222\343\223M\223\266\224 \224\212\224\364\225_\225\311"
+ "\2264\226\237\227\n\227u\227\340\230L\230\270\231$\231\220\231\374"
+ "\232h\232\325\233B\233\257\234\34\234\211\234\367\235d\235\322\236"
+ "@\236\256\237\35\237\213\237\372\240i\240\330\241G\241\266\242&\242"
+ "\226\243\6\243v\243\346\244V\244\307\2458\245\251\246\32\246\213\246"
+ "\375\247n\247\340\250R\250\304\2517\251\251\252\34\252\217\253\2\253"
+ "u\253\351\254\\\254\320\255D\255\270\256-\256\241\257\26\257\213\260"
+ "\0\260u\260\352\261`\261\326\262K\262\302\2638\263\256\264%\264\234"
+ "\265\23\265\212\266\1\266y\266\360\267h\267\340\270Y\270\321\271J\271"
+ "\302\272;\272\265\273.\273\247\274!\274\233\275\25\275\217\276\n\276"
+ "\204\276\377\277z\277\365\300p\300\354\301g\301\343\302_\302\333\303"
+ "X\303\324\304Q\304\316\305K\305\310\306F\306\303\307A\307\277\310="
+ "\310\274\311:\311\271\3128\312\267\3136\313\266\3145\314\265\3155\315"
+ "\265\3166\316\266\3177\317\270\3209\320\272\321<\321\276\322?\322\301"
+ "\323D\323\306\324I\324\313\325N\325\321\326U\326\330\327\\\327\340"
+ "\330d\330\350\331l\331\361\332v\332\373\333\200\334\5\334\212\335\20"
+ "\335\226\336\34\336\242\337)\337\257\3406\340\275\341D\341\314\342"
+ "S\342\333\343c\343\353\344s\344\374\345\204\346\r\346\226\347\37\347"
+ "\251\3502\350\274\351F\351\320\352[\352\345\353p\353\373\354\206\355"
+ "\21\355\234\356(\356\264\357@\357\314\360X\360\345\361r\361\377\362"
+ "\214\363\31\363\247\3644\364\302\365P\365\336\366m\366\373\367\212"
+ "\370\31\370\250\3718\371\307\372W\372\347\373w\374\7\374\230\375)\375"
+ "\272\376K\376\334\377m\377\377";
+ const size_t kProfileLength = 3212;
+ static_assert(kProfileLength == sizeof(kProfile) - 1, "");
+ return SkData::MakeWithoutCopy(kProfile, kProfileLength);
+}
+
+static SkPDFIndirectReference make_srgb_color_profile(SkPDFDocument* doc) {
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertInt("N", 3);
+ dict->insertObject("Range", SkPDFMakeArray(0, 1, 0, 1, 0, 1));
+ return SkPDFStreamOut(std::move(dict), SkMemoryStream::Make(SkSrgbIcm()),
+ doc, SkPDFSteamCompressionEnabled::Yes);
+}
+
+static std::unique_ptr<SkPDFArray> make_srgb_output_intents(SkPDFDocument* doc) {
+ // sRGB is specified by HTML, CSS, and SVG.
+ auto outputIntent = SkPDFMakeDict("OutputIntent");
+ outputIntent->insertName("S", "GTS_PDFA1");
+ outputIntent->insertTextString("RegistryName", "http://www.color.org");
+ outputIntent->insertTextString("OutputConditionIdentifier", "Custom");
+ outputIntent->insertTextString("Info", "sRGB IEC61966-2.1");
+ outputIntent->insertRef("DestOutputProfile", make_srgb_color_profile(doc));
+ auto intentArray = SkPDFMakeArray();
+ intentArray->appendObject(std::move(outputIntent));
+ return intentArray;
+}
+
+SkPDFIndirectReference SkPDFDocument::getPage(size_t pageIndex) const {
+ SkASSERT(pageIndex < fPageRefs.size());
+ return fPageRefs[pageIndex];
+}
+
+const SkMatrix& SkPDFDocument::currentPageTransform() const {
+ return fPageDevice->initialTransform();
+}
+
+int SkPDFDocument::createMarkIdForNodeId(int nodeId) {
+ return fTagTree.createMarkIdForNodeId(nodeId, SkToUInt(this->currentPageIndex()));
+}
+
+int SkPDFDocument::createStructParentKeyForNodeId(int nodeId) {
+ return fTagTree.createStructParentKeyForNodeId(nodeId, SkToUInt(this->currentPageIndex()));
+}
+
+static std::vector<const SkPDFFont*> get_fonts(const SkPDFDocument& canon) {
+ std::vector<const SkPDFFont*> fonts;
+ fonts.reserve(canon.fFontMap.count());
+ // Sort so the output PDF is reproducible.
+ for (const auto& [unused, font] : canon.fFontMap) {
+ fonts.push_back(&font);
+ }
+ std::sort(fonts.begin(), fonts.end(), [](const SkPDFFont* u, const SkPDFFont* v) {
+ return u->indirectReference().fValue < v->indirectReference().fValue;
+ });
+ return fonts;
+}
+
+SkString SkPDFDocument::nextFontSubsetTag() {
+ // PDF 32000-1:2008 Section 9.6.4 FontSubsets "The tag shall consist of six uppercase letters"
+ // "followed by a plus sign" "different subsets in the same PDF file shall have different tags."
+ // There are 26^6 or 308,915,776 possible values. So start in range then increment and mod.
+ uint32_t thisFontSubsetTag = fNextFontSubsetTag;
+ fNextFontSubsetTag = (fNextFontSubsetTag + 1u) % 308915776u;
+
+ SkString subsetTag(7);
+ char* subsetTagData = subsetTag.data();
+ for (size_t i = 0; i < 6; ++i) {
+ subsetTagData[i] = 'A' + (thisFontSubsetTag % 26);
+ thisFontSubsetTag /= 26;
+ }
+ subsetTagData[6] = '+';
+ return subsetTag;
+}
+
+void SkPDFDocument::onClose(SkWStream* stream) {
+ SkASSERT(fCanvas.imageInfo().dimensions().isZero());
+ if (fPages.empty()) {
+ this->waitForJobs();
+ return;
+ }
+ auto docCatalog = SkPDFMakeDict("Catalog");
+ if (fMetadata.fPDFA) {
+ SkASSERT(fXMP != SkPDFIndirectReference());
+ docCatalog->insertRef("Metadata", fXMP);
+ // Don't specify OutputIntents if we are not in PDF/A mode since
+ // no one has ever asked for this feature.
+ docCatalog->insertObject("OutputIntents", make_srgb_output_intents(this));
+ }
+
+ docCatalog->insertRef("Pages", generate_page_tree(this, std::move(fPages), fPageRefs));
+
+ if (!fNamedDestinations.empty()) {
+ docCatalog->insertRef("Dests", append_destinations(this, fNamedDestinations));
+ fNamedDestinations.clear();
+ }
+
+ // Handle tagged PDFs.
+ if (SkPDFIndirectReference root = fTagTree.makeStructTreeRoot(this)) {
+ // In the document catalog, indicate that this PDF is tagged.
+ auto markInfo = SkPDFMakeDict("MarkInfo");
+ markInfo->insertBool("Marked", true);
+ docCatalog->insertObject("MarkInfo", std::move(markInfo));
+ docCatalog->insertRef("StructTreeRoot", root);
+ }
+
+ auto docCatalogRef = this->emit(*docCatalog);
+
+ for (const SkPDFFont* f : get_fonts(*this)) {
+ f->emitSubset(this);
+ }
+
+ this->waitForJobs();
+ {
+ SkAutoMutexExclusive autoMutexAcquire(fMutex);
+ serialize_footer(fOffsetMap, this->getStream(), fInfoDict, docCatalogRef, fUUID);
+ }
+}
+
+void SkPDFDocument::incrementJobCount() { fJobCount++; }
+
+void SkPDFDocument::signalJobComplete() { fSemaphore.signal(); }
+
+void SkPDFDocument::waitForJobs() {
+ // fJobCount can increase while we wait.
+ while (fJobCount > 0) {
+ fSemaphore.wait();
+ --fJobCount;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkPDF::SetNodeId(SkCanvas* canvas, int nodeID) {
+ sk_sp<SkData> payload = SkData::MakeWithCopy(&nodeID, sizeof(nodeID));
+ const char* key = SkPDFGetNodeIdKey();
+ canvas->drawAnnotation({0, 0, 0, 0}, key, payload.get());
+}
+
+sk_sp<SkDocument> SkPDF::MakeDocument(SkWStream* stream, const SkPDF::Metadata& metadata) {
+ SkPDF::Metadata meta = metadata;
+ if (meta.fRasterDPI <= 0) {
+ meta.fRasterDPI = 72.0f;
+ }
+ if (meta.fEncodingQuality < 0) {
+ meta.fEncodingQuality = 0;
+ }
+ return stream ? sk_make_sp<SkPDFDocument>(stream, std::move(meta)) : nullptr;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFDocumentPriv.h b/gfx/skia/skia/src/pdf/SkPDFDocumentPriv.h
new file mode 100644
index 0000000000..792c6c2208
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFDocumentPriv.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFDocumentPriv_DEFINED
+#define SkPDFDocumentPriv_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkStream.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/base/SkMutex.h"
+#include "src/core/SkTHash.h"
+#include "src/pdf/SkPDFMetadata.h"
+#include "src/pdf/SkPDFTag.h"
+
+#include <atomic>
+#include <vector>
+#include <memory>
+
+class SkExecutor;
+class SkPDFDevice;
+class SkPDFFont;
+struct SkAdvancedTypefaceMetrics;
+struct SkBitmapKey;
+struct SkPDFFillGraphicState;
+struct SkPDFImageShaderKey;
+struct SkPDFStrokeGraphicState;
+
+namespace SkPDFGradientShader {
+struct Key;
+struct KeyHash;
+} // namespace SkPDFGradientShader
+
+const char* SkPDFGetNodeIdKey();
+
+// Logically part of SkPDFDocument, but separate to keep similar functionality together.
+class SkPDFOffsetMap {
+public:
+ void markStartOfDocument(const SkWStream*);
+ void markStartOfObject(int referenceNumber, const SkWStream*);
+ int objectCount() const;
+ int emitCrossReferenceTable(SkWStream* s) const;
+private:
+ std::vector<int> fOffsets;
+ size_t fBaseOffset = SIZE_MAX;
+};
+
+
+struct SkPDFNamedDestination {
+ sk_sp<SkData> fName;
+ SkPoint fPoint;
+ SkPDFIndirectReference fPage;
+};
+
+
+struct SkPDFLink {
+ enum class Type {
+ kNone,
+ kUrl,
+ kNamedDestination,
+ };
+
+ SkPDFLink(Type type, SkData* data, const SkRect& rect, int nodeId)
+ : fType(type)
+ , fData(sk_ref_sp(data))
+ , fRect(rect)
+ , fNodeId(nodeId) {}
+ const Type fType;
+ // The url or named destination, depending on |fType|.
+ const sk_sp<SkData> fData;
+ const SkRect fRect;
+ const int fNodeId;
+};
+
+
+/** Concrete implementation of SkDocument that creates PDF files. This
+ class does not produced linearized or optimized PDFs; instead it
+ it attempts to use a minimum amount of RAM. */
+class SkPDFDocument : public SkDocument {
+public:
+ SkPDFDocument(SkWStream*, SkPDF::Metadata);
+ ~SkPDFDocument() override;
+ SkCanvas* onBeginPage(SkScalar, SkScalar) override;
+ void onEndPage() override;
+ void onClose(SkWStream*) override;
+ void onAbort() override;
+
+ /**
+ Serialize the object, as well as any other objects it
+ indirectly refers to. If any any other objects have been added
+ to the SkPDFObjNumMap without serializing them, they will be
+ serialized as well.
+
+ It might go without saying that objects should not be changed
+ after calling serialize, since those changes will be too late.
+ */
+ SkPDFIndirectReference emit(const SkPDFObject&, SkPDFIndirectReference);
+ SkPDFIndirectReference emit(const SkPDFObject& o) { return this->emit(o, this->reserveRef()); }
+
+ template <typename T>
+ void emitStream(const SkPDFDict& dict, T writeStream, SkPDFIndirectReference ref) {
+ SkAutoMutexExclusive lock(fMutex);
+ SkWStream* stream = this->beginObject(ref);
+ dict.emitObject(stream);
+ stream->writeText(" stream\n");
+ writeStream(stream);
+ stream->writeText("\nendstream");
+ this->endObject();
+ }
+
+ const SkPDF::Metadata& metadata() const { return fMetadata; }
+
+ SkPDFIndirectReference getPage(size_t pageIndex) const;
+ SkPDFIndirectReference currentPage() const {
+ return SkASSERT(!fPageRefs.empty()), fPageRefs.back();
+ }
+ // Used to allow marked content to refer to its corresponding structure
+ // tree node, via a page entry in the parent tree. Returns -1 if no
+ // mark ID.
+ int createMarkIdForNodeId(int nodeId);
+ // Used to allow annotations to refer to their corresponding structure
+ // tree node, via the struct parent tree. Returns -1 if no struct parent
+ // key.
+ int createStructParentKeyForNodeId(int nodeId);
+
+ std::unique_ptr<SkPDFArray> getAnnotations();
+
+ SkPDFIndirectReference reserveRef() { return SkPDFIndirectReference{fNextObjectNumber++}; }
+
+ // Returns a tag to prepend to a PostScript name of a subset font. Includes the '+'.
+ SkString nextFontSubsetTag();
+
+ SkExecutor* executor() const { return fExecutor; }
+ void incrementJobCount();
+ void signalJobComplete();
+ size_t currentPageIndex() { return fPages.size(); }
+ size_t pageCount() { return fPageRefs.size(); }
+
+ const SkMatrix& currentPageTransform() const;
+
+ // Canonicalized objects
+ SkTHashMap<SkPDFImageShaderKey, SkPDFIndirectReference> fImageShaderMap;
+ SkTHashMap<SkPDFGradientShader::Key, SkPDFIndirectReference, SkPDFGradientShader::KeyHash>
+ fGradientPatternMap;
+ SkTHashMap<SkBitmapKey, SkPDFIndirectReference> fPDFBitmapMap;
+ SkTHashMap<uint32_t, std::unique_ptr<SkAdvancedTypefaceMetrics>> fTypefaceMetrics;
+ SkTHashMap<uint32_t, std::vector<SkString>> fType1GlyphNames;
+ SkTHashMap<uint32_t, std::vector<SkUnichar>> fToUnicodeMap;
+ SkTHashMap<uint32_t, SkPDFIndirectReference> fFontDescriptors;
+ SkTHashMap<uint32_t, SkPDFIndirectReference> fType3FontDescriptors;
+ SkTHashMap<uint64_t, SkPDFFont> fFontMap;
+ SkTHashMap<SkPDFStrokeGraphicState, SkPDFIndirectReference> fStrokeGSMap;
+ SkTHashMap<SkPDFFillGraphicState, SkPDFIndirectReference> fFillGSMap;
+ SkPDFIndirectReference fInvertFunction;
+ SkPDFIndirectReference fNoSmaskGraphicState;
+ std::vector<std::unique_ptr<SkPDFLink>> fCurrentPageLinks;
+ std::vector<SkPDFNamedDestination> fNamedDestinations;
+
+private:
+ SkPDFOffsetMap fOffsetMap;
+ SkCanvas fCanvas;
+ std::vector<std::unique_ptr<SkPDFDict>> fPages;
+ std::vector<SkPDFIndirectReference> fPageRefs;
+
+ sk_sp<SkPDFDevice> fPageDevice;
+ std::atomic<int> fNextObjectNumber = {1};
+ std::atomic<int> fJobCount = {0};
+ uint32_t fNextFontSubsetTag = {0};
+ SkUUID fUUID;
+ SkPDFIndirectReference fInfoDict;
+ SkPDFIndirectReference fXMP;
+ SkPDF::Metadata fMetadata;
+ SkScalar fRasterScale = 1;
+ SkScalar fInverseRasterScale = 1;
+ SkExecutor* fExecutor = nullptr;
+
+ // For tagged PDFs.
+ SkPDFTagTree fTagTree;
+
+ SkMutex fMutex;
+ SkSemaphore fSemaphore;
+
+ void waitForJobs();
+ SkWStream* beginObject(SkPDFIndirectReference);
+ void endObject();
+};
+
+#endif // SkPDFDocumentPriv_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFFont.cpp b/gfx/skia/skia/src/pdf/SkPDFFont.cpp
new file mode 100644
index 0000000000..964c9aeb23
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFont.cpp
@@ -0,0 +1,724 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/core/SkTypes.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/SkBitmaskEnum.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTHash.h"
+#include "src/pdf/SkPDFBitmap.h"
+#include "src/pdf/SkPDFDevice.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFont.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFMakeCIDGlyphWidthsArray.h"
+#include "src/pdf/SkPDFMakeToUnicodeCmap.h"
+#include "src/pdf/SkPDFSubsetFont.h"
+#include "src/pdf/SkPDFType1Font.h"
+#include "src/pdf/SkPDFUtils.h"
+
+#include <limits.h>
+#include <initializer_list>
+#include <memory>
+#include <utility>
+
+void SkPDFFont::GetType1GlyphNames(const SkTypeface& face, SkString* dst) {
+ face.getPostScriptGlyphNames(dst);
+}
+
+namespace {
+// PDF's notion of symbolic vs non-symbolic is related to the character set, not
+// symbols vs. characters. Rarely is a font the right character set to call it
+// non-symbolic, so always call it symbolic. (PDF 1.4 spec, section 5.7.1)
+static const int32_t kPdfSymbolic = 4;
+static const SkFontTableTag kCOLRTableTag = SkSetFourByteTag('C', 'O', 'L', 'R');
+
+// scale from em-units to base-1000, returning as a SkScalar
+inline SkScalar from_font_units(SkScalar scaled, uint16_t emSize) {
+ return emSize == 1000 ? scaled : scaled * 1000 / emSize;
+}
+
+inline SkScalar scaleFromFontUnits(int16_t val, uint16_t emSize) {
+ return from_font_units(SkIntToScalar(val), emSize);
+}
+
+void setGlyphWidthAndBoundingBox(SkScalar width, SkIRect box,
+ SkDynamicMemoryWStream* content) {
+ // Specify width and bounding box for the glyph.
+ SkPDFUtils::AppendScalar(width, content);
+ content->writeText(" 0 ");
+ content->writeDecAsText(box.fLeft);
+ content->writeText(" ");
+ content->writeDecAsText(box.fTop);
+ content->writeText(" ");
+ content->writeDecAsText(box.fRight);
+ content->writeText(" ");
+ content->writeDecAsText(box.fBottom);
+ content->writeText(" d1\n");
+}
+} // namespace
+
+///////////////////////////////////////////////////////////////////////////////
+// class SkPDFFont
+///////////////////////////////////////////////////////////////////////////////
+
+/* Resources are canonicalized and uniqueified by pointer so there has to be
+ * some additional state indicating which subset of the font is used. It
+ * must be maintained at the document granularity.
+ */
+
+SkPDFFont::~SkPDFFont() = default;
+
+SkPDFFont::SkPDFFont(SkPDFFont&&) = default;
+
+SkPDFFont& SkPDFFont::operator=(SkPDFFont&&) = default;
+
+static bool can_embed(const SkAdvancedTypefaceMetrics& metrics) {
+ return !SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag);
+}
+
+const SkAdvancedTypefaceMetrics* SkPDFFont::GetMetrics(const SkTypeface* typeface,
+ SkPDFDocument* canon) {
+ SkASSERT(typeface);
+ SkTypefaceID id = typeface->uniqueID();
+ if (std::unique_ptr<SkAdvancedTypefaceMetrics>* ptr = canon->fTypefaceMetrics.find(id)) {
+ return ptr->get(); // canon retains ownership.
+ }
+ int count = typeface->countGlyphs();
+ if (count <= 0 || count > 1 + SkTo<int>(UINT16_MAX)) {
+ // Cache nullptr to skip this check. Use SkSafeUnref().
+ canon->fTypefaceMetrics.set(id, nullptr);
+ return nullptr;
+ }
+ std::unique_ptr<SkAdvancedTypefaceMetrics> metrics = typeface->getAdvancedMetrics();
+ if (!metrics) {
+ metrics = std::make_unique<SkAdvancedTypefaceMetrics>();
+ }
+
+ if (0 == metrics->fStemV || 0 == metrics->fCapHeight) {
+ SkFont font;
+ font.setHinting(SkFontHinting::kNone);
+ font.setTypeface(sk_ref_sp(typeface));
+ font.setSize(1000); // glyph coordinate system
+ if (0 == metrics->fStemV) {
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+ int16_t stemV = SHRT_MAX;
+ for (char c : {'i', 'I', '!', '1'}) {
+ uint16_t g = font.unicharToGlyph(c);
+ SkRect bounds;
+ font.getBounds(&g, 1, &bounds, nullptr);
+ stemV = std::min(stemV, SkToS16(SkScalarRoundToInt(bounds.width())));
+ }
+ metrics->fStemV = stemV;
+ }
+ if (0 == metrics->fCapHeight) {
+ // Figure out a good guess for CapHeight: average the height of M and X.
+ SkScalar capHeight = 0;
+ for (char c : {'M', 'X'}) {
+ uint16_t g = font.unicharToGlyph(c);
+ SkRect bounds;
+ font.getBounds(&g, 1, &bounds, nullptr);
+ capHeight += bounds.height();
+ }
+ metrics->fCapHeight = SkToS16(SkScalarRoundToInt(capHeight / 2));
+ }
+ }
+ // Fonts are always subset, so always prepend the subset tag.
+ metrics->fPostScriptName.prepend(canon->nextFontSubsetTag());
+ return canon->fTypefaceMetrics.set(id, std::move(metrics))->get();
+}
+
+const std::vector<SkUnichar>& SkPDFFont::GetUnicodeMap(const SkTypeface* typeface,
+ SkPDFDocument* canon) {
+ SkASSERT(typeface);
+ SkASSERT(canon);
+ SkTypefaceID id = typeface->uniqueID();
+ if (std::vector<SkUnichar>* ptr = canon->fToUnicodeMap.find(id)) {
+ return *ptr;
+ }
+ std::vector<SkUnichar> buffer(typeface->countGlyphs());
+ typeface->getGlyphToUnicodeMap(buffer.data());
+ return *canon->fToUnicodeMap.set(id, std::move(buffer));
+}
+
+SkAdvancedTypefaceMetrics::FontType SkPDFFont::FontType(const SkTypeface& typeface,
+ const SkAdvancedTypefaceMetrics& metrics) {
+ if (SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kVariable_FontFlag) ||
+ // PDF is actually interested in the encoding of the data, not just the logical format.
+ // If the TrueType is actually wOFF or wOF2 then it should not be directly embedded in PDF.
+ // For now export these as Type3 until the subsetter can handle table based fonts.
+ // See https://github.com/harfbuzz/harfbuzz/issues/3609 and
+ // https://skia-review.googlesource.com/c/skia/+/543485
+ SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kAltDataFormat_FontFlag) ||
+ SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag)) {
+ // force Type3 fallback.
+ return SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ if (typeface.getTableSize(kCOLRTableTag)) {
+ // https://bugs.chromium.org/p/skia/issues/detail?id=12650
+ // Don't embed COLRv0 / COLRv1 fonts, fall back to bitmaps.
+ return SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ return metrics.fType;
+}
+
+static SkGlyphID first_nonzero_glyph_for_single_byte_encoding(SkGlyphID gid) {
+ return gid != 0 ? gid - (gid - 1) % 255 : 1;
+}
+
+SkPDFFont* SkPDFFont::GetFontResource(SkPDFDocument* doc,
+ const SkGlyph* glyph,
+ SkTypeface* face) {
+ SkASSERT(doc);
+ SkASSERT(face); // All SkPDFDevice::internalDrawText ensures this.
+ const SkAdvancedTypefaceMetrics* fontMetrics = SkPDFFont::GetMetrics(face, doc);
+ SkASSERT(fontMetrics); // SkPDFDevice::internalDrawText ensures the typeface is good.
+ // GetMetrics only returns null to signify a bad typeface.
+ const SkAdvancedTypefaceMetrics& metrics = *fontMetrics;
+ SkAdvancedTypefaceMetrics::FontType type = SkPDFFont::FontType(*face, metrics);
+ if (!(glyph->isEmpty() || glyph->path())) {
+ type = SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ bool multibyte = SkPDFFont::IsMultiByte(type);
+ SkGlyphID subsetCode =
+ multibyte ? 0 : first_nonzero_glyph_for_single_byte_encoding(glyph->getGlyphID());
+ uint64_t typefaceID = (static_cast<uint64_t>(SkTypeface::UniqueID(face)) << 16) | subsetCode;
+
+ if (SkPDFFont* found = doc->fFontMap.find(typefaceID)) {
+ SkASSERT(multibyte == found->multiByteGlyphs());
+ return found;
+ }
+
+ sk_sp<SkTypeface> typeface(sk_ref_sp(face));
+ SkASSERT(typeface);
+
+ SkGlyphID lastGlyph = SkToU16(typeface->countGlyphs() - 1);
+
+ // should be caught by SkPDFDevice::internalDrawText
+ SkASSERT(glyph->getGlyphID() <= lastGlyph);
+
+ SkGlyphID firstNonZeroGlyph;
+ if (multibyte) {
+ firstNonZeroGlyph = 1;
+ } else {
+ firstNonZeroGlyph = subsetCode;
+ lastGlyph = SkToU16(std::min<int>((int)lastGlyph, 254 + (int)subsetCode));
+ }
+ auto ref = doc->reserveRef();
+ return doc->fFontMap.set(
+ typefaceID, SkPDFFont(std::move(typeface), firstNonZeroGlyph, lastGlyph, type, ref));
+}
+
+SkPDFFont::SkPDFFont(sk_sp<SkTypeface> typeface,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID,
+ SkAdvancedTypefaceMetrics::FontType fontType,
+ SkPDFIndirectReference indirectReference)
+ : fTypeface(std::move(typeface))
+ , fGlyphUsage(firstGlyphID, lastGlyphID)
+ , fIndirectReference(indirectReference)
+ , fFontType(fontType)
+{
+ // Always include glyph 0
+ this->noteGlyphUsage(0);
+}
+
+void SkPDFFont::PopulateCommonFontDescriptor(SkPDFDict* descriptor,
+ const SkAdvancedTypefaceMetrics& metrics,
+ uint16_t emSize,
+ int16_t defaultWidth) {
+ descriptor->insertName("FontName", metrics.fPostScriptName);
+ descriptor->insertInt("Flags", (size_t)(metrics.fStyle | kPdfSymbolic));
+ descriptor->insertScalar("Ascent",
+ scaleFromFontUnits(metrics.fAscent, emSize));
+ descriptor->insertScalar("Descent",
+ scaleFromFontUnits(metrics.fDescent, emSize));
+ descriptor->insertScalar("StemV",
+ scaleFromFontUnits(metrics.fStemV, emSize));
+ descriptor->insertScalar("CapHeight",
+ scaleFromFontUnits(metrics.fCapHeight, emSize));
+ descriptor->insertInt("ItalicAngle", metrics.fItalicAngle);
+ descriptor->insertObject("FontBBox",
+ SkPDFMakeArray(scaleFromFontUnits(metrics.fBBox.left(), emSize),
+ scaleFromFontUnits(metrics.fBBox.bottom(), emSize),
+ scaleFromFontUnits(metrics.fBBox.right(), emSize),
+ scaleFromFontUnits(metrics.fBBox.top(), emSize)));
+ if (defaultWidth > 0) {
+ descriptor->insertScalar("MissingWidth",
+ scaleFromFontUnits(defaultWidth, emSize));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Type0Font
+///////////////////////////////////////////////////////////////////////////////
+
+// if possible, make no copy.
+static sk_sp<SkData> stream_to_data(std::unique_ptr<SkStreamAsset> stream) {
+ SkASSERT(stream);
+ (void)stream->rewind();
+ SkASSERT(stream->hasLength());
+ size_t size = stream->getLength();
+ if (const void* base = stream->getMemoryBase()) {
+ SkData::ReleaseProc proc =
+ [](const void*, void* ctx) { delete (SkStreamAsset*)ctx; };
+ return SkData::MakeWithProc(base, size, proc, stream.release());
+ }
+ return SkData::MakeFromStream(stream.get(), size);
+}
+
+static void emit_subset_type0(const SkPDFFont& font, SkPDFDocument* doc) {
+ const SkAdvancedTypefaceMetrics* metricsPtr =
+ SkPDFFont::GetMetrics(font.typeface(), doc);
+ SkASSERT(metricsPtr);
+ if (!metricsPtr) { return; }
+ const SkAdvancedTypefaceMetrics& metrics = *metricsPtr;
+ SkASSERT(can_embed(metrics));
+ SkAdvancedTypefaceMetrics::FontType type = font.getType();
+ SkTypeface* face = font.typeface();
+ SkASSERT(face);
+
+ auto descriptor = SkPDFMakeDict("FontDescriptor");
+ uint16_t emSize = SkToU16(font.typeface()->getUnitsPerEm());
+ SkPDFFont::PopulateCommonFontDescriptor(descriptor.get(), metrics, emSize, 0);
+
+ int ttcIndex;
+ std::unique_ptr<SkStreamAsset> fontAsset = face->openStream(&ttcIndex);
+ size_t fontSize = fontAsset ? fontAsset->getLength() : 0;
+ if (0 == fontSize) {
+ SkDebugf("Error: (SkTypeface)(%p)::openStream() returned "
+ "empty stream (%p) when identified as kType1CID_Font "
+ "or kTrueType_Font.\n", face, fontAsset.get());
+ } else {
+ switch (type) {
+ case SkAdvancedTypefaceMetrics::kTrueType_Font: {
+ if (!SkToBool(metrics.fFlags &
+ SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag)) {
+ SkASSERT(font.firstGlyphID() == 1);
+ sk_sp<SkData> subsetFontData = SkPDFSubsetFont(
+ stream_to_data(std::move(fontAsset)), font.glyphUsage(),
+ doc->metadata().fSubsetter,
+ metrics.fFontName.c_str(), ttcIndex);
+ if (subsetFontData) {
+ std::unique_ptr<SkPDFDict> tmp = SkPDFMakeDict();
+ tmp->insertInt("Length1", SkToInt(subsetFontData->size()));
+ descriptor->insertRef(
+ "FontFile2",
+ SkPDFStreamOut(std::move(tmp),
+ SkMemoryStream::Make(std::move(subsetFontData)),
+ doc, SkPDFSteamCompressionEnabled::Yes));
+ break;
+ }
+ // If subsetting fails, fall back to original font data.
+ fontAsset = face->openStream(&ttcIndex);
+ SkASSERT(fontAsset);
+ SkASSERT(fontAsset->getLength() == fontSize);
+ if (!fontAsset || fontAsset->getLength() == 0) { break; }
+ }
+ std::unique_ptr<SkPDFDict> tmp = SkPDFMakeDict();
+ tmp->insertInt("Length1", fontSize);
+ descriptor->insertRef("FontFile2",
+ SkPDFStreamOut(std::move(tmp), std::move(fontAsset),
+ doc, SkPDFSteamCompressionEnabled::Yes));
+ break;
+ }
+ case SkAdvancedTypefaceMetrics::kType1CID_Font: {
+ std::unique_ptr<SkPDFDict> tmp = SkPDFMakeDict();
+ tmp->insertName("Subtype", "CIDFontType0C");
+ descriptor->insertRef("FontFile3",
+ SkPDFStreamOut(std::move(tmp), std::move(fontAsset),
+ doc, SkPDFSteamCompressionEnabled::Yes));
+ break;
+ }
+ default:
+ SkASSERT(false);
+ }
+ }
+
+ auto newCIDFont = SkPDFMakeDict("Font");
+ newCIDFont->insertRef("FontDescriptor", doc->emit(*descriptor));
+ newCIDFont->insertName("BaseFont", metrics.fPostScriptName);
+
+ switch (type) {
+ case SkAdvancedTypefaceMetrics::kType1CID_Font:
+ newCIDFont->insertName("Subtype", "CIDFontType0");
+ break;
+ case SkAdvancedTypefaceMetrics::kTrueType_Font:
+ newCIDFont->insertName("Subtype", "CIDFontType2");
+ newCIDFont->insertName("CIDToGIDMap", "Identity");
+ break;
+ default:
+ SkASSERT(false);
+ }
+ auto sysInfo = SkPDFMakeDict();
+ // These are actually ASCII strings.
+ sysInfo->insertByteString("Registry", "Adobe");
+ sysInfo->insertByteString("Ordering", "Identity");
+ sysInfo->insertInt("Supplement", 0);
+ newCIDFont->insertObject("CIDSystemInfo", std::move(sysInfo));
+
+ SkScalar defaultWidth = 0;
+ {
+ std::unique_ptr<SkPDFArray> widths = SkPDFMakeCIDGlyphWidthsArray(
+ *face, font.glyphUsage(), &defaultWidth);
+ if (widths && widths->size() > 0) {
+ newCIDFont->insertObject("W", std::move(widths));
+ }
+ newCIDFont->insertScalar("DW", defaultWidth);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ SkPDFDict fontDict("Font");
+ fontDict.insertName("Subtype", "Type0");
+ fontDict.insertName("BaseFont", metrics.fPostScriptName);
+ fontDict.insertName("Encoding", "Identity-H");
+ auto descendantFonts = SkPDFMakeArray();
+ descendantFonts->appendRef(doc->emit(*newCIDFont));
+ fontDict.insertObject("DescendantFonts", std::move(descendantFonts));
+
+ const std::vector<SkUnichar>& glyphToUnicode =
+ SkPDFFont::GetUnicodeMap(font.typeface(), doc);
+ SkASSERT(SkToSizeT(font.typeface()->countGlyphs()) == glyphToUnicode.size());
+ std::unique_ptr<SkStreamAsset> toUnicode =
+ SkPDFMakeToUnicodeCmap(glyphToUnicode.data(),
+ &font.glyphUsage(),
+ font.multiByteGlyphs(),
+ font.firstGlyphID(),
+ font.lastGlyphID());
+ fontDict.insertRef("ToUnicode", SkPDFStreamOut(nullptr, std::move(toUnicode), doc));
+
+ doc->emit(fontDict, font.indirectReference());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// PDFType3Font
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+// returns [0, first, first+1, ... last-1, last]
+struct SingleByteGlyphIdIterator {
+ SingleByteGlyphIdIterator(SkGlyphID first, SkGlyphID last)
+ : fFirst(first), fLast(last) {
+ SkASSERT(fFirst > 0);
+ SkASSERT(fLast >= first);
+ }
+ struct Iter {
+ void operator++() {
+ fCurrent = (0 == fCurrent) ? fFirst : fCurrent + 1;
+ }
+ // This is an input_iterator
+ SkGlyphID operator*() const { return (SkGlyphID)fCurrent; }
+ bool operator!=(const Iter& rhs) const {
+ return fCurrent != rhs.fCurrent;
+ }
+ Iter(SkGlyphID f, int c) : fFirst(f), fCurrent(c) {}
+ private:
+ const SkGlyphID fFirst;
+ int fCurrent; // must be int to make fLast+1 to fit
+ };
+ Iter begin() const { return Iter(fFirst, 0); }
+ Iter end() const { return Iter(fFirst, (int)fLast + 1); }
+private:
+ const SkGlyphID fFirst;
+ const SkGlyphID fLast;
+};
+} // namespace
+
+struct ImageAndOffset {
+ sk_sp<SkImage> fImage;
+ SkIPoint fOffset;
+};
+static ImageAndOffset to_image(SkGlyphID gid, SkBulkGlyphMetricsAndImages* smallGlyphs) {
+ const SkGlyph* glyph = smallGlyphs->glyph(SkPackedGlyphID{gid});
+ SkMask mask = glyph->mask();
+ if (!mask.fImage) {
+ return {nullptr, {0, 0}};
+ }
+ SkIRect bounds = mask.fBounds;
+ SkBitmap bm;
+ switch (mask.fFormat) {
+ case SkMask::kBW_Format:
+ bm.allocPixels(SkImageInfo::MakeA8(bounds.width(), bounds.height()));
+ for (int y = 0; y < bm.height(); ++y) {
+ for (int x8 = 0; x8 < bm.width(); x8 += 8) {
+ uint8_t v = *mask.getAddr1(x8 + bounds.x(), y + bounds.y());
+ int e = std::min(x8 + 8, bm.width());
+ for (int x = x8; x < e; ++x) {
+ *bm.getAddr8(x, y) = (v >> (x & 0x7)) & 0x1 ? 0xFF : 0x00;
+ }
+ }
+ }
+ bm.setImmutable();
+ return {bm.asImage(), {bounds.x(), bounds.y()}};
+ case SkMask::kA8_Format:
+ bm.installPixels(SkImageInfo::MakeA8(bounds.width(), bounds.height()),
+ mask.fImage, mask.fRowBytes);
+ return {SkMakeImageFromRasterBitmap(bm, kAlways_SkCopyPixelsMode),
+ {bounds.x(), bounds.y()}};
+ case SkMask::kARGB32_Format:
+ bm.installPixels(SkImageInfo::MakeN32Premul(bounds.width(), bounds.height()),
+ mask.fImage, mask.fRowBytes);
+ return {SkMakeImageFromRasterBitmap(bm, kAlways_SkCopyPixelsMode),
+ {bounds.x(), bounds.y()}};
+ case SkMask::k3D_Format:
+ case SkMask::kLCD16_Format:
+ default:
+ SkASSERT(false);
+ return {nullptr, {0, 0}};
+ }
+}
+
+static SkPDFIndirectReference type3_descriptor(SkPDFDocument* doc,
+ const SkTypeface* typeface,
+ SkScalar xHeight) {
+ if (SkPDFIndirectReference* ptr = doc->fType3FontDescriptors.find(typeface->uniqueID())) {
+ return *ptr;
+ }
+
+ SkPDFDict descriptor("FontDescriptor");
+ int32_t fontDescriptorFlags = kPdfSymbolic;
+ if (const SkAdvancedTypefaceMetrics* metrics = SkPDFFont::GetMetrics(typeface, doc)) {
+ // Type3 FontDescriptor does not require all the same fields.
+ descriptor.insertName("FontName", metrics->fPostScriptName);
+ descriptor.insertInt("ItalicAngle", metrics->fItalicAngle);
+ fontDescriptorFlags |= (int32_t)metrics->fStyle;
+ // Adobe requests CapHeight, XHeight, and StemV be added
+ // to "greatly help our workflow downstream".
+ if (metrics->fCapHeight != 0) { descriptor.insertInt("CapHeight", metrics->fCapHeight); }
+ if (metrics->fStemV != 0) { descriptor.insertInt("StemV", metrics->fStemV); }
+ if (xHeight != 0) {
+ descriptor.insertScalar("XHeight", xHeight);
+ }
+ }
+ descriptor.insertInt("Flags", fontDescriptorFlags);
+ SkPDFIndirectReference ref = doc->emit(descriptor);
+ doc->fType3FontDescriptors.set(typeface->uniqueID(), ref);
+ return ref;
+}
+
+#ifdef SK_PDF_BITMAP_GLYPH_RASTER_SIZE
+static constexpr float kBitmapFontSize = SK_PDF_BITMAP_GLYPH_RASTER_SIZE;
+#else
+static constexpr float kBitmapFontSize = 64;
+#endif
+
+SkStrikeSpec make_small_strike(const SkTypeface& typeface) {
+ SkFont font(sk_ref_sp(&typeface), kBitmapFontSize);
+ font.setHinting(SkFontHinting::kNone);
+ font.setEdging(SkFont::Edging::kAlias);
+ return SkStrikeSpec::MakeMask(font,
+ SkPaint(),
+ SkSurfaceProps(0, kUnknown_SkPixelGeometry),
+ SkScalerContextFlags::kFakeGammaAndBoostContrast,
+ SkMatrix::I());
+}
+
+static void emit_subset_type3(const SkPDFFont& pdfFont, SkPDFDocument* doc) {
+ SkTypeface* typeface = pdfFont.typeface();
+ SkGlyphID firstGlyphID = pdfFont.firstGlyphID();
+ SkGlyphID lastGlyphID = pdfFont.lastGlyphID();
+ const SkPDFGlyphUse& subset = pdfFont.glyphUsage();
+ SkASSERT(lastGlyphID >= firstGlyphID);
+ // Remove unused glyphs at the end of the range.
+ // Keep the lastGlyphID >= firstGlyphID invariant true.
+ while (lastGlyphID > firstGlyphID && !subset.has(lastGlyphID)) {
+ --lastGlyphID;
+ }
+ int unitsPerEm;
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePDFVector(*typeface, &unitsPerEm);
+ auto strike = strikeSpec.findOrCreateStrike();
+ SkASSERT(strike);
+ SkScalar emSize = (SkScalar)unitsPerEm;
+ SkScalar xHeight = strike->getFontMetrics().fXHeight;
+ SkBulkGlyphMetricsAndPaths metricsAndPaths((sk_sp<SkStrike>(strike)));
+ SkBulkGlyphMetricsAndDrawables metricsAndDrawables(std::move(strike));
+
+ SkStrikeSpec strikeSpecSmall = kBitmapFontSize > 0 ? make_small_strike(*typeface)
+ : strikeSpec;
+
+ SkBulkGlyphMetricsAndImages smallGlyphs(strikeSpecSmall);
+ float bitmapScale = kBitmapFontSize > 0 ? emSize / kBitmapFontSize : 1.0f;
+
+ SkPDFDict font("Font");
+ font.insertName("Subtype", "Type3");
+ // Flip about the x-axis and scale by 1/emSize.
+ SkMatrix fontMatrix;
+ fontMatrix.setScale(SkScalarInvert(emSize), -SkScalarInvert(emSize));
+ font.insertObject("FontMatrix", SkPDFUtils::MatrixToArray(fontMatrix));
+
+ auto charProcs = SkPDFMakeDict();
+ auto encoding = SkPDFMakeDict("Encoding");
+
+ auto encDiffs = SkPDFMakeArray();
+ // length(firstGlyphID .. lastGlyphID) == lastGlyphID - firstGlyphID + 1
+ // plus 1 for glyph 0;
+ SkASSERT(firstGlyphID > 0);
+ SkASSERT(lastGlyphID >= firstGlyphID);
+ int glyphCount = lastGlyphID - firstGlyphID + 2;
+ // one other entry for the index of first glyph.
+ encDiffs->reserve(glyphCount + 1);
+ encDiffs->appendInt(0); // index of first glyph
+
+ auto widthArray = SkPDFMakeArray();
+ widthArray->reserve(glyphCount);
+
+ SkIRect bbox = SkIRect::MakeEmpty();
+
+ std::vector<std::pair<SkGlyphID, SkPDFIndirectReference>> imageGlyphs;
+ for (SkGlyphID gID : SingleByteGlyphIdIterator(firstGlyphID, lastGlyphID)) {
+ bool skipGlyph = gID != 0 && !subset.has(gID);
+ SkString characterName;
+ SkScalar advance = 0.0f;
+ SkIRect glyphBBox;
+ if (skipGlyph) {
+ characterName.set("g0");
+ } else {
+ characterName.printf("g%X", gID);
+ const SkGlyph* pathGlyph = metricsAndPaths.glyph(gID);
+ const SkGlyph* drawableGlyph = metricsAndDrawables.glyph(gID);
+ advance = pathGlyph->advanceX();
+ glyphBBox = pathGlyph->iRect();
+ bbox.join(glyphBBox);
+ const SkPath* path = pathGlyph->path();
+ SkDrawable* drawable = drawableGlyph->drawable();
+ SkDynamicMemoryWStream content;
+ if (drawable && !drawable->getBounds().isEmpty()) {
+ sk_sp<SkPDFDevice> glyphDevice = sk_make_sp<SkPDFDevice>(glyphBBox.size(), doc);
+ SkCanvas canvas(glyphDevice);
+ canvas.translate(-glyphBBox.fLeft, -glyphBBox.fTop);
+ canvas.drawDrawable(drawable);
+ SkPDFIndirectReference xobject = SkPDFMakeFormXObject(
+ doc, glyphDevice->content(),
+ SkPDFMakeArray(0, 0, glyphBBox.width(), glyphBBox.height()),
+ glyphDevice->makeResourceDict(),
+ SkMatrix::Translate(glyphBBox.fLeft, glyphBBox.fTop), nullptr);
+ imageGlyphs.emplace_back(gID, xobject);
+ SkPDFUtils::AppendScalar(drawableGlyph->advanceX(), &content);
+ content.writeText(" 0 d0\n1 0 0 1 0 0 cm\n/X");
+ content.write(characterName.c_str(), characterName.size());
+ content.writeText(" Do\n");
+ } else if (path && !path->isEmpty()) {
+ setGlyphWidthAndBoundingBox(pathGlyph->advanceX(), glyphBBox, &content);
+ SkPDFUtils::EmitPath(*path, SkPaint::kFill_Style, &content);
+ SkPDFUtils::PaintPath(SkPaint::kFill_Style, path->getFillType(), &content);
+ } else {
+ auto pimg = to_image(gID, &smallGlyphs);
+ if (!pimg.fImage) {
+ setGlyphWidthAndBoundingBox(pathGlyph->advanceX(), glyphBBox, &content);
+ } else {
+ using SkPDFUtils::AppendScalar;
+ imageGlyphs.emplace_back(gID, SkPDFSerializeImage(pimg.fImage.get(), doc));
+ AppendScalar(pathGlyph->advanceX(), &content);
+ content.writeText(" 0 d0\n");
+ AppendScalar(pimg.fImage->width() * bitmapScale, &content);
+ content.writeText(" 0 0 ");
+ AppendScalar(-pimg.fImage->height() * bitmapScale, &content);
+ content.writeText(" ");
+ AppendScalar(pimg.fOffset.x() * bitmapScale, &content);
+ content.writeText(" ");
+ AppendScalar((pimg.fImage->height() + pimg.fOffset.y()) * bitmapScale,
+ &content);
+ content.writeText(" cm\n/X");
+ content.write(characterName.c_str(), characterName.size());
+ content.writeText(" Do\n");
+ }
+ }
+ charProcs->insertRef(characterName, SkPDFStreamOut(nullptr,
+ content.detachAsStream(), doc));
+ }
+ encDiffs->appendName(std::move(characterName));
+ widthArray->appendScalar(advance);
+ }
+
+ if (!imageGlyphs.empty()) {
+ auto d0 = SkPDFMakeDict();
+ for (const auto& pair : imageGlyphs) {
+ d0->insertRef(SkStringPrintf("Xg%X", pair.first), pair.second);
+ }
+ auto d1 = SkPDFMakeDict();
+ d1->insertObject("XObject", std::move(d0));
+ font.insertObject("Resources", std::move(d1));
+ }
+
+ encoding->insertObject("Differences", std::move(encDiffs));
+ font.insertInt("FirstChar", 0);
+ font.insertInt("LastChar", lastGlyphID - firstGlyphID + 1);
+ /* FontBBox: "A rectangle expressed in the glyph coordinate
+ system, specifying the font bounding box. This is the smallest
+ rectangle enclosing the shape that would result if all of the
+ glyphs of the font were placed with their origins coincident and
+ then filled." */
+ font.insertObject("FontBBox", SkPDFMakeArray(bbox.left(),
+ bbox.bottom(),
+ bbox.right(),
+ bbox.top()));
+
+ font.insertName("CIDToGIDMap", "Identity");
+
+ const std::vector<SkUnichar>& glyphToUnicode = SkPDFFont::GetUnicodeMap(typeface, doc);
+ SkASSERT(glyphToUnicode.size() == SkToSizeT(typeface->countGlyphs()));
+ auto toUnicodeCmap = SkPDFMakeToUnicodeCmap(glyphToUnicode.data(),
+ &subset,
+ false,
+ firstGlyphID,
+ lastGlyphID);
+ font.insertRef("ToUnicode", SkPDFStreamOut(nullptr, std::move(toUnicodeCmap), doc));
+ font.insertRef("FontDescriptor", type3_descriptor(doc, typeface, xHeight));
+ font.insertObject("Widths", std::move(widthArray));
+ font.insertObject("Encoding", std::move(encoding));
+ font.insertObject("CharProcs", std::move(charProcs));
+
+ doc->emit(font, pdfFont.indirectReference());
+}
+
+void SkPDFFont::emitSubset(SkPDFDocument* doc) const {
+ switch (fFontType) {
+ case SkAdvancedTypefaceMetrics::kType1CID_Font:
+ case SkAdvancedTypefaceMetrics::kTrueType_Font:
+ return emit_subset_type0(*this, doc);
+#ifndef SK_PDF_DO_NOT_SUPPORT_TYPE_1_FONTS
+ case SkAdvancedTypefaceMetrics::kType1_Font:
+ return SkPDFEmitType1Font(*this, doc);
+#endif
+ default:
+ return emit_subset_type3(*this, doc);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SkPDFFont::CanEmbedTypeface(SkTypeface* typeface, SkPDFDocument* doc) {
+ const SkAdvancedTypefaceMetrics* metrics = SkPDFFont::GetMetrics(typeface, doc);
+ return metrics && can_embed(*metrics);
+}
+
diff --git a/gfx/skia/skia/src/pdf/SkPDFFont.h b/gfx/skia/skia/src/pdf/SkPDFFont.h
new file mode 100644
index 0000000000..18cd483d0c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFont.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFFont_DEFINED
+#define SkPDFFont_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/pdf/SkPDFGlyphUse.h"
+#include "src/pdf/SkPDFTypes.h"
+
+#include <vector>
+
+class SkPDFDocument;
+class SkString;
+
+/** \class SkPDFFont
+ A PDF Object class representing a font. The font may have resources
+ attached to it in order to embed the font. SkPDFFonts are canonicalized
+ so that resource deduplication will only include one copy of a font.
+ This class uses the same pattern as SkPDFGraphicState, a static weak
+ reference to each instantiated class.
+*/
+class SkPDFFont {
+public:
+ ~SkPDFFont();
+ SkPDFFont(SkPDFFont&&);
+ SkPDFFont& operator=(SkPDFFont&&);
+
+ /** Returns the typeface represented by this class. Returns nullptr for the
+ * default typeface.
+ */
+ SkTypeface* typeface() const { return fTypeface.get(); }
+
+ /** Returns the font type represented in this font. For Type0 fonts,
+ * returns the type of the descendant font.
+ */
+ SkAdvancedTypefaceMetrics::FontType getType() const { return fFontType; }
+
+ static SkAdvancedTypefaceMetrics::FontType FontType(const SkTypeface&,
+ const SkAdvancedTypefaceMetrics&);
+ static void GetType1GlyphNames(const SkTypeface&, SkString*);
+
+ static bool IsMultiByte(SkAdvancedTypefaceMetrics::FontType type) {
+ return type == SkAdvancedTypefaceMetrics::kType1CID_Font ||
+ type == SkAdvancedTypefaceMetrics::kTrueType_Font;
+ }
+
+ /** Returns true if this font encoding supports glyph IDs above 255.
+ */
+ bool multiByteGlyphs() const { return SkPDFFont::IsMultiByte(this->getType()); }
+
+ /** Return true if this font has an encoding for the passed glyph id.
+ */
+ bool hasGlyph(SkGlyphID gid) {
+ return (gid >= this->firstGlyphID() && gid <= this->lastGlyphID()) || gid == 0;
+ }
+
+ /** Convert the input glyph ID into the font encoding. */
+ SkGlyphID glyphToPDFFontEncoding(SkGlyphID gid) const {
+ if (this->multiByteGlyphs() || gid == 0) {
+ return gid;
+ }
+ SkASSERT(gid >= this->firstGlyphID() && gid <= this->lastGlyphID());
+ SkASSERT(this->firstGlyphID() > 0);
+ return gid - this->firstGlyphID() + 1;
+ }
+
+ void noteGlyphUsage(SkGlyphID glyph) {
+ SkASSERT(this->hasGlyph(glyph));
+ fGlyphUsage.set(glyph);
+ }
+
+ SkPDFIndirectReference indirectReference() const { return fIndirectReference; }
+
+ /** Get the font resource for the passed typeface and glyphID. The
+ * reference count of the object is incremented and it is the caller's
+ * responsibility to unreference it when done. This is needed to
+ * accommodate the weak reference pattern used when the returned object
+ * is new and has no other references.
+ * @param typeface The typeface to find, not nullptr.
+ * @param glyphID Specify which section of a large font is of interest.
+ */
+ static SkPDFFont* GetFontResource(SkPDFDocument* doc,
+ const SkGlyph* glyphs,
+ SkTypeface* typeface);
+
+ /** Gets SkAdvancedTypefaceMetrics, and caches the result.
+ * @param typeface can not be nullptr.
+ * @return nullptr only when typeface is bad.
+ */
+ static const SkAdvancedTypefaceMetrics* GetMetrics(const SkTypeface* typeface,
+ SkPDFDocument* canon);
+
+ static const std::vector<SkUnichar>& GetUnicodeMap(const SkTypeface* typeface,
+ SkPDFDocument* canon);
+
+ static void PopulateCommonFontDescriptor(SkPDFDict* descriptor,
+ const SkAdvancedTypefaceMetrics&,
+ uint16_t emSize,
+ int16_t defaultWidth);
+
+ void emitSubset(SkPDFDocument*) const;
+
+ /**
+ * Return false iff the typeface has its NotEmbeddable flag set.
+ * typeface is not nullptr
+ */
+ static bool CanEmbedTypeface(SkTypeface*, SkPDFDocument*);
+
+ SkGlyphID firstGlyphID() const { return fGlyphUsage.firstNonZero(); }
+ SkGlyphID lastGlyphID() const { return fGlyphUsage.lastGlyph(); }
+ const SkPDFGlyphUse& glyphUsage() const { return fGlyphUsage; }
+ sk_sp<SkTypeface> refTypeface() const { return fTypeface; }
+
+private:
+ sk_sp<SkTypeface> fTypeface;
+ SkPDFGlyphUse fGlyphUsage;
+ SkPDFIndirectReference fIndirectReference;
+ SkAdvancedTypefaceMetrics::FontType fFontType;
+
+ SkPDFFont(sk_sp<SkTypeface>,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID,
+ SkAdvancedTypefaceMetrics::FontType fontType,
+ SkPDFIndirectReference indirectReference);
+ // The glyph IDs accessible with this font. For Type1 (non CID) fonts,
+ // this will be a subset if the font has more than 255 glyphs.
+
+ SkPDFFont() = delete;
+ SkPDFFont(const SkPDFFont&) = delete;
+ SkPDFFont& operator=(const SkPDFFont&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp b/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp
new file mode 100644
index 0000000000..cc07e2a0fd
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFormXObject.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFUtils.h"
+
+SkPDFIndirectReference SkPDFMakeFormXObject(SkPDFDocument* doc,
+ std::unique_ptr<SkStreamAsset> content,
+ std::unique_ptr<SkPDFArray> mediaBox,
+ std::unique_ptr<SkPDFDict> resourceDict,
+ const SkMatrix& inverseTransform,
+ const char* colorSpace) {
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertName("Type", "XObject");
+ dict->insertName("Subtype", "Form");
+ if (!inverseTransform.isIdentity()) {
+ dict->insertObject("Matrix", SkPDFUtils::MatrixToArray(inverseTransform));
+ }
+ dict->insertObject("Resources", std::move(resourceDict));
+ dict->insertObject("BBox", std::move(mediaBox));
+
+ // Right now FormXObject is only used for saveLayer, which implies
+ // isolated blending. Do this conditionally if that changes.
+ // TODO(halcanary): Is this comment obsolete, since we use it for
+ // alpha masks?
+ auto group = SkPDFMakeDict("Group");
+ group->insertName("S", "Transparency");
+ if (colorSpace != nullptr) {
+ group->insertName("CS", colorSpace);
+ }
+ group->insertBool("I", true); // Isolated.
+ dict->insertObject("Group", std::move(group));
+ return SkPDFStreamOut(std::move(dict), std::move(content), doc);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFFormXObject.h b/gfx/skia/skia/src/pdf/SkPDFFormXObject.h
new file mode 100644
index 0000000000..b12c8b2ea7
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFFormXObject.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFFormXObject_DEFINED
+#define SkPDFFormXObject_DEFINED
+
+#include "src/pdf/SkPDFDevice.h"
+#include "src/pdf/SkPDFTypes.h"
+
+class SkPDFDocument;
+
+/** A form XObject is a self contained description of a graphics
+ object. A form XObject is a page object with slightly different
+ syntax, that can be drawn into a page content stream, just like a
+ bitmap XObject can be drawn into a page content stream.
+*/
+SkPDFIndirectReference SkPDFMakeFormXObject(SkPDFDocument* doc,
+ std::unique_ptr<SkStreamAsset> content,
+ std::unique_ptr<SkPDFArray> mediaBox,
+ std::unique_ptr<SkPDFDict> resourceDict,
+ const SkMatrix& inverseTransform,
+ const char* colorSpace);
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFGlyphUse.h b/gfx/skia/skia/src/pdf/SkPDFGlyphUse.h
new file mode 100644
index 0000000000..fa7627ab51
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGlyphUse.h
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFGlyphUse_DEFINED
+#define SkPDFGlyphUse_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/utils/SkBitSet.h"
+
+class SkPDFGlyphUse {
+public:
+ SkPDFGlyphUse() : fBitSet(0) {}
+ SkPDFGlyphUse(SkGlyphID firstNonZero, SkGlyphID lastGlyph)
+ : fBitSet(lastGlyph - firstNonZero + 2)
+ , fFirstNonZero(firstNonZero)
+ , fLastGlyph(lastGlyph) { SkASSERT(firstNonZero >= 1); }
+ ~SkPDFGlyphUse() = default;
+ SkPDFGlyphUse(SkPDFGlyphUse&&) = default;
+ SkPDFGlyphUse& operator=(SkPDFGlyphUse&&) = default;
+
+ SkGlyphID firstNonZero() const { return fFirstNonZero; }
+ SkGlyphID lastGlyph() const { return fLastGlyph; }
+ void set(SkGlyphID gid) { fBitSet.set(this->toCode(gid)); }
+ bool has(SkGlyphID gid) const { return fBitSet.test(this->toCode(gid)); }
+
+ template<typename FN>
+ void getSetValues(FN f) const {
+ if (fFirstNonZero == 1) {
+ return fBitSet.forEachSetIndex(std::move(f));
+ }
+ uint16_t offset = fFirstNonZero - 1;
+ fBitSet.forEachSetIndex([&f, offset](unsigned v) { f(v == 0 ? v : v + offset); });
+ }
+
+private:
+ SkBitSet fBitSet;
+ SkGlyphID fFirstNonZero = 0;
+ SkGlyphID fLastGlyph = 0;
+
+ uint16_t toCode(SkGlyphID gid) const {
+ if (gid == 0 || fFirstNonZero == 1) {
+ return gid;
+ }
+ SkASSERT(gid >= fFirstNonZero && gid <= fLastGlyph);
+ return gid - fFirstNonZero + 1;
+ }
+ SkPDFGlyphUse(const SkPDFGlyphUse&) = delete;
+ SkPDFGlyphUse& operator=(const SkPDFGlyphUse&) = delete;
+};
+#endif // SkPDFGlyphUse_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFGradientShader.cpp b/gfx/skia/skia/src/pdf/SkPDFGradientShader.cpp
new file mode 100644
index 0000000000..ffaaf06e46
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGradientShader.cpp
@@ -0,0 +1,1013 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFGradientShader.h"
+
+#include "include/docs/SkPDFDocument.h"
+#include "src/core/SkOpts.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFGraphicState.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkPDFUtils.h"
+
+using namespace skia_private;
+
+static uint32_t hash(const SkShaderBase::GradientInfo& v) {
+ uint32_t buffer[] = {
+ (uint32_t)v.fColorCount,
+ SkOpts::hash(v.fColors, v.fColorCount * sizeof(SkColor)),
+ SkOpts::hash(v.fColorOffsets, v.fColorCount * sizeof(SkScalar)),
+ SkOpts::hash(v.fPoint, 2 * sizeof(SkPoint)),
+ SkOpts::hash(v.fRadius, 2 * sizeof(SkScalar)),
+ (uint32_t)v.fTileMode,
+ v.fGradientFlags,
+ };
+ return SkOpts::hash(buffer, sizeof(buffer));
+}
+
+static uint32_t hash(const SkPDFGradientShader::Key& k) {
+ uint32_t buffer[] = {
+ (uint32_t)k.fType,
+ hash(k.fInfo),
+ SkOpts::hash(&k.fCanvasTransform, sizeof(SkMatrix)),
+ SkOpts::hash(&k.fShaderTransform, sizeof(SkMatrix)),
+ SkOpts::hash(&k.fBBox, sizeof(SkIRect))
+ };
+ return SkOpts::hash(buffer, sizeof(buffer));
+}
+
+static void unit_to_points_matrix(const SkPoint pts[2], SkMatrix* matrix) {
+ SkVector vec = pts[1] - pts[0];
+ SkScalar mag = vec.length();
+ SkScalar inv = mag ? SkScalarInvert(mag) : 0;
+
+ vec.scale(inv);
+ matrix->setSinCos(vec.fY, vec.fX);
+ matrix->preScale(mag, mag);
+ matrix->postTranslate(pts[0].fX, pts[0].fY);
+}
+
+static const int kColorComponents = 3;
+typedef uint8_t ColorTuple[kColorComponents];
+
+/* Assumes t - startOffset is on the stack and does a linear interpolation on t
+ between startOffset and endOffset from prevColor to curColor (for each color
+ component), leaving the result in component order on the stack. It assumes
+ there are always 3 components per color.
+ @param range endOffset - startOffset
+ @param beginColor The previous color.
+ @param endColor The current color.
+ @param result The result ps function.
+ */
+static void interpolate_color_code(SkScalar range, SkColor beginColor, SkColor endColor,
+ SkDynamicMemoryWStream* result) {
+ SkASSERT(range != SkIntToScalar(0));
+
+ /* Linearly interpolate from the previous color to the current.
+ Scale the colors from 0..255 to 0..1 and determine the multipliers for interpolation.
+ C{r,g,b}(t, section) = t - offset_(section-1) + t * Multiplier{r,g,b}.
+ */
+
+ ColorTuple curColor = { SkTo<uint8_t>(SkColorGetR(endColor)),
+ SkTo<uint8_t>(SkColorGetG(endColor)),
+ SkTo<uint8_t>(SkColorGetB(endColor)) };
+
+ ColorTuple prevColor = { SkTo<uint8_t>(SkColorGetR(beginColor)),
+ SkTo<uint8_t>(SkColorGetG(beginColor)),
+ SkTo<uint8_t>(SkColorGetB(beginColor)) };
+
+ // Figure out how to scale each color component.
+ SkScalar multiplier[kColorComponents];
+ for (int i = 0; i < kColorComponents; i++) {
+ static const SkScalar kColorScale = SkScalarInvert(255);
+ multiplier[i] = kColorScale * (curColor[i] - prevColor[i]) / range;
+ }
+
+ // Calculate when we no longer need to keep a copy of the input parameter t.
+ // If the last component to use t is i, then dupInput[0..i - 1] = true
+ // and dupInput[i .. components] = false.
+ bool dupInput[kColorComponents];
+ dupInput[kColorComponents - 1] = false;
+ for (int i = kColorComponents - 2; i >= 0; i--) {
+ dupInput[i] = dupInput[i + 1] || multiplier[i + 1] != 0;
+ }
+
+ if (!dupInput[0] && multiplier[0] == 0) {
+ result->writeText("pop ");
+ }
+
+ for (int i = 0; i < kColorComponents; i++) {
+ // If the next components needs t and this component will consume a
+ // copy, make another copy.
+ if (dupInput[i] && multiplier[i] != 0) {
+ result->writeText("dup ");
+ }
+
+ if (multiplier[i] == 0) {
+ SkPDFUtils::AppendColorComponent(prevColor[i], result);
+ result->writeText(" ");
+ } else {
+ if (multiplier[i] != 1) {
+ SkPDFUtils::AppendScalar(multiplier[i], result);
+ result->writeText(" mul ");
+ }
+ if (prevColor[i] != 0) {
+ SkPDFUtils::AppendColorComponent(prevColor[i], result);
+ result->writeText(" add ");
+ }
+ }
+
+ if (dupInput[i]) {
+ result->writeText("exch ");
+ }
+ }
+}
+
+static void write_gradient_ranges(const SkShaderBase::GradientInfo& info, SkSpan<size_t> rangeEnds,
+ bool top, bool first, SkDynamicMemoryWStream* result) {
+ SkASSERT(rangeEnds.size() > 0);
+
+ size_t rangeEndIndex = rangeEnds[rangeEnds.size() - 1];
+ SkScalar rangeEnd = info.fColorOffsets[rangeEndIndex];
+
+ // Each range check tests 0 < t <= end.
+ if (top) {
+ SkASSERT(first);
+ // t may have been set to 0 to signal that the answer has already been found.
+ result->writeText("dup dup 0 gt exch "); // In Preview 11.0 (1033.3) `0. 0 ne` is true.
+ SkPDFUtils::AppendScalar(rangeEnd, result);
+ result->writeText(" le and {\n");
+ } else if (first) {
+ // After the top level check, only t <= end needs to be tested on if (lo) side.
+ result->writeText("dup ");
+ SkPDFUtils::AppendScalar(rangeEnd, result);
+ result->writeText(" le {\n");
+ } else {
+ // The else (hi) side.
+ result->writeText("{\n");
+ }
+
+ if (rangeEnds.size() == 1) {
+ // Set the stack to [r g b].
+ size_t rangeBeginIndex = rangeEndIndex - 1;
+ SkScalar rangeBegin = info.fColorOffsets[rangeBeginIndex];
+ SkPDFUtils::AppendScalar(rangeBegin, result);
+ result->writeText(" sub "); // consume t, put t - startOffset on the stack.
+ interpolate_color_code(rangeEnd - rangeBegin,
+ info.fColors[rangeBeginIndex], info.fColors[rangeEndIndex], result);
+ result->writeText("\n");
+ } else {
+ size_t loCount = rangeEnds.size() / 2;
+ SkSpan<size_t> loSpan = rangeEnds.subspan(0, loCount);
+ write_gradient_ranges(info, loSpan, false, true, result);
+
+ SkSpan<size_t> hiSpan = rangeEnds.subspan(loCount, rangeEnds.size() - loCount);
+ write_gradient_ranges(info, hiSpan, false, false, result);
+ }
+
+ if (top) {
+ // Put 0 on the stack for t once here instead of after every call to interpolate_color_code.
+ result->writeText("0} if\n");
+ } else if (first) {
+ result->writeText("}"); // The else (hi) side will come next.
+ } else {
+ result->writeText("} ifelse\n");
+ }
+}
+
+/* Generate Type 4 function code to map t to the passed gradient, clamping at the ends.
+ The types integer, real, and boolean are available.
+ There are no string, array, procedure, variable, or name types available.
+
+ The generated code will be of the following form with all values hard coded.
+
+ if (t <= 0) {
+ ret = color[0];
+ t = 0;
+ }
+ if (t > 0 && t <= stop[4]) {
+ if (t <= stop[2]) {
+ if (t <= stop[1]) {
+ ret = interp(t - stop[0], stop[1] - stop[0], color[0], color[1]);
+ } else {
+ ret = interp(t - stop[1], stop[2] - stop[1], color[1], color[2]);
+ }
+ } else {
+ if (t <= stop[3] {
+ ret = interp(t - stop[2], stop[3] - stop[2], color[2], color[3]);
+ } else {
+ ret = interp(t - stop[3], stop[4] - stop[3], color[3], color[4]);
+ }
+ }
+ t = 0;
+ }
+ if (t > 0) {
+ ret = color[4];
+ }
+
+ which in PDF will be represented like
+
+ dup 0 le {pop 0 0 0 0} if
+ dup dup 0 gt exch 1 le and {
+ dup .5 le {
+ dup .25 le {
+ 0 sub 2 mul 0 0
+ }{
+ .25 sub .5 exch 2 mul 0
+ } ifelse
+ }{
+ dup .75 le {
+ .5 sub .5 exch .5 exch 2 mul
+ }{
+ .75 sub dup 2 mul .5 add exch dup 2 mul .5 add exch 2 mul .5 add
+ } ifelse
+ } ifelse
+ 0} if
+ 0 gt {1 1 1} if
+ */
+static void gradient_function_code(const SkShaderBase::GradientInfo& info,
+ SkDynamicMemoryWStream* result) {
+ // While looking for a hit the stack is [t].
+ // After finding a hit the stack is [r g b 0].
+ // The 0 is consumed just before returning.
+
+ // The initial range has no previous and contains a solid color.
+ // Any t <= 0 will be handled by this initial range, so later t == 0 indicates a hit was found.
+ result->writeText("dup 0 le {pop ");
+ SkPDFUtils::AppendColorComponent(SkColorGetR(info.fColors[0]), result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(SkColorGetG(info.fColors[0]), result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(SkColorGetB(info.fColors[0]), result);
+ result->writeText(" 0} if\n");
+
+ // Optimize out ranges which don't make any visual difference.
+ AutoSTMalloc<4, size_t> rangeEnds(info.fColorCount);
+ size_t rangeEndsCount = 0;
+ for (int i = 1; i < info.fColorCount; ++i) {
+ // Ignoring the alpha, is this range the same solid color as the next range?
+ // This optimizes gradients where sometimes only the color or only the alpha is changing.
+ auto eqIgnoringAlpha = [](SkColor a, SkColor b) {
+ return SkColorSetA(a, 0x00) == SkColorSetA(b, 0x00);
+ };
+ bool constantColorBothSides =
+ eqIgnoringAlpha(info.fColors[i-1], info.fColors[i]) &&// This range is a solid color.
+ i != info.fColorCount-1 && // This is not the last range.
+ eqIgnoringAlpha(info.fColors[i], info.fColors[i+1]); // Next range is same solid color.
+
+ // Does this range have zero size?
+ bool degenerateRange = info.fColorOffsets[i-1] == info.fColorOffsets[i];
+
+ if (!degenerateRange && !constantColorBothSides) {
+ rangeEnds[rangeEndsCount] = i;
+ ++rangeEndsCount;
+ }
+ }
+
+ // If a cap on depth is needed, loop here.
+ write_gradient_ranges(info, SkSpan(rangeEnds.get(), rangeEndsCount), true, true, result);
+
+ // Clamp the final color.
+ result->writeText("0 gt {");
+ SkPDFUtils::AppendColorComponent(SkColorGetR(info.fColors[info.fColorCount - 1]), result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(SkColorGetG(info.fColors[info.fColorCount - 1]), result);
+ result->writeText(" ");
+ SkPDFUtils::AppendColorComponent(SkColorGetB(info.fColors[info.fColorCount - 1]), result);
+ result->writeText("} if\n");
+}
+
+static std::unique_ptr<SkPDFDict> createInterpolationFunction(const ColorTuple& color1,
+ const ColorTuple& color2) {
+ auto retval = SkPDFMakeDict();
+
+ auto c0 = SkPDFMakeArray();
+ c0->appendColorComponent(color1[0]);
+ c0->appendColorComponent(color1[1]);
+ c0->appendColorComponent(color1[2]);
+ retval->insertObject("C0", std::move(c0));
+
+ auto c1 = SkPDFMakeArray();
+ c1->appendColorComponent(color2[0]);
+ c1->appendColorComponent(color2[1]);
+ c1->appendColorComponent(color2[2]);
+ retval->insertObject("C1", std::move(c1));
+
+ retval->insertObject("Domain", SkPDFMakeArray(0, 1));
+
+ retval->insertInt("FunctionType", 2);
+ retval->insertScalar("N", 1.0f);
+
+ return retval;
+}
+
+// Build a PDF "stitching" (FunctionType 3) function that pieces together one
+// interpolation function per pair of adjacent color stops; collapses to a
+// single FunctionType 2 function when only two stops remain after the
+// normalization below.
+static std::unique_ptr<SkPDFDict> gradientStitchCode(const SkShaderBase::GradientInfo& info) {
+    auto retval = SkPDFMakeDict();
+
+    // normalize color stops: work on local copies so info stays untouched.
+    int colorCount = info.fColorCount;
+    std::vector<SkColor> colors(info.fColors, info.fColors + colorCount);
+    std::vector<SkScalar> colorOffsets(info.fColorOffsets, info.fColorOffsets + colorCount);
+
+    int i = 1;
+    while (i < colorCount - 1) {
+        // ensure stops are in order
+        if (colorOffsets[i - 1] > colorOffsets[i]) {
+            colorOffsets[i] = colorOffsets[i - 1];
+        }
+
+        // remove points that are between 2 coincident points
+        if ((colorOffsets[i - 1] == colorOffsets[i]) && (colorOffsets[i] == colorOffsets[i + 1])) {
+            colorCount -= 1;
+            colors.erase(colors.begin() + i);
+            colorOffsets.erase(colorOffsets.begin() + i);
+        } else {
+            i++;
+        }
+    }
+    // find coincident points and slightly move them over, since the PDF
+    // stitch Bounds must be strictly increasing.
+    for (i = 1; i < colorCount - 1; i++) {
+        if (colorOffsets[i - 1] == colorOffsets[i]) {
+            colorOffsets[i] += 0.00001f;
+        }
+    }
+    // check if last 2 stops coincide (i == colorCount - 1 after the loop)
+    if (colorOffsets[i - 1] == colorOffsets[i]) {
+        colorOffsets[i - 1] -= 0.00001f;
+    }
+
+    // Repack the (possibly reduced) stop colors as RGB tuples.
+    AutoSTMalloc<4, ColorTuple> colorDataAlloc(colorCount);
+    ColorTuple *colorData = colorDataAlloc.get();
+    for (int idx = 0; idx < colorCount; idx++) {
+        colorData[idx][0] = SkColorGetR(colors[idx]);
+        colorData[idx][1] = SkColorGetG(colors[idx]);
+        colorData[idx][2] = SkColorGetB(colors[idx]);
+    }
+
+    // no need for a stitch function if there are only 2 stops.
+    if (colorCount == 2)
+        return createInterpolationFunction(colorData[0], colorData[1]);
+
+    auto encode = SkPDFMakeArray();
+    auto bounds = SkPDFMakeArray();
+    auto functions = SkPDFMakeArray();
+
+    retval->insertObject("Domain", SkPDFMakeArray(0, 1));
+    retval->insertInt("FunctionType", 3);
+
+    // One sub-function per stop interval; Bounds holds the interior stop
+    // offsets, and each sub-domain is re-encoded to [0, 1].
+    for (int idx = 1; idx < colorCount; idx++) {
+        if (idx > 1) {
+            bounds->appendScalar(colorOffsets[idx-1]);
+        }
+
+        encode->appendScalar(0);
+        encode->appendScalar(1.0f);
+
+        functions->appendObject(createInterpolationFunction(colorData[idx-1], colorData[idx]));
+    }
+
+    retval->insertObject("Encode", std::move(encode));
+    retval->insertObject("Bounds", std::move(bounds));
+    retval->insertObject("Functions", std::move(functions));
+
+    return retval;
+}
+
+/* Map a value of t on the stack into [0, 1) for Repeat or Mirror tile mode. */
+// NOTE(review): other modes (e.g. kClamp) intentionally emit nothing here;
+// clamping is presumably handled elsewhere (e.g. by the function Domain) --
+// confirm against the callers.
+static void tileModeCode(SkTileMode mode, SkDynamicMemoryWStream* result) {
+    if (mode == SkTileMode::kRepeat) {
+        result->writeText("dup truncate sub\n");     // Get the fractional part.
+        result->writeText("dup 0 le {1 add} if\n");  // Map (-1,0) => (0,1)
+        return;
+    }
+
+    if (mode == SkTileMode::kMirror) {
+        // In Preview 11.0 (1033.3) `a n mod r eq` (with a and n both integers, r integer or real)
+        // early aborts the function when false would be put on the stack.
+        // Work around this by re-writing `t 2 mod 1 eq` as `t 2 mod 0 gt`.
+
+        // Map t mod 2 into [0, 1, 1, 0].
+        //     Code                        Stack t
+        result->writeText("abs "                  // +t
+                          "dup "                  // +t.s +t.s
+                          "truncate "             // +t.s +t
+                          "dup "                  // +t.s +t +t
+                          "cvi "                  // +t.s +t +T
+                          "2 mod "                // +t.s +t (+T mod 2)
+          /*"1 eq "*/     "0 gt "                 // +t.s +t true|false
+                          "3 1 roll "             // true|false +t.s +t
+                          "sub "                  // true|false 0.s
+                          "exch "                 // 0.s true|false
+                          "{1 exch sub} if\n");   // 1 - 0.s|0.s
+    }
+}
+
+/**
+ *  Returns PS function code that applies inverse perspective
+ *  to a x, y point.
+ *  The function assumes that the stack has at least two elements,
+ *  and that the top 2 elements are numeric values.
+ *  After executing this code on a PS stack, the last 2 elements are updated
+ *  while the rest of the stack is preserved intact.
+ *  inversePerspectiveMatrix is the inverse perspective matrix.
+ */
+static void apply_perspective_to_coordinates(const SkMatrix& inversePerspectiveMatrix,
+                                             SkDynamicMemoryWStream* code) {
+    // Affine matrices need no per-point divide; emit nothing.
+    if (!inversePerspectiveMatrix.hasPerspective()) {
+        return;
+    }
+
+    // Perspective matrix should be:
+    // 1  0  0
+    // 0  1  0
+    // p0 p1 p2
+
+    const SkScalar p0 = inversePerspectiveMatrix[SkMatrix::kMPersp0];
+    const SkScalar p1 = inversePerspectiveMatrix[SkMatrix::kMPersp1];
+    const SkScalar p2 = inversePerspectiveMatrix[SkMatrix::kMPersp2];
+
+    // y = y / (p2 + p0 x + p1 y)
+    // x = x / (p2 + p0 x + p1 y)
+
+    // Input on stack: x y
+    code->writeText(" dup ");             // x y y
+    SkPDFUtils::AppendScalar(p1, code);   // x y y p1
+    code->writeText(" mul "               // x y y*p1
+                    " 2 index ");         // x y y*p1 x
+    SkPDFUtils::AppendScalar(p0, code);   // x y y p1 x p0
+    code->writeText(" mul ");             // x y y*p1 x*p0
+    SkPDFUtils::AppendScalar(p2, code);   // x y y p1 x*p0 p2
+    code->writeText(" add "               // x y y*p1 x*p0+p2
+                    "add "                // x y y*p1+x*p0+p2
+                    "3 1 roll "           // y*p1+x*p0+p2 x y
+                    "2 index "            // z x y y*p1+x*p0+p2
+                    "div "                // y*p1+x*p0+p2 x y/(y*p1+x*p0+p2)
+                    "3 1 roll "           // y/(y*p1+x*p0+p2) y*p1+x*p0+p2 x
+                    "exch "               // y/(y*p1+x*p0+p2) x y*p1+x*p0+p2
+                    "div "                // y/(y*p1+x*p0+p2) x/(y*p1+x*p0+p2)
+                    "exch\n");            // x/(y*p1+x*p0+p2) y/(y*p1+x*p0+p2)
+}
+
+// Emit the PS calculator body for a linear gradient: after the inverse
+// perspective step, the (already unit-mapped) x coordinate is the gradient
+// parameter t and y is discarded.
+static void linearCode(const SkShaderBase::GradientInfo& info,
+                       const SkMatrix& perspectiveRemover,
+                       SkDynamicMemoryWStream* function) {
+    function->writeText("{");
+
+    apply_perspective_to_coordinates(perspectiveRemover, function);
+
+    function->writeText("pop\n");  // Just ditch the y value.
+    tileModeCode((SkTileMode)info.fTileMode, function);
+    gradient_function_code(info, function);
+    function->writeText("}");
+}
+
+// Emit the PS calculator body for a radial gradient: t is the distance of
+// (x, y) from the origin of the unit-mapped gradient space.
+static void radialCode(const SkShaderBase::GradientInfo& info,
+                       const SkMatrix& perspectiveRemover,
+                       SkDynamicMemoryWStream* function) {
+    function->writeText("{");
+
+    apply_perspective_to_coordinates(perspectiveRemover, function);
+
+    // Find the distance from the origin.
+    function->writeText("dup "      // x y y
+                        "mul "      // x y^2
+                        "exch "     // y^2 x
+                        "dup "      // y^2 x x
+                        "mul "      // y^2 x^2
+                        "add "      // y^2+x^2
+                        "sqrt\n");  // sqrt(y^2+x^2)
+
+    tileModeCode((SkTileMode)info.fTileMode, function);
+    gradient_function_code(info, function);
+    function->writeText("}");
+}
+
+/* Conical gradient shader, based on the Canvas spec for radial gradients
+   See: http://www.w3.org/TR/2dcontext/#dom-context-2d-createradialgradient
+ */
+// Solves a*t^2 + b*t + c = 0 for the gradient parameter t of each pixel,
+// where a, b, c are derived from the two circle centers and radii; the PS
+// code leaves (t, true) or (false) on the stack depending on cone membership.
+static void twoPointConicalCode(const SkShaderBase::GradientInfo& info,
+                                const SkMatrix& perspectiveRemover,
+                                SkDynamicMemoryWStream* function) {
+    SkScalar dx = info.fPoint[1].fX - info.fPoint[0].fX;
+    SkScalar dy = info.fPoint[1].fY - info.fPoint[0].fY;
+    SkScalar r0 = info.fRadius[0];
+    SkScalar dr = info.fRadius[1] - info.fRadius[0];
+    SkScalar a = dx * dx + dy * dy - dr * dr;
+
+    // First compute t, if the pixel falls outside the cone, then we'll end
+    // with 'false' on the stack, otherwise we'll push 'true' with t below it
+
+    // We start with a stack of (x y), copy it and then consume one copy in
+    // order to calculate b and the other to calculate c.
+    function->writeText("{");
+
+    apply_perspective_to_coordinates(perspectiveRemover, function);
+
+    function->writeText("2 copy ");
+
+    // Calculate b and b^2; b = -2 * (y * dy + x * dx + r0 * dr).
+    SkPDFUtils::AppendScalar(dy, function);
+    function->writeText(" mul exch ");
+    SkPDFUtils::AppendScalar(dx, function);
+    function->writeText(" mul add ");
+    SkPDFUtils::AppendScalar(r0 * dr, function);
+    function->writeText(" add -2 mul dup dup mul\n");
+
+    // c = x^2 + y^2 + radius0^2
+    function->writeText("4 2 roll dup mul exch dup mul add ");
+    SkPDFUtils::AppendScalar(r0 * r0, function);
+    function->writeText(" sub dup 4 1 roll\n");
+
+    // Contents of the stack at this point: c, b, b^2, c
+
+    // if a = 0, then we collapse to a simpler linear case
+    if (a == 0) {
+
+        // t = -c/b
+        function->writeText("pop pop div neg dup ");
+
+        // compute radius(t)
+        SkPDFUtils::AppendScalar(dr, function);
+        function->writeText(" mul ");
+        SkPDFUtils::AppendScalar(r0, function);
+        function->writeText(" add\n");
+
+        // if r(t) < 0, then it's outside the cone
+        function->writeText("0 lt {pop false} {true} ifelse\n");
+
+    } else {
+
+        // quadratic case: the Canvas spec wants the largest
+        // root t for which radius(t) > 0
+
+        // compute the discriminant (b^2 - 4ac)
+        SkPDFUtils::AppendScalar(a * 4, function);
+        function->writeText(" mul sub dup\n");
+
+        // if d >= 0, proceed
+        function->writeText("0 ge {\n");
+
+        // an intermediate value we'll use to compute the roots:
+        // q = -0.5 * (b +/- sqrt(d))
+        function->writeText("sqrt exch dup 0 lt {exch -1 mul} if");
+        function->writeText(" add -0.5 mul dup\n");
+
+        // first root = q / a
+        SkPDFUtils::AppendScalar(a, function);
+        function->writeText(" div\n");
+
+        // second root = c / q
+        function->writeText("3 1 roll div\n");
+
+        // put the larger root on top of the stack
+        function->writeText("2 copy gt {exch} if\n");
+
+        // compute radius(t) for larger root
+        function->writeText("dup ");
+        SkPDFUtils::AppendScalar(dr, function);
+        function->writeText(" mul ");
+        SkPDFUtils::AppendScalar(r0, function);
+        function->writeText(" add\n");
+
+        // if r(t) > 0, we have our t, pop off the smaller root and we're done
+        function->writeText(" 0 gt {exch pop true}\n");
+
+        // otherwise, throw out the larger one and try the smaller root
+        function->writeText("{pop dup\n");
+        SkPDFUtils::AppendScalar(dr, function);
+        function->writeText(" mul ");
+        SkPDFUtils::AppendScalar(r0, function);
+        function->writeText(" add\n");
+
+        // if r(t) < 0, push false, otherwise the smaller root is our t
+        function->writeText("0 le {pop false} {true} ifelse\n");
+        function->writeText("} ifelse\n");
+
+        // d < 0, clear the stack and push false
+        function->writeText("} {pop pop pop false} ifelse\n");
+    }
+
+    // if the pixel is in the cone, proceed to compute a color
+    function->writeText("{");
+    tileModeCode((SkTileMode)info.fTileMode, function);
+    gradient_function_code(info, function);
+
+    // otherwise, just write black
+    function->writeText("} {0 0 0} ifelse }");
+}
+
+// Emit the PS calculator body for a sweep gradient: t is the angle of (x, y)
+// around the center (atan returns degrees), normalized by 360 into [0, 1).
+// NOTE(review): perspectiveRemover is accepted but never used here, unlike
+// the other *Code() emitters -- confirm sweep gradients cannot reach this
+// path with perspective.
+static void sweepCode(const SkShaderBase::GradientInfo& info,
+                      const SkMatrix& perspectiveRemover,
+                      SkDynamicMemoryWStream* function) {
+    function->writeText("{exch atan 360 div\n");
+    tileModeCode((SkTileMode)info.fTileMode, function);
+    gradient_function_code(info, function);
+    function->writeText("}");
+}
+
+
+// catch cases where the inner just touches the outer circle
+// and make the inner circle just inside the outer one to match raster
+static void FixUpRadius(const SkPoint& p1, SkScalar& r1, const SkPoint& p2, SkScalar& r2) {
+    // Tolerance within which the circles count as touching; also the amount
+    // by which the larger radius is nudged outward.  Previously repeated as a
+    // magic 0.002f in three places.
+    constexpr SkScalar kTouchTolerance = 0.002f;
+    // detect touching circles
+    SkScalar distance = SkPoint::Distance(p1, p2);
+    // SkScalarAbs keeps the math in SkScalar (float) precision instead of the
+    // float->double->float round trip that unqualified ::fabs incurred.
+    SkScalar subtractRadii = SkScalarAbs(r1 - r2);
+    if (SkScalarAbs(distance - subtractRadii) < kTouchTolerance) {
+        // Grow the larger circle so the smaller one is strictly inside it.
+        if (r1 > r2) {
+            r1 += kTouchTolerance;
+        } else {
+            r2 += kTouchTolerance;
+        }
+    }
+}
+
+// Finds affine and persp such that in = affine * persp.
+// but it returns the inverse of perspective matrix.
+// Returns false when the perspective component is degenerate (p2 ~ 0).
+// The input is taken by const reference: the previous by-value `const
+// SkMatrix in` copied the whole matrix on every call for no benefit.
+static bool split_perspective(const SkMatrix& in, SkMatrix* affine,
+                              SkMatrix* perspectiveInverse) {
+    const SkScalar p2 = in[SkMatrix::kMPersp2];
+
+    if (SkScalarNearlyZero(p2)) {
+        return false;
+    }
+
+    const SkScalar zero = SkIntToScalar(0);
+    const SkScalar one = SkIntToScalar(1);
+
+    const SkScalar sx = in[SkMatrix::kMScaleX];
+    const SkScalar kx = in[SkMatrix::kMSkewX];
+    const SkScalar tx = in[SkMatrix::kMTransX];
+    const SkScalar ky = in[SkMatrix::kMSkewY];
+    const SkScalar sy = in[SkMatrix::kMScaleY];
+    const SkScalar ty = in[SkMatrix::kMTransY];
+    const SkScalar p0 = in[SkMatrix::kMPersp0];
+    const SkScalar p1 = in[SkMatrix::kMPersp1];
+
+    // Perspective matrix would be:
+    // 1  0  0
+    // 0  1  0
+    // p0 p1 p2
+    // But we need the inverse of persp.
+    perspectiveInverse->setAll(one, zero, zero,
+                               zero, one, zero,
+                               -p0/p2, -p1/p2, 1/p2);
+
+    affine->setAll(sx - p0 * tx / p2, kx - p1 * tx / p2, tx / p2,
+                   ky - p0 * ty / p2, sy - p1 * ty / p2, ty / p2,
+                   zero, zero, one);
+
+    return true;
+}
+
+// Wrap raw PostScript calculator code in a PDF Type 4 function stream with
+// the given Domain and Range, and emit it into the document.
+static SkPDFIndirectReference make_ps_function(std::unique_ptr<SkStreamAsset> psCode,
+                                               std::unique_ptr<SkPDFArray> domain,
+                                               std::unique_ptr<SkPDFObject> range,
+                                               SkPDFDocument* doc) {
+    auto fnDict = SkPDFMakeDict();
+    fnDict->insertInt("FunctionType", 4);
+    fnDict->insertObject("Domain", std::move(domain));
+    fnDict->insertObject("Range", std::move(range));
+    return SkPDFStreamOut(std::move(fnDict), std::move(psCode), doc);
+}
+
+// Emit the PDF shading pattern for an opaque gradient.  Uses native PDF
+// shading types (2/3) with stitch functions where possible, otherwise falls
+// back to a ShadingType 1 dictionary driven by a PostScript calculator
+// function.  Returns a null reference on unsupported/degenerate input.
+static SkPDFIndirectReference make_function_shader(SkPDFDocument* doc,
+                                                   const SkPDFGradientShader::Key& state) {
+    SkPoint transformPoints[2];
+    const SkShaderBase::GradientInfo& info = state.fInfo;
+    SkMatrix finalMatrix = state.fCanvasTransform;
+    finalMatrix.preConcat(state.fShaderTransform);
+
+    // Only clamp-mode linear/radial/conical gradients without perspective can
+    // use the native shading types.
+    bool doStitchFunctions = (state.fType == SkShaderBase::GradientType::kLinear ||
+                              state.fType == SkShaderBase::GradientType::kRadial ||
+                              state.fType == SkShaderBase::GradientType::kConical) &&
+                             (SkTileMode)info.fTileMode == SkTileMode::kClamp &&
+                             !finalMatrix.hasPerspective();
+
+    int32_t shadingType = 1;
+    auto pdfShader = SkPDFMakeDict();
+    // The two point radial gradient further references
+    // state.fInfo
+    // in translating from x, y coordinates to the t parameter. So, we have
+    // to transform the points and radii according to the calculated matrix.
+    if (doStitchFunctions) {
+        pdfShader->insertObject("Function", gradientStitchCode(info));
+        shadingType = (state.fType == SkShaderBase::GradientType::kLinear) ? 2 : 3;
+
+        // Extend the gradient past both endpoints (clamp behavior).
+        auto extend = SkPDFMakeArray();
+        extend->reserve(2);
+        extend->appendBool(true);
+        extend->appendBool(true);
+        pdfShader->insertObject("Extend", std::move(extend));
+
+        std::unique_ptr<SkPDFArray> coords;
+        if (state.fType == SkShaderBase::GradientType::kConical) {
+            SkScalar r1 = info.fRadius[0];
+            SkScalar r2 = info.fRadius[1];
+            SkPoint pt1 = info.fPoint[0];
+            SkPoint pt2 = info.fPoint[1];
+            // Nudge touching circles apart so PDF viewers match raster output.
+            FixUpRadius(pt1, r1, pt2, r2);
+
+            coords = SkPDFMakeArray(pt1.x(),
+                                    pt1.y(),
+                                    r1,
+                                    pt2.x(),
+                                    pt2.y(),
+                                    r2);
+        } else if (state.fType == SkShaderBase::GradientType::kRadial) {
+            // Plain radial: express as a conical with a degenerate (radius 0)
+            // inner circle at the center.
+            const SkPoint& pt1 = info.fPoint[0];
+            coords = SkPDFMakeArray(pt1.x(),
+                                    pt1.y(),
+                                    0,
+                                    pt1.x(),
+                                    pt1.y(),
+                                    info.fRadius[0]);
+        } else {
+            const SkPoint& pt1 = info.fPoint[0];
+            const SkPoint& pt2 = info.fPoint[1];
+            coords = SkPDFMakeArray(pt1.x(),
+                                    pt1.y(),
+                                    pt2.x(),
+                                    pt2.y());
+        }
+
+        pdfShader->insertObject("Coords", std::move(coords));
+    } else {
+        // Depending on the type of the gradient, we want to transform the
+        // coordinate space in different ways.
+        transformPoints[0] = info.fPoint[0];
+        transformPoints[1] = info.fPoint[1];
+        switch (state.fType) {
+            case SkShaderBase::GradientType::kLinear:
+                break;
+            case SkShaderBase::GradientType::kRadial:
+                transformPoints[1] = transformPoints[0];
+                transformPoints[1].fX += info.fRadius[0];
+                break;
+            case SkShaderBase::GradientType::kConical: {
+                transformPoints[1] = transformPoints[0];
+                transformPoints[1].fX += SK_Scalar1;
+                break;
+            }
+            case SkShaderBase::GradientType::kSweep:
+                transformPoints[1] = transformPoints[0];
+                transformPoints[1].fX += SK_Scalar1;
+                break;
+            case SkShaderBase::GradientType::kColor:
+            case SkShaderBase::GradientType::kNone:
+            default:
+                return SkPDFIndirectReference();
+        }
+
+        // Move any scaling (assuming a unit gradient) or translation
+        // (and rotation for linear gradient), of the final gradient from
+        // info.fPoints to the matrix (updating bbox appropriately). Now
+        // the gradient can be drawn on on the unit segment.
+        SkMatrix mapperMatrix;
+        unit_to_points_matrix(transformPoints, &mapperMatrix);
+
+        finalMatrix.preConcat(mapperMatrix);
+
+        // Preserves as much as possible in the final matrix, and only removes
+        // the perspective. The inverse of the perspective is stored in
+        // perspectiveInverseOnly matrix and has 3 useful numbers
+        // (p0, p1, p2), while everything else is either 0 or 1.
+        // In this way the shader will handle it eficiently, with minimal code.
+        SkMatrix perspectiveInverseOnly = SkMatrix::I();
+        if (finalMatrix.hasPerspective()) {
+            if (!split_perspective(finalMatrix,
+                                   &finalMatrix, &perspectiveInverseOnly)) {
+                return SkPDFIndirectReference();
+            }
+        }
+
+        SkRect bbox;
+        bbox.set(state.fBBox);
+        if (!SkPDFUtils::InverseTransformBBox(finalMatrix, &bbox)) {
+            return SkPDFIndirectReference();
+        }
+        SkDynamicMemoryWStream functionCode;
+
+        SkShaderBase::GradientInfo infoCopy = info;
+
+        // The conical PS code expects pre-mapper coordinates, so undo the
+        // unit mapping on the copied points and radii.
+        if (state.fType == SkShaderBase::GradientType::kConical) {
+            SkMatrix inverseMapperMatrix;
+            if (!mapperMatrix.invert(&inverseMapperMatrix)) {
+                return SkPDFIndirectReference();
+            }
+            inverseMapperMatrix.mapPoints(infoCopy.fPoint, 2);
+            infoCopy.fRadius[0] = inverseMapperMatrix.mapRadius(info.fRadius[0]);
+            infoCopy.fRadius[1] = inverseMapperMatrix.mapRadius(info.fRadius[1]);
+        }
+        switch (state.fType) {
+            case SkShaderBase::GradientType::kLinear:
+                linearCode(infoCopy, perspectiveInverseOnly, &functionCode);
+                break;
+            case SkShaderBase::GradientType::kRadial:
+                radialCode(infoCopy, perspectiveInverseOnly, &functionCode);
+                break;
+            case SkShaderBase::GradientType::kConical:
+                twoPointConicalCode(infoCopy, perspectiveInverseOnly, &functionCode);
+                break;
+            case SkShaderBase::GradientType::kSweep:
+                sweepCode(infoCopy, perspectiveInverseOnly, &functionCode);
+                break;
+            default:
+                SkASSERT(false);
+        }
+        // The bbox is used both as the shading dict's Domain and, separately,
+        // as the Type 4 function's Domain.
+        pdfShader->insertObject(
+                "Domain", SkPDFMakeArray(bbox.left(), bbox.right(), bbox.top(), bbox.bottom()));
+
+        auto domain = SkPDFMakeArray(bbox.left(), bbox.right(), bbox.top(), bbox.bottom());
+        std::unique_ptr<SkPDFArray> rangeObject = SkPDFMakeArray(0, 1, 0, 1, 0, 1);
+        pdfShader->insertRef("Function",
+                             make_ps_function(functionCode.detachAsStream(), std::move(domain),
+                                              std::move(rangeObject), doc));
+    }
+
+    pdfShader->insertInt("ShadingType", shadingType);
+    pdfShader->insertName("ColorSpace", "DeviceRGB");
+
+    // Wrap the shading dictionary in a PatternType 2 (shading) pattern.
+    SkPDFDict pdfFunctionShader("Pattern");
+    pdfFunctionShader.insertInt("PatternType", 2);
+    pdfFunctionShader.insertObject("Matrix", SkPDFUtils::MatrixToArray(finalMatrix));
+    pdfFunctionShader.insertObject("Shading", std::move(pdfShader));
+    return doc->emit(pdfFunctionShader);
+}
+
+static SkPDFIndirectReference find_pdf_shader(SkPDFDocument* doc,
+ SkPDFGradientShader::Key key,
+ bool keyHasAlpha);
+
+// Package an optional pattern shader and an optional graphic state into a
+// PDF resource dictionary; absent references contribute empty lists.
+static std::unique_ptr<SkPDFDict> get_gradient_resource_dict(SkPDFIndirectReference functionShader,
+                                                             SkPDFIndirectReference gState) {
+    std::vector<SkPDFIndirectReference> shaders;
+    std::vector<SkPDFIndirectReference> states;
+    if (functionShader != SkPDFIndirectReference()) {
+        shaders.push_back(functionShader);
+    }
+    if (gState != SkPDFIndirectReference()) {
+        states.push_back(gState);
+    }
+    return SkPDFMakeResourceDict(std::move(states),
+                                 std::move(shaders),
+                                 std::vector<SkPDFIndirectReference>(),
+                                 std::vector<SkPDFIndirectReference>());
+}
+
+// Creates a content stream which fills the pattern P0 across bounds.
+// @param gsIndex A graphics state resource index to apply, or <0 if no
+//                graphics state to apply.
+// @param patternIndex The pattern resource index to fill with.
+// @param bounds Device-space rectangle to cover.  Taken by const reference:
+//               the previous non-const `SkRect&` wrongly advertised mutation
+//               and rejected const/temporary arguments.
+static std::unique_ptr<SkStreamAsset> create_pattern_fill_content(int gsIndex,
+                                                                  int patternIndex,
+                                                                  const SkRect& bounds) {
+    SkDynamicMemoryWStream content;
+    if (gsIndex >= 0) {
+        SkPDFUtils::ApplyGraphicState(gsIndex, &content);
+    }
+    SkPDFUtils::ApplyPattern(patternIndex, &content);
+    SkPDFUtils::AppendRectangle(bounds, &content);
+    SkPDFUtils::PaintPath(SkPaint::kFill_Style, SkPathFillType::kEvenOdd, &content);
+    return content.detachAsStream();
+}
+
+// Returns true if any color stop of the gradient is not fully opaque, in
+// which case the shader needs the soft-mask (alpha) code path.
+static bool gradient_has_alpha(const SkPDFGradientShader::Key& key) {
+    SkASSERT(key.fType != SkShaderBase::GradientType::kNone);
+    const SkShaderBase::GradientInfo& info = key.fInfo;
+    for (int i = 0; i < info.fColorCount; ++i) {
+        if (SK_AlphaOPAQUE != (SkAlpha)SkColorGetA(info.fColors[i])) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// warning: does not set fHash on new key. (Both callers need to change fields.)
+// Deep-copies a Key: allocates fresh color/stop arrays and re-points the
+// copied fInfo at them so the clone never aliases the source's storage.
+static SkPDFGradientShader::Key clone_key(const SkPDFGradientShader::Key& k) {
+    SkPDFGradientShader::Key clone = {
+            k.fType,
+            k.fInfo,  // change pointers later.
+            std::unique_ptr<SkColor[]>(new SkColor[k.fInfo.fColorCount]),
+            std::unique_ptr<SkScalar[]>(new SkScalar[k.fInfo.fColorCount]),
+            k.fCanvasTransform,
+            k.fShaderTransform,
+            k.fBBox, 0};
+    // Restore the Key invariant: fInfo's pointers alias the owned arrays.
+    clone.fInfo.fColors = clone.fColors.get();
+    clone.fInfo.fColorOffsets = clone.fStops.get();
+    for (int i = 0; i < clone.fInfo.fColorCount; i++) {
+        clone.fInfo.fColorOffsets[i] = k.fInfo.fColorOffsets[i];
+        clone.fInfo.fColors[i] = k.fInfo.fColors[i];
+    }
+    return clone;
+}
+
+// Build a luminosity soft-mask graphic state for an alpha gradient: each
+// stop's alpha becomes an opaque gray (alpha, alpha, alpha), the resulting
+// opaque gradient is rendered into a form XObject, and that XObject is used
+// as the SMask.
+static SkPDFIndirectReference create_smask_graphic_state(SkPDFDocument* doc,
+                                                         const SkPDFGradientShader::Key& state) {
+    SkASSERT(state.fType != SkShaderBase::GradientType::kNone);
+    SkPDFGradientShader::Key luminosityState = clone_key(state);
+    for (int i = 0; i < luminosityState.fInfo.fColorCount; i++) {
+        SkAlpha alpha = SkColorGetA(luminosityState.fInfo.fColors[i]);
+        luminosityState.fInfo.fColors[i] = SkColorSetARGB(255, alpha, alpha, alpha);
+    }
+    // The colors were mutated above, so the hash must be recomputed.
+    luminosityState.fHash = hash(luminosityState);
+
+    SkASSERT(!gradient_has_alpha(luminosityState));
+    SkPDFIndirectReference luminosityShader = find_pdf_shader(doc, std::move(luminosityState), false);
+    std::unique_ptr<SkPDFDict> resources = get_gradient_resource_dict(luminosityShader,
+                                                                      SkPDFIndirectReference());
+    SkRect bbox = SkRect::Make(state.fBBox);
+    SkPDFIndirectReference alphaMask =
+            SkPDFMakeFormXObject(doc,
+                                 create_pattern_fill_content(-1, luminosityShader.fValue, bbox),
+                                 SkPDFUtils::RectToArray(bbox),
+                                 std::move(resources),
+                                 SkMatrix::I(),
+                                 "DeviceRGB");
+    return SkPDFGraphicState::GetSMaskGraphicState(
+            alphaMask, false, SkPDFGraphicState::kLuminosity_SMaskMode, doc);
+}
+
+// Emit a gradient with per-stop alpha as a tiling pattern that fills with an
+// opaque version of the gradient under a luminosity soft mask carrying the
+// alpha channel.
+static SkPDFIndirectReference make_alpha_function_shader(SkPDFDocument* doc,
+                                                         const SkPDFGradientShader::Key& state) {
+    SkASSERT(state.fType != SkShaderBase::GradientType::kNone);
+    SkPDFGradientShader::Key opaqueState = clone_key(state);
+    for (int i = 0; i < opaqueState.fInfo.fColorCount; i++) {
+        opaqueState.fInfo.fColors[i] = SkColorSetA(opaqueState.fInfo.fColors[i], SK_AlphaOPAQUE);
+    }
+    // The colors were mutated above, so the hash must be recomputed.
+    opaqueState.fHash = hash(opaqueState);
+
+    SkASSERT(!gradient_has_alpha(opaqueState));
+    SkRect bbox = SkRect::Make(state.fBBox);
+    SkPDFIndirectReference colorShader = find_pdf_shader(doc, std::move(opaqueState), false);
+    if (!colorShader) {
+        return SkPDFIndirectReference();
+    }
+    // Create resource dict with alpha graphics state as G0 and
+    // pattern shader as P0, then write content stream.
+    SkPDFIndirectReference alphaGsRef = create_smask_graphic_state(doc, state);
+
+    std::unique_ptr<SkPDFDict> resourceDict = get_gradient_resource_dict(colorShader, alphaGsRef);
+
+    std::unique_ptr<SkStreamAsset> colorStream =
+            create_pattern_fill_content(alphaGsRef.fValue, colorShader.fValue, bbox);
+    std::unique_ptr<SkPDFDict> alphaFunctionShader = SkPDFMakeDict();
+    SkPDFUtils::PopulateTilingPatternDict(alphaFunctionShader.get(), bbox,
+                                          std::move(resourceDict), SkMatrix::I());
+    return SkPDFStreamOut(std::move(alphaFunctionShader), std::move(colorStream), doc);
+}
+
+// Canonicalize a gradient shader into a self-contained Key (owned copies of
+// the stop colors/offsets plus transforms, bbox, and precomputed hash).
+static SkPDFGradientShader::Key make_key(const SkShader* shader,
+                                         const SkMatrix& canvasTransform,
+                                         const SkIRect& bbox) {
+    SkPDFGradientShader::Key key = {
+            SkShaderBase::GradientType::kNone,
+            {0, nullptr, nullptr, {{0, 0}, {0, 0}}, {0, 0}, SkTileMode::kClamp, 0},
+            nullptr,
+            nullptr,
+            canvasTransform,
+            SkPDFUtils::GetShaderLocalMatrix(shader),
+            bbox, 0};
+    // First asGradient() call fills the scalar fields (notably fColorCount);
+    // the arrays are not copied yet because no storage has been supplied.
+    key.fType = as_SB(shader)->asGradient(&key.fInfo);
+    SkASSERT(SkShaderBase::GradientType::kNone != key.fType);
+    SkASSERT(key.fInfo.fColorCount > 0);
+    // Allocate owned storage sized by fColorCount, point fInfo at it, and
+    // query again so the colors and stops are copied into the key.
+    key.fColors.reset(new SkColor[key.fInfo.fColorCount]);
+    key.fStops.reset(new SkScalar[key.fInfo.fColorCount]);
+    key.fInfo.fColors = key.fColors.get();
+    key.fInfo.fColorOffsets = key.fStops.get();
+    as_SB(shader)->asGradient(&key.fInfo);
+    key.fHash = hash(key);
+    return key;
+}
+
+// Look up (or create and memoize) the PDF pattern object for this gradient
+// key.  keyHasAlpha must match the key's actual alpha content.
+static SkPDFIndirectReference find_pdf_shader(SkPDFDocument* doc,
+                                              SkPDFGradientShader::Key key,
+                                              bool keyHasAlpha) {
+    SkASSERT(gradient_has_alpha(key) == keyHasAlpha);
+    auto& canon = doc->fGradientPatternMap;
+    if (SkPDFIndirectReference* cached = canon.find(key)) {
+        return *cached;  // an identical gradient was already emitted
+    }
+    SkPDFIndirectReference shader = keyHasAlpha ? make_alpha_function_shader(doc, key)
+                                                : make_function_shader(doc, key);
+    canon.set(std::move(key), shader);
+    return shader;
+}
+
+// Public entry point: canonicalize the gradient into a Key, then emit (or
+// reuse) the corresponding PDF pattern object.
+SkPDFIndirectReference SkPDFGradientShader::Make(SkPDFDocument* doc,
+                                                 SkShader* shader,
+                                                 const SkMatrix& canvasTransform,
+                                                 const SkIRect& bbox) {
+    SkASSERT(shader);
+    SkASSERT(as_SB(shader)->asGradient() != SkShaderBase::GradientType::kNone);
+    Key shaderKey = make_key(shader, canvasTransform, bbox);
+    const bool hasAlpha = gradient_has_alpha(shaderKey);
+    return find_pdf_shader(doc, std::move(shaderKey), hasAlpha);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFGradientShader.h b/gfx/skia/skia/src/pdf/SkPDFGradientShader.h
new file mode 100644
index 0000000000..d1e4dea594
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGradientShader.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFGradientShader_DEFINED
+#define SkPDFGradientShader_DEFINED
+
+#include "include/core/SkShader.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkPDFUtils.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkMatrix;
+class SkPDFDocument;
+struct SkIRect;
+
+namespace SkPDFGradientShader {
+
+SkPDFIndirectReference Make(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& matrix,
+ const SkIRect& surfaceBBox);
+
+// Canonical, self-contained description of one gradient: everything needed
+// to deduplicate and re-emit it as a PDF pattern.
+struct Key {
+    SkShaderBase::GradientType fType;    // linear/radial/conical/sweep
+    SkShaderBase::GradientInfo fInfo;    // its fColors/fColorOffsets point at the arrays below
+    std::unique_ptr<SkColor[]> fColors;  // owned storage backing fInfo.fColors
+    std::unique_ptr<SkScalar[]> fStops;  // owned storage backing fInfo.fColorOffsets
+    SkMatrix fCanvasTransform;
+    SkMatrix fShaderTransform;
+    SkIRect fBBox;
+    uint32_t fHash;  // precomputed; must be refreshed after mutating any field
+};
+
+// Hash functor for map use: the Key carries its own precomputed hash.
+struct KeyHash {
+    uint32_t operator()(const Key& k) const { return k.fHash; }
+};
+
+// Field-wise equality for GradientInfo; the color and offset arrays are
+// compared element-wise over the first fColorCount entries.
+inline bool operator==(const SkShaderBase::GradientInfo& u, const SkShaderBase::GradientInfo& v) {
+    return u.fColorCount == v.fColorCount
+        && u.fPoint[0] == v.fPoint[0]
+        && u.fPoint[1] == v.fPoint[1]
+        && u.fRadius[0] == v.fRadius[0]
+        && u.fRadius[1] == v.fRadius[1]
+        && u.fTileMode == v.fTileMode
+        && u.fGradientFlags == v.fGradientFlags
+        && SkPackedArrayEqual(u.fColors, v.fColors, u.fColorCount)
+        && SkPackedArrayEqual(u.fColorOffsets, v.fColorOffsets, u.fColorCount);
+}
+
+// Key equality compares the semantic fields; fColors/fStops (owned storage)
+// and fHash are intentionally excluded -- fInfo already compares the array
+// contents.  Keys are required to be self-consistent (fInfo aliasing the
+// owned arrays), which the asserts check.
+inline bool operator==(const Key& u, const Key& v) {
+    SkASSERT(u.fInfo.fColors == u.fColors.get());
+    SkASSERT(u.fInfo.fColorOffsets == u.fStops.get());
+    SkASSERT(v.fInfo.fColors == v.fColors.get());
+    SkASSERT(v.fInfo.fColorOffsets == v.fStops.get());
+    return u.fType == v.fType
+        && u.fInfo == v.fInfo
+        && u.fCanvasTransform == v.fCanvasTransform
+        && u.fShaderTransform == v.fShaderTransform
+        && u.fBBox == v.fBBox;
+}
+inline bool operator!=(const Key& u, const Key& v) { return !(u == v); }
+
+} // namespace SkPDFGradientShader
+#endif // SkPDFGradientShader_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.cpp b/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.cpp
new file mode 100644
index 0000000000..dfd2214fbf
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.cpp
@@ -0,0 +1,237 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/pdf/SkPDFGraphicStackState.h"
+
+#include "include/core/SkStream.h"
+#include "include/pathops/SkPathOps.h"
+#include "src/pdf/SkPDFUtils.h"
+#include "src/utils/SkClipStackUtils.h"
+
+// Write the color as "r g b " (trailing space included) with each component
+// formatted for a PDF content stream.
+static void emit_pdf_color(SkColor4f color, SkWStream* result) {
+    SkASSERT(color.fA == 1);  // We handle alpha elsewhere.
+    const float components[] = {color.fR, color.fG, color.fB};
+    for (float component : components) {
+        SkPDFUtils::AppendColorComponentF(component, result);
+        result->writeText(" ");
+    }
+}
+
+// Intersection of two rects, treating any empty operand -- or an empty
+// result -- as the zero rect.
+static SkRect rect_intersect(SkRect u, SkRect v) {
+    if (u.isEmpty() || v.isEmpty()) {
+        return SkRect{0, 0, 0, 0};
+    }
+    // SkRect::intersect() mutates u (a by-value copy) and reports overlap.
+    if (!u.intersect(v)) {
+        return SkRect{0, 0, 0, 0};
+    }
+    return u;
+}
+
+// Test to see if the clipstack is a simple rect, If so, we can avoid all PathOps code
+// and speed thing up.
+// On success, *dst receives the accumulated rectangular clip; *dst is only
+// meaningful when true is returned.
+static bool is_rect(const SkClipStack& clipStack, const SkRect& bounds, SkRect* dst) {
+    SkRect currentClip = bounds;
+    SkClipStack::Iter iter(clipStack, SkClipStack::Iter::kBottom_IterStart);
+    while (const SkClipStack::Element* element = iter.next()) {
+        SkRect elementRect{0, 0, 0, 0};
+        switch (element->getDeviceSpaceType()) {
+            case SkClipStack::Element::DeviceSpaceType::kEmpty:
+                break;
+            case SkClipStack::Element::DeviceSpaceType::kRect:
+                elementRect = element->getDeviceSpaceRect();
+                break;
+            default:
+                // Path / round-rect elements: not a pure-rect stack.
+                return false;
+        }
+        if (element->isReplaceOp()) {
+            // A replace op discards earlier clips; restart from the bounds.
+            currentClip = rect_intersect(bounds, elementRect);
+        } else if (element->getOp() == SkClipOp::kIntersect) {
+            currentClip = rect_intersect(currentClip, elementRect);
+        } else {
+            return false;
+        }
+    }
+    *dst = currentClip;
+    return true;
+}
+
+// TODO: When there's no expanding clip ops, this function may not be necessary anymore.
+// Returns true when the stack contains ops that apply_clip() cannot handle:
+// replace ops, or anything other than difference/intersect.
+static bool is_complex_clip(const SkClipStack& stack) {
+    SkClipStack::Iter iter(stack, SkClipStack::Iter::kBottom_IterStart);
+    while (const SkClipStack::Element* element = iter.next()) {
+        if (element->isReplaceOp() ||
+            (element->getOp() != SkClipOp::kDifference &&
+             element->getOp() != SkClipOp::kIntersect)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Walk the clip stack bottom-up, invoking fn() once per element with a
+// finite, non-inverse device-space path.  Difference ops and unbounded or
+// inverse-filled operands are first resolved against the running bounds via
+// PathOps so fn() never sees them directly.
+template <typename F>
+static void apply_clip(const SkClipStack& stack, const SkRect& outerBounds, F fn) {
+    // assumes clipstack is not complex.
+    constexpr SkRect kHuge{-30000, -30000, 30000, 30000};
+    SkClipStack::Iter iter(stack, SkClipStack::Iter::kBottom_IterStart);
+    SkRect bounds = outerBounds;
+    while (const SkClipStack::Element* element = iter.next()) {
+        SkPath operand;
+        element->asDeviceSpacePath(&operand);
+        SkPathOp op;
+        switch (element->getOp()) {
+            case SkClipOp::kDifference: op = kDifference_SkPathOp; break;
+            case SkClipOp::kIntersect: op = kIntersect_SkPathOp; break;
+            default: SkASSERT(false); return;
+        }
+        // Rewrite the operand as (bounds op operand) so it becomes a plain,
+        // bounded, non-inverse fill.
+        if (op == kDifference_SkPathOp ||
+            operand.isInverseFillType() ||
+            !kHuge.contains(operand.getBounds()))
+        {
+            Op(SkPath::Rect(bounds), operand, op, &operand);
+        }
+        SkASSERT(!operand.isInverseFillType());
+        fn(operand);
+        // Shrink the running bounds; an empty intersection ends the walk.
+        if (!bounds.intersect(operand.getBounds())) {
+            return; // return early;
+        }
+    }
+}
+
+// Emit the path's segments followed by the PDF clip operator ("W*" for
+// even-odd, "W" otherwise) and the no-op paint operator "n".
+static void append_clip_path(const SkPath& clipPath, SkWStream* wStream) {
+    SkPDFUtils::EmitPath(clipPath, SkPaint::kFill_Style, wStream);
+    SkPathFillType clipFill = clipPath.getFillType();
+    NOT_IMPLEMENTED(clipFill == SkPathFillType::kInverseEvenOdd, false);
+    NOT_IMPLEMENTED(clipFill == SkPathFillType::kInverseWinding, false);
+    wStream->writeText(clipFill == SkPathFillType::kEvenOdd ? "W* n\n" : "W n\n");
+}
+
+// Translate the clip stack into PDF clip operators on wStream, choosing the
+// cheapest faithful encoding (single rect, merged PathOps path, or one
+// nested clip per element).
+static void append_clip(const SkClipStack& clipStack,
+                        const SkIRect& bounds,
+                        SkWStream* wStream) {
+    // The bounds are slightly outset to ensure this is correct in the
+    // face of floating-point accuracy and possible SkRegion bitmap
+    // approximations.
+    SkRect outsetBounds = SkRect::Make(bounds.makeOutset(1, 1));
+
+    // Fast path: a pure-rect stack needs no path machinery at all.
+    SkRect clipStackRect;
+    if (is_rect(clipStack, outsetBounds, &clipStackRect)) {
+        SkPDFUtils::AppendRectangle(clipStackRect, wStream);
+        wStream->writeText("W* n\n");
+        return;
+    }
+
+    if (is_complex_clip(clipStack)) {
+        // Collapse the whole stack into one path via PathOps.
+        SkPath clipPath;
+        SkClipStack_AsPath(clipStack, &clipPath);
+        if (Op(clipPath, SkPath::Rect(outsetBounds), kIntersect_SkPathOp, &clipPath)) {
+            append_clip_path(clipPath, wStream);
+        }
+        // If Op() fails (pathological case; e.g. input values are
+        // extremely large or NaN), emit no clip at all.
+    } else {
+        // Simple stacks: emit one nested clip path per element instead.
+        apply_clip(clipStack, outsetBounds, [wStream](const SkPath& path) {
+            append_clip_path(path, wStream);
+        });
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Ensure the content stream's active clip matches clipStack (identified by
+// its generation ID), popping saved states or pushing a new one as needed.
+// A null clipStack means "wide open" (no clipping).
+void SkPDFGraphicStackState::updateClip(const SkClipStack* clipStack, const SkIRect& bounds) {
+    uint32_t clipStackGenID = clipStack ? clipStack->getTopmostGenID()
+                                        : SkClipStack::kWideOpenGenID;
+    // Fast path: the current entry already carries this clip.
+    if (clipStackGenID == currentEntry()->fClipStackGenID) {
+        return;
+    }
+    // Pop saved states until one with the desired clip is found...
+    while (fStackDepth > 0) {
+        this->pop();
+        if (clipStackGenID == currentEntry()->fClipStackGenID) {
+            return;
+        }
+    }
+    // ...otherwise we are back at the unclipped base state.
+    SkASSERT(currentEntry()->fClipStackGenID == SkClipStack::kWideOpenGenID);
+    if (clipStackGenID != SkClipStack::kWideOpenGenID) {
+        SkASSERT(clipStack);
+        this->push();
+
+        currentEntry()->fClipStackGenID = clipStackGenID;
+        append_clip(*clipStack, bounds, fContentStream);
+    }
+}
+
+
+// Ensure the content stream's current transform matrix equals `matrix`.
+// A non-identity CTM always lives in its own pushed state, so the old one
+// is undone by popping before a new one is concatenated ("cm").
+void SkPDFGraphicStackState::updateMatrix(const SkMatrix& matrix) {
+    if (matrix == currentEntry()->fMatrix) {
+        return;
+    }
+
+    if (currentEntry()->fMatrix.getType() != SkMatrix::kIdentity_Mask) {
+        SkASSERT(fStackDepth > 0);
+        // The clip must be unchanged across this pop, or we would lose it.
+        SkASSERT(fEntries[fStackDepth].fClipStackGenID ==
+                 fEntries[fStackDepth - 1].fClipStackGenID);
+        this->pop();
+
+        SkASSERT(currentEntry()->fMatrix.getType() == SkMatrix::kIdentity_Mask);
+    }
+    if (matrix.getType() == SkMatrix::kIdentity_Mask) {
+        return;
+    }
+
+    this->push();
+    SkPDFUtils::AppendTransform(matrix, fContentStream);
+    currentEntry()->fMatrix = matrix;
+}
+
+// Bring the content stream's color/pattern, external graphic state, and text
+// scale in line with `state`, emitting only the operators that changed.
+void SkPDFGraphicStackState::updateDrawingState(const SkPDFGraphicStackState::Entry& state) {
+    // PDF treats a shader as a color, so we only set one or the other.
+    if (state.fShaderIndex >= 0) {
+        if (state.fShaderIndex != currentEntry()->fShaderIndex) {
+            SkPDFUtils::ApplyPattern(state.fShaderIndex, fContentStream);
+            currentEntry()->fShaderIndex = state.fShaderIndex;
+        }
+    } else if (state.fColor != currentEntry()->fColor || currentEntry()->fShaderIndex >= 0) {
+        // Set both the stroking (RG) and non-stroking (rg) colors.
+        emit_pdf_color(state.fColor, fContentStream);
+        fContentStream->writeText("RG ");
+        emit_pdf_color(state.fColor, fContentStream);
+        fContentStream->writeText("rg\n");
+        currentEntry()->fColor = state.fColor;
+        currentEntry()->fShaderIndex = -1;
+    }
+
+    if (state.fGraphicStateIndex != currentEntry()->fGraphicStateIndex) {
+        SkPDFUtils::ApplyGraphicState(state.fGraphicStateIndex, fContentStream);
+        currentEntry()->fGraphicStateIndex = state.fGraphicStateIndex;
+    }
+
+    // Zero means we don't care what the text scale is (see Entry).
+    if (state.fTextScaleX) {
+        if (state.fTextScaleX != currentEntry()->fTextScaleX) {
+            // The PDF Tz operator takes a percentage.
+            SkScalar pdfScale = state.fTextScaleX * 100;
+            SkPDFUtils::AppendScalar(pdfScale, fContentStream);
+            fContentStream->writeText(" Tz\n");
+            currentEntry()->fTextScaleX = state.fTextScaleX;
+        }
+    }
+}
+
+// Save the graphics state: emit the PDF "q" operator and mirror it by
+// duplicating the current entry onto our shadow stack.
+void SkPDFGraphicStackState::push() {
+    SkASSERT(fStackDepth < kMaxStackDepth);
+    fContentStream->writeText("q\n");
+    ++fStackDepth;
+    // The new top entry starts as a copy of its parent.
+    fEntries[fStackDepth] = fEntries[fStackDepth - 1];
+}
+
+// Restore the graphics state: emit the PDF "Q" operator and discard the top
+// shadow-stack entry (reset to defaults before shrinking).
+void SkPDFGraphicStackState::pop() {
+    SkASSERT(fStackDepth > 0);
+    fContentStream->writeText("Q\n");
+    fEntries[fStackDepth] = SkPDFGraphicStackState::Entry();
+    --fStackDepth;
+}
+
+// Unwind every outstanding save by emitting the matching "Q" operators.
+// Safe to call with no content stream (nothing was ever emitted).
+void SkPDFGraphicStackState::drainStack() {
+    if (fContentStream) {
+        while (fStackDepth) {
+            this->pop();
+        }
+    }
+    SkASSERT(fStackDepth == 0);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.h b/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.h
new file mode 100644
index 0000000000..2ea890c1b0
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicStackState.h
@@ -0,0 +1,41 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFGraphicStackState_DEFINED
+#define SkPDFGraphicStackState_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkScalar.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "src/core/SkClipStack.h"
+
+class SkDynamicMemoryWStream;
+
+// It is important to not confuse SkPDFGraphicStackState with SkPDFGraphicState, the
+// later being our representation of an object in the PDF file.
+struct SkPDFGraphicStackState {
+ struct Entry {
+ SkMatrix fMatrix = SkMatrix::I();
+ uint32_t fClipStackGenID = SkClipStack::kWideOpenGenID;
+ SkColor4f fColor = {SK_FloatNaN, SK_FloatNaN, SK_FloatNaN, SK_FloatNaN};
+ SkScalar fTextScaleX = 1; // Zero means we don't care what the value is.
+ int fShaderIndex = -1;
+ int fGraphicStateIndex = -1;
+ };
+ // Must use stack for matrix, and for clip, plus one for no matrix or clip.
+ inline static constexpr int kMaxStackDepth = 2;
+ Entry fEntries[kMaxStackDepth + 1];
+ int fStackDepth = 0;
+ SkDynamicMemoryWStream* fContentStream;
+
+ SkPDFGraphicStackState(SkDynamicMemoryWStream* s = nullptr) : fContentStream(s) {}
+ void updateClip(const SkClipStack* clipStack, const SkIRect& bounds);
+ void updateMatrix(const SkMatrix& matrix);
+ void updateDrawingState(const Entry& state);
+ void push();
+ void pop();
+ void drainStack();
+ Entry* currentEntry() { return &fEntries[fStackDepth]; }
+};
+
+#endif // SkPDFGraphicStackState_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp b/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp
new file mode 100644
index 0000000000..6cab7441a6
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicState.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFGraphicState.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkPaint.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/base/SkTo.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFUtils.h"
+
+static const char* as_pdf_blend_mode_name(SkBlendMode mode) {
+ const char* name = SkPDFUtils::BlendModeName(mode);
+ SkASSERT(name);
+ return name;
+}
+
+static int to_stroke_cap(uint8_t cap) {
+ // PDF32000.book section 8.4.3.3 "Line Cap Style"
+ switch ((SkPaint::Cap)cap) {
+ case SkPaint::kButt_Cap: return 0;
+ case SkPaint::kRound_Cap: return 1;
+ case SkPaint::kSquare_Cap: return 2;
+ default: SkASSERT(false); return 0;
+ }
+}
+
+static int to_stroke_join(uint8_t join) {
+ // PDF32000.book section 8.4.3.4 "Line Join Style"
+ switch ((SkPaint::Join)join) {
+ case SkPaint::kMiter_Join: return 0;
+ case SkPaint::kRound_Join: return 1;
+ case SkPaint::kBevel_Join: return 2;
+ default: SkASSERT(false); return 0;
+ }
+}
+
+// If a SkXfermode is unsupported in PDF, this function returns
+// SrcOver, otherwise, it returns that Xfermode as a Mode.
+static uint8_t pdf_blend_mode(SkBlendMode mode) {
+ if (!SkPDFUtils::BlendModeName(mode)
+ || SkBlendMode::kXor == mode
+ || SkBlendMode::kPlus == mode)
+ {
+ mode = SkBlendMode::kSrcOver;
+ }
+ return SkToU8((unsigned)mode);
+}
+
+SkPDFIndirectReference SkPDFGraphicState::GetGraphicStateForPaint(SkPDFDocument* doc,
+ const SkPaint& p) {
+ SkASSERT(doc);
+ const SkBlendMode mode = p.getBlendMode_or(SkBlendMode::kSrcOver);
+
+ if (SkPaint::kFill_Style == p.getStyle()) {
+ SkPDFFillGraphicState fillKey = {p.getColor4f().fA, pdf_blend_mode(mode)};
+ auto& fillMap = doc->fFillGSMap;
+ if (SkPDFIndirectReference* statePtr = fillMap.find(fillKey)) {
+ return *statePtr;
+ }
+ SkPDFDict state;
+ state.reserve(2);
+ state.insertColorComponentF("ca", fillKey.fAlpha);
+ state.insertName("BM", as_pdf_blend_mode_name((SkBlendMode)fillKey.fBlendMode));
+ SkPDFIndirectReference ref = doc->emit(state);
+ fillMap.set(fillKey, ref);
+ return ref;
+ } else {
+ SkPDFStrokeGraphicState strokeKey = {
+ p.getStrokeWidth(),
+ p.getStrokeMiter(),
+ p.getColor4f().fA,
+ SkToU8(p.getStrokeCap()),
+ SkToU8(p.getStrokeJoin()),
+ pdf_blend_mode(mode)
+ };
+ auto& sMap = doc->fStrokeGSMap;
+ if (SkPDFIndirectReference* statePtr = sMap.find(strokeKey)) {
+ return *statePtr;
+ }
+ SkPDFDict state;
+ state.reserve(8);
+ state.insertColorComponentF("CA", strokeKey.fAlpha);
+ state.insertColorComponentF("ca", strokeKey.fAlpha);
+ state.insertInt("LC", to_stroke_cap(strokeKey.fStrokeCap));
+ state.insertInt("LJ", to_stroke_join(strokeKey.fStrokeJoin));
+ state.insertScalar("LW", strokeKey.fStrokeWidth);
+ state.insertScalar("ML", strokeKey.fStrokeMiter);
+ state.insertBool("SA", true); // SA = Auto stroke adjustment.
+ state.insertName("BM", as_pdf_blend_mode_name((SkBlendMode)strokeKey.fBlendMode));
+ SkPDFIndirectReference ref = doc->emit(state);
+ sMap.set(strokeKey, ref);
+ return ref;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static SkPDFIndirectReference make_invert_function(SkPDFDocument* doc) {
+ // Acrobat crashes if we use a type 0 function, kpdf crashes if we use
+ // a type 2 function, so we use a type 4 function.
+ static const char psInvert[] = "{1 exch sub}";
+ // Do not copy the trailing '\0' into the SkData.
+ auto invertFunction = SkData::MakeWithoutCopy(psInvert, strlen(psInvert));
+
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertInt("FunctionType", 4);
+ dict->insertObject("Domain", SkPDFMakeArray(0, 1));
+ dict->insertObject("Range", SkPDFMakeArray(0, 1));
+ return SkPDFStreamOut(std::move(dict), SkMemoryStream::Make(std::move(invertFunction)), doc);
+}
+
+SkPDFIndirectReference SkPDFGraphicState::GetSMaskGraphicState(SkPDFIndirectReference sMask,
+ bool invert,
+ SkPDFSMaskMode sMaskMode,
+ SkPDFDocument* doc) {
+ // The practical chances of using the same mask more than once are unlikely
+ // enough that it's not worth canonicalizing.
+ auto sMaskDict = SkPDFMakeDict("Mask");
+ if (sMaskMode == kAlpha_SMaskMode) {
+ sMaskDict->insertName("S", "Alpha");
+ } else if (sMaskMode == kLuminosity_SMaskMode) {
+ sMaskDict->insertName("S", "Luminosity");
+ }
+ sMaskDict->insertRef("G", sMask);
+ if (invert) {
+ // let the doc deduplicate this object.
+ if (doc->fInvertFunction == SkPDFIndirectReference()) {
+ doc->fInvertFunction = make_invert_function(doc);
+ }
+ sMaskDict->insertRef("TR", doc->fInvertFunction);
+ }
+ SkPDFDict result("ExtGState");
+ result.insertObject("SMask", std::move(sMaskDict));
+ return doc->emit(result);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFGraphicState.h b/gfx/skia/skia/src/pdf/SkPDFGraphicState.h
new file mode 100644
index 0000000000..20c5cf3326
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFGraphicState.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFGraphicState_DEFINED
+#define SkPDFGraphicState_DEFINED
+
+#include "include/private/base/SkMacros.h"
+#include "src/core/SkOpts.h"
+#include "src/pdf/SkPDFTypes.h"
+
+class SkPaint;
+
+
+/** \class SkPDFGraphicState
+ SkPaint objects roughly correspond to graphic state dictionaries that can
+ be installed. So that a given dictionary is only output to the pdf file
+ once, we want to canonicalize them.
+*/
+namespace SkPDFGraphicState {
+ enum SkPDFSMaskMode {
+ kAlpha_SMaskMode,
+ kLuminosity_SMaskMode
+ };
+
+ /** Get the graphic state for the passed SkPaint.
+ */
+ SkPDFIndirectReference GetGraphicStateForPaint(SkPDFDocument*, const SkPaint&);
+
+ /** Make a graphic state that only sets the passed soft mask.
+ * @param sMask The form xobject to use as a soft mask.
+ * @param invert Indicates if the alpha of the sMask should be inverted.
+ * @param sMaskMode Whether to use alpha or luminosity for the sMask.
+ *
+ * These are not de-duped.
+ */
+ SkPDFIndirectReference GetSMaskGraphicState(SkPDFIndirectReference sMask,
+ bool invert,
+ SkPDFSMaskMode sMaskMode,
+ SkPDFDocument* doc);
+} // namespace SkPDFGraphicState
+
+SK_BEGIN_REQUIRE_DENSE
+struct SkPDFStrokeGraphicState {
+ SkScalar fStrokeWidth;
+ SkScalar fStrokeMiter;
+ SkScalar fAlpha;
+ uint8_t fStrokeCap; // SkPaint::Cap
+ uint8_t fStrokeJoin; // SkPaint::Join
+ uint8_t fBlendMode; // SkBlendMode
+ uint8_t fPADDING = 0;
+ bool operator==(const SkPDFStrokeGraphicState& o) const { return !memcmp(this, &o, sizeof(o)); }
+ bool operator!=(const SkPDFStrokeGraphicState& o) const { return !(*this == o); }
+};
+SK_END_REQUIRE_DENSE
+
+SK_BEGIN_REQUIRE_DENSE
+struct SkPDFFillGraphicState {
+ SkScalar fAlpha;
+ uint8_t fBlendMode;
+ uint8_t fPADDING[3] = {0, 0, 0};
+ bool operator==(const SkPDFFillGraphicState& o) const { return !memcmp(this, &o, sizeof(o)); }
+ bool operator!=(const SkPDFFillGraphicState& o) const { return !(*this == o); }
+};
+SK_END_REQUIRE_DENSE
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp
new file mode 100644
index 0000000000..85a4688ce8
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.cpp
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFMakeCIDGlyphWidthsArray.h"
+
+#include "include/core/SkPaint.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/pdf/SkPDFGlyphUse.h"
+
+#include <algorithm>
+#include <vector>
+
+// TODO(halcanary): Write unit tests for SkPDFMakeCIDGlyphWidthsArray().
+
+// TODO(halcanary): The logic in this file originated in several
+// disparate places. I feel sure that someone could simplify this
+// down to a single easy-to-read function.
+
+namespace {
+
+// scale from em-units to base-1000, returning as a SkScalar
+SkScalar from_font_units(SkScalar scaled, uint16_t emSize) {
+ if (emSize == 1000) {
+ return scaled;
+ } else {
+ return scaled * 1000 / emSize;
+ }
+}
+
+SkScalar scale_from_font_units(int16_t val, uint16_t emSize) {
+ return from_font_units(SkIntToScalar(val), emSize);
+}
+
+// Unfortunately poppler does not appear to respect the default width setting.
+#if defined(SK_PDF_CAN_USE_DW)
+int16_t findMode(SkSpan<const int16_t> advances) {
+ if (advances.empty()) {
+ return 0;
+ }
+
+ int16_t previousAdvance = advances[0];
+ int16_t currentModeAdvance = advances[0];
+ size_t currentCount = 1;
+ size_t currentModeCount = 1;
+
+ for (size_t i = 1; i < advances.size(); ++i) {
+ if (advances[i] == previousAdvance) {
+ ++currentCount;
+ } else {
+ if (currentCount > currentModeCount) {
+ currentModeAdvance = previousAdvance;
+ currentModeCount = currentCount;
+ }
+ previousAdvance = advances[i];
+ currentCount = 1;
+ }
+ }
+
+ return currentCount > currentModeCount ? previousAdvance : currentModeAdvance;
+}
+#endif
+} // namespace
+
+/** Retrieve advance data for glyphs. Used by the PDF backend. */
+// TODO(halcanary): this function is complex enough to need its logic
+// tested with unit tests.
+std::unique_ptr<SkPDFArray> SkPDFMakeCIDGlyphWidthsArray(const SkTypeface& typeface,
+ const SkPDFGlyphUse& subset,
+ SkScalar* defaultAdvance) {
+ // There are two ways of expressing advances
+ //
+ // range: " gfid [adv.ances adv.ances ... adv.ances]"
+ // run: " gfid gfid adv.ances"
+ //
+ // Assuming that on average
+ // the ASCII representation of an advance plus a space is 10 characters
+ // the ASCII representation of a glyph id plus a space is 4 characters
+ // the ASCII representation of unused gid plus a space in a range is 2 characters
+ //
+ // When not in a range or run
+ // a. Skipping don't cares or defaults is a win (trivial)
+ // b. Run wins for 2+ repeats " gid gid adv.ances"
+ // " gid [adv.ances adv.ances]"
+ // rule: 2+ repeats create run as long as possible, else start range
+ //
+ // When in a range
+ // Cost of stopping and starting a range is 8 characters "] gid ["
+ // c. Skipping defaults is always a win " adv.ances"
+ // rule: end range if default seen
+ // d. Skipping 4+ don't cares is a win " 0 0 0 0"
+ // rule: end range if 4+ don't cares
+ // Cost of stop and start range plus run is 28 characters "] gid gid adv.ances gid ["
+ // e. Switching for 2+ repeats and 4+ don't cares wins " 0 0 adv.ances 0 0 adv.ances"
+ // rule: end range for 2+ repeats with 4+ don't cares
+ // f. Switching for 3+ repeats wins " adv.ances adv.ances adv.ances"
+ // rule: end range for 3+ repeats
+
+ int emSize;
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePDFVector(typeface, &emSize);
+ SkBulkGlyphMetricsAndPaths paths{strikeSpec};
+
+ auto result = SkPDFMakeArray();
+
+ std::vector<SkGlyphID> glyphIDs;
+ subset.getSetValues([&](unsigned index) {
+ glyphIDs.push_back(SkToU16(index));
+ });
+ auto glyphs = paths.glyphs(SkSpan(glyphIDs));
+
+#if defined(SK_PDF_CAN_USE_DW)
+ std::vector<int16_t> advances;
+ advances.reserve(glyphs.size());
+ for (const SkGlyph* glyph : glyphs) {
+ advances.push_back((int16_t)glyph->advanceX());
+ }
+ std::sort(advances.begin(), advances.end());
+ int16_t modeAdvance = findMode(SkSpan(advances));
+ *defaultAdvance = scale_from_font_units(modeAdvance, emSize);
+#else
+ *defaultAdvance = 0;
+#endif
+
+ for (size_t i = 0; i < glyphs.size(); ++i) {
+ int16_t advance = (int16_t)glyphs[i]->advanceX();
+
+#if defined(SK_PDF_CAN_USE_DW)
+ // a. Skipping don't cares or defaults is a win (trivial)
+ if (advance == modeAdvance) {
+ continue;
+ }
+#endif
+
+ // b. 2+ repeats create run as long as possible, else start range
+ {
+ size_t j = i + 1; // j is always one past the last known repeat
+ for (; j < glyphs.size(); ++j) {
+ int16_t next_advance = (int16_t)glyphs[j]->advanceX();
+ if (advance != next_advance) {
+ break;
+ }
+ }
+ if (j - i >= 2) {
+ result->appendInt(glyphs[i]->getGlyphID());
+ result->appendInt(glyphs[j - 1]->getGlyphID());
+ result->appendScalar(scale_from_font_units(advance, emSize));
+ i = j - 1;
+ continue;
+ }
+ }
+
+ {
+ result->appendInt(glyphs[i]->getGlyphID());
+ auto advanceArray = SkPDFMakeArray();
+ advanceArray->appendScalar(scale_from_font_units(advance, emSize));
+ size_t j = i + 1; // j is always one past the last output
+ for (; j < glyphs.size(); ++j) {
+ advance = (int16_t)glyphs[j]->advanceX();
+#if defined(SK_PDF_CAN_USE_DW)
+ // c. end range if default seen
+ if (advance == modeAdvance) {
+ break;
+ }
+#endif
+
+ int dontCares = glyphs[j]->getGlyphID() - glyphs[j - 1]->getGlyphID() - 1;
+ // d. end range if 4+ don't cares
+ if (dontCares >= 4) {
+ break;
+ }
+
+ int16_t next_advance = 0;
+ // e. end range for 2+ repeats with 4+ don't cares
+ if (j + 1 < glyphs.size()) {
+ next_advance = (int16_t)glyphs[j+1]->advanceX();
+ int next_dontCares = glyphs[j+1]->getGlyphID() - glyphs[j]->getGlyphID() - 1;
+ if (advance == next_advance && dontCares + next_dontCares >= 4) {
+ break;
+ }
+ }
+
+ // f. end range for 3+ repeats
+ if (j + 2 < glyphs.size() && advance == next_advance) {
+ next_advance = (int16_t)glyphs[j+2]->advanceX();
+ if (advance == next_advance) {
+ break;
+ }
+ }
+
+ while (dontCares --> 0) {
+ advanceArray->appendScalar(0);
+ }
+ advanceArray->appendScalar(scale_from_font_units(advance, emSize));
+ }
+ result->appendObject(std::move(advanceArray));
+ i = j - 1;
+ }
+ }
+
+ return result;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h
new file mode 100644
index 0000000000..fd541c2d58
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeCIDGlyphWidthsArray.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFMakeCIDGlyphWidthsArray_DEFINED
+#define SkPDFMakeCIDGlyphWidthsArray_DEFINED
+
+#include "src/pdf/SkPDFTypes.h"
+
+class SkPDFGlyphUse;
+class SkTypeface;
+
+/* PDF 32000-1:2008, page 270: "The array's elements have a variable
+ format that can specify individual widths for consecutive CIDs or
+ one width for a range of CIDs". */
+std::unique_ptr<SkPDFArray> SkPDFMakeCIDGlyphWidthsArray(const SkTypeface& typeface,
+ const SkPDFGlyphUse& subset,
+ SkScalar* defaultAdvance);
+
+#endif // SkPDFMakeCIDGlyphWidthsArray_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp
new file mode 100644
index 0000000000..e6d6c6f06c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFMakeToUnicodeCmap.h"
+
+#include "include/private/base/SkTo.h"
+#include "src/base/SkUTF.h"
+#include "src/pdf/SkPDFUtils.h"
+
+static void append_tounicode_header(SkDynamicMemoryWStream* cmap,
+ bool multibyte) {
+ // 12 dict begin: 12 is an Adobe-suggested value. Shall not change.
+ // It's there to prevent old version Adobe Readers from malfunctioning.
+ const char* kHeader =
+ "/CIDInit /ProcSet findresource begin\n"
+ "12 dict begin\n"
+ "begincmap\n";
+ cmap->writeText(kHeader);
+
+ // The /CIDSystemInfo must be consistent to the one in
+ // SkPDFFont::populateCIDFont().
+ // We can not pass over the system info object here because the format is
+ // different. This is not a reference object.
+ const char* kSysInfo =
+ "/CIDSystemInfo\n"
+ "<< /Registry (Adobe)\n"
+ "/Ordering (UCS)\n"
+ "/Supplement 0\n"
+ ">> def\n";
+ cmap->writeText(kSysInfo);
+
+ // The CMapName must be consistent to /CIDSystemInfo above.
+ // /CMapType 2 means ToUnicode.
+ // Codespace range just tells the PDF processor the valid range.
+ const char* kTypeInfoHeader =
+ "/CMapName /Adobe-Identity-UCS def\n"
+ "/CMapType 2 def\n"
+ "1 begincodespacerange\n";
+ cmap->writeText(kTypeInfoHeader);
+ if (multibyte) {
+ cmap->writeText("<0000> <FFFF>\n");
+ } else {
+ cmap->writeText("<00> <FF>\n");
+ }
+ cmap->writeText("endcodespacerange\n");
+}
+
+static void append_cmap_footer(SkDynamicMemoryWStream* cmap) {
+ const char kFooter[] =
+ "endcmap\n"
+ "CMapName currentdict /CMap defineresource pop\n"
+ "end\n"
+ "end";
+ cmap->writeText(kFooter);
+}
+
+namespace {
+struct BFChar {
+ SkGlyphID fGlyphId;
+ SkUnichar fUnicode;
+};
+
+struct BFRange {
+ SkGlyphID fStart;
+ SkGlyphID fEnd;
+ SkUnichar fUnicode;
+};
+} // namespace
+
+static void write_glyph(SkDynamicMemoryWStream* cmap,
+ bool multiByte,
+ SkGlyphID gid) {
+ if (multiByte) {
+ SkPDFUtils::WriteUInt16BE(cmap, gid);
+ } else {
+ SkPDFUtils::WriteUInt8(cmap, SkToU8(gid));
+ }
+}
+
+static void append_bfchar_section(const std::vector<BFChar>& bfchar,
+ bool multiByte,
+ SkDynamicMemoryWStream* cmap) {
+ // PDF spec defines that every bf* list can have at most 100 entries.
+ for (size_t i = 0; i < bfchar.size(); i += 100) {
+ int count = SkToInt(bfchar.size() - i);
+ count = std::min(count, 100);
+ cmap->writeDecAsText(count);
+ cmap->writeText(" beginbfchar\n");
+ for (int j = 0; j < count; ++j) {
+ cmap->writeText("<");
+ write_glyph(cmap, multiByte, bfchar[i + j].fGlyphId);
+ cmap->writeText("> <");
+ SkPDFUtils::WriteUTF16beHex(cmap, bfchar[i + j].fUnicode);
+ cmap->writeText(">\n");
+ }
+ cmap->writeText("endbfchar\n");
+ }
+}
+
+static void append_bfrange_section(const std::vector<BFRange>& bfrange,
+ bool multiByte,
+ SkDynamicMemoryWStream* cmap) {
+ // PDF spec defines that every bf* list can have at most 100 entries.
+ for (size_t i = 0; i < bfrange.size(); i += 100) {
+ int count = SkToInt(bfrange.size() - i);
+ count = std::min(count, 100);
+ cmap->writeDecAsText(count);
+ cmap->writeText(" beginbfrange\n");
+ for (int j = 0; j < count; ++j) {
+ cmap->writeText("<");
+ write_glyph(cmap, multiByte, bfrange[i + j].fStart);
+ cmap->writeText("> <");
+ write_glyph(cmap, multiByte, bfrange[i + j].fEnd);
+ cmap->writeText("> <");
+ SkPDFUtils::WriteUTF16beHex(cmap, bfrange[i + j].fUnicode);
+ cmap->writeText(">\n");
+ }
+ cmap->writeText("endbfrange\n");
+ }
+}
+
+// Generate <bfchar> and <bfrange> table according to PDF spec 1.4 and Adobe
+// Technote 5014.
+// The function is not static so we can test it in unit tests.
+//
+// Current implementation guarantees bfchar and bfrange entries do not overlap.
+//
+// Current implementation does not attempt aggressive optimizations against
+// following case because the specification is not clear.
+//
+// 4 beginbfchar 1 beginbfchar
+// <0003> <0013> <0020> <0014>
+// <0005> <0015> to endbfchar
+// <0007> <0017> 1 beginbfrange
+// <0020> <0014> <0003> <0007> <0013>
+// endbfchar endbfrange
+//
+// Adobe Technote 5014 said: "Code mappings (unlike codespace ranges) may
+// overlap, but succeeding maps supersede preceding maps."
+//
+// In case of searching text in PDF, bfrange will have higher precedence so
+// typing char id 0x0014 in search box will get glyph id 0x0004 first. However,
+// the spec does not mention how will this kind of conflict being resolved.
+//
+// For the worst case (having 65536 continuous unicode and we use every other
+// one of them), the possible savings by aggressive optimization is 416KB
+// pre-compressed and does not provide enough motivation for implementation.
+void SkPDFAppendCmapSections(const SkUnichar* glyphToUnicode,
+ const SkPDFGlyphUse* subset,
+ SkDynamicMemoryWStream* cmap,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID) {
+ int glyphOffset = 0;
+ if (!multiByteGlyphs) {
+ glyphOffset = firstGlyphID - 1;
+ }
+
+ std::vector<BFChar> bfcharEntries;
+ std::vector<BFRange> bfrangeEntries;
+
+ BFRange currentRangeEntry = {0, 0, 0};
+ bool rangeEmpty = true;
+ const int limit = (int)lastGlyphID + 1 - glyphOffset;
+
+ for (int i = firstGlyphID - glyphOffset; i < limit + 1; ++i) {
+ SkGlyphID gid = i + glyphOffset;
+ bool inSubset = i < limit && (subset == nullptr || subset->has(gid));
+ if (!rangeEmpty) {
+ // PDF spec requires bfrange not changing the higher byte,
+ // e.g. <1035> <10FF> <2222> is ok, but
+ // <1035> <1100> <2222> is no good
+ bool inRange =
+ i == currentRangeEntry.fEnd + 1 &&
+ i >> 8 == currentRangeEntry.fStart >> 8 &&
+ i < limit &&
+ glyphToUnicode[gid] ==
+ currentRangeEntry.fUnicode + i - currentRangeEntry.fStart;
+ if (!inSubset || !inRange) {
+ if (currentRangeEntry.fEnd > currentRangeEntry.fStart) {
+ bfrangeEntries.push_back(currentRangeEntry);
+ } else {
+ bfcharEntries.push_back({currentRangeEntry.fStart, currentRangeEntry.fUnicode});
+ }
+ rangeEmpty = true;
+ }
+ }
+ if (inSubset) {
+ currentRangeEntry.fEnd = i;
+ if (rangeEmpty) {
+ currentRangeEntry.fStart = i;
+ currentRangeEntry.fUnicode = glyphToUnicode[gid];
+ rangeEmpty = false;
+ }
+ }
+ }
+
+ // The spec requires all bfchar entries for a font must come before bfrange
+ // entries.
+ append_bfchar_section(bfcharEntries, multiByteGlyphs, cmap);
+ append_bfrange_section(bfrangeEntries, multiByteGlyphs, cmap);
+}
+
+std::unique_ptr<SkStreamAsset> SkPDFMakeToUnicodeCmap(
+ const SkUnichar* glyphToUnicode,
+ const SkPDFGlyphUse* subset,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID) {
+ SkDynamicMemoryWStream cmap;
+ append_tounicode_header(&cmap, multiByteGlyphs);
+ SkPDFAppendCmapSections(glyphToUnicode, subset, &cmap, multiByteGlyphs,
+ firstGlyphID, lastGlyphID);
+ append_cmap_footer(&cmap);
+ return cmap.detachAsStream();
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h
new file mode 100644
index 0000000000..b77f23de16
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMakeToUnicodeCmap.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFMakeToUnicodeCmap_DEFINED
+#define SkPDFMakeToUnicodeCmap_DEFINED
+
+#include "include/core/SkStream.h"
+#include "src/pdf/SkPDFFont.h"
+
+std::unique_ptr<SkStreamAsset> SkPDFMakeToUnicodeCmap(
+ const SkUnichar* glyphToUnicode,
+ const SkPDFGlyphUse* subset,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID);
+
+// Exposed for unit testing.
+void SkPDFAppendCmapSections(const SkUnichar* glyphToUnicode,
+ const SkPDFGlyphUse* subset,
+ SkDynamicMemoryWStream* cmap,
+ bool multiByteGlyphs,
+ SkGlyphID firstGlyphID,
+ SkGlyphID lastGlyphID);
+
+#endif // SkPDFMakeToUnicodeCmap_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp b/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp
new file mode 100644
index 0000000000..943dfbc846
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMetadata.cpp
@@ -0,0 +1,325 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFMetadata.h"
+
+#include "include/private/base/SkTo.h"
+#include "src/base/SkUTF.h"
+#include "src/base/SkUtils.h"
+#include "src/core/SkMD5.h"
+#include "src/pdf/SkPDFTypes.h"
+
+#include <utility>
+
+static constexpr SkTime::DateTime kZeroTime = {0, 0, 0, 0, 0, 0, 0, 0};
+
+static bool operator!=(const SkTime::DateTime& u, const SkTime::DateTime& v) {
+ return u.fTimeZoneMinutes != v.fTimeZoneMinutes ||
+ u.fYear != v.fYear ||
+ u.fMonth != v.fMonth ||
+ u.fDayOfWeek != v.fDayOfWeek ||
+ u.fDay != v.fDay ||
+ u.fHour != v.fHour ||
+ u.fMinute != v.fMinute ||
+ u.fSecond != v.fSecond;
+}
+
+static SkString pdf_date(const SkTime::DateTime& dt) {
+ int timeZoneMinutes = SkToInt(dt.fTimeZoneMinutes);
+ char timezoneSign = timeZoneMinutes >= 0 ? '+' : '-';
+ int timeZoneHours = SkTAbs(timeZoneMinutes) / 60;
+ timeZoneMinutes = SkTAbs(timeZoneMinutes) % 60;
+ return SkStringPrintf(
+ "D:%04u%02u%02u%02u%02u%02u%c%02d'%02d'",
+ static_cast<unsigned>(dt.fYear), static_cast<unsigned>(dt.fMonth),
+ static_cast<unsigned>(dt.fDay), static_cast<unsigned>(dt.fHour),
+ static_cast<unsigned>(dt.fMinute),
+ static_cast<unsigned>(dt.fSecond), timezoneSign, timeZoneHours,
+ timeZoneMinutes);
+}
+
+namespace {
+static const struct {
+ const char* const key;
+ SkString SkPDF::Metadata::*const valuePtr;
+} gMetadataKeys[] = {
+ {"Title", &SkPDF::Metadata::fTitle},
+ {"Author", &SkPDF::Metadata::fAuthor},
+ {"Subject", &SkPDF::Metadata::fSubject},
+ {"Keywords", &SkPDF::Metadata::fKeywords},
+ {"Creator", &SkPDF::Metadata::fCreator},
+ {"Producer", &SkPDF::Metadata::fProducer},
+};
+} // namespace
+
+std::unique_ptr<SkPDFObject> SkPDFMetadata::MakeDocumentInformationDict(
+ const SkPDF::Metadata& metadata) {
+ auto dict = SkPDFMakeDict();
+ for (const auto keyValuePtr : gMetadataKeys) {
+ const SkString& value = metadata.*(keyValuePtr.valuePtr);
+ if (value.size() > 0) {
+ dict->insertTextString(keyValuePtr.key, value);
+ }
+ }
+ if (metadata.fCreation != kZeroTime) {
+ dict->insertTextString("CreationDate", pdf_date(metadata.fCreation));
+ }
+ if (metadata.fModified != kZeroTime) {
+ dict->insertTextString("ModDate", pdf_date(metadata.fModified));
+ }
+ return std::move(dict);
+}
+
+SkUUID SkPDFMetadata::CreateUUID(const SkPDF::Metadata& metadata) {
+ // The main requirement is for the UUID to be unique; the exact
+ // format of the data that will be hashed is not important.
+ SkMD5 md5;
+ const char uuidNamespace[] = "org.skia.pdf\n";
+ md5.writeText(uuidNamespace);
+ double msec = SkTime::GetMSecs();
+ md5.write(&msec, sizeof(msec));
+ SkTime::DateTime dateTime;
+ SkTime::GetDateTime(&dateTime);
+ md5.write(&dateTime, sizeof(dateTime));
+ md5.write(&metadata.fCreation, sizeof(metadata.fCreation));
+ md5.write(&metadata.fModified, sizeof(metadata.fModified));
+
+ for (const auto keyValuePtr : gMetadataKeys) {
+ md5.writeText(keyValuePtr.key);
+ md5.write("\037", 1);
+ const SkString& value = metadata.*(keyValuePtr.valuePtr);
+ md5.write(value.c_str(), value.size());
+ md5.write("\036", 1);
+ }
+ SkMD5::Digest digest = md5.finish();
+ // See RFC 4122, page 6-7.
+ digest.data[6] = (digest.data[6] & 0x0F) | 0x30;
+ digest.data[8] = (digest.data[8] & 0x3F) | 0x80;
+ static_assert(sizeof(digest) == sizeof(SkUUID), "uuid_size");
+ SkUUID uuid;
+ memcpy((void*)&uuid, &digest, sizeof(digest));
+ return uuid;
+}
+
+std::unique_ptr<SkPDFObject> SkPDFMetadata::MakePdfId(const SkUUID& doc, const SkUUID& instance) {
+ // /ID [ <81b14aafa313db63dbd6f981e49f94f4>
+ // <81b14aafa313db63dbd6f981e49f94f4> ]
+ auto array = SkPDFMakeArray();
+ static_assert(sizeof(SkUUID) == 16, "uuid_size");
+ array->appendByteString(SkString(reinterpret_cast<const char*>(&doc ), sizeof(SkUUID)));
+ array->appendByteString(SkString(reinterpret_cast<const char*>(&instance), sizeof(SkUUID)));
+ return std::move(array);
+}
+
+// Convert a block of memory to hexadecimal. Input and output pointers will be
+// moved to end of the range.
+static void hexify(const uint8_t** inputPtr, char** outputPtr, int count) {
+ SkASSERT(inputPtr && *inputPtr);
+ SkASSERT(outputPtr && *outputPtr);
+ while (count-- > 0) {
+ uint8_t value = *(*inputPtr)++;
+ *(*outputPtr)++ = SkHexadecimalDigits::gLower[value >> 4];
+ *(*outputPtr)++ = SkHexadecimalDigits::gLower[value & 0xF];
+ }
+}
+
+static SkString uuid_to_string(const SkUUID& uuid) {
+ // 8-4-4-4-12
+ char buffer[36]; // [32 + 4]
+ char* ptr = buffer;
+ const uint8_t* data = uuid.fData;
+ hexify(&data, &ptr, 4);
+ *ptr++ = '-';
+ hexify(&data, &ptr, 2);
+ *ptr++ = '-';
+ hexify(&data, &ptr, 2);
+ *ptr++ = '-';
+ hexify(&data, &ptr, 2);
+ *ptr++ = '-';
+ hexify(&data, &ptr, 6);
+ SkASSERT(ptr == buffer + 36);
+ SkASSERT(data == uuid.fData + 16);
+ return SkString(buffer, 36);
+}
+
+namespace {
+class PDFXMLObject final : public SkPDFObject {
+public:
+ PDFXMLObject(SkString xml) : fXML(std::move(xml)) {}
+ void emitObject(SkWStream* stream) const override {
+ SkPDFDict dict("Metadata");
+ dict.insertName("Subtype", "XML");
+ dict.insertInt("Length", fXML.size());
+ dict.emitObject(stream);
+ static const char streamBegin[] = " stream\n";
+ stream->writeText(streamBegin);
+ // Do not compress this. The standard requires that a
+ // program that does not understand PDF can grep for
+ // "<?xpacket" and extract the entire XML.
+ stream->write(fXML.c_str(), fXML.size());
+ static const char streamEnd[] = "\nendstream";
+ stream->writeText(streamEnd);
+ }
+
+private:
+ const SkString fXML;
+};
+} // namespace
+
+static int count_xml_escape_size(const SkString& input) {
+ int extra = 0;
+ for (size_t i = 0; i < input.size(); ++i) {
+ if (input[i] == '&') {
+ extra += 4; // strlen("&amp;") - strlen("&")
+ } else if (input[i] == '<') {
+ extra += 3; // strlen("&lt;") - strlen("<")
+ }
+ }
+ return extra;
+}
+
+SkString escape_xml(const SkString& input,
+ const char* before = nullptr,
+ const char* after = nullptr) {
+ if (input.size() == 0) {
+ return input;
+ }
+ // "&" --> "&amp;" and "<" --> "&lt;"
+ // text is assumed to be in UTF-8
+ // all strings are xml content, not attribute values.
+ size_t beforeLen = before ? strlen(before) : 0;
+ size_t afterLen = after ? strlen(after) : 0;
+ int extra = count_xml_escape_size(input);
+ SkString output(input.size() + extra + beforeLen + afterLen);
+ char* out = output.data();
+ if (before) {
+ strncpy(out, before, beforeLen);
+ out += beforeLen;
+ }
+ static const char kAmp[] = "&amp;";
+ static const char kLt[] = "&lt;";
+ for (size_t i = 0; i < input.size(); ++i) {
+ if (input[i] == '&') {
+ memcpy(out, kAmp, strlen(kAmp));
+ out += strlen(kAmp);
+ } else if (input[i] == '<') {
+ memcpy(out, kLt, strlen(kLt));
+ out += strlen(kLt);
+ } else {
+ *out++ = input[i];
+ }
+ }
+ if (after) {
+ strncpy(out, after, afterLen);
+ out += afterLen;
+ }
+ // Validate that we haven't written outside of our string.
+ SkASSERT(out == &output.data()[output.size()]);
+ *out = '\0';
+ return output;
+}
+
+SkPDFIndirectReference SkPDFMetadata::MakeXMPObject(
+ const SkPDF::Metadata& metadata,
+ const SkUUID& doc,
+ const SkUUID& instance,
+ SkPDFDocument* docPtr) {
+ static const char templateString[] =
+ "<?xpacket begin=\"\" id=\"W5M0MpCehiHzreSzNTczkc9d\"?>\n"
+ "<x:xmpmeta xmlns:x=\"adobe:ns:meta/\"\n"
+ " x:xmptk=\"Adobe XMP Core 5.4-c005 78.147326, "
+ "2012/08/23-13:03:03\">\n"
+ "<rdf:RDF "
+ "xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\n"
+ "<rdf:Description rdf:about=\"\"\n"
+ " xmlns:xmp=\"http://ns.adobe.com/xap/1.0/\"\n"
+ " xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n"
+ " xmlns:xmpMM=\"http://ns.adobe.com/xap/1.0/mm/\"\n"
+ " xmlns:pdf=\"http://ns.adobe.com/pdf/1.3/\"\n"
+ " xmlns:pdfaid=\"http://www.aiim.org/pdfa/ns/id/\">\n"
+ "<pdfaid:part>2</pdfaid:part>\n"
+ "<pdfaid:conformance>B</pdfaid:conformance>\n"
+ "%s" // ModifyDate
+ "%s" // CreateDate
+ "%s" // xmp:CreatorTool
+ "<dc:format>application/pdf</dc:format>\n"
+ "%s" // dc:title
+ "%s" // dc:description
+ "%s" // author
+ "%s" // keywords
+ "<xmpMM:DocumentID>uuid:%s</xmpMM:DocumentID>\n"
+ "<xmpMM:InstanceID>uuid:%s</xmpMM:InstanceID>\n"
+ "%s" // pdf:Producer
+ "%s" // pdf:Keywords
+ "</rdf:Description>\n"
+ "</rdf:RDF>\n"
+ "</x:xmpmeta>\n" // Note: the standard suggests 4k of padding.
+ "<?xpacket end=\"w\"?>\n";
+
+ SkString creationDate;
+ SkString modificationDate;
+ if (metadata.fCreation != kZeroTime) {
+ SkString tmp;
+ metadata.fCreation.toISO8601(&tmp);
+ SkASSERT(0 == count_xml_escape_size(tmp));
+ // YYYY-mm-ddTHH:MM:SS[+|-]ZZ:ZZ; no need to escape
+ creationDate = SkStringPrintf("<xmp:CreateDate>%s</xmp:CreateDate>\n",
+ tmp.c_str());
+ }
+ if (metadata.fModified != kZeroTime) {
+ SkString tmp;
+ metadata.fModified.toISO8601(&tmp);
+ SkASSERT(0 == count_xml_escape_size(tmp));
+ modificationDate = SkStringPrintf(
+ "<xmp:ModifyDate>%s</xmp:ModifyDate>\n", tmp.c_str());
+ }
+ SkString title =
+ escape_xml(metadata.fTitle,
+ "<dc:title><rdf:Alt><rdf:li xml:lang=\"x-default\">",
+ "</rdf:li></rdf:Alt></dc:title>\n");
+ SkString author =
+ escape_xml(metadata.fAuthor, "<dc:creator><rdf:Seq><rdf:li>",
+ "</rdf:li></rdf:Seq></dc:creator>\n");
+ // TODO: in theory, XMP can support multiple authors. Split on a delimiter?
+ SkString subject = escape_xml(
+ metadata.fSubject,
+ "<dc:description><rdf:Alt><rdf:li xml:lang=\"x-default\">",
+ "</rdf:li></rdf:Alt></dc:description>\n");
+ SkString keywords1 =
+ escape_xml(metadata.fKeywords, "<dc:subject><rdf:Bag><rdf:li>",
+ "</rdf:li></rdf:Bag></dc:subject>\n");
+ SkString keywords2 = escape_xml(metadata.fKeywords, "<pdf:Keywords>",
+ "</pdf:Keywords>\n");
+ // TODO: in theory, keywords can be a list too.
+
+ SkString producer = escape_xml(metadata.fProducer, "<pdf:Producer>", "</pdf:Producer>\n");
+
+ SkString creator = escape_xml(metadata.fCreator, "<xmp:CreatorTool>",
+ "</xmp:CreatorTool>\n");
+ SkString documentID = uuid_to_string(doc); // no need to escape
+ SkASSERT(0 == count_xml_escape_size(documentID));
+ SkString instanceID = uuid_to_string(instance);
+ SkASSERT(0 == count_xml_escape_size(instanceID));
+
+
+ auto value = SkStringPrintf(
+ templateString, modificationDate.c_str(), creationDate.c_str(),
+ creator.c_str(), title.c_str(), subject.c_str(), author.c_str(),
+ keywords1.c_str(), documentID.c_str(), instanceID.c_str(),
+ producer.c_str(), keywords2.c_str());
+
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict("Metadata");
+ dict->insertName("Subtype", "XML");
+ return SkPDFStreamOut(std::move(dict),
+ SkMemoryStream::MakeCopy(value.c_str(), value.size()),
+ docPtr, SkPDFSteamCompressionEnabled::No);
+}
+
+#undef SKPDF_CUSTOM_PRODUCER_KEY
+#undef SKPDF_PRODUCER
+#undef SKPDF_STRING
+#undef SKPDF_STRING_IMPL
diff --git a/gfx/skia/skia/src/pdf/SkPDFMetadata.h b/gfx/skia/skia/src/pdf/SkPDFMetadata.h
new file mode 100644
index 0000000000..d9df0aeff2
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFMetadata.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFMetadata_DEFINED
+#define SkPDFMetadata_DEFINED
+
+#include "include/docs/SkPDFDocument.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/pdf/SkUUID.h"
+
+class SkPDFObject;
+
+namespace SkPDFMetadata {
+std::unique_ptr<SkPDFObject> MakeDocumentInformationDict(const SkPDF::Metadata&);
+
+SkUUID CreateUUID(const SkPDF::Metadata&);
+
+std::unique_ptr<SkPDFObject> MakePdfId(const SkUUID& doc, const SkUUID& instance);
+
+SkPDFIndirectReference MakeXMPObject(const SkPDF::Metadata& metadata,
+ const SkUUID& doc,
+ const SkUUID& instance,
+ SkPDFDocument*);
+} // namespace SkPDFMetadata
+#endif // SkPDFMetadata_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp b/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp
new file mode 100644
index 0000000000..a4eeed3029
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFResourceDict.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFTypes.h"
+
+// Verify that the values of enum ResourceType correspond to the expected values
+// as defined in the arrays below.
+// If these are failing, you may need to update the kResourceTypePrefixes
+// and kResourceTypeNames arrays below.
+static_assert(0 == (int)SkPDFResourceType::kExtGState, "resource_type_mismatch");
+static_assert(1 == (int)SkPDFResourceType::kPattern, "resource_type_mismatch");
+static_assert(2 == (int)SkPDFResourceType::kXObject, "resource_type_mismatch");
+static_assert(3 == (int)SkPDFResourceType::kFont, "resource_type_mismatch");
+
+// One extra character for the Prefix.
+constexpr size_t kMaxResourceNameLength = 1 + kSkStrAppendS32_MaxSize;
+
+// returns pointer just past end of what's written into `dst`.
+static char* get_resource_name(char dst[kMaxResourceNameLength], SkPDFResourceType type, int key) {
+ static const char kResourceTypePrefixes[] = {
+ 'G', // kExtGState
+ 'P', // kPattern
+ 'X', // kXObject
+ 'F' // kFont
+ };
+ SkASSERT((unsigned)type < std::size(kResourceTypePrefixes));
+ dst[0] = kResourceTypePrefixes[(unsigned)type];
+ return SkStrAppendS32(dst + 1, key);
+}
+
+void SkPDFWriteResourceName(SkWStream* dst, SkPDFResourceType type, int key) {
+ // One extra character for the leading '/'.
+ char buffer[1 + kMaxResourceNameLength];
+ buffer[0] = '/';
+ char* end = get_resource_name(buffer + 1, type, key);
+ dst->write(buffer, (size_t)(end - buffer));
+}
+
+static const char* resource_name(SkPDFResourceType type) {
+ static const char* kResourceTypeNames[] = {
+ "ExtGState",
+ "Pattern",
+ "XObject",
+ "Font"
+ };
+ SkASSERT((unsigned)type < std::size(kResourceTypeNames));
+ return kResourceTypeNames[(unsigned)type];
+}
+
+static SkString resource(SkPDFResourceType type, int index) {
+ char buffer[kMaxResourceNameLength];
+ char* end = get_resource_name(buffer, type, index);
+ return SkString(buffer, (size_t)(end - buffer));
+}
+
+static void add_subdict(const std::vector<SkPDFIndirectReference>& resourceList,
+ SkPDFResourceType type,
+ SkPDFDict* dst) {
+ if (!resourceList.empty()) {
+ auto resources = SkPDFMakeDict();
+ for (SkPDFIndirectReference ref : resourceList) {
+ resources->insertRef(resource(type, ref.fValue), ref);
+ }
+ dst->insertObject(resource_name(type), std::move(resources));
+ }
+}
+
+static std::unique_ptr<SkPDFArray> make_proc_set() {
+ auto procSets = SkPDFMakeArray();
+ static const char kProcs[][7] = { "PDF", "Text", "ImageB", "ImageC", "ImageI"};
+ procSets->reserve(std::size(kProcs));
+ for (const char* proc : kProcs) {
+ procSets->appendName(proc);
+ }
+ return procSets;
+}
+
+std::unique_ptr<SkPDFDict> SkPDFMakeResourceDict(
+ const std::vector<SkPDFIndirectReference>& graphicStateResources,
+ const std::vector<SkPDFIndirectReference>& shaderResources,
+ const std::vector<SkPDFIndirectReference>& xObjectResources,
+ const std::vector<SkPDFIndirectReference>& fontResources) {
+ auto dict = SkPDFMakeDict();
+ dict->insertObject("ProcSet", make_proc_set());
+ add_subdict(graphicStateResources, SkPDFResourceType::kExtGState, dict.get());
+ add_subdict(shaderResources, SkPDFResourceType::kPattern, dict.get());
+ add_subdict(xObjectResources, SkPDFResourceType::kXObject, dict.get());
+ add_subdict(fontResources, SkPDFResourceType::kFont, dict.get());
+ return dict;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFResourceDict.h b/gfx/skia/skia/src/pdf/SkPDFResourceDict.h
new file mode 100644
index 0000000000..4cd9dfa1c3
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFResourceDict.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFResourceDict_DEFINED
+#define SkPDFResourceDict_DEFINED
+
+#include "src/pdf/SkPDFFont.h"
+
+#include <vector>
+
+class SkPDFDict;
+class SkPDFObject;
+class SkWStream;
+
+enum class SkPDFResourceType {
+ kExtGState = 0,
+ kPattern = 1,
+ kXObject = 2,
+ kFont = 3,
+ // These additional types are defined by the spec, but not
+ // currently used by Skia: ColorSpace, Shading, Properties
+};
+
+
+/** Create a PDF resource dictionary.
+ * The full set of ProcSet entries is automatically created for backwards
+ * compatibility, as recommended by the PDF spec.
+ *
+ * Any arguments can be nullptr.
+ */
+std::unique_ptr<SkPDFDict> SkPDFMakeResourceDict(
+ const std::vector<SkPDFIndirectReference>& graphicStateResources,
+ const std::vector<SkPDFIndirectReference>& shaderResources,
+ const std::vector<SkPDFIndirectReference>& xObjectResources,
+ const std::vector<SkPDFIndirectReference>& fontResources);
+
+/**
+ * Writes the name for the resource that will be generated by the resource
+ * dict.
+ *
+ * @param type The type of resource being entered
+ * @param key The resource key, should be unique within its type.
+ */
+void SkPDFWriteResourceName(SkWStream*, SkPDFResourceType type, int key);
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFShader.cpp b/gfx/skia/skia/src/pdf/SkPDFShader.cpp
new file mode 100644
index 0000000000..cbaa32e524
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFShader.cpp
@@ -0,0 +1,367 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFShader.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkSurface.h"
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/pdf/SkPDFDevice.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFormXObject.h"
+#include "src/pdf/SkPDFGradientShader.h"
+#include "src/pdf/SkPDFGraphicState.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFUtils.h"
+
+static void draw(SkCanvas* canvas, const SkImage* image, SkColor4f paintColor) {
+ SkPaint paint(paintColor);
+ canvas->drawImage(image, 0, 0, SkSamplingOptions(), &paint);
+}
+
+static SkBitmap to_bitmap(const SkImage* image) {
+ SkBitmap bitmap;
+ if (!SkPDFUtils::ToBitmap(image, &bitmap)) {
+ bitmap.allocN32Pixels(image->width(), image->height());
+ bitmap.eraseColor(0x00000000);
+ }
+ return bitmap;
+}
+
+static void draw_matrix(SkCanvas* canvas, const SkImage* image,
+ const SkMatrix& matrix, SkColor4f paintColor) {
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->concat(matrix);
+ draw(canvas, image, paintColor);
+}
+
+static void draw_bitmap_matrix(SkCanvas* canvas, const SkBitmap& bm,
+ const SkMatrix& matrix, SkColor4f paintColor) {
+ SkAutoCanvasRestore acr(canvas, true);
+ canvas->concat(matrix);
+ SkPaint paint(paintColor);
+ canvas->drawImage(bm.asImage(), 0, 0, SkSamplingOptions(), &paint);
+}
+
+static void fill_color_from_bitmap(SkCanvas* canvas,
+ float left, float top, float right, float bottom,
+ const SkBitmap& bitmap, int x, int y, float alpha) {
+ SkRect rect{left, top, right, bottom};
+ if (!rect.isEmpty()) {
+ SkColor4f color = SkColor4f::FromColor(bitmap.getColor(x, y));
+ SkPaint paint(SkColor4f{color.fR, color.fG, color.fB, alpha * color.fA});
+ canvas->drawRect(rect, paint);
+ }
+}
+
+static SkMatrix scale_translate(SkScalar sx, SkScalar sy, SkScalar tx, SkScalar ty) {
+ SkMatrix m;
+ m.setScaleTranslate(sx, sy, tx, ty);
+ return m;
+}
+
+static bool is_tiled(SkTileMode m) { return SkTileMode::kMirror == m || SkTileMode::kRepeat == m; }
+
+static SkPDFIndirectReference make_image_shader(SkPDFDocument* doc,
+ SkMatrix finalMatrix,
+ SkTileMode tileModesX,
+ SkTileMode tileModesY,
+ SkRect bBox,
+ const SkImage* image,
+ SkColor4f paintColor) {
+ // The image shader pattern cell will be drawn into a separate device
+ // in pattern cell space (no scaling on the bitmap, though there may be
+ // translations so that all content is in the device, coordinates > 0).
+
+ // Map clip bounds to shader space to ensure the device is large enough
+ // to handle fake clamping.
+
+ SkRect deviceBounds = bBox;
+ if (!SkPDFUtils::InverseTransformBBox(finalMatrix, &deviceBounds)) {
+ return SkPDFIndirectReference();
+ }
+
+ SkRect bitmapBounds = SkRect::MakeSize(SkSize::Make(image->dimensions()));
+
+ // For tiling modes, the bounds should be extended to include the bitmap,
+ // otherwise the bitmap gets clipped out and the shader is empty and awful.
+ // For clamp modes, we're only interested in the clip region, whether
+ // or not the main bitmap is in it.
+ if (is_tiled(tileModesX) || is_tiled(tileModesY)) {
+ deviceBounds.join(bitmapBounds);
+ }
+
+ SkISize patternDeviceSize = {SkScalarCeilToInt(deviceBounds.width()),
+ SkScalarCeilToInt(deviceBounds.height())};
+ auto patternDevice = sk_make_sp<SkPDFDevice>(patternDeviceSize, doc);
+ SkCanvas canvas(patternDevice);
+
+ SkRect patternBBox = SkRect::MakeSize(SkSize::Make(image->dimensions()));
+ SkScalar width = patternBBox.width();
+ SkScalar height = patternBBox.height();
+
+ // Translate the canvas so that the bitmap origin is at (0, 0).
+ canvas.translate(-deviceBounds.left(), -deviceBounds.top());
+ patternBBox.offset(-deviceBounds.left(), -deviceBounds.top());
+ // Undo the translation in the final matrix
+ finalMatrix.preTranslate(deviceBounds.left(), deviceBounds.top());
+
+ // If the bitmap is out of bounds (i.e. clamp mode where we only see the
+ // stretched sides), canvas will clip this out and the extraneous data
+ // won't be saved to the PDF.
+ draw(&canvas, image, paintColor);
+
+ // Tiling is implied. First we handle mirroring.
+ if (tileModesX == SkTileMode::kMirror) {
+ draw_matrix(&canvas, image, scale_translate(-1, 1, 2 * width, 0), paintColor);
+ patternBBox.fRight += width;
+ }
+ if (tileModesY == SkTileMode::kMirror) {
+ draw_matrix(&canvas, image, scale_translate(1, -1, 0, 2 * height), paintColor);
+ patternBBox.fBottom += height;
+ }
+ if (tileModesX == SkTileMode::kMirror && tileModesY == SkTileMode::kMirror) {
+ draw_matrix(&canvas, image, scale_translate(-1, -1, 2 * width, 2 * height), paintColor);
+ }
+
+ // Then handle Clamping, which requires expanding the pattern canvas to
+ // cover the entire surfaceBBox.
+
+ SkBitmap bitmap;
+ if (tileModesX == SkTileMode::kClamp || tileModesY == SkTileMode::kClamp) {
+ // For now, the easiest way to access the colors in the corners and sides is
+ // to just make a bitmap from the image.
+ bitmap = to_bitmap(image);
+ }
+
+ // If both x and y are in clamp mode, we start by filling in the corners.
+    // (Which are just rectangles of the corner colors.)
+ if (tileModesX == SkTileMode::kClamp && tileModesY == SkTileMode::kClamp) {
+ SkASSERT(!bitmap.drawsNothing());
+
+ fill_color_from_bitmap(&canvas, deviceBounds.left(), deviceBounds.top(), 0, 0,
+ bitmap, 0, 0, paintColor.fA);
+
+ fill_color_from_bitmap(&canvas, width, deviceBounds.top(), deviceBounds.right(), 0,
+ bitmap, bitmap.width() - 1, 0, paintColor.fA);
+
+ fill_color_from_bitmap(&canvas, width, height, deviceBounds.right(), deviceBounds.bottom(),
+ bitmap, bitmap.width() - 1, bitmap.height() - 1, paintColor.fA);
+
+ fill_color_from_bitmap(&canvas, deviceBounds.left(), height, 0, deviceBounds.bottom(),
+ bitmap, 0, bitmap.height() - 1, paintColor.fA);
+ }
+
+ // Then expand the left, right, top, then bottom.
+ if (tileModesX == SkTileMode::kClamp) {
+ SkASSERT(!bitmap.drawsNothing());
+ SkIRect subset = SkIRect::MakeXYWH(0, 0, 1, bitmap.height());
+ if (deviceBounds.left() < 0) {
+ SkBitmap left;
+ SkAssertResult(bitmap.extractSubset(&left, subset));
+
+ SkMatrix leftMatrix = scale_translate(-deviceBounds.left(), 1, deviceBounds.left(), 0);
+ draw_bitmap_matrix(&canvas, left, leftMatrix, paintColor);
+
+ if (tileModesY == SkTileMode::kMirror) {
+ leftMatrix.postScale(SK_Scalar1, -SK_Scalar1);
+ leftMatrix.postTranslate(0, 2 * height);
+ draw_bitmap_matrix(&canvas, left, leftMatrix, paintColor);
+ }
+ patternBBox.fLeft = 0;
+ }
+
+ if (deviceBounds.right() > width) {
+ SkBitmap right;
+ subset.offset(bitmap.width() - 1, 0);
+ SkAssertResult(bitmap.extractSubset(&right, subset));
+
+ SkMatrix rightMatrix = scale_translate(deviceBounds.right() - width, 1, width, 0);
+ draw_bitmap_matrix(&canvas, right, rightMatrix, paintColor);
+
+ if (tileModesY == SkTileMode::kMirror) {
+ rightMatrix.postScale(SK_Scalar1, -SK_Scalar1);
+ rightMatrix.postTranslate(0, 2 * height);
+ draw_bitmap_matrix(&canvas, right, rightMatrix, paintColor);
+ }
+ patternBBox.fRight = deviceBounds.width();
+ }
+ }
+ if (tileModesX == SkTileMode::kDecal) {
+ if (deviceBounds.left() < 0) {
+ patternBBox.fLeft = 0;
+ }
+ if (deviceBounds.right() > width) {
+ patternBBox.fRight = deviceBounds.width();
+ }
+ }
+
+ if (tileModesY == SkTileMode::kClamp) {
+ SkASSERT(!bitmap.drawsNothing());
+ SkIRect subset = SkIRect::MakeXYWH(0, 0, bitmap.width(), 1);
+ if (deviceBounds.top() < 0) {
+ SkBitmap top;
+ SkAssertResult(bitmap.extractSubset(&top, subset));
+
+ SkMatrix topMatrix = scale_translate(1, -deviceBounds.top(), 0, deviceBounds.top());
+ draw_bitmap_matrix(&canvas, top, topMatrix, paintColor);
+
+ if (tileModesX == SkTileMode::kMirror) {
+ topMatrix.postScale(-1, 1);
+ topMatrix.postTranslate(2 * width, 0);
+ draw_bitmap_matrix(&canvas, top, topMatrix, paintColor);
+ }
+ patternBBox.fTop = 0;
+ }
+
+ if (deviceBounds.bottom() > height) {
+ SkBitmap bottom;
+ subset.offset(0, bitmap.height() - 1);
+ SkAssertResult(bitmap.extractSubset(&bottom, subset));
+
+ SkMatrix bottomMatrix = scale_translate(1, deviceBounds.bottom() - height, 0, height);
+ draw_bitmap_matrix(&canvas, bottom, bottomMatrix, paintColor);
+
+ if (tileModesX == SkTileMode::kMirror) {
+ bottomMatrix.postScale(-1, 1);
+ bottomMatrix.postTranslate(2 * width, 0);
+ draw_bitmap_matrix(&canvas, bottom, bottomMatrix, paintColor);
+ }
+ patternBBox.fBottom = deviceBounds.height();
+ }
+ }
+ if (tileModesY == SkTileMode::kDecal) {
+ if (deviceBounds.top() < 0) {
+ patternBBox.fTop = 0;
+ }
+ if (deviceBounds.bottom() > height) {
+ patternBBox.fBottom = deviceBounds.height();
+ }
+ }
+
+ auto imageShader = patternDevice->content();
+ std::unique_ptr<SkPDFDict> resourceDict = patternDevice->makeResourceDict();
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ SkPDFUtils::PopulateTilingPatternDict(dict.get(), patternBBox,
+ std::move(resourceDict), finalMatrix);
+ return SkPDFStreamOut(std::move(dict), std::move(imageShader), doc);
+}
+
+// Generic fallback for unsupported shaders:
+// * allocate a surfaceBBox-sized bitmap
+// * shade the whole area
+// * use the result as a bitmap shader
+static SkPDFIndirectReference make_fallback_shader(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& canvasTransform,
+ const SkIRect& surfaceBBox,
+ SkColor4f paintColor) {
+ // surfaceBBox is in device space. While that's exactly what we
+ // want for sizing our bitmap, we need to map it into
+ // shader space for adjustments (to match
+ // MakeImageShader's behavior).
+ SkRect shaderRect = SkRect::Make(surfaceBBox);
+ if (!SkPDFUtils::InverseTransformBBox(canvasTransform, &shaderRect)) {
+ return SkPDFIndirectReference();
+ }
+ // Clamp the bitmap size to about 1M pixels
+ static const int kMaxBitmapArea = 1024 * 1024;
+ SkScalar bitmapArea = (float)surfaceBBox.width() * (float)surfaceBBox.height();
+ SkScalar rasterScale = 1.0f;
+ if (bitmapArea > (float)kMaxBitmapArea) {
+ rasterScale *= SkScalarSqrt((float)kMaxBitmapArea / bitmapArea);
+ }
+
+ SkISize size = {
+ SkTPin(SkScalarCeilToInt(rasterScale * surfaceBBox.width()), 1, kMaxBitmapArea),
+ SkTPin(SkScalarCeilToInt(rasterScale * surfaceBBox.height()), 1, kMaxBitmapArea)};
+ SkSize scale = {SkIntToScalar(size.width()) / shaderRect.width(),
+ SkIntToScalar(size.height()) / shaderRect.height()};
+
+ auto surface = SkSurface::MakeRasterN32Premul(size.width(), size.height());
+ SkASSERT(surface);
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorTRANSPARENT);
+
+ SkPaint p(paintColor);
+ p.setShader(sk_ref_sp(shader));
+
+ canvas->scale(scale.width(), scale.height());
+ canvas->translate(-shaderRect.x(), -shaderRect.y());
+ canvas->drawPaint(p);
+
+ auto shaderTransform = SkMatrix::Translate(shaderRect.x(), shaderRect.y());
+ shaderTransform.preScale(1 / scale.width(), 1 / scale.height());
+
+ sk_sp<SkImage> image = surface->makeImageSnapshot();
+ SkASSERT(image);
+ return make_image_shader(doc,
+ SkMatrix::Concat(canvasTransform, shaderTransform),
+ SkTileMode::kClamp, SkTileMode::kClamp,
+ SkRect::Make(surfaceBBox),
+ image.get(),
+ paintColor);
+}
+
+static SkColor4f adjust_color(SkShader* shader, SkColor4f paintColor) {
+ if (SkImage* img = shader->isAImage(nullptr, (SkTileMode*)nullptr)) {
+ if (img->isAlphaOnly()) {
+ return paintColor;
+ }
+ }
+ return SkColor4f{0, 0, 0, paintColor.fA}; // only preserve the alpha.
+}
+
+SkPDFIndirectReference SkPDFMakeShader(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& canvasTransform,
+ const SkIRect& surfaceBBox,
+ SkColor4f paintColor) {
+ SkASSERT(shader);
+ SkASSERT(doc);
+ if (as_SB(shader)->asGradient() != SkShaderBase::GradientType::kNone) {
+ return SkPDFGradientShader::Make(doc, shader, canvasTransform, surfaceBBox);
+ }
+ if (surfaceBBox.isEmpty()) {
+ return SkPDFIndirectReference();
+ }
+ SkBitmap image;
+
+ paintColor = adjust_color(shader, paintColor);
+ SkMatrix shaderTransform;
+ SkTileMode imageTileModes[2];
+ if (SkImage* skimg = shader->isAImage(&shaderTransform, imageTileModes)) {
+ SkMatrix finalMatrix = SkMatrix::Concat(canvasTransform, shaderTransform);
+ SkPDFImageShaderKey key = {
+ finalMatrix,
+ surfaceBBox,
+ SkBitmapKeyFromImage(skimg),
+ {imageTileModes[0], imageTileModes[1]},
+ paintColor};
+ SkPDFIndirectReference* shaderPtr = doc->fImageShaderMap.find(key);
+ if (shaderPtr) {
+ return *shaderPtr;
+ }
+ SkPDFIndirectReference pdfShader =
+ make_image_shader(doc,
+ finalMatrix,
+ imageTileModes[0],
+ imageTileModes[1],
+ SkRect::Make(surfaceBBox),
+ skimg,
+ paintColor);
+ doc->fImageShaderMap.set(std::move(key), pdfShader);
+ return pdfShader;
+ }
+ // Don't bother to de-dup fallback shader.
+ return make_fallback_shader(doc, shader, canvasTransform, surfaceBBox, paintColor);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFShader.h b/gfx/skia/skia/src/pdf/SkPDFShader.h
new file mode 100644
index 0000000000..a771734719
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFShader.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkPDFShader_DEFINED
+#define SkPDFShader_DEFINED
+
+#include "include/core/SkShader.h"
+#include "include/private/base/SkMacros.h"
+#include "src/pdf/SkBitmapKey.h"
+#include "src/pdf/SkPDFTypes.h"
+
+
+class SkPDFDocument;
+class SkMatrix;
+struct SkIRect;
+
+/** Make a PDF shader for the passed SkShader. If the SkShader is invalid in
+ * some way, returns nullptr.
+ *
+ * In PDF parlance, this is a pattern, used in place of a color when the
+ * pattern color space is selected.
+ *
+ * May cache the shader in the document for later re-use. If this function is
+ * called again with an equivalent shader, a new reference to the cached pdf
+ * shader may be returned.
+ *
+ * @param doc The parent document, must be non-null.
+ * @param shader The SkShader to emulate.
+ * @param ctm The current transform matrix. (PDF shaders are absolutely
+ * positioned, relative to where the page is drawn.)
+ * @param surfaceBBox The bounding box of the drawing surface (with matrix
+ * already applied).
+ * @param paintColor Color+Alpha of the paint. Color is usually ignored,
+ * unless it is a alpha shader.
+ */
+SkPDFIndirectReference SkPDFMakeShader(SkPDFDocument* doc,
+ SkShader* shader,
+ const SkMatrix& ctm,
+ const SkIRect& surfaceBBox,
+ SkColor4f paintColor);
+
+SK_BEGIN_REQUIRE_DENSE
+struct SkPDFImageShaderKey {
+ SkMatrix fTransform;
+ SkIRect fBBox;
+ SkBitmapKey fBitmapKey;
+ SkTileMode fImageTileModes[2];
+ SkColor4f fPaintColor;
+};
+SK_END_REQUIRE_DENSE
+
+inline bool operator==(const SkPDFImageShaderKey& a, const SkPDFImageShaderKey& b) {
+ SkASSERT(a.fBitmapKey.fID != 0);
+ SkASSERT(b.fBitmapKey.fID != 0);
+ return a.fTransform == b.fTransform
+ && a.fBBox == b.fBBox
+ && a.fBitmapKey == b.fBitmapKey
+ && a.fImageTileModes[0] == b.fImageTileModes[0]
+ && a.fImageTileModes[1] == b.fImageTileModes[1]
+ && a.fPaintColor == b.fPaintColor;
+}
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFSubsetFont.cpp b/gfx/skia/skia/src/pdf/SkPDFSubsetFont.cpp
new file mode 100644
index 0000000000..0a729bef50
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFSubsetFont.cpp
@@ -0,0 +1,208 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/pdf/SkPDFSubsetFont.h"
+
+#if defined(SK_PDF_USE_HARFBUZZ_SUBSET)
+
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/utils/SkCallableTraits.h"
+
+#include "hb.h"
+#include "hb-subset.h"
+
+using HBBlob = std::unique_ptr<hb_blob_t, SkFunctionObject<hb_blob_destroy>>;
+using HBFace = std::unique_ptr<hb_face_t, SkFunctionObject<hb_face_destroy>>;
+using HBSubsetInput = std::unique_ptr<hb_subset_input_t, SkFunctionObject<hb_subset_input_destroy>>;
+using HBSet = std::unique_ptr<hb_set_t, SkFunctionObject<hb_set_destroy>>;
+
+static HBBlob to_blob(sk_sp<SkData> data) {
+ using blob_size_t = SkCallableTraits<decltype(hb_blob_create)>::argument<1>::type;
+ if (!SkTFitsIn<blob_size_t>(data->size())) {
+ return nullptr;
+ }
+ const char* blobData = static_cast<const char*>(data->data());
+ blob_size_t blobSize = SkTo<blob_size_t>(data->size());
+ return HBBlob(hb_blob_create(blobData, blobSize,
+ HB_MEMORY_MODE_READONLY,
+ data.release(), [](void* p){ ((SkData*)p)->unref(); }));
+}
+
+static sk_sp<SkData> to_data(HBBlob blob) {
+ if (!blob) {
+ return nullptr;
+ }
+ unsigned int length;
+ const char* data = hb_blob_get_data(blob.get(), &length);
+ if (!data || !length) {
+ return nullptr;
+ }
+ return SkData::MakeWithProc(data, SkToSizeT(length),
+ [](const void*, void* ctx) { hb_blob_destroy((hb_blob_t*)ctx); },
+ blob.release());
+}
+
+template<typename...> using void_t = void;
+template<typename T, typename = void>
+struct SkPDFHarfBuzzSubset {
+ // This is the HarfBuzz 3.0 interface.
+ // hb_subset_flags_t does not exist in 2.0. It isn't dependent on T, so inline the value of
+ // HB_SUBSET_FLAGS_RETAIN_GIDS until 2.0 is no longer supported.
+ static HBFace Make(T input, hb_face_t* face, bool retainZeroGlyph) {
+ // TODO: When possible, check if a font is 'tricky' with FT_IS_TRICKY.
+ // If it isn't known if a font is 'tricky', retain the hints.
+ unsigned int flags = 0x2u/*HB_SUBSET_FLAGS_RETAIN_GIDS*/;
+ if (retainZeroGlyph) {
+ flags |= 0x40u/*HB_SUBSET_FLAGS_NOTDEF_OUTLINE*/;
+ }
+ hb_subset_input_set_flags(input, flags);
+ return HBFace(hb_subset_or_fail(face, input));
+ }
+};
+template<typename T>
+struct SkPDFHarfBuzzSubset<T, void_t<
+ decltype(hb_subset_input_set_retain_gids(std::declval<T>(), std::declval<bool>())),
+ decltype(hb_subset_input_set_drop_hints(std::declval<T>(), std::declval<bool>())),
+ decltype(hb_subset(std::declval<hb_face_t*>(), std::declval<T>()))
+ >>
+{
+ // This is the HarfBuzz 2.0 (non-public) interface, used if it exists.
+ // This code should be removed as soon as all users are migrated to the newer API.
+ static HBFace Make(T input, hb_face_t* face, bool) {
+ hb_subset_input_set_retain_gids(input, true);
+ // TODO: When possible, check if a font is 'tricky' with FT_IS_TRICKY.
+ // If it isn't known if a font is 'tricky', retain the hints.
+ hb_subset_input_set_drop_hints(input, false);
+ return HBFace(hb_subset(face, input));
+ }
+};
+
+static sk_sp<SkData> subset_harfbuzz(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ int ttcIndex) {
+ if (!fontData) {
+ return nullptr;
+ }
+ HBFace face(hb_face_create(to_blob(std::move(fontData)).get(), ttcIndex));
+ SkASSERT(face);
+
+ HBSubsetInput input(hb_subset_input_create_or_fail());
+ SkASSERT(input);
+ if (!face || !input) {
+ return nullptr;
+ }
+ hb_set_t* glyphs = hb_subset_input_glyph_set(input.get());
+ glyphUsage.getSetValues([&glyphs](unsigned gid) { hb_set_add(glyphs, gid);});
+
+ HBFace subset = SkPDFHarfBuzzSubset<hb_subset_input_t*>::Make(input.get(), face.get(),
+ glyphUsage.has(0));
+ if (!subset) {
+ return nullptr;
+ }
+ HBBlob result(hb_face_reference_blob(subset.get()));
+ return to_data(std::move(result));
+}
+
+#endif // defined(SK_PDF_USE_HARFBUZZ_SUBSET)
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_PDF_USE_SFNTLY)
+
+#include "sample/chromium/font_subsetter.h"
+#include <vector>
+
+#if defined(SK_USING_THIRD_PARTY_ICU)
+#include "third_party/icu/SkLoadICU.h"
+#endif
+
+static sk_sp<SkData> subset_sfntly(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ const char* fontName,
+ int ttcIndex) {
+#if defined(SK_USING_THIRD_PARTY_ICU)
+ if (!SkLoadICU()) {
+ return nullptr;
+ }
+#endif
+ // Generate glyph id array in format needed by sfntly.
+ // TODO(halcanary): sfntly should take a more compact format.
+ std::vector<unsigned> subset;
+ glyphUsage.getSetValues([&subset](unsigned v) { subset.push_back(v); });
+
+ unsigned char* subsetFont{nullptr};
+#if defined(SK_BUILD_FOR_GOOGLE3)
+ // TODO(halcanary): update SK_BUILD_FOR_GOOGLE3 to newest version of Sfntly.
+ (void)ttcIndex;
+ int subsetFontSize = SfntlyWrapper::SubsetFont(fontName,
+ fontData->bytes(),
+ fontData->size(),
+ subset.data(),
+ subset.size(),
+ &subsetFont);
+#else // defined(SK_BUILD_FOR_GOOGLE3)
+ (void)fontName;
+ int subsetFontSize = SfntlyWrapper::SubsetFont(ttcIndex,
+ fontData->bytes(),
+ fontData->size(),
+ subset.data(),
+ subset.size(),
+ &subsetFont);
+#endif // defined(SK_BUILD_FOR_GOOGLE3)
+ SkASSERT(subsetFontSize > 0 || subsetFont == nullptr);
+ if (subsetFontSize < 1 || subsetFont == nullptr) {
+ return nullptr;
+ }
+ return SkData::MakeWithProc(subsetFont, subsetFontSize,
+ [](const void* p, void*) { delete[] (unsigned char*)p; },
+ nullptr);
+}
+
+#endif // defined(SK_PDF_USE_SFNTLY)
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_PDF_USE_SFNTLY) && defined(SK_PDF_USE_HARFBUZZ_SUBSET)
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ SkPDF::Metadata::Subsetter subsetter,
+ const char* fontName,
+ int ttcIndex) {
+ switch (subsetter) {
+ case SkPDF::Metadata::kHarfbuzz_Subsetter:
+ return subset_harfbuzz(std::move(fontData), glyphUsage, ttcIndex);
+ case SkPDF::Metadata::kSfntly_Subsetter:
+ return subset_sfntly(std::move(fontData), glyphUsage, fontName, ttcIndex);
+ }
+ return nullptr;
+}
+
+#elif defined(SK_PDF_USE_SFNTLY)
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ SkPDF::Metadata::Subsetter,
+ const char* fontName,
+ int ttcIndex) {
+ return subset_sfntly(std::move(fontData), glyphUsage, fontName, ttcIndex);
+}
+
+#elif defined(SK_PDF_USE_HARFBUZZ_SUBSET)
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ SkPDF::Metadata::Subsetter,
+ const char*,
+ int ttcIndex) {
+ return subset_harfbuzz(std::move(fontData), glyphUsage, ttcIndex);
+}
+
+#else
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData>, const SkPDFGlyphUse&, SkPDF::Metadata::Subsetter,
+ const char*, int) {
+ return nullptr;
+}
+#endif // defined(SK_PDF_USE_SFNTLY), defined(SK_PDF_USE_HARFBUZZ_SUBSET)
diff --git a/gfx/skia/skia/src/pdf/SkPDFSubsetFont.h b/gfx/skia/skia/src/pdf/SkPDFSubsetFont.h
new file mode 100644
index 0000000000..b812c52ff5
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFSubsetFont.h
@@ -0,0 +1,16 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFSubsetFont_DEFINED
+#define SkPDFSubsetFont_DEFINED
+
+#include "include/core/SkData.h"
+#include "include/docs/SkPDFDocument.h"
+#include "src/pdf/SkPDFGlyphUse.h"
+
+sk_sp<SkData> SkPDFSubsetFont(sk_sp<SkData> fontData,
+ const SkPDFGlyphUse& glyphUsage,
+ SkPDF::Metadata::Subsetter subsetter,
+ const char* fontName,
+ int ttcIndex);
+
+#endif // SkPDFSubsetFont_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFTag.cpp b/gfx/skia/skia/src/pdf/SkPDFTag.cpp
new file mode 100644
index 0000000000..fcfb701477
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTag.cpp
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFTag.h"
+
+// The struct parent tree consists of one entry per page, followed by
+// entries for individual struct tree nodes corresponding to
+// annotations. Each entry is a key/value pair with an integer key
+// and an indirect reference key.
+//
+// The page entries get consecutive keys starting at 0. Since we don't
+// know the total number of pages in the document at the time we start
+// processing annotations, start the key for annotations with a large
+// number, which effectively becomes the maximum number of pages in a
+// PDF we can handle.
+const int kFirstAnnotationStructParentKey = 100000;
+
+struct SkPDFTagNode {
+ // Structure element nodes need a unique alphanumeric ID,
+ // and we need to be able to output them sorted in lexicographic
+ // order. This helper function takes one of our node IDs and
+ // builds an ID string that zero-pads the digits so that lexicographic
+ // order matches numeric order.
+ static SkString nodeIdToString(int nodeId) {
+ SkString idString;
+ idString.printf("node%08d", nodeId);
+ return idString;
+ }
+
+ SkPDFTagNode* fChildren = nullptr;
+ size_t fChildCount = 0;
+ struct MarkedContentInfo {
+ unsigned fPageIndex;
+ int fMarkId;
+ };
+ SkTArray<MarkedContentInfo> fMarkedContent;
+ int fNodeId;
+ SkString fTypeString;
+ SkString fAlt;
+ SkString fLang;
+ SkPDFIndirectReference fRef;
+ enum State {
+ kUnknown,
+ kYes,
+ kNo,
+ } fCanDiscard = kUnknown;
+ std::unique_ptr<SkPDFArray> fAttributes;
+ struct AnnotationInfo {
+ unsigned fPageIndex;
+ SkPDFIndirectReference fAnnotationRef;
+ };
+ std::vector<AnnotationInfo> fAnnotations;
+};
+
+SkPDF::AttributeList::AttributeList() = default;
+
+SkPDF::AttributeList::~AttributeList() = default;
+
+void SkPDF::AttributeList::appendInt(
+ const char* owner, const char* name, int value) {
+ if (!fAttrs)
+ fAttrs = SkPDFMakeArray();
+ std::unique_ptr<SkPDFDict> attrDict = SkPDFMakeDict();
+ attrDict->insertName("O", owner);
+ attrDict->insertInt(name, value);
+ fAttrs->appendObject(std::move(attrDict));
+}
+
+void SkPDF::AttributeList::appendFloat(
+ const char* owner, const char* name, float value) {
+ if (!fAttrs)
+ fAttrs = SkPDFMakeArray();
+ std::unique_ptr<SkPDFDict> attrDict = SkPDFMakeDict();
+ attrDict->insertName("O", owner);
+ attrDict->insertScalar(name, value);
+ fAttrs->appendObject(std::move(attrDict));
+}
+
+void SkPDF::AttributeList::appendName(
+ const char* owner, const char* name, const char* value) {
+ if (!fAttrs)
+ fAttrs = SkPDFMakeArray();
+ std::unique_ptr<SkPDFDict> attrDict = SkPDFMakeDict();
+ attrDict->insertName("O", owner);
+ attrDict->insertName(name, value);
+ fAttrs->appendObject(std::move(attrDict));
+}
+
+void SkPDF::AttributeList::appendFloatArray(
+ const char* owner, const char* name, const std::vector<float>& value) {
+ if (!fAttrs)
+ fAttrs = SkPDFMakeArray();
+ std::unique_ptr<SkPDFDict> attrDict = SkPDFMakeDict();
+ attrDict->insertName("O", owner);
+ std::unique_ptr<SkPDFArray> pdfArray = SkPDFMakeArray();
+ for (float element : value) {
+ pdfArray->appendScalar(element);
+ }
+ attrDict->insertObject(name, std::move(pdfArray));
+ fAttrs->appendObject(std::move(attrDict));
+}
+
+void SkPDF::AttributeList::appendNodeIdArray(
+ const char* owner,
+ const char* name,
+ const std::vector<int>& nodeIds) {
+ if (!fAttrs)
+ fAttrs = SkPDFMakeArray();
+ std::unique_ptr<SkPDFDict> attrDict = SkPDFMakeDict();
+ attrDict->insertName("O", owner);
+ std::unique_ptr<SkPDFArray> pdfArray = SkPDFMakeArray();
+ for (int nodeId : nodeIds) {
+ SkString idString = SkPDFTagNode::nodeIdToString(nodeId);
+ pdfArray->appendByteString(idString);
+ }
+ attrDict->insertObject(name, std::move(pdfArray));
+ fAttrs->appendObject(std::move(attrDict));
+}
+
+SkPDFTagTree::SkPDFTagTree() : fArena(4 * sizeof(SkPDFTagNode)) {}
+
+SkPDFTagTree::~SkPDFTagTree() = default;
+
+// static
+void SkPDFTagTree::Copy(SkPDF::StructureElementNode& node,
+ SkPDFTagNode* dst,
+ SkArenaAlloc* arena,
+ SkTHashMap<int, SkPDFTagNode*>* nodeMap) {
+ nodeMap->set(node.fNodeId, dst);
+ for (int nodeId : node.fAdditionalNodeIds) {
+ SkASSERT(!nodeMap->find(nodeId));
+ nodeMap->set(nodeId, dst);
+ }
+ dst->fNodeId = node.fNodeId;
+ dst->fTypeString = node.fTypeString;
+ dst->fAlt = node.fAlt;
+ dst->fLang = node.fLang;
+
+ size_t childCount = node.fChildVector.size();
+ SkPDFTagNode* children = arena->makeArray<SkPDFTagNode>(childCount);
+ dst->fChildCount = childCount;
+ dst->fChildren = children;
+ for (size_t i = 0; i < childCount; ++i) {
+ Copy(*node.fChildVector[i], &children[i], arena, nodeMap);
+ }
+
+ dst->fAttributes = std::move(node.fAttributes.fAttrs);
+}
+
+void SkPDFTagTree::init(SkPDF::StructureElementNode* node) {
+ if (node) {
+ fRoot = fArena.make<SkPDFTagNode>();
+ Copy(*node, fRoot, &fArena, &fNodeMap);
+ }
+}
+
+int SkPDFTagTree::createMarkIdForNodeId(int nodeId, unsigned pageIndex) {
+ if (!fRoot) {
+ return -1;
+ }
+ SkPDFTagNode** tagPtr = fNodeMap.find(nodeId);
+ if (!tagPtr) {
+ return -1;
+ }
+ SkPDFTagNode* tag = *tagPtr;
+ SkASSERT(tag);
+ while (SkToUInt(fMarksPerPage.size()) < pageIndex + 1) {
+ fMarksPerPage.push_back();
+ }
+ SkTArray<SkPDFTagNode*>& pageMarks = fMarksPerPage[pageIndex];
+ int markId = pageMarks.size();
+ tag->fMarkedContent.push_back({pageIndex, markId});
+ pageMarks.push_back(tag);
+ return markId;
+}
+
+int SkPDFTagTree::createStructParentKeyForNodeId(int nodeId, unsigned pageIndex) {
+ if (!fRoot) {
+ return -1;
+ }
+ SkPDFTagNode** tagPtr = fNodeMap.find(nodeId);
+ if (!tagPtr) {
+ return -1;
+ }
+ SkPDFTagNode* tag = *tagPtr;
+ SkASSERT(tag);
+
+ tag->fCanDiscard = SkPDFTagNode::kNo;
+
+ int nextStructParentKey = kFirstAnnotationStructParentKey +
+ static_cast<int>(fParentTreeAnnotationNodeIds.size());
+ fParentTreeAnnotationNodeIds.push_back(nodeId);
+ return nextStructParentKey;
+}
+
+static bool can_discard(SkPDFTagNode* node) {
+ if (node->fCanDiscard == SkPDFTagNode::kYes) {
+ return true;
+ }
+ if (node->fCanDiscard == SkPDFTagNode::kNo) {
+ return false;
+ }
+ if (!node->fMarkedContent.empty()) {
+ node->fCanDiscard = SkPDFTagNode::kNo;
+ return false;
+ }
+ for (size_t i = 0; i < node->fChildCount; ++i) {
+ if (!can_discard(&node->fChildren[i])) {
+ node->fCanDiscard = SkPDFTagNode::kNo;
+ return false;
+ }
+ }
+ node->fCanDiscard = SkPDFTagNode::kYes;
+ return true;
+}
+
+SkPDFIndirectReference SkPDFTagTree::PrepareTagTreeToEmit(SkPDFIndirectReference parent,
+ SkPDFTagNode* node,
+ SkPDFDocument* doc) {
+ SkPDFIndirectReference ref = doc->reserveRef();
+ std::unique_ptr<SkPDFArray> kids = SkPDFMakeArray();
+ SkPDFTagNode* children = node->fChildren;
+ size_t childCount = node->fChildCount;
+ for (size_t i = 0; i < childCount; ++i) {
+ SkPDFTagNode* child = &children[i];
+ if (!(can_discard(child))) {
+ kids->appendRef(PrepareTagTreeToEmit(ref, child, doc));
+ }
+ }
+ for (const SkPDFTagNode::MarkedContentInfo& info : node->fMarkedContent) {
+ std::unique_ptr<SkPDFDict> mcr = SkPDFMakeDict("MCR");
+ mcr->insertRef("Pg", doc->getPage(info.fPageIndex));
+ mcr->insertInt("MCID", info.fMarkId);
+ kids->appendObject(std::move(mcr));
+ }
+ for (const SkPDFTagNode::AnnotationInfo& annotationInfo : node->fAnnotations) {
+ std::unique_ptr<SkPDFDict> annotationDict = SkPDFMakeDict("OBJR");
+ annotationDict->insertRef("Obj", annotationInfo.fAnnotationRef);
+ annotationDict->insertRef("Pg", doc->getPage(annotationInfo.fPageIndex));
+ kids->appendObject(std::move(annotationDict));
+ }
+ node->fRef = ref;
+ SkPDFDict dict("StructElem");
+ dict.insertName("S", node->fTypeString.isEmpty() ? "NonStruct" : node->fTypeString.c_str());
+ if (!node->fAlt.isEmpty()) {
+ dict.insertTextString("Alt", node->fAlt);
+ }
+ if (!node->fLang.isEmpty()) {
+ dict.insertTextString("Lang", node->fLang);
+ }
+ dict.insertRef("P", parent);
+ dict.insertObject("K", std::move(kids));
+ if (node->fAttributes) {
+ dict.insertObject("A", std::move(node->fAttributes));
+ }
+
+ // Each node has a unique ID that also needs to be referenced
+ // in a separate IDTree node, along with the lowest and highest
+ // unique ID string.
+ SkString idString = SkPDFTagNode::nodeIdToString(node->fNodeId);
+ dict.insertByteString("ID", idString.c_str());
+ IDTreeEntry idTreeEntry = {node->fNodeId, ref};
+ fIdTreeEntries.push_back(idTreeEntry);
+
+ return doc->emit(dict, ref);
+}
+
+void SkPDFTagTree::addNodeAnnotation(int nodeId, SkPDFIndirectReference annotationRef, unsigned pageIndex) {
+ if (!fRoot) {
+ return;
+ }
+ SkPDFTagNode** tagPtr = fNodeMap.find(nodeId);
+ if (!tagPtr) {
+ return;
+ }
+ SkPDFTagNode* tag = *tagPtr;
+ SkASSERT(tag);
+
+ SkPDFTagNode::AnnotationInfo annotationInfo = {pageIndex, annotationRef};
+ tag->fAnnotations.push_back(annotationInfo);
+}
+
+SkPDFIndirectReference SkPDFTagTree::makeStructTreeRoot(SkPDFDocument* doc) {
+ if (!fRoot || can_discard(fRoot)) {
+ return SkPDFIndirectReference();
+ }
+
+ SkPDFIndirectReference ref = doc->reserveRef();
+
+ unsigned pageCount = SkToUInt(doc->pageCount());
+
+ // Build the StructTreeRoot.
+ SkPDFDict structTreeRoot("StructTreeRoot");
+ structTreeRoot.insertRef("K", PrepareTagTreeToEmit(ref, fRoot, doc));
+ structTreeRoot.insertInt("ParentTreeNextKey", SkToInt(pageCount));
+
+ // Build the parent tree, which consists of two things:
+ // (1) For each page, a mapping from the marked content IDs on
+ // each page to their corresponding tags
+ // (2) For each annotation, an indirect reference to that
+ // annotation's struct tree element.
+ SkPDFDict parentTree("ParentTree");
+ auto parentTreeNums = SkPDFMakeArray();
+
+ // First, one entry per page.
+ SkASSERT(SkToUInt(fMarksPerPage.size()) <= pageCount);
+ for (int j = 0; j < fMarksPerPage.size(); ++j) {
+ const SkTArray<SkPDFTagNode*>& pageMarks = fMarksPerPage[j];
+ SkPDFArray markToTagArray;
+ for (SkPDFTagNode* mark : pageMarks) {
+ SkASSERT(mark->fRef);
+ markToTagArray.appendRef(mark->fRef);
+ }
+ parentTreeNums->appendInt(j);
+ parentTreeNums->appendRef(doc->emit(markToTagArray));
+ }
+
+ // Then, one entry per annotation.
+ for (size_t j = 0; j < fParentTreeAnnotationNodeIds.size(); ++j) {
+ int nodeId = fParentTreeAnnotationNodeIds[j];
+ int structParentKey = kFirstAnnotationStructParentKey + static_cast<int>(j);
+
+ SkPDFTagNode** tagPtr = fNodeMap.find(nodeId);
+ if (!tagPtr) {
+ continue;
+ }
+ SkPDFTagNode* tag = *tagPtr;
+ parentTreeNums->appendInt(structParentKey);
+ parentTreeNums->appendRef(tag->fRef);
+ }
+
+ parentTree.insertObject("Nums", std::move(parentTreeNums));
+ structTreeRoot.insertRef("ParentTree", doc->emit(parentTree));
+
+ // Build the IDTree, a mapping from every unique ID string to
+ // a reference to its corresponding structure element node.
+ if (!fIdTreeEntries.empty()) {
+ std::sort(fIdTreeEntries.begin(), fIdTreeEntries.end(),
+ [](const IDTreeEntry& a, const IDTreeEntry& b) {
+ return a.nodeId < b.nodeId;
+ });
+
+ SkPDFDict idTree;
+ SkPDFDict idTreeLeaf;
+ auto limits = SkPDFMakeArray();
+ SkString lowestNodeIdString = SkPDFTagNode::nodeIdToString(
+ fIdTreeEntries.begin()->nodeId);
+ limits->appendByteString(lowestNodeIdString);
+ SkString highestNodeIdString = SkPDFTagNode::nodeIdToString(
+ fIdTreeEntries.rbegin()->nodeId);
+ limits->appendByteString(highestNodeIdString);
+ idTreeLeaf.insertObject("Limits", std::move(limits));
+ auto names = SkPDFMakeArray();
+ for (const IDTreeEntry& entry : fIdTreeEntries) {
+ SkString idString = SkPDFTagNode::nodeIdToString(entry.nodeId);
+ names->appendByteString(idString);
+ names->appendRef(entry.ref);
+ }
+ idTreeLeaf.insertObject("Names", std::move(names));
+ auto idTreeKids = SkPDFMakeArray();
+ idTreeKids->appendRef(doc->emit(idTreeLeaf));
+ idTree.insertObject("Kids", std::move(idTreeKids));
+ structTreeRoot.insertRef("IDTree", doc->emit(idTree));
+ }
+
+ return doc->emit(structTreeRoot, ref);
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFTag.h b/gfx/skia/skia/src/pdf/SkPDFTag.h
new file mode 100644
index 0000000000..61d97ef57c
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTag.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFTag_DEFINED
+#define SkPDFTag_DEFINED
+
+#include "include/docs/SkPDFDocument.h"
+#include "include/private/base/SkTArray.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkTHash.h"
+
+class SkPDFDocument;
+struct SkPDFIndirectReference;
+struct SkPDFTagNode;
+
+class SkPDFTagTree {
+public:
+ SkPDFTagTree();
+ ~SkPDFTagTree();
+ void init(SkPDF::StructureElementNode*);
+ // Used to allow marked content to refer to its corresponding structure
+ // tree node, via a page entry in the parent tree. Returns -1 if no
+ // mark ID.
+ int createMarkIdForNodeId(int nodeId, unsigned pageIndex);
+ // Used to allow annotations to refer to their corresponding structure
+ // tree node, via the struct parent tree. Returns -1 if no struct parent
+ // key.
+ int createStructParentKeyForNodeId(int nodeId, unsigned pageIndex);
+
+ void addNodeAnnotation(int nodeId, SkPDFIndirectReference annotationRef, unsigned pageIndex);
+ SkPDFIndirectReference makeStructTreeRoot(SkPDFDocument* doc);
+
+private:
+ // An entry in a map from a node ID to an indirect reference to its
+ // corresponding structure element node.
+ struct IDTreeEntry {
+ int nodeId;
+ SkPDFIndirectReference ref;
+ };
+
+ static void Copy(SkPDF::StructureElementNode& node,
+ SkPDFTagNode* dst,
+ SkArenaAlloc* arena,
+ SkTHashMap<int, SkPDFTagNode*>* nodeMap);
+ SkPDFIndirectReference PrepareTagTreeToEmit(SkPDFIndirectReference parent,
+ SkPDFTagNode* node,
+ SkPDFDocument* doc);
+
+ SkArenaAlloc fArena;
+ SkTHashMap<int, SkPDFTagNode*> fNodeMap;
+ SkPDFTagNode* fRoot = nullptr;
+ SkTArray<SkTArray<SkPDFTagNode*>> fMarksPerPage;
+ std::vector<IDTreeEntry> fIdTreeEntries;
+ std::vector<int> fParentTreeAnnotationNodeIds;
+
+ SkPDFTagTree(const SkPDFTagTree&) = delete;
+ SkPDFTagTree& operator=(const SkPDFTagTree&) = delete;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFType1Font.cpp b/gfx/skia/skia/src/pdf/SkPDFType1Font.cpp
new file mode 100644
index 0000000000..da17281ef4
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFType1Font.cpp
@@ -0,0 +1,339 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+
+#include "src/pdf/SkPDFType1Font.h"
+
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeSpec.h"
+
+#include <ctype.h>
+
+using namespace skia_private;
+
+/*
+ "A standard Type 1 font program, as described in the Adobe Type 1
+ Font Format specification, consists of three parts: a clear-text
+ portion (written using PostScript syntax), an encrypted portion, and
+ a fixed-content portion. The fixed-content portion contains 512
+ ASCII zeros followed by a cleartomark operator, and perhaps followed
+ by additional data. Although the encrypted portion of a standard
+ Type 1 font may be in binary or ASCII hexadecimal format, PDF
+ supports only the binary format."
+*/
+static bool parsePFBSection(const uint8_t** src, size_t* len, int sectionType,
+ size_t* size) {
+ // PFB sections have a two or six bytes header. 0x80 and a one byte
+ // section type followed by a four byte section length. Type one is
+ // an ASCII section (includes a length), type two is a binary section
+ // (includes a length) and type three is an EOF marker with no length.
+ const uint8_t* buf = *src;
+ if (*len < 2 || buf[0] != 0x80 || buf[1] != sectionType) {
+ return false;
+ } else if (buf[1] == 3) {
+ return true;
+ } else if (*len < 6) {
+ return false;
+ }
+
+ *size = (size_t)buf[2] | ((size_t)buf[3] << 8) | ((size_t)buf[4] << 16) |
+ ((size_t)buf[5] << 24);
+ size_t consumed = *size + 6;
+ if (consumed > *len) {
+ return false;
+ }
+ *src = *src + consumed;
+ *len = *len - consumed;
+ return true;
+}
+
+static bool parsePFB(const uint8_t* src, size_t size, size_t* headerLen,
+ size_t* dataLen, size_t* trailerLen) {
+ const uint8_t* srcPtr = src;
+ size_t remaining = size;
+
+ return parsePFBSection(&srcPtr, &remaining, 1, headerLen) &&
+ parsePFBSection(&srcPtr, &remaining, 2, dataLen) &&
+ parsePFBSection(&srcPtr, &remaining, 1, trailerLen) &&
+ parsePFBSection(&srcPtr, &remaining, 3, nullptr);
+}
+
+/* The sections of a PFA file are implicitly defined. The body starts
+ * after the line containing "eexec," and the trailer starts with 512
+ * literal 0's followed by "cleartomark" (plus arbitrary white space).
+ *
+ * This function assumes that src is NUL terminated, but the NUL
+ * termination is not included in size.
+ *
+ */
+static bool parsePFA(const char* src, size_t size, size_t* headerLen,
+ size_t* hexDataLen, size_t* dataLen, size_t* trailerLen) {
+ const char* end = src + size;
+
+ const char* dataPos = strstr(src, "eexec");
+ if (!dataPos) {
+ return false;
+ }
+ dataPos += strlen("eexec");
+ while ((*dataPos == '\n' || *dataPos == '\r' || *dataPos == ' ') &&
+ dataPos < end) {
+ dataPos++;
+ }
+ *headerLen = dataPos - src;
+
+ const char* trailerPos = strstr(dataPos, "cleartomark");
+ if (!trailerPos) {
+ return false;
+ }
+ int zeroCount = 0;
+ for (trailerPos--; trailerPos > dataPos && zeroCount < 512; trailerPos--) {
+ if (*trailerPos == '\n' || *trailerPos == '\r' || *trailerPos == ' ') {
+ continue;
+ } else if (*trailerPos == '0') {
+ zeroCount++;
+ } else {
+ return false;
+ }
+ }
+ if (zeroCount != 512) {
+ return false;
+ }
+
+ *hexDataLen = trailerPos - src - *headerLen;
+ *trailerLen = size - *headerLen - *hexDataLen;
+
+ // Verify that the data section is hex encoded and count the bytes.
+ int nibbles = 0;
+ for (; dataPos < trailerPos; dataPos++) {
+ if (isspace(*dataPos)) {
+ continue;
+ }
+ // isxdigit() is locale-sensitive https://bugs.skia.org/8285
+ if (nullptr == strchr("0123456789abcdefABCDEF", *dataPos)) {
+ return false;
+ }
+ nibbles++;
+ }
+ *dataLen = (nibbles + 1) / 2;
+
+ return true;
+}
+
+static int8_t hexToBin(uint8_t c) {
+ if (!isxdigit(c)) {
+ return -1;
+ } else if (c <= '9') {
+ return c - '0';
+ } else if (c <= 'F') {
+ return c - 'A' + 10;
+ } else if (c <= 'f') {
+ return c - 'a' + 10;
+ }
+ return -1;
+}
+
+static sk_sp<SkData> convert_type1_font_stream(std::unique_ptr<SkStreamAsset> srcStream,
+ size_t* headerLen,
+ size_t* dataLen,
+ size_t* trailerLen) {
+ size_t srcLen = srcStream ? srcStream->getLength() : 0;
+ SkASSERT(srcLen);
+ if (!srcLen) {
+ return nullptr;
+ }
+ // Flatten and Nul-terminate the source stream so that we can use
+ // strstr() to search it.
+ AutoTMalloc<uint8_t> sourceBuffer(SkToInt(srcLen + 1));
+ (void)srcStream->read(sourceBuffer.get(), srcLen);
+ sourceBuffer[SkToInt(srcLen)] = 0;
+ const uint8_t* src = sourceBuffer.get();
+
+ if (parsePFB(src, srcLen, headerLen, dataLen, trailerLen)) {
+ static const int kPFBSectionHeaderLength = 6;
+ const size_t length = *headerLen + *dataLen + *trailerLen;
+ SkASSERT(length > 0);
+ SkASSERT(length + (2 * kPFBSectionHeaderLength) <= srcLen);
+
+ sk_sp<SkData> data(SkData::MakeUninitialized(length));
+
+ const uint8_t* const srcHeader = src + kPFBSectionHeaderLength;
+ // There is a six-byte section header before header and data
+ // (but not trailer) that we're not going to copy.
+ const uint8_t* const srcData = srcHeader + *headerLen + kPFBSectionHeaderLength;
+ const uint8_t* const srcTrailer = srcData + *headerLen;
+
+ uint8_t* const resultHeader = (uint8_t*)data->writable_data();
+ uint8_t* const resultData = resultHeader + *headerLen;
+ uint8_t* const resultTrailer = resultData + *dataLen;
+
+ SkASSERT(resultTrailer + *trailerLen == resultHeader + length);
+
+ memcpy(resultHeader, srcHeader, *headerLen);
+ memcpy(resultData, srcData, *dataLen);
+ memcpy(resultTrailer, srcTrailer, *trailerLen);
+
+ return data;
+ }
+
+ // A PFA has to be converted for PDF.
+ size_t hexDataLen;
+ if (!parsePFA((const char*)src, srcLen, headerLen, &hexDataLen, dataLen,
+ trailerLen)) {
+ return nullptr;
+ }
+ const size_t length = *headerLen + *dataLen + *trailerLen;
+ SkASSERT(length > 0);
+ auto data = SkData::MakeUninitialized(length);
+ uint8_t* buffer = (uint8_t*)data->writable_data();
+
+ memcpy(buffer, src, *headerLen);
+ uint8_t* const resultData = &(buffer[*headerLen]);
+
+ const uint8_t* hexData = src + *headerLen;
+ const uint8_t* trailer = hexData + hexDataLen;
+ size_t outputOffset = 0;
+ uint8_t dataByte = 0; // To hush compiler.
+ bool highNibble = true;
+ for (; hexData < trailer; hexData++) {
+ int8_t curNibble = hexToBin(*hexData);
+ if (curNibble < 0) {
+ continue;
+ }
+ if (highNibble) {
+ dataByte = curNibble << 4;
+ highNibble = false;
+ } else {
+ dataByte |= curNibble;
+ highNibble = true;
+ resultData[outputOffset++] = dataByte;
+ }
+ }
+ if (!highNibble) {
+ resultData[outputOffset++] = dataByte;
+ }
+ SkASSERT(outputOffset == *dataLen);
+
+ uint8_t* const resultTrailer = &(buffer[SkToInt(*headerLen + outputOffset)]);
+ memcpy(resultTrailer, src + *headerLen + hexDataLen, *trailerLen);
+ return data;
+}
+
+inline static bool can_embed(const SkAdvancedTypefaceMetrics& metrics) {
+ return !SkToBool(metrics.fFlags & SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag);
+}
+
+inline static SkScalar from_font_units(SkScalar scaled, uint16_t emSize) {
+ return emSize == 1000 ? scaled : scaled * 1000 / emSize;
+}
+
+static SkPDFIndirectReference make_type1_font_descriptor(SkPDFDocument* doc,
+ const SkTypeface* typeface,
+ const SkAdvancedTypefaceMetrics* info) {
+ SkPDFDict descriptor("FontDescriptor");
+ uint16_t emSize = SkToU16(typeface->getUnitsPerEm());
+ if (info) {
+ SkPDFFont::PopulateCommonFontDescriptor(&descriptor, *info, emSize, 0);
+ if (can_embed(*info)) {
+ int ttcIndex;
+ size_t header SK_INIT_TO_AVOID_WARNING;
+ size_t data SK_INIT_TO_AVOID_WARNING;
+ size_t trailer SK_INIT_TO_AVOID_WARNING;
+ std::unique_ptr<SkStreamAsset> rawFontData = typeface->openStream(&ttcIndex);
+ sk_sp<SkData> fontData = convert_type1_font_stream(std::move(rawFontData),
+ &header, &data, &trailer);
+ if (fontData) {
+ std::unique_ptr<SkPDFDict> dict = SkPDFMakeDict();
+ dict->insertInt("Length1", header);
+ dict->insertInt("Length2", data);
+ dict->insertInt("Length3", trailer);
+ auto fontStream = SkMemoryStream::Make(std::move(fontData));
+ descriptor.insertRef("FontFile",
+ SkPDFStreamOut(std::move(dict), std::move(fontStream),
+ doc, SkPDFSteamCompressionEnabled::Yes));
+ }
+ }
+ }
+ return doc->emit(descriptor);
+}
+
+
+static const std::vector<SkString>& type_1_glyphnames(SkPDFDocument* canon,
+ const SkTypeface* typeface) {
+ SkTypefaceID typefaceID = typeface->uniqueID();
+ const std::vector<SkString>* glyphNames = canon->fType1GlyphNames.find(typefaceID);
+ if (!glyphNames) {
+ std::vector<SkString> names(typeface->countGlyphs());
+ SkPDFFont::GetType1GlyphNames(*typeface, names.data());
+ glyphNames = canon->fType1GlyphNames.set(typefaceID, std::move(names));
+ }
+ SkASSERT(glyphNames);
+ return *glyphNames;
+}
+
+static SkPDFIndirectReference type1_font_descriptor(SkPDFDocument* doc,
+ const SkTypeface* typeface) {
+ SkTypefaceID typefaceID = typeface->uniqueID();
+ if (SkPDFIndirectReference* ptr = doc->fFontDescriptors.find(typefaceID)) {
+ return *ptr;
+ }
+ const SkAdvancedTypefaceMetrics* info = SkPDFFont::GetMetrics(typeface, doc);
+ auto fontDescriptor = make_type1_font_descriptor(doc, typeface, info);
+ doc->fFontDescriptors.set(typefaceID, fontDescriptor);
+ return fontDescriptor;
+}
+
+
+void SkPDFEmitType1Font(const SkPDFFont& pdfFont, SkPDFDocument* doc) {
+ SkTypeface* typeface = pdfFont.typeface();
+ const std::vector<SkString>& glyphNames = type_1_glyphnames(doc, typeface);
+ SkGlyphID firstGlyphID = pdfFont.firstGlyphID();
+ SkGlyphID lastGlyphID = pdfFont.lastGlyphID();
+
+ SkPDFDict font("Font");
+ font.insertRef("FontDescriptor", type1_font_descriptor(doc, typeface));
+ font.insertName("Subtype", "Type1");
+ if (const SkAdvancedTypefaceMetrics* info = SkPDFFont::GetMetrics(typeface, doc)) {
+ font.insertName("BaseFont", info->fPostScriptName);
+ }
+
+ // glyphCount not including glyph 0
+ unsigned glyphCount = 1 + lastGlyphID - firstGlyphID;
+ SkASSERT(glyphCount > 0 && glyphCount <= 255);
+ font.insertInt("FirstChar", (size_t)0);
+ font.insertInt("LastChar", (size_t)glyphCount);
+ {
+ int emSize;
+ auto widths = SkPDFMakeArray();
+
+ int glyphRangeSize = lastGlyphID - firstGlyphID + 2;
+ AutoTArray<SkGlyphID> glyphIDs{glyphRangeSize};
+ glyphIDs[0] = 0;
+ for (unsigned gId = firstGlyphID; gId <= lastGlyphID; gId++) {
+ glyphIDs[gId - firstGlyphID + 1] = gId;
+ }
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakePDFVector(*typeface, &emSize);
+ SkBulkGlyphMetrics metrics{strikeSpec};
+ auto glyphs = metrics.glyphs(SkSpan(glyphIDs.get(), glyphRangeSize));
+ for (int i = 0; i < glyphRangeSize; ++i) {
+ widths->appendScalar(from_font_units(glyphs[i]->advanceX(), SkToU16(emSize)));
+ }
+ font.insertObject("Widths", std::move(widths));
+ }
+ auto encDiffs = SkPDFMakeArray();
+ encDiffs->reserve(lastGlyphID - firstGlyphID + 3);
+ encDiffs->appendInt(0);
+
+ SkASSERT(glyphNames.size() > lastGlyphID);
+ const SkString unknown("UNKNOWN");
+ encDiffs->appendName(glyphNames[0].isEmpty() ? unknown : glyphNames[0]);
+ for (int gID = firstGlyphID; gID <= lastGlyphID; gID++) {
+ encDiffs->appendName(glyphNames[gID].isEmpty() ? unknown : glyphNames[gID]);
+ }
+
+ auto encoding = SkPDFMakeDict("Encoding");
+ encoding->insertObject("Differences", std::move(encDiffs));
+ font.insertObject("Encoding", std::move(encoding));
+
+ doc->emit(font, pdfFont.indirectReference());
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFType1Font.h b/gfx/skia/skia/src/pdf/SkPDFType1Font.h
new file mode 100644
index 0000000000..7f9d972fe5
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFType1Font.h
@@ -0,0 +1,11 @@
+// Copyright 2019 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFType1Font_DEFINED
+#define SkPDFType1Font_DEFINED
+
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFFont.h"
+
+void SkPDFEmitType1Font(const SkPDFFont&, SkPDFDocument*);
+
+#endif // SkPDFType1Font_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFTypes.cpp b/gfx/skia/skia/src/pdf/SkPDFTypes.cpp
new file mode 100644
index 0000000000..79c402a9ad
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTypes.cpp
@@ -0,0 +1,602 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFTypes.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkExecutor.h"
+#include "include/core/SkStream.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkStreamPriv.h"
+#include "src/pdf/SkDeflate.h"
+#include "src/pdf/SkPDFDocumentPriv.h"
+#include "src/pdf/SkPDFUnion.h"
+#include "src/pdf/SkPDFUtils.h"
+
+#include <new>
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFUnion::SkPDFUnion(Type t, int32_t v) : fIntValue (v) , fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, bool v) : fBoolValue (v) , fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, SkScalar v) : fScalarValue (v) , fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, const char* v) : fStaticString (v) , fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, SkString v) : fSkString(std::move(v)), fType(t) {}
+SkPDFUnion::SkPDFUnion(Type t, PDFObject v) : fObject (std::move(v)), fType(t) {}
+
+SkPDFUnion::~SkPDFUnion() {
+ switch (fType) {
+ case Type::kNameSkS:
+ case Type::kByteStringSkS:
+ case Type::kTextStringSkS:
+ fSkString.~SkString();
+ return;
+ case Type::kObject:
+ fObject.~PDFObject();
+ return;
+ default:
+ return;
+ }
+}
+
+SkPDFUnion::SkPDFUnion(SkPDFUnion&& that) : fType(that.fType) {
+ SkASSERT(this != &that);
+
+ switch (fType) {
+ case Type::kDestroyed:
+ break;
+ case Type::kInt:
+ case Type::kColorComponent:
+ case Type::kRef:
+ fIntValue = that.fIntValue;
+ break;
+ case Type::kBool:
+ fBoolValue = that.fBoolValue;
+ break;
+ case Type::kColorComponentF:
+ case Type::kScalar:
+ fScalarValue = that.fScalarValue;
+ break;
+ case Type::kName:
+ case Type::kByteString:
+ case Type::kTextString:
+ fStaticString = that.fStaticString;
+ break;
+ case Type::kNameSkS:
+ case Type::kByteStringSkS:
+ case Type::kTextStringSkS:
+ new (&fSkString) SkString(std::move(that.fSkString));
+ break;
+ case Type::kObject:
+ new (&fObject) PDFObject(std::move(that.fObject));
+ break;
+ default:
+ SkDEBUGFAIL("SkPDFUnion::SkPDFUnion with bad type");
+ }
+ that.fType = Type::kDestroyed;
+}
+
+SkPDFUnion& SkPDFUnion::operator=(SkPDFUnion&& that) {
+ if (this != &that) {
+ this->~SkPDFUnion();
+ new (this) SkPDFUnion(std::move(that));
+ }
+ return *this;
+}
+
+bool SkPDFUnion::isName() const {
+ return Type::kName == fType || Type::kNameSkS == fType;
+}
+
+#ifdef SK_DEBUG
+// Most names need no escaping. Such names are handled as static const strings.
+bool is_valid_name(const char* n) {
+ static const char kControlChars[] = "/%()<>[]{}";
+ while (*n) {
+ if (*n < '!' || *n > '~' || strchr(kControlChars, *n)) {
+ return false;
+ }
+ ++n;
+ }
+ return true;
+}
+#endif // SK_DEBUG
+
+// Given an arbitrary string, write it as a valid name (not including leading slash).
+static void write_name_escaped(SkWStream* o, const char* name) {
+ static const char kToEscape[] = "#/%()<>[]{}";
+ for (const uint8_t* n = reinterpret_cast<const uint8_t*>(name); *n; ++n) {
+ uint8_t v = *n;
+ if (v < '!' || v > '~' || strchr(kToEscape, v)) {
+ char buffer[3] = {'#',
+ SkHexadecimalDigits::gUpper[v >> 4],
+ SkHexadecimalDigits::gUpper[v & 0xF]};
+ o->write(buffer, sizeof(buffer));
+ } else {
+ o->write(n, 1);
+ }
+ }
+}
+
+static void write_literal_byte_string(SkWStream* wStream, const char* cin, size_t len) {
+ wStream->writeText("(");
+ for (size_t i = 0; i < len; i++) {
+ uint8_t c = static_cast<uint8_t>(cin[i]);
+ if (c < ' ' || '~' < c) {
+ uint8_t octal[4] = { '\\',
+ (uint8_t)('0' | ( c >> 6 )),
+ (uint8_t)('0' | ((c >> 3) & 0x07)),
+ (uint8_t)('0' | ( c & 0x07)) };
+ wStream->write(octal, 4);
+ } else {
+ if (c == '\\' || c == '(' || c == ')') {
+ wStream->writeText("\\");
+ }
+ wStream->write(&c, 1);
+ }
+ }
+ wStream->writeText(")");
+}
+
+static void write_hex_byte_string(SkWStream* wStream, const char* cin, size_t len) {
+ SkDEBUGCODE(static const size_t kMaxLen = 65535;)
+ SkASSERT(len <= kMaxLen);
+
+ wStream->writeText("<");
+ for (size_t i = 0; i < len; i++) {
+ uint8_t c = static_cast<uint8_t>(cin[i]);
+ char hexValue[2] = { SkHexadecimalDigits::gUpper[c >> 4],
+ SkHexadecimalDigits::gUpper[c & 0xF] };
+ wStream->write(hexValue, 2);
+ }
+ wStream->writeText(">");
+}
+
+static void write_optimized_byte_string(SkWStream* wStream, const char* cin, size_t len,
+ size_t literalExtras) {
+ const size_t hexLength = 2 + 2*len;
+ const size_t literalLength = 2 + len + literalExtras;
+ if (literalLength <= hexLength) {
+ write_literal_byte_string(wStream, cin, len);
+ } else {
+ write_hex_byte_string(wStream, cin, len);
+ }
+}
+
+static void write_byte_string(SkWStream* wStream, const char* cin, size_t len) {
+ SkDEBUGCODE(static const size_t kMaxLen = 65535;)
+ SkASSERT(len <= kMaxLen);
+
+ size_t literalExtras = 0;
+ {
+ for (size_t i = 0; i < len; i++) {
+ uint8_t c = static_cast<uint8_t>(cin[i]);
+ if (c < ' ' || '~' < c) {
+ literalExtras += 3;
+ } else if (c == '\\' || c == '(' || c == ')') {
+ ++literalExtras;
+ }
+ }
+ }
+ write_optimized_byte_string(wStream, cin, len, literalExtras);
+}
+
+static void write_text_string(SkWStream* wStream, const char* cin, size_t len) {
+ SkDEBUGCODE(static const size_t kMaxLen = 65535;)
+ SkASSERT(len <= kMaxLen);
+
+ bool inputIsValidUTF8 = true;
+ bool inputIsPDFDocEncoding = true;
+ size_t literalExtras = 0;
+ {
+ const char* textPtr = cin;
+ const char* textEnd = cin + len;
+ while (textPtr < textEnd) {
+ SkUnichar unichar = SkUTF::NextUTF8(&textPtr, textEnd);
+ if (unichar < 0) {
+ inputIsValidUTF8 = false;
+ break;
+ }
+ // See Table D.2 (PDFDocEncoding Character Set) in the PDF32000_2008 spec.
+ // Could convert from UTF-8 to PDFDocEncoding and, if successful, use that.
+ if ((0x15 < unichar && unichar < 0x20) || 0x7E < unichar) {
+ inputIsPDFDocEncoding = false;
+ break;
+ }
+ if (unichar < ' ' || '~' < unichar) {
+ literalExtras += 3;
+ } else if (unichar == '\\' || unichar == '(' || unichar == ')') {
+ ++literalExtras;
+ }
+ }
+ }
+
+ if (!inputIsValidUTF8) {
+ SkDebugf("Invalid UTF8: %.*s\n", (int)len, cin);
+ wStream->writeText("<>");
+ return;
+ }
+
+ if (inputIsPDFDocEncoding) {
+ write_optimized_byte_string(wStream, cin, len, literalExtras);
+ return;
+ }
+
+ wStream->writeText("<FEFF");
+ const char* textPtr = cin;
+ const char* textEnd = cin + len;
+ while (textPtr < textEnd) {
+ SkUnichar unichar = SkUTF::NextUTF8(&textPtr, textEnd);
+ SkPDFUtils::WriteUTF16beHex(wStream, unichar);
+ }
+ wStream->writeText(">");
+}
+
+void SkPDFWriteTextString(SkWStream* wStream, const char* cin, size_t len) {
+ write_text_string(wStream, cin, len);
+}
+void SkPDFWriteByteString(SkWStream* wStream, const char* cin, size_t len) {
+ write_byte_string(wStream, cin, len);
+}
+
+void SkPDFUnion::emitObject(SkWStream* stream) const {
+ switch (fType) {
+ case Type::kInt:
+ stream->writeDecAsText(fIntValue);
+ return;
+ case Type::kColorComponent:
+ SkPDFUtils::AppendColorComponent(SkToU8(fIntValue), stream);
+ return;
+ case Type::kColorComponentF:
+ SkPDFUtils::AppendColorComponentF(fScalarValue, stream);
+ return;
+ case Type::kBool:
+ stream->writeText(fBoolValue ? "true" : "false");
+ return;
+ case Type::kScalar:
+ SkPDFUtils::AppendScalar(fScalarValue, stream);
+ return;
+ case Type::kName:
+ stream->writeText("/");
+ SkASSERT(is_valid_name(fStaticString));
+ stream->writeText(fStaticString);
+ return;
+ case Type::kByteString:
+ SkASSERT(fStaticString);
+ write_byte_string(stream, fStaticString, strlen(fStaticString));
+ return;
+ case Type::kTextString:
+ SkASSERT(fStaticString);
+ write_text_string(stream, fStaticString, strlen(fStaticString));
+ return;
+ case Type::kNameSkS:
+ stream->writeText("/");
+ write_name_escaped(stream, fSkString.c_str());
+ return;
+ case Type::kByteStringSkS:
+ write_byte_string(stream, fSkString.c_str(), fSkString.size());
+ return;
+ case Type::kTextStringSkS:
+ write_text_string(stream, fSkString.c_str(), fSkString.size());
+ return;
+ case Type::kObject:
+ fObject->emitObject(stream);
+ return;
+ case Type::kRef:
+ SkASSERT(fIntValue >= 0);
+ stream->writeDecAsText(fIntValue);
+ stream->writeText(" 0 R"); // Generation number is always 0.
+ return;
+ default:
+ SkDEBUGFAIL("SkPDFUnion::emitObject with bad type");
+ }
+}
+
+SkPDFUnion SkPDFUnion::Int(int32_t value) {
+ return SkPDFUnion(Type::kInt, value);
+}
+
+SkPDFUnion SkPDFUnion::ColorComponent(uint8_t value) {
+ return SkPDFUnion(Type::kColorComponent, SkTo<int32_t>(value));
+}
+
+SkPDFUnion SkPDFUnion::ColorComponentF(float value) {
+ return SkPDFUnion(Type::kColorComponentF, SkFloatToScalar(value));
+}
+
+SkPDFUnion SkPDFUnion::Bool(bool value) {
+ return SkPDFUnion(Type::kBool, value);
+}
+
+SkPDFUnion SkPDFUnion::Scalar(SkScalar value) {
+ return SkPDFUnion(Type::kScalar, value);
+}
+
+SkPDFUnion SkPDFUnion::Name(const char* value) {
+ SkASSERT(value);
+ SkASSERT(is_valid_name(value));
+ return SkPDFUnion(Type::kName, value);
+}
+
+SkPDFUnion SkPDFUnion::ByteString(const char* value) {
+ SkASSERT(value);
+ return SkPDFUnion(Type::kByteString, value);
+}
+
+SkPDFUnion SkPDFUnion::TextString(const char* value) {
+ SkASSERT(value);
+ return SkPDFUnion(Type::kTextString, value);
+}
+
+SkPDFUnion SkPDFUnion::Name(SkString s) {
+ return SkPDFUnion(Type::kNameSkS, std::move(s));
+}
+
+SkPDFUnion SkPDFUnion::ByteString(SkString s) {
+ return SkPDFUnion(Type::kByteStringSkS, std::move(s));
+}
+
+SkPDFUnion SkPDFUnion::TextString(SkString s) {
+ return SkPDFUnion(Type::kTextStringSkS, std::move(s));
+}
+
+SkPDFUnion SkPDFUnion::Object(std::unique_ptr<SkPDFObject> objSp) {
+ SkASSERT(objSp.get());
+ return SkPDFUnion(Type::kObject, std::move(objSp));
+}
+
+SkPDFUnion SkPDFUnion::Ref(SkPDFIndirectReference ref) {
+ SkASSERT(ref.fValue > 0);
+ return SkPDFUnion(Type::kRef, SkTo<int32_t>(ref.fValue));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if 0 // Enable if needed.
+void SkPDFAtom::emitObject(SkWStream* stream) const {
+ fValue.emitObject(stream);
+}
+#endif // 0
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkPDFArray::SkPDFArray() {}
+
+SkPDFArray::~SkPDFArray() {}
+
+size_t SkPDFArray::size() const { return fValues.size(); }
+
+void SkPDFArray::reserve(int length) {
+ fValues.reserve(length);
+}
+
+void SkPDFArray::emitObject(SkWStream* stream) const {
+ stream->writeText("[");
+ for (size_t i = 0; i < fValues.size(); i++) {
+ fValues[i].emitObject(stream);
+ if (i + 1 < fValues.size()) {
+ stream->writeText(" ");
+ }
+ }
+ stream->writeText("]");
+}
+
+void SkPDFArray::append(SkPDFUnion&& value) {
+ fValues.emplace_back(std::move(value));
+}
+
+void SkPDFArray::appendInt(int32_t value) {
+ this->append(SkPDFUnion::Int(value));
+}
+
+void SkPDFArray::appendColorComponent(uint8_t value) {
+ this->append(SkPDFUnion::ColorComponent(value));
+}
+
+void SkPDFArray::appendBool(bool value) {
+ this->append(SkPDFUnion::Bool(value));
+}
+
+void SkPDFArray::appendScalar(SkScalar value) {
+ this->append(SkPDFUnion::Scalar(value));
+}
+
+void SkPDFArray::appendName(const char name[]) {
+ this->append(SkPDFUnion::Name(SkString(name)));
+}
+
+void SkPDFArray::appendName(SkString name) {
+ this->append(SkPDFUnion::Name(std::move(name)));
+}
+
+void SkPDFArray::appendByteString(SkString value) {
+ this->append(SkPDFUnion::ByteString(std::move(value)));
+}
+
+void SkPDFArray::appendTextString(SkString value) {
+ this->append(SkPDFUnion::TextString(std::move(value)));
+}
+
+void SkPDFArray::appendByteString(const char value[]) {
+ this->append(SkPDFUnion::ByteString(value));
+}
+
+void SkPDFArray::appendTextString(const char value[]) {
+ this->append(SkPDFUnion::TextString(value));
+}
+
+void SkPDFArray::appendObject(std::unique_ptr<SkPDFObject>&& objSp) {
+ this->append(SkPDFUnion::Object(std::move(objSp)));
+}
+
+void SkPDFArray::appendRef(SkPDFIndirectReference ref) {
+ this->append(SkPDFUnion::Ref(ref));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPDFDict::~SkPDFDict() {}
+
+SkPDFDict::SkPDFDict(const char type[]) {
+ if (type) {
+ this->insertName("Type", type);
+ }
+}
+
+void SkPDFDict::emitObject(SkWStream* stream) const {
+ stream->writeText("<<");
+ for (size_t i = 0; i < fRecords.size(); ++i) {
+ const std::pair<SkPDFUnion, SkPDFUnion>& record = fRecords[i];
+ record.first.emitObject(stream);
+ stream->writeText(" ");
+ record.second.emitObject(stream);
+ if (i + 1 < fRecords.size()) {
+ stream->writeText("\n");
+ }
+ }
+ stream->writeText(">>");
+}
+
+size_t SkPDFDict::size() const { return fRecords.size(); }
+
+void SkPDFDict::reserve(int n) {
+ fRecords.reserve(n);
+}
+
+void SkPDFDict::insertRef(const char key[], SkPDFIndirectReference ref) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Ref(ref));
+}
+
+void SkPDFDict::insertRef(SkString key, SkPDFIndirectReference ref) {
+ fRecords.emplace_back(SkPDFUnion::Name(std::move(key)), SkPDFUnion::Ref(ref));
+}
+
+void SkPDFDict::insertObject(const char key[], std::unique_ptr<SkPDFObject>&& objSp) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Object(std::move(objSp)));
+}
+void SkPDFDict::insertObject(SkString key, std::unique_ptr<SkPDFObject>&& objSp) {
+ fRecords.emplace_back(SkPDFUnion::Name(std::move(key)),
+ SkPDFUnion::Object(std::move(objSp)));
+}
+
+void SkPDFDict::insertBool(const char key[], bool value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Bool(value));
+}
+
+void SkPDFDict::insertInt(const char key[], int32_t value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Int(value));
+}
+
+void SkPDFDict::insertInt(const char key[], size_t value) {
+ this->insertInt(key, SkToS32(value));
+}
+
+void SkPDFDict::insertColorComponentF(const char key[], SkScalar value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::ColorComponentF(value));
+}
+
+void SkPDFDict::insertScalar(const char key[], SkScalar value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Scalar(value));
+}
+
+void SkPDFDict::insertName(const char key[], const char name[]) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Name(name));
+}
+
+void SkPDFDict::insertName(const char key[], SkString name) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::Name(std::move(name)));
+}
+
+void SkPDFDict::insertByteString(const char key[], const char value[]) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::ByteString(value));
+}
+
+void SkPDFDict::insertTextString(const char key[], const char value[]) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::TextString(value));
+}
+
+void SkPDFDict::insertByteString(const char key[], SkString value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::ByteString(std::move(value)));
+}
+
+void SkPDFDict::insertTextString(const char key[], SkString value) {
+ fRecords.emplace_back(SkPDFUnion::Name(key), SkPDFUnion::TextString(std::move(value)));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+
+static void serialize_stream(SkPDFDict* origDict,
+ SkStreamAsset* stream,
+ SkPDFSteamCompressionEnabled compress,
+ SkPDFDocument* doc,
+ SkPDFIndirectReference ref) {
+ // Code assumes that the stream starts at the beginning.
+ SkASSERT(stream && stream->hasLength());
+
+ std::unique_ptr<SkStreamAsset> tmp;
+ SkPDFDict tmpDict;
+ SkPDFDict& dict = origDict ? *origDict : tmpDict;
+ static const size_t kMinimumSavings = strlen("/Filter_/FlateDecode_");
+ if (doc->metadata().fCompressionLevel != SkPDF::Metadata::CompressionLevel::None &&
+ compress == SkPDFSteamCompressionEnabled::Yes &&
+ stream->getLength() > kMinimumSavings)
+ {
+ SkDynamicMemoryWStream compressedData;
+ SkDeflateWStream deflateWStream(&compressedData,SkToInt(doc->metadata().fCompressionLevel));
+ SkStreamCopy(&deflateWStream, stream);
+ deflateWStream.finalize();
+ #ifdef SK_PDF_BASE85_BINARY
+ {
+ SkPDFUtils::Base85Encode(compressedData.detachAsStream(), &compressedData);
+ tmp = compressedData.detachAsStream();
+ stream = tmp.get();
+ auto filters = SkPDFMakeArray();
+ filters->appendName("ASCII85Decode");
+ filters->appendName("FlateDecode");
+ dict.insertObject("Filter", std::move(filters));
+ }
+ #else
+ if (stream->getLength() > compressedData.bytesWritten() + kMinimumSavings) {
+ tmp = compressedData.detachAsStream();
+ stream = tmp.get();
+ dict.insertName("Filter", "FlateDecode");
+ } else {
+ SkAssertResult(stream->rewind());
+ }
+ #endif
+
+ }
+ dict.insertInt("Length", stream->getLength());
+ doc->emitStream(dict,
+ [stream](SkWStream* dst) { dst->writeStream(stream, stream->getLength()); },
+ ref);
+}
+
+SkPDFIndirectReference SkPDFStreamOut(std::unique_ptr<SkPDFDict> dict,
+ std::unique_ptr<SkStreamAsset> content,
+ SkPDFDocument* doc,
+ SkPDFSteamCompressionEnabled compress) {
+ SkPDFIndirectReference ref = doc->reserveRef();
+ if (SkExecutor* executor = doc->executor()) {
+ SkPDFDict* dictPtr = dict.release();
+ SkStreamAsset* contentPtr = content.release();
+ // Pass ownership of both pointers into a std::function, which should
+ // only be executed once.
+ doc->incrementJobCount();
+ executor->add([dictPtr, contentPtr, compress, doc, ref]() {
+ serialize_stream(dictPtr, contentPtr, compress, doc, ref);
+ delete dictPtr;
+ delete contentPtr;
+ doc->signalJobComplete();
+ });
+ return ref;
+ }
+ serialize_stream(dict.get(), content.get(), compress, doc, ref);
+ return ref;
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFTypes.h b/gfx/skia/skia/src/pdf/SkPDFTypes.h
new file mode 100644
index 0000000000..3726017501
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFTypes.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2010 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPDFTypes_DEFINED
+#define SkPDFTypes_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkTHash.h"
+
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+#include <vector>
+#include <memory>
+
+class SkData;
+class SkPDFArray;
+
+class SkPDFDict;
+class SkPDFDocument;
+class SkPDFObject;
+class SkPDFUnion;
+class SkStreamAsset;
+class SkString;
+class SkWStream;
+struct SkPDFObjectSerializer;
+
+struct SkPDFIndirectReference {
+ int fValue = -1;
+ explicit operator bool() { return fValue != -1; }
+};
+
+inline static bool operator==(SkPDFIndirectReference u, SkPDFIndirectReference v) {
+ return u.fValue == v.fValue;
+}
+
+inline static bool operator!=(SkPDFIndirectReference u, SkPDFIndirectReference v) {
+ return u.fValue != v.fValue;
+}
+
+/** \class SkPDFObject
+
+ A PDF Object is the base class for primitive elements in a PDF file. A
+ common subtype is used to ease the use of indirect object references,
+ which are common in the PDF format.
+
+*/
+class SkPDFObject {
+public:
+ SkPDFObject() = default;
+
+ /** Subclasses must implement this method to print the object to the
+ * PDF file.
+ * @param catalog The object catalog to use.
+ * @param stream The writable output stream to send the output to.
+ */
+ virtual void emitObject(SkWStream* stream) const = 0;
+
+ virtual ~SkPDFObject() = default;
+
+private:
+ SkPDFObject(SkPDFObject&&) = delete;
+ SkPDFObject(const SkPDFObject&) = delete;
+ SkPDFObject& operator=(SkPDFObject&&) = delete;
+ SkPDFObject& operator=(const SkPDFObject&) = delete;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** \class SkPDFArray
+
+ An array object in a PDF.
+*/
+class SkPDFArray final : public SkPDFObject {
+public:
+ /** Create a PDF array. Maximum length is 8191.
+ */
+ SkPDFArray();
+ ~SkPDFArray() override;
+
+ // The SkPDFObject interface.
+ void emitObject(SkWStream* stream) const override;
+
+ /** The size of the array.
+ */
+ size_t size() const;
+
+ /** Preallocate space for the given number of entries.
+ * @param length The number of array slots to preallocate.
+ */
+ void reserve(int length);
+
+ /** Appends a value to the end of the array.
+ * @param value The value to add to the array.
+ */
+ void appendInt(int32_t);
+ void appendColorComponent(uint8_t);
+ void appendBool(bool);
+ void appendScalar(SkScalar);
+ void appendName(const char[]);
+ void appendName(SkString);
+ void appendByteString(const char[]);
+ void appendTextString(const char[]);
+ void appendByteString(SkString);
+ void appendTextString(SkString);
+ void appendObject(std::unique_ptr<SkPDFObject>&&);
+ void appendRef(SkPDFIndirectReference);
+
+private:
+ std::vector<SkPDFUnion> fValues;
+ void append(SkPDFUnion&& value);
+};
+
+static inline void SkPDFArray_Append(SkPDFArray* a, int v) { a->appendInt(v); }
+
+static inline void SkPDFArray_Append(SkPDFArray* a, SkScalar v) { a->appendScalar(v); }
+
+template <typename T, typename... Args>
+static inline void SkPDFArray_Append(SkPDFArray* a, T v, Args... args) {
+ SkPDFArray_Append(a, v);
+ SkPDFArray_Append(a, args...);
+}
+
+static inline void SkPDFArray_Append(SkPDFArray* a) {}
+
+template <typename... Args>
+static inline std::unique_ptr<SkPDFArray> SkPDFMakeArray(Args... args) {
+ std::unique_ptr<SkPDFArray> ret(new SkPDFArray());
+ ret->reserve(sizeof...(Args));
+ SkPDFArray_Append(ret.get(), args...);
+ return ret;
+}
+
+/** \class SkPDFDict
+
+ A dictionary object in a PDF.
+*/
+class SkPDFDict final : public SkPDFObject {
+public:
+ /** Create a PDF dictionary.
+ * @param type The value of the Type entry, nullptr for no type.
+ */
+ explicit SkPDFDict(const char type[] = nullptr);
+
+ ~SkPDFDict() override;
+
+ // The SkPDFObject interface.
+ void emitObject(SkWStream* stream) const override;
+
+ /** The size of the dictionary.
+ */
+ size_t size() const;
+
+ /** Preallocate space for n key-value pairs */
+ void reserve(int n);
+
+ /** Add the value to the dictionary with the given key.
+ * @param key The text of the key for this dictionary entry.
+ * @param value The value for this dictionary entry.
+ */
+ void insertObject(const char key[], std::unique_ptr<SkPDFObject>&&);
+ void insertObject(SkString, std::unique_ptr<SkPDFObject>&&);
+ void insertRef(const char key[], SkPDFIndirectReference);
+ void insertRef(SkString, SkPDFIndirectReference);
+
+ /** Add the value to the dictionary with the given key.
+ * @param key The text of the key for this dictionary entry.
+ * @param value The value for this dictionary entry.
+ */
+ void insertBool(const char key[], bool value);
+ void insertInt(const char key[], int32_t value);
+ void insertInt(const char key[], size_t value);
+ void insertScalar(const char key[], SkScalar value);
+ void insertColorComponentF(const char key[], SkScalar value);
+ void insertName(const char key[], const char nameValue[]);
+ void insertName(const char key[], SkString nameValue);
+ void insertByteString(const char key[], const char value[]);
+ void insertTextString(const char key[], const char value[]);
+ void insertByteString(const char key[], SkString value);
+ void insertTextString(const char key[], SkString value);
+
+private:
+ std::vector<std::pair<SkPDFUnion, SkPDFUnion>> fRecords;
+};
+
+static inline std::unique_ptr<SkPDFDict> SkPDFMakeDict(const char* type = nullptr) {
+ return std::make_unique<SkPDFDict>(type);
+}
+
+enum class SkPDFSteamCompressionEnabled : bool {
+ No = false,
+ Yes = true,
+ Default =
+#ifdef SK_PDF_LESS_COMPRESSION
+ No,
+#else
+ Yes,
+#endif
+};
+
+// Exposed for unit testing.
+void SkPDFWriteTextString(SkWStream* wStream, const char* cin, size_t len);
+void SkPDFWriteByteString(SkWStream* wStream, const char* cin, size_t len);
+
+SkPDFIndirectReference SkPDFStreamOut(
+ std::unique_ptr<SkPDFDict> dict,
+ std::unique_ptr<SkStreamAsset> stream,
+ SkPDFDocument* doc,
+ SkPDFSteamCompressionEnabled compress = SkPDFSteamCompressionEnabled::Default);
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkPDFUnion.h b/gfx/skia/skia/src/pdf/SkPDFUnion.h
new file mode 100644
index 0000000000..51cb5f0a96
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFUnion.h
@@ -0,0 +1,112 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkPDFUnion_DEFINED
+#define SkPDFUnion_DEFINED
+
+#include "src/pdf/SkPDFTypes.h"
+
+/**
+ A SkPDFUnion is a non-virtualized implementation of the
+ non-compound, non-specialized PDF Object types: Name, String,
+ Number, Boolean.
+ */
+class SkPDFUnion {
+public:
+ // Move constructor and assignment operator destroy the argument
+ // and steal its resources (if needed).
+ SkPDFUnion(SkPDFUnion&&);
+ SkPDFUnion& operator=(SkPDFUnion&&);
+
+ ~SkPDFUnion();
+
+ /** The following factory functions are the standard way of creating
+ SkPDFUnion objects. */
+
+ static SkPDFUnion Int(int32_t);
+
+ static SkPDFUnion Int(size_t v) { return SkPDFUnion::Int(SkToS32(v)); }
+
+ static SkPDFUnion Bool(bool);
+
+ static SkPDFUnion Scalar(SkScalar);
+
+ static SkPDFUnion ColorComponent(uint8_t);
+
+ static SkPDFUnion ColorComponentF(float);
+
+ /** These functions do NOT take ownership of char*, and do NOT
+ copy the string. Suitable for passing in static const
+ strings. For example:
+ SkPDFUnion n = SkPDFUnion::Name("Length");
+ SkPDFUnion u = SkPDFUnion::ByteString("Identity"); */
+
+ /** SkPDFUnion::Name(const char*) assumes that the passed string
+ is already a valid name (that is: it has no control or
+ whitespace characters). This will not copy the name. */
+ static SkPDFUnion Name(const char*);
+
+ /** SkPDFUnion::ByteString and TextString will encode the passed string. This will not copy. */
+ static SkPDFUnion ByteString(const char*);
+ static SkPDFUnion TextString(const char*);
+
+ /** SkPDFUnion::Name(SkString) does not assume that the
+ passed string is already a valid name and it will escape the
+ string. */
+ static SkPDFUnion Name(SkString);
+
+ /** SkPDFUnion::ByteString and TextString will encode the passed string. */
+ static SkPDFUnion ByteString(SkString);
+ static SkPDFUnion TextString(SkString);
+
+ static SkPDFUnion Object(std::unique_ptr<SkPDFObject>);
+
+ static SkPDFUnion Ref(SkPDFIndirectReference);
+
+ /** These two non-virtual methods mirror SkPDFObject's
+ corresponding virtuals. */
+ void emitObject(SkWStream*) const;
+
+ bool isName() const;
+
+private:
+ using PDFObject = std::unique_ptr<SkPDFObject>;
+ union {
+ int32_t fIntValue;
+ bool fBoolValue;
+ SkScalar fScalarValue;
+ const char* fStaticString;
+ SkString fSkString;
+ PDFObject fObject;
+ };
+ enum class Type : char {
+ /** It is an error to call emitObject() or addResources() on an kDestroyed object. */
+ kDestroyed = 0,
+ kInt,
+ kColorComponent,
+ kColorComponentF,
+ kBool,
+ kScalar,
+ kName,
+ kByteString,
+ kTextString,
+ kNameSkS,
+ kByteStringSkS,
+ kTextStringSkS,
+ kObject,
+ kRef,
+ };
+ Type fType;
+
+ SkPDFUnion(Type, int32_t);
+ SkPDFUnion(Type, bool);
+ SkPDFUnion(Type, SkScalar);
+ SkPDFUnion(Type, const char*);
+ SkPDFUnion(Type, SkString);
+ SkPDFUnion(Type, PDFObject);
+
+ SkPDFUnion& operator=(const SkPDFUnion&) = delete;
+ SkPDFUnion(const SkPDFUnion&) = delete;
+};
+static_assert(sizeof(SkString) == sizeof(void*), "SkString_size");
+
+#endif // SkPDFUnion_DEFINED
diff --git a/gfx/skia/skia/src/pdf/SkPDFUtils.cpp b/gfx/skia/skia/src/pdf/SkPDFUtils.cpp
new file mode 100644
index 0000000000..d5b9923095
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFUtils.cpp
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/pdf/SkPDFUtils.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/base/SkFixed.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPathPriv.h"
+#include "src/image/SkImage_Base.h"
+#include "src/pdf/SkPDFResourceDict.h"
+#include "src/pdf/SkPDFTypes.h"
+
+#include <cmath>
+
+const char* SkPDFUtils::BlendModeName(SkBlendMode mode) {
+ // PDF32000.book section 11.3.5 "Blend Mode"
+ switch (mode) {
+ case SkBlendMode::kSrcOver: return "Normal";
+ case SkBlendMode::kXor: return "Normal"; // (unsupported mode)
+ case SkBlendMode::kPlus: return "Normal"; // (unsupported mode)
+ case SkBlendMode::kScreen: return "Screen";
+ case SkBlendMode::kOverlay: return "Overlay";
+ case SkBlendMode::kDarken: return "Darken";
+ case SkBlendMode::kLighten: return "Lighten";
+ case SkBlendMode::kColorDodge: return "ColorDodge";
+ case SkBlendMode::kColorBurn: return "ColorBurn";
+ case SkBlendMode::kHardLight: return "HardLight";
+ case SkBlendMode::kSoftLight: return "SoftLight";
+ case SkBlendMode::kDifference: return "Difference";
+ case SkBlendMode::kExclusion: return "Exclusion";
+ case SkBlendMode::kMultiply: return "Multiply";
+ case SkBlendMode::kHue: return "Hue";
+ case SkBlendMode::kSaturation: return "Saturation";
+ case SkBlendMode::kColor: return "Color";
+ case SkBlendMode::kLuminosity: return "Luminosity";
+ // Other blendmodes are handled in SkPDFDevice::setUpContentEntry.
+ default: return nullptr;
+ }
+}
+
+std::unique_ptr<SkPDFArray> SkPDFUtils::RectToArray(const SkRect& r) {
+ return SkPDFMakeArray(r.left(), r.top(), r.right(), r.bottom());
+}
+
+std::unique_ptr<SkPDFArray> SkPDFUtils::MatrixToArray(const SkMatrix& matrix) {
+ SkScalar a[6];
+ if (!matrix.asAffine(a)) {
+ SkMatrix::SetAffineIdentity(a);
+ }
+ return SkPDFMakeArray(a[0], a[1], a[2], a[3], a[4], a[5]);
+}
+
+void SkPDFUtils::MoveTo(SkScalar x, SkScalar y, SkWStream* content) {
+ SkPDFUtils::AppendScalar(x, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(y, content);
+ content->writeText(" m\n");
+}
+
+void SkPDFUtils::AppendLine(SkScalar x, SkScalar y, SkWStream* content) {
+ SkPDFUtils::AppendScalar(x, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(y, content);
+ content->writeText(" l\n");
+}
+
+static void append_cubic(SkScalar ctl1X, SkScalar ctl1Y,
+ SkScalar ctl2X, SkScalar ctl2Y,
+ SkScalar dstX, SkScalar dstY, SkWStream* content) {
+ SkString cmd("y\n");
+ SkPDFUtils::AppendScalar(ctl1X, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(ctl1Y, content);
+ content->writeText(" ");
+ if (ctl2X != dstX || ctl2Y != dstY) {
+ cmd.set("c\n");
+ SkPDFUtils::AppendScalar(ctl2X, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(ctl2Y, content);
+ content->writeText(" ");
+ }
+ SkPDFUtils::AppendScalar(dstX, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(dstY, content);
+ content->writeText(" ");
+ content->writeText(cmd.c_str());
+}
+
+static void append_quad(const SkPoint quad[], SkWStream* content) {
+ SkPoint cubic[4];
+ SkConvertQuadToCubic(quad, cubic);
+ append_cubic(cubic[1].fX, cubic[1].fY, cubic[2].fX, cubic[2].fY,
+ cubic[3].fX, cubic[3].fY, content);
+}
+
+void SkPDFUtils::AppendRectangle(const SkRect& rect, SkWStream* content) {
+ // Skia has 0,0 at top left, pdf at bottom left. Do the right thing.
+ SkScalar bottom = std::min(rect.fBottom, rect.fTop);
+
+ SkPDFUtils::AppendScalar(rect.fLeft, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(bottom, content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(rect.width(), content);
+ content->writeText(" ");
+ SkPDFUtils::AppendScalar(rect.height(), content);
+ content->writeText(" re\n");
+}
+
+void SkPDFUtils::EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ bool doConsumeDegerates, SkWStream* content,
+ SkScalar tolerance) {
+ if (path.isEmpty() && SkPaint::kFill_Style == paintStyle) {
+ SkPDFUtils::AppendRectangle({0, 0, 0, 0}, content);
+ return;
+ }
+ // Filling a path with no area results in a drawing in PDF renderers but
+ // Chrome expects to be able to draw some such entities with no visible
+ // result, so we detect those cases and discard the drawing for them.
+ // Specifically: moveTo(X), lineTo(Y) and moveTo(X), lineTo(X), lineTo(Y).
+
+ SkRect rect;
+ bool isClosed; // Both closure and direction need to be checked.
+ SkPathDirection direction;
+ if (path.isRect(&rect, &isClosed, &direction) &&
+ isClosed &&
+ (SkPathDirection::kCW == direction ||
+ SkPathFillType::kEvenOdd == path.getFillType()))
+ {
+ SkPDFUtils::AppendRectangle(rect, content);
+ return;
+ }
+
+ enum SkipFillState {
+ kEmpty_SkipFillState,
+ kSingleLine_SkipFillState,
+ kNonSingleLine_SkipFillState,
+ };
+ SkipFillState fillState = kEmpty_SkipFillState;
+ //if (paintStyle != SkPaint::kFill_Style) {
+ // fillState = kNonSingleLine_SkipFillState;
+ //}
+ SkPoint lastMovePt = SkPoint::Make(0,0);
+ SkDynamicMemoryWStream currentSegment;
+ SkPoint args[4];
+ SkPath::Iter iter(path, false);
+ for (SkPath::Verb verb = iter.next(args);
+ verb != SkPath::kDone_Verb;
+ verb = iter.next(args)) {
+ // args gets all the points, even the implicit first point.
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ MoveTo(args[0].fX, args[0].fY, &currentSegment);
+ lastMovePt = args[0];
+ fillState = kEmpty_SkipFillState;
+ break;
+ case SkPath::kLine_Verb:
+ if (!doConsumeDegerates || !SkPathPriv::AllPointsEq(args, 2)) {
+ AppendLine(args[1].fX, args[1].fY, &currentSegment);
+ if ((fillState == kEmpty_SkipFillState) && (args[0] != lastMovePt)) {
+ fillState = kSingleLine_SkipFillState;
+ break;
+ }
+ fillState = kNonSingleLine_SkipFillState;
+ }
+ break;
+ case SkPath::kQuad_Verb:
+ if (!doConsumeDegerates || !SkPathPriv::AllPointsEq(args, 3)) {
+ append_quad(args, &currentSegment);
+ fillState = kNonSingleLine_SkipFillState;
+ }
+ break;
+ case SkPath::kConic_Verb:
+ if (!doConsumeDegerates || !SkPathPriv::AllPointsEq(args, 3)) {
+ SkAutoConicToQuads converter;
+ const SkPoint* quads = converter.computeQuads(args, iter.conicWeight(), tolerance);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ append_quad(&quads[i * 2], &currentSegment);
+ }
+ fillState = kNonSingleLine_SkipFillState;
+ }
+ break;
+ case SkPath::kCubic_Verb:
+ if (!doConsumeDegerates || !SkPathPriv::AllPointsEq(args, 4)) {
+ append_cubic(args[1].fX, args[1].fY, args[2].fX, args[2].fY,
+ args[3].fX, args[3].fY, &currentSegment);
+ fillState = kNonSingleLine_SkipFillState;
+ }
+ break;
+ case SkPath::kClose_Verb:
+ ClosePath(&currentSegment);
+ currentSegment.writeToStream(content);
+ currentSegment.reset();
+ break;
+ default:
+ SkASSERT(false);
+ break;
+ }
+ }
+ if (currentSegment.bytesWritten() > 0) {
+ currentSegment.writeToStream(content);
+ }
+}
+
+void SkPDFUtils::ClosePath(SkWStream* content) {
+ content->writeText("h\n");
+}
+
+void SkPDFUtils::PaintPath(SkPaint::Style style, SkPathFillType fill, SkWStream* content) {
+ if (style == SkPaint::kFill_Style) {
+ content->writeText("f");
+ } else if (style == SkPaint::kStrokeAndFill_Style) {
+ content->writeText("B");
+ } else if (style == SkPaint::kStroke_Style) {
+ content->writeText("S");
+ }
+
+ if (style != SkPaint::kStroke_Style) {
+ NOT_IMPLEMENTED(fill == SkPathFillType::kInverseEvenOdd, false);
+ NOT_IMPLEMENTED(fill == SkPathFillType::kInverseWinding, false);
+ if (fill == SkPathFillType::kEvenOdd) {
+ content->writeText("*");
+ }
+ }
+ content->writeText("\n");
+}
+
+void SkPDFUtils::StrokePath(SkWStream* content) {
+ SkPDFUtils::PaintPath(SkPaint::kStroke_Style, SkPathFillType::kWinding, content);
+}
+
+void SkPDFUtils::ApplyGraphicState(int objectIndex, SkWStream* content) {
+ SkPDFWriteResourceName(content, SkPDFResourceType::kExtGState, objectIndex);
+ content->writeText(" gs\n");
+}
+
+void SkPDFUtils::ApplyPattern(int objectIndex, SkWStream* content) {
+ // Select Pattern color space (CS, cs) and set pattern object as current
+ // color (SCN, scn)
+ content->writeText("/Pattern CS/Pattern cs");
+ SkPDFWriteResourceName(content, SkPDFResourceType::kPattern, objectIndex);
+ content->writeText(" SCN");
+ SkPDFWriteResourceName(content, SkPDFResourceType::kPattern, objectIndex);
+ content->writeText(" scn\n");
+}
+
+// return "x/pow(10, places)", given 0<x<pow(10, places)
+// result points to places+2 chars.
+static size_t print_permil_as_decimal(int x, char* result, unsigned places) {
+ result[0] = '.';
+ for (int i = places; i > 0; --i) {
+ result[i] = '0' + x % 10;
+ x /= 10;
+ }
+ int j;
+ for (j = places; j > 1; --j) {
+ if (result[j] != '0') {
+ break;
+ }
+ }
+ result[j + 1] = '\0';
+ return j + 1;
+}
+
+
+static constexpr int int_pow(int base, unsigned exp, int acc = 1) {
+ return exp < 1 ? acc
+ : int_pow(base * base,
+ exp / 2,
+ (exp % 2) ? acc * base : acc);
+}
+
+
+size_t SkPDFUtils::ColorToDecimalF(float value, char result[kFloatColorDecimalCount + 2]) {
+ static constexpr int kFactor = int_pow(10, kFloatColorDecimalCount);
+ int x = sk_float_round2int(value * kFactor);
+ if (x >= kFactor || x <= 0) { // clamp to 0-1
+ result[0] = x > 0 ? '1' : '0';
+ result[1] = '\0';
+ return 1;
+ }
+ return print_permil_as_decimal(x, result, kFloatColorDecimalCount);
+}
+
+size_t SkPDFUtils::ColorToDecimal(uint8_t value, char result[5]) {
+ if (value == 255 || value == 0) {
+ result[0] = value ? '1' : '0';
+ result[1] = '\0';
+ return 1;
+ }
+ // int x = 0.5 + (1000.0 / 255.0) * value;
+ int x = SkFixedRoundToInt((SK_Fixed1 * 1000 / 255) * value);
+ return print_permil_as_decimal(x, result, 3);
+}
+
+bool SkPDFUtils::InverseTransformBBox(const SkMatrix& matrix, SkRect* bbox) {
+ SkMatrix inverse;
+ if (!matrix.invert(&inverse)) {
+ return false;
+ }
+ inverse.mapRect(bbox);
+ return true;
+}
+
+void SkPDFUtils::PopulateTilingPatternDict(SkPDFDict* pattern,
+ SkRect& bbox,
+ std::unique_ptr<SkPDFDict> resources,
+ const SkMatrix& matrix) {
+ const int kTiling_PatternType = 1;
+ const int kColoredTilingPattern_PaintType = 1;
+ const int kConstantSpacing_TilingType = 1;
+
+ pattern->insertName("Type", "Pattern");
+ pattern->insertInt("PatternType", kTiling_PatternType);
+ pattern->insertInt("PaintType", kColoredTilingPattern_PaintType);
+ pattern->insertInt("TilingType", kConstantSpacing_TilingType);
+ pattern->insertObject("BBox", SkPDFUtils::RectToArray(bbox));
+ pattern->insertScalar("XStep", bbox.width());
+ pattern->insertScalar("YStep", bbox.height());
+ pattern->insertObject("Resources", std::move(resources));
+ if (!matrix.isIdentity()) {
+ pattern->insertObject("Matrix", SkPDFUtils::MatrixToArray(matrix));
+ }
+}
+
+bool SkPDFUtils::ToBitmap(const SkImage* img, SkBitmap* dst) {
+ SkASSERT(img);
+ SkASSERT(dst);
+ SkBitmap bitmap;
+ // TODO: support GPU images
+ if(as_IB(img)->getROPixels(nullptr, &bitmap)) {
+ SkASSERT(bitmap.dimensions() == img->dimensions());
+ SkASSERT(!bitmap.drawsNothing());
+ *dst = std::move(bitmap);
+ return true;
+ }
+ return false;
+}
+
+#ifdef SK_PDF_BASE85_BINARY
+void SkPDFUtils::Base85Encode(std::unique_ptr<SkStreamAsset> stream, SkDynamicMemoryWStream* dst) {
+ SkASSERT(dst);
+ SkASSERT(stream);
+ dst->writeText("\n");
+ int column = 0;
+ while (true) {
+ uint8_t src[4] = {0, 0, 0, 0};
+ size_t count = stream->read(src, 4);
+ SkASSERT(count < 5);
+ if (0 == count) {
+ dst->writeText("~>\n");
+ return;
+ }
+ uint32_t v = ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
+ ((uint32_t)src[2] << 8) | src[3];
+ if (v == 0 && count == 4) {
+ dst->writeText("z");
+ column += 1;
+ } else {
+ char buffer[5];
+ for (int n = 4; n > 0; --n) {
+ buffer[n] = (v % 85) + '!';
+ v /= 85;
+ }
+ buffer[0] = v + '!';
+ dst->write(buffer, count + 1);
+ column += count + 1;
+ }
+ if (column > 74) {
+ dst->writeText("\n");
+ column = 0;
+ }
+ }
+}
+#endif // SK_PDF_BASE85_BINARY
+
+void SkPDFUtils::AppendTransform(const SkMatrix& matrix, SkWStream* content) {
+ SkScalar values[6];
+ if (!matrix.asAffine(values)) {
+ SkMatrix::SetAffineIdentity(values);
+ }
+ for (SkScalar v : values) {
+ SkPDFUtils::AppendScalar(v, content);
+ content->writeText(" ");
+ }
+ content->writeText("cm\n");
+}
diff --git a/gfx/skia/skia/src/pdf/SkPDFUtils.h b/gfx/skia/skia/src/pdf/SkPDFUtils.h
new file mode 100644
index 0000000000..2d6503b548
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkPDFUtils.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkPDFUtils_DEFINED
+#define SkPDFUtils_DEFINED
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkStream.h"
+#include "src/base/SkUTF.h"
+#include "src/base/SkUtils.h"
+#include "src/pdf/SkPDFTypes.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/utils/SkFloatToDecimal.h"
+
+class SkMatrix;
+class SkPDFArray;
+struct SkRect;
+
+template <typename T>
+bool SkPackedArrayEqual(T* u, T* v, size_t n) {
+ SkASSERT(u);
+ SkASSERT(v);
+ return 0 == memcmp(u, v, n * sizeof(T));
+}
+
+#if 0
+#define PRINT_NOT_IMPL(str) fprintf(stderr, str)
+#else
+#define PRINT_NOT_IMPL(str)
+#endif
+
+#define NOT_IMPLEMENTED(condition, assert) \
+ do { \
+ if ((bool)(condition)) { \
+ PRINT_NOT_IMPL("NOT_IMPLEMENTED: " #condition "\n"); \
+ SkDEBUGCODE(SkASSERT(!assert);) \
+ } \
+ } while (0)
+
+namespace SkPDFUtils {
+
+const char* BlendModeName(SkBlendMode);
+
+std::unique_ptr<SkPDFArray> RectToArray(const SkRect& rect);
+std::unique_ptr<SkPDFArray> MatrixToArray(const SkMatrix& matrix);
+
+void MoveTo(SkScalar x, SkScalar y, SkWStream* content);
+void AppendLine(SkScalar x, SkScalar y, SkWStream* content);
+void AppendRectangle(const SkRect& rect, SkWStream* content);
+void EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ bool doConsumeDegerates, SkWStream* content, SkScalar tolerance = 0.25f);
+inline void EmitPath(const SkPath& path, SkPaint::Style paintStyle,
+ SkWStream* content, SkScalar tolerance = 0.25f) {
+ SkPDFUtils::EmitPath(path, paintStyle, true, content, tolerance);
+}
+void ClosePath(SkWStream* content);
+void PaintPath(SkPaint::Style style, SkPathFillType fill, SkWStream* content);
+void StrokePath(SkWStream* content);
+void ApplyGraphicState(int objectIndex, SkWStream* content);
+void ApplyPattern(int objectIndex, SkWStream* content);
+
+// Converts (value / 255.0) with three significant digits of accuracy.
+// Writes value as string into result. Returns strlen() of result.
+size_t ColorToDecimal(uint8_t value, char result[5]);
+
+static constexpr unsigned kFloatColorDecimalCount = 4;
+size_t ColorToDecimalF(float value, char result[kFloatColorDecimalCount + 2]);
+inline void AppendColorComponent(uint8_t value, SkWStream* wStream) {
+ char buffer[5];
+ size_t len = SkPDFUtils::ColorToDecimal(value, buffer);
+ wStream->write(buffer, len);
+}
+inline void AppendColorComponentF(float value, SkWStream* wStream) {
+ char buffer[kFloatColorDecimalCount + 2];
+ size_t len = SkPDFUtils::ColorToDecimalF(value, buffer);
+ wStream->write(buffer, len);
+}
+
+inline void AppendScalar(SkScalar value, SkWStream* stream) {
+ char result[kMaximumSkFloatToDecimalLength];
+ size_t len = SkFloatToDecimal(SkScalarToFloat(value), result);
+ SkASSERT(len < kMaximumSkFloatToDecimalLength);
+ stream->write(result, len);
+}
+
+inline void WriteUInt16BE(SkWStream* wStream, uint16_t value) {
+ char result[4] = { SkHexadecimalDigits::gUpper[ value >> 12 ],
+ SkHexadecimalDigits::gUpper[0xF & (value >> 8 )],
+ SkHexadecimalDigits::gUpper[0xF & (value >> 4 )],
+ SkHexadecimalDigits::gUpper[0xF & (value )] };
+ wStream->write(result, 4);
+}
+
+inline void WriteUInt8(SkWStream* wStream, uint8_t value) {
+ char result[2] = { SkHexadecimalDigits::gUpper[value >> 4],
+ SkHexadecimalDigits::gUpper[value & 0xF] };
+ wStream->write(result, 2);
+}
+
+inline void WriteUTF16beHex(SkWStream* wStream, SkUnichar utf32) {
+ uint16_t utf16[2] = {0, 0};
+ size_t len = SkUTF::ToUTF16(utf32, utf16);
+ SkASSERT(len == 1 || len == 2);
+ SkPDFUtils::WriteUInt16BE(wStream, utf16[0]);
+ if (len == 2) {
+ SkPDFUtils::WriteUInt16BE(wStream, utf16[1]);
+ }
+}
+
+inline SkMatrix GetShaderLocalMatrix(const SkShader* shader) {
+ SkMatrix localMatrix;
+ if (sk_sp<SkShader> s = as_SB(shader)->makeAsALocalMatrixShader(&localMatrix)) {
+ return localMatrix;
+ }
+ return SkMatrix::I();
+}
+bool InverseTransformBBox(const SkMatrix& matrix, SkRect* bbox);
+void PopulateTilingPatternDict(SkPDFDict* pattern,
+ SkRect& bbox,
+ std::unique_ptr<SkPDFDict> resources,
+ const SkMatrix& matrix);
+
+bool ToBitmap(const SkImage* img, SkBitmap* dst);
+
+#ifdef SK_PDF_BASE85_BINARY
+void Base85Encode(std::unique_ptr<SkStreamAsset> src, SkDynamicMemoryWStream* dst);
+#endif // SK_PDF_BASE85_BINARY
+
+void AppendTransform(const SkMatrix&, SkWStream*);
+} // namespace SkPDFUtils
+
+#endif
diff --git a/gfx/skia/skia/src/pdf/SkUUID.h b/gfx/skia/skia/src/pdf/SkUUID.h
new file mode 100644
index 0000000000..3d81865dc0
--- /dev/null
+++ b/gfx/skia/skia/src/pdf/SkUUID.h
@@ -0,0 +1,18 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
+#ifndef SkUUID_DEFINED
+#define SkUUID_DEFINED
+
+#include <cstdint>
+#include <cstring>
+
+struct SkUUID {
+ uint8_t fData[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+};
+
+static inline bool operator==(const SkUUID& u, const SkUUID& v) {
+ return 0 == memcmp(u.fData, v.fData, sizeof(u.fData));
+}
+static inline bool operator!=(const SkUUID& u, const SkUUID& v) { return !(u == v); }
+
+#endif // SkUUID_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkDebug_android.cpp b/gfx/skia/skia/src/ports/SkDebug_android.cpp
new file mode 100644
index 0000000000..49ba4ae7f7
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_android.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_ANDROID)
+
+#include <stdio.h>
+
+#ifdef LOG_TAG
+ #undef LOG_TAG
+#endif
+#define LOG_TAG "skia"
+#include <android/log.h>
+
+// Print debug output to stdout as well. This is useful for command line
+// applications (e.g. skia_launcher).
+bool gSkDebugToStdOut = false;
+
+void SkDebugf(const char format[], ...) {
+ va_list args1, args2;
+ va_start(args1, format);
+
+ if (gSkDebugToStdOut) {
+ va_copy(args2, args1);
+ vprintf(format, args2);
+ va_end(args2);
+ }
+
+ __android_log_vprint(ANDROID_LOG_DEBUG, LOG_TAG, format, args1);
+
+ va_end(args1);
+}
+
+#endif//defined(SK_BUILD_FOR_ANDROID)
diff --git a/gfx/skia/skia/src/ports/SkDebug_stdio.cpp b/gfx/skia/skia/src/ports/SkDebug_stdio.cpp
new file mode 100644
index 0000000000..78c7072bd0
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_stdio.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkFeatures.h"
+#include "include/private/base/SkLoadUserConfig.h"
+
+#if !defined(SK_BUILD_FOR_WIN) && !defined(SK_BUILD_FOR_ANDROID)
+
+#include <stdarg.h>
+#include <stdio.h>
+
+void SkDebugf(const char format[], ...) {
+ va_list args;
+ va_start(args, format);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ vfprintf(stderr, format, args);
+#pragma GCC diagnostic pop
+ va_end(args);
+}
+#endif//!defined(SK_BUILD_FOR_WIN) && !defined(SK_BUILD_FOR_ANDROID)
diff --git a/gfx/skia/skia/src/ports/SkDebug_win.cpp b/gfx/skia/skia/src/ports/SkDebug_win.cpp
new file mode 100644
index 0000000000..1ad754e624
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDebug_win.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/base/SkLeanWindows.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+static const size_t kBufferSize = 2048;
+
+void SkDebugf(const char format[], ...) {
+ char buffer[kBufferSize + 1];
+ va_list args;
+
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+ fflush(stderr); // stderr seems to be buffered on Windows.
+
+ va_start(args, format);
+ vsnprintf(buffer, kBufferSize, format, args);
+ va_end(args);
+
+ OutputDebugStringA(buffer);
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp b/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp
new file mode 100644
index 0000000000..eeb64cade6
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkDiscardableMemory_none.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/chromium/SkDiscardableMemory.h"
+#include "src/lazy/SkDiscardableMemoryPool.h"
+
+SkDiscardableMemory* SkDiscardableMemory::Create(size_t bytes) {
+ return SkGetGlobalDiscardableMemoryPool()->create(bytes);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp
new file mode 100644
index 0000000000..7747f9126c
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkRefCnt.h"
+#include "include/ports/SkFontConfigInterface.h"
+#include "include/private/base/SkMutex.h"
+
+static SkMutex& font_config_interface_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+static SkFontConfigInterface* gFontConfigInterface;
+
+sk_sp<SkFontConfigInterface> SkFontConfigInterface::RefGlobal() {
+ SkAutoMutexExclusive ac(font_config_interface_mutex());
+
+ if (gFontConfigInterface) {
+ return sk_ref_sp(gFontConfigInterface);
+ }
+ return sk_ref_sp(SkFontConfigInterface::GetSingletonDirectInterface());
+}
+
+void SkFontConfigInterface::SetGlobal(sk_sp<SkFontConfigInterface> fc) {
+ SkAutoMutexExclusive ac(font_config_interface_mutex());
+
+ SkSafeUnref(gFontConfigInterface);
+ gFontConfigInterface = fc.release();
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp
new file mode 100644
index 0000000000..f735f4302b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.cpp
@@ -0,0 +1,709 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* migrated from chrome/src/skia/ext/SkFontHost_fontconfig_direct.cpp */
+
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/base/SkBuffer.h"
+#include "src/ports/SkFontConfigInterface_direct.h"
+
+#include <fontconfig/fontconfig.h>
+#include <unistd.h>
+
+namespace {
+
+// FontConfig was thread antagonistic until 2.10.91 with known thread safety issues until 2.13.93.
+// Before that, lock with a global mutex.
+// See https://bug.skia.org/1497 and cl/339089311 for background.
+static SkMutex& f_c_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+struct FCLocker {
+ inline static constexpr int FontConfigThreadSafeVersion = 21393;
+
+ // Assume FcGetVersion() has always been thread safe.
+ FCLocker() {
+ if (FcGetVersion() < FontConfigThreadSafeVersion) {
+ f_c_mutex().acquire();
+ }
+ }
+
+ ~FCLocker() {
+ AssertHeld();
+ if (FcGetVersion() < FontConfigThreadSafeVersion) {
+ f_c_mutex().release();
+ }
+ }
+
+ static void AssertHeld() { SkDEBUGCODE(
+ if (FcGetVersion() < FontConfigThreadSafeVersion) {
+ f_c_mutex().assertHeld();
+ }
+ ) }
+};
+
+using UniqueFCConfig = std::unique_ptr<FcConfig, SkFunctionObject<FcConfigDestroy>>;
+
+} // namespace
+
+size_t SkFontConfigInterface::FontIdentity::writeToMemory(void* addr) const {
+ size_t size = sizeof(fID) + sizeof(fTTCIndex);
+ size += sizeof(int32_t) + sizeof(int32_t) + sizeof(uint8_t); // weight, width, italic
+ size += sizeof(int32_t) + fString.size(); // store length+data
+ if (addr) {
+ SkWBuffer buffer(addr, size);
+
+ buffer.write32(fID);
+ buffer.write32(fTTCIndex);
+ buffer.write32(fString.size());
+ buffer.write32(fStyle.weight());
+ buffer.write32(fStyle.width());
+ buffer.write8(fStyle.slant());
+ buffer.write(fString.c_str(), fString.size());
+ buffer.padToAlign4();
+
+ SkASSERT(buffer.pos() == size);
+ }
+ return size;
+}
+
+size_t SkFontConfigInterface::FontIdentity::readFromMemory(const void* addr,
+ size_t size) {
+ SkRBuffer buffer(addr, size);
+
+ (void)buffer.readU32(&fID);
+ (void)buffer.readS32(&fTTCIndex);
+ uint32_t strLen, weight, width;
+ (void)buffer.readU32(&strLen);
+ (void)buffer.readU32(&weight);
+ (void)buffer.readU32(&width);
+ uint8_t u8;
+ (void)buffer.readU8(&u8);
+ SkFontStyle::Slant slant = (SkFontStyle::Slant)u8;
+ fStyle = SkFontStyle(weight, width, slant);
+ fString.resize(strLen);
+ (void)buffer.read(fString.data(), strLen);
+ buffer.skipToAlign4();
+
+ return buffer.pos(); // the actual number of bytes read
+}
+
+#ifdef SK_DEBUG
+static void make_iden(SkFontConfigInterface::FontIdentity* iden) {
+ iden->fID = 10;
+ iden->fTTCIndex = 2;
+ iden->fString.set("Hello world");
+ iden->fStyle = SkFontStyle(300, 6, SkFontStyle::kItalic_Slant);
+}
+
+static void test_writeToMemory(const SkFontConfigInterface::FontIdentity& iden0,
+ int initValue) {
+ SkFontConfigInterface::FontIdentity iden1;
+
+ size_t size0 = iden0.writeToMemory(nullptr);
+
+ SkAutoMalloc storage(size0);
+ memset(storage.get(), initValue, size0);
+
+ size_t size1 = iden0.writeToMemory(storage.get());
+ SkASSERT(size0 == size1);
+
+ SkASSERT(iden0 != iden1);
+ size_t size2 = iden1.readFromMemory(storage.get(), size1);
+ SkASSERT(size2 == size1);
+ SkASSERT(iden0 == iden1);
+}
+
+static void fontconfiginterface_unittest() {
+ SkFontConfigInterface::FontIdentity iden0, iden1;
+
+ SkASSERT(iden0 == iden1);
+
+ make_iden(&iden0);
+ SkASSERT(iden0 != iden1);
+
+ make_iden(&iden1);
+ SkASSERT(iden0 == iden1);
+
+ test_writeToMemory(iden0, 0);
+ test_writeToMemory(iden0, 0);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Returns the string from the pattern, or nullptr
+static const char* get_string(FcPattern* pattern, const char field[], int index = 0) {
+ const char* name;
+ if (FcPatternGetString(pattern, field, index, (FcChar8**)&name) != FcResultMatch) {
+ name = nullptr;
+ }
+ return name;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+// Equivalence classes, used to match the Liberation and other fonts
+// with their metric-compatible replacements. See the discussion in
+// GetFontEquivClass().
+enum FontEquivClass
+{
+ OTHER,
+ SANS,
+ SERIF,
+ MONO,
+ SYMBOL,
+ PGOTHIC,
+ GOTHIC,
+ PMINCHO,
+ MINCHO,
+ SIMSUN,
+ NSIMSUN,
+ SIMHEI,
+ PMINGLIU,
+ MINGLIU,
+ PMINGLIUHK,
+ MINGLIUHK,
+ CAMBRIA,
+ CALIBRI,
+};
+
+// Match the font name against a whitelist of fonts, returning the equivalence
+// class.
+FontEquivClass GetFontEquivClass(const char* fontname)
+{
+ // It would be nice for fontconfig to tell us whether a given suggested
+ // replacement is a "strong" match (that is, an equivalent font) or
+ // a "weak" match (that is, fontconfig's next-best attempt at finding a
+ // substitute). However, I played around with the fontconfig API for
+ // a good few hours and could not make it reveal this information.
+ //
+ // So instead, we hardcode. Initially this function emulated
+ // /etc/fonts/conf.d/30-metric-aliases.conf
+ // from my Ubuntu system, but we're better off being very conservative.
+
+ // Arimo, Tinos and Cousine are a set of fonts metric-compatible with
+ // Arial, Times New Roman and Courier New with a character repertoire
+ // much larger than Liberation. Note that Cousine is metrically
+ // compatible with Courier New, but the former is sans-serif while
+ // the latter is serif.
+
+
+ struct FontEquivMap {
+ FontEquivClass clazz;
+ const char name[40];
+ };
+
+ static const FontEquivMap kFontEquivMap[] = {
+ { SANS, "Arial" },
+ { SANS, "Arimo" },
+ { SANS, "Liberation Sans" },
+
+ { SERIF, "Times New Roman" },
+ { SERIF, "Tinos" },
+ { SERIF, "Liberation Serif" },
+
+ { MONO, "Courier New" },
+ { MONO, "Cousine" },
+ { MONO, "Liberation Mono" },
+
+ { SYMBOL, "Symbol" },
+ { SYMBOL, "Symbol Neu" },
+
+ // MS Pゴシック
+ { PGOTHIC, "MS PGothic" },
+ { PGOTHIC, "\xef\xbc\xad\xef\xbc\xb3 \xef\xbc\xb0"
+ "\xe3\x82\xb4\xe3\x82\xb7\xe3\x83\x83\xe3\x82\xaf" },
+ { PGOTHIC, "Noto Sans CJK JP" },
+ { PGOTHIC, "IPAPGothic" },
+ { PGOTHIC, "MotoyaG04Gothic" },
+
+ // MS ゴシック
+ { GOTHIC, "MS Gothic" },
+ { GOTHIC, "\xef\xbc\xad\xef\xbc\xb3 "
+ "\xe3\x82\xb4\xe3\x82\xb7\xe3\x83\x83\xe3\x82\xaf" },
+ { GOTHIC, "Noto Sans Mono CJK JP" },
+ { GOTHIC, "IPAGothic" },
+ { GOTHIC, "MotoyaG04GothicMono" },
+
+ // MS P明朝
+ { PMINCHO, "MS PMincho" },
+ { PMINCHO, "\xef\xbc\xad\xef\xbc\xb3 \xef\xbc\xb0"
+ "\xe6\x98\x8e\xe6\x9c\x9d"},
+ { PMINCHO, "Noto Serif CJK JP" },
+ { PMINCHO, "IPAPMincho" },
+ { PMINCHO, "MotoyaG04Mincho" },
+
+ // MS 明朝
+ { MINCHO, "MS Mincho" },
+ { MINCHO, "\xef\xbc\xad\xef\xbc\xb3 \xe6\x98\x8e\xe6\x9c\x9d" },
+ { MINCHO, "Noto Serif CJK JP" },
+ { MINCHO, "IPAMincho" },
+ { MINCHO, "MotoyaG04MinchoMono" },
+
+ // 宋体
+ { SIMSUN, "Simsun" },
+ { SIMSUN, "\xe5\xae\x8b\xe4\xbd\x93" },
+ { SIMSUN, "Noto Serif CJK SC" },
+ { SIMSUN, "MSung GB18030" },
+ { SIMSUN, "Song ASC" },
+
+ // 新宋体
+ { NSIMSUN, "NSimsun" },
+ { NSIMSUN, "\xe6\x96\xb0\xe5\xae\x8b\xe4\xbd\x93" },
+ { NSIMSUN, "Noto Serif CJK SC" },
+ { NSIMSUN, "MSung GB18030" },
+ { NSIMSUN, "N Song ASC" },
+
+ // 黑体
+ { SIMHEI, "Simhei" },
+ { SIMHEI, "\xe9\xbb\x91\xe4\xbd\x93" },
+ { SIMHEI, "Noto Sans CJK SC" },
+ { SIMHEI, "MYingHeiGB18030" },
+ { SIMHEI, "MYingHeiB5HK" },
+
+ // 新細明體
+ { PMINGLIU, "PMingLiU"},
+ { PMINGLIU, "\xe6\x96\xb0\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94" },
+ { PMINGLIU, "Noto Serif CJK TC"},
+ { PMINGLIU, "MSung B5HK"},
+
+ // 細明體
+ { MINGLIU, "MingLiU"},
+ { MINGLIU, "\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94" },
+ { MINGLIU, "Noto Serif CJK TC"},
+ { MINGLIU, "MSung B5HK"},
+
+ // 新細明體
+ { PMINGLIUHK, "PMingLiU_HKSCS"},
+ { PMINGLIUHK, "\xe6\x96\xb0\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94_HKSCS" },
+ { PMINGLIUHK, "Noto Serif CJK TC"},
+ { PMINGLIUHK, "MSung B5HK"},
+
+ // 細明體
+ { MINGLIUHK, "MingLiU_HKSCS"},
+ { MINGLIUHK, "\xe7\xb4\xb0\xe6\x98\x8e\xe9\xab\x94_HKSCS" },
+ { MINGLIUHK, "Noto Serif CJK TC"},
+ { MINGLIUHK, "MSung B5HK"},
+
+ // Cambria
+ { CAMBRIA, "Cambria" },
+ { CAMBRIA, "Caladea" },
+
+ // Calibri
+ { CALIBRI, "Calibri" },
+ { CALIBRI, "Carlito" },
+ };
+
+ static const size_t kFontCount =
+ sizeof(kFontEquivMap)/sizeof(kFontEquivMap[0]);
+
+ // TODO(jungshik): If this loop turns out to be hot, turn
+ // the array to a static (hash)map to speed it up.
+ for (size_t i = 0; i < kFontCount; ++i) {
+ if (strcasecmp(kFontEquivMap[i].name, fontname) == 0)
+ return kFontEquivMap[i].clazz;
+ }
+ return OTHER;
+}
+
+
+// Return true if |font_a| and |font_b| are visually and at the metrics
+// level interchangeable.
+bool IsMetricCompatibleReplacement(const char* font_a, const char* font_b)
+{
+ FontEquivClass class_a = GetFontEquivClass(font_a);
+ FontEquivClass class_b = GetFontEquivClass(font_b);
+
+ return class_a != OTHER && class_a == class_b;
+}
+
+// Normally we only return exactly the font asked for. In last-resort
+// cases, the request either doesn't specify a font or is one of the
+// basic font names like "Sans", "Serif" or "Monospace". This function
+// tells you whether a given request is for such a fallback.
+bool IsFallbackFontAllowed(const SkString& family) {
+ const char* family_cstr = family.c_str();
+ return family.isEmpty() ||
+ strcasecmp(family_cstr, "sans") == 0 ||
+ strcasecmp(family_cstr, "serif") == 0 ||
+ strcasecmp(family_cstr, "monospace") == 0;
+}
+
+// Retrieves |is_bold|, |is_italic| and |font_family| properties from |font|.
+static int get_int(FcPattern* pattern, const char object[], int missing) {
+ int value;
+ if (FcPatternGetInteger(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static int map_range(SkScalar value,
+ SkScalar old_min, SkScalar old_max,
+ SkScalar new_min, SkScalar new_max)
+{
+ SkASSERT(old_min < old_max);
+ SkASSERT(new_min <= new_max);
+ return new_min + ((value - old_min) * (new_max - new_min) / (old_max - old_min));
+}
+
+struct MapRanges {
+ SkScalar old_val;
+ SkScalar new_val;
+};
+
+static SkScalar map_ranges(SkScalar val, MapRanges const ranges[], int rangesCount) {
+ // -Inf to [0]
+ if (val < ranges[0].old_val) {
+ return ranges[0].new_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < rangesCount - 1; ++i) {
+ if (val < ranges[i+1].old_val) {
+ return map_range(val, ranges[i].old_val, ranges[i+1].old_val,
+ ranges[i].new_val, ranges[i+1].new_val);
+ }
+ }
+
+ // From [n] to +Inf
+ // if (fcweight < Inf)
+ return ranges[rangesCount-1].new_val;
+}
+
+#ifndef FC_WEIGHT_DEMILIGHT
+#define FC_WEIGHT_DEMILIGHT 65
+#endif
+
+static SkFontStyle skfontstyle_from_fcpattern(FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ static constexpr MapRanges weightRanges[] = {
+ { FC_WEIGHT_THIN, SkFS::kThin_Weight },
+ { FC_WEIGHT_EXTRALIGHT, SkFS::kExtraLight_Weight },
+ { FC_WEIGHT_LIGHT, SkFS::kLight_Weight },
+ { FC_WEIGHT_DEMILIGHT, 350 },
+ { FC_WEIGHT_BOOK, 380 },
+ { FC_WEIGHT_REGULAR, SkFS::kNormal_Weight },
+ { FC_WEIGHT_MEDIUM, SkFS::kMedium_Weight },
+ { FC_WEIGHT_DEMIBOLD, SkFS::kSemiBold_Weight },
+ { FC_WEIGHT_BOLD, SkFS::kBold_Weight },
+ { FC_WEIGHT_EXTRABOLD, SkFS::kExtraBold_Weight },
+ { FC_WEIGHT_BLACK, SkFS::kBlack_Weight },
+ { FC_WEIGHT_EXTRABLACK, SkFS::kExtraBlack_Weight },
+ };
+ SkScalar weight = map_ranges(get_int(pattern, FC_WEIGHT, FC_WEIGHT_REGULAR),
+ weightRanges, std::size(weightRanges));
+
+ static constexpr MapRanges widthRanges[] = {
+ { FC_WIDTH_ULTRACONDENSED, SkFS::kUltraCondensed_Width },
+ { FC_WIDTH_EXTRACONDENSED, SkFS::kExtraCondensed_Width },
+ { FC_WIDTH_CONDENSED, SkFS::kCondensed_Width },
+ { FC_WIDTH_SEMICONDENSED, SkFS::kSemiCondensed_Width },
+ { FC_WIDTH_NORMAL, SkFS::kNormal_Width },
+ { FC_WIDTH_SEMIEXPANDED, SkFS::kSemiExpanded_Width },
+ { FC_WIDTH_EXPANDED, SkFS::kExpanded_Width },
+ { FC_WIDTH_EXTRAEXPANDED, SkFS::kExtraExpanded_Width },
+ { FC_WIDTH_ULTRAEXPANDED, SkFS::kUltraExpanded_Width },
+ };
+ SkScalar width = map_ranges(get_int(pattern, FC_WIDTH, FC_WIDTH_NORMAL),
+ widthRanges, std::size(widthRanges));
+
+ SkFS::Slant slant = SkFS::kUpright_Slant;
+ switch (get_int(pattern, FC_SLANT, FC_SLANT_ROMAN)) {
+ case FC_SLANT_ROMAN: slant = SkFS::kUpright_Slant; break;
+ case FC_SLANT_ITALIC : slant = SkFS::kItalic_Slant ; break;
+ case FC_SLANT_OBLIQUE: slant = SkFS::kOblique_Slant; break;
+ default: SkASSERT(false); break;
+ }
+
+ return SkFontStyle(SkScalarRoundToInt(weight), SkScalarRoundToInt(width), slant);
+}
+
+static void fcpattern_from_skfontstyle(SkFontStyle style, FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ static constexpr MapRanges weightRanges[] = {
+ { SkFS::kThin_Weight, FC_WEIGHT_THIN },
+ { SkFS::kExtraLight_Weight, FC_WEIGHT_EXTRALIGHT },
+ { SkFS::kLight_Weight, FC_WEIGHT_LIGHT },
+ { 350, FC_WEIGHT_DEMILIGHT },
+ { 380, FC_WEIGHT_BOOK },
+ { SkFS::kNormal_Weight, FC_WEIGHT_REGULAR },
+ { SkFS::kMedium_Weight, FC_WEIGHT_MEDIUM },
+ { SkFS::kSemiBold_Weight, FC_WEIGHT_DEMIBOLD },
+ { SkFS::kBold_Weight, FC_WEIGHT_BOLD },
+ { SkFS::kExtraBold_Weight, FC_WEIGHT_EXTRABOLD },
+ { SkFS::kBlack_Weight, FC_WEIGHT_BLACK },
+ { SkFS::kExtraBlack_Weight, FC_WEIGHT_EXTRABLACK },
+ };
+ int weight = map_ranges(style.weight(), weightRanges, std::size(weightRanges));
+
+ static constexpr MapRanges widthRanges[] = {
+ { SkFS::kUltraCondensed_Width, FC_WIDTH_ULTRACONDENSED },
+ { SkFS::kExtraCondensed_Width, FC_WIDTH_EXTRACONDENSED },
+ { SkFS::kCondensed_Width, FC_WIDTH_CONDENSED },
+ { SkFS::kSemiCondensed_Width, FC_WIDTH_SEMICONDENSED },
+ { SkFS::kNormal_Width, FC_WIDTH_NORMAL },
+ { SkFS::kSemiExpanded_Width, FC_WIDTH_SEMIEXPANDED },
+ { SkFS::kExpanded_Width, FC_WIDTH_EXPANDED },
+ { SkFS::kExtraExpanded_Width, FC_WIDTH_EXTRAEXPANDED },
+ { SkFS::kUltraExpanded_Width, FC_WIDTH_ULTRAEXPANDED },
+ };
+ int width = map_ranges(style.width(), widthRanges, std::size(widthRanges));
+
+ int slant = FC_SLANT_ROMAN;
+ switch (style.slant()) {
+ case SkFS::kUpright_Slant: slant = FC_SLANT_ROMAN ; break;
+ case SkFS::kItalic_Slant : slant = FC_SLANT_ITALIC ; break;
+ case SkFS::kOblique_Slant: slant = FC_SLANT_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+
+ FcPatternAddInteger(pattern, FC_WEIGHT, weight);
+ FcPatternAddInteger(pattern, FC_WIDTH , width);
+ FcPatternAddInteger(pattern, FC_SLANT , slant);
+}
+
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define kMaxFontFamilyLength 2048
+#ifdef SK_FONT_CONFIG_INTERFACE_ONLY_ALLOW_SFNT_FONTS
+const char* kFontFormatTrueType = "TrueType";
+const char* kFontFormatCFF = "CFF";
+#endif
+
+SkFontConfigInterfaceDirect::SkFontConfigInterfaceDirect(FcConfig* fc) : fFC(fc)
+{
+ SkDEBUGCODE(fontconfiginterface_unittest();)
+}
+
+SkFontConfigInterfaceDirect::~SkFontConfigInterfaceDirect() {
+ if (fFC) {
+ FcConfigDestroy(fFC);
+ }
+}
+
+bool SkFontConfigInterfaceDirect::isAccessible(const char* filename) {
+ if (access(filename, R_OK) != 0) {
+ return false;
+ }
+ return true;
+}
+
+bool SkFontConfigInterfaceDirect::isValidPattern(FcPattern* pattern) {
+#ifdef SK_FONT_CONFIG_INTERFACE_ONLY_ALLOW_SFNT_FONTS
+ const char* font_format = get_string(pattern, FC_FONTFORMAT);
+ if (font_format
+ && 0 != strcmp(font_format, kFontFormatTrueType)
+ && 0 != strcmp(font_format, kFontFormatCFF))
+ {
+ return false;
+ }
+#endif
+
+ // fontconfig can also return fonts which are unreadable
+ const char* c_filename = get_string(pattern, FC_FILE);
+ if (!c_filename) {
+ return false;
+ }
+
+ FcConfig* fc = fFC;
+ UniqueFCConfig fcStorage;
+ if (!fc) {
+ fcStorage.reset(FcConfigReference(nullptr));
+ fc = fcStorage.get();
+ }
+
+ const char* sysroot = (const char*)FcConfigGetSysRoot(fc);
+ SkString resolvedFilename;
+ if (sysroot) {
+ resolvedFilename = sysroot;
+ resolvedFilename += c_filename;
+ c_filename = resolvedFilename.c_str();
+ }
+ return this->isAccessible(c_filename);
+}
+
+// Find matching font from |font_set| for the given font family.
+FcPattern* SkFontConfigInterfaceDirect::MatchFont(FcFontSet* font_set,
+ const char* post_config_family,
+ const SkString& family) {
+ // Older versions of fontconfig have a bug where they cannot select
+ // only scalable fonts so we have to manually filter the results.
+ FcPattern* match = nullptr;
+ for (int i = 0; i < font_set->nfont; ++i) {
+ FcPattern* current = font_set->fonts[i];
+ if (this->isValidPattern(current)) {
+ match = current;
+ break;
+ }
+ }
+
+ if (match && !IsFallbackFontAllowed(family)) {
+ bool acceptable_substitute = false;
+ for (int id = 0; id < 255; ++id) {
+ const char* post_match_family = get_string(match, FC_FAMILY, id);
+ if (!post_match_family)
+ break;
+ acceptable_substitute =
+ (strcasecmp(post_config_family, post_match_family) == 0 ||
+ // Workaround for Issue 12530:
+ // requested family: "Bitstream Vera Sans"
+ // post_config_family: "Arial"
+ // post_match_family: "Bitstream Vera Sans"
+ // -> We should treat this case as a good match.
+ strcasecmp(family.c_str(), post_match_family) == 0) ||
+ IsMetricCompatibleReplacement(family.c_str(), post_match_family);
+ if (acceptable_substitute)
+ break;
+ }
+ if (!acceptable_substitute)
+ return nullptr;
+ }
+
+ return match;
+}
+
+bool SkFontConfigInterfaceDirect::matchFamilyName(const char familyName[],
+ SkFontStyle style,
+ FontIdentity* outIdentity,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) {
+ SkString familyStr(familyName ? familyName : "");
+ if (familyStr.size() > kMaxFontFamilyLength) {
+ return false;
+ }
+
+ FcConfig* fc = fFC;
+ UniqueFCConfig fcStorage;
+ if (!fc) {
+ fcStorage.reset(FcConfigReference(nullptr));
+ fc = fcStorage.get();
+ }
+
+ FCLocker lock;
+ FcPattern* pattern = FcPatternCreate();
+
+ if (familyName) {
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ }
+ fcpattern_from_skfontstyle(style, pattern);
+
+ FcPatternAddBool(pattern, FC_SCALABLE, FcTrue);
+
+ FcConfigSubstitute(fc, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ // Font matching:
+ // CSS often specifies a fallback list of families:
+ // font-family: a, b, c, serif;
+ // However, fontconfig will always do its best to find *a* font when asked
+ // for something so we need a way to tell if the match which it has found is
+ // "good enough" for us. Otherwise, we can return nullptr which gets piped up
+ // and lets WebKit know to try the next CSS family name. However, fontconfig
+ // configs allow substitutions (mapping "Arial -> Helvetica" etc) and we
+ // wish to support that.
+ //
+ // Thus, if a specific family is requested we set @family_requested. Then we
+ // record two strings: the family name after config processing and the
+ // family name after resolving. If the two are equal, it's a good match.
+ //
+ // So consider the case where a user has mapped Arial to Helvetica in their
+ // config.
+ // requested family: "Arial"
+ // post_config_family: "Helvetica"
+ // post_match_family: "Helvetica"
+ // -> good match
+ //
+ // and for a missing font:
+ // requested family: "Monaco"
+ // post_config_family: "Monaco"
+ // post_match_family: "Times New Roman"
+ // -> BAD match
+ //
+ // However, we special-case fallback fonts; see IsFallbackFontAllowed().
+
+ const char* post_config_family = get_string(pattern, FC_FAMILY);
+ if (!post_config_family) {
+ // we can just continue with an empty name, e.g. default font
+ post_config_family = "";
+ }
+
+ FcResult result;
+ FcFontSet* font_set = FcFontSort(fc, pattern, 0, nullptr, &result);
+ if (!font_set) {
+ FcPatternDestroy(pattern);
+ return false;
+ }
+
+ FcPattern* match = this->MatchFont(font_set, post_config_family, familyStr);
+ if (!match) {
+ FcPatternDestroy(pattern);
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+
+ FcPatternDestroy(pattern);
+
+ // From here out we just extract our results from 'match'
+
+ post_config_family = get_string(match, FC_FAMILY);
+ if (!post_config_family) {
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+
+ const char* c_filename = get_string(match, FC_FILE);
+ if (!c_filename) {
+ FcFontSetDestroy(font_set);
+ return false;
+ }
+ const char* sysroot = (const char*)FcConfigGetSysRoot(fc);
+ SkString resolvedFilename;
+ if (sysroot) {
+ resolvedFilename = sysroot;
+ resolvedFilename += c_filename;
+ c_filename = resolvedFilename.c_str();
+ }
+
+ int face_index = get_int(match, FC_INDEX, 0);
+
+ FcFontSetDestroy(font_set);
+
+ if (outIdentity) {
+ outIdentity->fTTCIndex = face_index;
+ outIdentity->fString.set(c_filename);
+ }
+ if (outFamilyName) {
+ outFamilyName->set(post_config_family);
+ }
+ if (outStyle) {
+ *outStyle = skfontstyle_from_fcpattern(match);
+ }
+ return true;
+}
+
+SkStreamAsset* SkFontConfigInterfaceDirect::openStream(const FontIdentity& identity) {
+ return SkStream::MakeFromFile(identity.fString.c_str()).release();
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h
new file mode 100644
index 0000000000..e0f3127ed4
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/* migrated from chrome/src/skia/ext/SkFontHost_fontconfig_direct.cpp */
+#ifndef SKFONTCONFIGINTERFACE_DIRECT_H_
+#define SKFONTCONFIGINTERFACE_DIRECT_H_
+
+#include "include/ports/SkFontConfigInterface.h"
+
+#include <fontconfig/fontconfig.h>
+
+class SkFontConfigInterfaceDirect : public SkFontConfigInterface {
+public:
+ /** Create around a FontConfig instance.
+ * If 'fc' is nullptr, each method call will use the current config.
+ * Takes ownership of 'fc' and will call FcConfigDestroy on it.
+ */
+ SkFontConfigInterfaceDirect(FcConfig* fc);
+ ~SkFontConfigInterfaceDirect() override;
+
+ bool matchFamilyName(const char familyName[],
+ SkFontStyle requested,
+ FontIdentity* outFontIdentifier,
+ SkString* outFamilyName,
+ SkFontStyle* outStyle) override;
+
+ SkStreamAsset* openStream(const FontIdentity&) override;
+
+protected:
+ virtual bool isAccessible(const char* filename);
+
+private:
+ FcConfig * const fFC;
+ bool isValidPattern(FcPattern* pattern);
+ FcPattern* MatchFont(FcFontSet* font_set, const char* post_config_family,
+ const SkString& family);
+ using INHERITED = SkFontConfigInterface;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp
new file mode 100644
index 0000000000..19234788f5
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigInterface_direct_factory.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2009-2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkOnce.h"
+#include "src/ports/SkFontConfigInterface_direct.h"
+
+SkFontConfigInterface* SkFontConfigInterface::GetSingletonDirectInterface() {
+ static SkFontConfigInterface* singleton;
+ static SkOnce once;
+ once([]{ singleton = new SkFontConfigInterfaceDirect(nullptr); });
+ return singleton;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontConfigTypeface.h b/gfx/skia/skia/src/ports/SkFontConfigTypeface.h
new file mode 100644
index 0000000000..79550499df
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontConfigTypeface.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontConfigTypeface_DEFINED
+#define SkFontConfigTypeface_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/ports/SkFontConfigInterface.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+class SkFontDescriptor;
+
+class SkTypeface_FCI : public SkTypeface_FreeType {
+ sk_sp<SkFontConfigInterface> fFCI;
+ SkFontConfigInterface::FontIdentity fIdentity;
+ SkString fFamilyName;
+
+public:
+ static SkTypeface_FCI* Create(sk_sp<SkFontConfigInterface> fci,
+ const SkFontConfigInterface::FontIdentity& fi,
+ SkString familyName,
+ const SkFontStyle& style)
+ {
+ return new SkTypeface_FCI(std::move(fci), fi, std::move(familyName), style);
+ }
+
+ const SkFontConfigInterface::FontIdentity& getIdentity() const {
+ return fIdentity;
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+ return sk_sp<SkTypeface>(
+ new SkTypeface_FreeTypeStream(std::move(data), fFamilyName,
+ this->fontStyle(), this->isFixedPitch()));
+ }
+
+protected:
+ SkTypeface_FCI(sk_sp<SkFontConfigInterface> fci,
+ const SkFontConfigInterface::FontIdentity& fi,
+ SkString familyName,
+ const SkFontStyle& style)
+ : INHERITED(style, false)
+ , fFCI(std::move(fci))
+ , fIdentity(fi)
+ , fFamilyName(std::move(familyName)) {}
+
+ void onGetFamilyName(SkString* familyName) const override { *familyName = fFamilyName; }
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ std::unique_ptr<SkFontData> onMakeFontData() const override;
+
+private:
+ using INHERITED = SkTypeface_FreeType;
+};
+
+#endif // SkFontConfigTypeface_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp b/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp
new file mode 100644
index 0000000000..f46bf19e6c
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType.cpp
@@ -0,0 +1,2365 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBBHFactory.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkGraphics.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkTSearch.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkScalerContext.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/sfnt/SkSFNTHeader.h"
+#include "src/sfnt/SkTTCFHeader.h"
+#include "src/utils/SkCallableTraits.h"
+#include "src/utils/SkMatrix22.h"
+
+#include <memory>
+#include <optional>
+#include <tuple>
+
+#include <ft2build.h>
+#include <freetype/ftadvanc.h>
+#include <freetype/ftimage.h>
+#include <freetype/ftbitmap.h>
+#ifdef FT_COLOR_H // 2.10.0
+# include <freetype/ftcolor.h>
+#endif
+#include <freetype/freetype.h>
+#include <freetype/ftlcdfil.h>
+#include <freetype/ftmodapi.h>
+#include <freetype/ftmm.h>
+#include <freetype/ftoutln.h>
+#include <freetype/ftsizes.h>
+#include <freetype/ftsystem.h>
+#include <freetype/tttables.h>
+#include <freetype/t1tables.h>
+#include <freetype/ftfntfmt.h>
+
+using namespace skia_private;
+
+namespace {
+[[maybe_unused]] static inline const constexpr bool kSkShowTextBlitCoverage = false;
+
+using SkUniqueFTFace = std::unique_ptr<FT_FaceRec, SkFunctionObject<FT_Done_Face>>;
+using SkUniqueFTSize = std::unique_ptr<FT_SizeRec, SkFunctionObject<FT_Done_Size>>;
+}
+
+// SK_FREETYPE_MINIMUM_RUNTIME_VERSION 0x<major><minor><patch><flags>
+// Flag SK_FREETYPE_DLOPEN: also try dlopen to get newer features.
+#define SK_FREETYPE_DLOPEN (0x1)
+#ifndef SK_FREETYPE_MINIMUM_RUNTIME_VERSION
+# if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) || defined (SK_BUILD_FOR_GOOGLE3)
+# define SK_FREETYPE_MINIMUM_RUNTIME_VERSION (((FREETYPE_MAJOR) << 24) | ((FREETYPE_MINOR) << 16) | ((FREETYPE_PATCH) << 8))
+# else
+# define SK_FREETYPE_MINIMUM_RUNTIME_VERSION ((2 << 24) | (8 << 16) | (1 << 8) | (SK_FREETYPE_DLOPEN))
+# endif
+#endif
+#if SK_FREETYPE_MINIMUM_RUNTIME_VERSION & SK_FREETYPE_DLOPEN
+# include <dlfcn.h>
+#endif
+
+#ifdef TT_SUPPORT_COLRV1
+// FT_ClipBox and FT_Get_Color_Glyph_ClipBox introduced VER-2-11-0-18-g47cf8ebf4
+// FT_COLR_COMPOSITE_PLUS and renumbering introduced VER-2-11-0-21-ge40ae7569
+// FT_SIZEOF_LONG_LONG introduced VER-2-11-0-31-gffdac8d67
+// FT_PaintRadialGradient changed size and layout at VER-2-11-0-147-gd3d3ff76d
+// FT_STATIC_CAST introduced VER-2-11-0-172-g9079c5d91
+// So undefine TT_SUPPORT_COLRV1 before 2.11.1 but not if FT_STATIC_CAST is defined.
+#if (((FREETYPE_MAJOR) < 2) || \
+ ((FREETYPE_MAJOR) == 2 && (FREETYPE_MINOR) < 11) || \
+ ((FREETYPE_MAJOR) == 2 && (FREETYPE_MINOR) == 11 && (FREETYPE_PATCH) < 1)) && \
+ !defined(FT_STATIC_CAST)
+# undef TT_SUPPORT_COLRV1
+#endif
+#endif
+
+//#define ENABLE_GLYPH_SPEW // for tracing calls
+//#define DUMP_STRIKE_CREATION
+//#define SK_FONTHOST_FREETYPE_RUNTIME_VERSION
+//#define SK_GAMMA_APPLY_TO_A8
+
+#if 1
+ #define LOG_INFO(...)
+#else
+ #define LOG_INFO SkDEBUGF
+#endif
+
+static bool isLCD(const SkScalerContextRec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static SkScalar SkFT_FixedToScalar(FT_Fixed x) {
+ return SkFixedToScalar(x);
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+using FT_Alloc_size_t = SkCallableTraits<FT_Alloc_Func>::argument<1>::type;
+static_assert(std::is_same<FT_Alloc_size_t, long >::value ||
+ std::is_same<FT_Alloc_size_t, size_t>::value,"");
+
+extern "C" {
+ static void* sk_ft_alloc(FT_Memory, FT_Alloc_size_t size) {
+ return sk_malloc_canfail(size);
+ }
+ static void sk_ft_free(FT_Memory, void* block) {
+ sk_free(block);
+ }
+ static void* sk_ft_realloc(FT_Memory, FT_Alloc_size_t cur_size,
+ FT_Alloc_size_t new_size, void* block) {
+ return sk_realloc_throw(block, new_size);
+ }
+}
+FT_MemoryRec_ gFTMemory = { nullptr, sk_ft_alloc, sk_ft_free, sk_ft_realloc };
+
+class FreeTypeLibrary : SkNoncopyable {
+public:
+ FreeTypeLibrary() : fLibrary(nullptr) {
+ if (FT_New_Library(&gFTMemory, &fLibrary)) {
+ return;
+ }
+ FT_Add_Default_Modules(fLibrary);
+ FT_Set_Default_Properties(fLibrary);
+
+ // Subpixel anti-aliasing may be unfiltered until the LCD filter is set.
+ // Newer versions may still need this, so this test with side effects must come first.
+ // The default has changed over time, so this doesn't mean the same thing to all users.
+ FT_Library_SetLcdFilter(fLibrary, FT_LCD_FILTER_DEFAULT);
+ }
+ ~FreeTypeLibrary() {
+ if (fLibrary) {
+ FT_Done_Library(fLibrary);
+ }
+ }
+
+ FT_Library library() { return fLibrary; }
+
+private:
+ FT_Library fLibrary;
+
+ // FT_Library_SetLcdFilterWeights 2.4.0
+ // FT_LOAD_COLOR 2.5.0
+ // FT_Pixel_Mode::FT_PIXEL_MODE_BGRA 2.5.0
+ // Thread safety in 2.6.0
+ // freetype/ftfntfmt.h (rename) 2.6.0
+ // Direct header inclusion 2.6.1
+ // FT_Get_Var_Design_Coordinates 2.7.1
+ // FT_LOAD_BITMAP_METRICS_ONLY 2.7.1
+ // FT_Set_Default_Properties 2.7.2
+ // The 'light' hinting is vertical only from 2.8.0
+ // FT_Get_Var_Axis_Flags 2.8.1
+ // FT_VAR_AXIS_FLAG_HIDDEN was introduced in FreeType 2.8.1
+ // --------------------
+ // FT_Done_MM_Var 2.9.0 (Currenty setting ft_free to a known allocator.)
+ // freetype/ftcolor.h 2.10.0 (Currently assuming if compiled with FT_COLOR_H runtime available.)
+
+ // Ubuntu 18.04 2.8.1
+ // Debian 10 2.9.1
+ // openSUSE Leap 15.2 2.10.1
+ // Fedora 32 2.10.4
+ // RHEL 8 2.9.1
+};
+
+static SkMutex& f_t_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+static FreeTypeLibrary* gFTLibrary;
+
+///////////////////////////////////////////////////////////////////////////
+
+class SkTypeface_FreeType::FaceRec {
+public:
+ SkUniqueFTFace fFace;
+ FT_StreamRec fFTStream;
+ std::unique_ptr<SkStreamAsset> fSkStream;
+ FT_UShort fFTPaletteEntryCount = 0;
+ std::unique_ptr<SkColor[]> fSkPalette;
+
+ static std::unique_ptr<FaceRec> Make(const SkTypeface_FreeType* typeface);
+ ~FaceRec();
+
+private:
+ FaceRec(std::unique_ptr<SkStreamAsset> stream);
+ void setupAxes(const SkFontData& data);
+ void setupPalette(const SkFontData& data);
+
+ // Private to ref_ft_library and unref_ft_library
+ static int gFTCount;
+
+ // Caller must lock f_t_mutex() before calling this function.
+ static bool ref_ft_library() {
+ f_t_mutex().assertHeld();
+ SkASSERT(gFTCount >= 0);
+
+ if (0 == gFTCount) {
+ SkASSERT(nullptr == gFTLibrary);
+ gFTLibrary = new FreeTypeLibrary;
+ }
+ ++gFTCount;
+ return gFTLibrary->library();
+ }
+
+ // Caller must lock f_t_mutex() before calling this function.
+ static void unref_ft_library() {
+ f_t_mutex().assertHeld();
+ SkASSERT(gFTCount > 0);
+
+ --gFTCount;
+ if (0 == gFTCount) {
+ SkASSERT(nullptr != gFTLibrary);
+ delete gFTLibrary;
+ SkDEBUGCODE(gFTLibrary = nullptr;)
+ }
+ }
+};
+int SkTypeface_FreeType::FaceRec::gFTCount;
+
+extern "C" {
+ static unsigned long sk_ft_stream_io(FT_Stream ftStream,
+ unsigned long offset,
+ unsigned char* buffer,
+ unsigned long count)
+ {
+ SkStreamAsset* stream = static_cast<SkStreamAsset*>(ftStream->descriptor.pointer);
+
+ if (count) {
+ if (!stream->seek(offset)) {
+ return 0;
+ }
+ count = stream->read(buffer, count);
+ }
+ return count;
+ }
+
+ static void sk_ft_stream_close(FT_Stream) {}
+}
+
+SkTypeface_FreeType::FaceRec::FaceRec(std::unique_ptr<SkStreamAsset> stream)
+ : fSkStream(std::move(stream))
+{
+ sk_bzero(&fFTStream, sizeof(fFTStream));
+ fFTStream.size = fSkStream->getLength();
+ fFTStream.descriptor.pointer = fSkStream.get();
+ fFTStream.read = sk_ft_stream_io;
+ fFTStream.close = sk_ft_stream_close;
+
+ f_t_mutex().assertHeld();
+ ref_ft_library();
+}
+
+SkTypeface_FreeType::FaceRec::~FaceRec() {
+ f_t_mutex().assertHeld();
+ fFace.reset(); // Must release face before the library, the library frees existing faces.
+ unref_ft_library();
+}
+
+void SkTypeface_FreeType::FaceRec::setupAxes(const SkFontData& data) {
+ if (!(fFace->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS)) {
+ return;
+ }
+
+ // If a named variation is requested, don't overwrite the named variation's position.
+ if (data.getIndex() > 0xFFFF) {
+ return;
+ }
+
+ SkDEBUGCODE(
+ FT_MM_Var* variations = nullptr;
+ if (FT_Get_MM_Var(fFace.get(), &variations)) {
+ LOG_INFO("INFO: font %s claims variations, but none found.\n",
+ rec->fFace->family_name);
+ return;
+ }
+ UniqueVoidPtr autoFreeVariations(variations);
+
+ if (static_cast<FT_UInt>(data.getAxisCount()) != variations->num_axis) {
+ LOG_INFO("INFO: font %s has %d variations, but %d were specified.\n",
+ rec->fFace->family_name, variations->num_axis, data.getAxisCount());
+ return;
+ }
+ )
+
+ AutoSTMalloc<4, FT_Fixed> coords(data.getAxisCount());
+ for (int i = 0; i < data.getAxisCount(); ++i) {
+ coords[i] = data.getAxis()[i];
+ }
+ if (FT_Set_Var_Design_Coordinates(fFace.get(), data.getAxisCount(), coords.get())) {
+ LOG_INFO("INFO: font %s has variations, but specified variations could not be set.\n",
+ rec->fFace->family_name);
+ return;
+ }
+}
+
+void SkTypeface_FreeType::FaceRec::setupPalette(const SkFontData& data) {
+#ifdef FT_COLOR_H
+ FT_Palette_Data paletteData;
+ if (FT_Palette_Data_Get(fFace.get(), &paletteData)) {
+ return;
+ }
+
+ // Treat out of range values as 0. Still apply overrides.
+ // https://www.w3.org/TR/css-fonts-4/#base-palette-desc
+ FT_UShort basePaletteIndex = 0;
+ if (SkTFitsIn<FT_UShort>(data.getPaletteIndex()) &&
+ SkTo<FT_UShort>(data.getPaletteIndex()) < paletteData.num_palettes)
+ {
+ basePaletteIndex = data.getPaletteIndex();
+ }
+
+ FT_Color* ftPalette = nullptr;
+ if (FT_Palette_Select(fFace.get(), basePaletteIndex, &ftPalette)) {
+ return;
+ }
+ fFTPaletteEntryCount = paletteData.num_palette_entries;
+
+ for (int i = 0; i < data.getPaletteOverrideCount(); ++i) {
+ const SkFontArguments::Palette::Override& paletteOverride = data.getPaletteOverrides()[i];
+ if (0 <= paletteOverride.index && paletteOverride.index < fFTPaletteEntryCount) {
+ const SkColor& skColor = paletteOverride.color;
+ FT_Color& ftColor = ftPalette[paletteOverride.index];
+ ftColor.blue = SkColorGetB(skColor);
+ ftColor.green = SkColorGetG(skColor);
+ ftColor.red = SkColorGetR(skColor);
+ ftColor.alpha = SkColorGetA(skColor);
+ }
+ }
+
+ fSkPalette.reset(new SkColor[fFTPaletteEntryCount]);
+ for (int i = 0; i < fFTPaletteEntryCount; ++i) {
+ fSkPalette[i] = SkColorSetARGB(ftPalette[i].alpha,
+ ftPalette[i].red,
+ ftPalette[i].green,
+ ftPalette[i].blue);
+ }
+#endif
+}
+
+// Will return nullptr on failure
+// Caller must lock f_t_mutex() before calling this function.
+std::unique_ptr<SkTypeface_FreeType::FaceRec>
+SkTypeface_FreeType::FaceRec::Make(const SkTypeface_FreeType* typeface) {
+ f_t_mutex().assertHeld();
+
+ std::unique_ptr<SkFontData> data = typeface->makeFontData();
+ if (nullptr == data || !data->hasStream()) {
+ return nullptr;
+ }
+
+ std::unique_ptr<FaceRec> rec(new FaceRec(data->detachStream()));
+
+ FT_Open_Args args;
+ memset(&args, 0, sizeof(args));
+ const void* memoryBase = rec->fSkStream->getMemoryBase();
+ if (memoryBase) {
+ args.flags = FT_OPEN_MEMORY;
+ args.memory_base = (const FT_Byte*)memoryBase;
+ args.memory_size = rec->fSkStream->getLength();
+ } else {
+ args.flags = FT_OPEN_STREAM;
+ args.stream = &rec->fFTStream;
+ }
+
+ {
+ FT_Face rawFace;
+ FT_Error err = FT_Open_Face(gFTLibrary->library(), &args, data->getIndex(), &rawFace);
+ if (err) {
+ SK_TRACEFTR(err, "unable to open font '%x'", typeface->uniqueID());
+ return nullptr;
+ }
+ rec->fFace.reset(rawFace);
+ }
+ SkASSERT(rec->fFace);
+
+ rec->setupAxes(*data);
+ rec->setupPalette(*data);
+
+ // FreeType will set the charmap to the "most unicode" cmap if it exists.
+ // If there are no unicode cmaps, the charmap is set to nullptr.
+ // However, "symbol" cmaps should also be considered "fallback unicode" cmaps
+ // because they are effectively private use area only (even if they aren't).
+ // This is the last on the fallback list at
+ // https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cmap.html
+ if (!rec->fFace->charmap) {
+ FT_Select_Charmap(rec->fFace.get(), FT_ENCODING_MS_SYMBOL);
+ }
+
+ return rec;
+}
+
+class AutoFTAccess {
+public:
+ AutoFTAccess(const SkTypeface_FreeType* tf) : fFaceRec(nullptr) {
+ f_t_mutex().acquire();
+ fFaceRec = tf->getFaceRec();
+ }
+
+ ~AutoFTAccess() {
+ f_t_mutex().release();
+ }
+
+ FT_Face face() { return fFaceRec ? fFaceRec->fFace.get() : nullptr; }
+
+private:
+ SkTypeface_FreeType::FaceRec* fFaceRec;
+};
+
+///////////////////////////////////////////////////////////////////////////
+
+// Scaler context backed by a shared FT_Face. The constructor creates and
+// configures an FT_Size for this context; success() reports whether that
+// setup completed. All FT_Face access must happen under f_t_mutex().
+class SkScalerContext_FreeType : public SkScalerContext_FreeType_Base {
+public:
+ SkScalerContext_FreeType(sk_sp<SkTypeface_FreeType>,
+ const SkScalerContextEffects&,
+ const SkDescriptor* desc);
+ ~SkScalerContext_FreeType() override;
+
+ // True only if the constructor fully initialized the FT_Size and FT_Face.
+ bool success() const {
+ return fFTSize != nullptr && fFace != nullptr;
+ }
+
+protected:
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph, SkArenaAlloc*) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(const SkGlyph& glyph, SkPath* path) override;
+ sk_sp<SkDrawable> generateDrawable(const SkGlyph&) override;
+ void generateFontMetrics(SkFontMetrics*) override;
+
+private:
+ SkTypeface_FreeType::FaceRec* fFaceRec; // Borrowed face from the typeface's FaceRec.
+ FT_Face fFace; // Borrowed face from fFaceRec.
+ FT_Size fFTSize; // The size to apply to the fFace.
+ FT_Int fStrikeIndex; // The bitmap strike for the fFace (or -1 if none).
+
+ /** The rest of the matrix after FreeType handles the size.
+ * With outline font rasterization this is handled by FreeType with FT_Set_Transform.
+ * With bitmap only fonts this matrix must be applied to scale the bitmap.
+ */
+ SkMatrix fMatrix22Scalar;
+ /** Same as fMatrix22Scalar, but in FreeType units and space. */
+ FT_Matrix fMatrix22;
+ /** The actual size requested. */
+ SkVector fScale;
+
+ uint32_t fLoadGlyphFlags; // FT_LOAD_* flags computed once in the constructor.
+ bool fDoLinearMetrics; // Use linearly scaled (unhinted) advances.
+ bool fLCDIsVert; // LCD subpixel layout is vertical (RGB stacked).
+
+ FT_Error setupSize();
+ static bool getBoundsOfCurrentOutlineGlyph(FT_GlyphSlot glyph, SkRect* bounds);
+ static void setGlyphBounds(SkGlyph* glyph, SkRect* bounds, bool subpixel);
+ bool getCBoxForLetter(char letter, FT_BBox* bbox);
+ // Caller must lock f_t_mutex() before calling this function.
+ void updateGlyphBoundsIfLCD(SkGlyph* glyph);
+ // Caller must lock f_t_mutex() before calling this function.
+ // update FreeType2 glyph slot with glyph emboldened
+ void emboldenIfNeeded(FT_Face face, FT_GlyphSlot glyph, SkGlyphID gid);
+ bool shouldSubpixelBitmap(const SkGlyph&, const SkMatrix&);
+};
+
+///////////////////////////////////////////////////////////////////////////
+
+// Embedding is allowed unless the OS/2 fsType flags restrict licensing or
+// limit the font to bitmap embedding only.
+static bool canEmbed(FT_Face face) {
+ FT_UShort fsType = FT_Get_FSType_Flags(face);
+ return (fsType & (FT_FSTYPE_RESTRICTED_LICENSE_EMBEDDING |
+ FT_FSTYPE_BITMAP_EMBEDDING_ONLY)) == 0;
+}
+
+// Subsetting is allowed unless the OS/2 fsType no-subsetting bit is set.
+static bool canSubset(FT_Face face) {
+ FT_UShort fsType = FT_Get_FSType_Flags(face);
+ return (fsType & FT_FSTYPE_NO_SUBSETTING) == 0;
+}
+
+// Map FreeType's X11 font-format string ("Type 1", "CFF", "TrueType", ...)
+// onto Skia's FontType enum; anything unrecognized becomes kOther_Font.
+// NOTE(review): FT_Get_X11_Font_Format can return nullptr for an invalid
+// face, which would make strcmp below UB — confirm callers always pass a
+// valid face.
+static SkAdvancedTypefaceMetrics::FontType get_font_type(FT_Face face) {
+ const char* fontType = FT_Get_X11_Font_Format(face);
+ static struct { const char* s; SkAdvancedTypefaceMetrics::FontType t; } values[] = {
+ { "Type 1", SkAdvancedTypefaceMetrics::kType1_Font },
+ { "CID Type 1", SkAdvancedTypefaceMetrics::kType1CID_Font },
+ { "CFF", SkAdvancedTypefaceMetrics::kCFF_Font },
+ { "TrueType", SkAdvancedTypefaceMetrics::kTrueType_Font },
+ };
+ for(const auto& v : values) { if (strcmp(fontType, v.s) == 0) { return v.t; } }
+ return SkAdvancedTypefaceMetrics::kOther_Font;
+}
+
+// Returns true if the raw font data begins with a standard sfnt/TTC tag,
+// i.e. it is plain TrueType/OpenType and not a wrapped format such as
+// WOFF/WOFF2 (which FreeType still reports as "TrueType").
+static bool is_opentype_font_data_standard_format(const SkTypeface& typeface) {
+ // FreeType reports TrueType for any data that can be decoded to TrueType or OpenType.
+ // However, there are alternate data formats for OpenType, like wOFF and wOF2.
+ std::unique_ptr<SkStreamAsset> stream = typeface.openStream(nullptr);
+ if (!stream) {
+ return false;
+ }
+ // Only the leading 4-byte tag is needed to classify the container.
+ char buffer[4];
+ if (stream->read(buffer, 4) < 4) {
+ return false;
+ }
+
+ SkFourByteTag tag = SkSetFourByteTag(buffer[0], buffer[1], buffer[2], buffer[3]);
+ SK_OT_ULONG otTag = SkEndian_SwapBE32(tag);
+ return otTag == SkSFNTHeader::fontType_WindowsTrueType::TAG ||
+ otTag == SkSFNTHeader::fontType_MacTrueType::TAG ||
+ otTag == SkSFNTHeader::fontType_PostScript::TAG ||
+ otTag == SkSFNTHeader::fontType_OpenTypeCFF::TAG ||
+ otTag == SkTTCFHeader::TAG;
+}
+
+// Collect PDF-oriented typeface metrics (names, flags, style, italic angle,
+// ascent/descent, cap height, bbox) from the FT_Face. Values here are in the
+// face's unscaled font units. Returns nullptr if no face is available.
+std::unique_ptr<SkAdvancedTypefaceMetrics> SkTypeface_FreeType::onGetAdvancedMetrics() const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info(new SkAdvancedTypefaceMetrics);
+ info->fPostScriptName.set(FT_Get_Postscript_Name(face));
+ info->fFontName = info->fPostScriptName;
+
+ if (FT_HAS_MULTIPLE_MASTERS(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kVariable_FontFlag;
+ }
+ if (!canEmbed(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ if (!canSubset(face)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag;
+ }
+
+ info->fType = get_font_type(face);
+ // Flag WOFF/WOFF2-style containers that FreeType decodes as TrueType.
+ if (info->fType == SkAdvancedTypefaceMetrics::kTrueType_Font &&
+ !is_opentype_font_data_standard_format(*this))
+ {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kAltDataFormat_FontFlag;
+ }
+
+ info->fStyle = (SkAdvancedTypefaceMetrics::StyleFlags)0;
+ if (FT_IS_FIXED_WIDTH(face)) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ if (face->style_flags & FT_STYLE_FLAG_ITALIC) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+
+ // Italic angle: prefer the Type1 PS font info, then the 'post' table.
+ PS_FontInfoRec psFontInfo;
+ TT_Postscript* postTable;
+ if (FT_Get_PS_Font_Info(face, &psFontInfo) == 0) {
+ info->fItalicAngle = psFontInfo.italic_angle;
+ } else if ((postTable = (TT_Postscript*)FT_Get_Sfnt_Table(face, ft_sfnt_post)) != nullptr) {
+ info->fItalicAngle = SkFixedFloorToInt(postTable->italicAngle);
+ } else {
+ info->fItalicAngle = 0;
+ }
+
+ info->fAscent = face->ascender;
+ info->fDescent = face->descender;
+
+ // Cap height: prefer the PCLT table, else OS/2 (sCapHeight needs v2+).
+ TT_PCLT* pcltTable;
+ TT_OS2* os2Table;
+ if ((pcltTable = (TT_PCLT*)FT_Get_Sfnt_Table(face, ft_sfnt_pclt)) != nullptr) {
+ info->fCapHeight = pcltTable->CapHeight;
+ uint8_t serif_style = pcltTable->SerifStyle & 0x3F;
+ if (2 <= serif_style && serif_style <= 6) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ } else if (9 <= serif_style && serif_style <= 12) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+ } else if (((os2Table = (TT_OS2*)FT_Get_Sfnt_Table(face, ft_sfnt_os2)) != nullptr) &&
+ // sCapHeight is available only when version 2 or later.
+ os2Table->version != 0xFFFF &&
+ os2Table->version >= 2)
+ {
+ info->fCapHeight = os2Table->sCapHeight;
+ }
+ // Note the LTRB order: top is yMax, bottom is yMin (y grows up in font units).
+ info->fBBox = SkIRect::MakeLTRB(face->bbox.xMin, face->bbox.yMax,
+ face->bbox.xMax, face->bbox.yMin);
+ return info;
+}
+
+// Fill dstArray (indexed by glyph ID, length num_glyphs) with the first
+// Unicode code point that maps to each glyph via the active charmap.
+// Unmapped glyphs stay 0.
+void SkTypeface_FreeType::getGlyphToUnicodeMap(SkUnichar* dstArray) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return;
+ }
+
+ FT_Long numGlyphs = face->num_glyphs;
+ // A null dstArray is only tolerated when there are no glyphs to write.
+ if (!dstArray) { SkASSERT(numGlyphs == 0); }
+ sk_bzero(dstArray, sizeof(SkUnichar) * numGlyphs);
+
+ FT_UInt glyphIndex;
+ SkUnichar charCode = FT_Get_First_Char(face, &glyphIndex);
+ while (glyphIndex) {
+ SkASSERT(glyphIndex < SkToUInt(numGlyphs));
+ // Use the first character that maps to this glyphID. https://crbug.com/359065
+ if (0 == dstArray[glyphIndex]) {
+ dstArray[glyphIndex] = charCode;
+ }
+ charCode = FT_Get_Next_Char(face, charCode, &glyphIndex);
+ }
+}
+
+// Fill dstArray (indexed by glyph ID, length num_glyphs) with each glyph's
+// PostScript name. Leaves entries untouched if the face has no glyph names.
+void SkTypeface_FreeType::getPostScriptGlyphNames(SkString* dstArray) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return;
+ }
+
+ FT_Long numGlyphs = face->num_glyphs;
+ // A null dstArray is only tolerated when there are no glyphs to write.
+ if (!dstArray) { SkASSERT(numGlyphs == 0); }
+
+ if (FT_HAS_GLYPH_NAMES(face)) {
+ for (int gID = 0; gID < numGlyphs; ++gID) {
+ char glyphName[128]; // PS limit for names is 127 bytes.
+ FT_Get_Glyph_Name(face, gID, glyphName, 128);
+ dstArray[gID] = glyphName;
+ }
+ }
+}
+
+// Report the face's PostScript name. Returns false when no face or no name
+// is available; writes the name only if the out-param is non-null.
+bool SkTypeface_FreeType::onGetPostScriptName(SkString* skPostScriptName) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return false;
+ }
+
+ const char* ftPostScriptName = FT_Get_Postscript_Name(face);
+ if (!ftPostScriptName) {
+ return false;
+ }
+ if (skPostScriptName) {
+ *skPostScriptName = ftPostScriptName;
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+// True iff both scalars are exactly zero (helper for axis-alignment check).
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContextRec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+// Create a FreeType scaler context; if its FT_Size/FT_Face setup failed,
+// fall back to an empty context so callers always get a usable object.
+std::unique_ptr<SkScalerContext> SkTypeface_FreeType::onCreateScalerContext(
+ const SkScalerContextEffects& effects, const SkDescriptor* desc) const
+{
+ auto c = std::make_unique<SkScalerContext_FreeType>(
+ sk_ref_sp(const_cast<SkTypeface_FreeType*>(this)), effects, desc);
+ if (c->success()) {
+ return std::move(c);
+ }
+ return SkScalerContext::MakeEmpty(
+ sk_ref_sp(const_cast<SkTypeface_FreeType*>(this)), effects, desc);
+}
+
+/** Copy the design variation coordinates into 'coordinates'.
+ *
+ * @param coordinates the buffer into which to write the design variation coordinates.
+ * @param coordinateCount the number of entries available through 'coordinates'.
+ *
+ * @return The number of axes, or -1 if there is an error.
+ * If 'coordinates != nullptr' and 'coordinateCount >= numAxes' then 'coordinates' will be
+ * filled with the variation coordinates describing the position of this typeface in design
+ * variation space. It is possible the number of axes can be retrieved but actual position
+ * cannot.
+ *
+ * The caller's AutoFTAccess keeps f_t_mutex() held for the duration.
+ */
+static int GetVariationDesignPosition(AutoFTAccess& fta,
+ SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount)
+{
+ FT_Face face = fta.face();
+ if (!face) {
+ return -1;
+ }
+
+ // Non-variable faces trivially have zero axes.
+ if (!(face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS)) {
+ return 0;
+ }
+
+ FT_MM_Var* variations = nullptr;
+ if (FT_Get_MM_Var(face, &variations)) {
+ return -1;
+ }
+ UniqueVoidPtr autoFreeVariations(variations);
+
+ // Buffer absent or too small: report the axis count only.
+ if (!coordinates || coordinateCount < SkToInt(variations->num_axis)) {
+ return variations->num_axis;
+ }
+
+ AutoSTMalloc<4, FT_Fixed> coords(variations->num_axis);
+ if (FT_Get_Var_Design_Coordinates(face, variations->num_axis, coords.get())) {
+ return -1;
+ }
+ for (FT_UInt i = 0; i < variations->num_axis; ++i) {
+ coordinates[i].axis = variations->axis[i].tag;
+ coordinates[i].value = SkFixedToScalar(coords[i]);
+ }
+
+ return variations->num_axis;
+}
+
+// Build an SkFontData clone of this typeface with variation axis values and
+// palette settings resolved from 'args', using the current design position
+// as the starting point when it is available. Returns nullptr on failure.
+std::unique_ptr<SkFontData> SkTypeface_FreeType::cloneFontData(const SkFontArguments& args) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return nullptr;
+ }
+
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!Scanner::GetAxes(face, &axisDefinitions)) {
+ return nullptr;
+ }
+ int axisCount = axisDefinitions.size();
+
+ AutoSTMalloc<4, SkFontArguments::VariationPosition::Coordinate> currentPosition(axisCount);
+ int currentAxisCount = GetVariationDesignPosition(fta, currentPosition, axisCount);
+
+ SkString name;
+ AutoSTMalloc<4, SkFixed> axisValues(axisCount);
+ // Only trust the current position if it reported exactly axisCount axes.
+ Scanner::computeAxisValues(axisDefinitions, args.getVariationDesignPosition(), axisValues, name,
+ currentAxisCount == axisCount ? currentPosition.get() : nullptr);
+
+ int ttcIndex;
+ std::unique_ptr<SkStreamAsset> stream = this->openStream(&ttcIndex);
+
+ return std::make_unique<SkFontData>(std::move(stream),
+ ttcIndex,
+ args.getPalette().index,
+ axisValues.get(),
+ axisCount,
+ args.getPalette().overrides,
+ args.getPalette().overrideCount);
+}
+
+// Normalize the scaler request for FreeType: cap huge text sizes, downgrade
+// hinting where it would look wrong, and drop pre-blend for non-LCD masks.
+void SkTypeface_FreeType::onFilterRec(SkScalerContextRec* rec) const {
+ //BOGUS: http://code.google.com/p/chromium/issues/detail?id=121119
+ //Cap the requested size as larger sizes give bogus values.
+ //Remove when http://code.google.com/p/skia/issues/detail?id=554 is fixed.
+ //Note that this also currently only protects against large text size requests,
+ //the total matrix is not taken into account here.
+ if (rec->fTextSize > SkIntToScalar(1 << 14)) {
+ rec->fTextSize = SkIntToScalar(1 << 14);
+ }
+
+ SkFontHinting h = rec->getHinting();
+ if (SkFontHinting::kFull == h && !isLCD(*rec)) {
+ // collapse full->normal hinting if we're not doing LCD
+ h = SkFontHinting::kNormal;
+ }
+
+ // rotated text looks bad with hinting, so we disable it as needed
+ if (!isAxisAligned(*rec)) {
+ h = SkFontHinting::kNone;
+ }
+ rec->setHinting(h);
+
+#ifndef SK_GAMMA_APPLY_TO_A8
+ if (!isLCD(*rec)) {
+ // SRGBTODO: Is this correct? Do we want contrast boost?
+ rec->ignorePreBlend();
+ }
+#endif
+}
+
+// Return the face's units-per-em, falling back to the 'head' table when
+// FreeType reports 0 (seen with some bitmap-only fonts).
+// NOTE(review): the value is accumulated as SkScalar but the function
+// returns int, so the result is implicitly truncated — confirm intended.
+int SkTypeface_FreeType::GetUnitsPerEm(FT_Face face) {
+ SkASSERT(face);
+
+ SkScalar upem = SkIntToScalar(face->units_per_EM);
+ // At least some versions of FreeType set face->units_per_EM to 0 for bitmap only fonts.
+ if (upem == 0) {
+ TT_Header* ttHeader = (TT_Header*)FT_Get_Sfnt_Table(face, ft_sfnt_head);
+ if (ttHeader) {
+ upem = SkIntToScalar(ttHeader->Units_Per_EM);
+ }
+ }
+ return upem;
+}
+
+// Units-per-em for this typeface, or 0 when no face is available.
+int SkTypeface_FreeType::onGetUPEM() const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return 0;
+ }
+ return GetUnitsPerEm(face);
+}
+
+// Fill adjustments[i] with the unscaled (font-unit) horizontal kerning
+// between glyphs[i] and glyphs[i+1]. Returns false if the face has no
+// kerning or any pair lookup fails (adjustments may be partially written).
+bool SkTypeface_FreeType::onGetKerningPairAdjustments(const uint16_t glyphs[],
+ int count, int32_t adjustments[]) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face || !FT_HAS_KERNING(face)) {
+ return false;
+ }
+
+ for (int i = 0; i < count - 1; ++i) {
+ FT_Vector delta;
+ FT_Error err = FT_Get_Kerning(face, glyphs[i], glyphs[i+1],
+ FT_KERNING_UNSCALED, &delta);
+ if (err) {
+ return false;
+ }
+ adjustments[i] = delta.x;
+ }
+ return true;
+}
+
+/** Returns the bitmap strike equal to or just larger than the requested size. */
+static FT_Int chooseBitmapStrike(FT_Face face, FT_F26Dot6 scaleY) {
+ if (face == nullptr) {
+ LOG_INFO("chooseBitmapStrike aborted due to nullptr face.\n");
+ return -1;
+ }
+
+ FT_Pos requestedPPEM = scaleY; // FT_Bitmap_Size::y_ppem is in 26.6 format.
+ FT_Int chosenStrikeIndex = -1;
+ FT_Pos chosenPPEM = 0;
+ for (FT_Int strikeIndex = 0; strikeIndex < face->num_fixed_sizes; ++strikeIndex) {
+ FT_Pos strikePPEM = face->available_sizes[strikeIndex].y_ppem;
+ if (strikePPEM == requestedPPEM) {
+ // exact match - our search stops here
+ return strikeIndex;
+ } else if (chosenPPEM < requestedPPEM) {
+ // attempt to increase chosenPPEM
+ if (chosenPPEM < strikePPEM) {
+ chosenPPEM = strikePPEM;
+ chosenStrikeIndex = strikeIndex;
+ }
+ } else {
+ // attempt to decrease chosenPPEM, but not below requestedPPEM
+ if (requestedPPEM < strikePPEM && strikePPEM < chosenPPEM) {
+ chosenPPEM = strikePPEM;
+ chosenStrikeIndex = strikeIndex;
+ }
+ }
+ }
+ return chosenStrikeIndex;
+}
+
+// Constructor: under f_t_mutex(), resolve the shared FaceRec, compute the
+// FT_LOAD_* flags from the scaler rec, create an FT_Size, and configure
+// either scalable sizing (FT_Set_Char_Size) or a bitmap strike. On any
+// failure it returns early, leaving fFTSize/fFace null so success() is false.
+SkScalerContext_FreeType::SkScalerContext_FreeType(sk_sp<SkTypeface_FreeType> typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext_FreeType_Base(std::move(typeface), effects, desc)
+ , fFace(nullptr)
+ , fFTSize(nullptr)
+ , fStrikeIndex(-1)
+{
+ SkAutoMutexExclusive ac(f_t_mutex());
+ fFaceRec = static_cast<SkTypeface_FreeType*>(this->getTypeface())->getFaceRec();
+
+ // load the font file
+ if (nullptr == fFaceRec) {
+ LOG_INFO("Could not create FT_Face.\n");
+ return;
+ }
+
+ fLCDIsVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+
+ // compute the flags we send to Load_Glyph
+ bool linearMetrics = this->isLinearMetrics();
+ {
+ FT_Int32 loadFlags = FT_LOAD_DEFAULT;
+
+ if (SkMask::kBW_Format == fRec.fMaskFormat) {
+ // See http://code.google.com/p/chromium/issues/detail?id=43252#c24
+ loadFlags = FT_LOAD_TARGET_MONO;
+ if (fRec.getHinting() == SkFontHinting::kNone) {
+ loadFlags |= FT_LOAD_NO_HINTING;
+ linearMetrics = true;
+ }
+ } else {
+ // Map Skia hinting levels onto FreeType load targets; unhinted and
+ // light hinting also enable linear (scalable) metrics.
+ switch (fRec.getHinting()) {
+ case SkFontHinting::kNone:
+ loadFlags = FT_LOAD_NO_HINTING;
+ linearMetrics = true;
+ break;
+ case SkFontHinting::kSlight:
+ loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
+ linearMetrics = true;
+ break;
+ case SkFontHinting::kNormal:
+ loadFlags = FT_LOAD_TARGET_NORMAL;
+ break;
+ case SkFontHinting::kFull:
+ loadFlags = FT_LOAD_TARGET_NORMAL;
+ if (isLCD(fRec)) {
+ if (fLCDIsVert) {
+ loadFlags = FT_LOAD_TARGET_LCD_V;
+ } else {
+ loadFlags = FT_LOAD_TARGET_LCD;
+ }
+ }
+ break;
+ default:
+ LOG_INFO("---------- UNKNOWN hinting %d\n", fRec.getHinting());
+ break;
+ }
+ }
+
+ if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
+ loadFlags |= FT_LOAD_FORCE_AUTOHINT;
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ } else {
+ loadFlags |= FT_LOAD_NO_AUTOHINT;
+#endif
+ }
+
+ if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
+ loadFlags |= FT_LOAD_NO_BITMAP;
+ }
+
+ // Always using FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
+ // advances, as fontconfig and cairo do.
+ // See http://code.google.com/p/skia/issues/detail?id=222.
+ loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;
+
+ // Use vertical layout if requested.
+ if (this->isVertical()) {
+ loadFlags |= FT_LOAD_VERTICAL_LAYOUT;
+ }
+
+ fLoadGlyphFlags = loadFlags;
+ }
+
+ // A per-context FT_Size lets several contexts share one FT_Face at
+ // different sizes; ownership transfers to fFTSize only on full success.
+ SkUniqueFTSize ftSize([this]() -> FT_Size {
+ FT_Size size;
+ FT_Error err = FT_New_Size(fFaceRec->fFace.get(), &size);
+ if (err != 0) {
+ SK_TRACEFTR(err, "FT_New_Size(%s) failed.", fFaceRec->fFace->family_name);
+ return nullptr;
+ }
+ return size;
+ }());
+ if (nullptr == ftSize) {
+ LOG_INFO("Could not create FT_Size.\n");
+ return;
+ }
+
+ FT_Error err = FT_Activate_Size(ftSize.get());
+ if (err != 0) {
+ SK_TRACEFTR(err, "FT_Activate_Size(%s) failed.", fFaceRec->fFace->family_name);
+ return;
+ }
+
+ fRec.computeMatrices(SkScalerContextRec::PreMatrixScale::kFull, &fScale, &fMatrix22Scalar);
+ FT_F26Dot6 scaleX = SkScalarToFDot6(fScale.fX);
+ FT_F26Dot6 scaleY = SkScalarToFDot6(fScale.fY);
+
+ if (FT_IS_SCALABLE(fFaceRec->fFace)) {
+ err = FT_Set_Char_Size(fFaceRec->fFace.get(), scaleX, scaleY, 72, 72);
+ if (err != 0) {
+ SK_TRACEFTR(err, "FT_Set_CharSize(%s, %f, %f) failed.",
+ fFaceRec->fFace->family_name, fScale.fX, fScale.fY);
+ return;
+ }
+
+ // Adjust the matrix to reflect the actually chosen scale.
+ // FreeType currently does not allow requesting sizes less than 1, this allow for scaling.
+ // Don't do this at all sizes as that will interfere with hinting.
+ if (fScale.fX < 1 || fScale.fY < 1) {
+ SkScalar upem = fFaceRec->fFace->units_per_EM;
+ FT_Size_Metrics& ftmetrics = fFaceRec->fFace->size->metrics;
+ SkScalar x_ppem = upem * SkFT_FixedToScalar(ftmetrics.x_scale) / 64.0f;
+ SkScalar y_ppem = upem * SkFT_FixedToScalar(ftmetrics.y_scale) / 64.0f;
+ fMatrix22Scalar.preScale(fScale.x() / x_ppem, fScale.y() / y_ppem);
+ }
+
+ // FT_LOAD_COLOR with scalable fonts means allow SVG.
+ // It also implies attempt to render COLR if available, but this is not used.
+#if defined(FT_CONFIG_OPTION_SVG)
+ if (SkGraphics::GetOpenTypeSVGDecoderFactory()) {
+ fLoadGlyphFlags |= FT_LOAD_COLOR;
+ }
+#endif
+ } else if (FT_HAS_FIXED_SIZES(fFaceRec->fFace)) {
+ fStrikeIndex = chooseBitmapStrike(fFaceRec->fFace.get(), scaleY);
+ if (fStrikeIndex == -1) {
+ LOG_INFO("No glyphs for font \"%s\" size %f.\n",
+ fFaceRec->fFace->family_name, fScale.fY);
+ return;
+ }
+
+ err = FT_Select_Size(fFaceRec->fFace.get(), fStrikeIndex);
+ if (err != 0) {
+ SK_TRACEFTR(err, "FT_Select_Size(%s, %d) failed.",
+ fFaceRec->fFace->family_name, fStrikeIndex);
+ fStrikeIndex = -1;
+ return;
+ }
+
+ // Adjust the matrix to reflect the actually chosen scale.
+ // It is likely that the ppem chosen was not the one requested, this allows for scaling.
+ fMatrix22Scalar.preScale(fScale.x() / fFaceRec->fFace->size->metrics.x_ppem,
+ fScale.y() / fFaceRec->fFace->size->metrics.y_ppem);
+
+ // FreeType does not provide linear metrics for bitmap fonts.
+ linearMetrics = false;
+
+ // FreeType documentation says:
+ // FT_LOAD_NO_BITMAP -- Ignore bitmap strikes when loading.
+ // Bitmap-only fonts ignore this flag.
+ //
+ // However, in FreeType 2.5.1 color bitmap only fonts do not ignore this flag.
+ // Force this flag off for bitmap only fonts.
+ fLoadGlyphFlags &= ~FT_LOAD_NO_BITMAP;
+
+ // Color bitmaps are supported.
+ fLoadGlyphFlags |= FT_LOAD_COLOR;
+ } else {
+ LOG_INFO("Unknown kind of font \"%s\" size %f.\n", fFaceRec->fFace->family_name, fScale.fY);
+ return;
+ }
+
+ // Convert the residual matrix to FreeType 16.16 form; FreeType's y axis
+ // points up, hence the negated skew terms.
+ fMatrix22.xx = SkScalarToFixed(fMatrix22Scalar.getScaleX());
+ fMatrix22.xy = SkScalarToFixed(-fMatrix22Scalar.getSkewX());
+ fMatrix22.yx = SkScalarToFixed(-fMatrix22Scalar.getSkewY());
+ fMatrix22.yy = SkScalarToFixed(fMatrix22Scalar.getScaleY());
+
+ // Success: commit the FT_Size and face so success() reports true.
+ fFTSize = ftSize.release();
+ fFace = fFaceRec->fFace.get();
+ fDoLinearMetrics = linearMetrics;
+}
+
+// Destructor: under f_t_mutex(), release this context's FT_Size. The
+// FaceRec/FT_Face are owned by the typeface, so only the pointer is cleared.
+SkScalerContext_FreeType::~SkScalerContext_FreeType() {
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (fFTSize != nullptr) {
+ FT_Done_Size(fFTSize);
+ }
+
+ fFaceRec = nullptr;
+}
+
+/* We call this before each use of the fFace, since we may be sharing
+ this face with other context (at different sizes).
+ Requires f_t_mutex() to be held; activates this context's FT_Size and
+ installs the residual 2x2 transform on the face. Returns 0 on success.
+*/
+FT_Error SkScalerContext_FreeType::setupSize() {
+ f_t_mutex().assertHeld();
+ FT_Error err = FT_Activate_Size(fFTSize);
+ if (err != 0) {
+ return err;
+ }
+ FT_Set_Transform(fFace, &fMatrix22, nullptr);
+ return 0;
+}
+
+// Fast-path advance: only valid when linear metrics are in use. Returns
+// false to make the caller fall back to full generateMetrics(); returns
+// true with zeroed metrics if the size cannot be activated.
+bool SkScalerContext_FreeType::generateAdvance(SkGlyph* glyph) {
+ /* unhinted and light hinted text have linearly scaled advances
+ * which are very cheap to compute with some font formats...
+ */
+ if (!fDoLinearMetrics) {
+ return false;
+ }
+
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (this->setupSize()) {
+ glyph->zeroMetrics();
+ return true;
+ }
+
+ FT_Error error;
+ FT_Fixed advance;
+
+ // FAST_ONLY: fail (and fall back) rather than load the glyph outline.
+ error = FT_Get_Advance( fFace, glyph->getGlyphID(),
+ fLoadGlyphFlags | FT_ADVANCE_FLAG_FAST_ONLY,
+ &advance );
+
+ if (error != 0) {
+ return false;
+ }
+
+ // Map the 16.16 advance through the residual matrix into device space.
+ const SkScalar advanceScalar = SkFT_FixedToScalar(advance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getScaleX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getSkewY() * advanceScalar);
+ return true;
+}
+
+// Compute the control-box bounds of the outline currently in 'glyph',
+// converted from 26.6 units to Skia's y-down space. Returns false for an
+// empty outline (or asserts if the slot does not hold an outline).
+bool SkScalerContext_FreeType::getBoundsOfCurrentOutlineGlyph(FT_GlyphSlot glyph, SkRect* bounds) {
+ if (glyph->format != FT_GLYPH_FORMAT_OUTLINE) {
+ SkASSERT(false);
+ return false;
+ }
+ if (0 == glyph->outline.n_contours) {
+ return false;
+ }
+
+ FT_BBox bbox;
+ FT_Outline_Get_CBox(&glyph->outline, &bbox);
+ // Flip y: FreeType is y-up, Skia is y-down.
+ *bounds = SkRect::MakeLTRB(SkFDot6ToScalar(bbox.xMin), -SkFDot6ToScalar(bbox.yMax),
+ SkFDot6ToScalar(bbox.xMax), -SkFDot6ToScalar(bbox.yMin));
+ return true;
+}
+
+// Load the outline glyph for 'letter' (applying emboldening if configured)
+// and report its control box in 26.6 units. Returns false if the character
+// is unmapped, fails to load, or is not an outline.
+bool SkScalerContext_FreeType::getCBoxForLetter(char letter, FT_BBox* bbox) {
+ const FT_UInt glyph_id = FT_Get_Char_Index(fFace, letter);
+ if (!glyph_id) {
+ return false;
+ }
+ if (FT_Load_Glyph(fFace, glyph_id, fLoadGlyphFlags)) {
+ return false;
+ }
+ if (fFace->glyph->format != FT_GLYPH_FORMAT_OUTLINE) {
+ return false;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph, SkTo<SkGlyphID>(glyph_id));
+ FT_Outline_Get_CBox(&fFace->glyph->outline, bbox);
+ return true;
+}
+
+// Write integer bounds into the glyph from 'bounds' (mutated in place when
+// subpixel offsets apply). Bounds that overflow the glyph's narrow integer
+// fields collapse to empty rather than wrapping.
+void SkScalerContext_FreeType::setGlyphBounds(SkGlyph* glyph, SkRect* bounds, bool subpixel) {
+ SkIRect irect;
+ if (bounds->isEmpty()) {
+ irect = SkIRect::MakeEmpty();
+ } else {
+ if (subpixel) {
+ bounds->offset(SkFixedToScalar(glyph->getSubXFixed()),
+ SkFixedToScalar(glyph->getSubYFixed()));
+ }
+
+ irect = bounds->roundOut();
+ if (!SkTFitsIn<decltype(glyph->fWidth )>(irect.width ()) ||
+ !SkTFitsIn<decltype(glyph->fHeight)>(irect.height()) ||
+ !SkTFitsIn<decltype(glyph->fTop )>(irect.top ()) ||
+ !SkTFitsIn<decltype(glyph->fLeft )>(irect.left ()) )
+ {
+ irect = SkIRect::MakeEmpty();
+ }
+ }
+ glyph->fWidth = SkToU16(irect.width ());
+ glyph->fHeight = SkToU16(irect.height());
+ glyph->fTop = SkToS16(irect.top ());
+ glyph->fLeft = SkToS16(irect.left ());
+}
+
+// Non-empty LCD glyphs get one extra pixel of padding on each side along the
+// subpixel axis to make room for the LCD filter.
+void SkScalerContext_FreeType::updateGlyphBoundsIfLCD(SkGlyph* glyph) {
+ if (glyph->fMaskFormat == SkMask::kLCD16_Format &&
+ glyph->fWidth > 0 && glyph->fHeight > 0)
+ {
+ if (fLCDIsVert) {
+ glyph->fHeight += 2;
+ glyph->fTop -= 1;
+ } else {
+ glyph->fWidth += 2;
+ glyph->fLeft -= 1;
+ }
+ }
+}
+
+// Decide whether a bitmap glyph should be resampled at a subpixel offset:
+// both possible (bitmap format, subpixel requested, nonzero offset) and
+// worthwhile (non-scalable face, or the bitmap is being transformed anyway).
+bool SkScalerContext_FreeType::shouldSubpixelBitmap(const SkGlyph& glyph, const SkMatrix& matrix) {
+ // If subpixel rendering of a bitmap *can* be done.
+ bool mechanism = fFace->glyph->format == FT_GLYPH_FORMAT_BITMAP &&
+ this->isSubpixel() &&
+ (glyph.getSubXFixed() || glyph.getSubYFixed());
+
+ // If subpixel rendering of a bitmap *should* be done.
+ // 1. If the face is not scalable then always allow subpixel rendering.
+ // Otherwise, if the font has an 8ppem strike 7 will subpixel render but 8 won't.
+ // 2. If the matrix is already not identity the bitmap will already be resampled,
+ // so resampling slightly differently shouldn't make much difference.
+ bool policy = !FT_IS_SCALABLE(fFace) || !matrix.isIdentity();
+
+ return mechanism && policy;
+}
+
+// Compute a glyph's bounds, mask format, and advances. Handles, in order:
+// COLRv1 (via ClipBox or graph traversal), COLRv0 (union of layer boxes),
+// then the base glyph as outline, bitmap, or SVG. Any failure zeroes the
+// glyph's metrics and returns.
+void SkScalerContext_FreeType::generateMetrics(SkGlyph* glyph, SkArenaAlloc* alloc) {
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (this->setupSize()) {
+ glyph->zeroMetrics();
+ return;
+ }
+
+ FT_Bool haveLayers = false;
+#ifdef FT_COLOR_H
+ // See https://skbug.com/12945, if the face isn't marked scalable then paths cannot be loaded.
+ if (FT_IS_SCALABLE(fFace)) {
+ SkRect bounds = SkRect::MakeEmpty();
+#ifdef TT_SUPPORT_COLRV1
+ FT_OpaquePaint opaqueLayerPaint{nullptr, 1};
+ if (FT_Get_Color_Glyph_Paint(fFace, glyph->getGlyphID(),
+ FT_COLOR_INCLUDE_ROOT_TRANSFORM, &opaqueLayerPaint)) {
+ haveLayers = true;
+ glyph->fScalerContextBits = ScalerContextBits::COLRv1;
+
+ // COLRv1 optionally provides a ClipBox.
+ FT_ClipBox clipBox;
+ if (FT_Get_Color_Glyph_ClipBox(fFace, glyph->getGlyphID(), &clipBox)) {
+ // Find bounding box of clip box corner points, needed when clipbox is transformed.
+ FT_BBox bbox;
+ bbox.xMin = clipBox.bottom_left.x;
+ bbox.xMax = clipBox.bottom_left.x;
+ bbox.yMin = clipBox.bottom_left.y;
+ bbox.yMax = clipBox.bottom_left.y;
+ for (auto& corner : {clipBox.top_left, clipBox.top_right, clipBox.bottom_right}) {
+ bbox.xMin = std::min(bbox.xMin, corner.x);
+ bbox.yMin = std::min(bbox.yMin, corner.y);
+ bbox.xMax = std::max(bbox.xMax, corner.x);
+ bbox.yMax = std::max(bbox.yMax, corner.y);
+ }
+ // 26.6 -> scalar, flipping y into Skia's y-down space.
+ bounds = SkRect::MakeLTRB(SkFDot6ToScalar(bbox.xMin), -SkFDot6ToScalar(bbox.yMax),
+ SkFDot6ToScalar(bbox.xMax), -SkFDot6ToScalar(bbox.yMin));
+ } else {
+ // Traverse the glyph graph with a focus on measuring the required bounding box.
+ // The call to computeColrV1GlyphBoundingBox may modify the face.
+ // Reset the face to load the base glyph for metrics.
+ if (!computeColrV1GlyphBoundingBox(fFace, glyph->getGlyphID(), &bounds) ||
+ this->setupSize())
+ {
+ glyph->zeroMetrics();
+ return;
+ }
+ }
+ }
+#endif // TT_SUPPORT_COLRV1
+
+ if (!haveLayers) {
+ FT_LayerIterator layerIterator = { 0, 0, nullptr };
+ FT_UInt layerGlyphIndex;
+ FT_UInt layerColorIndex;
+ FT_Int32 flags = fLoadGlyphFlags;
+ flags |= FT_LOAD_BITMAP_METRICS_ONLY; // Don't decode any bitmaps.
+ flags |= FT_LOAD_NO_BITMAP; // Ignore embedded bitmaps.
+ flags &= ~FT_LOAD_RENDER; // Don't scan convert.
+ flags &= ~FT_LOAD_COLOR; // Ignore SVG.
+ // For COLRv0 compute the glyph bounding box from the union of layer bounding boxes.
+ while (FT_Get_Color_Glyph_Layer(fFace, glyph->getGlyphID(), &layerGlyphIndex,
+ &layerColorIndex, &layerIterator)) {
+ haveLayers = true;
+ if (FT_Load_Glyph(fFace, layerGlyphIndex, flags)) {
+ glyph->zeroMetrics();
+ return;
+ }
+
+ SkRect currentBounds;
+ if (getBoundsOfCurrentOutlineGlyph(fFace->glyph, &currentBounds)) {
+ bounds.join(currentBounds);
+ }
+ }
+ if (haveLayers) {
+ glyph->fScalerContextBits = ScalerContextBits::COLRv0;
+ }
+ }
+
+ if (haveLayers) {
+ // Color glyphs render as ARGB and have no path.
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ glyph->setPath(alloc, nullptr, false);
+ setGlyphBounds(glyph, &bounds, this->isSubpixel());
+ }
+ }
+#endif //FT_COLOR_H
+
+ // Even if haveLayers, the base glyph must be loaded to get the metrics.
+ if (FT_Load_Glyph(fFace, glyph->getGlyphID(), fLoadGlyphFlags | FT_LOAD_BITMAP_METRICS_ONLY)) {
+ glyph->zeroMetrics();
+ return;
+ }
+
+ if (!haveLayers) {
+ emboldenIfNeeded(fFace, fFace->glyph, glyph->getGlyphID());
+
+ if (fFace->glyph->format == FT_GLYPH_FORMAT_OUTLINE) {
+ SkRect bounds;
+ if (!getBoundsOfCurrentOutlineGlyph(fFace->glyph, &bounds)) {
+ bounds = SkRect::MakeEmpty();
+ }
+ setGlyphBounds(glyph, &bounds, this->isSubpixel());
+ updateGlyphBoundsIfLCD(glyph);
+
+ } else if (fFace->glyph->format == FT_GLYPH_FORMAT_BITMAP) {
+ glyph->setPath(alloc, nullptr, false);
+
+ // Shift the bitmap origin from horizontal to vertical bearings.
+ if (this->isVertical()) {
+ FT_Vector vector;
+ vector.x = fFace->glyph->metrics.vertBearingX - fFace->glyph->metrics.horiBearingX;
+ vector.y = -fFace->glyph->metrics.vertBearingY - fFace->glyph->metrics.horiBearingY;
+ FT_Vector_Transform(&vector, &fMatrix22);
+ fFace->glyph->bitmap_left += SkFDot6Floor(vector.x);
+ fFace->glyph->bitmap_top += SkFDot6Floor(vector.y);
+ }
+
+ if (fFace->glyph->bitmap.pixel_mode == FT_PIXEL_MODE_BGRA) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ }
+
+ // Bitmap bounds are in whole pixels; apply the residual matrix.
+ SkRect bounds = SkRect::MakeXYWH(SkIntToScalar(fFace->glyph->bitmap_left ),
+ -SkIntToScalar(fFace->glyph->bitmap_top ),
+ SkIntToScalar(fFace->glyph->bitmap.width),
+ SkIntToScalar(fFace->glyph->bitmap.rows ));
+ fMatrix22Scalar.mapRect(&bounds);
+ setGlyphBounds(glyph, &bounds, this->shouldSubpixelBitmap(*glyph, fMatrix22Scalar));
+
+#if defined(FT_CONFIG_OPTION_SVG)
+ } else if (fFace->glyph->format == FT_GLYPH_FORMAT_SVG) {
+ glyph->fScalerContextBits = ScalerContextBits::SVG;
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+ glyph->setPath(alloc, nullptr, false);
+
+ // Record the SVG drawing to measure its culled bounds.
+ SkPictureRecorder recorder;
+ SkRect infiniteRect = SkRect::MakeLTRB(-SK_ScalarInfinity, -SK_ScalarInfinity,
+ SK_ScalarInfinity, SK_ScalarInfinity);
+ sk_sp<SkBBoxHierarchy> bboxh = SkRTreeFactory()();
+ SkSpan<SkColor> palette(fFaceRec->fSkPalette.get(), fFaceRec->fFTPaletteEntryCount);
+ SkCanvas* recordingCanvas = recorder.beginRecording(infiniteRect, bboxh);
+ if (!this->drawSVGGlyph(fFace, *glyph, fLoadGlyphFlags, palette, recordingCanvas)) {
+ glyph->zeroMetrics();
+ return;
+ }
+ sk_sp<SkPicture> pic = recorder.finishRecordingAsPicture();
+ SkRect bounds = pic->cullRect();
+ SkASSERT(bounds.isFinite());
+
+ // drawSVGGlyph already applied the subpixel positioning.
+ setGlyphBounds(glyph, &bounds, false);
+#endif // FT_CONFIG_OPTION_SVG
+
+ } else {
+ SkDEBUGFAIL("unknown glyph format");
+ glyph->zeroMetrics();
+ return;
+ }
+ }
+
+ // Advances: linear (16.16 through the residual matrix) when enabled,
+ // otherwise the hinted 26.6 slot advance. Vertical advances are negated
+ // along x to match Skia's conventions.
+ if (this->isVertical()) {
+ if (fDoLinearMetrics) {
+ const SkScalar advanceScalar = SkFT_FixedToScalar(fFace->glyph->linearVertAdvance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getSkewX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getScaleY() * advanceScalar);
+ } else {
+ glyph->fAdvanceX = -SkFDot6ToFloat(fFace->glyph->advance.x);
+ glyph->fAdvanceY = SkFDot6ToFloat(fFace->glyph->advance.y);
+ }
+ } else {
+ if (fDoLinearMetrics) {
+ const SkScalar advanceScalar = SkFT_FixedToScalar(fFace->glyph->linearHoriAdvance);
+ glyph->fAdvanceX = SkScalarToFloat(fMatrix22Scalar.getScaleX() * advanceScalar);
+ glyph->fAdvanceY = SkScalarToFloat(fMatrix22Scalar.getSkewY() * advanceScalar);
+ } else {
+ glyph->fAdvanceX = SkFDot6ToFloat(fFace->glyph->advance.x);
+ glyph->fAdvanceY = -SkFDot6ToFloat(fFace->glyph->advance.y);
+ }
+ }
+
+#ifdef ENABLE_GLYPH_SPEW
+ LOG_INFO("Metrics(glyph:%d flags:0x%x) w:%d\n", glyph->getGlyphID(), fLoadGlyphFlags, glyph->fWidth);
+#endif
+}
+
+// Rasterize a glyph into glyph.fImage. Color glyphs (COLRv0/COLRv1/SVG) are
+// drawn through an SkCanvas wrapped around the destination pixels; plain
+// glyphs are loaded and blitted via generateGlyphImage. On setup/load
+// failure the image is zeroed (left transparent) rather than left undefined.
+void SkScalerContext_FreeType::generateImage(const SkGlyph& glyph) {
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (this->setupSize()) {
+ sk_bzero(glyph.fImage, glyph.imageSize());
+ return;
+ }
+
+ if (glyph.fScalerContextBits == ScalerContextBits::COLRv0 ||
+ glyph.fScalerContextBits == ScalerContextBits::COLRv1 ||
+ glyph.fScalerContextBits == ScalerContextBits::SVG )
+ {
+ SkASSERT(glyph.maskFormat() == SkMask::kARGB32_Format);
+ SkBitmap dstBitmap;
+ // TODO: mark this as sRGB when the blits will be sRGB.
+ dstBitmap.setInfo(SkImageInfo::Make(glyph.fWidth, glyph.fHeight,
+ kN32_SkColorType,
+ kPremul_SkAlphaType),
+ glyph.rowBytes());
+ dstBitmap.setPixels(glyph.fImage);
+
+ SkCanvas canvas(dstBitmap);
+ if constexpr (kSkShowTextBlitCoverage) {
+ canvas.clear(0x33FF0000);
+ } else {
+ canvas.clear(SK_ColorTRANSPARENT);
+ }
+ // Shift so the glyph's (left, top) lands at the bitmap origin.
+ canvas.translate(-glyph.fLeft, -glyph.fTop);
+
+ SkSpan<SkColor> palette(fFaceRec->fSkPalette.get(), fFaceRec->fFTPaletteEntryCount);
+ if (glyph.fScalerContextBits == ScalerContextBits::COLRv0) {
+#ifdef FT_COLOR_H
+ this->drawCOLRv0Glyph(fFace, glyph, fLoadGlyphFlags, palette, &canvas);
+#endif
+ } else if (glyph.fScalerContextBits == ScalerContextBits::COLRv1) {
+#ifdef TT_SUPPORT_COLRV1
+ this->drawCOLRv1Glyph(fFace, glyph, fLoadGlyphFlags, palette, &canvas);
+#endif
+ } else if (glyph.fScalerContextBits == ScalerContextBits::SVG) {
+#if defined(FT_CONFIG_OPTION_SVG)
+ if (FT_Load_Glyph(fFace, glyph.getGlyphID(), fLoadGlyphFlags)) {
+ return;
+ }
+ this->drawSVGGlyph(fFace, glyph, fLoadGlyphFlags, palette, &canvas);
+#endif
+ }
+ return;
+ }
+
+ if (FT_Load_Glyph(fFace, glyph.getGlyphID(), fLoadGlyphFlags)) {
+ sk_bzero(glyph.fImage, glyph.imageSize());
+ return;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph, glyph.getGlyphID());
+
+ // For subpixel-positioned bitmaps, fold the fractional offset into the
+ // matrix used to resample the bitmap.
+ SkMatrix* bitmapMatrix = &fMatrix22Scalar;
+ SkMatrix subpixelBitmapMatrix;
+ if (this->shouldSubpixelBitmap(glyph, *bitmapMatrix)) {
+ subpixelBitmapMatrix = fMatrix22Scalar;
+ subpixelBitmapMatrix.postTranslate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ bitmapMatrix = &subpixelBitmapMatrix;
+ }
+
+ generateGlyphImage(fFace, glyph, *bitmapMatrix);
+}
+
+// Produce a drawable for color glyphs (COLRv0/COLRv1/SVG) by recording the
+// draw into an SkPicture-backed drawable, so later playback needs no
+// FreeType lock. Returns nullptr for non-color glyphs, on setup failure, or
+// when the required FreeType feature was compiled out.
+sk_sp<SkDrawable> SkScalerContext_FreeType::generateDrawable(const SkGlyph& glyph) {
+ // Because FreeType's FT_Face is stateful (not thread safe) and the current design of this
+ // SkTypeface and SkScalerContext does not work around this, it is necessary lock at least the
+ // FT_Face when using it (this implementation currently locks the whole FT_Library).
+ // It should be possible to draw the drawable straight out of the FT_Face. However, this would
+ // mean locking each time any such drawable is drawn. To avoid locking, this implementation
+ // creates drawables backed as pictures so that they can be played back later without locking.
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (this->setupSize()) {
+ return nullptr;
+ }
+
+#if defined(FT_COLOR_H) || defined(TT_SUPPORT_COLRV1) || defined(FT_CONFIG_OPTION_SVG)
+ if (glyph.fScalerContextBits == ScalerContextBits::COLRv0 ||
+ glyph.fScalerContextBits == ScalerContextBits::COLRv1 ||
+ glyph.fScalerContextBits == ScalerContextBits::SVG )
+ {
+ SkSpan<SkColor> palette(fFaceRec->fSkPalette.get(), fFaceRec->fFTPaletteEntryCount);
+ SkPictureRecorder recorder;
+ SkCanvas* recordingCanvas = recorder.beginRecording(SkRect::Make(glyph.mask().fBounds));
+ if (glyph.fScalerContextBits == ScalerContextBits::COLRv0) {
+#ifdef FT_COLOR_H
+ if (!this->drawCOLRv0Glyph(fFace, glyph, fLoadGlyphFlags, palette, recordingCanvas)) {
+ return nullptr;
+ }
+#else
+ return nullptr;
+#endif
+ } else if (glyph.fScalerContextBits == ScalerContextBits::COLRv1) {
+#ifdef TT_SUPPORT_COLRV1
+ if (!this->drawCOLRv1Glyph(fFace, glyph, fLoadGlyphFlags, palette, recordingCanvas)) {
+ return nullptr;
+ }
+#else
+ return nullptr;
+#endif
+ } else if (glyph.fScalerContextBits == ScalerContextBits::SVG) {
+#if defined(FT_CONFIG_OPTION_SVG)
+ if (FT_Load_Glyph(fFace, glyph.getGlyphID(), fLoadGlyphFlags)) {
+ return nullptr;
+ }
+ if (!this->drawSVGGlyph(fFace, glyph, fLoadGlyphFlags, palette, recordingCanvas)) {
+ return nullptr;
+ }
+#else
+ return nullptr;
+#endif
+ }
+ return recorder.finishRecordingAsDrawable();
+ }
+#endif
+ return nullptr;
+}
+
+bool SkScalerContext_FreeType::generatePath(const SkGlyph& glyph, SkPath* path) {
+ SkASSERT(path);
+
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ SkGlyphID glyphID = glyph.getGlyphID();
+ // FT_IS_SCALABLE is documented to mean the face contains outline glyphs.
+ if (!FT_IS_SCALABLE(fFace) || this->setupSize()) {
+ path->reset();
+ return false;
+ }
+
+ uint32_t flags = fLoadGlyphFlags;
+ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
+ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
+
+ FT_Error err = FT_Load_Glyph(fFace, glyphID, flags);
+ if (err != 0 || fFace->glyph->format != FT_GLYPH_FORMAT_OUTLINE) {
+ path->reset();
+ return false;
+ }
+ emboldenIfNeeded(fFace, fFace->glyph, glyphID);
+
+ if (!generateGlyphPath(fFace, path)) {
+ path->reset();
+ return false;
+ }
+
+ // The path's origin from FreeType is always the horizontal layout origin.
+ // Offset the path so that it is relative to the vertical origin if needed.
+ if (this->isVertical()) {
+ FT_Vector vector;
+ vector.x = fFace->glyph->metrics.vertBearingX - fFace->glyph->metrics.horiBearingX;
+ vector.y = -fFace->glyph->metrics.vertBearingY - fFace->glyph->metrics.horiBearingY;
+ FT_Vector_Transform(&vector, &fMatrix22);
+ path->offset(SkFDot6ToScalar(vector.x), -SkFDot6ToScalar(vector.y));
+ }
+ return true;
+}
+
+void SkScalerContext_FreeType::generateFontMetrics(SkFontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+
+ SkAutoMutexExclusive ac(f_t_mutex());
+
+ if (this->setupSize()) {
+ sk_bzero(metrics, sizeof(*metrics));
+ return;
+ }
+
+ FT_Face face = fFace;
+ metrics->fFlags = 0;
+
+ SkScalar upem = SkIntToScalar(SkTypeface_FreeType::GetUnitsPerEm(face));
+
+ // use the os/2 table as a source of reasonable defaults.
+ SkScalar x_height = 0.0f;
+ SkScalar avgCharWidth = 0.0f;
+ SkScalar cap_height = 0.0f;
+ SkScalar strikeoutThickness = 0.0f, strikeoutPosition = 0.0f;
+ TT_OS2* os2 = (TT_OS2*) FT_Get_Sfnt_Table(face, ft_sfnt_os2);
+ if (os2) {
+ x_height = SkIntToScalar(os2->sxHeight) / upem * fScale.y();
+ avgCharWidth = SkIntToScalar(os2->xAvgCharWidth) / upem;
+ strikeoutThickness = SkIntToScalar(os2->yStrikeoutSize) / upem;
+ strikeoutPosition = -SkIntToScalar(os2->yStrikeoutPosition) / upem;
+ metrics->fFlags |= SkFontMetrics::kStrikeoutThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kStrikeoutPositionIsValid_Flag;
+ if (os2->version != 0xFFFF && os2->version >= 2) {
+ cap_height = SkIntToScalar(os2->sCapHeight) / upem * fScale.y();
+ }
+ }
+
+ // pull from format-specific metrics as needed
+ SkScalar ascent, descent, leading, xmin, xmax, ymin, ymax;
+ SkScalar underlineThickness, underlinePosition;
+ if (face->face_flags & FT_FACE_FLAG_SCALABLE) { // scalable outline font
+ // FreeType will always use HHEA metrics if they're not zero.
+ // It completely ignores the OS/2 fsSelection::UseTypoMetrics bit.
+ // It also ignores the VDMX tables, which are also of interest here
+ // (and override everything else when they apply).
+ static const int kUseTypoMetricsMask = (1 << 7);
+ if (os2 && os2->version != 0xFFFF && (os2->fsSelection & kUseTypoMetricsMask)) {
+ ascent = -SkIntToScalar(os2->sTypoAscender) / upem;
+ descent = -SkIntToScalar(os2->sTypoDescender) / upem;
+ leading = SkIntToScalar(os2->sTypoLineGap) / upem;
+ } else {
+ ascent = -SkIntToScalar(face->ascender) / upem;
+ descent = -SkIntToScalar(face->descender) / upem;
+ leading = SkIntToScalar(face->height + (face->descender - face->ascender)) / upem;
+ }
+ xmin = SkIntToScalar(face->bbox.xMin) / upem;
+ xmax = SkIntToScalar(face->bbox.xMax) / upem;
+ ymin = -SkIntToScalar(face->bbox.yMin) / upem;
+ ymax = -SkIntToScalar(face->bbox.yMax) / upem;
+ underlineThickness = SkIntToScalar(face->underline_thickness) / upem;
+ underlinePosition = -SkIntToScalar(face->underline_position +
+ face->underline_thickness / 2) / upem;
+
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+
+ // we may be able to synthesize x_height and cap_height from outline
+ if (!x_height) {
+ FT_BBox bbox;
+ if (getCBoxForLetter('x', &bbox)) {
+ x_height = SkIntToScalar(bbox.yMax) / 64.0f;
+ }
+ }
+ if (!cap_height) {
+ FT_BBox bbox;
+ if (getCBoxForLetter('H', &bbox)) {
+ cap_height = SkIntToScalar(bbox.yMax) / 64.0f;
+ }
+ }
+ } else if (fStrikeIndex != -1) { // bitmap strike metrics
+ SkScalar xppem = SkIntToScalar(face->size->metrics.x_ppem);
+ SkScalar yppem = SkIntToScalar(face->size->metrics.y_ppem);
+ ascent = -SkIntToScalar(face->size->metrics.ascender) / (yppem * 64.0f);
+ descent = -SkIntToScalar(face->size->metrics.descender) / (yppem * 64.0f);
+ leading = (SkIntToScalar(face->size->metrics.height) / (yppem * 64.0f)) + ascent - descent;
+
+ xmin = 0.0f;
+ xmax = SkIntToScalar(face->available_sizes[fStrikeIndex].width) / xppem;
+ ymin = descent;
+ ymax = ascent;
+ // The actual bitmaps may be any size and placed at any offset.
+ metrics->fFlags |= SkFontMetrics::kBoundsInvalid_Flag;
+
+ underlineThickness = 0;
+ underlinePosition = 0;
+ metrics->fFlags &= ~SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags &= ~SkFontMetrics::kUnderlinePositionIsValid_Flag;
+
+ TT_Postscript* post = (TT_Postscript*) FT_Get_Sfnt_Table(face, ft_sfnt_post);
+ if (post) {
+ underlineThickness = SkIntToScalar(post->underlineThickness) / upem;
+ underlinePosition = -SkIntToScalar(post->underlinePosition) / upem;
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+ }
+ } else {
+ sk_bzero(metrics, sizeof(*metrics));
+ return;
+ }
+
+ // synthesize elements that were not provided by the os/2 table or format-specific metrics
+ if (!x_height) {
+ x_height = -ascent * fScale.y();
+ }
+ if (!avgCharWidth) {
+ avgCharWidth = xmax - xmin;
+ }
+ if (!cap_height) {
+ cap_height = -ascent * fScale.y();
+ }
+
+ // disallow negative linespacing
+ if (leading < 0.0f) {
+ leading = 0.0f;
+ }
+
+ metrics->fTop = ymax * fScale.y();
+ metrics->fAscent = ascent * fScale.y();
+ metrics->fDescent = descent * fScale.y();
+ metrics->fBottom = ymin * fScale.y();
+ metrics->fLeading = leading * fScale.y();
+ metrics->fAvgCharWidth = avgCharWidth * fScale.y();
+ metrics->fXMin = xmin * fScale.y();
+ metrics->fXMax = xmax * fScale.y();
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ metrics->fXHeight = x_height;
+ metrics->fCapHeight = cap_height;
+ metrics->fUnderlineThickness = underlineThickness * fScale.y();
+ metrics->fUnderlinePosition = underlinePosition * fScale.y();
+ metrics->fStrikeoutThickness = strikeoutThickness * fScale.y();
+ metrics->fStrikeoutPosition = strikeoutPosition * fScale.y();
+
+ if (face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS
+#if defined(FT_CONFIG_OPTION_SVG)
+ || face->face_flags & FT_FACE_FLAG_SVG
+#endif // FT_CONFIG_OPTION_SVG
+ ) {
+ // The bounds are only valid for the default variation of variable glyphs.
+ // https://docs.microsoft.com/en-us/typography/opentype/spec/head
+ // For SVG glyphs this number is often incorrect for its non-`glyf` points.
+ // https://github.com/fonttools/fonttools/issues/2566
+ metrics->fFlags |= SkFontMetrics::kBoundsInvalid_Flag;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// hand-tuned value to reduce outline embolden strength
+#ifndef SK_OUTLINE_EMBOLDEN_DIVISOR
+ #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #define SK_OUTLINE_EMBOLDEN_DIVISOR 34
+ #else
+ #define SK_OUTLINE_EMBOLDEN_DIVISOR 24
+ #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkScalerContext_FreeType::emboldenIfNeeded(FT_Face face, FT_GlyphSlot glyph, SkGlyphID gid) {
+ // check to see if the embolden bit is set
+ if (0 == (fRec.fFlags & SkScalerContext::kEmbolden_Flag)) {
+ return;
+ }
+
+ switch (glyph->format) {
+ case FT_GLYPH_FORMAT_OUTLINE:
+ FT_Pos strength;
+ strength = FT_MulFix(face->units_per_EM, face->size->metrics.y_scale)
+ / SK_OUTLINE_EMBOLDEN_DIVISOR;
+ FT_Outline_Embolden(&glyph->outline, strength);
+ break;
+ case FT_GLYPH_FORMAT_BITMAP:
+ if (!fFace->glyph->bitmap.buffer) {
+ FT_Load_Glyph(fFace, gid, fLoadGlyphFlags);
+ }
+ FT_GlyphSlot_Own_Bitmap(glyph);
+ FT_Bitmap_Embolden(glyph->library, &glyph->bitmap, kBitmapEmboldenStrength, 0);
+ break;
+ default:
+ SkDEBUGFAIL("unknown glyph format");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "src/base/SkUtils.h"
+
+SkTypeface_FreeType::SkTypeface_FreeType(const SkFontStyle& style, bool isFixedPitch)
+ : INHERITED(style, isFixedPitch)
+{}
+
+SkTypeface_FreeType::~SkTypeface_FreeType() {
+ if (fFaceRec) {
+ SkAutoMutexExclusive ac(f_t_mutex());
+ fFaceRec.reset();
+ }
+}
+
+// Just made up, so we don't end up storing 1000s of entries
+constexpr int kMaxC2GCacheCount = 512;
+
+void SkTypeface_FreeType::onCharsToGlyphs(const SkUnichar uni[], int count,
+ SkGlyphID glyphs[]) const {
+ // Try the cache first, *before* accessing freetype lib/face, as that
+ // can be very slow. If we do need to compute a new glyphID, then
+ // access those freetype objects and continue the loop.
+
+ int i;
+ {
+ // Optimistically use a shared lock.
+ SkAutoSharedMutexShared ama(fC2GCacheMutex);
+ for (i = 0; i < count; ++i) {
+ int index = fC2GCache.findGlyphIndex(uni[i]);
+ if (index < 0) {
+ break;
+ }
+ glyphs[i] = SkToU16(index);
+ }
+ if (i == count) {
+ // we're done, no need to access the freetype objects
+ return;
+ }
+ }
+
+ // Need to add more so grab an exclusive lock.
+ SkAutoSharedMutexExclusive ama(fC2GCacheMutex);
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ sk_bzero(glyphs, count * sizeof(glyphs[0]));
+ return;
+ }
+
+ for (; i < count; ++i) {
+ SkUnichar c = uni[i];
+ int index = fC2GCache.findGlyphIndex(c);
+ if (index >= 0) {
+ glyphs[i] = SkToU16(index);
+ } else {
+ glyphs[i] = SkToU16(FT_Get_Char_Index(face, c));
+ fC2GCache.insertCharAndGlyph(~index, c, glyphs[i]);
+ }
+ }
+
+ if (fC2GCache.count() > kMaxC2GCacheCount) {
+ fC2GCache.reset();
+ }
+}
+
+int SkTypeface_FreeType::onCountGlyphs() const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ return face ? face->num_glyphs : 0;
+}
+
+SkTypeface::LocalizedStrings* SkTypeface_FreeType::onCreateFamilyNameIterator() const {
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*this);
+ if (!nameIter) {
+ SkString familyName;
+ this->getFamilyName(&familyName);
+        SkString language("und"); // "und" = undetermined (BCP 47 language tag)
+ nameIter = sk_make_sp<SkOTUtils::LocalizedStrings_SingleName>(familyName, language);
+ }
+ return nameIter.release();
+}
+
+bool SkTypeface_FreeType::onGlyphMaskNeedsCurrentColor() const {
+ fGlyphMasksMayNeedCurrentColorOnce([this]{
+ static constexpr SkFourByteTag COLRTag = SkSetFourByteTag('C', 'O', 'L', 'R');
+ fGlyphMasksMayNeedCurrentColor = this->getTableSize(COLRTag) > 0;
+#if defined(FT_CONFIG_OPTION_SVG)
+ static constexpr SkFourByteTag SVGTag = SkSetFourByteTag('S', 'V', 'G', ' ');
+ fGlyphMasksMayNeedCurrentColor |= this->getTableSize(SVGTag) > 0 ;
+#endif // FT_CONFIG_OPTION_SVG
+ });
+ return fGlyphMasksMayNeedCurrentColor;
+}
+
+int SkTypeface_FreeType::onGetVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount) const
+{
+ AutoFTAccess fta(this);
+ return GetVariationDesignPosition(fta, coordinates, coordinateCount);
+}
+
+int SkTypeface_FreeType::onGetVariationDesignParameters(
+ SkFontParameters::Variation::Axis parameters[], int parameterCount) const
+{
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return -1;
+ }
+
+ if (!(face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS)) {
+ return 0;
+ }
+
+ FT_MM_Var* variations = nullptr;
+ if (FT_Get_MM_Var(face, &variations)) {
+ return -1;
+ }
+ UniqueVoidPtr autoFreeVariations(variations);
+
+ if (!parameters || parameterCount < SkToInt(variations->num_axis)) {
+ return variations->num_axis;
+ }
+
+ for (FT_UInt i = 0; i < variations->num_axis; ++i) {
+ parameters[i].tag = variations->axis[i].tag;
+ parameters[i].min = SkFixedToScalar(variations->axis[i].minimum);
+ parameters[i].def = SkFixedToScalar(variations->axis[i].def);
+ parameters[i].max = SkFixedToScalar(variations->axis[i].maximum);
+ FT_UInt flags = 0;
+ bool hidden = !FT_Get_Var_Axis_Flags(variations, i, &flags) &&
+ (flags & FT_VAR_AXIS_FLAG_HIDDEN);
+ parameters[i].setHidden(hidden);
+ }
+
+ return variations->num_axis;
+}
+
+int SkTypeface_FreeType::onGetTableTags(SkFontTableTag tags[]) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return 0;
+ }
+
+ FT_ULong tableCount = 0;
+ FT_Error error;
+
+ // When 'tag' is nullptr, returns number of tables in 'length'.
+ error = FT_Sfnt_Table_Info(face, 0, nullptr, &tableCount);
+ if (error) {
+ return 0;
+ }
+
+ if (tags) {
+ for (FT_ULong tableIndex = 0; tableIndex < tableCount; ++tableIndex) {
+ FT_ULong tableTag;
+ FT_ULong tablelength;
+ error = FT_Sfnt_Table_Info(face, tableIndex, &tableTag, &tablelength);
+ if (error) {
+ return 0;
+ }
+ tags[tableIndex] = static_cast<SkFontTableTag>(tableTag);
+ }
+ }
+ return tableCount;
+}
+
+size_t SkTypeface_FreeType::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return 0;
+ }
+
+ FT_ULong tableLength = 0;
+ FT_Error error;
+
+ // When 'length' is 0 it is overwritten with the full table length; 'offset' is ignored.
+ error = FT_Load_Sfnt_Table(face, tag, 0, nullptr, &tableLength);
+ if (error) {
+ return 0;
+ }
+
+ if (offset > tableLength) {
+ return 0;
+ }
+ FT_ULong size = std::min((FT_ULong)length, tableLength - (FT_ULong)offset);
+ if (data) {
+ error = FT_Load_Sfnt_Table(face, tag, offset, reinterpret_cast<FT_Byte*>(data), &size);
+ if (error) {
+ return 0;
+ }
+ }
+
+ return size;
+}
+
+sk_sp<SkData> SkTypeface_FreeType::onCopyTableData(SkFontTableTag tag) const {
+ AutoFTAccess fta(this);
+ FT_Face face = fta.face();
+ if (!face) {
+ return nullptr;
+ }
+
+ FT_ULong tableLength = 0;
+ FT_Error error;
+
+ // When 'length' is 0 it is overwritten with the full table length; 'offset' is ignored.
+ error = FT_Load_Sfnt_Table(face, tag, 0, nullptr, &tableLength);
+ if (error) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data = SkData::MakeUninitialized(tableLength);
+ if (data) {
+ error = FT_Load_Sfnt_Table(face, tag, 0,
+ reinterpret_cast<FT_Byte*>(data->writable_data()), &tableLength);
+ if (error) {
+ data.reset();
+ }
+ }
+ return data;
+}
+
+SkTypeface_FreeType::FaceRec* SkTypeface_FreeType::getFaceRec() const {
+ f_t_mutex().assertHeld();
+ fFTFaceOnce([this]{ fFaceRec = SkTypeface_FreeType::FaceRec::Make(this); });
+ return fFaceRec.get();
+}
+
+std::unique_ptr<SkFontData> SkTypeface_FreeType::makeFontData() const {
+ return this->onMakeFontData();
+}
+
+void SkTypeface_FreeType::FontDataPaletteToDescriptorPalette(const SkFontData& fontData,
+ SkFontDescriptor* desc) {
+ desc->setPaletteIndex(fontData.getPaletteIndex());
+ int paletteOverrideCount = fontData.getPaletteOverrideCount();
+ auto overrides = desc->setPaletteEntryOverrides(paletteOverrideCount);
+ for (int i = 0; i < paletteOverrideCount; ++i) {
+ overrides[i] = fontData.getPaletteOverrides()[i];
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+SkTypeface_FreeType::Scanner::Scanner() : fLibrary(nullptr) {
+ if (FT_New_Library(&gFTMemory, &fLibrary)) {
+ return;
+ }
+ FT_Add_Default_Modules(fLibrary);
+ FT_Set_Default_Properties(fLibrary);
+}
+SkTypeface_FreeType::Scanner::~Scanner() {
+ if (fLibrary) {
+ FT_Done_Library(fLibrary);
+ }
+}
+
+FT_Face SkTypeface_FreeType::Scanner::openFace(SkStreamAsset* stream, int ttcIndex,
+ FT_Stream ftStream) const
+{
+ if (fLibrary == nullptr || stream == nullptr) {
+ return nullptr;
+ }
+
+ FT_Open_Args args;
+ memset(&args, 0, sizeof(args));
+
+ const void* memoryBase = stream->getMemoryBase();
+
+ if (memoryBase) {
+ args.flags = FT_OPEN_MEMORY;
+ args.memory_base = (const FT_Byte*)memoryBase;
+ args.memory_size = stream->getLength();
+ } else {
+ memset(ftStream, 0, sizeof(*ftStream));
+ ftStream->size = stream->getLength();
+ ftStream->descriptor.pointer = stream;
+ ftStream->read = sk_ft_stream_io;
+ ftStream->close = sk_ft_stream_close;
+
+ args.flags = FT_OPEN_STREAM;
+ args.stream = ftStream;
+ }
+
+ FT_Face face;
+ if (FT_Open_Face(fLibrary, &args, ttcIndex, &face)) {
+ return nullptr;
+ }
+ return face;
+}
+
+bool SkTypeface_FreeType::Scanner::recognizedFont(SkStreamAsset* stream, int* numFaces) const {
+ SkAutoMutexExclusive libraryLock(fLibraryMutex);
+
+ FT_StreamRec streamRec;
+ SkUniqueFTFace face(this->openFace(stream, -1, &streamRec));
+ if (!face) {
+ return false;
+ }
+
+ *numFaces = face->num_faces;
+ return true;
+}
+
+bool SkTypeface_FreeType::Scanner::scanFont(
+ SkStreamAsset* stream, int ttcIndex,
+ SkString* name, SkFontStyle* style, bool* isFixedPitch, AxisDefinitions* axes) const
+{
+ SkAutoMutexExclusive libraryLock(fLibraryMutex);
+
+ FT_StreamRec streamRec;
+ SkUniqueFTFace face(this->openFace(stream, ttcIndex, &streamRec));
+ if (!face) {
+ return false;
+ }
+
+ int weight = SkFontStyle::kNormal_Weight;
+ int width = SkFontStyle::kNormal_Width;
+ SkFontStyle::Slant slant = SkFontStyle::kUpright_Slant;
+ if (face->style_flags & FT_STYLE_FLAG_BOLD) {
+ weight = SkFontStyle::kBold_Weight;
+ }
+ if (face->style_flags & FT_STYLE_FLAG_ITALIC) {
+ slant = SkFontStyle::kItalic_Slant;
+ }
+
+ bool hasAxes = face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS;
+ TT_OS2* os2 = static_cast<TT_OS2*>(FT_Get_Sfnt_Table(face.get(), ft_sfnt_os2));
+ bool hasOs2 = os2 && os2->version != 0xffff;
+
+ PS_FontInfoRec psFontInfo;
+
+ if (hasOs2) {
+ weight = os2->usWeightClass;
+ width = os2->usWidthClass;
+
+ // OS/2::fsSelection bit 9 indicates oblique.
+ if (SkToBool(os2->fsSelection & (1u << 9))) {
+ slant = SkFontStyle::kOblique_Slant;
+ }
+ }
+
+ // Let variable axes override properties from the OS/2 table.
+ if (hasAxes) {
+ AxisDefinitions axisDefinitions;
+ if (GetAxes(face.get(), &axisDefinitions)) {
+ size_t numAxes = axisDefinitions.size();
+ static constexpr SkFourByteTag wghtTag = SkSetFourByteTag('w', 'g', 'h', 't');
+ static constexpr SkFourByteTag wdthTag = SkSetFourByteTag('w', 'd', 't', 'h');
+ static constexpr SkFourByteTag slntTag = SkSetFourByteTag('s', 'l', 'n', 't');
+ std::optional<size_t> wghtIndex;
+ std::optional<size_t> wdthIndex;
+ std::optional<size_t> slntIndex;
+ for(size_t i = 0; i < numAxes; ++i) {
+ if (axisDefinitions[i].fTag == wghtTag) {
+                    // Rough validity check: is there sufficient spread, and are the
+                    // ranges within 0-1000?
+ int wghtRange = SkFixedToScalar(axisDefinitions[i].fMaximum) -
+ SkFixedToScalar(axisDefinitions[i].fMinimum);
+ if (wghtRange > 5 && wghtRange <= 1000 &&
+ SkFixedToScalar(axisDefinitions[i].fMaximum) <= 1000) {
+ wghtIndex = i;
+ }
+ }
+ if (axisDefinitions[i].fTag == wdthTag) {
+                    // Rough validity check: is there a spread, and are the ranges
+                    // within 0-500?
+ int widthRange = SkFixedToScalar(axisDefinitions[i].fMaximum) -
+ SkFixedToScalar(axisDefinitions[i].fMinimum);
+ if (widthRange > 0 && widthRange <= 500 &&
+ SkFixedToScalar(axisDefinitions[i].fMaximum) <= 500)
+ wdthIndex = i;
+ }
+ if (axisDefinitions[i].fTag == slntTag)
+ slntIndex = i;
+ }
+ AutoSTMalloc<4, FT_Fixed> coords(numAxes);
+ if ((wghtIndex || wdthIndex || slntIndex) &&
+ !FT_Get_Var_Design_Coordinates(face.get(), numAxes, coords.get())) {
+ if (wghtIndex) {
+ SkASSERT(*wghtIndex < numAxes);
+ weight = SkScalarRoundToInt(SkFixedToScalar(coords[*wghtIndex]));
+ }
+ if (wdthIndex) {
+ SkASSERT(*wdthIndex < numAxes);
+ SkScalar wdthValue = SkFixedToScalar(coords[*wdthIndex]);
+ width = SkFontDescriptor::SkFontStyleWidthForWidthAxisValue(wdthValue);
+ }
+ if (slntIndex) {
+ SkASSERT(*slntIndex < numAxes);
+ // https://docs.microsoft.com/en-us/typography/opentype/spec/dvaraxistag_slnt
+ // "Scale interpretation: Values can be interpreted as the angle,
+ // in counter-clockwise degrees, of oblique slant from whatever
+ // the designer considers to be upright for that font design."
+ if (SkFixedToScalar(coords[*slntIndex]) < 0) {
+ slant = SkFontStyle::kOblique_Slant;
+ }
+ }
+ }
+ }
+ }
+
+ if (!hasOs2 && !hasAxes && 0 == FT_Get_PS_Font_Info(face.get(), &psFontInfo) && psFontInfo.weight) {
+ static const struct {
+ char const * const name;
+ int const weight;
+ } commonWeights [] = {
+ // There are probably more common names, but these are known to exist.
+ { "all", SkFontStyle::kNormal_Weight }, // Multiple Masters usually default to normal.
+ { "black", SkFontStyle::kBlack_Weight },
+ { "bold", SkFontStyle::kBold_Weight },
+ { "book", (SkFontStyle::kNormal_Weight + SkFontStyle::kLight_Weight)/2 },
+ { "demi", SkFontStyle::kSemiBold_Weight },
+ { "demibold", SkFontStyle::kSemiBold_Weight },
+ { "extra", SkFontStyle::kExtraBold_Weight },
+ { "extrabold", SkFontStyle::kExtraBold_Weight },
+ { "extralight", SkFontStyle::kExtraLight_Weight },
+ { "hairline", SkFontStyle::kThin_Weight },
+ { "heavy", SkFontStyle::kBlack_Weight },
+ { "light", SkFontStyle::kLight_Weight },
+ { "medium", SkFontStyle::kMedium_Weight },
+ { "normal", SkFontStyle::kNormal_Weight },
+ { "plain", SkFontStyle::kNormal_Weight },
+ { "regular", SkFontStyle::kNormal_Weight },
+ { "roman", SkFontStyle::kNormal_Weight },
+ { "semibold", SkFontStyle::kSemiBold_Weight },
+ { "standard", SkFontStyle::kNormal_Weight },
+ { "thin", SkFontStyle::kThin_Weight },
+ { "ultra", SkFontStyle::kExtraBold_Weight },
+ { "ultrablack", SkFontStyle::kExtraBlack_Weight },
+ { "ultrabold", SkFontStyle::kExtraBold_Weight },
+ { "ultraheavy", SkFontStyle::kExtraBlack_Weight },
+ { "ultralight", SkFontStyle::kExtraLight_Weight },
+ };
+ int const index = SkStrLCSearch(&commonWeights[0].name, std::size(commonWeights),
+ psFontInfo.weight, sizeof(commonWeights[0]));
+ if (index >= 0) {
+ weight = commonWeights[index].weight;
+ } else {
+ LOG_INFO("Do not know weight for: %s (%s) \n", face->family_name, psFontInfo.weight);
+ }
+ }
+
+ if (name != nullptr) {
+ name->set(face->family_name);
+ }
+ if (style != nullptr) {
+ *style = SkFontStyle(weight, width, slant);
+ }
+ if (isFixedPitch != nullptr) {
+ *isFixedPitch = FT_IS_FIXED_WIDTH(face);
+ }
+
+ if (axes != nullptr && !GetAxes(face.get(), axes)) {
+ return false;
+ }
+ return true;
+}
+
+bool SkTypeface_FreeType::Scanner::GetAxes(FT_Face face, AxisDefinitions* axes) {
+ SkASSERT(face && axes);
+ if (face->face_flags & FT_FACE_FLAG_MULTIPLE_MASTERS) {
+ FT_MM_Var* variations = nullptr;
+ FT_Error err = FT_Get_MM_Var(face, &variations);
+ if (err) {
+ LOG_INFO("INFO: font %s claims to have variations, but none found.\n",
+ face->family_name);
+ return false;
+ }
+ UniqueVoidPtr autoFreeVariations(variations);
+
+ axes->reset(variations->num_axis);
+ for (FT_UInt i = 0; i < variations->num_axis; ++i) {
+ const FT_Var_Axis& ftAxis = variations->axis[i];
+ (*axes)[i].fTag = ftAxis.tag;
+ (*axes)[i].fMinimum = ftAxis.minimum;
+ (*axes)[i].fDefault = ftAxis.def;
+ (*axes)[i].fMaximum = ftAxis.maximum;
+ }
+ }
+ return true;
+}
+
+/*static*/ void SkTypeface_FreeType::Scanner::computeAxisValues(
+ AxisDefinitions axisDefinitions,
+ const SkFontArguments::VariationPosition position,
+ SkFixed* axisValues,
+ const SkString& name,
+ const SkFontArguments::VariationPosition::Coordinate* current)
+{
+ for (int i = 0; i < axisDefinitions.size(); ++i) {
+ const Scanner::AxisDefinition& axisDefinition = axisDefinitions[i];
+ const SkScalar axisMin = SkFixedToScalar(axisDefinition.fMinimum);
+ const SkScalar axisMax = SkFixedToScalar(axisDefinition.fMaximum);
+
+ // Start with the default value.
+ axisValues[i] = axisDefinition.fDefault;
+
+ // Then the current value.
+ if (current) {
+ for (int j = 0; j < axisDefinitions.size(); ++j) {
+ const auto& coordinate = current[j];
+ if (axisDefinition.fTag == coordinate.axis) {
+ const SkScalar axisValue = SkTPin(coordinate.value, axisMin, axisMax);
+ axisValues[i] = SkScalarToFixed(axisValue);
+ break;
+ }
+ }
+ }
+
+ // Then the requested value.
+ // The position may be over specified. If there are multiple values for a given axis,
+ // use the last one since that's what css-fonts-4 requires.
+ for (int j = position.coordinateCount; j --> 0;) {
+ const auto& coordinate = position.coordinates[j];
+ if (axisDefinition.fTag == coordinate.axis) {
+ const SkScalar axisValue = SkTPin(coordinate.value, axisMin, axisMax);
+ if (coordinate.value != axisValue) {
+ LOG_INFO("Requested font axis value out of range: "
+ "%s '%c%c%c%c' %f; pinned to %f.\n",
+ name.c_str(),
+ (axisDefinition.fTag >> 24) & 0xFF,
+ (axisDefinition.fTag >> 16) & 0xFF,
+ (axisDefinition.fTag >> 8) & 0xFF,
+ (axisDefinition.fTag ) & 0xFF,
+ SkScalarToDouble(coordinate.value),
+ SkScalarToDouble(axisValue));
+ }
+ axisValues[i] = SkScalarToFixed(axisValue);
+ break;
+ }
+ }
+ // TODO: warn on defaulted axis?
+ }
+
+ SkDEBUGCODE(
+ // Check for axis specified, but not matched in font.
+ for (int i = 0; i < position.coordinateCount; ++i) {
+ SkFourByteTag skTag = position.coordinates[i].axis;
+ bool found = false;
+ for (int j = 0; j < axisDefinitions.size(); ++j) {
+ if (skTag == axisDefinitions[j].fTag) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ LOG_INFO("Requested font axis not found: %s '%c%c%c%c'\n",
+ name.c_str(),
+ (skTag >> 24) & 0xFF,
+ (skTag >> 16) & 0xFF,
+ (skTag >> 8) & 0xFF,
+ (skTag) & 0xFF);
+ }
+ }
+ )
+}
+
+
+SkTypeface_FreeTypeStream::SkTypeface_FreeTypeStream(std::unique_ptr<SkFontData> fontData,
+ const SkString familyName,
+ const SkFontStyle& style, bool isFixedPitch)
+ : SkTypeface_FreeType(style, isFixedPitch)
+ , fFamilyName(std::move(familyName))
+ , fData(std::move(fontData))
+{ }
+
+SkTypeface_FreeTypeStream::~SkTypeface_FreeTypeStream() {}
+
+void SkTypeface_FreeTypeStream::onGetFamilyName(SkString* familyName) const {
+ *familyName = fFamilyName;
+}
+
+std::unique_ptr<SkStreamAsset> SkTypeface_FreeTypeStream::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = fData->getIndex();
+ return fData->getStream()->duplicate();
+}
+
+std::unique_ptr<SkFontData> SkTypeface_FreeTypeStream::onMakeFontData() const {
+ return std::make_unique<SkFontData>(*fData);
+}
+
+sk_sp<SkTypeface> SkTypeface_FreeTypeStream::onMakeClone(const SkFontArguments& args) const {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+
+ SkString familyName;
+ this->getFamilyName(&familyName);
+
+ return sk_make_sp<SkTypeface_FreeTypeStream>(
+ std::move(data), familyName, this->fontStyle(), this->isFixedPitch());
+}
+
+void SkTypeface_FreeTypeStream::onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const {
+ desc->setFamilyName(fFamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ desc->setFactoryId(SkTypeface_FreeType::FactoryId);
+ SkTypeface_FreeType::FontDataPaletteToDescriptorPalette(*fData, desc);
+ *serialize = true;
+}
+
+sk_sp<SkTypeface> SkTypeface_FreeType::MakeFromStream(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ static Scanner scanner;
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!scanner.scanFont(stream.get(), args.getCollectionIndex(),
+ &name, &style, &isFixedPitch, &axisDefinitions)) {
+ return nullptr;
+ }
+
+ const SkFontArguments::VariationPosition position = args.getVariationDesignPosition();
+ AutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.size());
+ Scanner::computeAxisValues(axisDefinitions, position, axisValues, name);
+
+ auto data = std::make_unique<SkFontData>(
+ std::move(stream), args.getCollectionIndex(), args.getPalette().index,
+ axisValues.get(), axisDefinitions.size(),
+ args.getPalette().overrides, args.getPalette().overrideCount);
+ return sk_make_sp<SkTypeface_FreeTypeStream>(std::move(data), name, style, isFixedPitch);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp
new file mode 100644
index 0000000000..4d3abe8ea7
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.cpp
@@ -0,0 +1,2091 @@
+/*
+ * Copyright 2006-2012 The Android Open Source Project
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkGraphics.h"
+#include "include/core/SkOpenTypeSVGDecoder.h"
+#include "include/core/SkPath.h"
+#include "include/effects/SkGradientShader.h"
+#include "include/pathops/SkPathOps.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkFDot6.h"
+#include "src/core/SkSwizzlePriv.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+#include <algorithm>
+#include <utility>
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_BITMAP_H
+#ifdef FT_COLOR_H
+# include FT_COLOR_H
+#endif
+#include FT_IMAGE_H
+#include FT_OUTLINE_H
+#include FT_SIZES_H
+// In the past, FT_GlyphSlot_Own_Bitmap was defined in this header file.
+#include FT_SYNTHESIS_H
+
+namespace {
+[[maybe_unused]] static inline const constexpr bool kSkShowTextBlitCoverage = false;
+}
+
+#if defined(FT_CONFIG_OPTION_SVG)
+# include FT_OTSVG_H
+#endif
+
+#ifdef TT_SUPPORT_COLRV1
+// FT_ClipBox and FT_Get_Color_Glyph_ClipBox introduced VER-2-11-0-18-g47cf8ebf4
+// FT_COLR_COMPOSITE_PLUS and renumbering introduced VER-2-11-0-21-ge40ae7569
+// FT_SIZEOF_LONG_LONG introduced VER-2-11-0-31-gffdac8d67
+// FT_PaintRadialGradient changed size and layout at VER-2-11-0-147-gd3d3ff76d
+// FT_STATIC_CAST introduced VER-2-11-0-172-g9079c5d91
+// So undefine TT_SUPPORT_COLRV1 before 2.11.1 but not if FT_STATIC_CAST is defined.
+#if (((FREETYPE_MAJOR) < 2) || \
+ ((FREETYPE_MAJOR) == 2 && (FREETYPE_MINOR) < 11) || \
+ ((FREETYPE_MAJOR) == 2 && (FREETYPE_MINOR) == 11 && (FREETYPE_PATCH) < 1)) && \
+ !defined(FT_STATIC_CAST)
+# undef TT_SUPPORT_COLRV1
+#else
+# include "src/base/SkScopeExit.h"
+#endif
+#endif
+
+// FT_OUTLINE_OVERLAP was added in FreeType 2.10.3
+#ifndef FT_OUTLINE_OVERLAP
+# define FT_OUTLINE_OVERLAP 0x40
+#endif
+
+// FT_LOAD_COLOR and the corresponding FT_Pixel_Mode::FT_PIXEL_MODE_BGRA
+// were introduced in FreeType 2.5.0.
+// The following may be removed once FreeType 2.5.0 is required to build.
+#ifndef FT_LOAD_COLOR
+# define FT_LOAD_COLOR ( 1L << 20 )
+# define FT_PIXEL_MODE_BGRA 7
+#endif
+
+#ifndef FT_LOAD_BITMAP_METRICS_ONLY
+# define FT_LOAD_BITMAP_METRICS_ONLY ( 1L << 22 )
+#endif
+
+#ifdef SK_DEBUG
+const char* SkTraceFtrGetError(int e) {
+ switch ((FT_Error)e) {
+ #undef FTERRORS_H_
+ #define FT_ERRORDEF( e, v, s ) case v: return s;
+ #define FT_ERROR_START_LIST
+ #define FT_ERROR_END_LIST
+ #include FT_ERRORS_H
+ #undef FT_ERRORDEF
+ #undef FT_ERROR_START_LIST
+ #undef FT_ERROR_END_LIST
+ default: return "";
+ }
+}
+#endif // SK_DEBUG
+
+#ifdef TT_SUPPORT_COLRV1
+bool operator==(const FT_OpaquePaint& a, const FT_OpaquePaint& b) {
+ return a.p == b.p && a.insert_root_transform == b.insert_root_transform;
+}
+
+// The stop_offset field is being upgraded to a larger representation in FreeType, and changed from
+// 2.14 to 16.16. Adjust the shift factor depending on size type.
+static_assert(sizeof(FT_Fixed) != sizeof(FT_F2Dot14));
+constexpr float kColorStopShift =
+ sizeof(FT_ColorStop::stop_offset) == sizeof(FT_F2Dot14) ? 1 << 14 : 1 << 16;
+#endif
+
+namespace {
+using SkUniqueFTSize = std::unique_ptr<FT_SizeRec, SkFunctionObject<FT_Done_Size>>;
+
+FT_Pixel_Mode compute_pixel_mode(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ return FT_PIXEL_MODE_MONO;
+ case SkMask::kA8_Format:
+ default:
+ return FT_PIXEL_MODE_GRAY;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+uint16_t packTriple(U8CPU r, U8CPU g, U8CPU b) {
+ if constexpr (kSkShowTextBlitCoverage) {
+ r = std::max(r, (U8CPU)0x40);
+ g = std::max(g, (U8CPU)0x40);
+ b = std::max(b, (U8CPU)0x40);
+ }
+ return SkPack888ToRGB16(r, g, b);
+}
+
+uint16_t grayToRGB16(U8CPU gray) {
+ if constexpr (kSkShowTextBlitCoverage) {
+ gray = std::max(gray, (U8CPU)0x40);
+ }
+ return SkPack888ToRGB16(gray, gray, gray);
+}
+
+int bittst(const uint8_t data[], int bitOffset) {
+ SkASSERT(bitOffset >= 0);
+ int lowBit = data[bitOffset >> 3] >> (~bitOffset & 7);
+ return lowBit & 1;
+}
+
+/**
+ * Copies a FT_Bitmap into an SkMask with the same dimensions.
+ *
+ * FT_PIXEL_MODE_MONO
+ * FT_PIXEL_MODE_GRAY
+ * FT_PIXEL_MODE_LCD
+ * FT_PIXEL_MODE_LCD_V
+ */
+template<bool APPLY_PREBLEND>
+void copyFT2LCD16(const FT_Bitmap& bitmap, const SkMask& mask, int lcdIsBGR,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB)
+{
+ SkASSERT(SkMask::kLCD16_Format == mask.fFormat);
+ if (FT_PIXEL_MODE_LCD != bitmap.pixel_mode) {
+ SkASSERT(mask.fBounds.width() == static_cast<int>(bitmap.width));
+ }
+ if (FT_PIXEL_MODE_LCD_V != bitmap.pixel_mode) {
+ SkASSERT(mask.fBounds.height() == static_cast<int>(bitmap.rows));
+ }
+
+ const uint8_t* src = bitmap.buffer;
+ uint16_t* dst = reinterpret_cast<uint16_t*>(mask.fImage);
+ const size_t dstRB = mask.fRowBytes;
+
+ const int width = mask.fBounds.width();
+ const int height = mask.fBounds.height();
+
+ switch (bitmap.pixel_mode) {
+ case FT_PIXEL_MODE_MONO:
+ for (int y = height; y --> 0;) {
+ for (int x = 0; x < width; ++x) {
+ dst[x] = -bittst(src, x);
+ }
+ dst = (uint16_t*)((char*)dst + dstRB);
+ src += bitmap.pitch;
+ }
+ break;
+ case FT_PIXEL_MODE_GRAY:
+ for (int y = height; y --> 0;) {
+ for (int x = 0; x < width; ++x) {
+ dst[x] = grayToRGB16(src[x]);
+ }
+ dst = (uint16_t*)((char*)dst + dstRB);
+ src += bitmap.pitch;
+ }
+ break;
+ case FT_PIXEL_MODE_LCD:
+ SkASSERT(3 * mask.fBounds.width() == static_cast<int>(bitmap.width));
+ for (int y = height; y --> 0;) {
+ const uint8_t* triple = src;
+ if (lcdIsBGR) {
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(triple[2], tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[1], tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[0], tableB));
+ triple += 3;
+ }
+ } else {
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(triple[0], tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[1], tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(triple[2], tableB));
+ triple += 3;
+ }
+ }
+ src += bitmap.pitch;
+ dst = (uint16_t*)((char*)dst + dstRB);
+ }
+ break;
+ case FT_PIXEL_MODE_LCD_V:
+ SkASSERT(3 * mask.fBounds.height() == static_cast<int>(bitmap.rows));
+ for (int y = height; y --> 0;) {
+ const uint8_t* srcR = src;
+ const uint8_t* srcG = srcR + bitmap.pitch;
+ const uint8_t* srcB = srcG + bitmap.pitch;
+ if (lcdIsBGR) {
+ using std::swap;
+ swap(srcR, srcB);
+ }
+ for (int x = 0; x < width; x++) {
+ dst[x] = packTriple(sk_apply_lut_if<APPLY_PREBLEND>(*srcR++, tableR),
+ sk_apply_lut_if<APPLY_PREBLEND>(*srcG++, tableG),
+ sk_apply_lut_if<APPLY_PREBLEND>(*srcB++, tableB));
+ }
+ src += 3 * bitmap.pitch;
+ dst = (uint16_t*)((char*)dst + dstRB);
+ }
+ break;
+ default:
+ SkDEBUGF("FT_Pixel_Mode %d", bitmap.pixel_mode);
+ SkDEBUGFAIL("unsupported FT_Pixel_Mode for LCD16");
+ break;
+ }
+}
+
+/**
+ * Copies a FT_Bitmap into an SkMask with the same dimensions.
+ *
+ * Yes, No, Never Requested, Never Produced
+ *
+ * kBW kA8 k3D kARGB32 kLCD16
+ * FT_PIXEL_MODE_MONO Y Y NR N Y
+ * FT_PIXEL_MODE_GRAY N Y NR N Y
+ * FT_PIXEL_MODE_GRAY2 NP NP NR NP NP
+ * FT_PIXEL_MODE_GRAY4 NP NP NR NP NP
+ * FT_PIXEL_MODE_LCD NP NP NR NP NP
+ * FT_PIXEL_MODE_LCD_V NP NP NR NP NP
+ * FT_PIXEL_MODE_BGRA N N NR Y N
+ *
+ * TODO: All of these N need to be Y or otherwise ruled out.
+ */
+void copyFTBitmap(const FT_Bitmap& srcFTBitmap, SkMask& dstMask) {
+ SkASSERTF(dstMask.fBounds.width() == static_cast<int>(srcFTBitmap.width),
+ "dstMask.fBounds.width() = %d\n"
+ "static_cast<int>(srcFTBitmap.width) = %d",
+ dstMask.fBounds.width(),
+ static_cast<int>(srcFTBitmap.width)
+ );
+ SkASSERTF(dstMask.fBounds.height() == static_cast<int>(srcFTBitmap.rows),
+ "dstMask.fBounds.height() = %d\n"
+ "static_cast<int>(srcFTBitmap.rows) = %d",
+ dstMask.fBounds.height(),
+ static_cast<int>(srcFTBitmap.rows)
+ );
+
+ const uint8_t* src = reinterpret_cast<const uint8_t*>(srcFTBitmap.buffer);
+ const FT_Pixel_Mode srcFormat = static_cast<FT_Pixel_Mode>(srcFTBitmap.pixel_mode);
+ // FT_Bitmap::pitch is an int and allowed to be negative.
+ const int srcPitch = srcFTBitmap.pitch;
+ const size_t srcRowBytes = SkTAbs(srcPitch);
+
+ uint8_t* dst = dstMask.fImage;
+ const SkMask::Format dstFormat = static_cast<SkMask::Format>(dstMask.fFormat);
+ const size_t dstRowBytes = dstMask.fRowBytes;
+
+ const size_t width = srcFTBitmap.width;
+ const size_t height = srcFTBitmap.rows;
+
+ if (SkMask::kLCD16_Format == dstFormat) {
+ copyFT2LCD16<false>(srcFTBitmap, dstMask, false, nullptr, nullptr, nullptr);
+ return;
+ }
+
+ if ((FT_PIXEL_MODE_MONO == srcFormat && SkMask::kBW_Format == dstFormat) ||
+ (FT_PIXEL_MODE_GRAY == srcFormat && SkMask::kA8_Format == dstFormat))
+ {
+ size_t commonRowBytes = std::min(srcRowBytes, dstRowBytes);
+ for (size_t y = height; y --> 0;) {
+ memcpy(dst, src, commonRowBytes);
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else if (FT_PIXEL_MODE_MONO == srcFormat && SkMask::kA8_Format == dstFormat) {
+ for (size_t y = height; y --> 0;) {
+ uint8_t byte = 0;
+ int bits = 0;
+ const uint8_t* src_row = src;
+ uint8_t* dst_row = dst;
+ for (size_t x = width; x --> 0;) {
+ if (0 == bits) {
+ byte = *src_row++;
+ bits = 8;
+ }
+ *dst_row++ = byte & 0x80 ? 0xff : 0x00;
+ bits--;
+ byte <<= 1;
+ }
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else if (FT_PIXEL_MODE_BGRA == srcFormat && SkMask::kARGB32_Format == dstFormat) {
+ // FT_PIXEL_MODE_BGRA is pre-multiplied.
+ for (size_t y = height; y --> 0;) {
+ const uint8_t* src_row = src;
+ SkPMColor* dst_row = reinterpret_cast<SkPMColor*>(dst);
+ for (size_t x = 0; x < width; ++x) {
+ uint8_t b = *src_row++;
+ uint8_t g = *src_row++;
+ uint8_t r = *src_row++;
+ uint8_t a = *src_row++;
+ *dst_row++ = SkPackARGB32(a, r, g, b);
+ if constexpr (kSkShowTextBlitCoverage) {
+ *(dst_row-1) = SkFourByteInterp256(*(dst_row-1), SK_ColorWHITE, 0x40);
+ }
+ }
+ src += srcPitch;
+ dst += dstRowBytes;
+ }
+ } else {
+ SkDEBUGF("FT_Pixel_Mode %d, SkMask::Format %d\n", srcFormat, dstFormat);
+ SkDEBUGFAIL("unsupported combination of FT_Pixel_Mode and SkMask::Format");
+ }
+}
+
+inline int convert_8_to_1(unsigned byte) {
+ SkASSERT(byte <= 0xFF);
+ // Arbitrary decision that making the cutoff at 1/4 instead of 1/2 in general looks better.
+ return (byte >> 6) != 0;
+}
+
+uint8_t pack_8_to_1(const uint8_t alpha[8]) {
+ unsigned bits = 0;
+ for (int i = 0; i < 8; ++i) {
+ bits <<= 1;
+ bits |= convert_8_to_1(alpha[i]);
+ }
+ return SkToU8(bits);
+}
+
+void packA8ToA1(const SkMask& mask, const uint8_t* src, size_t srcRB) {
+ const int height = mask.fBounds.height();
+ const int width = mask.fBounds.width();
+ const int octs = width >> 3;
+ const int leftOverBits = width & 7;
+
+ uint8_t* dst = mask.fImage;
+ const int dstPad = mask.fRowBytes - SkAlign8(width)/8;
+ SkASSERT(dstPad >= 0);
+
+ const int srcPad = srcRB - width;
+ SkASSERT(srcPad >= 0);
+
+ for (int y = 0; y < height; ++y) {
+ for (int i = 0; i < octs; ++i) {
+ *dst++ = pack_8_to_1(src);
+ src += 8;
+ }
+ if (leftOverBits > 0) {
+ unsigned bits = 0;
+ int shift = 7;
+ for (int i = 0; i < leftOverBits; ++i, --shift) {
+ bits |= convert_8_to_1(*src++) << shift;
+ }
+ *dst++ = bits;
+ }
+ src += srcPad;
+ dst += dstPad;
+ }
+}
+
+inline SkMask::Format SkMaskFormat_for_SkColorType(SkColorType colorType) {
+ switch (colorType) {
+ case kAlpha_8_SkColorType:
+ return SkMask::kA8_Format;
+ case kN32_SkColorType:
+ return SkMask::kARGB32_Format;
+ default:
+ SkDEBUGFAIL("unsupported SkBitmap::Config");
+ return SkMask::kA8_Format;
+ }
+}
+
+inline SkColorType SkColorType_for_FTPixelMode(FT_Pixel_Mode pixel_mode) {
+ switch (pixel_mode) {
+ case FT_PIXEL_MODE_MONO:
+ case FT_PIXEL_MODE_GRAY:
+ return kAlpha_8_SkColorType;
+ case FT_PIXEL_MODE_BGRA:
+ return kN32_SkColorType;
+ default:
+ SkDEBUGFAIL("unsupported FT_PIXEL_MODE");
+ return kAlpha_8_SkColorType;
+ }
+}
+
+inline SkColorType SkColorType_for_SkMaskFormat(SkMask::Format format) {
+ switch (format) {
+ case SkMask::kBW_Format:
+ case SkMask::kA8_Format:
+ case SkMask::kLCD16_Format:
+ return kAlpha_8_SkColorType;
+ case SkMask::kARGB32_Format:
+ return kN32_SkColorType;
+ default:
+ SkDEBUGFAIL("unsupported destination SkBitmap::Config");
+ return kAlpha_8_SkColorType;
+ }
+}
+
+// Only build COLRv1 rendering code if FreeType is new enough to have COLRv1
+// additions. FreeType defines a macro in the ftoption header to tell us whether
+// it does support these features.
+#ifdef TT_SUPPORT_COLRV1
+
+const uint16_t kForegroundColorPaletteIndex = 0xFFFF;
+
+// This linear interpolation is used for calculating a truncated color line in special edge cases.
+// This interpolation needs to be kept in sync with what the gradient shader would normally do when
+// truncating and drawing color lines. When drawing into N32 surfaces, this is expected to be true.
+// If that changes, or if we support other color spaces in CPAL tables at some point, this needs to
+// be looked at.
+SkColor lerpSkColor(SkColor c0, SkColor c1, float t) {
+ // Due to the floating point calculation in the caller, when interpolating between very narrow
+ // stops, we may get values outside the interpolation range, guard against these.
+ if (t < 0) {
+ return c0;
+ }
+ if (t > 1) {
+ return c1;
+ }
+ const auto c0_4f = Sk4f_fromL32(c0), c1_4f = Sk4f_fromL32(c1),
+ c_4f = c0_4f + (c1_4f - c0_4f) * t;
+
+ return Sk4f_toL32(c_4f);
+}
+
+enum TruncateStops {
+ TruncateStart,
+ TruncateEnd
+};
+
+// Truncate a vector of color stops at a previously computed stop position and insert at that
+// position the color interpolated between the surrounding stops.
+void truncateToStopInterpolating(SkScalar zeroRadiusStop,
+ std::vector<SkColor>& colors,
+ std::vector<SkScalar>& stops,
+ TruncateStops truncateStops) {
+ if (stops.size() <= 1u ||
+ zeroRadiusStop < stops.front() || stops.back() < zeroRadiusStop)
+ {
+ return;
+ }
+
+ size_t afterIndex = (truncateStops == TruncateStart)
+ ? std::lower_bound(stops.begin(), stops.end(), zeroRadiusStop) - stops.begin()
+ : std::upper_bound(stops.begin(), stops.end(), zeroRadiusStop) - stops.begin();
+
+ const float t = (zeroRadiusStop - stops[afterIndex - 1]) /
+ (stops[afterIndex] - stops[afterIndex - 1]);
+ SkColor lerpColor = lerpSkColor(colors[afterIndex - 1], colors[afterIndex], t);
+
+ if (truncateStops == TruncateStart) {
+ stops.erase(stops.begin(), stops.begin() + afterIndex);
+ colors.erase(colors.begin(), colors.begin() + afterIndex);
+ stops.insert(stops.begin(), 0);
+ colors.insert(colors.begin(), lerpColor);
+ } else {
+ stops.erase(stops.begin() + afterIndex, stops.end());
+ colors.erase(colors.begin() + afterIndex, colors.end());
+ stops.insert(stops.end(), 1);
+ colors.insert(colors.end(), lerpColor);
+ }
+}
+
+struct OpaquePaintHasher {
+ size_t operator()(const FT_OpaquePaint& opaquePaint) {
+ return SkGoodHash()(opaquePaint.p) ^
+ SkGoodHash()(opaquePaint.insert_root_transform);
+ }
+};
+
+using VisitedSet = SkTHashSet<FT_OpaquePaint, OpaquePaintHasher>;
+
+bool generateFacePathCOLRv1(FT_Face face, SkGlyphID glyphID, SkPath* path);
+
+inline float SkColrV1AlphaToFloat(uint16_t alpha) { return (alpha / float(1 << 14)); }
+
+
+inline SkTileMode ToSkTileMode(FT_PaintExtend extendMode) {
+ switch (extendMode) {
+ case FT_COLR_PAINT_EXTEND_REPEAT:
+ return SkTileMode::kRepeat;
+ case FT_COLR_PAINT_EXTEND_REFLECT:
+ return SkTileMode::kMirror;
+ default:
+ return SkTileMode::kClamp;
+ }
+}
+
+inline SkBlendMode ToSkBlendMode(FT_Composite_Mode compositeMode) {
+ switch (compositeMode) {
+ case FT_COLR_COMPOSITE_CLEAR:
+ return SkBlendMode::kClear;
+ case FT_COLR_COMPOSITE_SRC:
+ return SkBlendMode::kSrc;
+ case FT_COLR_COMPOSITE_DEST:
+ return SkBlendMode::kDst;
+ case FT_COLR_COMPOSITE_SRC_OVER:
+ return SkBlendMode::kSrcOver;
+ case FT_COLR_COMPOSITE_DEST_OVER:
+ return SkBlendMode::kDstOver;
+ case FT_COLR_COMPOSITE_SRC_IN:
+ return SkBlendMode::kSrcIn;
+ case FT_COLR_COMPOSITE_DEST_IN:
+ return SkBlendMode::kDstIn;
+ case FT_COLR_COMPOSITE_SRC_OUT:
+ return SkBlendMode::kSrcOut;
+ case FT_COLR_COMPOSITE_DEST_OUT:
+ return SkBlendMode::kDstOut;
+ case FT_COLR_COMPOSITE_SRC_ATOP:
+ return SkBlendMode::kSrcATop;
+ case FT_COLR_COMPOSITE_DEST_ATOP:
+ return SkBlendMode::kDstATop;
+ case FT_COLR_COMPOSITE_XOR:
+ return SkBlendMode::kXor;
+ case FT_COLR_COMPOSITE_PLUS:
+ return SkBlendMode::kPlus;
+ case FT_COLR_COMPOSITE_SCREEN:
+ return SkBlendMode::kScreen;
+ case FT_COLR_COMPOSITE_OVERLAY:
+ return SkBlendMode::kOverlay;
+ case FT_COLR_COMPOSITE_DARKEN:
+ return SkBlendMode::kDarken;
+ case FT_COLR_COMPOSITE_LIGHTEN:
+ return SkBlendMode::kLighten;
+ case FT_COLR_COMPOSITE_COLOR_DODGE:
+ return SkBlendMode::kColorDodge;
+ case FT_COLR_COMPOSITE_COLOR_BURN:
+ return SkBlendMode::kColorBurn;
+ case FT_COLR_COMPOSITE_HARD_LIGHT:
+ return SkBlendMode::kHardLight;
+ case FT_COLR_COMPOSITE_SOFT_LIGHT:
+ return SkBlendMode::kSoftLight;
+ case FT_COLR_COMPOSITE_DIFFERENCE:
+ return SkBlendMode::kDifference;
+ case FT_COLR_COMPOSITE_EXCLUSION:
+ return SkBlendMode::kExclusion;
+ case FT_COLR_COMPOSITE_MULTIPLY:
+ return SkBlendMode::kMultiply;
+ case FT_COLR_COMPOSITE_HSL_HUE:
+ return SkBlendMode::kHue;
+ case FT_COLR_COMPOSITE_HSL_SATURATION:
+ return SkBlendMode::kSaturation;
+ case FT_COLR_COMPOSITE_HSL_COLOR:
+ return SkBlendMode::kColor;
+ case FT_COLR_COMPOSITE_HSL_LUMINOSITY:
+ return SkBlendMode::kLuminosity;
+ default:
+ return SkBlendMode::kDst;
+ }
+}
+
+inline SkMatrix ToSkMatrix(FT_Affine23 affine23) {
+ // Convert from FreeType's FT_Affine23 column major order to SkMatrix row-major order.
+ return SkMatrix::MakeAll(
+ SkFixedToScalar(affine23.xx), -SkFixedToScalar(affine23.xy), SkFixedToScalar(affine23.dx),
+ -SkFixedToScalar(affine23.yx), SkFixedToScalar(affine23.yy), -SkFixedToScalar(affine23.dy),
+ 0, 0, 1);
+}
+
+inline SkPoint SkVectorProjection(SkPoint a, SkPoint b) {
+ SkScalar length = b.length();
+ if (!length) {
+ return SkPoint();
+ }
+ SkPoint bNormalized = b;
+ bNormalized.normalize();
+ bNormalized.scale(SkPoint::DotProduct(a, b) / length);
+ return bNormalized;
+}
+
+bool colrv1_configure_skpaint(FT_Face face,
+ const SkSpan<SkColor>& palette,
+ const SkColor foregroundColor,
+ const FT_COLR_Paint& colrPaint,
+ SkPaint* paint) {
+ auto fetchColorStops = [&face, &palette, &foregroundColor](
+ const FT_ColorStopIterator& colorStopIterator,
+ std::vector<SkScalar>& stops,
+ std::vector<SkColor>& colors) -> bool {
+ const FT_UInt colorStopCount = colorStopIterator.num_color_stops;
+ if (colorStopCount == 0) {
+ return false;
+ }
+
+ // 5.7.11.2.4 ColorIndex, ColorStop and ColorLine
+ // "Applications shall apply the colorStops in increasing stopOffset order."
+ struct ColorStop {
+ SkScalar pos;
+ SkColor color;
+ };
+ std::vector<ColorStop> colorStopsSorted;
+ colorStopsSorted.resize(colorStopCount);
+
+ FT_ColorStop color_stop;
+ FT_ColorStopIterator mutable_color_stop_iterator = colorStopIterator;
+ while (FT_Get_Colorline_Stops(face, &color_stop, &mutable_color_stop_iterator)) {
+ FT_UInt index = mutable_color_stop_iterator.current_color_stop - 1;
+ colorStopsSorted[index].pos = color_stop.stop_offset / kColorStopShift;
+ FT_UInt16& palette_index = color_stop.color.palette_index;
+ if (palette_index == kForegroundColorPaletteIndex) {
+ U8CPU newAlpha = SkColorGetA(foregroundColor) *
+ SkColrV1AlphaToFloat(color_stop.color.alpha);
+ colorStopsSorted[index].color = SkColorSetA(foregroundColor, newAlpha);
+ } else if (palette_index >= palette.size()) {
+ return false;
+ } else {
+ U8CPU newAlpha = SkColorGetA(palette[palette_index]) *
+ SkColrV1AlphaToFloat(color_stop.color.alpha);
+ colorStopsSorted[index].color = SkColorSetA(palette[palette_index], newAlpha);
+ }
+ }
+
+ std::stable_sort(colorStopsSorted.begin(), colorStopsSorted.end(),
+ [](const ColorStop& a, const ColorStop& b) { return a.pos < b.pos; });
+
+ stops.resize(colorStopCount);
+ colors.resize(colorStopCount);
+ for (size_t i = 0; i < colorStopCount; ++i) {
+ stops[i] = colorStopsSorted[i].pos;
+ colors[i] = colorStopsSorted[i].color;
+ }
+ return true;
+ };
+
+ switch (colrPaint.format) {
+ case FT_COLR_PAINTFORMAT_SOLID: {
+ FT_PaintSolid solid = colrPaint.u.solid;
+
+            // Don't draw anything with this color if the palette index is out of bounds.
+ SkColor color = SK_ColorTRANSPARENT;
+ if (solid.color.palette_index == kForegroundColorPaletteIndex) {
+ U8CPU newAlpha = SkColorGetA(foregroundColor) *
+ SkColrV1AlphaToFloat(solid.color.alpha);
+ color = SkColorSetA(foregroundColor, newAlpha);
+ } else if (solid.color.palette_index >= palette.size()) {
+ return false;
+ } else {
+ U8CPU newAlpha = SkColorGetA(palette[solid.color.palette_index]) *
+ SkColrV1AlphaToFloat(solid.color.alpha);
+ color = SkColorSetA(palette[solid.color.palette_index], newAlpha);
+ }
+ paint->setShader(nullptr);
+ paint->setColor(color);
+ return true;
+ }
+ case FT_COLR_PAINTFORMAT_LINEAR_GRADIENT: {
+ const FT_PaintLinearGradient& linearGradient = colrPaint.u.linear_gradient;
+ std::vector<SkScalar> stops;
+ std::vector<SkColor> colors;
+
+ if (!fetchColorStops(linearGradient.colorline.color_stop_iterator, stops, colors)) {
+ return false;
+ }
+
+ if (stops.size() == 1) {
+ paint->setColor(colors[0]);
+ return true;
+ }
+
+ SkPoint linePositions[2] = {SkPoint::Make( SkFixedToScalar(linearGradient.p0.x),
+ -SkFixedToScalar(linearGradient.p0.y)),
+ SkPoint::Make( SkFixedToScalar(linearGradient.p1.x),
+ -SkFixedToScalar(linearGradient.p1.y))};
+ SkPoint p0 = linePositions[0];
+ SkPoint p1 = linePositions[1];
+ SkPoint p2 = SkPoint::Make( SkFixedToScalar(linearGradient.p2.x),
+ -SkFixedToScalar(linearGradient.p2.y));
+
+ // If p0p1 or p0p2 are degenerate probably nothing should be drawn.
+ // If p0p1 and p0p2 are parallel then one side is the first color and the other side is
+ // the last color, depending on the direction.
+ // For now, just use the first color.
+ if (p1 == p0 || p2 == p0 || !SkPoint::CrossProduct(p1 - p0, p2 - p0)) {
+ paint->setColor(colors[0]);
+ return true;
+ }
+
+ // Follow implementation note in nanoemoji:
+ // https://github.com/googlefonts/nanoemoji/blob/0ac6e7bb4d8202db692574d8530a9b643f1b3b3c/src/nanoemoji/svg.py#L188
+ // to compute a new gradient end point P3 as the orthogonal
+ // projection of the vector from p0 to p1 onto a line perpendicular
+ // to line p0p2 and passing through p0.
+ SkVector perpendicularToP2P0 = (p2 - p0);
+ perpendicularToP2P0 = SkPoint::Make( perpendicularToP2P0.y(),
+ -perpendicularToP2P0.x());
+ SkVector p3 = p0 + SkVectorProjection((p1 - p0), perpendicularToP2P0);
+ linePositions[1] = p3;
+
+ // Project/scale points according to stop extrema along p0p3 line,
+ // p3 being the result of the projection above, then scale stops to
+ // to [0, 1] range so that repeat modes work. The Skia linear
+ // gradient shader performs the repeat modes over the 0 to 1 range,
+ // that's why we need to scale the stops to within that range.
+ SkTileMode tileMode = ToSkTileMode(linearGradient.colorline.extend);
+ SkScalar colorStopRange = stops.back() - stops.front();
+ // If the color stops are all at the same offset position, repeat and reflect modes
+ // become meaningless.
+ if (colorStopRange == 0.f) {
+ if (tileMode != SkTileMode::kClamp) {
+ paint->setColor(SK_ColorTRANSPARENT);
+ return true;
+ } else {
+ // Insert duplicated fake color stop in pad case at +1.0f to enable the projection
+ // of circles for an originally 0-length color stop range. Adding this stop will
+ // paint the equivalent gradient, because: All font specified color stops are in the
+ // same spot, mode is pad, so everything before this spot is painted with the first
+ // color, everything after this spot is painted with the last color. Not adding this
+ // stop will skip the projection and result in specifying non-normalized color stops
+ // to the shader.
+ stops.push_back(stops.back() + 1.0f);
+ colors.push_back(colors.back());
+ colorStopRange = 1.0f;
+ }
+ }
+ SkASSERT(colorStopRange != 0.f);
+
+ // If the colorStopRange is 0 at this point, the default behavior of the shader is to
+ // clamp to 1 color stops that are above 1, clamp to 0 for color stops that are below 0,
+ // and repeat the outer color stops at 0 and 1 if the color stops are inside the
+ // range. That will result in the correct rendering.
+ if ((colorStopRange != 1 || stops.front() != 0.f)) {
+ SkVector p0p3 = p3 - p0;
+ SkVector p0Offset = p0p3;
+ p0Offset.scale(stops.front());
+ SkVector p1Offset = p0p3;
+ p1Offset.scale(stops.back());
+
+ linePositions[0] = p0 + p0Offset;
+ linePositions[1] = p0 + p1Offset;
+
+ SkScalar scaleFactor = 1 / colorStopRange;
+ SkScalar startOffset = stops.front();
+ for (SkScalar& stop : stops) {
+ stop = (stop - startOffset) * scaleFactor;
+ }
+ }
+
+ sk_sp<SkShader> shader(SkGradientShader::MakeLinear(
+ linePositions,
+ colors.data(), stops.data(), stops.size(),
+ tileMode));
+ SkASSERT(shader);
+ // An opaque color is needed to ensure the gradient is not modulated by alpha.
+ paint->setColor(SK_ColorBLACK);
+ paint->setShader(shader);
+ return true;
+ }
+ case FT_COLR_PAINTFORMAT_RADIAL_GRADIENT: {
+ const FT_PaintRadialGradient& radialGradient = colrPaint.u.radial_gradient;
+ SkPoint start = SkPoint::Make( SkFixedToScalar(radialGradient.c0.x),
+ -SkFixedToScalar(radialGradient.c0.y));
+ SkScalar startRadius = SkFixedToScalar(radialGradient.r0);
+ SkPoint end = SkPoint::Make( SkFixedToScalar(radialGradient.c1.x),
+ -SkFixedToScalar(radialGradient.c1.y));
+ SkScalar endRadius = SkFixedToScalar(radialGradient.r1);
+
+
+ std::vector<SkScalar> stops;
+ std::vector<SkColor> colors;
+ if (!fetchColorStops(radialGradient.colorline.color_stop_iterator, stops, colors)) {
+ return false;
+ }
+
+ if (stops.size() == 1) {
+ paint->setColor(colors[0]);
+ return true;
+ }
+
+ SkScalar colorStopRange = stops.back() - stops.front();
+ SkTileMode tileMode = ToSkTileMode(radialGradient.colorline.extend);
+
+ if (colorStopRange == 0.f) {
+ if (tileMode != SkTileMode::kClamp) {
+ paint->setColor(SK_ColorTRANSPARENT);
+ return true;
+ } else {
+ // Insert duplicated fake color stop in pad case at +1.0f to enable the projection
+ // of circles for an originally 0-length color stop range. Adding this stop will
+ // paint the equivalent gradient, because: All font specified color stops are in the
+ // same spot, mode is pad, so everything before this spot is painted with the first
+ // color, everything after this spot is painted with the last color. Not adding this
+ // stop will skip the projection and result in specifying non-normalized color stops
+ // to the shader.
+ stops.push_back(stops.back() + 1.0f);
+ colors.push_back(colors.back());
+ colorStopRange = 1.0f;
+ }
+ }
+ SkASSERT(colorStopRange != 0.f);
+
+ // If the colorStopRange is 0 at this point, the default behavior of the shader is to
+ // clamp to 1 color stops that are above 1, clamp to 0 for color stops that are below 0,
+ // and repeat the outer color stops at 0 and 1 if the color stops are inside the
+ // range. That will result in the correct rendering.
+ if (colorStopRange != 1 || stops.front() != 0.f) {
+                // For the Skia two-point conical shader to understand the
+ // COLRv1 color stops we need to scale stops to 0 to 1 range and
+ // interpolate new centers and radii. Otherwise the shader
+ // clamps stops outside the range to 0 and 1 (larger interval)
+                // or repeats the outer stops at 0 and 1 (smaller
+                // interval).
+ SkVector startToEnd = end - start;
+ SkScalar radiusDiff = endRadius - startRadius;
+ SkScalar scaleFactor = 1 / colorStopRange;
+ SkScalar stopsStartOffset = stops.front();
+
+ SkVector startOffset = startToEnd;
+ startOffset.scale(stops.front());
+ SkVector endOffset = startToEnd;
+ endOffset.scale(stops.back());
+
+ // The order of the following computations is important in order to avoid
+ // overwriting start or startRadius before the second reassignment.
+ end = start + endOffset;
+ start = start + startOffset;
+ endRadius = startRadius + radiusDiff * stops.back();
+ startRadius = startRadius + radiusDiff * stops.front();
+
+ for (auto& stop : stops) {
+ stop = (stop - stopsStartOffset) * scaleFactor;
+ }
+ }
+
+ // For negative radii, interpolation is needed to prepare parameters suitable
+ // for invoking the shader. Implementation below as resolution discussed in
+ // https://github.com/googlefonts/colr-gradients-spec/issues/367.
+ // Truncate to manually interpolated color for tile mode clamp, otherwise
+ // calculate positive projected circles.
+ if (startRadius < 0 || endRadius < 0) {
+ if (startRadius == endRadius && startRadius < 0) {
+ paint->setColor(SK_ColorTRANSPARENT);
+ return true;
+ }
+
+ if (tileMode == SkTileMode::kClamp) {
+ SkVector startToEnd = end - start;
+ SkScalar radiusDiff = endRadius - startRadius;
+ SkScalar zeroRadiusStop = 0.f;
+ TruncateStops truncateSide = TruncateStart;
+ if (startRadius < 0) {
+ truncateSide = TruncateStart;
+
+ // Compute color stop position where radius is = 0. After the scaling
+ // of stop positions to the normal 0,1 range that we have done above,
+ // the size of the radius as a function of the color stops is: r(x) = r0
+ // + x*(r1-r0) Solving this function for r(x) = 0, we get: x = -r0 /
+ // (r1-r0)
+ zeroRadiusStop = -startRadius / (endRadius - startRadius);
+ startRadius = 0.f;
+ SkVector startEndDiff = end - start;
+ startEndDiff.scale(zeroRadiusStop);
+ start = start + startEndDiff;
+ }
+
+ if (endRadius < 0) {
+ truncateSide = TruncateEnd;
+ zeroRadiusStop = -startRadius / (endRadius - startRadius);
+ endRadius = 0.f;
+ SkVector startEndDiff = end - start;
+ startEndDiff.scale(1 - zeroRadiusStop);
+ end = end - startEndDiff;
+ }
+
+ if (!(startRadius == 0 && endRadius == 0)) {
+ truncateToStopInterpolating(
+ zeroRadiusStop, colors, stops, truncateSide);
+ } else {
+                        // If both radii have become negative and were clamped to 0, we need to
+ // produce a single color cone, otherwise the shader colors the whole
+ // plane in a single color when two radii are specified as 0.
+ if (radiusDiff > 0) {
+ end = start + startToEnd;
+ endRadius = radiusDiff;
+ colors.erase(colors.begin(), colors.end() - 1);
+ stops.erase(stops.begin(), stops.end() - 1);
+ } else {
+ start -= startToEnd;
+ startRadius = -radiusDiff;
+ colors.erase(colors.begin() + 1, colors.end());
+ stops.erase(stops.begin() + 1, stops.end());
+ }
+ }
+ } else {
+ if (startRadius < 0 || endRadius < 0) {
+ auto roundIntegerMultiple = [](SkScalar factorZeroCrossing,
+ SkTileMode tileMode) {
+ int roundedMultiple = factorZeroCrossing > 0
+ ? ceilf(factorZeroCrossing)
+ : floorf(factorZeroCrossing) - 1;
+ if (tileMode == SkTileMode::kMirror && roundedMultiple % 2 != 0) {
+ roundedMultiple += roundedMultiple < 0 ? -1 : 1;
+ }
+ return roundedMultiple;
+ };
+
+ SkVector startToEnd = end - start;
+ SkScalar radiusDiff = endRadius - startRadius;
+ SkScalar factorZeroCrossing = (startRadius / (startRadius - endRadius));
+ bool inRange = 0.f <= factorZeroCrossing && factorZeroCrossing <= 1.0f;
+ SkScalar direction = inRange && radiusDiff < 0 ? -1.0f : 1.0f;
+ SkScalar circleProjectionFactor =
+ roundIntegerMultiple(factorZeroCrossing * direction, tileMode);
+ startToEnd.scale(circleProjectionFactor);
+ startRadius += circleProjectionFactor * radiusDiff;
+ endRadius += circleProjectionFactor * radiusDiff;
+ start += startToEnd;
+ end += startToEnd;
+ }
+ }
+ }
+
+ // An opaque color is needed to ensure the gradient is not modulated by alpha.
+ paint->setColor(SK_ColorBLACK);
+
+ paint->setShader(SkGradientShader::MakeTwoPointConical(
+ start, startRadius, end, endRadius, colors.data(), stops.data(), stops.size(),
+ tileMode));
+ return true;
+ }
+ case FT_COLR_PAINTFORMAT_SWEEP_GRADIENT: {
+ const FT_PaintSweepGradient& sweepGradient = colrPaint.u.sweep_gradient;
+ SkPoint center = SkPoint::Make( SkFixedToScalar(sweepGradient.center.x),
+ -SkFixedToScalar(sweepGradient.center.y));
+
+
+ SkScalar startAngle = SkFixedToScalar(sweepGradient.start_angle * 180.0f);
+ SkScalar endAngle = SkFixedToScalar(sweepGradient.end_angle * 180.0f);
+ // OpenType 1.9.1 adds a shift to the angle to ease specification of a 0 to 360
+ // degree sweep.
+ startAngle += 180.0f;
+ endAngle += 180.0f;
+
+ std::vector<SkScalar> stops;
+ std::vector<SkColor> colors;
+ if (!fetchColorStops(sweepGradient.colorline.color_stop_iterator, stops, colors)) {
+ return false;
+ }
+
+ if (stops.size() == 1) {
+ paint->setColor(colors[0]);
+ return true;
+ }
+
+ // An opaque color is needed to ensure the gradient is not modulated by alpha.
+ paint->setColor(SK_ColorBLACK);
+
+ // New (Var)SweepGradient implementation compliant with OpenType 1.9.1 from here.
+
+ // The shader expects stops from 0 to 1, so we need to account for
+ // minimum and maximum stop positions being different from 0 and
+ // 1. We do that by scaling minimum and maximum stop positions to
+ // the 0 to 1 interval and scaling the angles inverse proportionally.
+
+ // 1) Scale angles to their equivalent positions if stops were from 0 to 1.
+
+ SkScalar sectorAngle = endAngle - startAngle;
+ SkTileMode tileMode = ToSkTileMode(sweepGradient.colorline.extend);
+ if (sectorAngle == 0 && tileMode != SkTileMode::kClamp) {
+ // "If the ColorLine's extend mode is reflect or repeat and start and end angle
+ // are equal, nothing is drawn.".
+ paint->setColor(SK_ColorTRANSPARENT);
+ return true;
+ }
+
+
+ SkScalar startAngleScaled = startAngle + sectorAngle * stops.front();
+ SkScalar endAngleScaled = startAngle + sectorAngle * stops.back();
+
+ // 2) Scale stops accordingly to 0 to 1 range.
+
+ float colorStopRange = stops.back() - stops.front();
+ bool colorStopInserted = false;
+ if (colorStopRange == 0.f) {
+ if (tileMode != SkTileMode::kClamp) {
+ paint->setColor(SK_ColorTRANSPARENT);
+ return true;
+ } else {
+ // Insert duplicated fake color stop in pad case at +1.0f to feed the shader correct
+ // values and enable painting a pad sweep gradient with two colors. Adding this stop
+ // will paint the equivalent gradient, because: All font specified color stops are
+ // in the same spot, mode is pad, so everything before this spot is painted with the
+ // first color, everything after this spot is painted with the last color. Not
+ // adding this stop will skip the projection and result in specifying non-normalized
+ // color stops to the shader.
+ stops.push_back(stops.back() + 1.0f);
+ colors.push_back(colors.back());
+ colorStopRange = 1.0f;
+ colorStopInserted = true;
+ }
+ }
+
+ SkScalar scaleFactor = 1 / colorStopRange;
+ SkScalar startOffset = stops.front();
+
+ for (SkScalar& stop : stops) {
+ stop = (stop - startOffset) * scaleFactor;
+ }
+
+ /* https://docs.microsoft.com/en-us/typography/opentype/spec/colr#sweep-gradients
+ * "The angles are expressed in counter-clockwise degrees from
+ * the direction of the positive x-axis on the design
+ * grid. [...] The color line progresses from the start angle
+ * to the end angle in the counter-clockwise direction;" -
+ * Convert angles and stops from counter-clockwise to clockwise
+ * for the shader if the gradient is not already reversed due to
+ * start angle being larger than end angle. */
+ startAngleScaled = 360.f - startAngleScaled;
+ endAngleScaled = 360.f - endAngleScaled;
+ if (startAngleScaled > endAngleScaled ||
+ (startAngleScaled == endAngleScaled && !colorStopInserted)) {
+ std::swap(startAngleScaled, endAngleScaled);
+ std::reverse(stops.begin(), stops.end());
+ std::reverse(colors.begin(), colors.end());
+ for (auto& stop : stops) {
+ stop = 1.0f - stop;
+ }
+ }
+
+ paint->setShader(SkGradientShader::MakeSweep(center.x(), center.y(),
+ colors.data(),
+ stops.data(), stops.size(),
+ tileMode,
+ startAngleScaled,
+ endAngleScaled,
+ 0, nullptr));
+ return true;
+ }
+ default: {
+ SkASSERT(false);
+ return false;
+ }
+ }
+ SkUNREACHABLE;
+}
+
+/* Draws one leaf COLRv1 paint record onto |canvas|.
+ * - GLYPH paints extract the glyph outline and clip the canvas to it.
+ * - SOLID and gradient paints are converted to an SkPaint via
+ *   colrv1_configure_skpaint and flood-filled with drawPaint().
+ * Transform-style paints must not reach this function; they are applied in
+ * colrv1_transform. Returns false if path extraction or paint setup fails. */
+bool colrv1_draw_paint(SkCanvas* canvas,
+                       const SkSpan<SkColor>& palette,
+                       const SkColor foregroundColor,
+                       FT_Face face,
+                       const FT_COLR_Paint& colrPaint) {
+    switch (colrPaint.format) {
+        case FT_COLR_PAINTFORMAT_GLYPH: {
+            FT_UInt glyphID = colrPaint.u.glyph.glyphID;
+            SkPath path;
+            /* TODO: Currently this call retrieves the path at units_per_em size. If we want to get
+             * correct hinting for the scaled size under the transforms at this point in the color
+             * glyph graph, we need to extract at least the requested glyph width and height and
+             * pass that to the path generation. */
+            if (!generateFacePathCOLRv1(face, glyphID, &path)) {
+                return false;
+            }
+            if constexpr (kSkShowTextBlitCoverage) {
+                SkPaint highlight_paint;
+                highlight_paint.setColor(0x33FF0000);
+                canvas->drawRect(path.getBounds(), highlight_paint);
+            }
+            canvas->clipPath(path, true /* doAntiAlias */);
+            return true;
+        }
+        case FT_COLR_PAINTFORMAT_SOLID:
+        case FT_COLR_PAINTFORMAT_LINEAR_GRADIENT:
+        case FT_COLR_PAINTFORMAT_RADIAL_GRADIENT:
+        case FT_COLR_PAINTFORMAT_SWEEP_GRADIENT: {
+            SkPaint skPaint;
+            if (!colrv1_configure_skpaint(face, palette, foregroundColor, colrPaint, &skPaint)) {
+                return false;
+            }
+            canvas->drawPaint(skPaint);
+            return true;
+        }
+        case FT_COLR_PAINTFORMAT_TRANSFORM:
+        case FT_COLR_PAINTFORMAT_TRANSLATE:
+        case FT_COLR_PAINTFORMAT_SCALE:
+        case FT_COLR_PAINTFORMAT_ROTATE:
+        case FT_COLR_PAINTFORMAT_SKEW:
+            [[fallthrough]]; // Transforms handled in colrv1_transform.
+        default:
+            SkASSERT(false);
+            return false;
+    }
+    SkUNREACHABLE;
+}
+
+/* Fast path for a GlyphPaint immediately followed by a solid/gradient fill:
+ * instead of clipPath() + drawPaint(), draw the outline directly with
+ * drawPath() and the configured fill SkPaint, which is cheaper.
+ * Returns false if the fill paint cannot be configured or the glyph path
+ * cannot be generated. */
+bool colrv1_draw_glyph_with_path(SkCanvas* canvas,
+                                 const SkSpan<SkColor>& palette, SkColor foregroundColor,
+                                 FT_Face face,
+                                 const FT_COLR_Paint& glyphPaint, const FT_COLR_Paint& fillPaint) {
+    SkASSERT(glyphPaint.format == FT_COLR_PAINTFORMAT_GLYPH);
+    SkASSERT(fillPaint.format == FT_COLR_PAINTFORMAT_SOLID ||
+             fillPaint.format == FT_COLR_PAINTFORMAT_LINEAR_GRADIENT ||
+             fillPaint.format == FT_COLR_PAINTFORMAT_RADIAL_GRADIENT ||
+             fillPaint.format == FT_COLR_PAINTFORMAT_SWEEP_GRADIENT);
+
+    SkPaint skiaFillPaint;
+    skiaFillPaint.setAntiAlias(true);
+    if (!colrv1_configure_skpaint(face, palette, foregroundColor, fillPaint, &skiaFillPaint)) {
+        return false;
+    }
+
+    FT_UInt glyphID = glyphPaint.u.glyph.glyphID;
+    SkPath path;
+    /* TODO: Currently this call retrieves the path at units_per_em size. If we want to get
+     * correct hinting for the scaled size under the transforms at this point in the color
+     * glyph graph, we need to extract at least the requested glyph width and height and
+     * pass that to the path generation. */
+    if (!generateFacePathCOLRv1(face, glyphID, &path)) {
+        return false;
+    }
+    if constexpr (kSkShowTextBlitCoverage) {
+        SkPaint highlightPaint;
+        highlightPaint.setColor(0x33FF0000);
+        canvas->drawRect(path.getBounds(), highlightPaint);
+    }
+    canvas->drawPath(path, skiaFillPaint);
+    return true;
+}
+
+
+/* In drawing mode, concatenates the transforms directly on SkCanvas. In
+ * bounding box calculation mode, no SkCanvas is specified, but we only want to
+ * retrieve the transform from the FreeType paint object. */
+void colrv1_transform(FT_Face face,
+                      const FT_COLR_Paint& colrPaint,
+                      SkCanvas* canvas,
+                      SkMatrix* outTransform = nullptr) {
+    SkMatrix transform;
+
+    SkASSERT(canvas || outTransform);
+
+    switch (colrPaint.format) {
+        case FT_COLR_PAINTFORMAT_TRANSFORM: {
+            transform = ToSkMatrix(colrPaint.u.transform.affine);
+            break;
+        }
+        case FT_COLR_PAINTFORMAT_TRANSLATE: {
+            // dy is negated: FreeType's y-axis points up, Skia's points down.
+            transform = SkMatrix::Translate( SkFixedToScalar(colrPaint.u.translate.dx),
+                                            -SkFixedToScalar(colrPaint.u.translate.dy));
+            break;
+        }
+        case FT_COLR_PAINTFORMAT_SCALE: {
+            // The scale center's y coordinate is negated for the same reason.
+            transform.setScale( SkFixedToScalar(colrPaint.u.scale.scale_x),
+                                SkFixedToScalar(colrPaint.u.scale.scale_y),
+                                SkFixedToScalar(colrPaint.u.scale.center_x),
+                               -SkFixedToScalar(colrPaint.u.scale.center_y));
+            break;
+        }
+        case FT_COLR_PAINTFORMAT_ROTATE: {
+            // COLRv1 angles are counter-clockwise, compare
+            // https://docs.microsoft.com/en-us/typography/opentype/spec/colr#formats-24-to-27-paintrotate-paintvarrotate-paintrotatearoundcenter-paintvarrotatearoundcenter
+            transform = SkMatrix::RotateDeg(
+                    -SkFixedToScalar(colrPaint.u.rotate.angle) * 180.0f,
+                    SkPoint::Make( SkFixedToScalar(colrPaint.u.rotate.center_x),
+                                  -SkFixedToScalar(colrPaint.u.rotate.center_y)));
+            break;
+        }
+        case FT_COLR_PAINTFORMAT_SKEW: {
+            // In the PAINTFORMAT_ROTATE implementation, SkMatrix setRotate
+            // snaps to 0 for values very close to 0. Do the same here.
+
+            SkScalar xDeg = SkFixedToScalar(colrPaint.u.skew.x_skew_angle) * 180.0f;
+            SkScalar xRad = SkDegreesToRadians(xDeg);
+            SkScalar xTan = SkScalarTan(xRad);
+            xTan = SkScalarNearlyZero(xTan) ? 0.0f : xTan;
+
+            SkScalar yDeg = SkFixedToScalar(colrPaint.u.skew.y_skew_angle) * 180.0f;
+            // Negate y_skew_angle due to Skia's y-down coordinate system to achieve
+            // counter-clockwise skew along the y-axis.
+            SkScalar yRad = SkDegreesToRadians(-yDeg);
+            SkScalar yTan = SkScalarTan(yRad);
+            yTan = SkScalarNearlyZero(yTan) ? 0.0f : yTan;
+
+            transform.setSkew(xTan, yTan,
+                              SkFixedToScalar(colrPaint.u.skew.center_x),
+                              -SkFixedToScalar(colrPaint.u.skew.center_y));
+            break;
+        }
+        default: {
+            SkASSERT(false); // Only transforms are handled in this function.
+        }
+    }
+    // Drawing mode: concatenate onto the canvas. Bounds mode: report back.
+    if (canvas) {
+        canvas->concat(transform);
+    }
+    if (outTransform) {
+        *outTransform = transform;
+    }
+}
+
+// Forward declaration: colrv1_start_glyph and colrv1_traverse_paint are
+// mutually recursive (COLR_GLYPH paints restart traversal at another glyph).
+bool colrv1_start_glyph(SkCanvas* canvas,
+                        const SkSpan<SkColor>& palette,
+                        const SkColor foregroundColor,
+                        FT_Face face,
+                        uint16_t glyphId,
+                        FT_Color_Root_Transform rootTransform,
+                        VisitedSet* activePaints);
+
+/* Depth-first traversal of the COLRv1 paint graph starting at |opaquePaint|,
+ * drawing into |canvas|. |activePaints| tracks the paints on the current
+ * traversal path; revisiting one means the font contains a cycle and the
+ * traversal aborts. Canvas state is saved on entry and restored when this
+ * frame returns, so transforms and clips applied for one branch do not leak
+ * into sibling branches. Returns false on malformed data. */
+bool colrv1_traverse_paint(SkCanvas* canvas,
+                           const SkSpan<SkColor>& palette,
+                           const SkColor foregroundColor,
+                           FT_Face face,
+                           FT_OpaquePaint opaquePaint,
+                           VisitedSet* activePaints) {
+    // Cycle detection, see section "5.7.11.1.9 Color glyphs as a directed acyclic graph".
+    if (activePaints->contains(opaquePaint)) {
+        return false;
+    }
+
+    // The paint is removed from the active set again when this frame unwinds.
+    activePaints->add(opaquePaint);
+    SK_AT_SCOPE_EXIT(activePaints->remove(opaquePaint));
+
+    FT_COLR_Paint paint;
+    if (!FT_Get_Paint(face, opaquePaint, &paint)) {
+        return false;
+    }
+
+    SkAutoCanvasRestore autoRestore(canvas, true /* doSave */);
+    switch (paint.format) {
+        case FT_COLR_PAINTFORMAT_COLR_LAYERS: {
+            FT_LayerIterator& layerIterator = paint.u.colr_layers.layer_iterator;
+            FT_OpaquePaint layerPaint{nullptr, 1};
+            while (FT_Get_Paint_Layers(face, &layerIterator, &layerPaint)) {
+                if (!colrv1_traverse_paint(canvas, palette, foregroundColor, face,
+                                           layerPaint, activePaints)) {
+                    return false;
+                }
+            }
+            return true;
+        }
+        case FT_COLR_PAINTFORMAT_GLYPH:
+            // Special case paint graph leaf situations to improve
+            // performance. These are situations in the graph where a GlyphPaint
+            // is followed by either a solid or a gradient fill. Here we can use
+            // drawPath() + SkPaint directly which is faster than setting a
+            // clipPath() followed by a drawPaint().
+            FT_COLR_Paint fillPaint;
+            if (!FT_Get_Paint(face, paint.u.glyph.paint, &fillPaint)) {
+                return false;
+            }
+            if (fillPaint.format == FT_COLR_PAINTFORMAT_SOLID ||
+                fillPaint.format == FT_COLR_PAINTFORMAT_LINEAR_GRADIENT ||
+                fillPaint.format == FT_COLR_PAINTFORMAT_RADIAL_GRADIENT ||
+                fillPaint.format == FT_COLR_PAINTFORMAT_SWEEP_GRADIENT)
+            {
+                return colrv1_draw_glyph_with_path(canvas, palette, foregroundColor,
+                                                   face, paint, fillPaint);
+            }
+            // General case: clip to the glyph outline, then recurse into the child paint.
+            if (!colrv1_draw_paint(canvas, palette, foregroundColor, face, paint)) {
+                return false;
+            }
+            return colrv1_traverse_paint(canvas, palette, foregroundColor,
+                                         face, paint.u.glyph.paint, activePaints);
+        case FT_COLR_PAINTFORMAT_COLR_GLYPH:
+            return colrv1_start_glyph(canvas, palette, foregroundColor,
+                                      face, paint.u.colr_glyph.glyphID, FT_COLOR_NO_ROOT_TRANSFORM,
+                                      activePaints);
+        case FT_COLR_PAINTFORMAT_TRANSFORM:
+            colrv1_transform(face, paint, canvas);
+            return colrv1_traverse_paint(canvas, palette, foregroundColor,
+                                         face, paint.u.transform.paint, activePaints);
+        case FT_COLR_PAINTFORMAT_TRANSLATE:
+            colrv1_transform(face, paint, canvas);
+            return colrv1_traverse_paint(canvas, palette, foregroundColor,
+                                         face, paint.u.translate.paint, activePaints);
+        case FT_COLR_PAINTFORMAT_SCALE:
+            colrv1_transform(face, paint, canvas);
+            return colrv1_traverse_paint(canvas, palette, foregroundColor,
+                                         face, paint.u.scale.paint, activePaints);
+        case FT_COLR_PAINTFORMAT_ROTATE:
+            colrv1_transform(face, paint, canvas);
+            return colrv1_traverse_paint(canvas, palette, foregroundColor,
+                                         face, paint.u.rotate.paint, activePaints);
+        case FT_COLR_PAINTFORMAT_SKEW:
+            colrv1_transform(face, paint, canvas);
+            return colrv1_traverse_paint(canvas, palette, foregroundColor,
+                                         face, paint.u.skew.paint, activePaints);
+        case FT_COLR_PAINTFORMAT_COMPOSITE: {
+            // acr records the save count before the two saveLayer() calls below
+            // and restores to it on scope exit, flattening both layers.
+            SkAutoCanvasRestore acr(canvas, false);
+            canvas->saveLayer(nullptr, nullptr);
+            if (!colrv1_traverse_paint(canvas, palette, foregroundColor,
+                                       face, paint.u.composite.backdrop_paint, activePaints)) {
+                return false;
+            }
+            // The source branch draws into its own layer, which is composited
+            // onto the backdrop with the font-specified blend mode.
+            SkPaint blendModePaint;
+            blendModePaint.setBlendMode(ToSkBlendMode(paint.u.composite.composite_mode));
+            canvas->saveLayer(nullptr, &blendModePaint);
+            return colrv1_traverse_paint(canvas, palette, foregroundColor,
+                                         face, paint.u.composite.source_paint, activePaints);
+        }
+        case FT_COLR_PAINTFORMAT_SOLID:
+        case FT_COLR_PAINTFORMAT_LINEAR_GRADIENT:
+        case FT_COLR_PAINTFORMAT_RADIAL_GRADIENT:
+        case FT_COLR_PAINTFORMAT_SWEEP_GRADIENT: {
+            return colrv1_draw_paint(canvas, palette, foregroundColor, face, paint);
+        }
+        default:
+            SkASSERT(false);
+            return false;
+    }
+    SkUNREACHABLE;
+}
+
+/* Returns the COLRv1 clip box of |glyphId| as a closed four-point SkPath, or
+ * an empty path if the font provides none or a FreeType call fails. With
+ * |untransformed| set, the box is queried at units_per_EM with the identity
+ * transform: a temporary FT_Size is created and activated and the transform
+ * cleared, then the previous size and transform are restored before
+ * returning. Y coordinates are negated for Skia's y-down system. */
+SkPath GetClipBoxPath(FT_Face face, uint16_t glyphId, bool untransformed) {
+    SkPath resultPath;
+    SkUniqueFTSize unscaledFtSize = nullptr;
+    FT_Size oldSize = face->size;
+    FT_Matrix oldTransform;
+    FT_Vector oldDelta;
+    FT_Error err = 0;
+
+    if (untransformed) {
+        unscaledFtSize.reset(
+                [face]() -> FT_Size {
+                    FT_Size size;
+                    FT_Error err = FT_New_Size(face, &size);
+                    if (err != 0) {
+                        SK_TRACEFTR(err,
+                                    "FT_New_Size(%s) failed in generateFacePathStaticCOLRv1.",
+                                    face->family_name);
+                        return nullptr;
+                    }
+                    return size;
+                }());
+        if (!unscaledFtSize) {
+            return resultPath;
+        }
+
+        err = FT_Activate_Size(unscaledFtSize.get());
+        if (err != 0) {
+            return resultPath;
+        }
+
+        // Size the temporary FT_Size to the font's design grid (units_per_EM).
+        err = FT_Set_Char_Size(face, SkIntToFDot6(face->units_per_EM), 0, 0, 0);
+        if (err != 0) {
+            return resultPath;
+        }
+
+        // Remember and clear any transform so the clip box comes back untransformed.
+        FT_Get_Transform(face, &oldTransform, &oldDelta);
+        FT_Set_Transform(face, nullptr, nullptr);
+    }
+
+    FT_ClipBox colrGlyphClipBox;
+    if (FT_Get_Color_Glyph_ClipBox(face, glyphId, &colrGlyphClipBox)) {
+        resultPath = SkPath::Polygon({{ SkFDot6ToScalar(colrGlyphClipBox.bottom_left.x),
+                                       -SkFDot6ToScalar(colrGlyphClipBox.bottom_left.y)},
+                                      { SkFDot6ToScalar(colrGlyphClipBox.top_left.x),
+                                       -SkFDot6ToScalar(colrGlyphClipBox.top_left.y)},
+                                      { SkFDot6ToScalar(colrGlyphClipBox.top_right.x),
+                                       -SkFDot6ToScalar(colrGlyphClipBox.top_right.y)},
+                                      { SkFDot6ToScalar(colrGlyphClipBox.bottom_right.x),
+                                       -SkFDot6ToScalar(colrGlyphClipBox.bottom_right.y)}},
+                                     true);
+    }
+
+    if (untransformed) {
+        // Restore the original size and transform state on the face.
+        err = FT_Activate_Size(oldSize);
+        if (err != 0) {
+            return resultPath;
+        }
+        FT_Set_Transform(face, &oldTransform, &oldDelta);
+    }
+
+    return resultPath;
+}
+
+/* Entry point for drawing one COLRv1 glyph: fetches its root paint, clips the
+ * canvas to the glyph's clip box when one is defined, and traverses the paint
+ * graph. Returns false if the glyph has no COLRv1 paint or traversal fails. */
+bool colrv1_start_glyph(SkCanvas* canvas,
+                        const SkSpan<SkColor>& palette,
+                        const SkColor foregroundColor,
+                        FT_Face face,
+                        uint16_t glyphId,
+                        FT_Color_Root_Transform rootTransform,
+                        VisitedSet* activePaints) {
+    FT_OpaquePaint opaquePaint{nullptr, 1};
+    if (!FT_Get_Color_Glyph_Paint(face, glyphId, rootTransform, &opaquePaint)) {
+        return false;
+    }
+
+    // When the root transform is excluded, the clip box must also be fetched
+    // at the untransformed design size; see GetClipBoxPath.
+    bool untransformed = rootTransform == FT_COLOR_NO_ROOT_TRANSFORM;
+    SkPath clipBoxPath = GetClipBoxPath(face, glyphId, untransformed);
+    if (!clipBoxPath.isEmpty()) {
+        canvas->clipPath(clipBoxPath, true);
+    }
+
+    if (!colrv1_traverse_paint(canvas, palette, foregroundColor,
+                               face, opaquePaint, activePaints)) {
+        return false;
+    }
+
+    return true;
+}
+
+// Forward declaration: mutually recursive with colrv1_traverse_paint_bounds
+// (COLR_GLYPH paints restart the bounds traversal at another glyph).
+bool colrv1_start_glyph_bounds(SkMatrix *ctm,
+                               SkRect* bounds,
+                               FT_Face face,
+                               uint16_t glyphId,
+                               FT_Color_Root_Transform rootTransform,
+                               VisitedSet* activePaints);
+
+/* Bounds-computation analogue of colrv1_traverse_paint: walks the paint graph
+ * without a canvas, accumulating the union of transformed glyph path bounds
+ * into |bounds|. |ctm| is the current transform; it is restored on scope exit
+ * so sibling branches are unaffected. Cycle detection mirrors the drawing
+ * traversal. */
+bool colrv1_traverse_paint_bounds(SkMatrix* ctm,
+                                  SkRect* bounds,
+                                  FT_Face face,
+                                  FT_OpaquePaint opaquePaint,
+                                  VisitedSet* activePaints) {
+    // Cycle detection, see section "5.7.11.1.9 Color glyphs as a directed acyclic graph".
+    if (activePaints->contains(opaquePaint)) {
+        return false;
+    }
+
+    activePaints->add(opaquePaint);
+    SK_AT_SCOPE_EXIT(activePaints->remove(opaquePaint));
+
+    FT_COLR_Paint paint;
+    if (!FT_Get_Paint(face, opaquePaint, &paint)) {
+        return false;
+    }
+
+    // Restore the caller's transform when this frame unwinds.
+    SkMatrix restoreMatrix = *ctm;
+    SK_AT_SCOPE_EXIT(*ctm = restoreMatrix);
+
+    switch (paint.format) {
+        case FT_COLR_PAINTFORMAT_COLR_LAYERS: {
+            FT_LayerIterator& layerIterator = paint.u.colr_layers.layer_iterator;
+            FT_OpaquePaint layerPaint{nullptr, 1};
+            while (FT_Get_Paint_Layers(face, &layerIterator, &layerPaint)) {
+                if (!colrv1_traverse_paint_bounds(ctm, bounds, face, layerPaint, activePaints)) {
+                    return false;
+                }
+            }
+            return true;
+        }
+        case FT_COLR_PAINTFORMAT_GLYPH: {
+            // Leaf: the glyph outline contributes its transformed bounds.
+            FT_UInt glyphID = paint.u.glyph.glyphID;
+            SkPath path;
+            if (!generateFacePathCOLRv1(face, glyphID, &path)) {
+                return false;
+            }
+            path.transform(*ctm);
+            bounds->join(path.getBounds());
+            return true;
+        }
+        case FT_COLR_PAINTFORMAT_COLR_GLYPH: {
+            FT_UInt glyphID = paint.u.colr_glyph.glyphID;
+            return colrv1_start_glyph_bounds(ctm, bounds, face, glyphID, FT_COLOR_NO_ROOT_TRANSFORM,
+                                             activePaints);
+        }
+        case FT_COLR_PAINTFORMAT_TRANSFORM: {
+            SkMatrix transformMatrix;
+            colrv1_transform(face, paint, nullptr, &transformMatrix);
+            ctm->preConcat(transformMatrix);
+            FT_OpaquePaint& transformPaint = paint.u.transform.paint;
+            return colrv1_traverse_paint_bounds(ctm, bounds, face, transformPaint, activePaints);
+        }
+        case FT_COLR_PAINTFORMAT_TRANSLATE: {
+            SkMatrix transformMatrix;
+            colrv1_transform(face, paint, nullptr, &transformMatrix);
+            ctm->preConcat(transformMatrix);
+            FT_OpaquePaint& translatePaint = paint.u.translate.paint;
+            return colrv1_traverse_paint_bounds(ctm, bounds, face, translatePaint, activePaints);
+        }
+        case FT_COLR_PAINTFORMAT_SCALE: {
+            SkMatrix transformMatrix;
+            colrv1_transform(face, paint, nullptr, &transformMatrix);
+            ctm->preConcat(transformMatrix);
+            FT_OpaquePaint& scalePaint = paint.u.scale.paint;
+            return colrv1_traverse_paint_bounds(ctm, bounds, face, scalePaint, activePaints);
+        }
+        case FT_COLR_PAINTFORMAT_ROTATE: {
+            SkMatrix transformMatrix;
+            colrv1_transform(face, paint, nullptr, &transformMatrix);
+            ctm->preConcat(transformMatrix);
+            FT_OpaquePaint& rotatePaint = paint.u.rotate.paint;
+            return colrv1_traverse_paint_bounds(ctm, bounds, face, rotatePaint, activePaints);
+        }
+        case FT_COLR_PAINTFORMAT_SKEW: {
+            SkMatrix transformMatrix;
+            colrv1_transform(face, paint, nullptr, &transformMatrix);
+            ctm->preConcat(transformMatrix);
+            FT_OpaquePaint& skewPaint = paint.u.skew.paint;
+            return colrv1_traverse_paint_bounds(ctm, bounds, face, skewPaint, activePaints);
+        }
+        case FT_COLR_PAINTFORMAT_COMPOSITE: {
+            FT_OpaquePaint& backdropPaint = paint.u.composite.backdrop_paint;
+            FT_OpaquePaint& sourcePaint = paint.u.composite. source_paint;
+            return colrv1_traverse_paint_bounds(ctm, bounds, face, backdropPaint, activePaints) &&
+                   colrv1_traverse_paint_bounds(ctm, bounds, face, sourcePaint, activePaints);
+        }
+        case FT_COLR_PAINTFORMAT_SOLID:
+        case FT_COLR_PAINTFORMAT_LINEAR_GRADIENT:
+        case FT_COLR_PAINTFORMAT_RADIAL_GRADIENT:
+        case FT_COLR_PAINTFORMAT_SWEEP_GRADIENT: {
+            // Fill paints contribute no geometry of their own here.
+            return true;
+        }
+        default:
+            SkASSERT(false);
+            return false;
+    }
+    SkUNREACHABLE;
+}
+
+
+/* Computes the bounds of a COLRv1 glyph by traversing its paint graph in
+ * bounds mode. Returns false if the glyph has no COLRv1 paint or the
+ * traversal encounters malformed data. */
+bool colrv1_start_glyph_bounds(SkMatrix *ctm,
+                               SkRect* bounds,
+                               FT_Face face,
+                               uint16_t glyphId,
+                               FT_Color_Root_Transform rootTransform,
+                               VisitedSet* activePaints) {
+    FT_OpaquePaint opaquePaint{nullptr, 1};
+    return FT_Get_Color_Glyph_Paint(face, glyphId, rootTransform, &opaquePaint) &&
+           colrv1_traverse_paint_bounds(ctm, bounds, face, opaquePaint, activePaints);
+}
+#endif // TT_SUPPORT_COLRV1
+
+} // namespace
+
+
+#ifdef TT_SUPPORT_COLRV1
+/* Renders a COLRv1 color glyph onto |canvas|: applies the subpixel phase as a
+ * canvas translation, then traverses the glyph's paint graph with the root
+ * transform included. Returns whether any COLRv1 layers were found/drawn. */
+bool SkScalerContext_FreeType_Base::drawCOLRv1Glyph(FT_Face face,
+                                                    const SkGlyph& glyph,
+                                                    uint32_t loadGlyphFlags,
+                                                    SkSpan<SkColor> palette,
+                                                    SkCanvas* canvas) {
+    if (this->isSubpixel()) {
+        canvas->translate(SkFixedToScalar(glyph.getSubXFixed()),
+                          SkFixedToScalar(glyph.getSubYFixed()));
+    }
+
+    VisitedSet activePaints;
+    bool haveLayers = colrv1_start_glyph(canvas, palette,
+                                         fRec.fForegroundColor,
+                                         face, glyph.getGlyphID(),
+                                         FT_COLOR_INCLUDE_ROOT_TRANSFORM,
+                                         &activePaints);
+    SkASSERTF(haveLayers, "Could not get COLRv1 layers from '%s'.", face->family_name);
+    return haveLayers;
+}
+#endif // TT_SUPPORT_COLRV1
+
+#ifdef FT_COLOR_H
+/* Renders a legacy COLRv0 layered glyph: iterates the glyph's layers and
+ * paints each layer's outline with the indicated palette color. Returns
+ * whether any layers were found. */
+bool SkScalerContext_FreeType_Base::drawCOLRv0Glyph(FT_Face face,
+                                                    const SkGlyph& glyph,
+                                                    uint32_t loadGlyphFlags,
+                                                    SkSpan<SkColor> palette,
+                                                    SkCanvas* canvas) {
+    if (this->isSubpixel()) {
+        canvas->translate(SkFixedToScalar(glyph.getSubXFixed()),
+                          SkFixedToScalar(glyph.getSubYFixed()));
+    }
+
+    bool haveLayers = false;
+    FT_LayerIterator layerIterator;
+    layerIterator.p = nullptr;
+    FT_UInt layerGlyphIndex = 0;
+    FT_UInt layerColorIndex = 0;
+    SkPaint paint;
+    paint.setAntiAlias(!(loadGlyphFlags & FT_LOAD_TARGET_MONO));
+    while (FT_Get_Color_Glyph_Layer(face, glyph.getGlyphID(), &layerGlyphIndex,
+                                    &layerColorIndex, &layerIterator)) {
+        haveLayers = true;
+        // Index 0xFFFF selects the current text foreground color rather than
+        // a palette entry.
+        if (layerColorIndex == 0xFFFF) {
+            paint.setColor(fRec.fForegroundColor);
+        } else {
+            paint.setColor(palette[layerColorIndex]);
+        }
+        SkPath path;
+        if (this->generateFacePath(face, layerGlyphIndex, loadGlyphFlags, &path)) {
+            canvas->drawPath(path, paint);
+        }
+    }
+    SkASSERTF(haveLayers, "Could not get COLRv0 layers from '%s'.", face->family_name);
+    return haveLayers;
+}
+#endif // FT_COLOR_H
+
+#if defined(FT_CONFIG_OPTION_SVG)
+/* Renders an OT-SVG glyph via the process-wide OpenType SVG decoder factory.
+ * Builds the canvas transform from FreeType's SVG document matrix, delta and
+ * metrics scale (y components negated for Skia's y-down system), applies
+ * subpixel positioning, then delegates rendering to the decoder. Returns
+ * false if no decoder factory is installed or decoding fails. */
+bool SkScalerContext_FreeType_Base::drawSVGGlyph(FT_Face face,
+                                                 const SkGlyph& glyph,
+                                                 uint32_t loadGlyphFlags,
+                                                 SkSpan<SkColor> palette,
+                                                 SkCanvas* canvas) {
+    SkASSERT(face->glyph->format == FT_GLYPH_FORMAT_SVG);
+
+    FT_SVG_Document ftSvg = (FT_SVG_Document)face->glyph->other;
+    SkMatrix m;
+    FT_Matrix ftMatrix = ftSvg->transform;
+    FT_Vector ftOffset = ftSvg->delta;
+    m.setAll(
+        SkFixedToFloat(ftMatrix.xx), -SkFixedToFloat(ftMatrix.xy), SkFixedToFloat(ftOffset.x),
+       -SkFixedToFloat(ftMatrix.yx),  SkFixedToFloat(ftMatrix.yy), -SkFixedToFloat(ftOffset.y),
+        0          ,                  0          ,                  1          );
+    m.postScale(SkFixedToFloat(ftSvg->metrics.x_scale) / 64.0f,
+                SkFixedToFloat(ftSvg->metrics.y_scale) / 64.0f);
+    if (this->isSubpixel()) {
+        m.postTranslate(SkFixedToScalar(glyph.getSubXFixed()),
+                        SkFixedToScalar(glyph.getSubYFixed()));
+    }
+    canvas->concat(m);
+
+    SkGraphics::OpenTypeSVGDecoderFactory svgFactory = SkGraphics::GetOpenTypeSVGDecoderFactory();
+    if (!svgFactory) {
+        return false;
+    }
+    auto svgDecoder = svgFactory(ftSvg->svg_document, ftSvg->svg_document_length);
+    if (!svgDecoder) {
+        return false;
+    }
+    return svgDecoder->render(*canvas, ftSvg->units_per_EM, glyph.getGlyphID(),
+                              fRec.fForegroundColor, palette);
+}
+#endif // FT_CONFIG_OPTION_SVG
+
+/* Rasterizes the already-loaded FreeType glyph slot into glyph.fImage.
+ * - Outline glyphs: LCD16 masks are rendered by FreeType's LCD renderer and
+ *   then clipped/converted against the SkMask bounds; other mask formats are
+ *   rendered directly into glyph.fImage with FT_Outline_Get_Bitmap.
+ * - Bitmap glyphs: copied directly when bitmapTransform is identity,
+ *   otherwise drawn through an SkCanvas with the transform applied and
+ *   converted back to the requested mask format.
+ * Finally applies the gamma pre-blend to A8 masks when enabled. */
+void SkScalerContext_FreeType_Base::generateGlyphImage(FT_Face face,
+                                                       const SkGlyph& glyph,
+                                                       const SkMatrix& bitmapTransform)
+{
+    switch ( face->glyph->format ) {
+        case FT_GLYPH_FORMAT_OUTLINE: {
+            FT_Outline* outline = &face->glyph->outline;
+
+            int dx = 0, dy = 0;
+            if (this->isSubpixel()) {
+                dx = SkFixedToFDot6(glyph.getSubXFixed());
+                dy = SkFixedToFDot6(glyph.getSubYFixed());
+                // negate dy since freetype-y-goes-up and skia-y-goes-down
+                dy = -dy;
+            }
+
+            memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+
+            if (SkMask::kLCD16_Format == glyph.fMaskFormat) {
+                const bool doBGR = SkToBool(fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag);
+                const bool doVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+
+                FT_Outline_Translate(outline, dx, dy);
+                FT_Error err = FT_Render_Glyph(face->glyph, doVert ? FT_RENDER_MODE_LCD_V :
+                                                                     FT_RENDER_MODE_LCD);
+                if (err) {
+                    SK_TRACEFTR(err, "Could not render glyph %p.", face->glyph);
+                    return;
+                }
+
+                SkMask mask = glyph.mask();
+                if constexpr (kSkShowTextBlitCoverage) {
+                    memset(mask.fImage, 0x80, mask.fBounds.height() * mask.fRowBytes);
+                }
+                FT_GlyphSlotRec& ftGlyph = *face->glyph;
+
+                if (!SkIRect::Intersects(mask.fBounds,
+                                         SkIRect::MakeXYWH( ftGlyph.bitmap_left,
+                                                           -ftGlyph.bitmap_top,
+                                                            ftGlyph.bitmap.width,
+                                                            ftGlyph.bitmap.rows)))
+                {
+                    return;
+                }
+
+                // If the FT_Bitmap extent is larger, discard bits of the bitmap outside the mask.
+                // If the SkMask extent is larger, shrink mask to fit bitmap (clearing discarded).
+                unsigned char* origBuffer = ftGlyph.bitmap.buffer;
+                // First align the top left (origin).
+                if (-ftGlyph.bitmap_top < mask.fBounds.fTop) {
+                    int32_t topDiff = mask.fBounds.fTop - (-ftGlyph.bitmap_top);
+                    ftGlyph.bitmap.buffer += ftGlyph.bitmap.pitch * topDiff;
+                    ftGlyph.bitmap.rows -= topDiff;
+                    ftGlyph.bitmap_top = -mask.fBounds.fTop;
+                }
+                if (ftGlyph.bitmap_left < mask.fBounds.fLeft) {
+                    int32_t leftDiff = mask.fBounds.fLeft - ftGlyph.bitmap_left;
+                    ftGlyph.bitmap.buffer += leftDiff;
+                    ftGlyph.bitmap.width -= leftDiff;
+                    ftGlyph.bitmap_left = mask.fBounds.fLeft;
+                }
+                if (mask.fBounds.fTop < -ftGlyph.bitmap_top) {
+                    mask.fImage += mask.fRowBytes * (-ftGlyph.bitmap_top - mask.fBounds.fTop);
+                    mask.fBounds.fTop = -ftGlyph.bitmap_top;
+                }
+                if (mask.fBounds.fLeft < ftGlyph.bitmap_left) {
+                    mask.fImage += sizeof(uint16_t) * (ftGlyph.bitmap_left - mask.fBounds.fLeft);
+                    mask.fBounds.fLeft = ftGlyph.bitmap_left;
+                }
+                // Origins aligned, clean up the width and height.
+                int ftVertScale = (doVert ? 3 : 1);
+                int ftHoriScale = (doVert ? 1 : 3);
+                if (mask.fBounds.height() * ftVertScale < SkToInt(ftGlyph.bitmap.rows)) {
+                    ftGlyph.bitmap.rows = mask.fBounds.height() * ftVertScale;
+                }
+                if (mask.fBounds.width() * ftHoriScale < SkToInt(ftGlyph.bitmap.width)) {
+                    ftGlyph.bitmap.width = mask.fBounds.width() * ftHoriScale;
+                }
+                if (SkToInt(ftGlyph.bitmap.rows) < mask.fBounds.height() * ftVertScale) {
+                    mask.fBounds.fBottom = mask.fBounds.fTop + ftGlyph.bitmap.rows / ftVertScale;
+                }
+                if (SkToInt(ftGlyph.bitmap.width) < mask.fBounds.width() * ftHoriScale) {
+                    mask.fBounds.fRight = mask.fBounds.fLeft + ftGlyph.bitmap.width / ftHoriScale;
+                }
+                if (fPreBlend.isApplicable()) {
+                    copyFT2LCD16<true>(ftGlyph.bitmap, mask, doBGR,
+                                       fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+                } else {
+                    copyFT2LCD16<false>(ftGlyph.bitmap, mask, doBGR,
+                                        fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+                }
+                // Restore the buffer pointer so FreeType can properly free it.
+                ftGlyph.bitmap.buffer = origBuffer;
+            } else {
+                // Non-LCD formats: render the outline straight into glyph.fImage.
+                FT_BBox bbox;
+                FT_Bitmap target;
+                FT_Outline_Get_CBox(outline, &bbox);
+                /*
+                what we really want to do for subpixel is
+                offset(dx, dy)
+                compute_bounds
+                offset(bbox & !63)
+                but that is two calls to offset, so we do the following, which
+                achieves the same thing with only one offset call.
+                */
+                FT_Outline_Translate(outline, dx - ((bbox.xMin + dx) & ~63),
+                                              dy - ((bbox.yMin + dy) & ~63));
+
+                target.width = glyph.fWidth;
+                target.rows = glyph.fHeight;
+                target.pitch = glyph.rowBytes();
+                target.buffer = reinterpret_cast<uint8_t*>(glyph.fImage);
+                target.pixel_mode = compute_pixel_mode(glyph.fMaskFormat);
+                target.num_grays = 256;
+
+                FT_Outline_Get_Bitmap(face->glyph->library, outline, &target);
+                if constexpr (kSkShowTextBlitCoverage) {
+                    if (glyph.fMaskFormat == SkMask::kBW_Format) {
+                        for (unsigned y = 0; y < target.rows; y += 2) {
+                            for (unsigned x = (y & 0x2); x < target.width; x+=4) {
+                                uint8_t& b = target.buffer[(target.pitch * y) + (x >> 3)];
+                                b = b ^ (1 << (0x7 - (x & 0x7)));
+                            }
+                        }
+                    } else {
+                        for (unsigned y = 0; y < target.rows; ++y) {
+                            for (unsigned x = 0; x < target.width; ++x) {
+                                uint8_t& a = target.buffer[(target.pitch * y) + x];
+                                a = std::max<uint8_t>(a, 0x20);
+                            }
+                        }
+                    }
+                }
+            }
+        } break;
+
+        case FT_GLYPH_FORMAT_BITMAP: {
+            FT_Pixel_Mode pixel_mode = static_cast<FT_Pixel_Mode>(face->glyph->bitmap.pixel_mode);
+            SkMask::Format maskFormat = static_cast<SkMask::Format>(glyph.fMaskFormat);
+
+            // Assume that the other formats do not exist.
+            SkASSERT(FT_PIXEL_MODE_MONO == pixel_mode ||
+                     FT_PIXEL_MODE_GRAY == pixel_mode ||
+                     FT_PIXEL_MODE_BGRA == pixel_mode);
+
+            // These are the only formats this ScalerContext should request.
+            SkASSERT(SkMask::kBW_Format == maskFormat ||
+                     SkMask::kA8_Format == maskFormat ||
+                     SkMask::kARGB32_Format == maskFormat ||
+                     SkMask::kLCD16_Format == maskFormat);
+
+            // If no scaling needed, directly copy glyph bitmap.
+            if (bitmapTransform.isIdentity()) {
+                SkMask dstMask = glyph.mask();
+                copyFTBitmap(face->glyph->bitmap, dstMask);
+                break;
+            }
+
+            // Otherwise, scale the bitmap.
+
+            // Copy the FT_Bitmap into an SkBitmap (either A8 or ARGB)
+            SkBitmap unscaledBitmap;
+            // TODO: mark this as sRGB when the blits will be sRGB.
+            unscaledBitmap.setInfo(SkImageInfo::Make(face->glyph->bitmap.width,
+                                                     face->glyph->bitmap.rows,
+                                                     SkColorType_for_FTPixelMode(pixel_mode),
+                                                     kPremul_SkAlphaType));
+            if (!unscaledBitmap.tryAllocPixels()) {
+                // TODO: set the fImage to indicate "missing"
+                memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+                return;
+            }
+
+            SkMask unscaledBitmapAlias;
+            unscaledBitmapAlias.fImage = reinterpret_cast<uint8_t*>(unscaledBitmap.getPixels());
+            unscaledBitmapAlias.fBounds.setWH(unscaledBitmap.width(), unscaledBitmap.height());
+            unscaledBitmapAlias.fRowBytes = unscaledBitmap.rowBytes();
+            unscaledBitmapAlias.fFormat = SkMaskFormat_for_SkColorType(unscaledBitmap.colorType());
+            copyFTBitmap(face->glyph->bitmap, unscaledBitmapAlias);
+
+            // Wrap the glyph's mask in a bitmap, unless the glyph's mask is BW or LCD.
+            // BW requires an A8 target for resizing, which can then be down sampled.
+            // LCD should use a 4x A8 target, which will then be down sampled.
+            // For simplicity, LCD uses A8 and is replicated.
+            int bitmapRowBytes = 0;
+            if (SkMask::kBW_Format != maskFormat && SkMask::kLCD16_Format != maskFormat) {
+                bitmapRowBytes = glyph.rowBytes();
+            }
+            SkBitmap dstBitmap;
+            // TODO: mark this as sRGB when the blits will be sRGB.
+            dstBitmap.setInfo(SkImageInfo::Make(glyph.fWidth, glyph.fHeight,
+                                                SkColorType_for_SkMaskFormat(maskFormat),
+                                                kPremul_SkAlphaType),
+                              bitmapRowBytes);
+            if (SkMask::kBW_Format == maskFormat || SkMask::kLCD16_Format == maskFormat) {
+                if (!dstBitmap.tryAllocPixels()) {
+                    // TODO: set the fImage to indicate "missing"
+                    memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+                    return;
+                }
+            } else {
+                dstBitmap.setPixels(glyph.fImage);
+            }
+
+            // Scale unscaledBitmap into dstBitmap.
+            SkCanvas canvas(dstBitmap);
+            if constexpr (kSkShowTextBlitCoverage) {
+                canvas.clear(0x33FF0000);
+            } else {
+                canvas.clear(SK_ColorTRANSPARENT);
+            }
+            canvas.translate(-glyph.fLeft, -glyph.fTop);
+            canvas.concat(bitmapTransform);
+            canvas.translate(face->glyph->bitmap_left, -face->glyph->bitmap_top);
+
+            SkSamplingOptions sampling(SkFilterMode::kLinear, SkMipmapMode::kNearest);
+            canvas.drawImage(unscaledBitmap.asImage().get(), 0, 0, sampling, nullptr);
+
+            // If the destination is BW or LCD, convert from A8.
+            if (SkMask::kBW_Format == maskFormat) {
+                // Copy the A8 dstBitmap into the A1 glyph.fImage.
+                SkMask dstMask = glyph.mask();
+                packA8ToA1(dstMask, dstBitmap.getAddr8(0, 0), dstBitmap.rowBytes());
+            } else if (SkMask::kLCD16_Format == maskFormat) {
+                // Copy the A8 dstBitmap into the LCD16 glyph.fImage.
+                uint8_t* src = dstBitmap.getAddr8(0, 0);
+                uint16_t* dst = reinterpret_cast<uint16_t*>(glyph.fImage);
+                for (int y = dstBitmap.height(); y --> 0;) {
+                    for (int x = 0; x < dstBitmap.width(); ++x) {
+                        dst[x] = grayToRGB16(src[x]);
+                    }
+                    dst = (uint16_t*)((char*)dst + glyph.rowBytes());
+                    src += dstBitmap.rowBytes();
+                }
+            }
+        } break;
+
+        default:
+            SkDEBUGFAIL("unknown glyph format");
+            memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
+            return;
+    }
+
+// We used to always do this pre-USE_COLOR_LUMINANCE, but with colorlum,
+// it is optional
+#if defined(SK_GAMMA_APPLY_TO_A8)
+    if (SkMask::kA8_Format == glyph.fMaskFormat && fPreBlend.isApplicable()) {
+        uint8_t* SK_RESTRICT dst = (uint8_t*)glyph.fImage;
+        unsigned rowBytes = glyph.rowBytes();
+
+        for (int y = glyph.fHeight - 1; y >= 0; --y) {
+            for (int x = glyph.fWidth - 1; x >= 0; --x) {
+                dst[x] = fPreBlend.fG[dst[x]];
+            }
+            dst += rowBytes;
+        }
+    }
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
// Adapts FT_Outline_Decompose callbacks to build an SkPath.
// FreeType reports points in 26.6 fixed point with y growing upward, so every
// coordinate is converted with SkFDot6ToScalar and y is negated to match
// Skia's y-down convention. Zero-length segments are skipped, and the moveTo
// that opens each contour is deferred until the first non-degenerate segment.
class SkFTGeometrySink {
    SkPath* fPath;
    bool fStarted;       // true once the current contour's moveTo has been emitted
    FT_Vector fCurrent;  // last point handed to us by FreeType

    // Emit the deferred moveTo for the contour start (if not yet done) and
    // advance fCurrent to pt.
    void goingTo(const FT_Vector* pt) {
        if (!fStarted) {
            fStarted = true;
            fPath->moveTo(SkFDot6ToScalar(fCurrent.x), -SkFDot6ToScalar(fCurrent.y));
        }
        fCurrent = *pt;
    }

    // True when pt differs from the last seen point (used to skip degenerate
    // segments).
    bool currentIsNot(const FT_Vector* pt) {
        return fCurrent.x != pt->x || fCurrent.y != pt->y;
    }

    // FT_Outline_Funcs move_to: close the previous contour (if any) and begin
    // a new one; the actual moveTo is deferred via goingTo.
    static int Move(const FT_Vector* pt, void* ctx) {
        SkFTGeometrySink& self = *(SkFTGeometrySink*)ctx;
        if (self.fStarted) {
            self.fPath->close();
            self.fStarted = false;
        }
        self.fCurrent = *pt;
        return 0;
    }

    // FT_Outline_Funcs line_to.
    static int Line(const FT_Vector* pt, void* ctx) {
        SkFTGeometrySink& self = *(SkFTGeometrySink*)ctx;
        if (self.currentIsNot(pt)) {
            self.goingTo(pt);
            self.fPath->lineTo(SkFDot6ToScalar(pt->x), -SkFDot6ToScalar(pt->y));
        }
        return 0;
    }

    // FT_Outline_Funcs conic_to (quadratic Bezier).
    static int Quad(const FT_Vector* pt0, const FT_Vector* pt1, void* ctx) {
        SkFTGeometrySink& self = *(SkFTGeometrySink*)ctx;
        if (self.currentIsNot(pt0) || self.currentIsNot(pt1)) {
            self.goingTo(pt1);
            self.fPath->quadTo(SkFDot6ToScalar(pt0->x), -SkFDot6ToScalar(pt0->y),
                               SkFDot6ToScalar(pt1->x), -SkFDot6ToScalar(pt1->y));
        }
        return 0;
    }

    // FT_Outline_Funcs cubic_to.
    static int Cubic(const FT_Vector* pt0, const FT_Vector* pt1, const FT_Vector* pt2, void* ctx) {
        SkFTGeometrySink& self = *(SkFTGeometrySink*)ctx;
        if (self.currentIsNot(pt0) || self.currentIsNot(pt1) || self.currentIsNot(pt2)) {
            self.goingTo(pt2);
            self.fPath->cubicTo(SkFDot6ToScalar(pt0->x), -SkFDot6ToScalar(pt0->y),
                                SkFDot6ToScalar(pt1->x), -SkFDot6ToScalar(pt1->y),
                                SkFDot6ToScalar(pt2->x), -SkFDot6ToScalar(pt2->y));
        }
        return 0;
    }

public:
    SkFTGeometrySink(SkPath* path) : fPath{path}, fStarted{false}, fCurrent{0,0} {}

    // Callback table for FT_Outline_Decompose; shift/delta of 0 keep the
    // outline coordinates untouched.
    inline static constexpr const FT_Outline_Funcs Funcs{
        /*move_to =*/ SkFTGeometrySink::Move,
        /*line_to =*/ SkFTGeometrySink::Line,
        /*conic_to =*/ SkFTGeometrySink::Quad,
        /*cubic_to =*/ SkFTGeometrySink::Cubic,
        /*shift = */ 0,
        /*delta =*/ 0,
    };
};
+
+bool generateGlyphPathStatic(FT_Face face, SkPath* path) {
+ SkFTGeometrySink sink{path};
+ if (face->glyph->format != FT_GLYPH_FORMAT_OUTLINE ||
+ FT_Outline_Decompose(&face->glyph->outline, &SkFTGeometrySink::Funcs, &sink))
+ {
+ path->reset();
+ return false;
+ }
+ path->close();
+ return true;
+}
+
+bool generateFacePathStatic(FT_Face face, SkGlyphID glyphID, uint32_t loadGlyphFlags, SkPath* path){
+ loadGlyphFlags |= FT_LOAD_BITMAP_METRICS_ONLY; // Don't decode any bitmaps.
+ loadGlyphFlags |= FT_LOAD_NO_BITMAP; // Ignore embedded bitmaps.
+ loadGlyphFlags &= ~FT_LOAD_RENDER; // Don't scan convert.
+ loadGlyphFlags &= ~FT_LOAD_COLOR; // Ignore SVG.
+ if (FT_Load_Glyph(face, glyphID, loadGlyphFlags)) {
+ path->reset();
+ return false;
+ }
+ return generateGlyphPathStatic(face, path);
+}
+
#ifdef TT_SUPPORT_COLRV1
// Generate the path for a glyph referenced from a COLRv1 paint graph.
// The outline is wanted in font design units, so a temporary FT_Size sized to
// units_per_EM at 72 dpi (one pixel per font unit) is activated for the load;
// the face's previously active size is restored before returning.
bool generateFacePathCOLRv1(FT_Face face, SkGlyphID glyphID, SkPath* path) {
    uint32_t flags = 0;
    flags |= FT_LOAD_BITMAP_METRICS_ONLY; // Don't decode any bitmaps.
    flags |= FT_LOAD_NO_BITMAP; // Ignore embedded bitmaps.
    flags &= ~FT_LOAD_RENDER; // Don't scan convert.
    flags &= ~FT_LOAD_COLOR; // Ignore SVG.
    flags |= FT_LOAD_NO_HINTING;
    flags |= FT_LOAD_NO_AUTOHINT;
    flags |= FT_LOAD_IGNORE_TRANSFORM;

    // Create a fresh FT_Size so the face's configured size is not disturbed.
    SkUniqueFTSize unscaledFtSize([face]() -> FT_Size {
        FT_Size size;
        FT_Error err = FT_New_Size(face, &size);
        if (err != 0) {
            SK_TRACEFTR(err, "FT_New_Size(%s) failed in generateFacePathStaticCOLRv1.",
                        face->family_name);
            return nullptr;
        }
        return size;
    }());

    if (!unscaledFtSize) {
        return false;
    }

    // Remember the active size so it can be restored on every exit path.
    FT_Size oldSize = face->size;

    auto tryGeneratePath = [face, &unscaledFtSize, glyphID, flags, path]() {
        FT_Error err = 0;

        err = FT_Activate_Size(unscaledFtSize.get());
        if (err != 0) {
            return false;
        }

        // units_per_EM points at 72 dpi => ppem == units_per_EM, i.e. the
        // outline comes back in design units.
        err = FT_Set_Char_Size(face, SkIntToFDot6(face->units_per_EM),
                               SkIntToFDot6(face->units_per_EM), 72, 72);
        if (err != 0) {
            return false;
        }

        err = FT_Load_Glyph(face, glyphID, flags);
        if (err != 0) {
            path->reset();
            return false;
        }

        if (!generateGlyphPathStatic(face, path)) {
            path->reset();
            return false;
        }

        return true;
    };

    bool pathGenerationResult = tryGeneratePath();

    // Restore the face's previously active size regardless of success.
    FT_Activate_Size(oldSize);

    return pathGenerationResult;
}
#endif
+
+} // namespace
+
+bool SkScalerContext_FreeType_Base::generateGlyphPath(FT_Face face, SkPath* path) {
+ if (!generateGlyphPathStatic(face, path)) {
+ return false;
+ }
+ if (face->glyph->outline.flags & FT_OUTLINE_OVERLAP) {
+ Simplify(*path, path);
+ // Simplify will return an even-odd path.
+ // A stroke+fill (for fake bold) may be incorrect for even-odd.
+ // https://github.com/flutter/flutter/issues/112546
+ AsWinding(*path, path);
+ }
+ return true;
+}
+
+bool SkScalerContext_FreeType_Base::generateFacePath(FT_Face face,
+ SkGlyphID glyphID,
+ uint32_t loadGlyphFlags,
+ SkPath* path) {
+ return generateFacePathStatic(face, glyphID, loadGlyphFlags, path);
+}
+
#ifdef TT_SUPPORT_COLRV1
// Compute a bounding box for a COLRv1 glyph by walking its paint graph.
// May change the size/transform configured on the FT_Face; callers must
// restore face state afterwards (see the declaration's doc comment).
bool SkScalerContext_FreeType_Base::computeColrV1GlyphBoundingBox(FT_Face face,
                                                                  SkGlyphID glyphID,
                                                                  SkRect* bounds) {
    SkMatrix ctm;
    *bounds = SkRect::MakeEmpty();
    // NOTE(review): activePaints presumably tracks visited paints to guard
    // against cycles in the COLRv1 graph — confirm in colrv1_start_glyph_bounds.
    VisitedSet activePaints;
    return colrv1_start_glyph_bounds(&ctm, bounds, face, glyphID,
                                     FT_COLOR_INCLUDE_ROOT_TRANSFORM, &activePaints);
}
#endif
diff --git a/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h
new file mode 100644
index 0000000000..ec9ba8b8aa
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_FreeType_common.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2006-2012 The Android Open Source Project
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKFONTHOST_FREETYPE_COMMON_H_
+#define SKFONTHOST_FREETYPE_COMMON_H_
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkSharedMutex.h"
+#include "src/utils/SkCharToGlyphCache.h"
+
+struct SkAdvancedTypefaceMetrics;
+class SkFontDescriptor;
+class SkFontData;
+
+// These are forward declared to avoid pimpl but also hide the FreeType implementation.
+typedef struct FT_LibraryRec_* FT_Library;
+typedef struct FT_FaceRec_* FT_Face;
+typedef struct FT_StreamRec_* FT_Stream;
+typedef signed long FT_Pos;
+typedef struct FT_BBox_ FT_BBox;
+
+
+#ifdef SK_DEBUG
+const char* SkTraceFtrGetError(int);
+#define SK_TRACEFTR(ERR, MSG, ...) \
+ SkDebugf("%s:%d:1: error: 0x%x '%s' " MSG "\n", __FILE__, __LINE__, ERR, \
+ SkTraceFtrGetError((int)(ERR)), __VA_ARGS__)
+#else
+#define SK_TRACEFTR(ERR, ...) do { sk_ignore_unused_variable(ERR); } while (false)
+#endif
+
+
// Shared base for FreeType-backed scaler contexts. Provides glyph image and
// path generation plus color-glyph (COLRv0/COLRv1/SVG) drawing helpers on top
// of a loaded FT_Face.
class SkScalerContext_FreeType_Base : public SkScalerContext {
protected:
    // See http://freetype.sourceforge.net/freetype2/docs/reference/ft2-bitmap_handling.html#FT_Bitmap_Embolden
    // This value was chosen by eyeballing the result in Firefox and trying to match it.
    static const FT_Pos kBitmapEmboldenStrength = 1 << 6;

    SkScalerContext_FreeType_Base(sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
                                  const SkDescriptor *desc)
        : INHERITED(std::move(typeface), effects, desc)
    {}

    // Render the glyph's COLRv0 / COLRv1 / SVG representation into the canvas.
    bool drawCOLRv0Glyph(FT_Face, const SkGlyph&, uint32_t loadGlyphFlags,
                         SkSpan<SkColor> palette, SkCanvas*);
    bool drawCOLRv1Glyph(FT_Face, const SkGlyph&, uint32_t loadGlyphFlags,
                         SkSpan<SkColor> palette, SkCanvas*);
    bool drawSVGGlyph(FT_Face, const SkGlyph&, uint32_t loadGlyphFlags,
                      SkSpan<SkColor> palette, SkCanvas*);
    void generateGlyphImage(FT_Face, const SkGlyph&, const SkMatrix& bitmapTransform);
    bool generateGlyphPath(FT_Face, SkPath*);
    bool generateFacePath(FT_Face, SkGlyphID, uint32_t loadGlyphFlags, SkPath*);

    /** Computes a bounding box for a COLRv1 glyph.
     *
     * This method may change the configured size and transforms on FT_Face. Make sure to
     * configure size, matrix and load glyphs as needed after using this function to restore the
     * state of FT_Face.
     */
    static bool computeColrV1GlyphBoundingBox(FT_Face, SkGlyphID, SkRect* bounds);

    // NOTE(review): these values appear to tag which color-glyph format was
    // chosen for a glyph — confirm against the scaler context users.
    struct ScalerContextBits {
        static const constexpr uint32_t COLRv0 = 1;
        static const constexpr uint32_t COLRv1 = 2;
        static const constexpr uint32_t SVG = 3;
    };
private:
    using INHERITED = SkScalerContext;
};
+
// Base typeface for FreeType-backed fonts. Lazily creates and caches the
// underlying FT face (FaceRec) and implements SkTypeface queries via FreeType.
class SkTypeface_FreeType : public SkTypeface {
public:
    /** For SkFontMgrs to make use of our ability to extract
     *  name and style from a stream, using FreeType's API.
     */
    class Scanner : ::SkNoncopyable {
    public:
        Scanner();
        ~Scanner();
        // Describes one variation axis of a font.
        struct AxisDefinition {
            SkFourByteTag fTag;
            SkFixed fMinimum;
            SkFixed fDefault;
            SkFixed fMaximum;
        };
        using AxisDefinitions = SkSTArray<4, AxisDefinition, true>;
        bool recognizedFont(SkStreamAsset* stream, int* numFonts) const;
        bool scanFont(SkStreamAsset* stream, int ttcIndex,
                      SkString* name, SkFontStyle* style, bool* isFixedPitch,
                      AxisDefinitions* axes) const;
        // Resolve a requested variation position into concrete per-axis values.
        static void computeAxisValues(
            AxisDefinitions axisDefinitions,
            const SkFontArguments::VariationPosition position,
            SkFixed* axisValues,
            const SkString& name,
            const SkFontArguments::VariationPosition::Coordinate* currentPosition = nullptr);
        static bool GetAxes(FT_Face face, AxisDefinitions* axes);
    private:
        FT_Face openFace(SkStreamAsset* stream, int ttcIndex, FT_Stream ftStream) const;
        FT_Library fLibrary;
        mutable SkMutex fLibraryMutex;  // guards use of fLibrary
    };

    /** Fetch units/EM from "head" table if needed (ie for bitmap fonts) */
    static int GetUnitsPerEm(FT_Face face);

    /** Return the font data, or nullptr on failure. */
    std::unique_ptr<SkFontData> makeFontData() const;
    class FaceRec;
    FaceRec* getFaceRec() const;

    static constexpr SkTypeface::FactoryId FactoryId = SkSetFourByteTag('f','r','e','e');
    static sk_sp<SkTypeface> MakeFromStream(std::unique_ptr<SkStreamAsset>, const SkFontArguments&);

protected:
    SkTypeface_FreeType(const SkFontStyle& style, bool isFixedPitch);
    ~SkTypeface_FreeType() override;

    std::unique_ptr<SkFontData> cloneFontData(const SkFontArguments&) const;
    std::unique_ptr<SkScalerContext> onCreateScalerContext(const SkScalerContextEffects&,
                                                           const SkDescriptor*) const override;
    void onFilterRec(SkScalerContextRec*) const override;
    void getGlyphToUnicodeMap(SkUnichar*) const override;
    std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
    void getPostScriptGlyphNames(SkString* dstArray) const override;
    bool onGetPostScriptName(SkString*) const override;
    int onGetUPEM() const override;
    bool onGetKerningPairAdjustments(const uint16_t glyphs[], int count,
                                     int32_t adjustments[]) const override;
    void onCharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const override;
    int onCountGlyphs() const override;

    LocalizedStrings* onCreateFamilyNameIterator() const override;

    bool onGlyphMaskNeedsCurrentColor() const override;
    int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
                                     int coordinateCount) const override;
    int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
                                       int parameterCount) const override;
    int onGetTableTags(SkFontTableTag tags[]) const override;
    size_t onGetTableData(SkFontTableTag, size_t offset,
                          size_t length, void* data) const override;
    sk_sp<SkData> onCopyTableData(SkFontTableTag) const override;

    // Subclasses supply the raw font data backing this typeface.
    virtual std::unique_ptr<SkFontData> onMakeFontData() const = 0;
    /** Utility to fill out the SkFontDescriptor palette information from the SkFontData. */
    static void FontDataPaletteToDescriptorPalette(const SkFontData&, SkFontDescriptor*);

private:
    // Lazily-initialized FreeType face record, created once on first use.
    mutable SkOnce fFTFaceOnce;
    mutable std::unique_ptr<FaceRec> fFaceRec;

    // Character -> glyph ID cache, guarded by fC2GCacheMutex.
    mutable SkSharedMutex fC2GCacheMutex;
    mutable SkCharToGlyphCache fC2GCache;

    // Computed once on demand.
    mutable SkOnce fGlyphMasksMayNeedCurrentColorOnce;
    mutable bool fGlyphMasksMayNeedCurrentColor;

    using INHERITED = SkTypeface;
};
+
// A SkTypeface_FreeType backed by in-memory font data (stream plus variation
// arguments) rather than a system-installed font.
class SkTypeface_FreeTypeStream : public SkTypeface_FreeType {
public:
    SkTypeface_FreeTypeStream(std::unique_ptr<SkFontData> fontData, const SkString familyName,
                              const SkFontStyle& style, bool isFixedPitch);
    ~SkTypeface_FreeTypeStream() override;

protected:
    void onGetFamilyName(SkString* familyName) const override;
    void onGetFontDescriptor(SkFontDescriptor*, bool* serialize) const override;
    std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
    std::unique_ptr<SkFontData> onMakeFontData() const override;
    sk_sp<SkTypeface> onMakeClone(const SkFontArguments&) const override;

private:
    const SkString fFamilyName;
    // Owning, immutable after construction.
    const std::unique_ptr<const SkFontData> fData;
};
+
+#endif // SKFONTHOST_FREETYPE_COMMON_H_
diff --git a/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp b/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp
new file mode 100644
index 0000000000..f276117c46
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_cairo.cpp
@@ -0,0 +1,681 @@
+
+/*
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkFDot6.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkPath.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkTypefaceCache.h"
+
+#include <cmath>
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_OUTLINE_H
+
+// for FT_GlyphSlot_Embolden
+#ifdef FT_SYNTHESIS_H
+#include FT_SYNTHESIS_H
+#endif
+
+// for FT_Library_SetLcdFilter
+#ifdef FT_LCD_FILTER_H
+#include FT_LCD_FILTER_H
+#else
+typedef enum FT_LcdFilter_
+{
+ FT_LCD_FILTER_NONE = 0,
+ FT_LCD_FILTER_DEFAULT = 1,
+ FT_LCD_FILTER_LIGHT = 2,
+ FT_LCD_FILTER_LEGACY = 16,
+} FT_LcdFilter;
+#endif
+
+// If compiling with FreeType before 2.5.0
+#ifndef FT_LOAD_COLOR
+# define FT_LOAD_COLOR ( 1L << 20 )
+# define FT_PIXEL_MODE_BGRA 7
+#endif
+
+// If compiling with FreeType before 2.12.0
+#ifndef FT_FACE_FLAG_SVG
+// We need the format tag so that we can switch on it and handle a possibly-
+// newer version of the library at runtime.
+static constexpr FT_UInt32 FT_IMAGE_TAG(FT_GLYPH_FORMAT_SVG, 'S', 'V', 'G', ' ');
+#endif
+
+#ifndef SK_CAN_USE_DLOPEN
+#define SK_CAN_USE_DLOPEN 1
+#endif
+#if SK_CAN_USE_DLOPEN
+#include <dlfcn.h>
+#endif
+
+#ifndef SK_FONTHOST_CAIRO_STANDALONE
+#define SK_FONTHOST_CAIRO_STANDALONE 1
+#endif
+
+static bool gFontHintingEnabled = true;
+static FT_Error (*gSetLcdFilter)(FT_Library, FT_LcdFilter) = nullptr;
+
+extern "C"
+{
+ void mozilla_LockFTLibrary(FT_Library aLibrary);
+ void mozilla_UnlockFTLibrary(FT_Library aLibrary);
+ void mozilla_AddRefSharedFTFace(void* aContext);
+ void mozilla_ReleaseSharedFTFace(void* aContext, void* aOwner);
+ void mozilla_ForgetSharedFTFaceLockOwner(void* aContext, void* aOwner);
+ int mozilla_LockSharedFTFace(void* aContext, void* aOwner);
+ void mozilla_UnlockSharedFTFace(void* aContext);
+ FT_Error mozilla_LoadFTGlyph(FT_Face aFace, uint32_t aGlyphIndex, int32_t aFlags);
+ void mozilla_glyphslot_embolden_less(FT_GlyphSlot slot);
+}
+
// One-time initialization for the cairo FreeType font host.
// `fontHintingEnabled` globally enables/disables glyph hinting. Also resolves
// FT_Library_SetLcdFilter (via dlsym when dlopen is available) and disables it
// again if the linked FreeType does not actually implement LCD filtering.
void SkInitCairoFT(bool fontHintingEnabled)
{
    gFontHintingEnabled = fontHintingEnabled;
#if SK_CAN_USE_DLOPEN
    gSetLcdFilter = (FT_Error (*)(FT_Library, FT_LcdFilter))dlsym(RTLD_DEFAULT, "FT_Library_SetLcdFilter");
#else
    gSetLcdFilter = &FT_Library_SetLcdFilter;
#endif
    // FT_Library_SetLcdFilter may be provided but have no effect if FreeType
    // is built without FT_CONFIG_OPTION_SUBPIXEL_RENDERING.
    if (gSetLcdFilter &&
        gSetLcdFilter(nullptr, FT_LCD_FILTER_NONE) == FT_Err_Unimplemented_Feature) {
        gSetLcdFilter = nullptr;
    }
}
+
// Scaler context over an FT_Face shared with Gecko/cairo. The face may be
// used by multiple owners, so all access must be bracketed by Lock()/Unlock()
// (see AutoLockFTFace).
class SkScalerContext_CairoFT : public SkScalerContext_FreeType_Base {
public:
    SkScalerContext_CairoFT(sk_sp<SkTypeface> typeface,
                            const SkScalerContextEffects& effects,
                            const SkDescriptor* desc, FT_Face face,
                            void* faceContext, SkPixelGeometry pixelGeometry,
                            FT_LcdFilter lcdFilter);

    virtual ~SkScalerContext_CairoFT() {
        mozilla_ForgetSharedFTFaceLockOwner(fFTFaceContext, this);
    }

    // Construction stores the face context; a null context means this scaler
    // context is unusable.
    bool isValid() const { return fFTFaceContext != nullptr; }

    void Lock() {
        // NOTE(review): mozilla_LockSharedFTFace appears to return non-zero
        // when this owner's configuration is still current; on 0 the face's
        // transform and char size are (re)applied — confirm the contract.
        if (!mozilla_LockSharedFTFace(fFTFaceContext, this)) {
            FT_Set_Transform(fFTFace, fHaveShape ? &fShapeMatrixFT : nullptr, nullptr);
            FT_Set_Char_Size(fFTFace, FT_F26Dot6(fScaleX * 64.0f + 0.5f),
                             FT_F26Dot6(fScaleY * 64.0f + 0.5f), 0, 0);
        }
    }

    void Unlock() { mozilla_UnlockSharedFTFace(fFTFaceContext); }

protected:
    bool generateAdvance(SkGlyph* glyph) override;
    void generateMetrics(SkGlyph* glyph, SkArenaAlloc* arena) override;
    void generateImage(const SkGlyph& glyph) override;
    bool generatePath(const SkGlyph& glyph, SkPath* path) override;
    void generateFontMetrics(SkFontMetrics* metrics) override;

private:
    bool computeShapeMatrix(const SkMatrix& m);
    void prepareGlyph(FT_GlyphSlot glyph);

    FT_Face fFTFace;           // shared face, not owned
    void* fFTFaceContext;      // opaque Gecko-side sharing/locking context
    FT_Int32 fLoadGlyphFlags;  // FT_LOAD_* flags computed in the constructor
    FT_LcdFilter fLcdFilter;
    SkScalar fScaleX;          // major-axis scale (see computeShapeMatrix)
    SkScalar fScaleY;          // minor-axis scale
    SkMatrix fShapeMatrix;     // residual unit transform, valid when fHaveShape
    FT_Matrix fShapeMatrixFT;  // fShapeMatrix in FreeType 16.16 fixed point
    bool fHaveShape;
};
+
+class AutoLockFTFace {
+public:
+ AutoLockFTFace(SkScalerContext_CairoFT* scalerContext)
+ : fScalerContext(scalerContext) {
+ fScalerContext->Lock();
+ }
+
+ ~AutoLockFTFace() { fScalerContext->Unlock(); }
+
+private:
+ SkScalerContext_CairoFT* fScalerContext;
+};
+
+static bool isLCD(const SkScalerContextRec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContextRec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
// SkTypeface wrapper around a cairo/Gecko-shared FT_Face. Most SkTypeface
// queries are stubs; glyph access goes through the shared face under its
// lock. Instances are cached per face context in SkTypefaceCache (see
// SkCreateTypefaceFromCairoFTFont).
class SkCairoFTTypeface : public SkTypeface {
public:
    std::unique_ptr<SkStreamAsset> onOpenStream(int*) const override { return nullptr; }

    std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override
    {
        SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetAdvancedMetrics unimplemented\n"));
        return nullptr;
    }

    std::unique_ptr<SkScalerContext> onCreateScalerContext(const SkScalerContextEffects& effects, const SkDescriptor* desc) const override
    {
        SkScalerContext_CairoFT* ctx = new SkScalerContext_CairoFT(
            sk_ref_sp(const_cast<SkCairoFTTypeface*>(this)), effects, desc,
            fFTFace, fFTFaceContext, fPixelGeometry, fLcdFilter);
        // `result` owns ctx; returning nullptr below still destroys it.
        std::unique_ptr<SkScalerContext> result(ctx);
        if (!ctx->isValid()) {
            return nullptr;
        }
        return result;
    }

    void onFilterRec(SkScalerContextRec* rec) const override
    {
        // rotated text looks bad with hinting, so we disable it as needed
        if (!gFontHintingEnabled || !isAxisAligned(*rec)) {
            rec->setHinting(SkFontHinting::kNone);
        }

        // Don't apply any gamma so that we match cairo-ft's results.
        rec->ignorePreBlend();
    }

    void onGetFontDescriptor(SkFontDescriptor*, bool*) const override
    {
        SkDEBUGCODE(SkDebugf("SkCairoFTTypeface::onGetFontDescriptor unimplemented\n"));
    }

    // cmap lookups must hold the shared-face lock (no owner context needed).
    void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override
    {
        mozilla_LockSharedFTFace(fFTFaceContext, nullptr);
        for (int i = 0; i < count; ++i) {
            glyphs[i] = SkToU16(FT_Get_Char_Index(fFTFace, chars[i]));
        }
        mozilla_UnlockSharedFTFace(fFTFaceContext);
    }

    int onCountGlyphs() const override
    {
        return fFTFace->num_glyphs;
    }

    int onGetUPEM() const override
    {
        return 0;
    }

    SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override
    {
        return nullptr;
    }

    void onGetFamilyName(SkString* familyName) const override
    {
        familyName->reset();
    }

    bool onGetPostScriptName(SkString*) const override {
        return false;
    }

    bool onGlyphMaskNeedsCurrentColor() const override {
        return false;
    }

    int onGetTableTags(SkFontTableTag*) const override
    {
        return 0;
    }

    size_t onGetTableData(SkFontTableTag, size_t, size_t, void*) const override
    {
        return 0;
    }

    void getPostScriptGlyphNames(SkString*) const override {}

    void getGlyphToUnicodeMap(SkUnichar*) const override {}

    int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
                                     int coordinateCount) const override
    {
        return 0;
    }

    int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
                                       int parameterCount) const override
    {
        return 0;
    }

    // Cloning is not supported; hand back another ref to the same typeface.
    sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
        return sk_ref_sp(this);
    }

    // Takes a reference on the shared face context for the typeface's lifetime.
    SkCairoFTTypeface(FT_Face face, void* faceContext,
                      SkPixelGeometry pixelGeometry, FT_LcdFilter lcdFilter)
        : SkTypeface(SkFontStyle::Normal())
        , fFTFace(face)
        , fFTFaceContext(faceContext)
        , fPixelGeometry(pixelGeometry)
        , fLcdFilter(lcdFilter)
    {
        mozilla_AddRefSharedFTFace(fFTFaceContext);
    }

    void* GetFTFaceContext() const { return fFTFaceContext; }

    bool hasColorGlyphs() const override
    {
        // Check if the font has scalable outlines. If not, then avoid trying
        // to render it as a path.
        if (fFTFace) {
            return !FT_IS_SCALABLE(fFTFace);
        }
        return false;
    }

private:
    ~SkCairoFTTypeface()
    {
        mozilla_ReleaseSharedFTFace(fFTFaceContext, nullptr);
    }

    FT_Face fFTFace;       // shared face, not owned directly (see fFTFaceContext)
    void* fFTFaceContext;  // ref-counted Gecko-side sharing context
    SkPixelGeometry fPixelGeometry;
    FT_LcdFilter fLcdFilter;
};
+
+static bool FindByFTFaceContext(SkTypeface* typeface, void* context) {
+ return static_cast<SkCairoFTTypeface*>(typeface)->GetFTFaceContext() == context;
+}
+
+SkTypeface* SkCreateTypefaceFromCairoFTFont(FT_Face face, void* faceContext,
+ SkPixelGeometry pixelGeometry,
+ uint8_t lcdFilter)
+{
+ sk_sp<SkTypeface> typeface =
+ SkTypefaceCache::FindByProcAndRef(FindByFTFaceContext, faceContext);
+ if (!typeface) {
+ typeface = sk_make_sp<SkCairoFTTypeface>(face, faceContext, pixelGeometry,
+ (FT_LcdFilter)lcdFilter);
+ SkTypefaceCache::Add(typeface);
+ }
+
+ return typeface.release();
+}
+
// Construct a scaler context bound to the shared cairo FT_Face.
// Derives the shape matrix (fScaleX/fScaleY plus residual transform) from the
// rec, then translates the rec's hinting/LCD settings into FT_LOAD_* flags,
// mirroring cairo-ft's choices.
SkScalerContext_CairoFT::SkScalerContext_CairoFT(
    sk_sp<SkTypeface> typeface, const SkScalerContextEffects& effects,
    const SkDescriptor* desc, FT_Face face, void* faceContext,
    SkPixelGeometry pixelGeometry, FT_LcdFilter lcdFilter)
    : SkScalerContext_FreeType_Base(std::move(typeface), effects, desc)
    , fFTFace(face)
    , fFTFaceContext(faceContext)
    , fLcdFilter(lcdFilter)
{
    SkMatrix matrix;
    fRec.getSingleMatrix(&matrix);

    computeShapeMatrix(matrix);

    FT_Int32 loadFlags = FT_LOAD_DEFAULT;

    if (SkMask::kBW_Format == fRec.fMaskFormat) {
        if (fRec.getHinting() == SkFontHinting::kNone) {
            loadFlags |= FT_LOAD_NO_HINTING;
        } else {
            // Note: a load target replaces (does not OR into) the default.
            loadFlags = FT_LOAD_TARGET_MONO;
        }
        loadFlags |= FT_LOAD_MONOCHROME;
    } else {
        if (isLCD(fRec)) {
            // Map the display's pixel geometry onto Skia's LCD flags.
            switch (pixelGeometry) {
            case kRGB_H_SkPixelGeometry:
            default:
                break;
            case kRGB_V_SkPixelGeometry:
                fRec.fFlags |= SkScalerContext::kLCD_Vertical_Flag;
                break;
            case kBGR_H_SkPixelGeometry:
                fRec.fFlags |= SkScalerContext::kLCD_BGROrder_Flag;
                break;
            case kBGR_V_SkPixelGeometry:
                fRec.fFlags |= SkScalerContext::kLCD_Vertical_Flag |
                               SkScalerContext::kLCD_BGROrder_Flag;
                break;
            }
        }

        switch (fRec.getHinting()) {
        case SkFontHinting::kNone:
            loadFlags |= FT_LOAD_NO_HINTING;
            break;
        case SkFontHinting::kSlight:
            loadFlags = FT_LOAD_TARGET_LIGHT; // This implies FORCE_AUTOHINT
            break;
        case SkFontHinting::kNormal:
            if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
                loadFlags |= FT_LOAD_FORCE_AUTOHINT;
            }
            break;
        case SkFontHinting::kFull:
            if (isLCD(fRec)) {
                if (fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag) {
                    loadFlags = FT_LOAD_TARGET_LCD_V;
                } else {
                    loadFlags = FT_LOAD_TARGET_LCD;
                }
            }
            if (fRec.fFlags & SkScalerContext::kForceAutohinting_Flag) {
                loadFlags |= FT_LOAD_FORCE_AUTOHINT;
            }
            break;
        default:
            SkDebugf("---------- UNKNOWN hinting %d\n", fRec.getHinting());
            break;
        }
    }

    // Disable autohinting when asked to disable hinting, except for "tricky" fonts.
    if (!gFontHintingEnabled) {
        if (fFTFace && !(fFTFace->face_flags & FT_FACE_FLAG_TRICKY)) {
            loadFlags |= FT_LOAD_NO_AUTOHINT;
        }
    }

    if ((fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag) == 0) {
        loadFlags |= FT_LOAD_NO_BITMAP;
    }

    // Always using FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH to get correct
    // advances, as fontconfig and cairo do.
    // See http://code.google.com/p/skia/issues/detail?id=222.
    loadFlags |= FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH;

    loadFlags |= FT_LOAD_COLOR;

    fLoadGlyphFlags = loadFlags;
}
+
// Split the rec's transform into scale factors (fScaleX/fScaleY, fed to
// FT_Set_Char_Size) and a residual unit transform (fShapeMatrix, fed to
// FT_Set_Transform). Returns false and falls back to the rec's text size when
// the transform is degenerate or no usable bitmap strike exists.
bool SkScalerContext_CairoFT::computeShapeMatrix(const SkMatrix& m)
{
    // Compute a shape matrix compatible with Cairo's _compute_transform.
    // Finds major/minor scales and uses them to normalize the transform.
    double scaleX = m.getScaleX();
    double skewX = m.getSkewX();
    double skewY = m.getSkewY();
    double scaleY = m.getScaleY();
    double det = scaleX * scaleY - skewY * skewX;
    if (!std::isfinite(det)) {
        fScaleX = fRec.fTextSize * fRec.fPreScaleX;
        fScaleY = fRec.fTextSize;
        fHaveShape = false;
        return false;
    }
    // Major scale: length of the transformed x unit vector; minor derives
    // from the determinant so that major * minor == |det|.
    double major = det != 0.0 ? hypot(scaleX, skewY) : 0.0;
    double minor = major != 0.0 ? fabs(det) / major : 0.0;
    // Limit scales to be above 1pt.
    major = std::max(major, 1.0);
    minor = std::max(minor, 1.0);

    // If the font is not scalable, then choose the best available size.
    if (fFTFace && !FT_IS_SCALABLE(fFTFace)) {
        double bestDist = DBL_MAX;
        FT_Int bestSize = -1;
        for (FT_Int i = 0; i < fFTFace->num_fixed_sizes; i++) {
            // Distance is positive if strike is larger than desired size,
            // or negative if smaller. If we previously found a smaller
            // strike, then prefer a larger strike. Otherwise, minimize
            // distance.
            double dist = fFTFace->available_sizes[i].y_ppem / 64.0 - minor;
            if (bestDist < 0 ? dist >= bestDist : fabs(dist) <= bestDist) {
                bestDist = dist;
                bestSize = i;
            }
        }
        if (bestSize < 0) {
            fScaleX = fRec.fTextSize * fRec.fPreScaleX;
            fScaleY = fRec.fTextSize;
            fHaveShape = false;
            return false;
        }
        major = fFTFace->available_sizes[bestSize].x_ppem / 64.0;
        minor = fFTFace->available_sizes[bestSize].y_ppem / 64.0;
        fHaveShape = true;
    } else {
        fHaveShape = !m.isScaleTranslate() || scaleX < 0.0 || scaleY < 0.0;
    }

    fScaleX = SkDoubleToScalar(major);
    fScaleY = SkDoubleToScalar(minor);

    if (fHaveShape) {
        // Normalize the transform and convert to fixed-point.
        fShapeMatrix = m;
        fShapeMatrix.preScale(SkDoubleToScalar(1.0 / major), SkDoubleToScalar(1.0 / minor));

        // FreeType's y axis points up, so the skew terms are negated.
        fShapeMatrixFT.xx = SkScalarToFixed(fShapeMatrix.getScaleX());
        fShapeMatrixFT.yx = SkScalarToFixed(-fShapeMatrix.getSkewY());
        fShapeMatrixFT.xy = SkScalarToFixed(-fShapeMatrix.getSkewX());
        fShapeMatrixFT.yy = SkScalarToFixed(fShapeMatrix.getScaleY());
    }
    return true;
}
+
+bool SkScalerContext_CairoFT::generateAdvance(SkGlyph* glyph)
+{
+ generateMetrics(glyph, nullptr);
+ return !glyph->isEmpty();
+}
+
+void SkScalerContext_CairoFT::prepareGlyph(FT_GlyphSlot glyph)
+{
+ if (fRec.fFlags & SkScalerContext::kEmbolden_Flag) {
+ // Not FT_GlyphSlot_Embolden because we want a less extreme effect.
+ mozilla_glyphslot_embolden_less(glyph);
+ }
+}
+
// Load the glyph and fill in its advance, bounds, and mask format.
// `arena` is unused by this backend and may be null (see generateAdvance).
void SkScalerContext_CairoFT::generateMetrics(SkGlyph* glyph, SkArenaAlloc* arena)
{
    glyph->fMaskFormat = fRec.fMaskFormat;

    // Start from zeroed metrics so a failed load yields an empty glyph.
    glyph->zeroMetrics();

    AutoLockFTFace faceLock(this);

    FT_Error err = mozilla_LoadFTGlyph(fFTFace, glyph->getGlyphID(), fLoadGlyphFlags);
    if (err != 0) {
        return;
    }

    prepareGlyph(fFTFace->glyph);

    // FreeType advances are 26.6 fixed point; y is negated for Skia's y-down.
    glyph->fAdvanceX = SkFDot6ToFloat(fFTFace->glyph->advance.x);
    glyph->fAdvanceY = -SkFDot6ToFloat(fFTFace->glyph->advance.y);

    SkIRect bounds;
    switch (fFTFace->glyph->format) {
    case FT_GLYPH_FORMAT_OUTLINE:
        if (!fFTFace->glyph->outline.n_contours) {
            return;
        }

        FT_BBox bbox;
        FT_Outline_Get_CBox(&fFTFace->glyph->outline, &bbox);
        if (this->isSubpixel()) {
            // Shift the control box by the subpixel offset before grid-fitting.
            int dx = SkFixedToFDot6(glyph->getSubXFixed());
            int dy = SkFixedToFDot6(glyph->getSubYFixed());
            bbox.xMin += dx;
            bbox.yMin -= dy;
            bbox.xMax += dx;
            bbox.yMax -= dy;
        }
        // Grid-fit the 26.6 control box to whole pixels (floor min, ceil max).
        bbox.xMin &= ~63;
        bbox.yMin &= ~63;
        bbox.xMax = (bbox.xMax + 63) & ~63;
        bbox.yMax = (bbox.yMax + 63) & ~63;
        bounds = SkIRect::MakeLTRB(SkFDot6Floor(bbox.xMin),
                                   -SkFDot6Floor(bbox.yMax),
                                   SkFDot6Floor(bbox.xMax),
                                   -SkFDot6Floor(bbox.yMin));

        if (isLCD(fRec)) {
            // In FreeType < 2.8.1, LCD filtering, if explicitly used, may
            // add padding to the glyph. When not used, there is no padding.
            // As of 2.8.1, LCD filtering is now always supported and may
            // add padding even if an LCD filter is not explicitly set.
            // Regardless, if no LCD filtering is used, or if LCD filtering
            // doesn't add padding, it is safe to modify the glyph's bounds
            // here. generateGlyphImage will detect if the mask is smaller
            // than the bounds and clip things appropriately.
            if (fRec.fFlags & kLCD_Vertical_Flag) {
                bounds.outset(0, 1);
            } else {
                bounds.outset(1, 0);
            }
        }
        break;
    case FT_GLYPH_FORMAT_BITMAP:
        if (fFTFace->glyph->bitmap.pixel_mode == FT_PIXEL_MODE_BGRA) {
            glyph->fMaskFormat = SkMask::kARGB32_Format;
        }

        // Subpixel rendering does not apply to raster glyphs.
        if (isLCD(fRec)) {
            fRec.fMaskFormat = SkMask::kA8_Format;
        }

        if (fHaveShape) {
            // Ensure filtering is preserved when the bitmap is transformed.
            // Otherwise, the result will look horrifically aliased.
            if (fRec.fMaskFormat == SkMask::kBW_Format) {
                fRec.fMaskFormat = SkMask::kA8_Format;
            }

            // Apply the shape matrix to the glyph's bounding box.
            SkRect srcRect = SkRect::MakeXYWH(
                SkIntToScalar(fFTFace->glyph->bitmap_left),
                -SkIntToScalar(fFTFace->glyph->bitmap_top),
                SkIntToScalar(fFTFace->glyph->bitmap.width),
                SkIntToScalar(fFTFace->glyph->bitmap.rows));
            SkRect destRect;
            fShapeMatrix.mapRect(&destRect, srcRect);
            SkIRect glyphRect = destRect.roundOut();
            bounds = SkIRect::MakeXYWH(SkScalarRoundToInt(destRect.fLeft),
                                       SkScalarRoundToInt(destRect.fTop),
                                       glyphRect.width(),
                                       glyphRect.height());
        } else {
            bounds = SkIRect::MakeXYWH(fFTFace->glyph->bitmap_left,
                                       -fFTFace->glyph->bitmap_top,
                                       fFTFace->glyph->bitmap.width,
                                       fFTFace->glyph->bitmap.rows);
        }
        break;
    case FT_GLYPH_FORMAT_SVG:
        // We don't support getting glyph bounds for SVG, but at least the advance
        // should be correctly returned, and we don't want to fire an assertion.
        break;
    default:
        SkDEBUGFAIL("unknown glyph format");
        return;
    }

    // Only accept bounds representable in the glyph's 16-bit fields; otherwise
    // the glyph stays empty.
    if (SkIRect::MakeXYWH(SHRT_MIN, SHRT_MIN, USHRT_MAX, USHRT_MAX).contains(bounds)) {
        glyph->fWidth  = SkToU16(bounds.width());
        glyph->fHeight = SkToU16(bounds.height());
        glyph->fLeft   = SkToS16(bounds.left());
        glyph->fTop    = SkToS16(bounds.top());
    }
}
+
// Rasterize the glyph into glyph.fImage.
void SkScalerContext_CairoFT::generateImage(const SkGlyph& glyph)
{
    AutoLockFTFace faceLock(this);

    FT_Error err = mozilla_LoadFTGlyph(fFTFace, glyph.getGlyphID(), fLoadGlyphFlags);

    if (err != 0) {
        // On failure, deliver a fully transparent image rather than garbage.
        memset(glyph.fImage, 0, glyph.rowBytes() * glyph.fHeight);
        return;
    }

    prepareGlyph(fFTFace->glyph);

    // LCD filtering only applies when rendering an outline to an LCD16 mask
    // and the runtime FreeType provides FT_Library_SetLcdFilter.
    bool useLcdFilter =
        fFTFace->glyph->format == FT_GLYPH_FORMAT_OUTLINE &&
        glyph.maskFormat() == SkMask::kLCD16_Format &&
        gSetLcdFilter;
    if (useLcdFilter) {
        // The LCD filter is per-FT_Library state, so hold the library lock
        // while it is set away from (and later back to) the default.
        mozilla_LockFTLibrary(fFTFace->glyph->library);
        gSetLcdFilter(fFTFace->glyph->library, fLcdFilter);
    }

    SkMatrix matrix;
    if (fFTFace->glyph->format == FT_GLYPH_FORMAT_BITMAP &&
        fHaveShape) {
        // FreeType does not transform bitmap glyphs; apply the shape matrix
        // during blitting instead.
        matrix = fShapeMatrix;
    } else {
        matrix.setIdentity();
    }
    generateGlyphImage(fFTFace, glyph, matrix);

    if (useLcdFilter) {
        gSetLcdFilter(fFTFace->glyph->library, FT_LCD_FILTER_NONE);
        mozilla_UnlockFTLibrary(fFTFace->glyph->library);
    }
}
+
+bool SkScalerContext_CairoFT::generatePath(const SkGlyph& glyph, SkPath* path)
+{
+ AutoLockFTFace faceLock(this);
+
+ SkASSERT(path);
+
+ SkGlyphID glyphID = glyph.getGlyphID();
+
+ uint32_t flags = fLoadGlyphFlags;
+ flags |= FT_LOAD_NO_BITMAP; // ignore embedded bitmaps so we're sure to get the outline
+ flags &= ~FT_LOAD_RENDER; // don't scan convert (we just want the outline)
+
+ FT_Error err = mozilla_LoadFTGlyph(fFTFace, glyphID, flags);
+
+ if (err != 0) {
+ path->reset();
+ return false;
+ }
+
+ prepareGlyph(fFTFace->glyph);
+
+ return generateGlyphPath(fFTFace, path);
+}
+
+void SkScalerContext_CairoFT::generateFontMetrics(SkFontMetrics* metrics)
+{
+ if (metrics) {
+ memset(metrics, 0, sizeof(SkFontMetrics));
+ }
+}
diff --git a/gfx/skia/skia/src/ports/SkFontHost_win.cpp b/gfx/skia/skia/src/ports/SkFontHost_win.cpp
new file mode 100644
index 0000000000..4e18aa6820
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontHost_win.cpp
@@ -0,0 +1,2356 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkData.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/ports/SkTypeface_win.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "include/utils/SkBase64.h"
+#include "src/base/SkLeanWindows.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+#include "src/sfnt/SkOTTable_maxp.h"
+#include "src/sfnt/SkOTTable_name.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/sfnt/SkSFNTHeader.h"
+#include "src/utils/SkMatrix22.h"
+#include "src/utils/win/SkHRESULT.h"
+
+#include <tchar.h>
+#include <usp10.h>
+#include <objbase.h>
+
+using namespace skia_private;
+
+namespace {
+static inline const constexpr bool kSkShowTextBlitCoverage = false;
+}
+
+static void (*gEnsureLOGFONTAccessibleProc)(const LOGFONT&);
+
+void SkTypeface_SetEnsureLOGFONTAccessibleProc(void (*proc)(const LOGFONT&)) {
+ gEnsureLOGFONTAccessibleProc = proc;
+}
+
+static void call_ensure_accessible(const LOGFONT& lf) {
+ if (gEnsureLOGFONTAccessibleProc) {
+ gEnsureLOGFONTAccessibleProc(lf);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// always packed xxRRGGBB
+typedef uint32_t SkGdiRGB;
+
+// define this in your Makefile or .gyp to enforce AA requests
+// which GDI ignores at small sizes. This flag guarantees AA
+// for rotated text, regardless of GDI's notions.
+//#define SK_ENFORCE_ROTATED_TEXT_AA_ON_WINDOWS
+
+static bool isLCD(const SkScalerContextRec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool bothZero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool isAxisAligned(const SkScalerContextRec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (bothZero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ bothZero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+static bool needToRenderWithSkia(const SkScalerContextRec& rec) {
+#ifdef SK_ENFORCE_ROTATED_TEXT_AA_ON_WINDOWS
+ // What we really want to catch is when GDI will ignore the AA request and give
+ // us BW instead. Smallish rotated text is one heuristic, so this code is just
+ // an approximation. We shouldn't need to do this for larger sizes, but at those
+ // sizes, the quality difference gets less and less between our general
+ // scanconverter and GDI's.
+ if (SkMask::kA8_Format == rec.fMaskFormat && !isAxisAligned(rec)) {
+ return true;
+ }
+#endif
+ return rec.getHinting() == SkFontHinting::kNone || rec.getHinting() == SkFontHinting::kSlight;
+}
+
+static void tchar_to_skstring(const TCHAR t[], SkString* s) {
+#ifdef UNICODE
+ size_t sSize = WideCharToMultiByte(CP_UTF8, 0, t, -1, nullptr, 0, nullptr, nullptr);
+ s->resize(sSize);
+ WideCharToMultiByte(CP_UTF8, 0, t, -1, s->data(), sSize, nullptr, nullptr);
+#else
+ s->set(t);
+#endif
+}
+
+static void dcfontname_to_skstring(HDC deviceContext, const LOGFONT& lf, SkString* familyName) {
+ int fontNameLen; //length of fontName in TCHARS.
+ if (0 == (fontNameLen = GetTextFace(deviceContext, 0, nullptr))) {
+ call_ensure_accessible(lf);
+ if (0 == (fontNameLen = GetTextFace(deviceContext, 0, nullptr))) {
+ fontNameLen = 0;
+ }
+ }
+
+ AutoSTArray<LF_FULLFACESIZE, TCHAR> fontName(fontNameLen+1);
+ if (0 == GetTextFace(deviceContext, fontNameLen, fontName.get())) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextFace(deviceContext, fontNameLen, fontName.get())) {
+ fontName[0] = 0;
+ }
+ }
+
+ tchar_to_skstring(fontName.get(), familyName);
+}
+
+static void make_canonical(LOGFONT* lf) {
+ lf->lfHeight = -64;
+ lf->lfWidth = 0; // lfWidth is related to lfHeight, not to the OS/2::usWidthClass.
+ lf->lfQuality = CLEARTYPE_QUALITY;//PROOF_QUALITY;
+ lf->lfCharSet = DEFAULT_CHARSET;
+// lf->lfClipPrecision = 64;
+}
+
+static SkFontStyle get_style(const LOGFONT& lf) {
+ return SkFontStyle(lf.lfWeight,
+ SkFontStyle::kNormal_Width,
+ lf.lfItalic ? SkFontStyle::kItalic_Slant : SkFontStyle::kUpright_Slant);
+}
+
+static inline FIXED SkFixedToFIXED(SkFixed x) {
+ return *(FIXED*)(&x);
+}
+static inline SkFixed SkFIXEDToFixed(FIXED x) {
+ return *(SkFixed*)(&x);
+}
+
+static inline FIXED SkScalarToFIXED(SkScalar x) {
+ return SkFixedToFIXED(SkScalarToFixed(x));
+}
+
+static inline SkScalar SkFIXEDToScalar(FIXED x) {
+ return SkFixedToScalar(SkFIXEDToFixed(x));
+}
+
+static unsigned calculateGlyphCount(HDC hdc, const LOGFONT& lf) {
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_VECTOR;
+ call_ensure_accessible(lf);
+ GetTextMetrics(hdc, &textMetric);
+ }
+
+ if (!(textMetric.tmPitchAndFamily & TMPF_VECTOR)) {
+ return textMetric.tmLastChar;
+ }
+
+ // The 'maxp' table stores the number of glyphs at offset 4, in 2 bytes.
+ uint16_t glyphs;
+ if (GDI_ERROR != GetFontData(hdc, SkOTTableMaximumProfile::TAG, 4, &glyphs, sizeof(glyphs))) {
+ return SkEndian_SwapBE16(glyphs);
+ }
+
+ // Binary search for glyph count.
+ static const MAT2 mat2 = {{0, 1}, {0, 0}, {0, 0}, {0, 1}};
+ int32_t max = UINT16_MAX + 1;
+ int32_t min = 0;
+ GLYPHMETRICS gm;
+ while (min < max) {
+ int32_t mid = min + ((max - min) / 2);
+ if (GetGlyphOutlineW(hdc, mid, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0,
+ nullptr, &mat2) == GDI_ERROR) {
+ max = mid;
+ } else {
+ min = mid + 1;
+ }
+ }
+ SkASSERT(min == max);
+ return min;
+}
+
+static unsigned calculateUPEM(HDC hdc, const LOGFONT& lf) {
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_VECTOR;
+ call_ensure_accessible(lf);
+ GetTextMetrics(hdc, &textMetric);
+ }
+
+ if (!(textMetric.tmPitchAndFamily & TMPF_VECTOR)) {
+ return textMetric.tmMaxCharWidth;
+ }
+
+ OUTLINETEXTMETRIC otm;
+ unsigned int otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ if (0 == otmRet) {
+ call_ensure_accessible(lf);
+ otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ }
+
+ return (0 == otmRet) ? 0 : otm.otmEMSquare;
+}
+
+class SkAutoHDC {
+public:
+ explicit SkAutoHDC(const LOGFONT& lf)
+ : fHdc(::CreateCompatibleDC(nullptr))
+ , fFont(::CreateFontIndirect(&lf))
+ , fSavefont((HFONT)::SelectObject(fHdc, fFont))
+ { }
+ ~SkAutoHDC() {
+ if (fHdc) {
+ ::SelectObject(fHdc, fSavefont);
+ ::DeleteDC(fHdc);
+ }
+ if (fFont) {
+ ::DeleteObject(fFont);
+ }
+ }
+ operator HDC() { return fHdc; }
+private:
+ HDC fHdc;
+ HFONT fFont;
+ HFONT fSavefont;
+};
+
+class LogFontTypeface : public SkTypeface {
+public:
+ LogFontTypeface(const SkFontStyle& style, const LOGFONT& lf, bool serializeAsStream)
+ : SkTypeface(style, false)
+ , fLogFont(lf)
+ , fSerializeAsStream(serializeAsStream)
+ {
+ SkAutoHDC hdc(fLogFont);
+ TEXTMETRIC textMetric;
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextMetrics(hdc, &textMetric)) {
+ textMetric.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
+
+ // The fixed pitch bit is set if the font is *not* fixed pitch.
+ this->setIsFixedPitch((textMetric.tmPitchAndFamily & TMPF_FIXED_PITCH) == 0);
+ this->setFontStyle(SkFontStyle(textMetric.tmWeight, style.width(), style.slant()));
+
+ // Used a logfont on a memory context, should never get a device font.
+ // Therefore all TMPF_DEVICE will be PostScript (cubic) fonts.
+ // If the font has cubic outlines, it will not be rendered with ClearType.
+ fCanBeLCD = !((textMetric.tmPitchAndFamily & TMPF_VECTOR) &&
+ (textMetric.tmPitchAndFamily & TMPF_DEVICE));
+ }
+
+ LOGFONT fLogFont;
+ bool fSerializeAsStream;
+ bool fCanBeLCD;
+
+ static sk_sp<LogFontTypeface> Make(const LOGFONT& lf) {
+ return sk_sp<LogFontTypeface>(new LogFontTypeface(get_style(lf), lf, false));
+ }
+
+ static void EnsureAccessible(const SkTypeface* face) {
+ call_ensure_accessible(static_cast<const LogFontTypeface*>(face)->fLogFont);
+ }
+
+protected:
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override;
+ std::unique_ptr<SkScalerContext> onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ void getGlyphToUnicodeMap(SkUnichar*) const override;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override;
+ int onCountGlyphs() const override;
+ void getPostScriptGlyphNames(SkString*) const override;
+ int onGetUPEM() const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ bool onGetPostScriptName(SkString*) const override { return false; }
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ bool onGlyphMaskNeedsCurrentColor() const override { return false; }
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override
+ {
+ return -1;
+ }
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override
+ {
+ return -1;
+ }
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset, size_t length, void* data) const override;
+ sk_sp<SkData> onCopyTableData(SkFontTableTag) const override;
+};
+
+class FontMemResourceTypeface : public LogFontTypeface {
+public:
+ /**
+ * The created FontMemResourceTypeface takes ownership of fontMemResource.
+ */
+ static sk_sp<FontMemResourceTypeface> Make(const LOGFONT& lf, HANDLE fontMemResource) {
+ return sk_sp<FontMemResourceTypeface>(
+ new FontMemResourceTypeface(get_style(lf), lf, fontMemResource));
+ }
+
+protected:
+ void weak_dispose() const override {
+ RemoveFontMemResourceEx(fFontMemResource);
+ INHERITED::weak_dispose();
+ }
+
+private:
+ /**
+ * Takes ownership of fontMemResource.
+ */
+ FontMemResourceTypeface(const SkFontStyle& style, const LOGFONT& lf, HANDLE fontMemResource)
+ : LogFontTypeface(style, lf, true), fFontMemResource(fontMemResource)
+ { }
+
+ HANDLE fFontMemResource;
+
+ using INHERITED = LogFontTypeface;
+};
+
+static const LOGFONT& get_default_font() {
+ static LOGFONT gDefaultFont;
+ return gDefaultFont;
+}
+
+static bool FindByLogFont(SkTypeface* face, void* ctx) {
+ LogFontTypeface* lface = static_cast<LogFontTypeface*>(face);
+ const LOGFONT* lf = reinterpret_cast<const LOGFONT*>(ctx);
+
+ return !memcmp(&lface->fLogFont, lf, sizeof(LOGFONT));
+}
+
+/**
+ * This is public. It first searches the cache, and if a match is not found,
+ * it creates a new face.
+ */
+SkTypeface* SkCreateTypefaceFromLOGFONT(const LOGFONT& origLF) {
+ LOGFONT lf = origLF;
+ make_canonical(&lf);
+ sk_sp<SkTypeface> face = SkTypefaceCache::FindByProcAndRef(FindByLogFont, &lf);
+ if (!face) {
+ face = LogFontTypeface::Make(lf);
+ SkTypefaceCache::Add(face);
+ }
+ return face.release();
+}
+
+/***
+ * This guy is public.
+ */
+SkTypeface* SkCreateTypefaceFromDWriteFont(IDWriteFactory* aFactory,
+ IDWriteFontFace* aFontFace,
+ SkFontStyle aStyle,
+ int aRenderingMode,
+ float aGamma,
+ float aContrast,
+ float aClearTypeLevel)
+{
+ return DWriteFontTypeface::Create(aFactory, aFontFace, aStyle,
+ (DWRITE_RENDERING_MODE)aRenderingMode,
+ aGamma, aContrast, aClearTypeLevel);
+}
+
+/**
+ * The created SkTypeface takes ownership of fontMemResource.
+ */
+sk_sp<SkTypeface> SkCreateFontMemResourceTypefaceFromLOGFONT(const LOGFONT& origLF, HANDLE fontMemResource) {
+ LOGFONT lf = origLF;
+ make_canonical(&lf);
+ // We'll never get a cache hit, so no point in putting this in SkTypefaceCache.
+ return FontMemResourceTypeface::Make(lf, fontMemResource);
+}
+
+/**
+ * This is public
+ */
+void SkLOGFONTFromTypeface(const SkTypeface* face, LOGFONT* lf) {
+ if (nullptr == face) {
+ *lf = get_default_font();
+ } else {
+ *lf = static_cast<const LogFontTypeface*>(face)->fLogFont;
+ }
+}
+
+// Construct Glyph to Unicode table.
+// Unicode code points that require surrogate pairs in utf16 are not
+// supported.
+// TODO(arthurhsu): Add support for surrogate pairs. It looks like that may
+// require parsing the TTF cmap table (platform 4, encoding 12) directly instead
+// of calling GetFontUnicodeRanges().
+static void populate_glyph_to_unicode(HDC fontHdc, const unsigned glyphCount,
+ SkUnichar* glyphToUnicode) {
+ sk_bzero(glyphToUnicode, sizeof(SkUnichar) * glyphCount);
+ DWORD glyphSetBufferSize = GetFontUnicodeRanges(fontHdc, nullptr);
+ if (!glyphSetBufferSize) {
+ return;
+ }
+
+ std::unique_ptr<BYTE[]> glyphSetBuffer(new BYTE[glyphSetBufferSize]);
+ GLYPHSET* glyphSet =
+ reinterpret_cast<LPGLYPHSET>(glyphSetBuffer.get());
+ if (GetFontUnicodeRanges(fontHdc, glyphSet) != glyphSetBufferSize) {
+ return;
+ }
+
+ for (DWORD i = 0; i < glyphSet->cRanges; ++i) {
+ // There is no guarantee that within a Unicode range, the corresponding
+ // glyph id in a font file are continuous. So, even if we have ranges,
+ // we can't just use the first and last entry of the range to compute
+ // result. We need to enumerate them one by one.
+ int count = glyphSet->ranges[i].cGlyphs;
+ AutoTArray<WCHAR> chars(count + 1);
+ chars[count] = 0; // terminate string
+ AutoTArray<WORD> glyph(count);
+ for (USHORT j = 0; j < count; ++j) {
+ chars[j] = glyphSet->ranges[i].wcLow + j;
+ }
+ GetGlyphIndicesW(fontHdc, chars.get(), count, glyph.get(),
+ GGI_MARK_NONEXISTING_GLYPHS);
+ // If the glyph ID is valid, and the glyph is not mapped, then we will
+ // fill in the char id into the vector. If the glyph is mapped already,
+ // skip it.
+ // TODO(arthurhsu): better improve this. e.g. Get all used char ids from
+ // font cache, then generate this mapping table from there. It's
+ // unlikely to have collisions since glyph reuse happens mostly for
+ // different Unicode pages.
+ for (USHORT j = 0; j < count; ++j) {
+ if (glyph[j] != 0xFFFF && glyph[j] < glyphCount && glyphToUnicode[glyph[j]] == 0) {
+ glyphToUnicode[glyph[j]] = chars[j];
+ }
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+
+static int alignTo32(int n) {
+ return (n + 31) & ~31;
+}
+
+struct MyBitmapInfo : public BITMAPINFO {
+ RGBQUAD fMoreSpaceForColors[1];
+};
+
+class HDCOffscreen {
+public:
+ HDCOffscreen() = default;
+
+ ~HDCOffscreen() {
+ if (fDC) {
+ ::SelectObject(fDC, fSavefont);
+ ::DeleteDC(fDC);
+ }
+ if (fBM) {
+ DeleteObject(fBM);
+ }
+ }
+
+ void init(HFONT font, const XFORM& xform) {
+ fFont = font;
+ fXform = xform;
+ }
+
+ const void* draw(const SkGlyph&, bool isBW, size_t* srcRBPtr);
+
+private:
+ HDC fDC{nullptr};
+ HFONT fSavefont{nullptr};
+ HBITMAP fBM{nullptr};
+ HFONT fFont{nullptr};
+ XFORM fXform{1, 0, 0, 1, 0, 0};
+ void* fBits{nullptr}; // points into fBM
+ int fWidth{0};
+ int fHeight{0};
+ bool fIsBW{false};
+};
+
+const void* HDCOffscreen::draw(const SkGlyph& glyph, bool isBW,
+ size_t* srcRBPtr) {
+ // Can we share the scalercontext's fDDC, so we don't need to create
+ // a separate fDC here?
+ if (nullptr == fDC) {
+ fDC = CreateCompatibleDC(0);
+ if (nullptr == fDC) {
+ return nullptr;
+ }
+ SetGraphicsMode(fDC, GM_ADVANCED);
+ SetBkMode(fDC, TRANSPARENT);
+ SetTextAlign(fDC, TA_LEFT | TA_BASELINE);
+ fSavefont = (HFONT)SelectObject(fDC, fFont);
+
+ COLORREF color = 0x00FFFFFF;
+ SkDEBUGCODE(COLORREF prev =) SetTextColor(fDC, color);
+ SkASSERT(prev != CLR_INVALID);
+ }
+
+ if (fBM && (fIsBW != isBW || fWidth < glyph.width() || fHeight < glyph.height())) {
+ DeleteObject(fBM);
+ fBM = nullptr;
+ }
+ fIsBW = isBW;
+
+ fWidth = std::max(fWidth, glyph.width());
+ fHeight = std::max(fHeight, glyph.height());
+
+ int biWidth = isBW ? alignTo32(fWidth) : fWidth;
+
+ if (nullptr == fBM) {
+ MyBitmapInfo info;
+ sk_bzero(&info, sizeof(info));
+ if (isBW) {
+ RGBQUAD blackQuad = { 0, 0, 0, 0 };
+ RGBQUAD whiteQuad = { 0xFF, 0xFF, 0xFF, 0 };
+ info.bmiColors[0] = blackQuad;
+ info.bmiColors[1] = whiteQuad;
+ }
+ info.bmiHeader.biSize = sizeof(info.bmiHeader);
+ info.bmiHeader.biWidth = biWidth;
+ info.bmiHeader.biHeight = fHeight;
+ info.bmiHeader.biPlanes = 1;
+ info.bmiHeader.biBitCount = isBW ? 1 : 32;
+ info.bmiHeader.biCompression = BI_RGB;
+ if (isBW) {
+ info.bmiHeader.biClrUsed = 2;
+ }
+ fBM = CreateDIBSection(fDC, &info, DIB_RGB_COLORS, &fBits, 0, 0);
+ if (nullptr == fBM) {
+ return nullptr;
+ }
+ SelectObject(fDC, fBM);
+ }
+
+ // erase
+ size_t srcRB = isBW ? (biWidth >> 3) : (fWidth << 2);
+ size_t size = fHeight * srcRB;
+ memset(fBits, 0, size);
+
+ XFORM xform = fXform;
+ xform.eDx = (float)-glyph.left();
+ xform.eDy = (float)-glyph.top();
+ SetWorldTransform(fDC, &xform);
+
+ uint16_t glyphID = glyph.getGlyphID();
+ BOOL ret = ExtTextOutW(fDC, 0, 0, ETO_GLYPH_INDEX, nullptr, reinterpret_cast<LPCWSTR>(&glyphID),
+ 1, nullptr);
+ GdiFlush();
+ if (0 == ret) {
+ return nullptr;
+ }
+ *srcRBPtr = srcRB;
+ // offset to the start of the image
+ return (const char*)fBits + (fHeight - glyph.height()) * srcRB;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+#define BUFFERSIZE (1 << 13)
+
+class SkScalerContext_GDI : public SkScalerContext {
+public:
+ SkScalerContext_GDI(sk_sp<LogFontTypeface>,
+ const SkScalerContextEffects&,
+ const SkDescriptor* desc);
+ ~SkScalerContext_GDI() override;
+
+ // Returns true if the constructor was able to complete all of its
+ // initializations (which may include calling GDI).
+ bool isValid() const;
+
+protected:
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph, SkArenaAlloc*) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(const SkGlyph& glyph, SkPath* path) override;
+ void generateFontMetrics(SkFontMetrics*) override;
+
+private:
+ DWORD getGDIGlyphPath(SkGlyphID glyph, UINT flags,
+ AutoSTMalloc<BUFFERSIZE, uint8_t>* glyphbuf);
+ template<bool APPLY_PREBLEND>
+ static void RGBToA8(const SkGdiRGB* SK_RESTRICT src, size_t srcRB,
+ const SkGlyph& glyph, const uint8_t* table8);
+
+ template<bool APPLY_PREBLEND>
+ static void RGBToLcd16(const SkGdiRGB* SK_RESTRICT src, size_t srcRB, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB);
+
+ HDCOffscreen fOffscreen;
+ /** fGsA is the non-rotational part of total matrix without the text height scale.
+ * Used to find the magnitude of advances.
+ */
+ MAT2 fGsA;
+ /** The total matrix without the textSize. */
+ MAT2 fMat22;
+ /** Scales font to EM size. */
+ MAT2 fHighResMat22;
+ HDC fDDC;
+ HFONT fSavefont;
+ HFONT fFont;
+ SCRIPT_CACHE fSC;
+
+ /** The total matrix which also removes EM scale. */
+ SkMatrix fHiResMatrix;
+ /** fG_inv is the inverse of the rotational part of the total matrix.
+ * Used to set the direction of advances.
+ */
+ SkMatrix fG_inv;
+ enum Type {
+ kTrueType_Type, kBitmap_Type, kLine_Type
+ } fType;
+ TEXTMETRIC fTM;
+};
+
+static FIXED SkFloatToFIXED(float x) {
+ return SkFixedToFIXED(SkFloatToFixed(x));
+}
+
+static inline float SkFIXEDToFloat(FIXED x) {
+ return SkFixedToFloat(SkFIXEDToFixed(x));
+}
+
+static BYTE compute_quality(const SkScalerContextRec& rec) {
+ switch (rec.fMaskFormat) {
+ case SkMask::kBW_Format:
+ return NONANTIALIASED_QUALITY;
+ case SkMask::kLCD16_Format:
+ return CLEARTYPE_QUALITY;
+ default:
+ if (rec.fFlags & SkScalerContext::kGenA8FromLCD_Flag) {
+ return CLEARTYPE_QUALITY;
+ } else {
+ return ANTIALIASED_QUALITY;
+ }
+ }
+}
+
+SkScalerContext_GDI::SkScalerContext_GDI(sk_sp<LogFontTypeface> rawTypeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(std::move(rawTypeface), effects, desc)
+ , fDDC(nullptr)
+ , fSavefont(nullptr)
+ , fFont(nullptr)
+ , fSC(nullptr)
+{
+ LogFontTypeface* typeface = static_cast<LogFontTypeface*>(this->getTypeface());
+
+ fDDC = ::CreateCompatibleDC(nullptr);
+ if (!fDDC) {
+ return;
+ }
+ SetGraphicsMode(fDDC, GM_ADVANCED);
+ SetBkMode(fDDC, TRANSPARENT);
+
+ // When GDI hinting, remove the entire Y scale from sA and GsA. (Prevents 'linear' metrics.)
+ // When not hinting, remove only the integer Y scale from sA and GsA. (Applied by GDI.)
+ SkScalerContextRec::PreMatrixScale scaleConstraints =
+ (fRec.getHinting() == SkFontHinting::kNone || fRec.getHinting() == SkFontHinting::kSlight)
+ ? SkScalerContextRec::PreMatrixScale::kVerticalInteger
+ : SkScalerContextRec::PreMatrixScale::kVertical;
+ SkVector scale;
+ SkMatrix sA;
+ SkMatrix GsA;
+ SkMatrix A;
+ fRec.computeMatrices(scaleConstraints, &scale, &sA, &GsA, &fG_inv, &A);
+
+ fGsA.eM11 = SkScalarToFIXED(GsA.get(SkMatrix::kMScaleX));
+ fGsA.eM12 = SkScalarToFIXED(-GsA.get(SkMatrix::kMSkewY)); // This should be ~0.
+ fGsA.eM21 = SkScalarToFIXED(-GsA.get(SkMatrix::kMSkewX));
+ fGsA.eM22 = SkScalarToFIXED(GsA.get(SkMatrix::kMScaleY));
+
+ // When not hinting, scale was computed with kVerticalInteger, so is already an integer.
+ // The sA and GsA transforms will be used to create 'linear' metrics.
+
+ // When hinting, scale was computed with kVertical, stating that our port can handle
+ // non-integer scales. This is done so that sA and GsA are computed without any 'residual'
+ // scale in them, preventing 'linear' metrics. However, GDI cannot actually handle non-integer
+ // scales so we need to round in this case. This is fine, since all of the scale has been
+ // removed from sA and GsA, so GDI will be handling the scale completely.
+ SkScalar gdiTextSize = SkScalarRoundToScalar(scale.fY);
+
+ // GDI will not accept a size of zero, so round the range [0, 1] to 1.
+ // If the size was non-zero, the scale factors will also be non-zero and 1px tall text is drawn.
+ // If the size actually was zero, the scale factors will also be zero, so GDI will draw nothing.
+ if (gdiTextSize == 0) {
+ gdiTextSize = SK_Scalar1;
+ }
+
+ LOGFONT lf = typeface->fLogFont;
+ lf.lfHeight = -SkScalarTruncToInt(gdiTextSize);
+ lf.lfQuality = compute_quality(fRec);
+ fFont = CreateFontIndirect(&lf);
+ if (!fFont) {
+ return;
+ }
+
+ fSavefont = (HFONT)SelectObject(fDDC, fFont);
+
+ if (0 == GetTextMetrics(fDDC, &fTM)) {
+ call_ensure_accessible(lf);
+ if (0 == GetTextMetrics(fDDC, &fTM)) {
+ fTM.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
+
+ XFORM xform;
+ if (fTM.tmPitchAndFamily & TMPF_VECTOR) {
+ // Used a logfont on a memory context, should never get a device font.
+ // Therefore all TMPF_DEVICE will be PostScript fonts.
+
+ // If TMPF_VECTOR is set, one of TMPF_TRUETYPE or TMPF_DEVICE means that
+ // we have an outline font. Otherwise we have a vector FON, which is
+ // scalable, but not an outline font.
+ // This was determined by testing with Type1 PFM/PFB and
+ // OpenTypeCFF OTF, as well as looking at Wine bugs and sources.
+ if (fTM.tmPitchAndFamily & (TMPF_TRUETYPE | TMPF_DEVICE)) {
+ // Truetype or PostScript.
+ fType = SkScalerContext_GDI::kTrueType_Type;
+ } else {
+ // Stroked FON.
+ fType = SkScalerContext_GDI::kLine_Type;
+ }
+
+ // fPost2x2 is column-major, left handed (y down).
+ // XFORM 2x2 is row-major, left handed (y down).
+ xform.eM11 = SkScalarToFloat(sA.get(SkMatrix::kMScaleX));
+ xform.eM12 = SkScalarToFloat(sA.get(SkMatrix::kMSkewY));
+ xform.eM21 = SkScalarToFloat(sA.get(SkMatrix::kMSkewX));
+ xform.eM22 = SkScalarToFloat(sA.get(SkMatrix::kMScaleY));
+ xform.eDx = 0;
+ xform.eDy = 0;
+
+ // MAT2 is row major, right handed (y up).
+ fMat22.eM11 = SkFloatToFIXED(xform.eM11);
+ fMat22.eM12 = SkFloatToFIXED(-xform.eM12);
+ fMat22.eM21 = SkFloatToFIXED(-xform.eM21);
+ fMat22.eM22 = SkFloatToFIXED(xform.eM22);
+
+ if (needToRenderWithSkia(fRec)) {
+ this->forceGenerateImageFromPath();
+ }
+
+ // Create a hires matrix if we need linear metrics.
+ if (this->isLinearMetrics()) {
+ OUTLINETEXTMETRIC otm;
+ UINT success = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ if (0 == success) {
+ call_ensure_accessible(lf);
+ success = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ }
+ if (0 != success) {
+ SkScalar upem = SkIntToScalar(otm.otmEMSquare);
+
+ SkScalar gdiTextSizeToEMScale = upem / gdiTextSize;
+ fHighResMat22.eM11 = SkScalarToFIXED(gdiTextSizeToEMScale);
+ fHighResMat22.eM12 = SkScalarToFIXED(0);
+ fHighResMat22.eM21 = SkScalarToFIXED(0);
+ fHighResMat22.eM22 = SkScalarToFIXED(gdiTextSizeToEMScale);
+
+ SkScalar removeEMScale = SkScalarInvert(upem);
+ fHiResMatrix = A;
+ fHiResMatrix.preScale(removeEMScale, removeEMScale);
+ }
+ }
+
+ } else {
+ // Assume bitmap
+ fType = SkScalerContext_GDI::kBitmap_Type;
+
+ xform.eM11 = 1.0f;
+ xform.eM12 = 0.0f;
+ xform.eM21 = 0.0f;
+ xform.eM22 = 1.0f;
+ xform.eDx = 0.0f;
+ xform.eDy = 0.0f;
+
+ // fPost2x2 is column-major, left handed (y down).
+ // MAT2 is row major, right handed (y up).
+ fMat22.eM11 = SkScalarToFIXED(fRec.fPost2x2[0][0]);
+ fMat22.eM12 = SkScalarToFIXED(-fRec.fPost2x2[1][0]);
+ fMat22.eM21 = SkScalarToFIXED(-fRec.fPost2x2[0][1]);
+ fMat22.eM22 = SkScalarToFIXED(fRec.fPost2x2[1][1]);
+ }
+
+ fOffscreen.init(fFont, xform);
+}
+
+SkScalerContext_GDI::~SkScalerContext_GDI() {
+ if (fDDC) {
+ ::SelectObject(fDDC, fSavefont);
+ ::DeleteDC(fDDC);
+ }
+ if (fFont) {
+ ::DeleteObject(fFont);
+ }
+ if (fSC) {
+ ::ScriptFreeCache(&fSC);
+ }
+}
+
+bool SkScalerContext_GDI::isValid() const {
+ return fDDC && fFont;
+}
+
+bool SkScalerContext_GDI::generateAdvance(SkGlyph* glyph) {
+ return false;
+}
+
+void SkScalerContext_GDI::generateMetrics(SkGlyph* glyph, SkArenaAlloc* alloc) {
+ SkASSERT(fDDC);
+
+ glyph->fMaskFormat = fRec.fMaskFormat;
+
+ if (fType == SkScalerContext_GDI::kBitmap_Type || fType == SkScalerContext_GDI::kLine_Type) {
+ SIZE size;
+ WORD glyphs = glyph->getGlyphID();
+ if (0 == GetTextExtentPointI(fDDC, &glyphs, 1, &size)) {
+ glyph->fWidth = SkToS16(fTM.tmMaxCharWidth);
+ glyph->fHeight = SkToS16(fTM.tmHeight);
+ } else {
+ glyph->fWidth = SkToS16(size.cx);
+ glyph->fHeight = SkToS16(size.cy);
+ }
+
+ glyph->fTop = SkToS16(-fTM.tmAscent);
+ // Bitmap FON cannot underhang, but vector FON may.
+ // There appears no means of determining underhang of vector FON.
+ glyph->fLeft = SkToS16(0);
+ glyph->fAdvanceX = glyph->width();
+ glyph->fAdvanceY = 0;
+
+ // Vector FON will transform nicely, but bitmap FON do not.
+ if (fType == SkScalerContext_GDI::kLine_Type) {
+ SkRect bounds = SkRect::MakeXYWH(glyph->fLeft, glyph->fTop,
+ glyph->width(), glyph->height());
+ SkMatrix m;
+ m.setAll(SkFIXEDToScalar(fMat22.eM11), -SkFIXEDToScalar(fMat22.eM21), 0,
+ -SkFIXEDToScalar(fMat22.eM12), SkFIXEDToScalar(fMat22.eM22), 0,
+ 0, 0, 1);
+ m.mapRect(&bounds);
+ bounds.roundOut(&bounds);
+ glyph->fLeft = SkScalarTruncToInt(bounds.fLeft);
+ glyph->fTop = SkScalarTruncToInt(bounds.fTop);
+ glyph->fWidth = SkScalarTruncToInt(bounds.width());
+ glyph->fHeight = SkScalarTruncToInt(bounds.height());
+ }
+
+ // Apply matrix to advance.
+ glyph->fAdvanceY = -SkFIXEDToFloat(fMat22.eM12) * glyph->fAdvanceX;
+ glyph->fAdvanceX *= SkFIXEDToFloat(fMat22.eM11);
+
+ // These do not have an outline path at all.
+ glyph->setPath(alloc, nullptr, false);
+
+ return;
+ }
+
+ UINT glyphId = glyph->getGlyphID();
+
+ GLYPHMETRICS gm;
+ sk_bzero(&gm, sizeof(gm));
+
+ DWORD status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == status) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == status) {
+ glyph->zeroMetrics();
+ return;
+ }
+ }
+
+ bool empty = false;
+ // The black box is either the embedded bitmap size or the outline extent.
+ // It is 1x1 if nothing is to be drawn, but will also be 1x1 if something very small
+ // is to be drawn, like a '.'. We need to outset '.' but do not wish to outset ' '.
+ if (1 == gm.gmBlackBoxX && 1 == gm.gmBlackBoxY) {
+ // If GetGlyphOutline with GGO_NATIVE returns 0, we know there was no outline.
+ DWORD bufferSize = GetGlyphOutlineW(fDDC, glyphId, GGO_NATIVE | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fMat22);
+ empty = (0 == bufferSize);
+ }
+
+ glyph->fTop = SkToS16(-gm.gmptGlyphOrigin.y);
+ glyph->fLeft = SkToS16(gm.gmptGlyphOrigin.x);
+ if (empty) {
+ glyph->fWidth = 0;
+ glyph->fHeight = 0;
+ } else {
+ // Outset, since the image may bleed out of the black box.
+ // For embedded bitmaps the black box should be exact.
+ // For outlines we need to outset by 1 in all directions for bleed.
+ // For ClearType we need to outset by 2 for bleed.
+ glyph->fWidth = gm.gmBlackBoxX + 4;
+ glyph->fHeight = gm.gmBlackBoxY + 4;
+ glyph->fTop -= 2;
+ glyph->fLeft -= 2;
+ }
+ // TODO(benjaminwagner): What is the type of gm.gmCellInc[XY]?
+ glyph->fAdvanceX = (float)((int)gm.gmCellIncX);
+ glyph->fAdvanceY = (float)((int)gm.gmCellIncY);
+
+ if ((fTM.tmPitchAndFamily & TMPF_VECTOR) && this->isLinearMetrics()) {
+ sk_bzero(&gm, sizeof(gm));
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fHighResMat22);
+ if (GDI_ERROR != status) {
+ SkPoint advance;
+ fHiResMatrix.mapXY(SkIntToScalar(gm.gmCellIncX), SkIntToScalar(gm.gmCellIncY), &advance);
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+ }
+ } else if (!isAxisAligned(this->fRec)) {
+ status = GetGlyphOutlineW(fDDC, glyphId, GGO_METRICS | GGO_GLYPH_INDEX, &gm, 0, nullptr, &fGsA);
+ if (GDI_ERROR != status) {
+ SkPoint advance;
+ fG_inv.mapXY(SkIntToScalar(gm.gmCellIncX), SkIntToScalar(gm.gmCellIncY), &advance);
+ glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+ glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+ }
+ }
+}
+
+static const MAT2 gMat2Identity = {{0, 1}, {0, 0}, {0, 0}, {0, 1}};
+void SkScalerContext_GDI::generateFontMetrics(SkFontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+ sk_bzero(metrics, sizeof(*metrics));
+
+ SkASSERT(fDDC);
+
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ if (fType == SkScalerContext_GDI::kBitmap_Type || fType == SkScalerContext_GDI::kLine_Type) {
+#endif
+ metrics->fTop = SkIntToScalar(-fTM.tmAscent);
+ metrics->fAscent = SkIntToScalar(-fTM.tmAscent);
+ metrics->fDescent = SkIntToScalar(fTM.tmDescent);
+ metrics->fBottom = SkIntToScalar(fTM.tmDescent);
+ metrics->fLeading = SkIntToScalar(fTM.tmExternalLeading);
+ metrics->fAvgCharWidth = SkIntToScalar(fTM.tmAveCharWidth);
+ metrics->fMaxCharWidth = SkIntToScalar(fTM.tmMaxCharWidth);
+ metrics->fXMin = 0;
+ metrics->fXMax = metrics->fMaxCharWidth;
+ //metrics->fXHeight = 0;
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ return;
+ }
+#endif
+
+ OUTLINETEXTMETRIC otm;
+
+ uint32_t ret = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ if (0 == ret) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ ret = GetOutlineTextMetrics(fDDC, sizeof(otm), &otm);
+ }
+ if (0 == ret) {
+ return;
+ }
+
+#ifndef SK_GDI_ALWAYS_USE_TEXTMETRICS_FOR_FONT_METRICS
+ metrics->fTop = SkIntToScalar(-otm.otmrcFontBox.top);
+ metrics->fAscent = SkIntToScalar(-otm.otmAscent);
+ metrics->fDescent = SkIntToScalar(-otm.otmDescent);
+ metrics->fBottom = SkIntToScalar(-otm.otmrcFontBox.bottom);
+ metrics->fLeading = SkIntToScalar(otm.otmLineGap);
+ metrics->fAvgCharWidth = SkIntToScalar(otm.otmTextMetrics.tmAveCharWidth);
+ metrics->fMaxCharWidth = SkIntToScalar(otm.otmTextMetrics.tmMaxCharWidth);
+ metrics->fXMin = SkIntToScalar(otm.otmrcFontBox.left);
+ metrics->fXMax = SkIntToScalar(otm.otmrcFontBox.right);
+#endif
+ metrics->fUnderlineThickness = SkIntToScalar(otm.otmsUnderscoreSize);
+ metrics->fUnderlinePosition = -SkIntToScalar(otm.otmsUnderscorePosition);
+
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+
+ metrics->fXHeight = SkIntToScalar(otm.otmsXHeight);
+ GLYPHMETRICS gm;
+ sk_bzero(&gm, sizeof(gm));
+ DWORD len = GetGlyphOutlineW(fDDC, 'x', GGO_METRICS, &gm, 0, nullptr, &gMat2Identity);
+ if (len != GDI_ERROR && gm.gmBlackBoxY > 0) {
+ metrics->fXHeight = SkIntToScalar(gm.gmBlackBoxY);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////
+
+static void build_power_table(uint8_t table[], float ee) {
+ for (int i = 0; i < 256; i++) {
+ float x = i / 255.f;
+ x = sk_float_pow(x, ee);
+ int xx = SkScalarRoundToInt(x * 255);
+ table[i] = SkToU8(xx);
+ }
+}
+
+/**
+ * This will invert the gamma applied by GDI (gray-scale antialiased), so we
+ * can get linear values.
+ *
+ * GDI grayscale appears to use a hard-coded gamma of 2.3.
+ *
+ * GDI grayscale appears to draw using the black and white rasterizer at four
+ * times the size and then downsamples to compute the coverage mask. As a
+ * result there are only seventeen total grays. This lack of fidelity means
+ * that shifting into other color spaces is imprecise.
+ */
+static const uint8_t* getInverseGammaTableGDI() {
+ static SkOnce once;
+ static uint8_t gTableGdi[256];
+ once([]{
+ build_power_table(gTableGdi, 2.3f);
+ });
+ return gTableGdi;
+}
+
+/**
+ * This will invert the gamma applied by GDI ClearType, so we can get linear
+ * values.
+ *
+ * GDI ClearType uses SPI_GETFONTSMOOTHINGCONTRAST / 1000 as the gamma value.
+ * If this value is not specified, the default is a gamma of 1.4.
+ */
+static const uint8_t* getInverseGammaTableClearType() {
+ static SkOnce once;
+ static uint8_t gTableClearType[256];
+ once([]{
+ UINT level = 0;
+ if (!SystemParametersInfo(SPI_GETFONTSMOOTHINGCONTRAST, 0, &level, 0) || !level) {
+ // can't get the data, so use a default
+ level = 1400;
+ }
+ build_power_table(gTableClearType, level / 1000.0f);
+ });
+ return gTableClearType;
+}
+
+#include "include/private/SkColorData.h"
+
+//Cannot assume that the input rgb is gray due to possible setting of kGenA8FromLCD_Flag.
+template<bool APPLY_PREBLEND>
+static inline uint8_t rgb_to_a8(SkGdiRGB rgb, const uint8_t* table8) {
+ U8CPU r = (rgb >> 16) & 0xFF;
+ U8CPU g = (rgb >> 8) & 0xFF;
+ U8CPU b = (rgb >> 0) & 0xFF;
+ return sk_apply_lut_if<APPLY_PREBLEND>(SkComputeLuminance(r, g, b), table8);
+}
+
+template<bool APPLY_PREBLEND>
+static inline uint16_t rgb_to_lcd16(SkGdiRGB rgb, const uint8_t* tableR,
+ const uint8_t* tableG,
+ const uint8_t* tableB) {
+ U8CPU r = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 16) & 0xFF, tableR);
+ U8CPU g = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 8) & 0xFF, tableG);
+ U8CPU b = sk_apply_lut_if<APPLY_PREBLEND>((rgb >> 0) & 0xFF, tableB);
+ if constexpr (kSkShowTextBlitCoverage) {
+ r = std::max(r, 10u);
+ g = std::max(g, 10u);
+ b = std::max(b, 10u);
+ }
+ return SkPack888ToRGB16(r, g, b);
+}
+
+template<bool APPLY_PREBLEND>
+void SkScalerContext_GDI::RGBToA8(const SkGdiRGB* SK_RESTRICT src, size_t srcRB,
+ const SkGlyph& glyph, const uint8_t* table8) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.width();
+ uint8_t* SK_RESTRICT dst = (uint8_t*)((char*)glyph.fImage + (glyph.height() - 1) * dstRB);
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = rgb_to_a8<APPLY_PREBLEND>(src[i], table8);
+ if constexpr (kSkShowTextBlitCoverage) {
+ dst[i] = std::max<uint8_t>(dst[i], 10u);
+ }
+ }
+ src = SkTAddOffset<const SkGdiRGB>(src, srcRB);
+ dst -= dstRB;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+void SkScalerContext_GDI::RGBToLcd16(
+ const SkGdiRGB* SK_RESTRICT src, size_t srcRB, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB) {
+ const size_t dstRB = glyph.rowBytes();
+ const int width = glyph.width();
+ uint16_t* SK_RESTRICT dst = (uint16_t*)((char*)glyph.fImage + (glyph.height() - 1) * dstRB);
+
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = rgb_to_lcd16<APPLY_PREBLEND>(src[i], tableR, tableG, tableB);
+ }
+ src = SkTAddOffset<const SkGdiRGB>(src, srcRB);
+ dst = (uint16_t*)((char*)dst - dstRB);
+ }
+}
+
+void SkScalerContext_GDI::generateImage(const SkGlyph& glyph) {
+ SkASSERT(fDDC);
+
+ const bool isBW = SkMask::kBW_Format == fRec.fMaskFormat;
+ const bool isAA = !isLCD(fRec);
+
+ size_t srcRB;
+ const void* bits = fOffscreen.draw(glyph, isBW, &srcRB);
+ if (nullptr == bits) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ bits = fOffscreen.draw(glyph, isBW, &srcRB);
+ if (nullptr == bits) {
+ sk_bzero(glyph.fImage, glyph.imageSize());
+ return;
+ }
+ }
+
+ if (!isBW) {
+ const uint8_t* table;
+ //The offscreen contains a GDI blit if isAA and kGenA8FromLCD_Flag is not set.
+ //Otherwise the offscreen contains a ClearType blit.
+ if (isAA && !(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag)) {
+ table = getInverseGammaTableGDI();
+ } else {
+ table = getInverseGammaTableClearType();
+ }
+ //Note that the following cannot really be integrated into the
+ //pre-blend, since we may not be applying the pre-blend; when we aren't
+ //applying the pre-blend it means that a filter wants linear anyway.
+ //Other code may also be applying the pre-blend, so we'd need another
+ //one with this and one without.
+ SkGdiRGB* addr = (SkGdiRGB*)bits;
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ for (int x = 0; x < glyph.width(); ++x) {
+ int r = (addr[x] >> 16) & 0xFF;
+ int g = (addr[x] >> 8) & 0xFF;
+ int b = (addr[x] >> 0) & 0xFF;
+ addr[x] = (table[r] << 16) | (table[g] << 8) | table[b];
+ }
+ addr = SkTAddOffset<SkGdiRGB>(addr, srcRB);
+ }
+ }
+
+ size_t dstRB = glyph.rowBytes();
+ if (isBW) {
+ const uint8_t* src = (const uint8_t*)bits;
+ uint8_t* dst = (uint8_t*)((char*)glyph.fImage + (glyph.fHeight - 1) * dstRB);
+ for (int y = 0; y < glyph.fHeight; y++) {
+ memcpy(dst, src, dstRB);
+ src += srcRB;
+ dst -= dstRB;
+ }
+ if constexpr (kSkShowTextBlitCoverage) {
+ if (glyph.width() > 0 && glyph.fHeight > 0) {
+ int bitCount = glyph.width() & 7;
+ uint8_t* first = (uint8_t*)glyph.fImage;
+ uint8_t* last = (uint8_t*)((char*)glyph.fImage + glyph.height() * dstRB - 1);
+ *first |= 1 << 7;
+ *last |= bitCount == 0 ? 1 : 1 << (8 - bitCount);
+ }
+ }
+ } else if (isAA) {
+ // since the caller may require A8 for maskfilters, we can't check for BW
+ // ... until we have the caller tell us that explicitly
+ const SkGdiRGB* src = (const SkGdiRGB*)bits;
+ if (fPreBlend.isApplicable()) {
+ RGBToA8<true>(src, srcRB, glyph, fPreBlend.fG);
+ } else {
+ RGBToA8<false>(src, srcRB, glyph, fPreBlend.fG);
+ }
+ } else { // LCD16
+ const SkGdiRGB* src = (const SkGdiRGB*)bits;
+ SkASSERT(SkMask::kLCD16_Format == glyph.fMaskFormat);
+ if (fPreBlend.isApplicable()) {
+ RGBToLcd16<true>(src, srcRB, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ RGBToLcd16<false>(src, srcRB, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ }
+}
+
+namespace {
+
+class GDIGlyphbufferPointIter {
+public:
+ GDIGlyphbufferPointIter(const uint8_t* glyphbuf, DWORD total_size)
+ : fHeaderIter(glyphbuf, total_size), fCurveIter(), fPointIter()
+ { }
+
+ POINTFX const * next() {
+nextHeader:
+ if (!fCurveIter.isSet()) {
+ const TTPOLYGONHEADER* header = fHeaderIter.next();
+ if (nullptr == header) {
+ return nullptr;
+ }
+ fCurveIter.set(header);
+ const TTPOLYCURVE* curve = fCurveIter.next();
+ if (nullptr == curve) {
+ return nullptr;
+ }
+ fPointIter.set(curve);
+ return &header->pfxStart;
+ }
+
+ const POINTFX* nextPoint = fPointIter.next();
+ if (nullptr == nextPoint) {
+ const TTPOLYCURVE* curve = fCurveIter.next();
+ if (nullptr == curve) {
+ fCurveIter.set();
+ goto nextHeader;
+ } else {
+ fPointIter.set(curve);
+ }
+ nextPoint = fPointIter.next();
+ }
+ return nextPoint;
+ }
+
+ WORD currentCurveType() {
+ return fPointIter.fCurveType;
+ }
+
+private:
+ /** Iterates over all of the polygon headers in a glyphbuf. */
+ class GDIPolygonHeaderIter {
+ public:
+ GDIPolygonHeaderIter(const uint8_t* glyphbuf, DWORD total_size)
+ : fCurPolygon(reinterpret_cast<const TTPOLYGONHEADER*>(glyphbuf))
+ , fEndPolygon(SkTAddOffset<const TTPOLYGONHEADER>(glyphbuf, total_size))
+ { }
+
+ const TTPOLYGONHEADER* next() {
+ if (fCurPolygon >= fEndPolygon) {
+ return nullptr;
+ }
+ const TTPOLYGONHEADER* thisPolygon = fCurPolygon;
+ fCurPolygon = SkTAddOffset<const TTPOLYGONHEADER>(fCurPolygon, fCurPolygon->cb);
+ return thisPolygon;
+ }
+ private:
+ const TTPOLYGONHEADER* fCurPolygon;
+ const TTPOLYGONHEADER* fEndPolygon;
+ };
+
+ /** Iterates over all of the polygon curves in a polygon header. */
+ class GDIPolygonCurveIter {
+ public:
+ GDIPolygonCurveIter() : fCurCurve(nullptr), fEndCurve(nullptr) { }
+
+ GDIPolygonCurveIter(const TTPOLYGONHEADER* curPolygon)
+ : fCurCurve(SkTAddOffset<const TTPOLYCURVE>(curPolygon, sizeof(TTPOLYGONHEADER)))
+ , fEndCurve(SkTAddOffset<const TTPOLYCURVE>(curPolygon, curPolygon->cb))
+ { }
+
+ bool isSet() { return fCurCurve != nullptr; }
+
+ void set(const TTPOLYGONHEADER* curPolygon) {
+ fCurCurve = SkTAddOffset<const TTPOLYCURVE>(curPolygon, sizeof(TTPOLYGONHEADER));
+ fEndCurve = SkTAddOffset<const TTPOLYCURVE>(curPolygon, curPolygon->cb);
+ }
+ void set() {
+ fCurCurve = nullptr;
+ fEndCurve = nullptr;
+ }
+
+ const TTPOLYCURVE* next() {
+ if (fCurCurve >= fEndCurve) {
+ return nullptr;
+ }
+ const TTPOLYCURVE* thisCurve = fCurCurve;
+ fCurCurve = SkTAddOffset<const TTPOLYCURVE>(fCurCurve, size_of_TTPOLYCURVE(*fCurCurve));
+ return thisCurve;
+ }
+ private:
+ size_t size_of_TTPOLYCURVE(const TTPOLYCURVE& curve) {
+ return 2*sizeof(WORD) + curve.cpfx*sizeof(POINTFX);
+ }
+ const TTPOLYCURVE* fCurCurve;
+ const TTPOLYCURVE* fEndCurve;
+ };
+
+ /** Iterates over all of the polygon points in a polygon curve. */
+ class GDIPolygonCurvePointIter {
+ public:
+ GDIPolygonCurvePointIter() : fCurveType(0), fCurPoint(nullptr), fEndPoint(nullptr) { }
+
+ GDIPolygonCurvePointIter(const TTPOLYCURVE* curPolygon)
+ : fCurveType(curPolygon->wType)
+ , fCurPoint(&curPolygon->apfx[0])
+ , fEndPoint(&curPolygon->apfx[curPolygon->cpfx])
+ { }
+
+ bool isSet() { return fCurPoint != nullptr; }
+
+ void set(const TTPOLYCURVE* curPolygon) {
+ fCurveType = curPolygon->wType;
+ fCurPoint = &curPolygon->apfx[0];
+ fEndPoint = &curPolygon->apfx[curPolygon->cpfx];
+ }
+ void set() {
+ fCurPoint = nullptr;
+ fEndPoint = nullptr;
+ }
+
+ const POINTFX* next() {
+ if (fCurPoint >= fEndPoint) {
+ return nullptr;
+ }
+ const POINTFX* thisPoint = fCurPoint;
+ ++fCurPoint;
+ return thisPoint;
+ }
+
+ WORD fCurveType;
+ private:
+ const POINTFX* fCurPoint;
+ const POINTFX* fEndPoint;
+ };
+
+ GDIPolygonHeaderIter fHeaderIter;
+ GDIPolygonCurveIter fCurveIter;
+ GDIPolygonCurvePointIter fPointIter;
+};
+
+class SkGDIGeometrySink {
+ SkPath* fPath;
+ bool fStarted = false;
+ POINTFX fCurrent;
+
+ void goingTo(const POINTFX pt) {
+ if (!fStarted) {
+ fStarted = true;
+ fPath->moveTo( SkFIXEDToScalar(fCurrent.x),
+ -SkFIXEDToScalar(fCurrent.y));
+ }
+ fCurrent = pt;
+ }
+
+ bool currentIsNot(const POINTFX pt) {
+ return fCurrent.x.value != pt.x.value || fCurrent.x.fract != pt.x.fract ||
+ fCurrent.y.value != pt.y.value || fCurrent.y.fract != pt.y.fract;
+ }
+
+public:
+ SkGDIGeometrySink(SkPath* path) : fPath(path) {}
+ void process(const uint8_t* glyphbuf, DWORD total_size);
+
+ /** It is possible for the hinted and unhinted versions of the same path to have
+ * a different number of points due to GDI's handling of flipped points.
+ * If this is detected, this will return false.
+ */
+ bool process(const uint8_t* glyphbuf, DWORD total_size, GDIGlyphbufferPointIter hintedYs);
+};
+
+void SkGDIGeometrySink::process(const uint8_t* glyphbuf, DWORD total_size) {
+ const uint8_t* cur_glyph = glyphbuf;
+ const uint8_t* end_glyph = glyphbuf + total_size;
+
+ while (cur_glyph < end_glyph) {
+ const TTPOLYGONHEADER* th = (TTPOLYGONHEADER*)cur_glyph;
+
+ const uint8_t* end_poly = cur_glyph + th->cb;
+ const uint8_t* cur_poly = cur_glyph + sizeof(TTPOLYGONHEADER);
+
+ fStarted = false;
+ fCurrent = th->pfxStart;
+
+ while (cur_poly < end_poly) {
+ const TTPOLYCURVE* pc = (const TTPOLYCURVE*)cur_poly;
+ const POINTFX* apfx = pc->apfx;
+ const WORD cpfx = pc->cpfx;
+
+ if (pc->wType == TT_PRIM_LINE) {
+ for (uint16_t i = 0; i < cpfx; i++) {
+ POINTFX pnt_b = apfx[i];
+ if (this->currentIsNot(pnt_b)) {
+ this->goingTo(pnt_b);
+ fPath->lineTo( SkFIXEDToScalar(pnt_b.x),
+ -SkFIXEDToScalar(pnt_b.y));
+ }
+ }
+ }
+
+ if (pc->wType == TT_PRIM_QSPLINE) {
+ for (uint16_t u = 0; u < cpfx - 1; u++) { // Walk through points in spline
+ POINTFX pnt_b = apfx[u]; // B is always the current point
+ POINTFX pnt_c = apfx[u+1];
+
+ if (u < cpfx - 2) { // If not on last spline, compute C
+ pnt_c.x = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.x),
+ SkFIXEDToFixed(pnt_c.x)));
+ pnt_c.y = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.y),
+ SkFIXEDToFixed(pnt_c.y)));
+ }
+
+
+ if (this->currentIsNot(pnt_b) || this->currentIsNot(pnt_c)) {
+ this->goingTo(pnt_c);
+ fPath->quadTo( SkFIXEDToScalar(pnt_b.x),
+ -SkFIXEDToScalar(pnt_b.y),
+ SkFIXEDToScalar(pnt_c.x),
+ -SkFIXEDToScalar(pnt_c.y));
+ }
+ }
+ }
+
+ // Advance past this TTPOLYCURVE.
+ cur_poly += sizeof(WORD) * 2 + sizeof(POINTFX) * cpfx;
+ }
+ cur_glyph += th->cb;
+ if (this->fStarted) {
+ fPath->close();
+ }
+ }
+}
+
+#define move_next_expected_hinted_point(iter, pElem) do {\
+ pElem = iter.next(); \
+ if (nullptr == pElem) return false; \
+} while(0)
+
+bool SkGDIGeometrySink::process(const uint8_t* glyphbuf, DWORD total_size,
+ GDIGlyphbufferPointIter hintedYs) {
+ const uint8_t* cur_glyph = glyphbuf;
+ const uint8_t* end_glyph = glyphbuf + total_size;
+
+ POINTFX const * hintedPoint;
+
+ while (cur_glyph < end_glyph) {
+ const TTPOLYGONHEADER* th = (TTPOLYGONHEADER*)cur_glyph;
+
+ const uint8_t* end_poly = cur_glyph + th->cb;
+ const uint8_t* cur_poly = cur_glyph + sizeof(TTPOLYGONHEADER);
+
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ fStarted = false;
+ fCurrent = {th->pfxStart.x, hintedPoint->y};
+
+ while (cur_poly < end_poly) {
+ const TTPOLYCURVE* pc = (const TTPOLYCURVE*)cur_poly;
+ const POINTFX* apfx = pc->apfx;
+ const WORD cpfx = pc->cpfx;
+
+ if (pc->wType == TT_PRIM_LINE) {
+ for (uint16_t i = 0; i < cpfx; i++) {
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ POINTFX pnt_b = {apfx[i].x, hintedPoint->y};
+ if (this->currentIsNot(pnt_b)) {
+ this->goingTo(pnt_b);
+ fPath->lineTo( SkFIXEDToScalar(pnt_b.x),
+ -SkFIXEDToScalar(pnt_b.y));
+ }
+ }
+ }
+
+ if (pc->wType == TT_PRIM_QSPLINE) {
+ POINTFX currentPoint = apfx[0];
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ // only take the hinted y if it wasn't flipped
+ if (hintedYs.currentCurveType() == TT_PRIM_QSPLINE) {
+ currentPoint.y = hintedPoint->y;
+ }
+ for (uint16_t u = 0; u < cpfx - 1; u++) { // Walk through points in spline
+ POINTFX pnt_b = currentPoint;//pc->apfx[u]; // B is always the current point
+ POINTFX pnt_c = apfx[u+1];
+ move_next_expected_hinted_point(hintedYs, hintedPoint);
+ // only take the hinted y if it wasn't flipped
+ if (hintedYs.currentCurveType() == TT_PRIM_QSPLINE) {
+ pnt_c.y = hintedPoint->y;
+ }
+ currentPoint.x = pnt_c.x;
+ currentPoint.y = pnt_c.y;
+
+ if (u < cpfx - 2) { // If not on last spline, compute C
+ pnt_c.x = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.x),
+ SkFIXEDToFixed(pnt_c.x)));
+ pnt_c.y = SkFixedToFIXED(SkFixedAve(SkFIXEDToFixed(pnt_b.y),
+ SkFIXEDToFixed(pnt_c.y)));
+ }
+
+ if (this->currentIsNot(pnt_b) || this->currentIsNot(pnt_c)) {
+ this->goingTo(pnt_c);
+ fPath->quadTo( SkFIXEDToScalar(pnt_b.x),
+ -SkFIXEDToScalar(pnt_b.y),
+ SkFIXEDToScalar(pnt_c.x),
+ -SkFIXEDToScalar(pnt_c.y));
+ }
+ }
+ }
+
+ // Advance past this TTPOLYCURVE.
+ cur_poly += sizeof(WORD) * 2 + sizeof(POINTFX) * cpfx;
+ }
+ cur_glyph += th->cb;
+ if (this->fStarted) {
+ fPath->close();
+ }
+ }
+ return true;
+}
+} // namespace
+
+DWORD SkScalerContext_GDI::getGDIGlyphPath(SkGlyphID glyph, UINT flags,
+ AutoSTMalloc<BUFFERSIZE, uint8_t>* glyphbuf)
+{
+ GLYPHMETRICS gm;
+
+ DWORD total_size = GetGlyphOutlineW(fDDC, glyph, flags, &gm, BUFFERSIZE, glyphbuf->get(), &fMat22);
+ // Sometimes GetGlyphOutlineW returns a number larger than BUFFERSIZE even if BUFFERSIZE > 0.
+ // It has been verified that this does not involve a buffer overrun.
+ if (GDI_ERROR == total_size || total_size > BUFFERSIZE) {
+ // GDI_ERROR because the BUFFERSIZE was too small, or because the data was not accessible.
+        // When the data is not accessible GetGlyphOutlineW fails rather quickly,
+ // so just try to get the size. If that fails then ensure the data is accessible.
+ total_size = GetGlyphOutlineW(fDDC, glyph, flags, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == total_size) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ total_size = GetGlyphOutlineW(fDDC, glyph, flags, &gm, 0, nullptr, &fMat22);
+ if (GDI_ERROR == total_size) {
+ // GetGlyphOutlineW is known to fail for some characters, such as spaces.
+ // In these cases, just return that the glyph does not have a shape.
+ return 0;
+ }
+ }
+
+ glyphbuf->reset(total_size);
+
+ DWORD ret = GetGlyphOutlineW(fDDC, glyph, flags, &gm, total_size, glyphbuf->get(), &fMat22);
+ if (GDI_ERROR == ret) {
+ LogFontTypeface::EnsureAccessible(this->getTypeface());
+ ret = GetGlyphOutlineW(fDDC, glyph, flags, &gm, total_size, glyphbuf->get(), &fMat22);
+ if (GDI_ERROR == ret) {
+ SkASSERT(false);
+ return 0;
+ }
+ }
+ }
+ return total_size;
+}
+
+bool SkScalerContext_GDI::generatePath(const SkGlyph& glyph, SkPath* path) {
+ SkASSERT(path);
+ SkASSERT(fDDC);
+
+ path->reset();
+
+ SkGlyphID glyphID = glyph.getGlyphID();
+
+ // Out of all the fonts on a typical Windows box,
+ // 25% of glyphs require more than 2KB.
+ // 1% of glyphs require more than 4KB.
+ // 0.01% of glyphs require more than 8KB.
+ // 8KB is less than 1% of the normal 1MB stack on Windows.
+ // Note that some web fonts glyphs require more than 20KB.
+ //static const DWORD BUFFERSIZE = (1 << 13);
+
+ //GDI only uses hinted outlines when axis aligned.
+ UINT format = GGO_NATIVE | GGO_GLYPH_INDEX;
+ if (fRec.getHinting() == SkFontHinting::kNone || fRec.getHinting() == SkFontHinting::kSlight){
+ format |= GGO_UNHINTED;
+ }
+ AutoSTMalloc<BUFFERSIZE, uint8_t> glyphbuf(BUFFERSIZE);
+ DWORD total_size = getGDIGlyphPath(glyphID, format, &glyphbuf);
+ if (0 == total_size) {
+ return false;
+ }
+
+ if (fRec.getHinting() != SkFontHinting::kSlight) {
+ SkGDIGeometrySink sink(path);
+ sink.process(glyphbuf, total_size);
+ } else {
+ AutoSTMalloc<BUFFERSIZE, uint8_t> hintedGlyphbuf(BUFFERSIZE);
+ //GDI only uses hinted outlines when axis aligned.
+ DWORD hinted_total_size = getGDIGlyphPath(glyphID, GGO_NATIVE | GGO_GLYPH_INDEX,
+ &hintedGlyphbuf);
+ if (0 == hinted_total_size) {
+ return false;
+ }
+
+ SkGDIGeometrySink sinkXBufYIter(path);
+ if (!sinkXBufYIter.process(glyphbuf, total_size,
+ GDIGlyphbufferPointIter(hintedGlyphbuf, hinted_total_size)))
+ {
+ // Both path and sinkXBufYIter are in the state they were in at the time of failure.
+ path->reset();
+ SkGDIGeometrySink sink(path);
+ sink.process(glyphbuf, total_size);
+ }
+ }
+ return true;
+}
+
+static void logfont_for_name(const char* familyName, LOGFONT* lf) {
+ sk_bzero(lf, sizeof(LOGFONT));
+#ifdef UNICODE
+ // Get the buffer size needed first.
+ size_t str_len = ::MultiByteToWideChar(CP_UTF8, 0, familyName,
+ -1, nullptr, 0);
+ // Allocate a buffer (str_len already has terminating null
+ // accounted for).
+ wchar_t *wideFamilyName = new wchar_t[str_len];
+ // Now actually convert the string.
+ ::MultiByteToWideChar(CP_UTF8, 0, familyName, -1,
+ wideFamilyName, str_len);
+ ::wcsncpy(lf->lfFaceName, wideFamilyName, LF_FACESIZE - 1);
+ delete [] wideFamilyName;
+ lf->lfFaceName[LF_FACESIZE-1] = L'\0';
+#else
+ ::strncpy(lf->lfFaceName, familyName, LF_FACESIZE - 1);
+ lf->lfFaceName[LF_FACESIZE - 1] = '\0';
+#endif
+}
+
+void LogFontTypeface::onGetFamilyName(SkString* familyName) const {
+ // Get the actual name of the typeface. The logfont may not know this.
+ SkAutoHDC hdc(fLogFont);
+ dcfontname_to_skstring(hdc, fLogFont, familyName);
+}
+
+void LogFontTypeface::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocalStream) const {
+ SkString familyName;
+ this->onGetFamilyName(&familyName);
+ desc->setFamilyName(familyName.c_str());
+ desc->setStyle(this->fontStyle());
+ *isLocalStream = this->fSerializeAsStream;
+}
+
+void LogFontTypeface::getGlyphToUnicodeMap(SkUnichar* dstArray) const {
+ SkAutoHDC hdc(fLogFont);
+ unsigned int glyphCount = calculateGlyphCount(hdc, fLogFont);
+ populate_glyph_to_unicode(hdc, glyphCount, dstArray);
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> LogFontTypeface::onGetAdvancedMetrics() const {
+ LOGFONT lf = fLogFont;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info(nullptr);
+
+ // The design HFONT must be destroyed after the HDC
+ using HFONT_T = typename std::remove_pointer<HFONT>::type;
+ std::unique_ptr<HFONT_T, SkFunctionObject<DeleteObject>> designFont;
+ SkAutoHDC hdc(lf);
+
+ const char stem_chars[] = {'i', 'I', '!', '1'};
+ int16_t min_width;
+ unsigned glyphCount;
+
+ // To request design units, create a logical font whose height is specified
+ // as unitsPerEm.
+ OUTLINETEXTMETRIC otm;
+ unsigned int otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ if (0 == otmRet) {
+ call_ensure_accessible(lf);
+ otmRet = GetOutlineTextMetrics(hdc, sizeof(otm), &otm);
+ }
+ if (!otmRet || !GetTextFace(hdc, LF_FACESIZE, lf.lfFaceName)) {
+ return info;
+ }
+ lf.lfHeight = -SkToS32(otm.otmEMSquare);
+ designFont.reset(CreateFontIndirect(&lf));
+ SelectObject(hdc, designFont.get());
+ if (!GetOutlineTextMetrics(hdc, sizeof(otm), &otm)) {
+ return info;
+ }
+ glyphCount = calculateGlyphCount(hdc, fLogFont);
+
+ info.reset(new SkAdvancedTypefaceMetrics);
+ tchar_to_skstring(lf.lfFaceName, &info->fFontName);
+
+ SkOTTableOS2_V4::Type fsType;
+ if (sizeof(fsType) == this->getTableData(SkTEndian_SwapBE32(SkOTTableOS2::TAG),
+ offsetof(SkOTTableOS2_V4, fsType),
+ sizeof(fsType),
+ &fsType)) {
+ SkOTUtils::SetAdvancedTypefaceFlags(fsType, info.get());
+ } else {
+ // If bit 1 is set, the font may not be embedded in a document.
+ // If bit 1 is clear, the font can be embedded.
+ // If bit 2 is set, the embedding is read-only.
+ if (otm.otmfsType & 0x1) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ }
+
+ if (glyphCount == 0 || (otm.otmTextMetrics.tmPitchAndFamily & TMPF_TRUETYPE) == 0) {
+ return info;
+ }
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+
+ // If this bit is clear the font is a fixed pitch font.
+ if (!(otm.otmTextMetrics.tmPitchAndFamily & TMPF_FIXED_PITCH)) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ if (otm.otmTextMetrics.tmItalic) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+ if (otm.otmTextMetrics.tmPitchAndFamily & FF_ROMAN) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ } else if (otm.otmTextMetrics.tmPitchAndFamily & FF_SCRIPT) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+
+ // The main italic angle of the font, in tenths of a degree counterclockwise
+ // from vertical.
+ info->fItalicAngle = otm.otmItalicAngle / 10;
+ info->fAscent = SkToS16(otm.otmTextMetrics.tmAscent);
+ info->fDescent = SkToS16(-otm.otmTextMetrics.tmDescent);
+ // TODO(ctguil): Use alternate cap height calculation.
+    // MSDN says otmsCapEmHeight is not supported, but it is returning a value
+    // on my Win7 box.
+ info->fCapHeight = otm.otmsCapEmHeight;
+ info->fBBox =
+ SkIRect::MakeLTRB(otm.otmrcFontBox.left, otm.otmrcFontBox.top,
+ otm.otmrcFontBox.right, otm.otmrcFontBox.bottom);
+
+ // Figure out a good guess for StemV - Min width of i, I, !, 1.
+ // This probably isn't very good with an italic font.
+ min_width = SHRT_MAX;
+ info->fStemV = 0;
+ for (size_t i = 0; i < std::size(stem_chars); i++) {
+ ABC abcWidths;
+ if (GetCharABCWidths(hdc, stem_chars[i], stem_chars[i], &abcWidths)) {
+ int16_t width = abcWidths.abcB;
+ if (width > 0 && width < min_width) {
+ min_width = width;
+ info->fStemV = min_width;
+ }
+ }
+ }
+
+ return info;
+}
+
+//Placeholder representation of a Base64 encoded GUID from create_unique_font_name.
+#define BASE64_GUID_ID "XXXXXXXXXXXXXXXXXXXXXXXX"
+//Length of GUID representation from create_id, including nullptr terminator.
+#define BASE64_GUID_ID_LEN std::size(BASE64_GUID_ID)
+
+static_assert(BASE64_GUID_ID_LEN < LF_FACESIZE, "GUID_longer_than_facesize");
+
+/**
+ NameID 6 Postscript names cannot have the character '/'.
+ It would be easier to hex encode the GUID, but that is 32 bytes,
+ and many systems have issues with names longer than 28 bytes.
+ The following need not be any standard base64 encoding.
+ The encoded value is never decoded.
+*/
+static const char postscript_safe_base64_encode[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789-_=";
+
+/**
+ Formats a GUID into Base64 and places it into buffer.
+ buffer should have space for at least BASE64_GUID_ID_LEN characters.
+ The string will always be null terminated.
+ XXXXXXXXXXXXXXXXXXXXXXXX0
+ */
+static void format_guid_b64(const GUID& guid, char* buffer, size_t bufferSize) {
+ SkASSERT(bufferSize >= BASE64_GUID_ID_LEN);
+ size_t written = SkBase64::Encode(&guid, sizeof(guid), buffer, postscript_safe_base64_encode);
+ SkASSERT(written < LF_FACESIZE);
+ buffer[written] = '\0';
+}
+
+/**
+ Creates a Base64 encoded GUID and places it into buffer.
+ buffer should have space for at least BASE64_GUID_ID_LEN characters.
+ The string will always be null terminated.
+ XXXXXXXXXXXXXXXXXXXXXXXX0
+ */
+static HRESULT create_unique_font_name(char* buffer, size_t bufferSize) {
+ GUID guid = {};
+ if (FAILED(CoCreateGuid(&guid))) {
+ return E_UNEXPECTED;
+ }
+ format_guid_b64(guid, buffer, bufferSize);
+
+ return S_OK;
+}
+
+/**
+ Introduces a font to GDI. On failure will return nullptr. The returned handle
+ should eventually be passed to RemoveFontMemResourceEx.
+*/
+static HANDLE activate_font(SkData* fontData) {
+ DWORD numFonts = 0;
+ //AddFontMemResourceEx just copies the data, but does not specify const.
+ HANDLE fontHandle = AddFontMemResourceEx(const_cast<void*>(fontData->data()),
+ static_cast<DWORD>(fontData->size()),
+ nullptr,
+ &numFonts);
+
+ if (fontHandle != nullptr && numFonts < 1) {
+ RemoveFontMemResourceEx(fontHandle);
+ return nullptr;
+ }
+
+ return fontHandle;
+}
+
+// Does not affect ownership of stream.
+static sk_sp<SkTypeface> create_from_stream(std::unique_ptr<SkStreamAsset> stream) {
+ // Create a unique and unpredictable font name.
+ // Avoids collisions and access from CSS.
+ char familyName[BASE64_GUID_ID_LEN];
+ const int familyNameSize = std::size(familyName);
+ if (FAILED(create_unique_font_name(familyName, familyNameSize))) {
+ return nullptr;
+ }
+
+ // Change the name of the font.
+ sk_sp<SkData> rewrittenFontData(SkOTUtils::RenameFont(stream.get(), familyName, familyNameSize-1));
+ if (nullptr == rewrittenFontData.get()) {
+ return nullptr;
+ }
+
+ // Register the font with GDI.
+ HANDLE fontReference = activate_font(rewrittenFontData.get());
+ if (nullptr == fontReference) {
+ return nullptr;
+ }
+
+ // Create the typeface.
+ LOGFONT lf;
+ logfont_for_name(familyName, &lf);
+
+ return sk_sp<SkTypeface>(SkCreateFontMemResourceTypefaceFromLOGFONT(lf, fontReference));
+}
+
+std::unique_ptr<SkStreamAsset> LogFontTypeface::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = 0;
+
+ const DWORD kTTCTag = SkEndian_SwapBE32(SkSetFourByteTag('t', 't', 'c', 'f'));
+ LOGFONT lf = fLogFont;
+
+ SkAutoHDC hdc(lf);
+
+ std::unique_ptr<SkStreamAsset> stream;
+ DWORD tables[2] = {kTTCTag, 0};
+ for (size_t i = 0; i < std::size(tables); i++) {
+ DWORD bufferSize = GetFontData(hdc, tables[i], 0, nullptr, 0);
+ if (bufferSize == GDI_ERROR) {
+ call_ensure_accessible(lf);
+ bufferSize = GetFontData(hdc, tables[i], 0, nullptr, 0);
+ }
+ if (bufferSize != GDI_ERROR) {
+ stream.reset(new SkMemoryStream(bufferSize));
+ if (GetFontData(hdc, tables[i], 0, (void*)stream->getMemoryBase(), bufferSize)) {
+ break;
+ } else {
+ stream.reset();
+ }
+ }
+ }
+ return stream;
+}
+
+sk_sp<SkTypeface> LogFontTypeface::onMakeClone(const SkFontArguments& args) const {
+ return sk_ref_sp(this);
+}
+
+static void bmpCharsToGlyphs(HDC hdc, const WCHAR* bmpChars, int count, uint16_t* glyphs,
+ bool Ox1FHack)
+{
+ // Type1 fonts fail with uniscribe API. Use GetGlyphIndices for plane 0.
+
+ /** Real documentation for GetGlyphIndicesW:
+ *
+ * When GGI_MARK_NONEXISTING_GLYPHS is not specified and a character does not map to a
+ * glyph, then the 'default character's glyph is returned instead. The 'default character'
+ * is available in fTM.tmDefaultChar. FON fonts have a default character, and there exists
+ * a usDefaultChar in the 'OS/2' table, version 2 and later. If there is no
+ * 'default character' specified by the font, then often the first character found is used.
+ *
+ * When GGI_MARK_NONEXISTING_GLYPHS is specified and a character does not map to a glyph,
+ * then the glyph 0xFFFF is used. In Windows XP and earlier, Bitmap/Vector FON usually use
+ * glyph 0x1F instead ('Terminal' appears to be special, returning 0xFFFF).
+ * Type1 PFM/PFB, TT, OT TT, OT CFF all appear to use 0xFFFF, even on XP.
+ */
+ DWORD result = GetGlyphIndicesW(hdc, bmpChars, count, glyphs, GGI_MARK_NONEXISTING_GLYPHS);
+ if (GDI_ERROR == result) {
+ for (int i = 0; i < count; ++i) {
+ glyphs[i] = 0;
+ }
+ return;
+ }
+
+ if (Ox1FHack) {
+ for (int i = 0; i < count; ++i) {
+ if (0xFFFF == glyphs[i] || 0x1F == glyphs[i]) {
+ glyphs[i] = 0;
+ }
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ if (0xFFFF == glyphs[i]){
+ glyphs[i] = 0;
+ }
+ }
+ }
+}
+
+static uint16_t nonBmpCharToGlyph(HDC hdc, SCRIPT_CACHE* scriptCache, const WCHAR utf16[2]) {
+ uint16_t index = 0;
+    // Use Uniscribe to determine the glyph index for non-BMP characters.
+ static const int numWCHAR = 2;
+ static const int maxItems = 2;
+ // MSDN states that this can be nullptr, but some things don't work then.
+ SCRIPT_CONTROL scriptControl;
+ memset(&scriptControl, 0, sizeof(scriptControl));
+ // Add extra item to SCRIPT_ITEM to work around a bug (now documented).
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=366643
+ SCRIPT_ITEM si[maxItems + 1];
+ int numItems;
+ HRZM(ScriptItemize(utf16, numWCHAR, maxItems, &scriptControl, nullptr, si, &numItems),
+ "Could not itemize character.");
+
+ // Sometimes ScriptShape cannot find a glyph for a non-BMP and returns 2 space glyphs.
+ static const int maxGlyphs = 2;
+ SCRIPT_VISATTR vsa[maxGlyphs];
+ WORD outGlyphs[maxGlyphs];
+ WORD logClust[numWCHAR];
+ int numGlyphs;
+ SCRIPT_ANALYSIS& script = si[0].a;
+ script.eScript = SCRIPT_UNDEFINED;
+ script.fRTL = FALSE;
+ script.fLayoutRTL = FALSE;
+ script.fLinkBefore = FALSE;
+ script.fLinkAfter = FALSE;
+ script.fLogicalOrder = FALSE;
+ script.fNoGlyphIndex = FALSE;
+ script.s.uBidiLevel = 0;
+ script.s.fOverrideDirection = 0;
+ script.s.fInhibitSymSwap = TRUE;
+ script.s.fCharShape = FALSE;
+ script.s.fDigitSubstitute = FALSE;
+ script.s.fInhibitLigate = FALSE;
+ script.s.fDisplayZWG = TRUE;
+ script.s.fArabicNumContext = FALSE;
+ script.s.fGcpClusters = FALSE;
+ script.s.fReserved = 0;
+ script.s.fEngineReserved = 0;
+ // For the future, 0x80040200 from here is USP_E_SCRIPT_NOT_IN_FONT
+ HRZM(ScriptShape(hdc, scriptCache, utf16, numWCHAR, maxGlyphs, &script,
+ outGlyphs, logClust, vsa, &numGlyphs),
+ "Could not shape character.");
+ if (1 == numGlyphs) {
+ index = outGlyphs[0];
+ }
+ return index;
+}
+
+void LogFontTypeface::onCharsToGlyphs(const SkUnichar* uni, int glyphCount,
+ SkGlyphID glyphs[]) const
+{
+ SkAutoHDC hdc(fLogFont);
+
+ TEXTMETRIC tm;
+ if (0 == GetTextMetrics(hdc, &tm)) {
+ call_ensure_accessible(fLogFont);
+ if (0 == GetTextMetrics(hdc, &tm)) {
+ tm.tmPitchAndFamily = TMPF_TRUETYPE;
+ }
+ }
+ bool Ox1FHack = !(tm.tmPitchAndFamily & TMPF_VECTOR) /*&& winVer < Vista */;
+
+ SCRIPT_CACHE sc = nullptr;
+ static const int scratchCount = 256;
+ WCHAR scratch[scratchCount];
+ int glyphIndex = 0;
+ const uint32_t* utf32 = reinterpret_cast<const uint32_t*>(uni);
+ while (glyphIndex < glyphCount) {
+ // Try a run of bmp.
+ int glyphsLeft = std::min(glyphCount - glyphIndex, scratchCount);
+ int runLength = 0;
+ while (runLength < glyphsLeft && utf32[glyphIndex + runLength] <= 0xFFFF) {
+ scratch[runLength] = static_cast<WCHAR>(utf32[glyphIndex + runLength]);
+ ++runLength;
+ }
+ if (runLength) {
+ bmpCharsToGlyphs(hdc, scratch, runLength, &glyphs[glyphIndex], Ox1FHack);
+ glyphIndex += runLength;
+ }
+
+ // Try a run of non-bmp.
+ while (glyphIndex < glyphCount && utf32[glyphIndex] > 0xFFFF) {
+ SkUTF::ToUTF16(utf32[glyphIndex], reinterpret_cast<uint16_t*>(scratch));
+ glyphs[glyphIndex] = nonBmpCharToGlyph(hdc, &sc, scratch);
+ ++glyphIndex;
+ }
+ }
+
+ if (sc) {
+ ::ScriptFreeCache(&sc);
+ }
+}
+
+int LogFontTypeface::onCountGlyphs() const {
+ SkAutoHDC hdc(fLogFont);
+ return calculateGlyphCount(hdc, fLogFont);
+}
+
+void LogFontTypeface::getPostScriptGlyphNames(SkString*) const {}
+
+int LogFontTypeface::onGetUPEM() const {
+ SkAutoHDC hdc(fLogFont);
+ return calculateUPEM(hdc, fLogFont);
+}
+
+SkTypeface::LocalizedStrings* LogFontTypeface::onCreateFamilyNameIterator() const {
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*this);
+ if (!nameIter) {
+ SkString familyName;
+ this->getFamilyName(&familyName);
+ SkString language("und"); //undetermined
+ nameIter = sk_make_sp<SkOTUtils::LocalizedStrings_SingleName>(familyName, language);
+ }
+ return nameIter.release();
+}
+
+int LogFontTypeface::onGetTableTags(SkFontTableTag tags[]) const {
+ SkSFNTHeader header;
+ if (sizeof(header) != this->onGetTableData(0, 0, sizeof(header), &header)) {
+ return 0;
+ }
+
+ int numTables = SkEndian_SwapBE16(header.numTables);
+
+ if (tags) {
+ size_t size = numTables * sizeof(SkSFNTHeader::TableDirectoryEntry);
+ AutoSTMalloc<0x20, SkSFNTHeader::TableDirectoryEntry> dir(numTables);
+ if (size != this->onGetTableData(0, sizeof(header), size, dir.get())) {
+ return 0;
+ }
+
+ for (int i = 0; i < numTables; ++i) {
+ tags[i] = SkEndian_SwapBE32(dir[i].tag);
+ }
+ }
+ return numTables;
+}
+
+size_t LogFontTypeface::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ LOGFONT lf = fLogFont;
+ SkAutoHDC hdc(lf);
+
+ tag = SkEndian_SwapBE32(tag);
+ if (nullptr == data) {
+ length = 0;
+ }
+ DWORD bufferSize = GetFontData(hdc, tag, (DWORD) offset, data, (DWORD) length);
+ if (bufferSize == GDI_ERROR) {
+ call_ensure_accessible(lf);
+ bufferSize = GetFontData(hdc, tag, (DWORD) offset, data, (DWORD) length);
+ }
+ return bufferSize == GDI_ERROR ? 0 : bufferSize;
+}
+
+sk_sp<SkData> LogFontTypeface::onCopyTableData(SkFontTableTag tag) const {
+ LOGFONT lf = fLogFont;
+ SkAutoHDC hdc(lf);
+
+ tag = SkEndian_SwapBE32(tag);
+ DWORD size = GetFontData(hdc, tag, 0, nullptr, 0);
+ if (size == GDI_ERROR) {
+ call_ensure_accessible(lf);
+ size = GetFontData(hdc, tag, 0, nullptr, 0);
+ }
+
+ sk_sp<SkData> data;
+ if (size != GDI_ERROR) {
+ data = SkData::MakeUninitialized(size);
+ if (GetFontData(hdc, tag, 0, data->writable_data(), size) == GDI_ERROR) {
+ data.reset();
+ }
+ }
+ return data;
+}
+
+std::unique_ptr<SkScalerContext> LogFontTypeface::onCreateScalerContext(
+ const SkScalerContextEffects& effects, const SkDescriptor* desc) const
+{
+ auto ctx = std::make_unique<SkScalerContext_GDI>(
+ sk_ref_sp(const_cast<LogFontTypeface*>(this)), effects, desc);
+ if (ctx->isValid()) {
+ return std::move(ctx);
+ }
+
+ ctx.reset();
+ SkStrikeCache::PurgeAll();
+ ctx = std::make_unique<SkScalerContext_GDI>(
+ sk_ref_sp(const_cast<LogFontTypeface*>(this)), effects, desc);
+ if (ctx->isValid()) {
+ return std::move(ctx);
+ }
+
+ return SkScalerContext::MakeEmpty(
+ sk_ref_sp(const_cast<LogFontTypeface*>(this)), effects, desc);
+}
+
+void LogFontTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ if (rec->fFlags & SkScalerContext::kLCD_BGROrder_Flag ||
+ rec->fFlags & SkScalerContext::kLCD_Vertical_Flag)
+ {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ rec->fFlags |= SkScalerContext::kGenA8FromLCD_Flag;
+ }
+
+ unsigned flagsWeDontSupport = SkScalerContext::kForceAutohinting_Flag |
+ SkScalerContext::kEmbeddedBitmapText_Flag |
+ SkScalerContext::kEmbolden_Flag |
+ SkScalerContext::kLCD_BGROrder_Flag |
+ SkScalerContext::kLCD_Vertical_Flag;
+ rec->fFlags &= ~flagsWeDontSupport;
+
+ SkFontHinting h = rec->getHinting();
+ switch (h) {
+ case SkFontHinting::kNone:
+ break;
+ case SkFontHinting::kSlight:
+ // Only do slight hinting when axis aligned.
+ // TODO: re-enable slight hinting when FontHostTest can pass.
+ //if (!isAxisAligned(*rec)) {
+ h = SkFontHinting::kNone;
+ //}
+ break;
+ case SkFontHinting::kNormal:
+ case SkFontHinting::kFull:
+ // TODO: need to be able to distinguish subpixel positioned glyphs
+ // and linear metrics.
+ //rec->fFlags &= ~SkScalerContext::kSubpixelPositioning_Flag;
+ h = SkFontHinting::kNormal;
+ break;
+ default:
+ SkDEBUGFAIL("unknown hinting");
+ }
+ //TODO: if this is a bitmap font, squash hinting and subpixel.
+ rec->setHinting(h);
+
+// turn this off since GDI might turn A8 into BW! Need a bigger fix.
+#if 0
+ // Disable LCD when rotated, since GDI's output is ugly
+ if (isLCD(*rec) && !isAxisAligned(*rec)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ }
+#endif
+
+ if (!fCanBeLCD && isLCD(*rec)) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ rec->fFlags &= ~SkScalerContext::kGenA8FromLCD_Flag;
+ } else if (rec->fMaskFormat == SkMask::kA8_Format) {
+ // Bug 1277404
+ // If we have non LCD GDI text, render the fonts as cleartype and convert them
+ // to grayscale. This seems to be what Chrome and IE are doing on Windows 7.
+ // This also applies if cleartype is disabled system wide.
+ rec->fFlags |= SkScalerContext::kGenA8FromLCD_Flag;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/core/SkDataTable.h"
+#include "include/core/SkFontMgr.h"
+
+static bool valid_logfont_for_enum(const LOGFONT& lf) {
+ // TODO: Vector FON is unsupported and should not be listed.
+ return
+ // Ignore implicit vertical variants.
+ lf.lfFaceName[0] && lf.lfFaceName[0] != '@'
+
+ // DEFAULT_CHARSET is used to get all fonts, but also implies all
+ // character sets. Filter assuming all fonts support ANSI_CHARSET.
+ && ANSI_CHARSET == lf.lfCharSet
+ ;
+}
+
+/** An EnumFontFamExProc implementation which interprets builderParam as
+ * an SkTDArray<ENUMLOGFONTEX>* and appends logfonts which
+ * pass the valid_logfont_for_enum predicate.
+ */
+static int CALLBACK enum_family_proc(const LOGFONT* lf, const TEXTMETRIC*,
+ DWORD fontType, LPARAM builderParam) {
+ if (valid_logfont_for_enum(*lf)) {
+ SkTDArray<ENUMLOGFONTEX>* array = (SkTDArray<ENUMLOGFONTEX>*)builderParam;
+ *array->append() = *(ENUMLOGFONTEX*)lf;
+ }
+ return 1; // non-zero means continue
+}
+
+class SkFontStyleSetGDI : public SkFontStyleSet {
+public:
+ SkFontStyleSetGDI(const TCHAR familyName[]) {
+ LOGFONT lf;
+ sk_bzero(&lf, sizeof(lf));
+ lf.lfCharSet = DEFAULT_CHARSET;
+ _tcscpy_s(lf.lfFaceName, familyName);
+
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ ::EnumFontFamiliesEx(hdc, &lf, enum_family_proc, (LPARAM)&fArray, 0);
+ ::DeleteDC(hdc);
+ }
+
+ int count() override {
+ return fArray.size();
+ }
+
+ void getStyle(int index, SkFontStyle* fs, SkString* styleName) override {
+ if (fs) {
+ *fs = get_style(fArray[index].elfLogFont);
+ }
+ if (styleName) {
+ const ENUMLOGFONTEX& ref = fArray[index];
+ // For some reason, ENUMLOGFONTEX and LOGFONT disagree on their type in the
+ // non-unicode version.
+ // ENUMLOGFONTEX uses BYTE
+ // LOGFONT uses CHAR
+            // Here we assert that the style name is logically the same (size) as
+            // a TCHAR, so we can use the same converter function.
+ SkASSERT(sizeof(TCHAR) == sizeof(ref.elfStyle[0]));
+ tchar_to_skstring((const TCHAR*)ref.elfStyle, styleName);
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ return SkCreateTypefaceFromLOGFONT(fArray[index].elfLogFont);
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ return this->matchStyleCSS3(pattern);
+ }
+
+private:
+ SkTDArray<ENUMLOGFONTEX> fArray;
+};
+
+class SkFontMgrGDI : public SkFontMgr {
+public:
+ SkFontMgrGDI() {
+ LOGFONT lf;
+ sk_bzero(&lf, sizeof(lf));
+ lf.lfCharSet = DEFAULT_CHARSET;
+
+ HDC hdc = ::CreateCompatibleDC(nullptr);
+ ::EnumFontFamiliesEx(hdc, &lf, enum_family_proc, (LPARAM)&fLogFontArray, 0);
+ ::DeleteDC(hdc);
+ }
+
+protected:
+ int onCountFamilies() const override {
+ return fLogFontArray.size();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ SkASSERT(index < fLogFontArray.size());
+ tchar_to_skstring(fLogFontArray[index].elfLogFont.lfFaceName, familyName);
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ SkASSERT(index < fLogFontArray.size());
+ return new SkFontStyleSetGDI(fLogFontArray[index].elfLogFont.lfFaceName);
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (nullptr == familyName) {
+ familyName = ""; // do we need this check???
+ }
+ LOGFONT lf;
+ logfont_for_name(familyName, &lf);
+ return new SkFontStyleSetGDI(lf.lfFaceName);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const override {
+ // could be in base impl
+ sk_sp<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontstyle);
+ }
+
+ virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ return nullptr;
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ if (ttcIndex != 0) {
+ return nullptr;
+ }
+ return create_from_stream(std::move(stream));
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override {
+ return this->makeFromStream(std::move(stream), args.getCollectionIndex());
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ // could be in base impl
+ return this->makeFromStream(std::unique_ptr<SkStreamAsset>(new SkMemoryStream(std::move(data))),
+ ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ // could be in base impl
+ auto stream = SkStream::MakeFromFile(path);
+ return stream ? this->makeFromStream(std::move(stream), ttcIndex) : nullptr;
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override {
+ LOGFONT lf;
+ if (nullptr == familyName) {
+ lf = get_default_font();
+ } else {
+ logfont_for_name(familyName, &lf);
+ }
+
+ lf.lfWeight = style.weight();
+ lf.lfItalic = style.slant() == SkFontStyle::kUpright_Slant ? FALSE : TRUE;
+ return sk_sp<SkTypeface>(SkCreateTypefaceFromLOGFONT(lf));
+ }
+
+private:
+ SkTDArray<ENUMLOGFONTEX> fLogFontArray;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkFontMgr> SkFontMgr_New_GDI() { return sk_make_sp<SkFontMgrGDI>(); }
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp
new file mode 100644
index 0000000000..89685e8aee
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface.cpp
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/ports/SkFontConfigInterface.h"
+#include "include/ports/SkFontMgr_FontConfigInterface.h"
+#include "include/private/base/SkMutex.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkFontConfigTypeface.h"
+#include <new>
+
+using namespace skia_private;
+
+std::unique_ptr<SkStreamAsset> SkTypeface_FCI::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = this->getIdentity().fTTCIndex;
+ return std::unique_ptr<SkStreamAsset>(fFCI->openStream(this->getIdentity()));
+}
+
+std::unique_ptr<SkFontData> SkTypeface_FCI::onMakeFontData() const {
+ const SkFontConfigInterface::FontIdentity& id = this->getIdentity();
+ return std::make_unique<SkFontData>(std::unique_ptr<SkStreamAsset>(fFCI->openStream(id)),
+ id.fTTCIndex, 0, nullptr, 0, nullptr, 0);
+}
+
+void SkTypeface_FCI::onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const {
+ SkString name;
+ this->getFamilyName(&name);
+ desc->setFamilyName(name.c_str());
+ desc->setStyle(this->fontStyle());
+ desc->setFactoryId(SkTypeface_FreeType::FactoryId);
+ *serialize = true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontStyleSet_FCI : public SkFontStyleSet {
+public:
+ SkFontStyleSet_FCI() {}
+
+ int count() override { return 0; }
+ void getStyle(int index, SkFontStyle*, SkString* style) override { SkASSERT(false); }
+ SkTypeface* createTypeface(int index) override { SkASSERT(false); return nullptr; }
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override { return nullptr; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontRequestCache {
+public:
+ struct Request : public SkResourceCache::Key {
+ private:
+ Request(const char* name, size_t nameLen, const SkFontStyle& style) : fStyle(style) {
+ /** Pointer to just after the last field of this class. */
+ char* content = const_cast<char*>(SkTAfter<const char>(&this->fStyle));
+
+ // No holes.
+ SkASSERT(SkTAddOffset<char>(this, sizeof(SkResourceCache::Key) + keySize) == content);
+
+ // Has a size divisible by size of uint32_t.
+ SkASSERT((content - reinterpret_cast<char*>(this)) % sizeof(uint32_t) == 0);
+
+ size_t contentLen = SkAlign4(nameLen);
+ sk_careful_memcpy(content, name, nameLen);
+ sk_bzero(content + nameLen, contentLen - nameLen);
+ this->init(nullptr, 0, keySize + contentLen);
+ }
+ const SkFontStyle fStyle;
+ /** The sum of the sizes of the fields of this class. */
+ static const size_t keySize = sizeof(fStyle);
+
+ public:
+ static Request* Create(const char* name, const SkFontStyle& style) {
+ size_t nameLen = name ? strlen(name) : 0;
+ size_t contentLen = SkAlign4(nameLen);
+ char* storage = new char[sizeof(Request) + contentLen];
+ return new (storage) Request(name, nameLen, style);
+ }
+ void operator delete(void* storage) {
+ delete[] reinterpret_cast<char*>(storage);
+ }
+ };
+
+
+private:
+ struct Result : public SkResourceCache::Rec {
+ Result(Request* request, sk_sp<SkTypeface> typeface)
+ : fRequest(request), fFace(std::move(typeface)) {}
+ Result(Result&&) = default;
+ Result& operator=(Result&&) = default;
+
+ const Key& getKey() const override { return *fRequest; }
+ size_t bytesUsed() const override { return fRequest->size() + sizeof(fFace); }
+ const char* getCategory() const override { return "request_cache"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override { return nullptr; }
+
+ std::unique_ptr<Request> fRequest;
+ sk_sp<SkTypeface> fFace;
+ };
+
+ SkResourceCache fCachedResults;
+
+public:
+ SkFontRequestCache(size_t maxSize) : fCachedResults(maxSize) {}
+
+ /** Takes ownership of request. It will be deleted when no longer needed. */
+ void add(sk_sp<SkTypeface> face, Request* request) {
+ fCachedResults.add(new Result(request, std::move(face)));
+ }
+ /** Does not take ownership of request. */
+ sk_sp<SkTypeface> findAndRef(Request* request) {
+ sk_sp<SkTypeface> face;
+ fCachedResults.find(*request, [](const SkResourceCache::Rec& rec, void* context) -> bool {
+ const Result& result = static_cast<const Result&>(rec);
+ sk_sp<SkTypeface>* face = static_cast<sk_sp<SkTypeface>*>(context);
+
+ *face = result.fFace;
+ return true;
+ }, &face);
+ return face;
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool find_by_FontIdentity(SkTypeface* cachedTypeface, void* ctx) {
+ typedef SkFontConfigInterface::FontIdentity FontIdentity;
+ SkTypeface_FCI* cachedFCTypeface = static_cast<SkTypeface_FCI*>(cachedTypeface);
+ FontIdentity* identity = static_cast<FontIdentity*>(ctx);
+
+ return cachedFCTypeface->getIdentity() == *identity;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SkFontMgr_FCI : public SkFontMgr {
+ sk_sp<SkFontConfigInterface> fFCI;
+ SkTypeface_FreeType::Scanner fScanner;
+
+ mutable SkMutex fMutex;
+ mutable SkTypefaceCache fTFCache;
+
+ // The value of maxSize here is a compromise between cache hits and cache size.
+ // See https://crbug.com/424082#63 for reason for current size.
+ static const size_t kMaxSize = 1 << 15;
+ mutable SkFontRequestCache fCache;
+
+public:
+ SkFontMgr_FCI(sk_sp<SkFontConfigInterface> fci)
+ : fFCI(std::move(fci))
+ , fCache(kMaxSize)
+ {}
+
+protected:
+ int onCountFamilies() const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char requestedFamilyName[],
+ const SkFontStyle& requestedStyle) const override
+ {
+ SkAutoMutexExclusive ama(fMutex);
+
+ SkFontConfigInterface::FontIdentity identity;
+ SkString outFamilyName;
+ SkFontStyle outStyle;
+ if (!fFCI->matchFamilyName(requestedFamilyName, requestedStyle,
+ &identity, &outFamilyName, &outStyle))
+ {
+ return nullptr;
+ }
+
+ // Check if a typeface with this FontIdentity is already in the FontIdentity cache.
+ sk_sp<SkTypeface> face = fTFCache.findByProcAndRef(find_by_FontIdentity, &identity);
+ if (!face) {
+ face.reset(SkTypeface_FCI::Create(fFCI, identity, std::move(outFamilyName), outStyle));
+ // Add this FontIdentity to the FontIdentity cache.
+ fTFCache.add(face);
+ }
+ return face.release();
+ }
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ SK_ABORT("Not implemented.");
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ return this->onMakeFromStreamIndex(SkMemoryStream::Make(std::move(data)), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ return this->makeFromStream(std::move(stream),
+ SkFontArguments().setCollectionIndex(ttcIndex));
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override {
+ const size_t length = stream->getLength();
+ if (!length) {
+ return nullptr;
+ }
+ if (length >= 1024 * 1024 * 1024) {
+ return nullptr; // don't accept too large fonts (>= 1GB) for safety.
+ }
+
+ return SkTypeface_FreeType::MakeFromStream(std::move(stream), args);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream ? this->makeFromStream(std::move(stream), ttcIndex) : nullptr;
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char requestedFamilyName[],
+ SkFontStyle requestedStyle) const override
+ {
+ SkAutoMutexExclusive ama(fMutex);
+
+ // Check if this request is already in the request cache.
+ using Request = SkFontRequestCache::Request;
+ std::unique_ptr<Request> request(Request::Create(requestedFamilyName, requestedStyle));
+ sk_sp<SkTypeface> face = fCache.findAndRef(request.get());
+ if (face) {
+ return sk_sp<SkTypeface>(face);
+ }
+
+ SkFontConfigInterface::FontIdentity identity;
+ SkString outFamilyName;
+ SkFontStyle outStyle;
+ if (!fFCI->matchFamilyName(requestedFamilyName, requestedStyle,
+ &identity, &outFamilyName, &outStyle))
+ {
+ return nullptr;
+ }
+
+ // Check if a typeface with this FontIdentity is already in the FontIdentity cache.
+ face = fTFCache.findByProcAndRef(find_by_FontIdentity, &identity);
+ if (!face) {
+ face.reset(SkTypeface_FCI::Create(fFCI, identity, std::move(outFamilyName), outStyle));
+ // Add this FontIdentity to the FontIdentity cache.
+ fTFCache.add(face);
+ }
+ // Add this request to the request cache.
+ fCache.add(face, request.release());
+
+ return face;
+ }
+};
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_FCI(sk_sp<SkFontConfigInterface> fci) {
+ SkASSERT(fci);
+ return sk_make_sp<SkFontMgr_FCI>(std::move(fci));
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp
new file mode 100644
index 0000000000..cb64ec1ed4
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_FontConfigInterface_factory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2008 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontConfigInterface.h"
+#include "include/ports/SkFontMgr_FontConfigInterface.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ sk_sp<SkFontConfigInterface> fci(SkFontConfigInterface::RefGlobal());
+ if (!fci) {
+ return nullptr;
+ }
+ return SkFontMgr_New_FCI(std::move(fci));
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android.cpp
new file mode 100644
index 0000000000..58a26e22ef
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android.cpp
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/ports/SkFontMgr_android.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkTSearch.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+#include "src/ports/SkFontMgr_android_parser.h"
+
+#include <algorithm>
+#include <limits>
+
+using namespace skia_private;
+
+class SkData;
+
+class SkTypeface_Android : public SkTypeface_FreeType {
+public:
+ SkTypeface_Android(const SkFontStyle& style,
+ bool isFixedPitch,
+ const SkString& familyName)
+ : INHERITED(style, isFixedPitch)
+ , fFamilyName(familyName)
+ { }
+
+protected:
+ void onGetFamilyName(SkString* familyName) const override {
+ *familyName = fFamilyName;
+ }
+
+ SkString fFamilyName;
+
+private:
+ using INHERITED = SkTypeface_FreeType;
+};
+
+class SkTypeface_AndroidSystem : public SkTypeface_Android {
+public:
+ SkTypeface_AndroidSystem(const SkString& pathName,
+ const bool cacheFontFiles,
+ int index,
+ const SkFixed* axes, int axesCount,
+ const SkFontStyle& style,
+ bool isFixedPitch,
+ const SkString& familyName,
+ const SkTArray<SkLanguage, true>& lang,
+ FontVariant variantStyle)
+ : INHERITED(style, isFixedPitch, familyName)
+ , fPathName(pathName)
+ , fIndex(index)
+ , fAxes(axes, axesCount)
+ , fLang(lang)
+ , fVariantStyle(variantStyle)
+ , fFile(cacheFontFiles ? sk_fopen(fPathName.c_str(), kRead_SkFILE_Flag) : nullptr) {
+ if (cacheFontFiles) {
+ SkASSERT(fFile);
+ }
+ }
+
+ std::unique_ptr<SkStreamAsset> makeStream() const {
+ if (fFile) {
+ sk_sp<SkData> data(SkData::MakeFromFILE(fFile));
+ return data ? std::make_unique<SkMemoryStream>(std::move(data)) : nullptr;
+ }
+ return SkStream::MakeFromFile(fPathName.c_str());
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ SkASSERT(desc);
+ SkASSERT(serialize);
+ desc->setFamilyName(fFamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ desc->setFactoryId(SkTypeface_FreeType::FactoryId);
+ *serialize = false;
+ }
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override {
+ *ttcIndex = fIndex;
+ return this->makeStream();
+ }
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ return std::make_unique<SkFontData>(
+ this->makeStream(), fIndex, 0, fAxes.begin(), fAxes.size(), nullptr, 0);
+ }
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+ return sk_make_sp<SkTypeface_AndroidSystem>(fPathName,
+ fFile,
+ fIndex,
+ data->getAxis(),
+ data->getAxisCount(),
+ this->fontStyle(),
+ this->isFixedPitch(),
+ fFamilyName,
+ fLang,
+ fVariantStyle);
+ }
+
+ const SkString fPathName;
+ int fIndex;
+ const SkSTArray<4, SkFixed, true> fAxes;
+ const SkSTArray<4, SkLanguage, true> fLang;
+ const FontVariant fVariantStyle;
+ SkAutoTCallVProc<FILE, sk_fclose> fFile;
+
+ using INHERITED = SkTypeface_Android;
+};
+
+class SkFontStyleSet_Android : public SkFontStyleSet {
+ typedef SkTypeface_FreeType::Scanner Scanner;
+
+public:
+ explicit SkFontStyleSet_Android(const FontFamily& family, const Scanner& scanner,
+ const bool cacheFontFiles) {
+ const SkString* cannonicalFamilyName = nullptr;
+ if (family.fNames.size() > 0) {
+ cannonicalFamilyName = &family.fNames[0];
+ }
+ fFallbackFor = family.fFallbackFor;
+
+ // TODO? make this lazy
+ for (int i = 0; i < family.fFonts.size(); ++i) {
+ const FontFileInfo& fontFile = family.fFonts[i];
+
+ SkString pathName(family.fBasePath);
+ pathName.append(fontFile.fFileName);
+
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(pathName.c_str());
+ if (!stream) {
+ SkDEBUGF("Requested font file %s does not exist or cannot be opened.\n",
+ pathName.c_str());
+ continue;
+ }
+
+ const int ttcIndex = fontFile.fIndex;
+ SkString familyName;
+ SkFontStyle style;
+ bool isFixedWidth;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!scanner.scanFont(stream.get(), ttcIndex,
+ &familyName, &style, &isFixedWidth, &axisDefinitions))
+ {
+ SkDEBUGF("Requested font file %s exists, but is not a valid font.\n",
+ pathName.c_str());
+ continue;
+ }
+
+ int weight = fontFile.fWeight != 0 ? fontFile.fWeight : style.weight();
+ SkFontStyle::Slant slant = style.slant();
+ switch (fontFile.fStyle) {
+ case FontFileInfo::Style::kAuto: slant = style.slant(); break;
+ case FontFileInfo::Style::kNormal: slant = SkFontStyle::kUpright_Slant; break;
+ case FontFileInfo::Style::kItalic: slant = SkFontStyle::kItalic_Slant; break;
+ default: SkASSERT(false); break;
+ }
+ style = SkFontStyle(weight, style.width(), slant);
+
+ uint32_t variant = family.fVariant;
+ if (kDefault_FontVariant == variant) {
+ variant = kCompact_FontVariant | kElegant_FontVariant;
+ }
+
+ // The first specified family name overrides the family name found in the font.
+ // TODO: SkTypeface_AndroidSystem::onCreateFamilyNameIterator should return
+ // all of the specified family names in addition to the names found in the font.
+ if (cannonicalFamilyName != nullptr) {
+ familyName = *cannonicalFamilyName;
+ }
+
+ AutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.size());
+ SkFontArguments::VariationPosition position = {
+ fontFile.fVariationDesignPosition.begin(),
+ fontFile.fVariationDesignPosition.size()
+ };
+ Scanner::computeAxisValues(axisDefinitions, position,
+ axisValues, familyName);
+
+ fStyles.push_back().reset(new SkTypeface_AndroidSystem(
+ pathName, cacheFontFiles, ttcIndex, axisValues.get(), axisDefinitions.size(),
+ style, isFixedWidth, familyName, family.fLanguages, variant));
+ }
+ }
+
+ int count() override {
+ return fStyles.size();
+ }
+ void getStyle(int index, SkFontStyle* style, SkString* name) override {
+ if (index < 0 || fStyles.size() <= index) {
+ return;
+ }
+ if (style) {
+ *style = fStyles[index]->fontStyle();
+ }
+ if (name) {
+ name->reset();
+ }
+ }
+ SkTypeface_AndroidSystem* createTypeface(int index) override {
+ if (index < 0 || fStyles.size() <= index) {
+ return nullptr;
+ }
+ return SkRef(fStyles[index].get());
+ }
+
+ SkTypeface_AndroidSystem* matchStyle(const SkFontStyle& pattern) override {
+ return static_cast<SkTypeface_AndroidSystem*>(this->matchStyleCSS3(pattern));
+ }
+
+private:
+ SkTArray<sk_sp<SkTypeface_AndroidSystem>> fStyles;
+ SkString fFallbackFor;
+
+ friend struct NameToFamily;
+ friend class SkFontMgr_Android;
+
+ using INHERITED = SkFontStyleSet;
+};
+
+/** On Android a single family can have many names, but our API assumes unique names.
+ * Map names to the back end so that all names for a given family refer to the same
+ * (non-replicated) set of typefaces.
+ * SkTDict<> doesn't let us do index-based lookup, so we write our own mapping.
+ */
+struct NameToFamily {
+ SkString name;
+ SkFontStyleSet_Android* styleSet;
+};
+
+class SkFontMgr_Android : public SkFontMgr {
+public:
+ SkFontMgr_Android(const SkFontMgr_Android_CustomFonts* custom) {
+ SkTDArray<FontFamily*> families;
+ if (custom && SkFontMgr_Android_CustomFonts::kPreferSystem != custom->fSystemFontUse) {
+ SkString base(custom->fBasePath);
+ SkFontMgr_Android_Parser::GetCustomFontFamilies(
+ families, base, custom->fFontsXml, custom->fFallbackFontsXml);
+ }
+ if (!custom ||
+ (custom && SkFontMgr_Android_CustomFonts::kOnlyCustom != custom->fSystemFontUse))
+ {
+ SkFontMgr_Android_Parser::GetSystemFontFamilies(families);
+ }
+ if (custom && SkFontMgr_Android_CustomFonts::kPreferSystem == custom->fSystemFontUse) {
+ SkString base(custom->fBasePath);
+ SkFontMgr_Android_Parser::GetCustomFontFamilies(
+ families, base, custom->fFontsXml, custom->fFallbackFontsXml);
+ }
+ this->buildNameToFamilyMap(families, custom ? custom->fIsolated : false);
+ this->findDefaultStyleSet();
+ for (FontFamily* p : families) {
+ delete p;
+ }
+ families.reset();
+ }
+
+protected:
+ /** Returns not how many families we have, but how many unique names
+ * exist among the families.
+ */
+ int onCountFamilies() const override {
+ return fNameToFamilyMap.size();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ if (index < 0 || fNameToFamilyMap.size() <= index) {
+ familyName->reset();
+ return;
+ }
+ familyName->set(fNameToFamilyMap[index].name);
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ if (index < 0 || fNameToFamilyMap.size() <= index) {
+ return nullptr;
+ }
+ return SkRef(fNameToFamilyMap[index].styleSet);
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (!familyName) {
+ return nullptr;
+ }
+ SkAutoAsciiToLC tolc(familyName);
+ for (int i = 0; i < fNameToFamilyMap.size(); ++i) {
+ if (fNameToFamilyMap[i].name.equals(tolc.lc())) {
+ return SkRef(fNameToFamilyMap[i].styleSet);
+ }
+ }
+ // TODO: eventually we should not need to name fallback families.
+ for (int i = 0; i < fFallbackNameToFamilyMap.size(); ++i) {
+ if (fFallbackNameToFamilyMap[i].name.equals(tolc.lc())) {
+ return SkRef(fFallbackNameToFamilyMap[i].styleSet);
+ }
+ }
+ return nullptr;
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const override {
+ sk_sp<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(style);
+ }
+
+ static sk_sp<SkTypeface_AndroidSystem> find_family_style_character(
+ const SkString& familyName,
+ const SkTArray<NameToFamily, true>& fallbackNameToFamilyMap,
+ const SkFontStyle& style, bool elegant,
+ const SkString& langTag, SkUnichar character)
+ {
+ for (int i = 0; i < fallbackNameToFamilyMap.size(); ++i) {
+ SkFontStyleSet_Android* family = fallbackNameToFamilyMap[i].styleSet;
+ if (familyName != family->fFallbackFor) {
+ continue;
+ }
+ sk_sp<SkTypeface_AndroidSystem> face(family->matchStyle(style));
+
+ if (!langTag.isEmpty() &&
+ std::none_of(face->fLang.begin(), face->fLang.end(), [&](SkLanguage lang){
+ return lang.getTag().startsWith(langTag.c_str());
+ }))
+ {
+ continue;
+ }
+
+ if (SkToBool(face->fVariantStyle & kElegant_FontVariant) != elegant) {
+ continue;
+ }
+
+ if (face->unicharToGlyph(character) != 0) {
+ return face;
+ }
+ }
+ return nullptr;
+ }
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override {
+ // The variant 'elegant' is 'not squashed', 'compact' is 'stays in ascent/descent'.
+ // The variant 'default' means 'compact and elegant'.
+ // As a result, it is not possible to know the variant context from the font alone.
+ // TODO: add 'is_elegant' and 'is_compact' bits to 'style' request.
+
+ SkString familyNameString(familyName);
+ for (const SkString& currentFamilyName : { familyNameString, SkString() }) {
+ // The first time match anything elegant, second time anything not elegant.
+ for (int elegant = 2; elegant --> 0;) {
+ for (int bcp47Index = bcp47Count; bcp47Index --> 0;) {
+ SkLanguage lang(bcp47[bcp47Index]);
+ while (!lang.getTag().isEmpty()) {
+ sk_sp<SkTypeface_AndroidSystem> matchingTypeface =
+ find_family_style_character(currentFamilyName, fFallbackNameToFamilyMap,
+ style, SkToBool(elegant),
+ lang.getTag(), character);
+ if (matchingTypeface) {
+ return matchingTypeface.release();
+ }
+
+ lang = lang.getParent();
+ }
+ }
+ sk_sp<SkTypeface_AndroidSystem> matchingTypeface =
+ find_family_style_character(currentFamilyName, fFallbackNameToFamilyMap,
+ style, SkToBool(elegant),
+ SkString(), character);
+ if (matchingTypeface) {
+ return matchingTypeface.release();
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ return this->makeFromStream(std::unique_ptr<SkStreamAsset>(new SkMemoryStream(std::move(data))),
+ ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream ? this->makeFromStream(std::move(stream), ttcIndex) : nullptr;
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ return this->makeFromStream(std::move(stream),
+ SkFontArguments().setCollectionIndex(ttcIndex));
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override {
+ return SkTypeface_FreeType::MakeFromStream(std::move(stream), args);
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override {
+ if (familyName) {
+ // On Android, we must return nullptr when we can't find the requested
+ // named typeface so that the system/app can provide their own recovery
+ // mechanism. On other platforms we'd provide a typeface from the
+ // default family instead.
+ return sk_sp<SkTypeface>(this->onMatchFamilyStyle(familyName, style));
+ }
+ return sk_sp<SkTypeface>(fDefaultStyleSet->matchStyle(style));
+ }
+
+
+private:
+
+ SkTypeface_FreeType::Scanner fScanner;
+
+ SkTArray<sk_sp<SkFontStyleSet_Android>> fStyleSets;
+ sk_sp<SkFontStyleSet> fDefaultStyleSet;
+
+ SkTArray<NameToFamily, true> fNameToFamilyMap;
+ SkTArray<NameToFamily, true> fFallbackNameToFamilyMap;
+
+ void addFamily(FontFamily& family, const bool isolated, int familyIndex) {
+ SkTArray<NameToFamily, true>* nameToFamily = &fNameToFamilyMap;
+ if (family.fIsFallbackFont) {
+ nameToFamily = &fFallbackNameToFamilyMap;
+
+ if (0 == family.fNames.size()) {
+ SkString& fallbackName = family.fNames.push_back();
+ fallbackName.printf("%.2x##fallback", familyIndex);
+ }
+ }
+
+ sk_sp<SkFontStyleSet_Android> newSet =
+ sk_make_sp<SkFontStyleSet_Android>(family, fScanner, isolated);
+ if (0 == newSet->count()) {
+ return;
+ }
+
+ for (const SkString& name : family.fNames) {
+ nameToFamily->emplace_back(NameToFamily{name, newSet.get()});
+ }
+ fStyleSets.emplace_back(std::move(newSet));
+ }
+ void buildNameToFamilyMap(SkTDArray<FontFamily*> families, const bool isolated) {
+ int familyIndex = 0;
+ for (FontFamily* family : families) {
+ addFamily(*family, isolated, familyIndex++);
+ for (const auto& [unused, fallbackFamily] : family->fallbackFamilies) {
+ addFamily(*fallbackFamily, isolated, familyIndex++);
+ }
+ }
+ }
+
+ void findDefaultStyleSet() {
+ SkASSERT(!fStyleSets.empty());
+
+ static const char* defaultNames[] = { "sans-serif" };
+ for (const char* defaultName : defaultNames) {
+ fDefaultStyleSet.reset(this->onMatchFamily(defaultName));
+ if (fDefaultStyleSet) {
+ break;
+ }
+ }
+ if (nullptr == fDefaultStyleSet) {
+ fDefaultStyleSet = fStyleSets[0];
+ }
+ SkASSERT(fDefaultStyleSet);
+ }
+
+ using INHERITED = SkFontMgr;
+};
+
+#ifdef SK_DEBUG
+static char const * const gSystemFontUseStrings[] = {
+ "OnlyCustom", "PreferCustom", "PreferSystem"
+};
+#endif
+
+sk_sp<SkFontMgr> SkFontMgr_New_Android(const SkFontMgr_Android_CustomFonts* custom) {
+ if (custom) {
+ SkASSERT(0 <= custom->fSystemFontUse);
+ SkASSERT(custom->fSystemFontUse < std::size(gSystemFontUseStrings));
+ SkDEBUGF("SystemFontUse: %s BasePath: %s Fonts: %s FallbackFonts: %s\n",
+ gSystemFontUseStrings[custom->fSystemFontUse],
+ custom->fBasePath,
+ custom->fFontsXml,
+ custom->fFallbackFontsXml);
+ }
+ return sk_make_sp<SkFontMgr_Android>(custom);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp
new file mode 100644
index 0000000000..f4eaa3fbcb
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_factory.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontMgr_android.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_Android(nullptr);
+}
+
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp
new file mode 100644
index 0000000000..94d69928a0
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.cpp
@@ -0,0 +1,846 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Despite the name and location, this is portable code.
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTLogic.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkTSearch.h"
+#include "src/core/SkOSFile.h"
+#include "src/ports/SkFontMgr_android_parser.h"
+
+#include <expat.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <memory>
+
+#define LMP_SYSTEM_FONTS_FILE "/system/etc/fonts.xml"
+#define OLD_SYSTEM_FONTS_FILE "/system/etc/system_fonts.xml"
+#define FALLBACK_FONTS_FILE "/system/etc/fallback_fonts.xml"
+#define VENDOR_FONTS_FILE "/vendor/etc/fallback_fonts.xml"
+
+#define LOCALE_FALLBACK_FONTS_SYSTEM_DIR "/system/etc"
+#define LOCALE_FALLBACK_FONTS_VENDOR_DIR "/vendor/etc"
+#define LOCALE_FALLBACK_FONTS_PREFIX "fallback_fonts-"
+#define LOCALE_FALLBACK_FONTS_SUFFIX ".xml"
+
+#ifndef SK_FONT_FILE_PREFIX
+# define SK_FONT_FILE_PREFIX "/fonts/"
+#endif
+
+/**
+ * This file contains TWO 'familyset' handlers:
+ * One for JB and earlier which works with
+ * /system/etc/system_fonts.xml
+ * /system/etc/fallback_fonts.xml
+ * /vendor/etc/fallback_fonts.xml
+ * /system/etc/fallback_fonts-XX.xml
+ * /vendor/etc/fallback_fonts-XX.xml
+ * and the other for LMP and later which works with
+ * /system/etc/fonts.xml
+ *
+ * If the 'familyset' 'version' attribute is 21 or higher the LMP parser is used, otherwise the JB.
+ */
+
+struct FamilyData;
+
+struct TagHandler {
+ /** Called at the start tag.
+     * Called immediately after the parent tag returns this handler from a call to 'tag'.
+ * Allows setting up for handling the tag content and processing attributes.
+ * If nullptr, will not be called.
+ */
+ void (*start)(FamilyData* data, const char* tag, const char** attributes);
+
+ /** Called at the end tag.
+ * Allows post-processing of any accumulated information.
+ * This will be the last call made in relation to the current tag.
+ * If nullptr, will not be called.
+ */
+ void (*end)(FamilyData* data, const char* tag);
+
+ /** Called when a nested tag is encountered.
+ * This is responsible for determining how to handle the tag.
+ * If the tag is not recognized, return nullptr to skip the tag.
+ * If nullptr, all nested tags will be skipped.
+ */
+ const TagHandler* (*tag)(FamilyData* data, const char* tag, const char** attributes);
+
+ /** The character handler for this tag.
+ * This is only active for character data contained directly in this tag (not sub-tags).
+ * The first parameter will be castable to a FamilyData*.
+ * If nullptr, any character data in this tag will be ignored.
+ */
+ XML_CharacterDataHandler chars;
+};
+
+/** Represents the current parsing state. */
+struct FamilyData {
+ FamilyData(XML_Parser parser, SkTDArray<FontFamily*>& families,
+ const SkString& basePath, bool isFallback, const char* filename,
+ const TagHandler* topLevelHandler)
+ : fParser(parser)
+ , fFamilies(families)
+ , fCurrentFamily(nullptr)
+ , fCurrentFontInfo(nullptr)
+ , fVersion(0)
+ , fBasePath(basePath)
+ , fIsFallback(isFallback)
+ , fFilename(filename)
+ , fDepth(1)
+ , fSkip(0)
+ , fHandler(&topLevelHandler, 1)
+ { }
+
+ XML_Parser fParser; // The expat parser doing the work, owned by caller
+ SkTDArray<FontFamily*>& fFamilies; // The array to append families, owned by caller
+ std::unique_ptr<FontFamily> fCurrentFamily; // The family being created, owned by this
+ FontFileInfo* fCurrentFontInfo; // The info being created, owned by fCurrentFamily
+ int fVersion; // The version of the file parsed.
+ const SkString& fBasePath; // The current base path.
+ const bool fIsFallback; // The file being parsed is a fallback file
+ const char* fFilename; // The name of the file currently being parsed.
+
+ int fDepth; // The current element depth of the parse.
+ int fSkip; // The depth to stop skipping, 0 if not skipping.
+ SkTDArray<const TagHandler*> fHandler; // The stack of current tag handlers.
+};
+
+static bool memeq(const char* s1, const char* s2, size_t n1, size_t n2) {
+ return n1 == n2 && 0 == memcmp(s1, s2, n1);
+}
+#define MEMEQ(c, s, n) memeq(c, s, sizeof(c) - 1, n)
+
+#define ATTS_NON_NULL(a, i) (a[i] != nullptr && a[i+1] != nullptr)
+
+#define SK_FONTMGR_ANDROID_PARSER_PREFIX "[SkFontMgr Android Parser] "
+
+#define SK_FONTCONFIGPARSER_WARNING(message, ...) \
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "%s:%d:%d: warning: " message "\n", \
+ self->fFilename, \
+ (int)XML_GetCurrentLineNumber(self->fParser), \
+ (int)XML_GetCurrentColumnNumber(self->fParser), \
+ ##__VA_ARGS__)
+
+static bool is_whitespace(char c) {
+ return c == ' ' || c == '\n'|| c == '\r' || c == '\t';
+}
+
+static void trim_string(SkString* s) {
+ char* str = s->data();
+ const char* start = str; // start is inclusive
+ const char* end = start + s->size(); // end is exclusive
+ while (is_whitespace(*start)) { ++start; }
+ if (start != end) {
+ --end; // make end inclusive
+ while (is_whitespace(*end)) { --end; }
+ ++end; // make end exclusive
+ }
+ size_t len = end - start;
+ memmove(str, start, len);
+ s->resize(len);
+}
+
+static void parse_space_separated_languages(const char* value, size_t valueLen,
+ SkTArray<SkLanguage, true>& languages)
+{
+ size_t i = 0;
+ while (true) {
+ for (; i < valueLen && is_whitespace(value[i]); ++i) { }
+ if (i == valueLen) { break; }
+ size_t j;
+ for (j = i + 1; j < valueLen && !is_whitespace(value[j]); ++j) { }
+ languages.emplace_back(value + i, j - i);
+ i = j;
+ if (i == valueLen) { break; }
+ }
+}
+
+namespace lmpParser {
+
+static const TagHandler axisHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ FontFileInfo& file = *self->fCurrentFontInfo;
+ SkFourByteTag axisTag = SkSetFourByteTag('\0','\0','\0','\0');
+ SkFixed axisStyleValue = 0;
+ bool axisTagIsValid = false;
+ bool axisStyleValueIsValid = false;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("tag", name, nameLen)) {
+ size_t valueLen = strlen(value);
+ if (valueLen == 4) {
+ axisTag = SkSetFourByteTag(value[0], value[1], value[2], value[3]);
+ axisTagIsValid = true;
+ for (int j = 0; j < file.fVariationDesignPosition.size() - 1; ++j) {
+ if (file.fVariationDesignPosition[j].axis == axisTag) {
+ axisTagIsValid = false;
+ SK_FONTCONFIGPARSER_WARNING("'%c%c%c%c' axis specified more than once",
+ (axisTag >> 24) & 0xFF,
+ (axisTag >> 16) & 0xFF,
+ (axisTag >> 8) & 0xFF,
+ (axisTag ) & 0xFF);
+ }
+ }
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid axis tag", value);
+ }
+ } else if (MEMEQ("stylevalue", name, nameLen)) {
+ if (parse_fixed<16>(value, &axisStyleValue)) {
+ axisStyleValueIsValid = true;
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid axis stylevalue", value);
+ }
+ }
+ }
+ if (axisTagIsValid && axisStyleValueIsValid) {
+ auto& coordinate = file.fVariationDesignPosition.push_back();
+ coordinate.axis = axisTag;
+ coordinate.value = SkFixedToScalar(axisStyleValue);
+ }
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/nullptr,
+};
+
+static const TagHandler fontHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'weight' (non-negative integer) [default 0]
+ // 'style' ("normal", "italic") [default "auto"]
+ // 'index' (non-negative integer) [default 0]
+ // The character data should be a filename.
+ FontFileInfo& file = self->fCurrentFamily->fFonts.push_back();
+ self->fCurrentFontInfo = &file;
+ SkString fallbackFor;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("weight", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &file.fWeight)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid weight", value);
+ }
+ } else if (MEMEQ("style", name, nameLen)) {
+ size_t valueLen = strlen(value);
+ if (MEMEQ("normal", value, valueLen)) {
+ file.fStyle = FontFileInfo::Style::kNormal;
+ } else if (MEMEQ("italic", value, valueLen)) {
+ file.fStyle = FontFileInfo::Style::kItalic;
+ }
+ } else if (MEMEQ("index", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &file.fIndex)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid index", value);
+ }
+ } else if (MEMEQ("fallbackFor", name, nameLen)) {
+ /** fallbackFor specifies a family fallback and should have been on family. */
+ fallbackFor = value;
+ }
+ }
+ if (!fallbackFor.isEmpty()) {
+ std::unique_ptr<FontFamily>* fallbackFamily =
+ self->fCurrentFamily->fallbackFamilies.find(fallbackFor);
+ if (!fallbackFamily) {
+ std::unique_ptr<FontFamily> newFallbackFamily(
+ new FontFamily(self->fCurrentFamily->fBasePath, true));
+ fallbackFamily = self->fCurrentFamily->fallbackFamilies.set(
+ fallbackFor, std::move(newFallbackFamily));
+ (*fallbackFamily)->fLanguages = self->fCurrentFamily->fLanguages;
+ (*fallbackFamily)->fVariant = self->fCurrentFamily->fVariant;
+ (*fallbackFamily)->fOrder = self->fCurrentFamily->fOrder;
+ (*fallbackFamily)->fFallbackFor = fallbackFor;
+ }
+ self->fCurrentFontInfo = &(*fallbackFamily)->fFonts.emplace_back(file);
+ self->fCurrentFamily->fFonts.pop_back();
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ trim_string(&self->fCurrentFontInfo->fFileName);
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("axis", tag, len)) {
+ return &axisHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ self->fCurrentFontInfo->fFileName.append(s, len);
+ }
+};
+
+static const TagHandler familyHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'name' (string) [optional]
+ // 'lang' (space separated string) [default ""]
+ // 'variant' ("elegant", "compact") [default "default"]
+ // If there is no name, this is a fallback only font.
+ FontFamily* family = new FontFamily(self->fBasePath, true);
+ self->fCurrentFamily.reset(family);
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ size_t valueLen = strlen(value);
+ if (MEMEQ("name", name, nameLen)) {
+ SkAutoAsciiToLC tolc(value);
+ family->fNames.push_back().set(tolc.lc());
+ family->fIsFallbackFont = false;
+ } else if (MEMEQ("lang", name, nameLen)) {
+ parse_space_separated_languages(value, valueLen, family->fLanguages);
+ } else if (MEMEQ("variant", name, nameLen)) {
+ if (MEMEQ("elegant", value, valueLen)) {
+ family->fVariant = kElegant_FontVariant;
+ } else if (MEMEQ("compact", value, valueLen)) {
+ family->fVariant = kCompact_FontVariant;
+ }
+ }
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ *self->fFamilies.append() = self->fCurrentFamily.release();
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("font", tag, len)) {
+ return &fontHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static FontFamily* find_family(FamilyData* self, const SkString& familyName) {
+ for (int i = 0; i < self->fFamilies.size(); i++) {
+ FontFamily* candidate = self->fFamilies[i];
+ for (int j = 0; j < candidate->fNames.size(); j++) {
+ if (candidate->fNames[j] == familyName) {
+ return candidate;
+ }
+ }
+ }
+ return nullptr;
+}
+
+static const TagHandler aliasHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'name' (string) introduces a new family name.
+ // 'to' (string) specifies which (previous) family to alias
+ // 'weight' (non-negative integer) [optional]
+ // If it *does not* have a weight, 'name' is an alias for the entire 'to' family.
+ // If it *does* have a weight, 'name' is a new family consisting of
+ // the font(s) with 'weight' from the 'to' family.
+
+ SkString aliasName;
+ SkString to;
+ int weight = 0;
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("name", name, nameLen)) {
+ SkAutoAsciiToLC tolc(value);
+ aliasName.set(tolc.lc());
+ } else if (MEMEQ("to", name, nameLen)) {
+ to.set(value);
+ } else if (MEMEQ("weight", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &weight)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid weight", value);
+ }
+ }
+ }
+
+ // Assumes that the named family is already declared
+ FontFamily* targetFamily = find_family(self, to);
+ if (!targetFamily) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' alias target not found", to.c_str());
+ return;
+ }
+
+ if (weight) {
+ FontFamily* family = new FontFamily(targetFamily->fBasePath, self->fIsFallback);
+ family->fNames.push_back().set(aliasName);
+
+ for (int i = 0; i < targetFamily->fFonts.size(); i++) {
+ if (targetFamily->fFonts[i].fWeight == weight) {
+ family->fFonts.push_back(targetFamily->fFonts[i]);
+ }
+ }
+ *self->fFamilies.append() = family;
+ } else {
+ targetFamily->fNames.push_back().set(aliasName);
+ }
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/nullptr,
+};
+
+static const TagHandler familySetHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) { },
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("family", tag, len)) {
+ return &familyHandler;
+ } else if (MEMEQ("alias", tag, len)) {
+ return &aliasHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+} // namespace lmpParser
+
+namespace jbParser {
+
+static const TagHandler fileHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // 'variant' ("elegant", "compact") [default "default"]
+ // 'lang' (string) [default ""]
+ // 'index' (non-negative integer) [default 0]
+ // The character data should be a filename.
+ FontFamily& currentFamily = *self->fCurrentFamily;
+ FontFileInfo& newFileInfo = currentFamily.fFonts.push_back();
+ if (attributes) {
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ const char* value = attributes[i+1];
+ size_t nameLen = strlen(name);
+ size_t valueLen = strlen(value);
+ if (MEMEQ("variant", name, nameLen)) {
+ const FontVariant prevVariant = currentFamily.fVariant;
+ if (MEMEQ("elegant", value, valueLen)) {
+ currentFamily.fVariant = kElegant_FontVariant;
+ } else if (MEMEQ("compact", value, valueLen)) {
+ currentFamily.fVariant = kCompact_FontVariant;
+ }
+ if (currentFamily.fFonts.size() > 1 && currentFamily.fVariant != prevVariant) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' unexpected variant found\n"
+ "Note: Every font file within a family must have identical variants.",
+ value);
+ }
+
+ } else if (MEMEQ("lang", name, nameLen)) {
+ SkLanguage currentLanguage = SkLanguage(value, valueLen);
+ bool showWarning = false;
+ if (currentFamily.fLanguages.empty()) {
+ showWarning = (currentFamily.fFonts.size() > 1);
+ currentFamily.fLanguages.push_back(std::move(currentLanguage));
+ } else if (currentFamily.fLanguages[0] != currentLanguage) {
+ showWarning = true;
+ currentFamily.fLanguages[0] = std::move(currentLanguage);
+ }
+ if (showWarning) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' unexpected language found\n"
+ "Note: Every font file within a family must have identical languages.",
+ value);
+ }
+
+ } else if (MEMEQ("index", name, nameLen)) {
+ if (!parse_non_negative_integer(value, &newFileInfo.fIndex)) {
+ SK_FONTCONFIGPARSER_WARNING("'%s' is an invalid index", value);
+ }
+ }
+ }
+ }
+ self->fCurrentFontInfo = &newFileInfo;
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ self->fCurrentFontInfo->fFileName.append(s, len);
+ }
+};
+
+static const TagHandler fileSetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("file", tag, len)) {
+ return &fileHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler nameHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ // The character data should be a name for the font.
+ self->fCurrentFamily->fNames.push_back();
+ },
+ /*end*/nullptr,
+ /*tag*/nullptr,
+ /*chars*/[](void* data, const char* s, int len) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ SkAutoAsciiToLC tolc(s, len);
+ self->fCurrentFamily->fNames.back().append(tolc.lc(), len);
+ }
+};
+
+static const TagHandler nameSetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("name", tag, len)) {
+ return &nameHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler familyHandler = {
+ /*start*/[](FamilyData* self, const char* tag, const char** attributes) {
+ self->fCurrentFamily = std::make_unique<FontFamily>(self->fBasePath, self->fIsFallback);
+ // 'order' (non-negative integer) [default -1]
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* value = attributes[i+1];
+ parse_non_negative_integer(value, &self->fCurrentFamily->fOrder);
+ }
+ },
+ /*end*/[](FamilyData* self, const char* tag) {
+ *self->fFamilies.append() = self->fCurrentFamily.release();
+ },
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("nameset", tag, len)) {
+ return &nameSetHandler;
+ } else if (MEMEQ("fileset", tag, len)) {
+ return &fileSetHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static const TagHandler familySetHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("family", tag, len)) {
+ return &familyHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+} // namespace jbParser
+
+static const TagHandler topLevelHandler = {
+ /*start*/nullptr,
+ /*end*/nullptr,
+ /*tag*/[](FamilyData* self, const char* tag, const char** attributes) -> const TagHandler* {
+ size_t len = strlen(tag);
+ if (MEMEQ("familyset", tag, len)) {
+ // 'version' (non-negative integer) [default 0]
+ for (size_t i = 0; ATTS_NON_NULL(attributes, i); i += 2) {
+ const char* name = attributes[i];
+ size_t nameLen = strlen(name);
+ if (MEMEQ("version", name, nameLen)) {
+ const char* value = attributes[i+1];
+ if (parse_non_negative_integer(value, &self->fVersion)) {
+ if (self->fVersion >= 21) {
+ return &lmpParser::familySetHandler;
+ }
+ }
+ }
+ }
+ return &jbParser::familySetHandler;
+ }
+ return nullptr;
+ },
+ /*chars*/nullptr,
+};
+
+static void XMLCALL start_element_handler(void *data, const char *tag, const char **attributes) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+
+ if (!self->fSkip) {
+ const TagHandler* parent = self->fHandler.back();
+ const TagHandler* child = parent->tag ? parent->tag(self, tag, attributes) : nullptr;
+ if (child) {
+ if (child->start) {
+ child->start(self, tag, attributes);
+ }
+ self->fHandler.push_back(child);
+ XML_SetCharacterDataHandler(self->fParser, child->chars);
+ } else {
+ SK_FONTCONFIGPARSER_WARNING("'%s' tag not recognized, skipping", tag);
+ XML_SetCharacterDataHandler(self->fParser, nullptr);
+ self->fSkip = self->fDepth;
+ }
+ }
+
+ ++self->fDepth;
+}
+
+static void XMLCALL end_element_handler(void* data, const char* tag) {
+ FamilyData* self = static_cast<FamilyData*>(data);
+ --self->fDepth;
+
+ if (!self->fSkip) {
+ const TagHandler* child = self->fHandler.back();
+ if (child->end) {
+ child->end(self, tag);
+ }
+ self->fHandler.pop_back();
+ const TagHandler* parent = self->fHandler.back();
+ XML_SetCharacterDataHandler(self->fParser, parent->chars);
+ }
+
+ if (self->fSkip == self->fDepth) {
+ self->fSkip = 0;
+ const TagHandler* parent = self->fHandler.back();
+ XML_SetCharacterDataHandler(self->fParser, parent->chars);
+ }
+}
+
+static void XMLCALL xml_entity_decl_handler(void *data,
+ const XML_Char *entityName,
+ int is_parameter_entity,
+ const XML_Char *value,
+ int value_length,
+ const XML_Char *base,
+ const XML_Char *systemId,
+ const XML_Char *publicId,
+ const XML_Char *notationName)
+{
+ FamilyData* self = static_cast<FamilyData*>(data);
+ SK_FONTCONFIGPARSER_WARNING("'%s' entity declaration found, stopping processing", entityName);
+ XML_StopParser(self->fParser, XML_FALSE);
+}
+
+static const XML_Memory_Handling_Suite sk_XML_alloc = {
+ sk_malloc_throw,
+ sk_realloc_throw,
+ sk_free
+};
+
+/**
+ * This function parses the given filename and stores the results in the given
+ * families array. Returns the version of the file, negative if the file does not exist.
+ */
+static int parse_config_file(const char* filename, SkTDArray<FontFamily*>& families,
+ const SkString& basePath, bool isFallback)
+{
+ SkFILEStream file(filename);
+
+ // Some of the files we attempt to parse (in particular, /vendor/etc/fallback_fonts.xml)
+ // are optional - failure here is okay because one of these optional files may not exist.
+ if (!file.isValid()) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "'%s' could not be opened\n", filename);
+ return -1;
+ }
+
+ SkAutoTCallVProc<std::remove_pointer_t<XML_Parser>, XML_ParserFree> parser(
+ XML_ParserCreate_MM(nullptr, &sk_XML_alloc, nullptr));
+ if (!parser) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "could not create XML parser\n");
+ return -1;
+ }
+
+ FamilyData self(parser, families, basePath, isFallback, filename, &topLevelHandler);
+ XML_SetUserData(parser, &self);
+
+ // Disable entity processing, to inhibit internal entity expansion. See expat CVE-2013-0340
+ XML_SetEntityDeclHandler(parser, xml_entity_decl_handler);
+
+ // Start parsing oldschool; switch these in flight if we detect a newer version of the file.
+ XML_SetElementHandler(parser, start_element_handler, end_element_handler);
+
+ // One would assume it would be faster to have a buffer on the stack and call XML_Parse.
+ // But XML_Parse will call XML_GetBuffer anyway and memmove the passed buffer into it.
+ // (Unless XML_CONTEXT_BYTES is undefined, but all users define it.)
+ // In debug, buffer a small odd number of bytes to detect slicing in XML_CharacterDataHandler.
+ static const int bufferSize = 512 SkDEBUGCODE( - 507);
+ bool done = false;
+ while (!done) {
+ void* buffer = XML_GetBuffer(parser, bufferSize);
+ if (!buffer) {
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "could not buffer enough to continue\n");
+ return -1;
+ }
+ size_t len = file.read(buffer, bufferSize);
+ done = file.isAtEnd();
+ XML_Status status = XML_ParseBuffer(parser, len, done);
+ if (XML_STATUS_ERROR == status) {
+ XML_Error error = XML_GetErrorCode(parser);
+ int line = XML_GetCurrentLineNumber(parser);
+ int column = XML_GetCurrentColumnNumber(parser);
+ const XML_LChar* errorString = XML_ErrorString(error);
+ SkDebugf(SK_FONTMGR_ANDROID_PARSER_PREFIX "%s:%d:%d error %d: %s.\n",
+ filename, line, column, error, errorString);
+ return -1;
+ }
+ }
+ return self.fVersion;
+}
+
+/** Returns the version of the system font file actually found, negative if none. */
+static int append_system_font_families(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath)
+{
+ int initialCount = fontFamilies.size();
+ int version = parse_config_file(LMP_SYSTEM_FONTS_FILE, fontFamilies, basePath, false);
+ if (version < 0 || fontFamilies.size() == initialCount) {
+ version = parse_config_file(OLD_SYSTEM_FONTS_FILE, fontFamilies, basePath, false);
+ }
+ return version;
+}
+
+/**
+ * In some versions of Android prior to Android 4.2 (JellyBean MR1 at API
+ * Level 17) the fallback fonts for certain locales were encoded in their own
+ * XML files with a suffix that identified the locale. We search the provided
+ * directory for those files, add all of their entries to the fallback chain, and
+ * include the locale as part of each entry.
+ */
+static void append_fallback_font_families_for_locale(SkTDArray<FontFamily*>& fallbackFonts,
+ const char* dir,
+ const SkString& basePath)
+{
+ SkOSFile::Iter iter(dir, nullptr);
+ SkString fileName;
+ while (iter.next(&fileName, false)) {
+ // The size of the prefix and suffix.
+ static const size_t fixedLen = sizeof(LOCALE_FALLBACK_FONTS_PREFIX) - 1
+ + sizeof(LOCALE_FALLBACK_FONTS_SUFFIX) - 1;
+
+ // The size of the prefix, suffix, and a minimum valid language code
+ static const size_t minSize = fixedLen + 2;
+
+ if (fileName.size() < minSize ||
+ !fileName.startsWith(LOCALE_FALLBACK_FONTS_PREFIX) ||
+ !fileName.endsWith(LOCALE_FALLBACK_FONTS_SUFFIX))
+ {
+ continue;
+ }
+
+ SkString locale(fileName.c_str() + sizeof(LOCALE_FALLBACK_FONTS_PREFIX) - 1,
+ fileName.size() - fixedLen);
+
+ SkString absoluteFilename;
+ absoluteFilename.printf("%s/%s", dir, fileName.c_str());
+
+ SkTDArray<FontFamily*> langSpecificFonts;
+ parse_config_file(absoluteFilename.c_str(), langSpecificFonts, basePath, true);
+
+ for (int i = 0; i < langSpecificFonts.size(); ++i) {
+ FontFamily* family = langSpecificFonts[i];
+ family->fLanguages.emplace_back(locale);
+ *fallbackFonts.append() = family;
+ }
+ }
+}
+
+static void append_system_fallback_font_families(SkTDArray<FontFamily*>& fallbackFonts,
+ const SkString& basePath)
+{
+ parse_config_file(FALLBACK_FONTS_FILE, fallbackFonts, basePath, true);
+ append_fallback_font_families_for_locale(fallbackFonts,
+ LOCALE_FALLBACK_FONTS_SYSTEM_DIR,
+ basePath);
+}
+
+static void mixin_vendor_fallback_font_families(SkTDArray<FontFamily*>& fallbackFonts,
+ const SkString& basePath)
+{
+ SkTDArray<FontFamily*> vendorFonts;
+ parse_config_file(VENDOR_FONTS_FILE, vendorFonts, basePath, true);
+ append_fallback_font_families_for_locale(vendorFonts,
+ LOCALE_FALLBACK_FONTS_VENDOR_DIR,
+ basePath);
+
+ // This loop inserts the vendor fallback fonts in the correct order in the
+ // overall fallbacks list.
+ int currentOrder = -1;
+ for (int i = 0; i < vendorFonts.size(); ++i) {
+ FontFamily* family = vendorFonts[i];
+ int order = family->fOrder;
+ if (order < 0) {
+ if (currentOrder < 0) {
+ // Default case - just add it to the end of the fallback list
+ *fallbackFonts.append() = family;
+ } else {
+ // no order specified on this font, but we're incrementing the order
+ // based on an earlier order insertion request
+ *fallbackFonts.insert(currentOrder++) = family;
+ }
+ } else {
+ // Add the font into the fallback list in the specified order. Set
+ // currentOrder for correct placement of other fonts in the vendor list.
+ *fallbackFonts.insert(order) = family;
+ currentOrder = order + 1;
+ }
+ }
+}
+
+void SkFontMgr_Android_Parser::GetSystemFontFamilies(SkTDArray<FontFamily*>& fontFamilies) {
+ // Version 21 of the system font configuration does not need any fallback configuration files.
+ SkString basePath(getenv("ANDROID_ROOT"));
+ basePath.append(SK_FONT_FILE_PREFIX, sizeof(SK_FONT_FILE_PREFIX) - 1);
+
+ if (append_system_font_families(fontFamilies, basePath) >= 21) {
+ return;
+ }
+
+ // Append all the fallback fonts to system fonts
+ SkTDArray<FontFamily*> fallbackFonts;
+ append_system_fallback_font_families(fallbackFonts, basePath);
+ mixin_vendor_fallback_font_families(fallbackFonts, basePath);
+ fontFamilies.append(fallbackFonts.size(), fallbackFonts.begin());
+}
+
+void SkFontMgr_Android_Parser::GetCustomFontFamilies(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath,
+ const char* fontsXml,
+ const char* fallbackFontsXml,
+ const char* langFallbackFontsDir)
+{
+ if (fontsXml) {
+ parse_config_file(fontsXml, fontFamilies, basePath, false);
+ }
+ if (fallbackFontsXml) {
+ parse_config_file(fallbackFontsXml, fontFamilies, basePath, true);
+ }
+ if (langFallbackFontsDir) {
+ append_fallback_font_families_for_locale(fontFamilies,
+ langFallbackFontsDir,
+ basePath);
+ }
+}
+
+SkLanguage SkLanguage::getParent() const {
+ SkASSERT(!fTag.isEmpty());
+ const char* tag = fTag.c_str();
+
+ // strip off the rightmost "-.*"
+ const char* parentTagEnd = strrchr(tag, '-');
+ if (parentTagEnd == nullptr) {
+ return SkLanguage();
+ }
+ size_t parentTagLen = parentTagEnd - tag;
+ return SkLanguage(tag, parentTagLen);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h
new file mode 100644
index 0000000000..9d639a4302
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_android_parser.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2011 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_android_parser_DEFINED
+#define SkFontMgr_android_parser_DEFINED
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/core/SkTHash.h"
+
+#include <climits>
+#include <limits>
+
+/** \class SkLanguage
+
+ The SkLanguage class represents a human written language, and is used by
+ text draw operations to determine which glyph to draw when drawing
+ characters with variants (i.e. Han-derived characters).
+*/
+class SkLanguage {
+public:
+ SkLanguage() { }
+ SkLanguage(const SkString& tag) : fTag(tag) { }
+ SkLanguage(const char* tag) : fTag(tag) { }
+ SkLanguage(const char* tag, size_t len) : fTag(tag, len) { }
+ SkLanguage(const SkLanguage& b) : fTag(b.fTag) { }
+
+ /** Gets a BCP 47 language identifier for this SkLanguage.
+ @return a BCP 47 language identifier representing this language
+ */
+ const SkString& getTag() const { return fTag; }
+
+ /** Performs BCP 47 fallback to return an SkLanguage one step more general.
+ @return an SkLanguage one step more general
+ */
+ SkLanguage getParent() const;
+
+ bool operator==(const SkLanguage& b) const {
+ return fTag == b.fTag;
+ }
+ bool operator!=(const SkLanguage& b) const {
+ return fTag != b.fTag;
+ }
+ SkLanguage& operator=(const SkLanguage& b) {
+ fTag = b.fTag;
+ return *this;
+ }
+
+private:
+ //! BCP 47 language identifier
+ SkString fTag;
+};
+
+enum FontVariants {
+ kDefault_FontVariant = 0x01,
+ kCompact_FontVariant = 0x02,
+ kElegant_FontVariant = 0x04,
+ kLast_FontVariant = kElegant_FontVariant,
+};
+typedef uint32_t FontVariant;
+
+// Must remain trivially movable (can be memmoved).
+struct FontFileInfo {
+ FontFileInfo() : fIndex(0), fWeight(0), fStyle(Style::kAuto) { }
+
+ SkString fFileName;
+ int fIndex;
+ int fWeight;
+ enum class Style { kAuto, kNormal, kItalic } fStyle;
+ SkTArray<SkFontArguments::VariationPosition::Coordinate, true> fVariationDesignPosition;
+};
+
+/**
+ * A font family provides one or more names for a collection of fonts, each of
+ * which has a different style (normal, italic) or weight (thin, light, bold,
+ * etc).
+ * Some fonts may occur in compact variants for use in the user interface.
+ * Android distinguishes "fallback" fonts to support non-ASCII character sets.
+ */
+struct FontFamily {
+ FontFamily(const SkString& basePath, bool isFallbackFont)
+ : fVariant(kDefault_FontVariant)
+ , fOrder(-1)
+ , fIsFallbackFont(isFallbackFont)
+ , fBasePath(basePath)
+ { }
+
+ SkTArray<SkString, true> fNames;
+ SkTArray<FontFileInfo, true> fFonts;
+ SkTArray<SkLanguage, true> fLanguages;
+ SkTHashMap<SkString, std::unique_ptr<FontFamily>> fallbackFamilies;
+ FontVariant fVariant;
+ int fOrder; // internal to the parser, not useful to users.
+ bool fIsFallbackFont;
+ SkString fFallbackFor;
+ const SkString fBasePath;
+};
+
+namespace SkFontMgr_Android_Parser {
+
+/** Parses system font configuration files and appends result to fontFamilies. */
+void GetSystemFontFamilies(SkTDArray<FontFamily*>& fontFamilies);
+
+/** Parses font configuration files and appends result to fontFamilies. */
+void GetCustomFontFamilies(SkTDArray<FontFamily*>& fontFamilies,
+ const SkString& basePath,
+ const char* fontsXml,
+ const char* fallbackFontsXml,
+ const char* langFallbackFontsDir = nullptr);
+
+} // namespace SkFontMgr_Android_Parser
+
+
+/** Parses a null terminated string into an integer type, checking for overflow.
+ * http://www.w3.org/TR/html-markup/datatypes.html#common.data.integer.non-negative-def
+ *
+ * If the string cannot be parsed into 'value', returns false and does not change 'value'.
+ */
+template <typename T> bool parse_non_negative_integer(const char* s, T* value) {
+ static_assert(std::numeric_limits<T>::is_integer, "T_must_be_integer");
+
+ if (*s == '\0') {
+ return false;
+ }
+
+ const T nMax = std::numeric_limits<T>::max() / 10;
+ const T dMax = std::numeric_limits<T>::max() - (nMax * 10);
+ T n = 0;
+ for (; *s; ++s) {
+ // Check if digit
+ if (*s < '0' || '9' < *s) {
+ return false;
+ }
+ T d = *s - '0';
+ // Check for overflow
+ if (n > nMax || (n == nMax && d > dMax)) {
+ return false;
+ }
+ n = (n * 10) + d;
+ }
+ *value = n;
+ return true;
+}
+
+/** Parses a null terminated string into a signed fixed point value with bias N.
+ *
+ * Like http://www.w3.org/TR/html-markup/datatypes.html#common.data.float-def ,
+ * but may start with '.' and does not support 'e'. '-?((:digit:+(.:digit:+)?)|(.:digit:+))'
+ *
+ * Checks for overflow.
+ * Low bit rounding is not defined (is currently truncate).
+ * Bias (N) required to allow for the sign bit and 4 bits of integer.
+ *
+ * If the string cannot be parsed into 'value', returns false and does not change 'value'.
+ */
+template <int N, typename T> bool parse_fixed(const char* s, T* value) {
+ static_assert(std::numeric_limits<T>::is_integer, "T_must_be_integer");
+ static_assert(std::numeric_limits<T>::is_signed, "T_must_be_signed");
+ static_assert(sizeof(T) * CHAR_BIT - N >= 5, "N_must_leave_four_bits_plus_sign");
+
+ bool negate = false;
+ if (*s == '-') {
+ ++s;
+ negate = true;
+ }
+ if (*s == '\0') {
+ return false;
+ }
+
+ const T nMax = (std::numeric_limits<T>::max() >> N) / 10;
+ const T dMax = (std::numeric_limits<T>::max() >> N) - (nMax * 10);
+ T n = 0;
+ T frac = 0;
+ for (; *s; ++s) {
+ // Check if digit
+ if (*s < '0' || '9' < *s) {
+ // If it wasn't a digit, check if it is a '.' followed by something.
+ if (*s != '.' || s[1] == '\0') {
+ return false;
+ }
+ // Find the end, verify digits.
+ for (++s; *s; ++s) {
+ if (*s < '0' || '9' < *s) {
+ return false;
+ }
+ }
+ // Read back toward the '.'.
+ for (--s; *s != '.'; --s) {
+ T d = *s - '0';
+ frac = (frac + (d << N)) / 10; // This requires four bits overhead.
+ }
+ break;
+ }
+ T d = *s - '0';
+ // Check for overflow
+ if (n > nMax || (n == nMax && d > dMax)) {
+ return false;
+ }
+ n = (n * 10) + d;
+ }
+ if (negate) {
+ n = -n;
+ frac = -frac;
+ }
+ *value = SkLeftShift(n, N) + frac;
+ return true;
+}
+
+#endif /* SkFontMgr_android_parser_DEFINED */
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp
new file mode 100644
index 0000000000..54ea68e7a7
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom.cpp
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+#include "src/ports/SkFontMgr_custom.h"
+
+#include <limits>
+#include <memory>
+
+using namespace skia_private;
+
+class SkData;
+
+SkTypeface_Custom::SkTypeface_Custom(const SkFontStyle& style, bool isFixedPitch,
+ bool sysFont, const SkString familyName, int index)
+ : INHERITED(style, isFixedPitch)
+ , fIsSysFont(sysFont), fFamilyName(familyName), fIndex(index)
+{ }
+
+bool SkTypeface_Custom::isSysFont() const { return fIsSysFont; }
+
+void SkTypeface_Custom::onGetFamilyName(SkString* familyName) const {
+ *familyName = fFamilyName;
+}
+
+void SkTypeface_Custom::onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const {
+ desc->setFamilyName(fFamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+ desc->setFactoryId(SkTypeface_FreeType::FactoryId);
+ *isLocal = !this->isSysFont();
+}
+
+int SkTypeface_Custom::getIndex() const { return fIndex; }
+
+
+SkTypeface_Empty::SkTypeface_Empty() : INHERITED(SkFontStyle(), false, true, SkString(), 0) {}
+
+std::unique_ptr<SkStreamAsset> SkTypeface_Empty::onOpenStream(int*) const { return nullptr; }
+
+sk_sp<SkTypeface> SkTypeface_Empty::onMakeClone(const SkFontArguments& args) const {
+ return sk_ref_sp(this);
+}
+
+std::unique_ptr<SkFontData> SkTypeface_Empty::onMakeFontData() const { return nullptr; }
+
+SkTypeface_File::SkTypeface_File(const SkFontStyle& style, bool isFixedPitch, bool sysFont,
+ const SkString familyName, const char path[], int index)
+ : INHERITED(style, isFixedPitch, sysFont, familyName, index)
+ , fPath(path)
+{ }
+
+std::unique_ptr<SkStreamAsset> SkTypeface_File::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = this->getIndex();
+ return SkStream::MakeFromFile(fPath.c_str());
+}
+
+sk_sp<SkTypeface> SkTypeface_File::onMakeClone(const SkFontArguments& args) const {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+
+ SkString familyName;
+ this->getFamilyName(&familyName);
+
+ return sk_make_sp<SkTypeface_FreeTypeStream>(std::move(data),
+ familyName,
+ this->fontStyle(),
+ this->isFixedPitch());
+}
+
+std::unique_ptr<SkFontData> SkTypeface_File::onMakeFontData() const {
+ int index;
+ std::unique_ptr<SkStreamAsset> stream(this->onOpenStream(&index));
+ if (!stream) {
+ return nullptr;
+ }
+ return std::make_unique<SkFontData>(std::move(stream), index, 0, nullptr, 0, nullptr, 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkFontStyleSet_Custom::SkFontStyleSet_Custom(const SkString familyName) : fFamilyName(familyName) {}
+
+void SkFontStyleSet_Custom::appendTypeface(sk_sp<SkTypeface> typeface) {
+ fStyles.emplace_back(std::move(typeface));
+}
+
+int SkFontStyleSet_Custom::count() {
+ return fStyles.size();
+}
+
+void SkFontStyleSet_Custom::getStyle(int index, SkFontStyle* style, SkString* name) {
+ SkASSERT(index < fStyles.size());
+ if (style) {
+ *style = fStyles[index]->fontStyle();
+ }
+ if (name) {
+ name->reset();
+ }
+}
+
+SkTypeface* SkFontStyleSet_Custom::createTypeface(int index) {
+ SkASSERT(index < fStyles.size());
+ return SkRef(fStyles[index].get());
+}
+
+SkTypeface* SkFontStyleSet_Custom::matchStyle(const SkFontStyle& pattern) {
+ return this->matchStyleCSS3(pattern);
+}
+
+SkString SkFontStyleSet_Custom::getFamilyName() { return fFamilyName; }
+
+
+SkFontMgr_Custom::SkFontMgr_Custom(const SystemFontLoader& loader) : fDefaultFamily(nullptr) {
+ loader.loadSystemFonts(fScanner, &fFamilies);
+
+ // Try to pick a default font.
+ static const char* defaultNames[] = {
+ "Arial", "Verdana", "Times New Roman", "Droid Sans", "DejaVu Serif", nullptr
+ };
+ for (size_t i = 0; i < std::size(defaultNames); ++i) {
+ sk_sp<SkFontStyleSet_Custom> set(this->onMatchFamily(defaultNames[i]));
+ if (nullptr == set) {
+ continue;
+ }
+
+ sk_sp<SkTypeface> tf(set->matchStyle(SkFontStyle(SkFontStyle::kNormal_Weight,
+ SkFontStyle::kNormal_Width,
+ SkFontStyle::kUpright_Slant)));
+ if (nullptr == tf) {
+ continue;
+ }
+
+ fDefaultFamily = set.get();
+ break;
+ }
+ if (nullptr == fDefaultFamily) {
+ fDefaultFamily = fFamilies[0].get();
+ }
+}
+
+int SkFontMgr_Custom::onCountFamilies() const {
+ return fFamilies.size();
+}
+
+void SkFontMgr_Custom::onGetFamilyName(int index, SkString* familyName) const {
+ SkASSERT(index < fFamilies.size());
+ familyName->set(fFamilies[index]->getFamilyName());
+}
+
+SkFontStyleSet_Custom* SkFontMgr_Custom::onCreateStyleSet(int index) const {
+ SkASSERT(index < fFamilies.size());
+ return SkRef(fFamilies[index].get());
+}
+
+SkFontStyleSet_Custom* SkFontMgr_Custom::onMatchFamily(const char familyName[]) const {
+ for (int i = 0; i < fFamilies.size(); ++i) {
+ if (fFamilies[i]->getFamilyName().equals(familyName)) {
+ return SkRef(fFamilies[i].get());
+ }
+ }
+ return nullptr;
+}
+
+SkTypeface* SkFontMgr_Custom::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const
+{
+ sk_sp<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontStyle);
+}
+
+SkTypeface* SkFontMgr_Custom::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const
+{
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromData(sk_sp<SkData> data, int ttcIndex) const {
+ return this->makeFromStream(std::make_unique<SkMemoryStream>(std::move(data)), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const {
+ return this->makeFromStream(std::move(stream), SkFontArguments().setCollectionIndex(ttcIndex));
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const {
+ return SkTypeface_FreeType::MakeFromStream(std::move(stream), args);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onMakeFromFile(const char path[], int ttcIndex) const {
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(path);
+ return stream ? this->makeFromStream(std::move(stream), ttcIndex) : nullptr;
+}
+
+sk_sp<SkTypeface> SkFontMgr_Custom::onLegacyMakeTypeface(const char familyName[],
+ SkFontStyle style) const {
+ sk_sp<SkTypeface> tf;
+
+ if (familyName) {
+ tf.reset(this->onMatchFamilyStyle(familyName, style));
+ }
+
+ if (nullptr == tf) {
+ tf.reset(fDefaultFamily->matchStyle(style));
+ }
+
+ return tf;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom.h b/gfx/skia/skia/src/ports/SkFontMgr_custom.h
new file mode 100644
index 0000000000..d867f51773
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFontMgr_custom_DEFINED
+#define SkFontMgr_custom_DEFINED
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+class SkData;
+class SkFontDescriptor;
+class SkStreamAsset;
+class SkTypeface;
+
+/** The base SkTypeface implementation for the custom font manager. */
+class SkTypeface_Custom : public SkTypeface_FreeType {
+public:
+ SkTypeface_Custom(const SkFontStyle& style, bool isFixedPitch,
+ bool sysFont, const SkString familyName, int index);
+ bool isSysFont() const;
+
+protected:
+ void onGetFamilyName(SkString* familyName) const override;
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const override;
+ int getIndex() const;
+
+private:
+ const bool fIsSysFont;
+ const SkString fFamilyName;
+ const int fIndex;
+
+ using INHERITED = SkTypeface_FreeType;
+};
+
+/** The empty SkTypeface implementation for the custom font manager.
+ * Used as the last resort fallback typeface.
+ */
+class SkTypeface_Empty : public SkTypeface_Custom {
+public:
+ SkTypeface_Empty() ;
+
+protected:
+ std::unique_ptr<SkStreamAsset> onOpenStream(int*) const override;
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override;
+ std::unique_ptr<SkFontData> onMakeFontData() const override;
+
+private:
+ using INHERITED = SkTypeface_Custom;
+};
+
+/** The file SkTypeface implementation for the custom font manager. */
+class SkTypeface_File : public SkTypeface_Custom {
+public:
+ SkTypeface_File(const SkFontStyle& style, bool isFixedPitch, bool sysFont,
+ const SkString familyName, const char path[], int index);
+
+protected:
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override;
+ std::unique_ptr<SkFontData> onMakeFontData() const override;
+
+private:
+ SkString fPath;
+
+ using INHERITED = SkTypeface_Custom;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * SkFontStyleSet_Custom
+ *
+ * This class is used by SkFontMgr_Custom to hold SkTypeface_Custom families.
+ */
+class SkFontStyleSet_Custom : public SkFontStyleSet {
+public:
+ explicit SkFontStyleSet_Custom(const SkString familyName);
+
+ /** Should only be called during the initial build phase. */
+ void appendTypeface(sk_sp<SkTypeface> typeface);
+ int count() override;
+ void getStyle(int index, SkFontStyle* style, SkString* name) override;
+ SkTypeface* createTypeface(int index) override;
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override;
+ SkString getFamilyName();
+
+private:
+ SkTArray<sk_sp<SkTypeface>> fStyles;
+ SkString fFamilyName;
+
+ friend class SkFontMgr_Custom;
+};
+
+/**
+ * SkFontMgr_Custom
+ *
+ * This class is essentially a collection of SkFontStyleSet_Custom,
+ * one SkFontStyleSet_Custom for each family. This class may be modified
+ * to load fonts from any source by changing the initialization.
+ */
+class SkFontMgr_Custom : public SkFontMgr {
+public:
+ typedef SkTArray<sk_sp<SkFontStyleSet_Custom>> Families;
+ class SystemFontLoader {
+ public:
+ virtual ~SystemFontLoader() { }
+ virtual void loadSystemFonts(const SkTypeface_FreeType::Scanner&, Families*) const = 0;
+ };
+ explicit SkFontMgr_Custom(const SystemFontLoader& loader);
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet_Custom* onCreateStyleSet(int index) const override;
+ SkFontStyleSet_Custom* onMatchFamily(const char familyName[]) const override;
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontStyle) const override;
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override;
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>, const SkFontArguments&) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override;
+
+private:
+ Families fFamilies;
+ SkFontStyleSet_Custom* fDefaultFamily;
+ SkTypeface_FreeType::Scanner fScanner;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_directory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory.cpp
new file mode 100644
index 0000000000..bda5681131
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/ports/SkFontMgr_directory.h"
+#include "src/core/SkOSFile.h"
+#include "src/ports/SkFontMgr_custom.h"
+#include "src/utils/SkOSPath.h"
+
+class DirectorySystemFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ DirectorySystemFontLoader(const char* dir) : fBaseDirectory(dir) { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ load_directory_fonts(scanner, fBaseDirectory, ".ttf", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".ttc", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".otf", families);
+ load_directory_fonts(scanner, fBaseDirectory, ".pfb", families);
+
+ if (families->empty()) {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+ }
+
+private:
+ static SkFontStyleSet_Custom* find_family(SkFontMgr_Custom::Families& families,
+ const char familyName[])
+ {
+ for (int i = 0; i < families.size(); ++i) {
+ if (families[i]->getFamilyName().equals(familyName)) {
+ return families[i].get();
+ }
+ }
+ return nullptr;
+ }
+
+ static void load_directory_fonts(const SkTypeface_FreeType::Scanner& scanner,
+ const SkString& directory, const char* suffix,
+ SkFontMgr_Custom::Families* families)
+ {
+ SkOSFile::Iter iter(directory.c_str(), suffix);
+ SkString name;
+
+ while (iter.next(&name, false)) {
+ SkString filename(SkOSPath::Join(directory.c_str(), name.c_str()));
+ std::unique_ptr<SkStreamAsset> stream = SkStream::MakeFromFile(filename.c_str());
+ if (!stream) {
+ // SkDebugf("---- failed to open <%s>\n", filename.c_str());
+ continue;
+ }
+
+ int numFaces;
+ if (!scanner.recognizedFont(stream.get(), &numFaces)) {
+ // SkDebugf("---- failed to open <%s> as a font\n", filename.c_str());
+ continue;
+ }
+
+ for (int faceIndex = 0; faceIndex < numFaces; ++faceIndex) {
+ bool isFixedPitch;
+ SkString realname;
+ SkFontStyle style = SkFontStyle(); // avoid uninitialized warning
+ if (!scanner.scanFont(stream.get(), faceIndex,
+ &realname, &style, &isFixedPitch, nullptr))
+ {
+ // SkDebugf("---- failed to open <%s> <%d> as a font\n",
+ // filename.c_str(), faceIndex);
+ continue;
+ }
+
+ SkFontStyleSet_Custom* addTo = find_family(*families, realname.c_str());
+ if (nullptr == addTo) {
+ addTo = new SkFontStyleSet_Custom(realname);
+ families->push_back().reset(addTo);
+ }
+ addTo->appendTypeface(sk_make_sp<SkTypeface_File>(style, isFixedPitch, true,
+ realname, filename.c_str(),
+ faceIndex));
+ }
+ }
+
+ SkOSFile::Iter dirIter(directory.c_str());
+ while (dirIter.next(&name, true)) {
+ if (name.startsWith(".")) {
+ continue;
+ }
+ SkString dirname(SkOSPath::Join(directory.c_str(), name.c_str()));
+ load_directory_fonts(scanner, dirname, suffix, families);
+ }
+ }
+
+ SkString fBaseDirectory;
+};
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Directory(const char* dir) {
+ return sk_make_sp<SkFontMgr_Custom>(DirectorySystemFontLoader(dir));
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp
new file mode 100644
index 0000000000..f20b1b5bc4
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_directory_factory.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontMgr_directory.h"
+
+#ifndef SK_FONT_FILE_PREFIX
+# if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+# define SK_FONT_FILE_PREFIX "/System/Library/Fonts/"
+# else
+# define SK_FONT_FILE_PREFIX "/usr/share/fonts/"
+# endif
+#endif
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Directory(SK_FONT_FILE_PREFIX);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded.cpp
new file mode 100644
index 0000000000..aae51d0cdd
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/ports/SkFontMgr_data.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/ports/SkFontMgr_custom.h"
+
+struct SkEmbeddedResource { const uint8_t* data; size_t size; };
+struct SkEmbeddedResourceHeader { const SkEmbeddedResource* entries; int count; };
+
+static void load_font_from_data(const SkTypeface_FreeType::Scanner& scanner,
+ std::unique_ptr<SkMemoryStream> stream, int index,
+ SkFontMgr_Custom::Families* families);
+
+class EmbeddedSystemFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ EmbeddedSystemFontLoader(const SkEmbeddedResourceHeader* header) : fHeader(header) { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ for (int i = 0; i < fHeader->count; ++i) {
+ const SkEmbeddedResource& fontEntry = fHeader->entries[i];
+ auto stream = std::make_unique<SkMemoryStream>(fontEntry.data, fontEntry.size, false);
+ load_font_from_data(scanner, std::move(stream), i, families);
+ }
+
+ if (families->empty()) {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+ }
+
+ const SkEmbeddedResourceHeader* fHeader;
+};
+
+class DataFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ DataFontLoader(sk_sp<SkData>* datas, int n) : fDatas(datas), fNum(n) { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ for (int i = 0; i < fNum; ++i) {
+ auto stream = std::make_unique<SkMemoryStream>(fDatas[i]);
+ load_font_from_data(scanner, std::move(stream), i, families);
+ }
+
+ if (families->empty()) {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+ }
+
+ const sk_sp<SkData>* fDatas;
+ const int fNum;
+};
+
+static SkFontStyleSet_Custom* find_family(SkFontMgr_Custom::Families& families,
+ const char familyName[])
+{
+ for (int i = 0; i < families.size(); ++i) {
+ if (families[i]->getFamilyName().equals(familyName)) {
+ return families[i].get();
+ }
+ }
+ return nullptr;
+}
+
+static void load_font_from_data(const SkTypeface_FreeType::Scanner& scanner,
+ std::unique_ptr<SkMemoryStream> stream, int index,
+ SkFontMgr_Custom::Families* families)
+{
+ int numFaces;
+ if (!scanner.recognizedFont(stream.get(), &numFaces)) {
+ SkDebugf("---- failed to open <%d> as a font\n", index);
+ return;
+ }
+
+ for (int faceIndex = 0; faceIndex < numFaces; ++faceIndex) {
+ bool isFixedPitch;
+ SkString realname;
+ SkFontStyle style = SkFontStyle(); // avoid uninitialized warning
+ if (!scanner.scanFont(stream.get(), faceIndex,
+ &realname, &style, &isFixedPitch, nullptr))
+ {
+ SkDebugf("---- failed to open <%d> <%d> as a font\n", index, faceIndex);
+ return;
+ }
+
+ SkFontStyleSet_Custom* addTo = find_family(*families, realname.c_str());
+ if (nullptr == addTo) {
+ addTo = new SkFontStyleSet_Custom(realname);
+ families->push_back().reset(addTo);
+ }
+ auto data = std::make_unique<SkFontData>(stream->duplicate(), faceIndex, 0,
+ nullptr, 0, nullptr, 0);
+ addTo->appendTypeface(sk_make_sp<SkTypeface_FreeTypeStream>(
+ std::move(data), realname, style, isFixedPitch));
+ }
+}
+
+sk_sp<SkFontMgr> SkFontMgr_New_Custom_Embedded(const SkEmbeddedResourceHeader* header) {
+ return sk_make_sp<SkFontMgr_Custom>(EmbeddedSystemFontLoader(header));
+}
+
+sk_sp<SkFontMgr> SkFontMgr_New_Custom_Data(SkSpan<sk_sp<SkData>> datas) {
+ SkASSERT(!datas.empty());
+ return sk_make_sp<SkFontMgr_Custom>(DataFontLoader(datas.data(), datas.size()));
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp
new file mode 100644
index 0000000000..82e1b842ad
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_embedded_factory.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+
+struct SkEmbeddedResource { const uint8_t* data; size_t size; };
+struct SkEmbeddedResourceHeader { const SkEmbeddedResource* entries; int count; };
+sk_sp<SkFontMgr> SkFontMgr_New_Custom_Embedded(const SkEmbeddedResourceHeader* header);
+
+extern "C" const SkEmbeddedResourceHeader SK_EMBEDDED_FONTS;
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Embedded(&SK_EMBEDDED_FONTS);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_empty.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty.cpp
new file mode 100644
index 0000000000..0e3e18aefd
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty.cpp
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/ports/SkFontMgr_empty.h"
+#include "src/ports/SkFontMgr_custom.h"
+
+class EmptyFontLoader : public SkFontMgr_Custom::SystemFontLoader {
+public:
+ EmptyFontLoader() { }
+
+ void loadSystemFonts(const SkTypeface_FreeType::Scanner& scanner,
+ SkFontMgr_Custom::Families* families) const override
+ {
+ SkFontStyleSet_Custom* family = new SkFontStyleSet_Custom(SkString());
+ families->push_back().reset(family);
+ family->appendTypeface(sk_make_sp<SkTypeface_Empty>());
+ }
+
+};
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Custom_Empty() {
+ return sk_make_sp<SkFontMgr_Custom>(EmptyFontLoader());
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp
new file mode 100644
index 0000000000..b97c199490
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_custom_empty_factory.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontMgr_empty.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_Custom_Empty();
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp
new file mode 100644
index 0000000000..69410c5ef9
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_empty_factory.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ // Always return nullptr, an empty SkFontMgr will be used.
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp
new file mode 100644
index 0000000000..89f1b0150a
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig.cpp
@@ -0,0 +1,948 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkDataTable.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkFontHost_FreeType_common.h"
+
+#include <fontconfig/fontconfig.h>
+#include <string.h>
+
+using namespace skia_private;
+
+class SkData;
+
+// FC_POSTSCRIPT_NAME was added with b561ff20 which ended up in 2.10.92
+// Ubuntu 14.04 is on 2.11.0
+// Debian 8 and 9 are on 2.11
+// OpenSUSE Leap 42.1 is on 2.11.0 (42.3 is on 2.11.1)
+// Fedora 24 is on 2.11.94
+#ifndef FC_POSTSCRIPT_NAME
+# define FC_POSTSCRIPT_NAME "postscriptname"
+#endif
+
+/** Since FontConfig is poorly documented, this gives a high level overview:
+ *
+ * FcConfig is a handle to a FontConfig configuration instance. Each 'configuration' is independent
+ * from any others which may exist. There exists a default global configuration which is created
+ * and destroyed by FcInit and FcFini, but this default should not normally be used.
+ * Instead, one should use FcConfigCreate and FcInit* to have a named local state.
+ *
+ * FcPatterns are {objectName -> [element]} (maps from object names to a list of elements).
+ * Each element is some internal data plus an FcValue which is a variant (a union with a type tag).
+ * Lists of elements are not typed, except by convention. Any collection of FcValues must be
+ * assumed to be heterogeneous by the code, but the code need not do anything particularly
+ * interesting if the values go against convention.
+ *
+ * Somewhat like DirectWrite, FontConfig supports synthetics through FC_EMBOLDEN and FC_MATRIX.
+ * Like all synthetic information, such information must be passed with the font data.
+ */
+
+namespace {
+
+// FontConfig was thread antagonistic until 2.10.91 with known thread safety issues until 2.13.93.
+// Before that, lock with a global mutex.
+// See https://bug.skia.org/1497 and cl/339089311 for background.
+static SkMutex& f_c_mutex() {
+ static SkMutex& mutex = *(new SkMutex);
+ return mutex;
+}
+
+class FCLocker {
+ inline static constexpr int FontConfigThreadSafeVersion = 21393;
+
+ // Assume FcGetVersion() has always been thread safe.
+ static void lock() SK_NO_THREAD_SAFETY_ANALYSIS {
+ if (FcGetVersion() < FontConfigThreadSafeVersion) {
+ f_c_mutex().acquire();
+ }
+ }
+ static void unlock() SK_NO_THREAD_SAFETY_ANALYSIS {
+ AssertHeld();
+ if (FcGetVersion() < FontConfigThreadSafeVersion) {
+ f_c_mutex().release();
+ }
+ }
+
+public:
+ FCLocker() { lock(); }
+ ~FCLocker() { unlock(); }
+
+ static void AssertHeld() { SkDEBUGCODE(
+ if (FcGetVersion() < FontConfigThreadSafeVersion) {
+ f_c_mutex().assertHeld();
+ }
+ ) }
+};
+
+} // namespace
+
+template<typename T, void (*D)(T*)> void FcTDestroy(T* t) {
+ FCLocker::AssertHeld();
+ D(t);
+}
+template <typename T, T* (*C)(), void (*D)(T*)> class SkAutoFc
+ : public SkAutoTCallVProc<T, FcTDestroy<T, D>> {
+ using inherited = SkAutoTCallVProc<T, FcTDestroy<T, D>>;
+public:
+ SkAutoFc() : SkAutoTCallVProc<T, FcTDestroy<T, D>>( C() ) {
+ T* obj = this->operator T*();
+ SkASSERT_RELEASE(nullptr != obj);
+ }
+ explicit SkAutoFc(T* obj) : inherited(obj) {}
+ SkAutoFc(const SkAutoFc&) = delete;
+ SkAutoFc(SkAutoFc&& that) : inherited(std::move(that)) {}
+};
+
+typedef SkAutoFc<FcCharSet, FcCharSetCreate, FcCharSetDestroy> SkAutoFcCharSet;
+typedef SkAutoFc<FcConfig, FcConfigCreate, FcConfigDestroy> SkAutoFcConfig;
+typedef SkAutoFc<FcFontSet, FcFontSetCreate, FcFontSetDestroy> SkAutoFcFontSet;
+typedef SkAutoFc<FcLangSet, FcLangSetCreate, FcLangSetDestroy> SkAutoFcLangSet;
+typedef SkAutoFc<FcObjectSet, FcObjectSetCreate, FcObjectSetDestroy> SkAutoFcObjectSet;
+typedef SkAutoFc<FcPattern, FcPatternCreate, FcPatternDestroy> SkAutoFcPattern;
+
+static bool get_bool(FcPattern* pattern, const char object[], bool missing = false) {
+ FcBool value;
+ if (FcPatternGetBool(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static int get_int(FcPattern* pattern, const char object[], int missing) {
+ int value;
+ if (FcPatternGetInteger(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return value;
+}
+
+static const char* get_string(FcPattern* pattern, const char object[], const char* missing = "") {
+ FcChar8* value;
+ if (FcPatternGetString(pattern, object, 0, &value) != FcResultMatch) {
+ return missing;
+ }
+ return (const char*)value;
+}
+
+static const FcMatrix* get_matrix(FcPattern* pattern, const char object[]) {
+ FcMatrix* matrix;
+ if (FcPatternGetMatrix(pattern, object, 0, &matrix) != FcResultMatch) {
+ return nullptr;
+ }
+ return matrix;
+}
+
+enum SkWeakReturn {
+ kIsWeak_WeakReturn,
+ kIsStrong_WeakReturn,
+ kNoId_WeakReturn
+};
+/** Ideally there would exist a call like
+ * FcResult FcPatternIsWeak(pattern, object, id, FcBool* isWeak);
+ * Sometime after 2.12.4 FcPatternGetWithBinding was added which can retrieve the binding.
+ *
+ * However, there is no such call and as of Fc 2.11.0 even FcPatternEquals ignores the weak bit.
+ * Currently, the only reliable way of finding the weak bit is by its effect on matching.
+ * The weak bit only affects the matching of FC_FAMILY and FC_POSTSCRIPT_NAME object values.
+ * A element with the weak bit is scored after FC_LANG, without the weak bit is scored before.
+ * Note that the weak bit is stored on the element, not on the value it holds.
+ */
+static SkWeakReturn is_weak(FcPattern* pattern, const char object[], int id) {
+ FCLocker::AssertHeld();
+
+ FcResult result;
+
+ // Create a copy of the pattern with only the value 'pattern'['object'['id']] in it.
+ // Internally, FontConfig pattern objects are linked lists, so faster to remove from head.
+ SkAutoFcObjectSet requestedObjectOnly(FcObjectSetBuild(object, nullptr));
+ SkAutoFcPattern minimal(FcPatternFilter(pattern, requestedObjectOnly));
+ FcBool hasId = true;
+ for (int i = 0; hasId && i < id; ++i) {
+ hasId = FcPatternRemove(minimal, object, 0);
+ }
+ if (!hasId) {
+ return kNoId_WeakReturn;
+ }
+ FcValue value;
+ result = FcPatternGet(minimal, object, 0, &value);
+ if (result != FcResultMatch) {
+ return kNoId_WeakReturn;
+ }
+ while (hasId) {
+ hasId = FcPatternRemove(minimal, object, 1);
+ }
+
+ // Create a font set with two patterns.
+ // 1. the same 'object' as minimal and a lang object with only 'nomatchlang'.
+ // 2. a different 'object' from minimal and a lang object with only 'matchlang'.
+ SkAutoFcFontSet fontSet;
+
+ SkAutoFcLangSet strongLangSet;
+ FcLangSetAdd(strongLangSet, (const FcChar8*)"nomatchlang");
+ SkAutoFcPattern strong(FcPatternDuplicate(minimal));
+ FcPatternAddLangSet(strong, FC_LANG, strongLangSet);
+
+ SkAutoFcLangSet weakLangSet;
+ FcLangSetAdd(weakLangSet, (const FcChar8*)"matchlang");
+ SkAutoFcPattern weak;
+ FcPatternAddString(weak, object, (const FcChar8*)"nomatchstring");
+ FcPatternAddLangSet(weak, FC_LANG, weakLangSet);
+
+ FcFontSetAdd(fontSet, strong.release());
+ FcFontSetAdd(fontSet, weak.release());
+
+ // Add 'matchlang' to the copy of the pattern.
+ FcPatternAddLangSet(minimal, FC_LANG, weakLangSet);
+
+ // Run a match against the copy of the pattern.
+ // If the 'id' was weak, then we should match the pattern with 'matchlang'.
+ // If the 'id' was strong, then we should match the pattern with 'nomatchlang'.
+
+ // Note that this config is only used for FcFontRenderPrepare, which we don't even want.
+ // However, there appears to be no way to match/sort without it.
+ SkAutoFcConfig config;
+ FcFontSet* fontSets[1] = { fontSet };
+ SkAutoFcPattern match(FcFontSetMatch(config, fontSets, std::size(fontSets),
+ minimal, &result));
+
+ FcLangSet* matchLangSet;
+ FcPatternGetLangSet(match, FC_LANG, 0, &matchLangSet);
+ return FcLangEqual == FcLangSetHasLang(matchLangSet, (const FcChar8*)"matchlang")
+ ? kIsWeak_WeakReturn : kIsStrong_WeakReturn;
+}
+
+/** Removes weak elements from either FC_FAMILY or FC_POSTSCRIPT_NAME objects in the property.
+ * This can be quite expensive, and should not be used more than once per font lookup.
+ * This removes all of the weak elements after the last strong element.
+ */
+static void remove_weak(FcPattern* pattern, const char object[]) {
+ FCLocker::AssertHeld();
+
+ SkAutoFcObjectSet requestedObjectOnly(FcObjectSetBuild(object, nullptr));
+ SkAutoFcPattern minimal(FcPatternFilter(pattern, requestedObjectOnly));
+
+ int lastStrongId = -1;
+ int numIds;
+ SkWeakReturn result;
+ for (int id = 0; ; ++id) {
+ result = is_weak(minimal, object, 0);
+ if (kNoId_WeakReturn == result) {
+ numIds = id;
+ break;
+ }
+ if (kIsStrong_WeakReturn == result) {
+ lastStrongId = id;
+ }
+ SkAssertResult(FcPatternRemove(minimal, object, 0));
+ }
+
+ // If they were all weak, then leave the pattern alone.
+ if (lastStrongId < 0) {
+ return;
+ }
+
+ // Remove everything after the last strong.
+ for (int id = lastStrongId + 1; id < numIds; ++id) {
+ SkAssertResult(FcPatternRemove(pattern, object, lastStrongId + 1));
+ }
+}
+
+static int map_range(SkScalar value,
+ SkScalar old_min, SkScalar old_max,
+ SkScalar new_min, SkScalar new_max)
+{
+ SkASSERT(old_min < old_max);
+ SkASSERT(new_min <= new_max);
+ return new_min + ((value - old_min) * (new_max - new_min) / (old_max - old_min));
+}
+
+struct MapRanges {
+ SkScalar old_val;
+ SkScalar new_val;
+};
+
+static SkScalar map_ranges(SkScalar val, MapRanges const ranges[], int rangesCount) {
+ // -Inf to [0]
+ if (val < ranges[0].old_val) {
+ return ranges[0].new_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < rangesCount - 1; ++i) {
+ if (val < ranges[i+1].old_val) {
+ return map_range(val, ranges[i].old_val, ranges[i+1].old_val,
+ ranges[i].new_val, ranges[i+1].new_val);
+ }
+ }
+
+ // From [n] to +Inf
+ // if (fcweight < Inf)
+ return ranges[rangesCount-1].new_val;
+}
+
+#ifndef FC_WEIGHT_DEMILIGHT
+#define FC_WEIGHT_DEMILIGHT 65
+#endif
+
+static SkFontStyle skfontstyle_from_fcpattern(FcPattern* pattern) {
+ typedef SkFontStyle SkFS;
+
+ // FcWeightToOpenType was buggy until 2.12.4
+ static constexpr MapRanges weightRanges[] = {
+ { FC_WEIGHT_THIN, SkFS::kThin_Weight },
+ { FC_WEIGHT_EXTRALIGHT, SkFS::kExtraLight_Weight },
+ { FC_WEIGHT_LIGHT, SkFS::kLight_Weight },
+ { FC_WEIGHT_DEMILIGHT, 350 },
+ { FC_WEIGHT_BOOK, 380 },
+ { FC_WEIGHT_REGULAR, SkFS::kNormal_Weight },
+ { FC_WEIGHT_MEDIUM, SkFS::kMedium_Weight },
+ { FC_WEIGHT_DEMIBOLD, SkFS::kSemiBold_Weight },
+ { FC_WEIGHT_BOLD, SkFS::kBold_Weight },
+ { FC_WEIGHT_EXTRABOLD, SkFS::kExtraBold_Weight },
+ { FC_WEIGHT_BLACK, SkFS::kBlack_Weight },
+ { FC_WEIGHT_EXTRABLACK, SkFS::kExtraBlack_Weight },
+ };
+ SkScalar weight = map_ranges(get_int(pattern, FC_WEIGHT, FC_WEIGHT_REGULAR),
+ weightRanges, std::size(weightRanges));
+
+ static constexpr MapRanges widthRanges[] = {
+ { FC_WIDTH_ULTRACONDENSED, SkFS::kUltraCondensed_Width },
+ { FC_WIDTH_EXTRACONDENSED, SkFS::kExtraCondensed_Width },
+ { FC_WIDTH_CONDENSED, SkFS::kCondensed_Width },
+ { FC_WIDTH_SEMICONDENSED, SkFS::kSemiCondensed_Width },
+ { FC_WIDTH_NORMAL, SkFS::kNormal_Width },
+ { FC_WIDTH_SEMIEXPANDED, SkFS::kSemiExpanded_Width },
+ { FC_WIDTH_EXPANDED, SkFS::kExpanded_Width },
+ { FC_WIDTH_EXTRAEXPANDED, SkFS::kExtraExpanded_Width },
+ { FC_WIDTH_ULTRAEXPANDED, SkFS::kUltraExpanded_Width },
+ };
+ SkScalar width = map_ranges(get_int(pattern, FC_WIDTH, FC_WIDTH_NORMAL),
+ widthRanges, std::size(widthRanges));
+
+ SkFS::Slant slant = SkFS::kUpright_Slant;
+ switch (get_int(pattern, FC_SLANT, FC_SLANT_ROMAN)) {
+ case FC_SLANT_ROMAN: slant = SkFS::kUpright_Slant; break;
+ case FC_SLANT_ITALIC : slant = SkFS::kItalic_Slant ; break;
+ case FC_SLANT_OBLIQUE: slant = SkFS::kOblique_Slant; break;
+ default: SkASSERT(false); break;
+ }
+
+ return SkFontStyle(SkScalarRoundToInt(weight), SkScalarRoundToInt(width), slant);
+}
+
+static void fcpattern_from_skfontstyle(SkFontStyle style, FcPattern* pattern) {
+ FCLocker::AssertHeld();
+
+ typedef SkFontStyle SkFS;
+
+ // FcWeightFromOpenType was buggy until 2.12.4
+ static constexpr MapRanges weightRanges[] = {
+ { SkFS::kThin_Weight, FC_WEIGHT_THIN },
+ { SkFS::kExtraLight_Weight, FC_WEIGHT_EXTRALIGHT },
+ { SkFS::kLight_Weight, FC_WEIGHT_LIGHT },
+ { 350, FC_WEIGHT_DEMILIGHT },
+ { 380, FC_WEIGHT_BOOK },
+ { SkFS::kNormal_Weight, FC_WEIGHT_REGULAR },
+ { SkFS::kMedium_Weight, FC_WEIGHT_MEDIUM },
+ { SkFS::kSemiBold_Weight, FC_WEIGHT_DEMIBOLD },
+ { SkFS::kBold_Weight, FC_WEIGHT_BOLD },
+ { SkFS::kExtraBold_Weight, FC_WEIGHT_EXTRABOLD },
+ { SkFS::kBlack_Weight, FC_WEIGHT_BLACK },
+ { SkFS::kExtraBlack_Weight, FC_WEIGHT_EXTRABLACK },
+ };
+ int weight = map_ranges(style.weight(), weightRanges, std::size(weightRanges));
+
+ static constexpr MapRanges widthRanges[] = {
+ { SkFS::kUltraCondensed_Width, FC_WIDTH_ULTRACONDENSED },
+ { SkFS::kExtraCondensed_Width, FC_WIDTH_EXTRACONDENSED },
+ { SkFS::kCondensed_Width, FC_WIDTH_CONDENSED },
+ { SkFS::kSemiCondensed_Width, FC_WIDTH_SEMICONDENSED },
+ { SkFS::kNormal_Width, FC_WIDTH_NORMAL },
+ { SkFS::kSemiExpanded_Width, FC_WIDTH_SEMIEXPANDED },
+ { SkFS::kExpanded_Width, FC_WIDTH_EXPANDED },
+ { SkFS::kExtraExpanded_Width, FC_WIDTH_EXTRAEXPANDED },
+ { SkFS::kUltraExpanded_Width, FC_WIDTH_ULTRAEXPANDED },
+ };
+ int width = map_ranges(style.width(), widthRanges, std::size(widthRanges));
+
+ int slant = FC_SLANT_ROMAN;
+ switch (style.slant()) {
+ case SkFS::kUpright_Slant: slant = FC_SLANT_ROMAN ; break;
+ case SkFS::kItalic_Slant : slant = FC_SLANT_ITALIC ; break;
+ case SkFS::kOblique_Slant: slant = FC_SLANT_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+
+ FcPatternAddInteger(pattern, FC_WEIGHT, weight);
+ FcPatternAddInteger(pattern, FC_WIDTH , width);
+ FcPatternAddInteger(pattern, FC_SLANT , slant);
+}
+
+class SkTypeface_fontconfig : public SkTypeface_FreeType {
+public:
+ static sk_sp<SkTypeface_fontconfig> Make(SkAutoFcPattern pattern, SkString sysroot) {
+ return sk_sp<SkTypeface_fontconfig>(new SkTypeface_fontconfig(std::move(pattern),
+ std::move(sysroot)));
+ }
+ mutable SkAutoFcPattern fPattern; // Mutable for passing to FontConfig API.
+ const SkString fSysroot;
+
+ void onGetFamilyName(SkString* familyName) const override {
+ *familyName = get_string(fPattern, FC_FAMILY);
+ }
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override {
+ // TODO: need to serialize FC_MATRIX and FC_EMBOLDEN
+ FCLocker lock;
+ desc->setFamilyName(get_string(fPattern, FC_FAMILY));
+ desc->setFullName(get_string(fPattern, FC_FULLNAME));
+ desc->setPostscriptName(get_string(fPattern, FC_POSTSCRIPT_NAME));
+ desc->setStyle(this->fontStyle());
+ desc->setFactoryId(SkTypeface_FreeType::FactoryId);
+ *serialize = false;
+ }
+
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override {
+ FCLocker lock;
+ *ttcIndex = get_int(fPattern, FC_INDEX, 0);
+ const char* filename = get_string(fPattern, FC_FILE);
+ // See FontAccessible for note on searching sysroot then non-sysroot path.
+ SkString resolvedFilename;
+ if (!fSysroot.isEmpty()) {
+ resolvedFilename = fSysroot;
+ resolvedFilename += filename;
+ if (sk_exists(resolvedFilename.c_str(), kRead_SkFILE_Flag)) {
+ filename = resolvedFilename.c_str();
+ }
+ }
+ return SkStream::MakeFromFile(filename);
+ }
+
+ void onFilterRec(SkScalerContextRec* rec) const override {
+ // FontConfig provides 10-scale-bitmap-fonts.conf which applies an inverse "pixelsize"
+ // matrix. It is not known if this .conf is active or not, so it is not clear if
+ // "pixelsize" should be applied before this matrix. Since using a matrix with a bitmap
+ // font isn't a great idea, only apply the matrix to outline fonts.
+ const FcMatrix* fcMatrix = get_matrix(fPattern, FC_MATRIX);
+ bool fcOutline = get_bool(fPattern, FC_OUTLINE, true);
+ if (fcOutline && fcMatrix) {
+ // fPost2x2 is column-major, left handed (y down).
+ // FcMatrix is column-major, right handed (y up).
+ SkMatrix fm;
+ fm.setAll(fcMatrix->xx,-fcMatrix->xy, 0,
+ -fcMatrix->yx, fcMatrix->yy, 0,
+ 0 , 0 , 1);
+
+ SkMatrix sm;
+ rec->getMatrixFrom2x2(&sm);
+
+ sm.preConcat(fm);
+ rec->fPost2x2[0][0] = sm.getScaleX();
+ rec->fPost2x2[0][1] = sm.getSkewX();
+ rec->fPost2x2[1][0] = sm.getSkewY();
+ rec->fPost2x2[1][1] = sm.getScaleY();
+ }
+ if (get_bool(fPattern, FC_EMBOLDEN)) {
+ rec->fFlags |= SkScalerContext::kEmbolden_Flag;
+ }
+ this->INHERITED::onFilterRec(rec);
+ }
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override {
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info =
+ this->INHERITED::onGetAdvancedMetrics();
+
+ // Simulated fonts shouldn't be considered to be of the type of their data.
+ if (get_matrix(fPattern, FC_MATRIX) || get_bool(fPattern, FC_EMBOLDEN)) {
+ info->fType = SkAdvancedTypefaceMetrics::kOther_Font;
+ }
+ return info;
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ std::unique_ptr<SkFontData> data = this->cloneFontData(args);
+ if (!data) {
+ return nullptr;
+ }
+
+ // TODO: need to clone FC_MATRIX and FC_EMBOLDEN
+ SkString familyName;
+ this->getFamilyName(&familyName);
+ return sk_make_sp<SkTypeface_FreeTypeStream>(
+ std::move(data), familyName, this->fontStyle(), this->isFixedPitch());
+ }
+
+ std::unique_ptr<SkFontData> onMakeFontData() const override {
+ int index;
+ std::unique_ptr<SkStreamAsset> stream(this->onOpenStream(&index));
+ if (!stream) {
+ return nullptr;
+ }
+ // TODO: FC_VARIABLE and FC_FONT_VARIATIONS
+ return std::make_unique<SkFontData>(std::move(stream), index, 0, nullptr, 0, nullptr, 0);
+ }
+
+ ~SkTypeface_fontconfig() override {
+ // Hold the lock while unrefing the pattern.
+ FCLocker lock;
+ fPattern.reset();
+ }
+
+private:
+ SkTypeface_fontconfig(SkAutoFcPattern pattern, SkString sysroot)
+ : INHERITED(skfontstyle_from_fcpattern(pattern),
+ FC_PROPORTIONAL != get_int(pattern, FC_SPACING, FC_PROPORTIONAL))
+ , fPattern(std::move(pattern))
+ , fSysroot(std::move(sysroot))
+ { }
+
+ using INHERITED = SkTypeface_FreeType;
+};
+
+class SkFontMgr_fontconfig : public SkFontMgr {
+ mutable SkAutoFcConfig fFC; // Only mutable to avoid const cast when passed to FontConfig API.
+ const SkString fSysroot;
+ const sk_sp<SkDataTable> fFamilyNames;
+ const SkTypeface_FreeType::Scanner fScanner;
+
+ class StyleSet : public SkFontStyleSet {
+ public:
+ StyleSet(sk_sp<SkFontMgr_fontconfig> parent, SkAutoFcFontSet fontSet)
+ : fFontMgr(std::move(parent)), fFontSet(std::move(fontSet))
+ { }
+
+ ~StyleSet() override {
+ // Hold the lock while unrefing the font set.
+ FCLocker lock;
+ fFontSet.reset();
+ }
+
+ int count() override { return fFontSet->nfont; }
+
+ void getStyle(int index, SkFontStyle* style, SkString* styleName) override {
+ if (index < 0 || fFontSet->nfont <= index) {
+ return;
+ }
+
+ FCLocker lock;
+ if (style) {
+ *style = skfontstyle_from_fcpattern(fFontSet->fonts[index]);
+ }
+ if (styleName) {
+ *styleName = get_string(fFontSet->fonts[index], FC_STYLE);
+ }
+ }
+
+ SkTypeface* createTypeface(int index) override {
+ if (index < 0 || fFontSet->nfont <= index) {
+ return nullptr;
+ }
+ SkAutoFcPattern match([this, &index]() {
+ FCLocker lock;
+ FcPatternReference(fFontSet->fonts[index]);
+ return fFontSet->fonts[index];
+ }());
+ return fFontMgr->createTypefaceFromFcPattern(std::move(match)).release();
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& style) override {
+ SkAutoFcPattern match([this, &style]() {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ fcpattern_from_skfontstyle(style, pattern);
+ FcConfigSubstitute(fFontMgr->fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ FcResult result;
+ FcFontSet* fontSets[1] = { fFontSet };
+ return FcFontSetMatch(fFontMgr->fFC,
+ fontSets, std::size(fontSets),
+ pattern, &result);
+
+ }());
+ return fFontMgr->createTypefaceFromFcPattern(std::move(match)).release();
+ }
+
+ private:
+ sk_sp<SkFontMgr_fontconfig> fFontMgr;
+ SkAutoFcFontSet fFontSet;
+ };
+
+ static bool FindName(const SkTDArray<const char*>& list, const char* str) {
+ int count = list.size();
+ for (int i = 0; i < count; ++i) {
+ if (!strcmp(list[i], str)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static sk_sp<SkDataTable> GetFamilyNames(FcConfig* fcconfig) {
+ FCLocker lock;
+
+ SkTDArray<const char*> names;
+ SkTDArray<size_t> sizes;
+
+ static const FcSetName fcNameSet[] = { FcSetSystem, FcSetApplication };
+ for (int setIndex = 0; setIndex < (int)std::size(fcNameSet); ++setIndex) {
+ // Return value of FcConfigGetFonts must not be destroyed.
+ FcFontSet* allFonts(FcConfigGetFonts(fcconfig, fcNameSet[setIndex]));
+ if (nullptr == allFonts) {
+ continue;
+ }
+
+ for (int fontIndex = 0; fontIndex < allFonts->nfont; ++fontIndex) {
+ FcPattern* current = allFonts->fonts[fontIndex];
+ for (int id = 0; ; ++id) {
+ FcChar8* fcFamilyName;
+ FcResult result = FcPatternGetString(current, FC_FAMILY, id, &fcFamilyName);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ const char* familyName = reinterpret_cast<const char*>(fcFamilyName);
+ if (familyName && !FindName(names, familyName)) {
+ *names.append() = familyName;
+ *sizes.append() = strlen(familyName) + 1;
+ }
+ }
+ }
+ }
+
+ return SkDataTable::MakeCopyArrays((void const *const *)names.begin(),
+ sizes.begin(), names.size());
+ }
+
+ static bool FindByFcPattern(SkTypeface* cached, void* ctx) {
+ SkTypeface_fontconfig* cshFace = static_cast<SkTypeface_fontconfig*>(cached);
+ FcPattern* ctxPattern = static_cast<FcPattern*>(ctx);
+ return FcTrue == FcPatternEqual(cshFace->fPattern, ctxPattern);
+ }
+
+ mutable SkMutex fTFCacheMutex;
+ mutable SkTypefaceCache fTFCache;
+ /** Creates a typeface using a typeface cache.
+ * @param pattern a complete pattern from FcFontRenderPrepare.
+ */
+ sk_sp<SkTypeface> createTypefaceFromFcPattern(SkAutoFcPattern pattern) const {
+ if (!pattern) {
+ return nullptr;
+ }
+ // Cannot hold FCLocker when calling fTFCache.add; an evicted typeface may need to lock.
+ // Must hold fTFCacheMutex when interacting with fTFCache.
+ SkAutoMutexExclusive ama(fTFCacheMutex);
+ sk_sp<SkTypeface> face = [&]() {
+ FCLocker lock;
+ sk_sp<SkTypeface> face = fTFCache.findByProcAndRef(FindByFcPattern, pattern);
+ if (face) {
+ pattern.reset();
+ }
+ return face;
+ }();
+ if (!face) {
+ face = SkTypeface_fontconfig::Make(std::move(pattern), fSysroot);
+ if (face) {
+ // Cannot hold FCLocker in fTFCache.add; evicted typefaces may need to lock.
+ fTFCache.add(face);
+ }
+ }
+ return face;
+ }
+
+public:
+ /** Takes control of the reference to 'config'. */
+ explicit SkFontMgr_fontconfig(FcConfig* config)
+ : fFC(config ? config : FcInitLoadConfigAndFonts())
+ , fSysroot(reinterpret_cast<const char*>(FcConfigGetSysRoot(fFC)))
+ , fFamilyNames(GetFamilyNames(fFC)) { }
+
+ ~SkFontMgr_fontconfig() override {
+ // Hold the lock while unrefing the config.
+ FCLocker lock;
+ fFC.reset();
+ }
+
+protected:
+ int onCountFamilies() const override {
+ return fFamilyNames->count();
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ familyName->set(fFamilyNames->atStr(index));
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ return this->onMatchFamily(fFamilyNames->atStr(index));
+ }
+
+ /** True if any string object value in the font is the same
+ * as a string object value in the pattern.
+ */
+ static bool AnyMatching(FcPattern* font, FcPattern* pattern, const char* object) {
+ FcChar8* fontString;
+ FcChar8* patternString;
+ FcResult result;
+ // Set an arbitrary limit on the number of pattern object values to consider.
+ // TODO: re-write this to avoid N*M
+ static const int maxId = 16;
+ for (int patternId = 0; patternId < maxId; ++patternId) {
+ result = FcPatternGetString(pattern, object, patternId, &patternString);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ for (int fontId = 0; fontId < maxId; ++fontId) {
+ result = FcPatternGetString(font, object, fontId, &fontString);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ if (0 == FcStrCmpIgnoreCase(patternString, fontString)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ bool FontAccessible(FcPattern* font) const {
+ // FontConfig can return fonts which are unreadable.
+ const char* filename = get_string(font, FC_FILE, nullptr);
+ if (nullptr == filename) {
+ return false;
+ }
+
+ // When sysroot was implemented in e96d7760886a3781a46b3271c76af99e15cb0146 (before 2.11.0)
+ // it was broken; mostly fixed in d17f556153fbaf8fe57fdb4fc1f0efa4313f0ecf (after 2.11.1).
+ // This leaves Debian 8 and 9 with broken support for this feature.
+ // As a result, this feature should not be used until at least 2.11.91.
+ // The broken support is mostly around not making all paths relative to the sysroot.
+ // However, even at 2.13.1 it is possible to get a mix of sysroot and non-sysroot paths,
+ // as any added file path not lexically starting with the sysroot will be unchanged.
+ // To allow users to add local app files outside the sysroot,
+ // prefer the sysroot but also look without the sysroot.
+ if (!fSysroot.isEmpty()) {
+ SkString resolvedFilename;
+ resolvedFilename = fSysroot;
+ resolvedFilename += filename;
+ if (sk_exists(resolvedFilename.c_str(), kRead_SkFILE_Flag)) {
+ return true;
+ }
+ }
+ return sk_exists(filename, kRead_SkFILE_Flag);
+ }
+
+ static bool FontFamilyNameMatches(FcPattern* font, FcPattern* pattern) {
+ return AnyMatching(font, pattern, FC_FAMILY);
+ }
+
+ static bool FontContainsCharacter(FcPattern* font, uint32_t character) {
+ FcResult result;
+ FcCharSet* matchCharSet;
+ for (int charSetId = 0; ; ++charSetId) {
+ result = FcPatternGetCharSet(font, FC_CHARSET, charSetId, &matchCharSet);
+ if (FcResultNoId == result) {
+ break;
+ }
+ if (FcResultMatch != result) {
+ continue;
+ }
+ if (FcCharSetHasChar(matchCharSet, character)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (!familyName) {
+ return nullptr;
+ }
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ FcPattern* matchPattern;
+ SkAutoFcPattern strongPattern(nullptr);
+ if (familyName) {
+ strongPattern.reset(FcPatternDuplicate(pattern));
+ remove_weak(strongPattern, FC_FAMILY);
+ matchPattern = strongPattern;
+ } else {
+ matchPattern = pattern;
+ }
+
+ SkAutoFcFontSet matches;
+ // TODO: Some families have 'duplicates' due to symbolic links.
+ // The patterns are exactly the same except for the FC_FILE.
+ // It should be possible to collapse these patterns by normalizing.
+ static const FcSetName fcNameSet[] = { FcSetSystem, FcSetApplication };
+ for (int setIndex = 0; setIndex < (int)std::size(fcNameSet); ++setIndex) {
+ // Return value of FcConfigGetFonts must not be destroyed.
+ FcFontSet* allFonts(FcConfigGetFonts(fFC, fcNameSet[setIndex]));
+ if (nullptr == allFonts) {
+ continue;
+ }
+
+ for (int fontIndex = 0; fontIndex < allFonts->nfont; ++fontIndex) {
+ FcPattern* font = allFonts->fonts[fontIndex];
+ if (FontAccessible(font) && FontFamilyNameMatches(font, matchPattern)) {
+ FcFontSetAdd(matches, FcFontRenderPrepare(fFC, pattern, font));
+ }
+ }
+ }
+
+ return new StyleSet(sk_ref_sp(this), std::move(matches));
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const override
+ {
+ SkAutoFcPattern font([this, &familyName, &style]() {
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ FcPatternAddString(pattern, FC_FAMILY, (FcChar8*)familyName);
+ fcpattern_from_skfontstyle(style, pattern);
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ // We really want to match strong (preferred) and same (acceptable) only here.
+ // If a family name was specified, assume that any weak matches after the last strong
+ // match are weak (default) and ignore them.
+ // After substitution the pattern for 'sans-serif' looks like "wwwwwwwwwwwwwwswww" where
+ // there are many weak but preferred names, followed by defaults.
+ // So it is possible to have weakly matching but preferred names.
+ // In aliases, bindings are weak by default, so this is easy and common.
+ // If no family name was specified, we'll probably only get weak matches, but that's ok.
+ FcPattern* matchPattern;
+ SkAutoFcPattern strongPattern(nullptr);
+ if (familyName) {
+ strongPattern.reset(FcPatternDuplicate(pattern));
+ remove_weak(strongPattern, FC_FAMILY);
+ matchPattern = strongPattern;
+ } else {
+ matchPattern = pattern;
+ }
+
+ FcResult result;
+ SkAutoFcPattern font(FcFontMatch(fFC, pattern, &result));
+ if (!font || !FontAccessible(font) || !FontFamilyNameMatches(font, matchPattern)) {
+ font.reset();
+ }
+ return font;
+ }());
+ return createTypefaceFromFcPattern(std::move(font)).release();
+ }
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[],
+ int bcp47Count,
+ SkUnichar character) const override
+ {
+ SkAutoFcPattern font([&](){
+ FCLocker lock;
+
+ SkAutoFcPattern pattern;
+ if (familyName) {
+ FcValue familyNameValue;
+ familyNameValue.type = FcTypeString;
+ familyNameValue.u.s = reinterpret_cast<const FcChar8*>(familyName);
+ FcPatternAddWeak(pattern, FC_FAMILY, familyNameValue, FcFalse);
+ }
+ fcpattern_from_skfontstyle(style, pattern);
+
+ SkAutoFcCharSet charSet;
+ FcCharSetAddChar(charSet, character);
+ FcPatternAddCharSet(pattern, FC_CHARSET, charSet);
+
+ if (bcp47Count > 0) {
+ SkASSERT(bcp47);
+ SkAutoFcLangSet langSet;
+ for (int i = bcp47Count; i --> 0;) {
+ FcLangSetAdd(langSet, (const FcChar8*)bcp47[i]);
+ }
+ FcPatternAddLangSet(pattern, FC_LANG, langSet);
+ }
+
+ FcConfigSubstitute(fFC, pattern, FcMatchPattern);
+ FcDefaultSubstitute(pattern);
+
+ FcResult result;
+ SkAutoFcPattern font(FcFontMatch(fFC, pattern, &result));
+ if (!font || !FontAccessible(font) || !FontContainsCharacter(font, character)) {
+ font.reset();
+ }
+ return font;
+ }());
+ return createTypefaceFromFcPattern(std::move(font)).release();
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ return this->makeFromStream(std::move(stream),
+ SkFontArguments().setCollectionIndex(ttcIndex));
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override {
+ const size_t length = stream->getLength();
+ if (length <= 0 || (1u << 30) < length) {
+ return nullptr;
+ }
+ return SkTypeface_FreeType::MakeFromStream(std::move(stream), args);
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ return this->makeFromStream(std::make_unique<SkMemoryStream>(std::move(data)), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ return this->makeFromStream(SkStream::MakeFromFile(path), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override {
+ sk_sp<SkTypeface> typeface(this->matchFamilyStyle(familyName, style));
+ if (typeface) {
+ return typeface;
+ }
+
+ return sk_sp<SkTypeface>(this->matchFamilyStyle(nullptr, style));
+ }
+};
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_FontConfig(FcConfig* fc) {
+ return sk_make_sp<SkFontMgr_fontconfig>(fc);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp
new file mode 100644
index 0000000000..a011ec5c1e
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_fontconfig_factory.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkTypes.h"
+#include "include/ports/SkFontMgr_fontconfig.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_FontConfig(nullptr);
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_fuchsia.cpp b/gfx/skia/skia/src/ports/SkFontMgr_fuchsia.cpp
new file mode 100644
index 0000000000..87a0f88b76
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_fuchsia.cpp
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/ports/SkFontMgr_fuchsia.h"
+
+#include <fuchsia/fonts/cpp/fidl.h>
+#include <lib/zx/vmar.h>
+#include <strings.h>
+#include <memory>
+#include <unordered_map>
+
+#include "src/core/SkFontDescriptor.h"
+#include "src/ports/SkFontMgr_custom.h"
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkThreadAnnotations.h"
+#include "src/core/SkTypefaceCache.h"
+
+using namespace skia_private;
+
+// SkFuchsiaFontDataCache keeps track of SkData created from `fuchsia::mem::Buffer`, where each
+// buffer is identified by a unique identifier. It allows the same SkData instance to be shared
+// between all SkTypeface instances created from the same buffer.
+class SkFuchsiaFontDataCache : public SkRefCnt {
+public:
+ SkFuchsiaFontDataCache() = default;
+ // Every cached SkData must already have been released (and thus unregistered)
+ // by the time the cache is destroyed.
+ ~SkFuchsiaFontDataCache() { SkASSERT(fBuffers.empty()); }
+
+ // Returns the SkData for bufferId, mapping `buffer` into this process on a cache miss.
+ sk_sp<SkData> GetOrCreateSkData(int bufferId, const fuchsia::mem::Buffer& buffer);
+
+private:
+ // Passed to SkData::MakeWithProc; keeps the cache alive until the data is freed.
+ struct ReleaseSkDataContext {
+ sk_sp<SkFuchsiaFontDataCache> fCache;
+ int fBufferId;
+ };
+
+ // SkData release proc: unregisters the entry and unmaps the buffer.
+ static void ReleaseSkData(const void* buffer, void* context);
+ void OnBufferDeleted(int bufferId);
+
+ SkMutex fMutex;
+ // Non-owning pointers: entries remove themselves via ReleaseSkData when the
+ // last reference to the SkData is dropped.
+ std::unordered_map<int, SkData*> fBuffers SK_GUARDED_BY(fMutex);
+};
+
+// Returns a cached SkData for bufferId, or maps the VMO read-only and registers
+// a new SkData whose release proc unmaps it again.
+sk_sp<SkData> SkFuchsiaFontDataCache::GetOrCreateSkData(int bufferId,
+ const fuchsia::mem::Buffer& buffer) {
+ SkAutoMutexExclusive mutexLock(fMutex);
+
+ auto iter = fBuffers.find(bufferId);
+ if (iter != fBuffers.end()) {
+ return sk_ref_sp(iter->second);
+ }
+ // NOTE(review): `font_mgr` is never used below -- the release-context ref is taken
+ // separately via sk_ref_sp(this). This looks like dead code; consider removing.
+ auto font_mgr = sk_ref_sp(this);
+
+ uint64_t size = buffer.size;
+ uintptr_t mapped_addr = 0;
+ // Map the VMO read-only into our address space; unmapped later in OnBufferDeleted().
+ zx_status_t status =
+ zx::vmar::root_self()->map(ZX_VM_PERM_READ, 0, buffer.vmo, 0, size, &mapped_addr);
+ if (status != ZX_OK) return nullptr;
+
+ // The context holds a ref on this cache so it outlives the SkData.
+ auto context = new ReleaseSkDataContext{sk_ref_sp(this), bufferId};
+ auto data = SkData::MakeWithProc(
+ reinterpret_cast<void*>(mapped_addr), size, ReleaseSkData, context);
+ SkASSERT(data);
+
+ fBuffers[bufferId] = data.get();
+ return data;
+}
+
+// Invoked (via ReleaseSkData) when the last reference to a cached SkData drops:
+// removes the cache entry under the lock, then unmaps the VMO region outside it.
+void SkFuchsiaFontDataCache::OnBufferDeleted(int bufferId) {
+ zx_vaddr_t unmap_addr;
+ size_t unmap_size;
+ {
+ SkAutoMutexExclusive mutexLock(fMutex);
+ auto it = fBuffers.find(bufferId);
+ SkASSERT(it != fBuffers.end());
+ unmap_addr = reinterpret_cast<zx_vaddr_t>(it->second->data());
+ unmap_size = it->second->size();
+ fBuffers.erase(it);
+ }
+
+ zx::vmar::root_self()->unmap(unmap_addr, unmap_size);
+}
+
+// static
+// SkData release proc: forwards to OnBufferDeleted and frees the heap context.
+void SkFuchsiaFontDataCache::ReleaseSkData(const void* buffer, void* context) {
+ auto releaseSkDataContext = reinterpret_cast<ReleaseSkDataContext*>(context);
+ releaseSkDataContext->fCache->OnBufferDeleted(releaseSkDataContext->fBufferId);
+ delete releaseSkDataContext;
+}
+
+// Converts a Skia slant to the Fuchsia FIDL enum; anything unrecognized maps to UPRIGHT.
+fuchsia::fonts::Slant SkToFuchsiaSlant(SkFontStyle::Slant slant) {
+ switch (slant) {
+ case SkFontStyle::kOblique_Slant:
+ return fuchsia::fonts::Slant::OBLIQUE;
+ case SkFontStyle::kItalic_Slant:
+ return fuchsia::fonts::Slant::ITALIC;
+ case SkFontStyle::kUpright_Slant:
+ default:
+ return fuchsia::fonts::Slant::UPRIGHT;
+ }
+}
+
+// Inverse of SkToFuchsiaSlant; anything unrecognized maps to kUpright_Slant.
+SkFontStyle::Slant FuchsiaToSkSlant(fuchsia::fonts::Slant slant) {
+ switch (slant) {
+ case fuchsia::fonts::Slant::OBLIQUE:
+ return SkFontStyle::kOblique_Slant;
+ case fuchsia::fonts::Slant::ITALIC:
+ return SkFontStyle::kItalic_Slant;
+ case fuchsia::fonts::Slant::UPRIGHT:
+ default:
+ return SkFontStyle::kUpright_Slant;
+ }
+}
+
+// Converts a Skia width enum to the Fuchsia FIDL width enum.
+// The switch is exhaustive and deliberately has no default, so the compiler can
+// warn if a new enumerator is added; there is no fall-through return.
+fuchsia::fonts::Width SkToFuchsiaWidth(SkFontStyle::Width width) {
+ switch (width) {
+ case SkFontStyle::Width::kUltraCondensed_Width:
+ return fuchsia::fonts::Width::ULTRA_CONDENSED;
+ case SkFontStyle::Width::kExtraCondensed_Width:
+ return fuchsia::fonts::Width::EXTRA_CONDENSED;
+ case SkFontStyle::Width::kCondensed_Width:
+ return fuchsia::fonts::Width::CONDENSED;
+ case SkFontStyle::Width::kSemiCondensed_Width:
+ return fuchsia::fonts::Width::SEMI_CONDENSED;
+ case SkFontStyle::Width::kNormal_Width:
+ return fuchsia::fonts::Width::NORMAL;
+ case SkFontStyle::Width::kSemiExpanded_Width:
+ return fuchsia::fonts::Width::SEMI_EXPANDED;
+ case SkFontStyle::Width::kExpanded_Width:
+ return fuchsia::fonts::Width::EXPANDED;
+ case SkFontStyle::Width::kExtraExpanded_Width:
+ return fuchsia::fonts::Width::EXTRA_EXPANDED;
+ case SkFontStyle::Width::kUltraExpanded_Width:
+ return fuchsia::fonts::Width::ULTRA_EXPANDED;
+ }
+}
+
+// Tries to convert the given integer Skia style width value to the Fuchsia equivalent.
+//
+// On success, returns true. On failure, returns false, and `outFuchsiaWidth` is left untouched.
+bool SkToFuchsiaWidth(int skWidth, fuchsia::fonts::Width* outFuchsiaWidth) {
+ // Range-check before casting: only [kUltraCondensed, kUltraExpanded] is valid.
+ if (skWidth < SkFontStyle::Width::kUltraCondensed_Width ||
+ skWidth > SkFontStyle::Width::kUltraExpanded_Width) {
+ return false;
+ }
+ auto typedSkWidth = static_cast<SkFontStyle::Width>(skWidth);
+ *outFuchsiaWidth = SkToFuchsiaWidth(typedSkWidth);
+ return true;
+}
+
+// Inverse of SkToFuchsiaWidth; exhaustive switch, no default (see above).
+SkFontStyle::Width FuchsiaToSkWidth(fuchsia::fonts::Width width) {
+ switch (width) {
+ case fuchsia::fonts::Width::ULTRA_CONDENSED:
+ return SkFontStyle::Width::kUltraCondensed_Width;
+ case fuchsia::fonts::Width::EXTRA_CONDENSED:
+ return SkFontStyle::Width::kExtraCondensed_Width;
+ case fuchsia::fonts::Width::CONDENSED:
+ return SkFontStyle::Width::kCondensed_Width;
+ case fuchsia::fonts::Width::SEMI_CONDENSED:
+ return SkFontStyle::Width::kSemiCondensed_Width;
+ case fuchsia::fonts::Width::NORMAL:
+ return SkFontStyle::Width::kNormal_Width;
+ case fuchsia::fonts::Width::SEMI_EXPANDED:
+ return SkFontStyle::Width::kSemiExpanded_Width;
+ case fuchsia::fonts::Width::EXPANDED:
+ return SkFontStyle::Width::kExpanded_Width;
+ case fuchsia::fonts::Width::EXTRA_EXPANDED:
+ return SkFontStyle::Width::kExtraExpanded_Width;
+ case fuchsia::fonts::Width::ULTRA_EXPANDED:
+ return SkFontStyle::Width::kUltraExpanded_Width;
+ }
+}
+
+// Builds a Fuchsia Style2 table from a Skia font style. Weight and slant always
+// transfer; width is only set when the integer value converts cleanly.
+fuchsia::fonts::Style2 SkToFuchsiaStyle(const SkFontStyle& style) {
+ fuchsia::fonts::Style2 fuchsiaStyle;
+ fuchsiaStyle.set_slant(SkToFuchsiaSlant(style.slant())).set_weight(style.weight());
+
+ fuchsia::fonts::Width fuchsiaWidth = fuchsia::fonts::Width::NORMAL;
+ if (SkToFuchsiaWidth(style.width(), &fuchsiaWidth)) {
+ fuchsiaStyle.set_width(fuchsiaWidth);
+ }
+
+ return fuchsiaStyle;
+}
+
+// Maps CSS-style generic family names to the Fuchsia GenericFontFamily enum.
+// Matched case-insensitively by GetGenericFontFamilyByName() below.
+constexpr struct {
+ const char* fName;
+ fuchsia::fonts::GenericFontFamily fGenericFontFamily;
+} kGenericFontFamiliesByName[] = {{"serif", fuchsia::fonts::GenericFontFamily::SERIF},
+ {"sans", fuchsia::fonts::GenericFontFamily::SANS_SERIF},
+ {"sans-serif", fuchsia::fonts::GenericFontFamily::SANS_SERIF},
+ {"mono", fuchsia::fonts::GenericFontFamily::MONOSPACE},
+ {"monospace", fuchsia::fonts::GenericFontFamily::MONOSPACE},
+ {"cursive", fuchsia::fonts::GenericFontFamily::CURSIVE},
+ {"fantasy", fuchsia::fonts::GenericFontFamily::FANTASY},
+ {"system-ui", fuchsia::fonts::GenericFontFamily::SYSTEM_UI},
+ {"emoji", fuchsia::fonts::GenericFontFamily::EMOJI},
+ {"math", fuchsia::fonts::GenericFontFamily::MATH},
+ {"fangsong", fuchsia::fonts::GenericFontFamily::FANGSONG}};
+
+// Tries to find a generic font family with the given name. If none is found, returns false.
+// The comparison is case-insensitive (strcasecmp); a null name is not a match.
+bool GetGenericFontFamilyByName(const char* name,
+ fuchsia::fonts::GenericFontFamily* outGenericFamily) {
+ if (!name) return false;
+ for (auto& genericFamily : kGenericFontFamiliesByName) {
+ if (strcasecmp(genericFamily.fName, name) == 0) {
+ *outGenericFamily = genericFamily.fGenericFontFamily;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Identifies a concrete face: the provider buffer it came from plus the index
+// within that collection.
+struct TypefaceId {
+ uint32_t bufferId;
+ uint32_t ttcIndex;
+
+ // NOTE(review): the parameter is a non-const reference, which prevents comparing
+ // against temporaries/const objects; `const TypefaceId&` would be conventional.
+ bool operator==(TypefaceId& other) {
+ return std::tie(bufferId, ttcIndex) == std::tie(other.bufferId, other.ttcIndex);
+ }
+}
+
+// Note: no semicolon after the brace above -- `kNullTypefaceId` is a declarator of
+// the same declaration (i.e. `struct TypefaceId {...} constexpr kNullTypefaceId = {...};`),
+// so it has type TypefaceId. Sentinel used for typefaces not obtained from the provider.
+constexpr kNullTypefaceId = {0xFFFFFFFF, 0xFFFFFFFF};
+
+// FreeType-backed typeface that additionally remembers which provider buffer /
+// collection index it was created from, so the cache can find it again.
+class SkTypeface_Fuchsia : public SkTypeface_FreeTypeStream {
+public:
+ // NOTE(review): `familyName` is a by-value `const SkString` -- a const reference
+ // would avoid a copy; confirm callers before changing.
+ SkTypeface_Fuchsia(std::unique_ptr<SkFontData> fontData, const SkFontStyle& style,
+ bool isFixedPitch, const SkString familyName, TypefaceId id)
+ : SkTypeface_FreeTypeStream(std::move(fontData), familyName, style, isFixedPitch)
+ , fId(id) {}
+
+ // Cache key: identifies the provider buffer and ttc index of this face.
+ TypefaceId id() { return fId; }
+
+private:
+ TypefaceId fId;
+};
+
+// Scans the stream with the FreeType scanner (name, style, fixed pitch, variation
+// axes), resolves the requested variation position, and wraps everything in an
+// SkTypeface_Fuchsia tagged with `id`. Returns nullptr if the font cannot be scanned.
+sk_sp<SkTypeface> CreateTypefaceFromSkStream(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args, TypefaceId id) {
+ using Scanner = SkTypeface_FreeType::Scanner;
+ Scanner scanner;
+ bool isFixedPitch;
+ SkFontStyle style;
+ SkString name;
+ Scanner::AxisDefinitions axisDefinitions;
+ if (!scanner.scanFont(stream.get(), args.getCollectionIndex(), &name, &style, &isFixedPitch,
+ &axisDefinitions)) {
+ return nullptr;
+ }
+
+ // Map the caller-requested variation coordinates onto the font's actual axes.
+ const SkFontArguments::VariationPosition position = args.getVariationDesignPosition();
+ AutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.size());
+ Scanner::computeAxisValues(axisDefinitions, position, axisValues, name);
+
+ auto fontData = std::make_unique<SkFontData>(
+ std::move(stream), args.getCollectionIndex(), args.getPalette().index,
+ axisValues.get(), axisDefinitions.size(),
+ args.getPalette().overrides, args.getPalette().overrideCount);
+ return sk_make_sp<SkTypeface_Fuchsia>(std::move(fontData), style, isFixedPitch, name, id);
+}
+
+// Convenience wrapper: builds a typeface from raw data, using id.ttcIndex as the
+// collection index.
+sk_sp<SkTypeface> CreateTypefaceFromSkData(sk_sp<SkData> data, TypefaceId id) {
+ return CreateTypefaceFromSkStream(std::make_unique<SkMemoryStream>(std::move(data)),
+ SkFontArguments().setCollectionIndex(id.ttcIndex), id);
+}
+
+// SkFontMgr that resolves fonts through the fuchsia.fonts.Provider FIDL service.
+// Family enumeration is not supported; lookups go through FetchTypeface().
+class SkFontMgr_Fuchsia final : public SkFontMgr {
+public:
+ SkFontMgr_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider);
+ ~SkFontMgr_Fuchsia() override;
+
+protected:
+ // SkFontMgr overrides.
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+ SkFontStyleSet* onCreateStyleSet(int index) const override;
+ SkTypeface* onMatchFamilyStyle(const char familyName[], const SkFontStyle&) const override;
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override;
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>,
+ int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>,
+ const SkFontArguments&) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ friend class SkFontStyleSet_Fuchsia;
+
+ // Queries the provider for a typeface matching the given constraints; returns
+ // nullptr if the provider has no match or the FIDL call fails.
+ sk_sp<SkTypeface> FetchTypeface(const char familyName[], const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count, SkUnichar character,
+ bool allow_fallback, bool exact_style_match) const;
+
+ sk_sp<SkTypeface> GetOrCreateTypeface(TypefaceId id, const fuchsia::mem::Buffer& buffer) const;
+
+ // Synchronous FIDL channel to the font provider; mutable because the const
+ // match/lookup overrides issue (logically const) calls on it.
+ mutable fuchsia::fonts::ProviderSyncPtr fFontProvider;
+
+ // Shares mapped font buffers between typefaces (see SkFuchsiaFontDataCache).
+ sk_sp<SkFuchsiaFontDataCache> fBufferCache;
+
+ mutable SkMutex fCacheMutex;
+ mutable SkTypefaceCache fTypefaceCache SK_GUARDED_BY(fCacheMutex);
+};
+
+// Style set for one family, built from the styles reported by the provider.
+// Typefaces are fetched lazily and memoized in fTypefaces.
+class SkFontStyleSet_Fuchsia : public SkFontStyleSet {
+public:
+ SkFontStyleSet_Fuchsia(sk_sp<SkFontMgr_Fuchsia> font_manager, std::string familyName,
+ std::vector<SkFontStyle> styles)
+ : fFontManager(font_manager), fFamilyName(familyName), fStyles(styles) {}
+
+ ~SkFontStyleSet_Fuchsia() override = default;
+
+ int count() override { return fStyles.size(); }
+
+ void getStyle(int index, SkFontStyle* style, SkString* styleName) override {
+ SkASSERT(index >= 0 && index < static_cast<int>(fStyles.size()));
+ if (style) *style = fStyles[index];
+
+ // We don't have style names. Return an empty name.
+ if (styleName) styleName->reset();
+ }
+
+ // Returns a bare ref (caller owns one reference), fetching and caching the
+ // typeface on first use. NOTE(review): the lazy fill of fTypefaces is not
+ // guarded by a lock -- confirm single-threaded use by callers.
+ SkTypeface* createTypeface(int index) override {
+ SkASSERT(index >= 0 && index < static_cast<int>(fStyles.size()));
+
+ if (fTypefaces.empty()) fTypefaces.resize(fStyles.size());
+
+ if (!fTypefaces[index]) {
+ fTypefaces[index] = fFontManager->FetchTypeface(
+ fFamilyName.c_str(), fStyles[index], /*bcp47=*/nullptr,
+ /*bcp47Count=*/0, /*character=*/0,
+ /*allow_fallback=*/false, /*exact_style_match=*/true);
+ }
+
+ return SkSafeRef(fTypefaces[index].get());
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override { return matchStyleCSS3(pattern); }
+
+private:
+ sk_sp<SkFontMgr_Fuchsia> fFontManager;
+ std::string fFamilyName;
+ std::vector<SkFontStyle> fStyles;
+ std::vector<sk_sp<SkTypeface>> fTypefaces;
+};
+
+// Takes ownership of the synchronous provider channel and sets up the buffer cache.
+SkFontMgr_Fuchsia::SkFontMgr_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider)
+ : fFontProvider(std::move(provider)), fBufferCache(sk_make_sp<SkFuchsiaFontDataCache>()) {}
+
+SkFontMgr_Fuchsia::~SkFontMgr_Fuchsia() = default;
+
+int SkFontMgr_Fuchsia::onCountFamilies() const {
+ // Family enumeration is not supported.
+ return 0;
+}
+
+void SkFontMgr_Fuchsia::onGetFamilyName(int index, SkString* familyName) const {
+ // Family enumeration is not supported.
+ familyName->reset();
+}
+
+SkFontStyleSet* SkFontMgr_Fuchsia::onCreateStyleSet(int index) const {
+ // Family enumeration is not supported.
+ return nullptr;
+}
+
+// Asks the provider for the family's info (canonical name plus style list) and
+// wraps it in a style set. Returns nullptr (caller-owned raw pointer contract)
+// when the FIDL call fails or the family has no styles.
+SkFontStyleSet* SkFontMgr_Fuchsia::onMatchFamily(const char familyName[]) const {
+ fuchsia::fonts::FamilyName typedFamilyName;
+ typedFamilyName.name = familyName;
+
+ fuchsia::fonts::FontFamilyInfo familyInfo;
+ // GetFontFamilyInfo is a synchronous call; its status is compared against ZX_OK.
+ int result = fFontProvider->GetFontFamilyInfo(typedFamilyName, &familyInfo);
+ if (result != ZX_OK || !familyInfo.has_styles() || familyInfo.styles().empty()) return nullptr;
+
+ std::vector<SkFontStyle> styles;
+ for (auto& style : familyInfo.styles()) {
+ styles.push_back(SkFontStyle(style.weight(), FuchsiaToSkWidth(style.width()),
+ FuchsiaToSkSlant(style.slant())));
+ }
+
+ return new SkFontStyleSet_Fuchsia(sk_ref_sp(this), familyInfo.name().name, std::move(styles));
+}
+
+// Exact-family match (no fallback); returns a bare ref or nullptr.
+SkTypeface* SkFontMgr_Fuchsia::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const {
+ sk_sp<SkTypeface> typeface =
+ FetchTypeface(familyName, style, /*bcp47=*/nullptr,
+ /*bcp47Count=*/0, /*character=*/0,
+ /*allow_fallback=*/false, /*exact_style_match=*/false);
+ return typeface.release();
+}
+
+// Character/locale-aware match; fallback to other families is allowed.
+SkTypeface* SkFontMgr_Fuchsia::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const {
+ sk_sp<SkTypeface> typeface =
+ FetchTypeface(familyName, style, bcp47, bcp47Count, character, /*allow_fallback=*/true,
+ /*exact_style_match=*/false);
+ return typeface.release();
+}
+
+// The three onMakeFrom* overrides below funnel into makeFromStream, which is
+// handled by onMakeFromStreamArgs (provider-independent, local font data).
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onMakeFromData(sk_sp<SkData> data, int ttcIndex) const {
+ return makeFromStream(std::make_unique<SkMemoryStream>(std::move(data)), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> asset,
+ int ttcIndex) const {
+ return makeFromStream(std::move(asset), SkFontArguments().setCollectionIndex(ttcIndex));
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> asset,
+ const SkFontArguments& args) const {
+ // Local fonts are not provider-backed, so they get the null sentinel id.
+ return CreateTypefaceFromSkStream(std::move(asset), args, kNullTypefaceId);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onMakeFromFile(const char path[], int ttcIndex) const {
+ return makeFromStream(std::make_unique<SkFILEStream>(path), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::onLegacyMakeTypeface(const char familyName[],
+ SkFontStyle style) const {
+ return sk_sp<SkTypeface>(matchFamilyStyle(familyName, style));
+}
+
+// Builds a fuchsia.fonts.TypefaceRequest from the Skia-side constraints, issues
+// the synchronous GetTypeface call, and materializes the returned buffer as a
+// typeface (shared via the caches). Returns nullptr on FIDL failure or no match.
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::FetchTypeface(const char familyName[],
+ const SkFontStyle& style, const char* bcp47[],
+ int bcp47Count, SkUnichar character,
+ bool allow_fallback,
+ bool exact_style_match) const {
+ fuchsia::fonts::TypefaceQuery query;
+ query.set_style(SkToFuchsiaStyle(style));
+
+ if (bcp47Count > 0) {
+ std::vector<fuchsia::intl::LocaleId> languages{};
+ for (int i = 0; i < bcp47Count; i++) {
+ fuchsia::intl::LocaleId localeId;
+ localeId.id = bcp47[i];
+ languages.push_back(localeId);
+ }
+ query.set_languages(std::move(languages));
+ }
+
+ if (character) {
+ query.set_code_points({static_cast<uint32_t>(character)});
+ }
+
+ // If family name is not specified or is a generic family name (e.g. "serif"), then enable
+ // fallback; otherwise, pass the family name as is.
+ fuchsia::fonts::GenericFontFamily genericFontFamily =
+ fuchsia::fonts::GenericFontFamily::SANS_SERIF;
+ bool isGenericFontFamily = GetGenericFontFamilyByName(familyName, &genericFontFamily);
+ if (!familyName || *familyName == '\0' || isGenericFontFamily) {
+ if (isGenericFontFamily) {
+ query.set_fallback_family(genericFontFamily);
+ }
+ allow_fallback = true;
+ } else {
+ fuchsia::fonts::FamilyName typedFamilyName{};
+ typedFamilyName.name = familyName;
+ query.set_family(typedFamilyName);
+ }
+
+ fuchsia::fonts::TypefaceRequestFlags flags{};
+ if (!allow_fallback) flags |= fuchsia::fonts::TypefaceRequestFlags::EXACT_FAMILY;
+ if (exact_style_match) flags |= fuchsia::fonts::TypefaceRequestFlags::EXACT_STYLE;
+
+ fuchsia::fonts::TypefaceRequest request;
+ request.set_query(std::move(query));
+ request.set_flags(flags);
+
+ fuchsia::fonts::TypefaceResponse response;
+ // Synchronous FIDL call; status compared against ZX_OK.
+ int result = fFontProvider->GetTypeface(std::move(request), &response);
+ if (result != ZX_OK) return nullptr;
+
+ // The service may return an empty response if there is no font matching the request.
+ if (response.IsEmpty()) return nullptr;
+
+ return GetOrCreateTypeface(TypefaceId{response.buffer_id(), response.font_index()},
+ response.buffer());
+}
+
+// SkTypefaceCache predicate: matches a cached SkTypeface_Fuchsia by TypefaceId.
+static bool FindByTypefaceId(SkTypeface* cachedTypeface, void* ctx) {
+ SkTypeface_Fuchsia* cachedFuchsiaTypeface = static_cast<SkTypeface_Fuchsia*>(cachedTypeface);
+ TypefaceId* id = static_cast<TypefaceId*>(ctx);
+
+ return cachedFuchsiaTypeface->id() == *id;
+}
+
+// Looks up the typeface cache under the lock; on a miss, materializes the buffer
+// through the data cache and creates a new typeface.
+sk_sp<SkTypeface> SkFontMgr_Fuchsia::GetOrCreateTypeface(TypefaceId id,
+ const fuchsia::mem::Buffer& buffer) const {
+ SkAutoMutexExclusive mutexLock(fCacheMutex);
+
+ sk_sp<SkTypeface> cached = fTypefaceCache.findByProcAndRef(FindByTypefaceId, &id);
+ if (cached) return cached;
+
+ sk_sp<SkData> data = fBufferCache->GetOrCreateSkData(id.bufferId, buffer);
+ if (!data) return nullptr;
+
+ // NOTE(review): CreateTypefaceFromSkData can return nullptr (scan failure), and
+ // that result is added to the cache unconditionally -- confirm SkTypefaceCache::add
+ // tolerates null before relying on this path.
+ auto result = CreateTypefaceFromSkData(std::move(data), id);
+ fTypefaceCache.add(result);
+ return result;
+}
+
+// Public factory: creates a font manager bound to the given provider channel.
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_Fuchsia(fuchsia::fonts::ProviderSyncPtr provider) {
+ return sk_make_sp<SkFontMgr_Fuchsia>(std::move(provider));
+}
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_mac_ct.cpp b/gfx/skia/skia/src/ports/SkFontMgr_mac_ct.cpp
new file mode 100644
index 0000000000..f6b3ad61d1
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_mac_ct.cpp
@@ -0,0 +1,532 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#include <CoreText/CTFontManager.h>
+#include <CoreGraphics/CoreGraphics.h>
+#include <CoreFoundation/CoreFoundation.h>
+#include <dlfcn.h>
+#endif
+
+#include "include/core/SkData.h"
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/ports/SkFontMgr_mac_ct.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/ports/SkTypeface_mac_ct.h"
+
+#include <string.h>
+#include <memory>
+
+using namespace skia_private;
+
+// CTGetCoreTextVersion() is unavailable/deprecated when targeting iOS 14+ / macOS 11+,
+// so the version is fixed at compile time there; otherwise it is queried at runtime
+// (with a weak-link availability check).
+// NOTE(review): the macro below reads __MAC_OS_VERSION_MIN_REQUIRED; the Apple SDK
+// macro is __MAC_OS_X_VERSION_MIN_REQUIRED -- if undefined it evaluates as 0 and this
+// branch never triggers on Mac. Confirm against upstream Skia.
+#if (defined(SK_BUILD_FOR_IOS) && defined(__IPHONE_14_0) && \
+ __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_14_0) || \
+ (defined(SK_BUILD_FOR_MAC) && defined(__MAC_11_0) && \
+ __MAC_OS_VERSION_MIN_REQUIRED >= __MAC_11_0)
+
+static uint32_t SkGetCoreTextVersion() {
+ // If compiling for iOS 14.0+ or macOS 11.0+, the CoreText version number
+ // must be derived from the OS version number.
+ static const uint32_t kCoreTextVersionNEWER = 0x000D0000;
+ return kCoreTextVersionNEWER;
+}
+
+#else
+
+static uint32_t SkGetCoreTextVersion() {
+ // Check for CoreText availability before calling CTGetCoreTextVersion().
+ static const bool kCoreTextIsAvailable = (&CTGetCoreTextVersion != nullptr);
+ if (kCoreTextIsAvailable) {
+ return CTGetCoreTextVersion();
+ }
+
+ // Default to a value that's smaller than any known CoreText version.
+ static const uint32_t kCoreTextVersionUNKNOWN = 0;
+ return kCoreTextVersionUNKNOWN;
+}
+
+#endif
+
+// Wraps a UTF-8 C string in a CFString (owned); returns null on conversion failure.
+static SkUniqueCFRef<CFStringRef> make_CFString(const char s[]) {
+ return SkUniqueCFRef<CFStringRef>(CFStringCreateWithCString(nullptr, s, kCFStringEncodingUTF8));
+}
+
+/** Creates a typeface from a descriptor, searching the cache. */
+static sk_sp<SkTypeface> create_from_desc(CTFontDescriptorRef desc) {
+ // Size 0 keeps the descriptor's own size; nullptr = identity transform.
+ SkUniqueCFRef<CTFontRef> ctFont(CTFontCreateWithFontDescriptor(desc, 0, nullptr));
+ if (!ctFont) {
+ return nullptr;
+ }
+
+ return SkTypeface_Mac::Make(std::move(ctFont), OpszVariation(), nullptr);
+}
+
+// Builds a CTFontDescriptor from a family name plus Skia style, translating
+// weight/width/slant into CoreText trait dictionary entries. Some traits are
+// omitted on specific CoreText versions to work around OS bugs (see below).
+static SkUniqueCFRef<CTFontDescriptorRef> create_descriptor(const char familyName[],
+ const SkFontStyle& style) {
+ SkUniqueCFRef<CFMutableDictionaryRef> cfAttributes(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ SkUniqueCFRef<CFMutableDictionaryRef> cfTraits(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ if (!cfAttributes || !cfTraits) {
+ return nullptr;
+ }
+
+ // TODO(crbug.com/1018581) Some CoreText versions have errant behavior when
+ // certain traits set. Temporary workaround to omit specifying trait for those
+ // versions.
+ // Long term solution will involve serializing typefaces instead of relying upon
+ // this to match between processes.
+ //
+ // Compare CoreText.h in an up to date SDK for where these values come from.
+ static const uint32_t kSkiaLocalCTVersionNumber10_14 = 0x000B0000;
+ static const uint32_t kSkiaLocalCTVersionNumber10_15 = 0x000C0000;
+
+ // CTFontTraits (symbolic)
+ // macOS 14 and iOS 12 seem to behave badly when kCTFontSymbolicTrait is set.
+ // macOS 15 yields LastResort font instead of a good default font when
+ // kCTFontSymbolicTrait is set.
+ if (SkGetCoreTextVersion() < kSkiaLocalCTVersionNumber10_14) {
+ CTFontSymbolicTraits ctFontTraits = 0;
+ if (style.weight() >= SkFontStyle::kBold_Weight) {
+ ctFontTraits |= kCTFontBoldTrait;
+ }
+ if (style.slant() != SkFontStyle::kUpright_Slant) {
+ ctFontTraits |= kCTFontItalicTrait;
+ }
+ SkUniqueCFRef<CFNumberRef> cfFontTraits(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &ctFontTraits));
+ if (cfFontTraits) {
+ CFDictionaryAddValue(cfTraits.get(), kCTFontSymbolicTrait, cfFontTraits.get());
+ }
+ }
+
+ // CTFontTraits (weight)
+ CGFloat ctWeight = SkCTFontCTWeightForCSSWeight(style.weight());
+ SkUniqueCFRef<CFNumberRef> cfFontWeight(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberCGFloatType, &ctWeight));
+ if (cfFontWeight) {
+ CFDictionaryAddValue(cfTraits.get(), kCTFontWeightTrait, cfFontWeight.get());
+ }
+ // CTFontTraits (width)
+ CGFloat ctWidth = SkCTFontCTWidthForCSSWidth(style.width());
+ SkUniqueCFRef<CFNumberRef> cfFontWidth(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberCGFloatType, &ctWidth));
+ if (cfFontWidth) {
+ CFDictionaryAddValue(cfTraits.get(), kCTFontWidthTrait, cfFontWidth.get());
+ }
+ // CTFontTraits (slant)
+ // macOS 15 behaves badly when kCTFontSlantTrait is set.
+ if (SkGetCoreTextVersion() != kSkiaLocalCTVersionNumber10_15) {
+ CGFloat ctSlant = style.slant() == SkFontStyle::kUpright_Slant ? 0 : 1;
+ SkUniqueCFRef<CFNumberRef> cfFontSlant(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberCGFloatType, &ctSlant));
+ if (cfFontSlant) {
+ CFDictionaryAddValue(cfTraits.get(), kCTFontSlantTrait, cfFontSlant.get());
+ }
+ }
+ // CTFontTraits
+ CFDictionaryAddValue(cfAttributes.get(), kCTFontTraitsAttribute, cfTraits.get());
+
+ // CTFontFamilyName
+ if (familyName) {
+ SkUniqueCFRef<CFStringRef> cfFontName = make_CFString(familyName);
+ if (cfFontName) {
+ CFDictionaryAddValue(cfAttributes.get(), kCTFontFamilyNameAttribute, cfFontName.get());
+ }
+ }
+
+ return SkUniqueCFRef<CTFontDescriptorRef>(
+ CTFontDescriptorCreateWithAttributes(cfAttributes.get()));
+}
+
+// Same as the above function except style is included so we can
+// compare whether the created font conforms to the style. If not, we need
+// to recreate the font with symbolic traits. This is needed due to MacOS 10.11
+// font creation problem https://bugs.chromium.org/p/skia/issues/detail?id=8447.
+static sk_sp<SkTypeface> create_from_desc_and_style(CTFontDescriptorRef desc,
+ const SkFontStyle& style) {
+ SkUniqueCFRef<CTFontRef> ctFont(CTFontCreateWithFontDescriptor(desc, 0, nullptr));
+ if (!ctFont) {
+ return nullptr;
+ }
+
+ // Compare the traits CoreText actually produced with what the style requires.
+ const CTFontSymbolicTraits traits = CTFontGetSymbolicTraits(ctFont.get());
+ CTFontSymbolicTraits expected_traits = traits;
+ if (style.slant() != SkFontStyle::kUpright_Slant) {
+ expected_traits |= kCTFontItalicTrait;
+ }
+ if (style.weight() >= SkFontStyle::kBold_Weight) {
+ expected_traits |= kCTFontBoldTrait;
+ }
+
+ if (expected_traits != traits) {
+ // Retry with the missing traits forced on; keep the original font if that fails.
+ SkUniqueCFRef<CTFontRef> ctNewFont(CTFontCreateCopyWithSymbolicTraits(
+ ctFont.get(), 0, nullptr, expected_traits, expected_traits));
+ if (ctNewFont) {
+ ctFont = std::move(ctNewFont);
+ }
+ }
+
+ return SkTypeface_Mac::Make(std::move(ctFont), OpszVariation(), nullptr);
+}
+
+/** Creates a typeface from a name, searching the cache. */
+static sk_sp<SkTypeface> create_from_name(const char familyName[], const SkFontStyle& style) {
+ SkUniqueCFRef<CTFontDescriptorRef> desc = create_descriptor(familyName, style);
+ if (!desc) {
+ return nullptr;
+ }
+ return create_from_desc_and_style(desc.get(), style);
+}
+
+// Maps CSS generic family names to concrete macOS font families; returns the
+// input unchanged (same pointer) when no mapping applies. Comparison is
+// case-sensitive (strcmp).
+static const char* map_css_names(const char* name) {
+ static const struct {
+ const char* fFrom; // name the caller specified
+ const char* fTo; // "canonical" name we map to
+ } gPairs[] = {
+ { "sans-serif", "Helvetica" },
+ { "serif", "Times" },
+ { "monospace", "Courier" }
+ };
+
+ for (size_t i = 0; i < std::size(gPairs); i++) {
+ if (strcmp(name, gPairs[i].fFrom) == 0) {
+ return gPairs[i].fTo;
+ }
+ }
+ return name; // no change
+}
+
+namespace {
+
+// Copies the named string attribute of a descriptor into `value`; returns false
+// if the descriptor has no such attribute.
+static bool find_desc_str(CTFontDescriptorRef desc, CFStringRef name, SkString* value) {
+ SkUniqueCFRef<CFStringRef> ref((CFStringRef)CTFontDescriptorCopyAttribute(desc, name));
+ if (!ref) {
+ return false;
+ }
+ SkStringFromCFString(ref.get(), value);
+ return true;
+}
+
+static inline int sqr(int value) {
+ SkASSERT(SkAbs32(value) < 0x7FFF); // check for overflow
+ return value * value;
+}
+
+// We normalize each axis (weight, width, italic) to be base-900
+// Squared-distance between two styles; lower is a closer match, 0 is exact.
+static int compute_metric(const SkFontStyle& a, const SkFontStyle& b) {
+ return sqr(a.weight() - b.weight()) +
+ sqr((a.width() - b.width()) * 100) +
+ sqr((a.slant() != b.slant()) * 900);
+}
+
+// Attribute set passed to descriptor matching: only consider descriptors that
+// actually have a family name.
+static SkUniqueCFRef<CFSetRef> name_required() {
+ CFStringRef set_values[] = {kCTFontFamilyNameAttribute};
+ return SkUniqueCFRef<CFSetRef>(CFSetCreate(kCFAllocatorDefault,
+ reinterpret_cast<const void**>(set_values), std::size(set_values),
+ &kCFTypeSetCallBacks));
+}
+
+// Style set backed by the CTFontDescriptors matching one family descriptor.
+class SkFontStyleSet_Mac : public SkFontStyleSet {
+public:
+ SkFontStyleSet_Mac(CTFontDescriptorRef desc)
+ : fArray(CTFontDescriptorCreateMatchingFontDescriptors(desc, name_required().get()))
+ , fCount(0)
+ {
+ // Fall back to an empty array so the rest of the class never sees null.
+ if (!fArray) {
+ fArray.reset(CFArrayCreate(nullptr, nullptr, 0, nullptr));
+ }
+ fCount = SkToInt(CFArrayGetCount(fArray.get()));
+ }
+
+ int count() override {
+ return fCount;
+ }
+
+ void getStyle(int index, SkFontStyle* style, SkString* name) override {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray.get(), index);
+ if (style) {
+ *style = SkCTFontDescriptorGetSkFontStyle(desc, false);
+ }
+ if (name) {
+ // Style name is optional on a descriptor; empty when absent.
+ if (!find_desc_str(desc, kCTFontStyleNameAttribute, name)) {
+ name->reset();
+ }
+ }
+ }
+
+ // Returns a bare ref (caller owns one reference), or nullptr on creation failure.
+ SkTypeface* createTypeface(int index) override {
+ SkASSERT((unsigned)index < (unsigned)CFArrayGetCount(fArray.get()));
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray.get(), index);
+
+ return create_from_desc(desc).release();
+ }
+
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override {
+ if (0 == fCount) {
+ return nullptr;
+ }
+ return create_from_desc(findMatchingDesc(pattern)).release();
+ }
+
+private:
+ SkUniqueCFRef<CFArrayRef> fArray;
+ int fCount;
+
+ // Linear scan for the descriptor with the smallest style distance (see
+ // compute_metric); early-outs on an exact match.
+ CTFontDescriptorRef findMatchingDesc(const SkFontStyle& pattern) const {
+ int bestMetric = SK_MaxS32;
+ CTFontDescriptorRef bestDesc = nullptr;
+
+ for (int i = 0; i < fCount; ++i) {
+ CTFontDescriptorRef desc = (CTFontDescriptorRef)CFArrayGetValueAtIndex(fArray.get(), i);
+ int metric = compute_metric(pattern, SkCTFontDescriptorGetSkFontStyle(desc, false));
+ if (0 == metric) {
+ return desc;
+ }
+ if (metric < bestMetric) {
+ bestMetric = metric;
+ bestDesc = desc;
+ }
+ }
+ SkASSERT(bestDesc);
+ return bestDesc;
+ }
+};
+
+// Enumerates a font collection and returns the deduplicated, sorted list of
+// family names, emulating CTFontManagerCopyAvailableFontFamilyNames.
+SkUniqueCFRef<CFArrayRef> SkCopyAvailableFontFamilyNames(CTFontCollectionRef collection) {
+ // Create a CFArray of all available font descriptors.
+ SkUniqueCFRef<CFArrayRef> descriptors(
+ CTFontCollectionCreateMatchingFontDescriptors(collection));
+
+ // Copy the font family names of the font descriptors into a CFSet.
+ auto addDescriptorFamilyNameToSet = [](const void* value, void* context) -> void {
+ CTFontDescriptorRef descriptor = static_cast<CTFontDescriptorRef>(value);
+ CFMutableSetRef familyNameSet = static_cast<CFMutableSetRef>(context);
+ SkUniqueCFRef<CFTypeRef> familyName(
+ CTFontDescriptorCopyAttribute(descriptor, kCTFontFamilyNameAttribute));
+ if (familyName) {
+ CFSetAddValue(familyNameSet, familyName.get());
+ }
+ };
+ SkUniqueCFRef<CFMutableSetRef> familyNameSet(
+ CFSetCreateMutable(kCFAllocatorDefault, 0, &kCFTypeSetCallBacks));
+ CFArrayApplyFunction(descriptors.get(), CFRangeMake(0, CFArrayGetCount(descriptors.get())),
+ addDescriptorFamilyNameToSet, familyNameSet.get());
+
+ // Get the set of family names into an array; this does not retain.
+ CFIndex count = CFSetGetCount(familyNameSet.get());
+ std::unique_ptr<const void*[]> familyNames(new const void*[count]);
+ CFSetGetValues(familyNameSet.get(), familyNames.get());
+
+ // Sort the array of family names (to match CTFontManagerCopyAvailableFontFamilyNames).
+ std::sort(familyNames.get(), familyNames.get() + count, [](const void* a, const void* b){
+ return CFStringCompare((CFStringRef)a, (CFStringRef)b, 0) == kCFCompareLessThan;
+ });
+
+ // Copy family names into a CFArray; this does retain.
+ return SkUniqueCFRef<CFArrayRef>(
+ CFArrayCreate(kCFAllocatorDefault, familyNames.get(), count, &kCFTypeArrayCallBacks));
+}
+
+/** Use CTFontManagerCopyAvailableFontFamilyNames if available, simulate if not. */
+SkUniqueCFRef<CFArrayRef> SkCTFontManagerCopyAvailableFontFamilyNames() {
+#ifdef SK_BUILD_FOR_IOS
+ // On iOS the symbol may be absent; resolve it dynamically before using it.
+ using CTFontManagerCopyAvailableFontFamilyNamesProc = CFArrayRef (*)(void);
+ CTFontManagerCopyAvailableFontFamilyNamesProc ctFontManagerCopyAvailableFontFamilyNames;
+ *(void**)(&ctFontManagerCopyAvailableFontFamilyNames) =
+ dlsym(RTLD_DEFAULT, "CTFontManagerCopyAvailableFontFamilyNames");
+ if (ctFontManagerCopyAvailableFontFamilyNames) {
+ return SkUniqueCFRef<CFArrayRef>(ctFontManagerCopyAvailableFontFamilyNames());
+ }
+ SkUniqueCFRef<CTFontCollectionRef> collection(
+ CTFontCollectionCreateFromAvailableFonts(nullptr));
+ return SkUniqueCFRef<CFArrayRef>(SkCopyAvailableFontFamilyNames(collection.get()));
+#else
+ return SkUniqueCFRef<CFArrayRef>(CTFontManagerCopyAvailableFontFamilyNames());
+#endif
+}
+
+} // namespace
+
+class SkFontMgr_Mac : public SkFontMgr {
+ SkUniqueCFRef<CFArrayRef> fNames;
+ int fCount;
+
+ CFStringRef getFamilyNameAt(int index) const {
+ SkASSERT((unsigned)index < (unsigned)fCount);
+ return (CFStringRef)CFArrayGetValueAtIndex(fNames.get(), index);
+ }
+
+ static SkFontStyleSet* CreateSet(CFStringRef cfFamilyName) {
+ SkUniqueCFRef<CFMutableDictionaryRef> cfAttr(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ CFDictionaryAddValue(cfAttr.get(), kCTFontFamilyNameAttribute, cfFamilyName);
+
+ SkUniqueCFRef<CTFontDescriptorRef> desc(
+ CTFontDescriptorCreateWithAttributes(cfAttr.get()));
+ return new SkFontStyleSet_Mac(desc.get());
+ }
+
+public:
+ SkUniqueCFRef<CTFontCollectionRef> fFontCollection;
+ SkFontMgr_Mac(CTFontCollectionRef fontCollection)
+ : fNames(fontCollection ? SkCopyAvailableFontFamilyNames(fontCollection)
+ : SkCTFontManagerCopyAvailableFontFamilyNames())
+ , fCount(fNames ? SkToInt(CFArrayGetCount(fNames.get())) : 0)
+ , fFontCollection(fontCollection ? (CTFontCollectionRef)CFRetain(fontCollection)
+ : CTFontCollectionCreateFromAvailableFonts(nullptr))
+ {}
+
+protected:
+ int onCountFamilies() const override {
+ return fCount;
+ }
+
+ void onGetFamilyName(int index, SkString* familyName) const override {
+ if ((unsigned)index < (unsigned)fCount) {
+ SkStringFromCFString(this->getFamilyNameAt(index), familyName);
+ } else {
+ familyName->reset();
+ }
+ }
+
+ SkFontStyleSet* onCreateStyleSet(int index) const override {
+ if ((unsigned)index >= (unsigned)fCount) {
+ return nullptr;
+ }
+ return CreateSet(this->getFamilyNameAt(index));
+ }
+
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override {
+ if (!familyName) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CFStringRef> cfName = make_CFString(familyName);
+ return CreateSet(cfName.get());
+ }
+
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& style) const override {
+ SkUniqueCFRef<CTFontDescriptorRef> reqDesc = create_descriptor(familyName, style);
+ if (!familyName) {
+ return create_from_desc(reqDesc.get()).release();
+ }
+ SkUniqueCFRef<CTFontDescriptorRef> resolvedDesc(
+ CTFontDescriptorCreateMatchingFontDescriptor(reqDesc.get(), name_required().get()));
+ if (!resolvedDesc) {
+ return nullptr;
+ }
+ return create_from_desc(resolvedDesc.get()).release();
+ }
+
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override {
+ SkUniqueCFRef<CTFontDescriptorRef> desc = create_descriptor(familyName, style);
+ SkUniqueCFRef<CTFontRef> familyFont(CTFontCreateWithFontDescriptor(desc.get(), 0, nullptr));
+
+ // kCFStringEncodingUTF32 is BE unless there is a BOM.
+ // Since there is no machine endian option, explicitly state machine endian.
+#ifdef SK_CPU_LENDIAN
+ constexpr CFStringEncoding encoding = kCFStringEncodingUTF32LE;
+#else
+ constexpr CFStringEncoding encoding = kCFStringEncodingUTF32BE;
+#endif
+ SkUniqueCFRef<CFStringRef> string(CFStringCreateWithBytes(
+ kCFAllocatorDefault, reinterpret_cast<const UInt8 *>(&character), sizeof(character),
+ encoding, false));
+ // If 0xD800 <= codepoint <= 0xDFFF || 0x10FFFF < codepoint 'string' may be nullptr.
+ // No font should be covering such codepoints (even the magic fallback font).
+ if (!string) {
+ return nullptr;
+ }
+ CFRange range = CFRangeMake(0, CFStringGetLength(string.get())); // in UniChar units.
+ SkUniqueCFRef<CTFontRef> fallbackFont(
+ CTFontCreateForString(familyFont.get(), string.get(), range));
+ return SkTypeface_Mac::Make(std::move(fallbackFont), OpszVariation(), nullptr).release();
+ }
+
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData> data, int ttcIndex) const override {
+ return this->makeFromStream(
+ std::unique_ptr<SkStreamAsset>(new SkMemoryStream(std::move(data))), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const override {
+ return this->makeFromStream(std::move(stream),
+ SkFontArguments().setCollectionIndex(ttcIndex));
+ }
+
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const override {
+ return SkTypeface_Mac::MakeFromStream(std::move(stream), args);
+ }
+
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override {
+ sk_sp<SkData> data = SkData::MakeFromFileName(path);
+ if (!data) {
+ return nullptr;
+ }
+
+ return this->onMakeFromData(std::move(data), ttcIndex);
+ }
+
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle style) const override {
+ if (familyName) {
+ familyName = map_css_names(familyName);
+ }
+
+ sk_sp<SkTypeface> face = create_from_name(familyName, style);
+ if (face) {
+ return face;
+ }
+
+ static SkTypeface* gDefaultFace;
+ static SkOnce lookupDefault;
+ static const char FONT_DEFAULT_NAME[] = "Lucida Sans";
+ lookupDefault([]{
+ gDefaultFace = create_from_name(FONT_DEFAULT_NAME, SkFontStyle()).release();
+ });
+ return sk_ref_sp(gDefaultFace);
+ }
+};
+
+sk_sp<SkFontMgr> SkFontMgr_New_CoreText(CTFontCollectionRef fontCollection) {
+ return sk_make_sp<SkFontMgr_Mac>(fontCollection);
+}
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_mac_ct_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_mac_ct_factory.cpp
new file mode 100644
index 0000000000..ef834e4af2
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_mac_ct_factory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkFontMgr_mac_ct.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_CoreText(nullptr);
+}
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp b/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp
new file mode 100644
index 0000000000..134364129e
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_win_dw.cpp
@@ -0,0 +1,956 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/win/SkDWriteNTDDI_VERSION.h"
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkFontMgr.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTPin.h"
+#include "src/base/SkEndian.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkDWriteFontFileStream.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkObjBase.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+#include <dwrite_2.h>
+#include <dwrite_3.h>
+
+using namespace skia_private;
+
+namespace {
+
+// Korean fonts Gulim, Dotum, Batang, Gungsuh have bitmap strikes that get
+// artificially emboldened by Windows without antialiasing. Korean users prefer
+// these over the synthetic boldening performed by Skia. So let's make an
+// exception for fonts with bitmap strikes and allow passing through Windows
+// simulations for those, until Skia provides more control over simulations in
+// font matching, see https://crbug.com/1258378
+bool HasBitmapStrikes(const SkTScopedComPtr<IDWriteFont>& font) {
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRB(font->CreateFontFace(&fontFace));
+
+ AutoDWriteTable ebdtTable(fontFace.get(),
+ SkEndian_SwapBE32(SkSetFourByteTag('E', 'B', 'D', 'T')));
+ return ebdtTable.fExists;
+}
+
+// Iterate calls to GetFirstMatchingFont incrementally removing bold or italic
+// styling that can trigger the simulations. Implementing it this way gets us a
+// IDWriteFont that can be used as before and has the correct information on its
+// own style. Stripping simulations from IDWriteFontFace is possible via
+// IDWriteFontList1, IDWriteFontFaceReference and CreateFontFace, but this way
+// we won't have a matching IDWriteFont which is still used in get_style().
+HRESULT FirstMatchingFontWithoutSimulations(const SkTScopedComPtr<IDWriteFontFamily>& family,
+ DWriteStyle dwStyle,
+ SkTScopedComPtr<IDWriteFont>& font) {
+ bool noSimulations = false;
+ while (!noSimulations) {
+ SkTScopedComPtr<IDWriteFont> searchFont;
+ HR(family->GetFirstMatchingFont(
+ dwStyle.fWeight, dwStyle.fWidth, dwStyle.fSlant, &searchFont));
+ DWRITE_FONT_SIMULATIONS simulations = searchFont->GetSimulations();
+ // If we still get simulations even though we're not asking for bold or
+ // italic, we can't help it and exit the loop.
+
+#ifdef SK_WIN_FONTMGR_NO_SIMULATIONS
+ noSimulations = simulations == DWRITE_FONT_SIMULATIONS_NONE ||
+ (dwStyle.fWeight == DWRITE_FONT_WEIGHT_REGULAR &&
+ dwStyle.fSlant == DWRITE_FONT_STYLE_NORMAL) ||
+ HasBitmapStrikes(searchFont);
+#else
+ noSimulations = true;
+#endif
+ if (noSimulations) {
+ font = std::move(searchFont);
+ break;
+ }
+ if (simulations & DWRITE_FONT_SIMULATIONS_BOLD) {
+ dwStyle.fWeight = DWRITE_FONT_WEIGHT_REGULAR;
+ continue;
+ }
+ if (simulations & DWRITE_FONT_SIMULATIONS_OBLIQUE) {
+ dwStyle.fSlant = DWRITE_FONT_STYLE_NORMAL;
+ continue;
+ }
+ }
+ return S_OK;
+}
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class SkFontMgr_DirectWrite : public SkFontMgr {
+public:
+ /** localeNameLength and defaultFamilyNameLength must include the null terminator. */
+ SkFontMgr_DirectWrite(IDWriteFactory* factory, IDWriteFontCollection* fontCollection,
+ IDWriteFontFallback* fallback,
+ const WCHAR* localeName, int localeNameLength,
+ const WCHAR* defaultFamilyName, int defaultFamilyNameLength)
+ : fFactory(SkRefComPtr(factory))
+ , fFontFallback(SkSafeRefComPtr(fallback))
+ , fFontCollection(SkRefComPtr(fontCollection))
+ , fLocaleName(localeNameLength)
+ , fDefaultFamilyName(defaultFamilyNameLength)
+ {
+ memcpy(fLocaleName.get(), localeName, localeNameLength * sizeof(WCHAR));
+ memcpy(fDefaultFamilyName.get(), defaultFamilyName, defaultFamilyNameLength*sizeof(WCHAR));
+ }
+
+protected:
+ int onCountFamilies() const override;
+ void onGetFamilyName(int index, SkString* familyName) const override;
+ SkFontStyleSet* onCreateStyleSet(int index) const override;
+ SkFontStyleSet* onMatchFamily(const char familyName[]) const override;
+ SkTypeface* onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const override;
+ SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle&,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override;
+ sk_sp<SkTypeface> onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>, const SkFontArguments&) const override;
+ sk_sp<SkTypeface> onMakeFromData(sk_sp<SkData>, int ttcIndex) const override;
+ sk_sp<SkTypeface> onMakeFromFile(const char path[], int ttcIndex) const override;
+ sk_sp<SkTypeface> onLegacyMakeTypeface(const char familyName[], SkFontStyle) const override;
+
+private:
+ HRESULT getByFamilyName(const WCHAR familyName[], IDWriteFontFamily** fontFamily) const;
+ sk_sp<SkTypeface> fallback(const WCHAR* dwFamilyName, DWriteStyle,
+ const WCHAR* dwBcp47, UINT32 character) const;
+ sk_sp<SkTypeface> layoutFallback(const WCHAR* dwFamilyName, DWriteStyle,
+ const WCHAR* dwBcp47, UINT32 character) const;
+
+ /** Creates a typeface using a typeface cache. */
+ sk_sp<SkTypeface> makeTypefaceFromDWriteFont(IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily) const;
+
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFontFallback> fFontFallback;
+ SkTScopedComPtr<IDWriteFontCollection> fFontCollection;
+ SkSMallocWCHAR fLocaleName;
+ SkSMallocWCHAR fDefaultFamilyName;
+ mutable SkMutex fTFCacheMutex;
+ mutable SkTypefaceCache fTFCache;
+
+ friend class SkFontStyleSet_DirectWrite;
+ friend class FontFallbackRenderer;
+};
+
+class SkFontStyleSet_DirectWrite : public SkFontStyleSet {
+public:
+ SkFontStyleSet_DirectWrite(const SkFontMgr_DirectWrite* fontMgr,
+ IDWriteFontFamily* fontFamily)
+ : fFontMgr(SkRef(fontMgr))
+ , fFontFamily(SkRefComPtr(fontFamily))
+ { }
+
+ int count() override;
+ void getStyle(int index, SkFontStyle* fs, SkString* styleName) override;
+ SkTypeface* createTypeface(int index) override;
+ SkTypeface* matchStyle(const SkFontStyle& pattern) override;
+
+private:
+ sk_sp<const SkFontMgr_DirectWrite> fFontMgr;
+ SkTScopedComPtr<IDWriteFontFamily> fFontFamily;
+};
+
+static HRESULT are_same(IUnknown* a, IUnknown* b, bool& same) {
+ SkTScopedComPtr<IUnknown> iunkA;
+ HRM(a->QueryInterface(&iunkA), "Failed to QI<IUnknown> for a.");
+
+ SkTScopedComPtr<IUnknown> iunkB;
+ HRM(b->QueryInterface(&iunkB), "Failed to QI<IUnknown> for b.");
+
+ same = (iunkA.get() == iunkB.get());
+ return S_OK;
+}
+
+struct ProtoDWriteTypeface {
+ IDWriteFontFace* fDWriteFontFace;
+ IDWriteFont* fDWriteFont;
+ IDWriteFontFamily* fDWriteFontFamily;
+};
+
+static bool FindByDWriteFont(SkTypeface* cached, void* ctx) {
+ DWriteFontTypeface* cshFace = reinterpret_cast<DWriteFontTypeface*>(cached);
+ ProtoDWriteTypeface* ctxFace = reinterpret_cast<ProtoDWriteTypeface*>(ctx);
+
+ // IDWriteFontFace5 introduced both Equals and HasVariations
+ SkTScopedComPtr<IDWriteFontFace5> cshFontFace5;
+ SkTScopedComPtr<IDWriteFontFace5> ctxFontFace5;
+ cshFace->fDWriteFontFace->QueryInterface(&cshFontFace5);
+ ctxFace->fDWriteFontFace->QueryInterface(&ctxFontFace5);
+ if (cshFontFace5 && ctxFontFace5) {
+ return cshFontFace5->Equals(ctxFontFace5.get());
+ }
+
+ bool same;
+
+ //Check to see if the two fonts are identical.
+ HRB(are_same(cshFace->fDWriteFont.get(), ctxFace->fDWriteFont, same));
+ if (same) {
+ return true;
+ }
+
+ HRB(are_same(cshFace->fDWriteFontFace.get(), ctxFace->fDWriteFontFace, same));
+ if (same) {
+ return true;
+ }
+
+ //Check if the two fonts share the same loader and have the same key.
+ UINT32 cshNumFiles;
+ UINT32 ctxNumFiles;
+ HRB(cshFace->fDWriteFontFace->GetFiles(&cshNumFiles, nullptr));
+ HRB(ctxFace->fDWriteFontFace->GetFiles(&ctxNumFiles, nullptr));
+ if (cshNumFiles != ctxNumFiles) {
+ return false;
+ }
+
+ SkTScopedComPtr<IDWriteFontFile> cshFontFile;
+ SkTScopedComPtr<IDWriteFontFile> ctxFontFile;
+ HRB(cshFace->fDWriteFontFace->GetFiles(&cshNumFiles, &cshFontFile));
+ HRB(ctxFace->fDWriteFontFace->GetFiles(&ctxNumFiles, &ctxFontFile));
+
+ //for (each file) { //we currently only admit fonts from one file.
+ SkTScopedComPtr<IDWriteFontFileLoader> cshFontFileLoader;
+ SkTScopedComPtr<IDWriteFontFileLoader> ctxFontFileLoader;
+ HRB(cshFontFile->GetLoader(&cshFontFileLoader));
+ HRB(ctxFontFile->GetLoader(&ctxFontFileLoader));
+ HRB(are_same(cshFontFileLoader.get(), ctxFontFileLoader.get(), same));
+ if (!same) {
+ return false;
+ }
+ //}
+
+ const void* cshRefKey;
+ UINT32 cshRefKeySize;
+ const void* ctxRefKey;
+ UINT32 ctxRefKeySize;
+ HRB(cshFontFile->GetReferenceKey(&cshRefKey, &cshRefKeySize));
+ HRB(ctxFontFile->GetReferenceKey(&ctxRefKey, &ctxRefKeySize));
+ if (cshRefKeySize != ctxRefKeySize) {
+ return false;
+ }
+ if (0 != memcmp(cshRefKey, ctxRefKey, ctxRefKeySize)) {
+ return false;
+ }
+
+ //TODO: better means than comparing name strings?
+ //NOTE: .ttc and fake bold/italic will end up here.
+ SkTScopedComPtr<IDWriteLocalizedStrings> cshFamilyNames;
+ SkTScopedComPtr<IDWriteLocalizedStrings> cshFaceNames;
+ HRB(cshFace->fDWriteFontFamily->GetFamilyNames(&cshFamilyNames));
+ HRB(cshFace->fDWriteFont->GetFaceNames(&cshFaceNames));
+ UINT32 cshFamilyNameLength;
+ UINT32 cshFaceNameLength;
+ HRB(cshFamilyNames->GetStringLength(0, &cshFamilyNameLength));
+ HRB(cshFaceNames->GetStringLength(0, &cshFaceNameLength));
+
+ SkTScopedComPtr<IDWriteLocalizedStrings> ctxFamilyNames;
+ SkTScopedComPtr<IDWriteLocalizedStrings> ctxFaceNames;
+ HRB(ctxFace->fDWriteFontFamily->GetFamilyNames(&ctxFamilyNames));
+ HRB(ctxFace->fDWriteFont->GetFaceNames(&ctxFaceNames));
+ UINT32 ctxFamilyNameLength;
+ UINT32 ctxFaceNameLength;
+ HRB(ctxFamilyNames->GetStringLength(0, &ctxFamilyNameLength));
+ HRB(ctxFaceNames->GetStringLength(0, &ctxFaceNameLength));
+
+ if (cshFamilyNameLength != ctxFamilyNameLength ||
+ cshFaceNameLength != ctxFaceNameLength)
+ {
+ return false;
+ }
+
+ SkSMallocWCHAR cshFamilyName(cshFamilyNameLength+1);
+ SkSMallocWCHAR cshFaceName(cshFaceNameLength+1);
+ HRB(cshFamilyNames->GetString(0, cshFamilyName.get(), cshFamilyNameLength+1));
+ HRB(cshFaceNames->GetString(0, cshFaceName.get(), cshFaceNameLength+1));
+
+ SkSMallocWCHAR ctxFamilyName(ctxFamilyNameLength+1);
+ SkSMallocWCHAR ctxFaceName(ctxFaceNameLength+1);
+ HRB(ctxFamilyNames->GetString(0, ctxFamilyName.get(), ctxFamilyNameLength+1));
+ HRB(ctxFaceNames->GetString(0, ctxFaceName.get(), ctxFaceNameLength+1));
+
+ return wcscmp(cshFamilyName.get(), ctxFamilyName.get()) == 0 &&
+ wcscmp(cshFaceName.get(), ctxFaceName.get()) == 0;
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::makeTypefaceFromDWriteFont(
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily) const {
+ SkAutoMutexExclusive ama(fTFCacheMutex);
+ ProtoDWriteTypeface spec = { fontFace, font, fontFamily };
+ sk_sp<SkTypeface> face = fTFCache.findByProcAndRef(FindByDWriteFont, &spec);
+ if (nullptr == face) {
+ face = DWriteFontTypeface::Make(fFactory.get(), fontFace, font, fontFamily, nullptr,
+ SkFontArguments::Palette{0, nullptr, 0});
+ if (face) {
+ fTFCache.add(face);
+ }
+ }
+ return face;
+}
+
+int SkFontMgr_DirectWrite::onCountFamilies() const {
+ return fFontCollection->GetFontFamilyCount();
+}
+
+void SkFontMgr_DirectWrite::onGetFamilyName(int index, SkString* familyName) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRVM(fFontCollection->GetFontFamily(index, &fontFamily), "Could not get requested family.");
+
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRVM(fontFamily->GetFamilyNames(&familyNames), "Could not get family names.");
+
+ sk_get_locale_string(familyNames.get(), fLocaleName.get(), familyName);
+}
+
+SkFontStyleSet* SkFontMgr_DirectWrite::onCreateStyleSet(int index) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(fFontCollection->GetFontFamily(index, &fontFamily), "Could not get requested family.");
+
+ return new SkFontStyleSet_DirectWrite(this, fontFamily.get());
+}
+
+SkFontStyleSet* SkFontMgr_DirectWrite::onMatchFamily(const char familyName[]) const {
+ if (!familyName) {
+ return nullptr;
+ }
+
+ SkSMallocWCHAR dwFamilyName;
+ HRN(sk_cstring_to_wchar(familyName, &dwFamilyName));
+
+ UINT32 index;
+ BOOL exists;
+ HRNM(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.");
+ if (!exists) {
+ return nullptr;
+ }
+
+ return this->onCreateStyleSet(index);
+}
+
+SkTypeface* SkFontMgr_DirectWrite::onMatchFamilyStyle(const char familyName[],
+ const SkFontStyle& fontstyle) const {
+ sk_sp<SkFontStyleSet> sset(this->matchFamily(familyName));
+ return sset->matchStyle(fontstyle);
+}
+
+class FontFallbackRenderer : public IDWriteTextRenderer {
+public:
+ FontFallbackRenderer(const SkFontMgr_DirectWrite* outer, UINT32 character)
+ : fRefCount(1), fOuter(SkSafeRef(outer)), fCharacter(character), fResolvedTypeface(nullptr) {
+ }
+
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(IID const& riid, void** ppvObject) override {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWritePixelSnapping) == riid ||
+ __uuidof(IDWriteTextRenderer) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+ return E_FAIL;
+ }
+
+ SK_STDMETHODIMP_(ULONG) AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ SK_STDMETHODIMP_(ULONG) Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+ // IDWriteTextRenderer methods
+ SK_STDMETHODIMP DrawGlyphRun(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_MEASURING_MODE measuringMode,
+ DWRITE_GLYPH_RUN const* glyphRun,
+ DWRITE_GLYPH_RUN_DESCRIPTION const* glyphRunDescription,
+ IUnknown* clientDrawingEffect) override
+ {
+ if (!glyphRun->fontFace) {
+ HRM(E_INVALIDARG, "Glyph run without font face.");
+ }
+
+ SkTScopedComPtr<IDWriteFont> font;
+ HRM(fOuter->fFontCollection->GetFontFromFontFace(glyphRun->fontFace, &font),
+ "Could not get font from font face.");
+
+ // It is possible that the font passed does not actually have the requested character,
+ // due to no font being found and getting the fallback font.
+ // Check that the font actually contains the requested character.
+ BOOL exists;
+ HRM(font->HasCharacter(fCharacter, &exists), "Could not find character.");
+
+ if (exists) {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRM(font->GetFontFamily(&fontFamily), "Could not get family.");
+ fResolvedTypeface = fOuter->makeTypefaceFromDWriteFont(glyphRun->fontFace,
+ font.get(),
+ fontFamily.get());
+ fHasSimulations = (font->GetSimulations() != DWRITE_FONT_SIMULATIONS_NONE) &&
+ !HasBitmapStrikes(font);
+ }
+
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP DrawUnderline(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_UNDERLINE const* underline,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ SK_STDMETHODIMP DrawStrikethrough(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_STRIKETHROUGH const* strikethrough,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ SK_STDMETHODIMP DrawInlineObject(
+ void* clientDrawingContext,
+ FLOAT originX,
+ FLOAT originY,
+ IDWriteInlineObject* inlineObject,
+ BOOL isSideways,
+ BOOL isRightToLeft,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ // IDWritePixelSnapping methods
+ SK_STDMETHODIMP IsPixelSnappingDisabled(
+ void* clientDrawingContext,
+ BOOL* isDisabled) override
+ {
+ *isDisabled = FALSE;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetCurrentTransform(
+ void* clientDrawingContext,
+ DWRITE_MATRIX* transform) override
+ {
+ const DWRITE_MATRIX ident = { 1.0, 0.0, 0.0, 1.0, 0.0, 0.0 };
+ *transform = ident;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetPixelsPerDip(
+ void* clientDrawingContext,
+ FLOAT* pixelsPerDip) override
+ {
+ *pixelsPerDip = 1.0f;
+ return S_OK;
+ }
+
+ sk_sp<SkTypeface> ConsumeFallbackTypeface() { return std::move(fResolvedTypeface); }
+
+ bool FallbackTypefaceHasSimulations() { return fHasSimulations; }
+
+private:
+ virtual ~FontFallbackRenderer() { }
+
+ ULONG fRefCount;
+ sk_sp<const SkFontMgr_DirectWrite> fOuter;
+ UINT32 fCharacter;
+ sk_sp<SkTypeface> fResolvedTypeface;
+ bool fHasSimulations{false};
+};
+
+class FontFallbackSource : public IDWriteTextAnalysisSource {
+public:
+ FontFallbackSource(const WCHAR* string, UINT32 length, const WCHAR* locale,
+ IDWriteNumberSubstitution* numberSubstitution)
+ : fRefCount(1)
+ , fString(string)
+ , fLength(length)
+ , fLocale(locale)
+ , fNumberSubstitution(numberSubstitution)
+ { }
+
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(IID const& riid, void** ppvObject) override {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWriteTextAnalysisSource) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+ return E_FAIL;
+ }
+
+ SK_STDMETHODIMP_(ULONG) AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ SK_STDMETHODIMP_(ULONG) Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+ // IDWriteTextAnalysisSource methods
+ SK_STDMETHODIMP GetTextAtPosition(
+ UINT32 textPosition,
+ WCHAR const** textString,
+ UINT32* textLength) override
+ {
+ if (fLength <= textPosition) {
+ *textString = nullptr;
+ *textLength = 0;
+ return S_OK;
+ }
+ *textString = fString + textPosition;
+ *textLength = fLength - textPosition;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetTextBeforePosition(
+ UINT32 textPosition,
+ WCHAR const** textString,
+ UINT32* textLength) override
+ {
+ if (textPosition < 1 || fLength <= textPosition) {
+ *textString = nullptr;
+ *textLength = 0;
+ return S_OK;
+ }
+ *textString = fString;
+ *textLength = textPosition;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP_(DWRITE_READING_DIRECTION) GetParagraphReadingDirection() override {
+ // TODO: this is also interesting.
+ return DWRITE_READING_DIRECTION_LEFT_TO_RIGHT;
+ }
+
+ SK_STDMETHODIMP GetLocaleName(
+ UINT32 textPosition,
+ UINT32* textLength,
+ WCHAR const** localeName) override
+ {
+ *localeName = fLocale;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetNumberSubstitution(
+ UINT32 textPosition,
+ UINT32* textLength,
+ IDWriteNumberSubstitution** numberSubstitution) override
+ {
+ *numberSubstitution = fNumberSubstitution;
+ return S_OK;
+ }
+
+private:
+ virtual ~FontFallbackSource() { }
+
+ ULONG fRefCount;
+ const WCHAR* fString;
+ UINT32 fLength;
+ const WCHAR* fLocale;
+ IDWriteNumberSubstitution* fNumberSubstitution;
+};
+
+SkTypeface* SkFontMgr_DirectWrite::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const {
+ DWriteStyle dwStyle(style);
+
+ const WCHAR* dwFamilyName = nullptr;
+ SkSMallocWCHAR dwFamilyNameLocal;
+ if (familyName) {
+ HRN(sk_cstring_to_wchar(familyName, &dwFamilyNameLocal));
+ dwFamilyName = dwFamilyNameLocal;
+ }
+
+ const SkSMallocWCHAR* dwBcp47;
+ SkSMallocWCHAR dwBcp47Local;
+ if (bcp47Count < 1) {
+ dwBcp47 = &fLocaleName;
+ } else {
+ // TODO: support fallback stack.
+ // TODO: DirectWrite supports 'zh-CN' or 'zh-Hans', but 'zh' misses completely
+ // and may produce a Japanese font.
+ HRN(sk_cstring_to_wchar(bcp47[bcp47Count - 1], &dwBcp47Local));
+ dwBcp47 = &dwBcp47Local;
+ }
+
+ if (fFontFallback) {
+ return this->fallback(dwFamilyName, dwStyle, dwBcp47->get(), character).release();
+ }
+
+ // LayoutFallback may use the system font collection for fallback.
+ return this->layoutFallback(dwFamilyName, dwStyle, dwBcp47->get(), character).release();
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::fallback(const WCHAR* dwFamilyName,
+ DWriteStyle dwStyle,
+ const WCHAR* dwBcp47,
+ UINT32 character) const {
+ WCHAR str[16];
+ UINT32 strLen = SkTo<UINT32>(SkUTF::ToUTF16(character, reinterpret_cast<uint16_t*>(str)));
+
+ if (!fFontFallback) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteNumberSubstitution> numberSubstitution;
+ HRNM(fFactory->CreateNumberSubstitution(DWRITE_NUMBER_SUBSTITUTION_METHOD_NONE, dwBcp47,
+ TRUE, &numberSubstitution),
+ "Could not create number substitution.");
+ SkTScopedComPtr<FontFallbackSource> fontFallbackSource(
+ new FontFallbackSource(str, strLen, dwBcp47, numberSubstitution.get()));
+
+ UINT32 mappedLength;
+ SkTScopedComPtr<IDWriteFont> font;
+ FLOAT scale;
+
+ bool noSimulations = false;
+ while (!noSimulations) {
+ font.reset();
+ HRNM(fFontFallback->MapCharacters(fontFallbackSource.get(),
+ 0, // textPosition,
+ strLen,
+ fFontCollection.get(),
+ dwFamilyName,
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ &mappedLength,
+ &font,
+ &scale),
+ "Could not map characters");
+ if (!font.get()) {
+ return nullptr;
+ }
+
+ DWRITE_FONT_SIMULATIONS simulations = font->GetSimulations();
+
+#ifdef SK_WIN_FONTMGR_NO_SIMULATIONS
+ noSimulations = simulations == DWRITE_FONT_SIMULATIONS_NONE || HasBitmapStrikes(font);
+#else
+ noSimulations = true;
+#endif
+
+ if (simulations & DWRITE_FONT_SIMULATIONS_BOLD) {
+ dwStyle.fWeight = DWRITE_FONT_WEIGHT_REGULAR;
+ continue;
+ }
+
+ if (simulations & DWRITE_FONT_SIMULATIONS_OBLIQUE) {
+ dwStyle.fSlant = DWRITE_FONT_STYLE_NORMAL;
+ continue;
+ }
+ }
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not get font face from font.");
+
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(font->GetFontFamily(&fontFamily), "Could not get family from font.");
+ return this->makeTypefaceFromDWriteFont(fontFace.get(), font.get(), fontFamily.get());
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::layoutFallback(const WCHAR* dwFamilyName,
+ DWriteStyle dwStyle,
+ const WCHAR* dwBcp47,
+ UINT32 character) const
+{
+ WCHAR str[16];
+ UINT32 strLen = SkTo<UINT32>(SkUTF::ToUTF16(character, reinterpret_cast<uint16_t*>(str)));
+
+ bool noSimulations = false;
+ sk_sp<SkTypeface> returnTypeface(nullptr);
+ while (!noSimulations) {
+ SkTScopedComPtr<IDWriteTextFormat> fallbackFormat;
+ HRNM(fFactory->CreateTextFormat(dwFamilyName ? dwFamilyName : L"",
+ fFontCollection.get(),
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ 72.0f,
+ dwBcp47,
+ &fallbackFormat),
+ "Could not create text format.");
+
+ // No matter how the font collection is set on this IDWriteTextLayout, it is not possible to
+ // disable use of the system font collection in fallback.
+ SkTScopedComPtr<IDWriteTextLayout> fallbackLayout;
+ HRNM(fFactory->CreateTextLayout(
+ str, strLen, fallbackFormat.get(), 200.0f, 200.0f, &fallbackLayout),
+ "Could not create text layout.");
+
+ SkTScopedComPtr<FontFallbackRenderer> fontFallbackRenderer(
+ new FontFallbackRenderer(this, character));
+
+ HRNM(fallbackLayout->SetFontCollection(fFontCollection.get(), {0, strLen}),
+ "Could not set layout font collection.");
+ HRNM(fallbackLayout->Draw(nullptr, fontFallbackRenderer.get(), 50.0f, 50.0f),
+ "Could not draw layout with renderer.");
+
+#ifdef SK_WIN_FONTMGR_NO_SIMULATIONS
+ noSimulations = !fontFallbackRenderer->FallbackTypefaceHasSimulations();
+#else
+ noSimulations = true;
+#endif
+
+ if (noSimulations) {
+ returnTypeface = fontFallbackRenderer->ConsumeFallbackTypeface();
+ }
+
+ if (dwStyle.fWeight != DWRITE_FONT_WEIGHT_REGULAR) {
+ dwStyle.fWeight = DWRITE_FONT_WEIGHT_REGULAR;
+ continue;
+ }
+
+ if (dwStyle.fSlant != DWRITE_FONT_STYLE_NORMAL) {
+ dwStyle.fSlant = DWRITE_FONT_STYLE_NORMAL;
+ continue;
+ }
+ }
+
+ return returnTypeface;
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset> stream,
+ int ttcIndex) const {
+ SkFontArguments args;
+ args.setCollectionIndex(ttcIndex);
+ return this->onMakeFromStreamArgs(std::move(stream), args);
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) const {
+ return DWriteFontTypeface::MakeFromStream(std::move(stream), args);
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onMakeFromData(sk_sp<SkData> data, int ttcIndex) const {
+ return this->makeFromStream(std::make_unique<SkMemoryStream>(std::move(data)), ttcIndex);
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onMakeFromFile(const char path[], int ttcIndex) const {
+ return this->makeFromStream(SkStream::MakeFromFile(path), ttcIndex);
+}
+
+HRESULT SkFontMgr_DirectWrite::getByFamilyName(const WCHAR wideFamilyName[],
+ IDWriteFontFamily** fontFamily) const {
+ UINT32 index;
+ BOOL exists;
+ HR(fFontCollection->FindFamilyName(wideFamilyName, &index, &exists));
+
+ if (exists) {
+ HR(fFontCollection->GetFontFamily(index, fontFamily));
+ }
+ return S_OK;
+}
+
+sk_sp<SkTypeface> SkFontMgr_DirectWrite::onLegacyMakeTypeface(const char familyName[],
+ SkFontStyle style) const {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ DWriteStyle dwStyle(style);
+ if (familyName) {
+ SkSMallocWCHAR dwFamilyName;
+ if (SUCCEEDED(sk_cstring_to_wchar(familyName, &dwFamilyName))) {
+ this->getByFamilyName(dwFamilyName, &fontFamily);
+ if (!fontFamily && fFontFallback) {
+ return this->fallback(
+ dwFamilyName, dwStyle, fLocaleName.get(), 32);
+ }
+ }
+ }
+
+ if (!fontFamily) {
+ if (fFontFallback) {
+ return this->fallback(nullptr, dwStyle, fLocaleName.get(), 32);
+ }
+ // SPI_GETNONCLIENTMETRICS lfMessageFont can fail in Win8. (DisallowWin32kSystemCalls)
+ // layoutFallback causes DCHECK in Chromium. (Uses system font collection.)
+ HRNM(this->getByFamilyName(fDefaultFamilyName, &fontFamily),
+ "Could not create DWrite font family from LOGFONT.");
+ }
+
+ if (!fontFamily) {
+ // Could not obtain the default font.
+ HRNM(fFontCollection->GetFontFamily(0, &fontFamily),
+ "Could not get default-default font family.");
+ }
+
+ SkTScopedComPtr<IDWriteFont> font;
+ HRNM(FirstMatchingFontWithoutSimulations(fontFamily, dwStyle, font),
+ "No font found from family.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return this->makeTypefaceFromDWriteFont(fontFace.get(), font.get(), fontFamily.get());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int SkFontStyleSet_DirectWrite::count() {
+ return fFontFamily->GetFontCount();
+}
+
+SkTypeface* SkFontStyleSet_DirectWrite::createTypeface(int index) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRNM(fFontFamily->GetFont(index, &font), "Could not get font.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return fFontMgr->makeTypefaceFromDWriteFont(fontFace.get(), font.get(), fFontFamily.get()).release();
+}
+
+void SkFontStyleSet_DirectWrite::getStyle(int index, SkFontStyle* fs, SkString* styleName) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRVM(fFontFamily->GetFont(index, &font), "Could not get font.");
+
+ if (fs) {
+ *fs = get_style(font.get());
+ }
+
+ if (styleName) {
+ SkTScopedComPtr<IDWriteLocalizedStrings> faceNames;
+ if (SUCCEEDED(font->GetFaceNames(&faceNames))) {
+ sk_get_locale_string(faceNames.get(), fFontMgr->fLocaleName.get(), styleName);
+ }
+ }
+}
+
+SkTypeface* SkFontStyleSet_DirectWrite::matchStyle(const SkFontStyle& pattern) {
+ SkTScopedComPtr<IDWriteFont> font;
+ DWriteStyle dwStyle(pattern);
+
+ HRNM(FirstMatchingFontWithoutSimulations(fFontFamily, dwStyle, font),
+ "No font found from family.");
+
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRNM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ return fFontMgr->makeTypefaceFromDWriteFont(fontFace.get(), font.get(),
+ fFontFamily.get()).release();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+#include "include/ports/SkTypeface_win.h"
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection) {
+ return SkFontMgr_New_DirectWrite(factory, collection, nullptr);
+}
+
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWrite(IDWriteFactory* factory,
+ IDWriteFontCollection* collection,
+ IDWriteFontFallback* fallback) {
+ if (nullptr == factory) {
+ factory = sk_get_dwrite_factory();
+ if (nullptr == factory) {
+ return nullptr;
+ }
+ }
+
+ SkTScopedComPtr<IDWriteFontCollection> systemFontCollection;
+ if (nullptr == collection) {
+ HRNM(factory->GetSystemFontCollection(&systemFontCollection, FALSE),
+ "Could not get system font collection.");
+ collection = systemFontCollection.get();
+ }
+
+ // It is possible to have been provided a font fallback when factory2 is not available.
+ SkTScopedComPtr<IDWriteFontFallback> systemFontFallback;
+ if (nullptr == fallback) {
+ SkTScopedComPtr<IDWriteFactory2> factory2;
+ if (!SUCCEEDED(factory->QueryInterface(&factory2))) {
+ // IUnknown::QueryInterface states that if it fails, punk will be set to nullptr.
+ // http://blogs.msdn.com/b/oldnewthing/archive/2004/03/26/96777.aspx
+ SkASSERT_RELEASE(nullptr == factory2.get());
+ } else {
+ HRNM(factory2->GetSystemFontFallback(&systemFontFallback),
+ "Could not get system fallback.");
+ fallback = systemFontFallback.get();
+ }
+ }
+
+ const WCHAR* defaultFamilyName = L"";
+ int defaultFamilyNameLen = 1;
+ NONCLIENTMETRICSW metrics;
+ metrics.cbSize = sizeof(metrics);
+
+ #ifndef SK_WINUWP
+ if (nullptr == fallback) {
+ if (SystemParametersInfoW(SPI_GETNONCLIENTMETRICS, sizeof(metrics), &metrics, 0)) {
+ defaultFamilyName = metrics.lfMessageFont.lfFaceName;
+ defaultFamilyNameLen = LF_FACESIZE;
+ }
+ }
+ #endif //SK_WINUWP
+
+ WCHAR localeNameStorage[LOCALE_NAME_MAX_LENGTH];
+ const WCHAR* localeName = L"";
+ int localeNameLen = 1;
+
+ // Dynamically load GetUserDefaultLocaleName function, as it is not available on XP.
+ SkGetUserDefaultLocaleNameProc getUserDefaultLocaleNameProc = nullptr;
+ HRESULT hr = SkGetGetUserDefaultLocaleNameProc(&getUserDefaultLocaleNameProc);
+ if (nullptr == getUserDefaultLocaleNameProc) {
+ SK_TRACEHR(hr, "Could not get GetUserDefaultLocaleName.");
+ } else {
+ int size = getUserDefaultLocaleNameProc(localeNameStorage, LOCALE_NAME_MAX_LENGTH);
+ if (size) {
+ localeName = localeNameStorage;
+ localeNameLen = size;
+ }
+ }
+
+ return sk_make_sp<SkFontMgr_DirectWrite>(factory, collection, fallback,
+ localeName, localeNameLen,
+ defaultFamilyName, defaultFamilyNameLen);
+}
+
+#include "include/ports/SkFontMgr_indirect.h"
+SK_API sk_sp<SkFontMgr> SkFontMgr_New_DirectWriteRenderer(sk_sp<SkRemotableFontMgr> proxy) {
+ sk_sp<SkFontMgr> impl(SkFontMgr_New_DirectWrite());
+ if (!impl) {
+ return nullptr;
+ }
+ return sk_make_sp<SkFontMgr_Indirect>(std::move(impl), std::move(proxy));
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp b/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp
new file mode 100644
index 0000000000..08195c569f
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkFontMgr_win_dw_factory.cpp
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN) // And !SKIA_GDI?
+
+#include "include/core/SkFontMgr.h"
+#include "include/ports/SkTypeface_win.h"
+
+sk_sp<SkFontMgr> SkFontMgr::Factory() {
+ return SkFontMgr_New_DirectWrite();
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp b/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp
new file mode 100644
index 0000000000..a9f6c8ed17
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkGlobalInitialization_default.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+
+#if defined(SK_DISABLE_EFFECT_DESERIALIZATION)
+
+ void SkFlattenable::PrivateInitializer::InitEffects() {}
+ void SkFlattenable::PrivateInitializer::InitImageFilters() {}
+
+#else
+
+ #include "include/core/SkMaskFilter.h"
+ #include "src/core/SkColorFilterBase.h"
+ #include "src/core/SkImageFilter_Base.h"
+ #include "src/effects/SkDashImpl.h"
+ #include "src/shaders/gradients/SkGradientShaderBase.h"
+
+ /**
+ * Register most effects for deserialization.
+ *
+     * None of these are strictly required for Skia to operate, so if you're
+     * not using deserialization yourself, you can define
+     * SK_DISABLE_EFFECT_DESERIALIZATION, or modify/replace this file as needed.
+ */
+ void SkFlattenable::PrivateInitializer::InitEffects() {
+ // Shaders.
+ SkRegisterLinearGradientShaderFlattenable();
+ SkRegisterRadialGradientShaderFlattenable();
+ SkRegisterSweepGradientShaderFlattenable();
+ SkRegisterTwoPointConicalGradientShaderFlattenable();
+
+ // Color filters.
+ SkRegisterComposeColorFilterFlattenable();
+ SkRegisterModeColorFilterFlattenable();
+ SkRegisterColorSpaceXformColorFilterFlattenable();
+ SkRegisterWorkingFormatColorFilterFlattenable();
+
+ // Mask filters.
+ SkMaskFilter::RegisterFlattenables();
+
+ // Path effects.
+ SK_REGISTER_FLATTENABLE(SkDashImpl);
+ }
+
+ /*
+ * Register SkImageFilters for deserialization.
+ *
+     * None of these are strictly required for Skia to operate, so if you're
+     * not using deserialization yourself, you can define
+     * SK_DISABLE_EFFECT_DESERIALIZATION, or modify/replace this file as needed.
+ */
+ void SkFlattenable::PrivateInitializer::InitImageFilters() {
+ SkRegisterBlurImageFilterFlattenable();
+ SkRegisterComposeImageFilterFlattenable();
+ }
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp b/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp
new file mode 100644
index 0000000000..a247d4955e
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageEncoder_CG.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2008 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/encode/SkImageEncoderPriv.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/utils/mac/SkCGUtils.h"
+#include "src/core/SkStreamPriv.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#include <ImageIO/ImageIO.h>
+#include <MobileCoreServices/MobileCoreServices.h>
+#endif
+
+static size_t consumer_put(void* info, const void* buffer, size_t count) {
+ SkWStream* stream = reinterpret_cast<SkWStream*>(info);
+ return stream->write(buffer, count) ? count : 0;
+}
+
+static void consumer_release(void* info) {
+ // we do nothing, since by design we don't "own" the stream (i.e. info)
+}
+
+static SkUniqueCFRef<CGDataConsumerRef> SkStreamToCGDataConsumer(SkWStream* stream) {
+ CGDataConsumerCallbacks procs;
+ procs.putBytes = consumer_put;
+ procs.releaseConsumer = consumer_release;
+    // we don't own/reference the stream, so our consumer must not live
+    // longer than our caller's ownership of the stream
+ return SkUniqueCFRef<CGDataConsumerRef>(CGDataConsumerCreate(stream, &procs));
+}
+
+static SkUniqueCFRef<CGImageDestinationRef> SkStreamToImageDestination(SkWStream* stream,
+ CFStringRef type) {
+ SkUniqueCFRef<CGDataConsumerRef> consumer = SkStreamToCGDataConsumer(stream);
+ if (nullptr == consumer) {
+ return nullptr;
+ }
+
+ return SkUniqueCFRef<CGImageDestinationRef>(
+ CGImageDestinationCreateWithDataConsumer(consumer.get(), type, 1, nullptr));
+}
+
+/* Encode bitmaps via CGImageDestination. We setup a DataConsumer which writes
+ to our SkWStream. Since we don't reference/own the SkWStream, our consumer
+ must only live for the duration of the onEncode() method.
+ */
+bool SkEncodeImageWithCG(SkWStream* stream, const SkPixmap& pixmap, SkEncodedImageFormat format) {
+ SkBitmap bm;
+ if (!bm.installPixels(pixmap)) {
+ return false;
+ }
+ bm.setImmutable();
+
+ CFStringRef type;
+ switch (format) {
+ case SkEncodedImageFormat::kICO:
+ type = kUTTypeICO;
+ break;
+ case SkEncodedImageFormat::kBMP:
+ type = kUTTypeBMP;
+ break;
+ case SkEncodedImageFormat::kGIF:
+ type = kUTTypeGIF;
+ break;
+ case SkEncodedImageFormat::kJPEG:
+ type = kUTTypeJPEG;
+ break;
+ case SkEncodedImageFormat::kPNG:
+ // PNG encoding an ARGB_4444 bitmap gives the following errors in GM:
+ // <Error>: CGImageDestinationAddImage image could not be converted to destination
+ // format.
+ // <Error>: CGImageDestinationFinalize image destination does not have enough images
+ // So instead we copy to 8888.
+ if (bm.colorType() == kARGB_4444_SkColorType) {
+ SkBitmap bitmapN32;
+ bitmapN32.allocPixels(bm.info().makeColorType(kN32_SkColorType));
+ bm.readPixels(bitmapN32.info(), bitmapN32.getPixels(), bitmapN32.rowBytes(), 0, 0);
+ bm.swap(bitmapN32);
+ }
+ type = kUTTypePNG;
+ break;
+ default:
+ return false;
+ }
+
+ SkUniqueCFRef<CGImageDestinationRef> dst = SkStreamToImageDestination(stream, type);
+ if (nullptr == dst) {
+ return false;
+ }
+
+ SkUniqueCFRef<CGImageRef> image(SkCreateCGImageRef(bm));
+ if (nullptr == image) {
+ return false;
+ }
+
+ CGImageDestinationAddImage(dst.get(), image.get(), nullptr);
+ return CGImageDestinationFinalize(dst.get());
+}
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/ports/SkImageEncoder_NDK.cpp b/gfx/skia/skia/src/ports/SkImageEncoder_NDK.cpp
new file mode 100644
index 0000000000..5fc55d7124
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageEncoder_NDK.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPixmap.h"
+#include "include/core/SkStream.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTo.h"
+#include "src/encode/SkImageEncoderPriv.h"
+#include "src/ports/SkNDKConversions.h"
+
+bool SkEncodeImageWithNDK(SkWStream* stream, const SkPixmap& pmap, SkEncodedImageFormat format,
+ int quality) {
+ // If any of these values is invalid (e.g. set to zero), the info will be rejected by
+ // AndroidBitmap_compress.
+ AndroidBitmapInfo info {
+ .width = SkTFitsIn<uint32_t>(pmap.width()) ? SkToU32(pmap.width()) : 0,
+ .height = SkTFitsIn<uint32_t>(pmap.height()) ? SkToU32(pmap.height()) : 0,
+ .stride = SkTFitsIn<uint32_t>(pmap.rowBytes()) ? SkToU32(pmap.rowBytes()) : 0,
+ .format = SkNDKConversions::toAndroidBitmapFormat(pmap.colorType())
+ };
+
+ switch (pmap.alphaType()) {
+ case kPremul_SkAlphaType:
+ info.flags = ANDROID_BITMAP_FLAGS_ALPHA_PREMUL;
+ break;
+ case kOpaque_SkAlphaType:
+ info.flags = ANDROID_BITMAP_FLAGS_ALPHA_OPAQUE;
+ break;
+ case kUnpremul_SkAlphaType:
+ info.flags = ANDROID_BITMAP_FLAGS_ALPHA_UNPREMUL;
+ break;
+ default:
+ return false;
+ }
+
+ AndroidBitmapCompressFormat androidFormat;
+ switch (format) {
+ case SkEncodedImageFormat::kJPEG:
+ androidFormat = ANDROID_BITMAP_COMPRESS_FORMAT_JPEG;
+ break;
+ case SkEncodedImageFormat::kPNG:
+ androidFormat = ANDROID_BITMAP_COMPRESS_FORMAT_PNG;
+ break;
+ case SkEncodedImageFormat::kWEBP:
+ if (quality == 100) {
+ // Mimic the behavior of SkImageEncoder.cpp. In LOSSLESS mode, libwebp
+ // interprets quality as the amount of effort (time) to spend making
+ // the encoded image smaller, while the visual quality remains constant.
+ // This value of 75 (on a scale of 0 - 100, where 100 spends the most
+ // time for the smallest encoding) matches WebPConfigInit.
+ androidFormat = ANDROID_BITMAP_COMPRESS_FORMAT_WEBP_LOSSLESS;
+ quality = 75;
+ } else {
+ androidFormat = ANDROID_BITMAP_COMPRESS_FORMAT_WEBP_LOSSY;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ auto write_to_stream = [](void* userContext, const void* data, size_t size) {
+ return reinterpret_cast<SkWStream*>(userContext)->write(data, size);
+ };
+
+ return ANDROID_BITMAP_RESULT_SUCCESS == AndroidBitmap_compress(&info,
+ SkNDKConversions::toDataSpace(pmap.colorSpace()), pmap.addr(), androidFormat, quality,
+ reinterpret_cast<void*>(stream), write_to_stream);
+}
diff --git a/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp b/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp
new file mode 100644
index 0000000000..1a37e57a61
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageEncoder_WIC.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkImageEncoder.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/encode/SkImageEncoderPriv.h"
+#include "src/utils/win/SkAutoCoInitialize.h"
+#include "src/utils/win/SkIStream.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+#include <wincodec.h>
+
+//All Windows SDKs back to XPSP2 export the CLSID_WICImagingFactory symbol.
+//In the Windows8 SDK the CLSID_WICImagingFactory symbol is still exported
+//but CLSID_WICImagingFactory is then #defined to CLSID_WICImagingFactory2.
+//Undo this #define if it has been done so that we link against the symbols
+//we intended to link against on all SDKs.
+#if defined(CLSID_WICImagingFactory)
+#undef CLSID_WICImagingFactory
+#endif
+
+bool SkEncodeImageWithWIC(SkWStream* stream, const SkPixmap& pixmap,
+ SkEncodedImageFormat format, int quality) {
+ GUID type;
+ switch (format) {
+ case SkEncodedImageFormat::kJPEG:
+ type = GUID_ContainerFormatJpeg;
+ break;
+ case SkEncodedImageFormat::kPNG:
+ type = GUID_ContainerFormatPng;
+ break;
+ default:
+ return false;
+ }
+ SkBitmap bitmapOrig;
+ if (!bitmapOrig.installPixels(pixmap)) {
+ return false;
+ }
+ bitmapOrig.setImmutable();
+
+ // First convert to BGRA if necessary.
+ SkBitmap bitmap;
+ if (!bitmap.tryAllocPixels(bitmapOrig.info().makeColorType(kBGRA_8888_SkColorType)) ||
+ !bitmapOrig.readPixels(bitmap.info(), bitmap.getPixels(), bitmap.rowBytes(), 0, 0))
+ {
+ return false;
+ }
+
+ // WIC expects unpremultiplied pixels. Unpremultiply if necessary.
+ if (kPremul_SkAlphaType == bitmap.alphaType()) {
+ uint8_t* pixels = reinterpret_cast<uint8_t*>(bitmap.getPixels());
+ for (int y = 0; y < bitmap.height(); ++y) {
+ for (int x = 0; x < bitmap.width(); ++x) {
+ uint8_t* bytes = pixels + y * bitmap.rowBytes() + x * bitmap.bytesPerPixel();
+ SkPMColor* src = reinterpret_cast<SkPMColor*>(bytes);
+ SkColor* dst = reinterpret_cast<SkColor*>(bytes);
+ *dst = SkUnPreMultiply::PMColorToColor(*src);
+ }
+ }
+ }
+
+ // Finally, if we are performing a jpeg encode, we must convert to BGR.
+ void* pixels = bitmap.getPixels();
+ size_t rowBytes = bitmap.rowBytes();
+ SkAutoMalloc pixelStorage;
+ WICPixelFormatGUID formatDesired = GUID_WICPixelFormat32bppBGRA;
+ if (SkEncodedImageFormat::kJPEG == format) {
+ formatDesired = GUID_WICPixelFormat24bppBGR;
+ rowBytes = SkAlign4(bitmap.width() * 3);
+ pixelStorage.reset(rowBytes * bitmap.height());
+ for (int y = 0; y < bitmap.height(); y++) {
+ uint8_t* dstRow = SkTAddOffset<uint8_t>(pixelStorage.get(), y * rowBytes);
+ for (int x = 0; x < bitmap.width(); x++) {
+ uint32_t bgra = *bitmap.getAddr32(x, y);
+ dstRow[0] = (uint8_t) ((bgra >> 0) & 0xFF);
+ dstRow[1] = (uint8_t) ((bgra >> 8) & 0xFF);
+ dstRow[2] = (uint8_t) ((bgra >> 16) & 0xFF);
+ dstRow += 3;
+ }
+ }
+
+ pixels = pixelStorage.get();
+ }
+
+
+ //Initialize COM.
+ SkAutoCoInitialize scopedCo;
+ if (!scopedCo.succeeded()) {
+ return false;
+ }
+
+ HRESULT hr = S_OK;
+
+ //Create Windows Imaging Component ImagingFactory.
+ SkTScopedComPtr<IWICImagingFactory> piImagingFactory;
+ if (SUCCEEDED(hr)) {
+ hr = CoCreateInstance(
+ CLSID_WICImagingFactory
+ , nullptr
+ , CLSCTX_INPROC_SERVER
+ , IID_PPV_ARGS(&piImagingFactory)
+ );
+ }
+
+ //Convert the SkWStream to an IStream.
+ SkTScopedComPtr<IStream> piStream;
+ if (SUCCEEDED(hr)) {
+ hr = SkWIStream::CreateFromSkWStream(stream, &piStream);
+ }
+
+    //Create an encoder of the appropriate type.
+ SkTScopedComPtr<IWICBitmapEncoder> piEncoder;
+ if (SUCCEEDED(hr)) {
+ hr = piImagingFactory->CreateEncoder(type, nullptr, &piEncoder);
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->Initialize(piStream.get(), WICBitmapEncoderNoCache);
+ }
+
+    //Create the frame.
+ SkTScopedComPtr<IWICBitmapFrameEncode> piBitmapFrameEncode;
+ SkTScopedComPtr<IPropertyBag2> piPropertybag;
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->CreateNewFrame(&piBitmapFrameEncode, &piPropertybag);
+ }
+
+ if (SUCCEEDED(hr)) {
+ PROPBAG2 name;
+ memset(&name, 0, sizeof(name));
+ name.dwType = PROPBAG2_TYPE_DATA;
+ name.vt = VT_R4;
+ name.pstrName = const_cast<LPOLESTR>(L"ImageQuality");
+
+ VARIANT value;
+ VariantInit(&value);
+ value.vt = VT_R4;
+ value.fltVal = (FLOAT)(quality / 100.0);
+
+ //Ignore result code.
+ // This returns E_FAIL if the named property is not in the bag.
+ //TODO(bungeman) enumerate the properties,
+ // write and set hr iff property exists.
+ piPropertybag->Write(1, &name, &value);
+ }
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->Initialize(piPropertybag.get());
+ }
+
+ //Set the size of the frame.
+ const UINT width = bitmap.width();
+ const UINT height = bitmap.height();
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->SetSize(width, height);
+ }
+
+ //Set the pixel format of the frame. If native encoded format cannot match BGRA,
+ //it will choose the closest pixel format that it supports.
+ WICPixelFormatGUID formatGUID = formatDesired;
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->SetPixelFormat(&formatGUID);
+ }
+ if (SUCCEEDED(hr)) {
+ //Be sure the image format is the one requested.
+ hr = IsEqualGUID(formatGUID, formatDesired) ? S_OK : E_FAIL;
+ }
+
+ //Write the pixels into the frame.
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->WritePixels(height,
+ (UINT) rowBytes,
+ (UINT) rowBytes * height,
+ reinterpret_cast<BYTE*>(pixels));
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piBitmapFrameEncode->Commit();
+ }
+
+ if (SUCCEEDED(hr)) {
+ hr = piEncoder->Commit();
+ }
+
+ return SUCCEEDED(hr);
+}
+
+#endif // defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp b/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp
new file mode 100644
index 0000000000..e01283d8ae
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorCG.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/ports/SkImageGeneratorCG.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/utils/mac/SkCGUtils.h"
+#include "src/codec/SkPixmapUtils.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#include <ImageIO/ImageIO.h>
+#include <MobileCoreServices/MobileCoreServices.h>
+#endif
+
+namespace {
+class ImageGeneratorCG : public SkImageGenerator {
+public:
+ ImageGeneratorCG(const SkImageInfo&, SkUniqueCFRef<CGImageSourceRef> imageSrc,
+ sk_sp<SkData> data, SkEncodedOrigin);
+
+protected:
+ sk_sp<SkData> onRefEncodedData() override;
+
+ bool onGetPixels(const SkImageInfo&, void* pixels, size_t rowBytes, const Options&) override;
+
+private:
+ const SkUniqueCFRef<CGImageSourceRef> fImageSrc;
+ const sk_sp<SkData> fData;
+ const SkEncodedOrigin fOrigin;
+
+ using INHERITED = SkImageGenerator;
+};
+
+static SkUniqueCFRef<CGImageSourceRef> data_to_CGImageSrc(SkData* data) {
+ SkUniqueCFRef<CGDataProviderRef> cgData(
+ CGDataProviderCreateWithData(data, data->data(), data->size(), nullptr));
+ if (!cgData) {
+ return nullptr;
+ }
+ return SkUniqueCFRef<CGImageSourceRef>(
+ CGImageSourceCreateWithDataProvider(cgData.get(), nullptr));
+}
+
+} // namespace
+
+std::unique_ptr<SkImageGenerator> SkImageGeneratorCG::MakeFromEncodedCG(sk_sp<SkData> data) {
+ SkUniqueCFRef<CGImageSourceRef> imageSrc = data_to_CGImageSrc(data.get());
+ if (!imageSrc) {
+ return nullptr;
+ }
+
+ SkUniqueCFRef<CFDictionaryRef> properties(
+ CGImageSourceCopyPropertiesAtIndex(imageSrc.get(), 0, nullptr));
+ if (!properties) {
+ return nullptr;
+ }
+
+ CFNumberRef widthRef = static_cast<CFNumberRef>(
+ CFDictionaryGetValue(properties.get(), kCGImagePropertyPixelWidth));
+ CFNumberRef heightRef = static_cast<CFNumberRef>(
+ CFDictionaryGetValue(properties.get(), kCGImagePropertyPixelHeight));
+ if (nullptr == widthRef || nullptr == heightRef) {
+ return nullptr;
+ }
+
+ int width, height;
+ if (!CFNumberGetValue(widthRef , kCFNumberIntType, &width ) ||
+ !CFNumberGetValue(heightRef, kCFNumberIntType, &height))
+ {
+ return nullptr;
+ }
+
+ bool hasAlpha = bool(CFDictionaryGetValue(properties.get(), kCGImagePropertyHasAlpha));
+ SkAlphaType alphaType = hasAlpha ? kPremul_SkAlphaType : kOpaque_SkAlphaType;
+ SkImageInfo info = SkImageInfo::MakeS32(width, height, alphaType);
+
+ SkEncodedOrigin origin = kDefault_SkEncodedOrigin;
+ CFNumberRef orientationRef = static_cast<CFNumberRef>(
+ CFDictionaryGetValue(properties.get(), kCGImagePropertyOrientation));
+ int originInt;
+ if (orientationRef && CFNumberGetValue(orientationRef, kCFNumberIntType, &originInt)) {
+ origin = (SkEncodedOrigin) originInt;
+ }
+
+ if (SkEncodedOriginSwapsWidthHeight(origin)) {
+ info = SkPixmapUtils::SwapWidthHeight(info);
+ }
+
+ // FIXME: We have the opportunity to extract color space information here,
+ // though I think it makes sense to wait until we understand how
+ // we want to communicate it to the generator.
+
+ return std::unique_ptr<SkImageGenerator>(new ImageGeneratorCG(info, std::move(imageSrc),
+ std::move(data), origin));
+}
+
+ImageGeneratorCG::ImageGeneratorCG(const SkImageInfo& info, SkUniqueCFRef<CGImageSourceRef> src,
+ sk_sp<SkData> data, SkEncodedOrigin origin)
+ : INHERITED(info)
+ , fImageSrc(std::move(src))
+ , fData(std::move(data))
+ , fOrigin(origin)
+{}
+
+sk_sp<SkData> ImageGeneratorCG::onRefEncodedData() {
+ return fData;
+}
+
+bool ImageGeneratorCG::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options&)
+{
+ if (kN32_SkColorType != info.colorType()) {
+ // FIXME: Support other colorTypes.
+ return false;
+ }
+
+ switch (info.alphaType()) {
+ case kOpaque_SkAlphaType:
+ if (kOpaque_SkAlphaType != this->getInfo().alphaType()) {
+ return false;
+ }
+ break;
+ case kPremul_SkAlphaType:
+ break;
+ default:
+ return false;
+ }
+
+ SkUniqueCFRef<CGImageRef> image(CGImageSourceCreateImageAtIndex(fImageSrc.get(), 0, nullptr));
+ if (!image) {
+ return false;
+ }
+
+ SkPixmap dst(info, pixels, rowBytes);
+ auto decode = [&image](const SkPixmap& pm) {
+ // FIXME: Using SkCopyPixelsFromCGImage (as opposed to swizzling
+ // ourselves) greatly restricts the color and alpha types that we
+ // support. If we swizzle ourselves, we can add support for:
+ // kUnpremul_SkAlphaType
+ // 16-bit per component RGBA
+ // kGray_8_SkColorType
+ // Additionally, it would be interesting to compare the performance
+ // of SkSwizzler with CG's built in swizzler.
+ return SkCopyPixelsFromCGImage(pm, image.get());
+ };
+ return SkPixmapUtils::Orient(dst, fOrigin, decode);
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorNDK.cpp b/gfx/skia/skia/src/ports/SkImageGeneratorNDK.cpp
new file mode 100644
index 0000000000..7d050b9704
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorNDK.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageGenerator.h"
+#include "include/core/SkImageInfo.h"
+#include "include/ports/SkImageGeneratorNDK.h"
+#include "src/ports/SkNDKConversions.h"
+
+#include <android/bitmap.h>
+#include <android/data_space.h>
+#include <android/imagedecoder.h>
+
+namespace {
+class ImageGeneratorNDK : public SkImageGenerator {
+public:
+ ImageGeneratorNDK(const SkImageInfo&, sk_sp<SkData>, AImageDecoder*);
+ ~ImageGeneratorNDK() override;
+
+protected:
+ sk_sp<SkData> onRefEncodedData() override;
+
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options& opts) override;
+
+private:
+ sk_sp<SkData> fData;
+ AImageDecoder* fDecoder;
+ // Setting the ADataSpace is sticky - it is set for all future decodes
+ // until it is set again. But as of R there is no way to reset it to
+ // ADATASPACE_UNKNOWN to skip color correction. If the client requests
+ // skipping correction after having set it to something else, we need
+ // to recreate the AImageDecoder.
+ bool fPreviouslySetADataSpace;
+
+ using INHERITED = SkImageGenerator;
+};
+
+} // anonymous namespace
+
+static bool ok(int result) {
+ return result == ANDROID_IMAGE_DECODER_SUCCESS;
+}
+
+static bool set_android_bitmap_format(AImageDecoder* decoder, SkColorType colorType) {
+ auto format = SkNDKConversions::toAndroidBitmapFormat(colorType);
+ return ok(AImageDecoder_setAndroidBitmapFormat(decoder, format));
+}
+
+static SkColorType colorType(AImageDecoder* decoder, const AImageDecoderHeaderInfo* headerInfo) {
+ // AImageDecoder never defaults to gray, but allows setting it if the image is 8 bit gray.
+ if (set_android_bitmap_format(decoder, kGray_8_SkColorType)) {
+ return kGray_8_SkColorType;
+ }
+
+ auto format = static_cast<AndroidBitmapFormat>(
+ AImageDecoderHeaderInfo_getAndroidBitmapFormat(headerInfo));
+ return SkNDKConversions::toColorType(format);
+}
+
+static sk_sp<SkColorSpace> get_default_colorSpace(const AImageDecoderHeaderInfo* headerInfo) {
+ auto dataSpace = static_cast<ADataSpace>(AImageDecoderHeaderInfo_getDataSpace(headerInfo));
+ if (auto cs = SkNDKConversions::toColorSpace(dataSpace)) {
+ return cs;
+ }
+
+ return SkColorSpace::MakeSRGB();
+}
+
+std::unique_ptr<SkImageGenerator> SkImageGeneratorNDK::MakeFromEncodedNDK(sk_sp<SkData> data) {
+ if (!data) return nullptr;
+
+ AImageDecoder* rawDecoder;
+ if (!ok(AImageDecoder_createFromBuffer(data->data(), data->size(), &rawDecoder))) {
+ return nullptr;
+ }
+
+ const AImageDecoderHeaderInfo* headerInfo = AImageDecoder_getHeaderInfo(rawDecoder);
+ int32_t width = AImageDecoderHeaderInfo_getWidth(headerInfo);
+ int32_t height = AImageDecoderHeaderInfo_getHeight(headerInfo);
+ SkColorType ct = colorType(rawDecoder, headerInfo);
+
+ // Although the encoded data stores unpremultiplied pixels, AImageDecoder defaults to premul
+ // (if the image may have alpha).
+ SkAlphaType at = AImageDecoderHeaderInfo_getAlphaFlags(headerInfo)
+ == ANDROID_BITMAP_FLAGS_ALPHA_OPAQUE ? kOpaque_SkAlphaType : kPremul_SkAlphaType;
+ auto imageInfo = SkImageInfo::Make(width, height, ct, at, get_default_colorSpace(headerInfo));
+ return std::unique_ptr<SkImageGenerator>(
+ new ImageGeneratorNDK(imageInfo, std::move(data), rawDecoder));
+}
+
+ImageGeneratorNDK::ImageGeneratorNDK(const SkImageInfo& info, sk_sp<SkData> data,
+ AImageDecoder* decoder)
+ : INHERITED(info)
+ , fData(std::move(data))
+ , fDecoder(decoder)
+ , fPreviouslySetADataSpace(false)
+{
+ SkASSERT(fDecoder);
+}
+
+ImageGeneratorNDK::~ImageGeneratorNDK() {
+ AImageDecoder_delete(fDecoder);
+}
+
+static bool set_target_size(AImageDecoder* decoder, const SkISize& size, const SkISize targetSize) {
+ if (size != targetSize) {
+ // AImageDecoder will scale to arbitrary sizes. Only support a size if it's supported by the
+ // underlying library.
+ const AImageDecoderHeaderInfo* headerInfo = AImageDecoder_getHeaderInfo(decoder);
+ const char* mimeType = AImageDecoderHeaderInfo_getMimeType(headerInfo);
+ if (0 == strcmp(mimeType, "image/jpeg")) {
+ bool supported = false;
+ for (int sampleSize : { 2, 4, 8 }) {
+ int32_t width;
+ int32_t height;
+ if (ok(AImageDecoder_computeSampledSize(decoder, sampleSize, &width, &height))
+ && targetSize == SkISize::Make(width, height)) {
+ supported = true;
+ break;
+ }
+ }
+ if (!supported) return false;
+ } else if (0 == strcmp(mimeType, "image/webp")) {
+ // libwebp supports arbitrary downscaling.
+ if (targetSize.width() > size.width() || targetSize.height() > size.height()) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+ return ok(AImageDecoder_setTargetSize(decoder, targetSize.width(), targetSize.height()));
+}
+
+bool ImageGeneratorNDK::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options& opts) {
+ if (auto* cs = info.colorSpace()) {
+ if (!ok(AImageDecoder_setDataSpace(fDecoder, SkNDKConversions::toDataSpace(cs)))) {
+ return false;
+ }
+ fPreviouslySetADataSpace = true;
+ } else {
+ // If the requested SkColorSpace is null, the client wants the "raw" colors, without color
+ // space transformations applied. (This is primarily useful for a client that wants to do
+ // their own color transformations.) This is AImageDecoder's default, but if a previous call
+ // set an ADataSpace, AImageDecoder is no longer using its default, so we need to set it
+ // back.
+ if (fPreviouslySetADataSpace) {
+ // AImageDecoderHeaderInfo_getDataSpace always returns the same value for the same
+ // image, regardless of prior calls to AImageDecoder_setDataSpace. Check if it's
+ // ADATASPACE_UNKNOWN, which needs to be handled specially.
+ const AImageDecoderHeaderInfo* headerInfo = AImageDecoder_getHeaderInfo(fDecoder);
+ const auto defaultDataSpace = AImageDecoderHeaderInfo_getDataSpace(headerInfo);
+ if (defaultDataSpace == ADATASPACE_UNKNOWN) {
+ // As of R, there's no way to reset AImageDecoder to ADATASPACE_UNKNOWN, so
+ // create a new one.
+ AImageDecoder* decoder;
+ if (!ok(AImageDecoder_createFromBuffer(fData->data(), fData->size(), &decoder))) {
+ return false;
+ }
+ AImageDecoder_delete(fDecoder);
+ fDecoder = decoder;
+ } else {
+ if (!ok(AImageDecoder_setDataSpace(fDecoder, defaultDataSpace))) {
+ return false;
+ }
+ }
+
+ // Whether by recreating AImageDecoder or calling AImageDecoder_setDataSpace, the
+ // AImageDecoder is back to its default, so if the next call has a null SkColorSpace, it
+ // does not need to reset it again.
+ fPreviouslySetADataSpace = false;
+ }
+ }
+
+ if (!set_android_bitmap_format(fDecoder, info.colorType())) {
+ return false;
+ }
+
+ switch (info.alphaType()) {
+ case kUnknown_SkAlphaType:
+ return false;
+ case kOpaque_SkAlphaType:
+ if (this->getInfo().alphaType() != kOpaque_SkAlphaType) {
+ return false;
+ }
+ break;
+ case kUnpremul_SkAlphaType:
+ if (!ok(AImageDecoder_setUnpremultipliedRequired(fDecoder, true))) {
+ return false;
+ }
+ break;
+ case kPremul_SkAlphaType:
+ break;
+ }
+
+ if (!set_target_size(fDecoder, getInfo().dimensions(), info.dimensions())) {
+ return false;
+ }
+
+ auto byteSize = info.computeByteSize(rowBytes);
+ switch (AImageDecoder_decodeImage(fDecoder, pixels, rowBytes, byteSize)) {
+ case ANDROID_IMAGE_DECODER_INCOMPLETE:
+ // The image was partially decoded, but the input was truncated. The client may be
+ // happy with the partial image.
+ case ANDROID_IMAGE_DECODER_ERROR:
+ // Similarly, the image was partially decoded, but the input had an error. The client
+ // may be happy with the partial image.
+ case ANDROID_IMAGE_DECODER_SUCCESS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+sk_sp<SkData> ImageGeneratorNDK::onRefEncodedData() {
+ return fData;
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp b/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp
new file mode 100644
index 0000000000..225ec8facf
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGeneratorWIC.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/ports/SkImageGeneratorWIC.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/utils/win/SkIStream.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <wincodec.h>
+
+// All Windows SDKs back to XPSP2 export the CLSID_WICImagingFactory symbol.
+// In the Windows8 SDK the CLSID_WICImagingFactory symbol is still exported
+// but CLSID_WICImagingFactory is then #defined to CLSID_WICImagingFactory2.
+// Undo this #define if it has been done so that we link against the symbols
+// we intended to link against on all SDKs.
+#if defined(CLSID_WICImagingFactory)
+ #undef CLSID_WICImagingFactory
+#endif
+
+namespace {
+class ImageGeneratorWIC : public SkImageGenerator {
+public:
+ /*
+ * Takes ownership of the imagingFactory
+ * Takes ownership of the imageSource
+ */
+ ImageGeneratorWIC(const SkImageInfo& info, IWICImagingFactory* imagingFactory,
+ IWICBitmapSource* imageSource, sk_sp<SkData>);
+protected:
+ sk_sp<SkData> onRefEncodedData() override;
+
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const Options&)
+ override;
+
+private:
+ SkTScopedComPtr<IWICImagingFactory> fImagingFactory;
+ SkTScopedComPtr<IWICBitmapSource> fImageSource;
+ sk_sp<SkData> fData;
+
+ using INHERITED = SkImageGenerator;
+};
+} // namespace
+
+std::unique_ptr<SkImageGenerator> SkImageGeneratorWIC::MakeFromEncodedWIC(sk_sp<SkData> data) {
+ // Create Windows Imaging Component ImagingFactory.
+ SkTScopedComPtr<IWICImagingFactory> imagingFactory;
+ HRESULT hr = CoCreateInstance(CLSID_WICImagingFactory, nullptr, CLSCTX_INPROC_SERVER,
+ IID_PPV_ARGS(&imagingFactory));
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Create an IStream.
+ SkTScopedComPtr<IStream> iStream;
+ // Note that iStream will take ownership of the new memory stream because
+ // we set |deleteOnRelease| to true.
+ hr = SkIStream::CreateFromSkStream(std::make_unique<SkMemoryStream>(data), &iStream);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Create the decoder from the stream.
+ SkTScopedComPtr<IWICBitmapDecoder> decoder;
+ hr = imagingFactory->CreateDecoderFromStream(iStream.get(), nullptr,
+ WICDecodeMetadataCacheOnDemand, &decoder);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Select the first frame from the decoder.
+ SkTScopedComPtr<IWICBitmapFrameDecode> imageFrame;
+ hr = decoder->GetFrame(0, &imageFrame);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Treat the frame as an image source.
+ SkTScopedComPtr<IWICBitmapSource> imageSource;
+ hr = imageFrame->QueryInterface(IID_PPV_ARGS(&imageSource));
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Get the size of the image.
+ UINT width;
+ UINT height;
+ hr = imageSource->GetSize(&width, &height);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Get the encoded pixel format.
+ WICPixelFormatGUID format;
+ hr = imageSource->GetPixelFormat(&format);
+ if (FAILED(hr)) {
+ return nullptr;
+ }
+
+ // Recommend kOpaque if the image is opaque and kPremul otherwise.
+ // FIXME: We are stuck recommending kPremul for all indexed formats
+ // (Ex: GUID_WICPixelFormat8bppIndexed) because we don't have
+ // a way to check if the image has alpha.
+ SkAlphaType alphaType = kPremul_SkAlphaType;
+
+ if (GUID_WICPixelFormat16bppBGR555 == format ||
+ GUID_WICPixelFormat16bppBGR565 == format ||
+ GUID_WICPixelFormat32bppBGR101010 == format ||
+ GUID_WICPixelFormatBlackWhite == format ||
+ GUID_WICPixelFormat2bppGray == format ||
+ GUID_WICPixelFormat4bppGray == format ||
+ GUID_WICPixelFormat8bppGray == format ||
+ GUID_WICPixelFormat16bppGray == format ||
+ GUID_WICPixelFormat16bppGrayFixedPoint == format ||
+ GUID_WICPixelFormat16bppGrayHalf == format ||
+ GUID_WICPixelFormat32bppGrayFloat == format ||
+ GUID_WICPixelFormat32bppGrayFixedPoint == format ||
+ GUID_WICPixelFormat32bppRGBE == format ||
+ GUID_WICPixelFormat24bppRGB == format ||
+ GUID_WICPixelFormat24bppBGR == format ||
+ GUID_WICPixelFormat32bppBGR == format ||
+ GUID_WICPixelFormat48bppRGB == format ||
+ GUID_WICPixelFormat48bppBGR == format ||
+ GUID_WICPixelFormat48bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat48bppBGRFixedPoint == format ||
+ GUID_WICPixelFormat48bppRGBHalf == format ||
+ GUID_WICPixelFormat64bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat64bppRGBHalf == format ||
+ GUID_WICPixelFormat96bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat128bppRGBFloat == format ||
+ GUID_WICPixelFormat128bppRGBFixedPoint == format ||
+ GUID_WICPixelFormat32bppRGB == format ||
+ GUID_WICPixelFormat64bppRGB == format ||
+ GUID_WICPixelFormat96bppRGBFloat == format ||
+ GUID_WICPixelFormat32bppCMYK == format ||
+ GUID_WICPixelFormat64bppCMYK == format ||
+ GUID_WICPixelFormat8bppY == format ||
+ GUID_WICPixelFormat8bppCb == format ||
+ GUID_WICPixelFormat8bppCr == format ||
+ GUID_WICPixelFormat16bppCbCr == format)
+ {
+ alphaType = kOpaque_SkAlphaType;
+ }
+
+ // FIXME: If we change the implementation to handle swizzling ourselves,
+ // we can support more output formats.
+ SkImageInfo info = SkImageInfo::MakeS32(width, height, alphaType);
+ return std::unique_ptr<SkImageGenerator>(
+ new ImageGeneratorWIC(info, imagingFactory.release(), imageSource.release(),
+ std::move(data)));
+}
+
+ImageGeneratorWIC::ImageGeneratorWIC(const SkImageInfo& info,
+ IWICImagingFactory* imagingFactory, IWICBitmapSource* imageSource, sk_sp<SkData> data)
+ : INHERITED(info)
+ , fImagingFactory(imagingFactory)
+ , fImageSource(imageSource)
+ , fData(std::move(data))
+{}
+
+sk_sp<SkData> ImageGeneratorWIC::onRefEncodedData() {
+ return fData;
+}
+
+bool ImageGeneratorWIC::onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes,
+ const Options&) {
+ if (kN32_SkColorType != info.colorType()) {
+ return false;
+ }
+
+ // Create a format converter.
+ SkTScopedComPtr<IWICFormatConverter> formatConverter;
+ HRESULT hr = fImagingFactory->CreateFormatConverter(&formatConverter);
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ GUID format = GUID_WICPixelFormat32bppPBGRA;
+ if (kUnpremul_SkAlphaType == info.alphaType()) {
+ format = GUID_WICPixelFormat32bppBGRA;
+ }
+
+ hr = formatConverter->Initialize(fImageSource.get(), format, WICBitmapDitherTypeNone, nullptr,
+ 0.0, WICBitmapPaletteTypeCustom);
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ // Treat the format converter as an image source.
+ SkTScopedComPtr<IWICBitmapSource> formatConverterSrc;
+ hr = formatConverter->QueryInterface(IID_PPV_ARGS(&formatConverterSrc));
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ // Set the destination pixels.
+ hr = formatConverterSrc->CopyPixels(nullptr, (UINT) rowBytes, (UINT) rowBytes * info.height(),
+ (BYTE*) pixels);
+
+ return SUCCEEDED(hr);
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp b/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp
new file mode 100644
index 0000000000..226bb126c1
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGenerator_none.cpp
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkImageGenerator.h"
+
+std::unique_ptr<SkImageGenerator> SkImageGenerator::MakeFromEncodedImpl(
+ sk_sp<SkData>, std::optional<SkAlphaType>) {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp b/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp
new file mode 100644
index 0000000000..e0f990d388
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkImageGenerator_skia.cpp
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkData.h"
+#include "src/codec/SkCodecImageGenerator.h"
+
+std::unique_ptr<SkImageGenerator> SkImageGenerator::MakeFromEncodedImpl(
+ sk_sp<SkData> data, std::optional<SkAlphaType> at) {
+ return SkCodecImageGenerator::MakeFromEncodedCodec(std::move(data), at);
+}
diff --git a/gfx/skia/skia/src/ports/SkMemory_malloc.cpp b/gfx/skia/skia/src/ports/SkMemory_malloc.cpp
new file mode 100644
index 0000000000..085c07acde
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkMemory_malloc.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkAssert.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkFeatures.h"
+#include "include/private/base/SkMalloc.h"
+
+#include <cstdlib>
+
+#if defined(SK_DEBUG) && defined(SK_BUILD_FOR_WIN)
+#include <intrin.h>
+// This is a super stable value and setting it here avoids pulling in all of windows.h.
+#ifndef FAST_FAIL_FATAL_APP_EXIT
+#define FAST_FAIL_FATAL_APP_EXIT 7
+#endif
+#endif
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ #define SK_DEBUGFAILF(fmt, ...) SK_ABORT(fmt"\n", __VA_ARGS__)
+#else
+ #define SK_DEBUGFAILF(fmt, ...) SkASSERT((SkDebugf(fmt"\n", __VA_ARGS__), false))
+#endif
+
+static inline void sk_out_of_memory(size_t size) {
+ SK_DEBUGFAILF("sk_out_of_memory (asked for %zu bytes)",
+ size);
+#if defined(SK_BUILD_FOR_AFL_FUZZ)
+ exit(1);
+#else
+ abort();
+#endif
+}
+
+static inline void* throw_on_failure(size_t size, void* p) {
+ if (size > 0 && p == nullptr) {
+ // If we've got a nullptr here, the only reason we should have failed is running out of RAM.
+ sk_out_of_memory(size);
+ }
+ return p;
+}
+
+bool sk_abort_is_enabled() { return true; }
+
+void sk_abort_no_print() {
+#if defined(SK_DEBUG) && defined(SK_BUILD_FOR_WIN)
+ __fastfail(FAST_FAIL_FATAL_APP_EXIT);
+#elif defined(__clang__)
+ __builtin_trap();
+#else
+ abort();
+#endif
+}
+
+void sk_out_of_memory(void) {
+ SkDEBUGFAIL("sk_out_of_memory");
+#if defined(SK_BUILD_FOR_AFL_FUZZ)
+ exit(1);
+#else
+ abort();
+#endif
+}
+
+void* sk_realloc_throw(void* addr, size_t size) {
+ if (size == 0) {
+ sk_free(addr);
+ return nullptr;
+ }
+ return throw_on_failure(size, realloc(addr, size));
+}
+
+void sk_free(void* p) {
+ // The guard here produces a performance improvement across many tests, and many platforms.
+ // Removing the check was tried in skia cl 588037.
+ if (p != nullptr) {
+ free(p);
+ }
+}
+
+void* sk_malloc_flags(size_t size, unsigned flags) {
+ void* p;
+ if (flags & SK_MALLOC_ZERO_INITIALIZE) {
+ p = calloc(size, 1);
+ } else {
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) && defined(__BIONIC__)
+ /* TODO: After b/169449588 is fixed, we will want to change this to restore
+ * original behavior instead of always disabling the flag.
+     * TODO: After b/158870657 is fixed and scudo is used globally, we can assert when
+     * an error is returned.
+ */
+ // malloc() generally doesn't initialize its memory and that's a huge security hole,
+ // so Android has replaced its malloc() with one that zeros memory,
+ // but that's a huge performance hit for HWUI, so turn it back off again.
+ (void)mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
+#endif
+ p = malloc(size);
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) && defined(__BIONIC__)
+ (void)mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
+#endif
+ }
+ if (flags & SK_MALLOC_THROW) {
+ return throw_on_failure(size, p);
+ } else {
+ return p;
+ }
+}
diff --git a/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp b/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp
new file mode 100644
index 0000000000..edf0b8e0ec
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkMemory_mozalloc.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2011 Google Inc.
+ * Copyright 2012 Mozilla Foundation
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkMalloc.h"
+
+#include "include/core/SkTypes.h"
+#include "mozilla/mozalloc.h"
+#include "mozilla/mozalloc_abort.h"
+#include "mozilla/mozalloc_oom.h"
+#include "prenv.h"
+
+bool sk_abort_is_enabled() {
+#ifdef SK_DEBUG
+ const char* env = PR_GetEnv("MOZ_SKIA_DISABLE_ASSERTS");
+ if (env && *env != '0') {
+ return false;
+ }
+#endif
+ return true;
+}
+
+void sk_abort_no_print() {
+ mozalloc_abort("Abort from sk_abort");
+}
+
+void sk_out_of_memory(void) {
+ SkDEBUGFAIL("sk_out_of_memory");
+ mozalloc_handle_oom(0);
+}
+
+void sk_free(void* p) {
+ free(p);
+}
+
+void* sk_realloc_throw(void* addr, size_t size) {
+ return moz_xrealloc(addr, size);
+}
+
+void* sk_malloc_flags(size_t size, unsigned flags) {
+ if (flags & SK_MALLOC_ZERO_INITIALIZE) {
+ return (flags & SK_MALLOC_THROW) ? moz_xcalloc(size, 1) : calloc(size, 1);
+ }
+ return (flags & SK_MALLOC_THROW) ? moz_xmalloc(size) : malloc(size);
+}
diff --git a/gfx/skia/skia/src/ports/SkNDKConversions.cpp b/gfx/skia/skia/src/ports/SkNDKConversions.cpp
new file mode 100644
index 0000000000..a977815db2
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkNDKConversions.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/ports/SkNDKConversions.h"
+
+namespace {
+static const struct {
+ SkColorType colorType;
+ AndroidBitmapFormat format;
+} gColorTypeTable[] = {
+ { kRGBA_8888_SkColorType, ANDROID_BITMAP_FORMAT_RGBA_8888 },
+ { kRGBA_F16_SkColorType, ANDROID_BITMAP_FORMAT_RGBA_F16 },
+ { kRGB_565_SkColorType, ANDROID_BITMAP_FORMAT_RGB_565 },
+ // Android allows using its alpha 8 format to get 8 bit gray pixels.
+ { kGray_8_SkColorType, ANDROID_BITMAP_FORMAT_A_8 },
+};
+
+} // anonymous namespace
+
+namespace SkNDKConversions {
+ AndroidBitmapFormat toAndroidBitmapFormat(SkColorType colorType) {
+ for (const auto& entry : gColorTypeTable) {
+ if (entry.colorType == colorType) {
+ return entry.format;
+ }
+ }
+ return ANDROID_BITMAP_FORMAT_NONE;
+ }
+
+ SkColorType toColorType(AndroidBitmapFormat format) {
+ for (const auto& entry : gColorTypeTable) {
+ if (entry.format == format) {
+ return entry.colorType;
+ }
+ }
+ return kUnknown_SkColorType;
+ }
+
+} // SkNDKConversions
+
+static constexpr skcms_TransferFunction k2Dot6 = {2.6f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
+
+static constexpr skcms_Matrix3x3 kDCIP3 = {{
+ {0.486143, 0.323835, 0.154234},
+ {0.226676, 0.710327, 0.0629966},
+ {0.000800549, 0.0432385, 0.78275},
+}};
+
+namespace {
+static const struct {
+ ADataSpace dataSpace;
+ skcms_TransferFunction transferFunction;
+ skcms_Matrix3x3 gamut;
+} gColorSpaceTable[] = {
+ { ADATASPACE_SRGB, SkNamedTransferFn::kSRGB, SkNamedGamut::kSRGB },
+ { ADATASPACE_SCRGB, SkNamedTransferFn::kSRGB, SkNamedGamut::kSRGB },
+ { ADATASPACE_SCRGB_LINEAR, SkNamedTransferFn::kLinear, SkNamedGamut::kSRGB },
+ { ADATASPACE_SRGB_LINEAR, SkNamedTransferFn::kLinear, SkNamedGamut::kSRGB },
+ { ADATASPACE_ADOBE_RGB, SkNamedTransferFn::k2Dot2, SkNamedGamut::kAdobeRGB },
+ { ADATASPACE_DISPLAY_P3, SkNamedTransferFn::kSRGB, SkNamedGamut::kDisplayP3 },
+ { ADATASPACE_BT2020, SkNamedTransferFn::kRec2020, SkNamedGamut::kRec2020 },
+ { ADATASPACE_BT709, SkNamedTransferFn::kRec2020, SkNamedGamut::kSRGB },
+ { ADATASPACE_DCI_P3, k2Dot6, kDCIP3 },
+};
+
+} // anonymous namespace
+
+static bool nearly_equal(float a, float b) {
+ return fabs(a - b) < .002f;
+}
+
+static bool nearly_equal(const skcms_TransferFunction& x, const skcms_TransferFunction& y) {
+ return nearly_equal(x.g, y.g)
+ && nearly_equal(x.a, y.a)
+ && nearly_equal(x.b, y.b)
+ && nearly_equal(x.c, y.c)
+ && nearly_equal(x.d, y.d)
+ && nearly_equal(x.e, y.e)
+ && nearly_equal(x.f, y.f);
+}
+
+static bool nearly_equal(const skcms_Matrix3x3& a, const skcms_Matrix3x3& b) {
+ for (int i = 0; i < 3; i++)
+ for (int j = 0; j < 3; j++) {
+ if (!nearly_equal(a.vals[i][j], b.vals[i][j])) return false;
+ }
+ return true;
+}
+
+namespace SkNDKConversions {
+ ADataSpace toDataSpace(SkColorSpace* cs) {
+ if (!cs) return ADATASPACE_SRGB;
+
+ skcms_TransferFunction fn;
+ skcms_Matrix3x3 gamut;
+ if (cs->isNumericalTransferFn(&fn) && cs->toXYZD50(&gamut)) {
+ for (const auto& entry : gColorSpaceTable) {
+ if (nearly_equal(gamut, entry.gamut) && nearly_equal(fn, entry.transferFunction)) {
+ return entry.dataSpace;
+ }
+ }
+ }
+ return ADATASPACE_UNKNOWN;
+ }
+
+ sk_sp<SkColorSpace> toColorSpace(ADataSpace dataSpace) {
+ for (const auto& entry : gColorSpaceTable) {
+ if (entry.dataSpace == dataSpace) {
+ return SkColorSpace::MakeRGB(entry.transferFunction, entry.gamut);
+ }
+ }
+ return nullptr;
+ }
+}
+
diff --git a/gfx/skia/skia/src/ports/SkNDKConversions.h b/gfx/skia/skia/src/ports/SkNDKConversions.h
new file mode 100644
index 0000000000..03b124a3af
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkNDKConversions.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNDKConversions_DEFINED
+#define SkNDKConversions_DEFINED
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImageInfo.h"
+
+#include <android/bitmap.h>
+#include <android/data_space.h>
+
+namespace SkNDKConversions {
+ // Supports a small subset of SkColorType. Others are treated as
+ // ANDROID_BITMAP_FORMAT_NONE.
+ AndroidBitmapFormat toAndroidBitmapFormat(SkColorType);
+
+ SkColorType toColorType(AndroidBitmapFormat);
+
+ // Treats null as ADATASPACE_SRGB.
+ ADataSpace toDataSpace(SkColorSpace*);
+
+ // Treats ADATASPACE_UNKNOWN as nullptr.
+ sk_sp<SkColorSpace> toColorSpace(ADataSpace);
+}
+
+#endif // SkNDKConversions_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkOSFile_ios.h b/gfx/skia/skia/src/ports/SkOSFile_ios.h
new file mode 100644
index 0000000000..67e7e7b959
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_ios.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSFile_ios_DEFINED
+#define SkOSFile_ios_DEFINED
+
+#include "include/core/SkString.h"
+
+#ifdef SK_BUILD_FOR_IOS
+#import <CoreFoundation/CoreFoundation.h>
+
+#include "include/ports/SkCFObject.h"
+
+static bool ios_get_path_in_bundle(const char path[], SkString* result) {
+ // Get a reference to the main bundle
+ CFBundleRef mainBundle = CFBundleGetMainBundle();
+
+ // Get a reference to the file's URL
+ // Use this to normalize the path
+ sk_cfp<CFURLRef> pathURL(CFURLCreateFromFileSystemRepresentation(/*allocator=*/nullptr,
+ (const UInt8*)path,
+ strlen(path),
+ /*isDirectory=*/false));
+ sk_cfp<CFStringRef> pathRef(CFURLCopyFileSystemPath(pathURL.get(), kCFURLPOSIXPathStyle));
+ // We use "data" as our subdirectory to match {{bundle_resources_dir}}/data in GN
+ // Unfortunately "resources" is not a valid top-level name in iOS, so we push it one level down
+ sk_cfp<CFURLRef> fileURL(CFBundleCopyResourceURL(mainBundle, pathRef.get(),
+ /*resourceType=*/nullptr, CFSTR("data")));
+ if (!fileURL) {
+ return false;
+ }
+ if (!result) {
+ return true;
+ }
+
+ // Convert the URL reference into a string reference
+ sk_cfp<CFStringRef> filePath(CFURLCopyFileSystemPath(fileURL.get(), kCFURLPOSIXPathStyle));
+
+ // Get the system encoding method
+ CFStringEncoding encodingMethod = CFStringGetSystemEncoding();
+
+ // Convert the string reference into an SkString
+ result->set(CFStringGetCStringPtr(filePath.get(), encodingMethod));
+ return true;
+}
+#endif
+
+#endif // SkOSFile_ios_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkOSFile_posix.cpp b/gfx/skia/skia/src/ports/SkOSFile_posix.cpp
new file mode 100644
index 0000000000..c8ba97412c
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_posix.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkOSFile.h"
+
+#include <dirent.h>
+#include <new>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifdef SK_BUILD_FOR_IOS
+#include "src/ports/SkOSFile_ios.h"
+#endif
+
+void sk_fsync(FILE* f) {
+#if !defined(SK_BUILD_FOR_ANDROID) && !defined(__UCLIBC__) && !defined(_NEWLIB_VERSION)
+ int fd = fileno(f);
+ fsync(fd);
+#endif
+}
+
+bool sk_exists(const char *path, SkFILE_Flags flags) {
+ int mode = F_OK;
+ if (flags & kRead_SkFILE_Flag) {
+ mode |= R_OK;
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ mode |= W_OK;
+ }
+#ifdef SK_BUILD_FOR_IOS
+ // if the default path fails, check the bundle (but only if read-only)
+ if (0 == access(path, mode)) {
+ return true;
+ } else {
+ return (kRead_SkFILE_Flag == flags && ios_get_path_in_bundle(path, nullptr));
+ }
+#else
+ return (0 == access(path, mode));
+#endif
+}
+
+typedef struct {
+ dev_t dev;
+ ino_t ino;
+} SkFILEID;
+
+static bool sk_ino(FILE* a, SkFILEID* id) {
+ int fd = fileno(a);
+ if (fd < 0) {
+ return 0;
+ }
+ struct stat status;
+ if (0 != fstat(fd, &status)) {
+ return 0;
+ }
+ id->dev = status.st_dev;
+ id->ino = status.st_ino;
+ return true;
+}
+
+bool sk_fidentical(FILE* a, FILE* b) {
+ SkFILEID aID, bID;
+ return sk_ino(a, &aID) && sk_ino(b, &bID)
+ && aID.ino == bID.ino
+ && aID.dev == bID.dev;
+}
+
+void sk_fmunmap(const void* addr, size_t length) {
+ munmap(const_cast<void*>(addr), length);
+}
+
+void* sk_fdmmap(int fd, size_t* size) {
+ struct stat status;
+ if (0 != fstat(fd, &status)) {
+ return nullptr;
+ }
+ if (!S_ISREG(status.st_mode)) {
+ return nullptr;
+ }
+ if (!SkTFitsIn<size_t>(status.st_size)) {
+ return nullptr;
+ }
+ size_t fileSize = static_cast<size_t>(status.st_size);
+
+ void* addr = mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (MAP_FAILED == addr) {
+ return nullptr;
+ }
+
+ *size = fileSize;
+ return addr;
+}
+
+int sk_fileno(FILE* f) {
+ return fileno(f);
+}
+
+void* sk_fmmap(FILE* f, size_t* size) {
+ int fd = sk_fileno(f);
+ if (fd < 0) {
+ return nullptr;
+ }
+
+ return sk_fdmmap(fd, size);
+}
+
+size_t sk_qread(FILE* file, void* buffer, size_t count, size_t offset) {
+ int fd = sk_fileno(file);
+ if (fd < 0) {
+ return SIZE_MAX;
+ }
+ ssize_t bytesRead = pread(fd, buffer, count, offset);
+ if (bytesRead < 0) {
+ return SIZE_MAX;
+ }
+ return bytesRead;
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+struct SkOSFileIterData {
+ SkOSFileIterData() : fDIR(nullptr) { }
+ DIR* fDIR;
+ SkString fPath, fSuffix;
+};
+static_assert(sizeof(SkOSFileIterData) <= SkOSFile::Iter::kStorageSize, "not_enough_space");
+
+SkOSFile::Iter::Iter() { new (fSelf) SkOSFileIterData; }
+
+SkOSFile::Iter::Iter(const char path[], const char suffix[]) {
+ new (fSelf) SkOSFileIterData;
+ this->reset(path, suffix);
+}
+
+SkOSFile::Iter::~Iter() {
+ SkOSFileIterData& self = *reinterpret_cast<SkOSFileIterData*>(fSelf);
+ if (self.fDIR) {
+ ::closedir(self.fDIR);
+ }
+ self.~SkOSFileIterData();
+}
+
+void SkOSFile::Iter::reset(const char path[], const char suffix[]) {
+ SkOSFileIterData& self = *reinterpret_cast<SkOSFileIterData*>(fSelf);
+ if (self.fDIR) {
+ ::closedir(self.fDIR);
+ self.fDIR = nullptr;
+ }
+ self.fPath.set(path);
+
+ if (path) {
+ self.fDIR = ::opendir(path);
+#ifdef SK_BUILD_FOR_IOS
+ // check bundle for directory
+ if (!self.fDIR && ios_get_path_in_bundle(path, &self.fPath)) {
+ self.fDIR = ::opendir(self.fPath.c_str());
+ }
+#endif
+ self.fSuffix.set(suffix);
+ } else {
+ self.fSuffix.reset();
+ }
+}
+
+// returns true if suffix is empty, or if str ends with suffix
+static bool issuffixfor(const SkString& suffix, const char str[]) {
+ size_t suffixLen = suffix.size();
+ size_t strLen = strlen(str);
+
+ return strLen >= suffixLen &&
+ memcmp(suffix.c_str(), str + strLen - suffixLen, suffixLen) == 0;
+}
+
+bool SkOSFile::Iter::next(SkString* name, bool getDir) {
+ SkOSFileIterData& self = *reinterpret_cast<SkOSFileIterData*>(fSelf);
+ if (self.fDIR) {
+ dirent* entry;
+
+ while ((entry = ::readdir(self.fDIR)) != nullptr) {
+ struct stat s;
+ SkString str(self.fPath);
+
+ if (!str.endsWith("/") && !str.endsWith("\\")) {
+ str.append("/");
+ }
+ str.append(entry->d_name);
+
+ if (0 == stat(str.c_str(), &s)) {
+ if (getDir) {
+ if (s.st_mode & S_IFDIR) {
+ break;
+ }
+ } else {
+ if (!(s.st_mode & S_IFDIR) && issuffixfor(self.fSuffix, entry->d_name)) {
+ break;
+ }
+ }
+ }
+ }
+ if (entry) { // we broke out with a file
+ if (name) {
+ name->set(entry->d_name);
+ }
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp b/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp
new file mode 100644
index 0000000000..895802ec5a
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_stdio.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkOSFile.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <sys/stat.h>
+
+#ifdef _WIN32
+#include <direct.h>
+#include <io.h>
+#include <vector>
+#include "src/base/SkUTF.h"
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include "src/ports/SkOSFile_ios.h"
+#endif
+
+#ifdef _WIN32
+static bool is_ascii(const char* s) {
+ while (char v = *s++) {
+ if ((v & 0x80) != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static FILE* fopen_win(const char* utf8path, const char* perm) {
+ if (is_ascii(utf8path)) {
+ return fopen(utf8path, perm);
+ }
+
+ const char* ptr = utf8path;
+ const char* end = utf8path + strlen(utf8path);
+ size_t n = 0;
+ while (ptr < end) {
+ SkUnichar u = SkUTF::NextUTF8(&ptr, end);
+ if (u < 0) {
+ return nullptr; // malformed UTF-8
+ }
+ n += SkUTF::ToUTF16(u);
+ }
+ std::vector<uint16_t> wchars(n + 1);
+ uint16_t* out = wchars.data();
+ ptr = utf8path;
+ while (ptr < end) {
+ out += SkUTF::ToUTF16(SkUTF::NextUTF8(&ptr, end), out);
+ }
+ SkASSERT(out == &wchars[n]);
+ *out = 0; // final null
+ wchar_t wperms[4] = {(wchar_t)perm[0], (wchar_t)perm[1], (wchar_t)perm[2], (wchar_t)perm[3]};
+ return _wfopen((wchar_t*)wchars.data(), wperms);
+}
+#endif
+
+FILE* sk_fopen(const char path[], SkFILE_Flags flags) {
+ char perm[4] = {0, 0, 0, 0};
+ char* p = perm;
+
+ if (flags & kRead_SkFILE_Flag) {
+ *p++ = 'r';
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ *p++ = 'w';
+ }
+ *p = 'b';
+
+ FILE* file = nullptr;
+#ifdef _WIN32
+ file = fopen_win(path, perm);
+#else
+ file = fopen(path, perm);
+#endif
+#ifdef SK_BUILD_FOR_IOS
+ // if not found in default path and read-only, try to open from bundle
+ if (!file && kRead_SkFILE_Flag == flags) {
+ SkString bundlePath;
+ if (ios_get_path_in_bundle(path, &bundlePath)) {
+ file = fopen(bundlePath.c_str(), perm);
+ }
+ }
+#endif
+
+ if (nullptr == file && (flags & kWrite_SkFILE_Flag)) {
+ SkDEBUGF("sk_fopen: fopen(\"%s\", \"%s\") returned nullptr (errno:%d): %s\n",
+ path, perm, errno, strerror(errno));
+ }
+ return file;
+}
+
+size_t sk_fgetsize(FILE* f) {
+ SkASSERT(f);
+
+ long curr = ftell(f); // remember where we are
+ if (curr < 0) {
+ return 0;
+ }
+
+ fseek(f, 0, SEEK_END); // go to the end
+ long size = ftell(f); // record the size
+ if (size < 0) {
+ size = 0;
+ }
+
+ fseek(f, curr, SEEK_SET); // go back to our prev location
+ return size;
+}
+
+size_t sk_fwrite(const void* buffer, size_t byteCount, FILE* f) {
+ SkASSERT(f);
+ return fwrite(buffer, 1, byteCount, f);
+}
+
+void sk_fflush(FILE* f) {
+ SkASSERT(f);
+ fflush(f);
+}
+
+size_t sk_ftell(FILE* f) {
+ long curr = ftell(f);
+ if (curr < 0) {
+ return 0;
+ }
+ return curr;
+}
+
+void sk_fclose(FILE* f) {
+ if (f) {
+ fclose(f);
+ }
+}
+
+bool sk_isdir(const char *path) {
+ struct stat status;
+ if (0 != stat(path, &status)) {
+#ifdef SK_BUILD_FOR_IOS
+ // check the bundle directory if not in default path
+ SkString bundlePath;
+ if (ios_get_path_in_bundle(path, &bundlePath)) {
+ if (0 != stat(bundlePath.c_str(), &status)) {
+ return false;
+ }
+ }
+#else
+ return false;
+#endif
+ }
+ return SkToBool(status.st_mode & S_IFDIR);
+}
+
+bool sk_mkdir(const char* path) {
+ if (sk_isdir(path)) {
+ return true;
+ }
+ if (sk_exists(path)) {
+ fprintf(stderr,
+ "sk_mkdir: path '%s' already exists but is not a directory\n",
+ path);
+ return false;
+ }
+
+ int retval;
+#ifdef _WIN32
+ retval = _mkdir(path);
+#else
+ retval = mkdir(path, 0777);
+ if (retval) {
+ perror("mkdir() failed with error: ");
+ }
+#endif
+ return 0 == retval;
+}
diff --git a/gfx/skia/skia/src/ports/SkOSFile_win.cpp b/gfx/skia/skia/src/ports/SkOSFile_win.cpp
new file mode 100644
index 0000000000..021ed06395
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSFile_win.cpp
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "src/base/SkLeanWindows.h"
+#include "src/core/SkOSFile.h"
+#include "src/core/SkStringUtils.h"
+
+#include <io.h>
+#include <new>
+#include <stdio.h>
+#include <sys/stat.h>
+
+void sk_fsync(FILE* f) {
+ _commit(sk_fileno(f));
+}
+
+bool sk_exists(const char *path, SkFILE_Flags flags) {
+ int mode = 0; // existence
+ if (flags & kRead_SkFILE_Flag) {
+ mode |= 4; // read
+ }
+ if (flags & kWrite_SkFILE_Flag) {
+ mode |= 2; // write
+ }
+ return (0 == _access(path, mode));
+}
+
+typedef struct {
+ ULONGLONG fVolume;
+ ULONGLONG fLsbSize;
+ ULONGLONG fMsbSize;
+} SkFILEID;
+
+static bool sk_ino(FILE* f, SkFILEID* id) {
+ int fileno = _fileno((FILE*)f);
+ if (fileno < 0) {
+ return false;
+ }
+
+ HANDLE file = (HANDLE)_get_osfhandle(fileno);
+ if (INVALID_HANDLE_VALUE == file) {
+ return false;
+ }
+
+ //TODO: call GetFileInformationByHandleEx on Vista and later with FileIdInfo.
+ BY_HANDLE_FILE_INFORMATION info;
+ if (0 == GetFileInformationByHandle(file, &info)) {
+ return false;
+ }
+ id->fVolume = info.dwVolumeSerialNumber;
+ id->fLsbSize = info.nFileIndexLow + (((ULONGLONG)info.nFileIndexHigh) << 32);
+ id->fMsbSize = 0;
+
+ return true;
+}
+
+bool sk_fidentical(FILE* a, FILE* b) {
+ SkFILEID aID, bID;
+ return sk_ino(a, &aID) && sk_ino(b, &bID)
+ && aID.fLsbSize == bID.fLsbSize
+ && aID.fMsbSize == bID.fMsbSize
+ && aID.fVolume == bID.fVolume;
+}
+
+class SkAutoNullKernelHandle : SkNoncopyable {
+public:
+ SkAutoNullKernelHandle(const HANDLE handle) : fHandle(handle) { }
+ ~SkAutoNullKernelHandle() { CloseHandle(fHandle); }
+ operator HANDLE() const { return fHandle; }
+ bool isValid() const { return SkToBool(fHandle); }
+private:
+ HANDLE fHandle;
+};
+typedef SkAutoNullKernelHandle SkAutoWinMMap;
+
+void sk_fmunmap(const void* addr, size_t) {
+ UnmapViewOfFile(addr);
+}
+
+void* sk_fdmmap(int fileno, size_t* length) {
+ HANDLE file = (HANDLE)_get_osfhandle(fileno);
+ if (INVALID_HANDLE_VALUE == file) {
+ return nullptr;
+ }
+
+ LARGE_INTEGER fileSize;
+ if (0 == GetFileSizeEx(file, &fileSize)) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not get file size.") to report.
+ return nullptr;
+ }
+ if (!SkTFitsIn<size_t>(fileSize.QuadPart)) {
+ return nullptr;
+ }
+
+ SkAutoWinMMap mmap(CreateFileMapping(file, nullptr, PAGE_READONLY, 0, 0, nullptr));
+ if (!mmap.isValid()) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not create file mapping.") to report.
+ return nullptr;
+ }
+
+ // Eventually call UnmapViewOfFile
+ void* addr = MapViewOfFile(mmap, FILE_MAP_READ, 0, 0, 0);
+ if (nullptr == addr) {
+ //TODO: use SK_TRACEHR(GetLastError(), "Could not map view of file.") to report.
+ return nullptr;
+ }
+
+ *length = static_cast<size_t>(fileSize.QuadPart);
+ return addr;
+}
+
+int sk_fileno(FILE* f) {
+ return _fileno((FILE*)f);
+}
+
+void* sk_fmmap(FILE* f, size_t* length) {
+ int fileno = sk_fileno(f);
+ if (fileno < 0) {
+ return nullptr;
+ }
+
+ return sk_fdmmap(fileno, length);
+}
+
+size_t sk_qread(FILE* file, void* buffer, size_t count, size_t offset) {
+    int fileno = sk_fileno(file);
+    HANDLE fileHandle = (HANDLE)_get_osfhandle(fileno);
+    // Validate the OS handle (not the FILE*) before handing it to ReadFile.
+    if (INVALID_HANDLE_VALUE == fileHandle) {
+        return SIZE_MAX;
+    }
+
+    OVERLAPPED overlapped;
+    memset(&overlapped, 0, sizeof(overlapped));
+    ULARGE_INTEGER winOffset;
+    winOffset.QuadPart = offset;
+    overlapped.Offset = winOffset.LowPart;
+    overlapped.OffsetHigh = winOffset.HighPart;
+
+    if (!SkTFitsIn<DWORD>(count)) {
+        count = std::numeric_limits<DWORD>::max();
+    }
+
+    DWORD bytesRead;
+    if (ReadFile(fileHandle, buffer, static_cast<DWORD>(count), &bytesRead, &overlapped)) {
+        return bytesRead;
+    }
+    if (GetLastError() == ERROR_HANDLE_EOF) {
+        return 0;
+    }
+    return SIZE_MAX;
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+struct SkOSFileIterData {
+ SkOSFileIterData() : fHandle(0), fPath16(nullptr) { }
+ HANDLE fHandle;
+ uint16_t* fPath16;
+};
+static_assert(sizeof(SkOSFileIterData) <= SkOSFile::Iter::kStorageSize, "not_enough_space");
+
+static uint16_t* concat_to_16(const char src[], const char suffix[]) {
+ size_t i, len = strlen(src);
+ size_t len2 = 3 + (suffix ? strlen(suffix) : 0);
+ uint16_t* dst = (uint16_t*)sk_malloc_throw((len + len2) * sizeof(uint16_t));
+
+ for (i = 0; i < len; i++) {
+ dst[i] = src[i];
+ }
+
+ if (i > 0 && dst[i-1] != '/') {
+ dst[i++] = '/';
+ }
+ dst[i++] = '*';
+
+ if (suffix) {
+ while (*suffix) {
+ dst[i++] = *suffix++;
+ }
+ }
+ dst[i] = 0;
+ SkASSERT(i + 1 <= len + len2);
+
+ return dst;
+}
+
+SkOSFile::Iter::Iter() { new (fSelf) SkOSFileIterData; }
+
+SkOSFile::Iter::Iter(const char path[], const char suffix[]) {
+ new (fSelf) SkOSFileIterData;
+ this->reset(path, suffix);
+}
+
+SkOSFile::Iter::~Iter() {
+ SkOSFileIterData& self = *reinterpret_cast<SkOSFileIterData*>(fSelf);
+ sk_free(self.fPath16);
+ if (self.fHandle) {
+ ::FindClose(self.fHandle);
+ }
+ self.~SkOSFileIterData();
+}
+
+void SkOSFile::Iter::reset(const char path[], const char suffix[]) {
+ SkOSFileIterData& self = *reinterpret_cast<SkOSFileIterData*>(fSelf);
+ if (self.fHandle) {
+ ::FindClose(self.fHandle);
+ self.fHandle = 0;
+ }
+ if (nullptr == path) {
+ path = "";
+ }
+
+ sk_free(self.fPath16);
+ self.fPath16 = concat_to_16(path, suffix);
+}
+
+static bool is_magic_dir(const uint16_t dir[]) {
+ // return true for "." and ".."
+ return dir[0] == '.' && (dir[1] == 0 || (dir[1] == '.' && dir[2] == 0));
+}
+
+static bool get_the_file(HANDLE handle, SkString* name, WIN32_FIND_DATAW* dataPtr, bool getDir) {
+ WIN32_FIND_DATAW data;
+
+ if (nullptr == dataPtr) {
+ if (::FindNextFileW(handle, &data))
+ dataPtr = &data;
+ else
+ return false;
+ }
+
+ for (;;) {
+ if (getDir) {
+ if ((dataPtr->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) &&
+ !is_magic_dir((uint16_t*)dataPtr->cFileName))
+ {
+ break;
+ }
+ } else {
+ if (!(dataPtr->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ break;
+ }
+ }
+ if (!::FindNextFileW(handle, dataPtr)) {
+ return false;
+ }
+ }
+ // if we get here, we've found a file/dir
+ if (name) {
+ const uint16_t* utf16name = (const uint16_t*)dataPtr->cFileName;
+ const uint16_t* ptr = utf16name;
+ while (*ptr != 0) { ++ptr; }
+ *name = SkStringFromUTF16(utf16name, ptr - utf16name);
+ }
+ return true;
+}
+
+bool SkOSFile::Iter::next(SkString* name, bool getDir) {
+ SkOSFileIterData& self = *reinterpret_cast<SkOSFileIterData*>(fSelf);
+ WIN32_FIND_DATAW data;
+ WIN32_FIND_DATAW* dataPtr = nullptr;
+
+ if (self.fHandle == 0) { // our first time
+ if (self.fPath16 == nullptr || *self.fPath16 == 0) { // check for no path
+ return false;
+ }
+
+ self.fHandle = ::FindFirstFileW((LPCWSTR)self.fPath16, &data);
+ if (self.fHandle != 0 && self.fHandle != (HANDLE)~0) {
+ dataPtr = &data;
+ }
+ }
+ return self.fHandle != (HANDLE)~0 && get_the_file(self.fHandle, name, dataPtr, getDir);
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary.h b/gfx/skia/skia/src/ports/SkOSLibrary.h
new file mode 100644
index 0000000000..d7f696f09b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSLibrary_DEFINED
+#define SkOSLibrary_DEFINED
+
+void* SkLoadDynamicLibrary(const char* libraryName);
+void* SkGetProcedureAddress(void* library, const char* functionName);
+bool SkFreeDynamicLibrary(void* library);
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp b/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp
new file mode 100644
index 0000000000..0bb020064f
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary_posix.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if !defined(SK_BUILD_FOR_WIN)
+
+#include "src/ports/SkOSLibrary.h"
+
+#include <dlfcn.h>
+
+void* SkLoadDynamicLibrary(const char* libraryName) {
+ return dlopen(libraryName, RTLD_LAZY);
+}
+
+void* SkGetProcedureAddress(void* library, const char* functionName) {
+ return dlsym(library, functionName);
+}
+
+bool SkFreeDynamicLibrary(void* library) {
+ return dlclose(library) == 0;
+}
+
+#endif//!defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp b/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp
new file mode 100644
index 0000000000..b5fdec4c6d
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkOSLibrary_win.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/base/SkLeanWindows.h"
+#include "src/ports/SkOSLibrary.h"
+
+void* SkLoadDynamicLibrary(const char* libraryName) {
+ return LoadLibraryA(libraryName);
+}
+
+void* SkGetProcedureAddress(void* library, const char* functionName) {
+ return reinterpret_cast<void*>(::GetProcAddress((HMODULE)library, functionName));
+}
+
+bool SkFreeDynamicLibrary(void* library) {
+ return FreeLibrary((HMODULE)library);
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp b/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp
new file mode 100644
index 0000000000..2c2d8520f4
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkRemotableFontMgr_win_dw.cpp
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/win/SkDWriteNTDDI_VERSION.h"
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/ports/SkRemotableFontMgr.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTArray.h"
+#include "src/base/SkUTF.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkDWriteFontFileStream.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkObjBase.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+
+class SK_API SkRemotableFontMgr_DirectWrite : public SkRemotableFontMgr {
+private:
+ struct DataId {
+ IUnknown* fLoader; // In COM only IUnknown pointers may be safely used for identity.
+ void* fKey;
+ UINT32 fKeySize;
+
+ DataId() { }
+
+ DataId(DataId&& that) : fLoader(that.fLoader), fKey(that.fKey), fKeySize(that.fKeySize) {
+ that.fLoader = nullptr;
+ that.fKey = nullptr;
+ SkDEBUGCODE(that.fKeySize = 0xFFFFFFFF;)
+ }
+
+ ~DataId() {
+ if (fLoader) {
+ fLoader->Release();
+ }
+ sk_free(fKey);
+ }
+ };
+
+ mutable SkTArray<DataId> fDataIdCache;
+ mutable SkMutex fDataIdCacheMutex;
+
+ int FindOrAdd(IDWriteFontFileLoader* fontFileLoader,
+ const void* refKey, UINT32 refKeySize) const
+ {
+ SkTScopedComPtr<IUnknown> fontFileLoaderId;
+ HR_GENERAL(fontFileLoader->QueryInterface(&fontFileLoaderId),
+ "Failed to re-convert to IDWriteFontFileLoader.",
+ SkFontIdentity::kInvalidDataId);
+
+ SkAutoMutexExclusive ama(fDataIdCacheMutex);
+ int count = fDataIdCache.size();
+ int i;
+ for (i = 0; i < count; ++i) {
+ const DataId& current = fDataIdCache[i];
+ if (fontFileLoaderId.get() == current.fLoader &&
+ refKeySize == current.fKeySize &&
+ 0 == memcmp(refKey, current.fKey, refKeySize))
+ {
+ return i;
+ }
+ }
+ DataId& added = fDataIdCache.push_back();
+ added.fLoader = fontFileLoaderId.release(); // Ref is passed.
+ added.fKey = sk_malloc_throw(refKeySize);
+ memcpy(added.fKey, refKey, refKeySize);
+ added.fKeySize = refKeySize;
+
+ return i;
+ }
+
+public:
+
+
+ /** localeNameLength must include the null terminator. */
+ SkRemotableFontMgr_DirectWrite(IDWriteFontCollection* fontCollection,
+ WCHAR* localeName, int localeNameLength)
+ : fFontCollection(SkRefComPtr(fontCollection))
+ , fLocaleName(localeNameLength)
+ {
+ memcpy(fLocaleName.get(), localeName, localeNameLength * sizeof(WCHAR));
+ }
+
+ HRESULT FontToIdentity(IDWriteFont* font, SkFontIdentity* fontId) const {
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRM(font->CreateFontFace(&fontFace), "Could not create font face.");
+
+ UINT32 numFiles;
+ HR(fontFace->GetFiles(&numFiles, nullptr));
+ if (numFiles > 1) {
+ return E_FAIL;
+ }
+
+ // data id
+ SkTScopedComPtr<IDWriteFontFile> fontFile;
+ HR(fontFace->GetFiles(&numFiles, &fontFile));
+
+ SkTScopedComPtr<IDWriteFontFileLoader> fontFileLoader;
+ HR(fontFile->GetLoader(&fontFileLoader));
+
+ const void* refKey;
+ UINT32 refKeySize;
+ HR(fontFile->GetReferenceKey(&refKey, &refKeySize));
+
+ fontId->fDataId = FindOrAdd(fontFileLoader.get(), refKey, refKeySize);
+
+ // index
+ fontId->fTtcIndex = fontFace->GetIndex();
+
+ // style
+ fontId->fFontStyle = get_style(font);
+ return S_OK;
+ }
+
+ SkRemotableFontIdentitySet* getIndex(int familyIndex) const override {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRNM(fFontCollection->GetFontFamily(familyIndex, &fontFamily),
+ "Could not get requested family.");
+
+ int count = fontFamily->GetFontCount();
+ SkFontIdentity* fontIds;
+ sk_sp<SkRemotableFontIdentitySet> fontIdSet(
+ new SkRemotableFontIdentitySet(count, &fontIds));
+ for (int fontIndex = 0; fontIndex < count; ++fontIndex) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRNM(fontFamily->GetFont(fontIndex, &font), "Could not get font.");
+
+ HRN(FontToIdentity(font.get(), &fontIds[fontIndex]));
+ }
+ return fontIdSet.release();
+ }
+
+ virtual SkFontIdentity matchIndexStyle(int familyIndex,
+ const SkFontStyle& pattern) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HR_GENERAL(fFontCollection->GetFontFamily(familyIndex, &fontFamily),
+ "Could not get requested family.",
+ identity);
+
+ const DWriteStyle dwStyle(pattern);
+ SkTScopedComPtr<IDWriteFont> font;
+ HR_GENERAL(fontFamily->GetFirstMatchingFont(dwStyle.fWeight, dwStyle.fWidth,
+ dwStyle.fSlant, &font),
+ "Could not match font in family.",
+ identity);
+
+ HR_GENERAL(FontToIdentity(font.get(), &identity), nullptr, identity);
+
+ return identity;
+ }
+
+ static HRESULT getDefaultFontFamilyName(SkSMallocWCHAR* name) {
+ NONCLIENTMETRICSW metrics;
+ metrics.cbSize = sizeof(metrics);
+ if (0 == SystemParametersInfoW(SPI_GETNONCLIENTMETRICS,
+ sizeof(metrics),
+ &metrics,
+ 0)) {
+ return E_UNEXPECTED;
+ }
+
+ size_t len = wcsnlen_s(metrics.lfMessageFont.lfFaceName, LF_FACESIZE) + 1;
+ if (0 != wcsncpy_s(name->reset(len), len, metrics.lfMessageFont.lfFaceName, _TRUNCATE)) {
+ return E_UNEXPECTED;
+ }
+
+ return S_OK;
+ }
+
+ SkRemotableFontIdentitySet* matchName(const char familyName[]) const override {
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName),
+ nullptr, SkRemotableFontIdentitySet::NewEmpty());
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName),
+ nullptr, SkRemotableFontIdentitySet::NewEmpty());
+ }
+
+ UINT32 index;
+ BOOL exists;
+ HR_GENERAL(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.",
+ SkRemotableFontIdentitySet::NewEmpty());
+ if (!exists) {
+ return SkRemotableFontIdentitySet::NewEmpty();
+ }
+
+ return this->getIndex(index);
+ }
+
+ virtual SkFontIdentity matchNameStyle(const char familyName[],
+ const SkFontStyle& style) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName), nullptr, identity);
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName), nullptr, identity);
+ }
+
+ UINT32 index;
+ BOOL exists;
+ HR_GENERAL(fFontCollection->FindFamilyName(dwFamilyName.get(), &index, &exists),
+ "Failed while finding family by name.",
+ identity);
+ if (!exists) {
+ return identity;
+ }
+
+ return this->matchIndexStyle(index, style);
+ }
+
+ class FontFallbackRenderer : public IDWriteTextRenderer {
+ public:
+ FontFallbackRenderer(const SkRemotableFontMgr_DirectWrite* outer, UINT32 character)
+ : fRefCount(1), fOuter(SkSafeRef(outer)), fCharacter(character) {
+ fIdentity.fDataId = SkFontIdentity::kInvalidDataId;
+ }
+
+ virtual ~FontFallbackRenderer() { }
+
+ // IDWriteTextRenderer methods
+ SK_STDMETHODIMP DrawGlyphRun(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_MEASURING_MODE measuringMode,
+ DWRITE_GLYPH_RUN const* glyphRun,
+ DWRITE_GLYPH_RUN_DESCRIPTION const* glyphRunDescription,
+ IUnknown* clientDrawingEffect) override
+ {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRM(fOuter->fFontCollection->GetFontFromFontFace(glyphRun->fontFace, &font),
+ "Could not get font from font face.");
+
+ // It is possible that the font passed does not actually have the requested character,
+ // due to no font being found and getting the fallback font.
+ // Check that the font actually contains the requested character.
+ BOOL exists;
+ HRM(font->HasCharacter(fCharacter, &exists), "Could not find character.");
+
+ if (exists) {
+ HR(fOuter->FontToIdentity(font.get(), &fIdentity));
+ }
+
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP DrawUnderline(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_UNDERLINE const* underline,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ SK_STDMETHODIMP DrawStrikethrough(
+ void* clientDrawingContext,
+ FLOAT baselineOriginX,
+ FLOAT baselineOriginY,
+ DWRITE_STRIKETHROUGH const* strikethrough,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ SK_STDMETHODIMP DrawInlineObject(
+ void* clientDrawingContext,
+ FLOAT originX,
+ FLOAT originY,
+ IDWriteInlineObject* inlineObject,
+ BOOL isSideways,
+ BOOL isRightToLeft,
+ IUnknown* clientDrawingEffect) override
+ { return E_NOTIMPL; }
+
+ // IDWritePixelSnapping methods
+ SK_STDMETHODIMP IsPixelSnappingDisabled(
+ void* clientDrawingContext,
+ BOOL* isDisabled) override
+ {
+ *isDisabled = FALSE;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetCurrentTransform(
+ void* clientDrawingContext,
+ DWRITE_MATRIX* transform) override
+ {
+ const DWRITE_MATRIX ident = {1.0, 0.0, 0.0, 1.0, 0.0, 0.0};
+ *transform = ident;
+ return S_OK;
+ }
+
+ SK_STDMETHODIMP GetPixelsPerDip(
+ void* clientDrawingContext,
+ FLOAT* pixelsPerDip) override
+ {
+ *pixelsPerDip = 1.0f;
+ return S_OK;
+ }
+
+ // IUnknown methods
+ SK_STDMETHODIMP_(ULONG) AddRef() override {
+ return InterlockedIncrement(&fRefCount);
+ }
+
+ SK_STDMETHODIMP_(ULONG) Release() override {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+ }
+
+ SK_STDMETHODIMP QueryInterface(
+ IID const& riid, void** ppvObject) override
+ {
+ if (__uuidof(IUnknown) == riid ||
+ __uuidof(IDWritePixelSnapping) == riid ||
+ __uuidof(IDWriteTextRenderer) == riid)
+ {
+ *ppvObject = this;
+ this->AddRef();
+ return S_OK;
+ }
+ *ppvObject = nullptr;
+ return E_FAIL;
+ }
+
+ const SkFontIdentity FallbackIdentity() { return fIdentity; }
+
+ protected:
+ ULONG fRefCount;
+ sk_sp<const SkRemotableFontMgr_DirectWrite> fOuter;
+ UINT32 fCharacter;
+ SkFontIdentity fIdentity;
+ };
+
+ virtual SkFontIdentity matchNameStyleCharacter(const char familyName[],
+ const SkFontStyle& pattern,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar character) const override
+ {
+ SkFontIdentity identity = { SkFontIdentity::kInvalidDataId };
+
+ IDWriteFactory* dwFactory = sk_get_dwrite_factory();
+ if (nullptr == dwFactory) {
+ return identity;
+ }
+
+ // TODO: use IDWriteFactory2::GetSystemFontFallback when available.
+
+ const DWriteStyle dwStyle(pattern);
+
+ SkSMallocWCHAR dwFamilyName;
+ if (nullptr == familyName) {
+ HR_GENERAL(getDefaultFontFamilyName(&dwFamilyName), nullptr, identity);
+ } else {
+ HR_GENERAL(sk_cstring_to_wchar(familyName, &dwFamilyName), nullptr, identity);
+ }
+
+ const SkSMallocWCHAR* dwBcp47;
+ SkSMallocWCHAR dwBcp47Local;
+ if (bcp47Count < 1) {
+ dwBcp47 = &fLocaleName;
+ } else {
+ //TODO: support fallback stack.
+ HR_GENERAL(sk_cstring_to_wchar(bcp47[bcp47Count-1], &dwBcp47Local), nullptr, identity);
+ dwBcp47 = &dwBcp47Local;
+ }
+
+ SkTScopedComPtr<IDWriteTextFormat> fallbackFormat;
+ HR_GENERAL(dwFactory->CreateTextFormat(dwFamilyName,
+ fFontCollection.get(),
+ dwStyle.fWeight,
+ dwStyle.fSlant,
+ dwStyle.fWidth,
+ 72.0f,
+ *dwBcp47,
+ &fallbackFormat),
+ "Could not create text format.",
+ identity);
+
+ WCHAR str[16];
+ UINT32 strLen = static_cast<UINT32>(
+ SkUTF::ToUTF16(character, reinterpret_cast<uint16_t*>(str)));
+ SkTScopedComPtr<IDWriteTextLayout> fallbackLayout;
+ HR_GENERAL(dwFactory->CreateTextLayout(str, strLen, fallbackFormat.get(),
+ 200.0f, 200.0f,
+ &fallbackLayout),
+ "Could not create text layout.",
+ identity);
+
+ SkTScopedComPtr<FontFallbackRenderer> fontFallbackRenderer(
+ new FontFallbackRenderer(this, character));
+
+ HR_GENERAL(fallbackLayout->Draw(nullptr, fontFallbackRenderer.get(), 50.0f, 50.0f),
+ "Could not draw layout with renderer.",
+ identity);
+
+ return fontFallbackRenderer->FallbackIdentity();
+ }
+
+ SkStreamAsset* getData(int dataId) const override {
+ SkAutoMutexExclusive ama(fDataIdCacheMutex);
+ if (dataId >= fDataIdCache.size()) {
+ return nullptr;
+ }
+ const DataId& id = fDataIdCache[dataId];
+
+ SkTScopedComPtr<IDWriteFontFileLoader> loader;
+ HRNM(id.fLoader->QueryInterface(&loader), "QuerryInterface IDWriteFontFileLoader failed");
+
+ SkTScopedComPtr<IDWriteFontFileStream> fontFileStream;
+ HRNM(loader->CreateStreamFromKey(id.fKey, id.fKeySize, &fontFileStream),
+ "Could not create font file stream.");
+
+ return new SkDWriteFontFileStream(fontFileStream.get());
+ }
+
+private:
+ SkTScopedComPtr<IDWriteFontCollection> fFontCollection;
+ SkSMallocWCHAR fLocaleName;
+
+ using INHERITED = SkRemotableFontMgr;
+};
+
+SkRemotableFontMgr* SkRemotableFontMgr_New_DirectWrite() {
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (nullptr == factory) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteFontCollection> sysFontCollection;
+ HRNM(factory->GetSystemFontCollection(&sysFontCollection, FALSE),
+ "Could not get system font collection.");
+
+ WCHAR localeNameStorage[LOCALE_NAME_MAX_LENGTH];
+ WCHAR* localeName = nullptr;
+ int localeNameLen = 0;
+
+ // Dynamically load GetUserDefaultLocaleName function, as it is not available on XP.
+ SkGetUserDefaultLocaleNameProc getUserDefaultLocaleNameProc = nullptr;
+ HRESULT hr = SkGetGetUserDefaultLocaleNameProc(&getUserDefaultLocaleNameProc);
+ if (nullptr == getUserDefaultLocaleNameProc) {
+ SK_TRACEHR(hr, "Could not get GetUserDefaultLocaleName.");
+ } else {
+ localeNameLen = getUserDefaultLocaleNameProc(localeNameStorage, LOCALE_NAME_MAX_LENGTH);
+ if (localeNameLen) {
+ localeName = localeNameStorage;
+ };
+ }
+
+ return new SkRemotableFontMgr_DirectWrite(sysFontCollection.get(), localeName, localeNameLen);
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkScalerContext_mac_ct.cpp b/gfx/skia/skia/src/ports/SkScalerContext_mac_ct.cpp
new file mode 100644
index 0000000000..1e61bbd775
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkScalerContext_mac_ct.cpp
@@ -0,0 +1,789 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#include <CoreText/CTFontManager.h>
+#include <CoreGraphics/CoreGraphics.h>
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include "include/core/SkColor.h"
+#include "include/core/SkColorPriv.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPathBuilder.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/base/SkEndian.h"
+#include "src/base/SkMathPriv.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkOpts.h"
+#include "src/ports/SkScalerContext_mac_ct.h"
+#include "src/ports/SkTypeface_mac_ct.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+#include "src/utils/mac/SkCGBase.h"
+#include "src/utils/mac/SkCGGeometry.h"
+#include "src/utils/mac/SkCTFont.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#include <algorithm>
+
+class SkDescriptor;
+
+
+namespace {
+static inline const constexpr bool kSkShowTextBlitCoverage = false;
+}
+
+static void sk_memset_rect32(uint32_t* ptr, uint32_t value,
+ int width, int height, size_t rowBytes) {
+ SkASSERT(width);
+ SkASSERT(width * sizeof(uint32_t) <= rowBytes);
+
+ if (width >= 32) {
+ while (height) {
+ SkOpts::memset32(ptr, value, width);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ return;
+ }
+
+ rowBytes -= width * sizeof(uint32_t);
+
+ if (width >= 8) {
+ while (height) {
+ int w = width;
+ do {
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ *ptr++ = value; *ptr++ = value;
+ w -= 8;
+ } while (w >= 8);
+ while (--w >= 0) {
+ *ptr++ = value;
+ }
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ } else {
+ while (height) {
+ int w = width;
+ do {
+ *ptr++ = value;
+ } while (--w > 0);
+ ptr = (uint32_t*)((char*)ptr + rowBytes);
+ height -= 1;
+ }
+ }
+}
+
+static unsigned CGRGBPixel_getAlpha(CGRGBPixel pixel) {
+ return pixel & 0xFF;
+}
+
+static CGAffineTransform MatrixToCGAffineTransform(const SkMatrix& matrix) {
+ return CGAffineTransformMake( SkScalarToCGFloat(matrix[SkMatrix::kMScaleX]),
+ -SkScalarToCGFloat(matrix[SkMatrix::kMSkewY] ),
+ -SkScalarToCGFloat(matrix[SkMatrix::kMSkewX] ),
+ SkScalarToCGFloat(matrix[SkMatrix::kMScaleY]),
+ SkScalarToCGFloat(matrix[SkMatrix::kMTransX]),
+ SkScalarToCGFloat(matrix[SkMatrix::kMTransY]));
+}
+
+SkScalerContext_Mac::SkScalerContext_Mac(sk_sp<SkTypeface_Mac> typeface,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : INHERITED(std::move(typeface), effects, desc)
+ , fOffscreen(fRec.fForegroundColor)
+ , fDoSubPosition(SkToBool(fRec.fFlags & kSubpixelPositioning_Flag))
+
+{
+ CTFontRef ctFont = (CTFontRef)this->getTypeface()->internal_private_getCTFontRef();
+
+ // CT on (at least) 10.9 will size color glyphs down from the requested size, but not up.
+ // As a result, it is necessary to know the actual device size and request that.
+ SkVector scale;
+ SkMatrix skTransform;
+ bool invertible = fRec.computeMatrices(SkScalerContextRec::PreMatrixScale::kVertical,
+ &scale, &skTransform, nullptr, nullptr, nullptr);
+ fTransform = MatrixToCGAffineTransform(skTransform);
+ // CGAffineTransformInvert documents that if the transform is non-invertible it will return the
+ // passed transform unchanged. It does so, but then also prints a message to stdout. Avoid this.
+ if (invertible) {
+ fInvTransform = CGAffineTransformInvert(fTransform);
+ } else {
+ fInvTransform = fTransform;
+ }
+
+ // The transform contains everything except the requested text size.
+ // Some properties, like 'trak', are based on the optical text size.
+ CGFloat textSize = SkScalarToCGFloat(scale.y());
+ fCTFont = SkCTFontCreateExactCopy(ctFont, textSize,
+ ((SkTypeface_Mac*)this->getTypeface())->fOpszVariation);
+ fCGFont.reset(CTFontCopyGraphicsFont(fCTFont.get(), nullptr));
+}
+
+static int RoundSize(int dimension) {
+ return SkNextPow2(dimension);
+}
+
+static CGColorRef CGColorForSkColor(CGColorSpaceRef rgbcs, SkColor bgra) {
+ CGFloat components[4];
+ components[0] = (CGFloat)SkColorGetR(bgra) * (1/255.0f);
+ components[1] = (CGFloat)SkColorGetG(bgra) * (1/255.0f);
+ components[2] = (CGFloat)SkColorGetB(bgra) * (1/255.0f);
+ // CoreText applies the CGContext fill color as the COLR foreground color.
+ // However, the alpha is applied to the whole glyph drawing (and Skia will do that as well).
+ // For now, cannot really support COLR foreground color alpha.
+ components[3] = 1.0f;
+ return CGColorCreate(rgbcs, components);
+}
+
+SkScalerContext_Mac::Offscreen::Offscreen(SkColor foregroundColor)
+ : fCG(nullptr)
+ , fSKForegroundColor(foregroundColor)
+ , fDoAA(false)
+ , fDoLCD(false)
+{
+ fSize.set(0, 0);
+}
+
+CGRGBPixel* SkScalerContext_Mac::Offscreen::getCG(const SkScalerContext_Mac& context,
+ const SkGlyph& glyph, CGGlyph glyphID,
+ size_t* rowBytesPtr,
+ bool generateA8FromLCD,
+ bool lightOnDark) {
+ if (!fRGBSpace) {
+ //It doesn't appear to matter what color space is specified.
+ //Regular blends and antialiased text are always (s*a + d*(1-a))
+ //and subpixel antialiased text is always g=2.0.
+ fRGBSpace.reset(CGColorSpaceCreateDeviceRGB());
+ fCGForegroundColor.reset(CGColorForSkColor(fRGBSpace.get(), fSKForegroundColor));
+ }
+
+ // default to kBW_Format
+ bool doAA = false;
+ bool doLCD = false;
+
+ if (SkMask::kBW_Format != glyph.maskFormat()) {
+ doLCD = true;
+ doAA = true;
+ }
+
+ // FIXME: lcd smoothed un-hinted rasterization unsupported.
+ if (!generateA8FromLCD && SkMask::kA8_Format == glyph.maskFormat()) {
+ doLCD = false;
+ doAA = true;
+ }
+
+ // If this font might have color glyphs, disable LCD as there's no way to support it.
+ // CoreText doesn't tell us which format it ended up using, so we can't detect it.
+ // A8 will end up black on transparent, but TODO: we can detect gray and set to A8.
+ if (SkMask::kARGB32_Format == glyph.maskFormat()) {
+ doLCD = false;
+ }
+
+ size_t rowBytes = fSize.fWidth * sizeof(CGRGBPixel);
+ if (!fCG || fSize.fWidth < glyph.width() || fSize.fHeight < glyph.height()) {
+ if (fSize.fWidth < glyph.width()) {
+ fSize.fWidth = RoundSize(glyph.width());
+ }
+ if (fSize.fHeight < glyph.height()) {
+ fSize.fHeight = RoundSize(glyph.height());
+ }
+
+ rowBytes = fSize.fWidth * sizeof(CGRGBPixel);
+ void* image = fImageStorage.reset(rowBytes * fSize.fHeight);
+ const CGImageAlphaInfo alpha = (glyph.isColor())
+ ? kCGImageAlphaPremultipliedFirst
+ : kCGImageAlphaNoneSkipFirst;
+ const CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Host | (CGBitmapInfo)alpha;
+ fCG.reset(CGBitmapContextCreate(image, fSize.fWidth, fSize.fHeight, 8,
+ rowBytes, fRGBSpace.get(), bitmapInfo));
+
+ // Skia handles quantization and subpixel positioning,
+ // so disable quantization and enable subpixel positioning in CG.
+ CGContextSetAllowsFontSubpixelQuantization(fCG.get(), false);
+ CGContextSetShouldSubpixelQuantizeFonts(fCG.get(), false);
+
+ // Because CG always draws from the horizontal baseline,
+ // if there is a non-integral translation from the horizontal origin to the vertical origin,
+ // then CG cannot draw the glyph in the correct location without subpixel positioning.
+ CGContextSetAllowsFontSubpixelPositioning(fCG.get(), true);
+ CGContextSetShouldSubpixelPositionFonts(fCG.get(), true);
+
+ CGContextSetTextDrawingMode(fCG.get(), kCGTextFill);
+
+ if (SkMask::kARGB32_Format != glyph.maskFormat()) {
+ // Draw black on white to create mask. (Special path exists to speed this up in CG.)
+ // If light-on-dark is requested, draw white on black.
+ CGContextSetGrayFillColor(fCG.get(), lightOnDark ? 1.0f : 0.0f, 1.0f);
+ } else {
+ CGContextSetFillColorWithColor(fCG.get(), fCGForegroundColor.get());
+ }
+
+ // force our checks below to happen
+ fDoAA = !doAA;
+ fDoLCD = !doLCD;
+
+ CGContextSetTextMatrix(fCG.get(), context.fTransform);
+ }
+
+ if (fDoAA != doAA) {
+ CGContextSetShouldAntialias(fCG.get(), doAA);
+ fDoAA = doAA;
+ }
+ if (fDoLCD != doLCD) {
+ CGContextSetShouldSmoothFonts(fCG.get(), doLCD);
+ fDoLCD = doLCD;
+ }
+
+ CGRGBPixel* image = (CGRGBPixel*)fImageStorage.get();
+ // skip rows based on the glyph's height
+ image += (fSize.fHeight - glyph.height()) * fSize.fWidth;
+
+ // Erase to white (or transparent black if it's a color glyph, to not composite against white).
+ // For light-on-dark, instead erase to black.
+ uint32_t bgColor = (!glyph.isColor()) ? (lightOnDark ? 0xFF000000 : 0xFFFFFFFF) : 0x00000000;
+ sk_memset_rect32(image, bgColor, glyph.width(), glyph.height(), rowBytes);
+
+ float subX = 0;
+ float subY = 0;
+ if (context.fDoSubPosition) {
+ subX = SkFixedToFloat(glyph.getSubXFixed());
+ subY = SkFixedToFloat(glyph.getSubYFixed());
+ }
+
+ CGPoint point = CGPointMake(-glyph.left() + subX, glyph.top() + glyph.height() - subY);
+ // Prior to 10.10, CTFontDrawGlyphs acted like CGContextShowGlyphsAtPositions and took
+ // 'positions' which are in text space. The glyph location (in device space) must be
+ // mapped into text space, so that CG can convert it back into device space.
+ // In 10.10.1, this is handled directly in CTFontDrawGlyphs.
+ //
+ // However, in 10.10.2 color glyphs no longer rotate based on the font transform.
+ // So always make the font transform identity and place the transform on the context.
+ point = CGPointApplyAffineTransform(point, context.fInvTransform);
+
+ CTFontDrawGlyphs(context.fCTFont.get(), &glyphID, &point, 1, fCG.get());
+
+ SkASSERT(rowBytesPtr);
+ *rowBytesPtr = rowBytes;
+ return image;
+}
+
+bool SkScalerContext_Mac::generateAdvance(SkGlyph* glyph) {
+ return false;
+}
+
+void SkScalerContext_Mac::generateMetrics(SkGlyph* glyph, SkArenaAlloc* alloc) {
+ glyph->fMaskFormat = fRec.fMaskFormat;
+
+#ifndef MOZ_SKIA
+ if (((SkTypeface_Mac*)this->getTypeface())->fHasColorGlyphs) {
+ glyph->setPath(alloc, nullptr, false);
+ }
+#endif
+
+ const CGGlyph cgGlyph = (CGGlyph) glyph->getGlyphID();
+ glyph->zeroMetrics();
+
+ // The following block produces cgAdvance in CG units (pixels, y up).
+ CGSize cgAdvance;
+ CTFontGetAdvancesForGlyphs(fCTFont.get(), kCTFontOrientationHorizontal,
+ &cgGlyph, &cgAdvance, 1);
+ cgAdvance = CGSizeApplyAffineTransform(cgAdvance, fTransform);
+ glyph->fAdvanceX = SkFloatFromCGFloat(cgAdvance.width);
+ glyph->fAdvanceY = -SkFloatFromCGFloat(cgAdvance.height);
+
+ // The following produces skBounds in SkGlyph units (pixels, y down),
+ // or returns early if skBounds would be empty.
+ SkRect skBounds;
+
+ // Glyphs are always drawn from the horizontal origin. The caller must manually use the result
+ // of CTFontGetVerticalTranslationsForGlyphs to calculate where to draw the glyph for vertical
+ // glyphs. As a result, always get the horizontal bounds of a glyph and translate it if the
+ // glyph is vertical. This avoids any disagreement between the various means of retrieving
+ // vertical metrics.
+ {
+ // CTFontGetBoundingRectsForGlyphs produces cgBounds in CG units (pixels, y up).
+ CGRect cgBounds;
+ CTFontGetBoundingRectsForGlyphs(fCTFont.get(), kCTFontOrientationHorizontal,
+ &cgGlyph, &cgBounds, 1);
+ cgBounds = CGRectApplyAffineTransform(cgBounds, fTransform);
+
+ // BUG?
+ // 0x200B (zero-advance space) seems to return a huge (garbage) bounds, when
+ // it should be empty. So, if we see a zero-advance, we check if it has an
+ // empty path or not, and if so, we jam the bounds to 0. Hopefully a zero-advance
+ // is rare, so we won't incur a big performance cost for this extra check.
+ // Avoid trying to create a path from a color font due to crashing on 10.9.
+ if (0 == cgAdvance.width && 0 == cgAdvance.height &&
+ SkMask::kARGB32_Format != glyph->fMaskFormat) {
+ SkUniqueCFRef<CGPathRef> path(CTFontCreatePathForGlyph(fCTFont.get(), cgGlyph,nullptr));
+ if (!path || CGPathIsEmpty(path.get())) {
+ return;
+ }
+ }
+
+ if (SkCGRectIsEmpty(cgBounds)) {
+ return;
+ }
+
+ // Convert cgBounds to SkGlyph units (pixels, y down).
+ skBounds = SkRect::MakeXYWH(cgBounds.origin.x, -cgBounds.origin.y - cgBounds.size.height,
+ cgBounds.size.width, cgBounds.size.height);
+ }
+
+ // Currently the bounds are based on being rendered at (0,0).
+ // The top left must not move, since that is the base from which subpixel positioning is offset.
+ if (fDoSubPosition) {
+ skBounds.fRight += SkFixedToFloat(glyph->getSubXFixed());
+ skBounds.fBottom += SkFixedToFloat(glyph->getSubYFixed());
+ }
+
+ // We're trying to pack left and top into int16_t,
+ // and width and height into uint16_t, after outsetting by 1.
+ if (!SkRect::MakeXYWH(-32767, -32767, 65535, 65535).contains(skBounds)) {
+ return;
+ }
+
+ SkIRect skIBounds;
+ skBounds.roundOut(&skIBounds);
+ // Expand the bounds by 1 pixel, to give CG room for anti-aliasing.
+ // Note that this outset is to allow room for LCD smoothed glyphs. However, the correct outset
+ // is not currently known, as CG dilates the outlines by some percentage.
+ // Note that if this context is A8 and not back-forming from LCD, there is no need to outset.
+ skIBounds.outset(1, 1);
+ glyph->fLeft = SkToS16(skIBounds.fLeft);
+ glyph->fTop = SkToS16(skIBounds.fTop);
+ glyph->fWidth = SkToU16(skIBounds.width());
+ glyph->fHeight = SkToU16(skIBounds.height());
+}
+
+static constexpr uint8_t sk_pow2_table(size_t i) {
+ return SkToU8(((i * i + 128) / 255));
+}
+
+/**
+ * This will invert the gamma applied by CoreGraphics, so we can get linear
+ * values.
+ *
+ * CoreGraphics obscurely defaults to 2.0 as the subpixel coverage gamma value.
+ * The color space used does not appear to affect this choice.
+ */
+static constexpr auto gLinearCoverageFromCGLCDValue = SkMakeArray<256>(sk_pow2_table);
+
+static void cgpixels_to_bits(uint8_t dst[], const CGRGBPixel src[], int count) {
+ while (count > 0) {
+ uint8_t mask = 0;
+ for (int i = 7; i >= 0; --i) {
+ mask |= ((CGRGBPixel_getAlpha(*src++) >> 7) ^ 0x1) << i;
+ if (0 == --count) {
+ break;
+ }
+ }
+ *dst++ = mask;
+ }
+}
+
+template<bool APPLY_PREBLEND>
+static inline uint8_t rgb_to_a8(CGRGBPixel rgb, const uint8_t* table8) {
+ U8CPU r = 0xFF - ((rgb >> 16) & 0xFF);
+ U8CPU g = 0xFF - ((rgb >> 8) & 0xFF);
+ U8CPU b = 0xFF - ((rgb >> 0) & 0xFF);
+ U8CPU lum = sk_apply_lut_if<APPLY_PREBLEND>(SkComputeLuminance(r, g, b), table8);
+ if constexpr (kSkShowTextBlitCoverage) {
+ lum = std::max(lum, (U8CPU)0x30);
+ }
+ return lum;
+}
+
+template<bool APPLY_PREBLEND>
+static void RGBToA8(const CGRGBPixel* SK_RESTRICT cgPixels, size_t cgRowBytes,
+ const SkGlyph& glyph, void* glyphImage, const uint8_t* table8) {
+ const int width = glyph.width();
+ const int height = glyph.height();
+ size_t dstRB = glyph.rowBytes();
+ uint8_t* SK_RESTRICT dst = (uint8_t*)glyphImage;
+
+ for (int y = 0; y < height; y++) {
+ for (int i = 0; i < width; ++i) {
+ dst[i] = rgb_to_a8<APPLY_PREBLEND>(cgPixels[i], table8);
+ }
+ cgPixels = SkTAddOffset<const CGRGBPixel>(cgPixels, cgRowBytes);
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ }
+}
+
+template<bool APPLY_PREBLEND>
+static uint16_t RGBToLcd16(CGRGBPixel rgb,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB) {
+ U8CPU r = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 16) & 0xFF), tableR);
+ U8CPU g = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 8) & 0xFF), tableG);
+ U8CPU b = sk_apply_lut_if<APPLY_PREBLEND>(0xFF - ((rgb >> 0) & 0xFF), tableB);
+ if constexpr (kSkShowTextBlitCoverage) {
+ r = std::max(r, (U8CPU)0x30);
+ g = std::max(g, (U8CPU)0x30);
+ b = std::max(b, (U8CPU)0x30);
+ }
+ return SkPack888ToRGB16(r, g, b);
+}
+
+template<bool APPLY_PREBLEND>
+static void RGBToLcd16(const CGRGBPixel* SK_RESTRICT cgPixels, size_t cgRowBytes,
+ const SkGlyph& glyph, void* glyphImage,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB) {
+ const int width = glyph.width();
+ const int height = glyph.height();
+ size_t dstRB = glyph.rowBytes();
+ uint16_t* SK_RESTRICT dst = (uint16_t*)glyphImage;
+
+ for (int y = 0; y < height; y++) {
+ for (int i = 0; i < width; i++) {
+ dst[i] = RGBToLcd16<APPLY_PREBLEND>(cgPixels[i], tableR, tableG, tableB);
+ }
+ cgPixels = SkTAddOffset<const CGRGBPixel>(cgPixels, cgRowBytes);
+ dst = SkTAddOffset<uint16_t>(dst, dstRB);
+ }
+}
+
+static SkPMColor cgpixels_to_pmcolor(CGRGBPixel rgb) {
+ U8CPU a = (rgb >> 24) & 0xFF;
+ U8CPU r = (rgb >> 16) & 0xFF;
+ U8CPU g = (rgb >> 8) & 0xFF;
+ U8CPU b = (rgb >> 0) & 0xFF;
+ if constexpr (kSkShowTextBlitCoverage) {
+ a = std::max(a, (U8CPU)0x30);
+ }
+ return SkPackARGB32(a, r, g, b);
+}
+
+void SkScalerContext_Mac::generateImage(const SkGlyph& glyph) {
+ CGGlyph cgGlyph = SkTo<CGGlyph>(glyph.getGlyphID());
+
+ // FIXME: lcd smoothed un-hinted rasterization unsupported.
+ bool requestSmooth = fRec.getHinting() != SkFontHinting::kNone;
+ bool lightOnDark = (fRec.fFlags & SkScalerContext::kLightOnDark_Flag) != 0;
+
+ // Draw the glyph
+ size_t cgRowBytes;
+ CGRGBPixel* cgPixels = fOffscreen.getCG(*this, glyph, cgGlyph, &cgRowBytes, requestSmooth, lightOnDark);
+ if (cgPixels == nullptr) {
+ return;
+ }
+
+ // Fix the glyph
+ if ((glyph.fMaskFormat == SkMask::kLCD16_Format) ||
+ (glyph.fMaskFormat == SkMask::kA8_Format
+ && requestSmooth
+ && SkCTFontGetSmoothBehavior() != SkCTFontSmoothBehavior::none))
+ {
+ const uint8_t* linear = gLinearCoverageFromCGLCDValue.data();
+
+ //Note that the following cannot really be integrated into the
+ //pre-blend, since we may not be applying the pre-blend; when we aren't
+ //applying the pre-blend it means that a filter wants linear anyway.
+ //Other code may also be applying the pre-blend, so we'd need another
+ //one with this and one without.
+ CGRGBPixel* addr = cgPixels;
+ for (int y = 0; y < glyph.fHeight; ++y) {
+ for (int x = 0; x < glyph.fWidth; ++x) {
+ int r = linear[(addr[x] >> 16) & 0xFF];
+ int g = linear[(addr[x] >> 8) & 0xFF];
+ int b = linear[(addr[x] >> 0) & 0xFF];
+ // If light-on-dark was requested, the mask is drawn inverted.
+ if (lightOnDark) {
+ r = 255 - r;
+ g = 255 - g;
+ b = 255 - b;
+ }
+ addr[x] = (r << 16) | (g << 8) | b;
+ }
+ addr = SkTAddOffset<CGRGBPixel>(addr, cgRowBytes);
+ }
+ }
+
+ // Convert glyph to mask
+ switch (glyph.fMaskFormat) {
+ case SkMask::kLCD16_Format: {
+ if (fPreBlend.isApplicable()) {
+ RGBToLcd16<true>(cgPixels, cgRowBytes, glyph, glyph.fImage,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ } else {
+ RGBToLcd16<false>(cgPixels, cgRowBytes, glyph, glyph.fImage,
+ fPreBlend.fR, fPreBlend.fG, fPreBlend.fB);
+ }
+ } break;
+ case SkMask::kA8_Format: {
+ if (fPreBlend.isApplicable()) {
+ RGBToA8<true>(cgPixels, cgRowBytes, glyph, glyph.fImage, fPreBlend.fG);
+ } else {
+ RGBToA8<false>(cgPixels, cgRowBytes, glyph, glyph.fImage, fPreBlend.fG);
+ }
+ } break;
+ case SkMask::kBW_Format: {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ uint8_t* dst = (uint8_t*)glyph.fImage;
+ for (int y = 0; y < glyph.fHeight; y++) {
+ cgpixels_to_bits(dst, cgPixels, width);
+ cgPixels = SkTAddOffset<CGRGBPixel>(cgPixels, cgRowBytes);
+ dst = SkTAddOffset<uint8_t>(dst, dstRB);
+ }
+ } break;
+ case SkMask::kARGB32_Format: {
+ const int width = glyph.fWidth;
+ size_t dstRB = glyph.rowBytes();
+ SkPMColor* dst = (SkPMColor*)glyph.fImage;
+ for (int y = 0; y < glyph.fHeight; y++) {
+ for (int x = 0; x < width; ++x) {
+ dst[x] = cgpixels_to_pmcolor(cgPixels[x]);
+ }
+ cgPixels = SkTAddOffset<CGRGBPixel>(cgPixels, cgRowBytes);
+ dst = SkTAddOffset<SkPMColor>(dst, dstRB);
+ }
+ } break;
+ default:
+ SkDEBUGFAIL("unexpected mask format");
+ break;
+ }
+}
+
+namespace {
+class SkCTPathGeometrySink {
+ SkPathBuilder fBuilder;
+ bool fStarted;
+ CGPoint fCurrent;
+
+ void goingTo(const CGPoint pt) {
+ if (!fStarted) {
+ fStarted = true;
+ fBuilder.moveTo(fCurrent.x, -fCurrent.y);
+ }
+ fCurrent = pt;
+ }
+
+ bool currentIsNot(const CGPoint pt) {
+ return fCurrent.x != pt.x || fCurrent.y != pt.y;
+ }
+
+public:
+ SkCTPathGeometrySink() : fStarted{false}, fCurrent{0,0} {}
+
+ SkPath detach() { return fBuilder.detach(); }
+
+ static void ApplyElement(void *ctx, const CGPathElement *element) {
+ SkCTPathGeometrySink& self = *(SkCTPathGeometrySink*)ctx;
+ CGPoint* points = element->points;
+
+ switch (element->type) {
+ case kCGPathElementMoveToPoint:
+ self.fStarted = false;
+ self.fCurrent = points[0];
+ break;
+
+ case kCGPathElementAddLineToPoint:
+ if (self.currentIsNot(points[0])) {
+ self.goingTo(points[0]);
+ self.fBuilder.lineTo(points[0].x, -points[0].y);
+ }
+ break;
+
+ case kCGPathElementAddQuadCurveToPoint:
+ if (self.currentIsNot(points[0]) || self.currentIsNot(points[1])) {
+ self.goingTo(points[1]);
+ self.fBuilder.quadTo(points[0].x, -points[0].y,
+ points[1].x, -points[1].y);
+ }
+ break;
+
+ case kCGPathElementAddCurveToPoint:
+ if (self.currentIsNot(points[0]) ||
+ self.currentIsNot(points[1]) ||
+ self.currentIsNot(points[2]))
+ {
+ self.goingTo(points[2]);
+ self.fBuilder.cubicTo(points[0].x, -points[0].y,
+ points[1].x, -points[1].y,
+ points[2].x, -points[2].y);
+ }
+ break;
+
+ case kCGPathElementCloseSubpath:
+ if (self.fStarted) {
+ self.fBuilder.close();
+ }
+ break;
+
+ default:
+ SkDEBUGFAIL("Unknown path element!");
+ break;
+ }
+ }
+};
+} // namespace
+
+/*
+ * Our subpixel resolution is only 2 bits in each direction, so a scale of 4
+ * seems sufficient, and possibly even correct, to allow the hinted outline
+ * to be subpixel positioned.
+ */
+#define kScaleForSubPixelPositionHinting (4.0f)
+
+bool SkScalerContext_Mac::generatePath(const SkGlyph& glyph, SkPath* path) {
+ SkScalar scaleX = SK_Scalar1;
+ SkScalar scaleY = SK_Scalar1;
+
+ CGAffineTransform xform = fTransform;
+ /*
+ * For subpixel positioning, we want to return an unhinted outline, so it
+ * can be positioned nicely at fractional offsets. However, we special-case
+ * if the baseline of the (horizontal) text is axis-aligned. In those cases
+ * we want to retain hinting in the direction orthogonal to the baseline.
+ * e.g. for horizontal baseline, we want to retain hinting in Y.
+ * The way we remove hinting is to scale the font by some value (4) in that
+ * direction, ask for the path, and then scale the path back down.
+ */
+ if (fDoSubPosition) {
+ // start out by assuming that we want no hinting in X and Y
+ scaleX = scaleY = kScaleForSubPixelPositionHinting;
+ // now see if we need to restore hinting for axis-aligned baselines
+ switch (this->computeAxisAlignmentForHText()) {
+ case SkAxisAlignment::kX:
+ scaleY = SK_Scalar1; // want hinting in the Y direction
+ break;
+ case SkAxisAlignment::kY:
+ scaleX = SK_Scalar1; // want hinting in the X direction
+ break;
+ default:
+ break;
+ }
+
+ CGAffineTransform scale(CGAffineTransformMakeScale(SkScalarToCGFloat(scaleX),
+ SkScalarToCGFloat(scaleY)));
+ xform = CGAffineTransformConcat(fTransform, scale);
+ }
+
+ CGGlyph cgGlyph = SkTo<CGGlyph>(glyph.getGlyphID());
+ SkUniqueCFRef<CGPathRef> cgPath(CTFontCreatePathForGlyph(fCTFont.get(), cgGlyph, &xform));
+
+ path->reset();
+ if (!cgPath) {
+ return false;
+ }
+
+ SkCTPathGeometrySink sink;
+ CGPathApply(cgPath.get(), &sink, SkCTPathGeometrySink::ApplyElement);
+ *path = sink.detach();
+ if (fDoSubPosition) {
+ SkMatrix m;
+ m.setScale(SkScalarInvert(scaleX), SkScalarInvert(scaleY));
+ path->transform(m);
+ }
+ return true;
+}
+
+void SkScalerContext_Mac::generateFontMetrics(SkFontMetrics* metrics) {
+ if (nullptr == metrics) {
+ return;
+ }
+
+ CGRect theBounds = CTFontGetBoundingBox(fCTFont.get());
+
+ metrics->fTop = SkScalarFromCGFloat(-SkCGRectGetMaxY(theBounds));
+ metrics->fAscent = SkScalarFromCGFloat(-CTFontGetAscent(fCTFont.get()));
+ metrics->fDescent = SkScalarFromCGFloat( CTFontGetDescent(fCTFont.get()));
+ metrics->fBottom = SkScalarFromCGFloat(-SkCGRectGetMinY(theBounds));
+ metrics->fLeading = SkScalarFromCGFloat( CTFontGetLeading(fCTFont.get()));
+ metrics->fAvgCharWidth = SkScalarFromCGFloat( SkCGRectGetWidth(theBounds));
+ metrics->fXMin = SkScalarFromCGFloat( SkCGRectGetMinX(theBounds));
+ metrics->fXMax = SkScalarFromCGFloat( SkCGRectGetMaxX(theBounds));
+ metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+ metrics->fXHeight = SkScalarFromCGFloat( CTFontGetXHeight(fCTFont.get()));
+ metrics->fCapHeight = SkScalarFromCGFloat( CTFontGetCapHeight(fCTFont.get()));
+ metrics->fUnderlineThickness = SkScalarFromCGFloat( CTFontGetUnderlineThickness(fCTFont.get()));
+ metrics->fUnderlinePosition = -SkScalarFromCGFloat( CTFontGetUnderlinePosition(fCTFont.get()));
+ metrics->fStrikeoutThickness = 0;
+ metrics->fStrikeoutPosition = 0;
+
+ metrics->fFlags = 0;
+ metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+ metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+
+ CFArrayRef ctAxes = ((SkTypeface_Mac*)this->getTypeface())->getVariationAxes();
+ if ((ctAxes && CFArrayGetCount(ctAxes) > 0) ||
+ ((SkTypeface_Mac*)this->getTypeface())->fHasColorGlyphs)
+ {
+ // The bounds are only valid for the default outline variation.
+ // In particular `sbix` and `SVG ` data may draw outside these bounds.
+ metrics->fFlags |= SkFontMetrics::kBoundsInvalid_Flag;
+ }
+
+ sk_sp<SkData> os2 = this->getTypeface()->copyTableData(SkTEndian_SwapBE32(SkOTTableOS2::TAG));
+ if (os2) {
+ // 'fontSize' is correct because the entire resolved size is set by the constructor.
+ const CGFloat fontSize = CTFontGetSize(fCTFont.get());
+ const unsigned int upem = CTFontGetUnitsPerEm(fCTFont.get());
+ const unsigned int maxSaneHeight = upem * 2;
+
+ // See https://bugs.chromium.org/p/skia/issues/detail?id=6203
+ // At least on 10.12.3 with memory based fonts the x-height is always 0.6666 of the ascent
+ // and the cap-height is always 0.8888 of the ascent. It appears that the values from the
+ // 'OS/2' table are read, but then overwritten if the font is not a system font. As a
+ // result, if there is a valid 'OS/2' table available use the values from the table if they
+ // aren't too strange.
+ if (sizeof(SkOTTableOS2_V2) <= os2->size()) {
+ const SkOTTableOS2_V2* os2v2 = static_cast<const SkOTTableOS2_V2*>(os2->data());
+ uint16_t xHeight = SkEndian_SwapBE16(os2v2->sxHeight);
+ if (xHeight && xHeight < maxSaneHeight) {
+ metrics->fXHeight = SkScalarFromCGFloat(xHeight * fontSize / upem);
+ }
+ uint16_t capHeight = SkEndian_SwapBE16(os2v2->sCapHeight);
+ if (capHeight && capHeight < maxSaneHeight) {
+ metrics->fCapHeight = SkScalarFromCGFloat(capHeight * fontSize / upem);
+ }
+ }
+
+ // CoreText does not provide the strikeout metrics, which are available in OS/2 version 0.
+ if (sizeof(SkOTTableOS2_V0) <= os2->size()) {
+ const SkOTTableOS2_V0* os2v0 = static_cast<const SkOTTableOS2_V0*>(os2->data());
+ uint16_t strikeoutSize = SkEndian_SwapBE16(os2v0->yStrikeoutSize);
+ if (strikeoutSize && strikeoutSize < maxSaneHeight) {
+ metrics->fStrikeoutThickness = SkScalarFromCGFloat(strikeoutSize * fontSize / upem);
+ metrics->fFlags |= SkFontMetrics::kStrikeoutThicknessIsValid_Flag;
+ }
+ uint16_t strikeoutPos = SkEndian_SwapBE16(os2v0->yStrikeoutPosition);
+ if (strikeoutPos && strikeoutPos < maxSaneHeight) {
+ metrics->fStrikeoutPosition = -SkScalarFromCGFloat(strikeoutPos * fontSize / upem);
+ metrics->fFlags |= SkFontMetrics::kStrikeoutPositionIsValid_Flag;
+ }
+ }
+ }
+}
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkScalerContext_mac_ct.h b/gfx/skia/skia/src/ports/SkScalerContext_mac_ct.h
new file mode 100644
index 0000000000..be034bd2c7
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkScalerContext_mac_ct.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalerContext_mac_ct_DEFINED
+#define SkScalerContext_mac_ct_DEFINED
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "src/base/SkAutoMalloc.h"
+#include "src/core/SkScalerContext.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#include <CoreText/CTFontManager.h>
+#include <CoreGraphics/CoreGraphics.h>
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include <memory>
+
+class SkDescriptor;
+class SkGlyph;
+class SkPath;
+class SkTypeface_Mac;
+struct SkFontMetrics;
+
+
+typedef uint32_t CGRGBPixel;
+
+class SkScalerContext_Mac : public SkScalerContext {
+public:
+ SkScalerContext_Mac(sk_sp<SkTypeface_Mac>, const SkScalerContextEffects&, const SkDescriptor*);
+
+protected:
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph, SkArenaAlloc*) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(const SkGlyph& glyph, SkPath* path) override;
+ void generateFontMetrics(SkFontMetrics*) override;
+
+private:
+ class Offscreen {
+ public:
+ Offscreen(SkColor foregroundColor);
+
+ CGRGBPixel* getCG(const SkScalerContext_Mac& context, const SkGlyph& glyph,
+ CGGlyph glyphID, size_t* rowBytesPtr, bool generateA8FromLCD,
+ bool lightOnDark);
+
+ private:
+ enum {
+ kSize = 32 * 32 * sizeof(CGRGBPixel)
+ };
+ SkAutoSMalloc<kSize> fImageStorage;
+ SkUniqueCFRef<CGColorSpaceRef> fRGBSpace;
+
+ // cached state
+ SkUniqueCFRef<CGContextRef> fCG;
+ SkUniqueCFRef<CGColorRef> fCGForegroundColor;
+ SkColor fSKForegroundColor;
+ SkISize fSize;
+ bool fDoAA;
+ bool fDoLCD;
+ };
+ Offscreen fOffscreen;
+
+ /** Unrotated variant of fCTFont.
+ *
+ * In 10.10.1 CTFontGetAdvancesForGlyphs applies the font transform to the width of the
+ * advances, but always sets the height to 0. This font is used to get the advances of the
+ * unrotated glyph, and then the rotation is applied separately.
+ *
+ * CT vertical metrics are pre-rotated (in em space, before transform) 90deg clock-wise.
+ * This makes kCTFontOrientationDefault dangerous, because the metrics from
+ * kCTFontOrientationHorizontal are in a different space from kCTFontOrientationVertical.
+ * With kCTFontOrientationVertical the advances must be unrotated.
+ *
+ * Sometimes, creating a copy of a CTFont with the same size but different transform will select
+ * different underlying font data. As a result, avoid ever creating more than one CTFont per
+ * SkScalerContext to ensure that only one CTFont is used.
+ *
+ * As a result of the above (and other constraints) this font contains the size, but not the
+ * transform. The transform must always be applied separately.
+ */
+ SkUniqueCFRef<CTFontRef> fCTFont;
+
+ /** The transform without the font size. */
+ CGAffineTransform fTransform;
+ CGAffineTransform fInvTransform;
+
+ SkUniqueCFRef<CGFontRef> fCGFont;
+ const bool fDoSubPosition;
+
+ friend class Offscreen;
+
+ using INHERITED = SkScalerContext;
+};
+
+#endif
+#endif //SkScalerContext_mac_ct_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp
new file mode 100644
index 0000000000..7f7f72191b
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.cpp
@@ -0,0 +1,1523 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/win/SkDWriteNTDDI_VERSION.h"
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#undef GetGlyphIndices
+
+#include "include/codec/SkCodec.h"
+#include "include/core/SkBBHFactory.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkGraphics.h"
+#include "include/core/SkOpenTypeSVGDecoder.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkEndian.h"
+#include "src/core/SkDraw.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMaskGamma.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterClip.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkSharedMutex.h"
+#include "src/ports/SkScalerContext_win_dw.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "src/sfnt/SkOTTable_EBLC.h"
+#include "src/sfnt/SkOTTable_EBSC.h"
+#include "src/sfnt/SkOTTable_gasp.h"
+#include "src/sfnt/SkOTTable_maxp.h"
+#include "src/utils/SkMatrix22.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkDWriteGeometrySink.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+#include <dwrite_1.h>
+#include <dwrite_3.h>
+
+namespace {
+static inline const constexpr bool kSkShowTextBlitCoverage = false;
+
+/* Note:
+ * In versions 8 and 8.1 of Windows, some calls in DWrite are not thread safe.
+ * The mutex returned from maybe_dw_mutex protects the calls that are
+ * problematic.
+ */
+static SkSharedMutex* maybe_dw_mutex(DWriteFontTypeface& typeface) {
+ static SkSharedMutex mutex;
+ return typeface.fDWriteFontFace4 ? nullptr : &mutex;
+}
+
+class SK_SCOPED_CAPABILITY Exclusive {
+public:
+ explicit Exclusive(SkSharedMutex* maybe_lock) SK_ACQUIRE(*maybe_lock)
+ : fLock(maybe_lock) {
+ if (fLock) {
+ fLock->acquire();
+ }
+ }
+ ~Exclusive() SK_RELEASE_CAPABILITY() {
+ if (fLock) {
+ fLock->release();
+ }
+ }
+
+private:
+ SkSharedMutex* fLock;
+};
+class SK_SCOPED_CAPABILITY Shared {
+public:
+ explicit Shared(SkSharedMutex* maybe_lock) SK_ACQUIRE_SHARED(*maybe_lock)
+ : fLock(maybe_lock) {
+ if (fLock) {
+ fLock->acquireShared();
+ }
+ }
+
+ // You would think this should be SK_RELEASE_SHARED_CAPABILITY, but SK_SCOPED_CAPABILITY
+ // doesn't fully understand the difference between shared and exclusive.
+ // Please review https://reviews.llvm.org/D52578 for more information.
+ ~Shared() SK_RELEASE_CAPABILITY() {
+ if (fLock) {
+ fLock->releaseShared();
+ }
+ }
+
+private:
+ SkSharedMutex* fLock;
+};
+
+static bool isLCD(const SkScalerContextRec& rec) {
+ return SkMask::kLCD16_Format == rec.fMaskFormat;
+}
+
+static bool is_hinted(DWriteFontTypeface* typeface) {
+ Exclusive l(maybe_dw_mutex(*typeface));
+ AutoTDWriteTable<SkOTTableMaximumProfile> maxp(typeface->fDWriteFontFace.get());
+ if (!maxp.fExists) {
+ return false;
+ }
+ if (maxp.fSize < sizeof(SkOTTableMaximumProfile::Version::TT)) {
+ return false;
+ }
+ if (maxp->version.version != SkOTTableMaximumProfile::Version::TT::VERSION) {
+ return false;
+ }
+ return (0 != maxp->version.tt.maxSizeOfInstructions);
+}
+
+/** A GaspRange is inclusive, [min, max]. */
+struct GaspRange {
+ using Behavior = SkOTTableGridAndScanProcedure::GaspRange::behavior;
+ GaspRange(int min, int max, int version, Behavior flags)
+ : fMin(min), fMax(max), fVersion(version), fFlags(flags) { }
+ int fMin;
+ int fMax;
+ int fVersion;
+ Behavior fFlags;
+};
+
+bool get_gasp_range(DWriteFontTypeface* typeface, int size, GaspRange* range) {
+ AutoTDWriteTable<SkOTTableGridAndScanProcedure> gasp(typeface->fDWriteFontFace.get());
+ if (!gasp.fExists) {
+ return false;
+ }
+ if (gasp.fSize < sizeof(SkOTTableGridAndScanProcedure)) {
+ return false;
+ }
+ if (gasp->version != SkOTTableGridAndScanProcedure::version0 &&
+ gasp->version != SkOTTableGridAndScanProcedure::version1)
+ {
+ return false;
+ }
+
+ uint16_t numRanges = SkEndianSwap16(gasp->numRanges);
+ if (numRanges > 1024 ||
+ gasp.fSize < sizeof(SkOTTableGridAndScanProcedure) +
+ sizeof(SkOTTableGridAndScanProcedure::GaspRange) * numRanges)
+ {
+ return false;
+ }
+
+ const SkOTTableGridAndScanProcedure::GaspRange* rangeTable =
+ SkTAfter<const SkOTTableGridAndScanProcedure::GaspRange>(gasp.get());
+ int minPPEM = -1;
+ for (uint16_t i = 0; i < numRanges; ++i, ++rangeTable) {
+ int maxPPEM = SkEndianSwap16(rangeTable->maxPPEM);
+ if (minPPEM < size && size <= maxPPEM) {
+ range->fMin = minPPEM + 1;
+ range->fMax = maxPPEM;
+ range->fVersion = SkEndian_SwapBE16(gasp->version);
+ range->fFlags = rangeTable->flags;
+ return true;
+ }
+ minPPEM = maxPPEM;
+ }
+ return false;
+}
+/** If the rendering mode for the specified 'size' is gridfit, then place
+ * the gridfit range into 'range'. Otherwise, leave 'range' alone.
+ */
+static bool is_gridfit_only(GaspRange::Behavior flags) {
+ return flags.raw.value == GaspRange::Behavior::Raw::GridfitMask;
+}
+
+static bool has_bitmap_strike(DWriteFontTypeface* typeface, GaspRange range) {
+ Exclusive l(maybe_dw_mutex(*typeface));
+ {
+ AutoTDWriteTable<SkOTTableEmbeddedBitmapLocation> eblc(typeface->fDWriteFontFace.get());
+ if (!eblc.fExists) {
+ return false;
+ }
+ if (eblc.fSize < sizeof(SkOTTableEmbeddedBitmapLocation)) {
+ return false;
+ }
+ if (eblc->version != SkOTTableEmbeddedBitmapLocation::version_initial) {
+ return false;
+ }
+
+ uint32_t numSizes = SkEndianSwap32(eblc->numSizes);
+ if (numSizes > 1024 ||
+ eblc.fSize < sizeof(SkOTTableEmbeddedBitmapLocation) +
+ sizeof(SkOTTableEmbeddedBitmapLocation::BitmapSizeTable) * numSizes)
+ {
+ return false;
+ }
+
+ const SkOTTableEmbeddedBitmapLocation::BitmapSizeTable* sizeTable =
+ SkTAfter<const SkOTTableEmbeddedBitmapLocation::BitmapSizeTable>(eblc.get());
+ for (uint32_t i = 0; i < numSizes; ++i, ++sizeTable) {
+ if (sizeTable->ppemX == sizeTable->ppemY &&
+ range.fMin <= sizeTable->ppemX && sizeTable->ppemX <= range.fMax)
+ {
+ // TODO: determine if we should dig through IndexSubTableArray/IndexSubTable
+ // to determine the actual number of glyphs with bitmaps.
+
+ // TODO: Ensure that the bitmaps actually cover a significant portion of the strike.
+
+ // TODO: Ensure that the bitmaps are bi-level?
+ if (sizeTable->endGlyphIndex >= sizeTable->startGlyphIndex + 3) {
+ return true;
+ }
+ }
+ }
+ }
+
+ {
+ AutoTDWriteTable<SkOTTableEmbeddedBitmapScaling> ebsc(typeface->fDWriteFontFace.get());
+ if (!ebsc.fExists) {
+ return false;
+ }
+ if (ebsc.fSize < sizeof(SkOTTableEmbeddedBitmapScaling)) {
+ return false;
+ }
+ if (ebsc->version != SkOTTableEmbeddedBitmapScaling::version_initial) {
+ return false;
+ }
+
+ uint32_t numSizes = SkEndianSwap32(ebsc->numSizes);
+ if (numSizes > 1024 ||
+ ebsc.fSize < sizeof(SkOTTableEmbeddedBitmapScaling) +
+ sizeof(SkOTTableEmbeddedBitmapScaling::BitmapScaleTable) * numSizes)
+ {
+ return false;
+ }
+
+ const SkOTTableEmbeddedBitmapScaling::BitmapScaleTable* scaleTable =
+ SkTAfter<const SkOTTableEmbeddedBitmapScaling::BitmapScaleTable>(ebsc.get());
+ for (uint32_t i = 0; i < numSizes; ++i, ++scaleTable) {
+ if (scaleTable->ppemX == scaleTable->ppemY &&
+ range.fMin <= scaleTable->ppemX && scaleTable->ppemX <= range.fMax) {
+ // EBSC tables are normally only found in bitmap only fonts.
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool both_zero(SkScalar a, SkScalar b) {
+ return 0 == a && 0 == b;
+}
+
+// returns false if there is any non-90-rotation or skew
+static bool is_axis_aligned(const SkScalerContextRec& rec) {
+ return 0 == rec.fPreSkewX &&
+ (both_zero(rec.fPost2x2[0][1], rec.fPost2x2[1][0]) ||
+ both_zero(rec.fPost2x2[0][0], rec.fPost2x2[1][1]));
+}
+
+} //namespace
+
+// Constructor: derives all DirectWrite rendering parameters (rendering mode,
+// texture type, measuring mode, grid-fit mode, anti-alias mode, render and
+// measure text sizes) from the Skia scaler record and the typeface's
+// capabilities, mirroring what GetRecommendedRenderingMode would choose while
+// additionally honoring embedded-bitmap and bi-level requests.
+SkScalerContext_DW::SkScalerContext_DW(sk_sp<DWriteFontTypeface> typefaceRef,
+                                       const SkScalerContextEffects& effects,
+                                       const SkDescriptor* desc)
+        : SkScalerContext(std::move(typefaceRef), effects, desc)
+{
+    DWriteFontTypeface* typeface = this->getDWriteTypeface();
+    fGlyphCount = typeface->fDWriteFontFace->GetGlyphCount();
+
+    // ClearType level scaled into 0..256 fixed point; consumed by RGBToLcd16
+    // which blends color channels toward gray with a >> 8.
+    fClearTypeLevel = int(typeface->GetClearTypeLevel() * 256);
+
+    // In general, all glyphs should use DWriteFontFace::GetRecommendedRenderingMode
+    // except when bi-level rendering is requested or there are embedded
+    // bi-level bitmaps (and the embedded bitmap flag is set and no rotation).
+    //
+    // DirectWrite's IDWriteFontFace::GetRecommendedRenderingMode does not do
+    // this. As a result, determine the actual size of the text and then see if
+    // there are any embedded bi-level bitmaps of that size. If there are, then
+    // force bitmaps by requesting bi-level rendering.
+    //
+    // FreeType allows for separate ppemX and ppemY, but DirectWrite assumes
+    // square pixels and only uses ppemY. Therefore the transform must track any
+    // non-uniform x-scale.
+    //
+    // Also, rotated glyphs should have the same absolute advance widths as
+    // horizontal glyphs and the subpixel flag should not affect glyph shapes.
+
+    SkVector scale;
+    fRec.computeMatrices(SkScalerContextRec::PreMatrixScale::kVertical, &scale, &fSkXform);
+
+    // Mirror the Skia matrix into the DWRITE_MATRIX used for glyph runs.
+    fXform.m11 = SkScalarToFloat(fSkXform.getScaleX());
+    fXform.m12 = SkScalarToFloat(fSkXform.getSkewY());
+    fXform.m21 = SkScalarToFloat(fSkXform.getSkewX());
+    fXform.m22 = SkScalarToFloat(fSkXform.getScaleY());
+    fXform.dx = 0;
+    fXform.dy = 0;
+
+    // realTextSize is the actual device size we want (as opposed to the size the user requested).
+    // gdiTextSize is the size we request when GDI compatible.
+    // If the scale is negative, this means the matrix will do the flip anyway.
+    const SkScalar realTextSize = scale.fY;
+    // Due to floating point math, the lower bits are suspect. Round carefully.
+    SkScalar gdiTextSize = SkScalarRoundToScalar(realTextSize * 64.0f) / 64.0f;
+    if (gdiTextSize == 0) {
+        gdiTextSize = SK_Scalar1;
+    }
+
+    bool bitmapRequested = SkToBool(fRec.fFlags & SkScalerContext::kEmbeddedBitmapText_Flag);
+    bool treatLikeBitmap = false;
+    bool axisAlignedBitmap = false;
+    if (bitmapRequested) {
+        // When embedded bitmaps are requested, treat the entire range like
+        // a bitmap strike if the range is gridfit only and contains a bitmap.
+        int bitmapPPEM = SkScalarTruncToInt(gdiTextSize);
+        GaspRange range(bitmapPPEM, bitmapPPEM, 0, GaspRange::Behavior());
+        if (get_gasp_range(typeface, bitmapPPEM, &range)) {
+            if (!is_gridfit_only(range.fFlags)) {
+                range = GaspRange(bitmapPPEM, bitmapPPEM, 0, GaspRange::Behavior());
+            }
+        }
+        treatLikeBitmap = has_bitmap_strike(typeface, range);
+
+        axisAlignedBitmap = is_axis_aligned(fRec);
+    }
+
+    GaspRange range(0, 0xFFFF, 0, GaspRange::Behavior());
+
+    // If the user requested aliased, do so with aliased compatible metrics.
+    if (SkMask::kBW_Format == fRec.fMaskFormat) {
+        fTextSizeRender = gdiTextSize;
+        fRenderingMode = DWRITE_RENDERING_MODE_ALIASED;
+        fTextureType = DWRITE_TEXTURE_ALIASED_1x1;
+        fTextSizeMeasure = gdiTextSize;
+        fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+    // If we can use a bitmap, use gdi classic rendering and measurement.
+    // This will not always provide a bitmap, but matches expected behavior.
+    } else if ((treatLikeBitmap && axisAlignedBitmap) || typeface->ForceGDI()) {
+        fTextSizeRender = gdiTextSize;
+        fRenderingMode = DWRITE_RENDERING_MODE_GDI_CLASSIC;
+        fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+        fTextSizeMeasure = gdiTextSize;
+        fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+    // If rotated but the horizontal text could have used a bitmap,
+    // render high quality rotated glyphs but measure using bitmap metrics.
+    } else if (treatLikeBitmap) {
+        fTextSizeRender = gdiTextSize;
+        fRenderingMode = DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC;
+        fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+        fTextSizeMeasure = gdiTextSize;
+        fMeasuringMode = DWRITE_MEASURING_MODE_GDI_CLASSIC;
+
+    // Force symmetric if the font is above the threshold or there is an explicit mode.
+    // Here we check if the size exceeds 20 before checking the GASP table to match the
+    // results of calling GetRecommendedRenderingMode/Direct2D, which skip looking at
+    // the GASP table if the text is too large.
+    } else if (realTextSize > SkIntToScalar(20) ||
+               typeface->GetRenderingMode() == DWRITE_RENDERING_MODE_NATURAL ||
+               typeface->GetRenderingMode() == DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC) {
+        fTextSizeRender = realTextSize;
+        fRenderingMode = typeface->GetRenderingMode() == DWRITE_RENDERING_MODE_NATURAL ?
+            DWRITE_RENDERING_MODE_NATURAL : DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC;
+        fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+        fTextSizeMeasure = realTextSize;
+        fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+    // If the font has a gasp table version 1, use it to determine symmetric rendering.
+    } else if (get_gasp_range(typeface, SkScalarRoundToInt(gdiTextSize), &range) &&
+               range.fVersion >= 1) {
+        fTextSizeRender = realTextSize;
+        fRenderingMode = !range.fFlags.field.SymmetricSmoothing ?
+            DWRITE_RENDERING_MODE_NATURAL : DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC;
+        fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+        fTextSizeMeasure = realTextSize;
+        fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+    // Fonts with hints, no gasp or gasp version 0, and below 20px get non-symmetric rendering.
+    // Often such fonts have hints which were only tested with GDI ClearType classic.
+    // Some of these fonts rely on drop out control in the y direction in order to be legible.
+    // Tenor Sans
+    //  https://fonts.google.com/specimen/Tenor+Sans
+    // Gill Sans W04
+    //  https://cdn.leagueoflegends.com/lolkit/1.1.9/resources/fonts/gill-sans-w04-book.woff
+    //  https://na.leagueoflegends.com/en/news/game-updates/patch/patch-410-notes
+    // See https://crbug.com/385897
+    } else {
+        if (is_hinted(typeface)) {
+            fTextSizeRender = gdiTextSize;
+            fRenderingMode = DWRITE_RENDERING_MODE_NATURAL;
+        } else {
+            // Unhinted but with no gasp and below 20px defaults to symmetric for
+            // GetRecommendedRenderingMode.
+            fTextSizeRender = realTextSize;
+            fRenderingMode = DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC;
+        }
+        fTextureType = DWRITE_TEXTURE_CLEARTYPE_3x1;
+        fTextSizeMeasure = realTextSize;
+        fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+    }
+
+    // DirectWrite2 allows for grayscale hinting.
+    fAntiAliasMode = DWRITE_TEXT_ANTIALIAS_MODE_CLEARTYPE;
+    if (typeface->fFactory2 && typeface->fDWriteFontFace2 &&
+        SkMask::kA8_Format == fRec.fMaskFormat &&
+        !(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag))
+    {
+        // DWRITE_TEXTURE_ALIASED_1x1 is now misnamed, it must also be used with grayscale.
+        fTextureType = DWRITE_TEXTURE_ALIASED_1x1;
+        fAntiAliasMode = DWRITE_TEXT_ANTIALIAS_MODE_GRAYSCALE;
+    }
+
+    // DirectWrite2 allows hinting to be disabled.
+    fGridFitMode = DWRITE_GRID_FIT_MODE_ENABLED;
+    if (fRec.getHinting() == SkFontHinting::kNone) {
+        fGridFitMode = DWRITE_GRID_FIT_MODE_DISABLED;
+        if (fRenderingMode != DWRITE_RENDERING_MODE_ALIASED) {
+            fRenderingMode = DWRITE_RENDERING_MODE_NATURAL_SYMMETRIC;
+        }
+    }
+
+    // Linear metrics always measure at the real size in natural mode so that
+    // advances scale linearly with text size.
+    if (this->isLinearMetrics()) {
+        fTextSizeMeasure = realTextSize;
+        fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+    }
+
+    // The GDI measuring modes don't seem to work well with CBDT fonts (DWrite.dll 10.0.18362.836).
+    if (fMeasuringMode != DWRITE_MEASURING_MODE_NATURAL) {
+        constexpr UINT32 CBDTTag = DWRITE_MAKE_OPENTYPE_TAG('C','B','D','T');
+        AutoDWriteTable CBDT(typeface->fDWriteFontFace.get(), CBDTTag);
+        if (CBDT.fExists) {
+            fMeasuringMode = DWRITE_MEASURING_MODE_NATURAL;
+        }
+    }
+}
+
+// No explicit cleanup required; all members release their own resources.
+SkScalerContext_DW::~SkScalerContext_DW() {
+}
+
+// Computes the device-space advance for one glyph and stores it on the glyph.
+// Returns false (with a zero advance) for out-of-range glyph ids or on
+// DirectWrite failure; true otherwise.
+bool SkScalerContext_DW::generateAdvance(SkGlyph* glyph) {
+    glyph->fAdvanceX = 0;
+    glyph->fAdvanceY = 0;
+    uint16_t glyphId = glyph->getGlyphID();
+    DWriteFontTypeface* typeface = this->getDWriteTypeface();
+
+    // DirectWrite treats all out of bounds glyph ids as having the same data as glyph 0.
+    // For consistency with all other backends, treat out of range glyph ids as an error.
+    if (fGlyphCount <= glyphId) {
+        return false;
+    }
+
+    DWRITE_GLYPH_METRICS gm;
+
+    // GDI-compatible measuring uses the GDI metrics path; otherwise use the
+    // font's design metrics. Both calls are serialized with the DW mutex.
+    if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+        DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+    {
+        Exclusive l(maybe_dw_mutex(*typeface));
+        HRBM(typeface->fDWriteFontFace->GetGdiCompatibleGlyphMetrics(
+                 fTextSizeMeasure,
+                 1.0f, // pixelsPerDip
+                 // This parameter does not act like the lpmat2 parameter to GetGlyphOutlineW.
+                 // If it did then GsA here and G_inv below to mapVectors.
+                 nullptr,
+                 DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode,
+                 &glyphId, 1,
+                 &gm),
+             "Could not get gdi compatible glyph metrics.");
+    } else {
+        Exclusive l(maybe_dw_mutex(*typeface));
+        HRBM(typeface->fDWriteFontFace->GetDesignGlyphMetrics(&glyphId, 1, &gm),
+             "Could not get design metrics.");
+    }
+
+    DWRITE_FONT_METRICS dwfm;
+    {
+        Shared l(maybe_dw_mutex(*typeface));
+        typeface->fDWriteFontFace->GetMetrics(&dwfm);
+    }
+    // Scale the design-unit advance to the measuring text size.
+    SkScalar advanceX = fTextSizeMeasure * gm.advanceWidth / dwfm.designUnitsPerEm;
+
+    SkVector advance = { advanceX, 0 };
+    if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+        DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+    {
+        // DirectWrite produced 'compatible' metrics, but while close,
+        // the end result is not always an integer as it would be with GDI.
+        advance.fX = SkScalarRoundToScalar(advance.fX);
+    }
+    // Map the horizontal advance through the scaler transform so rotated text
+    // gets a correctly oriented advance vector.
+    fSkXform.mapVectors(&advance, 1);
+
+    glyph->fAdvanceX = SkScalarToFloat(advance.fX);
+    glyph->fAdvanceY = SkScalarToFloat(advance.fY);
+    return true;
+}
+
+// Measures the raster bounds of one glyph by running a DirectWrite glyph run
+// analysis with the given rendering mode / texture type and querying its alpha
+// texture bounds. Subpixel position is fed in through fXform.dx/dy. Returns
+// S_OK on success; bbox receives the texture bounds (may be empty).
+HRESULT SkScalerContext_DW::getBoundingBox(SkGlyph* glyph,
+                                           DWRITE_RENDERING_MODE renderingMode,
+                                           DWRITE_TEXTURE_TYPE textureType,
+                                           RECT* bbox)
+{
+    DWriteFontTypeface* typeface = this->getDWriteTypeface();
+
+    //Measure raster size.
+    fXform.dx = SkFixedToFloat(glyph->getSubXFixed());
+    fXform.dy = SkFixedToFloat(glyph->getSubYFixed());
+
+    FLOAT advance = 0;
+
+    UINT16 glyphId = glyph->getGlyphID();
+
+    DWRITE_GLYPH_OFFSET offset;
+    offset.advanceOffset = 0.0f;
+    offset.ascenderOffset = 0.0f;
+
+    // Single-glyph run with zero advance/offset; all positioning is in fXform.
+    DWRITE_GLYPH_RUN run;
+    run.glyphCount = 1;
+    run.glyphAdvances = &advance;
+    run.fontFace = typeface->fDWriteFontFace.get();
+    run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+    run.bidiLevel = 0;
+    run.glyphIndices = &glyphId;
+    run.isSideways = FALSE;
+    run.glyphOffsets = &offset;
+
+    SkTScopedComPtr<IDWriteGlyphRunAnalysis> glyphRunAnalysis;
+    {
+        Exclusive l(maybe_dw_mutex(*typeface));
+        // IDWriteFactory2::CreateGlyphRunAnalysis is very bad at aliased glyphs.
+        if (typeface->fFactory2 &&
+                (fGridFitMode == DWRITE_GRID_FIT_MODE_DISABLED ||
+                 fAntiAliasMode == DWRITE_TEXT_ANTIALIAS_MODE_GRAYSCALE))
+        {
+            HRM(typeface->fFactory2->CreateGlyphRunAnalysis(
+                    &run,
+                    &fXform,
+                    renderingMode,
+                    fMeasuringMode,
+                    fGridFitMode,
+                    fAntiAliasMode,
+                    0.0f, // baselineOriginX,
+                    0.0f, // baselineOriginY,
+                    &glyphRunAnalysis),
+                "Could not create DW2 glyph run analysis.");
+        } else {
+            HRM(typeface->fFactory->CreateGlyphRunAnalysis(&run,
+                    1.0f, // pixelsPerDip,
+                    &fXform,
+                    renderingMode,
+                    fMeasuringMode,
+                    0.0f, // baselineOriginX,
+                    0.0f, // baselineOriginY,
+                    &glyphRunAnalysis),
+                "Could not create glyph run analysis.");
+        }
+    }
+    {
+        Shared l(maybe_dw_mutex(*typeface));
+        HRM(glyphRunAnalysis->GetAlphaTextureBounds(textureType, bbox),
+            "Could not get texture bounds.");
+    }
+    return S_OK;
+}
+
+// True if the glyph has COLR layers, detected by attempting to translate it
+// into a color glyph run (discarding the enumerator).
+bool SkScalerContext_DW::isColorGlyph(const SkGlyph& glyph) {
+    // One would think that with newer DirectWrite that this could be like isPngGlyph
+    // except test for DWRITE_GLYPH_IMAGE_FORMATS_COLR, but that doesn't seem to work.
+
+    SkTScopedComPtr<IDWriteColorGlyphRunEnumerator> colorLayer;
+    return getColorGlyphRun(glyph, &colorLayer);
+}
+
+// True if the glyph has an embedded PNG image (sbix/CBDT-style); requires the
+// IDWriteFontFace4 interface, otherwise false.
+bool SkScalerContext_DW::isPngGlyph(const SkGlyph& glyph) {
+    if (!this->getDWriteTypeface()->fDWriteFontFace4) {
+        return false;
+    }
+
+    DWRITE_GLYPH_IMAGE_FORMATS f;
+    IDWriteFontFace4* fontFace4 = this->getDWriteTypeface()->fDWriteFontFace4.get();
+    HRBM(fontFace4->GetGlyphImageFormats(glyph.getGlyphID(), 0, UINT32_MAX, &f),
+         "Cannot get glyph image formats.");
+    return f & DWRITE_GLYPH_IMAGE_FORMATS_PNG;
+}
+
+// True if the glyph has SVG data AND an SVG decoder is registered with
+// SkGraphics; requires IDWriteFontFace4, otherwise false.
+bool SkScalerContext_DW::isSVGGlyph(const SkGlyph& glyph) {
+    if (!SkGraphics::GetOpenTypeSVGDecoderFactory() ||
+        !this->getDWriteTypeface()->fDWriteFontFace4)
+    {
+        return false;
+    }
+
+    DWRITE_GLYPH_IMAGE_FORMATS f;
+    IDWriteFontFace4* fontFace4 = this->getDWriteTypeface()->fDWriteFontFace4.get();
+    HRBM(fontFace4->GetGlyphImageFormats(glyph.getGlyphID(), 0, UINT32_MAX, &f),
+         "Cannot get glyph image formats.");
+    return f & DWRITE_GLYPH_IMAGE_FORMATS_SVG;
+}
+
+// Asks DirectWrite to translate one glyph into COLR layers. Returns true and
+// fills *colorGlyph with the layer enumerator on success; returns false when
+// the glyph has no color version (DWRITE_E_NOCOLOR) or on failure.
+bool SkScalerContext_DW::getColorGlyphRun(const SkGlyph& glyph,
+                                          IDWriteColorGlyphRunEnumerator** colorGlyph)
+{
+    FLOAT advance = 0;
+    UINT16 glyphId = glyph.getGlyphID();
+
+    DWRITE_GLYPH_OFFSET offset;
+    offset.advanceOffset = 0.0f;
+    offset.ascenderOffset = 0.0f;
+
+    // Single-glyph run; positioning comes from fXform at baseline (0,0).
+    DWRITE_GLYPH_RUN run;
+    run.glyphCount = 1;
+    run.glyphAdvances = &advance;
+    run.fontFace = this->getDWriteTypeface()->fDWriteFontFace.get();
+    run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+    run.bidiLevel = 0;
+    run.glyphIndices = &glyphId;
+    run.isSideways = FALSE;
+    run.glyphOffsets = &offset;
+
+    HRESULT hr = this->getDWriteTypeface()->fFactory2->TranslateColorGlyphRun(
+            0, 0, &run, nullptr, fMeasuringMode, &fXform, 0, colorGlyph);
+    if (hr == DWRITE_E_NOCOLOR) {
+        return false;
+    }
+    HRBM(hr, "Failed to translate color glyph run");
+    return true;
+}
+
+// Stores rounded-out bounds on the glyph, clamping to the int16/uint16 fields'
+// ranges; bounds that don't fit collapse to empty rather than overflow.
+void SkScalerContext_DW::SetGlyphBounds(SkGlyph* glyph, const SkRect& bounds) {
+    SkIRect ibounds = bounds.roundOut();
+
+    if (!SkTFitsIn<decltype(glyph->fWidth )>(ibounds.width ()) ||
+        !SkTFitsIn<decltype(glyph->fHeight)>(ibounds.height()) ||
+        !SkTFitsIn<decltype(glyph->fTop   )>(ibounds.top   ()) ||
+        !SkTFitsIn<decltype(glyph->fLeft  )>(ibounds.left  ()) )
+    {
+        ibounds = SkIRect::MakeEmpty();
+    }
+
+    glyph->fWidth  = SkToU16(ibounds.width ());
+    glyph->fHeight = SkToU16(ibounds.height());
+    glyph->fTop    = SkToS16(ibounds.top   ());
+    glyph->fLeft   = SkToS16(ibounds.left  ());
+}
+
+// Computes bounds for a COLR glyph by converting each color layer's outline to
+// an SkPath and unioning the path bounds, then mapping through the scaler
+// transform (plus subpixel offset). Returns false if the glyph has no color
+// layers.
+bool SkScalerContext_DW::generateColorMetrics(SkGlyph* glyph) {
+    SkTScopedComPtr<IDWriteColorGlyphRunEnumerator> colorLayers;
+    if (!getColorGlyphRun(*glyph, &colorLayers)) {
+        return false;
+    }
+    SkASSERT(colorLayers.get());
+
+    SkRect bounds = SkRect::MakeEmpty();
+    BOOL hasNextRun = FALSE;
+    while (SUCCEEDED(colorLayers->MoveNext(&hasNextRun)) && hasNextRun) {
+        const DWRITE_COLOR_GLYPH_RUN* colorGlyph;
+        HRBM(colorLayers->GetCurrentRun(&colorGlyph), "Could not get current color glyph run");
+
+        SkPath path;
+        SkTScopedComPtr<IDWriteGeometrySink> geometryToPath;
+        HRBM(SkDWriteGeometrySink::Create(&path, &geometryToPath),
+             "Could not create geometry to path converter.");
+        {
+            Exclusive l(maybe_dw_mutex(*this->getDWriteTypeface()));
+            HRBM(colorGlyph->glyphRun.fontFace->GetGlyphRunOutline(
+                     colorGlyph->glyphRun.fontEmSize,
+                     colorGlyph->glyphRun.glyphIndices,
+                     colorGlyph->glyphRun.glyphAdvances,
+                     colorGlyph->glyphRun.glyphOffsets,
+                     colorGlyph->glyphRun.glyphCount,
+                     colorGlyph->glyphRun.isSideways,
+                     colorGlyph->glyphRun.bidiLevel % 2, //rtl
+                     geometryToPath.get()),
+                 "Could not create glyph outline.");
+        }
+        bounds.join(path.getBounds());
+    }
+    // Bounds so far are in em-space; map into device space.
+    SkMatrix matrix = fSkXform;
+    if (this->isSubpixel()) {
+        matrix.postTranslate(SkFixedToScalar(glyph->getSubXFixed()),
+                             SkFixedToScalar(glyph->getSubYFixed()));
+    }
+    matrix.mapRect(&bounds);
+    SetGlyphBounds(glyph, bounds);
+    return true;
+}
+
+#ifdef USE_SVG
+// Computes bounds for an SVG glyph by recording its draw into an unbounded
+// SkPicture and taking the picture's cull rect as the bounds.
+bool SkScalerContext_DW::generateSVGMetrics(SkGlyph* glyph) {
+    SkPictureRecorder recorder;
+    SkRect infiniteRect = SkRect::MakeLTRB(-SK_ScalarInfinity, -SK_ScalarInfinity,
+                                            SK_ScalarInfinity,  SK_ScalarInfinity);
+    sk_sp<SkBBoxHierarchy> bboxh = SkRTreeFactory()();
+    SkCanvas* recordingCanvas = recorder.beginRecording(infiniteRect, bboxh);
+    if (!this->drawSVGGlyphImage(*glyph, *recordingCanvas)) {
+        return false;
+    }
+    sk_sp<SkPicture> pic = recorder.finishRecordingAsPicture();
+    SkRect bounds = pic->cullRect();
+    SkASSERT(bounds.isFinite());
+
+    SetGlyphBounds(glyph, bounds);
+    return true;
+}
+#endif
+
+#ifdef USE_PNG
+namespace {
+// Keeps the font face (and its glyph image data handle) alive for as long as
+// an SkData made from GetGlyphImageData is referenced; ReleaseProc tears both
+// down when the SkData is destroyed.
+struct Context {
+    SkTScopedComPtr<IDWriteFontFace4> fontFace4;
+    void* glyphDataContext;
+    Context(IDWriteFontFace4* face4, void* context)
+        : fontFace4(SkRefComPtr(face4))
+        , glyphDataContext(context)
+    {}
+};
+
+// SkData release proc: returns the glyph image data to DirectWrite and frees
+// the owning Context.
+static void ReleaseProc(const void* ptr, void* context) {
+    Context* ctx = (Context*)context;
+    ctx->fontFace4->ReleaseGlyphImageData(ctx->glyphDataContext);
+    delete ctx;
+}
+}
+
+// Computes bounds for a PNG glyph: decodes the image header via SkCodec, then
+// maps the pixel bounds through the scaler transform with the image's ppem
+// scale, origin offset, and optional subpixel position applied.
+bool SkScalerContext_DW::generatePngMetrics(SkGlyph* glyph) {
+    SkASSERT(isPngGlyph(*glyph));
+    SkASSERT(this->getDWriteTypeface()->fDWriteFontFace4);
+
+    IDWriteFontFace4* fontFace4 = this->getDWriteTypeface()->fDWriteFontFace4.get();
+    DWRITE_GLYPH_IMAGE_DATA glyphData;
+    void* glyphDataContext;
+    HRBM(fontFace4->GetGlyphImageData(glyph->getGlyphID(),
+                                      fTextSizeRender,
+                                      DWRITE_GLYPH_IMAGE_FORMATS_PNG,
+                                      &glyphData,
+                                      &glyphDataContext),
+         "Glyph image data could not be acquired.");
+
+    // SkData borrows DirectWrite's buffer; Context + ReleaseProc give it back
+    // when the data is released (no copy).
+    Context* context = new Context(fontFace4, glyphDataContext);
+    sk_sp<SkData> data = SkData::MakeWithProc(glyphData.imageData,
+                                              glyphData.imageDataSize,
+                                              &ReleaseProc,
+                                              context);
+
+    std::unique_ptr<SkCodec> codec = SkCodec::MakeFromData(std::move(data));
+    if (!codec) {
+        return false;
+    }
+
+    SkImageInfo info = codec->getInfo();
+    SkRect bounds = SkRect::MakeLTRB(SkIntToScalar(info.bounds().fLeft),
+                                     SkIntToScalar(info.bounds().fTop),
+                                     SkIntToScalar(info.bounds().fRight),
+                                     SkIntToScalar(info.bounds().fBottom));
+
+    SkMatrix matrix = fSkXform;
+    SkScalar scale = fTextSizeRender / glyphData.pixelsPerEm;
+    matrix.preScale(scale, scale);
+    matrix.preTranslate(-glyphData.horizontalLeftOrigin.x, -glyphData.horizontalLeftOrigin.y);
+    if (this->isSubpixel()) {
+        matrix.postTranslate(SkFixedToScalar(glyph->getSubXFixed()),
+                             SkFixedToScalar(glyph->getSubYFixed()));
+    }
+    matrix.mapRect(&bounds);
+    SetGlyphBounds(glyph, bounds);
+    return true;
+}
+#endif
+
+// Fills in glyph metrics. Tries, in order: advance; color (COLR/SVG/PNG)
+// bounds for color fonts; DirectWrite alpha texture bounds; a bi-level
+// fallback texture; and finally bounds derived from the glyph's path.
+void SkScalerContext_DW::generateMetrics(SkGlyph* glyph, SkArenaAlloc* alloc) {
+
+    // GetAlphaTextureBounds succeeds but sometimes returns empty bounds like
+    // { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }
+    // for small but not quite zero and large (but not really large) glyphs,
+    // Only set as non-empty if the returned bounds are non-empty.
+    auto glyphCheckAndSetBounds = [](SkGlyph* glyph, const RECT& bbox) {
+        if (bbox.left >= bbox.right || bbox.top >= bbox.bottom) {
+            return false;
+        }
+
+        // We're trying to pack left and top into int16_t,
+        // and width and height into uint16_t, after outsetting by 1.
+        if (!SkIRect::MakeXYWH(-32767, -32767, 65535, 65535).contains(
+                    SkIRect::MakeLTRB(bbox.left, bbox.top, bbox.right, bbox.bottom))) {
+            return false;
+        }
+
+        glyph->fWidth = SkToU16(bbox.right - bbox.left);
+        glyph->fHeight = SkToU16(bbox.bottom - bbox.top);
+        glyph->fLeft = SkToS16(bbox.left);
+        glyph->fTop = SkToS16(bbox.top);
+        return true;
+    };
+
+    // Start from empty bounds; each strategy below may fill them in.
+    glyph->fWidth = 0;
+    glyph->fHeight = 0;
+    glyph->fLeft = 0;
+    glyph->fTop = 0;
+
+    if (!this->generateAdvance(glyph)) {
+        return;
+    }
+
+    // Color glyphs (COLR, then SVG, then PNG) are ARGB and have no outline
+    // path; each branch records which pipeline produced the glyph in
+    // fScalerContextBits so image generation can dispatch later.
+    DWriteFontTypeface* typeface = this->getDWriteTypeface();
+    if (typeface->fIsColorFont) {
+        if (isColorGlyph(*glyph) && generateColorMetrics(glyph)) {
+            glyph->fMaskFormat = SkMask::kARGB32_Format;
+            glyph->fScalerContextBits |= ScalerContextBits::COLR;
+            glyph->setPath(alloc, nullptr, false);
+            return;
+        }
+
+#ifdef USE_SVG
+        if (isSVGGlyph(*glyph) && generateSVGMetrics(glyph)) {
+            glyph->fMaskFormat = SkMask::kARGB32_Format;
+            glyph->fScalerContextBits |= ScalerContextBits::SVG;
+            glyph->setPath(alloc, nullptr, false);
+            return;
+        }
+#endif
+
+#ifdef USE_PNG
+        if (isPngGlyph(*glyph) && generatePngMetrics(glyph)) {
+            glyph->fMaskFormat = SkMask::kARGB32_Format;
+            glyph->fScalerContextBits |= ScalerContextBits::PNG;
+            glyph->setPath(alloc, nullptr, false);
+            return;
+        }
+#endif
+    }
+
+    RECT bbox;
+    HRVM(this->getBoundingBox(glyph, fRenderingMode, fTextureType, &bbox),
+         "Requested bounding box could not be determined.");
+
+    if (glyphCheckAndSetBounds(glyph, bbox)) {
+        return;
+    }
+
+    // GetAlphaTextureBounds succeeds but returns an empty RECT if there are no
+    // glyphs of the specified texture type or it is too big for smoothing.
+    // When this happens, try with the alternate texture type.
+    if (DWRITE_TEXTURE_ALIASED_1x1 != fTextureType ||
+        DWRITE_TEXT_ANTIALIAS_MODE_GRAYSCALE == fAntiAliasMode)
+    {
+        HRVM(this->getBoundingBox(glyph,
+                                  DWRITE_RENDERING_MODE_ALIASED,
+                                  DWRITE_TEXTURE_ALIASED_1x1,
+                                  &bbox),
+             "Fallback bounding box could not be determined.");
+        if (glyphCheckAndSetBounds(glyph, bbox)) {
+            glyph->fScalerContextBits |= ScalerContextBits::ForceBW;
+            glyph->fMaskFormat = SkMask::kBW_Format;
+        }
+    }
+    // TODO: handle the case where a request for DWRITE_TEXTURE_ALIASED_1x1
+    // fails, and try DWRITE_TEXTURE_CLEARTYPE_3x1.
+
+    // GetAlphaTextureBounds can fail for various reasons. As a fallback, attempt to generate the
+    // metrics from the path
+    SkDEBUGCODE(glyph->fAdvancesBoundsFormatAndInitialPathDone = true;)
+    this->getPath(*glyph, alloc);
+    const SkPath* devPath = glyph->path();
+    if (devPath) {
+        // Sometimes all the above fails. If so, try to create the glyph from path.
+        const SkMask::Format format = glyph->maskFormat();
+        const bool doVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+        const bool a8LCD = SkToBool(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag);
+        const bool hairline = glyph->pathIsHairline();
+        if (GenerateMetricsFromPath(glyph, *devPath, format, doVert, a8LCD, hairline)) {
+            glyph->fScalerContextBits |= ScalerContextBits::PATH;
+        }
+    }
+}
+
+// Fills SkFontMetrics from DirectWrite font metrics (GDI-compatible when the
+// measuring mode demands it), then tries three sources for glyph box bounds:
+// IDWriteFontFace1 metrics, the OpenType 'head' table, and finally
+// ascent/descent with kBoundsInvalid_Flag set.
+void SkScalerContext_DW::generateFontMetrics(SkFontMetrics* metrics) {
+    if (nullptr == metrics) {
+        return;
+    }
+
+    sk_bzero(metrics, sizeof(*metrics));
+
+    DWRITE_FONT_METRICS dwfm;
+    if (DWRITE_MEASURING_MODE_GDI_CLASSIC == fMeasuringMode ||
+        DWRITE_MEASURING_MODE_GDI_NATURAL == fMeasuringMode)
+    {
+        this->getDWriteTypeface()->fDWriteFontFace->GetGdiCompatibleMetrics(
+             fTextSizeRender,
+             1.0f, // pixelsPerDip
+             &fXform,
+             &dwfm);
+    } else {
+        this->getDWriteTypeface()->fDWriteFontFace->GetMetrics(&dwfm);
+    }
+
+    SkScalar upem = SkIntToScalar(dwfm.designUnitsPerEm);
+
+    // Design units scaled to the render size; ascent and the "position"
+    // metrics are negated to match Skia's y-down sign conventions.
+    metrics->fAscent = -fTextSizeRender * SkIntToScalar(dwfm.ascent) / upem;
+    metrics->fDescent = fTextSizeRender * SkIntToScalar(dwfm.descent) / upem;
+    metrics->fLeading = fTextSizeRender * SkIntToScalar(dwfm.lineGap) / upem;
+    metrics->fXHeight = fTextSizeRender * SkIntToScalar(dwfm.xHeight) / upem;
+    metrics->fCapHeight = fTextSizeRender * SkIntToScalar(dwfm.capHeight) / upem;
+    metrics->fUnderlineThickness = fTextSizeRender * SkIntToScalar(dwfm.underlineThickness) / upem;
+    metrics->fUnderlinePosition = -(fTextSizeRender * SkIntToScalar(dwfm.underlinePosition) / upem);
+    metrics->fStrikeoutThickness = fTextSizeRender * SkIntToScalar(dwfm.strikethroughThickness) / upem;
+    metrics->fStrikeoutPosition = -(fTextSizeRender * SkIntToScalar(dwfm.strikethroughPosition) / upem);
+
+    metrics->fFlags |= SkFontMetrics::kUnderlineThicknessIsValid_Flag;
+    metrics->fFlags |= SkFontMetrics::kUnderlinePositionIsValid_Flag;
+    metrics->fFlags |= SkFontMetrics::kStrikeoutThicknessIsValid_Flag;
+    metrics->fFlags |= SkFontMetrics::kStrikeoutPositionIsValid_Flag;
+
+    SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+    if (SUCCEEDED(this->getDWriteTypeface()->fDWriteFontFace->QueryInterface(&fontFace5))) {
+        if (fontFace5->HasVariations()) {
+            // The bounds are only valid for the default variation.
+            metrics->fFlags |= SkFontMetrics::kBoundsInvalid_Flag;
+        }
+    }
+
+    if (this->getDWriteTypeface()->fDWriteFontFace1.get()) {
+        DWRITE_FONT_METRICS1 dwfm1;
+        this->getDWriteTypeface()->fDWriteFontFace1->GetMetrics(&dwfm1);
+        metrics->fTop = -fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxTop) / upem;
+        metrics->fBottom = -fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxBottom) / upem;
+        metrics->fXMin = fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxLeft) / upem;
+        metrics->fXMax = fTextSizeRender * SkIntToScalar(dwfm1.glyphBoxRight) / upem;
+
+        metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+        return;
+    }
+
+    // No IDWriteFontFace1: read the glyph box directly from the 'head' table
+    // (big-endian values, hence the byte swaps).
+    AutoTDWriteTable<SkOTTableHead> head(this->getDWriteTypeface()->fDWriteFontFace.get());
+    if (head.fExists &&
+        head.fSize >= sizeof(SkOTTableHead) &&
+        head->version == SkOTTableHead::version1)
+    {
+        metrics->fTop = -fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->yMax) / upem;
+        metrics->fBottom = -fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->yMin) / upem;
+        metrics->fXMin = fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->xMin) / upem;
+        metrics->fXMax = fTextSizeRender * (int16_t)SkEndian_SwapBE16(head->xMax) / upem;
+
+        metrics->fMaxCharWidth = metrics->fXMax - metrics->fXMin;
+        return;
+    }
+
+    // The real bounds weren't actually available.
+    metrics->fFlags |= SkFontMetrics::kBoundsInvalid_Flag;
+    metrics->fTop = metrics->fAscent;
+    metrics->fBottom = metrics->fDescent;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "include/private/SkColorData.h"
+
+// Packs a DirectWrite bi-level texture (one byte per pixel, high bit used)
+// into Skia's 1-bit-per-pixel BW mask, 8 pixels per destination byte,
+// row-padded to dstRB bytes.
+void SkScalerContext_DW::BilevelToBW(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph) {
+    const int width = glyph.width();
+    const size_t dstRB = (width + 7) >> 3;
+    uint8_t* SK_RESTRICT dst = static_cast<uint8_t*>(glyph.fImage);
+
+    int byteCount = width >> 3;
+    int bitCount = width & 7;
+
+    for (int y = 0; y < glyph.height(); ++y) {
+        if (byteCount > 0) {
+            // Full groups of 8 source pixels: each source byte's bit 7 carries
+            // the pixel, masked into its destination bit position.
+            for (int i = 0; i < byteCount; ++i) {
+                unsigned byte = 0;
+                byte |= src[0] & (1 << 7);
+                byte |= src[1] & (1 << 6);
+                byte |= src[2] & (1 << 5);
+                byte |= src[3] & (1 << 4);
+                byte |= src[4] & (1 << 3);
+                byte |= src[5] & (1 << 2);
+                byte |= src[6] & (1 << 1);
+                byte |= src[7] & (1 << 0);
+                dst[i] = byte;
+                src += 8;
+            }
+        }
+        if (bitCount > 0) {
+            // Trailing partial byte of the row.
+            unsigned byte = 0;
+            unsigned mask = 0x80;
+            for (int i = 0; i < bitCount; i++) {
+                byte |= (src[i]) & mask;
+                mask >>= 1;
+            }
+            dst[byteCount] = byte;
+        }
+        src += bitCount;
+        dst += dstRB;
+    }
+
+    // Debug aid: overlay a checker pattern so blit coverage is visible.
+    if constexpr (kSkShowTextBlitCoverage) {
+        dst = static_cast<uint8_t*>(glyph.fImage);
+        for (unsigned y = 0; y < (unsigned)glyph.height(); y += 2) {
+            for (unsigned x = (y & 0x2); x < (unsigned)glyph.width(); x+=4) {
+                uint8_t& b = dst[(dstRB * y) + (x >> 3)];
+                b = b ^ (1 << (0x7 - (x & 0x7)));
+            }
+        }
+    }
+}
+
+// Copies a DirectWrite grayscale texture into an A8 glyph image, optionally
+// running each value through the preblend gamma table (table8).
+template<bool APPLY_PREBLEND>
+void SkScalerContext_DW::GrayscaleToA8(const uint8_t* SK_RESTRICT src,
+                                       const SkGlyph& glyph,
+                                       const uint8_t* table8) {
+    const size_t dstRB = glyph.rowBytes();
+    const int width = glyph.width();
+    uint8_t* SK_RESTRICT dst = static_cast<uint8_t*>(glyph.fImage);
+
+    for (int y = 0; y < glyph.height(); y++) {
+        for (int i = 0; i < width; i++) {
+            U8CPU a = *(src++);
+            dst[i] = sk_apply_lut_if<APPLY_PREBLEND>(a, table8);
+            if constexpr (kSkShowTextBlitCoverage) {
+                dst[i] = std::max<U8CPU>(0x30, dst[i]);
+            }
+        }
+        dst = SkTAddOffset<uint8_t>(dst, dstRB);
+    }
+}
+
+// Collapses a 3x1 ClearType RGB texture into an A8 glyph image by keeping only
+// the green channel (with optional preblend), discarding R and B.
+template<bool APPLY_PREBLEND>
+void SkScalerContext_DW::RGBToA8(const uint8_t* SK_RESTRICT src,
+                                 const SkGlyph& glyph,
+                                 const uint8_t* table8) {
+    const size_t dstRB = glyph.rowBytes();
+    const int width = glyph.width();
+    uint8_t* SK_RESTRICT dst = static_cast<uint8_t*>(glyph.fImage);
+
+    for (int y = 0; y < glyph.height(); y++) {
+        for (int i = 0; i < width; i++) {
+            // Ignore the R, B channels. It looks the closest to what
+            // D2D with grayscale AA has. But there's no way
+            // to just get a grayscale AA alpha texture from a glyph run.
+            U8CPU g = src[1];
+            src += 3;
+
+            dst[i] = sk_apply_lut_if<APPLY_PREBLEND>(g, table8);
+            if constexpr (kSkShowTextBlitCoverage) {
+                dst[i] = std::max<U8CPU>(0x30, dst[i]);
+            }
+        }
+        dst = SkTAddOffset<uint8_t>(dst, dstRB);
+    }
+}
+
+// Converts a 3x1 ClearType texture into an LCD16 (565) glyph image. RGB
+// selects subpixel channel order; clearTypeLevel (0..256 fixed point) blends
+// each color channel toward green, so level 0 yields pure grayscale.
+template<bool APPLY_PREBLEND, bool RGB>
+void SkScalerContext_DW::RGBToLcd16(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph,
+                                    const uint8_t* tableR, const uint8_t* tableG,
+                                    const uint8_t* tableB, int clearTypeLevel) {
+    const size_t dstRB = glyph.rowBytes();
+    const int width = glyph.width();
+    uint16_t* SK_RESTRICT dst = static_cast<uint16_t*>(glyph.fImage);
+
+    for (int y = 0; y < glyph.height(); y++) {
+        for (int i = 0; i < width; i++) {
+            int r, g, b;
+            if (RGB) {
+                r = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableR);
+                g = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableG);
+                b = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableB);
+            } else {
+                b = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableB);
+                g = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableG);
+                r = sk_apply_lut_if<APPLY_PREBLEND>(*(src++), tableR);
+            }
+            if constexpr (kSkShowTextBlitCoverage) {
+                r = std::max<U8CPU>(0x30, r);
+                g = std::max<U8CPU>(0x30, g);
+                b = std::max<U8CPU>(0x30, b);
+            }
+            // Lerp r and b toward g by (256 - clearTypeLevel) / 256.
+            r = g + (((r - g) * clearTypeLevel) >> 8);
+            b = g + (((b - g) * clearTypeLevel) >> 8);
+            dst[i] = SkPack888ToRGB16(r, g, b);
+        }
+        dst = SkTAddOffset<uint16_t>(dst, dstRB);
+    }
+}
+
+// Rasterizes one glyph into the reusable fBits buffer via a DirectWrite glyph
+// run analysis and returns a pointer to the pixels (1 byte/px aliased or
+// grayscale, 3 bytes/px for CLEARTYPE_3x1). The texture is drawn into the
+// rect implied by the glyph's previously-computed bounds.
+const void* SkScalerContext_DW::drawDWMask(const SkGlyph& glyph,
+                                           DWRITE_RENDERING_MODE renderingMode,
+                                           DWRITE_TEXTURE_TYPE textureType)
+{
+    DWriteFontTypeface* typeface = this->getDWriteTypeface();
+
+    // NOTE(review): sizeNeeded is int vs fBits.size(); assumes glyph dims are
+    // small enough that width*height*3 cannot overflow — appears safe given
+    // the 16-bit bound fields, but worth confirming.
+    int sizeNeeded = glyph.width() * glyph.height();
+    if (DWRITE_TEXTURE_CLEARTYPE_3x1 == textureType) {
+        sizeNeeded *= 3;
+    }
+    if (sizeNeeded > fBits.size()) {
+        fBits.resize(sizeNeeded);
+    }
+
+    // erase
+    memset(fBits.begin(), 0, sizeNeeded);
+
+    // Subpixel position rides in the transform's translation.
+    fXform.dx = SkFixedToFloat(glyph.getSubXFixed());
+    fXform.dy = SkFixedToFloat(glyph.getSubYFixed());
+
+    FLOAT advance = 0.0f;
+
+    UINT16 index = glyph.getGlyphID();
+
+    DWRITE_GLYPH_OFFSET offset;
+    offset.advanceOffset = 0.0f;
+    offset.ascenderOffset = 0.0f;
+
+    DWRITE_GLYPH_RUN run;
+    run.glyphCount = 1;
+    run.glyphAdvances = &advance;
+    run.fontFace = this->getDWriteTypeface()->fDWriteFontFace.get();
+    run.fontEmSize = SkScalarToFloat(fTextSizeRender);
+    run.bidiLevel = 0;
+    run.glyphIndices = &index;
+    run.isSideways = FALSE;
+    run.glyphOffsets = &offset;
+    {
+        SkTScopedComPtr<IDWriteGlyphRunAnalysis> glyphRunAnalysis;
+        {
+            Exclusive l(maybe_dw_mutex(*typeface));
+            // IDWriteFactory2::CreateGlyphRunAnalysis is very bad at aliased glyphs.
+            if (this->getDWriteTypeface()->fFactory2 &&
+                    (fGridFitMode == DWRITE_GRID_FIT_MODE_DISABLED ||
+                     fAntiAliasMode == DWRITE_TEXT_ANTIALIAS_MODE_GRAYSCALE))
+            {
+                HRNM(this->getDWriteTypeface()->fFactory2->CreateGlyphRunAnalysis(&run,
+                         &fXform,
+                         renderingMode,
+                         fMeasuringMode,
+                         fGridFitMode,
+                         fAntiAliasMode,
+                         0.0f, // baselineOriginX,
+                         0.0f, // baselineOriginY,
+                         &glyphRunAnalysis),
+                     "Could not create DW2 glyph run analysis.");
+            } else {
+                HRNM(this->getDWriteTypeface()->fFactory->CreateGlyphRunAnalysis(&run,
+                         1.0f, // pixelsPerDip,
+                         &fXform,
+                         renderingMode,
+                         fMeasuringMode,
+                         0.0f, // baselineOriginX,
+                         0.0f, // baselineOriginY,
+                         &glyphRunAnalysis),
+                     "Could not create glyph run analysis.");
+            }
+        }
+        //NOTE: this assumes that the glyph has already been measured
+        //with an exact same glyph run analysis.
+        RECT bbox;
+        bbox.left = glyph.left();
+        bbox.top = glyph.top();
+        bbox.right = glyph.left() + glyph.width();
+        bbox.bottom = glyph.top() + glyph.height();
+        {
+            Shared l(maybe_dw_mutex(*typeface));
+            HRNM(glyphRunAnalysis->CreateAlphaTexture(textureType,
+                                                      &bbox,
+                                                      fBits.begin(),
+                                                      sizeNeeded),
+                 "Could not draw mask.");
+        }
+    }
+    return fBits.begin();
+}
+
+// Draws a COLR glyph onto the given canvas: each color layer's outline is
+// converted to an SkPath and filled with its palette color (0xffff selects the
+// current foreground color; out-of-range indices fall back to black).
+bool SkScalerContext_DW::drawColorGlyphImage(const SkGlyph& glyph, SkCanvas& canvas) {
+    SkTScopedComPtr<IDWriteColorGlyphRunEnumerator> colorLayers;
+    if (!getColorGlyphRun(glyph, &colorLayers)) {
+        SkASSERTF(false, "Could not get color layers");
+        return false;
+    }
+
+    SkPaint paint;
+    paint.setAntiAlias(fRenderingMode != DWRITE_RENDERING_MODE_ALIASED);
+
+    // Position the canvas exactly as the metrics pass did: subpixel offset
+    // then the scaler transform.
+    if (this->isSubpixel()) {
+        canvas.translate(SkFixedToScalar(glyph.getSubXFixed()),
+                         SkFixedToScalar(glyph.getSubYFixed()));
+    }
+    canvas.concat(fSkXform);
+
+    DWriteFontTypeface* typeface = this->getDWriteTypeface();
+    size_t paletteEntryCount = typeface->fPaletteEntryCount;
+    SkColor* palette = typeface->fPalette.get();
+    BOOL hasNextRun = FALSE;
+    while (SUCCEEDED(colorLayers->MoveNext(&hasNextRun)) && hasNextRun) {
+        const DWRITE_COLOR_GLYPH_RUN* colorGlyph;
+        HRBM(colorLayers->GetCurrentRun(&colorGlyph), "Could not get current color glyph run");
+
+        SkColor color;
+        if (colorGlyph->paletteIndex == 0xffff) {
+            color = fRec.fForegroundColor;
+        } else if (colorGlyph->paletteIndex < paletteEntryCount) {
+            color = palette[colorGlyph->paletteIndex];
+        } else {
+            SK_TRACEHR(DWRITE_E_NOCOLOR, "Invalid palette index.");
+            color = SK_ColorBLACK;
+        }
+        paint.setColor(color);
+
+        SkPath path;
+        SkTScopedComPtr<IDWriteGeometrySink> geometryToPath;
+        HRBM(SkDWriteGeometrySink::Create(&path, &geometryToPath),
+             "Could not create geometry to path converter.");
+        {
+            Exclusive l(maybe_dw_mutex(*this->getDWriteTypeface()));
+            HRBM(colorGlyph->glyphRun.fontFace->GetGlyphRunOutline(
+                    colorGlyph->glyphRun.fontEmSize,
+                    colorGlyph->glyphRun.glyphIndices,
+                    colorGlyph->glyphRun.glyphAdvances,
+                    colorGlyph->glyphRun.glyphOffsets,
+                    colorGlyph->glyphRun.glyphCount,
+                    colorGlyph->glyphRun.isSideways,
+                    colorGlyph->glyphRun.bidiLevel % 2, //rtl
+                    geometryToPath.get()),
+                 "Could not create glyph outline.");
+        }
+        canvas.drawPath(path, paint);
+    }
+    return true;
+}
+
+bool SkScalerContext_DW::generateColorGlyphImage(const SkGlyph& glyph) {
+ SkASSERT(isColorGlyph(glyph));
+ SkASSERT(glyph.fMaskFormat == SkMask::Format::kARGB32_Format);
+
+ SkBitmap dstBitmap;
+ // TODO: mark this as sRGB when the blits will be sRGB.
+ dstBitmap.setInfo(SkImageInfo::Make(glyph.fWidth, glyph.fHeight,
+ kN32_SkColorType, kPremul_SkAlphaType),
+ glyph.rowBytes());
+ dstBitmap.setPixels(glyph.fImage);
+
+ SkCanvas canvas(dstBitmap);
+ if constexpr (kSkShowTextBlitCoverage) {
+ canvas.clear(0x33FF0000);
+ } else {
+ canvas.clear(SK_ColorTRANSPARENT);
+ }
+ canvas.translate(-SkIntToScalar(glyph.fLeft), -SkIntToScalar(glyph.fTop));
+
+ return this->drawColorGlyphImage(glyph, canvas);
+}
+
+bool SkScalerContext_DW::drawSVGGlyphImage(const SkGlyph& glyph, SkCanvas& canvas) {
+ SkASSERT(isSVGGlyph(glyph));
+ SkASSERT(this->getDWriteTypeface()->fDWriteFontFace4);
+
+ SkGraphics::OpenTypeSVGDecoderFactory svgFactory = SkGraphics::GetOpenTypeSVGDecoderFactory();
+ if (!svgFactory) {
+ return false;
+ }
+
+ DWriteFontTypeface* typeface = this->getDWriteTypeface();
+ IDWriteFontFace4* fontFace4 = typeface->fDWriteFontFace4.get();
+ DWRITE_GLYPH_IMAGE_DATA glyphData;
+ void* glyphDataContext;
+ HRBM(fontFace4->GetGlyphImageData(glyph.getGlyphID(),
+ fTextSizeRender,
+ DWRITE_GLYPH_IMAGE_FORMATS_SVG,
+ &glyphData,
+ &glyphDataContext),
+ "Glyph SVG data could not be acquired.");
+ auto svgDecoder = svgFactory((const uint8_t*)glyphData.imageData, glyphData.imageDataSize);
+ fontFace4->ReleaseGlyphImageData(glyphDataContext);
+ if (!svgDecoder) {
+ return false;
+ }
+
+ size_t paletteEntryCount = typeface->fPaletteEntryCount;
+ SkColor* palette = typeface->fPalette.get();
+ int upem = typeface->getUnitsPerEm();
+
+ SkMatrix matrix = fSkXform;
+ SkScalar scale = fTextSizeRender / upem;
+ matrix.preScale(scale, scale);
+ matrix.preTranslate(-glyphData.horizontalLeftOrigin.x, -glyphData.horizontalLeftOrigin.y);
+ if (this->isSubpixel()) {
+ matrix.postTranslate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ }
+ canvas.concat(matrix);
+
+ return svgDecoder->render(canvas, upem, glyph.getGlyphID(),
+ fRec.fForegroundColor, SkSpan(palette, paletteEntryCount));
+}
+
+bool SkScalerContext_DW::generateSVGGlyphImage(const SkGlyph& glyph) {
+ SkASSERT(isSVGGlyph(glyph));
+ SkASSERT(glyph.fMaskFormat == SkMask::Format::kARGB32_Format);
+
+ SkBitmap dstBitmap;
+ // TODO: mark this as sRGB when the blits will be sRGB.
+ dstBitmap.setInfo(SkImageInfo::Make(glyph.fWidth, glyph.fHeight,
+ kN32_SkColorType, kPremul_SkAlphaType),
+ glyph.rowBytes());
+ dstBitmap.setPixels(glyph.fImage);
+
+ SkCanvas canvas(dstBitmap);
+ if constexpr (kSkShowTextBlitCoverage) {
+ canvas.clear(0x33FF0000);
+ } else {
+ canvas.clear(SK_ColorTRANSPARENT);
+ }
+ canvas.translate(-SkIntToScalar(glyph.fLeft), -SkIntToScalar(glyph.fTop));
+
+ return this->drawSVGGlyphImage(glyph, canvas);
+}
+
+#ifdef USE_PNG
+bool SkScalerContext_DW::drawPngGlyphImage(const SkGlyph& glyph, SkCanvas& canvas) {
+ IDWriteFontFace4* fontFace4 = this->getDWriteTypeface()->fDWriteFontFace4.get();
+ DWRITE_GLYPH_IMAGE_DATA glyphData;
+ void* glyphDataContext;
+ HRBM(fontFace4->GetGlyphImageData(glyph.getGlyphID(),
+ fTextSizeRender,
+ DWRITE_GLYPH_IMAGE_FORMATS_PNG,
+ &glyphData,
+ &glyphDataContext),
+ "Glyph image data could not be acquired.");
+ Context* context = new Context(fontFace4, glyphDataContext);
+ sk_sp<SkData> data = SkData::MakeWithProc(glyphData.imageData,
+ glyphData.imageDataSize,
+ &ReleaseProc,
+ context);
+ sk_sp<SkImage> image = SkImage::MakeFromEncoded(std::move(data));
+ if (!image) {
+ return false;
+ }
+
+ if (this->isSubpixel()) {
+ canvas.translate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ }
+ canvas.concat(fSkXform);
+ SkScalar ratio = fTextSizeRender / glyphData.pixelsPerEm;
+ canvas.scale(ratio, ratio);
+ canvas.translate(-glyphData.horizontalLeftOrigin.x, -glyphData.horizontalLeftOrigin.y);
+ canvas.drawImage(image, 0, 0);
+ return true;
+}
+
+bool SkScalerContext_DW::generatePngGlyphImage(const SkGlyph& glyph) {
+ SkASSERT(isPngGlyph(glyph));
+ SkASSERT(glyph.fMaskFormat == SkMask::Format::kARGB32_Format);
+ SkASSERT(this->getDWriteTypeface()->fDWriteFontFace4);
+
+ SkBitmap dstBitmap;
+ dstBitmap.setInfo(SkImageInfo::Make(glyph.width(), glyph.height(),
+ kN32_SkColorType, kPremul_SkAlphaType),
+ glyph.rowBytes());
+ dstBitmap.setPixels(glyph.fImage);
+
+ SkCanvas canvas(dstBitmap);
+ canvas.clear(SK_ColorTRANSPARENT);
+ canvas.translate(-glyph.left(), -glyph.top());
+
+ return this->drawPngGlyphImage(glyph, canvas);
+}
+#endif
+
+void SkScalerContext_DW::generateImage(const SkGlyph& glyph) {
+ ScalerContextBits::value_type format = glyph.fScalerContextBits & ScalerContextBits::FormatMask;
+ if (format == ScalerContextBits::COLR) {
+ this->generateColorGlyphImage(glyph);
+ return;
+ }
+#ifdef USE_SVG
+ if (format == ScalerContextBits::SVG) {
+ this->generateSVGGlyphImage(glyph);
+ return;
+ }
+#endif
+#ifdef USE_PNG
+ if (format == ScalerContextBits::PNG) {
+ this->generatePngGlyphImage(glyph);
+ return;
+ }
+#endif
+ if (format == ScalerContextBits::PATH) {
+ const SkPath* devPath = glyph.path();
+ SkASSERT_RELEASE(devPath);
+ SkMask mask = glyph.mask();
+ SkASSERT(SkMask::kARGB32_Format != mask.fFormat);
+ const bool doBGR = SkToBool(fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag);
+ const bool doVert = SkToBool(fRec.fFlags & SkScalerContext::kLCD_Vertical_Flag);
+ const bool a8LCD = SkToBool(fRec.fFlags & SkScalerContext::kGenA8FromLCD_Flag);
+ const bool hairline = glyph.pathIsHairline();
+ GenerateImageFromPath(mask, *devPath, fPreBlend, doBGR, doVert, a8LCD, hairline);
+ return;
+ }
+
+ //Create the mask.
+ DWRITE_RENDERING_MODE renderingMode = fRenderingMode;
+ DWRITE_TEXTURE_TYPE textureType = fTextureType;
+ if (glyph.fScalerContextBits & ScalerContextBits::ForceBW) {
+ renderingMode = DWRITE_RENDERING_MODE_ALIASED;
+ textureType = DWRITE_TEXTURE_ALIASED_1x1;
+ }
+ const void* bits = this->drawDWMask(glyph, renderingMode, textureType);
+ if (!bits) {
+ sk_bzero(glyph.fImage, glyph.imageSize());
+ return;
+ }
+
+ //Copy the mask into the glyph.
+ const uint8_t* src = (const uint8_t*)bits;
+ if (DWRITE_RENDERING_MODE_ALIASED == renderingMode) {
+ SkASSERT(SkMask::kBW_Format == glyph.fMaskFormat);
+ SkASSERT(DWRITE_TEXTURE_ALIASED_1x1 == textureType);
+ BilevelToBW(src, glyph);
+ } else if (!isLCD(fRec)) {
+ if (textureType == DWRITE_TEXTURE_ALIASED_1x1) {
+ if (fPreBlend.isApplicable()) {
+ GrayscaleToA8<true>(src, glyph, fPreBlend.fG);
+ } else {
+ GrayscaleToA8<false>(src, glyph, fPreBlend.fG);
+ }
+ } else {
+ if (fPreBlend.isApplicable()) {
+ RGBToA8<true>(src, glyph, fPreBlend.fG);
+ } else {
+ RGBToA8<false>(src, glyph, fPreBlend.fG);
+ }
+ }
+ } else {
+ SkASSERT(SkMask::kLCD16_Format == glyph.fMaskFormat);
+ if (fPreBlend.isApplicable()) {
+ if (fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag) {
+ RGBToLcd16<true, false>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB, fClearTypeLevel);
+ } else {
+ RGBToLcd16<true, true>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB, fClearTypeLevel);
+ }
+ } else {
+ if (fRec.fFlags & SkScalerContext::kLCD_BGROrder_Flag) {
+ RGBToLcd16<false, false>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB, fClearTypeLevel);
+ } else {
+ RGBToLcd16<false, true>(src, glyph, fPreBlend.fR, fPreBlend.fG, fPreBlend.fB, fClearTypeLevel);
+ }
+ }
+ }
+}
+
+bool SkScalerContext_DW::generatePath(const SkGlyph& glyph, SkPath* path) {
+ SkASSERT(path);
+ path->reset();
+
+ SkGlyphID glyphID = glyph.getGlyphID();
+
+ // DirectWrite treats all out of bounds glyph ids as having the same data as glyph 0.
+ // For consistency with all other backends, treat out of range glyph ids as an error.
+ if (fGlyphCount <= glyphID) {
+ return false;
+ }
+
+ SkTScopedComPtr<IDWriteGeometrySink> geometryToPath;
+ HRBM(SkDWriteGeometrySink::Create(path, &geometryToPath),
+ "Could not create geometry to path converter.");
+ UINT16 glyphId = SkTo<UINT16>(glyphID);
+ {
+ Exclusive l(maybe_dw_mutex(*this->getDWriteTypeface()));
+ //TODO: convert to<->from DIUs? This would make a difference if hinting.
+ //It may not be needed, it appears that DirectWrite only hints at em size.
+ HRBM(this->getDWriteTypeface()->fDWriteFontFace->GetGlyphRunOutline(
+ SkScalarToFloat(fTextSizeRender),
+ &glyphId,
+ nullptr, //advances
+ nullptr, //offsets
+ 1, //num glyphs
+ FALSE, //sideways
+ FALSE, //rtl
+ geometryToPath.get()),
+ "Could not create glyph outline.");
+ }
+
+ path->transform(fSkXform);
+ return true;
+}
+
+sk_sp<SkDrawable> SkScalerContext_DW::generateDrawable(const SkGlyph& glyph) {
+ struct GlyphDrawable : public SkDrawable {
+ SkScalerContext_DW* fSelf;
+ SkGlyph fGlyph;
+ GlyphDrawable(SkScalerContext_DW* self, const SkGlyph& glyph) : fSelf(self), fGlyph(glyph){}
+ SkRect onGetBounds() override { return fGlyph.rect(); }
+ size_t onApproximateBytesUsed() override { return sizeof(GlyphDrawable); }
+ void maybeShowTextBlitCoverage(SkCanvas* canvas) {
+ if constexpr (kSkShowTextBlitCoverage) {
+ SkPaint paint;
+ paint.setColor(0x3300FF00);
+ paint.setStyle(SkPaint::kFill_Style);
+ canvas->drawRect(this->onGetBounds(), paint);
+ }
+ }
+ };
+ struct COLRGlyphDrawable : public GlyphDrawable {
+ using GlyphDrawable::GlyphDrawable;
+ void onDraw(SkCanvas* canvas) override {
+ this->maybeShowTextBlitCoverage(canvas);
+ fSelf->drawColorGlyphImage(fGlyph, *canvas);
+ }
+ };
+ struct SVGGlyphDrawable : public GlyphDrawable {
+ using GlyphDrawable::GlyphDrawable;
+ void onDraw(SkCanvas* canvas) override {
+ this->maybeShowTextBlitCoverage(canvas);
+ fSelf->drawSVGGlyphImage(fGlyph, *canvas);
+ }
+ };
+ ScalerContextBits::value_type format = glyph.fScalerContextBits & ScalerContextBits::FormatMask;
+ if (format == ScalerContextBits::COLR) {
+ return sk_sp<SkDrawable>(new COLRGlyphDrawable(this, glyph));
+ }
+ if (format == ScalerContextBits::SVG) {
+ return sk_sp<SkDrawable>(new SVGGlyphDrawable(this, glyph));
+ }
+ return nullptr;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h
new file mode 100644
index 0000000000..7cc59969bb
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkScalerContext_win_dw.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkScalarContext_win_dw_DEFINED
+#define SkScalarContext_win_dw_DEFINED
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/core/SkScalerContext.h"
+#include "src/ports/SkTypeface_win_dw.h"
+
+#include <dwrite.h>
+#include <dwrite_2.h>
+
+class SkGlyph;
+class SkDescriptor;
+
+class SkScalerContext_DW : public SkScalerContext {
+public:
+ SkScalerContext_DW(sk_sp<DWriteFontTypeface>,
+ const SkScalerContextEffects&,
+ const SkDescriptor*);
+ ~SkScalerContext_DW() override;
+
+protected:
+ bool generateAdvance(SkGlyph* glyph) override;
+ void generateMetrics(SkGlyph* glyph, SkArenaAlloc*) override;
+ void generateImage(const SkGlyph& glyph) override;
+ bool generatePath(const SkGlyph&, SkPath*) override;
+ sk_sp<SkDrawable> generateDrawable(const SkGlyph&) override;
+ void generateFontMetrics(SkFontMetrics*) override;
+
+private:
+ struct ScalerContextBits {
+ using value_type = decltype(SkGlyph::fScalerContextBits);
+ static const constexpr value_type ForceBW = 1 << 0;
+
+ static const constexpr value_type DW = 0 << 1;
+ static const constexpr value_type PNG = 1 << 1;
+ static const constexpr value_type SVG = 2 << 1;
+ static const constexpr value_type COLR = 3 << 1;
+ static const constexpr value_type PATH = 4 << 1;
+ static const constexpr value_type FormatMask = 0x7 << 1;
+ };
+
+ static void BilevelToBW(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph);
+
+ template<bool APPLY_PREBLEND>
+ static void GrayscaleToA8(const uint8_t* SK_RESTRICT src,
+ const SkGlyph& glyph,
+ const uint8_t* table8);
+
+ template<bool APPLY_PREBLEND>
+ static void RGBToA8(const uint8_t* SK_RESTRICT src,
+ const SkGlyph& glyph,
+ const uint8_t* table8);
+
+ template<bool APPLY_PREBLEND, bool RGB>
+ static void RGBToLcd16(const uint8_t* SK_RESTRICT src, const SkGlyph& glyph,
+ const uint8_t* tableR, const uint8_t* tableG, const uint8_t* tableB,
+ int clearTypeLevel);
+
+ const void* drawDWMask(const SkGlyph& glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType);
+
+ HRESULT getBoundingBox(SkGlyph* glyph,
+ DWRITE_RENDERING_MODE renderingMode,
+ DWRITE_TEXTURE_TYPE textureType,
+ RECT* bbox);
+
+ DWriteFontTypeface* getDWriteTypeface() {
+ return static_cast<DWriteFontTypeface*>(this->getTypeface());
+ }
+
+ bool isColorGlyph(const SkGlyph&);
+ bool getColorGlyphRun(const SkGlyph&, IDWriteColorGlyphRunEnumerator**);
+ bool generateColorMetrics(SkGlyph*);
+ bool generateColorGlyphImage(const SkGlyph&);
+ bool drawColorGlyphImage(const SkGlyph&, SkCanvas&);
+
+ bool isSVGGlyph(const SkGlyph&);
+ bool generateSVGMetrics(SkGlyph*);
+ bool generateSVGGlyphImage(const SkGlyph&);
+ bool drawSVGGlyphImage(const SkGlyph&, SkCanvas&);
+
+ bool isPngGlyph(const SkGlyph&);
+ bool generatePngMetrics(SkGlyph*);
+ bool generatePngGlyphImage(const SkGlyph&);
+ bool drawPngGlyphImage(const SkGlyph&, SkCanvas&);
+
+ static void SetGlyphBounds(SkGlyph* glyph, const SkRect& bounds);
+
+ SkTDArray<uint8_t> fBits;
+ /** The total matrix without the text height scale. */
+ SkMatrix fSkXform;
+ /** The total matrix without the text height scale. */
+ DWRITE_MATRIX fXform;
+ /** The text size to render with. */
+ SkScalar fTextSizeRender;
+ /** The text size to measure with. */
+ SkScalar fTextSizeMeasure;
+ int fGlyphCount;
+ DWRITE_RENDERING_MODE fRenderingMode;
+ DWRITE_TEXTURE_TYPE fTextureType;
+ DWRITE_MEASURING_MODE fMeasuringMode;
+ DWRITE_TEXT_ANTIALIAS_MODE fAntiAliasMode;
+ DWRITE_GRID_FIT_MODE fGridFitMode;
+ int fClearTypeLevel;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkTypeface_mac_ct.cpp b/gfx/skia/skia/src/ports/SkTypeface_mac_ct.cpp
new file mode 100644
index 0000000000..da45a3b547
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTypeface_mac_ct.cpp
@@ -0,0 +1,1541 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#include <CoreText/CTFontManager.h>
+#include <CoreGraphics/CoreGraphics.h>
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include "include/core/SkColor.h"
+#include "include/core/SkData.h"
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontParameters.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/ports/SkTypeface_mac.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkMutex.h"
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkEndian.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/ports/SkScalerContext_mac_ct.h"
+#include "src/ports/SkTypeface_mac_ct.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+#include "src/sfnt/SkOTTable_OS_2_V4.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/sfnt/SkSFNTHeader.h"
+#include "src/utils/mac/SkCGBase.h"
+#include "src/utils/mac/SkCGGeometry.h"
+#include "src/utils/mac/SkCTFont.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#include <dlfcn.h>
+#include <limits.h>
+#include <string.h>
+#include <memory>
+
+using namespace skia_private;
+
+/** Assumes src and dst are not nullptr. */
+void SkStringFromCFString(CFStringRef src, SkString* dst) {
+ // Reserve enough room for the worst-case string,
+ // plus 1 byte for the trailing null.
+ CFIndex length = CFStringGetMaximumSizeForEncoding(CFStringGetLength(src),
+ kCFStringEncodingUTF8) + 1;
+ dst->resize(length);
+ CFStringGetCString(src, dst->data(), length, kCFStringEncodingUTF8);
+ // Resize to the actual UTF-8 length used, stripping the null character.
+ dst->resize(strlen(dst->c_str()));
+}
+
+SkString SkCFTypeIDDescription(CFTypeID id) {
+ SkUniqueCFRef<CFStringRef> typeDescription(CFCopyTypeIDDescription(id));
+ SkString skTypeDescription;
+ SkStringFromCFString(typeDescription.get(), &skTypeDescription);
+ return skTypeDescription;
+}
+
+template<typename CF> CFTypeID SkCFGetTypeID();
+#define SK_GETCFTYPEID(cf) \
+template<> CFTypeID SkCFGetTypeID<cf##Ref>() { return cf##GetTypeID(); }
+SK_GETCFTYPEID(CFBoolean);
+SK_GETCFTYPEID(CFDictionary);
+SK_GETCFTYPEID(CFNumber);
+
+/* Checked dynamic downcast of CFTypeRef.
+ *
+ * @param cf the ref to downcast.
+ * @param cfAsCF if cf can be cast to the type CF, receives the downcast ref.
+ * @param name if non-nullptr the cast is expected to succeed and failures will be logged.
+ * @return true if the cast succeeds, false otherwise.
+ */
+template <typename CF>
+static bool SkCFDynamicCast(CFTypeRef cf, CF* cfAsCF, char const* name) {
+ //SkDEBUGF("SkCFDynamicCast '%s' of type %s to type %s\n", name ? name : "<annon>",
+ // SkCFTypeIDDescription( CFGetTypeID(cf) ).c_str()
+ // SkCFTypeIDDescription(SkCFGetTypeID<CF>()).c_str());
+ if (!cf) {
+ if (name) {
+ SkDEBUGF("%s not present\n", name);
+ }
+ return false;
+ }
+ if (CFGetTypeID(cf) != SkCFGetTypeID<CF>()) {
+ if (name) {
+ SkDEBUGF("%s is a %s but expected a %s\n", name,
+ SkCFTypeIDDescription( CFGetTypeID(cf) ).c_str(),
+ SkCFTypeIDDescription(SkCFGetTypeID<CF>()).c_str());
+ }
+ return false;
+ }
+ *cfAsCF = static_cast<CF>(cf);
+ return true;
+}
+
+template<typename T> struct SkCFNumberTypeFor {};
+#define SK_CFNUMBERTYPE_FOR(c, cf) \
+template<> struct SkCFNumberTypeFor<c> : std::integral_constant<CFNumberType, cf> {};
+SK_CFNUMBERTYPE_FOR(char , kCFNumberCharType );
+SK_CFNUMBERTYPE_FOR(short , kCFNumberShortType );
+SK_CFNUMBERTYPE_FOR(int , kCFNumberIntType );
+SK_CFNUMBERTYPE_FOR(long , kCFNumberLongType );
+SK_CFNUMBERTYPE_FOR(long long, kCFNumberLongLongType);
+SK_CFNUMBERTYPE_FOR(float , kCFNumberFloatType );
+SK_CFNUMBERTYPE_FOR(double , kCFNumberDoubleType );
+
+template <typename T>
+static bool SkCFNumberDynamicCast(CFTypeRef cf, T* number, CFNumberRef* cfNumber, char const* name){
+ CFNumberRef cfAsCFNumber;
+ if (!SkCFDynamicCast(cf, &cfAsCFNumber, name)) {
+ return false;
+ }
+ if (!CFNumberGetValue(cfAsCFNumber, SkCFNumberTypeFor<T>::value, number)) {
+ if (name) {
+ SkDEBUGF("%s CFNumber not extractable\n", name);
+ }
+ return false;
+ }
+ if (cfNumber) {
+ *cfNumber = cfAsCFNumber;
+ }
+ return true;
+}
+
+// In macOS 10.12 and later any variation on the CGFont which has default axis value will be
+// dropped when creating the CTFont. Unfortunately, in macOS 10.15 the priority of setting
+// the optical size (and opsz variation) is
+// 1. the value of kCTFontOpticalSizeAttribute in the CTFontDescriptor (undocumented)
+// 2. the opsz axis default value if kCTFontOpticalSizeAttribute is 'none' (undocumented)
+// 3. the opsz variation on the nascent CTFont from the CGFont (was dropped if default)
+// 4. the opsz variation in kCTFontVariationAttribute in CTFontDescriptor (crashes 10.10)
+// 5. the size requested (can fudge in SkTypeface but not SkScalerContext)
+// The first one which is found will be used to set the opsz variation (after clamping).
+static void add_opsz_attr(CFMutableDictionaryRef attr, double opsz) {
+ SkUniqueCFRef<CFNumberRef> opszValueNumber(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &opsz));
+ // Avoid using kCTFontOpticalSizeAttribute directly
+ CFStringRef SkCTFontOpticalSizeAttribute = CFSTR("NSCTFontOpticalSizeAttribute");
+ CFDictionarySetValue(attr, SkCTFontOpticalSizeAttribute, opszValueNumber.get());
+}
+
+// This turns off application of the 'trak' table to advances, but also all other tracking.
+static void add_notrak_attr(CFMutableDictionaryRef attr) {
+ int zero = 0;
+ SkUniqueCFRef<CFNumberRef> unscaledTrackingNumber(
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &zero));
+ CFStringRef SkCTFontUnscaledTrackingAttribute = CFSTR("NSCTFontUnscaledTrackingAttribute");
+ CFDictionarySetValue(attr, SkCTFontUnscaledTrackingAttribute, unscaledTrackingNumber.get());
+}
+
+SkUniqueCFRef<CTFontRef> SkCTFontCreateExactCopy(CTFontRef baseFont, CGFloat textSize,
+ OpszVariation opszVariation)
+{
+ SkUniqueCFRef<CFMutableDictionaryRef> attr(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ if (opszVariation.isSet) {
+ add_opsz_attr(attr.get(), opszVariation.value);
+#ifdef MOZ_SKIA
+ }
+#else
+ } else {
+ // On (at least) 10.10 though 10.14 the default system font was SFNSText/SFNSDisplay.
+ // The CTFont is backed by both; optical size < 20 means SFNSText else SFNSDisplay.
+ // On at least 10.11 the glyph ids in these fonts became non-interchangable.
+ // To keep glyph ids stable over size changes, preserve the optical size.
+ // In 10.15 this was replaced with use of variable fonts with an opsz axis.
+ // A CTFont backed by multiple fonts picked by opsz where the multiple backing fonts are
+ // variable fonts with opsz axis and non-interchangeable glyph ids would break the
+ // opsz.isSet branch above, but hopefully that never happens.
+ // See https://crbug.com/524646 .
+ CFStringRef SkCTFontOpticalSizeAttribute = CFSTR("NSCTFontOpticalSizeAttribute");
+ SkUniqueCFRef<CFTypeRef> opsz(CTFontCopyAttribute(baseFont, SkCTFontOpticalSizeAttribute));
+ double opsz_val;
+ if (!opsz ||
+ CFGetTypeID(opsz.get()) != CFNumberGetTypeID() ||
+ !CFNumberGetValue(static_cast<CFNumberRef>(opsz.get()),kCFNumberDoubleType,&opsz_val) ||
+ opsz_val <= 0)
+ {
+ opsz_val = CTFontGetSize(baseFont);
+ }
+ add_opsz_attr(attr.get(), opsz_val);
+ }
+ add_notrak_attr(attr.get());
+#endif
+
+ // To figure out if a font is installed locally or used from a @font-face
+ // resource, we check whether its descriptor can provide a URL. This will
+ // be present for installed fonts, but not for those activated from an
+ // in-memory resource.
+ auto IsInstalledFont = [](CTFontRef aFont) {
+ CTFontDescriptorRef desc = CTFontCopyFontDescriptor(aFont);
+ CFTypeRef attr = CTFontDescriptorCopyAttribute(desc, kCTFontURLAttribute);
+ CFRelease(desc);
+ bool result = false;
+ if (attr) {
+ result = true;
+ CFRelease(attr);
+ }
+ return result;
+ };
+
+ SkUniqueCFRef<CGFontRef> baseCGFont;
+
+ // If we have a system font we need to use the CGFont APIs to avoid having the
+ // underlying font change for us when using CTFontCreateCopyWithAttributes.
+ if (IsInstalledFont(baseFont)) {
+ baseCGFont.reset(CTFontCopyGraphicsFont(baseFont, nullptr));
+
+ // The last parameter (CTFontDescriptorRef attributes) *must* be nullptr.
+ // If non-nullptr then with fonts with variation axes, the copy will fail in
+ // CGFontVariationFromDictCallback when it assumes kCGFontVariationAxisName is CFNumberRef
+ // which it quite obviously is not.
+
+ // Because we cannot setup the CTFont descriptor to match, the same restriction applies here
+ // as other uses of CTFontCreateWithGraphicsFont which is that such CTFonts should not escape
+ // the scaler context, since they aren't 'normal'.
+
+ // Avoid calling potentially buggy variation APIs on pre-Sierra macOS
+ // versions (see bug 1331683).
+ //
+ // And on HighSierra, CTFontCreateWithGraphicsFont properly carries over
+ // variation settings from the CGFont to CTFont, so we don't need to do
+ // the extra work here -- and this seems to avoid Core Text crashiness
+ // seen in bug 1454094.
+ //
+ // However, for installed fonts it seems we DO need to copy the variations
+ // explicitly even on 10.13, otherwise fonts fail to render (as in bug
+ // 1455494) when non-default values are used. Fortunately, the crash
+ // mentioned above occurs with data fonts, not (AFAICT) with system-
+ // installed fonts.
+ //
+ // So we only need to do this "the hard way" on Sierra, and for installed
+ // fonts on HighSierra+; otherwise, just let the standard CTFont function
+ // do its thing.
+ //
+ // NOTE in case this ever needs further adjustment: there is similar logic
+ // in four places in the tree (sadly):
+ // CreateCTFontFromCGFontWithVariations in gfxMacFont.cpp
+ // CreateCTFontFromCGFontWithVariations in ScaledFontMac.cpp
+ // CreateCTFontFromCGFontWithVariations in cairo-quartz-font.c
+ // ctfont_create_exact_copy in SkFontHost_mac.cpp
+
+ // Not UniqueCFRef<> because CGFontCopyVariations can return null!
+ CFDictionaryRef variations = CGFontCopyVariations(baseCGFont.get());
+ if (variations) {
+ CFDictionarySetValue(attr.get(), kCTFontVariationAttribute, variations);
+ CFRelease(variations);
+ }
+ }
+
+ SkUniqueCFRef<CTFontDescriptorRef> desc(CTFontDescriptorCreateWithAttributes(attr.get()));
+
+ if (baseCGFont.get()) {
+ return SkUniqueCFRef<CTFontRef>(
+ CTFontCreateWithGraphicsFont(baseCGFont.get(), textSize, nullptr, desc.get()));
+ }
+
+ return SkUniqueCFRef<CTFontRef>(
+ CTFontCreateCopyWithAttributes(baseFont, textSize, nullptr, desc.get()));
+}
+
+CTFontRef SkTypeface_GetCTFontRef(const SkTypeface* face) {
+ return face ? (CTFontRef)face->internal_private_getCTFontRef() : nullptr;
+}
+
+static bool find_by_CTFontRef(SkTypeface* cached, void* context) {
+ CTFontRef self = (CTFontRef)context;
+ CTFontRef other = (CTFontRef)cached->internal_private_getCTFontRef();
+
+ return CFEqual(self, other);
+}
+
+/** Creates a typeface, searching the cache if providedData is nullptr. */
+sk_sp<SkTypeface> SkTypeface_Mac::Make(SkUniqueCFRef<CTFontRef> font,
+ OpszVariation opszVariation,
+ std::unique_ptr<SkStreamAsset> providedData) {
+ static SkMutex gTFCacheMutex;
+ static SkTypefaceCache gTFCache;
+
+ SkASSERT(font);
+ const bool isFromStream(providedData);
+
+ auto makeTypeface = [&]() {
+ SkUniqueCFRef<CTFontDescriptorRef> desc(CTFontCopyFontDescriptor(font.get()));
+ SkFontStyle style = SkCTFontDescriptorGetSkFontStyle(desc.get(), isFromStream);
+ CTFontSymbolicTraits traits = CTFontGetSymbolicTraits(font.get());
+ bool isFixedPitch = SkToBool(traits & kCTFontMonoSpaceTrait);
+
+ return sk_sp<SkTypeface>(new SkTypeface_Mac(std::move(font), style, isFixedPitch,
+ opszVariation, std::move(providedData)));
+ };
+
+ if (isFromStream) {
+ return makeTypeface();
+ }
+
+ SkAutoMutexExclusive ama(gTFCacheMutex);
+ sk_sp<SkTypeface> face = gTFCache.findByProcAndRef(find_by_CTFontRef, (void*)font.get());
+ if (!face) {
+ face = makeTypeface();
+ if (face) {
+ gTFCache.add(face);
+ }
+ }
+ return face;
+}
+
+/* This function is visible on the outside. It first searches the cache, and if
+ * not found, returns a new entry (after adding it to the cache).
+ */
+sk_sp<SkTypeface> SkMakeTypefaceFromCTFont(CTFontRef font) {
+ CFRetain(font);
+ return SkTypeface_Mac::Make(SkUniqueCFRef<CTFontRef>(font),
+ OpszVariation(),
+ nullptr);
+}
+
+static bool find_dict_CGFloat(CFDictionaryRef dict, CFStringRef name, CGFloat* value) {
+ CFNumberRef num;
+ return CFDictionaryGetValueIfPresent(dict, name, (const void**)&num)
+ && CFNumberIsFloatType(num)
+ && CFNumberGetValue(num, kCFNumberCGFloatType, value);
+}
+
+template <typename S, typename D, typename C> struct LinearInterpolater {
+ struct Mapping {
+ S src_val;
+ D dst_val;
+ };
+ constexpr LinearInterpolater(Mapping const mapping[], int mappingCount)
+ : fMapping(mapping), fMappingCount(mappingCount) {}
+
+ static D map(S value, S src_min, S src_max, D dst_min, D dst_max) {
+ SkASSERT(src_min < src_max);
+ SkASSERT(dst_min <= dst_max);
+ return C()(dst_min + (((value - src_min) * (dst_max - dst_min)) / (src_max - src_min)));
+ }
+
+ D map(S val) const {
+ // -Inf to [0]
+ if (val < fMapping[0].src_val) {
+ return fMapping[0].dst_val;
+ }
+
+ // Linear from [i] to [i+1]
+ for (int i = 0; i < fMappingCount - 1; ++i) {
+ if (val < fMapping[i+1].src_val) {
+ return map(val, fMapping[i].src_val, fMapping[i+1].src_val,
+ fMapping[i].dst_val, fMapping[i+1].dst_val);
+ }
+ }
+
+ // From [n] to +Inf
+ // if (fcweight < Inf)
+ return fMapping[fMappingCount - 1].dst_val;
+ }
+
+ Mapping const * fMapping;
+ int fMappingCount;
+};
+
+struct RoundCGFloatToInt {
+ int operator()(CGFloat s) { return s + 0.5; }
+};
+struct CGFloatIdentity {
+ CGFloat operator()(CGFloat s) { return s; }
+};
+
+/** Convert the [0, 1000] CSS weight to [-1, 1] CTFontDescriptor weight (for system fonts).
+ *
+ * The -1 to 1 weights reported by CTFontDescriptors have different mappings depending on if the
+ * CTFont is native or created from a CGDataProvider.
+ */
+CGFloat SkCTFontCTWeightForCSSWeight(int fontstyleWeight) {
+ using Interpolator = LinearInterpolater<int, CGFloat, CGFloatIdentity>;
+
+ // Note that Mac supports the old OS2 version A so 0 through 10 are as if multiplied by 100.
+ // However, on this end we can't tell, so this is ignored.
+
+ static Interpolator::Mapping nativeWeightMappings[11];
+ static SkOnce once;
+ once([&] {
+ const CGFloat(&nsFontWeights)[11] = SkCTFontGetNSFontWeightMapping();
+ for (int i = 0; i < 11; ++i) {
+ nativeWeightMappings[i].src_val = i * 100;
+ nativeWeightMappings[i].dst_val = nsFontWeights[i];
+ }
+ });
+ static constexpr Interpolator nativeInterpolator(
+ nativeWeightMappings, std::size(nativeWeightMappings));
+
+ return nativeInterpolator.map(fontstyleWeight);
+}
+
/** Convert the [-1, 1] CTFontDescriptor weight to [0, 1000] CSS weight.
 *
 * The -1 to 1 weights reported by CTFontDescriptors have different mappings depending on if the
 * CTFont is native or created from a CGDataProvider.
 */
static int ct_weight_to_fontstyle(CGFloat cgWeight, bool fromDataProvider) {
    using Interpolator = LinearInterpolater<CGFloat, int, RoundCGFloatToInt>;

    // Note that Mac supports the old OS2 version A so 0 through 10 are as if multiplied by 100.
    // However, on this end we can't tell, so this is ignored.

    // Two inverse tables are needed because native fonts and data-provider fonts
    // report weights on different CGFloat scales. Both are filled lazily, once.
    static Interpolator::Mapping nativeWeightMappings[11];
    static Interpolator::Mapping dataProviderWeightMappings[11];
    static SkOnce once;
    once([&] {
        const CGFloat(&nsFontWeights)[11] = SkCTFontGetNSFontWeightMapping();
        const CGFloat(&userFontWeights)[11] = SkCTFontGetDataFontWeightMapping();
        for (int i = 0; i < 11; ++i) {
            nativeWeightMappings[i].src_val = nsFontWeights[i];
            nativeWeightMappings[i].dst_val = i * 100;

            dataProviderWeightMappings[i].src_val = userFontWeights[i];
            dataProviderWeightMappings[i].dst_val = i * 100;
        }
    });
    static constexpr Interpolator nativeInterpolator(
        nativeWeightMappings, std::size(nativeWeightMappings));
    static constexpr Interpolator dataProviderInterpolator(
        dataProviderWeightMappings, std::size(dataProviderWeightMappings));

    return fromDataProvider ? dataProviderInterpolator.map(cgWeight)
                            : nativeInterpolator.map(cgWeight);
}
+
+/** Convert the [0, 10] CSS weight to [-1, 1] CTFontDescriptor width. */
+CGFloat SkCTFontCTWidthForCSSWidth(int fontstyleWidth) {
+ using Interpolator = LinearInterpolater<int, CGFloat, CGFloatIdentity>;
+
+ // Values determined by creating font data with every width, creating a CTFont,
+ // and asking the CTFont for its width. See TypefaceStyle test for basics.
+ static constexpr Interpolator::Mapping widthMappings[] = {
+ { 0, -0.5 },
+ { 10, 0.5 },
+ };
+ static constexpr Interpolator interpolator(widthMappings, std::size(widthMappings));
+ return interpolator.map(fontstyleWidth);
+}
+
+/** Convert the [-1, 1] CTFontDescriptor width to [0, 10] CSS weight. */
+static int ct_width_to_fontstyle(CGFloat cgWidth) {
+ using Interpolator = LinearInterpolater<CGFloat, int, RoundCGFloatToInt>;
+
+ // Values determined by creating font data with every width, creating a CTFont,
+ // and asking the CTFont for its width. See TypefaceStyle test for basics.
+ static constexpr Interpolator::Mapping widthMappings[] = {
+ { -0.5, 0 },
+ { 0.5, 10 },
+ };
+ static constexpr Interpolator interpolator(widthMappings, std::size(widthMappings));
+ return interpolator.map(cgWidth);
+}
+
+SkFontStyle SkCTFontDescriptorGetSkFontStyle(CTFontDescriptorRef desc, bool fromDataProvider) {
+ SkUniqueCFRef<CFTypeRef> traits(CTFontDescriptorCopyAttribute(desc, kCTFontTraitsAttribute));
+ CFDictionaryRef fontTraitsDict;
+ if (!SkCFDynamicCast(traits.get(), &fontTraitsDict, "Font traits")) {
+ return SkFontStyle();
+ }
+
+ CGFloat weight, width, slant;
+ if (!find_dict_CGFloat(fontTraitsDict, kCTFontWeightTrait, &weight)) {
+ weight = 0;
+ }
+ if (!find_dict_CGFloat(fontTraitsDict, kCTFontWidthTrait, &width)) {
+ width = 0;
+ }
+ if (!find_dict_CGFloat(fontTraitsDict, kCTFontSlantTrait, &slant)) {
+ slant = 0;
+ }
+
+ return SkFontStyle(ct_weight_to_fontstyle(weight, fromDataProvider),
+ ct_width_to_fontstyle(width),
+ slant ? SkFontStyle::kItalic_Slant
+ : SkFontStyle::kUpright_Slant);
+}
+
+
// Web fonts added to the CTFont registry do not return their character set.
// Iterate through the font in this case. The existing caller caches the result,
// so the performance impact isn't too bad.
static void populate_glyph_to_unicode_slow(CTFontRef ctFont, CFIndex glyphCount,
                                           SkUnichar* out) {
    sk_bzero(out, glyphCount * sizeof(SkUnichar));
    UniChar unichar = 0;
    // Probe every UTF-16 code unit until every glyph has a mapping or the units run out.
    while (glyphCount > 0) {
        CGGlyph glyph;
        if (CTFontGetGlyphsForCharacters(ctFont, &unichar, &glyph, 1)) {
            // Keep only the first (lowest) code unit found for each glyph.
            if (out[glyph] == 0) {
                out[glyph] = unichar;
                --glyphCount;
            }
        }
        // unichar is 16-bit: wrapping back to 0 means the whole BMP has been scanned.
        if (++unichar == 0) {
            break;
        }
    }
}
+
// One Unicode plane's membership bitmap: 0x10000 code points at one bit each = 8KiB.
static constexpr uint16_t kPlaneSize = 1 << 13;

// Fill glyphToUnicode with mappings for the code points of a single Unicode plane,
// given that plane's 8KiB bitmap from CFCharacterSetCreateBitmapRepresentation.
static void get_plane_glyph_map(const uint8_t* bits,
                                CTFontRef ctFont,
                                CFIndex glyphCount,
                                SkUnichar* glyphToUnicode,
                                uint8_t planeIndex) {
    SkUnichar planeOrigin = (SkUnichar)planeIndex << 16; // top half of codepoint.
    for (uint16_t i = 0; i < kPlaneSize; i++) {
        uint8_t mask = bits[i];
        if (!mask) {
            continue;
        }
        for (uint8_t j = 0; j < 8; j++) {
            if (0 == (mask & ((uint8_t)1 << j))) {
                continue;
            }
            // Byte i, bit j encodes membership of code point (i*8 + j) within the plane.
            uint16_t planeOffset = (i << 3) | j;
            SkUnichar codepoint = planeOrigin | (SkUnichar)planeOffset;
            // BMP code points are a single UTF-16 unit; other planes need a surrogate pair.
            uint16_t utf16[2] = {planeOffset, 0};
            size_t count = 1;
            if (planeOrigin != 0) {
                count = SkUTF::ToUTF16(codepoint, utf16);
            }
            CGGlyph glyphs[2] = {0, 0};
            if (CTFontGetGlyphsForCharacters(ctFont, utf16, glyphs, count)) {
                SkASSERT(glyphs[1] == 0);
                SkASSERT(glyphs[0] < glyphCount);
                // CTFontCopyCharacterSet and CTFontGetGlyphsForCharacters seem to add 'support'
                // for characters 0x9, 0xA, and 0xD mapping them to the glyph for character 0x20?
                // Prefer mappings to codepoints at or above 0x20.
                if (glyphToUnicode[glyphs[0]] < 0x20) {
                    glyphToUnicode[glyphs[0]] = codepoint;
                }
            }
        }
    }
}
// Construct Glyph to Unicode table.
static void populate_glyph_to_unicode(CTFontRef ctFont, CFIndex glyphCount,
                                      SkUnichar* glyphToUnicode) {
    sk_bzero(glyphToUnicode, sizeof(SkUnichar) * glyphCount);
    SkUniqueCFRef<CFCharacterSetRef> charSet(CTFontCopyCharacterSet(ctFont));
    if (!charSet) {
        // No character set available (e.g. some web fonts); probe glyph-by-glyph instead.
        populate_glyph_to_unicode_slow(ctFont, glyphCount, glyphToUnicode);
        return;
    }

    SkUniqueCFRef<CFDataRef> bitmap(
        CFCharacterSetCreateBitmapRepresentation(nullptr, charSet.get()));
    if (!bitmap) {
        return;
    }
    CFIndex dataLength = CFDataGetLength(bitmap.get());
    if (!dataLength) {
        return;
    }
    SkASSERT(dataLength >= kPlaneSize);
    const UInt8* bits = CFDataGetBytePtr(bitmap.get());

    // The first 8KiB is always the BMP (plane 0) bitmap.
    get_plane_glyph_map(bits, ctFont, glyphCount, glyphToUnicode, 0);
    /*
    From the CFCharacterSetCreateBitmapRepresentation documentation:
    A CFData object that specifies the bitmap representation of the Unicode
    character points for the new character set. The bitmap representation could
    contain all the Unicode character range starting from BMP to Plane 16. The
    first 8KiB (8192 bytes) of the data represent the BMP range. The BMP range 8KiB
    can be followed by zero to sixteen 8KiB bitmaps, each prepended with the plane
    index byte. For example, the bitmap representing the BMP and Plane 2 has the
    size of 16385 bytes (8KiB for BMP, 1 byte index, and a 8KiB bitmap for Plane
    2). The plane index byte, in this case, contains the integer value two.
    */

    if (dataLength <= kPlaneSize) {
        return;
    }
    // Each additional plane contributes 1 index byte followed by an 8KiB bitmap.
    int extraPlaneCount = (dataLength - kPlaneSize) / (1 + kPlaneSize);
    SkASSERT(dataLength == kPlaneSize + extraPlaneCount * (1 + kPlaneSize));
    while (extraPlaneCount-- > 0) {
        bits += kPlaneSize;
        uint8_t planeIndex = *bits++;
        SkASSERT(planeIndex >= 1);
        SkASSERT(planeIndex <= 16);
        get_plane_glyph_map(bits, ctFont, glyphCount, glyphToUnicode, planeIndex);
    }
}
+
+void SkTypeface_Mac::getGlyphToUnicodeMap(SkUnichar* dstArray) const {
+ SkUniqueCFRef<CTFontRef> ctFont =
+ SkCTFontCreateExactCopy(fFontRef.get(), CTFontGetUnitsPerEm(fFontRef.get()),
+ fOpszVariation);
+ CFIndex glyphCount = CTFontGetGlyphCount(ctFont.get());
+ populate_glyph_to_unicode(ctFont.get(), glyphCount, dstArray);
+}
+
std::unique_ptr<SkAdvancedTypefaceMetrics> SkTypeface_Mac::onGetAdvancedMetrics() const {

    // Work on an exact copy sized to the font's units-per-em so the metrics below
    // come back in font units.
    SkUniqueCFRef<CTFontRef> ctFont =
        SkCTFontCreateExactCopy(fFontRef.get(), CTFontGetUnitsPerEm(fFontRef.get()),
                                fOpszVariation);

    std::unique_ptr<SkAdvancedTypefaceMetrics> info(new SkAdvancedTypefaceMetrics);

    {
        SkUniqueCFRef<CFStringRef> fontName(CTFontCopyPostScriptName(ctFont.get()));
        if (fontName.get()) {
            SkStringFromCFString(fontName.get(), &info->fPostScriptName);
            info->fFontName = info->fPostScriptName;
        }
    }

    // Any variation axes at all mark the font as variable.
    CFArrayRef ctAxes = this->getVariationAxes();
    if (ctAxes && CFArrayGetCount(ctAxes) > 0) {
        info->fFlags |= SkAdvancedTypefaceMetrics::kVariable_FontFlag;
    }

    // Propagate the embedding restrictions from the OS/2 table's fsType field.
    SkOTTableOS2_V4::Type fsType;
    if (sizeof(fsType) == this->getTableData(SkTEndian_SwapBE32(SkOTTableOS2::TAG),
                                             offsetof(SkOTTableOS2_V4, fsType),
                                             sizeof(fsType),
                                             &fsType)) {
        SkOTUtils::SetAdvancedTypefaceFlags(fsType, info.get());
    }

    // If it's not a truetype font, mark it as 'other'. Assume that TrueType
    // fonts always have both glyf and loca tables. At the least, this is what
    // sfntly needs to subset the font. CTFontCopyAttribute() does not always
    // succeed in determining this directly.
    if (!this->getTableSize(SkSetFourByteTag('g','l','y','f')) ||
        !this->getTableSize(SkSetFourByteTag('l','o','c','a')))
    {
        return info;
    }

    info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
    CTFontSymbolicTraits symbolicTraits = CTFontGetSymbolicTraits(ctFont.get());
    if (symbolicTraits & kCTFontMonoSpaceTrait) {
        info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
    }
    if (symbolicTraits & kCTFontItalicTrait) {
        info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
    }
    CTFontStylisticClass stylisticClass = symbolicTraits & kCTFontClassMaskTrait;
    if (stylisticClass >= kCTFontOldStyleSerifsClass && stylisticClass <= kCTFontSlabSerifsClass) {
        info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
    } else if (stylisticClass & kCTFontScriptsClass) {
        info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
    }
    info->fItalicAngle = (int16_t) CTFontGetSlantAngle(ctFont.get());
    info->fAscent = (int16_t) CTFontGetAscent(ctFont.get());
    info->fDescent = (int16_t) CTFontGetDescent(ctFont.get());
    info->fCapHeight = (int16_t) CTFontGetCapHeight(ctFont.get());
    CGRect bbox = CTFontGetBoundingBox(ctFont.get());

    // CG's y-axis points up, so the CG max-y becomes the SkRect top.
    SkRect r;
    r.setLTRB(SkScalarFromCGFloat(SkCGRectGetMinX(bbox)),  // Left
              SkScalarFromCGFloat(SkCGRectGetMaxY(bbox)),  // Top
              SkScalarFromCGFloat(SkCGRectGetMaxX(bbox)),  // Right
              SkScalarFromCGFloat(SkCGRectGetMinY(bbox))); // Bottom

    r.roundOut(&(info->fBBox));

    // Figure out a good guess for StemV - Min width of i, I, !, 1.
    // This probably isn't very good with an italic font.
    int16_t min_width = SHRT_MAX;
    info->fStemV = 0;
    static const UniChar stem_chars[] = {'i', 'I', '!', '1'};
    const size_t count = sizeof(stem_chars) / sizeof(stem_chars[0]);
    CGGlyph glyphs[count];
    CGRect boundingRects[count];
    if (CTFontGetGlyphsForCharacters(ctFont.get(), stem_chars, glyphs, count)) {
        CTFontGetBoundingRectsForGlyphs(ctFont.get(), kCTFontOrientationHorizontal,
                                        glyphs, boundingRects, count);
        for (size_t i = 0; i < count; i++) {
            int16_t width = (int16_t) boundingRects[i].size.width;
            if (width > 0 && width < min_width) {
                min_width = width;
                info->fStemV = min_width;
            }
        }
    }
    return info;
}
+
+static SK_SFNT_ULONG get_font_type_tag(CTFontRef ctFont) {
+ SkUniqueCFRef<CFNumberRef> fontFormatRef(
+ static_cast<CFNumberRef>(CTFontCopyAttribute(ctFont, kCTFontFormatAttribute)));
+ if (!fontFormatRef) {
+ return 0;
+ }
+
+ SInt32 fontFormatValue;
+ if (!CFNumberGetValue(fontFormatRef.get(), kCFNumberSInt32Type, &fontFormatValue)) {
+ return 0;
+ }
+
+ switch (fontFormatValue) {
+ case kCTFontFormatOpenTypePostScript:
+ return SkSFNTHeader::fontType_OpenTypeCFF::TAG;
+ case kCTFontFormatOpenTypeTrueType:
+ return SkSFNTHeader::fontType_WindowsTrueType::TAG;
+ case kCTFontFormatTrueType:
+ return SkSFNTHeader::fontType_MacTrueType::TAG;
+ case kCTFontFormatPostScript:
+ return SkSFNTHeader::fontType_PostScript::TAG;
+ case kCTFontFormatBitmap:
+ return SkSFNTHeader::fontType_MacTrueType::TAG;
+ case kCTFontFormatUnrecognized:
+ default:
+ return 0;
+ }
+}
+
std::unique_ptr<SkStreamAsset> SkTypeface_Mac::onOpenStream(int* ttcIndex) const {
    *ttcIndex = 0;

    // Lazily (exactly once) serialize the font's tables back into an in-memory
    // SFNT container, cached in fStream.
    fInitStream([this]{
        if (fStream) {
            return;
        }

        SK_SFNT_ULONG fontType = get_font_type_tag(fFontRef.get());

        // get table tags
        int numTables = this->countTables();
        SkTDArray<SkFontTableTag> tableTags;
        tableTags.resize(numTables);
        this->getTableTags(tableTags.begin());

        // CT seems to be unreliable in being able to obtain the type,
        // even if all we want is the first four bytes of the font resource.
        // Just the presence of the FontForge 'FFTM' table seems to throw it off.
        if (fontType == 0) {
            fontType = SkSFNTHeader::fontType_WindowsTrueType::TAG;

            // see https://skbug.com/7630#c7
            bool couldBeCFF = false;
            constexpr SkFontTableTag CFFTag = SkSetFourByteTag('C', 'F', 'F', ' ');
            constexpr SkFontTableTag CFF2Tag = SkSetFourByteTag('C', 'F', 'F', '2');
            for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
                if (CFFTag == tableTags[tableIndex] || CFF2Tag == tableTags[tableIndex]) {
                    couldBeCFF = true;
                }
            }
            if (couldBeCFF) {
                fontType = SkSFNTHeader::fontType_OpenTypeCFF::TAG;
            }
        }

        // Sometimes CoreGraphics incorrectly thinks a font is kCTFontFormatPostScript.
        // It is exceedingly unlikely that this is the case, so double check
        // (see https://crbug.com/809763 ).
        if (fontType == SkSFNTHeader::fontType_PostScript::TAG) {
            // see if there are any required 'typ1' tables (see Adobe Technical Note #5180)
            bool couldBeTyp1 = false;
            constexpr SkFontTableTag TYPE1Tag = SkSetFourByteTag('T', 'Y', 'P', '1');
            constexpr SkFontTableTag CIDTag = SkSetFourByteTag('C', 'I', 'D', ' ');
            for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
                if (TYPE1Tag == tableTags[tableIndex] || CIDTag == tableTags[tableIndex]) {
                    couldBeTyp1 = true;
                }
            }
            if (!couldBeTyp1) {
                fontType = SkSFNTHeader::fontType_OpenTypeCFF::TAG;
            }
        }

        // get the table sizes and accumulate the total size of the font
        // (each table is padded out to a 4-byte boundary)
        SkTDArray<size_t> tableSizes;
        size_t totalSize = sizeof(SkSFNTHeader) + sizeof(SkSFNTHeader::TableDirectoryEntry) * numTables;
        for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
            size_t tableSize = this->getTableSize(tableTags[tableIndex]);
            totalSize += (tableSize + 3) & ~3;
            *tableSizes.append() = tableSize;
        }

        // reserve memory for stream, and zero it (tables must be zero padded)
        fStream = std::make_unique<SkMemoryStream>(totalSize);
        char* dataStart = (char*)fStream->getMemoryBase();
        sk_bzero(dataStart, totalSize);
        char* dataPtr = dataStart;

        // compute font header entries
        // (searchRange, entrySelector, rangeShift as defined by the SFNT header)
        uint16_t entrySelector = 0;
        uint16_t searchRange = 1;
        while (searchRange < numTables >> 1) {
            entrySelector++;
            searchRange <<= 1;
        }
        searchRange <<= 4;
        uint16_t rangeShift = (numTables << 4) - searchRange;

        // write font header
        SkSFNTHeader* header = (SkSFNTHeader*)dataPtr;
        header->fontType = fontType;
        header->numTables = SkEndian_SwapBE16(numTables);
        header->searchRange = SkEndian_SwapBE16(searchRange);
        header->entrySelector = SkEndian_SwapBE16(entrySelector);
        header->rangeShift = SkEndian_SwapBE16(rangeShift);
        dataPtr += sizeof(SkSFNTHeader);

        // write tables: directory entries first, then the 4-byte-aligned table data
        SkSFNTHeader::TableDirectoryEntry* entry = (SkSFNTHeader::TableDirectoryEntry*)dataPtr;
        dataPtr += sizeof(SkSFNTHeader::TableDirectoryEntry) * numTables;
        for (int tableIndex = 0; tableIndex < numTables; ++tableIndex) {
            size_t tableSize = tableSizes[tableIndex];
            this->getTableData(tableTags[tableIndex], 0, tableSize, dataPtr);
            entry->tag = SkEndian_SwapBE32(tableTags[tableIndex]);
            entry->checksum = SkEndian_SwapBE32(SkOTUtils::CalcTableChecksum((SK_OT_ULONG*)dataPtr,
                                                                             tableSize));
            entry->offset = SkEndian_SwapBE32(SkToU32(dataPtr - dataStart));
            entry->logicalLength = SkEndian_SwapBE32(SkToU32(tableSize));

            dataPtr += (tableSize + 3) & ~3;
            ++entry;
        }
    });
    return fStream->duplicate();
}
+
+std::unique_ptr<SkStreamAsset> SkTypeface_Mac::onOpenExistingStream(int* ttcIndex) const {
+ *ttcIndex = 0;
+ return fStream ? fStream->duplicate() : nullptr;
+}
+
bool SkTypeface_Mac::onGlyphMaskNeedsCurrentColor() const {
    // `CPAL` (`COLR` and `SVG`) fonts may need the current color.
    // However, even `sbix` fonts can have glyphs which need the current color.
    // These may be glyphs with paths but no `sbix` entries, which are impossible to distinguish.
    // So report true for any typeface that was flagged as having color glyphs.
    return this->fHasColorGlyphs;
}
+
// Lazily copy (exactly once) and cache the font's variation axis array.
// The result may be null, e.g. for non-variable fonts.
CFArrayRef SkTypeface_Mac::getVariationAxes() const {
    fInitVariationAxes([this]{
        fVariationAxes.reset(CTFontCopyVariationAxes(fFontRef.get()));
    });
    return fVariationAxes.get();
}
+
// Report the font's current variation design position.
// Returns the axis count, or -1 on failure; when the provided storage is absent
// or too small, only the required count is returned.
int SkTypeface_Mac::onGetVariationDesignPosition(
    SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount) const
{
    CFArrayRef ctAxes = this->getVariationAxes();
    if (!ctAxes) {
        return -1;
    }
    CFIndex axisCount = CFArrayGetCount(ctAxes);
    if (!coordinates || coordinateCount < axisCount) {
        return axisCount;
    }

    // On 10.12 and later, this only returns non-default variations.
    SkUniqueCFRef<CFDictionaryRef> ctVariation(CTFontCopyVariation(fFontRef.get()));
    if (!ctVariation) {
        return -1;
    }

    for (int i = 0; i < axisCount; ++i) {
        CFDictionaryRef axisInfoDict;
        if (!SkCFDynamicCast(CFArrayGetValueAtIndex(ctAxes, i), &axisInfoDict, "Axis")) {
            return -1;
        }

        int64_t tagLong;
        CFNumberRef tagNumber;
        CFTypeRef tag = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisIdentifierKey);
        if (!SkCFNumberDynamicCast(tag, &tagLong, &tagNumber, "Axis tag")) {
            return -1;
        }
        coordinates[i].axis = tagLong;

        // Use the current value when present; otherwise fall back to the axis default
        // (CTFontCopyVariation omits axes still at their defaults).
        CGFloat valueCGFloat;
        CFTypeRef value = CFDictionaryGetValue(ctVariation.get(), tagNumber);
        if (value) {
            if (!SkCFNumberDynamicCast(value, &valueCGFloat, nullptr, "Variation value")) {
                return -1;
            }
        } else {
            CFTypeRef def = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisDefaultValueKey);
            if (!SkCFNumberDynamicCast(def, &valueCGFloat, nullptr, "Axis default value")) {
                return -1;
            }
        }
        coordinates[i].value = SkScalarFromCGFloat(valueCGFloat);
    }
    return axisCount;
}
+
+int SkTypeface_Mac::onGetUPEM() const {
+ SkUniqueCFRef<CGFontRef> cgFont(CTFontCopyGraphicsFont(fFontRef.get(), nullptr));
+ return CGFontGetUnitsPerEm(cgFont.get());
+}
+
+SkTypeface::LocalizedStrings* SkTypeface_Mac::onCreateFamilyNameIterator() const {
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*this);
+ if (!nameIter) {
+ CFStringRef cfLanguageRaw;
+ SkUniqueCFRef<CFStringRef> cfFamilyName(
+ CTFontCopyLocalizedName(fFontRef.get(), kCTFontFamilyNameKey, &cfLanguageRaw));
+ SkUniqueCFRef<CFStringRef> cfLanguage(cfLanguageRaw);
+
+ SkString skLanguage;
+ SkString skFamilyName;
+ if (cfLanguage) {
+ SkStringFromCFString(cfLanguage.get(), &skLanguage);
+ } else {
+ skLanguage = "und"; //undetermined
+ }
+ if (cfFamilyName) {
+ SkStringFromCFString(cfFamilyName.get(), &skFamilyName);
+ }
+
+ nameIter = sk_make_sp<SkOTUtils::LocalizedStrings_SingleName>(skFamilyName, skLanguage);
+ }
+ return nameIter.release();
+}
+
+int SkTypeface_Mac::onGetTableTags(SkFontTableTag tags[]) const {
+ SkUniqueCFRef<CFArrayRef> cfArray(
+ CTFontCopyAvailableTables(fFontRef.get(), kCTFontTableOptionNoOptions));
+ if (!cfArray) {
+ return 0;
+ }
+ CFIndex count = CFArrayGetCount(cfArray.get());
+ if (tags) {
+ for (CFIndex i = 0; i < count; ++i) {
+ uintptr_t fontTag = reinterpret_cast<uintptr_t>(
+ CFArrayGetValueAtIndex(cfArray.get(), i));
+ tags[i] = static_cast<SkFontTableTag>(fontTag);
+ }
+ }
+ return count;
+}
+
+// If, as is the case with web fonts, the CTFont data isn't available,
+// the CGFont data may work. While the CGFont may always provide the
+// right result, leave the CTFont code path to minimize disruption.
+static SkUniqueCFRef<CFDataRef> copy_table_from_font(CTFontRef ctFont, SkFontTableTag tag) {
+ SkUniqueCFRef<CFDataRef> data(CTFontCopyTable(ctFont, (CTFontTableTag) tag,
+ kCTFontTableOptionNoOptions));
+ if (!data) {
+ SkUniqueCFRef<CGFontRef> cgFont(CTFontCopyGraphicsFont(ctFont, nullptr));
+ data.reset(CGFontCopyTableForTag(cgFont.get(), tag));
+ }
+ return data;
+}
+
+size_t SkTypeface_Mac::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* dstData) const {
+ SkUniqueCFRef<CFDataRef> srcData = copy_table_from_font(fFontRef.get(), tag);
+ if (!srcData) {
+ return 0;
+ }
+
+ size_t srcSize = CFDataGetLength(srcData.get());
+ if (offset >= srcSize) {
+ return 0;
+ }
+ if (length > srcSize - offset) {
+ length = srcSize - offset;
+ }
+ if (dstData) {
+ memcpy(dstData, CFDataGetBytePtr(srcData.get()) + offset, length);
+ }
+ return length;
+}
+
// Return a zero-copy SkData view of the table's bytes.
sk_sp<SkData> SkTypeface_Mac::onCopyTableData(SkFontTableTag tag) const {
    SkUniqueCFRef<CFDataRef> srcData = copy_table_from_font(fFontRef.get(), tag);
    if (!srcData) {
        return nullptr;
    }
    const UInt8* data = CFDataGetBytePtr(srcData.get());
    CFIndex length = CFDataGetLength(srcData.get());
    // Ownership of the CFDataRef is transferred (release()) to the SkData;
    // the release proc balances it with CFRelease when the SkData dies.
    return SkData::MakeWithProc(data, length,
                                [](const void*, void* ctx) {
                                    CFRelease((CFDataRef)ctx);
                                }, (void*)srcData.release());
}
+
std::unique_ptr<SkScalerContext> SkTypeface_Mac::onCreateScalerContext(
    const SkScalerContextEffects& effects, const SkDescriptor* desc) const
{
    // The scaler context takes a strong reference to this typeface; the const_cast
    // is needed because sk_ref_sp wants a non-const pointer while this method is const.
    return std::make_unique<SkScalerContext_Mac>(
        sk_ref_sp(const_cast<SkTypeface_Mac*>(this)), effects, desc);
}
+
void SkTypeface_Mac::onFilterRec(SkScalerContextRec* rec) const {
    // BGR-order and vertical LCD are unsupported: drop to A8 but keep normal hinting.
    if (rec->fFlags & SkScalerContext::kLCD_BGROrder_Flag ||
        rec->fFlags & SkScalerContext::kLCD_Vertical_Flag)
    {
        rec->fMaskFormat = SkMask::kA8_Format;
        // Render the glyphs as close as possible to what was requested.
        // The above turns off subpixel rendering, but the user requested it.
        // Normal hinting will cause the A8 masks to be generated from CoreGraphics subpixel masks.
        // See comments below for more details.
        rec->setHinting(SkFontHinting::kNormal);
    }

    unsigned flagsWeDontSupport = SkScalerContext::kForceAutohinting_Flag |
                                  SkScalerContext::kLCD_BGROrder_Flag |
                                  SkScalerContext::kLCD_Vertical_Flag;

    rec->fFlags &= ~flagsWeDontSupport;

    const SkCTFontSmoothBehavior smoothBehavior = SkCTFontGetSmoothBehavior();

    // Only two levels of hinting are supported.
    // kNo_Hinting means avoid CoreGraphics outline dilation (smoothing).
    // kNormal_Hinting means CoreGraphics outline dilation (smoothing) is allowed.
    if (rec->getHinting() != SkFontHinting::kNone) {
        rec->setHinting(SkFontHinting::kNormal);
    }
    // If smoothing has no effect, don't request it.
    if (smoothBehavior == SkCTFontSmoothBehavior::none) {
        rec->setHinting(SkFontHinting::kNone);
    }

    // FIXME: lcd smoothed un-hinted rasterization unsupported.
    // Tracked by http://code.google.com/p/skia/issues/detail?id=915 .
    // There is no current means to honor a request for unhinted lcd,
    // so arbitrarily ignore the hinting request and honor lcd.

    // Hinting and smoothing should be orthogonal, but currently they are not.
    // CoreGraphics has no API to influence hinting. However, its lcd smoothed
    // output is drawn from auto-dilated outlines (the amount of which is
    // determined by AppleFontSmoothing). Its regular anti-aliased output is
    // drawn from un-dilated outlines.

    // The behavior of Skia is as follows:
    // [AA][no-hint]: generate AA using CoreGraphic's AA output.
    // [AA][yes-hint]: use CoreGraphic's LCD output and reduce it to a single
    // channel. This matches [LCD][yes-hint] in weight.
    // [LCD][no-hint]: currently unable to honor, and must pick which to respect.
    // Currently side with LCD, effectively ignoring the hinting setting.
    // [LCD][yes-hint]: generate LCD using CoreGraphic's LCD output.
    if (rec->fMaskFormat == SkMask::kLCD16_Format) {
        if (smoothBehavior == SkCTFontSmoothBehavior::subpixel) {
            //CoreGraphics creates 555 masks for smoothed text anyway.
            rec->fMaskFormat = SkMask::kLCD16_Format;
            rec->setHinting(SkFontHinting::kNormal);
        } else {
            rec->fMaskFormat = SkMask::kA8_Format;
            if (smoothBehavior != SkCTFontSmoothBehavior::none) {
                rec->setHinting(SkFontHinting::kNormal);
            }
        }
    }

    // CoreText provides no information as to whether a glyph will be color or not.
    // Fonts may mix outlines and bitmaps, so information is needed on a glyph by glyph basis.
    // If a font contains an 'sbix' table, consider it to be a color font, and disable lcd.
    if (fHasColorGlyphs) {
        rec->fMaskFormat = SkMask::kARGB32_Format;
    }

    // Smoothing will be used if the format is either LCD or if there is hinting.
    // In those cases, we need to choose the proper dilation mask based on the color.
    if (rec->fMaskFormat == SkMask::kLCD16_Format ||
        (rec->fMaskFormat == SkMask::kA8_Format && rec->getHinting() != SkFontHinting::kNone)) {
        SkColor color = rec->getLuminanceColor();
        int r = SkColorGetR(color);
        int g = SkColorGetG(color);
        int b = SkColorGetB(color);
        // Choose whether to draw using a light-on-dark mask based on observed
        // color/luminance thresholds that CoreText uses.
        if (r >= 85 && g >= 85 && b >= 85 && r + g + b >= 2 * 255) {
            rec->fFlags |= SkScalerContext::kLightOnDark_Flag;
        }
    }

    // Unhinted A8 masks (those not derived from LCD masks) must respect SK_GAMMA_APPLY_TO_A8.
    // All other masks can use regular gamma.
    if (SkMask::kA8_Format == rec->fMaskFormat && SkFontHinting::kNone == rec->getHinting()) {
#ifndef SK_GAMMA_APPLY_TO_A8
        // SRGBTODO: Is this correct? Do we want contrast boost?
        rec->ignorePreBlend();
#endif
    } else {
#ifndef SK_IGNORE_MAC_BLENDING_MATCH_FIX
        SkColor color = rec->getLuminanceColor();
        if (smoothBehavior == SkCTFontSmoothBehavior::some) {
            // CoreGraphics smoothed text without subpixel coverage blitting goes from a gamma of
            // 2.0 for black foreground to a gamma of 1.0 for white foreground. Emulate this
            // through the mask gamma by reducing the color values to 1/2.
            color = SkColorSetRGB(SkColorGetR(color) * 1/2,
                                  SkColorGetG(color) * 1/2,
                                  SkColorGetB(color) * 1/2);
        } else if (smoothBehavior == SkCTFontSmoothBehavior::subpixel) {
            // CoreGraphics smoothed text with subpixel coverage blitting goes from a gamma of
            // 2.0 for black foreground to a gamma of ~1.4? for white foreground. Emulate this
            // through the mask gamma by reducing the color values to 3/4.
            color = SkColorSetRGB(SkColorGetR(color) * 3/4,
                                  SkColorGetG(color) * 3/4,
                                  SkColorGetB(color) * 3/4);
        }
        rec->setLuminanceColor(color);
#endif

        // CoreGraphics dilates smoothed text to provide contrast.
        rec->setContrast(0);
    }
}
+
+/** Takes ownership of the CFStringRef. */
+static const char* get_str(CFStringRef ref, SkString* str) {
+ if (nullptr == ref) {
+ return nullptr;
+ }
+ SkStringFromCFString(ref, str);
+ CFRelease(ref);
+ return str->c_str();
+}
+
// Fill familyName with the CoreText family name; get_str leaves it untouched
// when CoreText returns no name.
void SkTypeface_Mac::onGetFamilyName(SkString* familyName) const {
    get_str(CTFontCopyFamilyName(fFontRef.get()), familyName);
}
+
+bool SkTypeface_Mac::onGetPostScriptName(SkString* skPostScriptName) const {
+ SkUniqueCFRef<CFStringRef> ctPostScriptName(CTFontCopyPostScriptName(fFontRef.get()));
+ if (!ctPostScriptName) {
+ return false;
+ }
+ if (skPostScriptName) {
+ SkStringFromCFString(ctPostScriptName.get(), skPostScriptName);
+ }
+ return true;
+}
+
+void SkTypeface_Mac::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* isLocalStream) const {
+ SkString tmpStr;
+
+ desc->setFamilyName(get_str(CTFontCopyFamilyName(fFontRef.get()), &tmpStr));
+ desc->setFullName(get_str(CTFontCopyFullName(fFontRef.get()), &tmpStr));
+ desc->setPostscriptName(get_str(CTFontCopyPostScriptName(fFontRef.get()), &tmpStr));
+ desc->setStyle(this->fontStyle());
+ desc->setFactoryId(FactoryId);
+ *isLocalStream = fIsFromStream;
+}
+
void SkTypeface_Mac::onCharsToGlyphs(const SkUnichar uni[], int count, SkGlyphID glyphs[]) const {
    // Undocumented behavior of CTFontGetGlyphsForCharacters with non-bmp code points:
    // When a surrogate pair is detected, the glyph index used is the index of the high surrogate.
    // It is documented that if a mapping is unavailable, the glyph will be set to 0.

    // Convert the UTF-32 input to the UTF-16 CoreText expects (worst case 2 units each).
    AutoSTMalloc<1024, UniChar> charStorage;
    const UniChar* src; // UniChar is a UTF-16 16-bit code unit.
    int srcCount;
    const SkUnichar* utf32 = reinterpret_cast<const SkUnichar*>(uni);
    UniChar* utf16 = charStorage.reset(2 * count);
    src = utf16;
    for (int i = 0; i < count; ++i) {
        utf16 += SkUTF::ToUTF16(utf32[i], utf16);
    }
    srcCount = SkToInt(utf16 - src);

    // If there are any non-bmp code points, the provided 'glyphs' storage will be inadequate.
    AutoSTMalloc<1024, uint16_t> glyphStorage;
    uint16_t* macGlyphs = glyphs;
    if (srcCount > count) {
        macGlyphs = glyphStorage.reset(srcCount);
    }

    CTFontGetGlyphsForCharacters(fFontRef.get(), src, macGlyphs, srcCount);

    // If there were any non-bmp, then copy and compact.
    // If all are bmp, 'glyphs' already contains the compact glyphs.
    // If some are non-bmp, copy and compact into 'glyphs'.
    if (srcCount > count) {
        SkASSERT(glyphs != macGlyphs);
        int extra = 0;
        for (int i = 0; i < count; ++i) {
            // Take the glyph at the high surrogate's position; skip over the low surrogate.
            glyphs[i] = macGlyphs[i + extra];
            if (SkUTF::IsLeadingSurrogateUTF16(src[i + extra])) {
                ++extra;
            }
        }
    } else {
        SkASSERT(glyphs == macGlyphs);
    }
}
+
// Total number of glyphs in the font, as reported by CoreText.
int SkTypeface_Mac::onCountGlyphs() const {
    return SkToInt(CTFontGetGlyphCount(fFontRef.get()));
}
+
/** Creates a dictionary suitable for setting the axes on a CTFont.
 *
 * For each axis: start from the axis default, overlay the font's current value,
 * then overlay the requested value (pinned to the axis range). Also records the
 * resulting 'opsz' value and, when the requested 'opsz' equals the current one,
 * builds a second "wrong opsz" dictionary used by onMakeClone to work around an
 * OS bug where such clones compare CFEqual to the original font.
 */
static CTFontVariation ctvariation_from_SkFontArguments(CTFontRef ct, CFArrayRef ctAxes,
                                                        const SkFontArguments& args) {
    OpszVariation opsz;
    constexpr const SkFourByteTag opszTag = SkSetFourByteTag('o','p','s','z');

    if (!ctAxes) {
        return CTFontVariation();
    }
    CFIndex axisCount = CFArrayGetCount(ctAxes);

    // On 10.12 and later, this only returns non-default variations.
    SkUniqueCFRef<CFDictionaryRef> oldCtVariation(CTFontCopyVariation(ct));

    const SkFontArguments::VariationPosition position = args.getVariationDesignPosition();

    SkUniqueCFRef<CFMutableDictionaryRef> newCtVariation(
        CFDictionaryCreateMutable(kCFAllocatorDefault, axisCount,
                                  &kCFTypeDictionaryKeyCallBacks,
                                  &kCFTypeDictionaryValueCallBacks));
    SkUniqueCFRef<CFMutableDictionaryRef> wrongOpszVariation;

    for (int i = 0; i < axisCount; ++i) {
        CFDictionaryRef axisInfoDict;
        if (!SkCFDynamicCast(CFArrayGetValueAtIndex(ctAxes, i), &axisInfoDict, "Axis")) {
            return CTFontVariation();
        }

        int64_t tagLong;
        CFNumberRef tagNumber;
        CFTypeRef tag = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisIdentifierKey);
        if (!SkCFNumberDynamicCast(tag, &tagLong, &tagNumber, "Axis tag")) {
            return CTFontVariation();
        }

        // The variation axes can be set to any value, but cg will effectively pin them.
        // Pin them here to normalize.
        double minDouble;
        double maxDouble;
        double defDouble;
        CFTypeRef min = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisMinimumValueKey);
        CFTypeRef max = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisMaximumValueKey);
        CFTypeRef def = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisDefaultValueKey);
        if (!SkCFNumberDynamicCast(min, &minDouble, nullptr, "Axis min") ||
            !SkCFNumberDynamicCast(max, &maxDouble, nullptr, "Axis max") ||
            !SkCFNumberDynamicCast(def, &defDouble, nullptr, "Axis def"))
        {
            return CTFontVariation();
        }

        // Start with the default value.
        double value = defDouble;

        // Then the current value.
        bool haveCurrentDouble = false;
        double currentDouble = 0;
        if (oldCtVariation) {
            CFTypeRef currentNumber = CFDictionaryGetValue(oldCtVariation.get(), tagNumber);
            if (currentNumber) {
                if (!SkCFNumberDynamicCast(currentNumber, &value, nullptr, "Variation value")) {
                    return CTFontVariation();
                }
                currentDouble = value;
                haveCurrentDouble = true;
            }
        }

        // Then the requested value.
        // The position may be over specified. If there are multiple values for a given axis,
        // use the last one since that's what css-fonts-4 requires.
        for (int j = position.coordinateCount; j --> 0;) {
            if (position.coordinates[j].axis == tagLong) {
                value = SkTPin<double>(position.coordinates[j].value, minDouble, maxDouble);
                if (tagLong == opszTag) {
                    opsz.isSet = true;
                }
                break;
            }
        }
        if (tagLong == opszTag) {
            opsz.value = value;
            if (haveCurrentDouble && value == currentDouble) {
                // Calculate a value strictly in range but different from currentValue.
                double wrongOpszDouble = ((maxDouble - minDouble) / 2.0) + minDouble;
                if (wrongOpszDouble == currentDouble) {
                    wrongOpszDouble = ((maxDouble - minDouble) / 4.0) + minDouble;
                }
                wrongOpszVariation.reset(
                    CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
                                              &kCFTypeDictionaryKeyCallBacks,
                                              &kCFTypeDictionaryValueCallBacks));
                SkUniqueCFRef<CFNumberRef> wrongOpszNumber(
                    CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &wrongOpszDouble));
                CFDictionarySetValue(wrongOpszVariation.get(), tagNumber, wrongOpszNumber.get());
            }
        }
        SkUniqueCFRef<CFNumberRef> valueNumber(
            CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &value));
        CFDictionaryAddValue(newCtVariation.get(), tagNumber, valueNumber.get());
    }
    return { SkUniqueCFRef<CFDictionaryRef>(std::move(newCtVariation)),
             SkUniqueCFRef<CFDictionaryRef>(std::move(wrongOpszVariation)),
             opsz };
}
+
+sk_sp<SkTypeface> SkTypeface_Mac::onMakeClone(const SkFontArguments& args) const {
+ CTFontVariation ctVariation = ctvariation_from_SkFontArguments(fFontRef.get(),
+ this->getVariationAxes(),
+ args);
+
+ SkUniqueCFRef<CTFontRef> ctVariant;
+ if (ctVariation.variation) {
+ SkUniqueCFRef<CFMutableDictionaryRef> attributes(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+
+ CTFontRef ctFont = fFontRef.get();
+ SkUniqueCFRef<CTFontRef> wrongOpszFont;
+ if (ctVariation.wrongOpszVariation) {
+ // On macOS 11 cloning a system font with an opsz axis and not changing the
+ // value of the opsz axis (either by setting it to the same value or not
+ // specifying it at all) when setting a variation causes the variation to
+ // be set but the cloned font will still compare CFEqual to the original
+ // font. Work around this by setting the opsz to something which isn't the
+ // desired value before setting the entire desired variation.
+ //
+ // A similar issue occurs with fonts from data on macOS 10.15 and the same
+ // work around seems to apply. This is less noticeable though since CFEqual
+ // isn't used on these fonts.
+ CFDictionarySetValue(attributes.get(),
+ kCTFontVariationAttribute, ctVariation.wrongOpszVariation.get());
+ SkUniqueCFRef<CTFontDescriptorRef> varDesc(
+ CTFontDescriptorCreateWithAttributes(attributes.get()));
+ wrongOpszFont.reset(CTFontCreateCopyWithAttributes(ctFont, 0, nullptr, varDesc.get()));
+ ctFont = wrongOpszFont.get();
+ }
+
+ CFDictionarySetValue(attributes.get(),
+ kCTFontVariationAttribute, ctVariation.variation.get());
+ SkUniqueCFRef<CTFontDescriptorRef> varDesc(
+ CTFontDescriptorCreateWithAttributes(attributes.get()));
+ ctVariant.reset(CTFontCreateCopyWithAttributes(ctFont, 0, nullptr, varDesc.get()));
+ } else {
+ ctVariant.reset((CTFontRef)CFRetain(fFontRef.get()));
+ }
+ if (!ctVariant) {
+ return nullptr;
+ }
+
+ return SkTypeface_Mac::Make(std::move(ctVariant), ctVariation.opsz,
+ fStream ? fStream->duplicate() : nullptr);
+}
+
+static sk_sp<SkData> skdata_from_skstreamasset(std::unique_ptr<SkStreamAsset> stream) {
+ size_t size = stream->getLength();
+ if (const void* base = stream->getMemoryBase()) {
+ return SkData::MakeWithProc(base, size,
+ [](const void*, void* ctx) -> void {
+ delete (SkStreamAsset*)ctx;
+ }, stream.release());
+ }
+ return SkData::MakeFromStream(stream.get(), size);
+}
+
+static SkUniqueCFRef<CFDataRef> cfdata_from_skdata(sk_sp<SkData> data) {
+ void const * const addr = data->data();
+ size_t const size = data->size();
+
+ CFAllocatorContext ctx = {
+ 0, // CFIndex version
+ data.release(), // void* info
+ nullptr, // const void *(*retain)(const void *info);
+ nullptr, // void (*release)(const void *info);
+ nullptr, // CFStringRef (*copyDescription)(const void *info);
+ nullptr, // void * (*allocate)(CFIndex size, CFOptionFlags hint, void *info);
+ nullptr, // void*(*reallocate)(void* ptr,CFIndex newsize,CFOptionFlags hint,void* info);
+ [](void*,void* info) -> void { // void (*deallocate)(void *ptr, void *info);
+ SkASSERT(info);
+ ((SkData*)info)->unref();
+ },
+ nullptr, // CFIndex (*preferredSize)(CFIndex size, CFOptionFlags hint, void *info);
+ };
+ SkUniqueCFRef<CFAllocatorRef> alloc(CFAllocatorCreate(kCFAllocatorDefault, &ctx));
+ return SkUniqueCFRef<CFDataRef>(CFDataCreateWithBytesNoCopy(
+ kCFAllocatorDefault, (const UInt8 *)addr, size, alloc.get()));
+}
+
+static SkUniqueCFRef<CTFontRef> ctfont_from_skdata(sk_sp<SkData> data, int ttcIndex) {
+ // TODO: Use CTFontManagerCreateFontDescriptorsFromData when available.
+ if (ttcIndex != 0) {
+ return nullptr;
+ }
+
+ SkUniqueCFRef<CFDataRef> cfData(cfdata_from_skdata(std::move(data)));
+
+ SkUniqueCFRef<CTFontDescriptorRef> desc(
+ CTFontManagerCreateFontDescriptorFromData(cfData.get()));
+ if (!desc) {
+ return nullptr;
+ }
+ return SkUniqueCFRef<CTFontRef>(CTFontCreateWithFontDescriptor(desc.get(), 0, nullptr));
+}
+
+sk_sp<SkTypeface> SkTypeface_Mac::MakeFromStream(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args)
+{
+ // TODO: Use CTFontManagerCreateFontDescriptorsFromData when available.
+ int ttcIndex = args.getCollectionIndex();
+ if (ttcIndex != 0) {
+ return nullptr;
+ }
+
+ sk_sp<SkData> data = skdata_from_skstreamasset(stream->duplicate());
+ if (!data) {
+ return nullptr;
+ }
+ SkUniqueCFRef<CTFontRef> ct = ctfont_from_skdata(std::move(data), ttcIndex);
+ if (!ct) {
+ return nullptr;
+ }
+
+ SkUniqueCFRef<CTFontRef> ctVariant;
+ CTFontVariation ctVariation;
+ if (args.getVariationDesignPosition().coordinateCount == 0) {
+ ctVariant.reset(ct.release());
+ } else {
+ SkUniqueCFRef<CFArrayRef> axes(CTFontCopyVariationAxes(ct.get()));
+ ctVariation = ctvariation_from_SkFontArguments(ct.get(), axes.get(), args);
+
+ if (ctVariation.variation) {
+ SkUniqueCFRef<CFMutableDictionaryRef> attributes(
+ CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+ CFDictionaryAddValue(attributes.get(),
+ kCTFontVariationAttribute, ctVariation.variation.get());
+ SkUniqueCFRef<CTFontDescriptorRef> varDesc(
+ CTFontDescriptorCreateWithAttributes(attributes.get()));
+ ctVariant.reset(CTFontCreateCopyWithAttributes(ct.get(), 0, nullptr, varDesc.get()));
+ } else {
+ ctVariant.reset(ct.release());
+ }
+ }
+ if (!ctVariant) {
+ return nullptr;
+ }
+
+ return SkTypeface_Mac::Make(std::move(ctVariant), ctVariation.opsz, std::move(stream));
+}
+
+int SkTypeface_Mac::onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const
+{
+ CFArrayRef ctAxes = this->getVariationAxes();
+ if (!ctAxes) {
+ return -1;
+ }
+ CFIndex axisCount = CFArrayGetCount(ctAxes);
+
+ if (!parameters || parameterCount < axisCount) {
+ return axisCount;
+ }
+
+ // Added in 10.13
+ static CFStringRef* kCTFontVariationAxisHiddenKeyPtr =
+ static_cast<CFStringRef*>(dlsym(RTLD_DEFAULT, "kCTFontVariationAxisHiddenKey"));
+
+ for (int i = 0; i < axisCount; ++i) {
+ CFDictionaryRef axisInfoDict;
+ if (!SkCFDynamicCast(CFArrayGetValueAtIndex(ctAxes, i), &axisInfoDict, "Axis")) {
+ return -1;
+ }
+
+ int64_t tagLong;
+ CFTypeRef tag = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisIdentifierKey);
+ if (!SkCFNumberDynamicCast(tag, &tagLong, nullptr, "Axis tag")) {
+ return -1;
+ }
+
+ double minDouble;
+ double maxDouble;
+ double defDouble;
+ CFTypeRef min = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisMinimumValueKey);
+ CFTypeRef max = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisMaximumValueKey);
+ CFTypeRef def = CFDictionaryGetValue(axisInfoDict, kCTFontVariationAxisDefaultValueKey);
+ if (!SkCFNumberDynamicCast(min, &minDouble, nullptr, "Axis min") ||
+ !SkCFNumberDynamicCast(max, &maxDouble, nullptr, "Axis max") ||
+ !SkCFNumberDynamicCast(def, &defDouble, nullptr, "Axis def"))
+ {
+ return -1;
+ }
+
+ SkFontParameters::Variation::Axis& skAxis = parameters[i];
+ skAxis.tag = tagLong;
+ skAxis.min = minDouble;
+ skAxis.max = maxDouble;
+ skAxis.def = defDouble;
+ skAxis.setHidden(false);
+ if (kCTFontVariationAxisHiddenKeyPtr) {
+ CFTypeRef hidden = CFDictionaryGetValue(axisInfoDict,*kCTFontVariationAxisHiddenKeyPtr);
+ if (hidden) {
+ // At least macOS 11 Big Sur Beta 4 uses CFNumberRef instead of CFBooleanRef.
+ // https://crbug.com/1113444
+ CFBooleanRef hiddenBoolean;
+ int hiddenInt;
+ if (SkCFDynamicCast(hidden, &hiddenBoolean, nullptr)) {
+ skAxis.setHidden(CFBooleanGetValue(hiddenBoolean));
+ } else if (SkCFNumberDynamicCast(hidden, &hiddenInt, nullptr, "Axis hidden")) {
+ skAxis.setHidden(hiddenInt);
+ } else {
+ return -1;
+ }
+ }
+ }
+ }
+ return axisCount;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/ports/SkTypeface_mac_ct.h b/gfx/skia/skia/src/ports/SkTypeface_mac_ct.h
new file mode 100644
index 0000000000..1ae5d89365
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTypeface_mac_ct.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_mac_ct_DEFINED
+#define SkTypeface_mac_ct_DEFINED
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontParameters.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypeface.h"
+#include "include/private/base/SkOnce.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#include <CoreText/CTFontManager.h>
+#include <CoreGraphics/CoreGraphics.h>
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include <memory>
+
+class SkData;
+class SkDescriptor;
+class SkFontData;
+class SkFontDescriptor;
+class SkScalerContext;
+class SkString;
+struct SkAdvancedTypefaceMetrics;
+struct SkScalerContextEffects;
+struct SkScalerContextRec;
+
+struct OpszVariation {
+ bool isSet = false;
+ double value = 0;
+};
+
+struct CTFontVariation {
+ SkUniqueCFRef<CFDictionaryRef> variation;
+ SkUniqueCFRef<CFDictionaryRef> wrongOpszVariation;
+ OpszVariation opsz;
+};
+
+SkUniqueCFRef<CTFontRef> SkCTFontCreateExactCopy(CTFontRef baseFont, CGFloat textSize,
+ OpszVariation opsz);
+
+SkFontStyle SkCTFontDescriptorGetSkFontStyle(CTFontDescriptorRef desc, bool fromDataProvider);
+
+CGFloat SkCTFontCTWeightForCSSWeight(int fontstyleWeight);
+CGFloat SkCTFontCTWidthForCSSWidth(int fontstyleWidth);
+
+void SkStringFromCFString(CFStringRef src, SkString* dst);
+
+class SkTypeface_Mac : public SkTypeface {
+private:
+ SkTypeface_Mac(SkUniqueCFRef<CTFontRef> fontRef, const SkFontStyle& fs, bool isFixedPitch,
+ OpszVariation opszVariation, std::unique_ptr<SkStreamAsset> providedData)
+ : SkTypeface(fs, isFixedPitch)
+ , fFontRef(std::move(fontRef))
+ , fOpszVariation(opszVariation)
+ , fHasColorGlyphs(
+ SkToBool(CTFontGetSymbolicTraits(fFontRef.get()) & kCTFontColorGlyphsTrait))
+ , fStream(std::move(providedData))
+ , fIsFromStream(fStream)
+ {
+ SkASSERT(fFontRef);
+ }
+
+public:
+ static sk_sp<SkTypeface> Make(SkUniqueCFRef<CTFontRef> font,
+ OpszVariation opszVariation,
+ std::unique_ptr<SkStreamAsset> providedData);
+
+ static constexpr SkTypeface::FactoryId FactoryId = SkSetFourByteTag('c','t','x','t');
+ static sk_sp<SkTypeface> MakeFromStream(std::unique_ptr<SkStreamAsset>, const SkFontArguments&);
+
+ SkUniqueCFRef<CTFontRef> fFontRef;
+ const OpszVariation fOpszVariation;
+ const bool fHasColorGlyphs;
+
+ bool hasColorGlyphs() const override { return fHasColorGlyphs; }
+
+ /**
+ * CTFontCopyVariationAxes provides the localized name of all axes, making it very slow.
+     * This is unfortunate, as its result is needed just to see if there are any axes at all.
+ * To avoid calling internal APIs cache the result of CTFontCopyVariationAxes.
+ * https://github.com/WebKit/WebKit/commit/1842365d413ed87868e7d33d4fad1691fa3a8129
+ * https://bugs.webkit.org/show_bug.cgi?id=232690
+ */
+ CFArrayRef getVariationAxes() const;
+
+protected:
+ int onGetUPEM() const override;
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ std::unique_ptr<SkStreamAsset> onOpenExistingStream(int* ttcIndex) const override;
+ bool onGlyphMaskNeedsCurrentColor() const override;
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ bool onGetPostScriptName(SkString*) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset, size_t length, void* data) const override;
+ sk_sp<SkData> onCopyTableData(SkFontTableTag) const override;
+ std::unique_ptr<SkScalerContext> onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ void getGlyphToUnicodeMap(SkUnichar*) const override;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override;
+ int onCountGlyphs() const override;
+ void getPostScriptGlyphNames(SkString*) const override {}
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override;
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments&) const override;
+
+ void* onGetCTFontRef() const override { return (void*)fFontRef.get(); }
+
+private:
+ mutable std::unique_ptr<SkStreamAsset> fStream;
+ mutable SkUniqueCFRef<CFArrayRef> fVariationAxes;
+ bool fIsFromStream;
+ mutable SkOnce fInitStream;
+ mutable SkOnce fInitVariationAxes;
+
+ using INHERITED = SkTypeface;
+};
+
+#endif
+#endif //SkTypeface_mac_ct_DEFINED
diff --git a/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp b/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp
new file mode 100644
index 0000000000..c551466b27
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTypeface_win_dw.cpp
@@ -0,0 +1,1094 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/win/SkDWriteNTDDI_VERSION.h"
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/base/SkLeanWindows.h"
+
+// SkLeanWindows will include Windows.h, which will pull in all of the GDI defines.
+// GDI #defines GetGlyphIndices to GetGlyphIndicesA or GetGlyphIndicesW, but
+// IDWriteFontFace has a method called GetGlyphIndices. Since this file does
+// not use GDI, undefining GetGlyphIndices makes things less confusing.
+#undef GetGlyphIndices
+
+#include "include/core/SkData.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkFontStream.h"
+#include "src/core/SkScalerContext.h"
+#include "src/ports/SkScalerContext_win_dw.h"
+#include "src/ports/SkTypeface_win_dw.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+#include "src/sfnt/SkOTTable_fvar.h"
+#include "src/sfnt/SkOTTable_head.h"
+#include "src/sfnt/SkOTTable_hhea.h"
+#include "src/sfnt/SkOTTable_post.h"
+#include "src/sfnt/SkOTUtils.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkDWriteFontFileStream.h"
+
+using namespace skia_private;
+
+HRESULT DWriteFontTypeface::initializePalette() {
+ if (!fIsColorFont) {
+ return S_OK;
+ }
+
+ UINT32 dwPaletteCount = fDWriteFontFace2->GetColorPaletteCount();
+ if (dwPaletteCount == 0) {
+ return S_OK;
+ }
+
+ // Treat out of range palette index values as 0. Still apply overrides.
+ // https://www.w3.org/TR/css-fonts-4/#base-palette-desc
+ UINT32 basePaletteIndex = 0;
+ if (SkTFitsIn<UINT32>(fRequestedPalette.index) &&
+ SkTo<UINT32>(fRequestedPalette.index) < dwPaletteCount)
+ {
+ basePaletteIndex = fRequestedPalette.index;
+ }
+
+ UINT32 dwPaletteEntryCount = fDWriteFontFace2->GetPaletteEntryCount();
+ AutoSTMalloc<8, DWRITE_COLOR_F> dwPaletteEntry(dwPaletteEntryCount);
+ HRM(fDWriteFontFace2->GetPaletteEntries(basePaletteIndex,
+ 0, dwPaletteEntryCount,
+ dwPaletteEntry),
+ "Could not retrieve palette entries.");
+
+ fPalette.reset(new SkColor[dwPaletteEntryCount]);
+ for (UINT32 i = 0; i < dwPaletteEntryCount; ++i) {
+ fPalette[i] = SkColorSetARGB(sk_float_round2int(dwPaletteEntry[i].a * 255),
+ sk_float_round2int(dwPaletteEntry[i].r * 255),
+ sk_float_round2int(dwPaletteEntry[i].g * 255),
+ sk_float_round2int(dwPaletteEntry[i].b * 255));
+ }
+
+ for (int i = 0; i < fRequestedPalette.overrideCount; ++i) {
+ const SkFontArguments::Palette::Override& paletteOverride = fRequestedPalette.overrides[i];
+ if (SkTFitsIn<UINT32>(paletteOverride.index) &&
+ SkTo<UINT32>(paletteOverride.index) < dwPaletteEntryCount)
+ {
+ fPalette[paletteOverride.index] = paletteOverride.color;
+ }
+ }
+ fPaletteEntryCount = dwPaletteEntryCount;
+
+ return S_OK;
+}
+
+DWriteFontTypeface::DWriteFontTypeface(const SkFontStyle& style,
+ IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily,
+ sk_sp<Loaders> loaders,
+ const SkFontArguments::Palette& palette)
+ : SkTypeface(style, false)
+ , fFactory(SkRefComPtr(factory))
+ , fDWriteFontFamily(SkSafeRefComPtr(fontFamily))
+ , fDWriteFont(SkSafeRefComPtr(font))
+ , fDWriteFontFace(SkRefComPtr(fontFace))
+ , fRequestedPaletteEntryOverrides(palette.overrideCount
+ ? (SkFontArguments::Palette::Override*)memcpy(
+ new SkFontArguments::Palette::Override[palette.overrideCount],
+ palette.overrides,
+ palette.overrideCount * sizeof(palette.overrides[0]))
+ : nullptr)
+ , fRequestedPalette{palette.index,
+ fRequestedPaletteEntryOverrides.get(), palette.overrideCount }
+ , fPaletteEntryCount(0)
+ , fLoaders(std::move(loaders))
+ , fRenderingMode(DWRITE_RENDERING_MODE_DEFAULT)
+ , fGamma(2.2f)
+ , fContrast(1.0f)
+ , fClearTypeLevel(1.0f)
+{
+ if (!SUCCEEDED(fDWriteFontFace->QueryInterface(&fDWriteFontFace1))) {
+ // IUnknown::QueryInterface states that if it fails, punk will be set to nullptr.
+ // http://blogs.msdn.com/b/oldnewthing/archive/2004/03/26/96777.aspx
+ SkASSERT_RELEASE(nullptr == fDWriteFontFace1.get());
+ }
+ if (!SUCCEEDED(fDWriteFontFace->QueryInterface(&fDWriteFontFace2))) {
+ SkASSERT_RELEASE(nullptr == fDWriteFontFace2.get());
+ }
+ if (!SUCCEEDED(fDWriteFontFace->QueryInterface(&fDWriteFontFace4))) {
+ SkASSERT_RELEASE(nullptr == fDWriteFontFace4.get());
+ }
+ if (!SUCCEEDED(fFactory->QueryInterface(&fFactory2))) {
+ SkASSERT_RELEASE(nullptr == fFactory2.get());
+ }
+
+ if (fDWriteFontFace1 && fDWriteFontFace1->IsMonospacedFont()) {
+ this->setIsFixedPitch(true);
+ }
+
+ fIsColorFont = fFactory2 && fDWriteFontFace2 && fDWriteFontFace2->IsColorFont();
+ this->initializePalette();
+}
+
+DWriteFontTypeface::~DWriteFontTypeface() = default;
+
+DWriteFontTypeface::Loaders::~Loaders() {
+ // Don't return if any fail, just keep going to free up as much as possible.
+ HRESULT hr;
+
+ hr = fFactory->UnregisterFontCollectionLoader(fDWriteFontCollectionLoader.get());
+ if (FAILED(hr)) {
+ SK_TRACEHR(hr, "FontCollectionLoader");
+ }
+
+ hr = fFactory->UnregisterFontFileLoader(fDWriteFontFileLoader.get());
+ if (FAILED(hr)) {
+ SK_TRACEHR(hr, "FontFileLoader");
+ }
+}
+
+void DWriteFontTypeface::onGetFamilyName(SkString* familyName) const {
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRV(fDWriteFontFamily->GetFamilyNames(&familyNames));
+
+ sk_get_locale_string(familyNames.get(), nullptr/*fMgr->fLocaleName.get()*/, familyName);
+}
+
+bool DWriteFontTypeface::onGetPostScriptName(SkString* skPostScriptName) const {
+ SkString localSkPostScriptName;
+ SkTScopedComPtr<IDWriteLocalizedStrings> postScriptNames;
+ BOOL exists = FALSE;
+ if (FAILED(fDWriteFont->GetInformationalStrings(
+ DWRITE_INFORMATIONAL_STRING_POSTSCRIPT_NAME,
+ &postScriptNames,
+ &exists)) ||
+ !exists ||
+ FAILED(sk_get_locale_string(postScriptNames.get(), nullptr, &localSkPostScriptName)))
+ {
+ return false;
+ }
+ if (skPostScriptName) {
+ *skPostScriptName = localSkPostScriptName;
+ }
+ return true;
+}
+
+void DWriteFontTypeface::onGetFontDescriptor(SkFontDescriptor* desc,
+ bool* serialize) const {
+ // Get the family name.
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRV(fDWriteFontFamily->GetFamilyNames(&familyNames));
+
+ SkString utf8FamilyName;
+ sk_get_locale_string(familyNames.get(), nullptr/*fMgr->fLocaleName.get()*/, &utf8FamilyName);
+
+ desc->setFamilyName(utf8FamilyName.c_str());
+ desc->setStyle(this->fontStyle());
+
+ desc->setPaletteIndex(fRequestedPalette.index);
+ sk_careful_memcpy(desc->setPaletteEntryOverrides(fRequestedPalette.overrideCount),
+ fRequestedPalette.overrides,
+ fRequestedPalette.overrideCount * sizeof(fRequestedPalette.overrides[0]));
+
+ desc->setFactoryId(FactoryId);
+ *serialize = SkToBool(fLoaders);
+}
+
+void DWriteFontTypeface::onCharsToGlyphs(const SkUnichar* uni, int count,
+ SkGlyphID glyphs[]) const {
+ fDWriteFontFace->GetGlyphIndices((const UINT32*)uni, count, glyphs);
+}
+
+int DWriteFontTypeface::onCountGlyphs() const {
+ return fDWriteFontFace->GetGlyphCount();
+}
+
+void DWriteFontTypeface::getPostScriptGlyphNames(SkString*) const {}
+
+int DWriteFontTypeface::onGetUPEM() const {
+ DWRITE_FONT_METRICS metrics;
+ fDWriteFontFace->GetMetrics(&metrics);
+ return metrics.designUnitsPerEm;
+}
+
+class LocalizedStrings_IDWriteLocalizedStrings : public SkTypeface::LocalizedStrings {
+public:
+ /** Takes ownership of the IDWriteLocalizedStrings. */
+ explicit LocalizedStrings_IDWriteLocalizedStrings(IDWriteLocalizedStrings* strings)
+ : fIndex(0), fStrings(strings)
+ { }
+
+ bool next(SkTypeface::LocalizedString* localizedString) override {
+ if (fIndex >= fStrings->GetCount()) {
+ return false;
+ }
+
+ // String
+ UINT32 stringLen;
+ HRBM(fStrings->GetStringLength(fIndex, &stringLen), "Could not get string length.");
+
+ SkSMallocWCHAR wString(static_cast<size_t>(stringLen)+1);
+ HRBM(fStrings->GetString(fIndex, wString.get(), stringLen+1), "Could not get string.");
+
+ HRB(sk_wchar_to_skstring(wString.get(), stringLen, &localizedString->fString));
+
+ // Locale
+ UINT32 localeLen;
+ HRBM(fStrings->GetLocaleNameLength(fIndex, &localeLen), "Could not get locale length.");
+
+ SkSMallocWCHAR wLocale(static_cast<size_t>(localeLen)+1);
+ HRBM(fStrings->GetLocaleName(fIndex, wLocale.get(), localeLen+1), "Could not get locale.");
+
+ HRB(sk_wchar_to_skstring(wLocale.get(), localeLen, &localizedString->fLanguage));
+
+ ++fIndex;
+ return true;
+ }
+
+private:
+ UINT32 fIndex;
+ SkTScopedComPtr<IDWriteLocalizedStrings> fStrings;
+};
+
+SkTypeface::LocalizedStrings* DWriteFontTypeface::onCreateFamilyNameIterator() const {
+ sk_sp<SkTypeface::LocalizedStrings> nameIter =
+ SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(*this);
+ if (!nameIter) {
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ HRNM(fDWriteFontFamily->GetFamilyNames(&familyNames), "Could not obtain family names.");
+ nameIter = sk_make_sp<LocalizedStrings_IDWriteLocalizedStrings>(familyNames.release());
+ }
+ return nameIter.release();
+}
+
+bool DWriteFontTypeface::onGlyphMaskNeedsCurrentColor() const {
+ return fDWriteFontFace2 && fDWriteFontFace2->GetColorPaletteCount() > 0;
+}
+
+int DWriteFontTypeface::onGetVariationDesignPosition(
+ SkFontArguments::VariationPosition::Coordinate coordinates[], int coordinateCount) const
+{
+
+#if defined(NTDDI_WIN10_RS3) && NTDDI_VERSION >= NTDDI_WIN10_RS3
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+ if (FAILED(fDWriteFontFace->QueryInterface(&fontFace5))) {
+ return -1;
+ }
+
+    // Return 0 if the font is not a variable font.
+ if (!fontFace5->HasVariations()) {
+ return 0;
+ }
+
+ UINT32 fontAxisCount = fontFace5->GetFontAxisValueCount();
+ SkTScopedComPtr<IDWriteFontResource> fontResource;
+ HR_GENERAL(fontFace5->GetFontResource(&fontResource), nullptr, -1);
+ UINT32 variableAxisCount = 0;
+ for (UINT32 i = 0; i < fontAxisCount; ++i) {
+ if (fontResource->GetFontAxisAttributes(i) & DWRITE_FONT_AXIS_ATTRIBUTES_VARIABLE) {
+ ++variableAxisCount;
+ }
+ }
+
+ if (!coordinates || coordinateCount < 0 || (unsigned)coordinateCount < variableAxisCount) {
+ return SkTo<int>(variableAxisCount);
+ }
+
+ AutoSTMalloc<8, DWRITE_FONT_AXIS_VALUE> fontAxisValue(fontAxisCount);
+ HR_GENERAL(fontFace5->GetFontAxisValues(fontAxisValue.get(), fontAxisCount), nullptr, -1);
+ UINT32 coordIndex = 0;
+ for (UINT32 axisIndex = 0; axisIndex < fontAxisCount; ++axisIndex) {
+ if (fontResource->GetFontAxisAttributes(axisIndex) & DWRITE_FONT_AXIS_ATTRIBUTES_VARIABLE) {
+ coordinates[coordIndex].axis = SkEndian_SwapBE32(fontAxisValue[axisIndex].axisTag);
+ coordinates[coordIndex].value = fontAxisValue[axisIndex].value;
+ ++coordIndex;
+ }
+ }
+
+ SkASSERT(coordIndex == variableAxisCount);
+ return SkTo<int>(variableAxisCount);
+
+#else
+ return -1;
+#endif
+}
+
+int DWriteFontTypeface::onGetVariationDesignParameters(
+ SkFontParameters::Variation::Axis parameters[], int parameterCount) const
+{
+
+#if defined(NTDDI_WIN10_RS3) && NTDDI_VERSION >= NTDDI_WIN10_RS3
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+ if (FAILED(fDWriteFontFace->QueryInterface(&fontFace5))) {
+ return -1;
+ }
+
+    // Return 0 if the font is not a variable font.
+ if (!fontFace5->HasVariations()) {
+ return 0;
+ }
+
+ UINT32 fontAxisCount = fontFace5->GetFontAxisValueCount();
+ SkTScopedComPtr<IDWriteFontResource> fontResource;
+ HR_GENERAL(fontFace5->GetFontResource(&fontResource), nullptr, -1);
+ int variableAxisCount = 0;
+ for (UINT32 i = 0; i < fontAxisCount; ++i) {
+ if (fontResource->GetFontAxisAttributes(i) & DWRITE_FONT_AXIS_ATTRIBUTES_VARIABLE) {
+ variableAxisCount++;
+ }
+ }
+
+ if (!parameters || parameterCount < variableAxisCount) {
+ return variableAxisCount;
+ }
+
+ AutoSTMalloc<8, DWRITE_FONT_AXIS_RANGE> fontAxisRange(fontAxisCount);
+ HR_GENERAL(fontResource->GetFontAxisRanges(fontAxisRange.get(), fontAxisCount), nullptr, -1);
+ AutoSTMalloc<8, DWRITE_FONT_AXIS_VALUE> fontAxisDefaultValue(fontAxisCount);
+ HR_GENERAL(fontResource->GetDefaultFontAxisValues(fontAxisDefaultValue.get(), fontAxisCount),
+ nullptr, -1);
+ UINT32 coordIndex = 0;
+
+ for (UINT32 axisIndex = 0; axisIndex < fontAxisCount; ++axisIndex) {
+ if (fontResource->GetFontAxisAttributes(axisIndex) & DWRITE_FONT_AXIS_ATTRIBUTES_VARIABLE) {
+ parameters[coordIndex].tag = SkEndian_SwapBE32(fontAxisDefaultValue[axisIndex].axisTag);
+ parameters[coordIndex].min = fontAxisRange[axisIndex].minValue;
+ parameters[coordIndex].def = fontAxisDefaultValue[axisIndex].value;
+ parameters[coordIndex].max = fontAxisRange[axisIndex].maxValue;
+ parameters[coordIndex].setHidden(fontResource->GetFontAxisAttributes(axisIndex) &
+ DWRITE_FONT_AXIS_ATTRIBUTES_HIDDEN);
+ ++coordIndex;
+ }
+ }
+
+ return variableAxisCount;
+
+#else
+ return -1;
+#endif
+}
+
+int DWriteFontTypeface::onGetTableTags(SkFontTableTag tags[]) const {
+ DWRITE_FONT_FACE_TYPE type = fDWriteFontFace->GetType();
+ if (type != DWRITE_FONT_FACE_TYPE_CFF &&
+ type != DWRITE_FONT_FACE_TYPE_TRUETYPE &&
+ type != DWRITE_FONT_FACE_TYPE_TRUETYPE_COLLECTION)
+ {
+ return 0;
+ }
+
+ int ttcIndex;
+ std::unique_ptr<SkStreamAsset> stream = this->openStream(&ttcIndex);
+ return stream.get() ? SkFontStream::GetTableTags(stream.get(), ttcIndex, tags) : 0;
+}
+
+size_t DWriteFontTypeface::onGetTableData(SkFontTableTag tag, size_t offset,
+ size_t length, void* data) const
+{
+ AutoDWriteTable table(fDWriteFontFace.get(), SkEndian_SwapBE32(tag));
+ if (!table.fExists) {
+ return 0;
+ }
+
+ if (offset > table.fSize) {
+ return 0;
+ }
+ size_t size = std::min(length, table.fSize - offset);
+ if (data) {
+ memcpy(data, table.fData + offset, size);
+ }
+
+ return size;
+}
+
+sk_sp<SkData> DWriteFontTypeface::onCopyTableData(SkFontTableTag tag) const {
+ const uint8_t* data;
+ UINT32 size;
+ void* lock;
+ BOOL exists;
+ fDWriteFontFace->TryGetFontTable(SkEndian_SwapBE32(tag),
+ reinterpret_cast<const void **>(&data), &size, &lock, &exists);
+ if (!exists) {
+ return nullptr;
+ }
+ struct Context {
+ Context(void* lock, IDWriteFontFace* face) : fLock(lock), fFontFace(SkRefComPtr(face)) {}
+ ~Context() { fFontFace->ReleaseFontTable(fLock); }
+ void* fLock;
+ SkTScopedComPtr<IDWriteFontFace> fFontFace;
+ };
+ return SkData::MakeWithProc(data, size,
+ [](const void*, void* ctx) { delete (Context*)ctx; },
+ new Context(lock, fDWriteFontFace.get()));
+}
+
+sk_sp<SkTypeface> DWriteFontTypeface::onMakeClone(const SkFontArguments& args) const {
+ // Skip if the current face index does not match the ttcIndex
+ if (fDWriteFontFace->GetIndex() != SkTo<UINT32>(args.getCollectionIndex())) {
+ return sk_ref_sp(this);
+ }
+
+#if defined(NTDDI_WIN10_RS3) && NTDDI_VERSION >= NTDDI_WIN10_RS3
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+
+ if (SUCCEEDED(fDWriteFontFace->QueryInterface(&fontFace5)) && fontFace5->HasVariations()) {
+ UINT32 fontAxisCount = fontFace5->GetFontAxisValueCount();
+ UINT32 argsCoordCount = args.getVariationDesignPosition().coordinateCount;
+ AutoSTMalloc<8, DWRITE_FONT_AXIS_VALUE> fontAxisValue(fontAxisCount);
+ HRN(fontFace5->GetFontAxisValues(fontAxisValue.get(), fontAxisCount));
+
+ for (UINT32 fontIndex = 0; fontIndex < fontAxisCount; ++fontIndex) {
+ for (UINT32 argsIndex = 0; argsIndex < argsCoordCount; ++argsIndex) {
+ if (SkEndian_SwapBE32(fontAxisValue[fontIndex].axisTag) ==
+ args.getVariationDesignPosition().coordinates[argsIndex].axis) {
+ fontAxisValue[fontIndex].value =
+ args.getVariationDesignPosition().coordinates[argsIndex].value;
+ }
+ }
+ }
+ SkTScopedComPtr<IDWriteFontResource> fontResource;
+ HRN(fontFace5->GetFontResource(&fontResource));
+ SkTScopedComPtr<IDWriteFontFace5> newFontFace5;
+ HRN(fontResource->CreateFontFace(fDWriteFont->GetSimulations(),
+ fontAxisValue.get(),
+ fontAxisCount,
+ &newFontFace5));
+
+ SkTScopedComPtr<IDWriteFontFace> newFontFace;
+ HRN(newFontFace5->QueryInterface(&newFontFace));
+ return DWriteFontTypeface::Make(fFactory.get(),
+ newFontFace.get(),
+ fDWriteFont.get(),
+ fDWriteFontFamily.get(),
+ fLoaders,
+ args.getPalette());
+ }
+
+#endif
+
+ // If the palette args have changed, a new font will need to be created.
+ if (args.getPalette().index != fRequestedPalette.index ||
+ args.getPalette().overrideCount != fRequestedPalette.overrideCount ||
+ memcmp(args.getPalette().overrides, fRequestedPalette.overrides,
+ fRequestedPalette.overrideCount * sizeof(fRequestedPalette.overrides[0])))
+ {
+ return DWriteFontTypeface::Make(fFactory.get(),
+ fDWriteFontFace.get(),
+ fDWriteFont.get(),
+ fDWriteFontFamily.get(),
+ fLoaders,
+ args.getPalette());
+ }
+
+ return sk_ref_sp(this);
+}
+
+std::unique_ptr<SkStreamAsset> DWriteFontTypeface::onOpenStream(int* ttcIndex) const {
+ *ttcIndex = fDWriteFontFace->GetIndex();
+
+ UINT32 numFiles = 0;
+ HRNM(fDWriteFontFace->GetFiles(&numFiles, nullptr),
+ "Could not get number of font files.");
+ if (numFiles != 1) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<IDWriteFontFile> fontFile;
+ HRNM(fDWriteFontFace->GetFiles(&numFiles, &fontFile), "Could not get font files.");
+
+ const void* fontFileKey;
+ UINT32 fontFileKeySize;
+ HRNM(fontFile->GetReferenceKey(&fontFileKey, &fontFileKeySize),
+ "Could not get font file reference key.");
+
+ SkTScopedComPtr<IDWriteFontFileLoader> fontFileLoader;
+ HRNM(fontFile->GetLoader(&fontFileLoader), "Could not get font file loader.");
+
+ SkTScopedComPtr<IDWriteFontFileStream> fontFileStream;
+ HRNM(fontFileLoader->CreateStreamFromKey(fontFileKey, fontFileKeySize,
+ &fontFileStream),
+ "Could not create font file stream.");
+
+ return std::unique_ptr<SkStreamAsset>(new SkDWriteFontFileStream(fontFileStream.get()));
+}
+
+std::unique_ptr<SkScalerContext> DWriteFontTypeface::onCreateScalerContext(
+ const SkScalerContextEffects& effects, const SkDescriptor* desc) const
+{
+ return std::make_unique<SkScalerContext_DW>(
+ sk_ref_sp(const_cast<DWriteFontTypeface*>(this)), effects, desc);
+}
+
+void DWriteFontTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ if (rec->fFlags & SkScalerContext::kLCD_Vertical_Flag) {
+ rec->fMaskFormat = SkMask::kA8_Format;
+ rec->fFlags |= SkScalerContext::kGenA8FromLCD_Flag;
+ }
+
+ unsigned flagsWeDontSupport = SkScalerContext::kForceAutohinting_Flag |
+ SkScalerContext::kEmbolden_Flag |
+ SkScalerContext::kLCD_Vertical_Flag;
+ rec->fFlags &= ~flagsWeDontSupport;
+
+ SkFontHinting h = rec->getHinting();
+ // DirectWrite2 allows for hinting to be turned off. Force everything else to normal.
+ if (h != SkFontHinting::kNone || !fFactory2 || !fDWriteFontFace2) {
+ h = SkFontHinting::kNormal;
+ }
+ rec->setHinting(h);
+
+#if defined(SK_FONT_HOST_USE_SYSTEM_SETTINGS)
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (factory != nullptr) {
+ SkTScopedComPtr<IDWriteRenderingParams> defaultRenderingParams;
+ if (SUCCEEDED(factory->CreateRenderingParams(&defaultRenderingParams))) {
+ float gamma = defaultRenderingParams->GetGamma();
+ rec->setDeviceGamma(gamma);
+ rec->setPaintGamma(gamma);
+
+ rec->setContrast(defaultRenderingParams->GetEnhancedContrast());
+ }
+ }
+#elif defined(MOZ_SKIA)
+ rec->setContrast(fContrast);
+
+ rec->setDeviceGamma(fGamma);
+ rec->setPaintGamma(fGamma);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//PDF Support
+
+static void glyph_to_unicode_map(IDWriteFontFace* fontFace, DWRITE_UNICODE_RANGE range,
+ UINT32* remainingGlyphCount, UINT32 numGlyphs,
+ SkUnichar* glyphToUnicode)
+{
+ constexpr const int batchSize = 128;
+ UINT32 codepoints[batchSize];
+ UINT16 glyphs[batchSize];
+ for (UINT32 c = range.first; c <= range.last && *remainingGlyphCount != 0; c += batchSize) {
+ UINT32 numBatchedCodePoints = std::min<UINT32>(range.last - c + 1, batchSize);
+ for (UINT32 i = 0; i < numBatchedCodePoints; ++i) {
+ codepoints[i] = c + i;
+ }
+ HRVM(fontFace->GetGlyphIndices(codepoints, numBatchedCodePoints, glyphs),
+ "Failed to get glyph indexes.");
+ for (UINT32 i = 0; i < numBatchedCodePoints; ++i) {
+ UINT16 glyph = glyphs[i];
+ // Intermittent DW bug on Windows 10. See crbug.com/470146.
+ if (glyph >= numGlyphs) {
+ return;
+ }
+ if (0 < glyph && glyphToUnicode[glyph] == 0) {
+ glyphToUnicode[glyph] = c + i; // Always use lowest-index unichar.
+ --*remainingGlyphCount;
+ }
+ }
+ }
+}
+
+void DWriteFontTypeface::getGlyphToUnicodeMap(SkUnichar* glyphToUnicode) const {
+ IDWriteFontFace* face = fDWriteFontFace.get();
+ UINT32 numGlyphs = face->GetGlyphCount();
+ sk_bzero(glyphToUnicode, sizeof(SkUnichar) * numGlyphs);
+ UINT32 remainingGlyphCount = numGlyphs;
+
+ if (fDWriteFontFace1) {
+ IDWriteFontFace1* face1 = fDWriteFontFace1.get();
+ UINT32 numRanges = 0;
+ HRESULT hr = face1->GetUnicodeRanges(0, nullptr, &numRanges);
+ if (hr != E_NOT_SUFFICIENT_BUFFER && FAILED(hr)) {
+ HRVM(hr, "Failed to get number of ranges.");
+ }
+ std::unique_ptr<DWRITE_UNICODE_RANGE[]> ranges(new DWRITE_UNICODE_RANGE[numRanges]);
+ HRVM(face1->GetUnicodeRanges(numRanges, ranges.get(), &numRanges), "Failed to get ranges.");
+ for (UINT32 i = 0; i < numRanges; ++i) {
+ glyph_to_unicode_map(face1, ranges[i], &remainingGlyphCount, numGlyphs, glyphToUnicode);
+ }
+ } else {
+ glyph_to_unicode_map(face, {0, 0x10FFFF}, &remainingGlyphCount, numGlyphs, glyphToUnicode);
+ }
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> DWriteFontTypeface::onGetAdvancedMetrics() const {
+
+ std::unique_ptr<SkAdvancedTypefaceMetrics> info(nullptr);
+
+ DWRITE_FONT_METRICS dwfm;
+ fDWriteFontFace->GetMetrics(&dwfm);
+
+ info.reset(new SkAdvancedTypefaceMetrics);
+
+ info->fAscent = SkToS16(dwfm.ascent);
+ info->fDescent = SkToS16(dwfm.descent);
+ info->fCapHeight = SkToS16(dwfm.capHeight);
+
+ {
+ SkTScopedComPtr<IDWriteLocalizedStrings> postScriptNames;
+ BOOL exists = FALSE;
+ if (FAILED(fDWriteFont->GetInformationalStrings(
+ DWRITE_INFORMATIONAL_STRING_POSTSCRIPT_NAME,
+ &postScriptNames,
+ &exists)) ||
+ !exists ||
+ FAILED(sk_get_locale_string(postScriptNames.get(), nullptr, &info->fPostScriptName)))
+ {
+ SkDEBUGF("Unable to get postscript name for typeface %p\n", this);
+ }
+ }
+
+ // SkAdvancedTypefaceMetrics::fFontName must actually be a family name.
+ SkTScopedComPtr<IDWriteLocalizedStrings> familyNames;
+ if (FAILED(fDWriteFontFamily->GetFamilyNames(&familyNames)) ||
+ FAILED(sk_get_locale_string(familyNames.get(), nullptr, &info->fFontName)))
+ {
+ SkDEBUGF("Unable to get family name for typeface 0x%p\n", this);
+ }
+ if (info->fPostScriptName.isEmpty()) {
+ info->fPostScriptName = info->fFontName;
+ }
+
+ DWRITE_FONT_FACE_TYPE fontType = fDWriteFontFace->GetType();
+ if (fontType != DWRITE_FONT_FACE_TYPE_TRUETYPE &&
+ fontType != DWRITE_FONT_FACE_TYPE_TRUETYPE_COLLECTION)
+ {
+ return info;
+ }
+
+ // Simulated fonts aren't really TrueType fonts.
+ if (fDWriteFontFace->GetSimulations() == DWRITE_FONT_SIMULATIONS_NONE) {
+ info->fType = SkAdvancedTypefaceMetrics::kTrueType_Font;
+ }
+
+ AutoTDWriteTable<SkOTTableHead> headTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTablePostScript> postTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTableHorizontalHeader> hheaTable(fDWriteFontFace.get());
+ AutoTDWriteTable<SkOTTableOS2> os2Table(fDWriteFontFace.get());
+ if (!headTable.fExists || !postTable.fExists || !hheaTable.fExists || !os2Table.fExists) {
+ return info;
+ }
+
+ SkOTUtils::SetAdvancedTypefaceFlags(os2Table->version.v4.fsType, info.get());
+
+ // There are versions of DirectWrite which support named instances for system variation fonts,
+ // but no means to indicate that such a typeface is a variation.
+ AutoTDWriteTable<SkOTTableFontVariations> fvarTable(fDWriteFontFace.get());
+ if (fvarTable.fExists) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kVariable_FontFlag;
+ }
+
+ //There exist CJK fonts which set the IsFixedPitch and Monospace bits,
+ //but have full width, latin half-width, and half-width kana.
+ bool fixedWidth = (postTable->isFixedPitch &&
+ (1 == SkEndian_SwapBE16(hheaTable->numberOfHMetrics)));
+ //Monospace
+ if (fixedWidth) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kFixedPitch_Style;
+ }
+ //Italic
+ if (os2Table->version.v0.fsSelection.field.Italic) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kItalic_Style;
+ }
+ //Serif
+ using SerifStyle = SkPanose::Data::TextAndDisplay::SerifStyle;
+ SerifStyle serifStyle = os2Table->version.v0.panose.data.textAndDisplay.bSerifStyle;
+ if (SkPanose::FamilyType::TextAndDisplay == os2Table->version.v0.panose.bFamilyType) {
+ if (SerifStyle::Cove == serifStyle ||
+ SerifStyle::ObtuseCove == serifStyle ||
+ SerifStyle::SquareCove == serifStyle ||
+ SerifStyle::ObtuseSquareCove == serifStyle ||
+ SerifStyle::Square == serifStyle ||
+ SerifStyle::Thin == serifStyle ||
+ SerifStyle::Bone == serifStyle ||
+ SerifStyle::Exaggerated == serifStyle ||
+ SerifStyle::Triangle == serifStyle)
+ {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kSerif_Style;
+ }
+ //Script
+ } else if (SkPanose::FamilyType::Script == os2Table->version.v0.panose.bFamilyType) {
+ info->fStyle |= SkAdvancedTypefaceMetrics::kScript_Style;
+ }
+
+ info->fItalicAngle = SkEndian_SwapBE32(postTable->italicAngle) >> 16;
+
+ info->fBBox = SkIRect::MakeLTRB((int32_t)SkEndian_SwapBE16((uint16_t)headTable->xMin),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->yMax),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->xMax),
+ (int32_t)SkEndian_SwapBE16((uint16_t)headTable->yMin));
+ return info;
+}
+
+class StreamFontFileLoader : public IDWriteFontFileLoader {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // IDWriteFontFileLoader methods
+ SK_STDMETHODIMP CreateStreamFromKey(
+ void const* fontFileReferenceKey,
+ UINT32 fontFileReferenceKeySize,
+ IDWriteFontFileStream** fontFileStream) override;
+
+ // Takes ownership of stream.
+ static HRESULT Create(std::unique_ptr<SkStreamAsset> stream,
+ StreamFontFileLoader** streamFontFileLoader) {
+ *streamFontFileLoader = new StreamFontFileLoader(std::move(stream));
+ if (nullptr == *streamFontFileLoader) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+
+private:
+ StreamFontFileLoader(std::unique_ptr<SkStreamAsset> stream)
+ : fStream(std::move(stream)), fRefCount(1)
+ {}
+ virtual ~StreamFontFileLoader() { }
+
+ std::unique_ptr<SkStreamAsset> fStream;
+ ULONG fRefCount;
+};
+
+SK_STDMETHODIMP StreamFontFileLoader::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileLoader)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontFileLoader::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontFileLoader::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+SK_STDMETHODIMP StreamFontFileLoader::CreateStreamFromKey(
+ void const* fontFileReferenceKey,
+ UINT32 fontFileReferenceKeySize,
+ IDWriteFontFileStream** fontFileStream)
+{
+ SkTScopedComPtr<SkDWriteFontFileStreamWrapper> stream;
+ HR(SkDWriteFontFileStreamWrapper::Create(fStream->duplicate().release(), &stream));
+ *fontFileStream = stream.release();
+ return S_OK;
+}
+
+class StreamFontFileEnumerator : public IDWriteFontFileEnumerator {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // IDWriteFontFileEnumerator methods
+ SK_STDMETHODIMP MoveNext(BOOL* hasCurrentFile) override;
+ SK_STDMETHODIMP GetCurrentFontFile(IDWriteFontFile** fontFile) override;
+
+ static HRESULT Create(IDWriteFactory* factory, IDWriteFontFileLoader* fontFileLoader,
+ StreamFontFileEnumerator** streamFontFileEnumerator) {
+ *streamFontFileEnumerator = new StreamFontFileEnumerator(factory, fontFileLoader);
+ if (nullptr == *streamFontFileEnumerator) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+private:
+ StreamFontFileEnumerator(IDWriteFactory* factory, IDWriteFontFileLoader* fontFileLoader);
+ virtual ~StreamFontFileEnumerator() { }
+
+ ULONG fRefCount;
+
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFontFile> fCurrentFile;
+ SkTScopedComPtr<IDWriteFontFileLoader> fFontFileLoader;
+ bool fHasNext;
+};
+
+StreamFontFileEnumerator::StreamFontFileEnumerator(IDWriteFactory* factory,
+ IDWriteFontFileLoader* fontFileLoader)
+ : fRefCount(1)
+ , fFactory(SkRefComPtr(factory))
+ , fCurrentFile()
+ , fFontFileLoader(SkRefComPtr(fontFileLoader))
+ , fHasNext(true)
+{ }
+
+SK_STDMETHODIMP StreamFontFileEnumerator::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileEnumerator)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontFileEnumerator::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontFileEnumerator::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+SK_STDMETHODIMP StreamFontFileEnumerator::MoveNext(BOOL* hasCurrentFile) {
+ *hasCurrentFile = FALSE;
+
+ if (!fHasNext) {
+ return S_OK;
+ }
+ fHasNext = false;
+
+ UINT32 fontFileReferenceKey = 0;
+ HR(fFactory->CreateCustomFontFileReference(
+ &fontFileReferenceKey, //cannot be nullptr
+ sizeof(fontFileReferenceKey), //even if this is 0
+ fFontFileLoader.get(),
+ &fCurrentFile));
+
+ *hasCurrentFile = TRUE;
+ return S_OK;
+}
+
+SK_STDMETHODIMP StreamFontFileEnumerator::GetCurrentFontFile(IDWriteFontFile** fontFile) {
+ if (fCurrentFile.get() == nullptr) {
+ *fontFile = nullptr;
+ return E_FAIL;
+ }
+
+ *fontFile = SkRefComPtr(fCurrentFile.get());
+ return S_OK;
+}
+
+class StreamFontCollectionLoader : public IDWriteFontCollectionLoader {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // IDWriteFontCollectionLoader methods
+ SK_STDMETHODIMP CreateEnumeratorFromKey(
+ IDWriteFactory* factory,
+ void const* collectionKey,
+ UINT32 collectionKeySize,
+ IDWriteFontFileEnumerator** fontFileEnumerator) override;
+
+ static HRESULT Create(IDWriteFontFileLoader* fontFileLoader,
+ StreamFontCollectionLoader** streamFontCollectionLoader) {
+ *streamFontCollectionLoader = new StreamFontCollectionLoader(fontFileLoader);
+ if (nullptr == *streamFontCollectionLoader) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+ }
+private:
+ StreamFontCollectionLoader(IDWriteFontFileLoader* fontFileLoader)
+ : fRefCount(1)
+ , fFontFileLoader(SkRefComPtr(fontFileLoader))
+ { }
+ virtual ~StreamFontCollectionLoader() { }
+
+ ULONG fRefCount;
+ SkTScopedComPtr<IDWriteFontFileLoader> fFontFileLoader;
+};
+
+SK_STDMETHODIMP StreamFontCollectionLoader::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontCollectionLoader)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontCollectionLoader::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+SK_STDMETHODIMP_(ULONG) StreamFontCollectionLoader::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+template <typename T> class SkAutoIDWriteUnregister {
+public:
+ SkAutoIDWriteUnregister(IDWriteFactory* factory, T* unregister)
+ : fFactory(factory), fUnregister(unregister)
+ { }
+ SkAutoIDWriteUnregister(const SkAutoIDWriteUnregister&) = delete;
+ SkAutoIDWriteUnregister& operator=(const SkAutoIDWriteUnregister&) = delete;
+ SkAutoIDWriteUnregister(SkAutoIDWriteUnregister&&) = delete;
+ SkAutoIDWriteUnregister& operator=(SkAutoIDWriteUnregister&&) = delete;
+
+ ~SkAutoIDWriteUnregister() {
+ if (fUnregister) {
+ unregister(fFactory, fUnregister);
+ }
+ }
+
+ T* detatch() {
+ T* old = fUnregister;
+ fUnregister = nullptr;
+ return old;
+ }
+
+private:
+ HRESULT unregister(IDWriteFactory* factory, IDWriteFontFileLoader* unregister) {
+ return factory->UnregisterFontFileLoader(unregister);
+ }
+
+ HRESULT unregister(IDWriteFactory* factory, IDWriteFontCollectionLoader* unregister) {
+ return factory->UnregisterFontCollectionLoader(unregister);
+ }
+
+ IDWriteFactory* fFactory;
+ T* fUnregister;
+};
+
+SK_STDMETHODIMP StreamFontCollectionLoader::CreateEnumeratorFromKey(
+ IDWriteFactory* factory,
+ void const* collectionKey,
+ UINT32 collectionKeySize,
+ IDWriteFontFileEnumerator** fontFileEnumerator)
+{
+ SkTScopedComPtr<StreamFontFileEnumerator> enumerator;
+ HR(StreamFontFileEnumerator::Create(factory, fFontFileLoader.get(), &enumerator));
+ *fontFileEnumerator = enumerator.release();
+ return S_OK;
+}
+
+static HRESULT apply_fontargument_variation(SkTScopedComPtr<IDWriteFontFace>& fontFace,
+ const SkFontArguments& args)
+{
+#if defined(NTDDI_WIN10_RS3) && NTDDI_VERSION >= NTDDI_WIN10_RS3
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5;
+ if (FAILED(fontFace->QueryInterface(&fontFace5)) || !fontFace5->HasVariations()) {
+ return S_OK;
+ }
+
+ UINT32 fontAxisCount = fontFace5->GetFontAxisValueCount();
+ UINT32 argsCoordCount = args.getVariationDesignPosition().coordinateCount;
+ AutoSTMalloc<8, DWRITE_FONT_AXIS_VALUE> variation(fontAxisCount);
+ SkTScopedComPtr<IDWriteFontResource> fontResource;
+ HR(fontFace5->GetFontResource(&fontResource));
+ HR(fontResource->GetDefaultFontAxisValues(variation, fontAxisCount));
+
+ for (UINT32 fontAxisIndex = 0; fontAxisIndex < fontAxisCount; ++fontAxisIndex) {
+ DWRITE_FONT_AXIS_VALUE& fontCoordinate = variation[fontAxisIndex];
+
+ for (UINT32 argsCoordIndex = argsCoordCount; argsCoordIndex --> 0;) {
+ const SkFontArguments::VariationPosition::Coordinate& argsCoordinate =
+ args.getVariationDesignPosition().coordinates[argsCoordIndex];
+ if (SkEndian_SwapBE32(fontCoordinate.axisTag) == argsCoordinate.axis) {
+ fontCoordinate.value = argsCoordinate.value;
+ break;
+ }
+ }
+ }
+
+ SkTScopedComPtr<IDWriteFontFace5> fontFace5_Out;
+ HR(fontResource->CreateFontFace(DWRITE_FONT_SIMULATIONS_NONE,
+ variation.get(), fontAxisCount,
+ &fontFace5_Out));
+ fontFace.reset();
+ HR(fontFace5_Out->QueryInterface(&fontFace));
+#endif
+ return S_OK;
+}
+
+sk_sp<SkTypeface> DWriteFontTypeface::MakeFromStream(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments& args) {
+ // TODO: do we need to use some user provided factory?
+ IDWriteFactory* factory = sk_get_dwrite_factory();
+ if (nullptr == factory) {
+ return nullptr;
+ }
+
+ SkTScopedComPtr<StreamFontFileLoader> fontFileLoader;
+ HRN(StreamFontFileLoader::Create(std::move(stream), &fontFileLoader));
+ HRN(factory->RegisterFontFileLoader(fontFileLoader.get()));
+ SkAutoIDWriteUnregister<StreamFontFileLoader> autoUnregisterFontFileLoader(
+ factory, fontFileLoader.get());
+
+ SkTScopedComPtr<StreamFontCollectionLoader> fontCollectionLoader;
+ HRN(StreamFontCollectionLoader::Create(fontFileLoader.get(), &fontCollectionLoader));
+ HRN(factory->RegisterFontCollectionLoader(fontCollectionLoader.get()));
+ SkAutoIDWriteUnregister<StreamFontCollectionLoader> autoUnregisterFontCollectionLoader(
+ factory, fontCollectionLoader.get());
+
+ SkTScopedComPtr<IDWriteFontCollection> fontCollection;
+ HRN(factory->CreateCustomFontCollection(fontCollectionLoader.get(), nullptr, 0,
+ &fontCollection));
+
+ // Find the first non-simulated font which has the given ttc index.
+ UINT32 familyCount = fontCollection->GetFontFamilyCount();
+ for (UINT32 familyIndex = 0; familyIndex < familyCount; ++familyIndex) {
+ SkTScopedComPtr<IDWriteFontFamily> fontFamily;
+ HRN(fontCollection->GetFontFamily(familyIndex, &fontFamily));
+
+ UINT32 fontCount = fontFamily->GetFontCount();
+ for (UINT32 fontIndex = 0; fontIndex < fontCount; ++fontIndex) {
+ SkTScopedComPtr<IDWriteFont> font;
+ HRN(fontFamily->GetFont(fontIndex, &font));
+
+ // Skip if the current font is simulated
+ if (font->GetSimulations() != DWRITE_FONT_SIMULATIONS_NONE) {
+ continue;
+ }
+ SkTScopedComPtr<IDWriteFontFace> fontFace;
+ HRN(font->CreateFontFace(&fontFace));
+ int faceIndex = fontFace->GetIndex();
+ int ttcIndex = args.getCollectionIndex();
+
+ // Skip if the current face index does not match the ttcIndex
+ if (faceIndex != ttcIndex) {
+ continue;
+ }
+
+ apply_fontargument_variation(fontFace, args);
+
+ return DWriteFontTypeface::Make(
+ factory, fontFace.get(), font.get(), fontFamily.get(),
+ sk_make_sp<DWriteFontTypeface::Loaders>(
+ factory,
+ autoUnregisterFontFileLoader.detatch(),
+ autoUnregisterFontCollectionLoader.detatch()),
+ args.getPalette());
+ }
+ }
+
+ return nullptr;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/ports/SkTypeface_win_dw.h b/gfx/skia/skia/src/ports/SkTypeface_win_dw.h
new file mode 100644
index 0000000000..7bbaeca614
--- /dev/null
+++ b/gfx/skia/skia/src/ports/SkTypeface_win_dw.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTypeface_win_dw_DEFINED
+#define SkTypeface_win_dw_DEFINED
+
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkTypeface.h"
+#include "src/base/SkLeanWindows.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/core/SkTypefaceCache.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+#include <dwrite_1.h>
+#include <dwrite_2.h>
+#include <dwrite_3.h>
+
+#if !defined(__MINGW32__) && WINVER < 0x0A00
+#include "mozilla/gfx/dw-extra.h"
+#endif
+
+class SkFontDescriptor;
+struct SkScalerContextRec;
+
+/* dwrite_3.h incorrectly uses NTDDI_VERSION to hide immutable interfaces (it should only be used to
+ gate changes to public ABI). The implementation files can (and must) get away with including
+ SkDWriteNTDDI_VERSION.h which simply unsets NTDDI_VERSION, but this doesn't work well for this
+ header which can be included in SkTypeface.cpp. Instead, ensure that any declarations hidden
+ behind the NTDDI_VERSION are forward (backward?) declared here in case dwrite_3.h did not declare
+ them. */
+interface IDWriteFontFace4;
+
+static SkFontStyle get_style(IDWriteFont* font) {
+ int weight = font->GetWeight();
+ int width = font->GetStretch();
+ SkFontStyle::Slant slant = SkFontStyle::kUpright_Slant;
+ switch (font->GetStyle()) {
+ case DWRITE_FONT_STYLE_NORMAL: slant = SkFontStyle::kUpright_Slant; break;
+ case DWRITE_FONT_STYLE_OBLIQUE: slant = SkFontStyle::kOblique_Slant; break;
+ case DWRITE_FONT_STYLE_ITALIC: slant = SkFontStyle::kItalic_Slant; break;
+ default: SkASSERT(false); break;
+ }
+ return SkFontStyle(weight, width, slant);
+}
+
+class DWriteFontTypeface : public SkTypeface {
+public:
+ struct Loaders : public SkNVRefCnt<Loaders> {
+ Loaders(IDWriteFactory* factory,
+ IDWriteFontFileLoader* fontFileLoader,
+ IDWriteFontCollectionLoader* fontCollectionLoader)
+ : fFactory(SkRefComPtr(factory))
+ , fDWriteFontFileLoader(SkRefComPtr(fontFileLoader))
+ , fDWriteFontCollectionLoader(SkRefComPtr(fontCollectionLoader))
+ {}
+ Loaders(const Loaders&) = delete;
+ Loaders& operator=(const Loaders&) = delete;
+ Loaders(Loaders&&) = delete;
+ Loaders& operator=(Loaders&&) = delete;
+ ~Loaders();
+
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFontFileLoader> fDWriteFontFileLoader;
+ SkTScopedComPtr<IDWriteFontCollectionLoader> fDWriteFontCollectionLoader;
+ };
+
+ static constexpr SkTypeface::FactoryId FactoryId = SkSetFourByteTag('d','w','r','t');
+ static sk_sp<SkTypeface> MakeFromStream(std::unique_ptr<SkStreamAsset>, const SkFontArguments&);
+
+ ~DWriteFontTypeface() override;
+private:
+ DWriteFontTypeface(const SkFontStyle& style,
+ IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily,
+ sk_sp<Loaders> loaders,
+ const SkFontArguments::Palette&);
+ HRESULT initializePalette();
+
+public:
+ SkTScopedComPtr<IDWriteFactory> fFactory;
+ SkTScopedComPtr<IDWriteFactory2> fFactory2;
+ SkTScopedComPtr<IDWriteFontFamily> fDWriteFontFamily;
+ SkTScopedComPtr<IDWriteFont> fDWriteFont;
+ SkTScopedComPtr<IDWriteFontFace> fDWriteFontFace;
+ SkTScopedComPtr<IDWriteFontFace1> fDWriteFontFace1;
+ SkTScopedComPtr<IDWriteFontFace2> fDWriteFontFace2;
+ SkTScopedComPtr<IDWriteFontFace4> fDWriteFontFace4;
+ bool fIsColorFont;
+
+ std::unique_ptr<SkFontArguments::Palette::Override> fRequestedPaletteEntryOverrides;
+ SkFontArguments::Palette fRequestedPalette;
+
+ size_t fPaletteEntryCount;
+ std::unique_ptr<SkColor[]> fPalette;
+
+ static sk_sp<DWriteFontTypeface> Make(
+ IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ IDWriteFont* font,
+ IDWriteFontFamily* fontFamily,
+ sk_sp<Loaders> loaders,
+ const SkFontArguments::Palette& palette)
+ {
+ return sk_sp<DWriteFontTypeface>(new DWriteFontTypeface(
+ get_style(font), factory, fontFace, font, fontFamily, std::move(loaders), palette));
+ }
+
+ static DWriteFontTypeface* Create(IDWriteFactory* factory,
+ IDWriteFontFace* fontFace,
+ SkFontStyle aStyle,
+ DWRITE_RENDERING_MODE aRenderingMode,
+ float aGamma,
+ float aContrast,
+ float aClearTypeLevel) {
+ DWriteFontTypeface* typeface =
+ new DWriteFontTypeface(aStyle, factory, fontFace,
+ nullptr, nullptr,
+ nullptr, SkFontArguments::Palette{0, nullptr, 0});
+ typeface->fRenderingMode = aRenderingMode;
+ typeface->fGamma = aGamma;
+ typeface->fContrast = aContrast;
+ typeface->fClearTypeLevel = aClearTypeLevel;
+ return typeface;
+ }
+
+ bool ForceGDI() const { return fRenderingMode == DWRITE_RENDERING_MODE_GDI_CLASSIC; }
+ DWRITE_RENDERING_MODE GetRenderingMode() const { return fRenderingMode; }
+ float GetClearTypeLevel() const { return fClearTypeLevel; }
+
+protected:
+ void weak_dispose() const override {
+ fLoaders.reset();
+
+ //SkTypefaceCache::Remove(this);
+ INHERITED::weak_dispose();
+ }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments&) const override;
+ std::unique_ptr<SkStreamAsset> onOpenStream(int* ttcIndex) const override;
+ std::unique_ptr<SkScalerContext> onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor*) const override;
+ void onFilterRec(SkScalerContextRec*) const override;
+ void getGlyphToUnicodeMap(SkUnichar* glyphToUnicode) const override;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
+ void onGetFontDescriptor(SkFontDescriptor*, bool*) const override;
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override;
+ int onCountGlyphs() const override;
+ void getPostScriptGlyphNames(SkString*) const override;
+ int onGetUPEM() const override;
+ void onGetFamilyName(SkString* familyName) const override;
+ bool onGetPostScriptName(SkString*) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+ bool onGlyphMaskNeedsCurrentColor() const override;
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate coordinates[],
+ int coordinateCount) const override;
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis parameters[],
+ int parameterCount) const override;
+ int onGetTableTags(SkFontTableTag tags[]) const override;
+ size_t onGetTableData(SkFontTableTag, size_t offset, size_t length, void* data) const override;
+ sk_sp<SkData> onCopyTableData(SkFontTableTag) const override;
+
+private:
+ mutable sk_sp<Loaders> fLoaders;
+ using INHERITED = SkTypeface;
+ DWRITE_RENDERING_MODE fRenderingMode;
+ float fGamma;
+ float fContrast;
+ float fClearTypeLevel;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h b/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h
new file mode 100644
index 0000000000..8ad1dbeaf4
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkIBMFamilyClass.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkIBMFamilyClass_DEFINED
+#define SkIBMFamilyClass_DEFINED
+
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkIBMFamilyClass {
+ enum class Class : SK_OT_BYTE {
+ NoClassification = 0,
+ OldstyleSerifs = 1,
+ TransitionalSerifs = 2,
+ ModernSerifs = 3,
+ ClarendonSerifs = 4,
+ SlabSerifs = 5,
+ //6 reserved for future use
+ FreeformSerifs = 7,
+ SansSerif = 8,
+ Ornamentals = 9,
+ Scripts = 10,
+ //11 reserved for future use
+ Symbolic = 12,
+ //13-15 reserved for future use
+ } familyClass;
+ union SubClass {
+ enum class OldstyleSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ IBMRoundedLegibility = 1,
+ Garalde = 2,
+ Venetian = 3,
+ ModifiedVenetian = 4,
+ DutchModern = 5,
+ DutchTraditional = 6,
+ Contemporary = 7,
+ Calligraphic = 8,
+ //9-14 reserved for future use
+ Miscellaneous = 15,
+ } oldstyleSerifs;
+ enum class TransitionalSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ DirectLine = 1,
+ Script = 2,
+ //3-14 reserved for future use
+ Miscellaneous = 15,
+ } transitionalSerifs;
+ enum class ModernSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Italian = 1,
+ Script = 2,
+ //3-14 reserved for future use
+ Miscellaneous = 15,
+ } modernSerifs;
+ enum class ClarendonSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Clarendon = 1,
+ Modern = 2,
+ Traditional = 3,
+ Newspaper = 4,
+ StubSerif = 5,
+ Monotone = 6,
+ Typewriter = 7,
+ //8-14 reserved for future use
+ Miscellaneous = 15,
+ } clarendonSerifs;
+ enum class SlabSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Monotone = 1,
+ Humanist = 2,
+ Geometric = 3,
+ Swiss = 4,
+ Typewriter = 5,
+ //6-14 reserved for future use
+ Miscellaneous = 15,
+ } slabSerifs;
+ enum class FreeformSerifs : SK_OT_BYTE {
+ NoClassification = 0,
+ Modern = 1,
+ //2-14 reserved for future use
+ Miscellaneous = 15,
+ } freeformSerifs;
+ enum class SansSerif : SK_OT_BYTE {
+ NoClassification = 0,
+ IBMNeoGrotesqueGothic = 1,
+ Humanist = 2,
+ LowXRoundGeometric = 3,
+ HighXRoundGeometric = 4,
+ NeoGrotesqueGothic = 5,
+ ModifiedNeoGrotesqueGothic = 6,
+ //7-8 reserved for future use
+ TypewriterGothic = 9,
+ Matrix = 10,
+ //11-14 reserved for future use
+ Miscellaneous = 15,
+ } sansSerif;
+ enum class Ornamentals : SK_OT_BYTE {
+ NoClassification = 0,
+ Engraver = 1,
+ BlackLetter = 2,
+ Decorative = 3,
+ ThreeDimensional = 4,
+ //5-14 reserved for future use
+ Miscellaneous = 15,
+ } ornamentals;
+ enum class Scripts : SK_OT_BYTE {
+ NoClassification = 0,
+ Uncial = 1,
+ Brush_Joined = 2,
+ Formal_Joined = 3,
+ Monotone_Joined = 4,
+ Calligraphic = 5,
+ Brush_Unjoined = 6,
+ Formal_Unjoined = 7,
+ Monotone_Unjoined = 8,
+ //9-14 reserved for future use
+ Miscellaneous = 15,
+ } scripts;
+ enum class Symbolic : SK_OT_BYTE {
+ NoClassification = 0,
+ //1-2 reserved for future use
+ MixedSerif = 3,
+ //4-5 reserved for future use
+ OldstyleSerif = 6,
+ NeoGrotesqueSansSerif = 7,
+ //8-14 reserved for future use
+ Miscellaneous = 15,
+ } symbolic;
+ } familySubClass;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkIBMFamilyClass) == 2, "sizeof_SkIBMFamilyClass_not_2");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTableTypes.h b/gfx/skia/skia/src/sfnt/SkOTTableTypes.h
new file mode 100644
index 0000000000..739bd0ec16
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTableTypes.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTableTypes_DEFINED
+#define SkOTTableTypes_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/base/SkEndian.h"
+
+//All SK_OT_ prefixed types should be considered as big endian.
+typedef uint8_t SK_OT_BYTE;
+#if CHAR_BIT == 8
+typedef signed char SK_OT_CHAR; //easier to debug
+#else
+typedef int8_t SK_OT_CHAR;
+#endif
+typedef uint16_t SK_OT_SHORT;
+typedef uint16_t SK_OT_USHORT;
+typedef uint32_t SK_OT_ULONG;
+typedef uint32_t SK_OT_LONG;
+//16.16 Signed fixed point representation.
+typedef int32_t SK_OT_Fixed;
+//2.14 Signed fixed point representation.
+typedef uint16_t SK_OT_F2DOT14;
+//F units are the units of measurement in em space.
+typedef uint16_t SK_OT_FWORD;
+typedef uint16_t SK_OT_UFWORD;
+//Number of seconds since 12:00 midnight, January 1, 1904.
+typedef uint64_t SK_OT_LONGDATETIME;
+
+#define SK_OT_BYTE_BITFIELD SK_UINT8_BITFIELD
+
+template<typename T> class SkOTTableTAG {
+public:
+ /**
+ * SkOTTableTAG<T>::value is the big endian value of an OpenType table tag.
+ * It may be directly compared with raw big endian table data.
+ */
+ static const SK_OT_ULONG value = SkTEndian_SwapBE32(
+ SkSetFourByteTag(T::TAG0, T::TAG1, T::TAG2, T::TAG3)
+ );
+};
+
+/** SkOTSetUSHORTBit<N>::value is an SK_OT_USHORT with the Nth BE bit set. */
+template <unsigned N> struct SkOTSetUSHORTBit {
+ static_assert(N < 16, "NTooBig");
+ static const uint16_t bit = 1u << N;
+ static const SK_OT_USHORT value = SkTEndian_SwapBE16(bit);
+};
+
+/** SkOTSetULONGBit<N>::value is an SK_OT_ULONG with the Nth BE bit set. */
+template <unsigned N> struct SkOTSetULONGBit {
+ static_assert(N < 32, "NTooBig");
+ static const uint32_t bit = 1u << N;
+ static const SK_OT_ULONG value = SkTEndian_SwapBE32(bit);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h
new file mode 100644
index 0000000000..da04d31c30
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBDT.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBDT_DEFINED
+#define SkOTTable_EBDT_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_head.h"
+#include "src/sfnt/SkOTTable_loca.h"
+
+#pragma pack(push, 1)
+
/** OpenType 'EBDT' (Embedded Bitmap Data) table.
 *  Packed (pragma pack(1)) mirror of the on-disk layout; all fields are raw
 *  big-endian values. The FormatN structs describe glyph image data whose
 *  layout is selected by the EBLC index subtable's imageFormat.
 */
struct SkOTTableEmbeddedBitmapData {
    static const SK_OT_CHAR TAG0 = 'E';
    static const SK_OT_CHAR TAG1 = 'B';
    static const SK_OT_CHAR TAG2 = 'D';
    static const SK_OT_CHAR TAG3 = 'T';
    static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapData>::value;

    SK_OT_Fixed version;
    //Version 2.0 (16.16 fixed point), stored big endian.
    static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);

    //Metrics for glyphs rendered in both horizontal and vertical layouts.
    struct BigGlyphMetrics {
        SK_OT_BYTE height;
        SK_OT_BYTE width;
        SK_OT_CHAR horiBearingX;
        SK_OT_CHAR horiBearingY;
        SK_OT_BYTE horiAdvance;
        SK_OT_CHAR vertBearingX;
        SK_OT_CHAR vertBearingY;
        SK_OT_BYTE vertAdvance;
    };

    //Metrics for glyphs rendered in a single layout direction.
    struct SmallGlyphMetrics {
        SK_OT_BYTE height;
        SK_OT_BYTE width;
        SK_OT_CHAR bearingX;
        SK_OT_CHAR bearingY;
        SK_OT_BYTE advance;
    };

    // Small metrics, byte-aligned data.
    struct Format1 {
        SmallGlyphMetrics smallGlyphMetrics;
        //SK_OT_BYTE[] byteAlignedBitmap;
    };

    // Small metrics, bit-aligned data.
    struct Format2 {
        SmallGlyphMetrics smallGlyphMetrics;
        //SK_OT_BYTE[] bitAlignedBitmap;
    };

    // Format 3 is not used.

    // EBLC metrics (IndexSubTable::header::indexFormat 2 or 5), compressed data.
    // Only used on Mac.
    struct Format4 {
        SK_OT_ULONG whiteTreeOffset;
        SK_OT_ULONG blackTreeOffset;
        SK_OT_ULONG glyphDataOffset;
    };

    // EBLC metrics (IndexSubTable::header::indexFormat 2 or 5), bit-aligned data.
    struct Format5 {
        //SK_OT_BYTE[] bitAlignedBitmap;
    };

    // Big metrics, byte-aligned data.
    struct Format6 {
        BigGlyphMetrics bigGlyphMetrics;
        //SK_OT_BYTE[] byteAlignedBitmap;
    };

    // Big metrics, bit-aligned data.
    struct Format7 {
        BigGlyphMetrics bigGlyphMetrics;
        //SK_OT_BYTE[] bitAlignedBitmap;
    };

    //One entry of a composite (component-built) bitmap glyph.
    struct EBDTComponent {
        SK_OT_USHORT glyphCode; // Component glyph code
        SK_OT_CHAR xOffset; // Position of component left
        SK_OT_CHAR yOffset; // Position of component top
    };

    // Small metrics, composite glyph.
    struct Format8 {
        SmallGlyphMetrics smallMetrics; // Metrics information for the glyph
        SK_OT_BYTE pad; // Pad to short boundary
        SK_OT_USHORT numComponents; // Number of components
        //EBDTComponent componentArray[numComponents]; // Glyph code, offset array
    };

    // Big metrics, composite glyph.
    struct Format9 {
        BigGlyphMetrics bigMetrics; // Metrics information for the glyph
        SK_OT_USHORT numComponents; // Number of components
        //EBDTComponent componentArray[numComponents]; // Glyph code, offset array
    };
};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h
new file mode 100644
index 0000000000..e7c65832c5
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBLC.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBLC_DEFINED
+#define SkOTTable_EBLC_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_EBDT.h"
+
+#pragma pack(push, 1)
+
/** OpenType 'EBLC' (Embedded Bitmap Location) table.
 *  Packed (pragma pack(1)) mirror of the on-disk layout; locates glyph bitmap
 *  data stored in the companion 'EBDT' table. All fields are raw big endian.
 */
struct SkOTTableEmbeddedBitmapLocation {
    static const SK_OT_CHAR TAG0 = 'E';
    static const SK_OT_CHAR TAG1 = 'B';
    static const SK_OT_CHAR TAG2 = 'L';
    static const SK_OT_CHAR TAG3 = 'C';
    static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapLocation>::value;

    SK_OT_Fixed version;
    //Version 2.0 (16.16 fixed point), stored big endian.
    static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);

    SK_OT_ULONG numSizes;

    //Line metrics for one strike in one layout direction.
    struct SbitLineMetrics {
        SK_OT_CHAR ascender;
        SK_OT_CHAR descender;
        SK_OT_BYTE widthMax;
        SK_OT_CHAR caretSlopeNumerator;
        SK_OT_CHAR caretSlopeDenominator;
        SK_OT_CHAR caretOffset;
        SK_OT_CHAR minOriginSB;
        SK_OT_CHAR minAdvanceSB;
        SK_OT_CHAR maxBeforeBL;
        SK_OT_CHAR minAfterBL;
        SK_OT_CHAR pad1;
        SK_OT_CHAR pad2;
    };

    //One strike (bitmap size) record; numSizes of these follow the header.
    struct BitmapSizeTable {
        SK_OT_ULONG indexSubTableArrayOffset; //offset to indexSubtableArray from beginning of EBLC.
        SK_OT_ULONG indexTablesSize; //number of bytes in corresponding index subtables and array
        SK_OT_ULONG numberOfIndexSubTables; //an index subtable for each range or format change
        SK_OT_ULONG colorRef; //not used; set to 0.
        SbitLineMetrics hori; //line metrics for text rendered horizontally
        SbitLineMetrics vert; //line metrics for text rendered vertically
        SK_OT_USHORT startGlyphIndex; //lowest glyph index for this size
        SK_OT_USHORT endGlyphIndex; //highest glyph index for this size
        SK_OT_BYTE ppemX; //horizontal pixels per Em
        SK_OT_BYTE ppemY; //vertical pixels per Em
        struct BitDepth {
            enum Value : SK_OT_BYTE {
                BW = 1,
                Gray4 = 2,
                Gray16 = 4,
                Gray256 = 8,
            };
            SK_OT_BYTE value;
        } bitDepth; //the Microsoft rasterizer v.1.7 or greater supports these bit depths
        union Flags {
            struct Field {
                //0-7
                SK_OT_BYTE_BITFIELD(
                    Horizontal, // Horizontal small glyph metrics
                    Vertical,  // Vertical small glyph metrics
                    Reserved02,
                    Reserved03,
                    Reserved04,
                    Reserved05,
                    Reserved06,
                    Reserved07)
            } field;
            struct Raw {
                static const SK_OT_CHAR Horizontal = 1u << 0;
                static const SK_OT_CHAR Vertical = 1u << 1;
                SK_OT_CHAR value;
            } raw;
        } flags;
    }; //bitmapSizeTable[numSizes];

    //Maps a contiguous glyph range to its index subtable.
    struct IndexSubTableArray {
        SK_OT_USHORT firstGlyphIndex; //first glyph code of this range
        SK_OT_USHORT lastGlyphIndex; //last glyph code of this range (inclusive)
        SK_OT_ULONG additionalOffsetToIndexSubtable; //add to BitmapSizeTable::indexSubTableArrayOffset to get offset from beginning of 'EBLC'
    }; //indexSubTableArray[BitmapSizeTable::numberOfIndexSubTables];

    struct IndexSubHeader {
        SK_OT_USHORT indexFormat; //format of this indexSubTable
        SK_OT_USHORT imageFormat; //format of 'EBDT' image data
        SK_OT_ULONG imageDataOffset; //offset to image data in 'EBDT' table
    };

    // Variable metrics glyphs with 4 byte offsets
    struct IndexSubTable1 {
        IndexSubHeader header;
        //SK_OT_ULONG offsetArray[lastGlyphIndex - firstGlyphIndex + 1 + 1]; //last element points to one past end of last glyph
        //glyphData = offsetArray[glyphIndex - firstGlyphIndex] + imageDataOffset
    };

    // All Glyphs have identical metrics
    struct IndexSubTable2 {
        IndexSubHeader header;
        SK_OT_ULONG imageSize; // all glyphs are of the same size
        SkOTTableEmbeddedBitmapData::BigGlyphMetrics bigMetrics; // all glyphs have the same metrics; glyph data may be compressed, byte-aligned, or bit-aligned
    };

    // Variable metrics glyphs with 2 byte offsets
    struct IndexSubTable3 {
        IndexSubHeader header;
        //SK_OT_USHORT offsetArray[lastGlyphIndex - firstGlyphIndex + 1 + 1]; //last element points to one past end of last glyph, may have extra element to force even number of elements
        //glyphData = offsetArray[glyphIndex - firstGlyphIndex] + imageDataOffset
    };

    // Variable metrics glyphs with sparse glyph codes
    struct IndexSubTable4 {
        IndexSubHeader header;
        SK_OT_ULONG numGlyphs;
        struct CodeOffsetPair {
            SK_OT_USHORT glyphCode;
            SK_OT_USHORT offset; //location in EBDT
        }; //glyphArray[numGlyphs+1]
    };

    // Constant metrics glyphs with sparse glyph codes
    struct IndexSubTable5 {
        IndexSubHeader header;
        SK_OT_ULONG imageSize; //all glyphs have the same data size
        SkOTTableEmbeddedBitmapData::BigGlyphMetrics bigMetrics; //all glyphs have the same metrics
        SK_OT_ULONG numGlyphs;
        //SK_OT_USHORT glyphCodeArray[numGlyphs] //must have even number of entries (set pad to 0)
    };

    //View of any index subtable; discriminate on header.indexFormat.
    union IndexSubTable {
        IndexSubHeader header;
        IndexSubTable1 format1;
        IndexSubTable2 format2;
        IndexSubTable3 format3;
        IndexSubTable4 format4;
        IndexSubTable5 format5;
    };

};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h b/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h
new file mode 100644
index 0000000000..549f114406
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_EBSC.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_EBSC_DEFINED
+#define SkOTTable_EBSC_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_EBLC.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableEmbeddedBitmapScaling {
+ static const SK_OT_CHAR TAG0 = 'E';
+ static const SK_OT_CHAR TAG1 = 'S';
+ static const SK_OT_CHAR TAG2 = 'B';
+ static const SK_OT_CHAR TAG3 = 'C';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableEmbeddedBitmapScaling>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_initial = SkTEndian_SwapBE32(0x00020000);
+
+ SK_OT_ULONG numSizes;
+
+ struct BitmapScaleTable {
+ SkOTTableEmbeddedBitmapLocation::SbitLineMetrics hori;
+ SkOTTableEmbeddedBitmapLocation::SbitLineMetrics vert;
+ SK_OT_BYTE ppemX; //target horizontal pixels per EM
+ SK_OT_BYTE ppemY; //target vertical pixels per EM
+ SK_OT_BYTE substitutePpemX; //use bitmaps of this size
+ SK_OT_BYTE substitutePpemY; //use bitmaps of this size
+ }; //bitmapScaleTable[numSizes];
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h
new file mode 100644
index 0000000000..cb5312f33e
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_DEFINED
+#define SkOTTable_OS_2_DEFINED
+
+#include "src/sfnt/SkOTTable_OS_2_V0.h"
+#include "src/sfnt/SkOTTable_OS_2_V1.h"
+#include "src/sfnt/SkOTTable_OS_2_V2.h"
+#include "src/sfnt/SkOTTable_OS_2_V3.h"
+#include "src/sfnt/SkOTTable_OS_2_V4.h"
+#include "src/sfnt/SkOTTable_OS_2_VA.h"
+
+#pragma pack(push, 1)
+
/** OpenType 'OS/2' table: a version-discriminated view over the concrete
 *  per-version layouts (VA and V0..V4, declared in sibling headers).
 *  Read 'version' first, then access the matching union member.
 */
struct SkOTTableOS2 {
    inline static constexpr SK_OT_CHAR TAG0 = 'O';
    inline static constexpr SK_OT_CHAR TAG1 = 'S';
    inline static constexpr SK_OT_CHAR TAG2 = '/';
    inline static constexpr SK_OT_CHAR TAG3 = '2';
    inline static constexpr SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableOS2>::value;

    union Version {
        SK_OT_USHORT version;

        //original V0 TT
        struct VA : SkOTTableOS2_VA { } vA;
        struct V0 : SkOTTableOS2_V0 { } v0;
        struct V1 : SkOTTableOS2_V1 { } v1;
        struct V2 : SkOTTableOS2_V2 { } v2;
        //makes fsType 0-3 exclusive
        struct V3 : SkOTTableOS2_V3 { } v3;
        //defines fsSelection bits 7-9
        struct V4 : SkOTTableOS2_V4 { } v4;
    } version;
};

#pragma pack(pop)


//Lock in the exact on-disk sizes of each version's layout.
static_assert(sizeof(SkOTTableOS2::Version::VA) == 68, "sizeof_SkOTTableOS2__VA_not_68");
static_assert(sizeof(SkOTTableOS2::Version::V0) == 78, "sizeof_SkOTTableOS2__V0_not_78");
static_assert(sizeof(SkOTTableOS2::Version::V1) == 86, "sizeof_SkOTTableOS2__V1_not_86");
static_assert(sizeof(SkOTTableOS2::Version::V2) == 96, "sizeof_SkOTTableOS2__V2_not_96");
static_assert(sizeof(SkOTTableOS2::Version::V3) == 96, "sizeof_SkOTTableOS2__V3_not_96");
static_assert(sizeof(SkOTTableOS2::Version::V4) == 96, "sizeof_SkOTTableOS2__V4_not_96");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h
new file mode 100644
index 0000000000..fc789f3e2f
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V0.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V0_DEFINED
+#define SkOTTable_OS_2_V0_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
/** OpenType 'OS/2' table, version 0. Packed (pragma pack(1)) mirror of the
 *  on-disk layout; all multi-byte fields are raw big endian.
 */
struct SkOTTableOS2_V0 {
    SK_OT_USHORT version;
    //SkOTTableOS2_VA::VERSION and SkOTTableOS2_V0::VERSION are both 0.
    //The only way to differentiate these two versions is by the size of the table.
    static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(0);

    SK_OT_SHORT xAvgCharWidth;
    struct WeightClass {
        //Named values are pre-swapped so they compare directly with 'value'.
        enum Value : SK_OT_USHORT {
            Thin = SkTEndian_SwapBE16(100),
            ExtraLight = SkTEndian_SwapBE16(200),
            Light = SkTEndian_SwapBE16(300),
            Normal = SkTEndian_SwapBE16(400),
            Medium = SkTEndian_SwapBE16(500),
            SemiBold = SkTEndian_SwapBE16(600),
            Bold = SkTEndian_SwapBE16(700),
            ExtraBold = SkTEndian_SwapBE16(800),
            Black = SkTEndian_SwapBE16(900),
        };
        SK_OT_USHORT value;
    } usWeightClass;
    struct WidthClass {
        enum Value : SK_OT_USHORT {
            UltraCondensed = SkTEndian_SwapBE16(1),
            ExtraCondensed = SkTEndian_SwapBE16(2),
            Condensed = SkTEndian_SwapBE16(3),
            SemiCondensed = SkTEndian_SwapBE16(4),
            Medium = SkTEndian_SwapBE16(5),
            SemiExpanded = SkTEndian_SwapBE16(6),
            Expanded = SkTEndian_SwapBE16(7),
            ExtraExpanded = SkTEndian_SwapBE16(8),
            UltraExpanded = SkTEndian_SwapBE16(9),
        } value;
    } usWidthClass;
    //Font embedding licensing bits; view as bitfields or as a raw masked value.
    union Type {
        struct Field {
            //8-15
            SK_OT_BYTE_BITFIELD(
                Reserved08,
                Reserved09,
                Reserved10,
                Reserved11,
                Reserved12,
                Reserved13,
                Reserved14,
                Reserved15)
            //0-7
            SK_OT_BYTE_BITFIELD(
                Reserved00,
                Restricted,
                PreviewPrint,
                Editable,
                Reserved04,
                Reserved05,
                Reserved06,
                Reserved07)
        } field;
        struct Raw {
            static const SK_OT_USHORT Installable = 0;
            static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
            static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
            static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
            SK_OT_USHORT value;
        } raw;
    } fsType;
    SK_OT_SHORT ySubscriptXSize;
    SK_OT_SHORT ySubscriptYSize;
    SK_OT_SHORT ySubscriptXOffset;
    SK_OT_SHORT ySubscriptYOffset;
    SK_OT_SHORT ySuperscriptXSize;
    SK_OT_SHORT ySuperscriptYSize;
    SK_OT_SHORT ySuperscriptXOffset;
    SK_OT_SHORT ySuperscriptYOffset;
    SK_OT_SHORT yStrikeoutSize;
    SK_OT_SHORT yStrikeoutPosition;
    SkIBMFamilyClass sFamilyClass;
    SkPanose panose;
    SK_OT_ULONG ulCharRange[4];
    SK_OT_CHAR achVendID[4];
    //Style/selection bits; view as bitfields or as a raw masked value.
    union Selection {
        struct Field {
            //8-15
            SK_OT_BYTE_BITFIELD(
                Reserved08,
                Reserved09,
                Reserved10,
                Reserved11,
                Reserved12,
                Reserved13,
                Reserved14,
                Reserved15)
            //0-7
            SK_OT_BYTE_BITFIELD(
                Italic,
                Underscore,
                Negative,
                Outlined,
                Strikeout,
                Bold,
                Regular,
                Reserved07)
        } field;
        struct Raw {
            static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
            static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
            static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
            static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
            static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
            static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
            static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
            SK_OT_USHORT value;
        } raw;
    } fsSelection;
    SK_OT_USHORT usFirstCharIndex;
    SK_OT_USHORT usLastCharIndex;
    //version0
    SK_OT_SHORT sTypoAscender;
    SK_OT_SHORT sTypoDescender;
    SK_OT_SHORT sTypoLineGap;
    SK_OT_USHORT usWinAscent;
    SK_OT_USHORT usWinDescent;
};

#pragma pack(pop)


static_assert(sizeof(SkOTTableOS2_V0) == 78, "sizeof_SkOTTableOS2_V0_not_78");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h
new file mode 100644
index 0000000000..cf183c9cee
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V1.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V1_DEFINED
+#define SkOTTable_OS_2_V1_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
/** OpenType 'OS/2' table, version 1. Packed (pragma pack(1)) mirror of the
 *  on-disk layout; all multi-byte fields are raw big endian. Adds
 *  ulUnicodeRange and ulCodePageRange over version 0.
 */
struct SkOTTableOS2_V1 {
    SK_OT_USHORT version;
    static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(1);

    SK_OT_SHORT xAvgCharWidth;
    struct WeightClass {
        //Named values are pre-swapped so they compare directly with 'value'.
        enum Value : SK_OT_USHORT {
            Thin = SkTEndian_SwapBE16(100),
            ExtraLight = SkTEndian_SwapBE16(200),
            Light = SkTEndian_SwapBE16(300),
            Normal = SkTEndian_SwapBE16(400),
            Medium = SkTEndian_SwapBE16(500),
            SemiBold = SkTEndian_SwapBE16(600),
            Bold = SkTEndian_SwapBE16(700),
            ExtraBold = SkTEndian_SwapBE16(800),
            Black = SkTEndian_SwapBE16(900),
        };
        SK_OT_USHORT value;
    } usWeightClass;
    struct WidthClass {
        enum Value : SK_OT_USHORT {
            UltraCondensed = SkTEndian_SwapBE16(1),
            ExtraCondensed = SkTEndian_SwapBE16(2),
            Condensed = SkTEndian_SwapBE16(3),
            SemiCondensed = SkTEndian_SwapBE16(4),
            Medium = SkTEndian_SwapBE16(5),
            SemiExpanded = SkTEndian_SwapBE16(6),
            Expanded = SkTEndian_SwapBE16(7),
            ExtraExpanded = SkTEndian_SwapBE16(8),
            UltraExpanded = SkTEndian_SwapBE16(9),
        } value;
    } usWidthClass;
    //Font embedding licensing bits; view as bitfields or as a raw masked value.
    union Type {
        struct Field {
            //8-15
            SK_OT_BYTE_BITFIELD(
                Reserved08,
                Reserved09,
                Reserved10,
                Reserved11,
                Reserved12,
                Reserved13,
                Reserved14,
                Reserved15)
            //0-7
            SK_OT_BYTE_BITFIELD(
                Reserved00,
                Restricted,
                PreviewPrint,
                Editable,
                Reserved04,
                Reserved05,
                Reserved06,
                Reserved07)
        } field;
        struct Raw {
            static const SK_OT_USHORT Installable = 0;
            static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
            static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
            static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
            SK_OT_USHORT value;
        } raw;
    } fsType;
    SK_OT_SHORT ySubscriptXSize;
    SK_OT_SHORT ySubscriptYSize;
    SK_OT_SHORT ySubscriptXOffset;
    SK_OT_SHORT ySubscriptYOffset;
    SK_OT_SHORT ySuperscriptXSize;
    SK_OT_SHORT ySuperscriptYSize;
    SK_OT_SHORT ySuperscriptXOffset;
    SK_OT_SHORT ySuperscriptYOffset;
    SK_OT_SHORT yStrikeoutSize;
    SK_OT_SHORT yStrikeoutPosition;
    SkIBMFamilyClass sFamilyClass;
    SkPanose panose;
    //128-bit Unicode coverage bitmap (bits 0-127 across value[0..3]).
    union UnicodeRange {
        struct Field {
            //l0 24-31
            SK_OT_BYTE_BITFIELD(
                Thai,
                Lao,
                BasicGeorgian,
                GeorgianExtended,
                HangulJamo,
                LatinExtendedAdditional,
                GreekExtended,
                GeneralPunctuation)
            //l0 16-23
            SK_OT_BYTE_BITFIELD(
                Bengali,
                Gurmukhi,
                Gujarati,
                Oriya,
                Tamil,
                Telugu,
                Kannada,
                Malayalam)
            //l0 8-15
            SK_OT_BYTE_BITFIELD(
                GreekSymbolsAndCoptic,
                Cyrillic,
                Armenian,
                BasicHebrew,
                HebrewExtendedAB,
                BasicArabic,
                ArabicExtended,
                Devanagari)
            //l0 0-7
            SK_OT_BYTE_BITFIELD(
                BasicLatin,
                Latin1Supplement,
                LatinExtendedA,
                LatinExtendedB,
                IPAExtensions,
                SpacingModifierLetters,
                CombiningDiacriticalMarks,
                BasicGreek)

            //l1 24-31
            SK_OT_BYTE_BITFIELD(
                Hangul,
                Reserved057,
                Reserved058,
                CJKUnifiedIdeographs,
                PrivateUseArea,
                CJKCompatibilityIdeographs,
                AlphabeticPresentationForms,
                ArabicPresentationFormsA)
            //l1 16-23
            SK_OT_BYTE_BITFIELD(
                CJKSymbolsAndPunctuation,
                Hiragana,
                Katakana,
                Bopomofo,
                HangulCompatibilityJamo,
                CJKMiscellaneous,
                EnclosedCJKLettersAndMonths,
                CJKCompatibility)
            //l1 8-15
            SK_OT_BYTE_BITFIELD(
                ControlPictures,
                OpticalCharacterRecognition,
                EnclosedAlphanumerics,
                BoxDrawing,
                BlockElements,
                GeometricShapes,
                MiscellaneousSymbols,
                Dingbats)
            //l1 0-7
            SK_OT_BYTE_BITFIELD(
                SuperscriptsAndSubscripts,
                CurrencySymbols,
                CombiningDiacriticalMarksForSymbols,
                LetterlikeSymbols,
                NumberForms,
                Arrows,
                MathematicalOperators,
                MiscellaneousTechnical)

            //l2 24-31
            SK_OT_BYTE_BITFIELD(
                Reserved088,
                Reserved089,
                Reserved090,
                Reserved091,
                Reserved092,
                Reserved093,
                Reserved094,
                Reserved095)
            //l2 16-23
            SK_OT_BYTE_BITFIELD(
                Reserved080,
                Reserved081,
                Reserved082,
                Reserved083,
                Reserved084,
                Reserved085,
                Reserved086,
                Reserved087)
            //l2 8-15
            SK_OT_BYTE_BITFIELD(
                Reserved072,
                Reserved073,
                Reserved074,
                Reserved075,
                Reserved076,
                Reserved077,
                Reserved078,
                Reserved079)
            //l2 0-7
            SK_OT_BYTE_BITFIELD(
                CombiningHalfMarks,
                CJKCompatibilityForms,
                SmallFormVariants,
                ArabicPresentationFormsB,
                HalfwidthAndFullwidthForms,
                Specials,
                Reserved70,
                Reserved71)

            //l3 24-31
            SK_OT_BYTE_BITFIELD(
                Reserved120,
                Reserved121,
                Reserved122,
                Reserved123,
                Reserved124,
                Reserved125,
                Reserved126,
                Reserved127)
            //l3 16-23
            SK_OT_BYTE_BITFIELD(
                Reserved112,
                Reserved113,
                Reserved114,
                Reserved115,
                Reserved116,
                Reserved117,
                Reserved118,
                Reserved119)
            //l3 8-15
            SK_OT_BYTE_BITFIELD(
                Reserved104,
                Reserved105,
                Reserved106,
                Reserved107,
                Reserved108,
                Reserved109,
                Reserved110,
                Reserved111)
            //l3 0-7
            SK_OT_BYTE_BITFIELD(
                Reserved096,
                Reserved097,
                Reserved098,
                Reserved099,
                Reserved100,
                Reserved101,
                Reserved102,
                Reserved103)
        } field;
        struct Raw {
            struct l0 {
                static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
                static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
                static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
                static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
                static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
                static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
                static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
                static const SK_OT_ULONG BasicGreekMask = SkOTSetULONGBit<7>::value;
                static const SK_OT_ULONG GreekSymbolsAndCCopticMask = SkOTSetULONGBit<8>::value;
                static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
                static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
                static const SK_OT_ULONG BasicHebrewMask = SkOTSetULONGBit<11>::value;
                static const SK_OT_ULONG HebrewExtendedABMask = SkOTSetULONGBit<12>::value;
                static const SK_OT_ULONG BasicArabicMask = SkOTSetULONGBit<13>::value;
                static const SK_OT_ULONG ArabicExtendedMask = SkOTSetULONGBit<14>::value;
                static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
                static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
                static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
                static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
                static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
                static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
                static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
                static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
                static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
                static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
                static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
                static const SK_OT_ULONG BasicGeorgianMask = SkOTSetULONGBit<26>::value;
                static const SK_OT_ULONG GeorgianExtendedMask = SkOTSetULONGBit<27>::value;
                static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
                static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
                static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
                static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
            };
            struct l1 {
                static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
                static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
                static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
                static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
                static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
                static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
                static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
                static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
                static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
                static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
                static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
                static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
                static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
                static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
                static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
                static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
                static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
                static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
                static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
                static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
                static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
                static const SK_OT_ULONG CJKMiscellaneousMask = SkOTSetULONGBit<53 - 32>::value;
                static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
                static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
                static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
                //Reserved
                //Reserved
                static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
                static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
                static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
                static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
                static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
            };
            struct l2 {
                static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
                static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
                static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
                static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
                static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
                static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
            };
            SK_OT_ULONG value[4];
        } raw;
    } ulUnicodeRange;
    SK_OT_CHAR achVendID[4];
    //Style/selection bits; view as bitfields or as a raw masked value.
    union Selection {
        struct Field {
            //8-15
            SK_OT_BYTE_BITFIELD(
                Reserved08,
                Reserved09,
                Reserved10,
                Reserved11,
                Reserved12,
                Reserved13,
                Reserved14,
                Reserved15)
            //0-7
            SK_OT_BYTE_BITFIELD(
                Italic,
                Underscore,
                Negative,
                Outlined,
                Strikeout,
                Bold,
                Regular,
                Reserved07)
        } field;
        struct Raw {
            static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
            static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
            static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
            static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
            static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
            static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
            static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
            SK_OT_USHORT value;
        } raw;
    } fsSelection;
    SK_OT_USHORT usFirstCharIndex;
    SK_OT_USHORT usLastCharIndex;
    //version0
    SK_OT_SHORT sTypoAscender;
    SK_OT_SHORT sTypoDescender;
    SK_OT_SHORT sTypoLineGap;
    SK_OT_USHORT usWinAscent;
    SK_OT_USHORT usWinDescent;
    //version1
    //64-bit code page coverage bitmap (bits 0-63 across value[0..1]).
    union CodePageRange {
        struct Field {
            //l0 24-31
            SK_OT_BYTE_BITFIELD(
                Reserved24,
                Reserved25,
                Reserved26,
                Reserved27,
                Reserved28,
                MacintoshCharacterSet,
                OEMCharacterSet,
                SymbolCharacterSet)
            //l0 16-23
            SK_OT_BYTE_BITFIELD(
                Thai_874,
                JISJapan_932,
                ChineseSimplified_936,
                KoreanWansung_949,
                ChineseTraditional_950,
                KoreanJohab_1361,
                Reserved22,
                Reserved23)
            //l0 8-15
            SK_OT_BYTE_BITFIELD(
                Reserved08,
                Reserved09,
                Reserved10,
                Reserved11,
                Reserved12,
                Reserved13,
                Reserved14,
                Reserved15)
            //l0 0-7
            SK_OT_BYTE_BITFIELD(
                Latin1_1252,
                Latin2EasternEurope_1250,
                Cyrillic_1251,
                Greek_1253,
                Turkish_1254,
                Hebrew_1255,
                Arabic_1256,
                WindowsBaltic_1257)

            //l1 24-31
            SK_OT_BYTE_BITFIELD(
                IBMTurkish_857,
                IBMCyrillic_855,
                Latin2_852,
                MSDOSBaltic_775,
                Greek_737,
                Arabic_708,
                WELatin1_850,
                US_437)
            //l1 16-23
            SK_OT_BYTE_BITFIELD(
                IBMGreek_869,
                MSDOSRussian_866,
                MSDOSNordic_865,
                Arabic_864,
                MSDOSCanadianFrench_863,
                Hebrew_862,
                MSDOSIcelandic_861,
                MSDOSPortuguese_860)
            //l1 8-15
            SK_OT_BYTE_BITFIELD(
                Reserved40,
                Reserved41,
                Reserved42,
                Reserved43,
                Reserved44,
                Reserved45,
                Reserved46,
                Reserved47)
            //l1 0-7
            SK_OT_BYTE_BITFIELD(
                Reserved32,
                Reserved33,
                Reserved34,
                Reserved35,
                Reserved36,
                Reserved37,
                Reserved38,
                Reserved39)
        } field;
        struct Raw {
            struct l0 {
                static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
                static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
                static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
                static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
                static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
                static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
                static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
                static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
                static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
                static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
                static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
                static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
                static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
                static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
                static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
                static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
                static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
            };
            struct l1 {
                static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
                static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
                static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
                static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
                static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
                static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
                static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
                static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
                static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
                static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
                static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
                static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
                static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
                static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
                static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
                static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
            };
            SK_OT_ULONG value[2];
        } raw;
    } ulCodePageRange;
};

#pragma pack(pop)


static_assert(sizeof(SkOTTableOS2_V1) == 86, "sizeof_SkOTTableOS2_V1_not_86");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h
new file mode 100644
index 0000000000..cc5712dbce
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V2.h
@@ -0,0 +1,538 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V2_DEFINED
+#define SkOTTable_OS_2_V2_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V2 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(2);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Reserved027,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved008,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Reserved012,
+ Arabic,
+ Reserved014,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ Greek)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ Surrogates,
+ Reserved058,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ CJKMiscellaneous,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved088,
+ Reserved089,
+ Reserved090,
+ Reserved091,
+ Reserved092,
+ Reserved093,
+ Reserved094,
+ Reserved095)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Reserved084,
+ Reserved085,
+ Reserved086,
+ Reserved087)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved120,
+ Reserved121,
+ Reserved122,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved112,
+ Reserved113,
+ Reserved114,
+ Reserved115,
+ Reserved116,
+ Reserved117,
+ Reserved118,
+ Reserved119)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved104,
+ Reserved105,
+ Reserved106,
+ Reserved107,
+ Reserved108,
+ Reserved109,
+ Reserved110,
+ Reserved111)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved096,
+ Reserved097,
+ Reserved098,
+ Reserved099,
+ Reserved100,
+ Reserved101,
+ Reserved102,
+ Reserved103)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekMask = SkOTSetULONGBit<7>::value;
+ //Reserved
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ //Reserved
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ //Reserved
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ //Reserved
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG CJKMiscellaneousMask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG SurrogatesMask = SkOTSetULONGBit<57 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V2) == 96, "sizeof_SkOTTableOS2_V2_not_96");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h
new file mode 100644
index 0000000000..2474282842
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V3.h
@@ -0,0 +1,547 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V3_DEFINED
+#define SkOTTable_OS_2_V3_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V3 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(3);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Reserved027,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved008,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Reserved012,
+ Arabic,
+ Reserved014,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ GreekAndCoptic)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ NonPlane0,
+ Reserved058,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ Reserved053,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ MusicalSymbols,
+ MathematicalAlphanumericSymbols,
+ PrivateUse,
+ VariationSelectors,
+ Tags,
+ Reserved093,
+ Reserved094,
+ Reserved095)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Tagalog_Hanunoo_Buhid_Tagbanwa,
+ OldItalic,
+ Gothic,
+ Deseret)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved120,
+ Reserved121,
+ Reserved122,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Reserved112,
+ Reserved113,
+ Reserved114,
+ Reserved115,
+ Reserved116,
+ Reserved117,
+ Reserved118,
+ Reserved119)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved104,
+ Reserved105,
+ Reserved106,
+ Reserved107,
+ Reserved108,
+ Reserved109,
+ Reserved110,
+ Reserved111)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved096,
+ Reserved097,
+ Reserved098,
+ Reserved099,
+ Reserved100,
+ Reserved101,
+ Reserved102,
+ Reserved103)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekAndCopticMask = SkOTSetULONGBit<7>::value;
+ //Reserved
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ //Reserved
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ //Reserved
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ //Reserved
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG NonPlane0Mask = SkOTSetULONGBit<57 - 32>::value;
+ //Reserved
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ static const SK_OT_ULONG Tagalog_Hanunoo_Buhid_TagbanwaMask = SkOTSetULONGBit<84 - 64>::value;
+ static const SK_OT_ULONG OldItalicMask = SkOTSetULONGBit<85 - 64>::value;
+ static const SK_OT_ULONG GothicMask = SkOTSetULONGBit<86 - 64>::value;
+ static const SK_OT_ULONG DeseretMask = SkOTSetULONGBit<87 - 64>::value;
+ static const SK_OT_ULONG MusicalSymbolsMask = SkOTSetULONGBit<88 - 64>::value;
+ static const SK_OT_ULONG MathematicalAlphanumericSymbolsMask = SkOTSetULONGBit<89 - 64>::value;
+ static const SK_OT_ULONG PrivateUseMask = SkOTSetULONGBit<90 - 64>::value;
+ static const SK_OT_ULONG VariationSelectorsMask = SkOTSetULONGBit<91 - 64>::value;
+ static const SK_OT_ULONG TagsMask = SkOTSetULONGBit<92 - 64>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V3) == 96, "sizeof_SkOTTableOS2_V3_not_96");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h
new file mode 100644
index 0000000000..14bd9a0d2c
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_V4.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_V4_DEFINED
+#define SkOTTable_OS_2_V4_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableOS2_V4 {
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(4);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ Thin = SkTEndian_SwapBE16(100),
+ ExtraLight = SkTEndian_SwapBE16(200),
+ Light = SkTEndian_SwapBE16(300),
+ Normal = SkTEndian_SwapBE16(400),
+ Medium = SkTEndian_SwapBE16(500),
+ SemiBold = SkTEndian_SwapBE16(600),
+ Bold = SkTEndian_SwapBE16(700),
+ ExtraBold = SkTEndian_SwapBE16(800),
+ Black = SkTEndian_SwapBE16(900),
+ };
+ SK_OT_USHORT value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ NoSubsetting,
+ Bitmap,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT NoSubsettingMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT BitmapMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ union UnicodeRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Thai,
+ Lao,
+ Georgian,
+ Balinese,
+ HangulJamo,
+ LatinExtendedAdditional,
+ GreekExtended,
+ GeneralPunctuation)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Bengali,
+ Gurmukhi,
+ Gujarati,
+ Oriya,
+ Tamil,
+ Telugu,
+ Kannada,
+ Malayalam)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Coptic,
+ Cyrillic,
+ Armenian,
+ Hebrew,
+ Vai,
+ Arabic,
+ NKo,
+ Devanagari)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ BasicLatin,
+ Latin1Supplement,
+ LatinExtendedA,
+ LatinExtendedB,
+ IPAExtensions,
+ SpacingModifierLetters,
+ CombiningDiacriticalMarks,
+ GreekAndCoptic)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ Hangul,
+ NonPlane0,
+ Phoenician,
+ CJKUnifiedIdeographs,
+ PrivateUseArea,
+ CJKCompatibilityIdeographs,
+ AlphabeticPresentationForms,
+ ArabicPresentationFormsA)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ CJKSymbolsAndPunctuation,
+ Hiragana,
+ Katakana,
+ Bopomofo,
+ HangulCompatibilityJamo,
+ PhagsPa,
+ EnclosedCJKLettersAndMonths,
+ CJKCompatibility)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ ControlPictures,
+ OpticalCharacterRecognition,
+ EnclosedAlphanumerics,
+ BoxDrawing,
+ BlockElements,
+ GeometricShapes,
+ MiscellaneousSymbols,
+ Dingbats)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ SuperscriptsAndSubscripts,
+ CurrencySymbols,
+ CombiningDiacriticalMarksForSymbols,
+ LetterlikeSymbols,
+ NumberForms,
+ Arrows,
+ MathematicalOperators,
+ MiscellaneousTechnical)
+
+ //l2 24-31
+ SK_OT_BYTE_BITFIELD(
+ MusicalSymbols,
+ MathematicalAlphanumericSymbols,
+ PrivateUse,
+ VariationSelectors,
+ Tags,
+ Limbu,
+ TaiLe,
+ NewTaiLue)
+ //l2 16-23
+ SK_OT_BYTE_BITFIELD(
+ Khmer,
+ Mongolian,
+ Braille,
+ Yi,
+ Tagalog_Hanunoo_Buhid_Tagbanwa,
+ OldItalic,
+ Gothic,
+ Deseret)
+ //l2 8-15
+ SK_OT_BYTE_BITFIELD(
+ Thaana,
+ Sinhala,
+ Myanmar,
+ Ethiopic,
+ Cherokee,
+ UnifiedCanadianSyllabics,
+ Ogham,
+ Runic)
+ //l2 0-7
+ SK_OT_BYTE_BITFIELD(
+ CombiningHalfMarks,
+ CJKCompatibilityForms,
+ SmallFormVariants,
+ ArabicPresentationFormsB,
+ HalfwidthAndFullwidthForms,
+ Specials,
+ Tibetan,
+ Syriac)
+
+ //l3 24-31
+ SK_OT_BYTE_BITFIELD(
+ PhaistosDisc,
+ Carian_Lycian_Lydian,
+ DominoTiles_MahjongTiles,
+ Reserved123,
+ Reserved124,
+ Reserved125,
+ Reserved126,
+ Reserved127)
+ //l3 16-23
+ SK_OT_BYTE_BITFIELD(
+ Sundanese,
+ Lepcha,
+ OlChiki,
+ Saurashtra,
+ KayahLi,
+ Rejang,
+ Cham,
+ AncientSymbols)
+ //l3 8-15
+ SK_OT_BYTE_BITFIELD(
+ OldPersian,
+ Shavian,
+ Osmanya,
+ CypriotSyllabary,
+ Kharoshthi,
+ TaiXuanJingSymbols,
+ Cuneiform,
+ CountingRodNumerals)
+ //l3 0-7
+ SK_OT_BYTE_BITFIELD(
+ Buginese,
+ Glagolitic,
+ Tifinagh,
+ YijingHexagramSymbols,
+ SylotiNagri,
+ LinearB_AegeanNumbers,
+ AncientGreekNumbers,
+ Ugaritic)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG BasicLatinMask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin1SupplementMask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG LatinExtendedAMask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG LatinExtendedBMask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG IPAExtensionsMask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG SpacingModifierLettersMask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksMask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG GreekAndCopticMask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG CopticMask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG CyrillicMask = SkOTSetULONGBit<9>::value;
+ static const SK_OT_ULONG ArmenianMask = SkOTSetULONGBit<10>::value;
+ static const SK_OT_ULONG HebrewMask = SkOTSetULONGBit<11>::value;
+ static const SK_OT_ULONG VaiMask = SkOTSetULONGBit<12>::value;
+ static const SK_OT_ULONG ArabicMask = SkOTSetULONGBit<13>::value;
+ static const SK_OT_ULONG NKoMask = SkOTSetULONGBit<14>::value;
+ static const SK_OT_ULONG DevanagariMask = SkOTSetULONGBit<15>::value;
+ static const SK_OT_ULONG BengaliMask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG GurmukhiMask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG GujaratiMask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG OriyaMask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG TamilMask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG TeluguMask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG KannadaMask = SkOTSetULONGBit<22>::value;
+ static const SK_OT_ULONG MalayalamMask = SkOTSetULONGBit<23>::value;
+ static const SK_OT_ULONG ThaiMask = SkOTSetULONGBit<24>::value;
+ static const SK_OT_ULONG LaoMask = SkOTSetULONGBit<25>::value;
+ static const SK_OT_ULONG GeorgianMask = SkOTSetULONGBit<26>::value;
+ static const SK_OT_ULONG BalineseMask = SkOTSetULONGBit<27>::value;
+ static const SK_OT_ULONG HangulJamoMask = SkOTSetULONGBit<28>::value;
+ static const SK_OT_ULONG LatinExtendedAdditionalMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG GreekExtendedMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG GeneralPunctuationMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG SuperscriptsAndSubscriptsMask = SkOTSetULONGBit<32 - 32>::value;
+ static const SK_OT_ULONG CurrencySymbolsMask = SkOTSetULONGBit<33 - 32>::value;
+ static const SK_OT_ULONG CombiningDiacriticalMarksForSymbolsMask = SkOTSetULONGBit<34 - 32>::value;
+ static const SK_OT_ULONG LetterlikeSymbolsMask = SkOTSetULONGBit<35 - 32>::value;
+ static const SK_OT_ULONG NumberFormsMask = SkOTSetULONGBit<36 - 32>::value;
+ static const SK_OT_ULONG ArrowsMask = SkOTSetULONGBit<37 - 32>::value;
+ static const SK_OT_ULONG MathematicalOperatorsMask = SkOTSetULONGBit<38 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousTechnicalMask = SkOTSetULONGBit<39 - 32>::value;
+ static const SK_OT_ULONG ControlPicturesMask = SkOTSetULONGBit<40 - 32>::value;
+ static const SK_OT_ULONG OpticalCharacterRecognitionMask = SkOTSetULONGBit<41 - 32>::value;
+ static const SK_OT_ULONG EnclosedAlphanumericsMask = SkOTSetULONGBit<42 - 32>::value;
+ static const SK_OT_ULONG BoxDrawingMask = SkOTSetULONGBit<43 - 32>::value;
+ static const SK_OT_ULONG BlockElementsMask = SkOTSetULONGBit<44 - 32>::value;
+ static const SK_OT_ULONG GeometricShapesMask = SkOTSetULONGBit<45 - 32>::value;
+ static const SK_OT_ULONG MiscellaneousSymbolsMask = SkOTSetULONGBit<46 - 32>::value;
+ static const SK_OT_ULONG DingbatsMask = SkOTSetULONGBit<47 - 32>::value;
+ static const SK_OT_ULONG CJKSymbolsAndPunctuationMask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG HiraganaMask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG KatakanaMask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG BopomofoMask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG HangulCompatibilityJamoMask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG PhagsPaMask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG EnclosedCJKLettersAndMonthsMask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityMask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG HangulMask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG NonPlane0Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG PhoenicianMask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG CJKUnifiedIdeographsMask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG PrivateUseAreaMask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG CJKCompatibilityIdeographsMask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG AlphabeticPresentationFormsMask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsAMask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ struct l2 {
+ static const SK_OT_ULONG CombiningHalfMarksMask = SkOTSetULONGBit<64 - 64>::value;
+ static const SK_OT_ULONG CJKCompatibilityFormsMask = SkOTSetULONGBit<65 - 64>::value;
+ static const SK_OT_ULONG SmallFormVariantsMask = SkOTSetULONGBit<66 - 64>::value;
+ static const SK_OT_ULONG ArabicPresentationFormsBMask = SkOTSetULONGBit<67 - 64>::value;
+ static const SK_OT_ULONG HalfwidthAndFullwidthFormsMask = SkOTSetULONGBit<68 - 64>::value;
+ static const SK_OT_ULONG SpecialsMask = SkOTSetULONGBit<69 - 64>::value;
+ static const SK_OT_ULONG TibetanMask = SkOTSetULONGBit<70 - 64>::value;
+ static const SK_OT_ULONG SyriacMask = SkOTSetULONGBit<71 - 64>::value;
+ static const SK_OT_ULONG ThaanaMask = SkOTSetULONGBit<72 - 64>::value;
+ static const SK_OT_ULONG SinhalaMask = SkOTSetULONGBit<73 - 64>::value;
+ static const SK_OT_ULONG MyanmarMask = SkOTSetULONGBit<74 - 64>::value;
+ static const SK_OT_ULONG EthiopicMask = SkOTSetULONGBit<75 - 64>::value;
+ static const SK_OT_ULONG CherokeeMask = SkOTSetULONGBit<76 - 64>::value;
+ static const SK_OT_ULONG UnifiedCanadianSyllabicsMask = SkOTSetULONGBit<77 - 64>::value;
+ static const SK_OT_ULONG OghamMask = SkOTSetULONGBit<78 - 64>::value;
+ static const SK_OT_ULONG RunicMask = SkOTSetULONGBit<79 - 64>::value;
+ static const SK_OT_ULONG KhmerMask = SkOTSetULONGBit<80 - 64>::value;
+ static const SK_OT_ULONG MongolianMask = SkOTSetULONGBit<81 - 64>::value;
+ static const SK_OT_ULONG BrailleMask = SkOTSetULONGBit<82 - 64>::value;
+ static const SK_OT_ULONG YiMask = SkOTSetULONGBit<83 - 64>::value;
+ static const SK_OT_ULONG Tagalog_Hanunoo_Buhid_TagbanwaMask = SkOTSetULONGBit<84 - 64>::value;
+ static const SK_OT_ULONG OldItalicMask = SkOTSetULONGBit<85 - 64>::value;
+ static const SK_OT_ULONG GothicMask = SkOTSetULONGBit<86 - 64>::value;
+ static const SK_OT_ULONG DeseretMask = SkOTSetULONGBit<87 - 64>::value;
+ static const SK_OT_ULONG MusicalSymbolsMask = SkOTSetULONGBit<88 - 64>::value;
+ static const SK_OT_ULONG MathematicalAlphanumericSymbolsMask = SkOTSetULONGBit<89 - 64>::value;
+ static const SK_OT_ULONG PrivateUseMask = SkOTSetULONGBit<90 - 64>::value;
+ static const SK_OT_ULONG VariationSelectorsMask = SkOTSetULONGBit<91 - 64>::value;
+ static const SK_OT_ULONG TagsMask = SkOTSetULONGBit<92 - 64>::value;
+ static const SK_OT_ULONG LimbuMask = SkOTSetULONGBit<93 - 64>::value;
+ static const SK_OT_ULONG TaiLeMask = SkOTSetULONGBit<94 - 64>::value;
+ static const SK_OT_ULONG NewTaiLueMask = SkOTSetULONGBit<95 - 64>::value;
+ };
+ struct l3 {
+ static const SK_OT_ULONG BugineseMask = SkOTSetULONGBit<96 - 96>::value;
+ static const SK_OT_ULONG GlagoliticMask = SkOTSetULONGBit<97 - 96>::value;
+ static const SK_OT_ULONG TifinaghMask = SkOTSetULONGBit<98 - 96>::value;
+ static const SK_OT_ULONG YijingHexagramSymbolsMask = SkOTSetULONGBit<99 - 96>::value;
+ static const SK_OT_ULONG SylotiNagriMask = SkOTSetULONGBit<100 - 96>::value;
+ static const SK_OT_ULONG LinearB_AegeanNumbersMask = SkOTSetULONGBit<101 - 96>::value;
+ static const SK_OT_ULONG AncientGreekNumbersMask = SkOTSetULONGBit<102 - 96>::value;
+ static const SK_OT_ULONG UgariticMask = SkOTSetULONGBit<103 - 96>::value;
+ static const SK_OT_ULONG OldPersianMask = SkOTSetULONGBit<104 - 96>::value;
+ static const SK_OT_ULONG ShavianMask = SkOTSetULONGBit<105 - 96>::value;
+ static const SK_OT_ULONG OsmanyaMask = SkOTSetULONGBit<106 - 96>::value;
+ static const SK_OT_ULONG CypriotSyllabaryMask = SkOTSetULONGBit<107 - 96>::value;
+ static const SK_OT_ULONG KharoshthiMask = SkOTSetULONGBit<108 - 96>::value;
+ static const SK_OT_ULONG TaiXuanJingSymbolsMask = SkOTSetULONGBit<109 - 96>::value;
+ static const SK_OT_ULONG CuneiformMask = SkOTSetULONGBit<110 - 96>::value;
+ static const SK_OT_ULONG CountingRodNumeralsMask = SkOTSetULONGBit<111 - 96>::value;
+ static const SK_OT_ULONG SundaneseMask = SkOTSetULONGBit<112 - 96>::value;
+ static const SK_OT_ULONG LepchaMask = SkOTSetULONGBit<113 - 96>::value;
+ static const SK_OT_ULONG OlChikiMask = SkOTSetULONGBit<114 - 96>::value;
+ static const SK_OT_ULONG SaurashtraMask = SkOTSetULONGBit<115 - 96>::value;
+ static const SK_OT_ULONG KayahLiMask = SkOTSetULONGBit<116 - 96>::value;
+ static const SK_OT_ULONG RejangMask = SkOTSetULONGBit<117 - 96>::value;
+ static const SK_OT_ULONG ChamMask = SkOTSetULONGBit<118 - 96>::value;
+ static const SK_OT_ULONG AncientSymbolsMask = SkOTSetULONGBit<119 - 96>::value;
+ static const SK_OT_ULONG PhaistosDiscMask = SkOTSetULONGBit<120 - 96>::value;
+ static const SK_OT_ULONG Carian_Lycian_LydianMask = SkOTSetULONGBit<121 - 96>::value;
+ static const SK_OT_ULONG DominoTiles_MahjongTilesMask = SkOTSetULONGBit<122 - 96>::value;
+ };
+ SK_OT_ULONG value[4];
+ } raw;
+ } ulUnicodeRange;
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ WWS,
+ Oblique,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Regular,
+ UseTypoMetrics)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ static const SK_OT_USHORT RegularMask = SkOTSetUSHORTBit<6>::value;
+ static const SK_OT_USHORT UseTypoMetricsMask = SkOTSetUSHORTBit<7>::value;
+ static const SK_OT_USHORT WWSMask = SkOTSetUSHORTBit<8>::value;
+ static const SK_OT_USHORT ObliqueMask = SkOTSetUSHORTBit<9>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+ //version0
+ SK_OT_SHORT sTypoAscender;
+ SK_OT_SHORT sTypoDescender;
+ SK_OT_SHORT sTypoLineGap;
+ SK_OT_USHORT usWinAscent;
+ SK_OT_USHORT usWinDescent;
+ //version1
+ union CodePageRange {
+ struct Field {
+ //l0 24-31
+ SK_OT_BYTE_BITFIELD(
+ Reserved24,
+ Reserved25,
+ Reserved26,
+ Reserved27,
+ Reserved28,
+ MacintoshCharacterSet,
+ OEMCharacterSet,
+ SymbolCharacterSet)
+ //l0 16-23
+ SK_OT_BYTE_BITFIELD(
+ Thai_874,
+ JISJapan_932,
+ ChineseSimplified_936,
+ KoreanWansung_949,
+ ChineseTraditional_950,
+ KoreanJohab_1361,
+ Reserved22,
+ Reserved23)
+ //l0 8-15
+ SK_OT_BYTE_BITFIELD(
+ Vietnamese,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //l0 0-7
+ SK_OT_BYTE_BITFIELD(
+ Latin1_1252,
+ Latin2EasternEurope_1250,
+ Cyrillic_1251,
+ Greek_1253,
+ Turkish_1254,
+ Hebrew_1255,
+ Arabic_1256,
+ WindowsBaltic_1257)
+
+ //l1 24-31
+ SK_OT_BYTE_BITFIELD(
+ IBMTurkish_857,
+ IBMCyrillic_855,
+ Latin2_852,
+ MSDOSBaltic_775,
+ Greek_737,
+ Arabic_708,
+ WELatin1_850,
+ US_437)
+ //l1 16-23
+ SK_OT_BYTE_BITFIELD(
+ IBMGreek_869,
+ MSDOSRussian_866,
+ MSDOSNordic_865,
+ Arabic_864,
+ MSDOSCanadianFrench_863,
+ Hebrew_862,
+ MSDOSIcelandic_861,
+ MSDOSPortuguese_860)
+ //l1 8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved40,
+ Reserved41,
+ Reserved42,
+ Reserved43,
+ Reserved44,
+ Reserved45,
+ Reserved46,
+ Reserved47)
+ //l1 0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved32,
+ Reserved33,
+ Reserved34,
+ Reserved35,
+ Reserved36,
+ Reserved37,
+ Reserved38,
+ Reserved39)
+ } field;
+ struct Raw {
+ struct l0 {
+ static const SK_OT_ULONG Latin1_1252Mask = SkOTSetULONGBit<0>::value;
+ static const SK_OT_ULONG Latin2EasternEurope_1250Mask = SkOTSetULONGBit<1>::value;
+ static const SK_OT_ULONG Cyrillic_1251Mask = SkOTSetULONGBit<2>::value;
+ static const SK_OT_ULONG Greek_1253Mask = SkOTSetULONGBit<3>::value;
+ static const SK_OT_ULONG Turkish_1254Mask = SkOTSetULONGBit<4>::value;
+ static const SK_OT_ULONG Hebrew_1255Mask = SkOTSetULONGBit<5>::value;
+ static const SK_OT_ULONG Arabic_1256Mask = SkOTSetULONGBit<6>::value;
+ static const SK_OT_ULONG WindowsBaltic_1257Mask = SkOTSetULONGBit<7>::value;
+ static const SK_OT_ULONG Vietnamese_1258Mask = SkOTSetULONGBit<8>::value;
+ static const SK_OT_ULONG Thai_874Mask = SkOTSetULONGBit<16>::value;
+ static const SK_OT_ULONG JISJapan_932Mask = SkOTSetULONGBit<17>::value;
+ static const SK_OT_ULONG ChineseSimplified_936Mask = SkOTSetULONGBit<18>::value;
+ static const SK_OT_ULONG KoreanWansung_949Mask = SkOTSetULONGBit<19>::value;
+ static const SK_OT_ULONG ChineseTraditional_950Mask = SkOTSetULONGBit<20>::value;
+ static const SK_OT_ULONG KoreanJohab_1361Mask = SkOTSetULONGBit<21>::value;
+ static const SK_OT_ULONG MacintoshCharacterSetMask = SkOTSetULONGBit<29>::value;
+ static const SK_OT_ULONG OEMCharacterSetMask = SkOTSetULONGBit<30>::value;
+ static const SK_OT_ULONG SymbolCharacterSetMask = SkOTSetULONGBit<31>::value;
+ };
+ struct l1 {
+ static const SK_OT_ULONG IBMGreek_869Mask = SkOTSetULONGBit<48 - 32>::value;
+ static const SK_OT_ULONG MSDOSRussian_866Mask = SkOTSetULONGBit<49 - 32>::value;
+ static const SK_OT_ULONG MSDOSNordic_865Mask = SkOTSetULONGBit<50 - 32>::value;
+ static const SK_OT_ULONG Arabic_864Mask = SkOTSetULONGBit<51 - 32>::value;
+ static const SK_OT_ULONG MSDOSCanadianFrench_863Mask = SkOTSetULONGBit<52 - 32>::value;
+ static const SK_OT_ULONG Hebrew_862Mask = SkOTSetULONGBit<53 - 32>::value;
+ static const SK_OT_ULONG MSDOSIcelandic_861Mask = SkOTSetULONGBit<54 - 32>::value;
+ static const SK_OT_ULONG MSDOSPortuguese_860Mask = SkOTSetULONGBit<55 - 32>::value;
+ static const SK_OT_ULONG IBMTurkish_857Mask = SkOTSetULONGBit<56 - 32>::value;
+ static const SK_OT_ULONG IBMCyrillic_855Mask = SkOTSetULONGBit<57 - 32>::value;
+ static const SK_OT_ULONG Latin2_852Mask = SkOTSetULONGBit<58 - 32>::value;
+ static const SK_OT_ULONG MSDOSBaltic_775Mask = SkOTSetULONGBit<59 - 32>::value;
+ static const SK_OT_ULONG Greek_737Mask = SkOTSetULONGBit<60 - 32>::value;
+ static const SK_OT_ULONG Arabic_708Mask = SkOTSetULONGBit<61 - 32>::value;
+ static const SK_OT_ULONG WELatin1_850Mask = SkOTSetULONGBit<62 - 32>::value;
+ static const SK_OT_ULONG US_437Mask = SkOTSetULONGBit<63 - 32>::value;
+ };
+ SK_OT_ULONG value[2];
+ } raw;
+ } ulCodePageRange;
+ //version2
+ SK_OT_SHORT sxHeight;
+ SK_OT_SHORT sCapHeight;
+ SK_OT_USHORT usDefaultChar;
+ SK_OT_USHORT usBreakChar;
+ SK_OT_USHORT usMaxContext;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_V4) == 96, "sizeof_SkOTTableOS2_V4_not_96");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h
new file mode 100644
index 0000000000..a3bad945d8
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_OS_2_VA.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_OS_2_VA_DEFINED
+#define SkOTTable_OS_2_VA_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkIBMFamilyClass.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkPanose.h"
+
+#pragma pack(push, 1)
+
+//Original V0 TT
+struct SkOTTableOS2_VA {
+ SK_OT_USHORT version;
+ //SkOTTableOS2_VA::VERSION and SkOTTableOS2_V0::VERSION are both 0.
+ //The only way to differentiate these two versions is by the size of the table.
+ static const SK_OT_USHORT VERSION = SkTEndian_SwapBE16(0);
+
+ SK_OT_SHORT xAvgCharWidth;
+ struct WeightClass {
+ enum Value : SK_OT_USHORT {
+ UltraLight = SkTEndian_SwapBE16(1),
+ ExtraLight = SkTEndian_SwapBE16(2),
+ Light = SkTEndian_SwapBE16(3),
+ SemiLight = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiBold = SkTEndian_SwapBE16(6),
+ Bold = SkTEndian_SwapBE16(7),
+ ExtraBold = SkTEndian_SwapBE16(8),
+ UltraBold = SkTEndian_SwapBE16(9),
+ SK_SEQ_END,
+ } value;
+ } usWeightClass;
+ struct WidthClass {
+ enum Value : SK_OT_USHORT {
+ UltraCondensed = SkTEndian_SwapBE16(1),
+ ExtraCondensed = SkTEndian_SwapBE16(2),
+ Condensed = SkTEndian_SwapBE16(3),
+ SemiCondensed = SkTEndian_SwapBE16(4),
+ Medium = SkTEndian_SwapBE16(5),
+ SemiExpanded = SkTEndian_SwapBE16(6),
+ Expanded = SkTEndian_SwapBE16(7),
+ ExtraExpanded = SkTEndian_SwapBE16(8),
+ UltraExpanded = SkTEndian_SwapBE16(9),
+ SK_SEQ_END,
+ } value;
+ } usWidthClass;
+ union Type {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Reserved00,
+ Restricted,
+ PreviewPrint,
+ Editable,
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT Installable = 0;
+ static const SK_OT_USHORT RestrictedMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT PreviewPrintMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT EditableMask = SkOTSetUSHORTBit<3>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsType;
+ SK_OT_SHORT ySubscriptXSize;
+ SK_OT_SHORT ySubscriptYSize;
+ SK_OT_SHORT ySubscriptXOffset;
+ SK_OT_SHORT ySubscriptYOffset;
+ SK_OT_SHORT ySuperscriptXSize;
+ SK_OT_SHORT ySuperscriptYSize;
+ SK_OT_SHORT ySuperscriptXOffset;
+ SK_OT_SHORT ySuperscriptYOffset;
+ SK_OT_SHORT yStrikeoutSize;
+ SK_OT_SHORT yStrikeoutPosition;
+ SkIBMFamilyClass sFamilyClass;
+ SkPanose panose;
+ SK_OT_ULONG ulCharRange[4];
+ SK_OT_CHAR achVendID[4];
+ union Selection {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Italic,
+ Underscore,
+ Negative,
+ Outlined,
+ Strikeout,
+ Bold,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ItalicMask = SkOTSetUSHORTBit<0>::value;
+ static const SK_OT_USHORT UnderscoreMask = SkOTSetUSHORTBit<1>::value;
+ static const SK_OT_USHORT NegativeMask = SkOTSetUSHORTBit<2>::value;
+ static const SK_OT_USHORT OutlinedMask = SkOTSetUSHORTBit<3>::value;
+ static const SK_OT_USHORT StrikeoutMask = SkOTSetUSHORTBit<4>::value;
+ static const SK_OT_USHORT BoldMask = SkOTSetUSHORTBit<5>::value;
+ SK_OT_USHORT value;
+ } raw;
+ } fsSelection;
+ SK_OT_USHORT usFirstCharIndex;
+ SK_OT_USHORT usLastCharIndex;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableOS2_VA) == 68, "sizeof_SkOTTableOS2_VA_not_68");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_fvar.h b/gfx/skia/skia/src/sfnt/SkOTTable_fvar.h
new file mode 100644
index 0000000000..913cbfb546
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_fvar.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_fvar_DEFINED
+#define SkOTTable_fvar_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableFontVariations {
+ static const SK_OT_CHAR TAG0 = 'f';
+ static const SK_OT_CHAR TAG1 = 'v';
+ static const SK_OT_CHAR TAG2 = 'a';
+ static const SK_OT_CHAR TAG3 = 'r';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableFontVariations>::value;
+
+ SK_OT_USHORT majorVersion;
+ SK_OT_USHORT minorVersion;
+ SK_OT_USHORT offsetToAxesArray;
+ SK_OT_USHORT reserved;
+ SK_OT_USHORT axisCount;
+ SK_OT_USHORT axisSize; // Must be 0x0014 in v1.0
+ SK_OT_USHORT instanceCount;
+ SK_OT_USHORT instanceSize; // Must be axisCount * sizeof(Fixed) + (4 | 6)
+
+ struct VariationAxisRecord {
+ SK_OT_ULONG axisTag;
+ SK_OT_Fixed minValue;
+ SK_OT_Fixed defaultValue;
+ SK_OT_Fixed maxValue;
+ SK_OT_USHORT flags; // Must be 0
+ SK_OT_USHORT axisNameID;
+ }; // axes[axisCount];
+
+ template <size_t AxisCount> struct InstanceRecord {
+ SK_OT_USHORT subfamilyNameID;
+ SK_OT_USHORT flags; // Must be 0
+ SK_OT_Fixed coordinates[AxisCount];
+ SK_OT_USHORT postScriptNameID;
+ }; // instances[instanceCount];
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableFontVariations, instanceSize) == 14, "SkOTTableFontVariations_instanceSize_not_at_14");
+static_assert(sizeof(SkOTTableFontVariations) == 16, "sizeof_SkOTTableFontVariations_not_16");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h b/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h
new file mode 100644
index 0000000000..caf21e03c9
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_gasp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_gasp_DEFINED
+#define SkOTTable_gasp_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableGridAndScanProcedure {
+ static const SK_OT_CHAR TAG0 = 'g';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 's';
+ static const SK_OT_CHAR TAG3 = 'p';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableGridAndScanProcedure>::value;
+
+ SK_OT_USHORT version;
+ static const SK_OT_USHORT version0 = SkTEndian_SwapBE16(0);
+ static const SK_OT_USHORT version1 = SkTEndian_SwapBE16(1);
+
+ SK_OT_USHORT numRanges;
+
+ struct GaspRange {
+ SK_OT_USHORT maxPPEM;
+ union behavior {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Gridfit,
+ DoGray,
+ SymmetricGridfit, // Version 1
+ SymmetricSmoothing, // Version 1
+ Reserved04,
+ Reserved05,
+ Reserved06,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT GridfitMask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT DoGrayMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT SymmetricGridfitMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT SymmetricSmoothingMask = SkTEndian_SwapBE16(1 << 3);
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ }; //gaspRange[numRanges]
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableGridAndScanProcedure, numRanges) == 2, "SkOTTableGridAndScanProcedure_numRanges_not_at_2");
+static_assert(sizeof(SkOTTableGridAndScanProcedure) == 4, "sizeof_SkOTTableGridAndScanProcedure_not_4");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h b/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h
new file mode 100644
index 0000000000..207d59bec0
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_glyf.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_glyf_DEFINED
+#define SkOTTable_glyf_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_head.h"
+#include "src/sfnt/SkOTTable_loca.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableGlyphData;
+
+struct SkOTTableGlyph {
+ static const SK_OT_CHAR TAG0 = 'g';
+ static const SK_OT_CHAR TAG1 = 'l';
+ static const SK_OT_CHAR TAG2 = 'y';
+ static const SK_OT_CHAR TAG3 = 'f';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableGlyph>::value;
+
+ class Iterator {
+ public:
+ Iterator(SkOTTableGlyph& glyf,
+ const SkOTTableIndexToLocation& loca,
+ SkOTTableHead::IndexToLocFormat locaFormat)
+ : fGlyf(glyf)
+ , fLoca(loca)
+ , fLocaFormat(locaFormat)
+ , fCurrentGlyph(0)
+ , fCurrentGlyphOffset(0)
+ {
+ SkASSERT(locaFormat.value == SkOTTableHead::IndexToLocFormat::ShortOffsets ||
+ locaFormat.value == SkOTTableHead::IndexToLocFormat::LongOffsets);
+ }
+
+ void advance(uint16_t num) {
+ fCurrentGlyph += num;
+ if (fLocaFormat.value == SkOTTableHead::IndexToLocFormat::ShortOffsets) {
+ fCurrentGlyphOffset =
+ SkEndian_SwapBE16(fLoca.offsets.shortOffset[fCurrentGlyph]) << 1;
+ } else if (fLocaFormat.value == SkOTTableHead::IndexToLocFormat::LongOffsets) {
+ fCurrentGlyphOffset = SkEndian_SwapBE32(fLoca.offsets.longOffset[fCurrentGlyph]);
+ }
+ }
+ SkOTTableGlyphData* next() {
+ uint32_t previousGlyphOffset = fCurrentGlyphOffset;
+ advance(1);
+ if (previousGlyphOffset == fCurrentGlyphOffset) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<SkOTTableGlyphData*>(
+ reinterpret_cast<SK_OT_BYTE*>(&fGlyf) + previousGlyphOffset
+ );
+ }
+ }
+ private:
+ SkOTTableGlyph& fGlyf;
+ const SkOTTableIndexToLocation& fLoca;
+ SkOTTableHead::IndexToLocFormat fLocaFormat;
+ uint32_t fCurrentGlyph;
+ uint32_t fCurrentGlyphOffset;
+ };
+};
+
+struct SkOTTableGlyphData {
+ SK_OT_SHORT numberOfContours; //== -1 Composite, > 0 Simple
+ SK_OT_FWORD xMin;
+ SK_OT_FWORD yMin;
+ SK_OT_FWORD xMax;
+ SK_OT_FWORD yMax;
+
+ struct Simple {
+ SK_OT_USHORT endPtsOfContours[1/*numberOfContours*/];
+
+ struct Instructions {
+ SK_OT_USHORT length;
+ SK_OT_BYTE data[1/*length*/];
+ };
+
+ union Flags {
+ struct Field {
+ SK_OT_BYTE_BITFIELD(
+ OnCurve,
+ xShortVector,
+ yShortVector,
+ Repeat,
+ xIsSame_xShortVectorPositive,
+ yIsSame_yShortVectorPositive,
+ Reserved6,
+ Reserved7)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT OnCurveMask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT xShortVectorMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT yShortVectorMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT RepeatMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT xIsSame_xShortVectorPositiveMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT yIsSame_yShortVectorPositiveMask = SkTEndian_SwapBE16(1 << 5);
+ SK_OT_BYTE value;
+ } raw;
+ };
+
+ //xCoordinates
+ //yCoordinates
+ };
+
+ struct Composite {
+ struct Component {
+ union Flags {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ WE_HAVE_INSTRUCTIONS,
+ USE_MY_METRICS,
+ OVERLAP_COMPOUND,
+ SCALED_COMPONENT_OFFSET,
+ UNSCALED_COMPONENT_OFFSET,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ ARG_1_AND_2_ARE_WORDS,
+ ARGS_ARE_XY_VALUES,
+ ROUND_XY_TO_GRID,
+ WE_HAVE_A_SCALE,
+ RESERVED,
+ MORE_COMPONENTS,
+ WE_HAVE_AN_X_AND_Y_SCALE,
+ WE_HAVE_A_TWO_BY_TWO)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT ARG_1_AND_2_ARE_WORDS_Mask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT ARGS_ARE_XY_VALUES_Mask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT ROUND_XY_TO_GRID_Mask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT WE_HAVE_A_SCALE_Mask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT RESERVED_Mask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT MORE_COMPONENTS_Mask = SkTEndian_SwapBE16(1 << 5);
+ static const SK_OT_USHORT WE_HAVE_AN_X_AND_Y_SCALE_Mask = SkTEndian_SwapBE16(1 << 6);
+ static const SK_OT_USHORT WE_HAVE_A_TWO_BY_TWO_Mask = SkTEndian_SwapBE16(1 << 7);
+
+ static const SK_OT_USHORT WE_HAVE_INSTRUCTIONS_Mask = SkTEndian_SwapBE16(1 << 8);
+ static const SK_OT_USHORT USE_MY_METRICS_Mask = SkTEndian_SwapBE16(1 << 9);
+ static const SK_OT_USHORT OVERLAP_COMPOUND_Mask = SkTEndian_SwapBE16(1 << 10);
+ static const SK_OT_USHORT SCALED_COMPONENT_OFFSET_Mask = SkTEndian_SwapBE16(1 << 11);
+ static const SK_OT_USHORT UNSCALED_COMPONENT_OFFSET_mask = SkTEndian_SwapBE16(1 << 12);
+ //Reserved
+ //Reserved
+ //Reserved
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ SK_OT_USHORT glyphIndex;
+ union Transform {
+ union Matrix {
+ /** !WE_HAVE_A_SCALE & !WE_HAVE_AN_X_AND_Y_SCALE & !WE_HAVE_A_TWO_BY_TWO */
+ struct None { } none;
+ /** WE_HAVE_A_SCALE */
+ struct Scale {
+ SK_OT_F2DOT14 a_d;
+ } scale;
+ /** WE_HAVE_AN_X_AND_Y_SCALE */
+ struct ScaleXY {
+ SK_OT_F2DOT14 a;
+ SK_OT_F2DOT14 d;
+ } scaleXY;
+ /** WE_HAVE_A_TWO_BY_TWO */
+ struct TwoByTwo {
+ SK_OT_F2DOT14 a;
+ SK_OT_F2DOT14 b;
+ SK_OT_F2DOT14 c;
+ SK_OT_F2DOT14 d;
+ } twoByTwo;
+ };
+ /** ARG_1_AND_2_ARE_WORDS & ARGS_ARE_XY_VALUES */
+ struct WordValue {
+ SK_OT_FWORD e;
+ SK_OT_FWORD f;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } wordValue;
+ /** !ARG_1_AND_2_ARE_WORDS & ARGS_ARE_XY_VALUES */
+ struct ByteValue {
+ SK_OT_CHAR e;
+ SK_OT_CHAR f;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } byteValue;
+ /** ARG_1_AND_2_ARE_WORDS & !ARGS_ARE_XY_VALUES */
+ struct WordIndex {
+ SK_OT_USHORT compoundPointIndex;
+ SK_OT_USHORT componentPointIndex;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } wordIndex;
+ /** !ARG_1_AND_2_ARE_WORDS & !ARGS_ARE_XY_VALUES */
+ struct ByteIndex {
+ SK_OT_BYTE compoundPointIndex;
+ SK_OT_BYTE componentPointIndex;
+ SkOTTableGlyphData::Composite::Component::Transform::Matrix matrix;
+ } byteIndex;
+ } transform;
+ } component;//[] last element does not set MORE_COMPONENTS
+
+ /** Comes after the last Component if the last component has WE_HAVE_INSTR. */
+ struct Instructions {
+ SK_OT_USHORT length;
+ SK_OT_BYTE data[1/*length*/];
+ };
+ };
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_head.h b/gfx/skia/skia/src/sfnt/SkOTTable_head.h
new file mode 100644
index 0000000000..d3b4ac8f45
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_head.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_head_DEFINED
+#define SkOTTable_head_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableHead {
+ static const SK_OT_CHAR TAG0 = 'h';
+ static const SK_OT_CHAR TAG1 = 'e';
+ static const SK_OT_CHAR TAG2 = 'a';
+ static const SK_OT_CHAR TAG3 = 'd';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableHead>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version1 = SkTEndian_SwapBE32(0x00010000);
+ SK_OT_Fixed fontRevision;
+ static const uint32_t fontChecksum = 0xB1B0AFBA; //checksum of all TT fonts
+ SK_OT_ULONG checksumAdjustment;
+ SK_OT_ULONG magicNumber;
+ static const SK_OT_ULONG magicNumberConst = SkTEndian_SwapBE32(0x5F0F3CF5);
+ union Flags {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ GXMetamorphosis_Apple,
+ HasStrongRTL_Apple,
+ HasIndicStyleRearrangement,
+ AgfaMicroTypeExpressProcessed,
+ FontConverted,
+ DesignedForClearType,
+ LastResort,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ BaselineAtY0,
+ LeftSidebearingAtX0,
+ InstructionsDependOnPointSize,
+ IntegerScaling,
+ InstructionsAlterAdvanceWidth,
+ VerticalCenteredGlyphs_Apple,
+ Reserved06,
+ RequiresLayout_Apple)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT BaselineAtY0Mask = SkTEndian_SwapBE16(1 << 0);
+ static const SK_OT_USHORT LeftSidebearingAtX0Mask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT InstructionsDependOnPointSizeMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT IntegerScalingMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT InstructionsAlterAdvanceWidthMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT VerticalCenteredGlyphs_AppleMask = SkTEndian_SwapBE16(1 << 5);
+ //Reserved
+ static const SK_OT_USHORT RequiresLayout_AppleMask = SkTEndian_SwapBE16(1 << 7);
+
+ static const SK_OT_USHORT GXMetamorphosis_AppleMask = SkTEndian_SwapBE16(1 << 8);
+ static const SK_OT_USHORT HasStrongRTL_AppleMask = SkTEndian_SwapBE16(1 << 9);
+ static const SK_OT_USHORT HasIndicStyleRearrangementMask = SkTEndian_SwapBE16(1 << 10);
+ static const SK_OT_USHORT AgfaMicroTypeExpressProcessedMask = SkTEndian_SwapBE16(1 << 11);
+ static const SK_OT_USHORT FontConvertedMask = SkTEndian_SwapBE16(1 << 12);
+ static const SK_OT_USHORT DesignedForClearTypeMask = SkTEndian_SwapBE16(1 << 13);
+ static const SK_OT_USHORT LastResortMask = SkTEndian_SwapBE16(1 << 14);
+ //Reserved
+ SK_OT_USHORT value;
+ } raw;
+ } flags;
+ SK_OT_USHORT unitsPerEm;
+ SK_OT_LONGDATETIME created;
+ SK_OT_LONGDATETIME modified;
+ SK_OT_SHORT xMin;
+ SK_OT_SHORT yMin;
+ SK_OT_SHORT xMax;
+ SK_OT_SHORT yMax;
+ union MacStyle {
+ struct Field {
+ //8-15
+ SK_OT_BYTE_BITFIELD(
+ Reserved08,
+ Reserved09,
+ Reserved10,
+ Reserved11,
+ Reserved12,
+ Reserved13,
+ Reserved14,
+ Reserved15)
+ //0-7
+ SK_OT_BYTE_BITFIELD(
+ Bold,
+ Italic,
+ Underline,
+ Outline,
+ Shadow,
+ Condensed,
+ Extended,
+ Reserved07)
+ } field;
+ struct Raw {
+ static const SK_OT_USHORT BoldMask = SkTEndian_SwapBE16(1);
+ static const SK_OT_USHORT ItalicMask = SkTEndian_SwapBE16(1 << 1);
+ static const SK_OT_USHORT UnderlineMask = SkTEndian_SwapBE16(1 << 2);
+ static const SK_OT_USHORT OutlineMask = SkTEndian_SwapBE16(1 << 3);
+ static const SK_OT_USHORT ShadowMask = SkTEndian_SwapBE16(1 << 4);
+ static const SK_OT_USHORT CondensedMask = SkTEndian_SwapBE16(1 << 5);
+ static const SK_OT_USHORT ExtendedMask = SkTEndian_SwapBE16(1 << 6);
+
+ SK_OT_USHORT value;
+ } raw;
+ } macStyle;
+ SK_OT_USHORT lowestRecPPEM;
+ struct FontDirectionHint {
+ enum Value : SK_OT_SHORT {
+ FullyMixedDirectionalGlyphs = SkTEndian_SwapBE16(0),
+ OnlyStronglyLTR = SkTEndian_SwapBE16(1),
+ StronglyLTR = SkTEndian_SwapBE16(2),
+ OnlyStronglyRTL = static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16((uint16_t)-1)),
+ StronglyRTL = static_cast<SK_OT_SHORT>(SkTEndian_SwapBE16((uint16_t)-2)),
+ } value;
+ } fontDirectionHint;
+ struct IndexToLocFormat {
+ enum Value : SK_OT_SHORT {
+ ShortOffsets = SkTEndian_SwapBE16(0),
+ LongOffsets = SkTEndian_SwapBE16(1),
+ } value;
+ } indexToLocFormat;
+ struct GlyphDataFormat {
+ enum Value : SK_OT_SHORT {
+ CurrentFormat = SkTEndian_SwapBE16(0),
+ } value;
+ } glyphDataFormat;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableHead, glyphDataFormat) == 52, "SkOTTableHead_glyphDataFormat_not_at_52");
+static_assert(sizeof(SkOTTableHead) == 54, "sizeof_SkOTTableHead_not_54");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h b/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h
new file mode 100644
index 0000000000..83ff5933d8
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_hhea.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_hhea_DEFINED
+#define SkOTTable_hhea_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableHorizontalHeader {
+ static const SK_OT_CHAR TAG0 = 'h';
+ static const SK_OT_CHAR TAG1 = 'h';
+ static const SK_OT_CHAR TAG2 = 'e';
+ static const SK_OT_CHAR TAG3 = 'a';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableHorizontalHeader>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version1 = SkTEndian_SwapBE32(0x00010000);
+ SK_OT_FWORD Ascender;
+ SK_OT_FWORD Descender;
+ SK_OT_FWORD LineGap;
+ SK_OT_UFWORD advanceWidthMax;
+ SK_OT_FWORD minLeftSideBearing;
+ SK_OT_FWORD minRightSideBearing;
+ SK_OT_FWORD xMaxExtent;
+ SK_OT_SHORT caretSlopeRise;
+ SK_OT_SHORT caretSlopeRun;
+ SK_OT_SHORT caretOffset;
+ SK_OT_SHORT Reserved24;
+ SK_OT_SHORT Reserved26;
+ SK_OT_SHORT Reserved28;
+ SK_OT_SHORT Reserved30;
+ struct MetricDataFormat {
+ enum Value : SK_OT_SHORT {
+ CurrentFormat = SkTEndian_SwapBE16(0),
+ } value;
+ } metricDataFormat;
+ SK_OT_USHORT numberOfHMetrics;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableHorizontalHeader, numberOfHMetrics) == 34, "SkOTTableHorizontalHeader_numberOfHMetrics_not_at_34");
+static_assert(sizeof(SkOTTableHorizontalHeader) == 36, "sizeof_SkOTTableHorizontalHeader_not_36");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_hmtx.h b/gfx/skia/skia/src/sfnt/SkOTTable_hmtx.h
new file mode 100644
index 0000000000..45aaa8870c
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_hmtx.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_hmtx_DEFINED
+#define SkOTTable_hmtx_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableHorizontalMetrics {
+ static const SK_OT_CHAR TAG0 = 'h';
+ static const SK_OT_CHAR TAG1 = 'm';
+ static const SK_OT_CHAR TAG2 = 't';
+ static const SK_OT_CHAR TAG3 = 'x';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableHorizontalMetrics>::value;
+
+ struct FullMetric {
+ SK_OT_USHORT advanceWidth;
+ SK_OT_SHORT lsb;
+ } longHorMetric[1/*hhea::numberOfHMetrics*/];
+ struct ShortMetric {
+ SK_OT_SHORT lsb;
+ }; /* maxp::numGlyphs - hhea::numberOfHMetrics */
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_loca.h b/gfx/skia/skia/src/sfnt/SkOTTable_loca.h
new file mode 100644
index 0000000000..4ce345f7eb
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_loca.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_loca_DEFINED
+#define SkOTTable_loca_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableIndexToLocation {
+ static const SK_OT_CHAR TAG0 = 'l';
+ static const SK_OT_CHAR TAG1 = 'o';
+ static const SK_OT_CHAR TAG2 = 'c';
+ static const SK_OT_CHAR TAG3 = 'a';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableIndexToLocation>::value;
+
+ union Offsets {
+ SK_OT_USHORT shortOffset[1];
+ SK_OT_ULONG longOffset[1];
+ } offsets;
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h
new file mode 100644
index 0000000000..aaae28a9e3
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_DEFINED
+#define SkOTTable_maxp_DEFINED
+
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_maxp_CFF.h"
+#include "src/sfnt/SkOTTable_maxp_TT.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile {
+ static const SK_OT_CHAR TAG0 = 'm';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 'x';
+ static const SK_OT_CHAR TAG3 = 'p';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableMaximumProfile>::value;
+
+ union Version {
+ SK_OT_Fixed version;
+
+ struct CFF : SkOTTableMaximumProfile_CFF { } cff;
+ struct TT : SkOTTableMaximumProfile_TT { } tt;
+ } version;
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h
new file mode 100644
index 0000000000..5f6608d692
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_CFF.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_CFF_DEFINED
+#define SkOTTable_maxp_CFF_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile_CFF {
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed VERSION = SkTEndian_SwapBE32(0x00005000);
+
+ SK_OT_USHORT numGlyphs;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableMaximumProfile_CFF, numGlyphs) == 4, "SkOTTableMaximumProfile_CFF_numGlyphs_not_at_4");
+static_assert(sizeof(SkOTTableMaximumProfile_CFF) == 6, "sizeof_SkOTTableMaximumProfile_CFF_not_6");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h
new file mode 100644
index 0000000000..fb8fb3692a
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_maxp_TT.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_maxp_TT_DEFINED
+#define SkOTTable_maxp_TT_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableMaximumProfile_TT {
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed VERSION = SkTEndian_SwapBE32(0x00010000);
+
+ SK_OT_USHORT numGlyphs;
+ SK_OT_USHORT maxPoints;
+ SK_OT_USHORT maxContours;
+ SK_OT_USHORT maxCompositePoints;
+ SK_OT_USHORT maxCompositeContours;
+ struct MaxZones {
+ enum Value : SK_OT_USHORT {
+ DoesNotUseTwilightZone = SkTEndian_SwapBE16(1),
+ UsesTwilightZone = SkTEndian_SwapBE16(2),
+ } value;
+ } maxZones;
+ SK_OT_USHORT maxTwilightPoints;
+ SK_OT_USHORT maxStorage;
+ SK_OT_USHORT maxFunctionDefs;
+ SK_OT_USHORT maxInstructionDefs;
+ SK_OT_USHORT maxStackElements;
+ SK_OT_USHORT maxSizeOfInstructions;
+ SK_OT_USHORT maxComponentElements;
+ SK_OT_USHORT maxComponentDepth;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTableMaximumProfile_TT, maxComponentDepth) == 30, "SkOTTableMaximumProfile_TT_maxComponentDepth_not_at_30");
+static_assert(sizeof(SkOTTableMaximumProfile_TT) == 32, "sizeof_SkOTTableMaximumProfile_TT_not_32");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp b/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp
new file mode 100644
index 0000000000..b422f74280
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_name.cpp
@@ -0,0 +1,586 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sfnt/SkOTTable_name.h"
+
+#include "src/base/SkEndian.h"
+#include "src/base/SkTSearch.h"
+#include "src/base/SkUTF.h"
+#include "src/core/SkStringUtils.h"
+
+static SkUnichar next_unichar_UTF16BE(const uint8_t** srcPtr, size_t* length) {
+ SkASSERT(srcPtr && *srcPtr && length);
+ SkASSERT(*length > 0);
+
+ uint16_t leading;
+ if (*length < sizeof(leading)) {
+ *length = 0;
+ return 0xFFFD;
+ }
+ memcpy(&leading, *srcPtr, sizeof(leading));
+ *srcPtr += sizeof(leading);
+ *length -= sizeof(leading);
+ SkUnichar c = SkEndian_SwapBE16(leading);
+
+ if (SkUTF::IsTrailingSurrogateUTF16(c)) {
+ return 0xFFFD;
+ }
+ if (SkUTF::IsLeadingSurrogateUTF16(c)) {
+ uint16_t trailing;
+ if (*length < sizeof(trailing)) {
+ *length = 0;
+ return 0xFFFD;
+ }
+ memcpy(&trailing, *srcPtr, sizeof(trailing));
+ SkUnichar c2 = SkEndian_SwapBE16(trailing);
+ if (!SkUTF::IsTrailingSurrogateUTF16(c2)) {
+ return 0xFFFD;
+ }
+ *srcPtr += sizeof(trailing);
+ *length -= sizeof(trailing);
+
+ c = (c << 10) + c2 + (0x10000 - (0xD800 << 10) - 0xDC00);
+ }
+ return c;
+}
+
+static void SkString_from_UTF16BE(const uint8_t* utf16be, size_t length, SkString& utf8) {
+ // Note that utf16be may not be 2-byte aligned.
+ SkASSERT(utf16be != nullptr);
+
+ utf8.reset();
+ while (length) {
+ utf8.appendUnichar(next_unichar_UTF16BE(&utf16be, &length));
+ }
+}
+
+/** UnicodeFromMacRoman[macRomanPoint - 0x80] -> unicodeCodePoint.
+ * Derived from http://www.unicode.org/Public/MAPPINGS/VENDORS/APPLE/ROMAN.TXT .
+ * In MacRoman the first 128 code points match ASCII code points.
+ * This maps the second 128 MacRoman code points to unicode code points.
+ */
+static const uint16_t UnicodeFromMacRoman[0x80] = {
+ 0x00C4, 0x00C5, 0x00C7, 0x00C9, 0x00D1, 0x00D6, 0x00DC, 0x00E1,
+ 0x00E0, 0x00E2, 0x00E4, 0x00E3, 0x00E5, 0x00E7, 0x00E9, 0x00E8,
+ 0x00EA, 0x00EB, 0x00ED, 0x00EC, 0x00EE, 0x00EF, 0x00F1, 0x00F3,
+ 0x00F2, 0x00F4, 0x00F6, 0x00F5, 0x00FA, 0x00F9, 0x00FB, 0x00FC,
+ 0x2020, 0x00B0, 0x00A2, 0x00A3, 0x00A7, 0x2022, 0x00B6, 0x00DF,
+ 0x00AE, 0x00A9, 0x2122, 0x00B4, 0x00A8, 0x2260, 0x00C6, 0x00D8,
+ 0x221E, 0x00B1, 0x2264, 0x2265, 0x00A5, 0x00B5, 0x2202, 0x2211,
+ 0x220F, 0x03C0, 0x222B, 0x00AA, 0x00BA, 0x03A9, 0x00E6, 0x00F8,
+ 0x00BF, 0x00A1, 0x00AC, 0x221A, 0x0192, 0x2248, 0x2206, 0x00AB,
+ 0x00BB, 0x2026, 0x00A0, 0x00C0, 0x00C3, 0x00D5, 0x0152, 0x0153,
+ 0x2013, 0x2014, 0x201C, 0x201D, 0x2018, 0x2019, 0x00F7, 0x25CA,
+ 0x00FF, 0x0178, 0x2044, 0x20AC, 0x2039, 0x203A, 0xFB01, 0xFB02,
+ 0x2021, 0x00B7, 0x201A, 0x201E, 0x2030, 0x00C2, 0x00CA, 0x00C1,
+ 0x00CB, 0x00C8, 0x00CD, 0x00CE, 0x00CF, 0x00CC, 0x00D3, 0x00D4,
+ 0xF8FF, 0x00D2, 0x00DA, 0x00DB, 0x00D9, 0x0131, 0x02C6, 0x02DC,
+ 0x00AF, 0x02D8, 0x02D9, 0x02DA, 0x00B8, 0x02DD, 0x02DB, 0x02C7,
+};
+
+static void SkStringFromMacRoman(const uint8_t* macRoman, size_t length, SkString& utf8) {
+ utf8.reset();
+ for (size_t i = 0; i < length; ++i) {
+ utf8.appendUnichar(macRoman[i] < 0x80 ? macRoman[i]
+ : UnicodeFromMacRoman[macRoman[i] - 0x80]);
+ }
+}
+
+static const struct BCP47FromLanguageId {
+ uint16_t languageID;
+ const char* bcp47;
+}
+/** The Mac and Windows values do not conflict, so this is currently one single table. */
+BCP47FromLanguageID[] = {
+ /** A mapping from Mac Language Designators to BCP 47 codes.
+ * The following list was constructed more or less manually.
+ * Apple now uses BCP 47 (post OSX10.4), so there will be no new entries.
+ */
+ {0, "en"}, //English
+ {1, "fr"}, //French
+ {2, "de"}, //German
+ {3, "it"}, //Italian
+ {4, "nl"}, //Dutch
+ {5, "sv"}, //Swedish
+ {6, "es"}, //Spanish
+ {7, "da"}, //Danish
+ {8, "pt"}, //Portuguese
+ {9, "nb"}, //Norwegian
+ {10, "he"}, //Hebrew
+ {11, "ja"}, //Japanese
+ {12, "ar"}, //Arabic
+ {13, "fi"}, //Finnish
+ {14, "el"}, //Greek
+ {15, "is"}, //Icelandic
+ {16, "mt"}, //Maltese
+ {17, "tr"}, //Turkish
+ {18, "hr"}, //Croatian
+ {19, "zh-Hant"}, //Chinese (Traditional)
+ {20, "ur"}, //Urdu
+ {21, "hi"}, //Hindi
+ {22, "th"}, //Thai
+ {23, "ko"}, //Korean
+ {24, "lt"}, //Lithuanian
+ {25, "pl"}, //Polish
+ {26, "hu"}, //Hungarian
+ {27, "et"}, //Estonian
+ {28, "lv"}, //Latvian
+ {29, "se"}, //Sami
+ {30, "fo"}, //Faroese
+ {31, "fa"}, //Farsi (Persian)
+ {32, "ru"}, //Russian
+ {33, "zh-Hans"}, //Chinese (Simplified)
+ {34, "nl"}, //Dutch
+ {35, "ga"}, //Irish(Gaelic)
+ {36, "sq"}, //Albanian
+ {37, "ro"}, //Romanian
+ {38, "cs"}, //Czech
+ {39, "sk"}, //Slovak
+ {40, "sl"}, //Slovenian
+ {41, "yi"}, //Yiddish
+ {42, "sr"}, //Serbian
+ {43, "mk"}, //Macedonian
+ {44, "bg"}, //Bulgarian
+ {45, "uk"}, //Ukrainian
+ {46, "be"}, //Byelorussian
+ {47, "uz"}, //Uzbek
+ {48, "kk"}, //Kazakh
+ {49, "az-Cyrl"}, //Azerbaijani (Cyrillic)
+ {50, "az-Arab"}, //Azerbaijani (Arabic)
+ {51, "hy"}, //Armenian
+ {52, "ka"}, //Georgian
+ {53, "mo"}, //Moldavian
+ {54, "ky"}, //Kirghiz
+ {55, "tg"}, //Tajiki
+ {56, "tk"}, //Turkmen
+ {57, "mn-Mong"}, //Mongolian (Traditional)
+ {58, "mn-Cyrl"}, //Mongolian (Cyrillic)
+ {59, "ps"}, //Pashto
+ {60, "ku"}, //Kurdish
+ {61, "ks"}, //Kashmiri
+ {62, "sd"}, //Sindhi
+ {63, "bo"}, //Tibetan
+ {64, "ne"}, //Nepali
+ {65, "sa"}, //Sanskrit
+ {66, "mr"}, //Marathi
+ {67, "bn"}, //Bengali
+ {68, "as"}, //Assamese
+ {69, "gu"}, //Gujarati
+ {70, "pa"}, //Punjabi
+ {71, "or"}, //Oriya
+ {72, "ml"}, //Malayalam
+ {73, "kn"}, //Kannada
+ {74, "ta"}, //Tamil
+ {75, "te"}, //Telugu
+ {76, "si"}, //Sinhalese
+ {77, "my"}, //Burmese
+ {78, "km"}, //Khmer
+ {79, "lo"}, //Lao
+ {80, "vi"}, //Vietnamese
+ {81, "id"}, //Indonesian
+ {82, "tl"}, //Tagalog
+ {83, "ms-Latn"}, //Malay (Roman)
+ {84, "ms-Arab"}, //Malay (Arabic)
+ {85, "am"}, //Amharic
+ {86, "ti"}, //Tigrinya
+ {87, "om"}, //Oromo
+ {88, "so"}, //Somali
+ {89, "sw"}, //Swahili
+ {90, "rw"}, //Kinyarwanda/Ruanda
+ {91, "rn"}, //Rundi
+ {92, "ny"}, //Nyanja/Chewa
+ {93, "mg"}, //Malagasy
+ {94, "eo"}, //Esperanto
+ {128, "cy"}, //Welsh
+ {129, "eu"}, //Basque
+ {130, "ca"}, //Catalan
+ {131, "la"}, //Latin
+ {132, "qu"}, //Quechua
+ {133, "gn"}, //Guarani
+ {134, "ay"}, //Aymara
+ {135, "tt"}, //Tatar
+ {136, "ug"}, //Uighur
+ {137, "dz"}, //Dzongkha
+ {138, "jv-Latn"}, //Javanese (Roman)
+ {139, "su-Latn"}, //Sundanese (Roman)
+ {140, "gl"}, //Galician
+ {141, "af"}, //Afrikaans
+ {142, "br"}, //Breton
+ {143, "iu"}, //Inuktitut
+ {144, "gd"}, //Scottish (Gaelic)
+ {145, "gv"}, //Manx (Gaelic)
+ {146, "ga"}, //Irish (Gaelic with Lenition)
+ {147, "to"}, //Tongan
+ {148, "el"}, //Greek (Polytonic) Note: ISO 15924 does not have an equivalent script name.
+ {149, "kl"}, //Greenlandic
+ {150, "az-Latn"}, //Azerbaijani (Roman)
+ {151, "nn"}, //Nynorsk
+
+ /** A mapping from Windows LCID to BCP 47 codes.
+ * This list is the sorted, curated output of tools/win_lcid.cpp.
+ * Note that these are sorted by value for quick binary lookup, and not logically by lsb.
+ * The 'bare' language ids (e.g. 0x0001 for Arabic) are ommitted
+ * as they do not appear as valid language ids in the OpenType specification.
+ */
+ { 0x0401, "ar-SA" }, //Arabic
+ { 0x0402, "bg-BG" }, //Bulgarian
+ { 0x0403, "ca-ES" }, //Catalan
+ { 0x0404, "zh-TW" }, //Chinese (Traditional)
+ { 0x0405, "cs-CZ" }, //Czech
+ { 0x0406, "da-DK" }, //Danish
+ { 0x0407, "de-DE" }, //German
+ { 0x0408, "el-GR" }, //Greek
+ { 0x0409, "en-US" }, //English
+ { 0x040a, "es-ES_tradnl" }, //Spanish
+ { 0x040b, "fi-FI" }, //Finnish
+ { 0x040c, "fr-FR" }, //French
+ { 0x040d, "he-IL" }, //Hebrew
+ { 0x040d, "he" }, //Hebrew
+ { 0x040e, "hu-HU" }, //Hungarian
+ { 0x040e, "hu" }, //Hungarian
+ { 0x040f, "is-IS" }, //Icelandic
+ { 0x0410, "it-IT" }, //Italian
+ { 0x0411, "ja-JP" }, //Japanese
+ { 0x0412, "ko-KR" }, //Korean
+ { 0x0413, "nl-NL" }, //Dutch
+ { 0x0414, "nb-NO" }, //Norwegian (Bokmål)
+ { 0x0415, "pl-PL" }, //Polish
+ { 0x0416, "pt-BR" }, //Portuguese
+ { 0x0417, "rm-CH" }, //Romansh
+ { 0x0418, "ro-RO" }, //Romanian
+ { 0x0419, "ru-RU" }, //Russian
+ { 0x041a, "hr-HR" }, //Croatian
+ { 0x041b, "sk-SK" }, //Slovak
+ { 0x041c, "sq-AL" }, //Albanian
+ { 0x041d, "sv-SE" }, //Swedish
+ { 0x041e, "th-TH" }, //Thai
+ { 0x041f, "tr-TR" }, //Turkish
+ { 0x0420, "ur-PK" }, //Urdu
+ { 0x0421, "id-ID" }, //Indonesian
+ { 0x0422, "uk-UA" }, //Ukrainian
+ { 0x0423, "be-BY" }, //Belarusian
+ { 0x0424, "sl-SI" }, //Slovenian
+ { 0x0425, "et-EE" }, //Estonian
+ { 0x0426, "lv-LV" }, //Latvian
+ { 0x0427, "lt-LT" }, //Lithuanian
+ { 0x0428, "tg-Cyrl-TJ" }, //Tajik (Cyrillic)
+ { 0x0429, "fa-IR" }, //Persian
+ { 0x042a, "vi-VN" }, //Vietnamese
+ { 0x042b, "hy-AM" }, //Armenian
+ { 0x042c, "az-Latn-AZ" }, //Azeri (Latin)
+ { 0x042d, "eu-ES" }, //Basque
+ { 0x042e, "hsb-DE" }, //Upper Sorbian
+ { 0x042f, "mk-MK" }, //Macedonian (FYROM)
+ { 0x0432, "tn-ZA" }, //Setswana
+ { 0x0434, "xh-ZA" }, //isiXhosa
+ { 0x0435, "zu-ZA" }, //isiZulu
+ { 0x0436, "af-ZA" }, //Afrikaans
+ { 0x0437, "ka-GE" }, //Georgian
+ { 0x0438, "fo-FO" }, //Faroese
+ { 0x0439, "hi-IN" }, //Hindi
+ { 0x043a, "mt-MT" }, //Maltese
+ { 0x043b, "se-NO" }, //Sami (Northern)
+ { 0x043e, "ms-MY" }, //Malay
+ { 0x043f, "kk-KZ" }, //Kazakh
+ { 0x0440, "ky-KG" }, //Kyrgyz
+ { 0x0441, "sw-KE" }, //Kiswahili
+ { 0x0442, "tk-TM" }, //Turkmen
+ { 0x0443, "uz-Latn-UZ" }, //Uzbek (Latin)
+ { 0x0443, "uz" }, //Uzbek
+ { 0x0444, "tt-RU" }, //Tatar
+ { 0x0445, "bn-IN" }, //Bengali
+ { 0x0446, "pa-IN" }, //Punjabi
+ { 0x0447, "gu-IN" }, //Gujarati
+ { 0x0448, "or-IN" }, //Oriya
+ { 0x0449, "ta-IN" }, //Tamil
+ { 0x044a, "te-IN" }, //Telugu
+ { 0x044b, "kn-IN" }, //Kannada
+ { 0x044c, "ml-IN" }, //Malayalam
+ { 0x044d, "as-IN" }, //Assamese
+ { 0x044e, "mr-IN" }, //Marathi
+ { 0x044f, "sa-IN" }, //Sanskrit
+ { 0x0450, "mn-Cyrl" }, //Mongolian (Cyrillic)
+ { 0x0451, "bo-CN" }, //Tibetan
+ { 0x0452, "cy-GB" }, //Welsh
+ { 0x0453, "km-KH" }, //Khmer
+ { 0x0454, "lo-LA" }, //Lao
+ { 0x0456, "gl-ES" }, //Galician
+ { 0x0457, "kok-IN" }, //Konkani
+ { 0x045a, "syr-SY" }, //Syriac
+ { 0x045b, "si-LK" }, //Sinhala
+ { 0x045d, "iu-Cans-CA" }, //Inuktitut (Syllabics)
+ { 0x045e, "am-ET" }, //Amharic
+ { 0x0461, "ne-NP" }, //Nepali
+ { 0x0462, "fy-NL" }, //Frisian
+ { 0x0463, "ps-AF" }, //Pashto
+ { 0x0464, "fil-PH" }, //Filipino
+ { 0x0465, "dv-MV" }, //Divehi
+ { 0x0468, "ha-Latn-NG" }, //Hausa (Latin)
+ { 0x046a, "yo-NG" }, //Yoruba
+ { 0x046b, "quz-BO" }, //Quechua
+ { 0x046c, "nso-ZA" }, //Sesotho sa Leboa
+ { 0x046d, "ba-RU" }, //Bashkir
+ { 0x046e, "lb-LU" }, //Luxembourgish
+ { 0x046f, "kl-GL" }, //Greenlandic
+ { 0x0470, "ig-NG" }, //Igbo
+ { 0x0478, "ii-CN" }, //Yi
+ { 0x047a, "arn-CL" }, //Mapudungun
+ { 0x047c, "moh-CA" }, //Mohawk
+ { 0x047e, "br-FR" }, //Breton
+ { 0x0480, "ug-CN" }, //Uyghur
+ { 0x0481, "mi-NZ" }, //Maori
+ { 0x0482, "oc-FR" }, //Occitan
+ { 0x0483, "co-FR" }, //Corsican
+ { 0x0484, "gsw-FR" }, //Alsatian
+ { 0x0485, "sah-RU" }, //Yakut
+ { 0x0486, "qut-GT" }, //K'iche
+ { 0x0487, "rw-RW" }, //Kinyarwanda
+ { 0x0488, "wo-SN" }, //Wolof
+ { 0x048c, "prs-AF" }, //Dari
+ { 0x0491, "gd-GB" }, //Scottish Gaelic
+ { 0x0801, "ar-IQ" }, //Arabic
+ { 0x0804, "zh-Hans" }, //Chinese (Simplified)
+ { 0x0807, "de-CH" }, //German
+ { 0x0809, "en-GB" }, //English
+ { 0x080a, "es-MX" }, //Spanish
+ { 0x080c, "fr-BE" }, //French
+ { 0x0810, "it-CH" }, //Italian
+ { 0x0813, "nl-BE" }, //Dutch
+ { 0x0814, "nn-NO" }, //Norwegian (Nynorsk)
+ { 0x0816, "pt-PT" }, //Portuguese
+ { 0x081a, "sr-Latn-CS" }, //Serbian (Latin)
+ { 0x081d, "sv-FI" }, //Swedish
+ { 0x082c, "az-Cyrl-AZ" }, //Azeri (Cyrillic)
+ { 0x082e, "dsb-DE" }, //Lower Sorbian
+ { 0x082e, "dsb" }, //Lower Sorbian
+ { 0x083b, "se-SE" }, //Sami (Northern)
+ { 0x083c, "ga-IE" }, //Irish
+ { 0x083e, "ms-BN" }, //Malay
+ { 0x0843, "uz-Cyrl-UZ" }, //Uzbek (Cyrillic)
+ { 0x0845, "bn-BD" }, //Bengali
+ { 0x0850, "mn-Mong-CN" }, //Mongolian (Traditional Mongolian)
+ { 0x085d, "iu-Latn-CA" }, //Inuktitut (Latin)
+ { 0x085f, "tzm-Latn-DZ" }, //Tamazight (Latin)
+ { 0x086b, "quz-EC" }, //Quechua
+ { 0x0c01, "ar-EG" }, //Arabic
+ { 0x0c04, "zh-Hant" }, //Chinese (Traditional)
+ { 0x0c07, "de-AT" }, //German
+ { 0x0c09, "en-AU" }, //English
+ { 0x0c0a, "es-ES" }, //Spanish
+ { 0x0c0c, "fr-CA" }, //French
+ { 0x0c1a, "sr-Cyrl-CS" }, //Serbian (Cyrillic)
+ { 0x0c3b, "se-FI" }, //Sami (Northern)
+ { 0x0c6b, "quz-PE" }, //Quechua
+ { 0x1001, "ar-LY" }, //Arabic
+ { 0x1004, "zh-SG" }, //Chinese (Simplified)
+ { 0x1007, "de-LU" }, //German
+ { 0x1009, "en-CA" }, //English
+ { 0x100a, "es-GT" }, //Spanish
+ { 0x100c, "fr-CH" }, //French
+ { 0x101a, "hr-BA" }, //Croatian (Latin)
+ { 0x103b, "smj-NO" }, //Sami (Lule)
+ { 0x1401, "ar-DZ" }, //Arabic
+ { 0x1404, "zh-MO" }, //Chinese (Traditional)
+ { 0x1407, "de-LI" }, //German
+ { 0x1409, "en-NZ" }, //English
+ { 0x140a, "es-CR" }, //Spanish
+ { 0x140c, "fr-LU" }, //French
+ { 0x141a, "bs-Latn-BA" }, //Bosnian (Latin)
+ { 0x141a, "bs" }, //Bosnian
+ { 0x143b, "smj-SE" }, //Sami (Lule)
+ { 0x143b, "smj" }, //Sami (Lule)
+ { 0x1801, "ar-MA" }, //Arabic
+ { 0x1809, "en-IE" }, //English
+ { 0x180a, "es-PA" }, //Spanish
+ { 0x180c, "fr-MC" }, //French
+ { 0x181a, "sr-Latn-BA" }, //Serbian (Latin)
+ { 0x183b, "sma-NO" }, //Sami (Southern)
+ { 0x1c01, "ar-TN" }, //Arabic
+ { 0x1c09, "en-ZA" }, //English
+ { 0x1c0a, "es-DO" }, //Spanish
+ { 0x1c1a, "sr-Cyrl-BA" }, //Serbian (Cyrillic)
+ { 0x1c3b, "sma-SE" }, //Sami (Southern)
+ { 0x1c3b, "sma" }, //Sami (Southern)
+ { 0x2001, "ar-OM" }, //Arabic
+ { 0x2009, "en-JM" }, //English
+ { 0x200a, "es-VE" }, //Spanish
+ { 0x201a, "bs-Cyrl-BA" }, //Bosnian (Cyrillic)
+ { 0x201a, "bs-Cyrl" }, //Bosnian (Cyrillic)
+ { 0x203b, "sms-FI" }, //Sami (Skolt)
+ { 0x203b, "sms" }, //Sami (Skolt)
+ { 0x2401, "ar-YE" }, //Arabic
+ { 0x2409, "en-029" }, //English
+ { 0x240a, "es-CO" }, //Spanish
+ { 0x241a, "sr-Latn-RS" }, //Serbian (Latin)
+ { 0x243b, "smn-FI" }, //Sami (Inari)
+ { 0x2801, "ar-SY" }, //Arabic
+ { 0x2809, "en-BZ" }, //English
+ { 0x280a, "es-PE" }, //Spanish
+ { 0x281a, "sr-Cyrl-RS" }, //Serbian (Cyrillic)
+ { 0x2c01, "ar-JO" }, //Arabic
+ { 0x2c09, "en-TT" }, //English
+ { 0x2c0a, "es-AR" }, //Spanish
+ { 0x2c1a, "sr-Latn-ME" }, //Serbian (Latin)
+ { 0x3001, "ar-LB" }, //Arabic
+ { 0x3009, "en-ZW" }, //English
+ { 0x300a, "es-EC" }, //Spanish
+ { 0x301a, "sr-Cyrl-ME" }, //Serbian (Cyrillic)
+ { 0x3401, "ar-KW" }, //Arabic
+ { 0x3409, "en-PH" }, //English
+ { 0x340a, "es-CL" }, //Spanish
+ { 0x3801, "ar-AE" }, //Arabic
+ { 0x380a, "es-UY" }, //Spanish
+ { 0x3c01, "ar-BH" }, //Arabic
+ { 0x3c0a, "es-PY" }, //Spanish
+ { 0x4001, "ar-QA" }, //Arabic
+ { 0x4009, "en-IN" }, //English
+ { 0x400a, "es-BO" }, //Spanish
+ { 0x4409, "en-MY" }, //English
+ { 0x440a, "es-SV" }, //Spanish
+ { 0x4809, "en-SG" }, //English
+ { 0x480a, "es-HN" }, //Spanish
+ { 0x4c0a, "es-NI" }, //Spanish
+ { 0x500a, "es-PR" }, //Spanish
+ { 0x540a, "es-US" }, //Spanish
+};
+
+namespace {
+bool BCP47FromLanguageIdLess(const BCP47FromLanguageId& a, const BCP47FromLanguageId& b) {
+ return a.languageID < b.languageID;
+}
+} // namespace
+
+bool SkOTTableName::Iterator::next(SkOTTableName::Iterator::Record& record) {
+ SkOTTableName nameTable;
+ if (fNameTableSize < sizeof(nameTable)) {
+ return false;
+ }
+ memcpy(&nameTable, fNameTable, sizeof(nameTable));
+
+ const uint8_t* nameRecords = fNameTable + sizeof(nameTable);
+ const size_t nameRecordsSize = fNameTableSize - sizeof(nameTable);
+
+ const size_t stringTableOffset = SkEndian_SwapBE16(nameTable.stringOffset);
+ if (fNameTableSize < stringTableOffset) {
+ return false;
+ }
+ const uint8_t* stringTable = fNameTable + stringTableOffset;
+ const size_t stringTableSize = fNameTableSize - stringTableOffset;
+
+ // Find the next record which matches the requested type.
+ SkOTTableName::Record nameRecord;
+ const size_t nameRecordsCount = SkEndian_SwapBE16(nameTable.count);
+ const size_t nameRecordsMax = std::min(nameRecordsCount, nameRecordsSize / sizeof(nameRecord));
+ do {
+ if (fIndex >= nameRecordsMax) {
+ return false;
+ }
+
+ memcpy(&nameRecord, nameRecords + sizeof(nameRecord)*fIndex, sizeof(nameRecord));
+ ++fIndex;
+ } while (fType != -1 && nameRecord.nameID.fontSpecific != fType);
+
+ record.type = nameRecord.nameID.fontSpecific;
+
+ // Decode the name into UTF-8.
+ const size_t nameOffset = SkEndian_SwapBE16(nameRecord.offset);
+ const size_t nameLength = SkEndian_SwapBE16(nameRecord.length);
+ if (stringTableSize < nameOffset + nameLength) {
+ return false; // continue?
+ }
+ const uint8_t* nameString = stringTable + nameOffset;
+ switch (nameRecord.platformID.value) {
+ case SkOTTableName::Record::PlatformID::Windows:
+ if (SkOTTableName::Record::EncodingID::Windows::UnicodeBMPUCS2
+ != nameRecord.encodingID.windows.value
+ && SkOTTableName::Record::EncodingID::Windows::UnicodeUCS4
+ != nameRecord.encodingID.windows.value
+ && SkOTTableName::Record::EncodingID::Windows::Symbol
+ != nameRecord.encodingID.windows.value)
+ {
+ record.name.reset();
+ break; // continue?
+ }
+ [[fallthrough]];
+ case SkOTTableName::Record::PlatformID::Unicode:
+ case SkOTTableName::Record::PlatformID::ISO:
+ SkString_from_UTF16BE(nameString, nameLength, record.name);
+ break;
+
+ case SkOTTableName::Record::PlatformID::Macintosh:
+ // TODO: need better decoding, especially on Mac.
+ if (SkOTTableName::Record::EncodingID::Macintosh::Roman
+ != nameRecord.encodingID.macintosh.value)
+ {
+ record.name.reset();
+ break; // continue?
+ }
+ SkStringFromMacRoman(nameString, nameLength, record.name);
+ break;
+
+ case SkOTTableName::Record::PlatformID::Custom:
+ // These should never appear in a 'name' table.
+ default:
+ SkASSERT(false);
+ record.name.reset();
+ break; // continue?
+ }
+
+ // Determine the language.
+ const uint16_t languageID = SkEndian_SwapBE16(nameRecord.languageID.languageTagID);
+
+ // Handle format 1 languages.
+ if (SkOTTableName::format_1 == nameTable.format && languageID >= 0x8000) {
+ const uint16_t languageTagRecordIndex = languageID - 0x8000;
+
+ if (nameRecordsSize < sizeof(nameRecord)*nameRecordsCount) {
+ return false; //"und" or break?
+ }
+ const uint8_t* format1extData = nameRecords + sizeof(nameRecord)*nameRecordsCount;
+ size_t format1extSize = nameRecordsSize - sizeof(nameRecord)*nameRecordsCount;
+ SkOTTableName::Format1Ext format1ext;
+ if (format1extSize < sizeof(format1ext)) {
+ return false; // "und" or break?
+ }
+ memcpy(&format1ext, format1extData, sizeof(format1ext));
+
+ const uint8_t* languageTagRecords = format1extData + sizeof(format1ext);
+ size_t languageTagRecordsSize = format1extSize - sizeof(format1ext);
+ if (languageTagRecordIndex < SkEndian_SwapBE16(format1ext.langTagCount)) {
+ SkOTTableName::Format1Ext::LangTagRecord languageTagRecord;
+ if (languageTagRecordsSize < sizeof(languageTagRecord)*(languageTagRecordIndex+1)) {
+ return false; // "und"?
+ }
+ const uint8_t* languageTagData = languageTagRecords
+ + sizeof(languageTagRecord)*languageTagRecordIndex;
+ memcpy(&languageTagRecord, languageTagData, sizeof(languageTagRecord));
+
+ uint16_t languageOffset = SkEndian_SwapBE16(languageTagRecord.offset);
+ uint16_t languageLength = SkEndian_SwapBE16(languageTagRecord.length);
+
+ if (fNameTableSize < stringTableOffset + languageOffset + languageLength) {
+ return false; // "und"?
+ }
+ const uint8_t* languageString = stringTable + languageOffset;
+ SkString_from_UTF16BE(languageString, languageLength, record.language);
+ return true;
+ }
+ }
+
+ // Handle format 0 languages, translating them into BCP 47.
+ const BCP47FromLanguageId target = { languageID, "" };
+ int languageIndex = SkTSearch<BCP47FromLanguageId, BCP47FromLanguageIdLess>(
+ BCP47FromLanguageID, std::size(BCP47FromLanguageID), target, sizeof(target));
+ if (languageIndex >= 0) {
+ record.language = BCP47FromLanguageID[languageIndex].bcp47;
+ return true;
+ }
+
+ // Unknown language, return the BCP 47 code 'und' for 'undetermined'.
+ record.language = "und";
+ return true;
+}
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_name.h b/gfx/skia/skia/src/sfnt/SkOTTable_name.h
new file mode 100644
index 0000000000..271100e2cf
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_name.h
@@ -0,0 +1,577 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_name_DEFINED
+#define SkOTTable_name_DEFINED
+
+#include "include/core/SkString.h"
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTableName {
+ static const SK_OT_CHAR TAG0 = 'n';
+ static const SK_OT_CHAR TAG1 = 'a';
+ static const SK_OT_CHAR TAG2 = 'm';
+ static const SK_OT_CHAR TAG3 = 'e';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTableName>::value;
+
+ SK_OT_USHORT format;
+ static const SK_OT_USHORT format_0 = SkTEndian_SwapBE16(0);
+ /** Format 1 was added in OpenType 1.6 (April 2009). */
+ static const SK_OT_USHORT format_1 = SkTEndian_SwapBE16(1);
+
+ /** The number of name records which follow. */
+ SK_OT_USHORT count;
+
+ /** Offset in SK_OT_BYTEs to start of string storage area (from start of table). */
+ SK_OT_USHORT stringOffset;
+
+ struct Record {
+ /** The platform ID specifies how to interpret the encoding and language ID. */
+ struct PlatformID {
+ enum Value : SK_OT_USHORT {
+ Unicode = SkTEndian_SwapBE16(0),
+ Macintosh = SkTEndian_SwapBE16(1),
+ ISO = SkTEndian_SwapBE16(2), // Deprecated, use Unicode instead.
+ Windows = SkTEndian_SwapBE16(3),
+ Custom = SkTEndian_SwapBE16(4),
+ } value;
+ } platformID;
+
+ union EncodingID {
+ SK_OT_USHORT custom;
+
+ /** Always UTF-16BE. */
+ struct Unicode {
+ enum Value : SK_OT_USHORT {
+ Unicode10 = SkTEndian_SwapBE16(0),
+ Unicode11 = SkTEndian_SwapBE16(1),
+ ISO10646 = SkTEndian_SwapBE16(2), //deprecated, use Unicode11
+ Unicode20BMP = SkTEndian_SwapBE16(3),
+ Unicode20 = SkTEndian_SwapBE16(4),
+ UnicodeVariationSequences = SkTEndian_SwapBE16(5),
+ UnicodeFull = SkTEndian_SwapBE16(6),
+ } value;
+ } unicode;
+
+ /** These are Mac encodings, see http://www.unicode.org/Public/MAPPINGS/VENDORS/APPLE/
+ * for their mappings to unicode.
+ * Name table strings using PlatformID::Macintosh must use Roman.
+ */
+ struct Macintosh {
+ enum Value : SK_OT_USHORT {
+ Roman = SkTEndian_SwapBE16(0),
+ Japanese = SkTEndian_SwapBE16(1),
+ ChineseTraditional = SkTEndian_SwapBE16(2),
+ Korean = SkTEndian_SwapBE16(3),
+ Arabic = SkTEndian_SwapBE16(4),
+ Hebrew = SkTEndian_SwapBE16(5),
+ Greek = SkTEndian_SwapBE16(6),
+ Russian = SkTEndian_SwapBE16(7),
+ RSymbol = SkTEndian_SwapBE16(8),
+ Devanagari = SkTEndian_SwapBE16(9),
+ Gurmukhi = SkTEndian_SwapBE16(10),
+ Gujarati = SkTEndian_SwapBE16(11),
+ Oriya = SkTEndian_SwapBE16(12),
+ Bengali = SkTEndian_SwapBE16(13),
+ Tamil = SkTEndian_SwapBE16(14),
+ Telugu = SkTEndian_SwapBE16(15),
+ Kannada = SkTEndian_SwapBE16(16),
+ Malayalam = SkTEndian_SwapBE16(17),
+ Sinhalese = SkTEndian_SwapBE16(18),
+ Burmese = SkTEndian_SwapBE16(19),
+ Khmer = SkTEndian_SwapBE16(20),
+ Thai = SkTEndian_SwapBE16(21),
+ Laotian = SkTEndian_SwapBE16(22),
+ Georgian = SkTEndian_SwapBE16(23),
+ Armenian = SkTEndian_SwapBE16(24),
+ ChineseSimplified = SkTEndian_SwapBE16(25),
+ Tibetan = SkTEndian_SwapBE16(26),
+ Mongolian = SkTEndian_SwapBE16(27),
+ Geez = SkTEndian_SwapBE16(28),
+ Slavic = SkTEndian_SwapBE16(29),
+ Vietnamese = SkTEndian_SwapBE16(30),
+ Sindhi = SkTEndian_SwapBE16(31),
+ Uninterpreted = SkTEndian_SwapBE16(32),
+ } value;
+ } macintosh;
+
+ /** Deprecated, use Unicode instead. */
+ struct ISO {
+ enum Value : SK_OT_USHORT {
+ ASCII7 = SkTEndian_SwapBE16(0),
+ ISO10646 = SkTEndian_SwapBE16(1),
+ ISO88591 = SkTEndian_SwapBE16(2),
+ } value;
+ } iso;
+
+ /** Name table strings using PlatformID::Windows must use Symbol, UnicodeBMPUCS2, or
+ * UnicodeUCS4. Symbol and UnicodeBMPUCS2 are both UCS2-BE, UnicodeUCS4 is actually
+ * UTF-16BE.
+ */
+ struct Windows {
+ enum Value : SK_OT_USHORT {
+ Symbol = SkTEndian_SwapBE16(0), // UCS2-BE, but don't use this font to display it's own name.
+ UnicodeBMPUCS2 = SkTEndian_SwapBE16(1), // UCS2-BE, Windows default
+ ShiftJIS = SkTEndian_SwapBE16(2),
+ PRC = SkTEndian_SwapBE16(3),
+ Big5 = SkTEndian_SwapBE16(4),
+ Wansung = SkTEndian_SwapBE16(5),
+ Johab = SkTEndian_SwapBE16(6),
+ UnicodeUCS4 = SkTEndian_SwapBE16(10), // UTF-16BE. It means UCS4 in charmaps.
+ } value;
+ } windows;
+ } encodingID;
+
+ /** LanguageIDs <= 0x7FFF are predefined.
+ * LanguageIDs > 0x7FFF are indexes into the langTagRecord array
+ * (in format 1 name tables, see SkOTTableName::format).
+ */
+ union LanguageID {
+ /** A value greater than 0x7FFF.
+ * languageTagID - 0x8000 is an index into the langTagRecord array.
+ */
+ SK_OT_USHORT languageTagID;
+
+ /** These are known as Language Designators.
+ * Apple now uses BCP 47 (post OSX10.4), so there will be no new entries.
+ */
+ struct Macintosh {
+ enum Value : SK_OT_USHORT {
+ English = SkTEndian_SwapBE16(0),
+ French = SkTEndian_SwapBE16(1),
+ German = SkTEndian_SwapBE16(2),
+ Italian = SkTEndian_SwapBE16(3),
+ Dutch = SkTEndian_SwapBE16(4),
+ Swedish = SkTEndian_SwapBE16(5),
+ Spanish = SkTEndian_SwapBE16(6),
+ Danish = SkTEndian_SwapBE16(7),
+ Portuguese = SkTEndian_SwapBE16(8),
+ Norwegian = SkTEndian_SwapBE16(9),
+ Hebrew = SkTEndian_SwapBE16(10),
+ Japanese = SkTEndian_SwapBE16(11),
+ Arabic = SkTEndian_SwapBE16(12),
+ Finnish = SkTEndian_SwapBE16(13),
+ Greek = SkTEndian_SwapBE16(14),
+ Icelandic = SkTEndian_SwapBE16(15),
+ Maltese = SkTEndian_SwapBE16(16),
+ Turkish = SkTEndian_SwapBE16(17),
+ Croatian = SkTEndian_SwapBE16(18),
+ ChineseTraditional = SkTEndian_SwapBE16(19),
+ Urdu = SkTEndian_SwapBE16(20),
+ Hindi = SkTEndian_SwapBE16(21),
+ Thai = SkTEndian_SwapBE16(22),
+ Korean = SkTEndian_SwapBE16(23),
+ Lithuanian = SkTEndian_SwapBE16(24),
+ Polish = SkTEndian_SwapBE16(25),
+ Hungarian = SkTEndian_SwapBE16(26),
+ Estonian = SkTEndian_SwapBE16(27),
+ Latvian = SkTEndian_SwapBE16(28),
+ Sami = SkTEndian_SwapBE16(29),
+ Faroese = SkTEndian_SwapBE16(30),
+ Farsi_Persian = SkTEndian_SwapBE16(31),
+ Russian = SkTEndian_SwapBE16(32),
+ ChineseSimplified = SkTEndian_SwapBE16(33),
+ Flemish = SkTEndian_SwapBE16(34),
+ IrishGaelic = SkTEndian_SwapBE16(35),
+ Albanian = SkTEndian_SwapBE16(36),
+ Romanian = SkTEndian_SwapBE16(37),
+ Czech = SkTEndian_SwapBE16(38),
+ Slovak = SkTEndian_SwapBE16(39),
+ Slovenian = SkTEndian_SwapBE16(40),
+ Yiddish = SkTEndian_SwapBE16(41),
+ Serbian = SkTEndian_SwapBE16(42),
+ Macedonian = SkTEndian_SwapBE16(43),
+ Bulgarian = SkTEndian_SwapBE16(44),
+ Ukrainian = SkTEndian_SwapBE16(45),
+ Byelorussian = SkTEndian_SwapBE16(46),
+ Uzbek = SkTEndian_SwapBE16(47),
+ Kazakh = SkTEndian_SwapBE16(48),
+ AzerbaijaniCyrillic = SkTEndian_SwapBE16(49),
+ AzerbaijaniArabic = SkTEndian_SwapBE16(50),
+ Armenian = SkTEndian_SwapBE16(51),
+ Georgian = SkTEndian_SwapBE16(52),
+ Moldavian = SkTEndian_SwapBE16(53),
+ Kirghiz = SkTEndian_SwapBE16(54),
+ Tajiki = SkTEndian_SwapBE16(55),
+ Turkmen = SkTEndian_SwapBE16(56),
+ MongolianTraditional = SkTEndian_SwapBE16(57),
+ MongolianCyrillic = SkTEndian_SwapBE16(58),
+ Pashto = SkTEndian_SwapBE16(59),
+ Kurdish = SkTEndian_SwapBE16(60),
+ Kashmiri = SkTEndian_SwapBE16(61),
+ Sindhi = SkTEndian_SwapBE16(62),
+ Tibetan = SkTEndian_SwapBE16(63),
+ Nepali = SkTEndian_SwapBE16(64),
+ Sanskrit = SkTEndian_SwapBE16(65),
+ Marathi = SkTEndian_SwapBE16(66),
+ Bengali = SkTEndian_SwapBE16(67),
+ Assamese = SkTEndian_SwapBE16(68),
+ Gujarati = SkTEndian_SwapBE16(69),
+ Punjabi = SkTEndian_SwapBE16(70),
+ Oriya = SkTEndian_SwapBE16(71),
+ Malayalam = SkTEndian_SwapBE16(72),
+ Kannada = SkTEndian_SwapBE16(73),
+ Tamil = SkTEndian_SwapBE16(74),
+ Telugu = SkTEndian_SwapBE16(75),
+ Sinhalese = SkTEndian_SwapBE16(76),
+ Burmese = SkTEndian_SwapBE16(77),
+ Khmer = SkTEndian_SwapBE16(78),
+ Lao = SkTEndian_SwapBE16(79),
+ Vietnamese = SkTEndian_SwapBE16(80),
+ Indonesian = SkTEndian_SwapBE16(81),
+ Tagalong = SkTEndian_SwapBE16(82),
+ MalayRoman = SkTEndian_SwapBE16(83),
+ MalayArabic = SkTEndian_SwapBE16(84),
+ Amharic = SkTEndian_SwapBE16(85),
+ Tigrinya = SkTEndian_SwapBE16(86),
+ Galla = SkTEndian_SwapBE16(87),
+ Somali = SkTEndian_SwapBE16(88),
+ Swahili = SkTEndian_SwapBE16(89),
+ Kinyarwanda_Ruanda = SkTEndian_SwapBE16(90),
+ Rundi = SkTEndian_SwapBE16(91),
+ Nyanja_Chewa = SkTEndian_SwapBE16(92),
+ Malagasy = SkTEndian_SwapBE16(93),
+ Esperanto = SkTEndian_SwapBE16(94),
+ Welsh = SkTEndian_SwapBE16(128),
+ Basque = SkTEndian_SwapBE16(129),
+ Catalan = SkTEndian_SwapBE16(130),
+ Latin = SkTEndian_SwapBE16(131),
+ Quenchua = SkTEndian_SwapBE16(132),
+ Guarani = SkTEndian_SwapBE16(133),
+ Aymara = SkTEndian_SwapBE16(134),
+ Tatar = SkTEndian_SwapBE16(135),
+ Uighur = SkTEndian_SwapBE16(136),
+ Dzongkha = SkTEndian_SwapBE16(137),
+ JavaneseRoman = SkTEndian_SwapBE16(138),
+ SundaneseRoman = SkTEndian_SwapBE16(139),
+ Galician = SkTEndian_SwapBE16(140),
+ Afrikaans = SkTEndian_SwapBE16(141),
+ Breton = SkTEndian_SwapBE16(142),
+ Inuktitut = SkTEndian_SwapBE16(143),
+ ScottishGaelic = SkTEndian_SwapBE16(144),
+ ManxGaelic = SkTEndian_SwapBE16(145),
+ IrishGaelicWithLenition = SkTEndian_SwapBE16(146),
+ Tongan = SkTEndian_SwapBE16(147),
+ GreekPolytonic = SkTEndian_SwapBE16(148),
+ Greenlandic = SkTEndian_SwapBE16(149),
+ AzerbaijaniRoman = SkTEndian_SwapBE16(150),
+ } value;
+ } macintosh;
+
+ /** These are known as LCIDs.
+ * On Windows the current set can be had from EnumSystemLocalesEx and LocaleNameToLCID.
+ */
+ struct Windows {
+ enum Value : SK_OT_USHORT {
+ Afrikaans_SouthAfrica = SkTEndian_SwapBE16(0x0436),
+ Albanian_Albania = SkTEndian_SwapBE16(0x041C),
+ Alsatian_France = SkTEndian_SwapBE16(0x0484),
+ Amharic_Ethiopia = SkTEndian_SwapBE16(0x045E),
+ Arabic_Algeria = SkTEndian_SwapBE16(0x1401),
+ Arabic_Bahrain = SkTEndian_SwapBE16(0x3C01),
+ Arabic_Egypt = SkTEndian_SwapBE16(0x0C01),
+ Arabic_Iraq = SkTEndian_SwapBE16(0x0801),
+ Arabic_Jordan = SkTEndian_SwapBE16(0x2C01),
+ Arabic_Kuwait = SkTEndian_SwapBE16(0x3401),
+ Arabic_Lebanon = SkTEndian_SwapBE16(0x3001),
+ Arabic_Libya = SkTEndian_SwapBE16(0x1001),
+ Arabic_Morocco = SkTEndian_SwapBE16(0x1801),
+ Arabic_Oman = SkTEndian_SwapBE16(0x2001),
+ Arabic_Qatar = SkTEndian_SwapBE16(0x4001),
+ Arabic_SaudiArabia = SkTEndian_SwapBE16(0x0401),
+ Arabic_Syria = SkTEndian_SwapBE16(0x2801),
+ Arabic_Tunisia = SkTEndian_SwapBE16(0x1C01),
+ Arabic_UAE = SkTEndian_SwapBE16(0x3801),
+ Arabic_Yemen = SkTEndian_SwapBE16(0x2401),
+ Armenian_Armenia = SkTEndian_SwapBE16(0x042B),
+ Assamese_India = SkTEndian_SwapBE16(0x044D),
+ AzeriCyrillic_Azerbaijan = SkTEndian_SwapBE16(0x082C),
+ AzeriLatin_Azerbaijan = SkTEndian_SwapBE16(0x042C),
+ Bashkir_Russia = SkTEndian_SwapBE16(0x046D),
+ Basque_Basque = SkTEndian_SwapBE16(0x042D),
+ Belarusian_Belarus = SkTEndian_SwapBE16(0x0423),
+ Bengali_Bangladesh = SkTEndian_SwapBE16(0x0845),
+ Bengali_India = SkTEndian_SwapBE16(0x0445),
+ BosnianCyrillic_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x201A),
+ BosnianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x141A),
+ Breton_France = SkTEndian_SwapBE16(0x047E),
+ Bulgarian_Bulgaria = SkTEndian_SwapBE16(0x0402),
+ Catalan_Catalan = SkTEndian_SwapBE16(0x0403),
+ Chinese_HongKongSAR = SkTEndian_SwapBE16(0x0C04),
+ Chinese_MacaoSAR = SkTEndian_SwapBE16(0x1404),
+ Chinese_PeoplesRepublicOfChina = SkTEndian_SwapBE16(0x0804),
+ Chinese_Singapore = SkTEndian_SwapBE16(0x1004),
+ Chinese_Taiwan = SkTEndian_SwapBE16(0x0404),
+ Corsican_France = SkTEndian_SwapBE16(0x0483),
+ Croatian_Croatia = SkTEndian_SwapBE16(0x041A),
+ CroatianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x101A),
+ Czech_CzechRepublic = SkTEndian_SwapBE16(0x0405),
+ Danish_Denmark = SkTEndian_SwapBE16(0x0406),
+ Dari_Afghanistan = SkTEndian_SwapBE16(0x048C),
+ Divehi_Maldives = SkTEndian_SwapBE16(0x0465),
+ Dutch_Belgium = SkTEndian_SwapBE16(0x0813),
+ Dutch_Netherlands = SkTEndian_SwapBE16(0x0413),
+ English_Australia = SkTEndian_SwapBE16(0x0C09),
+ English_Belize = SkTEndian_SwapBE16(0x2809),
+ English_Canada = SkTEndian_SwapBE16(0x1009),
+ English_Caribbean = SkTEndian_SwapBE16(0x2409),
+ English_India = SkTEndian_SwapBE16(0x4009),
+ English_Ireland = SkTEndian_SwapBE16(0x1809),
+ English_Jamaica = SkTEndian_SwapBE16(0x2009),
+ English_Malaysia = SkTEndian_SwapBE16(0x4409),
+ English_NewZealand = SkTEndian_SwapBE16(0x1409),
+ English_RepublicOfThePhilippines = SkTEndian_SwapBE16(0x3409),
+ English_Singapore = SkTEndian_SwapBE16(0x4809),
+ English_SouthAfrica = SkTEndian_SwapBE16(0x1C09),
+ English_TrinidadAndTobago = SkTEndian_SwapBE16(0x2C09),
+ English_UnitedKingdom = SkTEndian_SwapBE16(0x0809),
+ English_UnitedStates = SkTEndian_SwapBE16(0x0409),
+ English_Zimbabwe = SkTEndian_SwapBE16(0x3009),
+ Estonian_Estonia = SkTEndian_SwapBE16(0x0425),
+ Faroese_FaroeIslands = SkTEndian_SwapBE16(0x0438),
+ Filipino_Philippines = SkTEndian_SwapBE16(0x0464),
+ Finnish_Finland = SkTEndian_SwapBE16(0x040B),
+ French_Belgium = SkTEndian_SwapBE16(0x080C),
+ French_Canada = SkTEndian_SwapBE16(0x0C0C),
+ French_France = SkTEndian_SwapBE16(0x040C),
+ French_Luxembourg = SkTEndian_SwapBE16(0x140c),
+ French_PrincipalityOfMonoco = SkTEndian_SwapBE16(0x180C),
+ French_Switzerland = SkTEndian_SwapBE16(0x100C),
+ Frisian_Netherlands = SkTEndian_SwapBE16(0x0462),
+ Galician_Galician = SkTEndian_SwapBE16(0x0456),
+ Georgian_Georgia = SkTEndian_SwapBE16(0x0437),
+ German_Austria = SkTEndian_SwapBE16(0x0C07),
+ German_Germany = SkTEndian_SwapBE16(0x0407),
+ German_Liechtenstein = SkTEndian_SwapBE16(0x1407),
+ German_Luxembourg = SkTEndian_SwapBE16(0x1007),
+ German_Switzerland = SkTEndian_SwapBE16(0x0807),
+ Greek_Greece = SkTEndian_SwapBE16(0x0408),
+ Greenlandic_Greenland = SkTEndian_SwapBE16(0x046F),
+ Gujarati_India = SkTEndian_SwapBE16(0x0447),
+ HausaLatin_Nigeria = SkTEndian_SwapBE16(0x0468),
+ Hebrew_Israel = SkTEndian_SwapBE16(0x040D),
+ Hindi_India = SkTEndian_SwapBE16(0x0439),
+ Hungarian_Hungary = SkTEndian_SwapBE16(0x040E),
+ Icelandic_Iceland = SkTEndian_SwapBE16(0x040F),
+ Igbo_Nigeria = SkTEndian_SwapBE16(0x0470),
+ Indonesian_Indonesia = SkTEndian_SwapBE16(0x0421),
+ Inuktitut_Canada = SkTEndian_SwapBE16(0x045D),
+ InuktitutLatin_Canada = SkTEndian_SwapBE16(0x085D),
+ Irish_Ireland = SkTEndian_SwapBE16(0x083C),
+ isiXhosa_SouthAfrica = SkTEndian_SwapBE16(0x0434),
+ isiZulu_SouthAfrica = SkTEndian_SwapBE16(0x0435),
+ Italian_Italy = SkTEndian_SwapBE16(0x0410),
+ Italian_Switzerland = SkTEndian_SwapBE16(0x0810),
+ Japanese_Japan = SkTEndian_SwapBE16(0x0411),
+ Kannada_India = SkTEndian_SwapBE16(0x044B),
+ Kazakh_Kazakhstan = SkTEndian_SwapBE16(0x043F),
+ Khmer_Cambodia = SkTEndian_SwapBE16(0x0453),
+ Kiche_Guatemala = SkTEndian_SwapBE16(0x0486),
+ Kinyarwanda_Rwanda = SkTEndian_SwapBE16(0x0487),
+ Kiswahili_Kenya = SkTEndian_SwapBE16(0x0441),
+ Konkani_India = SkTEndian_SwapBE16(0x0457),
+ Korean_Korea = SkTEndian_SwapBE16(0x0412),
+ Kyrgyz_Kyrgyzstan = SkTEndian_SwapBE16(0x0440),
+ Lao_LaoPDR = SkTEndian_SwapBE16(0x0454),
+ Latvian_Latvia = SkTEndian_SwapBE16(0x0426),
+ Lithuanian_Lithuania = SkTEndian_SwapBE16(0x0427),
+ LowerSorbian_Germany = SkTEndian_SwapBE16(0x082E),
+ Luxembourgish_Luxembourg = SkTEndian_SwapBE16(0x046E),
+ MacedonianFYROM_FormerYugoslavRepublicOfMacedonia = SkTEndian_SwapBE16(0x042F),
+ Malay_BruneiDarussalam = SkTEndian_SwapBE16(0x083E),
+ Malay_Malaysia = SkTEndian_SwapBE16(0x043E),
+ Malayalam_India = SkTEndian_SwapBE16(0x044C),
+ Maltese_Malta = SkTEndian_SwapBE16(0x043A),
+ Maori_NewZealand = SkTEndian_SwapBE16(0x0481),
+ Mapudungun_Chile = SkTEndian_SwapBE16(0x047A),
+ Marathi_India = SkTEndian_SwapBE16(0x044E),
+ Mohawk_Mohawk = SkTEndian_SwapBE16(0x047C),
+ MongolianCyrillic_Mongolia = SkTEndian_SwapBE16(0x0450),
+ MongolianTraditional_PeoplesRepublicOfChina = SkTEndian_SwapBE16(0x0850),
+ Nepali_Nepal = SkTEndian_SwapBE16(0x0461),
+ NorwegianBokmal_Norway = SkTEndian_SwapBE16(0x0414),
+ NorwegianNynorsk_Norway = SkTEndian_SwapBE16(0x0814),
+ Occitan_France = SkTEndian_SwapBE16(0x0482),
+ Odia_India = SkTEndian_SwapBE16(0x0448),
+ Pashto_Afghanistan = SkTEndian_SwapBE16(0x0463),
+ Polish_Poland = SkTEndian_SwapBE16(0x0415),
+ Portuguese_Brazil = SkTEndian_SwapBE16(0x0416),
+ Portuguese_Portugal = SkTEndian_SwapBE16(0x0816),
+ Punjabi_India = SkTEndian_SwapBE16(0x0446),
+ Quechua_Bolivia = SkTEndian_SwapBE16(0x046B),
+ Quechua_Ecuador = SkTEndian_SwapBE16(0x086B),
+ Quechua_Peru = SkTEndian_SwapBE16(0x0C6B),
+ Romanian_Romania = SkTEndian_SwapBE16(0x0418),
+ Romansh_Switzerland = SkTEndian_SwapBE16(0x0417),
+ Russian_Russia = SkTEndian_SwapBE16(0x0419),
+ SamiInari_Finland = SkTEndian_SwapBE16(0x243B),
+ SamiLule_Norway = SkTEndian_SwapBE16(0x103B),
+ SamiLule_Sweden = SkTEndian_SwapBE16(0x143B),
+ SamiNorthern_Finland = SkTEndian_SwapBE16(0x0C3B),
+ SamiNorthern_Norway = SkTEndian_SwapBE16(0x043B),
+ SamiNorthern_Sweden = SkTEndian_SwapBE16(0x083B),
+ SamiSkolt_Finland = SkTEndian_SwapBE16(0x203B),
+ SamiSouthern_Norway = SkTEndian_SwapBE16(0x183B),
+ SamiSouthern_Sweden = SkTEndian_SwapBE16(0x1C3B),
+ Sanskrit_India = SkTEndian_SwapBE16(0x044F),
+ SerbianCyrillic_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x1C1A),
+ SerbianCyrillic_Serbia = SkTEndian_SwapBE16(0x0C1A),
+ SerbianLatin_BosniaAndHerzegovina = SkTEndian_SwapBE16(0x181A),
+ SerbianLatin_Serbia = SkTEndian_SwapBE16(0x081A),
+ SesothoSaLeboa_SouthAfrica = SkTEndian_SwapBE16(0x046C),
+ Setswana_SouthAfrica = SkTEndian_SwapBE16(0x0432),
+ Sinhala_SriLanka = SkTEndian_SwapBE16(0x045B),
+ Slovak_Slovakia = SkTEndian_SwapBE16(0x041B),
+ Slovenian_Slovenia = SkTEndian_SwapBE16(0x0424),
+ Spanish_Argentina = SkTEndian_SwapBE16(0x2C0A),
+ Spanish_Bolivia = SkTEndian_SwapBE16(0x400A),
+ Spanish_Chile = SkTEndian_SwapBE16(0x340A),
+ Spanish_Colombia = SkTEndian_SwapBE16(0x240A),
+ Spanish_CostaRica = SkTEndian_SwapBE16(0x140A),
+ Spanish_DominicanRepublic = SkTEndian_SwapBE16(0x1C0A),
+ Spanish_Ecuador = SkTEndian_SwapBE16(0x300A),
+ Spanish_ElSalvador = SkTEndian_SwapBE16(0x440A),
+ Spanish_Guatemala = SkTEndian_SwapBE16(0x100A),
+ Spanish_Honduras = SkTEndian_SwapBE16(0x480A),
+ Spanish_Mexico = SkTEndian_SwapBE16(0x080A),
+ Spanish_Nicaragua = SkTEndian_SwapBE16(0x4C0A),
+ Spanish_Panama = SkTEndian_SwapBE16(0x180A),
+ Spanish_Paraguay = SkTEndian_SwapBE16(0x3C0A),
+ Spanish_Peru = SkTEndian_SwapBE16(0x280A),
+ Spanish_PuertoRico = SkTEndian_SwapBE16(0x500A),
+ SpanishModernSort_Spain = SkTEndian_SwapBE16(0x0C0A),
+ SpanishTraditionalSort_Spain = SkTEndian_SwapBE16(0x040A),
+ Spanish_UnitedStates = SkTEndian_SwapBE16(0x540A),
+ Spanish_Uruguay = SkTEndian_SwapBE16(0x380A),
+ Spanish_Venezuela = SkTEndian_SwapBE16(0x200A),
+ Sweden_Finland = SkTEndian_SwapBE16(0x081D),
+ Swedish_Sweden = SkTEndian_SwapBE16(0x041D),
+ Syriac_Syria = SkTEndian_SwapBE16(0x045A),
+ TajikCyrillic_Tajikistan = SkTEndian_SwapBE16(0x0428),
+ TamazightLatin_Algeria = SkTEndian_SwapBE16(0x085F),
+ Tamil_India = SkTEndian_SwapBE16(0x0449),
+ Tatar_Russia = SkTEndian_SwapBE16(0x0444),
+ Telugu_India = SkTEndian_SwapBE16(0x044A),
+ Thai_Thailand = SkTEndian_SwapBE16(0x041E),
+ Tibetan_PRC = SkTEndian_SwapBE16(0x0451),
+ Turkish_Turkey = SkTEndian_SwapBE16(0x041F),
+ Turkmen_Turkmenistan = SkTEndian_SwapBE16(0x0442),
+ Uighur_PRC = SkTEndian_SwapBE16(0x0480),
+ Ukrainian_Ukraine = SkTEndian_SwapBE16(0x0422),
+ UpperSorbian_Germany = SkTEndian_SwapBE16(0x042E),
+ Urdu_IslamicRepublicOfPakistan = SkTEndian_SwapBE16(0x0420),
+ UzbekCyrillic_Uzbekistan = SkTEndian_SwapBE16(0x0843),
+ UzbekLatin_Uzbekistan = SkTEndian_SwapBE16(0x0443),
+ Vietnamese_Vietnam = SkTEndian_SwapBE16(0x042A),
+ Welsh_UnitedKingdom = SkTEndian_SwapBE16(0x0452),
+ Wolof_Senegal = SkTEndian_SwapBE16(0x0488),
+ Yakut_Russia = SkTEndian_SwapBE16(0x0485),
+ Yi_PRC = SkTEndian_SwapBE16(0x0478),
+ Yoruba_Nigeria = SkTEndian_SwapBE16(0x046A),
+ } value;
+ } windows;
+ } languageID;
+
+ /** NameIDs <= 0xFF are predefined. Those > 0xFF are font specific. */
+ union NameID {
+ /** A font specific name id which should be greater than 0xFF. */
+ SK_OT_USHORT fontSpecific;
+ struct Predefined {
+ enum Value : SK_OT_USHORT {
+ CopyrightNotice = SkTEndian_SwapBE16(0),
+ FontFamilyName = SkTEndian_SwapBE16(1),
+ FontSubfamilyName = SkTEndian_SwapBE16(2),
+ UniqueFontIdentifier = SkTEndian_SwapBE16(3),
+ FullFontName = SkTEndian_SwapBE16(4),
+ VersionString = SkTEndian_SwapBE16(5), //Version <number>.<number>
+ PostscriptName = SkTEndian_SwapBE16(6), //See spec for constraints.
+ Trademark = SkTEndian_SwapBE16(7),
+ ManufacturerName = SkTEndian_SwapBE16(8),
+ Designer = SkTEndian_SwapBE16(9),
+ Description = SkTEndian_SwapBE16(10),
+ URLVendor = SkTEndian_SwapBE16(11),
+ URLDesigner = SkTEndian_SwapBE16(12),
+ LicenseDescription = SkTEndian_SwapBE16(13),
+ LicenseInfoURL = SkTEndian_SwapBE16(14),
+ PreferredFamily = SkTEndian_SwapBE16(16),
+ PreferredSubfamily = SkTEndian_SwapBE16(17),
+ CompatibleFullName = SkTEndian_SwapBE16(18),
+ SampleText = SkTEndian_SwapBE16(19),
+ PostscriptCIDFindfontName = SkTEndian_SwapBE16(20),
+ WWSFamilyName = SkTEndian_SwapBE16(21),
+ WWSSubfamilyName = SkTEndian_SwapBE16(22),
+ } value;
+ } predefined;
+ } nameID;
+
+ /** The length of the string in SK_OT_BYTEs. */
+ SK_OT_USHORT length;
+
+ /** Offset in SK_OT_BYTEs from start of string storage area
+ * (see SkOTTableName::stringOffset).
+ */
+ SK_OT_USHORT offset;
+ }; //nameRecord[count];
+
+ struct Format1Ext {
+ /** The number of languageTagRecords which follow. */
+ SK_OT_USHORT langTagCount;
+
+ /** The encoding of a langTagRecord string is always UTF-16BE.
+ * The content should follow IETF specification BCP 47.
+ */
+ struct LangTagRecord {
+ /** The length of the string in SK_OT_BYTEs. */
+ SK_OT_USHORT length;
+
+ /** Offset in SK_OT_BYTEs from start of string storage area
+ * (see SkOTTableName::stringOffset).
+ */
+ SK_OT_USHORT offset;
+ }; //langTagRecord[langTagCount]
+ }; //format1ext (if format == format_1)
+
+ class Iterator {
+ public:
+ Iterator(const uint8_t* nameTable, size_t size)
+ : fNameTable(nameTable), fNameTableSize(size), fIndex(0), fType(-1) { }
+ Iterator(const uint8_t* nameTable, size_t size, SK_OT_USHORT type)
+ : fNameTable(nameTable), fNameTableSize(size), fIndex(0), fType(type)
+ { }
+
+ void reset(SK_OT_USHORT type) {
+ fIndex = 0;
+ fType = type;
+ }
+
+ struct Record {
+ SkString name;
+ SkString language;
+ SK_OT_USHORT type;
+ };
+ bool next(Record&);
+
+ private:
+ const uint8_t* fNameTable;
+ const size_t fNameTableSize;
+ size_t fIndex;
+ int fType;
+ };
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkOTTableName) == 6, "sizeof_SkOTTableName_not_6");
+static_assert(sizeof(SkOTTableName::Format1Ext) == 2, "sizeof_SkOTTableNameF1_not_2");
+static_assert(sizeof(SkOTTableName::Format1Ext::LangTagRecord) == 4, "sizeof_SkOTTableNameLangTagRecord_not_4");
+static_assert(sizeof(SkOTTableName::Record) == 12, "sizeof_SkOTTableNameRecord_not_12");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTTable_post.h b/gfx/skia/skia/src/sfnt/SkOTTable_post.h
new file mode 100644
index 0000000000..7f85ca4215
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTTable_post.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTTable_post_DEFINED
+#define SkOTTable_post_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkOTTablePostScript {
+ static const SK_OT_CHAR TAG0 = 'p';
+ static const SK_OT_CHAR TAG1 = 'o';
+ static const SK_OT_CHAR TAG2 = 's';
+ static const SK_OT_CHAR TAG3 = 't';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkOTTablePostScript>::value;
+
+ struct Format {
+ enum Value : SK_OT_Fixed {
+ version1 = SkTEndian_SwapBE32(0x00010000),
+ version2 = SkTEndian_SwapBE32(0x00020000),
+ version2_5 = SkTEndian_SwapBE32(0x00025000),
+ version3 = SkTEndian_SwapBE32(0x00030000),
+ version4 = SkTEndian_SwapBE32(0x00040000),
+ };
+ SK_OT_Fixed value;
+ } format;
+ SK_OT_Fixed italicAngle;
+ SK_OT_FWORD underlinePosition;
+ SK_OT_FWORD underlineThickness;
+ SK_OT_ULONG isFixedPitch;
+ SK_OT_ULONG minMemType42;
+ SK_OT_ULONG maxMemType42;
+ SK_OT_ULONG minMemType1;
+ SK_OT_ULONG maxMemType1;
+};
+
+#pragma pack(pop)
+
+
+#include <stddef.h>
+static_assert(offsetof(SkOTTablePostScript, maxMemType1) == 28, "SkOTTablePostScript_maxMemType1_not_at_28");
+static_assert(sizeof(SkOTTablePostScript) == 32, "sizeof_SkOTTablePostScript_not_32");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkOTUtils.cpp b/gfx/skia/skia/src/sfnt/SkOTUtils.cpp
new file mode 100644
index 0000000000..0dd44a079a
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTUtils.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <array>
+
+#include "src/sfnt/SkOTUtils.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkEndian.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_head.h"
+#include "src/sfnt/SkOTTable_name.h"
+#include "src/sfnt/SkSFNTHeader.h"
+
+uint32_t SkOTUtils::CalcTableChecksum(SK_OT_ULONG *data, size_t length) {
+ uint32_t sum = 0;
+ SK_OT_ULONG *dataEnd = data + ((length + 3) & ~3) / sizeof(SK_OT_ULONG);
+ for (; data < dataEnd; ++data) {
+ sum += SkEndian_SwapBE32(*data);
+ }
+ return sum;
+}
+
+SkData* SkOTUtils::RenameFont(SkStreamAsset* fontData, const char* fontName, int fontNameLen) {
+
+ // Get the sfnt header.
+ SkSFNTHeader sfntHeader;
+ if (fontData->read(&sfntHeader, sizeof(sfntHeader)) < sizeof(sfntHeader)) {
+ return nullptr;
+ }
+
+ // Find the existing 'name' table.
+ int tableIndex;
+ SkSFNTHeader::TableDirectoryEntry tableEntry;
+ int numTables = SkEndian_SwapBE16(sfntHeader.numTables);
+ for (tableIndex = 0; tableIndex < numTables; ++tableIndex) {
+ if (fontData->read(&tableEntry, sizeof(tableEntry)) < sizeof(tableEntry)) {
+ return nullptr;
+ }
+ if (SkOTTableName::TAG == tableEntry.tag) {
+ break;
+ }
+ }
+ if (tableIndex == numTables) {
+ return nullptr;
+ }
+
+ if (!fontData->rewind()) {
+ return nullptr;
+ }
+
+ // The required 'name' record types: Family, Style, Unique, Full and PostScript.
+ static constexpr std::array<SkOTTableName::Record::NameID::Predefined::Value, 5> names{{
+ SkOTTableName::Record::NameID::Predefined::FontFamilyName,
+ SkOTTableName::Record::NameID::Predefined::FontSubfamilyName,
+ SkOTTableName::Record::NameID::Predefined::UniqueFontIdentifier,
+ SkOTTableName::Record::NameID::Predefined::FullFontName,
+ SkOTTableName::Record::NameID::Predefined::PostscriptName,
+ }};
+
+ // GDI will not use a Symbol cmap table if there is no Symbol encoded name.
+ static constexpr std::array<SkOTTableName::Record::EncodingID::Windows::Value, 2> encodings{{
+ SkOTTableName::Record::EncodingID::Windows::Symbol,
+ SkOTTableName::Record::EncodingID::Windows::UnicodeBMPUCS2,
+ }};
+
+ // Copy the data, leaving out the old name table.
+ // In theory, we could also remove the DSIG table if it exists.
+ size_t nameTableLogicalSize = sizeof(SkOTTableName)
+ + (encodings.size() * names.size() * sizeof(SkOTTableName::Record))
+ + (fontNameLen * sizeof(SK_OT_USHORT));
+ size_t nameTablePhysicalSize = (nameTableLogicalSize + 3) & ~3; // Rounded up to a multiple of 4.
+
+ size_t oldNameTablePhysicalSize = (SkEndian_SwapBE32(tableEntry.logicalLength) + 3) & ~3; // Rounded up to a multiple of 4.
+ size_t oldNameTableOffset = SkEndian_SwapBE32(tableEntry.offset);
+
+ //originalDataSize is the size of the original data without the name table.
+ size_t originalDataSize = fontData->getLength() - oldNameTablePhysicalSize;
+ size_t newDataSize = originalDataSize + nameTablePhysicalSize;
+
+ auto rewrittenFontData = SkData::MakeUninitialized(newDataSize);
+ SK_OT_BYTE* data = static_cast<SK_OT_BYTE*>(rewrittenFontData->writable_data());
+
+ if (fontData->read(data, oldNameTableOffset) < oldNameTableOffset) {
+ return nullptr;
+ }
+ if (fontData->skip(oldNameTablePhysicalSize) < oldNameTablePhysicalSize) {
+ return nullptr;
+ }
+ if (fontData->read(data + oldNameTableOffset, originalDataSize - oldNameTableOffset) < originalDataSize - oldNameTableOffset) {
+ return nullptr;
+ }
+
+ //Fix up the offsets of the directory entries after the old 'name' table entry.
+ SkSFNTHeader::TableDirectoryEntry* currentEntry = reinterpret_cast<SkSFNTHeader::TableDirectoryEntry*>(data + sizeof(SkSFNTHeader));
+ SkSFNTHeader::TableDirectoryEntry* endEntry = currentEntry + numTables;
+ SkSFNTHeader::TableDirectoryEntry* headTableEntry = nullptr;
+ for (; currentEntry < endEntry; ++currentEntry) {
+ uint32_t oldOffset = SkEndian_SwapBE32(currentEntry->offset);
+ if (oldOffset > oldNameTableOffset) {
+ currentEntry->offset = SkEndian_SwapBE32(SkToU32(oldOffset - oldNameTablePhysicalSize));
+ }
+
+ if (SkOTTableHead::TAG == currentEntry->tag) {
+ headTableEntry = currentEntry;
+ }
+ }
+
+ // Make the table directory entry point to the new 'name' table.
+ SkSFNTHeader::TableDirectoryEntry* nameTableEntry = reinterpret_cast<SkSFNTHeader::TableDirectoryEntry*>(data + sizeof(SkSFNTHeader)) + tableIndex;
+ nameTableEntry->logicalLength = SkEndian_SwapBE32(SkToU32(nameTableLogicalSize));
+ nameTableEntry->offset = SkEndian_SwapBE32(SkToU32(originalDataSize));
+
+ // Write the new 'name' table after the original font data.
+ SkOTTableName* nameTable = reinterpret_cast<SkOTTableName*>(data + originalDataSize);
+ unsigned short stringOffset = sizeof(SkOTTableName) + (encodings.size() * names.size() * sizeof(SkOTTableName::Record));
+ nameTable->format = SkOTTableName::format_0;
+ nameTable->count = SkEndian_SwapBE16(encodings.size() * names.size());
+ nameTable->stringOffset = SkEndian_SwapBE16(stringOffset);
+
+ SkOTTableName::Record* nameRecord = reinterpret_cast<SkOTTableName::Record*>(data + originalDataSize + sizeof(SkOTTableName));
+ for (const auto& encoding : encodings) {
+ for (const auto& name : names) {
+ nameRecord->platformID.value = SkOTTableName::Record::PlatformID::Windows;
+ nameRecord->encodingID.windows.value = encoding;
+ nameRecord->languageID.windows.value = SkOTTableName::Record::LanguageID::Windows::English_UnitedStates;
+ nameRecord->nameID.predefined.value = name;
+ nameRecord->offset = SkEndian_SwapBE16(0);
+ nameRecord->length = SkEndian_SwapBE16(SkToU16(fontNameLen * sizeof(SK_OT_USHORT)));
+ ++nameRecord;
+ }
+ }
+
+ SK_OT_USHORT* nameString = reinterpret_cast<SK_OT_USHORT*>(data + originalDataSize + stringOffset);
+ for (int i = 0; i < fontNameLen; ++i) {
+ nameString[i] = SkEndian_SwapBE16(fontName[i]);
+ }
+
+ unsigned char* logical = data + originalDataSize + nameTableLogicalSize;
+ unsigned char* physical = data + originalDataSize + nameTablePhysicalSize;
+ for (; logical < physical; ++logical) {
+ *logical = 0;
+ }
+
+ // Update the table checksum in the directory entry.
+ nameTableEntry->checksum = SkEndian_SwapBE32(SkOTUtils::CalcTableChecksum(reinterpret_cast<SK_OT_ULONG*>(nameTable), nameTableLogicalSize));
+
+ // Update the checksum adjustment in the head table.
+ if (headTableEntry) {
+ size_t headTableOffset = SkEndian_SwapBE32(headTableEntry->offset);
+ if (headTableOffset + sizeof(SkOTTableHead) < originalDataSize) {
+ SkOTTableHead* headTable = reinterpret_cast<SkOTTableHead*>(data + headTableOffset);
+ headTable->checksumAdjustment = SkEndian_SwapBE32(0);
+ uint32_t unadjustedFontChecksum = SkOTUtils::CalcTableChecksum(reinterpret_cast<SK_OT_ULONG*>(data), originalDataSize + nameTablePhysicalSize);
+ headTable->checksumAdjustment = SkEndian_SwapBE32(SkOTTableHead::fontChecksum - unadjustedFontChecksum);
+ }
+ }
+
+ return rewrittenFontData.release();
+}
+
+sk_sp<SkOTUtils::LocalizedStrings_NameTable>
+SkOTUtils::LocalizedStrings_NameTable::Make(const SkTypeface& typeface,
+ SK_OT_USHORT types[],
+ int typesCount)
+{
+ static const SkFontTableTag nameTag = SkSetFourByteTag('n','a','m','e');
+ size_t nameTableSize = typeface.getTableSize(nameTag);
+ if (0 == nameTableSize) {
+ return nullptr;
+ }
+ std::unique_ptr<uint8_t[]> nameTableData(new uint8_t[nameTableSize]);
+ size_t copied = typeface.getTableData(nameTag, 0, nameTableSize, nameTableData.get());
+ if (copied != nameTableSize) {
+ return nullptr;
+ }
+
+ return sk_sp<SkOTUtils::LocalizedStrings_NameTable>(
+ new SkOTUtils::LocalizedStrings_NameTable(std::move(nameTableData), nameTableSize,
+ types, typesCount));
+}
+
+sk_sp<SkOTUtils::LocalizedStrings_NameTable>
+SkOTUtils::LocalizedStrings_NameTable::MakeForFamilyNames(const SkTypeface& typeface) {
+ return Make(typeface,
+ SkOTUtils::LocalizedStrings_NameTable::familyNameTypes,
+ std::size(SkOTUtils::LocalizedStrings_NameTable::familyNameTypes));
+}
+
+bool SkOTUtils::LocalizedStrings_NameTable::next(SkTypeface::LocalizedString* localizedString) {
+ do {
+ SkOTTableName::Iterator::Record record;
+ if (fFamilyNameIter.next(record)) {
+ localizedString->fString = record.name;
+ localizedString->fLanguage = record.language;
+ return true;
+ }
+ if (fTypesCount == fTypesIndex + 1) {
+ return false;
+ }
+ ++fTypesIndex;
+ fFamilyNameIter.reset(fTypes[fTypesIndex]);
+ } while (true);
+}
+
+SK_OT_USHORT SkOTUtils::LocalizedStrings_NameTable::familyNameTypes[3] = {
+ SkOTTableName::Record::NameID::Predefined::FontFamilyName,
+ SkOTTableName::Record::NameID::Predefined::PreferredFamily,
+ SkOTTableName::Record::NameID::Predefined::WWSFamilyName,
+};
+
+void SkOTUtils::SetAdvancedTypefaceFlags(SkOTTableOS2_V4::Type fsType,
+ SkAdvancedTypefaceMetrics* info) {
+ SkASSERT(info);
+ // The logic should be identical to SkTypeface_FreeType::onGetAdvancedMetrics().
+ if (fsType.raw.value != 0) {
+ if (SkToBool(fsType.field.Restricted) || SkToBool(fsType.field.Bitmap)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotEmbeddable_FontFlag;
+ }
+ if (SkToBool(fsType.field.NoSubsetting)) {
+ info->fFlags |= SkAdvancedTypefaceMetrics::kNotSubsettable_FontFlag;
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/sfnt/SkOTUtils.h b/gfx/skia/skia/src/sfnt/SkOTUtils.h
new file mode 100644
index 0000000000..fb2732385c
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkOTUtils.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOTUtils_DEFINED
+#define SkOTUtils_DEFINED
+
+#include "include/core/SkTypeface.h"
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkOTTable_OS_2_V4.h"
+#include "src/sfnt/SkOTTable_name.h"
+
+class SkData;
+class SkStream;
+struct SkAdvancedTypefaceMetrics;
+
+struct SkOTUtils {
+ /**
+ * Calculates the OpenType checksum for data.
+ */
+ static uint32_t CalcTableChecksum(SK_OT_ULONG *data, size_t length);
+
+ /**
+ * Renames an sfnt font. On failure (invalid data or not an sfnt font)
+ * returns nullptr.
+ *
+ * Essentially, this removes any existing 'name' table and replaces it
+ * with a new one in which FontFamilyName, FontSubfamilyName,
+ * UniqueFontIdentifier, FullFontName, and PostscriptName are fontName.
+ *
+ * The new 'name' table records will be written with the Windows,
+ * UnicodeBMPUCS2, and English_UnitedStates settings.
+ *
+ * fontName and fontNameLen must be specified in terms of ASCII chars.
+ *
+ * Does not affect fontData's ownership.
+ */
+ static SkData* RenameFont(SkStreamAsset* fontData, const char* fontName, int fontNameLen);
+
+ /** An implementation of LocalizedStrings which obtains its data from a 'name' table. */
+ class LocalizedStrings_NameTable : public SkTypeface::LocalizedStrings {
+ public:
+ /** Takes ownership of the nameTableData and will free it with SK_DELETE. */
+ LocalizedStrings_NameTable(std::unique_ptr<uint8_t[]> nameTableData, size_t size,
+ SK_OT_USHORT types[],
+ int typesCount)
+ : fTypes(types), fTypesCount(typesCount), fTypesIndex(0)
+ , fNameTableData(std::move(nameTableData))
+ , fFamilyNameIter(fNameTableData.get(), size, fTypes[fTypesIndex])
+ { }
+
+ /** Creates an iterator over all data in the 'name' table of a typeface.
+ * If no valid 'name' table can be found, returns nullptr.
+ */
+ static sk_sp<LocalizedStrings_NameTable> Make(
+ const SkTypeface& typeface,
+ SK_OT_USHORT types[],
+ int typesCount);
+
+ /** Creates an iterator over all the family names in the 'name' table of a typeface.
+ * If no valid 'name' table can be found, returns nullptr.
+ */
+ static sk_sp<LocalizedStrings_NameTable> MakeForFamilyNames(const SkTypeface& typeface);
+
+ bool next(SkTypeface::LocalizedString* localizedString) override;
+ private:
+ static SK_OT_USHORT familyNameTypes[3];
+
+ SK_OT_USHORT* fTypes;
+ int fTypesCount;
+ int fTypesIndex;
+ std::unique_ptr<uint8_t[]> fNameTableData;
+ SkOTTableName::Iterator fFamilyNameIter;
+ };
+
+ /** An implementation of LocalizedStrings which has one name. */
+ class LocalizedStrings_SingleName : public SkTypeface::LocalizedStrings {
+ public:
+ LocalizedStrings_SingleName(SkString name, SkString language)
+ : fName(name), fLanguage(language), fHasNext(true)
+ { }
+
+ bool next(SkTypeface::LocalizedString* localizedString) override {
+ localizedString->fString = fName;
+ localizedString->fLanguage = fLanguage;
+
+ bool hadNext = fHasNext;
+ fHasNext = false;
+ return hadNext;
+ }
+
+ private:
+ SkString fName;
+ SkString fLanguage;
+ bool fHasNext;
+ };
+
+ static void SetAdvancedTypefaceFlags(SkOTTableOS2_V4::Type fsType,
+ SkAdvancedTypefaceMetrics* info);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkPanose.h b/gfx/skia/skia/src/sfnt/SkPanose.h
new file mode 100644
index 0000000000..50ccb7a301
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkPanose.h
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPanose_DEFINED
+#define SkPanose_DEFINED
+
+#include "src/sfnt/SkOTTableTypes.h"
+
+#pragma pack(push, 1)
+
+struct SkPanose {
+ //This value changes the meaning of the following 9 bytes.
+ enum class FamilyType : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ TextAndDisplay = 2,
+ Script = 3,
+ Decorative = 4,
+ Pictoral = 5,
+ } bFamilyType;
+
+ union Data {
+ struct TextAndDisplay {
+ enum class SerifStyle : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Cove = 2,
+ ObtuseCove = 3,
+ SquareCove = 4,
+ ObtuseSquareCove = 5,
+ Square = 6,
+ Thin = 7,
+ Bone = 8,
+ Exaggerated = 9,
+ Triangle = 10,
+ NormalSans = 11,
+ ObtuseSans = 12,
+ PerpSans = 13,
+ Flared = 14,
+ Rounded = 15,
+ } bSerifStyle;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Proportion : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ OldStyle = 2,
+ Modern = 3,
+ EvenWidth = 4,
+ Expanded = 5,
+ Condensed = 6,
+ VeryExpanded = 7,
+ VeryCondensed = 8,
+ Monospaced = 9,
+ } bProportion;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ } bContrast;
+
+#ifdef SK_WIN_PANOSE
+ //This is what Windows (and FontForge and Apple TT spec) define.
+ //The Impact font uses 9.
+ enum class StrokeVariation : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ GradualDiagonal = 2,
+ GradualTransitional = 3,
+ GradualVertical = 4,
+ GradualHorizontal = 5,
+ RapidVertical = 6,
+ RapidHorizontal = 7,
+ InstantVertical = 8,
+ } bStrokeVariation;
+#else
+ //Stroke variation description in OT OS/2 ver0,ver1 is incorrect.
+ //This is what HP Panose says.
+ enum class StrokeVariation : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoVariation = 2,
+ Gradual_Diagonal = 3,
+ Gradual_Transitional = 4,
+ Gradual_Vertical = 5,
+ Gradual_Horizontal = 6,
+ Rapid_Vertical = 7,
+ Rapid_Horizontal = 8,
+ Instant_Vertical = 9,
+ Instant_Horizontal = 10,
+ } bStrokeVariation;
+#endif
+
+ enum class ArmStyle : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ StraightArms_Horizontal = 2,
+ StraightArms_Wedge = 3,
+ StraightArms_Vertical = 4,
+ StraightArms_SingleSerif = 5,
+ StraightArms_DoubleSerif = 6,
+ NonStraightArms_Horizontal = 7,
+ NonStraightArms_Wedge = 8,
+ NonStraightArms_Vertical = 9,
+ NonStraightArms_SingleSerif = 10,
+ NonStraightArms_DoubleSerif = 11,
+ } bArmStyle;
+
+ enum class Letterform : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Normal_Contact = 2,
+ Normal_Weighted = 3,
+ Normal_Boxed = 4,
+ Normal_Flattened = 5,
+ Normal_Rounded = 6,
+ Normal_OffCenter = 7,
+ Normal_Square = 8,
+ Oblique_Contact = 9,
+ Oblique_Weighted = 10,
+ Oblique_Boxed = 11,
+ Oblique_Flattened = 12,
+ Oblique_Rounded = 13,
+ Oblique_OffCenter = 14,
+ Oblique_Square = 15,
+ } bLetterform;
+
+ enum class Midline : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Standard_Trimmed = 2,
+ Standard_Pointed = 3,
+ Standard_Serifed = 4,
+ High_Trimmed = 5,
+ High_Pointed = 6,
+ High_Serifed = 7,
+ Constant_Trimmed = 8,
+ Constant_Pointed = 9,
+ Constant_Serifed = 10,
+ Low_Trimmed = 11,
+ Low_Pointed = 12,
+ Low_Serifed = 13,
+ } bMidline;
+
+ enum class XHeight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Constant_Small = 2,
+ Constant_Standard = 3,
+ Constant_Large = 4,
+ Ducking_Small = 5,
+ Ducking_Standard = 6,
+ Ducking_Large = 7,
+ } bXHeight;
+ } textAndDisplay;
+
+ struct Script {
+ enum class ToolKind : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ FlatNib = 2,
+ PressurePoint = 3,
+ Engraved = 4,
+ Ball = 5,
+ Brush = 6,
+ Rough = 7,
+ FeltPen = 8,
+ WildBrush = 9,
+ } bToolKind;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Spacing : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ProportionalSpaced = 2,
+ Monospaced = 3,
+ } bSpacing;
+
+ enum class AspectRatio : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryCondensed = 2,
+ Condensed = 3,
+ Normal = 4,
+ Expanded = 5,
+ VeryExpanded = 6,
+ } bAspectRatio;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ } bContrast;
+
+ enum class Topology : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Roman_Disconnected = 2,
+ Roman_Trailing = 3,
+ Roman_Connected = 4,
+ Cursive_Disconnected = 5,
+ Cursive_Trailing = 6,
+ Cursive_Connected = 7,
+ Blackletter_Disconnected = 8,
+ Blackletter_Trailing = 9,
+ Blackletter_Connected = 10,
+ } bTopology;
+
+ enum class Form : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Upright_NoWrapping = 2,
+ Upright_SomeWrapping = 3,
+ Upright_MoreWrapping = 4,
+ Upright_ExtremeWrapping = 5,
+ Oblique_NoWrapping = 6,
+ Oblique_SomeWrapping = 7,
+ Oblique_MoreWrapping = 8,
+ Oblique_ExtremeWrapping = 9,
+ Exaggerated_NoWrapping = 10,
+ Exaggerated_SomeWrapping = 11,
+ Exaggerated_MoreWrapping = 12,
+ Exaggerated_ExtremeWrapping = 13,
+ } bForm;
+
+ enum class Finials : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None_NoLoops = 2,
+ None_ClosedLoops = 3,
+ None_OpenLoops = 4,
+ Sharp_NoLoops = 5,
+ Sharp_ClosedLoops = 6,
+ Sharp_OpenLoops = 7,
+ Tapered_NoLoops = 8,
+ Tapered_ClosedLoops = 9,
+ Tapered_OpenLoops = 10,
+ Round_NoLoops = 11,
+ Round_ClosedLoops = 12,
+ Round_OpenLoops = 13,
+ } bFinials;
+
+ enum class XAscent : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLow = 2,
+ Low = 3,
+ Medium = 4,
+ High = 5,
+ VeryHigh = 6,
+ } bXAscent;
+ } script;
+
+ struct Decorative {
+ enum class Class : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Derivative = 2,
+ NonStandard_Topology = 3,
+ NonStandard_Elements = 4,
+ NonStandard_Aspect = 5,
+ Initials = 6,
+ Cartoon = 7,
+ PictureStems = 8,
+ Ornamented = 9,
+ TextAndBackground = 10,
+ Collage = 11,
+ Montage = 12,
+ } bClass;
+
+ enum class Weight : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ VeryLight = 2,
+ Light = 3,
+ Thin = 4,
+ Book = 5,
+ Medium = 6,
+ Demi = 7,
+ Bold = 8,
+ Heavy = 9,
+ Black = 10,
+ ExtraBlack = 11,
+ } bWeight;
+
+ enum class Aspect : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ SuperCondensed = 2,
+ VeryCondensed = 3,
+ Condensed = 4,
+ Normal = 5,
+ Extended = 6,
+ VeryExtended = 7,
+ SuperExtended = 8,
+ Monospaced = 9,
+ } bAspect;
+
+ enum class Contrast : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ VeryLow = 3,
+ Low = 4,
+ MediumLow = 5,
+ Medium = 6,
+ MediumHigh = 7,
+ High = 8,
+ VeryHigh = 9,
+ HorizontalLow = 10,
+ HorizontalMedium = 11,
+ HorizontalHigh = 12,
+ Broken = 13,
+ } bContrast;
+
+ enum class SerifVariant : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Cove = 2,
+ ObtuseCove = 3,
+ SquareCove = 4,
+ ObtuseSquareCove = 5,
+ Square = 6,
+ Thin = 7,
+ Oval = 8,
+ Exaggerated = 9,
+ Triangle = 10,
+ NormalSans = 11,
+ ObtuseSans = 12,
+ PerpendicularSans = 13,
+ Flared = 14,
+ Rounded = 15,
+ Script = 16,
+ } bSerifVariant;
+
+ enum class Treatment : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None_StandardSolidFill = 2,
+ White_NoFill = 3,
+ PatternedFill = 4,
+ ComplexFill = 5,
+ ShapedFill = 6,
+ DrawnDistressed = 7,
+ } bTreatment;
+
+ enum class Lining : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ None = 2,
+ Inline = 3,
+ Outline = 4,
+ Engraved = 5,
+ Shadow = 6,
+ Relief = 7,
+ Backdrop = 8,
+ } bLining;
+
+ enum class Topology : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Standard = 2,
+ Square = 3,
+ MultipleSegment = 4,
+ DecoWacoMidlines = 5,
+ UnevenWeighting = 6,
+ DiverseArms = 7,
+ DiverseForms = 8,
+ LombardicForms = 9,
+ UpperCaseInLowerCase = 10,
+ ImpliedTopology = 11,
+ HorseshoeEandA = 12,
+ Cursive = 13,
+ Blackletter = 14,
+ SwashVariance = 15,
+ } bTopology;
+
+ enum class RangeOfCharacters : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ExtendedCollection = 2,
+ Litterals = 3,
+ NoLowerCase = 4,
+ SmallCaps = 5,
+ } bRangeOfCharacters;
+ } decorative;
+
+ struct Pictoral {
+ enum class Kind : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ Montages = 2,
+ Pictures = 3,
+ Shapes = 4,
+ Scientific = 5,
+ Music = 6,
+ Expert = 7,
+ Patterns = 8,
+ Boarders = 9,
+ Icons = 10,
+ Logos = 11,
+ IndustrySpecific = 12,
+ } bKind;
+
+ enum class Weight : SK_OT_BYTE {
+ NoFit = 1,
+ } bWeight;
+
+ enum class Spacing : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ ProportionalSpaced = 2,
+ Monospaced = 3,
+ } bSpacing;
+
+ enum class AspectRatioAndContrast : SK_OT_BYTE {
+ NoFit = 1,
+ } bAspectRatioAndContrast;
+
+ enum class AspectRatio94 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio94;
+
+ enum class AspectRatio119 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio119;
+
+ enum class AspectRatio157 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio157;
+
+ enum class AspectRatio163 : SK_OT_BYTE {
+ Any = 0,
+ NoFit = 1,
+ NoWidth = 2,
+ ExceptionallyWide = 3,
+ SuperWide = 4,
+ VeryWide = 5,
+ Wide = 6,
+ Normal = 7,
+ Narrow = 8,
+ VeryNarrow = 9,
+ } bAspectRatio163;
+ } pictoral;
+ } data;
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkPanose) == 10, "sizeof_SkPanose_not_10");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkSFNTHeader.h b/gfx/skia/skia/src/sfnt/SkSFNTHeader.h
new file mode 100644
index 0000000000..6aa19fe764
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkSFNTHeader.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSFNTHeader_DEFINED
+#define SkSFNTHeader_DEFINED
+
+#include "src/base/SkEndian.h"
+#include "src/sfnt/SkOTTableTypes.h"
+
+//All SK_SFNT_ prefixed types should be considered as big endian.
+typedef uint16_t SK_SFNT_USHORT;
+typedef uint32_t SK_SFNT_ULONG;
+
+#pragma pack(push, 1)
+
+struct SkSFNTHeader {
+ SK_SFNT_ULONG fontType;
+ struct fontType_WindowsTrueType {
+ static const SK_OT_CHAR TAG0 = 0;
+ static const SK_OT_CHAR TAG1 = 1;
+ static const SK_OT_CHAR TAG2 = 0;
+ static const SK_OT_CHAR TAG3 = 0;
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_WindowsTrueType>::value;
+ };
+ struct fontType_MacTrueType {
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 'r';
+ static const SK_OT_CHAR TAG2 = 'u';
+ static const SK_OT_CHAR TAG3 = 'e';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_MacTrueType>::value;
+ };
+ struct fontType_PostScript {
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 'y';
+ static const SK_OT_CHAR TAG2 = 'p';
+ static const SK_OT_CHAR TAG3 = '1';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_PostScript>::value;
+ };
+ struct fontType_OpenTypeCFF {
+ static const SK_OT_CHAR TAG0 = 'O';
+ static const SK_OT_CHAR TAG1 = 'T';
+ static const SK_OT_CHAR TAG2 = 'T';
+ static const SK_OT_CHAR TAG3 = 'O';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<fontType_OpenTypeCFF>::value;
+ };
+
+ SK_SFNT_USHORT numTables;
+ SK_SFNT_USHORT searchRange;
+ SK_SFNT_USHORT entrySelector;
+ SK_SFNT_USHORT rangeShift;
+
+ struct TableDirectoryEntry {
+ SK_SFNT_ULONG tag;
+ SK_SFNT_ULONG checksum;
+ SK_SFNT_ULONG offset; //From beginning of header.
+ SK_SFNT_ULONG logicalLength;
+ }; //tableDirectoryEntries[numTables]
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkSFNTHeader) == 12, "sizeof_SkSFNTHeader_not_12");
+static_assert(sizeof(SkSFNTHeader::TableDirectoryEntry) == 16, "sizeof_SkSFNTHeader_TableDirectoryEntry_not_16");
+
+#endif
diff --git a/gfx/skia/skia/src/sfnt/SkTTCFHeader.h b/gfx/skia/skia/src/sfnt/SkTTCFHeader.h
new file mode 100644
index 0000000000..63eac7c3e2
--- /dev/null
+++ b/gfx/skia/skia/src/sfnt/SkTTCFHeader.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTTCFHeader_DEFINED
+#define SkTTCFHeader_DEFINED
+
+#include "src/sfnt/SkOTTableTypes.h"
+#include "src/sfnt/SkSFNTHeader.h"
+
+#pragma pack(push, 1)
+
+struct SkTTCFHeader {
+ SK_SFNT_ULONG ttcTag;
+ static const SK_OT_CHAR TAG0 = 't';
+ static const SK_OT_CHAR TAG1 = 't';
+ static const SK_OT_CHAR TAG2 = 'c';
+ static const SK_OT_CHAR TAG3 = 'f';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<SkTTCFHeader>::value;
+
+ SK_OT_Fixed version;
+ static const SK_OT_Fixed version_1 = SkTEndian_SwapBE32(1 << 16);
+ static const SK_OT_Fixed version_2 = SkTEndian_SwapBE32(2 << 16);
+
+ SK_OT_ULONG numOffsets;
+ //SK_OT_ULONG offset[numOffsets]
+
+ struct Version2Ext {
+ SK_OT_ULONG dsigType;
+ struct dsigType_None {
+ static const SK_OT_CHAR TAG0 = 0;
+ static const SK_OT_CHAR TAG1 = 0;
+ static const SK_OT_CHAR TAG2 = 0;
+ static const SK_OT_CHAR TAG3 = 0;
+ static const SK_OT_ULONG TAG = SkOTTableTAG<dsigType_None>::value;
+ };
+ struct dsigType_Format1 {
+ static const SK_OT_CHAR TAG0 = 'D';
+ static const SK_OT_CHAR TAG1 = 'S';
+ static const SK_OT_CHAR TAG2 = 'I';
+ static const SK_OT_CHAR TAG3 = 'G';
+ static const SK_OT_ULONG TAG = SkOTTableTAG<dsigType_Format1>::value;
+ };
+ SK_OT_ULONG dsigLength; //Length of DSIG table (in bytes).
+ SK_OT_ULONG dsigOffset; //Offset of DSIG table from the beginning of file (in bytes).
+ };// version2ext (if version == version_2)
+};
+
+#pragma pack(pop)
+
+
+static_assert(sizeof(SkTTCFHeader) == 12, "sizeof_SkTTCFHeader_not_12");
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkBitmapProcShader.cpp b/gfx/skia/skia/src/shaders/SkBitmapProcShader.cpp
new file mode 100644
index 0000000000..2f0b35f3b4
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkBitmapProcShader.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/SkBitmapProcShader.h"
+
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBitmapProcState.h"
+#include "src/core/SkPaintPriv.h"
+#include "src/core/SkXfermodePriv.h"
+
+class BitmapProcShaderContext : public SkShaderBase::Context {
+public:
+ BitmapProcShaderContext(const SkShaderBase& shader, const SkShaderBase::ContextRec& rec,
+ SkBitmapProcState* state)
+ : INHERITED(shader, rec)
+ , fState(state)
+ , fFlags(0)
+ {
+ if (fState->fPixmap.isOpaque() && (255 == this->getPaintAlpha())) {
+ fFlags |= SkShaderBase::kOpaqueAlpha_Flag;
+ }
+
+ auto only_scale_and_translate = [](const SkMatrix& matrix) {
+ unsigned mask = SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask;
+ return (matrix.getType() & ~mask) == 0;
+ };
+
+ if (1 == fState->fPixmap.height() && only_scale_and_translate(this->getTotalInverse())) {
+ fFlags |= SkShaderBase::kConstInY32_Flag;
+ }
+ }
+
+ uint32_t getFlags() const override { return fFlags; }
+
+ void shadeSpan(int x, int y, SkPMColor dstC[], int count) override {
+ const SkBitmapProcState& state = *fState;
+ if (state.getShaderProc32()) {
+ state.getShaderProc32()(&state, x, y, dstC, count);
+ return;
+ }
+
+ const int BUF_MAX = 128;
+ uint32_t buffer[BUF_MAX];
+ SkBitmapProcState::MatrixProc mproc = state.getMatrixProc();
+ SkBitmapProcState::SampleProc32 sproc = state.getSampleProc32();
+ const int max = state.maxCountForBufferSize(sizeof(buffer[0]) * BUF_MAX);
+
+ SkASSERT(state.fPixmap.addr());
+
+ for (;;) {
+ int n = std::min(count, max);
+ SkASSERT(n > 0 && n < BUF_MAX*2);
+ mproc(state, buffer, n, x, y);
+ sproc(state, buffer, n, dstC);
+
+ if ((count -= n) == 0) {
+ break;
+ }
+ SkASSERT(count > 0);
+ x += n;
+ dstC += n;
+ }
+ }
+
+private:
+ SkBitmapProcState* fState;
+ uint32_t fFlags;
+
+ using INHERITED = SkShaderBase::Context;
+};
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+SkShaderBase::Context* SkBitmapProcLegacyShader::MakeContext(
+ const SkShaderBase& shader, SkTileMode tmx, SkTileMode tmy, const SkSamplingOptions& sampling,
+ const SkImage_Base* image, const ContextRec& rec, SkArenaAlloc* alloc)
+{
+ SkMatrix totalInverse;
+ // Do this first, so we know the matrix can be inverted.
+ if (!shader.computeTotalInverse(*rec.fMatrix, rec.fLocalMatrix, &totalInverse)) {
+ return nullptr;
+ }
+
+ SkBitmapProcState* state = alloc->make<SkBitmapProcState>(image, tmx, tmy);
+ if (!state->setup(totalInverse, rec.fPaintAlpha, sampling)) {
+ return nullptr;
+ }
+ return alloc->make<BitmapProcShaderContext>(shader, rec, state);
+}
diff --git a/gfx/skia/skia/src/shaders/SkBitmapProcShader.h b/gfx/skia/skia/src/shaders/SkBitmapProcShader.h
new file mode 100644
index 0000000000..763f304d8b
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkBitmapProcShader.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkBitmapProcShader_DEFINED
+#define SkBitmapProcShader_DEFINED
+
+#include "src/core/SkImagePriv.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkImage_Base;
+
+class SkBitmapProcLegacyShader : public SkShaderBase {
+private:
+ friend class SkImageShader;
+
+ static Context* MakeContext(const SkShaderBase&, SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions&, const SkImage_Base*,
+ const ContextRec&, SkArenaAlloc* alloc);
+
+ using INHERITED = SkShaderBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkColorFilterShader.cpp b/gfx/skia/skia/src/shaders/SkColorFilterShader.cpp
new file mode 100644
index 0000000000..7ff7e8dd50
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkColorFilterShader.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkShader.h"
+#include "include/core/SkString.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkColorFilterShader.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+SkColorFilterShader::SkColorFilterShader(sk_sp<SkShader> shader,
+ float alpha,
+ sk_sp<SkColorFilter> filter)
+ : fShader(std::move(shader))
+ , fFilter(as_CFB_sp(std::move(filter)))
+ , fAlpha (alpha)
+{
+ SkASSERT(fShader);
+ SkASSERT(fFilter);
+}
+
+sk_sp<SkFlattenable> SkColorFilterShader::CreateProc(SkReadBuffer& buffer) {
+ auto shader = buffer.readShader();
+ auto filter = buffer.readColorFilter();
+ if (!shader || !filter) {
+ return nullptr;
+ }
+ return sk_make_sp<SkColorFilterShader>(shader, 1.0f, filter);
+}
+
+bool SkColorFilterShader::isOpaque() const {
+ return fShader->isOpaque() && fAlpha == 1.0f && as_CFB(fFilter)->isAlphaUnchanged();
+}
+
+void SkColorFilterShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fShader.get());
+ SkASSERT(fAlpha == 1.0f); // Not exposed in public API SkShader::makeWithColorFilter().
+ buffer.writeFlattenable(fFilter.get());
+}
+
+bool SkColorFilterShader::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+ if (!as_SB(fShader)->appendStages(rec, mRec)) {
+ return false;
+ }
+ if (fAlpha != 1.0f) {
+ rec.fPipeline->append(SkRasterPipelineOp::scale_1_float, rec.fAlloc->make<float>(fAlpha));
+ }
+ if (!fFilter->appendStages(rec, fShader->isOpaque())) {
+ return false;
+ }
+ return true;
+}
+
+skvm::Color SkColorFilterShader::program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ // Run the shader.
+ skvm::Color c = as_SB(fShader)->program(p, device, local, paint, mRec, dst, uniforms, alloc);
+ if (!c) {
+ return {};
+ }
+ // Scale that by alpha.
+ if (fAlpha != 1.0f) {
+ skvm::F32 A = p->uniformF(uniforms->pushF(fAlpha));
+ c.r *= A;
+ c.g *= A;
+ c.b *= A;
+ c.a *= A;
+ }
+
+ // Finally run that through the color filter.
+ return fFilter->program(p,c, dst, uniforms,alloc);
+}
+
+#if defined(SK_GANESH)
+/////////////////////////////////////////////////////////////////////
+
+std::unique_ptr<GrFragmentProcessor>
+SkColorFilterShader::asFragmentProcessor(const GrFPArgs& args, const MatrixRec& mRec) const {
+ auto shaderFP = as_SB(fShader)->asFragmentProcessor(args, mRec);
+ if (!shaderFP) {
+ return nullptr;
+ }
+
+ // TODO I guess, but it shouldn't come up as used today.
+ SkASSERT(fAlpha == 1.0f);
+
+ auto [success, fp] = fFilter->asFragmentProcessor(std::move(shaderFP), args.fContext,
+ *args.fDstColorInfo, args.fSurfaceProps);
+ // If the filter FP could not be created, we still want to return the shader FP, so checking
+ // success can be omitted here.
+ return std::move(fp);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GRAPHITE)
+
+void SkColorFilterShader::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ ColorFilterShaderBlock::BeginBlock(keyContext, builder, gatherer);
+
+ as_SB(fShader)->addToKey(keyContext, builder, gatherer);
+ as_CFB(fFilter)->addToKey(keyContext, builder, gatherer);
+
+ builder->endBlock();
+}
+
+#endif // SK_GRAPHITE
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkShader> SkShader::makeWithColorFilter(sk_sp<SkColorFilter> filter) const {
+ SkShader* base = const_cast<SkShader*>(this);
+ if (!filter) {
+ return sk_ref_sp(base);
+ }
+ return sk_make_sp<SkColorFilterShader>(sk_ref_sp(base), 1.0f, std::move(filter));
+}
diff --git a/gfx/skia/skia/src/shaders/SkColorFilterShader.h b/gfx/skia/skia/src/shaders/SkColorFilterShader.h
new file mode 100644
index 0000000000..4cdbea6a45
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkColorFilterShader.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkColorFilterShader_DEFINED
+#define SkColorFilterShader_DEFINED
+
+#include "src/core/SkColorFilterBase.h"
+#include "src/shaders/SkShaderBase.h"
+
+class SkArenaAlloc;
+
+class SkColorFilterShader : public SkShaderBase {
+public:
+ SkColorFilterShader(sk_sp<SkShader> shader, float alpha, sk_sp<SkColorFilter> filter);
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+private:
+ bool isOpaque() const override;
+ void flatten(SkWriteBuffer&) const override;
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc*) const override;
+
+ SK_FLATTENABLE_HOOKS(SkColorFilterShader)
+
+ sk_sp<SkShader> fShader;
+ sk_sp<SkColorFilterBase> fFilter;
+ float fAlpha;
+
+ using INHERITED = SkShaderBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkColorShader.cpp b/gfx/skia/skia/src/shaders/SkColorShader.cpp
new file mode 100644
index 0000000000..149b155d62
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkColorShader.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkFlattenable.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkUtils.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+/** \class SkColorShader
+ A Shader that represents a single color. In general, this effect can be
+ accomplished by just using the color field on the paint, but if an
+ actual shader object is needed, this provides that feature.
+*/
+class SkColorShader : public SkShaderBase {
+public:
+ /** Create a ColorShader that ignores the color in the paint, and uses the
+ specified color. Note: like all shaders, at draw time the paint's alpha
+ will be respected, and is applied to the specified color.
+ */
+ explicit SkColorShader(SkColor c);
+
+ bool isOpaque() const override;
+ bool isConstant() const override { return true; }
+
+ GradientType asGradient(GradientInfo* info, SkMatrix* localMatrix) const override;
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+private:
+ friend void ::SkRegisterColorShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkColorShader)
+
+ void flatten(SkWriteBuffer&) const override;
+
+ bool onAsLuminanceColor(SkColor* lum) const override {
+ *lum = fColor;
+ return true;
+ }
+
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc*) const override;
+
+ SkColor fColor;
+};
+
+class SkColor4Shader : public SkShaderBase {
+public:
+ SkColor4Shader(const SkColor4f&, sk_sp<SkColorSpace>);
+
+ bool isOpaque() const override { return fColor.isOpaque(); }
+ bool isConstant() const override { return true; }
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+private:
+ friend void ::SkRegisterColor4ShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkColor4Shader)
+
+ void flatten(SkWriteBuffer&) const override;
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc*) const override;
+
+ sk_sp<SkColorSpace> fColorSpace;
+ const SkColor4f fColor;
+};
+
+SkColorShader::SkColorShader(SkColor c) : fColor(c) {}
+
+bool SkColorShader::isOpaque() const {
+ return SkColorGetA(fColor) == 255;
+}
+
+sk_sp<SkFlattenable> SkColorShader::CreateProc(SkReadBuffer& buffer) {
+ return sk_make_sp<SkColorShader>(buffer.readColor());
+}
+
+void SkColorShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor(fColor);
+}
+
+SkShaderBase::GradientType SkColorShader::asGradient(GradientInfo* info,
+ SkMatrix* localMatrix) const {
+ if (info) {
+ if (info->fColors && info->fColorCount >= 1) {
+ info->fColors[0] = fColor;
+ }
+ info->fColorCount = 1;
+ info->fTileMode = SkTileMode::kRepeat;
+ }
+ if (localMatrix) {
+ *localMatrix = SkMatrix::I();
+ }
+ return GradientType::kColor;
+}
+
+SkColor4Shader::SkColor4Shader(const SkColor4f& color, sk_sp<SkColorSpace> space)
+ : fColorSpace(std::move(space))
+ , fColor({color.fR, color.fG, color.fB, SkTPin(color.fA, 0.0f, 1.0f)})
+{}
+
+sk_sp<SkFlattenable> SkColor4Shader::CreateProc(SkReadBuffer& buffer) {
+ SkColor4f color;
+ sk_sp<SkColorSpace> colorSpace;
+ buffer.readColor4f(&color);
+ if (buffer.readBool()) {
+ sk_sp<SkData> data = buffer.readByteArrayAsData();
+ colorSpace = data ? SkColorSpace::Deserialize(data->data(), data->size()) : nullptr;
+ }
+ return SkShaders::Color(color, std::move(colorSpace));
+}
+
+void SkColor4Shader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeColor4f(fColor);
+ sk_sp<SkData> colorSpaceData = fColorSpace ? fColorSpace->serialize() : nullptr;
+ if (colorSpaceData) {
+ buffer.writeBool(true);
+ buffer.writeDataAsByteArray(colorSpaceData.get());
+ } else {
+ buffer.writeBool(false);
+ }
+}
+
+bool SkColorShader::appendStages(const SkStageRec& rec, const MatrixRec&) const {
+ SkColor4f color = SkColor4f::FromColor(fColor);
+ SkColorSpaceXformSteps(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ rec.fDstCS, kUnpremul_SkAlphaType).apply(color.vec());
+ rec.fPipeline->append_constant_color(rec.fAlloc, color.premul().vec());
+ return true;
+}
+
+bool SkColor4Shader::appendStages(const SkStageRec& rec, const MatrixRec&) const {
+ SkColor4f color = fColor;
+ SkColorSpaceXformSteps(fColorSpace.get(), kUnpremul_SkAlphaType,
+ rec.fDstCS, kUnpremul_SkAlphaType).apply(color.vec());
+ rec.fPipeline->append_constant_color(rec.fAlloc, color.premul().vec());
+ return true;
+}
+
+skvm::Color SkColorShader::program(skvm::Builder* p,
+ skvm::Coord /*device*/,
+ skvm::Coord /*local*/,
+ skvm::Color /*paint*/,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc*) const {
+ SkColor4f color = SkColor4f::FromColor(fColor);
+ SkColorSpaceXformSteps(sk_srgb_singleton(), kUnpremul_SkAlphaType,
+ dst.colorSpace(), kPremul_SkAlphaType).apply(color.vec());
+ return p->uniformColor(color, uniforms);
+}
+skvm::Color SkColor4Shader::program(skvm::Builder* p,
+ skvm::Coord /*device*/,
+ skvm::Coord /*local*/,
+ skvm::Color /*paint*/,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc*) const {
+ SkColor4f color = fColor;
+ SkColorSpaceXformSteps(fColorSpace.get(), kUnpremul_SkAlphaType,
+ dst.colorSpace(), kPremul_SkAlphaType).apply(color.vec());
+ return p->uniformColor(color, uniforms);
+}
+
+#if defined(SK_GANESH)
+
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrColorSpaceXform.h"
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/SkGr.h"
+
+std::unique_ptr<GrFragmentProcessor> SkColorShader::asFragmentProcessor(const GrFPArgs& args,
+ const MatrixRec&) const {
+ return GrFragmentProcessor::MakeColor(SkColorToPMColor4f(fColor, *args.fDstColorInfo));
+}
+
+std::unique_ptr<GrFragmentProcessor> SkColor4Shader::asFragmentProcessor(const GrFPArgs& args,
+ const MatrixRec&) const {
+ SkColorSpaceXformSteps steps{ fColorSpace.get(), kUnpremul_SkAlphaType,
+ args.fDstColorInfo->colorSpace(), kUnpremul_SkAlphaType };
+ SkColor4f color = fColor;
+ steps.apply(color.vec());
+ return GrFragmentProcessor::MakeColor(color.premul());
+}
+
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkColorShader::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer,
+ SkColor4f::FromColor(fColor).premul());
+ builder->endBlock();
+}
+
+void SkColor4Shader::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer, fColor.premul());
+ builder->endBlock();
+}
+#endif
+
+sk_sp<SkShader> SkShaders::Color(SkColor color) { return sk_make_sp<SkColorShader>(color); }
+
+sk_sp<SkShader> SkShaders::Color(const SkColor4f& color, sk_sp<SkColorSpace> space) {
+ if (!SkScalarsAreFinite(color.vec(), 4)) {
+ return nullptr;
+ }
+ return sk_make_sp<SkColor4Shader>(color, std::move(space));
+}
+
+void SkRegisterColor4ShaderFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkColor4Shader);
+}
+
+void SkRegisterColorShaderFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkColorShader);
+}
diff --git a/gfx/skia/skia/src/shaders/SkComposeShader.cpp b/gfx/skia/skia/src/shaders/SkComposeShader.cpp
new file mode 100644
index 0000000000..ee9a1ed1a9
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkComposeShader.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkString.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/SkColorData.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkBlenderBase.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/Blend.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+class SkShader_Blend final : public SkShaderBase {
+public:
+ SkShader_Blend(SkBlendMode mode, sk_sp<SkShader> dst, sk_sp<SkShader> src)
+ : fDst(std::move(dst))
+ , fSrc(std::move(src))
+ , fMode(mode) {}
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+protected:
+ SkShader_Blend(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms*,
+ SkArenaAlloc*) const override;
+
+private:
+ friend void ::SkRegisterComposeShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkShader_Blend)
+
+ sk_sp<SkShader> fDst;
+ sk_sp<SkShader> fSrc;
+ SkBlendMode fMode;
+
+ using INHERITED = SkShaderBase;
+};
+
+sk_sp<SkFlattenable> SkShader_Blend::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkShader> dst(buffer.readShader());
+ sk_sp<SkShader> src(buffer.readShader());
+ if (!buffer.validate(dst && src)) {
+ return nullptr;
+ }
+
+ unsigned mode = buffer.read32();
+
+ if (mode == kCustom_SkBlendMode) {
+ sk_sp<SkBlender> blender = buffer.readBlender();
+ if (buffer.validate(blender != nullptr)) {
+ return SkShaders::Blend(std::move(blender), std::move(dst), std::move(src));
+ }
+ } else {
+ if (buffer.validate(mode <= (unsigned)SkBlendMode::kLastMode)) {
+ return SkShaders::Blend(static_cast<SkBlendMode>(mode), std::move(dst), std::move(src));
+ }
+ }
+ return nullptr;
+}
+
+void SkShader_Blend::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fDst.get());
+ buffer.writeFlattenable(fSrc.get());
+ buffer.write32((int)fMode);
+}
+
+// Returns the output of e0, and leaves the output of e1 in r,g,b,a
+static float* append_two_shaders(const SkStageRec& rec,
+ const SkShaderBase::MatrixRec& mRec,
+ SkShader* s0,
+ SkShader* s1) {
+ struct Storage {
+ float fCoords[2 * SkRasterPipeline_kMaxStride];
+ float fRes0 [4 * SkRasterPipeline_kMaxStride];
+ };
+ auto storage = rec.fAlloc->make<Storage>();
+
+ // Note we cannot simply apply mRec here and then unconditionally store the coordinates. When
+ // building for Android Framework it would interrupt the backwards local matrix concatenation if
+ // mRec had a pending local matrix and either of the children also had a local matrix.
+ // b/256873449
+ if (mRec.rasterPipelineCoordsAreSeeded()) {
+ rec.fPipeline->append(SkRasterPipelineOp::store_src_rg, storage->fCoords);
+ }
+ if (!as_SB(s0)->appendStages(rec, mRec)) {
+ return nullptr;
+ }
+ rec.fPipeline->append(SkRasterPipelineOp::store_src, storage->fRes0);
+
+ if (mRec.rasterPipelineCoordsAreSeeded()) {
+ rec.fPipeline->append(SkRasterPipelineOp::load_src_rg, storage->fCoords);
+ }
+ if (!as_SB(s1)->appendStages(rec, mRec)) {
+ return nullptr;
+ }
+ return storage->fRes0;
+}
+
+bool SkShader_Blend::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+ float* res0 = append_two_shaders(rec, mRec, fDst.get(), fSrc.get());
+ if (!res0) {
+ return false;
+ }
+
+ rec.fPipeline->append(SkRasterPipelineOp::load_dst, res0);
+ SkBlendMode_AppendStages(fMode, rec.fPipeline);
+ return true;
+}
+
+skvm::Color SkShader_Blend::program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& cinfo,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ skvm::Color d,s;
+ if ((d = as_SB(fDst)->program(p, device, local, paint, mRec, cinfo, uniforms, alloc)) &&
+ (s = as_SB(fSrc)->program(p, device, local, paint, mRec, cinfo, uniforms, alloc))) {
+ return p->blend(fMode, s,d);
+ }
+ return {};
+}
+
+#if defined(SK_GANESH)
+
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/effects/GrBlendFragmentProcessor.h"
+
+std::unique_ptr<GrFragmentProcessor>
+SkShader_Blend::asFragmentProcessor(const GrFPArgs& args, const MatrixRec& mRec) const {
+ auto fpA = as_SB(fDst)->asFragmentProcessor(args, mRec);
+ auto fpB = as_SB(fSrc)->asFragmentProcessor(args, mRec);
+ if (!fpA || !fpB) {
+ // This is unexpected. Both src and dst shaders should be valid. Just fail.
+ return nullptr;
+ }
+ return GrBlendFragmentProcessor::Make(std::move(fpB), std::move(fpA), fMode);
+}
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkShader_Blend::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ SkSpan<const float> porterDuffConstants = skgpu::GetPorterDuffBlendConstants(fMode);
+ if (!porterDuffConstants.empty()) {
+ PorterDuffBlendShaderBlock::BeginBlock(keyContext, builder, gatherer,
+ {porterDuffConstants});
+ } else {
+ BlendShaderBlock::BeginBlock(keyContext, builder, gatherer, {fMode});
+ }
+
+ as_SB(fDst)->addToKey(keyContext, builder, gatherer);
+ as_SB(fSrc)->addToKey(keyContext, builder, gatherer);
+
+ builder->endBlock();
+}
+#endif
+
+sk_sp<SkShader> SkShaders::Blend(SkBlendMode mode, sk_sp<SkShader> dst, sk_sp<SkShader> src) {
+ if (!src || !dst) {
+ return nullptr;
+ }
+ switch (mode) {
+ case SkBlendMode::kClear: return Color(0);
+ case SkBlendMode::kDst: return dst;
+ case SkBlendMode::kSrc: return src;
+ default: break;
+ }
+ return sk_sp<SkShader>(new SkShader_Blend(mode, std::move(dst), std::move(src)));
+}
+
+sk_sp<SkShader> SkShaders::Blend(sk_sp<SkBlender> blender,
+ sk_sp<SkShader> dst,
+ sk_sp<SkShader> src) {
+ if (!src || !dst) {
+ return nullptr;
+ }
+ if (!blender) {
+ return SkShaders::Blend(SkBlendMode::kSrcOver, std::move(dst), std::move(src));
+ }
+ if (std::optional<SkBlendMode> mode = as_BB(blender)->asBlendMode()) {
+ return sk_make_sp<SkShader_Blend>(mode.value(), std::move(dst), std::move(src));
+ }
+
+#ifdef SK_ENABLE_SKSL
+ // This isn't a built-in blend mode; we might as well use a runtime effect to evaluate it.
+ static SkRuntimeEffect* sBlendEffect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform blender b;"
+ "uniform shader d, s;"
+ "half4 main(float2 xy) {"
+ "return b.eval(s.eval(xy), d.eval(xy));"
+ "}"
+ );
+ SkRuntimeEffect::ChildPtr children[] = {std::move(blender), std::move(dst), std::move(src)};
+ return sBlendEffect->makeShader(/*uniforms=*/{}, children);
+#else
+ // We need SkSL to render this blend.
+ return nullptr;
+#endif
+}
+
+void SkRegisterComposeShaderFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkShader_Blend);
+}
diff --git a/gfx/skia/skia/src/shaders/SkCoordClampShader.cpp b/gfx/skia/skia/src/shaders/SkCoordClampShader.cpp
new file mode 100644
index 0000000000..f65e7d9f10
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkCoordClampShader.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkFlattenable.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+#if defined(SK_GANESH)
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif // SK_GRAPHITE
+
+class SkShader_CoordClamp final : public SkShaderBase {
+public:
+ SkShader_CoordClamp(sk_sp<SkShader> shader, const SkRect& subset)
+ : fShader(std::move(shader)), fSubset(subset) {}
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+protected:
+ SkShader_CoordClamp(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms*,
+ SkArenaAlloc*) const override;
+
+private:
+ friend void ::SkRegisterCoordClampShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkShader_CoordClamp)
+
+ sk_sp<SkShader> fShader;
+ SkRect fSubset;
+};
+
+sk_sp<SkFlattenable> SkShader_CoordClamp::CreateProc(SkReadBuffer& buffer) {
+ sk_sp<SkShader> shader(buffer.readShader());
+ SkRect subset = buffer.readRect();
+ if (!buffer.validate(SkToBool(shader))) {
+ return nullptr;
+ }
+ return SkShaders::CoordClamp(std::move(shader), subset);
+}
+
+void SkShader_CoordClamp::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeFlattenable(fShader.get());
+ buffer.writeRect(fSubset);
+}
+
+bool SkShader_CoordClamp::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+ std::optional<MatrixRec> childMRec = mRec.apply(rec);
+ if (!childMRec.has_value()) {
+ return false;
+ }
+ // Strictly speaking, childMRec's total matrix is not valid. It is only valid inside the subset
+ // rectangle. However, we don't mark it as such because we want the "total matrix is valid"
+ // behavior in SkImageShader for filtering.
+ auto clampCtx = rec.fAlloc->make<SkRasterPipeline_CoordClampCtx>();
+ *clampCtx = {fSubset.fLeft, fSubset.fTop, fSubset.fRight, fSubset.fBottom};
+ rec.fPipeline->append(SkRasterPipelineOp::clamp_x_and_y, clampCtx);
+ return as_SB(fShader)->appendStages(rec, *childMRec);
+}
+
+skvm::Color SkShader_CoordClamp::program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& cinfo,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ std::optional<MatrixRec> childMRec = mRec.apply(p, &local, uniforms);
+ if (!childMRec.has_value()) {
+ return {};
+ }
+ // See comment in appendStages about not marking childMRec with an invalid total matrix.
+
+ auto l = uniforms->pushF(fSubset.left());
+ auto t = uniforms->pushF(fSubset.top());
+ auto r = uniforms->pushF(fSubset.right());
+ auto b = uniforms->pushF(fSubset.bottom());
+
+ local.x = p->clamp(local.x, p->uniformF(l), p->uniformF(r));
+ local.y = p->clamp(local.y, p->uniformF(t), p->uniformF(b));
+
+ return as_SB(fShader)->program(p, device, local, paint, *childMRec, cinfo, uniforms, alloc);
+}
+
+#if defined(SK_GANESH)
+std::unique_ptr<GrFragmentProcessor> SkShader_CoordClamp::asFragmentProcessor(
+ const GrFPArgs& args, const MatrixRec& mRec) const {
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform shader c;"
+ "uniform float4 s;"
+ "half4 main(float2 p) {"
+ "return c.eval(clamp(p, s.LT, s.RB));"
+ "}");
+
+ auto fp = as_SB(fShader)->asFragmentProcessor(args, mRec.applied());
+ if (!fp) {
+ return nullptr;
+ }
+
+ GrSkSLFP::OptFlags flags = GrSkSLFP::OptFlags::kNone;
+ if (fp->compatibleWithCoverageAsAlpha()) {
+ flags |= GrSkSLFP::OptFlags::kCompatibleWithCoverageAsAlpha;
+ }
+ if (fp->preservesOpaqueInput()) {
+ flags |= GrSkSLFP::OptFlags::kPreservesOpaqueInput;
+ }
+ fp = GrSkSLFP::Make(effect,
+ "clamp_fp",
+ /*inputFP=*/nullptr,
+ flags,
+ "c", std::move(fp),
+ "s", fSubset);
+ bool success;
+ std::tie(success, fp) = mRec.apply(std::move(fp));
+ return success ? std::move(fp) : nullptr;
+}
+#endif // defined(SK_GANESH)
+
+#if defined(SK_GRAPHITE)
+void SkShader_CoordClamp::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ CoordClampShaderBlock::CoordClampData data(fSubset);
+
+ CoordClampShaderBlock::BeginBlock(keyContext, builder, gatherer, &data);
+ as_SB(fShader)->addToKey(keyContext, builder, gatherer);
+ builder->endBlock();
+}
+#endif // SK_GRAPHITE
+
+void SkRegisterCoordClampShaderFlattenable() { SK_REGISTER_FLATTENABLE(SkShader_CoordClamp); }
+
+sk_sp<SkShader> SkShaders::CoordClamp(sk_sp<SkShader> shader, const SkRect& subset) {
+ if (!shader) {
+ return nullptr;
+ }
+ if (!subset.isSorted()) {
+ return nullptr;
+ }
+ return sk_make_sp<SkShader_CoordClamp>(std::move(shader), subset);
+}
diff --git a/gfx/skia/skia/src/shaders/SkEmptyShader.cpp b/gfx/skia/skia/src/shaders/SkEmptyShader.cpp
new file mode 100644
index 0000000000..94040338a8
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkEmptyShader.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/SkShaderBase.h"
+
+#include "include/core/SkFlattenable.h"
+#include "src/core/SkVM.h"
+
+/**
+ * \class SkEmptyShader
+ * A Shader that always draws nothing. Its createContext always returns nullptr.
+ */
+class SkEmptyShader : public SkShaderBase {
+public:
+ SkEmptyShader() {}
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override {
+ // Do nothing.
+ // We just don't want to fall through to SkShader::flatten(),
+ // which will write data we don't care to serialize or decode.
+ }
+
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override { return false; }
+
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord,
+ skvm::Coord,
+ skvm::Color,
+ const MatrixRec&,
+ const SkColorInfo&,
+ skvm::Uniforms*,
+ SkArenaAlloc*) const override;
+
+private:
+ friend void ::SkRegisterEmptyShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkEmptyShader)
+
+ using INHERITED = SkShaderBase;
+};
+
+skvm::Color SkEmptyShader::program(skvm::Builder*,
+ skvm::Coord,
+ skvm::Coord,
+ skvm::Color,
+ const MatrixRec&,
+ const SkColorInfo&,
+ skvm::Uniforms*,
+ SkArenaAlloc*) const {
+ return {}; // signal failure
+}
+
+sk_sp<SkFlattenable> SkEmptyShader::CreateProc(SkReadBuffer&) {
+ return SkShaders::Empty();
+}
+
+sk_sp<SkShader> SkShaders::Empty() { return sk_make_sp<SkEmptyShader>(); }
+
+void SkRegisterEmptyShaderFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkEmptyShader);
+}
diff --git a/gfx/skia/skia/src/shaders/SkGainmapShader.cpp b/gfx/skia/skia/src/shaders/SkGainmapShader.cpp
new file mode 100644
index 0000000000..1654709c2c
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkGainmapShader.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2023 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkGainmapShader.h"
+
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkShader.h"
+#include "include/effects/SkRuntimeEffect.h"
+#include "include/private/SkGainmapInfo.h"
+#include "src/core/SkColorFilterPriv.h"
+#include "src/core/SkImageInfoPriv.h"
+
+#ifdef SK_ENABLE_SKSL
+static constexpr char gGainmapSKSL[] =
+ "uniform shader base;"
+ "uniform shader gainmap;"
+ "uniform half4 logRatioMin;"
+ "uniform half4 logRatioMax;"
+ "uniform half4 gainmapGamma;"
+ "uniform half4 epsilonSdr;"
+ "uniform half4 epsilonHdr;"
+ "uniform half W;"
+ "uniform int gainmapIsAlpha;"
+ "uniform int gainmapIsRed;"
+ "uniform int singleChannel;"
+ "uniform int noGamma;"
+ ""
+ "half4 main(float2 coord) {"
+ " half4 S = base.eval(coord);"
+ " half4 G = gainmap.eval(coord);"
+ " if (gainmapIsAlpha == 1) {"
+ " G = half4(G.a, G.a, G.a, 1.0);"
+ " }"
+ " if (gainmapIsRed == 1) {"
+ " G = half4(G.r, G.r, G.r, 1.0);"
+ " }"
+ " if (singleChannel == 1) {"
+ " half L;"
+ " if (noGamma == 1) {"
+ " L = mix(logRatioMin.r, logRatioMax.r, G.r);"
+ " } else {"
+ " L = mix(logRatioMin.r, logRatioMax.r, pow(G.r, gainmapGamma.r));"
+ " }"
+ " half3 H = (S.rgb + epsilonSdr.rgb) * exp(L * W) - epsilonHdr.rgb;"
+ " return half4(H.r, H.g, H.b, S.a);"
+ " } else {"
+ " half3 L;"
+ " if (noGamma == 1) {"
+ " L = mix(logRatioMin.rgb, logRatioMax.rgb, G.rgb);"
+ " } else {"
+ " L = mix(logRatioMin.rgb, logRatioMax.rgb, pow(G.rgb, gainmapGamma.rgb));"
+ " }"
+ " half3 H = (S.rgb + epsilonSdr.rgb) * exp(L * W) - epsilonHdr.rgb;"
+ " return half4(H.r, H.g, H.b, S.a);"
+ " }"
+ "}";
+
+static sk_sp<SkRuntimeEffect> gainmap_apply_effect() {
+ static const SkRuntimeEffect* effect =
+ SkRuntimeEffect::MakeForShader(SkString(gGainmapSKSL), {}).effect.release();
+ SkASSERT(effect);
+ return sk_ref_sp(effect);
+}
+
+static bool all_channels_equal(const SkColor4f& c) {
+ return c.fR == c.fG && c.fR == c.fB;
+}
+#endif // SK_ENABLE_SKSL
+
+sk_sp<SkShader> SkGainmapShader::Make(const sk_sp<const SkImage>& baseImage,
+ const SkRect& baseRect,
+ const SkSamplingOptions& baseSamplingOptions,
+ const sk_sp<const SkImage>& gainmapImage,
+ const SkRect& gainmapRect,
+ const SkSamplingOptions& gainmapSamplingOptions,
+ const SkGainmapInfo& gainmapInfo,
+ const SkRect& dstRect,
+ float dstHdrRatio,
+ sk_sp<SkColorSpace> dstColorSpace) {
+#ifdef SK_ENABLE_SKSL
+ sk_sp<SkColorSpace> baseColorSpace =
+ baseImage->colorSpace() ? baseImage->refColorSpace() : SkColorSpace::MakeSRGB();
+
+ // Determine the color space in which the gainmap math is to be applied.
+ sk_sp<SkColorSpace> gainmapMathColorSpace = baseColorSpace->makeLinearGamma();
+ if (!dstColorSpace) {
+ dstColorSpace = SkColorSpace::MakeSRGB();
+ }
+
+ // Create a color filter to transform from the base image's color space to the color space in
+ // which the gainmap is to be applied.
+ auto colorXformSdrToGainmap =
+ SkColorFilterPriv::MakeColorSpaceXform(baseColorSpace, gainmapMathColorSpace);
+
+ // Create a color filter to transform from the color space in which the gainmap is applied to
+ // the destination color space.
+ auto colorXformGainmapToDst =
+ SkColorFilterPriv::MakeColorSpaceXform(gainmapMathColorSpace, dstColorSpace);
+
+ // The base image shader will convert into the color space in which the gainmap is applied.
+ const SkMatrix baseRectToDstRect = SkMatrix::RectToRect(baseRect, dstRect);
+ auto baseImageShader = baseImage->makeRawShader(baseSamplingOptions, &baseRectToDstRect)
+ ->makeWithColorFilter(colorXformSdrToGainmap);
+
+ // The gainmap image shader will ignore any color space that the gainmap has.
+ const SkMatrix gainmapRectToDstRect = SkMatrix::RectToRect(gainmapRect, dstRect);
+ auto gainmapImageShader =
+ gainmapImage->makeRawShader(gainmapSamplingOptions, &gainmapRectToDstRect);
+
+ // Create the shader to apply the gainmap.
+ sk_sp<SkShader> gainmapMathShader;
+ {
+ SkRuntimeShaderBuilder builder(gainmap_apply_effect());
+ const SkColor4f logRatioMin({sk_float_log(gainmapInfo.fGainmapRatioMin.fR),
+ sk_float_log(gainmapInfo.fGainmapRatioMin.fG),
+ sk_float_log(gainmapInfo.fGainmapRatioMin.fB),
+ 1.f});
+ const SkColor4f logRatioMax({sk_float_log(gainmapInfo.fGainmapRatioMax.fR),
+ sk_float_log(gainmapInfo.fGainmapRatioMax.fG),
+ sk_float_log(gainmapInfo.fGainmapRatioMax.fB),
+ 1.f});
+ const float Wunclamped =
+ (sk_float_log(dstHdrRatio) - sk_float_log(gainmapInfo.fDisplayRatioSdr)) /
+ (sk_float_log(gainmapInfo.fDisplayRatioHdr) -
+ sk_float_log(gainmapInfo.fDisplayRatioSdr));
+ const float W = std::max(std::min(Wunclamped, 1.f), 0.f);
+ const int noGamma =
+ gainmapInfo.fGainmapGamma.fR == 1.f &&
+ gainmapInfo.fGainmapGamma.fG == 1.f &&
+ gainmapInfo.fGainmapGamma.fB == 1.f;
+ const uint32_t colorTypeFlags = SkColorTypeChannelFlags(gainmapImage->colorType());
+ const int gainmapIsAlpha = colorTypeFlags == kAlpha_SkColorChannelFlag;
+ const int gainmapIsRed = colorTypeFlags == kRed_SkColorChannelFlag;
+ const int singleChannel = all_channels_equal(gainmapInfo.fGainmapGamma) &&
+ all_channels_equal(gainmapInfo.fGainmapRatioMin) &&
+ all_channels_equal(gainmapInfo.fGainmapRatioMax) &&
+ (colorTypeFlags == kGray_SkColorChannelFlag ||
+ colorTypeFlags == kAlpha_SkColorChannelFlag ||
+ colorTypeFlags == kRed_SkColorChannelFlag);
+ builder.child("base") = baseImageShader;
+ builder.child("gainmap") = gainmapImageShader;
+ builder.uniform("logRatioMin") = logRatioMin;
+ builder.uniform("logRatioMax") = logRatioMax;
+ builder.uniform("gainmapGamma") = gainmapInfo.fGainmapGamma;
+ builder.uniform("epsilonSdr") = gainmapInfo.fEpsilonSdr;
+ builder.uniform("epsilonHdr") = gainmapInfo.fEpsilonHdr;
+ builder.uniform("noGamma") = noGamma;
+ builder.uniform("singleChannel") = singleChannel;
+ builder.uniform("gainmapIsAlpha") = gainmapIsAlpha;
+ builder.uniform("gainmapIsRed") = gainmapIsRed;
+ builder.uniform("W") = W;
+ gainmapMathShader = builder.makeShader();
+ SkASSERT(gainmapMathShader);
+ }
+
+ // Return a shader that will apply the gainmap and then convert to the destination color space.
+ return gainmapMathShader->makeWithColorFilter(colorXformGainmapToDst);
+#else
+ // This shader is currently only implemented using SkSL.
+ return nullptr;
+#endif
+}
diff --git a/gfx/skia/skia/src/shaders/SkImageShader.cpp b/gfx/skia/skia/src/shaders/SkImageShader.cpp
new file mode 100644
index 0000000000..7e4d520f37
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkImageShader.cpp
@@ -0,0 +1,1142 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/SkImageShader.h"
+
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkMipmapAccessor.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/image/SkImage_Base.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+#include "src/shaders/SkTransformShader.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/ImageUtils.h"
+#include "src/gpu/graphite/Image_Graphite.h"
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/Log.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#include "src/gpu/graphite/ReadSwizzle.h"
+#include "src/gpu/graphite/TextureProxyView.h"
+
+
+static skgpu::graphite::ReadSwizzle swizzle_class_to_read_enum(const skgpu::Swizzle& swizzle) {
+ if (swizzle == skgpu::Swizzle::RGBA()) {
+ return skgpu::graphite::ReadSwizzle::kRGBA;
+ } else if (swizzle == skgpu::Swizzle::RGB1()) {
+ return skgpu::graphite::ReadSwizzle::kRGB1;
+ } else if (swizzle == skgpu::Swizzle("rrrr")) {
+ return skgpu::graphite::ReadSwizzle::kRRRR;
+ } else if (swizzle == skgpu::Swizzle("rrr1")) {
+ return skgpu::graphite::ReadSwizzle::kRRR1;
+ } else if (swizzle == skgpu::Swizzle::BGRA()) {
+ return skgpu::graphite::ReadSwizzle::kBGRA;
+ } else {
+ SKGPU_LOG_W("%s is an unsupported read swizzle. Defaulting to RGBA.\n",
+ swizzle.asString().data());
+ return skgpu::graphite::ReadSwizzle::kRGBA;
+ }
+}
+#endif
+
+SkM44 SkImageShader::CubicResamplerMatrix(float B, float C) {
+#if 0
+ constexpr SkM44 kMitchell = SkM44( 1.f/18.f, -9.f/18.f, 15.f/18.f, -7.f/18.f,
+ 16.f/18.f, 0.f/18.f, -36.f/18.f, 21.f/18.f,
+ 1.f/18.f, 9.f/18.f, 27.f/18.f, -21.f/18.f,
+ 0.f/18.f, 0.f/18.f, -6.f/18.f, 7.f/18.f);
+
+ constexpr SkM44 kCatmull = SkM44(0.0f, -0.5f, 1.0f, -0.5f,
+ 1.0f, 0.0f, -2.5f, 1.5f,
+ 0.0f, 0.5f, 2.0f, -1.5f,
+ 0.0f, 0.0f, -0.5f, 0.5f);
+
+ if (B == 1.0f/3 && C == 1.0f/3) {
+ return kMitchell;
+ }
+ if (B == 0 && C == 0.5f) {
+ return kCatmull;
+ }
+#endif
+ return SkM44( (1.f/6)*B, -(3.f/6)*B - C, (3.f/6)*B + 2*C, - (1.f/6)*B - C,
+ 1 - (2.f/6)*B, 0, -3 + (12.f/6)*B + C, 2 - (9.f/6)*B - C,
+ (1.f/6)*B, (3.f/6)*B + C, 3 - (15.f/6)*B - 2*C, -2 + (9.f/6)*B + C,
+ 0, 0, -C, (1.f/6)*B + C);
+}
+
+/**
+ * We are faster in clamp, so always use that tiling when we can.
+ */
+static SkTileMode optimize(SkTileMode tm, int dimension) {
+ SkASSERT(dimension > 0);
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+ // need to update frameworks/base/libs/hwui/tests/unit/SkiaBehaviorTests.cpp:55 to allow
+ // for transforming to clamp.
+ return tm;
+#else
+ // mirror and repeat on a 1px axis are the same as clamping, but decal will still transition to
+ // transparent black.
+ return (tm != SkTileMode::kDecal && dimension == 1) ? SkTileMode::kClamp : tm;
+#endif
+}
+
+// TODO: currently this only *always* used in asFragmentProcessor(), which is excluded on no-gpu
+// builds. No-gpu builds only use needs_subset() in asserts, so release+no-gpu doesn't use it, which
+// can cause builds to fail if unused warnings are treated as errors.
+[[maybe_unused]] static bool needs_subset(SkImage* img, const SkRect& subset) {
+ return subset != SkRect::Make(img->dimensions());
+}
+
+SkImageShader::SkImageShader(sk_sp<SkImage> img,
+ const SkRect& subset,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& sampling,
+ bool raw,
+ bool clampAsIfUnpremul)
+ : fImage(std::move(img))
+ , fSampling(sampling)
+ , fTileModeX(optimize(tmx, fImage->width()))
+ , fTileModeY(optimize(tmy, fImage->height()))
+ , fSubset(subset)
+ , fRaw(raw)
+ , fClampAsIfUnpremul(clampAsIfUnpremul) {
+ // These options should never appear together:
+ SkASSERT(!fRaw || !fClampAsIfUnpremul);
+
+ // Bicubic filtering of raw image shaders would add a surprising clamp - so we don't support it
+ SkASSERT(!fRaw || !fSampling.useCubic);
+}
+
+// just used for legacy-unflattening
+enum class LegacyFilterEnum {
+ kNone,
+ kLow,
+ kMedium,
+ kHigh,
+ // this is the special value for backward compatibility
+ kInheritFromPaint,
+ // this signals we should use the new SkFilterOptions
+ kUseFilterOptions,
+ // use cubic and ignore FilterOptions
+ kUseCubicResampler,
+
+ kLast = kUseCubicResampler,
+};
+
+// fClampAsIfUnpremul is always false when constructed through public APIs,
+// so there's no need to read or write it here.
+
+sk_sp<SkFlattenable> SkImageShader::CreateProc(SkReadBuffer& buffer) {
+ auto tmx = buffer.read32LE<SkTileMode>(SkTileMode::kLastTileMode);
+ auto tmy = buffer.read32LE<SkTileMode>(SkTileMode::kLastTileMode);
+
+ SkSamplingOptions sampling;
+ bool readSampling = true;
+ if (buffer.isVersionLT(SkPicturePriv::kNoFilterQualityShaders_Version) &&
+ !buffer.readBool() /* legacy has_sampling */)
+ {
+ readSampling = false;
+ // we just default to Nearest in sampling
+ }
+ if (readSampling) {
+ sampling = buffer.readSampling();
+ }
+
+ SkMatrix localMatrix;
+ if (buffer.isVersionLT(SkPicturePriv::Version::kNoShaderLocalMatrix)) {
+ buffer.readMatrix(&localMatrix);
+ }
+ sk_sp<SkImage> img = buffer.readImage();
+ if (!img) {
+ return nullptr;
+ }
+
+ bool raw = buffer.isVersionLT(SkPicturePriv::Version::kRawImageShaders) ? false
+ : buffer.readBool();
+
+ // TODO(skbug.com/12784): Subset is not serialized yet; it's only used by special images so it
+ // will never be written to an SKP.
+
+ return raw ? SkImageShader::MakeRaw(std::move(img), tmx, tmy, sampling, &localMatrix)
+ : SkImageShader::Make(std::move(img), tmx, tmy, sampling, &localMatrix);
+}
+
+void SkImageShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeUInt((unsigned)fTileModeX);
+ buffer.writeUInt((unsigned)fTileModeY);
+
+ buffer.writeSampling(fSampling);
+
+ buffer.writeImage(fImage.get());
+ SkASSERT(fClampAsIfUnpremul == false);
+
+ // TODO(skbug.com/12784): Subset is not serialized yet; it's only used by special images so it
+ // will never be written to an SKP.
+ SkASSERT(!needs_subset(fImage.get(), fSubset));
+
+ buffer.writeBool(fRaw);
+}
+
+bool SkImageShader::isOpaque() const {
+ return fImage->isOpaque() &&
+ fTileModeX != SkTileMode::kDecal && fTileModeY != SkTileMode::kDecal;
+}
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+
+static bool legacy_shader_can_handle(const SkMatrix& inv) {
+ SkASSERT(!inv.hasPerspective());
+
+ // Scale+translate methods are always present, but affine might not be.
+ if (!SkOpts::S32_alpha_D32_filter_DXDY && !inv.isScaleTranslate()) {
+ return false;
+ }
+
+ // legacy code uses SkFixed 32.32, so ensure the inverse doesn't map device coordinates
+ // out of range.
+ const SkScalar max_dev_coord = 32767.0f;
+ const SkRect src = inv.mapRect(SkRect::MakeWH(max_dev_coord, max_dev_coord));
+
+ // take 1/4 of max signed 32bits so we have room to subtract local values
+ const SkScalar max_fixed32dot32 = float(SK_MaxS32) * 0.25f;
+ if (!SkRect::MakeLTRB(-max_fixed32dot32, -max_fixed32dot32,
+ +max_fixed32dot32, +max_fixed32dot32).contains(src)) {
+ return false;
+ }
+
+ // legacy shader impl should be able to handle these matrices
+ return true;
+}
+
+SkShaderBase::Context* SkImageShader::onMakeContext(const ContextRec& rec,
+ SkArenaAlloc* alloc) const {
+ SkASSERT(!needs_subset(fImage.get(), fSubset)); // TODO(skbug.com/12784)
+ if (fImage->alphaType() == kUnpremul_SkAlphaType) {
+ return nullptr;
+ }
+ if (fImage->colorType() != kN32_SkColorType) {
+ return nullptr;
+ }
+ if (fTileModeX != fTileModeY) {
+ return nullptr;
+ }
+ if (fTileModeX == SkTileMode::kDecal || fTileModeY == SkTileMode::kDecal) {
+ return nullptr;
+ }
+
+ SkSamplingOptions sampling = fSampling;
+ if (sampling.isAniso()) {
+ sampling = SkSamplingPriv::AnisoFallback(fImage->hasMipmaps());
+ }
+
+ auto supported = [](const SkSamplingOptions& sampling) {
+ const std::tuple<SkFilterMode,SkMipmapMode> supported[] = {
+ {SkFilterMode::kNearest, SkMipmapMode::kNone}, // legacy None
+ {SkFilterMode::kLinear, SkMipmapMode::kNone}, // legacy Low
+ {SkFilterMode::kLinear, SkMipmapMode::kNearest}, // legacy Medium
+ };
+ for (auto [f, m] : supported) {
+ if (sampling.filter == f && sampling.mipmap == m) {
+ return true;
+ }
+ }
+ return false;
+ };
+ if (sampling.useCubic || !supported(sampling)) {
+ return nullptr;
+ }
+
+ // SkBitmapProcShader stores bitmap coordinates in a 16bit buffer,
+ // so it can't handle bitmaps larger than 65535.
+ //
+ // We back off another bit to 32767 to make small amounts of
+ // intermediate math safe, e.g. in
+ //
+ // SkFixed fx = ...;
+ // fx = tile(fx + SK_Fixed1);
+ //
+ // we want to make sure (fx + SK_Fixed1) never overflows.
+ if (fImage-> width() > 32767 ||
+ fImage->height() > 32767) {
+ return nullptr;
+ }
+
+ SkMatrix inv;
+ if (!this->computeTotalInverse(*rec.fMatrix, rec.fLocalMatrix, &inv) ||
+ !legacy_shader_can_handle(inv)) {
+ return nullptr;
+ }
+
+ if (!rec.isLegacyCompatible(fImage->colorSpace())) {
+ return nullptr;
+ }
+
+ return SkBitmapProcLegacyShader::MakeContext(*this, fTileModeX, fTileModeY, sampling,
+ as_IB(fImage.get()), rec, alloc);
+}
+#endif
+
+SkImage* SkImageShader::onIsAImage(SkMatrix* texM, SkTileMode xy[]) const {
+ if (texM) {
+ *texM = SkMatrix::I();
+ }
+ if (xy) {
+ xy[0] = fTileModeX;
+ xy[1] = fTileModeY;
+ }
+ return const_cast<SkImage*>(fImage.get());
+}
+
+sk_sp<SkShader> SkImageShader::Make(sk_sp<SkImage> image,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& options,
+ const SkMatrix* localMatrix,
+ bool clampAsIfUnpremul) {
+ SkRect subset = image ? SkRect::Make(image->dimensions()) : SkRect::MakeEmpty();
+ return MakeSubset(std::move(image), subset, tmx, tmy, options, localMatrix, clampAsIfUnpremul);
+}
+
+sk_sp<SkShader> SkImageShader::MakeRaw(sk_sp<SkImage> image,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& options,
+ const SkMatrix* localMatrix) {
+ if (options.useCubic) {
+ return nullptr;
+ }
+ if (!image) {
+ return SkShaders::Empty();
+ }
+ auto subset = SkRect::Make(image->dimensions());
+ return SkLocalMatrixShader::MakeWrapped<SkImageShader>(localMatrix,
+ image,
+ subset,
+ tmx, tmy,
+ options,
+ /*raw=*/true,
+ /*clampAsIfUnpremul=*/false);
+}
+
+sk_sp<SkShader> SkImageShader::MakeSubset(sk_sp<SkImage> image,
+ const SkRect& subset,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& options,
+ const SkMatrix* localMatrix,
+ bool clampAsIfUnpremul) {
+ auto is_unit = [](float x) {
+ return x >= 0 && x <= 1;
+ };
+ if (options.useCubic) {
+ if (!is_unit(options.cubic.B) || !is_unit(options.cubic.C)) {
+ return nullptr;
+ }
+ }
+ if (!image || subset.isEmpty()) {
+ return SkShaders::Empty();
+ }
+
+ // Validate subset and check if we can drop it
+ if (!SkRect::Make(image->bounds()).contains(subset)) {
+ return nullptr;
+ }
+ // TODO(skbug.com/12784): GPU-only for now since it's only supported in onAsFragmentProcessor()
+ SkASSERT(!needs_subset(image.get(), subset) || image->isTextureBacked());
+ return SkLocalMatrixShader::MakeWrapped<SkImageShader>(localMatrix,
+ std::move(image),
+ subset,
+ tmx, tmy,
+ options,
+ /*raw=*/false,
+ clampAsIfUnpremul);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/effects/GrBlendFragmentProcessor.h"
+
+std::unique_ptr<GrFragmentProcessor>
+SkImageShader::asFragmentProcessor(const GrFPArgs& args, const MatrixRec& mRec) const {
+ SkTileMode tileModes[2] = {fTileModeX, fTileModeY};
+ const SkRect* subset = needs_subset(fImage.get(), fSubset) ? &fSubset : nullptr;
+ auto fp = as_IB(fImage.get())->asFragmentProcessor(args.fContext,
+ fSampling,
+ tileModes,
+ SkMatrix::I(),
+ subset);
+ if (!fp) {
+ return nullptr;
+ }
+
+ bool success;
+ std::tie(success, fp) = mRec.apply(std::move(fp));
+ if (!success) {
+ return nullptr;
+ }
+
+ if (!fRaw) {
+ fp = GrColorSpaceXformEffect::Make(std::move(fp),
+ fImage->colorSpace(),
+ fImage->alphaType(),
+ args.fDstColorInfo->colorSpace(),
+ kPremul_SkAlphaType);
+
+ if (fImage->isAlphaOnly()) {
+ fp = GrBlendFragmentProcessor::Make<SkBlendMode::kDstIn>(std::move(fp), nullptr);
+ }
+ }
+
+ return fp;
+}
+
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkImageShader::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ ImageShaderBlock::ImageData imgData(fSampling, fTileModeX, fTileModeY, fSubset,
+ ReadSwizzle::kRGBA);
+
+ auto [ imageToDraw, newSampling ] = skgpu::graphite::GetGraphiteBacked(keyContext.recorder(),
+ fImage.get(),
+ fSampling);
+
+ if (imageToDraw) {
+ imgData.fSampling = newSampling;
+ skgpu::Mipmapped mipmapped = (newSampling.mipmap != SkMipmapMode::kNone)
+ ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
+
+ auto [view, _] = as_IB(imageToDraw)->asView(keyContext.recorder(), mipmapped);
+ imgData.fTextureProxy = view.refProxy();
+ skgpu::Swizzle readSwizzle = view.swizzle();
+ // If the color type is alpha-only, propagate the alpha value to the other channels.
+ if (imageToDraw->isAlphaOnly()) {
+ readSwizzle = skgpu::Swizzle::Concat(readSwizzle, skgpu::Swizzle("aaaa"));
+ }
+ imgData.fReadSwizzle = swizzle_class_to_read_enum(readSwizzle);
+ }
+
+ if (!fRaw) {
+ imgData.fSteps = SkColorSpaceXformSteps(fImage->colorSpace(),
+ fImage->alphaType(),
+ keyContext.dstColorInfo().colorSpace(),
+ keyContext.dstColorInfo().alphaType());
+
+ if (fImage->isAlphaOnly()) {
+ SkSpan<const float> constants = skgpu::GetPorterDuffBlendConstants(SkBlendMode::kDstIn);
+ // expects dst, src
+ PorterDuffBlendShaderBlock::BeginBlock(keyContext, builder, gatherer,
+ {constants});
+
+ // dst
+ SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer,
+ keyContext.paintColor());
+ builder->endBlock();
+
+ // src
+ ImageShaderBlock::BeginBlock(keyContext, builder, gatherer, &imgData);
+ builder->endBlock();
+
+ builder->endBlock();
+ return;
+ }
+ }
+
+ ImageShaderBlock::BeginBlock(keyContext, builder, gatherer, &imgData);
+ builder->endBlock();
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#include "src/core/SkImagePriv.h"
+
+sk_sp<SkShader> SkMakeBitmapShaderForPaint(const SkPaint& paint, const SkBitmap& src,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions& sampling,
+ const SkMatrix* localMatrix, SkCopyPixelsMode mode) {
+ auto s = SkImageShader::Make(SkMakeImageFromRasterBitmap(src, mode),
+ tmx, tmy, sampling, localMatrix);
+ if (!s) {
+ return nullptr;
+ }
+ if (SkColorTypeIsAlphaOnly(src.colorType()) && paint.getShader()) {
+ // Compose the image shader with the paint's shader. Alpha images+shaders should output the
+ // texture's alpha multiplied by the shader's color. DstIn (d*sa) will achieve this with
+ // the source image and dst shader (MakeBlend takes dst first, src second).
+ s = SkShaders::Blend(SkBlendMode::kDstIn, paint.refShader(), std::move(s));
+ }
+ return s;
+}
+
+void SkShaderBase::RegisterFlattenables() { SK_REGISTER_FLATTENABLE(SkImageShader); }
+
+namespace {
+
+struct MipLevelHelper {
+ SkPixmap pm;
+ SkMatrix inv;
+ SkRasterPipeline_GatherCtx* gather;
+ SkRasterPipeline_TileCtx* limitX;
+ SkRasterPipeline_TileCtx* limitY;
+ SkRasterPipeline_DecalTileCtx* decalCtx = nullptr;
+
+ void allocAndInit(SkArenaAlloc* alloc,
+ const SkSamplingOptions& sampling,
+ SkTileMode tileModeX,
+ SkTileMode tileModeY) {
+ gather = alloc->make<SkRasterPipeline_GatherCtx>();
+ gather->pixels = pm.addr();
+ gather->stride = pm.rowBytesAsPixels();
+ gather->width = pm.width();
+ gather->height = pm.height();
+
+ if (sampling.useCubic) {
+ SkImageShader::CubicResamplerMatrix(sampling.cubic.B, sampling.cubic.C)
+ .getColMajor(gather->weights);
+ }
+
+ limitX = alloc->make<SkRasterPipeline_TileCtx>();
+ limitY = alloc->make<SkRasterPipeline_TileCtx>();
+ limitX->scale = pm.width();
+ limitX->invScale = 1.0f / pm.width();
+ limitY->scale = pm.height();
+ limitY->invScale = 1.0f / pm.height();
+
+ // We would like an image that is mapped 1:1 with device pixels but at a half pixel offset
+ // to select every pixel from the src image once. Our rasterizer biases upward. That is a
+ // rect from 0.5...1.5 fills pixel 1 and not pixel 0. So we make exact integer pixel sample
+ // values select the pixel to the left/above the integer value.
+ //
+ // Note that a mirror mapping between canvas and image space will not have this property -
+ // on one side of the image a row/column will be skipped and one repeated on the other side.
+ //
+ // The GM nearest_half_pixel_image tests both of the above scenarios.
+ //
+ // The implementation of SkTileMode::kMirror also modifies integer pixel snapping to create
+ // consistency when the sample coords are running backwards and must account for gather
+ // modification we perform here. The GM mirror_tile tests this.
+ if (!sampling.useCubic && sampling.filter == SkFilterMode::kNearest) {
+ gather->roundDownAtInteger = true;
+ limitX->mirrorBiasDir = limitY->mirrorBiasDir = 1;
+ }
+
+ if (tileModeX == SkTileMode::kDecal || tileModeY == SkTileMode::kDecal) {
+ decalCtx = alloc->make<SkRasterPipeline_DecalTileCtx>();
+ decalCtx->limit_x = limitX->scale;
+ decalCtx->limit_y = limitY->scale;
+
+ // When integer sample coords snap left/up then we want the right/bottom edge of the
+ // image bounds to be inside the image rather than the left/top edge, that is (0, w]
+ // rather than [0, w).
+ if (gather->roundDownAtInteger) {
+ decalCtx->inclusiveEdge_x = decalCtx->limit_x;
+ decalCtx->inclusiveEdge_y = decalCtx->limit_y;
+ }
+ }
+ }
+};
+
+} // namespace
+
+static SkSamplingOptions tweak_sampling(SkSamplingOptions sampling, const SkMatrix& matrix) {
+ SkFilterMode filter = sampling.filter;
+
+ // When the matrix is just an integer translate, bilerp == nearest neighbor.
+ if (filter == SkFilterMode::kLinear &&
+ matrix.getType() <= SkMatrix::kTranslate_Mask &&
+ matrix.getTranslateX() == (int)matrix.getTranslateX() &&
+ matrix.getTranslateY() == (int)matrix.getTranslateY()) {
+ filter = SkFilterMode::kNearest;
+ }
+
+ return SkSamplingOptions(filter, sampling.mipmap);
+}
+
+bool SkImageShader::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+ SkASSERT(!needs_subset(fImage.get(), fSubset)); // TODO(skbug.com/12784)
+
+ // We only support certain sampling options in stages so far
+ auto sampling = fSampling;
+ if (sampling.isAniso()) {
+ sampling = SkSamplingPriv::AnisoFallback(fImage->hasMipmaps());
+ }
+
+ SkRasterPipeline* p = rec.fPipeline;
+ SkArenaAlloc* alloc = rec.fAlloc;
+
+ SkMatrix baseInv;
+ // If the total matrix isn't valid then we will always access the base MIP level.
+ if (mRec.totalMatrixIsValid()) {
+ if (!mRec.totalInverse(&baseInv)) {
+ return false;
+ }
+ baseInv.normalizePerspective();
+ }
+
+ SkASSERT(!sampling.useCubic || sampling.mipmap == SkMipmapMode::kNone);
+ auto* access = SkMipmapAccessor::Make(alloc, fImage.get(), baseInv, sampling.mipmap);
+ if (!access) {
+ return false;
+ }
+
+ MipLevelHelper upper;
+ std::tie(upper.pm, upper.inv) = access->level();
+
+ if (!sampling.useCubic) {
+ // TODO: can tweak_sampling sometimes for cubic too when B=0
+ if (mRec.totalMatrixIsValid()) {
+ sampling = tweak_sampling(sampling, SkMatrix::Concat(upper.inv, baseInv));
+ }
+ }
+
+ if (!mRec.apply(rec, upper.inv)) {
+ return false;
+ }
+
+ upper.allocAndInit(alloc, sampling, fTileModeX, fTileModeY);
+
+ MipLevelHelper lower;
+ SkRasterPipeline_MipmapCtx* mipmapCtx = nullptr;
+ float lowerWeight = access->lowerWeight();
+ if (lowerWeight > 0) {
+ std::tie(lower.pm, lower.inv) = access->lowerLevel();
+ mipmapCtx = alloc->make<SkRasterPipeline_MipmapCtx>();
+ mipmapCtx->lowerWeight = lowerWeight;
+ mipmapCtx->scaleX = static_cast<float>(lower.pm.width()) / upper.pm.width();
+ mipmapCtx->scaleY = static_cast<float>(lower.pm.height()) / upper.pm.height();
+
+ lower.allocAndInit(alloc, sampling, fTileModeX, fTileModeY);
+
+ p->append(SkRasterPipelineOp::mipmap_linear_init, mipmapCtx);
+ }
+
+ const bool decalBothAxes = fTileModeX == SkTileMode::kDecal && fTileModeY == SkTileMode::kDecal;
+
+ auto append_tiling_and_gather = [&](const MipLevelHelper* level) {
+ if (decalBothAxes) {
+ p->append(SkRasterPipelineOp::decal_x_and_y, level->decalCtx);
+ } else {
+ switch (fTileModeX) {
+ case SkTileMode::kClamp: /* The gather_xxx stage will clamp for us. */
+ break;
+ case SkTileMode::kMirror:
+ p->append(SkRasterPipelineOp::mirror_x, level->limitX);
+ break;
+ case SkTileMode::kRepeat:
+ p->append(SkRasterPipelineOp::repeat_x, level->limitX);
+ break;
+ case SkTileMode::kDecal:
+ p->append(SkRasterPipelineOp::decal_x, level->decalCtx);
+ break;
+ }
+ switch (fTileModeY) {
+ case SkTileMode::kClamp: /* The gather_xxx stage will clamp for us. */
+ break;
+ case SkTileMode::kMirror:
+ p->append(SkRasterPipelineOp::mirror_y, level->limitY);
+ break;
+ case SkTileMode::kRepeat:
+ p->append(SkRasterPipelineOp::repeat_y, level->limitY);
+ break;
+ case SkTileMode::kDecal:
+ p->append(SkRasterPipelineOp::decal_y, level->decalCtx);
+ break;
+ }
+ }
+
+ void* ctx = level->gather;
+ switch (level->pm.colorType()) {
+ case kAlpha_8_SkColorType: p->append(SkRasterPipelineOp::gather_a8, ctx); break;
+ case kA16_unorm_SkColorType: p->append(SkRasterPipelineOp::gather_a16, ctx); break;
+ case kA16_float_SkColorType: p->append(SkRasterPipelineOp::gather_af16, ctx); break;
+ case kRGB_565_SkColorType: p->append(SkRasterPipelineOp::gather_565, ctx); break;
+ case kARGB_4444_SkColorType: p->append(SkRasterPipelineOp::gather_4444, ctx); break;
+ case kR8G8_unorm_SkColorType: p->append(SkRasterPipelineOp::gather_rg88, ctx); break;
+ case kR16G16_unorm_SkColorType: p->append(SkRasterPipelineOp::gather_rg1616,ctx); break;
+ case kR16G16_float_SkColorType: p->append(SkRasterPipelineOp::gather_rgf16, ctx); break;
+ case kRGBA_8888_SkColorType: p->append(SkRasterPipelineOp::gather_8888, ctx); break;
+
+ case kRGBA_1010102_SkColorType:
+ p->append(SkRasterPipelineOp::gather_1010102, ctx);
+ break;
+
+ case kR16G16B16A16_unorm_SkColorType:
+ p->append(SkRasterPipelineOp::gather_16161616, ctx);
+ break;
+
+ case kRGBA_F16Norm_SkColorType:
+ case kRGBA_F16_SkColorType: p->append(SkRasterPipelineOp::gather_f16, ctx); break;
+ case kRGBA_F32_SkColorType: p->append(SkRasterPipelineOp::gather_f32, ctx); break;
+
+ case kGray_8_SkColorType: p->append(SkRasterPipelineOp::gather_a8, ctx);
+ p->append(SkRasterPipelineOp::alpha_to_gray ); break;
+
+ case kR8_unorm_SkColorType: p->append(SkRasterPipelineOp::gather_a8, ctx);
+ p->append(SkRasterPipelineOp::alpha_to_red ); break;
+
+ case kRGB_888x_SkColorType: p->append(SkRasterPipelineOp::gather_8888, ctx);
+ p->append(SkRasterPipelineOp::force_opaque ); break;
+
+ case kBGRA_1010102_SkColorType:
+ p->append(SkRasterPipelineOp::gather_1010102, ctx);
+ p->append(SkRasterPipelineOp::swap_rb);
+ break;
+
+ case kRGB_101010x_SkColorType:
+ p->append(SkRasterPipelineOp::gather_1010102, ctx);
+ p->append(SkRasterPipelineOp::force_opaque);
+ break;
+
+ case kBGR_101010x_XR_SkColorType:
+ SkASSERT(false);
+ break;
+
+ case kBGR_101010x_SkColorType:
+ p->append(SkRasterPipelineOp::gather_1010102, ctx);
+ p->append(SkRasterPipelineOp::force_opaque);
+ p->append(SkRasterPipelineOp::swap_rb);
+ break;
+
+ case kBGRA_8888_SkColorType:
+ p->append(SkRasterPipelineOp::gather_8888, ctx);
+ p->append(SkRasterPipelineOp::swap_rb);
+ break;
+
+ case kSRGBA_8888_SkColorType:
+ p->append(SkRasterPipelineOp::gather_8888, ctx);
+ p->append_transfer_function(*skcms_sRGB_TransferFunction());
+ break;
+
+ case kUnknown_SkColorType: SkASSERT(false);
+ }
+ if (level->decalCtx) {
+ p->append(SkRasterPipelineOp::check_decal_mask, level->decalCtx);
+ }
+ };
+
+ auto append_misc = [&] {
+ SkColorSpace* cs = upper.pm.colorSpace();
+ SkAlphaType at = upper.pm.alphaType();
+
+ // Color for alpha-only images comes from the paint (already converted to dst color space).
+ if (SkColorTypeIsAlphaOnly(upper.pm.colorType()) && !fRaw) {
+ p->append_set_rgb(alloc, rec.fPaintColor);
+
+ cs = rec.fDstCS;
+ at = kUnpremul_SkAlphaType;
+ }
+
+ // Bicubic filtering naturally produces out of range values on both sides of [0,1].
+ if (sampling.useCubic) {
+ p->append(at == kUnpremul_SkAlphaType || fClampAsIfUnpremul
+ ? SkRasterPipelineOp::clamp_01
+ : SkRasterPipelineOp::clamp_gamut);
+ }
+
+ // Transform color space and alpha type to match shader convention (dst CS, premul alpha).
+ if (!fRaw) {
+ alloc->make<SkColorSpaceXformSteps>(cs, at, rec.fDstCS, kPremul_SkAlphaType)->apply(p);
+ }
+
+ return true;
+ };
+
+ // Check for fast-path stages.
+ // TODO: Could we use the fast-path stages for each level when doing linear mipmap filtering?
+ SkColorType ct = upper.pm.colorType();
+ if (true
+ && (ct == kRGBA_8888_SkColorType || ct == kBGRA_8888_SkColorType)
+ && !sampling.useCubic && sampling.filter == SkFilterMode::kLinear
+ && sampling.mipmap != SkMipmapMode::kLinear
+ && fTileModeX == SkTileMode::kClamp && fTileModeY == SkTileMode::kClamp) {
+
+ p->append(SkRasterPipelineOp::bilerp_clamp_8888, upper.gather);
+ if (ct == kBGRA_8888_SkColorType) {
+ p->append(SkRasterPipelineOp::swap_rb);
+ }
+ return append_misc();
+ }
+ if (true
+ && (ct == kRGBA_8888_SkColorType || ct == kBGRA_8888_SkColorType)
+ && sampling.useCubic
+ && fTileModeX == SkTileMode::kClamp && fTileModeY == SkTileMode::kClamp) {
+
+ p->append(SkRasterPipelineOp::bicubic_clamp_8888, upper.gather);
+ if (ct == kBGRA_8888_SkColorType) {
+ p->append(SkRasterPipelineOp::swap_rb);
+ }
+ return append_misc();
+ }
+
+ // This context can be shared by both levels when doing linear mipmap filtering
+ SkRasterPipeline_SamplerCtx* sampler = alloc->make<SkRasterPipeline_SamplerCtx>();
+
+ auto sample = [&](SkRasterPipelineOp setup_x,
+ SkRasterPipelineOp setup_y,
+ const MipLevelHelper* level) {
+ p->append(setup_x, sampler);
+ p->append(setup_y, sampler);
+ append_tiling_and_gather(level);
+ p->append(SkRasterPipelineOp::accumulate, sampler);
+ };
+
+ auto sample_level = [&](const MipLevelHelper* level) {
+ if (sampling.useCubic) {
+ CubicResamplerMatrix(sampling.cubic.B, sampling.cubic.C).getColMajor(sampler->weights);
+
+ p->append(SkRasterPipelineOp::bicubic_setup, sampler);
+
+ sample(SkRasterPipelineOp::bicubic_n3x, SkRasterPipelineOp::bicubic_n3y, level);
+ sample(SkRasterPipelineOp::bicubic_n1x, SkRasterPipelineOp::bicubic_n3y, level);
+ sample(SkRasterPipelineOp::bicubic_p1x, SkRasterPipelineOp::bicubic_n3y, level);
+ sample(SkRasterPipelineOp::bicubic_p3x, SkRasterPipelineOp::bicubic_n3y, level);
+
+ sample(SkRasterPipelineOp::bicubic_n3x, SkRasterPipelineOp::bicubic_n1y, level);
+ sample(SkRasterPipelineOp::bicubic_n1x, SkRasterPipelineOp::bicubic_n1y, level);
+ sample(SkRasterPipelineOp::bicubic_p1x, SkRasterPipelineOp::bicubic_n1y, level);
+ sample(SkRasterPipelineOp::bicubic_p3x, SkRasterPipelineOp::bicubic_n1y, level);
+
+ sample(SkRasterPipelineOp::bicubic_n3x, SkRasterPipelineOp::bicubic_p1y, level);
+ sample(SkRasterPipelineOp::bicubic_n1x, SkRasterPipelineOp::bicubic_p1y, level);
+ sample(SkRasterPipelineOp::bicubic_p1x, SkRasterPipelineOp::bicubic_p1y, level);
+ sample(SkRasterPipelineOp::bicubic_p3x, SkRasterPipelineOp::bicubic_p1y, level);
+
+ sample(SkRasterPipelineOp::bicubic_n3x, SkRasterPipelineOp::bicubic_p3y, level);
+ sample(SkRasterPipelineOp::bicubic_n1x, SkRasterPipelineOp::bicubic_p3y, level);
+ sample(SkRasterPipelineOp::bicubic_p1x, SkRasterPipelineOp::bicubic_p3y, level);
+ sample(SkRasterPipelineOp::bicubic_p3x, SkRasterPipelineOp::bicubic_p3y, level);
+
+ p->append(SkRasterPipelineOp::move_dst_src);
+ } else if (sampling.filter == SkFilterMode::kLinear) {
+ p->append(SkRasterPipelineOp::bilinear_setup, sampler);
+
+ sample(SkRasterPipelineOp::bilinear_nx, SkRasterPipelineOp::bilinear_ny, level);
+ sample(SkRasterPipelineOp::bilinear_px, SkRasterPipelineOp::bilinear_ny, level);
+ sample(SkRasterPipelineOp::bilinear_nx, SkRasterPipelineOp::bilinear_py, level);
+ sample(SkRasterPipelineOp::bilinear_px, SkRasterPipelineOp::bilinear_py, level);
+
+ p->append(SkRasterPipelineOp::move_dst_src);
+ } else {
+ append_tiling_and_gather(level);
+ }
+ };
+
+ sample_level(&upper);
+
+ if (mipmapCtx) {
+ p->append(SkRasterPipelineOp::mipmap_linear_update, mipmapCtx);
+ sample_level(&lower);
+ p->append(SkRasterPipelineOp::mipmap_linear_finish, mipmapCtx);
+ }
+
+ return append_misc();
+}
+
+skvm::Color SkImageShader::program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord origLocal,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ SkASSERT(!needs_subset(fImage.get(), fSubset)); // TODO(skbug.com/12784)
+
+ auto sampling = fSampling;
+ if (sampling.isAniso()) {
+ sampling = SkSamplingPriv::AnisoFallback(fImage->hasMipmaps());
+ }
+
+ SkMatrix baseInv;
+ // If the total matrix isn't valid then we will always access the base MIP level.
+ if (mRec.totalMatrixIsValid()) {
+ if (!mRec.totalInverse(&baseInv)) {
+ return {};
+ }
+ baseInv.normalizePerspective();
+ }
+
+ SkASSERT(!sampling.useCubic || sampling.mipmap == SkMipmapMode::kNone);
+ auto* access = SkMipmapAccessor::Make(alloc, fImage.get(), baseInv, sampling.mipmap);
+ if (!access) {
+ return {};
+ }
+
+ SkPixmap upper;
+ SkMatrix upperInv;
+ std::tie(upper, upperInv) = access->level();
+
+ if (!sampling.useCubic) {
+ // TODO: can tweak_sampling sometimes for cubic too when B=0
+ if (mRec.totalMatrixIsValid()) {
+ sampling = tweak_sampling(sampling, SkMatrix::Concat(upperInv, baseInv));
+ }
+ }
+
+ SkPixmap lowerPixmap;
+ SkMatrix lowerInv;
+ SkPixmap* lower = nullptr;
+ float lowerWeight = access->lowerWeight();
+ if (lowerWeight > 0) {
+ std::tie(lowerPixmap, lowerInv) = access->lowerLevel();
+ lower = &lowerPixmap;
+ }
+
+ skvm::Coord upperLocal = origLocal;
+ if (!mRec.apply(p, &upperLocal, uniforms, upperInv).has_value()) {
+ return {};
+ }
+
+ // We can exploit image opacity to skip work unpacking alpha channels.
+ const bool input_is_opaque = SkAlphaTypeIsOpaque(upper.alphaType())
+ || SkColorTypeIsAlwaysOpaque(upper.colorType());
+
+ // Each call to sample() will try to rewrite the same uniforms over and over,
+ // so remember where we start and reset back there each time. That way each
+ // sample() call uses the same uniform offsets.
+
+ auto compute_clamp_limit = [&](float limit) {
+ // Subtract an ulp so the upper clamp limit excludes limit itself.
+ int bits;
+ memcpy(&bits, &limit, 4);
+ return p->uniformF(uniforms->push(bits-1));
+ };
+
+ // Except in the simplest case (no mips, no filtering), we reference uniforms
+ // more than once. To avoid adding/registering them multiple times, we pre-load them
+ // into a struct (just to logically group them together), based on the "current"
+ // pixmap (level of a mipmap).
+ //
+ struct Uniforms {
+ skvm::F32 w, iw, i2w,
+ h, ih, i2h;
+
+ skvm::F32 clamp_w,
+ clamp_h;
+
+ skvm::Uniform addr;
+ skvm::I32 rowBytesAsPixels;
+
+ skvm::PixelFormat pixelFormat; // not a uniform, but needed for each texel sample,
+ // so we store it here, since it is also dependent on
+ // the current pixmap (level).
+ };
+
+ auto setup_uniforms = [&](const SkPixmap& pm) -> Uniforms {
+ skvm::PixelFormat pixelFormat = skvm::SkColorType_to_PixelFormat(pm.colorType());
+ return {
+ p->uniformF(uniforms->pushF( pm.width())),
+ p->uniformF(uniforms->pushF(1.0f/pm.width())), // iff tileX == kRepeat
+ p->uniformF(uniforms->pushF(0.5f/pm.width())), // iff tileX == kMirror
+
+ p->uniformF(uniforms->pushF( pm.height())),
+ p->uniformF(uniforms->pushF(1.0f/pm.height())), // iff tileY == kRepeat
+ p->uniformF(uniforms->pushF(0.5f/pm.height())), // iff tileY == kMirror
+
+ compute_clamp_limit(pm. width()),
+ compute_clamp_limit(pm.height()),
+
+ uniforms->pushPtr(pm.addr()),
+ p->uniform32(uniforms->push(pm.rowBytesAsPixels())),
+
+ pixelFormat,
+ };
+ };
+
+ auto sample_texel = [&](const Uniforms& u, skvm::F32 sx, skvm::F32 sy) -> skvm::Color {
+ // repeat() and mirror() are written assuming they'll be followed by a [0,scale) clamp.
+ auto repeat = [&](skvm::F32 v, skvm::F32 S, skvm::F32 I) {
+ return v - floor(v * I) * S;
+ };
+ auto mirror = [&](skvm::F32 v, skvm::F32 S, skvm::F32 I2) {
+ // abs( (v-scale) - (2*scale)*floor((v-scale)*(0.5f/scale)) - scale )
+ // {---A---} {------------------B------------------}
+ skvm::F32 A = v - S,
+ B = (S + S) * floor(A * I2);
+ return abs(A - B - S);
+ };
+ switch (fTileModeX) {
+ case SkTileMode::kDecal: /* handled after gather */ break;
+ case SkTileMode::kClamp: /* we always clamp */ break;
+ case SkTileMode::kRepeat: sx = repeat(sx, u.w, u.iw); break;
+ case SkTileMode::kMirror: sx = mirror(sx, u.w, u.i2w); break;
+ }
+ switch (fTileModeY) {
+ case SkTileMode::kDecal: /* handled after gather */ break;
+ case SkTileMode::kClamp: /* we always clamp */ break;
+ case SkTileMode::kRepeat: sy = repeat(sy, u.h, u.ih); break;
+ case SkTileMode::kMirror: sy = mirror(sy, u.h, u.i2h); break;
+ }
+
+ // Always clamp sample coordinates to [0,width), [0,height), both for memory
+ // safety and to handle the clamps still needed by kClamp, kRepeat, and kMirror.
+ skvm::F32 clamped_x = clamp(sx, 0, u.clamp_w),
+ clamped_y = clamp(sy, 0, u.clamp_h);
+
+ // Load pixels from pm.addr()[(int)sx + (int)sy*stride].
+ skvm::I32 index = trunc(clamped_x) +
+ trunc(clamped_y) * u.rowBytesAsPixels;
+ skvm::Color c = gather(u.pixelFormat, u.addr, index);
+
+ // If we know the image is opaque, jump right to alpha = 1.0f, skipping work to unpack it.
+ if (input_is_opaque) {
+ c.a = p->splat(1.0f);
+ }
+
+ // Mask away any pixels that we tried to sample outside the bounds in kDecal.
+ if (fTileModeX == SkTileMode::kDecal || fTileModeY == SkTileMode::kDecal) {
+ skvm::I32 mask = p->splat(~0);
+ if (fTileModeX == SkTileMode::kDecal) { mask &= (sx == clamped_x); }
+ if (fTileModeY == SkTileMode::kDecal) { mask &= (sy == clamped_y); }
+ c.r = pun_to_F32(p->bit_and(mask, pun_to_I32(c.r)));
+ c.g = pun_to_F32(p->bit_and(mask, pun_to_I32(c.g)));
+ c.b = pun_to_F32(p->bit_and(mask, pun_to_I32(c.b)));
+ c.a = pun_to_F32(p->bit_and(mask, pun_to_I32(c.a)));
+ // Notice that even if input_is_opaque, c.a might now be 0.
+ }
+
+ return c;
+ };
+
+ auto sample_level = [&](const SkPixmap& pm, skvm::Coord local) {
+ const Uniforms u = setup_uniforms(pm);
+
+ if (sampling.useCubic) {
+ // All bicubic samples have the same fractional offset (fx,fy) from the center.
+ // They're the 16 corners of a 4x4 grid surrounding (x,y) at (0.5,0.5) off-center.
+ skvm::F32 fx = fract(local.x + 0.5f),
+ fy = fract(local.y + 0.5f);
+ skvm::F32 wx[4],
+ wy[4];
+
+ SkM44 weights = CubicResamplerMatrix(sampling.cubic.B, sampling.cubic.C);
+
+ auto dot = [](const skvm::F32 a[], const skvm::F32 b[]) {
+ return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3];
+ };
+ const skvm::F32 tmpx[] = { p->splat(1.0f), fx, fx*fx, fx*fx*fx };
+ const skvm::F32 tmpy[] = { p->splat(1.0f), fy, fy*fy, fy*fy*fy };
+
+ for (int row = 0; row < 4; ++row) {
+ SkV4 r = weights.row(row);
+ skvm::F32 ru[] = {
+ p->uniformF(uniforms->pushF(r[0])),
+ p->uniformF(uniforms->pushF(r[1])),
+ p->uniformF(uniforms->pushF(r[2])),
+ p->uniformF(uniforms->pushF(r[3])),
+ };
+ wx[row] = dot(ru, tmpx);
+ wy[row] = dot(ru, tmpy);
+ }
+
+ skvm::Color c;
+ c.r = c.g = c.b = c.a = p->splat(0.0f);
+
+ skvm::F32 sy = local.y - 1.5f;
+ for (int j = 0; j < 4; j++, sy += 1.0f) {
+ skvm::F32 sx = local.x - 1.5f;
+ for (int i = 0; i < 4; i++, sx += 1.0f) {
+ skvm::Color s = sample_texel(u, sx,sy);
+ skvm::F32 w = wx[i] * wy[j];
+
+ c.r += s.r * w;
+ c.g += s.g * w;
+ c.b += s.b * w;
+ c.a += s.a * w;
+ }
+ }
+ return c;
+ } else if (sampling.filter == SkFilterMode::kLinear) {
+ // Our four sample points are the corners of a logical 1x1 pixel
+ // box surrounding (x,y) at (0.5,0.5) off-center.
+ skvm::F32 left = local.x - 0.5f,
+ top = local.y - 0.5f,
+ right = local.x + 0.5f,
+ bottom = local.y + 0.5f;
+
+ // The fractional parts of right and bottom are our lerp factors in x and y respectively.
+ skvm::F32 fx = fract(right ),
+ fy = fract(bottom);
+
+ return lerp(lerp(sample_texel(u, left,top ), sample_texel(u, right,top ), fx),
+ lerp(sample_texel(u, left,bottom), sample_texel(u, right,bottom), fx), fy);
+ } else {
+ SkASSERT(sampling.filter == SkFilterMode::kNearest);
+ // Our rasterizer biases upward. That is a rect from 0.5...1.5 fills pixel 1 and not
+ // pixel 0. To make an image that is mapped 1:1 with device pixels but at a half pixel
+ // offset select every pixel from the src image once we make exact integer pixel sample
+ // values round down not up. Note that a mirror mapping will not have this property.
+ local.x = skvm::pun_to_F32(skvm::pun_to_I32(local.x) - 1);
+ local.y = skvm::pun_to_F32(skvm::pun_to_I32(local.y) - 1);
+ return sample_texel(u, local.x,local.y);
+ }
+ };
+
+ skvm::Color c = sample_level(upper, upperLocal);
+ if (lower) {
+ skvm::Coord lowerLocal = origLocal;
+ if (!mRec.apply(p, &lowerLocal, uniforms, lowerInv)) {
+ return {};
+ }
+ // lower * weight + upper * (1 - weight)
+ c = lerp(c,
+ sample_level(*lower, lowerLocal),
+ p->uniformF(uniforms->pushF(lowerWeight)));
+ }
+
+ // If the input is opaque and we're not in decal mode, that means the output is too.
+ // Forcing *a to 1.0 here will retroactively skip any work we did to interpolate sample alphas.
+ if (input_is_opaque
+ && fTileModeX != SkTileMode::kDecal
+ && fTileModeY != SkTileMode::kDecal) {
+ c.a = p->splat(1.0f);
+ }
+
+ // Alpha-only images get their color from the paint (already converted to dst color space).
+ SkColorSpace* cs = upper.colorSpace();
+ SkAlphaType at = upper.alphaType();
+ if (SkColorTypeIsAlphaOnly(upper.colorType()) && !fRaw) {
+ c.r = paint.r;
+ c.g = paint.g;
+ c.b = paint.b;
+
+ cs = dst.colorSpace();
+ at = kUnpremul_SkAlphaType;
+ }
+
+ if (sampling.useCubic) {
+ // Bicubic filtering naturally produces out of range values on both sides of [0,1].
+ c.a = clamp01(c.a);
+
+ skvm::F32 limit = (at == kUnpremul_SkAlphaType || fClampAsIfUnpremul)
+ ? p->splat(1.0f)
+ : c.a;
+ c.r = clamp(c.r, 0.0f, limit);
+ c.g = clamp(c.g, 0.0f, limit);
+ c.b = clamp(c.b, 0.0f, limit);
+ }
+
+ return fRaw ? c
+ : SkColorSpaceXformSteps{cs, at, dst.colorSpace(), dst.alphaType()}.program(
+ p, uniforms, c);
+}
diff --git a/gfx/skia/skia/src/shaders/SkImageShader.h b/gfx/skia/skia/src/shaders/SkImageShader.h
new file mode 100644
index 0000000000..3740aec1f8
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkImageShader.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkImageShader_DEFINED
+#define SkImageShader_DEFINED
+
+#include "include/core/SkImage.h"
+#include "include/core/SkM44.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkShaderBase.h"
+
+namespace skgpu {
+class Swizzle;
+}
+
+namespace skgpu::graphite {
+class KeyContext;
+enum class ReadSwizzle;
+}
+
+class SkImageShader : public SkShaderBase {
+public:
+ static sk_sp<SkShader> Make(sk_sp<SkImage>,
+ SkTileMode tmx,
+ SkTileMode tmy,
+ const SkSamplingOptions&,
+ const SkMatrix* localMatrix,
+ bool clampAsIfUnpremul = false);
+
+ static sk_sp<SkShader> MakeRaw(sk_sp<SkImage>,
+ SkTileMode tmx,
+ SkTileMode tmy,
+ const SkSamplingOptions&,
+ const SkMatrix* localMatrix);
+
+ // TODO(skbug.com/12784): Requires SkImage to be texture backed, and created SkShader can only
+ // be used on GPU-backed surfaces.
+ static sk_sp<SkShader> MakeSubset(sk_sp<SkImage>,
+ const SkRect& subset,
+ SkTileMode tmx,
+ SkTileMode tmy,
+ const SkSamplingOptions&,
+ const SkMatrix* localMatrix,
+ bool clampAsIfUnpremul = false);
+
+ SkImageShader(sk_sp<SkImage>,
+ const SkRect& subset,
+ SkTileMode tmx, SkTileMode tmy,
+ const SkSamplingOptions&,
+ bool raw,
+ bool clampAsIfUnpremul);
+
+ bool isOpaque() const override;
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+ static SkM44 CubicResamplerMatrix(float B, float C);
+
+private:
+ SK_FLATTENABLE_HOOKS(SkImageShader)
+
+ void flatten(SkWriteBuffer&) const override;
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc* storage) const override;
+#endif
+ SkImage* onIsAImage(SkMatrix*, SkTileMode*) const override;
+
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc*) const override;
+
+ sk_sp<SkImage> fImage;
+ const SkSamplingOptions fSampling;
+ const SkTileMode fTileModeX;
+ const SkTileMode fTileModeY;
+
+ // TODO(skbug.com/12784): This is only supported for GPU images currently.
+ // If subset == (0,0,w,h) of the image, then no subset is applied. Subset will not be empty.
+ const SkRect fSubset;
+
+ const bool fRaw;
+ const bool fClampAsIfUnpremul;
+
+ friend class SkShaderBase;
+ using INHERITED = SkShaderBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkLocalMatrixShader.cpp b/gfx/skia/skia/src/shaders/SkLocalMatrixShader.cpp
new file mode 100644
index 0000000000..ceddf24c53
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkLocalMatrixShader.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkTLazy.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkVM.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/effects/GrMatrixEffect.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+SkShaderBase::GradientType SkLocalMatrixShader::asGradient(GradientInfo* info,
+ SkMatrix* localMatrix) const {
+ GradientType type = as_SB(fWrappedShader)->asGradient(info, localMatrix);
+ if (type != SkShaderBase::GradientType::kNone && localMatrix) {
+ *localMatrix = ConcatLocalMatrices(fLocalMatrix, *localMatrix);
+ }
+ return type;
+}
+
+#if defined(SK_GANESH)
+std::unique_ptr<GrFragmentProcessor> SkLocalMatrixShader::asFragmentProcessor(
+ const GrFPArgs& args, const MatrixRec& mRec) const {
+ return as_SB(fWrappedShader)->asFragmentProcessor(args, mRec.concat(fLocalMatrix));
+}
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkLocalMatrixShader::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ LocalMatrixShaderBlock::LMShaderData lmShaderData(fLocalMatrix);
+
+ KeyContextWithLocalMatrix newContext(keyContext, fLocalMatrix);
+
+ LocalMatrixShaderBlock::BeginBlock(newContext, builder, gatherer, &lmShaderData);
+
+ as_SB(fWrappedShader)->addToKey(newContext, builder, gatherer);
+
+ builder->endBlock();
+}
+#endif
+
+sk_sp<SkFlattenable> SkLocalMatrixShader::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix lm;
+ buffer.readMatrix(&lm);
+ auto baseShader(buffer.readShader());
+ if (!baseShader) {
+ return nullptr;
+ }
+ return baseShader->makeWithLocalMatrix(lm);
+}
+
+void SkLocalMatrixShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.writeMatrix(fLocalMatrix);
+ buffer.writeFlattenable(fWrappedShader.get());
+}
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+SkShaderBase::Context* SkLocalMatrixShader::onMakeContext(
+ const ContextRec& rec, SkArenaAlloc* alloc) const
+{
+ SkTCopyOnFirstWrite<SkMatrix> lm(fLocalMatrix);
+ if (rec.fLocalMatrix) {
+ *lm.writable() = ConcatLocalMatrices(*rec.fLocalMatrix, *lm);
+ }
+
+ ContextRec newRec(rec);
+ newRec.fLocalMatrix = lm;
+
+ return as_SB(fWrappedShader)->makeContext(newRec, alloc);
+}
+#endif
+
+SkImage* SkLocalMatrixShader::onIsAImage(SkMatrix* outMatrix, SkTileMode* mode) const {
+ SkMatrix imageMatrix;
+ SkImage* image = fWrappedShader->isAImage(&imageMatrix, mode);
+ if (image && outMatrix) {
+ *outMatrix = ConcatLocalMatrices(fLocalMatrix, imageMatrix);
+ }
+
+ return image;
+}
+
+bool SkLocalMatrixShader::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+ return as_SB(fWrappedShader)->appendStages(rec, mRec.concat(fLocalMatrix));
+}
+
+skvm::Color SkLocalMatrixShader::program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ return as_SB(fWrappedShader)->program(p,
+ device,
+ local,
+ paint,
+ mRec.concat(fLocalMatrix),
+ dst,
+ uniforms,
+ alloc);
+}
+
+sk_sp<SkShader> SkShader::makeWithLocalMatrix(const SkMatrix& localMatrix) const {
+ if (localMatrix.isIdentity()) {
+ return sk_ref_sp(const_cast<SkShader*>(this));
+ }
+
+ const SkMatrix* lm = &localMatrix;
+
+ sk_sp<SkShader> baseShader;
+ SkMatrix otherLocalMatrix;
+ sk_sp<SkShader> proxy = as_SB(this)->makeAsALocalMatrixShader(&otherLocalMatrix);
+ if (proxy) {
+ otherLocalMatrix = SkShaderBase::ConcatLocalMatrices(localMatrix, otherLocalMatrix);
+ lm = &otherLocalMatrix;
+ baseShader = proxy;
+ } else {
+ baseShader = sk_ref_sp(const_cast<SkShader*>(this));
+ }
+
+ return sk_make_sp<SkLocalMatrixShader>(std::move(baseShader), *lm);
+}
+
+////////////////////////////////////////////////////////////////////
+
+/**
+ * Replaces the CTM when used. Created to support clipShaders, which have to be evaluated
+ * using the CTM that was present at the time they were specified (which may be different
+ * from the CTM at the time something is drawn through the clip).
+ */
+class SkCTMShader final : public SkShaderBase {
+public:
+ SkCTMShader(sk_sp<SkShader> proxy, const SkMatrix& ctm)
+ : fProxyShader(std::move(proxy))
+ , fCTM(ctm)
+ {}
+
+ GradientType asGradient(GradientInfo* info, SkMatrix* localMatrix) const override {
+ return as_SB(fProxyShader)->asGradient(info, localMatrix);
+ }
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override { SkASSERT(false); }
+
+ bool appendStages(const SkStageRec& rec, const MatrixRec&) const override {
+ return as_SB(fProxyShader)->appendRootStages(rec, fCTM);
+ }
+
+ skvm::Color program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const override {
+ return as_SB(fProxyShader)->rootProgram(p, device, paint, fCTM, dst, uniforms, alloc);
+ }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkCTMShader)
+
+ sk_sp<SkShader> fProxyShader;
+ SkMatrix fCTM;
+
+ using INHERITED = SkShaderBase;
+};
+
+
+#if defined(SK_GANESH)
+std::unique_ptr<GrFragmentProcessor> SkCTMShader::asFragmentProcessor(const GrFPArgs& args,
+ const MatrixRec& mRec) const {
+ SkMatrix ctmInv;
+ if (!fCTM.invert(&ctmInv)) {
+ return nullptr;
+ }
+
+ auto base = as_SB(fProxyShader)->asRootFragmentProcessor(args, fCTM);
+ if (!base) {
+ return nullptr;
+ }
+
+ // In order for the shader to be evaluated with the original CTM, we explicitly evaluate it
+ // at sk_FragCoord, and pass that through the inverse of the original CTM. This avoids requiring
+ // local coords for the shader and mapping from the draw's local to device and then back.
+ return GrFragmentProcessor::DeviceSpace(GrMatrixEffect::Make(ctmInv, std::move(base)));
+}
+#endif
+
+sk_sp<SkFlattenable> SkCTMShader::CreateProc(SkReadBuffer& buffer) {
+ SkASSERT(false);
+ return nullptr;
+}
+
+sk_sp<SkShader> SkShaderBase::makeWithCTM(const SkMatrix& postM) const {
+ return sk_sp<SkShader>(new SkCTMShader(sk_ref_sp(this), postM));
+}
diff --git a/gfx/skia/skia/src/shaders/SkLocalMatrixShader.h b/gfx/skia/skia/src/shaders/SkLocalMatrixShader.h
new file mode 100644
index 0000000000..e4b186621e
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkLocalMatrixShader.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLocalMatrixShader_DEFINED
+#define SkLocalMatrixShader_DEFINED
+
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkShaderBase.h"
+
+class GrFragmentProcessor;
+class SkArenaAlloc;
+
+class SkLocalMatrixShader final : public SkShaderBase {
+public:
+ template <typename T, typename... Args>
+ static std::enable_if_t<std::is_base_of_v<SkShader, T>, sk_sp<SkShader>>
+ MakeWrapped(const SkMatrix* localMatrix, Args&&... args) {
+ auto t = sk_make_sp<T>(std::forward<Args>(args)...);
+ if (!localMatrix || localMatrix->isIdentity()) {
+ return std::move(t);
+ }
+ return sk_make_sp<SkLocalMatrixShader>(sk_sp<SkShader>(std::move(t)), *localMatrix);
+ }
+
+ SkLocalMatrixShader(sk_sp<SkShader> wrapped, const SkMatrix& localMatrix)
+ : fLocalMatrix(localMatrix), fWrappedShader(std::move(wrapped)) {}
+
+ GradientType asGradient(GradientInfo* info, SkMatrix* localMatrix) const override;
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+ sk_sp<SkShader> makeAsALocalMatrixShader(SkMatrix* localMatrix) const override {
+ if (localMatrix) {
+ *localMatrix = fLocalMatrix;
+ }
+ return fWrappedShader;
+ }
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override;
+#endif
+
+ SkImage* onIsAImage(SkMatrix* matrix, SkTileMode* mode) const override;
+
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc*) const override;
+
+private:
+ SK_FLATTENABLE_HOOKS(SkLocalMatrixShader)
+
+ SkMatrix fLocalMatrix;
+ sk_sp<SkShader> fWrappedShader;
+
+ using INHERITED = SkShaderBase;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/SkPerlinNoiseShader.cpp b/gfx/skia/skia/src/shaders/SkPerlinNoiseShader.cpp
new file mode 100644
index 0000000000..9042e91574
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkPerlinNoiseShader.cpp
@@ -0,0 +1,1149 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/effects/SkPerlinNoiseShader.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkString.h"
+#include "include/core/SkUnPreMultiply.h"
+#include "include/private/base/SkTPin.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/KeyBuilder.h"
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrProcessorUnitTest.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/effects/GrMatrixEffect.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
+#include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
+#endif // SK_GANESH
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/Log.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#include "src/gpu/graphite/RecorderPriv.h"
+#include "src/gpu/graphite/TextureProxyView.h"
+#include "src/image/SkImage_Base.h"
+#endif // SK_GRAPHITE
+
+static const int kBlockSize = 256;
+static const int kBlockMask = kBlockSize - 1;
+static const int kPerlinNoise = 4096;
+static const int kRandMaximum = SK_MaxS32; // 2**31 - 1
+
+class SkPerlinNoiseShaderImpl : public SkShaderBase {
+public:
+ struct StitchData {
+ StitchData()
+ : fWidth(0)
+ , fWrapX(0)
+ , fHeight(0)
+ , fWrapY(0)
+ {}
+
+ StitchData(SkScalar w, SkScalar h)
+ : fWidth(std::min(SkScalarRoundToInt(w), SK_MaxS32 - kPerlinNoise))
+ , fWrapX(kPerlinNoise + fWidth)
+ , fHeight(std::min(SkScalarRoundToInt(h), SK_MaxS32 - kPerlinNoise))
+ , fWrapY(kPerlinNoise + fHeight) {}
+
+ bool operator==(const StitchData& other) const {
+ return fWidth == other.fWidth &&
+ fWrapX == other.fWrapX &&
+ fHeight == other.fHeight &&
+ fWrapY == other.fWrapY;
+ }
+
+ int fWidth; // How much to subtract to wrap for stitching.
+ int fWrapX; // Minimum value to wrap.
+ int fHeight;
+ int fWrapY;
+ };
+
+ struct PaintingData {
+ PaintingData(const SkISize& tileSize, SkScalar seed,
+ SkScalar baseFrequencyX, SkScalar baseFrequencyY,
+ const SkMatrix& matrix)
+ {
+ SkVector tileVec;
+ matrix.mapVector(SkIntToScalar(tileSize.fWidth), SkIntToScalar(tileSize.fHeight),
+ &tileVec);
+
+ SkSize scale;
+ if (!matrix.decomposeScale(&scale, nullptr)) {
+ scale.set(SK_ScalarNearlyZero, SK_ScalarNearlyZero);
+ }
+ fBaseFrequency.set(baseFrequencyX * SkScalarInvert(scale.width()),
+ baseFrequencyY * SkScalarInvert(scale.height()));
+ fTileSize.set(SkScalarRoundToInt(tileVec.fX), SkScalarRoundToInt(tileVec.fY));
+ this->init(seed);
+ if (!fTileSize.isEmpty()) {
+ this->stitch();
+ }
+
+ #if defined(SK_GANESH) || defined(SK_GRAPHITE)
+ SkImageInfo info = SkImageInfo::MakeA8(kBlockSize, 1);
+ fPermutationsBitmap.installPixels(info, fLatticeSelector, info.minRowBytes());
+ fPermutationsBitmap.setImmutable();
+
+ info = SkImageInfo::Make(kBlockSize, 4, kRGBA_8888_SkColorType, kPremul_SkAlphaType);
+ fNoiseBitmap.installPixels(info, fNoise[0][0], info.minRowBytes());
+ fNoiseBitmap.setImmutable();
+ #endif
+ }
+
+ #if defined(SK_GANESH) || defined(SK_GRAPHITE)
+ PaintingData(const PaintingData& that)
+ : fSeed(that.fSeed)
+ , fTileSize(that.fTileSize)
+ , fBaseFrequency(that.fBaseFrequency)
+ , fStitchDataInit(that.fStitchDataInit)
+ , fPermutationsBitmap(that.fPermutationsBitmap)
+ , fNoiseBitmap(that.fNoiseBitmap) {
+ memcpy(fLatticeSelector, that.fLatticeSelector, sizeof(fLatticeSelector));
+ memcpy(fNoise, that.fNoise, sizeof(fNoise));
+ memcpy(fGradient, that.fGradient, sizeof(fGradient));
+ }
+ #endif
+
+ int fSeed;
+ uint8_t fLatticeSelector[kBlockSize];
+ uint16_t fNoise[4][kBlockSize][2];
+ SkPoint fGradient[4][kBlockSize];
+ SkISize fTileSize;
+ SkVector fBaseFrequency;
+ StitchData fStitchDataInit;
+
+ private:
+
+ #if defined(SK_GANESH) || defined(SK_GRAPHITE)
+ SkBitmap fPermutationsBitmap;
+ SkBitmap fNoiseBitmap;
+ #endif
+
+ inline int random() {
+ // See https://www.w3.org/TR/SVG11/filters.html#feTurbulenceElement
+ // m = kRandMaximum, 2**31 - 1 (2147483647)
+ static constexpr int kRandAmplitude = 16807; // 7**5; primitive root of m
+ static constexpr int kRandQ = 127773; // m / a
+ static constexpr int kRandR = 2836; // m % a
+
+ int result = kRandAmplitude * (fSeed % kRandQ) - kRandR * (fSeed / kRandQ);
+ if (result <= 0) {
+ result += kRandMaximum;
+ }
+ fSeed = result;
+ return result;
+ }
+
+ // Only called once. Could be part of the constructor.
+ void init(SkScalar seed) {
+ // According to the SVG spec, we must truncate (not round) the seed value.
+ fSeed = SkScalarTruncToInt(seed);
+ // Clamp the seed value to the range [1, kRandMaximum - 1].
+ if (fSeed <= 0) {
+ fSeed = -(fSeed % (kRandMaximum - 1)) + 1;
+ }
+ if (fSeed > kRandMaximum - 1) {
+ fSeed = kRandMaximum - 1;
+ }
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int i = 0; i < kBlockSize; ++i) {
+ fLatticeSelector[i] = i;
+ fNoise[channel][i][0] = (random() % (2 * kBlockSize));
+ fNoise[channel][i][1] = (random() % (2 * kBlockSize));
+ }
+ }
+ for (int i = kBlockSize - 1; i > 0; --i) {
+ int k = fLatticeSelector[i];
+ int j = random() % kBlockSize;
+ SkASSERT(j >= 0);
+ SkASSERT(j < kBlockSize);
+ fLatticeSelector[i] = fLatticeSelector[j];
+ fLatticeSelector[j] = k;
+ }
+
+ // Perform the permutations now
+ {
+ // Copy noise data
+ uint16_t noise[4][kBlockSize][2];
+ for (int i = 0; i < kBlockSize; ++i) {
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int j = 0; j < 2; ++j) {
+ noise[channel][i][j] = fNoise[channel][i][j];
+ }
+ }
+ }
+ // Do permutations on noise data
+ for (int i = 0; i < kBlockSize; ++i) {
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int j = 0; j < 2; ++j) {
+ fNoise[channel][i][j] = noise[channel][fLatticeSelector[i]][j];
+ }
+ }
+ }
+ }
+
+ // Half of the largest possible value for 16 bit unsigned int
+ static constexpr SkScalar kHalfMax16bits = 32767.5f;
+
+ // Compute gradients from permuted noise data
+ static constexpr SkScalar kInvBlockSizef = 1.0 / SkIntToScalar(kBlockSize);
+ for (int channel = 0; channel < 4; ++channel) {
+ for (int i = 0; i < kBlockSize; ++i) {
+ fGradient[channel][i] = SkPoint::Make(
+ (fNoise[channel][i][0] - kBlockSize) * kInvBlockSizef,
+ (fNoise[channel][i][1] - kBlockSize) * kInvBlockSizef);
+ fGradient[channel][i].normalize();
+ // Put the normalized gradient back into the noise data
+ fNoise[channel][i][0] =
+ SkScalarRoundToInt((fGradient[channel][i].fX + 1) * kHalfMax16bits);
+ fNoise[channel][i][1] =
+ SkScalarRoundToInt((fGradient[channel][i].fY + 1) * kHalfMax16bits);
+ }
+ }
+ }
+
+ // Only called once. Could be part of the constructor.
+ void stitch() {
+ SkScalar tileWidth = SkIntToScalar(fTileSize.width());
+ SkScalar tileHeight = SkIntToScalar(fTileSize.height());
+ SkASSERT(tileWidth > 0 && tileHeight > 0);
+ // When stitching tiled turbulence, the frequencies must be adjusted
+ // so that the tile borders will be continuous.
+ if (fBaseFrequency.fX) {
+ SkScalar lowFrequencx =
+ SkScalarFloorToScalar(tileWidth * fBaseFrequency.fX) / tileWidth;
+ SkScalar highFrequencx =
+ SkScalarCeilToScalar(tileWidth * fBaseFrequency.fX) / tileWidth;
+ // BaseFrequency should be non-negative according to the standard.
+ // lowFrequencx can be 0 if fBaseFrequency.fX is very small.
+ if (sk_ieee_float_divide(fBaseFrequency.fX, lowFrequencx) < highFrequencx / fBaseFrequency.fX) {
+ fBaseFrequency.fX = lowFrequencx;
+ } else {
+ fBaseFrequency.fX = highFrequencx;
+ }
+ }
+ if (fBaseFrequency.fY) {
+ SkScalar lowFrequency =
+ SkScalarFloorToScalar(tileHeight * fBaseFrequency.fY) / tileHeight;
+ SkScalar highFrequency =
+ SkScalarCeilToScalar(tileHeight * fBaseFrequency.fY) / tileHeight;
+ // lowFrequency can be 0 if fBaseFrequency.fY is very small.
+ if (sk_ieee_float_divide(fBaseFrequency.fY, lowFrequency) < highFrequency / fBaseFrequency.fY) {
+ fBaseFrequency.fY = lowFrequency;
+ } else {
+ fBaseFrequency.fY = highFrequency;
+ }
+ }
+ fStitchDataInit = StitchData(tileWidth * fBaseFrequency.fX,
+ tileHeight * fBaseFrequency.fY);
+ }
+
+ public:
+
+#if defined(SK_GANESH) || defined(SK_GRAPHITE)
+ const SkBitmap& getPermutationsBitmap() const { return fPermutationsBitmap; }
+
+ const SkBitmap& getNoiseBitmap() const { return fNoiseBitmap; }
+#endif
+ };
+
+ /**
+ * About the noise types : the difference between the first 2 is just minor tweaks to the
+ * algorithm, they're not 2 entirely different noises. The output looks different, but once the
+ * noise is generated in the [-1, 1] range, the output is brought back in the [0, 1] range by
+ * doing :
+ * kFractalNoise_Type : noise * 0.5 + 0.5
+ * kTurbulence_Type : abs(noise)
+ * Very little differs between the 2 types, although you can tell the difference visually.
+ */
+ enum Type {
+ kFractalNoise_Type,
+ kTurbulence_Type,
+ kLast_Type = kTurbulence_Type
+ };
+
+ static const int kMaxOctaves = 255; // numOctaves must be >= 0 and <= kMaxOctaves
+
+ SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::Type type, SkScalar baseFrequencyX,
+ SkScalar baseFrequencyY, int numOctaves, SkScalar seed,
+ const SkISize* tileSize);
+
+ class PerlinNoiseShaderContext : public Context {
+ public:
+ PerlinNoiseShaderContext(const SkPerlinNoiseShaderImpl& shader, const ContextRec&);
+
+ void shadeSpan(int x, int y, SkPMColor[], int count) override;
+
+ private:
+ SkPMColor shade(const SkPoint& point, StitchData& stitchData) const;
+ SkScalar calculateTurbulenceValueForPoint(int channel,
+ StitchData& stitchData,
+ const SkPoint& point) const;
+ SkScalar noise2D(int channel,
+ const StitchData& stitchData,
+ const SkPoint& noiseVector) const;
+
+ SkMatrix fMatrix;
+ PaintingData fPaintingData;
+
+ using INHERITED = Context;
+ };
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord,
+ skvm::Coord,
+ skvm::Color,
+ const MatrixRec&,
+ const SkColorInfo&,
+ skvm::Uniforms*,
+ SkArenaAlloc*) const override {
+ // TODO?
+ return {};
+ }
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkPerlinNoiseShaderImpl)
+
+ const SkPerlinNoiseShaderImpl::Type fType;
+ const SkScalar fBaseFrequencyX;
+ const SkScalar fBaseFrequencyY;
+ const int fNumOctaves;
+ const SkScalar fSeed;
+ const SkISize fTileSize;
+ const bool fStitchTiles;
+
+ friend class ::SkPerlinNoiseShader;
+
+ using INHERITED = SkShaderBase;
+};
+
+namespace {
+
+// noiseValue is the color component's value (or color)
+// limitValue is the maximum perlin noise array index value allowed
+// newValue is the current noise dimension (either width or height)
+inline int checkNoise(int noiseValue, int limitValue, int newValue) {
+ // If the noise value would bring us out of bounds of the current noise array while we are
+ // stitching noise tiles together, wrap the noise around the current dimension of the noise to
+ // stay within the array bounds in a continuous fashion (so that tiling lines are not visible)
+ if (noiseValue >= limitValue) {
+ noiseValue -= newValue;
+ }
+ return noiseValue;
+}
+
+inline SkScalar smoothCurve(SkScalar t) {
+ return t * t * (3 - 2 * t);
+}
+
+} // end namespace
+
// Constructs the shader implementation. The octave count is clamped to
// kMaxOctaves; a null (or empty) tileSize disables tile stitching.
SkPerlinNoiseShaderImpl::SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::Type type,
                                                 SkScalar baseFrequencyX,
                                                 SkScalar baseFrequencyY,
                                                 int numOctaves,
                                                 SkScalar seed,
                                                 const SkISize* tileSize)
    : fType(type)
    , fBaseFrequencyX(baseFrequencyX)
    , fBaseFrequencyY(baseFrequencyY)
    , fNumOctaves(numOctaves > kMaxOctaves ? kMaxOctaves : numOctaves) //[0,255] octaves allowed
    , fSeed(seed)
    , fTileSize(nullptr == tileSize ? SkISize::Make(0, 0) : *tileSize)
    , fStitchTiles(!fTileSize.isEmpty()) { // stitching requires a real tile size
    // Callers are expected to have gone through valid_input(), so these hold.
    SkASSERT(numOctaves >= 0 && numOctaves <= kMaxOctaves);
    SkASSERT(fBaseFrequencyX >= 0);
    SkASSERT(fBaseFrequencyY >= 0);
}
+
// Deserializes a shader previously written by flatten(); the read order must
// mirror the write order there. read32LE() range-checks the stored values.
sk_sp<SkFlattenable> SkPerlinNoiseShaderImpl::CreateProc(SkReadBuffer& buffer) {
    Type type = buffer.read32LE(kLast_Type);

    SkScalar freqX = buffer.readScalar();
    SkScalar freqY = buffer.readScalar();
    int octaves = buffer.read32LE<int>(kMaxOctaves);

    SkScalar seed = buffer.readScalar();
    SkISize tileSize;
    tileSize.fWidth = buffer.readInt();
    tileSize.fHeight = buffer.readInt();

    // Rebuild through the public factories so parameter validation runs again.
    switch (type) {
        case kFractalNoise_Type:
            return SkPerlinNoiseShader::MakeFractalNoise(freqX, freqY, octaves, seed, &tileSize);
        case kTurbulence_Type:
            return SkPerlinNoiseShader::MakeTurbulence(freqX, freqY, octaves, seed, &tileSize);
        default:
            // Really shouldn't get here b.c. of earlier check on type
            buffer.validate(false);
            return nullptr;
    }
}
+
// Serializes the construction parameters; CreateProc() reads them back in
// exactly this order.
void SkPerlinNoiseShaderImpl::flatten(SkWriteBuffer& buffer) const {
    buffer.writeInt((int) fType);
    buffer.writeScalar(fBaseFrequencyX);
    buffer.writeScalar(fBaseFrequencyY);
    buffer.writeInt(fNumOctaves);
    buffer.writeScalar(fSeed);
    buffer.writeInt(fTileSize.fWidth);
    buffer.writeInt(fTileSize.fHeight);
}
+
// Evaluates one channel of 2D gradient noise at noiseVector, following the
// SVG feTurbulence algorithm: locate the surrounding lattice cell, dot the
// four stored gradients with the offsets to the sample point, then bilinearly
// interpolate with a smooth (Hermite) weight.
SkScalar SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::noise2D(
        int channel, const StitchData& stitchData, const SkPoint& noiseVector) const {
    // Splits one coordinate into its integer lattice position and the
    // fractional offset within the cell.
    struct Noise {
        int noisePositionIntegerValue;
        int nextNoisePositionIntegerValue;
        SkScalar noisePositionFractionValue;
        Noise(SkScalar component)
        {
            SkScalar position = component + kPerlinNoise;
            noisePositionIntegerValue = SkScalarFloorToInt(position);
            noisePositionFractionValue = position - SkIntToScalar(noisePositionIntegerValue);
            nextNoisePositionIntegerValue = noisePositionIntegerValue + 1;
        }
    };
    Noise noiseX(noiseVector.x());
    Noise noiseY(noiseVector.y());
    SkScalar u, v;
    const SkPerlinNoiseShaderImpl& perlinNoiseShader = static_cast<const SkPerlinNoiseShaderImpl&>(fShader);
    // If stitching, adjust lattice points accordingly.
    if (perlinNoiseShader.fStitchTiles) {
        noiseX.noisePositionIntegerValue =
            checkNoise(noiseX.noisePositionIntegerValue, stitchData.fWrapX, stitchData.fWidth);
        noiseY.noisePositionIntegerValue =
            checkNoise(noiseY.noisePositionIntegerValue, stitchData.fWrapY, stitchData.fHeight);
        noiseX.nextNoisePositionIntegerValue =
            checkNoise(noiseX.nextNoisePositionIntegerValue, stitchData.fWrapX, stitchData.fWidth);
        noiseY.nextNoisePositionIntegerValue =
            checkNoise(noiseY.nextNoisePositionIntegerValue, stitchData.fWrapY, stitchData.fHeight);
    }
    // Keep lattice coordinates inside the permutation table's domain.
    noiseX.noisePositionIntegerValue &= kBlockMask;
    noiseY.noisePositionIntegerValue &= kBlockMask;
    noiseX.nextNoisePositionIntegerValue &= kBlockMask;
    noiseY.nextNoisePositionIntegerValue &= kBlockMask;
    // Hash the cell's corners into gradient-table indices (b00/b10/b01/b11).
    int i = fPaintingData.fLatticeSelector[noiseX.noisePositionIntegerValue];
    int j = fPaintingData.fLatticeSelector[noiseX.nextNoisePositionIntegerValue];
    int b00 = (i + noiseY.noisePositionIntegerValue) & kBlockMask;
    int b10 = (j + noiseY.noisePositionIntegerValue) & kBlockMask;
    int b01 = (i + noiseY.nextNoisePositionIntegerValue) & kBlockMask;
    int b11 = (j + noiseY.nextNoisePositionIntegerValue) & kBlockMask;
    SkScalar sx = smoothCurve(noiseX.noisePositionFractionValue);
    SkScalar sy = smoothCurve(noiseY.noisePositionFractionValue);

    if (sx < 0 || sy < 0 || sx > 1 || sy > 1) {
        return 0; // Check for pathological inputs.
    }

    // This is taken 1:1 from SVG spec: http://www.w3.org/TR/SVG11/filters.html#feTurbulenceElement
    SkPoint fractionValue = SkPoint::Make(noiseX.noisePositionFractionValue,
                                          noiseY.noisePositionFractionValue); // Offset (0,0)
    u = fPaintingData.fGradient[channel][b00].dot(fractionValue);
    fractionValue.fX -= SK_Scalar1; // Offset (-1,0)
    v = fPaintingData.fGradient[channel][b10].dot(fractionValue);
    SkScalar a = SkScalarInterp(u, v, sx);
    fractionValue.fY -= SK_Scalar1; // Offset (-1,-1)
    v = fPaintingData.fGradient[channel][b11].dot(fractionValue);
    fractionValue.fX = noiseX.noisePositionFractionValue; // Offset (0,-1)
    u = fPaintingData.fGradient[channel][b01].dot(fractionValue);
    SkScalar b = SkScalarInterp(u, v, sx);
    // Interpolate along y between the two x-interpolated edge values.
    return SkScalarInterp(a, b, sy);
}
+
// Sums fNumOctaves octaves of noise2D() for one channel at `point`, doubling
// the frequency and halving the amplitude each octave, per SVG feTurbulence.
// Fractal noise sums signed values and remaps to [0,1] at the end; turbulence
// sums absolute values. The alpha channel (3) is additionally scaled by the
// paint alpha. The result is clamped to [0, 1].
SkScalar SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::calculateTurbulenceValueForPoint(
        int channel, StitchData& stitchData, const SkPoint& point) const {
    const SkPerlinNoiseShaderImpl& perlinNoiseShader = static_cast<const SkPerlinNoiseShaderImpl&>(fShader);
    if (perlinNoiseShader.fStitchTiles) {
        // Reset to the first-octave stitch values.
        stitchData = fPaintingData.fStitchDataInit;
    }
    SkScalar turbulenceFunctionResult = 0;
    SkPoint noiseVector(SkPoint::Make(point.x() * fPaintingData.fBaseFrequency.fX,
                                      point.y() * fPaintingData.fBaseFrequency.fY));
    SkScalar ratio = SK_Scalar1;
    for (int octave = 0; octave < perlinNoiseShader.fNumOctaves; ++octave) {
        SkScalar noise = noise2D(channel, stitchData, noiseVector);
        SkScalar numer = (perlinNoiseShader.fType == kFractalNoise_Type) ?
                            noise : SkScalarAbs(noise);
        turbulenceFunctionResult += numer / ratio;
        // Next octave: double the frequency, halve the amplitude (via ratio).
        noiseVector.fX *= 2;
        noiseVector.fY *= 2;
        ratio *= 2;
        if (perlinNoiseShader.fStitchTiles) {
            stitchData = StitchData(SkIntToScalar(stitchData.fWidth) * 2,
                                    SkIntToScalar(stitchData.fHeight) * 2);
        }
    }

    if (perlinNoiseShader.fType == kFractalNoise_Type) {
        // For kFractalNoise the result is: noise[-1,1] * 0.5 + 0.5
        turbulenceFunctionResult = SkScalarHalf(turbulenceFunctionResult + 1);
    }

    if (channel == 3) { // Scale alpha by paint value
        turbulenceFunctionResult *= SkIntToScalar(getPaintAlpha()) / 255;
    }

    // Clamp result
    return SkTPin(turbulenceFunctionResult, 0.0f, SK_Scalar1);
}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
// Shades one device-space point: maps it through fMatrix, snaps to the
// nearest integer coordinate, evaluates the turbulence function once per
// RGBA channel, and returns the premultiplied color.
SkPMColor SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::shade(
        const SkPoint& point, StitchData& stitchData) const {
    SkPoint newPoint;
    fMatrix.mapPoints(&newPoint, &point, 1);
    newPoint.fX = SkScalarRoundToScalar(newPoint.fX);
    newPoint.fY = SkScalarRoundToScalar(newPoint.fY);

    U8CPU rgba[4];
    for (int channel = 3; channel >= 0; --channel) {
        SkScalar value;
        value = calculateTurbulenceValueForPoint(channel, stitchData, newPoint);
        // value is already clamped to [0,1]; scale to a byte.
        rgba[channel] = SkScalarFloorToInt(255 * value);
    }
    return SkPreMultiplyARGB(rgba[3], rgba[0], rgba[1], rgba[2]);
}
+
#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
// Creates the raster (CPU) shading context, arena-allocated for the draw.
SkShaderBase::Context* SkPerlinNoiseShaderImpl::onMakeContext(const ContextRec& rec,
                                                              SkArenaAlloc* alloc) const {
    // should we pay attention to rec's device-colorspace?
    return alloc->make<PerlinNoiseShaderContext>(*this, rec);
}
#endif
+
+static inline SkMatrix total_matrix(const SkShaderBase::ContextRec& rec,
+ const SkShaderBase& shader) {
+ if (rec.fLocalMatrix) {
+ return SkMatrix::Concat(*rec.fMatrix, *rec.fLocalMatrix);
+ }
+ return *rec.fMatrix;
+}
+
// Builds per-draw state: the combined matrix and the precomputed noise tables
// (PaintingData) derived from the shader's parameters and that matrix.
SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::PerlinNoiseShaderContext(
        const SkPerlinNoiseShaderImpl& shader, const ContextRec& rec)
    : INHERITED(shader, rec)
    , fMatrix(total_matrix(rec, shader)) // used for temp storage, adjusted below
    , fPaintingData(shader.fTileSize, shader.fSeed, shader.fBaseFrequencyX,
                    shader.fBaseFrequencyY, fMatrix)
{
    // This (1,1) translation is due to WebKit's 1 based coordinates for the noise
    // (as opposed to 0 based, usually). The same adjustment is in the setData() function.
    fMatrix.setTranslate(-fMatrix.getTranslateX() + SK_Scalar1,
                         -fMatrix.getTranslateY() + SK_Scalar1);
}
+
// Shades `count` pixels of the row starting at device position (x, y),
// advancing one pixel in x per iteration.
void SkPerlinNoiseShaderImpl::PerlinNoiseShaderContext::shadeSpan(
        int x, int y, SkPMColor result[], int count) {
    SkPoint point = SkPoint::Make(SkIntToScalar(x), SkIntToScalar(y));
    StitchData stitchData;
    for (int i = 0; i < count; ++i) {
        result[i] = shade(point, stitchData);
        point.fX += SK_Scalar1;
    }
}
+
+/////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
// Ganesh fragment processor implementing perlin noise. Child FP 0 samples the
// permutation table and child FP 1 the gradient/noise table, both with
// explicitly-supplied coordinates. PaintingData owns the CPU-side tables and
// the shader parameters.
class GrPerlinNoise2Effect : public GrFragmentProcessor {
public:
    static std::unique_ptr<GrFragmentProcessor> Make(
            SkPerlinNoiseShaderImpl::Type type,
            int numOctaves,
            bool stitchTiles,
            std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> paintingData,
            GrSurfaceProxyView permutationsView,
            GrSurfaceProxyView noiseView,
            const GrCaps& caps) {
        // Repeat in x, clamp in y, no filtering — the tables tile horizontally.
        static constexpr GrSamplerState kRepeatXSampler = {GrSamplerState::WrapMode::kRepeat,
                                                           GrSamplerState::WrapMode::kClamp,
                                                           GrSamplerState::Filter::kNearest};
        auto permutationsFP =
                GrTextureEffect::Make(std::move(permutationsView), kPremul_SkAlphaType,
                                      SkMatrix::I(), kRepeatXSampler, caps);
        auto noiseFP = GrTextureEffect::Make(std::move(noiseView), kPremul_SkAlphaType,
                                             SkMatrix::I(), kRepeatXSampler, caps);

        return std::unique_ptr<GrFragmentProcessor>(
                new GrPerlinNoise2Effect(type,
                                         numOctaves,
                                         stitchTiles,
                                         std::move(paintingData),
                                         std::move(permutationsFP),
                                         std::move(noiseFP)));
    }

    const char* name() const override { return "PerlinNoise"; }

    std::unique_ptr<GrFragmentProcessor> clone() const override {
        return std::unique_ptr<GrFragmentProcessor>(new GrPerlinNoise2Effect(*this));
    }

    const SkPerlinNoiseShaderImpl::StitchData& stitchData() const { return fPaintingData->fStitchDataInit; }

    SkPerlinNoiseShaderImpl::Type type() const { return fType; }
    bool stitchTiles() const { return fStitchTiles; }
    const SkVector& baseFrequency() const { return fPaintingData->fBaseFrequency; }
    int numOctaves() const { return fNumOctaves; }

private:
    // Generates the SkSL for this effect and uploads its uniforms.
    class Impl : public ProgramImpl {
    public:
        SkString emitHelper(EmitArgs& args);
        void emitCode(EmitArgs&) override;

    private:
        void onSetData(const GrGLSLProgramDataManager&, const GrFragmentProcessor&) override;

        GrGLSLProgramDataManager::UniformHandle fStitchDataUni;
        GrGLSLProgramDataManager::UniformHandle fBaseFrequencyUni;
    };

    std::unique_ptr<ProgramImpl> onMakeProgramImpl() const override {
        return std::make_unique<Impl>();
    }

    void onAddToKey(const GrShaderCaps& caps, skgpu::KeyBuilder* b) const override;

    // Two effects are interchangeable only if every field that influences the
    // generated program or its uniforms matches.
    bool onIsEqual(const GrFragmentProcessor& sBase) const override {
        const GrPerlinNoise2Effect& s = sBase.cast<GrPerlinNoise2Effect>();
        return fType == s.fType &&
               fPaintingData->fBaseFrequency == s.fPaintingData->fBaseFrequency &&
               fNumOctaves == s.fNumOctaves &&
               fStitchTiles == s.fStitchTiles &&
               fPaintingData->fStitchDataInit == s.fPaintingData->fStitchDataInit;
    }

    GrPerlinNoise2Effect(SkPerlinNoiseShaderImpl::Type type,
                         int numOctaves,
                         bool stitchTiles,
                         std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> paintingData,
                         std::unique_ptr<GrFragmentProcessor> permutationsFP,
                         std::unique_ptr<GrFragmentProcessor> noiseFP)
            : INHERITED(kGrPerlinNoise2Effect_ClassID, kNone_OptimizationFlags)
            , fType(type)
            , fNumOctaves(numOctaves)
            , fStitchTiles(stitchTiles)
            , fPaintingData(std::move(paintingData)) {
        this->registerChild(std::move(permutationsFP), SkSL::SampleUsage::Explicit());
        this->registerChild(std::move(noiseFP), SkSL::SampleUsage::Explicit());
        this->setUsesSampleCoordsDirectly();
    }

    // Copy constructor used by clone(); deep-copies the painting data.
    GrPerlinNoise2Effect(const GrPerlinNoise2Effect& that)
            : INHERITED(that)
            , fType(that.fType)
            , fNumOctaves(that.fNumOctaves)
            , fStitchTiles(that.fStitchTiles)
            , fPaintingData(new SkPerlinNoiseShaderImpl::PaintingData(*that.fPaintingData)) {}

    GR_DECLARE_FRAGMENT_PROCESSOR_TEST

    SkPerlinNoiseShaderImpl::Type fType;
    int fNumOctaves;
    bool fStitchTiles;

    std::unique_ptr<SkPerlinNoiseShaderImpl::PaintingData> fPaintingData;

    using INHERITED = GrFragmentProcessor;
};
+
+/////////////////////////////////////////////////////////////////////
+GR_DEFINE_FRAGMENT_PROCESSOR_TEST(GrPerlinNoise2Effect)
+
#if GR_TEST_UTILS
// Processor-test hook: builds a perlin-noise shader with randomized
// parameters and converts it to a fragment processor.
std::unique_ptr<GrFragmentProcessor> GrPerlinNoise2Effect::TestCreate(GrProcessorTestData* d) {
    int numOctaves = d->fRandom->nextRangeU(2, 10);
    bool stitchTiles = d->fRandom->nextBool();
    SkScalar seed = SkIntToScalar(d->fRandom->nextU());
    SkISize tileSize;
    tileSize.fWidth = d->fRandom->nextRangeU(4, 4096);
    tileSize.fHeight = d->fRandom->nextRangeU(4, 4096);
    SkScalar baseFrequencyX = d->fRandom->nextRangeScalar(0.01f, 0.99f);
    SkScalar baseFrequencyY = d->fRandom->nextRangeScalar(0.01f, 0.99f);

    // Randomly pick fractal noise or turbulence; tileSize only matters when
    // stitching.
    sk_sp<SkShader> shader(d->fRandom->nextBool() ?
        SkPerlinNoiseShader::MakeFractalNoise(baseFrequencyX, baseFrequencyY, numOctaves, seed,
                                              stitchTiles ? &tileSize : nullptr) :
        SkPerlinNoiseShader::MakeTurbulence(baseFrequencyX, baseFrequencyY, numOctaves, seed,
                                            stitchTiles ? &tileSize : nullptr));

    GrTest::TestAsFPArgs asFPArgs(d);
    return as_SB(shader)->asRootFragmentProcessor(asFPArgs.args(), GrTest::TestMatrix(d->fRandom));
}
#endif
+
// Emits a GLSL helper function computing one channel of 2D perlin noise —
// the GPU counterpart of the CPU noise2D() above — and returns its mangled
// name. Child 0 is sampled for the permutation/lattice data, child 1 for the
// gradients. A stitching variant takes an extra stitchData argument.
SkString GrPerlinNoise2Effect::Impl::emitHelper(EmitArgs& args) {
    const GrPerlinNoise2Effect& pne = args.fFp.cast<GrPerlinNoise2Effect>();

    GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;

    // Add noise function
    const GrShaderVar gPerlinNoiseArgs[] = {{"chanCoord", SkSLType::kHalf },
                                            {"noiseVec ", SkSLType::kHalf2}};

    const GrShaderVar gPerlinNoiseStitchArgs[] = {{"chanCoord" , SkSLType::kHalf },
                                                  {"noiseVec"  , SkSLType::kHalf2},
                                                  {"stitchData", SkSLType::kHalf2}};

    SkString noiseCode;

    // Lattice cell corners (floorVal.xy = this corner, .zw = next corner) and
    // the fractional position within the cell.
    noiseCode.append(
        "half4 floorVal;"
        "floorVal.xy = floor(noiseVec);"
        "floorVal.zw = floorVal.xy + half2(1);"
        "half2 fractVal = fract(noiseVec);"

        // smooth curve : t^2*(3 - 2*t)
        "half2 noiseSmooth = fractVal*fractVal*(half2(3) - 2*fractVal);"
    );

    // Adjust frequencies if we're stitching tiles
    if (pne.stitchTiles()) {
        noiseCode.append(
            "if (floorVal.x >= stitchData.x) { floorVal.x -= stitchData.x; };"
            "if (floorVal.y >= stitchData.y) { floorVal.y -= stitchData.y; };"
            "if (floorVal.z >= stitchData.x) { floorVal.z -= stitchData.x; };"
            "if (floorVal.w >= stitchData.y) { floorVal.w -= stitchData.y; };"
        );
    }

    // NOTE: We need to explicitly pass half4(1) as input color here, because the helper function
    // can't see fInputColor (which is "_input" in the FP's outer function). skbug.com/10506
    SkString sampleX = this->invokeChild(0, "half4(1)", args, "half2(floorVal.x, 0.5)");
    SkString sampleY = this->invokeChild(0, "half4(1)", args, "half2(floorVal.z, 0.5)");
    noiseCode.appendf("half2 latticeIdx = half2(%s.a, %s.a);", sampleX.c_str(), sampleY.c_str());

#if defined(SK_BUILD_FOR_ANDROID)
    // Android rounding for Tegra devices, like, for example: Xoom (Tegra 2), Nexus 7 (Tegra 3).
    // The issue is that colors aren't accurate enough on Tegra devices. For example, if an 8 bit
    // value of 124 (or 0.486275 here) is entered, we can get a texture value of 123.513725
    // (or 0.484368 here). The following rounding operation prevents these precision issues from
    // affecting the result of the noise by making sure that we only have multiples of 1/255.
    // (Note that 1/255 is about 0.003921569, which is the value used here).
    noiseCode.append(
            "latticeIdx = floor(latticeIdx * half2(255.0) + half2(0.5)) * half2(0.003921569);");
#endif

    // Get (x,y) coordinates with the permuted x
    noiseCode.append("half4 bcoords = 256*latticeIdx.xyxy + floorVal.yyww;");

    noiseCode.append("half2 uv;");

    // This is the math to convert the two 16bit integer packed into rgba 8 bit input into a
    // [-1,1] vector and perform a dot product between that vector and the provided vector.
    // Save it as a string because we will repeat it 4x.
    static constexpr const char* inc8bit = "0.00390625"; // 1.0 / 256.0
    SkString dotLattice =
            SkStringPrintf("dot((lattice.ga + lattice.rb*%s)*2 - half2(1), fractVal)", inc8bit);

    SkString sampleA = this->invokeChild(1, "half4(1)", args, "half2(bcoords.x, chanCoord)");
    SkString sampleB = this->invokeChild(1, "half4(1)", args, "half2(bcoords.y, chanCoord)");
    SkString sampleC = this->invokeChild(1, "half4(1)", args, "half2(bcoords.w, chanCoord)");
    SkString sampleD = this->invokeChild(1, "half4(1)", args, "half2(bcoords.z, chanCoord)");

    // Compute u, at offset (0,0)
    noiseCode.appendf("half4 lattice = %s;", sampleA.c_str());
    noiseCode.appendf("uv.x = %s;", dotLattice.c_str());

    // Compute v, at offset (-1,0)
    noiseCode.append("fractVal.x -= 1.0;");
    noiseCode.appendf("lattice = %s;", sampleB.c_str());
    noiseCode.appendf("uv.y = %s;", dotLattice.c_str());

    // Compute 'a' as a linear interpolation of 'u' and 'v'
    noiseCode.append("half2 ab;");
    noiseCode.append("ab.x = mix(uv.x, uv.y, noiseSmooth.x);");

    // Compute v, at offset (-1,-1)
    noiseCode.append("fractVal.y -= 1.0;");
    noiseCode.appendf("lattice = %s;", sampleC.c_str());
    noiseCode.appendf("uv.y = %s;", dotLattice.c_str());

    // Compute u, at offset (0,-1)
    noiseCode.append("fractVal.x += 1.0;");
    noiseCode.appendf("lattice = %s;", sampleD.c_str());
    noiseCode.appendf("uv.x = %s;", dotLattice.c_str());

    // Compute 'b' as a linear interpolation of 'u' and 'v'
    noiseCode.append("ab.y = mix(uv.x, uv.y, noiseSmooth.x);");
    // Compute the noise as a linear interpolation of 'a' and 'b'
    noiseCode.append("return mix(ab.x, ab.y, noiseSmooth.y);");

    // Emit the assembled function under a shader-unique (mangled) name.
    SkString noiseFuncName = fragBuilder->getMangledFunctionName("noiseFuncName");
    if (pne.stitchTiles()) {
        fragBuilder->emitFunction(SkSLType::kHalf, noiseFuncName.c_str(),
                                  {gPerlinNoiseStitchArgs, std::size(gPerlinNoiseStitchArgs)},
                                  noiseCode.c_str());
    } else {
        fragBuilder->emitFunction(SkSLType::kHalf, noiseFuncName.c_str(),
                                  {gPerlinNoiseArgs, std::size(gPerlinNoiseArgs)},
                                  noiseCode.c_str());
    }

    return noiseFuncName;
}
+
// Emits the fragment shader body: an octave loop that accumulates the
// (optionally |abs|'d) noise helper output for all four channels, remaps
// fractal noise into [0,1], clamps, and premultiplies — mirroring the CPU
// calculateTurbulenceValueForPoint() above.
void GrPerlinNoise2Effect::Impl::emitCode(EmitArgs& args) {

    SkString noiseFuncName = this->emitHelper(args);

    const GrPerlinNoise2Effect& pne = args.fFp.cast<GrPerlinNoise2Effect>();

    GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
    GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

    fBaseFrequencyUni = uniformHandler->addUniform(&pne, kFragment_GrShaderFlag, SkSLType::kHalf2,
                                                   "baseFrequency");
    const char* baseFrequencyUni = uniformHandler->getUniformCStr(fBaseFrequencyUni);

    // The stitch uniform only exists when the effect was built with stitching.
    const char* stitchDataUni = nullptr;
    if (pne.stitchTiles()) {
        fStitchDataUni = uniformHandler->addUniform(&pne, kFragment_GrShaderFlag, SkSLType::kHalf2,
                                                    "stitchData");
        stitchDataUni = uniformHandler->getUniformCStr(fStitchDataUni);
    }

    // There are rounding errors if the floor operation is not performed here
    fragBuilder->codeAppendf("half2 noiseVec = half2(floor(%s.xy) * %s);",
                             args.fSampleCoord, baseFrequencyUni);

    // Clear the color accumulator
    fragBuilder->codeAppendf("half4 color = half4(0);");

    if (pne.stitchTiles()) {
        fragBuilder->codeAppendf("half2 stitchData = %s;", stitchDataUni);
    }

    fragBuilder->codeAppendf("half ratio = 1.0;");

    // Loop over all octaves
    fragBuilder->codeAppendf("for (int octave = 0; octave < %d; ++octave) {", pne.numOctaves());
    fragBuilder->codeAppendf(    "color += ");
    if (pne.type() != SkPerlinNoiseShaderImpl::kFractalNoise_Type) {
        fragBuilder->codeAppend("abs(");
    }

    // There are 4 lines, put y coords at center of each.
    static constexpr const char* chanCoordR = "0.5";
    static constexpr const char* chanCoordG = "1.5";
    static constexpr const char* chanCoordB = "2.5";
    static constexpr const char* chanCoordA = "3.5";
    if (pne.stitchTiles()) {
        fragBuilder->codeAppendf(
            "half4(%s(%s, noiseVec, stitchData), %s(%s, noiseVec, stitchData),"
                  "%s(%s, noiseVec, stitchData), %s(%s, noiseVec, stitchData))",
            noiseFuncName.c_str(), chanCoordR,
            noiseFuncName.c_str(), chanCoordG,
            noiseFuncName.c_str(), chanCoordB,
            noiseFuncName.c_str(), chanCoordA);
    } else {
        fragBuilder->codeAppendf(
            "half4(%s(%s, noiseVec), %s(%s, noiseVec),"
                  "%s(%s, noiseVec), %s(%s, noiseVec))",
            noiseFuncName.c_str(), chanCoordR,
            noiseFuncName.c_str(), chanCoordG,
            noiseFuncName.c_str(), chanCoordB,
            noiseFuncName.c_str(), chanCoordA);
    }
    if (pne.type() != SkPerlinNoiseShaderImpl::kFractalNoise_Type) {
        fragBuilder->codeAppend(")"); // end of "abs("
    }
    fragBuilder->codeAppend(" * ratio;");

    // Next octave: double the frequency, halve the amplitude.
    fragBuilder->codeAppend("noiseVec *= half2(2.0);"
                            "ratio *= 0.5;");

    if (pne.stitchTiles()) {
        fragBuilder->codeAppend("stitchData *= half2(2.0);");
    }
    fragBuilder->codeAppend("}"); // end of the for loop on octaves

    if (pne.type() == SkPerlinNoiseShaderImpl::kFractalNoise_Type) {
        // The value of turbulenceFunctionResult comes from ((turbulenceFunctionResult) + 1) / 2
        // by fractalNoise and (turbulenceFunctionResult) by turbulence.
        fragBuilder->codeAppendf("color = color * half4(0.5) + half4(0.5);");
    }

    // Clamp values
    fragBuilder->codeAppendf("color = saturate(color);");

    // Pre-multiply the result
    fragBuilder->codeAppendf("return half4(color.rgb * color.aaa, color.a);");
}
+
// Uploads per-draw uniforms: the base frequency always, and the stitch
// dimensions only when tile stitching was compiled into the program.
void GrPerlinNoise2Effect::Impl::onSetData(const GrGLSLProgramDataManager& pdman,
                                           const GrFragmentProcessor& processor) {
    const GrPerlinNoise2Effect& turbulence = processor.cast<GrPerlinNoise2Effect>();

    const SkVector& baseFrequency = turbulence.baseFrequency();
    pdman.set2f(fBaseFrequencyUni, baseFrequency.fX, baseFrequency.fY);

    if (turbulence.stitchTiles()) {
        const SkPerlinNoiseShaderImpl::StitchData& stitchData = turbulence.stitchData();
        pdman.set2f(fStitchDataUni,
                    SkIntToScalar(stitchData.fWidth),
                    SkIntToScalar(stitchData.fHeight));
    }
}
+
+void GrPerlinNoise2Effect::onAddToKey(const GrShaderCaps& caps, skgpu::KeyBuilder* b) const {
+ uint32_t key = fNumOctaves;
+ key = key << 3; // Make room for next 3 bits
+ switch (fType) {
+ case SkPerlinNoiseShaderImpl::kFractalNoise_Type:
+ key |= 0x1;
+ break;
+ case SkPerlinNoiseShaderImpl::kTurbulence_Type:
+ key |= 0x2;
+ break;
+ default:
+ // leave key at 0
+ break;
+ }
+ if (fStitchTiles) {
+ key |= 0x4; // Flip the 3rd bit if tile stitching is on
+ }
+ b->add32(key);
+}
+
+/////////////////////////////////////////////////////////////////////
+
// Ganesh back end: converts this shader into a fragment processor. The noise
// tables are uploaded as cached bitmap textures; the effect samples in device
// space, with the WebKit-style (1,1) offset applied via a GrMatrixEffect.
// Returns nullptr if either texture could not be created.
std::unique_ptr<GrFragmentProcessor> SkPerlinNoiseShaderImpl::asFragmentProcessor(
        const GrFPArgs& args, const MatrixRec& mRec) const {
    SkASSERT(args.fContext);
    SkASSERT(fNumOctaves);

    const SkMatrix& totalMatrix = mRec.totalMatrix();

    // Either we don't stitch tiles, or we have a valid tile size
    SkASSERT(!fStitchTiles || !fTileSize.isEmpty());

    auto paintingData = std::make_unique<SkPerlinNoiseShaderImpl::PaintingData>(fTileSize,
                                                                                fSeed,
                                                                                fBaseFrequencyX,
                                                                                fBaseFrequencyY,
                                                                                totalMatrix);

    // Like shadeSpan, we start from device space. We will account for that below with a device
    // space effect.

    auto context = args.fContext;

    const SkBitmap& permutationsBitmap = paintingData->getPermutationsBitmap();
    const SkBitmap& noiseBitmap = paintingData->getNoiseBitmap();

    auto permutationsView = std::get<0>(GrMakeCachedBitmapProxyView(
            context,
            permutationsBitmap,
            /*label=*/"PerlinNoiseShader_FragmentProcessor_PermutationsView"));
    auto noiseView = std::get<0>(GrMakeCachedBitmapProxyView(
            context, noiseBitmap, /*label=*/"PerlinNoiseShader_FragmentProcessor_NoiseView"));

    if (permutationsView && noiseView) {
        return GrFragmentProcessor::DeviceSpace(
                GrMatrixEffect::Make(SkMatrix::Translate(1 - totalMatrix.getTranslateX(),
                                                         1 - totalMatrix.getTranslateY()),
                                     GrPerlinNoise2Effect::Make(fType,
                                                                fNumOctaves,
                                                                fStitchTiles,
                                                                std::move(paintingData),
                                                                std::move(permutationsView),
                                                                std::move(noiseView),
                                                                *context->priv().caps())));
    }
    // Texture upload failed.
    return nullptr;
}
+
+#endif
+
+#if defined(SK_GRAPHITE)
+
// If either of these change then the corresponding change must also be made in the SkSL
// perlin_noise_shader function, which relies on these exact numeric values.
static_assert((int)SkPerlinNoiseShaderImpl::kFractalNoise_Type ==
              (int)skgpu::graphite::PerlinNoiseShaderBlock::Type::kFractalNoise);
static_assert((int)SkPerlinNoiseShaderImpl::kTurbulence_Type ==
              (int)skgpu::graphite::PerlinNoiseShaderBlock::Type::kTurbulence);

// If kBlockSize changes here then it must also be changed in the SkSL noise_function
// implementation.
static_assert(kBlockSize == 256);
+
// Graphite back end: appends a LocalMatrix(PerlinNoise) block pair to the
// paint key and records the permutation/noise textures and uniforms. Falls
// back to a solid red block if the matrix cannot be inverted or the noise
// tables cannot be uploaded.
void SkPerlinNoiseShaderImpl::addToKey(const skgpu::graphite::KeyContext& keyContext,
                                       skgpu::graphite::PaintParamsKeyBuilder* builder,
                                       skgpu::graphite::PipelineDataGatherer* gatherer) const {
    using namespace skgpu::graphite;

    SkASSERT(fNumOctaves);

    // Combine local-to-device with any pending local matrix.
    SkMatrix totalMatrix = keyContext.local2Dev().asM33();
    if (keyContext.localMatrix()) {
        totalMatrix.preConcat(*keyContext.localMatrix());
    }

    SkMatrix invTotal;
    bool result = totalMatrix.invert(&invTotal);
    if (!result) {
        SKGPU_LOG_W("Couldn't invert totalMatrix for PerlinNoiseShader");

        // Solid red stands in for the un-renderable shader.
        SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer, {1, 0, 0, 1});
        builder->endBlock();
        return;
    }

    auto paintingData = std::make_unique<SkPerlinNoiseShaderImpl::PaintingData>(fTileSize,
                                                                                fSeed,
                                                                                fBaseFrequencyX,
                                                                                fBaseFrequencyY,
                                                                                totalMatrix);

    sk_sp<SkImage> permImg = RecorderPriv::CreateCachedImage(keyContext.recorder(),
                                                             paintingData->getPermutationsBitmap());

    sk_sp<SkImage> noiseImg = RecorderPriv::CreateCachedImage(keyContext.recorder(),
                                                              paintingData->getNoiseBitmap());

    if (!permImg || !noiseImg) {
        SKGPU_LOG_W("Couldn't create tables for PerlinNoiseShader");

        // Same solid-red fallback as the non-invertible case above.
        SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer, {1, 0, 0, 1});
        builder->endBlock();
        return;
    }

    PerlinNoiseShaderBlock::PerlinNoiseData data(static_cast<PerlinNoiseShaderBlock::Type>(fType),
                                                 paintingData->fBaseFrequency,
                                                 fNumOctaves,
                                                 { paintingData->fStitchDataInit.fWidth,
                                                   paintingData->fStitchDataInit.fHeight });

    TextureProxyView view;

    std::tie(view, std::ignore) = as_IB(permImg)->asView(keyContext.recorder(),
                                                         skgpu::Mipmapped::kNo);
    data.fPermutationsProxy = view.refProxy();

    std::tie(view, std::ignore) = as_IB(noiseImg)->asView(keyContext.recorder(),
                                                          skgpu::Mipmapped::kNo);
    data.fNoiseProxy = view.refProxy();

    // This (1,1) translation is due to WebKit's 1 based coordinates for the noise
    // (as opposed to 0 based, usually). Remember: this matrix (shader2World) is going to be
    // inverted before being applied.
    SkMatrix shader2Local = SkMatrix::Translate(-1 + totalMatrix.getTranslateX(),
                                                -1 + totalMatrix.getTranslateY());
    shader2Local.postConcat(invTotal);

    LocalMatrixShaderBlock::LMShaderData lmShaderData(shader2Local);

    KeyContextWithLocalMatrix newContext(keyContext, shader2Local);

    LocalMatrixShaderBlock::BeginBlock(newContext, builder, gatherer, &lmShaderData);
    PerlinNoiseShaderBlock::BeginBlock(newContext, builder, gatherer, &data);
    builder->endBlock();
    builder->endBlock();
}
+#endif // SK_GRAPHITE
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool valid_input(SkScalar baseX, SkScalar baseY, int numOctaves, const SkISize* tileSize,
+ SkScalar seed) {
+ if (!(baseX >= 0 && baseY >= 0)) {
+ return false;
+ }
+ if (!(numOctaves >= 0 && numOctaves <= SkPerlinNoiseShaderImpl::kMaxOctaves)) {
+ return false;
+ }
+ if (tileSize && !(tileSize->width() >= 0 && tileSize->height() >= 0)) {
+ return false;
+ }
+ if (!SkScalarIsFinite(seed)) {
+ return false;
+ }
+ return true;
+}
+
// Public factory for a "fractal noise" shader (SVG feTurbulence
// type="fractalNoise"). Returns nullptr for invalid parameters. With zero
// octaves the octave sum is empty, so the shader reduces to a constant and a
// solid-color shader is returned instead.
sk_sp<SkShader> SkPerlinNoiseShader::MakeFractalNoise(SkScalar baseFrequencyX,
                                                      SkScalar baseFrequencyY,
                                                      int numOctaves, SkScalar seed,
                                                      const SkISize* tileSize) {
    if (!valid_input(baseFrequencyX, baseFrequencyY, numOctaves, tileSize, seed)) {
        return nullptr;
    }

    if (0 == numOctaves) {
        // For kFractalNoise, w/o any octaves, the entire shader collapses to:
        // [0,0,0,0] * 0.5 + 0.5
        constexpr SkColor4f kTransparentGray = {0.5f, 0.5f, 0.5f, 0.5f};

        return SkShaders::Color(kTransparentGray, /* colorSpace= */ nullptr);
    }

    return sk_sp<SkShader>(new SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::kFractalNoise_Type,
                                                       baseFrequencyX, baseFrequencyY, numOctaves,
                                                       seed, tileSize));
}
+
// Public factory for a "turbulence" shader (SVG feTurbulence
// type="turbulence"). Returns nullptr for invalid parameters; with zero
// octaves the sum is empty, so a transparent solid-color shader is returned.
sk_sp<SkShader> SkPerlinNoiseShader::MakeTurbulence(SkScalar baseFrequencyX,
                                                    SkScalar baseFrequencyY,
                                                    int numOctaves, SkScalar seed,
                                                    const SkISize* tileSize) {
    if (!valid_input(baseFrequencyX, baseFrequencyY, numOctaves, tileSize, seed)) {
        return nullptr;
    }

    if (0 == numOctaves) {
        // For kTurbulence, w/o any octaves, the entire shader collapses to: [0,0,0,0]
        return SkShaders::Color(SkColors::kTransparent, /* colorSpace= */ nullptr);
    }

    return sk_sp<SkShader>(new SkPerlinNoiseShaderImpl(SkPerlinNoiseShaderImpl::kTurbulence_Type,
                                                       baseFrequencyX, baseFrequencyY, numOctaves,
                                                       seed, tileSize));
}
+
// Registers the implementation class with the flattenable (serialization)
// factory so CreateProc() can be located by name during deserialization.
void SkPerlinNoiseShader::RegisterFlattenables() {
    SK_REGISTER_FLATTENABLE(SkPerlinNoiseShaderImpl);
}
diff --git a/gfx/skia/skia/src/shaders/SkPictureShader.cpp b/gfx/skia/skia/src/shaders/SkPictureShader.cpp
new file mode 100644
index 0000000000..b65b1bca56
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkPictureShader.cpp
@@ -0,0 +1,501 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/SkPictureShader.h"
+
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkImage.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkImageInfoPriv.h"
+#include "src/core/SkImagePriv.h"
+#include "src/core/SkMatrixPriv.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkMatrixUtils.h"
+#include "src/core/SkPicturePriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkVM.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkImageShader.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+
+#if defined(SK_GANESH)
+#include "include/gpu/GrDirectContext.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrColorInfo.h"
+#include "src/gpu/ganesh/GrFPArgs.h"
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/SkGr.h"
+#include "src/gpu/ganesh/effects/GrTextureEffect.h"
+#include "src/image/SkImage_Base.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/Caps.h"
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#include "src/gpu/graphite/RecorderPriv.h"
+#endif
+
+sk_sp<SkShader> SkPicture::makeShader(SkTileMode tmx, SkTileMode tmy, SkFilterMode filter,
+ const SkMatrix* localMatrix, const SkRect* tile) const {
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+ return SkPictureShader::Make(sk_ref_sp(this), tmx, tmy, filter, localMatrix, tile);
+}
+
+namespace {
+static unsigned gImageFromPictureKeyNamespaceLabel;
+
+struct ImageFromPictureKey : public SkResourceCache::Key {
+public:
+ ImageFromPictureKey(SkColorSpace* colorSpace, SkColorType colorType,
+ uint32_t pictureID, const SkRect& subset,
+ SkSize scale, const SkSurfaceProps& surfaceProps)
+ : fColorSpaceXYZHash(colorSpace->toXYZD50Hash())
+ , fColorSpaceTransferFnHash(colorSpace->transferFnHash())
+ , fColorType(static_cast<uint32_t>(colorType))
+ , fSubset(subset)
+ , fScale(scale)
+ , fSurfaceProps(surfaceProps)
+ {
+ static const size_t keySize = sizeof(fColorSpaceXYZHash) +
+ sizeof(fColorSpaceTransferFnHash) +
+ sizeof(fColorType) +
+ sizeof(fSubset) +
+ sizeof(fScale) +
+ sizeof(fSurfaceProps);
+ // This better be packed.
+ SkASSERT(sizeof(uint32_t) * (&fEndOfStruct - &fColorSpaceXYZHash) == keySize);
+ this->init(&gImageFromPictureKeyNamespaceLabel,
+ SkPicturePriv::MakeSharedID(pictureID),
+ keySize);
+ }
+
+private:
+ uint32_t fColorSpaceXYZHash;
+ uint32_t fColorSpaceTransferFnHash;
+ uint32_t fColorType;
+ SkRect fSubset;
+ SkSize fScale;
+ SkSurfaceProps fSurfaceProps;
+
+ SkDEBUGCODE(uint32_t fEndOfStruct;)
+};
+
+struct ImageFromPictureRec : public SkResourceCache::Rec {
+ ImageFromPictureRec(const ImageFromPictureKey& key, sk_sp<SkImage> image)
+ : fKey(key)
+ , fImage(std::move(image)) {}
+
+ ImageFromPictureKey fKey;
+ sk_sp<SkImage> fImage;
+
+ const Key& getKey() const override { return fKey; }
+ size_t bytesUsed() const override {
+        // Record overhead plus an estimate of the rendered image's pixel footprint.
+ return sizeof(fKey) + (size_t)fImage->width() * fImage->height() * 4;
+ }
+ const char* getCategory() const override { return "bitmap-shader"; }
+ SkDiscardableMemory* diagnostic_only_getDiscardable() const override { return nullptr; }
+
+ static bool Visitor(const SkResourceCache::Rec& baseRec, void* contextShader) {
+ const ImageFromPictureRec& rec = static_cast<const ImageFromPictureRec&>(baseRec);
+ sk_sp<SkImage>* result = reinterpret_cast<sk_sp<SkImage>*>(contextShader);
+
+ *result = rec.fImage;
+ return true;
+ }
+};
+
+} // namespace
+
+SkPictureShader::SkPictureShader(sk_sp<SkPicture> picture,
+ SkTileMode tmx,
+ SkTileMode tmy,
+ SkFilterMode filter,
+ const SkRect* tile)
+ : fPicture(std::move(picture))
+ , fTile(tile ? *tile : fPicture->cullRect())
+ , fTmx(tmx)
+ , fTmy(tmy)
+ , fFilter(filter) {}
+
+sk_sp<SkShader> SkPictureShader::Make(sk_sp<SkPicture> picture, SkTileMode tmx, SkTileMode tmy,
+ SkFilterMode filter, const SkMatrix* lm, const SkRect* tile) {
+ if (!picture || picture->cullRect().isEmpty() || (tile && tile->isEmpty())) {
+ return SkShaders::Empty();
+ }
+ return SkLocalMatrixShader::MakeWrapped<SkPictureShader>(lm,
+ std::move(picture),
+ tmx, tmy,
+ filter,
+ tile);
+}
+
+sk_sp<SkFlattenable> SkPictureShader::CreateProc(SkReadBuffer& buffer) {
+ SkMatrix lm;
+ if (buffer.isVersionLT(SkPicturePriv::Version::kNoShaderLocalMatrix)) {
+ buffer.readMatrix(&lm);
+ }
+ auto tmx = buffer.read32LE(SkTileMode::kLastTileMode);
+ auto tmy = buffer.read32LE(SkTileMode::kLastTileMode);
+ SkRect tile = buffer.readRect();
+
+ sk_sp<SkPicture> picture;
+
+ SkFilterMode filter = SkFilterMode::kNearest;
+ if (buffer.isVersionLT(SkPicturePriv::kNoFilterQualityShaders_Version)) {
+ if (buffer.isVersionLT(SkPicturePriv::kPictureShaderFilterParam_Version)) {
+ bool didSerialize = buffer.readBool();
+ if (didSerialize) {
+ picture = SkPicturePriv::MakeFromBuffer(buffer);
+ }
+ } else {
+ unsigned legacyFilter = buffer.read32();
+ if (legacyFilter <= (unsigned)SkFilterMode::kLast) {
+ filter = (SkFilterMode)legacyFilter;
+ }
+ picture = SkPicturePriv::MakeFromBuffer(buffer);
+ }
+ } else {
+ filter = buffer.read32LE(SkFilterMode::kLast);
+ picture = SkPicturePriv::MakeFromBuffer(buffer);
+ }
+ return SkPictureShader::Make(picture, tmx, tmy, filter, &lm, &tile);
+}
+
+void SkPictureShader::flatten(SkWriteBuffer& buffer) const {
+ buffer.write32((unsigned)fTmx);
+ buffer.write32((unsigned)fTmy);
+ buffer.writeRect(fTile);
+ buffer.write32((unsigned)fFilter);
+ SkPicturePriv::Flatten(fPicture, buffer);
+}
+
+static sk_sp<SkColorSpace> ref_or_srgb(SkColorSpace* cs) {
+ return cs ? sk_ref_sp(cs) : SkColorSpace::MakeSRGB();
+}
+
+struct CachedImageInfo {
+ bool success;
+ SkSize tileScale; // Additional scale factors to apply when sampling image.
+ SkMatrix matrixForDraw; // Matrix used to produce an image from the picture
+ SkImageInfo imageInfo;
+ SkSurfaceProps props;
+
+ static CachedImageInfo Make(const SkRect& bounds,
+ const SkMatrix& totalM,
+ SkColorType dstColorType,
+ SkColorSpace* dstColorSpace,
+ const int maxTextureSize,
+ const SkSurfaceProps& propsIn) {
+ SkSurfaceProps props = propsIn.cloneWithPixelGeometry(kUnknown_SkPixelGeometry);
+
+ const SkSize scaledSize = [&]() {
+ SkSize size;
+ // Use a rotation-invariant scale
+ if (!totalM.decomposeScale(&size, nullptr)) {
+ SkPoint center = {bounds.centerX(), bounds.centerY()};
+ SkScalar area = SkMatrixPriv::DifferentialAreaScale(totalM, center);
+ if (!SkScalarIsFinite(area) || SkScalarNearlyZero(area)) {
+ size = {1, 1}; // ill-conditioned matrix
+ } else {
+ size.fWidth = size.fHeight = SkScalarSqrt(area);
+ }
+ }
+ size.fWidth *= bounds.width();
+ size.fHeight *= bounds.height();
+
+ // Clamp the tile size to about 4M pixels
+ static const SkScalar kMaxTileArea = 2048 * 2048;
+ SkScalar tileArea = size.width() * size.height();
+ if (tileArea > kMaxTileArea) {
+ SkScalar clampScale = SkScalarSqrt(kMaxTileArea / tileArea);
+ size.set(size.width() * clampScale, size.height() * clampScale);
+ }
+
+            // Scale down the tile size if it is larger than maxTextureSize on the GPU
+            // path; otherwise texture creation would fail
+ if (maxTextureSize) {
+ if (size.width() > maxTextureSize || size.height() > maxTextureSize) {
+ SkScalar downScale = maxTextureSize / std::max(size.width(),
+ size.height());
+ size.set(SkScalarFloorToScalar(size.width() * downScale),
+ SkScalarFloorToScalar(size.height() * downScale));
+ }
+ }
+ return size;
+ }();
+
+ const SkISize tileSize = scaledSize.toCeil();
+ if (tileSize.isEmpty()) {
+ return {false, {}, {}, {}, {}};
+ }
+
+ const SkSize tileScale = {
+ tileSize.width() / bounds.width(), tileSize.height() / bounds.height()
+ };
+ auto imgCS = ref_or_srgb(dstColorSpace);
+ const SkColorType imgCT = SkColorTypeMaxBitsPerChannel(dstColorType) <= 8
+ ? kRGBA_8888_SkColorType
+ : kRGBA_F16Norm_SkColorType;
+
+ return {true,
+ tileScale,
+ SkMatrix::RectToRect(bounds, SkRect::MakeIWH(tileSize.width(), tileSize.height())),
+ SkImageInfo::Make(tileSize, imgCT, kPremul_SkAlphaType, imgCS),
+ props};
+ }
+
+ sk_sp<SkImage> makeImage(sk_sp<SkSurface> surf, const SkPicture* pict) const {
+ if (!surf) {
+ return nullptr;
+ }
+ auto canvas = surf->getCanvas();
+ canvas->concat(matrixForDraw);
+ canvas->drawPicture(pict);
+ return surf->makeImageSnapshot();
+ }
+};
+
+// Returns a cached image shader, which wraps a single picture tile at the given
+// CTM/local matrix. Also adjusts the local matrix for tile scaling.
+sk_sp<SkShader> SkPictureShader::rasterShader(const SkMatrix& totalM,
+ SkColorType dstColorType,
+ SkColorSpace* dstColorSpace,
+ const SkSurfaceProps& propsIn) const {
+ const int maxTextureSize_NotUsedForCPU = 0;
+ CachedImageInfo info = CachedImageInfo::Make(fTile,
+ totalM,
+ dstColorType, dstColorSpace,
+ maxTextureSize_NotUsedForCPU,
+ propsIn);
+ if (!info.success) {
+ return nullptr;
+ }
+
+ ImageFromPictureKey key(info.imageInfo.colorSpace(), info.imageInfo.colorType(),
+ fPicture->uniqueID(), fTile, info.tileScale, info.props);
+
+ sk_sp<SkImage> image;
+ if (!SkResourceCache::Find(key, ImageFromPictureRec::Visitor, &image)) {
+ image = info.makeImage(SkSurface::MakeRaster(info.imageInfo, &info.props), fPicture.get());
+ if (!image) {
+ return nullptr;
+ }
+
+ SkResourceCache::Add(new ImageFromPictureRec(key, image));
+ SkPicturePriv::AddedToCache(fPicture.get());
+ }
+ // Scale the image to the original picture size.
+ auto lm = SkMatrix::Scale(1.f/info.tileScale.width(), 1.f/info.tileScale.height());
+ return image->makeShader(fTmx, fTmy, SkSamplingOptions(fFilter), &lm);
+}
+
+bool SkPictureShader::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+ // Keep bitmapShader alive by using alloc instead of stack memory
+ auto& bitmapShader = *rec.fAlloc->make<sk_sp<SkShader>>();
+ // We don't check whether the total local matrix is valid here because we have to assume *some*
+ // mapping to make an image. It could be wildly wrong if there is a runtime shader transforming
+ // the coordinates in a manner we don't know about here. However, that is a fundamental problem
+ // with the technique of converting a picture to an image to implement this shader.
+ bitmapShader = this->rasterShader(mRec.totalMatrix(),
+ rec.fDstColorType,
+ rec.fDstCS,
+ rec.fSurfaceProps);
+ if (!bitmapShader) {
+ return false;
+ }
+ return as_SB(bitmapShader)->appendStages(rec, mRec);
+}
+
+skvm::Color SkPictureShader::program(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ // TODO: We'll need additional plumbing to get the correct props from our callers.
+ SkSurfaceProps props{};
+
+ // Keep bitmapShader alive by using alloc instead of stack memory
+ auto& bitmapShader = *alloc->make<sk_sp<SkShader>>();
+ bitmapShader = this->rasterShader(mRec.totalMatrix(), dst.colorType(), dst.colorSpace(), props);
+ if (!bitmapShader) {
+ return {};
+ }
+
+ return as_SB(bitmapShader)->program(p, device, local, paint, mRec, dst, uniforms, alloc);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+SkShaderBase::Context* SkPictureShader::onMakeContext(const ContextRec& rec, SkArenaAlloc* alloc)
+const {
+ const auto& vm = *rec.fMatrix;
+ const auto* lm = rec.fLocalMatrix;
+ const auto totalM = lm ? SkMatrix::Concat(vm, *lm) : vm;
+ sk_sp<SkShader> bitmapShader = this->rasterShader(totalM, rec.fDstColorType,
+ rec.fDstColorSpace, rec.fProps);
+ if (!bitmapShader) {
+ return nullptr;
+ }
+
+ return as_SB(bitmapShader)->makeContext(rec, alloc);
+}
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
+#include "src/gpu/ganesh/GrProxyProvider.h"
+
+std::unique_ptr<GrFragmentProcessor> SkPictureShader::asFragmentProcessor(
+ const GrFPArgs& args, const MatrixRec& mRec) const {
+ auto ctx = args.fContext;
+ SkColorType dstColorType = GrColorTypeToSkColorType(args.fDstColorInfo->colorType());
+ if (dstColorType == kUnknown_SkColorType) {
+ dstColorType = kRGBA_8888_SkColorType;
+ }
+
+ auto dstCS = ref_or_srgb(args.fDstColorInfo->colorSpace());
+
+ auto info = CachedImageInfo::Make(fTile,
+ mRec.totalMatrix(),
+ dstColorType,
+ dstCS.get(),
+ ctx->priv().caps()->maxTextureSize(),
+ args.fSurfaceProps);
+ if (!info.success) {
+ return nullptr;
+ }
+
+    // Make sure the GPU supports our requested color type (it might be FP16)
+ if (!ctx->colorTypeSupportedAsSurface(info.imageInfo.colorType())) {
+ info.imageInfo = info.imageInfo.makeColorType(kRGBA_8888_SkColorType);
+ }
+
+ static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
+ skgpu::UniqueKey key;
+ std::tuple keyData = {
+ dstCS->toXYZD50Hash(),
+ dstCS->transferFnHash(),
+ static_cast<uint32_t>(dstColorType),
+ fPicture->uniqueID(),
+ fTile,
+ info.tileScale,
+ info.props
+ };
+ skgpu::UniqueKey::Builder builder(&key, kDomain, sizeof(keyData)/sizeof(uint32_t),
+ "Picture Shader Image");
+ memcpy(&builder[0], &keyData, sizeof(keyData));
+ builder.finish();
+
+ GrProxyProvider* provider = ctx->priv().proxyProvider();
+ GrSurfaceProxyView view;
+ if (auto proxy = provider->findOrCreateProxyByUniqueKey(key)) {
+ view = GrSurfaceProxyView(proxy, kTopLeft_GrSurfaceOrigin, skgpu::Swizzle());
+ } else {
+ const int msaaSampleCount = 0;
+ const bool createWithMips = false;
+ auto image = info.makeImage(SkSurface::MakeRenderTarget(ctx,
+ skgpu::Budgeted::kYes,
+ info.imageInfo,
+ msaaSampleCount,
+ kTopLeft_GrSurfaceOrigin,
+ &info.props,
+ createWithMips),
+ fPicture.get());
+ if (!image) {
+ return nullptr;
+ }
+ auto [v, ct] = as_IB(image)->asView(ctx, GrMipmapped::kNo);
+ view = std::move(v);
+ provider->assignUniqueKeyToProxy(key, view.asTextureProxy());
+ }
+
+ const GrSamplerState sampler(static_cast<GrSamplerState::WrapMode>(fTmx),
+ static_cast<GrSamplerState::WrapMode>(fTmy),
+ fFilter);
+ auto fp = GrTextureEffect::Make(std::move(view),
+ kPremul_SkAlphaType,
+ SkMatrix::I(),
+ sampler,
+ *ctx->priv().caps());
+ SkMatrix scale = SkMatrix::Scale(info.tileScale.width(), info.tileScale.height());
+ bool success;
+ std::tie(success, fp) = mRec.apply(std::move(fp), scale);
+ return success ? std::move(fp) : nullptr;
+}
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkPictureShader::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+
+ using namespace skgpu::graphite;
+
+ Recorder* recorder = keyContext.recorder();
+ const Caps* caps = recorder->priv().caps();
+
+ // TODO: We'll need additional plumbing to get the correct props from our callers. In
+ // particular we'll need to expand the keyContext to have the surfaceProps, the dstColorType
+ // and dstColorSpace.
+ SkSurfaceProps props{};
+
+ SkMatrix totalM = keyContext.local2Dev().asM33();
+ if (keyContext.localMatrix()) {
+ totalM.preConcat(*keyContext.localMatrix());
+ }
+ CachedImageInfo info = CachedImageInfo::Make(fTile,
+ totalM,
+ /* dstColorType= */ kRGBA_8888_SkColorType,
+ /* dstColorSpace= */ nullptr,
+ caps->maxTextureSize(),
+ props);
+ if (!info.success) {
+ SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer, {1, 0, 0, 1});
+ builder->endBlock();
+ return;
+ }
+
+ // TODO: right now we're explicitly not caching here. We could expand the ImageProvider
+ // API to include already Graphite-backed images, add a Recorder-local cache or add
+ // rendered-picture images to the global cache.
+ sk_sp<SkImage> img = info.makeImage(SkSurface::MakeGraphite(recorder, info.imageInfo,
+ skgpu::Mipmapped::kNo, &info.props),
+ fPicture.get());
+ if (!img) {
+ SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer, {1, 0, 0, 1});
+ builder->endBlock();
+ return;
+ }
+
+ const auto shaderLM = SkMatrix::Scale(1.f/info.tileScale.width(), 1.f/info.tileScale.height());
+ sk_sp<SkShader> shader = img->makeShader(fTmx, fTmy, SkSamplingOptions(fFilter), &shaderLM);
+ if (!shader) {
+ SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer, {1, 0, 0, 1});
+ builder->endBlock();
+ return;
+ }
+
+ as_SB(shader)->addToKey(keyContext, builder, gatherer);
+}
+#endif // SK_GRAPHITE
diff --git a/gfx/skia/skia/src/shaders/SkPictureShader.h b/gfx/skia/skia/src/shaders/SkPictureShader.h
new file mode 100644
index 0000000000..3f2942bc9a
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkPictureShader.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPictureShader_DEFINED
+#define SkPictureShader_DEFINED
+
+#include "include/core/SkTileMode.h"
+#include "src/shaders/SkShaderBase.h"
+#include <atomic>
+
+class SkArenaAlloc;
+class SkBitmap;
+class SkPicture;
+
+/*
+ * An SkPictureShader can be used to draw SkPicture-based patterns.
+ *
+ * The SkPicture is first rendered into a tile, which is then used to shade the area according
+ * to specified tiling rules.
+ */
+class SkPictureShader : public SkShaderBase {
+public:
+ static sk_sp<SkShader> Make(sk_sp<SkPicture>, SkTileMode, SkTileMode, SkFilterMode,
+ const SkMatrix*, const SkRect*);
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+ SkPictureShader(sk_sp<SkPicture>, SkTileMode, SkTileMode, SkFilterMode, const SkRect*);
+
+protected:
+ SkPictureShader(SkReadBuffer&);
+ void flatten(SkWriteBuffer&) const override;
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const override;
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const override;
+#endif
+
+private:
+ SK_FLATTENABLE_HOOKS(SkPictureShader)
+
+ sk_sp<SkShader> rasterShader(const SkMatrix&,
+ SkColorType dstColorType,
+ SkColorSpace* dstColorSpace,
+ const SkSurfaceProps& props) const;
+
+ sk_sp<SkPicture> fPicture;
+ SkRect fTile;
+ SkTileMode fTmx, fTmy;
+ SkFilterMode fFilter;
+
+ using INHERITED = SkShaderBase;
+};
+
+#endif // SkPictureShader_DEFINED
diff --git a/gfx/skia/skia/src/shaders/SkShader.cpp b/gfx/skia/skia/src/shaders/SkShader.cpp
new file mode 100644
index 0000000000..83eee34278
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkShader.cpp
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkMallocPixelRef.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkScalar.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkBitmapProcShader.h"
+#include "src/shaders/SkImageShader.h"
+#include "src/shaders/SkShaderBase.h"
+#include "src/shaders/SkTransformShader.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+#include "src/gpu/ganesh/effects/GrMatrixEffect.h"
+#endif
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+SkShaderBase::SkShaderBase() = default;
+
+SkShaderBase::~SkShaderBase() = default;
+
+SkShaderBase::MatrixRec::MatrixRec(const SkMatrix& ctm) : fCTM(ctm) {}
+
+std::optional<SkShaderBase::MatrixRec>
+SkShaderBase::MatrixRec::apply(const SkStageRec& rec, const SkMatrix& postInv) const {
+ SkMatrix total = fPendingLocalMatrix;
+ if (!fCTMApplied) {
+ total = SkMatrix::Concat(fCTM, total);
+ }
+ if (!total.invert(&total)) {
+ return {};
+ }
+ total = SkMatrix::Concat(postInv, total);
+ if (!fCTMApplied) {
+ rec.fPipeline->append(SkRasterPipelineOp::seed_shader);
+ }
+ // append_matrix is a no-op if total worked out to identity.
+ rec.fPipeline->append_matrix(rec.fAlloc, total);
+ return MatrixRec{fCTM,
+ fTotalLocalMatrix,
+ /*pendingLocalMatrix=*/SkMatrix::I(),
+ fTotalMatrixIsValid,
+ /*ctmApplied=*/true};
+}
+
+std::optional<SkShaderBase::MatrixRec>
+SkShaderBase::MatrixRec::apply(skvm::Builder* p,
+ skvm::Coord* local,
+ skvm::Uniforms* uniforms,
+ const SkMatrix& postInv) const {
+ SkMatrix total = fPendingLocalMatrix;
+ if (!fCTMApplied) {
+ total = SkMatrix::Concat(fCTM, total);
+ }
+ if (!total.invert(&total)) {
+ return {};
+ }
+ total = SkMatrix::Concat(postInv, total);
+ // ApplyMatrix is a no-op if total worked out to identity.
+ *local = SkShaderBase::ApplyMatrix(p, total, *local, uniforms);
+ return MatrixRec{fCTM,
+ fTotalLocalMatrix,
+ /*pendingLocalMatrix=*/SkMatrix::I(),
+ fTotalMatrixIsValid,
+ /*ctmApplied=*/true};
+}
+
+#if defined(SK_GANESH)
+GrFPResult SkShaderBase::MatrixRec::apply(std::unique_ptr<GrFragmentProcessor> fp,
+ const SkMatrix& postInv) const {
+ // FP matrices work differently than SkRasterPipeline and SkVM. The starting coordinates
+ // provided to the root SkShader's FP are already in local space. So we never apply the inverse
+ // CTM.
+ SkASSERT(!fCTMApplied);
+ SkMatrix total;
+ if (!fPendingLocalMatrix.invert(&total)) {
+ return {false, std::move(fp)};
+ }
+ total = SkMatrix::Concat(postInv, total);
+ // GrMatrixEffect returns 'fp' if total worked out to identity.
+ return {true, GrMatrixEffect::Make(total, std::move(fp))};
+}
+
+SkShaderBase::MatrixRec SkShaderBase::MatrixRec::applied() const {
+ // We mark the CTM as "not applied" because we *never* apply the CTM for FPs. Their starting
+ // coords are local, not device, coords.
+ return MatrixRec{fCTM,
+ fTotalLocalMatrix,
+ /*pendingLocalMatrix=*/SkMatrix::I(),
+ fTotalMatrixIsValid,
+ /*ctmApplied=*/false};
+}
+#endif
+
+SkShaderBase::MatrixRec SkShaderBase::MatrixRec::concat(const SkMatrix& m) const {
+ return {fCTM,
+ SkShaderBase::ConcatLocalMatrices(fTotalLocalMatrix, m),
+ SkShaderBase::ConcatLocalMatrices(fPendingLocalMatrix, m),
+ fTotalMatrixIsValid,
+ fCTMApplied};
+}
+
+void SkShaderBase::flatten(SkWriteBuffer& buffer) const { this->INHERITED::flatten(buffer); }
+
+bool SkShaderBase::computeTotalInverse(const SkMatrix& ctm,
+ const SkMatrix* localMatrix,
+ SkMatrix* totalInverse) const {
+ return (localMatrix ? SkMatrix::Concat(ctm, *localMatrix) : ctm).invert(totalInverse);
+}
+
+bool SkShaderBase::asLuminanceColor(SkColor* colorPtr) const {
+ SkColor storage;
+ if (nullptr == colorPtr) {
+ colorPtr = &storage;
+ }
+ if (this->onAsLuminanceColor(colorPtr)) {
+ *colorPtr = SkColorSetA(*colorPtr, 0xFF); // we only return opaque
+ return true;
+ }
+ return false;
+}
+
+SkShaderBase::Context* SkShaderBase::makeContext(const ContextRec& rec, SkArenaAlloc* alloc) const {
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ // We always fall back to raster pipeline when perspective is present.
+ if (rec.fMatrix->hasPerspective() || (rec.fLocalMatrix && rec.fLocalMatrix->hasPerspective()) ||
+ !this->computeTotalInverse(*rec.fMatrix, rec.fLocalMatrix, nullptr)) {
+ return nullptr;
+ }
+
+ return this->onMakeContext(rec, alloc);
+#else
+ return nullptr;
+#endif
+}
+
+SkShaderBase::Context::Context(const SkShaderBase& shader, const ContextRec& rec)
+ : fShader(shader), fCTM(*rec.fMatrix)
+{
+ // We should never use a context with perspective.
+ SkASSERT(!rec.fMatrix->hasPerspective());
+ SkASSERT(!rec.fLocalMatrix || !rec.fLocalMatrix->hasPerspective());
+
+ // Because the context parameters must be valid at this point, we know that the matrix is
+ // invertible.
+ SkAssertResult(fShader.computeTotalInverse(*rec.fMatrix, rec.fLocalMatrix, &fTotalInverse));
+
+ fPaintAlpha = rec.fPaintAlpha;
+}
+
+SkShaderBase::Context::~Context() {}
+
+bool SkShaderBase::ContextRec::isLegacyCompatible(SkColorSpace* shaderColorSpace) const {
+ // In legacy pipelines, shaders always produce premul (or opaque) and the destination is also
+ // always premul (or opaque). (And those "or opaque" caveats won't make any difference here.)
+ SkAlphaType shaderAT = kPremul_SkAlphaType,
+ dstAT = kPremul_SkAlphaType;
+ return 0 == SkColorSpaceXformSteps{shaderColorSpace, shaderAT,
+ fDstColorSpace, dstAT}.flags.mask();
+}
+
+SkImage* SkShader::isAImage(SkMatrix* localMatrix, SkTileMode xy[2]) const {
+ return as_SB(this)->onIsAImage(localMatrix, xy);
+}
+
+#if defined(SK_GANESH)
+std::unique_ptr<GrFragmentProcessor>
+SkShaderBase::asRootFragmentProcessor(const GrFPArgs& args, const SkMatrix& ctm) const {
+ return this->asFragmentProcessor(args, MatrixRec(ctm));
+}
+
+std::unique_ptr<GrFragmentProcessor> SkShaderBase::asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const {
+ return nullptr;
+}
+#endif
+
+sk_sp<SkShader> SkShaderBase::makeAsALocalMatrixShader(SkMatrix*) const {
+ return nullptr;
+}
+
+#if defined(SK_GRAPHITE)
+// TODO: add implementations for derived classes
+void SkShaderBase::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ SolidColorShaderBlock::BeginBlock(keyContext, builder, gatherer, {1, 0, 0, 1});
+ builder->endBlock();
+}
+#endif
+
+bool SkShaderBase::appendRootStages(const SkStageRec& rec, const SkMatrix& ctm) const {
+ return this->appendStages(rec, MatrixRec(ctm));
+}
+
+bool SkShaderBase::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+ // SkShader::Context::shadeSpan() handles the paint opacity internally,
+ // but SkRasterPipelineBlitter applies it as a separate stage.
+ // We skip the internal shadeSpan() step by forcing the paint opaque.
+ SkColor4f opaquePaintColor = rec.fPaintColor.makeOpaque();
+
+ // We don't have a separate ctm and local matrix at this point. Just pass the combined matrix
+ // as the CTM. TODO: thread the MatrixRec through the legacy context system.
+ auto tm = mRec.totalMatrix();
+ ContextRec cr(opaquePaintColor,
+ tm,
+ nullptr,
+ rec.fDstColorType,
+ sk_srgb_singleton(),
+ rec.fSurfaceProps);
+
+ struct CallbackCtx : SkRasterPipeline_CallbackCtx {
+ sk_sp<const SkShader> shader;
+ Context* ctx;
+ };
+ auto cb = rec.fAlloc->make<CallbackCtx>();
+ cb->shader = sk_ref_sp(this);
+ cb->ctx = as_SB(this)->makeContext(cr, rec.fAlloc);
+ cb->fn = [](SkRasterPipeline_CallbackCtx* self, int active_pixels) {
+ auto c = (CallbackCtx*)self;
+ int x = (int)c->rgba[0],
+ y = (int)c->rgba[1];
+ SkPMColor tmp[SkRasterPipeline_kMaxStride_highp];
+ c->ctx->shadeSpan(x,y, tmp, active_pixels);
+
+ for (int i = 0; i < active_pixels; i++) {
+ auto rgba_4f = SkPMColor4f::FromPMColor(tmp[i]);
+ memcpy(c->rgba + 4*i, rgba_4f.vec(), 4*sizeof(float));
+ }
+ };
+
+ if (cb->ctx) {
+ rec.fPipeline->append(SkRasterPipelineOp::seed_shader);
+ rec.fPipeline->append(SkRasterPipelineOp::callback, cb);
+ rec.fAlloc->make<SkColorSpaceXformSteps>(sk_srgb_singleton(), kPremul_SkAlphaType,
+ rec.fDstCS, kPremul_SkAlphaType)
+ ->apply(rec.fPipeline);
+ return true;
+ }
+ return false;
+}
+
+skvm::Color SkShaderBase::rootProgram(skvm::Builder* p,
+ skvm::Coord device,
+ skvm::Color paint,
+ const SkMatrix& ctm,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ // Shader subclasses should always act as if the destination were premul or opaque.
+ // SkVMBlitter handles all the coordination of unpremul itself, via premul.
+ SkColorInfo tweaked = dst.alphaType() == kUnpremul_SkAlphaType
+ ? dst.makeAlphaType(kPremul_SkAlphaType)
+ : dst;
+
+ // Force opaque alpha for all opaque shaders.
+ //
+ // This is primarily nice in that we usually have a 1.0f constant splat
+ // somewhere in the program anyway, and this will let us drop the work the
+ // shader notionally does to produce alpha, p->extract(...), etc. in favor
+ // of that simple hoistable splat.
+ //
+ // More subtly, it makes isOpaque() a parameter to all shader program
+ // generation, guaranteeing that is-opaque bit is mixed into the overall
+ // shader program hash and blitter Key. This makes it safe for us to use
+ // that bit to make decisions when constructing an SkVMBlitter, like doing
+ // SrcOver -> Src strength reduction.
+ if (auto color = this->program(p,
+ device,
+ /*local=*/device,
+ paint,
+ MatrixRec(ctm),
+ tweaked,
+ uniforms,
+ alloc)) {
+ if (this->isOpaque()) {
+ color.a = p->splat(1.0f);
+ }
+ return color;
+ }
+ return {};
+}
+
+// need a cheap way to invert the alpha channel of a shader (i.e. 1 - a)
+sk_sp<SkShader> SkShaderBase::makeInvertAlpha() const {
+ return this->makeWithColorFilter(SkColorFilters::Blend(0xFFFFFFFF, SkBlendMode::kSrcOut));
+}
+
+
+skvm::Coord SkShaderBase::ApplyMatrix(skvm::Builder* p, const SkMatrix& m,
+ skvm::Coord coord, skvm::Uniforms* uniforms) {
+ skvm::F32 x = coord.x,
+ y = coord.y;
+ if (m.isIdentity()) {
+ // That was easy.
+ } else if (m.isTranslate()) {
+ x = p->add(x, p->uniformF(uniforms->pushF(m[2])));
+ y = p->add(y, p->uniformF(uniforms->pushF(m[5])));
+ } else if (m.isScaleTranslate()) {
+ x = p->mad(x, p->uniformF(uniforms->pushF(m[0])), p->uniformF(uniforms->pushF(m[2])));
+ y = p->mad(y, p->uniformF(uniforms->pushF(m[4])), p->uniformF(uniforms->pushF(m[5])));
+ } else { // Affine or perspective.
+ auto dot = [&,x,y](int row) {
+ return p->mad(x, p->uniformF(uniforms->pushF(m[3*row+0])),
+ p->mad(y, p->uniformF(uniforms->pushF(m[3*row+1])),
+ p->uniformF(uniforms->pushF(m[3*row+2]))));
+ };
+ x = dot(0);
+ y = dot(1);
+ if (m.hasPerspective()) {
+ x = x * (1.0f / dot(2));
+ y = y * (1.0f / dot(2));
+ }
+ }
+ return {x,y};
+}
diff --git a/gfx/skia/skia/src/shaders/SkShaderBase.h b/gfx/skia/skia/src/shaders/SkShaderBase.h
new file mode 100644
index 0000000000..d6348c2859
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkShaderBase.h
@@ -0,0 +1,494 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShaderBase_DEFINED
+#define SkShaderBase_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkSurfaceProps.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "src/base/SkTLazy.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkVM_fwd.h"
+
+#include <tuple>
+
+class GrFragmentProcessor;
+struct GrFPArgs;
+class SkArenaAlloc;
+class SkColorSpace;
+class SkImage;
+struct SkImageInfo;
+class SkPaint;
+class SkRasterPipeline;
+class SkRuntimeEffect;
+class SkStageUpdater;
+class SkUpdatableShader;
+
+namespace skgpu::graphite {
+class KeyContext;
+class PaintParamsKeyBuilder;
+class PipelineDataGatherer;
+}
+
+#if defined(SK_GANESH)
+using GrFPResult = std::tuple<bool /*success*/, std::unique_ptr<GrFragmentProcessor>>;
+#endif
+
+class SkShaderBase : public SkShader {
+public:
+ ~SkShaderBase() override;
+
+ sk_sp<SkShader> makeInvertAlpha() const;
+ sk_sp<SkShader> makeWithCTM(const SkMatrix&) const; // owns its own ctm
+
+ /**
+ * Returns true if the shader is guaranteed to produce only a single color.
+ * Subclasses can override this to allow loop-hoisting optimization.
+ */
+ virtual bool isConstant() const { return false; }
+
+ enum class GradientType {
+ kNone,
+ kColor,
+ kLinear,
+ kRadial,
+ kSweep,
+ kConical
+ };
+
+ /**
+ * If the shader subclass can be represented as a gradient, asGradient
+ * returns the matching GradientType enum (or GradientType::kNone if it
+ * cannot). Also, if info is not null, asGradient populates info with
+ * the relevant (see below) parameters for the gradient. fColorCount
+ * is both an input and output parameter. On input, it indicates how
+ * many entries in fColors and fColorOffsets can be used, if they are
+ * non-NULL. After asGradient has run, fColorCount indicates how
+ * many color-offset pairs there are in the gradient. If there is
+ * insufficient space to store all of the color-offset pairs, fColors
+ * and fColorOffsets will not be altered. fColorOffsets specifies
+ * where on the range of 0 to 1 to transition to the given color.
+ * The meaning of fPoint and fRadius is dependent on the type of gradient.
+ *
+ * None:
+ * info is ignored.
+ * Color:
+ * fColorOffsets[0] is meaningless.
+ * Linear:
+ * fPoint[0] and fPoint[1] are the end-points of the gradient
+ * Radial:
+ * fPoint[0] and fRadius[0] are the center and radius
+ * Conical:
+ * fPoint[0] and fRadius[0] are the center and radius of the 1st circle
+ * fPoint[1] and fRadius[1] are the center and radius of the 2nd circle
+ * Sweep:
+ * fPoint[0] is the center of the sweep.
+ */
+ struct GradientInfo {
+ int fColorCount = 0; //!< In-out parameter, specifies passed size
+ // of fColors/fColorOffsets on input, and
+ // actual number of colors/offsets on
+ // output.
+ SkColor* fColors = nullptr; //!< The colors in the gradient.
+ SkScalar* fColorOffsets = nullptr; //!< The unit offset for color transitions.
+ SkPoint fPoint[2]; //!< Type specific, see above.
+ SkScalar fRadius[2]; //!< Type specific, see above.
+ SkTileMode fTileMode;
+ uint32_t fGradientFlags = 0; //!< see SkGradientShader::Flags
+ };
+
+ virtual GradientType asGradient(GradientInfo* info = nullptr,
+ SkMatrix* localMatrix = nullptr) const {
+ return GradientType::kNone;
+ }
+
+ enum Flags {
+ //!< set if all of the colors will be opaque
+ kOpaqueAlpha_Flag = 1 << 0,
+
+ /** set if the spans only vary in X (const in Y).
+ e.g. an Nx1 bitmap that is being tiled in Y, or a linear-gradient
+ that varies from left-to-right. This flag specifies this for
+ shadeSpan().
+ */
+ kConstInY32_Flag = 1 << 1,
+
+ /** hint for the blitter that 4f is the preferred shading mode.
+ */
+ kPrefers4f_Flag = 1 << 2,
+ };
+
+ /**
+ * ContextRec acts as a parameter bundle for creating Contexts.
+ */
+ struct ContextRec {
+ ContextRec(const SkColor4f& paintColor, const SkMatrix& matrix, const SkMatrix* localM,
+ SkColorType dstColorType, SkColorSpace* dstColorSpace, SkSurfaceProps props)
+ : fMatrix(&matrix)
+ , fLocalMatrix(localM)
+ , fDstColorType(dstColorType)
+ , fDstColorSpace(dstColorSpace)
+ , fProps(props) {
+ fPaintAlpha = SkColorGetA(paintColor.toSkColor());
+ }
+
+ const SkMatrix* fMatrix; // the current matrix in the canvas
+ const SkMatrix* fLocalMatrix; // optional local matrix
+ SkColorType fDstColorType; // the color type of the dest surface
+ SkColorSpace* fDstColorSpace; // the color space of the dest surface (if any)
+ SkSurfaceProps fProps; // props of the dest surface
+ SkAlpha fPaintAlpha;
+
+ bool isLegacyCompatible(SkColorSpace* shadersColorSpace) const;
+ };
+
+ class Context : public ::SkNoncopyable {
+ public:
+ Context(const SkShaderBase& shader, const ContextRec&);
+
+ virtual ~Context();
+
+ /**
+ * Called sometimes before drawing with this shader. Return the type of
+ * alpha your shader will return. The default implementation returns 0.
+ * Your subclass should override if it can (even sometimes) report a
+ * non-zero value, since that will enable various blitters to perform
+ * faster.
+ */
+ virtual uint32_t getFlags() const { return 0; }
+
+ /**
+ * Called for each span of the object being drawn. Your subclass should
+ * set the appropriate colors (with premultiplied alpha) that correspond
+ * to the specified device coordinates.
+ */
+ virtual void shadeSpan(int x, int y, SkPMColor[], int count) = 0;
+
+ protected:
+ // Reference to shader, so we don't have to dupe information.
+ const SkShaderBase& fShader;
+
+ uint8_t getPaintAlpha() const { return fPaintAlpha; }
+ const SkMatrix& getTotalInverse() const { return fTotalInverse; }
+ const SkMatrix& getCTM() const { return fCTM; }
+
+ private:
+ SkMatrix fCTM;
+ SkMatrix fTotalInverse;
+ uint8_t fPaintAlpha;
+
+ using INHERITED = SkNoncopyable;
+ };
+
+ /**
+ * This is used to accumulate matrices, starting with the CTM, when building up
+ * SkRasterPipeline, SkVM, and GrFragmentProcessor by walking the SkShader tree. It avoids
+ * adding a matrix multiply for each individual matrix. It also handles the reverse matrix
+ * concatenation order required by Android Framework, see b/256873449.
+ *
+ * This also tracks the dubious concept of a "total matrix", which includes all the matrices
+ * encountered during traversal to the current shader, including ones that have already been
+ * applied. The total matrix represents the transformation from the current shader's coordinate
+ * space to device space. It is dubious because it doesn't account for SkShaders that manipulate
+ * the coordinates passed to their children, which may not even be representable by a matrix.
+ *
+ * The total matrix is used for mipmap level selection and a filter downgrade optimizations in
+ * SkImageShader and sizing of the SkImage created by SkPictureShader. If we can remove usages
+ * of the "total matrix" and if Android Framework could be updated to not use backwards local
+ * matrix concatenation this could just be replaced by a simple SkMatrix or SkM44 passed down
+ * during traversal.
+ */
+ class MatrixRec {
+ public:
+ MatrixRec() = default;
+
+ explicit MatrixRec(const SkMatrix& ctm);
+
+ /**
+ * Returns a new MatrixRec that represents the existing total and pending matrix
+ * pre-concat'ed with m.
+ */
+ MatrixRec SK_WARN_UNUSED_RESULT concat(const SkMatrix& m) const;
+
+ /**
+ * Appends a mul by the inverse of the pending local matrix to the pipeline. 'postInv' is an
+ * additional matrix to post-apply to the inverted pending matrix. If the pending matrix is
+ * not invertible the std::optional result won't have a value and the pipeline will be
+ * unmodified.
+ */
+ std::optional<MatrixRec> SK_WARN_UNUSED_RESULT apply(const SkStageRec& rec,
+ const SkMatrix& postInv = {}) const;
+
+ /**
+ * Muls local by the inverse of the pending matrix. 'postInv' is an additional matrix to
+ * post-apply to the inverted pending matrix. If the pending matrix is not invertible the
+ * std::optional result won't have a value and the Builder will be unmodified.
+ */
+ std::optional<MatrixRec> SK_WARN_UNUSED_RESULT apply(skvm::Builder*,
+ skvm::Coord* local, // inout
+ skvm::Uniforms*,
+ const SkMatrix& postInv = {}) const;
+
+#if defined(SK_GANESH)
+ /**
+ * Produces an FP that muls its input coords by the inverse of the pending matrix and then
+ * samples the passed FP with those coordinates. 'postInv' is an additional matrix to
+ * post-apply to the inverted pending matrix. If the pending matrix is not invertible the
+ * GrFPResult's bool will be false and the passed FP will be returned to the caller in the
+ * GrFPResult.
+ */
+ GrFPResult SK_WARN_UNUSED_RESULT apply(std::unique_ptr<GrFragmentProcessor>,
+ const SkMatrix& postInv = {}) const;
+ /**
+ * A parent FP may need to create a FP for its child by calling
+ * SkShaderBase::asFragmentProcessor() and then pass the result to the apply() above.
+ * This comes up when the parent needs to ensure pending matrices are applied before the
+ * child because the parent is going to manipulate the coordinates *after* any pending
+ * matrix and pass the resulting coords to the child. This function gets a MatrixRec that
+ * reflects the state after this MatrixRec has bee applied but it does not apply it!
+ * Example:
+ * auto childFP = fChild->asFragmentProcessor(args, mrec.applied());
+ * childFP = MakeAWrappingFPThatModifiesChildsCoords(std::move(childFP));
+ * auto [success, parentFP] = mrec.apply(std::move(childFP));
+ */
+ MatrixRec applied() const;
+#endif
+
+ /** Call to indicate that the mapping from shader to device space is not known. */
+ void markTotalMatrixInvalid() { fTotalMatrixIsValid = false; }
+
+ /** Marks the CTM as already applied; can avoid re-seeding the shader unnecessarily. */
+ void markCTMApplied() { fCTMApplied = true; }
+
+ /**
+ * Indicates whether the total matrix of a MatrixRec passed to a SkShader actually
+ * represents the full transform between that shader's coordinate space and device space.
+ */
+ bool totalMatrixIsValid() const { return fTotalMatrixIsValid; }
+
+ /**
+ * Gets the total transform from the current shader's space to device space. This may or
+ * may not be valid. Shaders should avoid making decisions based on this matrix if
+ * totalMatrixIsValid() is false.
+ */
+ SkMatrix totalMatrix() const { return SkMatrix::Concat(fCTM, fTotalLocalMatrix); }
+
+ /** Gets the inverse of totalMatrix(), if invertible. */
+ bool SK_WARN_UNUSED_RESULT totalInverse(SkMatrix* out) const {
+ return this->totalMatrix().invert(out);
+ }
+
+ /** Is there a transform that has not yet been applied by a parent shader? */
+ bool hasPendingMatrix() const {
+ return (!fCTMApplied && !fCTM.isIdentity()) || !fPendingLocalMatrix.isIdentity();
+ }
+
+ /** When generating raster pipeline, have the device coordinates been seeded? */
+ bool rasterPipelineCoordsAreSeeded() const { return fCTMApplied; }
+
+ private:
+ MatrixRec(const SkMatrix& ctm,
+ const SkMatrix& totalLocalMatrix,
+ const SkMatrix& pendingLocalMatrix,
+ bool totalIsValid,
+ bool ctmApplied)
+ : fCTM(ctm)
+ , fTotalLocalMatrix(totalLocalMatrix)
+ , fPendingLocalMatrix(pendingLocalMatrix)
+ , fTotalMatrixIsValid(totalIsValid)
+ , fCTMApplied(ctmApplied) {}
+
+ const SkMatrix fCTM;
+
+ // Concatenation of all local matrices, including those already applied.
+ const SkMatrix fTotalLocalMatrix;
+
+ // The accumulated local matrices from walking down the shader hierarchy that have NOT yet
+ // been incorporated into the SkRasterPipeline.
+ const SkMatrix fPendingLocalMatrix;
+
+ bool fTotalMatrixIsValid = true;
+
+ // Tracks whether the CTM has already been applied (and in raster pipeline whether the
+ // device coords have been seeded.)
+ bool fCTMApplied = false;
+ };
+
+ /**
+ * Make a context using the memory provided by the arena.
+ *
+ * @return pointer to context or nullptr if can't be created
+ */
+ Context* makeContext(const ContextRec&, SkArenaAlloc*) const;
+
+#if defined(SK_GANESH)
+ /**
+ * Call on the root SkShader to produce a GrFragmentProcessor.
+ *
+ * The returned GrFragmentProcessor expects an unpremultiplied input color and produces a
+ * premultiplied output.
+ */
+ std::unique_ptr<GrFragmentProcessor> asRootFragmentProcessor(const GrFPArgs&,
+ const SkMatrix& ctm) const;
+ /**
+ * Virtualized implementation of above. Any pending matrix in the MatrixRec should be applied
+ * to the coords if the SkShader uses its coordinates. This can be done by calling
+ * MatrixRec::apply() to wrap a GrFragmentProcessor in a GrMatrixEffect.
+ */
+ virtual std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const;
+#endif
+
+ /**
+ * If the shader can represent its "average" luminance in a single color, return true and
+ * if color is not NULL, return that color. If it cannot, return false and ignore the color
+ * parameter.
+ *
+ * Note: if this returns true, the returned color will always be opaque, as only the RGB
+ * components are used to compute luminance.
+ */
+ bool asLuminanceColor(SkColor*) const;
+
+ /**
+ * If this returns false, then we draw nothing (do not fall back to shader context). This should
+ * only be called on a root-level effect. It assumes that the initial device coordinates have
+ * not yet been seeded.
+ */
+ SK_WARN_UNUSED_RESULT
+ bool appendRootStages(const SkStageRec& rec, const SkMatrix& ctm) const;
+
+ /**
+ * Adds stages to implement this shader. To ensure that the correct input coords are present
+ * in r,g MatrixRec::apply() must be called (unless the shader doesn't require it's input
+ * coords). The default impl creates shadercontext and calls that (not very efficient).
+ */
+ virtual bool appendStages(const SkStageRec&, const MatrixRec&) const;
+
+ bool SK_WARN_UNUSED_RESULT computeTotalInverse(const SkMatrix& ctm,
+ const SkMatrix* localMatrix,
+ SkMatrix* totalInverse) const;
+
+ virtual SkImage* onIsAImage(SkMatrix*, SkTileMode[2]) const {
+ return nullptr;
+ }
+
+ virtual SkRuntimeEffect* asRuntimeEffect() const { return nullptr; }
+
+ static Type GetFlattenableType() { return kSkShader_Type; }
+ Type getFlattenableType() const override { return GetFlattenableType(); }
+
+ static sk_sp<SkShaderBase> Deserialize(const void* data, size_t size,
+ const SkDeserialProcs* procs = nullptr) {
+ return sk_sp<SkShaderBase>(static_cast<SkShaderBase*>(
+ SkFlattenable::Deserialize(GetFlattenableType(), data, size, procs).release()));
+ }
+ static void RegisterFlattenables();
+
+ /** DEPRECATED. skbug.com/8941
+ * If this shader can be represented by another shader + a localMatrix, return that shader and
+ * the localMatrix. If not, return nullptr and ignore the localMatrix parameter.
+ */
+ virtual sk_sp<SkShader> makeAsALocalMatrixShader(SkMatrix* localMatrix) const;
+
+ /**
+ * Called at the root of a shader tree to build a VM that produces color. The device coords
+ * should be initialized to the centers of device space pixels being shaded and the inverse of
+ * ctm should be the transform of those coords to local space.
+ */
+ SK_WARN_UNUSED_RESULT
+ skvm::Color rootProgram(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Color paint,
+ const SkMatrix& ctm,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const;
+
+ /**
+ * Virtualized implementation of above. A note on the local coords param: it must be transformed
+ * by the inverse of the "pending" matrix in MatrixRec to be put in the correct space for this
+ * shader. This is done by calling MatrixRec::apply().
+ */
+ virtual skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dst,
+ skvm::Uniforms*,
+ SkArenaAlloc*) const = 0;
+
+#if defined(SK_GRAPHITE)
+ /**
+ Add implementation details, for the specified backend, of this SkShader to the
+ provided key.
+
+ @param keyContext backend context for key creation
+ @param builder builder for creating the key for this SkShader
+ @param gatherer if non-null, storage for this shader's data
+ */
+ virtual void addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const;
+#endif
+
+ static SkMatrix ConcatLocalMatrices(const SkMatrix& parentLM, const SkMatrix& childLM) {
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) // b/256873449
+ return SkMatrix::Concat(childLM, parentLM);
+#endif
+ return SkMatrix::Concat(parentLM, childLM);
+ }
+
+protected:
+ SkShaderBase();
+
+ void flatten(SkWriteBuffer&) const override;
+
+#ifdef SK_ENABLE_LEGACY_SHADERCONTEXT
+ /**
+ * Specialize creating a SkShader context using the supplied allocator.
+ * @return pointer to context owned by the arena allocator.
+ */
+ virtual Context* onMakeContext(const ContextRec&, SkArenaAlloc*) const {
+ return nullptr;
+ }
+#endif
+
+ virtual bool onAsLuminanceColor(SkColor*) const {
+ return false;
+ }
+
+protected:
+ static skvm::Coord ApplyMatrix(skvm::Builder*, const SkMatrix&, skvm::Coord, skvm::Uniforms*);
+
+ using INHERITED = SkShader;
+};
+inline SkShaderBase* as_SB(SkShader* shader) {
+ return static_cast<SkShaderBase*>(shader);
+}
+
+inline const SkShaderBase* as_SB(const SkShader* shader) {
+ return static_cast<const SkShaderBase*>(shader);
+}
+
+inline const SkShaderBase* as_SB(const sk_sp<SkShader>& shader) {
+ return static_cast<SkShaderBase*>(shader.get());
+}
+
+void SkRegisterColor4ShaderFlattenable();
+void SkRegisterColorShaderFlattenable();
+void SkRegisterComposeShaderFlattenable();
+void SkRegisterCoordClampShaderFlattenable();
+void SkRegisterEmptyShaderFlattenable();
+
+#endif // SkShaderBase_DEFINED
diff --git a/gfx/skia/skia/src/shaders/SkTransformShader.cpp b/gfx/skia/skia/src/shaders/SkTransformShader.cpp
new file mode 100644
index 0000000000..080560a782
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkTransformShader.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/shaders/SkTransformShader.h"
+
+SkTransformShader::SkTransformShader(const SkShaderBase& shader, bool allowPerspective)
+ : fShader{shader}, fAllowPerspective{allowPerspective} {
+ SkMatrix::I().get9(fMatrixStorage);
+}
+
+skvm::Color SkTransformShader::program(skvm::Builder* b,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color color,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const {
+ // We have to seed and apply any constant matrices before appending our matrix that may
+ // mutate. We could try to apply one matrix stage and then incorporate the parent matrix
+ // with the variable matrix in each call to update(). However, in practice our callers
+ // fold the CTM into the update() matrix and don't wrap the transform shader in local matrix
+ // shaders so the call to apply below should be no-op. If this assert fires it just indicates an
+ // optimization opportunity, not a correctness bug.
+ SkASSERT(!mRec.hasPendingMatrix());
+
+ std::optional<MatrixRec> childMRec = mRec.apply(b, &local, uniforms);
+ if (!childMRec.has_value()) {
+ return {};
+ }
+ // The matrix we're about to insert gets updated between uses of the VM so our children can't
+ // know the total transform when they add their stages. We don't incorporate this shader's
+ // matrix into the MatrixRec at all.
+ childMRec->markTotalMatrixInvalid();
+
+ auto matrix = uniforms->pushPtr(&fMatrixStorage);
+
+ skvm::F32 x = local.x,
+ y = local.y;
+
+ auto dot = [&, x, y](int row) {
+ return b->mad(x,
+ b->arrayF(matrix, 3 * row + 0),
+ b->mad(y, b->arrayF(matrix, 3 * row + 1), b->arrayF(matrix, 3 * row + 2)));
+ };
+
+ x = dot(0);
+ y = dot(1);
+ if (fAllowPerspective) {
+ x = x * (1.0f / dot(2));
+ y = y * (1.0f / dot(2));
+ }
+
+ skvm::Coord newLocal = {x, y};
+ return fShader.program(b, device, newLocal, color, *childMRec, dst, uniforms, alloc);
+}
+
+bool SkTransformShader::update(const SkMatrix& matrix) {
+ if (SkMatrix inv; matrix.invert(&inv)) {
+ if (!fAllowPerspective && inv.hasPerspective()) {
+ return false;
+ }
+
+ inv.get9(fMatrixStorage);
+ return true;
+ }
+ return false;
+}
+
+bool SkTransformShader::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+ // We have to seed and apply any constant matrices before appending our matrix that may
+ // mutate. We could try to add one matrix stage and then incorporate the parent matrix
+ // with the variable matrix in each call to update(). However, in practice our callers
+ // fold the CTM into the update() matrix and don't wrap the transform shader in local matrix
+ // shaders so the call to apply below should just seed the coordinates. If this assert fires
+ // it just indicates an optimization opportunity, not a correctness bug.
+ SkASSERT(!mRec.hasPendingMatrix());
+ std::optional<MatrixRec> childMRec = mRec.apply(rec);
+ if (!childMRec.has_value()) {
+ return false;
+ }
+ // The matrix we're about to insert gets updated between uses of the pipeline so our children
+ // can't know the total transform when they add their stages. We don't even incorporate this
+ // matrix into the MatrixRec at all.
+ childMRec->markTotalMatrixInvalid();
+
+ auto type = fAllowPerspective ? SkRasterPipelineOp::matrix_perspective
+ : SkRasterPipelineOp::matrix_2x3;
+ rec.fPipeline->append(type, fMatrixStorage);
+
+ fShader.appendStages(rec, *childMRec);
+ return true;
+}
diff --git a/gfx/skia/skia/src/shaders/SkTransformShader.h b/gfx/skia/skia/src/shaders/SkTransformShader.h
new file mode 100644
index 0000000000..f04d4baad6
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/SkTransformShader.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkTextCoordShader_DEFINED
+#define SkTextCoordShader_DEFINED
+
+#include "src/core/SkVM.h"
+#include "src/shaders/SkShaderBase.h"
+
+// SkTransformShader applies a matrix transform to the shader coordinates, like a local matrix
+// shader. The difference with a typical local matrix shader is that this shader's matrix is
+// not combined with the inverse CTM or other local matrices in order to facilitate modifying the
+// matrix between uses of the SkVM or SkRasterPipeline. This supports drawVertices and drawAtlas, in
+// which the mapping from each triangle (when explicit texture coords are used) or atlas quad to
+// shader space is different.
+class SkTransformShader : public SkShaderBase {
+public:
+ explicit SkTransformShader(const SkShaderBase& shader, bool allowPerspective);
+
+ // Adds instructions to use the mapping stored in the uniforms represented by fMatrix. After
+ // generating a new skvm::Coord, it passes the mapped coordinates to fShader's program
+ // along with the identity matrix.
+ skvm::Color program(skvm::Builder* b,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color color,
+ const MatrixRec& mRec,
+ const SkColorInfo& dst,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const override;
+
+ // Adds a pipestage to multiply the incoming coords in 'r' and 'g' by the matrix. The child
+ // shader is called with no pending local matrix and the total transform as unknowable.
+ bool appendStages(const SkStageRec& rec, const MatrixRec&) const override;
+
+ // Change the matrix used by the generated SkRasterpipeline or SkVM.
+ bool update(const SkMatrix& matrix);
+
+ // These are never serialized/deserialized
+ Factory getFactory() const override {
+ SkDEBUGFAIL("SkTransformShader shouldn't be serialized.");
+ return {};
+ }
+ const char* getTypeName() const override {
+ SkDEBUGFAIL("SkTransformShader shouldn't be serialized.");
+ return nullptr;
+ }
+
+ bool isOpaque() const override { return fShader.isOpaque(); }
+
+private:
+ const SkShaderBase& fShader;
+ SkScalar fMatrixStorage[9]; // actual memory used by generated RP or VM
+ bool fAllowPerspective;
+};
+#endif //SkTextCoordShader_DEFINED
diff --git a/gfx/skia/skia/src/shaders/gradients/SkGradientShader.cpp b/gfx/skia/skia/src/shaders/gradients/SkGradientShader.cpp
new file mode 100644
index 0000000000..7040d5de51
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkGradientShader.cpp
@@ -0,0 +1,7 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
diff --git a/gfx/skia/skia/src/shaders/gradients/SkGradientShaderBase.cpp b/gfx/skia/skia/src/shaders/gradients/SkGradientShaderBase.cpp
new file mode 100644
index 0000000000..87b6d91bb1
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkGradientShaderBase.cpp
@@ -0,0 +1,1325 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/gradients/SkGradientShaderBase.h"
+
+#include "include/core/SkColorSpace.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkColorSpaceXformSteps.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkMatrixProvider.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkVM.h"
+#include "src/core/SkWriteBuffer.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/core/SkColorSpacePriv.h"
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+#include <cmath>
+
+enum GradientSerializationFlags {
+ // Bits 29:31 used for various boolean flags
+ kHasPosition_GSF = 0x80000000,
+ kHasLegacyLocalMatrix_GSF = 0x40000000,
+ kHasColorSpace_GSF = 0x20000000,
+
+ // Bits 12:28 unused
+
+ // Bits 8:11 for fTileMode
+ kTileModeShift_GSF = 8,
+ kTileModeMask_GSF = 0xF,
+
+ // Bits 4:7 for fInterpolation.fColorSpace
+ kInterpolationColorSpaceShift_GSF = 4,
+ kInterpolationColorSpaceMask_GSF = 0xF,
+
+ // Bits 1:3 for fInterpolation.fHueMethod
+ kInterpolationHueMethodShift_GSF = 1,
+ kInterpolationHueMethodMask_GSF = 0x7,
+
+ // Bit 0 for fInterpolation.fInPremul
+ kInterpolationInPremul_GSF = 0x1,
+};
+
+SkGradientShaderBase::Descriptor::Descriptor() {
+ sk_bzero(this, sizeof(*this));
+ fTileMode = SkTileMode::kClamp;
+}
+SkGradientShaderBase::Descriptor::~Descriptor() = default;
+
+void SkGradientShaderBase::flatten(SkWriteBuffer& buffer) const {
+ uint32_t flags = 0;
+ if (fPositions) {
+ flags |= kHasPosition_GSF;
+ }
+ sk_sp<SkData> colorSpaceData = fColorSpace ? fColorSpace->serialize() : nullptr;
+ if (colorSpaceData) {
+ flags |= kHasColorSpace_GSF;
+ }
+ if (fInterpolation.fInPremul == Interpolation::InPremul::kYes) {
+ flags |= kInterpolationInPremul_GSF;
+ }
+ SkASSERT(static_cast<uint32_t>(fTileMode) <= kTileModeMask_GSF);
+ flags |= ((uint32_t)fTileMode << kTileModeShift_GSF);
+ SkASSERT(static_cast<uint32_t>(fInterpolation.fColorSpace) <= kInterpolationColorSpaceMask_GSF);
+ flags |= ((uint32_t)fInterpolation.fColorSpace << kInterpolationColorSpaceShift_GSF);
+ SkASSERT(static_cast<uint32_t>(fInterpolation.fHueMethod) <= kInterpolationHueMethodMask_GSF);
+ flags |= ((uint32_t)fInterpolation.fHueMethod << kInterpolationHueMethodShift_GSF);
+
+ buffer.writeUInt(flags);
+
+ // If we injected implicit first/last stops at construction time, omit those when serializing:
+ int colorCount = fColorCount;
+ const SkColor4f* colors = fColors;
+ const SkScalar* positions = fPositions;
+ if (fFirstStopIsImplicit) {
+ colorCount--;
+ colors++;
+ if (positions) {
+ positions++;
+ }
+ }
+ if (fLastStopIsImplicit) {
+ colorCount--;
+ }
+
+ buffer.writeColor4fArray(colors, colorCount);
+ if (colorSpaceData) {
+ buffer.writeDataAsByteArray(colorSpaceData.get());
+ }
+ if (positions) {
+ buffer.writeScalarArray(positions, colorCount);
+ }
+}
+
+template <int N, typename T, bool MEM_MOVE>
+static bool validate_array(SkReadBuffer& buffer, size_t count, SkSTArray<N, T, MEM_MOVE>* array) {
+ if (!buffer.validateCanReadN<T>(count)) {
+ return false;
+ }
+
+ array->resize_back(count);
+ return true;
+}
+
+bool SkGradientShaderBase::DescriptorScope::unflatten(SkReadBuffer& buffer,
+ SkMatrix* legacyLocalMatrix) {
+ // New gradient format. Includes floating point color, color space, densely packed flags
+ uint32_t flags = buffer.readUInt();
+
+ fTileMode = (SkTileMode)((flags >> kTileModeShift_GSF) & kTileModeMask_GSF);
+
+ fInterpolation.fColorSpace = (Interpolation::ColorSpace)(
+ (flags >> kInterpolationColorSpaceShift_GSF) & kInterpolationColorSpaceMask_GSF);
+ fInterpolation.fHueMethod = (Interpolation::HueMethod)(
+ (flags >> kInterpolationHueMethodShift_GSF) & kInterpolationHueMethodMask_GSF);
+ fInterpolation.fInPremul = (flags & kInterpolationInPremul_GSF) ? Interpolation::InPremul::kYes
+ : Interpolation::InPremul::kNo;
+
+ fColorCount = buffer.getArrayCount();
+
+ if (!(validate_array(buffer, fColorCount, &fColorStorage) &&
+ buffer.readColor4fArray(fColorStorage.begin(), fColorCount))) {
+ return false;
+ }
+ fColors = fColorStorage.begin();
+
+ if (SkToBool(flags & kHasColorSpace_GSF)) {
+ sk_sp<SkData> data = buffer.readByteArrayAsData();
+ fColorSpace = data ? SkColorSpace::Deserialize(data->data(), data->size()) : nullptr;
+ } else {
+ fColorSpace = nullptr;
+ }
+ if (SkToBool(flags & kHasPosition_GSF)) {
+ if (!(validate_array(buffer, fColorCount, &fPositionStorage) &&
+ buffer.readScalarArray(fPositionStorage.begin(), fColorCount))) {
+ return false;
+ }
+ fPositions = fPositionStorage.begin();
+ } else {
+ fPositions = nullptr;
+ }
+ if (SkToBool(flags & kHasLegacyLocalMatrix_GSF)) {
+ SkASSERT(buffer.isVersionLT(SkPicturePriv::Version::kNoShaderLocalMatrix));
+ buffer.readMatrix(legacyLocalMatrix);
+ } else {
+ *legacyLocalMatrix = SkMatrix::I();
+ }
+ return buffer.isValid();
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+
+// Common gradient construction: copies the caller's colors (and optional positions)
+// from `desc` into one contiguous heap block, inserting implicit stops at t=0 and/or
+// t=1 when the caller's first/last positions do not already bracket [0, 1].
+SkGradientShaderBase::SkGradientShaderBase(const Descriptor& desc, const SkMatrix& ptsToUnit)
+        : fPtsToUnit(ptsToUnit)
+        , fColorSpace(desc.fColorSpace ? desc.fColorSpace : SkColorSpace::MakeSRGB())
+        , fFirstStopIsImplicit(false)
+        , fLastStopIsImplicit(false)
+        , fColorsAreOpaque(true) {
+    fPtsToUnit.getType();  // Precache so reads are threadsafe.
+    SkASSERT(desc.fColorCount > 1);
+
+    fInterpolation = desc.fInterpolation;
+
+    SkASSERT((unsigned)desc.fTileMode < kSkTileModeCount);
+    fTileMode = desc.fTileMode;
+
+    /*  Note: we let the caller skip the first and/or last position.
+        i.e. pos[0] = 0.3, pos[1] = 0.7
+        In these cases, we insert entries to ensure that the final data
+        will be bracketed by [0, 1].
+        i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1
+
+        Thus colorCount (the caller's value, and fColorCount (our value) may
+        differ by up to 2. In the above example:
+            colorCount = 2
+            fColorCount = 4
+     */
+    fColorCount = desc.fColorCount;
+    // check if we need to add in start and/or end position/colors
+    if (desc.fPositions) {
+        fFirstStopIsImplicit = desc.fPositions[0] != 0;
+        fLastStopIsImplicit = desc.fPositions[desc.fColorCount - 1] != SK_Scalar1;
+        fColorCount += fFirstStopIsImplicit + fLastStopIsImplicit;
+    }
+
+    // Single allocation: colors first, then (if present) positions right after them.
+    size_t storageSize =
+            fColorCount * (sizeof(SkColor4f) + (desc.fPositions ? sizeof(SkScalar) : 0));
+    fColors = reinterpret_cast<SkColor4f*>(fStorage.reset(storageSize));
+    fPositions = desc.fPositions ? reinterpret_cast<SkScalar*>(fColors + fColorCount) : nullptr;
+
+    // Now copy over the colors, adding the duplicates at t=0 and t=1 as needed
+    SkColor4f* colors = fColors;
+    if (fFirstStopIsImplicit) {
+        *colors++ = desc.fColors[0];
+    }
+    for (int i = 0; i < desc.fColorCount; ++i) {
+        colors[i] = desc.fColors[i];
+        // Track overall opacity while we're touching every color anyway.
+        fColorsAreOpaque = fColorsAreOpaque && (desc.fColors[i].fA == 1);
+    }
+    if (fLastStopIsImplicit) {
+        colors += desc.fColorCount;
+        *colors = desc.fColors[desc.fColorCount - 1];
+    }
+
+    if (desc.fPositions) {
+        SkScalar prev = 0;
+        SkScalar* positions = fPositions;
+        *positions++ = prev; // force the first pos to 0
+
+        int startIndex = fFirstStopIsImplicit ? 0 : 1;
+        int count = desc.fColorCount + fLastStopIsImplicit;
+
+        bool uniformStops = true;
+        const SkScalar uniformStep = desc.fPositions[startIndex] - prev;
+        for (int i = startIndex; i < count; i++) {
+            // Pin the last value to 1.0, and make sure pos is monotonic.
+            auto curr = (i == desc.fColorCount) ? 1 : SkTPin(desc.fPositions[i], prev, 1.0f);
+            uniformStops &= SkScalarNearlyEqual(uniformStep, curr - prev);
+
+            *positions++ = prev = curr;
+        }
+
+        // If the stops are uniform, treat them as implicit.
+        // (fPositions == nullptr selects the faster evenly-spaced code paths below.)
+        if (uniformStops) {
+            fPositions = nullptr;
+        }
+    }
+}
+
+// No manual cleanup: the color/position block allocated via fStorage.reset() in the
+// constructor is owned and released by the fStorage member.
+SkGradientShaderBase::~SkGradientShaderBase() {}
+
+// Write per-channel scale factors (Fs) and biases (Bs) for one stop into the
+// structure-of-arrays layout the gradient pipeline stage gathers from.
+static void add_stop_color(SkRasterPipeline_GradientCtx* ctx, size_t stop,
+                           SkPMColor4f Fs, SkPMColor4f Bs) {
+    (ctx->fs[0])[stop] = Fs.fR;
+    (ctx->fs[1])[stop] = Fs.fG;
+    (ctx->fs[2])[stop] = Fs.fB;
+    (ctx->fs[3])[stop] = Fs.fA;
+
+    (ctx->bs[0])[stop] = Bs.fR;
+    (ctx->bs[1])[stop] = Bs.fG;
+    (ctx->bs[2])[stop] = Bs.fB;
+    (ctx->bs[3])[stop] = Bs.fA;
+}
+
+// A constant-color stop: zero scale factor, so color = F*t + B degenerates to B = color.
+static void add_const_color(SkRasterPipeline_GradientCtx* ctx, size_t stop, SkPMColor4f color) {
+    add_stop_color(ctx, stop, { 0, 0, 0, 0 }, color);
+}
+
+// Calculate a factor F and a bias B so that color = F*t + B when t is in range of
+// the stop. Assume that the distance between stops is 1/gapCount.
+static void init_stop_evenly(SkRasterPipeline_GradientCtx* ctx, float gapCount, size_t stop,
+                             SkPMColor4f c_l, SkPMColor4f c_r) {
+    // Clankium's GCC 4.9 targeting ARMv7 is barfing when we use Sk4f math here, so go scalar...
+    // F = (right - left) / gap width, where gap width = 1/gapCount.
+    SkPMColor4f Fs = {
+        (c_r.fR - c_l.fR) * gapCount,
+        (c_r.fG - c_l.fG) * gapCount,
+        (c_r.fB - c_l.fB) * gapCount,
+        (c_r.fA - c_l.fA) * gapCount,
+    };
+    // B = left color at the left edge of the gap, t_left = stop/gapCount.
+    SkPMColor4f Bs = {
+        c_l.fR - Fs.fR*(stop/gapCount),
+        c_l.fG - Fs.fG*(stop/gapCount),
+        c_l.fB - Fs.fB*(stop/gapCount),
+        c_l.fA - Fs.fA*(stop/gapCount),
+    };
+    add_stop_color(ctx, stop, Fs, Bs);
+}
+
+// For each stop we calculate a bias B and a scale factor F, such that
+// for any t between stops n and n+1, the color we want is B[n] + F[n]*t.
+// Caller guarantees t_l < t_r (hard stops with t_l == t_r are skipped upstream).
+static void init_stop_pos(SkRasterPipeline_GradientCtx* ctx, size_t stop, float t_l, float t_r,
+                          SkPMColor4f c_l, SkPMColor4f c_r) {
+    // See note about Clankium's old compiler in init_stop_evenly().
+    SkPMColor4f Fs = {
+        (c_r.fR - c_l.fR) / (t_r - t_l),
+        (c_r.fG - c_l.fG) / (t_r - t_l),
+        (c_r.fB - c_l.fB) / (t_r - t_l),
+        (c_r.fA - c_l.fA) / (t_r - t_l),
+    };
+    SkPMColor4f Bs = {
+        c_l.fR - Fs.fR*t_l,
+        c_l.fG - Fs.fG*t_l,
+        c_l.fB - Fs.fB*t_l,
+        c_l.fA - Fs.fA*t_l,
+    };
+    // Record the left edge of this stop for the pipeline's position search.
+    ctx->ts[stop] = t_l;
+    add_stop_color(ctx, stop, Fs, Bs);
+}
+
+// Appends the raster-pipeline stage that converts the already-computed t value into a
+// color, choosing among three specializations: 2-stop, evenly-spaced, and arbitrary stops.
+// `positions == nullptr` means the stops are evenly distributed over [0, 1].
+void SkGradientShaderBase::AppendGradientFillStages(SkRasterPipeline* p,
+                                                    SkArenaAlloc* alloc,
+                                                    const SkPMColor4f* pmColors,
+                                                    const SkScalar* positions,
+                                                    int count) {
+    // The two-stop case with stops at 0 and 1.
+    if (count == 2 && positions == nullptr) {
+        const SkPMColor4f c_l = pmColors[0],
+                          c_r = pmColors[1];
+
+        // See F and B below.
+        auto ctx = alloc->make<SkRasterPipeline_EvenlySpaced2StopGradientCtx>();
+        (skvx::float4::Load(c_r.vec()) - skvx::float4::Load(c_l.vec())).store(ctx->f);
+        (                                skvx::float4::Load(c_l.vec())).store(ctx->b);
+
+        p->append(SkRasterPipelineOp::evenly_spaced_2_stop_gradient, ctx);
+    } else {
+        auto* ctx = alloc->make<SkRasterPipeline_GradientCtx>();
+
+        // Note: In order to handle clamps in search, the search assumes a stop conceptually
+        // placed at -inf. Therefore, the max number of stops is fColorCount+1.
+        for (int i = 0; i < 4; i++) {
+            // Allocate at least enough floats for the AVX2 gather from a YMM register.
+            ctx->fs[i] = alloc->makeArray<float>(std::max(count + 1, 8));
+            ctx->bs[i] = alloc->makeArray<float>(std::max(count + 1, 8));
+        }
+
+        if (positions == nullptr) {
+            // Handle evenly distributed stops.
+
+            size_t stopCount = count;
+            float gapCount = stopCount - 1;
+
+            SkPMColor4f c_l = pmColors[0];
+            for (size_t i = 0; i < stopCount - 1; i++) {
+                SkPMColor4f c_r = pmColors[i + 1];
+                init_stop_evenly(ctx, gapCount, i, c_l, c_r);
+                c_l = c_r;
+            }
+            // Final stop clamps everything at/after t=1 to the last color.
+            add_const_color(ctx, stopCount - 1, c_l);
+
+            ctx->stopCount = stopCount;
+            p->append(SkRasterPipelineOp::evenly_spaced_gradient, ctx);
+        } else {
+            // Handle arbitrary stops.
+
+            ctx->ts = alloc->makeArray<float>(count + 1);
+
+            // Remove the default stops inserted by SkGradientShaderBase::SkGradientShaderBase
+            // because they are naturally handled by the search method.
+            int firstStop;
+            int lastStop;
+            if (count > 2) {
+                firstStop = pmColors[0] != pmColors[1] ? 0 : 1;
+                lastStop = pmColors[count - 2] != pmColors[count - 1] ? count - 1 : count - 2;
+            } else {
+                firstStop = 0;
+                lastStop = 1;
+            }
+
+            size_t stopCount = 0;
+            float  t_l = positions[firstStop];
+            SkPMColor4f c_l = pmColors[firstStop];
+            // Conceptual -inf stop: everything left of the first position clamps to c_l.
+            add_const_color(ctx, stopCount++, c_l);
+            // N.B. lastStop is the index of the last stop, not one after.
+            for (int i = firstStop; i < lastStop; i++) {
+                float t_r = positions[i + 1];
+                SkPMColor4f c_r = pmColors[i + 1];
+                SkASSERT(t_l <= t_r);
+                // Zero-width intervals (hard stops) produce no interpolation segment.
+                if (t_l < t_r) {
+                    init_stop_pos(ctx, stopCount, t_l, t_r, c_l, c_r);
+                    stopCount += 1;
+                }
+                t_l = t_r;
+                c_l = c_r;
+            }
+
+            // Everything right of the last position clamps to the final color.
+            ctx->ts[stopCount] = t_l;
+            add_const_color(ctx, stopCount++, c_l);
+
+            ctx->stopCount = stopCount;
+            p->append(SkRasterPipelineOp::gradient, ctx);
+        }
+    }
+}
+
+// Builds the full raster pipeline for this gradient: matrix setup, subclass-specific
+// t computation, tiling, color interpolation, exotic-color-space decoding, and the
+// final transform into the destination color space.
+bool SkGradientShaderBase::appendStages(const SkStageRec& rec, const MatrixRec& mRec) const {
+    SkRasterPipeline* p = rec.fPipeline;
+    SkArenaAlloc* alloc = rec.fAlloc;
+    SkRasterPipeline_DecalTileCtx* decal_ctx = nullptr;
+
+    std::optional<MatrixRec> newMRec = mRec.apply(rec, fPtsToUnit);
+    if (!newMRec.has_value()) {
+        return false;
+    }
+
+    // Stages the subclass wants to run after color generation (appended at the end).
+    SkRasterPipeline_<256> postPipeline;
+
+    this->appendGradientStages(alloc, p, &postPipeline);
+
+    switch(fTileMode) {
+        case SkTileMode::kMirror: p->append(SkRasterPipelineOp::mirror_x_1); break;
+        case SkTileMode::kRepeat: p->append(SkRasterPipelineOp::repeat_x_1); break;
+        case SkTileMode::kDecal:
+            decal_ctx = alloc->make<SkRasterPipeline_DecalTileCtx>();
+            // 1.0f plus one ulp, so t exactly equal to 1 still passes the decal test.
+            decal_ctx->limit_x = SkBits2Float(SkFloat2Bits(1.0f) + 1);
+            // reuse mask + limit_x stage, or create a custom decal_1 that just stores the mask
+            p->append(SkRasterPipelineOp::decal_x, decal_ctx);
+            [[fallthrough]];
+
+        case SkTileMode::kClamp:
+            if (!fPositions) {
+                // We clamp only when the stops are evenly spaced.
+                // If not, there may be hard stops, and clamping ruins hard stops at 0 and/or 1.
+                // In that case, we must make sure we're using the general "gradient" stage,
+                // which is the only stage that will correctly handle unclamped t.
+                p->append(SkRasterPipelineOp::clamp_x_1);
+            }
+            break;
+    }
+
+    // Transform all of the colors to destination color space, possibly premultiplied
+    SkColor4fXformer xformedColors(this, rec.fDstCS);
+    AppendGradientFillStages(p, alloc, xformedColors.fColors.begin(), fPositions, fColorCount);
+
+    using ColorSpace = Interpolation::ColorSpace;
+    bool colorIsPremul = this->interpolateInPremul();
+
+    // If we interpolated premul colors in any of the special color spaces, we need to unpremul
+    if (colorIsPremul && !fColorsAreOpaque) {
+        switch (fInterpolation.fColorSpace) {
+            case ColorSpace::kLab:
+            case ColorSpace::kOKLab:
+                p->append(SkRasterPipelineOp::unpremul);
+                colorIsPremul = false;
+                break;
+            case ColorSpace::kLCH:
+            case ColorSpace::kOKLCH:
+            case ColorSpace::kHSL:
+            case ColorSpace::kHWB:
+                // Polar spaces keep hue un-premultiplied; unpremul_polar skips channel 0.
+                p->append(SkRasterPipelineOp::unpremul_polar);
+                colorIsPremul = false;
+                break;
+            default: break;
+        }
+    }
+
+    // Convert colors in exotic spaces back to their intermediate SkColorSpace
+    switch (fInterpolation.fColorSpace) {
+        case ColorSpace::kLab:   p->append(SkRasterPipelineOp::css_lab_to_xyz); break;
+        case ColorSpace::kOKLab: p->append(SkRasterPipelineOp::css_oklab_to_linear_srgb); break;
+        case ColorSpace::kLCH:   p->append(SkRasterPipelineOp::css_hcl_to_lab);
+                                 p->append(SkRasterPipelineOp::css_lab_to_xyz); break;
+        case ColorSpace::kOKLCH: p->append(SkRasterPipelineOp::css_hcl_to_lab);
+                                 p->append(SkRasterPipelineOp::css_oklab_to_linear_srgb); break;
+        case ColorSpace::kHSL:   p->append(SkRasterPipelineOp::css_hsl_to_srgb); break;
+        case ColorSpace::kHWB:   p->append(SkRasterPipelineOp::css_hwb_to_srgb); break;
+        default: break;
+    }
+
+    // Now transform from intermediate to destination color space.
+    // See comments in GrGradientShader.cpp about the decisions here.
+    SkColorSpace* dstColorSpace = rec.fDstCS ? rec.fDstCS : sk_srgb_singleton();
+    SkAlphaType intermediateAlphaType = colorIsPremul ? kPremul_SkAlphaType : kUnpremul_SkAlphaType;
+    // TODO(skia:13108): Get dst alpha type correctly
+    SkAlphaType dstAlphaType = kPremul_SkAlphaType;
+
+    if (fColorsAreOpaque) {
+        // Opaque colors make premul vs. unpremul moot; skip the alpha math.
+        intermediateAlphaType = dstAlphaType = kUnpremul_SkAlphaType;
+    }
+
+    alloc->make<SkColorSpaceXformSteps>(xformedColors.fIntermediateColorSpace.get(),
+                                        intermediateAlphaType,
+                                        dstColorSpace,
+                                        dstAlphaType)
+            ->apply(p);
+
+    if (decal_ctx) {
+        p->append(SkRasterPipelineOp::check_decal_mask, decal_ctx);
+    }
+
+    p->extend(postPipeline);
+
+    return true;
+}
+
+// Color conversion functions used in gradient interpolation, based on
+// https://www.w3.org/TR/css-color-4/#color-conversion-code
+// CIELAB -> XYZ (D50 white point). lab = (L, a, b) in channels (r, g, b).
+static skvm::Color css_lab_to_xyz(skvm::Color lab) {
+    constexpr float k = 24389 / 27.0f;
+    constexpr float e = 216 / 24389.0f;
+
+    skvm::F32 f[3];
+    f[1] = (lab.r + 16) * (1 / 116.0f);
+    f[0] = (lab.g * (1 / 500.0f)) + f[1];
+    f[2] = f[1] - (lab.b * (1 / 200.0f));
+
+    skvm::F32 f_cubed[3] = { f[0]*f[0]*f[0], f[1]*f[1]*f[1], f[2]*f[2]*f[2] };
+
+    // Piecewise inverse of the Lab nonlinearity, per the CSS Color 4 sample code.
+    skvm::F32 xyz[3] = {
+        skvm::select(f_cubed[0] > e, f_cubed[0], (116 * f[0] - 16) * (1 / k)),
+        skvm::select(lab.r > k * e , f_cubed[1], lab.r * (1 / k)),
+        skvm::select(f_cubed[2] > e, f_cubed[2], (116 * f[2] - 16) * (1 / k))
+    };
+
+    // Scale by the D50 reference white.
+    constexpr float D50[3] = { 0.3457f / 0.3585f, 1.0f, (1.0f - 0.3457f - 0.3585f) / 0.3585f };
+    return skvm::Color { xyz[0]*D50[0], xyz[1]*D50[1], xyz[2]*D50[2], lab.a };
+}
+
+// Skia stores all polar colors with hue in the first component, so this "LCH -> Lab" transform
+// actually takes "HCL". This is also used to do the same polar transform for OkHCL to OkLAB.
+static skvm::Color css_hcl_to_lab(skvm::Color hcl) {
+    skvm::F32 hueRadians = hcl.r * (SK_FloatPI / 180);
+    // (H, C, L) -> (L, C*cos(H), C*sin(H))
+    return skvm::Color {
+        hcl.b,
+        hcl.g * approx_cos(hueRadians),
+        hcl.g * approx_sin(hueRadians),
+        hcl.a
+    };
+}
+
+// HCL (i.e. LCH with hue first) -> XYZ-D50, composed via Lab.
+static skvm::Color css_hcl_to_xyz(skvm::Color hcl) {
+    return css_lab_to_xyz(css_hcl_to_lab(hcl));
+}
+
+// Oklab -> linear sRGB, using the reference matrices from Björn Ottosson's Oklab post
+// (see https://bottosson.github.io/posts/oklab/).
+static skvm::Color css_oklab_to_linear_srgb(skvm::Color oklab) {
+    // Oklab -> (cube roots of) LMS.
+    skvm::F32 l_ = oklab.r + 0.3963377774f * oklab.g + 0.2158037573f * oklab.b,
+              m_ = oklab.r - 0.1055613458f * oklab.g - 0.0638541728f * oklab.b,
+              s_ = oklab.r - 0.0894841775f * oklab.g - 1.2914855480f * oklab.b;
+
+    // Undo the cube-root nonlinearity.
+    skvm::F32 l = l_*l_*l_,
+              m = m_*m_*m_,
+              s = s_*s_*s_;
+
+    // LMS -> linear sRGB.
+    return skvm::Color {
+        +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s,
+        -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s,
+        -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s,
+        oklab.a
+    };
+
+}
+
+// OkHCL (Oklch with hue first) -> linear sRGB, composed via Oklab.
+static skvm::Color css_okhcl_to_linear_srgb(skvm::Color okhcl) {
+    return css_oklab_to_linear_srgb(css_hcl_to_lab(okhcl));
+}
+
+// Floored modulo: result has the sign of y (here y > 0), unlike fmod.
+static skvm::F32 mod_f(skvm::F32 x, float y) {
+    return x - y * skvm::floor(x * (1 / y));
+}
+
+// HSL -> sRGB per the CSS Color 4 sample conversion. Hue is in degrees;
+// saturation and lightness arrive as percentages (0..100).
+static skvm::Color css_hsl_to_srgb(skvm::Color hsl) {
+    // Normalize hue into [0, 360).
+    hsl.r = mod_f(hsl.r, 360);
+    hsl.r = skvm::select(hsl.r < 0, hsl.r + 360, hsl.r);
+
+    // Percent -> fraction.
+    hsl.g *= 0.01f;
+    hsl.b *= 0.01f;
+
+    skvm::F32 k[3] = {
+        mod_f(0 + hsl.r * (1 / 30.0f), 12),
+        mod_f(8 + hsl.r * (1 / 30.0f), 12),
+        mod_f(4 + hsl.r * (1 / 30.0f), 12),
+    };
+    skvm::F32 a = hsl.g * min(hsl.b, 1 - hsl.b);
+    return skvm::Color {
+        hsl.b - a * clamp(min(k[0] - 3, 9 - k[0]), -1, 1),
+        hsl.b - a * clamp(min(k[1] - 3, 9 - k[1]), -1, 1),
+        hsl.b - a * clamp(min(k[2] - 3, 9 - k[2]), -1, 1),
+        hsl.a
+    };
+}
+
+// HWB -> sRGB per CSS Color 4: mix the pure hue with whiteness/blackness
+// (given as percentages), collapsing to gray when whiteness + blackness >= 100%.
+static skvm::Color css_hwb_to_srgb(skvm::Color hwb, skvm::Builder* p) {
+    // Percent -> fraction.
+    hwb.g *= 0.01f;
+    hwb.b *= 0.01f;
+
+    skvm::F32 gray = hwb.g / (hwb.g + hwb.b);
+
+    // Pure hue: HSL with S=100%, L=50%.
+    skvm::Color rgb = css_hsl_to_srgb(skvm::Color{hwb.r, p->splat(100.0f), p->splat(50.0f), hwb.a});
+    rgb.r = rgb.r * (1 - hwb.g - hwb.b) + hwb.g;
+    rgb.g = rgb.g * (1 - hwb.g - hwb.b) + hwb.g;
+    rgb.b = rgb.b * (1 - hwb.g - hwb.b) + hwb.g;
+
+    skvm::I32 isGray = (hwb.g + hwb.b) >= 1;
+
+    return skvm::Color {
+        select(isGray, gray, rgb.r),
+        select(isGray, gray, rgb.g),
+        select(isGray, gray, rgb.b),
+        hwb.a
+    };
+}
+
+// skvm code generation for this gradient: mirrors appendStages() above but emits an
+// skvm program — compute t, tile it, look up/interpolate stop colors via gathered
+// scale/bias pairs, decode exotic interpolation spaces, then convert to dst space.
+skvm::Color SkGradientShaderBase::program(skvm::Builder* p,
+                                          skvm::Coord device,
+                                          skvm::Coord local,
+                                          skvm::Color /*paint*/,
+                                          const MatrixRec& mRec,
+                                          const SkColorInfo& dstInfo,
+                                          skvm::Uniforms* uniforms,
+                                          SkArenaAlloc* alloc) const {
+    if (!mRec.apply(p, &local, uniforms, fPtsToUnit).has_value()) {
+        return {};
+    }
+
+    // mask holds the decal coverage (all-ones unless kDecal knocks lanes out).
+    skvm::I32 mask = p->splat(~0);
+    skvm::F32 t = this->transformT(p,uniforms, local, &mask);
+
+    // Perhaps unexpectedly, clamping is handled naturally by our search, so we
+    // don't explicitly clamp t to [0,1].  That clamp would break hard stops
+    // right at 0 or 1 boundaries in kClamp mode.  (kRepeat and kMirror always
+    // produce values in [0,1].)
+    switch(fTileMode) {
+        case SkTileMode::kClamp:
+            break;
+
+        case SkTileMode::kDecal:
+            mask &= (t == clamp01(t));
+            break;
+
+        case SkTileMode::kRepeat:
+            t = fract(t);
+            break;
+
+        case SkTileMode::kMirror: {
+            // t = | (t-1) - 2*(floor( (t-1)*0.5 )) - 1 |
+            //       {-A-}      {--------B-------}
+            skvm::F32 A = t - 1.0f,
+                      B = floor(A * 0.5f);
+            t = abs(A - (B + B) - 1.0f);
+        } break;
+    }
+
+    // Transform our colors as we want them interpolated, in dst color space, possibly premul.
+    SkColor4fXformer xformedColors(this, dstInfo.colorSpace());
+    const SkPMColor4f* rgba = xformedColors.fColors.begin();
+
+    // Transform our colors into a scale factor f and bias b such that for
+    // any t between stops i and i+1, the color we want is mad(t, f[i], b[i]).
+    using F4 = skvx::Vec<4,float>;
+    struct FB { F4 f,b; };
+    skvm::Color color;
+
+    auto uniformF = [&](float x) { return p->uniformF(uniforms->pushF(x)); };
+
+    if (fColorCount == 2) {
+        // 2-stop gradients have colors at 0 and 1, and so must be evenly spaced.
+        SkASSERT(fPositions == nullptr);
+
+        // With 2 stops, we upload the single FB as uniforms and interpolate directly with t.
+        F4 lo = F4::Load(rgba + 0),
+           hi = F4::Load(rgba + 1);
+        F4 F = hi - lo,
+           B = lo;
+
+        auto T = clamp01(t);
+        color = {
+            T * uniformF(F[0]) + uniformF(B[0]),
+            T * uniformF(F[1]) + uniformF(B[1]),
+            T * uniformF(F[2]) + uniformF(B[2]),
+            T * uniformF(F[3]) + uniformF(B[3]),
+        };
+    } else {
+        // To handle clamps in search we add a conceptual stop at t=-inf, so we
+        // may need up to fColorCount+1 FBs and fColorCount t stops between them:
+        //
+        //   FBs:    [color 0]  [color 0->1]  [color 1->2]  [color 2->3]  ...
+        //   stops:  (-inf)        t0            t1            t2  ...
+        //
+        // Both these arrays could end up shorter if any hard stops share the same t.
+        FB* fb = alloc->makeArrayDefault<FB>(fColorCount+1);
+        std::vector<float> stops;  // TODO: SkSTArray?
+        stops.reserve(fColorCount);
+
+        // Here's our conceptual stop at t=-inf covering all t<=0, clamping to our first color.
+        float  t_lo = this->getPos(0);
+        F4 color_lo = F4::Load(rgba);
+        fb[0] = { 0.0f, color_lo };
+        // N.B. No stops[] entry for this implicit -inf.
+
+        // Now the non-edge cases, calculating scale and bias between adjacent normal stops.
+        for (int i = 1; i < fColorCount; i++) {
+            float  t_hi = this->getPos(i);
+            F4 color_hi = F4::Load(rgba + i);
+
+            // If t_lo == t_hi, we're on a hard stop, and transition immediately to the next color.
+            SkASSERT(t_lo <= t_hi);
+            if (t_lo < t_hi) {
+                F4 f = (color_hi - color_lo) / (t_hi - t_lo),
+                   b = color_lo - f*t_lo;
+                stops.push_back(t_lo);
+                fb[stops.size()] = {f,b};
+            }
+
+            t_lo = t_hi;
+            color_lo = color_hi;
+        }
+        // Anything >= our final t clamps to our final color.
+        stops.push_back(t_lo);
+        fb[stops.size()] = { 0.0f, color_lo };
+
+        // We'll gather FBs from that array we just created.
+        skvm::Uniform fbs = uniforms->pushPtr(fb);
+
+        // Find the two stops we need to interpolate.
+        skvm::I32 ix;
+        if (fPositions == nullptr) {
+            // Evenly spaced stops... we can calculate ix directly.
+            ix = trunc(clamp(t * uniformF(stops.size() - 1) + 1.0f, 0.0f, uniformF(stops.size())));
+        } else {
+            // Starting ix at 0 bakes in our conceptual first stop at -inf.
+            // TODO: good place to experiment with a loop in skvm.... stops.size() can be huge.
+            ix = p->splat(0);
+            for (float stop : stops) {
+                // ix += (t >= stop) ? +1 : 0 ~~>
+                // ix -= (t >= stop) ? -1 : 0
+                ix -= (t >= uniformF(stop));
+            }
+            // TODO: we could skip any of the default stops GradientShaderBase's ctor added
+            // to ensure the full [0,1] span is covered.  This linear search doesn't need
+            // them for correctness, and it'd be up to two fewer stops to check.
+            // N.B. we do still need those stops for the fPositions == nullptr direct math path.
+        }
+
+        // A scale factor and bias for each lane, 8 total.
+        // TODO: simpler, faster, tidier to push 8 uniform pointers, one for each struct lane?
+        // ix*8 floats per FB entry (4 f + 4 b).
+        ix = shl(ix, 3);
+        skvm::F32 Fr = gatherF(fbs, ix + 0);
+        skvm::F32 Fg = gatherF(fbs, ix + 1);
+        skvm::F32 Fb = gatherF(fbs, ix + 2);
+        skvm::F32 Fa = gatherF(fbs, ix + 3);
+
+        skvm::F32 Br = gatherF(fbs, ix + 4);
+        skvm::F32 Bg = gatherF(fbs, ix + 5);
+        skvm::F32 Bb = gatherF(fbs, ix + 6);
+        skvm::F32 Ba = gatherF(fbs, ix + 7);
+
+        // This is what we've been building towards!
+        color = {
+            t * Fr + Br,
+            t * Fg + Bg,
+            t * Fb + Bb,
+            t * Fa + Ba,
+        };
+    }
+
+    using ColorSpace = Interpolation::ColorSpace;
+    bool colorIsPremul = this->interpolateInPremul();
+
+    // If we interpolated premul colors in any of the special color spaces, we need to unpremul
+    if (colorIsPremul) {
+        switch (fInterpolation.fColorSpace) {
+            case ColorSpace::kLab:
+            case ColorSpace::kOKLab:
+                color = unpremul(color);
+                colorIsPremul = false;
+                break;
+            case ColorSpace::kLCH:
+            case ColorSpace::kOKLCH:
+            case ColorSpace::kHSL:
+            case ColorSpace::kHWB: {
+                // Avoid unpremultiplying hue
+                skvm::F32 hue = color.r;
+                color = unpremul(color);
+                color.r = hue;
+                colorIsPremul = false;
+            } break;
+            default: break;
+        }
+    }
+
+    // Convert colors in exotic spaces back to their intermediate SkColorSpace
+    switch (fInterpolation.fColorSpace) {
+        case ColorSpace::kLab:   color = css_lab_to_xyz(color); break;
+        case ColorSpace::kOKLab: color = css_oklab_to_linear_srgb(color); break;
+        case ColorSpace::kLCH:   color = css_hcl_to_xyz(color); break;
+        case ColorSpace::kOKLCH: color = css_okhcl_to_linear_srgb(color); break;
+        case ColorSpace::kHSL:   color = css_hsl_to_srgb(color); break;
+        case ColorSpace::kHWB:   color = css_hwb_to_srgb(color, p); break;
+        default: break;
+    }
+
+    // Now transform from intermediate to destination color space.
+    // See comments in GrGradientShader.cpp about the decisions here.
+    SkColorSpace* dstColorSpace = dstInfo.colorSpace() ? dstInfo.colorSpace() : sk_srgb_singleton();
+    SkAlphaType intermediateAlphaType = colorIsPremul ? kPremul_SkAlphaType : kUnpremul_SkAlphaType;
+    SkAlphaType dstAlphaType = dstInfo.alphaType();
+
+    if (fColorsAreOpaque) {
+        intermediateAlphaType = dstAlphaType = kUnpremul_SkAlphaType;
+    }
+
+    color = SkColorSpaceXformSteps{xformedColors.fIntermediateColorSpace.get(),
+                                   intermediateAlphaType,
+                                   dstColorSpace,
+                                   dstAlphaType}
+                    .program(p, uniforms, color);
+
+    // Apply the decal coverage mask bitwise (zeroes out masked-off lanes).
+    return {
+        pun_to_F32(mask & pun_to_I32(color.r)),
+        pun_to_F32(mask & pun_to_I32(color.g)),
+        pun_to_F32(mask & pun_to_I32(color.b)),
+        pun_to_F32(mask & pun_to_I32(color.a)),
+    };
+}
+
+// Decal tiling produces transparent pixels outside [0,1], so it is never opaque.
+bool SkGradientShaderBase::isOpaque() const {
+    return fColorsAreOpaque && (this->getTileMode() != SkTileMode::kDecal);
+}
+
+// Integer division rounded to nearest (adds half the denominator before dividing).
+static unsigned rounded_divide(unsigned numer, unsigned denom) {
+    return (numer + (denom >> 1)) / denom;
+}
+
+// Reports a single representative color for luminance purposes: the unweighted
+// per-channel average of the legacy stop colors.
+bool SkGradientShaderBase::onAsLuminanceColor(SkColor* lum) const {
+    // we just compute an average color.
+    // possibly we could weight this based on the proportional width for each color
+    // assuming they are not evenly distributed in the fPos array.
+    int r = 0;
+    int g = 0;
+    int b = 0;
+    const int n = fColorCount;
+    // TODO: use linear colors?
+    for (int i = 0; i < n; ++i) {
+        SkColor c = this->getLegacyColor(i);
+        r += SkColorGetR(c);
+        g += SkColorGetG(c);
+        b += SkColorGetB(c);
+    }
+    *lum = SkColorSetRGB(rounded_divide(r, n), rounded_divide(g, n), rounded_divide(b, n));
+    return true;
+}
+
+// Picks the concrete SkColorSpace in which colors are stored before interpolation.
+// For "special" spaces (Lab/LCH/Oklab/Oklch/HSL/HWB) this is the RGB space from which
+// the bespoke conversion to that space starts; otherwise it is the exact space.
+static sk_sp<SkColorSpace> intermediate_color_space(SkGradientShader::Interpolation::ColorSpace cs,
+                                                    SkColorSpace* dst) {
+    using ColorSpace = SkGradientShader::Interpolation::ColorSpace;
+    switch (cs) {
+        case ColorSpace::kDestination: return sk_ref_sp(dst);
+
+        // css-color-4 allows XYZD50 and XYZD65. For gradients, those are redundant. Interpolating
+        // in any linear RGB space, (regardless of white point), gives the same answer.
+        case ColorSpace::kSRGBLinear: return SkColorSpace::MakeSRGBLinear();
+
+        case ColorSpace::kSRGB:
+        case ColorSpace::kHSL:
+        case ColorSpace::kHWB: return SkColorSpace::MakeSRGB();
+
+        case ColorSpace::kLab:
+        case ColorSpace::kLCH:
+            // Conversion to Lab (and LCH) starts with XYZD50
+            return SkColorSpace::MakeRGB(SkNamedTransferFn::kLinear, SkNamedGamut::kXYZ);
+
+        case ColorSpace::kOKLab:
+        case ColorSpace::kOKLCH:
+            // The "standard" conversion to these spaces starts with XYZD65. That requires extra
+            // effort to conjure. The author also has reference code for going directly from linear
+            // sRGB, so we use that.
+            // TODO(skia:13108): Even better would be to have an LMS color space, because the first
+            // part of the conversion is a matrix multiply, which could be absorbed into the
+            // color space xform.
+            return SkColorSpace::MakeSRGBLinear();
+    }
+    SkUNREACHABLE;
+}
+
+// Signature shared by the scalar per-color conversion helpers below.
+typedef SkPMColor4f (*ConvertColorProc)(SkPMColor4f);
+
+// sRGB -> HSL (hue in degrees, S/L as percentages), per the CSS Color 4 sample code.
+static SkPMColor4f srgb_to_hsl(SkPMColor4f rgb) {
+    float mx = std::max({rgb.fR, rgb.fG, rgb.fB});
+    float mn = std::min({rgb.fR, rgb.fG, rgb.fB});
+    float hue = 0, sat = 0, light = (mn + mx) / 2;
+    float d = mx - mn;
+
+    if (d != 0) {
+        sat = (light == 0 || light == 1) ? 0 : (mx - light) / std::min(light, 1 - light);
+        // Hue sector depends on which channel is the max.
+        if (mx == rgb.fR) {
+            hue = (rgb.fG - rgb.fB) / d + (rgb.fG < rgb.fB ? 6 : 0);
+        } else if (mx == rgb.fG) {
+            hue = (rgb.fB - rgb.fR) / d + 2;
+        } else {
+            hue = (rgb.fR - rgb.fG) / d + 4;
+        }
+
+        hue *= 60;
+    }
+    return { hue, sat * 100, light * 100, rgb.fA };
+}
+
+// sRGB -> HWB: hue via HSL, whiteness = min channel, blackness = 1 - max channel
+// (both stored as percentages).
+static SkPMColor4f srgb_to_hwb(SkPMColor4f rgb) {
+    SkPMColor4f hsl = srgb_to_hsl(rgb);
+    float white = std::min({rgb.fR, rgb.fG, rgb.fB});
+    float black = 1 - std::max({rgb.fR, rgb.fG, rgb.fB});
+    return { hsl.fR, white * 100, black * 100, rgb.fA };
+}
+
+// XYZ (D50) -> CIELAB, per the CSS Color 4 sample code.
+static SkPMColor4f xyzd50_to_lab(SkPMColor4f xyz) {
+    constexpr float D50[3] = { 0.3457f / 0.3585f, 1.0f, (1.0f - 0.3457f - 0.3585f) / 0.3585f };
+
+    constexpr float e = 216.0f / 24389;
+    constexpr float k = 24389.0f / 27;
+
+    SkPMColor4f f;
+    for (int i = 0; i < 3; ++i) {
+        // Normalize by the D50 white point, then apply the piecewise cube-root nonlinearity.
+        float v = xyz[i] / D50[i];
+        f[i] = (v > e) ? std::cbrtf(v) : (k * v + 16) / 116;
+    }
+
+    return { (116 * f[1]) - 16, 500 * (f[0] - f[1]), 200 * (f[1] - f[2]), xyz.fA };
+}
+
+// The color space is technically LCH, but we produce HCL, so that all polar spaces have hue in the
+// first component. This simplifies the hue handling for HueMethod and premul/unpremul.
+static SkPMColor4f xyzd50_to_hcl(SkPMColor4f xyz) {
+    SkPMColor4f Lab = xyzd50_to_lab(xyz);
+    // Hue from atan2(b, a), normalized into [0, 360).
+    float hue = sk_float_radians_to_degrees(atan2f(Lab[2], Lab[1]));
+    return {hue >= 0 ? hue : hue + 360,
+            sqrtf(Lab[1] * Lab[1] + Lab[2] * Lab[2]),
+            Lab[0],
+            xyz.fA};
+}
+
+// https://bottosson.github.io/posts/oklab/#converting-from-linear-srgb-to-oklab
+static SkPMColor4f lin_srgb_to_oklab(SkPMColor4f rgb) {
+    // Linear sRGB -> LMS.
+    float l = 0.4122214708f * rgb.fR + 0.5363325363f * rgb.fG + 0.0514459929f * rgb.fB;
+    float m = 0.2119034982f * rgb.fR + 0.6806995451f * rgb.fG + 0.1073969566f * rgb.fB;
+    float s = 0.0883024619f * rgb.fR + 0.2817188376f * rgb.fG + 0.6299787005f * rgb.fB;
+    // Cube-root nonlinearity.
+    l = std::cbrtf(l);
+    m = std::cbrtf(m);
+    s = std::cbrtf(s);
+    // LMS' -> Oklab.
+    return {
+        0.2104542553f*l + 0.7936177850f*m - 0.0040720468f*s,
+        1.9779984951f*l - 2.4285922050f*m + 0.4505937099f*s,
+        0.0259040371f*l + 0.7827717662f*m - 0.8086757660f*s,
+        rgb.fA
+    };
+}
+
+// The color space is technically OkLCH, but we produce HCL, so that all polar spaces have hue in
+// the first component. This simplifies the hue handling for HueMethod and premul/unpremul.
+static SkPMColor4f lin_srgb_to_okhcl(SkPMColor4f rgb) {
+    SkPMColor4f OKLab = lin_srgb_to_oklab(rgb);
+    // Hue from atan2(b, a), normalized into [0, 360).
+    float hue = sk_float_radians_to_degrees(atan2f(OKLab[2], OKLab[1]));
+    return {hue >= 0 ? hue : hue + 360,
+            sqrtf(OKLab[1] * OKLab[1] + OKLab[2] * OKLab[2]),
+            OKLab[0],
+            rgb.fA};
+}
+
+// Premultiply a polar color: hue (first component) is left un-premultiplied.
+static SkPMColor4f premul_polar(SkPMColor4f hsl) {
+    return { hsl.fR, hsl.fG * hsl.fA, hsl.fB * hsl.fA, hsl.fA };
+}
+
+// Standard premultiplication: scale all three color channels by alpha.
+static SkPMColor4f premul_rgb(SkPMColor4f rgb) {
+    return { rgb.fR * rgb.fA, rgb.fG * rgb.fA, rgb.fB * rgb.fA, rgb.fA };
+}
+
+// True for interpolation spaces that carry a hue angle (and thus need HueMethod
+// handling and hue-exempt premultiplication).
+static bool color_space_is_polar(SkGradientShader::Interpolation::ColorSpace cs) {
+    using ColorSpace = SkGradientShader::Interpolation::ColorSpace;
+    switch (cs) {
+        case ColorSpace::kLCH:
+        case ColorSpace::kOKLCH:
+        case ColorSpace::kHSL:
+        case ColorSpace::kHWB:
+            return true;
+        default:
+            return false;
+    }
+}
+
+// Given `colors` in `src` color space, an interpolation space, and a `dst` color space,
+// we are doing several things. First, some definitions:
+//
+// The interpolation color space is "special" if it can't be represented as an SkColorSpace. This
+// applies to any color space that isn't an RGB space, like Lab or HSL. These need special handling
+// because we have to run bespoke code to do the conversion (before interpolation here, and after
+// interpolation in the backend shader/pipeline).
+//
+// The interpolation color space is "polar" if it involves hue (HSL, HWB, LCH, Oklch). These need
+// special handling, because hue is never premultiplied, and because HueMethod comes into play.
+//
+// 1) Pick an `intermediate` SkColorSpace. If the interpolation color space is not "special",
+//    (kDestination, kSRGB, etc... ), then `intermediate` is exact. Otherwise, `intermediate` is the
+//    RGB space that prepares us to do the final conversion. For example, conversion to Lab starts
+//    with XYZD50, so `intermediate` will be XYZD50 if we're actually interpolating in Lab.
+// 2) Transform all colors to the `intermediate` color space, leaving them unpremultiplied.
+// 3) If the interpolation color space is "special", transform the colors to that space.
+// 4) If the interpolation color space is "polar", adjust the angles to respect HueMethod.
+// 5) If premul interpolation is requested, apply that. For "polar" interpolated colors, don't
+//    premultiply hue, only the other two channels. Note that there are four polar spaces.
+//    Two have hue as the first component, and two have it as the third component. To reduce
+//    complexity, we always store hue in the first component, swapping it with luminance for
+//    LCH and Oklch. The backend code (eg, shaders) needs to know about this.
+SkColor4fXformer::SkColor4fXformer(const SkGradientShaderBase* shader, SkColorSpace* dst) {
+    using ColorSpace = SkGradientShader::Interpolation::ColorSpace;
+    using HueMethod = SkGradientShader::Interpolation::HueMethod;
+
+    const int colorCount = shader->fColorCount;
+    const SkGradientShader::Interpolation interpolation = shader->fInterpolation;
+
+    // 1) Determine the color space of our intermediate colors
+    fIntermediateColorSpace = intermediate_color_space(interpolation.fColorSpace, dst);
+
+    // 2) Convert all colors to the intermediate color space
+    // Treat the stop array as a colorCount x 1 image so SkConvertPixels does the xform.
+    auto info = SkImageInfo::Make(colorCount, 1, kRGBA_F32_SkColorType, kUnpremul_SkAlphaType);
+
+    auto dstInfo = info.makeColorSpace(fIntermediateColorSpace);
+    auto srcInfo = info.makeColorSpace(shader->fColorSpace);
+
+    fColors.reset(colorCount);
+    SkAssertResult(SkConvertPixels(dstInfo, fColors.begin(), info.minRowBytes(),
+                                   srcInfo, shader->fColors, info.minRowBytes()));
+
+    // 3) Transform to the interpolation color space (if it's special)
+    ConvertColorProc convertFn = nullptr;
+    switch (interpolation.fColorSpace) {
+        case ColorSpace::kHSL:   convertFn = srgb_to_hsl;       break;
+        case ColorSpace::kHWB:   convertFn = srgb_to_hwb;       break;
+        case ColorSpace::kLab:   convertFn = xyzd50_to_lab;     break;
+        case ColorSpace::kLCH:   convertFn = xyzd50_to_hcl;     break;
+        case ColorSpace::kOKLab: convertFn = lin_srgb_to_oklab; break;
+        case ColorSpace::kOKLCH: convertFn = lin_srgb_to_okhcl; break;
+        default: break;
+    }
+
+    if (convertFn) {
+        for (int i = 0; i < colorCount; ++i) {
+            fColors[i] = convertFn(fColors[i]);
+        }
+    }
+
+    // 4) For polar colors, adjust hue values to respect the hue method. We're using a trick here...
+    //    The specification looks at adjacent colors, and adjusts one or the other. Because we store
+    //    the stops in uniforms (and our backend conversions normalize the hue angle), we can
+    //    instead always apply the adjustment to the *second* color. That lets us keep a running
+    //    total, and do a single pass across all the colors to respect the requested hue method,
+    //    without needing to do any extra work per-pixel.
+    if (color_space_is_polar(interpolation.fColorSpace)) {
+        float delta = 0;
+        for (int i = 0; i < colorCount - 1; ++i) {
+            float  h1 = fColors[i].fR;
+            float& h2 = fColors[i+1].fR;
+            h2 += delta;
+            switch (interpolation.fHueMethod) {
+                case HueMethod::kShorter:
+                    if (h2 - h1 > 180) {
+                        h2 -= 360;  // i.e. h1 += 360
+                        delta -= 360;
+                    } else if (h2 - h1 < -180) {
+                        h2 += 360;
+                        delta += 360;
+                    }
+                    break;
+                case HueMethod::kLonger:
+                    if ((i == 0 && shader->fFirstStopIsImplicit) ||
+                        (i == colorCount - 2 && shader->fLastStopIsImplicit)) {
+                        // Do nothing. We don't want to introduce a full revolution for these stops
+                        // Full rationale at skbug.com/13941
+                    } else if (0 < h2 - h1 && h2 - h1 < 180) {
+                        h2 -= 360;  // i.e. h1 += 360
+                        delta -= 360;
+                    } else if (-180 < h2 - h1 && h2 - h1 <= 0) {
+                        h2 += 360;
+                        delta += 360;
+                    }
+                    break;
+                case HueMethod::kIncreasing:
+                    if (h2 < h1) {
+                        h2 += 360;
+                        delta += 360;
+                    }
+                    break;
+                case HueMethod::kDecreasing:
+                    if (h1 < h2) {
+                        h2 -= 360;  // i.e. h1 += 360;
+                        delta -= 360;
+                    }
+                    break;
+            }
+        }
+    }
+
+    // 5) Apply premultiplication
+    ConvertColorProc premulFn = nullptr;
+    if (static_cast<bool>(interpolation.fInPremul)) {
+        switch (interpolation.fColorSpace) {
+            case ColorSpace::kHSL:
+            case ColorSpace::kHWB:
+            case ColorSpace::kLCH:
+            case ColorSpace::kOKLCH: premulFn = premul_polar; break;
+            default:                 premulFn = premul_rgb;   break;
+        }
+    }
+
+    if (premulFn) {
+        for (int i = 0; i < colorCount; ++i) {
+            fColors[i] = premulFn(fColors[i]);
+        }
+    }
+}
+
+// Widens legacy 8-bit SkColors to SkColor4f by dividing each channel by 255.
+SkColorConverter::SkColorConverter(const SkColor* colors, int count) {
+    const float ONE_OVER_255 = 1.f / 255;
+    for (int i = 0; i < count; ++i) {
+        fColors4f.push_back({ SkColorGetR(colors[i]) * ONE_OVER_255,
+                              SkColorGetG(colors[i]) * ONE_OVER_255,
+                              SkColorGetB(colors[i]) * ONE_OVER_255,
+                              SkColorGetA(colors[i]) * ONE_OVER_255 });
+    }
+}
+
+// Fills in the caller's GradientInfo. Colors/offsets are copied only when the
+// caller provided buffers large enough (fColorCount on entry is their capacity);
+// fColorCount is always updated to the actual stop count.
+void SkGradientShaderBase::commonAsAGradient(GradientInfo* info) const {
+    if (info) {
+        if (info->fColorCount >= fColorCount) {
+            if (info->fColors) {
+                for (int i = 0; i < fColorCount; ++i) {
+                    info->fColors[i] = this->getLegacyColor(i);
+                }
+            }
+            if (info->fColorOffsets) {
+                for (int i = 0; i < fColorCount; ++i) {
+                    info->fColorOffsets[i] = this->getPos(i);
+                }
+            }
+        }
+        info->fColorCount = fColorCount;
+        info->fTileMode = fTileMode;
+
+        info->fGradientFlags =
+                this->interpolateInPremul() ? SkGradientShader::kInterpolateColorsInPremul_Flag : 0;
+    }
+}
+
+// Return true if these parameters are valid/legal/safe to construct a gradient
+// (non-null colors, at least one stop, and in-range tile mode / interpolation enums).
+bool SkGradientShaderBase::ValidGradient(const SkColor4f colors[], int count, SkTileMode tileMode,
+                                         const Interpolation& interpolation) {
+    return nullptr != colors && count >= 1 && (unsigned)tileMode < kSkTileModeCount &&
+           (unsigned)interpolation.fColorSpace < Interpolation::kColorSpaceCount &&
+           (unsigned)interpolation.fHueMethod < Interpolation::kHueMethodCount;
+}
+
+SkGradientShaderBase::Descriptor::Descriptor(const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar positions[],
+ int colorCount,
+ SkTileMode mode,
+ const Interpolation& interpolation)
+ : fColors(colors)
+ , fColorSpace(std::move(colorSpace))
+ , fPositions(positions)
+ , fColorCount(colorCount)
+ , fTileMode(mode)
+ , fInterpolation(interpolation) {
+ SkASSERT(fColorCount > 1);
+}
+
+static SkColor4f average_gradient_color(const SkColor4f colors[], const SkScalar pos[],
+ int colorCount) {
+ // The gradient is a piecewise linear interpolation between colors. For a given interval,
+ // the integral between the two endpoints is 0.5 * (ci + cj) * (pj - pi), which provides that
+    // interval's average color. The overall average color is thus the sum of each piece. The thing
+ // to keep in mind is that the provided gradient definition may implicitly use p=0 and p=1.
+ skvx::float4 blend(0.0f);
+ for (int i = 0; i < colorCount - 1; ++i) {
+ // Calculate the average color for the interval between pos(i) and pos(i+1)
+ auto c0 = skvx::float4::Load(&colors[i]);
+ auto c1 = skvx::float4::Load(&colors[i + 1]);
+
+        // when pos == nullptr, there are colorCount uniformly distributed stops, going from 0 to 1,
+ // so pos[i + 1] - pos[i] = 1/(colorCount-1)
+ SkScalar w;
+ if (pos) {
+ // Match position fixing in SkGradientShader's constructor, clamping positions outside
+ // [0, 1] and forcing the sequence to be monotonic
+ SkScalar p0 = SkTPin(pos[i], 0.f, 1.f);
+ SkScalar p1 = SkTPin(pos[i + 1], p0, 1.f);
+ w = p1 - p0;
+
+ // And account for any implicit intervals at the start or end of the positions
+ if (i == 0) {
+ if (p0 > 0.0f) {
+                    // The first color is fixed from p = 0 to pos[0], so 0.5*(ci + cj)*(pj - pi)
+ // becomes 0.5*(c + c)*(pj - 0) = c * pj
+ auto c = skvx::float4::Load(&colors[0]);
+ blend += p0 * c;
+ }
+ }
+ if (i == colorCount - 2) {
+ if (p1 < 1.f) {
+                    // The last color is fixed from pos[n-1] to p = 1, so 0.5*(ci + cj)*(pj - pi)
+ // becomes 0.5*(c + c)*(1 - pi) = c * (1 - pi)
+ auto c = skvx::float4::Load(&colors[colorCount - 1]);
+ blend += (1.f - p1) * c;
+ }
+ }
+ } else {
+ w = 1.f / (colorCount - 1);
+ }
+
+ blend += 0.5f * w * (c1 + c0);
+ }
+
+ SkColor4f avg;
+ blend.store(&avg);
+ return avg;
+}
+
+// Except for special circumstances of clamped gradients, every gradient shape--when degenerate--
+// can be mapped to the same fallbacks. The specific shape factories must account for special
+// clamped conditions separately, this will always return the last color for clamped gradients.
+sk_sp<SkShader> SkGradientShaderBase::MakeDegenerateGradient(const SkColor4f colors[],
+ const SkScalar pos[],
+ int colorCount,
+ sk_sp<SkColorSpace> colorSpace,
+ SkTileMode mode) {
+ switch(mode) {
+ case SkTileMode::kDecal:
+            // normally this would reject the area outside of the interpolation region; since that
+            // region is empty for a degenerate gradient, the entire draw region is empty
+ return SkShaders::Empty();
+ case SkTileMode::kRepeat:
+ case SkTileMode::kMirror:
+ // repeat and mirror are treated the same: the border colors are never visible,
+            // but we approximate the final color as infinite repetitions of the colors, so
+ // it can be represented as the average color of the gradient.
+ return SkShaders::Color(
+ average_gradient_color(colors, pos, colorCount), std::move(colorSpace));
+ case SkTileMode::kClamp:
+ // Depending on how the gradient shape degenerates, there may be a more specialized
+ // fallback representation for the factories to use, but this is a reasonable default.
+ return SkShaders::Color(colors[colorCount - 1], std::move(colorSpace));
+ }
+ SkDEBUGFAIL("Should not be reached");
+ return nullptr;
+}
+
+SkGradientShaderBase::ColorStopOptimizer::ColorStopOptimizer(const SkColor4f* colors,
+ const SkScalar* pos,
+ int count,
+ SkTileMode mode)
+ : fColors(colors)
+ , fPos(pos)
+ , fCount(count) {
+
+ if (!pos || count != 3) {
+ return;
+ }
+
+ if (SkScalarNearlyEqual(pos[0], 0.0f) &&
+ SkScalarNearlyEqual(pos[1], 0.0f) &&
+ SkScalarNearlyEqual(pos[2], 1.0f)) {
+
+ if (SkTileMode::kRepeat == mode || SkTileMode::kMirror == mode ||
+ colors[0] == colors[1]) {
+
+ // Ignore the leftmost color/pos.
+ fColors += 1;
+ fPos += 1;
+ fCount = 2;
+ }
+ } else if (SkScalarNearlyEqual(pos[0], 0.0f) &&
+ SkScalarNearlyEqual(pos[1], 1.0f) &&
+ SkScalarNearlyEqual(pos[2], 1.0f)) {
+
+ if (SkTileMode::kRepeat == mode || SkTileMode::kMirror == mode ||
+ colors[1] == colors[2]) {
+
+ // Ignore the rightmost color/pos.
+ fCount = 2;
+ }
+ }
+}
+
+#if defined(SK_GRAPHITE)
+// Please see GrGradientShader.cpp::make_interpolated_to_dst for substantial comments
+// as to why this code is structured this way.
+void SkGradientShaderBase::MakeInterpolatedToDst(
+ const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer,
+ const skgpu::graphite::GradientShaderBlocks::GradientData& gradData,
+ const SkGradientShaderBase::Interpolation& interp,
+ SkColorSpace* intermediateCS) {
+ using ColorSpace = SkGradientShader::Interpolation::ColorSpace;
+ using namespace skgpu::graphite;
+
+ bool inputPremul = static_cast<bool>(interp.fInPremul);
+
+ switch (interp.fColorSpace) {
+ case ColorSpace::kLab:
+ case ColorSpace::kOKLab:
+ case ColorSpace::kLCH:
+ case ColorSpace::kOKLCH:
+ case ColorSpace::kHSL:
+ case ColorSpace::kHWB:
+ inputPremul = false;
+ break;
+ default:
+ break;
+ }
+
+ const SkColorInfo& dstColorInfo = keyContext.dstColorInfo();
+
+ SkColorSpace* dstColorSpace = dstColorInfo.colorSpace() ? dstColorInfo.colorSpace()
+ : sk_srgb_singleton();
+
+ SkAlphaType intermediateAlphaType = inputPremul ? kPremul_SkAlphaType
+ : kUnpremul_SkAlphaType;
+
+ ColorSpaceTransformBlock::ColorSpaceTransformData data(intermediateCS, intermediateAlphaType,
+ dstColorSpace, dstColorInfo.alphaType());
+
+ // The gradient block and colorSpace conversion block need to be combined together
+ // (via the colorFilterShader block) so that the localMatrix block can treat them as
+ // one child.
+ ColorFilterShaderBlock::BeginBlock(keyContext, builder, gatherer);
+
+ GradientShaderBlocks::BeginBlock(keyContext, builder, gatherer, gradData);
+ builder->endBlock();
+
+ ColorSpaceTransformBlock::BeginBlock(keyContext, builder, gatherer, &data);
+ builder->endBlock();
+
+ builder->endBlock();
+}
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkGradientShaderBase.h b/gfx/skia/skia/src/shaders/gradients/SkGradientShaderBase.h
new file mode 100644
index 0000000000..fc677cfaed
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkGradientShaderBase.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGradientShaderPriv_DEFINED
+#define SkGradientShaderPriv_DEFINED
+
+#include "include/effects/SkGradientShader.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkVM.h"
+#include "src/shaders/SkShaderBase.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyHelpers.h"
+#endif
+
+class SkArenaAlloc;
+class SkColorSpace;
+class SkRasterPipeline;
+class SkReadBuffer;
+class SkWriteBuffer;
+
+class SkGradientShaderBase : public SkShaderBase {
+public:
+ using Interpolation = SkGradientShader::Interpolation;
+
+ struct Descriptor {
+ Descriptor();
+ ~Descriptor();
+
+ Descriptor(const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar positions[],
+ int colorCount,
+ SkTileMode mode,
+ const Interpolation& interpolation);
+
+ const SkColor4f* fColors;
+ sk_sp<SkColorSpace> fColorSpace;
+ const SkScalar* fPositions;
+ int fColorCount; // length of fColors (and fPositions, if not nullptr)
+ SkTileMode fTileMode;
+ Interpolation fInterpolation;
+ };
+
+ class DescriptorScope : public Descriptor {
+ public:
+ DescriptorScope() {}
+
+ bool unflatten(SkReadBuffer&, SkMatrix* legacyLocalMatrix);
+
+ private:
+ SkSTArray<16, SkColor4f, true> fColorStorage;
+ SkSTArray<16, SkScalar , true> fPositionStorage;
+ };
+
+ SkGradientShaderBase(const Descriptor& desc, const SkMatrix& ptsToUnit);
+ ~SkGradientShaderBase() override;
+
+ bool isOpaque() const override;
+
+ bool interpolateInPremul() const {
+ return fInterpolation.fInPremul == SkGradientShader::Interpolation::InPremul::kYes;
+ }
+
+ const SkMatrix& getGradientMatrix() const { return fPtsToUnit; }
+
+ static bool ValidGradient(const SkColor4f colors[], int count, SkTileMode tileMode,
+ const Interpolation& interpolation);
+
+ static sk_sp<SkShader> MakeDegenerateGradient(const SkColor4f colors[], const SkScalar pos[],
+ int colorCount, sk_sp<SkColorSpace> colorSpace,
+ SkTileMode mode);
+
+ struct ColorStopOptimizer {
+ ColorStopOptimizer(const SkColor4f* colors, const SkScalar* pos, int count,
+ SkTileMode mode);
+
+ const SkColor4f* fColors;
+ const SkScalar* fPos;
+ int fCount;
+ };
+
+ // The default SkScalarNearlyZero threshold of .0024 is too big and causes regressions for svg
+ // gradients defined in the wild.
+ static constexpr SkScalar kDegenerateThreshold = SK_Scalar1 / (1 << 15);
+
+protected:
+ void flatten(SkWriteBuffer&) const override;
+
+ void commonAsAGradient(GradientInfo*) const;
+
+ bool onAsLuminanceColor(SkColor*) const override;
+
+ bool appendStages(const SkStageRec&, const MatrixRec&) const override;
+
+ skvm::Color program(skvm::Builder*,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color paint,
+ const MatrixRec&,
+ const SkColorInfo& dstCS,
+ skvm::Uniforms* uniforms,
+ SkArenaAlloc* alloc) const override;
+
+ virtual void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const = 0;
+
+ // Produce t from (x,y), modifying mask if it should be anything other than ~0.
+ virtual skvm::F32 transformT(skvm::Builder*, skvm::Uniforms*,
+ skvm::Coord coord, skvm::I32* mask) const = 0;
+
+ const SkMatrix fPtsToUnit;
+ SkTileMode fTileMode;
+
+#if defined(SK_GRAPHITE)
+ static void MakeInterpolatedToDst(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*,
+ const skgpu::graphite::GradientShaderBlocks::GradientData&,
+ const SkGradientShaderBase::Interpolation&,
+ SkColorSpace* intermediateCS);
+#endif
+
+public:
+ static void AppendGradientFillStages(SkRasterPipeline* p,
+ SkArenaAlloc* alloc,
+ const SkPMColor4f* colors,
+ const SkScalar* positions,
+ int count);
+
+ SkScalar getPos(int i) const {
+ SkASSERT(i < fColorCount);
+ return fPositions ? fPositions[i] : SkIntToScalar(i) / (fColorCount - 1);
+ }
+
+ SkColor getLegacyColor(int i) const {
+ SkASSERT(i < fColorCount);
+ return fColors[i].toSkColor();
+ }
+
+ SkColor4f* fColors; // points into fStorage
+ SkScalar* fPositions; // points into fStorage, or nullptr
+ int fColorCount; // length of fColors (and fPositions, if not nullptr)
+ sk_sp<SkColorSpace> fColorSpace; // color space of gradient stops
+ Interpolation fInterpolation;
+ bool fFirstStopIsImplicit;
+ bool fLastStopIsImplicit;
+
+ bool colorsAreOpaque() const { return fColorsAreOpaque; }
+
+ SkTileMode getTileMode() const { return fTileMode; }
+
+private:
+ // Reserve inline space for up to 4 stops.
+ inline static constexpr size_t kInlineStopCount = 4;
+ inline static constexpr size_t kInlineStorageSize = (sizeof(SkColor4f) + sizeof(SkScalar))
+ * kInlineStopCount;
+ skia_private::AutoSTMalloc<kInlineStorageSize, uint8_t> fStorage;
+
+ bool fColorsAreOpaque;
+
+ using INHERITED = SkShaderBase;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+struct SkColor4fXformer {
+ SkColor4fXformer(const SkGradientShaderBase* shader, SkColorSpace* dst);
+
+ SkSTArray<4, SkPMColor4f, true> fColors;
+ sk_sp<SkColorSpace> fIntermediateColorSpace;
+};
+
+struct SkColorConverter {
+ SkColorConverter(const SkColor* colors, int count);
+
+ SkSTArray<2, SkColor4f, true> fColors4f;
+};
+
+void SkRegisterLinearGradientShaderFlattenable();
+void SkRegisterRadialGradientShaderFlattenable();
+void SkRegisterSweepGradientShaderFlattenable();
+void SkRegisterTwoPointConicalGradientShaderFlattenable();
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.cpp b/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.cpp
new file mode 100644
index 0000000000..a7144555fb
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/shaders/gradients/SkLinearGradient.h"
+
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+static SkMatrix pts_to_unit_matrix(const SkPoint pts[2]) {
+ SkVector vec = pts[1] - pts[0];
+ SkScalar mag = vec.length();
+ SkScalar inv = mag ? SkScalarInvert(mag) : 0;
+
+ vec.scale(inv);
+ SkMatrix matrix;
+ matrix.setSinCos(-vec.fY, vec.fX, pts[0].fX, pts[0].fY);
+ matrix.postTranslate(-pts[0].fX, -pts[0].fY);
+ matrix.postScale(inv, inv);
+ return matrix;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkLinearGradient::SkLinearGradient(const SkPoint pts[2], const Descriptor& desc)
+ : SkGradientShaderBase(desc, pts_to_unit_matrix(pts))
+ , fStart(pts[0])
+ , fEnd(pts[1]) {
+}
+
+sk_sp<SkFlattenable> SkLinearGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ SkMatrix legacyLocalMatrix;
+ if (!desc.unflatten(buffer, &legacyLocalMatrix)) {
+ return nullptr;
+ }
+ SkPoint pts[2];
+ pts[0] = buffer.readPoint();
+ pts[1] = buffer.readPoint();
+ return SkGradientShader::MakeLinear(pts,
+ desc.fColors,
+ std::move(desc.fColorSpace),
+ desc.fPositions,
+ desc.fColorCount,
+ desc.fTileMode,
+ desc.fInterpolation,
+ &legacyLocalMatrix);
+}
+
+void SkLinearGradient::flatten(SkWriteBuffer& buffer) const {
+ this->INHERITED::flatten(buffer);
+ buffer.writePoint(fStart);
+ buffer.writePoint(fEnd);
+}
+
+void SkLinearGradient::appendGradientStages(SkArenaAlloc*, SkRasterPipeline*,
+ SkRasterPipeline*) const {
+ // No extra stage needed for linear gradients.
+}
+
+skvm::F32 SkLinearGradient::transformT(skvm::Builder* p, skvm::Uniforms*,
+ skvm::Coord coord, skvm::I32* mask) const {
+ // We've baked getting t in x into the matrix, so this is pretty trivial.
+ return coord.x;
+}
+
+SkShaderBase::GradientType SkLinearGradient::asGradient(GradientInfo* info,
+ SkMatrix* localMatrix) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fStart;
+ info->fPoint[1] = fEnd;
+ }
+ if (localMatrix) {
+ *localMatrix = SkMatrix::I();
+ }
+ return GradientType::kLinear;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
+#include "src/gpu/ganesh/gradients/GrGradientShader.h"
+
+std::unique_ptr<GrFragmentProcessor> SkLinearGradient::asFragmentProcessor(
+ const GrFPArgs& args, const MatrixRec& mRec) const {
+ return GrGradientShader::MakeLinear(*this, args, mRec);
+}
+
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkLinearGradient::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ SkColor4fXformer xformedColors(this, keyContext.dstColorInfo().colorSpace());
+ const SkPMColor4f* colors = xformedColors.fColors.begin();
+
+ GradientShaderBlocks::GradientData data(GradientType::kLinear,
+ fStart, fEnd,
+ 0.0f, 0.0f,
+ 0.0f, 0.0f,
+ fTileMode,
+ fColorCount,
+ colors,
+ fPositions,
+ fInterpolation);
+
+ MakeInterpolatedToDst(keyContext, builder, gatherer,
+ data, fInterpolation,
+ xformedColors.fIntermediateColorSpace.get());
+}
+#endif
+
+sk_sp<SkShader> SkGradientShader::MakeLinear(const SkPoint pts[2],
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ const Interpolation& interpolation,
+ const SkMatrix* localMatrix) {
+ if (!pts || !SkScalarIsFinite((pts[1] - pts[0]).length())) {
+ return nullptr;
+ }
+ if (!SkGradientShaderBase::ValidGradient(colors, colorCount, mode, interpolation)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShaders::Color(colors[0], std::move(colorSpace));
+ }
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+
+ if (SkScalarNearlyZero((pts[1] - pts[0]).length(),
+ SkGradientShaderBase::kDegenerateThreshold)) {
+ // Degenerate gradient, the only tricky complication is when in clamp mode, the limit of
+ // the gradient approaches two half planes of solid color (first and last). However, they
+ // are divided by the line perpendicular to the start and end point, which becomes undefined
+ // once start and end are exactly the same, so just use the end color for a stable solution.
+ return SkGradientShaderBase::MakeDegenerateGradient(colors, pos, colorCount,
+ std::move(colorSpace), mode);
+ }
+
+ SkGradientShaderBase::ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc(opt.fColors, std::move(colorSpace), opt.fPos,
+ opt.fCount, mode, interpolation);
+ return SkLocalMatrixShader::MakeWrapped<SkLinearGradient>(localMatrix, pts, desc);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeLinear(const SkPoint pts[2],
+ const SkColor colors[],
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ SkColorConverter converter(colors, colorCount);
+ return MakeLinear(pts, converter.fColors4f.begin(), nullptr, pos, colorCount, mode, flags,
+ localMatrix);
+}
+
+void SkRegisterLinearGradientShaderFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkLinearGradient);
+}
diff --git a/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.h b/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.h
new file mode 100644
index 0000000000..48a340ed1c
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkLinearGradient.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkLinearGradient_DEFINED
+#define SkLinearGradient_DEFINED
+
+#include "src/shaders/gradients/SkGradientShaderBase.h"
+
+class SkLinearGradient final : public SkGradientShaderBase {
+public:
+ SkLinearGradient(const SkPoint pts[2], const Descriptor&);
+
+ GradientType asGradient(GradientInfo* info, SkMatrix* localMatrix) const override;
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+protected:
+ SkLinearGradient(SkReadBuffer& buffer);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const final;
+
+ skvm::F32 transformT(skvm::Builder*, skvm::Uniforms*,
+ skvm::Coord coord, skvm::I32* mask) const final;
+
+private:
+ friend void ::SkRegisterLinearGradientShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkLinearGradient)
+
+ class LinearGradient4fContext;
+
+ friend class SkGradientShader;
+ using INHERITED = SkGradientShaderBase;
+ const SkPoint fStart;
+ const SkPoint fEnd;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.cpp b/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.cpp
new file mode 100644
index 0000000000..89760ac072
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkRadialGradient.cpp
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+#include "src/shaders/gradients/SkGradientShaderBase.h"
+
+namespace {
+
+SkMatrix rad_to_unit_matrix(const SkPoint& center, SkScalar radius) {
+ SkScalar inv = SkScalarInvert(radius);
+
+ SkMatrix matrix;
+ matrix.setTranslate(-center.fX, -center.fY);
+ matrix.postScale(inv, inv);
+ return matrix;
+}
+
+} // namespace
+
+/////////////////////////////////////////////////////////////////////
+class SkRadialGradient final : public SkGradientShaderBase {
+public:
+ SkRadialGradient(const SkPoint& center, SkScalar radius, const Descriptor&);
+
+ GradientType asGradient(GradientInfo* info, SkMatrix* matrix) const override;
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+protected:
+ SkRadialGradient(SkReadBuffer& buffer);
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const override;
+
+ skvm::F32 transformT(skvm::Builder*, skvm::Uniforms*,
+ skvm::Coord coord, skvm::I32* mask) const final;
+
+private:
+ friend void ::SkRegisterRadialGradientShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkRadialGradient)
+
+ const SkPoint fCenter;
+ const SkScalar fRadius;
+};
+
+SkRadialGradient::SkRadialGradient(const SkPoint& center, SkScalar radius, const Descriptor& desc)
+ : SkGradientShaderBase(desc, rad_to_unit_matrix(center, radius))
+ , fCenter(center)
+ , fRadius(radius) {
+}
+
+SkShaderBase::GradientType SkRadialGradient::asGradient(GradientInfo* info,
+ SkMatrix* localMatrix) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fCenter;
+ info->fRadius[0] = fRadius;
+ }
+ if (localMatrix) {
+ *localMatrix = SkMatrix::I();
+ }
+ return GradientType::kRadial;
+}
+
+sk_sp<SkFlattenable> SkRadialGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ SkMatrix legacyLocalMatrix;
+ if (!desc.unflatten(buffer, &legacyLocalMatrix)) {
+ return nullptr;
+ }
+ const SkPoint center = buffer.readPoint();
+ const SkScalar radius = buffer.readScalar();
+ return SkGradientShader::MakeRadial(center,
+ radius,
+ desc.fColors,
+ std::move(desc.fColorSpace),
+ desc.fPositions,
+ desc.fColorCount,
+ desc.fTileMode,
+ desc.fInterpolation,
+ &legacyLocalMatrix);
+}
+
+void SkRadialGradient::flatten(SkWriteBuffer& buffer) const {
+ this->SkGradientShaderBase::flatten(buffer);
+ buffer.writePoint(fCenter);
+ buffer.writeScalar(fRadius);
+}
+
+void SkRadialGradient::appendGradientStages(SkArenaAlloc*, SkRasterPipeline* p,
+ SkRasterPipeline*) const {
+ p->append(SkRasterPipelineOp::xy_to_radius);
+}
+
+skvm::F32 SkRadialGradient::transformT(skvm::Builder* p, skvm::Uniforms*,
+ skvm::Coord coord, skvm::I32* mask) const {
+ return sqrt(coord.x*coord.x + coord.y*coord.y);
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#include "src/gpu/ganesh/gradients/GrGradientShader.h"
+
+std::unique_ptr<GrFragmentProcessor>
+SkRadialGradient::asFragmentProcessor(const GrFPArgs& args, const MatrixRec& mRec) const {
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "half4 main(float2 coord) {"
+ "return half4(half(length(coord)), 1, 0, 0);" // y = 1 for always valid
+ "}"
+ );
+ // The radial gradient never rejects a pixel so it doesn't change opacity
+ auto fp = GrSkSLFP::Make(effect, "RadialLayout", /*inputFP=*/nullptr,
+ GrSkSLFP::OptFlags::kPreservesOpaqueInput);
+ return GrGradientShader::MakeGradientFP(*this, args, mRec, std::move(fp));
+}
+
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkRadialGradient::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ SkColor4fXformer xformedColors(this, keyContext.dstColorInfo().colorSpace());
+ const SkPMColor4f* colors = xformedColors.fColors.begin();
+
+ GradientShaderBlocks::GradientData data(GradientType::kRadial,
+ fCenter, { 0.0f, 0.0f },
+ fRadius, 0.0f,
+ 0.0f, 0.0f,
+ fTileMode,
+ fColorCount,
+ colors,
+ fPositions,
+ fInterpolation);
+
+ MakeInterpolatedToDst(keyContext, builder, gatherer,
+ data,
+ fInterpolation,
+ xformedColors.fIntermediateColorSpace.get());
+}
+#endif
+
+sk_sp<SkShader> SkGradientShader::MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ const Interpolation& interpolation,
+ const SkMatrix* localMatrix) {
+ if (radius < 0) {
+ return nullptr;
+ }
+ if (!SkGradientShaderBase::ValidGradient(colors, colorCount, mode, interpolation)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShaders::Color(colors[0], std::move(colorSpace));
+ }
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+
+ if (SkScalarNearlyZero(radius, SkGradientShaderBase::kDegenerateThreshold)) {
+ // Degenerate gradient optimization, and no special logic needed for clamped radial gradient
+ return SkGradientShaderBase::MakeDegenerateGradient(colors, pos, colorCount,
+ std::move(colorSpace), mode);
+ }
+
+ SkGradientShaderBase::ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc(opt.fColors, std::move(colorSpace), opt.fPos,
+ opt.fCount, mode, interpolation);
+ return SkLocalMatrixShader::MakeWrapped<SkRadialGradient>(localMatrix, center, radius, desc);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeRadial(const SkPoint& center, SkScalar radius,
+ const SkColor colors[],
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ SkColorConverter converter(colors, colorCount);
+ return MakeRadial(center, radius, converter.fColors4f.begin(), nullptr, pos, colorCount, mode,
+ flags, localMatrix);
+}
+
+void SkRegisterRadialGradientShaderFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkRadialGradient);
+}
diff --git a/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.cpp b/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.cpp
new file mode 100644
index 0000000000..f45be27844
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkSweepGradient.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkFloatingPoint.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+#include "src/shaders/gradients/SkGradientShaderBase.h"
+
+class SkSweepGradient final : public SkGradientShaderBase {
+public:
+ SkSweepGradient(const SkPoint& center, SkScalar t0, SkScalar t1, const Descriptor&);
+
+ GradientType asGradient(GradientInfo* info, SkMatrix* localMatrix) const override;
+
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const override;
+
+ skvm::F32 transformT(skvm::Builder*, skvm::Uniforms*,
+ skvm::Coord coord, skvm::I32* mask) const final;
+private:
+ friend void ::SkRegisterSweepGradientShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkSweepGradient)
+
+ const SkPoint fCenter;
+ const SkScalar fTBias;
+ const SkScalar fTScale;
+};
+
+SkSweepGradient::SkSweepGradient(const SkPoint& center, SkScalar t0, SkScalar t1,
+ const Descriptor& desc)
+ : SkGradientShaderBase(desc, SkMatrix::Translate(-center.x(), -center.y()))
+ , fCenter(center)
+ , fTBias(-t0)
+ , fTScale(1 / (t1 - t0))
+{
+ SkASSERT(t0 < t1);
+}
+
+SkShaderBase::GradientType SkSweepGradient::asGradient(GradientInfo* info,
+ SkMatrix* localMatrix) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fCenter;
+ }
+ if (localMatrix) {
+ *localMatrix = SkMatrix::I();
+ }
+ return GradientType::kSweep;
+}
+
+static std::tuple<SkScalar, SkScalar> angles_from_t_coeff(SkScalar tBias, SkScalar tScale) {
+ return std::make_tuple(-tBias * 360, (sk_ieee_float_divide(1, tScale) - tBias) * 360);
+}
+
+sk_sp<SkFlattenable> SkSweepGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ SkMatrix legacyLocalMatrix;
+ if (!desc.unflatten(buffer, &legacyLocalMatrix)) {
+ return nullptr;
+ }
+ const SkPoint center = buffer.readPoint();
+
+ const auto tBias = buffer.readScalar(),
+ tScale = buffer.readScalar();
+ auto [startAngle, endAngle] = angles_from_t_coeff(tBias, tScale);
+
+ return SkGradientShader::MakeSweep(center.x(), center.y(),
+ desc.fColors,
+ std::move(desc.fColorSpace),
+ desc.fPositions,
+ desc.fColorCount,
+ desc.fTileMode,
+ startAngle,
+ endAngle,
+ desc.fInterpolation,
+ &legacyLocalMatrix);
+}
+
+void SkSweepGradient::flatten(SkWriteBuffer& buffer) const {
+ this->SkGradientShaderBase::flatten(buffer);
+ buffer.writePoint(fCenter);
+ buffer.writeScalar(fTBias);
+ buffer.writeScalar(fTScale);
+}
+
+void SkSweepGradient::appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* p,
+ SkRasterPipeline*) const {
+ p->append(SkRasterPipelineOp::xy_to_unit_angle);
+ p->append_matrix(alloc, SkMatrix::Scale(fTScale, 1) * SkMatrix::Translate(fTBias, 0));
+}
+
+skvm::F32 SkSweepGradient::transformT(skvm::Builder* p, skvm::Uniforms* uniforms,
+ skvm::Coord coord, skvm::I32* mask) const {
+ skvm::F32 xabs = abs(coord.x),
+ yabs = abs(coord.y),
+ slope = min(xabs, yabs) / max(xabs, yabs);
+ skvm::F32 s = slope * slope;
+
+ // Use a 7th degree polynomial to approximate atan.
+ // This was generated using sollya.gforge.inria.fr.
+ // A float optimized polynomial was generated using the following command.
+ // P1 = fpminimax((1/(2*Pi))*atan(x),[|1,3,5,7|],[|24...|],[2^(-40),1],relative);
+ skvm::F32 phi = slope * poly(s, -7.0547382347285747528076171875e-3f,
+ +2.476101927459239959716796875e-2f,
+ -5.185396969318389892578125e-2f,
+ +0.15912117063999176025390625f);
+ phi = select( xabs < yabs, (1/4.0f) - phi, phi);
+ phi = select(coord.x < 0.0f, (1/2.0f) - phi, phi);
+ phi = select(coord.y < 0.0f, (1/1.0f) - phi, phi);
+
+ skvm::F32 t = select(is_NaN(phi), p->splat(0.0f)
+ , phi);
+
+ if (fTScale != 1.0f || fTBias != 0.0f) {
+ t = t * p->uniformF(uniforms->pushF(fTScale))
+ + p->uniformF(uniforms->pushF(fTScale*fTBias));
+ }
+ return t;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/gpu/ganesh/GrCaps.h"
+#include "src/gpu/ganesh/GrRecordingContextPriv.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#include "src/gpu/ganesh/gradients/GrGradientShader.h"
+
+std::unique_ptr<GrFragmentProcessor> SkSweepGradient::asFragmentProcessor(
+ const GrFPArgs& args, const MatrixRec& mRec) const {
+ // On some devices they incorrectly implement atan2(y,x) as atan(y/x). In actuality it is
+ // atan2(y,x) = 2 * atan(y / (sqrt(x^2 + y^2) + x)). So to work around this we pass in (sqrt(x^2
+ // + y^2) + x) as the second parameter to atan2 in these cases. We let the device handle the
+ // undefined behavior of the second parameter being 0 instead of doing the divide ourselves and
+ // using atan instead.
+ int useAtanWorkaround =
+ args.fContext->priv().caps()->shaderCaps()->fAtan2ImplementedAsAtanYOverX;
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform half bias;"
+ "uniform half scale;"
+ "uniform int useAtanWorkaround;" // specialized
+
+ "half4 main(float2 coord) {"
+ "half angle = bool(useAtanWorkaround)"
+ "? half(2 * atan(-coord.y, length(coord) - coord.x))"
+ ": half(atan(-coord.y, -coord.x));"
+
+ // 0.1591549430918 is 1/(2*pi), used since atan returns values [-pi, pi]
+ "half t = (angle * 0.1591549430918 + 0.5 + bias) * scale;"
+ "return half4(t, 1, 0, 0);" // y = 1 for always valid
+ "}"
+ );
+
+ // The sweep gradient never rejects a pixel so it doesn't change opacity
+ auto fp = GrSkSLFP::Make(effect, "SweepLayout", /*inputFP=*/nullptr,
+ GrSkSLFP::OptFlags::kPreservesOpaqueInput,
+ "bias", fTBias,
+ "scale", fTScale,
+ "useAtanWorkaround", GrSkSLFP::Specialize(useAtanWorkaround));
+ return GrGradientShader::MakeGradientFP(*this, args, mRec, std::move(fp));
+}
+
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkSweepGradient::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ SkColor4fXformer xformedColors(this, keyContext.dstColorInfo().colorSpace());
+ const SkPMColor4f* colors = xformedColors.fColors.begin();
+
+ GradientShaderBlocks::GradientData data(SkShaderBase::GradientType::kSweep,
+ fCenter, { 0.0f, 0.0f },
+ 0.0, 0.0f,
+ fTBias, fTScale,
+ fTileMode,
+ fColorCount,
+ colors,
+ fPositions,
+ fInterpolation);
+
+ MakeInterpolatedToDst(keyContext, builder, gatherer,
+ data, fInterpolation,
+ xformedColors.fIntermediateColorSpace.get());
+
+}
+#endif
+
+sk_sp<SkShader> SkGradientShader::MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ SkScalar startAngle,
+ SkScalar endAngle,
+ const Interpolation& interpolation,
+ const SkMatrix* localMatrix) {
+ if (!SkGradientShaderBase::ValidGradient(colors, colorCount, mode, interpolation)) {
+ return nullptr;
+ }
+ if (1 == colorCount) {
+ return SkShaders::Color(colors[0], std::move(colorSpace));
+ }
+ if (!SkScalarIsFinite(startAngle) || !SkScalarIsFinite(endAngle) || startAngle > endAngle) {
+ return nullptr;
+ }
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+
+ if (SkScalarNearlyEqual(startAngle, endAngle, SkGradientShaderBase::kDegenerateThreshold)) {
+ // Degenerate gradient, which should follow default degenerate behavior unless it is
+ // clamped and the angle is greater than 0.
+ if (mode == SkTileMode::kClamp && endAngle > SkGradientShaderBase::kDegenerateThreshold) {
+ // In this case, the first color is repeated from 0 to the angle, then a hardstop
+ // switches to the last color (all other colors are compressed to the infinitely thin
+ // interpolation region).
+ static constexpr SkScalar clampPos[3] = {0, 1, 1};
+ SkColor4f reColors[3] = {colors[0], colors[0], colors[colorCount - 1]};
+ return MakeSweep(cx, cy, reColors, std::move(colorSpace), clampPos, 3, mode, 0,
+ endAngle, interpolation, localMatrix);
+ } else {
+ return SkGradientShaderBase::MakeDegenerateGradient(colors, pos, colorCount,
+ std::move(colorSpace), mode);
+ }
+ }
+
+ if (startAngle <= 0 && endAngle >= 360) {
+ // If the t-range includes [0,1], then we can always use clamping (presumably faster).
+ mode = SkTileMode::kClamp;
+ }
+
+ SkGradientShaderBase::ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc(opt.fColors, std::move(colorSpace), opt.fPos,
+ opt.fCount, mode, interpolation);
+
+ const SkScalar t0 = startAngle / 360,
+ t1 = endAngle / 360;
+
+ return SkLocalMatrixShader::MakeWrapped<SkSweepGradient>(localMatrix,
+ SkPoint::Make(cx, cy),
+ t0, t1,
+ desc);
+}
+
+sk_sp<SkShader> SkGradientShader::MakeSweep(SkScalar cx, SkScalar cy,
+ const SkColor colors[],
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ SkScalar startAngle,
+ SkScalar endAngle,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ SkColorConverter converter(colors, colorCount);
+ return MakeSweep(cx, cy, converter.fColors4f.begin(), nullptr, pos, colorCount,
+ mode, startAngle, endAngle, flags, localMatrix);
+}
+
+void SkRegisterSweepGradientShaderFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkSweepGradient);
+}
diff --git a/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp b/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp
new file mode 100644
index 0000000000..4b578194e6
--- /dev/null
+++ b/gfx/skia/skia/src/shaders/gradients/SkTwoPointConicalGradient.cpp
@@ -0,0 +1,657 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/base/SkFloatingPoint.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkWriteBuffer.h"
+#include "src/shaders/SkLocalMatrixShader.h"
+#include "src/shaders/gradients/SkGradientShaderBase.h"
+
+#include <utility>
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+#endif
+
+// Please see https://skia.org/dev/design/conical for how our shader works.
+
+class SkTwoPointConicalGradient final : public SkGradientShaderBase {
+public:
+ // See https://skia.org/dev/design/conical for what focal data means and how our shader works.
+ // We make it public so the GPU shader can also use it.
+ struct FocalData {
+ SkScalar fR1; // r1 after mapping focal point to (0, 0)
+ SkScalar fFocalX; // f
+ bool fIsSwapped; // whether we swapped r0, r1
+
+ // The input r0, r1 are the radii when we map centers to {(0, 0), (1, 0)}.
+ // We'll post concat matrix with our transformation matrix that maps focal point to (0, 0).
+ // Returns true if the set succeeded
+ bool set(SkScalar r0, SkScalar r1, SkMatrix* matrix);
+
+ // Whether the focal point (0, 0) is on the end circle with center (1, 0) and radius r1. If
+ // this is true, it's as if an aircraft is flying at Mach 1 and all circles (soundwaves)
+ // will go through the focal point (aircraft). In our previous implementations, this was
+ // known as the edge case where the inside circle touches the outside circle (on the focal
+ // point). If we were to solve for t by brute force using a quadratic equation, this case
+ // implies that the quadratic equation degenerates to a linear equation.
+ bool isFocalOnCircle() const { return SkScalarNearlyZero(1 - fR1); }
+
+ bool isSwapped() const { return fIsSwapped; }
+ bool isWellBehaved() const { return !this->isFocalOnCircle() && fR1 > 1; }
+ bool isNativelyFocal() const { return SkScalarNearlyZero(fFocalX); }
+ };
+
+ enum class Type {
+ kRadial,
+ kStrip,
+ kFocal
+ };
+
+ static sk_sp<SkShader> Create(const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const Descriptor&, const SkMatrix* localMatrix);
+
+ GradientType asGradient(GradientInfo* info, SkMatrix* localMatrix) const override;
+#if defined(SK_GANESH)
+ std::unique_ptr<GrFragmentProcessor> asFragmentProcessor(const GrFPArgs&,
+ const MatrixRec&) const override;
+#endif
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+ bool isOpaque() const override;
+
+ SkScalar getCenterX1() const { return SkPoint::Distance(fCenter1, fCenter2); }
+ SkScalar getStartRadius() const { return fRadius1; }
+ SkScalar getDiffRadius() const { return fRadius2 - fRadius1; }
+ const SkPoint& getStartCenter() const { return fCenter1; }
+ const SkPoint& getEndCenter() const { return fCenter2; }
+ SkScalar getEndRadius() const { return fRadius2; }
+
+ Type getType() const { return fType; }
+ const FocalData& getFocalData() const { return fFocalData; }
+
+ SkTwoPointConicalGradient(const SkPoint& c0, SkScalar r0,
+ const SkPoint& c1, SkScalar r1,
+ const Descriptor&, Type, const SkMatrix&, const FocalData&);
+
+protected:
+ void flatten(SkWriteBuffer& buffer) const override;
+
+ void appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* tPipeline,
+ SkRasterPipeline* postPipeline) const override;
+
+ skvm::F32 transformT(skvm::Builder*, skvm::Uniforms*,
+ skvm::Coord coord, skvm::I32* mask) const final;
+
+private:
+ friend void ::SkRegisterTwoPointConicalGradientShaderFlattenable();
+ SK_FLATTENABLE_HOOKS(SkTwoPointConicalGradient)
+
+ SkPoint fCenter1;
+ SkPoint fCenter2;
+ SkScalar fRadius1;
+ SkScalar fRadius2;
+ Type fType;
+
+ FocalData fFocalData;
+};
+
+bool SkTwoPointConicalGradient::FocalData::set(SkScalar r0, SkScalar r1, SkMatrix* matrix) {
+ fIsSwapped = false;
+ fFocalX = sk_ieee_float_divide(r0, (r0 - r1));
+ if (SkScalarNearlyZero(fFocalX - 1)) {
+ // swap r0, r1
+ matrix->postTranslate(-1, 0);
+ matrix->postScale(-1, 1);
+ std::swap(r0, r1);
+ fFocalX = 0; // because r0 is now 0
+ fIsSwapped = true;
+ }
+
+ // Map {focal point, (1, 0)} to {(0, 0), (1, 0)}
+ const SkPoint from[2] = { {fFocalX, 0}, {1, 0} };
+ const SkPoint to[2] = { {0, 0}, {1, 0} };
+ SkMatrix focalMatrix;
+ if (!focalMatrix.setPolyToPoly(from, to, 2)) {
+ return false;
+ }
+ matrix->postConcat(focalMatrix);
+ fR1 = r1 / SkScalarAbs(1 - fFocalX); // focalMatrix has a scale of 1/(1-f)
+
+ // The following transformations are just to accelerate the shader computation by saving
+ // some arithmetic operations.
+ if (this->isFocalOnCircle()) {
+ matrix->postScale(0.5, 0.5);
+ } else {
+ matrix->postScale(fR1 / (fR1 * fR1 - 1), 1 / sqrt(SkScalarAbs(fR1 * fR1 - 1)));
+ }
+ matrix->postScale(SkScalarAbs(1 - fFocalX), SkScalarAbs(1 - fFocalX)); // scale |1 - f|
+ return true;
+}
+
+sk_sp<SkShader> SkTwoPointConicalGradient::Create(const SkPoint& c0, SkScalar r0,
+ const SkPoint& c1, SkScalar r1,
+ const Descriptor& desc,
+ const SkMatrix* localMatrix) {
+ SkMatrix gradientMatrix;
+ Type gradientType;
+
+ if (SkScalarNearlyZero((c0 - c1).length())) {
+ if (SkScalarNearlyZero(std::max(r0, r1)) || SkScalarNearlyEqual(r0, r1)) {
+ // Degenerate case; avoid dividing by zero. Should have been caught by caller but
+ // just in case, recheck here.
+ return nullptr;
+ }
+ // Concentric case: we can pretend we're radial (with a tiny twist).
+ const SkScalar scale = sk_ieee_float_divide(1, std::max(r0, r1));
+ gradientMatrix = SkMatrix::Translate(-c1.x(), -c1.y());
+ gradientMatrix.postScale(scale, scale);
+
+ gradientType = Type::kRadial;
+ } else {
+ const SkPoint centers[2] = { c0 , c1 };
+ const SkPoint unitvec[2] = { {0, 0}, {1, 0} };
+
+ if (!gradientMatrix.setPolyToPoly(centers, unitvec, 2)) {
+ // Degenerate case.
+ return nullptr;
+ }
+
+ gradientType = SkScalarNearlyZero(r1 - r0) ? Type::kStrip : Type::kFocal;
+ }
+
+ FocalData focalData;
+ if (gradientType == Type::kFocal) {
+ const auto dCenter = (c0 - c1).length();
+ if (!focalData.set(r0 / dCenter, r1 / dCenter, &gradientMatrix)) {
+ return nullptr;
+ }
+ }
+ return SkLocalMatrixShader::MakeWrapped<SkTwoPointConicalGradient>(localMatrix,
+ c0, r0,
+ c1, r1,
+ desc,
+ gradientType,
+ gradientMatrix,
+ focalData);
+}
+
+SkTwoPointConicalGradient::SkTwoPointConicalGradient(
+ const SkPoint& start, SkScalar startRadius,
+ const SkPoint& end, SkScalar endRadius,
+ const Descriptor& desc, Type type, const SkMatrix& gradientMatrix, const FocalData& data)
+ : SkGradientShaderBase(desc, gradientMatrix)
+ , fCenter1(start)
+ , fCenter2(end)
+ , fRadius1(startRadius)
+ , fRadius2(endRadius)
+ , fType(type)
+{
+ // this is degenerate, and should be caught by our caller
+ SkASSERT(fCenter1 != fCenter2 || fRadius1 != fRadius2);
+ if (type == Type::kFocal) {
+ fFocalData = data;
+ }
+}
+
+bool SkTwoPointConicalGradient::isOpaque() const {
+ // Because areas outside the cone are left untouched, we cannot treat the
+ // shader as opaque even if the gradient itself is opaque.
+ // TODO(junov): Compute whether the cone fills the plane crbug.com/222380
+ return false;
+}
+
+// Returns the original non-sorted version of the gradient
+SkShaderBase::GradientType SkTwoPointConicalGradient::asGradient(GradientInfo* info,
+ SkMatrix* localMatrix) const {
+ if (info) {
+ commonAsAGradient(info);
+ info->fPoint[0] = fCenter1;
+ info->fPoint[1] = fCenter2;
+ info->fRadius[0] = fRadius1;
+ info->fRadius[1] = fRadius2;
+ }
+ if (localMatrix) {
+ *localMatrix = SkMatrix::I();
+ }
+ return GradientType::kConical;
+}
+
+sk_sp<SkFlattenable> SkTwoPointConicalGradient::CreateProc(SkReadBuffer& buffer) {
+ DescriptorScope desc;
+ SkMatrix legacyLocalMatrix;
+ if (!desc.unflatten(buffer, &legacyLocalMatrix)) {
+ return nullptr;
+ }
+ SkPoint c1 = buffer.readPoint();
+ SkPoint c2 = buffer.readPoint();
+ SkScalar r1 = buffer.readScalar();
+ SkScalar r2 = buffer.readScalar();
+
+ if (!buffer.isValid()) {
+ return nullptr;
+ }
+ return SkGradientShader::MakeTwoPointConical(c1, r1,
+ c2, r2,
+ desc.fColors,
+ std::move(desc.fColorSpace),
+ desc.fPositions,
+ desc.fColorCount,
+ desc.fTileMode,
+ desc.fInterpolation,
+ &legacyLocalMatrix);
+}
+
+void SkTwoPointConicalGradient::flatten(SkWriteBuffer& buffer) const {
+ this->SkGradientShaderBase::flatten(buffer);
+ buffer.writePoint(fCenter1);
+ buffer.writePoint(fCenter2);
+ buffer.writeScalar(fRadius1);
+ buffer.writeScalar(fRadius2);
+}
+
+void SkTwoPointConicalGradient::appendGradientStages(SkArenaAlloc* alloc, SkRasterPipeline* p,
+ SkRasterPipeline* postPipeline) const {
+ const auto dRadius = fRadius2 - fRadius1;
+
+ if (fType == Type::kRadial) {
+ p->append(SkRasterPipelineOp::xy_to_radius);
+
+ // Tiny twist: radial computes a t for [0, r2], but we want a t for [r1, r2].
+ auto scale = std::max(fRadius1, fRadius2) / dRadius;
+ auto bias = -fRadius1 / dRadius;
+
+ p->append_matrix(alloc, SkMatrix::Translate(bias, 0) * SkMatrix::Scale(scale, 1));
+ return;
+ }
+
+ if (fType == Type::kStrip) {
+ auto* ctx = alloc->make<SkRasterPipeline_2PtConicalCtx>();
+ SkScalar scaledR0 = fRadius1 / this->getCenterX1();
+ ctx->fP0 = scaledR0 * scaledR0;
+ p->append(SkRasterPipelineOp::xy_to_2pt_conical_strip, ctx);
+ p->append(SkRasterPipelineOp::mask_2pt_conical_nan, ctx);
+ postPipeline->append(SkRasterPipelineOp::apply_vector_mask, &ctx->fMask);
+ return;
+ }
+
+ auto* ctx = alloc->make<SkRasterPipeline_2PtConicalCtx>();
+ ctx->fP0 = 1/fFocalData.fR1;
+ ctx->fP1 = fFocalData.fFocalX;
+
+ if (fFocalData.isFocalOnCircle()) {
+ p->append(SkRasterPipelineOp::xy_to_2pt_conical_focal_on_circle);
+ } else if (fFocalData.isWellBehaved()) {
+ p->append(SkRasterPipelineOp::xy_to_2pt_conical_well_behaved, ctx);
+ } else if (fFocalData.isSwapped() || 1 - fFocalData.fFocalX < 0) {
+ p->append(SkRasterPipelineOp::xy_to_2pt_conical_smaller, ctx);
+ } else {
+ p->append(SkRasterPipelineOp::xy_to_2pt_conical_greater, ctx);
+ }
+
+ if (!fFocalData.isWellBehaved()) {
+ p->append(SkRasterPipelineOp::mask_2pt_conical_degenerates, ctx);
+ }
+ if (1 - fFocalData.fFocalX < 0) {
+ p->append(SkRasterPipelineOp::negate_x);
+ }
+ if (!fFocalData.isNativelyFocal()) {
+ p->append(SkRasterPipelineOp::alter_2pt_conical_compensate_focal, ctx);
+ }
+ if (fFocalData.isSwapped()) {
+ p->append(SkRasterPipelineOp::alter_2pt_conical_unswap);
+ }
+ if (!fFocalData.isWellBehaved()) {
+ postPipeline->append(SkRasterPipelineOp::apply_vector_mask, &ctx->fMask);
+ }
+}
+
+skvm::F32 SkTwoPointConicalGradient::transformT(skvm::Builder* p, skvm::Uniforms* uniforms,
+ skvm::Coord coord, skvm::I32* mask) const {
+ auto mag = [](skvm::F32 x, skvm::F32 y) { return sqrt(x*x + y*y); };
+
+ // See https://skia.org/dev/design/conical, and appendStages() above.
+ // There's a lot going on here, and I'm not really sure what's independent
+ // or disjoint, what can be reordered, simplified, etc. Tweak carefully.
+
+ const skvm::F32 x = coord.x,
+ y = coord.y;
+ if (fType == Type::kRadial) {
+ float denom = 1.0f / (fRadius2 - fRadius1),
+ scale = std::max(fRadius1, fRadius2) * denom,
+ bias = -fRadius1 * denom;
+ return mag(x,y) * p->uniformF(uniforms->pushF(scale))
+ + p->uniformF(uniforms->pushF(bias ));
+ }
+
+ if (fType == Type::kStrip) {
+ float r = fRadius1 / this->getCenterX1();
+ skvm::F32 t = x + sqrt(p->uniformF(uniforms->pushF(r*r)) - y*y);
+
+ *mask = (t == t); // t != NaN
+ return t;
+ }
+
+ const skvm::F32 invR1 = p->uniformF(uniforms->pushF(1 / fFocalData.fR1));
+
+ skvm::F32 t;
+ if (fFocalData.isFocalOnCircle()) {
+ t = (y/x) * y + x; // (x^2 + y^2) / x ~~> x + y^2/x ~~> y/x * y + x
+ } else if (fFocalData.isWellBehaved()) {
+ t = mag(x,y) - x*invR1;
+ } else {
+ skvm::F32 k = sqrt(x*x - y*y);
+ if (fFocalData.isSwapped() || 1 - fFocalData.fFocalX < 0) {
+ k = -k;
+ }
+ t = k - x*invR1;
+ }
+
+ if (!fFocalData.isWellBehaved()) {
+ // TODO: not sure why we consider t == 0 degenerate
+ *mask = (t > 0.0f); // and implicitly, t != NaN
+ }
+
+ const skvm::F32 focalX = p->uniformF(uniforms->pushF(fFocalData.fFocalX));
+ if (1 - fFocalData.fFocalX < 0) { t = -t; }
+ if (!fFocalData.isNativelyFocal()) { t += focalX; }
+ if ( fFocalData.isSwapped()) { t = 1.0f - t; }
+ return t;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+#if defined(SK_GANESH)
+
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+#include "src/gpu/ganesh/gradients/GrGradientShader.h"
+
+std::unique_ptr<GrFragmentProcessor>
+SkTwoPointConicalGradient::asFragmentProcessor(const GrFPArgs& args, const MatrixRec& mRec) const {
+ // The 2 point conical gradient can reject a pixel so it does change opacity even if the input
+ // was opaque. Thus, all of these layout FPs disable that optimization.
+ std::unique_ptr<GrFragmentProcessor> fp;
+ SkTLazy<SkMatrix> matrix;
+ switch (this->getType()) {
+ case SkTwoPointConicalGradient::Type::kStrip: {
+ static const SkRuntimeEffect* kEffect =
+ SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform half r0_2;"
+ "half4 main(float2 p) {"
+ "half v = 1;" // validation flag,set to negative to discard fragment later
+ "float t = r0_2 - p.y * p.y;"
+ "if (t >= 0) {"
+ "t = p.x + sqrt(t);"
+ "} else {"
+ "v = -1;"
+ "}"
+ "return half4(half(t), v, 0, 0);"
+ "}"
+ );
+ float r0 = this->getStartRadius() / this->getCenterX1();
+ fp = GrSkSLFP::Make(kEffect, "TwoPointConicalStripLayout", /*inputFP=*/nullptr,
+ GrSkSLFP::OptFlags::kNone,
+ "r0_2", r0 * r0);
+ } break;
+
+ case SkTwoPointConicalGradient::Type::kRadial: {
+ static const SkRuntimeEffect* kEffect =
+ SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ "uniform half r0;"
+ "uniform half lengthScale;"
+ "half4 main(float2 p) {"
+ "half v = 1;" // validation flag,set to negative to discard fragment later
+ "float t = length(p) * lengthScale - r0;"
+ "return half4(half(t), v, 0, 0);"
+ "}"
+ );
+ float dr = this->getDiffRadius();
+ float r0 = this->getStartRadius() / dr;
+ bool isRadiusIncreasing = dr >= 0;
+ fp = GrSkSLFP::Make(kEffect, "TwoPointConicalRadialLayout", /*inputFP=*/nullptr,
+ GrSkSLFP::OptFlags::kNone,
+ "r0", r0,
+ "lengthScale", isRadiusIncreasing ? 1.0f : -1.0f);
+
+ // GPU radial matrix is different from the original matrix, since we map the diff radius
+ // to have |dr| = 1, so manually compute the final gradient matrix here.
+
+ // Map center to (0, 0)
+ matrix.set(SkMatrix::Translate(-this->getStartCenter().fX,
+ -this->getStartCenter().fY));
+ // scale |diffRadius| to 1
+ matrix->postScale(1 / dr, 1 / dr);
+ } break;
+
+ case SkTwoPointConicalGradient::Type::kFocal: {
+ static const SkRuntimeEffect* kEffect =
+ SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
+ // Optimization flags, all specialized:
+ "uniform int isRadiusIncreasing;"
+ "uniform int isFocalOnCircle;"
+ "uniform int isWellBehaved;"
+ "uniform int isSwapped;"
+ "uniform int isNativelyFocal;"
+
+ "uniform half invR1;" // 1/r1
+ "uniform half fx;" // focalX = r0/(r0-r1)
+
+ "half4 main(float2 p) {"
+ "float t = -1;"
+ "half v = 1;" // validation flag,set to negative to discard fragment later
+
+ "float x_t = -1;"
+ "if (bool(isFocalOnCircle)) {"
+ "x_t = dot(p, p) / p.x;"
+ "} else if (bool(isWellBehaved)) {"
+ "x_t = length(p) - p.x * invR1;"
+ "} else {"
+ "float temp = p.x * p.x - p.y * p.y;"
+
+ // Only do sqrt if temp >= 0; this is significantly slower than
+ // checking temp >= 0 in the if statement that checks r(t) >= 0.
+ // But GPU may break if we sqrt a negative float. (Although I
+ // haven't observed that on any devices so far, and the old
+ // approach also does sqrt negative value without a check.) If
+ // the performance is really critical, maybe we should just
+ // compute the area where temp and x_t are always valid and drop
+ // all these ifs.
+ "if (temp >= 0) {"
+ "if (bool(isSwapped) || !bool(isRadiusIncreasing)) {"
+ "x_t = -sqrt(temp) - p.x * invR1;"
+ "} else {"
+ "x_t = sqrt(temp) - p.x * invR1;"
+ "}"
+ "}"
+ "}"
+
+ // The final calculation of t from x_t has lots of static
+ // optimizations but only do them when x_t is positive (which
+ // can be assumed true if isWellBehaved is true)
+ "if (!bool(isWellBehaved)) {"
+ // This will still calculate t even though it will be ignored
+ // later in the pipeline to avoid a branch
+ "if (x_t <= 0.0) {"
+ "v = -1;"
+ "}"
+ "}"
+ "if (bool(isRadiusIncreasing)) {"
+ "if (bool(isNativelyFocal)) {"
+ "t = x_t;"
+ "} else {"
+ "t = x_t + fx;"
+ "}"
+ "} else {"
+ "if (bool(isNativelyFocal)) {"
+ "t = -x_t;"
+ "} else {"
+ "t = -x_t + fx;"
+ "}"
+ "}"
+
+ "if (bool(isSwapped)) {"
+ "t = 1 - t;"
+ "}"
+
+ "return half4(half(t), v, 0, 0);"
+ "}"
+ );
+
+ const SkTwoPointConicalGradient::FocalData& focalData = this->getFocalData();
+ bool isRadiusIncreasing = (1 - focalData.fFocalX) > 0,
+ isFocalOnCircle = focalData.isFocalOnCircle(),
+ isWellBehaved = focalData.isWellBehaved(),
+ isSwapped = focalData.isSwapped(),
+ isNativelyFocal = focalData.isNativelyFocal();
+
+ fp = GrSkSLFP::Make(kEffect, "TwoPointConicalFocalLayout", /*inputFP=*/nullptr,
+ GrSkSLFP::OptFlags::kNone,
+ "isRadiusIncreasing", GrSkSLFP::Specialize<int>(isRadiusIncreasing),
+ "isFocalOnCircle", GrSkSLFP::Specialize<int>(isFocalOnCircle),
+ "isWellBehaved", GrSkSLFP::Specialize<int>(isWellBehaved),
+ "isSwapped", GrSkSLFP::Specialize<int>(isSwapped),
+ "isNativelyFocal", GrSkSLFP::Specialize<int>(isNativelyFocal),
+ "invR1", 1.0f / focalData.fR1,
+ "fx", focalData.fFocalX);
+ } break;
+ }
+ return GrGradientShader::MakeGradientFP(*this,
+ args,
+ mRec,
+ std::move(fp),
+ matrix.getMaybeNull());
+}
+
+#endif
+
+#if defined(SK_GRAPHITE)
+void SkTwoPointConicalGradient::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ SkColor4fXformer xformedColors(this, keyContext.dstColorInfo().colorSpace());
+ const SkPMColor4f* colors = xformedColors.fColors.begin();
+
+ GradientShaderBlocks::GradientData data(GradientType::kConical,
+ fCenter1, fCenter2,
+ fRadius1, fRadius2,
+ 0.0f, 0.0f,
+ fTileMode,
+ fColorCount,
+ colors,
+ fPositions,
+ fInterpolation);
+
+ MakeInterpolatedToDst(keyContext, builder, gatherer,
+ data, fInterpolation,
+ xformedColors.fIntermediateColorSpace.get());
+}
+#endif
+
+// assumes colors is SkColor4f* and pos is SkScalar*
+#define EXPAND_1_COLOR(count) \
+ SkColor4f tmp[2]; \
+ do { \
+ if (1 == count) { \
+ tmp[0] = tmp[1] = colors[0]; \
+ colors = tmp; \
+ pos = nullptr; \
+ count = 2; \
+ } \
+ } while (0)
+
+sk_sp<SkShader> SkGradientShader::MakeTwoPointConical(const SkPoint& start,
+ SkScalar startRadius,
+ const SkPoint& end,
+ SkScalar endRadius,
+ const SkColor4f colors[],
+ sk_sp<SkColorSpace> colorSpace,
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ const Interpolation& interpolation,
+ const SkMatrix* localMatrix) {
+ if (startRadius < 0 || endRadius < 0) {
+ return nullptr;
+ }
+ if (!SkGradientShaderBase::ValidGradient(colors, colorCount, mode, interpolation)) {
+ return nullptr;
+ }
+ if (SkScalarNearlyZero((start - end).length(), SkGradientShaderBase::kDegenerateThreshold)) {
+ // If the center positions are the same, then the gradient is the radial variant of a 2 pt
+ // conical gradient, an actual radial gradient (startRadius == 0), or it is fully degenerate
+ // (startRadius == endRadius).
+ if (SkScalarNearlyEqual(startRadius, endRadius,
+ SkGradientShaderBase::kDegenerateThreshold)) {
+ // Degenerate case, where the interpolation region area approaches zero. The proper
+ // behavior depends on the tile mode, which is consistent with the default degenerate
+ // gradient behavior, except when mode = clamp and the radii > 0.
+ if (mode == SkTileMode::kClamp &&
+ endRadius > SkGradientShaderBase::kDegenerateThreshold) {
+ // The interpolation region becomes an infinitely thin ring at the radius, so the
+ // final gradient will be the first color repeated from p=0 to 1, and then a hard
+ // stop switching to the last color at p=1.
+ static constexpr SkScalar circlePos[3] = {0, 1, 1};
+ SkColor4f reColors[3] = {colors[0], colors[0], colors[colorCount - 1]};
+ return MakeRadial(start, endRadius, reColors, std::move(colorSpace),
+ circlePos, 3, mode, interpolation, localMatrix);
+ } else {
+ // Otherwise use the default degenerate case
+ return SkGradientShaderBase::MakeDegenerateGradient(colors, pos, colorCount,
+ std::move(colorSpace), mode);
+ }
+ } else if (SkScalarNearlyZero(startRadius, SkGradientShaderBase::kDegenerateThreshold)) {
+ // We can treat this gradient as radial, which is faster. If we got here, we know
+ // that endRadius is not equal to 0, so this produces a meaningful gradient
+ return MakeRadial(start, endRadius, colors, std::move(colorSpace), pos, colorCount,
+ mode, interpolation, localMatrix);
+ }
+ // Else it's the 2pt conical radial variant with no degenerate radii, so fall through to the
+ // regular 2pt constructor.
+ }
+
+ if (localMatrix && !localMatrix->invert(nullptr)) {
+ return nullptr;
+ }
+ EXPAND_1_COLOR(colorCount);
+
+ SkGradientShaderBase::ColorStopOptimizer opt(colors, pos, colorCount, mode);
+
+ SkGradientShaderBase::Descriptor desc(opt.fColors, std::move(colorSpace), opt.fPos,
+ opt.fCount, mode, interpolation);
+ return SkTwoPointConicalGradient::Create(start, startRadius, end, endRadius, desc, localMatrix);
+}
+
+#undef EXPAND_1_COLOR
+
+sk_sp<SkShader> SkGradientShader::MakeTwoPointConical(const SkPoint& start,
+ SkScalar startRadius,
+ const SkPoint& end,
+ SkScalar endRadius,
+ const SkColor colors[],
+ const SkScalar pos[],
+ int colorCount,
+ SkTileMode mode,
+ uint32_t flags,
+ const SkMatrix* localMatrix) {
+ SkColorConverter converter(colors, colorCount);
+ return MakeTwoPointConical(start, startRadius, end, endRadius, converter.fColors4f.begin(),
+ nullptr, pos, colorCount, mode, flags, localMatrix);
+}
+
+void SkRegisterTwoPointConicalGradientShaderFlattenable() {
+ SK_REGISTER_FLATTENABLE(SkTwoPointConicalGradient);
+}
diff --git a/gfx/skia/skia/src/sksl/GLSL.std.450.h b/gfx/skia/skia/src/sksl/GLSL.std.450.h
new file mode 100644
index 0000000000..943fd8650f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/GLSL.std.450.h
@@ -0,0 +1,131 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+#ifndef GLSLstd450_H
+#define GLSLstd450_H
+
+static const int GLSLstd450Version = 100;
+static const int GLSLstd450Revision = 3;
+
+enum GLSLstd450 {
+ GLSLstd450Bad = 0, // Don't use
+
+ GLSLstd450Round = 1,
+ GLSLstd450RoundEven = 2,
+ GLSLstd450Trunc = 3,
+ GLSLstd450FAbs = 4,
+ GLSLstd450SAbs = 5,
+ GLSLstd450FSign = 6,
+ GLSLstd450SSign = 7,
+ GLSLstd450Floor = 8,
+ GLSLstd450Ceil = 9,
+ GLSLstd450Fract = 10,
+
+ GLSLstd450Radians = 11,
+ GLSLstd450Degrees = 12,
+ GLSLstd450Sin = 13,
+ GLSLstd450Cos = 14,
+ GLSLstd450Tan = 15,
+ GLSLstd450Asin = 16,
+ GLSLstd450Acos = 17,
+ GLSLstd450Atan = 18,
+ GLSLstd450Sinh = 19,
+ GLSLstd450Cosh = 20,
+ GLSLstd450Tanh = 21,
+ GLSLstd450Asinh = 22,
+ GLSLstd450Acosh = 23,
+ GLSLstd450Atanh = 24,
+ GLSLstd450Atan2 = 25,
+
+ GLSLstd450Pow = 26,
+ GLSLstd450Exp = 27,
+ GLSLstd450Log = 28,
+ GLSLstd450Exp2 = 29,
+ GLSLstd450Log2 = 30,
+ GLSLstd450Sqrt = 31,
+ GLSLstd450InverseSqrt = 32,
+
+ GLSLstd450Determinant = 33,
+ GLSLstd450MatrixInverse = 34,
+
+ GLSLstd450Modf = 35, // second operand needs an OpVariable to write to
+ GLSLstd450ModfStruct = 36, // no OpVariable operand
+ GLSLstd450FMin = 37,
+ GLSLstd450UMin = 38,
+ GLSLstd450SMin = 39,
+ GLSLstd450FMax = 40,
+ GLSLstd450UMax = 41,
+ GLSLstd450SMax = 42,
+ GLSLstd450FClamp = 43,
+ GLSLstd450UClamp = 44,
+ GLSLstd450SClamp = 45,
+ GLSLstd450FMix = 46,
+ GLSLstd450IMix = 47, // Reserved
+ GLSLstd450Step = 48,
+ GLSLstd450SmoothStep = 49,
+
+ GLSLstd450Fma = 50,
+ GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to
+ GLSLstd450FrexpStruct = 52, // no OpVariable operand
+ GLSLstd450Ldexp = 53,
+
+ GLSLstd450PackSnorm4x8 = 54,
+ GLSLstd450PackUnorm4x8 = 55,
+ GLSLstd450PackSnorm2x16 = 56,
+ GLSLstd450PackUnorm2x16 = 57,
+ GLSLstd450PackHalf2x16 = 58,
+ GLSLstd450PackDouble2x32 = 59,
+ GLSLstd450UnpackSnorm2x16 = 60,
+ GLSLstd450UnpackUnorm2x16 = 61,
+ GLSLstd450UnpackHalf2x16 = 62,
+ GLSLstd450UnpackSnorm4x8 = 63,
+ GLSLstd450UnpackUnorm4x8 = 64,
+ GLSLstd450UnpackDouble2x32 = 65,
+
+ GLSLstd450Length = 66,
+ GLSLstd450Distance = 67,
+ GLSLstd450Cross = 68,
+ GLSLstd450Normalize = 69,
+ GLSLstd450FaceForward = 70,
+ GLSLstd450Reflect = 71,
+ GLSLstd450Refract = 72,
+
+ GLSLstd450FindILsb = 73,
+ GLSLstd450FindSMsb = 74,
+ GLSLstd450FindUMsb = 75,
+
+ GLSLstd450InterpolateAtCentroid = 76,
+ GLSLstd450InterpolateAtSample = 77,
+ GLSLstd450InterpolateAtOffset = 78,
+
+ GLSLstd450NMin = 79,
+ GLSLstd450NMax = 80,
+ GLSLstd450NClamp = 81,
+
+ GLSLstd450Count
+};
+
+#endif // #ifndef GLSLstd450_H
diff --git a/gfx/skia/skia/src/sksl/README.md b/gfx/skia/skia/src/sksl/README.md
new file mode 100644
index 0000000000..862f5c6965
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/README.md
@@ -0,0 +1,158 @@
+# Overview
+
+SkSL ("Skia Shading Language") is a variant of GLSL which is used as Skia's
+internal shading language. SkSL is, at its heart, a single standardized version
+of GLSL which avoids all of the various version and dialect differences found
+in GLSL "in the wild", but it does bring a few of its own changes to the table.
+
+Skia uses the SkSL compiler to convert SkSL code to GLSL, GLSL ES, SPIR-V, or
+MSL before handing it over to the graphics driver.
+
+
+# Differences from GLSL
+
+* Precision modifiers are not used. 'float', 'int', and 'uint' are always high
+ precision. New types 'half', 'short', and 'ushort' are medium precision (we
+ do not use low precision).
+* Vector types are named <base type><columns>, so float2 instead of vec2 and
+ bool4 instead of bvec4
+* Matrix types are named <base type><columns>x<rows>, so float2x3 instead of
+ mat2x3 and double4x4 instead of dmat4
+* GLSL caps can be referenced via the syntax 'sk_Caps.<name>', e.g.
+ sk_Caps.integerSupport. The value will be a constant boolean or int,
+ as appropriate. As SkSL supports constant folding and branch elimination, this
+ means that an 'if' statement which statically queries a cap will collapse down
+ to the chosen branch, meaning that:
+
+ if (sk_Caps.integerSupport)
+ do_something();
+ else
+ do_something_else();
+
+ will compile as if you had written either 'do_something();' or
+ 'do_something_else();', depending on whether that cap is enabled or not.
+* no #version statement is required, and it will be ignored if present
+* the output color is sk_FragColor (do not declare it)
+* use sk_Position instead of gl_Position. sk_Position is in device coordinates
+ rather than normalized coordinates.
+* use sk_PointSize instead of gl_PointSize
+* use sk_VertexID instead of gl_VertexID
+* use sk_InstanceID instead of gl_InstanceID
+* the fragment coordinate is sk_FragCoord, and is always relative to the upper
+ left.
+* use sk_Clockwise instead of gl_FrontFacing. This is always relative to an
+ upper left origin.
+* you do not need to include ".0" to make a number a float (meaning that
+ "float2(x, y) * 4" is perfectly legal in SkSL, unlike GLSL where it would
+ often have to be expressed "float2(x, y) * 4.0". There is no performance
+ penalty for this, as the number is converted to a float at compile time)
+* type suffixes on numbers (1.0f, 0xFFu) are both unnecessary and unsupported
+* creating a smaller vector from a larger vector (e.g. float2(float3(1))) is
+ intentionally disallowed, as it is just a wordier way of performing a swizzle.
+ Use swizzles instead.
+* Swizzle components, in addition to the normal rgba / xyzw components, can also
+ be LTRB (meaning "left/top/right/bottom", for when we store rectangles in
+ vectors), and may also be the constants '0' or '1' to produce a constant 0 or
+ 1 in that channel instead of selecting anything from the source vector.
+ foo.rgb1 is equivalent to float4(foo.rgb, 1).
+* All texture functions are named "sample", e.g. sample(sampler2D, float3) is
+ equivalent to GLSL's textureProj(sampler2D, float3).
+* Functions support the 'inline' modifier, which causes the compiler to ignore
+ its normal inlining heuristics and inline the function if at all possible
+* some built-in functions and one or two rarely-used language features are not
+ yet supported (sorry!)
+
+
+# Synchronization Primitives
+
+SkSL offers atomic operations and synchronization primitives geared towards GPU compute
+programs. These primitives are designed to abstract over the capabilities provided by
+MSL, SPIR-V, and WGSL, and differ from the corresponding primitives in GLSL.
+
+## Atomics
+
+SkSL provides the `atomicUint` type. This is an opaque type that requires the use of an
+atomic intrinsic (such as `atomicLoad`, `atomicStore`, and `atomicAdd`) to act on its value (which
+is of type `uint`).
+
+A variable with the `atomicUint` type must be declared inside a writable storage buffer block or as
+a workgroup-shared variable. When declared inside a buffer block, it is guaranteed to conform to the
+same size and stride as a `uint`.
+
+```
+workgroup atomicUint myLocalAtomicUint;
+
+layout(set = 0, binding = 0) buffer mySSBO {
+ atomicUint myGlobalAtomicUint;
+};
+
+```
+
+An `atomicUint` can be declared as a struct member or the element type of an array, provided that
+the struct/array type is only instantiated in a workgroup-shared or storage buffer block variable.
+
+### Backend considerations and differences from GLSL
+
+`atomicUint` should not be confused with the GLSL [`atomic_uint` (aka Atomic
+Counter)](https://www.khronos.org/opengl/wiki/Atomic_Counter) type. The semantics provided by
+`atomicUint` are more similar to GLSL ["Atomic Memory
+Functions"](https://www.khronos.org/opengl/wiki/Atomic_Variable_Operations)
+(see GLSL Spec v4.3, 8.11 "Atomic Memory Functions"). The key difference is that SkSL atomic
+operations only operate on a variable of type `atomicUint` while GLSL Atomic Memory Functions can
+operate over arbitrary memory locations (such as a component of a vector).
+
+* The semantics of `atomicUint` are similar to Metal's `atomic<uint>` and WGSL's `atomic<u32>`.
+ These are the types that an `atomicUint` is translated to when targeting Metal and WGSL.
+* When translated to Metal, the atomic intrinsics use relaxed memory order semantics.
+* When translated to SPIR-V, the atomic intrinsics use relaxed [memory
+ semantics](https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#Memory_Semantics_-id-)
+ (i.e. `0x0 None`). The [memory
+ scope](https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#Scope_-id-) is either `1
+ Device` or `2 Workgroup` depending on whether the `atomicUint` is declared in a buffer block or
+ workgroup variable.
+
+## Barriers
+
+SkSL provides two barrier intrinsics: `workgroupBarrier()` and `storageBarrier()`. These functions
+are only available in compute programs and synchronize access to workgroup-shared and storage buffer
+memory between invocations in the same workgroup. They provide the same semantics as the equivalent
+[WGSL Synchronization Built-in Functions](https://www.w3.org/TR/WGSL/#sync-builtin-functions). More
+specifically:
+
+* Both functions execute a control barrier with Acquire/Release memory ordering.
+* Both functions use a `Workgroup` execution and memory scope. This means that a coherent memory
+ view is only guaranteed between invocations in the same workgroup and NOT across workgroups in a
+ given compute pipeline dispatch. If multiple workgroups require a _synchronized_ coherent view
+ over the same shared mutable state, their access must be synchronized via other means (such as a
+ pipeline barrier between multiple dispatches).
+
+### Backend considerations
+
+* The closest GLSL equivalent for `workgroupBarrier()` is the
+[`barrier()`](https://registry.khronos.org/OpenGL-Refpages/gl4/html/barrier.xhtml) intrinsic. Both
+`workgroupBarrier()` and `storageBarrier()` can be defined as the following invocations of the
+`controlBarrier` intrinsic defined in
+[GL_KHR_memory_scope_semantics](https://github.com/KhronosGroup/GLSL/blob/master/extensions/khr/GL_KHR_memory_scope_semantics.txt):
+
+```
+// workgroupBarrier():
+controlBarrier(gl_ScopeWorkgroup,
+ gl_ScopeWorkgroup,
+ gl_StorageSemanticsShared,
+ gl_SemanticsAcquireRelease);
+
+// storageBarrier():
+controlBarrier(gl_ScopeWorkgroup,
+ gl_ScopeWorkgroup,
+ gl_StorageSemanticsBuffer,
+ gl_SemanticsAcquireRelease);
+```
+
+* In Metal, `workgroupBarrier()` is equivalent to `threadgroup_barrier(mem_flags::mem_threadgroup)`.
+ `storageBarrier()` is equivalent to `threadgroup_barrier(mem_flags::mem_device)`.
+
+* In Vulkan SPIR-V, `workgroupBarrier()` is equivalent to `OpControlBarrier` with `Workgroup`
+ execution and memory scope, and `AcquireRelease | WorkgroupMemory` memory semantics.
+
+ `storageBarrier()` is equivalent to `OpControlBarrier` with `Workgroup` execution and memory
+ scope, and `AcquireRelease | UniformMemory` memory semantics.
diff --git a/gfx/skia/skia/src/sksl/SkSLAnalysis.cpp b/gfx/skia/skia/src/sksl/SkSLAnalysis.cpp
new file mode 100644
index 0000000000..a7aba3d02c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLAnalysis.cpp
@@ -0,0 +1,705 @@
+/*
+ * Copyright 2020 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLAnalysis.h"
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLSampleUsage.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/analysis/SkSLNoOpErrorReporter.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLChildCall.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/transform/SkSLProgramWriter.h"
+
+#include <optional>
+#include <string>
+#include <string_view>
+
+namespace SkSL {
+
+namespace {
+
+// Visitor that determines the merged SampleUsage for a given child in the program.
+class MergeSampleUsageVisitor : public ProgramVisitor {
+public:
+ MergeSampleUsageVisitor(const Context& context,
+ const Variable& child,
+ bool writesToSampleCoords)
+ : fContext(context), fChild(child), fWritesToSampleCoords(writesToSampleCoords) {}
+
+ SampleUsage visit(const Program& program) {
+ fUsage = SampleUsage(); // reset to none
+ INHERITED::visit(program);
+ return fUsage;
+ }
+
+ int elidedSampleCoordCount() const { return fElidedSampleCoordCount; }
+
+protected:
+ const Context& fContext;
+ const Variable& fChild;
+ const bool fWritesToSampleCoords;
+ SampleUsage fUsage;
+ int fElidedSampleCoordCount = 0;
+
+ bool visitExpression(const Expression& e) override {
+ // Looking for child(...)
+ if (e.is<ChildCall>() && &e.as<ChildCall>().child() == &fChild) {
+ // Determine the type of call at this site, and merge it with the accumulated state
+ const ExpressionArray& arguments = e.as<ChildCall>().arguments();
+ SkASSERT(arguments.size() >= 1);
+
+ const Expression* maybeCoords = arguments[0].get();
+ if (maybeCoords->type().matches(*fContext.fTypes.fFloat2)) {
+ // If the coords are a direct reference to the program's sample-coords, and those
+ // coords are never modified, we can conservatively turn this into PassThrough
+ // sampling. In all other cases, we consider it Explicit.
+ if (!fWritesToSampleCoords && maybeCoords->is<VariableReference>() &&
+ maybeCoords->as<VariableReference>().variable()->modifiers().fLayout.fBuiltin ==
+ SK_MAIN_COORDS_BUILTIN) {
+ fUsage.merge(SampleUsage::PassThrough());
+ ++fElidedSampleCoordCount;
+ } else {
+ fUsage.merge(SampleUsage::Explicit());
+ }
+ } else {
+ // child(inputColor) or child(srcColor, dstColor) -> PassThrough
+ fUsage.merge(SampleUsage::PassThrough());
+ }
+ }
+
+ return INHERITED::visitExpression(e);
+ }
+
+ using INHERITED = ProgramVisitor;
+};
+
+// Visitor that searches for child calls from a function other than main()
+class SampleOutsideMainVisitor : public ProgramVisitor {
+public:
+ SampleOutsideMainVisitor() {}
+
+ bool visitExpression(const Expression& e) override {
+ if (e.is<ChildCall>()) {
+ return true;
+ }
+ return INHERITED::visitExpression(e);
+ }
+
+ bool visitProgramElement(const ProgramElement& p) override {
+ return p.is<FunctionDefinition>() &&
+ !p.as<FunctionDefinition>().declaration().isMain() &&
+ INHERITED::visitProgramElement(p);
+ }
+
+ using INHERITED = ProgramVisitor;
+};
+
+class ReturnsNonOpaqueColorVisitor : public ProgramVisitor {
+public:
+ ReturnsNonOpaqueColorVisitor() {}
+
+ bool visitStatement(const Statement& s) override {
+ if (s.is<ReturnStatement>()) {
+ const Expression* e = s.as<ReturnStatement>().expression().get();
+ bool knownOpaque = e && e->type().slotCount() == 4 &&
+ ConstantFolder::GetConstantValueForVariable(*e)
+ ->getConstantValue(/*n=*/3)
+ .value_or(0) == 1;
+ return !knownOpaque;
+ }
+ return INHERITED::visitStatement(s);
+ }
+
+ bool visitExpression(const Expression& e) override {
+ // No need to recurse into expressions, these can never contain return statements
+ return false;
+ }
+
+ using INHERITED = ProgramVisitor;
+ using INHERITED::visitProgramElement;
+};
+
+// Visitor that counts the number of nodes visited
+class NodeCountVisitor : public ProgramVisitor {
+public:
+ NodeCountVisitor(int limit) : fLimit(limit) {}
+
+ int visit(const Statement& s) {
+ this->visitStatement(s);
+ return fCount;
+ }
+
+ bool visitExpression(const Expression& e) override {
+ ++fCount;
+ return (fCount >= fLimit) || INHERITED::visitExpression(e);
+ }
+
+ bool visitProgramElement(const ProgramElement& p) override {
+ ++fCount;
+ return (fCount >= fLimit) || INHERITED::visitProgramElement(p);
+ }
+
+ bool visitStatement(const Statement& s) override {
+ ++fCount;
+ return (fCount >= fLimit) || INHERITED::visitStatement(s);
+ }
+
+private:
+ int fCount = 0;
+ int fLimit;
+
+ using INHERITED = ProgramVisitor;
+};
+
+class VariableWriteVisitor : public ProgramVisitor {
+public:
+ VariableWriteVisitor(const Variable* var)
+ : fVar(var) {}
+
+ bool visit(const Statement& s) {
+ return this->visitStatement(s);
+ }
+
+ bool visitExpression(const Expression& e) override {
+ if (e.is<VariableReference>()) {
+ const VariableReference& ref = e.as<VariableReference>();
+ if (ref.variable() == fVar &&
+ (ref.refKind() == VariableReference::RefKind::kWrite ||
+ ref.refKind() == VariableReference::RefKind::kReadWrite ||
+ ref.refKind() == VariableReference::RefKind::kPointer)) {
+ return true;
+ }
+ }
+ return INHERITED::visitExpression(e);
+ }
+
+private:
+ const Variable* fVar;
+
+ using INHERITED = ProgramVisitor;
+};
+
+// This isn't actually using ProgramVisitor, because it only considers a subset of the fields for
+// any given expression kind. For instance, when indexing an array (e.g. `x[1]`), we only want to
+// know if the base (`x`) is assignable; the index expression (`1`) doesn't need to be.
+class IsAssignableVisitor {
+public:
+ IsAssignableVisitor(ErrorReporter* errors) : fErrors(errors) {}
+
+ bool visit(Expression& expr, Analysis::AssignmentInfo* info) {
+ int oldErrorCount = fErrors->errorCount();
+ this->visitExpression(expr);
+ if (info) {
+ info->fAssignedVar = fAssignedVar;
+ }
+ return fErrors->errorCount() == oldErrorCount;
+ }
+
+ void visitExpression(Expression& expr, const FieldAccess* fieldAccess = nullptr) {
+ switch (expr.kind()) {
+ case Expression::Kind::kVariableReference: {
+ VariableReference& varRef = expr.as<VariableReference>();
+ const Variable* var = varRef.variable();
+ auto fieldName = [&] {
+ return fieldAccess ? fieldAccess->description(OperatorPrecedence::kTopLevel)
+ : std::string(var->name());
+ };
+ if (var->modifiers().fFlags & (Modifiers::kConst_Flag | Modifiers::kUniform_Flag)) {
+ fErrors->error(expr.fPosition,
+ "cannot modify immutable variable '" + fieldName() + "'");
+ } else if (var->storage() == Variable::Storage::kGlobal &&
+ (var->modifiers().fFlags & Modifiers::kIn_Flag)) {
+ fErrors->error(expr.fPosition,
+ "cannot modify pipeline input variable '" + fieldName() + "'");
+ } else {
+ SkASSERT(fAssignedVar == nullptr);
+ fAssignedVar = &varRef;
+ }
+ break;
+ }
+ case Expression::Kind::kFieldAccess: {
+ const FieldAccess& f = expr.as<FieldAccess>();
+ this->visitExpression(*f.base(), &f);
+ break;
+ }
+ case Expression::Kind::kSwizzle: {
+ const Swizzle& swizzle = expr.as<Swizzle>();
+ this->checkSwizzleWrite(swizzle);
+ this->visitExpression(*swizzle.base(), fieldAccess);
+ break;
+ }
+ case Expression::Kind::kIndex:
+ this->visitExpression(*expr.as<IndexExpression>().base(), fieldAccess);
+ break;
+
+ case Expression::Kind::kPoison:
+ break;
+
+ default:
+ fErrors->error(expr.fPosition, "cannot assign to this expression");
+ break;
+ }
+ }
+
+private:
+ void checkSwizzleWrite(const Swizzle& swizzle) {
+ int bits = 0;
+ for (int8_t idx : swizzle.components()) {
+ SkASSERT(idx >= SwizzleComponent::X && idx <= SwizzleComponent::W);
+ int bit = 1 << idx;
+ if (bits & bit) {
+ fErrors->error(swizzle.fPosition,
+ "cannot write to the same swizzle field more than once");
+ break;
+ }
+ bits |= bit;
+ }
+ }
+
+ ErrorReporter* fErrors;
+ VariableReference* fAssignedVar = nullptr;
+
+ using INHERITED = ProgramVisitor;
+};
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+// Analysis
+
+SampleUsage Analysis::GetSampleUsage(const Program& program,
+ const Variable& child,
+ bool writesToSampleCoords,
+ int* elidedSampleCoordCount) {
+ MergeSampleUsageVisitor visitor(*program.fContext, child, writesToSampleCoords);
+ SampleUsage result = visitor.visit(program);
+ if (elidedSampleCoordCount) {
+ *elidedSampleCoordCount += visitor.elidedSampleCoordCount();
+ }
+ return result;
+}
+
+bool Analysis::ReferencesBuiltin(const Program& program, int builtin) {
+ SkASSERT(program.fUsage);
+ for (const auto& [variable, counts] : program.fUsage->fVariableCounts) {
+ if (counts.fRead > 0 && variable->modifiers().fLayout.fBuiltin == builtin) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Analysis::ReferencesSampleCoords(const Program& program) {
+ return Analysis::ReferencesBuiltin(program, SK_MAIN_COORDS_BUILTIN);
+}
+
+bool Analysis::ReferencesFragCoords(const Program& program) {
+ return Analysis::ReferencesBuiltin(program, SK_FRAGCOORD_BUILTIN);
+}
+
+bool Analysis::CallsSampleOutsideMain(const Program& program) {
+ SampleOutsideMainVisitor visitor;
+ return visitor.visit(program);
+}
+
+bool Analysis::CallsColorTransformIntrinsics(const Program& program) {
+ for (auto [fn, count] : program.usage()->fCallCounts) {
+ if (count != 0 && (fn->intrinsicKind() == k_toLinearSrgb_IntrinsicKind ||
+ fn->intrinsicKind() == k_fromLinearSrgb_IntrinsicKind)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Analysis::ReturnsOpaqueColor(const FunctionDefinition& function) {
+ ReturnsNonOpaqueColorVisitor visitor;
+ return !visitor.visitProgramElement(function);
+}
+
+bool Analysis::ContainsRTAdjust(const Expression& expr) {
+ class ContainsRTAdjustVisitor : public ProgramVisitor {
+ public:
+ bool visitExpression(const Expression& expr) override {
+ if (expr.is<VariableReference>() &&
+ expr.as<VariableReference>().variable()->name() == Compiler::RTADJUST_NAME) {
+ return true;
+ }
+ return INHERITED::visitExpression(expr);
+ }
+
+ using INHERITED = ProgramVisitor;
+ };
+
+ ContainsRTAdjustVisitor visitor;
+ return visitor.visitExpression(expr);
+}
+
+bool Analysis::IsCompileTimeConstant(const Expression& expr) {
+ class IsCompileTimeConstantVisitor : public ProgramVisitor {
+ public:
+ bool visitExpression(const Expression& expr) override {
+ switch (expr.kind()) {
+ case Expression::Kind::kLiteral:
+ // Literals are compile-time constants.
+ return false;
+
+ case Expression::Kind::kConstructorArray:
+ case Expression::Kind::kConstructorCompound:
+ case Expression::Kind::kConstructorDiagonalMatrix:
+ case Expression::Kind::kConstructorMatrixResize:
+ case Expression::Kind::kConstructorSplat:
+ case Expression::Kind::kConstructorStruct:
+ // Constructors might be compile-time constants, if they are composed entirely
+ // of literals and constructors. (Casting constructors are intentionally omitted
+ // here. If the value inside was a compile-time constant, we would have not have
+ // generated a cast at all.)
+ return INHERITED::visitExpression(expr);
+
+ default:
+ // This expression isn't a compile-time constant.
+ fIsConstant = false;
+ return true;
+ }
+ }
+
+ bool fIsConstant = true;
+ using INHERITED = ProgramVisitor;
+ };
+
+ IsCompileTimeConstantVisitor visitor;
+ visitor.visitExpression(expr);
+ return visitor.fIsConstant;
+}
+
+bool Analysis::DetectVarDeclarationWithoutScope(const Statement& stmt, ErrorReporter* errors) {
+ // A variable declaration can create either a lone VarDeclaration or an unscoped Block
+ // containing multiple VarDeclaration statements. We need to detect either case.
+ const Variable* var;
+ if (stmt.is<VarDeclaration>()) {
+ // The single-variable case. No blocks at all.
+ var = stmt.as<VarDeclaration>().var();
+ } else if (stmt.is<Block>()) {
+ // The multiple-variable case: an unscoped, non-empty block...
+ const Block& block = stmt.as<Block>();
+ if (block.isScope() || block.children().empty()) {
+ return false;
+ }
+ // ... holding a variable declaration.
+ const Statement& innerStmt = *block.children().front();
+ if (!innerStmt.is<VarDeclaration>()) {
+ return false;
+ }
+ var = innerStmt.as<VarDeclaration>().var();
+ } else {
+ // This statement wasn't a variable declaration. No problem.
+ return false;
+ }
+
+ // Report an error.
+ SkASSERT(var);
+ if (errors) {
+ errors->error(var->fPosition,
+ "variable '" + std::string(var->name()) + "' must be created in a scope");
+ }
+ return true;
+}
+
+int Analysis::NodeCountUpToLimit(const FunctionDefinition& function, int limit) {
+ return NodeCountVisitor{limit}.visit(*function.body());
+}
+
+bool Analysis::StatementWritesToVariable(const Statement& stmt, const Variable& var) {
+ return VariableWriteVisitor(&var).visit(stmt);
+}
+
+bool Analysis::IsAssignable(Expression& expr, AssignmentInfo* info, ErrorReporter* errors) {
+ NoOpErrorReporter unusedErrors;
+ return IsAssignableVisitor{errors ? errors : &unusedErrors}.visit(expr, info);
+}
+
+bool Analysis::UpdateVariableRefKind(Expression* expr,
+ VariableReference::RefKind kind,
+ ErrorReporter* errors) {
+ Analysis::AssignmentInfo info;
+ if (!Analysis::IsAssignable(*expr, &info, errors)) {
+ return false;
+ }
+ if (!info.fAssignedVar) {
+ if (errors) {
+ errors->error(expr->fPosition, "can't assign to expression '" + expr->description() +
+ "'");
+ }
+ return false;
+ }
+ info.fAssignedVar->setRefKind(kind);
+ return true;
+}
+
+class ES2IndexingVisitor : public ProgramVisitor {
+public:
+ ES2IndexingVisitor(ErrorReporter& errors) : fErrors(errors) {}
+
+ bool visitStatement(const Statement& s) override {
+ if (s.is<ForStatement>()) {
+ const ForStatement& f = s.as<ForStatement>();
+ SkASSERT(f.initializer() && f.initializer()->is<VarDeclaration>());
+ const Variable* var = f.initializer()->as<VarDeclaration>().var();
+ auto [iter, inserted] = fLoopIndices.insert(var);
+ SkASSERT(inserted);
+ bool result = this->visitStatement(*f.statement());
+ fLoopIndices.erase(iter);
+ return result;
+ }
+ return INHERITED::visitStatement(s);
+ }
+
+ bool visitExpression(const Expression& e) override {
+ if (e.is<IndexExpression>()) {
+ const IndexExpression& i = e.as<IndexExpression>();
+ if (!Analysis::IsConstantIndexExpression(*i.index(), &fLoopIndices)) {
+ fErrors.error(i.fPosition, "index expression must be constant");
+ return true;
+ }
+ }
+ return INHERITED::visitExpression(e);
+ }
+
+ using ProgramVisitor::visitProgramElement;
+
+private:
+ ErrorReporter& fErrors;
+ std::set<const Variable*> fLoopIndices;
+ using INHERITED = ProgramVisitor;
+};
+
+void Analysis::ValidateIndexingForES2(const ProgramElement& pe, ErrorReporter& errors) {
+ ES2IndexingVisitor visitor(errors);
+ visitor.visitProgramElement(pe);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ProgramVisitor
+
+bool ProgramVisitor::visit(const Program& program) {
+ for (const ProgramElement* pe : program.elements()) {
+ if (this->visitProgramElement(*pe)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+template <typename T> bool TProgramVisitor<T>::visitExpression(typename T::Expression& e) {
+ switch (e.kind()) {
+ case Expression::Kind::kFunctionReference:
+ case Expression::Kind::kLiteral:
+ case Expression::Kind::kMethodReference:
+ case Expression::Kind::kPoison:
+ case Expression::Kind::kSetting:
+ case Expression::Kind::kTypeReference:
+ case Expression::Kind::kVariableReference:
+ // Leaf expressions return false
+ return false;
+
+ case Expression::Kind::kBinary: {
+ auto& b = e.template as<BinaryExpression>();
+ return (b.left() && this->visitExpressionPtr(b.left())) ||
+ (b.right() && this->visitExpressionPtr(b.right()));
+ }
+ case Expression::Kind::kChildCall: {
+ // We don't visit the child variable itself, just the arguments
+ auto& c = e.template as<ChildCall>();
+ for (auto& arg : c.arguments()) {
+ if (arg && this->visitExpressionPtr(arg)) { return true; }
+ }
+ return false;
+ }
+ case Expression::Kind::kConstructorArray:
+ case Expression::Kind::kConstructorArrayCast:
+ case Expression::Kind::kConstructorCompound:
+ case Expression::Kind::kConstructorCompoundCast:
+ case Expression::Kind::kConstructorDiagonalMatrix:
+ case Expression::Kind::kConstructorMatrixResize:
+ case Expression::Kind::kConstructorScalarCast:
+ case Expression::Kind::kConstructorSplat:
+ case Expression::Kind::kConstructorStruct: {
+ auto& c = e.asAnyConstructor();
+ for (auto& arg : c.argumentSpan()) {
+ if (this->visitExpressionPtr(arg)) { return true; }
+ }
+ return false;
+ }
+ case Expression::Kind::kFieldAccess:
+ return this->visitExpressionPtr(e.template as<FieldAccess>().base());
+
+ case Expression::Kind::kFunctionCall: {
+ auto& c = e.template as<FunctionCall>();
+ for (auto& arg : c.arguments()) {
+ if (arg && this->visitExpressionPtr(arg)) { return true; }
+ }
+ return false;
+ }
+ case Expression::Kind::kIndex: {
+ auto& i = e.template as<IndexExpression>();
+ return this->visitExpressionPtr(i.base()) || this->visitExpressionPtr(i.index());
+ }
+ case Expression::Kind::kPostfix:
+ return this->visitExpressionPtr(e.template as<PostfixExpression>().operand());
+
+ case Expression::Kind::kPrefix:
+ return this->visitExpressionPtr(e.template as<PrefixExpression>().operand());
+
+ case Expression::Kind::kSwizzle: {
+ auto& s = e.template as<Swizzle>();
+ return s.base() && this->visitExpressionPtr(s.base());
+ }
+
+ case Expression::Kind::kTernary: {
+ auto& t = e.template as<TernaryExpression>();
+ return this->visitExpressionPtr(t.test()) ||
+ (t.ifTrue() && this->visitExpressionPtr(t.ifTrue())) ||
+ (t.ifFalse() && this->visitExpressionPtr(t.ifFalse()));
+ }
+ default:
+ SkUNREACHABLE;
+ }
+}
+
+template <typename T> bool TProgramVisitor<T>::visitStatement(typename T::Statement& s) {
+ switch (s.kind()) {
+ case Statement::Kind::kBreak:
+ case Statement::Kind::kContinue:
+ case Statement::Kind::kDiscard:
+ case Statement::Kind::kNop:
+ // Leaf statements just return false
+ return false;
+
+ case Statement::Kind::kBlock:
+ for (auto& stmt : s.template as<Block>().children()) {
+ if (stmt && this->visitStatementPtr(stmt)) {
+ return true;
+ }
+ }
+ return false;
+
+ case Statement::Kind::kSwitchCase: {
+ auto& sc = s.template as<SwitchCase>();
+ return this->visitStatementPtr(sc.statement());
+ }
+ case Statement::Kind::kDo: {
+ auto& d = s.template as<DoStatement>();
+ return this->visitExpressionPtr(d.test()) || this->visitStatementPtr(d.statement());
+ }
+ case Statement::Kind::kExpression:
+ return this->visitExpressionPtr(s.template as<ExpressionStatement>().expression());
+
+ case Statement::Kind::kFor: {
+ auto& f = s.template as<ForStatement>();
+ return (f.initializer() && this->visitStatementPtr(f.initializer())) ||
+ (f.test() && this->visitExpressionPtr(f.test())) ||
+ (f.next() && this->visitExpressionPtr(f.next())) ||
+ this->visitStatementPtr(f.statement());
+ }
+ case Statement::Kind::kIf: {
+ auto& i = s.template as<IfStatement>();
+ return (i.test() && this->visitExpressionPtr(i.test())) ||
+ (i.ifTrue() && this->visitStatementPtr(i.ifTrue())) ||
+ (i.ifFalse() && this->visitStatementPtr(i.ifFalse()));
+ }
+ case Statement::Kind::kReturn: {
+ auto& r = s.template as<ReturnStatement>();
+ return r.expression() && this->visitExpressionPtr(r.expression());
+ }
+ case Statement::Kind::kSwitch: {
+ auto& sw = s.template as<SwitchStatement>();
+ if (this->visitExpressionPtr(sw.value())) {
+ return true;
+ }
+ for (auto& c : sw.cases()) {
+ if (this->visitStatementPtr(c)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ case Statement::Kind::kVarDeclaration: {
+ auto& v = s.template as<VarDeclaration>();
+ return v.value() && this->visitExpressionPtr(v.value());
+ }
+ default:
+ SkUNREACHABLE;
+ }
+}
+
+template <typename T> bool TProgramVisitor<T>::visitProgramElement(typename T::ProgramElement& pe) {
+ switch (pe.kind()) {
+ case ProgramElement::Kind::kExtension:
+ case ProgramElement::Kind::kFunctionPrototype:
+ case ProgramElement::Kind::kInterfaceBlock:
+ case ProgramElement::Kind::kModifiers:
+ case ProgramElement::Kind::kStructDefinition:
+ // Leaf program elements just return false by default
+ return false;
+
+ case ProgramElement::Kind::kFunction:
+ return this->visitStatementPtr(pe.template as<FunctionDefinition>().body());
+
+ case ProgramElement::Kind::kGlobalVar:
+ return this->visitStatementPtr(pe.template as<GlobalVarDeclaration>().declaration());
+
+ default:
+ SkUNREACHABLE;
+ }
+}
+
+template class TProgramVisitor<ProgramVisitorTypes>;
+template class TProgramVisitor<ProgramWriterTypes>;
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLAnalysis.h b/gfx/skia/skia/src/sksl/SkSLAnalysis.h
new file mode 100644
index 0000000000..b875098fda
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLAnalysis.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLAnalysis_DEFINED
+#define SkSLAnalysis_DEFINED
+
+#include "include/private/SkSLSampleUsage.h"
+#include "include/private/base/SkTArray.h"
+
+#include <cstdint>
+#include <memory>
+#include <set>
+#include <vector>
+
+namespace SkSL {
+
+class Context;
+class ErrorReporter;
+class Expression;
+class FunctionDeclaration;
+class FunctionDefinition;
+class Position;
+class ProgramElement;
+class ProgramUsage;
+class Statement;
+class SymbolTable;
+class Variable;
+class VariableReference;
+enum class VariableRefKind : int8_t;
+struct ForLoopPositions;
+struct LoopUnrollInfo;
+struct Module;
+struct Program;
+
+/**
+ * Provides utilities for analyzing SkSL statically before it's composed into a full program.
+ */
+namespace Analysis {
+
+/**
+ * Determines how `program` samples `child`. By default, assumes that the sample coords
+ * (SK_MAIN_COORDS_BUILTIN) might be modified, so `child.eval(sampleCoords)` is treated as
+ * Explicit. If writesToSampleCoords is false, treats that as PassThrough instead.
+ * If elidedSampleCoordCount is provided, the pointed to value will be incremented by the
+ * number of sample calls where the above rewrite was performed.
+ */
+SampleUsage GetSampleUsage(const Program& program,
+ const Variable& child,
+ bool writesToSampleCoords = true,
+ int* elidedSampleCoordCount = nullptr);
+
+bool ReferencesBuiltin(const Program& program, int builtin);
+
+bool ReferencesSampleCoords(const Program& program);
+bool ReferencesFragCoords(const Program& program);
+
+bool CallsSampleOutsideMain(const Program& program);
+
+bool CallsColorTransformIntrinsics(const Program& program);
+
+/**
+ * Determines if `function` always returns an opaque color (a vec4 where the last component is known
+ * to be 1). This is conservative, and based on constant expression analysis.
+ */
+bool ReturnsOpaqueColor(const FunctionDefinition& function);
+
+/**
+ * Checks for recursion or overly-deep function-call chains, and rejects programs which have them.
+ * Also, computes the size of the program in a completely flattened state--loops fully unrolled,
+ * function calls inlined--and rejects programs that exceed an arbitrary upper bound. This is
+ * intended to prevent absurdly large programs from overwhelming SkVM. Only strict-ES2 mode is
+ * supported; complex control flow is not SkVM-compatible (and this becomes the halting problem).
+ */
+bool CheckProgramStructure(const Program& program, bool enforceSizeLimit);
+
+/** Determines if `expr` contains a reference to the variable sk_RTAdjust. */
+bool ContainsRTAdjust(const Expression& expr);
+
+/** Determines if `expr` has any side effects. (Is the expression state-altering or pure?) */
+bool HasSideEffects(const Expression& expr);
+
+/** Determines if `expr` is a compile-time constant (composed of just constructors and literals). */
+bool IsCompileTimeConstant(const Expression& expr);
+
+/**
+ * Determines if `expr` is a dynamically-uniform expression; this returns true if the expression
+ * could be evaluated at compile time if uniform values were known.
+ */
+bool IsDynamicallyUniformExpression(const Expression& expr);
+
+/**
+ * Detect an orphaned variable declaration outside of a scope, e.g. if (true) int a;. Returns
+ * true if an error was reported.
+ */
+bool DetectVarDeclarationWithoutScope(const Statement& stmt, ErrorReporter* errors = nullptr);
+
+int NodeCountUpToLimit(const FunctionDefinition& function, int limit);
+
+/**
+ * Finds unconditional exits from a switch-case. Returns true if this statement unconditionally
+ * causes an exit from this switch (via continue, break or return).
+ */
+bool SwitchCaseContainsUnconditionalExit(Statement& stmt);
+
+/**
+ * Finds conditional exits from a switch-case. Returns true if this statement contains a
+ * conditional that wraps a potential exit from the switch (via continue, break or return).
+ */
+bool SwitchCaseContainsConditionalExit(Statement& stmt);
+
+std::unique_ptr<ProgramUsage> GetUsage(const Program& program);
+std::unique_ptr<ProgramUsage> GetUsage(const Module& module);
+
+/** Returns true if the passed-in statement might alter `var`. */
+bool StatementWritesToVariable(const Statement& stmt, const Variable& var);
+
+/**
+ * Detects if the passed-in block contains a `continue`, `break` or `return` that could directly
+ * affect its control flow. (A `continue` or `break` nested inside an inner loop/switch will not
+ * affect the loop, but a `return` will.)
+ */
+struct LoopControlFlowInfo {
+ bool fHasContinue = false;
+ bool fHasBreak = false;
+ bool fHasReturn = false;
+};
+LoopControlFlowInfo GetLoopControlFlowInfo(const Statement& stmt);
+
+/**
+ * Returns true if the expression can be assigned-into. Pass `info` if you want to know the
+ * VariableReference that will be written to. Pass `errors` to report an error for expressions that
+ * are not actually writable.
+ */
+struct AssignmentInfo {
+ VariableReference* fAssignedVar = nullptr;
+};
+bool IsAssignable(Expression& expr, AssignmentInfo* info = nullptr,
+ ErrorReporter* errors = nullptr);
+
+/**
+ * Updates the `refKind` field of the VariableReference at the top level of `expr`.
+ * If `expr` can be assigned to (`IsAssignable`), true is returned and no errors are reported.
+ * If not, false is returned, and an error is reported if `errors` is non-null.
+ */
+bool UpdateVariableRefKind(Expression* expr, VariableRefKind kind, ErrorReporter* errors = nullptr);
+
+/**
+ * A "trivial" expression is one where we'd feel comfortable cloning it multiple times in
+ * the code, without worrying about incurring a performance penalty. Examples:
+ * - true
+ * - 3.14159265
+ * - myIntVariable
+ * - myColor.rgb
+ * - myArray[123]
+ * - myStruct.myField
+ * - half4(0)
+ *
+ * Trivial-ness is stackable. Somewhat large expressions can occasionally make the cut:
+ * - half4(myColor.a)
+ * - myStruct.myArrayField[7].xzy
+ */
+bool IsTrivialExpression(const Expression& expr);
+
+/**
+ * Returns true if both expression trees are the same. Used by the optimizer to look for self-
+ * assignment or self-comparison; won't necessarily catch complex cases. Rejects expressions
+ * that may cause side effects.
+ */
+bool IsSameExpressionTree(const Expression& left, const Expression& right);
+
+/**
+ * Returns true if expr is a constant-expression, as defined by GLSL 1.0, section 5.10.
+ * A constant expression is one of:
+ * - A literal value
+ * - A global or local variable qualified as 'const', excluding function parameters
+ * - An expression formed by an operator on operands that are constant expressions, including
+ * getting an element of a constant vector or a constant matrix, or a field of a constant
+ * structure
+ * - A constructor whose arguments are all constant expressions
+ * - A built-in function call whose arguments are all constant expressions, with the exception
+ * of the texture lookup functions
+ */
+bool IsConstantExpression(const Expression& expr);
+
+/**
+ * Returns true if expr is a valid constant-index-expression, as defined by GLSL 1.0, Appendix A,
+ * Section 5. A constant-index-expression is:
+ * - A constant-expression
+ * - Loop indices (as defined in Appendix A, Section 4)
+ * - Expressions composed of both of the above
+ */
+bool IsConstantIndexExpression(const Expression& expr,
+ const std::set<const Variable*>* loopIndices);
+
+/**
+ * Ensures that a for-loop meets the strict requirements of The OpenGL ES Shading Language 1.00,
+ * Appendix A, Section 4.
+ * If the requirements are met, information about the loop's structure is returned.
+ * If the requirements are not met, the problem is reported via `errors` (if not nullptr), and
+ * null is returned.
+ */
+std::unique_ptr<LoopUnrollInfo> GetLoopUnrollInfo(Position pos,
+ const ForLoopPositions& positions,
+ const Statement* loopInitializer,
+ const Expression* loopTest,
+ const Expression* loopNext,
+ const Statement* loopStatement,
+ ErrorReporter* errors);
+
+void ValidateIndexingForES2(const ProgramElement& pe, ErrorReporter& errors);
+
+/** Detects functions that fail to return a value on at least one path. */
+bool CanExitWithoutReturningValue(const FunctionDeclaration& funcDecl, const Statement& body);
+
+/** Determines if a given function has multiple and/or early returns. */
+enum class ReturnComplexity {
+ kSingleSafeReturn,
+ kScopedReturns,
+ kEarlyReturns,
+};
+ReturnComplexity GetReturnComplexity(const FunctionDefinition& funcDef);
+
+/**
+ * Runs at finalization time to perform any last-minute correctness checks:
+ * - Reports dangling FunctionReference or TypeReference expressions
+ * - Reports function `out` params which are never written to (structs are currently exempt)
+ */
+void DoFinalizationChecks(const Program& program);
+
+/**
+ * Error checks compute shader in/outs and returns a vector containing them ordered by location.
+ */
+SkTArray<const SkSL::Variable*> GetComputeShaderMainParams(const Context& context,
+ const Program& program);
+
+/**
+ * Tracks the symbol table stack, in conjunction with a ProgramVisitor. Inside `visitStatement`,
+ * pass the current statement and a symbol-table vector to a SymbolTableStackBuilder and the symbol
+ * table stack will be maintained automatically.
+ */
+class SymbolTableStackBuilder {
+public:
+ // If the passed-in statement holds a symbol table, adds it to the stack.
+ SymbolTableStackBuilder(const Statement* stmt,
+ std::vector<std::shared_ptr<SymbolTable>>* stack);
+
+ // If a symbol table was added to the stack earlier, removes it from the stack.
+ ~SymbolTableStackBuilder();
+
+private:
+ std::vector<std::shared_ptr<SymbolTable>>* fStackToPop = nullptr;
+};
+
+} // namespace Analysis
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLBuiltinTypes.cpp b/gfx/skia/skia/src/sksl/SkSLBuiltinTypes.cpp
new file mode 100644
index 0000000000..460358e54a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLBuiltinTypes.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLBuiltinTypes.h"
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/spirv.h"
+
+namespace SkSL {
+
+/**
+ * Initializes the core SkSL types.
+ */
+BuiltinTypes::BuiltinTypes()
+ : fFloat(Type::MakeScalarType(
+ "float", "f", Type::NumberKind::kFloat, /*priority=*/10, /*bitWidth=*/32))
+ , fFloat2(Type::MakeVectorType("float2", "f2", *fFloat, /*columns=*/2))
+ , fFloat3(Type::MakeVectorType("float3", "f3", *fFloat, /*columns=*/3))
+ , fFloat4(Type::MakeVectorType("float4", "f4", *fFloat, /*columns=*/4))
+ , fHalf(Type::MakeScalarType(
+ "half", "h", Type::NumberKind::kFloat, /*priority=*/9, /*bitWidth=*/16))
+ , fHalf2(Type::MakeVectorType("half2", "h2", *fHalf, /*columns=*/2))
+ , fHalf3(Type::MakeVectorType("half3", "h3", *fHalf, /*columns=*/3))
+ , fHalf4(Type::MakeVectorType("half4", "h4", *fHalf, /*columns=*/4))
+ , fInt(Type::MakeScalarType(
+ "int", "i", Type::NumberKind::kSigned, /*priority=*/7, /*bitWidth=*/32))
+ , fInt2(Type::MakeVectorType("int2", "i2", *fInt, /*columns=*/2))
+ , fInt3(Type::MakeVectorType("int3", "i3", *fInt, /*columns=*/3))
+ , fInt4(Type::MakeVectorType("int4", "i4", *fInt, /*columns=*/4))
+ , fUInt(Type::MakeScalarType(
+ "uint", "I", Type::NumberKind::kUnsigned, /*priority=*/6, /*bitWidth=*/32))
+ , fUInt2(Type::MakeVectorType("uint2", "I2", *fUInt, /*columns=*/2))
+ , fUInt3(Type::MakeVectorType("uint3", "I3", *fUInt, /*columns=*/3))
+ , fUInt4(Type::MakeVectorType("uint4", "I4", *fUInt, /*columns=*/4))
+ , fShort(Type::MakeScalarType(
+ "short", "s", Type::NumberKind::kSigned, /*priority=*/4, /*bitWidth=*/16))
+ , fShort2(Type::MakeVectorType("short2", "s2", *fShort, /*columns=*/2))
+ , fShort3(Type::MakeVectorType("short3", "s3", *fShort, /*columns=*/3))
+ , fShort4(Type::MakeVectorType("short4", "s4", *fShort, /*columns=*/4))
+ , fUShort(Type::MakeScalarType(
+ "ushort", "S", Type::NumberKind::kUnsigned, /*priority=*/3, /*bitWidth=*/16))
+ , fUShort2(Type::MakeVectorType("ushort2", "S2", *fUShort, /*columns=*/2))
+ , fUShort3(Type::MakeVectorType("ushort3", "S3", *fUShort, /*columns=*/3))
+ , fUShort4(Type::MakeVectorType("ushort4", "S4", *fUShort, /*columns=*/4))
+ , fBool(Type::MakeScalarType(
+ "bool", "b", Type::NumberKind::kBoolean, /*priority=*/0, /*bitWidth=*/1))
+ , fBool2(Type::MakeVectorType("bool2", "b2", *fBool, /*columns=*/2))
+ , fBool3(Type::MakeVectorType("bool3", "b3", *fBool, /*columns=*/3))
+ , fBool4(Type::MakeVectorType("bool4", "b4", *fBool, /*columns=*/4))
+ , fInvalid(Type::MakeSpecialType("<INVALID>", "O", Type::TypeKind::kOther))
+ , fPoison(Type::MakeSpecialType(Compiler::POISON_TAG, "P", Type::TypeKind::kOther))
+ , fVoid(Type::MakeSpecialType("void", "v", Type::TypeKind::kVoid))
+ , fFloatLiteral(Type::MakeLiteralType("$floatLiteral", *fFloat, /*priority=*/8))
+ , fIntLiteral(Type::MakeLiteralType("$intLiteral", *fInt, /*priority=*/5))
+ , fFloat2x2(Type::MakeMatrixType("float2x2", "f22", *fFloat, /*columns=*/2, /*rows=*/2))
+ , fFloat2x3(Type::MakeMatrixType("float2x3", "f23", *fFloat, /*columns=*/2, /*rows=*/3))
+ , fFloat2x4(Type::MakeMatrixType("float2x4", "f24", *fFloat, /*columns=*/2, /*rows=*/4))
+ , fFloat3x2(Type::MakeMatrixType("float3x2", "f32", *fFloat, /*columns=*/3, /*rows=*/2))
+ , fFloat3x3(Type::MakeMatrixType("float3x3", "f33", *fFloat, /*columns=*/3, /*rows=*/3))
+ , fFloat3x4(Type::MakeMatrixType("float3x4", "f34", *fFloat, /*columns=*/3, /*rows=*/4))
+ , fFloat4x2(Type::MakeMatrixType("float4x2", "f42", *fFloat, /*columns=*/4, /*rows=*/2))
+ , fFloat4x3(Type::MakeMatrixType("float4x3", "f43", *fFloat, /*columns=*/4, /*rows=*/3))
+ , fFloat4x4(Type::MakeMatrixType("float4x4", "f44", *fFloat, /*columns=*/4, /*rows=*/4))
+ , fHalf2x2(Type::MakeMatrixType("half2x2", "h22", *fHalf, /*columns=*/2, /*rows=*/2))
+ , fHalf2x3(Type::MakeMatrixType("half2x3", "h23", *fHalf, /*columns=*/2, /*rows=*/3))
+ , fHalf2x4(Type::MakeMatrixType("half2x4", "h24", *fHalf, /*columns=*/2, /*rows=*/4))
+ , fHalf3x2(Type::MakeMatrixType("half3x2", "h32", *fHalf, /*columns=*/3, /*rows=*/2))
+ , fHalf3x3(Type::MakeMatrixType("half3x3", "h33", *fHalf, /*columns=*/3, /*rows=*/3))
+ , fHalf3x4(Type::MakeMatrixType("half3x4", "h34", *fHalf, /*columns=*/3, /*rows=*/4))
+ , fHalf4x2(Type::MakeMatrixType("half4x2", "h42", *fHalf, /*columns=*/4, /*rows=*/2))
+ , fHalf4x3(Type::MakeMatrixType("half4x3", "h43", *fHalf, /*columns=*/4, /*rows=*/3))
+ , fHalf4x4(Type::MakeMatrixType("half4x4", "h44", *fHalf, /*columns=*/4, /*rows=*/4))
+ , fVec2(Type::MakeAliasType("vec2", *fFloat2))
+ , fVec3(Type::MakeAliasType("vec3", *fFloat3))
+ , fVec4(Type::MakeAliasType("vec4", *fFloat4))
+ , fIVec2(Type::MakeAliasType("ivec2", *fInt2))
+ , fIVec3(Type::MakeAliasType("ivec3", *fInt3))
+ , fIVec4(Type::MakeAliasType("ivec4", *fInt4))
+ , fBVec2(Type::MakeAliasType("bvec2", *fBool2))
+ , fBVec3(Type::MakeAliasType("bvec3", *fBool3))
+ , fBVec4(Type::MakeAliasType("bvec4", *fBool4))
+ , fMat2(Type::MakeAliasType("mat2", *fFloat2x2))
+ , fMat3(Type::MakeAliasType("mat3", *fFloat3x3))
+ , fMat4(Type::MakeAliasType("mat4", *fFloat4x4))
+ , fMat2x2(Type::MakeAliasType("mat2x2", *fFloat2x2))
+ , fMat2x3(Type::MakeAliasType("mat2x3", *fFloat2x3))
+ , fMat2x4(Type::MakeAliasType("mat2x4", *fFloat2x4))
+ , fMat3x2(Type::MakeAliasType("mat3x2", *fFloat3x2))
+ , fMat3x3(Type::MakeAliasType("mat3x3", *fFloat3x3))
+ , fMat3x4(Type::MakeAliasType("mat3x4", *fFloat3x4))
+ , fMat4x2(Type::MakeAliasType("mat4x2", *fFloat4x2))
+ , fMat4x3(Type::MakeAliasType("mat4x3", *fFloat4x3))
+ , fMat4x4(Type::MakeAliasType("mat4x4", *fFloat4x4))
+ , fTexture2D(Type::MakeTextureType("texture2D",
+ SpvDim2D,
+ /*isDepth=*/false,
+ /*isArrayedTexture=*/false,
+ /*isMultisampled=*/false,
+ Type::TextureAccess::kSample))
+ , fTextureExternalOES(Type::MakeTextureType("textureExternalOES",
+ SpvDim2D,
+ /*isDepth=*/false,
+ /*isArrayedTexture=*/false,
+ /*isMultisampled=*/false,
+ Type::TextureAccess::kSample))
+ , fTexture2DRect(Type::MakeTextureType("texture2DRect",
+ SpvDimRect,
+ /*isDepth=*/false,
+ /*isArrayedTexture=*/false,
+ /*isMultisampled=*/false,
+ Type::TextureAccess::kSample))
+ , fReadWriteTexture2D(Type::MakeTextureType("readWriteTexture2D",
+ SpvDim2D,
+ /*isDepth=*/false,
+ /*isArrayedTexture=*/false,
+ /*isMultisampled=*/false,
+ Type::TextureAccess::kReadWrite))
+ , fReadOnlyTexture2D(Type::MakeTextureType("readonlyTexture2D",
+ SpvDim2D,
+ /*isDepth=*/false,
+ /*isArrayedTexture=*/false,
+ /*isMultisampled=*/false,
+ Type::TextureAccess::kRead))
+ , fWriteOnlyTexture2D(Type::MakeTextureType("writeonlyTexture2D",
+ SpvDim2D,
+ /*isDepth=*/false,
+ /*isArrayedTexture=*/false,
+ /*isMultisampled=*/false,
+ Type::TextureAccess::kWrite))
+ , fGenTexture2D(Type::MakeGenericType("$genTexture2D",
+ {fReadOnlyTexture2D.get(),
+ fWriteOnlyTexture2D.get(),
+ fReadWriteTexture2D.get()}))
+ , fReadableTexture2D(Type::MakeGenericType("$readableTexture2D",
+ {fReadOnlyTexture2D.get(),
+ fInvalid.get(),
+ fReadWriteTexture2D.get()}))
+ , fWritableTexture2D(Type::MakeGenericType("$writableTexture2D",
+ {fInvalid.get(),
+ fWriteOnlyTexture2D.get(),
+ fReadWriteTexture2D.get()}))
+ , fSampler2D(Type::MakeSamplerType("sampler2D", *fTexture2D))
+ , fSamplerExternalOES(Type::MakeSamplerType("samplerExternalOES", *fTextureExternalOES))
+ , fSampler2DRect(Type::MakeSamplerType("sampler2DRect", *fTexture2DRect))
+
+ , fSampler(Type::MakeSpecialType("sampler", "ss", Type::TypeKind::kSeparateSampler))
+
+ , fSubpassInput(Type::MakeTextureType("subpassInput",
+ SpvDimSubpassData,
+ /*isDepth=*/false,
+ /*isArrayedTexture=*/false,
+ /*isMultisampled=*/false,
+ Type::TextureAccess::kRead))
+ , fSubpassInputMS(Type::MakeTextureType("subpassInputMS",
+ SpvDimSubpassData,
+ /*isDepth=*/false,
+ /*isArrayedTexture=*/false,
+ /*isMultisampled=*/true,
+ Type::TextureAccess::kRead))
+ , fGenType(Type::MakeGenericType("$genType", {fFloat.get(), fFloat2.get(), fFloat3.get(),
+ fFloat4.get()}))
+ , fGenHType(Type::MakeGenericType("$genHType", {fHalf.get(), fHalf2.get(), fHalf3.get(),
+ fHalf4.get()}))
+ , fGenIType(Type::MakeGenericType("$genIType", {fInt.get(), fInt2.get(), fInt3.get(),
+ fInt4.get()}))
+ , fGenUType(Type::MakeGenericType("$genUType", {fUInt.get(), fUInt2.get(), fUInt3.get(),
+ fUInt4.get()}))
+ , fGenBType(Type::MakeGenericType("$genBType", {fBool.get(), fBool2.get(), fBool3.get(),
+ fBool4.get()}))
+ , fMat(Type::MakeGenericType("$mat", {fFloat2x2.get(), fFloat2x3.get(), fFloat2x4.get(),
+ fFloat3x2.get(), fFloat3x3.get(), fFloat3x4.get(),
+ fFloat4x2.get(), fFloat4x3.get(), fFloat4x4.get()}))
+ , fHMat(Type::MakeGenericType(
+ "$hmat",
+ {fHalf2x2.get(), fHalf2x3.get(), fHalf2x4.get(), fHalf3x2.get(), fHalf3x3.get(),
+ fHalf3x4.get(), fHalf4x2.get(), fHalf4x3.get(), fHalf4x4.get()}))
+ , fSquareMat(Type::MakeGenericType("$squareMat", {fInvalid.get(), fFloat2x2.get(),
+ fFloat3x3.get(), fFloat4x4.get()}))
+ , fSquareHMat(Type::MakeGenericType("$squareHMat", {fInvalid.get(), fHalf2x2.get(),
+ fHalf3x3.get(), fHalf4x4.get()}))
+ , fVec(Type::MakeGenericType("$vec", {fInvalid.get(), fFloat2.get(), fFloat3.get(),
+ fFloat4.get()}))
+ , fHVec(Type::MakeGenericType("$hvec", {fInvalid.get(), fHalf2.get(), fHalf3.get(),
+ fHalf4.get()}))
+ , fIVec(Type::MakeGenericType("$ivec", {fInvalid.get(), fInt2.get(), fInt3.get(),
+ fInt4.get()}))
+ , fUVec(Type::MakeGenericType("$uvec", {fInvalid.get(), fUInt2.get(), fUInt3.get(),
+ fUInt4.get()}))
+ , fSVec(Type::MakeGenericType("$svec", {fInvalid.get(), fShort2.get(), fShort3.get(),
+ fShort4.get()}))
+ , fUSVec(Type::MakeGenericType("$usvec", {fInvalid.get(), fUShort2.get(), fUShort3.get(),
+ fUShort4.get()}))
+ , fBVec(Type::MakeGenericType("$bvec", {fInvalid.get(), fBool2.get(), fBool3.get(),
+ fBool4.get()}))
+ , fSkCaps(Type::MakeSpecialType("$sk_Caps", "O", Type::TypeKind::kOther))
+ , fColorFilter(Type::MakeSpecialType("colorFilter", "CF", Type::TypeKind::kColorFilter))
+ , fShader(Type::MakeSpecialType("shader", "SH", Type::TypeKind::kShader))
+ , fBlender(Type::MakeSpecialType("blender", "B", Type::TypeKind::kBlender))
+ , fAtomicUInt(Type::MakeAtomicType("atomicUint", "au")) {}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLBuiltinTypes.h b/gfx/skia/skia/src/sksl/SkSLBuiltinTypes.h
new file mode 100644
index 0000000000..75da759659
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLBuiltinTypes.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BUILTIN_TYPES
+#define SKSL_BUILTIN_TYPES
+
+#include <memory>
+
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * Contains the built-in, core types for SkSL.
+ */
+class BuiltinTypes {
+public:
+ BuiltinTypes();
+
+ const std::unique_ptr<Type> fFloat;
+ const std::unique_ptr<Type> fFloat2;
+ const std::unique_ptr<Type> fFloat3;
+ const std::unique_ptr<Type> fFloat4;
+
+ const std::unique_ptr<Type> fHalf;
+ const std::unique_ptr<Type> fHalf2;
+ const std::unique_ptr<Type> fHalf3;
+ const std::unique_ptr<Type> fHalf4;
+
+ const std::unique_ptr<Type> fInt;
+ const std::unique_ptr<Type> fInt2;
+ const std::unique_ptr<Type> fInt3;
+ const std::unique_ptr<Type> fInt4;
+
+ const std::unique_ptr<Type> fUInt;
+ const std::unique_ptr<Type> fUInt2;
+ const std::unique_ptr<Type> fUInt3;
+ const std::unique_ptr<Type> fUInt4;
+
+ const std::unique_ptr<Type> fShort;
+ const std::unique_ptr<Type> fShort2;
+ const std::unique_ptr<Type> fShort3;
+ const std::unique_ptr<Type> fShort4;
+
+ const std::unique_ptr<Type> fUShort;
+ const std::unique_ptr<Type> fUShort2;
+ const std::unique_ptr<Type> fUShort3;
+ const std::unique_ptr<Type> fUShort4;
+
+ const std::unique_ptr<Type> fBool;
+ const std::unique_ptr<Type> fBool2;
+ const std::unique_ptr<Type> fBool3;
+ const std::unique_ptr<Type> fBool4;
+
+ const std::unique_ptr<Type> fInvalid;
+ const std::unique_ptr<Type> fPoison;
+ const std::unique_ptr<Type> fVoid;
+ const std::unique_ptr<Type> fFloatLiteral;
+ const std::unique_ptr<Type> fIntLiteral;
+
+ const std::unique_ptr<Type> fFloat2x2;
+ const std::unique_ptr<Type> fFloat2x3;
+ const std::unique_ptr<Type> fFloat2x4;
+ const std::unique_ptr<Type> fFloat3x2;
+ const std::unique_ptr<Type> fFloat3x3;
+ const std::unique_ptr<Type> fFloat3x4;
+ const std::unique_ptr<Type> fFloat4x2;
+ const std::unique_ptr<Type> fFloat4x3;
+ const std::unique_ptr<Type> fFloat4x4;
+
+ const std::unique_ptr<Type> fHalf2x2;
+ const std::unique_ptr<Type> fHalf2x3;
+ const std::unique_ptr<Type> fHalf2x4;
+ const std::unique_ptr<Type> fHalf3x2;
+ const std::unique_ptr<Type> fHalf3x3;
+ const std::unique_ptr<Type> fHalf3x4;
+ const std::unique_ptr<Type> fHalf4x2;
+ const std::unique_ptr<Type> fHalf4x3;
+ const std::unique_ptr<Type> fHalf4x4;
+
+ const std::unique_ptr<Type> fVec2;
+ const std::unique_ptr<Type> fVec3;
+ const std::unique_ptr<Type> fVec4;
+
+ const std::unique_ptr<Type> fIVec2;
+ const std::unique_ptr<Type> fIVec3;
+ const std::unique_ptr<Type> fIVec4;
+
+ const std::unique_ptr<Type> fBVec2;
+ const std::unique_ptr<Type> fBVec3;
+ const std::unique_ptr<Type> fBVec4;
+
+ const std::unique_ptr<Type> fMat2;
+ const std::unique_ptr<Type> fMat3;
+ const std::unique_ptr<Type> fMat4;
+
+ const std::unique_ptr<Type> fMat2x2;
+ const std::unique_ptr<Type> fMat2x3;
+ const std::unique_ptr<Type> fMat2x4;
+ const std::unique_ptr<Type> fMat3x2;
+ const std::unique_ptr<Type> fMat3x3;
+ const std::unique_ptr<Type> fMat3x4;
+ const std::unique_ptr<Type> fMat4x2;
+ const std::unique_ptr<Type> fMat4x3;
+ const std::unique_ptr<Type> fMat4x4;
+
+ const std::unique_ptr<Type> fTexture2D;
+ const std::unique_ptr<Type> fTextureExternalOES;
+ const std::unique_ptr<Type> fTexture2DRect;
+
+ const std::unique_ptr<Type> fReadWriteTexture2D;
+ const std::unique_ptr<Type> fReadOnlyTexture2D;
+ const std::unique_ptr<Type> fWriteOnlyTexture2D;
+
+ const std::unique_ptr<Type> fGenTexture2D;
+ const std::unique_ptr<Type> fReadableTexture2D;
+ const std::unique_ptr<Type> fWritableTexture2D;
+
+ const std::unique_ptr<Type> fSampler2D;
+ const std::unique_ptr<Type> fSamplerExternalOES;
+ const std::unique_ptr<Type> fSampler2DRect;
+
+ const std::unique_ptr<Type> fSampler;
+
+ const std::unique_ptr<Type> fSubpassInput;
+ const std::unique_ptr<Type> fSubpassInputMS;
+
+ const std::unique_ptr<Type> fGenType;
+ const std::unique_ptr<Type> fGenHType;
+ const std::unique_ptr<Type> fGenIType;
+ const std::unique_ptr<Type> fGenUType;
+ const std::unique_ptr<Type> fGenBType;
+
+ const std::unique_ptr<Type> fMat;
+ const std::unique_ptr<Type> fHMat;
+ const std::unique_ptr<Type> fSquareMat;
+ const std::unique_ptr<Type> fSquareHMat;
+
+ const std::unique_ptr<Type> fVec;
+
+ const std::unique_ptr<Type> fHVec;
+ const std::unique_ptr<Type> fDVec;
+ const std::unique_ptr<Type> fIVec;
+ const std::unique_ptr<Type> fUVec;
+ const std::unique_ptr<Type> fSVec;
+ const std::unique_ptr<Type> fUSVec;
+ const std::unique_ptr<Type> fByteVec;
+ const std::unique_ptr<Type> fUByteVec;
+
+ const std::unique_ptr<Type> fBVec;
+
+ const std::unique_ptr<Type> fSkCaps;
+
+ const std::unique_ptr<Type> fColorFilter;
+ const std::unique_ptr<Type> fShader;
+ const std::unique_ptr<Type> fBlender;
+
+ const std::unique_ptr<Type> fAtomicUInt;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLCompiler.cpp b/gfx/skia/skia/src/sksl/SkSLCompiler.cpp
new file mode 100644
index 0000000000..78498b58af
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCompiler.cpp
@@ -0,0 +1,726 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLCompiler.h"
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramKind.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/private/base/SkDebug.h"
+#include "include/sksl/DSLCore.h"
+#include "include/sksl/DSLModifiers.h"
+#include "include/sksl/DSLType.h"
+#include "src/core/SkTraceEvent.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLInliner.h"
+#include "src/sksl/SkSLModuleLoader.h"
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLParser.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLField.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionReference.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLSymbolTable.h" // IWYU pragma: keep
+#include "src/sksl/ir/SkSLTypeReference.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <atomic>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#if defined(SKSL_STANDALONE)
+#include <fstream>
+#endif
+
+#if defined(SKSL_STANDALONE) || defined(SK_GANESH) || defined(SK_GRAPHITE)
+#include "src/sksl/codegen/SkSLGLSLCodeGenerator.h"
+#include "src/sksl/codegen/SkSLMetalCodeGenerator.h"
+#include "src/sksl/codegen/SkSLSPIRVCodeGenerator.h"
+#include "src/sksl/codegen/SkSLSPIRVtoHLSL.h"
+#include "src/sksl/codegen/SkSLWGSLCodeGenerator.h"
+#endif
+
+#ifdef SK_ENABLE_SPIRV_VALIDATION
+#include "spirv-tools/libspirv.hpp"
+#endif
+
+#ifdef SK_ENABLE_WGSL_VALIDATION
+#include "tint/tint.h"
+#endif
+
+namespace SkSL {
+
+class ModifiersPool;
+
+// These flags allow tools like Viewer or Nanobench to override the compiler's ProgramSettings.
+Compiler::OverrideFlag Compiler::sOptimizer = OverrideFlag::kDefault;
+Compiler::OverrideFlag Compiler::sInliner = OverrideFlag::kDefault;
+
+using RefKind = VariableReference::RefKind;
+
+class AutoSource {
+public:
+ AutoSource(Compiler* compiler, std::string_view source)
+ : fCompiler(compiler) {
+ SkASSERT(!fCompiler->errorReporter().source().data());
+ fCompiler->errorReporter().setSource(source);
+ }
+
+ ~AutoSource() {
+ fCompiler->errorReporter().setSource(std::string_view());
+ }
+
+ Compiler* fCompiler;
+};
+
+class AutoProgramConfig {
+public:
+ AutoProgramConfig(Context& context, ProgramConfig* config)
+ : fContext(context)
+ , fOldConfig(context.fConfig) {
+ fContext.fConfig = config;
+ }
+
+ ~AutoProgramConfig() {
+ fContext.fConfig = fOldConfig;
+ }
+
+ Context& fContext;
+ ProgramConfig* fOldConfig;
+};
+
+class AutoShaderCaps {
+public:
+ AutoShaderCaps(std::shared_ptr<Context>& context, const ShaderCaps* caps)
+ : fContext(context.get())
+ , fOldCaps(fContext->fCaps) {
+ fContext->fCaps = caps;
+ }
+
+ ~AutoShaderCaps() {
+ fContext->fCaps = fOldCaps;
+ }
+
+ Context* fContext;
+ const ShaderCaps* fOldCaps;
+};
+
+class AutoModifiersPool {
+public:
+ AutoModifiersPool(std::shared_ptr<Context>& context, ModifiersPool* modifiersPool)
+ : fContext(context.get()) {
+ SkASSERT(!fContext->fModifiersPool);
+ fContext->fModifiersPool = modifiersPool;
+ }
+
+ ~AutoModifiersPool() {
+ fContext->fModifiersPool = nullptr;
+ }
+
+ Context* fContext;
+};
+
+Compiler::Compiler(const ShaderCaps* caps) : fErrorReporter(this), fCaps(caps) {
+ SkASSERT(caps);
+
+ auto moduleLoader = ModuleLoader::Get();
+ fContext = std::make_shared<Context>(moduleLoader.builtinTypes(), /*caps=*/nullptr,
+ fErrorReporter);
+}
+
+Compiler::~Compiler() {}
+
+const Module* Compiler::moduleForProgramKind(ProgramKind kind) {
+ auto m = ModuleLoader::Get();
+ switch (kind) {
+ case ProgramKind::kVertex: return m.loadVertexModule(this);
+ case ProgramKind::kFragment: return m.loadFragmentModule(this);
+ case ProgramKind::kCompute: return m.loadComputeModule(this);
+ case ProgramKind::kGraphiteVertex: return m.loadGraphiteVertexModule(this);
+ case ProgramKind::kGraphiteFragment: return m.loadGraphiteFragmentModule(this);
+ case ProgramKind::kPrivateRuntimeShader: return m.loadPrivateRTShaderModule(this);
+ case ProgramKind::kRuntimeColorFilter:
+ case ProgramKind::kRuntimeShader:
+ case ProgramKind::kRuntimeBlender:
+ case ProgramKind::kPrivateRuntimeColorFilter:
+ case ProgramKind::kPrivateRuntimeBlender:
+ case ProgramKind::kMeshVertex:
+ case ProgramKind::kMeshFragment: return m.loadPublicModule(this);
+ }
+ SkUNREACHABLE;
+}
+
+void Compiler::FinalizeSettings(ProgramSettings* settings, ProgramKind kind) {
+ // Honor our optimization-override flags.
+ switch (sOptimizer) {
+ case OverrideFlag::kDefault:
+ break;
+ case OverrideFlag::kOff:
+ settings->fOptimize = false;
+ break;
+ case OverrideFlag::kOn:
+ settings->fOptimize = true;
+ break;
+ }
+
+ switch (sInliner) {
+ case OverrideFlag::kDefault:
+ break;
+ case OverrideFlag::kOff:
+ settings->fInlineThreshold = 0;
+ break;
+ case OverrideFlag::kOn:
+ if (settings->fInlineThreshold == 0) {
+ settings->fInlineThreshold = kDefaultInlineThreshold;
+ }
+ break;
+ }
+
+ // Disable optimization settings that depend on a parent setting which has been disabled.
+ settings->fInlineThreshold *= (int)settings->fOptimize;
+ settings->fRemoveDeadFunctions &= settings->fOptimize;
+ settings->fRemoveDeadVariables &= settings->fOptimize;
+
+ // Runtime effects always allow narrowing conversions.
+ if (ProgramConfig::IsRuntimeEffect(kind)) {
+ settings->fAllowNarrowingConversions = true;
+ }
+}
+
+std::unique_ptr<Module> Compiler::compileModule(ProgramKind kind,
+ const char* moduleName,
+ std::string moduleSource,
+ const Module* parent,
+ ModifiersPool& modifiersPool,
+ bool shouldInline) {
+ SkASSERT(parent);
+ SkASSERT(!moduleSource.empty());
+ SkASSERT(this->errorCount() == 0);
+
+ // Modules are shared and cannot rely on shader caps.
+ AutoShaderCaps autoCaps(fContext, nullptr);
+ AutoModifiersPool autoPool(fContext, &modifiersPool);
+
+ // Compile the module from source, using default program settings.
+ ProgramSettings settings;
+ FinalizeSettings(&settings, kind);
+ SkSL::Parser parser{this, settings, kind, std::move(moduleSource)};
+ std::unique_ptr<Module> module = parser.moduleInheritingFrom(parent);
+ if (this->errorCount() != 0) {
+ SkDebugf("Unexpected errors compiling %s:\n\n%s\n", moduleName, this->errorText().c_str());
+ return nullptr;
+ }
+ if (shouldInline) {
+ this->optimizeModuleAfterLoading(kind, *module);
+ }
+ return module;
+}
+
+std::unique_ptr<Program> Compiler::convertProgram(ProgramKind kind,
+ std::string text,
+ ProgramSettings settings) {
+ TRACE_EVENT0("skia.shaders", "SkSL::Compiler::convertProgram");
+
+ // Make sure the passed-in settings are valid.
+ FinalizeSettings(&settings, kind);
+
+ // Put the ShaderCaps into the context while compiling a program.
+ AutoShaderCaps autoCaps(fContext, fCaps);
+
+ this->resetErrors();
+
+ return Parser(this, settings, kind, std::move(text)).program();
+}
+
+std::unique_ptr<Expression> Compiler::convertIdentifier(Position pos, std::string_view name) {
+ const Symbol* result = fSymbolTable->find(name);
+ if (!result) {
+ this->errorReporter().error(pos, "unknown identifier '" + std::string(name) + "'");
+ return nullptr;
+ }
+ switch (result->kind()) {
+ case Symbol::Kind::kFunctionDeclaration: {
+ return std::make_unique<FunctionReference>(*fContext, pos,
+ &result->as<FunctionDeclaration>());
+ }
+ case Symbol::Kind::kVariable: {
+ const Variable* var = &result->as<Variable>();
+ // default to kRead_RefKind; this will be corrected later if the variable is written to
+ return VariableReference::Make(pos, var, VariableReference::RefKind::kRead);
+ }
+ case Symbol::Kind::kField: {
+ const Field* field = &result->as<Field>();
+ auto base = VariableReference::Make(pos, &field->owner(),
+ VariableReference::RefKind::kRead);
+ return FieldAccess::Make(*fContext, pos, std::move(base), field->fieldIndex(),
+ FieldAccess::OwnerKind::kAnonymousInterfaceBlock);
+ }
+ case Symbol::Kind::kType: {
+ // go through DSLType so we report errors on private types
+ dsl::DSLModifiers modifiers;
+ dsl::DSLType dslType(result->name(), &modifiers, pos);
+ return TypeReference::Convert(*fContext, pos, &dslType.skslType());
+ }
+ default:
+ SK_ABORT("unsupported symbol type %d\n", (int) result->kind());
+ }
+}
+
+bool Compiler::optimizeModuleBeforeMinifying(ProgramKind kind, Module& module) {
+ SkASSERT(this->errorCount() == 0);
+
+ auto m = SkSL::ModuleLoader::Get();
+
+ // Create a temporary program configuration with default settings.
+ ProgramConfig config;
+ config.fIsBuiltinCode = true;
+ config.fKind = kind;
+ AutoProgramConfig autoConfig(this->context(), &config);
+ AutoModifiersPool autoPool(fContext, &m.coreModifiers());
+
+ std::unique_ptr<ProgramUsage> usage = Analysis::GetUsage(module);
+
+ // Assign shorter names to symbols as long as it won't change the external meaning of the code.
+ Transform::RenamePrivateSymbols(this->context(), module, usage.get(), kind);
+
+ // Replace constant variables with their literal values to save space.
+ Transform::ReplaceConstVarsWithLiterals(module, usage.get());
+
+ // Remove any unreachable code.
+ Transform::EliminateUnreachableCode(module, usage.get());
+
+ // We can only remove dead functions from runtime shaders, since runtime-effect helper functions
+ // are isolated from other parts of the program. In a module, an unreferenced function is
+ // intended to be called by the code that includes the module.
+ if (kind == ProgramKind::kRuntimeShader) {
+ while (Transform::EliminateDeadFunctions(this->context(), module, usage.get())) {
+ // Removing dead functions may cause more functions to become unreferenced. Try again.
+ }
+ }
+
+ while (Transform::EliminateDeadLocalVariables(this->context(), module, usage.get())) {
+ // Removing dead variables may cause more variables to become unreferenced. Try again.
+ }
+
+ // Runtime shaders are isolated from other parts of the program via name mangling, so we can
+ // eliminate public globals if they aren't referenced. Otherwise, we only eliminate private
+ // globals (prefixed with `$`) to avoid changing the meaning of the module code.
+ bool onlyPrivateGlobals = !ProgramConfig::IsRuntimeEffect(kind);
+ while (Transform::EliminateDeadGlobalVariables(this->context(), module, usage.get(),
+ onlyPrivateGlobals)) {
+ // Repeat until no changes occur.
+ }
+
+ // We eliminate empty statements to avoid runs of `;;;;;;` caused by the previous passes.
+ SkSL::Transform::EliminateEmptyStatements(module);
+
+ // Make sure that program usage is still correct after the optimization pass is complete.
+ SkASSERT(*usage == *Analysis::GetUsage(module));
+
+ return this->errorCount() == 0;
+}
+
+bool Compiler::optimizeModuleAfterLoading(ProgramKind kind, Module& module) {
+ SkASSERT(this->errorCount() == 0);
+
+#ifndef SK_ENABLE_OPTIMIZE_SIZE
+ // Create a temporary program configuration with default settings.
+ ProgramConfig config;
+ config.fIsBuiltinCode = true;
+ config.fKind = kind;
+ AutoProgramConfig autoConfig(this->context(), &config);
+
+ std::unique_ptr<ProgramUsage> usage = Analysis::GetUsage(module);
+
+ // Perform inline-candidate analysis and inline any functions deemed suitable.
+ Inliner inliner(fContext.get());
+ while (this->errorCount() == 0) {
+ if (!this->runInliner(&inliner, module.fElements, module.fSymbols, usage.get())) {
+ break;
+ }
+ }
+ // Make sure that program usage is still correct after the optimization pass is complete.
+ SkASSERT(*usage == *Analysis::GetUsage(module));
+#endif
+
+ return this->errorCount() == 0;
+}
+
+bool Compiler::optimize(Program& program) {
+ // The optimizer only needs to run when it is enabled.
+ if (!program.fConfig->fSettings.fOptimize) {
+ return true;
+ }
+
+ AutoShaderCaps autoCaps(fContext, fCaps);
+
+ SkASSERT(!this->errorCount());
+ if (this->errorCount() == 0) {
+#ifndef SK_ENABLE_OPTIMIZE_SIZE
+ // Run the inliner only once; it is expensive! Multiple passes can occasionally shake out
+ // more wins, but it's diminishing returns.
+ Inliner inliner(fContext.get());
+ this->runInliner(&inliner, program.fOwnedElements, program.fSymbols, program.fUsage.get());
+#endif
+
+ // Unreachable code can confuse some drivers, so it's worth removing. (skia:12012)
+ Transform::EliminateUnreachableCode(program);
+
+ while (Transform::EliminateDeadFunctions(program)) {
+ // Removing dead functions may cause more functions to become unreferenced. Try again.
+ }
+ while (Transform::EliminateDeadLocalVariables(program)) {
+ // Removing dead variables may cause more variables to become unreferenced. Try again.
+ }
+ while (Transform::EliminateDeadGlobalVariables(program)) {
+ // Repeat until no changes occur.
+ }
+ // Make sure that program usage is still correct after the optimization pass is complete.
+ SkASSERT(*program.usage() == *Analysis::GetUsage(program));
+ }
+
+ return this->errorCount() == 0;
+}
+
+bool Compiler::runInliner(Inliner* inliner,
+ const std::vector<std::unique_ptr<ProgramElement>>& elements,
+ std::shared_ptr<SymbolTable> symbols,
+ ProgramUsage* usage) {
+#ifdef SK_ENABLE_OPTIMIZE_SIZE
+ return true;
+#else
+ // The program's SymbolTable was taken out of fSymbolTable when the program was bundled, but
+ // the inliner relies (indirectly) on having a valid SymbolTable.
+ // In particular, inlining can turn a non-optimizable expression like `normalize(myVec)` into
+ // `normalize(vec2(7))`, which is now optimizable. The optimizer can use DSL to simplify this
+ // expression--e.g., in the case of normalize, using DSL's Length(). The DSL relies on
+ // convertIdentifier() to look up `length`. convertIdentifier() needs a valid symbol table to
+ // find the declaration of `length`. To allow this chain of events to succeed, we re-insert the
+ // program's symbol table temporarily.
+ SkASSERT(!fSymbolTable);
+ fSymbolTable = symbols;
+
+ bool result = inliner->analyze(elements, symbols, usage);
+
+ fSymbolTable = nullptr;
+ return result;
+#endif
+}
+
+bool Compiler::finalize(Program& program) {
+ AutoShaderCaps autoCaps(fContext, fCaps);
+
+ // Copy all referenced built-in functions into the Program.
+ Transform::FindAndDeclareBuiltinFunctions(program);
+
+ // Variables defined in the pre-includes need their declaring elements added to the program.
+ Transform::FindAndDeclareBuiltinVariables(program);
+
+ // Do one last correctness-check pass. This looks for dangling FunctionReference/TypeReference
+ // expressions, and reports them as errors.
+ Analysis::DoFinalizationChecks(program);
+
+ if (fContext->fConfig->strictES2Mode() && this->errorCount() == 0) {
+ // Enforce Appendix A, Section 5 of the GLSL ES 1.00 spec -- Indexing. This logic assumes
+ // that all loops meet the criteria of Section 4, and if they don't, could crash.
+ for (const auto& pe : program.fOwnedElements) {
+ Analysis::ValidateIndexingForES2(*pe, this->errorReporter());
+ }
+ }
+ if (this->errorCount() == 0) {
+ bool enforceSizeLimit = ProgramConfig::IsRuntimeEffect(program.fConfig->fKind);
+ Analysis::CheckProgramStructure(program, enforceSizeLimit);
+ }
+
+ // Make sure that program usage is still correct after finalization is complete.
+ SkASSERT(*program.usage() == *Analysis::GetUsage(program));
+
+ return this->errorCount() == 0;
+}
+
+#if defined(SKSL_STANDALONE) || defined(SK_GANESH) || defined(SK_GRAPHITE)
+
+#if defined(SK_ENABLE_SPIRV_VALIDATION)
+static bool validate_spirv(ErrorReporter& reporter, std::string_view program) {
+ SkASSERT(0 == program.size() % 4);
+ const uint32_t* programData = reinterpret_cast<const uint32_t*>(program.data());
+ size_t programSize = program.size() / 4;
+
+ spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_0);
+ std::string errors;
+ auto msgFn = [&errors](spv_message_level_t, const char*, const spv_position_t&, const char* m) {
+ errors += "SPIR-V validation error: ";
+ errors += m;
+ errors += '\n';
+ };
+ tools.SetMessageConsumer(msgFn);
+
+ // Verify that the SPIR-V we produced is valid. At runtime, we will abort() with a message
+ // explaining the error. In standalone mode (skslc), we will send the message, plus the
+ // entire disassembled SPIR-V (for easier context & debugging) as *our* error message.
+ bool result = tools.Validate(programData, programSize);
+ if (!result) {
+#if defined(SKSL_STANDALONE)
+ // Convert the string-stream to a SPIR-V disassembly.
+ std::string disassembly;
+ if (tools.Disassemble(programData, programSize, &disassembly)) {
+ errors.append(disassembly);
+ }
+ reporter.error(Position(), errors);
+#else
+ SkDEBUGFAILF("%s", errors.c_str());
+#endif
+ }
+ return result;
+}
+#endif
+
+bool Compiler::toSPIRV(Program& program, OutputStream& out) {
+ TRACE_EVENT0("skia.shaders", "SkSL::Compiler::toSPIRV");
+ AutoSource as(this, *program.fSource);
+ AutoShaderCaps autoCaps(fContext, fCaps);
+ ProgramSettings settings;
+ settings.fUseMemoryPool = false;
+ dsl::Start(this, program.fConfig->fKind, settings);
+ dsl::SetErrorReporter(&fErrorReporter);
+ fSymbolTable = program.fSymbols;
+#ifdef SK_ENABLE_SPIRV_VALIDATION
+ StringStream buffer;
+ SPIRVCodeGenerator cg(fContext.get(), &program, &buffer);
+ bool result = cg.generateCode();
+
+ if (result && program.fConfig->fSettings.fValidateSPIRV) {
+ std::string_view binary = buffer.str();
+ result = validate_spirv(this->errorReporter(), binary);
+ out.write(binary.data(), binary.size());
+ }
+#else
+ SPIRVCodeGenerator cg(fContext.get(), &program, &out);
+ bool result = cg.generateCode();
+#endif
+ dsl::End();
+ return result;
+}
+
+bool Compiler::toSPIRV(Program& program, std::string* out) {
+ StringStream buffer;
+ bool result = this->toSPIRV(program, buffer);
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+
+bool Compiler::toGLSL(Program& program, OutputStream& out) {
+ TRACE_EVENT0("skia.shaders", "SkSL::Compiler::toGLSL");
+ AutoSource as(this, *program.fSource);
+ AutoShaderCaps autoCaps(fContext, fCaps);
+ GLSLCodeGenerator cg(fContext.get(), &program, &out);
+ bool result = cg.generateCode();
+ return result;
+}
+
+bool Compiler::toGLSL(Program& program, std::string* out) {
+ StringStream buffer;
+ bool result = this->toGLSL(program, buffer);
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+
+bool Compiler::toHLSL(Program& program, OutputStream& out) {
+ TRACE_EVENT0("skia.shaders", "SkSL::Compiler::toHLSL");
+ std::string hlsl;
+ if (!this->toHLSL(program, &hlsl)) {
+ return false;
+ }
+ out.writeString(hlsl);
+ return true;
+}
+
+bool Compiler::toHLSL(Program& program, std::string* out) {
+ std::string spirv;
+ if (!this->toSPIRV(program, &spirv)) {
+ return false;
+ }
+
+ if (!SPIRVtoHLSL(spirv, out)) {
+ fErrorText += "HLSL cross-compilation not enabled";
+ return false;
+ }
+
+ return true;
+}
+
+bool Compiler::toMetal(Program& program, OutputStream& out) {
+ TRACE_EVENT0("skia.shaders", "SkSL::Compiler::toMetal");
+ AutoSource as(this, *program.fSource);
+ AutoShaderCaps autoCaps(fContext, fCaps);
+ MetalCodeGenerator cg(fContext.get(), &program, &out);
+ bool result = cg.generateCode();
+ return result;
+}
+
+bool Compiler::toMetal(Program& program, std::string* out) {
+ StringStream buffer;
+ bool result = this->toMetal(program, buffer);
+ if (result) {
+ *out = buffer.str();
+ }
+ return result;
+}
+
+#if defined(SK_ENABLE_WGSL_VALIDATION)
+static bool validate_wgsl(ErrorReporter& reporter, const std::string& wgsl) {
+ tint::Source::File srcFile("", wgsl);
+ tint::Program program(tint::reader::wgsl::Parse(&srcFile));
+ if (program.Diagnostics().count() > 0) {
+ tint::diag::Formatter diagFormatter;
+ std::string diagOutput = diagFormatter.format(program.Diagnostics());
+#if defined(SKSL_STANDALONE)
+ reporter.error(Position(), diagOutput);
+#else
+ SkDEBUGFAILF("%s", diagOutput.c_str());
+#endif
+ return false;
+ }
+ return true;
+}
+#endif // defined(SK_ENABLE_WGSL_VALIDATION)
+
+bool Compiler::toWGSL(Program& program, OutputStream& out) {
+ TRACE_EVENT0("skia.shaders", "SkSL::Compiler::toWGSL");
+ AutoSource as(this, *program.fSource);
+#ifdef SK_ENABLE_WGSL_VALIDATION
+ StringStream wgsl;
+ WGSLCodeGenerator cg(fContext.get(), &program, &wgsl);
+ bool result = cg.generateCode();
+ if (result) {
+ std::string wgslString = wgsl.str();
+ result = validate_wgsl(this->errorReporter(), wgslString);
+ out.writeString(wgslString);
+ }
+#else
+ WGSLCodeGenerator cg(fContext.get(), &program, &out);
+ bool result = cg.generateCode();
+#endif
+ return result;
+}
+
+#endif // defined(SKSL_STANDALONE) || defined(SK_GANESH) || defined(SK_GRAPHITE)
+
+void Compiler::handleError(std::string_view msg, Position pos) {
+ fErrorText += "error: ";
+ bool printLocation = false;
+ std::string_view src = this->errorReporter().source();
+ int line = -1;
+ if (pos.valid()) {
+ line = pos.line(src);
+ printLocation = pos.startOffset() < (int)src.length();
+ fErrorText += std::to_string(line) + ": ";
+ }
+ fErrorText += std::string(msg) + "\n";
+ if (printLocation) {
+ const int kMaxSurroundingChars = 100;
+
+ // Find the beginning of the line.
+ int lineStart = pos.startOffset();
+ while (lineStart > 0) {
+ if (src[lineStart - 1] == '\n') {
+ break;
+ }
+ --lineStart;
+ }
+
+ // We don't want to show more than 100 characters surrounding the error, so push the line
+ // start forward and add a leading ellipsis if there would be more than this.
+ std::string lineText;
+ std::string caretText;
+ if ((pos.startOffset() - lineStart) > kMaxSurroundingChars) {
+ lineStart = pos.startOffset() - kMaxSurroundingChars;
+ lineText = "...";
+ caretText = " ";
+ }
+
+ // Echo the line. Again, we don't want to show more than 100 characters after the end of the
+ // error, so truncate with a trailing ellipsis if needed.
+ const char* lineSuffix = "...\n";
+ int lineStop = pos.endOffset() + kMaxSurroundingChars;
+ if (lineStop >= (int)src.length()) {
+ lineStop = src.length() - 1;
+ lineSuffix = "\n"; // no ellipsis if we reach end-of-file
+ }
+ for (int i = lineStart; i < lineStop; ++i) {
+ char c = src[i];
+ if (c == '\n') {
+ lineSuffix = "\n"; // no ellipsis if we reach end-of-line
+ break;
+ }
+ switch (c) {
+ case '\t': lineText += " "; break;
+ case '\0': lineText += " "; break;
+ default: lineText += src[i]; break;
+ }
+ }
+ fErrorText += lineText + lineSuffix;
+
+ // print the carets underneath it, pointing to the range in question
+ for (int i = lineStart; i < (int)src.length(); i++) {
+ if (i >= pos.endOffset()) {
+ break;
+ }
+ switch (src[i]) {
+ case '\t':
+ caretText += (i >= pos.startOffset()) ? "^^^^" : " ";
+ break;
+ case '\n':
+ SkASSERT(i >= pos.startOffset());
+ // use an ellipsis if the error continues past the end of the line
+ caretText += (pos.endOffset() > i + 1) ? "..." : "^";
+ i = src.length();
+ break;
+ default:
+ caretText += (i >= pos.startOffset()) ? '^' : ' ';
+ break;
+ }
+ }
+ fErrorText += caretText + '\n';
+ }
+}
+
+std::string Compiler::errorText(bool showCount) {
+ if (showCount) {
+ this->writeErrorCount();
+ }
+ std::string result = fErrorText;
+ this->resetErrors();
+ return result;
+}
+
+void Compiler::writeErrorCount() {
+ int count = this->errorCount();
+ if (count) {
+ fErrorText += std::to_string(count) + " error";
+ if (count > 1) {
+ fErrorText += "s";
+ }
+ fErrorText += "\n";
+ }
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLCompiler.h b/gfx/skia/skia/src/sksl/SkSLCompiler.h
new file mode 100644
index 0000000000..382c69609b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLCompiler.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_COMPILER
+#define SKSL_COMPILER
+
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLContext.h" // IWYU pragma: keep
+
+#include <array>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <type_traits>
+#include <vector>
+
+#define SK_FRAGCOLOR_BUILTIN 10001
+#define SK_LASTFRAGCOLOR_BUILTIN 10008
+#define SK_MAIN_COORDS_BUILTIN 10009
+#define SK_INPUT_COLOR_BUILTIN 10010
+#define SK_DEST_COLOR_BUILTIN 10011
+#define SK_SECONDARYFRAGCOLOR_BUILTIN 10012
+#define SK_FRAGCOORD_BUILTIN 15
+#define SK_CLOCKWISE_BUILTIN 17
+
+#define SK_VERTEXID_BUILTIN 42
+#define SK_INSTANCEID_BUILTIN 43
+#define SK_POSITION_BUILTIN 0
+#define SK_POINTSIZE_BUILTIN 1
+
+#define SK_NUMWORKGROUPS_BUILTIN 24
+#define SK_WORKGROUPID_BUILTIN 26
+#define SK_LOCALINVOCATIONID_BUILTIN 27
+#define SK_GLOBALINVOCATIONID_BUILTIN 28
+#define SK_LOCALINVOCATIONINDEX_BUILTIN 29
+
+namespace SkSL {
+
+namespace dsl {
+ class DSLCore;
+}
+
+class Expression;
+class Inliner;
+class ModifiersPool;
+class OutputStream;
+class ProgramUsage;
+class SymbolTable;
+enum class ProgramKind : int8_t;
+struct Program;
+struct ProgramSettings;
+struct ShaderCaps;
+
+struct Module {
+ const Module* fParent = nullptr;
+ std::shared_ptr<SymbolTable> fSymbols;
+ std::vector<std::unique_ptr<ProgramElement>> fElements;
+};
+
+/**
+ * Main compiler entry point. The compiler parses the SkSL text directly into a tree of IRNodes,
+ * while performing basic optimizations such as constant-folding and dead-code elimination. Then the
+ * Program is passed into a CodeGenerator to produce compiled output.
+ *
+ * See the README for information about SkSL.
+ */
+class SK_API Compiler {
+public:
+ inline static constexpr const char FRAGCOLOR_NAME[] = "sk_FragColor";
+ inline static constexpr const char RTADJUST_NAME[] = "sk_RTAdjust";
+ inline static constexpr const char POSITION_NAME[] = "sk_Position";
+ inline static constexpr const char POISON_TAG[] = "<POISON>";
+
+ /**
+ * Gets a float4 that adjusts the position from Skia device coords to normalized device coords,
+ * used to populate sk_RTAdjust. Assuming the transformed position, pos, is a homogeneous
+ * float4, the vec, v, is applied as such:
+ * float4((pos.xy * v.xz) + sk_Position.ww * v.yw, 0, pos.w);
+ */
+ static std::array<float, 4> GetRTAdjustVector(SkISize rtDims, bool flipY) {
+ std::array<float, 4> result;
+ result[0] = 2.f/rtDims.width();
+ result[2] = 2.f/rtDims.height();
+ result[1] = -1.f;
+ result[3] = -1.f;
+ if (flipY) {
+ result[2] = -result[2];
+ result[3] = -result[3];
+ }
+ return result;
+ }
+
+ /**
+ * Uniform values used by the compiler to implement origin-neutral dFdy, sk_Clockwise, and
+ * sk_FragCoord.
+ */
+ static std::array<float, 2> GetRTFlipVector(int rtHeight, bool flipY) {
+ std::array<float, 2> result;
+ result[0] = flipY ? rtHeight : 0.f;
+ result[1] = flipY ? -1.f : 1.f;
+ return result;
+ }
+
+ Compiler(const ShaderCaps* caps);
+
+ ~Compiler();
+
+ Compiler(const Compiler&) = delete;
+ Compiler& operator=(const Compiler&) = delete;
+
+ /**
+ * Allows optimization settings to be unilaterally overridden. This is meant to allow tools like
+ * Viewer or Nanobench to override the compiler's ProgramSettings and ShaderCaps for debugging.
+ */
+ enum class OverrideFlag {
+ kDefault,
+ kOff,
+ kOn,
+ };
+ static void EnableOptimizer(OverrideFlag flag) { sOptimizer = flag; }
+ static void EnableInliner(OverrideFlag flag) { sInliner = flag; }
+
+ std::unique_ptr<Program> convertProgram(ProgramKind kind,
+ std::string text,
+ ProgramSettings settings);
+
+ std::unique_ptr<Expression> convertIdentifier(Position pos, std::string_view name);
+
+ bool toSPIRV(Program& program, OutputStream& out);
+
+ bool toSPIRV(Program& program, std::string* out);
+
+ bool toGLSL(Program& program, OutputStream& out);
+
+ bool toGLSL(Program& program, std::string* out);
+
+ bool toHLSL(Program& program, OutputStream& out);
+
+ bool toHLSL(Program& program, std::string* out);
+
+ bool toMetal(Program& program, OutputStream& out);
+
+ bool toMetal(Program& program, std::string* out);
+
+ bool toWGSL(Program& program, OutputStream& out);
+
+ void handleError(std::string_view msg, Position pos);
+
+ std::string errorText(bool showCount = true);
+
+ ErrorReporter& errorReporter() { return *fContext->fErrors; }
+
+ int errorCount() const { return fContext->fErrors->errorCount(); }
+
+ void writeErrorCount();
+
+ void resetErrors() {
+ fErrorText.clear();
+ this->errorReporter().resetErrorCount();
+ }
+
+ Context& context() const {
+ return *fContext;
+ }
+
+ std::shared_ptr<SymbolTable>& symbolTable() {
+ return fSymbolTable;
+ }
+
+ std::unique_ptr<Module> compileModule(ProgramKind kind,
+ const char* moduleName,
+ std::string moduleSource,
+ const Module* parent,
+ ModifiersPool& modifiersPool,
+ bool shouldInline);
+
+ /** Optimize a module at minification time, before writing it out. */
+ bool optimizeModuleBeforeMinifying(ProgramKind kind, Module& module);
+
+ const Module* moduleForProgramKind(ProgramKind kind);
+
+private:
+ class CompilerErrorReporter : public ErrorReporter {
+ public:
+ CompilerErrorReporter(Compiler* compiler)
+ : fCompiler(*compiler) {}
+
+ void handleError(std::string_view msg, Position pos) override {
+ fCompiler.handleError(msg, pos);
+ }
+
+ private:
+ Compiler& fCompiler;
+ };
+
+ /** Updates ProgramSettings to eliminate contradictions and to honor the ProgramKind. */
+ static void FinalizeSettings(ProgramSettings* settings, ProgramKind kind);
+
+ /** Optimize every function in the program. */
+ bool optimize(Program& program);
+
+ /** Performs final checks to confirm that a fully-assembled/optimized is valid. */
+ bool finalize(Program& program);
+
+ /** Optimize a module at Skia runtime, after loading it. */
+ bool optimizeModuleAfterLoading(ProgramKind kind, Module& module);
+
+ /** Flattens out function calls when it is safe to do so. */
+ bool runInliner(Inliner* inliner,
+ const std::vector<std::unique_ptr<ProgramElement>>& elements,
+ std::shared_ptr<SymbolTable> symbols,
+ ProgramUsage* usage);
+
+ CompilerErrorReporter fErrorReporter;
+ std::shared_ptr<Context> fContext;
+ const ShaderCaps* fCaps;
+
+ // This is the current symbol table of the code we are processing, and therefore changes during
+ // compilation
+ std::shared_ptr<SymbolTable> fSymbolTable;
+
+ std::string fErrorText;
+
+ static OverrideFlag sOptimizer;
+ static OverrideFlag sInliner;
+
+ friend class ThreadContext;
+ friend class dsl::DSLCore;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLConstantFolder.cpp b/gfx/skia/skia/src/sksl/SkSLConstantFolder.cpp
new file mode 100644
index 0000000000..76cf7f820a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLConstantFolder.cpp
@@ -0,0 +1,884 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLConstantFolder.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include <cstdint>
+#include <float.h>
+#include <limits>
+#include <optional>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+static bool is_vec_or_mat(const Type& type) {
+ switch (type.typeKind()) {
+ case Type::TypeKind::kMatrix:
+ case Type::TypeKind::kVector:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static std::unique_ptr<Expression> eliminate_no_op_boolean(Position pos,
+ const Expression& left,
+ Operator op,
+ const Expression& right) {
+ bool rightVal = right.as<Literal>().boolValue();
+
+ // Detect no-op Boolean expressions and optimize them away.
+ if ((op.kind() == Operator::Kind::LOGICALAND && rightVal) || // (expr && true) -> (expr)
+ (op.kind() == Operator::Kind::LOGICALOR && !rightVal) || // (expr || false) -> (expr)
+ (op.kind() == Operator::Kind::LOGICALXOR && !rightVal) || // (expr ^^ false) -> (expr)
+ (op.kind() == Operator::Kind::EQEQ && rightVal) || // (expr == true) -> (expr)
+ (op.kind() == Operator::Kind::NEQ && !rightVal)) { // (expr != false) -> (expr)
+
+ return left.clone(pos);
+ }
+
+ return nullptr;
+}
+
+static std::unique_ptr<Expression> short_circuit_boolean(Position pos,
+ const Expression& left,
+ Operator op,
+ const Expression& right) {
+ bool leftVal = left.as<Literal>().boolValue();
+
+ // When the literal is on the left, we can sometimes eliminate the other expression entirely.
+ if ((op.kind() == Operator::Kind::LOGICALAND && !leftVal) || // (false && expr) -> (false)
+ (op.kind() == Operator::Kind::LOGICALOR && leftVal)) { // (true || expr) -> (true)
+
+ return left.clone(pos);
+ }
+
+ // We can't eliminate the right-side expression via short-circuit, but we might still be able to
+ // simplify away a no-op expression.
+ return eliminate_no_op_boolean(pos, right, op, left);
+}
+
+static std::unique_ptr<Expression> simplify_constant_equality(const Context& context,
+ Position pos,
+ const Expression& left,
+ Operator op,
+ const Expression& right) {
+ if (op.kind() == Operator::Kind::EQEQ || op.kind() == Operator::Kind::NEQ) {
+ bool equality = (op.kind() == Operator::Kind::EQEQ);
+
+ switch (left.compareConstant(right)) {
+ case Expression::ComparisonResult::kNotEqual:
+ equality = !equality;
+ [[fallthrough]];
+
+ case Expression::ComparisonResult::kEqual:
+ return Literal::MakeBool(context, pos, equality);
+
+ case Expression::ComparisonResult::kUnknown:
+ break;
+ }
+ }
+ return nullptr;
+}
+
+static std::unique_ptr<Expression> simplify_matrix_multiplication(const Context& context,
+ Position pos,
+ const Expression& left,
+ const Expression& right,
+ int leftColumns,
+ int leftRows,
+ int rightColumns,
+ int rightRows) {
+ const Type& componentType = left.type().componentType();
+ SkASSERT(componentType.matches(right.type().componentType()));
+
+ // Fetch the left matrix.
+ double leftVals[4][4];
+ for (int c = 0; c < leftColumns; ++c) {
+ for (int r = 0; r < leftRows; ++r) {
+ leftVals[c][r] = *left.getConstantValue((c * leftRows) + r);
+ }
+ }
+ // Fetch the right matrix.
+ double rightVals[4][4];
+ for (int c = 0; c < rightColumns; ++c) {
+ for (int r = 0; r < rightRows; ++r) {
+ rightVals[c][r] = *right.getConstantValue((c * rightRows) + r);
+ }
+ }
+
+ SkASSERT(leftColumns == rightRows);
+ int outColumns = rightColumns,
+ outRows = leftRows;
+
+ ExpressionArray args;
+ args.reserve_back(outColumns * outRows);
+ for (int c = 0; c < outColumns; ++c) {
+ for (int r = 0; r < outRows; ++r) {
+ // Compute a dot product for this position.
+ double val = 0;
+ for (int dotIdx = 0; dotIdx < leftColumns; ++dotIdx) {
+ val += leftVals[dotIdx][r] * rightVals[c][dotIdx];
+ }
+ args.push_back(Literal::Make(pos, val, &componentType));
+ }
+ }
+
+ if (outColumns == 1) {
+ // Matrix-times-vector conceptually makes a 1-column N-row matrix, but we return vecN.
+ std::swap(outColumns, outRows);
+ }
+
+ const Type& resultType = componentType.toCompound(context, outColumns, outRows);
+ return ConstructorCompound::Make(context, pos, resultType, std::move(args));
+}
+
+static std::unique_ptr<Expression> simplify_matrix_times_matrix(const Context& context,
+ Position pos,
+ const Expression& left,
+ const Expression& right) {
+ const Type& leftType = left.type();
+ const Type& rightType = right.type();
+
+ SkASSERT(leftType.isMatrix());
+ SkASSERT(rightType.isMatrix());
+
+ return simplify_matrix_multiplication(context, pos, left, right,
+ leftType.columns(), leftType.rows(),
+ rightType.columns(), rightType.rows());
+}
+
+static std::unique_ptr<Expression> simplify_vector_times_matrix(const Context& context,
+ Position pos,
+ const Expression& left,
+ const Expression& right) {
+ const Type& leftType = left.type();
+ const Type& rightType = right.type();
+
+ SkASSERT(leftType.isVector());
+ SkASSERT(rightType.isMatrix());
+
+ return simplify_matrix_multiplication(context, pos, left, right,
+ /*leftColumns=*/leftType.columns(), /*leftRows=*/1,
+ rightType.columns(), rightType.rows());
+}
+
+static std::unique_ptr<Expression> simplify_matrix_times_vector(const Context& context,
+ Position pos,
+ const Expression& left,
+ const Expression& right) {
+ const Type& leftType = left.type();
+ const Type& rightType = right.type();
+
+ SkASSERT(leftType.isMatrix());
+ SkASSERT(rightType.isVector());
+
+ return simplify_matrix_multiplication(context, pos, left, right,
+ leftType.columns(), leftType.rows(),
+ /*rightColumns=*/1, /*rightRows=*/rightType.columns());
+}
+
+static std::unique_ptr<Expression> simplify_componentwise(const Context& context,
+ Position pos,
+ const Expression& left,
+ Operator op,
+ const Expression& right) {
+ SkASSERT(is_vec_or_mat(left.type()));
+ SkASSERT(left.type().matches(right.type()));
+ const Type& type = left.type();
+
+ // Handle equality operations: == !=
+ if (std::unique_ptr<Expression> result = simplify_constant_equality(context, pos, left, op,
+ right)) {
+ return result;
+ }
+
+ // Handle floating-point arithmetic: + - * /
+ using FoldFn = double (*)(double, double);
+ FoldFn foldFn;
+ switch (op.kind()) {
+ case Operator::Kind::PLUS: foldFn = +[](double a, double b) { return a + b; }; break;
+ case Operator::Kind::MINUS: foldFn = +[](double a, double b) { return a - b; }; break;
+ case Operator::Kind::STAR: foldFn = +[](double a, double b) { return a * b; }; break;
+ case Operator::Kind::SLASH: foldFn = +[](double a, double b) { return a / b; }; break;
+ default:
+ return nullptr;
+ }
+
+ const Type& componentType = type.componentType();
+ SkASSERT(componentType.isNumber());
+
+ double minimumValue = componentType.minimumValue();
+ double maximumValue = componentType.maximumValue();
+
+ ExpressionArray args;
+ int numSlots = type.slotCount();
+ args.reserve_back(numSlots);
+ for (int i = 0; i < numSlots; i++) {
+ double value = foldFn(*left.getConstantValue(i), *right.getConstantValue(i));
+ if (value < minimumValue || value > maximumValue) {
+ return nullptr;
+ }
+
+ args.push_back(Literal::Make(pos, value, &componentType));
+ }
+ return ConstructorCompound::Make(context, pos, type, std::move(args));
+}
+
+static std::unique_ptr<Expression> splat_scalar(const Context& context,
+ const Expression& scalar,
+ const Type& type) {
+ if (type.isVector()) {
+ return ConstructorSplat::Make(context, scalar.fPosition, type, scalar.clone());
+ }
+ if (type.isMatrix()) {
+ int numSlots = type.slotCount();
+ ExpressionArray splatMatrix;
+ splatMatrix.reserve_back(numSlots);
+ for (int index = 0; index < numSlots; ++index) {
+ splatMatrix.push_back(scalar.clone());
+ }
+ return ConstructorCompound::Make(context, scalar.fPosition, type, std::move(splatMatrix));
+ }
+ SkDEBUGFAILF("unsupported type %s", type.description().c_str());
+ return nullptr;
+}
+
+static std::unique_ptr<Expression> cast_expression(const Context& context,
+ Position pos,
+ const Expression& expr,
+ const Type& type) {
+ SkASSERT(type.componentType().matches(expr.type().componentType()));
+ if (expr.type().isScalar()) {
+ if (type.isMatrix()) {
+ return ConstructorDiagonalMatrix::Make(context, pos, type, expr.clone());
+ }
+ if (type.isVector()) {
+ return ConstructorSplat::Make(context, pos, type, expr.clone());
+ }
+ }
+ if (type.matches(expr.type())) {
+ return expr.clone(pos);
+ }
+ // We can't cast matrices into vectors or vice-versa.
+ return nullptr;
+}
+
+static std::unique_ptr<Expression> zero_expression(const Context& context,
+ Position pos,
+ const Type& type) {
+ std::unique_ptr<Expression> zero = Literal::Make(pos, 0.0, &type.componentType());
+ if (type.isScalar()) {
+ return zero;
+ }
+ if (type.isVector()) {
+ return ConstructorSplat::Make(context, pos, type, std::move(zero));
+ }
+ if (type.isMatrix()) {
+ return ConstructorDiagonalMatrix::Make(context, pos, type, std::move(zero));
+ }
+ SkDEBUGFAILF("unsupported type %s", type.description().c_str());
+ return nullptr;
+}
+
+static std::unique_ptr<Expression> negate_expression(const Context& context,
+ Position pos,
+ const Expression& expr,
+ const Type& type) {
+ std::unique_ptr<Expression> ctor = cast_expression(context, pos, expr, type);
+ return ctor ? PrefixExpression::Make(context, pos, Operator::Kind::MINUS, std::move(ctor))
+ : nullptr;
+}
+
+bool ConstantFolder::GetConstantInt(const Expression& value, SKSL_INT* out) {
+ const Expression* expr = GetConstantValueForVariable(value);
+ if (!expr->isIntLiteral()) {
+ return false;
+ }
+ *out = expr->as<Literal>().intValue();
+ return true;
+}
+
+bool ConstantFolder::GetConstantValue(const Expression& value, double* out) {
+ const Expression* expr = GetConstantValueForVariable(value);
+ if (!expr->is<Literal>()) {
+ return false;
+ }
+ *out = expr->as<Literal>().value();
+ return true;
+}
+
+static bool contains_constant_zero(const Expression& expr) {
+ int numSlots = expr.type().slotCount();
+ for (int index = 0; index < numSlots; ++index) {
+ std::optional<double> slotVal = expr.getConstantValue(index);
+ if (slotVal.has_value() && *slotVal == 0.0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Returns true if the expression contains `value` in every slot.
+static bool is_constant_splat(const Expression& expr, double value) {
+ int numSlots = expr.type().slotCount();
+ for (int index = 0; index < numSlots; ++index) {
+ std::optional<double> slotVal = expr.getConstantValue(index);
+ if (!slotVal.has_value() || *slotVal != value) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns true if the expression is a square diagonal matrix containing `value`.
+static bool is_constant_diagonal(const Expression& expr, double value) {
+ SkASSERT(expr.type().isMatrix());
+ int columns = expr.type().columns();
+ int rows = expr.type().rows();
+ if (columns != rows) {
+ return false;
+ }
+ int slotIdx = 0;
+ for (int c = 0; c < columns; ++c) {
+ for (int r = 0; r < rows; ++r) {
+ double expectation = (c == r) ? value : 0;
+ std::optional<double> slotVal = expr.getConstantValue(slotIdx++);
+ if (!slotVal.has_value() || *slotVal != expectation) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// Returns true if the expression is a scalar, vector, or diagonal matrix containing `value`.
+static bool is_constant_value(const Expression& expr, double value) {
+ return expr.type().isMatrix() ? is_constant_diagonal(expr, value)
+ : is_constant_splat(expr, value);
+}
+
+// The expression represents the right-hand side of a division op. If the division can be
+// strength-reduced into multiplication by a reciprocal, returns that reciprocal as an expression.
+// Note that this only supports literal values with safe-to-use reciprocals, and returns null if
+// Expression contains anything else.
+static std::unique_ptr<Expression> make_reciprocal_expression(const Context& context,
+ const Expression& right) {
+ if (right.type().isMatrix() || !right.type().componentType().isFloat()) {
+ return nullptr;
+ }
+ // Verify that each slot contains a finite, non-zero literal, take its reciprocal.
+ int nslots = right.type().slotCount();
+ SkSTArray<4, double> values;
+ for (int index = 0; index < nslots; ++index) {
+ std::optional<double> value = right.getConstantValue(index);
+ if (!value) {
+ return nullptr;
+ }
+ *value = sk_ieee_double_divide(1.0, *value);
+ if (*value >= -FLT_MAX && *value <= FLT_MAX && *value != 0.0) {
+ // The reciprocal can be represented safely as a finite 32-bit float.
+ values.push_back(*value);
+ } else {
+ // The value is outside the 32-bit float range, or is NaN; do not optimize.
+ return nullptr;
+ }
+ }
+ // Convert our reciprocal values to Literals.
+ ExpressionArray exprs;
+ exprs.reserve_back(nslots);
+ for (double value : values) {
+ exprs.push_back(Literal::Make(right.fPosition, value, &right.type().componentType()));
+ }
+ // Turn the expression array into a compound constructor. (If this is a single-slot expression,
+ // this will return the literal as-is.)
+ return ConstructorCompound::Make(context, right.fPosition, right.type(), std::move(exprs));
+}
+
+static bool error_on_divide_by_zero(const Context& context, Position pos, Operator op,
+ const Expression& right) {
+ switch (op.kind()) {
+ case Operator::Kind::SLASH:
+ case Operator::Kind::SLASHEQ:
+ case Operator::Kind::PERCENT:
+ case Operator::Kind::PERCENTEQ:
+ if (contains_constant_zero(right)) {
+ context.fErrors->error(pos, "division by zero");
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+const Expression* ConstantFolder::GetConstantValueOrNullForVariable(const Expression& inExpr) {
+ for (const Expression* expr = &inExpr;;) {
+ if (!expr->is<VariableReference>()) {
+ break;
+ }
+ const VariableReference& varRef = expr->as<VariableReference>();
+ if (varRef.refKind() != VariableRefKind::kRead) {
+ break;
+ }
+ const Variable& var = *varRef.variable();
+ if (!(var.modifiers().fFlags & Modifiers::kConst_Flag)) {
+ break;
+ }
+ expr = var.initialValue();
+ if (!expr) {
+ // Function parameters can be const but won't have an initial value.
+ break;
+ }
+ if (Analysis::IsCompileTimeConstant(*expr)) {
+ return expr;
+ }
+ }
+ // We didn't find a compile-time constant at the end.
+ return nullptr;
+}
+
+const Expression* ConstantFolder::GetConstantValueForVariable(const Expression& inExpr) {
+ const Expression* expr = GetConstantValueOrNullForVariable(inExpr);
+ return expr ? expr : &inExpr;
+}
+
+std::unique_ptr<Expression> ConstantFolder::MakeConstantValueForVariable(
+ Position pos, std::unique_ptr<Expression> inExpr) {
+ const Expression* expr = GetConstantValueOrNullForVariable(*inExpr);
+ return expr ? expr->clone(pos) : std::move(inExpr);
+}
+
+static bool is_scalar_op_matrix(const Expression& left, const Expression& right) {
+ return left.type().isScalar() && right.type().isMatrix();
+}
+
+static bool is_matrix_op_scalar(const Expression& left, const Expression& right) {
+ return is_scalar_op_matrix(right, left);
+}
+
+static std::unique_ptr<Expression> simplify_arithmetic(const Context& context,
+ Position pos,
+ const Expression& left,
+ Operator op,
+ const Expression& right,
+ const Type& resultType) {
+ switch (op.kind()) {
+ case Operator::Kind::PLUS:
+ if (!is_scalar_op_matrix(left, right) && is_constant_splat(right, 0.0)) { // x + 0
+ if (std::unique_ptr<Expression> expr = cast_expression(context, pos, left,
+ resultType)) {
+ return expr;
+ }
+ }
+ if (!is_matrix_op_scalar(left, right) && is_constant_splat(left, 0.0)) { // 0 + x
+ if (std::unique_ptr<Expression> expr = cast_expression(context, pos, right,
+ resultType)) {
+ return expr;
+ }
+ }
+ break;
+
+ case Operator::Kind::STAR:
+ if (is_constant_value(right, 1.0)) { // x * 1
+ if (std::unique_ptr<Expression> expr = cast_expression(context, pos, left,
+ resultType)) {
+ return expr;
+ }
+ }
+ if (is_constant_value(left, 1.0)) { // 1 * x
+ if (std::unique_ptr<Expression> expr = cast_expression(context, pos, right,
+ resultType)) {
+ return expr;
+ }
+ }
+ if (is_constant_value(right, 0.0) && !Analysis::HasSideEffects(left)) { // x * 0
+ return zero_expression(context, pos, resultType);
+ }
+ if (is_constant_value(left, 0.0) && !Analysis::HasSideEffects(right)) { // 0 * x
+ return zero_expression(context, pos, resultType);
+ }
+ if (is_constant_value(right, -1.0)) { // x * -1 (to `-x`)
+ if (std::unique_ptr<Expression> expr = negate_expression(context, pos, left,
+ resultType)) {
+ return expr;
+ }
+ }
+ if (is_constant_value(left, -1.0)) { // -1 * x (to `-x`)
+ if (std::unique_ptr<Expression> expr = negate_expression(context, pos, right,
+ resultType)) {
+ return expr;
+ }
+ }
+ break;
+
+ case Operator::Kind::MINUS:
+ if (!is_scalar_op_matrix(left, right) && is_constant_splat(right, 0.0)) { // x - 0
+ if (std::unique_ptr<Expression> expr = cast_expression(context, pos, left,
+ resultType)) {
+ return expr;
+ }
+ }
+ if (!is_matrix_op_scalar(left, right) && is_constant_splat(left, 0.0)) { // 0 - x
+ if (std::unique_ptr<Expression> expr = negate_expression(context, pos, right,
+ resultType)) {
+ return expr;
+ }
+ }
+ break;
+
+ case Operator::Kind::SLASH:
+ if (!is_scalar_op_matrix(left, right) && is_constant_splat(right, 1.0)) { // x / 1
+ if (std::unique_ptr<Expression> expr = cast_expression(context, pos, left,
+ resultType)) {
+ return expr;
+ }
+ }
+ if (!left.type().isMatrix()) { // convert `x / 2` into `x * 0.5`
+ if (std::unique_ptr<Expression> expr = make_reciprocal_expression(context, right)) {
+ return BinaryExpression::Make(context, pos, left.clone(), Operator::Kind::STAR,
+ std::move(expr));
+ }
+ }
+ break;
+
+ case Operator::Kind::PLUSEQ:
+ case Operator::Kind::MINUSEQ:
+ if (is_constant_splat(right, 0.0)) { // x += 0, x -= 0
+ if (std::unique_ptr<Expression> var = cast_expression(context, pos, left,
+ resultType)) {
+ Analysis::UpdateVariableRefKind(var.get(), VariableRefKind::kRead);
+ return var;
+ }
+ }
+ break;
+
+ case Operator::Kind::STAREQ:
+ if (is_constant_value(right, 1.0)) { // x *= 1
+ if (std::unique_ptr<Expression> var = cast_expression(context, pos, left,
+ resultType)) {
+ Analysis::UpdateVariableRefKind(var.get(), VariableRefKind::kRead);
+ return var;
+ }
+ }
+ break;
+
+ case Operator::Kind::SLASHEQ:
+ if (is_constant_splat(right, 1.0)) { // x /= 1
+ if (std::unique_ptr<Expression> var = cast_expression(context, pos, left,
+ resultType)) {
+ Analysis::UpdateVariableRefKind(var.get(), VariableRefKind::kRead);
+ return var;
+ }
+ }
+ if (std::unique_ptr<Expression> expr = make_reciprocal_expression(context, right)) {
+ return BinaryExpression::Make(context, pos, left.clone(), Operator::Kind::STAREQ,
+ std::move(expr));
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return nullptr;
+}
+
+// The expression must be scalar, and represents the right-hand side of a division op. It can
+// contain anything, not just literal values. This returns the binary expression `1.0 / expr`. The
+// expression might be further simplified by the constant folding, if possible.
+static std::unique_ptr<Expression> one_over_scalar(const Context& context,
+ const Expression& right) {
+ SkASSERT(right.type().isScalar());
+ Position pos = right.fPosition;
+ return BinaryExpression::Make(context, pos,
+ Literal::Make(pos, 1.0, &right.type()),
+ Operator::Kind::SLASH,
+ right.clone());
+}
+
+static std::unique_ptr<Expression> simplify_matrix_division(const Context& context,
+ Position pos,
+ const Expression& left,
+ Operator op,
+ const Expression& right,
+ const Type& resultType) {
+ // Convert matrix-over-scalar `x /= y` into `x *= (1.0 / y)`. This generates better
+ // code in SPIR-V and Metal, and should be roughly equivalent elsewhere.
+ switch (op.kind()) {
+ case OperatorKind::SLASH:
+ case OperatorKind::SLASHEQ:
+ if (left.type().isMatrix() && right.type().isScalar()) {
+ Operator multiplyOp = op.isAssignment() ? OperatorKind::STAREQ
+ : OperatorKind::STAR;
+ return BinaryExpression::Make(context, pos,
+ left.clone(),
+ multiplyOp,
+ one_over_scalar(context, right));
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return nullptr;
+}
+
+static std::unique_ptr<Expression> fold_expression(Position pos,
+ double result,
+ const Type* resultType) {
+ if (resultType->isNumber()) {
+ if (result >= resultType->minimumValue() && result <= resultType->maximumValue()) {
+ // This result will fit inside its type.
+ } else {
+ // The value is outside the range or is NaN (all if-checks fail); do not optimize.
+ return nullptr;
+ }
+ }
+
+ return Literal::Make(pos, result, resultType);
+}
+
+std::unique_ptr<Expression> ConstantFolder::Simplify(const Context& context,
+ Position pos,
+ const Expression& leftExpr,
+ Operator op,
+ const Expression& rightExpr,
+ const Type& resultType) {
+ // Replace constant variables with their literal values.
+ const Expression* left = GetConstantValueForVariable(leftExpr);
+ const Expression* right = GetConstantValueForVariable(rightExpr);
+
+ // If this is the assignment operator, and both sides are the same trivial expression, this is
+ // self-assignment (i.e., `var = var`) and can be reduced to just a variable reference (`var`).
+ // This can happen when other parts of the assignment are optimized away.
+ if (op.kind() == Operator::Kind::EQ && Analysis::IsSameExpressionTree(*left, *right)) {
+ return right->clone(pos);
+ }
+
+ // Simplify the expression when both sides are constant Boolean literals.
+ if (left->isBoolLiteral() && right->isBoolLiteral()) {
+ bool leftVal = left->as<Literal>().boolValue();
+ bool rightVal = right->as<Literal>().boolValue();
+ bool result;
+ switch (op.kind()) {
+ case Operator::Kind::LOGICALAND: result = leftVal && rightVal; break;
+ case Operator::Kind::LOGICALOR: result = leftVal || rightVal; break;
+ case Operator::Kind::LOGICALXOR: result = leftVal ^ rightVal; break;
+ case Operator::Kind::EQEQ: result = leftVal == rightVal; break;
+ case Operator::Kind::NEQ: result = leftVal != rightVal; break;
+ default: return nullptr;
+ }
+ return Literal::MakeBool(context, pos, result);
+ }
+
+ // If the left side is a Boolean literal, apply short-circuit optimizations.
+ if (left->isBoolLiteral()) {
+ return short_circuit_boolean(pos, *left, op, *right);
+ }
+
+ // If the right side is a Boolean literal...
+ if (right->isBoolLiteral()) {
+ // ... and the left side has no side effects...
+ if (!Analysis::HasSideEffects(*left)) {
+ // We can reverse the expressions and short-circuit optimizations are still valid.
+ return short_circuit_boolean(pos, *right, op, *left);
+ }
+
+ // We can't use short-circuiting, but we can still optimize away no-op Boolean expressions.
+ return eliminate_no_op_boolean(pos, *left, op, *right);
+ }
+
+ if (op.kind() == Operator::Kind::EQEQ && Analysis::IsSameExpressionTree(*left, *right)) {
+ // With == comparison, if both sides are the same trivial expression, this is self-
+ // comparison and is always true. (We are not concerned with NaN.)
+ return Literal::MakeBool(context, pos, /*value=*/true);
+ }
+
+ if (op.kind() == Operator::Kind::NEQ && Analysis::IsSameExpressionTree(*left, *right)) {
+ // With != comparison, if both sides are the same trivial expression, this is self-
+ // comparison and is always false. (We are not concerned with NaN.)
+ return Literal::MakeBool(context, pos, /*value=*/false);
+ }
+
+ if (error_on_divide_by_zero(context, pos, op, *right)) {
+ return nullptr;
+ }
+
+ // Perform full constant folding when both sides are compile-time constants.
+ const Type& leftType = left->type();
+ const Type& rightType = right->type();
+ bool leftSideIsConstant = Analysis::IsCompileTimeConstant(*left);
+ bool rightSideIsConstant = Analysis::IsCompileTimeConstant(*right);
+
+ if (leftSideIsConstant && rightSideIsConstant) {
+ // Handle pairs of integer literals.
+ if (left->isIntLiteral() && right->isIntLiteral()) {
+ using SKSL_UINT = uint64_t;
+ SKSL_INT leftVal = left->as<Literal>().intValue();
+ SKSL_INT rightVal = right->as<Literal>().intValue();
+
+ // Note that fold_expression returns null if the result would overflow its type.
+ #define RESULT(Op) fold_expression(pos, (SKSL_INT)(leftVal) Op \
+ (SKSL_INT)(rightVal), &resultType)
+ #define URESULT(Op) fold_expression(pos, (SKSL_INT)((SKSL_UINT)(leftVal) Op \
+ (SKSL_UINT)(rightVal)), &resultType)
+ switch (op.kind()) {
+ case Operator::Kind::PLUS: return URESULT(+);
+ case Operator::Kind::MINUS: return URESULT(-);
+ case Operator::Kind::STAR: return URESULT(*);
+ case Operator::Kind::SLASH:
+ if (leftVal == std::numeric_limits<SKSL_INT>::min() && rightVal == -1) {
+ context.fErrors->error(pos, "arithmetic overflow");
+ return nullptr;
+ }
+ return RESULT(/);
+ case Operator::Kind::PERCENT:
+ if (leftVal == std::numeric_limits<SKSL_INT>::min() && rightVal == -1) {
+ context.fErrors->error(pos, "arithmetic overflow");
+ return nullptr;
+ }
+ return RESULT(%);
+ case Operator::Kind::BITWISEAND: return RESULT(&);
+ case Operator::Kind::BITWISEOR: return RESULT(|);
+ case Operator::Kind::BITWISEXOR: return RESULT(^);
+ case Operator::Kind::EQEQ: return RESULT(==);
+ case Operator::Kind::NEQ: return RESULT(!=);
+ case Operator::Kind::GT: return RESULT(>);
+ case Operator::Kind::GTEQ: return RESULT(>=);
+ case Operator::Kind::LT: return RESULT(<);
+ case Operator::Kind::LTEQ: return RESULT(<=);
+ case Operator::Kind::SHL:
+ if (rightVal >= 0 && rightVal <= 31) {
+ // Left-shifting a negative (or really, any signed) value is undefined
+ // behavior in C++, but not in GLSL. Do the shift on unsigned values to avoid
+ // triggering an UBSAN error.
+ return URESULT(<<);
+ }
+ context.fErrors->error(pos, "shift value out of range");
+ return nullptr;
+ case Operator::Kind::SHR:
+ if (rightVal >= 0 && rightVal <= 31) {
+ return RESULT(>>);
+ }
+ context.fErrors->error(pos, "shift value out of range");
+ return nullptr;
+
+ default:
+ return nullptr;
+ }
+ #undef RESULT
+ #undef URESULT
+ }
+
+ // Handle pairs of floating-point literals.
+ if (left->isFloatLiteral() && right->isFloatLiteral()) {
+ SKSL_FLOAT leftVal = left->as<Literal>().floatValue();
+ SKSL_FLOAT rightVal = right->as<Literal>().floatValue();
+
+ #define RESULT(Op) fold_expression(pos, leftVal Op rightVal, &resultType)
+ switch (op.kind()) {
+ case Operator::Kind::PLUS: return RESULT(+);
+ case Operator::Kind::MINUS: return RESULT(-);
+ case Operator::Kind::STAR: return RESULT(*);
+ case Operator::Kind::SLASH: return RESULT(/);
+ case Operator::Kind::EQEQ: return RESULT(==);
+ case Operator::Kind::NEQ: return RESULT(!=);
+ case Operator::Kind::GT: return RESULT(>);
+ case Operator::Kind::GTEQ: return RESULT(>=);
+ case Operator::Kind::LT: return RESULT(<);
+ case Operator::Kind::LTEQ: return RESULT(<=);
+ default: return nullptr;
+ }
+ #undef RESULT
+ }
+
+ // Perform matrix multiplication.
+ if (op.kind() == Operator::Kind::STAR) {
+ if (leftType.isMatrix() && rightType.isMatrix()) {
+ return simplify_matrix_times_matrix(context, pos, *left, *right);
+ }
+ if (leftType.isVector() && rightType.isMatrix()) {
+ return simplify_vector_times_matrix(context, pos, *left, *right);
+ }
+ if (leftType.isMatrix() && rightType.isVector()) {
+ return simplify_matrix_times_vector(context, pos, *left, *right);
+ }
+ }
+
+ // Perform constant folding on pairs of vectors/matrices.
+ if (is_vec_or_mat(leftType) && leftType.matches(rightType)) {
+ return simplify_componentwise(context, pos, *left, op, *right);
+ }
+
+ // Perform constant folding on vectors/matrices against scalars, e.g.: half4(2) + 2
+ if (rightType.isScalar() && is_vec_or_mat(leftType) &&
+ leftType.componentType().matches(rightType)) {
+ return simplify_componentwise(context, pos,
+ *left, op, *splat_scalar(context, *right, left->type()));
+ }
+
+ // Perform constant folding on scalars against vectors/matrices, e.g.: 2 + half4(2)
+ if (leftType.isScalar() && is_vec_or_mat(rightType) &&
+ rightType.componentType().matches(leftType)) {
+ return simplify_componentwise(context, pos,
+ *splat_scalar(context, *left, right->type()), op, *right);
+ }
+
+ // Perform constant folding on pairs of matrices, arrays or structs.
+ if ((leftType.isMatrix() && rightType.isMatrix()) ||
+ (leftType.isArray() && rightType.isArray()) ||
+ (leftType.isStruct() && rightType.isStruct())) {
+ return simplify_constant_equality(context, pos, *left, op, *right);
+ }
+ }
+
+ if (context.fConfig->fSettings.fOptimize) {
+ // If just one side is constant, we might still be able to simplify arithmetic expressions
+ // like `x * 1`, `x *= 1`, `x + 0`, `x * 0`, `0 / x`, etc.
+ if (leftSideIsConstant || rightSideIsConstant) {
+ if (std::unique_ptr<Expression> expr = simplify_arithmetic(context, pos, *left, op,
+ *right, resultType)) {
+ return expr;
+ }
+ }
+
+ // We can simplify some forms of matrix division even when neither side is constant.
+ if (std::unique_ptr<Expression> expr = simplify_matrix_division(context, pos, *left, op,
+ *right, resultType)) {
+ return expr;
+ }
+ }
+
+ // We aren't able to constant-fold.
+ return nullptr;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLConstantFolder.h b/gfx/skia/skia/src/sksl/SkSLConstantFolder.h
new file mode 100644
index 0000000000..25dd2e7b86
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLConstantFolder.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTANT_FOLDER
+#define SKSL_CONSTANT_FOLDER
+
+#include <memory>
+
+#include "include/private/SkSLDefines.h"
+#include "include/sksl/SkSLOperator.h"
+
+namespace SkSL {
+
+class Context;
+class Expression;
+class Position;
+class Type;
+
+/**
+ * Performs constant folding on IR expressions. This simplifies expressions containing
+ * compile-time constants, such as replacing `Literal(2) + Literal(2)` with `Literal(4)`.
+ */
+class ConstantFolder {
+public:
+ /**
+ * If value is an int literal or const int variable with a known value, returns true and stores
+ * the value in out. Otherwise returns false.
+ */
+ static bool GetConstantInt(const Expression& value, SKSL_INT* out);
+
+ /**
+ * If value is a literal or const scalar variable with a known value, returns true and stores
+ * the value in out. Otherwise returns false.
+ */
+ static bool GetConstantValue(const Expression& value, double* out);
+
+ /**
+ * If the expression is a const variable with a known compile-time-constant value, returns that
+ * value. If not, returns the original expression as-is.
+ */
+ static const Expression* GetConstantValueForVariable(const Expression& value);
+
+ /**
+ * If the expression is a const variable with a known compile-time-constant value, returns that
+ * value. If not, returns null.
+ */
+ static const Expression* GetConstantValueOrNullForVariable(const Expression& value);
+
+ /**
+ * If the expression is a const variable with a known compile-time-constant value, returns a
+ * clone of that value. If not, returns the original expression as-is.
+ */
+ static std::unique_ptr<Expression> MakeConstantValueForVariable(Position pos,
+ std::unique_ptr<Expression> expr);
+
+ /** Simplifies the binary expression `left OP right`. Returns null if it can't be simplified. */
+ static std::unique_ptr<Expression> Simplify(const Context& context,
+ Position pos,
+ const Expression& left,
+ Operator op,
+ const Expression& right,
+ const Type& resultType);
+};
+
+} // namespace SkSL
+
+#endif // SKSL_CONSTANT_FOLDER
diff --git a/gfx/skia/skia/src/sksl/SkSLContext.cpp b/gfx/skia/skia/src/sksl/SkSLContext.cpp
new file mode 100644
index 0000000000..d28fc9d727
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLContext.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLContext.h"
+
+#include "include/core/SkTypes.h"
+#ifdef SK_DEBUG
+#include "src/sksl/SkSLPool.h"
+#endif
+
+namespace SkSL {
+
+Context::Context(const BuiltinTypes& types, const ShaderCaps* caps, ErrorReporter& errors)
+ : fTypes(types)
+ , fCaps(caps)
+ , fErrors(&errors) {
+ SkASSERT(!Pool::IsAttached());
+}
+
+Context::~Context() {
+ SkASSERT(!Pool::IsAttached());
+}
+
+} // namespace SkSL
+
diff --git a/gfx/skia/skia/src/sksl/SkSLContext.h b/gfx/skia/skia/src/sksl/SkSLContext.h
new file mode 100644
index 0000000000..e83f2f36ec
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLContext.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONTEXT
+#define SKSL_CONTEXT
+
+namespace SkSL {
+
+class BuiltinTypes;
+class ErrorReporter;
+class ModifiersPool;
+struct Module;
+struct ProgramConfig;
+struct ShaderCaps;
+
+/**
+ * Contains compiler-wide objects, which currently means the core types.
+ */
+class Context {
+public:
+ Context(const BuiltinTypes& types, const ShaderCaps* caps, ErrorReporter& errors);
+ ~Context();
+
+ // The Context holds a reference to all of the built-in types.
+ const BuiltinTypes& fTypes;
+
+ // The Context holds a reference to our shader caps bits.
+ const ShaderCaps* fCaps;
+
+ // The Context holds a pointer to our pool of modifiers.
+ ModifiersPool* fModifiersPool = nullptr;
+
+ // The Context holds a pointer to the configuration of the program being compiled.
+ ProgramConfig* fConfig = nullptr;
+
+ // The Context holds a pointer to our error reporter.
+ ErrorReporter* fErrors;
+
+ // The Context holds a pointer to our module with built-in declarations.
+ const Module* fModule = nullptr;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLErrorReporter.cpp b/gfx/skia/skia/src/sksl/SkSLErrorReporter.cpp
new file mode 100644
index 0000000000..a11234ff5e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLErrorReporter.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/SkSLErrorReporter.h"
+
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkStringView.h"
+#include "src/sksl/SkSLCompiler.h"
+
+namespace SkSL {
+
+void ErrorReporter::error(Position position, std::string_view msg) {
+ if (skstd::contains(msg, Compiler::POISON_TAG)) {
+ // Don't report errors on poison values.
+ return;
+ }
+ ++fErrorCount;
+ this->handleError(msg, position);
+}
+
+void TestingOnly_AbortErrorReporter::handleError(std::string_view msg, Position pos) {
+ SK_ABORT("%.*s", (int)msg.length(), msg.data());
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLFileOutputStream.h b/gfx/skia/skia/src/sksl/SkSLFileOutputStream.h
new file mode 100644
index 0000000000..26f59edefc
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLFileOutputStream.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FILEOUTPUTSTREAM
+#define SKSL_FILEOUTPUTSTREAM
+
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLUtil.h"
+#include <stdio.h>
+
+namespace SkSL {
+
+class FileOutputStream : public OutputStream {
+public:
+ FileOutputStream(const char* name) {
+ fFile = fopen(name, "wb");
+ }
+
+ ~FileOutputStream() override {
+ if (fOpen) {
+ close();
+ }
+ }
+
+ bool isValid() const override {
+ return nullptr != fFile;
+ }
+
+ void write8(uint8_t b) override {
+ SkASSERT(fOpen);
+ if (isValid()) {
+ if (EOF == fputc(b, fFile)) {
+ fFile = nullptr;
+ }
+ }
+ }
+
+ void writeText(const char* s) override {
+ SkASSERT(fOpen);
+ if (isValid()) {
+ if (EOF == fputs(s, fFile)) {
+ fFile = nullptr;
+ }
+ }
+ }
+
+ void write(const void* s, size_t size) override {
+ if (isValid()) {
+ size_t written = fwrite(s, 1, size, fFile);
+ if (written != size) {
+ fFile = nullptr;
+ }
+ }
+ }
+
+ bool close() {
+ fOpen = false;
+ if (isValid() && fclose(fFile)) {
+ fFile = nullptr;
+ return false;
+ }
+ return true;
+ }
+
+private:
+ bool fOpen = true;
+ FILE *fFile;
+
+ using INHERITED = OutputStream;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLGLSL.h b/gfx/skia/skia/src/sksl/SkSLGLSL.h
new file mode 100644
index 0000000000..55c8bc87e5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLGLSL.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLGLSL_DEFINED
+#define SkSLGLSL_DEFINED
+
+namespace SkSL {
+
+// Limited set of GLSL versions we build shaders for. Caller should round
+// down the GLSL version to one of these enums.
+enum class GLSLGeneration {
+ /**
+ * Desktop GLSL 1.10 and ES2 shading language (based on desktop GLSL 1.20)
+ */
+ k110,
+ k100es = k110,
+ /**
+ * Desktop GLSL 1.30
+ */
+ k130,
+ /**
+ * Desktop GLSL 1.40
+ */
+ k140,
+ /**
+ * Desktop GLSL 1.50
+ */
+ k150,
+ /**
+ * Desktop GLSL 3.30, and ES GLSL 3.00
+ */
+ k330,
+ k300es = k330,
+ /**
+ * Desktop GLSL 4.00
+ */
+ k400,
+ /**
+ * Desktop GLSL 4.20
+ */
+ k420,
+ /**
+     * ES GLSL 3.10 only. TODO: Make GLSLCap objects to make this more granular.
+ */
+ k310es,
+ /**
+ * ES GLSL 3.20
+ */
+ k320es,
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLInliner.cpp b/gfx/skia/skia/src/sksl/SkSLInliner.cpp
new file mode 100644
index 0000000000..d90227e3bb
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLInliner.cpp
@@ -0,0 +1,1062 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLInliner.h"
+
+#ifndef SK_ENABLE_OPTIMIZE_SIZE
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLChildCall.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLConstructorArray.h"
+#include "src/sksl/ir/SkSLConstructorArrayCast.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorCompoundCast.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorMatrixResize.h"
+#include "src/sksl/ir/SkSLConstructorScalarCast.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLConstructorStruct.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+
+namespace SkSL {
+namespace {
+
+static constexpr int kInlinedStatementLimit = 2500;
+
+static std::unique_ptr<Statement>* find_parent_statement(
+ const std::vector<std::unique_ptr<Statement>*>& stmtStack) {
+ SkASSERT(!stmtStack.empty());
+
+ // Walk the statement stack from back to front, ignoring the last element (which is the
+ // enclosing statement).
+ auto iter = stmtStack.rbegin();
+ ++iter;
+
+ // Anything counts as a parent statement other than a scopeless Block.
+ for (; iter != stmtStack.rend(); ++iter) {
+ std::unique_ptr<Statement>* stmt = *iter;
+ if (!(*stmt)->is<Block>() || (*stmt)->as<Block>().isScope()) {
+ return stmt;
+ }
+ }
+
+ // There wasn't any parent statement to be found.
+ return nullptr;
+}
+
+std::unique_ptr<Expression> clone_with_ref_kind(const Expression& expr,
+ VariableReference::RefKind refKind) {
+ std::unique_ptr<Expression> clone = expr.clone();
+ Analysis::UpdateVariableRefKind(clone.get(), refKind);
+ return clone;
+}
+
+} // namespace
+
+const Variable* Inliner::RemapVariable(const Variable* variable,
+ const VariableRewriteMap* varMap) {
+ std::unique_ptr<Expression>* remap = varMap->find(variable);
+ if (!remap) {
+ SkDEBUGFAILF("rewrite map does not contain variable '%.*s'",
+ (int)variable->name().size(), variable->name().data());
+ return variable;
+ }
+ Expression* expr = remap->get();
+ SkASSERT(expr);
+ if (!expr->is<VariableReference>()) {
+ SkDEBUGFAILF("rewrite map contains non-variable replacement for '%.*s'",
+ (int)variable->name().size(), variable->name().data());
+ return variable;
+ }
+ return expr->as<VariableReference>().variable();
+}
+
+void Inliner::ensureScopedBlocks(Statement* inlinedBody, Statement* parentStmt) {
+ // No changes necessary if this statement isn't actually a block.
+ if (!inlinedBody || !inlinedBody->is<Block>()) {
+ return;
+ }
+
+ // No changes necessary if the parent statement doesn't require a scope.
+ if (!parentStmt || !(parentStmt->is<IfStatement>() || parentStmt->is<ForStatement>() ||
+ parentStmt->is<DoStatement>())) {
+ return;
+ }
+
+ Block& block = inlinedBody->as<Block>();
+
+ // The inliner will create inlined function bodies as a Block containing multiple statements,
+ // but no scope. Normally, this is fine, but if this block is used as the statement for a
+ // do/for/if/while, the block needs to be scoped for the generated code to match the intent.
+ // In the case of Blocks nested inside other Blocks, we add the scope to the outermost block if
+ // needed.
+ for (Block* nestedBlock = &block;; ) {
+ if (nestedBlock->isScope()) {
+ // We found an explicit scope; all is well.
+ return;
+ }
+ if (nestedBlock->children().size() == 1 && nestedBlock->children()[0]->is<Block>()) {
+ // This block wraps another unscoped block; we need to go deeper.
+ nestedBlock = &nestedBlock->children()[0]->as<Block>();
+ continue;
+ }
+ // We found a block containing real statements (not just more blocks), but no scope.
+ // Let's add a scope to the outermost block.
+ block.setBlockKind(Block::Kind::kBracedScope);
+ return;
+ }
+}
+
+std::unique_ptr<Expression> Inliner::inlineExpression(Position pos,
+ VariableRewriteMap* varMap,
+ SymbolTable* symbolTableForExpression,
+ const Expression& expression) {
+ auto expr = [&](const std::unique_ptr<Expression>& e) -> std::unique_ptr<Expression> {
+ if (e) {
+ return this->inlineExpression(pos, varMap, symbolTableForExpression, *e);
+ }
+ return nullptr;
+ };
+ auto argList = [&](const ExpressionArray& originalArgs) -> ExpressionArray {
+ ExpressionArray args;
+ args.reserve_back(originalArgs.size());
+ for (const std::unique_ptr<Expression>& arg : originalArgs) {
+ args.push_back(expr(arg));
+ }
+ return args;
+ };
+
+ switch (expression.kind()) {
+ case Expression::Kind::kBinary: {
+ const BinaryExpression& binaryExpr = expression.as<BinaryExpression>();
+ return BinaryExpression::Make(*fContext,
+ pos,
+ expr(binaryExpr.left()),
+ binaryExpr.getOperator(),
+ expr(binaryExpr.right()));
+ }
+ case Expression::Kind::kLiteral:
+ return expression.clone();
+ case Expression::Kind::kChildCall: {
+ const ChildCall& childCall = expression.as<ChildCall>();
+ return ChildCall::Make(*fContext,
+ pos,
+ childCall.type().clone(symbolTableForExpression),
+ childCall.child(),
+ argList(childCall.arguments()));
+ }
+ case Expression::Kind::kConstructorArray: {
+ const ConstructorArray& ctor = expression.as<ConstructorArray>();
+ return ConstructorArray::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ argList(ctor.arguments()));
+ }
+ case Expression::Kind::kConstructorArrayCast: {
+ const ConstructorArrayCast& ctor = expression.as<ConstructorArrayCast>();
+ return ConstructorArrayCast::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ expr(ctor.argument()));
+ }
+ case Expression::Kind::kConstructorCompound: {
+ const ConstructorCompound& ctor = expression.as<ConstructorCompound>();
+ return ConstructorCompound::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ argList(ctor.arguments()));
+ }
+ case Expression::Kind::kConstructorCompoundCast: {
+ const ConstructorCompoundCast& ctor = expression.as<ConstructorCompoundCast>();
+ return ConstructorCompoundCast::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ expr(ctor.argument()));
+ }
+ case Expression::Kind::kConstructorDiagonalMatrix: {
+ const ConstructorDiagonalMatrix& ctor = expression.as<ConstructorDiagonalMatrix>();
+ return ConstructorDiagonalMatrix::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ expr(ctor.argument()));
+ }
+ case Expression::Kind::kConstructorMatrixResize: {
+ const ConstructorMatrixResize& ctor = expression.as<ConstructorMatrixResize>();
+ return ConstructorMatrixResize::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ expr(ctor.argument()));
+ }
+ case Expression::Kind::kConstructorScalarCast: {
+ const ConstructorScalarCast& ctor = expression.as<ConstructorScalarCast>();
+ return ConstructorScalarCast::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ expr(ctor.argument()));
+ }
+ case Expression::Kind::kConstructorSplat: {
+ const ConstructorSplat& ctor = expression.as<ConstructorSplat>();
+ return ConstructorSplat::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ expr(ctor.argument()));
+ }
+ case Expression::Kind::kConstructorStruct: {
+ const ConstructorStruct& ctor = expression.as<ConstructorStruct>();
+ return ConstructorStruct::Make(*fContext, pos,
+ *ctor.type().clone(symbolTableForExpression),
+ argList(ctor.arguments()));
+ }
+ case Expression::Kind::kFieldAccess: {
+ const FieldAccess& f = expression.as<FieldAccess>();
+ return FieldAccess::Make(*fContext, pos, expr(f.base()), f.fieldIndex(), f.ownerKind());
+ }
+ case Expression::Kind::kFunctionCall: {
+ const FunctionCall& funcCall = expression.as<FunctionCall>();
+ return FunctionCall::Make(*fContext,
+ pos,
+ funcCall.type().clone(symbolTableForExpression),
+ funcCall.function(),
+ argList(funcCall.arguments()));
+ }
+ case Expression::Kind::kFunctionReference:
+ return expression.clone();
+ case Expression::Kind::kIndex: {
+ const IndexExpression& idx = expression.as<IndexExpression>();
+ return IndexExpression::Make(*fContext, pos, expr(idx.base()), expr(idx.index()));
+ }
+ case Expression::Kind::kMethodReference:
+ return expression.clone();
+ case Expression::Kind::kPrefix: {
+ const PrefixExpression& p = expression.as<PrefixExpression>();
+ return PrefixExpression::Make(*fContext, pos, p.getOperator(), expr(p.operand()));
+ }
+ case Expression::Kind::kPostfix: {
+ const PostfixExpression& p = expression.as<PostfixExpression>();
+ return PostfixExpression::Make(*fContext, pos, expr(p.operand()), p.getOperator());
+ }
+ case Expression::Kind::kSetting: {
+ const Setting& s = expression.as<Setting>();
+ return Setting::Convert(*fContext, pos, s.name());
+ }
+ case Expression::Kind::kSwizzle: {
+ const Swizzle& s = expression.as<Swizzle>();
+ return Swizzle::Make(*fContext, pos, expr(s.base()), s.components());
+ }
+ case Expression::Kind::kTernary: {
+ const TernaryExpression& t = expression.as<TernaryExpression>();
+ return TernaryExpression::Make(*fContext, pos, expr(t.test()),
+ expr(t.ifTrue()), expr(t.ifFalse()));
+ }
+ case Expression::Kind::kTypeReference:
+ return expression.clone();
+ case Expression::Kind::kVariableReference: {
+ const VariableReference& v = expression.as<VariableReference>();
+ std::unique_ptr<Expression>* remap = varMap->find(v.variable());
+ if (remap) {
+ return clone_with_ref_kind(**remap, v.refKind());
+ }
+ return expression.clone();
+ }
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Statement> Inliner::inlineStatement(Position pos,
+ VariableRewriteMap* varMap,
+ SymbolTable* symbolTableForStatement,
+ std::unique_ptr<Expression>* resultExpr,
+ Analysis::ReturnComplexity returnComplexity,
+ const Statement& statement,
+ const ProgramUsage& usage,
+ bool isBuiltinCode) {
+ auto stmt = [&](const std::unique_ptr<Statement>& s) -> std::unique_ptr<Statement> {
+ if (s) {
+ return this->inlineStatement(pos, varMap, symbolTableForStatement, resultExpr,
+ returnComplexity, *s, usage, isBuiltinCode);
+ }
+ return nullptr;
+ };
+ auto blockStmts = [&](const Block& block) {
+ StatementArray result;
+ result.reserve_back(block.children().size());
+ for (const std::unique_ptr<Statement>& child : block.children()) {
+ result.push_back(stmt(child));
+ }
+ return result;
+ };
+ auto expr = [&](const std::unique_ptr<Expression>& e) -> std::unique_ptr<Expression> {
+ if (e) {
+ return this->inlineExpression(pos, varMap, symbolTableForStatement, *e);
+ }
+ return nullptr;
+ };
+ auto variableModifiers = [&](const Variable& variable,
+ const Expression* initialValue) -> const Modifiers* {
+ return Transform::AddConstToVarModifiers(*fContext, variable, initialValue, &usage);
+ };
+
+ ++fInlinedStatementCounter;
+
+ switch (statement.kind()) {
+ case Statement::Kind::kBlock: {
+ const Block& b = statement.as<Block>();
+ return Block::Make(pos, blockStmts(b), b.blockKind(),
+ SymbolTable::WrapIfBuiltin(b.symbolTable()));
+ }
+
+ case Statement::Kind::kBreak:
+ case Statement::Kind::kContinue:
+ case Statement::Kind::kDiscard:
+ return statement.clone();
+
+ case Statement::Kind::kDo: {
+ const DoStatement& d = statement.as<DoStatement>();
+ return DoStatement::Make(*fContext, pos, stmt(d.statement()), expr(d.test()));
+ }
+ case Statement::Kind::kExpression: {
+ const ExpressionStatement& e = statement.as<ExpressionStatement>();
+ return ExpressionStatement::Make(*fContext, expr(e.expression()));
+ }
+ case Statement::Kind::kFor: {
+ const ForStatement& f = statement.as<ForStatement>();
+ // need to ensure initializer is evaluated first so that we've already remapped its
+ // declarations by the time we evaluate test & next
+ std::unique_ptr<Statement> initializer = stmt(f.initializer());
+
+ std::unique_ptr<LoopUnrollInfo> unrollInfo;
+ if (f.unrollInfo()) {
+ // The for loop's unroll-info points to the Variable in the initializer as the
+ // index. This variable has been rewritten into a clone by the inliner, so we need
+ // to update the loop-unroll info to point to the clone.
+ unrollInfo = std::make_unique<LoopUnrollInfo>(*f.unrollInfo());
+ unrollInfo->fIndex = RemapVariable(unrollInfo->fIndex, varMap);
+ }
+ return ForStatement::Make(*fContext, pos, ForLoopPositions{}, std::move(initializer),
+ expr(f.test()), expr(f.next()), stmt(f.statement()),
+ std::move(unrollInfo),
+ SymbolTable::WrapIfBuiltin(f.symbols()));
+ }
+ case Statement::Kind::kIf: {
+ const IfStatement& i = statement.as<IfStatement>();
+ return IfStatement::Make(*fContext, pos, expr(i.test()),
+ stmt(i.ifTrue()), stmt(i.ifFalse()));
+ }
+ case Statement::Kind::kNop:
+ return statement.clone();
+
+ case Statement::Kind::kReturn: {
+ const ReturnStatement& r = statement.as<ReturnStatement>();
+ if (!r.expression()) {
+ // This function doesn't return a value. We won't inline functions with early
+ // returns, so a return statement is a no-op and can be treated as such.
+ return Nop::Make();
+ }
+
+ // If a function only contains a single return, and it doesn't reference variables from
+ // inside a Block's scope, we don't need to store the result in a variable at all. Just
+ // replace the function-call expression with the function's return expression.
+ SkASSERT(resultExpr);
+ if (returnComplexity <= Analysis::ReturnComplexity::kSingleSafeReturn) {
+ *resultExpr = expr(r.expression());
+ return Nop::Make();
+ }
+
+ // For more complex functions, we assign their result into a variable. We refuse to
+ // inline anything with early returns, so this should be safe to do; that is, on this
+ // control path, this is the last statement that will occur.
+ SkASSERT(*resultExpr);
+ return ExpressionStatement::Make(
+ *fContext,
+ BinaryExpression::Make(
+ *fContext,
+ pos,
+ clone_with_ref_kind(**resultExpr, VariableRefKind::kWrite),
+ Operator::Kind::EQ,
+ expr(r.expression())));
+ }
+ case Statement::Kind::kSwitch: {
+ const SwitchStatement& ss = statement.as<SwitchStatement>();
+ StatementArray cases;
+ cases.reserve_back(ss.cases().size());
+ for (const std::unique_ptr<Statement>& switchCaseStmt : ss.cases()) {
+ const SwitchCase& sc = switchCaseStmt->as<SwitchCase>();
+ if (sc.isDefault()) {
+ cases.push_back(SwitchCase::MakeDefault(pos, stmt(sc.statement())));
+ } else {
+ cases.push_back(SwitchCase::Make(pos, sc.value(), stmt(sc.statement())));
+ }
+ }
+ return SwitchStatement::Make(*fContext, pos, expr(ss.value()),
+ std::move(cases), SymbolTable::WrapIfBuiltin(ss.symbols()));
+ }
+ case Statement::Kind::kVarDeclaration: {
+ const VarDeclaration& decl = statement.as<VarDeclaration>();
+ std::unique_ptr<Expression> initialValue = expr(decl.value());
+ const Variable* variable = decl.var();
+
+ // We assign unique names to inlined variables--scopes hide most of the problems in this
+ // regard, but see `InlinerAvoidsVariableNameOverlap` for a counterexample where unique
+ // names are important.
+ const std::string* name = symbolTableForStatement->takeOwnershipOfString(
+ fMangler.uniqueName(variable->name(), symbolTableForStatement));
+ auto clonedVar = std::make_unique<Variable>(
+ pos,
+ variable->modifiersPosition(),
+ variableModifiers(*variable, initialValue.get()),
+ name->c_str(),
+ variable->type().clone(symbolTableForStatement),
+ isBuiltinCode,
+ variable->storage());
+ varMap->set(variable, VariableReference::Make(pos, clonedVar.get()));
+ auto result = VarDeclaration::Make(*fContext,
+ clonedVar.get(),
+ decl.baseType().clone(symbolTableForStatement),
+ decl.arraySize(),
+ std::move(initialValue));
+ symbolTableForStatement->takeOwnershipOfSymbol(std::move(clonedVar));
+ return result;
+ }
+ default:
+ SkASSERT(false);
+ return nullptr;
+ }
+}
+
+Inliner::InlinedCall Inliner::inlineCall(const FunctionCall& call,
+ std::shared_ptr<SymbolTable> symbolTable,
+ const ProgramUsage& usage,
+ const FunctionDeclaration* caller) {
+ using ScratchVariable = Variable::ScratchVariable;
+
+ // Inlining is more complicated here than in a typical compiler, because we have to have a
+ // high-level IR and can't just drop statements into the middle of an expression or even use
+ // gotos.
+ //
+ // Since we can't insert statements into an expression, we run the inline function as extra
+ // statements before the statement we're currently processing, relying on a lack of execution
+ // order guarantees. Since we can't use gotos (which are normally used to replace return
+ // statements), we wrap the whole function in a loop and use break statements to jump to the
+ // end.
+ SkASSERT(fContext);
+ SkASSERT(this->isSafeToInline(call.function().definition(), usage));
+
+ const ExpressionArray& arguments = call.arguments();
+ const Position pos = call.fPosition;
+ const FunctionDefinition& function = *call.function().definition();
+ const Block& body = function.body()->as<Block>();
+ const Analysis::ReturnComplexity returnComplexity = Analysis::GetReturnComplexity(function);
+
+ StatementArray inlineStatements;
+ int expectedStmtCount = 1 + // Result variable
+ arguments.size() + // Function argument temp-vars
+ body.children().size(); // Inlined code
+
+ inlineStatements.reserve_back(expectedStmtCount);
+
+ std::unique_ptr<Expression> resultExpr;
+ if (returnComplexity > Analysis::ReturnComplexity::kSingleSafeReturn &&
+ !function.declaration().returnType().isVoid()) {
+ // Create a variable to hold the result in the extra statements. We don't need to do this
+ // for void-return functions, or in cases that are simple enough that we can just replace
+ // the function-call node with the result expression.
+ ScratchVariable var = Variable::MakeScratchVariable(*fContext,
+ fMangler,
+ function.declaration().name(),
+ &function.declaration().returnType(),
+ Modifiers{},
+ symbolTable.get(),
+ /*initialValue=*/nullptr);
+ inlineStatements.push_back(std::move(var.fVarDecl));
+ resultExpr = VariableReference::Make(Position(), var.fVarSymbol);
+ }
+
+ // Create variables in the extra statements to hold the arguments, and assign the arguments to
+ // them.
+ VariableRewriteMap varMap;
+ for (int i = 0; i < arguments.size(); ++i) {
+ // If the parameter isn't written to within the inline function ...
+ const Expression* arg = arguments[i].get();
+ const Variable* param = function.declaration().parameters()[i];
+ const ProgramUsage::VariableCounts& paramUsage = usage.get(*param);
+ if (!paramUsage.fWrite) {
+ // ... and can be inlined trivially (e.g. a swizzle, or a constant array index),
+ // or any expression without side effects that is only accessed at most once...
+ if ((paramUsage.fRead > 1) ? Analysis::IsTrivialExpression(*arg)
+ : !Analysis::HasSideEffects(*arg)) {
+ // ... we don't need to copy it at all! We can just use the existing expression.
+ varMap.set(param, arg->clone());
+ continue;
+ }
+ }
+ ScratchVariable var = Variable::MakeScratchVariable(*fContext,
+ fMangler,
+ param->name(),
+ &arg->type(),
+ param->modifiers(),
+ symbolTable.get(),
+ arg->clone());
+ inlineStatements.push_back(std::move(var.fVarDecl));
+ varMap.set(param, VariableReference::Make(Position(), var.fVarSymbol));
+ }
+
+ for (const std::unique_ptr<Statement>& stmt : body.children()) {
+ inlineStatements.push_back(this->inlineStatement(pos, &varMap, symbolTable.get(),
+ &resultExpr, returnComplexity, *stmt,
+ usage, caller->isBuiltin()));
+ }
+
+ SkASSERT(inlineStatements.size() <= expectedStmtCount);
+
+ // Wrap all of the generated statements in a block. We need a real Block here, because we need
+ // to add another child statement to the Block later.
+ InlinedCall inlinedCall;
+ inlinedCall.fInlinedBody = Block::MakeBlock(pos, std::move(inlineStatements),
+ Block::Kind::kUnbracedBlock);
+ if (resultExpr) {
+ // Return our result expression as-is.
+ inlinedCall.fReplacementExpr = std::move(resultExpr);
+ } else if (function.declaration().returnType().isVoid()) {
+ // It's a void function, so it doesn't actually result in anything, but we have to return
+ // something non-null as a standin.
+ inlinedCall.fReplacementExpr = Literal::MakeBool(*fContext, pos, /*value=*/false);
+ } else {
+ // It's a non-void function, but it never created a result expression--that is, it never
+ // returned anything on any path! This should have been detected in the function finalizer.
+ // Still, discard our output and generate an error.
+ SkDEBUGFAIL("inliner found non-void function that fails to return a value on any path");
+ fContext->fErrors->error(function.fPosition, "inliner found non-void function '" +
+ std::string(function.declaration().name()) +
+ "' that fails to return a value on any path");
+ inlinedCall = {};
+ }
+
+ return inlinedCall;
+}
+
+bool Inliner::isSafeToInline(const FunctionDefinition* functionDef, const ProgramUsage& usage) {
+ // A threshold of zero indicates that the inliner is completely disabled, so we can just return.
+ if (this->settings().fInlineThreshold <= 0) {
+ return false;
+ }
+
+ // Enforce a limit on inlining to avoid pathological cases. (inliner/ExponentialGrowth.sksl)
+ if (fInlinedStatementCounter >= kInlinedStatementLimit) {
+ return false;
+ }
+
+ if (functionDef == nullptr) {
+ // Can't inline something if we don't actually have its definition.
+ return false;
+ }
+
+ if (functionDef->declaration().modifiers().fFlags & Modifiers::kNoInline_Flag) {
+ // Refuse to inline functions decorated with `noinline`.
+ return false;
+ }
+
+ // We don't allow inlining a function with out parameters that are written to.
+ // (See skia:11326 for rationale.)
+ for (const Variable* param : functionDef->declaration().parameters()) {
+ if (param->modifiers().fFlags & Modifiers::Flag::kOut_Flag) {
+ ProgramUsage::VariableCounts counts = usage.get(*param);
+ if (counts.fWrite > 0) {
+ return false;
+ }
+ }
+ }
+
+ // We don't have a mechanism to simulate early returns, so we can't inline if there is one.
+ return Analysis::GetReturnComplexity(*functionDef) < Analysis::ReturnComplexity::kEarlyReturns;
+}
+
+// A candidate function for inlining, containing everything that `inlineCall` needs.
+struct InlineCandidate {
+ std::shared_ptr<SymbolTable> fSymbols; // the SymbolTable of the candidate
+ std::unique_ptr<Statement>* fParentStmt; // the parent Statement of the enclosing stmt
+ std::unique_ptr<Statement>* fEnclosingStmt; // the Statement containing the candidate
+ std::unique_ptr<Expression>* fCandidateExpr; // the candidate FunctionCall to be inlined
+ FunctionDefinition* fEnclosingFunction; // the Function containing the candidate
+};
+
+struct InlineCandidateList {
+ std::vector<InlineCandidate> fCandidates;
+};
+
+class InlineCandidateAnalyzer {
+public:
+ // A list of all the inlining candidates we found during analysis.
+ InlineCandidateList* fCandidateList;
+
+ // A stack of the symbol tables; since most nodes don't have one, expected to be shallower than
+ // the enclosing-statement stack.
+ std::vector<std::shared_ptr<SymbolTable>> fSymbolTableStack;
+ // A stack of "enclosing" statements--these would be suitable for the inliner to use for adding
+ // new instructions. Not all statements are suitable (e.g. a for-loop's initializer). The
+ // inliner might replace a statement with a block containing the statement.
+ std::vector<std::unique_ptr<Statement>*> fEnclosingStmtStack;
+ // The function that we're currently processing (i.e. inlining into).
+ FunctionDefinition* fEnclosingFunction = nullptr;
+
+ void visit(const std::vector<std::unique_ptr<ProgramElement>>& elements,
+ std::shared_ptr<SymbolTable> symbols,
+ InlineCandidateList* candidateList) {
+ fCandidateList = candidateList;
+ fSymbolTableStack.push_back(symbols);
+
+ for (const std::unique_ptr<ProgramElement>& pe : elements) {
+ this->visitProgramElement(pe.get());
+ }
+
+ fSymbolTableStack.pop_back();
+ fCandidateList = nullptr;
+ }
+
+ void visitProgramElement(ProgramElement* pe) {
+ switch (pe->kind()) {
+ case ProgramElement::Kind::kFunction: {
+ FunctionDefinition& funcDef = pe->as<FunctionDefinition>();
+ fEnclosingFunction = &funcDef;
+ this->visitStatement(&funcDef.body());
+ break;
+ }
+ default:
+ // The inliner can't operate outside of a function's scope.
+ break;
+ }
+ }
+
+ void visitStatement(std::unique_ptr<Statement>* stmt,
+ bool isViableAsEnclosingStatement = true) {
+ if (!*stmt) {
+ return;
+ }
+
+ Analysis::SymbolTableStackBuilder scopedStackBuilder(stmt->get(), &fSymbolTableStack);
+ size_t oldEnclosingStmtStackSize = fEnclosingStmtStack.size();
+
+ if (isViableAsEnclosingStatement) {
+ fEnclosingStmtStack.push_back(stmt);
+ }
+
+ switch ((*stmt)->kind()) {
+ case Statement::Kind::kBreak:
+ case Statement::Kind::kContinue:
+ case Statement::Kind::kDiscard:
+ case Statement::Kind::kNop:
+ break;
+
+ case Statement::Kind::kBlock: {
+ Block& block = (*stmt)->as<Block>();
+ for (std::unique_ptr<Statement>& blockStmt : block.children()) {
+ this->visitStatement(&blockStmt);
+ }
+ break;
+ }
+ case Statement::Kind::kDo: {
+ DoStatement& doStmt = (*stmt)->as<DoStatement>();
+ // The loop body is a candidate for inlining.
+ this->visitStatement(&doStmt.statement());
+ // The inliner isn't smart enough to inline the test-expression for a do-while
+ // loop at this time. There are two limitations:
+ // - We would need to insert the inlined-body block at the very end of the do-
+ // statement's inner fStatement. We don't support that today, but it's doable.
+ // - We cannot inline the test expression if the loop uses `continue` anywhere; that
+ // would skip over the inlined block that evaluates the test expression. There
+ // isn't a good fix for this--any workaround would be more complex than the cost
+ // of a function call. However, loops that don't use `continue` would still be
+ // viable candidates for inlining.
+ break;
+ }
+ case Statement::Kind::kExpression: {
+ ExpressionStatement& expr = (*stmt)->as<ExpressionStatement>();
+ this->visitExpression(&expr.expression());
+ break;
+ }
+ case Statement::Kind::kFor: {
+ ForStatement& forStmt = (*stmt)->as<ForStatement>();
+ // The initializer and loop body are candidates for inlining.
+ this->visitStatement(&forStmt.initializer(),
+ /*isViableAsEnclosingStatement=*/false);
+ this->visitStatement(&forStmt.statement());
+
+ // The inliner isn't smart enough to inline the test- or increment-expressions
+ // of a for loop at this time. There are a handful of limitations:
+ // - We would need to insert the test-expression block at the very beginning of the
+ // for-loop's inner fStatement, and the increment-expression block at the very
+ // end. We don't support that today, but it's doable.
+ // - The for-loop's built-in test-expression would need to be dropped entirely,
+ // and the loop would be halted via a break statement at the end of the inlined
+ // test-expression. This is again something we don't support today, but it could
+ // be implemented.
+ // - We cannot inline the increment-expression if the loop uses `continue` anywhere;
+ // that would skip over the inlined block that evaluates the increment expression.
+ // There isn't a good fix for this--any workaround would be more complex than the
+ // cost of a function call. However, loops that don't use `continue` would still
+ // be viable candidates for increment-expression inlining.
+ break;
+ }
+ case Statement::Kind::kIf: {
+ IfStatement& ifStmt = (*stmt)->as<IfStatement>();
+ this->visitExpression(&ifStmt.test());
+ this->visitStatement(&ifStmt.ifTrue());
+ this->visitStatement(&ifStmt.ifFalse());
+ break;
+ }
+ case Statement::Kind::kReturn: {
+ ReturnStatement& returnStmt = (*stmt)->as<ReturnStatement>();
+ this->visitExpression(&returnStmt.expression());
+ break;
+ }
+ case Statement::Kind::kSwitch: {
+ SwitchStatement& switchStmt = (*stmt)->as<SwitchStatement>();
+ this->visitExpression(&switchStmt.value());
+ for (const std::unique_ptr<Statement>& switchCase : switchStmt.cases()) {
+ // The switch-case's fValue cannot be a FunctionCall; skip it.
+ this->visitStatement(&switchCase->as<SwitchCase>().statement());
+ }
+ break;
+ }
+ case Statement::Kind::kVarDeclaration: {
+ VarDeclaration& varDeclStmt = (*stmt)->as<VarDeclaration>();
+ // Don't need to scan the declaration's sizes; those are always IntLiterals.
+ this->visitExpression(&varDeclStmt.value());
+ break;
+ }
+ default:
+ SkUNREACHABLE;
+ }
+
+ // Pop our symbol and enclosing-statement stacks.
+ fEnclosingStmtStack.resize(oldEnclosingStmtStackSize);
+ }
+
+ void visitExpression(std::unique_ptr<Expression>* expr) {
+ if (!*expr) {
+ return;
+ }
+
+ switch ((*expr)->kind()) {
+ case Expression::Kind::kFieldAccess:
+ case Expression::Kind::kFunctionReference:
+ case Expression::Kind::kLiteral:
+ case Expression::Kind::kMethodReference:
+ case Expression::Kind::kSetting:
+ case Expression::Kind::kTypeReference:
+ case Expression::Kind::kVariableReference:
+ // Nothing to scan here.
+ break;
+
+ case Expression::Kind::kBinary: {
+ BinaryExpression& binaryExpr = (*expr)->as<BinaryExpression>();
+ this->visitExpression(&binaryExpr.left());
+
+ // Logical-and and logical-or binary expressions do not inline the right side,
+ // because that would invalidate short-circuiting. That is, when evaluating
+ // expressions like these:
+ // (false && x()) // always false
+ // (true || y()) // always true
+ // It is illegal for side-effects from x() or y() to occur. The simplest way to
+ // enforce that rule is to avoid inlining the right side entirely. However, it is
+ // safe for other types of binary expression to inline both sides.
+ Operator op = binaryExpr.getOperator();
+ bool shortCircuitable = (op.kind() == Operator::Kind::LOGICALAND ||
+ op.kind() == Operator::Kind::LOGICALOR);
+ if (!shortCircuitable) {
+ this->visitExpression(&binaryExpr.right());
+ }
+ break;
+ }
+ case Expression::Kind::kChildCall: {
+ ChildCall& childCallExpr = (*expr)->as<ChildCall>();
+ for (std::unique_ptr<Expression>& arg : childCallExpr.arguments()) {
+ this->visitExpression(&arg);
+ }
+ break;
+ }
+ case Expression::Kind::kConstructorArray:
+ case Expression::Kind::kConstructorArrayCast:
+ case Expression::Kind::kConstructorCompound:
+ case Expression::Kind::kConstructorCompoundCast:
+ case Expression::Kind::kConstructorDiagonalMatrix:
+ case Expression::Kind::kConstructorMatrixResize:
+ case Expression::Kind::kConstructorScalarCast:
+ case Expression::Kind::kConstructorSplat:
+ case Expression::Kind::kConstructorStruct: {
+ AnyConstructor& constructorExpr = (*expr)->asAnyConstructor();
+ for (std::unique_ptr<Expression>& arg : constructorExpr.argumentSpan()) {
+ this->visitExpression(&arg);
+ }
+ break;
+ }
+ case Expression::Kind::kFunctionCall: {
+ FunctionCall& funcCallExpr = (*expr)->as<FunctionCall>();
+ for (std::unique_ptr<Expression>& arg : funcCallExpr.arguments()) {
+ this->visitExpression(&arg);
+ }
+ this->addInlineCandidate(expr);
+ break;
+ }
+ case Expression::Kind::kIndex: {
+ IndexExpression& indexExpr = (*expr)->as<IndexExpression>();
+ this->visitExpression(&indexExpr.base());
+ this->visitExpression(&indexExpr.index());
+ break;
+ }
+ case Expression::Kind::kPostfix: {
+ PostfixExpression& postfixExpr = (*expr)->as<PostfixExpression>();
+ this->visitExpression(&postfixExpr.operand());
+ break;
+ }
+ case Expression::Kind::kPrefix: {
+ PrefixExpression& prefixExpr = (*expr)->as<PrefixExpression>();
+ this->visitExpression(&prefixExpr.operand());
+ break;
+ }
+ case Expression::Kind::kSwizzle: {
+ Swizzle& swizzleExpr = (*expr)->as<Swizzle>();
+ this->visitExpression(&swizzleExpr.base());
+ break;
+ }
+ case Expression::Kind::kTernary: {
+ TernaryExpression& ternaryExpr = (*expr)->as<TernaryExpression>();
+ // The test expression is a candidate for inlining.
+ this->visitExpression(&ternaryExpr.test());
+ // The true- and false-expressions cannot be inlined, because we are only allowed to
+ // evaluate one side.
+ break;
+ }
+ default:
+ SkUNREACHABLE;
+ }
+ }
+
+ void addInlineCandidate(std::unique_ptr<Expression>* candidate) {
+ fCandidateList->fCandidates.push_back(
+ InlineCandidate{fSymbolTableStack.back(),
+ find_parent_statement(fEnclosingStmtStack),
+ fEnclosingStmtStack.back(),
+ candidate,
+ fEnclosingFunction});
+ }
+};
+
+static const FunctionDeclaration& candidate_func(const InlineCandidate& candidate) {
+ return (*candidate.fCandidateExpr)->as<FunctionCall>().function();
+}
+
+bool Inliner::candidateCanBeInlined(const InlineCandidate& candidate,
+ const ProgramUsage& usage,
+ InlinabilityCache* cache) {
+ const FunctionDeclaration& funcDecl = candidate_func(candidate);
+ if (const bool* cachedInlinability = cache->find(&funcDecl)) {
+ return *cachedInlinability;
+ }
+ bool inlinability = this->isSafeToInline(funcDecl.definition(), usage);
+ cache->set(&funcDecl, inlinability);
+ return inlinability;
+}
+
+int Inliner::getFunctionSize(const FunctionDeclaration& funcDecl, FunctionSizeCache* cache) {
+ if (const int* cachedSize = cache->find(&funcDecl)) {
+ return *cachedSize;
+ }
+ int size = Analysis::NodeCountUpToLimit(*funcDecl.definition(),
+ this->settings().fInlineThreshold);
+ cache->set(&funcDecl, size);
+ return size;
+}
+
+void Inliner::buildCandidateList(const std::vector<std::unique_ptr<ProgramElement>>& elements,
+ std::shared_ptr<SymbolTable> symbols, ProgramUsage* usage,
+ InlineCandidateList* candidateList) {
+ // This is structured much like a ProgramVisitor, but does not actually use ProgramVisitor.
+ // The analyzer needs to keep track of the `unique_ptr<T>*` of statements and expressions so
+ // that they can later be replaced, and ProgramVisitor does not provide this; it only provides a
+ // `const T&`.
+ InlineCandidateAnalyzer analyzer;
+ analyzer.visit(elements, symbols, candidateList);
+
+ // Early out if there are no inlining candidates.
+ std::vector<InlineCandidate>& candidates = candidateList->fCandidates;
+ if (candidates.empty()) {
+ return;
+ }
+
+ // Remove candidates that are not safe to inline.
+ InlinabilityCache cache;
+ candidates.erase(std::remove_if(candidates.begin(),
+ candidates.end(),
+ [&](const InlineCandidate& candidate) {
+ return !this->candidateCanBeInlined(
+ candidate, *usage, &cache);
+ }),
+ candidates.end());
+
+ // If the inline threshold is unlimited, or if we have no candidates left, our candidate list is
+ // complete.
+ if (this->settings().fInlineThreshold == INT_MAX || candidates.empty()) {
+ return;
+ }
+
+ // Remove candidates on a per-function basis if the effect of inlining would be to make more
+ // than `inlineThreshold` nodes. (i.e. if Func() would be inlined six times and its size is
+ // 10 nodes, it should be inlined if the inlineThreshold is 60 or higher.)
+ FunctionSizeCache functionSizeCache;
+ FunctionSizeCache candidateTotalCost;
+ for (InlineCandidate& candidate : candidates) {
+ const FunctionDeclaration& fnDecl = candidate_func(candidate);
+ candidateTotalCost[&fnDecl] += this->getFunctionSize(fnDecl, &functionSizeCache);
+ }
+
+ candidates.erase(std::remove_if(candidates.begin(), candidates.end(),
+ [&](const InlineCandidate& candidate) {
+ const FunctionDeclaration& fnDecl = candidate_func(candidate);
+ if (fnDecl.modifiers().fFlags & Modifiers::kInline_Flag) {
+ // Functions marked `inline` ignore size limitations.
+ return false;
+ }
+ if (usage->get(fnDecl) == 1) {
+ // If a function is only used once, it's cost-free to inline.
+ return false;
+ }
+ if (candidateTotalCost[&fnDecl] <= this->settings().fInlineThreshold) {
+ // We won't exceed the inline threshold by inlining this.
+ return false;
+ }
+ // Inlining this function will add too many IRNodes.
+ return true;
+ }),
+ candidates.end());
+}
+
+bool Inliner::analyze(const std::vector<std::unique_ptr<ProgramElement>>& elements,
+ std::shared_ptr<SymbolTable> symbols,
+ ProgramUsage* usage) {
+ // A threshold of zero indicates that the inliner is completely disabled, so we can just return.
+ if (this->settings().fInlineThreshold <= 0) {
+ return false;
+ }
+
+ // Enforce a limit on inlining to avoid pathological cases. (inliner/ExponentialGrowth.sksl)
+ if (fInlinedStatementCounter >= kInlinedStatementLimit) {
+ return false;
+ }
+
+ InlineCandidateList candidateList;
+ this->buildCandidateList(elements, symbols, usage, &candidateList);
+
+ // Inline the candidates where we've determined that it's safe to do so.
+ using StatementRemappingTable = SkTHashMap<std::unique_ptr<Statement>*,
+ std::unique_ptr<Statement>*>;
+ StatementRemappingTable statementRemappingTable;
+
+ bool madeChanges = false;
+ for (const InlineCandidate& candidate : candidateList.fCandidates) {
+ const FunctionCall& funcCall = (*candidate.fCandidateExpr)->as<FunctionCall>();
+
+ // Convert the function call to its inlined equivalent.
+ InlinedCall inlinedCall = this->inlineCall(funcCall, candidate.fSymbols, *usage,
+ &candidate.fEnclosingFunction->declaration());
+
+ // Stop if an error was detected during the inlining process.
+ if (!inlinedCall.fInlinedBody && !inlinedCall.fReplacementExpr) {
+ break;
+ }
+
+ // Ensure that the inlined body has a scope if it needs one.
+ this->ensureScopedBlocks(inlinedCall.fInlinedBody.get(), candidate.fParentStmt->get());
+
+ // Add references within the inlined body
+ usage->add(inlinedCall.fInlinedBody.get());
+
+ // Look up the enclosing statement; remap it if necessary.
+ std::unique_ptr<Statement>* enclosingStmt = candidate.fEnclosingStmt;
+ for (;;) {
+ std::unique_ptr<Statement>** remappedStmt = statementRemappingTable.find(enclosingStmt);
+ if (!remappedStmt) {
+ break;
+ }
+ enclosingStmt = *remappedStmt;
+ }
+
+ // Move the enclosing statement to the end of the unscoped Block containing the inlined
+ // function, then replace the enclosing statement with that Block.
+ // Before:
+ // fInlinedBody = Block{ stmt1, stmt2, stmt3 }
+ // fEnclosingStmt = stmt4
+ // After:
+ // fInlinedBody = null
+ // fEnclosingStmt = Block{ stmt1, stmt2, stmt3, stmt4 }
+ inlinedCall.fInlinedBody->children().push_back(std::move(*enclosingStmt));
+ *enclosingStmt = std::move(inlinedCall.fInlinedBody);
+
+ // Replace the candidate function call with our replacement expression.
+ usage->remove(candidate.fCandidateExpr->get());
+ usage->add(inlinedCall.fReplacementExpr.get());
+ *candidate.fCandidateExpr = std::move(inlinedCall.fReplacementExpr);
+ madeChanges = true;
+
+ // If anything else pointed at our enclosing statement, it's now pointing at a Block
+ // containing many other statements as well. Maintain a fix-up table to account for this.
+ statementRemappingTable.set(enclosingStmt,&(*enclosingStmt)->as<Block>().children().back());
+
+ // Stop inlining if we've reached our hard cap on new statements.
+ if (fInlinedStatementCounter >= kInlinedStatementLimit) {
+ break;
+ }
+
+ // Note that nothing was destroyed except for the FunctionCall. All other nodes should
+ // remain valid.
+ }
+
+ return madeChanges;
+}
+
+} // namespace SkSL
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
diff --git a/gfx/skia/skia/src/sksl/SkSLInliner.h b/gfx/skia/skia/src/sksl/SkSLInliner.h
new file mode 100644
index 0000000000..618365baf0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLInliner.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INLINER
+#define SKSL_INLINER
+
+#ifndef SK_ENABLE_OPTIMIZE_SIZE
+
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLMangler.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+class FunctionCall;
+class FunctionDeclaration;
+class FunctionDefinition;
+class Position;
+class ProgramElement;
+class ProgramUsage;
+class Statement;
+class SymbolTable;
+class Variable;
+struct InlineCandidate;
+struct InlineCandidateList;
+namespace Analysis { enum class ReturnComplexity; }
+
+/**
+ * Converts a FunctionCall in the IR to a set of statements to be injected ahead of the function
+ * call, and a replacement expression. Can also detect cases where inlining isn't cleanly possible
+ * (e.g. return statements nested inside of a loop construct). The inliner isn't able to guarantee
+ * identical-to-GLSL execution order if the inlined function has visible side effects.
+ */
+class Inliner {
+public:
+ Inliner(const Context* context) : fContext(context) {}
+
+ /** Inlines any eligible functions that are found. Returns true if any changes are made. */
+ bool analyze(const std::vector<std::unique_ptr<ProgramElement>>& elements,
+ std::shared_ptr<SymbolTable> symbols,
+ ProgramUsage* usage);
+
+private:
+ using VariableRewriteMap = SkTHashMap<const Variable*, std::unique_ptr<Expression>>;
+
+ const ProgramSettings& settings() const { return fContext->fConfig->fSettings; }
+
+ void buildCandidateList(const std::vector<std::unique_ptr<ProgramElement>>& elements,
+ std::shared_ptr<SymbolTable> symbols, ProgramUsage* usage,
+ InlineCandidateList* candidateList);
+
+ std::unique_ptr<Expression> inlineExpression(Position pos,
+ VariableRewriteMap* varMap,
+ SymbolTable* symbolTableForExpression,
+ const Expression& expression);
+ std::unique_ptr<Statement> inlineStatement(Position pos,
+ VariableRewriteMap* varMap,
+ SymbolTable* symbolTableForStatement,
+ std::unique_ptr<Expression>* resultExpr,
+ Analysis::ReturnComplexity returnComplexity,
+ const Statement& statement,
+ const ProgramUsage& usage,
+ bool isBuiltinCode);
+
+ /**
+ * Searches the rewrite map for an rewritten Variable* for the passed-in one. Asserts if the
+ * rewrite map doesn't contain the variable, or contains a different type of expression.
+ */
+ static const Variable* RemapVariable(const Variable* variable,
+ const VariableRewriteMap* varMap);
+
+ using InlinabilityCache = SkTHashMap<const FunctionDeclaration*, bool>;
+ bool candidateCanBeInlined(const InlineCandidate& candidate,
+ const ProgramUsage& usage,
+ InlinabilityCache* cache);
+
+ using FunctionSizeCache = SkTHashMap<const FunctionDeclaration*, int>;
+ int getFunctionSize(const FunctionDeclaration& fnDecl, FunctionSizeCache* cache);
+
+ /**
+ * Processes the passed-in FunctionCall expression. The FunctionCall expression should be
+ * replaced with `fReplacementExpr`. If non-null, `fInlinedBody` should be inserted immediately
+ * above the statement containing the inlined expression.
+ */
+ struct InlinedCall {
+ std::unique_ptr<Block> fInlinedBody;
+ std::unique_ptr<Expression> fReplacementExpr;
+ };
+ InlinedCall inlineCall(const FunctionCall&,
+ std::shared_ptr<SymbolTable>,
+ const ProgramUsage&,
+ const FunctionDeclaration* caller);
+
+ /** Adds a scope to inlined bodies returned by `inlineCall`, if one is required. */
+ void ensureScopedBlocks(Statement* inlinedBody, Statement* parentStmt);
+
+ /** Checks whether inlining is viable for a FunctionCall, modulo recursion and function size. */
+ bool isSafeToInline(const FunctionDefinition* functionDef, const ProgramUsage& usage);
+
+ const Context* fContext = nullptr;
+ Mangler fMangler;
+ int fInlinedStatementCounter = 0;
+};
+
+} // namespace SkSL
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
+
+#endif // SKSL_INLINER
diff --git a/gfx/skia/skia/src/sksl/SkSLIntrinsicList.cpp b/gfx/skia/skia/src/sksl/SkSLIntrinsicList.cpp
new file mode 100644
index 0000000000..a582bdff60
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLIntrinsicList.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/base/SkStringView.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+
+namespace SkSL {
+
+const IntrinsicMap& GetIntrinsicMap() {
+ #define SKSL_INTRINSIC(name) {#name, k_##name##_IntrinsicKind},
+ static const auto* kAllIntrinsics = new SkTHashMap<std::string_view, IntrinsicKind>{
+ SKSL_INTRINSIC_LIST
+ };
+ #undef SKSL_INTRINSIC
+
+ return *kAllIntrinsics;
+}
+
+IntrinsicKind FindIntrinsicKind(std::string_view functionName) {
+ if (skstd::starts_with(functionName, '$')) {
+ functionName.remove_prefix(1);
+ }
+
+ const IntrinsicMap& intrinsicMap = GetIntrinsicMap();
+ IntrinsicKind* kind = intrinsicMap.find(functionName);
+ return kind ? *kind : kNotIntrinsic;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLIntrinsicList.h b/gfx/skia/skia/src/sksl/SkSLIntrinsicList.h
new file mode 100644
index 0000000000..9e41f3b1aa
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLIntrinsicList.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INTRINSIC_LIST_DEFINED
+#define SKSL_INTRINSIC_LIST_DEFINED
+
+#include "src/core/SkTHash.h"
+
+#include <cstdint>
+#include <initializer_list>
+#include <string_view>
+
+// A list of every intrinsic supported by SkSL.
+// Using an X-Macro (https://en.wikipedia.org/wiki/X_Macro) to manage the list.
+#define SKSL_INTRINSIC_LIST \
+ SKSL_INTRINSIC(abs) \
+ SKSL_INTRINSIC(acosh) \
+ SKSL_INTRINSIC(acos) \
+ SKSL_INTRINSIC(all) \
+ SKSL_INTRINSIC(any) \
+ SKSL_INTRINSIC(asinh) \
+ SKSL_INTRINSIC(asin) \
+ SKSL_INTRINSIC(atanh) \
+ SKSL_INTRINSIC(atan) \
+ SKSL_INTRINSIC(atomicAdd) \
+ SKSL_INTRINSIC(atomicLoad) \
+ SKSL_INTRINSIC(atomicStore) \
+ SKSL_INTRINSIC(bitCount) \
+ SKSL_INTRINSIC(ceil) \
+ SKSL_INTRINSIC(clamp) \
+ SKSL_INTRINSIC(cosh) \
+ SKSL_INTRINSIC(cos) \
+ SKSL_INTRINSIC(cross) \
+ SKSL_INTRINSIC(degrees) \
+ SKSL_INTRINSIC(determinant) \
+ SKSL_INTRINSIC(dFdx) \
+ SKSL_INTRINSIC(dFdy) \
+ SKSL_INTRINSIC(distance) \
+ SKSL_INTRINSIC(dot) \
+ SKSL_INTRINSIC(equal) \
+ SKSL_INTRINSIC(eval) \
+ SKSL_INTRINSIC(exp2) \
+ SKSL_INTRINSIC(exp) \
+ SKSL_INTRINSIC(faceforward) \
+ SKSL_INTRINSIC(findLSB) \
+ SKSL_INTRINSIC(findMSB) \
+ SKSL_INTRINSIC(floatBitsToInt) \
+ SKSL_INTRINSIC(floatBitsToUint) \
+ SKSL_INTRINSIC(floor) \
+ SKSL_INTRINSIC(fma) \
+ SKSL_INTRINSIC(fract) \
+ SKSL_INTRINSIC(frexp) \
+ SKSL_INTRINSIC(fromLinearSrgb) \
+ SKSL_INTRINSIC(fwidth) \
+ SKSL_INTRINSIC(greaterThanEqual) \
+ SKSL_INTRINSIC(greaterThan) \
+ SKSL_INTRINSIC(height) \
+ SKSL_INTRINSIC(intBitsToFloat) \
+ SKSL_INTRINSIC(inversesqrt) \
+ SKSL_INTRINSIC(inverse) \
+ SKSL_INTRINSIC(isinf) \
+ SKSL_INTRINSIC(isnan) \
+ SKSL_INTRINSIC(ldexp) \
+ SKSL_INTRINSIC(length) \
+ SKSL_INTRINSIC(lessThanEqual) \
+ SKSL_INTRINSIC(lessThan) \
+ SKSL_INTRINSIC(log2) \
+ SKSL_INTRINSIC(log) \
+ SKSL_INTRINSIC(makeSampler2D) \
+ SKSL_INTRINSIC(matrixCompMult) \
+ SKSL_INTRINSIC(matrixInverse) \
+ SKSL_INTRINSIC(max) \
+ SKSL_INTRINSIC(min) \
+ SKSL_INTRINSIC(mix) \
+ SKSL_INTRINSIC(modf) \
+ SKSL_INTRINSIC(mod) \
+ SKSL_INTRINSIC(normalize) \
+ SKSL_INTRINSIC(notEqual) \
+ SKSL_INTRINSIC(not ) \
+ SKSL_INTRINSIC(outerProduct) \
+ SKSL_INTRINSIC(packDouble2x32) \
+ SKSL_INTRINSIC(packHalf2x16) \
+ SKSL_INTRINSIC(packSnorm2x16) \
+ SKSL_INTRINSIC(packSnorm4x8) \
+ SKSL_INTRINSIC(packUnorm2x16) \
+ SKSL_INTRINSIC(packUnorm4x8) \
+ SKSL_INTRINSIC(pow) \
+ SKSL_INTRINSIC(radians) \
+ SKSL_INTRINSIC(read) \
+ SKSL_INTRINSIC(reflect) \
+ SKSL_INTRINSIC(refract) \
+ SKSL_INTRINSIC(roundEven) \
+ SKSL_INTRINSIC(round) \
+ SKSL_INTRINSIC(sample) \
+ SKSL_INTRINSIC(sampleGrad) \
+ SKSL_INTRINSIC(sampleLod) \
+ SKSL_INTRINSIC(saturate) \
+ SKSL_INTRINSIC(sign) \
+ SKSL_INTRINSIC(sinh) \
+ SKSL_INTRINSIC(sin) \
+ SKSL_INTRINSIC(smoothstep) \
+ SKSL_INTRINSIC(sqrt) \
+ SKSL_INTRINSIC(step) \
+ SKSL_INTRINSIC(storageBarrier) \
+ SKSL_INTRINSIC(subpassLoad) \
+ SKSL_INTRINSIC(tanh) \
+ SKSL_INTRINSIC(tan) \
+ SKSL_INTRINSIC(toLinearSrgb) \
+ SKSL_INTRINSIC(transpose) \
+ SKSL_INTRINSIC(trunc) \
+ SKSL_INTRINSIC(uintBitsToFloat) \
+ SKSL_INTRINSIC(unpackDouble2x32) \
+ SKSL_INTRINSIC(unpackHalf2x16) \
+ SKSL_INTRINSIC(unpackSnorm2x16) \
+ SKSL_INTRINSIC(unpackSnorm4x8) \
+ SKSL_INTRINSIC(unpackUnorm2x16) \
+ SKSL_INTRINSIC(unpackUnorm4x8) \
+ SKSL_INTRINSIC(width) \
+ SKSL_INTRINSIC(workgroupBarrier) \
+ SKSL_INTRINSIC(write)
+
+namespace SkSL {
+
+// The `IntrinsicKind` enum holds every intrinsic supported by SkSL.
+#define SKSL_INTRINSIC(name) k_##name##_IntrinsicKind,
+enum IntrinsicKind : int8_t {
+ kNotIntrinsic = -1,
+ SKSL_INTRINSIC_LIST
+};
+#undef SKSL_INTRINSIC
+
+// Returns a map which allows IntrinsicKind values to be looked up by name.
+using IntrinsicMap = SkTHashMap<std::string_view, IntrinsicKind>;
+const IntrinsicMap& GetIntrinsicMap();
+
+// Looks up intrinsic functions by name.
+IntrinsicKind FindIntrinsicKind(std::string_view functionName);
+
+}
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLLexer.cpp b/gfx/skia/skia/src/sksl/SkSLLexer.cpp
new file mode 100644
index 0000000000..10c1108c09
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLLexer.cpp
@@ -0,0 +1,808 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+/*****************************************************************************************
+ ******************** This file was generated by sksllex. Do not edit. *******************
+ *****************************************************************************************/
+#include "src/sksl/SkSLLexer.h"
+
+namespace SkSL {
+
+using State = uint16_t;
+static constexpr uint8_t kInvalidChar = 18;
+static constexpr int8_t kMappings[118] = {
+ 1, 2, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 1, 4, 3, 5, 6, 7, 8, 3, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 22, 22, 23, 23, 24, 25, 26, 27, 28, 29, 3, 30, 30, 31, 32,
+ 33, 30, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 35, 36, 34, 37, 34, 34, 38,
+ 34, 34, 39, 3, 40, 41, 42, 3, 43, 44, 45, 46, 47, 48, 49, 50, 51, 34, 52, 53,
+ 54, 55, 56, 57, 34, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70};
+using IndexEntry = int16_t;
+struct FullEntry {
+ State data[71];
+};
+struct CompactEntry {
+ uint32_t values;
+ uint8_t data[18];
+};
+static constexpr FullEntry kFull[] = {
+ {
+ 0, 2, 3, 4, 5, 7, 9, 23, 25, 28, 29, 30, 32, 35, 36,
+ 39, 44, 50, 69, 69, 69, 69, 69, 69, 71, 72, 73, 77, 79, 83,
+ 84, 84, 84, 84, 84, 84, 84, 84, 84, 86, 87, 88, 84, 91, 104,
+ 114, 130, 150, 162, 178, 183, 191, 84, 215, 225, 232, 258, 263, 279, 291,
+ 345, 362, 378, 390, 84, 84, 84, 411, 412, 415, 416,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 59, 59, 59, 59, 59, 59, 60,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 61, 0, 0, 0, 66, 67, 0, 0, 0, 0, 0, 0, 0, 0, 61,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66, 0, 0, 67, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 59, 59, 59, 59, 59, 59, 60,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 61, 0, 0, 0, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 60, 60, 60, 60, 60, 60, 60,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 61, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 0, 70, 70, 70, 70, 70, 70, 70,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 61, 0, 0, 0, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 115, 85, 85, 85, 85, 85, 85, 85, 85, 85, 118,
+ 85, 85, 121, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 85, 85, 85, 85, 131, 85, 85, 85, 137, 85, 85,
+ 85, 85, 143, 85, 85, 85, 85, 85, 147, 85, 85, 85, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 151,
+ 85, 154, 85, 85, 85, 85, 85, 85, 85, 85, 156, 85, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 163, 85, 85, 85, 85, 85, 85, 85, 167, 85, 170,
+ 85, 85, 173, 85, 85, 85, 85, 85, 175, 85, 85, 85, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 194,
+ 85, 85, 198, 201, 85, 85, 203, 85, 209, 85, 85, 85, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 264, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 268, 85, 85, 275, 85, 85, 85, 85, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 292, 85, 85, 85, 85, 85, 85, 85, 324, 85, 85,
+ 85, 85, 85, 85, 85, 85, 328, 336, 85, 340, 85, 85, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 298, 305, 316, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 321, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 85, 85, 85, 85, 346, 85, 85, 352, 85, 85, 85,
+ 85, 85, 85, 85, 354, 85, 85, 85, 85, 85, 85, 357, 85, 0, 0, 0, 0,
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 85,
+ 85, 85, 85, 85, 85, 85, 0, 0, 0, 0, 0, 0, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 0, 0, 0, 85, 85, 85, 85, 85, 85, 85, 85, 391, 85, 85, 85,
+ 85, 85, 395, 85, 403, 85, 85, 85, 85, 85, 85, 85, 85, 0, 0, 0, 0,
+ },
+};
+static constexpr CompactEntry kCompact[] = {
+ {0,
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {3,
+ {195, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {6,
+ {255, 255, 255, 255, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {8, {255, 255, 255, 255, 255, 255, 255, 15, 0, 192, 15, 0, 0, 0, 0, 0, 192, 63}},
+ {8, {255, 255, 255, 255, 3, 0, 255, 15, 0, 192, 15, 0, 0, 0, 0, 0, 192, 63}},
+ {19 | (11 << 9) | (10 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 106, 170, 170, 162, 170, 234, 63}},
+ {10, {255, 255, 255, 255, 3, 0, 255, 15, 0, 192, 15, 0, 0, 0, 0, 0, 192, 63}},
+ {14 | (12 << 9) | (10 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 170, 170, 106, 170, 232, 63}},
+ {13 | (10 << 9),
+ {255, 255, 255, 255, 87, 84, 255, 95, 85, 213, 95, 85, 85, 85, 85, 85, 213, 63}},
+ {15 | (10 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {16 | (10 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {17 | (10 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {18 | (10 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {20 | (10 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {21 | (10 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {22 | (10 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {24,
+ {255, 255, 255, 255, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {27 | (26 << 9),
+ {255, 255, 253, 255, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {31,
+ {255, 255, 255, 255, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {34 | (33 << 9),
+ {255, 255, 255, 253, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {38 | (37 << 9),
+ {255, 255, 255, 223, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {40, {255, 255, 255, 255, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {41 | (40 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 255, 243, 255, 255, 63, 255, 255, 255, 255, 255, 63}},
+ {43 | (42 << 9),
+ {255, 255, 255, 221, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {43, {255, 255, 255, 255, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {49 | (48 << 9) | (45 << 18),
+ {255, 255, 191, 255, 253, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {46 | (45 << 9), {87, 85, 21, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 21}},
+ {47 | (45 << 9), {87, 85, 85, 85, 84, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 21}},
+ {48, {51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {56 | (52 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 255, 243, 255, 255, 63, 255, 255, 255, 255, 255, 63}},
+ {53 | (52 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 255, 243, 255, 255, 63, 255, 255, 255, 255, 255, 63}},
+ {55 | (54 << 9),
+ {255, 255, 255, 221, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {55, {255, 255, 255, 255, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {58 | (57 << 9),
+ {255, 255, 255, 221, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {58, {255, 255, 255, 255, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {63 | (62 << 9),
+ {255, 255, 255, 221, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {63, {255, 255, 255, 255, 3, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {68, {255, 255, 255, 255, 3, 0, 255, 15, 240, 255, 63, 0, 252, 255, 255, 255, 255, 63}},
+ {68 | (66 << 9),
+ {255, 255, 255, 255, 3, 0, 255, 15, 240, 247, 63, 0, 252, 255, 255, 247, 255, 63}},
+ {76 | (74 << 9),
+ {255, 255, 255, 255, 255, 255, 31, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {75,
+ {255, 255, 255, 255, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {78,
+ {255, 255, 255, 255, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {81 | (80 << 9),
+ {255, 255, 255, 255, 255, 255, 127, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {82,
+ {255, 255, 255, 255, 255, 255, 63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63}},
+ {85, {255, 255, 255, 255, 3, 0, 255, 15, 0, 192, 15, 0, 0, 0, 0, 0, 192, 63}},
+ {90 | (89 << 9),
+ {255, 255, 255, 255, 255, 255, 127, 255, 255, 255, 243, 255, 255, 255, 255, 255, 255, 63}},
+ {94 | (92 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 170, 170, 106, 168, 234, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 69, 85, 85, 213, 63}},
+ {98 | (95 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 170, 170, 169, 168, 234, 63}},
+ {96 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 69, 85, 85, 213, 63}},
+ {97 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {99 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {100 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {101 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 84, 85, 85, 85, 85, 213, 63}},
+ {102 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {103 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {109 | (105 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 170, 170, 154, 162, 234, 63}},
+ {106 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {107 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {108 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 84, 85, 85, 213, 63}},
+ {110 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 84, 85, 85, 85, 213, 63}},
+ {111 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 84, 85, 85, 85, 213, 63}},
+ {112 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {113 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {116 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {117 | (93 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 42, 170, 170, 170, 169, 234, 63}},
+ {119 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {120 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {122 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {125 | (123 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 170, 170, 106, 168, 234, 63}},
+ {124 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {126 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {127 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {128 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {129 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {132 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 84, 85, 85, 85, 213, 63}},
+ {133 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {134 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {135 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {136 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {138 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {139 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {140 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {141 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {142 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 69, 85, 85, 85, 85, 213, 63}},
+ {144 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {145 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 84, 85, 85, 85, 85, 213, 63}},
+ {146 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {148 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {149 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 23, 80, 255, 95, 85, 213, 95, 85, 85, 85, 85, 85, 213, 63}},
+ {152 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {153 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {155 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {157 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {158 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {159 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {160 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {161 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {164 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {165 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {166 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {168 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 85, 212, 63}},
+ {169 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 69, 85, 85, 85, 85, 213, 63}},
+ {171 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {172 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {174 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {176 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {177 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {181 | (179 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 170, 166, 168, 170, 234, 63}},
+ {180 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 79, 85, 85, 85, 85, 85, 213, 63}},
+ {180, {255, 255, 255, 255, 3, 0, 255, 15, 0, 192, 15, 0, 0, 0, 0, 0, 192, 63}},
+ {182 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {188 | (184 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 106, 170, 170, 138, 234, 63}},
+ {185 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 81, 85, 85, 85, 213, 63}},
+ {186 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 69, 85, 85, 85, 213, 63}},
+ {187 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {189 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {190 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {193 | (192 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 169, 42, 170, 170, 234, 63}},
+ {195 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {196 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {197 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {199 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {200 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {202 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {204 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {205 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {206 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 84, 85, 85, 85, 213, 63}},
+ {207 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {208 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {210 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {211 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {212 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {213 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {214 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {221 | (216 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 111, 170, 170, 170, 168, 170, 234, 63}},
+ {217 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 85, 209, 63}},
+ {218 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {219 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {220 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {223 | (222 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 170, 106, 170, 42, 234, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 81, 85, 85, 85, 213, 63}},
+ {224 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {226 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {227 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 69, 85, 85, 85, 85, 213, 63}},
+ {228 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {229 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {230 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 69, 85, 85, 213, 63}},
+ {231 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {240 | (233 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 111, 170, 170, 170, 168, 170, 234, 63}},
+ {234 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 69, 85, 85, 213, 63}},
+ {235 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {236 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {237 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {238 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {239 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {247 | (241 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 106, 170, 162, 170, 234, 63}},
+ {242 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {243 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {244 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {245 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {246 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {248 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {249 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {250 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {251 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {252 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {253 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {254 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {255 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {256 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 69, 213, 63}},
+ {257 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {259 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {260 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {261 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {262 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {265 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {266 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 84, 85, 85, 213, 63}},
+ {267 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {269 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {270 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {271 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {272 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 21, 85, 213, 63}},
+ {273 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {274 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {276 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 84, 85, 85, 85, 85, 213, 63}},
+ {277 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {278 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {280 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {287 | (281 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 111, 170, 170, 170, 170, 168, 234, 63}},
+ {282 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 69, 85, 85, 85, 85, 213, 63}},
+ {283 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {284 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {285 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {286 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 85, 209, 63}},
+ {288 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {289 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {290 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {293 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 69, 85, 85, 213, 63}},
+ {294 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {295 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {296 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {297 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {299 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 84, 213, 95, 85, 85, 85, 85, 85, 213, 63}},
+ {300 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 212, 95, 85, 85, 85, 85, 85, 213, 63}},
+ {301 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 69, 85, 85, 85, 213, 63}},
+ {302 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {303 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 69, 85, 85, 85, 85, 213, 63}},
+ {304 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 21, 213, 63}},
+ {306 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 84, 213, 95, 85, 85, 85, 85, 85, 213, 63}},
+ {307 | (300 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 42, 233, 175, 170, 170, 170, 170, 170, 234, 63}},
+ {308 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {309 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {310 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {311 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 212, 95, 85, 85, 85, 85, 85, 213, 63}},
+ {312 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 69, 85, 85, 85, 213, 63}},
+ {313 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {314 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 69, 85, 85, 85, 85, 213, 63}},
+ {315 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {317 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 84, 213, 95, 85, 85, 85, 85, 85, 213, 63}},
+ {318 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 21, 213, 95, 85, 85, 85, 85, 85, 213, 63}},
+ {319 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {320 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {322 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {323 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 84, 85, 85, 85, 85, 213, 63}},
+ {325 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 85, 197, 63}},
+ {326 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {327 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 84, 85, 85, 85, 213, 63}},
+ {332 | (329 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 111, 170, 170, 170, 138, 170, 234, 63}},
+ {330 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {331 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {333 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {334 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {335 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {337 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {338 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {339 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {93 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {341 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {342 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {343 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 81, 85, 85, 85, 85, 213, 63}},
+ {344 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 69, 85, 85, 85, 213, 63}},
+ {347 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 69, 85, 85, 213, 63}},
+ {348 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {349 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {350 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {351 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {353 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {355 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {356 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {358 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {359 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {360 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 69, 85, 85, 85, 85, 213, 63}},
+ {361 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {375 | (363 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 170, 106, 42, 170, 234, 63}},
+ {370 | (364 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 106, 170, 42, 170, 234, 63}},
+ {369 | (365 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 175, 170, 169, 170, 168, 170, 234, 63}},
+ {366 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {367 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {368 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 69, 85, 85, 213, 63}},
+ {371 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {372 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 81, 85, 85, 85, 213, 63}},
+ {373 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {374 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {376 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {377 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {384 | (379 << 9) | (85 << 18),
+ {255, 255, 255, 255, 171, 170, 255, 175, 170, 234, 111, 170, 170, 170, 168, 170, 234, 63}},
+ {380 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {381 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 85, 209, 63}},
+ {382 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {383 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {385 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {386 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 31, 85, 85, 85, 85, 85, 213, 63}},
+ {387 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {388 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {389 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {392 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {393 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {394 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {396 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {397 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 84, 85, 85, 213, 63}},
+ {398 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 81, 85, 85, 85, 213, 63}},
+ {399 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 69, 85, 213, 63}},
+ {400 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {401 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 81, 213, 63}},
+ {402 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 81, 85, 213, 63}},
+ {404 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 21, 85, 85, 85, 213, 63}},
+ {405 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 84, 213, 63}},
+ {406 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 21, 85, 85, 85, 85, 213, 63}},
+ {407 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 84, 85, 213, 63}},
+ {408 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 21, 85, 85, 213, 63}},
+ {409 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 81, 85, 85, 213, 63}},
+ {410 | (85 << 9),
+ {255, 255, 255, 255, 87, 85, 255, 95, 85, 213, 95, 85, 85, 85, 85, 85, 209, 63}},
+ {414 | (413 << 9),
+ {255, 255, 255, 255, 255, 255, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 60}},
+};
+static constexpr IndexEntry kIndices[] = {
+ 0, -1, 1, 1, 0, 2, 0, 3, 4, 5, 6, 7, 8, 6, 9, 10, 11, 12,
+ 6, 13, 14, 15, 6, 16, 0, 17, 0, 0, 0, 0, 18, 0, 19, 0, 0, 0,
+ 20, 0, 0, 21, 22, 23, 24, 24, 25, 26, 27, 0, 28, 0, -2, 29, 30, 31,
+ 32, 32, 33, 34, 34, -3, -4, 35, 36, 36, 0, 0, 0, 37, 38, -5, -5, 0,
+ 0, 39, 40, 0, 0, 41, 0, 42, 0, 43, 0, 0, 44, 44, 0, 0, 45, 0,
+ 0, 46, 47, 44, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 44, 62, 63, 64, 65, 44, -6, 66, 67, 44, 68, 69, 70, 71, 72, 73, 44, 74,
+ 75, 76, 77, 44, -7, 78, 79, 80, 81, 82, 44, 83, 84, 85, 86, 87, 44, 88,
+ 89, 90, 57, 91, 92, 93, -8, 94, 95, 44, 96, 47, 97, 98, 99, 100, 101, 102,
+ -9, 103, 104, 105, 44, 106, 107, 108, 109, 110, 44, 111, 44, 112, 113, 93, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 44, 123, 124, 93, 125, 44, -10, 126, 127, 128, 44,
+ 129, 130, 44, 131, 132, 133, 134, 135, 136, 137, 57, 138, 139, 140, 141, 142, 132, 143,
+ 144, 145, 146, 147, 44, 148, 149, 150, 44, 151, 152, 153, 154, 155, 156, 44, 157, 158,
+ 159, 160, 161, 162, 163, 57, 164, 165, 166, 167, 168, 169, 44, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 44, 180, 181, 182, 183, 132, -11, 184, 185, 186, 108, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 51, 197, 198, 199, 200, 201, 202, 203, 44, 204,
+ 205, 206, 44, -12, 207, 208, 209, 210, 211, -13, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 218, 229, 230, 231, 232, 132, 233, 234, 57,
+ 235, 236, 237, 238, 239, 240, 241, 51, 242, 243, 244, 44, 245, 246, 247, 248, 249, 250,
+ 251, 252, 44, -14, 253, 254, 255, 256, 257, 57, 258, 70, 259, 260, 44, 261, 262, 263,
+ 264, 238, 265, 266, 267, 268, 269, 270, 44, 193, 271, 272, 273, 274, 108, 275, 276, 149,
+ 277, 278, 279, 280, 281, 149, 282, 283, 284, 285, 286, 57, -15, 287, 288, 289, 44, 290,
+ 291, 292, 293, 294, 295, 296, 44, 297, 298, 299, 300, 301, 302, 303, 44, 0, 304, 0,
+ 0, 0, 0,
+};
+State get_transition(int transition, int state) {
+ IndexEntry index = kIndices[state];
+ if (index < 0) {
+ return kFull[~index].data[transition];
+ }
+ const CompactEntry& entry = kCompact[index];
+ int v = entry.data[transition >> 2];
+ v >>= 2 * (transition & 3);
+ v &= 3;
+ v *= 9;
+ return (entry.values >> v) & 511;
+}
+static const int8_t kAccepts[417] = {
+ -1, -1, 88, 88, 91, 67, 72, 91, 42, 40, 40, 40, 40, 36, 40, 40, 40, 40, 37, 40, 40, 40,
+ 27, 57, 81, 62, 66, 86, 43, 44, 55, 79, 53, 51, 77, 50, 54, 52, 78, 49, 1, -1, -1, 1,
+ 56, -1, -1, 90, 89, 80, 2, 1, 1, -1, -1, 1, -1, -1, 1, 2, 3, -1, -1, 1, 3, 2,
+ 2, -1, 2, 2, 2, 69, 87, 74, 58, 82, 76, 70, 71, 73, 75, 59, 83, 68, 41, 41, 47, 48,
+ 61, 85, 65, 41, 41, 39, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 14, 41,
+ 41, 41, 41, 30, 41, 41, 41, 12, 41, 41, 41, 41, 41, 41, 22, 41, 41, 41, 41, 15, 41, 41,
+ 41, 41, 41, 41, 13, 41, 41, 41, 41, 41, 16, 10, 41, 41, 41, 41, 41, 41, 41, 41, 41, 7,
+ 41, 41, 41, 41, 41, 41, 39, 41, 41, 41, 41, 41, 5, 41, 41, 41, 41, 41, 23, 41, 8, 41,
+ 41, 41, 41, 41, 39, 41, 41, 41, 41, 41, 41, 33, 41, 41, 41, 41, 6, 18, 41, 41, 41, 25,
+ 41, 41, 20, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 32, 41, 41, 41, 35, 41, 41, 41, 41, 41, 41, 34, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 41, 41, 41, 41, 26, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 24, 41, 41, 19, 41, 41, 41,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 28, 41, 41, 41, 17, 41, 41, 41, 41, 41, 41, 41, 41, 39, 41, 41, 41, 41, 41, 41, 41, 41,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 39, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 41, 41, 41, 41, 41, 31, 41, 41, 41, 41, 41, 41, 41, 41, 11, 41, 41, 41, 41, 41, 41, 41,
+ 41, 41, 41, 41, 4, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 21, 41, 41, 41, 41, 41,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 9, 41,
+ 41, 41, 41, 41, 41, 41, 38, 41, 41, 41, 41, 41, 41, 41, 29, 45, 60, 84, 64, 46, 63,
+};
+
+Token Lexer::next() {
+ // note that we cheat here: normally a lexer needs to worry about the case
+ // where a token has a prefix which is not itself a valid token - for instance,
+ // maybe we have a valid token 'while', but 'w', 'wh', etc. are not valid
+ // tokens. Our grammar doesn't have this property, so we can simplify the logic
+ // a bit.
+ int32_t startOffset = fOffset;
+ State state = 1;
+ for (;;) {
+ if (fOffset >= (int32_t)fText.length()) {
+ if (startOffset == (int32_t)fText.length() || kAccepts[state] == -1) {
+ return Token(Token::Kind::TK_END_OF_FILE, startOffset, 0);
+ }
+ break;
+ }
+ uint8_t c = (uint8_t)(fText[fOffset] - 9);
+ if (c >= 118) {
+ c = kInvalidChar;
+ }
+ State newState = get_transition(kMappings[c], state);
+ if (!newState) {
+ break;
+ }
+ state = newState;
+ ++fOffset;
+ }
+ Token::Kind kind = (Token::Kind)kAccepts[state];
+ return Token(kind, startOffset, fOffset - startOffset);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLLexer.h b/gfx/skia/skia/src/sksl/SkSLLexer.h
new file mode 100644
index 0000000000..1cd81a66b7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLLexer.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+/*****************************************************************************************
+ ******************** This file was generated by sksllex. Do not edit. *******************
+ *****************************************************************************************/
+#ifndef SKSL_Lexer
+#define SKSL_Lexer
+#include <cstdint>
+#include <string_view>
+namespace SkSL {
+
+struct Token {
+ enum class Kind {
+ TK_END_OF_FILE,
+ TK_FLOAT_LITERAL,
+ TK_INT_LITERAL,
+ TK_BAD_OCTAL,
+ TK_TRUE_LITERAL,
+ TK_FALSE_LITERAL,
+ TK_IF,
+ TK_ELSE,
+ TK_FOR,
+ TK_WHILE,
+ TK_DO,
+ TK_SWITCH,
+ TK_CASE,
+ TK_DEFAULT,
+ TK_BREAK,
+ TK_CONTINUE,
+ TK_DISCARD,
+ TK_RETURN,
+ TK_IN,
+ TK_OUT,
+ TK_INOUT,
+ TK_UNIFORM,
+ TK_CONST,
+ TK_FLAT,
+ TK_NOPERSPECTIVE,
+ TK_INLINE,
+ TK_NOINLINE,
+ TK_PURE,
+ TK_READONLY,
+ TK_WRITEONLY,
+ TK_BUFFER,
+ TK_STRUCT,
+ TK_LAYOUT,
+ TK_HIGHP,
+ TK_MEDIUMP,
+ TK_LOWP,
+ TK_ES3,
+ TK_EXPORT,
+ TK_WORKGROUP,
+ TK_RESERVED,
+ TK_PRIVATE_IDENTIFIER,
+ TK_IDENTIFIER,
+ TK_DIRECTIVE,
+ TK_LPAREN,
+ TK_RPAREN,
+ TK_LBRACE,
+ TK_RBRACE,
+ TK_LBRACKET,
+ TK_RBRACKET,
+ TK_DOT,
+ TK_COMMA,
+ TK_PLUSPLUS,
+ TK_MINUSMINUS,
+ TK_PLUS,
+ TK_MINUS,
+ TK_STAR,
+ TK_SLASH,
+ TK_PERCENT,
+ TK_SHL,
+ TK_SHR,
+ TK_BITWISEOR,
+ TK_BITWISEXOR,
+ TK_BITWISEAND,
+ TK_BITWISENOT,
+ TK_LOGICALOR,
+ TK_LOGICALXOR,
+ TK_LOGICALAND,
+ TK_LOGICALNOT,
+ TK_QUESTION,
+ TK_COLON,
+ TK_EQ,
+ TK_EQEQ,
+ TK_NEQ,
+ TK_GT,
+ TK_LT,
+ TK_GTEQ,
+ TK_LTEQ,
+ TK_PLUSEQ,
+ TK_MINUSEQ,
+ TK_STAREQ,
+ TK_SLASHEQ,
+ TK_PERCENTEQ,
+ TK_SHLEQ,
+ TK_SHREQ,
+ TK_BITWISEOREQ,
+ TK_BITWISEXOREQ,
+ TK_BITWISEANDEQ,
+ TK_SEMICOLON,
+ TK_WHITESPACE,
+ TK_LINE_COMMENT,
+ TK_BLOCK_COMMENT,
+ TK_INVALID,
+ TK_NONE,
+ };
+
+ Token() {}
+ Token(Kind kind, int32_t offset, int32_t length)
+ : fKind(kind), fOffset(offset), fLength(length) {}
+
+ Kind fKind = Kind::TK_NONE;
+ int32_t fOffset = -1;
+ int32_t fLength = -1;
+};
+
+class Lexer {
+public:
+ void start(std::string_view text) {
+ fText = text;
+ fOffset = 0;
+ }
+
+ Token next();
+
+ struct Checkpoint {
+ int32_t fOffset;
+ };
+
+ Checkpoint getCheckpoint() const { return {fOffset}; }
+
+ void rewindToCheckpoint(Checkpoint checkpoint) { fOffset = checkpoint.fOffset; }
+
+private:
+ std::string_view fText;
+ int32_t fOffset;
+};
+
+} // namespace SkSL
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLMangler.cpp b/gfx/skia/skia/src/sksl/SkSLMangler.cpp
new file mode 100644
index 0000000000..650837d7aa
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMangler.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLMangler.h"
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "src/base/SkStringView.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#include <algorithm>
+#include <cstring>
+#include <ctype.h>
+
+namespace SkSL {
+
+std::string Mangler::uniqueName(std::string_view baseName, SymbolTable* symbolTable) {
+ SkASSERT(symbolTable);
+
+ // Private names might begin with a $. Strip that off.
+ if (skstd::starts_with(baseName, '$')) {
+ baseName.remove_prefix(1);
+ }
+
+ // The inliner runs more than once, so the base name might already have been mangled and have a
+ // prefix like "_123_x". Let's strip that prefix off to make the generated code easier to read.
+ if (skstd::starts_with(baseName, '_')) {
+ // Determine if we have a string of digits.
+ int offset = 1;
+ while (isdigit(baseName[offset])) {
+ ++offset;
+ }
+ // If we found digits, another underscore, and anything else, that's the mangler prefix.
+ // Strip it off.
+ if (offset > 1 && baseName[offset] == '_' && baseName[offset + 1] != '\0') {
+ baseName.remove_prefix(offset + 1);
+ } else {
+ // This name doesn't contain a mangler prefix, but it does start with an underscore.
+ // OpenGL disallows two consecutive underscores anywhere in the string, and we'll be
+ // adding one as part of the mangler prefix, so strip the leading underscore.
+ baseName.remove_prefix(1);
+ }
+ }
+
+ // Append a unique numeric prefix to avoid name overlap. Check the symbol table to make sure
+ // we're not reusing an existing name. (Note that within a single compilation pass, this check
+ // isn't fully comprehensive, as code isn't always generated in top-to-bottom order.)
+
+ // This code is a performance hotspot. Assemble the string manually to save a few cycles.
+ char uniqueName[256];
+ uniqueName[0] = '_';
+ char* uniqueNameEnd = uniqueName + std::size(uniqueName);
+ for (;;) {
+ // _123
+ char* endPtr = SkStrAppendS32(uniqueName + 1, fCounter++);
+
+ // _123_
+ *endPtr++ = '_';
+
+ // _123_baseNameTruncatedToFit (no null terminator, because string_view doesn't require one)
+ int baseNameCopyLength = std::min<int>(baseName.size(), uniqueNameEnd - endPtr);
+ memcpy(endPtr, baseName.data(), baseNameCopyLength);
+ endPtr += baseNameCopyLength;
+
+ std::string_view uniqueNameView(uniqueName, endPtr - uniqueName);
+ if (symbolTable->find(uniqueNameView) == nullptr) {
+ return std::string(uniqueNameView);
+ }
+ }
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLMangler.h b/gfx/skia/skia/src/sksl/SkSLMangler.h
new file mode 100644
index 0000000000..8c0dd5e6e0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMangler.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MANGLER
+#define SKSL_MANGLER
+
+#include <string>
+#include <string_view>
+
+namespace SkSL {
+
+class SymbolTable;
+
+class Mangler {
+public:
+ /**
+ * Mangles baseName to create a name that is unique within symbolTable.
+ */
+ std::string uniqueName(std::string_view baseName, SymbolTable* symbolTable);
+
+ void reset() {
+ fCounter = 0;
+ }
+
+private:
+ int fCounter = 0;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLMemoryLayout.h b/gfx/skia/skia/src/sksl/SkSLMemoryLayout.h
new file mode 100644
index 0000000000..8389c00346
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMemoryLayout.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKIASL_MEMORYLAYOUT
+#define SKIASL_MEMORYLAYOUT
+
+#include <algorithm>
+
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+class MemoryLayout {
+public:
+ enum class Standard {
+ // GLSL std140 layout as described in OpenGL Spec v4.5, 7.6.2.2.
+ k140,
+
+ // GLSL std430 layout. This layout is like std140 but with optimizations. This layout can
+ // ONLY be used with shader storage blocks.
+ k430,
+
+ // MSL memory layout.
+ kMetal,
+
+ // WebGPU Shading Language buffer layout constraints for the uniform address space.
+ kWGSLUniform,
+
+ // WebGPU Shading Language buffer layout constraints for the storage address space.
+ kWGSLStorage,
+ };
+
+ MemoryLayout(Standard std)
+ : fStd(std) {}
+
+ bool isWGSL() const { return fStd == Standard::kWGSLUniform || fStd == Standard::kWGSLStorage; }
+
+ bool isMetal() const { return fStd == Standard::kMetal; }
+
+ /**
+ * WGSL and std140 require various types of variables (structs, arrays, and matrices) in the
+ * uniform address space to be rounded up to the nearest multiple of 16. This function performs
+ * the rounding depending on the given `type` and the current memory layout standard.
+ *
+ * (For WGSL, see https://www.w3.org/TR/WGSL/#address-space-layout-constraints).
+ */
+ size_t roundUpIfNeeded(size_t raw, Type::TypeKind type) const {
+ if (fStd == Standard::k140) {
+ return roundUp16(raw);
+ }
+ // WGSL uniform matrix layout is simply the alignment of the matrix columns and
+ // doesn't have a 16-byte multiple alignment constraint.
+ if (fStd == Standard::kWGSLUniform && type != Type::TypeKind::kMatrix) {
+ return roundUp16(raw);
+ }
+ return raw;
+ }
+
+ /**
+ * Rounds up the integer `n` to the smallest multiple of 16 that is greater than or equal to `n`.
+ */
+ size_t roundUp16(size_t n) const { return (n + 15) & ~15; }
+
+ /**
+ * Returns a type's required alignment when used as a standalone variable.
+ */
+ size_t alignment(const Type& type) const {
+ // See OpenGL Spec 7.6.2.2 Standard Uniform Block Layout
+ switch (type.typeKind()) {
+ case Type::TypeKind::kScalar:
+ case Type::TypeKind::kAtomic:
+ return this->size(type);
+ case Type::TypeKind::kVector:
+ return GetVectorAlignment(this->size(type.componentType()), type.columns());
+ case Type::TypeKind::kMatrix:
+ return this->roundUpIfNeeded(
+ GetVectorAlignment(this->size(type.componentType()), type.rows()),
+ type.typeKind());
+ case Type::TypeKind::kArray:
+ return this->roundUpIfNeeded(this->alignment(type.componentType()),
+ type.typeKind());
+ case Type::TypeKind::kStruct: {
+ size_t result = 0;
+ for (const auto& f : type.fields()) {
+ size_t alignment = this->alignment(*f.fType);
+ if (alignment > result) {
+ result = alignment;
+ }
+ }
+ return this->roundUpIfNeeded(result, type.typeKind());
+ }
+ default:
+ SK_ABORT("cannot determine alignment of type %s", type.displayName().c_str());
+ }
+ }
+
+ /**
+ * For matrices and arrays, returns the number of bytes from the start of one entry (row, in
+ * the case of matrices) to the start of the next.
+ */
+ size_t stride(const Type& type) const {
+ switch (type.typeKind()) {
+ case Type::TypeKind::kMatrix:
+ return this->alignment(type);
+ case Type::TypeKind::kArray: {
+ int stride = this->size(type.componentType());
+ if (stride > 0) {
+ int align = this->alignment(type.componentType());
+ stride += align - 1;
+ stride -= stride % align;
+ stride = this->roundUpIfNeeded(stride, type.typeKind());
+ }
+ return stride;
+ }
+ default:
+ SK_ABORT("type does not have a stride");
+ }
+ }
+
+ /**
+ * Returns the size of a type in bytes. Returns 0 if the given type is not supported.
+ */
+ size_t size(const Type& type) const {
+ switch (type.typeKind()) {
+ case Type::TypeKind::kScalar:
+ if (type.isBoolean()) {
+ if (this->isWGSL()) {
+ return 0;
+ }
+ return 1;
+ }
+ if ((this->isMetal() || this->isWGSL()) && !type.highPrecision() &&
+ type.isNumber()) {
+ return 2;
+ }
+ return 4;
+ case Type::TypeKind::kAtomic:
+ // Our atomic types (currently atomicUint) always occupy 4 bytes.
+ return 4;
+ case Type::TypeKind::kVector:
+ if (this->isMetal() && type.columns() == 3) {
+ return 4 * this->size(type.componentType());
+ }
+ return type.columns() * this->size(type.componentType());
+ case Type::TypeKind::kMatrix: // fall through
+ case Type::TypeKind::kArray:
+ return type.isUnsizedArray() ? 0 : (type.columns() * this->stride(type));
+ case Type::TypeKind::kStruct: {
+ size_t total = 0;
+ for (const auto& f : type.fields()) {
+ size_t alignment = this->alignment(*f.fType);
+ if (total % alignment != 0) {
+ total += alignment - total % alignment;
+ }
+ SkASSERT(total % alignment == 0);
+ total += this->size(*f.fType);
+ }
+ size_t alignment = this->alignment(type);
+ SkASSERT(!type.fields().size() ||
+ (0 == alignment % this->alignment(*type.fields()[0].fType)));
+ return (total + alignment - 1) & ~(alignment - 1);
+ }
+ default:
+ SK_ABORT("cannot determine size of type %s", type.displayName().c_str());
+ }
+ }
+
+ /**
+ * Not all types are compatible with memory layout.
+ */
+ size_t isSupported(const Type& type) const {
+ switch (type.typeKind()) {
+ case Type::TypeKind::kAtomic:
+ return true;
+
+ case Type::TypeKind::kScalar:
+ // bool and short are not host-shareable in WGSL.
+ return !this->isWGSL() ||
+ (!type.isBoolean() && (type.isFloat() || type.highPrecision()));
+
+ case Type::TypeKind::kVector:
+ case Type::TypeKind::kMatrix:
+ case Type::TypeKind::kArray:
+ return this->isSupported(type.componentType());
+
+ case Type::TypeKind::kStruct:
+ return std::all_of(
+ type.fields().begin(), type.fields().end(), [this](const Type::Field& f) {
+ return this->isSupported(*f.fType);
+ });
+
+ default:
+ return false;
+ }
+ }
+
+private:
+ static size_t GetVectorAlignment(size_t componentSize, int columns) {
+ return componentSize * (columns + columns % 2);
+ }
+
+ const Standard fStd;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLMemoryPool.h b/gfx/skia/skia/src/sksl/SkSLMemoryPool.h
new file mode 100644
index 0000000000..0d16d84ac0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLMemoryPool.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MEMORYPOOL
+#define SKSL_MEMORYPOOL
+
+#include <memory>
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_GANESH)
+
+#include "src/gpu/ganesh/GrMemoryPool.h"
+
+namespace SkSL {
+using MemoryPool = ::GrMemoryPool;
+}
+
+#else
+
+// When Ganesh is disabled, GrMemoryPool is not linked in. We include a minimal class which mimics
+// the GrMemoryPool interface but simply redirects to the system allocator.
+namespace SkSL {
+
+class MemoryPool {
+public:
+ static std::unique_ptr<MemoryPool> Make(size_t, size_t) {
+ return std::make_unique<MemoryPool>();
+ }
+ void resetScratchSpace() {}
+ void reportLeaks() const {}
+ bool isEmpty() const { return true; }
+ void* allocate(size_t size) { return ::operator new(size); }
+ void release(void* p) { ::operator delete(p); }
+};
+
+} // namespace SkSL
+
+#endif // defined(SK_GANESH)
+#endif // SKSL_MEMORYPOOL
diff --git a/gfx/skia/skia/src/sksl/SkSLModifiersPool.h b/gfx/skia/skia/src/sksl/SkSLModifiersPool.h
new file mode 100644
index 0000000000..e9b863c871
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLModifiersPool.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2020 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MODIFIERSPOOL
+#define SKSL_MODIFIERSPOOL
+
+#include "include/private/SkSLModifiers.h"
+
+#include <unordered_set>
+
+namespace SkSL {
+
+/**
+ * Deduplicates Modifiers objects and stores them in a shared pool. Modifiers are fairly heavy, and
+ * tend to be reused a lot, so deduplication can be a significant win.
+ */
+class ModifiersPool {
+public:
+ const Modifiers* add(const Modifiers& modifiers) {
+ auto [iter, wasInserted] = fModifiersSet.insert(modifiers);
+ return &*iter;
+ }
+
+ void clear() {
+ fModifiersSet.clear();
+ }
+
+private:
+ std::unordered_set<Modifiers> fModifiersSet;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLModuleLoader.cpp b/gfx/skia/skia/src/sksl/SkSLModuleLoader.cpp
new file mode 100644
index 0000000000..c164fc1fe6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLModuleLoader.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/sksl/SkSLModuleLoader.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLProgramKind.h"
+#include "include/private/base/SkMutex.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <algorithm>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#if SKSL_STANDALONE
+
+#include "include/core/SkString.h"
+#include "src/utils/SkOSPath.h"
+#include "tools/SkGetExecutablePath.h"
+
+ // In standalone mode, we load the original SkSL source files. GN is responsible for copying
+ // these files from src/sksl/ to the directory where the executable is located.
+ #include <fstream>
+
+ static std::string load_module_file(const char* moduleFilename) {
+ std::string exePath = SkGetExecutablePath();
+ SkString exeDir = SkOSPath::Dirname(exePath.c_str());
+ SkString modulePath = SkOSPath::Join(exeDir.c_str(), moduleFilename);
+ std::ifstream in(std::string{modulePath.c_str()});
+ std::string moduleSource{std::istreambuf_iterator<char>(in),
+ std::istreambuf_iterator<char>()};
+ if (in.rdstate()) {
+ SK_ABORT("Error reading %s\n", modulePath.c_str());
+ }
+ return moduleSource;
+ }
+
+ #define MODULE_DATA(name) #name, load_module_file(#name ".sksl")
+
+#else
+
+ // We include minified SkSL module code and pass it directly to the compiler.
+ #if defined(SK_ENABLE_OPTIMIZE_SIZE) || !defined(SK_DEBUG)
+ #include "src/sksl/generated/sksl_shared.minified.sksl"
+ #include "src/sksl/generated/sksl_compute.minified.sksl"
+ #include "src/sksl/generated/sksl_frag.minified.sksl"
+ #include "src/sksl/generated/sksl_gpu.minified.sksl"
+ #include "src/sksl/generated/sksl_public.minified.sksl"
+ #include "src/sksl/generated/sksl_rt_shader.minified.sksl"
+ #include "src/sksl/generated/sksl_vert.minified.sksl"
+ #if defined(SK_GRAPHITE)
+ #include "src/sksl/generated/sksl_graphite_frag.minified.sksl"
+ #include "src/sksl/generated/sksl_graphite_vert.minified.sksl"
+ #endif
+ #else
+ #include "src/sksl/generated/sksl_shared.unoptimized.sksl"
+ #include "src/sksl/generated/sksl_compute.unoptimized.sksl"
+ #include "src/sksl/generated/sksl_frag.unoptimized.sksl"
+ #include "src/sksl/generated/sksl_gpu.unoptimized.sksl"
+ #include "src/sksl/generated/sksl_public.unoptimized.sksl"
+ #include "src/sksl/generated/sksl_rt_shader.unoptimized.sksl"
+ #include "src/sksl/generated/sksl_vert.unoptimized.sksl"
+ #if defined(SK_GRAPHITE)
+ #include "src/sksl/generated/sksl_graphite_frag.unoptimized.sksl"
+ #include "src/sksl/generated/sksl_graphite_vert.unoptimized.sksl"
+ #endif
+ #endif
+
+ #define MODULE_DATA(name) #name, std::string(SKSL_MINIFIED_##name)
+
+#endif
+
+namespace SkSL {
+
+#define TYPE(t) &BuiltinTypes::f ## t
+
+static constexpr BuiltinTypePtr kRootTypes[] = {
+ TYPE(Void),
+
+ TYPE( Float), TYPE( Float2), TYPE( Float3), TYPE( Float4),
+ TYPE( Half), TYPE( Half2), TYPE( Half3), TYPE( Half4),
+ TYPE( Int), TYPE( Int2), TYPE( Int3), TYPE( Int4),
+ TYPE( UInt), TYPE( UInt2), TYPE( UInt3), TYPE( UInt4),
+ TYPE( Short), TYPE( Short2), TYPE( Short3), TYPE( Short4),
+ TYPE(UShort), TYPE(UShort2), TYPE(UShort3), TYPE(UShort4),
+ TYPE( Bool), TYPE( Bool2), TYPE( Bool3), TYPE( Bool4),
+
+ TYPE(Float2x2), TYPE(Float2x3), TYPE(Float2x4),
+ TYPE(Float3x2), TYPE(Float3x3), TYPE(Float3x4),
+ TYPE(Float4x2), TYPE(Float4x3), TYPE(Float4x4),
+
+ TYPE(Half2x2), TYPE(Half2x3), TYPE(Half2x4),
+ TYPE(Half3x2), TYPE(Half3x3), TYPE(Half3x4),
+ TYPE(Half4x2), TYPE(Half4x3), TYPE(Half4x4),
+
+ TYPE(SquareMat), TYPE(SquareHMat),
+ TYPE(Mat), TYPE(HMat),
+
+ // TODO(skia:12349): generic short/ushort
+ TYPE(GenType), TYPE(GenIType), TYPE(GenUType),
+ TYPE(GenHType), /* (GenSType) (GenUSType) */
+ TYPE(GenBType),
+ TYPE(IntLiteral),
+ TYPE(FloatLiteral),
+
+ TYPE(Vec), TYPE(IVec), TYPE(UVec),
+ TYPE(HVec), TYPE(SVec), TYPE(USVec),
+ TYPE(BVec),
+
+ TYPE(ColorFilter),
+ TYPE(Shader),
+ TYPE(Blender),
+};
+
+static constexpr BuiltinTypePtr kPrivateTypes[] = {
+ TYPE(Sampler2D), TYPE(SamplerExternalOES), TYPE(Sampler2DRect),
+
+ TYPE(SubpassInput), TYPE(SubpassInputMS),
+
+ TYPE(Sampler),
+ TYPE(Texture2D),
+ TYPE(ReadWriteTexture2D), TYPE(ReadOnlyTexture2D), TYPE(WriteOnlyTexture2D),
+ TYPE(GenTexture2D), TYPE(ReadableTexture2D), TYPE(WritableTexture2D),
+
+ TYPE(AtomicUInt),
+};
+
+#undef TYPE
+
+struct ModuleLoader::Impl {
+ Impl();
+
+ void makeRootSymbolTable();
+
+ // This mutex is taken when ModuleLoader::Get is called, and released when the returned
+ // ModuleLoader object falls out of scope.
+ SkMutex fMutex;
+ const BuiltinTypes fBuiltinTypes;
+ ModifiersPool fCoreModifiers;
+
+ std::unique_ptr<const Module> fRootModule;
+
+ std::unique_ptr<const Module> fSharedModule; // [Root] + Public intrinsics
+ std::unique_ptr<const Module> fGPUModule; // [Shared] + Non-public intrinsics/
+ // helper functions
+ std::unique_ptr<const Module> fVertexModule; // [GPU] + Vertex stage decls
+ std::unique_ptr<const Module> fFragmentModule; // [GPU] + Fragment stage decls
+ std::unique_ptr<const Module> fComputeModule; // [GPU] + Compute stage decls
+ std::unique_ptr<const Module> fGraphiteVertexModule; // [Vert] + Graphite vertex helpers
+ std::unique_ptr<const Module> fGraphiteFragmentModule; // [Frag] + Graphite fragment helpers
+
+ std::unique_ptr<const Module> fPublicModule; // [Shared] minus Private types +
+ // Runtime effect intrinsics
+ std::unique_ptr<const Module> fRuntimeShaderModule; // [Public] + Runtime shader decls
+};
+
+ModuleLoader ModuleLoader::Get() {
+ static ModuleLoader::Impl* sModuleLoaderImpl = new ModuleLoader::Impl;
+ return ModuleLoader(*sModuleLoaderImpl);
+}
+
+ModuleLoader::ModuleLoader(ModuleLoader::Impl& m) : fModuleLoader(m) {
+ fModuleLoader.fMutex.acquire();
+}
+
+ModuleLoader::~ModuleLoader() {
+ fModuleLoader.fMutex.release();
+}
+
+void ModuleLoader::unloadModules() {
+ fModuleLoader.fSharedModule = nullptr;
+ fModuleLoader.fGPUModule = nullptr;
+ fModuleLoader.fVertexModule = nullptr;
+ fModuleLoader.fFragmentModule = nullptr;
+ fModuleLoader.fComputeModule = nullptr;
+ fModuleLoader.fGraphiteVertexModule = nullptr;
+ fModuleLoader.fGraphiteFragmentModule = nullptr;
+ fModuleLoader.fPublicModule = nullptr;
+ fModuleLoader.fRuntimeShaderModule = nullptr;
+}
+
+ModuleLoader::Impl::Impl() {
+ this->makeRootSymbolTable();
+}
+
+static void add_compute_type_aliases(SkSL::SymbolTable* symbols, const SkSL::BuiltinTypes& types) {
+ // A `texture2D` in a compute shader should generally mean "read-write" texture access, not
+ // "sample" texture access. Remap the name `texture2D` to point to `readWriteTexture2D`.
+ symbols->inject(Type::MakeAliasType("texture2D", *types.fReadWriteTexture2D));
+}
+
+static std::unique_ptr<Module> compile_and_shrink(SkSL::Compiler* compiler,
+ ProgramKind kind,
+ const char* moduleName,
+ std::string moduleSource,
+ const Module* parent,
+ ModifiersPool& modifiersPool) {
+ std::unique_ptr<Module> m = compiler->compileModule(kind,
+ moduleName,
+ std::move(moduleSource),
+ parent,
+ modifiersPool,
+ /*shouldInline=*/true);
+ if (!m) {
+ SK_ABORT("Unable to load module %s", moduleName);
+ }
+
+ // We can eliminate FunctionPrototypes without changing the meaning of the module; the function
+ // declaration is still safely in the symbol table. This only impacts our ability to recreate
+ // the input verbatim, which we don't care about at runtime.
+ m->fElements.erase(std::remove_if(m->fElements.begin(), m->fElements.end(),
+ [](const std::unique_ptr<ProgramElement>& element) {
+ switch (element->kind()) {
+ case ProgramElement::Kind::kFunction:
+ case ProgramElement::Kind::kGlobalVar:
+ case ProgramElement::Kind::kInterfaceBlock:
+ // We need to preserve these.
+ return false;
+
+ case ProgramElement::Kind::kFunctionPrototype:
+ // These are already in the symbol table; the
+ // ProgramElement isn't needed anymore.
+ return true;
+
+ default:
+ SkDEBUGFAILF("Unsupported element: %s\n",
+ element->description().c_str());
+ return false;
+ }
+ }),
+ m->fElements.end());
+
+ m->fElements.shrink_to_fit();
+ return m;
+}
+
+const BuiltinTypes& ModuleLoader::builtinTypes() {
+ return fModuleLoader.fBuiltinTypes;
+}
+
+ModifiersPool& ModuleLoader::coreModifiers() {
+ return fModuleLoader.fCoreModifiers;
+}
+
+const Module* ModuleLoader::rootModule() {
+ return fModuleLoader.fRootModule.get();
+}
+
+void ModuleLoader::addPublicTypeAliases(const SkSL::Module* module) {
+ const SkSL::BuiltinTypes& types = this->builtinTypes();
+ SymbolTable* symbols = module->fSymbols.get();
+
+ // Add some aliases to the runtime effect modules so that it's friendlier, and more like GLSL.
+ symbols->addWithoutOwnership(types.fVec2.get());
+ symbols->addWithoutOwnership(types.fVec3.get());
+ symbols->addWithoutOwnership(types.fVec4.get());
+
+ symbols->addWithoutOwnership(types.fIVec2.get());
+ symbols->addWithoutOwnership(types.fIVec3.get());
+ symbols->addWithoutOwnership(types.fIVec4.get());
+
+ symbols->addWithoutOwnership(types.fBVec2.get());
+ symbols->addWithoutOwnership(types.fBVec3.get());
+ symbols->addWithoutOwnership(types.fBVec4.get());
+
+ symbols->addWithoutOwnership(types.fMat2.get());
+ symbols->addWithoutOwnership(types.fMat3.get());
+ symbols->addWithoutOwnership(types.fMat4.get());
+
+ symbols->addWithoutOwnership(types.fMat2x2.get());
+ symbols->addWithoutOwnership(types.fMat2x3.get());
+ symbols->addWithoutOwnership(types.fMat2x4.get());
+ symbols->addWithoutOwnership(types.fMat3x2.get());
+ symbols->addWithoutOwnership(types.fMat3x3.get());
+ symbols->addWithoutOwnership(types.fMat3x4.get());
+ symbols->addWithoutOwnership(types.fMat4x2.get());
+ symbols->addWithoutOwnership(types.fMat4x3.get());
+ symbols->addWithoutOwnership(types.fMat4x4.get());
+
+ // Hide all the private symbols by aliasing them all to "invalid". This will prevent code from
+ // using built-in names like `sampler2D` as variable names.
+ for (BuiltinTypePtr privateType : kPrivateTypes) {
+ symbols->inject(Type::MakeAliasType((types.*privateType)->name(), *types.fInvalid));
+ }
+}
+
+const Module* ModuleLoader::loadPublicModule(SkSL::Compiler* compiler) {
+ if (!fModuleLoader.fPublicModule) {
+ const Module* sharedModule = this->loadSharedModule(compiler);
+ fModuleLoader.fPublicModule = compile_and_shrink(compiler,
+ ProgramKind::kFragment,
+ MODULE_DATA(sksl_public),
+ sharedModule,
+ this->coreModifiers());
+ this->addPublicTypeAliases(fModuleLoader.fPublicModule.get());
+ }
+ return fModuleLoader.fPublicModule.get();
+}
+
+const Module* ModuleLoader::loadPrivateRTShaderModule(SkSL::Compiler* compiler) {
+ if (!fModuleLoader.fRuntimeShaderModule) {
+ const Module* publicModule = this->loadPublicModule(compiler);
+ fModuleLoader.fRuntimeShaderModule = compile_and_shrink(compiler,
+ ProgramKind::kFragment,
+ MODULE_DATA(sksl_rt_shader),
+ publicModule,
+ this->coreModifiers());
+ }
+ return fModuleLoader.fRuntimeShaderModule.get();
+}
+
+const Module* ModuleLoader::loadSharedModule(SkSL::Compiler* compiler) {
+ if (!fModuleLoader.fSharedModule) {
+ const Module* rootModule = this->rootModule();
+ fModuleLoader.fSharedModule = compile_and_shrink(compiler,
+ ProgramKind::kFragment,
+ MODULE_DATA(sksl_shared),
+ rootModule,
+ this->coreModifiers());
+ }
+ return fModuleLoader.fSharedModule.get();
+}
+
+const Module* ModuleLoader::loadGPUModule(SkSL::Compiler* compiler) {
+ if (!fModuleLoader.fGPUModule) {
+ const Module* sharedModule = this->loadSharedModule(compiler);
+ fModuleLoader.fGPUModule = compile_and_shrink(compiler,
+ ProgramKind::kFragment,
+ MODULE_DATA(sksl_gpu),
+ sharedModule,
+ this->coreModifiers());
+ }
+ return fModuleLoader.fGPUModule.get();
+}
+
+const Module* ModuleLoader::loadFragmentModule(SkSL::Compiler* compiler) {
+ if (!fModuleLoader.fFragmentModule) {
+ const Module* gpuModule = this->loadGPUModule(compiler);
+ fModuleLoader.fFragmentModule = compile_and_shrink(compiler,
+ ProgramKind::kFragment,
+ MODULE_DATA(sksl_frag),
+ gpuModule,
+ this->coreModifiers());
+ }
+ return fModuleLoader.fFragmentModule.get();
+}
+
+const Module* ModuleLoader::loadVertexModule(SkSL::Compiler* compiler) {
+ if (!fModuleLoader.fVertexModule) {
+ const Module* gpuModule = this->loadGPUModule(compiler);
+ fModuleLoader.fVertexModule = compile_and_shrink(compiler,
+ ProgramKind::kVertex,
+ MODULE_DATA(sksl_vert),
+ gpuModule,
+ this->coreModifiers());
+ }
+ return fModuleLoader.fVertexModule.get();
+}
+
+const Module* ModuleLoader::loadComputeModule(SkSL::Compiler* compiler) {
+ if (!fModuleLoader.fComputeModule) {
+ const Module* gpuModule = this->loadGPUModule(compiler);
+ fModuleLoader.fComputeModule = compile_and_shrink(compiler,
+ ProgramKind::kCompute,
+ MODULE_DATA(sksl_compute),
+ gpuModule,
+ this->coreModifiers());
+ add_compute_type_aliases(fModuleLoader.fComputeModule->fSymbols.get(),
+ this->builtinTypes());
+ }
+ return fModuleLoader.fComputeModule.get();
+}
+
+const Module* ModuleLoader::loadGraphiteFragmentModule(SkSL::Compiler* compiler) {
+#if defined(SK_GRAPHITE)
+ if (!fModuleLoader.fGraphiteFragmentModule) {
+ const Module* fragmentModule = this->loadFragmentModule(compiler);
+ fModuleLoader.fGraphiteFragmentModule = compile_and_shrink(compiler,
+ ProgramKind::kGraphiteFragment,
+ MODULE_DATA(sksl_graphite_frag),
+ fragmentModule,
+ this->coreModifiers());
+ }
+ return fModuleLoader.fGraphiteFragmentModule.get();
+#else
+ return this->loadFragmentModule(compiler);
+#endif
+}
+
+const Module* ModuleLoader::loadGraphiteVertexModule(SkSL::Compiler* compiler) {
+#if defined(SK_GRAPHITE)
+ if (!fModuleLoader.fGraphiteVertexModule) {
+ const Module* vertexModule = this->loadVertexModule(compiler);
+ fModuleLoader.fGraphiteVertexModule = compile_and_shrink(compiler,
+ ProgramKind::kGraphiteVertex,
+ MODULE_DATA(sksl_graphite_vert),
+ vertexModule,
+ this->coreModifiers());
+ }
+ return fModuleLoader.fGraphiteVertexModule.get();
+#else
+ return this->loadVertexModule(compiler);
+#endif
+}
+
+void ModuleLoader::Impl::makeRootSymbolTable() {
+ auto rootModule = std::make_unique<Module>();
+ rootModule->fSymbols = std::make_shared<SymbolTable>(/*builtin=*/true);
+
+ for (BuiltinTypePtr rootType : kRootTypes) {
+ rootModule->fSymbols->addWithoutOwnership((fBuiltinTypes.*rootType).get());
+ }
+
+ for (BuiltinTypePtr privateType : kPrivateTypes) {
+ rootModule->fSymbols->addWithoutOwnership((fBuiltinTypes.*privateType).get());
+ }
+
+ // sk_Caps is "builtin", but all references to it are resolved to Settings, so we don't need to
+ // treat it as builtin (ie, no need to clone it into the Program).
+ rootModule->fSymbols->add(std::make_unique<Variable>(/*pos=*/Position(),
+ /*modifiersPosition=*/Position(),
+ fCoreModifiers.add(Modifiers{}),
+ "sk_Caps",
+ fBuiltinTypes.fSkCaps.get(),
+ /*builtin=*/false,
+ Variable::Storage::kGlobal));
+ fRootModule = std::move(rootModule);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLModuleLoader.h b/gfx/skia/skia/src/sksl/SkSLModuleLoader.h
new file mode 100644
index 0000000000..bb300e2f7a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLModuleLoader.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MODULELOADER
+#define SKSL_MODULELOADER
+
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include <memory>
+
+namespace SkSL {
+
+class Compiler;
+class ModifiersPool;
+struct Module;
+class Type;
+
+using BuiltinTypePtr = const std::unique_ptr<Type> BuiltinTypes::*;
+
+class ModuleLoader {
+private:
+ struct Impl;
+ Impl& fModuleLoader;
+
+public:
+ ModuleLoader(ModuleLoader::Impl&);
+ ~ModuleLoader();
+
+ // Acquires a mutex-locked reference to the singleton ModuleLoader. When the ModuleLoader is
+ // allowed to fall out of scope, the mutex will be released.
+ static ModuleLoader Get();
+
+ // The built-in types and root module are universal, immutable, and shared by every Compiler.
+ // They are created when the ModuleLoader is instantiated and never change.
+ const BuiltinTypes& builtinTypes();
+ const Module* rootModule();
+
+ // This ModifiersPool is shared by every built-in module.
+ ModifiersPool& coreModifiers();
+
+ // These modules are loaded on demand; once loaded, they are kept for the lifetime of the
+ // process.
+ const Module* loadSharedModule(SkSL::Compiler* compiler);
+ const Module* loadGPUModule(SkSL::Compiler* compiler);
+ const Module* loadVertexModule(SkSL::Compiler* compiler);
+ const Module* loadFragmentModule(SkSL::Compiler* compiler);
+ const Module* loadComputeModule(SkSL::Compiler* compiler);
+ const Module* loadGraphiteVertexModule(SkSL::Compiler* compiler);
+ const Module* loadGraphiteFragmentModule(SkSL::Compiler* compiler);
+
+ const Module* loadPublicModule(SkSL::Compiler* compiler);
+ const Module* loadPrivateRTShaderModule(SkSL::Compiler* compiler);
+
+ // This updates an existing Module's symbol table to match Runtime Effect rules. GLSL types like
+ // `vec4` are added; SkSL private types like `sampler2D` are replaced with an invalid type.
+ void addPublicTypeAliases(const SkSL::Module* module);
+
+ // This unloads every module. It's useful primarily for benchmarking purposes.
+ void unloadModules();
+};
+
+} // namespace SkSL
+
+#endif // SKSL_MODULELOADER
diff --git a/gfx/skia/skia/src/sksl/SkSLOperator.cpp b/gfx/skia/skia/src/sksl/SkSLOperator.cpp
new file mode 100644
index 0000000000..6c9ddc92b4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLOperator.cpp
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/SkSLOperator.h"
+
+#include "include/core/SkTypes.h"
+#include "src/base/SkStringView.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <memory>
+
+namespace SkSL {
+
+OperatorPrecedence Operator::getBinaryPrecedence() const {
+ switch (this->kind()) {
+ case Kind::STAR: // fall through
+ case Kind::SLASH: // fall through
+ case Kind::PERCENT: return OperatorPrecedence::kMultiplicative;
+ case Kind::PLUS: // fall through
+ case Kind::MINUS: return OperatorPrecedence::kAdditive;
+ case Kind::SHL: // fall through
+ case Kind::SHR: return OperatorPrecedence::kShift;
+ case Kind::LT: // fall through
+ case Kind::GT: // fall through
+ case Kind::LTEQ: // fall through
+ case Kind::GTEQ: return OperatorPrecedence::kRelational;
+ case Kind::EQEQ: // fall through
+ case Kind::NEQ: return OperatorPrecedence::kEquality;
+ case Kind::BITWISEAND: return OperatorPrecedence::kBitwiseAnd;
+ case Kind::BITWISEXOR: return OperatorPrecedence::kBitwiseXor;
+ case Kind::BITWISEOR: return OperatorPrecedence::kBitwiseOr;
+ case Kind::LOGICALAND: return OperatorPrecedence::kLogicalAnd;
+ case Kind::LOGICALXOR: return OperatorPrecedence::kLogicalXor;
+ case Kind::LOGICALOR: return OperatorPrecedence::kLogicalOr;
+ case Kind::EQ: // fall through
+ case Kind::PLUSEQ: // fall through
+ case Kind::MINUSEQ: // fall through
+ case Kind::STAREQ: // fall through
+ case Kind::SLASHEQ: // fall through
+ case Kind::PERCENTEQ: // fall through
+ case Kind::SHLEQ: // fall through
+ case Kind::SHREQ: // fall through
+ case Kind::BITWISEANDEQ: // fall through
+ case Kind::BITWISEXOREQ: // fall through
+ case Kind::BITWISEOREQ: return OperatorPrecedence::kAssignment;
+ case Kind::COMMA: return OperatorPrecedence::kSequence;
+ default: SK_ABORT("unsupported binary operator");
+ }
+}
+
+const char* Operator::operatorName() const {
+ switch (this->kind()) {
+ case Kind::PLUS: return " + ";
+ case Kind::MINUS: return " - ";
+ case Kind::STAR: return " * ";
+ case Kind::SLASH: return " / ";
+ case Kind::PERCENT: return " % ";
+ case Kind::SHL: return " << ";
+ case Kind::SHR: return " >> ";
+ case Kind::LOGICALNOT: return "!";
+ case Kind::LOGICALAND: return " && ";
+ case Kind::LOGICALOR: return " || ";
+ case Kind::LOGICALXOR: return " ^^ ";
+ case Kind::BITWISENOT: return "~";
+ case Kind::BITWISEAND: return " & ";
+ case Kind::BITWISEOR: return " | ";
+ case Kind::BITWISEXOR: return " ^ ";
+ case Kind::EQ: return " = ";
+ case Kind::EQEQ: return " == ";
+ case Kind::NEQ: return " != ";
+ case Kind::LT: return " < ";
+ case Kind::GT: return " > ";
+ case Kind::LTEQ: return " <= ";
+ case Kind::GTEQ: return " >= ";
+ case Kind::PLUSEQ: return " += ";
+ case Kind::MINUSEQ: return " -= ";
+ case Kind::STAREQ: return " *= ";
+ case Kind::SLASHEQ: return " /= ";
+ case Kind::PERCENTEQ: return " %= ";
+ case Kind::SHLEQ: return " <<= ";
+ case Kind::SHREQ: return " >>= ";
+ case Kind::BITWISEANDEQ: return " &= ";
+ case Kind::BITWISEOREQ: return " |= ";
+ case Kind::BITWISEXOREQ: return " ^= ";
+ case Kind::PLUSPLUS: return "++";
+ case Kind::MINUSMINUS: return "--";
+ case Kind::COMMA: return ", ";
+ default: SkUNREACHABLE;
+ }
+}
+
+std::string_view Operator::tightOperatorName() const {
+ std::string_view name = this->operatorName();
+ if (skstd::starts_with(name, ' ')) {
+ name.remove_prefix(1);
+ }
+ if (skstd::ends_with(name, ' ')) {
+ name.remove_suffix(1);
+ }
+ return name;
+}
+
+bool Operator::isAssignment() const {
+ switch (this->kind()) {
+ case Kind::EQ: // fall through
+ case Kind::PLUSEQ: // fall through
+ case Kind::MINUSEQ: // fall through
+ case Kind::STAREQ: // fall through
+ case Kind::SLASHEQ: // fall through
+ case Kind::PERCENTEQ: // fall through
+ case Kind::SHLEQ: // fall through
+ case Kind::SHREQ: // fall through
+ case Kind::BITWISEOREQ: // fall through
+ case Kind::BITWISEXOREQ: // fall through
+ case Kind::BITWISEANDEQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Operator Operator::removeAssignment() const {
+ switch (this->kind()) {
+ case Kind::PLUSEQ: return Kind::PLUS;
+ case Kind::MINUSEQ: return Kind::MINUS;
+ case Kind::STAREQ: return Kind::STAR;
+ case Kind::SLASHEQ: return Kind::SLASH;
+ case Kind::PERCENTEQ: return Kind::PERCENT;
+ case Kind::SHLEQ: return Kind::SHL;
+ case Kind::SHREQ: return Kind::SHR;
+ case Kind::BITWISEOREQ: return Kind::BITWISEOR;
+ case Kind::BITWISEXOREQ: return Kind::BITWISEXOR;
+ case Kind::BITWISEANDEQ: return Kind::BITWISEAND;
+ default: return *this;
+ }
+}
+
+bool Operator::isRelational() const {
+ switch (this->kind()) {
+ case Kind::LT:
+ case Kind::GT:
+ case Kind::LTEQ:
+ case Kind::GTEQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool Operator::isOnlyValidForIntegralTypes() const {
+ switch (this->kind()) {
+ case Kind::SHL:
+ case Kind::SHR:
+ case Kind::BITWISEAND:
+ case Kind::BITWISEOR:
+ case Kind::BITWISEXOR:
+ case Kind::PERCENT:
+ case Kind::SHLEQ:
+ case Kind::SHREQ:
+ case Kind::BITWISEANDEQ:
+ case Kind::BITWISEOREQ:
+ case Kind::BITWISEXOREQ:
+ case Kind::PERCENTEQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool Operator::isValidForMatrixOrVector() const {
+ switch (this->kind()) {
+ case Kind::PLUS:
+ case Kind::MINUS:
+ case Kind::STAR:
+ case Kind::SLASH:
+ case Kind::PERCENT:
+ case Kind::SHL:
+ case Kind::SHR:
+ case Kind::BITWISEAND:
+ case Kind::BITWISEOR:
+ case Kind::BITWISEXOR:
+ case Kind::PLUSEQ:
+ case Kind::MINUSEQ:
+ case Kind::STAREQ:
+ case Kind::SLASHEQ:
+ case Kind::PERCENTEQ:
+ case Kind::SHLEQ:
+ case Kind::SHREQ:
+ case Kind::BITWISEANDEQ:
+ case Kind::BITWISEOREQ:
+ case Kind::BITWISEXOREQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool Operator::isMatrixMultiply(const Type& left, const Type& right) const {
+ if (this->kind() != Kind::STAR && this->kind() != Kind::STAREQ) {
+ return false;
+ }
+ if (left.isMatrix()) {
+ return right.isMatrix() || right.isVector();
+ }
+ return left.isVector() && right.isMatrix();
+}
+
+/**
+ * Determines the operand and result types of a binary expression. Returns true if the expression is
+ * legal, false otherwise. If false, the values of the out parameters are undefined.
+ */
+bool Operator::determineBinaryType(const Context& context,
+ const Type& left,
+ const Type& right,
+ const Type** outLeftType,
+ const Type** outRightType,
+ const Type** outResultType) const {
+ const bool allowNarrowing = context.fConfig->fSettings.fAllowNarrowingConversions;
+ switch (this->kind()) {
+ case Kind::EQ: // left = right
+ if (left.isVoid()) {
+ return false;
+ }
+ *outLeftType = &left;
+ *outRightType = &left;
+ *outResultType = &left;
+ return right.canCoerceTo(left, allowNarrowing);
+
+ case Kind::EQEQ: // left == right
+ case Kind::NEQ: { // left != right
+ if (left.isVoid() || left.isOpaque()) {
+ return false;
+ }
+ CoercionCost rightToLeft = right.coercionCost(left),
+ leftToRight = left.coercionCost(right);
+ if (rightToLeft < leftToRight) {
+ if (rightToLeft.isPossible(allowNarrowing)) {
+ *outLeftType = &left;
+ *outRightType = &left;
+ *outResultType = context.fTypes.fBool.get();
+ return true;
+ }
+ } else {
+ if (leftToRight.isPossible(allowNarrowing)) {
+ *outLeftType = &right;
+ *outRightType = &right;
+ *outResultType = context.fTypes.fBool.get();
+ return true;
+ }
+ }
+ return false;
+ }
+ case Kind::LOGICALOR: // left || right
+ case Kind::LOGICALAND: // left && right
+ case Kind::LOGICALXOR: // left ^^ right
+ *outLeftType = context.fTypes.fBool.get();
+ *outRightType = context.fTypes.fBool.get();
+ *outResultType = context.fTypes.fBool.get();
+ return left.canCoerceTo(*context.fTypes.fBool, allowNarrowing) &&
+ right.canCoerceTo(*context.fTypes.fBool, allowNarrowing);
+
+ case Operator::Kind::COMMA: // left, right
+ if (left.isOpaque() || right.isOpaque()) {
+ return false;
+ }
+ *outLeftType = &left;
+ *outRightType = &right;
+ *outResultType = &right;
+ return true;
+
+ default:
+ break;
+ }
+
+ // Boolean types only support the operators listed above (, = == != || && ^^).
+ // If we've gotten this far with a boolean, we have an unsupported operator.
+ const Type& leftComponentType = left.componentType();
+ const Type& rightComponentType = right.componentType();
+ if (leftComponentType.isBoolean() || rightComponentType.isBoolean()) {
+ return false;
+ }
+
+ bool isAssignment = this->isAssignment();
+ if (this->isMatrixMultiply(left, right)) { // left * right
+ // Determine final component type.
+ if (!this->determineBinaryType(context, left.componentType(), right.componentType(),
+ outLeftType, outRightType, outResultType)) {
+ return false;
+ }
+ // Convert component type to compound.
+ *outLeftType = &(*outResultType)->toCompound(context, left.columns(), left.rows());
+ *outRightType = &(*outResultType)->toCompound(context, right.columns(), right.rows());
+ int leftColumns = left.columns(), leftRows = left.rows();
+ int rightColumns = right.columns(), rightRows = right.rows();
+ if (right.isVector()) {
+ // `matrix * vector` treats the vector as a column vector; we need to transpose it.
+ std::swap(rightColumns, rightRows);
+ SkASSERT(rightColumns == 1);
+ }
+ if (rightColumns > 1) {
+ *outResultType = &(*outResultType)->toCompound(context, rightColumns, leftRows);
+ } else {
+ // The result was a column vector. Transpose it back to a row.
+ *outResultType = &(*outResultType)->toCompound(context, leftRows, rightColumns);
+ }
+ if (isAssignment && ((*outResultType)->columns() != leftColumns ||
+ (*outResultType)->rows() != leftRows)) {
+ return false;
+ }
+ return leftColumns == rightRows;
+ }
+
+ bool leftIsVectorOrMatrix = left.isVector() || left.isMatrix();
+ bool validMatrixOrVectorOp = this->isValidForMatrixOrVector();
+
+ if (leftIsVectorOrMatrix && validMatrixOrVectorOp && right.isScalar()) {
+ // Determine final component type.
+ if (!this->determineBinaryType(context, left.componentType(), right,
+ outLeftType, outRightType, outResultType)) {
+ return false;
+ }
+ // Convert component type to compound.
+ *outLeftType = &(*outLeftType)->toCompound(context, left.columns(), left.rows());
+ if (!this->isRelational()) {
+ *outResultType = &(*outResultType)->toCompound(context, left.columns(), left.rows());
+ }
+ return true;
+ }
+
+ bool rightIsVectorOrMatrix = right.isVector() || right.isMatrix();
+
+ if (!isAssignment && rightIsVectorOrMatrix && validMatrixOrVectorOp && left.isScalar()) {
+ // Determine final component type.
+ if (!this->determineBinaryType(context, left, right.componentType(),
+ outLeftType, outRightType, outResultType)) {
+ return false;
+ }
+ // Convert component type to compound.
+ *outRightType = &(*outRightType)->toCompound(context, right.columns(), right.rows());
+ if (!this->isRelational()) {
+ *outResultType = &(*outResultType)->toCompound(context, right.columns(), right.rows());
+ }
+ return true;
+ }
+
+ CoercionCost rightToLeftCost = right.coercionCost(left);
+ CoercionCost leftToRightCost = isAssignment ? CoercionCost::Impossible()
+ : left.coercionCost(right);
+
+ if ((left.isScalar() && right.isScalar()) || (leftIsVectorOrMatrix && validMatrixOrVectorOp)) {
+ if (this->isOnlyValidForIntegralTypes()) {
+ if (!leftComponentType.isInteger() || !rightComponentType.isInteger()) {
+ return false;
+ }
+ }
+ if (rightToLeftCost.isPossible(allowNarrowing) && rightToLeftCost < leftToRightCost) {
+ // Right-to-Left conversion is possible and cheaper
+ *outLeftType = &left;
+ *outRightType = &left;
+ *outResultType = &left;
+ } else if (leftToRightCost.isPossible(allowNarrowing)) {
+ // Left-to-Right conversion is possible (and at least as cheap as Right-to-Left)
+ *outLeftType = &right;
+ *outRightType = &right;
+ *outResultType = &right;
+ } else {
+ return false;
+ }
+ if (this->isRelational()) {
+ *outResultType = context.fTypes.fBool.get();
+ }
+ return true;
+ }
+ return false;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLOutputStream.cpp b/gfx/skia/skia/src/sksl/SkSLOutputStream.cpp
new file mode 100644
index 0000000000..7972c9fd19
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLOutputStream.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLOutputStream.h"
+
+#include <stdio.h>
+#include <memory>
+
+namespace SkSL {
+
+void OutputStream::writeString(const std::string& s) {
+ this->write(s.c_str(), s.size());
+}
+
+void OutputStream::printf(const char format[], ...) {
+ va_list args;
+ va_start(args, format);
+ this->appendVAList(format, args);
+ va_end(args);
+}
+
+void OutputStream::appendVAList(const char format[], va_list args) {
+ char buffer[kBufferSize];
+ va_list copy;
+ va_copy(copy, args);
+ int length = vsnprintf(buffer, kBufferSize, format, args);
+ if (length > (int) kBufferSize) {
+ std::unique_ptr<char[]> bigBuffer(new char[length + 1]);
+ vsnprintf(bigBuffer.get(), length + 1, format, copy);
+ this->write(bigBuffer.get(), length);
+ } else {
+ this->write(buffer, length);
+ }
+ va_end(copy);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLOutputStream.h b/gfx/skia/skia/src/sksl/SkSLOutputStream.h
new file mode 100644
index 0000000000..542ffd6f90
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLOutputStream.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_OUTPUTSTREAM
+#define SKSL_OUTPUTSTREAM
+
+#include "include/core/SkTypes.h"
+
+#include <cstdarg>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
+namespace SkSL {
+
+class OutputStream {
+public:
+ virtual bool isValid() const {
+ return true;
+ }
+
+ virtual void write8(uint8_t b) = 0;
+
+ void write16(uint16_t i) {
+ this->write8((uint8_t) i);
+ this->write8((uint8_t) (i >> 8));
+ }
+
+ void write32(uint32_t i) {
+ this->write8((uint8_t) i);
+ this->write8((uint8_t) (i >> 8));
+ this->write8((uint8_t) (i >> 16));
+ this->write8((uint8_t) (i >> 24));
+ }
+
+ virtual void writeText(const char* s) = 0;
+
+ virtual void write(const void* s, size_t size) = 0;
+
+ void writeString(const std::string& s);
+
+ void printf(const char format[], ...) SK_PRINTF_LIKE(2, 3);
+
+ void appendVAList(const char format[], va_list args) SK_PRINTF_LIKE(2, 0);
+
+ virtual ~OutputStream() {}
+
+private:
+ static const int kBufferSize = 1024;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLParser.cpp b/gfx/skia/skia/src/sksl/SkSLParser.cpp
new file mode 100644
index 0000000000..d63f930a63
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLParser.cpp
@@ -0,0 +1,2248 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLParser.h"
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLString.h"
+#include "include/sksl/DSLBlock.h"
+#include "include/sksl/DSLCase.h"
+#include "include/sksl/DSLFunction.h"
+#include "include/sksl/DSLVar.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLVersion.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/dsl/priv/DSLWriter.h"
+#include "src/sksl/dsl/priv/DSL_priv.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <algorithm>
+#include <climits>
+#include <initializer_list>
+#include <memory>
+#include <utility>
+#include <vector>
+
+using namespace SkSL::dsl;
+
+namespace SkSL {
+
+static constexpr int kMaxParseDepth = 50;
+
+static int parse_modifier_token(Token::Kind token) {
+ switch (token) {
+ case Token::Kind::TK_UNIFORM: return Modifiers::kUniform_Flag;
+ case Token::Kind::TK_CONST: return Modifiers::kConst_Flag;
+ case Token::Kind::TK_IN: return Modifiers::kIn_Flag;
+ case Token::Kind::TK_OUT: return Modifiers::kOut_Flag;
+ case Token::Kind::TK_INOUT: return Modifiers::kIn_Flag | Modifiers::kOut_Flag;
+ case Token::Kind::TK_FLAT: return Modifiers::kFlat_Flag;
+ case Token::Kind::TK_NOPERSPECTIVE: return Modifiers::kNoPerspective_Flag;
+ case Token::Kind::TK_PURE: return Modifiers::kPure_Flag;
+ case Token::Kind::TK_INLINE: return Modifiers::kInline_Flag;
+ case Token::Kind::TK_NOINLINE: return Modifiers::kNoInline_Flag;
+ case Token::Kind::TK_HIGHP: return Modifiers::kHighp_Flag;
+ case Token::Kind::TK_MEDIUMP: return Modifiers::kMediump_Flag;
+ case Token::Kind::TK_LOWP: return Modifiers::kLowp_Flag;
+ case Token::Kind::TK_EXPORT: return Modifiers::kExport_Flag;
+ case Token::Kind::TK_ES3: return Modifiers::kES3_Flag;
+ case Token::Kind::TK_WORKGROUP: return Modifiers::kWorkgroup_Flag;
+ case Token::Kind::TK_READONLY: return Modifiers::kReadOnly_Flag;
+ case Token::Kind::TK_WRITEONLY: return Modifiers::kWriteOnly_Flag;
+ case Token::Kind::TK_BUFFER: return Modifiers::kBuffer_Flag;
+ default: return 0;
+ }
+}
+
+class Parser::AutoDepth {
+public:
+ AutoDepth(Parser* p)
+ : fParser(p)
+ , fDepth(0) {}
+
+ ~AutoDepth() {
+ fParser->fDepth -= fDepth;
+ }
+
+ bool increase() {
+ ++fDepth;
+ ++fParser->fDepth;
+ if (fParser->fDepth > kMaxParseDepth) {
+ fParser->error(fParser->peek(), "exceeded max parse depth");
+ fParser->fEncounteredFatalError = true;
+ return false;
+ }
+ return true;
+ }
+
+private:
+ Parser* fParser;
+ int fDepth;
+};
+
+class Parser::AutoSymbolTable {
+public:
+ AutoSymbolTable(Parser* p) : fParser(p) {
+ SymbolTable::Push(&fParser->symbolTable());
+ }
+
+ ~AutoSymbolTable() {
+ SymbolTable::Pop(&fParser->symbolTable());
+ }
+
+private:
+ Parser* fParser;
+};
+
+Parser::Parser(Compiler* compiler,
+ const ProgramSettings& settings,
+ ProgramKind kind,
+ std::string text)
+ : fCompiler(*compiler)
+ , fSettings(settings)
+ , fKind(kind)
+ , fText(std::make_unique<std::string>(std::move(text)))
+ , fPushback(Token::Kind::TK_NONE, /*offset=*/-1, /*length=*/-1) {
+ fLexer.start(*fText);
+}
+
+std::shared_ptr<SymbolTable>& Parser::symbolTable() {
+ return fCompiler.symbolTable();
+}
+
+void Parser::addToSymbolTable(DSLVarBase& var, Position pos) {
+ if (SkSL::Variable* skslVar = DSLWriter::Var(var)) {
+ this->symbolTable()->addWithoutOwnership(skslVar);
+ }
+}
+
+Token Parser::nextRawToken() {
+ Token token;
+ if (fPushback.fKind != Token::Kind::TK_NONE) {
+ // Retrieve the token from the pushback buffer.
+ token = fPushback;
+ fPushback.fKind = Token::Kind::TK_NONE;
+ } else {
+ // Fetch a token from the lexer.
+ token = fLexer.next();
+
+ // Some tokens are always invalid, so we detect and report them here.
+ switch (token.fKind) {
+ case Token::Kind::TK_PRIVATE_IDENTIFIER:
+ if (ProgramConfig::AllowsPrivateIdentifiers(fKind)) {
+ token.fKind = Token::Kind::TK_IDENTIFIER;
+ break;
+ }
+ [[fallthrough]];
+
+ case Token::Kind::TK_RESERVED:
+ this->error(token, "name '" + std::string(this->text(token)) + "' is reserved");
+ token.fKind = Token::Kind::TK_IDENTIFIER; // reduces additional follow-up errors
+ break;
+
+ case Token::Kind::TK_BAD_OCTAL:
+ this->error(token, "'" + std::string(this->text(token)) +
+ "' is not a valid octal number");
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return token;
+}
+
+static bool is_whitespace(Token::Kind kind) {
+ switch (kind) {
+ case Token::Kind::TK_WHITESPACE:
+ case Token::Kind::TK_LINE_COMMENT:
+ case Token::Kind::TK_BLOCK_COMMENT:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool Parser::expectNewline() {
+ Token token = this->nextRawToken();
+ if (token.fKind == Token::Kind::TK_WHITESPACE) {
+ // The lexer doesn't distinguish newlines from other forms of whitespace, so we check
+ // for newlines by searching through the token text.
+ std::string_view tokenText = this->text(token);
+ if (tokenText.find_first_of('\r') != std::string_view::npos ||
+ tokenText.find_first_of('\n') != std::string_view::npos) {
+ return true;
+ }
+ }
+ // We didn't find a newline.
+ this->pushback(token);
+ return false;
+}
+
+Token Parser::nextToken() {
+ for (;;) {
+ Token token = this->nextRawToken();
+ if (!is_whitespace(token.fKind)) {
+ return token;
+ }
+ }
+}
+
+void Parser::pushback(Token t) {
+ SkASSERT(fPushback.fKind == Token::Kind::TK_NONE);
+ fPushback = std::move(t);
+}
+
+Token Parser::peek() {
+ if (fPushback.fKind == Token::Kind::TK_NONE) {
+ fPushback = this->nextToken();
+ }
+ return fPushback;
+}
+
+bool Parser::checkNext(Token::Kind kind, Token* result) {
+ if (fPushback.fKind != Token::Kind::TK_NONE && fPushback.fKind != kind) {
+ return false;
+ }
+ Token next = this->nextToken();
+ if (next.fKind == kind) {
+ if (result) {
+ *result = next;
+ }
+ return true;
+ }
+ this->pushback(std::move(next));
+ return false;
+}
+
+bool Parser::expect(Token::Kind kind, const char* expected, Token* result) {
+ Token next = this->nextToken();
+ if (next.fKind == kind) {
+ if (result) {
+ *result = std::move(next);
+ }
+ return true;
+ } else {
+ this->error(next, "expected " + std::string(expected) + ", but found '" +
+ std::string(this->text(next)) + "'");
+ this->fEncounteredFatalError = true;
+ return false;
+ }
+}
+
+bool Parser::expectIdentifier(Token* result) {
+ if (!this->expect(Token::Kind::TK_IDENTIFIER, "an identifier", result)) {
+ return false;
+ }
+ if (this->symbolTable()->isBuiltinType(this->text(*result))) {
+ this->error(*result, "expected an identifier, but found type '" +
+ std::string(this->text(*result)) + "'");
+ this->fEncounteredFatalError = true;
+ return false;
+ }
+ return true;
+}
+
+bool Parser::checkIdentifier(Token* result) {
+ if (!this->checkNext(Token::Kind::TK_IDENTIFIER, result)) {
+ return false;
+ }
+ if (this->symbolTable()->isBuiltinType(this->text(*result))) {
+ this->pushback(std::move(*result));
+ return false;
+ }
+ return true;
+}
+
+std::string_view Parser::text(Token token) {
+ return std::string_view(fText->data() + token.fOffset, token.fLength);
+}
+
+Position Parser::position(Token t) {
+ if (t.fOffset >= 0) {
+ return Position::Range(t.fOffset, t.fOffset + t.fLength);
+ } else {
+ return Position();
+ }
+}
+
+void Parser::error(Token token, std::string_view msg) {
+ this->error(this->position(token), msg);
+}
+
+void Parser::error(Position position, std::string_view msg) {
+ GetErrorReporter().error(position, msg);
+}
+
+Position Parser::rangeFrom(Position start) {
+ int offset = fPushback.fKind != Token::Kind::TK_NONE ? fPushback.fOffset
+ : fLexer.getCheckpoint().fOffset;
+ return Position::Range(start.startOffset(), offset);
+}
+
+Position Parser::rangeFrom(Token start) {
+ return this->rangeFrom(this->position(start));
+}
+
+/* declaration* END_OF_FILE */
+std::unique_ptr<Program> Parser::program() {
+ ErrorReporter* errorReporter = &fCompiler.errorReporter();
+ Start(&fCompiler, fKind, fSettings);
+ SetErrorReporter(errorReporter);
+ errorReporter->setSource(*fText);
+ this->declarations();
+ std::unique_ptr<Program> result;
+ if (!GetErrorReporter().errorCount()) {
+ result = dsl::ReleaseProgram(std::move(fText));
+ }
+ errorReporter->setSource(std::string_view());
+ End();
+ return result;
+}
+
+std::unique_ptr<SkSL::Module> Parser::moduleInheritingFrom(const SkSL::Module* parent) {
+ ErrorReporter* errorReporter = &fCompiler.errorReporter();
+ StartModule(&fCompiler, fKind, fSettings, parent);
+ SetErrorReporter(errorReporter);
+ errorReporter->setSource(*fText);
+ this->declarations();
+ this->symbolTable()->takeOwnershipOfString(std::move(*fText));
+ auto result = std::make_unique<SkSL::Module>();
+ result->fParent = parent;
+ result->fSymbols = this->symbolTable();
+ result->fElements = std::move(ThreadContext::ProgramElements());
+ errorReporter->setSource(std::string_view());
+ End();
+ return result;
+}
+
+void Parser::declarations() {
+ fEncounteredFatalError = false;
+ // Any #version directive must appear as the first thing in a file
+ if (this->peek().fKind == Token::Kind::TK_DIRECTIVE) {
+ this->directive(/*allowVersion=*/true);
+ }
+ bool done = false;
+ while (!done) {
+ switch (this->peek().fKind) {
+ case Token::Kind::TK_END_OF_FILE:
+ done = true;
+ break;
+ case Token::Kind::TK_DIRECTIVE:
+ this->directive(/*allowVersion=*/false);
+ break;
+ case Token::Kind::TK_INVALID:
+ this->error(this->peek(), "invalid token");
+ this->nextToken();
+ done = true;
+ break;
+ default:
+ this->declaration();
+ done = fEncounteredFatalError;
+ break;
+ }
+ }
+}
+
+/* DIRECTIVE(#extension) IDENTIFIER COLON IDENTIFIER NEWLINE |
+ DIRECTIVE(#version) INTLITERAL NEWLINE */
+void Parser::directive(bool allowVersion) {
+ Token start;
+ if (!this->expect(Token::Kind::TK_DIRECTIVE, "a directive", &start)) {
+ return;
+ }
+ std::string_view text = this->text(start);
+ const bool allowExtensions = !ProgramConfig::IsRuntimeEffect(fKind);
+ if (text == "#extension" && allowExtensions) {
+ Token name;
+ if (!this->expectIdentifier(&name)) {
+ return;
+ }
+ if (!this->expect(Token::Kind::TK_COLON, "':'")) {
+ return;
+ }
+ Token behavior;
+ if (!this->expect(Token::Kind::TK_IDENTIFIER, "an identifier", &behavior)) {
+ return;
+ }
+ std::string_view behaviorText = this->text(behavior);
+ if (behaviorText != "disable") {
+ if (behaviorText == "require" || behaviorText == "enable" || behaviorText == "warn") {
+ // We don't currently do anything different between require, enable, and warn
+ dsl::AddExtension(this->text(name));
+ } else {
+ this->error(behavior, "expected 'require', 'enable', 'warn', or 'disable'");
+ }
+ }
+
+ // We expect a newline after an #extension directive.
+ if (!this->expectNewline()) {
+ this->error(start, "invalid #extension directive");
+ }
+ } else if (text == "#version") {
+ if (!allowVersion) {
+ this->error(start, "#version directive must appear before anything else");
+ return;
+ }
+ SKSL_INT version;
+ if (!this->intLiteral(&version)) {
+ return;
+ }
+ switch (version) {
+ case 100:
+ ThreadContext::GetProgramConfig()->fRequiredSkSLVersion = Version::k100;
+ break;
+ case 300:
+ ThreadContext::GetProgramConfig()->fRequiredSkSLVersion = Version::k300;
+ break;
+ default:
+ this->error(start, "unsupported version number");
+ return;
+ }
+ // We expect a newline after a #version directive.
+ if (!this->expectNewline()) {
+ this->error(start, "invalid #version directive");
+ }
+ } else {
+ this->error(start, "unsupported directive '" + std::string(this->text(start)) + "'");
+ }
+}
+
+/* modifiers (structVarDeclaration | type IDENTIFIER ((LPAREN parameter (COMMA parameter)* RPAREN
+ (block | SEMICOLON)) | SEMICOLON) | interfaceBlock) */
+bool Parser::declaration() {
+ Token start = this->peek();
+ if (start.fKind == Token::Kind::TK_SEMICOLON) {
+ this->nextToken();
+ this->error(start, "expected a declaration, but found ';'");
+ return false;
+ }
+ DSLModifiers modifiers = this->modifiers();
+ Token lookahead = this->peek();
+ if (lookahead.fKind == Token::Kind::TK_IDENTIFIER &&
+ !this->symbolTable()->isType(this->text(lookahead))) {
+ // we have an identifier that's not a type, could be the start of an interface block
+ return this->interfaceBlock(modifiers);
+ }
+ if (lookahead.fKind == Token::Kind::TK_SEMICOLON) {
+ this->nextToken();
+ Declare(modifiers, this->position(start));
+ return true;
+ }
+ if (lookahead.fKind == Token::Kind::TK_STRUCT) {
+ this->structVarDeclaration(this->position(start), modifiers);
+ return true;
+ }
+ DSLType type = this->type(&modifiers);
+ if (!type.hasValue()) {
+ return false;
+ }
+ Token name;
+ if (!this->expectIdentifier(&name)) {
+ return false;
+ }
+ if (this->checkNext(Token::Kind::TK_LPAREN)) {
+ return this->functionDeclarationEnd(this->position(start), modifiers, type, name);
+ } else {
+ this->globalVarDeclarationEnd(this->position(start), modifiers, type, name);
+ return true;
+ }
+}
+
+/* (RPAREN | VOID RPAREN | parameter (COMMA parameter)* RPAREN) (block | SEMICOLON) */
+bool Parser::functionDeclarationEnd(Position start,
+ DSLModifiers& modifiers,
+ DSLType type,
+ const Token& name) {
+ SkSTArray<8, DSLParameter> parameters;
+ Token lookahead = this->peek();
+ if (lookahead.fKind == Token::Kind::TK_RPAREN) {
+ // `()` means no parameters at all.
+ } else if (lookahead.fKind == Token::Kind::TK_IDENTIFIER && this->text(lookahead) == "void") {
+ // `(void)` also means no parameters at all.
+ this->nextToken();
+ } else {
+ for (;;) {
+ size_t paramIndex = parameters.size();
+ std::optional<DSLParameter> parameter = this->parameter(paramIndex);
+ if (!parameter) {
+ return false;
+ }
+ parameters.push_back(std::move(*parameter));
+ if (!this->checkNext(Token::Kind::TK_COMMA)) {
+ break;
+ }
+ }
+ }
+ if (!this->expect(Token::Kind::TK_RPAREN, "')'")) {
+ return false;
+ }
+ SkSTArray<8, DSLParameter*> parameterPointers;
+ parameterPointers.reserve_back(parameters.size());
+ for (DSLParameter& param : parameters) {
+ parameterPointers.push_back(&param);
+ }
+
+ DSLFunction result(this->text(name), modifiers, type, parameterPointers,
+ this->rangeFrom(start));
+
+ const bool hasFunctionBody = !this->checkNext(Token::Kind::TK_SEMICOLON);
+ if (hasFunctionBody) {
+ AutoSymbolTable symbols(this);
+ for (DSLParameter* var : parameterPointers) {
+ if (!var->name().empty()) {
+ this->addToSymbolTable(*var);
+ }
+ }
+ Token bodyStart = this->peek();
+ std::optional<DSLBlock> body = this->block();
+ if (!body) {
+ return false;
+ }
+ result.define(std::move(*body), this->rangeFrom(bodyStart));
+ } else {
+ result.prototype();
+ }
+ return true;
+}
+
+bool Parser::arraySize(SKSL_INT* outResult) {
+ // Start out with a safe value that won't generate any errors downstream
+ *outResult = 1;
+ Token next = this->peek();
+ if (next.fKind == Token::Kind::TK_RBRACKET) {
+ this->error(this->position(next), "unsized arrays are not permitted here");
+ return true;
+ }
+ DSLExpression sizeExpr = this->expression();
+ if (!sizeExpr.hasValue()) {
+ return false;
+ }
+ if (sizeExpr.isValid()) {
+ std::unique_ptr<SkSL::Expression> sizeLiteral = sizeExpr.release();
+ SKSL_INT size;
+ if (!ConstantFolder::GetConstantInt(*sizeLiteral, &size)) {
+ this->error(sizeLiteral->fPosition, "array size must be an integer");
+ return true;
+ }
+ if (size > INT32_MAX) {
+ this->error(sizeLiteral->fPosition, "array size out of bounds");
+ return true;
+ }
+ if (size <= 0) {
+ this->error(sizeLiteral->fPosition, "array size must be positive");
+ return true;
+ }
+ // Now that we've validated it, output the real value
+ *outResult = size;
+ }
+ return true;
+}
+
+bool Parser::parseArrayDimensions(Position pos, DSLType* type) {
+ Token next;
+ while (this->checkNext(Token::Kind::TK_LBRACKET, &next)) {
+ if (this->checkNext(Token::Kind::TK_RBRACKET)) {
+ if (this->allowUnsizedArrays()) {
+ *type = UnsizedArray(*type, this->rangeFrom(pos));
+ } else {
+ this->error(this->rangeFrom(pos), "unsized arrays are not permitted here");
+ }
+ } else {
+ SKSL_INT size;
+ if (!this->arraySize(&size)) {
+ return false;
+ }
+ if (!this->expect(Token::Kind::TK_RBRACKET, "']'")) {
+ return false;
+ }
+ *type = Array(*type, size, this->rangeFrom(pos));
+ }
+ }
+ return true;
+}
+
+bool Parser::parseInitializer(Position pos, DSLExpression* initializer) {
+ if (this->checkNext(Token::Kind::TK_EQ)) {
+ DSLExpression value = this->assignmentExpression();
+ if (!value.hasValue()) {
+ return false;
+ }
+ initializer->swap(value);
+ }
+ return true;
+}
+
+/* (LBRACKET expression? RBRACKET)* (EQ assignmentExpression)? (COMMA IDENTIFER
+ (LBRACKET expression? RBRACKET)* (EQ assignmentExpression)?)* SEMICOLON */
+// Finishes parsing a global variable declaration once the modifiers, base type, and first
+// identifier have already been consumed: handles array dimensions, optional initializers,
+// additional comma-separated declarators, and the trailing semicolon. Each declared variable
+// is both Declare()d and entered into the symbol table.
+void Parser::globalVarDeclarationEnd(Position pos,
+                                     const dsl::DSLModifiers& mods,
+                                     dsl::DSLType baseType,
+                                     Token name) {
+    using namespace dsl;
+    DSLType type = baseType;
+    DSLExpression initializer;
+    if (!this->parseArrayDimensions(pos, &type)) {
+        return;
+    }
+    if (!this->parseInitializer(pos, &initializer)) {
+        return;
+    }
+    DSLGlobalVar first(mods, type, this->text(name), std::move(initializer), this->rangeFrom(pos),
+                       this->position(name));
+    Declare(first);
+    this->addToSymbolTable(first);
+
+    while (this->checkNext(Token::Kind::TK_COMMA)) {
+        // Each comma-separated declarator starts over from the un-arrayed base type.
+        type = baseType;
+        Token identifierName;
+        if (!this->expectIdentifier(&identifierName)) {
+            return;
+        }
+        if (!this->parseArrayDimensions(pos, &type)) {
+            return;
+        }
+        DSLExpression anotherInitializer;
+        if (!this->parseInitializer(pos, &anotherInitializer)) {
+            return;
+        }
+        // NOTE(review): unlike `first` (and the local-variable variant), `next` is constructed
+        // without a separate name position argument — confirm this overload is intended here.
+        DSLGlobalVar next(mods, type, this->text(identifierName), std::move(anotherInitializer),
+                          this->rangeFrom(identifierName));
+        Declare(next);
+        this->addToSymbolTable(next, this->position(identifierName));
+    }
+    this->expect(Token::Kind::TK_SEMICOLON, "';'");
+}
+
+/* (LBRACKET expression? RBRACKET)* (EQ assignmentExpression)? (COMMA IDENTIFER
+ (LBRACKET expression? RBRACKET)* (EQ assignmentExpression)?)* SEMICOLON */
+// Finishes parsing a local variable declaration statement once modifiers, base type, and the
+// first identifier have been consumed. Returns a Declare statement covering every declarator;
+// on mid-parse failure it returns whatever was successfully built so far.
+DSLStatement Parser::localVarDeclarationEnd(Position pos,
+                                            const dsl::DSLModifiers& mods,
+                                            dsl::DSLType baseType,
+                                            Token name) {
+    using namespace dsl;
+    DSLType type = baseType;
+    DSLExpression initializer;
+    if (!this->parseArrayDimensions(pos, &type)) {
+        return {};
+    }
+    if (!this->parseInitializer(pos, &initializer)) {
+        return {};
+    }
+    DSLVar first(mods, type, this->text(name), std::move(initializer), this->rangeFrom(pos),
+                 this->position(name));
+    DSLStatement result = Declare(first);
+    this->addToSymbolTable(first);
+
+    while (this->checkNext(Token::Kind::TK_COMMA)) {
+        // Each additional declarator restarts from the un-arrayed base type.
+        type = baseType;
+        Token identifierName;
+        if (!this->expectIdentifier(&identifierName)) {
+            return result;
+        }
+        if (!this->parseArrayDimensions(pos, &type)) {
+            return result;
+        }
+        DSLExpression anotherInitializer;
+        if (!this->parseInitializer(pos, &anotherInitializer)) {
+            return result;
+        }
+        DSLVar next(mods, type, this->text(identifierName), std::move(anotherInitializer),
+                    this->rangeFrom(identifierName), this->position(identifierName));
+        // Fold the extra declarator into the single Declare statement being built.
+        DSLWriter::AddVarDeclaration(result, next);
+        this->addToSymbolTable(next, this->position(identifierName));
+    }
+    this->expect(Token::Kind::TK_SEMICOLON, "';'");
+    result.setPosition(this->rangeFrom(pos));
+    return result;
+}
+
+/* (varDeclarations | expressionStatement) */
+// Disambiguates statements that could be either a variable declaration or an expression
+// statement (e.g. `int x = 1;` vs `int(1);`) by speculatively parsing a vardecl prefix and
+// rewinding the lexer if that fails.
+DSLStatement Parser::varDeclarationsOrExpressionStatement() {
+    Token nextToken = this->peek();
+    if (nextToken.fKind == Token::Kind::TK_CONST) {
+        // Statements that begin with `const` might be variable declarations, but can't be legal
+        // SkSL expression-statements. (SkSL constructors don't take a `const` modifier.)
+        return this->varDeclarations();
+    }
+
+    if (nextToken.fKind == Token::Kind::TK_HIGHP ||
+        nextToken.fKind == Token::Kind::TK_MEDIUMP ||
+        nextToken.fKind == Token::Kind::TK_LOWP ||
+        this->symbolTable()->isType(this->text(nextToken))) {
+        // Statements that begin with a typename are most often variable declarations, but
+        // occasionally the type is part of a constructor, and these are actually expression-
+        // statements in disguise. First, attempt the common case: parse it as a vardecl.
+        Checkpoint checkpoint(this);
+        VarDeclarationsPrefix prefix;
+        if (this->varDeclarationsPrefix(&prefix)) {
+            checkpoint.accept();
+            return this->localVarDeclarationEnd(prefix.fPosition, prefix.fModifiers, prefix.fType,
+                                                prefix.fName);
+        }
+
+        // If this statement wasn't actually a vardecl after all, rewind and try parsing it as an
+        // expression-statement instead.
+        checkpoint.rewind();
+    }
+    return this->expressionStatement();
+}
+
+// Helper function for varDeclarations(). If this function succeeds, we assume that the rest of the
+// statement is a variable-declaration statement, not an expression-statement.
+// Attempts to parse the leading portion of a variable declaration — modifiers, a type, and the
+// first identifier — filling in `prefixData`. Returns true only if all three were present.
+bool Parser::varDeclarationsPrefix(VarDeclarationsPrefix* prefixData) {
+    VarDeclarationsPrefix& out = *prefixData;
+    out.fPosition = this->position(this->peek());
+    out.fModifiers = this->modifiers();
+    out.fType = this->type(&out.fModifiers);
+    // Short-circuit: only look for the identifier if the type parsed successfully.
+    return out.fType.hasValue() && this->expectIdentifier(&out.fName);
+}
+
+/* modifiers type IDENTIFIER varDeclarationEnd */
+// Parses a complete local variable declaration statement: the modifiers/type/identifier
+// prefix, then the declarator tail (arrays, initializers, extra declarators, semicolon).
+DSLStatement Parser::varDeclarations() {
+    VarDeclarationsPrefix prefix;
+    if (!this->varDeclarationsPrefix(&prefix)) {
+        return {};
+    }
+    return this->localVarDeclarationEnd(prefix.fPosition, prefix.fModifiers, prefix.fType,
+                                        prefix.fName);
+}
+
+/* STRUCT IDENTIFIER LBRACE varDeclaration* RBRACE */
+// Parses a `struct` declaration and returns the resulting struct type. Duplicate field names
+// and empty structs are reported as errors; a null DSLType signals a parse failure.
+DSLType Parser::structDeclaration() {
+    Position start = this->position(this->peek());
+    if (!this->expect(Token::Kind::TK_STRUCT, "'struct'")) {
+        return DSLType(nullptr);
+    }
+    Token name;
+    if (!this->expectIdentifier(&name)) {
+        return DSLType(nullptr);
+    }
+    if (!this->expect(Token::Kind::TK_LBRACE, "'{'")) {
+        return DSLType(nullptr);
+    }
+    // Guard against pathologically deep nesting (structs containing structs, etc.).
+    AutoDepth depth(this);
+    if (!depth.increase()) {
+        return DSLType(nullptr);
+    }
+    SkTArray<DSLField> fields;
+    SkTHashSet<std::string_view> fieldNames;
+    while (!this->checkNext(Token::Kind::TK_RBRACE)) {
+        Token fieldStart = this->peek();
+        DSLModifiers modifiers = this->modifiers();
+        DSLType type = this->type(&modifiers);
+        if (!type.hasValue()) {
+            return DSLType(nullptr);
+        }
+
+        // One type may introduce several comma-separated members, each with its own
+        // optional array dimensions.
+        do {
+            DSLType actualType = type;
+            Token memberName;
+            if (!this->expectIdentifier(&memberName)) {
+                return DSLType(nullptr);
+            }
+
+            while (this->checkNext(Token::Kind::TK_LBRACKET)) {
+                SKSL_INT size;
+                if (!this->arraySize(&size)) {
+                    return DSLType(nullptr);
+                }
+                if (!this->expect(Token::Kind::TK_RBRACKET, "']'")) {
+                    return DSLType(nullptr);
+                }
+                actualType = dsl::Array(actualType, size,
+                                        this->rangeFrom(this->position(fieldStart)));
+            }
+
+            // Reject duplicate member names, but keep parsing so later errors surface too.
+            std::string_view nameText = this->text(memberName);
+            if (!fieldNames.contains(nameText)) {
+                fields.push_back(DSLField(modifiers,
+                                          std::move(actualType),
+                                          nameText,
+                                          this->rangeFrom(fieldStart)));
+                fieldNames.add(nameText);
+            } else {
+                this->error(memberName, "field '" + std::string(nameText) +
+                                        "' was already defined in the same struct ('" +
+                                        std::string(this->text(name)) + "')");
+            }
+        } while (this->checkNext(Token::Kind::TK_COMMA));
+        if (!this->expect(Token::Kind::TK_SEMICOLON, "';'")) {
+            return DSLType(nullptr);
+        }
+    }
+    if (fields.empty()) {
+        this->error(this->rangeFrom(start), "struct '" + std::string(this->text(name)) +
+                                            "' must contain at least one field");
+    }
+    return dsl::Struct(this->text(name), SkSpan(fields), this->rangeFrom(start));
+}
+
+/* structDeclaration ((IDENTIFIER varDeclarationEnd) | SEMICOLON) */
+// Parses a struct declaration that may be immediately followed by a variable of that struct
+// type (`struct S { ... } s;`), or just a terminating semicolon. Always returns an empty
+// array; declared globals are registered via globalVarDeclarationEnd().
+SkTArray<dsl::DSLGlobalVar> Parser::structVarDeclaration(Position start,
+                                                         const DSLModifiers& modifiers) {
+    DSLType type = this->structDeclaration();
+    if (!type.hasValue()) {
+        return {};
+    }
+    Token name;
+    if (this->checkIdentifier(&name)) {
+        this->globalVarDeclarationEnd(this->rangeFrom(name), modifiers, type, name);
+    } else {
+        this->expect(Token::Kind::TK_SEMICOLON, "';'");
+    }
+    return {};
+}
+
+/* modifiers type IDENTIFIER (LBRACKET INT_LITERAL RBRACKET)? */
+// Parses a single function parameter: modifiers, a type, an optional name (unnamed parameters
+// are allowed), and optional array dimensions. Returns nullopt on parse failure.
+std::optional<DSLParameter> Parser::parameter(size_t paramIndex) {
+    Position pos = this->position(this->peek());
+    DSLModifiers modifiers = this->modifiers();
+    DSLType type = this->type(&modifiers);
+    if (!type.hasValue()) {
+        return std::nullopt;
+    }
+    Token name;
+    std::string_view paramText;
+    Position paramPos;
+    if (this->checkIdentifier(&name)) {
+        paramText = this->text(name);
+        paramPos = this->position(name);
+    } else {
+        // Unnamed parameter: use the whole parameter's range as its position.
+        paramPos = this->rangeFrom(pos);
+    }
+    if (!this->parseArrayDimensions(pos, &type)) {
+        return std::nullopt;
+    }
+    return DSLParameter(modifiers, type, paramText, this->rangeFrom(pos), paramPos);
+}
+
+/** EQ INT_LITERAL */
+int Parser::layoutInt() {
+ if (!this->expect(Token::Kind::TK_EQ, "'='")) {
+ return -1;
+ }
+ Token resultToken;
+ if (!this->expect(Token::Kind::TK_INT_LITERAL, "a non-negative integer", &resultToken)) {
+ return -1;
+ }
+ std::string_view resultFrag = this->text(resultToken);
+ SKSL_INT resultValue;
+ if (!SkSL::stoi(resultFrag, &resultValue)) {
+ this->error(resultToken, "value in layout is too large: " + std::string(resultFrag));
+ return -1;
+ }
+ return resultValue;
+}
+
+/** EQ IDENTIFIER */
+std::string_view Parser::layoutIdentifier() {
+    // Both the '=' and the identifier are mandatory; any failure yields an empty view.
+    Token nameToken;
+    if (this->expect(Token::Kind::TK_EQ, "'='") && this->expectIdentifier(&nameToken)) {
+        return this->text(nameToken);
+    }
+    return {};
+}
+
+/* LAYOUT LPAREN IDENTIFIER (EQ INT_LITERAL)? (COMMA IDENTIFIER (EQ INT_LITERAL)?)* RPAREN */
+// Parses an optional layout(...) qualifier list. Unknown qualifier names are reported but do
+// not abort parsing; flag-style qualifiers take no value, while the numeric ones consume an
+// `= INT_LITERAL` via layoutInt().
+DSLLayout Parser::layout() {
+    enum class LayoutToken {
+        LOCATION,
+        OFFSET,
+        BINDING,
+        TEXTURE,
+        SAMPLER,
+        INDEX,
+        SET,
+        BUILTIN,
+        INPUT_ATTACHMENT_INDEX,
+        ORIGIN_UPPER_LEFT,
+        BLEND_SUPPORT_ALL_EQUATIONS,
+        PUSH_CONSTANT,
+        COLOR,
+        SPIRV,
+        METAL,
+        GL,
+        WGSL
+    };
+
+    // Heap-allocated and intentionally never freed — presumably to avoid static-destruction
+    // order issues at process exit (TODO confirm).
+    using LayoutMap = SkTHashMap<std::string_view, LayoutToken>;
+    static LayoutMap* sLayoutTokens = new LayoutMap{
+            {"location",                    LayoutToken::LOCATION},
+            {"offset",                      LayoutToken::OFFSET},
+            {"binding",                     LayoutToken::BINDING},
+            {"texture",                     LayoutToken::TEXTURE},
+            {"sampler",                     LayoutToken::SAMPLER},
+            {"index",                       LayoutToken::INDEX},
+            {"set",                         LayoutToken::SET},
+            {"builtin",                     LayoutToken::BUILTIN},
+            {"input_attachment_index",      LayoutToken::INPUT_ATTACHMENT_INDEX},
+            {"origin_upper_left",           LayoutToken::ORIGIN_UPPER_LEFT},
+            {"blend_support_all_equations", LayoutToken::BLEND_SUPPORT_ALL_EQUATIONS},
+            {"push_constant",               LayoutToken::PUSH_CONSTANT},
+            {"color",                       LayoutToken::COLOR},
+            {"spirv",                       LayoutToken::SPIRV},
+            {"metal",                       LayoutToken::METAL},
+            {"gl",                          LayoutToken::GL},
+            {"wgsl",                        LayoutToken::WGSL},
+    };
+
+    DSLLayout result;
+    if (this->checkNext(Token::Kind::TK_LAYOUT)) {
+        if (!this->expect(Token::Kind::TK_LPAREN, "'('")) {
+            return result;
+        }
+        for (;;) {
+            Token t = this->nextToken();
+            std::string text(this->text(t));
+            LayoutToken* found = sLayoutTokens->find(text);
+            if (found != nullptr) {
+                switch (*found) {
+                    // Flag qualifiers: no value follows.
+                    case LayoutToken::SPIRV:
+                        result.spirv(this->position(t));
+                        break;
+                    case LayoutToken::METAL:
+                        result.metal(this->position(t));
+                        break;
+                    case LayoutToken::GL:
+                        result.gl(this->position(t));
+                        break;
+                    case LayoutToken::WGSL:
+                        result.wgsl(this->position(t));
+                        break;
+                    case LayoutToken::ORIGIN_UPPER_LEFT:
+                        result.originUpperLeft(this->position(t));
+                        break;
+                    case LayoutToken::PUSH_CONSTANT:
+                        result.pushConstant(this->position(t));
+                        break;
+                    case LayoutToken::BLEND_SUPPORT_ALL_EQUATIONS:
+                        result.blendSupportAllEquations(this->position(t));
+                        break;
+                    case LayoutToken::COLOR:
+                        result.color(this->position(t));
+                        break;
+                    // Value qualifiers: each consumes an `= INT_LITERAL`.
+                    case LayoutToken::LOCATION:
+                        result.location(this->layoutInt(), this->position(t));
+                        break;
+                    case LayoutToken::OFFSET:
+                        result.offset(this->layoutInt(), this->position(t));
+                        break;
+                    case LayoutToken::BINDING:
+                        result.binding(this->layoutInt(), this->position(t));
+                        break;
+                    case LayoutToken::INDEX:
+                        result.index(this->layoutInt(), this->position(t));
+                        break;
+                    case LayoutToken::SET:
+                        result.set(this->layoutInt(), this->position(t));
+                        break;
+                    case LayoutToken::TEXTURE:
+                        result.texture(this->layoutInt(), this->position(t));
+                        break;
+                    case LayoutToken::SAMPLER:
+                        result.sampler(this->layoutInt(), this->position(t));
+                        break;
+                    case LayoutToken::BUILTIN:
+                        result.builtin(this->layoutInt(), this->position(t));
+                        break;
+                    case LayoutToken::INPUT_ATTACHMENT_INDEX:
+                        result.inputAttachmentIndex(this->layoutInt(), this->position(t));
+                        break;
+                }
+            } else {
+                this->error(t, "'" + text + "' is not a valid layout qualifier");
+            }
+            if (this->checkNext(Token::Kind::TK_RPAREN)) {
+                break;
+            }
+            if (!this->expect(Token::Kind::TK_COMMA, "','")) {
+                break;
+            }
+        }
+    }
+    return result;
+}
+
+/* layout? (UNIFORM | CONST | IN | OUT | INOUT | LOWP | MEDIUMP | HIGHP | FLAT | NOPERSPECTIVE |
+ VARYING | INLINE | WORKGROUP | READONLY | WRITEONLY | BUFFER)* */
+// Parses an optional layout(...) clause followed by any number of modifier keywords, reporting
+// duplicates. The returned DSLModifiers carries the source range of everything consumed.
+DSLModifiers Parser::modifiers() {
+    int start = this->peek().fOffset;
+    DSLLayout layout = this->layout();
+    // Peek at the raw token stream so trailing whitespace after layout() isn't included in the
+    // range when no modifier keywords follow; non-whitespace tokens are pushed back.
+    Token raw = this->nextRawToken();
+    int end = raw.fOffset;
+    if (!is_whitespace(raw.fKind)) {
+        this->pushback(raw);
+    }
+    int flags = 0;
+    for (;;) {
+        int tokenFlag = parse_modifier_token(peek().fKind);
+        if (!tokenFlag) {
+            break;
+        }
+        Token modifier = this->nextToken();
+        // The same flag appearing twice (e.g. `const const`) is an error, but parsing continues.
+        if (int duplicateFlags = (tokenFlag & flags)) {
+            this->error(modifier, "'" + Modifiers::DescribeFlags(duplicateFlags) +
+                                  "' appears more than once");
+        }
+        flags |= tokenFlag;
+        end = this->position(modifier).endOffset();
+    }
+    return DSLModifiers(std::move(layout), flags, Position::Range(start, end));
+}
+
+/* ifStatement | forStatement | doStatement | whileStatement | block | expression */
+// Parses a single statement, dispatching on the first token. The token is consumed and then
+// pushed back so each sub-parser can re-read it; AutoDepth bounds statement nesting.
+DSLStatement Parser::statement() {
+    Token start = this->nextToken();
+    AutoDepth depth(this);
+    if (!depth.increase()) {
+        return {};
+    }
+    this->pushback(start);
+    switch (start.fKind) {
+        case Token::Kind::TK_IF:
+            return this->ifStatement();
+        case Token::Kind::TK_FOR:
+            return this->forStatement();
+        case Token::Kind::TK_DO:
+            return this->doStatement();
+        case Token::Kind::TK_WHILE:
+            return this->whileStatement();
+        case Token::Kind::TK_SWITCH:
+            return this->switchStatement();
+        case Token::Kind::TK_RETURN:
+            return this->returnStatement();
+        case Token::Kind::TK_BREAK:
+            return this->breakStatement();
+        case Token::Kind::TK_CONTINUE:
+            return this->continueStatement();
+        case Token::Kind::TK_DISCARD:
+            return this->discardStatement();
+        case Token::Kind::TK_LBRACE: {
+            std::optional<DSLBlock> result = this->block();
+            return result ? DSLStatement(std::move(*result)) : DSLStatement();
+        }
+        case Token::Kind::TK_SEMICOLON:
+            // A bare ';' is a no-op statement, represented as an empty block.
+            this->nextToken();
+            return DSLBlock();
+        case Token::Kind::TK_HIGHP:
+        case Token::Kind::TK_MEDIUMP:
+        case Token::Kind::TK_LOWP:
+        case Token::Kind::TK_CONST:
+        case Token::Kind::TK_IDENTIFIER:
+            // Could be either a declaration or an expression; disambiguate downstream.
+            return this->varDeclarationsOrExpressionStatement();
+        default:
+            return this->expressionStatement();
+    }
+}
+
+/* IDENTIFIER(type) (LBRACKET intLiteral? RBRACKET)* QUESTION? */
+// Parses a type name plus any array-dimension suffixes. A null DSLType signals a hard parse
+// failure; DSLType::Invalid() is returned for names that aren't usable types.
+DSLType Parser::type(DSLModifiers* modifiers) {
+    Token type;
+    if (!this->expect(Token::Kind::TK_IDENTIFIER, "a type", &type)) {
+        return DSLType(nullptr);
+    }
+    if (!this->symbolTable()->isType(this->text(type))) {
+        this->error(type, "no type named '" + std::string(this->text(type)) + "'");
+        return DSLType::Invalid();
+    }
+    DSLType result(this->text(type), modifiers, this->position(type));
+    if (result.isInterfaceBlock()) {
+        // SkSL puts interface blocks into the symbol table, but they aren't general-purpose types;
+        // you can't use them to declare a variable type or a function return type.
+        this->error(type, "expected a type, found '" + std::string(this->text(type)) + "'");
+        return DSLType::Invalid();
+    }
+    Token bracket;
+    while (this->checkNext(Token::Kind::TK_LBRACKET, &bracket)) {
+        if (this->checkNext(Token::Kind::TK_RBRACKET)) {
+            // Empty brackets: unsized array, only valid in some contexts.
+            if (this->allowUnsizedArrays()) {
+                result = UnsizedArray(result, this->rangeFrom(type));
+            } else {
+                this->error(this->rangeFrom(bracket), "unsized arrays are not permitted here");
+            }
+        } else {
+            SKSL_INT size;
+            if (!this->arraySize(&size)) {
+                return DSLType(nullptr);
+            }
+            this->expect(Token::Kind::TK_RBRACKET, "']'");
+            result = Array(result, size, this->rangeFrom(type));
+        }
+    }
+    return result;
+}
+
+/* IDENTIFIER LBRACE
+ varDeclaration+
+ RBRACE (IDENTIFIER (LBRACKET expression RBRACKET)*)? SEMICOLON */
+// Parses an interface block: a named brace-enclosed list of member declarations, optionally
+// followed by an instance name with an optional array size. Returns false on parse failure.
+bool Parser::interfaceBlock(const dsl::DSLModifiers& modifiers) {
+    Token typeName;
+    if (!this->expectIdentifier(&typeName)) {
+        return false;
+    }
+    if (this->peek().fKind != Token::Kind::TK_LBRACE) {
+        // we only get into interfaceBlock if we found a top-level identifier which was not a type.
+        // 99% of the time, the user was not actually intending to create an interface block, so
+        // it's better to report it as an unknown type
+        this->error(typeName, "no type named '" + std::string(this->text(typeName)) + "'");
+        return false;
+    }
+    this->nextToken();
+    SkTArray<DSLField> fields;
+    SkTHashSet<std::string_view> fieldNames;
+    while (!this->checkNext(Token::Kind::TK_RBRACE)) {
+        Position fieldPos = this->position(this->peek());
+        DSLModifiers fieldModifiers = this->modifiers();
+        DSLType type = this->type(&fieldModifiers);
+        if (!type.hasValue()) {
+            return false;
+        }
+        // One type may declare several comma-separated members.
+        do {
+            Token fieldName;
+            if (!this->expectIdentifier(&fieldName)) {
+                return false;
+            }
+            DSLType actualType = type;
+            if (this->checkNext(Token::Kind::TK_LBRACKET)) {
+                Token sizeToken = this->peek();
+                if (sizeToken.fKind != Token::Kind::TK_RBRACKET) {
+                    SKSL_INT size;
+                    if (!this->arraySize(&size)) {
+                        return false;
+                    }
+                    // NOTE(review): array positions here use the block's type name rather than
+                    // the member's own range — confirm this is intentional.
+                    actualType = Array(std::move(actualType), size, this->position(typeName));
+                } else if (this->allowUnsizedArrays()) {
+                    actualType = UnsizedArray(std::move(actualType), this->position(typeName));
+                } else {
+                    this->error(sizeToken, "unsized arrays are not permitted here");
+                }
+                this->expect(Token::Kind::TK_RBRACKET, "']'");
+            }
+            if (!this->expect(Token::Kind::TK_SEMICOLON, "';'")) {
+                return false;
+            }
+
+            // Duplicate member names are reported, but parsing continues.
+            std::string_view nameText = this->text(fieldName);
+            if (!fieldNames.contains(nameText)) {
+                fields.push_back(DSLField(fieldModifiers,
+                                          std::move(actualType),
+                                          nameText,
+                                          this->rangeFrom(fieldPos)));
+                fieldNames.add(nameText);
+            } else {
+                this->error(fieldName, "field '" + std::string(nameText) +
+                                       "' was already defined in the same interface block ('" +
+                                       std::string(this->text(typeName)) + "')");
+            }
+        } while (this->checkNext(Token::Kind::TK_COMMA));
+    }
+    if (fields.empty()) {
+        this->error(this->rangeFrom(typeName), "interface block '" +
+                std::string(this->text(typeName)) + "' must contain at least one member");
+    }
+    std::string_view instanceName;
+    Token instanceNameToken;
+    SKSL_INT size = 0;
+    if (this->checkIdentifier(&instanceNameToken)) {
+        instanceName = this->text(instanceNameToken);
+        if (this->checkNext(Token::Kind::TK_LBRACKET)) {
+            if (!this->arraySize(&size)) {
+                return false;
+            }
+            this->expect(Token::Kind::TK_RBRACKET, "']'");
+        }
+    }
+    // Only emit the block if at least one member parsed; the empty case already errored above.
+    if (!fields.empty()) {
+        dsl::InterfaceBlock(modifiers, this->text(typeName), std::move(fields), instanceName,
+                            size, this->position(typeName));
+    }
+    this->expect(Token::Kind::TK_SEMICOLON, "';'");
+    return true;
+}
+
+/* IF LPAREN expression RPAREN statement (ELSE statement)? */
+// Parses an if statement with an optional else clause; returns an empty statement on failure.
+DSLStatement Parser::ifStatement() {
+    Token start;
+    if (!this->expect(Token::Kind::TK_IF, "'if'", &start)) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_LPAREN, "'('")) {
+        return {};
+    }
+    DSLExpression test = this->expression();
+    if (!test.hasValue()) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_RPAREN, "')'")) {
+        return {};
+    }
+    DSLStatement ifTrue = this->statement();
+    if (!ifTrue.hasValue()) {
+        return {};
+    }
+    DSLStatement ifFalse;
+    if (this->checkNext(Token::Kind::TK_ELSE)) {
+        ifFalse = this->statement();
+        if (!ifFalse.hasValue()) {
+            return {};
+        }
+    }
+    Position pos = this->rangeFrom(start);
+    // An absent else clause is represented by a default-constructed DSLStatement.
+    return If(std::move(test), std::move(ifTrue),
+              ifFalse.hasValue() ? std::move(ifFalse) : DSLStatement(), pos);
+}
+
+/* DO statement WHILE LPAREN expression RPAREN SEMICOLON */
+// Parses a do/while loop; returns an empty statement on any parse failure.
+DSLStatement Parser::doStatement() {
+    Token start;
+    if (!this->expect(Token::Kind::TK_DO, "'do'", &start)) {
+        return {};
+    }
+    DSLStatement statement = this->statement();
+    if (!statement.hasValue()) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_WHILE, "'while'")) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_LPAREN, "'('")) {
+        return {};
+    }
+    DSLExpression test = this->expression();
+    if (!test.hasValue()) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_RPAREN, "')'")) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_SEMICOLON, "';'")) {
+        return {};
+    }
+    return Do(std::move(statement), std::move(test), this->rangeFrom(start));
+}
+
+/* WHILE LPAREN expression RPAREN STATEMENT */
+// Parses a while loop; returns an empty statement on any parse failure.
+DSLStatement Parser::whileStatement() {
+    Token start;
+    if (!this->expect(Token::Kind::TK_WHILE, "'while'", &start)) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_LPAREN, "'('")) {
+        return {};
+    }
+    DSLExpression test = this->expression();
+    if (!test.hasValue()) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_RPAREN, "')'")) {
+        return {};
+    }
+    DSLStatement statement = this->statement();
+    if (!statement.hasValue()) {
+        return {};
+    }
+    return While(std::move(test), std::move(statement), this->rangeFrom(start));
+}
+
+/* CASE expression COLON statement* */
+// Parses one `case <expr>:` label and the statements that follow it, stopping at the next
+// case/default label or the closing brace. Returns nullopt on parse failure.
+std::optional<DSLCase> Parser::switchCase() {
+    Token start;
+    if (!this->expect(Token::Kind::TK_CASE, "'case'", &start)) {
+        return {};
+    }
+    DSLExpression value = this->expression();
+    if (!value.hasValue()) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_COLON, "':'")) {
+        return {};
+    }
+    SkTArray<DSLStatement> statements;
+    // Accumulate statements until the next label or the end of the switch body.
+    while (this->peek().fKind != Token::Kind::TK_RBRACE &&
+           this->peek().fKind != Token::Kind::TK_CASE &&
+           this->peek().fKind != Token::Kind::TK_DEFAULT) {
+        DSLStatement s = this->statement();
+        if (!s.hasValue()) {
+            return {};
+        }
+        statements.push_back(std::move(s));
+    }
+    return DSLCase(std::move(value), std::move(statements));
+}
+
+/* SWITCH LPAREN expression RPAREN LBRACE switchCase* (DEFAULT COLON statement*)? RBRACE */
+// Parses a switch statement. `default:` must come after all `case` labels (a deliberate SkSL
+// restriction that other compiler stages rely on). Returns an empty statement on failure.
+DSLStatement Parser::switchStatement() {
+    Token start;
+    if (!this->expect(Token::Kind::TK_SWITCH, "'switch'", &start)) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_LPAREN, "'('")) {
+        return {};
+    }
+    DSLExpression value = this->expression();
+    if (!value.hasValue()) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_RPAREN, "')'")) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_LBRACE, "'{'")) {
+        return {};
+    }
+    SkTArray<DSLCase> cases;
+    while (this->peek().fKind == Token::Kind::TK_CASE) {
+        std::optional<DSLCase> c = this->switchCase();
+        if (!c) {
+            return {};
+        }
+        cases.push_back(std::move(*c));
+    }
+    // Requiring default: to be last (in defiance of C and GLSL) was a deliberate decision. Other
+    // parts of the compiler may rely upon this assumption.
+    if (this->peek().fKind == Token::Kind::TK_DEFAULT) {
+        SkTArray<DSLStatement> statements;
+        Token defaultStart;
+        SkAssertResult(this->expect(Token::Kind::TK_DEFAULT, "'default'", &defaultStart));
+        if (!this->expect(Token::Kind::TK_COLON, "':'")) {
+            return {};
+        }
+        while (this->peek().fKind != Token::Kind::TK_RBRACE) {
+            DSLStatement s = this->statement();
+            if (!s.hasValue()) {
+                return {};
+            }
+            statements.push_back(std::move(s));
+        }
+        // Bug fix: the default case's position previously used `start` (the `switch` token),
+        // leaving `defaultStart` unused; point it at the `default` keyword instead.
+        cases.push_back(DSLCase(DSLExpression(), std::move(statements),
+                                this->position(defaultStart)));
+    }
+    if (!this->expect(Token::Kind::TK_RBRACE, "'}'")) {
+        return {};
+    }
+    Position pos = this->rangeFrom(start);
+    return Switch(std::move(value), std::move(cases), pos);
+}
+
+// Builds a Position spanning [start, end), widened so it always covers at least one character.
+static Position range_of_at_least_one_char(int start, int end) {
+    const int clampedEnd = std::max(end, start + 1);
+    return Position::Range(start, clampedEnd);
+}
+
+/* FOR LPAREN (declaration | expression)? SEMICOLON expression? SEMICOLON expression? RPAREN
+ STATEMENT */
+// Parses a for loop. Any of the three header clauses may be empty; the ForLoopPositions record
+// the source ranges of those clauses for diagnostics. An AutoSymbolTable scopes variables
+// declared in the init-statement to the loop.
+dsl::DSLStatement Parser::forStatement() {
+    Token start;
+    if (!this->expect(Token::Kind::TK_FOR, "'for'", &start)) {
+        return {};
+    }
+    Token lparen;
+    if (!this->expect(Token::Kind::TK_LPAREN, "'('", &lparen)) {
+        return {};
+    }
+    AutoSymbolTable symbols(this);
+    dsl::DSLStatement initializer;
+    Token nextToken = this->peek();
+    int firstSemicolonOffset;
+    if (nextToken.fKind == Token::Kind::TK_SEMICOLON) {
+        // An empty init-statement.
+        firstSemicolonOffset = this->nextToken().fOffset;
+    } else {
+        // The init-statement must be an expression or variable declaration.
+        initializer = this->varDeclarationsOrExpressionStatement();
+        if (!initializer.hasValue()) {
+            return {};
+        }
+        // The init-statement consumed its own trailing ';'; recover that semicolon's offset
+        // from the lexer checkpoint (assumes ';' was the last character consumed).
+        firstSemicolonOffset = fLexer.getCheckpoint().fOffset - 1;
+    }
+    dsl::DSLExpression test;
+    if (this->peek().fKind != Token::Kind::TK_SEMICOLON) {
+        dsl::DSLExpression testValue = this->expression();
+        if (!testValue.hasValue()) {
+            return {};
+        }
+        test.swap(testValue);
+    }
+    Token secondSemicolon;
+    if (!this->expect(Token::Kind::TK_SEMICOLON, "';'", &secondSemicolon)) {
+        return {};
+    }
+    dsl::DSLExpression next;
+    if (this->peek().fKind != Token::Kind::TK_RPAREN) {
+        dsl::DSLExpression nextValue = this->expression();
+        if (!nextValue.hasValue()) {
+            return {};
+        }
+        next.swap(nextValue);
+    }
+    Token rparen;
+    if (!this->expect(Token::Kind::TK_RPAREN, "')'", &rparen)) {
+        return {};
+    }
+    dsl::DSLStatement statement = this->statement();
+    if (!statement.hasValue()) {
+        return {};
+    }
+    return For(initializer.hasValue() ? std::move(initializer) : DSLStatement(),
+               test.hasValue() ? std::move(test) : DSLExpression(),
+               next.hasValue() ? std::move(next) : DSLExpression(),
+               std::move(statement),
+               this->rangeFrom(start),
+               ForLoopPositions{
+                    range_of_at_least_one_char(lparen.fOffset + 1, firstSemicolonOffset),
+                    range_of_at_least_one_char(firstSemicolonOffset + 1, secondSemicolon.fOffset),
+                    range_of_at_least_one_char(secondSemicolon.fOffset + 1, rparen.fOffset)
+               });
+}
+
+/* RETURN expression? SEMICOLON */
+// Parses a return statement with an optional value expression.
+DSLStatement Parser::returnStatement() {
+    Token start;
+    if (!this->expect(Token::Kind::TK_RETURN, "'return'", &start)) {
+        return {};
+    }
+    DSLExpression expression;
+    if (this->peek().fKind != Token::Kind::TK_SEMICOLON) {
+        DSLExpression next = this->expression();
+        if (!next.hasValue()) {
+            return {};
+        }
+        expression.swap(next);
+    }
+    if (!this->expect(Token::Kind::TK_SEMICOLON, "';'")) {
+        return {};
+    }
+    // A value-less `return;` is represented by a default-constructed DSLExpression.
+    return Return(expression.hasValue() ? std::move(expression) : DSLExpression(),
+                  this->rangeFrom(start));
+}
+
+/* BREAK SEMICOLON */
+DSLStatement Parser::breakStatement() {
+    // Consume 'break' and its terminating semicolon; either failure aborts the statement.
+    Token breakToken;
+    if (this->expect(Token::Kind::TK_BREAK, "'break'", &breakToken) &&
+        this->expect(Token::Kind::TK_SEMICOLON, "';'")) {
+        return Break(this->position(breakToken));
+    }
+    return {};
+}
+
+/* CONTINUE SEMICOLON */
+DSLStatement Parser::continueStatement() {
+    // Consume 'continue' and its terminating semicolon; either failure aborts the statement.
+    Token continueToken;
+    if (this->expect(Token::Kind::TK_CONTINUE, "'continue'", &continueToken) &&
+        this->expect(Token::Kind::TK_SEMICOLON, "';'")) {
+        return Continue(this->position(continueToken));
+    }
+    return {};
+}
+
+/* DISCARD SEMICOLON */
+// Parses a discard statement.
+DSLStatement Parser::discardStatement() {
+    Token start;
+    // Bug fix: the expected-token message previously read "'continue'" (a copy/paste from
+    // continueStatement); report the correct keyword.
+    if (!this->expect(Token::Kind::TK_DISCARD, "'discard'", &start)) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_SEMICOLON, "';'")) {
+        return {};
+    }
+    return Discard(this->position(start));
+}
+
+/* LBRACE statement* RBRACE */
+// Parses a brace-enclosed statement block, introducing a new symbol-table scope. Returns
+// nullopt on failure; statements that fail non-fatally are skipped so later errors surface.
+std::optional<DSLBlock> Parser::block() {
+    Token start;
+    if (!this->expect(Token::Kind::TK_LBRACE, "'{'", &start)) {
+        return std::nullopt;
+    }
+    AutoDepth depth(this);
+    if (!depth.increase()) {
+        return std::nullopt;
+    }
+    AutoSymbolTable symbols(this);
+    StatementArray statements;
+    for (;;) {
+        switch (this->peek().fKind) {
+            case Token::Kind::TK_RBRACE:
+                this->nextToken();
+                return DSLBlock(std::move(statements), this->symbolTable(), this->rangeFrom(start));
+            case Token::Kind::TK_END_OF_FILE:
+                this->error(this->peek(), "expected '}', but found end of file");
+                return std::nullopt;
+            default: {
+                DSLStatement statement = this->statement();
+                if (fEncounteredFatalError) {
+                    return std::nullopt;
+                }
+                // A valueless statement here means a recoverable error was already reported;
+                // keep going so the rest of the block is still checked.
+                if (statement.hasValue()) {
+                    statements.push_back(statement.release());
+                }
+                break;
+            }
+        }
+    }
+}
+
+/* expression SEMICOLON */
+DSLStatement Parser::expressionStatement() {
+    // Parse the expression first; guard-style early exits on either failure.
+    DSLExpression expr = this->expression();
+    if (!expr.hasValue()) {
+        return {};
+    }
+    // The expression must be terminated by a semicolon to form a statement.
+    if (!this->expect(Token::Kind::TK_SEMICOLON, "';'")) {
+        return {};
+    }
+    return DSLStatement(std::move(expr));
+}
+
+// Shared helper for all binary-operator productions: consumes the (already-peeked) operator
+// token, parses the right-hand operand via `rightFn`, and folds it into `result` in place.
+// `depth` guards against unbounded expression nesting. Returns false on parse failure.
+bool Parser::operatorRight(Parser::AutoDepth& depth,
+                           Operator::Kind op,
+                           BinaryParseFn rightFn,
+                           DSLExpression& result) {
+    this->nextToken();
+    if (!depth.increase()) {
+        return false;
+    }
+    DSLExpression right = (this->*rightFn)();
+    if (!right.hasValue()) {
+        return false;
+    }
+    // The combined expression spans from the left operand through the right operand.
+    Position pos = result.position().rangeThrough(right.position());
+    DSLExpression next = result.binary(op, std::move(right), pos);
+    result.swap(next);
+    return true;
+}
+
+/* assignmentExpression (COMMA assignmentExpression)* */
+// Parses a full expression: an assignment expression optionally followed by comma-operator
+// clauses, folded left to right. Debug builds assert the result's position is sane.
+DSLExpression Parser::expression() {
+    // `start` is only consulted by the SkASSERTF diagnostics below.
+    [[maybe_unused]] Token start = this->peek();
+    DSLExpression result = this->assignmentExpression();
+    if (!result.hasValue()) {
+        return {};
+    }
+    // (Removed an unused local `Token t` that was previously declared here.)
+    AutoDepth depth(this);
+    while (this->peek().fKind == Token::Kind::TK_COMMA) {
+        if (!operatorRight(depth, Operator::Kind::COMMA, &Parser::assignmentExpression,
+                           result)) {
+            return {};
+        }
+    }
+    SkASSERTF(result.position().valid(), "Expression %s has invalid position",
+              result.description().c_str());
+    SkASSERTF(result.position().startOffset() == this->position(start).startOffset(),
+              "Expected %s to start at %d (first token: '%.*s'), but it has range %d-%d\n",
+              result.description().c_str(), this->position(start).startOffset(),
+              (int)this->text(start).length(), this->text(start).data(),
+              result.position().startOffset(), result.position().endOffset());
+    return result;
+}
+
+/* ternaryExpression ((EQEQ | STAREQ | SLASHEQ | PERCENTEQ | PLUSEQ | MINUSEQ | SHLEQ | SHREQ |
+ BITWISEANDEQ | BITWISEXOREQ | BITWISEOREQ | LOGICALANDEQ | LOGICALXOREQ | LOGICALOREQ)
+ assignmentExpression)*
+ */
+// Parses a ternary expression optionally followed by assignment operators. Each operator's
+// right-hand side recursively calls assignmentExpression(), giving the right-associativity
+// that assignment requires.
+DSLExpression Parser::assignmentExpression() {
+    AutoDepth depth(this);
+    DSLExpression result = this->ternaryExpression();
+    if (!result.hasValue()) {
+        return {};
+    }
+    for (;;) {
+        // Dispatch on the operator token; any non-assignment token ends the production.
+        switch (this->peek().fKind) {
+            case Token::Kind::TK_EQ:
+                if (!operatorRight(depth, Operator::Kind::EQ, &Parser::assignmentExpression,
+                                   result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_STAREQ:
+                if (!operatorRight(depth, Operator::Kind::STAREQ, &Parser::assignmentExpression,
+                                   result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_SLASHEQ:
+                if (!operatorRight(depth, Operator::Kind::SLASHEQ, &Parser::assignmentExpression,
+                                   result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_PERCENTEQ:
+                if (!operatorRight(depth, Operator::Kind::PERCENTEQ,
+                                   &Parser::assignmentExpression, result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_PLUSEQ:
+                if (!operatorRight(depth, Operator::Kind::PLUSEQ, &Parser::assignmentExpression,
+                                   result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_MINUSEQ:
+                if (!operatorRight(depth, Operator::Kind::MINUSEQ, &Parser::assignmentExpression,
+                                   result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_SHLEQ:
+                if (!operatorRight(depth, Operator::Kind::SHLEQ, &Parser::assignmentExpression,
+                                   result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_SHREQ:
+                if (!operatorRight(depth, Operator::Kind::SHREQ, &Parser::assignmentExpression,
+                                   result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_BITWISEANDEQ:
+                if (!operatorRight(depth, Operator::Kind::BITWISEANDEQ,
+                                   &Parser::assignmentExpression, result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_BITWISEXOREQ:
+                if (!operatorRight(depth, Operator::Kind::BITWISEXOREQ,
+                                   &Parser::assignmentExpression, result)) {
+                    return {};
+                }
+                break;
+            case Token::Kind::TK_BITWISEOREQ:
+                if (!operatorRight(depth, Operator::Kind::BITWISEOREQ,
+                                   &Parser::assignmentExpression, result)) {
+                    return {};
+                }
+                break;
+            default:
+                return result;
+        }
+    }
+}
+
+/* logicalOrExpression ('?' expression ':' assignmentExpression)? */
+// Parses a logical-or expression optionally followed by a `?:` conditional. Note the middle
+// operand is a full expression while the third is an assignment expression, mirroring C.
+DSLExpression Parser::ternaryExpression() {
+    DSLExpression base = this->logicalOrExpression();
+    if (!base.hasValue()) {
+        return {};
+    }
+    if (!this->checkNext(Token::Kind::TK_QUESTION)) {
+        return base;
+    }
+    AutoDepth depth(this);
+    if (!depth.increase()) {
+        return {};
+    }
+    DSLExpression trueExpr = this->expression();
+    if (!trueExpr.hasValue()) {
+        return {};
+    }
+    if (!this->expect(Token::Kind::TK_COLON, "':'")) {
+        return {};
+    }
+    DSLExpression falseExpr = this->assignmentExpression();
+    if (!falseExpr.hasValue()) {
+        return {};
+    }
+    Position pos = base.position().rangeThrough(falseExpr.position());
+    return Select(std::move(base), std::move(trueExpr), std::move(falseExpr), pos);
+}
+
+/* logicalXorExpression (LOGICALOR logicalXorExpression)* */
+DSLExpression Parser::logicalOrExpression() {
+    AutoDepth depth(this);
+    DSLExpression expr = this->logicalXorExpression();
+    if (!expr.hasValue()) {
+        return {};
+    }
+    // Left-fold any number of `|| <logicalXorExpression>` clauses onto the result.
+    for (;;) {
+        if (this->peek().fKind != Token::Kind::TK_LOGICALOR) {
+            return expr;
+        }
+        if (!operatorRight(depth, Operator::Kind::LOGICALOR, &Parser::logicalXorExpression,
+                           expr)) {
+            return {};
+        }
+    }
+}
+
+/* logicalAndExpression (LOGICALXOR logicalAndExpression)* */
+DSLExpression Parser::logicalXorExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->logicalAndExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ while (this->peek().fKind == Token::Kind::TK_LOGICALXOR) {
+ if (!operatorRight(depth, Operator::Kind::LOGICALXOR, &Parser::logicalAndExpression,
+ result)) {
+ return {};
+ }
+ }
+ return result;
+}
+
+/* bitwiseOrExpression (LOGICALAND bitwiseOrExpression)* */
+DSLExpression Parser::logicalAndExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->bitwiseOrExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ while (this->peek().fKind == Token::Kind::TK_LOGICALAND) {
+ if (!operatorRight(depth, Operator::Kind::LOGICALAND, &Parser::bitwiseOrExpression,
+ result)) {
+ return {};
+ }
+ }
+ return result;
+}
+
+/* bitwiseXorExpression (BITWISEOR bitwiseXorExpression)* */
+DSLExpression Parser::bitwiseOrExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->bitwiseXorExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ while (this->peek().fKind == Token::Kind::TK_BITWISEOR) {
+ if (!operatorRight(depth, Operator::Kind::BITWISEOR, &Parser::bitwiseXorExpression,
+ result)) {
+ return {};
+ }
+ }
+ return result;
+}
+
+/* bitwiseAndExpression (BITWISEXOR bitwiseAndExpression)* */
+DSLExpression Parser::bitwiseXorExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->bitwiseAndExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ while (this->peek().fKind == Token::Kind::TK_BITWISEXOR) {
+ if (!operatorRight(depth, Operator::Kind::BITWISEXOR, &Parser::bitwiseAndExpression,
+ result)) {
+ return {};
+ }
+ }
+ return result;
+}
+
+/* equalityExpression (BITWISEAND equalityExpression)* */
+DSLExpression Parser::bitwiseAndExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->equalityExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ while (this->peek().fKind == Token::Kind::TK_BITWISEAND) {
+ if (!operatorRight(depth, Operator::Kind::BITWISEAND, &Parser::equalityExpression,
+ result)) {
+ return {};
+ }
+ }
+ return result;
+}
+
+/* relationalExpression ((EQEQ | NEQ) relationalExpression)* */
+DSLExpression Parser::equalityExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->relationalExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::Kind::TK_EQEQ:
+ if (!operatorRight(depth, Operator::Kind::EQEQ, &Parser::relationalExpression,
+ result)) {
+ return {};
+ }
+ break;
+ case Token::Kind::TK_NEQ:
+ if (!operatorRight(depth, Operator::Kind::NEQ, &Parser::relationalExpression,
+ result)) {
+ return {};
+ }
+ break;
+ default: return result;
+ }
+ }
+}
+
+/* shiftExpression ((LT | GT | LTEQ | GTEQ) shiftExpression)* */
+DSLExpression Parser::relationalExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->shiftExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::Kind::TK_LT:
+ if (!operatorRight(depth, Operator::Kind::LT, &Parser::shiftExpression,
+ result)) {
+ return {};
+ }
+ break;
+ case Token::Kind::TK_GT:
+ if (!operatorRight(depth, Operator::Kind::GT, &Parser::shiftExpression,
+ result)) {
+ return {};
+ }
+ break;
+ case Token::Kind::TK_LTEQ:
+ if (!operatorRight(depth, Operator::Kind::LTEQ, &Parser::shiftExpression,
+ result)) {
+ return {};
+ }
+ break;
+ case Token::Kind::TK_GTEQ:
+ if (!operatorRight(depth, Operator::Kind::GTEQ, &Parser::shiftExpression,
+ result)) {
+ return {};
+ }
+ break;
+ default:
+ return result;
+ }
+ }
+}
+
+/* additiveExpression ((SHL | SHR) additiveExpression)* */
+DSLExpression Parser::shiftExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->additiveExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::Kind::TK_SHL:
+ if (!operatorRight(depth, Operator::Kind::SHL, &Parser::additiveExpression,
+ result)) {
+ return {};
+ }
+ break;
+ case Token::Kind::TK_SHR:
+ if (!operatorRight(depth, Operator::Kind::SHR, &Parser::additiveExpression,
+ result)) {
+ return {};
+ }
+ break;
+ default:
+ return result;
+ }
+ }
+}
+
+/* multiplicativeExpression ((PLUS | MINUS) multiplicativeExpression)* */
+DSLExpression Parser::additiveExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->multiplicativeExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::Kind::TK_PLUS:
+ if (!operatorRight(depth, Operator::Kind::PLUS,
+ &Parser::multiplicativeExpression, result)) {
+ return {};
+ }
+ break;
+ case Token::Kind::TK_MINUS:
+ if (!operatorRight(depth, Operator::Kind::MINUS,
+ &Parser::multiplicativeExpression, result)) {
+ return {};
+ }
+ break;
+ default:
+ return result;
+ }
+ }
+}
+
+/* unaryExpression ((STAR | SLASH | PERCENT) unaryExpression)* */
+DSLExpression Parser::multiplicativeExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->unaryExpression();
+ if (!result.hasValue()) {
+ return {};
+ }
+ for (;;) {
+ switch (this->peek().fKind) {
+ case Token::Kind::TK_STAR:
+ if (!operatorRight(depth, Operator::Kind::STAR, &Parser::unaryExpression,
+ result)) {
+ return {};
+ }
+ break;
+ case Token::Kind::TK_SLASH:
+ if (!operatorRight(depth, Operator::Kind::SLASH, &Parser::unaryExpression,
+ result)) {
+ return {};
+ }
+ break;
+ case Token::Kind::TK_PERCENT:
+ if (!operatorRight(depth, Operator::Kind::PERCENT, &Parser::unaryExpression,
+ result)) {
+ return {};
+ }
+ break;
+ default: return result;
+ }
+ }
+}
+
+/* postfixExpression | (PLUS | MINUS | NOT | PLUSPLUS | MINUSMINUS) unaryExpression */
+DSLExpression Parser::unaryExpression() {
+ AutoDepth depth(this);
+ Token start = this->peek();
+ switch (start.fKind) {
+ case Token::Kind::TK_PLUS:
+ case Token::Kind::TK_MINUS:
+ case Token::Kind::TK_LOGICALNOT:
+ case Token::Kind::TK_BITWISENOT:
+ case Token::Kind::TK_PLUSPLUS:
+ case Token::Kind::TK_MINUSMINUS: {
+ this->nextToken();
+ if (!depth.increase()) {
+ return {};
+ }
+ DSLExpression expr = this->unaryExpression();
+ if (!expr.hasValue()) {
+ return {};
+ }
+ Position p = Position::Range(start.fOffset, expr.position().endOffset());
+ switch (start.fKind) {
+ case Token::Kind::TK_PLUS: return expr.prefix(Operator::Kind::PLUS, p);
+ case Token::Kind::TK_MINUS: return expr.prefix(Operator::Kind::MINUS, p);
+ case Token::Kind::TK_LOGICALNOT: return expr.prefix(Operator::Kind::LOGICALNOT, p);
+ case Token::Kind::TK_BITWISENOT: return expr.prefix(Operator::Kind::BITWISENOT, p);
+ case Token::Kind::TK_PLUSPLUS: return expr.prefix(Operator::Kind::PLUSPLUS, p);
+ case Token::Kind::TK_MINUSMINUS: return expr.prefix(Operator::Kind::MINUSMINUS, p);
+ default: SkUNREACHABLE;
+ }
+ }
+ default:
+ return this->postfixExpression();
+ }
+}
+
+/* term suffix* */
+DSLExpression Parser::postfixExpression() {
+ AutoDepth depth(this);
+ DSLExpression result = this->term();
+ if (!result.hasValue()) {
+ return {};
+ }
+ for (;;) {
+ Token t = this->peek();
+ switch (t.fKind) {
+ case Token::Kind::TK_FLOAT_LITERAL:
+ if (this->text(t)[0] != '.') {
+ return result;
+ }
+ [[fallthrough]];
+ case Token::Kind::TK_LBRACKET:
+ case Token::Kind::TK_DOT:
+ case Token::Kind::TK_LPAREN:
+ case Token::Kind::TK_PLUSPLUS:
+ case Token::Kind::TK_MINUSMINUS: {
+ if (!depth.increase()) {
+ return {};
+ }
+ DSLExpression next = this->suffix(std::move(result));
+ if (!next.hasValue()) {
+ return {};
+ }
+ result.swap(next);
+ break;
+ }
+ default:
+ return result;
+ }
+ }
+}
+
+DSLExpression Parser::swizzle(Position pos,
+ DSLExpression base,
+ std::string_view swizzleMask,
+ Position maskPos) {
+ SkASSERT(swizzleMask.length() > 0);
+ if (!base.type().isVector() && !base.type().isScalar()) {
+ return base.field(swizzleMask, pos);
+ }
+ int length = swizzleMask.length();
+ SkSL::SwizzleComponent::Type components[4];
+ for (int i = 0; i < length; ++i) {
+ if (i >= 4) {
+ Position errorPos = maskPos.valid() ? Position::Range(maskPos.startOffset() + 4,
+ maskPos.endOffset())
+ : pos;
+ this->error(errorPos, "too many components in swizzle mask");
+ return DSLExpression::Poison(pos);
+ }
+ switch (swizzleMask[i]) {
+ case '0': components[i] = SwizzleComponent::ZERO; break;
+ case '1': components[i] = SwizzleComponent::ONE; break;
+ case 'r': components[i] = SwizzleComponent::R; break;
+ case 'x': components[i] = SwizzleComponent::X; break;
+ case 's': components[i] = SwizzleComponent::S; break;
+ case 'L': components[i] = SwizzleComponent::UL; break;
+ case 'g': components[i] = SwizzleComponent::G; break;
+ case 'y': components[i] = SwizzleComponent::Y; break;
+ case 't': components[i] = SwizzleComponent::T; break;
+ case 'T': components[i] = SwizzleComponent::UT; break;
+ case 'b': components[i] = SwizzleComponent::B; break;
+ case 'z': components[i] = SwizzleComponent::Z; break;
+ case 'p': components[i] = SwizzleComponent::P; break;
+ case 'R': components[i] = SwizzleComponent::UR; break;
+ case 'a': components[i] = SwizzleComponent::A; break;
+ case 'w': components[i] = SwizzleComponent::W; break;
+ case 'q': components[i] = SwizzleComponent::Q; break;
+ case 'B': components[i] = SwizzleComponent::UB; break;
+ default: {
+ Position componentPos = Position::Range(maskPos.startOffset() + i,
+ maskPos.startOffset() + i + 1);
+ this->error(componentPos, String::printf("invalid swizzle component '%c'",
+ swizzleMask[i]).c_str());
+ return DSLExpression::Poison(pos);
+ }
+ }
+ }
+ switch (length) {
+ case 1: return dsl::Swizzle(std::move(base), components[0], pos, maskPos);
+ case 2: return dsl::Swizzle(std::move(base), components[0], components[1], pos, maskPos);
+ case 3: return dsl::Swizzle(std::move(base), components[0], components[1], components[2],
+ pos, maskPos);
+ case 4: return dsl::Swizzle(std::move(base), components[0], components[1], components[2],
+ components[3], pos, maskPos);
+ default: SkUNREACHABLE;
+ }
+}
+
+dsl::DSLExpression Parser::call(Position pos, dsl::DSLExpression base, ExpressionArray args) {
+ return base(std::move(args), pos);
+}
+
+/* LBRACKET expression? RBRACKET | DOT IDENTIFIER | LPAREN arguments RPAREN |
+ PLUSPLUS | MINUSMINUS | COLONCOLON IDENTIFIER | FLOAT_LITERAL [IDENTIFIER] */
+DSLExpression Parser::suffix(DSLExpression base) {
+ Token next = this->nextToken();
+ AutoDepth depth(this);
+ if (!depth.increase()) {
+ return {};
+ }
+ switch (next.fKind) {
+ case Token::Kind::TK_LBRACKET: {
+ if (this->checkNext(Token::Kind::TK_RBRACKET)) {
+ this->error(this->rangeFrom(next), "missing index in '[]'");
+ return DSLExpression::Poison(this->rangeFrom(base.position()));
+ }
+ DSLExpression index = this->expression();
+ if (!index.hasValue()) {
+ return {};
+ }
+ this->expect(Token::Kind::TK_RBRACKET, "']' to complete array access expression");
+ return base.index(std::move(index), this->rangeFrom(base.position()));
+ }
+ case Token::Kind::TK_DOT: {
+ std::string_view text;
+ if (this->identifier(&text)) {
+ Position pos = this->rangeFrom(base.position());
+ return this->swizzle(pos, std::move(base), text,
+ this->rangeFrom(this->position(next).after()));
+ }
+ [[fallthrough]];
+ }
+ case Token::Kind::TK_FLOAT_LITERAL: {
+ // Swizzles that start with a constant number, e.g. '.000r', will be tokenized as
+ // floating point literals, possibly followed by an identifier. Handle that here.
+ std::string_view field = this->text(next);
+ SkASSERT(field[0] == '.');
+ field.remove_prefix(1);
+ // use the next *raw* token so we don't ignore whitespace - we only care about
+ // identifiers that directly follow the float
+ Position pos = this->rangeFrom(base.position());
+ Position start = this->position(next);
+ // skip past the "."
+ start = Position::Range(start.startOffset() + 1, start.endOffset());
+ Position maskPos = this->rangeFrom(start);
+ Token id = this->nextRawToken();
+ if (id.fKind == Token::Kind::TK_IDENTIFIER) {
+ pos = this->rangeFrom(base.position());
+ maskPos = this->rangeFrom(start);
+ return this->swizzle(pos, std::move(base), std::string(field) +
+ std::string(this->text(id)), maskPos);
+ } else if (field.empty()) {
+ this->error(pos, "expected field name or swizzle mask after '.'");
+ return {{DSLExpression::Poison(pos)}};
+ }
+ this->pushback(id);
+ return this->swizzle(pos, std::move(base), field, maskPos);
+ }
+ case Token::Kind::TK_LPAREN: {
+ ExpressionArray args;
+ if (this->peek().fKind != Token::Kind::TK_RPAREN) {
+ for (;;) {
+ DSLExpression expr = this->assignmentExpression();
+ if (!expr.hasValue()) {
+ return {};
+ }
+ args.push_back(expr.release());
+ if (!this->checkNext(Token::Kind::TK_COMMA)) {
+ break;
+ }
+ }
+ }
+ this->expect(Token::Kind::TK_RPAREN, "')' to complete function arguments");
+ Position pos = this->rangeFrom(base.position());
+ return this->call(pos, std::move(base), std::move(args));
+ }
+ case Token::Kind::TK_PLUSPLUS:
+ return base.postfix(Operator::Kind::PLUSPLUS, this->rangeFrom(base.position()));
+ case Token::Kind::TK_MINUSMINUS:
+ return base.postfix(Operator::Kind::MINUSMINUS, this->rangeFrom(base.position()));
+ default: {
+ this->error(next, "expected expression suffix, but found '" +
+ std::string(this->text(next)) + "'");
+ return {};
+ }
+ }
+}
+
+/* IDENTIFIER | intLiteral | floatLiteral | boolLiteral | '(' expression ')' */
+DSLExpression Parser::term() {
+ Token t = this->peek();
+ switch (t.fKind) {
+ case Token::Kind::TK_IDENTIFIER: {
+ std::string_view text;
+ if (this->identifier(&text)) {
+ Position pos = this->position(t);
+ return DSLExpression(fCompiler.convertIdentifier(pos, text), pos);
+ }
+ break;
+ }
+ case Token::Kind::TK_INT_LITERAL: {
+ SKSL_INT i;
+ if (!this->intLiteral(&i)) {
+ i = 0;
+ }
+ return DSLExpression(i, this->position(t));
+ }
+ case Token::Kind::TK_FLOAT_LITERAL: {
+ SKSL_FLOAT f;
+ if (!this->floatLiteral(&f)) {
+ f = 0.0f;
+ }
+ return DSLExpression(f, this->position(t));
+ }
+ case Token::Kind::TK_TRUE_LITERAL: // fall through
+ case Token::Kind::TK_FALSE_LITERAL: {
+ bool b;
+ SkAssertResult(this->boolLiteral(&b));
+ return DSLExpression(b, this->position(t));
+ }
+ case Token::Kind::TK_LPAREN: {
+ this->nextToken();
+ AutoDepth depth(this);
+ if (!depth.increase()) {
+ return {};
+ }
+ DSLExpression result = this->expression();
+ if (result.hasValue()) {
+ this->expect(Token::Kind::TK_RPAREN, "')' to complete expression");
+ result.setPosition(this->rangeFrom(this->position(t)));
+ return result;
+ }
+ break;
+ }
+ default:
+ this->nextToken();
+ this->error(t, "expected expression, but found '" + std::string(this->text(t)) + "'");
+ fEncounteredFatalError = true;
+ break;
+ }
+ return {};
+}
+
+/* INT_LITERAL */
+bool Parser::intLiteral(SKSL_INT* dest) {
+ Token t;
+ if (!this->expect(Token::Kind::TK_INT_LITERAL, "integer literal", &t)) {
+ return false;
+ }
+ std::string_view s = this->text(t);
+ if (!SkSL::stoi(s, dest)) {
+ this->error(t, "integer is too large: " + std::string(s));
+ return false;
+ }
+ return true;
+}
+
+/* FLOAT_LITERAL */
+bool Parser::floatLiteral(SKSL_FLOAT* dest) {
+ Token t;
+ if (!this->expect(Token::Kind::TK_FLOAT_LITERAL, "float literal", &t)) {
+ return false;
+ }
+ std::string_view s = this->text(t);
+ if (!SkSL::stod(s, dest)) {
+ this->error(t, "floating-point value is too large: " + std::string(s));
+ return false;
+ }
+ return true;
+}
+
+/* TRUE_LITERAL | FALSE_LITERAL */
+bool Parser::boolLiteral(bool* dest) {
+ Token t = this->nextToken();
+ switch (t.fKind) {
+ case Token::Kind::TK_TRUE_LITERAL:
+ *dest = true;
+ return true;
+ case Token::Kind::TK_FALSE_LITERAL:
+ *dest = false;
+ return true;
+ default:
+ this->error(t, "expected 'true' or 'false', but found '" +
+ std::string(this->text(t)) + "'");
+ return false;
+ }
+}
+
+/* IDENTIFIER */
+bool Parser::identifier(std::string_view* dest) {
+ Token t;
+ if (this->expect(Token::Kind::TK_IDENTIFIER, "identifier", &t)) {
+ *dest = this->text(t);
+ return true;
+ }
+ return false;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLParser.h b/gfx/skia/skia/src/sksl/SkSLParser.h
new file mode 100644
index 0000000000..74909f5e94
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLParser.h
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PARSER
+#define SKSL_PARSER
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/DSLCore.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLLayout.h"
+#include "include/sksl/DSLModifiers.h"
+#include "include/sksl/DSLStatement.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLLexer.h"
+#include "src/sksl/SkSLProgramSettings.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <optional>
+#include <string>
+#include <string_view>
+
+namespace SkSL {
+
+class Compiler;
+class SymbolTable;
+enum class ProgramKind : int8_t;
+struct Module;
+struct Program;
+
+namespace dsl {
+class DSLBlock;
+class DSLCase;
+class DSLGlobalVar;
+class DSLParameter;
+class DSLVarBase;
+}
+
+/**
+ * Consumes .sksl text and invokes DSL functions to instantiate the program.
+ */
+class Parser {
+public:
+ Parser(Compiler* compiler, const ProgramSettings& settings, ProgramKind kind, std::string text);
+
+ std::unique_ptr<Program> program();
+
+ std::unique_ptr<Module> moduleInheritingFrom(const Module* parent);
+
+ std::string_view text(Token token);
+
+ Position position(Token token);
+
+private:
+ class AutoDepth;
+ class AutoSymbolTable;
+
+ /**
+ * Return the next token, including whitespace tokens, from the parse stream.
+ */
+ Token nextRawToken();
+
+ /**
+ * Return the next non-whitespace token from the parse stream.
+ */
+ Token nextToken();
+
+ /**
+ * Push a token back onto the parse stream, so that it is the next one read. Only a single level
+ * of pushback is supported (that is, it is an error to call pushback() twice in a row without
+ * an intervening nextToken()).
+ */
+ void pushback(Token t);
+
+ /**
+ * Returns the next non-whitespace token without consuming it from the stream.
+ */
+ Token peek();
+
+ /**
+ * Checks to see if the next token is of the specified type. If so, stores it in result (if
+ * result is non-null) and returns true. Otherwise, pushes it back and returns false.
+ */
+ bool checkNext(Token::Kind kind, Token* result = nullptr);
+
+ /**
+ * Behaves like checkNext(TK_IDENTIFIER), but also verifies that identifier is not a builtin
+ * type. If the token was actually a builtin type, false is returned (the next token is not
+ * considered to be an identifier).
+ */
+ bool checkIdentifier(Token* result = nullptr);
+
+ /**
+ * Reads the next non-whitespace token and generates an error if it is not the expected type.
+ * The 'expected' string is part of the error message, which reads:
+ *
+ * "expected <expected>, but found '<actual text>'"
+ *
+ * If 'result' is non-null, it is set to point to the token that was read.
+ * Returns true if the read token was as expected, false otherwise.
+ */
+ bool expect(Token::Kind kind, const char* expected, Token* result = nullptr);
+ bool expect(Token::Kind kind, std::string expected, Token* result = nullptr);
+
+ /**
+ * Behaves like expect(TK_IDENTIFIER), but also verifies that identifier is not a type.
+ * If the token was actually a type, generates an error message of the form:
+ *
+ * "expected an identifier, but found type 'float2'"
+ */
+ bool expectIdentifier(Token* result);
+
+ /** If the next token is a newline, consumes it and returns true. If not, returns false. */
+ bool expectNewline();
+
+ void error(Token token, std::string_view msg);
+ void error(Position position, std::string_view msg);
+
+ // Returns the range from `start` to the current parse position.
+ Position rangeFrom(Position start);
+ Position rangeFrom(Token start);
+
+ // these functions parse individual grammar rules from the current parse position; you probably
+ // don't need to call any of these outside of the parser. The function declarations in the .cpp
+ // file have comments describing the grammar rules.
+
+ void declarations();
+
+ /**
+ * Parses an expression representing an array size. Reports errors if the array size is not
+ * valid (out of bounds, not a literal integer). Returns true if an expression was
+ * successfully parsed, even if that array size is not actually valid. In the event of a true
+ * return, outResult always contains a valid array size (even if the parsed array size was not
+ * actually valid; invalid array sizes result in a 1 to avoid additional errors downstream).
+ */
+ bool arraySize(SKSL_INT* outResult);
+
+ void directive(bool allowVersion);
+
+ bool declaration();
+
+ bool functionDeclarationEnd(Position start,
+ dsl::DSLModifiers& modifiers,
+ dsl::DSLType type,
+ const Token& name);
+
+ struct VarDeclarationsPrefix {
+ Position fPosition;
+ dsl::DSLModifiers fModifiers;
+ dsl::DSLType fType = dsl::DSLType(dsl::kVoid_Type);
+ Token fName;
+ };
+
+ bool varDeclarationsPrefix(VarDeclarationsPrefix* prefixData);
+
+ dsl::DSLStatement varDeclarationsOrExpressionStatement();
+
+ dsl::DSLStatement varDeclarations();
+
+ dsl::DSLType structDeclaration();
+
+ SkTArray<dsl::DSLGlobalVar> structVarDeclaration(Position start,
+ const dsl::DSLModifiers& modifiers);
+
+ bool allowUnsizedArrays() {
+ return ProgramConfig::IsCompute(fKind) || ProgramConfig::IsFragment(fKind) ||
+ ProgramConfig::IsVertex(fKind);
+ }
+
+ bool parseArrayDimensions(Position pos, dsl::DSLType* type);
+
+ bool parseInitializer(Position pos, dsl::DSLExpression* initializer);
+
+ void globalVarDeclarationEnd(Position position, const dsl::DSLModifiers& mods,
+ dsl::DSLType baseType, Token name);
+
+ dsl::DSLStatement localVarDeclarationEnd(Position position, const dsl::DSLModifiers& mods,
+ dsl::DSLType baseType, Token name);
+
+ std::optional<dsl::DSLParameter> parameter(size_t paramIndex);
+
+ int layoutInt();
+
+ std::string_view layoutIdentifier();
+
+ dsl::DSLLayout layout();
+
+ dsl::DSLModifiers modifiers();
+
+ dsl::DSLStatement statement();
+
+ dsl::DSLType type(dsl::DSLModifiers* modifiers);
+
+ bool interfaceBlock(const dsl::DSLModifiers& mods);
+
+ dsl::DSLStatement ifStatement();
+
+ dsl::DSLStatement doStatement();
+
+ dsl::DSLStatement whileStatement();
+
+ dsl::DSLStatement forStatement();
+
+ std::optional<dsl::DSLCase> switchCase();
+
+ dsl::DSLStatement switchStatement();
+
+ dsl::DSLStatement returnStatement();
+
+ dsl::DSLStatement breakStatement();
+
+ dsl::DSLStatement continueStatement();
+
+ dsl::DSLStatement discardStatement();
+
+ std::optional<dsl::DSLBlock> block();
+
+ dsl::DSLStatement expressionStatement();
+
+ using BinaryParseFn = dsl::DSLExpression (Parser::*)();
+ bool SK_WARN_UNUSED_RESULT operatorRight(AutoDepth& depth, Operator::Kind op,
+ BinaryParseFn rightFn, dsl::DSLExpression& result);
+
+ dsl::DSLExpression expression();
+
+ dsl::DSLExpression assignmentExpression();
+
+ dsl::DSLExpression ternaryExpression();
+
+ dsl::DSLExpression logicalOrExpression();
+
+ dsl::DSLExpression logicalXorExpression();
+
+ dsl::DSLExpression logicalAndExpression();
+
+ dsl::DSLExpression bitwiseOrExpression();
+
+ dsl::DSLExpression bitwiseXorExpression();
+
+ dsl::DSLExpression bitwiseAndExpression();
+
+ dsl::DSLExpression equalityExpression();
+
+ dsl::DSLExpression relationalExpression();
+
+ dsl::DSLExpression shiftExpression();
+
+ dsl::DSLExpression additiveExpression();
+
+ dsl::DSLExpression multiplicativeExpression();
+
+ dsl::DSLExpression unaryExpression();
+
+ dsl::DSLExpression postfixExpression();
+
+ dsl::DSLExpression swizzle(Position pos, dsl::DSLExpression base,
+ std::string_view swizzleMask, Position maskPos);
+
+ dsl::DSLExpression call(Position pos, dsl::DSLExpression base, ExpressionArray args);
+
+ dsl::DSLExpression suffix(dsl::DSLExpression base);
+
+ dsl::DSLExpression term();
+
+ bool intLiteral(SKSL_INT* dest);
+
+ bool floatLiteral(SKSL_FLOAT* dest);
+
+ bool boolLiteral(bool* dest);
+
+ bool identifier(std::string_view* dest);
+
+ std::shared_ptr<SymbolTable>& symbolTable();
+
+ void addToSymbolTable(dsl::DSLVarBase& var, Position pos = {});
+
+ class Checkpoint {
+ public:
+ Checkpoint(Parser* p) : fParser(p) {
+ fPushbackCheckpoint = fParser->fPushback;
+ fLexerCheckpoint = fParser->fLexer.getCheckpoint();
+ fOldErrorReporter = &dsl::GetErrorReporter();
+ fOldEncounteredFatalError = fParser->fEncounteredFatalError;
+ SkASSERT(fOldErrorReporter);
+ dsl::SetErrorReporter(&fErrorReporter);
+ }
+
+ ~Checkpoint() {
+ SkASSERTF(!fOldErrorReporter,
+ "Checkpoint was not accepted or rewound before destruction");
+ }
+
+ void accept() {
+ this->restoreErrorReporter();
+ // Parser errors should have been fatal, but we can encounter other errors like type
+ // mismatches despite accepting the parse. Forward those messages to the actual error
+ // handler now.
+ fErrorReporter.forwardErrors();
+ }
+
+ void rewind() {
+ this->restoreErrorReporter();
+ fParser->fPushback = fPushbackCheckpoint;
+ fParser->fLexer.rewindToCheckpoint(fLexerCheckpoint);
+ fParser->fEncounteredFatalError = fOldEncounteredFatalError;
+ }
+
+ private:
+ class ForwardingErrorReporter : public ErrorReporter {
+ public:
+ void handleError(std::string_view msg, Position pos) override {
+ fErrors.push_back({std::string(msg), pos});
+ }
+
+ void forwardErrors() {
+ for (Error& error : fErrors) {
+ dsl::GetErrorReporter().error(error.fPos, error.fMsg);
+ }
+ }
+
+ private:
+ struct Error {
+ std::string fMsg;
+ Position fPos;
+ };
+
+ SkTArray<Error> fErrors;
+ };
+
+ void restoreErrorReporter() {
+ SkASSERT(fOldErrorReporter);
+ dsl::SetErrorReporter(fOldErrorReporter);
+ fOldErrorReporter = nullptr;
+ }
+
+ Parser* fParser;
+ Token fPushbackCheckpoint;
+ SkSL::Lexer::Checkpoint fLexerCheckpoint;
+ ForwardingErrorReporter fErrorReporter;
+ ErrorReporter* fOldErrorReporter;
+ bool fOldEncounteredFatalError;
+ };
+
+ Compiler& fCompiler;
+ ProgramSettings fSettings;
+ ErrorReporter* fErrorReporter;
+ bool fEncounteredFatalError;
+ ProgramKind fKind;
+ std::unique_ptr<std::string> fText;
+ Lexer fLexer;
+ // current parse depth, used to enforce a recursion limit to try to keep us from overflowing the
+ // stack on pathological inputs
+ int fDepth = 0;
+ Token fPushback;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLPool.cpp b/gfx/skia/skia/src/sksl/SkSLPool.cpp
new file mode 100644
index 0000000000..5ef5d0065e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLPool.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLPool.h"
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_GANESH)
+// With GPU support, SkSL::MemoryPool is really GrMemoryPool
+#include "src/gpu/ganesh/GrMemoryPool.h"
+#endif
+
+#define VLOG(...) // printf(__VA_ARGS__)
+
+namespace SkSL {
+
+static thread_local MemoryPool* sMemPool = nullptr;
+
+static MemoryPool* get_thread_local_memory_pool() {
+ return sMemPool;
+}
+
+static void set_thread_local_memory_pool(MemoryPool* memPool) {
+ sMemPool = memPool;
+}
+
+Pool::~Pool() {
+ if (get_thread_local_memory_pool() == fMemPool.get()) {
+ SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
+ set_thread_local_memory_pool(nullptr);
+ }
+
+ fMemPool->reportLeaks();
+ SkASSERT(fMemPool->isEmpty());
+
+ VLOG("DELETE Pool:0x%016llX\n", (uint64_t)fMemPool.get());
+}
+
+std::unique_ptr<Pool> Pool::Create() {
+ auto pool = std::unique_ptr<Pool>(new Pool);
+ pool->fMemPool = MemoryPool::Make(/*preallocSize=*/65536, /*minAllocSize=*/32768);
+ VLOG("CREATE Pool:0x%016llX\n", (uint64_t)pool->fMemPool.get());
+ return pool;
+}
+
+bool Pool::IsAttached() {
+ return get_thread_local_memory_pool();
+}
+
+void Pool::attachToThread() {
+ VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fMemPool.get());
+ SkASSERT(get_thread_local_memory_pool() == nullptr);
+ set_thread_local_memory_pool(fMemPool.get());
+}
+
+void Pool::detachFromThread() {
+ MemoryPool* memPool = get_thread_local_memory_pool();
+ VLOG("DETACH Pool:0x%016llX\n", (uint64_t)memPool);
+ SkASSERT(memPool == fMemPool.get());
+ memPool->resetScratchSpace();
+ set_thread_local_memory_pool(nullptr);
+}
+
+void* Pool::AllocMemory(size_t size) {
+ // Is a pool attached?
+ MemoryPool* memPool = get_thread_local_memory_pool();
+ if (memPool) {
+ void* ptr = memPool->allocate(size);
+ VLOG("ALLOC Pool:0x%016llX 0x%016llX\n", (uint64_t)memPool, (uint64_t)ptr);
+ return ptr;
+ }
+
+ // There's no pool attached. Allocate memory using the system allocator.
+ void* ptr = ::operator new(size);
+ VLOG("ALLOC Pool:__________________ 0x%016llX\n", (uint64_t)ptr);
+ return ptr;
+}
+
+void Pool::FreeMemory(void* ptr) {
+ // Is a pool attached?
+ MemoryPool* memPool = get_thread_local_memory_pool();
+ if (memPool) {
+ VLOG("FREE Pool:0x%016llX 0x%016llX\n", (uint64_t)memPool, (uint64_t)ptr);
+ memPool->release(ptr);
+ return;
+ }
+
+ // There's no pool attached. Free it using the system allocator.
+ VLOG("FREE Pool:__________________ 0x%016llX\n", (uint64_t)ptr);
+ ::operator delete(ptr);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLPool.h b/gfx/skia/skia/src/sksl/SkSLPool.h
new file mode 100644
index 0000000000..9e64d44b9a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLPool.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_POOL
+#define SKSL_POOL
+
+#include "src/sksl/SkSLMemoryPool.h"
+
+#include <cstddef>
+#include <memory>
+
+namespace SkSL {
+
+/**
+ * Efficiently allocates memory in an SkSL program. Optimized for allocate/release performance over
+ * memory efficiency.
+ *
+ * All allocated memory must be released back to the pool before it can be destroyed or recycled.
+ */
+
+class Pool {
+public:
+ ~Pool();
+
+ // Creates a pool to store objects during program creation. Call attachToThread() to start using
+ // the pool for its allocations. When your program is complete, call pool->detachFromThread() to
+ // take ownership of the pool and its allocations. Before freeing any of the program's
+ // allocations, make sure to reattach the pool by calling pool->attachToThread() again.
+ static std::unique_ptr<Pool> Create();
+
+ // Attaches a pool to the current thread.
+ // It is an error to call this while a pool is already attached.
+ void attachToThread();
+
+ // Once you are done creating or destroying objects in the pool, detach it from the thread.
+ // It is an error to call this while no pool is attached.
+ void detachFromThread();
+
+ // Allocates memory from the thread pool. If the pool is exhausted, an additional block of pool
+ // storage will be created to hold the data.
+ static void* AllocMemory(size_t size);
+
+ // Releases memory that was created by AllocMemory. All objects in the pool must be freed before
+ // the pool can be destroyed.
+ static void FreeMemory(void* ptr);
+
+ static bool IsAttached();
+
+private:
+ Pool() = default; // use Create to make a pool
+ std::unique_ptr<SkSL::MemoryPool> fMemPool;
+};
+
+/**
+ * If your class inherits from Poolable, its objects will be allocated from the pool.
+ */
+class Poolable {
+public:
+ // Override operator new and delete to allow us to use a memory pool.
+ static void* operator new(const size_t size) {
+ return Pool::AllocMemory(size);
+ }
+
+ static void operator delete(void* ptr) {
+ Pool::FreeMemory(ptr);
+ }
+};
+
+/**
+ * Temporarily attaches a pool to the current thread within a scope.
+ */
+class AutoAttachPoolToThread {
+public:
+ AutoAttachPoolToThread(Pool* p) : fPool(p) {
+ if (fPool) {
+ fPool->attachToThread();
+ }
+ }
+ ~AutoAttachPoolToThread() {
+ if (fPool) {
+ fPool->detachFromThread();
+ }
+ }
+
+private:
+ Pool* fPool = nullptr;
+};
+
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLPosition.cpp b/gfx/skia/skia/src/sksl/SkSLPosition.cpp
new file mode 100644
index 0000000000..494accaf7b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLPosition.cpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2022 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/SkSLPosition.h"
+
+#include <algorithm>
+
+namespace SkSL {
+
+int Position::line(std::string_view source) const {
+ SkASSERT(this->valid());
+ if (fStartOffset == -1) {
+ return -1;
+ }
+ if (!source.data()) {
+ return -1;
+ }
+ // we allow the offset to equal the length, because that's where TK_END_OF_FILE is reported
+ SkASSERT(fStartOffset <= (int)source.length());
+ int offset = std::min(fStartOffset, (int)source.length());
+ int line = 1;
+ for (int i = 0; i < offset; i++) {
+ if (source[i] == '\n') {
+ ++line;
+ }
+ }
+ return line;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLProgramSettings.h b/gfx/skia/skia/src/sksl/SkSLProgramSettings.h
new file mode 100644
index 0000000000..48622bb0c3
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLProgramSettings.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAMSETTINGS
+#define SKSL_PROGRAMSETTINGS
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLProgramKind.h"
+#include "include/sksl/SkSLVersion.h"
+
+#include <vector>
+
+namespace SkSL {
+
+/**
+ * Holds the compiler settings for a program.
+ */
+struct ProgramSettings {
+ // If true, the destination fragment color can be read from sk_FragColor. It must be declared
+ // inout. This is only supported in GLSL, when framebuffer-fetch is used.
+ bool fFragColorIsInOut = false;
+ // if true, all halfs are forced to be floats
+ bool fForceHighPrecision = false;
+ // if true, add -0.5 bias to LOD of all texture lookups
+ bool fSharpenTextures = false;
+ // If true, sk_FragCoord, the dFdy gradient, and sk_Clockwise won't be modified by the
+ // rtFlip. Additionally, the 'fUseFlipRTUniform' boolean will be forced to false so no rtFlip
+ // uniform will be emitted.
+ bool fForceNoRTFlip = false;
+ // if the program needs to create an RTFlip uniform, this is its offset in the uniform buffer
+ int fRTFlipOffset = -1;
+ // if the program needs to create an RTFlip uniform and is creating SPIR-V, this is the binding
+ // and set number of the uniform buffer.
+ int fRTFlipBinding = -1;
+ int fRTFlipSet = -1;
+ // If layout(set=S, binding=B) is not specified for a uniform, these values will be used.
+ // At present, zero is always used by our backends.
+ int fDefaultUniformSet = 0;
+ int fDefaultUniformBinding = 0;
+ // Enables the SkSL optimizer. Note that we never disable optimizations which are needed to
+ // fully evaluate constant-expressions, like constant folding or constant-intrinsic evaluation.
+ bool fOptimize = true;
+ // (Requires fOptimize = true) Removes any uncalled functions other than main(). Note that a
+ // function which starts out being used may end up being uncalled after optimization.
+ bool fRemoveDeadFunctions = true;
+ // (Requires fOptimize = true) Removes variables which are never used.
+ bool fRemoveDeadVariables = true;
+ // (Requires fOptimize = true) When greater than zero, enables the inliner. The threshold value
+ // sets an upper limit on the acceptable amount of code growth from inlining.
+ int fInlineThreshold = SkSL::kDefaultInlineThreshold;
+ // If true, every function in the generated program will be given the `noinline` modifier.
+ bool fForceNoInline = false;
+ // If true, implicit conversions to lower precision numeric types are allowed (e.g., float to
+ // half). These are always allowed when compiling Runtime Effects.
+ bool fAllowNarrowingConversions = false;
+ // If true, then Debug code will run SPIR-V output through the validator to ensure its
+ // correctness
+ bool fValidateSPIRV = true;
+ // If true, any synthetic uniforms must use push constant syntax
+ bool fUsePushConstants = false;
+ // TODO(skia:11209) - Replace this with a "promised" capabilities?
+ // Sets a maximum SkSL version. Compilation will fail if the program uses features that aren't
+ // allowed at the requested version. For instance, a valid program must have fully-unrollable
+ // `for` loops at version 100, but any loop structure is allowed at version 300.
+ SkSL::Version fMaxVersionAllowed = SkSL::Version::k100;
+ // If true, SkVM debug traces will contain the `trace_var` opcode. This opcode can cause the
+ // generated code to contain a lot of extra computations, because we need to explicitly compute
+ // every temporary value, even ones that would otherwise be optimized away entirely. The other
+ // debug opcodes are much less invasive on the generated code.
+ bool fAllowTraceVarInSkVMDebugTrace = true;
+ // If true, SkSL will use a memory pool for all IR nodes when compiling a program. This is
+ // usually a significant speed increase, but uses more memory, so it is a good idea for programs
+ // that will be freed shortly after compilation. It can also be useful to disable this flag when
+ // investigating memory corruption. (This controls behavior of the SkSL compiler, not the code
+ // we generate.)
+ bool fUseMemoryPool = true;
+ // If true, VarDeclaration can be cloned for testing purposes. See VarDeclaration::clone for
+ // more information.
+ bool fAllowVarDeclarationCloneForTesting = false;
+ // If true, SPIR-V codegen restricted to a subset supported by Dawn.
+ // TODO(skia:13840, skia:14023): Remove this setting when Skia can use WGSL on Dawn.
+ bool fSPIRVDawnCompatMode = false;
+};
+
+/**
+ * All the configuration data for a given program.
+ */
+struct ProgramConfig {
+ /** True if we are currently processing one of the built-in SkSL include modules. */
+ bool fIsBuiltinCode;
+ ProgramKind fKind;
+ ProgramSettings fSettings;
+
+ // When enforcesSkSLVersion() is true, this determines the available feature set that will be
+ // enforced. This is set automatically when the `#version` directive is parsed.
+ SkSL::Version fRequiredSkSLVersion = SkSL::Version::k100;
+
+ bool enforcesSkSLVersion() const {
+ return IsRuntimeEffect(fKind);
+ }
+
+ bool strictES2Mode() const {
+ // TODO(skia:11209): Remove the first condition - so this is just based on #version.
+ // Make the version check more generic (e.g., an isVersionLT helper).
+ return fSettings.fMaxVersionAllowed == Version::k100 &&
+ fRequiredSkSLVersion == Version::k100 &&
+ this->enforcesSkSLVersion();
+ }
+
+ const char* versionDescription() const {
+ if (this->enforcesSkSLVersion()) {
+ switch (fRequiredSkSLVersion) {
+ case Version::k100: return "#version 100\n";
+ case Version::k300: return "#version 300\n";
+ }
+ }
+ return "";
+ }
+
+ static bool IsFragment(ProgramKind kind) {
+ return kind == ProgramKind::kFragment ||
+ kind == ProgramKind::kGraphiteFragment;
+ }
+
+ static bool IsVertex(ProgramKind kind) {
+ return kind == ProgramKind::kVertex ||
+ kind == ProgramKind::kGraphiteVertex;
+ }
+
+ static bool IsCompute(ProgramKind kind) {
+ return kind == ProgramKind::kCompute;
+ }
+
+ static bool IsRuntimeEffect(ProgramKind kind) {
+ return (kind == ProgramKind::kRuntimeColorFilter ||
+ kind == ProgramKind::kRuntimeShader ||
+ kind == ProgramKind::kRuntimeBlender ||
+ kind == ProgramKind::kPrivateRuntimeColorFilter ||
+ kind == ProgramKind::kPrivateRuntimeShader ||
+ kind == ProgramKind::kPrivateRuntimeBlender ||
+ kind == ProgramKind::kMeshVertex ||
+ kind == ProgramKind::kMeshFragment);
+ }
+
+ static bool AllowsPrivateIdentifiers(ProgramKind kind) {
+ return (kind != ProgramKind::kRuntimeColorFilter &&
+ kind != ProgramKind::kRuntimeShader &&
+ kind != ProgramKind::kRuntimeBlender &&
+ kind != ProgramKind::kMeshVertex &&
+ kind != ProgramKind::kMeshFragment);
+ }
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLSampleUsage.cpp b/gfx/skia/skia/src/sksl/SkSLSampleUsage.cpp
new file mode 100644
index 0000000000..9b908b35e8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLSampleUsage.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLSampleUsage.h"
+
+#include <algorithm>
+
+namespace SkSL {
+
+SampleUsage SampleUsage::merge(const SampleUsage& other) {
+ // This function is only used in Analysis::MergeSampleUsageVisitor to determine the combined
+ // SampleUsage for a child fp/shader/etc. We should never see matrix sampling here.
+ SkASSERT(fKind != Kind::kUniformMatrix && other.fKind != Kind::kUniformMatrix);
+
+ static_assert(Kind::kExplicit > Kind::kPassThrough);
+ static_assert(Kind::kPassThrough > Kind::kNone);
+ fKind = std::max(fKind, other.fKind);
+
+ return *this;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLString.cpp b/gfx/skia/skia/src/sksl/SkSLString.cpp
new file mode 100644
index 0000000000..c746ff2823
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLString.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkAssert.h"
+#include "src/base/SkStringView.h"
+
+#include <cerrno>
+#include <cmath>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <locale>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <string_view>
+
+template <typename RoundtripType, int kFullPrecision>
+static std::string to_string_impl(RoundtripType value) {
+ std::stringstream buffer;
+ buffer.imbue(std::locale::classic());
+ buffer.precision(7);
+ buffer << value;
+ std::string text = buffer.str();
+
+ double roundtripped;
+ buffer >> roundtripped;
+ if (value != (RoundtripType)roundtripped && std::isfinite(value)) {
+ buffer.str({});
+ buffer.clear();
+ buffer.precision(kFullPrecision);
+ buffer << value;
+ text = buffer.str();
+ SkASSERTF((buffer >> roundtripped, value == (RoundtripType)roundtripped),
+ "%.17g -> %s -> %.17g", value, text.c_str(), roundtripped);
+ }
+
+ // We need to emit a decimal point to distinguish floats from ints.
+ if (!skstd::contains(text, '.') && !skstd::contains(text, 'e')) {
+ text += ".0";
+ }
+
+ return text;
+}
+
+std::string skstd::to_string(float value) {
+ return to_string_impl<float, 9>(value);
+}
+
+std::string skstd::to_string(double value) {
+ return to_string_impl<double, 17>(value);
+}
+
+bool SkSL::stod(std::string_view s, SKSL_FLOAT* value) {
+ std::string str(s.data(), s.size());
+ std::stringstream buffer(str);
+ buffer.imbue(std::locale::classic());
+ buffer >> *value;
+ return !buffer.fail() && std::isfinite(*value);
+}
+
+bool SkSL::stoi(std::string_view s, SKSL_INT* value) {
+ if (s.empty()) {
+ return false;
+ }
+ char suffix = s.back();
+ if (suffix == 'u' || suffix == 'U') {
+ s.remove_suffix(1);
+ }
+ std::string str(s); // s is not null-terminated
+ const char* strEnd = str.data() + str.length();
+ char* p;
+ errno = 0;
+ unsigned long long result = strtoull(str.data(), &p, /*base=*/0);
+ *value = static_cast<SKSL_INT>(result);
+ return p == strEnd && errno == 0 && result <= 0xFFFFFFFF;
+}
+
+std::string SkSL::String::printf(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ std::string result;
+ vappendf(&result, fmt, args);
+ va_end(args);
+ return result;
+}
+
+void SkSL::String::appendf(std::string *str, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ vappendf(str, fmt, args);
+ va_end(args);
+}
+
+void SkSL::String::vappendf(std::string *str, const char* fmt, va_list args) {
+ #define BUFFER_SIZE 256
+ char buffer[BUFFER_SIZE];
+ va_list reuse;
+ va_copy(reuse, args);
+ size_t size = vsnprintf(buffer, BUFFER_SIZE, fmt, args);
+ if (BUFFER_SIZE >= size + 1) {
+ str->append(buffer, size);
+ } else {
+ auto newBuffer = std::unique_ptr<char[]>(new char[size + 1]);
+ vsnprintf(newBuffer.get(), size + 1, fmt, reuse);
+ str->append(newBuffer.get(), size);
+ }
+ va_end(reuse);
+}
diff --git a/gfx/skia/skia/src/sksl/SkSLStringStream.h b/gfx/skia/skia/src/sksl/SkSLStringStream.h
new file mode 100644
index 0000000000..aabda3894e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLStringStream.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STRINGSTREAM
+#define SKSL_STRINGSTREAM
+
+#include "include/core/SkData.h"
+#include "include/core/SkStream.h"
+#include "src/sksl/SkSLOutputStream.h"
+
+namespace SkSL {
+
+class StringStream : public OutputStream {
+public:
+ void write8(uint8_t b) override {
+ SkASSERT(fString.empty());
+ fStream.write8(b);
+ }
+
+ void writeText(const char* s) override {
+ SkASSERT(fString.empty());
+ fStream.writeText(s);
+ }
+
+ void write(const void* s, size_t size) override {
+ SkASSERT(fString.empty());
+ fStream.write(s, size);
+ }
+
+ size_t bytesWritten() const {
+ return fStream.bytesWritten();
+ }
+
+ const std::string& str() const {
+ if (!fString.size()) {
+ sk_sp<SkData> data = fStream.detachAsData();
+ fString = std::string((const char*) data->data(), data->size());
+ }
+ return fString;
+ }
+
+ void reset() {
+ fStream.reset();
+ fString = "";
+ }
+
+private:
+ mutable SkDynamicMemoryWStream fStream;
+ mutable std::string fString;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLThreadContext.cpp b/gfx/skia/skia/src/sksl/SkSLThreadContext.cpp
new file mode 100644
index 0000000000..58d9b40d83
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLThreadContext.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLThreadContext.h"
+
+#include "include/private/SkSLProgramElement.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/SkSLPool.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#include <type_traits>
+
+namespace SkSL {
+
+ThreadContext::ThreadContext(SkSL::Compiler* compiler,
+ SkSL::ProgramKind kind,
+ const SkSL::ProgramSettings& settings,
+ const SkSL::Module* module,
+ bool isModule)
+ : fCompiler(compiler)
+ , fOldConfig(fCompiler->fContext->fConfig)
+ , fOldModifiersPool(fCompiler->fContext->fModifiersPool)
+ , fOldErrorReporter(*fCompiler->fContext->fErrors)
+ , fSettings(settings) {
+ if (!isModule) {
+ if (settings.fUseMemoryPool) {
+ fPool = Pool::Create();
+ fPool->attachToThread();
+ }
+ fModifiersPool = std::make_unique<SkSL::ModifiersPool>();
+ fCompiler->fContext->fModifiersPool = fModifiersPool.get();
+ }
+
+ fConfig = std::make_unique<SkSL::ProgramConfig>();
+ fConfig->fKind = kind;
+ fConfig->fSettings = settings;
+ fConfig->fIsBuiltinCode = isModule;
+ fCompiler->fContext->fConfig = fConfig.get();
+ fCompiler->fContext->fErrors = &fDefaultErrorReporter;
+ fCompiler->fContext->fModule = module;
+ fCompiler->fSymbolTable = module->fSymbols;
+ this->setupSymbolTable();
+}
+
+ThreadContext::~ThreadContext() {
+ if (SymbolTable()) {
+ fCompiler->fSymbolTable = nullptr;
+ fProgramElements.clear();
+ } else {
+ // We should only be here with a null symbol table if ReleaseProgram was called
+ SkASSERT(fProgramElements.empty());
+ }
+ fCompiler->fContext->fErrors = &fOldErrorReporter;
+ fCompiler->fContext->fConfig = fOldConfig;
+ fCompiler->fContext->fModifiersPool = fOldModifiersPool;
+ if (fPool) {
+ fPool->detachFromThread();
+ }
+}
+
+void ThreadContext::setupSymbolTable() {
+ SkSL::Context& context = *fCompiler->fContext;
+ SymbolTable::Push(&fCompiler->fSymbolTable, context.fConfig->fIsBuiltinCode);
+
+ SkSL::SymbolTable& symbolTable = *fCompiler->fSymbolTable;
+ symbolTable.markModuleBoundary();
+}
+
+SkSL::Context& ThreadContext::Context() {
+ return Compiler().context();
+}
+
+const SkSL::ProgramSettings& ThreadContext::Settings() {
+ return Context().fConfig->fSettings;
+}
+
+std::shared_ptr<SkSL::SymbolTable>& ThreadContext::SymbolTable() {
+ return Compiler().fSymbolTable;
+}
+
+const SkSL::Modifiers* ThreadContext::Modifiers(const SkSL::Modifiers& modifiers) {
+ return Context().fModifiersPool->add(modifiers);
+}
+
+ThreadContext::RTAdjustData& ThreadContext::RTAdjustState() {
+ return Instance().fRTAdjust;
+}
+
+void ThreadContext::SetErrorReporter(ErrorReporter* errorReporter) {
+ SkASSERT(errorReporter);
+ Context().fErrors = errorReporter;
+}
+
+void ThreadContext::ReportError(std::string_view msg, Position pos) {
+ GetErrorReporter().error(pos, msg);
+}
+
+void ThreadContext::DefaultErrorReporter::handleError(std::string_view msg, Position pos) {
+ SK_ABORT("error: %.*s\nNo SkSL error reporter configured, treating this as a fatal error\n",
+ (int)msg.length(), msg.data());
+}
+
+thread_local ThreadContext* instance = nullptr;
+
+bool ThreadContext::IsActive() {
+ return instance != nullptr;
+}
+
+ThreadContext& ThreadContext::Instance() {
+ SkASSERTF(instance, "dsl::Start() has not been called");
+ return *instance;
+}
+
+void ThreadContext::SetInstance(std::unique_ptr<ThreadContext> newInstance) {
+ SkASSERT((instance == nullptr) != (newInstance == nullptr));
+ delete instance;
+ instance = newInstance.release();
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLThreadContext.h b/gfx/skia/skia/src/sksl/SkSLThreadContext.h
new file mode 100644
index 0000000000..853ec4ed98
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLThreadContext.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_THREADCONTEXT
+#define SKSL_THREADCONTEXT
+
+#include "include/core/SkTypes.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+#include <cstdint>
+#include <memory>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+
+class Compiler;
+class ModifiersPool;
+class Pool;
+class ProgramElement;
+class SymbolTable;
+class Variable;
+enum class ProgramKind : int8_t;
+struct Modifiers;
+struct Module;
+
+namespace dsl {
+
+class DSLCore;
+
+} // namespace dsl
+
+/**
+ * Thread-safe class that tracks per-thread state associated with SkSL output.
+ */
+class ThreadContext {
+public:
+ ThreadContext(SkSL::Compiler* compiler,
+ SkSL::ProgramKind kind,
+ const SkSL::ProgramSettings& settings,
+ const SkSL::Module* module,
+ bool isModule);
+ ~ThreadContext();
+
+ /**
+ * Returns true if the DSL has been started.
+ */
+ static bool IsActive();
+
+ /**
+ * Returns the Compiler used by DSL operations in the current thread.
+ */
+ static SkSL::Compiler& Compiler() { return *Instance().fCompiler; }
+
+ /**
+ * Returns the Context used by DSL operations in the current thread.
+ */
+ static SkSL::Context& Context();
+
+ /**
+ * Returns the Settings used by DSL operations in the current thread.
+ */
+ static const SkSL::ProgramSettings& Settings();
+
+ /**
+ * Returns the Program::Inputs used by the current thread.
+ */
+ static SkSL::Program::Inputs& Inputs() { return Instance().fInputs; }
+
+ /**
+ * Returns the collection to which DSL program elements in this thread should be appended.
+ */
+ static std::vector<std::unique_ptr<SkSL::ProgramElement>>& ProgramElements() {
+ return Instance().fProgramElements;
+ }
+
+ static std::vector<const ProgramElement*>& SharedElements() {
+ return Instance().fSharedElements;
+ }
+
+ /**
+ * Returns the current SymbolTable.
+ */
+ static std::shared_ptr<SkSL::SymbolTable>& SymbolTable();
+
+ /**
+ * Returns the current memory pool.
+ */
+ static std::unique_ptr<Pool>& MemoryPool() { return Instance().fPool; }
+
+ /**
+ * Returns the current modifiers pool.
+ */
+ static std::unique_ptr<ModifiersPool>& GetModifiersPool() { return Instance().fModifiersPool; }
+
+ /**
+ * Returns the current ProgramConfig.
+ */
+ static const std::unique_ptr<ProgramConfig>& GetProgramConfig() { return Instance().fConfig; }
+
+ static bool IsModule() { return GetProgramConfig()->fIsBuiltinCode; }
+
+ /**
+ * Returns the final pointer to a pooled Modifiers object that should be used to represent the
+ * given modifiers.
+ */
+ static const SkSL::Modifiers* Modifiers(const SkSL::Modifiers& modifiers);
+
+ struct RTAdjustData {
+ // Points to a standalone sk_RTAdjust variable, if one exists.
+ const Variable* fVar = nullptr;
+ // Points to the interface block containing an sk_RTAdjust field, if one exists.
+ const Variable* fInterfaceBlock = nullptr;
+ // If fInterfaceBlock is non-null, contains the index of the sk_RTAdjust field within it.
+ int fFieldIndex = -1;
+ };
+
+ /**
+ * Returns a struct containing information about the RTAdjust variable.
+ */
+ static RTAdjustData& RTAdjustState();
+
+ /**
+ * Returns the ErrorReporter associated with the current thread. This object will be notified
+ * when any DSL errors occur.
+ */
+ static ErrorReporter& GetErrorReporter() {
+ return *Context().fErrors;
+ }
+
+ static void SetErrorReporter(ErrorReporter* errorReporter);
+
+ /**
+ * Notifies the current ErrorReporter that an error has occurred. The default error handler
+ * prints the message to stderr and aborts.
+ */
+ static void ReportError(std::string_view msg, Position pos = Position{});
+
+ static ThreadContext& Instance();
+
+ static void SetInstance(std::unique_ptr<ThreadContext> instance);
+
+private:
+ class DefaultErrorReporter : public ErrorReporter {
+ void handleError(std::string_view msg, Position pos) override;
+ };
+
+ void setupSymbolTable();
+
+ std::unique_ptr<SkSL::ProgramConfig> fConfig;
+ std::unique_ptr<SkSL::ModifiersPool> fModifiersPool;
+ SkSL::Compiler* fCompiler;
+ std::unique_ptr<Pool> fPool;
+ SkSL::ProgramConfig* fOldConfig;
+ SkSL::ModifiersPool* fOldModifiersPool;
+ std::vector<std::unique_ptr<SkSL::ProgramElement>> fProgramElements;
+ std::vector<const SkSL::ProgramElement*> fSharedElements;
+ DefaultErrorReporter fDefaultErrorReporter;
+ ErrorReporter& fOldErrorReporter;
+ ProgramSettings fSettings;
+ RTAdjustData fRTAdjust;
+ Program::Inputs fInputs;
+
+ friend class dsl::DSLCore;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/SkSLUtil.cpp b/gfx/skia/skia/src/sksl/SkSLUtil.cpp
new file mode 100644
index 0000000000..8efeb21790
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLUtil.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLUtil.h"
+
+#include "src/core/SkSLTypeShared.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <string>
+
+namespace SkSL {
+
+// TODO: Once Graphite has its own GPU-caps system, SK_GRAPHITE should get its own mode.
+// At the moment, it either mimics what GrShaderCaps reports, or it uses these hard-coded values
+// depending on the build.
+#if defined(SKSL_STANDALONE) || !defined(SK_GANESH)
+std::unique_ptr<ShaderCaps> ShaderCapsFactory::MakeShaderCaps() {
+ std::unique_ptr<ShaderCaps> standalone = std::make_unique<ShaderCaps>();
+ standalone->fShaderDerivativeSupport = true;
+ standalone->fExplicitTextureLodSupport = true;
+ standalone->fFlatInterpolationSupport = true;
+ standalone->fNoPerspectiveInterpolationSupport = true;
+ standalone->fSampleMaskSupport = true;
+ standalone->fExternalTextureSupport = true;
+ return standalone;
+}
+#else
+std::unique_ptr<ShaderCaps> ShaderCapsFactory::MakeShaderCaps() {
+ return std::make_unique<ShaderCaps>();
+}
+#endif // defined(SKSL_STANDALONE) || !defined(SK_GANESH)
+
+void write_stringstream(const StringStream& s, OutputStream& out) {
+ out.write(s.str().c_str(), s.str().size());
+}
+
+#if !defined(SKSL_STANDALONE) && (defined(SK_GANESH) || SK_SUPPORT_GRAPHITE)
+bool type_to_sksltype(const Context& context, const Type& type, SkSLType* outType) {
+ // If a new GrSL type is added, this function will need to be updated.
+ static_assert(kSkSLTypeCount == 41);
+
+ if (type.matches(*context.fTypes.fVoid )) { *outType = SkSLType::kVoid; return true; }
+ if (type.matches(*context.fTypes.fBool )) { *outType = SkSLType::kBool; return true; }
+ if (type.matches(*context.fTypes.fBool2 )) { *outType = SkSLType::kBool2; return true; }
+ if (type.matches(*context.fTypes.fBool3 )) { *outType = SkSLType::kBool3; return true; }
+ if (type.matches(*context.fTypes.fBool4 )) { *outType = SkSLType::kBool4; return true; }
+ if (type.matches(*context.fTypes.fShort )) { *outType = SkSLType::kShort; return true; }
+ if (type.matches(*context.fTypes.fShort2 )) { *outType = SkSLType::kShort2; return true; }
+ if (type.matches(*context.fTypes.fShort3 )) { *outType = SkSLType::kShort3; return true; }
+ if (type.matches(*context.fTypes.fShort4 )) { *outType = SkSLType::kShort4; return true; }
+ if (type.matches(*context.fTypes.fUShort )) { *outType = SkSLType::kUShort; return true; }
+ if (type.matches(*context.fTypes.fUShort2 )) { *outType = SkSLType::kUShort2; return true; }
+ if (type.matches(*context.fTypes.fUShort3 )) { *outType = SkSLType::kUShort3; return true; }
+ if (type.matches(*context.fTypes.fUShort4 )) { *outType = SkSLType::kUShort4; return true; }
+ if (type.matches(*context.fTypes.fFloat )) { *outType = SkSLType::kFloat; return true; }
+ if (type.matches(*context.fTypes.fFloat2 )) { *outType = SkSLType::kFloat2; return true; }
+ if (type.matches(*context.fTypes.fFloat3 )) { *outType = SkSLType::kFloat3; return true; }
+ if (type.matches(*context.fTypes.fFloat4 )) { *outType = SkSLType::kFloat4; return true; }
+ if (type.matches(*context.fTypes.fFloat2x2)) { *outType = SkSLType::kFloat2x2; return true; }
+ if (type.matches(*context.fTypes.fFloat3x3)) { *outType = SkSLType::kFloat3x3; return true; }
+ if (type.matches(*context.fTypes.fFloat4x4)) { *outType = SkSLType::kFloat4x4; return true; }
+ if (type.matches(*context.fTypes.fHalf )) { *outType = SkSLType::kHalf; return true; }
+ if (type.matches(*context.fTypes.fHalf2 )) { *outType = SkSLType::kHalf2; return true; }
+ if (type.matches(*context.fTypes.fHalf3 )) { *outType = SkSLType::kHalf3; return true; }
+ if (type.matches(*context.fTypes.fHalf4 )) { *outType = SkSLType::kHalf4; return true; }
+ if (type.matches(*context.fTypes.fHalf2x2 )) { *outType = SkSLType::kHalf2x2; return true; }
+ if (type.matches(*context.fTypes.fHalf3x3 )) { *outType = SkSLType::kHalf3x3; return true; }
+ if (type.matches(*context.fTypes.fHalf4x4 )) { *outType = SkSLType::kHalf4x4; return true; }
+ if (type.matches(*context.fTypes.fInt )) { *outType = SkSLType::kInt; return true; }
+ if (type.matches(*context.fTypes.fInt2 )) { *outType = SkSLType::kInt2; return true; }
+ if (type.matches(*context.fTypes.fInt3 )) { *outType = SkSLType::kInt3; return true; }
+ if (type.matches(*context.fTypes.fInt4 )) { *outType = SkSLType::kInt4; return true; }
+ if (type.matches(*context.fTypes.fUInt )) { *outType = SkSLType::kUInt; return true; }
+ if (type.matches(*context.fTypes.fUInt2 )) { *outType = SkSLType::kUInt2; return true; }
+ if (type.matches(*context.fTypes.fUInt3 )) { *outType = SkSLType::kUInt3; return true; }
+ if (type.matches(*context.fTypes.fUInt4 )) { *outType = SkSLType::kUInt4; return true; }
+ return false;
+}
+#endif
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/SkSLUtil.h b/gfx/skia/skia/src/sksl/SkSLUtil.h
new file mode 100644
index 0000000000..92dfe537a9
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/SkSLUtil.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_UTIL
+#define SKSL_UTIL
+
+#include "include/core/SkTypes.h"
+#include "include/sksl/SkSLVersion.h"
+#include "src/sksl/SkSLGLSL.h"
+
+#include <memory>
+
+enum class SkSLType : char;
+
+namespace SkSL {
+
+class Context;
+class OutputStream;
+class StringStream;
+class Type;
+
+struct ShaderCaps {
+ /**
+ * Indicates how GLSL must interact with advanced blend equations. The KHR extension requires
+ * special layout qualifiers in the fragment shader.
+ */
+ enum AdvBlendEqInteraction {
+ kNotSupported_AdvBlendEqInteraction, //!< No _blend_equation_advanced extension
+ kAutomatic_AdvBlendEqInteraction, //!< No interaction required
+ kGeneralEnable_AdvBlendEqInteraction, //!< layout(blend_support_all_equations) out
+
+ kLast_AdvBlendEqInteraction = kGeneralEnable_AdvBlendEqInteraction
+ };
+
+ bool mustEnableAdvBlendEqs() const {
+ return fAdvBlendEqInteraction >= kGeneralEnable_AdvBlendEqInteraction;
+ }
+
+ bool mustDeclareFragmentShaderOutput() const {
+ return fGLSLGeneration > SkSL::GLSLGeneration::k110;
+ }
+
+ // Returns the string of an extension that must be enabled in the shader to support
+ // derivatives. If nullptr is returned then no extension needs to be enabled. Before calling
+ // this function, the caller should check that shaderDerivativeSupport exists.
+ const char* shaderDerivativeExtensionString() const {
+ SkASSERT(this->fShaderDerivativeSupport);
+ return fShaderDerivativeExtensionString;
+ }
+
+ // This returns the name of an extension that must be enabled in the shader to support external
+ // textures. In some cases, two extensions must be enabled - the second extension is returned
+ // by secondExternalTextureExtensionString(). If that function returns nullptr, then only one
+ // extension is required.
+ const char* externalTextureExtensionString() const {
+ SkASSERT(this->fExternalTextureSupport);
+ return fExternalTextureExtensionString;
+ }
+
+ const char* secondExternalTextureExtensionString() const {
+ SkASSERT(this->fExternalTextureSupport);
+ return fSecondExternalTextureExtensionString;
+ }
+
+ /**
+ * SkSL 300 requires support for derivatives, nonsquare matrices and bitwise integer operations.
+ */
+ SkSL::Version supportedSkSLVerion() const {
+ if (fShaderDerivativeSupport && fNonsquareMatrixSupport && fIntegerSupport &&
+ fGLSLGeneration >= SkSL::GLSLGeneration::k330) {
+ return SkSL::Version::k300;
+ }
+ return SkSL::Version::k100;
+ }
+
+ bool supportsDistanceFieldText() const { return fShaderDerivativeSupport; }
+
+ SkSL::GLSLGeneration fGLSLGeneration = SkSL::GLSLGeneration::k330;
+
+ bool fShaderDerivativeSupport = false;
+ /** Enables sampleGrad and sampleLod functions that don't rely on implicit derivatives */
+ bool fExplicitTextureLodSupport = false;
+ /** Indicates true 32-bit integer support, with unsigned types and bitwise operations */
+ bool fIntegerSupport = false;
+ bool fNonsquareMatrixSupport = false;
+ /** asinh(), acosh(), atanh() */
+ bool fInverseHyperbolicSupport = false;
+ bool fFBFetchSupport = false;
+ bool fFBFetchNeedsCustomOutput = false;
+ bool fUsesPrecisionModifiers = false;
+ bool fFlatInterpolationSupport = false;
+ bool fNoPerspectiveInterpolationSupport = false;
+ bool fSampleMaskSupport = false;
+ bool fExternalTextureSupport = false;
+ bool fFloatIs32Bits = true;
+
+ // isinf() is defined, and floating point infinities are handled according to IEEE standards.
+ bool fInfinitySupport = false;
+
+ // Used by SkSL to know when to generate polyfills.
+ bool fBuiltinFMASupport = true;
+ bool fBuiltinDeterminantSupport = true;
+
+ // Used for specific driver bug work arounds
+ bool fCanUseMinAndAbsTogether = true;
+ bool fCanUseFractForNegativeValues = true;
+ bool fMustForceNegatedAtanParamToFloat = false;
+ bool fMustForceNegatedLdexpParamToMultiply = false; // http://skbug.com/12076
+ // Returns whether a device incorrectly implements atan(y,x) as atan(y/x)
+ bool fAtan2ImplementedAsAtanYOverX = false;
+ // If this returns true some operation (could be a no op) must be called between floor and abs
+ // to make sure the driver compiler doesn't inline them together which can cause a driver bug in
+ // the shader.
+ bool fMustDoOpBetweenFloorAndAbs = false;
+ // The D3D shader compiler, when targeting PS 3.0 (ie within ANGLE) fails to compile certain
+ // constructs. See detailed comments in GrGLCaps.cpp.
+ bool fMustGuardDivisionEvenAfterExplicitZeroCheck = false;
+ // If false, SkSL uses a workaround so that sk_FragCoord doesn't actually query gl_FragCoord
+ bool fCanUseFragCoord = true;
+ // If true, short ints can't represent every integer in the 16-bit two's complement range as
+ // required by the spec. SKSL will always emit full ints.
+ bool fIncompleteShortIntPrecision = false;
+ // If true, then conditions in for loops need "&& true" to work around driver bugs.
+ bool fAddAndTrueToLoopCondition = false;
+ // If true, then expressions such as "x && y" or "x || y" are rewritten as ternary to work
+ // around driver bugs.
+ bool fUnfoldShortCircuitAsTernary = false;
+ bool fEmulateAbsIntFunction = false;
+ bool fRewriteDoWhileLoops = false;
+ bool fRewriteSwitchStatements = false;
+ bool fRemovePowWithConstantExponent = false;
+ // The Android emulator claims samplerExternalOES is an unknown type if a default precision
+ // statement is made for the type.
+ bool fNoDefaultPrecisionForExternalSamplers = false;
+ // ARM GPUs calculate `matrix * vector` in SPIR-V at full precision, even when the inputs are
+ // RelaxedPrecision. Rewriting the multiply as a sum of vector*scalar fixes this. (skia:11769)
+ bool fRewriteMatrixVectorMultiply = false;
+ // Rewrites matrix equality comparisons to avoid an Adreno driver bug. (skia:11308)
+ bool fRewriteMatrixComparisons = false;
+ // Strips const from function parameters in the GLSL code generator. (skia:13858)
+ bool fRemoveConstFromFunctionParameters = false;
+
+ const char* fVersionDeclString = "";
+
+ const char* fShaderDerivativeExtensionString = nullptr;
+ const char* fExternalTextureExtensionString = nullptr;
+ const char* fSecondExternalTextureExtensionString = nullptr;
+ const char* fFBFetchColorName = nullptr;
+
+ AdvBlendEqInteraction fAdvBlendEqInteraction = kNotSupported_AdvBlendEqInteraction;
+};
+
+// Various sets of caps for use in tests
+class ShaderCapsFactory {
+public:
+ static const ShaderCaps* Default() {
+ static const SkSL::ShaderCaps* sCaps = [] {
+ std::unique_ptr<ShaderCaps> caps = MakeShaderCaps();
+ caps->fVersionDeclString = "#version 400";
+ caps->fShaderDerivativeSupport = true;
+ return caps.release();
+ }();
+ return sCaps;
+ }
+
+ static const ShaderCaps* Standalone() {
+ static const SkSL::ShaderCaps* sCaps = MakeShaderCaps().release();
+ return sCaps;
+ }
+
+protected:
+ static std::unique_ptr<ShaderCaps> MakeShaderCaps();
+};
+
+#if !defined(SKSL_STANDALONE) && (defined(SK_GANESH) || defined(SK_GRAPHITE))
+bool type_to_sksltype(const Context& context, const Type& type, SkSLType* outType);
+#endif
+
+void write_stringstream(const StringStream& d, OutputStream& out);
+
+} // namespace SkSL
+
+#endif // SKSL_UTIL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLCanExitWithoutReturningValue.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLCanExitWithoutReturningValue.cpp
new file mode 100644
index 0000000000..015f233c4c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLCanExitWithoutReturningValue.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <memory>
+
+namespace SkSL {
+class Expression;
+namespace {
+
+// Scans a statement tree and determines whether every possible execution path is guaranteed to
+// hit a `return` statement, while also tracking whether any `break` or `continue` could escape
+// the scanned statement. Expressions are never visited, since they cannot contain control-flow
+// statements. Results are read out of the public fFound… fields after visitStatement() returns.
+class ReturnsOnAllPathsVisitor : public ProgramVisitor {
+public:
+    bool visitExpression(const Expression& expr) override {
+        // We can avoid processing expressions entirely.
+        return false;
+    }
+
+    bool visitStatement(const Statement& stmt) override {
+        switch (stmt.kind()) {
+            // Returns, breaks, or continues will stop the scan, so only one of these should ever be
+            // true.
+            case Statement::Kind::kReturn:
+                fFoundReturn = true;
+                return true;
+
+            case Statement::Kind::kBreak:
+                fFoundBreak = true;
+                return true;
+
+            case Statement::Kind::kContinue:
+                fFoundContinue = true;
+                return true;
+
+            case Statement::Kind::kIf: {
+                // Scan each side of the if with its own fresh visitor, then merge the results.
+                const IfStatement& i = stmt.as<IfStatement>();
+                ReturnsOnAllPathsVisitor trueVisitor;
+                ReturnsOnAllPathsVisitor falseVisitor;
+                trueVisitor.visitStatement(*i.ifTrue());
+                if (i.ifFalse()) {
+                    falseVisitor.visitStatement(*i.ifFalse());
+                }
+                // If either branch leads to a break or continue, we report the entire if as
+                // containing a break or continue, since we don't know which side will be reached.
+                fFoundBreak = (trueVisitor.fFoundBreak || falseVisitor.fFoundBreak);
+                fFoundContinue = (trueVisitor.fFoundContinue || falseVisitor.fFoundContinue);
+                // On the other hand, we only want to report returns that definitely happen, so we
+                // require those to be found on both sides.
+                fFoundReturn = (trueVisitor.fFoundReturn && falseVisitor.fFoundReturn);
+                return fFoundBreak || fFoundContinue || fFoundReturn;
+            }
+            case Statement::Kind::kFor: {
+                const ForStatement& f = stmt.as<ForStatement>();
+                // We assume a for/while loop runs for at least one iteration; this isn't strictly
+                // guaranteed, but it's better to be slightly over-permissive here than to fail on
+                // reasonable code.
+                ReturnsOnAllPathsVisitor forVisitor;
+                forVisitor.visitStatement(*f.statement());
+                // A for loop that contains a break or continue is safe; it won't exit the entire
+                // function, just the loop. So we disregard those signals.
+                fFoundReturn = forVisitor.fFoundReturn;
+                return fFoundReturn;
+            }
+            case Statement::Kind::kDo: {
+                const DoStatement& d = stmt.as<DoStatement>();
+                // Do-while blocks are always entered at least once.
+                ReturnsOnAllPathsVisitor doVisitor;
+                doVisitor.visitStatement(*d.statement());
+                // A do-while loop that contains a break or continue is safe; it won't exit the
+                // entire function, just the loop. So we disregard those signals.
+                fFoundReturn = doVisitor.fFoundReturn;
+                return fFoundReturn;
+            }
+            case Statement::Kind::kBlock:
+                // Blocks are definitely entered and don't imply any additional control flow.
+                // If the block contains a break, continue or return, we want to keep that.
+                return INHERITED::visitStatement(stmt);
+
+            case Statement::Kind::kSwitch: {
+                // Switches are the most complex control flow we need to deal with; fortunately we
+                // already have good primitives for dissecting them. We need to verify that:
+                // - a default case exists, so that every possible input value is covered
+                // - every switch-case either (a) returns unconditionally, or
+                //   (b) falls through to another case that does
+                const SwitchStatement& s = stmt.as<SwitchStatement>();
+                bool foundDefault = false;
+                bool fellThrough = false;
+                for (const std::unique_ptr<Statement>& switchStmt : s.cases()) {
+                    // The default case is indicated by a null value. A switch without a default
+                    // case cannot definitively return, as its value might not be in the cases list.
+                    const SwitchCase& sc = switchStmt->as<SwitchCase>();
+                    if (sc.isDefault()) {
+                        foundDefault = true;
+                    }
+                    // Scan this switch-case for any exit (break, continue or return).
+                    ReturnsOnAllPathsVisitor caseVisitor;
+                    caseVisitor.visitStatement(sc);
+
+                    // If we found a break or continue, whether conditional or not, this switch case
+                    // can't be called an unconditional return. Switches absorb breaks but not
+                    // continues.
+                    if (caseVisitor.fFoundContinue) {
+                        fFoundContinue = true;
+                        return false;
+                    }
+                    if (caseVisitor.fFoundBreak) {
+                        return false;
+                    }
+                    // We just confirmed that there weren't any breaks or continues. If we didn't
+                    // find an unconditional return either, the switch is considered fallen-through.
+                    // (There might be a conditional return, but that doesn't count.)
+                    fellThrough = !caseVisitor.fFoundReturn;
+                }
+
+                // If we didn't find a default case, or the very last case fell through, this switch
+                // doesn't meet our criteria.
+                if (fellThrough || !foundDefault) {
+                    return false;
+                }
+
+                // We scanned the entire switch, found a default case, and every section either fell
+                // through or contained an unconditional return.
+                fFoundReturn = true;
+                return true;
+            }
+
+            case Statement::Kind::kSwitchCase:
+                // Recurse into the switch-case.
+                return INHERITED::visitStatement(stmt);
+
+            case Statement::Kind::kDiscard:
+            case Statement::Kind::kExpression:
+            case Statement::Kind::kNop:
+            case Statement::Kind::kVarDeclaration:
+                // None of these statements could contain a return.
+                break;
+        }
+
+        return false;
+    }
+
+    // Results of the scan; valid once visitStatement() has completed.
+    bool fFoundReturn = false;
+    bool fFoundBreak = false;
+    bool fFoundContinue = false;
+
+    using INHERITED = ProgramVisitor;
+};
+
+} // namespace
+
+bool Analysis::CanExitWithoutReturningValue(const FunctionDeclaration& funcDecl,
+                                            const Statement& body) {
+    // A void function can always exit without producing a value, so there is nothing to check.
+    if (funcDecl.returnType().isVoid()) {
+        return false;
+    }
+    // Otherwise, scan the body; if a `return` is not guaranteed on every path, control can
+    // fall off the end of the function without returning a value.
+    ReturnsOnAllPathsVisitor scanner;
+    scanner.visitStatement(body);
+    return !scanner.fFoundReturn;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLCheckProgramStructure.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLCheckProgramStructure.cpp
new file mode 100644
index 0000000000..5407d820d8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLCheckProgramStructure.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+
+// Validates structural limits on the program: reports errors for potential recursion (call
+// cycles), excessive call depth, and -- when `enforceSizeLimit` is set -- a flattened main()
+// larger than the program size budget. Always returns true; problems are reported through the
+// Context's ErrorReporter.
+bool Analysis::CheckProgramStructure(const Program& program, bool enforceSizeLimit) {
+    // We check the size of strict-ES2 programs; since SkVM will completely unroll them, it's
+    // important to know how large the result will be. For non-ES2 code, we compute an approximate
+    // lower bound by assuming all non-unrollable loops will execute one time only.
+    const Context& context = *program.fContext;
+
+    // If we decide that expressions are cheaper than statements, or that certain statements are
+    // more expensive than others, etc., we can always tweak these ratios as needed. A very rough
+    // ballpark estimate is currently good enough for our purposes.
+    static constexpr size_t kExpressionCost = 1;
+    static constexpr size_t kStatementCost = 1;
+    // Sentinel (wraps to SIZE_MAX) marking a function that is currently on the visitation stack.
+    static constexpr size_t kUnknownCost = -1;
+    static constexpr size_t kProgramSizeLimit = 100000;
+    static constexpr size_t kProgramStackDepthLimit = 50;
+
+    class ProgramSizeVisitor : public ProgramVisitor {
+    public:
+        ProgramSizeVisitor(const Context& c) : fContext(c) {}
+
+        using ProgramVisitor::visitProgramElement;
+
+        // Size of the most recently visited function, in arbitrary cost units.
+        size_t functionSize() const {
+            return fFunctionSize;
+        }
+
+        bool visitProgramElement(const ProgramElement& pe) override {
+            if (pe.is<FunctionDefinition>()) {
+                // Check the function-size cache map first. We don't need to visit this function if
+                // we already processed it before.
+                const FunctionDeclaration* decl = &pe.as<FunctionDefinition>().declaration();
+                if (size_t *cachedCost = fFunctionCostMap.find(decl)) {
+                    // We already have this function in our map. We don't need to check it again.
+                    if (*cachedCost == kUnknownCost) {
+                        // If the function is present in the map with an unknown cost, we're
+                        // recursively processing it--in other words, we found a cycle in the code.
+                        // Unwind our stack into a string.
+                        std::string msg = "\n\t" + decl->description();
+                        for (auto unwind = fStack.rbegin(); unwind != fStack.rend(); ++unwind) {
+                            msg = "\n\t" + (*unwind)->description() + msg;
+                            if (*unwind == decl) {
+                                break;
+                            }
+                        }
+                        msg = "potential recursion (function call cycle) not allowed:" + msg;
+                        fContext.fErrors->error(pe.fPosition, std::move(msg));
+                        // Overwrite the sentinel with zero so the error is only reported once.
+                        fFunctionSize = 0;
+                        *cachedCost = 0;
+                        return true;
+                    }
+                    // Set the size to its known value.
+                    fFunctionSize = *cachedCost;
+                    return false;
+                }
+
+                // If the function-call stack has gotten too deep, stop the analysis.
+                if (fStack.size() >= kProgramStackDepthLimit) {
+                    std::string msg = "exceeded max function call depth:";
+                    for (auto unwind = fStack.begin(); unwind != fStack.end(); ++unwind) {
+                        msg += "\n\t" + (*unwind)->description();
+                    }
+                    msg += "\n\t" + decl->description();
+                    fContext.fErrors->error(pe.fPosition, std::move(msg));
+                    fFunctionSize = 0;
+                    fFunctionCostMap.set(decl, 0);
+                    return true;
+                }
+
+                // Calculate the function cost and store it in our cache.
+                fFunctionCostMap.set(decl, kUnknownCost);
+                fStack.push_back(decl);
+                fFunctionSize = 0;
+                bool result = INHERITED::visitProgramElement(pe);
+                fFunctionCostMap.set(decl, fFunctionSize);
+                fStack.pop_back();
+
+                return result;
+            }
+
+            return INHERITED::visitProgramElement(pe);
+        }
+
+        bool visitStatement(const Statement& stmt) override {
+            switch (stmt.kind()) {
+                case Statement::Kind::kFor: {
+                    // We count a for-loop's unrolled size here. We expect that the init statement
+                    // will be emitted once, and the test-expr, next-expr and statement will be
+                    // repeated in the output for every iteration of the loop.
+                    bool earlyExit = false;
+                    const ForStatement& forStmt = stmt.as<ForStatement>();
+                    if (forStmt.initializer() && this->visitStatement(*forStmt.initializer())) {
+                        earlyExit = true;
+                    }
+
+                    // Accumulate the per-iteration cost separately so it can be multiplied by the
+                    // unroll count before being added back to the running total.
+                    size_t originalFunctionSize = fFunctionSize;
+                    fFunctionSize = 0;
+
+                    if (forStmt.next() && this->visitExpression(*forStmt.next())) {
+                        earlyExit = true;
+                    }
+                    if (forStmt.test() && this->visitExpression(*forStmt.test())) {
+                        earlyExit = true;
+                    }
+                    if (this->visitStatement(*forStmt.statement())) {
+                        earlyExit = true;
+                    }
+
+                    // ES2 programs always have a known unroll count. Non-ES2 programs don't enforce
+                    // a maximum program size, so it's fine to treat the loop as executing once.
+                    if (const LoopUnrollInfo* unrollInfo = forStmt.unrollInfo()) {
+                        fFunctionSize = SkSafeMath::Mul(fFunctionSize, unrollInfo->fCount);
+                    }
+                    fFunctionSize = SkSafeMath::Add(fFunctionSize, originalFunctionSize);
+                    return earlyExit;
+                }
+
+                case Statement::Kind::kExpression:
+                    // The cost of an expression-statement is counted in visitExpression. It would
+                    // be double-dipping to count it here too.
+                    break;
+
+                case Statement::Kind::kNop:
+                case Statement::Kind::kVarDeclaration:
+                    // These statements don't directly consume any space in a compiled program.
+                    break;
+
+                default:
+                    // Note that we don't make any attempt to estimate the number of iterations of
+                    // do-while loops here. Those aren't an ES2 construct so we aren't enforcing
+                    // program size on them.
+                    fFunctionSize = SkSafeMath::Add(fFunctionSize, kStatementCost);
+                    break;
+            }
+
+            return INHERITED::visitStatement(stmt);
+        }
+
+        bool visitExpression(const Expression& expr) override {
+            // Other than function calls, all expressions are assumed to have a fixed unit cost.
+            bool earlyExit = false;
+            size_t expressionCost = kExpressionCost;
+
+            if (expr.is<FunctionCall>()) {
+                // Visit this function call to calculate its size. If we've already sized it, this
+                // will retrieve the size from our cache.
+                const FunctionCall& call = expr.as<FunctionCall>();
+                const FunctionDeclaration* decl = &call.function();
+                if (decl->definition() && !decl->isIntrinsic()) {
+                    size_t originalFunctionSize = fFunctionSize;
+                    fFunctionSize = 0;
+
+                    earlyExit = this->visitProgramElement(*decl->definition());
+                    expressionCost = fFunctionSize;
+
+                    fFunctionSize = originalFunctionSize;
+                }
+            }
+
+            fFunctionSize = SkSafeMath::Add(fFunctionSize, expressionCost);
+            return earlyExit || INHERITED::visitExpression(expr);
+        }
+
+    private:
+        using INHERITED = ProgramVisitor;
+
+        const Context& fContext;
+        size_t fFunctionSize = 0;
+        // Memoized flattened size per function; kUnknownCost while a function is being visited.
+        SkTHashMap<const FunctionDeclaration*, size_t> fFunctionCostMap;
+        // Current chain of function calls, used for cycle detection and error messages.
+        std::vector<const FunctionDeclaration*> fStack;
+    };
+
+    // Process every function in our program.
+    ProgramSizeVisitor visitor{context};
+    for (const std::unique_ptr<ProgramElement>& element : program.fOwnedElements) {
+        if (element->is<FunctionDefinition>()) {
+            // Visit every function--we want to detect static recursion and report it as an error,
+            // even in unreferenced functions.
+            visitor.visitProgramElement(*element);
+            // Report an error when main()'s flattened size is larger than our program limit.
+            if (enforceSizeLimit &&
+                visitor.functionSize() > kProgramSizeLimit &&
+                element->as<FunctionDefinition>().declaration().isMain()) {
+                context.fErrors->error(Position(), "program is too large");
+            }
+        }
+    }
+
+    return true;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLFinalizationChecks.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLFinalizationChecks.cpp
new file mode 100644
index 0000000000..f1c0b4cdfa
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLFinalizationChecks.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/base/SkSafeMath.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace SkSL {
+namespace {
+
+// Runs the final validation passes over a finished program: global-variable slot limits (runtime
+// effects only), duplicate interface-block (set, binding) pairs, `out` parameters that are never
+// assigned, calls to declared-but-undefined functions, and leftover invalid expressions.
+// All problems are reported through the Context's ErrorReporter.
+class FinalizationVisitor : public ProgramVisitor {
+public:
+    FinalizationVisitor(const Context& c, const ProgramUsage& u) : fContext(c), fUsage(u) {}
+
+    bool visitProgramElement(const ProgramElement& pe) override {
+        switch (pe.kind()) {
+            case ProgramElement::Kind::kGlobalVar:
+                this->checkGlobalVariableSizeLimit(pe.as<GlobalVarDeclaration>());
+                break;
+            case ProgramElement::Kind::kInterfaceBlock:
+                // TODO(skia:13664): Enforce duplicate checks universally. This is currently not
+                // possible without changes to the binding index assignment logic in graphite.
+                this->checkBindUniqueness(pe.as<InterfaceBlock>());
+                break;
+            case ProgramElement::Kind::kFunction:
+                this->checkOutParamsAreAssigned(pe.as<FunctionDefinition>());
+                break;
+            default:
+                break;
+        }
+        return INHERITED::visitProgramElement(pe);
+    }
+
+    // Accumulates the slot count of each global; errors if the running total crosses
+    // kVariableSlotLimit. Only applies to runtime effects.
+    void checkGlobalVariableSizeLimit(const GlobalVarDeclaration& globalDecl) {
+        if (!ProgramConfig::IsRuntimeEffect(fContext.fConfig->fKind)) {
+            return;
+        }
+        const VarDeclaration& decl = globalDecl.varDeclaration();
+
+        size_t prevSlotsUsed = fGlobalSlotsUsed;
+        fGlobalSlotsUsed = SkSafeMath::Add(fGlobalSlotsUsed, decl.var()->type().slotCount());
+        // To avoid overzealous error reporting, only trigger the error at the first place where the
+        // global limit is exceeded.
+        if (prevSlotsUsed < kVariableSlotLimit && fGlobalSlotsUsed >= kVariableSlotLimit) {
+            fContext.fErrors->error(decl.fPosition,
+                                    "global variable '" + std::string(decl.var()->name()) +
+                                    "' exceeds the size limit");
+        }
+    }
+
+    // Reports an error if two interface blocks declare the same (set, binding) pair.
+    void checkBindUniqueness(const InterfaceBlock& block) {
+        const Variable* var = block.var();
+        int32_t set = var->modifiers().fLayout.fSet;
+        int32_t binding = var->modifiers().fLayout.fBinding;
+        if (binding != -1) {
+            // TODO(skia:13664): This should map a `set` value of -1 to the default settings value
+            // used by codegen backends to prevent duplicates that may arise from the effective
+            // default set value.
+            uint64_t key = ((uint64_t)set << 32) + binding;
+            if (!fBindings.contains(key)) {
+                fBindings.add(key);
+            } else {
+                if (set != -1) {
+                    fContext.fErrors->error(block.fPosition,
+                                            "layout(set=" + std::to_string(set) +
+                                            ", binding=" + std::to_string(binding) +
+                                            ") has already been defined");
+                } else {
+                    fContext.fErrors->error(block.fPosition,
+                                            "layout(binding=" + std::to_string(binding) +
+                                            ") has already been defined");
+                }
+            }
+        }
+    }
+
+    void checkOutParamsAreAssigned(const FunctionDefinition& funcDef) {
+        const FunctionDeclaration& funcDecl = funcDef.declaration();
+
+        // Searches for `out` parameters that are not written to. According to the GLSL spec,
+        // the value of an out-param that's never assigned to is unspecified, so report it.
+        for (const Variable* param : funcDecl.parameters()) {
+            const int paramInout = param->modifiers().fFlags & (Modifiers::Flag::kIn_Flag |
+                                                                Modifiers::Flag::kOut_Flag);
+            if (paramInout == Modifiers::Flag::kOut_Flag) {
+                ProgramUsage::VariableCounts counts = fUsage.get(*param);
+                if (counts.fWrite <= 0) {
+                    fContext.fErrors->error(param->fPosition,
+                                            "function '" + std::string(funcDecl.name()) +
+                                            "' never assigns a value to out parameter '" +
+                                            std::string(param->name()) + "'");
+                }
+            }
+        }
+    }
+
+    bool visitExpression(const Expression& expr) override {
+        switch (expr.kind()) {
+            case Expression::Kind::kFunctionCall: {
+                const FunctionDeclaration& decl = expr.as<FunctionCall>().function();
+                if (!decl.isBuiltin() && !decl.definition()) {
+                    fContext.fErrors->error(expr.fPosition, "function '" + decl.description() +
+                                                            "' is not defined");
+                }
+                break;
+            }
+            case Expression::Kind::kFunctionReference:
+            case Expression::Kind::kMethodReference:
+            case Expression::Kind::kTypeReference:
+                SkDEBUGFAIL("invalid reference-expr, should have been reported by coerce()");
+                fContext.fErrors->error(expr.fPosition, "invalid expression");
+                break;
+            default:
+                if (expr.type().matches(*fContext.fTypes.fInvalid)) {
+                    fContext.fErrors->error(expr.fPosition, "invalid expression");
+                }
+                break;
+        }
+        return INHERITED::visitExpression(expr);
+    }
+
+private:
+    using INHERITED = ProgramVisitor;
+    size_t fGlobalSlotsUsed = 0;
+    const Context& fContext;
+    const ProgramUsage& fUsage;
+    // we pack the set/binding pair into a single 64 bit int
+    SkTHashSet<uint64_t> fBindings;
+};
+
+} // namespace
+
+void Analysis::DoFinalizationChecks(const Program& program) {
+    // Built-in elements are assumed to be valid, so only the elements owned by this Program
+    // are run through the finalization visitor.
+    FinalizationVisitor checker{*program.fContext, *program.usage()};
+    for (const std::unique_ptr<ProgramElement>& ownedElement : program.fOwnedElements) {
+        checker.visitProgramElement(*ownedElement);
+    }
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLGetLoopControlFlowInfo.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLGetLoopControlFlowInfo.cpp
new file mode 100644
index 0000000000..65c9e5e424
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLGetLoopControlFlowInfo.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLAnalysis.h"
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+
+namespace SkSL {
+
+class Expression;
+
+namespace Analysis {
+namespace {
+
+// Collects which control-flow exits (break / continue / return) can affect the loop whose body
+// is being scanned. Nested loops and switches absorb their own breaks and continues, so those
+// are only recorded when seen at nesting depth zero; a return counts at any depth.
+class LoopControlFlowVisitor : public ProgramVisitor {
+public:
+    LoopControlFlowVisitor() = default;
+
+    bool visitExpression(const Expression& expr) override {
+        // Expressions cannot contain break/continue/return, so skip them entirely.
+        return false;
+    }
+
+    bool visitStatement(const Statement& stmt) override {
+        const Statement::Kind kind = stmt.kind();
+
+        if (kind == Statement::Kind::kContinue) {
+            // A continue only affects this loop's control flow when it isn't nested inside
+            // another looping structure. (Inside a switch, SkSL disallows continue entirely.)
+            if (fDepth == 0) {
+                fResult.fHasContinue = true;
+            }
+        } else if (kind == Statement::Kind::kBreak) {
+            // A break nested inside an inner loop/switch exits that construct, not this loop.
+            if (fDepth == 0) {
+                fResult.fHasBreak = true;
+            }
+        } else if (kind == Statement::Kind::kReturn) {
+            // A return aborts the loop's control flow no matter how deeply it is nested.
+            fResult.fHasReturn = true;
+        } else if (kind == Statement::Kind::kFor ||
+                   kind == Statement::Kind::kDo ||
+                   kind == Statement::Kind::kSwitch) {
+            // Entering a nested loop or switch; breaks/continues inside it don't count.
+            ++fDepth;
+            bool done = ProgramVisitor::visitStatement(stmt);
+            --fDepth;
+            return done;
+        } else {
+            return ProgramVisitor::visitStatement(stmt);
+        }
+
+        // Once all three signals have been observed, there is nothing left to find; stop early.
+        return fResult.fHasContinue && fResult.fHasBreak && fResult.fHasReturn;
+    }
+
+    LoopControlFlowInfo fResult;
+    int fDepth = 0;
+};
+
+} // namespace
+
+LoopControlFlowInfo GetLoopControlFlowInfo(const Statement& stmt) {
+    // Walk the loop body once and hand back the collected break/continue/return signals.
+    LoopControlFlowVisitor scanner;
+    scanner.visitStatement(stmt);
+    return scanner.fResult;
+}
+
+} // namespace Analysis
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLGetLoopUnrollInfo.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLGetLoopUnrollInfo.cpp
new file mode 100644
index 0000000000..cf49867392
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLGetLoopUnrollInfo.cpp
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/analysis/SkSLNoOpErrorReporter.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include <cmath>
+#include <memory>
+
+namespace SkSL {
+
+// Loops that run for 100000+ iterations will exceed our program size limit.
+static constexpr int kLoopTerminationLimit = 100000;
+
+// Computes how many iterations a loop with the given bounds will execute. `start`, `end` and
+// `delta` are the index's initial value, comparison limit, and per-iteration step; `forwards`
+// is true for < / <= conditions and false for > / >=; `inclusive` is true when the comparison
+// admits equality (<= or >=). Loops that never terminate, or that would run past the unroll
+// budget, report kLoopTerminationLimit so the caller can reject them.
+static int calculate_count(double start, double end, double delta, bool forwards, bool inclusive) {
+    if (forwards != (start < end)) {
+        // The loop starts in a completed state (the start has already advanced past the end).
+        return 0;
+    }
+    if ((delta == 0.0) || forwards != (delta > 0.0)) {
+        // The loop does not progress toward a completed state, and will never terminate.
+        return kLoopTerminationLimit;
+    }
+    // Safe divide: a division by zero here yields infinity instead of undefined behavior.
+    double iterations = sk_ieee_double_divide(end - start, delta);
+    double count = std::ceil(iterations);
+    if (inclusive && (count == iterations)) {
+        // The index lands exactly on the limit and equality is allowed, so one more trip runs.
+        count += 1.0;
+    }
+    if (count > kLoopTerminationLimit || !std::isfinite(count)) {
+        // The loop runs for more iterations than we can safely unroll.
+        return kLoopTerminationLimit;
+    }
+    return (int)count;
+}
+
+// Validates that a for-loop has the restricted, constant-bounded shape required for unrolling
+// (constant-initialized numeric index, index-vs-constant condition, constant step, and an index
+// that is never written inside the body), and computes its unroll parameters: the index
+// variable, start value, per-iteration delta, and total iteration count.
+// Returns nullptr -- reporting through `errorPtr` when one is provided -- if the loop does not
+// conform or cannot be proven to terminate within the unroll budget.
+std::unique_ptr<LoopUnrollInfo> Analysis::GetLoopUnrollInfo(Position loopPos,
+                                                            const ForLoopPositions& positions,
+                                                            const Statement* loopInitializer,
+                                                            const Expression* loopTest,
+                                                            const Expression* loopNext,
+                                                            const Statement* loopStatement,
+                                                            ErrorReporter* errorPtr) {
+    // When the caller doesn't care about diagnostics, route them into a no-op reporter.
+    NoOpErrorReporter unused;
+    ErrorReporter& errors = errorPtr ? *errorPtr : unused;
+    auto loopInfo = std::make_unique<LoopUnrollInfo>();
+
+    //
+    // init_declaration has the form: type_specifier identifier = constant_expression
+    //
+    if (!loopInitializer) {
+        Position pos = positions.initPosition.valid() ? positions.initPosition : loopPos;
+        errors.error(pos, "missing init declaration");
+        return nullptr;
+    }
+    if (!loopInitializer->is<VarDeclaration>()) {
+        errors.error(loopInitializer->fPosition, "invalid init declaration");
+        return nullptr;
+    }
+    const VarDeclaration& initDecl = loopInitializer->as<VarDeclaration>();
+    if (!initDecl.baseType().isNumber()) {
+        errors.error(loopInitializer->fPosition, "invalid type for loop index");
+        return nullptr;
+    }
+    if (initDecl.arraySize() != 0) {
+        errors.error(loopInitializer->fPosition, "invalid type for loop index");
+        return nullptr;
+    }
+    if (!initDecl.value()) {
+        errors.error(loopInitializer->fPosition, "missing loop index initializer");
+        return nullptr;
+    }
+    if (!ConstantFolder::GetConstantValue(*initDecl.value(), &loopInfo->fStart)) {
+        errors.error(loopInitializer->fPosition,
+                     "loop index initializer must be a constant expression");
+        return nullptr;
+    }
+
+    loopInfo->fIndex = initDecl.var();
+
+    // True when `expr` is a direct reference to the loop-index variable.
+    auto is_loop_index = [&](const std::unique_ptr<Expression>& expr) {
+        return expr->is<VariableReference>() &&
+               expr->as<VariableReference>().variable() == loopInfo->fIndex;
+    };
+
+    //
+    // condition has the form: loop_index relational_operator constant_expression
+    //
+    if (!loopTest) {
+        Position pos = positions.conditionPosition.valid() ? positions.conditionPosition : loopPos;
+        errors.error(pos, "missing condition");
+        return nullptr;
+    }
+    if (!loopTest->is<BinaryExpression>()) {
+        errors.error(loopTest->fPosition, "invalid condition");
+        return nullptr;
+    }
+    const BinaryExpression& cond = loopTest->as<BinaryExpression>();
+    if (!is_loop_index(cond.left())) {
+        errors.error(loopTest->fPosition, "expected loop index on left hand side of condition");
+        return nullptr;
+    }
+    // relational_operator is one of: > >= < <= == or !=
+    switch (cond.getOperator().kind()) {
+        case Operator::Kind::GT:
+        case Operator::Kind::GTEQ:
+        case Operator::Kind::LT:
+        case Operator::Kind::LTEQ:
+        case Operator::Kind::EQEQ:
+        case Operator::Kind::NEQ:
+            break;
+        default:
+            errors.error(loopTest->fPosition, "invalid relational operator");
+            return nullptr;
+    }
+    double loopEnd = 0;
+    if (!ConstantFolder::GetConstantValue(*cond.right(), &loopEnd)) {
+        errors.error(loopTest->fPosition, "loop index must be compared with a constant expression");
+        return nullptr;
+    }
+
+    //
+    // expression has one of the following forms:
+    //   loop_index++
+    //   loop_index--
+    //   loop_index += constant_expression
+    //   loop_index -= constant_expression
+    // The spec doesn't mention prefix increment and decrement, but there is some consensus that
+    // it's an oversight, so we allow those as well.
+    //
+    if (!loopNext) {
+        Position pos = positions.nextPosition.valid() ? positions.nextPosition : loopPos;
+        errors.error(pos, "missing loop expression");
+        return nullptr;
+    }
+    switch (loopNext->kind()) {
+        case Expression::Kind::kBinary: {
+            const BinaryExpression& next = loopNext->as<BinaryExpression>();
+            if (!is_loop_index(next.left())) {
+                errors.error(loopNext->fPosition, "expected loop index in loop expression");
+                return nullptr;
+            }
+            if (!ConstantFolder::GetConstantValue(*next.right(), &loopInfo->fDelta)) {
+                errors.error(loopNext->fPosition,
+                             "loop index must be modified by a constant expression");
+                return nullptr;
+            }
+            switch (next.getOperator().kind()) {
+                case Operator::Kind::PLUSEQ:  break;
+                case Operator::Kind::MINUSEQ: loopInfo->fDelta = -loopInfo->fDelta; break;
+                default:
+                    errors.error(loopNext->fPosition, "invalid operator in loop expression");
+                    return nullptr;
+            }
+        } break;
+        case Expression::Kind::kPrefix: {
+            const PrefixExpression& next = loopNext->as<PrefixExpression>();
+            if (!is_loop_index(next.operand())) {
+                errors.error(loopNext->fPosition, "expected loop index in loop expression");
+                return nullptr;
+            }
+            switch (next.getOperator().kind()) {
+                case Operator::Kind::PLUSPLUS:   loopInfo->fDelta = 1;  break;
+                case Operator::Kind::MINUSMINUS: loopInfo->fDelta = -1; break;
+                default:
+                    errors.error(loopNext->fPosition, "invalid operator in loop expression");
+                    return nullptr;
+            }
+        } break;
+        case Expression::Kind::kPostfix: {
+            const PostfixExpression& next = loopNext->as<PostfixExpression>();
+            if (!is_loop_index(next.operand())) {
+                errors.error(loopNext->fPosition, "expected loop index in loop expression");
+                return nullptr;
+            }
+            switch (next.getOperator().kind()) {
+                case Operator::Kind::PLUSPLUS:   loopInfo->fDelta = 1;  break;
+                case Operator::Kind::MINUSMINUS: loopInfo->fDelta = -1; break;
+                default:
+                    errors.error(loopNext->fPosition, "invalid operator in loop expression");
+                    return nullptr;
+            }
+        } break;
+        default:
+            errors.error(loopNext->fPosition, "invalid loop expression");
+            return nullptr;
+    }
+
+    //
+    // Within the body of the loop, the loop index is not statically assigned to, nor is it used as
+    // argument to a function 'out' or 'inout' parameter.
+    //
+    if (Analysis::StatementWritesToVariable(*loopStatement, *initDecl.var())) {
+        errors.error(loopStatement->fPosition,
+                     "loop index must not be modified within body of the loop");
+        return nullptr;
+    }
+
+    // Finally, compute the iteration count, based on the bounds, and the termination operator.
+    loopInfo->fCount = 0;
+
+    switch (cond.getOperator().kind()) {
+        case Operator::Kind::LT:
+            loopInfo->fCount = calculate_count(loopInfo->fStart, loopEnd, loopInfo->fDelta,
+                                               /*forwards=*/true, /*inclusive=*/false);
+            break;
+
+        case Operator::Kind::GT:
+            loopInfo->fCount = calculate_count(loopInfo->fStart, loopEnd, loopInfo->fDelta,
+                                               /*forwards=*/false, /*inclusive=*/false);
+            break;
+
+        case Operator::Kind::LTEQ:
+            loopInfo->fCount = calculate_count(loopInfo->fStart, loopEnd, loopInfo->fDelta,
+                                               /*forwards=*/true, /*inclusive=*/true);
+            break;
+
+        case Operator::Kind::GTEQ:
+            loopInfo->fCount = calculate_count(loopInfo->fStart, loopEnd, loopInfo->fDelta,
+                                               /*forwards=*/false, /*inclusive=*/true);
+            break;
+
+        case Operator::Kind::NEQ: {
+            // Keep full double precision here: truncating the quotient to float can round an
+            // inexact iteration count (e.g. 99999.0000001) to a whole number, which would make
+            // the exact-endpoint check below (`fCount != iterations`) misclassify a
+            // non-terminating loop as terminating.
+            double iterations = sk_ieee_double_divide(loopEnd - loopInfo->fStart, loopInfo->fDelta);
+            loopInfo->fCount = std::ceil(iterations);
+            if (loopInfo->fCount < 0 || loopInfo->fCount != iterations ||
+                !std::isfinite(iterations)) {
+                // The loop doesn't reach the exact endpoint and so will never terminate.
+                loopInfo->fCount = kLoopTerminationLimit;
+            }
+            break;
+        }
+        case Operator::Kind::EQEQ: {
+            if (loopInfo->fStart == loopEnd) {
+                // Start and end begin in the same place, so we can run one iteration...
+                if (loopInfo->fDelta) {
+                    // ... and then they diverge, so the loop terminates.
+                    loopInfo->fCount = 1;
+                } else {
+                    // ... but they never diverge, so the loop runs forever.
+                    loopInfo->fCount = kLoopTerminationLimit;
+                }
+            } else {
+                // Start never equals end, so the loop will not run a single iteration.
+                loopInfo->fCount = 0;
+            }
+            break;
+        }
+        default: SkUNREACHABLE;
+    }
+
+    SkASSERT(loopInfo->fCount >= 0);
+    if (loopInfo->fCount >= kLoopTerminationLimit) {
+        errors.error(loopPos, "loop must guarantee termination in fewer iterations");
+        return nullptr;
+    }
+
+    return loopInfo;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLGetReturnComplexity.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLGetReturnComplexity.cpp
new file mode 100644
index 0000000000..a5b6e1132f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLGetReturnComplexity.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+
+#include <algorithm>
+#include <memory>
+
+namespace SkSL {
+
+class Expression;
+
+static int count_returns_at_end_of_control_flow(const FunctionDefinition& funcDef) {
+ class CountReturnsAtEndOfControlFlow : public ProgramVisitor {
+ public:
+ CountReturnsAtEndOfControlFlow(const FunctionDefinition& funcDef) {
+ this->visitProgramElement(funcDef);
+ }
+
+ bool visitExpression(const Expression& expr) override {
+ // Do not recurse into expressions.
+ return false;
+ }
+
+ bool visitStatement(const Statement& stmt) override {
+ switch (stmt.kind()) {
+ case Statement::Kind::kBlock: {
+ // Check only the last statement of a block.
+ const auto& block = stmt.as<Block>();
+ return block.children().size() &&
+ this->visitStatement(*block.children().back());
+ }
+ case Statement::Kind::kSwitch:
+ case Statement::Kind::kDo:
+ case Statement::Kind::kFor:
+ // Don't introspect switches or loop structures at all.
+ return false;
+
+ case Statement::Kind::kReturn:
+ ++fNumReturns;
+ [[fallthrough]];
+
+ default:
+ return INHERITED::visitStatement(stmt);
+ }
+ }
+
+ int fNumReturns = 0;
+ using INHERITED = ProgramVisitor;
+ };
+
+ return CountReturnsAtEndOfControlFlow{funcDef}.fNumReturns;
+}
+
// Visits a function and tallies its return statements, abandoning the traversal once
// `limit` returns have been seen. Along the way it records the deepest scoped-block depth
// at which a return appeared, and whether any variable declarations occur inside nested
// scopes; GetReturnComplexity() uses these facts to classify the function.
class CountReturnsWithLimit : public ProgramVisitor {
public:
    CountReturnsWithLimit(const FunctionDefinition& funcDef, int limit) : fLimit(limit) {
        this->visitProgramElement(funcDef);
    }

    bool visitExpression(const Expression& expr) override {
        // Do not recurse into expressions.
        return false;
    }

    bool visitStatement(const Statement& stmt) override {
        switch (stmt.kind()) {
            case Statement::Kind::kReturn: {
                // Count this return and note its scope depth; stop visiting once the caller's
                // limit has been reached.
                ++fNumReturns;
                fDeepestReturn = std::max(fDeepestReturn, fScopedBlockDepth);
                return (fNumReturns >= fLimit) || INHERITED::visitStatement(stmt);
            }
            case Statement::Kind::kVarDeclaration: {
                // Remember that a variable was declared inside a nested scope.
                if (fScopedBlockDepth > 1) {
                    fVariablesInBlocks = true;
                }
                return INHERITED::visitStatement(stmt);
            }
            case Statement::Kind::kBlock: {
                // Only blocks which actually introduce a scope contribute to the depth count.
                int depthIncrement = stmt.as<Block>().isScope() ? 1 : 0;
                fScopedBlockDepth += depthIncrement;
                bool result = INHERITED::visitStatement(stmt);
                fScopedBlockDepth -= depthIncrement;
                if (fNumReturns == 0 && fScopedBlockDepth <= 1) {
                    // If closing this block puts us back at the top level, and we haven't
                    // encountered any return statements yet, any vardecls we may have encountered
                    // up until this point can be ignored. They are out of scope now, and they were
                    // never used in a return statement.
                    fVariablesInBlocks = false;
                }
                return result;
            }
            default:
                return INHERITED::visitStatement(stmt);
        }
    }

    int fNumReturns = 0;        // number of returns seen so far (search stops at fLimit)
    int fDeepestReturn = 0;     // deepest scoped-block depth at which a return appeared
    int fLimit = 0;             // stop the traversal after this many returns
    int fScopedBlockDepth = 0;  // current nesting depth of scope-introducing blocks
    bool fVariablesInBlocks = false;  // true if a vardecl was seen inside a nested scope
    using INHERITED = ProgramVisitor;
};
+
+Analysis::ReturnComplexity Analysis::GetReturnComplexity(const FunctionDefinition& funcDef) {
+ int returnsAtEndOfControlFlow = count_returns_at_end_of_control_flow(funcDef);
+ CountReturnsWithLimit counter{funcDef, returnsAtEndOfControlFlow + 1};
+ if (counter.fNumReturns > returnsAtEndOfControlFlow) {
+ return ReturnComplexity::kEarlyReturns;
+ }
+ if (counter.fNumReturns > 1) {
+ return ReturnComplexity::kScopedReturns;
+ }
+ if (counter.fVariablesInBlocks && counter.fDeepestReturn > 1) {
+ return ReturnComplexity::kScopedReturns;
+ }
+ return ReturnComplexity::kSingleSafeReturn;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLHasSideEffects.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLHasSideEffects.cpp
new file mode 100644
index 0000000000..0d328991e0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLHasSideEffects.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+
+namespace SkSL {
+
+bool Analysis::HasSideEffects(const Expression& expr) {
+ class HasSideEffectsVisitor : public ProgramVisitor {
+ public:
+ bool visitExpression(const Expression& expr) override {
+ switch (expr.kind()) {
+ case Expression::Kind::kFunctionCall: {
+ const FunctionCall& call = expr.as<FunctionCall>();
+ if (!(call.function().modifiers().fFlags & Modifiers::kPure_Flag)) {
+ return true;
+ }
+ break;
+ }
+ case Expression::Kind::kPrefix: {
+ const PrefixExpression& prefix = expr.as<PrefixExpression>();
+ if (prefix.getOperator().kind() == Operator::Kind::PLUSPLUS ||
+ prefix.getOperator().kind() == Operator::Kind::MINUSMINUS) {
+ return true;
+ }
+ break;
+ }
+ case Expression::Kind::kBinary: {
+ const BinaryExpression& binary = expr.as<BinaryExpression>();
+ if (binary.getOperator().isAssignment()) {
+ return true;
+ }
+ break;
+ }
+ case Expression::Kind::kPostfix:
+ return true;
+
+ default:
+ break;
+ }
+ return INHERITED::visitExpression(expr);
+ }
+
+ using INHERITED = ProgramVisitor;
+ };
+
+ HasSideEffectsVisitor visitor;
+ return visitor.visitExpression(expr);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLIsConstantExpression.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLIsConstantExpression.cpp
new file mode 100644
index 0000000000..d0a54d2d4b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLIsConstantExpression.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include <set>
+
+namespace SkSL {
+
+// Checks for ES2 constant-expression rules, and (optionally) constant-index-expression rules
+// (if loopIndices is non-nullptr)
// Determines whether an expression follows the ES2 constant-expression rules (or, when
// fLoopIndices is supplied, the looser constant-index-expression rules). Following the
// ProgramVisitor convention, visitExpression() returns true to halt the traversal — i.e.
// a `true` result means the expression is NOT a constant-(index)-expression. Callers
// therefore negate the result (see IsConstantExpression below).
class ConstantExpressionVisitor : public ProgramVisitor {
public:
    ConstantExpressionVisitor(const std::set<const Variable*>* loopIndices)
            : fLoopIndices(loopIndices) {}

    bool visitExpression(const Expression& e) override {
        // A constant-(index)-expression is one of...
        switch (e.kind()) {
            // ... a literal value
            case Expression::Kind::kLiteral:
                return false;

            // ... settings can appear in fragment processors; they will resolve when compiled
            case Expression::Kind::kSetting:
                return false;

            // ... a global or local variable qualified as 'const', excluding function parameters.
            // ... loop indices as defined in section 4. [constant-index-expression]
            case Expression::Kind::kVariableReference: {
                const Variable* v = e.as<VariableReference>().variable();
                if ((v->storage() == Variable::Storage::kGlobal ||
                     v->storage() == Variable::Storage::kLocal) &&
                    (v->modifiers().fFlags & Modifiers::kConst_Flag)) {
                    return false;
                }
                // Non-const variables only qualify when they are a permitted loop index.
                return !fLoopIndices || fLoopIndices->find(v) == fLoopIndices->end();
            }

            // ... not a sequence expression (skia:13311)...
            case Expression::Kind::kBinary:
                if (e.as<BinaryExpression>().getOperator().kind() == Operator::Kind::COMMA) {
                    return true;
                }
                [[fallthrough]];

            // ... expressions composed of both of the above
            case Expression::Kind::kConstructorArray:
            case Expression::Kind::kConstructorArrayCast:
            case Expression::Kind::kConstructorCompound:
            case Expression::Kind::kConstructorCompoundCast:
            case Expression::Kind::kConstructorDiagonalMatrix:
            case Expression::Kind::kConstructorMatrixResize:
            case Expression::Kind::kConstructorScalarCast:
            case Expression::Kind::kConstructorSplat:
            case Expression::Kind::kConstructorStruct:
            case Expression::Kind::kFieldAccess:
            case Expression::Kind::kIndex:
            case Expression::Kind::kPrefix:
            case Expression::Kind::kPostfix:
            case Expression::Kind::kSwizzle:
            case Expression::Kind::kTernary:
                return INHERITED::visitExpression(e);

            // Function calls are completely disallowed in SkSL constant-(index)-expressions.
            // GLSL does mandate that calling a built-in function where the arguments are all
            // constant-expressions should result in a constant-expression. SkSL handles this by
            // optimizing fully-constant function calls into literals in FunctionCall::Make.
            case Expression::Kind::kFunctionCall:
            case Expression::Kind::kChildCall:

            // These shouldn't appear in a valid program at all, and definitely aren't
            // constant-(index)-expressions.
            case Expression::Kind::kPoison:
            case Expression::Kind::kFunctionReference:
            case Expression::Kind::kMethodReference:
            case Expression::Kind::kTypeReference:
                return true;

            default:
                SkDEBUGFAIL("Unexpected expression type");
                return true;
        }
    }

private:
    // Loop indices which may be treated as constant; null means "strict constant-expressions
    // only" (no loop-index allowance).
    const std::set<const Variable*>* fLoopIndices;
    using INHERITED = ProgramVisitor;
};
+
+bool Analysis::IsConstantExpression(const Expression& expr) {
+ return !ConstantExpressionVisitor{/*loopIndices=*/nullptr}.visitExpression(expr);
+}
+
+bool Analysis::IsConstantIndexExpression(const Expression& expr,
+ const std::set<const Variable*>* loopIndices) {
+ return !ConstantExpressionVisitor{loopIndices}.visitExpression(expr);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLIsDynamicallyUniformExpression.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLIsDynamicallyUniformExpression.cpp
new file mode 100644
index 0000000000..0e07fac6f5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLIsDynamicallyUniformExpression.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLModifiers.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
// Returns true if `expr` is "dynamically uniform": composed entirely of literals,
// const/uniform variable references, pure function calls, and operators/constructors
// applied to those.
bool Analysis::IsDynamicallyUniformExpression(const Expression& expr) {
    class IsDynamicallyUniformExpressionVisitor : public ProgramVisitor {
    public:
        bool visitExpression(const Expression& expr) override {
            switch (expr.kind()) {
                case Expression::Kind::kBinary:
                case Expression::Kind::kConstructorArray:
                case Expression::Kind::kConstructorArrayCast:
                case Expression::Kind::kConstructorCompound:
                case Expression::Kind::kConstructorCompoundCast:
                case Expression::Kind::kConstructorDiagonalMatrix:
                case Expression::Kind::kConstructorMatrixResize:
                case Expression::Kind::kConstructorScalarCast:
                case Expression::Kind::kConstructorSplat:
                case Expression::Kind::kConstructorStruct:
                case Expression::Kind::kFieldAccess:
                case Expression::Kind::kIndex:
                case Expression::Kind::kPostfix:
                case Expression::Kind::kPrefix:
                case Expression::Kind::kSwizzle:
                case Expression::Kind::kTernary:
                    // These expressions might be dynamically uniform, if they are composed entirely
                    // of constants and uniforms.
                    break;

                case Expression::Kind::kVariableReference: {
                    // Verify that variable references are const or uniform.
                    const Variable* var = expr.as<VariableReference>().variable();
                    if (!var || !(var->modifiers().fFlags & (Modifiers::Flag::kConst_Flag |
                                                             Modifiers::Flag::kUniform_Flag))) {
                        fIsDynamicallyUniform = false;
                        return true;  // halt the traversal; the answer is already known
                    }
                    break;
                }
                case Expression::Kind::kFunctionCall: {
                    // Verify that function calls are pure.
                    const FunctionDeclaration& decl = expr.as<FunctionCall>().function();
                    if (!(decl.modifiers().fFlags & Modifiers::Flag::kPure_Flag)) {
                        fIsDynamicallyUniform = false;
                        return true;  // halt the traversal; the answer is already known
                    }
                    break;
                }
                case Expression::Kind::kLiteral:
                    // Literals are compile-time constants.
                    return false;

                default:
                    // This expression isn't dynamically uniform.
                    fIsDynamicallyUniform = false;
                    return true;
            }
            // Recurse into subexpressions; any non-uniform child halts the traversal.
            return INHERITED::visitExpression(expr);
        }

        bool fIsDynamicallyUniform = true;
        using INHERITED = ProgramVisitor;
    };

    IsDynamicallyUniformExpressionVisitor visitor;
    visitor.visitExpression(expr);
    return visitor.fIsDynamicallyUniform;
}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLIsSameExpressionTree.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLIsSameExpressionTree.cpp
new file mode 100644
index 0000000000..2c4506a725
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLIsSameExpressionTree.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include <cstddef>
+#include <memory>
+
+namespace SkSL {
+
+bool Analysis::IsSameExpressionTree(const Expression& left, const Expression& right) {
+ if (left.kind() != right.kind() || !left.type().matches(right.type())) {
+ return false;
+ }
+
+ // This isn't a fully exhaustive list of expressions by any stretch of the imagination; for
+ // instance, `x[y+1] = x[y+1]` isn't detected because we don't look at BinaryExpressions.
+ // Since this is intended to be used for optimization purposes, handling the common cases is
+ // sufficient.
+ switch (left.kind()) {
+ case Expression::Kind::kLiteral:
+ return left.as<Literal>().value() == right.as<Literal>().value();
+
+ case Expression::Kind::kConstructorArray:
+ case Expression::Kind::kConstructorArrayCast:
+ case Expression::Kind::kConstructorCompound:
+ case Expression::Kind::kConstructorCompoundCast:
+ case Expression::Kind::kConstructorDiagonalMatrix:
+ case Expression::Kind::kConstructorMatrixResize:
+ case Expression::Kind::kConstructorScalarCast:
+ case Expression::Kind::kConstructorStruct:
+ case Expression::Kind::kConstructorSplat: {
+ if (left.kind() != right.kind()) {
+ return false;
+ }
+ const AnyConstructor& leftCtor = left.asAnyConstructor();
+ const AnyConstructor& rightCtor = right.asAnyConstructor();
+ const auto leftSpan = leftCtor.argumentSpan();
+ const auto rightSpan = rightCtor.argumentSpan();
+ if (leftSpan.size() != rightSpan.size()) {
+ return false;
+ }
+ for (size_t index = 0; index < leftSpan.size(); ++index) {
+ if (!IsSameExpressionTree(*leftSpan[index], *rightSpan[index])) {
+ return false;
+ }
+ }
+ return true;
+ }
+ case Expression::Kind::kFieldAccess:
+ return left.as<FieldAccess>().fieldIndex() == right.as<FieldAccess>().fieldIndex() &&
+ IsSameExpressionTree(*left.as<FieldAccess>().base(),
+ *right.as<FieldAccess>().base());
+
+ case Expression::Kind::kIndex:
+ return IsSameExpressionTree(*left.as<IndexExpression>().index(),
+ *right.as<IndexExpression>().index()) &&
+ IsSameExpressionTree(*left.as<IndexExpression>().base(),
+ *right.as<IndexExpression>().base());
+
+ case Expression::Kind::kPrefix:
+ return (left.as<PrefixExpression>().getOperator().kind() ==
+ right.as<PrefixExpression>().getOperator().kind()) &&
+ IsSameExpressionTree(*left.as<PrefixExpression>().operand(),
+ *right.as<PrefixExpression>().operand());
+
+ case Expression::Kind::kSwizzle:
+ return left.as<Swizzle>().components() == right.as<Swizzle>().components() &&
+ IsSameExpressionTree(*left.as<Swizzle>().base(), *right.as<Swizzle>().base());
+
+ case Expression::Kind::kVariableReference:
+ return left.as<VariableReference>().variable() ==
+ right.as<VariableReference>().variable();
+
+ default:
+ return false;
+ }
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLIsTrivialExpression.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLIsTrivialExpression.cpp
new file mode 100644
index 0000000000..4479d1215d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLIsTrivialExpression.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <memory>
+
+namespace SkSL {
+
+bool Analysis::IsTrivialExpression(const Expression& expr) {
+ switch (expr.kind()) {
+ case Expression::Kind::kLiteral:
+ case Expression::Kind::kVariableReference:
+ return true;
+
+ case Expression::Kind::kSwizzle:
+ // All swizzles are considered to be trivial.
+ return IsTrivialExpression(*expr.as<Swizzle>().base());
+
+ case Expression::Kind::kFieldAccess:
+ // Accessing a field is trivial.
+ return IsTrivialExpression(*expr.as<FieldAccess>().base());
+
+ case Expression::Kind::kIndex: {
+ // Accessing a constant array index is trivial.
+ const IndexExpression& inner = expr.as<IndexExpression>();
+ return inner.index()->isIntLiteral() && IsTrivialExpression(*inner.base());
+ }
+ case Expression::Kind::kConstructorArray:
+ case Expression::Kind::kConstructorStruct:
+ // Only consider small arrays/structs of compile-time-constants to be trivial.
+ return expr.type().slotCount() <= 4 && IsCompileTimeConstant(expr);
+
+ case Expression::Kind::kConstructorArrayCast:
+ case Expression::Kind::kConstructorMatrixResize:
+ // These operations require function calls in Metal, so they're never trivial.
+ return false;
+
+ case Expression::Kind::kConstructorCompound:
+ // Only compile-time-constant compound constructors are considered to be trivial.
+ return IsCompileTimeConstant(expr);
+
+ case Expression::Kind::kConstructorCompoundCast:
+ case Expression::Kind::kConstructorScalarCast:
+ case Expression::Kind::kConstructorSplat:
+ case Expression::Kind::kConstructorDiagonalMatrix: {
+ // Single-argument constructors are trivial when their inner expression is trivial.
+ SkASSERT(expr.asAnyConstructor().argumentSpan().size() == 1);
+ const Expression& inner = *expr.asAnyConstructor().argumentSpan().front();
+ return IsTrivialExpression(inner);
+ }
+ default:
+ return false;
+ }
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLNoOpErrorReporter.h b/gfx/skia/skia/src/sksl/analysis/SkSLNoOpErrorReporter.h
new file mode 100644
index 0000000000..16040d4d71
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLNoOpErrorReporter.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLNoOpErrorReporter_DEFINED
+#define SkSLNoOpErrorReporter_DEFINED
+
+#include "include/sksl/SkSLErrorReporter.h"
+
+namespace SkSL {
+
+// We can use a no-op error reporter to silently ignore errors.
// An ErrorReporter that silently discards every error it receives.
class NoOpErrorReporter : public ErrorReporter {
public:
    // Intentionally drops both the message text and the position.
    void handleError(std::string_view, Position) override {}
};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLProgramUsage.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLProgramUsage.cpp
new file mode 100644
index 0000000000..46f0a452b6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLProgramUsage.cpp
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/base/SkDebug.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include <cstring>
+#include <memory>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+
+struct Program;
+
+namespace {
+
// Walks IR and adjusts the tallies in fUsage by fDelta for every variable declaration,
// variable reference, and function call it encounters. Constructed with delta=+1 to add
// usage and delta=-1 to remove it (see ProgramUsage::add/remove below).
class ProgramUsageVisitor : public ProgramVisitor {
public:
    ProgramUsageVisitor(ProgramUsage* usage, int delta) : fUsage(usage), fDelta(delta) {}

    bool visitProgramElement(const ProgramElement& pe) override {
        if (pe.is<FunctionDefinition>()) {
            for (const Variable* param : pe.as<FunctionDefinition>().declaration().parameters()) {
                // Ensure function-parameter variables exist in the variable usage map. They aren't
                // otherwise declared, but ProgramUsage::get() should be able to find them, even if
                // they are unread and unwritten.
                fUsage->fVariableCounts[param];
            }
        } else if (pe.is<InterfaceBlock>()) {
            // Ensure interface-block variables exist in the variable usage map.
            fUsage->fVariableCounts[pe.as<InterfaceBlock>().var()];
        }
        return INHERITED::visitProgramElement(pe);
    }

    bool visitStatement(const Statement& s) override {
        if (s.is<VarDeclaration>()) {
            // Add all declared variables to the usage map (even if never otherwise accessed).
            const VarDeclaration& vd = s.as<VarDeclaration>();
            ProgramUsage::VariableCounts& counts = fUsage->fVariableCounts[vd.var()];
            counts.fVarExists += fDelta;
            SkASSERT(counts.fVarExists >= 0 && counts.fVarExists <= 1);
            if (vd.value()) {
                // The initial-value expression, when present, counts as a write.
                counts.fWrite += fDelta;
            }
        }
        return INHERITED::visitStatement(s);
    }

    bool visitExpression(const Expression& e) override {
        if (e.is<FunctionCall>()) {
            // Tally one call to the referenced function.
            const FunctionDeclaration* f = &e.as<FunctionCall>().function();
            fUsage->fCallCounts[f] += fDelta;
            SkASSERT(fUsage->fCallCounts[f] >= 0);
        } else if (e.is<VariableReference>()) {
            // Tally a read and/or write depending on how the variable is referenced.
            const VariableReference& ref = e.as<VariableReference>();
            ProgramUsage::VariableCounts& counts = fUsage->fVariableCounts[ref.variable()];
            switch (ref.refKind()) {
                case VariableRefKind::kRead:
                    counts.fRead += fDelta;
                    break;
                case VariableRefKind::kWrite:
                    counts.fWrite += fDelta;
                    break;
                case VariableRefKind::kReadWrite:
                case VariableRefKind::kPointer:
                    // Read-write and pointer references count as both a read and a write.
                    counts.fRead += fDelta;
                    counts.fWrite += fDelta;
                    break;
            }
            SkASSERT(counts.fRead >= 0 && counts.fWrite >= 0);
        }
        return INHERITED::visitExpression(e);
    }

    using ProgramVisitor::visitProgramElement;
    using ProgramVisitor::visitStatement;

    ProgramUsage* fUsage;  // the usage map being updated (not owned)
    int fDelta;            // +1 when adding usage, -1 when removing it
    using INHERITED = ProgramVisitor;
};
+
+} // namespace
+
+std::unique_ptr<ProgramUsage> Analysis::GetUsage(const Program& program) {
+ auto usage = std::make_unique<ProgramUsage>();
+ ProgramUsageVisitor addRefs(usage.get(), /*delta=*/+1);
+ addRefs.visit(program);
+ return usage;
+}
+
+std::unique_ptr<ProgramUsage> Analysis::GetUsage(const Module& module) {
+ auto usage = std::make_unique<ProgramUsage>();
+ ProgramUsageVisitor addRefs(usage.get(), /*delta=*/+1);
+
+ for (const Module* m = &module; m != nullptr; m = m->fParent) {
+ for (const std::unique_ptr<ProgramElement>& element : m->fElements) {
+ addRefs.visitProgramElement(*element);
+ }
+ }
+ return usage;
+}
+
+ProgramUsage::VariableCounts ProgramUsage::get(const Variable& v) const {
+ const VariableCounts* counts = fVariableCounts.find(&v);
+ SkASSERT(counts);
+ return *counts;
+}
+
+bool ProgramUsage::isDead(const Variable& v) const {
+ const Modifiers& modifiers = v.modifiers();
+ VariableCounts counts = this->get(v);
+ if ((v.storage() != Variable::Storage::kLocal && counts.fRead) ||
+ (modifiers.fFlags &
+ (Modifiers::kIn_Flag | Modifiers::kOut_Flag | Modifiers::kUniform_Flag))) {
+ return false;
+ }
+ // Consider the variable dead if it's never read and never written (besides the initial-value).
+ return !counts.fRead && (counts.fWrite <= (v.initialValue() ? 1 : 0));
+}
+
+int ProgramUsage::get(const FunctionDeclaration& f) const {
+ const int* count = fCallCounts.find(&f);
+ return count ? *count : 0;
+}
+
+void ProgramUsage::add(const Expression* expr) {
+ ProgramUsageVisitor addRefs(this, /*delta=*/+1);
+ addRefs.visitExpression(*expr);
+}
+
+void ProgramUsage::add(const Statement* stmt) {
+ ProgramUsageVisitor addRefs(this, /*delta=*/+1);
+ addRefs.visitStatement(*stmt);
+}
+
+void ProgramUsage::add(const ProgramElement& element) {
+ ProgramUsageVisitor addRefs(this, /*delta=*/+1);
+ addRefs.visitProgramElement(element);
+}
+
+void ProgramUsage::remove(const Expression* expr) {
+ ProgramUsageVisitor subRefs(this, /*delta=*/-1);
+ subRefs.visitExpression(*expr);
+}
+
+void ProgramUsage::remove(const Statement* stmt) {
+ ProgramUsageVisitor subRefs(this, /*delta=*/-1);
+ subRefs.visitStatement(*stmt);
+}
+
+void ProgramUsage::remove(const ProgramElement& element) {
+ ProgramUsageVisitor subRefs(this, /*delta=*/-1);
+ subRefs.visitProgramElement(element);
+}
+
// Returns true if every non-zero usage entry in `a` has an identical counterpart in `b`.
// Zero-usage entries in `a` are skipped; operator== calls this in both directions to
// establish equality. Flip kReportMismatch to true to log the first mismatch found.
static bool contains_matching_data(const ProgramUsage& a, const ProgramUsage& b) {
    constexpr bool kReportMismatch = false;

    for (const auto& [varA, varCountA] : a.fVariableCounts) {
        // Skip variable entries with zero reported usage.
        if (!varCountA.fVarExists && !varCountA.fRead && !varCountA.fWrite) {
            continue;
        }
        // Find the matching variable in the other map and ensure that its counts match.
        // NOTE(review): memcmp-based comparison assumes VariableCounts contains only the
        // three ints with no padding — holds for the current struct definition.
        const ProgramUsage::VariableCounts* varCountB = b.fVariableCounts.find(varA);
        if (!varCountB || 0 != memcmp(&varCountA, varCountB, sizeof(varCountA))) {
            if constexpr (kReportMismatch) {
                SkDebugf("VariableCounts mismatch: '%.*s' (E%d R%d W%d != E%d R%d W%d)\n",
                         (int)varA->name().size(), varA->name().data(),
                         varCountA.fVarExists,
                         varCountA.fRead,
                         varCountA.fWrite,
                         varCountB ? varCountB->fVarExists : 0,
                         varCountB ? varCountB->fRead : 0,
                         varCountB ? varCountB->fWrite : 0);
            }
            return false;
        }
    }

    for (const auto& [callA, callCountA] : a.fCallCounts) {
        // Skip function-call entries with zero reported usage.
        if (!callCountA) {
            continue;
        }
        // Find the matching function in the other map and ensure that its call-count matches.
        const int* callCountB = b.fCallCounts.find(callA);
        if (!callCountB || callCountA != *callCountB) {
            if constexpr (kReportMismatch) {
                SkDebugf("CallCounts mismatch: '%.*s' (%d != %d)\n",
                         (int)callA->name().size(), callA->name().data(),
                         callCountA,
                         callCountB ? *callCountB : 0);
            }
            return false;
        }
    }

    // Every non-zero entry in A has a matching non-zero entry in B.
    return true;
}
+
+bool ProgramUsage::operator==(const ProgramUsage& that) const {
+ // ProgramUsage can be "equal" while the underlying hash maps look slightly different, because a
+ // dead-stripped variable or function will have a usage count of zero, but will still exist in
+ // the maps. If the program usage is re-analyzed from scratch, the maps will not contain an
+ // entry for these variables or functions at all. This means our maps can be "equal" while
+ // having different element counts.
+ //
+ // In order to check these maps, we compare map entries bi-directionally, skipping zero-usage
+ // entries. If all the non-zero elements in `this` match the elements in `that`, and all the
+ // non-zero elements in `that` match the elements in `this`, all the non-zero elements must be
+ // identical, and all the zero elements must be either zero or non-existent on both sides.
+ return contains_matching_data(*this, that) &&
+ contains_matching_data(that, *this);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLProgramUsage.h b/gfx/skia/skia/src/sksl/analysis/SkSLProgramUsage.h
new file mode 100644
index 0000000000..991240ce2d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLProgramUsage.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAMUSAGE
+#define SKSL_PROGRAMUSAGE
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkTHash.h"
+
+namespace SkSL {
+
+class Expression;
+class FunctionDeclaration;
+class ProgramElement;
+class Statement;
+class Variable;
+
+/**
+ * Side-car class holding mutable information about a Program's IR
+ */
class ProgramUsage {
public:
    struct VariableCounts {
        int fVarExists = 0;  // if this is zero, the Variable might have already been deleted
        int fRead = 0;       // reads (read-write/pointer references count here too)
        int fWrite = 0;      // writes, including the variable's initial-value expression
    };
    // Returns the usage counts for a variable; asserts that the variable is known.
    VariableCounts get(const Variable&) const;
    // Returns true if the variable is never read and only written by its initializer.
    bool isDead(const Variable&) const;

    // Returns the number of calls to this function (zero if it was never seen).
    int get(const FunctionDeclaration&) const;

    // Incrementally add or retract the usage contributed by a piece of IR, keeping the
    // maps in sync as the program is rewritten.
    void add(const Expression* expr);
    void add(const Statement* stmt);
    void add(const ProgramElement& element);
    void remove(const Expression* expr);
    void remove(const Statement* stmt);
    void remove(const ProgramElement& element);

    // Equality ignores zero-usage entries; see the implementation for details.
    bool operator==(const ProgramUsage& that) const;
    bool operator!=(const ProgramUsage& that) const { return !(*this == that); }

    SkTHashMap<const Variable*, VariableCounts> fVariableCounts;
    SkTHashMap<const FunctionDeclaration*, int> fCallCounts;
};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLProgramVisitor.h b/gfx/skia/skia/src/sksl/analysis/SkSLProgramVisitor.h
new file mode 100644
index 0000000000..3a27ef35d1
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLProgramVisitor.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLProgramVisitor_DEFINED
+#define SkSLProgramVisitor_DEFINED
+
+#include <memory>
+
+namespace SkSL {
+
+struct Program;
+class Expression;
+class Statement;
+class ProgramElement;
+
+/**
+ * Utility class to visit every element, statement, and expression in an SkSL program IR.
+ * This is intended for simple analysis and accumulation, where custom visitation behavior is only
+ * needed for a limited set of expression kinds.
+ *
+ * Subclasses should override visitExpression/visitStatement/visitProgramElement as needed and
+ * intercept elements of interest. They can then invoke the base class's function to visit all
+ * sub expressions. They can also choose not to call the base function to arrest recursion, or
+ * implement custom recursion.
+ *
+ * The visit functions return a bool that determines how the default implementation recurses. Once
+ * any visit call returns true, the default behavior stops recursing and propagates true up the
+ * stack.
+ */
+template <typename T>
+class TProgramVisitor {
+public:
+ virtual ~TProgramVisitor() = default;
+
+protected:
+ virtual bool visitExpression(typename T::Expression& expression);
+ virtual bool visitStatement(typename T::Statement& statement);
+ virtual bool visitProgramElement(typename T::ProgramElement& programElement);
+
+ virtual bool visitExpressionPtr(typename T::UniquePtrExpression& expr) = 0;
+ virtual bool visitStatementPtr(typename T::UniquePtrStatement& stmt) = 0;
+};
+
+// ProgramVisitors take const types; ProgramWriters do not.
+struct ProgramVisitorTypes {
+ using Program = const SkSL::Program;
+ using Expression = const SkSL::Expression;
+ using Statement = const SkSL::Statement;
+ using ProgramElement = const SkSL::ProgramElement;
+ using UniquePtrExpression = const std::unique_ptr<SkSL::Expression>;
+ using UniquePtrStatement = const std::unique_ptr<SkSL::Statement>;
+};
+
+extern template class TProgramVisitor<ProgramVisitorTypes>;
+
+class ProgramVisitor : public TProgramVisitor<ProgramVisitorTypes> {
+public:
+ bool visit(const Program& program);
+
+private:
+ // ProgramVisitors shouldn't need access to unique_ptrs, and marking these as final should help
+ // these accessors inline away. Use ProgramWriter if you need the unique_ptrs.
+ bool visitExpressionPtr(const std::unique_ptr<Expression>& e) final {
+ return this->visitExpression(*e);
+ }
+ bool visitStatementPtr(const std::unique_ptr<Statement>& s) final {
+ return this->visitStatement(*s);
+ }
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLSwitchCaseContainsExit.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLSwitchCaseContainsExit.cpp
new file mode 100644
index 0000000000..992df2adde
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLSwitchCaseContainsExit.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLAnalysis.h"
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+
+namespace SkSL {
+
+class Expression;
+
+namespace {
+
+class SwitchCaseContainsExit : public ProgramVisitor {
+public:
+ SwitchCaseContainsExit(bool conditionalExits) : fConditionalExits(conditionalExits) {}
+
+ bool visitExpression(const Expression& expr) override {
+ // We can avoid processing expressions entirely.
+ return false;
+ }
+
+ bool visitStatement(const Statement& stmt) override {
+ switch (stmt.kind()) {
+ case Statement::Kind::kBlock:
+ case Statement::Kind::kSwitchCase:
+ return INHERITED::visitStatement(stmt);
+
+ case Statement::Kind::kReturn:
+ // Returns are an early exit regardless of the surrounding control structures.
+ return fConditionalExits ? fInConditional : !fInConditional;
+
+ case Statement::Kind::kContinue:
+ // Continues are an early exit from switches, but not loops.
+ return !fInLoop &&
+ (fConditionalExits ? fInConditional : !fInConditional);
+
+ case Statement::Kind::kBreak:
+ // Breaks cannot escape from switches or loops.
+ return !fInLoop && !fInSwitch &&
+ (fConditionalExits ? fInConditional : !fInConditional);
+
+ case Statement::Kind::kIf: {
+ ++fInConditional;
+ bool result = INHERITED::visitStatement(stmt);
+ --fInConditional;
+ return result;
+ }
+
+ case Statement::Kind::kFor:
+ case Statement::Kind::kDo: {
+ // Loops are treated as conditionals because a loop could potentially execute zero
+ // times. We don't have a straightforward way to determine that a loop definitely
+ // executes at least once.
+ ++fInConditional;
+ ++fInLoop;
+ bool result = INHERITED::visitStatement(stmt);
+ --fInLoop;
+ --fInConditional;
+ return result;
+ }
+
+ case Statement::Kind::kSwitch: {
+ ++fInSwitch;
+ bool result = INHERITED::visitStatement(stmt);
+ --fInSwitch;
+ return result;
+ }
+
+ default:
+ return false;
+ }
+ }
+
+ bool fConditionalExits = false;
+ int fInConditional = 0;
+ int fInLoop = 0;
+ int fInSwitch = 0;
+ using INHERITED = ProgramVisitor;
+};
+
+} // namespace
+
+bool Analysis::SwitchCaseContainsUnconditionalExit(Statement& stmt) {
+ return SwitchCaseContainsExit{/*conditionalExits=*/false}.visitStatement(stmt);
+}
+
+bool Analysis::SwitchCaseContainsConditionalExit(Statement& stmt) {
+ return SwitchCaseContainsExit{/*conditionalExits=*/true}.visitStatement(stmt);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/analysis/SkSLSymbolTableStackBuilder.cpp b/gfx/skia/skia/src/sksl/analysis/SkSLSymbolTableStackBuilder.cpp
new file mode 100644
index 0000000000..ada31aa000
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/analysis/SkSLSymbolTableStackBuilder.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+
+class SymbolTable;
+
+namespace Analysis {
+
+SymbolTableStackBuilder::SymbolTableStackBuilder(const Statement* stmt,
+ std::vector<std::shared_ptr<SymbolTable>>* stack) {
+ if (stmt) {
+ switch (stmt->kind()) {
+ case Statement::Kind::kBlock:
+ if (std::shared_ptr<SymbolTable> symbols = stmt->as<Block>().symbolTable()) {
+ stack->push_back(std::move(symbols));
+ fStackToPop = stack;
+ }
+ break;
+
+ case Statement::Kind::kFor:
+ if (std::shared_ptr<SymbolTable> symbols = stmt->as<ForStatement>().symbols()) {
+ stack->push_back(std::move(symbols));
+ fStackToPop = stack;
+ }
+ break;
+
+ case Statement::Kind::kSwitch:
+ if (std::shared_ptr<SymbolTable> symbols = stmt->as<SwitchStatement>().symbols()) {
+ stack->push_back(std::move(symbols));
+ fStackToPop = stack;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+SymbolTableStackBuilder::~SymbolTableStackBuilder() {
+ if (fStackToPop) {
+ fStackToPop->pop_back();
+ }
+}
+
+} // namespace Analysis
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLCodeGenerator.h b/gfx/skia/skia/src/sksl/codegen/SkSLCodeGenerator.h
new file mode 100644
index 0000000000..fd58648cd9
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLCodeGenerator.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CODEGENERATOR
+#define SKSL_CODEGENERATOR
+
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/ir/SkSLProgram.h"
+
+namespace SkSL {
+
+/**
+ * Abstract superclass of all code generators, which take a Program as input and produce code as
+ * output.
+ */
+class CodeGenerator {
+public:
+ CodeGenerator(const Context* context, const Program* program, OutputStream* out)
+ : fContext(*context)
+ , fProgram(*program)
+ , fOut(out) {}
+
+ virtual ~CodeGenerator() {}
+
+ virtual bool generateCode() = 0;
+
+ // Intended for use by AutoOutputStream.
+ OutputStream* outputStream() { return fOut; }
+ void setOutputStream(OutputStream* output) { fOut = output; }
+
+protected:
+#if defined(SK_USE_LEGACY_MIPMAP_LOD_BIAS)
+ static constexpr float kSharpenTexturesBias = -.5f;
+#else
+ // For SkMipmapMode::kLinear we want a bias such that when the unbiased LOD value is
+ // midway between levels we select just the larger level, i.e. a bias of -.5. However, using
+ // this bias with kNearest mode with a draw that is a perfect power of two downscale puts us
+ // right on the rounding edge where it could go up or down depending on the particular GPU.
+ // Experimentally we found that at -.49 most iOS devices (iPhone 7, 8, and iPad Pro
+ // [PowerVRGT7800 version]) all round to the level twice as big as the device space footprint
+ // for some such draws in our unit tests on GLES. However, the iPhone 11 still fails and so
+ // we are using -.475. They do not at -.48. All other GPUs passed tests with -.499. Though, at
+ // this time the bias is not implemented in the MSL codegen and so iOS/Metal was not tested.
+ static constexpr float kSharpenTexturesBias = -.475f;
+#endif
+
+ const Context& fContext;
+ const Program& fProgram;
+ OutputStream* fOut;
+};
+
+class AutoOutputStream {
+public:
+ // Maintains the current indentation level while writing to the new output stream.
+ AutoOutputStream(CodeGenerator* codeGen, OutputStream* newOutput)
+ : fCodeGen(codeGen)
+ , fOldOutput(codeGen->outputStream()) {
+ fCodeGen->setOutputStream(newOutput);
+ }
+ // Resets the indentation when entering the scope, and restores it when leaving.
+ AutoOutputStream(CodeGenerator* codeGen, OutputStream* newOutput, int *indentationPtr)
+ : fCodeGen(codeGen)
+ , fOldOutput(codeGen->outputStream())
+ , fIndentationPtr(indentationPtr)
+ , fOldIndentation(indentationPtr ? *indentationPtr : 0) {
+ fCodeGen->setOutputStream(newOutput);
+ *fIndentationPtr = 0;
+ }
+ ~AutoOutputStream() {
+ fCodeGen->setOutputStream(fOldOutput);
+ if (fIndentationPtr) {
+ *fIndentationPtr = fOldIndentation;
+ }
+ }
+
+private:
+ CodeGenerator* fCodeGen = nullptr;
+ OutputStream* fOldOutput = nullptr;
+ int *fIndentationPtr = nullptr;
+ int fOldIndentation = 0;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLGLSLCodeGenerator.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLGLSLCodeGenerator.cpp
new file mode 100644
index 0000000000..c953a1a46c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLGLSLCodeGenerator.cpp
@@ -0,0 +1,1774 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/codegen/SkSLGLSLCodeGenerator.h"
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkStringView.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLGLSL.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLConstructorArrayCast.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLFunctionPrototype.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLModifiersDeclaration.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLStructDefinition.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/spirv.h"
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+void GLSLCodeGenerator::write(std::string_view s) {
+ if (!s.length()) {
+ return;
+ }
+ if (fAtLineStart) {
+ for (int i = 0; i < fIndentation; i++) {
+ fOut->writeText(" ");
+ }
+ }
+ fOut->write(s.data(), s.length());
+ fAtLineStart = false;
+}
+
+void GLSLCodeGenerator::writeLine(std::string_view s) {
+ this->write(s);
+ fOut->writeText("\n");
+ fAtLineStart = true;
+}
+
+void GLSLCodeGenerator::finishLine() {
+ if (!fAtLineStart) {
+ this->writeLine();
+ }
+}
+
+void GLSLCodeGenerator::writeExtension(std::string_view name, bool require) {
+ fExtensions.writeText("#extension ");
+ fExtensions.write(name.data(), name.length());
+ fExtensions.writeText(require ? " : require\n" : " : enable\n");
+}
+
+bool GLSLCodeGenerator::usesPrecisionModifiers() const {
+ return this->caps().fUsesPrecisionModifiers;
+}
+
+void GLSLCodeGenerator::writeIdentifier(std::string_view identifier) {
+ // GLSL forbids two underscores in a row.
+ // If an identifier contains "__" or "_X", replace each "_" in the identifier with "_X".
+ if (skstd::contains(identifier, "__") || skstd::contains(identifier, "_X")) {
+ for (const char c : identifier) {
+ if (c == '_') {
+ this->write("_X");
+ } else {
+ this->write(std::string_view(&c, 1));
+ }
+ }
+ } else {
+ this->write(identifier);
+ }
+}
+
+// Returns the name of the type with array dimensions, e.g. `float[2]`.
+std::string GLSLCodeGenerator::getTypeName(const Type& raw) {
+ const Type& type = raw.resolve();
+ switch (type.typeKind()) {
+ case Type::TypeKind::kVector: {
+ const Type& component = type.componentType();
+ std::string result;
+ if (component.matches(*fContext.fTypes.fFloat) ||
+ component.matches(*fContext.fTypes.fHalf)) {
+ result = "vec";
+ }
+ else if (component.isSigned()) {
+ result = "ivec";
+ }
+ else if (component.isUnsigned()) {
+ result = "uvec";
+ }
+ else if (component.matches(*fContext.fTypes.fBool)) {
+ result = "bvec";
+ }
+ else {
+ SK_ABORT("unsupported vector type");
+ }
+ result += std::to_string(type.columns());
+ return result;
+ }
+ case Type::TypeKind::kMatrix: {
+ std::string result;
+ const Type& component = type.componentType();
+ if (component.matches(*fContext.fTypes.fFloat) ||
+ component.matches(*fContext.fTypes.fHalf)) {
+ result = "mat";
+ }
+ else {
+ SK_ABORT("unsupported matrix type");
+ }
+ result += std::to_string(type.columns());
+ if (type.columns() != type.rows()) {
+ result += "x";
+ result += std::to_string(type.rows());
+ }
+ return result;
+ }
+ case Type::TypeKind::kArray: {
+ std::string baseTypeName = this->getTypeName(type.componentType());
+ if (type.isUnsizedArray()) {
+ return String::printf("%s[]", baseTypeName.c_str());
+ }
+ return String::printf("%s[%d]", baseTypeName.c_str(), type.columns());
+ }
+ case Type::TypeKind::kScalar: {
+ if (type.matches(*fContext.fTypes.fHalf)) {
+ return "float";
+ }
+ else if (type.matches(*fContext.fTypes.fShort)) {
+ return "int";
+ }
+ else if (type.matches(*fContext.fTypes.fUShort)) {
+ return "uint";
+ }
+
+ return std::string(type.name());
+ }
+ default:
+ return std::string(type.name());
+ }
+}
+
+void GLSLCodeGenerator::writeStructDefinition(const StructDefinition& s) {
+ const Type& type = s.type();
+ this->write("struct ");
+ this->writeIdentifier(type.name());
+ this->writeLine(" {");
+ fIndentation++;
+ for (const auto& f : type.fields()) {
+ this->writeModifiers(f.fModifiers, false);
+ this->writeTypePrecision(*f.fType);
+ const Type& baseType = f.fType->isArray() ? f.fType->componentType() : *f.fType;
+ this->writeType(baseType);
+ this->write(" ");
+ this->writeIdentifier(f.fName);
+ if (f.fType->isArray()) {
+ this->write("[" + std::to_string(f.fType->columns()) + "]");
+ }
+ this->writeLine(";");
+ }
+ fIndentation--;
+ this->writeLine("};");
+}
+
+void GLSLCodeGenerator::writeType(const Type& type) {
+ this->writeIdentifier(this->getTypeName(type));
+}
+
+void GLSLCodeGenerator::writeExpression(const Expression& expr, Precedence parentPrecedence) {
+ switch (expr.kind()) {
+ case Expression::Kind::kBinary:
+ this->writeBinaryExpression(expr.as<BinaryExpression>(), parentPrecedence);
+ break;
+ case Expression::Kind::kConstructorDiagonalMatrix:
+ this->writeConstructorDiagonalMatrix(expr.as<ConstructorDiagonalMatrix>(),
+ parentPrecedence);
+ break;
+ case Expression::Kind::kConstructorArrayCast:
+ this->writeExpression(*expr.as<ConstructorArrayCast>().argument(), parentPrecedence);
+ break;
+ case Expression::Kind::kConstructorCompound:
+ this->writeConstructorCompound(expr.as<ConstructorCompound>(), parentPrecedence);
+ break;
+ case Expression::Kind::kConstructorArray:
+ case Expression::Kind::kConstructorMatrixResize:
+ case Expression::Kind::kConstructorSplat:
+ case Expression::Kind::kConstructorStruct:
+ this->writeAnyConstructor(expr.asAnyConstructor(), parentPrecedence);
+ break;
+ case Expression::Kind::kConstructorScalarCast:
+ case Expression::Kind::kConstructorCompoundCast:
+ this->writeCastConstructor(expr.asAnyConstructor(), parentPrecedence);
+ break;
+ case Expression::Kind::kFieldAccess:
+ this->writeFieldAccess(expr.as<FieldAccess>());
+ break;
+ case Expression::Kind::kFunctionCall:
+ this->writeFunctionCall(expr.as<FunctionCall>());
+ break;
+ case Expression::Kind::kLiteral:
+ this->writeLiteral(expr.as<Literal>());
+ break;
+ case Expression::Kind::kPrefix:
+ this->writePrefixExpression(expr.as<PrefixExpression>(), parentPrecedence);
+ break;
+ case Expression::Kind::kPostfix:
+ this->writePostfixExpression(expr.as<PostfixExpression>(), parentPrecedence);
+ break;
+ case Expression::Kind::kSetting:
+ this->writeExpression(*expr.as<Setting>().toLiteral(fContext), parentPrecedence);
+ break;
+ case Expression::Kind::kSwizzle:
+ this->writeSwizzle(expr.as<Swizzle>());
+ break;
+ case Expression::Kind::kVariableReference:
+ this->writeVariableReference(expr.as<VariableReference>());
+ break;
+ case Expression::Kind::kTernary:
+ this->writeTernaryExpression(expr.as<TernaryExpression>(), parentPrecedence);
+ break;
+ case Expression::Kind::kIndex:
+ this->writeIndexExpression(expr.as<IndexExpression>());
+ break;
+ default:
+ SkDEBUGFAILF("unsupported expression: %s", expr.description().c_str());
+ break;
+ }
+}
+
+static bool is_abs(Expression& expr) {
+ return expr.is<FunctionCall>() &&
+ expr.as<FunctionCall>().function().intrinsicKind() == k_abs_IntrinsicKind;
+}
+
+// turns min(abs(x), y) into ((tmpVar1 = abs(x)) < (tmpVar2 = y) ? tmpVar1 : tmpVar2) to avoid a
+// Tegra3 compiler bug.
+void GLSLCodeGenerator::writeMinAbsHack(Expression& absExpr, Expression& otherExpr) {
+ SkASSERT(!this->caps().fCanUseMinAndAbsTogether);
+ std::string tmpVar1 = "minAbsHackVar" + std::to_string(fVarCount++);
+ std::string tmpVar2 = "minAbsHackVar" + std::to_string(fVarCount++);
+ this->fFunctionHeader += std::string(" ") + this->getTypePrecision(absExpr.type()) +
+ this->getTypeName(absExpr.type()) + " " + tmpVar1 + ";\n";
+ this->fFunctionHeader += std::string(" ") + this->getTypePrecision(otherExpr.type()) +
+ this->getTypeName(otherExpr.type()) + " " + tmpVar2 + ";\n";
+ this->write("((" + tmpVar1 + " = ");
+ this->writeExpression(absExpr, Precedence::kTopLevel);
+ this->write(") < (" + tmpVar2 + " = ");
+ this->writeExpression(otherExpr, Precedence::kAssignment);
+ this->write(") ? " + tmpVar1 + " : " + tmpVar2 + ")");
+}
+
+void GLSLCodeGenerator::writeInverseSqrtHack(const Expression& x) {
+ this->write("(1.0 / sqrt(");
+ this->writeExpression(x, Precedence::kTopLevel);
+ this->write("))");
+}
+
+static constexpr char kDeterminant2[] = R"(
+float _determinant2(mat2 m) {
+return m[0].x*m[1].y - m[0].y*m[1].x;
+}
+)";
+
+static constexpr char kDeterminant3[] = R"(
+float _determinant3(mat3 m) {
+float
+ a00 = m[0].x, a01 = m[0].y, a02 = m[0].z,
+ a10 = m[1].x, a11 = m[1].y, a12 = m[1].z,
+ a20 = m[2].x, a21 = m[2].y, a22 = m[2].z,
+ b01 = a22*a11 - a12*a21,
+ b11 =-a22*a10 + a12*a20,
+ b21 = a21*a10 - a11*a20;
+return a00*b01 + a01*b11 + a02*b21;
+}
+)";
+
+static constexpr char kDeterminant4[] = R"(
+mat4 _determinant4(mat4 m) {
+float
+ a00 = m[0].x, a01 = m[0].y, a02 = m[0].z, a03 = m[0].w,
+ a10 = m[1].x, a11 = m[1].y, a12 = m[1].z, a13 = m[1].w,
+ a20 = m[2].x, a21 = m[2].y, a22 = m[2].z, a23 = m[2].w,
+ a30 = m[3].x, a31 = m[3].y, a32 = m[3].z, a33 = m[3].w,
+ b00 = a00*a11 - a01*a10,
+ b01 = a00*a12 - a02*a10,
+ b02 = a00*a13 - a03*a10,
+ b03 = a01*a12 - a02*a11,
+ b04 = a01*a13 - a03*a11,
+ b05 = a02*a13 - a03*a12,
+ b06 = a20*a31 - a21*a30,
+ b07 = a20*a32 - a22*a30,
+ b08 = a20*a33 - a23*a30,
+ b09 = a21*a32 - a22*a31,
+ b10 = a21*a33 - a23*a31,
+ b11 = a22*a33 - a23*a32;
+return b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06;
+}
+)";
+
+void GLSLCodeGenerator::writeDeterminantHack(const Expression& mat) {
+ const Type& type = mat.type();
+ if (type.matches(*fContext.fTypes.fFloat2x2) ||
+ type.matches(*fContext.fTypes.fHalf2x2)) {
+ this->write("_determinant2(");
+ if (!fWrittenDeterminant2) {
+ fWrittenDeterminant2 = true;
+ fExtraFunctions.writeText(kDeterminant2);
+ }
+ } else if (type.matches(*fContext.fTypes.fFloat3x3) ||
+ type.matches(*fContext.fTypes.fHalf3x3)) {
+ this->write("_determinant3(");
+ if (!fWrittenDeterminant3) {
+ fWrittenDeterminant3 = true;
+ fExtraFunctions.writeText(kDeterminant3);
+ }
+ } else if (type.matches(*fContext.fTypes.fFloat4x4) ||
+ type.matches(*fContext.fTypes.fHalf4x4)) {
+ this->write("_determinant4(");
+ if (!fWrittenDeterminant4) {
+ fWrittenDeterminant4 = true;
+ fExtraFunctions.writeText(kDeterminant4);
+ }
+ } else {
+ SkDEBUGFAILF("no polyfill for determinant(%s)", type.description().c_str());
+ this->write("determinant(");
+ }
+ this->writeExpression(mat, Precedence::kTopLevel);
+ this->write(")");
+}
+
+static constexpr char kInverse2[] = R"(
+mat2 _inverse2(mat2 m) {
+return mat2(m[1].y, -m[0].y, -m[1].x, m[0].x) / (m[0].x * m[1].y - m[0].y * m[1].x);
+}
+)";
+
+static constexpr char kInverse3[] = R"(
+mat3 _inverse3(mat3 m) {
+float
+ a00 = m[0].x, a01 = m[0].y, a02 = m[0].z,
+ a10 = m[1].x, a11 = m[1].y, a12 = m[1].z,
+ a20 = m[2].x, a21 = m[2].y, a22 = m[2].z,
+ b01 = a22*a11 - a12*a21,
+ b11 =-a22*a10 + a12*a20,
+ b21 = a21*a10 - a11*a20,
+ det = a00*b01 + a01*b11 + a02*b21;
+return mat3(
+ b01, (-a22*a01 + a02*a21), ( a12*a01 - a02*a11),
+ b11, ( a22*a00 - a02*a20), (-a12*a00 + a02*a10),
+ b21, (-a21*a00 + a01*a20), ( a11*a00 - a01*a10)) / det;
+}
+)";
+
+static constexpr char kInverse4[] = R"(
+mat4 _inverse4(mat4 m) {
+float
+ a00 = m[0].x, a01 = m[0].y, a02 = m[0].z, a03 = m[0].w,
+ a10 = m[1].x, a11 = m[1].y, a12 = m[1].z, a13 = m[1].w,
+ a20 = m[2].x, a21 = m[2].y, a22 = m[2].z, a23 = m[2].w,
+ a30 = m[3].x, a31 = m[3].y, a32 = m[3].z, a33 = m[3].w,
+ b00 = a00*a11 - a01*a10,
+ b01 = a00*a12 - a02*a10,
+ b02 = a00*a13 - a03*a10,
+ b03 = a01*a12 - a02*a11,
+ b04 = a01*a13 - a03*a11,
+ b05 = a02*a13 - a03*a12,
+ b06 = a20*a31 - a21*a30,
+ b07 = a20*a32 - a22*a30,
+ b08 = a20*a33 - a23*a30,
+ b09 = a21*a32 - a22*a31,
+ b10 = a21*a33 - a23*a31,
+ b11 = a22*a33 - a23*a32,
+ det = b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06;
+return mat4(
+ a11*b11 - a12*b10 + a13*b09,
+ a02*b10 - a01*b11 - a03*b09,
+ a31*b05 - a32*b04 + a33*b03,
+ a22*b04 - a21*b05 - a23*b03,
+ a12*b08 - a10*b11 - a13*b07,
+ a00*b11 - a02*b08 + a03*b07,
+ a32*b02 - a30*b05 - a33*b01,
+ a20*b05 - a22*b02 + a23*b01,
+ a10*b10 - a11*b08 + a13*b06,
+ a01*b08 - a00*b10 - a03*b06,
+ a30*b04 - a31*b02 + a33*b00,
+ a21*b02 - a20*b04 - a23*b00,
+ a11*b07 - a10*b09 - a12*b06,
+ a00*b09 - a01*b07 + a02*b06,
+ a31*b01 - a30*b03 - a32*b00,
+ a20*b03 - a21*b01 + a22*b00) / det;
+}
+)";
+
+void GLSLCodeGenerator::writeInverseHack(const Expression& mat) {
+ const Type& type = mat.type();
+ if (type.matches(*fContext.fTypes.fFloat2x2) || type.matches(*fContext.fTypes.fHalf2x2)) {
+ this->write("_inverse2(");
+ if (!fWrittenInverse2) {
+ fWrittenInverse2 = true;
+ fExtraFunctions.writeText(kInverse2);
+ }
+ } else if (type.matches(*fContext.fTypes.fFloat3x3) ||
+ type.matches(*fContext.fTypes.fHalf3x3)) {
+ this->write("_inverse3(");
+ if (!fWrittenInverse3) {
+ fWrittenInverse3 = true;
+ fExtraFunctions.writeText(kInverse3);
+ }
+ } else if (type.matches(*fContext.fTypes.fFloat4x4) ||
+ type.matches(*fContext.fTypes.fHalf4x4)) {
+ this->write("_inverse4(");
+ if (!fWrittenInverse4) {
+ fWrittenInverse4 = true;
+ fExtraFunctions.writeText(kInverse4);
+ }
+ } else {
+ SkDEBUGFAILF("no polyfill for inverse(%s)", type.description().c_str());
+ this->write("inverse(");
+ }
+ this->writeExpression(mat, Precedence::kTopLevel);
+ this->write(")");
+}
+
+void GLSLCodeGenerator::writeTransposeHack(const Expression& mat) {
+ const Type& type = mat.type();
+ int c = type.columns();
+ int r = type.rows();
+ std::string name = "transpose" + std::to_string(c) + std::to_string(r);
+
+ SkASSERT(c >= 2 && c <= 4);
+ SkASSERT(r >= 2 && r <= 4);
+ bool* writtenThisTranspose = &fWrittenTranspose[c - 2][r - 2];
+ if (!*writtenThisTranspose) {
+ *writtenThisTranspose = true;
+ std::string typeName = this->getTypeName(type);
+ const Type& base = type.componentType();
+ std::string transposed = this->getTypeName(base.toCompound(fContext, r, c));
+ fExtraFunctions.writeText((transposed + " " + name + "(" + typeName + " m) { return " +
+ transposed + "(").c_str());
+ auto separator = SkSL::String::Separator();
+ for (int row = 0; row < r; ++row) {
+ for (int column = 0; column < c; ++column) {
+ fExtraFunctions.writeText(separator().c_str());
+ fExtraFunctions.writeText(("m[" + std::to_string(column) + "][" +
+ std::to_string(row) + "]").c_str());
+ }
+ }
+ fExtraFunctions.writeText("); }\n");
+ }
+ this->write(name + "(");
+ this->writeExpression(mat, Precedence::kTopLevel);
+ this->write(")");
+}
+
+void GLSLCodeGenerator::writeFunctionCall(const FunctionCall& c) {
+ const FunctionDeclaration& function = c.function();
+ const ExpressionArray& arguments = c.arguments();
+ bool isTextureFunctionWithBias = false;
+ bool nameWritten = false;
+ const char* closingParen = ")";
+ switch (c.function().intrinsicKind()) {
+ case k_abs_IntrinsicKind: {
+ if (!this->caps().fEmulateAbsIntFunction)
+ break;
+ SkASSERT(arguments.size() == 1);
+ if (!arguments[0]->type().matches(*fContext.fTypes.fInt)) {
+ break;
+ }
+ // abs(int) on Intel OSX is incorrect, so emulate it:
+ this->write("_absemulation");
+ nameWritten = true;
+ if (!fWrittenAbsEmulation) {
+ fWrittenAbsEmulation = true;
+ fExtraFunctions.writeText("int _absemulation(int x) { return x * sign(x); }\n");
+ }
+ break;
+ }
+ case k_atan_IntrinsicKind:
+ if (this->caps().fMustForceNegatedAtanParamToFloat &&
+ arguments.size() == 2 &&
+ arguments[1]->kind() == Expression::Kind::kPrefix) {
+ const PrefixExpression& p = (PrefixExpression&) *arguments[1];
+ if (p.getOperator().kind() == Operator::Kind::MINUS) {
+ this->write("atan(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(", -1.0 * ");
+ this->writeExpression(*p.operand(), Precedence::kMultiplicative);
+ this->write(")");
+ return;
+ }
+ }
+ break;
+ case k_ldexp_IntrinsicKind:
+ if (this->caps().fMustForceNegatedLdexpParamToMultiply &&
+ arguments.size() == 2 &&
+ arguments[1]->is<PrefixExpression>()) {
+ const PrefixExpression& p = arguments[1]->as<PrefixExpression>();
+ if (p.getOperator().kind() == Operator::Kind::MINUS) {
+ this->write("ldexp(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(", ");
+ this->writeExpression(*p.operand(), Precedence::kMultiplicative);
+ this->write(" * -1)");
+ return;
+ }
+ }
+ break;
+ case k_dFdy_IntrinsicKind:
+ // Flipping Y also negates the Y derivatives.
+ closingParen = "))";
+ this->write("(");
+ if (!fProgram.fConfig->fSettings.fForceNoRTFlip) {
+ this->write(SKSL_RTFLIP_NAME ".y * ");
+ }
+ this->write("dFdy");
+ nameWritten = true;
+ [[fallthrough]];
+ case k_dFdx_IntrinsicKind:
+ case k_fwidth_IntrinsicKind:
+ if (!fFoundDerivatives &&
+ this->caps().shaderDerivativeExtensionString()) {
+ this->writeExtension(this->caps().shaderDerivativeExtensionString());
+ fFoundDerivatives = true;
+ }
+ break;
+ case k_determinant_IntrinsicKind:
+ if (!this->caps().fBuiltinDeterminantSupport) {
+ SkASSERT(arguments.size() == 1);
+ this->writeDeterminantHack(*arguments[0]);
+ return;
+ }
+ break;
+ case k_fma_IntrinsicKind:
+ if (!this->caps().fBuiltinFMASupport) {
+ SkASSERT(arguments.size() == 3);
+ this->write("((");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(") * (");
+ this->writeExpression(*arguments[1], Precedence::kSequence);
+ this->write(") + (");
+ this->writeExpression(*arguments[2], Precedence::kSequence);
+ this->write("))");
+ return;
+ }
+ break;
+ case k_fract_IntrinsicKind:
+ if (!this->caps().fCanUseFractForNegativeValues) {
+ SkASSERT(arguments.size() == 1);
+ this->write("(0.5 - sign(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(") * (0.5 - fract(abs(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write("))))");
+ return;
+ }
+ break;
+ case k_inverse_IntrinsicKind:
+ if (this->caps().fGLSLGeneration < SkSL::GLSLGeneration::k140) {
+ SkASSERT(arguments.size() == 1);
+ this->writeInverseHack(*arguments[0]);
+ return;
+ }
+ break;
+ case k_inversesqrt_IntrinsicKind:
+ if (this->caps().fGLSLGeneration < SkSL::GLSLGeneration::k130) {
+ SkASSERT(arguments.size() == 1);
+ this->writeInverseSqrtHack(*arguments[0]);
+ return;
+ }
+ break;
+ case k_min_IntrinsicKind:
+ if (!this->caps().fCanUseMinAndAbsTogether) {
+ SkASSERT(arguments.size() == 2);
+ if (is_abs(*arguments[0])) {
+ this->writeMinAbsHack(*arguments[0], *arguments[1]);
+ return;
+ }
+ if (is_abs(*arguments[1])) {
+ // note that this violates the GLSL left-to-right evaluation semantics.
+ // I doubt it will ever end up mattering, but it's worth calling out.
+ this->writeMinAbsHack(*arguments[1], *arguments[0]);
+ return;
+ }
+ }
+ break;
+ case k_pow_IntrinsicKind:
+ if (!this->caps().fRemovePowWithConstantExponent) {
+ break;
+ }
+ // pow(x, y) on some NVIDIA drivers causes crashes if y is a constant.
+ // It's hard to tell what constitutes "constant" here, so just replace in all cases.
+
+ // Change pow(x, y) into exp2(y * log2(x))
+ this->write("exp2(");
+ this->writeExpression(*arguments[1], Precedence::kMultiplicative);
+ this->write(" * log2(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write("))");
+ return;
+ case k_saturate_IntrinsicKind:
+ SkASSERT(arguments.size() == 1);
+ this->write("clamp(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(", 0.0, 1.0)");
+ return;
+ case k_sample_IntrinsicKind: {
+ const char* dim = "";
+ bool proj = false;
+ const Type& arg0Type = arguments[0]->type();
+ const Type& arg1Type = arguments[1]->type();
+ switch (arg0Type.dimensions()) {
+ case SpvDim1D:
+ dim = "1D";
+ isTextureFunctionWithBias = true;
+ if (arg1Type.matches(*fContext.fTypes.fFloat)) {
+ proj = false;
+ } else {
+ SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat2));
+ proj = true;
+ }
+ break;
+ case SpvDim2D:
+ dim = "2D";
+ if (!arg0Type.matches(*fContext.fTypes.fSamplerExternalOES)) {
+ isTextureFunctionWithBias = true;
+ }
+ if (arg1Type.matches(*fContext.fTypes.fFloat2)) {
+ proj = false;
+ } else {
+ SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat3));
+ proj = true;
+ }
+ break;
+ case SpvDim3D:
+ dim = "3D";
+ isTextureFunctionWithBias = true;
+ if (arg1Type.matches(*fContext.fTypes.fFloat3)) {
+ proj = false;
+ } else {
+ SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat4));
+ proj = true;
+ }
+ break;
+ case SpvDimCube:
+ dim = "Cube";
+ isTextureFunctionWithBias = true;
+ proj = false;
+ break;
+ case SpvDimRect:
+ dim = "2DRect";
+ proj = false;
+ break;
+ case SpvDimBuffer:
+ SkASSERT(false); // doesn't exist
+ dim = "Buffer";
+ proj = false;
+ break;
+ case SpvDimSubpassData:
+ SkASSERT(false); // doesn't exist
+ dim = "SubpassData";
+ proj = false;
+ break;
+ }
+ this->write("texture");
+ if (this->caps().fGLSLGeneration < SkSL::GLSLGeneration::k130) {
+ this->write(dim);
+ }
+ if (proj) {
+ this->write("Proj");
+ }
+ nameWritten = true;
+ break;
+ }
+ case k_sampleGrad_IntrinsicKind: {
+ SkASSERT(arguments.size() == 4);
+ this->write("textureGrad");
+ nameWritten = true;
+ break;
+ }
+ case k_sampleLod_IntrinsicKind: {
+ SkASSERT(arguments.size() == 3);
+ this->write("textureLod");
+ nameWritten = true;
+ break;
+ }
+ case k_transpose_IntrinsicKind:
+ if (this->caps().fGLSLGeneration < SkSL::GLSLGeneration::k130) {
+ SkASSERT(arguments.size() == 1);
+ this->writeTransposeHack(*arguments[0]);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!nameWritten) {
+ this->writeIdentifier(function.mangledName());
+ }
+ this->write("(");
+ auto separator = SkSL::String::Separator();
+ for (const auto& arg : arguments) {
+ this->write(separator());
+ this->writeExpression(*arg, Precedence::kSequence);
+ }
+ if (fProgram.fConfig->fSettings.fSharpenTextures && isTextureFunctionWithBias) {
+ this->write(String::printf(", %g", kSharpenTexturesBias));
+ }
+ this->write(closingParen);
+}
+
+// Emits a diagonal-matrix constructor (e.g. `mat2(x)`). Most shapes defer to
+// writeAnyConstructor; 4x2 matrices get the driver-bug workaround described inline.
+void GLSLCodeGenerator::writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c,
+                                                       Precedence parentPrecedence) {
+    if (c.type().columns() == 4 && c.type().rows() == 2) {
+        // Due to a longstanding bug in glslang and Mesa, several GPU drivers generate diagonal 4x2
+        // matrices incorrectly. (skia:12003, https://github.com/KhronosGroup/glslang/pull/2646)
+        // We can work around this issue by multiplying a scalar by the identity matrix.
+        // In practice, this doesn't come up naturally in real code and we don't know every affected
+        // driver, so we just apply this workaround everywhere.
+        this->write("(");
+        this->writeType(c.type());
+        this->write("(1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0) * ");
+        this->writeExpression(*c.argument(), Precedence::kMultiplicative);
+        this->write(")");
+        return;
+    }
+    this->writeAnyConstructor(c, parentPrecedence);
+}
+
+// Emits a compound constructor. The general case goes through writeAnyConstructor;
+// a matrix built from a single vec4 is rewritten as two .xy/.zw halves (see below).
+void GLSLCodeGenerator::writeConstructorCompound(const ConstructorCompound& c,
+                                                 Precedence parentPrecedence) {
+    // If this is a 2x2 matrix constructor containing a single argument...
+    if (c.type().isMatrix() && c.arguments().size() == 1) {
+        // ... and that argument is a vec4...
+        const Expression& expr = *c.arguments().front();
+        if (expr.type().isVector() && expr.type().columns() == 4) {
+            // ... let's rewrite the cast to dodge issues on very old GPUs. (skia:13559)
+            if (Analysis::IsTrivialExpression(expr)) {
+                // Trivial expressions are safe to emit twice (once per half).
+                this->writeType(c.type());
+                this->write("(");
+                this->writeExpression(expr, Precedence::kPostfix);
+                this->write(".xy, ");
+                this->writeExpression(expr, Precedence::kPostfix);
+                this->write(".zw)");
+            } else {
+                // Non-trivial expressions are evaluated once into a scratch variable
+                // (declared in fFunctionHeader), then swizzled via the comma operator.
+                std::string tempVec = "_tempVec" + std::to_string(fVarCount++);
+                this->fFunctionHeader += std::string("    ") + this->getTypePrecision(expr.type()) +
+                                         this->getTypeName(expr.type()) + " " + tempVec + ";\n";
+                this->write("((");
+                this->write(tempVec);
+                this->write(" = ");
+                this->writeExpression(expr, Precedence::kAssignment);
+                this->write("), ");
+                this->writeType(c.type());
+                this->write("(");
+                this->write(tempVec);
+                this->write(".xy, ");
+                this->write(tempVec);
+                this->write(".zw))");
+            }
+            return;
+        }
+    }
+    this->writeAnyConstructor(c, parentPrecedence);
+}
+
+// Emits a single-argument cast constructor, eliding the cast entirely when the
+// source and destination types spell identically in GLSL.
+void GLSLCodeGenerator::writeCastConstructor(const AnyConstructor& c, Precedence parentPrecedence) {
+    const auto arguments = c.argumentSpan();
+    SkASSERT(arguments.size() == 1);
+
+    const Expression& argument = *arguments.front();
+    if ((this->getTypeName(c.type()) == this->getTypeName(argument.type()) ||
+         (argument.type().matches(*fContext.fTypes.fFloatLiteral)))) {
+        // In cases like half(float), they're different types as far as SkSL is concerned but
+        // the same type as far as GLSL is concerned. We avoid a redundant float(float) by just
+        // writing out the inner expression here.
+        this->writeExpression(argument, parentPrecedence);
+        return;
+    }
+
+    // This cast should be emitted as-is.
+    return this->writeAnyConstructor(c, parentPrecedence);
+}
+
+// Fallback constructor emission: `Type(arg0, arg1, ...)` with comma separators.
+void GLSLCodeGenerator::writeAnyConstructor(const AnyConstructor& c, Precedence parentPrecedence) {
+    this->writeType(c.type());
+    this->write("(");
+    auto separator = SkSL::String::Separator();
+    for (const auto& arg : c.argumentSpan()) {
+        this->write(separator());
+        this->writeExpression(*arg, Precedence::kSequence);
+    }
+    this->write(")");
+}
+
+// Emits a reference to the fragment coordinate. When gl_FragCoord is unusable on
+// the target, reconstructs it from sk_FragCoord_Workaround; otherwise wraps
+// gl_FragCoord in sk_FragCoord with optional y-flip. Setup code is emitted into
+// fFunctionHeader once per function, guarded by the fSetup* flags.
+void GLSLCodeGenerator::writeFragCoord() {
+    if (!this->caps().fCanUseFragCoord) {
+        if (!fSetupFragCoordWorkaround) {
+            const char* precision = this->usesPrecisionModifiers() ? "highp " : "";
+            fFunctionHeader += precision;
+            fFunctionHeader += "    float sk_FragCoord_InvW = 1. / sk_FragCoord_Workaround.w;\n";
+            fFunctionHeader += precision;
+            fFunctionHeader += "    vec4 sk_FragCoord_Resolved = "
+                "vec4(sk_FragCoord_Workaround.xyz * sk_FragCoord_InvW, sk_FragCoord_InvW);\n";
+            // Ensure that we get exact .5 values for x and y.
+            fFunctionHeader += "    sk_FragCoord_Resolved.xy = floor(sk_FragCoord_Resolved.xy) + "
+                               "vec2(.5);\n";
+            fSetupFragCoordWorkaround = true;
+        }
+        this->writeIdentifier("sk_FragCoord_Resolved");
+        return;
+    }
+
+    if (!fSetupFragPosition) {
+        // Build sk_FragCoord from gl_FragCoord, flipping y via the RT-flip uniform
+        // unless the program forces no flip.
+        fFunctionHeader += this->usesPrecisionModifiers() ? "highp " : "";
+        fFunctionHeader += "    vec4 sk_FragCoord = vec4("
+                "gl_FragCoord.x, ";
+        if (fProgram.fConfig->fSettings.fForceNoRTFlip) {
+            fFunctionHeader += "gl_FragCoord.y, ";
+        } else {
+            fFunctionHeader += SKSL_RTFLIP_NAME ".x + " SKSL_RTFLIP_NAME ".y * gl_FragCoord.y, ";
+        }
+        fFunctionHeader +=
+                "gl_FragCoord.z, "
+                "gl_FragCoord.w);\n";
+        fSetupFragPosition = true;
+    }
+    this->writeIdentifier("sk_FragCoord");
+}
+
+// Emits a variable reference, mapping SkSL builtins (sk_FragColor, sk_FragCoord,
+// sk_Clockwise, ...) to their GLSL equivalents; all other variables are written
+// by mangled name.
+void GLSLCodeGenerator::writeVariableReference(const VariableReference& ref) {
+    switch (ref.variable()->modifiers().fLayout.fBuiltin) {
+        case SK_FRAGCOLOR_BUILTIN:
+            if (this->caps().mustDeclareFragmentShaderOutput()) {
+                this->writeIdentifier("sk_FragColor");
+            } else {
+                this->writeIdentifier("gl_FragColor");
+            }
+            break;
+        case SK_SECONDARYFRAGCOLOR_BUILTIN:
+            this->writeIdentifier("gl_SecondaryFragColorEXT");
+            break;
+        case SK_FRAGCOORD_BUILTIN:
+            this->writeFragCoord();
+            break;
+        case SK_CLOCKWISE_BUILTIN:
+            // sk_Clockwise is derived from gl_FrontFacing once per function, with the
+            // sense inverted when the render target is flipped.
+            if (!fSetupClockwise) {
+                fFunctionHeader += "    bool sk_Clockwise = gl_FrontFacing;\n";
+                if (!fProgram.fConfig->fSettings.fForceNoRTFlip) {
+                    fFunctionHeader += "    if (" SKSL_RTFLIP_NAME ".y < 0.0) {\n"
+                                       "        sk_Clockwise = !sk_Clockwise;\n"
+                                       "    }\n";
+                }
+                fSetupClockwise = true;
+            }
+            this->writeIdentifier("sk_Clockwise");
+            break;
+        case SK_VERTEXID_BUILTIN:
+            this->writeIdentifier("gl_VertexID");
+            break;
+        case SK_INSTANCEID_BUILTIN:
+            this->writeIdentifier("gl_InstanceID");
+            break;
+        case SK_LASTFRAGCOLOR_BUILTIN:
+            // Framebuffer fetch is required to read the previous fragment color.
+            if (this->caps().fFBFetchSupport) {
+                this->write(this->caps().fFBFetchColorName);
+            } else {
+                fContext.fErrors->error(ref.fPosition,
+                                        "sk_LastFragColor requires framebuffer fetch support");
+            }
+            break;
+        default:
+            this->writeIdentifier(ref.variable()->mangledName());
+            break;
+    }
+}
+
+// Emits `base[index]`.
+void GLSLCodeGenerator::writeIndexExpression(const IndexExpression& expr) {
+    this->writeExpression(*expr.base(), Precedence::kPostfix);
+    this->write("[");
+    this->writeExpression(*expr.index(), Precedence::kTopLevel);
+    this->write("]");
+}
+
+// Returns true when the field access targets the sk_Position builtin.
+// NOTE(review): file-local helper with external linkage — presumably intended to be
+// `static` or in an anonymous namespace; confirm against upstream before changing.
+bool is_sk_position(const FieldAccess& f) {
+    return f.base()->type().fields()[f.fieldIndex()].fModifiers.fLayout.fBuiltin ==
+           SK_POSITION_BUILTIN;
+}
+
+// Emits a struct/interface-block field access. Builtin fields map to
+// gl_Position / gl_PointSize; anonymous-block fields skip the `base.` prefix.
+void GLSLCodeGenerator::writeFieldAccess(const FieldAccess& f) {
+    if (f.ownerKind() == FieldAccess::OwnerKind::kDefault) {
+        this->writeExpression(*f.base(), Precedence::kPostfix);
+        this->write(".");
+    }
+    const Type& baseType = f.base()->type();
+    int builtin = baseType.fields()[f.fieldIndex()].fModifiers.fLayout.fBuiltin;
+    if (builtin == SK_POSITION_BUILTIN) {
+        this->writeIdentifier("gl_Position");
+    } else if (builtin == SK_POINTSIZE_BUILTIN) {
+        this->writeIdentifier("gl_PointSize");
+    } else {
+        this->writeIdentifier(baseType.fields()[f.fieldIndex()].fName);
+    }
+}
+
+// Emits `base.xyzw`-style swizzles. The literal "x\0y\0z\0w\0" packs four
+// NUL-terminated one-character strings; &str[c * 2] selects the one for component c.
+void GLSLCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
+    this->writeExpression(*swizzle.base(), Precedence::kPostfix);
+    this->write(".");
+    for (int c : swizzle.components()) {
+        SkASSERT(c >= 0 && c <= 3);
+        this->write(&("x\0y\0z\0w\0"[c * 2]));
+    }
+}
+
+// Rewrites `matA ==/!= matB` for drivers with broken matrix comparisons: both
+// operands are first assigned to scratch variables (declared in fFunctionHeader),
+// then compared, using the comma operator to sequence the assignments.
+void GLSLCodeGenerator::writeMatrixComparisonWorkaround(const BinaryExpression& b) {
+    const Expression& left = *b.left();
+    const Expression& right = *b.right();
+    Operator op = b.getOperator();
+
+    SkASSERT(op.kind() == Operator::Kind::EQEQ || op.kind() == Operator::Kind::NEQ);
+    SkASSERT(left.type().isMatrix());
+    SkASSERT(right.type().isMatrix());
+
+    std::string tempMatrix1 = "_tempMatrix" + std::to_string(fVarCount++);
+    std::string tempMatrix2 = "_tempMatrix" + std::to_string(fVarCount++);
+
+    this->fFunctionHeader += std::string("    ") + this->getTypePrecision(left.type()) +
+                             this->getTypeName(left.type()) + " " + tempMatrix1 + ";\n    " +
+                             this->getTypePrecision(right.type()) +
+                             this->getTypeName(right.type()) + " " + tempMatrix2 + ";\n";
+    this->write("((" + tempMatrix1 + " = ");
+    this->writeExpression(left, Precedence::kAssignment);
+    this->write("), (" + tempMatrix2 + " = ");
+    this->writeExpression(right, Precedence::kAssignment);
+    this->write("), (" + tempMatrix1);
+    this->write(op.operatorName());
+    this->write(tempMatrix2 + "))");
+}
+
+// Emits a binary expression, routing through driver workarounds first:
+// short-circuit ops may become ternaries, matrix comparisons may be rewritten,
+// and sk_Position assignments may be mirrored into sk_FragCoord_Workaround.
+void GLSLCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
+                                              Precedence parentPrecedence) {
+    const Expression& left = *b.left();
+    const Expression& right = *b.right();
+    Operator op = b.getOperator();
+    if (this->caps().fUnfoldShortCircuitAsTernary &&
+        (op.kind() == Operator::Kind::LOGICALAND || op.kind() == Operator::Kind::LOGICALOR)) {
+        this->writeShortCircuitWorkaroundExpression(b, parentPrecedence);
+        return;
+    }
+
+    if (this->caps().fRewriteMatrixComparisons &&
+        left.type().isMatrix() && right.type().isMatrix() &&
+        (op.kind() == Operator::Kind::EQEQ || op.kind() == Operator::Kind::NEQ)) {
+        this->writeMatrixComparisonWorkaround(b);
+        return;
+    }
+
+    // Parenthesize whenever our precedence is not strictly tighter than the parent's.
+    Precedence precedence = op.getBinaryPrecedence();
+    if (precedence >= parentPrecedence) {
+        this->write("(");
+    }
+    // In vertex shaders without usable gl_FragCoord, assignments to sk_Position are
+    // also captured into sk_FragCoord_Workaround for the fragment stage.
+    bool positionWorkaround = ProgramConfig::IsVertex(fProgram.fConfig->fKind) &&
+                              op.isAssignment() &&
+                              left.is<FieldAccess>() &&
+                              is_sk_position(left.as<FieldAccess>()) &&
+                              !Analysis::ContainsRTAdjust(right) &&
+                              !this->caps().fCanUseFragCoord;
+    if (positionWorkaround) {
+        this->write("sk_FragCoord_Workaround = (");
+    }
+    this->writeExpression(left, precedence);
+    this->write(op.operatorName());
+    this->writeExpression(right, precedence);
+    if (positionWorkaround) {
+        this->write(")");
+    }
+    if (precedence >= parentPrecedence) {
+        this->write(")");
+    }
+}
+
+// Workaround for drivers that mis-handle && / ||: emit an equivalent ternary
+// (which still short-circuits evaluation of the right-hand side).
+void GLSLCodeGenerator::writeShortCircuitWorkaroundExpression(const BinaryExpression& b,
+                                                              Precedence parentPrecedence) {
+    if (Precedence::kTernary >= parentPrecedence) {
+        this->write("(");
+    }
+
+    // Transform:
+    // a && b  =>   a ? b : false
+    // a || b  =>   a ? true : b
+    this->writeExpression(*b.left(), Precedence::kTernary);
+    this->write(" ? ");
+    if (b.getOperator().kind() == Operator::Kind::LOGICALAND) {
+        this->writeExpression(*b.right(), Precedence::kTernary);
+    } else {
+        Literal boolTrue(Position(), /*value=*/1, fContext.fTypes.fBool.get());
+        this->writeLiteral(boolTrue);
+    }
+    this->write(" : ");
+    if (b.getOperator().kind() == Operator::Kind::LOGICALAND) {
+        Literal boolFalse(Position(), /*value=*/0, fContext.fTypes.fBool.get());
+        this->writeLiteral(boolFalse);
+    } else {
+        this->writeExpression(*b.right(), Precedence::kTernary);
+    }
+    if (Precedence::kTernary >= parentPrecedence) {
+        this->write(")");
+    }
+}
+
+// Emits `test ? ifTrue : ifFalse`, parenthesized when the parent binds at least as tightly.
+void GLSLCodeGenerator::writeTernaryExpression(const TernaryExpression& t,
+                                               Precedence parentPrecedence) {
+    if (Precedence::kTernary >= parentPrecedence) {
+        this->write("(");
+    }
+    this->writeExpression(*t.test(), Precedence::kTernary);
+    this->write(" ? ");
+    this->writeExpression(*t.ifTrue(), Precedence::kTernary);
+    this->write(" : ");
+    this->writeExpression(*t.ifFalse(), Precedence::kTernary);
+    if (Precedence::kTernary >= parentPrecedence) {
+        this->write(")");
+    }
+}
+
+// Emits a prefix expression such as `-x` or `!x`.
+void GLSLCodeGenerator::writePrefixExpression(const PrefixExpression& p,
+                                              Precedence parentPrecedence) {
+    if (Precedence::kPrefix >= parentPrecedence) {
+        this->write("(");
+    }
+    this->write(p.getOperator().tightOperatorName());
+    this->writeExpression(*p.operand(), Precedence::kPrefix);
+    if (Precedence::kPrefix >= parentPrecedence) {
+        this->write(")");
+    }
+}
+
+// Emits a postfix expression such as `x++` or `x--`.
+void GLSLCodeGenerator::writePostfixExpression(const PostfixExpression& p,
+                                               Precedence parentPrecedence) {
+    if (Precedence::kPostfix >= parentPrecedence) {
+        this->write("(");
+    }
+    this->writeExpression(*p.operand(), Precedence::kPostfix);
+    this->write(p.getOperator().tightOperatorName());
+    if (Precedence::kPostfix >= parentPrecedence) {
+        this->write(")");
+    }
+}
+
+// Emits a literal. Unsigned integer types are masked to their bit width and get
+// a "u" suffix; everything else uses the literal's own description.
+void GLSLCodeGenerator::writeLiteral(const Literal& l) {
+    const Type& type = l.type();
+    if (type.isInteger()) {
+        if (type.matches(*fContext.fTypes.fUInt)) {
+            this->write(std::to_string(l.intValue() & 0xffffffff) + "u");
+        } else if (type.matches(*fContext.fTypes.fUShort)) {
+            this->write(std::to_string(l.intValue() & 0xffff) + "u");
+        } else {
+            this->write(std::to_string(l.intValue()));
+        }
+        return;
+    }
+    this->write(l.description(OperatorPrecedence::kTopLevel));
+}
+
+// Emits a function signature: precision + return type, mangled name, and the
+// parameter list (with array sizes moved after the parameter name, as GLSL requires).
+void GLSLCodeGenerator::writeFunctionDeclaration(const FunctionDeclaration& f) {
+    this->writeTypePrecision(f.returnType());
+    this->writeType(f.returnType());
+    this->write(" ");
+    this->writeIdentifier(f.mangledName());
+    this->write("(");
+    auto separator = SkSL::String::Separator();
+    for (size_t index = 0; index < f.parameters().size(); ++index) {
+        const Variable* param = f.parameters()[index];
+
+        // This is a workaround for our test files. They use the runtime effect signature, so main
+        // takes a coords parameter. The IR generator tags those with a builtin ID (sk_FragCoord),
+        // and we omit them from the declaration here, so the function is valid GLSL.
+        if (f.isMain() && param->modifiers().fLayout.fBuiltin != -1) {
+            continue;
+        }
+        this->write(separator());
+        Modifiers modifiers = param->modifiers();
+        if (this->caps().fRemoveConstFromFunctionParameters) {
+            modifiers.fFlags &= ~Modifiers::kConst_Flag;
+        }
+        this->writeModifiers(modifiers, false);
+        // Peel off the array dimension (if any) so it can be written after the name.
+        std::vector<int> sizes;
+        const Type* type = &param->type();
+        if (type->isArray()) {
+            sizes.push_back(type->columns());
+            type = &type->componentType();
+        }
+        this->writeTypePrecision(*type);
+        this->writeType(*type);
+        this->write(" ");
+        if (!param->name().empty()) {
+            this->writeIdentifier(param->mangledName());
+        } else {
+            // By the spec, GLSL does not require function parameters to be named (see
+            // `single_declaration` in the Shading Language Grammar), but some older versions of
+            // GLSL report "formal parameter lacks a name" if a parameter is not named.
+            this->write("_skAnonymousParam");
+            this->write(std::to_string(index));
+        }
+        for (int s : sizes) {
+            this->write("[" + std::to_string(s) + "]");
+        }
+    }
+    this->write(")");
+}
+
+// Emits a full function definition. The body is written to a temporary buffer so
+// that any setup code accumulated in fFunctionHeader (scratch vars, sk_FragCoord,
+// sk_Clockwise, ...) can be emitted before the body's statements.
+void GLSLCodeGenerator::writeFunction(const FunctionDefinition& f) {
+    fSetupFragPosition = false;
+    fSetupFragCoordWorkaround = false;
+
+    this->writeFunctionDeclaration(f.declaration());
+    this->writeLine(" {");
+    fIndentation++;
+
+    fFunctionHeader.clear();
+    OutputStream* oldOut = fOut;
+    StringStream buffer;
+    fOut = &buffer;
+    for (const std::unique_ptr<Statement>& stmt : f.body()->as<Block>().children()) {
+        if (!stmt->isEmpty()) {
+            this->writeStatement(*stmt);
+            this->finishLine();
+        }
+    }
+
+    fIndentation--;
+    this->writeLine("}");
+
+    // Restore the real output stream and flush header + buffered body in order.
+    fOut = oldOut;
+    this->write(fFunctionHeader);
+    this->write(buffer.str());
+}
+
+// Emits a forward declaration: the signature followed by a semicolon.
+void GLSLCodeGenerator::writeFunctionPrototype(const FunctionPrototype& f) {
+    this->writeFunctionDeclaration(f.declaration());
+    this->writeLine(";");
+}
+
+// Emits layout + qualifier keywords for a declaration. `globalContext` selects
+// legacy in/out spellings (attribute/varying) on pre-1.30 GLSL.
+void GLSLCodeGenerator::writeModifiers(const Modifiers& modifiers,
+                                       bool globalContext) {
+    std::string layout = modifiers.fLayout.description();
+    if (layout.size()) {
+        this->write(layout + " ");
+    }
+
+    // For GLSL 4.1 and below, qualifier-order matters! These are written out in Modifier-bit order.
+    if (modifiers.fFlags & Modifiers::kFlat_Flag) {
+        this->write("flat ");
+    }
+    if (modifiers.fFlags & Modifiers::kNoPerspective_Flag) {
+        this->write("noperspective ");
+    }
+
+    if (modifiers.fFlags & Modifiers::kConst_Flag) {
+        this->write("const ");
+    }
+    if (modifiers.fFlags & Modifiers::kUniform_Flag) {
+        this->write("uniform ");
+    }
+    if ((modifiers.fFlags & Modifiers::kIn_Flag) &&
+        (modifiers.fFlags & Modifiers::kOut_Flag)) {
+        this->write("inout ");
+    } else if (modifiers.fFlags & Modifiers::kIn_Flag) {
+        if (globalContext && this->caps().fGLSLGeneration < SkSL::GLSLGeneration::k130) {
+            // Pre-1.30 GLSL: global inputs are `attribute` (vertex) or `varying` (fragment).
+            this->write(ProgramConfig::IsVertex(fProgram.fConfig->fKind) ? "attribute "
+                                                                         : "varying ");
+        } else {
+            this->write("in ");
+        }
+    } else if (modifiers.fFlags & Modifiers::kOut_Flag) {
+        if (globalContext &&
+            this->caps().fGLSLGeneration < SkSL::GLSLGeneration::k130) {
+            this->write("varying ");
+        } else {
+            this->write("out ");
+        }
+    }
+
+    if (modifiers.fFlags & Modifiers::kReadOnly_Flag) {
+        this->write("readonly ");
+    }
+    if (modifiers.fFlags & Modifiers::kWriteOnly_Flag) {
+        this->write("writeonly ");
+    }
+    if (modifiers.fFlags & Modifiers::kBuffer_Flag) {
+        this->write("buffer ");
+    }
+}
+
+// Emits an interface block declaration with its fields, optional instance name,
+// and optional array size. The implicit sk_PerVertex block is skipped entirely.
+void GLSLCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf) {
+    if (intf.typeName() == "sk_PerVertex") {
+        return;
+    }
+    const Type* structType = &intf.var()->type().componentType();
+    this->writeModifiers(intf.var()->modifiers(), true);
+    this->writeType(*structType);
+    this->writeLine(" {");
+    fIndentation++;
+    for (const auto& f : structType->fields()) {
+        this->writeModifiers(f.fModifiers, false);
+        this->writeTypePrecision(*f.fType);
+        this->writeType(*f.fType);
+        this->write(" ");
+        this->writeIdentifier(f.fName);
+        this->writeLine(";");
+    }
+    fIndentation--;
+    this->write("}");
+    if (intf.instanceName().size()) {
+        this->write(" ");
+        this->writeIdentifier(intf.instanceName());
+        if (intf.arraySize() > 0) {
+            this->write("[");
+            this->write(std::to_string(intf.arraySize()));
+            this->write("]");
+        }
+    }
+    this->writeLine(";");
+}
+
+// Emits a variable's initializer expression (the variable itself is unused here;
+// subclasses may override to specialize initialization).
+void GLSLCodeGenerator::writeVarInitializer(const Variable& var, const Expression& value) {
+    this->writeExpression(value, Precedence::kTopLevel);
+}
+
+// Returns the precision qualifier ("highp " / "mediump " / "") for a type when the
+// target uses precision modifiers; vectors/matrices/arrays use their component's.
+const char* GLSLCodeGenerator::getTypePrecision(const Type& type) {
+    if (this->usesPrecisionModifiers()) {
+        switch (type.typeKind()) {
+            case Type::TypeKind::kScalar:
+                if (type.matches(*fContext.fTypes.fShort) ||
+                    type.matches(*fContext.fTypes.fUShort)) {
+                    // Some drivers implement short-int precision incompletely; force highp there.
+                    if (fProgram.fConfig->fSettings.fForceHighPrecision ||
+                        this->caps().fIncompleteShortIntPrecision) {
+                        return "highp ";
+                    }
+                    return "mediump ";
+                }
+                if (type.matches(*fContext.fTypes.fHalf)) {
+                    return fProgram.fConfig->fSettings.fForceHighPrecision ? "highp " : "mediump ";
+                }
+                if (type.matches(*fContext.fTypes.fFloat) || type.matches(*fContext.fTypes.fInt) ||
+                        type.matches(*fContext.fTypes.fUInt)) {
+                    return "highp ";
+                }
+                return "";
+            case Type::TypeKind::kVector: // fall through
+            case Type::TypeKind::kMatrix:
+            case Type::TypeKind::kArray:
+                return this->getTypePrecision(type.componentType());
+            default:
+                break;
+        }
+    }
+    return "";
+}
+
+// Writes the precision qualifier (if any) for `type` to the output.
+void GLSLCodeGenerator::writeTypePrecision(const Type& type) {
+    this->write(this->getTypePrecision(type));
+}
+
+// Emits a variable declaration (modifiers, precision, type, name, array size,
+// optional initializer). Also records the first external/rect sampler seen so the
+// required extensions and default-precision lines can be emitted by generateCode.
+void GLSLCodeGenerator::writeVarDeclaration(const VarDeclaration& var, bool global) {
+    this->writeModifiers(var.var()->modifiers(), global);
+    this->writeTypePrecision(var.baseType());
+    this->writeType(var.baseType());
+    this->write(" ");
+    this->writeIdentifier(var.var()->mangledName());
+    if (var.arraySize() > 0) {
+        this->write("[");
+        this->write(std::to_string(var.arraySize()));
+        this->write("]");
+    }
+    if (var.value()) {
+        this->write(" = ");
+        this->writeVarInitializer(*var.var(), *var.value());
+    }
+    if (!fFoundExternalSamplerDecl &&
+        var.var()->type().matches(*fContext.fTypes.fSamplerExternalOES)) {
+        // External textures need their extension(s) declared once, on first use.
+        if (this->caps().externalTextureExtensionString()) {
+            this->writeExtension(this->caps().externalTextureExtensionString());
+        }
+        if (this->caps().secondExternalTextureExtensionString()) {
+            this->writeExtension(this->caps().secondExternalTextureExtensionString());
+        }
+        fFoundExternalSamplerDecl = true;
+    }
+    if (!fFoundRectSamplerDecl && var.var()->type().matches(*fContext.fTypes.fSampler2DRect)) {
+        fFoundRectSamplerDecl = true;
+    }
+    this->write(";");
+}
+
+// Dispatches a statement to the appropriate write* method by kind.
+void GLSLCodeGenerator::writeStatement(const Statement& s) {
+    switch (s.kind()) {
+        case Statement::Kind::kBlock:
+            this->writeBlock(s.as<Block>());
+            break;
+        case Statement::Kind::kExpression:
+            this->writeExpressionStatement(s.as<ExpressionStatement>());
+            break;
+        case Statement::Kind::kReturn:
+            this->writeReturnStatement(s.as<ReturnStatement>());
+            break;
+        case Statement::Kind::kVarDeclaration:
+            this->writeVarDeclaration(s.as<VarDeclaration>(), false);
+            break;
+        case Statement::Kind::kIf:
+            this->writeIfStatement(s.as<IfStatement>());
+            break;
+        case Statement::Kind::kFor:
+            this->writeForStatement(s.as<ForStatement>());
+            break;
+        case Statement::Kind::kDo:
+            this->writeDoStatement(s.as<DoStatement>());
+            break;
+        case Statement::Kind::kSwitch:
+            this->writeSwitchStatement(s.as<SwitchStatement>());
+            break;
+        case Statement::Kind::kBreak:
+            this->write("break;");
+            break;
+        case Statement::Kind::kContinue:
+            this->write("continue;");
+            break;
+        case Statement::Kind::kDiscard:
+            this->write("discard;");
+            break;
+        case Statement::Kind::kNop:
+            this->write(";");
+            break;
+        default:
+            SkDEBUGFAILF("unsupported statement: %s", s.description().c_str());
+            break;
+    }
+}
+
+// Emits a statement block, wrapping in braces only when the block is a real scope
+// (or empty, where braces keep the surrounding construct valid).
+void GLSLCodeGenerator::writeBlock(const Block& b) {
+    // Write scope markers if this block is a scope, or if the block is empty (since we need to emit
+    // something here to make the code valid).
+    bool isScope = b.isScope() || b.isEmpty();
+    if (isScope) {
+        this->writeLine("{");
+        fIndentation++;
+    }
+    for (const std::unique_ptr<Statement>& stmt : b.children()) {
+        if (!stmt->isEmpty()) {
+            this->writeStatement(*stmt);
+            this->finishLine();
+        }
+    }
+    if (isScope) {
+        fIndentation--;
+        this->write("}");
+    }
+}
+
+// Emits `if (test) ifTrue` with an optional `else ifFalse` clause.
+void GLSLCodeGenerator::writeIfStatement(const IfStatement& stmt) {
+    this->write("if (");
+    this->writeExpression(*stmt.test(), Precedence::kTopLevel);
+    this->write(") ");
+    this->writeStatement(*stmt.ifTrue());
+    if (stmt.ifFalse()) {
+        this->write(" else ");
+        this->writeStatement(*stmt.ifFalse());
+    }
+}
+
+// Emits a for loop. A test-only loop is printed as `while`; on buggy drivers the
+// condition gets an `&& true` appended (fAddAndTrueToLoopCondition workaround).
+void GLSLCodeGenerator::writeForStatement(const ForStatement& f) {
+    // Emit loops of the form 'for(;test;)' as 'while(test)', which is probably how they started
+    if (!f.initializer() && f.test() && !f.next()) {
+        this->write("while (");
+        this->writeExpression(*f.test(), Precedence::kTopLevel);
+        this->write(") ");
+        this->writeStatement(*f.statement());
+        return;
+    }
+
+    this->write("for (");
+    if (f.initializer() && !f.initializer()->isEmpty()) {
+        this->writeStatement(*f.initializer());
+    } else {
+        this->write("; ");
+    }
+    if (f.test()) {
+        if (this->caps().fAddAndTrueToLoopCondition) {
+            // Rewrite `test` as `test && true` for drivers that mis-handle loop conditions.
+            std::unique_ptr<Expression> and_true(new BinaryExpression(
+                    Position(), f.test()->clone(), Operator::Kind::LOGICALAND,
+                    Literal::MakeBool(fContext, Position(), /*value=*/true),
+                    fContext.fTypes.fBool.get()));
+            this->writeExpression(*and_true, Precedence::kTopLevel);
+        } else {
+            this->writeExpression(*f.test(), Precedence::kTopLevel);
+        }
+    }
+    this->write("; ");
+    if (f.next()) {
+        this->writeExpression(*f.next(), Precedence::kTopLevel);
+    }
+    this->write(") ");
+    this->writeStatement(*f.statement());
+}
+
+// Emits a do-while loop, or — on drivers that can't handle do-while — an
+// equivalent `while (true)` rewrite using a "seen once" flag (scheme shown below).
+void GLSLCodeGenerator::writeDoStatement(const DoStatement& d) {
+    if (!this->caps().fRewriteDoWhileLoops) {
+        this->write("do ");
+        this->writeStatement(*d.statement());
+        this->write(" while (");
+        this->writeExpression(*d.test(), Precedence::kTopLevel);
+        this->write(");");
+        return;
+    }
+
+    // Otherwise, do the do while loop workaround, to rewrite loops of the form:
+    // do {
+    //     CODE;
+    // } while (CONDITION)
+    //
+    // to loops of the form
+    // bool temp = false;
+    // while (true) {
+    //     if (temp) {
+    //         if (!CONDITION) {
+    //             break;
+    //         }
+    //     }
+    //     temp = true;
+    //     CODE;
+    // }
+    std::string tmpVar = "_tmpLoopSeenOnce" + std::to_string(fVarCount++);
+    this->write("bool ");
+    this->write(tmpVar);
+    this->writeLine(" = false;");
+    this->writeLine("while (true) {");
+    fIndentation++;
+    this->write("if (");
+    this->write(tmpVar);
+    this->writeLine(") {");
+    fIndentation++;
+    this->write("if (!");
+    this->writeExpression(*d.test(), Precedence::kPrefix);
+    this->writeLine(") {");
+    fIndentation++;
+    this->writeLine("break;");
+    fIndentation--;
+    this->writeLine("}");
+    fIndentation--;
+    this->writeLine("}");
+    this->write(tmpVar);
+    this->writeLine(" = true;");
+    this->writeStatement(*d.statement());
+    this->finishLine();
+    fIndentation--;
+    this->write("}");
+}
+
+// Emits an expression statement; when optimizing, side-effect-free expressions
+// are dropped entirely.
+void GLSLCodeGenerator::writeExpressionStatement(const ExpressionStatement& s) {
+    if (fProgram.fConfig->fSettings.fOptimize && !Analysis::HasSideEffects(*s.expression())) {
+        // Don't emit dead expressions.
+        return;
+    }
+    this->writeExpression(*s.expression(), Precedence::kTopLevel);
+    this->write(";");
+}
+
+// Emits a switch statement. On drivers that can't handle `switch`
+// (fRewriteSwitchStatements), it is rewritten as a chain of if-blocks inside a
+// single-iteration for-loop, with a fallthrough flag; `break` exits the loop.
+// Otherwise a native switch is emitted, with two driver workarounds noted inline.
+void GLSLCodeGenerator::writeSwitchStatement(const SwitchStatement& s) {
+    if (this->caps().fRewriteSwitchStatements) {
+        std::string fallthroughVar = "_tmpSwitchFallthrough" + std::to_string(fVarCount++);
+        std::string valueVar = "_tmpSwitchValue" + std::to_string(fVarCount++);
+        std::string loopVar = "_tmpSwitchLoop" + std::to_string(fVarCount++);
+        this->write("int ");
+        this->write(valueVar);
+        this->write(" = ");
+        this->writeExpression(*s.value(), Precedence::kAssignment);
+        this->write(", ");
+        this->write(fallthroughVar);
+        this->writeLine(" = 0;");
+        this->write("for (int ");
+        this->write(loopVar);
+        this->write(" = 0; ");
+        this->write(loopVar);
+        this->write(" < 1; ");
+        this->write(loopVar);
+        this->writeLine("++) {");
+        fIndentation++;
+
+        bool firstCase = true;
+        for (const std::unique_ptr<Statement>& stmt : s.cases()) {
+            const SwitchCase& c = stmt->as<SwitchCase>();
+            if (!c.isDefault()) {
+                // Each case becomes `if ((fallthrough > 0) || (value == N))`; the first
+                // case omits the fallthrough test since the flag can't be set yet.
+                this->write("if ((");
+                if (firstCase) {
+                    firstCase = false;
+                } else {
+                    this->write(fallthroughVar);
+                    this->write(" > 0) || (");
+                }
+                this->write(valueVar);
+                this->write(" == ");
+                this->write(std::to_string(c.value()));
+                this->writeLine(")) {");
+                fIndentation++;
+
+                // We write the entire case-block statement here, and then set `switchFallthrough`
+                // to 1. If the case-block had a break statement in it, we break out of the outer
+                // for-loop entirely, meaning the `switchFallthrough` assignment never occurs, nor
+                // does any code after it inside the switch. We've forbidden `continue` statements
+                // inside switch case-blocks entirely, so we don't need to consider their effect on
+                // control flow; see the Finalizer in FunctionDefinition::Convert.
+                this->writeStatement(*c.statement());
+                this->finishLine();
+                this->write(fallthroughVar);
+                this->write(" = 1;");
+                this->writeLine();
+
+                fIndentation--;
+                this->writeLine("}");
+            } else {
+                // This is the default case. Since it's always last, we can just dump in the code.
+                this->writeStatement(*c.statement());
+                this->finishLine();
+            }
+        }
+
+        fIndentation--;
+        this->writeLine("}");
+        return;
+    }
+
+    this->write("switch (");
+    this->writeExpression(*s.value(), Precedence::kTopLevel);
+    this->writeLine(") {");
+    fIndentation++;
+    // If a switch contains only a `default` case and nothing else, this confuses some drivers and
+    // can lead to a crash. Adding a real case before the default seems to work around the bug,
+    // and doesn't change the meaning of the switch. (skia:12465)
+    if (s.cases().size() == 1 && s.cases().front()->as<SwitchCase>().isDefault()) {
+        this->writeLine("case 0:");
+    }
+
+    // The GLSL spec insists that the last case in a switch statement must have an associated
+    // statement. In practice, the Apple GLSL compiler crashes if that statement is a no-op, such as
+    // a semicolon or an empty brace pair. (This is filed as FB11992149.) It also crashes if we put
+    // two `break` statements in a row. To work around this while honoring the rules of the
+    // standard, we inject an extra break if and only if the last switch-case block is empty.
+    bool foundEmptyCase = false;
+
+    for (const std::unique_ptr<Statement>& stmt : s.cases()) {
+        const SwitchCase& c = stmt->as<SwitchCase>();
+        if (c.isDefault()) {
+            this->writeLine("default:");
+        } else {
+            this->write("case ");
+            this->write(std::to_string(c.value()));
+            this->writeLine(":");
+        }
+        if (c.statement()->isEmpty()) {
+            foundEmptyCase = true;
+        } else {
+            foundEmptyCase = false;
+            fIndentation++;
+            this->writeStatement(*c.statement());
+            this->finishLine();
+            fIndentation--;
+        }
+    }
+    if (foundEmptyCase) {
+        fIndentation++;
+        this->writeLine("break;");
+        fIndentation--;
+    }
+    fIndentation--;
+    this->finishLine();
+    this->write("}");
+}
+
+// Emits `return;` or `return expr;`.
+void GLSLCodeGenerator::writeReturnStatement(const ReturnStatement& r) {
+    this->write("return");
+    if (r.expression()) {
+        this->write(" ");
+        this->writeExpression(*r.expression(), Precedence::kTopLevel);
+    }
+    this->write(";");
+}
+
+// Emits the #version line (if the target caps provide one).
+void GLSLCodeGenerator::writeHeader() {
+    if (this->caps().fVersionDeclString) {
+        this->write(this->caps().fVersionDeclString);
+        this->finishLine();
+    }
+}
+
+// Dispatches a top-level program element to the appropriate writer. Global
+// builtin variables are mostly suppressed, except sk_FragColor on targets that
+// require an explicit fragment-shader output declaration.
+void GLSLCodeGenerator::writeProgramElement(const ProgramElement& e) {
+    switch (e.kind()) {
+        case ProgramElement::Kind::kExtension:
+            this->writeExtension(e.as<Extension>().name());
+            break;
+        case ProgramElement::Kind::kGlobalVar: {
+            const VarDeclaration& decl = e.as<GlobalVarDeclaration>().varDeclaration();
+            int builtin = decl.var()->modifiers().fLayout.fBuiltin;
+            if (builtin == -1) {
+                // normal var
+                this->writeVarDeclaration(decl, true);
+                this->finishLine();
+            } else if (builtin == SK_FRAGCOLOR_BUILTIN &&
+                       this->caps().mustDeclareFragmentShaderOutput()) {
+                if (fProgram.fConfig->fSettings.fFragColorIsInOut) {
+                    this->write("inout ");
+                } else {
+                    this->write("out ");
+                }
+                if (this->usesPrecisionModifiers()) {
+                    this->write("mediump ");
+                }
+                this->writeLine("vec4 sk_FragColor;");
+            }
+            break;
+        }
+        case ProgramElement::Kind::kInterfaceBlock:
+            this->writeInterfaceBlock(e.as<InterfaceBlock>());
+            break;
+        case ProgramElement::Kind::kFunction:
+            this->writeFunction(e.as<FunctionDefinition>());
+            break;
+        case ProgramElement::Kind::kFunctionPrototype:
+            this->writeFunctionPrototype(e.as<FunctionPrototype>());
+            break;
+        case ProgramElement::Kind::kModifiers: {
+            const Modifiers& modifiers = e.as<ModifiersDeclaration>().modifiers();
+            this->writeModifiers(modifiers, true);
+            this->writeLine(";");
+            break;
+        }
+        case ProgramElement::Kind::kStructDefinition:
+            this->writeStructDefinition(e.as<StructDefinition>());
+            break;
+        default:
+            SkDEBUGFAILF("unsupported program element %s\n", e.description().c_str());
+            break;
+    }
+}
+
+// Declares the RT-flip uniform in fGlobals when the program uses it.
+void GLSLCodeGenerator::writeInputVars() {
+    if (fProgram.fInputs.fUseFlipRTUniform) {
+        const char* precision = this->usesPrecisionModifiers() ? "highp " : "";
+        fGlobals.writeText("uniform ");
+        fGlobals.writeText(precision);
+        fGlobals.writeText("vec2 " SKSL_RTFLIP_NAME ";\n");
+    }
+}
+
+// Top-level entry point: emits the whole program. Non-function elements are
+// buffered first, then built-in prototypes, then function definitions, so that
+// extensions/globals/precision lines collected along the way can be written ahead
+// of the body. Returns true when no errors were reported.
+bool GLSLCodeGenerator::generateCode() {
+    this->writeHeader();
+    OutputStream* rawOut = fOut;
+    StringStream body;
+    fOut = &body;
+    // Write all the program elements except for functions.
+    for (const ProgramElement* e : fProgram.elements()) {
+        if (!e->is<FunctionDefinition>()) {
+            this->writeProgramElement(*e);
+        }
+    }
+    // Emit prototypes for every built-in function; these aren't always added in perfect order.
+    for (const ProgramElement* e : fProgram.fSharedElements) {
+        if (e->is<FunctionDefinition>()) {
+            this->writeFunctionDeclaration(e->as<FunctionDefinition>().declaration());
+            this->writeLine(";");
+        }
+    }
+    // Write the functions last.
+    // Why don't we write things in their original order? Because the Inliner likes to move function
+    // bodies around. After inlining, code can inadvertently move upwards, above ProgramElements
+    // that the code relies on.
+    for (const ProgramElement* e : fProgram.elements()) {
+        if (e->is<FunctionDefinition>()) {
+            this->writeProgramElement(*e);
+        }
+    }
+    fOut = rawOut;
+
+    write_stringstream(fExtensions, *rawOut);
+    this->writeInputVars();
+    write_stringstream(fGlobals, *rawOut);
+
+    // If gl_FragCoord is unusable, declare the sk_FragCoord_Workaround varying
+    // (out in the vertex stage, in in the fragment stage).
+    if (!this->caps().fCanUseFragCoord) {
+        Layout layout;
+        if (ProgramConfig::IsVertex(fProgram.fConfig->fKind)) {
+            Modifiers modifiers(layout, Modifiers::kOut_Flag);
+            this->writeModifiers(modifiers, true);
+            if (this->usesPrecisionModifiers()) {
+                this->write("highp ");
+            }
+            this->write("vec4 sk_FragCoord_Workaround;\n");
+        } else if (ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
+            Modifiers modifiers(layout, Modifiers::kIn_Flag);
+            this->writeModifiers(modifiers, true);
+            if (this->usesPrecisionModifiers()) {
+                this->write("highp ");
+            }
+            this->write("vec4 sk_FragCoord_Workaround;\n");
+        }
+    }
+
+    // Default precision declarations for float and each sampler type actually used.
+    if (this->usesPrecisionModifiers()) {
+        const char* precision =
+                fProgram.fConfig->fSettings.fForceHighPrecision ? "highp" : "mediump";
+        this->write(String::printf("precision %s float;\n", precision));
+        this->write(String::printf("precision %s sampler2D;\n", precision));
+        if (fFoundExternalSamplerDecl && !this->caps().fNoDefaultPrecisionForExternalSamplers) {
+            this->write(String::printf("precision %s samplerExternalOES;\n", precision));
+        }
+        if (fFoundRectSamplerDecl) {
+            this->write(String::printf("precision %s sampler2DRect;\n", precision));
+        }
+    }
+    write_stringstream(fExtraFunctions, *rawOut);
+    write_stringstream(body, *rawOut);
+    return fContext.fErrors->errorCount() == 0;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLGLSLCodeGenerator.h b/gfx/skia/skia/src/sksl/codegen/SkSLGLSLCodeGenerator.h
new file mode 100644
index 0000000000..e6672ab45f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLGLSLCodeGenerator.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_GLSLCODEGENERATOR
+#define SKSL_GLSLCODEGENERATOR
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/codegen/SkSLCodeGenerator.h"
+
+#include <cstdint>
+#include <string>
+#include <string_view>
+
+namespace SkSL {
+
+class AnyConstructor;
+class BinaryExpression;
+class Block;
+class ConstructorCompound;
+class ConstructorDiagonalMatrix;
+class DoStatement;
+class Expression;
+class ExpressionStatement;
+class FieldAccess;
+class ForStatement;
+class FunctionCall;
+class FunctionDeclaration;
+class FunctionDefinition;
+class FunctionPrototype;
+class IfStatement;
+class IndexExpression;
+class InterfaceBlock;
+class Literal;
+class OutputStream;
+class PostfixExpression;
+class PrefixExpression;
+class ProgramElement;
+class ReturnStatement;
+class Statement;
+class StructDefinition;
+class SwitchStatement;
+class Swizzle;
+class TernaryExpression;
+class Type;
+class VarDeclaration;
+class Variable;
+class VariableReference;
+enum class OperatorPrecedence : uint8_t;
+struct Layout;
+struct Modifiers;
+struct Program;
+struct ShaderCaps;
+
/**
 * Converts a Program into GLSL code.
 *
 * Output is buffered: extensions, global declarations, and generated helper
 * functions are accumulated in separate StringStreams and stitched together
 * with the function bodies in generateCode(). Most write methods are virtual
 * so derived generators can override the emission of individual constructs.
 */
class GLSLCodeGenerator : public CodeGenerator {
public:
    GLSLCodeGenerator(const Context* context, const Program* program, OutputStream* out)
    : INHERITED(context, program, out) {}

    // Emits GLSL for the whole program; returns true when no errors were
    // reported during generation.
    bool generateCode() override;

protected:
    using Precedence = OperatorPrecedence;

    // Low-level text emission. write() honors the current indentation when at
    // the start of a line; finishLine() terminates a partially-written line.
    void write(std::string_view s);

    void writeLine(std::string_view s = std::string_view());

    void finishLine();

    virtual void writeHeader();

    // True when the target dialect uses precision qualifiers
    // (highp / mediump) — e.g. GLSL ES.
    bool usesPrecisionModifiers() const;

    void writeIdentifier(std::string_view identifier);

    // Returns the GLSL spelling of `type`.
    virtual std::string getTypeName(const Type& type);

    void writeStructDefinition(const StructDefinition& s);

    void writeType(const Type& type);

    void writeExtension(std::string_view name, bool require = true);

    void writeInterfaceBlock(const InterfaceBlock& intf);

    void writeFunctionDeclaration(const FunctionDeclaration& f);

    void writeFunctionPrototype(const FunctionPrototype& f);

    virtual void writeFunction(const FunctionDefinition& f);

    void writeLayout(const Layout& layout);

    void writeModifiers(const Modifiers& modifiers, bool globalContext);

    virtual void writeInputVars();

    virtual void writeVarInitializer(const Variable& var, const Expression& value);

    const char* getTypePrecision(const Type& type);

    void writeTypePrecision(const Type& type);

    void writeVarDeclaration(const VarDeclaration& var, bool global);

    void writeFragCoord();

    virtual void writeVariableReference(const VariableReference& ref);

    // Expression writers. `parentPrecedence` is the precedence of the
    // enclosing expression; writers use it to decide whether they must
    // parenthesize their own output.
    void writeExpression(const Expression& expr, Precedence parentPrecedence);

    void writeIntrinsicCall(const FunctionCall& c);

    void writeMinAbsHack(Expression& absExpr, Expression& otherExpr);

    // "Hack" writers emulate intrinsics (determinant / inverse / transpose /
    // inversesqrt) on targets whose GLSL dialect lacks them.
    void writeDeterminantHack(const Expression& mat);

    void writeInverseHack(const Expression& mat);

    void writeTransposeHack(const Expression& mat);

    void writeInverseSqrtHack(const Expression& x);

    void writeMatrixComparisonWorkaround(const BinaryExpression& x);

    virtual void writeFunctionCall(const FunctionCall& c);

    void writeConstructorCompound(const ConstructorCompound& c, Precedence parentPrecedence);

    void writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c,
                                        Precedence parentPrecedence);

    virtual void writeAnyConstructor(const AnyConstructor& c, Precedence parentPrecedence);

    virtual void writeCastConstructor(const AnyConstructor& c, Precedence parentPrecedence);

    virtual void writeFieldAccess(const FieldAccess& f);

    virtual void writeSwizzle(const Swizzle& swizzle);

    virtual void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence);

    void writeShortCircuitWorkaroundExpression(const BinaryExpression& b,
                                               Precedence parentPrecedence);

    virtual void writeTernaryExpression(const TernaryExpression& t, Precedence parentPrecedence);

    virtual void writeIndexExpression(const IndexExpression& expr);

    void writePrefixExpression(const PrefixExpression& p, Precedence parentPrecedence);

    void writePostfixExpression(const PostfixExpression& p, Precedence parentPrecedence);

    virtual void writeLiteral(const Literal& l);

    // Statement writers.
    void writeStatement(const Statement& s);

    void writeBlock(const Block& b);

    virtual void writeIfStatement(const IfStatement& stmt);

    void writeForStatement(const ForStatement& f);

    void writeDoStatement(const DoStatement& d);

    void writeExpressionStatement(const ExpressionStatement& s);

    virtual void writeSwitchStatement(const SwitchStatement& s);

    virtual void writeReturnStatement(const ReturnStatement& r);

    virtual void writeProgramElement(const ProgramElement& e);

    const ShaderCaps& caps() const { return *fContext.fCaps; }

    // Buffered output sections, concatenated into the final text by
    // generateCode().
    StringStream fExtensions;
    StringStream fGlobals;
    StringStream fExtraFunctions;
    std::string fFunctionHeader;
    int fVarCount = 0;
    int fIndentation = 0;
    bool fAtLineStart = false;
    // true if we have run into usages of dFdx / dFdy
    bool fFoundDerivatives = false;
    bool fFoundExternalSamplerDecl = false;
    bool fFoundRectSamplerDecl = false;
    bool fSetupClockwise = false;
    bool fSetupFragPosition = false;
    bool fSetupFragCoordWorkaround = false;

    // Workaround/polyfill flags — each helper is emitted at most once.
    bool fWrittenAbsEmulation = false;
    bool fWrittenDeterminant2 = false, fWrittenDeterminant3 = false, fWrittenDeterminant4 = false;
    bool fWrittenInverse2 = false, fWrittenInverse3 = false, fWrittenInverse4 = false;
    bool fWrittenTranspose[3][3] = {};

    using INHERITED = CodeGenerator;
};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLMetalCodeGenerator.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLMetalCodeGenerator.cpp
new file mode 100644
index 0000000000..d173fae687
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLMetalCodeGenerator.cpp
@@ -0,0 +1,3226 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/codegen/SkSLMetalCodeGenerator.h"
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTo.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkScopeExit.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLMemoryLayout.h"
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLConstructorArrayCast.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorMatrixResize.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLFunctionPrototype.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLModifiersDeclaration.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLStructDefinition.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/spirv.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <functional>
+#include <limits>
+#include <memory>
+
+namespace SkSL {
+
+static const char* operator_name(Operator op) {
+ switch (op.kind()) {
+ case Operator::Kind::LOGICALXOR: return " != ";
+ default: return op.operatorName();
+ }
+}
+
/**
 * Visitor over the program's global-scope members (interface blocks, textures,
 * samplers, constants, and other global variables). Subclasses override only
 * the member kinds they care about; the default implementations do nothing.
 */
class MetalCodeGenerator::GlobalStructVisitor {
public:
    virtual ~GlobalStructVisitor() = default;
    // Called with the interface block and the name it is bound to.
    virtual void visitInterfaceBlock(const InterfaceBlock& block, std::string_view blockName) {}
    // Called for a global texture; modifiers carry its binding information.
    virtual void visitTexture(const Type& type, const Modifiers& modifiers,
                              std::string_view name) {}
    virtual void visitSampler(const Type& type, std::string_view name) {}
    // Called for a global declared `const`.
    virtual void visitConstantVariable(const VarDeclaration& decl) {}
    // Called for a mutable global; `value` is its initializer, if any.
    virtual void visitNonconstantVariable(const Variable& var, const Expression* value) {}
};
+
/**
 * Visitor over threadgroup (workgroup-shared) variables. Unlike
 * GlobalStructVisitor, the single callback is pure virtual: every subclass
 * must handle it.
 */
class MetalCodeGenerator::ThreadgroupStructVisitor {
public:
    virtual ~ThreadgroupStructVisitor() = default;
    virtual void visitNonconstantVariable(const Variable& var) = 0;
};
+
+void MetalCodeGenerator::write(std::string_view s) {
+ if (s.empty()) {
+ return;
+ }
+ if (fAtLineStart) {
+ for (int i = 0; i < fIndentation; i++) {
+ fOut->writeText(" ");
+ }
+ }
+ fOut->writeText(std::string(s).c_str());
+ fAtLineStart = false;
+}
+
+void MetalCodeGenerator::writeLine(std::string_view s) {
+ this->write(s);
+ fOut->writeText(fLineEnding);
+ fAtLineStart = true;
+}
+
+void MetalCodeGenerator::finishLine() {
+ if (!fAtLineStart) {
+ this->writeLine();
+ }
+}
+
+void MetalCodeGenerator::writeExtension(const Extension& ext) {
+ this->writeLine("#extension " + std::string(ext.name()) + " : enable");
+}
+
std::string MetalCodeGenerator::typeName(const Type& type) {
    // Returns the Metal spelling of an SkSL type. Texture names additionally
    // depend on the declared access mode.
    switch (type.typeKind()) {
        case Type::TypeKind::kArray:
            // Arrays become metal `array<T, N>`; unsized arrays cannot be
            // named this way and are asserted against.
            SkASSERT(!type.isUnsizedArray());
            SkASSERTF(type.columns() > 0, "invalid array size: %s", type.description().c_str());
            return String::printf("array<%s, %d>",
                                  this->typeName(type.componentType()).c_str(), type.columns());

        case Type::TypeKind::kVector:
            // e.g. "float" + "4" -> "float4".
            return this->typeName(type.componentType()) + std::to_string(type.columns());

        case Type::TypeKind::kMatrix:
            // e.g. "float4x4" (columns x rows).
            return this->typeName(type.componentType()) + std::to_string(type.columns()) + "x" +
                   std::to_string(type.rows());

        case Type::TypeKind::kSampler:
            // Only 2D combined texture/samplers are supported.
            if (type.dimensions() != SpvDim2D) {
                fContext.fErrors->error(Position(), "Unsupported texture dimensions");
            }
            return "sampler2D";

        case Type::TypeKind::kTexture:
            switch (type.textureAccess()) {
                case Type::TextureAccess::kSample: return "texture2d<half>";
                case Type::TextureAccess::kRead: return "texture2d<half, access::read>";
                case Type::TextureAccess::kWrite: return "texture2d<half, access::write>";
                case Type::TextureAccess::kReadWrite: return "texture2d<half, access::read_write>";
                default: break;
            }
            SkUNREACHABLE;
        case Type::TypeKind::kAtomic:
            // SkSL currently only supports the atomicUint type.
            SkASSERT(type.matches(*fContext.fTypes.fAtomicUInt));
            return "atomic_uint";
        default:
            // Scalars, structs, and everything else use their SkSL name
            // unchanged.
            return std::string(type.name());
    }
}
+
+void MetalCodeGenerator::writeStructDefinition(const StructDefinition& s) {
+ const Type& type = s.type();
+ this->writeLine("struct " + type.displayName() + " {");
+ fIndentation++;
+ this->writeFields(type.fields(), type.fPosition);
+ fIndentation--;
+ this->writeLine("};");
+}
+
+void MetalCodeGenerator::writeType(const Type& type) {
+ this->write(this->typeName(type));
+}
+
void MetalCodeGenerator::writeExpression(const Expression& expr, Precedence parentPrecedence) {
    // Central expression dispatcher: routes each expression kind to its
    // dedicated writer. `parentPrecedence` is forwarded so sub-writers can
    // decide whether they need to parenthesize themselves.
    switch (expr.kind()) {
        case Expression::Kind::kBinary:
            this->writeBinaryExpression(expr.as<BinaryExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorArray:
        case Expression::Kind::kConstructorStruct:
            // Arrays and structs use Metal's brace-initializer syntax.
            this->writeAnyConstructor(expr.asAnyConstructor(), "{", "}", parentPrecedence);
            break;
        case Expression::Kind::kConstructorArrayCast:
            this->writeConstructorArrayCast(expr.as<ConstructorArrayCast>(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorCompound:
            this->writeConstructorCompound(expr.as<ConstructorCompound>(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorDiagonalMatrix:
        case Expression::Kind::kConstructorSplat:
            // These map onto ordinary constructor-call syntax.
            this->writeAnyConstructor(expr.asAnyConstructor(), "(", ")", parentPrecedence);
            break;
        case Expression::Kind::kConstructorMatrixResize:
            this->writeConstructorMatrixResize(expr.as<ConstructorMatrixResize>(),
                                               parentPrecedence);
            break;
        case Expression::Kind::kConstructorScalarCast:
        case Expression::Kind::kConstructorCompoundCast:
            this->writeCastConstructor(expr.asAnyConstructor(), "(", ")", parentPrecedence);
            break;
        case Expression::Kind::kFieldAccess:
            this->writeFieldAccess(expr.as<FieldAccess>());
            break;
        case Expression::Kind::kLiteral:
            this->writeLiteral(expr.as<Literal>());
            break;
        case Expression::Kind::kFunctionCall:
            this->writeFunctionCall(expr.as<FunctionCall>());
            break;
        case Expression::Kind::kPrefix:
            this->writePrefixExpression(expr.as<PrefixExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kPostfix:
            this->writePostfixExpression(expr.as<PostfixExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kSetting:
            // Settings are resolved to literals (via toLiteral) and emitted as
            // such.
            this->writeExpression(*expr.as<Setting>().toLiteral(fContext), parentPrecedence);
            break;
        case Expression::Kind::kSwizzle:
            this->writeSwizzle(expr.as<Swizzle>());
            break;
        case Expression::Kind::kVariableReference:
            this->writeVariableReference(expr.as<VariableReference>());
            break;
        case Expression::Kind::kTernary:
            this->writeTernaryExpression(expr.as<TernaryExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kIndex:
            this->writeIndexExpression(expr.as<IndexExpression>());
            break;
        default:
            // Any other kind reaching codegen is a front-end bug.
            SkDEBUGFAILF("unsupported expression: %s", expr.description().c_str());
            break;
    }
}
+
+// returns true if we should pass by reference instead of by value
+static bool pass_by_reference(const Type& type, const Modifiers& modifiers) {
+ return (modifiers.fFlags & Modifiers::kOut_Flag) && !type.isUnsizedArray();
+}
+
+// returns true if we need to specify an address space modifier
+static bool needs_address_space(const Type& type, const Modifiers& modifiers) {
+ return type.isUnsizedArray() || pass_by_reference(type, modifiers);
+}
+
+// returns true if the InterfaceBlock has the `buffer` modifier
+static bool is_buffer(const InterfaceBlock& block) {
+ return block.var()->modifiers().fFlags & Modifiers::kBuffer_Flag;
+}
+
+// returns true if the InterfaceBlock has the `readonly` modifier
+static bool is_readonly(const InterfaceBlock& block) {
+ return block.var()->modifiers().fFlags & Modifiers::kReadOnly_Flag;
+}
+
// Synthesizes a helper function (written into fExtraFunctions) that adapts
// GLSL out-parameter semantics for Metal: out-arguments are evaluated into
// temporaries, the real function is invoked, and the temporaries are written
// back afterward. Returns the helper's name; the caller emits the actual call.
std::string MetalCodeGenerator::getOutParamHelper(const FunctionCall& call,
                                                  const ExpressionArray& arguments,
                                                  const SkTArray<VariableReference*>& outVars) {
    // It's possible for out-param function arguments to contain an out-param function call
    // expression. Emit the function into a temporary stream to prevent the nested helper from
    // clobbering the current helper as we recursively evaluate argument expressions.
    StringStream tmpStream;
    AutoOutputStream outputToExtraFunctions(this, &tmpStream, &fIndentation);

    const FunctionDeclaration& function = call.function();

    // Helper names are uniquified with a counter plus the callee's mangled name.
    std::string name = "_skOutParamHelper" + std::to_string(fSwizzleHelperCount++) +
                       "_" + function.mangledName();
    const char* separator = "";

    // Emit a prototype for the function we'll be calling through to in our helper.
    if (!function.isBuiltin()) {
        this->writeFunctionDeclaration(function);
        this->writeLine(";");
    }

    // Synthesize a helper function that takes the same inputs as `function`, except in places where
    // `outVars` is non-null; in those places, we take the type of the VariableReference.
    //
    // float _skOutParamHelper0_originalFuncName(float _var0, float _var1, float& outParam) {
    this->writeType(call.type());
    this->write(" ");
    this->write(name);
    this->write("(");
    this->writeFunctionRequirementParams(function, separator);

    SkASSERT(outVars.size() == arguments.size());
    SkASSERT(SkToSizeT(outVars.size()) == function.parameters().size());

    // We need to detect cases where the caller passes the same variable as an out-param more than
    // once, and avoid reusing the variable name. (In those cases we can actually just ignore the
    // redundant input parameter entirely, and not give it any name.)
    SkTHashSet<const Variable*> writtenVars;

    for (int index = 0; index < arguments.size(); ++index) {
        this->write(separator);
        separator = ", ";

        const Variable* param = function.parameters()[index];
        this->writeModifiers(param->modifiers());

        const Type* type = outVars[index] ? &outVars[index]->type() : &arguments[index]->type();
        this->writeType(*type);

        if (pass_by_reference(param->type(), param->modifiers())) {
            this->write("&");
        }
        if (outVars[index]) {
            const Variable* var = outVars[index]->variable();
            if (!writtenVars.contains(var)) {
                writtenVars.add(var);

                this->write(" ");
                // Suppress out/in qualifiers while naming the parameter after
                // the referenced variable.
                fIgnoreVariableReferenceModifiers = true;
                this->writeVariableReference(*outVars[index]);
                fIgnoreVariableReferenceModifiers = false;
            }
        } else {
            this->write(" _var");
            this->write(std::to_string(index));
        }
    }
    this->writeLine(") {");

    ++fIndentation;
    for (int index = 0; index < outVars.size(); ++index) {
        if (!outVars[index]) {
            continue;
        }
        // float3 _var2[ = outParam.zyx];
        this->writeType(arguments[index]->type());
        this->write(" _var");
        this->write(std::to_string(index));

        // `inout` parameters are seeded with the argument's current value;
        // plain `out` parameters start uninitialized.
        const Variable* param = function.parameters()[index];
        if (param->modifiers().fFlags & Modifiers::kIn_Flag) {
            this->write(" = ");
            fIgnoreVariableReferenceModifiers = true;
            this->writeExpression(*arguments[index], Precedence::kAssignment);
            fIgnoreVariableReferenceModifiers = false;
        }

        this->writeLine(";");
    }

    // [int _skResult = ] myFunction(inputs, outputs, _globals, _var0, _var1, _var2, _var3);
    bool hasResult = (call.type().name() != "void");
    if (hasResult) {
        this->writeType(call.type());
        this->write(" _skResult = ");
    }

    this->writeName(function.mangledName());
    this->write("(");
    separator = "";
    this->writeFunctionRequirementArgs(function, separator);

    for (int index = 0; index < arguments.size(); ++index) {
        this->write(separator);
        separator = ", ";

        this->write("_var");
        this->write(std::to_string(index));
    }
    this->writeLine(");");

    // Write each temporary back into its out-param expression.
    for (int index = 0; index < outVars.size(); ++index) {
        if (!outVars[index]) {
            continue;
        }
        // outParam.zyx = _var2;
        fIgnoreVariableReferenceModifiers = true;
        this->writeExpression(*arguments[index], Precedence::kAssignment);
        fIgnoreVariableReferenceModifiers = false;
        this->write(" = _var");
        this->write(std::to_string(index));
        this->writeLine(";");
    }

    if (hasResult) {
        this->writeLine("return _skResult;");
    }

    --fIndentation;
    this->writeLine("}");

    // Write the function out to `fExtraFunctions`.
    write_stringstream(tmpStream, fExtraFunctions);

    return name;
}
+
+std::string MetalCodeGenerator::getBitcastIntrinsic(const Type& outType) {
+ return "as_type<" + outType.displayName() + ">";
+}
+
// Emits a function call. Intrinsics may be rewritten entirely; calls with
// out-parameters are routed through a synthesized helper (see
// getOutParamHelper) to reproduce GLSL's copy-in/copy-out semantics.
void MetalCodeGenerator::writeFunctionCall(const FunctionCall& c) {
    const FunctionDeclaration& function = c.function();

    // Many intrinsics need to be rewritten in Metal.
    if (function.isIntrinsic()) {
        if (this->writeIntrinsicCall(c, function.intrinsicKind())) {
            return;
        }
    }

    // Determine whether or not we need to emulate GLSL's out-param semantics for Metal using a
    // helper function. (Specifically, out-parameters in GLSL are only written back to the original
    // variable at the end of the function call; also, swizzles are supported, whereas Metal doesn't
    // allow a swizzle to be passed to a `floatN&`.)
    const ExpressionArray& arguments = c.arguments();
    const std::vector<Variable*>& parameters = function.parameters();
    SkASSERT(SkToSizeT(arguments.size()) == parameters.size());

    bool foundOutParam = false;
    SkSTArray<16, VariableReference*> outVars;
    outVars.push_back_n(arguments.size(), (VariableReference*)nullptr);

    for (int index = 0; index < arguments.size(); ++index) {
        // If this is an out parameter...
        if (parameters[index]->modifiers().fFlags & Modifiers::kOut_Flag) {
            // Find the expression's inner variable being written to.
            Analysis::AssignmentInfo info;
            // Assignability was verified at IRGeneration time, so this should always succeed.
            SkAssertResult(Analysis::IsAssignable(*arguments[index], &info));
            outVars[index] = info.fAssignedVar;
            foundOutParam = true;
        }
    }

    if (foundOutParam) {
        // Out parameters need to be written back to at the end of the function. To do this, we
        // synthesize a helper function which evaluates the out-param expression into a temporary
        // variable, calls the original function, then writes the temp var back into the out param
        // using the original out-param expression. (This lets us support things like swizzles and
        // array indices.)
        this->write(getOutParamHelper(c, arguments, outVars));
    } else {
        this->write(function.mangledName());
    }

    this->write("(");
    const char* separator = "";
    this->writeFunctionRequirementArgs(function, separator);
    for (int i = 0; i < arguments.size(); ++i) {
        this->write(separator);
        separator = ", ";

        // When calling through a helper, pass the root variable itself; the
        // helper applies the swizzle/index on write-back.
        if (outVars[i]) {
            this->writeExpression(*outVars[i], Precedence::kSequence);
        } else {
            this->writeExpression(*arguments[i], Precedence::kSequence);
        }
    }
    this->write(")");
}
+
// Metal has no matrix inverse() intrinsic. These templated polyfills are
// emitted on demand by getInversePolyfill(); being templates, they serve both
// float and half matrices. The 3x3 and 4x4 versions use the classical
// adjugate-divided-by-determinant expansion.
static constexpr char kInverse2x2[] = R"(
template <typename T>
matrix<T, 2, 2> mat2_inverse(matrix<T, 2, 2> m) {
return matrix<T, 2, 2>(m[1].y, -m[0].y, -m[1].x, m[0].x) * (1/determinant(m));
}
)";

static constexpr char kInverse3x3[] = R"(
template <typename T>
matrix<T, 3, 3> mat3_inverse(matrix<T, 3, 3> m) {
T
 a00 = m[0].x, a01 = m[0].y, a02 = m[0].z,
 a10 = m[1].x, a11 = m[1].y, a12 = m[1].z,
 a20 = m[2].x, a21 = m[2].y, a22 = m[2].z,
 b01 = a22*a11 - a12*a21,
 b11 = -a22*a10 + a12*a20,
 b21 = a21*a10 - a11*a20,
 det = a00*b01 + a01*b11 + a02*b21;
return matrix<T, 3, 3>(
 b01, (-a22*a01 + a02*a21), ( a12*a01 - a02*a11),
 b11, ( a22*a00 - a02*a20), (-a12*a00 + a02*a10),
 b21, (-a21*a00 + a01*a20), ( a11*a00 - a01*a10)) * (1/det);
}
)";

static constexpr char kInverse4x4[] = R"(
template <typename T>
matrix<T, 4, 4> mat4_inverse(matrix<T, 4, 4> m) {
T
 a00 = m[0].x, a01 = m[0].y, a02 = m[0].z, a03 = m[0].w,
 a10 = m[1].x, a11 = m[1].y, a12 = m[1].z, a13 = m[1].w,
 a20 = m[2].x, a21 = m[2].y, a22 = m[2].z, a23 = m[2].w,
 a30 = m[3].x, a31 = m[3].y, a32 = m[3].z, a33 = m[3].w,
 b00 = a00*a11 - a01*a10,
 b01 = a00*a12 - a02*a10,
 b02 = a00*a13 - a03*a10,
 b03 = a01*a12 - a02*a11,
 b04 = a01*a13 - a03*a11,
 b05 = a02*a13 - a03*a12,
 b06 = a20*a31 - a21*a30,
 b07 = a20*a32 - a22*a30,
 b08 = a20*a33 - a23*a30,
 b09 = a21*a32 - a22*a31,
 b10 = a21*a33 - a23*a31,
 b11 = a22*a33 - a23*a32,
 det = b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06;
return matrix<T, 4, 4>(
 a11*b11 - a12*b10 + a13*b09,
 a02*b10 - a01*b11 - a03*b09,
 a31*b05 - a32*b04 + a33*b03,
 a22*b04 - a21*b05 - a23*b03,
 a12*b08 - a10*b11 - a13*b07,
 a00*b11 - a02*b08 + a03*b07,
 a32*b02 - a30*b05 - a33*b01,
 a20*b05 - a22*b02 + a23*b01,
 a10*b10 - a11*b08 + a13*b06,
 a01*b08 - a00*b10 - a03*b06,
 a30*b04 - a31*b02 + a33*b00,
 a21*b02 - a20*b04 - a23*b00,
 a11*b07 - a10*b09 - a12*b06,
 a00*b09 - a01*b07 + a02*b06,
 a31*b01 - a30*b03 - a32*b00,
 a20*b03 - a21*b01 + a22*b00) * (1/det);
}
)";
+
+std::string MetalCodeGenerator::getInversePolyfill(const ExpressionArray& arguments) {
+ // Only use polyfills for a function taking a single-argument square matrix.
+ SkASSERT(arguments.size() == 1);
+ const Type& type = arguments.front()->type();
+ if (type.isMatrix() && type.rows() == type.columns()) {
+ switch (type.rows()) {
+ case 2:
+ if (!fWrittenInverse2) {
+ fWrittenInverse2 = true;
+ fExtraFunctions.writeText(kInverse2x2);
+ }
+ return "mat2_inverse";
+ case 3:
+ if (!fWrittenInverse3) {
+ fWrittenInverse3 = true;
+ fExtraFunctions.writeText(kInverse3x3);
+ }
+ return "mat3_inverse";
+ case 4:
+ if (!fWrittenInverse4) {
+ fWrittenInverse4 = true;
+ fExtraFunctions.writeText(kInverse4x4);
+ }
+ return "mat4_inverse";
+ }
+ }
+ SkDEBUGFAILF("no polyfill for inverse(%s)", type.description().c_str());
+ return "inverse";
+}
+
+void MetalCodeGenerator::writeMatrixCompMult() {
+ static constexpr char kMatrixCompMult[] = R"(
+template <typename T, int C, int R>
+matrix<T, C, R> matrixCompMult(matrix<T, C, R> a, const matrix<T, C, R> b) {
+ for (int c = 0; c < C; ++c) { a[c] *= b[c]; }
+ return a;
+}
+)";
+ if (!fWrittenMatrixCompMult) {
+ fWrittenMatrixCompMult = true;
+ fExtraFunctions.writeText(kMatrixCompMult);
+ }
+}
+
+void MetalCodeGenerator::writeOuterProduct() {
+ static constexpr char kOuterProduct[] = R"(
+template <typename T, int C, int R>
+matrix<T, C, R> outerProduct(const vec<T, R> a, const vec<T, C> b) {
+ matrix<T, C, R> m;
+ for (int c = 0; c < C; ++c) { m[c] = a * b[c]; }
+ return m;
+}
+)";
+ if (!fWrittenOuterProduct) {
+ fWrittenOuterProduct = true;
+ fExtraFunctions.writeText(kOuterProduct);
+ }
+}
+
+std::string MetalCodeGenerator::getTempVariable(const Type& type) {
+ std::string tempVar = "_skTemp" + std::to_string(fVarCount++);
+ this->fFunctionHeader += " " + this->typeName(type) + " " + tempVar + ";\n";
+ return tempVar;
+}
+
+void MetalCodeGenerator::writeSimpleIntrinsic(const FunctionCall& c) {
+ // Write out an intrinsic function call exactly as-is. No muss no fuss.
+ this->write(c.function().name());
+ this->writeArgumentList(c.arguments());
+}
+
+void MetalCodeGenerator::writeArgumentList(const ExpressionArray& arguments) {
+ this->write("(");
+ const char* separator = "";
+ for (const std::unique_ptr<Expression>& arg : arguments) {
+ this->write(separator);
+ separator = ", ";
+ this->writeExpression(*arg, Precedence::kSequence);
+ }
+ this->write(")");
+}
+
+bool MetalCodeGenerator::writeIntrinsicCall(const FunctionCall& c, IntrinsicKind kind) {
+ const ExpressionArray& arguments = c.arguments();
+ switch (kind) {
+ case k_read_IntrinsicKind: {
+ this->writeExpression(*arguments[0], Precedence::kTopLevel);
+ this->write(".read(");
+ this->writeExpression(*arguments[1], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_write_IntrinsicKind: {
+ this->writeExpression(*arguments[0], Precedence::kTopLevel);
+ this->write(".write(");
+ this->writeExpression(*arguments[2], Precedence::kSequence);
+ this->write(", ");
+ this->writeExpression(*arguments[1], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_width_IntrinsicKind: {
+ this->writeExpression(*arguments[0], Precedence::kTopLevel);
+ this->write(".get_width()");
+ return true;
+ }
+ case k_height_IntrinsicKind: {
+ this->writeExpression(*arguments[0], Precedence::kTopLevel);
+ this->write(".get_height()");
+ return true;
+ }
+ case k_mod_IntrinsicKind: {
+ // fmod(x, y) in metal calculates x - y * trunc(x / y) instead of x - y * floor(x / y)
+ std::string tmpX = this->getTempVariable(arguments[0]->type());
+ std::string tmpY = this->getTempVariable(arguments[1]->type());
+ this->write("(" + tmpX + " = ");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(", " + tmpY + " = ");
+ this->writeExpression(*arguments[1], Precedence::kSequence);
+ this->write(", " + tmpX + " - " + tmpY + " * floor(" + tmpX + " / " + tmpY + "))");
+ return true;
+ }
+ // GLSL declares scalar versions of most geometric intrinsics, but these don't exist in MSL
+ case k_distance_IntrinsicKind: {
+ if (arguments[0]->type().columns() == 1) {
+ this->write("abs(");
+ this->writeExpression(*arguments[0], Precedence::kAdditive);
+ this->write(" - ");
+ this->writeExpression(*arguments[1], Precedence::kAdditive);
+ this->write(")");
+ } else {
+ this->writeSimpleIntrinsic(c);
+ }
+ return true;
+ }
+ case k_dot_IntrinsicKind: {
+ if (arguments[0]->type().columns() == 1) {
+ this->write("(");
+ this->writeExpression(*arguments[0], Precedence::kMultiplicative);
+ this->write(" * ");
+ this->writeExpression(*arguments[1], Precedence::kMultiplicative);
+ this->write(")");
+ } else {
+ this->writeSimpleIntrinsic(c);
+ }
+ return true;
+ }
+ case k_faceforward_IntrinsicKind: {
+ if (arguments[0]->type().columns() == 1) {
+ // ((((Nref) * (I) < 0) ? 1 : -1) * (N))
+ this->write("((((");
+ this->writeExpression(*arguments[2], Precedence::kSequence);
+ this->write(") * (");
+ this->writeExpression(*arguments[1], Precedence::kSequence);
+ this->write(") < 0) ? 1 : -1) * (");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write("))");
+ } else {
+ this->writeSimpleIntrinsic(c);
+ }
+ return true;
+ }
+ case k_length_IntrinsicKind: {
+ this->write(arguments[0]->type().columns() == 1 ? "abs(" : "length(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_normalize_IntrinsicKind: {
+ this->write(arguments[0]->type().columns() == 1 ? "sign(" : "normalize(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_packUnorm2x16_IntrinsicKind: {
+ this->write("pack_float_to_unorm2x16(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_unpackUnorm2x16_IntrinsicKind: {
+ this->write("unpack_unorm2x16_to_float(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_packSnorm2x16_IntrinsicKind: {
+ this->write("pack_float_to_snorm2x16(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_unpackSnorm2x16_IntrinsicKind: {
+ this->write("unpack_snorm2x16_to_float(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_packUnorm4x8_IntrinsicKind: {
+ this->write("pack_float_to_unorm4x8(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_unpackUnorm4x8_IntrinsicKind: {
+ this->write("unpack_unorm4x8_to_float(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_packSnorm4x8_IntrinsicKind: {
+ this->write("pack_float_to_snorm4x8(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_unpackSnorm4x8_IntrinsicKind: {
+ this->write("unpack_snorm4x8_to_float(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_packHalf2x16_IntrinsicKind: {
+ this->write("as_type<uint>(half2(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write("))");
+ return true;
+ }
+ case k_unpackHalf2x16_IntrinsicKind: {
+ this->write("float2(as_type<half2>(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write("))");
+ return true;
+ }
+ case k_floatBitsToInt_IntrinsicKind:
+ case k_floatBitsToUint_IntrinsicKind:
+ case k_intBitsToFloat_IntrinsicKind:
+ case k_uintBitsToFloat_IntrinsicKind: {
+ this->write(this->getBitcastIntrinsic(c.type()));
+ this->write("(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_degrees_IntrinsicKind: {
+ this->write("((");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(") * 57.2957795)");
+ return true;
+ }
+ case k_radians_IntrinsicKind: {
+ this->write("((");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(") * 0.0174532925)");
+ return true;
+ }
+ case k_dFdx_IntrinsicKind: {
+ this->write("dfdx");
+ this->writeArgumentList(c.arguments());
+ return true;
+ }
+ case k_dFdy_IntrinsicKind: {
+ if (!fRTFlipName.empty()) {
+ this->write("(" + fRTFlipName + ".y * dfdy");
+ } else {
+ this->write("(dfdy");
+ }
+ this->writeArgumentList(c.arguments());
+ this->write(")");
+ return true;
+ }
+ case k_inverse_IntrinsicKind: {
+ this->write(this->getInversePolyfill(arguments));
+ this->writeArgumentList(c.arguments());
+ return true;
+ }
+ case k_inversesqrt_IntrinsicKind: {
+ this->write("rsqrt");
+ this->writeArgumentList(c.arguments());
+ return true;
+ }
+ case k_atan_IntrinsicKind: {
+ this->write(c.arguments().size() == 2 ? "atan2" : "atan");
+ this->writeArgumentList(c.arguments());
+ return true;
+ }
+ case k_reflect_IntrinsicKind: {
+ if (arguments[0]->type().columns() == 1) {
+ // We need to synthesize `I - 2 * N * I * N`.
+ std::string tmpI = this->getTempVariable(arguments[0]->type());
+ std::string tmpN = this->getTempVariable(arguments[1]->type());
+
+ // (_skTempI = ...
+ this->write("(" + tmpI + " = ");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+
+ // , _skTempN = ...
+ this->write(", " + tmpN + " = ");
+ this->writeExpression(*arguments[1], Precedence::kSequence);
+
+ // , _skTempI - 2 * _skTempN * _skTempI * _skTempN)
+ this->write(", " + tmpI + " - 2 * " + tmpN + " * " + tmpI + " * " + tmpN + ")");
+ } else {
+ this->writeSimpleIntrinsic(c);
+ }
+ return true;
+ }
+ case k_refract_IntrinsicKind: {
+ if (arguments[0]->type().columns() == 1) {
+ // Metal does implement refract for vectors; rather than reimplementing refract from
+ // scratch, we can replace the call with `refract(float2(I,0), float2(N,0), eta).x`.
+ this->write("(refract(float2(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(", 0), float2(");
+ this->writeExpression(*arguments[1], Precedence::kSequence);
+ this->write(", 0), ");
+ this->writeExpression(*arguments[2], Precedence::kSequence);
+ this->write(").x)");
+ } else {
+ this->writeSimpleIntrinsic(c);
+ }
+ return true;
+ }
+ case k_roundEven_IntrinsicKind: {
+ this->write("rint");
+ this->writeArgumentList(c.arguments());
+ return true;
+ }
+ case k_bitCount_IntrinsicKind: {
+ this->write("popcount(");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write(")");
+ return true;
+ }
+ case k_findLSB_IntrinsicKind: {
+ // Create a temp variable to store the expression, to avoid double-evaluating it.
+ std::string skTemp = this->getTempVariable(arguments[0]->type());
+ std::string exprType = this->typeName(arguments[0]->type());
+
+ // ctz returns numbits(type) on zero inputs; GLSL documents it as generating -1 instead.
+ // Use select to detect zero inputs and force a -1 result.
+
+ // (_skTemp1 = (.....), select(ctz(_skTemp1), int4(-1), _skTemp1 == int4(0)))
+ this->write("(");
+ this->write(skTemp);
+ this->write(" = (");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write("), select(ctz(");
+ this->write(skTemp);
+ this->write("), ");
+ this->write(exprType);
+ this->write("(-1), ");
+ this->write(skTemp);
+ this->write(" == ");
+ this->write(exprType);
+ this->write("(0)))");
+ return true;
+ }
+ case k_findMSB_IntrinsicKind: {
+ // Create a temp variable to store the expression, to avoid double-evaluating it.
+ std::string skTemp1 = this->getTempVariable(arguments[0]->type());
+ std::string exprType = this->typeName(arguments[0]->type());
+
+ // GLSL findMSB is actually quite different from Metal's clz:
+ // - For signed negative numbers, it returns the first zero bit, not the first one bit!
+ // - For an empty input (0/~0 depending on sign), findMSB gives -1; clz is numbits(type)
+
+ // (_skTemp1 = (.....),
+ this->write("(");
+ this->write(skTemp1);
+ this->write(" = (");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write("), ");
+
+ // Signed input types might be negative; we need another helper variable to negate the
+ // input (since we can only find one bits, not zero bits).
+ std::string skTemp2;
+ if (arguments[0]->type().isSigned()) {
+ // ... _skTemp2 = (select(_skTemp1, ~_skTemp1, _skTemp1 < 0)),
+ skTemp2 = this->getTempVariable(arguments[0]->type());
+ this->write(skTemp2);
+ this->write(" = (select(");
+ this->write(skTemp1);
+ this->write(", ~");
+ this->write(skTemp1);
+ this->write(", ");
+ this->write(skTemp1);
+ this->write(" < 0)), ");
+ } else {
+ skTemp2 = skTemp1;
+ }
+
+ // ... select(int4(clz(_skTemp2)), int4(-1), _skTemp2 == int4(0)))
+ this->write("select(");
+ this->write(this->typeName(c.type()));
+ this->write("(clz(");
+ this->write(skTemp2);
+ this->write(")), ");
+ this->write(this->typeName(c.type()));
+ this->write("(-1), ");
+ this->write(skTemp2);
+ this->write(" == ");
+ this->write(exprType);
+ this->write("(0)))");
+ return true;
+ }
+ case k_sign_IntrinsicKind: {
+ if (arguments[0]->type().componentType().isInteger()) {
+ // Create a temp variable to store the expression, to avoid double-evaluating it.
+ std::string skTemp = this->getTempVariable(arguments[0]->type());
+ std::string exprType = this->typeName(arguments[0]->type());
+
+ // (_skTemp = (.....),
+ this->write("(");
+ this->write(skTemp);
+ this->write(" = (");
+ this->writeExpression(*arguments[0], Precedence::kSequence);
+ this->write("), ");
+
+ // ... select(select(int4(0), int4(-1), _skTemp < 0), int4(1), _skTemp > 0))
+ this->write("select(select(");
+ this->write(exprType);
+ this->write("(0), ");
+ this->write(exprType);
+ this->write("(-1), ");
+ this->write(skTemp);
+ this->write(" < 0), ");
+ this->write(exprType);
+ this->write("(1), ");
+ this->write(skTemp);
+ this->write(" > 0))");
+ } else {
+ this->writeSimpleIntrinsic(c);
+ }
+ return true;
+ }
+ case k_matrixCompMult_IntrinsicKind: {
+ this->writeMatrixCompMult();
+ this->writeSimpleIntrinsic(c);
+ return true;
+ }
+ case k_outerProduct_IntrinsicKind: {
+ this->writeOuterProduct();
+ this->writeSimpleIntrinsic(c);
+ return true;
+ }
+ case k_mix_IntrinsicKind: {
+ SkASSERT(c.arguments().size() == 3);
+ if (arguments[2]->type().componentType().isBoolean()) {
+ // The Boolean forms of GLSL mix() use the select() intrinsic in Metal.
+ this->write("select");
+ this->writeArgumentList(c.arguments());
+ return true;
+ }
+ // The basic form of mix() is supported by Metal as-is.
+ this->writeSimpleIntrinsic(c);
+ return true;
+ }
+ case k_equal_IntrinsicKind:
+ case k_greaterThan_IntrinsicKind:
+ case k_greaterThanEqual_IntrinsicKind:
+ case k_lessThan_IntrinsicKind:
+ case k_lessThanEqual_IntrinsicKind:
+ case k_notEqual_IntrinsicKind: {
+ this->write("(");
+ this->writeExpression(*c.arguments()[0], Precedence::kRelational);
+ switch (kind) {
+ case k_equal_IntrinsicKind:
+ this->write(" == ");
+ break;
+ case k_notEqual_IntrinsicKind:
+ this->write(" != ");
+ break;
+ case k_lessThan_IntrinsicKind:
+ this->write(" < ");
+ break;
+ case k_lessThanEqual_IntrinsicKind:
+ this->write(" <= ");
+ break;
+ case k_greaterThan_IntrinsicKind:
+ this->write(" > ");
+ break;
+ case k_greaterThanEqual_IntrinsicKind:
+ this->write(" >= ");
+ break;
+ default:
+ SK_ABORT("unsupported comparison intrinsic kind");
+ }
+ this->writeExpression(*c.arguments()[1], Precedence::kRelational);
+ this->write(")");
+ return true;
+ }
+ case k_storageBarrier_IntrinsicKind:
+ this->write("threadgroup_barrier(mem_flags::mem_device)");
+ return true;
+ case k_workgroupBarrier_IntrinsicKind:
+ this->write("threadgroup_barrier(mem_flags::mem_threadgroup)");
+ return true;
+ case k_atomicAdd_IntrinsicKind:
+ this->write("atomic_fetch_add_explicit(&");
+ this->writeExpression(*c.arguments()[0], Precedence::kSequence);
+ this->write(", ");
+ this->writeExpression(*c.arguments()[1], Precedence::kSequence);
+ this->write(", memory_order_relaxed)");
+ return true;
+ case k_atomicLoad_IntrinsicKind:
+ this->write("atomic_load_explicit(&");
+ this->writeExpression(*c.arguments()[0], Precedence::kSequence);
+ this->write(", memory_order_relaxed)");
+ return true;
+ case k_atomicStore_IntrinsicKind:
+ this->write("atomic_store_explicit(&");
+ this->writeExpression(*c.arguments()[0], Precedence::kSequence);
+ this->write(", ");
+ this->writeExpression(*c.arguments()[1], Precedence::kSequence);
+ this->write(", memory_order_relaxed)");
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Assembles a matrix of type floatRxC by resizing another matrix named `x0`.
+// Cells that don't exist in the source matrix will be populated with identity-matrix values.
+// This writes only the constructor's argument list into fExtraFunctions; the caller emits
+// the surrounding `typeName(` ... `);` text.
+void MetalCodeGenerator::assembleMatrixFromMatrix(const Type& sourceMatrix, int rows, int columns) {
+    SkASSERT(rows <= 4);
+    SkASSERT(columns <= 4);
+
+    std::string matrixType = this->typeName(sourceMatrix.componentType());
+
+    const char* separator = "";
+    for (int c = 0; c < columns; ++c) {
+        // Each destination column becomes one `floatR(...)` vector constructor.
+        fExtraFunctions.printf("%s%s%d(", separator, matrixType.c_str(), rows);
+        separator = "), ";
+
+        // Determine how many values to take from the source matrix for this column.
+        int swizzleLength = 0;
+        if (c < sourceMatrix.columns()) {
+            swizzleLength = std::min<>(rows, sourceMatrix.rows());
+        }
+
+        // Emit the values copied from this column of the source matrix (as a swizzle of x0[c]).
+        bool firstItem;
+        switch (swizzleLength) {
+            case 0: firstItem = true; break;
+            case 1: firstItem = false; fExtraFunctions.printf("x0[%d].x", c); break;
+            case 2: firstItem = false; fExtraFunctions.printf("x0[%d].xy", c); break;
+            case 3: firstItem = false; fExtraFunctions.printf("x0[%d].xyz", c); break;
+            case 4: firstItem = false; fExtraFunctions.printf("x0[%d].xyzw", c); break;
+            default: SkUNREACHABLE;
+        }
+
+        // Emit the placeholder identity-matrix cells (1.0 on the diagonal, 0.0 elsewhere).
+        for (int r = swizzleLength; r < rows; ++r) {
+            fExtraFunctions.printf("%s%s", firstItem ? "" : ", ", (r == c) ? "1.0" : "0.0");
+            firstItem = false;
+        }
+    }
+
+    // Close the final column constructor (earlier columns were closed by `separator`).
+    fExtraFunctions.writeText(")");
+}
+
+// Assembles a matrix of type floatCxR by concatenating an arbitrary mix of values, named `x0`,
+// `x1`, etc. An error is written if the expression list doesn't contain exactly C*R scalars.
+// Like assembleMatrixFromMatrix, this writes only the constructor's argument list into
+// fExtraFunctions; the caller emits the surrounding `typeName(` ... `);` text.
+void MetalCodeGenerator::assembleMatrixFromExpressions(const AnyConstructor& ctor,
+                                                       int columns, int rows) {
+    SkASSERT(rows <= 4);
+    SkASSERT(columns <= 4);
+
+    std::string matrixType = this->typeName(ctor.type().componentType());
+    // argIndex: which constructor argument (x0, x1, ...) we are currently consuming.
+    // argPosition: how many scalars of that argument have been consumed so far.
+    size_t argIndex = 0;
+    int argPosition = 0;
+    auto args = ctor.argumentSpan();
+
+    static constexpr char kSwizzle[] = "xyzw";
+    const char* separator = "";
+    for (int c = 0; c < columns; ++c) {
+        // Each destination column becomes one `floatR(...)` vector constructor.
+        fExtraFunctions.printf("%s%s%d(", separator, matrixType.c_str(), rows);
+        separator = "), ";
+
+        const char* columnSeparator = "";
+        for (int r = 0; r < rows;) {
+            fExtraFunctions.writeText(columnSeparator);
+            columnSeparator = ", ";
+
+            if (argIndex < args.size()) {
+                const Type& argType = args[argIndex]->type();
+                switch (argType.typeKind()) {
+                    case Type::TypeKind::kScalar: {
+                        // A scalar argument contributes exactly one cell.
+                        fExtraFunctions.printf("x%zu", argIndex);
+                        ++r;
+                        ++argPosition;
+                        break;
+                    }
+                    case Type::TypeKind::kVector: {
+                        // Consume as many components of the vector as fit in this column,
+                        // emitted as a swizzle (e.g. `x1.yz`).
+                        fExtraFunctions.printf("x%zu.", argIndex);
+                        do {
+                            fExtraFunctions.write8(kSwizzle[argPosition]);
+                            ++r;
+                            ++argPosition;
+                        } while (r < rows && argPosition < argType.columns());
+                        break;
+                    }
+                    case Type::TypeKind::kMatrix: {
+                        // Consume cells from the source matrix column-by-column; argPosition
+                        // indexes the matrix in column-major order.
+                        fExtraFunctions.printf("x%zu[%d].", argIndex, argPosition / argType.rows());
+                        do {
+                            fExtraFunctions.write8(kSwizzle[argPosition]);
+                            ++r;
+                            ++argPosition;
+                        } while (r < rows && (argPosition % argType.rows()) != 0);
+                        break;
+                    }
+                    default: {
+                        SkDEBUGFAIL("incorrect type of argument for matrix constructor");
+                        fExtraFunctions.writeText("<error>");
+                        break;
+                    }
+                }
+
+                // Once an argument is fully consumed, advance to the next one.
+                if (argPosition >= argType.columns() * argType.rows()) {
+                    ++argIndex;
+                    argPosition = 0;
+                }
+            } else {
+                SkDEBUGFAIL("not enough arguments for matrix constructor");
+                fExtraFunctions.writeText("<error>");
+            }
+        }
+    }
+
+    // Any leftover argument data means the scalar count didn't match C*R exactly.
+    if (argPosition != 0 || argIndex != args.size()) {
+        SkDEBUGFAIL("incorrect number of arguments for matrix constructor");
+        fExtraFunctions.writeText(", <error>");
+    }
+
+    fExtraFunctions.writeText(")");
+}
+
+// Generates a constructor for 'matrix' which reorganizes the input arguments into the proper shape.
+// Keeps track of previously generated constructors so that we won't generate more than one
+// constructor for any given permutation of input argument types. Returns the name of the
+// generated constructor method.
+std::string MetalCodeGenerator::getMatrixConstructHelper(const AnyConstructor& c) {
+    const Type& type = c.type();
+    int columns = type.columns();
+    int rows = type.rows();
+    auto args = c.argumentSpan();
+    std::string typeName = this->typeName(type);
+
+    // Create the helper-method name and use it as our lookup key, e.g.
+    // `float3x2_from_float2_float_float_float_float` -- the name encodes both the result
+    // type and the full argument-type permutation, so it doubles as the memoization key.
+    std::string name = String::printf("%s_from", typeName.c_str());
+    for (const std::unique_ptr<Expression>& expr : args) {
+        String::appendf(&name, "_%s", this->typeName(expr->type()).c_str());
+    }
+
+    // If a helper-method has not been synthesized yet, create it now.
+    if (!fHelpers.contains(name)) {
+        fHelpers.add(name);
+
+        // Unlike GLSL, Metal requires that matrices are initialized with exactly R vectors of C
+        // components apiece. (In Metal 2.0, you can also supply R*C scalars, but you still cannot
+        // supply a mixture of scalars and vectors.)
+        fExtraFunctions.printf("%s %s(", typeName.c_str(), name.c_str());
+
+        // Emit one parameter (x0, x1, ...) per constructor argument.
+        size_t argIndex = 0;
+        const char* argSeparator = "";
+        for (const std::unique_ptr<Expression>& expr : args) {
+            fExtraFunctions.printf("%s%s x%zu", argSeparator,
+                                   this->typeName(expr->type()).c_str(), argIndex++);
+            argSeparator = ", ";
+        }
+
+        fExtraFunctions.printf(") {\n    return %s(", typeName.c_str());
+
+        // A single matrix argument is a resize; anything else is a flat list of scalars/vectors.
+        if (args.size() == 1 && args.front()->type().isMatrix()) {
+            this->assembleMatrixFromMatrix(args.front()->type(), rows, columns);
+        } else {
+            this->assembleMatrixFromExpressions(c, columns, rows);
+        }
+
+        fExtraFunctions.writeText(");\n}\n");
+    }
+    return name;
+}
+
+// Returns true if the compound matrix constructor cannot be emitted inline in Metal and
+// requires a synthesized helper function (see getMatrixConstructHelper).
+bool MetalCodeGenerator::matrixConstructHelperIsNeeded(const ConstructorCompound& c) {
+    SkASSERT(c.type().isMatrix());
+
+    // GLSL is fairly free-form about inputs to its matrix constructors, but Metal is not; it
+    // expects exactly R vectors of C components apiece. (Metal 2.0 also allows a list of R*C
+    // scalars.) Some cases are simple to translate and so we handle those inline--e.g. a list of
+    // scalars can be constructed trivially. In more complex cases, we generate a helper function
+    // that converts our inputs into a properly-shaped matrix.
+    // A matrix construct helper method is always used if any input argument is a matrix.
+    // Helper methods are also necessary when any argument would span multiple rows. For instance:
+    //
+    // float2 x = (1, 2);
+    // float3x2(x, 3, 4, 5, 6) = | 1 3 5 | = no helper needed; conversion can be done inline
+    //                           | 2 4 6 |
+    //
+    // float2 x = (2, 3);
+    // float3x2(1, x, 4, 5, 6) = | 1 3 5 | = x spans multiple rows; a helper method will be used
+    //                           | 2 4 6 |
+    //
+    // float4 x = (1, 2, 3, 4);
+    // float2x2(x) = | 1 3 | = x spans multiple rows; a helper method will be used
+    //               | 2 4 |
+    //
+
+    // `position` tracks the scalar offset within the current column being filled.
+    int position = 0;
+    for (const std::unique_ptr<Expression>& expr : c.arguments()) {
+        // If an input argument is a matrix, we need a helper function.
+        if (expr->type().isMatrix()) {
+            return true;
+        }
+        position += expr->type().columns();
+        if (position > c.type().rows()) {
+            // An input argument would span multiple rows; a helper function is required.
+            return true;
+        }
+        if (position == c.type().rows()) {
+            // We've advanced to the end of a row. Wrap to the start of the next row.
+            position = 0;
+        }
+    }
+
+    return false;
+}
+
+// Emits a matrix-resize constructor (e.g. GLSL `float3x3(someFloat4x4)`) as a call to a
+// synthesized helper function, since Metal has no native matrix-resize cast.
+void MetalCodeGenerator::writeConstructorMatrixResize(const ConstructorMatrixResize& c,
+                                                      Precedence parentPrecedence) {
+    // Matrix-resize via casting doesn't natively exist in Metal at all, so we always need to use a
+    // matrix-construct helper here.
+    this->write(this->getMatrixConstructHelper(c));
+    this->write("(");
+    this->writeExpression(*c.argument(), Precedence::kSequence);
+    this->write(")");
+}
+
+// Dispatches a compound constructor to the vector or matrix emitter; any other composite
+// type is reported as an error.
+void MetalCodeGenerator::writeConstructorCompound(const ConstructorCompound& c,
+                                                  Precedence parentPrecedence) {
+    if (c.type().isVector()) {
+        this->writeConstructorCompoundVector(c, parentPrecedence);
+    } else if (c.type().isMatrix()) {
+        this->writeConstructorCompoundMatrix(c, parentPrecedence);
+    } else {
+        fContext.fErrors->error(c.fPosition, "unsupported compound constructor");
+    }
+}
+
+// Emits an array cast (e.g. `half[4]` from `float[4]`) by synthesizing (once per type pair)
+// a templated helper that converts each element, then invoking it on the argument.
+void MetalCodeGenerator::writeConstructorArrayCast(const ConstructorArrayCast& c,
+                                                   Precedence parentPrecedence) {
+    const Type& inType = c.argument()->type().componentType();
+    const Type& outType = c.type().componentType();
+    std::string inTypeName = this->typeName(inType);
+    std::string outTypeName = this->typeName(outType);
+
+    // The helper name doubles as the memoization key in fHelpers.
+    std::string name = "array_of_" + outTypeName + "_from_" + inTypeName;
+    if (!fHelpers.contains(name)) {
+        fHelpers.add(name);
+        fExtraFunctions.printf(R"(
+template <size_t N>
+array<%s, N> %s(thread const array<%s, N>& x) {
+    array<%s, N> result;
+    for (int i = 0; i < N; ++i) {
+        result[i] = %s(x[i]);
+    }
+    return result;
+}
+)",
+                               outTypeName.c_str(), name.c_str(), inTypeName.c_str(),
+                               outTypeName.c_str(),
+                               outTypeName.c_str());
+    }
+
+    this->write(name);
+    this->write("(");
+    this->writeExpression(*c.argument(), Precedence::kSequence);
+    this->write(")");
+}
+
+// Synthesizes (once per component type) a helper that flattens a 2x2 matrix into a
+// 4-component vector, supporting GLSL's `vec4(mat2x2)` constructor which Metal lacks.
+// Returns the helper's name.
+std::string MetalCodeGenerator::getVectorFromMat2x2ConstructorHelper(const Type& matrixType) {
+    SkASSERT(matrixType.isMatrix());
+    SkASSERT(matrixType.rows() == 2);
+    SkASSERT(matrixType.columns() == 2);
+
+    std::string baseType = this->typeName(matrixType.componentType());
+    std::string name = String::printf("%s4_from_%s2x2", baseType.c_str(), baseType.c_str());
+    if (!fHelpers.contains(name)) {
+        fHelpers.add(name);
+
+        // Concatenate the two columns into a single 4-vector.
+        fExtraFunctions.printf(R"(
+%s4 %s(%s2x2 x) {
+    return %s4(x[0].xy, x[1].xy);
+}
+)", baseType.c_str(), name.c_str(), baseType.c_str(), baseType.c_str());
+    }
+
+    return name;
+}
+
+// Emits a compound vector constructor. Most forms map directly onto Metal's own vector
+// constructors; the one exception (vec4 built from a 2x2 matrix) goes through a helper.
+void MetalCodeGenerator::writeConstructorCompoundVector(const ConstructorCompound& c,
+                                                        Precedence parentPrecedence) {
+    SkASSERT(c.type().isVector());
+
+    // Metal supports constructing vectors from a mix of scalars and vectors, but not matrices.
+    // GLSL supports vec4(mat2x2), so we detect that case here and emit a helper function.
+    if (c.type().columns() == 4 && c.argumentSpan().size() == 1) {
+        const Expression& expr = *c.argumentSpan().front();
+        if (expr.type().isMatrix()) {
+            this->write(this->getVectorFromMat2x2ConstructorHelper(expr.type()));
+            this->write("(");
+            this->writeExpression(expr, Precedence::kSequence);
+            this->write(")");
+            return;
+        }
+    }
+
+    // All other cases translate directly to a Metal constructor expression.
+    this->writeAnyConstructor(c, "(", ")", parentPrecedence);
+}
+
+// Emits a compound matrix constructor, either via a synthesized helper (for argument shapes
+// Metal can't express directly) or inline by grouping scalars/vectors into column vectors.
+void MetalCodeGenerator::writeConstructorCompoundMatrix(const ConstructorCompound& c,
+                                                        Precedence parentPrecedence) {
+    SkASSERT(c.type().isMatrix());
+
+    // Emit and invoke a matrix-constructor helper method if one is necessary.
+    if (this->matrixConstructHelperIsNeeded(c)) {
+        this->write(this->getMatrixConstructHelper(c));
+        this->write("(");
+        const char* separator = "";
+        for (const std::unique_ptr<Expression>& expr : c.arguments()) {
+            this->write(separator);
+            separator = ", ";
+            this->writeExpression(*expr, Precedence::kSequence);
+        }
+        this->write(")");
+        return;
+    }
+
+    // Metal doesn't allow creating matrices by passing in scalars and vectors in a jumble; it
+    // requires your scalars to be grouped up into columns. Because `matrixConstructHelperIsNeeded`
+    // returned false, we know that none of our scalars/vectors "wrap" across a column, so we
+    // can group our inputs up and synthesize a constructor for each column.
+    const Type& matrixType = c.type();
+    const Type& columnType = matrixType.componentType().toCompound(
+            fContext, /*columns=*/matrixType.rows(), /*rows=*/1);
+
+    this->writeType(matrixType);
+    this->write("(");
+    const char* separator = "";
+    // scalarCount tracks how many components of the current column have been emitted;
+    // nonzero means we are inside an open `floatN(` column-grouping constructor.
+    int scalarCount = 0;
+    for (const std::unique_ptr<Expression>& arg : c.arguments()) {
+        this->write(separator);
+        separator = ", ";
+        if (arg->type().columns() < matrixType.rows()) {
+            // Write a `floatN(` constructor to group scalars and smaller vectors together.
+            if (!scalarCount) {
+                this->writeType(columnType);
+                this->write("(");
+            }
+            scalarCount += arg->type().columns();
+        }
+        this->writeExpression(*arg, Precedence::kSequence);
+        if (scalarCount && scalarCount == matrixType.rows()) {
+            // Close our `floatN(...` constructor block from above.
+            this->write(")");
+            scalarCount = 0;
+        }
+    }
+    this->write(")");
+}
+
+// Emits a constructor in its plain form: the type name, an opening bracket, the
+// comma-separated arguments, and a closing bracket. The bracket strings are supplied by the
+// caller (e.g. "(" / ")" for constructor calls, "{" / "}" for aggregate initializers).
+void MetalCodeGenerator::writeAnyConstructor(const AnyConstructor& c,
+                                             const char* leftBracket,
+                                             const char* rightBracket,
+                                             Precedence parentPrecedence) {
+    this->writeType(c.type());
+    this->write(leftBracket);
+    const char* separator = "";
+    for (const std::unique_ptr<Expression>& arg : c.argumentSpan()) {
+        this->write(separator);
+        separator = ", ";
+        this->writeExpression(*arg, Precedence::kSequence);
+    }
+    this->write(rightBracket);
+}
+
+// Emits a cast-style constructor. In Metal this takes the same textual form as any other
+// constructor, so this simply forwards to writeAnyConstructor.
+void MetalCodeGenerator::writeCastConstructor(const AnyConstructor& c,
+                                              const char* leftBracket,
+                                              const char* rightBracket,
+                                              Precedence parentPrecedence) {
+    return this->writeAnyConstructor(c, leftBracket, rightBracket, parentPrecedence);
+}
+
+// Emits the expression for sk_FragCoord. When a render-target-flip uniform is present, the
+// y coordinate is remapped as `flip.x + flip.y * y` (offset plus scale) so the coordinate
+// matches Skia's orientation convention; z is always emitted as 0.0.
+void MetalCodeGenerator::writeFragCoord() {
+    if (!fRTFlipName.empty()) {
+        this->write("float4(_fragCoord.x, ");
+        this->write(fRTFlipName.c_str());
+        this->write(".x + ");
+        this->write(fRTFlipName.c_str());
+        this->write(".y * _fragCoord.y, 0.0, _fragCoord.w)");
+    } else {
+        this->write("float4(_fragCoord.x, _fragCoord.y, 0.0, _fragCoord.w)");
+    }
+}
+
+// true if the var is one of the compute-stage builtin variables (workgroup/invocation IDs);
+// these are treated as inputs even though they carry a builtin layout (see is_input).
+static bool is_compute_builtin(const Variable& var) {
+    switch (var.modifiers().fLayout.fBuiltin) {
+        case SK_NUMWORKGROUPS_BUILTIN:
+        case SK_WORKGROUPID_BUILTIN:
+        case SK_LOCALINVOCATIONID_BUILTIN:
+        case SK_GLOBALINVOCATIONID_BUILTIN:
+        case SK_LOCALINVOCATIONINDEX_BUILTIN:
+            return true;
+        default:
+            break;
+    }
+    return false;
+}
+
+// true if the var is part of the Inputs struct: an `in`-qualified global that is either a
+// user-declared variable (builtin == -1) or a compute-stage builtin, excluding textures.
+static bool is_input(const Variable& var) {
+    SkASSERT(var.storage() == VariableStorage::kGlobal);
+    return var.modifiers().fFlags & Modifiers::kIn_Flag &&
+           (var.modifiers().fLayout.fBuiltin == -1 || is_compute_builtin(var)) &&
+           var.type().typeKind() != Type::TypeKind::kTexture;
+}
+
+// true if the var is part of the Outputs struct: an `out`-qualified (but not `in`-qualified)
+// user-declared global, excluding textures.
+static bool is_output(const Variable& var) {
+    SkASSERT(var.storage() == VariableStorage::kGlobal);
+    // inout vars get written into the Inputs struct, so we exclude them from Outputs
+    return (var.modifiers().fFlags & Modifiers::kOut_Flag) &&
+           !(var.modifiers().fFlags & Modifiers::kIn_Flag) &&
+           var.modifiers().fLayout.fBuiltin == -1 &&
+           var.type().typeKind() != Type::TypeKind::kTexture;
+}
+
+// true if the var is part of the Uniforms struct: a `uniform`-qualified global, excluding
+// samplers (which are passed as separate entry-point arguments).
+static bool is_uniforms(const Variable& var) {
+    SkASSERT(var.storage() == VariableStorage::kGlobal);
+    return var.modifiers().fFlags & Modifiers::kUniform_Flag &&
+           var.type().typeKind() != Type::TypeKind::kSampler;
+}
+
+// true if the var is part of the Threadgroups struct: a `workgroup`-qualified global.
+static bool is_threadgroup(const Variable& var) {
+    SkASSERT(var.storage() == VariableStorage::kGlobal);
+    return var.modifiers().fFlags & Modifiers::kWorkgroup_Flag;
+}
+
+// true if the var is part of the Globals struct: any global that is not `const`.
+// (Const globals are emitted as plain constants and need no struct storage.)
+static bool is_in_globals(const Variable& var) {
+    SkASSERT(var.storage() == VariableStorage::kGlobal);
+    return !(var.modifiers().fFlags & Modifiers::kConst_Flag);
+}
+
+// Emits a reference to a variable, translating SkSL builtins to their Metal spellings and
+// prefixing globals with the synthesized struct (_in/_out/_uniforms/_threadgroups/_globals)
+// they were relocated into.
+void MetalCodeGenerator::writeVariableReference(const VariableReference& ref) {
+    // When assembling out-param helper functions, we copy variables into local clones with matching
+    // names. We never want to prepend "_in." or "_globals." when writing these variables since
+    // we're actually targeting the clones.
+    if (fIgnoreVariableReferenceModifiers) {
+        this->writeName(ref.variable()->mangledName());
+        return;
+    }
+
+    switch (ref.variable()->modifiers().fLayout.fBuiltin) {
+        case SK_FRAGCOLOR_BUILTIN:
+            this->write("_out.sk_FragColor");
+            break;
+        case SK_FRAGCOORD_BUILTIN:
+            // sk_FragCoord needs y-flip handling; see writeFragCoord.
+            this->writeFragCoord();
+            break;
+        case SK_VERTEXID_BUILTIN:
+            this->write("sk_VertexID");
+            break;
+        case SK_INSTANCEID_BUILTIN:
+            this->write("sk_InstanceID");
+            break;
+        case SK_CLOCKWISE_BUILTIN:
+            // We'd set the front facing winding in the MTLRenderCommandEncoder to be counter
+            // clockwise to match Skia convention. When the RT-flip uniform indicates a flipped
+            // target (flip.y < 0), the facing sense is inverted.
+            if (!fRTFlipName.empty()) {
+                this->write("(" + fRTFlipName + ".y < 0 ? _frontFacing : !_frontFacing)");
+            } else {
+                this->write("_frontFacing");
+            }
+            break;
+        default:
+            const Variable& var = *ref.variable();
+            if (var.storage() == Variable::Storage::kGlobal) {
+                // Route the global through whichever synthesized struct it lives in.
+                if (is_input(var)) {
+                    this->write("_in.");
+                } else if (is_output(var)) {
+                    this->write("_out.");
+                } else if (is_uniforms(var)) {
+                    this->write("_uniforms.");
+                } else if (is_threadgroup(var)) {
+                    this->write("_threadgroups.");
+                } else if (is_in_globals(var)) {
+                    this->write("_globals.");
+                }
+            }
+            this->writeName(var.mangledName());
+    }
+}
+
+// Emits an index expression `base[index]`, with a workaround for indexing into a
+// multi-component swizzle.
+void MetalCodeGenerator::writeIndexExpression(const IndexExpression& expr) {
+    // Metal does not seem to handle assignment into `vec.zyx[i]` properly--it compiles, but the
+    // results are wrong. We rewrite the expression as `vec[uint3(2,1,0)[i]]` instead. (Filed with
+    // Apple as FB12055941.)
+    if (expr.base()->is<Swizzle>()) {
+        const Swizzle& swizzle = expr.base()->as<Swizzle>();
+        // Single-component swizzles don't hit the bug and fall through to the plain path.
+        if (swizzle.components().size() > 1) {
+            this->writeExpression(*swizzle.base(), Precedence::kPostfix);
+            // Emit a uintN constant vector holding the swizzle's component indices, then
+            // index into it with the original index expression.
+            this->write("[uint" + std::to_string(swizzle.components().size()) + "(");
+            auto separator = SkSL::String::Separator();
+            for (int8_t component : swizzle.components()) {
+                this->write(separator());
+                this->write(std::to_string(component));
+            }
+            this->write(")[");
+            this->writeExpression(*expr.index(), Precedence::kTopLevel);
+            this->write("]]");
+            return;
+        }
+    }
+
+    // Ordinary case: `base[index]`.
+    this->writeExpression(*expr.base(), Precedence::kPostfix);
+    this->write("[");
+    this->writeExpression(*expr.index(), Precedence::kTopLevel);
+    this->write("]");
+}
+
+// Emits a struct field access, translating builtin fields (sk_Position/sk_PointSize) to the
+// Outputs struct and anonymous-interface-block fields to their _globals pointer member.
+void MetalCodeGenerator::writeFieldAccess(const FieldAccess& f) {
+    const Type::Field* field = &f.base()->type().fields()[f.fieldIndex()];
+    // For a default (named) owner, emit `base.`; interface-block accesses skip the base.
+    if (FieldAccess::OwnerKind::kDefault == f.ownerKind()) {
+        this->writeExpression(*f.base(), Precedence::kPostfix);
+        this->write(".");
+    }
+    switch (field->fModifiers.fLayout.fBuiltin) {
+        case SK_POSITION_BUILTIN:
+            this->write("_out.sk_Position");
+            break;
+        case SK_POINTSIZE_BUILTIN:
+            this->write("_out.sk_PointSize");
+            break;
+        default:
+            if (FieldAccess::OwnerKind::kAnonymousInterfaceBlock == f.ownerKind()) {
+                // Anonymous interface blocks are stored as pointers inside _globals;
+                // access their fields through `_globals.<blockName>-><field>`.
+                this->write("_globals.");
+                this->write(fInterfaceBlockNameMap[fInterfaceBlockMap[field]]);
+                this->write("->");
+            }
+            this->writeName(field->fName);
+    }
+}
+
+// Emits a swizzle expression such as `base.xyz`. Metal uses the same .xyzw syntax as GLSL,
+// so each component index maps directly onto a letter.
+void MetalCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
+    this->writeExpression(*swizzle.base(), Precedence::kPostfix);
+    this->write(".");
+    for (int c : swizzle.components()) {
+        SkASSERT(c >= 0 && c <= 3);
+        // "x\0y\0z\0w\0" packs four NUL-terminated one-letter strings; `&[c * 2]` selects
+        // the string for component c.
+        this->write(&("x\0y\0z\0w\0"[c * 2]));
+    }
+}
+
+// Synthesizes (once per left/right type pair) a thread-qualified `operator*=` overload for
+// matrices, implemented as `left = left * right`, so SkSL's matrix `*=` translates directly.
+void MetalCodeGenerator::writeMatrixTimesEqualHelper(const Type& left, const Type& right,
+                                                     const Type& result) {
+    SkASSERT(left.isMatrix());
+    SkASSERT(right.isMatrix());
+    SkASSERT(result.isMatrix());
+
+    // Memoization key: one helper per (left, right) type combination.
+    std::string key = "Matrix *= " + this->typeName(left) + ":" + this->typeName(right);
+
+    if (!fHelpers.contains(key)) {
+        fHelpers.add(key);
+        fExtraFunctions.printf("thread %s& operator*=(thread %s& left, thread const %s& right) {\n"
+                               "    left = left * right;\n"
+                               "    return left;\n"
+                               "}\n",
+                               this->typeName(result).c_str(), this->typeName(left).c_str(),
+                               this->typeName(right).c_str());
+    }
+}
+
+// Synthesizes (once per left/right type pair) `operator==` and `operator!=` overloads for
+// matrices. Equality is computed column-by-column with Metal's all(); != is defined as the
+// negation of ==. Prototypes go into fExtraFunctionPrototypes so other helpers can call them.
+void MetalCodeGenerator::writeMatrixEqualityHelpers(const Type& left, const Type& right) {
+    SkASSERT(left.isMatrix());
+    SkASSERT(right.isMatrix());
+    SkASSERT(left.rows() == right.rows());
+    SkASSERT(left.columns() == right.columns());
+
+    // Memoization key: one helper pair per (left, right) type combination.
+    std::string key = "Matrix == " + this->typeName(left) + ":" + this->typeName(right);
+
+    if (!fHelpers.contains(key)) {
+        fHelpers.add(key);
+        fExtraFunctionPrototypes.printf(R"(
+thread bool operator==(const %s left, const %s right);
+thread bool operator!=(const %s left, const %s right);
+)",
+                                        this->typeName(left).c_str(),
+                                        this->typeName(right).c_str(),
+                                        this->typeName(left).c_str(),
+                                        this->typeName(right).c_str());
+
+        fExtraFunctions.printf(
+                "thread bool operator==(const %s left, const %s right) {\n"
+                "    return ",
+                this->typeName(left).c_str(), this->typeName(right).c_str());
+
+        // Compare each column; all() folds the per-component comparison into a single bool.
+        const char* separator = "";
+        for (int index=0; index<left.columns(); ++index) {
+            fExtraFunctions.printf("%sall(left[%d] == right[%d])", separator, index, index);
+            separator = " &&\n           ";
+        }
+
+        fExtraFunctions.printf(
+                ";\n"
+                "}\n"
+                "thread bool operator!=(const %s left, const %s right) {\n"
+                "    return !(left == right);\n"
+                "}\n",
+                this->typeName(left).c_str(), this->typeName(right).c_str());
+    }
+}
+
+// Synthesizes (once per type) `operator/` and `operator/=` overloads for matrices,
+// implementing GLSL's componentwise matrix division by dividing column-by-column.
+void MetalCodeGenerator::writeMatrixDivisionHelpers(const Type& type) {
+    SkASSERT(type.isMatrix());
+
+    // Memoization key: one helper pair per matrix type.
+    std::string key = "Matrix / " + this->typeName(type);
+
+    if (!fHelpers.contains(key)) {
+        fHelpers.add(key);
+        std::string typeName = this->typeName(type);
+
+        fExtraFunctions.printf(
+                "thread %s operator/(const %s left, const %s right) {\n"
+                "    return %s(",
+                typeName.c_str(), typeName.c_str(), typeName.c_str(), typeName.c_str());
+
+        // Divide each column vector of `left` by the matching column of `right`.
+        const char* separator = "";
+        for (int index=0; index<type.columns(); ++index) {
+            fExtraFunctions.printf("%sleft[%d] / right[%d]", separator, index, index);
+            separator = ", ";
+        }
+
+        fExtraFunctions.printf(");\n"
+                               "}\n"
+                               "thread %s& operator/=(thread %s& left, thread const %s& right) {\n"
+                               "    left = left / right;\n"
+                               "    return left;\n"
+                               "}\n",
+                               typeName.c_str(), typeName.c_str(), typeName.c_str());
+    }
+}
+
+// Synthesizes (once, shared across all array types) templated `operator==`/`operator!=`
+// overloads over array_ref, comparing sizes and then each element with all().
+void MetalCodeGenerator::writeArrayEqualityHelpers(const Type& type) {
+    SkASSERT(type.isArray());
+
+    // If the array's component type needs a helper as well, we need to emit that one first.
+    this->writeEqualityHelpers(type.componentType(), type.componentType());
+
+    // A single templated helper covers every array type, so the key is type-independent.
+    std::string key = "ArrayEquality []";
+    if (!fHelpers.contains(key)) {
+        fHelpers.add(key);
+        fExtraFunctionPrototypes.writeText(R"(
+template <typename T1, typename T2>
+bool operator==(const array_ref<T1> left, const array_ref<T2> right);
+template <typename T1, typename T2>
+bool operator!=(const array_ref<T1> left, const array_ref<T2> right);
+)");
+        fExtraFunctions.writeText(R"(
+template <typename T1, typename T2>
+bool operator==(const array_ref<T1> left, const array_ref<T2> right) {
+    if (left.size() != right.size()) {
+        return false;
+    }
+    for (size_t index = 0; index < left.size(); ++index) {
+        if (!all(left[index] == right[index])) {
+            return false;
+        }
+    }
+    return true;
+}
+
+template <typename T1, typename T2>
+bool operator!=(const array_ref<T1> left, const array_ref<T2> right) {
+    return !(left == right);
+}
+)");
+    }
+}
+
+// Synthesizes (once per struct type) `operator==` and `operator!=` overloads, comparing
+// every field; array fields go through make_array_ref so the array helpers apply.
+void MetalCodeGenerator::writeStructEqualityHelpers(const Type& type) {
+    SkASSERT(type.isStruct());
+    // Memoization key: one helper pair per struct type.
+    std::string key = "StructEquality " + this->typeName(type);
+
+    if (!fHelpers.contains(key)) {
+        fHelpers.add(key);
+        // If one of the struct's fields needs a helper as well, we need to emit that one first.
+        for (const Type::Field& field : type.fields()) {
+            this->writeEqualityHelpers(*field.fType, *field.fType);
+        }
+
+        // Write operator== and operator!= for this struct, since those are assumed to exist in SkSL
+        // and GLSL but do not exist by default in Metal.
+        fExtraFunctionPrototypes.printf(R"(
+thread bool operator==(thread const %s& left, thread const %s& right);
+thread bool operator!=(thread const %s& left, thread const %s& right);
+)",
+                                        this->typeName(type).c_str(),
+                                        this->typeName(type).c_str(),
+                                        this->typeName(type).c_str(),
+                                        this->typeName(type).c_str());
+
+        fExtraFunctions.printf(
+                "thread bool operator==(thread const %s& left, thread const %s& right) {\n"
+                "    return ",
+                this->typeName(type).c_str(),
+                this->typeName(type).c_str());
+
+        // AND together a per-field comparison for every field in the struct.
+        const char* separator = "";
+        for (const Type::Field& field : type.fields()) {
+            if (field.fType->isArray()) {
+                // Arrays compare via the templated array_ref operator== (see
+                // writeArrayEqualityHelpers), reached through make_array_ref.
+                fExtraFunctions.printf(
+                        "%s(make_array_ref(left.%.*s) == make_array_ref(right.%.*s))",
+                        separator,
+                        (int)field.fName.size(), field.fName.data(),
+                        (int)field.fName.size(), field.fName.data());
+            } else {
+                fExtraFunctions.printf("%sall(left.%.*s == right.%.*s)",
+                                       separator,
+                                       (int)field.fName.size(), field.fName.data(),
+                                       (int)field.fName.size(), field.fName.data());
+            }
+            separator = " &&\n           ";
+        }
+        fExtraFunctions.printf(
+                ";\n"
+                "}\n"
+                "thread bool operator!=(thread const %s& left, thread const %s& right) {\n"
+                "    return !(left == right);\n"
+                "}\n",
+                this->typeName(type).c_str(),
+                this->typeName(type).c_str());
+    }
+}
+
+void MetalCodeGenerator::writeEqualityHelpers(const Type& leftType, const Type& rightType) {
+ if (leftType.isArray() && rightType.isArray()) {
+ this->writeArrayEqualityHelpers(leftType);
+ return;
+ }
+ if (leftType.isStruct() && rightType.isStruct()) {
+ this->writeStructEqualityHelpers(leftType);
+ return;
+ }
+ if (leftType.isMatrix() && rightType.isMatrix()) {
+ this->writeMatrixEqualityHelpers(leftType, rightType);
+ return;
+ }
+}
+
+void MetalCodeGenerator::writeNumberAsMatrix(const Expression& expr, const Type& matrixType) {
+ SkASSERT(expr.type().isNumber());
+ SkASSERT(matrixType.isMatrix());
+
+ // Componentwise multiply the scalar against a matrix of the desired size which contains all 1s.
+ this->write("(");
+ this->writeType(matrixType);
+ this->write("(");
+
+ const char* separator = "";
+ for (int index = matrixType.slotCount(); index--;) {
+ this->write(separator);
+ this->write("1.0");
+ separator = ", ";
+ }
+
+ this->write(") * ");
+ this->writeExpression(expr, Precedence::kMultiplicative);
+ this->write(")");
+}
+
+void MetalCodeGenerator::writeBinaryExpressionElement(const Expression& expr,
+ Operator op,
+ const Expression& other,
+ Precedence precedence) {
+ bool needMatrixSplatOnScalar = other.type().isMatrix() && expr.type().isNumber() &&
+ op.isValidForMatrixOrVector() &&
+ op.removeAssignment().kind() != Operator::Kind::STAR;
+ if (needMatrixSplatOnScalar) {
+ this->writeNumberAsMatrix(expr, other.type());
+ } else if (op.isEquality() && expr.type().isArray()) {
+ this->write("make_array_ref(");
+ this->writeExpression(expr, precedence);
+ this->write(")");
+ } else {
+ this->writeExpression(expr, precedence);
+ }
+}
+
+void MetalCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
+ Precedence parentPrecedence) {
+ const Expression& left = *b.left();
+ const Expression& right = *b.right();
+ const Type& leftType = left.type();
+ const Type& rightType = right.type();
+ Operator op = b.getOperator();
+ Precedence precedence = op.getBinaryPrecedence();
+ bool needParens = precedence >= parentPrecedence;
+ switch (op.kind()) {
+ case Operator::Kind::EQEQ:
+ this->writeEqualityHelpers(leftType, rightType);
+ if (leftType.isVector()) {
+ this->write("all");
+ needParens = true;
+ }
+ break;
+ case Operator::Kind::NEQ:
+ this->writeEqualityHelpers(leftType, rightType);
+ if (leftType.isVector()) {
+ this->write("any");
+ needParens = true;
+ }
+ break;
+ default:
+ break;
+ }
+ if (leftType.isMatrix() && rightType.isMatrix() && op.kind() == Operator::Kind::STAREQ) {
+ this->writeMatrixTimesEqualHelper(leftType, rightType, b.type());
+ }
+ if (op.removeAssignment().kind() == Operator::Kind::SLASH &&
+ ((leftType.isMatrix() && rightType.isMatrix()) ||
+ (leftType.isScalar() && rightType.isMatrix()) ||
+ (leftType.isMatrix() && rightType.isScalar()))) {
+ this->writeMatrixDivisionHelpers(leftType.isMatrix() ? leftType : rightType);
+ }
+
+ if (needParens) {
+ this->write("(");
+ }
+
+ this->writeBinaryExpressionElement(left, op, right, precedence);
+
+ if (op.kind() != Operator::Kind::EQ && op.isAssignment() &&
+ left.kind() == Expression::Kind::kSwizzle && !Analysis::HasSideEffects(left)) {
+ // This doesn't compile in Metal:
+ // float4 x = float4(1);
+ // x.xy *= float2x2(...);
+ // with the error message "non-const reference cannot bind to vector element",
+        // but switching it to x.xy = x.xy * float2x2(...) fixes it. We perform this transformation
+ // as long as the LHS has no side effects, and hope for the best otherwise.
+ this->write(" = ");
+ this->writeExpression(left, Precedence::kAssignment);
+ this->write(operator_name(op.removeAssignment()));
+ precedence = op.removeAssignment().getBinaryPrecedence();
+ } else {
+ this->write(operator_name(op));
+ }
+
+ this->writeBinaryExpressionElement(right, op, left, precedence);
+
+ if (needParens) {
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writeTernaryExpression(const TernaryExpression& t,
+ Precedence parentPrecedence) {
+ if (Precedence::kTernary >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*t.test(), Precedence::kTernary);
+ this->write(" ? ");
+ this->writeExpression(*t.ifTrue(), Precedence::kTernary);
+ this->write(" : ");
+ this->writeExpression(*t.ifFalse(), Precedence::kTernary);
+ if (Precedence::kTernary >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writePrefixExpression(const PrefixExpression& p,
+ Precedence parentPrecedence) {
+    // According to the MSL specification, the arithmetic unary operators (+ and -) do not act
+ // upon matrix type operands. We treat the unary "+" as NOP for all operands.
+ const Operator op = p.getOperator();
+ if (op.kind() == Operator::Kind::PLUS) {
+ return this->writeExpression(*p.operand(), Precedence::kPrefix);
+ }
+
+ const bool matrixNegation =
+ op.kind() == Operator::Kind::MINUS && p.operand()->type().isMatrix();
+ const bool needParens = Precedence::kPrefix >= parentPrecedence || matrixNegation;
+
+ if (needParens) {
+ this->write("(");
+ }
+
+ // Transform the unary "-" on a matrix type to a multiplication by -1.
+ if (matrixNegation) {
+ this->write("-1.0 * ");
+ } else {
+ this->write(p.getOperator().tightOperatorName());
+ }
+ this->writeExpression(*p.operand(), Precedence::kPrefix);
+
+ if (needParens) {
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writePostfixExpression(const PostfixExpression& p,
+ Precedence parentPrecedence) {
+ if (Precedence::kPostfix >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*p.operand(), Precedence::kPostfix);
+ this->write(p.getOperator().tightOperatorName());
+ if (Precedence::kPostfix >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void MetalCodeGenerator::writeLiteral(const Literal& l) {
+ const Type& type = l.type();
+ if (type.isFloat()) {
+ this->write(l.description(OperatorPrecedence::kTopLevel));
+ if (!l.type().highPrecision()) {
+ this->write("h");
+ }
+ return;
+ }
+ if (type.isInteger()) {
+ if (type.matches(*fContext.fTypes.fUInt)) {
+ this->write(std::to_string(l.intValue() & 0xffffffff));
+ this->write("u");
+ } else if (type.matches(*fContext.fTypes.fUShort)) {
+ this->write(std::to_string(l.intValue() & 0xffff));
+ this->write("u");
+ } else {
+ this->write(std::to_string(l.intValue()));
+ }
+ return;
+ }
+ SkASSERT(type.isBoolean());
+ this->write(l.description(OperatorPrecedence::kTopLevel));
+}
+
+void MetalCodeGenerator::writeFunctionRequirementArgs(const FunctionDeclaration& f,
+ const char*& separator) {
+ Requirements requirements = this->requirements(f);
+ if (requirements & kInputs_Requirement) {
+ this->write(separator);
+ this->write("_in");
+ separator = ", ";
+ }
+ if (requirements & kOutputs_Requirement) {
+ this->write(separator);
+ this->write("_out");
+ separator = ", ";
+ }
+ if (requirements & kUniforms_Requirement) {
+ this->write(separator);
+ this->write("_uniforms");
+ separator = ", ";
+ }
+ if (requirements & kGlobals_Requirement) {
+ this->write(separator);
+ this->write("_globals");
+ separator = ", ";
+ }
+ if (requirements & kFragCoord_Requirement) {
+ this->write(separator);
+ this->write("_fragCoord");
+ separator = ", ";
+ }
+ if (requirements & kThreadgroups_Requirement) {
+ this->write(separator);
+ this->write("_threadgroups");
+ separator = ", ";
+ }
+}
+
+void MetalCodeGenerator::writeFunctionRequirementParams(const FunctionDeclaration& f,
+ const char*& separator) {
+ Requirements requirements = this->requirements(f);
+ if (requirements & kInputs_Requirement) {
+ this->write(separator);
+ this->write("Inputs _in");
+ separator = ", ";
+ }
+ if (requirements & kOutputs_Requirement) {
+ this->write(separator);
+ this->write("thread Outputs& _out");
+ separator = ", ";
+ }
+ if (requirements & kUniforms_Requirement) {
+ this->write(separator);
+ this->write("Uniforms _uniforms");
+ separator = ", ";
+ }
+ if (requirements & kGlobals_Requirement) {
+ this->write(separator);
+ this->write("thread Globals& _globals");
+ separator = ", ";
+ }
+ if (requirements & kFragCoord_Requirement) {
+ this->write(separator);
+ this->write("float4 _fragCoord");
+ separator = ", ";
+ }
+ if (requirements & kThreadgroups_Requirement) {
+ this->write(separator);
+ this->write("threadgroup Threadgroups& _threadgroups");
+ separator = ", ";
+ }
+}
+
+int MetalCodeGenerator::getUniformBinding(const Modifiers& m) {
+ return (m.fLayout.fBinding >= 0) ? m.fLayout.fBinding
+ : fProgram.fConfig->fSettings.fDefaultUniformBinding;
+}
+
+int MetalCodeGenerator::getUniformSet(const Modifiers& m) {
+ return (m.fLayout.fSet >= 0) ? m.fLayout.fSet
+ : fProgram.fConfig->fSettings.fDefaultUniformSet;
+}
+
+bool MetalCodeGenerator::writeFunctionDeclaration(const FunctionDeclaration& f) {
+ fRTFlipName = fProgram.fInputs.fUseFlipRTUniform
+ ? "_globals._anonInterface0->" SKSL_RTFLIP_NAME
+ : "";
+ const char* separator = "";
+ if (f.isMain()) {
+ if (ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
+ this->write("fragment Outputs fragmentMain");
+ } else if (ProgramConfig::IsVertex(fProgram.fConfig->fKind)) {
+ this->write("vertex Outputs vertexMain");
+ } else if (ProgramConfig::IsCompute(fProgram.fConfig->fKind)) {
+ this->write("kernel void computeMain");
+ } else {
+ fContext.fErrors->error(Position(), "unsupported kind of program");
+ return false;
+ }
+ this->write("(");
+ if (!ProgramConfig::IsCompute(fProgram.fConfig->fKind)) {
+ this->write("Inputs _in [[stage_in]]");
+ separator = ", ";
+ }
+ if (-1 != fUniformBuffer) {
+ this->write(separator);
+ this->write("constant Uniforms& _uniforms [[buffer(" +
+ std::to_string(fUniformBuffer) + ")]]");
+ separator = ", ";
+ }
+ for (const ProgramElement* e : fProgram.elements()) {
+ if (e->is<GlobalVarDeclaration>()) {
+ const GlobalVarDeclaration& decls = e->as<GlobalVarDeclaration>();
+ const VarDeclaration& decl = decls.varDeclaration();
+ const Variable* var = decl.var();
+ const SkSL::Type::TypeKind varKind = var->type().typeKind();
+
+ if (varKind == Type::TypeKind::kSampler || varKind == Type::TypeKind::kTexture) {
+ if (var->type().dimensions() != SpvDim2D) {
+ // Not yet implemented--Skia currently only uses 2D textures.
+ fContext.fErrors->error(decls.fPosition, "Unsupported texture dimensions");
+ return false;
+ }
+
+ int binding = getUniformBinding(var->modifiers());
+ this->write(separator);
+ separator = ", ";
+
+ if (varKind == Type::TypeKind::kSampler) {
+ this->writeType(var->type().textureType());
+ this->write(" ");
+ this->writeName(var->mangledName());
+ this->write(kTextureSuffix);
+ this->write(" [[texture(");
+ this->write(std::to_string(binding));
+ this->write(")]], sampler ");
+ this->writeName(var->mangledName());
+ this->write(kSamplerSuffix);
+ this->write(" [[sampler(");
+ this->write(std::to_string(binding));
+ this->write(")]]");
+ } else {
+ SkASSERT(varKind == Type::TypeKind::kTexture);
+ this->writeType(var->type());
+ this->write(" ");
+ this->writeName(var->mangledName());
+ this->write(" [[texture(");
+ this->write(std::to_string(binding));
+ this->write(")]]");
+ }
+ } else if (ProgramConfig::IsCompute(fProgram.fConfig->fKind)) {
+ std::string type, attr;
+ switch (var->modifiers().fLayout.fBuiltin) {
+ case SK_NUMWORKGROUPS_BUILTIN:
+ type = "uint3 ";
+ attr = " [[threadgroups_per_grid]]";
+ break;
+ case SK_WORKGROUPID_BUILTIN:
+ type = "uint3 ";
+ attr = " [[threadgroup_position_in_grid]]";
+ break;
+ case SK_LOCALINVOCATIONID_BUILTIN:
+ type = "uint3 ";
+ attr = " [[thread_position_in_threadgroup]]";
+ break;
+ case SK_GLOBALINVOCATIONID_BUILTIN:
+ type = "uint3 ";
+ attr = " [[thread_position_in_grid]]";
+ break;
+ case SK_LOCALINVOCATIONINDEX_BUILTIN:
+ type = "uint ";
+ attr = " [[thread_index_in_threadgroup]]";
+ break;
+ default:
+ break;
+ }
+ if (!attr.empty()) {
+ this->write(separator);
+ this->write(type);
+ this->write(var->name());
+ this->write(attr);
+ separator = ", ";
+ }
+ }
+ } else if (e->is<InterfaceBlock>()) {
+ const InterfaceBlock& intf = e->as<InterfaceBlock>();
+ if (intf.typeName() == "sk_PerVertex") {
+ continue;
+ }
+ this->write(separator);
+ if (is_readonly(intf)) {
+ this->write("const ");
+ }
+ this->write(is_buffer(intf) ? "device " : "constant ");
+ this->writeType(intf.var()->type());
+ this->write("& " );
+ this->write(fInterfaceBlockNameMap[&intf]);
+ this->write(" [[buffer(");
+ this->write(std::to_string(this->getUniformBinding(intf.var()->modifiers())));
+ this->write(")]]");
+ separator = ", ";
+ }
+ }
+ if (ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
+ if (fProgram.fInputs.fUseFlipRTUniform && fInterfaceBlockNameMap.empty()) {
+ this->write(separator);
+ this->write("constant sksl_synthetic_uniforms& _anonInterface0 [[buffer(1)]]");
+ fRTFlipName = "_anonInterface0." SKSL_RTFLIP_NAME;
+ separator = ", ";
+ }
+ this->write(separator);
+ this->write("bool _frontFacing [[front_facing]]");
+ this->write(", float4 _fragCoord [[position]]");
+ separator = ", ";
+ } else if (ProgramConfig::IsVertex(fProgram.fConfig->fKind)) {
+ this->write(separator);
+ this->write("uint sk_VertexID [[vertex_id]], uint sk_InstanceID [[instance_id]]");
+ separator = ", ";
+ }
+ } else {
+ this->writeType(f.returnType());
+ this->write(" ");
+ this->writeName(f.mangledName());
+ this->write("(");
+ this->writeFunctionRequirementParams(f, separator);
+ }
+ for (const Variable* param : f.parameters()) {
+ if (f.isMain() && param->modifiers().fLayout.fBuiltin != -1) {
+ continue;
+ }
+ this->write(separator);
+ separator = ", ";
+ this->writeModifiers(param->modifiers());
+ this->writeType(param->type());
+ if (pass_by_reference(param->type(), param->modifiers())) {
+ this->write("&");
+ }
+ this->write(" ");
+ this->writeName(param->mangledName());
+ }
+ this->write(")");
+ return true;
+}
+
+void MetalCodeGenerator::writeFunctionPrototype(const FunctionPrototype& f) {
+ this->writeFunctionDeclaration(f.declaration());
+ this->writeLine(";");
+}
+
+static bool is_block_ending_with_return(const Statement* stmt) {
+ // This function detects (potentially nested) blocks that end in a return statement.
+ if (!stmt->is<Block>()) {
+ return false;
+ }
+ const StatementArray& block = stmt->as<Block>().children();
+ for (int index = block.size(); index--; ) {
+ stmt = block[index].get();
+ if (stmt->is<ReturnStatement>()) {
+ return true;
+ }
+ if (stmt->is<Block>()) {
+ return is_block_ending_with_return(stmt);
+ }
+ if (!stmt->is<Nop>()) {
+ break;
+ }
+ }
+ return false;
+}
+
+void MetalCodeGenerator::writeComputeMainInputs() {
+ // Compute shaders only have input variables (e.g. sk_GlobalInvocationID) and access program
+ // inputs/outputs via the Globals and Uniforms structs. We collect the allowed "in" parameters
+ // into an Input struct here, since the rest of the code expects the normal _in / _out pattern.
+ this->write("Inputs _in = { ");
+ const char* separator = "";
+ for (const ProgramElement* e : fProgram.elements()) {
+ if (e->is<GlobalVarDeclaration>()) {
+ const GlobalVarDeclaration& decls = e->as<GlobalVarDeclaration>();
+ const Variable* var = decls.varDeclaration().var();
+ if (is_input(*var)) {
+ this->write(separator);
+ separator = ", ";
+ this->writeName(var->mangledName());
+ }
+ }
+ }
+ this->writeLine(" };");
+}
+
+void MetalCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ SkASSERT(!fProgram.fConfig->fSettings.fFragColorIsInOut);
+
+ if (!this->writeFunctionDeclaration(f.declaration())) {
+ return;
+ }
+
+ fCurrentFunction = &f.declaration();
+ SkScopeExit clearCurrentFunction([&] { fCurrentFunction = nullptr; });
+
+ this->writeLine(" {");
+
+ if (f.declaration().isMain()) {
+ fIndentation++;
+ this->writeGlobalInit();
+ if (ProgramConfig::IsCompute(fProgram.fConfig->fKind)) {
+ this->writeThreadgroupInit();
+ this->writeComputeMainInputs();
+ }
+ else {
+ this->writeLine("Outputs _out;");
+ this->writeLine("(void)_out;");
+ }
+ fIndentation--;
+ }
+
+ fFunctionHeader.clear();
+ StringStream buffer;
+ {
+ AutoOutputStream outputToBuffer(this, &buffer);
+ fIndentation++;
+ for (const std::unique_ptr<Statement>& stmt : f.body()->as<Block>().children()) {
+ if (!stmt->isEmpty()) {
+ this->writeStatement(*stmt);
+ this->finishLine();
+ }
+ }
+ if (f.declaration().isMain()) {
+ // If the main function doesn't end with a return, we need to synthesize one here.
+ if (!is_block_ending_with_return(f.body().get())) {
+ this->writeReturnStatementFromMain();
+ this->finishLine();
+ }
+ }
+ fIndentation--;
+ this->writeLine("}");
+ }
+ this->write(fFunctionHeader);
+ this->write(buffer.str());
+}
+
+void MetalCodeGenerator::writeModifiers(const Modifiers& modifiers) {
+ if (ProgramConfig::IsCompute(fProgram.fConfig->fKind) &&
+ (modifiers.fFlags & (Modifiers::kIn_Flag | Modifiers::kOut_Flag))) {
+ this->write("device ");
+ } else if (modifiers.fFlags & Modifiers::kOut_Flag) {
+ this->write("thread ");
+ }
+ if (modifiers.fFlags & Modifiers::kConst_Flag) {
+ this->write("const ");
+ }
+}
+
+void MetalCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf) {
+ if (intf.typeName() == "sk_PerVertex") {
+ return;
+ }
+ const Type* structType = &intf.var()->type().componentType();
+ this->writeModifiers(intf.var()->modifiers());
+ this->write("struct ");
+ this->writeType(*structType);
+ this->writeLine(" {");
+ fIndentation++;
+ this->writeFields(structType->fields(), structType->fPosition, &intf);
+ if (fProgram.fInputs.fUseFlipRTUniform) {
+ this->writeLine("float2 " SKSL_RTFLIP_NAME ";");
+ }
+ fIndentation--;
+ this->write("}");
+ if (intf.instanceName().size()) {
+ this->write(" ");
+ this->write(intf.instanceName());
+ if (intf.arraySize() > 0) {
+ this->write("[");
+ this->write(std::to_string(intf.arraySize()));
+ this->write("]");
+ }
+ fInterfaceBlockNameMap.set(&intf, intf.instanceName());
+ } else {
+ fInterfaceBlockNameMap.set(&intf, *fProgram.fSymbols->takeOwnershipOfString(
+ "_anonInterface" + std::to_string(fAnonInterfaceCount++)));
+ }
+ this->writeLine(";");
+}
+
+void MetalCodeGenerator::writeFields(const std::vector<Type::Field>& fields, Position parentPos,
+ const InterfaceBlock* parentIntf) {
+ MemoryLayout memoryLayout(MemoryLayout::Standard::kMetal);
+ int currentOffset = 0;
+ for (const Type::Field& field : fields) {
+ int fieldOffset = field.fModifiers.fLayout.fOffset;
+ const Type* fieldType = field.fType;
+ if (!memoryLayout.isSupported(*fieldType)) {
+ fContext.fErrors->error(parentPos, "type '" + std::string(fieldType->name()) +
+ "' is not permitted here");
+ return;
+ }
+ if (fieldOffset != -1) {
+ if (currentOffset > fieldOffset) {
+ fContext.fErrors->error(field.fPosition,
+ "offset of field '" + std::string(field.fName) +
+ "' must be at least " + std::to_string(currentOffset));
+ return;
+ } else if (currentOffset < fieldOffset) {
+ this->write("char pad");
+ this->write(std::to_string(fPaddingCount++));
+ this->write("[");
+ this->write(std::to_string(fieldOffset - currentOffset));
+ this->writeLine("];");
+ currentOffset = fieldOffset;
+ }
+ int alignment = memoryLayout.alignment(*fieldType);
+ if (fieldOffset % alignment) {
+ fContext.fErrors->error(field.fPosition,
+ "offset of field '" + std::string(field.fName) +
+ "' must be a multiple of " + std::to_string(alignment));
+ return;
+ }
+ }
+ if (fieldType->isUnsizedArray()) {
+ // An unsized array always appears as the last member of a storage block. We declare
+ // it as a one-element array and allow dereferencing past the capacity.
+ // TODO(armansito): This is because C++ does not support flexible array members like C99
+ // does. This generally works but it can lead to UB as compilers are free to insert
+ // padding past the first element of the array. An alternative approach is to declare
+ // the struct without the unsized array member and replace variable references with a
+ // buffer offset calculation based on sizeof().
+ this->writeModifiers(field.fModifiers);
+ this->writeType(fieldType->componentType());
+ this->write(" ");
+ this->writeName(field.fName);
+ this->write("[1]");
+ } else {
+ size_t fieldSize = memoryLayout.size(*fieldType);
+ if (fieldSize > static_cast<size_t>(std::numeric_limits<int>::max() - currentOffset)) {
+ fContext.fErrors->error(parentPos, "field offset overflow");
+ return;
+ }
+ currentOffset += fieldSize;
+ this->writeModifiers(field.fModifiers);
+ this->writeType(*fieldType);
+ this->write(" ");
+ this->writeName(field.fName);
+ }
+ this->writeLine(";");
+ if (parentIntf) {
+ fInterfaceBlockMap.set(&field, parentIntf);
+ }
+ }
+}
+
+void MetalCodeGenerator::writeVarInitializer(const Variable& var, const Expression& value) {
+ this->writeExpression(value, Precedence::kTopLevel);
+}
+
+void MetalCodeGenerator::writeName(std::string_view name) {
+ if (fReservedWords.contains(name)) {
+ this->write("_"); // adding underscore before name to avoid conflict with reserved words
+ }
+ this->write(name);
+}
+
+void MetalCodeGenerator::writeVarDeclaration(const VarDeclaration& varDecl) {
+ this->writeModifiers(varDecl.var()->modifiers());
+ this->writeType(varDecl.var()->type());
+ this->write(" ");
+ this->writeName(varDecl.var()->mangledName());
+ if (varDecl.value()) {
+ this->write(" = ");
+ this->writeVarInitializer(*varDecl.var(), *varDecl.value());
+ }
+ this->write(";");
+}
+
+void MetalCodeGenerator::writeStatement(const Statement& s) {
+ switch (s.kind()) {
+ case Statement::Kind::kBlock:
+ this->writeBlock(s.as<Block>());
+ break;
+ case Statement::Kind::kExpression:
+ this->writeExpressionStatement(s.as<ExpressionStatement>());
+ break;
+ case Statement::Kind::kReturn:
+ this->writeReturnStatement(s.as<ReturnStatement>());
+ break;
+ case Statement::Kind::kVarDeclaration:
+ this->writeVarDeclaration(s.as<VarDeclaration>());
+ break;
+ case Statement::Kind::kIf:
+ this->writeIfStatement(s.as<IfStatement>());
+ break;
+ case Statement::Kind::kFor:
+ this->writeForStatement(s.as<ForStatement>());
+ break;
+ case Statement::Kind::kDo:
+ this->writeDoStatement(s.as<DoStatement>());
+ break;
+ case Statement::Kind::kSwitch:
+ this->writeSwitchStatement(s.as<SwitchStatement>());
+ break;
+ case Statement::Kind::kBreak:
+ this->write("break;");
+ break;
+ case Statement::Kind::kContinue:
+ this->write("continue;");
+ break;
+ case Statement::Kind::kDiscard:
+ this->write("discard_fragment();");
+ break;
+ case Statement::Kind::kNop:
+ this->write(";");
+ break;
+ default:
+ SkDEBUGFAILF("unsupported statement: %s", s.description().c_str());
+ break;
+ }
+}
+
+void MetalCodeGenerator::writeBlock(const Block& b) {
+ // Write scope markers if this block is a scope, or if the block is empty (since we need to emit
+ // something here to make the code valid).
+ bool isScope = b.isScope() || b.isEmpty();
+ if (isScope) {
+ this->writeLine("{");
+ fIndentation++;
+ }
+ for (const std::unique_ptr<Statement>& stmt : b.children()) {
+ if (!stmt->isEmpty()) {
+ this->writeStatement(*stmt);
+ this->finishLine();
+ }
+ }
+ if (isScope) {
+ fIndentation--;
+ this->write("}");
+ }
+}
+
+void MetalCodeGenerator::writeIfStatement(const IfStatement& stmt) {
+ this->write("if (");
+ this->writeExpression(*stmt.test(), Precedence::kTopLevel);
+ this->write(") ");
+ this->writeStatement(*stmt.ifTrue());
+ if (stmt.ifFalse()) {
+ this->write(" else ");
+ this->writeStatement(*stmt.ifFalse());
+ }
+}
+
+void MetalCodeGenerator::writeForStatement(const ForStatement& f) {
+ // Emit loops of the form 'for(;test;)' as 'while(test)', which is probably how they started
+ if (!f.initializer() && f.test() && !f.next()) {
+ this->write("while (");
+ this->writeExpression(*f.test(), Precedence::kTopLevel);
+ this->write(") ");
+ this->writeStatement(*f.statement());
+ return;
+ }
+
+ this->write("for (");
+ if (f.initializer() && !f.initializer()->isEmpty()) {
+ this->writeStatement(*f.initializer());
+ } else {
+ this->write("; ");
+ }
+ if (f.test()) {
+ this->writeExpression(*f.test(), Precedence::kTopLevel);
+ }
+ this->write("; ");
+ if (f.next()) {
+ this->writeExpression(*f.next(), Precedence::kTopLevel);
+ }
+ this->write(") ");
+ this->writeStatement(*f.statement());
+}
+
+void MetalCodeGenerator::writeDoStatement(const DoStatement& d) {
+ this->write("do ");
+ this->writeStatement(*d.statement());
+ this->write(" while (");
+ this->writeExpression(*d.test(), Precedence::kTopLevel);
+ this->write(");");
+}
+
+void MetalCodeGenerator::writeExpressionStatement(const ExpressionStatement& s) {
+ if (fProgram.fConfig->fSettings.fOptimize && !Analysis::HasSideEffects(*s.expression())) {
+ // Don't emit dead expressions.
+ return;
+ }
+ this->writeExpression(*s.expression(), Precedence::kTopLevel);
+ this->write(";");
+}
+
+void MetalCodeGenerator::writeSwitchStatement(const SwitchStatement& s) {
+ this->write("switch (");
+ this->writeExpression(*s.value(), Precedence::kTopLevel);
+ this->writeLine(") {");
+ fIndentation++;
+ for (const std::unique_ptr<Statement>& stmt : s.cases()) {
+ const SwitchCase& c = stmt->as<SwitchCase>();
+ if (c.isDefault()) {
+ this->writeLine("default:");
+ } else {
+ this->write("case ");
+ this->write(std::to_string(c.value()));
+ this->writeLine(":");
+ }
+ if (!c.statement()->isEmpty()) {
+ fIndentation++;
+ this->writeStatement(*c.statement());
+ this->finishLine();
+ fIndentation--;
+ }
+ }
+ fIndentation--;
+ this->write("}");
+}
+
+void MetalCodeGenerator::writeReturnStatementFromMain() {
+ // main functions in Metal return a magic _out parameter that doesn't exist in SkSL.
+ if (ProgramConfig::IsVertex(fProgram.fConfig->fKind) ||
+ ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
+ this->write("return _out;");
+ } else if (ProgramConfig::IsCompute(fProgram.fConfig->fKind)) {
+ this->write("return;");
+ } else {
+ SkDEBUGFAIL("unsupported kind of program");
+ }
+}
+
+void MetalCodeGenerator::writeReturnStatement(const ReturnStatement& r) {
+ if (fCurrentFunction && fCurrentFunction->isMain()) {
+ if (r.expression()) {
+ if (r.expression()->type().matches(*fContext.fTypes.fHalf4)) {
+ this->write("_out.sk_FragColor = ");
+ this->writeExpression(*r.expression(), Precedence::kTopLevel);
+ this->writeLine(";");
+ } else {
+ fContext.fErrors->error(r.fPosition,
+ "Metal does not support returning '" +
+ r.expression()->type().description() + "' from main()");
+ }
+ }
+ this->writeReturnStatementFromMain();
+ return;
+ }
+
+ this->write("return");
+ if (r.expression()) {
+ this->write(" ");
+ this->writeExpression(*r.expression(), Precedence::kTopLevel);
+ }
+ this->write(";");
+}
+
+void MetalCodeGenerator::writeHeader() {
+ this->write("#include <metal_stdlib>\n");
+ this->write("#include <simd/simd.h>\n");
+ this->write("using namespace metal;\n");
+}
+
+void MetalCodeGenerator::writeSampler2DPolyfill() {
+ class : public GlobalStructVisitor {
+ public:
+ void visitSampler(const Type&, std::string_view) override {
+ if (fWrotePolyfill) {
+ return;
+ }
+ fWrotePolyfill = true;
+
+ std::string polyfill = SkSL::String::printf(R"(
+struct sampler2D {
+ texture2d<half> tex;
+ sampler smp;
+};
+half4 sample(sampler2D i, float2 p, float b=%g) { return i.tex.sample(i.smp, p, bias(b)); }
+half4 sample(sampler2D i, float3 p, float b=%g) { return i.tex.sample(i.smp, p.xy / p.z, bias(b)); }
+half4 sampleLod(sampler2D i, float2 p, float lod) { return i.tex.sample(i.smp, p, level(lod)); }
+half4 sampleLod(sampler2D i, float3 p, float lod) {
+ return i.tex.sample(i.smp, p.xy / p.z, level(lod));
+}
+half4 sampleGrad(sampler2D i, float2 p, float2 dPdx, float2 dPdy) {
+ return i.tex.sample(i.smp, p, gradient2d(dPdx, dPdy));
+}
+
+)",
+ fTextureBias,
+ fTextureBias);
+ fCodeGen->write(polyfill.c_str());
+ }
+
+ MetalCodeGenerator* fCodeGen = nullptr;
+ float fTextureBias = 0.0f;
+ bool fWrotePolyfill = false;
+ } visitor;
+
+ visitor.fCodeGen = this;
+ visitor.fTextureBias = fProgram.fConfig->fSettings.fSharpenTextures ? kSharpenTexturesBias
+ : 0.0f;
+ this->visitGlobalStruct(&visitor);
+}
+
+void MetalCodeGenerator::writeUniformStruct() {
+ for (const ProgramElement* e : fProgram.elements()) {
+ if (e->is<GlobalVarDeclaration>()) {
+ const GlobalVarDeclaration& decls = e->as<GlobalVarDeclaration>();
+ const Variable& var = *decls.varDeclaration().var();
+ if (var.modifiers().fFlags & Modifiers::kUniform_Flag &&
+ var.type().typeKind() != Type::TypeKind::kSampler &&
+ var.type().typeKind() != Type::TypeKind::kTexture) {
+ int uniformSet = this->getUniformSet(var.modifiers());
+ // Make sure that the program's uniform-set value is consistent throughout.
+ if (-1 == fUniformBuffer) {
+ this->write("struct Uniforms {\n");
+ fUniformBuffer = uniformSet;
+ } else if (uniformSet != fUniformBuffer) {
+ fContext.fErrors->error(decls.fPosition,
+ "Metal backend requires all uniforms to have the same "
+ "'layout(set=...)'");
+ }
+ this->write(" ");
+ this->writeType(var.type());
+ this->write(" ");
+ this->writeName(var.mangledName());
+ this->write(";\n");
+ }
+ }
+ }
+ if (-1 != fUniformBuffer) {
+ this->write("};\n");
+ }
+}
+
+void MetalCodeGenerator::writeInputStruct() {
+ this->write("struct Inputs {\n");
+ for (const ProgramElement* e : fProgram.elements()) {
+ if (e->is<GlobalVarDeclaration>()) {
+ const GlobalVarDeclaration& decls = e->as<GlobalVarDeclaration>();
+ const Variable& var = *decls.varDeclaration().var();
+ if (is_input(var)) {
+ this->write(" ");
+ if (ProgramConfig::IsCompute(fProgram.fConfig->fKind) &&
+ needs_address_space(var.type(), var.modifiers())) {
+ // TODO: address space support
+ this->write("device ");
+ }
+ this->writeType(var.type());
+ if (pass_by_reference(var.type(), var.modifiers())) {
+ this->write("&");
+ }
+ this->write(" ");
+ this->writeName(var.mangledName());
+ if (-1 != var.modifiers().fLayout.fLocation) {
+ if (ProgramConfig::IsVertex(fProgram.fConfig->fKind)) {
+ this->write(" [[attribute(" +
+ std::to_string(var.modifiers().fLayout.fLocation) + ")]]");
+ } else if (ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
+ this->write(" [[user(locn" +
+ std::to_string(var.modifiers().fLayout.fLocation) + ")]]");
+ }
+ }
+ this->write(";\n");
+ }
+ }
+ }
+ this->write("};\n");
+}
+
+// Emits the Metal `struct Outputs` declaration. Vertex programs always get
+// sk_Position [[position]] (and a trailing sk_PointSize [[point_size]]);
+// fragment programs get sk_FragColor [[color(0)]]. User outputs are then
+// appended with [[user(locnN)]] (vertex) or [[color(N), index(I)]]
+// (fragment) qualifiers derived from their SkSL layout.
+void MetalCodeGenerator::writeOutputStruct() {
+    this->write("struct Outputs {\n");
+    if (ProgramConfig::IsVertex(fProgram.fConfig->fKind)) {
+        this->write(" float4 sk_Position [[position]];\n");
+    } else if (ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
+        this->write(" half4 sk_FragColor [[color(0)]];\n");
+    }
+    for (const ProgramElement* e : fProgram.elements()) {
+        if (e->is<GlobalVarDeclaration>()) {
+            const GlobalVarDeclaration& decls = e->as<GlobalVarDeclaration>();
+            const Variable& var = *decls.varDeclaration().var();
+            if (is_output(var)) {
+                this->write(" ");
+                if (ProgramConfig::IsCompute(fProgram.fConfig->fKind) &&
+                    needs_address_space(var.type(), var.modifiers())) {
+                    // TODO: address space support
+                    this->write("device ");
+                }
+                this->writeType(var.type());
+                if (ProgramConfig::IsCompute(fProgram.fConfig->fKind) &&
+                    pass_by_reference(var.type(), var.modifiers())) {
+                    this->write("&");
+                }
+                this->write(" ");
+                this->writeName(var.mangledName());
+
+                // Non-compute outputs (other than textures) must carry an
+                // explicit location; otherwise we can't assign a Metal slot.
+                int location = var.modifiers().fLayout.fLocation;
+                if (!ProgramConfig::IsCompute(fProgram.fConfig->fKind) && location < 0 &&
+                    var.type().typeKind() != Type::TypeKind::kTexture) {
+                    fContext.fErrors->error(var.fPosition,
+                                            "Metal out variables must have 'layout(location=...)'");
+                } else if (ProgramConfig::IsVertex(fProgram.fConfig->fKind)) {
+                    this->write(" [[user(locn" + std::to_string(location) + ")]]");
+                } else if (ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
+                    this->write(" [[color(" + std::to_string(location) + ")");
+                    // A nonzero layout(index=...) selects the dual-source
+                    // blending output.
+                    int colorIndex = var.modifiers().fLayout.fIndex;
+                    if (colorIndex) {
+                        this->write(", index(" + std::to_string(colorIndex) + ")");
+                    }
+                    this->write("]]");
+                }
+                this->write(";\n");
+            }
+        }
+    }
+    if (ProgramConfig::IsVertex(fProgram.fConfig->fKind)) {
+        this->write(" float sk_PointSize [[point_size]];\n");
+    }
+    this->write("};\n");
+}
+
+// Emits every InterfaceBlock in the program. When no interface block was
+// written but the program still needs the render-target-flip uniform,
+// synthesizes a minimal struct so later references to SKSL_RTFLIP_NAME
+// have a declaration to resolve against.
+void MetalCodeGenerator::writeInterfaceBlocks() {
+    bool wroteInterfaceBlock = false;
+    for (const ProgramElement* e : fProgram.elements()) {
+        if (e->is<InterfaceBlock>()) {
+            this->writeInterfaceBlock(e->as<InterfaceBlock>());
+            wroteInterfaceBlock = true;
+        }
+    }
+    if (!wroteInterfaceBlock && fProgram.fInputs.fUseFlipRTUniform) {
+        this->writeLine("struct sksl_synthetic_uniforms {");
+        this->writeLine(" float2 " SKSL_RTFLIP_NAME ";");
+        this->writeLine("};");
+    }
+}
+
+// Emits every user-declared struct definition ahead of the code that
+// uses it. (writeProgramElement deliberately skips kStructDefinition.)
+void MetalCodeGenerator::writeStructDefinitions() {
+    for (const ProgramElement* e : fProgram.elements()) {
+        if (e->is<StructDefinition>()) {
+            this->writeStructDefinition(e->as<StructDefinition>());
+        }
+    }
+}
+
+// Emits every constant-expression global as a file-scope `constant`
+// declaration. Constants are the only global category handled here;
+// everything else is gathered into the Globals struct instead
+// (see writeGlobalStruct).
+void MetalCodeGenerator::writeConstantVariables() {
+    class : public GlobalStructVisitor {
+    public:
+        void visitConstantVariable(const VarDeclaration& decl) override {
+            fCodeGen->write("constant ");
+            fCodeGen->writeVarDeclaration(decl);
+            fCodeGen->finishLine();
+        }
+
+        MetalCodeGenerator* fCodeGen = nullptr;
+    } visitor;
+
+    visitor.fCodeGen = this;
+    this->visitGlobalStruct(&visitor);
+}
+
+// Walks every program element and dispatches each global to the matching
+// GlobalStructVisitor callback: interface blocks (except the built-in
+// sk_PerVertex), samplers, textures, plain globals, and constants. This is
+// the single source of truth for what belongs in the Globals struct, so
+// writeGlobalStruct and writeGlobalInit stay in lockstep.
+void MetalCodeGenerator::visitGlobalStruct(GlobalStructVisitor* visitor) {
+    for (const ProgramElement* element : fProgram.elements()) {
+        if (element->is<InterfaceBlock>()) {
+            const auto* ib = &element->as<InterfaceBlock>();
+            if (ib->typeName() != "sk_PerVertex") {
+                visitor->visitInterfaceBlock(*ib, fInterfaceBlockNameMap[ib]);
+            }
+            continue;
+        }
+        if (!element->is<GlobalVarDeclaration>()) {
+            continue;
+        }
+        const GlobalVarDeclaration& global = element->as<GlobalVarDeclaration>();
+        const VarDeclaration& decl = global.varDeclaration();
+        const Variable& var = *decl.var();
+        if (var.type().typeKind() == Type::TypeKind::kSampler) {
+            visitor->visitSampler(var.type(), var.mangledName());
+            continue;
+        }
+        if (var.type().typeKind() == Type::TypeKind::kTexture) {
+            visitor->visitTexture(var.type(), var.modifiers(), var.mangledName());
+            continue;
+        }
+        // Only variables with no flags other than `const` (and no builtin
+        // layout) are candidates for the Globals struct / constant section.
+        if (!(var.modifiers().fFlags & ~Modifiers::kConst_Flag) &&
+            var.modifiers().fLayout.fBuiltin == -1) {
+            if (is_in_globals(var)) {
+                // Visit a regular global variable.
+                visitor->visitNonconstantVariable(var, decl.value().get());
+            } else {
+                // Visit a constant-expression variable.
+                SkASSERT(var.modifiers().fFlags & Modifiers::kConst_Flag);
+                visitor->visitConstantVariable(decl);
+            }
+        }
+    }
+}
+
+// Emits the `struct Globals` declaration holding all non-constant globals:
+// interface-block pointers, textures, samplers (as sampler2D via the
+// polyfill), and plain variables. The struct header is emitted lazily by
+// addElement() so nothing is written when there are no globals at all;
+// member order matches writeGlobalInit because both use visitGlobalStruct.
+void MetalCodeGenerator::writeGlobalStruct() {
+    class : public GlobalStructVisitor {
+    public:
+        void visitInterfaceBlock(const InterfaceBlock& block,
+                                 std::string_view blockName) override {
+            this->addElement();
+            fCodeGen->write(" ");
+            if (is_readonly(block)) {
+                fCodeGen->write("const ");
+            }
+            // Buffers live in `device` memory; other blocks in `constant`.
+            fCodeGen->write(is_buffer(block) ? "device " : "constant ");
+            fCodeGen->write(block.typeName());
+            fCodeGen->write("* ");
+            fCodeGen->writeName(blockName);
+            fCodeGen->write(";\n");
+        }
+        void visitTexture(const Type& type, const Modifiers& modifiers,
+                          std::string_view name) override {
+            this->addElement();
+            fCodeGen->write(" ");
+            fCodeGen->writeType(type);
+            fCodeGen->write(" ");
+            fCodeGen->writeName(name);
+            fCodeGen->write(";\n");
+        }
+        void visitSampler(const Type&, std::string_view name) override {
+            this->addElement();
+            fCodeGen->write(" sampler2D ");
+            fCodeGen->writeName(name);
+            fCodeGen->write(";\n");
+        }
+        void visitConstantVariable(const VarDeclaration& decl) override {
+            // Constants aren't added to the global struct.
+        }
+        void visitNonconstantVariable(const Variable& var, const Expression* value) override {
+            this->addElement();
+            fCodeGen->write(" ");
+            fCodeGen->writeModifiers(var.modifiers());
+            fCodeGen->writeType(var.type());
+            fCodeGen->write(" ");
+            fCodeGen->writeName(var.mangledName());
+            fCodeGen->write(";\n");
+        }
+        // Opens the struct on the first member; no-op afterwards.
+        void addElement() {
+            if (fFirst) {
+                fCodeGen->write("struct Globals {\n");
+                fFirst = false;
+            }
+        }
+        // Closes the struct iff any member was written.
+        void finish() {
+            if (!fFirst) {
+                fCodeGen->writeLine("};");
+                fFirst = true;
+            }
+        }
+
+        MetalCodeGenerator* fCodeGen = nullptr;
+        bool fFirst = true;
+    } visitor;
+
+    visitor.fCodeGen = this;
+    this->visitGlobalStruct(&visitor);
+    visitor.finish();
+}
+
+// Emits `Globals _globals{...};` with one initializer per member, in the
+// same visitGlobalStruct order as writeGlobalStruct: interface blocks are
+// initialized with their address, samplers with a {texture, sampler} pair
+// using the suffix constants, and plain variables with their SkSL
+// initializer (or `{}` when they have none). Writes nothing if the
+// program has no globals.
+void MetalCodeGenerator::writeGlobalInit() {
+    class : public GlobalStructVisitor {
+    public:
+        void visitInterfaceBlock(const InterfaceBlock& blockType,
+                                 std::string_view blockName) override {
+            this->addElement();
+            fCodeGen->write("&");
+            fCodeGen->writeName(blockName);
+        }
+        void visitTexture(const Type&, const Modifiers& modifiers, std::string_view name) override {
+            this->addElement();
+            fCodeGen->writeName(name);
+        }
+        void visitSampler(const Type&, std::string_view name) override {
+            this->addElement();
+            // Pair the split texture/sampler parameters back into the
+            // sampler2D polyfill struct.
+            fCodeGen->write("{");
+            fCodeGen->writeName(name);
+            fCodeGen->write(kTextureSuffix);
+            fCodeGen->write(", ");
+            fCodeGen->writeName(name);
+            fCodeGen->write(kSamplerSuffix);
+            fCodeGen->write("}");
+        }
+        void visitConstantVariable(const VarDeclaration& decl) override {
+            // Constant-expression variables aren't put in the global struct.
+        }
+        void visitNonconstantVariable(const Variable& var, const Expression* value) override {
+            this->addElement();
+            if (value) {
+                fCodeGen->writeVarInitializer(var, *value);
+            } else {
+                fCodeGen->write("{}");
+            }
+        }
+        // Opens the aggregate initializer on the first member, then emits
+        // comma separators.
+        void addElement() {
+            if (fFirst) {
+                fCodeGen->write("Globals _globals{");
+                fFirst = false;
+            } else {
+                fCodeGen->write(", ");
+            }
+        }
+        void finish() {
+            if (!fFirst) {
+                fCodeGen->writeLine("};");
+                // Suppress unused-variable warnings for shaders that never
+                // read _globals.
+                fCodeGen->writeLine("(void)_globals;");
+            }
+        }
+        MetalCodeGenerator* fCodeGen = nullptr;
+        bool fFirst = true;
+    } visitor;
+
+    visitor.fCodeGen = this;
+    this->visitGlobalStruct(&visitor);
+    visitor.finish();
+}
+
+// Walks the program's globals and reports every `workgroup`-flagged
+// variable to the visitor. Workgroup variables can have neither an
+// initializer nor `const` (asserted), since threadgroup memory is
+// zero-initialized at kernel entry instead.
+void MetalCodeGenerator::visitThreadgroupStruct(ThreadgroupStructVisitor* visitor) {
+    for (const ProgramElement* element : fProgram.elements()) {
+        if (!element->is<GlobalVarDeclaration>()) {
+            continue;
+        }
+        const GlobalVarDeclaration& global = element->as<GlobalVarDeclaration>();
+        const VarDeclaration& decl = global.varDeclaration();
+        const Variable& var = *decl.var();
+        if (var.modifiers().fFlags & Modifiers::kWorkgroup_Flag) {
+            SkASSERT(!decl.value());
+            SkASSERT(!(var.modifiers().fFlags & Modifiers::kConst_Flag));
+            visitor->visitNonconstantVariable(var);
+        }
+    }
+}
+
+// Emits `struct Threadgroups { ... };` containing every workgroup
+// variable. Mirrors writeGlobalStruct: the struct header is emitted
+// lazily so nothing is written when there are no workgroup variables,
+// and member order matches writeThreadgroupInit.
+void MetalCodeGenerator::writeThreadgroupStruct() {
+    class : public ThreadgroupStructVisitor {
+    public:
+        void visitNonconstantVariable(const Variable& var) override {
+            this->addElement();
+            fCodeGen->write(" ");
+            fCodeGen->writeModifiers(var.modifiers());
+            fCodeGen->writeType(var.type());
+            fCodeGen->write(" ");
+            fCodeGen->writeName(var.mangledName());
+            fCodeGen->write(";\n");
+        }
+        // Opens the struct on the first member; no-op afterwards.
+        void addElement() {
+            if (fFirst) {
+                fCodeGen->write("struct Threadgroups {\n");
+                fFirst = false;
+            }
+        }
+        void finish() {
+            if (!fFirst) {
+                fCodeGen->writeLine("};");
+                fFirst = true;
+            }
+        }
+
+        MetalCodeGenerator* fCodeGen = nullptr;
+        bool fFirst = true;
+    } visitor;
+
+    visitor.fCodeGen = this;
+    this->visitThreadgroupStruct(&visitor);
+    visitor.finish();
+}
+
+// Emits `threadgroup Threadgroups _threadgroups{...};` with an empty
+// (`{}`) initializer for every workgroup variable, in the same
+// visitThreadgroupStruct order as writeThreadgroupStruct. Writes nothing
+// when the program declares no workgroup variables.
+void MetalCodeGenerator::writeThreadgroupInit() {
+    class : public ThreadgroupStructVisitor {
+    public:
+        void visitNonconstantVariable(const Variable& var) override {
+            this->addElement();
+            fCodeGen->write("{}");
+        }
+        void addElement() {
+            if (fFirst) {
+                fCodeGen->write("threadgroup Threadgroups _threadgroups{");
+                fFirst = false;
+            } else {
+                fCodeGen->write(", ");
+            }
+        }
+        void finish() {
+            if (!fFirst) {
+                fCodeGen->writeLine("};");
+                // Suppress unused-variable warnings when _threadgroups is
+                // never read.
+                fCodeGen->writeLine("(void)_threadgroups;");
+            }
+        }
+        MetalCodeGenerator* fCodeGen = nullptr;
+        bool fFirst = true;
+    } visitor;
+
+    visitor.fCodeGen = this;
+    this->visitThreadgroupStruct(&visitor);
+    visitor.finish();
+}
+
+// Second-pass (body) dispatch for a single program element. Element kinds
+// already handled by the dedicated first-pass writers (globals, interface
+// blocks, struct definitions) and extensions are intentionally no-ops here.
+void MetalCodeGenerator::writeProgramElement(const ProgramElement& e) {
+    switch (e.kind()) {
+        case ProgramElement::Kind::kExtension:
+            break;
+        case ProgramElement::Kind::kGlobalVar:
+            break;
+        case ProgramElement::Kind::kInterfaceBlock:
+            // handled in writeInterfaceBlocks, do nothing
+            break;
+        case ProgramElement::Kind::kStructDefinition:
+            // Handled in writeStructDefinitions. Do nothing.
+            break;
+        case ProgramElement::Kind::kFunction:
+            this->writeFunction(e.as<FunctionDefinition>());
+            break;
+        case ProgramElement::Kind::kFunctionPrototype:
+            this->writeFunctionPrototype(e.as<FunctionPrototype>());
+            break;
+        case ProgramElement::Kind::kModifiers:
+            this->writeModifiers(e.as<ModifiersDeclaration>().modifiers());
+            this->writeLine(";");
+            break;
+        default:
+            SkDEBUGFAILF("unsupported program element: %s\n", e.description().c_str());
+            break;
+    }
+}
+
+// Computes the Requirements bitmask for a statement (typically a function
+// body): which of the synthesized structs (Inputs/Outputs/Uniforms/Globals/
+// Threadgroups) and sk_FragCoord the code reaches. Function calls fold in
+// the callee's requirements recursively via requirements(FunctionDeclaration).
+// Returns kNo_Requirements for a null statement.
+MetalCodeGenerator::Requirements MetalCodeGenerator::requirements(const Statement* s) {
+    class RequirementsVisitor : public ProgramVisitor {
+    public:
+        using ProgramVisitor::visitStatement;
+
+        bool visitExpression(const Expression& e) override {
+            switch (e.kind()) {
+                case Expression::Kind::kFunctionCall: {
+                    const FunctionCall& f = e.as<FunctionCall>();
+                    fRequirements |= fCodeGen->requirements(f.function());
+                    break;
+                }
+                case Expression::Kind::kFieldAccess: {
+                    // Anonymous interface-block fields are reached through
+                    // the Globals struct.
+                    const FieldAccess& f = e.as<FieldAccess>();
+                    if (f.ownerKind() == FieldAccess::OwnerKind::kAnonymousInterfaceBlock) {
+                        fRequirements |= kGlobals_Requirement;
+                        return false; // don't recurse into the base variable
+                    }
+                    break;
+                }
+                case Expression::Kind::kVariableReference: {
+                    const Variable& var = *e.as<VariableReference>().variable();
+
+                    if (var.modifiers().fLayout.fBuiltin == SK_FRAGCOORD_BUILTIN) {
+                        // sk_FragCoord needs the Globals struct as well,
+                        // not just the frag-coord plumbing.
+                        fRequirements |= kGlobals_Requirement | kFragCoord_Requirement;
+                    } else if (var.storage() == Variable::Storage::kGlobal) {
+                        // Classify the global into exactly one category;
+                        // order matters since the checks are exclusive.
+                        if (is_input(var)) {
+                            fRequirements |= kInputs_Requirement;
+                        } else if (is_output(var)) {
+                            fRequirements |= kOutputs_Requirement;
+                        } else if (is_uniforms(var)) {
+                            fRequirements |= kUniforms_Requirement;
+                        } else if (is_threadgroup(var)) {
+                            fRequirements |= kThreadgroups_Requirement;
+                        } else if (is_in_globals(var)) {
+                            fRequirements |= kGlobals_Requirement;
+                        }
+                    }
+                    break;
+                }
+                default:
+                    break;
+            }
+            return INHERITED::visitExpression(e);
+        }
+
+        MetalCodeGenerator* fCodeGen;
+        Requirements fRequirements = kNo_Requirements;
+        using INHERITED = ProgramVisitor;
+    };
+
+    RequirementsVisitor visitor;
+    if (s) {
+        visitor.fCodeGen = this;
+        visitor.visitStatement(*s);
+    }
+    return visitor.fRequirements;
+}
+
+// Returns (and memoizes in fRequirements) the Requirements bitmask for a
+// function declaration by analyzing its definition's body. The map entry is
+// pre-seeded with kNo_Requirements before the body is analyzed, which
+// terminates analysis of recursive call cycles.
+MetalCodeGenerator::Requirements MetalCodeGenerator::requirements(const FunctionDeclaration& f) {
+    Requirements* found = fRequirements.find(&f);
+    if (!found) {
+        fRequirements.set(&f, kNo_Requirements);
+        for (const ProgramElement* e : fProgram.elements()) {
+            if (e->is<FunctionDefinition>()) {
+                const FunctionDefinition& def = e->as<FunctionDefinition>();
+                if (&def.declaration() == &f) {
+                    Requirements reqs = this->requirements(def.body().get());
+                    fRequirements.set(&f, reqs);
+                    return reqs;
+                }
+            }
+        }
+        // We never found a definition for this declared function, but it's legal to prototype a
+        // function without ever giving a definition, as long as you don't call it.
+        return kNo_Requirements;
+    }
+    return *found;
+}
+
+// Top-level driver. Emits in two passes: the "header" (declarations for all
+// synthesized structs plus built-in function prototypes) and the "body"
+// (every program element). The body is generated into a side buffer first
+// because emitting it can append to fExtraFunctionPrototypes /
+// fExtraFunctions, which must land between header and body in the final
+// output. Returns true iff no errors were reported during generation.
+bool MetalCodeGenerator::generateCode() {
+    StringStream header;
+    {
+        AutoOutputStream outputToHeader(this, &header, &fIndentation);
+        this->writeHeader();
+        this->writeConstantVariables();
+        this->writeSampler2DPolyfill();
+        this->writeStructDefinitions();
+        this->writeUniformStruct();
+        this->writeInputStruct();
+        if (!ProgramConfig::IsCompute(fProgram.fConfig->fKind)) {
+            this->writeOutputStruct();
+        }
+        this->writeInterfaceBlocks();
+        this->writeGlobalStruct();
+        this->writeThreadgroupStruct();
+
+        // Emit prototypes for every built-in function; these aren't always added in perfect order.
+        for (const ProgramElement* e : fProgram.fSharedElements) {
+            if (e->is<FunctionDefinition>()) {
+                this->writeFunctionDeclaration(e->as<FunctionDefinition>().declaration());
+                this->writeLine(";");
+            }
+        }
+    }
+    StringStream body;
+    {
+        AutoOutputStream outputToBody(this, &body, &fIndentation);
+
+        for (const ProgramElement* e : fProgram.elements()) {
+            this->writeProgramElement(*e);
+        }
+    }
+    write_stringstream(header, *fOut);
+    write_stringstream(fExtraFunctionPrototypes, *fOut);
+    write_stringstream(fExtraFunctions, *fOut);
+    write_stringstream(body, *fOut);
+    return fContext.fErrors->errorCount() == 0;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLMetalCodeGenerator.h b/gfx/skia/skia/src/sksl/codegen/SkSLMetalCodeGenerator.h
new file mode 100644
index 0000000000..621b99edb4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLMetalCodeGenerator.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_METALCODEGENERATOR
+#define SKSL_METALCODEGENERATOR
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/codegen/SkSLCodeGenerator.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <initializer_list>
+#include <string>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+
+class AnyConstructor;
+class BinaryExpression;
+class Block;
+class ConstructorArrayCast;
+class ConstructorCompound;
+class ConstructorMatrixResize;
+class Context;
+class DoStatement;
+class Expression;
+class ExpressionStatement;
+class Extension;
+class FieldAccess;
+class ForStatement;
+class FunctionCall;
+class FunctionDeclaration;
+class FunctionDefinition;
+class FunctionPrototype;
+class IfStatement;
+class IndexExpression;
+class InterfaceBlock;
+class Literal;
+class Operator;
+class OutputStream;
+class Position;
+class PostfixExpression;
+class PrefixExpression;
+class ProgramElement;
+class ReturnStatement;
+class Statement;
+class StructDefinition;
+class SwitchStatement;
+class Swizzle;
+class TernaryExpression;
+class VarDeclaration;
+class Variable;
+class VariableReference;
+enum class OperatorPrecedence : uint8_t;
+enum IntrinsicKind : int8_t;
+struct Layout;
+struct Modifiers;
+struct Program;
+
+/**
+ * Converts a Program into Metal code.
+ *
+ * Output is produced in two passes (see generateCode): a header containing
+ * the synthesized Inputs/Outputs/Uniforms/Globals/Threadgroups structs and
+ * prototypes, followed by the translated program elements. Helper code that
+ * is discovered mid-generation accumulates in fExtraFunctionPrototypes /
+ * fExtraFunctions and is spliced between the two.
+ */
+class MetalCodeGenerator : public CodeGenerator {
+public:
+    MetalCodeGenerator(const Context* context, const Program* program, OutputStream* out)
+            : INHERITED(context, program, out)
+            // Identifiers that collide with Metal keywords/intrinsics and must
+            // be mangled when they appear as SkSL names.
+            , fReservedWords({"atan2", "rsqrt", "rint", "dfdx", "dfdy", "vertex", "fragment"})
+            , fLineEnding("\n") {}
+
+    bool generateCode() override;
+
+protected:
+    using Precedence = OperatorPrecedence;
+
+    // Bitmask describing which synthesized structures (and sk_FragCoord) a
+    // function transitively touches; used to thread the right hidden
+    // parameters/arguments through non-main functions.
+    typedef int Requirements;
+    inline static constexpr Requirements kNo_Requirements = 0;
+    inline static constexpr Requirements kInputs_Requirement = 1 << 0;
+    inline static constexpr Requirements kOutputs_Requirement = 1 << 1;
+    inline static constexpr Requirements kUniforms_Requirement = 1 << 2;
+    inline static constexpr Requirements kGlobals_Requirement = 1 << 3;
+    inline static constexpr Requirements kFragCoord_Requirement = 1 << 4;
+    inline static constexpr Requirements kThreadgroups_Requirement = 1 << 5;
+
+    // Callback interface used to enumerate the contents of the Globals
+    // struct; shared by writeGlobalStruct/writeGlobalInit/writeConstantVariables.
+    class GlobalStructVisitor;
+    void visitGlobalStruct(GlobalStructVisitor* visitor);
+
+    // Same pattern for the Threadgroups struct (workgroup variables).
+    class ThreadgroupStructVisitor;
+    void visitThreadgroupStruct(ThreadgroupStructVisitor* visitor);
+
+    void write(std::string_view s);
+
+    void writeLine(std::string_view s = std::string_view());
+
+    void finishLine();
+
+    void writeHeader();
+
+    void writeSampler2DPolyfill();
+
+    void writeUniformStruct();
+
+    void writeInputStruct();
+
+    void writeOutputStruct();
+
+    void writeInterfaceBlocks();
+
+    void writeStructDefinitions();
+
+    void writeConstantVariables();
+
+    void writeFields(const std::vector<Type::Field>& fields, Position pos,
+                     const InterfaceBlock* parentIntf = nullptr);
+
+    // Metal buffer-layout size/alignment queries for a type (packed or not).
+    int size(const Type* type, bool isPacked) const;
+
+    int alignment(const Type* type, bool isPacked) const;
+
+    void writeGlobalStruct();
+
+    void writeGlobalInit();
+
+    void writeThreadgroupStruct();
+
+    void writeThreadgroupInit();
+
+    void writePrecisionModifier();
+
+    std::string typeName(const Type& type);
+
+    void writeStructDefinition(const StructDefinition& s);
+
+    void writeType(const Type& type);
+
+    void writeExtension(const Extension& ext);
+
+    void writeInterfaceBlock(const InterfaceBlock& intf);
+
+    // Emit the hidden parameters/arguments implied by a function's
+    // Requirements bitmask (e.g. _globals, _uniforms).
+    void writeFunctionRequirementParams(const FunctionDeclaration& f,
+                                        const char*& separator);
+
+    void writeFunctionRequirementArgs(const FunctionDeclaration& f, const char*& separator);
+
+    bool writeFunctionDeclaration(const FunctionDeclaration& f);
+
+    void writeFunction(const FunctionDefinition& f);
+
+    void writeFunctionPrototype(const FunctionPrototype& f);
+
+    void writeLayout(const Layout& layout);
+
+    void writeModifiers(const Modifiers& modifiers);
+
+    void writeVarInitializer(const Variable& var, const Expression& value);
+
+    void writeName(std::string_view name);
+
+    void writeVarDeclaration(const VarDeclaration& decl);
+
+    void writeFragCoord();
+
+    void writeVariableReference(const VariableReference& ref);
+
+    void writeExpression(const Expression& expr, Precedence parentPrecedence);
+
+    void writeMinAbsHack(Expression& absExpr, Expression& otherExpr);
+
+    // Helper-function generators for constructs Metal can't express
+    // directly; emitted code lands in fExtraFunctions.
+    std::string getOutParamHelper(const FunctionCall& c,
+                                  const ExpressionArray& arguments,
+                                  const SkTArray<VariableReference*>& outVars);
+
+    std::string getInversePolyfill(const ExpressionArray& arguments);
+
+    std::string getBitcastIntrinsic(const Type& outType);
+
+    std::string getTempVariable(const Type& varType);
+
+    void writeFunctionCall(const FunctionCall& c);
+
+    bool matrixConstructHelperIsNeeded(const ConstructorCompound& c);
+    std::string getMatrixConstructHelper(const AnyConstructor& c);
+    void assembleMatrixFromMatrix(const Type& sourceMatrix, int rows, int columns);
+    void assembleMatrixFromExpressions(const AnyConstructor& ctor, int rows, int columns);
+
+    void writeMatrixCompMult();
+
+    void writeOuterProduct();
+
+    void writeMatrixTimesEqualHelper(const Type& left, const Type& right, const Type& result);
+
+    void writeMatrixDivisionHelpers(const Type& type);
+
+    void writeMatrixEqualityHelpers(const Type& left, const Type& right);
+
+    std::string getVectorFromMat2x2ConstructorHelper(const Type& matrixType);
+
+    void writeArrayEqualityHelpers(const Type& type);
+
+    void writeStructEqualityHelpers(const Type& type);
+
+    void writeEqualityHelpers(const Type& leftType, const Type& rightType);
+
+    void writeArgumentList(const ExpressionArray& arguments);
+
+    void writeSimpleIntrinsic(const FunctionCall& c);
+
+    bool writeIntrinsicCall(const FunctionCall& c, IntrinsicKind kind);
+
+    void writeConstructorCompound(const ConstructorCompound& c, Precedence parentPrecedence);
+
+    void writeConstructorCompoundVector(const ConstructorCompound& c, Precedence parentPrecedence);
+
+    void writeConstructorCompoundMatrix(const ConstructorCompound& c, Precedence parentPrecedence);
+
+    void writeConstructorMatrixResize(const ConstructorMatrixResize& c,
+                                      Precedence parentPrecedence);
+
+    void writeAnyConstructor(const AnyConstructor& c,
+                             const char* leftBracket,
+                             const char* rightBracket,
+                             Precedence parentPrecedence);
+
+    void writeCastConstructor(const AnyConstructor& c,
+                              const char* leftBracket,
+                              const char* rightBracket,
+                              Precedence parentPrecedence);
+
+    void writeConstructorArrayCast(const ConstructorArrayCast& c, Precedence parentPrecedence);
+
+    void writeFieldAccess(const FieldAccess& f);
+
+    void writeSwizzle(const Swizzle& swizzle);
+
+    // Splats a scalar expression across a matrix of arbitrary size.
+    void writeNumberAsMatrix(const Expression& expr, const Type& matrixType);
+
+    void writeBinaryExpressionElement(const Expression& expr,
+                                     Operator op,
+                                     const Expression& other,
+                                     Precedence precedence);
+
+    void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence);
+
+    void writeTernaryExpression(const TernaryExpression& t, Precedence parentPrecedence);
+
+    void writeIndexExpression(const IndexExpression& expr);
+
+    void writePrefixExpression(const PrefixExpression& p, Precedence parentPrecedence);
+
+    void writePostfixExpression(const PostfixExpression& p, Precedence parentPrecedence);
+
+    void writeLiteral(const Literal& f);
+
+    void writeStatement(const Statement& s);
+
+    void writeStatements(const StatementArray& statements);
+
+    void writeBlock(const Block& b);
+
+    void writeIfStatement(const IfStatement& stmt);
+
+    void writeForStatement(const ForStatement& f);
+
+    void writeDoStatement(const DoStatement& d);
+
+    void writeExpressionStatement(const ExpressionStatement& s);
+
+    void writeSwitchStatement(const SwitchStatement& s);
+
+    void writeReturnStatementFromMain();
+
+    void writeReturnStatement(const ReturnStatement& r);
+
+    void writeProgramElement(const ProgramElement& e);
+
+    Requirements requirements(const FunctionDeclaration& f);
+
+    Requirements requirements(const Statement* s);
+
+    // For compute shader main functions, writes and initializes the _in and _out structs (the
+    // instances, not the types themselves)
+    void writeComputeMainInputs();
+
+    int getUniformBinding(const Modifiers& m);
+
+    int getUniformSet(const Modifiers& m);
+
+    SkTHashSet<std::string_view> fReservedWords;
+    SkTHashMap<const Type::Field*, const InterfaceBlock*> fInterfaceBlockMap;
+    // Maps interface blocks to the (possibly synthesized) name used for
+    // their member in the Globals struct.
+    SkTHashMap<const InterfaceBlock*, std::string_view> fInterfaceBlockNameMap;
+    int fAnonInterfaceCount = 0;
+    int fPaddingCount = 0;
+    const char* fLineEnding;
+    std::string fFunctionHeader;
+    // Helper code discovered during body generation; emitted between the
+    // header and body streams in generateCode().
+    StringStream fExtraFunctions;
+    StringStream fExtraFunctionPrototypes;
+    int fVarCount = 0;
+    int fIndentation = 0;
+    bool fAtLineStart = false;
+    // true if we have run into usages of dFdx / dFdy
+    bool fFoundDerivatives = false;
+    // Memoized per-function Requirements (see requirements()).
+    SkTHashMap<const FunctionDeclaration*, Requirements> fRequirements;
+    SkTHashSet<std::string> fHelpers;
+    // Binding index of the synthesized uniform interface block, or -1 when
+    // no uniforms were emitted.
+    int fUniformBuffer = -1;
+    std::string fRTFlipName;
+    const FunctionDeclaration* fCurrentFunction = nullptr;
+    int fSwizzleHelperCount = 0;
+    bool fIgnoreVariableReferenceModifiers = false;
+    // Name suffixes used when a sampler2D is split into separate Metal
+    // texture and sampler parameters (re-paired in writeGlobalInit).
+    static constexpr char kTextureSuffix[] = "_Tex";
+    static constexpr char kSamplerSuffix[] = "_Smplr";
+
+    // Workaround/polyfill flags
+    bool fWrittenInverse2 = false, fWrittenInverse3 = false, fWrittenInverse4 = false;
+    bool fWrittenMatrixCompMult = false;
+    bool fWrittenOuterProduct = false;
+
+    using INHERITED = CodeGenerator;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLPipelineStageCodeGenerator.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLPipelineStageCodeGenerator.cpp
new file mode 100644
index 0000000000..20466a922d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLPipelineStageCodeGenerator.cpp
@@ -0,0 +1,814 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/codegen/SkSLPipelineStageCodeGenerator.h"
+
+#if defined(SKSL_STANDALONE) || defined(SK_GANESH) || defined(SK_GRAPHITE)
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLProgramKind.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLChildCall.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLStructDefinition.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include <memory>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+namespace PipelineStage {
+
+/**
+ * Translates an SkSL runtime-effect Program into pipeline-stage source,
+ * delegating anything host-dependent (declaring uniforms/functions,
+ * sampling children, color-space transforms) to the supplied Callbacks.
+ * The sampleCoords/inputColor/destColor strings are substituted wherever
+ * the corresponding SkSL builtins are referenced.
+ */
+class PipelineStageCodeGenerator {
+public:
+    PipelineStageCodeGenerator(const Program& program,
+                               const char* sampleCoords,
+                               const char* inputColor,
+                               const char* destColor,
+                               Callbacks* callbacks)
+            : fProgram(program)
+            , fSampleCoords(sampleCoords)
+            , fInputColor(inputColor)
+            , fDestColor(destColor)
+            , fCallbacks(callbacks) {}
+
+    void generateCode();
+
+private:
+    using Precedence = OperatorPrecedence;
+
+    void write(std::string_view s);
+    void writeLine(std::string_view s = std::string_view());
+
+    std::string typeName(const Type& type);
+    void writeType(const Type& type);
+
+    std::string functionName(const FunctionDeclaration& decl);
+    void writeFunction(const FunctionDefinition& f);
+    void writeFunctionDeclaration(const FunctionDeclaration& decl);
+
+    std::string modifierString(const Modifiers& modifiers);
+    std::string functionDeclaration(const FunctionDeclaration& decl);
+
+    // Handles arrays correctly, eg: `float x[2]`
+    std::string typedVariable(const Type& type, std::string_view name);
+
+    void writeVarDeclaration(const VarDeclaration& var);
+    void writeGlobalVarDeclaration(const GlobalVarDeclaration& g);
+    void writeStructDefinition(const StructDefinition& s);
+
+    void writeExpression(const Expression& expr, Precedence parentPrecedence);
+    void writeChildCall(const ChildCall& c);
+    void writeFunctionCall(const FunctionCall& c);
+    void writeAnyConstructor(const AnyConstructor& c, Precedence parentPrecedence);
+    void writeFieldAccess(const FieldAccess& f);
+    void writeSwizzle(const Swizzle& swizzle);
+    void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence);
+    void writeTernaryExpression(const TernaryExpression& t, Precedence parentPrecedence);
+    void writeIndexExpression(const IndexExpression& expr);
+    void writePrefixExpression(const PrefixExpression& p, Precedence parentPrecedence);
+    void writePostfixExpression(const PostfixExpression& p, Precedence parentPrecedence);
+    void writeVariableReference(const VariableReference& ref);
+
+    void writeStatement(const Statement& s);
+    void writeBlock(const Block& b);
+    void writeIfStatement(const IfStatement& stmt);
+    void writeDoStatement(const DoStatement& d);
+    void writeForStatement(const ForStatement& f);
+    void writeReturnStatement(const ReturnStatement& r);
+    void writeSwitchStatement(const SwitchStatement& s);
+
+    void writeProgramElementFirstPass(const ProgramElement& e);
+    void writeProgramElementSecondPass(const ProgramElement& e);
+
+    // RAII redirect of the generator's output into a private buffer;
+    // restores the previous buffer on destruction. Used to capture
+    // sub-expressions as strings (e.g. child-call arguments).
+    struct AutoOutputBuffer {
+        AutoOutputBuffer(PipelineStageCodeGenerator* generator) : fGenerator(generator) {
+            fOldBuffer = fGenerator->fBuffer;
+            fGenerator->fBuffer = &fBuffer;
+        }
+
+        ~AutoOutputBuffer() {
+            fGenerator->fBuffer = fOldBuffer;
+        }
+
+        PipelineStageCodeGenerator* fGenerator;
+        StringStream* fOldBuffer;
+        StringStream fBuffer;
+    };
+
+    const Program& fProgram;
+    const char* fSampleCoords;
+    const char* fInputColor;
+    const char* fDestColor;
+    Callbacks* fCallbacks;
+
+    // Rename tables for host-declared entities; entries are presumably
+    // populated during the first pass -- TODO confirm (declaration code is
+    // outside this chunk).
+    SkTHashMap<const Variable*, std::string> fVariableNames;
+    SkTHashMap<const FunctionDeclaration*, std::string> fFunctionNames;
+    SkTHashMap<const Type*, std::string> fStructNames;
+
+    // Current output target; swapped by AutoOutputBuffer.
+    StringStream* fBuffer = nullptr;
+    // When set, return expressions are wrapped in half4(...)
+    // (see writeReturnStatement).
+    bool fCastReturnsToHalf = false;
+};
+
+
+// Appends raw text to the current output buffer.
+void PipelineStageCodeGenerator::write(std::string_view s) {
+    fBuffer->write(s.data(), s.length());
+}
+
+// Appends text followed by a newline to the current output buffer.
+void PipelineStageCodeGenerator::writeLine(std::string_view s) {
+    fBuffer->write(s.data(), s.length());
+    fBuffer->writeText("\n");
+}
+
+// Emits a call to a child effect (shader/colorFilter/blender). The child's
+// index is derived from program order: we count how many effect-child
+// globals precede it, so indices match the order the host registered the
+// children in. Arguments are rendered into side buffers (AutoOutputBuffer)
+// and handed to the host callback, whose returned expression is written out.
+void PipelineStageCodeGenerator::writeChildCall(const ChildCall& c) {
+    const ExpressionArray& arguments = c.arguments();
+    SkASSERT(arguments.size() >= 1);
+    int index = 0;
+    bool found = false;
+    for (const ProgramElement* p : fProgram.elements()) {
+        if (p->is<GlobalVarDeclaration>()) {
+            const GlobalVarDeclaration& global = p->as<GlobalVarDeclaration>();
+            const VarDeclaration& decl = global.varDeclaration();
+            if (decl.var() == &c.child()) {
+                found = true;
+            } else if (decl.var()->type().isEffectChild()) {
+                ++index;
+            }
+        }
+        if (found) {
+            break;
+        }
+    }
+    SkASSERT(found);
+
+    // Shaders require a coordinate argument. Color filters require a color argument.
+    // Blenders require two color arguments.
+    std::string sampleOutput;
+    {
+        AutoOutputBuffer exprBuffer(this);
+        this->writeExpression(*arguments[0], Precedence::kSequence);
+
+        switch (c.child().type().typeKind()) {
+            case Type::TypeKind::kShader: {
+                SkASSERT(arguments.size() == 1);
+                SkASSERT(arguments[0]->type().matches(*fProgram.fContext->fTypes.fFloat2));
+                sampleOutput = fCallbacks->sampleShader(index, exprBuffer.fBuffer.str());
+                break;
+            }
+            case Type::TypeKind::kColorFilter: {
+                SkASSERT(arguments.size() == 1);
+                SkASSERT(arguments[0]->type().matches(*fProgram.fContext->fTypes.fHalf4) ||
+                         arguments[0]->type().matches(*fProgram.fContext->fTypes.fFloat4));
+                sampleOutput = fCallbacks->sampleColorFilter(index, exprBuffer.fBuffer.str());
+                break;
+            }
+            case Type::TypeKind::kBlender: {
+                SkASSERT(arguments.size() == 2);
+                SkASSERT(arguments[0]->type().matches(*fProgram.fContext->fTypes.fHalf4) ||
+                         arguments[0]->type().matches(*fProgram.fContext->fTypes.fFloat4));
+                SkASSERT(arguments[1]->type().matches(*fProgram.fContext->fTypes.fHalf4) ||
+                         arguments[1]->type().matches(*fProgram.fContext->fTypes.fFloat4));
+
+                AutoOutputBuffer exprBuffer2(this);
+                this->writeExpression(*arguments[1], Precedence::kSequence);
+
+                sampleOutput = fCallbacks->sampleBlender(index, exprBuffer.fBuffer.str(),
+                                                         exprBuffer2.fBuffer.str());
+                break;
+            }
+            default: {
+                SkDEBUGFAILF("cannot sample from type '%s'",
+                             c.child().type().description().c_str());
+            }
+        }
+    }
+    this->write(sampleOutput);
+    // NOTE(review): this trailing `return;` is redundant at the end of a
+    // void function; left as-is to keep the vendored patch byte-identical.
+    return;
+}
+
+// Emits a function call. The toLinearSrgb/fromLinearSrgb intrinsics are
+// special-cased: their single color argument is captured into a buffer and
+// handed to the host callbacks, which supply the color-space-transform
+// expression. Other builtins are emitted by name; user functions go
+// through functionName() (which may rename them).
+void PipelineStageCodeGenerator::writeFunctionCall(const FunctionCall& c) {
+    const FunctionDeclaration& function = c.function();
+
+    if (function.intrinsicKind() == IntrinsicKind::k_toLinearSrgb_IntrinsicKind ||
+        function.intrinsicKind() == IntrinsicKind::k_fromLinearSrgb_IntrinsicKind) {
+        SkASSERT(c.arguments().size() == 1);
+        std::string colorArg;
+        {
+            AutoOutputBuffer exprBuffer(this);
+            this->writeExpression(*c.arguments()[0], Precedence::kSequence);
+            colorArg = exprBuffer.fBuffer.str();
+        }
+
+        switch (function.intrinsicKind()) {
+            case IntrinsicKind::k_toLinearSrgb_IntrinsicKind:
+                this->write(fCallbacks->toLinearSrgb(std::move(colorArg)));
+                break;
+            case IntrinsicKind::k_fromLinearSrgb_IntrinsicKind:
+                this->write(fCallbacks->fromLinearSrgb(std::move(colorArg)));
+                break;
+            default:
+                SkUNREACHABLE;
+        }
+
+        return;
+    }
+
+    if (function.isBuiltin()) {
+        this->write(function.name());
+    } else {
+        this->write(this->functionName(function));
+    }
+
+    this->write("(");
+    auto separator = SkSL::String::Separator();
+    for (const auto& arg : c.arguments()) {
+        this->write(separator());
+        this->writeExpression(*arg, Precedence::kSequence);
+    }
+    this->write(")");
+}
+
+void PipelineStageCodeGenerator::writeVariableReference(const VariableReference& ref) {
+ const Variable* var = ref.variable();
+ const Modifiers& modifiers = var->modifiers();
+
+ if (modifiers.fLayout.fBuiltin == SK_MAIN_COORDS_BUILTIN) {
+ this->write(fSampleCoords);
+ return;
+ } else if (modifiers.fLayout.fBuiltin == SK_INPUT_COLOR_BUILTIN) {
+ this->write(fInputColor);
+ return;
+ } else if (modifiers.fLayout.fBuiltin == SK_DEST_COLOR_BUILTIN) {
+ this->write(fDestColor);
+ return;
+ }
+
+ std::string* name = fVariableNames.find(var);
+ this->write(name ? *name : var->name());
+}
+
+void PipelineStageCodeGenerator::writeIfStatement(const IfStatement& stmt) {
+ this->write("if (");
+ this->writeExpression(*stmt.test(), Precedence::kTopLevel);
+ this->write(") ");
+ this->writeStatement(*stmt.ifTrue());
+ if (stmt.ifFalse()) {
+ this->write(" else ");
+ this->writeStatement(*stmt.ifFalse());
+ }
+}
+
+void PipelineStageCodeGenerator::writeReturnStatement(const ReturnStatement& r) {
+ this->write("return");
+ if (r.expression()) {
+ this->write(" ");
+ if (fCastReturnsToHalf) {
+ this->write("half4(");
+ }
+ this->writeExpression(*r.expression(), Precedence::kTopLevel);
+ if (fCastReturnsToHalf) {
+ this->write(")");
+ }
+ }
+ this->write(";");
+}
+
+void PipelineStageCodeGenerator::writeSwitchStatement(const SwitchStatement& s) {
+ this->write("switch (");
+ this->writeExpression(*s.value(), Precedence::kTopLevel);
+ this->writeLine(") {");
+ for (const std::unique_ptr<Statement>& stmt : s.cases()) {
+ const SwitchCase& c = stmt->as<SwitchCase>();
+ if (c.isDefault()) {
+ this->writeLine("default:");
+ } else {
+ this->write("case ");
+ this->write(std::to_string(c.value()));
+ this->writeLine(":");
+ }
+ if (!c.statement()->isEmpty()) {
+ this->writeStatement(*c.statement());
+ this->writeLine();
+ }
+ }
+ this->writeLine();
+ this->write("}");
+}
+
+std::string PipelineStageCodeGenerator::functionName(const FunctionDeclaration& decl) {
+ if (decl.isMain()) {
+ return std::string(fCallbacks->getMainName());
+ }
+
+ std::string* name = fFunctionNames.find(&decl);
+ if (name) {
+ return *name;
+ }
+
+ std::string mangledName = fCallbacks->getMangledName(std::string(decl.name()).c_str());
+ fFunctionNames.set(&decl, mangledName);
+ return mangledName;
+}
+
+void PipelineStageCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ if (f.declaration().isBuiltin()) {
+ // Don't re-emit builtin functions.
+ return;
+ }
+
+ AutoOutputBuffer body(this);
+
+ // We allow public SkSL's main() to return half4 -or- float4 (ie vec4). When we emit
+ // our code in the processor, the surrounding code is going to expect half4, so we
+ // explicitly cast any returns (from main) to half4. This is only strictly necessary
+ // if the return type is float4 - injecting it unconditionally reduces the risk of an
+ // obscure bug.
+ const FunctionDeclaration& decl = f.declaration();
+ if (decl.isMain() &&
+ fProgram.fConfig->fKind != SkSL::ProgramKind::kMeshVertex &&
+ fProgram.fConfig->fKind != SkSL::ProgramKind::kMeshFragment) {
+ fCastReturnsToHalf = true;
+ }
+
+ for (const std::unique_ptr<Statement>& stmt : f.body()->as<Block>().children()) {
+ this->writeStatement(*stmt);
+ this->writeLine();
+ }
+
+ if (decl.isMain()) {
+ fCastReturnsToHalf = false;
+ }
+
+ fCallbacks->defineFunction(this->functionDeclaration(decl).c_str(),
+ body.fBuffer.str().c_str(),
+ decl.isMain());
+}
+
+std::string PipelineStageCodeGenerator::functionDeclaration(const FunctionDeclaration& decl) {
+ // This is similar to decl.description(), but substitutes a mangled name, and handles modifiers
+ // on the function (e.g. `inline`) and its parameters (e.g. `inout`).
+ std::string declString =
+ String::printf("%s%s%s %s(",
+ (decl.modifiers().fFlags & Modifiers::kInline_Flag) ? "inline " : "",
+ (decl.modifiers().fFlags & Modifiers::kNoInline_Flag) ? "noinline " : "",
+ this->typeName(decl.returnType()).c_str(),
+ this->functionName(decl).c_str());
+ auto separator = SkSL::String::Separator();
+ for (const Variable* p : decl.parameters()) {
+ declString.append(separator());
+ declString.append(this->modifierString(p->modifiers()));
+ declString.append(this->typedVariable(p->type(), p->name()).c_str());
+ }
+
+ return declString + ")";
+}
+
+void PipelineStageCodeGenerator::writeFunctionDeclaration(const FunctionDeclaration& decl) {
+ if (!decl.isMain() && !decl.isBuiltin()) {
+ fCallbacks->declareFunction(this->functionDeclaration(decl).c_str());
+ }
+}
+
+void PipelineStageCodeGenerator::writeGlobalVarDeclaration(const GlobalVarDeclaration& g) {
+ const VarDeclaration& decl = g.varDeclaration();
+ const Variable& var = *decl.var();
+
+ if (var.isBuiltin() || var.type().isOpaque()) {
+ // Don't re-declare these. (eg, sk_FragCoord, or fragmentProcessor children)
+ } else if (var.modifiers().fFlags & Modifiers::kUniform_Flag) {
+ std::string uniformName = fCallbacks->declareUniform(&decl);
+ fVariableNames.set(&var, std::move(uniformName));
+ } else {
+ std::string mangledName = fCallbacks->getMangledName(std::string(var.name()).c_str());
+ std::string declaration = this->modifierString(var.modifiers()) +
+ this->typedVariable(var.type(),
+ std::string_view(mangledName.c_str()));
+ if (decl.value()) {
+ AutoOutputBuffer outputToBuffer(this);
+ this->writeExpression(*decl.value(), Precedence::kTopLevel);
+ declaration += " = ";
+ declaration += outputToBuffer.fBuffer.str();
+ }
+ declaration += ";\n";
+ fCallbacks->declareGlobal(declaration.c_str());
+ fVariableNames.set(&var, std::move(mangledName));
+ }
+}
+
+void PipelineStageCodeGenerator::writeStructDefinition(const StructDefinition& s) {
+ const Type& type = s.type();
+ std::string mangledName = fCallbacks->getMangledName(type.displayName().c_str());
+ std::string definition = "struct " + mangledName + " {\n";
+ for (const auto& f : type.fields()) {
+ definition += this->typedVariable(*f.fType, f.fName) + ";\n";
+ }
+ definition += "};\n";
+ fStructNames.set(&type, std::move(mangledName));
+ fCallbacks->defineStruct(definition.c_str());
+}
+
+void PipelineStageCodeGenerator::writeProgramElementFirstPass(const ProgramElement& e) {
+ switch (e.kind()) {
+ case ProgramElement::Kind::kGlobalVar:
+ this->writeGlobalVarDeclaration(e.as<GlobalVarDeclaration>());
+ break;
+ case ProgramElement::Kind::kFunction:
+ this->writeFunctionDeclaration(e.as<FunctionDefinition>().declaration());
+ break;
+ case ProgramElement::Kind::kFunctionPrototype:
+ // Skip this; we're already emitting prototypes for every FunctionDefinition.
+ // (See case kFunction, directly above.)
+ break;
+ case ProgramElement::Kind::kStructDefinition:
+ this->writeStructDefinition(e.as<StructDefinition>());
+ break;
+
+ case ProgramElement::Kind::kExtension:
+ case ProgramElement::Kind::kInterfaceBlock:
+ case ProgramElement::Kind::kModifiers:
+ default:
+ SkDEBUGFAILF("unsupported program element %s\n", e.description().c_str());
+ break;
+ }
+}
+
+void PipelineStageCodeGenerator::writeProgramElementSecondPass(const ProgramElement& e) {
+ if (e.is<FunctionDefinition>()) {
+ this->writeFunction(e.as<FunctionDefinition>());
+ }
+}
+
+std::string PipelineStageCodeGenerator::typeName(const Type& raw) {
+ const Type& type = raw.resolve();
+ if (type.isArray()) {
+ // This is necessary so that name mangling on arrays-of-structs works properly.
+ std::string arrayName = this->typeName(type.componentType());
+ arrayName.push_back('[');
+ arrayName += std::to_string(type.columns());
+ arrayName.push_back(']');
+ return arrayName;
+ }
+
+ std::string* name = fStructNames.find(&type);
+ return name ? *name : std::string(type.name());
+}
+
+void PipelineStageCodeGenerator::writeType(const Type& type) {
+ this->write(this->typeName(type));
+}
+
+void PipelineStageCodeGenerator::writeExpression(const Expression& expr,
+ Precedence parentPrecedence) {
+ switch (expr.kind()) {
+ case Expression::Kind::kBinary:
+ this->writeBinaryExpression(expr.as<BinaryExpression>(), parentPrecedence);
+ break;
+ case Expression::Kind::kLiteral:
+ this->write(expr.description());
+ break;
+ case Expression::Kind::kChildCall:
+ this->writeChildCall(expr.as<ChildCall>());
+ break;
+ case Expression::Kind::kConstructorArray:
+ case Expression::Kind::kConstructorArrayCast:
+ case Expression::Kind::kConstructorCompound:
+ case Expression::Kind::kConstructorCompoundCast:
+ case Expression::Kind::kConstructorDiagonalMatrix:
+ case Expression::Kind::kConstructorMatrixResize:
+ case Expression::Kind::kConstructorScalarCast:
+ case Expression::Kind::kConstructorSplat:
+ case Expression::Kind::kConstructorStruct:
+ this->writeAnyConstructor(expr.asAnyConstructor(), parentPrecedence);
+ break;
+ case Expression::Kind::kFieldAccess:
+ this->writeFieldAccess(expr.as<FieldAccess>());
+ break;
+ case Expression::Kind::kFunctionCall:
+ this->writeFunctionCall(expr.as<FunctionCall>());
+ break;
+ case Expression::Kind::kPrefix:
+ this->writePrefixExpression(expr.as<PrefixExpression>(), parentPrecedence);
+ break;
+ case Expression::Kind::kPostfix:
+ this->writePostfixExpression(expr.as<PostfixExpression>(), parentPrecedence);
+ break;
+ case Expression::Kind::kSwizzle:
+ this->writeSwizzle(expr.as<Swizzle>());
+ break;
+ case Expression::Kind::kVariableReference:
+ this->writeVariableReference(expr.as<VariableReference>());
+ break;
+ case Expression::Kind::kTernary:
+ this->writeTernaryExpression(expr.as<TernaryExpression>(), parentPrecedence);
+ break;
+ case Expression::Kind::kIndex:
+ this->writeIndexExpression(expr.as<IndexExpression>());
+ break;
+ case Expression::Kind::kSetting:
+ default:
+ SkDEBUGFAILF("unsupported expression: %s", expr.description().c_str());
+ break;
+ }
+}
+
+void PipelineStageCodeGenerator::writeAnyConstructor(const AnyConstructor& c,
+ Precedence parentPrecedence) {
+ this->writeType(c.type());
+ this->write("(");
+ auto separator = SkSL::String::Separator();
+ for (const auto& arg : c.argumentSpan()) {
+ this->write(separator());
+ this->writeExpression(*arg, Precedence::kSequence);
+ }
+ this->write(")");
+}
+
+void PipelineStageCodeGenerator::writeIndexExpression(const IndexExpression& expr) {
+ this->writeExpression(*expr.base(), Precedence::kPostfix);
+ this->write("[");
+ this->writeExpression(*expr.index(), Precedence::kTopLevel);
+ this->write("]");
+}
+
+void PipelineStageCodeGenerator::writeFieldAccess(const FieldAccess& f) {
+ if (f.ownerKind() == FieldAccess::OwnerKind::kDefault) {
+ this->writeExpression(*f.base(), Precedence::kPostfix);
+ this->write(".");
+ }
+ const Type& baseType = f.base()->type();
+ this->write(baseType.fields()[f.fieldIndex()].fName);
+}
+
+void PipelineStageCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
+ this->writeExpression(*swizzle.base(), Precedence::kPostfix);
+ this->write(".");
+ for (int c : swizzle.components()) {
+ SkASSERT(c >= 0 && c <= 3);
+ this->write(&("x\0y\0z\0w\0"[c * 2]));
+ }
+}
+
+void PipelineStageCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
+ Precedence parentPrecedence) {
+ const Expression& left = *b.left();
+ const Expression& right = *b.right();
+ Operator op = b.getOperator();
+
+ Precedence precedence = op.getBinaryPrecedence();
+ if (precedence >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(left, precedence);
+ this->write(op.operatorName());
+ this->writeExpression(right, precedence);
+ if (precedence >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void PipelineStageCodeGenerator::writeTernaryExpression(const TernaryExpression& t,
+ Precedence parentPrecedence) {
+ if (Precedence::kTernary >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*t.test(), Precedence::kTernary);
+ this->write(" ? ");
+ this->writeExpression(*t.ifTrue(), Precedence::kTernary);
+ this->write(" : ");
+ this->writeExpression(*t.ifFalse(), Precedence::kTernary);
+ if (Precedence::kTernary >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void PipelineStageCodeGenerator::writePrefixExpression(const PrefixExpression& p,
+ Precedence parentPrecedence) {
+ if (Precedence::kPrefix >= parentPrecedence) {
+ this->write("(");
+ }
+ this->write(p.getOperator().tightOperatorName());
+ this->writeExpression(*p.operand(), Precedence::kPrefix);
+ if (Precedence::kPrefix >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+void PipelineStageCodeGenerator::writePostfixExpression(const PostfixExpression& p,
+ Precedence parentPrecedence) {
+ if (Precedence::kPostfix >= parentPrecedence) {
+ this->write("(");
+ }
+ this->writeExpression(*p.operand(), Precedence::kPostfix);
+ this->write(p.getOperator().tightOperatorName());
+ if (Precedence::kPostfix >= parentPrecedence) {
+ this->write(")");
+ }
+}
+
+std::string PipelineStageCodeGenerator::modifierString(const Modifiers& modifiers) {
+ std::string result;
+ if (modifiers.fFlags & Modifiers::kConst_Flag) {
+ result.append("const ");
+ }
+
+ if ((modifiers.fFlags & Modifiers::kIn_Flag) && (modifiers.fFlags & Modifiers::kOut_Flag)) {
+ result.append("inout ");
+ } else if (modifiers.fFlags & Modifiers::kIn_Flag) {
+ result.append("in ");
+ } else if (modifiers.fFlags & Modifiers::kOut_Flag) {
+ result.append("out ");
+ }
+
+ return result;
+}
+
+std::string PipelineStageCodeGenerator::typedVariable(const Type& type, std::string_view name) {
+ const Type& baseType = type.isArray() ? type.componentType() : type;
+
+ std::string decl = this->typeName(baseType) + " " + std::string(name);
+ if (type.isArray()) {
+ decl += "[" + std::to_string(type.columns()) + "]";
+ }
+ return decl;
+}
+
+void PipelineStageCodeGenerator::writeVarDeclaration(const VarDeclaration& var) {
+ this->write(this->modifierString(var.var()->modifiers()));
+ this->write(this->typedVariable(var.var()->type(), var.var()->name()));
+ if (var.value()) {
+ this->write(" = ");
+ this->writeExpression(*var.value(), Precedence::kTopLevel);
+ }
+ this->write(";");
+}
+
+void PipelineStageCodeGenerator::writeStatement(const Statement& s) {
+ switch (s.kind()) {
+ case Statement::Kind::kBlock:
+ this->writeBlock(s.as<Block>());
+ break;
+ case Statement::Kind::kBreak:
+ this->write("break;");
+ break;
+ case Statement::Kind::kContinue:
+ this->write("continue;");
+ break;
+ case Statement::Kind::kExpression:
+ this->writeExpression(*s.as<ExpressionStatement>().expression(), Precedence::kTopLevel);
+ this->write(";");
+ break;
+ case Statement::Kind::kDo:
+ this->writeDoStatement(s.as<DoStatement>());
+ break;
+ case Statement::Kind::kFor:
+ this->writeForStatement(s.as<ForStatement>());
+ break;
+ case Statement::Kind::kIf:
+ this->writeIfStatement(s.as<IfStatement>());
+ break;
+ case Statement::Kind::kReturn:
+ this->writeReturnStatement(s.as<ReturnStatement>());
+ break;
+ case Statement::Kind::kSwitch:
+ this->writeSwitchStatement(s.as<SwitchStatement>());
+ break;
+ case Statement::Kind::kVarDeclaration:
+ this->writeVarDeclaration(s.as<VarDeclaration>());
+ break;
+ case Statement::Kind::kDiscard:
+ SkDEBUGFAIL("Unsupported control flow");
+ break;
+ case Statement::Kind::kNop:
+ this->write(";");
+ break;
+ default:
+ SkDEBUGFAILF("unsupported statement: %s", s.description().c_str());
+ break;
+ }
+}
+
+void PipelineStageCodeGenerator::writeBlock(const Block& b) {
+ // Write scope markers if this block is a scope, or if the block is empty (since we need to emit
+ // something here to make the code valid).
+ bool isScope = b.isScope() || b.isEmpty();
+ if (isScope) {
+ this->writeLine("{");
+ }
+ for (const std::unique_ptr<Statement>& stmt : b.children()) {
+ if (!stmt->isEmpty()) {
+ this->writeStatement(*stmt);
+ this->writeLine();
+ }
+ }
+ if (isScope) {
+ this->write("}");
+ }
+}
+
+void PipelineStageCodeGenerator::writeDoStatement(const DoStatement& d) {
+ this->write("do ");
+ this->writeStatement(*d.statement());
+ this->write(" while (");
+ this->writeExpression(*d.test(), Precedence::kTopLevel);
+ this->write(");");
+ return;
+}
+
+void PipelineStageCodeGenerator::writeForStatement(const ForStatement& f) {
+ // Emit loops of the form 'for(;test;)' as 'while(test)', which is probably how they started
+ if (!f.initializer() && f.test() && !f.next()) {
+ this->write("while (");
+ this->writeExpression(*f.test(), Precedence::kTopLevel);
+ this->write(") ");
+ this->writeStatement(*f.statement());
+ return;
+ }
+
+ this->write("for (");
+ if (f.initializer() && !f.initializer()->isEmpty()) {
+ this->writeStatement(*f.initializer());
+ } else {
+ this->write("; ");
+ }
+ if (f.test()) {
+ this->writeExpression(*f.test(), Precedence::kTopLevel);
+ }
+ this->write("; ");
+ if (f.next()) {
+ this->writeExpression(*f.next(), Precedence::kTopLevel);
+ }
+ this->write(") ");
+ this->writeStatement(*f.statement());
+}
+
+void PipelineStageCodeGenerator::generateCode() {
+ // Write all the program elements except for functions; prototype all the functions.
+ for (const ProgramElement* e : fProgram.elements()) {
+ this->writeProgramElementFirstPass(*e);
+ }
+
+ // We always place FunctionDefinition elements last, because the inliner likes to move function
+ // bodies around. After inlining, code can inadvertently move upwards, above ProgramElements
+ // that the code relies on.
+ for (const ProgramElement* e : fProgram.elements()) {
+ this->writeProgramElementSecondPass(*e);
+ }
+}
+
+void ConvertProgram(const Program& program,
+ const char* sampleCoords,
+ const char* inputColor,
+ const char* destColor,
+ Callbacks* callbacks) {
+ PipelineStageCodeGenerator generator(program, sampleCoords, inputColor, destColor, callbacks);
+ generator.generateCode();
+}
+
+} // namespace PipelineStage
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLPipelineStageCodeGenerator.h b/gfx/skia/skia/src/sksl/codegen/SkSLPipelineStageCodeGenerator.h
new file mode 100644
index 0000000000..7efb0e187e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLPipelineStageCodeGenerator.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PIPELINESTAGECODEGENERATOR
+#define SKSL_PIPELINESTAGECODEGENERATOR
+
+#include "include/core/SkTypes.h"
+
+#if defined(SKSL_STANDALONE) || defined(SK_GANESH) || defined(SK_GRAPHITE)
+
+#include <string>
+
+namespace SkSL {
+
+struct Program;
+class VarDeclaration;
+
+namespace PipelineStage {
+ class Callbacks {
+ public:
+ virtual ~Callbacks() = default;
+
+ virtual std::string getMainName() { return "main"; }
+ virtual std::string getMangledName(const char* name) { return name; }
+ virtual void defineFunction(const char* declaration, const char* body, bool isMain) = 0;
+ virtual void declareFunction(const char* declaration) = 0;
+ virtual void defineStruct(const char* definition) = 0;
+ virtual void declareGlobal(const char* declaration) = 0;
+
+ virtual std::string declareUniform(const VarDeclaration*) = 0;
+ virtual std::string sampleShader(int index, std::string coords) = 0;
+ virtual std::string sampleColorFilter(int index, std::string color) = 0;
+ virtual std::string sampleBlender(int index, std::string src, std::string dst) = 0;
+
+ virtual std::string toLinearSrgb(std::string color) = 0;
+ virtual std::string fromLinearSrgb(std::string color) = 0;
+ };
+
+ /*
+ * Processes 'program' for use in a GrFragmentProcessor, or other context that wants SkSL-like
+ * code as input. To support fragment processor usage, there are callbacks that allow elements
+ * to be declared programmatically and to rename those elements (mangling to avoid collisions).
+ *
+ * - Any reference to the main coords builtin variable will be replaced with 'sampleCoords'.
+ * - Any reference to the input color builtin variable will be replaced with 'inputColor'.
+ * - Any reference to the dest color builtin variable will be replaced with 'destColor'.
+ * Dest-color is used in blend programs.
+ * - Each uniform variable declaration triggers a call to 'declareUniform', which should emit
+ * the declaration, and return the (possibly different) name to use for the variable.
+ * - Each function definition triggers a call to 'defineFunction', which should emit the
+ * definition, and return the (possibly different) name to use for calls to that function.
+     * - Each invocation of sample() triggers a call to the matching child callback
+ * text of the call expression.
+ */
+ void ConvertProgram(const Program& program,
+ const char* sampleCoords,
+ const char* inputColor,
+ const char* destColor,
+ Callbacks* callbacks);
+} // namespace PipelineStage
+
+} // namespace SkSL
+
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineBuilder.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineBuilder.cpp
new file mode 100644
index 0000000000..48d9f26d74
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineBuilder.cpp
@@ -0,0 +1,2861 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkStream.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTo.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/core/SkOpts.h"
+#include "src/core/SkRasterPipelineOpContexts.h"
+#include "src/core/SkRasterPipelineOpList.h"
+#include "src/sksl/codegen/SkSLRasterPipelineBuilder.h"
+#include "src/sksl/tracing/SkRPDebugTrace.h"
+#include "src/sksl/tracing/SkSLDebugInfo.h"
+#include "src/utils/SkBitSet.h"
+
+#if !defined(SKSL_STANDALONE)
+#include "src/core/SkRasterPipeline.h"
+#endif
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <iterator>
+#include <string>
+#include <string_view>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+namespace RP {
+
+#define ALL_SINGLE_SLOT_UNARY_OP_CASES \
+ BuilderOp::acos_float: \
+ case BuilderOp::asin_float: \
+ case BuilderOp::atan_float: \
+ case BuilderOp::cos_float: \
+ case BuilderOp::exp_float: \
+ case BuilderOp::exp2_float: \
+ case BuilderOp::log_float: \
+ case BuilderOp::log2_float: \
+ case BuilderOp::sin_float: \
+ case BuilderOp::sqrt_float: \
+ case BuilderOp::tan_float
+
+#define ALL_MULTI_SLOT_UNARY_OP_CASES \
+ BuilderOp::abs_float: \
+ case BuilderOp::abs_int: \
+ case BuilderOp::bitwise_not_int: \
+ case BuilderOp::cast_to_float_from_int: \
+ case BuilderOp::cast_to_float_from_uint: \
+ case BuilderOp::cast_to_int_from_float: \
+ case BuilderOp::cast_to_uint_from_float: \
+ case BuilderOp::ceil_float: \
+ case BuilderOp::floor_float: \
+ case BuilderOp::invsqrt_float
+
+#define ALL_N_WAY_BINARY_OP_CASES \
+ BuilderOp::atan2_n_floats: \
+ case BuilderOp::pow_n_floats
+
+#define ALL_MULTI_SLOT_BINARY_OP_CASES \
+ BuilderOp::add_n_floats: \
+ case BuilderOp::add_n_ints: \
+ case BuilderOp::sub_n_floats: \
+ case BuilderOp::sub_n_ints: \
+ case BuilderOp::mul_n_floats: \
+ case BuilderOp::mul_n_ints: \
+ case BuilderOp::div_n_floats: \
+ case BuilderOp::div_n_ints: \
+ case BuilderOp::div_n_uints: \
+ case BuilderOp::bitwise_and_n_ints: \
+ case BuilderOp::bitwise_or_n_ints: \
+ case BuilderOp::bitwise_xor_n_ints: \
+ case BuilderOp::mod_n_floats: \
+ case BuilderOp::min_n_floats: \
+ case BuilderOp::min_n_ints: \
+ case BuilderOp::min_n_uints: \
+ case BuilderOp::max_n_floats: \
+ case BuilderOp::max_n_ints: \
+ case BuilderOp::max_n_uints: \
+ case BuilderOp::cmple_n_floats: \
+ case BuilderOp::cmple_n_ints: \
+ case BuilderOp::cmple_n_uints: \
+ case BuilderOp::cmplt_n_floats: \
+ case BuilderOp::cmplt_n_ints: \
+ case BuilderOp::cmplt_n_uints: \
+ case BuilderOp::cmpeq_n_floats: \
+ case BuilderOp::cmpeq_n_ints: \
+ case BuilderOp::cmpne_n_floats: \
+ case BuilderOp::cmpne_n_ints
+
+#define ALL_N_WAY_TERNARY_OP_CASES \
+ BuilderOp::smoothstep_n_floats
+
+#define ALL_MULTI_SLOT_TERNARY_OP_CASES \
+ BuilderOp::mix_n_floats: \
+ case BuilderOp::mix_n_ints
+
+void Builder::unary_op(BuilderOp op, int32_t slots) {
+ switch (op) {
+ case ALL_SINGLE_SLOT_UNARY_OP_CASES:
+ case ALL_MULTI_SLOT_UNARY_OP_CASES:
+ fInstructions.push_back({op, {}, slots});
+ break;
+
+ default:
+ SkDEBUGFAIL("not a unary op");
+ break;
+ }
+}
+
+void Builder::binary_op(BuilderOp op, int32_t slots) {
+ switch (op) {
+ case ALL_N_WAY_BINARY_OP_CASES:
+ case ALL_MULTI_SLOT_BINARY_OP_CASES:
+ fInstructions.push_back({op, {}, slots});
+ break;
+
+ default:
+ SkDEBUGFAIL("not a binary op");
+ break;
+ }
+}
+
+void Builder::ternary_op(BuilderOp op, int32_t slots) {
+ switch (op) {
+ case ALL_N_WAY_TERNARY_OP_CASES:
+ case ALL_MULTI_SLOT_TERNARY_OP_CASES:
+ fInstructions.push_back({op, {}, slots});
+ break;
+
+ default:
+ SkDEBUGFAIL("not a ternary op");
+ break;
+ }
+}
+
+void Builder::dot_floats(int32_t slots) {
+ switch (slots) {
+ case 1: fInstructions.push_back({BuilderOp::mul_n_floats, {}, slots}); break;
+ case 2: fInstructions.push_back({BuilderOp::dot_2_floats, {}, slots}); break;
+ case 3: fInstructions.push_back({BuilderOp::dot_3_floats, {}, slots}); break;
+ case 4: fInstructions.push_back({BuilderOp::dot_4_floats, {}, slots}); break;
+
+ default:
+ SkDEBUGFAIL("invalid number of slots");
+ break;
+ }
+}
+
+void Builder::refract_floats() {
+ fInstructions.push_back({BuilderOp::refract_4_floats, {}});
+}
+
+void Builder::inverse_matrix(int32_t n) {
+ switch (n) {
+ case 2: fInstructions.push_back({BuilderOp::inverse_mat2, {}, 4}); break;
+ case 3: fInstructions.push_back({BuilderOp::inverse_mat3, {}, 9}); break;
+ case 4: fInstructions.push_back({BuilderOp::inverse_mat4, {}, 16}); break;
+ default: SkUNREACHABLE;
+ }
+}
+
+void Builder::discard_stack(int32_t count) {
+ // If we pushed something onto the stack and then immediately discarded part of it, we can
+ // shrink or eliminate the push.
+ while (count > 0 && !fInstructions.empty()) {
+ Instruction& lastInstruction = fInstructions.back();
+
+ switch (lastInstruction.fOp) {
+ case BuilderOp::discard_stack:
+ // Our last op was actually a separate discard_stack; combine the discards.
+ lastInstruction.fImmA += count;
+ return;
+
+ case BuilderOp::push_zeros:
+ case BuilderOp::push_clone:
+ case BuilderOp::push_clone_from_stack:
+ case BuilderOp::push_clone_indirect_from_stack:
+ case BuilderOp::push_slots:
+ case BuilderOp::push_slots_indirect:
+ case BuilderOp::push_uniform:
+ case BuilderOp::push_uniform_indirect:
+ // Our last op was a multi-slot push; cancel out one discard and eliminate the op
+ // if its count reached zero.
+ --count;
+ --lastInstruction.fImmA;
+ if (lastInstruction.fImmA == 0) {
+ fInstructions.pop_back();
+ }
+ continue;
+
+ case BuilderOp::push_literal:
+ case BuilderOp::push_condition_mask:
+ case BuilderOp::push_loop_mask:
+ case BuilderOp::push_return_mask:
+ // Our last op was a single-slot push; cancel out one discard and eliminate the op.
+ --count;
+ fInstructions.pop_back();
+ continue;
+
+ default:
+ break;
+ }
+
+ // This instruction wasn't a push.
+ break;
+ }
+
+ if (count > 0) {
+ fInstructions.push_back({BuilderOp::discard_stack, {}, count});
+ }
+}
+
+void Builder::label(int labelID) {
+ SkASSERT(labelID >= 0 && labelID < fNumLabels);
+
+ // If the previous instruction was a branch to this label, it's a no-op; jumping to the very
+ // next instruction is effectively meaningless.
+ while (!fInstructions.empty()) {
+ Instruction& lastInstruction = fInstructions.back();
+ switch (lastInstruction.fOp) {
+ case BuilderOp::jump:
+ case BuilderOp::branch_if_all_lanes_active:
+ case BuilderOp::branch_if_any_lanes_active:
+ case BuilderOp::branch_if_no_lanes_active:
+ case BuilderOp::branch_if_no_active_lanes_on_stack_top_equal:
+ if (lastInstruction.fImmA == labelID) {
+ fInstructions.pop_back();
+ continue;
+ }
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+ fInstructions.push_back({BuilderOp::label, {}, labelID});
+}
+
+void Builder::jump(int labelID) {
+ SkASSERT(labelID >= 0 && labelID < fNumLabels);
+ if (!fInstructions.empty() && fInstructions.back().fOp == BuilderOp::jump) {
+ // The previous instruction was also `jump`, so this branch could never possibly occur.
+ return;
+ }
+ fInstructions.push_back({BuilderOp::jump, {}, labelID});
+}
+
+void Builder::branch_if_any_lanes_active(int labelID) {
+ if (!this->executionMaskWritesAreEnabled()) {
+ this->jump(labelID);
+ return;
+ }
+
+ SkASSERT(labelID >= 0 && labelID < fNumLabels);
+ if (!fInstructions.empty() &&
+ (fInstructions.back().fOp == BuilderOp::branch_if_any_lanes_active ||
+ fInstructions.back().fOp == BuilderOp::jump)) {
+ // The previous instruction was `jump` or `branch_if_any_lanes_active`, so this branch
+ // could never possibly occur.
+ return;
+ }
+ fInstructions.push_back({BuilderOp::branch_if_any_lanes_active, {}, labelID});
+}
+
+void Builder::branch_if_all_lanes_active(int labelID) {
+ if (!this->executionMaskWritesAreEnabled()) {
+ this->jump(labelID);
+ return;
+ }
+
+ SkASSERT(labelID >= 0 && labelID < fNumLabels);
+ if (!fInstructions.empty() &&
+ (fInstructions.back().fOp == BuilderOp::branch_if_all_lanes_active ||
+ fInstructions.back().fOp == BuilderOp::jump)) {
+ // The previous instruction was `jump` or `branch_if_all_lanes_active`, so this branch
+ // could never possibly occur.
+ return;
+ }
+ fInstructions.push_back({BuilderOp::branch_if_all_lanes_active, {}, labelID});
+}
+
+void Builder::branch_if_no_lanes_active(int labelID) {
+ if (!this->executionMaskWritesAreEnabled()) {
+ return;
+ }
+
+ SkASSERT(labelID >= 0 && labelID < fNumLabels);
+ if (!fInstructions.empty() &&
+ (fInstructions.back().fOp == BuilderOp::branch_if_no_lanes_active ||
+ fInstructions.back().fOp == BuilderOp::jump)) {
+ // The previous instruction was `jump` or `branch_if_no_lanes_active`, so this branch
+ // could never possibly occur.
+ return;
+ }
+ fInstructions.push_back({BuilderOp::branch_if_no_lanes_active, {}, labelID});
+}
+
+void Builder::branch_if_no_active_lanes_on_stack_top_equal(int value, int labelID) {
+ SkASSERT(labelID >= 0 && labelID < fNumLabels);
+ if (!fInstructions.empty() &&
+ (fInstructions.back().fOp == BuilderOp::jump ||
+ (fInstructions.back().fOp == BuilderOp::branch_if_no_active_lanes_on_stack_top_equal &&
+ fInstructions.back().fImmB == value))) {
+ // The previous instruction was `jump` or `branch_if_no_active_lanes_on_stack_top_equal`
+ // (checking against the same value), so this branch could never possibly occur.
+ return;
+ }
+ fInstructions.push_back({BuilderOp::branch_if_no_active_lanes_on_stack_top_equal,
+ {}, labelID, value});
+}
+
+// Pushes the values held in `src` (a contiguous range of value slots) onto the top
+// of the current temp stack. Performs two peephole optimizations against the
+// previously-emitted instruction before falling back to a plain push.
+void Builder::push_slots(SlotRange src) {
+    SkASSERT(src.count >= 0);
+    if (!fInstructions.empty()) {
+        Instruction& lastInstruction = fInstructions.back();
+
+        // If the previous instruction was pushing slots contiguous to this range, we can collapse
+        // the two pushes into one larger push.
+        if (lastInstruction.fOp == BuilderOp::push_slots &&
+            lastInstruction.fSlotA + lastInstruction.fImmA == src.index) {
+            lastInstruction.fImmA += src.count;
+            return;
+        }
+
+        // If the previous instruction was discarding an equal number of slots...
+        if (lastInstruction.fOp == BuilderOp::discard_stack && lastInstruction.fImmA == src.count) {
+            // ... and the instruction before that was copying from the stack to the same slots...
+            // NOTE(review): `fromBack(1)` assumes at least two instructions exist here; this holds
+            // as long as a discard_stack is never the very first instruction emitted — confirm.
+            Instruction& prevInstruction = fInstructions.fromBack(1);
+            if ((prevInstruction.fOp == BuilderOp::copy_stack_to_slots ||
+                 prevInstruction.fOp == BuilderOp::copy_stack_to_slots_unmasked) &&
+                prevInstruction.fSlotA == src.index &&
+                prevInstruction.fImmA == src.count) {
+                // ... we are emitting `copy stack to X, discard stack, copy X to stack`. This is a
+                // common pattern when multiple operations in a row affect the same variable. We can
+                // eliminate the discard and just leave X on the stack.
+                fInstructions.pop_back();
+                return;
+            }
+        }
+    }
+
+    // An empty push is a no-op; only emit an instruction for a nonempty range.
+    if (src.count > 0) {
+        fInstructions.push_back({BuilderOp::push_slots, {src.index}, src.count});
+    }
+}
+
+// Pushes a slot range selected at runtime: `fixedRange` is offset by a dynamic index
+// read from stack `dynamicStackID`. `limitRange`'s end is recorded so the op's
+// implementation can bound the indirect access (bounds handling is not visible here).
+void Builder::push_slots_indirect(SlotRange fixedRange, int dynamicStackID, SlotRange limitRange) {
+    // SlotA: fixed-range start
+    // SlotB: limit-range end
+    // immA: number of slots
+    // immB: dynamic stack ID
+    fInstructions.push_back({BuilderOp::push_slots_indirect,
+                             {fixedRange.index, limitRange.index + limitRange.count},
+                             fixedRange.count,
+                             dynamicStackID});
+}
+
+// Pushes the values held in a contiguous range of uniform slots onto the temp stack,
+// merging with an adjacent preceding uniform-push when possible.
+void Builder::push_uniform(SlotRange src) {
+    SkASSERT(src.count >= 0);
+    if (!fInstructions.empty()) {
+        Instruction& lastInstruction = fInstructions.back();
+
+        // If the previous instruction was pushing uniforms contiguous to this range, we can
+        // collapse the two pushes into one larger push.
+        if (lastInstruction.fOp == BuilderOp::push_uniform &&
+            lastInstruction.fSlotA + lastInstruction.fImmA == src.index) {
+            lastInstruction.fImmA += src.count;
+            return;
+        }
+    }
+
+    // An empty push is a no-op.
+    if (src.count > 0) {
+        fInstructions.push_back({BuilderOp::push_uniform, {src.index}, src.count});
+    }
+}
+
+// Indirect variant of push_uniform; same operand layout as push_slots_indirect.
+void Builder::push_uniform_indirect(SlotRange fixedRange,
+                                    int dynamicStackID,
+                                    SlotRange limitRange) {
+    // SlotA: fixed-range start
+    // SlotB: limit-range end
+    // immA: number of slots
+    // immB: dynamic stack ID
+    fInstructions.push_back({BuilderOp::push_uniform_indirect,
+                             {fixedRange.index, limitRange.index + limitRange.count},
+                             fixedRange.count,
+                             dynamicStackID});
+}
+
+// Pushes `count` additional copies of the value currently on top of the stack.
+// Strategy: splat the scalar into a 4-slot value with one swizzle, clone four slots
+// at a time for the bulk, then finish the remainder with a small swizzle or clone.
+void Builder::push_duplicates(int count) {
+    if (!fInstructions.empty()) {
+        Instruction& lastInstruction = fInstructions.back();
+
+        // If the previous op is pushing a zero, we can just push more of them.
+        if (lastInstruction.fOp == BuilderOp::push_zeros) {
+            lastInstruction.fImmA += count;
+            return;
+        }
+    }
+    SkASSERT(count >= 0);
+    if (count >= 3) {
+        // Use a swizzle to splat the input into a 4-slot value.
+        // (Consumes one slot, emits four: a net gain of three duplicates.)
+        this->swizzle(/*consumedSlots=*/1, {0, 0, 0, 0});
+        count -= 3;
+    }
+    for (; count >= 4; count -= 4) {
+        // Clone the splatted value four slots at a time.
+        this->push_clone(/*numSlots=*/4);
+    }
+    // Use a swizzle or clone to handle the trailing items.
+    // (An N-component splat swizzle consumes one slot and emits N, netting N-1 copies.)
+    switch (count) {
+        case 3: this->swizzle(/*consumedSlots=*/1, {0, 0, 0, 0}); break;
+        case 2: this->swizzle(/*consumedSlots=*/1, {0, 0, 0}); break;
+        case 1: this->push_clone(/*numSlots=*/1); break;
+        default: break;
+    }
+}
+
+// Pushes a copy of `range` from another stack (`otherStackID`) onto the current
+// stack, where `offsetFromStackTop` locates the source relative to that stack's top.
+// Adjacent clones from the same stack are merged into one wider clone.
+void Builder::push_clone_from_stack(SlotRange range, int otherStackID, int offsetFromStackTop) {
+    // immA: number of slots
+    // immB: other stack ID
+    // immC: offset from stack top
+    offsetFromStackTop -= range.index;
+
+    if (!fInstructions.empty()) {
+        Instruction& lastInstruction = fInstructions.back();
+
+        // If the previous op is also pushing a clone...
+        if (lastInstruction.fOp == BuilderOp::push_clone_from_stack &&
+            // ... from the same stack...
+            lastInstruction.fImmB == otherStackID &&
+            // ... and this clone starts at the same place that the last clone ends...
+            lastInstruction.fImmC - lastInstruction.fImmA == offsetFromStackTop) {
+            // ... just extend the existing clone-op.
+            lastInstruction.fImmA += range.count;
+            return;
+        }
+    }
+
+    fInstructions.push_back({BuilderOp::push_clone_from_stack, {},
+                             range.count, otherStackID, offsetFromStackTop});
+}
+
+// Indirect variant of push_clone_from_stack: the source position within the other
+// stack is additionally offset by a dynamic index read from `dynamicStackID`.
+void Builder::push_clone_indirect_from_stack(SlotRange fixedOffset,
+                                             int dynamicStackID,
+                                             int otherStackID,
+                                             int offsetFromStackTop) {
+    // immA: number of slots
+    // immB: other stack ID
+    // immC: offset from stack top
+    // immD: dynamic stack ID
+    offsetFromStackTop -= fixedOffset.index;
+
+    fInstructions.push_back({BuilderOp::push_clone_indirect_from_stack, {},
+                             fixedOffset.count, otherStackID, offsetFromStackTop, dynamicStackID});
+}
+
+// Pops the top `dst.count` values off the stack into the value slots `dst`,
+// honoring the execution mask. When mask writes are disabled the mask is known
+// to be all-true, so the cheaper unmasked pop is used instead.
+void Builder::pop_slots(SlotRange dst) {
+    if (!this->executionMaskWritesAreEnabled()) {
+        this->pop_slots_unmasked(dst);
+        return;
+    }
+
+    this->copy_stack_to_slots(dst);
+    this->discard_stack(dst.count);
+}
+
+// Peephole-optimizes an unmasked pop. If the most recent instruction pushed a
+// literal, zeros, or a slot, that push is removed and the value is written
+// directly to the destination instead. The routine recurses, pairing each
+// removed push with the highest remaining destination slot (pushes are undone
+// back-to-front), and shrinks `*dst` to the slots that still need a real pop.
+void Builder::simplifyPopSlotsUnmasked(SlotRange* dst) {
+    if (!dst->count || fInstructions.empty()) {
+        // There's nothing left to simplify.
+        return;
+    }
+
+    Instruction& lastInstruction = fInstructions.back();
+
+    // If the last instruction is pushing a constant, we can simplify it by copying the constant
+    // directly into the destination slot.
+    if (lastInstruction.fOp == BuilderOp::push_literal) {
+        // Remove the constant-push instruction.
+        int value = lastInstruction.fImmA;
+        fInstructions.pop_back();
+
+        // Consume one destination slot.
+        dst->count--;
+        Slot destinationSlot = dst->index + dst->count;
+
+        // Continue simplifying if possible.
+        this->simplifyPopSlotsUnmasked(dst);
+
+        // Write the constant directly to the destination slot.
+        // (Done after the recursion so instructions are emitted in stack order.)
+        this->copy_constant(destinationSlot, value);
+        return;
+    }
+
+    // If the last instruction is pushing a zero, we can save a step by directly zeroing out
+    // the destination slot.
+    if (lastInstruction.fOp == BuilderOp::push_zeros) {
+        // Remove one zero-push.
+        lastInstruction.fImmA--;
+        if (lastInstruction.fImmA == 0) {
+            fInstructions.pop_back();
+        }
+
+        // Consume one destination slot.
+        dst->count--;
+        Slot destinationSlot = dst->index + dst->count;
+
+        // Continue simplifying if possible.
+        this->simplifyPopSlotsUnmasked(dst);
+
+        // Zero the destination slot directly.
+        this->zero_slots_unmasked({destinationSlot, 1});
+        return;
+    }
+
+    // If the last instruction is pushing a slot, we can just copy that slot.
+    if (lastInstruction.fOp == BuilderOp::push_slots) {
+        // Get the last slot.
+        Slot sourceSlot = lastInstruction.fSlotA + lastInstruction.fImmA - 1;
+        lastInstruction.fImmA--;
+        if (lastInstruction.fImmA == 0) {
+            fInstructions.pop_back();
+        }
+
+        // Consume one destination slot.
+        dst->count--;
+        Slot destinationSlot = dst->index + dst->count;
+
+        // Try once more.
+        this->simplifyPopSlotsUnmasked(dst);
+
+        // Copy the slot directly. (A slot copied onto itself is a no-op and is skipped.)
+        if (destinationSlot != sourceSlot) {
+            this->copy_slots_unmasked({destinationSlot, 1}, {sourceSlot, 1});
+        }
+        return;
+    }
+}
+
+// Pops the top `dst.count` values into `dst` without consulting the execution mask.
+void Builder::pop_slots_unmasked(SlotRange dst) {
+    SkASSERT(dst.count >= 0);
+
+    // If we are popping immediately after a push, we can simplify the code by writing the pushed
+    // value directly to the destination range.
+    this->simplifyPopSlotsUnmasked(&dst);
+
+    // Pop from the stack normally.
+    if (dst.count > 0) {
+        this->copy_stack_to_slots_unmasked(dst);
+        this->discard_stack(dst.count);
+    }
+}
+
+// Copies `dst.count` values from the stack (ending `offsetFromStackTop` below the
+// top) into the value slots `dst`, masked by the execution mask. Contiguous copies
+// are merged into the previous instruction when possible.
+void Builder::copy_stack_to_slots(SlotRange dst, int offsetFromStackTop) {
+    // If the execution mask is known to be all-true, then we can ignore the write mask.
+    if (!this->executionMaskWritesAreEnabled()) {
+        this->copy_stack_to_slots_unmasked(dst, offsetFromStackTop);
+        return;
+    }
+
+    // If the last instruction copied the previous stack slots, just extend it.
+    if (!fInstructions.empty()) {
+        Instruction& lastInstruction = fInstructions.back();
+
+        // If the last op is copy-stack-to-slots...
+        if (lastInstruction.fOp == BuilderOp::copy_stack_to_slots &&
+            // and this op's destination is immediately after the last copy-slots-op's destination
+            lastInstruction.fSlotA + lastInstruction.fImmA == dst.index &&
+            // and this op's source is immediately after the last copy-slots-op's source
+            // (the last op's source span ends immB-immA below the top)
+            lastInstruction.fImmB - lastInstruction.fImmA == offsetFromStackTop) {
+            // then we can just extend the copy!
+            lastInstruction.fImmA += dst.count;
+            return;
+        }
+    }
+
+    fInstructions.push_back({BuilderOp::copy_stack_to_slots, {dst.index},
+                             dst.count, offsetFromStackTop});
+}
+
+// Copies stack values into a slot range whose position is chosen at runtime via a
+// dynamic index on `dynamicStackID`; `limitRange`'s end bounds the indirect write.
+void Builder::copy_stack_to_slots_indirect(SlotRange fixedRange,
+                                           int dynamicStackID,
+                                           SlotRange limitRange) {
+    // SlotA: fixed-range start
+    // SlotB: limit-range end
+    // immA: number of slots
+    // immB: dynamic stack ID
+    fInstructions.push_back({BuilderOp::copy_stack_to_slots_indirect,
+                             {fixedRange.index, limitRange.index + limitRange.count},
+                             fixedRange.count,
+                             dynamicStackID});
+}
+
+// Returns true if the two slot ranges share any slot (half-open interval overlap).
+static bool slot_ranges_overlap(SlotRange x, SlotRange y) {
+    return x.index < y.index + y.count &&
+           y.index < x.index + x.count;
+}
+
+// Copies `src` into `dst` (same count), ignoring the execution mask. A copy that
+// extends the previous copy contiguously is merged into it, but only if the merged
+// source and destination ranges would not overlap (an overlapping memmove-style
+// copy is not something the single wider op could perform safely).
+void Builder::copy_slots_unmasked(SlotRange dst, SlotRange src) {
+    // If the last instruction copied adjacent slots, just extend it.
+    if (!fInstructions.empty()) {
+        Instruction& lastInstr = fInstructions.back();
+
+        // If the last op is copy-slots-unmasked...
+        if (lastInstr.fOp == BuilderOp::copy_slot_unmasked &&
+            // and this op's destination is immediately after the last copy-slots-op's destination
+            lastInstr.fSlotA + lastInstr.fImmA == dst.index &&
+            // and this op's source is immediately after the last copy-slots-op's source
+            lastInstr.fSlotB + lastInstr.fImmA == src.index &&
+            // and the source/dest ranges will not overlap
+            !slot_ranges_overlap({lastInstr.fSlotB, lastInstr.fImmA + dst.count},
+                                 {lastInstr.fSlotA, lastInstr.fImmA + dst.count})) {
+            // then we can just extend the copy!
+            lastInstr.fImmA += dst.count;
+            return;
+        }
+    }
+
+    SkASSERT(dst.count == src.count);
+    fInstructions.push_back({BuilderOp::copy_slot_unmasked, {dst.index, src.index}, dst.count});
+}
+
+// Unmasked variant of copy_stack_to_slots; same merge logic for contiguous copies.
+void Builder::copy_stack_to_slots_unmasked(SlotRange dst, int offsetFromStackTop) {
+    // If the last instruction copied the previous stack slots, just extend it.
+    if (!fInstructions.empty()) {
+        Instruction& lastInstruction = fInstructions.back();
+
+        // If the last op is copy-stack-to-slots-unmasked...
+        if (lastInstruction.fOp == BuilderOp::copy_stack_to_slots_unmasked &&
+            // and this op's destination is immediately after the last copy-slots-op's destination
+            lastInstruction.fSlotA + lastInstruction.fImmA == dst.index &&
+            // and this op's source is immediately after the last copy-slots-op's source
+            lastInstruction.fImmB - lastInstruction.fImmA == offsetFromStackTop) {
+            // then we can just extend the copy!
+            lastInstruction.fImmA += dst.count;
+            return;
+        }
+    }
+
+    fInstructions.push_back({BuilderOp::copy_stack_to_slots_unmasked, {dst.index},
+                             dst.count, offsetFromStackTop});
+}
+
+// Restores the return mask from the stack, eliding an immediately-preceding
+// mask_off_return_mask whose effect would be overwritten anyway.
+void Builder::pop_return_mask() {
+    SkASSERT(this->executionMaskWritesAreEnabled());
+
+    // This instruction is going to overwrite the return mask. If the previous instruction was
+    // masking off the return mask, that's wasted work and it can be eliminated.
+    if (!fInstructions.empty()) {
+        Instruction& lastInstruction = fInstructions.back();
+
+        if (lastInstruction.fOp == BuilderOp::mask_off_return_mask) {
+            fInstructions.pop_back();
+        }
+    }
+
+    fInstructions.push_back({BuilderOp::pop_return_mask, {}});
+}
+
+// Zeroes the value slots in `dst` without consulting the execution mask. A zero
+// range adjacent to the previous zero-op (on either side) is merged into it.
+void Builder::zero_slots_unmasked(SlotRange dst) {
+    if (!fInstructions.empty()) {
+        Instruction& lastInstruction = fInstructions.back();
+
+        if (lastInstruction.fOp == BuilderOp::zero_slot_unmasked) {
+            if (lastInstruction.fSlotA + lastInstruction.fImmA == dst.index) {
+                // The previous instruction was zeroing the range immediately before this range.
+                // Combine the ranges.
+                lastInstruction.fImmA += dst.count;
+                return;
+            }
+        }
+
+        // (Same op check repeated so each adjacency case reads independently.)
+        if (lastInstruction.fOp == BuilderOp::zero_slot_unmasked) {
+            if (lastInstruction.fSlotA == dst.index + dst.count) {
+                // The previous instruction was zeroing the range immediately after this range.
+                // Combine the ranges.
+                lastInstruction.fSlotA = dst.index;
+                lastInstruction.fImmA += dst.count;
+                return;
+            }
+        }
+    }
+
+    fInstructions.push_back({BuilderOp::zero_slot_unmasked, {dst.index}, dst.count});
+}
+
+// Packs up to 8 small component indices (each 0..15) into an int, 4 bits apiece:
+// component i occupies bits [4i, 4i+4). Iterating in reverse and shifting left
+// leaves the first component in the lowest nybble.
+static int pack_nybbles(SkSpan<const int8_t> components) {
+    // Pack up to 8 elements into nybbles, in reverse order.
+    int packed = 0;
+    for (auto iter = components.rbegin(); iter != components.rend(); ++iter) {
+        SkASSERT(*iter >= 0 && *iter <= 0xF);
+        packed <<= 4;
+        packed |= *iter;
+    }
+    return packed;
+}
+
+// Inverse of pack_nybbles, scaled for the interpreter: each unpacked component
+// becomes a byte offset to a stack slot (slot index * lane stride * sizeof(float)).
+static void unpack_nybbles_to_offsets(uint32_t components, SkSpan<uint16_t> offsets) {
+    // Unpack component nybbles into byte-offsets pointing at stack slots.
+    for (size_t index = 0; index < offsets.size(); ++index) {
+        offsets[index] = (components & 0xF) * SkOpts::raster_pipeline_highp_stride * sizeof(float);
+        components >>= 4;
+    }
+}
+
+// Returns the largest component value among the first `numComponents` nybbles.
+static int max_packed_nybble(uint32_t components, size_t numComponents) {
+    int largest = 0;
+    for (size_t index = 0; index < numComponents; ++index) {
+        largest = std::max<int>(largest, components & 0xF);
+        components >>= 4;
+    }
+    return largest;
+}
+
+// Writes swizzled stack values into the value slots `dst` (masked). `components`
+// selects which stack slots feed each destination component.
+void Builder::swizzle_copy_stack_to_slots(SlotRange dst,
+                                          SkSpan<const int8_t> components,
+                                          int offsetFromStackTop) {
+    // When the execution-mask writes-enabled flag is off, we could squeeze out a little bit of
+    // extra speed here by implementing and using an unmasked version of this op.
+
+    // SlotA: fixed-range start
+    // immA: number of swizzle components
+    // immB: swizzle components
+    // immC: offset from stack top
+    fInstructions.push_back({BuilderOp::swizzle_copy_stack_to_slots, {dst.index},
+                             (int)components.size(),
+                             pack_nybbles(components),
+                             offsetFromStackTop});
+}
+
+// Indirect variant: the destination slot range is offset by a dynamic index read
+// from `dynamicStackID`, bounded by the end of `limitRange`.
+void Builder::swizzle_copy_stack_to_slots_indirect(SlotRange fixedRange,
+                                                   int dynamicStackID,
+                                                   SlotRange limitRange,
+                                                   SkSpan<const int8_t> components,
+                                                   int offsetFromStackTop) {
+    // When the execution-mask writes-enabled flag is off, we could squeeze out a little bit of
+    // extra speed here by implementing and using an unmasked version of this op.
+
+    // SlotA: fixed-range start
+    // SlotB: limit-range end
+    // immA: number of swizzle components
+    // immB: swizzle components
+    // immC: offset from stack top
+    // immD: dynamic stack ID
+    fInstructions.push_back({BuilderOp::swizzle_copy_stack_to_slots_indirect,
+                             {fixedRange.index, limitRange.index + limitRange.count},
+                             (int)components.size(),
+                             pack_nybbles(components),
+                             offsetFromStackTop,
+                             dynamicStackID});
+}
+
+// Rearranges the top of the stack: consumes `consumedSlots` values and emits
+// `components.size()` values, where each output is the input slot named by the
+// corresponding component index.
+void Builder::swizzle(int consumedSlots, SkSpan<const int8_t> components) {
+    // Consumes `consumedSlots` elements on the stack, then generates `elementSpan.size()` elements.
+    SkASSERT(consumedSlots >= 0);
+
+    // We only allow up to 16 elements, and they can only reach 0-15 slots, due to nybble packing.
+    int numElements = components.size();
+    SkASSERT(numElements <= 16);
+    SkASSERT(std::all_of(components.begin(), components.end(), [](int8_t e){ return e >= 0; }));
+    SkASSERT(std::all_of(components.begin(), components.end(), [](int8_t e){ return e <= 0xF; }));
+
+    // Make a local copy of the element array.
+    int8_t elements[16] = {};
+    std::copy(components.begin(), components.end(), std::begin(elements));
+
+    // Trim any identity prefix: while the first output is slot 0 (already in place on
+    // the stack) and no other output needs slot 0, that slot can simply be left alone.
+    // Drop it from the swizzle and renumber the remaining components down by one.
+    while (numElements > 0) {
+        // Stop unless the first element of the swizzle is zero...
+        if (elements[0] != 0) {
+            break;
+        }
+        // ...or if zero is used elsewhere in the swizzle (slot 0 would still be needed).
+        if (std::any_of(&elements[1], &elements[numElements], [](int8_t e) { return e == 0; })) {
+            break;
+        }
+        // We can omit the first slot from the swizzle entirely.
+        // Slide everything forward by one slot, and reduce the element index by one.
+        for (int index = 1; index < numElements; ++index) {
+            elements[index - 1] = elements[index] - 1;
+        }
+        // Clear the vacated tail entry so the fixed 8-entry packs below see zeros.
+        elements[numElements - 1] = 0;
+        --consumedSlots;
+        --numElements;
+    }
+
+    // A completely empty swizzle is a no-op.
+    if (numElements == 0) {
+        this->discard_stack(consumedSlots);
+        return;
+    }
+
+    if (consumedSlots <= 4 && numElements <= 4) {
+        // We can fit everything into a little swizzle.
+        // (swizzle_1..swizzle_4 are assumed consecutive in the op list.)
+        int op = (int)BuilderOp::swizzle_1 + numElements - 1;
+        fInstructions.push_back({(BuilderOp)op, {}, consumedSlots,
+                                 pack_nybbles(SkSpan(elements, numElements))});
+        return;
+    }
+
+    // This is a big swizzle. We use the `shuffle` op to handle these.
+    // Slot usage is packed into immA. The top 16 bits of immA count the consumed slots; the bottom
+    // 16 bits count the generated slots.
+    int slotUsage = consumedSlots << 16;
+    slotUsage |= numElements;
+
+    // Pack immB and immC with the shuffle list in packed-nybble form.
+    fInstructions.push_back({BuilderOp::shuffle, {}, slotUsage,
+                             pack_nybbles(SkSpan(&elements[0], 8)),
+                             pack_nybbles(SkSpan(&elements[8], 8))});
+}
+
+// Transposes a column-major CxR matrix on the stack into an RxC matrix, expressed
+// as a single swizzle whose outputs walk the input in row-major order.
+void Builder::transpose(int columns, int rows) {
+    // Transposes a matrix of size CxR on the stack (into a matrix of size RxC).
+    int8_t elements[16] = {};
+    size_t index = 0;
+    for (int r = 0; r < rows; ++r) {
+        for (int c = 0; c < columns; ++c) {
+            elements[index++] = (c * rows) + r;
+        }
+    }
+    this->swizzle(/*consumedSlots=*/columns * rows, SkSpan(elements, index));
+}
+
+// Builds a CxR diagonal matrix from the two scalars on the stack: component 1
+// (the diagonal value) where c == r, component 0 (the off-diagonal value) elsewhere.
+void Builder::diagonal_matrix(int columns, int rows) {
+    // Generates a CxR diagonal matrix from the top two scalars on the stack.
+    int8_t elements[16] = {};
+    size_t index = 0;
+    for (int c = 0; c < columns; ++c) {
+        for (int r = 0; r < rows; ++r) {
+            elements[index++] = (c == r) ? 1 : 0;
+        }
+    }
+    this->swizzle(/*consumedSlots=*/2, SkSpan(elements, index));
+}
+
+// Resizes the CxR matrix on the stack to C'xR'. Elements inside the original
+// extent are carried over; new elements are 1 on the diagonal and 0 elsewhere,
+// synthesized lazily by pushing at most one literal 1 and one zero.
+void Builder::matrix_resize(int origColumns, int origRows, int newColumns, int newRows) {
+    // Resizes a CxR matrix at the top of the stack to C'xR'.
+    int8_t elements[16] = {};
+    size_t index = 0;
+
+    size_t consumedSlots = origColumns * origRows;
+    // Offset 0 always names an original-matrix element (the matrix is at least 1x1),
+    // so 0 safely doubles as the "not yet synthesized" sentinel for both literals.
+    size_t zeroOffset = 0, oneOffset = 0;
+
+    for (int c = 0; c < newColumns; ++c) {
+        for (int r = 0; r < newRows; ++r) {
+            if (c < origColumns && r < origRows) {
+                // Push an element from the original matrix.
+                elements[index++] = (c * origRows) + r;
+            } else {
+                // This element is outside the original matrix; push 1 or 0.
+                if (c == r) {
+                    // We need to synthesize a literal 1.
+                    if (oneOffset == 0) {
+                        this->push_literal_f(1.0f);
+                        oneOffset = consumedSlots++;
+                    }
+                    elements[index++] = oneOffset;
+                } else {
+                    // We need to synthesize a literal 0.
+                    if (zeroOffset == 0) {
+                        this->push_zeros(1);
+                        zeroOffset = consumedSlots++;
+                    }
+                    elements[index++] = zeroOffset;
+                }
+            }
+        }
+    }
+    this->swizzle(consumedSlots, SkSpan(elements, index));
+}
+
+// Finalizes the builder, transferring the accumulated instruction list into a new
+// executable Program. `debugTrace` may be null.
+std::unique_ptr<Program> Builder::finish(int numValueSlots,
+                                         int numUniformSlots,
+                                         SkRPDebugTrace* debugTrace) {
+    // Verify that calls to enableExecutionMaskWrites and disableExecutionMaskWrites are balanced.
+    SkASSERT(fExecutionMaskWritesEnabled == 0);
+
+    return std::make_unique<Program>(std::move(fInstructions), numValueSlots, numUniformSlots,
+                                     fNumLabels, debugTrace);
+}
+
+// Placeholder for whole-program cleanup passes; currently a no-op.
+void Program::optimize() {
+    // TODO(johnstiles): perform any last-minute cleanup of the instruction stream here
+}
+
+// Returns the net effect of `inst` on its temp stack's depth, in slots:
+// positive for pushes, negative for pops, zero for ops that leave depth unchanged.
+static int stack_usage(const Instruction& inst) {
+    switch (inst.fOp) {
+        case BuilderOp::push_literal:
+        case BuilderOp::push_condition_mask:
+        case BuilderOp::push_loop_mask:
+        case BuilderOp::push_return_mask:
+            return 1;
+
+        case BuilderOp::push_src_rgba:
+        case BuilderOp::push_dst_rgba:
+            return 4;
+
+        // Variable-width pushes carry their slot count in immA.
+        case BuilderOp::push_slots:
+        case BuilderOp::push_slots_indirect:
+        case BuilderOp::push_uniform:
+        case BuilderOp::push_uniform_indirect:
+        case BuilderOp::push_zeros:
+        case BuilderOp::push_clone:
+        case BuilderOp::push_clone_from_stack:
+        case BuilderOp::push_clone_indirect_from_stack:
+            return inst.fImmA;
+
+        case BuilderOp::pop_condition_mask:
+        case BuilderOp::pop_loop_mask:
+        case BuilderOp::pop_and_reenable_loop_mask:
+        case BuilderOp::pop_return_mask:
+            return -1;
+
+        case BuilderOp::pop_src_rg:
+            return -2;
+
+        case BuilderOp::pop_src_rgba:
+        case BuilderOp::pop_dst_rgba:
+            return -4;
+
+        // Binary ops consume one immA-sized operand (the result replaces the other).
+        case ALL_N_WAY_BINARY_OP_CASES:
+        case ALL_MULTI_SLOT_BINARY_OP_CASES:
+        case BuilderOp::discard_stack:
+        case BuilderOp::select:
+            return -inst.fImmA;
+
+        // Ternary ops consume two immA-sized operands.
+        case ALL_N_WAY_TERNARY_OP_CASES:
+        case ALL_MULTI_SLOT_TERNARY_OP_CASES:
+            return 2 * -inst.fImmA;
+
+        case BuilderOp::swizzle_1:
+            return 1 - inst.fImmA; // consumes immA slots and emits a scalar
+        case BuilderOp::swizzle_2:
+            return 2 - inst.fImmA; // consumes immA slots and emits a 2-slot vector
+        case BuilderOp::swizzle_3:
+            return 3 - inst.fImmA; // consumes immA slots and emits a 3-slot vector
+        case BuilderOp::swizzle_4:
+            return 4 - inst.fImmA; // consumes immA slots and emits a 4-slot vector
+
+        case BuilderOp::dot_2_floats:
+            return -3; // consumes two 2-slot vectors and emits one scalar
+        case BuilderOp::dot_3_floats:
+            return -5; // consumes two 3-slot vectors and emits one scalar
+        case BuilderOp::dot_4_floats:
+            return -7; // consumes two 4-slot vectors and emits one scalar
+
+        case BuilderOp::refract_4_floats:
+            return -5; // consumes nine slots (N + I + eta) and emits a 4-slot vector (R)
+
+        case BuilderOp::shuffle: {
+            // immA packs consumed slots in the high 16 bits, generated slots in the low 16.
+            int consumed = inst.fImmA >> 16;
+            int generated = inst.fImmA & 0xFFFF;
+            return generated - consumed;
+        }
+        // Unary ops (and everything else) operate in place; the stack depth is unchanged.
+        case ALL_SINGLE_SLOT_UNARY_OP_CASES:
+        case ALL_MULTI_SLOT_UNARY_OP_CASES:
+        default:
+            return 0;
+    }
+}
+
+// Simulates the instruction stream to compute, per temp-stack index, the maximum
+// depth that stack ever reaches. Asserts (in debug builds) that no stack ever goes
+// negative and that every stack ends balanced at depth zero.
+Program::StackDepthMap Program::tempStackMaxDepths() const {
+    StackDepthMap largest;
+    StackDepthMap current;
+
+    int curIdx = 0;
+    for (const Instruction& inst : fInstructions) {
+        // set_current_stack switches which stack subsequent ops push to / pop from.
+        if (inst.fOp == BuilderOp::set_current_stack) {
+            curIdx = inst.fImmA;
+        }
+        current[curIdx] += stack_usage(inst);
+        largest[curIdx] = std::max(current[curIdx], largest[curIdx]);
+        SkASSERTF(current[curIdx] >= 0, "unbalanced temp stack push/pop on stack %d", curIdx);
+    }
+
+    for (const auto& [stackIdx, depth] : current) {
+        (void)stackIdx;
+        SkASSERTF(depth == 0, "unbalanced temp stack push/pop");
+    }
+
+    return largest;
+}
+
+// Takes ownership of the builder's instruction list, runs the (currently no-op)
+// optimizer, and precomputes per-stack maximum depths plus the total number of
+// temp-stack slots the program will need at runtime.
+Program::Program(SkTArray<Instruction> instrs,
+                 int numValueSlots,
+                 int numUniformSlots,
+                 int numLabels,
+                 SkRPDebugTrace* debugTrace)
+        : fInstructions(std::move(instrs))
+        , fNumValueSlots(numValueSlots)
+        , fNumUniformSlots(numUniformSlots)
+        , fNumLabels(numLabels)
+        , fDebugTrace(debugTrace) {
+    this->optimize();
+
+    fTempStackMaxDepths = this->tempStackMaxDepths();
+
+    // Total slab size: temp stacks are laid out back-to-back, each at its max depth.
+    fNumTempStackSlots = 0;
+    for (const auto& [stackIdx, depth] : fTempStackMaxDepths) {
+        (void)stackIdx;
+        fNumTempStackSlots += depth;
+    }
+}
+
+// Appends stages that copy `numSlots` slots from `src` to `dst`. Copies wider than
+// four slots are split into four-slot chunks (recursively), then a specialized
+// 1-4 slot stage is chosen by offsetting `baseStage` — this assumes the four
+// specializations are laid out consecutively starting at `baseStage`.
+void Program::appendCopy(SkTArray<Stage>* pipeline,
+                         SkArenaAlloc* alloc,
+                         ProgramOp baseStage,
+                         float* dst, int dstStride,
+                         const float* src, int srcStride,
+                         int numSlots) const {
+    SkASSERT(numSlots >= 0);
+    while (numSlots > 4) {
+        this->appendCopy(pipeline, alloc, baseStage, dst, dstStride, src, srcStride,/*numSlots=*/4);
+        dst += 4 * dstStride;
+        src += 4 * srcStride;
+        numSlots -= 4;
+    }
+
+    if (numSlots > 0) {
+        SkASSERT(numSlots <= 4);
+        auto stage = (ProgramOp)((int)baseStage + numSlots - 1);
+        auto* ctx = alloc->make<SkRasterPipeline_BinaryOpCtx>();
+        ctx->dst = dst;
+        ctx->src = src;
+        pipeline->push_back({stage, ctx});
+    }
+}
+
+// Unmasked slot-to-slot copy; both sides use the full SIMD lane stride.
+void Program::appendCopySlotsUnmasked(SkTArray<Stage>* pipeline,
+                                      SkArenaAlloc* alloc,
+                                      float* dst,
+                                      const float* src,
+                                      int numSlots) const {
+    this->appendCopy(pipeline, alloc,
+                     ProgramOp::copy_slot_unmasked,
+                     dst, /*dstStride=*/SkOpts::raster_pipeline_highp_stride,
+                     src, /*srcStride=*/SkOpts::raster_pipeline_highp_stride,
+                     numSlots);
+}
+
+// Execution-mask-honoring slot-to-slot copy.
+void Program::appendCopySlotsMasked(SkTArray<Stage>* pipeline,
+                                    SkArenaAlloc* alloc,
+                                    float* dst,
+                                    const float* src,
+                                    int numSlots) const {
+    this->appendCopy(pipeline, alloc,
+                     ProgramOp::copy_slot_masked,
+                     dst, /*dstStride=*/SkOpts::raster_pipeline_highp_stride,
+                     src, /*srcStride=*/SkOpts::raster_pipeline_highp_stride,
+                     numSlots);
+}
+
+// Broadcast-copy of scalar constants: the source advances one float per slot
+// (srcStride 1) while the destination advances a full SIMD lane stride.
+void Program::appendCopyConstants(SkTArray<Stage>* pipeline,
+                                  SkArenaAlloc* alloc,
+                                  float* dst,
+                                  const float* src,
+                                  int numSlots) const {
+    this->appendCopy(pipeline, alloc,
+                     ProgramOp::copy_constant,
+                     dst, /*dstStride=*/1,
+                     src, /*srcStride=*/1,
+                     numSlots);
+}
+
+// Appends `numSlots` copies of a single-slot unary stage, one per slot.
+void Program::appendSingleSlotUnaryOp(SkTArray<Stage>* pipeline, ProgramOp stage,
+                                      float* dst, int numSlots) const {
+    SkASSERT(numSlots >= 0);
+    while (numSlots--) {
+        pipeline->push_back({stage, dst});
+        dst += SkOpts::raster_pipeline_highp_stride;
+    }
+}
+
+// Appends a multi-slot unary stage, splitting >4-slot spans into 4-slot chunks and
+// then selecting the 1-4 slot specialization at baseStage + numSlots - 1.
+// NOTE(review): if numSlots were 0 this would emit baseStage-1; callers apparently
+// never pass 0 here (compare appendCopy's `numSlots > 0` guard) — confirm.
+void Program::appendMultiSlotUnaryOp(SkTArray<Stage>* pipeline, ProgramOp baseStage,
+                                     float* dst, int numSlots) const {
+    SkASSERT(numSlots >= 0);
+    while (numSlots > 4) {
+        this->appendMultiSlotUnaryOp(pipeline, baseStage, dst, /*numSlots=*/4);
+        dst += 4 * SkOpts::raster_pipeline_highp_stride;
+        numSlots -= 4;
+    }
+
+    SkASSERT(numSlots <= 4);
+    auto stage = (ProgramOp)((int)baseStage + numSlots - 1);
+    pipeline->push_back({stage, dst});
+}
+
+// Appends an N-way binary stage operating on `numSlots` slots; dst and src must be
+// exactly adjacent spans so the stage can locate both from one context.
+void Program::appendAdjacentNWayBinaryOp(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+                                         ProgramOp stage,
+                                         float* dst, const float* src, int numSlots) const {
+    // The source and destination must be directly next to one another.
+    SkASSERT(numSlots >= 0);
+    SkASSERT((dst + SkOpts::raster_pipeline_highp_stride * numSlots) == src);
+
+    if (numSlots > 0) {
+        auto ctx = alloc->make<SkRasterPipeline_BinaryOpCtx>();
+        ctx->dst = dst;
+        ctx->src = src;
+        pipeline->push_back({stage, ctx});
+    }
+}
+
+// Like appendAdjacentNWayBinaryOp, but 1-4 slot spans use the specialized stage at
+// baseStage + numSlots (here `baseStage` is the N-way op itself, with the four
+// fixed-width specializations assumed to follow it in the op list).
+void Program::appendAdjacentMultiSlotBinaryOp(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+                                              ProgramOp baseStage,
+                                              float* dst, const float* src, int numSlots) const {
+    // The source and destination must be directly next to one another.
+    SkASSERT(numSlots >= 0);
+    SkASSERT((dst + SkOpts::raster_pipeline_highp_stride * numSlots) == src);
+
+    if (numSlots > 4) {
+        this->appendAdjacentNWayBinaryOp(pipeline, alloc, baseStage, dst, src, numSlots);
+        return;
+    }
+    if (numSlots > 0) {
+        auto specializedStage = (ProgramOp)((int)baseStage + numSlots);
+        pipeline->push_back({specializedStage, dst});
+    }
+}
+
+// Appends an N-way ternary stage; dst, src0 and src1 must form three consecutive
+// adjacent spans of `numSlots` slots each.
+void Program::appendAdjacentNWayTernaryOp(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+                                          ProgramOp stage, float* dst, const float* src0,
+                                          const float* src1, int numSlots) const {
+    // The float pointers must all be immediately adjacent to each other.
+    SkASSERT(numSlots >= 0);
+    SkASSERT((dst + SkOpts::raster_pipeline_highp_stride * numSlots) == src0);
+    SkASSERT((src0 + SkOpts::raster_pipeline_highp_stride * numSlots) == src1);
+
+    if (numSlots > 0) {
+        auto ctx = alloc->make<SkRasterPipeline_TernaryOpCtx>();
+        ctx->dst = dst;
+        ctx->src0 = src0;
+        ctx->src1 = src1;
+        pipeline->push_back({stage, ctx});
+    }
+}
+
+// Ternary analogue of appendAdjacentMultiSlotBinaryOp: specializations live at
+// baseStage + numSlots for 1-4 slots, otherwise the N-way form is used.
+void Program::appendAdjacentMultiSlotTernaryOp(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+                                               ProgramOp baseStage, float* dst, const float* src0,
+                                               const float* src1, int numSlots) const {
+    // The float pointers must all be immediately adjacent to each other.
+    SkASSERT(numSlots >= 0);
+    SkASSERT((dst + SkOpts::raster_pipeline_highp_stride * numSlots) == src0);
+    SkASSERT((src0 + SkOpts::raster_pipeline_highp_stride * numSlots) == src1);
+
+    if (numSlots > 4) {
+        this->appendAdjacentNWayTernaryOp(pipeline, alloc, baseStage, dst, src0, src1, numSlots);
+        return;
+    }
+    if (numSlots > 0) {
+        auto specializedStage = (ProgramOp)((int)baseStage + numSlots);
+        pipeline->push_back({specializedStage, dst});
+    }
+}
+
+// Appends a stack-rewind stage — only needed when musttail calls are unavailable
+// (or in standalone builds), where the native call stack can otherwise grow
+// unboundedly across long pipelines.
+void Program::appendStackRewind(SkTArray<Stage>* pipeline) const {
+#if defined(SKSL_STANDALONE) || !SK_HAS_MUSTTAIL
+    pipeline->push_back({ProgramOp::stack_rewind, nullptr});
+#endif
+}
+
+// Smuggles a small integer through a stage's void* context pointer.
+static void* context_bit_pun(intptr_t val) {
+    return sk_bit_cast<void*>(val);
+}
+
+// Allocates and zeroes one contiguous, vector-aligned slab holding all value slots
+// followed by all temp-stack slots; returns spans over the two regions.
+Program::SlotData Program::allocateSlotData(SkArenaAlloc* alloc) const {
+    // Allocate a contiguous slab of slot data for values and stack entries.
+    const int N = SkOpts::raster_pipeline_highp_stride;
+    const int vectorWidth = N * sizeof(float);
+    const int allocSize = vectorWidth * (fNumValueSlots + fNumTempStackSlots);
+    float* slotPtr = static_cast<float*>(alloc->makeBytesAlignedTo(allocSize, vectorWidth));
+    sk_bzero(slotPtr, allocSize);
+
+    // Store the temp stack immediately after the values.
+    SlotData s;
+    s.values = SkSpan(slotPtr, N * fNumValueSlots);
+    s.stack = SkSpan(s.values.end(), N * fNumTempStackSlots);
+    return s;
+}
+
+#if !defined(SKSL_STANDALONE)
+
+// Converts this Program into concrete SkRasterPipeline stages. Label/branch
+// pseudo-ops are resolved here: labels record their pipeline position, branches
+// are appended with a placeholder, and a final fixup pass rewrites each branch's
+// offset to a relative stage delta. Returns false if a callback-dependent op is
+// reached without the needed callback (or the callback itself fails).
+bool Program::appendStages(SkRasterPipeline* pipeline,
+                           SkArenaAlloc* alloc,
+                           RP::Callbacks* callbacks,
+                           SkSpan<const float> uniforms) const {
+    // Convert our Instruction list to an array of ProgramOps.
+    SkTArray<Stage> stages;
+    this->makeStages(&stages, alloc, uniforms, this->allocateSlotData(alloc));
+
+    // Allocate buffers for branch targets and labels; these are needed to convert labels into
+    // actual offsets into the pipeline and fix up branches.
+    SkTArray<SkRasterPipeline_BranchCtx*> branchContexts;
+    branchContexts.reserve_back(fNumLabels);
+    SkTArray<int> labelOffsets;
+    labelOffsets.push_back_n(fNumLabels, -1);
+    SkTArray<int> branchGoesToLabel;
+    branchGoesToLabel.reserve_back(fNumLabels);
+
+    for (const Stage& stage : stages) {
+        switch (stage.op) {
+            case ProgramOp::stack_rewind:
+                pipeline->append_stack_rewind();
+                break;
+
+            // The invoke_* ops delegate to the caller-supplied callbacks; each one fails
+            // the whole append if no callback object was provided.
+            case ProgramOp::invoke_shader:
+                if (!callbacks || !callbacks->appendShader(sk_bit_cast<intptr_t>(stage.ctx))) {
+                    return false;
+                }
+                break;
+
+            case ProgramOp::invoke_color_filter:
+                if (!callbacks || !callbacks->appendColorFilter(sk_bit_cast<intptr_t>(stage.ctx))) {
+                    return false;
+                }
+                break;
+
+            case ProgramOp::invoke_blender:
+                if (!callbacks || !callbacks->appendBlender(sk_bit_cast<intptr_t>(stage.ctx))) {
+                    return false;
+                }
+                break;
+
+            case ProgramOp::invoke_to_linear_srgb:
+                if (!callbacks) {
+                    return false;
+                }
+                callbacks->toLinearSrgb();
+                break;
+
+            case ProgramOp::invoke_from_linear_srgb:
+                if (!callbacks) {
+                    return false;
+                }
+                callbacks->fromLinearSrgb();
+                break;
+
+            case ProgramOp::label: {
+                // Remember the absolute pipeline position of this label.
+                // (Labels emit no stage of their own.)
+                int labelID = sk_bit_cast<intptr_t>(stage.ctx);
+                SkASSERT(labelID >= 0 && labelID < fNumLabels);
+                labelOffsets[labelID] = pipeline->getNumStages();
+                break;
+            }
+            case ProgramOp::jump:
+            case ProgramOp::branch_if_all_lanes_active:
+            case ProgramOp::branch_if_any_lanes_active:
+            case ProgramOp::branch_if_no_lanes_active:
+            case ProgramOp::branch_if_no_active_lanes_eq: {
+                // The branch context contain a valid label ID at this point.
+                auto* branchCtx = static_cast<SkRasterPipeline_BranchCtx*>(stage.ctx);
+                int labelID = branchCtx->offset;
+                SkASSERT(labelID >= 0 && labelID < fNumLabels);
+
+                // Replace the label ID in the branch context with the absolute pipeline position.
+                // We will go back over the branch targets at the end and fix them up.
+                branchCtx->offset = pipeline->getNumStages();
+
+                SkASSERT(branchContexts.size() == branchGoesToLabel.size());
+                branchContexts.push_back(branchCtx);
+                branchGoesToLabel.push_back(labelID);
+                // Deliberate fall-through: the branch stage itself still gets appended below.
+                [[fallthrough]];
+            }
+            default:
+                // Append a regular op to the program.
+                SkASSERT((int)stage.op < kNumRasterPipelineHighpOps);
+                pipeline->append((SkRasterPipelineOp)stage.op, stage.ctx);
+                break;
+        }
+    }
+
+    // Now that we have assembled the program and know the pipeline positions of each label and
+    // branch, fix up every branch target.
+    // (Offsets become relative: target position minus branch position.)
+    SkASSERT(branchContexts.size() == branchGoesToLabel.size());
+    for (int index = 0; index < branchContexts.size(); ++index) {
+        int branchFromIdx = branchContexts[index]->offset;
+        int branchToIdx = labelOffsets[branchGoesToLabel[index]];
+        branchContexts[index]->offset = branchToIdx - branchFromIdx;
+    }
+
+    return true;
+}
+
+#endif
+
+void Program::makeStages(SkTArray<Stage>* pipeline,
+ SkArenaAlloc* alloc,
+ SkSpan<const float> uniforms,
+ const SlotData& slots) const {
+ SkASSERT(fNumUniformSlots == SkToInt(uniforms.size()));
+
+ const int N = SkOpts::raster_pipeline_highp_stride;
+ StackDepthMap tempStackDepth;
+ int currentStack = 0;
+ int mostRecentRewind = 0;
+
+ // Assemble a map holding the current stack-top for each temporary stack. Position each temp
+ // stack immediately after the previous temp stack; temp stacks are never allowed to overlap.
+ int pos = 0;
+ SkTHashMap<int, float*> tempStackMap;
+ for (auto& [idx, depth] : fTempStackMaxDepths) {
+ tempStackMap[idx] = slots.stack.begin() + (pos * N);
+ pos += depth;
+ }
+
+ // Track labels that we have reached in processing.
+ SkBitSet labelsEncountered(fNumLabels);
+
+ auto EmitStackRewindForBackwardsBranch = [&](int labelID) {
+ // If we have already encountered the label associated with this branch, this is a
+ // backwards branch. Add a stack-rewind immediately before the branch to ensure that
+ // long-running loops don't use an unbounded amount of stack space.
+ if (labelsEncountered.test(labelID)) {
+ this->appendStackRewind(pipeline);
+ mostRecentRewind = pipeline->size();
+ }
+ };
+
+ // We can reuse constants from our arena by placing them in this map.
+ SkTHashMap<int, int*> constantLookupMap; // <constant value, pointer into arena>
+
+ // Write each BuilderOp to the pipeline array.
+ pipeline->reserve_back(fInstructions.size());
+ for (const Instruction& inst : fInstructions) {
+ auto SlotA = [&]() { return &slots.values[N * inst.fSlotA]; };
+ auto SlotB = [&]() { return &slots.values[N * inst.fSlotB]; };
+ auto UniformA = [&]() { return &uniforms[inst.fSlotA]; };
+ float*& tempStackPtr = tempStackMap[currentStack];
+
+ switch (inst.fOp) {
+ case BuilderOp::label:
+ SkASSERT(inst.fImmA >= 0 && inst.fImmA < fNumLabels);
+ labelsEncountered.set(inst.fImmA);
+ pipeline->push_back({ProgramOp::label, context_bit_pun(inst.fImmA)});
+ break;
+
+ case BuilderOp::jump:
+ case BuilderOp::branch_if_all_lanes_active:
+ case BuilderOp::branch_if_any_lanes_active:
+ case BuilderOp::branch_if_no_lanes_active: {
+ SkASSERT(inst.fImmA >= 0 && inst.fImmA < fNumLabels);
+ EmitStackRewindForBackwardsBranch(inst.fImmA);
+
+ auto* ctx = alloc->make<SkRasterPipeline_BranchCtx>();
+ ctx->offset = inst.fImmA;
+ pipeline->push_back({(ProgramOp)inst.fOp, ctx});
+ break;
+ }
+ case BuilderOp::branch_if_no_active_lanes_on_stack_top_equal: {
+ SkASSERT(inst.fImmA >= 0 && inst.fImmA < fNumLabels);
+ EmitStackRewindForBackwardsBranch(inst.fImmA);
+
+ auto* ctx = alloc->make<SkRasterPipeline_BranchIfEqualCtx>();
+ ctx->offset = inst.fImmA;
+ ctx->value = inst.fImmB;
+ ctx->ptr = reinterpret_cast<int*>(tempStackPtr - N);
+ pipeline->push_back({ProgramOp::branch_if_no_active_lanes_eq, ctx});
+ break;
+ }
+ case BuilderOp::init_lane_masks:
+ pipeline->push_back({ProgramOp::init_lane_masks, nullptr});
+ break;
+
+ case BuilderOp::store_src_rg:
+ pipeline->push_back({ProgramOp::store_src_rg, SlotA()});
+ break;
+
+ case BuilderOp::store_src:
+ pipeline->push_back({ProgramOp::store_src, SlotA()});
+ break;
+
+ case BuilderOp::store_dst:
+ pipeline->push_back({ProgramOp::store_dst, SlotA()});
+ break;
+
+ case BuilderOp::store_device_xy01:
+ pipeline->push_back({ProgramOp::store_device_xy01, SlotA()});
+ break;
+
+ case BuilderOp::load_src:
+ pipeline->push_back({ProgramOp::load_src, SlotA()});
+ break;
+
+ case BuilderOp::load_dst:
+ pipeline->push_back({ProgramOp::load_dst, SlotA()});
+ break;
+
+ case ALL_SINGLE_SLOT_UNARY_OP_CASES: {
+ float* dst = tempStackPtr - (inst.fImmA * N);
+ this->appendSingleSlotUnaryOp(pipeline, (ProgramOp)inst.fOp, dst, inst.fImmA);
+ break;
+ }
+ case ALL_MULTI_SLOT_UNARY_OP_CASES: {
+ float* dst = tempStackPtr - (inst.fImmA * N);
+ this->appendMultiSlotUnaryOp(pipeline, (ProgramOp)inst.fOp, dst, inst.fImmA);
+ break;
+ }
+ case ALL_N_WAY_BINARY_OP_CASES: {
+ float* src = tempStackPtr - (inst.fImmA * N);
+ float* dst = tempStackPtr - (inst.fImmA * 2 * N);
+ this->appendAdjacentNWayBinaryOp(pipeline, alloc, (ProgramOp)inst.fOp,
+ dst, src, inst.fImmA);
+ break;
+ }
+ case ALL_MULTI_SLOT_BINARY_OP_CASES: {
+ float* src = tempStackPtr - (inst.fImmA * N);
+ float* dst = tempStackPtr - (inst.fImmA * 2 * N);
+ this->appendAdjacentMultiSlotBinaryOp(pipeline, alloc, (ProgramOp)inst.fOp,
+ dst, src, inst.fImmA);
+ break;
+ }
+ case ALL_N_WAY_TERNARY_OP_CASES: {
+ float* src1 = tempStackPtr - (inst.fImmA * N);
+ float* src0 = tempStackPtr - (inst.fImmA * 2 * N);
+ float* dst = tempStackPtr - (inst.fImmA * 3 * N);
+ this->appendAdjacentNWayTernaryOp(pipeline, alloc, (ProgramOp)inst.fOp,
+ dst, src0, src1, inst.fImmA);
+ break;
+ }
+ case ALL_MULTI_SLOT_TERNARY_OP_CASES: {
+ float* src1 = tempStackPtr - (inst.fImmA * N);
+ float* src0 = tempStackPtr - (inst.fImmA * 2 * N);
+ float* dst = tempStackPtr - (inst.fImmA * 3 * N);
+ this->appendAdjacentMultiSlotTernaryOp(pipeline, alloc, (ProgramOp)inst.fOp,
+ dst, src0, src1, inst.fImmA);
+ break;
+ }
+ case BuilderOp::select: {
+ float* src = tempStackPtr - (inst.fImmA * N);
+ float* dst = tempStackPtr - (inst.fImmA * 2 * N);
+ this->appendCopySlotsMasked(pipeline, alloc, dst, src, inst.fImmA);
+ break;
+ }
+ case BuilderOp::copy_slot_masked:
+ this->appendCopySlotsMasked(pipeline, alloc, SlotA(), SlotB(), inst.fImmA);
+ break;
+
+ case BuilderOp::copy_slot_unmasked:
+ this->appendCopySlotsUnmasked(pipeline, alloc, SlotA(), SlotB(), inst.fImmA);
+ break;
+
+ case BuilderOp::zero_slot_unmasked:
+ this->appendMultiSlotUnaryOp(pipeline, ProgramOp::zero_slot_unmasked,
+ SlotA(), inst.fImmA);
+ break;
+
+ case BuilderOp::refract_4_floats: {
+ float* dst = tempStackPtr - (9 * N);
+ pipeline->push_back({ProgramOp::refract_4_floats, dst});
+ break;
+ }
+ case BuilderOp::inverse_mat2:
+ case BuilderOp::inverse_mat3:
+ case BuilderOp::inverse_mat4: {
+ float* dst = tempStackPtr - (inst.fImmA * N);
+ pipeline->push_back({(ProgramOp)inst.fOp, dst});
+ break;
+ }
+ case BuilderOp::dot_2_floats:
+ case BuilderOp::dot_3_floats:
+ case BuilderOp::dot_4_floats: {
+ float* dst = tempStackPtr - (inst.fImmA * 2 * N);
+ pipeline->push_back({(ProgramOp)inst.fOp, dst});
+ break;
+ }
+ case BuilderOp::swizzle_1:
+ case BuilderOp::swizzle_2:
+ case BuilderOp::swizzle_3:
+ case BuilderOp::swizzle_4: {
+ auto* ctx = alloc->make<SkRasterPipeline_SwizzleCtx>();
+ ctx->ptr = tempStackPtr - (N * inst.fImmA);
+ // Unpack component nybbles into byte-offsets pointing at stack slots.
+ unpack_nybbles_to_offsets(inst.fImmB, SkSpan(ctx->offsets));
+ pipeline->push_back({(ProgramOp)inst.fOp, ctx});
+ break;
+ }
+ case BuilderOp::shuffle: {
+ int consumed = inst.fImmA >> 16;
+ int generated = inst.fImmA & 0xFFFF;
+
+ auto* ctx = alloc->make<SkRasterPipeline_ShuffleCtx>();
+ ctx->ptr = tempStackPtr - (N * consumed);
+ ctx->count = generated;
+ // Unpack immB and immC from nybble form into the offset array.
+ unpack_nybbles_to_offsets(inst.fImmB, SkSpan(&ctx->offsets[0], 8));
+ unpack_nybbles_to_offsets(inst.fImmC, SkSpan(&ctx->offsets[8], 8));
+ pipeline->push_back({ProgramOp::shuffle, ctx});
+ break;
+ }
+ case BuilderOp::push_src_rgba: {
+ float* dst = tempStackPtr;
+ pipeline->push_back({ProgramOp::store_src, dst});
+ break;
+ }
+ case BuilderOp::push_dst_rgba: {
+ float* dst = tempStackPtr;
+ pipeline->push_back({ProgramOp::store_dst, dst});
+ break;
+ }
+ case BuilderOp::pop_src_rg: {
+ float* src = tempStackPtr - (2 * N);
+ pipeline->push_back({ProgramOp::load_src_rg, src});
+ break;
+ }
+ case BuilderOp::pop_src_rgba: {
+ float* src = tempStackPtr - (4 * N);
+ pipeline->push_back({ProgramOp::load_src, src});
+ break;
+ }
+ case BuilderOp::pop_dst_rgba: {
+ float* src = tempStackPtr - (4 * N);
+ pipeline->push_back({ProgramOp::load_dst, src});
+ break;
+ }
+ case BuilderOp::push_slots: {
+ float* dst = tempStackPtr;
+ this->appendCopySlotsUnmasked(pipeline, alloc, dst, SlotA(), inst.fImmA);
+ break;
+ }
+ case BuilderOp::copy_stack_to_slots_indirect:
+ case BuilderOp::push_slots_indirect:
+ case BuilderOp::push_uniform_indirect: {
+ // SlotA: fixed-range start
+ // SlotB: limit-range end
+ // immA: number of slots to copy
+ // immB: dynamic stack ID
+ ProgramOp op;
+ auto* ctx = alloc->make<SkRasterPipeline_CopyIndirectCtx>();
+ ctx->indirectOffset =
+ reinterpret_cast<const uint32_t*>(tempStackMap[inst.fImmB]) - (1 * N);
+ ctx->indirectLimit = inst.fSlotB - inst.fSlotA - inst.fImmA;
+ ctx->slots = inst.fImmA;
+ if (inst.fOp == BuilderOp::push_slots_indirect) {
+ op = ProgramOp::copy_from_indirect_unmasked;
+ ctx->src = SlotA();
+ ctx->dst = tempStackPtr;
+ } else if (inst.fOp == BuilderOp::push_uniform_indirect) {
+ op = ProgramOp::copy_from_indirect_uniform_unmasked;
+ ctx->src = UniformA();
+ ctx->dst = tempStackPtr;
+ } else {
+ op = ProgramOp::copy_to_indirect_masked;
+ ctx->src = tempStackPtr - (ctx->slots * N);
+ ctx->dst = SlotA();
+ }
+ pipeline->push_back({op, ctx});
+ break;
+ }
+ case BuilderOp::push_uniform: {
+ float* dst = tempStackPtr;
+ this->appendCopyConstants(pipeline, alloc, dst, UniformA(), inst.fImmA);
+ break;
+ }
+ case BuilderOp::push_zeros: {
+ float* dst = tempStackPtr;
+ this->appendMultiSlotUnaryOp(pipeline, ProgramOp::zero_slot_unmasked, dst,
+ inst.fImmA);
+ break;
+ }
+ case BuilderOp::push_condition_mask: {
+ float* dst = tempStackPtr;
+ pipeline->push_back({ProgramOp::store_condition_mask, dst});
+ break;
+ }
+ case BuilderOp::pop_condition_mask: {
+ float* src = tempStackPtr - (1 * N);
+ pipeline->push_back({ProgramOp::load_condition_mask, src});
+ break;
+ }
+ case BuilderOp::merge_condition_mask: {
+ float* ptr = tempStackPtr - (2 * N);
+ pipeline->push_back({ProgramOp::merge_condition_mask, ptr});
+ break;
+ }
+ case BuilderOp::push_loop_mask: {
+ float* dst = tempStackPtr;
+ pipeline->push_back({ProgramOp::store_loop_mask, dst});
+ break;
+ }
+ case BuilderOp::pop_loop_mask: {
+ float* src = tempStackPtr - (1 * N);
+ pipeline->push_back({ProgramOp::load_loop_mask, src});
+ break;
+ }
+ case BuilderOp::pop_and_reenable_loop_mask: {
+ float* src = tempStackPtr - (1 * N);
+ pipeline->push_back({ProgramOp::reenable_loop_mask, src});
+ break;
+ }
+ case BuilderOp::reenable_loop_mask:
+ pipeline->push_back({ProgramOp::reenable_loop_mask, SlotA()});
+ break;
+
+ case BuilderOp::mask_off_loop_mask:
+ pipeline->push_back({ProgramOp::mask_off_loop_mask, nullptr});
+ break;
+
+ case BuilderOp::merge_loop_mask: {
+ float* src = tempStackPtr - (1 * N);
+ pipeline->push_back({ProgramOp::merge_loop_mask, src});
+ break;
+ }
+ case BuilderOp::push_return_mask: {
+ float* dst = tempStackPtr;
+ pipeline->push_back({ProgramOp::store_return_mask, dst});
+ break;
+ }
+ case BuilderOp::pop_return_mask: {
+ float* src = tempStackPtr - (1 * N);
+ pipeline->push_back({ProgramOp::load_return_mask, src});
+ break;
+ }
+ case BuilderOp::mask_off_return_mask:
+ pipeline->push_back({ProgramOp::mask_off_return_mask, nullptr});
+ break;
+
+ case BuilderOp::copy_constant:
+ case BuilderOp::push_literal: {
+ float* dst = (inst.fOp == BuilderOp::push_literal) ? tempStackPtr : SlotA();
+ int* constantPtr;
+ if (int** lookup = constantLookupMap.find(inst.fImmA)) {
+ constantPtr = *lookup;
+ } else {
+ constantPtr = alloc->make<int>(inst.fImmA);
+ constantLookupMap[inst.fImmA] = constantPtr;
+ }
+ SkASSERT(constantPtr);
+ this->appendCopyConstants(pipeline, alloc, dst, (float*)constantPtr,/*numSlots=*/1);
+ break;
+ }
+ case BuilderOp::copy_stack_to_slots: {
+ float* src = tempStackPtr - (inst.fImmB * N);
+ this->appendCopySlotsMasked(pipeline, alloc, SlotA(), src, inst.fImmA);
+ break;
+ }
+ case BuilderOp::copy_stack_to_slots_unmasked: {
+ float* src = tempStackPtr - (inst.fImmB * N);
+ this->appendCopySlotsUnmasked(pipeline, alloc, SlotA(), src, inst.fImmA);
+ break;
+ }
+ case BuilderOp::swizzle_copy_stack_to_slots: {
+ // SlotA: fixed-range start
+ // immA: number of swizzle components
+ // immB: swizzle components
+ // immC: offset from stack top
+ auto stage = (ProgramOp)((int)ProgramOp::swizzle_copy_slot_masked + inst.fImmA - 1);
+ auto* ctx = alloc->make<SkRasterPipeline_SwizzleCopyCtx>();
+ ctx->src = tempStackPtr - (inst.fImmC * N);
+ ctx->dst = SlotA();
+ unpack_nybbles_to_offsets(inst.fImmB, SkSpan(ctx->offsets));
+ pipeline->push_back({stage, ctx});
+ break;
+ }
+ case BuilderOp::push_clone: {
+ float* src = tempStackPtr - (inst.fImmB * N);
+ float* dst = tempStackPtr;
+ this->appendCopySlotsUnmasked(pipeline, alloc, dst, src, inst.fImmA);
+ break;
+ }
+ case BuilderOp::push_clone_from_stack: {
+ // immA: number of slots
+ // immB: other stack ID
+ // immC: offset from stack top
+ float* sourceStackPtr = tempStackMap[inst.fImmB];
+ float* src = sourceStackPtr - (inst.fImmC * N);
+ float* dst = tempStackPtr;
+ this->appendCopySlotsUnmasked(pipeline, alloc, dst, src, inst.fImmA);
+ break;
+ }
+ case BuilderOp::push_clone_indirect_from_stack: {
+ // immA: number of slots
+ // immB: other stack ID
+ // immC: offset from stack top
+ // immD: dynamic stack ID
+ float* sourceStackPtr = tempStackMap[inst.fImmB];
+
+ auto* ctx = alloc->make<SkRasterPipeline_CopyIndirectCtx>();
+ ctx->dst = tempStackPtr;
+ ctx->src = sourceStackPtr - (inst.fImmC * N);
+ ctx->indirectOffset =
+ reinterpret_cast<const uint32_t*>(tempStackMap[inst.fImmD]) - (1 * N);
+ ctx->indirectLimit = inst.fImmC - inst.fImmA;
+ ctx->slots = inst.fImmA;
+ pipeline->push_back({ProgramOp::copy_from_indirect_unmasked, ctx});
+ break;
+ }
+ case BuilderOp::swizzle_copy_stack_to_slots_indirect: {
+ // SlotA: fixed-range start
+ // SlotB: limit-range end
+ // immA: number of swizzle components
+ // immB: swizzle components
+ // immC: offset from stack top
+ // immD: dynamic stack ID
+ auto* ctx = alloc->make<SkRasterPipeline_SwizzleCopyIndirectCtx>();
+ ctx->src = tempStackPtr - (inst.fImmC * N);
+ ctx->dst = SlotA();
+ ctx->indirectOffset =
+ reinterpret_cast<const uint32_t*>(tempStackMap[inst.fImmD]) - (1 * N);
+ ctx->indirectLimit =
+ inst.fSlotB - inst.fSlotA - (max_packed_nybble(inst.fImmB, inst.fImmA) + 1);
+ ctx->slots = inst.fImmA;
+ unpack_nybbles_to_offsets(inst.fImmB, SkSpan(ctx->offsets));
+ pipeline->push_back({ProgramOp::swizzle_copy_to_indirect_masked, ctx});
+ break;
+ }
+ case BuilderOp::case_op: {
+ auto* ctx = alloc->make<SkRasterPipeline_CaseOpCtx>();
+ ctx->ptr = reinterpret_cast<int*>(tempStackPtr - 2 * N);
+ ctx->expectedValue = inst.fImmA;
+ pipeline->push_back({ProgramOp::case_op, ctx});
+ break;
+ }
+ case BuilderOp::discard_stack:
+ break;
+
+ case BuilderOp::set_current_stack:
+ currentStack = inst.fImmA;
+ break;
+
+ case BuilderOp::invoke_shader:
+ case BuilderOp::invoke_color_filter:
+ case BuilderOp::invoke_blender:
+ pipeline->push_back({(ProgramOp)inst.fOp, context_bit_pun(inst.fImmA)});
+ break;
+
+ case BuilderOp::invoke_to_linear_srgb:
+ case BuilderOp::invoke_from_linear_srgb:
+ pipeline->push_back({(ProgramOp)inst.fOp, nullptr});
+ break;
+
+ default:
+ SkDEBUGFAILF("Raster Pipeline: unsupported instruction %d", (int)inst.fOp);
+ break;
+ }
+
+ tempStackPtr += stack_usage(inst) * N;
+ SkASSERT(tempStackPtr >= slots.stack.begin());
+ SkASSERT(tempStackPtr <= slots.stack.end());
+
+ // Periodically rewind the stack every 500 instructions. When SK_HAS_MUSTTAIL is set,
+ // rewinds are not actually used; the appendStackRewind call becomes a no-op. On platforms
+ // that don't support SK_HAS_MUSTTAIL, rewinding the stack periodically can prevent a
+ // potential stack overflow when running a long program.
+ int numPipelineStages = pipeline->size();
+ if (numPipelineStages - mostRecentRewind > 500) {
+ this->appendStackRewind(pipeline);
+ mostRecentRewind = numPipelineStages;
+ }
+ }
+}
+
+// Finds duplicate names in the program and disambiguates them with subscripts.
+SkTArray<std::string> build_unique_slot_name_list(const SkRPDebugTrace* debugTrace) {
+ SkTArray<std::string> slotName;
+ if (debugTrace) {
+ slotName.reserve_back(debugTrace->fSlotInfo.size());
+
+ // The map consists of <variable name, <source position, unique name>>.
+ SkTHashMap<std::string_view, SkTHashMap<int, std::string>> uniqueNameMap;
+
+ for (const SlotDebugInfo& slotInfo : debugTrace->fSlotInfo) {
+ // Look up this variable by its name and source position.
+ int pos = slotInfo.pos.valid() ? slotInfo.pos.startOffset() : 0;
+ SkTHashMap<int, std::string>& positionMap = uniqueNameMap[slotInfo.name];
+ std::string& uniqueName = positionMap[pos];
+
+ // Have we seen this variable name/position combination before?
+ if (uniqueName.empty()) {
+ // This is a unique name/position pair.
+ uniqueName = slotInfo.name;
+
+ // But if it's not a unique _name_, it deserves a subscript to disambiguate it.
+ int subscript = positionMap.count() - 1;
+ if (subscript > 0) {
+ for (char digit : std::to_string(subscript)) {
+ // U+2080 through U+2089 (₀₁₂₃₄₅₆₇₈₉) in UTF8:
+ uniqueName.push_back((char)0xE2);
+ uniqueName.push_back((char)0x82);
+ uniqueName.push_back((char)(0x80 + digit - '0'));
+ }
+ }
+ }
+
+ slotName.push_back(uniqueName);
+ }
+ }
+ return slotName;
+}
+
+void Program::dump(SkWStream* out) const {
+ // Allocate memory for the slot and uniform data, even though the program won't ever be
+ // executed. The program requires pointer ranges for managing its data, and ASAN will report
+ // errors if those pointers are pointing at unallocated memory.
+ SkArenaAlloc alloc(/*firstHeapAllocation=*/1000);
+ const int N = SkOpts::raster_pipeline_highp_stride;
+ SlotData slots = this->allocateSlotData(&alloc);
+ float* uniformPtr = alloc.makeArray<float>(fNumUniformSlots);
+ SkSpan<float> uniforms = SkSpan(uniformPtr, fNumUniformSlots);
+
+ // Turn this program into an array of Raster Pipeline stages.
+ SkTArray<Stage> stages;
+ this->makeStages(&stages, &alloc, uniforms, slots);
+
+ // Find the labels in the program, and keep track of their offsets.
+ SkTHashMap<int, int> labelToStageMap; // <label ID, stage index>
+ for (int index = 0; index < stages.size(); ++index) {
+ if (stages[index].op == ProgramOp::label) {
+ int labelID = sk_bit_cast<intptr_t>(stages[index].ctx);
+ SkASSERT(!labelToStageMap.find(labelID));
+ labelToStageMap[labelID] = index;
+ }
+ }
+
+ // Assign unique names to each variable slot; our trace might have multiple variables with the
+ // same name, which can make a dump hard to read.
+ SkTArray<std::string> slotName = build_unique_slot_name_list(fDebugTrace);
+
+ // Emit the program's instruction list.
+ for (int index = 0; index < stages.size(); ++index) {
+ const Stage& stage = stages[index];
+
+ // Interpret the context value as a branch offset.
+ auto BranchOffset = [&](const SkRasterPipeline_BranchCtx* ctx) -> std::string {
+ // The context's offset field contains a label ID
+ int labelID = ctx->offset;
+ SkASSERT(labelToStageMap.find(labelID));
+ int labelIndex = labelToStageMap[labelID];
+ return SkSL::String::printf("%+d (label %d at #%d)",
+ labelIndex - index, labelID, labelIndex + 1);
+ };
+
+ // Print a 32-bit immediate value of unknown type (int/float).
+ auto Imm = [&](float immFloat, bool showAsFloat = true) -> std::string {
+ // Start with `0x3F800000` as a baseline.
+ uint32_t immUnsigned;
+ memcpy(&immUnsigned, &immFloat, sizeof(uint32_t));
+ auto text = SkSL::String::printf("0x%08X", immUnsigned);
+
+ // Extend it to `0x3F800000 (1.0)` for finite floating point values.
+ if (showAsFloat && std::isfinite(immFloat)) {
+ text += " (";
+ text += skstd::to_string(immFloat);
+ text += ")";
+ }
+ return text;
+ };
+
+ // Interpret the context pointer as a 32-bit immediate value of unknown type (int/float).
+ auto ImmCtx = [&](const void* ctx, bool showAsFloat = true) -> std::string {
+ float f;
+ memcpy(&f, &ctx, sizeof(float));
+ return Imm(f, showAsFloat);
+ };
+
+ // Print `1` for single slots and `1..3` for ranges of slots.
+ auto AsRange = [](int first, int count) -> std::string {
+ std::string text = std::to_string(first);
+ if (count > 1) {
+ text += ".." + std::to_string(first + count - 1);
+ }
+ return text;
+ };
+
+ // Come up with a reasonable name for a range of slots, e.g.:
+ // `val`: slot range points at one variable, named val
+ // `val(0..1)`: slot range points at the first and second slot of val (which has 3+ slots)
+ // `foo, bar`: slot range fully covers two variables, named foo and bar
+ // `foo(3), bar(0)`: slot range covers the fourth slot of foo and the first slot of bar
+ auto SlotName = [&](SkSpan<const SlotDebugInfo> debugInfo,
+ SkSpan<const std::string> names,
+ SlotRange range) -> std::string {
+ SkASSERT(range.index >= 0 && (range.index + range.count) <= (int)debugInfo.size());
+
+ std::string text;
+ auto separator = SkSL::String::Separator();
+ while (range.count > 0) {
+ const SlotDebugInfo& slotInfo = debugInfo[range.index];
+ text += separator();
+ text += names.empty() ? slotInfo.name : names[range.index];
+
+ // Figure out how many slots we can chomp in this iteration.
+ int entireVariable = slotInfo.columns * slotInfo.rows;
+ int slotsToChomp = std::min(range.count, entireVariable - slotInfo.componentIndex);
+ // If we aren't consuming an entire variable, from first slot to last...
+ if (slotsToChomp != entireVariable) {
+ // ... decorate it with a range suffix.
+ text += "(" + AsRange(slotInfo.componentIndex, slotsToChomp) + ")";
+ }
+ range.index += slotsToChomp;
+ range.count -= slotsToChomp;
+ }
+
+ return text;
+ };
+
+ // Attempts to interpret the passed-in pointer as a uniform range.
+ auto UniformPtrCtx = [&](const float* ptr, int numSlots) -> std::string {
+ const float* end = ptr + numSlots;
+ if (ptr >= uniforms.begin() && end <= uniforms.end()) {
+ int uniformIdx = ptr - uniforms.begin();
+ if (fDebugTrace) {
+ // Handle pointers to named uniform slots.
+ std::string name = SlotName(fDebugTrace->fUniformInfo, /*names=*/{},
+ {uniformIdx, numSlots});
+ if (!name.empty()) {
+ return name;
+ }
+ }
+ // Handle pointers to uniforms (when no debug info exists).
+ return "u" + AsRange(uniformIdx, numSlots);
+ }
+ return {};
+ };
+
+ // Attempts to interpret the passed-in pointer as a value slot range.
+ auto ValuePtrCtx = [&](const float* ptr, int numSlots) -> std::string {
+ const float* end = ptr + (N * numSlots);
+ if (ptr >= slots.values.begin() && end <= slots.values.end()) {
+ int valueIdx = ptr - slots.values.begin();
+ SkASSERT((valueIdx % N) == 0);
+ valueIdx /= N;
+ if (fDebugTrace) {
+ // Handle pointers to named value slots.
+ std::string name = SlotName(fDebugTrace->fSlotInfo, slotName,
+ {valueIdx, numSlots});
+ if (!name.empty()) {
+ return name;
+ }
+ }
+ // Handle pointers to value slots (when no debug info exists).
+ return "v" + AsRange(valueIdx, numSlots);
+ }
+ return {};
+ };
+
+ // Interpret the context value as a pointer to `count` immediate values.
+ auto MultiImmCtx = [&](const float* ptr, int count) -> std::string {
+ // If this is a uniform, print it by name.
+ if (std::string text = UniformPtrCtx(ptr, count); !text.empty()) {
+ return text;
+ }
+ // Emit a single unbracketed immediate.
+ if (count == 1) {
+ return Imm(*ptr);
+ }
+ // Emit a list like `[0x00000000 (0.0), 0x3F80000 (1.0)]`.
+ std::string text = "[";
+ auto separator = SkSL::String::Separator();
+ while (count--) {
+ text += separator();
+ text += Imm(*ptr++);
+ }
+ return text + "]";
+ };
+
+ // Interpret the context value as a generic pointer.
+ auto PtrCtx = [&](const void* ctx, int numSlots) -> std::string {
+ const float *ctxAsSlot = static_cast<const float*>(ctx);
+ // Check for uniform and value pointers.
+ if (std::string uniform = UniformPtrCtx(ctxAsSlot, numSlots); !uniform.empty()) {
+ return uniform;
+ }
+ if (std::string value = ValuePtrCtx(ctxAsSlot, numSlots); !value.empty()) {
+ return value;
+ }
+ // Handle pointers to temporary stack slots.
+ if (ctxAsSlot >= slots.stack.begin() && ctxAsSlot < slots.stack.end()) {
+ int stackIdx = ctxAsSlot - slots.stack.begin();
+ SkASSERT((stackIdx % N) == 0);
+ return "$" + AsRange(stackIdx / N, numSlots);
+ }
+ // This pointer is out of our expected bounds; this generally isn't expected to happen.
+ return "ExternalPtr(" + AsRange(0, numSlots) + ")";
+ };
+
+ // Interpret the context value as a pointer to two adjacent values.
+ auto AdjacentPtrCtx = [&](const void* ctx,
+ int numSlots) -> std::tuple<std::string, std::string> {
+ const float *ctxAsSlot = static_cast<const float*>(ctx);
+ return std::make_tuple(PtrCtx(ctxAsSlot, numSlots),
+ PtrCtx(ctxAsSlot + (N * numSlots), numSlots));
+ };
+
+ // Interpret the context value as a pointer to three adjacent values.
+ auto Adjacent3PtrCtx = [&](const void* ctx, int numSlots) ->
+ std::tuple<std::string, std::string, std::string> {
+ const float *ctxAsSlot = static_cast<const float*>(ctx);
+ return std::make_tuple(PtrCtx(ctxAsSlot, numSlots),
+ PtrCtx(ctxAsSlot + (N * numSlots), numSlots),
+ PtrCtx(ctxAsSlot + (2 * N * numSlots), numSlots));
+ };
+
+ // Interpret the context value as a BinaryOp structure for copy_n_slots (numSlots is
+ // dictated by the op itself).
+ auto BinaryOpCtx = [&](const void* v,
+ int numSlots) -> std::tuple<std::string, std::string> {
+ const auto *ctx = static_cast<const SkRasterPipeline_BinaryOpCtx*>(v);
+ return std::make_tuple(PtrCtx(ctx->dst, numSlots),
+ PtrCtx(ctx->src, numSlots));
+ };
+
+ // Interpret the context value as a BinaryOp structure for copy_n_constants (numSlots is
+ // dictated by the op itself).
+ auto CopyConstantCtx = [&](const void* v,
+ int numSlots) -> std::tuple<std::string, std::string> {
+ const auto *ctx = static_cast<const SkRasterPipeline_BinaryOpCtx*>(v);
+ return std::make_tuple(PtrCtx(ctx->dst, numSlots),
+ MultiImmCtx(ctx->src, numSlots));
+ };
+
+ // Interpret the context value as a BinaryOp structure (numSlots is inferred from the
+ // distance between pointers).
+ auto AdjacentBinaryOpCtx = [&](const void* v) -> std::tuple<std::string, std::string> {
+ const auto *ctx = static_cast<const SkRasterPipeline_BinaryOpCtx*>(v);
+ int numSlots = (ctx->src - ctx->dst) / N;
+ return AdjacentPtrCtx(ctx->dst, numSlots);
+ };
+
+ // Interpret the context value as a TernaryOp structure (numSlots is inferred from the
+ // distance between pointers).
+ auto AdjacentTernaryOpCtx = [&](const void* v) ->
+ std::tuple<std::string, std::string, std::string> {
+ const auto* ctx = static_cast<const SkRasterPipeline_TernaryOpCtx*>(v);
+ int numSlots = (ctx->src0 - ctx->dst) / N;
+ return Adjacent3PtrCtx(ctx->dst, numSlots);
+ };
+
+ // Stringize a span of swizzle offsets to the textual equivalent (`xyzw`).
+ auto SwizzleOffsetSpan = [&](SkSpan<const uint16_t> offsets) {
+ std::string src;
+ for (uint16_t offset : offsets) {
+ if (offset == (0 * N * sizeof(float))) {
+ src.push_back('x');
+ } else if (offset == (1 * N * sizeof(float))) {
+ src.push_back('y');
+ } else if (offset == (2 * N * sizeof(float))) {
+ src.push_back('z');
+ } else if (offset == (3 * N * sizeof(float))) {
+ src.push_back('w');
+ } else {
+ src.push_back('?');
+ }
+ }
+ return src;
+ };
+
+ // When we decode a swizzle, we don't know the slot width of the original value; that's not
+ // preserved in the instruction encoding. (e.g., myFloat4.y would be indistinguishable from
+ // myFloat2.y.) We do our best to make a readable dump using the data we have.
+ auto SwizzleWidth = [&](SkSpan<const uint16_t> offsets) {
+ size_t highestComponent = *std::max_element(offsets.begin(), offsets.end()) /
+ (N * sizeof(float));
+ size_t swizzleWidth = offsets.size();
+ return std::max(swizzleWidth, highestComponent + 1);
+ };
+
+ // Stringize a swizzled pointer.
+ auto SwizzlePtr = [&](const float* ptr, SkSpan<const uint16_t> offsets) {
+ return "(" + PtrCtx(ptr, SwizzleWidth(offsets)) + ")." + SwizzleOffsetSpan(offsets);
+ };
+
+ // Interpret the context value as a Swizzle structure.
+ auto SwizzleCtx = [&](ProgramOp op, const void* v) -> std::tuple<std::string, std::string> {
+ const auto* ctx = static_cast<const SkRasterPipeline_SwizzleCtx*>(v);
+ int destSlots = (int)op - (int)BuilderOp::swizzle_1 + 1;
+
+ return std::make_tuple(PtrCtx(ctx->ptr, destSlots),
+ SwizzlePtr(ctx->ptr, SkSpan(ctx->offsets, destSlots)));
+ };
+
+ // Interpret the context value as a SwizzleCopy structure.
+ auto SwizzleCopyCtx = [&](ProgramOp op,
+ const void* v) -> std::tuple<std::string, std::string> {
+ const auto* ctx = static_cast<const SkRasterPipeline_SwizzleCopyCtx*>(v);
+ int destSlots = (int)op - (int)BuilderOp::swizzle_copy_slot_masked + 1;
+
+ return std::make_tuple(SwizzlePtr(ctx->dst, SkSpan(ctx->offsets, destSlots)),
+ PtrCtx(ctx->src, destSlots));
+ };
+
+ // Interpret the context value as a Shuffle structure.
+ auto ShuffleCtx = [&](const void* v) -> std::tuple<std::string, std::string> {
+ const auto* ctx = static_cast<const SkRasterPipeline_ShuffleCtx*>(v);
+
+ std::string dst = PtrCtx(ctx->ptr, ctx->count);
+ std::string src = "(" + dst + ")[";
+ for (int index = 0; index < ctx->count; ++index) {
+ if (ctx->offsets[index] % (N * sizeof(float))) {
+ src.push_back('?');
+ } else {
+ src += std::to_string(ctx->offsets[index] / (N * sizeof(float)));
+ }
+ src.push_back(' ');
+ }
+ src.back() = ']';
+ return std::make_tuple(dst, src);
+ };
+
+ std::string opArg1, opArg2, opArg3, opSwizzle;
+ using POp = ProgramOp;
+ switch (stage.op) {
+ case POp::label:
+ case POp::invoke_shader:
+ case POp::invoke_color_filter:
+ case POp::invoke_blender:
+ opArg1 = ImmCtx(stage.ctx, /*showAsFloat=*/false);
+ break;
+
+ case POp::case_op: {
+ const auto* ctx = static_cast<SkRasterPipeline_CaseOpCtx*>(stage.ctx);
+ opArg1 = PtrCtx(ctx->ptr, 1);
+ opArg2 = PtrCtx(ctx->ptr + N, 1);
+ opArg3 = Imm(sk_bit_cast<float>(ctx->expectedValue), /*showAsFloat=*/false);
+ break;
+ }
+ case POp::swizzle_1:
+ case POp::swizzle_2:
+ case POp::swizzle_3:
+ case POp::swizzle_4:
+ std::tie(opArg1, opArg2) = SwizzleCtx(stage.op, stage.ctx);
+ break;
+
+ case POp::swizzle_copy_slot_masked:
+ case POp::swizzle_copy_2_slots_masked:
+ case POp::swizzle_copy_3_slots_masked:
+ case POp::swizzle_copy_4_slots_masked:
+ std::tie(opArg1, opArg2) = SwizzleCopyCtx(stage.op, stage.ctx);
+ break;
+
+ case POp::refract_4_floats:
+ std::tie(opArg1, opArg2) = AdjacentPtrCtx(stage.ctx, 4);
+ opArg3 = PtrCtx((const float*)(stage.ctx) + (8 * N), 1);
+ break;
+
+ case POp::dot_2_floats:
+ opArg1 = PtrCtx(stage.ctx, 1);
+ std::tie(opArg2, opArg3) = AdjacentPtrCtx(stage.ctx, 2);
+ break;
+
+ case POp::dot_3_floats:
+ opArg1 = PtrCtx(stage.ctx, 1);
+ std::tie(opArg2, opArg3) = AdjacentPtrCtx(stage.ctx, 3);
+ break;
+
+ case POp::dot_4_floats:
+ opArg1 = PtrCtx(stage.ctx, 1);
+ std::tie(opArg2, opArg3) = AdjacentPtrCtx(stage.ctx, 4);
+ break;
+
+ case POp::shuffle:
+ std::tie(opArg1, opArg2) = ShuffleCtx(stage.ctx);
+ break;
+
+ case POp::load_condition_mask:
+ case POp::store_condition_mask:
+ case POp::load_loop_mask:
+ case POp::store_loop_mask:
+ case POp::merge_loop_mask:
+ case POp::reenable_loop_mask:
+ case POp::load_return_mask:
+ case POp::store_return_mask:
+ case POp::zero_slot_unmasked:
+ case POp::bitwise_not_int:
+ case POp::cast_to_float_from_int: case POp::cast_to_float_from_uint:
+ case POp::cast_to_int_from_float: case POp::cast_to_uint_from_float:
+ case POp::abs_float: case POp::abs_int:
+ case POp::acos_float:
+ case POp::asin_float:
+ case POp::atan_float:
+ case POp::ceil_float:
+ case POp::cos_float:
+ case POp::exp_float:
+ case POp::exp2_float:
+ case POp::log_float:
+ case POp::log2_float:
+ case POp::floor_float:
+ case POp::invsqrt_float:
+ case POp::sin_float:
+ case POp::sqrt_float:
+ case POp::tan_float:
+ opArg1 = PtrCtx(stage.ctx, 1);
+ break;
+
+ case POp::zero_2_slots_unmasked:
+ case POp::bitwise_not_2_ints:
+ case POp::load_src_rg: case POp::store_src_rg:
+ case POp::cast_to_float_from_2_ints: case POp::cast_to_float_from_2_uints:
+ case POp::cast_to_int_from_2_floats: case POp::cast_to_uint_from_2_floats:
+ case POp::abs_2_floats: case POp::abs_2_ints:
+ case POp::ceil_2_floats:
+ case POp::floor_2_floats:
+ case POp::invsqrt_2_floats:
+ opArg1 = PtrCtx(stage.ctx, 2);
+ break;
+
+ case POp::zero_3_slots_unmasked:
+ case POp::bitwise_not_3_ints:
+ case POp::cast_to_float_from_3_ints: case POp::cast_to_float_from_3_uints:
+ case POp::cast_to_int_from_3_floats: case POp::cast_to_uint_from_3_floats:
+ case POp::abs_3_floats: case POp::abs_3_ints:
+ case POp::ceil_3_floats:
+ case POp::floor_3_floats:
+ case POp::invsqrt_3_floats:
+ opArg1 = PtrCtx(stage.ctx, 3);
+ break;
+
+ case POp::load_src:
+ case POp::load_dst:
+ case POp::store_src:
+ case POp::store_dst:
+ case POp::store_device_xy01:
+ case POp::zero_4_slots_unmasked:
+ case POp::bitwise_not_4_ints:
+ case POp::cast_to_float_from_4_ints: case POp::cast_to_float_from_4_uints:
+ case POp::cast_to_int_from_4_floats: case POp::cast_to_uint_from_4_floats:
+ case POp::abs_4_floats: case POp::abs_4_ints:
+ case POp::ceil_4_floats:
+ case POp::floor_4_floats:
+ case POp::invsqrt_4_floats:
+ case POp::inverse_mat2:
+ opArg1 = PtrCtx(stage.ctx, 4);
+ break;
+
+ case POp::inverse_mat3:
+ opArg1 = PtrCtx(stage.ctx, 9);
+ break;
+
+ case POp::inverse_mat4:
+ opArg1 = PtrCtx(stage.ctx, 16);
+ break;
+
+
+ case POp::copy_constant:
+ std::tie(opArg1, opArg2) = CopyConstantCtx(stage.ctx, 1);
+ break;
+
+ case POp::copy_2_constants:
+ std::tie(opArg1, opArg2) = CopyConstantCtx(stage.ctx, 2);
+ break;
+
+ case POp::copy_3_constants:
+ std::tie(opArg1, opArg2) = CopyConstantCtx(stage.ctx, 3);
+ break;
+
+ case POp::copy_4_constants:
+ std::tie(opArg1, opArg2) = CopyConstantCtx(stage.ctx, 4);
+ break;
+
+ case POp::copy_slot_masked:
+ case POp::copy_slot_unmasked:
+ std::tie(opArg1, opArg2) = BinaryOpCtx(stage.ctx, 1);
+ break;
+
+ case POp::copy_2_slots_masked:
+ case POp::copy_2_slots_unmasked:
+ std::tie(opArg1, opArg2) = BinaryOpCtx(stage.ctx, 2);
+ break;
+
+ case POp::copy_3_slots_masked:
+ case POp::copy_3_slots_unmasked:
+ std::tie(opArg1, opArg2) = BinaryOpCtx(stage.ctx, 3);
+ break;
+
+ case POp::copy_4_slots_masked:
+ case POp::copy_4_slots_unmasked:
+ std::tie(opArg1, opArg2) = BinaryOpCtx(stage.ctx, 4);
+ break;
+
+ case POp::copy_from_indirect_unmasked:
+ case POp::copy_to_indirect_masked: {
+ const auto* ctx = static_cast<SkRasterPipeline_CopyIndirectCtx*>(stage.ctx);
+ // We don't incorporate the indirect-limit in the output
+ opArg1 = PtrCtx(ctx->dst, ctx->slots);
+ opArg2 = PtrCtx(ctx->src, ctx->slots);
+ opArg3 = PtrCtx(ctx->indirectOffset, 1);
+ break;
+ }
+ case POp::copy_from_indirect_uniform_unmasked: {
+ const auto* ctx = static_cast<SkRasterPipeline_CopyIndirectCtx*>(stage.ctx);
+ opArg1 = PtrCtx(ctx->dst, ctx->slots);
+ opArg2 = UniformPtrCtx(ctx->src, ctx->slots);
+ opArg3 = PtrCtx(ctx->indirectOffset, 1);
+ break;
+ }
+ case POp::swizzle_copy_to_indirect_masked: {
+ const auto* ctx = static_cast<SkRasterPipeline_SwizzleCopyIndirectCtx*>(stage.ctx);
+ opArg1 = PtrCtx(ctx->dst, SwizzleWidth(SkSpan(ctx->offsets, ctx->slots)));
+ opArg2 = PtrCtx(ctx->src, ctx->slots);
+ opArg3 = PtrCtx(ctx->indirectOffset, 1);
+ opSwizzle = SwizzleOffsetSpan(SkSpan(ctx->offsets, ctx->slots));
+ break;
+ }
+ case POp::merge_condition_mask:
+ case POp::add_float: case POp::add_int:
+ case POp::sub_float: case POp::sub_int:
+ case POp::mul_float: case POp::mul_int:
+ case POp::div_float: case POp::div_int: case POp::div_uint:
+ case POp::bitwise_and_int:
+ case POp::bitwise_or_int:
+ case POp::bitwise_xor_int:
+ case POp::mod_float:
+ case POp::min_float: case POp::min_int: case POp::min_uint:
+ case POp::max_float: case POp::max_int: case POp::max_uint:
+ case POp::cmplt_float: case POp::cmplt_int: case POp::cmplt_uint:
+ case POp::cmple_float: case POp::cmple_int: case POp::cmple_uint:
+ case POp::cmpeq_float: case POp::cmpeq_int:
+ case POp::cmpne_float: case POp::cmpne_int:
+ std::tie(opArg1, opArg2) = AdjacentPtrCtx(stage.ctx, 1);
+ break;
+
+ case POp::mix_float: case POp::mix_int:
+ std::tie(opArg1, opArg2, opArg3) = Adjacent3PtrCtx(stage.ctx, 1);
+ break;
+
+ case POp::add_2_floats: case POp::add_2_ints:
+ case POp::sub_2_floats: case POp::sub_2_ints:
+ case POp::mul_2_floats: case POp::mul_2_ints:
+ case POp::div_2_floats: case POp::div_2_ints: case POp::div_2_uints:
+ case POp::bitwise_and_2_ints:
+ case POp::bitwise_or_2_ints:
+ case POp::bitwise_xor_2_ints:
+ case POp::mod_2_floats:
+ case POp::min_2_floats: case POp::min_2_ints: case POp::min_2_uints:
+ case POp::max_2_floats: case POp::max_2_ints: case POp::max_2_uints:
+ case POp::cmplt_2_floats: case POp::cmplt_2_ints: case POp::cmplt_2_uints:
+ case POp::cmple_2_floats: case POp::cmple_2_ints: case POp::cmple_2_uints:
+ case POp::cmpeq_2_floats: case POp::cmpeq_2_ints:
+ case POp::cmpne_2_floats: case POp::cmpne_2_ints:
+ std::tie(opArg1, opArg2) = AdjacentPtrCtx(stage.ctx, 2);
+ break;
+
+ case POp::mix_2_floats: case POp::mix_2_ints:
+ std::tie(opArg1, opArg2, opArg3) = Adjacent3PtrCtx(stage.ctx, 2);
+ break;
+
+ case POp::add_3_floats: case POp::add_3_ints:
+ case POp::sub_3_floats: case POp::sub_3_ints:
+ case POp::mul_3_floats: case POp::mul_3_ints:
+ case POp::div_3_floats: case POp::div_3_ints: case POp::div_3_uints:
+ case POp::bitwise_and_3_ints:
+ case POp::bitwise_or_3_ints:
+ case POp::bitwise_xor_3_ints:
+ case POp::mod_3_floats:
+ case POp::min_3_floats: case POp::min_3_ints: case POp::min_3_uints:
+ case POp::max_3_floats: case POp::max_3_ints: case POp::max_3_uints:
+ case POp::cmplt_3_floats: case POp::cmplt_3_ints: case POp::cmplt_3_uints:
+ case POp::cmple_3_floats: case POp::cmple_3_ints: case POp::cmple_3_uints:
+ case POp::cmpeq_3_floats: case POp::cmpeq_3_ints:
+ case POp::cmpne_3_floats: case POp::cmpne_3_ints:
+ std::tie(opArg1, opArg2) = AdjacentPtrCtx(stage.ctx, 3);
+ break;
+
+ case POp::mix_3_floats: case POp::mix_3_ints:
+ std::tie(opArg1, opArg2, opArg3) = Adjacent3PtrCtx(stage.ctx, 3);
+ break;
+
+ case POp::add_4_floats: case POp::add_4_ints:
+ case POp::sub_4_floats: case POp::sub_4_ints:
+ case POp::mul_4_floats: case POp::mul_4_ints:
+ case POp::div_4_floats: case POp::div_4_ints: case POp::div_4_uints:
+ case POp::bitwise_and_4_ints:
+ case POp::bitwise_or_4_ints:
+ case POp::bitwise_xor_4_ints:
+ case POp::mod_4_floats:
+ case POp::min_4_floats: case POp::min_4_ints: case POp::min_4_uints:
+ case POp::max_4_floats: case POp::max_4_ints: case POp::max_4_uints:
+ case POp::cmplt_4_floats: case POp::cmplt_4_ints: case POp::cmplt_4_uints:
+ case POp::cmple_4_floats: case POp::cmple_4_ints: case POp::cmple_4_uints:
+ case POp::cmpeq_4_floats: case POp::cmpeq_4_ints:
+ case POp::cmpne_4_floats: case POp::cmpne_4_ints:
+ std::tie(opArg1, opArg2) = AdjacentPtrCtx(stage.ctx, 4);
+ break;
+
+ case POp::mix_4_floats: case POp::mix_4_ints:
+ std::tie(opArg1, opArg2, opArg3) = Adjacent3PtrCtx(stage.ctx, 4);
+ break;
+
+ case POp::add_n_floats: case POp::add_n_ints:
+ case POp::sub_n_floats: case POp::sub_n_ints:
+ case POp::mul_n_floats: case POp::mul_n_ints:
+ case POp::div_n_floats: case POp::div_n_ints: case POp::div_n_uints:
+ case POp::bitwise_and_n_ints:
+ case POp::bitwise_or_n_ints:
+ case POp::bitwise_xor_n_ints:
+ case POp::mod_n_floats:
+ case POp::min_n_floats: case POp::min_n_ints: case POp::min_n_uints:
+ case POp::max_n_floats: case POp::max_n_ints: case POp::max_n_uints:
+ case POp::cmplt_n_floats: case POp::cmplt_n_ints: case POp::cmplt_n_uints:
+ case POp::cmple_n_floats: case POp::cmple_n_ints: case POp::cmple_n_uints:
+ case POp::cmpeq_n_floats: case POp::cmpeq_n_ints:
+ case POp::cmpne_n_floats: case POp::cmpne_n_ints:
+ case POp::atan2_n_floats:
+ case POp::pow_n_floats:
+ std::tie(opArg1, opArg2) = AdjacentBinaryOpCtx(stage.ctx);
+ break;
+
+ case POp::mix_n_floats: case POp::mix_n_ints:
+ case POp::smoothstep_n_floats:
+ std::tie(opArg1, opArg2, opArg3) = AdjacentTernaryOpCtx(stage.ctx);
+ break;
+
+ case POp::jump:
+ case POp::branch_if_all_lanes_active:
+ case POp::branch_if_any_lanes_active:
+ case POp::branch_if_no_lanes_active:
+ opArg1 = BranchOffset(static_cast<SkRasterPipeline_BranchCtx*>(stage.ctx));
+ break;
+
+ case POp::branch_if_no_active_lanes_eq: {
+ const auto* ctx = static_cast<SkRasterPipeline_BranchIfEqualCtx*>(stage.ctx);
+ opArg1 = BranchOffset(ctx);
+ opArg2 = PtrCtx(ctx->ptr, 1);
+ opArg3 = Imm(sk_bit_cast<float>(ctx->value));
+ break;
+ }
+ default:
+ break;
+ }
+
+ std::string_view opName;
+ switch (stage.op) {
+ #define M(x) case POp::x: opName = #x; break;
+ SK_RASTER_PIPELINE_OPS_ALL(M)
+ #undef M
+ case POp::label: opName = "label"; break;
+ case POp::invoke_shader: opName = "invoke_shader"; break;
+ case POp::invoke_color_filter: opName = "invoke_color_filter"; break;
+ case POp::invoke_blender: opName = "invoke_blender"; break;
+ case POp::invoke_to_linear_srgb: opName = "invoke_to_linear_srgb"; break;
+ case POp::invoke_from_linear_srgb: opName = "invoke_from_linear_srgb"; break;
+ }
+
+ std::string opText;
+ switch (stage.op) {
+ case POp::init_lane_masks:
+ opText = "CondMask = LoopMask = RetMask = true";
+ break;
+
+ case POp::load_condition_mask:
+ opText = "CondMask = " + opArg1;
+ break;
+
+ case POp::store_condition_mask:
+ opText = opArg1 + " = CondMask";
+ break;
+
+ case POp::merge_condition_mask:
+ opText = "CondMask = " + opArg1 + " & " + opArg2;
+ break;
+
+ case POp::load_loop_mask:
+ opText = "LoopMask = " + opArg1;
+ break;
+
+ case POp::store_loop_mask:
+ opText = opArg1 + " = LoopMask";
+ break;
+
+ case POp::mask_off_loop_mask:
+ opText = "LoopMask &= ~(CondMask & LoopMask & RetMask)";
+ break;
+
+ case POp::reenable_loop_mask:
+ opText = "LoopMask |= " + opArg1;
+ break;
+
+ case POp::merge_loop_mask:
+ opText = "LoopMask &= " + opArg1;
+ break;
+
+ case POp::load_return_mask:
+ opText = "RetMask = " + opArg1;
+ break;
+
+ case POp::store_return_mask:
+ opText = opArg1 + " = RetMask";
+ break;
+
+ case POp::mask_off_return_mask:
+ opText = "RetMask &= ~(CondMask & LoopMask & RetMask)";
+ break;
+
+ case POp::store_src_rg:
+ opText = opArg1 + " = src.rg";
+ break;
+
+ case POp::store_src:
+ opText = opArg1 + " = src.rgba";
+ break;
+
+ case POp::store_dst:
+ opText = opArg1 + " = dst.rgba";
+ break;
+
+ case POp::store_device_xy01:
+ opText = opArg1 + " = DeviceCoords.xy01";
+ break;
+
+ case POp::load_src_rg:
+ opText = "src.rg = " + opArg1;
+ break;
+
+ case POp::load_src:
+ opText = "src.rgba = " + opArg1;
+ break;
+
+ case POp::load_dst:
+ opText = "dst.rgba = " + opArg1;
+ break;
+
+ case POp::bitwise_and_int:
+ case POp::bitwise_and_2_ints:
+ case POp::bitwise_and_3_ints:
+ case POp::bitwise_and_4_ints:
+ case POp::bitwise_and_n_ints:
+ opText = opArg1 + " &= " + opArg2;
+ break;
+
+ case POp::bitwise_or_int:
+ case POp::bitwise_or_2_ints:
+ case POp::bitwise_or_3_ints:
+ case POp::bitwise_or_4_ints:
+ case POp::bitwise_or_n_ints:
+ opText = opArg1 + " |= " + opArg2;
+ break;
+
+ case POp::bitwise_xor_int:
+ case POp::bitwise_xor_2_ints:
+ case POp::bitwise_xor_3_ints:
+ case POp::bitwise_xor_4_ints:
+ case POp::bitwise_xor_n_ints:
+ opText = opArg1 + " ^= " + opArg2;
+ break;
+
+ case POp::bitwise_not_int:
+ case POp::bitwise_not_2_ints:
+ case POp::bitwise_not_3_ints:
+ case POp::bitwise_not_4_ints:
+ opText = opArg1 + " = ~" + opArg1;
+ break;
+
+ case POp::cast_to_float_from_int:
+ case POp::cast_to_float_from_2_ints:
+ case POp::cast_to_float_from_3_ints:
+ case POp::cast_to_float_from_4_ints:
+ opText = opArg1 + " = IntToFloat(" + opArg1 + ")";
+ break;
+
+ case POp::cast_to_float_from_uint:
+ case POp::cast_to_float_from_2_uints:
+ case POp::cast_to_float_from_3_uints:
+ case POp::cast_to_float_from_4_uints:
+ opText = opArg1 + " = UintToFloat(" + opArg1 + ")";
+ break;
+
+ case POp::cast_to_int_from_float:
+ case POp::cast_to_int_from_2_floats:
+ case POp::cast_to_int_from_3_floats:
+ case POp::cast_to_int_from_4_floats:
+ opText = opArg1 + " = FloatToInt(" + opArg1 + ")";
+ break;
+
+ case POp::cast_to_uint_from_float:
+ case POp::cast_to_uint_from_2_floats:
+ case POp::cast_to_uint_from_3_floats:
+ case POp::cast_to_uint_from_4_floats:
+ opText = opArg1 + " = FloatToUint(" + opArg1 + ")";
+ break;
+
+ case POp::copy_slot_masked: case POp::copy_2_slots_masked:
+ case POp::copy_3_slots_masked: case POp::copy_4_slots_masked:
+ case POp::swizzle_copy_slot_masked: case POp::swizzle_copy_2_slots_masked:
+ case POp::swizzle_copy_3_slots_masked: case POp::swizzle_copy_4_slots_masked:
+ opText = opArg1 + " = Mask(" + opArg2 + ")";
+ break;
+
+ case POp::copy_constant: case POp::copy_2_constants:
+ case POp::copy_3_constants: case POp::copy_4_constants:
+ case POp::copy_slot_unmasked: case POp::copy_2_slots_unmasked:
+ case POp::copy_3_slots_unmasked: case POp::copy_4_slots_unmasked:
+ case POp::swizzle_1: case POp::swizzle_2:
+ case POp::swizzle_3: case POp::swizzle_4:
+ case POp::shuffle:
+ opText = opArg1 + " = " + opArg2;
+ break;
+
+ case POp::copy_from_indirect_unmasked:
+ case POp::copy_from_indirect_uniform_unmasked:
+ opText = opArg1 + " = Indirect(" + opArg2 + " + " + opArg3 + ")";
+ break;
+
+ case POp::copy_to_indirect_masked:
+ opText = "Indirect(" + opArg1 + " + " + opArg3 + ") = Mask(" + opArg2 + ")";
+ break;
+
+ case POp::swizzle_copy_to_indirect_masked:
+ opText = "Indirect(" + opArg1 + " + " + opArg3 + ")." + opSwizzle + " = Mask(" +
+ opArg2 + ")";
+ break;
+
+ case POp::zero_slot_unmasked: case POp::zero_2_slots_unmasked:
+ case POp::zero_3_slots_unmasked: case POp::zero_4_slots_unmasked:
+ opText = opArg1 + " = 0";
+ break;
+
+ case POp::abs_float: case POp::abs_int:
+ case POp::abs_2_floats: case POp::abs_2_ints:
+ case POp::abs_3_floats: case POp::abs_3_ints:
+ case POp::abs_4_floats: case POp::abs_4_ints:
+ opText = opArg1 + " = abs(" + opArg1 + ")";
+ break;
+
+ case POp::acos_float:
+ opText = opArg1 + " = acos(" + opArg1 + ")";
+ break;
+
+ case POp::asin_float:
+ opText = opArg1 + " = asin(" + opArg1 + ")";
+ break;
+
+ case POp::atan_float:
+ opText = opArg1 + " = atan(" + opArg1 + ")";
+ break;
+
+ case POp::atan2_n_floats:
+ opText = opArg1 + " = atan2(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::ceil_float:
+ case POp::ceil_2_floats:
+ case POp::ceil_3_floats:
+ case POp::ceil_4_floats:
+ opText = opArg1 + " = ceil(" + opArg1 + ")";
+ break;
+
+ case POp::cos_float:
+ opText = opArg1 + " = cos(" + opArg1 + ")";
+ break;
+
+ case POp::refract_4_floats:
+ opText = opArg1 + " = refract(" + opArg1 + ", " + opArg2 + ", " + opArg3 + ")";
+ break;
+
+ case POp::dot_2_floats:
+ case POp::dot_3_floats:
+ case POp::dot_4_floats:
+ opText = opArg1 + " = dot(" + opArg2 + ", " + opArg3 + ")";
+ break;
+
+ case POp::exp_float:
+ opText = opArg1 + " = exp(" + opArg1 + ")";
+ break;
+
+ case POp::exp2_float:
+ opText = opArg1 + " = exp2(" + opArg1 + ")";
+ break;
+
+ case POp::log_float:
+ opText = opArg1 + " = log(" + opArg1 + ")";
+ break;
+
+ case POp::log2_float:
+ opText = opArg1 + " = log2(" + opArg1 + ")";
+ break;
+
+ case POp::pow_n_floats:
+ opText = opArg1 + " = pow(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::sin_float:
+ opText = opArg1 + " = sin(" + opArg1 + ")";
+ break;
+
+ case POp::sqrt_float:
+ opText = opArg1 + " = sqrt(" + opArg1 + ")";
+ break;
+
+ case POp::tan_float:
+ opText = opArg1 + " = tan(" + opArg1 + ")";
+ break;
+
+ case POp::floor_float:
+ case POp::floor_2_floats:
+ case POp::floor_3_floats:
+ case POp::floor_4_floats:
+ opText = opArg1 + " = floor(" + opArg1 + ")";
+ break;
+
+ case POp::invsqrt_float:
+ case POp::invsqrt_2_floats:
+ case POp::invsqrt_3_floats:
+ case POp::invsqrt_4_floats:
+ opText = opArg1 + " = inversesqrt(" + opArg1 + ")";
+ break;
+
+ case POp::inverse_mat2:
+ case POp::inverse_mat3:
+ case POp::inverse_mat4:
+ opText = opArg1 + " = inverse(" + opArg1 + ")";
+ break;
+
+ case POp::add_float: case POp::add_int:
+ case POp::add_2_floats: case POp::add_2_ints:
+ case POp::add_3_floats: case POp::add_3_ints:
+ case POp::add_4_floats: case POp::add_4_ints:
+ case POp::add_n_floats: case POp::add_n_ints:
+ opText = opArg1 + " += " + opArg2;
+ break;
+
+ case POp::sub_float: case POp::sub_int:
+ case POp::sub_2_floats: case POp::sub_2_ints:
+ case POp::sub_3_floats: case POp::sub_3_ints:
+ case POp::sub_4_floats: case POp::sub_4_ints:
+ case POp::sub_n_floats: case POp::sub_n_ints:
+ opText = opArg1 + " -= " + opArg2;
+ break;
+
+ case POp::mul_float: case POp::mul_int:
+ case POp::mul_2_floats: case POp::mul_2_ints:
+ case POp::mul_3_floats: case POp::mul_3_ints:
+ case POp::mul_4_floats: case POp::mul_4_ints:
+ case POp::mul_n_floats: case POp::mul_n_ints:
+ opText = opArg1 + " *= " + opArg2;
+ break;
+
+ case POp::div_float: case POp::div_int: case POp::div_uint:
+ case POp::div_2_floats: case POp::div_2_ints: case POp::div_2_uints:
+ case POp::div_3_floats: case POp::div_3_ints: case POp::div_3_uints:
+ case POp::div_4_floats: case POp::div_4_ints: case POp::div_4_uints:
+ case POp::div_n_floats: case POp::div_n_ints: case POp::div_n_uints:
+ opText = opArg1 + " /= " + opArg2;
+ break;
+
+ case POp::mod_float:
+ case POp::mod_2_floats:
+ case POp::mod_3_floats:
+ case POp::mod_4_floats:
+ case POp::mod_n_floats:
+ opText = opArg1 + " = mod(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::min_float: case POp::min_int: case POp::min_uint:
+ case POp::min_2_floats: case POp::min_2_ints: case POp::min_2_uints:
+ case POp::min_3_floats: case POp::min_3_ints: case POp::min_3_uints:
+ case POp::min_4_floats: case POp::min_4_ints: case POp::min_4_uints:
+ case POp::min_n_floats: case POp::min_n_ints: case POp::min_n_uints:
+ opText = opArg1 + " = min(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::max_float: case POp::max_int: case POp::max_uint:
+ case POp::max_2_floats: case POp::max_2_ints: case POp::max_2_uints:
+ case POp::max_3_floats: case POp::max_3_ints: case POp::max_3_uints:
+ case POp::max_4_floats: case POp::max_4_ints: case POp::max_4_uints:
+ case POp::max_n_floats: case POp::max_n_ints: case POp::max_n_uints:
+ opText = opArg1 + " = max(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::cmplt_float: case POp::cmplt_int: case POp::cmplt_uint:
+ case POp::cmplt_2_floats: case POp::cmplt_2_ints: case POp::cmplt_2_uints:
+ case POp::cmplt_3_floats: case POp::cmplt_3_ints: case POp::cmplt_3_uints:
+ case POp::cmplt_4_floats: case POp::cmplt_4_ints: case POp::cmplt_4_uints:
+ case POp::cmplt_n_floats: case POp::cmplt_n_ints: case POp::cmplt_n_uints:
+ opText = opArg1 + " = lessThan(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::cmple_float: case POp::cmple_int: case POp::cmple_uint:
+ case POp::cmple_2_floats: case POp::cmple_2_ints: case POp::cmple_2_uints:
+ case POp::cmple_3_floats: case POp::cmple_3_ints: case POp::cmple_3_uints:
+ case POp::cmple_4_floats: case POp::cmple_4_ints: case POp::cmple_4_uints:
+ case POp::cmple_n_floats: case POp::cmple_n_ints: case POp::cmple_n_uints:
+ opText = opArg1 + " = lessThanEqual(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::cmpeq_float: case POp::cmpeq_int:
+ case POp::cmpeq_2_floats: case POp::cmpeq_2_ints:
+ case POp::cmpeq_3_floats: case POp::cmpeq_3_ints:
+ case POp::cmpeq_4_floats: case POp::cmpeq_4_ints:
+ case POp::cmpeq_n_floats: case POp::cmpeq_n_ints:
+ opText = opArg1 + " = equal(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::cmpne_float: case POp::cmpne_int:
+ case POp::cmpne_2_floats: case POp::cmpne_2_ints:
+ case POp::cmpne_3_floats: case POp::cmpne_3_ints:
+ case POp::cmpne_4_floats: case POp::cmpne_4_ints:
+ case POp::cmpne_n_floats: case POp::cmpne_n_ints:
+ opText = opArg1 + " = notEqual(" + opArg1 + ", " + opArg2 + ")";
+ break;
+
+ case POp::mix_float: case POp::mix_int:
+ case POp::mix_2_floats: case POp::mix_2_ints:
+ case POp::mix_3_floats: case POp::mix_3_ints:
+ case POp::mix_4_floats: case POp::mix_4_ints:
+ case POp::mix_n_floats: case POp::mix_n_ints:
+ opText = opArg1 + " = mix(" + opArg2 + ", " + opArg3 + ", " + opArg1 + ")";
+ break;
+
+ case POp::smoothstep_n_floats:
+ opText = opArg1 + " = smoothstep(" + opArg1 + ", " + opArg2 + ", " + opArg3 + ")";
+ break;
+
+ case POp::jump:
+ case POp::branch_if_all_lanes_active:
+ case POp::branch_if_any_lanes_active:
+ case POp::branch_if_no_lanes_active:
+ case POp::invoke_shader:
+ case POp::invoke_color_filter:
+ case POp::invoke_blender:
+ opText = std::string(opName) + " " + opArg1;
+ break;
+
+ case POp::invoke_to_linear_srgb:
+ opText = "src.rgba = toLinearSrgb(src.rgba)";
+ break;
+
+ case POp::invoke_from_linear_srgb:
+ opText = "src.rgba = fromLinearSrgb(src.rgba)";
+ break;
+
+ case POp::branch_if_no_active_lanes_eq:
+ opText = "branch " + opArg1 + " if no lanes of " + opArg2 + " == " + opArg3;
+ break;
+
+ case POp::label:
+ opText = "label " + opArg1;
+ break;
+
+ case POp::case_op: {
+ opText = "if (" + opArg1 + " == " + opArg3 +
+ ") { LoopMask = true; " + opArg2 + " = false; }";
+ break;
+ }
+ default:
+ break;
+ }
+
+ opName = opName.substr(0, 30);
+ if (!opText.empty()) {
+ out->writeText(SkSL::String::printf("% 5d. %-30.*s %s\n",
+ index + 1,
+ (int)opName.size(), opName.data(),
+ opText.c_str()).c_str());
+ } else {
+ out->writeText(SkSL::String::printf("% 5d. %.*s\n",
+ index + 1,
+ (int)opName.size(), opName.data()).c_str());
+ }
+ }
+}
+
+} // namespace RP
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineBuilder.h b/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineBuilder.h
new file mode 100644
index 0000000000..a0717fa539
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineBuilder.h
@@ -0,0 +1,655 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_RASTERPIPELINECODEBUILDER
+#define SKSL_RASTERPIPELINECODEBUILDER
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "src/base/SkUtils.h"
+#include "src/core/SkRasterPipelineOpList.h"
+#include "src/core/SkTHash.h"
+
+#include <cstdint>
+#include <initializer_list>
+#include <memory>
+
+class SkArenaAlloc;
+class SkRasterPipeline;
+class SkWStream;
+
+namespace SkSL {
+
+class SkRPDebugTrace;
+
+namespace RP {
+
+// A single scalar in our program consumes one slot.
+using Slot = int;
+constexpr Slot NA = -1;
+
+// Scalars, vectors, and matrices can be represented as a range of slot indices.
+struct SlotRange {
+ Slot index = 0;
+ int count = 0;
+};
+
+// An RP::Program will consist entirely of ProgramOps. The ProgramOps list is a superset of the
+// native SkRasterPipelineOps op-list. It also has a few extra ops to indicate child-effect
+// invocation, and a `label` op to indicate branch targets.
+enum class ProgramOp {
+ // A finished program can contain any native Raster Pipeline op...
+ #define M(stage) stage,
+ SK_RASTER_PIPELINE_OPS_ALL(M)
+ #undef M
+
+ // ... has branch targets...
+ label,
+
+ // ... can invoke child programs ...
+ invoke_shader,
+ invoke_color_filter,
+ invoke_blender,
+
+ // ... and can invoke color space transforms.
+ invoke_to_linear_srgb,
+ invoke_from_linear_srgb,
+};
+
+// BuilderOps are a superset of ProgramOps. They are used by the RP::Builder, which works in terms
+// of Instructions; Instructions are slightly more expressive than raw SkRasterPipelineOps. In
+// particular, the Builder supports stacks for pushing and popping scratch values.
+// RP::Program::makeStages is responsible for rewriting Instructions/BuilderOps into an array of
+// RP::Program::Stages, which will contain only native SkRasterPipelineOps and (optionally)
+// child-effect invocations.
+enum class BuilderOp {
+ // An in-flight program can contain all the native Raster Pipeline ops...
+ #define M(stage) stage,
+ SK_RASTER_PIPELINE_OPS_ALL(M)
+ #undef M
+
+ // ... has branch targets...
+ label,
+
+ // ... can invoke child programs...
+ invoke_shader,
+ invoke_color_filter,
+ invoke_blender,
+
+ // ... can invoke color space transforms ...
+ invoke_to_linear_srgb,
+ invoke_from_linear_srgb,
+
+ // ... and also has Builder-specific ops. These ops generally interface with the stack, and are
+ // converted into ProgramOps during `makeStages`.
+ push_literal,
+ push_slots,
+ push_slots_indirect,
+ push_uniform,
+ push_uniform_indirect,
+ push_zeros,
+ push_clone,
+ push_clone_from_stack,
+ push_clone_indirect_from_stack,
+ copy_stack_to_slots,
+ copy_stack_to_slots_unmasked,
+ copy_stack_to_slots_indirect,
+ swizzle_copy_stack_to_slots,
+ swizzle_copy_stack_to_slots_indirect,
+ discard_stack,
+ select,
+ push_condition_mask,
+ pop_condition_mask,
+ push_loop_mask,
+ pop_loop_mask,
+ pop_and_reenable_loop_mask,
+ push_return_mask,
+ pop_return_mask,
+ push_src_rgba,
+ push_dst_rgba,
+ pop_src_rg,
+ pop_src_rgba,
+ pop_dst_rgba,
+ set_current_stack,
+ branch_if_no_active_lanes_on_stack_top_equal,
+ unsupported
+};
+
+// If the child-invocation enums are not in sync between enums, program creation will not work.
+static_assert((int)ProgramOp::label == (int)BuilderOp::label);
+static_assert((int)ProgramOp::invoke_shader == (int)BuilderOp::invoke_shader);
+static_assert((int)ProgramOp::invoke_color_filter == (int)BuilderOp::invoke_color_filter);
+static_assert((int)ProgramOp::invoke_blender == (int)BuilderOp::invoke_blender);
+static_assert((int)ProgramOp::invoke_to_linear_srgb == (int)BuilderOp::invoke_to_linear_srgb);
+static_assert((int)ProgramOp::invoke_from_linear_srgb == (int)BuilderOp::invoke_from_linear_srgb);
+
+// Represents a single raster-pipeline SkSL instruction.
+struct Instruction {
+ Instruction(BuilderOp op, std::initializer_list<Slot> slots,
+ int a = 0, int b = 0, int c = 0, int d = 0)
+ : fOp(op), fImmA(a), fImmB(b), fImmC(c), fImmD(d) {
+ auto iter = slots.begin();
+ if (iter != slots.end()) { fSlotA = *iter++; }
+ if (iter != slots.end()) { fSlotB = *iter++; }
+ SkASSERT(iter == slots.end());
+ }
+
+ BuilderOp fOp;
+ Slot fSlotA = NA;
+ Slot fSlotB = NA;
+ int fImmA = 0;
+ int fImmB = 0;
+ int fImmC = 0;
+ int fImmD = 0;
+};
+
+class Callbacks {
+public:
+ virtual ~Callbacks() = default;
+
+ virtual bool appendShader(int index) = 0;
+ virtual bool appendColorFilter(int index) = 0;
+ virtual bool appendBlender(int index) = 0;
+
+ virtual void toLinearSrgb() = 0;
+ virtual void fromLinearSrgb() = 0;
+};
+
+class Program {
+public:
+ Program(SkTArray<Instruction> instrs,
+ int numValueSlots,
+ int numUniformSlots,
+ int numLabels,
+ SkRPDebugTrace* debugTrace);
+
+#if !defined(SKSL_STANDALONE)
+ bool appendStages(SkRasterPipeline* pipeline,
+ SkArenaAlloc* alloc,
+ Callbacks* callbacks,
+ SkSpan<const float> uniforms) const;
+#endif
+
+ void dump(SkWStream* out) const;
+
+private:
+ using StackDepthMap = SkTHashMap<int, int>; // <stack index, depth of stack>
+
+ struct SlotData {
+ SkSpan<float> values;
+ SkSpan<float> stack;
+ };
+ SlotData allocateSlotData(SkArenaAlloc* alloc) const;
+
+ struct Stage {
+ ProgramOp op;
+ void* ctx;
+ };
+ void makeStages(SkTArray<Stage>* pipeline,
+ SkArenaAlloc* alloc,
+ SkSpan<const float> uniforms,
+ const SlotData& slots) const;
+ void optimize();
+ StackDepthMap tempStackMaxDepths() const;
+
+ // These methods are used to split up large multi-slot operations into multiple ops as needed.
+ void appendCopy(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+ ProgramOp baseStage,
+ float* dst, int dstStride, const float* src, int srcStride, int numSlots) const;
+ void appendCopySlotsUnmasked(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+ float* dst, const float* src, int numSlots) const;
+ void appendCopySlotsMasked(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+ float* dst, const float* src, int numSlots) const;
+ void appendCopyConstants(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+ float* dst, const float* src, int numSlots) const;
+
+    // Appends a single-slot single-input math operation to the pipeline. The op `stage` will be
+ // appended `numSlots` times, starting at position `dst` and advancing one slot for each
+ // subsequent invocation.
+ void appendSingleSlotUnaryOp(SkTArray<Stage>* pipeline, ProgramOp stage,
+ float* dst, int numSlots) const;
+
+ // Appends a multi-slot single-input math operation to the pipeline. `baseStage` must refer to
+    // a single-slot "apply_op" stage, which must be immediately followed by specializations for
+ // 2-4 slots. For instance, {`zero_slot`, `zero_2_slots`, `zero_3_slots`, `zero_4_slots`}
+ // must be contiguous ops in the stage list, listed in that order; pass `zero_slot` and we
+ // pick the appropriate op based on `numSlots`.
+ void appendMultiSlotUnaryOp(SkTArray<Stage>* pipeline, ProgramOp baseStage,
+ float* dst, int numSlots) const;
+
+ // Appends a two-input math operation to the pipeline. `src` must be _immediately_ after `dst`
+    // in memory. `stage` must refer to an unbounded "apply_to_n_slots" stage. A BinaryOpCtx
+ // will be used to pass pointers to the destination and source; the delta between the two
+ // pointers implicitly gives the number of slots.
+ void appendAdjacentNWayBinaryOp(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+ ProgramOp stage,
+ float* dst, const float* src, int numSlots) const;
+
+ // Appends a multi-slot two-input math operation to the pipeline. `src` must be _immediately_
+ // after `dst` in memory. `baseStage` must refer to an unbounded "apply_to_n_slots" stage, which
+ // must be immediately followed by specializations for 1-4 slots. For instance, {`add_n_floats`,
+ // `add_float`, `add_2_floats`, `add_3_floats`, `add_4_floats`} must be contiguous ops in the
+ // stage list, listed in that order; pass `add_n_floats` and we pick the appropriate op based on
+ // `numSlots`.
+ void appendAdjacentMultiSlotBinaryOp(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+ ProgramOp baseStage,
+ float* dst, const float* src, int numSlots) const;
+
+ // Appends a multi-slot math operation having three inputs (dst, src0, src1) and one output
+    // (dst) to the pipeline. The three inputs must be _immediately_ adjacent in memory. `stage`
+ // must refer to an unbounded "apply_to_n_slots" stage, which must be immediately followed by
+ // specializations for 1-4 slots.
+ void appendAdjacentMultiSlotTernaryOp(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+ ProgramOp stage, float* dst,
+ const float* src0, const float* src1, int numSlots) const;
+
+ // Appends a math operation having three inputs (dst, src0, src1) and one output (dst) to the
+    // pipeline. The three inputs must be _immediately_ adjacent in memory. `stage` must refer
+ // to an unbounded "apply_to_n_slots" stage. A TernaryOpCtx will be used to pass pointers to the
+    // destination and sources; the delta between each pointer implicitly gives the slot count.
+ void appendAdjacentNWayTernaryOp(SkTArray<Stage>* pipeline, SkArenaAlloc* alloc,
+ ProgramOp stage, float* dst,
+ const float* src0, const float* src1, int numSlots) const;
+
+ // Appends a stack_rewind op on platforms where it is needed (when SK_HAS_MUSTTAIL is not set).
+ void appendStackRewind(SkTArray<Stage>* pipeline) const;
+
+ SkTArray<Instruction> fInstructions;
+ int fNumValueSlots = 0;
+ int fNumUniformSlots = 0;
+ int fNumTempStackSlots = 0;
+ int fNumLabels = 0;
+ SkTHashMap<int, int> fTempStackMaxDepths;
+ SkRPDebugTrace* fDebugTrace = nullptr;
+};
+
+class Builder {
+public:
+ /** Finalizes and optimizes the program. */
+ std::unique_ptr<Program> finish(int numValueSlots,
+ int numUniformSlots,
+ SkRPDebugTrace* debugTrace = nullptr);
+ /**
+ * Peels off a label ID for use in the program. Set the label's position in the program with
+ * the `label` instruction. Actually branch to the target with an instruction like
+ * `branch_if_any_lanes_active` or `jump`.
+ */
+ int nextLabelID() {
+ return fNumLabels++;
+ }
+
+ /**
+ * The builder keeps track of the state of execution masks; when we know that the execution
+ * mask is unaltered, we can generate simpler code. Code which alters the execution mask is
+ * required to enable this flag.
+ */
+ void enableExecutionMaskWrites() {
+ ++fExecutionMaskWritesEnabled;
+ }
+
+ void disableExecutionMaskWrites() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ --fExecutionMaskWritesEnabled;
+ }
+
+ bool executionMaskWritesAreEnabled() {
+ return fExecutionMaskWritesEnabled > 0;
+ }
+
+ /** Assemble a program from the Raster Pipeline instructions below. */
+ void init_lane_masks() {
+ fInstructions.push_back({BuilderOp::init_lane_masks, {}});
+ }
+
+ void store_src_rg(SlotRange slots) {
+ SkASSERT(slots.count == 2);
+ fInstructions.push_back({BuilderOp::store_src_rg, {slots.index}});
+ }
+
+ void store_src(SlotRange slots) {
+ SkASSERT(slots.count == 4);
+ fInstructions.push_back({BuilderOp::store_src, {slots.index}});
+ }
+
+ void store_dst(SlotRange slots) {
+ SkASSERT(slots.count == 4);
+ fInstructions.push_back({BuilderOp::store_dst, {slots.index}});
+ }
+
+ void store_device_xy01(SlotRange slots) {
+ SkASSERT(slots.count == 4);
+ fInstructions.push_back({BuilderOp::store_device_xy01, {slots.index}});
+ }
+
+ void load_src(SlotRange slots) {
+ SkASSERT(slots.count == 4);
+ fInstructions.push_back({BuilderOp::load_src, {slots.index}});
+ }
+
+ void load_dst(SlotRange slots) {
+ SkASSERT(slots.count == 4);
+ fInstructions.push_back({BuilderOp::load_dst, {slots.index}});
+ }
+
+ void set_current_stack(int stackIdx) {
+ fInstructions.push_back({BuilderOp::set_current_stack, {}, stackIdx});
+ }
+
+ // Inserts a label into the instruction stream.
+ void label(int labelID);
+
+ // Unconditionally branches to a label.
+ void jump(int labelID);
+
+ // Branches to a label if the execution mask is active in every lane.
+ void branch_if_all_lanes_active(int labelID);
+
+ // Branches to a label if the execution mask is active in any lane.
+ void branch_if_any_lanes_active(int labelID);
+
+ // Branches to a label if the execution mask is inactive across all lanes.
+ void branch_if_no_lanes_active(int labelID);
+
+ // Branches to a label if the top value on the stack is _not_ equal to `value` in any lane.
+ void branch_if_no_active_lanes_on_stack_top_equal(int value, int labelID);
+
+ // We use the same SkRasterPipeline op regardless of the literal type, and bitcast the value.
+ void push_literal_f(float val) {
+ this->push_literal_i(sk_bit_cast<int32_t>(val));
+ }
+
+ void push_literal_i(int32_t val) {
+ if (val == 0) {
+ this->push_zeros(1);
+ } else {
+ fInstructions.push_back({BuilderOp::push_literal, {}, val});
+ }
+ }
+
+ void push_literal_u(uint32_t val) {
+ this->push_literal_i(sk_bit_cast<int32_t>(val));
+ }
+
+ // Translates into copy_constants (from uniforms into temp stack) in Raster Pipeline.
+ void push_uniform(SlotRange src);
+
+ // Translates into copy_from_indirect_uniform_unmasked (from values into temp stack) in Raster
+ // Pipeline. `fixedRange` denotes a fixed set of slots; this range is pushed forward by the
+ // value at the top of stack `dynamicStack`. Pass the range of the uniform being indexed as
+ // `limitRange`; this is used as a hard cap, to avoid indexing outside of bounds.
+ void push_uniform_indirect(SlotRange fixedRange, int dynamicStack, SlotRange limitRange);
+
+ void push_zeros(int count) {
+ // Translates into zero_slot_unmasked in Raster Pipeline.
+ SkASSERT(count >= 0);
+ if (count > 0) {
+ if (!fInstructions.empty() && fInstructions.back().fOp == BuilderOp::push_zeros) {
+ // Coalesce adjacent push_zeros ops into a single op.
+ fInstructions.back().fImmA += count;
+ } else {
+ fInstructions.push_back({BuilderOp::push_zeros, {}, count});
+ }
+ }
+ }
+
+ // Translates into copy_slots_unmasked (from values into temp stack) in Raster Pipeline.
+ void push_slots(SlotRange src);
+
+ // Translates into copy_from_indirect_unmasked (from values into temp stack) in Raster Pipeline.
+ // `fixedRange` denotes a fixed set of slots; this range is pushed forward by the value at the
+ // top of stack `dynamicStack`. Pass the slot range of the variable being indexed as
+ // `limitRange`; this is used as a hard cap, to avoid indexing outside of bounds.
+ void push_slots_indirect(SlotRange fixedRange, int dynamicStack, SlotRange limitRange);
+
+ // Translates into copy_slots_masked (from temp stack to values) in Raster Pipeline.
+ // Does not discard any values on the temp stack.
+ void copy_stack_to_slots(SlotRange dst) {
+ this->copy_stack_to_slots(dst, /*offsetFromStackTop=*/dst.count);
+ }
+
+ void copy_stack_to_slots(SlotRange dst, int offsetFromStackTop);
+
+ // Translates into swizzle_copy_slots_masked (from temp stack to values) in Raster Pipeline.
+ // Does not discard any values on the temp stack.
+ void swizzle_copy_stack_to_slots(SlotRange dst,
+ SkSpan<const int8_t> components,
+ int offsetFromStackTop);
+
+ // Translates into swizzle_copy_to_indirect_masked (from temp stack to values) in Raster
+ // Pipeline. Does not discard any values on the temp stack.
+ void swizzle_copy_stack_to_slots_indirect(SlotRange fixedRange,
+ int dynamicStackID,
+ SlotRange limitRange,
+ SkSpan<const int8_t> components,
+ int offsetFromStackTop);
+
+ // Translates into copy_slots_unmasked (from temp stack to values) in Raster Pipeline.
+ // Does not discard any values on the temp stack.
+ void copy_stack_to_slots_unmasked(SlotRange dst) {
+ this->copy_stack_to_slots_unmasked(dst, /*offsetFromStackTop=*/dst.count);
+ }
+
+ void copy_stack_to_slots_unmasked(SlotRange dst, int offsetFromStackTop);
+
+ // Translates into copy_to_indirect_masked (from temp stack into values) in Raster Pipeline.
+ // `fixedRange` denotes a fixed set of slots; this range is pushed forward by the value at the
+ // top of stack `dynamicStack`. Pass the slot range of the variable being indexed as
+ // `limitRange`; this is used as a hard cap, to avoid indexing outside of bounds.
+ void copy_stack_to_slots_indirect(SlotRange fixedRange,
+ int dynamicStackID,
+ SlotRange limitRange);
+
+ // Copies from temp stack to slots, including an indirect offset, then shrinks the temp stack.
+ void pop_slots_indirect(SlotRange fixedRange, int dynamicStackID, SlotRange limitRange) {
+ this->copy_stack_to_slots_indirect(fixedRange, dynamicStackID, limitRange);
+ this->discard_stack(fixedRange.count);
+ }
+
+ // Performs a unary op (like `bitwise_not`), given a slot count of `slots`. The stack top is
+ // replaced with the result.
+ void unary_op(BuilderOp op, int32_t slots);
+
+ // Performs a binary op (like `add_n_floats` or `cmpeq_n_ints`), given a slot count of
+ // `slots`. Two n-slot input values are consumed, and the result is pushed onto the stack.
+ void binary_op(BuilderOp op, int32_t slots);
+
+ // Performs a ternary op (like `mix` or `smoothstep`), given a slot count of
+ // `slots`. Three n-slot input values are consumed, and the result is pushed onto the stack.
+ void ternary_op(BuilderOp op, int32_t slots);
+
+ // Computes a dot product on the stack. The slots consumed (`slots`) must be between 1 and 4.
+ // Two n-slot input vectors are consumed, and a scalar result is pushed onto the stack.
+ void dot_floats(int32_t slots);
+
+ // Computes refract(N, I, eta) on the stack. N and I are assumed to be 4-slot vectors, and can
+ // be padded with zeros for smaller inputs. Eta is a scalar. The result is a 4-slot vector.
+ void refract_floats();
+
+ // Computes inverse(matN) on the stack. Pass 2, 3 or 4 for n to specify matrix size.
+ void inverse_matrix(int32_t n);
+
+ // Shrinks the temp stack, discarding values on top.
+ void discard_stack(int32_t count = 1);
+
+ // Copies values from the temp stack into slots, and then shrinks the temp stack.
+ void pop_slots(SlotRange dst);
+
+ // Creates many clones of the top single-slot item on the temp stack.
+ void push_duplicates(int count);
+
+ // Creates a single clone of an item on the current temp stack. The cloned item can consist of
+ // any number of slots, and can be copied from an earlier position on the stack.
+ void push_clone(int numSlots, int offsetFromStackTop = 0) {
+ fInstructions.push_back({BuilderOp::push_clone, {}, numSlots,
+ numSlots + offsetFromStackTop});
+ }
+
+ // Clones a range of slots from another stack onto this stack.
+ void push_clone_from_stack(SlotRange range, int otherStackID, int offsetFromStackTop);
+
+ // Translates into copy_from_indirect_unmasked (from one temp stack to another) in Raster
+ // Pipeline. `fixedOffset` denotes a range of slots within the top `offsetFromStackTop` slots of
+ // `otherStackID`. This range is pushed forward by the value at the top of `dynamicStackID`.
+ void push_clone_indirect_from_stack(SlotRange fixedOffset,
+ int dynamicStackID,
+ int otherStackID,
+ int offsetFromStackTop);
+
+ // Compares the stack top with the passed-in value; if it matches, enables the loop mask.
+ void case_op(int value) {
+ fInstructions.push_back({BuilderOp::case_op, {}, value});
+ }
+
+ void select(int slots) {
+ // Overlays the top two entries on the stack, making one hybrid entry. The execution mask
+ // is used to select which lanes are preserved.
+ SkASSERT(slots > 0);
+ fInstructions.push_back({BuilderOp::select, {}, slots});
+ }
+
+ // The opposite of push_slots; copies values from the temp stack into value slots, then
+ // shrinks the temp stack.
+ void pop_slots_unmasked(SlotRange dst);
+
+ void copy_slots_masked(SlotRange dst, SlotRange src) {
+ SkASSERT(dst.count == src.count);
+ fInstructions.push_back({BuilderOp::copy_slot_masked, {dst.index, src.index}, dst.count});
+ }
+
+ void copy_slots_unmasked(SlotRange dst, SlotRange src);
+
+ void copy_constant(Slot slot, int constantValue) {
+ fInstructions.push_back({BuilderOp::copy_constant, {slot}, constantValue});
+ }
+
+ // Stores zeros across the entire slot range.
+ void zero_slots_unmasked(SlotRange dst);
+
+ // Consumes `consumedSlots` elements on the stack, then generates `components.size()` elements.
+ void swizzle(int consumedSlots, SkSpan<const int8_t> components);
+
+ // Transposes a matrix of size CxR on the stack (into a matrix of size RxC).
+ void transpose(int columns, int rows);
+
+ // Generates a CxR diagonal matrix from the top two scalars on the stack. The second scalar is
+ // used as the diagonal value; the first scalar (usually zero) fills in the rest of the slots.
+ void diagonal_matrix(int columns, int rows);
+
+ // Resizes a CxR matrix at the top of the stack to C'xR'.
+ void matrix_resize(int origColumns, int origRows, int newColumns, int newRows);
+
+ void push_condition_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::push_condition_mask, {}});
+ }
+
+ void pop_condition_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::pop_condition_mask, {}});
+ }
+
+ void merge_condition_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::merge_condition_mask, {}});
+ }
+
+ void push_loop_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::push_loop_mask, {}});
+ }
+
+ void pop_loop_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::pop_loop_mask, {}});
+ }
+
+ void push_src_rgba() {
+ fInstructions.push_back({BuilderOp::push_src_rgba, {}});
+ }
+
+ void push_dst_rgba() {
+ fInstructions.push_back({BuilderOp::push_dst_rgba, {}});
+ }
+
+ void pop_src_rg() {
+ fInstructions.push_back({BuilderOp::pop_src_rg, {}});
+ }
+
+ void pop_src_rgba() {
+ fInstructions.push_back({BuilderOp::pop_src_rgba, {}});
+ }
+
+ void pop_dst_rgba() {
+ fInstructions.push_back({BuilderOp::pop_dst_rgba, {}});
+ }
+
+ void mask_off_loop_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::mask_off_loop_mask, {}});
+ }
+
+ void reenable_loop_mask(SlotRange src) {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ SkASSERT(src.count == 1);
+ fInstructions.push_back({BuilderOp::reenable_loop_mask, {src.index}});
+ }
+
+ void pop_and_reenable_loop_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::pop_and_reenable_loop_mask, {}});
+ }
+
+ void merge_loop_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::merge_loop_mask, {}});
+ }
+
+ void push_return_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::push_return_mask, {}});
+ }
+
+ void pop_return_mask();
+
+ void mask_off_return_mask() {
+ SkASSERT(this->executionMaskWritesAreEnabled());
+ fInstructions.push_back({BuilderOp::mask_off_return_mask, {}});
+ }
+
+ void invoke_shader(int childIdx) {
+ fInstructions.push_back({BuilderOp::invoke_shader, {}, childIdx});
+ }
+
+ void invoke_color_filter(int childIdx) {
+ fInstructions.push_back({BuilderOp::invoke_color_filter, {}, childIdx});
+ }
+
+ void invoke_blender(int childIdx) {
+ fInstructions.push_back({BuilderOp::invoke_blender, {}, childIdx});
+ }
+
+ void invoke_to_linear_srgb() {
+ fInstructions.push_back({BuilderOp::invoke_to_linear_srgb, {}});
+ }
+
+ void invoke_from_linear_srgb() {
+ fInstructions.push_back({BuilderOp::invoke_from_linear_srgb, {}});
+ }
+
+private:
+ void simplifyPopSlotsUnmasked(SlotRange* dst);
+
+ SkTArray<Instruction> fInstructions;
+ int fNumLabels = 0;
+ int fExecutionMaskWritesEnabled = 0;
+};
+
+} // namespace RP
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.cpp
new file mode 100644
index 0000000000..4be7d38936
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.cpp
@@ -0,0 +1,3444 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkStringView.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/codegen/SkSLRasterPipelineBuilder.h"
+#include "src/sksl/codegen/SkSLRasterPipelineCodeGenerator.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLBreakStatement.h"
+#include "src/sksl/ir/SkSLChildCall.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorMatrixResize.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLContinueStatement.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/tracing/SkRPDebugTrace.h"
+#include "src/sksl/tracing/SkSLDebugInfo.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <float.h>
+#include <optional>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+namespace RP {
+
+static bool unsupported() {
+ // If MakeRasterPipelineProgram returns false, set a breakpoint here for more information.
+ return false;
+}
+
+class SlotManager {
+public:
+ SlotManager(std::vector<SlotDebugInfo>* i) : fSlotDebugInfo(i) {}
+
+ /** Used by `create` to add this variable to SlotDebugInfo inside SkRPDebugTrace. */
+ void addSlotDebugInfoForGroup(const std::string& varName,
+ const Type& type,
+ Position pos,
+ int* groupIndex,
+ bool isFunctionReturnValue);
+ void addSlotDebugInfo(const std::string& varName,
+ const Type& type,
+ Position pos,
+ bool isFunctionReturnValue);
+
+ /** Creates slots associated with an SkSL variable or return value. */
+ SlotRange createSlots(std::string name,
+ const Type& type,
+ Position pos,
+ bool isFunctionReturnValue);
+
+ /** Looks up the slots associated with an SkSL variable; creates the slot if necessary. */
+ SlotRange getVariableSlots(const Variable& v);
+
+ /**
+ * Looks up the slots associated with an SkSL function's return value; creates the range if
+ * necessary. Note that recursion is never supported, so we don't need to maintain return values
+ * in a stack; we can just statically allocate one slot per function call-site.
+ */
+ SlotRange getFunctionSlots(const IRNode& callSite, const FunctionDeclaration& f);
+
+ /** Returns the total number of slots consumed. */
+ int slotCount() const { return fSlotCount; }
+
+private:
+ SkTHashMap<const IRNode*, SlotRange> fSlotMap;
+ int fSlotCount = 0;
+ std::vector<SlotDebugInfo>* fSlotDebugInfo;
+};
+
+class AutoContinueMask;
+class LValue;
+
+class Generator {
+public:
+ Generator(const SkSL::Program& program, SkRPDebugTrace* debugTrace)
+ : fProgram(program)
+ , fContext(fProgram.fContext->fTypes,
+ fProgram.fContext->fCaps,
+ *fProgram.fContext->fErrors)
+ , fDebugTrace(debugTrace)
+ , fProgramSlots(debugTrace ? &debugTrace->fSlotInfo : nullptr)
+ , fUniformSlots(debugTrace ? &debugTrace->fUniformInfo : nullptr) {
+ fContext.fModifiersPool = &fModifiersPool;
+ fContext.fConfig = fProgram.fConfig.get();
+ fContext.fModule = fProgram.fContext->fModule;
+ }
+
+ /** Converts the SkSL main() function into a set of Instructions. */
+ bool writeProgram(const FunctionDefinition& function);
+
+ /** Returns the generated program. */
+ std::unique_ptr<RP::Program> finish();
+
+ /**
+ * Converts an SkSL function into a set of Instructions. Returns nullopt if the function
+ * contained unsupported statements or expressions.
+ */
+ std::optional<SlotRange> writeFunction(const IRNode& callSite,
+ const FunctionDefinition& function);
+
+ /**
+ * Returns the slot index of this function inside the FunctionDebugInfo array in SkRPDebugTrace.
+ * The FunctionDebugInfo slot will be created if it doesn't already exist.
+ */
+ int getFunctionDebugInfo(const FunctionDeclaration& decl);
+
+ /** Looks up the slots associated with an SkSL variable; creates the slot if necessary. */
+ SlotRange getVariableSlots(const Variable& v) {
+ SkASSERT(!IsUniform(v));
+ return fProgramSlots.getVariableSlots(v);
+ }
+
+ /** Looks up the slots associated with an SkSL uniform; creates the slot if necessary. */
+ SlotRange getUniformSlots(const Variable& v) {
+ SkASSERT(IsUniform(v));
+ return fUniformSlots.getVariableSlots(v);
+ }
+
+ /**
+ * Looks up the slots associated with an SkSL function's return value; creates the range if
+ * necessary. Note that recursion is never supported, so we don't need to maintain return values
+ * in a stack; we can just statically allocate one slot per function call-site.
+ */
+ SlotRange getFunctionSlots(const IRNode& callSite, const FunctionDeclaration& f) {
+ return fProgramSlots.getFunctionSlots(callSite, f);
+ }
+
+ /**
+ * Creates an additional stack for the program to push values onto. The stack will not become
+ * actively in-use until `setCurrentStack` is called.
+ */
+ int createStack();
+
+ /** Frees a stack generated by `createStack`. The freed stack must be completely empty. */
+ void recycleStack(int stackID);
+
+ /** Redirects builder ops to point to a different stack (created by `createStack`). */
+ void setCurrentStack(int stackID);
+
+ /** Reports the currently active stack. */
+ int currentStack() {
+ return fCurrentStack;
+ }
+
+ /**
+ * Returns an LValue for the passed-in expression; if the expression isn't supported as an
+ * LValue, returns nullptr.
+ */
+ std::unique_ptr<LValue> makeLValue(const Expression& e, bool allowScratch = false);
+
+ /** Copies the top-of-stack value into this lvalue, without discarding it from the stack. */
+ [[nodiscard]] bool store(LValue& lvalue);
+
+ /** Pushes the lvalue onto the top-of-stack. */
+ [[nodiscard]] bool push(LValue& lvalue);
+
+ /** The Builder stitches our instructions together into Raster Pipeline code. */
+ Builder* builder() { return &fBuilder; }
+
+ /** Appends a statement to the program. */
+ [[nodiscard]] bool writeStatement(const Statement& s);
+ [[nodiscard]] bool writeBlock(const Block& b);
+ [[nodiscard]] bool writeBreakStatement(const BreakStatement& b);
+ [[nodiscard]] bool writeContinueStatement(const ContinueStatement& b);
+ [[nodiscard]] bool writeDoStatement(const DoStatement& d);
+ [[nodiscard]] bool writeExpressionStatement(const ExpressionStatement& e);
+ [[nodiscard]] bool writeMasklessForStatement(const ForStatement& f);
+ [[nodiscard]] bool writeForStatement(const ForStatement& f);
+ [[nodiscard]] bool writeGlobals();
+ [[nodiscard]] bool writeIfStatement(const IfStatement& i);
+ [[nodiscard]] bool writeDynamicallyUniformIfStatement(const IfStatement& i);
+ [[nodiscard]] bool writeReturnStatement(const ReturnStatement& r);
+ [[nodiscard]] bool writeSwitchStatement(const SwitchStatement& s);
+ [[nodiscard]] bool writeVarDeclaration(const VarDeclaration& v);
+
+ /** Pushes an expression to the value stack. */
+ [[nodiscard]] bool pushBinaryExpression(const BinaryExpression& e);
+ [[nodiscard]] bool pushBinaryExpression(const Expression& left,
+ Operator op,
+ const Expression& right);
+ [[nodiscard]] bool pushChildCall(const ChildCall& c);
+ [[nodiscard]] bool pushConstructorCast(const AnyConstructor& c);
+ [[nodiscard]] bool pushConstructorCompound(const AnyConstructor& c);
+ [[nodiscard]] bool pushConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c);
+ [[nodiscard]] bool pushConstructorMatrixResize(const ConstructorMatrixResize& c);
+ [[nodiscard]] bool pushConstructorSplat(const ConstructorSplat& c);
+ [[nodiscard]] bool pushExpression(const Expression& e, bool usesResult = true);
+ [[nodiscard]] bool pushFieldAccess(const FieldAccess& f);
+ [[nodiscard]] bool pushFunctionCall(const FunctionCall& c);
+ [[nodiscard]] bool pushIndexExpression(const IndexExpression& i);
+ [[nodiscard]] bool pushIntrinsic(const FunctionCall& c);
+ [[nodiscard]] bool pushIntrinsic(IntrinsicKind intrinsic, const Expression& arg0);
+ [[nodiscard]] bool pushIntrinsic(IntrinsicKind intrinsic,
+ const Expression& arg0,
+ const Expression& arg1);
+ [[nodiscard]] bool pushIntrinsic(IntrinsicKind intrinsic,
+ const Expression& arg0,
+ const Expression& arg1,
+ const Expression& arg2);
+ [[nodiscard]] bool pushLiteral(const Literal& l);
+ [[nodiscard]] bool pushPostfixExpression(const PostfixExpression& p, bool usesResult);
+ [[nodiscard]] bool pushPrefixExpression(const PrefixExpression& p);
+ [[nodiscard]] bool pushPrefixExpression(Operator op, const Expression& expr);
+ [[nodiscard]] bool pushSwizzle(const Swizzle& s);
+ [[nodiscard]] bool pushTernaryExpression(const TernaryExpression& t);
+ [[nodiscard]] bool pushTernaryExpression(const Expression& test,
+ const Expression& ifTrue,
+ const Expression& ifFalse);
+ [[nodiscard]] bool pushDynamicallyUniformTernaryExpression(const Expression& test,
+ const Expression& ifTrue,
+ const Expression& ifFalse);
+ [[nodiscard]] bool pushVariableReference(const VariableReference& v);
+
+ /** Pops an expression from the value stack and copies it into slots. */
+ void popToSlotRange(SlotRange r) { fBuilder.pop_slots(r); }
+ void popToSlotRangeUnmasked(SlotRange r) { fBuilder.pop_slots_unmasked(r); }
+
+ /** Pops an expression from the value stack and discards it. */
+ void discardExpression(int slots) { fBuilder.discard_stack(slots); }
+
+ /** Zeroes out a range of slots. */
+ void zeroSlotRangeUnmasked(SlotRange r) { fBuilder.zero_slots_unmasked(r); }
+
+ /** Expression utilities. */
+ struct TypedOps {
+ BuilderOp fFloatOp;
+ BuilderOp fSignedOp;
+ BuilderOp fUnsignedOp;
+ BuilderOp fBooleanOp;
+ };
+
+ static BuilderOp GetTypedOp(const SkSL::Type& type, const TypedOps& ops);
+
+ [[nodiscard]] bool unaryOp(const SkSL::Type& type, const TypedOps& ops);
+ [[nodiscard]] bool binaryOp(const SkSL::Type& type, const TypedOps& ops);
+ [[nodiscard]] bool ternaryOp(const SkSL::Type& type, const TypedOps& ops);
+ [[nodiscard]] bool pushIntrinsic(const TypedOps& ops, const Expression& arg0);
+ [[nodiscard]] bool pushIntrinsic(const TypedOps& ops,
+ const Expression& arg0,
+ const Expression& arg1);
+ [[nodiscard]] bool pushIntrinsic(BuilderOp builderOp, const Expression& arg0);
+ [[nodiscard]] bool pushIntrinsic(BuilderOp builderOp,
+ const Expression& arg0,
+ const Expression& arg1);
+ [[nodiscard]] bool pushLengthIntrinsic(int slotCount);
+ [[nodiscard]] bool pushVectorizedExpression(const Expression& expr, const Type& vectorType);
+ [[nodiscard]] bool pushVariableReferencePartial(const VariableReference& v, SlotRange subset);
+ [[nodiscard]] bool pushLValueOrExpression(LValue* lvalue, const Expression& expr);
+ [[nodiscard]] bool pushMatrixMultiply(LValue* lvalue,
+ const Expression& left,
+ const Expression& right,
+ int leftColumns, int leftRows,
+ int rightColumns, int rightRows);
+ [[nodiscard]] bool pushStructuredComparison(LValue* left,
+ Operator op,
+ LValue* right,
+ const Type& type);
+
+ void foldWithMultiOp(BuilderOp op, int elements);
+ void foldComparisonOp(Operator op, int elements);
+
+ BuilderOp getTypedOp(const SkSL::Type& type, const TypedOps& ops) const;
+
+ Analysis::ReturnComplexity returnComplexity(const FunctionDefinition* func) {
+ Analysis::ReturnComplexity* complexity = fReturnComplexityMap.find(fCurrentFunction);
+ if (!complexity) {
+ complexity = fReturnComplexityMap.set(fCurrentFunction,
+ Analysis::GetReturnComplexity(*fCurrentFunction));
+ }
+ return *complexity;
+ }
+
+ bool needsReturnMask() {
+ return this->returnComplexity(fCurrentFunction) >=
+ Analysis::ReturnComplexity::kEarlyReturns;
+ }
+
+ bool needsFunctionResultSlots() {
+ return this->returnComplexity(fCurrentFunction) >
+ Analysis::ReturnComplexity::kSingleSafeReturn;
+ }
+
+ static bool IsUniform(const Variable& var) {
+ return var.modifiers().fFlags & Modifiers::kUniform_Flag;
+ }
+
+ static bool IsOutParameter(const Variable& var) {
+ return (var.modifiers().fFlags & (Modifiers::kIn_Flag | Modifiers::kOut_Flag)) ==
+ Modifiers::kOut_Flag;
+ }
+
+ static bool IsInoutParameter(const Variable& var) {
+ return (var.modifiers().fFlags & (Modifiers::kIn_Flag | Modifiers::kOut_Flag)) ==
+ (Modifiers::kIn_Flag | Modifiers::kOut_Flag);
+ }
+
+private:
+ const SkSL::Program& fProgram;
+ SkSL::Context fContext;
+ SkSL::ModifiersPool fModifiersPool;
+ Builder fBuilder;
+ SkRPDebugTrace* fDebugTrace = nullptr;
+ SkTHashMap<const Variable*, int> fChildEffectMap;
+
+ SlotManager fProgramSlots;
+ SlotManager fUniformSlots;
+
+ const FunctionDefinition* fCurrentFunction = nullptr;
+ SlotRange fCurrentFunctionResult;
+ AutoContinueMask* fCurrentContinueMask = nullptr;
+ int fCurrentBreakTarget = -1;
+ int fCurrentStack = 0;
+ int fNextStackID = 0;
+ SkTArray<int> fRecycledStacks;
+
+ SkTHashMap<const FunctionDefinition*, Analysis::ReturnComplexity> fReturnComplexityMap;
+
+ static constexpr auto kAbsOps = TypedOps{BuilderOp::abs_float,
+ BuilderOp::abs_int,
+ BuilderOp::unsupported,
+ BuilderOp::unsupported};
+ static constexpr auto kAddOps = TypedOps{BuilderOp::add_n_floats,
+ BuilderOp::add_n_ints,
+ BuilderOp::add_n_ints,
+ BuilderOp::unsupported};
+ static constexpr auto kSubtractOps = TypedOps{BuilderOp::sub_n_floats,
+ BuilderOp::sub_n_ints,
+ BuilderOp::sub_n_ints,
+ BuilderOp::unsupported};
+ static constexpr auto kMultiplyOps = TypedOps{BuilderOp::mul_n_floats,
+ BuilderOp::mul_n_ints,
+ BuilderOp::mul_n_ints,
+ BuilderOp::unsupported};
+ static constexpr auto kDivideOps = TypedOps{BuilderOp::div_n_floats,
+ BuilderOp::div_n_ints,
+ BuilderOp::div_n_uints,
+ BuilderOp::unsupported};
+ static constexpr auto kLessThanOps = TypedOps{BuilderOp::cmplt_n_floats,
+ BuilderOp::cmplt_n_ints,
+ BuilderOp::cmplt_n_uints,
+ BuilderOp::unsupported};
+ static constexpr auto kLessThanEqualOps = TypedOps{BuilderOp::cmple_n_floats,
+ BuilderOp::cmple_n_ints,
+ BuilderOp::cmple_n_uints,
+ BuilderOp::unsupported};
+ static constexpr auto kEqualOps = TypedOps{BuilderOp::cmpeq_n_floats,
+ BuilderOp::cmpeq_n_ints,
+ BuilderOp::cmpeq_n_ints,
+ BuilderOp::cmpeq_n_ints};
+ static constexpr auto kNotEqualOps = TypedOps{BuilderOp::cmpne_n_floats,
+ BuilderOp::cmpne_n_ints,
+ BuilderOp::cmpne_n_ints,
+ BuilderOp::cmpne_n_ints};
+ static constexpr auto kModOps = TypedOps{BuilderOp::mod_n_floats,
+ BuilderOp::unsupported,
+ BuilderOp::unsupported,
+ BuilderOp::unsupported};
+ static constexpr auto kMinOps = TypedOps{BuilderOp::min_n_floats,
+ BuilderOp::min_n_ints,
+ BuilderOp::min_n_uints,
+ BuilderOp::min_n_uints};
+ static constexpr auto kMaxOps = TypedOps{BuilderOp::max_n_floats,
+ BuilderOp::max_n_ints,
+ BuilderOp::max_n_uints,
+ BuilderOp::max_n_uints};
+ static constexpr auto kMixOps = TypedOps{BuilderOp::mix_n_floats,
+ BuilderOp::unsupported,
+ BuilderOp::unsupported,
+ BuilderOp::unsupported};
+ static constexpr auto kInverseSqrtOps = TypedOps{BuilderOp::invsqrt_float,
+ BuilderOp::unsupported,
+ BuilderOp::unsupported,
+ BuilderOp::unsupported};
+ friend class AutoContinueMask;
+};
+
+class AutoStack {
+public:
+ explicit AutoStack(Generator* g)
+ : fGenerator(g)
+ , fStackID(g->createStack()) {}
+
+ ~AutoStack() {
+ fGenerator->recycleStack(fStackID);
+ }
+
+ void enter() {
+ fParentStackID = fGenerator->currentStack();
+ fGenerator->setCurrentStack(fStackID);
+ }
+
+ void exit() {
+ SkASSERT(fGenerator->currentStack() == fStackID);
+ fGenerator->setCurrentStack(fParentStackID);
+ }
+
+ void pushClone(int slots) {
+ this->pushClone(SlotRange{0, slots}, /*offsetFromStackTop=*/slots);
+ }
+
+ void pushClone(SlotRange range, int offsetFromStackTop) {
+ fGenerator->builder()->push_clone_from_stack(range, fStackID, offsetFromStackTop);
+ }
+
+ void pushCloneIndirect(SlotRange range, int dynamicStackID, int offsetFromStackTop) {
+ fGenerator->builder()->push_clone_indirect_from_stack(
+ range, dynamicStackID, /*otherStackID=*/fStackID, offsetFromStackTop);
+ }
+
+ int stackID() const {
+ return fStackID;
+ }
+
+private:
+ Generator* fGenerator;
+ int fStackID = 0;
+ int fParentStackID = 0;
+};
+
+class AutoContinueMask {
+public:
+ AutoContinueMask(Generator* gen) : fGenerator(gen) {}
+
+ ~AutoContinueMask() {
+ if (fPreviousContinueMask) {
+ fGenerator->fCurrentContinueMask = fPreviousContinueMask;
+ }
+ }
+
+ void enable() {
+ SkASSERT(!fContinueMaskStack.has_value());
+
+ fContinueMaskStack.emplace(fGenerator);
+ fPreviousContinueMask = fGenerator->fCurrentContinueMask;
+ fGenerator->fCurrentContinueMask = this;
+ }
+
+ void enter() {
+ SkASSERT(fContinueMaskStack.has_value());
+ fContinueMaskStack->enter();
+ }
+
+ void exit() {
+ SkASSERT(fContinueMaskStack.has_value());
+ fContinueMaskStack->exit();
+ }
+
+ void enterLoopBody() {
+ if (fContinueMaskStack.has_value()) {
+ fContinueMaskStack->enter();
+ fGenerator->builder()->push_literal_i(0);
+ fContinueMaskStack->exit();
+ }
+ }
+
+ void exitLoopBody() {
+ if (fContinueMaskStack.has_value()) {
+ fContinueMaskStack->enter();
+ fGenerator->builder()->pop_and_reenable_loop_mask();
+ fContinueMaskStack->exit();
+ }
+ }
+
+private:
+ std::optional<AutoStack> fContinueMaskStack;
+ Generator* fGenerator = nullptr;
+ AutoContinueMask* fPreviousContinueMask = nullptr;
+};
+
/**
 * RAII helper which allocates a fresh label ID and installs it as the active loop target
 * (e.g. the current break target) for the duration of its scope, restoring the previous
 * target on destruction.
 */
class AutoLoopTarget {
public:
    AutoLoopTarget(Generator* gen, int* targetPtr) : fGenerator(gen), fLoopTargetPtr(targetPtr) {
        // Reserve a new label and swap it into the caller-provided target slot.
        fLabelID = fGenerator->builder()->nextLabelID();
        fPreviousLoopTarget = *fLoopTargetPtr;
        *fLoopTargetPtr = fLabelID;
    }

    ~AutoLoopTarget() {
        // Restore whatever target was active before this scope.
        *fLoopTargetPtr = fPreviousLoopTarget;
    }

    /** The label ID reserved for this loop target; emit a label here to terminate the loop. */
    int labelID() {
        return fLabelID;
    }

private:
    Generator* fGenerator = nullptr;
    int* fLoopTargetPtr = nullptr;
    int fPreviousLoopTarget;
    int fLabelID;
};
+
/**
 * Abstract interface for assignable (or at least addressable) expressions. Concrete subclasses
 * handle plain variables, swizzles, slices (fields / constant indices), dynamic indices, and
 * read-only scratch expressions.
 */
class LValue {
public:
    virtual ~LValue() = default;

    /** Returns true if this lvalue is actually writable--temporaries and uniforms are not. */
    virtual bool isWritable() const = 0;

    /**
     * Returns the fixed slot range of the lvalue, after it is winnowed down to the selected
     * field/index. The range is calculated assuming every dynamic index will evaluate to zero.
     */
    virtual SlotRange fixedSlotRange(Generator* gen) = 0;

    /**
     * Returns a stack which holds a single integer, representing the dynamic offset of the lvalue.
     * This value does not incorporate the fixed offset. If null is returned, the lvalue doesn't
     * have a dynamic offset. `evaluateDynamicIndices` must be called before this is used.
     */
    virtual AutoStack* dynamicSlotRange() = 0;

    /** Returns the swizzle components of the lvalue, or an empty span for non-swizzle LValues. */
    virtual SkSpan<const int8_t> swizzle() { return {}; }

    /** Pushes values directly onto the stack. */
    [[nodiscard]] virtual bool push(Generator* gen,
                                    SlotRange fixedOffset,
                                    AutoStack* dynamicOffset,
                                    SkSpan<const int8_t> swizzle) = 0;

    /** Stores topmost values from the stack directly into the lvalue. */
    [[nodiscard]] virtual bool store(Generator* gen,
                                     SlotRange fixedOffset,
                                     AutoStack* dynamicOffset,
                                     SkSpan<const int8_t> swizzle) = 0;
    /**
     * Some lvalues refer to a temporary expression; these temps can be held in the
     * scratch-expression field to ensure that they exist for the lifetime of the lvalue.
     */
    std::unique_ptr<Expression> fScratchExpression;
};
+
/**
 * A read-only LValue wrapping an arbitrary temporary expression. The expression is lazily
 * evaluated onto a dedicated stack on first `push`, and jettisoned when the lvalue dies.
 * Storing into a ScratchLValue is a programming error.
 */
class ScratchLValue final : public LValue {
public:
    explicit ScratchLValue(const Expression& e)
            : fExpression(&e)
            , fNumSlots(e.type().slotCount()) {}

    ~ScratchLValue() override {
        // Only clean up if `push` actually evaluated the expression.
        if (fGenerator && fDedicatedStack.has_value()) {
            // Jettison the scratch expression.
            fDedicatedStack->enter();
            fGenerator->discardExpression(fNumSlots);
            fDedicatedStack->exit();
        }
    }

    bool isWritable() const override {
        return false;
    }

    SlotRange fixedSlotRange(Generator* gen) override {
        // The scratch value occupies the bottom of its dedicated stack, so its range starts at 0.
        return SlotRange{0, fNumSlots};
    }

    AutoStack* dynamicSlotRange() override {
        return nullptr;
    }

    [[nodiscard]] bool push(Generator* gen,
                            SlotRange fixedOffset,
                            AutoStack* dynamicOffset,
                            SkSpan<const int8_t> swizzle) override {
        if (!fDedicatedStack.has_value()) {
            // Push the scratch expression onto a dedicated stack.
            fGenerator = gen;
            fDedicatedStack.emplace(fGenerator);
            fDedicatedStack->enter();
            if (!fGenerator->pushExpression(*fExpression)) {
                return unsupported();
            }
            fDedicatedStack->exit();
        }

        // Clone the requested slots from the dedicated stack onto the current stack.
        if (dynamicOffset) {
            fDedicatedStack->pushCloneIndirect(fixedOffset, dynamicOffset->stackID(), fNumSlots);
        } else {
            fDedicatedStack->pushClone(fixedOffset, fNumSlots);
        }
        if (!swizzle.empty()) {
            gen->builder()->swizzle(fixedOffset.count, swizzle);
        }
        return true;
    }

    [[nodiscard]] bool store(Generator*, SlotRange, AutoStack*, SkSpan<const int8_t>) override {
        SkDEBUGFAIL("scratch lvalues cannot be stored into");
        return unsupported();
    }

private:
    Generator* fGenerator = nullptr;
    const Expression* fExpression = nullptr;
    std::optional<AutoStack> fDedicatedStack;
    int fNumSlots = 0;
};
+
+class VariableLValue final : public LValue {
+public:
+ explicit VariableLValue(const Variable* v) : fVariable(v) {}
+
+ bool isWritable() const override {
+ return !Generator::IsUniform(*fVariable);
+ }
+
+ SlotRange fixedSlotRange(Generator* gen) override {
+ return Generator::IsUniform(*fVariable) ? gen->getUniformSlots(*fVariable)
+ : gen->getVariableSlots(*fVariable);
+ }
+
+ AutoStack* dynamicSlotRange() override {
+ return nullptr;
+ }
+
+ [[nodiscard]] bool push(Generator* gen,
+ SlotRange fixedOffset,
+ AutoStack* dynamicOffset,
+ SkSpan<const int8_t> swizzle) override {
+ if (Generator::IsUniform(*fVariable)) {
+ if (dynamicOffset) {
+ gen->builder()->push_uniform_indirect(fixedOffset, dynamicOffset->stackID(),
+ this->fixedSlotRange(gen));
+ } else {
+ gen->builder()->push_uniform(fixedOffset);
+ }
+ } else {
+ if (dynamicOffset) {
+ gen->builder()->push_slots_indirect(fixedOffset, dynamicOffset->stackID(),
+ this->fixedSlotRange(gen));
+ } else {
+ gen->builder()->push_slots(fixedOffset);
+ }
+ }
+ if (!swizzle.empty()) {
+ gen->builder()->swizzle(fixedOffset.count, swizzle);
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool store(Generator* gen,
+ SlotRange fixedOffset,
+ AutoStack* dynamicOffset,
+ SkSpan<const int8_t> swizzle) override {
+ SkASSERT(!Generator::IsUniform(*fVariable));
+
+ if (swizzle.empty()) {
+ if (dynamicOffset) {
+ gen->builder()->copy_stack_to_slots_indirect(fixedOffset, dynamicOffset->stackID(),
+ this->fixedSlotRange(gen));
+ } else {
+ gen->builder()->copy_stack_to_slots(fixedOffset);
+ }
+ } else {
+ if (dynamicOffset) {
+ gen->builder()->swizzle_copy_stack_to_slots_indirect(fixedOffset,
+ dynamicOffset->stackID(),
+ this->fixedSlotRange(gen),
+ swizzle,
+ swizzle.size());
+ } else {
+ gen->builder()->swizzle_copy_stack_to_slots(fixedOffset, swizzle, swizzle.size());
+ }
+ }
+ return true;
+ }
+
+private:
+ const Variable* fVariable;
+};
+
/**
 * An LValue which applies a swizzle (component rearrangement/subset) on top of a parent lvalue.
 * Pushes and stores delegate to the parent, passing along our component list; a second swizzle
 * on top of this one is not expected (the front end folds those away).
 */
class SwizzleLValue final : public LValue {
public:
    explicit SwizzleLValue(std::unique_ptr<LValue> p, const ComponentArray& c)
            : fParent(std::move(p))
            , fComponents(c) {
        SkASSERT(!fComponents.empty() && fComponents.size() <= 4);
    }

    bool isWritable() const override {
        return fParent->isWritable();
    }

    SlotRange fixedSlotRange(Generator* gen) override {
        return fParent->fixedSlotRange(gen);
    }

    AutoStack* dynamicSlotRange() override {
        return fParent->dynamicSlotRange();
    }

    SkSpan<const int8_t> swizzle() override {
        return fComponents;
    }

    [[nodiscard]] bool push(Generator* gen,
                            SlotRange fixedOffset,
                            AutoStack* dynamicOffset,
                            SkSpan<const int8_t> swizzle) override {
        if (!swizzle.empty()) {
            SkDEBUGFAIL("swizzle-of-a-swizzle should have been folded out in front end");
            return unsupported();
        }
        // Delegate to the parent, substituting our own component list as the swizzle.
        return fParent->push(gen, fixedOffset, dynamicOffset, fComponents);
    }

    [[nodiscard]] bool store(Generator* gen,
                             SlotRange fixedOffset,
                             AutoStack* dynamicOffset,
                             SkSpan<const int8_t> swizzle) override {
        if (!swizzle.empty()) {
            SkDEBUGFAIL("swizzle-of-a-swizzle should have been folded out in front end");
            return unsupported();
        }
        return fParent->store(gen, fixedOffset, dynamicOffset, fComponents);
    }

private:
    std::unique_ptr<LValue> fParent;
    const ComponentArray& fComponents;
};
+
+class UnownedLValueSlice : public LValue {
+public:
+ explicit UnownedLValueSlice(LValue* p, int initialSlot, int numSlots)
+ : fParent(p)
+ , fInitialSlot(initialSlot)
+ , fNumSlots(numSlots) {
+ SkASSERT(fInitialSlot >= 0);
+ SkASSERT(fNumSlots > 0);
+ }
+
+ bool isWritable() const override {
+ return fParent->isWritable();
+ }
+
+ SlotRange fixedSlotRange(Generator* gen) override {
+ SlotRange range = fParent->fixedSlotRange(gen);
+ SlotRange adjusted = range;
+ adjusted.index += fInitialSlot;
+ adjusted.count = fNumSlots;
+ SkASSERT((adjusted.index + adjusted.count) <= (range.index + range.count));
+ return adjusted;
+ }
+
+ AutoStack* dynamicSlotRange() override {
+ return fParent->dynamicSlotRange();
+ }
+
+ [[nodiscard]] bool push(Generator* gen,
+ SlotRange fixedOffset,
+ AutoStack* dynamicOffset,
+ SkSpan<const int8_t> swizzle) override {
+ return fParent->push(gen, fixedOffset, dynamicOffset, swizzle);
+ }
+
+ [[nodiscard]] bool store(Generator* gen,
+ SlotRange fixedOffset,
+ AutoStack* dynamicOffset,
+ SkSpan<const int8_t> swizzle) override {
+ return fParent->store(gen, fixedOffset, dynamicOffset, swizzle);
+ }
+
+protected:
+ LValue* fParent;
+
+private:
+ int fInitialSlot = 0;
+ int fNumSlots = 0;
+};
+
/**
 * An owning variant of UnownedLValueSlice: takes ownership of the parent lvalue via a raw
 * pointer (released from the unique_ptr) and deletes it on destruction.
 */
class LValueSlice final : public UnownedLValueSlice {
public:
    explicit LValueSlice(std::unique_ptr<LValue> p, int initialSlot, int numSlots)
            : UnownedLValueSlice(p.release(), initialSlot, numSlots) {}

    ~LValueSlice() override {
        // fParent was released from a unique_ptr in the constructor; reclaim it here.
        delete fParent;
    }
};
+
/**
 * An LValue for array accesses with a non-constant index (`arr[i]`). The index expression is
 * evaluated exactly once onto a dedicated stack via `evaluateDynamicIndices`, which must be
 * called before push/store/dynamicSlotRange are used.
 */
class DynamicIndexLValue final : public LValue {
public:
    explicit DynamicIndexLValue(std::unique_ptr<LValue> p, const IndexExpression& i)
            : fParent(std::move(p))
            , fIndexExpr(&i) {
        SkASSERT(fIndexExpr->index()->type().isInteger());
    }

    ~DynamicIndexLValue() override {
        if (fDedicatedStack.has_value()) {
            SkASSERT(fGenerator);

            // Jettison the index expression.
            fDedicatedStack->enter();
            fGenerator->discardExpression(/*slots=*/1);
            fDedicatedStack->exit();
        }
    }

    bool isWritable() const override {
        return fParent->isWritable();
    }

    [[nodiscard]] bool evaluateDynamicIndices(Generator* gen) {
        // The index must only be computed once; the index-expression could have side effects.
        // Once it has been computed, the offset lives on `fDedicatedStack`.
        SkASSERT(!fDedicatedStack.has_value());
        SkASSERT(!fGenerator);
        fGenerator = gen;
        fDedicatedStack.emplace(fGenerator);

        if (!fParent->swizzle().empty()) {
            SkDEBUGFAIL("an indexed-swizzle should have been handled by RewriteIndexedSwizzle");
            return unsupported();
        }

        // Push the index expression onto the dedicated stack.
        fDedicatedStack->enter();
        if (!fGenerator->pushExpression(*fIndexExpr->index())) {
            return unsupported();
        }

        // Multiply the index-expression result by the per-value slot count.
        int slotCount = fIndexExpr->type().slotCount();
        if (slotCount != 1) {
            fGenerator->builder()->push_literal_i(fIndexExpr->type().slotCount());
            fGenerator->builder()->binary_op(BuilderOp::mul_n_ints, 1);
        }

        // Check to see if a parent LValue already has a dynamic index. If so, we need to
        // incorporate its value into our own.
        if (AutoStack* parentDynamicIndexStack = fParent->dynamicSlotRange()) {
            parentDynamicIndexStack->pushClone(/*slots=*/1);
            fGenerator->builder()->binary_op(BuilderOp::add_n_ints, 1);
        }
        fDedicatedStack->exit();
        return true;
    }

    SlotRange fixedSlotRange(Generator* gen) override {
        // Compute the fixed slot range as if we are indexing into position zero.
        SlotRange range = fParent->fixedSlotRange(gen);
        range.count = fIndexExpr->type().slotCount();
        return range;
    }

    AutoStack* dynamicSlotRange() override {
        // We incorporated any parent dynamic offsets when `evaluateDynamicIndices` was called.
        SkASSERT(fDedicatedStack.has_value());
        return &*fDedicatedStack;
    }

    [[nodiscard]] bool push(Generator* gen,
                            SlotRange fixedOffset,
                            AutoStack* dynamicOffset,
                            SkSpan<const int8_t> swizzle) override {
        return fParent->push(gen, fixedOffset, dynamicOffset, swizzle);
    }

    [[nodiscard]] bool store(Generator* gen,
                             SlotRange fixedOffset,
                             AutoStack* dynamicOffset,
                             SkSpan<const int8_t> swizzle) override {
        return fParent->store(gen, fixedOffset, dynamicOffset, swizzle);
    }

private:
    Generator* fGenerator = nullptr;
    std::unique_ptr<LValue> fParent;
    std::optional<AutoStack> fDedicatedStack;
    const IndexExpression* fIndexExpr = nullptr;
};
+
// Recursively appends one SlotDebugInfo entry per slot of `type` to fSlotDebugInfo, naming
// array elements `name[i]` and struct fields `name.field`. `groupIndex` is incremented once
// per emitted slot so sibling calls share a running index across the whole group.
void SlotManager::addSlotDebugInfoForGroup(const std::string& varName,
                                           const Type& type,
                                           Position pos,
                                           int* groupIndex,
                                           bool isFunctionReturnValue) {
    SkASSERT(fSlotDebugInfo);
    switch (type.typeKind()) {
        case Type::TypeKind::kArray: {
            // Recurse once per element, appending the index to the name.
            int nslots = type.columns();
            const Type& elemType = type.componentType();
            for (int slot = 0; slot < nslots; ++slot) {
                this->addSlotDebugInfoForGroup(varName + "[" + std::to_string(slot) + "]", elemType,
                                               pos, groupIndex, isFunctionReturnValue);
            }
            break;
        }
        case Type::TypeKind::kStruct: {
            // Recurse once per field, appending the field name.
            for (const Type::Field& field : type.fields()) {
                this->addSlotDebugInfoForGroup(varName + "." + std::string(field.fName),
                                               *field.fType, pos, groupIndex,
                                               isFunctionReturnValue);
            }
            break;
        }
        default:
            // Unexpected type kinds assert in debug builds, then fall through and are treated
            // as plain numeric slots.
            SkASSERTF(0, "unsupported slot type %d", (int)type.typeKind());
            [[fallthrough]];

        case Type::TypeKind::kScalar:
        case Type::TypeKind::kVector:
        case Type::TypeKind::kMatrix: {
            Type::NumberKind numberKind = type.componentType().numberKind();
            int nslots = type.slotCount();

            // Emit one debug-info record per slot of this numeric value.
            for (int slot = 0; slot < nslots; ++slot) {
                SlotDebugInfo slotInfo;
                slotInfo.name = varName;
                slotInfo.columns = type.columns();
                slotInfo.rows = type.rows();
                slotInfo.componentIndex = slot;
                slotInfo.groupIndex = (*groupIndex)++;
                slotInfo.numberKind = numberKind;
                slotInfo.pos = pos;
                slotInfo.fnReturnValue = isFunctionReturnValue ? 1 : -1;
                fSlotDebugInfo->push_back(std::move(slotInfo));
            }
            break;
        }
    }
}
+
+void SlotManager::addSlotDebugInfo(const std::string& varName,
+ const Type& type,
+ Position pos,
+ bool isFunctionReturnValue) {
+ int groupIndex = 0;
+ this->addSlotDebugInfoForGroup(varName, type, pos, &groupIndex, isFunctionReturnValue);
+ SkASSERT((size_t)groupIndex == type.slotCount());
+}
+
// Allocates a contiguous range of `type.slotCount()` slots and, when debug tracing is active,
// records per-slot debug info for them. Returns an empty range for zero-slot types.
SlotRange SlotManager::createSlots(std::string name,
                                   const Type& type,
                                   Position pos,
                                   bool isFunctionReturnValue) {
    size_t nslots = type.slotCount();
    if (nslots == 0) {
        return {};
    }
    if (fSlotDebugInfo) {
        // Our debug slot-info table should have the same length as the actual slot table.
        SkASSERT(fSlotDebugInfo->size() == (size_t)fSlotCount);

        // Append slot names and types to our debug slot-info table.
        fSlotDebugInfo->reserve(fSlotCount + nslots);
        this->addSlotDebugInfo(name, type, pos, isFunctionReturnValue);

        // Confirm that we added the expected number of slots.
        SkASSERT(fSlotDebugInfo->size() == (size_t)(fSlotCount + nslots));
    }

    // Hand out the next `nslots` slot indices.
    SlotRange result = {fSlotCount, (int)nslots};
    fSlotCount += nslots;
    return result;
}
+
+SlotRange SlotManager::getVariableSlots(const Variable& v) {
+ SlotRange* entry = fSlotMap.find(&v);
+ if (entry != nullptr) {
+ return *entry;
+ }
+ SlotRange range = this->createSlots(std::string(v.name()),
+ v.type(),
+ v.fPosition,
+ /*isFunctionReturnValue=*/false);
+ fSlotMap.set(&v, range);
+ return range;
+}
+
+SlotRange SlotManager::getFunctionSlots(const IRNode& callSite, const FunctionDeclaration& f) {
+ SlotRange* entry = fSlotMap.find(&callSite);
+ if (entry != nullptr) {
+ return *entry;
+ }
+ SlotRange range = this->createSlots("[" + std::string(f.name()) + "].result",
+ f.returnType(),
+ f.fPosition,
+ /*isFunctionReturnValue=*/true);
+ fSlotMap.set(&callSite, range);
+ return range;
+}
+
+static bool is_sliceable_swizzle(SkSpan<const int8_t> components) {
+ // Determine if the swizzle rearranges its elements, or if it's a simple subset of its elements.
+ // (A simple subset would be a sequential non-repeating range of components, like `.xyz` or
+ // `.yzw` or `.z`, but not `.xx` or `.xz`, which can be accessed as a slice of the variable.)
+ for (size_t index = 1; index < components.size(); ++index) {
+ if (components[index] != int8_t(components[0] + index)) {
+ return false;
+ }
+ }
+ return true;
+}
+
// Converts expression `e` into the appropriate LValue subclass, or returns null if it is not a
// supported lvalue form. With `allowScratch`, non-lvalue expressions are wrapped in a read-only
// ScratchLValue instead of failing.
std::unique_ptr<LValue> Generator::makeLValue(const Expression& e, bool allowScratch) {
    if (e.is<VariableReference>()) {
        return std::make_unique<VariableLValue>(e.as<VariableReference>().variable());
    }
    if (e.is<Swizzle>()) {
        const Swizzle& swizzleExpr = e.as<Swizzle>();
        if (std::unique_ptr<LValue> base = this->makeLValue(*swizzleExpr.base(),
                                                            allowScratch)) {
            const ComponentArray& components = swizzleExpr.components();
            if (is_sliceable_swizzle(components)) {
                // If the swizzle is a contiguous subset, we can represent it with a fixed slice.
                return std::make_unique<LValueSlice>(std::move(base), components[0],
                                                     components.size());
            }
            return std::make_unique<SwizzleLValue>(std::move(base), components);
        }
        return nullptr;
    }
    if (e.is<FieldAccess>()) {
        const FieldAccess& fieldExpr = e.as<FieldAccess>();
        if (std::unique_ptr<LValue> base = this->makeLValue(*fieldExpr.base(),
                                                            allowScratch)) {
            // Represent field access with a slice.
            return std::make_unique<LValueSlice>(std::move(base), fieldExpr.initialSlot(),
                                                 fieldExpr.type().slotCount());
        }
        return nullptr;
    }
    if (e.is<IndexExpression>()) {
        const IndexExpression& indexExpr = e.as<IndexExpression>();

        // If the index base is swizzled (`vec.zyx[idx]`), rewrite it into an equivalent
        // non-swizzled form (`vec[uint3(2,1,0)[idx]]`).
        if (std::unique_ptr<Expression> rewritten = Transform::RewriteIndexedSwizzle(fContext,
                                                                                     indexExpr)) {
            // Convert the rewritten expression into an lvalue.
            std::unique_ptr<LValue> lvalue = this->makeLValue(*rewritten, allowScratch);
            if (!lvalue) {
                return nullptr;
            }
            // We need to hold onto the rewritten expression for the lifetime of the lvalue.
            lvalue->fScratchExpression = std::move(rewritten);
            return lvalue;
        }
        if (std::unique_ptr<LValue> base = this->makeLValue(*indexExpr.base(),
                                                            allowScratch)) {
            // If the index is a compile-time constant, we can represent it with a fixed slice.
            SKSL_INT indexValue;
            if (ConstantFolder::GetConstantInt(*indexExpr.index(), &indexValue)) {
                int numSlots = indexExpr.type().slotCount();
                return std::make_unique<LValueSlice>(std::move(base), numSlots * indexValue,
                                                     numSlots);
            }

            // Represent non-constant indexing via a dynamic index.
            auto dynLValue = std::make_unique<DynamicIndexLValue>(std::move(base), indexExpr);
            return dynLValue->evaluateDynamicIndices(this) ? std::move(dynLValue)
                                                           : nullptr;
        }
        return nullptr;
    }
    if (allowScratch) {
        // This path allows us to perform field- and index-accesses on an expression as if it were
        // an lvalue, but is a temporary and shouldn't be written back to.
        return std::make_unique<ScratchLValue>(e);
    }
    return nullptr;
}
+
+bool Generator::push(LValue& lvalue) {
+ return lvalue.push(this,
+ lvalue.fixedSlotRange(this),
+ lvalue.dynamicSlotRange(),
+ /*swizzle=*/{});
+}
+
+bool Generator::store(LValue& lvalue) {
+ SkASSERT(lvalue.isWritable());
+ return lvalue.store(this,
+ lvalue.fixedSlotRange(this),
+ lvalue.dynamicSlotRange(),
+ /*swizzle=*/{});
+}
+
// Returns the index of `decl` in the debug trace's function-info table, appending a new entry
// if the function has not been seen before. Requires fDebugTrace to be set.
int Generator::getFunctionDebugInfo(const FunctionDeclaration& decl) {
    SkASSERT(fDebugTrace);

    std::string name = decl.description();

    // When generating the debug trace, we typically mark every function as `noinline`. This makes
    // the trace more confusing, since this isn't in the source program, so remove it.
    static constexpr std::string_view kNoInline = "noinline ";
    if (skstd::starts_with(name, kNoInline)) {
        name = name.substr(kNoInline.size());
    }

    // Look for a matching FunctionDebugInfo slot.
    for (size_t index = 0; index < fDebugTrace->fFuncInfo.size(); ++index) {
        if (fDebugTrace->fFuncInfo[index].name == name) {
            return index;
        }
    }

    // We've never called this function before; create a new slot to hold its information.
    int slot = (int)fDebugTrace->fFuncInfo.size();
    fDebugTrace->fFuncInfo.push_back(FunctionDebugInfo{std::move(name)});
    return slot;
}
+
+int Generator::createStack() {
+ if (!fRecycledStacks.empty()) {
+ int stackID = fRecycledStacks.back();
+ fRecycledStacks.pop_back();
+ return stackID;
+ }
+ return ++fNextStackID;
+}
+
// Returns a stack ID to the recycle pool so a later createStack() call can reuse it.
void Generator::recycleStack(int stackID) {
    fRecycledStacks.push_back(stackID);
}
+
+void Generator::setCurrentStack(int stackID) {
+ if (fCurrentStack != stackID) {
+ fCurrentStack = stackID;
+ fBuilder.set_current_stack(stackID);
+ }
+}
+
// Emits code for `function` invoked at `callSite`. Returns the slot range holding the function's
// result, or nullopt if the body contains an unsupported construct.
std::optional<SlotRange> Generator::writeFunction(const IRNode& callSite,
                                                  const FunctionDefinition& function) {
    [[maybe_unused]] int funcIndex = -1;
    if (fDebugTrace) {
        funcIndex = this->getFunctionDebugInfo(function.declaration());
        SkASSERT(funcIndex >= 0);
        // TODO(debugger): add trace for function-enter
    }

    // Point fCurrentFunctionResult at this function's result slots for the duration of the body,
    // saving the caller's result range so nested calls restore correctly.
    SlotRange lastFunctionResult = fCurrentFunctionResult;
    fCurrentFunctionResult = this->getFunctionSlots(callSite, function.declaration());

    if (!this->writeStatement(*function.body())) {
        return std::nullopt;
    }

    SlotRange functionResult = fCurrentFunctionResult;
    fCurrentFunctionResult = lastFunctionResult;

    if (fDebugTrace) {
        // TODO(debugger): add trace for function-exit
    }

    return functionResult;
}
+
// Processes every global variable declaration in the program: assigns indices to child effects,
// handles builtin variables (only sk_FragCoord is supported), creates uniform slots in
// declaration order, and emits ordinary globals as variable declarations.
bool Generator::writeGlobals() {
    for (const ProgramElement* e : fProgram.elements()) {
        if (e->is<GlobalVarDeclaration>()) {
            const GlobalVarDeclaration& gvd = e->as<GlobalVarDeclaration>();
            const VarDeclaration& decl = gvd.varDeclaration();
            const Variable* var = decl.var();

            if (var->type().isEffectChild()) {
                // Associate each child effect variable with its numeric index.
                SkASSERT(!fChildEffectMap.find(var));
                int childEffectIndex = fChildEffectMap.count();
                fChildEffectMap[var] = childEffectIndex;
                continue;
            }

            // Opaque types include child processors and GL objects (samplers, textures, etc).
            // Of those, only child processors are legal variables.
            SkASSERT(!var->type().isVoid());
            SkASSERT(!var->type().isOpaque());

            // Builtin variables are system-defined, with special semantics.
            if (int builtin = var->modifiers().fLayout.fBuiltin; builtin >= 0) {
                if (builtin == SK_FRAGCOORD_BUILTIN) {
                    fBuilder.store_device_xy01(this->getVariableSlots(*var));
                    continue;
                }
                // The only builtin variable exposed to runtime effects is sk_FragCoord.
                return unsupported();
            }

            if (IsUniform(*var)) {
                // Create the uniform slot map in first-to-last order.
                (void)this->getUniformSlots(*var);
                continue;
            }

            // Other globals are treated as normal variable declarations.
            if (!this->writeVarDeclaration(decl)) {
                return unsupported();
            }
        }
    }

    return true;
}
+
// Dispatches `s` to the appropriate per-kind write method. Nops succeed trivially; any
// statement kind without a handler is reported as unsupported.
bool Generator::writeStatement(const Statement& s) {
    switch (s.kind()) {
        case Statement::Kind::kBlock:
            return this->writeBlock(s.as<Block>());

        case Statement::Kind::kBreak:
            return this->writeBreakStatement(s.as<BreakStatement>());

        case Statement::Kind::kContinue:
            return this->writeContinueStatement(s.as<ContinueStatement>());

        case Statement::Kind::kDo:
            return this->writeDoStatement(s.as<DoStatement>());

        case Statement::Kind::kExpression:
            return this->writeExpressionStatement(s.as<ExpressionStatement>());

        case Statement::Kind::kFor:
            return this->writeForStatement(s.as<ForStatement>());

        case Statement::Kind::kIf:
            return this->writeIfStatement(s.as<IfStatement>());

        case Statement::Kind::kNop:
            return true;

        case Statement::Kind::kReturn:
            return this->writeReturnStatement(s.as<ReturnStatement>());

        case Statement::Kind::kSwitch:
            return this->writeSwitchStatement(s.as<SwitchStatement>());

        case Statement::Kind::kVarDeclaration:
            return this->writeVarDeclaration(s.as<VarDeclaration>());

        default:
            return unsupported();
    }
}
+
+bool Generator::writeBlock(const Block& b) {
+ for (const std::unique_ptr<Statement>& stmt : b.children()) {
+ if (!this->writeStatement(*stmt)) {
+ return unsupported();
+ }
+ }
+ return true;
+}
+
// Emits a `break`: branch straight to the break target when every lane is breaking; otherwise
// mask the current lanes out of the loop mask so they stop executing loop iterations.
bool Generator::writeBreakStatement(const BreakStatement&) {
    // If all lanes have reached this break, we can just branch straight to the break target instead
    // of updating masks.
    fBuilder.branch_if_all_lanes_active(fCurrentBreakTarget);
    fBuilder.mask_off_loop_mask();
    return true;
}
+
// Emits a `continue`: records the currently-executing lanes in the continue mask (so the loop
// can re-enable them at the bottom of the body) and removes them from the loop mask.
bool Generator::writeContinueStatement(const ContinueStatement&) {
    // This could be written as one hand-tuned RasterPipeline op, but for now, we reuse existing ops
    // to assemble a continue op.

    // Set any currently-executing lanes in the continue-mask to true via `select.`
    fCurrentContinueMask->enter();
    fBuilder.push_literal_i(~0);
    fBuilder.select(/*slots=*/1);

    // Disable any currently-executing lanes from the loop mask.
    fBuilder.mask_off_loop_mask();
    fCurrentContinueMask->exit();

    return true;
}
+
// Emits a do-while loop: body first, then the test, looping while any lane's test passes.
// Sets up break/continue machinery and preserves the caller's loop mask.
bool Generator::writeDoStatement(const DoStatement& d) {
    // Set up a break target.
    AutoLoopTarget breakTarget(this, &fCurrentBreakTarget);

    // Save off the original loop mask.
    fBuilder.enableExecutionMaskWrites();
    fBuilder.push_loop_mask();

    // If `continue` is used in the loop...
    Analysis::LoopControlFlowInfo loopInfo = Analysis::GetLoopControlFlowInfo(*d.statement());
    AutoContinueMask autoContinueMask(this);
    if (loopInfo.fHasContinue) {
        // ... create a temporary slot for continue-mask storage.
        autoContinueMask.enable();
    }

    // Write the do-loop body.
    int labelID = fBuilder.nextLabelID();
    fBuilder.label(labelID);

    autoContinueMask.enterLoopBody();

    if (!this->writeStatement(*d.statement())) {
        return false;
    }

    autoContinueMask.exitLoopBody();

    // Emit the test-expression, in order to combine it with the loop mask.
    if (!this->pushExpression(*d.test())) {
        return false;
    }

    // Mask off any lanes in the loop mask where the test-expression is false; this breaks the loop.
    // We don't use the test expression for anything else, so jettison it.
    fBuilder.merge_loop_mask();
    this->discardExpression(/*slots=*/1);

    // If any lanes are still running, go back to the top and run the loop body again.
    fBuilder.branch_if_any_lanes_active(labelID);

    // If we hit a break statement on all lanes, we will branch here to escape from the loop.
    fBuilder.label(breakTarget.labelID());

    // Restore the loop mask.
    fBuilder.pop_loop_mask();
    fBuilder.disableExecutionMaskWrites();

    return true;
}
+
// Emits a fully-unrollable ES2 for-loop without any loop-mask bookkeeping. Only valid when the
// loop has a known iteration count, a full init/test/next, and no break/continue/return inside
// (the caller, writeForStatement, checks those preconditions).
bool Generator::writeMasklessForStatement(const ForStatement& f) {
    SkASSERT(f.unrollInfo());
    SkASSERT(f.unrollInfo()->fCount > 0);
    SkASSERT(f.initializer());
    SkASSERT(f.test());
    SkASSERT(f.next());

    // If no lanes are active, skip over the loop entirely. This guards against looping forever;
    // with no lanes active, we wouldn't be able to write the loop variable back to its slot, so
    // we'd never make forward progress.
    int loopExitID = fBuilder.nextLabelID();
    int loopBodyID = fBuilder.nextLabelID();
    fBuilder.branch_if_no_lanes_active(loopExitID);

    // Run the loop initializer.
    if (!this->writeStatement(*f.initializer())) {
        return unsupported();
    }

    // Write the for-loop body. We know the for-loop has a standard ES2 unrollable structure, and
    // that it runs for at least one iteration, so we can plow straight ahead into the loop body
    // instead of running the loop-test first.
    fBuilder.label(loopBodyID);

    if (!this->writeStatement(*f.statement())) {
        return unsupported();
    }

    // If the loop only runs for a single iteration, we are already done. If not...
    if (f.unrollInfo()->fCount > 1) {
        // ... run the next-expression, and immediately discard its result.
        if (!this->pushExpression(*f.next(), /*usesResult=*/false)) {
            return unsupported();
        }
        this->discardExpression(f.next()->type().slotCount());

        // Run the test-expression, and repeat the loop until the test-expression evaluates false.
        if (!this->pushExpression(*f.test())) {
            return unsupported();
        }
        fBuilder.branch_if_no_active_lanes_on_stack_top_equal(0, loopBodyID);

        // Jettison the test-expression.
        this->discardExpression(/*slots=*/1);
    }

    fBuilder.label(loopExitID);
    return true;
}
+
// Emits a general for-loop with full loop-mask bookkeeping. Zero-iteration loops are elided;
// loops with known trip counts and no break/continue/return take the cheaper maskless path.
bool Generator::writeForStatement(const ForStatement& f) {
    // If we've determined that the loop does not run, omit its code entirely.
    if (f.unrollInfo() && f.unrollInfo()->fCount == 0) {
        return true;
    }

    // If the loop doesn't escape early due to a `continue`, `break` or `return`, and the loop
    // conforms to ES2 structure, we know that we will run the full number of iterations across all
    // lanes and don't need to use a loop mask.
    Analysis::LoopControlFlowInfo loopInfo = Analysis::GetLoopControlFlowInfo(*f.statement());
    if (!loopInfo.fHasContinue && !loopInfo.fHasBreak && !loopInfo.fHasReturn && f.unrollInfo()) {
        return this->writeMasklessForStatement(f);
    }

    // Set up a break target.
    AutoLoopTarget breakTarget(this, &fCurrentBreakTarget);

    // Run the loop initializer.
    if (f.initializer() && !this->writeStatement(*f.initializer())) {
        return unsupported();
    }

    AutoContinueMask autoContinueMask(this);
    if (loopInfo.fHasContinue) {
        // Acquire a temporary slot for continue-mask storage.
        autoContinueMask.enable();
    }

    // Save off the original loop mask.
    fBuilder.enableExecutionMaskWrites();
    fBuilder.push_loop_mask();

    int loopTestID = fBuilder.nextLabelID();
    int loopBodyID = fBuilder.nextLabelID();

    // Jump down to the loop test so we can fall out of the loop immediately if it's zero-iteration.
    fBuilder.jump(loopTestID);

    // Write the for-loop body.
    fBuilder.label(loopBodyID);

    autoContinueMask.enterLoopBody();

    if (!this->writeStatement(*f.statement())) {
        return unsupported();
    }

    autoContinueMask.exitLoopBody();

    // Run the next-expression. Immediately discard its result.
    if (f.next()) {
        if (!this->pushExpression(*f.next(), /*usesResult=*/false)) {
            return unsupported();
        }
        this->discardExpression(f.next()->type().slotCount());
    }

    fBuilder.label(loopTestID);
    if (f.test()) {
        // Emit the test-expression, in order to combine it with the loop mask.
        if (!this->pushExpression(*f.test())) {
            return unsupported();
        }
        // Mask off any lanes in the loop mask where the test-expression is false; this breaks the
        // loop. We don't use the test expression for anything else, so jettison it.
        fBuilder.merge_loop_mask();
        this->discardExpression(/*slots=*/1);
    }

    // If any lanes are still running, go back to the top and run the loop body again.
    fBuilder.branch_if_any_lanes_active(loopBodyID);

    // If we hit a break statement on all lanes, we will branch here to escape from the loop.
    fBuilder.label(breakTarget.labelID());

    // Restore the loop mask.
    fBuilder.pop_loop_mask();
    fBuilder.disableExecutionMaskWrites();

    return true;
}
+
+bool Generator::writeExpressionStatement(const ExpressionStatement& e) {
+ if (!this->pushExpression(*e.expression(), /*usesResult=*/false)) {
+ return unsupported();
+ }
+ this->discardExpression(e.expression()->type().slotCount());
+ return true;
+}
+
// Emits an `if` whose test is dynamically uniform (same value in every active lane), using real
// branches instead of condition-mask updates so the untaken side is skipped entirely.
bool Generator::writeDynamicallyUniformIfStatement(const IfStatement& i) {
    SkASSERT(Analysis::IsDynamicallyUniformExpression(*i.test()));

    int falseLabelID = fBuilder.nextLabelID();
    int exitLabelID = fBuilder.nextLabelID();

    if (!this->pushExpression(*i.test())) {
        return unsupported();
    }

    // If no active lane has an all-ones (true) test value, jump past the if-true block.
    fBuilder.branch_if_no_active_lanes_on_stack_top_equal(~0, falseLabelID);

    if (!this->writeStatement(*i.ifTrue())) {
        return unsupported();
    }

    if (!i.ifFalse()) {
        // We don't have an if-false condition at all.
        fBuilder.label(falseLabelID);
    } else {
        // We do have an if-false condition. We've just completed the if-true block, so we need to
        // jump past the if-false block to avoid executing it.
        fBuilder.jump(exitLabelID);

        // The if-false block starts here.
        fBuilder.label(falseLabelID);

        if (!this->writeStatement(*i.ifFalse())) {
            return unsupported();
        }

        fBuilder.label(exitLabelID);
    }

    // Jettison the test-expression.
    this->discardExpression(/*slots=*/1);
    return true;
}
+
// Emits a general `if`: both branches execute under a condition mask derived from the test
// (lanes diverge). Uniform tests take the cheaper branching path instead.
bool Generator::writeIfStatement(const IfStatement& i) {
    // If the test condition is known to be uniform, we can skip over the untrue portion entirely.
    if (Analysis::IsDynamicallyUniformExpression(*i.test())) {
        return this->writeDynamicallyUniformIfStatement(i);
    }

    // Save the current condition-mask.
    fBuilder.enableExecutionMaskWrites();
    fBuilder.push_condition_mask();

    // Push the test condition mask.
    if (!this->pushExpression(*i.test())) {
        return unsupported();
    }

    // Merge the current condition-mask with the test condition, then run the if-true branch.
    fBuilder.merge_condition_mask();
    if (!this->writeStatement(*i.ifTrue())) {
        return unsupported();
    }

    if (i.ifFalse()) {
        // Negate the test-condition, then reapply it to the condition-mask.
        // Then, run the if-false branch.
        fBuilder.unary_op(BuilderOp::bitwise_not_int, /*slots=*/1);
        fBuilder.merge_condition_mask();
        if (!this->writeStatement(*i.ifFalse())) {
            return unsupported();
        }
    }

    // Jettison the test-expression, and restore the condition-mask.
    this->discardExpression(/*slots=*/1);
    fBuilder.pop_condition_mask();
    fBuilder.disableExecutionMaskWrites();

    return true;
}
+
// Emits a `return`: evaluates the return expression (if any) into the current function's result
// slots when needed, and masks returning lanes out of execution when a return mask is in use.
bool Generator::writeReturnStatement(const ReturnStatement& r) {
    if (r.expression()) {
        if (!this->pushExpression(*r.expression())) {
            return unsupported();
        }
        // Copy the value into the function's dedicated result slots, if this function needs them.
        if (this->needsFunctionResultSlots()) {
            this->popToSlotRange(fCurrentFunctionResult);
        }
    }
    if (fBuilder.executionMaskWritesAreEnabled() && this->needsReturnMask()) {
        fBuilder.mask_off_return_mask();
    }
    return true;
}
+
+bool Generator::writeSwitchStatement(const SwitchStatement& s) {
+    // Emits a switch by zeroing the loop mask and re-enabling lanes case-by-case via case ops.
+    // A default case is only supported in the final position.
+    const StatementArray& cases = s.cases();
+    SkASSERT(std::all_of(cases.begin(), cases.end(), [](const std::unique_ptr<Statement>& stmt) {
+        return stmt->is<SwitchCase>();
+    }));
+
+    // Set up a break target.
+    AutoLoopTarget breakTarget(this, &fCurrentBreakTarget);
+
+    // Save off the original loop mask.
+    fBuilder.enableExecutionMaskWrites();
+    fBuilder.push_loop_mask();
+
+    // Push the switch-case value, and write a default-mask that enables every lane which already
+    // has an active loop mask. As we match cases, the default mask will get pared down.
+    if (!this->pushExpression(*s.value())) {
+        return unsupported();
+    }
+    fBuilder.push_loop_mask();
+
+    // Zero out the loop mask; each case op will re-enable it as we go.
+    fBuilder.mask_off_loop_mask();
+
+    // Write each switch-case.
+    bool foundDefaultCase = false;
+    for (const std::unique_ptr<Statement>& stmt : cases) {
+        int skipLabelID = fBuilder.nextLabelID();
+
+        const SwitchCase& sc = stmt->as<SwitchCase>();
+        if (sc.isDefault()) {
+            foundDefaultCase = true;
+            if (stmt.get() != cases.back().get()) {
+                // We only support a default case when it is the very last case. If that changes,
+                // this logic will need to be updated.
+                return unsupported();
+            }
+            // Keep whatever lanes are executing now, and also enable any lanes in the default mask.
+            fBuilder.pop_and_reenable_loop_mask();
+            // Execute the switch-case block, if any lanes are alive to see it.
+            fBuilder.branch_if_no_lanes_active(skipLabelID);
+            if (!this->writeStatement(*sc.statement())) {
+                return unsupported();
+            }
+        } else {
+            // The case-op will enable the loop mask if the switch-value matches, and mask off lanes
+            // from the default-mask.
+            fBuilder.case_op(sc.value());
+            // Execute the switch-case block, if any lanes are alive to see it.
+            fBuilder.branch_if_no_lanes_active(skipLabelID);
+            if (!this->writeStatement(*sc.statement())) {
+                return unsupported();
+            }
+        }
+        fBuilder.label(skipLabelID);
+    }
+
+    // Jettison the switch value, and the default case mask if it was never consumed above.
+    // (pop_and_reenable_loop_mask consumed the default mask, so only one slot remains then.)
+    this->discardExpression(/*slots=*/foundDefaultCase ? 1 : 2);
+
+    // If we hit a break statement on all lanes, we will branch here to escape from the switch.
+    fBuilder.label(breakTarget.labelID());
+
+    // Restore the loop mask.
+    fBuilder.pop_loop_mask();
+    fBuilder.disableExecutionMaskWrites();
+    return true;
+}
+
+bool Generator::writeVarDeclaration(const VarDeclaration& v) {
+    // Initializes a newly declared variable's slots with its initial value, using unmasked
+    // stores; slots are zero-filled when no initializer is present.
+    if (v.value()) {
+        if (!this->pushExpression(*v.value())) {
+            return unsupported();
+        }
+        this->popToSlotRangeUnmasked(this->getVariableSlots(*v.var()));
+    } else {
+        // No initial value was provided; zero-fill the variable's slots.
+        this->zeroSlotRangeUnmasked(this->getVariableSlots(*v.var()));
+    }
+    return true;
+}
+
+bool Generator::pushExpression(const Expression& e, bool usesResult) {
+    // Pushes the value of an expression onto the value stack, dispatching on the expression
+    // kind. `usesResult` is only forwarded to postfix expressions (x++ / x--), which can avoid
+    // work when the result is discarded. Returns false (via unsupported()) for kinds that this
+    // generator cannot handle.
+    switch (e.kind()) {
+        case Expression::Kind::kBinary:
+            return this->pushBinaryExpression(e.as<BinaryExpression>());
+
+        case Expression::Kind::kChildCall:
+            return this->pushChildCall(e.as<ChildCall>());
+
+        case Expression::Kind::kConstructorArray:
+        case Expression::Kind::kConstructorArrayCast:
+        case Expression::Kind::kConstructorCompound:
+        case Expression::Kind::kConstructorStruct:
+            return this->pushConstructorCompound(e.asAnyConstructor());
+
+        case Expression::Kind::kConstructorCompoundCast:
+        case Expression::Kind::kConstructorScalarCast:
+            return this->pushConstructorCast(e.asAnyConstructor());
+
+        case Expression::Kind::kConstructorDiagonalMatrix:
+            return this->pushConstructorDiagonalMatrix(e.as<ConstructorDiagonalMatrix>());
+
+        case Expression::Kind::kConstructorMatrixResize:
+            return this->pushConstructorMatrixResize(e.as<ConstructorMatrixResize>());
+
+        case Expression::Kind::kConstructorSplat:
+            return this->pushConstructorSplat(e.as<ConstructorSplat>());
+
+        case Expression::Kind::kFieldAccess:
+            return this->pushFieldAccess(e.as<FieldAccess>());
+
+        case Expression::Kind::kFunctionCall:
+            return this->pushFunctionCall(e.as<FunctionCall>());
+
+        case Expression::Kind::kIndex:
+            return this->pushIndexExpression(e.as<IndexExpression>());
+
+        case Expression::Kind::kLiteral:
+            return this->pushLiteral(e.as<Literal>());
+
+        case Expression::Kind::kPrefix:
+            return this->pushPrefixExpression(e.as<PrefixExpression>());
+
+        case Expression::Kind::kPostfix:
+            return this->pushPostfixExpression(e.as<PostfixExpression>(), usesResult);
+
+        case Expression::Kind::kSwizzle:
+            return this->pushSwizzle(e.as<Swizzle>());
+
+        case Expression::Kind::kTernary:
+            return this->pushTernaryExpression(e.as<TernaryExpression>());
+
+        case Expression::Kind::kVariableReference:
+            return this->pushVariableReference(e.as<VariableReference>());
+
+        default:
+            return unsupported();
+    }
+}
+
+BuilderOp Generator::GetTypedOp(const SkSL::Type& type, const TypedOps& ops) {
+    // Selects the variant of an op matching the component type (float/int/uint/bool);
+    // returns BuilderOp::unsupported for non-numeric component kinds.
+    switch (type.componentType().numberKind()) {
+        case Type::NumberKind::kFloat:    return ops.fFloatOp;
+        case Type::NumberKind::kSigned:   return ops.fSignedOp;
+        case Type::NumberKind::kUnsigned: return ops.fUnsignedOp;
+        case Type::NumberKind::kBoolean:  return ops.fBooleanOp;
+        default:                          return BuilderOp::unsupported;
+    }
+}
+
+bool Generator::unaryOp(const SkSL::Type& type, const TypedOps& ops) {
+    // Emits the type-appropriate unary op from `ops` across all of the type's slots.
+    BuilderOp op = GetTypedOp(type, ops);
+    if (op == BuilderOp::unsupported) {
+        return unsupported();
+    }
+    fBuilder.unary_op(op, type.slotCount());
+    return true;
+}
+
+bool Generator::binaryOp(const SkSL::Type& type, const TypedOps& ops) {
+    // Emits the type-appropriate binary op from `ops` across all of the type's slots.
+    BuilderOp op = GetTypedOp(type, ops);
+    if (op == BuilderOp::unsupported) {
+        return unsupported();
+    }
+    fBuilder.binary_op(op, type.slotCount());
+    return true;
+}
+
+bool Generator::ternaryOp(const SkSL::Type& type, const TypedOps& ops) {
+    // Emits the type-appropriate ternary op from `ops` across all of the type's slots.
+    BuilderOp op = GetTypedOp(type, ops);
+    if (op == BuilderOp::unsupported) {
+        return unsupported();
+    }
+    fBuilder.ternary_op(op, type.slotCount());
+    return true;
+}
+
+void Generator::foldWithMultiOp(BuilderOp op, int elements) {
+    // Fold the top N elements on the stack using an op that supports multiple slots, e.g.:
+    // (A + B + C + D) -> add_2_floats $0..1 += $2..3
+    //                    add_float    $0    += $1
+    // Each binary_op combines the top `slots` elements into the `slots` beneath them, so every
+    // pass roughly halves the remaining element count, preferring the widest op that fits.
+    for (; elements >= 8; elements -= 4) {
+        fBuilder.binary_op(op, /*slots=*/4);
+    }
+    for (; elements >= 6; elements -= 3) {
+        fBuilder.binary_op(op, /*slots=*/3);
+    }
+    for (; elements >= 4; elements -= 2) {
+        fBuilder.binary_op(op, /*slots=*/2);
+    }
+    for (; elements >= 2; elements -= 1) {
+        fBuilder.binary_op(op, /*slots=*/1);
+    }
+}
+
+bool Generator::pushLValueOrExpression(LValue* lvalue, const Expression& expr) {
+    // Pushes from the lvalue when one is provided; otherwise evaluates the expression.
+    return lvalue ? this->push(*lvalue)
+                  : this->pushExpression(expr);
+}
+
+bool Generator::pushMatrixMultiply(LValue* lvalue,
+                                   const Expression& left,
+                                   const Expression& right,
+                                   int leftColumns,
+                                   int leftRows,
+                                   int rightColumns,
+                                   int rightRows) {
+    // Emits a matrix multiply (MxM, MxV or VxM) as a series of dot products, pushing an
+    // (outColumns x outRows) result. When `lvalue` is non-null (a *= assignment), the left
+    // operand is read from it and the final result is stored back into it.
+    SkASSERT(left.type().isMatrix() || left.type().isVector());
+    SkASSERT(right.type().isMatrix() || right.type().isVector());
+
+    SkASSERT(leftColumns == rightRows);
+    int outColumns = rightColumns,
+        outRows    = leftRows;
+
+    // Push the left matrix onto the adjacent-neighbor stack. We transpose it so that we can copy
+    // rows from it in a single op, instead of gathering one element at a time.
+    AutoStack matrixStack(this);
+    matrixStack.enter();
+    if (!this->pushLValueOrExpression(lvalue, left)) {
+        return unsupported();
+    }
+    fBuilder.transpose(leftColumns, leftRows);
+
+    // Push the right matrix as well, then go back to the primary stack.
+    if (!this->pushExpression(right)) {
+        return unsupported();
+    }
+    matrixStack.exit();
+
+    // Calculate the offsets of the left- and right-matrix, relative to the stack-top.
+    int leftMtxBase  = left.type().slotCount() + right.type().slotCount();
+    int rightMtxBase = right.type().slotCount();
+
+    // Emit each matrix element.
+    for (int c = 0; c < outColumns; ++c) {
+        for (int r = 0; r < outRows; ++r) {
+            // Dot a vector from left[*][r] with right[c][*].
+            // (Because the left matrix has been transposed, we actually pull left[r][*], which
+            // allows us to clone a column at once instead of cloning each slot individually.)
+            matrixStack.pushClone(SlotRange{r * leftColumns, leftColumns}, leftMtxBase);
+            matrixStack.pushClone(SlotRange{c * leftColumns, leftColumns}, rightMtxBase);
+            fBuilder.dot_floats(leftColumns);
+        }
+    }
+
+    // Dispose of the source matrices on the adjacent-neighbor stack.
+    matrixStack.enter();
+    this->discardExpression(left.type().slotCount());
+    this->discardExpression(right.type().slotCount());
+    matrixStack.exit();
+
+    // If this multiply was actually an assignment (via *=), write the result back to the lvalue.
+    return lvalue ? this->store(*lvalue)
+                  : true;
+}
+
+void Generator::foldComparisonOp(Operator op, int elements) {
+    // Folds a multi-slot comparison result on the stack down to a single boolean scalar.
+    switch (op.kind()) {
+        case OperatorKind::EQEQ:
+            // equal(x,y) returns a vector; use & to fold into a scalar.
+            this->foldWithMultiOp(BuilderOp::bitwise_and_n_ints, elements);
+            break;
+
+        case OperatorKind::NEQ:
+            // notEqual(x,y) returns a vector; use | to fold into a scalar.
+            this->foldWithMultiOp(BuilderOp::bitwise_or_n_ints, elements);
+            break;
+
+        default:
+            SkDEBUGFAIL("comparison only allows == and !=");
+            break;
+    }
+}
+
+bool Generator::pushStructuredComparison(LValue* left,
+                                         Operator op,
+                                         LValue* right,
+                                         const Type& type) {
+    // Emits == or != over aggregate types (structs, arrays of aggregates) by recursing into
+    // fields/elements and folding the per-piece booleans into one scalar result on the stack.
+    if (type.isStruct()) {
+        // Compare every field in the struct.
+        SkSpan<const Type::Field> fields = type.fields();
+        int currentSlot = 0;
+        for (size_t index = 0; index < fields.size(); ++index) {
+            const Type& fieldType = *fields[index].fType;
+            const int fieldSlotCount = fieldType.slotCount();
+            UnownedLValueSlice fieldLeft {left, currentSlot, fieldSlotCount};
+            UnownedLValueSlice fieldRight{right, currentSlot, fieldSlotCount};
+            if (!this->pushStructuredComparison(&fieldLeft, op, &fieldRight, fieldType)) {
+                return unsupported();
+            }
+            currentSlot += fieldSlotCount;
+        }
+
+        this->foldComparisonOp(op, fields.size());
+        return true;
+    }
+
+    if (type.isArray()) {
+        const Type& indexedType = type.componentType();
+        if (indexedType.numberKind() == Type::NumberKind::kNonnumeric) {
+            // Compare every element in the array.
+            const int indexedSlotCount = indexedType.slotCount();
+            int currentSlot = 0;
+            for (int index = 0; index < type.columns(); ++index) {
+                UnownedLValueSlice indexedLeft {left, currentSlot, indexedSlotCount};
+                UnownedLValueSlice indexedRight{right, currentSlot, indexedSlotCount};
+                if (!this->pushStructuredComparison(&indexedLeft, op, &indexedRight, indexedType)) {
+                    return unsupported();
+                }
+                currentSlot += indexedSlotCount;
+            }
+
+            this->foldComparisonOp(op, type.columns());
+            return true;
+        }
+    }
+
+    // We've winnowed down to a single element, or an array of homogeneous numeric elements.
+    // Push the elements onto the stack, then compare them.
+    if (!this->push(*left) || !this->push(*right)) {
+        return unsupported();
+    }
+    switch (op.kind()) {
+        case OperatorKind::EQEQ:
+            if (!this->binaryOp(type, kEqualOps)) {
+                return unsupported();
+            }
+            break;
+
+        case OperatorKind::NEQ:
+            if (!this->binaryOp(type, kNotEqualOps)) {
+                return unsupported();
+            }
+            break;
+
+        default:
+            SkDEBUGFAIL("comparison only allows == and !=");
+            break;
+    }
+
+    this->foldComparisonOp(op, type.slotCount());
+    return true;
+}
+
+bool Generator::pushBinaryExpression(const BinaryExpression& e) {
+    // Convenience overload: unpacks the BinaryExpression into its operands and operator.
+    return this->pushBinaryExpression(*e.left(), e.getOperator(), *e.right());
+}
+
+bool Generator::pushBinaryExpression(const Expression& left, Operator op, const Expression& right) {
+    // Emits a binary expression onto the stack. Handles: canonicalizing > / >= into < / <=,
+    // struct/array comparisons, comma expressions, scalar<->vector splatting for mixed-type
+    // math, matrix multiplication, short-circuiting && / || (when the RHS has side effects),
+    // and compound assignment (the result is stored back through the lvalue).
+    switch (op.kind()) {
+        // Rewrite greater-than ops as their less-than equivalents.
+        case OperatorKind::GT:
+            return this->pushBinaryExpression(right, OperatorKind::LT, left);
+
+        case OperatorKind::GTEQ:
+            return this->pushBinaryExpression(right, OperatorKind::LTEQ, left);
+
+        // Handle struct and array comparisons.
+        case OperatorKind::EQEQ:
+        case OperatorKind::NEQ:
+            if (left.type().isStruct() || left.type().isArray()) {
+                SkASSERT(left.type().matches(right.type()));
+                std::unique_ptr<LValue> lvLeft = this->makeLValue(left, /*allowScratch=*/true);
+                std::unique_ptr<LValue> lvRight = this->makeLValue(right, /*allowScratch=*/true);
+                return this->pushStructuredComparison(lvLeft.get(), op, lvRight.get(), left.type());
+            }
+            break;
+
+        // Emit comma expressions.
+        case OperatorKind::COMMA:
+            if (Analysis::HasSideEffects(left)) {
+                // Evaluate the left side purely for its side effects, then discard its value.
+                if (!this->pushExpression(left, /*usesResult=*/false)) {
+                    return unsupported();
+                }
+                this->discardExpression(left.type().slotCount());
+            }
+            return this->pushExpression(right);
+
+        default:
+            break;
+    }
+
+    // Handle binary expressions with mismatched types.
+    bool vectorizeLeft = false, vectorizeRight = false;
+    if (!left.type().matches(right.type())) {
+        if (left.type().componentType().numberKind() != right.type().componentType().numberKind()) {
+            return unsupported();
+        }
+        if (left.type().isScalar() && (right.type().isVector() || right.type().isMatrix())) {
+            vectorizeLeft = true;
+        } else if ((left.type().isVector() || left.type().isMatrix()) && right.type().isScalar()) {
+            vectorizeRight = true;
+        }
+    }
+
+    const Type& type = vectorizeLeft ? right.type() : left.type();
+
+    // If this is an assignment...
+    std::unique_ptr<LValue> lvalue;
+    if (op.isAssignment()) {
+        // ... turn the left side into an lvalue.
+        lvalue = this->makeLValue(left);
+        if (!lvalue) {
+            return unsupported();
+        }
+
+        // Handle simple assignment (`var = expr`).
+        if (op.kind() == OperatorKind::EQ) {
+            return this->pushExpression(right) &&
+                   this->store(*lvalue);
+        }
+
+        // Strip off the assignment from the op (turning += into +).
+        op = op.removeAssignment();
+    }
+
+    // Handle matrix multiplication (MxM/MxV/VxM).
+    if (op.kind() == OperatorKind::STAR) {
+        // Matrix * matrix:
+        if (left.type().isMatrix() && right.type().isMatrix()) {
+            return this->pushMatrixMultiply(lvalue.get(), left, right,
+                                            left.type().columns(), left.type().rows(),
+                                            right.type().columns(), right.type().rows());
+        }
+
+        // Vector * matrix:
+        if (left.type().isVector() && right.type().isMatrix()) {
+            return this->pushMatrixMultiply(lvalue.get(), left, right,
+                                            left.type().columns(), 1,
+                                            right.type().columns(), right.type().rows());
+        }
+
+        // Matrix * vector:
+        if (left.type().isMatrix() && right.type().isVector()) {
+            return this->pushMatrixMultiply(lvalue.get(), left, right,
+                                            left.type().columns(), left.type().rows(),
+                                            1, right.type().columns());
+        }
+    }
+
+    if (!vectorizeLeft && !vectorizeRight && !type.matches(right.type())) {
+        // We have mismatched types but don't know how to handle them.
+        return unsupported();
+    }
+
+    // Handle binary ops which require short-circuiting.
+    switch (op.kind()) {
+        case OperatorKind::LOGICALAND:
+            if (Analysis::HasSideEffects(right)) {
+                // If the RHS has side effects, we rewrite `a && b` as `a ? b : false`. This
+                // generates pretty solid code and gives us the required short-circuit behavior.
+                SkASSERT(!op.isAssignment());
+                SkASSERT(type.componentType().isBoolean());
+                SkASSERT(type.slotCount() == 1);  // operator&& only works with scalar types
+                Literal falseLiteral{Position{}, 0.0, &right.type()};
+                return this->pushTernaryExpression(left, right, falseLiteral);
+            }
+            break;
+
+        case OperatorKind::LOGICALOR:
+            if (Analysis::HasSideEffects(right)) {
+                // If the RHS has side effects, we rewrite `a || b` as `a ? true : b`.
+                SkASSERT(!op.isAssignment());
+                SkASSERT(type.componentType().isBoolean());
+                SkASSERT(type.slotCount() == 1);  // operator|| only works with scalar types
+                Literal trueLiteral{Position{}, 1.0, &right.type()};
+                return this->pushTernaryExpression(left, trueLiteral, right);
+            }
+            break;
+
+        default:
+            break;
+    }
+
+    // Push the left- and right-expressions onto the stack.
+    if (!this->pushLValueOrExpression(lvalue.get(), left)) {
+        return unsupported();
+    }
+    if (vectorizeLeft) {
+        fBuilder.push_duplicates(right.type().slotCount() - 1);
+    }
+    if (!this->pushExpression(right)) {
+        return unsupported();
+    }
+    if (vectorizeRight) {
+        fBuilder.push_duplicates(left.type().slotCount() - 1);
+    }
+
+    switch (op.kind()) {
+        case OperatorKind::PLUS:
+            if (!this->binaryOp(type, kAddOps)) {
+                return unsupported();
+            }
+            break;
+
+        case OperatorKind::MINUS:
+            if (!this->binaryOp(type, kSubtractOps)) {
+                return unsupported();
+            }
+            break;
+
+        case OperatorKind::STAR:
+            if (!this->binaryOp(type, kMultiplyOps)) {
+                return unsupported();
+            }
+            break;
+
+        case OperatorKind::SLASH:
+            if (!this->binaryOp(type, kDivideOps)) {
+                return unsupported();
+            }
+            break;
+
+        case OperatorKind::LT:
+        case OperatorKind::GT:
+            if (!this->binaryOp(type, kLessThanOps)) {
+                return unsupported();
+            }
+            SkASSERT(type.slotCount() == 1);  // operator< only works with scalar types
+            break;
+
+        case OperatorKind::LTEQ:
+        case OperatorKind::GTEQ:
+            if (!this->binaryOp(type, kLessThanEqualOps)) {
+                return unsupported();
+            }
+            SkASSERT(type.slotCount() == 1);  // operator<= only works with scalar types
+            break;
+
+        case OperatorKind::EQEQ:
+            if (!this->binaryOp(type, kEqualOps)) {
+                return unsupported();
+            }
+            this->foldComparisonOp(op, type.slotCount());
+            break;
+
+        case OperatorKind::NEQ:
+            if (!this->binaryOp(type, kNotEqualOps)) {
+                return unsupported();
+            }
+            this->foldComparisonOp(op, type.slotCount());
+            break;
+
+        case OperatorKind::LOGICALAND:
+        case OperatorKind::BITWISEAND:
+            // For logical-and, we verified above that the RHS does not have side effects, so we
+            // don't need to worry about short-circuiting side effects.
+            fBuilder.binary_op(BuilderOp::bitwise_and_n_ints, type.slotCount());
+            break;
+
+        case OperatorKind::LOGICALOR:
+        case OperatorKind::BITWISEOR:
+            // For logical-or, we verified above that the RHS does not have side effects.
+            fBuilder.binary_op(BuilderOp::bitwise_or_n_ints, type.slotCount());
+            break;
+
+        case OperatorKind::LOGICALXOR:
+        case OperatorKind::BITWISEXOR:
+            // Logical-xor does not short circuit.
+            fBuilder.binary_op(BuilderOp::bitwise_xor_n_ints, type.slotCount());
+            break;
+
+        default:
+            return unsupported();
+    }
+
+    // If we have an lvalue, we need to write the result back into it.
+    return lvalue ? this->store(*lvalue)
+                  : true;
+}
+
+bool Generator::pushConstructorCompound(const AnyConstructor& c) {
+    // A compound constructor's value is simply its arguments' values, pushed in order.
+    for (const std::unique_ptr<Expression> &arg : c.argumentSpan()) {
+        if (!this->pushExpression(*arg)) {
+            return unsupported();
+        }
+    }
+    return true;
+}
+
+bool Generator::pushChildCall(const ChildCall& c) {
+    // Invokes a child effect (shader, color filter, or blender). Arguments are transferred
+    // from the stack into the src/dst registers, and the resulting color (src.rgba) is pushed
+    // onto the stack afterwards.
+    int* childIdx = fChildEffectMap.find(&c.child());
+    SkASSERT(childIdx != nullptr);
+    SkASSERT(!c.arguments().empty());
+
+    // Save the dst.rgba fields; these hold our execution masks, and could potentially be
+    // clobbered by the child effect.
+    fBuilder.push_dst_rgba();
+
+    // All child calls have at least one argument.
+    const Expression* arg = c.arguments()[0].get();
+    if (!this->pushExpression(*arg)) {
+        return unsupported();
+    }
+
+    // Copy arguments from the stack into src/dst as required by this particular child-call.
+    switch (c.child().type().typeKind()) {
+        case Type::TypeKind::kShader: {
+            // The argument must be a float2 (the sample coordinates).
+            SkASSERT(c.arguments().size() == 1);
+            SkASSERT(arg->type().matches(*fContext.fTypes.fFloat2));
+            fBuilder.pop_src_rg();
+            fBuilder.invoke_shader(*childIdx);
+            break;
+        }
+        case Type::TypeKind::kColorFilter: {
+            // The argument must be a half4/float4 (the input color).
+            SkASSERT(c.arguments().size() == 1);
+            SkASSERT(arg->type().matches(*fContext.fTypes.fHalf4) ||
+                     arg->type().matches(*fContext.fTypes.fFloat4));
+            fBuilder.pop_src_rgba();
+            fBuilder.invoke_color_filter(*childIdx);
+            break;
+        }
+        case Type::TypeKind::kBlender: {
+            // The first argument must be a half4/float4.
+            SkASSERT(c.arguments().size() == 2);
+            SkASSERT(arg->type().matches(*fContext.fTypes.fHalf4) ||
+                     arg->type().matches(*fContext.fTypes.fFloat4));
+
+            // The second argument must also be a half4/float4.
+            arg = c.arguments()[1].get();
+            SkASSERT(arg->type().matches(*fContext.fTypes.fHalf4) ||
+                     arg->type().matches(*fContext.fTypes.fFloat4));
+
+            if (!this->pushExpression(*arg)) {
+                return unsupported();
+            }
+
+            fBuilder.pop_dst_rgba();
+            fBuilder.pop_src_rgba();
+            fBuilder.invoke_blender(*childIdx);
+            break;
+        }
+        default: {
+            SkDEBUGFAILF("cannot sample from type '%s'", c.child().type().description().c_str());
+        }
+    }
+
+    // Restore dst.rgba so our execution masks are back to normal.
+    fBuilder.pop_dst_rgba();
+
+    // The child call has returned the result color via src.rgba; push it onto the stack.
+    fBuilder.push_src_rgba();
+    return true;
+}
+
+bool Generator::pushConstructorCast(const AnyConstructor& c) {
+    // Converts a single-argument constructor-cast to the destination type. Because this
+    // generator ignores precision, many casts are no-ops; bool<->numeric conversions are
+    // emitted via compare/bitwise tricks, and float<->int conversions use dedicated ops.
+    SkASSERT(c.argumentSpan().size() == 1);
+    const Expression& inner = *c.argumentSpan().front();
+    SkASSERT(inner.type().slotCount() == c.type().slotCount());
+
+    if (!this->pushExpression(inner)) {
+        return unsupported();
+    }
+    if (inner.type().componentType().numberKind() == c.type().componentType().numberKind()) {
+        // Since we ignore type precision, this cast is effectively a no-op.
+        return true;
+    }
+    if (inner.type().componentType().isSigned() && c.type().componentType().isUnsigned()) {
+        // Treat uint(int) as a no-op.
+        return true;
+    }
+    if (inner.type().componentType().isUnsigned() && c.type().componentType().isSigned()) {
+        // Treat int(uint) as a no-op.
+        return true;
+    }
+
+    if (c.type().componentType().isBoolean()) {
+        // Converting int or float to boolean can be accomplished via `notEqual(x, 0)`.
+        fBuilder.push_zeros(c.type().slotCount());
+        return this->binaryOp(inner.type(), kNotEqualOps);
+    }
+    if (inner.type().componentType().isBoolean()) {
+        // Converting boolean to int or float can be accomplished via bitwise-and.
+        if (c.type().componentType().isFloat()) {
+            fBuilder.push_literal_f(1.0f);
+        } else if (c.type().componentType().isSigned() || c.type().componentType().isUnsigned()) {
+            fBuilder.push_literal_i(1);
+        } else {
+            SkDEBUGFAILF("unexpected cast from bool to %s", c.type().description().c_str());
+            return unsupported();
+        }
+        fBuilder.push_duplicates(c.type().slotCount() - 1);
+        fBuilder.binary_op(BuilderOp::bitwise_and_n_ints, c.type().slotCount());
+        return true;
+    }
+    // We have dedicated ops to cast between float and integer types.
+    if (inner.type().componentType().isFloat()) {
+        if (c.type().componentType().isSigned()) {
+            fBuilder.unary_op(BuilderOp::cast_to_int_from_float, c.type().slotCount());
+            return true;
+        }
+        if (c.type().componentType().isUnsigned()) {
+            fBuilder.unary_op(BuilderOp::cast_to_uint_from_float, c.type().slotCount());
+            return true;
+        }
+    } else if (c.type().componentType().isFloat()) {
+        if (inner.type().componentType().isSigned()) {
+            fBuilder.unary_op(BuilderOp::cast_to_float_from_int, c.type().slotCount());
+            return true;
+        }
+        if (inner.type().componentType().isUnsigned()) {
+            fBuilder.unary_op(BuilderOp::cast_to_float_from_uint, c.type().slotCount());
+            return true;
+        }
+    }
+
+    // Fix: report the source type first and the destination type second, to match the
+    // "cast from %s to %s" format string (the arguments were previously swapped; compare the
+    // "unexpected cast from bool to %s" message above, where c.type() is the destination).
+    SkDEBUGFAILF("unexpected cast from %s to %s",
+                 inner.type().description().c_str(), c.type().description().c_str());
+    return unsupported();
+}
+
+bool Generator::pushConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c) {
+    // Pushes a zero and the diagonal value, then emits diagonal_matrix to expand them into a
+    // full columns-by-rows matrix. (NOTE(review): the zero presumably supplies the off-diagonal
+    // cells — confirm against the Builder's diagonal_matrix implementation.)
+    fBuilder.push_zeros(1);
+    if (!this->pushExpression(*c.argument())) {
+        return unsupported();
+    }
+    fBuilder.diagonal_matrix(c.type().columns(), c.type().rows());
+
+    return true;
+}
+
+bool Generator::pushConstructorMatrixResize(const ConstructorMatrixResize& c) {
+    // Pushes the source matrix, then emits matrix_resize to convert it from the argument's
+    // dimensions to the constructor's dimensions.
+    if (!this->pushExpression(*c.argument())) {
+        return unsupported();
+    }
+    fBuilder.matrix_resize(c.argument()->type().columns(),
+                           c.argument()->type().rows(),
+                           c.type().columns(),
+                           c.type().rows());
+    return true;
+}
+
+bool Generator::pushConstructorSplat(const ConstructorSplat& c) {
+    // Pushes the scalar argument once, then duplicates it to fill every slot of the result type.
+    if (!this->pushExpression(*c.argument())) {
+        return unsupported();
+    }
+    fBuilder.push_duplicates(c.type().slotCount() - 1);
+    return true;
+}
+
+bool Generator::pushFieldAccess(const FieldAccess& f) {
+    // Pushes the value of a struct-field access onto the stack.
+    // If possible, get direct field access via the lvalue.
+    std::unique_ptr<LValue> lvalue = this->makeLValue(f, /*allowScratch=*/true);
+    return lvalue && this->push(*lvalue);
+}
+
+bool Generator::pushFunctionCall(const FunctionCall& c) {
+    // Emits a (non-intrinsic) function call by inlining the body in place: arguments are
+    // copied into the callee's parameter slots, the body is emitted, out/inout parameters are
+    // written back, and the result is left on the stack. Recursion is never allowed, so each
+    // function's parameter slots are exclusively its own.
+    if (c.function().isIntrinsic()) {
+        return this->pushIntrinsic(c);
+    }
+
+    // Keep track of the current function.
+    const FunctionDefinition* lastFunction = fCurrentFunction;
+    fCurrentFunction = c.function().definition();
+
+    // Skip over the function body entirely if there are no active lanes.
+    // (If the function call was trivial, it would likely have been inlined in the frontend, so this
+    // is likely to save a significant amount of work if the lanes are all dead.)
+    int skipLabelID = fBuilder.nextLabelID();
+    fBuilder.branch_if_no_lanes_active(skipLabelID);
+
+    // Save off the return mask.
+    if (this->needsReturnMask()) {
+        fBuilder.enableExecutionMaskWrites();
+        fBuilder.push_return_mask();
+    }
+
+    // Write all the arguments into their parameter's variable slots. Because we never allow
+    // recursion, we don't need to worry about overwriting any existing values in those slots.
+    // (In fact, we don't even need to apply the write mask.)
+    SkTArray<std::unique_ptr<LValue>> lvalues;
+    lvalues.resize(c.arguments().size());
+
+    for (int index = 0; index < c.arguments().size(); ++index) {
+        const Expression& arg = *c.arguments()[index];
+        const Variable& param = *c.function().parameters()[index];
+
+        // Use LValues for out-parameters and inout-parameters, so we can store back to them later.
+        if (IsInoutParameter(param) || IsOutParameter(param)) {
+            lvalues[index] = this->makeLValue(arg);
+            if (!lvalues[index]) {
+                return unsupported();
+            }
+            // There are no guarantees on the starting value of an out-parameter, so we only need to
+            // store the lvalues associated with an inout parameter.
+            if (IsInoutParameter(param)) {
+                if (!this->push(*lvalues[index])) {
+                    return unsupported();
+                }
+                this->popToSlotRangeUnmasked(this->getVariableSlots(param));
+            }
+        } else {
+            // Copy input arguments into their respective parameter slots.
+            if (!this->pushExpression(arg)) {
+                return unsupported();
+            }
+            this->popToSlotRangeUnmasked(this->getVariableSlots(param));
+        }
+    }
+
+    // Emit the function body.
+    std::optional<SlotRange> r = this->writeFunction(c, *fCurrentFunction);
+    if (!r.has_value()) {
+        return unsupported();
+    }
+
+    // Restore the original return mask.
+    if (this->needsReturnMask()) {
+        fBuilder.pop_return_mask();
+        fBuilder.disableExecutionMaskWrites();
+    }
+
+    // If the function uses result slots, move its result from slots onto the stack.
+    if (this->needsFunctionResultSlots()) {
+        fBuilder.push_slots(*r);
+    }
+
+    // We've returned back to the last function.
+    fCurrentFunction = lastFunction;
+
+    // Copy out-parameters and inout-parameters back to their homes.
+    for (int index = 0; index < c.arguments().size(); ++index) {
+        if (lvalues[index]) {
+            // Only out- and inout-parameters should have an associated lvalue.
+            const Variable& param = *c.function().parameters()[index];
+            SkASSERT(IsInoutParameter(param) || IsOutParameter(param));
+
+            // Copy the parameter's slots directly into the lvalue.
+            fBuilder.push_slots(this->getVariableSlots(param));
+            if (!this->store(*lvalues[index])) {
+                return unsupported();
+            }
+            this->discardExpression(param.type().slotCount());
+        }
+    }
+
+    // If no lanes were active at the call site, we branched directly here and skipped the body.
+    fBuilder.label(skipLabelID);
+    return true;
+}
+
+bool Generator::pushIndexExpression(const IndexExpression& i) {
+    // Pushes the value of an indexing expression (array/vector/matrix subscript) via its lvalue.
+    std::unique_ptr<LValue> lvalue = this->makeLValue(i, /*allowScratch=*/true);
+    return lvalue && this->push(*lvalue);
+}
+
+bool Generator::pushIntrinsic(const FunctionCall& c) {
+    // Dispatches an intrinsic call to the arity-specific overloads; only one- to
+    // three-argument intrinsics are supported.
+    const ExpressionArray& args = c.arguments();
+    switch (args.size()) {
+        case 1:
+            return this->pushIntrinsic(c.function().intrinsicKind(), *args[0]);
+
+        case 2:
+            return this->pushIntrinsic(c.function().intrinsicKind(), *args[0], *args[1]);
+
+        case 3:
+            return this->pushIntrinsic(c.function().intrinsicKind(), *args[0], *args[1], *args[2]);
+
+        default:
+            break;
+    }
+
+    return unsupported();
+}
+
+bool Generator::pushLengthIntrinsic(int slotCount) {
+    // Emits `length()` for a value which the caller has already pushed onto the stack.
+    if (slotCount > 1) {
+        // Implement `length(vec)` as `sqrt(dot(x, x))`.
+        fBuilder.push_clone(slotCount);
+        fBuilder.dot_floats(slotCount);
+        fBuilder.unary_op(BuilderOp::sqrt_float, 1);
+    } else {
+        // `length(scalar)` is `sqrt(x^2)`, which is equivalent to `abs(x)`.
+        fBuilder.unary_op(BuilderOp::abs_float, 1);
+    }
+    return true;
+}
+
+bool Generator::pushVectorizedExpression(const Expression& expr, const Type& vectorType) {
+    // Pushes an expression, splatting a scalar out to match `vectorType`'s slot count.
+    if (!this->pushExpression(expr)) {
+        return unsupported();
+    }
+    if (vectorType.slotCount() > expr.type().slotCount()) {
+        // Only scalars may be expanded to a larger slot count.
+        SkASSERT(expr.type().slotCount() == 1);
+        fBuilder.push_duplicates(vectorType.slotCount() - expr.type().slotCount());
+    }
+    return true;
+}
+
+bool Generator::pushIntrinsic(const TypedOps& ops, const Expression& arg0) {
+    // Pushes the argument, then applies the type-matched unary op from `ops`.
+    if (!this->pushExpression(arg0)) {
+        return unsupported();
+    }
+    return this->unaryOp(arg0.type(), ops);
+}
+
+bool Generator::pushIntrinsic(BuilderOp builderOp, const Expression& arg0) {
+    // Pushes the argument, then applies `builderOp` across all of its slots.
+    if (!this->pushExpression(arg0)) {
+        return unsupported();
+    }
+    fBuilder.unary_op(builderOp, arg0.type().slotCount());
+    return true;
+}
+
+bool Generator::pushIntrinsic(IntrinsicKind intrinsic, const Expression& arg0) {
+ switch (intrinsic) {
+ case IntrinsicKind::k_abs_IntrinsicKind:
+ return this->pushIntrinsic(kAbsOps, arg0);
+
+ case IntrinsicKind::k_any_IntrinsicKind:
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ this->foldWithMultiOp(BuilderOp::bitwise_or_n_ints, arg0.type().slotCount());
+ return true;
+
+ case IntrinsicKind::k_all_IntrinsicKind:
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ this->foldWithMultiOp(BuilderOp::bitwise_and_n_ints, arg0.type().slotCount());
+ return true;
+
+ case IntrinsicKind::k_acos_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::acos_float, arg0);
+
+ case IntrinsicKind::k_asin_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::asin_float, arg0);
+
+ case IntrinsicKind::k_atan_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::atan_float, arg0);
+
+ case IntrinsicKind::k_ceil_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::ceil_float, arg0);
+
+ case IntrinsicKind::k_cos_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::cos_float, arg0);
+
+ case IntrinsicKind::k_degrees_IntrinsicKind: {
+ Literal lit180OverPi{Position{}, 57.2957795131f, &arg0.type().componentType()};
+ return this->pushBinaryExpression(arg0, OperatorKind::STAR, lit180OverPi);
+ }
+ case IntrinsicKind::k_floatBitsToInt_IntrinsicKind:
+ case IntrinsicKind::k_floatBitsToUint_IntrinsicKind:
+ case IntrinsicKind::k_intBitsToFloat_IntrinsicKind:
+ case IntrinsicKind::k_uintBitsToFloat_IntrinsicKind:
+ return this->pushExpression(arg0);
+
+ case IntrinsicKind::k_exp_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::exp_float, arg0);
+
+ case IntrinsicKind::k_exp2_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::exp2_float, arg0);
+
+ case IntrinsicKind::k_floor_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::floor_float, arg0);
+
+ case IntrinsicKind::k_fract_IntrinsicKind:
+ // Implement fract as `x - floor(x)`.
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ fBuilder.push_clone(arg0.type().slotCount());
+ fBuilder.unary_op(BuilderOp::floor_float, arg0.type().slotCount());
+ return this->binaryOp(arg0.type(), kSubtractOps);
+
+ case IntrinsicKind::k_inverse_IntrinsicKind:
+ SkASSERT(arg0.type().isMatrix());
+ SkASSERT(arg0.type().rows() == arg0.type().columns());
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ fBuilder.inverse_matrix(arg0.type().rows());
+ return true;
+
+ case IntrinsicKind::k_inversesqrt_IntrinsicKind:
+ return this->pushIntrinsic(kInverseSqrtOps, arg0);
+
+ case IntrinsicKind::k_length_IntrinsicKind:
+ return this->pushExpression(arg0) &&
+ this->pushLengthIntrinsic(arg0.type().slotCount());
+
+ case IntrinsicKind::k_log_IntrinsicKind:
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ fBuilder.unary_op(BuilderOp::log_float, arg0.type().slotCount());
+ return true;
+
+ case IntrinsicKind::k_log2_IntrinsicKind:
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ fBuilder.unary_op(BuilderOp::log2_float, arg0.type().slotCount());
+ return true;
+
+ case IntrinsicKind::k_normalize_IntrinsicKind: {
+ // Implement normalize as `x / length(x)`. First, push the expression.
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ int slotCount = arg0.type().slotCount();
+ if (slotCount > 1) {
+ // Instead of `x / sqrt(dot(x, x))`, we can get roughly the same result in less time
+ // by computing `x * invsqrt(dot(x, x))`.
+ fBuilder.push_clone(slotCount);
+ fBuilder.push_clone(slotCount);
+ fBuilder.dot_floats(slotCount);
+
+ // Compute `vec(inversesqrt(dot(x, x)))`.
+ fBuilder.unary_op(BuilderOp::invsqrt_float, 1);
+ fBuilder.push_duplicates(slotCount - 1);
+
+ // Return `x * vec(inversesqrt(dot(x, x)))`.
+ return this->binaryOp(arg0.type(), kMultiplyOps);
+ } else {
+ // For single-slot normalization, we can simplify `sqrt(x * x)` into `abs(x)`.
+ fBuilder.push_clone(slotCount);
+ fBuilder.unary_op(BuilderOp::abs_float, 1);
+ return this->binaryOp(arg0.type(), kDivideOps);
+ }
+ }
+ case IntrinsicKind::k_not_IntrinsicKind:
+ return this->pushPrefixExpression(OperatorKind::LOGICALNOT, arg0);
+
+ case IntrinsicKind::k_radians_IntrinsicKind: {
+ Literal litPiOver180{Position{}, 0.01745329251f, &arg0.type().componentType()};
+ return this->pushBinaryExpression(arg0, OperatorKind::STAR, litPiOver180);
+ }
+ case IntrinsicKind::k_saturate_IntrinsicKind: {
+ // Implement saturate as clamp(arg, 0, 1).
+ Literal zeroLiteral{Position{}, 0.0, &arg0.type().componentType()};
+ Literal oneLiteral{Position{}, 1.0, &arg0.type().componentType()};
+ return this->pushIntrinsic(k_clamp_IntrinsicKind, arg0, zeroLiteral, oneLiteral);
+ }
+ case IntrinsicKind::k_sign_IntrinsicKind: {
+ // Implement floating-point sign() as `clamp(arg * FLT_MAX, -1, 1)`.
+ // FLT_MIN * FLT_MAX evaluates to 4, so multiplying any float value against FLT_MAX is
+ // sufficient to ensure that |value| is always 1 or greater (excluding zero and nan).
+ // Integer sign() doesn't need to worry about fractional values or nans, and can simply
+ // be `clamp(arg, -1, 1)`.
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ if (arg0.type().componentType().isFloat()) {
+ Literal fltMaxLiteral{Position{}, FLT_MAX, &arg0.type().componentType()};
+ if (!this->pushVectorizedExpression(fltMaxLiteral, arg0.type())) {
+ return unsupported();
+ }
+ if (!this->binaryOp(arg0.type(), kMultiplyOps)) {
+ return unsupported();
+ }
+ }
+ Literal neg1Literal{Position{}, -1.0, &arg0.type().componentType()};
+ if (!this->pushVectorizedExpression(neg1Literal, arg0.type())) {
+ return unsupported();
+ }
+ if (!this->binaryOp(arg0.type(), kMaxOps)) {
+ return unsupported();
+ }
+ Literal pos1Literal{Position{}, 1.0, &arg0.type().componentType()};
+ if (!this->pushVectorizedExpression(pos1Literal, arg0.type())) {
+ return unsupported();
+ }
+ return this->binaryOp(arg0.type(), kMinOps);
+ }
+ case IntrinsicKind::k_sin_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::sin_float, arg0);
+
+ case IntrinsicKind::k_sqrt_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::sqrt_float, arg0);
+
+ case IntrinsicKind::k_tan_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::tan_float, arg0);
+
+ case IntrinsicKind::k_transpose_IntrinsicKind:
+ SkASSERT(arg0.type().isMatrix());
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ fBuilder.transpose(arg0.type().columns(), arg0.type().rows());
+ return true;
+
+ case IntrinsicKind::k_trunc_IntrinsicKind:
+ // Implement trunc as `float(int(x))`, since float-to-int rounds toward zero.
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ fBuilder.unary_op(BuilderOp::cast_to_int_from_float, arg0.type().slotCount());
+ fBuilder.unary_op(BuilderOp::cast_to_float_from_int, arg0.type().slotCount());
+ return true;
+
+ case IntrinsicKind::k_fromLinearSrgb_IntrinsicKind:
+ case IntrinsicKind::k_toLinearSrgb_IntrinsicKind: {
+ // The argument must be a half3.
+ SkASSERT(arg0.type().matches(*fContext.fTypes.fHalf3));
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ // The intrinsics accept a three-component value; add alpha for the push/pop_src_rgba
+ fBuilder.push_literal_f(1.0f);
+ // Copy arguments from the stack into src
+ fBuilder.pop_src_rgba();
+
+ if (intrinsic == IntrinsicKind::k_fromLinearSrgb_IntrinsicKind) {
+ fBuilder.invoke_from_linear_srgb();
+ } else {
+ fBuilder.invoke_to_linear_srgb();
+ }
+
+ // The xform has left the result color in src.rgba; push it onto the stack
+ fBuilder.push_src_rgba();
+ // The intrinsic returns a three-component value; discard alpha
+ this->discardExpression(/*slots=*/1);
+ return true;
+ }
+
+ default:
+ break;
+ }
+ return unsupported();
+}
+
+bool Generator::pushIntrinsic(const TypedOps& ops, const Expression& arg0, const Expression& arg1) {
+ if (!this->pushExpression(arg0) || !this->pushVectorizedExpression(arg1, arg0.type())) {
+ return unsupported();
+ }
+ return this->binaryOp(arg0.type(), ops);
+}
+
+bool Generator::pushIntrinsic(BuilderOp builderOp, const Expression& arg0, const Expression& arg1) {
+ if (!this->pushExpression(arg0) || !this->pushVectorizedExpression(arg1, arg0.type())) {
+ return unsupported();
+ }
+ fBuilder.binary_op(builderOp, arg0.type().slotCount());
+ return true;
+}
+
+bool Generator::pushIntrinsic(IntrinsicKind intrinsic,
+ const Expression& arg0,
+ const Expression& arg1) {
+ switch (intrinsic) {
+ case IntrinsicKind::k_atan_IntrinsicKind:
+ return this->pushIntrinsic(BuilderOp::atan2_n_floats, arg0, arg1);
+
+ case IntrinsicKind::k_cross_IntrinsicKind: {
+ // Implement cross as `arg0.yzx * arg1.zxy - arg0.zxy * arg1.yzx`. We use two stacks so
+ // that each subexpression can be multiplied separately.
+ SkASSERT(arg0.type().matches(arg1.type()));
+ SkASSERT(arg0.type().slotCount() == 3);
+ SkASSERT(arg1.type().slotCount() == 3);
+
+ // Push `arg0.yzx` onto this stack and `arg0.zxy` onto a separate subexpression stack.
+ AutoStack subexpressionStack(this);
+ subexpressionStack.enter();
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ subexpressionStack.exit();
+ subexpressionStack.pushClone(/*slots=*/3);
+
+ fBuilder.swizzle(/*consumedSlots=*/3, {1, 2, 0});
+ subexpressionStack.enter();
+ fBuilder.swizzle(/*consumedSlots=*/3, {2, 0, 1});
+ subexpressionStack.exit();
+
+ // Push `arg1.zxy` onto this stack and `arg1.yzx` onto the next stack. Perform the
+ // multiply on each subexpression (`arg0.yzx * arg1.zxy` on the first stack, and
+ // `arg0.zxy * arg1.yzx` on the next).
+ subexpressionStack.enter();
+ if (!this->pushExpression(arg1)) {
+ return unsupported();
+ }
+ subexpressionStack.exit();
+ subexpressionStack.pushClone(/*slots=*/3);
+
+ fBuilder.swizzle(/*consumedSlots=*/3, {2, 0, 1});
+ fBuilder.binary_op(BuilderOp::mul_n_floats, 3);
+
+ subexpressionStack.enter();
+ fBuilder.swizzle(/*consumedSlots=*/3, {1, 2, 0});
+ fBuilder.binary_op(BuilderOp::mul_n_floats, 3);
+ subexpressionStack.exit();
+
+ // Migrate the result of the second subexpression (`arg0.zxy * arg1.yzx`) back onto the
+ // main stack and subtract it from the first subexpression (`arg0.yzx * arg1.zxy`).
+ subexpressionStack.pushClone(/*slots=*/3);
+ fBuilder.binary_op(BuilderOp::sub_n_floats, 3);
+
+ // Now that the calculation is complete, discard the subexpression on the next stack.
+ subexpressionStack.enter();
+ this->discardExpression(/*slots=*/3);
+ subexpressionStack.exit();
+ return true;
+ }
+ case IntrinsicKind::k_distance_IntrinsicKind:
+ // Implement distance as `length(a - b)`.
+ SkASSERT(arg0.type().slotCount() == arg1.type().slotCount());
+ return this->pushBinaryExpression(arg0, OperatorKind::MINUS, arg1) &&
+ this->pushLengthIntrinsic(arg0.type().slotCount());
+
+ case IntrinsicKind::k_dot_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ if (!this->pushExpression(arg0) || !this->pushExpression(arg1)) {
+ return unsupported();
+ }
+ fBuilder.dot_floats(arg0.type().slotCount());
+ return true;
+
+ case IntrinsicKind::k_equal_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ return this->pushIntrinsic(kEqualOps, arg0, arg1);
+
+ case IntrinsicKind::k_notEqual_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ return this->pushIntrinsic(kNotEqualOps, arg0, arg1);
+
+ case IntrinsicKind::k_lessThan_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ return this->pushIntrinsic(kLessThanOps, arg0, arg1);
+
+ case IntrinsicKind::k_greaterThan_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ return this->pushIntrinsic(kLessThanOps, arg1, arg0);
+
+ case IntrinsicKind::k_lessThanEqual_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ return this->pushIntrinsic(kLessThanEqualOps, arg0, arg1);
+
+ case IntrinsicKind::k_greaterThanEqual_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ return this->pushIntrinsic(kLessThanEqualOps, arg1, arg0);
+
+ case IntrinsicKind::k_min_IntrinsicKind:
+ SkASSERT(arg0.type().componentType().matches(arg1.type().componentType()));
+ return this->pushIntrinsic(kMinOps, arg0, arg1);
+
+ case IntrinsicKind::k_matrixCompMult_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ return this->pushIntrinsic(kMultiplyOps, arg0, arg1);
+
+ case IntrinsicKind::k_max_IntrinsicKind:
+ SkASSERT(arg0.type().componentType().matches(arg1.type().componentType()));
+ return this->pushIntrinsic(kMaxOps, arg0, arg1);
+
+ case IntrinsicKind::k_mod_IntrinsicKind:
+ SkASSERT(arg0.type().componentType().matches(arg1.type().componentType()));
+ return this->pushIntrinsic(kModOps, arg0, arg1);
+
+ case IntrinsicKind::k_pow_IntrinsicKind:
+ SkASSERT(arg0.type().matches(arg1.type()));
+ return this->pushIntrinsic(BuilderOp::pow_n_floats, arg0, arg1);
+
+ case IntrinsicKind::k_reflect_IntrinsicKind: {
+ // Implement reflect as `I - (N * dot(I,N) * 2)`.
+ SkASSERT(arg0.type().matches(arg1.type()));
+ SkASSERT(arg0.type().slotCount() == arg1.type().slotCount());
+ SkASSERT(arg0.type().componentType().isFloat());
+ int slotCount = arg0.type().slotCount();
+
+ // Stack: I, N.
+ if (!this->pushExpression(arg0) || !this->pushExpression(arg1)) {
+ return unsupported();
+ }
+ // Stack: I, N, I, N.
+ fBuilder.push_clone(2 * slotCount);
+ // Stack: I, N, dot(I,N)
+ fBuilder.dot_floats(slotCount);
+ // Stack: I, N, dot(I,N), 2
+ fBuilder.push_literal_f(2.0);
+ // Stack: I, N, dot(I,N) * 2
+ fBuilder.binary_op(BuilderOp::mul_n_floats, 1);
+ // Stack: I, N * dot(I,N) * 2
+ fBuilder.push_duplicates(slotCount - 1);
+ fBuilder.binary_op(BuilderOp::mul_n_floats, slotCount);
+ // Stack: I - (N * dot(I,N) * 2)
+ fBuilder.binary_op(BuilderOp::sub_n_floats, slotCount);
+ return true;
+ }
+ case IntrinsicKind::k_step_IntrinsicKind: {
+ // Compute step as `float(lessThan(edge, x))`. We convert from boolean 0/~0 to floating
+ // point zero/one by using a bitwise-and against the bit-pattern of 1.0.
+ SkASSERT(arg0.type().componentType().matches(arg1.type().componentType()));
+ if (!this->pushVectorizedExpression(arg0, arg1.type()) || !this->pushExpression(arg1)) {
+ return unsupported();
+ }
+ if (!this->binaryOp(arg1.type(), kLessThanOps)) {
+ return unsupported();
+ }
+ Literal pos1Literal{Position{}, 1.0, &arg1.type().componentType()};
+ if (!this->pushVectorizedExpression(pos1Literal, arg1.type())) {
+ return unsupported();
+ }
+ fBuilder.binary_op(BuilderOp::bitwise_and_n_ints, arg1.type().slotCount());
+ return true;
+ }
+
+ default:
+ break;
+ }
+ return unsupported();
+}
+
+bool Generator::pushIntrinsic(IntrinsicKind intrinsic,
+ const Expression& arg0,
+ const Expression& arg1,
+ const Expression& arg2) {
+ switch (intrinsic) {
+ case IntrinsicKind::k_clamp_IntrinsicKind:
+ // Implement clamp as min(max(arg, low), high).
+ SkASSERT(arg0.type().componentType().matches(arg1.type().componentType()));
+ SkASSERT(arg0.type().componentType().matches(arg2.type().componentType()));
+ if (!this->pushExpression(arg0) || !this->pushVectorizedExpression(arg1, arg0.type())) {
+ return unsupported();
+ }
+ if (!this->binaryOp(arg0.type(), kMaxOps)) {
+ return unsupported();
+ }
+ if (!this->pushVectorizedExpression(arg2, arg0.type())) {
+ return unsupported();
+ }
+ if (!this->binaryOp(arg0.type(), kMinOps)) {
+ return unsupported();
+ }
+ return true;
+
+ case IntrinsicKind::k_faceforward_IntrinsicKind: {
+ // Implement faceforward as `N ^ ((0 <= dot(I, NRef)) & 0x80000000)`.
+ // In other words, flip the sign bit of N if `0 <= dot(I, NRef)`.
+ SkASSERT(arg0.type().matches(arg1.type()));
+ SkASSERT(arg0.type().matches(arg2.type()));
+ int slotCount = arg0.type().slotCount();
+
+ // Stack: N, 0, I, Nref
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ fBuilder.push_literal_f(0.0);
+ if (!this->pushExpression(arg1) || !this->pushExpression(arg2)) {
+ return unsupported();
+ }
+ // Stack: N, 0, dot(I,NRef)
+ fBuilder.dot_floats(slotCount);
+ // Stack: N, (0 <= dot(I,NRef))
+ fBuilder.binary_op(BuilderOp::cmple_n_floats, 1);
+ // Stack: N, (0 <= dot(I,NRef)), 0x80000000
+ fBuilder.push_literal_i(0x80000000);
+            // Stack: N, ((0 <= dot(I,NRef)) & 0x80000000)
+ fBuilder.binary_op(BuilderOp::bitwise_and_n_ints, 1);
+            // Stack: N, vec((0 <= dot(I,NRef)) & 0x80000000)
+ fBuilder.push_duplicates(slotCount - 1);
+ // Stack: N ^ vec((0 <= dot(I,NRef)) & 0x80000000)
+ fBuilder.binary_op(BuilderOp::bitwise_xor_n_ints, slotCount);
+ return true;
+ }
+ case IntrinsicKind::k_mix_IntrinsicKind:
+ // Note: our SkRP mix op takes the interpolation point first, not the interpolants.
+ SkASSERT(arg0.type().matches(arg1.type()));
+ if (arg2.type().componentType().isFloat()) {
+ SkASSERT(arg0.type().componentType().matches(arg2.type().componentType()));
+ if (!this->pushVectorizedExpression(arg2, arg0.type())) {
+ return unsupported();
+ }
+ if (!this->pushExpression(arg0) || !this->pushExpression(arg1)) {
+ return unsupported();
+ }
+ return this->ternaryOp(arg0.type(), kMixOps);
+ }
+ if (arg2.type().componentType().isBoolean()) {
+ if (!this->pushExpression(arg2)) {
+ return unsupported();
+ }
+ if (!this->pushExpression(arg0) || !this->pushExpression(arg1)) {
+ return unsupported();
+ }
+ // The `mix_int` op isn't doing a lerp; it uses the third argument to select values
+ // from the first and second arguments. It's safe for use with any type in arguments
+ // 0 and 1.
+ fBuilder.ternary_op(BuilderOp::mix_n_ints, arg0.type().slotCount());
+ return true;
+ }
+ return unsupported();
+
+ case IntrinsicKind::k_refract_IntrinsicKind: {
+ // We always calculate refraction using vec4s, so we pad out unused N/I slots with zero.
+ int padding = 4 - arg0.type().slotCount();
+ if (!this->pushExpression(arg0)) {
+ return unsupported();
+ }
+ fBuilder.push_zeros(padding);
+
+ if (!this->pushExpression(arg1)) {
+ return unsupported();
+ }
+ fBuilder.push_zeros(padding);
+
+ // eta is always a scalar and doesn't need padding.
+ if (!this->pushExpression(arg2)) {
+ return unsupported();
+ }
+ fBuilder.refract_floats();
+
+            // The result vector was returned as a vec4, so discard the extra slots.
+ fBuilder.discard_stack(padding);
+ return true;
+ }
+ case IntrinsicKind::k_smoothstep_IntrinsicKind:
+ SkASSERT(arg0.type().componentType().isFloat());
+ SkASSERT(arg1.type().matches(arg0.type()));
+ SkASSERT(arg2.type().componentType().isFloat());
+
+ if (!this->pushVectorizedExpression(arg0, arg2.type()) ||
+ !this->pushVectorizedExpression(arg1, arg2.type()) ||
+ !this->pushExpression(arg2)) {
+ return unsupported();
+ }
+ fBuilder.ternary_op(BuilderOp::smoothstep_n_floats, arg2.type().slotCount());
+ return true;
+
+ default:
+ break;
+ }
+ return unsupported();
+}
+
+bool Generator::pushLiteral(const Literal& l) {
+ switch (l.type().numberKind()) {
+ case Type::NumberKind::kFloat:
+ fBuilder.push_literal_f(l.floatValue());
+ return true;
+
+ case Type::NumberKind::kSigned:
+ fBuilder.push_literal_i(l.intValue());
+ return true;
+
+ case Type::NumberKind::kUnsigned:
+ fBuilder.push_literal_u(l.intValue());
+ return true;
+
+ case Type::NumberKind::kBoolean:
+ fBuilder.push_literal_i(l.boolValue() ? ~0 : 0);
+ return true;
+
+ default:
+ SkUNREACHABLE;
+ }
+}
+
+bool Generator::pushPostfixExpression(const PostfixExpression& p, bool usesResult) {
+ // If the result is ignored...
+ if (!usesResult) {
+ // ... just emit a prefix expression instead.
+ return this->pushPrefixExpression(p.getOperator(), *p.operand());
+ }
+ // Get the operand as an lvalue, and push it onto the stack as-is.
+ std::unique_ptr<LValue> lvalue = this->makeLValue(*p.operand());
+ if (!lvalue || !this->push(*lvalue)) {
+ return unsupported();
+ }
+
+ // Push a scratch copy of the operand.
+ fBuilder.push_clone(p.type().slotCount());
+
+ // Increment or decrement the scratch copy by one.
+ Literal oneLiteral{Position{}, 1.0, &p.type().componentType()};
+ if (!this->pushVectorizedExpression(oneLiteral, p.type())) {
+ return unsupported();
+ }
+
+ switch (p.getOperator().kind()) {
+ case OperatorKind::PLUSPLUS:
+ if (!this->binaryOp(p.type(), kAddOps)) {
+ return unsupported();
+ }
+ break;
+
+ case OperatorKind::MINUSMINUS:
+ if (!this->binaryOp(p.type(), kSubtractOps)) {
+ return unsupported();
+ }
+ break;
+
+ default:
+ SkUNREACHABLE;
+ }
+
+ // Write the new value back to the operand.
+ if (!this->store(*lvalue)) {
+ return unsupported();
+ }
+
+ // Discard the scratch copy, leaving only the original value as-is.
+ this->discardExpression(p.type().slotCount());
+ return true;
+}
+
+bool Generator::pushPrefixExpression(const PrefixExpression& p) {
+ return this->pushPrefixExpression(p.getOperator(), *p.operand());
+}
+
+bool Generator::pushPrefixExpression(Operator op, const Expression& expr) {
+ switch (op.kind()) {
+ case OperatorKind::BITWISENOT:
+ case OperatorKind::LOGICALNOT:
+ // Handle operators ! and ~.
+ if (!this->pushExpression(expr)) {
+ return unsupported();
+ }
+ fBuilder.unary_op(BuilderOp::bitwise_not_int, expr.type().slotCount());
+ return true;
+
+ case OperatorKind::MINUS:
+ // Handle negation as a componentwise `0 - expr`.
+ fBuilder.push_zeros(expr.type().slotCount());
+ if (!this->pushExpression(expr)) {
+ return unsupported();
+ }
+ return this->binaryOp(expr.type(), kSubtractOps);
+
+ case OperatorKind::PLUSPLUS: {
+ // Rewrite as `expr += 1`.
+ Literal oneLiteral{Position{}, 1.0, &expr.type().componentType()};
+ return this->pushBinaryExpression(expr, OperatorKind::PLUSEQ, oneLiteral);
+ }
+ case OperatorKind::MINUSMINUS: {
+ // Rewrite as `expr -= 1`.
+ Literal oneLiteral{Position{}, 1.0, &expr.type().componentType()};
+ return this->pushBinaryExpression(expr, OperatorKind::MINUSEQ, oneLiteral);
+ }
+ default:
+ break;
+ }
+
+ return unsupported();
+}
+
+bool Generator::pushSwizzle(const Swizzle& s) {
+ SkASSERT(!s.components().empty() && s.components().size() <= 4);
+
+ // If this is a simple subset of a variable's slots...
+ bool isSimpleSubset = is_sliceable_swizzle(s.components());
+ if (isSimpleSubset && s.base()->is<VariableReference>()) {
+ // ... we can just push part of the variable directly onto the stack, rather than pushing
+ // the whole expression and then immediately cutting it down. (Either way works, but this
+ // saves a step.)
+ return this->pushVariableReferencePartial(
+ s.base()->as<VariableReference>(),
+ SlotRange{/*index=*/s.components()[0], /*count=*/s.components().size()});
+ }
+ // Push the base expression.
+ if (!this->pushExpression(*s.base())) {
+ return false;
+ }
+ // An identity swizzle doesn't rearrange the data; it just (potentially) discards tail elements.
+ if (isSimpleSubset && s.components()[0] == 0) {
+ int discardedElements = s.base()->type().slotCount() - s.components().size();
+ SkASSERT(discardedElements >= 0);
+ fBuilder.discard_stack(discardedElements);
+ return true;
+ }
+ // Perform the swizzle.
+ fBuilder.swizzle(s.base()->type().slotCount(), s.components());
+ return true;
+}
+
+bool Generator::pushTernaryExpression(const TernaryExpression& t) {
+ return this->pushTernaryExpression(*t.test(), *t.ifTrue(), *t.ifFalse());
+}
+
+bool Generator::pushDynamicallyUniformTernaryExpression(const Expression& test,
+ const Expression& ifTrue,
+ const Expression& ifFalse) {
+ SkASSERT(Analysis::IsDynamicallyUniformExpression(test));
+
+ int falseLabelID = fBuilder.nextLabelID();
+ int exitLabelID = fBuilder.nextLabelID();
+
+ // First, push the test-expression into a separate stack.
+ AutoStack testStack(this);
+ testStack.enter();
+ if (!this->pushExpression(test)) {
+ return unsupported();
+ }
+
+ // Branch to the true- or false-expression based on the test-expression. We can skip the
+ // non-true path entirely since the test is known to be uniform.
+ fBuilder.branch_if_no_active_lanes_on_stack_top_equal(~0, falseLabelID);
+ testStack.exit();
+
+ if (!this->pushExpression(ifTrue)) {
+ return unsupported();
+ }
+
+ fBuilder.jump(exitLabelID);
+
+ // The builder doesn't understand control flow, and assumes that every push moves the stack-top
+ // forwards. We need to manually balance out the `pushExpression` from the if-true path by
+ // moving the stack position backwards, so that the if-false path pushes its expression into the
+    // same position as the if-true result.
+ this->discardExpression(/*slots=*/ifTrue.type().slotCount());
+
+ fBuilder.label(falseLabelID);
+
+ if (!this->pushExpression(ifFalse)) {
+ return unsupported();
+ }
+
+ fBuilder.label(exitLabelID);
+
+    // Jettison the test-expression from the separate stack.
+ testStack.enter();
+ this->discardExpression(/*slots=*/1);
+ testStack.exit();
+ return true;
+}
+
+bool Generator::pushTernaryExpression(const Expression& test,
+ const Expression& ifTrue,
+ const Expression& ifFalse) {
+ // If the test-expression is dynamically-uniform, we can skip over the non-true expressions
+ // entirely, and not need to involve the condition mask.
+ if (Analysis::IsDynamicallyUniformExpression(test)) {
+ return this->pushDynamicallyUniformTernaryExpression(test, ifTrue, ifFalse);
+ }
+
+ // Analyze the ternary to see which corners we can safely cut.
+ bool ifFalseHasSideEffects = Analysis::HasSideEffects(ifFalse);
+ bool ifTrueHasSideEffects = Analysis::HasSideEffects(ifTrue);
+ bool ifTrueIsTrivial = Analysis::IsTrivialExpression(ifTrue);
+ int cleanupLabelID = fBuilder.nextLabelID();
+
+ // If the true- and false-expressions both lack side effects, we evaluate both of them safely
+ // without masking off their effects. In that case, we can emit both sides and use boolean mix
+ // to select the correct result without using the condition mask at all.
+ if (!ifFalseHasSideEffects && !ifTrueHasSideEffects && ifTrueIsTrivial) {
+ // Push all of the arguments to mix.
+ if (!this->pushVectorizedExpression(test, ifTrue.type())) {
+ return unsupported();
+ }
+ if (!this->pushExpression(ifFalse)) {
+ return unsupported();
+ }
+ if (!this->pushExpression(ifTrue)) {
+ return unsupported();
+ }
+ // Use boolean mix to select the true- or false-expression via the test-expression.
+ fBuilder.ternary_op(BuilderOp::mix_n_ints, ifTrue.type().slotCount());
+ return true;
+ }
+
+ // First, push the current condition-mask and the test-expression into a separate stack.
+ fBuilder.enableExecutionMaskWrites();
+ AutoStack testStack(this);
+ testStack.enter();
+ fBuilder.push_condition_mask();
+ if (!this->pushExpression(test)) {
+ return unsupported();
+ }
+ testStack.exit();
+
+ // We can take some shortcuts with condition-mask handling if the false-expression is entirely
+ // side-effect free. (We can evaluate it without masking off its effects.) We always handle the
+    // condition mask properly for the test-expression and true-expression.
+ if (!ifFalseHasSideEffects) {
+ // Push the false-expression onto the primary stack.
+ if (!this->pushExpression(ifFalse)) {
+ return unsupported();
+ }
+
+ // Next, merge the condition mask (on the separate stack) with the test expression.
+ testStack.enter();
+ fBuilder.merge_condition_mask();
+ testStack.exit();
+
+ // If no lanes are active, we can skip the true-expression entirely. This isn't super likely
+ // to happen, so it's probably only a win for non-trivial true-expressions.
+ if (!ifTrueIsTrivial) {
+ fBuilder.branch_if_no_lanes_active(cleanupLabelID);
+ }
+
+ // Push the true-expression onto the primary stack, immediately after the false-expression.
+ if (!this->pushExpression(ifTrue)) {
+ return unsupported();
+ }
+
+ // Use a select to conditionally mask-merge the true-expression and false-expression lanes.
+ fBuilder.select(/*slots=*/ifTrue.type().slotCount());
+ fBuilder.label(cleanupLabelID);
+ } else {
+ // Merge the condition mask (on the separate stack) with the test expression.
+ testStack.enter();
+ fBuilder.merge_condition_mask();
+ testStack.exit();
+
+ // Push the true-expression onto the primary stack.
+ if (!this->pushExpression(ifTrue)) {
+ return unsupported();
+ }
+
+ // Switch back to the test-expression stack temporarily, and negate the test condition.
+ testStack.enter();
+ fBuilder.unary_op(BuilderOp::bitwise_not_int, /*slots=*/1);
+ fBuilder.merge_condition_mask();
+ testStack.exit();
+
+ // Push the false-expression onto the primary stack, immediately after the true-expression.
+ if (!this->pushExpression(ifFalse)) {
+ return unsupported();
+ }
+
+ // Use a select to conditionally mask-merge the true-expression and false-expression lanes;
+ // the mask is already set up for this.
+ fBuilder.select(/*slots=*/ifTrue.type().slotCount());
+ }
+
+ // Restore the condition-mask to its original state and jettison the test-expression.
+ testStack.enter();
+ this->discardExpression(/*slots=*/1);
+ fBuilder.pop_condition_mask();
+ testStack.exit();
+
+ fBuilder.disableExecutionMaskWrites();
+ return true;
+}
+
+bool Generator::pushVariableReference(const VariableReference& v) {
+ return this->pushVariableReferencePartial(v, SlotRange{0, (int)v.type().slotCount()});
+}
+
+bool Generator::pushVariableReferencePartial(const VariableReference& v, SlotRange subset) {
+ const Variable& var = *v.variable();
+ SlotRange r;
+ if (IsUniform(var)) {
+ r = this->getUniformSlots(var);
+ SkASSERT(r.count == (int)var.type().slotCount());
+ r.index += subset.index;
+ r.count = subset.count;
+ fBuilder.push_uniform(r);
+ } else {
+ r = this->getVariableSlots(var);
+ SkASSERT(r.count == (int)var.type().slotCount());
+ r.index += subset.index;
+ r.count = subset.count;
+ fBuilder.push_slots(r);
+ }
+ return true;
+}
+
+bool Generator::writeProgram(const FunctionDefinition& function) {
+ fCurrentFunction = &function;
+
+ if (fDebugTrace) {
+ // Copy the program source into the debug info so that it will be written in the trace file.
+ fDebugTrace->setSource(*fProgram.fSource);
+ }
+ // Assign slots to the parameters of main; copy src and dst into those slots as appropriate.
+ for (const SkSL::Variable* param : function.declaration().parameters()) {
+ switch (param->modifiers().fLayout.fBuiltin) {
+ case SK_MAIN_COORDS_BUILTIN: {
+ // Coordinates are passed via RG.
+ SlotRange fragCoord = this->getVariableSlots(*param);
+ SkASSERT(fragCoord.count == 2);
+ fBuilder.store_src_rg(fragCoord);
+ break;
+ }
+ case SK_INPUT_COLOR_BUILTIN: {
+ // Input colors are passed via RGBA.
+ SlotRange srcColor = this->getVariableSlots(*param);
+ SkASSERT(srcColor.count == 4);
+ fBuilder.store_src(srcColor);
+ break;
+ }
+ case SK_DEST_COLOR_BUILTIN: {
+ // Dest colors are passed via dRGBA.
+ SlotRange destColor = this->getVariableSlots(*param);
+ SkASSERT(destColor.count == 4);
+ fBuilder.store_dst(destColor);
+ break;
+ }
+ default: {
+ SkDEBUGFAIL("Invalid parameter to main()");
+ return unsupported();
+ }
+ }
+ }
+
+ // Initialize the program.
+ fBuilder.init_lane_masks();
+
+ // Emit global variables.
+ if (!this->writeGlobals()) {
+ return unsupported();
+ }
+
+ // Invoke main().
+ if (this->needsReturnMask()) {
+ fBuilder.enableExecutionMaskWrites();
+ }
+
+ std::optional<SlotRange> mainResult = this->writeFunction(function, function);
+ if (!mainResult.has_value()) {
+ return unsupported();
+ }
+
+ if (this->needsReturnMask()) {
+ fBuilder.disableExecutionMaskWrites();
+ }
+
+ // Move the result of main() from slots into RGBA. Allow dRGBA to remain in a trashed state.
+ SkASSERT(mainResult->count == 4);
+ if (this->needsFunctionResultSlots()) {
+ fBuilder.load_src(*mainResult);
+ } else {
+ fBuilder.pop_src_rgba();
+ }
+ return true;
+}
+
+std::unique_ptr<RP::Program> Generator::finish() {
+ return fBuilder.finish(fProgramSlots.slotCount(), fUniformSlots.slotCount(), fDebugTrace);
+}
+
+} // namespace RP
+
+std::unique_ptr<RP::Program> MakeRasterPipelineProgram(const SkSL::Program& program,
+ const FunctionDefinition& function,
+ SkRPDebugTrace* debugTrace) {
+ RP::Generator generator(program, debugTrace);
+ if (!generator.writeProgram(function)) {
+ return nullptr;
+ }
+ return generator.finish();
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.h b/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.h
new file mode 100644
index 0000000000..c49a8d571d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLRasterPipelineCodeGenerator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_RASTERPIPELINECODEGENERATOR
+#define SKSL_RASTERPIPELINECODEGENERATOR
+
+#include "include/core/SkTypes.h"
+#include <memory>
+
+namespace SkSL {
+
+class FunctionDefinition;
+struct Program;
+class SkRPDebugTrace;
+namespace RP { class Program; }
+
+// Convert 'function' to Raster Pipeline stages, for use by blends, shaders, and color filters.
+// The arguments to the function are passed in registers:
+// -- coordinates in src.rg for shaders
+// -- color in src.rgba for color filters
+// -- src/dst in src.rgba and dst.rgba for blenders
+std::unique_ptr<RP::Program> MakeRasterPipelineProgram(const Program& program,
+ const FunctionDefinition& function,
+ SkRPDebugTrace* debugTrace = nullptr);
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVCodeGenerator.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVCodeGenerator.cpp
new file mode 100644
index 0000000000..f355a64a83
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVCodeGenerator.cpp
@@ -0,0 +1,4365 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/codegen/SkSLSPIRVCodeGenerator.h"
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkOpts_spi.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/DSLCore.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/DSLVar.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/GLSL.std.450.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLPool.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLConstructorArrayCast.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorCompoundCast.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorMatrixResize.h"
+#include "src/sksl/ir/SkSLConstructorScalarCast.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLField.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/transform/SkSLTransform.h"
+#include "src/utils/SkBitSet.h"
+
+#include <cstring>
+#include <set>
+#include <string>
+#include <utility>
+
+#define kLast_Capability SpvCapabilityMultiViewport
+
+constexpr int DEVICE_FRAGCOORDS_BUILTIN = -1000;
+constexpr int DEVICE_CLOCKWISE_BUILTIN = -1001;
+
+namespace SkSL {
+
+// Equality and hash operators for Instructions.
+bool SPIRVCodeGenerator::Instruction::operator==(const SPIRVCodeGenerator::Instruction& that) const {
+ return fOp == that.fOp &&
+ fResultKind == that.fResultKind &&
+ fWords == that.fWords;
+}
+
+struct SPIRVCodeGenerator::Instruction::Hash {
+ uint32_t operator()(const SPIRVCodeGenerator::Instruction& key) const {
+ uint32_t hash = key.fResultKind;
+ hash = SkOpts::hash_fn(&key.fOp, sizeof(key.fOp), hash);
+ hash = SkOpts::hash_fn(key.fWords.data(), key.fWords.size() * sizeof(int32_t), hash);
+ return hash;
+ }
+};
+
+// This class is used to pass values and result placeholder slots to writeInstruction.
+struct SPIRVCodeGenerator::Word {
+ enum Kind {
+ kNone, // intended for use as a sentinel, not part of any Instruction
+ kSpvId,
+ kNumber,
+ kDefaultPrecisionResult,
+ kRelaxedPrecisionResult,
+ kUniqueResult,
+ kKeyedResult,
+ };
+
+ Word(SpvId id) : fValue(id), fKind(Kind::kSpvId) {}
+ Word(int32_t val, Kind kind) : fValue(val), fKind(kind) {}
+
+ static Word Number(int32_t val) {
+ return Word{val, Kind::kNumber};
+ }
+
+ static Word Result(const Type& type) {
+ return (type.hasPrecision() && !type.highPrecision()) ? RelaxedResult() : Result();
+ }
+
+ static Word RelaxedResult() {
+ return Word{(int32_t)NA, kRelaxedPrecisionResult};
+ }
+
+ static Word UniqueResult() {
+ return Word{(int32_t)NA, kUniqueResult};
+ }
+
+ static Word Result() {
+ return Word{(int32_t)NA, kDefaultPrecisionResult};
+ }
+
+ // Unlike a Result (where the result ID is always deduplicated to its first instruction) or a
+ // UniqueResult (which always produces a new instruction), a KeyedResult allows an instruction
+ // to be deduplicated among those that share the same `key`.
+ static Word KeyedResult(int32_t key) { return Word{key, Kind::kKeyedResult}; }
+
+ bool isResult() const { return fKind >= Kind::kDefaultPrecisionResult; }
+
+ int32_t fValue;
+ Kind fKind;
+};
+
+// Skia's magic number is 31 and goes in the top 16 bits. We can use the lower bits to version the
+// sksl generator if we want.
+// https://github.com/KhronosGroup/SPIRV-Headers/blob/master/include/spirv/spir-v.xml#L84
+static const int32_t SKSL_MAGIC = 0x001F0000;
+
+SPIRVCodeGenerator::Intrinsic SPIRVCodeGenerator::getIntrinsic(IntrinsicKind ik) const {
+
+#define ALL_GLSL(x) Intrinsic{kGLSL_STD_450_IntrinsicOpcodeKind, GLSLstd450 ## x, \
+ GLSLstd450 ## x, GLSLstd450 ## x, GLSLstd450 ## x}
+#define BY_TYPE_GLSL(ifFloat, ifInt, ifUInt) Intrinsic{kGLSL_STD_450_IntrinsicOpcodeKind, \
+ GLSLstd450 ## ifFloat, \
+ GLSLstd450 ## ifInt, \
+ GLSLstd450 ## ifUInt, \
+ SpvOpUndef}
+#define ALL_SPIRV(x) Intrinsic{kSPIRV_IntrinsicOpcodeKind, \
+ SpvOp ## x, SpvOp ## x, SpvOp ## x, SpvOp ## x}
+#define BOOL_SPIRV(x) Intrinsic{kSPIRV_IntrinsicOpcodeKind, \
+ SpvOpUndef, SpvOpUndef, SpvOpUndef, SpvOp ## x}
+#define FLOAT_SPIRV(x) Intrinsic{kSPIRV_IntrinsicOpcodeKind, \
+ SpvOp ## x, SpvOpUndef, SpvOpUndef, SpvOpUndef}
+#define SPECIAL(x) Intrinsic{kSpecial_IntrinsicOpcodeKind, k ## x ## _SpecialIntrinsic, \
+ k ## x ## _SpecialIntrinsic, k ## x ## _SpecialIntrinsic, \
+ k ## x ## _SpecialIntrinsic}
+
+ switch (ik) {
+ case k_round_IntrinsicKind: return ALL_GLSL(Round);
+ case k_roundEven_IntrinsicKind: return ALL_GLSL(RoundEven);
+ case k_trunc_IntrinsicKind: return ALL_GLSL(Trunc);
+ case k_abs_IntrinsicKind: return BY_TYPE_GLSL(FAbs, SAbs, SAbs);
+ case k_sign_IntrinsicKind: return BY_TYPE_GLSL(FSign, SSign, SSign);
+ case k_floor_IntrinsicKind: return ALL_GLSL(Floor);
+ case k_ceil_IntrinsicKind: return ALL_GLSL(Ceil);
+ case k_fract_IntrinsicKind: return ALL_GLSL(Fract);
+ case k_radians_IntrinsicKind: return ALL_GLSL(Radians);
+ case k_degrees_IntrinsicKind: return ALL_GLSL(Degrees);
+ case k_sin_IntrinsicKind: return ALL_GLSL(Sin);
+ case k_cos_IntrinsicKind: return ALL_GLSL(Cos);
+ case k_tan_IntrinsicKind: return ALL_GLSL(Tan);
+ case k_asin_IntrinsicKind: return ALL_GLSL(Asin);
+ case k_acos_IntrinsicKind: return ALL_GLSL(Acos);
+ case k_atan_IntrinsicKind: return SPECIAL(Atan);
+ case k_sinh_IntrinsicKind: return ALL_GLSL(Sinh);
+ case k_cosh_IntrinsicKind: return ALL_GLSL(Cosh);
+ case k_tanh_IntrinsicKind: return ALL_GLSL(Tanh);
+ case k_asinh_IntrinsicKind: return ALL_GLSL(Asinh);
+ case k_acosh_IntrinsicKind: return ALL_GLSL(Acosh);
+ case k_atanh_IntrinsicKind: return ALL_GLSL(Atanh);
+ case k_pow_IntrinsicKind: return ALL_GLSL(Pow);
+ case k_exp_IntrinsicKind: return ALL_GLSL(Exp);
+ case k_log_IntrinsicKind: return ALL_GLSL(Log);
+ case k_exp2_IntrinsicKind: return ALL_GLSL(Exp2);
+ case k_log2_IntrinsicKind: return ALL_GLSL(Log2);
+ case k_sqrt_IntrinsicKind: return ALL_GLSL(Sqrt);
+ case k_inverse_IntrinsicKind: return ALL_GLSL(MatrixInverse);
+ case k_outerProduct_IntrinsicKind: return ALL_SPIRV(OuterProduct);
+ case k_transpose_IntrinsicKind: return ALL_SPIRV(Transpose);
+ case k_isinf_IntrinsicKind: return ALL_SPIRV(IsInf);
+ case k_isnan_IntrinsicKind: return ALL_SPIRV(IsNan);
+ case k_inversesqrt_IntrinsicKind: return ALL_GLSL(InverseSqrt);
+ case k_determinant_IntrinsicKind: return ALL_GLSL(Determinant);
+ case k_matrixCompMult_IntrinsicKind: return SPECIAL(MatrixCompMult);
+ case k_matrixInverse_IntrinsicKind: return ALL_GLSL(MatrixInverse);
+ case k_mod_IntrinsicKind: return SPECIAL(Mod);
+ case k_modf_IntrinsicKind: return ALL_GLSL(Modf);
+ case k_min_IntrinsicKind: return SPECIAL(Min);
+ case k_max_IntrinsicKind: return SPECIAL(Max);
+ case k_clamp_IntrinsicKind: return SPECIAL(Clamp);
+ case k_saturate_IntrinsicKind: return SPECIAL(Saturate);
+ case k_dot_IntrinsicKind: return FLOAT_SPIRV(Dot);
+ case k_mix_IntrinsicKind: return SPECIAL(Mix);
+ case k_step_IntrinsicKind: return SPECIAL(Step);
+ case k_smoothstep_IntrinsicKind: return SPECIAL(SmoothStep);
+ case k_fma_IntrinsicKind: return ALL_GLSL(Fma);
+ case k_frexp_IntrinsicKind: return ALL_GLSL(Frexp);
+ case k_ldexp_IntrinsicKind: return ALL_GLSL(Ldexp);
+
+#define PACK(type) case k_pack##type##_IntrinsicKind: return ALL_GLSL(Pack##type); \
+ case k_unpack##type##_IntrinsicKind: return ALL_GLSL(Unpack##type)
+ PACK(Snorm4x8);
+ PACK(Unorm4x8);
+ PACK(Snorm2x16);
+ PACK(Unorm2x16);
+ PACK(Half2x16);
+ PACK(Double2x32);
+#undef PACK
+
+ case k_length_IntrinsicKind: return ALL_GLSL(Length);
+ case k_distance_IntrinsicKind: return ALL_GLSL(Distance);
+ case k_cross_IntrinsicKind: return ALL_GLSL(Cross);
+ case k_normalize_IntrinsicKind: return ALL_GLSL(Normalize);
+ case k_faceforward_IntrinsicKind: return ALL_GLSL(FaceForward);
+ case k_reflect_IntrinsicKind: return ALL_GLSL(Reflect);
+ case k_refract_IntrinsicKind: return ALL_GLSL(Refract);
+ case k_bitCount_IntrinsicKind: return ALL_SPIRV(BitCount);
+ case k_findLSB_IntrinsicKind: return ALL_GLSL(FindILsb);
+ case k_findMSB_IntrinsicKind: return BY_TYPE_GLSL(FindSMsb, FindSMsb, FindUMsb);
+ case k_dFdx_IntrinsicKind: return FLOAT_SPIRV(DPdx);
+ case k_dFdy_IntrinsicKind: return SPECIAL(DFdy);
+ case k_fwidth_IntrinsicKind: return FLOAT_SPIRV(Fwidth);
+ case k_makeSampler2D_IntrinsicKind: return SPECIAL(SampledImage);
+
+ case k_sample_IntrinsicKind: return SPECIAL(Texture);
+ case k_sampleGrad_IntrinsicKind: return SPECIAL(TextureGrad);
+ case k_sampleLod_IntrinsicKind: return SPECIAL(TextureLod);
+ case k_subpassLoad_IntrinsicKind: return SPECIAL(SubpassLoad);
+
+ case k_floatBitsToInt_IntrinsicKind: return ALL_SPIRV(Bitcast);
+ case k_floatBitsToUint_IntrinsicKind: return ALL_SPIRV(Bitcast);
+ case k_intBitsToFloat_IntrinsicKind: return ALL_SPIRV(Bitcast);
+ case k_uintBitsToFloat_IntrinsicKind: return ALL_SPIRV(Bitcast);
+
+ case k_any_IntrinsicKind: return BOOL_SPIRV(Any);
+ case k_all_IntrinsicKind: return BOOL_SPIRV(All);
+ case k_not_IntrinsicKind: return BOOL_SPIRV(LogicalNot);
+
+ case k_equal_IntrinsicKind:
+ return Intrinsic{kSPIRV_IntrinsicOpcodeKind,
+ SpvOpFOrdEqual,
+ SpvOpIEqual,
+ SpvOpIEqual,
+ SpvOpLogicalEqual};
+ case k_notEqual_IntrinsicKind:
+ return Intrinsic{kSPIRV_IntrinsicOpcodeKind,
+ SpvOpFUnordNotEqual,
+ SpvOpINotEqual,
+ SpvOpINotEqual,
+ SpvOpLogicalNotEqual};
+ case k_lessThan_IntrinsicKind:
+ return Intrinsic{kSPIRV_IntrinsicOpcodeKind,
+ SpvOpFOrdLessThan,
+ SpvOpSLessThan,
+ SpvOpULessThan,
+ SpvOpUndef};
+ case k_lessThanEqual_IntrinsicKind:
+ return Intrinsic{kSPIRV_IntrinsicOpcodeKind,
+ SpvOpFOrdLessThanEqual,
+ SpvOpSLessThanEqual,
+ SpvOpULessThanEqual,
+ SpvOpUndef};
+ case k_greaterThan_IntrinsicKind:
+ return Intrinsic{kSPIRV_IntrinsicOpcodeKind,
+ SpvOpFOrdGreaterThan,
+ SpvOpSGreaterThan,
+ SpvOpUGreaterThan,
+ SpvOpUndef};
+ case k_greaterThanEqual_IntrinsicKind:
+ return Intrinsic{kSPIRV_IntrinsicOpcodeKind,
+ SpvOpFOrdGreaterThanEqual,
+ SpvOpSGreaterThanEqual,
+ SpvOpUGreaterThanEqual,
+ SpvOpUndef};
+ default:
+ return Intrinsic{kInvalid_IntrinsicOpcodeKind, 0, 0, 0, 0};
+ }
+}
+
+void SPIRVCodeGenerator::writeWord(int32_t word, OutputStream& out) {
+ out.write((const char*) &word, sizeof(word));
+}
+
+static bool is_float(const Type& type) {
+ return (type.isScalar() || type.isVector() || type.isMatrix()) &&
+ type.componentType().isFloat();
+}
+
+static bool is_signed(const Type& type) {
+ return (type.isScalar() || type.isVector()) && type.componentType().isSigned();
+}
+
+static bool is_unsigned(const Type& type) {
+ return (type.isScalar() || type.isVector()) && type.componentType().isUnsigned();
+}
+
+static bool is_bool(const Type& type) {
+ return (type.isScalar() || type.isVector()) && type.componentType().isBoolean();
+}
+
+template <typename T>
+static T pick_by_type(const Type& type, T ifFloat, T ifInt, T ifUInt, T ifBool) {
+ if (is_float(type)) {
+ return ifFloat;
+ }
+ if (is_signed(type)) {
+ return ifInt;
+ }
+ if (is_unsigned(type)) {
+ return ifUInt;
+ }
+ if (is_bool(type)) {
+ return ifBool;
+ }
+ SkDEBUGFAIL("unrecognized type");
+ return ifFloat;
+}
+
+static bool is_out(const Modifiers& m) {
+ return (m.fFlags & Modifiers::kOut_Flag) != 0;
+}
+
+static bool is_in(const Modifiers& m) {
+ switch (m.fFlags & (Modifiers::kOut_Flag | Modifiers::kIn_Flag)) {
+ case Modifiers::kOut_Flag: // out
+ return false;
+
+ case 0: // implicit in
+ case Modifiers::kIn_Flag: // explicit in
+ case Modifiers::kOut_Flag | Modifiers::kIn_Flag: // inout
+ return true;
+
+ default: SkUNREACHABLE;
+ }
+}
+
+static bool is_control_flow_op(SpvOp_ op) {
+ switch (op) {
+ case SpvOpReturn:
+ case SpvOpReturnValue:
+ case SpvOpKill:
+ case SpvOpSwitch:
+ case SpvOpBranch:
+ case SpvOpBranchConditional:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_globally_reachable_op(SpvOp_ op) {
+ switch (op) {
+ case SpvOpConstant:
+ case SpvOpConstantTrue:
+ case SpvOpConstantFalse:
+ case SpvOpConstantComposite:
+ case SpvOpTypeVoid:
+ case SpvOpTypeInt:
+ case SpvOpTypeFloat:
+ case SpvOpTypeBool:
+ case SpvOpTypeVector:
+ case SpvOpTypeMatrix:
+ case SpvOpTypeArray:
+ case SpvOpTypePointer:
+ case SpvOpTypeFunction:
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeStruct:
+ case SpvOpTypeImage:
+ case SpvOpTypeSampledImage:
+ case SpvOpTypeSampler:
+ case SpvOpVariable:
+ case SpvOpFunction:
+ case SpvOpFunctionParameter:
+ case SpvOpFunctionEnd:
+ case SpvOpExecutionMode:
+ case SpvOpMemoryModel:
+ case SpvOpCapability:
+ case SpvOpExtInstImport:
+ case SpvOpEntryPoint:
+ case SpvOpSource:
+ case SpvOpSourceExtension:
+ case SpvOpName:
+ case SpvOpMemberName:
+ case SpvOpDecorate:
+ case SpvOpMemberDecorate:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void SPIRVCodeGenerator::writeOpCode(SpvOp_ opCode, int length, OutputStream& out) {
+ SkASSERT(opCode != SpvOpLoad || &out != &fConstantBuffer);
+ SkASSERT(opCode != SpvOpUndef);
+ bool foundDeadCode = false;
+ if (is_control_flow_op(opCode)) {
+ // This instruction causes us to leave the current block.
+ foundDeadCode = (fCurrentBlock == 0);
+ fCurrentBlock = 0;
+ } else if (!is_globally_reachable_op(opCode)) {
+ foundDeadCode = (fCurrentBlock == 0);
+ }
+
+ if (foundDeadCode) {
+ // We just encountered dead code--an instruction that don't have an associated block.
+ // Synthesize a label if this happens; this is necessary to satisfy the validator.
+ this->writeLabel(this->nextId(nullptr), kBranchlessBlock, out);
+ }
+
+ this->writeWord((length << 16) | opCode, out);
+}
+
+void SPIRVCodeGenerator::writeLabel(SpvId label, StraightLineLabelType, OutputStream& out) {
+ // The straight-line label type is not important; in any case, no caches are invalidated.
+ SkASSERT(!fCurrentBlock);
+ fCurrentBlock = label;
+ this->writeInstruction(SpvOpLabel, label, out);
+}
+
+void SPIRVCodeGenerator::writeLabel(SpvId label, BranchingLabelType type,
+ ConditionalOpCounts ops, OutputStream& out) {
+ switch (type) {
+ case kBranchIsBelow:
+ case kBranchesOnBothSides:
+ // With a backward or bidirectional branch, we haven't seen the code between the label
+ // and the branch yet, so any stored value is potentially suspect. Without scanning
+ // ahead to check, the only safe option is to ditch the store cache entirely.
+ fStoreCache.reset();
+ [[fallthrough]];
+
+ case kBranchIsAbove:
+ // With a forward branch, we can rely on stores that we had cached at the start of the
+ // statement/expression, if they haven't been touched yet. Anything newer than that is
+ // pruned.
+ this->pruneConditionalOps(ops);
+ break;
+ }
+
+ // Emit the label.
+ this->writeLabel(label, kBranchlessBlock, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, OutputStream& out) {
+ this->writeOpCode(opCode, 1, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, OutputStream& out) {
+ this->writeOpCode(opCode, 2, out);
+ this->writeWord(word1, out);
+}
+
+void SPIRVCodeGenerator::writeString(std::string_view s, OutputStream& out) {
+ out.write(s.data(), s.length());
+ switch (s.length() % 4) {
+ case 1:
+ out.write8(0);
+ [[fallthrough]];
+ case 2:
+ out.write8(0);
+ [[fallthrough]];
+ case 3:
+ out.write8(0);
+ break;
+ default:
+ this->writeWord(0, out);
+ break;
+ }
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, std::string_view string,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 1 + (string.length() + 4) / 4, out);
+ this->writeString(string, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, std::string_view string,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 2 + (string.length() + 4) / 4, out);
+ this->writeWord(word1, out);
+ this->writeString(string, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ std::string_view string, OutputStream& out) {
+ this->writeOpCode(opCode, 3 + (string.length() + 4) / 4, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeString(string, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 3, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, OutputStream& out) {
+ this->writeOpCode(opCode, 4, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, OutputStream& out) {
+ this->writeOpCode(opCode, 5, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 6, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, OutputStream& out) {
+ this->writeOpCode(opCode, 7, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, int32_t word7, OutputStream& out) {
+ this->writeOpCode(opCode, 8, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+ this->writeWord(word7, out);
+}
+
+void SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2,
+ int32_t word3, int32_t word4, int32_t word5,
+ int32_t word6, int32_t word7, int32_t word8,
+ OutputStream& out) {
+ this->writeOpCode(opCode, 9, out);
+ this->writeWord(word1, out);
+ this->writeWord(word2, out);
+ this->writeWord(word3, out);
+ this->writeWord(word4, out);
+ this->writeWord(word5, out);
+ this->writeWord(word6, out);
+ this->writeWord(word7, out);
+ this->writeWord(word8, out);
+}
+
+SPIRVCodeGenerator::Instruction SPIRVCodeGenerator::BuildInstructionKey(
+ SpvOp_ opCode, const SkTArray<Word>& words) {
+ // Assemble a cache key for this instruction.
+ Instruction key;
+ key.fOp = opCode;
+ key.fWords.resize(words.size());
+ key.fResultKind = Word::Kind::kNone;
+
+ for (int index = 0; index < words.size(); ++index) {
+ const Word& word = words[index];
+ key.fWords[index] = word.fValue;
+ if (word.isResult()) {
+ SkASSERT(key.fResultKind == Word::Kind::kNone);
+ key.fResultKind = word.fKind;
+ }
+ }
+
+ return key;
+}
+
+SpvId SPIRVCodeGenerator::writeInstruction(SpvOp_ opCode,
+ const SkTArray<Word>& words,
+ OutputStream& out) {
+ // writeOpLoad and writeOpStore have dedicated code.
+ SkASSERT(opCode != SpvOpLoad);
+ SkASSERT(opCode != SpvOpStore);
+
+ // If this instruction exists in our op cache, return the cached SpvId.
+ Instruction key = BuildInstructionKey(opCode, words);
+ if (SpvId* cachedOp = fOpCache.find(key)) {
+ return *cachedOp;
+ }
+
+ SpvId result = NA;
+ Precision precision = Precision::kDefault;
+
+ switch (key.fResultKind) {
+ case Word::Kind::kUniqueResult:
+ // The instruction returns a SpvId, but we do not want deduplication.
+ result = this->nextId(Precision::kDefault);
+ fSpvIdCache.set(result, key);
+ break;
+
+ case Word::Kind::kNone:
+ // The instruction doesn't return a SpvId, but we can still cache and deduplicate it.
+ fOpCache.set(key, result);
+ break;
+
+ case Word::Kind::kRelaxedPrecisionResult:
+ precision = Precision::kRelaxed;
+ [[fallthrough]];
+
+ case Word::Kind::kKeyedResult:
+ [[fallthrough]];
+
+ case Word::Kind::kDefaultPrecisionResult:
+ // Consume a new SpvId.
+ result = this->nextId(precision);
+ fOpCache.set(key, result);
+ fSpvIdCache.set(result, key);
+
+ // Globally-reachable ops are not subject to the whims of flow control.
+ if (!is_globally_reachable_op(opCode)) {
+ fReachableOps.push_back(result);
+ }
+ break;
+
+ default:
+ SkDEBUGFAIL("unexpected result kind");
+ break;
+ }
+
+ // Write the requested instruction.
+ this->writeOpCode(opCode, words.size() + 1, out);
+ for (const Word& word : words) {
+ if (word.isResult()) {
+ SkASSERT(result != NA);
+ this->writeWord(result, out);
+ } else {
+ this->writeWord(word.fValue, out);
+ }
+ }
+
+ // Return the result.
+ return result;
+}
+
+SpvId SPIRVCodeGenerator::writeOpLoad(SpvId type,
+ Precision precision,
+ SpvId pointer,
+ OutputStream& out) {
+ // Look for this pointer in our load-cache.
+ if (SpvId* cachedOp = fStoreCache.find(pointer)) {
+ return *cachedOp;
+ }
+
+ // Write the requested OpLoad instruction.
+ SpvId result = this->nextId(precision);
+ this->writeInstruction(SpvOpLoad, type, result, pointer, out);
+ return result;
+}
+
+void SPIRVCodeGenerator::writeOpStore(SpvStorageClass_ storageClass,
+ SpvId pointer,
+ SpvId value,
+ OutputStream& out) {
+ // Write the uncached SpvOpStore directly.
+ this->writeInstruction(SpvOpStore, pointer, value, out);
+
+ if (storageClass == SpvStorageClassFunction) {
+ // Insert a pointer-to-SpvId mapping into the load cache. A writeOpLoad to this pointer will
+ // return the cached value as-is.
+ fStoreCache.set(pointer, value);
+ fStoreOps.push_back(pointer);
+ }
+}
+
+SpvId SPIRVCodeGenerator::writeOpConstantTrue(const Type& type) {
+ return this->writeInstruction(SpvOpConstantTrue,
+ Words{this->getType(type), Word::Result()},
+ fConstantBuffer);
+}
+
+SpvId SPIRVCodeGenerator::writeOpConstantFalse(const Type& type) {
+ return this->writeInstruction(SpvOpConstantFalse,
+ Words{this->getType(type), Word::Result()},
+ fConstantBuffer);
+}
+
+SpvId SPIRVCodeGenerator::writeOpConstant(const Type& type, int32_t valueBits) {
+ return this->writeInstruction(
+ SpvOpConstant,
+ Words{this->getType(type), Word::Result(), Word::Number(valueBits)},
+ fConstantBuffer);
+}
+
+SpvId SPIRVCodeGenerator::writeOpConstantComposite(const Type& type,
+ const SkTArray<SpvId>& values) {
+ SkASSERT(values.size() == (type.isStruct() ? (int)type.fields().size() : type.columns()));
+
+ Words words;
+ words.push_back(this->getType(type));
+ words.push_back(Word::Result());
+ for (SpvId value : values) {
+ words.push_back(value);
+ }
+ return this->writeInstruction(SpvOpConstantComposite, words, fConstantBuffer);
+}
+
+bool SPIRVCodeGenerator::toConstants(SpvId value, SkTArray<SpvId>* constants) {
+ Instruction* instr = fSpvIdCache.find(value);
+ if (!instr) {
+ return false;
+ }
+ switch (instr->fOp) {
+ case SpvOpConstant:
+ case SpvOpConstantTrue:
+ case SpvOpConstantFalse:
+ constants->push_back(value);
+ return true;
+
+ case SpvOpConstantComposite: // OpConstantComposite ResultType ResultID Constituents...
+ // Start at word 2 to skip past ResultType and ResultID.
+ for (int i = 2; i < instr->fWords.size(); ++i) {
+ if (!this->toConstants(instr->fWords[i], constants)) {
+ return false;
+ }
+ }
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool SPIRVCodeGenerator::toConstants(SkSpan<const SpvId> values, SkTArray<SpvId>* constants) {
+ for (SpvId value : values) {
+ if (!this->toConstants(value, constants)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+SpvId SPIRVCodeGenerator::writeOpCompositeConstruct(const Type& type,
+ const SkTArray<SpvId>& values,
+ OutputStream& out) {
+ // If this is a vector composed entirely of literals, write a constant-composite instead.
+ if (type.isVector()) {
+ SkSTArray<4, SpvId> constants;
+ if (this->toConstants(SkSpan(values), &constants)) {
+ // Create a vector from literals.
+ return this->writeOpConstantComposite(type, constants);
+ }
+ }
+
+ // If this is a matrix composed entirely of literals, constant-composite them instead.
+ if (type.isMatrix()) {
+ SkSTArray<16, SpvId> constants;
+ if (this->toConstants(SkSpan(values), &constants)) {
+ // Create each matrix column.
+ SkASSERT(type.isMatrix());
+ const Type& vecType = type.componentType().toCompound(fContext,
+ /*columns=*/type.rows(),
+ /*rows=*/1);
+ SkSTArray<4, SpvId> columnIDs;
+ for (int index=0; index < type.columns(); ++index) {
+ SkSTArray<4, SpvId> columnConstants(&constants[index * type.rows()],
+ type.rows());
+ columnIDs.push_back(this->writeOpConstantComposite(vecType, columnConstants));
+ }
+ // Compose the matrix from its columns.
+ return this->writeOpConstantComposite(type, columnIDs);
+ }
+ }
+
+ Words words;
+ words.push_back(this->getType(type));
+ words.push_back(Word::Result(type));
+ for (SpvId value : values) {
+ words.push_back(value);
+ }
+
+ return this->writeInstruction(SpvOpCompositeConstruct, words, out);
+}
+
+SPIRVCodeGenerator::Instruction* SPIRVCodeGenerator::resultTypeForInstruction(
+ const Instruction& instr) {
+ // This list should contain every op that we cache that has a result and result-type.
+ // (If one is missing, we will not find some optimization opportunities.)
+ // Generally, the result type of an op is in the 0th word, but I'm not sure if this is
+ // universally true, so it's configurable on a per-op basis.
+ int resultTypeWord;
+ switch (instr.fOp) {
+ case SpvOpConstant:
+ case SpvOpConstantTrue:
+ case SpvOpConstantFalse:
+ case SpvOpConstantComposite:
+ case SpvOpCompositeConstruct:
+ case SpvOpCompositeExtract:
+ case SpvOpLoad:
+ resultTypeWord = 0;
+ break;
+
+ default:
+ return nullptr;
+ }
+
+ Instruction* typeInstr = fSpvIdCache.find(instr.fWords[resultTypeWord]);
+ SkASSERT(typeInstr);
+ return typeInstr;
+}
+
+int SPIRVCodeGenerator::numComponentsForVecInstruction(const Instruction& instr) {
+ // If an instruction is in the op cache, its type should be as well.
+ Instruction* typeInstr = this->resultTypeForInstruction(instr);
+ SkASSERT(typeInstr);
+ SkASSERT(typeInstr->fOp == SpvOpTypeVector || typeInstr->fOp == SpvOpTypeFloat ||
+ typeInstr->fOp == SpvOpTypeInt || typeInstr->fOp == SpvOpTypeBool);
+
+ // For vectors, extract their column count. Scalars have one component by definition.
+ // SpvOpTypeVector ResultID ComponentType NumComponents
+ return (typeInstr->fOp == SpvOpTypeVector) ? typeInstr->fWords[2]
+ : 1;
+}
+
+SpvId SPIRVCodeGenerator::toComponent(SpvId id, int component) {
+ Instruction* instr = fSpvIdCache.find(id);
+ if (!instr) {
+ return NA;
+ }
+ if (instr->fOp == SpvOpConstantComposite) {
+ // SpvOpConstantComposite ResultType ResultID [components...]
+ // Add 2 to the component index to skip past ResultType and ResultID.
+ return instr->fWords[2 + component];
+ }
+ if (instr->fOp == SpvOpCompositeConstruct) {
+ // SpvOpCompositeConstruct ResultType ResultID [components...]
+ // Vectors have special rules; check to see if we are composing a vector.
+ Instruction* composedType = fSpvIdCache.find(instr->fWords[0]);
+ SkASSERT(composedType);
+
+ // When composing a non-vector, each instruction word maps 1:1 to the component index.
+ // We can just extract out the associated component directly.
+ if (composedType->fOp != SpvOpTypeVector) {
+ return instr->fWords[2 + component];
+ }
+
+ // When composing a vector, components can be either scalars or vectors.
+ // This means we need to check the op type on each component. (+2 to skip ResultType/Result)
+ for (int index = 2; index < instr->fWords.size(); ++index) {
+ int32_t currentWord = instr->fWords[index];
+
+ // Retrieve the sub-instruction pointed to by OpCompositeConstruct.
+ Instruction* subinstr = fSpvIdCache.find(currentWord);
+ if (!subinstr) {
+ return NA;
+ }
+ // If this subinstruction contains the component we're looking for...
+ int numComponents = this->numComponentsForVecInstruction(*subinstr);
+ if (component < numComponents) {
+ if (numComponents == 1) {
+ // ... it's a scalar. Return it.
+ SkASSERT(component == 0);
+ return currentWord;
+ } else {
+ // ... it's a vector. Recurse into it.
+ return this->toComponent(currentWord, component);
+ }
+ }
+ // This sub-instruction doesn't contain our component. Keep walking forward.
+ component -= numComponents;
+ }
+ SkDEBUGFAIL("component index goes past the end of this composite value");
+ return NA;
+ }
+ return NA;
+}
+
+SpvId SPIRVCodeGenerator::writeOpCompositeExtract(const Type& type,
+ SpvId base,
+ int component,
+ OutputStream& out) {
+ // If the base op is a composite, we can extract from it directly.
+ SpvId result = this->toComponent(base, component);
+ if (result != NA) {
+ return result;
+ }
+ return this->writeInstruction(
+ SpvOpCompositeExtract,
+ {this->getType(type), Word::Result(type), base, Word::Number(component)},
+ out);
+}
+
+SpvId SPIRVCodeGenerator::writeOpCompositeExtract(const Type& type,
+ SpvId base,
+ int componentA,
+ int componentB,
+ OutputStream& out) {
+ // If the base op is a composite, we can extract from it directly.
+ SpvId result = this->toComponent(base, componentA);
+ if (result != NA) {
+ return this->writeOpCompositeExtract(type, result, componentB, out);
+ }
+ return this->writeInstruction(SpvOpCompositeExtract,
+ {this->getType(type),
+ Word::Result(type),
+ base,
+ Word::Number(componentA),
+ Word::Number(componentB)},
+ out);
+}
+
+void SPIRVCodeGenerator::writeCapabilities(OutputStream& out) {
+ for (uint64_t i = 0, bit = 1; i <= kLast_Capability; i++, bit <<= 1) {
+ if (fCapabilities & bit) {
+ this->writeInstruction(SpvOpCapability, (SpvId) i, out);
+ }
+ }
+ this->writeInstruction(SpvOpCapability, SpvCapabilityShader, out);
+}
+
+SpvId SPIRVCodeGenerator::nextId(const Type* type) {
+ return this->nextId(type && type->hasPrecision() && !type->highPrecision()
+ ? Precision::kRelaxed
+ : Precision::kDefault);
+}
+
+SpvId SPIRVCodeGenerator::nextId(Precision precision) {
+ if (precision == Precision::kRelaxed && !fProgram.fConfig->fSettings.fForceHighPrecision) {
+ this->writeInstruction(SpvOpDecorate, fIdCount, SpvDecorationRelaxedPrecision,
+ fDecorationBuffer);
+ }
+ return fIdCount++;
+}
+
// Emits an OpTypeStruct (plus OpName, member names, and member decorations) for
// `type` under the given memory layout, validating per-field offsets/alignment.
// Results are cached in fStructMap so each struct type is only written once.
SpvId SPIRVCodeGenerator::writeStruct(const Type& type, const MemoryLayout& memoryLayout) {
    // If we've already written out this struct, return its existing SpvId.
    if (SpvId* cachedStructId = fStructMap.find(&type)) {
        return *cachedStructId;
    }

    // Write all of the field types first, so we don't inadvertently write them while we're in the
    // middle of writing the struct instruction.
    Words words;
    words.push_back(Word::UniqueResult());
    for (const auto& f : type.fields()) {
        words.push_back(this->getType(*f.fType, memoryLayout));
    }
    SpvId resultId = this->writeInstruction(SpvOpTypeStruct, words, fConstantBuffer);
    this->writeInstruction(SpvOpName, resultId, type.name(), fNameBuffer);
    // Cache before decorating members; the id is already final.
    fStructMap.set(&type, resultId);

    // Walk the fields, computing each member's byte offset and emitting its
    // name and layout decorations.
    size_t offset = 0;
    for (int32_t i = 0; i < (int32_t) type.fields().size(); i++) {
        const Type::Field& field = type.fields()[i];
        if (!memoryLayout.isSupported(*field.fType)) {
            fContext.fErrors->error(type.fPosition, "type '" + field.fType->displayName() +
                                                    "' is not permitted here");
            return resultId;
        }
        size_t size = memoryLayout.size(*field.fType);
        size_t alignment = memoryLayout.alignment(*field.fType);
        const Layout& fieldLayout = field.fModifiers.fLayout;
        if (fieldLayout.fOffset >= 0) {
            // An explicit offset must not move backwards and must respect alignment.
            if (fieldLayout.fOffset < (int) offset) {
                fContext.fErrors->error(field.fPosition, "offset of field '" +
                        std::string(field.fName) + "' must be at least " + std::to_string(offset));
            }
            if (fieldLayout.fOffset % alignment) {
                fContext.fErrors->error(field.fPosition,
                                        "offset of field '" + std::string(field.fName) +
                                        "' must be a multiple of " + std::to_string(alignment));
            }
            offset = fieldLayout.fOffset;
        } else {
            // No explicit offset: round the running offset up to the field's alignment.
            size_t mod = offset % alignment;
            if (mod) {
                offset += alignment - mod;
            }
        }
        this->writeInstruction(SpvOpMemberName, resultId, i, field.fName, fNameBuffer);
        this->writeFieldLayout(fieldLayout, resultId, i);
        if (field.fModifiers.fLayout.fBuiltin < 0) {
            // Non-builtin members get an explicit Offset decoration.
            this->writeInstruction(SpvOpMemberDecorate, resultId, (SpvId) i, SpvDecorationOffset,
                                   (SpvId) offset, fDecorationBuffer);
        }
        if (field.fType->isMatrix()) {
            // Matrices additionally need a majorness and stride decoration.
            this->writeInstruction(SpvOpMemberDecorate, resultId, i, SpvDecorationColMajor,
                                   fDecorationBuffer);
            this->writeInstruction(SpvOpMemberDecorate, resultId, i, SpvDecorationMatrixStride,
                                   (SpvId) memoryLayout.stride(*field.fType),
                                   fDecorationBuffer);
        }
        if (!field.fType->highPrecision()) {
            this->writeInstruction(SpvOpMemberDecorate, resultId, (SpvId) i,
                                   SpvDecorationRelaxedPrecision, fDecorationBuffer);
        }
        offset += size;
        // Arrays and structs round their trailing padding up to their alignment.
        if ((field.fType->isArray() || field.fType->isStruct()) && offset % alignment != 0) {
            offset += alignment - offset % alignment;
        }
    }

    return resultId;
}
+
+SpvId SPIRVCodeGenerator::getType(const Type& type) {
+ return this->getType(type, fDefaultLayout);
+}
+
+SpvId SPIRVCodeGenerator::getType(const Type& rawType, const MemoryLayout& layout) {
+ const Type* type = &rawType;
+
+ switch (type->typeKind()) {
+ case Type::TypeKind::kVoid: {
+ return this->writeInstruction(SpvOpTypeVoid, Words{Word::Result()}, fConstantBuffer);
+ }
+ case Type::TypeKind::kScalar:
+ case Type::TypeKind::kLiteral: {
+ if (type->isBoolean()) {
+ return this->writeInstruction(SpvOpTypeBool, {Word::Result()}, fConstantBuffer);
+ }
+ if (type->isSigned()) {
+ return this->writeInstruction(
+ SpvOpTypeInt,
+ Words{Word::Result(), Word::Number(32), Word::Number(1)},
+ fConstantBuffer);
+ }
+ if (type->isUnsigned()) {
+ return this->writeInstruction(
+ SpvOpTypeInt,
+ Words{Word::Result(), Word::Number(32), Word::Number(0)},
+ fConstantBuffer);
+ }
+ if (type->isFloat()) {
+ return this->writeInstruction(
+ SpvOpTypeFloat,
+ Words{Word::Result(), Word::Number(32)},
+ fConstantBuffer);
+ }
+ SkDEBUGFAILF("unrecognized scalar type '%s'", type->description().c_str());
+ return (SpvId)-1;
+ }
+ case Type::TypeKind::kVector: {
+ SpvId scalarTypeId = this->getType(type->componentType(), layout);
+ return this->writeInstruction(
+ SpvOpTypeVector,
+ Words{Word::Result(), scalarTypeId, Word::Number(type->columns())},
+ fConstantBuffer);
+ }
+ case Type::TypeKind::kMatrix: {
+ SpvId vectorTypeId = this->getType(IndexExpression::IndexType(fContext, *type), layout);
+ return this->writeInstruction(
+ SpvOpTypeMatrix,
+ Words{Word::Result(), vectorTypeId, Word::Number(type->columns())},
+ fConstantBuffer);
+ }
+ case Type::TypeKind::kArray: {
+ if (!layout.isSupported(*type)) {
+ fContext.fErrors->error(type->fPosition, "type '" + type->displayName() +
+ "' is not permitted here");
+ return NA;
+ }
+ size_t stride = layout.stride(*type);
+ SpvId typeId = this->getType(type->componentType(), layout);
+ SpvId result = NA;
+ if (type->isUnsizedArray()) {
+ result = this->writeInstruction(SpvOpTypeRuntimeArray,
+ Words{Word::KeyedResult(stride), typeId},
+ fConstantBuffer);
+ } else {
+ SpvId countId = this->writeLiteral(type->columns(), *fContext.fTypes.fInt);
+ result = this->writeInstruction(SpvOpTypeArray,
+ Words{Word::KeyedResult(stride), typeId, countId},
+ fConstantBuffer);
+ }
+ this->writeInstruction(SpvOpDecorate,
+ {result, SpvDecorationArrayStride, Word::Number(stride)},
+ fDecorationBuffer);
+ return result;
+ }
+ case Type::TypeKind::kStruct: {
+ return this->writeStruct(*type, layout);
+ }
+ case Type::TypeKind::kSeparateSampler: {
+ return this->writeInstruction(SpvOpTypeSampler, Words{Word::Result()}, fConstantBuffer);
+ }
+ case Type::TypeKind::kSampler: {
+ // Subpass inputs should use the Texture type, not a Sampler.
+ SkASSERT(type->dimensions() != SpvDimSubpassData);
+ if (SpvDimBuffer == type->dimensions()) {
+ fCapabilities |= 1ULL << SpvCapabilitySampledBuffer;
+ }
+ SpvId imageTypeId = this->getType(type->textureType(), layout);
+ return this->writeInstruction(SpvOpTypeSampledImage,
+ Words{Word::Result(), imageTypeId},
+ fConstantBuffer);
+ }
+ case Type::TypeKind::kTexture: {
+ SpvId floatTypeId = this->getType(*fContext.fTypes.fFloat, layout);
+ int sampled = (type->textureAccess() == Type::TextureAccess::kSample) ? 1 : 2;
+ return this->writeInstruction(SpvOpTypeImage,
+ Words{Word::Result(),
+ floatTypeId,
+ Word::Number(type->dimensions()),
+ Word::Number(type->isDepth()),
+ Word::Number(type->isArrayedTexture()),
+ Word::Number(type->isMultisampled()),
+ Word::Number(sampled),
+ SpvImageFormatUnknown},
+ fConstantBuffer);
+ }
+ default: {
+ SkDEBUGFAILF("invalid type: %s", type->description().c_str());
+ return NA;
+ }
+ }
+}
+
+SpvId SPIRVCodeGenerator::getFunctionType(const FunctionDeclaration& function) {
+ Words words;
+ words.push_back(Word::Result());
+ words.push_back(this->getType(function.returnType()));
+ for (const Variable* parameter : function.parameters()) {
+ if (parameter->type().typeKind() == Type::TypeKind::kSampler &&
+ fProgram.fConfig->fSettings.fSPIRVDawnCompatMode) {
+ words.push_back(this->getFunctionParameterType(parameter->type().textureType()));
+ words.push_back(this->getFunctionParameterType(*fContext.fTypes.fSampler));
+ } else {
+ words.push_back(this->getFunctionParameterType(parameter->type()));
+ }
+ }
+ return this->writeInstruction(SpvOpTypeFunction, words, fConstantBuffer);
+}
+
+SpvId SPIRVCodeGenerator::getFunctionParameterType(const Type& parameterType) {
+ // glslang treats all function arguments as pointers whether they need to be or
+ // not. I was initially puzzled by this until I ran bizarre failures with certain
+ // patterns of function calls and control constructs, as exemplified by this minimal
+ // failure case:
+ //
+ // void sphere(float x) {
+ // }
+ //
+ // void map() {
+ // sphere(1.0);
+ // }
+ //
+ // void main() {
+ // for (int i = 0; i < 1; i++) {
+ // map();
+ // }
+ // }
+ //
+ // As of this writing, compiling this in the "obvious" way (with sphere taking a float)
+ // crashes. Making it take a float* and storing the argument in a temporary variable,
+ // as glslang does, fixes it.
+ //
+ // The consensus among shader compiler authors seems to be that GPU driver generally don't
+ // handle value-based parameters consistently. It is highly likely that they fit their
+ // implementations to conform to glslang. We take care to do so ourselves.
+ //
+ // Our implementation first stores every parameter value into a function storage-class pointer
+ // before calling a function. The exception is for opaque handle types (samplers and textures)
+ // which must be stored in a pointer with UniformConstant storage-class. This prevents
+ // unnecessary temporaries (becuase opaque handles are always rooted in a pointer variable),
+ // matches glslang's behavior, and translates into WGSL more easily when targeting Dawn.
+ SpvStorageClass_ storageClass;
+ if (parameterType.typeKind() == Type::TypeKind::kSampler ||
+ parameterType.typeKind() == Type::TypeKind::kSeparateSampler ||
+ parameterType.typeKind() == Type::TypeKind::kTexture) {
+ storageClass = SpvStorageClassUniformConstant;
+ } else {
+ storageClass = SpvStorageClassFunction;
+ }
+ return this->getPointerType(parameterType, storageClass);
+}
+
+SpvId SPIRVCodeGenerator::getPointerType(const Type& type, SpvStorageClass_ storageClass) {
+ return this->getPointerType(
+ type, this->memoryLayoutForStorageClass(storageClass), storageClass);
+}
+
+SpvId SPIRVCodeGenerator::getPointerType(const Type& type, const MemoryLayout& layout,
+ SpvStorageClass_ storageClass) {
+ return this->writeInstruction(
+ SpvOpTypePointer,
+ Words{Word::Result(), Word::Number(storageClass), this->getType(type, layout)},
+ fConstantBuffer);
+}
+
// Central expression dispatcher: emits the SPIR-V for `expr` into `out` and
// returns the resulting value's SpvId, or NA for unsupported expression kinds.
SpvId SPIRVCodeGenerator::writeExpression(const Expression& expr, OutputStream& out) {
    switch (expr.kind()) {
        case Expression::Kind::kBinary:
            return this->writeBinaryExpression(expr.as<BinaryExpression>(), out);
        // Array casts require no code; just emit the underlying argument.
        case Expression::Kind::kConstructorArrayCast:
            return this->writeExpression(*expr.as<ConstructorArrayCast>().argument(), out);
        case Expression::Kind::kConstructorArray:
        case Expression::Kind::kConstructorStruct:
            return this->writeCompositeConstructor(expr.asAnyConstructor(), out);
        case Expression::Kind::kConstructorDiagonalMatrix:
            return this->writeConstructorDiagonalMatrix(expr.as<ConstructorDiagonalMatrix>(), out);
        case Expression::Kind::kConstructorMatrixResize:
            return this->writeConstructorMatrixResize(expr.as<ConstructorMatrixResize>(), out);
        case Expression::Kind::kConstructorScalarCast:
            return this->writeConstructorScalarCast(expr.as<ConstructorScalarCast>(), out);
        case Expression::Kind::kConstructorSplat:
            return this->writeConstructorSplat(expr.as<ConstructorSplat>(), out);
        case Expression::Kind::kConstructorCompound:
            return this->writeConstructorCompound(expr.as<ConstructorCompound>(), out);
        case Expression::Kind::kConstructorCompoundCast:
            return this->writeConstructorCompoundCast(expr.as<ConstructorCompoundCast>(), out);
        case Expression::Kind::kFieldAccess:
            return this->writeFieldAccess(expr.as<FieldAccess>(), out);
        case Expression::Kind::kFunctionCall:
            return this->writeFunctionCall(expr.as<FunctionCall>(), out);
        case Expression::Kind::kLiteral:
            return this->writeLiteral(expr.as<Literal>());
        case Expression::Kind::kPrefix:
            return this->writePrefixExpression(expr.as<PrefixExpression>(), out);
        case Expression::Kind::kPostfix:
            return this->writePostfixExpression(expr.as<PostfixExpression>(), out);
        case Expression::Kind::kSwizzle:
            return this->writeSwizzle(expr.as<Swizzle>(), out);
        case Expression::Kind::kVariableReference:
            return this->writeVariableReference(expr.as<VariableReference>(), out);
        case Expression::Kind::kTernary:
            return this->writeTernaryExpression(expr.as<TernaryExpression>(), out);
        case Expression::Kind::kIndex:
            return this->writeIndexExpression(expr.as<IndexExpression>(), out);
        // Settings fold to compile-time literals; emit the folded literal.
        case Expression::Kind::kSetting:
            return this->writeExpression(*expr.as<Setting>().toLiteral(fContext), out);
        default:
            SkDEBUGFAILF("unsupported expression: %s", expr.description().c_str());
            break;
    }
    return NA;
}
+
// Emits a call to an intrinsic function, dispatching on the intrinsic's opcode
// kind: a GLSL.std.450 extended instruction, a core SPIR-V opcode, or a
// "special" intrinsic with bespoke lowering. Returns the result id, or NA on
// error.
SpvId SPIRVCodeGenerator::writeIntrinsicCall(const FunctionCall& c, OutputStream& out) {
    const FunctionDeclaration& function = c.function();
    Intrinsic intrinsic = this->getIntrinsic(function.intrinsicKind());
    if (intrinsic.opKind == kInvalid_IntrinsicOpcodeKind) {
        fContext.fErrors->error(c.fPosition, "unsupported intrinsic '" + function.description() +
                "'");
        return NA;
    }
    const ExpressionArray& arguments = c.arguments();
    // Default to the float flavor; refine by the first argument's type below.
    int32_t intrinsicId = intrinsic.floatOp;
    if (arguments.size() > 0) {
        const Type& type = arguments[0]->type();
        if (intrinsic.opKind == kSpecial_IntrinsicOpcodeKind) {
            // Keep the default float op.
        } else {
            intrinsicId = pick_by_type(type, intrinsic.floatOp, intrinsic.signedOp,
                                       intrinsic.unsignedOp, intrinsic.boolOp);
        }
    }
    switch (intrinsic.opKind) {
        case kGLSL_STD_450_IntrinsicOpcodeKind: {
            // OpExtInst: word count is 5 fixed words plus one per argument.
            SpvId result = this->nextId(&c.type());
            SkTArray<SpvId> argumentIds;
            std::vector<TempVar> tempVars;
            argumentIds.reserve_back(arguments.size());
            for (int i = 0; i < arguments.size(); i++) {
                argumentIds.push_back(this->writeFunctionCallArgument(c, i, &tempVars, out));
            }
            this->writeOpCode(SpvOpExtInst, 5 + (int32_t) argumentIds.size(), out);
            this->writeWord(this->getType(c.type()), out);
            this->writeWord(result, out);
            this->writeWord(fGLSLExtendedInstructions, out);
            this->writeWord(intrinsicId, out);
            for (SpvId id : argumentIds) {
                this->writeWord(id, out);
            }
            // Write out-parameter temps back to their original lvalues.
            this->copyBackTempVars(tempVars, out);
            return result;
        }
        case kSPIRV_IntrinsicOpcodeKind: {
            // GLSL supports dot(float, float), but SPIR-V does not. Convert it to FMul
            if (intrinsicId == SpvOpDot && arguments[0]->type().isScalar()) {
                intrinsicId = SpvOpFMul;
            }
            SpvId result = this->nextId(&c.type());
            SkTArray<SpvId> argumentIds;
            std::vector<TempVar> tempVars;
            argumentIds.reserve_back(arguments.size());
            // One id is pushed per argument, so argumentIds.size() == arguments.size()
            // (no sampler splitting happens on this path).
            for (int i = 0; i < arguments.size(); i++) {
                argumentIds.push_back(this->writeFunctionCallArgument(c, i, &tempVars, out));
            }
            // Void-returning opcodes omit the result-type/result-id words.
            if (!c.type().isVoid()) {
                this->writeOpCode((SpvOp_) intrinsicId, 3 + (int32_t) arguments.size(), out);
                this->writeWord(this->getType(c.type()), out);
                this->writeWord(result, out);
            } else {
                this->writeOpCode((SpvOp_) intrinsicId, 1 + (int32_t) arguments.size(), out);
            }
            for (SpvId id : argumentIds) {
                this->writeWord(id, out);
            }
            // Write out-parameter temps back to their original lvalues.
            this->copyBackTempVars(tempVars, out);
            return result;
        }
        case kSpecial_IntrinsicOpcodeKind:
            return this->writeSpecialIntrinsic(c, (SpecialIntrinsic) intrinsicId, out);
        default:
            fContext.fErrors->error(c.fPosition, "unsupported intrinsic '" +
                    function.description() + "'");
            return NA;
    }
}
+
+SpvId SPIRVCodeGenerator::vectorize(const Expression& arg, int vectorSize, OutputStream& out) {
+ SkASSERT(vectorSize >= 1 && vectorSize <= 4);
+ const Type& argType = arg.type();
+ if (argType.isScalar() && vectorSize > 1) {
+ ConstructorSplat splat{arg.fPosition,
+ argType.toCompound(fContext, vectorSize, /*rows=*/1),
+ arg.clone()};
+ return this->writeConstructorSplat(splat, out);
+ }
+
+ SkASSERT(vectorSize == argType.columns());
+ return this->writeExpression(arg, out);
+}
+
+SkTArray<SpvId> SPIRVCodeGenerator::vectorize(const ExpressionArray& args, OutputStream& out) {
+ int vectorSize = 1;
+ for (const auto& a : args) {
+ if (a->type().isVector()) {
+ if (vectorSize > 1) {
+ SkASSERT(a->type().columns() == vectorSize);
+ } else {
+ vectorSize = a->type().columns();
+ }
+ }
+ }
+ SkTArray<SpvId> result;
+ result.reserve_back(args.size());
+ for (const auto& arg : args) {
+ result.push_back(this->vectorize(*arg, vectorSize, out));
+ }
+ return result;
+}
+
+void SPIRVCodeGenerator::writeGLSLExtendedInstruction(const Type& type, SpvId id, SpvId floatInst,
+ SpvId signedInst, SpvId unsignedInst,
+ const SkTArray<SpvId>& args,
+ OutputStream& out) {
+ this->writeOpCode(SpvOpExtInst, 5 + args.size(), out);
+ this->writeWord(this->getType(type), out);
+ this->writeWord(id, out);
+ this->writeWord(fGLSLExtendedInstructions, out);
+ this->writeWord(pick_by_type(type, floatInst, signedInst, unsignedInst, NA), out);
+ for (SpvId a : args) {
+ this->writeWord(a, out);
+ }
+}
+
// Lowers intrinsics that need bespoke handling (rather than a 1:1 mapping to a
// single opcode): texture sampling variants, boolean mix, atan/atan2 selection,
// argument vectorization for min/max/clamp/etc., and the RT-flip fixup for dFdy.
// Returns the result id of the emitted value.
SpvId SPIRVCodeGenerator::writeSpecialIntrinsic(const FunctionCall& c, SpecialIntrinsic kind,
                                                OutputStream& out) {
    const ExpressionArray& arguments = c.arguments();
    const Type& callType = c.type();
    SpvId result = this->nextId(nullptr);
    switch (kind) {
        // atan() maps to GLSLstd450Atan or GLSLstd450Atan2 based on arity.
        case kAtan_SpecialIntrinsic: {
            SkSTArray<2, SpvId> argumentIds;
            for (const std::unique_ptr<Expression>& arg : arguments) {
                argumentIds.push_back(this->writeExpression(*arg, out));
            }
            this->writeOpCode(SpvOpExtInst, 5 + (int32_t) argumentIds.size(), out);
            this->writeWord(this->getType(callType), out);
            this->writeWord(result, out);
            this->writeWord(fGLSLExtendedInstructions, out);
            this->writeWord(argumentIds.size() == 2 ? GLSLstd450Atan2 : GLSLstd450Atan, out);
            for (SpvId id : argumentIds) {
                this->writeWord(id, out);
            }
            break;
        }
        // Combine a texture and a sampler into an OpSampledImage.
        case kSampledImage_SpecialIntrinsic: {
            SkASSERT(arguments.size() == 2);
            SpvId img = this->writeExpression(*arguments[0], out);
            SpvId sampler = this->writeExpression(*arguments[1], out);
            this->writeInstruction(SpvOpSampledImage,
                                   this->getType(callType),
                                   result,
                                   img,
                                   sampler,
                                   out);
            break;
        }
        // Subpass loads become OpImageRead at coordinate (0, 0), optionally with
        // an explicit sample index.
        case kSubpassLoad_SpecialIntrinsic: {
            SpvId img = this->writeExpression(*arguments[0], out);
            ExpressionArray args;
            args.reserve_back(2);
            args.push_back(Literal::MakeInt(fContext, Position(), /*value=*/0));
            args.push_back(Literal::MakeInt(fContext, Position(), /*value=*/0));
            ConstructorCompound ctor(Position(), *fContext.fTypes.fInt2, std::move(args));
            SpvId coords = this->writeExpression(ctor, out);
            if (arguments.size() == 1) {
                this->writeInstruction(SpvOpImageRead,
                                       this->getType(callType),
                                       result,
                                       img,
                                       coords,
                                       out);
            } else {
                SkASSERT(arguments.size() == 2);
                SpvId sample = this->writeExpression(*arguments[1], out);
                this->writeInstruction(SpvOpImageRead,
                                       this->getType(callType),
                                       result,
                                       img,
                                       coords,
                                       SpvImageOperandsSampleMask,
                                       sample,
                                       out);
            }
            break;
        }
        // texture(): select implicit-LOD vs projective sampling based on the
        // image dimensionality and the coordinate width.
        case kTexture_SpecialIntrinsic: {
            SpvOp_ op = SpvOpImageSampleImplicitLod;
            const Type& arg1Type = arguments[1]->type();
            switch (arguments[0]->type().dimensions()) {
                case SpvDim1D:
                    if (arg1Type.matches(*fContext.fTypes.fFloat2)) {
                        op = SpvOpImageSampleProjImplicitLod;
                    } else {
                        SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat));
                    }
                    break;
                case SpvDim2D:
                    if (arg1Type.matches(*fContext.fTypes.fFloat3)) {
                        op = SpvOpImageSampleProjImplicitLod;
                    } else {
                        SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat2));
                    }
                    break;
                case SpvDim3D:
                    if (arg1Type.matches(*fContext.fTypes.fFloat4)) {
                        op = SpvOpImageSampleProjImplicitLod;
                    } else {
                        SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat3));
                    }
                    break;
                case SpvDimCube:   // fall through
                case SpvDimRect:   // fall through
                case SpvDimBuffer: // fall through
                case SpvDimSubpassData:
                    break;
            }
            SpvId type = this->getType(callType);
            SpvId sampler = this->writeExpression(*arguments[0], out);
            SpvId uv = this->writeExpression(*arguments[1], out);
            if (arguments.size() == 3) {
                // Explicit LOD bias argument.
                this->writeInstruction(op, type, result, sampler, uv,
                                       SpvImageOperandsBiasMask,
                                       this->writeExpression(*arguments[2], out),
                                       out);
            } else {
                SkASSERT(arguments.size() == 2);
                if (fProgram.fConfig->fSettings.fSharpenTextures) {
                    // Apply the global sharpening bias when requested by settings.
                    SpvId lodBias = this->writeLiteral(kSharpenTexturesBias,
                                                       *fContext.fTypes.fFloat);
                    this->writeInstruction(op, type, result, sampler, uv,
                                           SpvImageOperandsBiasMask, lodBias, out);
                } else {
                    this->writeInstruction(op, type, result, sampler, uv,
                                           out);
                }
            }
            break;
        }
        // textureGrad(): explicit-LOD sampling with dPdx/dPdy gradients.
        case kTextureGrad_SpecialIntrinsic: {
            SpvOp_ op = SpvOpImageSampleExplicitLod;
            SkASSERT(arguments.size() == 4);
            SkASSERT(arguments[0]->type().dimensions() == SpvDim2D);
            SkASSERT(arguments[1]->type().matches(*fContext.fTypes.fFloat2));
            SkASSERT(arguments[2]->type().matches(*fContext.fTypes.fFloat2));
            SkASSERT(arguments[3]->type().matches(*fContext.fTypes.fFloat2));
            SpvId type = this->getType(callType);
            SpvId sampler = this->writeExpression(*arguments[0], out);
            SpvId uv = this->writeExpression(*arguments[1], out);
            SpvId dPdx = this->writeExpression(*arguments[2], out);
            SpvId dPdy = this->writeExpression(*arguments[3], out);
            this->writeInstruction(op, type, result, sampler, uv, SpvImageOperandsGradMask,
                                   dPdx, dPdy, out);
            break;
        }
        // textureLod(): explicit-LOD sampling, projective when given a float3 coord.
        case kTextureLod_SpecialIntrinsic: {
            SpvOp_ op = SpvOpImageSampleExplicitLod;
            SkASSERT(arguments.size() == 3);
            SkASSERT(arguments[0]->type().dimensions() == SpvDim2D);
            SkASSERT(arguments[2]->type().matches(*fContext.fTypes.fFloat));
            const Type& arg1Type = arguments[1]->type();
            if (arg1Type.matches(*fContext.fTypes.fFloat3)) {
                op = SpvOpImageSampleProjExplicitLod;
            } else {
                SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat2));
            }
            SpvId type = this->getType(callType);
            SpvId sampler = this->writeExpression(*arguments[0], out);
            SpvId uv = this->writeExpression(*arguments[1], out);
            this->writeInstruction(op, type, result, sampler, uv,
                                   SpvImageOperandsLodMask,
                                   this->writeExpression(*arguments[2], out),
                                   out);
            break;
        }
        // mod(): pick FMod/SMod/UMod by operand type, vectorizing scalar args.
        case kMod_SpecialIntrinsic: {
            SkTArray<SpvId> args = this->vectorize(arguments, out);
            SkASSERT(args.size() == 2);
            const Type& operandType = arguments[0]->type();
            SpvOp_ op = pick_by_type(operandType, SpvOpFMod, SpvOpSMod, SpvOpUMod, SpvOpUndef);
            SkASSERT(op != SpvOpUndef);
            this->writeOpCode(op, 5, out);
            this->writeWord(this->getType(operandType), out);
            this->writeWord(result, out);
            this->writeWord(args[0], out);
            this->writeWord(args[1], out);
            break;
        }
        // dFdy(): emit OpDPdy, then multiply by the RT-flip uniform so the sign
        // matches the logical (unflipped) coordinate space.
        case kDFdy_SpecialIntrinsic: {
            SpvId fn = this->writeExpression(*arguments[0], out);
            this->writeOpCode(SpvOpDPdy, 4, out);
            this->writeWord(this->getType(callType), out);
            this->writeWord(result, out);
            this->writeWord(fn, out);
            if (!fProgram.fConfig->fSettings.fForceNoRTFlip) {
                this->addRTFlipUniform(c.fPosition);
                using namespace dsl;
                DSLExpression rtFlip(
                        ThreadContext::Compiler().convertIdentifier(Position(), SKSL_RTFLIP_NAME));
                SpvId rtFlipY = this->vectorize(*rtFlip.y().release(), callType.columns(), out);
                SpvId flipped = this->nextId(&callType);
                this->writeInstruction(
                        SpvOpFMul, this->getType(callType), flipped, result, rtFlipY, out);
                result = flipped;
            }
            break;
        }
        case kClamp_SpecialIntrinsic: {
            SkTArray<SpvId> args = this->vectorize(arguments, out);
            SkASSERT(args.size() == 3);
            this->writeGLSLExtendedInstruction(callType, result, GLSLstd450FClamp, GLSLstd450SClamp,
                                               GLSLstd450UClamp, args, out);
            break;
        }
        case kMax_SpecialIntrinsic: {
            SkTArray<SpvId> args = this->vectorize(arguments, out);
            SkASSERT(args.size() == 2);
            this->writeGLSLExtendedInstruction(callType, result, GLSLstd450FMax, GLSLstd450SMax,
                                               GLSLstd450UMax, args, out);
            break;
        }
        case kMin_SpecialIntrinsic: {
            SkTArray<SpvId> args = this->vectorize(arguments, out);
            SkASSERT(args.size() == 2);
            this->writeGLSLExtendedInstruction(callType, result, GLSLstd450FMin, GLSLstd450SMin,
                                               GLSLstd450UMin, args, out);
            break;
        }
        case kMix_SpecialIntrinsic: {
            SkTArray<SpvId> args = this->vectorize(arguments, out);
            SkASSERT(args.size() == 3);
            if (arguments[2]->type().componentType().isBoolean()) {
                // Use OpSelect to implement Boolean mix().
                SpvId falseId     = this->writeExpression(*arguments[0], out);
                SpvId trueId      = this->writeExpression(*arguments[1], out);
                SpvId conditionId = this->writeExpression(*arguments[2], out);
                this->writeInstruction(SpvOpSelect, this->getType(arguments[0]->type()), result,
                                       conditionId, trueId, falseId, out);
            } else {
                this->writeGLSLExtendedInstruction(callType, result, GLSLstd450FMix, SpvOpUndef,
                                                   SpvOpUndef, args, out);
            }
            break;
        }
        // saturate(x) lowers to clamp(x, 0, 1).
        case kSaturate_SpecialIntrinsic: {
            SkASSERT(arguments.size() == 1);
            ExpressionArray finalArgs;
            finalArgs.reserve_back(3);
            finalArgs.push_back(arguments[0]->clone());
            finalArgs.push_back(Literal::MakeFloat(fContext, Position(), /*value=*/0));
            finalArgs.push_back(Literal::MakeFloat(fContext, Position(), /*value=*/1));
            SkTArray<SpvId> spvArgs = this->vectorize(finalArgs, out);
            this->writeGLSLExtendedInstruction(callType, result, GLSLstd450FClamp, GLSLstd450SClamp,
                                               GLSLstd450UClamp, spvArgs, out);
            break;
        }
        case kSmoothStep_SpecialIntrinsic: {
            SkTArray<SpvId> args = this->vectorize(arguments, out);
            SkASSERT(args.size() == 3);
            this->writeGLSLExtendedInstruction(callType, result, GLSLstd450SmoothStep, SpvOpUndef,
                                               SpvOpUndef, args, out);
            break;
        }
        case kStep_SpecialIntrinsic: {
            SkTArray<SpvId> args = this->vectorize(arguments, out);
            SkASSERT(args.size() == 2);
            this->writeGLSLExtendedInstruction(callType, result, GLSLstd450Step, SpvOpUndef,
                                               SpvOpUndef, args, out);
            break;
        }
        // matrixCompMult(): componentwise multiply, column by column.
        case kMatrixCompMult_SpecialIntrinsic: {
            SkASSERT(arguments.size() == 2);
            SpvId lhs = this->writeExpression(*arguments[0], out);
            SpvId rhs = this->writeExpression(*arguments[1], out);
            result = this->writeComponentwiseMatrixBinary(callType, lhs, rhs, SpvOpFMul, out);
            break;
        }
    }
    return result;
}
+
// Emits argument `argIndex` of `call` and returns the SpvId to pass to the
// callee. Out-parameters are routed through a temp variable that is recorded in
// `tempVars` for copy-back after the call; opaque handles (samplers/textures)
// pass their existing pointer variable straight through. In Dawn-compat mode, a
// sampler argument is split: the texture id is returned and the sampler id is
// written to `outSynthesizedSamplerId`.
SpvId SPIRVCodeGenerator::writeFunctionCallArgument(const FunctionCall& call,
                                                    int argIndex,
                                                    std::vector<TempVar>* tempVars,
                                                    OutputStream& out,
                                                    SpvId* outSynthesizedSamplerId) {
    const FunctionDeclaration& funcDecl = call.function();
    const Expression& arg = *call.arguments()[argIndex];
    const Modifiers& paramModifiers = funcDecl.parameters()[argIndex]->modifiers();

    // ID of temporary variable that we will use to hold this argument, or 0 if it is being
    // passed directly
    SpvId tmpVar;
    // if we need a temporary var to store this argument, this is the value to store in the var
    SpvId tmpValueId = NA;

    if (is_out(paramModifiers)) {
        std::unique_ptr<LValue> lv = this->getLValue(arg, out);
        // We handle out params with a temp var that we copy back to the original variable at the
        // end of the call. GLSL guarantees that the original variable will be unchanged until the
        // end of the call, and also that out params are written back to their original variables in
        // a specific order (left-to-right), so it's unsafe to pass a pointer to the original value.
        if (is_in(paramModifiers)) {
            // `inout` parameters need the current value loaded into the temp first.
            tmpValueId = lv->load(out);
        }
        tmpVar = this->nextId(&arg.type());
        tempVars->push_back(TempVar{tmpVar, &arg.type(), std::move(lv)});
    } else if (funcDecl.isIntrinsic()) {
        // Unlike user function calls, non-out intrinsic arguments don't need pointer parameters.
        return this->writeExpression(arg, out);
    } else if (arg.is<VariableReference>() &&
               (arg.type().typeKind() == Type::TypeKind::kSampler ||
                arg.type().typeKind() == Type::TypeKind::kSeparateSampler ||
                arg.type().typeKind() == Type::TypeKind::kTexture)) {
        // Opaque handle (sampler/texture) arguments are always declared as pointers but never
        // stored in intermediates when calling user-defined functions.
        //
        // The case for intrinsics (which take opaque arguments by value) is handled above just like
        // regular pointers.
        //
        // See getFunctionParameterType for further explanation.
        const Variable* var = arg.as<VariableReference>().variable();

        // In Dawn-mode the texture and sampler arguments are forwarded to the helper function.
        if (const auto* p = fSynthesizedSamplerMap.find(var)) {
            SkASSERT(fProgram.fConfig->fSettings.fSPIRVDawnCompatMode);
            SkASSERT(arg.type().typeKind() == Type::TypeKind::kSampler);
            SkASSERT(outSynthesizedSamplerId);

            SpvId* img = fVariableMap.find((*p)->fTexture.get());
            SpvId* sampler = fVariableMap.find((*p)->fSampler.get());
            SkASSERT(img);
            SkASSERT(sampler);

            *outSynthesizedSamplerId = *sampler;
            return *img;
        }

        SpvId* entry = fVariableMap.find(var);
        SkASSERTF(entry, "%s", arg.description().c_str());
        return *entry;
    } else {
        // We always use pointer parameters when calling user functions.
        // See getFunctionParameterType for further explanation.
        tmpValueId = this->writeExpression(arg, out);
        tmpVar = this->nextId(nullptr);
    }
    // Declare the temp in the function's variable section and, when we have a
    // value (plain in-params and inout-params), store it before the call.
    this->writeInstruction(SpvOpVariable,
                           this->getPointerType(arg.type(), SpvStorageClassFunction),
                           tmpVar,
                           SpvStorageClassFunction,
                           fVariableBuffer);
    if (tmpValueId != NA) {
        this->writeOpStore(SpvStorageClassFunction, tmpVar, tmpValueId, out);
    }
    return tmpVar;
}
+
+void SPIRVCodeGenerator::copyBackTempVars(const std::vector<TempVar>& tempVars, OutputStream& out) {
+ for (const TempVar& tempVar : tempVars) {
+ SpvId load = this->nextId(tempVar.type);
+ this->writeInstruction(SpvOpLoad, this->getType(*tempVar.type), load, tempVar.spvId, out);
+ tempVar.lvalue->store(load, out);
+ }
+}
+
// Emits an OpFunctionCall (or routes to intrinsic lowering for intrinsics with
// no definition). Arguments are materialized via writeFunctionCallArgument; in
// Dawn-compat mode a sampler argument may expand into two ids. Out-parameter
// temps are copied back after the call. Returns the call's result id, or NA if
// the callee was never defined.
SpvId SPIRVCodeGenerator::writeFunctionCall(const FunctionCall& c, OutputStream& out) {
    const FunctionDeclaration& function = c.function();
    if (function.isIntrinsic() && !function.definition()) {
        return this->writeIntrinsicCall(c, out);
    }
    const ExpressionArray& arguments = c.arguments();
    SpvId* entry = fFunctionMap.find(&function);
    if (!entry) {
        fContext.fErrors->error(c.fPosition, "function '" + function.description() +
                "' is not defined");
        return NA;
    }
    // Temp variables are used to write back out-parameters after the function call is complete.
    std::vector<TempVar> tempVars;
    SkTArray<SpvId> argumentIds;
    argumentIds.reserve_back(arguments.size());
    for (int i = 0; i < arguments.size(); i++) {
        SpvId samplerId = NA;
        argumentIds.push_back(this->writeFunctionCallArgument(c, i, &tempVars, out, &samplerId));
        // A synthesized sampler (Dawn-compat mode) becomes an extra argument.
        if (samplerId != NA) {
            argumentIds.push_back(samplerId);
        }
    }
    SpvId result = this->nextId(nullptr);
    // OpFunctionCall word count: opcode word + result type + result id + callee
    // id + one word per argument.
    this->writeOpCode(SpvOpFunctionCall, 4 + (int32_t)argumentIds.size(), out);
    this->writeWord(this->getType(c.type()), out);
    this->writeWord(result, out);
    this->writeWord(*entry, out);
    for (SpvId id : argumentIds) {
        this->writeWord(id, out);
    }
    // Now that the call is complete, we copy temp out-variables back to their real lvalues.
    this->copyBackTempVars(tempVars, out);
    return result;
}
+
+SpvId SPIRVCodeGenerator::castScalarToType(SpvId inputExprId,
+ const Type& inputType,
+ const Type& outputType,
+ OutputStream& out) {
+ if (outputType.isFloat()) {
+ return this->castScalarToFloat(inputExprId, inputType, outputType, out);
+ }
+ if (outputType.isSigned()) {
+ return this->castScalarToSignedInt(inputExprId, inputType, outputType, out);
+ }
+ if (outputType.isUnsigned()) {
+ return this->castScalarToUnsignedInt(inputExprId, inputType, outputType, out);
+ }
+ if (outputType.isBoolean()) {
+ return this->castScalarToBoolean(inputExprId, inputType, outputType, out);
+ }
+
+ fContext.fErrors->error(Position(), "unsupported cast: " + inputType.description() + " to " +
+ outputType.description());
+ return inputExprId;
+}
+
+SpvId SPIRVCodeGenerator::writeFloatConstructor(const AnyConstructor& c, OutputStream& out) {
+ SkASSERT(c.argumentSpan().size() == 1);
+ SkASSERT(c.type().isFloat());
+ const Expression& ctorExpr = *c.argumentSpan().front();
+ SpvId expressionId = this->writeExpression(ctorExpr, out);
+ return this->castScalarToFloat(expressionId, ctorExpr.type(), c.type(), out);
+}
+
// Converts a scalar of `inputType` to a floating-point value of `outputType`, emitting the
// appropriate SPIR-V conversion instruction. Returns the id of the converted value, or NA if the
// input type has no float conversion.
SpvId SPIRVCodeGenerator::castScalarToFloat(SpvId inputId, const Type& inputType,
                                            const Type& outputType, OutputStream& out) {
    // Casting a float to float is a no-op.
    if (inputType.isFloat()) {
        return inputId;
    }

    // Given the input type, generate the appropriate instruction to cast to float.
    SpvId result = this->nextId(&outputType);
    if (inputType.isBoolean()) {
        // Use OpSelect to convert the boolean argument to a literal 1.0 or 0.0.
        const SpvId oneID = this->writeLiteral(1.0, *fContext.fTypes.fFloat);
        const SpvId zeroID = this->writeLiteral(0.0, *fContext.fTypes.fFloat);
        this->writeInstruction(SpvOpSelect, this->getType(outputType), result,
                               inputId, oneID, zeroID, out);
    } else if (inputType.isSigned()) {
        // Signed int -> float conversion.
        this->writeInstruction(SpvOpConvertSToF, this->getType(outputType), result, inputId, out);
    } else if (inputType.isUnsigned()) {
        // Unsigned int -> float conversion.
        this->writeInstruction(SpvOpConvertUToF, this->getType(outputType), result, inputId, out);
    } else {
        SkDEBUGFAILF("unsupported type for float typecast: %s", inputType.description().c_str());
        return NA;
    }
    return result;
}
+
+SpvId SPIRVCodeGenerator::writeIntConstructor(const AnyConstructor& c, OutputStream& out) {
+ SkASSERT(c.argumentSpan().size() == 1);
+ SkASSERT(c.type().isSigned());
+ const Expression& ctorExpr = *c.argumentSpan().front();
+ SpvId expressionId = this->writeExpression(ctorExpr, out);
+ return this->castScalarToSignedInt(expressionId, ctorExpr.type(), c.type(), out);
+}
+
// Converts a scalar of `inputType` to a signed integer of `outputType`, emitting the appropriate
// SPIR-V conversion instruction. Returns the id of the converted value, or NA if the input type
// has no signed-int conversion.
SpvId SPIRVCodeGenerator::castScalarToSignedInt(SpvId inputId, const Type& inputType,
                                                const Type& outputType, OutputStream& out) {
    // Casting a signed int to signed int is a no-op.
    if (inputType.isSigned()) {
        return inputId;
    }

    // Given the input type, generate the appropriate instruction to cast to signed int.
    SpvId result = this->nextId(&outputType);
    if (inputType.isBoolean()) {
        // Use OpSelect to convert the boolean argument to a literal 1 or 0.
        // (writeLiteral takes the value as a double; the int type here selects integer literals.)
        const SpvId oneID = this->writeLiteral(1.0, *fContext.fTypes.fInt);
        const SpvId zeroID = this->writeLiteral(0.0, *fContext.fTypes.fInt);
        this->writeInstruction(SpvOpSelect, this->getType(outputType), result,
                               inputId, oneID, zeroID, out);
    } else if (inputType.isFloat()) {
        // Float -> signed int conversion.
        this->writeInstruction(SpvOpConvertFToS, this->getType(outputType), result, inputId, out);
    } else if (inputType.isUnsigned()) {
        // Unsigned -> signed reinterprets the bit pattern rather than converting the value.
        this->writeInstruction(SpvOpBitcast, this->getType(outputType), result, inputId, out);
    } else {
        SkDEBUGFAILF("unsupported type for signed int typecast: %s",
                     inputType.description().c_str());
        return NA;
    }
    return result;
}
+
+SpvId SPIRVCodeGenerator::writeUIntConstructor(const AnyConstructor& c, OutputStream& out) {
+ SkASSERT(c.argumentSpan().size() == 1);
+ SkASSERT(c.type().isUnsigned());
+ const Expression& ctorExpr = *c.argumentSpan().front();
+ SpvId expressionId = this->writeExpression(ctorExpr, out);
+ return this->castScalarToUnsignedInt(expressionId, ctorExpr.type(), c.type(), out);
+}
+
// Converts a scalar of `inputType` to an unsigned integer of `outputType`, emitting the
// appropriate SPIR-V conversion instruction. Returns the id of the converted value, or NA if the
// input type has no unsigned-int conversion.
SpvId SPIRVCodeGenerator::castScalarToUnsignedInt(SpvId inputId, const Type& inputType,
                                                  const Type& outputType, OutputStream& out) {
    // Casting an unsigned int to unsigned int is a no-op.
    if (inputType.isUnsigned()) {
        return inputId;
    }

    // Given the input type, generate the appropriate instruction to cast to unsigned int.
    SpvId result = this->nextId(&outputType);
    if (inputType.isBoolean()) {
        // Use OpSelect to convert the boolean argument to a literal 1u or 0u.
        const SpvId oneID = this->writeLiteral(1.0, *fContext.fTypes.fUInt);
        const SpvId zeroID = this->writeLiteral(0.0, *fContext.fTypes.fUInt);
        this->writeInstruction(SpvOpSelect, this->getType(outputType), result,
                               inputId, oneID, zeroID, out);
    } else if (inputType.isFloat()) {
        // Float -> unsigned int conversion.
        this->writeInstruction(SpvOpConvertFToU, this->getType(outputType), result, inputId, out);
    } else if (inputType.isSigned()) {
        // Signed -> unsigned reinterprets the bit pattern rather than converting the value.
        this->writeInstruction(SpvOpBitcast, this->getType(outputType), result, inputId, out);
    } else {
        SkDEBUGFAILF("unsupported type for unsigned int typecast: %s",
                     inputType.description().c_str());
        return NA;
    }
    return result;
}
+
+SpvId SPIRVCodeGenerator::writeBooleanConstructor(const AnyConstructor& c, OutputStream& out) {
+ SkASSERT(c.argumentSpan().size() == 1);
+ SkASSERT(c.type().isBoolean());
+ const Expression& ctorExpr = *c.argumentSpan().front();
+ SpvId expressionId = this->writeExpression(ctorExpr, out);
+ return this->castScalarToBoolean(expressionId, ctorExpr.type(), c.type(), out);
+}
+
// Converts a scalar of `inputType` to a boolean of `outputType` by comparing the value against
// zero (of the matching type). Returns the id of the resulting bool, or NA if the input type has
// no boolean conversion.
SpvId SPIRVCodeGenerator::castScalarToBoolean(SpvId inputId, const Type& inputType,
                                              const Type& outputType, OutputStream& out) {
    // Casting a bool to bool is a no-op.
    if (inputType.isBoolean()) {
        return inputId;
    }

    // Given the input type, generate the appropriate instruction to cast to bool.
    // NOTE(review): unlike the other casts, no type is passed to nextId here — presumably bool
    // results never carry a relaxed-precision decoration; confirm against nextId's behavior.
    SpvId result = this->nextId(nullptr);
    if (inputType.isSigned()) {
        // Synthesize a boolean result by comparing the input against a signed zero literal.
        const SpvId zeroID = this->writeLiteral(0.0, *fContext.fTypes.fInt);
        this->writeInstruction(SpvOpINotEqual, this->getType(outputType), result,
                               inputId, zeroID, out);
    } else if (inputType.isUnsigned()) {
        // Synthesize a boolean result by comparing the input against an unsigned zero literal.
        const SpvId zeroID = this->writeLiteral(0.0, *fContext.fTypes.fUInt);
        this->writeInstruction(SpvOpINotEqual, this->getType(outputType), result,
                               inputId, zeroID, out);
    } else if (inputType.isFloat()) {
        // Synthesize a boolean result by comparing the input against a floating-point zero literal.
        const SpvId zeroID = this->writeLiteral(0.0, *fContext.fTypes.fFloat);
        this->writeInstruction(SpvOpFUnordNotEqual, this->getType(outputType), result,
                               inputId, zeroID, out);
    } else {
        SkDEBUGFAILF("unsupported type for boolean typecast: %s", inputType.description().c_str());
        return NA;
    }
    return result;
}
+
// Copies matrix `src` (of `srcType`) into a new matrix of `dstType`, following GLSL resize
// semantics: cells present in both matrices are copied, and destination-only cells are filled
// from the identity matrix (1.0 on the diagonal, 0.0 elsewhere). Returns the new matrix's id.
SpvId SPIRVCodeGenerator::writeMatrixCopy(SpvId src, const Type& srcType, const Type& dstType,
                                          OutputStream& out) {
    SkASSERT(srcType.isMatrix());
    SkASSERT(dstType.isMatrix());
    SkASSERT(srcType.componentType().matches(dstType.componentType()));
    const Type& srcColumnType = srcType.componentType().toCompound(fContext, srcType.rows(), 1);
    const Type& dstColumnType = dstType.componentType().toCompound(fContext, dstType.rows(), 1);
    SkASSERT(dstType.componentType().isFloat());
    SpvId dstColumnTypeId = this->getType(dstColumnType);
    // Identity-fill literals, typed to match the matrix's component type.
    const SpvId zeroId = this->writeLiteral(0.0, dstType.componentType());
    const SpvId oneId = this->writeLiteral(1.0, dstType.componentType());

    SkSTArray<4, SpvId> columns;
    for (int i = 0; i < dstType.columns(); i++) {
        if (i < srcType.columns()) {
            // we're still inside the src matrix, copy the column
            SpvId srcColumn = this->writeOpCompositeExtract(srcColumnType, src, i, out);
            SpvId dstColumn;
            if (srcType.rows() == dstType.rows()) {
                // columns are equal size, don't need to do anything
                dstColumn = srcColumn;
            }
            else if (dstType.rows() > srcType.rows()) {
                // dst column is bigger, need to zero-pad it
                SkSTArray<4, SpvId> values;
                values.push_back(srcColumn);
                for (int j = srcType.rows(); j < dstType.rows(); ++j) {
                    // Pad with 1.0 on the matrix diagonal, 0.0 elsewhere.
                    values.push_back((i == j) ? oneId : zeroId);
                }
                dstColumn = this->writeOpCompositeConstruct(dstColumnType, values, out);
            }
            else {
                // dst column is smaller, need to swizzle the src column
                // (OpVectorShuffle: 5 fixed words + one component index per dst row)
                dstColumn = this->nextId(&dstType);
                this->writeOpCode(SpvOpVectorShuffle, 5 + dstType.rows(), out);
                this->writeWord(dstColumnTypeId, out);
                this->writeWord(dstColumn, out);
                this->writeWord(srcColumn, out);
                this->writeWord(srcColumn, out);
                for (int j = 0; j < dstType.rows(); j++) {
                    this->writeWord(j, out);
                }
            }
            columns.push_back(dstColumn);
        } else {
            // we're past the end of the src matrix, need to synthesize an identity-matrix column
            SkSTArray<4, SpvId> values;
            for (int j = 0; j < dstType.rows(); ++j) {
                values.push_back((i == j) ? oneId : zeroId);
            }
            columns.push_back(this->writeOpCompositeConstruct(dstColumnType, values, out));
        }
    }

    return this->writeOpCompositeConstruct(dstType, columns, out);
}
+
+void SPIRVCodeGenerator::addColumnEntry(const Type& columnType,
+ SkTArray<SpvId>* currentColumn,
+ SkTArray<SpvId>* columnIds,
+ int rows,
+ SpvId entry,
+ OutputStream& out) {
+ SkASSERT(currentColumn->size() < rows);
+ currentColumn->push_back(entry);
+ if (currentColumn->size() == rows) {
+ // Synthesize this column into a vector.
+ SpvId columnId = this->writeOpCompositeConstruct(columnType, *currentColumn, out);
+ columnIds->push_back(columnId);
+ currentColumn->clear();
+ }
+}
+
// Writes a matrix-typed compound constructor, e.g. `half2x2(a, b, c, d)` or a mixed list of
// scalars and vectors. All arguments are evaluated up front, then distributed into per-column
// vectors which are finally combined into the matrix.
SpvId SPIRVCodeGenerator::writeMatrixConstructor(const ConstructorCompound& c, OutputStream& out) {
    const Type& type = c.type();
    SkASSERT(type.isMatrix());
    SkASSERT(!c.arguments().empty());
    const Type& arg0Type = c.arguments()[0]->type();
    // go ahead and write the arguments so we don't try to write new instructions in the middle of
    // an instruction
    SkSTArray<16, SpvId> arguments;
    for (const std::unique_ptr<Expression>& arg : c.arguments()) {
        arguments.push_back(this->writeExpression(*arg, out));
    }

    if (arguments.size() == 1 && arg0Type.isVector()) {
        // Special-case handling of float4 -> mat2x2.
        // Extract all four scalars, then rebuild them as two 2-component columns.
        SkASSERT(type.rows() == 2 && type.columns() == 2);
        SkASSERT(arg0Type.columns() == 4);
        SpvId v[4];
        for (int i = 0; i < 4; ++i) {
            v[i] = this->writeOpCompositeExtract(type.componentType(), arguments[0], i, out);
        }
        const Type& vecType = type.componentType().toCompound(fContext, /*columns=*/2, /*rows=*/1);
        SpvId v0v1 = this->writeOpCompositeConstruct(vecType, {v[0], v[1]}, out);
        SpvId v2v3 = this->writeOpCompositeConstruct(vecType, {v[2], v[3]}, out);
        return this->writeOpCompositeConstruct(type, {v0v1, v2v3}, out);
    }

    int rows = type.rows();
    const Type& columnType = type.componentType().toCompound(fContext,
                                                             /*columns=*/rows, /*rows=*/1);
    // SpvIds of completed columns of the matrix.
    SkSTArray<4, SpvId> columnIds;
    // SpvIds of scalars we have written to the current column so far.
    SkSTArray<4, SpvId> currentColumn;
    for (int i = 0; i < arguments.size(); i++) {
        const Type& argType = c.arguments()[i]->type();
        if (currentColumn.empty() && argType.isVector() && argType.columns() == rows) {
            // This vector is a complete matrix column by itself and can be used as-is.
            columnIds.push_back(arguments[i]);
        } else if (argType.columns() == 1) {
            // This argument is a lone scalar and can be added to the current column as-is.
            this->addColumnEntry(columnType, &currentColumn, &columnIds, rows, arguments[i], out);
        } else {
            // This argument needs to be decomposed into its constituent scalars.
            for (int j = 0; j < argType.columns(); ++j) {
                SpvId swizzle = this->writeOpCompositeExtract(argType.componentType(),
                                                              arguments[i], j, out);
                this->addColumnEntry(columnType, &currentColumn, &columnIds, rows, swizzle, out);
            }
        }
    }
    SkASSERT(columnIds.size() == type.columns());
    return this->writeOpCompositeConstruct(type, columnIds, out);
}
+
+SpvId SPIRVCodeGenerator::writeConstructorCompound(const ConstructorCompound& c,
+ OutputStream& out) {
+ return c.type().isMatrix() ? this->writeMatrixConstructor(c, out)
+ : this->writeVectorConstructor(c, out);
+}
+
// Writes a vector-typed compound constructor, e.g. `half4(a, bc, d)`. All arguments must already
// share the vector's component number-kind (asserted below); vector and matrix arguments are
// flattened into individual scalars before the final OpCompositeConstruct.
SpvId SPIRVCodeGenerator::writeVectorConstructor(const ConstructorCompound& c, OutputStream& out) {
    const Type& type = c.type();
    const Type& componentType = type.componentType();
    SkASSERT(type.isVector());

    SkSTArray<4, SpvId> arguments;
    for (int i = 0; i < c.arguments().size(); i++) {
        const Type& argType = c.arguments()[i]->type();
        SkASSERT(componentType.numberKind() == argType.componentType().numberKind());

        SpvId arg = this->writeExpression(*c.arguments()[i], out);
        if (argType.isMatrix()) {
            // CompositeConstruct cannot take a 2x2 matrix as an input, so we need to extract out
            // each scalar separately. (In SPIR-V a matrix is a composite of columns, so the first
            // extract index selects the column and the second selects the component within it.)
            SkASSERT(argType.rows() == 2);
            SkASSERT(argType.columns() == 2);
            for (int j = 0; j < 4; ++j) {
                arguments.push_back(this->writeOpCompositeExtract(componentType, arg,
                                                                  j / 2, j % 2, out));
            }
        } else if (argType.isVector()) {
            // There's a bug in the Intel Vulkan driver where OpCompositeConstruct doesn't handle
            // vector arguments at all, so we always extract each vector component and pass them
            // into OpCompositeConstruct individually.
            for (int j = 0; j < argType.columns(); j++) {
                arguments.push_back(this->writeOpCompositeExtract(componentType, arg, j, out));
            }
        } else {
            // Scalar argument: used as-is.
            arguments.push_back(arg);
        }
    }

    return this->writeOpCompositeConstruct(type, arguments, out);
}
+
+SpvId SPIRVCodeGenerator::writeConstructorSplat(const ConstructorSplat& c, OutputStream& out) {
+ // Write the splat argument.
+ SpvId argument = this->writeExpression(*c.argument(), out);
+
+ // Generate a OpCompositeConstruct which repeats the argument N times.
+ SkSTArray<4, SpvId> values;
+ values.push_back_n(/*n=*/c.type().columns(), /*t=*/argument);
+ return this->writeOpCompositeConstruct(c.type(), values, out);
+}
+
+SpvId SPIRVCodeGenerator::writeCompositeConstructor(const AnyConstructor& c, OutputStream& out) {
+ SkASSERT(c.type().isArray() || c.type().isStruct());
+ auto ctorArgs = c.argumentSpan();
+
+ SkSTArray<4, SpvId> arguments;
+ for (const std::unique_ptr<Expression>& arg : ctorArgs) {
+ arguments.push_back(this->writeExpression(*arg, out));
+ }
+
+ return this->writeOpCompositeConstruct(c.type(), arguments, out);
+}
+
+SpvId SPIRVCodeGenerator::writeConstructorScalarCast(const ConstructorScalarCast& c,
+ OutputStream& out) {
+ const Type& type = c.type();
+ if (type.componentType().numberKind() == c.argument()->type().componentType().numberKind()) {
+ return this->writeExpression(*c.argument(), out);
+ }
+
+ const Expression& ctorExpr = *c.argument();
+ SpvId expressionId = this->writeExpression(ctorExpr, out);
+ return this->castScalarToType(expressionId, ctorExpr.type(), type, out);
+}
+
// Writes a compound cast such as `int2(myFloat2)` or `half3x3(myFloat3x3)`, converting a vector
// or matrix to the same shape with a different component type.
SpvId SPIRVCodeGenerator::writeConstructorCompoundCast(const ConstructorCompoundCast& c,
                                                       OutputStream& out) {
    const Type& ctorType = c.type();
    const Type& argType = c.argument()->type();
    SkASSERT(ctorType.isVector() || ctorType.isMatrix());

    // Write the composite that we are casting. If the actual type matches, we are done.
    SpvId compositeId = this->writeExpression(*c.argument(), out);
    if (ctorType.componentType().numberKind() == argType.componentType().numberKind()) {
        return compositeId;
    }

    // writeMatrixCopy can cast matrices to a different type.
    if (ctorType.isMatrix()) {
        return this->writeMatrixCopy(compositeId, argType, ctorType, out);
    }

    // SPIR-V doesn't support vector(vector-of-different-type) directly, so we need to extract the
    // components and convert each one manually.
    const Type& srcType = argType.componentType();
    const Type& dstType = ctorType.componentType();

    SkSTArray<4, SpvId> arguments;
    for (int index = 0; index < argType.columns(); ++index) {
        SpvId componentId = this->writeOpCompositeExtract(srcType, compositeId, index, out);
        arguments.push_back(this->castScalarToType(componentId, srcType, dstType, out));
    }

    // Reassemble the converted scalars into the destination vector.
    return this->writeOpCompositeConstruct(ctorType, arguments, out);
}
+
// Writes a diagonal-matrix constructor, e.g. `half3x3(x)`: the scalar argument is placed on the
// diagonal and every other cell is zero.
SpvId SPIRVCodeGenerator::writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c,
                                                         OutputStream& out) {
    const Type& type = c.type();
    SkASSERT(type.isMatrix());
    SkASSERT(c.argument()->type().isScalar());

    // Write out the scalar argument.
    SpvId diagonal = this->writeExpression(*c.argument(), out);

    // Build the diagonal matrix.
    // NOTE(review): the zero is always emitted as a float literal, independent of the matrix's
    // component type — confirm this is correct for non-float (e.g. half) matrices.
    SpvId zeroId = this->writeLiteral(0.0, *fContext.fTypes.fFloat);

    const Type& vecType = type.componentType().toCompound(fContext,
                                                          /*columns=*/type.rows(),
                                                          /*rows=*/1);
    SkSTArray<4, SpvId> columnIds;
    SkSTArray<4, SpvId> arguments;
    arguments.resize(type.rows());
    // For each column, place `diagonal` at the row matching the column index and zero elsewhere.
    for (int column = 0; column < type.columns(); column++) {
        for (int row = 0; row < type.rows(); row++) {
            arguments[row] = (row == column) ? diagonal : zeroId;
        }
        columnIds.push_back(this->writeOpCompositeConstruct(vecType, arguments, out));
    }
    return this->writeOpCompositeConstruct(type, columnIds, out);
}
+
+SpvId SPIRVCodeGenerator::writeConstructorMatrixResize(const ConstructorMatrixResize& c,
+ OutputStream& out) {
+ // Write the input matrix.
+ SpvId argument = this->writeExpression(*c.argument(), out);
+
+ // Use matrix-copy to resize the input matrix to its new size.
+ return this->writeMatrixCopy(argument, c.argument()->type(), c.type(), out);
+}
+
// Chooses the SPIR-V storage class for a global variable from its modifier flags. The precedence
// order is behavioral: `in` -> Input, `out` -> Output, `uniform` -> PushConstant /
// UniformConstant (opaque handles) / Uniform, `buffer` -> Uniform, else `fallbackStorageClass`.
static SpvStorageClass_ get_storage_class_for_global_variable(
        const Variable& var, SpvStorageClass_ fallbackStorageClass) {
    SkASSERT(var.storage() == Variable::Storage::kGlobal);

    const Modifiers& modifiers = var.modifiers();
    if (modifiers.fFlags & Modifiers::kIn_Flag) {
        // push_constant is incompatible with in/out variables.
        SkASSERT(!(modifiers.fLayout.fFlags & Layout::kPushConstant_Flag));
        return SpvStorageClassInput;
    }
    if (modifiers.fFlags & Modifiers::kOut_Flag) {
        SkASSERT(!(modifiers.fLayout.fFlags & Layout::kPushConstant_Flag));
        return SpvStorageClassOutput;
    }
    if (modifiers.fFlags & Modifiers::kUniform_Flag) {
        if (modifiers.fLayout.fFlags & Layout::kPushConstant_Flag) {
            return SpvStorageClassPushConstant;
        }
        // Opaque handle types (samplers/textures) live in UniformConstant storage.
        if (var.type().typeKind() == Type::TypeKind::kSampler ||
            var.type().typeKind() == Type::TypeKind::kSeparateSampler ||
            var.type().typeKind() == Type::TypeKind::kTexture) {
            return SpvStorageClassUniformConstant;
        }
        return SpvStorageClassUniform;
    }
    if (modifiers.fFlags & Modifiers::kBuffer_Flag) {
        // Note: In SPIR-V 1.3, a storage buffer can be declared with the "StorageBuffer"
        // storage class and the "Block" decoration and the <1.3 approach we use here ("Uniform"
        // storage class and the "BufferBlock" decoration) is deprecated. Since we target SPIR-V
        // 1.0, we have to use the deprecated approach which is well supported in Vulkan and
        // addresses SkSL use cases (notably SkSL currently doesn't support pointer features that
        // would benefit from SPV_KHR_variable_pointers capabilities).
        return SpvStorageClassUniform;
    }
    return fallbackStorageClass;
}
+
+static SpvStorageClass_ get_storage_class(const Expression& expr) {
+ switch (expr.kind()) {
+ case Expression::Kind::kVariableReference: {
+ const Variable& var = *expr.as<VariableReference>().variable();
+ if (var.storage() != Variable::Storage::kGlobal) {
+ return SpvStorageClassFunction;
+ }
+ return get_storage_class_for_global_variable(var, SpvStorageClassPrivate);
+ }
+ case Expression::Kind::kFieldAccess:
+ return get_storage_class(*expr.as<FieldAccess>().base());
+ case Expression::Kind::kIndex:
+ return get_storage_class(*expr.as<IndexExpression>().base());
+ default:
+ return SpvStorageClassFunction;
+ }
+}
+
// Flattens a tree of index- and field-access expressions into the operand list for a single
// OpAccessChain: element 0 is the pointer id of the root lvalue; each subsequent element is an
// index id (a dynamic value for array/vector indices, an int literal for struct fields).
SkTArray<SpvId> SPIRVCodeGenerator::getAccessChain(const Expression& expr, OutputStream& out) {
    switch (expr.kind()) {
        case Expression::Kind::kIndex: {
            const IndexExpression& indexExpr = expr.as<IndexExpression>();
            if (indexExpr.base()->is<Swizzle>()) {
                // Access chains don't directly support dynamically indexing into a swizzle, but we
                // can rewrite them into a supported form.
                return this->getAccessChain(*Transform::RewriteIndexedSwizzle(fContext, indexExpr),
                                            out);
            }
            // All other index-expressions can be represented as typical access chains.
            SkTArray<SpvId> chain = this->getAccessChain(*indexExpr.base(), out);
            chain.push_back(this->writeExpression(*indexExpr.index(), out));
            return chain;
        }
        case Expression::Kind::kFieldAccess: {
            const FieldAccess& fieldExpr = expr.as<FieldAccess>();
            SkTArray<SpvId> chain = this->getAccessChain(*fieldExpr.base(), out);
            // Struct fields are addressed with an int-literal index.
            chain.push_back(this->writeLiteral(fieldExpr.fieldIndex(), *fContext.fTypes.fInt));
            return chain;
        }
        default: {
            // Recursion bottoms out here: this expression is the root of the chain, and its
            // pointer becomes the base operand of the OpAccessChain.
            SpvId id = this->getLValue(expr, out)->getPointer();
            SkASSERT(id != NA);
            return SkTArray<SpvId>{id};
        }
    }
    SkUNREACHABLE;
}
+
+class PointerLValue : public SPIRVCodeGenerator::LValue {
+public:
+ PointerLValue(SPIRVCodeGenerator& gen, SpvId pointer, bool isMemoryObject, SpvId type,
+ SPIRVCodeGenerator::Precision precision, SpvStorageClass_ storageClass)
+ : fGen(gen)
+ , fPointer(pointer)
+ , fIsMemoryObject(isMemoryObject)
+ , fType(type)
+ , fPrecision(precision)
+ , fStorageClass(storageClass) {}
+
+ SpvId getPointer() override {
+ return fPointer;
+ }
+
+ bool isMemoryObjectPointer() const override {
+ return fIsMemoryObject;
+ }
+
+ SpvId load(OutputStream& out) override {
+ return fGen.writeOpLoad(fType, fPrecision, fPointer, out);
+ }
+
+ void store(SpvId value, OutputStream& out) override {
+ if (!fIsMemoryObject) {
+ // We are going to write into an access chain; this could represent one component of a
+ // vector, or one element of an array. This has the potential to invalidate other,
+ // *unknown* elements of our store cache. (e.g. if the store cache holds `%50 = myVec4`,
+ // and we store `%60 = myVec4.z`, this invalidates the cached value for %50.) To avoid
+ // relying on stale data, reset the store cache entirely when this happens.
+ fGen.fStoreCache.reset();
+ }
+
+ fGen.writeOpStore(fStorageClass, fPointer, value, out);
+ }
+
+private:
+ SPIRVCodeGenerator& fGen;
+ const SpvId fPointer;
+ const bool fIsMemoryObject;
+ const SpvId fType;
+ const SPIRVCodeGenerator::Precision fPrecision;
+ const SpvStorageClass_ fStorageClass;
+};
+
// An lvalue representing a multi-component swizzle of a vector (e.g. `vec.xz`). Loads extract
// the swizzled components with OpVectorShuffle; stores merge the new components back into the
// base vector with a shuffle and write the whole vector.
class SwizzleLValue : public SPIRVCodeGenerator::LValue {
public:
    SwizzleLValue(SPIRVCodeGenerator& gen, SpvId vecPointer, const ComponentArray& components,
                  const Type& baseType, const Type& swizzleType, SpvStorageClass_ storageClass)
            : fGen(gen)
            , fVecPointer(vecPointer)
            , fComponents(components)
            , fBaseType(&baseType)
            , fSwizzleType(&swizzleType)
            , fStorageClass(storageClass) {}

    // Folds a second swizzle applied on top of this one into a single component remap
    // (e.g. `.xz` then `.y` collapses to the base's `.z`). Returns false if a component is
    // out of range; the lvalue is unchanged in that case.
    bool applySwizzle(const ComponentArray& components, const Type& newType) override {
        ComponentArray updatedSwizzle;
        for (int8_t component : components) {
            if (component < 0 || component >= fComponents.size()) {
                SkDEBUGFAILF("swizzle accessed nonexistent component %d", (int)component);
                return false;
            }
            updatedSwizzle.push_back(fComponents[component]);
        }
        fComponents = updatedSwizzle;
        fSwizzleType = &newType;
        return true;
    }

    // Loads the base vector, then shuffles out the swizzled components.
    SpvId load(OutputStream& out) override {
        SpvId base = fGen.nextId(fBaseType);
        fGen.writeInstruction(SpvOpLoad, fGen.getType(*fBaseType), base, fVecPointer, out);
        SpvId result = fGen.nextId(fBaseType);
        // OpVectorShuffle: 5 fixed words + one index per swizzle component.
        fGen.writeOpCode(SpvOpVectorShuffle, 5 + (int32_t) fComponents.size(), out);
        fGen.writeWord(fGen.getType(*fSwizzleType), out);
        fGen.writeWord(result, out);
        fGen.writeWord(base, out);
        fGen.writeWord(base, out);
        for (int component : fComponents) {
            fGen.writeWord(component, out);
        }
        return result;
    }

    void store(SpvId value, OutputStream& out) override {
        // use OpVectorShuffle to mix and match the vector components. We effectively create
        // a virtual vector out of the concatenation of the left and right vectors, and then
        // select components from this virtual vector to make the result vector. For
        // instance, given:
        // float3L = ...;
        // float3R = ...;
        // L.xz = R.xy;
        // we end up with the virtual vector (L.x, L.y, L.z, R.x, R.y, R.z). Then we want
        // our result vector to look like (R.x, L.y, R.y), so we need to select indices
        // (3, 1, 4).
        SpvId base = fGen.nextId(fBaseType);
        fGen.writeInstruction(SpvOpLoad, fGen.getType(*fBaseType), base, fVecPointer, out);
        SpvId shuffle = fGen.nextId(fBaseType);
        fGen.writeOpCode(SpvOpVectorShuffle, 5 + fBaseType->columns(), out);
        fGen.writeWord(fGen.getType(*fBaseType), out);
        fGen.writeWord(shuffle, out);
        fGen.writeWord(base, out);
        fGen.writeWord(value, out);
        for (int i = 0; i < fBaseType->columns(); i++) {
            // current offset into the virtual vector, defaults to pulling the unmodified
            // value from the left side
            int offset = i;
            // check to see if we are writing this component
            for (int j = 0; j < fComponents.size(); j++) {
                if (fComponents[j] == i) {
                    // we're writing to this component, so adjust the offset to pull from
                    // the correct component of the right side instead of preserving the
                    // value from the left
                    offset = (int) (j + fBaseType->columns());
                    break;
                }
            }
            fGen.writeWord(offset, out);
        }
        // Write the merged vector back through the base pointer.
        fGen.writeOpStore(fStorageClass, fVecPointer, shuffle, out);
    }

private:
    SPIRVCodeGenerator& fGen;
    const SpvId fVecPointer;
    ComponentArray fComponents;
    const Type* fBaseType;
    const Type* fSwizzleType;
    const SpvStorageClass_ fStorageClass;
};
+
+int SPIRVCodeGenerator::findUniformFieldIndex(const Variable& var) const {
+ int* fieldIndex = fTopLevelUniformMap.find(&var);
+ return fieldIndex ? *fieldIndex : -1;
+}
+
// Returns an LValue through which `expr` can be loaded or stored, emitting any OpAccessChain /
// OpVariable instructions needed to materialize the underlying pointer.
std::unique_ptr<SPIRVCodeGenerator::LValue> SPIRVCodeGenerator::getLValue(const Expression& expr,
                                                                          OutputStream& out) {
    const Type& type = expr.type();
    // Non-highp values use relaxed precision (presumably mapped to the RelaxedPrecision
    // decoration downstream — confirm in writeOpLoad/nextId).
    Precision precision = type.highPrecision() ? Precision::kDefault : Precision::kRelaxed;
    switch (expr.kind()) {
        case Expression::Kind::kVariableReference: {
            const Variable& var = *expr.as<VariableReference>().variable();
            int uniformIdx = this->findUniformFieldIndex(var);
            if (uniformIdx >= 0) {
                // This variable was folded into the synthesized top-level uniform block;
                // address it as field `uniformIdx` of fUniformBufferId via OpAccessChain.
                SpvId memberId = this->nextId(nullptr);
                SpvId typeId = this->getPointerType(type, SpvStorageClassUniform);
                SpvId uniformIdxId = this->writeLiteral((double)uniformIdx, *fContext.fTypes.fInt);
                this->writeInstruction(SpvOpAccessChain, typeId, memberId, fUniformBufferId,
                                       uniformIdxId, out);
                return std::make_unique<PointerLValue>(
                        *this,
                        memberId,
                        /*isMemoryObjectPointer=*/true,
                        this->getType(type, this->memoryLayoutForVariable(var)),
                        precision,
                        SpvStorageClassUniform);
            }
            // Ordinary variable: its pointer was recorded in fVariableMap at declaration time.
            SpvId typeId = this->getType(type, this->memoryLayoutForVariable(var));
            SpvId* entry = fVariableMap.find(&var);
            SkASSERTF(entry, "%s", expr.description().c_str());
            return std::make_unique<PointerLValue>(*this, *entry,
                                                   /*isMemoryObjectPointer=*/true,
                                                   typeId, precision, get_storage_class(expr));
        }
        case Expression::Kind::kIndex: // fall through
        case Expression::Kind::kFieldAccess: {
            // Build a single OpAccessChain from the root variable down to the addressed
            // element/field. Instruction length: opcode + type + result + chain operands.
            SkTArray<SpvId> chain = this->getAccessChain(expr, out);
            SpvId member = this->nextId(nullptr);
            SpvStorageClass_ storageClass = get_storage_class(expr);
            this->writeOpCode(SpvOpAccessChain, (SpvId) (3 + chain.size()), out);
            this->writeWord(this->getPointerType(type, storageClass), out);
            this->writeWord(member, out);
            for (SpvId idx : chain) {
                this->writeWord(idx, out);
            }
            return std::make_unique<PointerLValue>(
                    *this,
                    member,
                    /*isMemoryObjectPointer=*/false,
                    this->getType(type, this->memoryLayoutForStorageClass(storageClass)),
                    precision,
                    storageClass);
        }
        case Expression::Kind::kSwizzle: {
            const Swizzle& swizzle = expr.as<Swizzle>();
            std::unique_ptr<LValue> lvalue = this->getLValue(*swizzle.base(), out);
            if (lvalue->applySwizzle(swizzle.components(), type)) {
                // The base lvalue absorbed the swizzle directly (e.g. swizzle-of-swizzle).
                return lvalue;
            }
            SpvId base = lvalue->getPointer();
            if (base == NA) {
                fContext.fErrors->error(swizzle.fPosition,
                        "unable to retrieve lvalue from swizzle");
            }
            SpvStorageClass_ storageClass = get_storage_class(*swizzle.base());
            if (swizzle.components().size() == 1) {
                // A single-component swizzle is just a pointer to that one component.
                SpvId member = this->nextId(nullptr);
                SpvId typeId = this->getPointerType(type, storageClass);
                SpvId indexId = this->writeLiteral(swizzle.components()[0], *fContext.fTypes.fInt);
                this->writeInstruction(SpvOpAccessChain, typeId, member, base, indexId, out);
                return std::make_unique<PointerLValue>(*this, member,
                                                       /*isMemoryObjectPointer=*/false,
                                                       this->getType(type),
                                                       precision, storageClass);
            } else {
                // Multi-component swizzles need load/shuffle/store handling.
                return std::make_unique<SwizzleLValue>(*this, base, swizzle.components(),
                                                       swizzle.base()->type(), type, storageClass);
            }
        }
        default: {
            // expr isn't actually an lvalue, create a placeholder variable for it. This case
            // happens due to the need to store values in temporary variables during function
            // calls (see comments in getFunctionParameterType); erroneous uses of rvalues as
            // lvalues should have been caught before code generation.
            //
            // This is with the exception of opaque handle types (textures/samplers) which are
            // always defined as UniformConstant pointers and don't need to be explicitly stored
            // into a temporary (which is handled explicitly in writeFunctionCallArgument).
            SpvId result = this->nextId(nullptr);
            SpvId pointerType = this->getPointerType(type, SpvStorageClassFunction);
            this->writeInstruction(SpvOpVariable, pointerType, result, SpvStorageClassFunction,
                                   fVariableBuffer);
            this->writeOpStore(SpvStorageClassFunction, result, this->writeExpression(expr, out),
                               out);
            return std::make_unique<PointerLValue>(*this, result, /*isMemoryObjectPointer=*/true,
                                                   this->getType(type), precision,
                                                   SpvStorageClassFunction);
        }
    }
}
+
+SpvId SPIRVCodeGenerator::writeVariableReference(const VariableReference& ref, OutputStream& out) {
+ const Variable* variable = ref.variable();
+ switch (variable->modifiers().fLayout.fBuiltin) {
+ case DEVICE_FRAGCOORDS_BUILTIN: {
+ // Down below, we rewrite raw references to sk_FragCoord with expressions that reference
+ // DEVICE_FRAGCOORDS_BUILTIN. This is a fake variable that means we need to directly
+ // access the fragcoord; do so now.
+ dsl::DSLGlobalVar fragCoord("sk_FragCoord");
+ return this->getLValue(*dsl::DSLExpression(fragCoord).release(), out)->load(out);
+ }
+ case DEVICE_CLOCKWISE_BUILTIN: {
+ // Down below, we rewrite raw references to sk_Clockwise with expressions that reference
+ // DEVICE_CLOCKWISE_BUILTIN. This is a fake variable that means we need to directly
+ // access front facing; do so now.
+ dsl::DSLGlobalVar clockwise("sk_Clockwise");
+ return this->getLValue(*dsl::DSLExpression(clockwise).release(), out)->load(out);
+ }
+ case SK_SECONDARYFRAGCOLOR_BUILTIN: {
+ // sk_SecondaryFragColor corresponds to gl_SecondaryFragColorEXT, which isn't supposed
+ // to appear in a SPIR-V program (it's only valid in ES2). Report an error.
+ fContext.fErrors->error(ref.fPosition,
+ "sk_SecondaryFragColor is not allowed in SPIR-V");
+ return NA;
+ }
+ case SK_FRAGCOORD_BUILTIN: {
+ if (fProgram.fConfig->fSettings.fForceNoRTFlip) {
+ dsl::DSLGlobalVar fragCoord("sk_FragCoord");
+ return this->getLValue(*dsl::DSLExpression(fragCoord).release(), out)->load(out);
+ }
+
+ // Handle inserting use of uniform to flip y when referencing sk_FragCoord.
+ this->addRTFlipUniform(ref.fPosition);
+ // Use sk_RTAdjust to compute the flipped coordinate
+ using namespace dsl;
+ const char* DEVICE_COORDS_NAME = "$device_FragCoords";
+ SymbolTable& symbols = *ThreadContext::SymbolTable();
+ // Use a uniform to flip the Y coordinate. The new expression will be written in
+ // terms of $device_FragCoords, which is a fake variable that means "access the
+ // underlying fragcoords directly without flipping it".
+ DSLExpression rtFlip(ThreadContext::Compiler().convertIdentifier(Position(),
+ SKSL_RTFLIP_NAME));
+ if (!symbols.find(DEVICE_COORDS_NAME)) {
+ AutoAttachPoolToThread attach(fProgram.fPool.get());
+ Modifiers modifiers;
+ modifiers.fLayout.fBuiltin = DEVICE_FRAGCOORDS_BUILTIN;
+ auto coordsVar = std::make_unique<Variable>(/*pos=*/Position(),
+ /*modifiersPosition=*/Position(),
+ fContext.fModifiersPool->add(modifiers),
+ DEVICE_COORDS_NAME,
+ fContext.fTypes.fFloat4.get(),
+ /*builtin=*/true,
+ Variable::Storage::kGlobal);
+ fSPIRVBonusVariables.add(coordsVar.get());
+ symbols.add(std::move(coordsVar));
+ }
+ DSLGlobalVar deviceCoord(DEVICE_COORDS_NAME);
+ std::unique_ptr<Expression> rtFlipSkSLExpr = rtFlip.release();
+ DSLExpression x = DSLExpression(rtFlipSkSLExpr->clone()).x();
+ DSLExpression y = DSLExpression(std::move(rtFlipSkSLExpr)).y();
+ return this->writeExpression(*dsl::Float4(deviceCoord.x(),
+ std::move(x) + std::move(y) * deviceCoord.y(),
+ deviceCoord.z(),
+ deviceCoord.w()).release(),
+ out);
+ }
+ case SK_CLOCKWISE_BUILTIN: {
+ if (fProgram.fConfig->fSettings.fForceNoRTFlip) {
+ dsl::DSLGlobalVar clockwise("sk_Clockwise");
+ return this->getLValue(*dsl::DSLExpression(clockwise).release(), out)->load(out);
+ }
+
+ // Handle flipping sk_Clockwise.
+ this->addRTFlipUniform(ref.fPosition);
+ using namespace dsl;
+ const char* DEVICE_CLOCKWISE_NAME = "$device_Clockwise";
+ SymbolTable& symbols = *ThreadContext::SymbolTable();
+ // Use a uniform to flip the Y coordinate. The new expression will be written in
+ // terms of $device_Clockwise, which is a fake variable that means "access the
+ // underlying FrontFacing directly".
+ DSLExpression rtFlip(ThreadContext::Compiler().convertIdentifier(Position(),
+ SKSL_RTFLIP_NAME));
+ if (!symbols.find(DEVICE_CLOCKWISE_NAME)) {
+ AutoAttachPoolToThread attach(fProgram.fPool.get());
+ Modifiers modifiers;
+ modifiers.fLayout.fBuiltin = DEVICE_CLOCKWISE_BUILTIN;
+ auto clockwiseVar = std::make_unique<Variable>(/*pos=*/Position(),
+ /*modifiersPosition=*/Position(),
+ fContext.fModifiersPool->add(modifiers),
+ DEVICE_CLOCKWISE_NAME,
+ fContext.fTypes.fBool.get(),
+ /*builtin=*/true,
+ Variable::Storage::kGlobal);
+ fSPIRVBonusVariables.add(clockwiseVar.get());
+ symbols.add(std::move(clockwiseVar));
+ }
+ DSLGlobalVar deviceClockwise(DEVICE_CLOCKWISE_NAME);
+ // FrontFacing in Vulkan is defined in terms of a top-down render target. In skia,
+ // we use the default convention of "counter-clockwise face is front".
+ return this->writeExpression(*dsl::Bool(Select(rtFlip.y() > 0,
+ !deviceClockwise,
+ deviceClockwise)).release(),
+ out);
+ }
+ default: {
+ // Constant-propagate variables that have a known compile-time value.
+ if (const Expression* expr = ConstantFolder::GetConstantValueOrNullForVariable(ref)) {
+ return this->writeExpression(*expr, out);
+ }
+
+ // A reference to a sampler variable at global scope with synthesized texture/sampler
+ // backing should construct a function-scope combined image-sampler from the synthesized
+ // constituents. This is the case in which a sample intrinsic was invoked.
+ //
+ // Variable references to opaque handles (texture/sampler) that appear as the argument
+ // of a user-defined function call are explicitly handled in writeFunctionCallArgument.
+ if (const auto* p = fSynthesizedSamplerMap.find(variable)) {
+ SkASSERT(fProgram.fConfig->fSettings.fSPIRVDawnCompatMode);
+
+ SpvId* imgPtr = fVariableMap.find((*p)->fTexture.get());
+ SpvId* samplerPtr = fVariableMap.find((*p)->fSampler.get());
+ SkASSERT(imgPtr);
+ SkASSERT(samplerPtr);
+
+ SpvId img = this->writeOpLoad(
+ this->getType((*p)->fTexture->type()), Precision::kDefault, *imgPtr, out);
+ SpvId sampler = this->writeOpLoad(this->getType((*p)->fSampler->type()),
+ Precision::kDefault,
+ *samplerPtr,
+ out);
+
+ SpvId result = this->nextId(nullptr);
+ this->writeInstruction(SpvOpSampledImage,
+ this->getType(variable->type()),
+ result,
+ img,
+ sampler,
+ out);
+
+ return result;
+ }
+
+ return this->getLValue(ref, out)->load(out);
+ }
+ }
+}
+
+// Lowers an index expression. Dynamic indexing into a vector maps directly onto
+// OpVectorExtractDynamic; any other base type goes through the generic l-value
+// machinery and is loaded from a pointer.
+SpvId SPIRVCodeGenerator::writeIndexExpression(const IndexExpression& expr, OutputStream& out) {
+    if (!expr.base()->type().isVector()) {
+        // Non-vector bases are addressed via an access chain and then loaded.
+        return this->getLValue(expr, out)->load(out);
+    }
+    SpvId baseId = this->writeExpression(*expr.base(), out);
+    SpvId indexId = this->writeExpression(*expr.index(), out);
+    SpvId extracted = this->nextId(nullptr);
+    this->writeInstruction(SpvOpVectorExtractDynamic, this->getType(expr.type()), extracted,
+                           baseId, indexId, out);
+    return extracted;
+}
+
+// Lowers a struct-field access by forming an l-value for the field and loading it.
+SpvId SPIRVCodeGenerator::writeFieldAccess(const FieldAccess& f, OutputStream& out) {
+    auto lvalue = this->getLValue(f, out);
+    return lvalue->load(out);
+}
+
+// Lowers a swizzle expression (e.g. `v.xyz`). A single-component swizzle becomes a
+// plain composite extract; larger swizzles become an OpVectorShuffle whose component
+// indices all draw from the base vector.
+SpvId SPIRVCodeGenerator::writeSwizzle(const Swizzle& swizzle, OutputStream& out) {
+    SpvId base = this->writeExpression(*swizzle.base(), out);
+    size_t count = swizzle.components().size();
+    if (count == 1) {
+        // One component: OpCompositeExtract with a literal index is enough.
+        return this->writeOpCompositeExtract(swizzle.type(), base, swizzle.components()[0], out);
+    }
+
+    SpvId result = this->nextId(&swizzle.type());
+    // OpVectorShuffle takes two source vectors; we pass the same base twice and only
+    // index into the first. Instruction length: 5 fixed words plus one per component.
+    this->writeOpCode(SpvOpVectorShuffle, 5 + (int32_t) count, out);
+    this->writeWord(this->getType(swizzle.type()), out);
+    this->writeWord(result, out);
+    this->writeWord(base, out);
+    this->writeWord(base, out);
+    for (int component : swizzle.components()) {
+        this->writeWord(component, out);
+    }
+    return result;
+}
+
+// Emits a single binary SPIR-V instruction, selecting the opcode by the operand's
+// component kind (float / signed int / unsigned int / bool). Callers pass SpvOpUndef
+// for any kind the operator does not support; hitting such a kind reports an error
+// and returns NA.
+SpvId SPIRVCodeGenerator::writeBinaryOperation(const Type& resultType,
+                                               const Type& operandType, SpvId lhs,
+                                               SpvId rhs, SpvOp_ ifFloat, SpvOp_ ifInt,
+                                               SpvOp_ ifUInt, SpvOp_ ifBool, OutputStream& out) {
+    SpvOp_ op = pick_by_type(operandType, ifFloat, ifInt, ifUInt, ifBool);
+    if (op == SpvOpUndef) {
+        fContext.fErrors->error(operandType.fPosition,
+                "unsupported operand for binary expression: " + operandType.description());
+        return NA;
+    }
+    // Reserve the result id only after the opcode is validated, so the error path
+    // does not consume an id that is never defined by any instruction.
+    SpvId result = this->nextId(&resultType);
+    this->writeInstruction(op, this->getType(resultType), result, lhs, rhs, out);
+    return result;
+}
+
+// Collapses a vector-of-bool comparison result down to a single bool using `op`
+// (OpAll / OpAny). Scalar inputs are already a lone bool and pass through untouched.
+SpvId SPIRVCodeGenerator::foldToBool(SpvId id, const Type& operandType, SpvOp op,
+                                     OutputStream& out) {
+    if (!operandType.isVector()) {
+        return id;
+    }
+    SpvId folded = this->nextId(nullptr);
+    this->writeInstruction(op, this->getType(*fContext.fTypes.fBool), folded, id, out);
+    return folded;
+}
+
+// Compares two matrices column by column. Each pair of columns is compared with the
+// float/int vector comparison op, folded to a single bool with `vectorMergeOperator`
+// (OpAll for ==, OpAny for !=), and the per-column bools are chained together with
+// `mergeOperator` (LogicalAnd / LogicalOr).
+SpvId SPIRVCodeGenerator::writeMatrixComparison(const Type& operandType, SpvId lhs, SpvId rhs,
+                                                SpvOp_ floatOperator, SpvOp_ intOperator,
+                                                SpvOp_ vectorMergeOperator, SpvOp_ mergeOperator,
+                                                OutputStream& out) {
+    SpvOp_ compareOp = is_float(operandType) ? floatOperator : intOperator;
+    SkASSERT(operandType.isMatrix());
+    // A column of the matrix is a vector with `rows` components.
+    const Type& columnType = operandType.componentType().toCompound(fContext,
+                                                                    operandType.rows(),
+                                                                    1);
+    SpvId bvecType = this->getType(fContext.fTypes.fBool->toCompound(fContext,
+                                                                     operandType.rows(),
+                                                                     1));
+    SpvId boolType = this->getType(*fContext.fTypes.fBool);
+    // 0 is never a valid SPIR-V id, so it doubles as the "no result yet" sentinel.
+    SpvId result = 0;
+    for (int i = 0; i < operandType.columns(); i++) {
+        // Extract and compare the i'th column from each side.
+        SpvId columnL = this->writeOpCompositeExtract(columnType, lhs, i, out);
+        SpvId columnR = this->writeOpCompositeExtract(columnType, rhs, i, out);
+        SpvId compare = this->nextId(&operandType);
+        this->writeInstruction(compareOp, bvecType, compare, columnL, columnR, out);
+        // Fold the component-wise bvec down to one bool for this column.
+        SpvId merge = this->nextId(nullptr);
+        this->writeInstruction(vectorMergeOperator, boolType, merge, compare, out);
+        if (result != 0) {
+            // Chain this column's verdict onto the accumulated result.
+            SpvId next = this->nextId(nullptr);
+            this->writeInstruction(mergeOperator, boolType, next, result, merge, out);
+            result = next;
+        } else {
+            result = merge;
+        }
+    }
+    return result;
+}
+
+// Applies a unary opcode column-by-column to a matrix: the matrix is decomposed into
+// column vectors, `op` is applied to each, and the results are reassembled into a
+// matrix of the same shape.
+SpvId SPIRVCodeGenerator::writeComponentwiseMatrixUnary(const Type& operandType,
+                                                        SpvId operand,
+                                                        SpvOp_ op,
+                                                        OutputStream& out) {
+    SkASSERT(operandType.isMatrix());
+    // A column of an NxM matrix is a vector with `rows` components.
+    const Type& colType = operandType.componentType().toCompound(fContext,
+                                                                 /*columns=*/operandType.rows(),
+                                                                 /*rows=*/1);
+    SpvId colTypeId = this->getType(colType);
+
+    SkSTArray<4, SpvId> resultColumns;
+    for (int c = 0; c < operandType.columns(); c++) {
+        SpvId src = this->writeOpCompositeExtract(colType, operand, c, out);
+        SpvId dst = this->nextId(&operandType);
+        this->writeInstruction(op, colTypeId, dst, src, out);
+        resultColumns.push_back(dst);
+    }
+
+    return this->writeOpCompositeConstruct(operandType, resultColumns, out);
+}
+
+// Applies a binary opcode column-by-column to two equally-shaped matrices and
+// reassembles the per-column results into a matrix.
+SpvId SPIRVCodeGenerator::writeComponentwiseMatrixBinary(const Type& operandType, SpvId lhs,
+                                                         SpvId rhs, SpvOp_ op, OutputStream& out) {
+    SkASSERT(operandType.isMatrix());
+    // Each column is a vector with `rows` components.
+    const Type& colType = operandType.componentType().toCompound(fContext,
+                                                                 /*columns=*/operandType.rows(),
+                                                                 /*rows=*/1);
+    SpvId colTypeId = this->getType(colType);
+
+    SkSTArray<4, SpvId> resultColumns;
+    for (int c = 0; c < operandType.columns(); c++) {
+        SpvId left = this->writeOpCompositeExtract(colType, lhs, c, out);
+        SpvId right = this->writeOpCompositeExtract(colType, rhs, c, out);
+        SpvId merged = this->nextId(&operandType);
+        this->writeInstruction(op, colTypeId, merged, left, right, out);
+        resultColumns.push_back(merged);
+    }
+    return this->writeOpCompositeConstruct(operandType, resultColumns, out);
+}
+
+// Computes 1.0 / value for a floating-point scalar type.
+SpvId SPIRVCodeGenerator::writeReciprocal(const Type& type, SpvId value, OutputStream& out) {
+    SkASSERT(type.isFloat());
+    // Materialize the constant 1.0 first, then emit the division 1.0 / value.
+    SpvId oneId = this->writeLiteral(1.0, type);
+    SpvId resultId = this->nextId(&type);
+    this->writeInstruction(SpvOpFDiv, this->getType(type), resultId, oneId, value, out);
+    return resultId;
+}
+
+// Broadcasts a scalar across every cell of `matrixType`: the scalar is first splatted
+// into one column vector, and that vector is then repeated for each column.
+SpvId SPIRVCodeGenerator::writeScalarToMatrixSplat(const Type& matrixType,
+                                                   SpvId scalarId,
+                                                   OutputStream& out) {
+    // Step one: build a column vector whose lanes are all `scalarId`.
+    const Type& columnType = matrixType.componentType().toCompound(fContext,
+                                                                   /*columns=*/matrixType.rows(),
+                                                                   /*rows=*/1);
+    SkSTArray<4, SpvId> columnArgs;
+    columnArgs.push_back_n(/*n=*/matrixType.rows(), /*t=*/scalarId);
+    SpvId columnId = this->writeOpCompositeConstruct(columnType, columnArgs, out);
+
+    // Step two: build the matrix out of identical columns.
+    SkSTArray<4, SpvId> matrixArgs;
+    matrixArgs.push_back_n(/*n=*/matrixType.columns(), /*t=*/columnId);
+    return this->writeOpCompositeConstruct(matrixType, matrixArgs, out);
+}
+
+// Returns true when `a` and `b` are the same type, or are scalar/vector/matrix types
+// of identical kind and dimensions whose components share a number kind — pairs that
+// are interchangeable when choosing the SPIR-V operand type.
+static bool types_match(const Type& a, const Type& b) {
+    if (a.matches(b)) {
+        return true;
+    }
+    if (a.typeKind() != b.typeKind()) {
+        return false;
+    }
+    if (!(a.isScalar() || a.isVector() || a.isMatrix())) {
+        return false;
+    }
+    if (a.columns() != b.columns() || a.rows() != b.rows()) {
+        return false;
+    }
+    return a.componentType().numberKind() == b.componentType().numberKind();
+}
+
+// Core lowering for binary operators once both operands have been emitted. First
+// reconciles mismatched operand types (vector-scalar, matrix-scalar, matrix-vector),
+// then dispatches on the operator to emit the matching SPIR-V instruction(s).
+// Returns NA after reporting an error for unsupported combinations.
+SpvId SPIRVCodeGenerator::writeBinaryExpression(const Type& leftType, SpvId lhs, Operator op,
+                                                const Type& rightType, SpvId rhs,
+                                                const Type& resultType, OutputStream& out) {
+    // The comma operator ignores the type of the left-hand side entirely.
+    if (op.kind() == Operator::Kind::COMMA) {
+        return rhs;
+    }
+    // overall type we are operating on: float2, int, uint4...
+    const Type* operandType;
+    if (types_match(leftType, rightType)) {
+        operandType = &leftType;
+    } else {
+        // IR allows mismatched types in expressions (e.g. float2 * float), but they need special
+        // handling in SPIR-V
+        if (leftType.isVector() && rightType.isNumber()) {
+            if (resultType.componentType().isFloat()) {
+                switch (op.kind()) {
+                    case Operator::Kind::SLASH: {
+                        // vec / scalar: rewrite as vec * (1/scalar) so we can use
+                        // OpVectorTimesScalar below.
+                        rhs = this->writeReciprocal(rightType, rhs, out);
+                        [[fallthrough]];
+                    }
+                    case Operator::Kind::STAR: {
+                        SpvId result = this->nextId(&resultType);
+                        this->writeInstruction(SpvOpVectorTimesScalar, this->getType(resultType),
+                                               result, lhs, rhs, out);
+                        return result;
+                    }
+                    default:
+                        break;
+                }
+            }
+            // Vectorize the right-hand side.
+            SkSTArray<4, SpvId> arguments;
+            arguments.push_back_n(/*n=*/leftType.columns(), /*t=*/rhs);
+            rhs = this->writeOpCompositeConstruct(leftType, arguments, out);
+            operandType = &leftType;
+        } else if (rightType.isVector() && leftType.isNumber()) {
+            if (resultType.componentType().isFloat()) {
+                if (op.kind() == Operator::Kind::STAR) {
+                    // scalar * vec is commutative; swap the operand order for
+                    // OpVectorTimesScalar (which wants the vector first).
+                    SpvId result = this->nextId(&resultType);
+                    this->writeInstruction(SpvOpVectorTimesScalar, this->getType(resultType),
+                                           result, rhs, lhs, out);
+                    return result;
+                }
+            }
+            // Vectorize the left-hand side.
+            SkSTArray<4, SpvId> arguments;
+            arguments.push_back_n(/*n=*/rightType.columns(), /*t=*/lhs);
+            lhs = this->writeOpCompositeConstruct(rightType, arguments, out);
+            operandType = &rightType;
+        } else if (leftType.isMatrix()) {
+            if (op.kind() == Operator::Kind::STAR) {
+                // Matrix-times-vector and matrix-times-scalar have dedicated ops in SPIR-V.
+                SpvOp_ spvop;
+                if (rightType.isMatrix()) {
+                    spvop = SpvOpMatrixTimesMatrix;
+                } else if (rightType.isVector()) {
+                    spvop = SpvOpMatrixTimesVector;
+                } else {
+                    SkASSERT(rightType.isScalar());
+                    spvop = SpvOpMatrixTimesScalar;
+                }
+                SpvId result = this->nextId(&resultType);
+                this->writeInstruction(spvop, this->getType(resultType), result, lhs, rhs, out);
+                return result;
+            } else {
+                // Matrix-op-vector is not supported in GLSL/SkSL for non-multiplication ops; we
+                // expect to have a scalar here.
+                SkASSERT(rightType.isScalar());
+
+                // Splat rhs across an entire matrix so we can reuse the matrix-op-matrix path.
+                SpvId rhsMatrix = this->writeScalarToMatrixSplat(leftType, rhs, out);
+
+                // Perform this operation as matrix-op-matrix.
+                return this->writeBinaryExpression(leftType, lhs, op, leftType, rhsMatrix,
+                                                   resultType, out);
+            }
+        } else if (rightType.isMatrix()) {
+            if (op.kind() == Operator::Kind::STAR) {
+                // Matrix-times-vector and matrix-times-scalar have dedicated ops in SPIR-V.
+                SpvId result = this->nextId(&resultType);
+                if (leftType.isVector()) {
+                    this->writeInstruction(SpvOpVectorTimesMatrix, this->getType(resultType),
+                                           result, lhs, rhs, out);
+                } else {
+                    SkASSERT(leftType.isScalar());
+                    // scalar * matrix: OpMatrixTimesScalar takes (matrix, scalar).
+                    this->writeInstruction(SpvOpMatrixTimesScalar, this->getType(resultType),
+                                           result, rhs, lhs, out);
+                }
+                return result;
+            } else {
+                // Vector-op-matrix is not supported in GLSL/SkSL for non-multiplication ops; we
+                // expect to have a scalar here.
+                SkASSERT(leftType.isScalar());
+
+                // Splat lhs across an entire matrix so we can reuse the matrix-op-matrix path.
+                SpvId lhsMatrix = this->writeScalarToMatrixSplat(rightType, lhs, out);
+
+                // Perform this operation as matrix-op-matrix.
+                return this->writeBinaryExpression(rightType, lhsMatrix, op, rightType, rhs,
+                                                   resultType, out);
+            }
+        } else {
+            fContext.fErrors->error(leftType.fPosition, "unsupported mixed-type expression");
+            return NA;
+        }
+    }
+
+    // Both operands now share *operandType; select instructions per operator.
+    switch (op.kind()) {
+        case Operator::Kind::EQEQ: {
+            // Matrices, structs and arrays each need element-wise comparison chains.
+            if (operandType->isMatrix()) {
+                return this->writeMatrixComparison(*operandType, lhs, rhs, SpvOpFOrdEqual,
+                                                   SpvOpIEqual, SpvOpAll, SpvOpLogicalAnd, out);
+            }
+            if (operandType->isStruct()) {
+                return this->writeStructComparison(*operandType, lhs, op, rhs, out);
+            }
+            if (operandType->isArray()) {
+                return this->writeArrayComparison(*operandType, lhs, op, rhs, out);
+            }
+            SkASSERT(resultType.isBoolean());
+            // Vector comparisons produce a bvec that must be folded with OpAll.
+            const Type* tmpType;
+            if (operandType->isVector()) {
+                tmpType = &fContext.fTypes.fBool->toCompound(fContext,
+                                                             operandType->columns(),
+                                                             operandType->rows());
+            } else {
+                tmpType = &resultType;
+            }
+            if (lhs == rhs) {
+                // This ignores the effects of NaN.
+                return this->writeOpConstantTrue(*fContext.fTypes.fBool);
+            }
+            return this->foldToBool(this->writeBinaryOperation(*tmpType, *operandType, lhs, rhs,
+                                                               SpvOpFOrdEqual, SpvOpIEqual,
+                                                               SpvOpIEqual, SpvOpLogicalEqual, out),
+                                    *operandType, SpvOpAll, out);
+        }
+        case Operator::Kind::NEQ:
+            if (operandType->isMatrix()) {
+                return this->writeMatrixComparison(*operandType, lhs, rhs, SpvOpFUnordNotEqual,
+                                                   SpvOpINotEqual, SpvOpAny, SpvOpLogicalOr, out);
+            }
+            if (operandType->isStruct()) {
+                return this->writeStructComparison(*operandType, lhs, op, rhs, out);
+            }
+            if (operandType->isArray()) {
+                return this->writeArrayComparison(*operandType, lhs, op, rhs, out);
+            }
+            [[fallthrough]];
+        case Operator::Kind::LOGICALXOR:
+            SkASSERT(resultType.isBoolean());
+            // Vector != produces a bvec that must be folded with OpAny.
+            const Type* tmpType;
+            if (operandType->isVector()) {
+                tmpType = &fContext.fTypes.fBool->toCompound(fContext,
+                                                             operandType->columns(),
+                                                             operandType->rows());
+            } else {
+                tmpType = &resultType;
+            }
+            if (lhs == rhs) {
+                // This ignores the effects of NaN.
+                return this->writeOpConstantFalse(*fContext.fTypes.fBool);
+            }
+            return this->foldToBool(this->writeBinaryOperation(*tmpType, *operandType, lhs, rhs,
+                                                               SpvOpFUnordNotEqual, SpvOpINotEqual,
+                                                               SpvOpINotEqual, SpvOpLogicalNotEqual,
+                                                               out),
+                                    *operandType, SpvOpAny, out);
+        case Operator::Kind::GT:
+            SkASSERT(resultType.isBoolean());
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+                                              SpvOpFOrdGreaterThan, SpvOpSGreaterThan,
+                                              SpvOpUGreaterThan, SpvOpUndef, out);
+        case Operator::Kind::LT:
+            SkASSERT(resultType.isBoolean());
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFOrdLessThan,
+                                              SpvOpSLessThan, SpvOpULessThan, SpvOpUndef, out);
+        case Operator::Kind::GTEQ:
+            SkASSERT(resultType.isBoolean());
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+                                              SpvOpFOrdGreaterThanEqual, SpvOpSGreaterThanEqual,
+                                              SpvOpUGreaterThanEqual, SpvOpUndef, out);
+        case Operator::Kind::LTEQ:
+            SkASSERT(resultType.isBoolean());
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs,
+                                              SpvOpFOrdLessThanEqual, SpvOpSLessThanEqual,
+                                              SpvOpULessThanEqual, SpvOpUndef, out);
+        case Operator::Kind::PLUS:
+            // Matrices have no direct add op; do it column by column.
+            if (leftType.isMatrix() && rightType.isMatrix()) {
+                SkASSERT(leftType.matches(rightType));
+                return this->writeComponentwiseMatrixBinary(leftType, lhs, rhs, SpvOpFAdd, out);
+            }
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFAdd,
+                                              SpvOpIAdd, SpvOpIAdd, SpvOpUndef, out);
+        case Operator::Kind::MINUS:
+            if (leftType.isMatrix() && rightType.isMatrix()) {
+                SkASSERT(leftType.matches(rightType));
+                return this->writeComponentwiseMatrixBinary(leftType, lhs, rhs, SpvOpFSub, out);
+            }
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFSub,
+                                              SpvOpISub, SpvOpISub, SpvOpUndef, out);
+        case Operator::Kind::STAR:
+            if (leftType.isMatrix() && rightType.isMatrix()) {
+                // matrix multiply
+                SpvId result = this->nextId(&resultType);
+                this->writeInstruction(SpvOpMatrixTimesMatrix, this->getType(resultType), result,
+                                       lhs, rhs, out);
+                return result;
+            }
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFMul,
+                                              SpvOpIMul, SpvOpIMul, SpvOpUndef, out);
+        case Operator::Kind::SLASH:
+            // Matrix / matrix is component-wise, unlike matrix * matrix above.
+            if (leftType.isMatrix() && rightType.isMatrix()) {
+                SkASSERT(leftType.matches(rightType));
+                return this->writeComponentwiseMatrixBinary(leftType, lhs, rhs, SpvOpFDiv, out);
+            }
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFDiv,
+                                              SpvOpSDiv, SpvOpUDiv, SpvOpUndef, out);
+        case Operator::Kind::PERCENT:
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpFMod,
+                                              SpvOpSMod, SpvOpUMod, SpvOpUndef, out);
+        case Operator::Kind::SHL:
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+                                              SpvOpShiftLeftLogical, SpvOpShiftLeftLogical,
+                                              SpvOpUndef, out);
+        case Operator::Kind::SHR:
+            // Signed shift is arithmetic (sign-extending); unsigned is logical.
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+                                              SpvOpShiftRightArithmetic, SpvOpShiftRightLogical,
+                                              SpvOpUndef, out);
+        case Operator::Kind::BITWISEAND:
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+                                              SpvOpBitwiseAnd, SpvOpBitwiseAnd, SpvOpUndef, out);
+        case Operator::Kind::BITWISEOR:
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+                                              SpvOpBitwiseOr, SpvOpBitwiseOr, SpvOpUndef, out);
+        case Operator::Kind::BITWISEXOR:
+            return this->writeBinaryOperation(resultType, *operandType, lhs, rhs, SpvOpUndef,
+                                              SpvOpBitwiseXor, SpvOpBitwiseXor, SpvOpUndef, out);
+        default:
+            fContext.fErrors->error(Position(), "unsupported token");
+            return NA;
+    }
+}
+
+// Compares two arrays element by element with == or !=, folding the per-element
+// verdicts into a single bool via mergeComparisons (AND for ==, OR for !=).
+SpvId SPIRVCodeGenerator::writeArrayComparison(const Type& arrayType, SpvId lhs, Operator op,
+                                               SpvId rhs, OutputStream& out) {
+    // Only equality/inequality are defined on arrays.
+    SkASSERT(op.kind() == Operator::Kind::EQEQ || op.kind() == Operator::Kind::NEQ);
+    SkASSERT(arrayType.isArray());
+    const Type& elemType = arrayType.componentType();
+    const int numElems = arrayType.columns();
+    SkASSERT(numElems > 0);
+
+    const Type& boolType = *fContext.fTypes.fBool;
+    SpvId combined = NA;
+    for (int i = 0; i < numElems; ++i) {
+        // Pull out the i'th element from each operand...
+        SpvId leftElem = this->writeOpCompositeExtract(elemType, lhs, i, out);
+        SpvId rightElem = this->writeOpCompositeExtract(elemType, rhs, i, out);
+        // ...compare the pair (recursing through writeBinaryExpression handles nested
+        // arrays/structs)...
+        SpvId cmp = this->writeBinaryExpression(elemType, leftElem, op,
+                                                elemType, rightElem, boolType, out);
+        // ...then accumulate into the running result.
+        combined = this->mergeComparisons(cmp, combined, op, out);
+    }
+    return combined;
+}
+
+// Compares two structs field by field with == or !=, folding the per-field verdicts
+// into a single bool via mergeComparisons (AND for ==, OR for !=).
+SpvId SPIRVCodeGenerator::writeStructComparison(const Type& structType, SpvId lhs, Operator op,
+                                                SpvId rhs, OutputStream& out) {
+    // The inputs must be structs containing fields, and the op must be == or !=.
+    SkASSERT(op.kind() == Operator::Kind::EQEQ || op.kind() == Operator::Kind::NEQ);
+    SkASSERT(structType.isStruct());
+    const std::vector<Type::Field>& fields = structType.fields();
+    SkASSERT(!fields.empty());
+
+    // Synthesize equality checks for each field in the struct.
+    const Type& boolType = *fContext.fTypes.fBool;
+    SpvId allComparisons = NA;
+    for (int index = 0; index < (int)fields.size(); ++index) {
+        // Get the left and right versions of this field.
+        const Type& fieldType = *fields[index].fType;
+
+        SpvId fieldL = this->writeOpCompositeExtract(fieldType, lhs, index, out);
+        SpvId fieldR = this->writeOpCompositeExtract(fieldType, rhs, index, out);
+        // Use `writeBinaryExpression` with the requested == or != operator on these fields;
+        // recursion handles nested structs/arrays/matrices.
+        SpvId comparison = this->writeBinaryExpression(fieldType, fieldL, op, fieldType, fieldR,
+                                                       boolType, out);
+        // Merge this comparison result with all the other comparisons we've done.
+        allComparisons = this->mergeComparisons(comparison, allComparisons, op, out);
+    }
+    return allComparisons;
+}
+
+// Combines one comparison result with the running combination of all prior results.
+// Equality chains AND together (every piece must match); inequality chains OR (any
+// mismatch suffices).
+SpvId SPIRVCodeGenerator::mergeComparisons(SpvId comparison, SpvId allComparisons, Operator op,
+                                           OutputStream& out) {
+    if (allComparisons == NA) {
+        // First comparison: nothing to merge with yet.
+        return comparison;
+    }
+    const Type& boolType = *fContext.fTypes.fBool;
+    SpvId boolTypeId = this->getType(boolType);
+    SpvId logicalOp = this->nextId(&boolType);
+    if (op.kind() == Operator::Kind::EQEQ) {
+        this->writeInstruction(SpvOpLogicalAnd, boolTypeId, logicalOp,
+                               comparison, allComparisons, out);
+    } else if (op.kind() == Operator::Kind::NEQ) {
+        this->writeInstruction(SpvOpLogicalOr, boolTypeId, logicalOp,
+                               comparison, allComparisons, out);
+    } else {
+        SkDEBUGFAILF("mergeComparisons only supports == and !=, not %s", op.operatorName());
+        return NA;
+    }
+    return logicalOp;
+}
+
+// Entry point for lowering a BinaryExpression IR node. Assignment and the
+// short-circuiting logical operators get special handling; everything else evaluates
+// both sides eagerly and defers to the type-based writeBinaryExpression overload.
+SpvId SPIRVCodeGenerator::writeBinaryExpression(const BinaryExpression& b, OutputStream& out) {
+    const Expression* left = b.left().get();
+    const Expression* right = b.right().get();
+    Operator op = b.getOperator();
+
+    switch (op.kind()) {
+        case Operator::Kind::EQ: {
+            // Handles assignment.
+            SpvId rhs = this->writeExpression(*right, out);
+            this->getLValue(*left, out)->store(rhs, out);
+            return rhs;
+        }
+        case Operator::Kind::LOGICALAND:
+            // Handles short-circuiting; we don't necessarily evaluate both LHS and RHS.
+            return this->writeLogicalAnd(*b.left(), *b.right(), out);
+
+        case Operator::Kind::LOGICALOR:
+            // Handles short-circuiting; we don't necessarily evaluate both LHS and RHS.
+            return this->writeLogicalOr(*b.left(), *b.right(), out);
+
+        default:
+            break;
+    }
+
+    // For compound assignment (+=, *=, ...) load the current value through an l-value
+    // so the result can be stored back afterwards; plain operators just evaluate.
+    std::unique_ptr<LValue> lvalue;
+    SpvId lhs;
+    if (op.isAssignment()) {
+        lvalue = this->getLValue(*left, out);
+        lhs = lvalue->load(out);
+    } else {
+        lvalue = nullptr;
+        lhs = this->writeExpression(*left, out);
+    }
+
+    SpvId rhs = this->writeExpression(*right, out);
+    // Strip the assignment component (e.g. += becomes +) for the arithmetic itself.
+    SpvId result = this->writeBinaryExpression(left->type(), lhs, op.removeAssignment(),
+                                               right->type(), rhs, b.type(), out);
+    if (lvalue) {
+        // Compound assignment: write the computed value back to the destination.
+        lvalue->store(result, out);
+    }
+    return result;
+}
+
+// Emits a short-circuiting `&&`: the RHS block only executes when the LHS is true.
+// The result is an OpPhi that yields `false` when control arrived straight from the
+// LHS block, or the RHS value when the RHS block ran.
+SpvId SPIRVCodeGenerator::writeLogicalAnd(const Expression& left, const Expression& right,
+                                          OutputStream& out) {
+    SpvId falseConstant = this->writeLiteral(0.0, *fContext.fTypes.fBool);
+    SpvId lhs = this->writeExpression(left, out);
+
+    // Snapshot op counts so conditionally-emitted ops can be pruned afterwards.
+    ConditionalOpCounts conditionalOps = this->getConditionalOpCounts();
+
+    SpvId rhsLabel = this->nextId(nullptr);
+    SpvId end = this->nextId(nullptr);
+    SpvId lhsBlock = fCurrentBlock;
+    this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+    // Branch into the RHS block only when the LHS evaluated true.
+    this->writeInstruction(SpvOpBranchConditional, lhs, rhsLabel, end, out);
+    this->writeLabel(rhsLabel, kBranchIsOnPreviousLine, out);
+    SpvId rhs = this->writeExpression(right, out);
+    // The RHS may itself branch; capture whichever block we ended up in for the phi.
+    SpvId rhsBlock = fCurrentBlock;
+    this->writeInstruction(SpvOpBranch, end, out);
+    this->writeLabel(end, kBranchIsAbove, conditionalOps, out);
+    SpvId result = this->nextId(nullptr);
+    this->writeInstruction(SpvOpPhi, this->getType(*fContext.fTypes.fBool), result, falseConstant,
+                           lhsBlock, rhs, rhsBlock, out);
+
+    return result;
+}
+
+// Emits a short-circuiting `||`: the RHS block only executes when the LHS is false.
+// The result is an OpPhi that yields `true` when control arrived straight from the
+// LHS block, or the RHS value when the RHS block ran.
+SpvId SPIRVCodeGenerator::writeLogicalOr(const Expression& left, const Expression& right,
+                                         OutputStream& out) {
+    SpvId trueConstant = this->writeLiteral(1.0, *fContext.fTypes.fBool);
+    SpvId lhs = this->writeExpression(left, out);
+
+    // Snapshot op counts so conditionally-emitted ops can be pruned afterwards.
+    ConditionalOpCounts conditionalOps = this->getConditionalOpCounts();
+
+    SpvId rhsLabel = this->nextId(nullptr);
+    SpvId end = this->nextId(nullptr);
+    SpvId lhsBlock = fCurrentBlock;
+    this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+    // Skip straight to the merge block when the LHS evaluated true.
+    this->writeInstruction(SpvOpBranchConditional, lhs, end, rhsLabel, out);
+    this->writeLabel(rhsLabel, kBranchIsOnPreviousLine, out);
+    SpvId rhs = this->writeExpression(right, out);
+    // The RHS may itself branch; capture whichever block we ended up in for the phi.
+    SpvId rhsBlock = fCurrentBlock;
+    this->writeInstruction(SpvOpBranch, end, out);
+    this->writeLabel(end, kBranchIsAbove, conditionalOps, out);
+    SpvId result = this->nextId(nullptr);
+    this->writeInstruction(SpvOpPhi, this->getType(*fContext.fTypes.fBool), result, trueConstant,
+                           lhsBlock, rhs, rhsBlock, out);
+
+    return result;
+}
+
+// Lowers `test ? ifTrue : ifFalse`. When both arms are scalar compile-time constants
+// we can evaluate both and use OpSelect; otherwise we emit real control flow and pass
+// the chosen value through a function-local temporary variable.
+SpvId SPIRVCodeGenerator::writeTernaryExpression(const TernaryExpression& t, OutputStream& out) {
+    const Type& type = t.type();
+    SpvId test = this->writeExpression(*t.test(), out);
+    if (t.ifTrue()->type().columns() == 1 &&
+        Analysis::IsCompileTimeConstant(*t.ifTrue()) &&
+        Analysis::IsCompileTimeConstant(*t.ifFalse())) {
+        // both true and false are constants, can just use OpSelect
+        SpvId result = this->nextId(nullptr);
+        SpvId trueId = this->writeExpression(*t.ifTrue(), out);
+        SpvId falseId = this->writeExpression(*t.ifFalse(), out);
+        this->writeInstruction(SpvOpSelect, this->getType(type), result, test, trueId, falseId,
+                               out);
+        return result;
+    }
+
+    // Snapshot op counts so conditionally-emitted ops can be pruned afterwards.
+    ConditionalOpCounts conditionalOps = this->getConditionalOpCounts();
+
+    // was originally using OpPhi to choose the result, but for some reason that is crashing on
+    // Adreno. Switched to storing the result in a temp variable as glslang does.
+    SpvId var = this->nextId(nullptr);
+    // Variable declarations go into fVariableBuffer so they land at function start.
+    this->writeInstruction(SpvOpVariable, this->getPointerType(type, SpvStorageClassFunction),
+                           var, SpvStorageClassFunction, fVariableBuffer);
+    SpvId trueLabel = this->nextId(nullptr);
+    SpvId falseLabel = this->nextId(nullptr);
+    SpvId end = this->nextId(nullptr);
+    this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+    this->writeInstruction(SpvOpBranchConditional, test, trueLabel, falseLabel, out);
+    // True arm: evaluate and store into the temporary.
+    this->writeLabel(trueLabel, kBranchIsOnPreviousLine, out);
+    this->writeOpStore(SpvStorageClassFunction, var, this->writeExpression(*t.ifTrue(), out), out);
+    this->writeInstruction(SpvOpBranch, end, out);
+    // False arm: likewise.
+    this->writeLabel(falseLabel, kBranchIsAbove, conditionalOps, out);
+    this->writeOpStore(SpvStorageClassFunction, var, this->writeExpression(*t.ifFalse(), out), out);
+    this->writeInstruction(SpvOpBranch, end, out);
+    this->writeLabel(end, kBranchIsAbove, conditionalOps, out);
+    // Reload whichever value was stored.
+    SpvId result = this->nextId(&type);
+    this->writeInstruction(SpvOpLoad, this->getType(type), result, var, out);
+
+    return result;
+}
+
+// Lowers prefix operators: unary -, unary +, ++/--, ! and ~. Unary minus is handled
+// up front because matrices need a column-wise negate instead of a single instruction.
+SpvId SPIRVCodeGenerator::writePrefixExpression(const PrefixExpression& p, OutputStream& out) {
+    const Type& type = p.type();
+    if (p.getOperator().kind() == Operator::Kind::MINUS) {
+        // Choose FNegate vs SNegate by the operand's number kind.
+        SpvOp_ negateOp = pick_by_type(type, SpvOpFNegate, SpvOpSNegate, SpvOpSNegate, SpvOpUndef);
+        SkASSERT(negateOp != SpvOpUndef);
+        SpvId expr = this->writeExpression(*p.operand(), out);
+        if (type.isMatrix()) {
+            return this->writeComponentwiseMatrixUnary(type, expr, negateOp, out);
+        }
+        SpvId result = this->nextId(&type);
+        SpvId typeId = this->getType(type);
+        this->writeInstruction(negateOp, typeId, result, expr, out);
+        return result;
+    }
+    switch (p.getOperator().kind()) {
+        case Operator::Kind::PLUS:
+            // Unary plus is a no-op.
+            return this->writeExpression(*p.operand(), out);
+        case Operator::Kind::PLUSPLUS: {
+            // Pre-increment: load, add one, store, and return the updated value.
+            // (writeLiteral maps 1.0 onto the correct constant kind for `type`.)
+            std::unique_ptr<LValue> lv = this->getLValue(*p.operand(), out);
+            SpvId one = this->writeLiteral(1.0, type);
+            SpvId result = this->writeBinaryOperation(type, type, lv->load(out), one,
+                                                      SpvOpFAdd, SpvOpIAdd, SpvOpIAdd, SpvOpUndef,
+                                                      out);
+            lv->store(result, out);
+            return result;
+        }
+        case Operator::Kind::MINUSMINUS: {
+            // Pre-decrement: load, subtract one, store, and return the updated value.
+            std::unique_ptr<LValue> lv = this->getLValue(*p.operand(), out);
+            SpvId one = this->writeLiteral(1.0, type);
+            SpvId result = this->writeBinaryOperation(type, type, lv->load(out), one, SpvOpFSub,
+                                                      SpvOpISub, SpvOpISub, SpvOpUndef, out);
+            lv->store(result, out);
+            return result;
+        }
+        case Operator::Kind::LOGICALNOT: {
+            SkASSERT(p.operand()->type().isBoolean());
+            SpvId result = this->nextId(nullptr);
+            this->writeInstruction(SpvOpLogicalNot, this->getType(type), result,
+                                   this->writeExpression(*p.operand(), out), out);
+            return result;
+        }
+        case Operator::Kind::BITWISENOT: {
+            SpvId result = this->nextId(nullptr);
+            this->writeInstruction(SpvOpNot, this->getType(type), result,
+                                   this->writeExpression(*p.operand(), out), out);
+            return result;
+        }
+        default:
+            SkDEBUGFAILF("unsupported prefix expression: %s",
+                         p.description(OperatorPrecedence::kTopLevel).c_str());
+            return NA;
+    }
+}
+
+// Lowers postfix ++/--: loads the current value, stores the incremented/decremented
+// value back, but returns the ORIGINAL (pre-update) value, per postfix semantics.
+SpvId SPIRVCodeGenerator::writePostfixExpression(const PostfixExpression& p, OutputStream& out) {
+    const Type& type = p.type();
+    std::unique_ptr<LValue> lv = this->getLValue(*p.operand(), out);
+    SpvId result = lv->load(out);
+    // writeLiteral maps 1.0 onto the correct constant kind (float/int) for `type`.
+    SpvId one = this->writeLiteral(1.0, type);
+    switch (p.getOperator().kind()) {
+        case Operator::Kind::PLUSPLUS: {
+            SpvId temp = this->writeBinaryOperation(type, type, result, one, SpvOpFAdd,
+                                                    SpvOpIAdd, SpvOpIAdd, SpvOpUndef, out);
+            lv->store(temp, out);
+            return result;
+        }
+        case Operator::Kind::MINUSMINUS: {
+            SpvId temp = this->writeBinaryOperation(type, type, result, one, SpvOpFSub,
+                                                    SpvOpISub, SpvOpISub, SpvOpUndef, out);
+            lv->store(temp, out);
+            return result;
+        }
+        default:
+            SkDEBUGFAILF("unsupported postfix expression %s",
+                         p.description(OperatorPrecedence::kTopLevel).c_str());
+            return NA;
+    }
+}
+
+// Emits a literal IR node by forwarding its value to the (value, type) overload.
+SpvId SPIRVCodeGenerator::writeLiteral(const Literal& l) {
+    const double value = l.value();
+    return this->writeLiteral(value, l.type());
+}
+
+// Emits a constant of `type` holding `value`. Float constants are encoded as the raw
+// 32-bit pattern of the (narrowed) float; booleans use the dedicated true/false
+// constant ops; all remaining (integer) kinds truncate the value to SKSL_INT.
+SpvId SPIRVCodeGenerator::writeLiteral(double value, const Type& type) {
+    const Type::NumberKind kind = type.numberKind();
+    if (kind == Type::NumberKind::kFloat) {
+        // Bit-pun the narrowed float into a 32-bit payload word.
+        float floatVal = value;
+        int32_t valueBits;
+        memcpy(&valueBits, &floatVal, sizeof(valueBits));
+        return this->writeOpConstant(type, valueBits);
+    }
+    if (kind == Type::NumberKind::kBoolean) {
+        return value ? this->writeOpConstantTrue(type)
+                     : this->writeOpConstantFalse(type);
+    }
+    // Signed and unsigned integer kinds share the same payload encoding.
+    return this->writeOpConstant(type, (SKSL_INT)value);
+}
+
+// Emits the OpFunction header for `f`: the function's pre-assigned id, a debug OpName
+// carrying the mangled name, and one OpFunctionParameter per parameter. Under Dawn
+// compat mode a combined-sampler parameter is split into separate texture and sampler
+// parameters, both recorded in fVariableMap.
+SpvId SPIRVCodeGenerator::writeFunctionStart(const FunctionDeclaration& f, OutputStream& out) {
+    // The function id was assigned earlier and stored in fFunctionMap.
+    SpvId result = fFunctionMap[&f];
+    SpvId returnTypeId = this->getType(f.returnType());
+    SpvId functionTypeId = this->getFunctionType(f);
+    this->writeInstruction(SpvOpFunction, returnTypeId, result,
+                           SpvFunctionControlMaskNone, functionTypeId, out);
+    // Debug name goes into the dedicated name buffer, not the function body stream.
+    std::string mangledName = f.mangledName();
+    this->writeInstruction(SpvOpName,
+                           result,
+                           std::string_view(mangledName.c_str(), mangledName.size()),
+                           fNameBuffer);
+    for (const Variable* parameter : f.parameters()) {
+        if (parameter->type().typeKind() == Type::TypeKind::kSampler &&
+            fProgram.fConfig->fSettings.fSPIRVDawnCompatMode) {
+            // Dawn compat: replace the combined image-sampler parameter with a
+            // synthesized (texture, sampler) pair.
+            auto [texture, sampler] = this->synthesizeTextureAndSampler(*parameter);
+
+            SpvId textureId = this->nextId(nullptr);
+            SpvId samplerId = this->nextId(nullptr);
+            fVariableMap.set(texture, textureId);
+            fVariableMap.set(sampler, samplerId);
+
+            SpvId textureType = this->getFunctionParameterType(texture->type());
+            SpvId samplerType = this->getFunctionParameterType(sampler->type());
+
+            this->writeInstruction(SpvOpFunctionParameter, textureType, textureId, out);
+            this->writeInstruction(SpvOpFunctionParameter, samplerType, samplerId, out);
+        } else {
+            // Ordinary parameter: one id, one OpFunctionParameter.
+            SpvId id = this->nextId(nullptr);
+            fVariableMap.set(parameter, id);
+
+            SpvId type = this->getFunctionParameterType(parameter->type());
+            this->writeInstruction(SpvOpFunctionParameter, type, id, out);
+        }
+    }
+    return result;
+}
+
+// Emits a complete function definition. The body is rendered into a side buffer first
+// so that all OpVariable declarations (accumulated in fVariableBuffer while the body
+// is generated) can be emitted at the top of the function, as SPIR-V requires.
+SpvId SPIRVCodeGenerator::writeFunction(const FunctionDefinition& f, OutputStream& out) {
+    ConditionalOpCounts conditionalOps = this->getConditionalOpCounts();
+
+    fVariableBuffer.reset();
+    SpvId result = this->writeFunctionStart(f.declaration(), out);
+    fCurrentBlock = 0;
+    // Open the entry basic block.
+    this->writeLabel(this->nextId(nullptr), kBranchlessBlock, out);
+    StringStream bodyBuffer;
+    this->writeBlock(f.body()->as<Block>(), bodyBuffer);
+    // Function-local variables first...
+    write_stringstream(fVariableBuffer, out);
+    if (f.declaration().isMain()) {
+        // ...then, for main only, the deferred global initializers...
+        write_stringstream(fGlobalInitializersBuffer, out);
+    }
+    // ...then the actual body.
+    write_stringstream(bodyBuffer, out);
+    if (fCurrentBlock) {
+        // The final block was not explicitly terminated; cap it off.
+        if (f.declaration().returnType().isVoid()) {
+            this->writeInstruction(SpvOpReturn, out);
+        } else {
+            this->writeInstruction(SpvOpUnreachable, out);
+        }
+    }
+    this->writeInstruction(SpvOpFunctionEnd, out);
+    this->pruneConditionalOps(conditionalOps);
+    return result;
+}
+
+// Emits OpDecorate instructions (into fDecorationBuffer) for a variable's layout
+// qualifiers: location, binding, index, set, input_attachment_index and builtin.
+// 'binding' and 'set' are reported as errors on push constants.
+void SPIRVCodeGenerator::writeLayout(const Layout& layout, SpvId target, Position pos) {
+ bool isPushConstant = (layout.fFlags & Layout::kPushConstant_Flag);
+ if (layout.fLocation >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationLocation, layout.fLocation,
+ fDecorationBuffer);
+ }
+ if (layout.fBinding >= 0) {
+ if (isPushConstant) {
+ fContext.fErrors->error(pos, "Can't apply 'binding' to push constants");
+ } else {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationBinding, layout.fBinding,
+ fDecorationBuffer);
+ }
+ }
+ if (layout.fIndex >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationIndex, layout.fIndex,
+ fDecorationBuffer);
+ }
+ if (layout.fSet >= 0) {
+ if (isPushConstant) {
+ fContext.fErrors->error(pos, "Can't apply 'set' to push constants");
+ } else {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationDescriptorSet, layout.fSet,
+ fDecorationBuffer);
+ }
+ }
+ if (layout.fInputAttachmentIndex >= 0) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationInputAttachmentIndex,
+ layout.fInputAttachmentIndex, fDecorationBuffer);
+ // Using an input attachment requires declaring the InputAttachment capability.
+ fCapabilities |= (((uint64_t) 1) << SpvCapabilityInputAttachment);
+ }
+ // SK_FRAGCOLOR_BUILTIN is deliberately excluded from the BuiltIn decoration here.
+ if (layout.fBuiltin >= 0 && layout.fBuiltin != SK_FRAGCOLOR_BUILTIN) {
+ this->writeInstruction(SpvOpDecorate, target, SpvDecorationBuiltIn, layout.fBuiltin,
+ fDecorationBuffer);
+ }
+}
+
+// Emits member decorations for a struct field's layout qualifiers. `target` is
+// the struct type's SpvId and `member` is the field index within it.
+void SPIRVCodeGenerator::writeFieldLayout(const Layout& layout, SpvId target, int member) {
+ // 'binding' and 'set' can not be applied to struct members
+ SkASSERT(layout.fBinding == -1);
+ SkASSERT(layout.fSet == -1);
+ if (layout.fLocation >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationLocation,
+ layout.fLocation, fDecorationBuffer);
+ }
+ if (layout.fIndex >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationIndex,
+ layout.fIndex, fDecorationBuffer);
+ }
+ if (layout.fInputAttachmentIndex >= 0) {
+ // NOTE(review): this emits SpvOpDecorate with a member operand, unlike the
+ // other field decorations which use SpvOpMemberDecorate — confirm against the
+ // SPIR-V spec whether SpvOpMemberDecorate was intended here.
+ this->writeInstruction(SpvOpDecorate, target, member, SpvDecorationInputAttachmentIndex,
+ layout.fInputAttachmentIndex, fDecorationBuffer);
+ }
+ if (layout.fBuiltin >= 0) {
+ this->writeInstruction(SpvOpMemberDecorate, target, member, SpvDecorationBuiltIn,
+ layout.fBuiltin, fDecorationBuffer);
+ }
+}
+
+// Push-constant storage uses std430 layout; everything else uses the
+// generator's default memory layout.
+MemoryLayout SPIRVCodeGenerator::memoryLayoutForStorageClass(SpvStorageClass_ storageClass) {
+ return storageClass == SpvStorageClassPushConstant ? MemoryLayout(MemoryLayout::Standard::k430)
+ : fDefaultLayout;
+}
+
+// Same rule as above, keyed off the variable's push_constant layout flag.
+MemoryLayout SPIRVCodeGenerator::memoryLayoutForVariable(const Variable& v) const {
+ bool pushConstant = ((v.modifiers().fLayout.fFlags & Layout::kPushConstant_Flag) != 0);
+ return pushConstant ? MemoryLayout(MemoryLayout::Standard::k430) : fDefaultLayout;
+}
+
+// Emits an interface block (uniform/buffer/in/out struct) as a decorated struct
+// type plus an OpVariable, and records the variable in fVariableMap. When the
+// program needs the RT-flip uniform and `appendRTFlip` is set, the block is
+// rewritten with an extra sk_RTFlip float2 field appended, and this function
+// recurses once on the modified copy. Returns the variable's SpvId.
+SpvId SPIRVCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf, bool appendRTFlip) {
+ MemoryLayout memoryLayout = this->memoryLayoutForVariable(*intf.var());
+ SpvId result = this->nextId(nullptr);
+ const Variable& intfVar = *intf.var();
+ const Type& type = intfVar.type();
+ if (!memoryLayout.isSupported(type)) {
+ fContext.fErrors->error(type.fPosition, "type '" + type.displayName() +
+ "' is not permitted here");
+ return this->nextId(nullptr);
+ }
+ SpvStorageClass_ storageClass =
+ get_storage_class_for_global_variable(intfVar, SpvStorageClassFunction);
+ if (fProgram.fInputs.fUseFlipRTUniform && appendRTFlip && type.isStruct()) {
+ // We can only have one interface block (because we use push_constant and that is limited
+ // to one per program), so we need to append rtflip to this one rather than synthesize an
+ // entirely new block when the variable is referenced. And we can't modify the existing
+ // block, so we instead create a modified copy of it and write that.
+ std::vector<Type::Field> fields = type.fields();
+ fields.emplace_back(Position(),
+ Modifiers(Layout(/*flags=*/0,
+ /*location=*/-1,
+ fProgram.fConfig->fSettings.fRTFlipOffset,
+ /*binding=*/-1,
+ /*index=*/-1,
+ /*set=*/-1,
+ /*builtin=*/-1,
+ /*inputAttachmentIndex=*/-1),
+ /*flags=*/0),
+ SKSL_RTFLIP_NAME,
+ fContext.fTypes.fFloat2.get());
+ {
+ AutoAttachPoolToThread attach(fProgram.fPool.get());
+ // Build a new struct type (same name, extra field) and a matching variable,
+ // both owned by the program's symbol table.
+ const Type* rtFlipStructType = fProgram.fSymbols->takeOwnershipOfSymbol(
+ Type::MakeStructType(fContext,
+ type.fPosition,
+ type.name(),
+ std::move(fields),
+ /*interfaceBlock=*/true));
+ InterfaceBlockVariable* modifiedVar = fProgram.fSymbols->takeOwnershipOfSymbol(
+ std::make_unique<InterfaceBlockVariable>(intfVar.fPosition,
+ intfVar.modifiersPosition(),
+ &intfVar.modifiers(),
+ intfVar.name(),
+ rtFlipStructType,
+ intfVar.isBuiltin(),
+ intfVar.storage()));
+ // The synthesized variable isn't tracked by ProgramUsage; see isDead().
+ fSPIRVBonusVariables.add(modifiedVar);
+ InterfaceBlock modifiedCopy(intf.fPosition, modifiedVar, intf.typeOwner());
+ // Recurse with appendRTFlip=false so the copy is emitted as-is.
+ result = this->writeInterfaceBlock(modifiedCopy, /*appendRTFlip=*/false);
+ fProgram.fSymbols->add(std::make_unique<Field>(
+ Position(), modifiedVar, rtFlipStructType->fields().size() - 1));
+ }
+ // Map the ORIGINAL variable to the modified block's id so existing references
+ // resolve to the emitted block.
+ fVariableMap.set(&intfVar, result);
+ fWroteRTFlip = true;
+ return result;
+ }
+ const Modifiers& intfModifiers = intfVar.modifiers();
+ SpvId typeId = this->getType(type, memoryLayout);
+ if (intfModifiers.fLayout.fBuiltin == -1) {
+ // Note: In SPIR-V 1.3, a storage buffer can be declared with the "StorageBuffer"
+ // storage class and the "Block" decoration and the <1.3 approach we use here ("Uniform"
+ // storage class and the "BufferBlock" decoration) is deprecated. Since we target SPIR-V
+ // 1.0, we have to use the deprecated approach which is well supported in Vulkan and
+ // addresses SkSL use cases (notably SkSL currently doesn't support pointer features that
+ // would benefit from SPV_KHR_variable_pointers capabilities).
+ bool isStorageBuffer = intfModifiers.fFlags & Modifiers::kBuffer_Flag;
+ this->writeInstruction(SpvOpDecorate,
+ typeId,
+ isStorageBuffer ? SpvDecorationBufferBlock : SpvDecorationBlock,
+ fDecorationBuffer);
+ }
+ SpvId ptrType = this->nextId(nullptr);
+ this->writeInstruction(SpvOpTypePointer, ptrType, storageClass, typeId, fConstantBuffer);
+ this->writeInstruction(SpvOpVariable, ptrType, result, storageClass, fConstantBuffer);
+ Layout layout = intfModifiers.fLayout;
+ // Uniform blocks with no explicit set fall back to the configured default set.
+ if (storageClass == SpvStorageClassUniform && layout.fSet < 0) {
+ layout.fSet = fProgram.fConfig->fSettings.fDefaultUniformSet;
+ }
+ this->writeLayout(layout, result, intfVar.fPosition);
+ fVariableMap.set(&intfVar, result);
+ return result;
+}
+
+// Returns true if `var` is never read or written and can safely be elided.
+// Synthesized "bonus" variables and in/out/uniform interface variables are
+// never considered dead.
+bool SPIRVCodeGenerator::isDead(const Variable& var) const {
+ // During SPIR-V code generation, we synthesize some extra bonus variables that don't actually
+ // exist in the Program at all and aren't tracked by the ProgramUsage. They aren't dead, though.
+ if (fSPIRVBonusVariables.contains(&var)) {
+ return false;
+ }
+ ProgramUsage::VariableCounts counts = fProgram.usage()->get(var);
+ if (counts.fRead || counts.fWrite) {
+ return false;
+ }
+ // It's not entirely clear what the rules are for eliding interface variables. Generally, it
+ // causes problems to elide them, even when they're dead.
+ return !(var.modifiers().fFlags &
+ (Modifiers::kIn_Flag | Modifiers::kOut_Flag | Modifiers::kUniform_Flag));
+}
+
+// This function determines whether to skip an OpVariable (of pointer type) declaration for
+// compile-time constant scalars and vectors which we turn into OpConstant/OpConstantComposite and
+// always reference by value.
+//
+// Accessing a matrix or array member with a dynamic index requires the use of OpAccessChain which
+// requires a base operand of pointer type. However, a vector can always be accessed by value using
+// OpVectorExtractDynamic (see writeIndexExpression).
+//
+// This is why we always emit an OpVariable for all non-scalar and non-vector types in case they get
+// accessed via a dynamic index.
+//
+// Returns true only for `const` scalar/vector declarations whose initializer is a
+// compile-time constant (or folds to one).
+static bool is_vardecl_compile_time_constant(const VarDeclaration& varDecl) {
+ return varDecl.var()->modifiers().fFlags & Modifiers::kConst_Flag &&
+ (varDecl.var()->type().isScalar() || varDecl.var()->type().isVector()) &&
+ (ConstantFolder::GetConstantValueOrNullForVariable(*varDecl.value()) ||
+ Analysis::IsCompileTimeConstant(*varDecl.value()));
+}
+
+// Processes one global variable declaration. Compile-time constants are skipped
+// (emitted later as OpConstant on reference), top-level uniforms are deferred to
+// writeUniformBuffer, dead variables are dropped, and Dawn-compat combined
+// samplers are split into texture+sampler globals. Returns false on error.
+bool SPIRVCodeGenerator::writeGlobalVarDeclaration(ProgramKind kind,
+ const VarDeclaration& varDecl) {
+ const Variable* var = varDecl.var();
+ const bool inDawnMode = fProgram.fConfig->fSettings.fSPIRVDawnCompatMode;
+ const int backendFlags = var->modifiers().fLayout.fFlags & Layout::kAllBackendFlagsMask;
+ // Only SPIR-V layout flags are allowed (plus WGSL flags in Dawn-compat mode).
+ const int permittedBackendFlags = Layout::kSPIRV_Flag | (inDawnMode ? Layout::kWGSL_Flag : 0);
+ if (backendFlags & ~permittedBackendFlags) {
+ fContext.fErrors->error(var->fPosition, "incompatible backend flag in SPIR-V codegen");
+ return false;
+ }
+
+ // If this global variable is a compile-time constant then we'll emit OpConstant or
+ // OpConstantComposite later when the variable is referenced. Avoid declaring an OpVariable now.
+ if (is_vardecl_compile_time_constant(varDecl)) {
+ return true;
+ }
+
+ SpvStorageClass_ storageClass =
+ get_storage_class_for_global_variable(*var, SpvStorageClassPrivate);
+ if (storageClass == SpvStorageClassUniform) {
+ // Top-level uniforms are emitted in writeUniformBuffer.
+ fTopLevelUniforms.push_back(&varDecl);
+ return true;
+ }
+
+ if (this->isDead(*var)) {
+ return true;
+ }
+
+ if (var->type().typeKind() == Type::TypeKind::kSampler && inDawnMode) {
+ if (var->modifiers().fLayout.fTexture == -1 || var->modifiers().fLayout.fSampler == -1 ||
+ !(var->modifiers().fLayout.fFlags & Layout::kWGSL_Flag)) {
+ fContext.fErrors->error(var->fPosition,
+ "SPIR-V dawn compatibility mode requires an explicit texture "
+ "and sampler index");
+ return false;
+ }
+ SkASSERT(storageClass == SpvStorageClassUniformConstant);
+
+ // Split the combined sampler into separate texture and sampler globals.
+ auto [texture, sampler] = this->synthesizeTextureAndSampler(*var);
+ this->writeGlobalVar(kind, storageClass, *texture);
+ this->writeGlobalVar(kind, storageClass, *sampler);
+
+ return true;
+ }
+
+ SpvId id = this->writeGlobalVar(kind, storageClass, *var);
+ if (id != NA && varDecl.value()) {
+ // Initializer expressions run at the top of main(); buffer them in
+ // fGlobalInitializersBuffer (see writeFunction).
+ SkASSERT(!fCurrentBlock);
+ fCurrentBlock = NA;
+ SpvId value = this->writeExpression(*varDecl.value(), fGlobalInitializersBuffer);
+ this->writeOpStore(storageClass, id, value, fGlobalInitializersBuffer);
+ fCurrentBlock = 0;
+ }
+ return true;
+}
+
+// Emits the OpVariable, OpName and layout/interpolation decorations for one
+// global variable, records it in fVariableMap, and returns its SpvId (or NA if
+// the variable is skipped).
+SpvId SPIRVCodeGenerator::writeGlobalVar(ProgramKind kind,
+ SpvStorageClass_ storageClass,
+ const Variable& var) {
+ // sk_FragColor only exists in fragment programs; skip it elsewhere.
+ if (var.modifiers().fLayout.fBuiltin == SK_FRAGCOLOR_BUILTIN &&
+ !ProgramConfig::IsFragment(kind)) {
+ SkASSERT(!fProgram.fConfig->fSettings.fFragColorIsInOut);
+ return NA;
+ }
+
+ // Add this global to the variable map.
+ const Type& type = var.type();
+ SpvId id = this->nextId(&type);
+ fVariableMap.set(&var, id);
+
+ // UniformConstant variables with no explicit set get the default uniform set.
+ Layout layout = var.modifiers().fLayout;
+ if (layout.fSet < 0 && storageClass == SpvStorageClassUniformConstant) {
+ layout.fSet = fProgram.fConfig->fSettings.fDefaultUniformSet;
+ }
+
+ SpvId typeId = this->getPointerType(type, storageClass);
+ this->writeInstruction(SpvOpVariable, typeId, id, storageClass, fConstantBuffer);
+ this->writeInstruction(SpvOpName, id, var.name(), fNameBuffer);
+ this->writeLayout(layout, id, var.fPosition);
+ if (var.modifiers().fFlags & Modifiers::kFlat_Flag) {
+ this->writeInstruction(SpvOpDecorate, id, SpvDecorationFlat, fDecorationBuffer);
+ }
+ if (var.modifiers().fFlags & Modifiers::kNoPerspective_Flag) {
+ this->writeInstruction(SpvOpDecorate, id, SpvDecorationNoPerspective,
+ fDecorationBuffer);
+ }
+
+ return id;
+}
+
+// Emits a function-local variable: the OpVariable goes into fVariableBuffer (so
+// it lands at the top of the entry block; see writeFunction), and the optional
+// initializer store is emitted inline into `out`.
+void SPIRVCodeGenerator::writeVarDeclaration(const VarDeclaration& varDecl, OutputStream& out) {
+ // If this variable is a compile-time constant then we'll emit OpConstant or
+ // OpConstantComposite later when the variable is referenced. Avoid declaring an OpVariable now.
+ if (is_vardecl_compile_time_constant(varDecl)) {
+ return;
+ }
+
+ const Variable* var = varDecl.var();
+ SpvId id = this->nextId(&var->type());
+ fVariableMap.set(var, id);
+ SpvId type = this->getPointerType(var->type(), SpvStorageClassFunction);
+ this->writeInstruction(SpvOpVariable, type, id, SpvStorageClassFunction, fVariableBuffer);
+ this->writeInstruction(SpvOpName, id, var->name(), fNameBuffer);
+ if (varDecl.value()) {
+ SpvId value = this->writeExpression(*varDecl.value(), out);
+ this->writeOpStore(SpvStorageClassFunction, id, value, out);
+ }
+}
+
+// Dispatches a statement to the appropriate writer based on its kind.
+// break/continue branch to the innermost targets tracked on fBreakTarget /
+// fContinueTarget; discard maps to OpKill.
+void SPIRVCodeGenerator::writeStatement(const Statement& s, OutputStream& out) {
+ switch (s.kind()) {
+ case Statement::Kind::kNop:
+ break;
+ case Statement::Kind::kBlock:
+ this->writeBlock(s.as<Block>(), out);
+ break;
+ case Statement::Kind::kExpression:
+ this->writeExpression(*s.as<ExpressionStatement>().expression(), out);
+ break;
+ case Statement::Kind::kReturn:
+ this->writeReturnStatement(s.as<ReturnStatement>(), out);
+ break;
+ case Statement::Kind::kVarDeclaration:
+ this->writeVarDeclaration(s.as<VarDeclaration>(), out);
+ break;
+ case Statement::Kind::kIf:
+ this->writeIfStatement(s.as<IfStatement>(), out);
+ break;
+ case Statement::Kind::kFor:
+ this->writeForStatement(s.as<ForStatement>(), out);
+ break;
+ case Statement::Kind::kDo:
+ this->writeDoStatement(s.as<DoStatement>(), out);
+ break;
+ case Statement::Kind::kSwitch:
+ this->writeSwitchStatement(s.as<SwitchStatement>(), out);
+ break;
+ case Statement::Kind::kBreak:
+ this->writeInstruction(SpvOpBranch, fBreakTarget.back(), out);
+ break;
+ case Statement::Kind::kContinue:
+ this->writeInstruction(SpvOpBranch, fContinueTarget.back(), out);
+ break;
+ case Statement::Kind::kDiscard:
+ this->writeInstruction(SpvOpKill, out);
+ break;
+ default:
+ SkDEBUGFAILF("unsupported statement: %s", s.description().c_str());
+ break;
+ }
+}
+
+// Emits each child statement of a block in order.
+void SPIRVCodeGenerator::writeBlock(const Block& b, OutputStream& out) {
+ for (const std::unique_ptr<Statement>& stmt : b.children()) {
+ this->writeStatement(*stmt, out);
+ }
+}
+
+// Snapshots the current sizes of the reachable-op and store-op lists; paired
+// with pruneConditionalOps() to roll caches back after conditional code.
+SPIRVCodeGenerator::ConditionalOpCounts SPIRVCodeGenerator::getConditionalOpCounts() {
+ return {fReachableOps.size(), fStoreOps.size()};
+}
+
+// Rolls the op and store caches back to a snapshot taken before a conditional
+// region, so values computed inside it can't be reused afterwards.
+void SPIRVCodeGenerator::pruneConditionalOps(ConditionalOpCounts ops) {
+ // Remove ops which are no longer reachable.
+ while (fReachableOps.size() > ops.numReachableOps) {
+ SpvId prunableSpvId = fReachableOps.back();
+ const Instruction* prunableOp = fSpvIdCache.find(prunableSpvId);
+
+ if (prunableOp) {
+ // Evict from both directions of the cache (instruction->id and id->instruction).
+ fOpCache.remove(*prunableOp);
+ fSpvIdCache.remove(prunableSpvId);
+ } else {
+ SkDEBUGFAIL("reachable-op list contains unrecognized SpvId");
+ }
+
+ fReachableOps.pop_back();
+ }
+
+ // Remove any cached stores that occurred during the conditional block.
+ while (fStoreOps.size() > ops.numStoreOps) {
+ if (fStoreCache.find(fStoreOps.back())) {
+ fStoreCache.remove(fStoreOps.back());
+ }
+ fStoreOps.pop_back();
+ }
+}
+
+// Emits an if/else as OpSelectionMerge + OpBranchConditional with true/false
+// (and merge) labels. When there is no else branch, `ifFalse` doubles as the
+// merge block.
+void SPIRVCodeGenerator::writeIfStatement(const IfStatement& stmt, OutputStream& out) {
+ SpvId test = this->writeExpression(*stmt.test(), out);
+ SpvId ifTrue = this->nextId(nullptr);
+ SpvId ifFalse = this->nextId(nullptr);
+
+ // Snapshot the caches so values defined inside either arm aren't reused later.
+ ConditionalOpCounts conditionalOps = this->getConditionalOpCounts();
+
+ if (stmt.ifFalse()) {
+ SpvId end = this->nextId(nullptr);
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, ifTrue, ifFalse, out);
+ this->writeLabel(ifTrue, kBranchIsOnPreviousLine, out);
+ this->writeStatement(*stmt.ifTrue(), out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, end, out);
+ }
+ this->writeLabel(ifFalse, kBranchIsAbove, conditionalOps, out);
+ this->writeStatement(*stmt.ifFalse(), out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, end, out);
+ }
+ this->writeLabel(end, kBranchIsAbove, conditionalOps, out);
+ } else {
+ // No else branch: ifFalse serves as the merge target.
+ this->writeInstruction(SpvOpSelectionMerge, ifFalse, SpvSelectionControlMaskNone, out);
+ this->writeInstruction(SpvOpBranchConditional, test, ifTrue, ifFalse, out);
+ this->writeLabel(ifTrue, kBranchIsOnPreviousLine, out);
+ this->writeStatement(*stmt.ifTrue(), out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, ifFalse, out);
+ }
+ this->writeLabel(ifFalse, kBranchIsAbove, conditionalOps, out);
+ }
+}
+
+// Emits a for-loop using the standard SPIR-V structured-loop shape:
+// header (OpLoopMerge) -> start (test) -> body -> next (increment) -> header,
+// with `end` as the merge/break target and `next` as the continue target.
+void SPIRVCodeGenerator::writeForStatement(const ForStatement& f, OutputStream& out) {
+ if (f.initializer()) {
+ this->writeStatement(*f.initializer(), out);
+ }
+
+ ConditionalOpCounts conditionalOps = this->getConditionalOpCounts();
+
+ // The store cache isn't trustworthy in the presence of branches; store caching only makes sense
+ // in the context of linear straight-line execution. If we wanted to be more clever, we could
+ // only invalidate store cache entries for variables affected by the loop body, but for now we
+ // simply clear the entire cache whenever branching occurs.
+ SpvId header = this->nextId(nullptr);
+ SpvId start = this->nextId(nullptr);
+ SpvId body = this->nextId(nullptr);
+ SpvId next = this->nextId(nullptr);
+ fContinueTarget.push_back(next);
+ SpvId end = this->nextId(nullptr);
+ fBreakTarget.push_back(end);
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(header, kBranchIsBelow, conditionalOps, out);
+ this->writeInstruction(SpvOpLoopMerge, end, next, SpvLoopControlMaskNone, out);
+ this->writeInstruction(SpvOpBranch, start, out);
+ this->writeLabel(start, kBranchIsOnPreviousLine, out);
+ // A missing test means an unconditional branch into the body (infinite loop
+ // unless broken out of).
+ if (f.test()) {
+ SpvId test = this->writeExpression(*f.test(), out);
+ this->writeInstruction(SpvOpBranchConditional, test, body, end, out);
+ } else {
+ this->writeInstruction(SpvOpBranch, body, out);
+ }
+ this->writeLabel(body, kBranchIsOnPreviousLine, out);
+ this->writeStatement(*f.statement(), out);
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, next, out);
+ }
+ this->writeLabel(next, kBranchIsAbove, conditionalOps, out);
+ if (f.next()) {
+ this->writeExpression(*f.next(), out);
+ }
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(end, kBranchIsAbove, conditionalOps, out);
+ fBreakTarget.pop_back();
+ fContinueTarget.pop_back();
+}
+
+// Emits a do-while loop: header (OpLoopMerge) -> start (body) -> continueTarget
+// (test) -> header or end. `continueTarget` is the continue target and `end` is
+// the merge/break target.
+void SPIRVCodeGenerator::writeDoStatement(const DoStatement& d, OutputStream& out) {
+ ConditionalOpCounts conditionalOps = this->getConditionalOpCounts();
+
+ // The store cache isn't trustworthy in the presence of branches; store caching only makes sense
+ // in the context of linear straight-line execution. If we wanted to be more clever, we could
+ // only invalidate store cache entries for variables affected by the loop body, but for now we
+ // simply clear the entire cache whenever branching occurs.
+ SpvId header = this->nextId(nullptr);
+ SpvId start = this->nextId(nullptr);
+ SpvId next = this->nextId(nullptr);
+ SpvId continueTarget = this->nextId(nullptr);
+ fContinueTarget.push_back(continueTarget);
+ SpvId end = this->nextId(nullptr);
+ fBreakTarget.push_back(end);
+ this->writeInstruction(SpvOpBranch, header, out);
+ this->writeLabel(header, kBranchIsBelow, conditionalOps, out);
+ this->writeInstruction(SpvOpLoopMerge, end, continueTarget, SpvLoopControlMaskNone, out);
+ this->writeInstruction(SpvOpBranch, start, out);
+ this->writeLabel(start, kBranchIsOnPreviousLine, out);
+ this->writeStatement(*d.statement(), out);
+ if (fCurrentBlock) {
+ // Intermediate `next` block keeps the branch structure well-formed before
+ // falling through to the continue target.
+ this->writeInstruction(SpvOpBranch, next, out);
+ this->writeLabel(next, kBranchIsOnPreviousLine, out);
+ this->writeInstruction(SpvOpBranch, continueTarget, out);
+ }
+ this->writeLabel(continueTarget, kBranchIsAbove, conditionalOps, out);
+ SpvId test = this->writeExpression(*d.test(), out);
+ this->writeInstruction(SpvOpBranchConditional, test, header, end, out);
+ this->writeLabel(end, kBranchIsAbove, conditionalOps, out);
+ fBreakTarget.pop_back();
+ fContinueTarget.pop_back();
+}
+
+// Emits a switch as OpSelectionMerge + OpSwitch. Adjacent empty cases are
+// collapsed onto the following case's label (the Tint SPIR-V reader does not
+// support fallthrough but does allow several cases sharing one label).
+void SPIRVCodeGenerator::writeSwitchStatement(const SwitchStatement& s, OutputStream& out) {
+ SpvId value = this->writeExpression(*s.value(), out);
+
+ ConditionalOpCounts conditionalOps = this->getConditionalOpCounts();
+
+ // The store cache isn't trustworthy in the presence of branches; store caching only makes sense
+ // in the context of linear straight-line execution. If we wanted to be more clever, we could
+ // only invalidate store cache entries for variables affected by the switch body, but for now we
+ // simply clear the entire cache whenever branching occurs.
+ SkTArray<SpvId> labels;
+ SpvId end = this->nextId(nullptr);
+ SpvId defaultLabel = end;
+ fBreakTarget.push_back(end);
+ // OpSwitch word count: opcode word + selector + default = 3, plus 2 words
+ // (value, label) per non-default case.
+ int size = 3;
+ const StatementArray& cases = s.cases();
+ for (const std::unique_ptr<Statement>& stmt : cases) {
+ const SwitchCase& c = stmt->as<SwitchCase>();
+ SpvId label = this->nextId(nullptr);
+ labels.push_back(label);
+ if (!c.isDefault()) {
+ size += 2;
+ } else {
+ defaultLabel = label;
+ }
+ }
+
+ // We should have exactly one label for each case.
+ SkASSERT(labels.size() == cases.size());
+
+ // Collapse adjacent switch-cases into one; that is, reduce `case 1: case 2: case 3:` into a
+ // single OpLabel. The Tint SPIR-V reader does not support switch-case fallthrough, but it
+ // does support multiple switch-cases branching to the same label.
+ SkBitSet caseIsCollapsed(cases.size());
+ for (int index = cases.size() - 2; index >= 0; index--) {
+ if (cases[index]->as<SwitchCase>().statement()->isEmpty()) {
+ caseIsCollapsed.set(index);
+ labels[index] = labels[index + 1];
+ }
+ }
+
+ // Sentinel so the last case can branch to labels[i + 1] == end.
+ labels.push_back(end);
+
+ this->writeInstruction(SpvOpSelectionMerge, end, SpvSelectionControlMaskNone, out);
+ this->writeOpCode(SpvOpSwitch, size, out);
+ this->writeWord(value, out);
+ this->writeWord(defaultLabel, out);
+ for (int i = 0; i < cases.size(); ++i) {
+ const SwitchCase& c = cases[i]->as<SwitchCase>();
+ if (c.isDefault()) {
+ continue;
+ }
+ this->writeWord(c.value(), out);
+ this->writeWord(labels[i], out);
+ }
+ for (int i = 0; i < cases.size(); ++i) {
+ if (caseIsCollapsed.test(i)) {
+ continue;
+ }
+ const SwitchCase& c = cases[i]->as<SwitchCase>();
+ if (i == 0) {
+ this->writeLabel(labels[i], kBranchIsOnPreviousLine, out);
+ } else {
+ this->writeLabel(labels[i], kBranchIsAbove, conditionalOps, out);
+ }
+ this->writeStatement(*c.statement(), out);
+ // A case that doesn't break/return falls through by branching to the next label.
+ if (fCurrentBlock) {
+ this->writeInstruction(SpvOpBranch, labels[i + 1], out);
+ }
+ }
+ this->writeLabel(end, kBranchIsAbove, conditionalOps, out);
+ fBreakTarget.pop_back();
+}
+
+// Emits OpReturnValue (with the evaluated expression) or a bare OpReturn.
+void SPIRVCodeGenerator::writeReturnStatement(const ReturnStatement& r, OutputStream& out) {
+ if (r.expression()) {
+ this->writeInstruction(SpvOpReturnValue, this->writeExpression(*r.expression(), out),
+ out);
+ } else {
+ this->writeInstruction(SpvOpReturn, out);
+ }
+}
+
+// Given any function, returns the top-level symbol table (OUTSIDE of the function's scope).
+// Note: assumes the function has a definition with a Block body; see callers.
+static std::shared_ptr<SymbolTable> get_top_level_symbol_table(const FunctionDeclaration& anyFunc) {
+ return anyFunc.definition()->body()->as<Block>().symbolTable()->fParent;
+}
+
+// Synthesizes a `void _entrypoint() { sk_FragColor = main(); }` wrapper so a
+// main() that returns a color can be used as the SPIR-V entry point. Returns an
+// empty adapter (and reports an error) if main()'s signature is unsupported.
+SPIRVCodeGenerator::EntrypointAdapter SPIRVCodeGenerator::writeEntrypointAdapter(
+ const FunctionDeclaration& main) {
+ // Our goal is to synthesize a tiny helper function which looks like this:
+ // void _entrypoint() { sk_FragColor = main(); }
+
+ // Fish a symbol table out of main().
+ std::shared_ptr<SymbolTable> symbolTable = get_top_level_symbol_table(main);
+
+ // Get `sk_FragColor` as a writable reference.
+ const Symbol* skFragColorSymbol = symbolTable->find("sk_FragColor");
+ SkASSERT(skFragColorSymbol);
+ const Variable& skFragColorVar = skFragColorSymbol->as<Variable>();
+ auto skFragColorRef = std::make_unique<VariableReference>(Position(), &skFragColorVar,
+ VariableReference::RefKind::kWrite);
+ // Synthesize a call to the `main()` function.
+ if (!main.returnType().matches(skFragColorRef->type())) {
+ fContext.fErrors->error(main.fPosition, "SPIR-V does not support returning '" +
+ main.returnType().description() + "' from main()");
+ return {};
+ }
+ ExpressionArray args;
+ if (main.parameters().size() == 1) {
+ // A single float2 parameter (fragment coordinate) is accepted and fed (0, 0).
+ if (!main.parameters()[0]->type().matches(*fContext.fTypes.fFloat2)) {
+ fContext.fErrors->error(main.fPosition,
+ "SPIR-V does not support parameter of type '" +
+ main.parameters()[0]->type().description() + "' to main()");
+ return {};
+ }
+ args.push_back(dsl::Float2(0).release());
+ }
+ auto callMainFn = std::make_unique<FunctionCall>(Position(), &main.returnType(), &main,
+ std::move(args));
+
+ // Synthesize `skFragColor = main()` as a BinaryExpression.
+ auto assignmentStmt = std::make_unique<ExpressionStatement>(std::make_unique<BinaryExpression>(
+ Position(),
+ std::move(skFragColorRef),
+ Operator::Kind::EQ,
+ std::move(callMainFn),
+ &main.returnType()));
+
+ // Function bodies are always wrapped in a Block.
+ StatementArray entrypointStmts;
+ entrypointStmts.push_back(std::move(assignmentStmt));
+ auto entrypointBlock = Block::Make(Position(), std::move(entrypointStmts),
+ Block::Kind::kBracedScope, symbolTable);
+ // Declare an entrypoint function.
+ EntrypointAdapter adapter;
+ adapter.fLayout = {};
+ adapter.fModifiers = Modifiers{adapter.fLayout, Modifiers::kNo_Flag};
+ adapter.entrypointDecl =
+ std::make_unique<FunctionDeclaration>(Position(),
+ &adapter.fModifiers,
+ "_entrypoint",
+ /*parameters=*/std::vector<Variable*>{},
+ /*returnType=*/fContext.fTypes.fVoid.get(),
+ /*builtin=*/false);
+ // Define it.
+ adapter.entrypointDef = FunctionDefinition::Convert(fContext,
+ Position(),
+ *adapter.entrypointDecl,
+ std::move(entrypointBlock),
+ /*builtin=*/false);
+
+ adapter.entrypointDecl->setDefinition(adapter.entrypointDef.get());
+ return adapter;
+}
+
+// Gathers all deferred top-level uniforms (collected in writeGlobalVarDeclaration)
+// into a single synthesized `_UniformBuffer` interface block, emits it, and
+// stores its SpvId in fUniformBufferId. fTopLevelUniformMap maps each uniform
+// variable to its field index within the struct.
+void SPIRVCodeGenerator::writeUniformBuffer(std::shared_ptr<SymbolTable> topLevelSymbolTable) {
+ SkASSERT(!fTopLevelUniforms.empty());
+ static constexpr char kUniformBufferName[] = "_UniformBuffer";
+
+ // Convert the list of top-level uniforms into a matching struct named _UniformBuffer, and build
+ // a lookup table of variables to UniformBuffer field indices.
+ std::vector<Type::Field> fields;
+ fields.reserve(fTopLevelUniforms.size());
+ for (const VarDeclaration* topLevelUniform : fTopLevelUniforms) {
+ const Variable* var = topLevelUniform->var();
+ fTopLevelUniformMap.set(var, (int)fields.size());
+ // The fields themselves must not carry the uniform flag; the block does.
+ Modifiers modifiers = var->modifiers();
+ modifiers.fFlags &= ~Modifiers::kUniform_Flag;
+ fields.emplace_back(var->fPosition, modifiers, var->name(), &var->type());
+ }
+ fUniformBuffer.fStruct = Type::MakeStructType(fContext,
+ Position(),
+ kUniformBufferName,
+ std::move(fields),
+ /*interfaceBlock=*/true);
+
+ // Create a global variable to contain this struct.
+ Layout layout;
+ layout.fBinding = fProgram.fConfig->fSettings.fDefaultUniformBinding;
+ layout.fSet = fProgram.fConfig->fSettings.fDefaultUniformSet;
+ Modifiers modifiers{layout, Modifiers::kUniform_Flag};
+
+ fUniformBuffer.fInnerVariable = std::make_unique<InterfaceBlockVariable>(
+ /*pos=*/Position(), /*modifiersPosition=*/Position(),
+ fContext.fModifiersPool->add(modifiers), kUniformBufferName,
+ fUniformBuffer.fStruct.get(), /*builtin=*/false, Variable::Storage::kGlobal);
+
+ // Create an interface block object for this global variable.
+ fUniformBuffer.fInterfaceBlock =
+ std::make_unique<InterfaceBlock>(Position(),
+ fUniformBuffer.fInnerVariable.get(),
+ topLevelSymbolTable);
+
+ // Generate an interface block and hold onto its ID.
+ fUniformBufferId = this->writeInterfaceBlock(*fUniformBuffer.fInterfaceBlock);
+}
+
+// Ensures the sk_RTFlip uniform exists. If no interface block has already
+// absorbed it (fWroteRTFlip), synthesizes a dedicated
+// `sksl_synthetic_uniforms` interface block containing only the flip float2,
+// either as a push constant or as a bound uniform block depending on settings.
+void SPIRVCodeGenerator::addRTFlipUniform(Position pos) {
+ SkASSERT(!fProgram.fConfig->fSettings.fForceNoRTFlip);
+
+ if (fWroteRTFlip) {
+ return;
+ }
+ // Flip variable hasn't been written yet. This means we don't have an existing
+ // interface block, so we're free to just synthesize one.
+ fWroteRTFlip = true;
+ std::vector<Type::Field> fields;
+ if (fProgram.fConfig->fSettings.fRTFlipOffset < 0) {
+ fContext.fErrors->error(pos, "RTFlipOffset is negative");
+ }
+ fields.emplace_back(pos,
+ Modifiers(Layout(/*flags=*/0,
+ /*location=*/-1,
+ fProgram.fConfig->fSettings.fRTFlipOffset,
+ /*binding=*/-1,
+ /*index=*/-1,
+ /*set=*/-1,
+ /*builtin=*/-1,
+ /*inputAttachmentIndex=*/-1),
+ /*flags=*/0),
+ SKSL_RTFLIP_NAME,
+ fContext.fTypes.fFloat2.get());
+ std::string_view name = "sksl_synthetic_uniforms";
+ const Type* intfStruct = fSynthetics.takeOwnershipOfSymbol(
+ Type::MakeStructType(fContext, Position(), name, fields, /*interfaceBlock=*/true));
+ // Without push constants, an explicit binding and set must be configured.
+ bool usePushConstants = fProgram.fConfig->fSettings.fUsePushConstants;
+ int binding = -1, set = -1;
+ if (!usePushConstants) {
+ binding = fProgram.fConfig->fSettings.fRTFlipBinding;
+ if (binding == -1) {
+ fContext.fErrors->error(pos, "layout(binding=...) is required in SPIR-V");
+ }
+ set = fProgram.fConfig->fSettings.fRTFlipSet;
+ if (set == -1) {
+ fContext.fErrors->error(pos, "layout(set=...) is required in SPIR-V");
+ }
+ }
+ int flags = usePushConstants ? Layout::Flag::kPushConstant_Flag : 0;
+ const Modifiers* modsPtr;
+ {
+ // Pool-allocated objects must be created while the program's pool is attached.
+ AutoAttachPoolToThread attach(fProgram.fPool.get());
+ Modifiers modifiers(Layout(flags,
+ /*location=*/-1,
+ /*offset=*/-1,
+ binding,
+ /*index=*/-1,
+ set,
+ /*builtin=*/-1,
+ /*inputAttachmentIndex=*/-1),
+ Modifiers::kUniform_Flag);
+ modsPtr = fContext.fModifiersPool->add(modifiers);
+ }
+ InterfaceBlockVariable* intfVar = fSynthetics.takeOwnershipOfSymbol(
+ std::make_unique<InterfaceBlockVariable>(/*pos=*/Position(),
+ /*modifiersPosition=*/Position(),
+ modsPtr,
+ name,
+ intfStruct,
+ /*builtin=*/false,
+ Variable::Storage::kGlobal));
+ // Mark as a synthesized variable so isDead() never elides it.
+ fSPIRVBonusVariables.add(intfVar);
+ {
+ AutoAttachPoolToThread attach(fProgram.fPool.get());
+ fProgram.fSymbols->add(std::make_unique<Field>(Position(), intfVar, /*field=*/0));
+ }
+ InterfaceBlock intf(Position(), intfVar, std::make_shared<SymbolTable>(/*builtin=*/false));
+ // appendRTFlip=false: this block IS the flip block; don't append another field.
+ this->writeInterfaceBlock(intf, false);
+}
+
+// Dawn-compat mode helper: splits a combined image-sampler variable into two
+// synthesized variables, "<name>_texture" (bound at layout(texture=...)) and
+// "<name>_sampler" (bound at layout(sampler=...)). Ownership of both is kept in
+// fSynthesizedSamplerMap; the returned pointers stay valid for the generator's
+// lifetime.
+std::tuple<const Variable*, const Variable*> SPIRVCodeGenerator::synthesizeTextureAndSampler(
+ const Variable& combinedSampler) {
+ SkASSERT(fProgram.fConfig->fSettings.fSPIRVDawnCompatMode);
+ SkASSERT(combinedSampler.type().typeKind() == Type::TypeKind::kSampler);
+
+ const Modifiers& modifiers = combinedSampler.modifiers();
+
+ auto data = std::make_unique<SynthesizedTextureSamplerPair>();
+
+ // The texture inherits the combined sampler's modifiers, with its binding taken
+ // from the layout's texture index.
+ Modifiers texModifiers = modifiers;
+ texModifiers.fLayout.fBinding = modifiers.fLayout.fTexture;
+ data->fTextureName = std::string(combinedSampler.name()) + "_texture";
+ auto texture = std::make_unique<Variable>(/*pos=*/Position(),
+ /*modifierPosition=*/Position(),
+ fContext.fModifiersPool->add(texModifiers),
+ data->fTextureName,
+ &combinedSampler.type().textureType(),
+ /*builtin=*/false,
+ Variable::Storage::kGlobal);
+
+ // Likewise, the sampler's binding comes from the layout's sampler index.
+ Modifiers samplerModifiers = modifiers;
+ samplerModifiers.fLayout.fBinding = modifiers.fLayout.fSampler;
+ data->fSamplerName = std::string(combinedSampler.name()) + "_sampler";
+ auto sampler = std::make_unique<Variable>(/*pos=*/Position(),
+ /*modifierPosition=*/Position(),
+ fContext.fModifiersPool->add(samplerModifiers),
+ data->fSamplerName,
+ fContext.fTypes.fSampler.get(),
+ /*builtin=*/false,
+ Variable::Storage::kGlobal);
+
+ const Variable* t = texture.get();
+ const Variable* s = sampler.get();
+ data->fTexture = std::move(texture);
+ data->fSampler = std::move(sampler);
+ fSynthesizedSamplerMap.set(&combinedSampler, std::move(data));
+
+ return {t, s};
+}
+
+void SPIRVCodeGenerator::writeInstructions(const Program& program, OutputStream& out) {
+ fGLSLExtendedInstructions = this->nextId(nullptr);
+ StringStream body;
+ // Assign SpvIds to functions.
+ const FunctionDeclaration* main = nullptr;
+ for (const ProgramElement* e : program.elements()) {
+ if (e->is<FunctionDefinition>()) {
+ const FunctionDefinition& funcDef = e->as<FunctionDefinition>();
+ const FunctionDeclaration& funcDecl = funcDef.declaration();
+ fFunctionMap.set(&funcDecl, this->nextId(nullptr));
+ if (funcDecl.isMain()) {
+ main = &funcDecl;
+ }
+ }
+ }
+ // Make sure we have a main() function.
+ if (!main) {
+ fContext.fErrors->error(Position(), "program does not contain a main() function");
+ return;
+ }
+ // Emit interface blocks.
+ std::set<SpvId> interfaceVars;
+ for (const ProgramElement* e : program.elements()) {
+ if (e->is<InterfaceBlock>()) {
+ const InterfaceBlock& intf = e->as<InterfaceBlock>();
+ SpvId id = this->writeInterfaceBlock(intf);
+
+ const Modifiers& modifiers = intf.var()->modifiers();
+ if ((modifiers.fFlags & (Modifiers::kIn_Flag | Modifiers::kOut_Flag)) &&
+ modifiers.fLayout.fBuiltin == -1 && !this->isDead(*intf.var())) {
+ interfaceVars.insert(id);
+ }
+ }
+ }
+ // Emit global variable declarations.
+ for (const ProgramElement* e : program.elements()) {
+ if (e->is<GlobalVarDeclaration>()) {
+ if (!this->writeGlobalVarDeclaration(program.fConfig->fKind,
+ e->as<GlobalVarDeclaration>().varDeclaration())) {
+ return;
+ }
+ }
+ }
+ // Emit top-level uniforms into a dedicated uniform buffer.
+ if (!fTopLevelUniforms.empty()) {
+ this->writeUniformBuffer(get_top_level_symbol_table(*main));
+ }
+ // If main() returns a half4, synthesize a tiny entrypoint function which invokes the real
+ // main() and stores the result into sk_FragColor.
+ EntrypointAdapter adapter;
+ if (main->returnType().matches(*fContext.fTypes.fHalf4)) {
+ adapter = this->writeEntrypointAdapter(*main);
+ if (adapter.entrypointDecl) {
+ fFunctionMap.set(adapter.entrypointDecl.get(), this->nextId(nullptr));
+ this->writeFunction(*adapter.entrypointDef, body);
+ main = adapter.entrypointDecl.get();
+ }
+ }
+ // Emit all the functions.
+ for (const ProgramElement* e : program.elements()) {
+ if (e->is<FunctionDefinition>()) {
+ this->writeFunction(e->as<FunctionDefinition>(), body);
+ }
+ }
+ // Add global in/out variables to the list of interface variables.
+ for (const auto& [var, spvId] : fVariableMap) {
+ if (var->storage() == Variable::Storage::kGlobal &&
+ (var->modifiers().fFlags & (Modifiers::kIn_Flag | Modifiers::kOut_Flag)) &&
+ !this->isDead(*var)) {
+ interfaceVars.insert(spvId);
+ }
+ }
+ this->writeCapabilities(out);
+ this->writeInstruction(SpvOpExtInstImport, fGLSLExtendedInstructions, "GLSL.std.450", out);
+ this->writeInstruction(SpvOpMemoryModel, SpvAddressingModelLogical, SpvMemoryModelGLSL450, out);
+ this->writeOpCode(SpvOpEntryPoint, (SpvId) (3 + (main->name().length() + 4) / 4) +
+ (int32_t) interfaceVars.size(), out);
+ if (ProgramConfig::IsVertex(program.fConfig->fKind)) {
+ this->writeWord(SpvExecutionModelVertex, out);
+ } else if (ProgramConfig::IsFragment(program.fConfig->fKind)) {
+ this->writeWord(SpvExecutionModelFragment, out);
+ } else {
+ SK_ABORT("cannot write this kind of program to SPIR-V\n");
+ }
+ SpvId entryPoint = fFunctionMap[main];
+ this->writeWord(entryPoint, out);
+ this->writeString(main->name(), out);
+ for (int var : interfaceVars) {
+ this->writeWord(var, out);
+ }
+ if (ProgramConfig::IsFragment(program.fConfig->fKind)) {
+ this->writeInstruction(SpvOpExecutionMode,
+ fFunctionMap[main],
+ SpvExecutionModeOriginUpperLeft,
+ out);
+ }
+ for (const ProgramElement* e : program.elements()) {
+ if (e->is<Extension>()) {
+ this->writeInstruction(SpvOpSourceExtension, e->as<Extension>().name(), out);
+ }
+ }
+
+ write_stringstream(fNameBuffer, out);
+ write_stringstream(fDecorationBuffer, out);
+ write_stringstream(fConstantBuffer, out);
+ write_stringstream(body, out);
+}
+
+bool SPIRVCodeGenerator::generateCode() {
+ SkASSERT(!fContext.fErrors->errorCount());
+ this->writeWord(SpvMagicNumber, *fOut);
+ this->writeWord(SpvVersion, *fOut);
+ this->writeWord(SKSL_MAGIC, *fOut);
+ StringStream buffer;
+ this->writeInstructions(fProgram, buffer);
+ this->writeWord(fIdCount, *fOut);
+ this->writeWord(0, *fOut); // reserved, always zero
+ write_stringstream(buffer, *fOut);
+ return fContext.fErrors->errorCount() == 0;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVCodeGenerator.h b/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVCodeGenerator.h
new file mode 100644
index 0000000000..59fca83ddb
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVCodeGenerator.h
@@ -0,0 +1,601 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SPIRVCODEGENERATOR
+#define SKSL_SPIRVCODEGENERATOR
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLMemoryLayout.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/codegen/SkSLCodeGenerator.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/spirv.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <tuple>
+#include <vector>
+
+namespace SkSL {
+
+class AnyConstructor;
+class BinaryExpression;
+class Block;
+class ConstructorCompound;
+class ConstructorCompoundCast;
+class ConstructorDiagonalMatrix;
+class ConstructorMatrixResize;
+class ConstructorScalarCast;
+class ConstructorSplat;
+class Context;
+class DoStatement;
+class Expression;
+class FieldAccess;
+class ForStatement;
+class FunctionCall;
+class IfStatement;
+class IndexExpression;
+class Literal;
+class Operator;
+class OutputStream;
+class Position;
+class PostfixExpression;
+class PrefixExpression;
+class ProgramElement;
+class ReturnStatement;
+class Statement;
+class SwitchStatement;
+class Swizzle;
+class TernaryExpression;
+class VarDeclaration;
+class VariableReference;
+enum class ProgramKind : int8_t;
+enum IntrinsicKind : int8_t;
+struct Program;
+
+/**
+ * Converts a Program into a SPIR-V binary.
+ */
+class SPIRVCodeGenerator : public CodeGenerator {
+public:
+ // We reserve an impossible SpvId as a sentinel. (NA meaning none, n/a, etc.)
+ static constexpr SpvId NA = (SpvId)-1;
+
+ class LValue {
+ public:
+ virtual ~LValue() {}
+
+ // returns a pointer to the lvalue, if possible. If the lvalue cannot be directly referenced
+ // by a pointer (e.g. vector swizzles), returns NA.
+ virtual SpvId getPointer() { return NA; }
+
+ // Returns true if a valid pointer returned by getPointer represents a memory object
+ // (see https://github.com/KhronosGroup/SPIRV-Tools/issues/2892). Has no meaning if
+ // getPointer() returns NA.
+ virtual bool isMemoryObjectPointer() const { return true; }
+
+ // Applies a swizzle to the components of the LValue, if possible. This is used to create
+        // LValues that are swizzles-of-swizzles. Non-swizzle LValues can just return false.
+ virtual bool applySwizzle(const ComponentArray& components, const Type& newType) {
+ return false;
+ }
+
+ virtual SpvId load(OutputStream& out) = 0;
+
+ virtual void store(SpvId value, OutputStream& out) = 0;
+ };
+
+ SPIRVCodeGenerator(const Context* context, const Program* program, OutputStream* out)
+ : INHERITED(context, program, out)
+ , fDefaultLayout(MemoryLayout::Standard::k140)
+ , fCapabilities(0)
+ , fIdCount(1)
+ , fCurrentBlock(0)
+ , fSynthetics(/*builtin=*/true) {}
+
+ bool generateCode() override;
+
+private:
+ enum IntrinsicOpcodeKind {
+ kGLSL_STD_450_IntrinsicOpcodeKind,
+ kSPIRV_IntrinsicOpcodeKind,
+ kSpecial_IntrinsicOpcodeKind,
+ kInvalid_IntrinsicOpcodeKind,
+ };
+
+ enum SpecialIntrinsic {
+ kAtan_SpecialIntrinsic,
+ kClamp_SpecialIntrinsic,
+ kMatrixCompMult_SpecialIntrinsic,
+ kMax_SpecialIntrinsic,
+ kMin_SpecialIntrinsic,
+ kMix_SpecialIntrinsic,
+ kMod_SpecialIntrinsic,
+ kDFdy_SpecialIntrinsic,
+ kSaturate_SpecialIntrinsic,
+ kSampledImage_SpecialIntrinsic,
+ kSmoothStep_SpecialIntrinsic,
+ kStep_SpecialIntrinsic,
+ kSubpassLoad_SpecialIntrinsic,
+ kTexture_SpecialIntrinsic,
+ kTextureGrad_SpecialIntrinsic,
+ kTextureLod_SpecialIntrinsic,
+ };
+
+ enum class Precision {
+ kDefault,
+ kRelaxed,
+ };
+
+ struct TempVar {
+ SpvId spvId;
+ const Type* type;
+ std::unique_ptr<SPIRVCodeGenerator::LValue> lvalue;
+ };
+
+ /**
+ * Pass in the type to automatically add a RelaxedPrecision decoration for the id when
+ * appropriate, or null to never add one.
+ */
+ SpvId nextId(const Type* type);
+
+ SpvId nextId(Precision precision);
+
+ SpvId getType(const Type& type);
+
+ SpvId getType(const Type& type, const MemoryLayout& layout);
+
+ SpvId getFunctionType(const FunctionDeclaration& function);
+
+ SpvId getFunctionParameterType(const Type& parameterType);
+
+ SpvId getPointerType(const Type& type, SpvStorageClass_ storageClass);
+
+ SpvId getPointerType(const Type& type, const MemoryLayout& layout,
+ SpvStorageClass_ storageClass);
+
+ SkTArray<SpvId> getAccessChain(const Expression& expr, OutputStream& out);
+
+ void writeLayout(const Layout& layout, SpvId target, Position pos);
+
+ void writeFieldLayout(const Layout& layout, SpvId target, int member);
+
+ SpvId writeStruct(const Type& type, const MemoryLayout& memoryLayout);
+
+ void writeProgramElement(const ProgramElement& pe, OutputStream& out);
+
+ SpvId writeInterfaceBlock(const InterfaceBlock& intf, bool appendRTFlip = true);
+
+ SpvId writeFunctionStart(const FunctionDeclaration& f, OutputStream& out);
+
+ SpvId writeFunctionDeclaration(const FunctionDeclaration& f, OutputStream& out);
+
+ SpvId writeFunction(const FunctionDefinition& f, OutputStream& out);
+
+ bool writeGlobalVarDeclaration(ProgramKind kind, const VarDeclaration& v);
+
+ SpvId writeGlobalVar(ProgramKind kind, SpvStorageClass_, const Variable& v);
+
+ void writeVarDeclaration(const VarDeclaration& var, OutputStream& out);
+
+ SpvId writeVariableReference(const VariableReference& ref, OutputStream& out);
+
+ int findUniformFieldIndex(const Variable& var) const;
+
+ std::unique_ptr<LValue> getLValue(const Expression& value, OutputStream& out);
+
+ SpvId writeExpression(const Expression& expr, OutputStream& out);
+
+ SpvId writeIntrinsicCall(const FunctionCall& c, OutputStream& out);
+
+ SpvId writeFunctionCallArgument(const FunctionCall& call,
+ int argIndex,
+ std::vector<TempVar>* tempVars,
+ OutputStream& out,
+ SpvId* outSynthesizedSamplerId = nullptr);
+
+ void copyBackTempVars(const std::vector<TempVar>& tempVars, OutputStream& out);
+
+ SpvId writeFunctionCall(const FunctionCall& c, OutputStream& out);
+
+
+ void writeGLSLExtendedInstruction(const Type& type, SpvId id, SpvId floatInst,
+ SpvId signedInst, SpvId unsignedInst,
+ const SkTArray<SpvId>& args, OutputStream& out);
+
+ /**
+ * Promotes an expression to a vector. If the expression is already a vector with vectorSize
+ * columns, returns it unmodified. If the expression is a scalar, either promotes it to a
+ * vector (if vectorSize > 1) or returns it unmodified (if vectorSize == 1). Asserts if the
+ * expression is already a vector and it does not have vectorSize columns.
+ */
+ SpvId vectorize(const Expression& expr, int vectorSize, OutputStream& out);
+
+ /**
+ * Given a list of potentially mixed scalars and vectors, promotes the scalars to match the
+ * size of the vectors and returns the ids of the written expressions. e.g. given (float, vec2),
+ * returns (vec2(float), vec2). It is an error to use mismatched vector sizes, e.g. (float,
+ * vec2, vec3).
+ */
+ SkTArray<SpvId> vectorize(const ExpressionArray& args, OutputStream& out);
+
+ SpvId writeSpecialIntrinsic(const FunctionCall& c, SpecialIntrinsic kind, OutputStream& out);
+
+ SpvId writeScalarToMatrixSplat(const Type& matrixType, SpvId scalarId, OutputStream& out);
+
+ SpvId writeFloatConstructor(const AnyConstructor& c, OutputStream& out);
+
+ SpvId castScalarToFloat(SpvId inputId, const Type& inputType, const Type& outputType,
+ OutputStream& out);
+
+ SpvId writeIntConstructor(const AnyConstructor& c, OutputStream& out);
+
+ SpvId castScalarToSignedInt(SpvId inputId, const Type& inputType, const Type& outputType,
+ OutputStream& out);
+
+ SpvId writeUIntConstructor(const AnyConstructor& c, OutputStream& out);
+
+ SpvId castScalarToUnsignedInt(SpvId inputId, const Type& inputType, const Type& outputType,
+ OutputStream& out);
+
+ SpvId writeBooleanConstructor(const AnyConstructor& c, OutputStream& out);
+
+ SpvId castScalarToBoolean(SpvId inputId, const Type& inputType, const Type& outputType,
+ OutputStream& out);
+
+ SpvId castScalarToType(SpvId inputExprId, const Type& inputType, const Type& outputType,
+ OutputStream& out);
+
+ /**
+ * Writes a potentially-different-sized copy of a matrix. Entries which do not exist in the
+ * source matrix are filled with zero; entries which do not exist in the destination matrix are
+ * ignored.
+ */
+ SpvId writeMatrixCopy(SpvId src, const Type& srcType, const Type& dstType, OutputStream& out);
+
+ void addColumnEntry(const Type& columnType, SkTArray<SpvId>* currentColumn,
+ SkTArray<SpvId>* columnIds, int rows, SpvId entry, OutputStream& out);
+
+ SpvId writeConstructorCompound(const ConstructorCompound& c, OutputStream& out);
+
+ SpvId writeMatrixConstructor(const ConstructorCompound& c, OutputStream& out);
+
+ SpvId writeVectorConstructor(const ConstructorCompound& c, OutputStream& out);
+
+ SpvId writeCompositeConstructor(const AnyConstructor& c, OutputStream& out);
+
+ SpvId writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c, OutputStream& out);
+
+ SpvId writeConstructorMatrixResize(const ConstructorMatrixResize& c, OutputStream& out);
+
+ SpvId writeConstructorScalarCast(const ConstructorScalarCast& c, OutputStream& out);
+
+ SpvId writeConstructorSplat(const ConstructorSplat& c, OutputStream& out);
+
+ SpvId writeConstructorCompoundCast(const ConstructorCompoundCast& c, OutputStream& out);
+
+ SpvId writeFieldAccess(const FieldAccess& f, OutputStream& out);
+
+ SpvId writeSwizzle(const Swizzle& swizzle, OutputStream& out);
+
+ /**
+ * Folds the potentially-vector result of a logical operation down to a single bool. If
+ * operandType is a vector type, assumes that the intermediate result in id is a bvec of the
+     * same dimensions, and applies all() to it to fold it down to a single bool value. Otherwise,
+ * returns the original id value.
+ */
+ SpvId foldToBool(SpvId id, const Type& operandType, SpvOp op, OutputStream& out);
+
+ SpvId writeMatrixComparison(const Type& operandType, SpvId lhs, SpvId rhs, SpvOp_ floatOperator,
+ SpvOp_ intOperator, SpvOp_ vectorMergeOperator,
+ SpvOp_ mergeOperator, OutputStream& out);
+
+ SpvId writeStructComparison(const Type& structType, SpvId lhs, Operator op, SpvId rhs,
+ OutputStream& out);
+
+ SpvId writeArrayComparison(const Type& structType, SpvId lhs, Operator op, SpvId rhs,
+ OutputStream& out);
+
+ // Used by writeStructComparison and writeArrayComparison to logically combine field-by-field
+ // comparisons into an overall comparison result.
+ // - `a.x == b.x` merged with `a.y == b.y` generates `(a.x == b.x) && (a.y == b.y)`
+ // - `a.x != b.x` merged with `a.y != b.y` generates `(a.x != b.x) || (a.y != b.y)`
+ SpvId mergeComparisons(SpvId comparison, SpvId allComparisons, Operator op, OutputStream& out);
+
+ SpvId writeComponentwiseMatrixUnary(const Type& operandType,
+ SpvId operand,
+ SpvOp_ op,
+ OutputStream& out);
+
+ SpvId writeComponentwiseMatrixBinary(const Type& operandType, SpvId lhs, SpvId rhs,
+ SpvOp_ op, OutputStream& out);
+
+ SpvId writeBinaryOperation(const Type& resultType, const Type& operandType, SpvId lhs,
+ SpvId rhs, SpvOp_ ifFloat, SpvOp_ ifInt, SpvOp_ ifUInt,
+ SpvOp_ ifBool, OutputStream& out);
+
+ SpvId writeReciprocal(const Type& type, SpvId value, OutputStream& out);
+
+ SpvId writeBinaryExpression(const Type& leftType, SpvId lhs, Operator op,
+ const Type& rightType, SpvId rhs, const Type& resultType,
+ OutputStream& out);
+
+ SpvId writeBinaryExpression(const BinaryExpression& b, OutputStream& out);
+
+ SpvId writeTernaryExpression(const TernaryExpression& t, OutputStream& out);
+
+ SpvId writeIndexExpression(const IndexExpression& expr, OutputStream& out);
+
+ SpvId writeLogicalAnd(const Expression& left, const Expression& right, OutputStream& out);
+
+ SpvId writeLogicalOr(const Expression& left, const Expression& right, OutputStream& out);
+
+ SpvId writePrefixExpression(const PrefixExpression& p, OutputStream& out);
+
+ SpvId writePostfixExpression(const PostfixExpression& p, OutputStream& out);
+
+ SpvId writeLiteral(const Literal& f);
+
+ SpvId writeLiteral(double value, const Type& type);
+
+ void writeStatement(const Statement& s, OutputStream& out);
+
+ void writeBlock(const Block& b, OutputStream& out);
+
+ void writeIfStatement(const IfStatement& stmt, OutputStream& out);
+
+ void writeForStatement(const ForStatement& f, OutputStream& out);
+
+ void writeDoStatement(const DoStatement& d, OutputStream& out);
+
+ void writeSwitchStatement(const SwitchStatement& s, OutputStream& out);
+
+ void writeReturnStatement(const ReturnStatement& r, OutputStream& out);
+
+ void writeCapabilities(OutputStream& out);
+
+ void writeInstructions(const Program& program, OutputStream& out);
+
+ void writeOpCode(SpvOp_ opCode, int length, OutputStream& out);
+
+ void writeWord(int32_t word, OutputStream& out);
+
+ void writeString(std::string_view s, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, std::string_view string, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, std::string_view string,
+ OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, std::string_view string,
+ OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3,
+ OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, int32_t word7, OutputStream& out);
+
+ void writeInstruction(SpvOp_ opCode, int32_t word1, int32_t word2, int32_t word3, int32_t word4,
+ int32_t word5, int32_t word6, int32_t word7, int32_t word8,
+ OutputStream& out);
+
+ // This form of writeInstruction can deduplicate redundant ops.
+ struct Word;
+ // 8 Words is enough for nearly all instructions (except variable-length instructions like
+ // OpAccessChain or OpConstantComposite).
+ using Words = SkSTArray<8, Word, true>;
+ SpvId writeInstruction(SpvOp_ opCode, const SkTArray<Word, true>& words, OutputStream& out);
+
+ struct Instruction {
+ SpvId fOp;
+ int32_t fResultKind;
+ SkSTArray<8, int32_t> fWords;
+
+ bool operator==(const Instruction& that) const;
+ struct Hash;
+ };
+
+ static Instruction BuildInstructionKey(SpvOp_ opCode, const SkTArray<Word, true>& words);
+
+ // The writeOpXxxxx calls will simplify and deduplicate ops where possible.
+ SpvId writeOpConstantTrue(const Type& type);
+ SpvId writeOpConstantFalse(const Type& type);
+ SpvId writeOpConstant(const Type& type, int32_t valueBits);
+ SpvId writeOpConstantComposite(const Type& type, const SkTArray<SpvId>& values);
+ SpvId writeOpCompositeConstruct(const Type& type, const SkTArray<SpvId>&, OutputStream& out);
+ SpvId writeOpCompositeExtract(const Type& type, SpvId base, int component, OutputStream& out);
+ SpvId writeOpCompositeExtract(const Type& type, SpvId base, int componentA, int componentB,
+ OutputStream& out);
+ SpvId writeOpLoad(SpvId type, Precision precision, SpvId pointer, OutputStream& out);
+ void writeOpStore(SpvStorageClass_ storageClass, SpvId pointer, SpvId value, OutputStream& out);
+
+ // Converts the provided SpvId(s) into an array of scalar OpConstants, if it can be done.
+ bool toConstants(SpvId value, SkTArray<SpvId>* constants);
+ bool toConstants(SkSpan<const SpvId> values, SkTArray<SpvId>* constants);
+
+ // Extracts the requested component SpvId from a composite instruction, if it can be done.
+ Instruction* resultTypeForInstruction(const Instruction& instr);
+ int numComponentsForVecInstruction(const Instruction& instr);
+ SpvId toComponent(SpvId id, int component);
+
+ struct ConditionalOpCounts {
+ int numReachableOps;
+ int numStoreOps;
+ };
+ ConditionalOpCounts getConditionalOpCounts();
+ void pruneConditionalOps(ConditionalOpCounts ops);
+
+ enum StraightLineLabelType {
+ // Use "BranchlessBlock" for blocks which are never explicitly branched-to at all. This
+ // happens at the start of a function, or when we find unreachable code.
+ kBranchlessBlock,
+
+ // Use "BranchIsOnPreviousLine" when writing a label that comes immediately after its
+ // associated branch. Example usage:
+ // - SPIR-V does not implicitly fall through from one block to the next, so you may need to
+ // use an OpBranch to explicitly jump to the next block, even when they are adjacent in
+ // the code.
+ // - The block immediately following an OpBranchConditional or OpSwitch.
+ kBranchIsOnPreviousLine,
+ };
+
+ enum BranchingLabelType {
+ // Use "BranchIsAbove" for labels which are referenced by OpBranch or OpBranchConditional
+ // ops that are above the label in the code--i.e., the branch skips forward in the code.
+ kBranchIsAbove,
+
+ // Use "BranchIsBelow" for labels which are referenced by OpBranch or OpBranchConditional
+ // ops below the label in the code--i.e., the branch jumps backward in the code.
+ kBranchIsBelow,
+
+ // Use "BranchesOnBothSides" for labels which have branches coming from both directions.
+ kBranchesOnBothSides,
+ };
+ void writeLabel(SpvId label, StraightLineLabelType type, OutputStream& out);
+ void writeLabel(SpvId label, BranchingLabelType type, ConditionalOpCounts ops,
+ OutputStream& out);
+
+ bool isDead(const Variable& var) const;
+
+ MemoryLayout memoryLayoutForStorageClass(SpvStorageClass_ storageClass);
+ MemoryLayout memoryLayoutForVariable(const Variable&) const;
+
+ struct EntrypointAdapter {
+ std::unique_ptr<FunctionDefinition> entrypointDef;
+ std::unique_ptr<FunctionDeclaration> entrypointDecl;
+ Layout fLayout;
+ Modifiers fModifiers;
+ };
+
+ EntrypointAdapter writeEntrypointAdapter(const FunctionDeclaration& main);
+
+ struct UniformBuffer {
+ std::unique_ptr<InterfaceBlock> fInterfaceBlock;
+ std::unique_ptr<Variable> fInnerVariable;
+ std::unique_ptr<Type> fStruct;
+ };
+
+ void writeUniformBuffer(std::shared_ptr<SymbolTable> topLevelSymbolTable);
+
+ void addRTFlipUniform(Position pos);
+
+ std::tuple<const Variable*, const Variable*> synthesizeTextureAndSampler(
+ const Variable& combinedSampler);
+
+ const MemoryLayout fDefaultLayout;
+
+ uint64_t fCapabilities;
+ SpvId fIdCount;
+ SpvId fGLSLExtendedInstructions;
+ struct Intrinsic {
+ IntrinsicOpcodeKind opKind;
+ int32_t floatOp;
+ int32_t signedOp;
+ int32_t unsignedOp;
+ int32_t boolOp;
+ };
+ Intrinsic getIntrinsic(IntrinsicKind) const;
+ SkTHashMap<const FunctionDeclaration*, SpvId> fFunctionMap;
+ SkTHashMap<const Variable*, SpvId> fVariableMap;
+ SkTHashMap<const Type*, SpvId> fStructMap;
+ StringStream fGlobalInitializersBuffer;
+ StringStream fConstantBuffer;
+ StringStream fVariableBuffer;
+ StringStream fNameBuffer;
+ StringStream fDecorationBuffer;
+
+ // Mapping from combined sampler declarations to synthesized texture/sampler variables.
+ // This is only used if the SPIRVDawnCompatMode setting is enabled.
+ // TODO(skia:14023): Remove when WGSL codegen is complete
+ struct SynthesizedTextureSamplerPair {
+ // The names of the synthesized variables. The Variable objects themselves store string
+ // views referencing these strings. It is important for the std::string instances to have a
+ // fixed memory location after the string views get created, which is why
+ // `fSynthesizedSamplerMap` stores unique_ptr instead of values.
+ std::string fTextureName;
+ std::string fSamplerName;
+ std::unique_ptr<Variable> fTexture;
+ std::unique_ptr<Variable> fSampler;
+ };
+ SkTHashMap<const Variable*, std::unique_ptr<SynthesizedTextureSamplerPair>>
+ fSynthesizedSamplerMap;
+
+ // These caches map SpvIds to Instructions, and vice-versa. This enables us to deduplicate code
+ // (by detecting an Instruction we've already issued and reusing the SpvId), and to introspect
+ // and simplify code we've already emitted (by taking a SpvId from an Instruction and following
+ // it back to its source).
+ SkTHashMap<Instruction, SpvId, Instruction::Hash> fOpCache; // maps instruction -> SpvId
+ SkTHashMap<SpvId, Instruction> fSpvIdCache; // maps SpvId -> instruction
+ SkTHashMap<SpvId, SpvId> fStoreCache; // maps ptr SpvId -> value SpvId
+
+ // "Reachable" ops are instructions which can safely be accessed from the current block.
+ // For instance, if our SPIR-V contains `%3 = OpFAdd %1 %2`, we would be able to access and
+ // reuse that computation on following lines. However, if that Add operation occurred inside an
+ // `if` block, then its SpvId becomes inaccessible once we complete the if statement (since
+ // depending on the if condition, we may or may not have actually done that computation). The
+ // same logic applies to other control-flow blocks as well. Once an instruction becomes
+ // unreachable, we remove it from both op-caches.
+ SkTArray<SpvId> fReachableOps;
+
+ // The "store-ops" list contains a running list of all the pointers in the store cache. If a
+ // store occurs inside of a conditional block, once that block exits, we no longer know what is
+ // stored in that particular SpvId. At that point, we must remove any associated entry from the
+ // store cache.
+ SkTArray<SpvId> fStoreOps;
+
+ // label of the current block, or 0 if we are not in a block
+ SpvId fCurrentBlock;
+ SkTArray<SpvId> fBreakTarget;
+ SkTArray<SpvId> fContinueTarget;
+ bool fWroteRTFlip = false;
+ // holds variables synthesized during output, for lifetime purposes
+ SymbolTable fSynthetics;
+ // Holds a list of uniforms that were declared as globals at the top-level instead of in an
+ // interface block.
+ UniformBuffer fUniformBuffer;
+ std::vector<const VarDeclaration*> fTopLevelUniforms;
+ SkTHashMap<const Variable*, int> fTopLevelUniformMap; // <var, UniformBuffer field index>
+ SkTHashSet<const Variable*> fSPIRVBonusVariables;
+ SpvId fUniformBufferId = NA;
+
+ friend class PointerLValue;
+ friend class SwizzleLValue;
+
+ using INHERITED = CodeGenerator;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVtoHLSL.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVtoHLSL.cpp
new file mode 100644
index 0000000000..98bb969c5b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVtoHLSL.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/codegen/SkSLSPIRVtoHLSL.h"
+
+#if defined(SK_ENABLE_SPIRV_CROSS)
+
+#include <spirv_hlsl.hpp>
+
+/*
+ * This translation unit serves as a bridge between Skia/SkSL and SPIRV-Cross.
+ * Each library is built with a separate copy of spirv.h (or spirv.hpp), so we
+ * avoid conflicts by never including both in the same cpp.
+ */
+
+namespace SkSL {
+
+bool SPIRVtoHLSL(const std::string& spirv, std::string* hlsl) {
+ spirv_cross::CompilerHLSL hlslCompiler((const uint32_t*)spirv.c_str(),
+ spirv.size() / sizeof(uint32_t));
+
+ spirv_cross::CompilerGLSL::Options optionsGLSL;
+ // Force all uninitialized variables to be 0, otherwise they will fail to compile
+ // by FXC.
+ optionsGLSL.force_zero_initialized_variables = true;
+
+ spirv_cross::CompilerHLSL::Options optionsHLSL;
+ optionsHLSL.shader_model = 51;
+ // PointCoord and PointSize are not supported in HLSL
+ optionsHLSL.point_coord_compat = true;
+ optionsHLSL.point_size_compat = true;
+
+ hlslCompiler.set_common_options(optionsGLSL);
+ hlslCompiler.set_hlsl_options(optionsHLSL);
+ hlsl->assign(hlslCompiler.compile());
+ return true;
+}
+
+}
+
+#else
+
+namespace SkSL { bool SPIRVtoHLSL(const std::string&, std::string*) { return false; } }
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVtoHLSL.h b/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVtoHLSL.h
new file mode 100644
index 0000000000..5207546a67
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLSPIRVtoHLSL.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SPIRVTOHLSL
+#define SKSL_SPIRVTOHLSL
+
+#include <string>
+
+namespace SkSL {
+
+bool SPIRVtoHLSL(const std::string& spirv, std::string* hlsl);
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLVMCodeGenerator.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLVMCodeGenerator.cpp
new file mode 100644
index 0000000000..33eab93b9a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLVMCodeGenerator.cpp
@@ -0,0 +1,2302 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/codegen/SkSLVMCodeGenerator.h"
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTPin.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkStringView.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLChildCall.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLConstructorArrayCast.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorMatrixResize.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/tracing/SkSLDebugInfo.h"
+#include "src/sksl/tracing/SkSLTraceHook.h"
+#include "src/sksl/tracing/SkVMDebugTrace.h"
+
+#include <algorithm>
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+namespace {
+ // sksl allows the optimizations of fast_mul(), so we want to use that most of the time.
+ // This little sneaky snippet of code lets us use ** as a fast multiply infix operator.
+ struct FastF32 { skvm::F32 val; };
+ static FastF32 operator*(skvm::F32 y) { return {y}; }
+ static skvm::F32 operator*(skvm::F32 x, FastF32 y) { return fast_mul(x, y.val); }
+ static skvm::F32 operator*(float x, FastF32 y) { return fast_mul(x, y.val); }
+}
+
+namespace SkSL {
+
+namespace {
+
+// Holds scalars, vectors, or matrices
+struct Value {
+ Value() = default;
+ explicit Value(size_t slots) {
+ fVals.resize(slots);
+ }
+ Value(skvm::F32 x) : fVals({ x.id }) {}
+ Value(skvm::I32 x) : fVals({ x.id }) {}
+
+ explicit operator bool() const { return !fVals.empty(); }
+
+ size_t slots() const { return fVals.size(); }
+
+ struct ValRef {
+ ValRef(skvm::Val& val) : fVal(val) {}
+
+ ValRef& operator=(ValRef v) { fVal = v.fVal; return *this; }
+ ValRef& operator=(skvm::Val v) { fVal = v; return *this; }
+ ValRef& operator=(skvm::F32 v) { fVal = v.id; return *this; }
+ ValRef& operator=(skvm::I32 v) { fVal = v.id; return *this; }
+
+ operator skvm::Val() { return fVal; }
+
+ skvm::Val& fVal;
+ };
+
+ ValRef operator[](int i) {
+ // These redundant asserts work around what we think is a codegen bug in GCC 8.x for
+ // 32-bit x86 Debug builds.
+ SkASSERT(i < fVals.size());
+ return fVals[i];
+ }
+ skvm::Val operator[](int i) const {
+ // These redundant asserts work around what we think is a codegen bug in GCC 8.x for
+ // 32-bit x86 Debug builds.
+ SkASSERT(i < fVals.size());
+ return fVals[i];
+ }
+
+ SkSpan<skvm::Val> asSpan() { return SkSpan(fVals); }
+
+private:
+ SkSTArray<4, skvm::Val, true> fVals;
+};
+
+} // namespace
+
+class SkVMGenerator {
+public:
+ SkVMGenerator(const Program& program,
+ skvm::Builder* builder,
+ SkVMDebugTrace* debugTrace,
+ SkVMCallbacks* callbacks);
+
+ void writeProgram(SkSpan<skvm::Val> uniforms,
+ skvm::Coord device,
+ const FunctionDefinition& function,
+ SkSpan<skvm::Val> arguments,
+ SkSpan<skvm::Val> outReturn);
+
+private:
+ /**
+ * In SkSL, a Variable represents a named, typed value (along with qualifiers, etc).
+ * Every Variable is mapped to one (or several, contiguous) indices into our vector of
+ * skvm::Val. Those skvm::Val entries hold the current actual value of that variable.
+ *
+ * NOTE: Conceptually, each Variable is just mapped to a Value. We could implement it that way,
+ * (and eliminate the indirection), but it would add overhead for each Variable,
+ * and add additional (different) bookkeeping for things like lvalue-swizzles.
+ *
+ * Any time a variable appears in an expression, that's a VariableReference, which is a kind of
+ * Expression. Evaluating that VariableReference (or any other Expression) produces a Value,
+ * which is a set of skvm::Val. (This allows an Expression to produce a vector or matrix, in
+ * addition to a scalar).
+ *
+ * For a VariableReference, producing a Value is straightforward - we get the slot of the
+ * Variable (from fSlotMap), use that to look up the current skvm::Vals holding the variable's
+ * contents, and construct a Value with those ids.
+ */
+
+ /** Creates a Value from a collection of adjacent slots. */
+ Value getSlotValue(size_t slot, size_t nslots);
+
+ /**
+ * Returns the slot index of this function inside the FunctionDebugInfo array in SkVMDebugTrace.
+ * The FunctionDebugInfo slot will be created if it doesn't already exist.
+ */
+ int getDebugFunctionInfo(const FunctionDeclaration& decl);
+
+ /** Used by `createSlot` to add this variable to SlotDebugInfo inside SkVMDebugTrace. */
+ void addDebugSlotInfo(const std::string& varName, const Type& type, int line,
+ int fnReturnValue);
+
+ void addDebugSlotInfoForGroup(const std::string& varName, const Type& type, int line,
+ int* groupIndex, int fnReturnValue);
+
+ /** Used by `getSlot` to create a new slot on its first access. */
+ size_t createSlot(const std::string& name, const Type& type, int line, int fnReturnValue);
+
+ /**
+ * Returns the slot holding v's Val(s). Allocates storage if this is first time 'v' is
+ * referenced. Compound variables (e.g. vectors) will consume more than one slot, with
+ * getSlot returning the start of the contiguous chunk of slots.
+ */
+ size_t getSlot(const Variable& v);
+
+ /**
+ * Returns the slot holding fn's return value. Each call site is given a distinct slot, since
+ * multiple calls to the same function can occur in a single statement. This is generally the
+ * FunctionCall or ChildCall node, but main() doesn't have one of these so it uses the
+ * FunctionDefinition. Allocates storage if this is first time accessing the slot.
+ */
+ size_t getFunctionSlot(const IRNode& callSite, const FunctionDefinition& fn);
+
+ /**
+ * Writes a value to a slot previously created by getSlot.
+ */
+ void writeToSlot(int slot, skvm::Val value);
+
+ /**
+ * Returns the line number corresponding to a position.
+ */
+ int getLine(Position pos);
+
+ /**
+ * Emits an trace_line opcode. writeStatement does this, and statements that alter control flow
+ * may need to explicitly add additional traces.
+ */
+ void emitTraceLine(int line);
+
+ /** Emits an trace_scope opcode, which alters the SkSL variable-scope depth. */
+ void emitTraceScope(skvm::I32 executionMask, int delta);
+
+ /** Initializes uniforms and global variables at the start of main(). */
+ void setupGlobals(SkSpan<skvm::Val> uniforms, skvm::Coord device);
+
+ /** Emits an SkSL function. Returns the slot index of the SkSL function's return value. */
+ size_t writeFunction(const IRNode& caller,
+ const FunctionDefinition& function,
+ SkSpan<skvm::Val> arguments);
+
+ skvm::F32 f32(skvm::Val id) { SkASSERT(id != skvm::NA); return {fBuilder, id}; }
+ skvm::I32 i32(skvm::Val id) { SkASSERT(id != skvm::NA); return {fBuilder, id}; }
+
+ // Shorthand for scalars
+ skvm::F32 f32(const Value& v) { SkASSERT(v.slots() == 1); return f32(v[0]); }
+ skvm::I32 i32(const Value& v) { SkASSERT(v.slots() == 1); return i32(v[0]); }
+
+ template <typename Fn>
+ Value unary(const Value& v, Fn&& fn) {
+ Value result(v.slots());
+ for (size_t i = 0; i < v.slots(); ++i) {
+ result[i] = fn({fBuilder, v[i]});
+ }
+ return result;
+ }
+
+ skvm::I32 mask() {
+ // Mask off execution if we have encountered `break` or `continue` on this path.
+ skvm::I32 result = fConditionMask & fLoopMask;
+ if (!fFunctionStack.empty()) {
+ // As we encounter (possibly conditional) return statements, fReturned is updated to
+ // store the lanes that have already returned. For the remainder of the current
+ // function, those lanes should be disabled.
+ result = result & ~currentFunction().fReturned;
+ }
+ return result;
+ }
+
+ size_t indexSlotOffset(const IndexExpression& expr);
+
+ Value writeExpression(const Expression& expr);
+ Value writeBinaryExpression(const BinaryExpression& b);
+ Value writeAggregationConstructor(const AnyConstructor& c);
+ Value writeChildCall(const ChildCall& c);
+ Value writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c);
+ Value writeConstructorMatrixResize(const ConstructorMatrixResize& c);
+ Value writeConstructorCast(const AnyConstructor& c);
+ Value writeConstructorSplat(const ConstructorSplat& c);
+ Value writeFunctionCall(const FunctionCall& c);
+ Value writeFieldAccess(const FieldAccess& expr);
+ Value writeLiteral(const Literal& l);
+ Value writeIndexExpression(const IndexExpression& expr);
+ Value writeIntrinsicCall(const FunctionCall& c);
+ Value writePostfixExpression(const PostfixExpression& p);
+ Value writePrefixExpression(const PrefixExpression& p);
+ Value writeSwizzle(const Swizzle& swizzle);
+ Value writeTernaryExpression(const TernaryExpression& t);
+ Value writeVariableExpression(const VariableReference& expr);
+
+ Value writeTypeConversion(const Value& src, Type::NumberKind srcKind, Type::NumberKind dstKind);
+
+ void writeStatement(const Statement& s);
+ void writeBlock(const Block& b);
+ void writeBreakStatement();
+ void writeContinueStatement();
+ void writeForStatement(const ForStatement& f);
+ void writeIfStatement(const IfStatement& stmt);
+ void writeReturnStatement(const ReturnStatement& r);
+ void writeSwitchStatement(const SwitchStatement& s);
+ void writeVarDeclaration(const VarDeclaration& decl);
+
+ Value writeStore(const Expression& lhs, const Value& rhs);
+ skvm::Val writeConditionalStore(skvm::Val lhs, skvm::Val rhs, skvm::I32 mask);
+
+ Value writeMatrixInverse2x2(const Value& m);
+ Value writeMatrixInverse3x3(const Value& m);
+ Value writeMatrixInverse4x4(const Value& m);
+
+ void recursiveBinaryCompare(const Value& lVal, const Type& lType,
+ const Value& rVal, const Type& rType,
+ size_t* slotOffset, Value* result,
+ const std::function <Value(skvm::F32 x, skvm::F32 y)>& float_comp,
+ const std::function <Value(skvm::I32 x, skvm::I32 y)>& int_comp);
+
+ void determineLineOffsets();
+
+ //
+ // Global state for the lifetime of the generator:
+ //
+ const Program& fProgram;
+ skvm::Builder* fBuilder;
+ SkVMDebugTrace* fDebugTrace;
+ int fTraceHookID = -1;
+ SkVMCallbacks* fCallbacks;
+ // contains the position of each newline in the source, plus a zero at the beginning and the
+ // total source length at the end as sentinels
+ std::vector<int> fLineOffsets;
+
+ struct Slot {
+ skvm::Val val;
+ bool writtenTo = false;
+ };
+ std::vector<Slot> fSlots;
+
+ // [Variable/Function, first slot in fSlots]
+ SkTHashMap<const IRNode*, size_t> fSlotMap;
+
+ // Debug trace mask (set to true when fTraceCoord matches device coordinates)
+ skvm::I32 fTraceMask;
+
+ // Conditional execution mask (managed by ScopedCondition, and tied to control-flow scopes)
+ skvm::I32 fConditionMask;
+
+ // Similar: loop execution masks. Each loop starts with all lanes active (fLoopMask).
+ // 'break' disables a lane in fLoopMask until the loop finishes
+ // 'continue' disables a lane in fLoopMask, and sets fContinueMask to be re-enabled on the next
+ // iteration
+ skvm::I32 fLoopMask;
+ skvm::I32 fContinueMask;
+
+ // `fInsideCompoundStatement` will be nonzero if we are currently writing statements inside of a
+ // compound-statement Block. (Conceptually those statements should all count as one.)
+ int fInsideCompoundStatement = 0;
+
+ //
+ // State that's local to the generation of a single function:
+ //
+ struct Function {
+ size_t fReturnSlot;
+ skvm::I32 fReturned;
+ };
+ std::vector<Function> fFunctionStack;
+ Function& currentFunction() { return fFunctionStack.back(); }
+
+ class ScopedCondition {
+ public:
+ ScopedCondition(SkVMGenerator* generator, skvm::I32 mask)
+ : fGenerator(generator), fOldConditionMask(fGenerator->fConditionMask) {
+ fGenerator->fConditionMask &= mask;
+ }
+
+ ~ScopedCondition() { fGenerator->fConditionMask = fOldConditionMask; }
+
+ private:
+ SkVMGenerator* fGenerator;
+ skvm::I32 fOldConditionMask;
+ };
+};
+
+static Type::NumberKind base_number_kind(const Type& type) {
+ if (type.typeKind() == Type::TypeKind::kMatrix || type.typeKind() == Type::TypeKind::kVector) {
+ return base_number_kind(type.componentType());
+ }
+ return type.numberKind();
+}
+
+static inline bool is_uniform(const SkSL::Variable& var) {
+ return var.modifiers().fFlags & Modifiers::kUniform_Flag;
+}
+
+SkVMGenerator::SkVMGenerator(const Program& program,
+ skvm::Builder* builder,
+ SkVMDebugTrace* debugTrace,
+ SkVMCallbacks* callbacks)
+ : fProgram(program)
+ , fBuilder(builder)
+ , fDebugTrace(debugTrace)
+ , fCallbacks(callbacks) {}
+
+void SkVMGenerator::writeProgram(SkSpan<skvm::Val> uniforms,
+ skvm::Coord device,
+ const FunctionDefinition& function,
+ SkSpan<skvm::Val> arguments,
+ SkSpan<skvm::Val> outReturn) {
+ this->determineLineOffsets();
+ fConditionMask = fLoopMask = fBuilder->splat(0xffff'ffff);
+
+ this->setupGlobals(uniforms, device);
+ size_t returnSlot = this->writeFunction(function, function, arguments);
+
+ // Copy the value from the return slot into outReturn.
+ SkASSERT(function.declaration().returnType().slotCount() == outReturn.size());
+ for (size_t i = 0; i < outReturn.size(); ++i) {
+ outReturn[i] = fSlots[returnSlot + i].val;
+ }
+}
+
+void SkVMGenerator::determineLineOffsets() {
+ SkASSERT(fLineOffsets.empty());
+ fLineOffsets.push_back(0);
+ for (size_t i = 0; i < fProgram.fSource->length(); ++i) {
+ if ((*fProgram.fSource)[i] == '\n') {
+ fLineOffsets.push_back(i);
+ }
+ }
+ fLineOffsets.push_back(fProgram.fSource->length());
+}
+
+void SkVMGenerator::setupGlobals(SkSpan<skvm::Val> uniforms, skvm::Coord device) {
+ if (fDebugTrace) {
+ // Copy the program source into the debug info so that it will be written in the trace file.
+ fDebugTrace->setSource(*fProgram.fSource);
+
+ // Create a trace hook and attach it to the builder.
+ fDebugTrace->fTraceHook = SkSL::Tracer::Make(&fDebugTrace->fTraceInfo);
+ fTraceHookID = fBuilder->attachTraceHook(fDebugTrace->fTraceHook.get());
+
+ // The SkVM blitter generates centered pixel coordinates. (0.5, 1.5, 2.5, 3.5, etc.)
+ // Add 0.5 to the requested trace coordinate to match this.
+ skvm::Coord traceCoord = {to_F32(fBuilder->splat(fDebugTrace->fTraceCoord.fX)) + 0.5f,
+ to_F32(fBuilder->splat(fDebugTrace->fTraceCoord.fY)) + 0.5f};
+
+ // If we are debugging, we need to create a trace mask. This will be true when the current
+ // device coordinates match the requested trace coordinates. We calculate each mask
+ // individually to guarantee consistent order-of-evaluation.
+ skvm::I32 xMask = (device.x == traceCoord.x),
+ yMask = (device.y == traceCoord.y);
+ fTraceMask = xMask & yMask;
+ }
+
+ // Add storage for each global variable (including uniforms) to fSlots, and entries in
+ // fSlotMap to remember where every variable is stored.
+ const skvm::Val* uniformIter = uniforms.begin();
+ size_t fpCount = 0;
+ for (const ProgramElement* e : fProgram.elements()) {
+ if (e->is<GlobalVarDeclaration>()) {
+ const GlobalVarDeclaration& gvd = e->as<GlobalVarDeclaration>();
+ const VarDeclaration& decl = gvd.varDeclaration();
+ const Variable* var = decl.var();
+ SkASSERT(!fSlotMap.find(var));
+
+ // For most variables, fSlotMap stores an index into fSlots, but for children,
+ // fSlotMap stores the index to pass to fSample(Shader|ColorFilter|Blender)
+ if (var->type().isEffectChild()) {
+ fSlotMap.set(var, fpCount++);
+ continue;
+ }
+
+ // Opaque types include child processors and GL objects (samplers, textures, etc).
+ // Of those, only child processors are legal variables.
+ SkASSERT(!var->type().isVoid());
+ SkASSERT(!var->type().isOpaque());
+
+ // getSlot() allocates space for the variable's value in fSlots, initializes it to zero,
+ // and populates fSlotMap.
+ size_t slot = this->getSlot(*var),
+ nslots = var->type().slotCount();
+
+ // builtin variables are system-defined, with special semantics. The only builtin
+ // variable exposed to runtime effects is sk_FragCoord.
+ if (int builtin = var->modifiers().fLayout.fBuiltin; builtin >= 0) {
+ switch (builtin) {
+ case SK_FRAGCOORD_BUILTIN:
+ SkASSERT(nslots == 4);
+ this->writeToSlot(slot + 0, device.x.id);
+ this->writeToSlot(slot + 1, device.y.id);
+ this->writeToSlot(slot + 2, fBuilder->splat(0.0f).id);
+ this->writeToSlot(slot + 3, fBuilder->splat(1.0f).id);
+ break;
+ default:
+ SkDEBUGFAILF("Unsupported builtin %d", builtin);
+ }
+ continue;
+ }
+
+ // For uniforms, copy the supplied IDs over
+ if (is_uniform(*var)) {
+ SkASSERT(uniformIter + nslots <= uniforms.end());
+ for (size_t i = 0; i < nslots; ++i) {
+ this->writeToSlot(slot + i, uniformIter[i]);
+ }
+ uniformIter += nslots;
+ continue;
+ }
+
+ // For other globals, populate with the initializer expression (if there is one)
+ if (decl.value()) {
+ Value val = this->writeExpression(*decl.value());
+ for (size_t i = 0; i < nslots; ++i) {
+ this->writeToSlot(slot + i, val[i]);
+ }
+ }
+ }
+ }
+ SkASSERT(uniformIter == uniforms.end());
+}
+
+Value SkVMGenerator::getSlotValue(size_t slot, size_t nslots) {
+ Value val(nslots);
+ for (size_t i = 0; i < nslots; ++i) {
+ val[i] = fSlots[slot + i].val;
+ }
+ return val;
+}
+
+int SkVMGenerator::getDebugFunctionInfo(const FunctionDeclaration& decl) {
+ SkASSERT(fDebugTrace);
+
+ std::string name = decl.description();
+
+ // When generating the debug trace, we typically mark every function as `noinline`. This makes
+ // the trace more confusing, since this isn't in the source program, so remove it.
+ static constexpr std::string_view kNoInline = "noinline ";
+ if (skstd::starts_with(name, kNoInline)) {
+ name = name.substr(kNoInline.size());
+ }
+
+ // Look for a matching FunctionDebugInfo slot.
+ for (size_t index = 0; index < fDebugTrace->fFuncInfo.size(); ++index) {
+ if (fDebugTrace->fFuncInfo[index].name == name) {
+ return index;
+ }
+ }
+
+ // We've never called this function before; create a new slot to hold its information.
+ int slot = (int)fDebugTrace->fFuncInfo.size();
+ fDebugTrace->fFuncInfo.push_back(FunctionDebugInfo{std::move(name)});
+ return slot;
+}
+
+size_t SkVMGenerator::writeFunction(const IRNode& caller,
+ const FunctionDefinition& function,
+ SkSpan<skvm::Val> arguments) {
+ const FunctionDeclaration& decl = function.declaration();
+
+ int funcIndex = -1;
+ if (fDebugTrace) {
+ funcIndex = this->getDebugFunctionInfo(decl);
+ fBuilder->trace_enter(fTraceHookID, this->mask(), fTraceMask, funcIndex);
+ }
+
+ size_t returnSlot = this->getFunctionSlot(caller, function);
+ fFunctionStack.push_back({/*fReturnSlot=*/returnSlot, /*fReturned=*/fBuilder->splat(0)});
+
+ // For all parameters, copy incoming argument IDs to our vector of (all) variable IDs
+ size_t argIdx = 0;
+ for (const Variable* p : decl.parameters()) {
+ size_t paramSlot = this->getSlot(*p),
+ nslots = p->type().slotCount();
+
+ for (size_t i = 0; i < nslots; ++i) {
+ fSlots[paramSlot + i].writtenTo = false;
+ this->writeToSlot(paramSlot + i, arguments[argIdx + i]);
+ }
+ argIdx += nslots;
+ }
+ SkASSERT(argIdx == arguments.size());
+
+ this->writeBlock(function.body()->as<Block>());
+
+ // Copy 'out' and 'inout' parameters back to their caller-supplied argument storage
+ argIdx = 0;
+ for (const Variable* p : decl.parameters()) {
+ size_t nslots = p->type().slotCount();
+
+ if (p->modifiers().fFlags & Modifiers::kOut_Flag) {
+ size_t paramSlot = this->getSlot(*p);
+ for (size_t i = 0; i < nslots; ++i) {
+ arguments[argIdx + i] = fSlots[paramSlot + i].val;
+ }
+ }
+ argIdx += nslots;
+ }
+ SkASSERT(argIdx == arguments.size());
+
+ fFunctionStack.pop_back();
+
+ if (fDebugTrace) {
+ fBuilder->trace_exit(fTraceHookID, this->mask(), fTraceMask, funcIndex);
+ }
+
+ return returnSlot;
+}
+
+void SkVMGenerator::writeToSlot(int slot, skvm::Val value) {
+ if (fDebugTrace && (!fSlots[slot].writtenTo || fSlots[slot].val != value)) {
+ if (fProgram.fConfig->fSettings.fAllowTraceVarInSkVMDebugTrace) {
+ fBuilder->trace_var(fTraceHookID, this->mask(), fTraceMask, slot, i32(value));
+ }
+ fSlots[slot].writtenTo = true;
+ }
+
+ fSlots[slot].val = value;
+}
+
+void SkVMGenerator::addDebugSlotInfoForGroup(const std::string& varName, const Type& type, int line,
+ int* groupIndex, int fnReturnValue) {
+ SkASSERT(fDebugTrace);
+ switch (type.typeKind()) {
+ case Type::TypeKind::kArray: {
+ int nslots = type.columns();
+ const Type& elemType = type.componentType();
+ for (int slot = 0; slot < nslots; ++slot) {
+ this->addDebugSlotInfoForGroup(varName + "[" + std::to_string(slot) + "]", elemType,
+ line, groupIndex, fnReturnValue);
+ }
+ break;
+ }
+ case Type::TypeKind::kStruct: {
+ for (const Type::Field& field : type.fields()) {
+ this->addDebugSlotInfoForGroup(varName + "." + std::string(field.fName),
+ *field.fType, line, groupIndex, fnReturnValue);
+ }
+ break;
+ }
+ default:
+ SkASSERTF(0, "unsupported slot type %d", (int)type.typeKind());
+ [[fallthrough]];
+
+ case Type::TypeKind::kScalar:
+ case Type::TypeKind::kVector:
+ case Type::TypeKind::kMatrix: {
+ Type::NumberKind numberKind = type.componentType().numberKind();
+ int nslots = type.slotCount();
+
+ for (int slot = 0; slot < nslots; ++slot) {
+ SlotDebugInfo slotInfo;
+ slotInfo.name = varName;
+ slotInfo.columns = type.columns();
+ slotInfo.rows = type.rows();
+ slotInfo.componentIndex = slot;
+ slotInfo.groupIndex = (*groupIndex)++;
+ slotInfo.numberKind = numberKind;
+ slotInfo.line = line;
+ slotInfo.fnReturnValue = fnReturnValue;
+ fDebugTrace->fSlotInfo.push_back(std::move(slotInfo));
+ }
+ break;
+ }
+ }
+}
+
+void SkVMGenerator::addDebugSlotInfo(const std::string& varName, const Type& type, int line,
+ int fnReturnValue) {
+ int groupIndex = 0;
+ this->addDebugSlotInfoForGroup(varName, type, line, &groupIndex, fnReturnValue);
+ SkASSERT((size_t)groupIndex == type.slotCount());
+}
+
+size_t SkVMGenerator::createSlot(const std::string& name,
+ const Type& type,
+ int line,
+ int fnReturnValue) {
+ size_t slot = fSlots.size(),
+ nslots = type.slotCount();
+
+ if (nslots > 0) {
+ if (fDebugTrace) {
+ // Our debug slot-info table should have the same length as the actual slot table.
+ SkASSERT(fDebugTrace->fSlotInfo.size() == slot);
+
+ // Append slot names and types to our debug slot-info table.
+ fDebugTrace->fSlotInfo.reserve(slot + nslots);
+ this->addDebugSlotInfo(name, type, line, fnReturnValue);
+
+ // Confirm that we added the expected number of slots.
+ SkASSERT(fDebugTrace->fSlotInfo.size() == (slot + nslots));
+ }
+
+ // Create brand new slots initialized to zero.
+ skvm::Val initialValue = fBuilder->splat(0.0f).id;
+ fSlots.insert(fSlots.end(), nslots, Slot{initialValue});
+ }
+ return slot;
+}
+
+// TODO(skia:13058): remove this and track positions directly
+int SkVMGenerator::getLine(Position pos) {
+ if (pos.valid()) {
+ // Binary search within fLineOffets to find the line.
+ SkASSERT(fLineOffsets.size() >= 2);
+ SkASSERT(fLineOffsets[0] == 0);
+ SkASSERT(fLineOffsets.back() == (int)fProgram.fSource->length());
+ return std::distance(fLineOffsets.begin(), std::upper_bound(fLineOffsets.begin(),
+ fLineOffsets.end(), pos.startOffset()));
+ } else {
+ return -1;
+ }
+}
+
+size_t SkVMGenerator::getSlot(const Variable& v) {
+ size_t* entry = fSlotMap.find(&v);
+ if (entry != nullptr) {
+ return *entry;
+ }
+
+ size_t slot = this->createSlot(std::string(v.name()), v.type(), this->getLine(v.fPosition),
+ /*fnReturnValue=*/-1);
+ fSlotMap.set(&v, slot);
+ return slot;
+}
+
+size_t SkVMGenerator::getFunctionSlot(const IRNode& callSite, const FunctionDefinition& fn) {
+ size_t* entry = fSlotMap.find(&callSite);
+ if (entry != nullptr) {
+ return *entry;
+ }
+
+ const FunctionDeclaration& decl = fn.declaration();
+ size_t slot = this->createSlot("[" + std::string(decl.name()) + "].result",
+ decl.returnType(),
+ this->getLine(fn.fPosition),
+ /*fnReturnValue=*/1);
+ fSlotMap.set(&callSite, slot);
+ return slot;
+}
+
+void SkVMGenerator::recursiveBinaryCompare(
+ const Value& lVal,
+ const Type& lType,
+ const Value& rVal,
+ const Type& rType,
+ size_t* slotOffset,
+ Value* result,
+ const std::function<Value(skvm::F32 x, skvm::F32 y)>& float_comp,
+ const std::function<Value(skvm::I32 x, skvm::I32 y)>& int_comp) {
+ switch (lType.typeKind()) {
+ case Type::TypeKind::kStruct:
+ SkASSERT(rType.typeKind() == Type::TypeKind::kStruct);
+ // Go through all the fields
+ for (size_t f = 0; f < lType.fields().size(); ++f) {
+ const Type::Field& lField = lType.fields()[f];
+ const Type::Field& rField = rType.fields()[f];
+ this->recursiveBinaryCompare(lVal,
+ *lField.fType,
+ rVal,
+ *rField.fType,
+ slotOffset,
+ result,
+ float_comp,
+ int_comp);
+ }
+ break;
+
+ case Type::TypeKind::kArray:
+ case Type::TypeKind::kVector:
+ case Type::TypeKind::kMatrix:
+ SkASSERT(lType.typeKind() == rType.typeKind());
+ // Go through all the elements
+ for (int c = 0; c < lType.columns(); ++c) {
+ this->recursiveBinaryCompare(lVal,
+ lType.componentType(),
+ rVal,
+ rType.componentType(),
+ slotOffset,
+ result,
+ float_comp,
+ int_comp);
+ }
+ break;
+ default:
+ SkASSERT(lType.typeKind() == rType.typeKind() &&
+ lType.slotCount() == rType.slotCount());
+ Type::NumberKind nk = base_number_kind(lType);
+ auto L = lVal[*slotOffset];
+ auto R = rVal[*slotOffset];
+ (*result)[*slotOffset] =
+ i32(nk == Type::NumberKind::kFloat
+ ? float_comp(f32(L), f32(R))
+ : int_comp(i32(L), i32(R))).id;
+ *slotOffset += lType.slotCount();
+ break;
+ }
+}
+
+Value SkVMGenerator::writeBinaryExpression(const BinaryExpression& b) {
+ const Expression& left = *b.left();
+ const Expression& right = *b.right();
+ Operator op = b.getOperator();
+ if (op.kind() == Operator::Kind::EQ) {
+ return this->writeStore(left, this->writeExpression(right));
+ }
+
+ const Type& lType = left.type();
+ const Type& rType = right.type();
+ bool lVecOrMtx = (lType.isVector() || lType.isMatrix());
+ bool rVecOrMtx = (rType.isVector() || rType.isMatrix());
+ bool isAssignment = op.isAssignment();
+ if (isAssignment) {
+ op = op.removeAssignment();
+ }
+ Type::NumberKind nk = base_number_kind(lType);
+
+ // A few ops require special treatment:
+ switch (op.kind()) {
+ case Operator::Kind::LOGICALAND: {
+ SkASSERT(!isAssignment);
+ SkASSERT(nk == Type::NumberKind::kBoolean);
+ skvm::I32 lVal = i32(this->writeExpression(left));
+ ScopedCondition shortCircuit(this, lVal);
+ skvm::I32 rVal = i32(this->writeExpression(right));
+ return lVal & rVal;
+ }
+ case Operator::Kind::LOGICALOR: {
+ SkASSERT(!isAssignment);
+ SkASSERT(nk == Type::NumberKind::kBoolean);
+ skvm::I32 lVal = i32(this->writeExpression(left));
+ ScopedCondition shortCircuit(this, ~lVal);
+ skvm::I32 rVal = i32(this->writeExpression(right));
+ return lVal | rVal;
+ }
+ case Operator::Kind::COMMA:
+ // We write the left side of the expression to preserve its side effects, even though we
+ // immediately discard the result.
+ this->writeExpression(left);
+ return this->writeExpression(right);
+ default:
+ break;
+ }
+
+ // All of the other ops always evaluate both sides of the expression
+ Value lVal = this->writeExpression(left),
+ rVal = this->writeExpression(right);
+
+ // Special case for M*V, V*M, M*M (but not V*V!)
+ if (op.kind() == Operator::Kind::STAR
+ && lVecOrMtx && rVecOrMtx && !(lType.isVector() && rType.isVector())) {
+ int rCols = rType.columns(),
+ rRows = rType.rows(),
+ lCols = lType.columns(),
+ lRows = lType.rows();
+ // M*V treats the vector as a column
+ if (rType.isVector()) {
+ std::swap(rCols, rRows);
+ }
+ SkASSERT(lCols == rRows);
+ SkASSERT(b.type().slotCount() == static_cast<size_t>(lRows * rCols));
+ Value result(lRows * rCols);
+ size_t resultIdx = 0;
+ const skvm::F32 zero = fBuilder->splat(0.0f);
+ for (int c = 0; c < rCols; ++c)
+ for (int r = 0; r < lRows; ++r) {
+ skvm::F32 sum = zero;
+ for (int j = 0; j < lCols; ++j) {
+ sum += f32(lVal[j*lRows + r]) * f32(rVal[c*rRows + j]);
+ }
+ result[resultIdx++] = sum;
+ }
+ SkASSERT(resultIdx == result.slots());
+ return isAssignment ? this->writeStore(left, result) : result;
+ }
+
+ size_t nslots = std::max(lVal.slots(), rVal.slots());
+
+ auto binary = [&](const std::function <Value(skvm::F32 x, skvm::F32 y)>& f_fn,
+ const std::function <Value(skvm::I32 x, skvm::I32 y)>& i_fn,
+ bool foldResults = false) -> Value {
+
+ Value result(nslots);
+ if (op.isEquality() && (lType.isStruct() || lType.isArray())) {
+ // Shifting over lVal and rVal
+ size_t slotOffset = 0;
+ this->recursiveBinaryCompare(
+ lVal, lType, rVal, rType, &slotOffset, &result, f_fn, i_fn);
+ SkASSERT(slotOffset == nslots);
+ } else {
+ for (size_t slot = 0; slot < nslots; ++slot) {
+ // If one side is scalar, replicate it to all channels
+ skvm::Val L = lVal.slots() == 1 ? lVal[0] : lVal[slot],
+ R = rVal.slots() == 1 ? rVal[0] : rVal[slot];
+
+ if (nk == Type::NumberKind::kFloat) {
+ result[slot] = i32(f_fn(f32(L), f32(R)));
+ } else {
+ result[slot] = i32(i_fn(i32(L), i32(R)));
+ }
+ }
+ }
+
+ if (foldResults && nslots > 1) {
+ SkASSERT(op.isEquality());
+ skvm::I32 folded = i32(result[0]);
+ for (size_t i = 1; i < nslots; ++i) {
+ if (op.kind() == Operator::Kind::NEQ) {
+ folded |= i32(result[i]);
+ } else {
+ folded &= i32(result[i]);
+ }
+ }
+ return folded;
+ }
+
+ return isAssignment ? this->writeStore(left, result) : result;
+ };
+
+ auto unsupported_f = [&](skvm::F32, skvm::F32) {
+ SkDEBUGFAIL("Unsupported operator");
+ return skvm::F32{};
+ };
+
+ switch (op.kind()) {
+ case Operator::Kind::EQEQ:
+ SkASSERT(!isAssignment);
+ return binary([](skvm::F32 x, skvm::F32 y) { return x == y; },
+ [](skvm::I32 x, skvm::I32 y) { return x == y; }, /*foldResults=*/ true);
+ case Operator::Kind::NEQ:
+ SkASSERT(!isAssignment);
+ return binary([](skvm::F32 x, skvm::F32 y) { return x != y; },
+ [](skvm::I32 x, skvm::I32 y) { return x != y; }, /*foldResults=*/ true);
+ case Operator::Kind::GT:
+ return binary([](skvm::F32 x, skvm::F32 y) { return x > y; },
+ [](skvm::I32 x, skvm::I32 y) { return x > y; });
+ case Operator::Kind::GTEQ:
+ return binary([](skvm::F32 x, skvm::F32 y) { return x >= y; },
+ [](skvm::I32 x, skvm::I32 y) { return x >= y; });
+ case Operator::Kind::LT:
+ return binary([](skvm::F32 x, skvm::F32 y) { return x < y; },
+ [](skvm::I32 x, skvm::I32 y) { return x < y; });
+ case Operator::Kind::LTEQ:
+ return binary([](skvm::F32 x, skvm::F32 y) { return x <= y; },
+ [](skvm::I32 x, skvm::I32 y) { return x <= y; });
+
+ case Operator::Kind::PLUS:
+ return binary([](skvm::F32 x, skvm::F32 y) { return x + y; },
+ [](skvm::I32 x, skvm::I32 y) { return x + y; });
+ case Operator::Kind::MINUS:
+ return binary([](skvm::F32 x, skvm::F32 y) { return x - y; },
+ [](skvm::I32 x, skvm::I32 y) { return x - y; });
+ case Operator::Kind::STAR:
+            return binary([](skvm::F32 x, skvm::F32 y) { return x * y; },
+ [](skvm::I32 x, skvm::I32 y) { return x * y; });
+ case Operator::Kind::SLASH:
+ // Minimum spec (GLSL ES 1.0) has very loose requirements for integer operations.
+ // (Low-end GPUs may not have integer ALUs). Given that, we are allowed to do floating
+ // point division plus rounding. Section 10.28 of the spec even clarifies that the
+ // rounding mode is undefined (but round-towards-zero is the obvious/common choice).
+ return binary([](skvm::F32 x, skvm::F32 y) { return x / y; },
+ [](skvm::I32 x, skvm::I32 y) {
+ return skvm::trunc(skvm::to_F32(x) / skvm::to_F32(y));
+ });
+
+ case Operator::Kind::BITWISEXOR:
+ case Operator::Kind::LOGICALXOR:
+ return binary(unsupported_f, [](skvm::I32 x, skvm::I32 y) { return x ^ y; });
+ case Operator::Kind::BITWISEAND:
+ return binary(unsupported_f, [](skvm::I32 x, skvm::I32 y) { return x & y; });
+ case Operator::Kind::BITWISEOR:
+ return binary(unsupported_f, [](skvm::I32 x, skvm::I32 y) { return x | y; });
+
+ // These three operators are all 'reserved' (illegal) in our minimum spec, but will require
+ // implementation in the future.
+ case Operator::Kind::PERCENT:
+ case Operator::Kind::SHL:
+ case Operator::Kind::SHR:
+ default:
+ SkDEBUGFAIL("Unsupported operator");
+ return {};
+ }
+}
+
+Value SkVMGenerator::writeAggregationConstructor(const AnyConstructor& c) {
+ Value result(c.type().slotCount());
+ size_t resultIdx = 0;
+ for (const auto &arg : c.argumentSpan()) {
+ Value tmp = this->writeExpression(*arg);
+ for (size_t tmpSlot = 0; tmpSlot < tmp.slots(); ++tmpSlot) {
+ result[resultIdx++] = tmp[tmpSlot];
+ }
+ }
+ return result;
+}
+
+Value SkVMGenerator::writeTypeConversion(const Value& src,
+ Type::NumberKind srcKind,
+ Type::NumberKind dstKind) {
+ // Conversion among "similar" types (floatN <-> halfN), (shortN <-> intN), etc. is a no-op.
+ if (srcKind == dstKind) {
+ return src;
+ }
+
+ // TODO: Handle signed vs. unsigned. GLSL ES 1.0 only has 'int', so no problem yet.
+ Value dst(src.slots());
+ switch (dstKind) {
+ case Type::NumberKind::kFloat:
+ if (srcKind == Type::NumberKind::kSigned) {
+ // int -> float
+ for (size_t i = 0; i < src.slots(); ++i) {
+ dst[i] = skvm::to_F32(i32(src[i]));
+ }
+ return dst;
+ }
+ if (srcKind == Type::NumberKind::kBoolean) {
+ // bool -> float
+ for (size_t i = 0; i < src.slots(); ++i) {
+ dst[i] = skvm::select(i32(src[i]), 1.0f, 0.0f);
+ }
+ return dst;
+ }
+ break;
+
+ case Type::NumberKind::kSigned:
+ if (srcKind == Type::NumberKind::kFloat) {
+ // float -> int
+ for (size_t i = 0; i < src.slots(); ++i) {
+ dst[i] = skvm::trunc(f32(src[i]));
+ }
+ return dst;
+ }
+ if (srcKind == Type::NumberKind::kBoolean) {
+ // bool -> int
+ for (size_t i = 0; i < src.slots(); ++i) {
+ dst[i] = skvm::select(i32(src[i]), 1, 0);
+ }
+ return dst;
+ }
+ break;
+
+ case Type::NumberKind::kBoolean:
+ if (srcKind == Type::NumberKind::kSigned) {
+ // int -> bool
+ for (size_t i = 0; i < src.slots(); ++i) {
+ dst[i] = i32(src[i]) != 0;
+ }
+ return dst;
+ }
+ if (srcKind == Type::NumberKind::kFloat) {
+ // float -> bool
+ for (size_t i = 0; i < src.slots(); ++i) {
+ dst[i] = f32(src[i]) != 0.0;
+ }
+ return dst;
+ }
+ break;
+
+ default:
+ break;
+ }
+ SkDEBUGFAILF("Unsupported type conversion: %d -> %d", (int)srcKind, (int)dstKind);
+ return {};
+}
+
+Value SkVMGenerator::writeConstructorCast(const AnyConstructor& c) {
+ auto arguments = c.argumentSpan();
+ SkASSERT(arguments.size() == 1);
+ const Expression& argument = *arguments.front();
+
+ const Type& srcType = argument.type();
+ const Type& dstType = c.type();
+ Type::NumberKind srcKind = base_number_kind(srcType);
+ Type::NumberKind dstKind = base_number_kind(dstType);
+ Value src = this->writeExpression(argument);
+ return this->writeTypeConversion(src, srcKind, dstKind);
+}
+
+Value SkVMGenerator::writeConstructorSplat(const ConstructorSplat& c) {
+ SkASSERT(c.type().isVector());
+ SkASSERT(c.argument()->type().isScalar());
+ int columns = c.type().columns();
+
+ // Splat the argument across all components of a vector.
+ Value src = this->writeExpression(*c.argument());
+ Value dst(columns);
+ for (int i = 0; i < columns; ++i) {
+ dst[i] = src[0];
+ }
+ return dst;
+}
+
+Value SkVMGenerator::writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& ctor) {
+ const Type& dstType = ctor.type();
+ SkASSERT(dstType.isMatrix());
+ SkASSERT(ctor.argument()->type().matches(dstType.componentType()));
+
+ Value src = this->writeExpression(*ctor.argument());
+ Value dst(dstType.rows() * dstType.columns());
+ size_t dstIndex = 0;
+
+ // Matrix-from-scalar builds a diagonal scale matrix
+ const skvm::F32 zero = fBuilder->splat(0.0f);
+ for (int c = 0; c < dstType.columns(); ++c) {
+ for (int r = 0; r < dstType.rows(); ++r) {
+ dst[dstIndex++] = (c == r ? f32(src) : zero);
+ }
+ }
+
+ SkASSERT(dstIndex == dst.slots());
+ return dst;
+}
+
+Value SkVMGenerator::writeConstructorMatrixResize(const ConstructorMatrixResize& ctor) {
+ const Type& srcType = ctor.argument()->type();
+ const Type& dstType = ctor.type();
+ Value src = this->writeExpression(*ctor.argument());
+ Value dst(dstType.rows() * dstType.columns());
+
+ // Matrix-from-matrix uses src where it overlaps, and fills in missing fields with identity.
+ size_t dstIndex = 0;
+ for (int c = 0; c < dstType.columns(); ++c) {
+ for (int r = 0; r < dstType.rows(); ++r) {
+ if (c < srcType.columns() && r < srcType.rows()) {
+ dst[dstIndex++] = src[c * srcType.rows() + r];
+ } else {
+ dst[dstIndex++] = fBuilder->splat(c == r ? 1.0f : 0.0f);
+ }
+ }
+ }
+
+ SkASSERT(dstIndex == dst.slots());
+ return dst;
+}
+
+Value SkVMGenerator::writeFieldAccess(const FieldAccess& expr) {
+ Value base = this->writeExpression(*expr.base());
+ Value field(expr.type().slotCount());
+ size_t offset = expr.initialSlot();
+ for (size_t i = 0; i < field.slots(); ++i) {
+ field[i] = base[offset + i];
+ }
+ return field;
+}
+
+size_t SkVMGenerator::indexSlotOffset(const IndexExpression& expr) {
+ Value index = this->writeExpression(*expr.index());
+ int indexValue = -1;
+ SkAssertResult(fBuilder->allImm(index[0], &indexValue));
+
+ // When indexing by a literal, the front-end guarantees that we don't go out of bounds.
+ // But when indexing by a loop variable, it's possible to generate out-of-bounds access.
+ // The GLSL spec leaves that behavior undefined - we'll just clamp everything here.
+ indexValue = SkTPin(indexValue, 0, expr.base()->type().columns() - 1);
+
+ size_t stride = expr.type().slotCount();
+ return indexValue * stride;
+}
+
+Value SkVMGenerator::writeIndexExpression(const IndexExpression& expr) {
+ Value base = this->writeExpression(*expr.base());
+ Value element(expr.type().slotCount());
+ size_t offset = this->indexSlotOffset(expr);
+ for (size_t i = 0; i < element.slots(); ++i) {
+ element[i] = base[offset + i];
+ }
+ return element;
+}
+
+Value SkVMGenerator::writeVariableExpression(const VariableReference& expr) {
+ size_t slot = this->getSlot(*expr.variable());
+ return this->getSlotValue(slot, expr.type().slotCount());
+}
+
+Value SkVMGenerator::writeMatrixInverse2x2(const Value& m) {
+ SkASSERT(m.slots() == 4);
+ skvm::F32 a = f32(m[0]),
+ b = f32(m[1]),
+ c = f32(m[2]),
+ d = f32(m[3]);
+ skvm::F32 idet = 1.0f / (a*d - b*c);
+
+ Value result(m.slots());
+    result[0] = ( d * idet);
+    result[1] = (-b * idet);
+    result[2] = (-c * idet);
+    result[3] = ( a * idet);
+ return result;
+}
+
+Value SkVMGenerator::writeMatrixInverse3x3(const Value& m) {
+ SkASSERT(m.slots() == 9);
+ skvm::F32 a11 = f32(m[0]), a12 = f32(m[3]), a13 = f32(m[6]),
+ a21 = f32(m[1]), a22 = f32(m[4]), a23 = f32(m[7]),
+ a31 = f32(m[2]), a32 = f32(m[5]), a33 = f32(m[8]);
+ skvm::F32 idet = 1.0f / (a11*a22*a33 + a12*a23*a31 + a13*a21*a32 -
+ a11*a23*a32 - a12*a21*a33 - a13*a22*a31);
+
+ Value result(m.slots());
+    result[0] = ((a22*a33 - a23*a32) * idet);
+    result[1] = ((a23*a31 - a21*a33) * idet);
+    result[2] = ((a21*a32 - a22*a31) * idet);
+    result[3] = ((a13*a32 - a12*a33) * idet);
+    result[4] = ((a11*a33 - a13*a31) * idet);
+    result[5] = ((a12*a31 - a11*a32) * idet);
+    result[6] = ((a12*a23 - a13*a22) * idet);
+    result[7] = ((a13*a21 - a11*a23) * idet);
+    result[8] = ((a11*a22 - a12*a21) * idet);
+ return result;
+}
+
+Value SkVMGenerator::writeMatrixInverse4x4(const Value& m) {
+ SkASSERT(m.slots() == 16);
+ skvm::F32 a00 = f32(m[0]), a10 = f32(m[4]), a20 = f32(m[ 8]), a30 = f32(m[12]),
+ a01 = f32(m[1]), a11 = f32(m[5]), a21 = f32(m[ 9]), a31 = f32(m[13]),
+ a02 = f32(m[2]), a12 = f32(m[6]), a22 = f32(m[10]), a32 = f32(m[14]),
+ a03 = f32(m[3]), a13 = f32(m[7]), a23 = f32(m[11]), a33 = f32(m[15]);
+
+    skvm::F32 b00 = a00*a11 - a01*a10,
+              b01 = a00*a12 - a02*a10,
+              b02 = a00*a13 - a03*a10,
+              b03 = a01*a12 - a02*a11,
+              b04 = a01*a13 - a03*a11,
+              b05 = a02*a13 - a03*a12,
+              b06 = a20*a31 - a21*a30,
+              b07 = a20*a32 - a22*a30,
+              b08 = a20*a33 - a23*a30,
+              b09 = a21*a32 - a22*a31,
+              b10 = a21*a33 - a23*a31,
+              b11 = a22*a33 - a23*a32;
+
+    skvm::F32 idet = 1.0f / (b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06);
+
+ b00 *= idet;
+ b01 *= idet;
+ b02 *= idet;
+ b03 *= idet;
+ b04 *= idet;
+ b05 *= idet;
+ b06 *= idet;
+ b07 *= idet;
+ b08 *= idet;
+ b09 *= idet;
+ b10 *= idet;
+ b11 *= idet;
+
+ Value result(m.slots());
+ result[ 0] = (a11*b11 - a12*b10 + a13*b09);
+ result[ 1] = (a02*b10 - a01*b11 - a03*b09);
+ result[ 2] = (a31*b05 - a32*b04 + a33*b03);
+ result[ 3] = (a22*b04 - a21*b05 - a23*b03);
+ result[ 4] = (a12*b08 - a10*b11 - a13*b07);
+ result[ 5] = (a00*b11 - a02*b08 + a03*b07);
+ result[ 6] = (a32*b02 - a30*b05 - a33*b01);
+ result[ 7] = (a20*b05 - a22*b02 + a23*b01);
+ result[ 8] = (a10*b10 - a11*b08 + a13*b06);
+ result[ 9] = (a01*b08 - a00*b10 - a03*b06);
+ result[10] = (a30*b04 - a31*b02 + a33*b00);
+ result[11] = (a21*b02 - a20*b04 - a23*b00);
+ result[12] = (a11*b07 - a10*b09 - a12*b06);
+ result[13] = (a00*b09 - a01*b07 + a02*b06);
+ result[14] = (a31*b01 - a30*b03 - a32*b00);
+ result[15] = (a20*b03 - a21*b01 + a22*b00);
+ return result;
+}
+
+Value SkVMGenerator::writeChildCall(const ChildCall& c) {
+ size_t* childPtr = fSlotMap.find(&c.child());
+ SkASSERT(childPtr != nullptr);
+
+ const Expression* arg = c.arguments()[0].get();
+ Value argVal = this->writeExpression(*arg);
+ skvm::Color color;
+
+ switch (c.child().type().typeKind()) {
+ case Type::TypeKind::kShader: {
+ SkASSERT(c.arguments().size() == 1);
+ SkASSERT(arg->type().matches(*fProgram.fContext->fTypes.fFloat2));
+ skvm::Coord coord = {f32(argVal[0]), f32(argVal[1])};
+ color = fCallbacks->sampleShader(*childPtr, coord);
+ break;
+ }
+ case Type::TypeKind::kColorFilter: {
+ SkASSERT(c.arguments().size() == 1);
+ SkASSERT(arg->type().matches(*fProgram.fContext->fTypes.fHalf4) ||
+ arg->type().matches(*fProgram.fContext->fTypes.fFloat4));
+ skvm::Color inColor = {f32(argVal[0]), f32(argVal[1]), f32(argVal[2]), f32(argVal[3])};
+ color = fCallbacks->sampleColorFilter(*childPtr, inColor);
+ break;
+ }
+ case Type::TypeKind::kBlender: {
+ SkASSERT(c.arguments().size() == 2);
+ SkASSERT(arg->type().matches(*fProgram.fContext->fTypes.fHalf4) ||
+ arg->type().matches(*fProgram.fContext->fTypes.fFloat4));
+ skvm::Color srcColor = {f32(argVal[0]), f32(argVal[1]), f32(argVal[2]), f32(argVal[3])};
+
+ arg = c.arguments()[1].get();
+ argVal = this->writeExpression(*arg);
+ SkASSERT(arg->type().matches(*fProgram.fContext->fTypes.fHalf4) ||
+ arg->type().matches(*fProgram.fContext->fTypes.fFloat4));
+ skvm::Color dstColor = {f32(argVal[0]), f32(argVal[1]), f32(argVal[2]), f32(argVal[3])};
+
+ color = fCallbacks->sampleBlender(*childPtr, srcColor, dstColor);
+ break;
+ }
+ default: {
+ SkDEBUGFAILF("cannot sample from type '%s'", c.child().type().description().c_str());
+ }
+ }
+
+ Value result(4);
+ result[0] = color.r;
+ result[1] = color.g;
+ result[2] = color.b;
+ result[3] = color.a;
+ return result;
+}
+
+Value SkVMGenerator::writeIntrinsicCall(const FunctionCall& c) {
+ IntrinsicKind intrinsicKind = c.function().intrinsicKind();
+ SkASSERT(intrinsicKind != kNotIntrinsic);
+
+ const size_t nargs = c.arguments().size();
+ const size_t kMaxArgs = 3; // eg: clamp, mix, smoothstep
+ Value args[kMaxArgs];
+ SkASSERT(nargs >= 1 && nargs <= std::size(args));
+
+ // All other intrinsics have at most three args, and those can all be evaluated up front:
+ for (size_t i = 0; i < nargs; ++i) {
+ args[i] = this->writeExpression(*c.arguments()[i]);
+ }
+ Type::NumberKind nk = base_number_kind(c.arguments()[0]->type());
+
+ auto binary = [&](auto&& fn) {
+ // Binary intrinsics are (vecN, vecN), (vecN, float), or (float, vecN)
+ size_t nslots = std::max(args[0].slots(), args[1].slots());
+ Value result(nslots);
+ SkASSERT(args[0].slots() == nslots || args[0].slots() == 1);
+ SkASSERT(args[1].slots() == nslots || args[1].slots() == 1);
+
+ for (size_t i = 0; i < nslots; ++i) {
+ result[i] = fn({fBuilder, args[0][args[0].slots() == 1 ? 0 : i]},
+ {fBuilder, args[1][args[1].slots() == 1 ? 0 : i]});
+ }
+ return result;
+ };
+
+ auto ternary = [&](auto&& fn) {
+ // Ternary intrinsics are some combination of vecN and float
+ size_t nslots = std::max({args[0].slots(), args[1].slots(), args[2].slots()});
+ Value result(nslots);
+ SkASSERT(args[0].slots() == nslots || args[0].slots() == 1);
+ SkASSERT(args[1].slots() == nslots || args[1].slots() == 1);
+ SkASSERT(args[2].slots() == nslots || args[2].slots() == 1);
+
+ for (size_t i = 0; i < nslots; ++i) {
+ result[i] = fn({fBuilder, args[0][args[0].slots() == 1 ? 0 : i]},
+ {fBuilder, args[1][args[1].slots() == 1 ? 0 : i]},
+ {fBuilder, args[2][args[2].slots() == 1 ? 0 : i]});
+ }
+ return result;
+ };
+
+ auto dot = [&](const Value& x, const Value& y) {
+ SkASSERT(x.slots() == y.slots());
+ skvm::F32 result = f32(x[0]) * f32(y[0]);
+ for (size_t i = 1; i < x.slots(); ++i) {
+ result += f32(x[i]) * f32(y[i]);
+ }
+ return result;
+ };
+
+ switch (intrinsicKind) {
+ case k_radians_IntrinsicKind:
+ return unary(args[0], [](skvm::F32 deg) { return deg * (SK_FloatPI / 180); });
+ case k_degrees_IntrinsicKind:
+ return unary(args[0], [](skvm::F32 rad) { return rad * (180 / SK_FloatPI); });
+
+ case k_sin_IntrinsicKind: return unary(args[0], skvm::approx_sin);
+ case k_cos_IntrinsicKind: return unary(args[0], skvm::approx_cos);
+ case k_tan_IntrinsicKind: return unary(args[0], skvm::approx_tan);
+
+ case k_asin_IntrinsicKind: return unary(args[0], skvm::approx_asin);
+ case k_acos_IntrinsicKind: return unary(args[0], skvm::approx_acos);
+
+ case k_atan_IntrinsicKind: return nargs == 1 ? unary(args[0], skvm::approx_atan)
+ : binary(skvm::approx_atan2);
+
+ case k_pow_IntrinsicKind:
+ return binary([](skvm::F32 x, skvm::F32 y) { return skvm::approx_powf(x, y); });
+ case k_exp_IntrinsicKind: return unary(args[0], skvm::approx_exp);
+ case k_log_IntrinsicKind: return unary(args[0], skvm::approx_log);
+ case k_exp2_IntrinsicKind: return unary(args[0], skvm::approx_pow2);
+ case k_log2_IntrinsicKind: return unary(args[0], skvm::approx_log2);
+
+ case k_sqrt_IntrinsicKind: return unary(args[0], skvm::sqrt);
+ case k_inversesqrt_IntrinsicKind:
+ return unary(args[0], [](skvm::F32 x) { return 1.0f / skvm::sqrt(x); });
+
+ case k_abs_IntrinsicKind: return unary(args[0], skvm::abs);
+ case k_sign_IntrinsicKind:
+ return unary(args[0], [](skvm::F32 x) { return select(x < 0, -1.0f,
+ select(x > 0, +1.0f, 0.0f)); });
+ case k_floor_IntrinsicKind: return unary(args[0], skvm::floor);
+ case k_ceil_IntrinsicKind: return unary(args[0], skvm::ceil);
+ case k_fract_IntrinsicKind: return unary(args[0], skvm::fract);
+ case k_mod_IntrinsicKind:
+ return binary([](skvm::F32 x, skvm::F32 y) { return x - y*skvm::floor(x / y); });
+
+ case k_min_IntrinsicKind:
+ return binary([](skvm::F32 x, skvm::F32 y) { return skvm::min(x, y); });
+ case k_max_IntrinsicKind:
+ return binary([](skvm::F32 x, skvm::F32 y) { return skvm::max(x, y); });
+ case k_clamp_IntrinsicKind:
+ return ternary(
+ [](skvm::F32 x, skvm::F32 lo, skvm::F32 hi) { return skvm::clamp(x, lo, hi); });
+ case k_saturate_IntrinsicKind:
+ return unary(args[0], [](skvm::F32 x) { return skvm::clamp01(x); });
+ case k_mix_IntrinsicKind:
+ return ternary(
+ [](skvm::F32 x, skvm::F32 y, skvm::F32 t) { return skvm::lerp(x, y, t); });
+ case k_step_IntrinsicKind:
+ return binary([](skvm::F32 edge, skvm::F32 x) { return select(x < edge, 0.0f, 1.0f); });
+ case k_smoothstep_IntrinsicKind:
+ return ternary([](skvm::F32 edge0, skvm::F32 edge1, skvm::F32 x) {
+ skvm::F32 t = skvm::clamp01((x - edge0) / (edge1 - edge0));
+                return t * t * (3 - 2 * t);
+ });
+
+ case k_length_IntrinsicKind: return skvm::sqrt(dot(args[0], args[0]));
+ case k_distance_IntrinsicKind: {
+ Value vec = binary([](skvm::F32 x, skvm::F32 y) { return x - y; });
+ return skvm::sqrt(dot(vec, vec));
+ }
+ case k_dot_IntrinsicKind: return dot(args[0], args[1]);
+ case k_cross_IntrinsicKind: {
+ skvm::F32 ax = f32(args[0][0]), ay = f32(args[0][1]), az = f32(args[0][2]),
+ bx = f32(args[1][0]), by = f32(args[1][1]), bz = f32(args[1][2]);
+ Value result(3);
+            result[0] = ay*bz - az*by;
+            result[1] = az*bx - ax*bz;
+            result[2] = ax*by - ay*bx;
+ return result;
+ }
+ case k_normalize_IntrinsicKind: {
+ skvm::F32 invLen = 1.0f / skvm::sqrt(dot(args[0], args[0]));
+            return unary(args[0], [&](skvm::F32 x) { return x * invLen; });
+ }
+ case k_faceforward_IntrinsicKind: {
+ const Value &N = args[0],
+ &I = args[1],
+ &Nref = args[2];
+
+ skvm::F32 dotNrefI = dot(Nref, I);
+ return unary(N, [&](skvm::F32 n) { return select(dotNrefI<0, n, -n); });
+ }
+ case k_reflect_IntrinsicKind: {
+ const Value &I = args[0],
+ &N = args[1];
+
+ skvm::F32 dotNI = dot(N, I);
+ return binary([&](skvm::F32 i, skvm::F32 n) {
+                return i - 2*dotNI*n;
+ });
+ }
+ case k_refract_IntrinsicKind: {
+ const Value &I = args[0],
+ &N = args[1];
+ skvm::F32 eta = f32(args[2]);
+
+ skvm::F32 dotNI = dot(N, I),
+                      k = 1 - eta*eta*(1 - dotNI*dotNI);
+ return binary([&](skvm::F32 i, skvm::F32 n) {
+                return select(k<0, 0.0f, eta*i - (eta*dotNI + sqrt(k))*n);
+ });
+ }
+
+ case k_matrixCompMult_IntrinsicKind:
+            return binary([](skvm::F32 x, skvm::F32 y) { return x * y; });
+ case k_inverse_IntrinsicKind: {
+ switch (args[0].slots()) {
+ case 4: return this->writeMatrixInverse2x2(args[0]);
+ case 9: return this->writeMatrixInverse3x3(args[0]);
+ case 16: return this->writeMatrixInverse4x4(args[0]);
+ default:
+ SkDEBUGFAIL("Invalid call to inverse");
+ return {};
+ }
+ }
+
+ case k_lessThan_IntrinsicKind:
+ return nk == Type::NumberKind::kFloat
+ ? binary([](skvm::F32 x, skvm::F32 y) { return x < y; })
+ : binary([](skvm::I32 x, skvm::I32 y) { return x < y; });
+ case k_lessThanEqual_IntrinsicKind:
+ return nk == Type::NumberKind::kFloat
+ ? binary([](skvm::F32 x, skvm::F32 y) { return x <= y; })
+ : binary([](skvm::I32 x, skvm::I32 y) { return x <= y; });
+ case k_greaterThan_IntrinsicKind:
+ return nk == Type::NumberKind::kFloat
+ ? binary([](skvm::F32 x, skvm::F32 y) { return x > y; })
+ : binary([](skvm::I32 x, skvm::I32 y) { return x > y; });
+ case k_greaterThanEqual_IntrinsicKind:
+ return nk == Type::NumberKind::kFloat
+ ? binary([](skvm::F32 x, skvm::F32 y) { return x >= y; })
+ : binary([](skvm::I32 x, skvm::I32 y) { return x >= y; });
+
+ case k_equal_IntrinsicKind:
+ return nk == Type::NumberKind::kFloat
+ ? binary([](skvm::F32 x, skvm::F32 y) { return x == y; })
+ : binary([](skvm::I32 x, skvm::I32 y) { return x == y; });
+ case k_notEqual_IntrinsicKind:
+ return nk == Type::NumberKind::kFloat
+ ? binary([](skvm::F32 x, skvm::F32 y) { return x != y; })
+ : binary([](skvm::I32 x, skvm::I32 y) { return x != y; });
+
+ case k_any_IntrinsicKind: {
+ skvm::I32 result = i32(args[0][0]);
+ for (size_t i = 1; i < args[0].slots(); ++i) {
+ result |= i32(args[0][i]);
+ }
+ return result;
+ }
+ case k_all_IntrinsicKind: {
+ skvm::I32 result = i32(args[0][0]);
+ for (size_t i = 1; i < args[0].slots(); ++i) {
+ result &= i32(args[0][i]);
+ }
+ return result;
+ }
+ case k_not_IntrinsicKind: return unary(args[0], [](skvm::I32 x) { return ~x; });
+
+ case k_toLinearSrgb_IntrinsicKind: {
+ skvm::Color color = {
+ f32(args[0][0]), f32(args[0][1]), f32(args[0][2]), fBuilder->splat(1.0f)};
+ color = fCallbacks->toLinearSrgb(color);
+ Value result(3);
+ result[0] = color.r;
+ result[1] = color.g;
+ result[2] = color.b;
+ return result;
+ }
+ case k_fromLinearSrgb_IntrinsicKind: {
+ skvm::Color color = {
+ f32(args[0][0]), f32(args[0][1]), f32(args[0][2]), fBuilder->splat(1.0f)};
+ color = fCallbacks->fromLinearSrgb(color);
+ Value result(3);
+ result[0] = color.r;
+ result[1] = color.g;
+ result[2] = color.b;
+ return result;
+ }
+
+ default:
+ SkDEBUGFAILF("unsupported intrinsic %s", c.function().description().c_str());
+ return {};
+ }
+ SkUNREACHABLE;
+}
+
+Value SkVMGenerator::writeFunctionCall(const FunctionCall& call) {
+ if (call.function().isIntrinsic() && !call.function().definition()) {
+ return this->writeIntrinsicCall(call);
+ }
+
+ const FunctionDeclaration& decl = call.function();
+ SkASSERTF(decl.definition(), "no definition for function '%s'", decl.description().c_str());
+ const FunctionDefinition& funcDef = *decl.definition();
+
+ // Evaluate all arguments, gather the results into a contiguous list of IDs
+ std::vector<skvm::Val> argVals;
+ for (const auto& arg : call.arguments()) {
+ Value v = this->writeExpression(*arg);
+ for (size_t i = 0; i < v.slots(); ++i) {
+ argVals.push_back(v[i]);
+ }
+ }
+
+ size_t returnSlot;
+ {
+ // This merges currentFunction().fReturned into fConditionMask. Lanes that conditionally
+ // returned in the current function would otherwise resume execution within the child.
+ ScopedCondition m(this, ~currentFunction().fReturned);
+ returnSlot = this->writeFunction(call, funcDef, SkSpan(argVals));
+ }
+
+ // Propagate new values of any 'out' params back to the original arguments
+ const std::unique_ptr<Expression>* argIter = call.arguments().begin();
+ size_t valIdx = 0;
+ for (const Variable* p : decl.parameters()) {
+ size_t nslots = p->type().slotCount();
+ if (p->modifiers().fFlags & Modifiers::kOut_Flag) {
+ Value v(nslots);
+ for (size_t i = 0; i < nslots; ++i) {
+ v[i] = argVals[valIdx + i];
+ }
+ const std::unique_ptr<Expression>& arg = *argIter;
+ this->writeStore(*arg, v);
+ }
+ valIdx += nslots;
+ argIter++;
+ }
+
+ // Create a result Value from the return slot
+ return this->getSlotValue(returnSlot, call.type().slotCount());
+}
+
+Value SkVMGenerator::writeLiteral(const Literal& l) {
+ if (l.type().isFloat()) {
+ return fBuilder->splat(l.as<Literal>().floatValue());
+ }
+ if (l.type().isInteger()) {
+ return fBuilder->splat(static_cast<int>(l.as<Literal>().intValue()));
+ }
+ SkASSERT(l.type().isBoolean());
+ return fBuilder->splat(l.as<Literal>().boolValue() ? ~0 : 0);
+}
+
+Value SkVMGenerator::writePrefixExpression(const PrefixExpression& p) {
+ Value val = this->writeExpression(*p.operand());
+
+ switch (p.getOperator().kind()) {
+ case Operator::Kind::PLUSPLUS:
+ case Operator::Kind::MINUSMINUS: {
+ bool incr = p.getOperator().kind() == Operator::Kind::PLUSPLUS;
+
+ switch (base_number_kind(p.type())) {
+ case Type::NumberKind::kFloat:
+ val = f32(val) + fBuilder->splat(incr ? 1.0f : -1.0f);
+ break;
+ case Type::NumberKind::kSigned:
+ val = i32(val) + fBuilder->splat(incr ? 1 : -1);
+ break;
+ default:
+ SkASSERT(false);
+ return {};
+ }
+ return this->writeStore(*p.operand(), val);
+ }
+ case Operator::Kind::MINUS: {
+ switch (base_number_kind(p.type())) {
+ case Type::NumberKind::kFloat:
+ return this->unary(val, [](skvm::F32 x) { return -x; });
+ case Type::NumberKind::kSigned:
+ return this->unary(val, [](skvm::I32 x) { return -x; });
+ default:
+ SkASSERT(false);
+ return {};
+ }
+ }
+ case Operator::Kind::LOGICALNOT:
+ case Operator::Kind::BITWISENOT:
+ return this->unary(val, [](skvm::I32 x) { return ~x; });
+ default:
+ SkASSERT(false);
+ return {};
+ }
+}
+
+Value SkVMGenerator::writePostfixExpression(const PostfixExpression& p) {
+ switch (p.getOperator().kind()) {
+ case Operator::Kind::PLUSPLUS:
+ case Operator::Kind::MINUSMINUS: {
+ Value old = this->writeExpression(*p.operand()),
+ val = old;
+ SkASSERT(val.slots() == 1);
+ bool incr = p.getOperator().kind() == Operator::Kind::PLUSPLUS;
+
+ switch (base_number_kind(p.type())) {
+ case Type::NumberKind::kFloat:
+ val = f32(val) + fBuilder->splat(incr ? 1.0f : -1.0f);
+ break;
+ case Type::NumberKind::kSigned:
+ val = i32(val) + fBuilder->splat(incr ? 1 : -1);
+ break;
+ default:
+ SkASSERT(false);
+ return {};
+ }
+ this->writeStore(*p.operand(), val);
+ return old;
+ }
+ default:
+ SkASSERT(false);
+ return {};
+ }
+}
+
+Value SkVMGenerator::writeSwizzle(const Swizzle& s) {
+ Value base = this->writeExpression(*s.base());
+ Value swizzled(s.components().size());
+ for (int i = 0; i < s.components().size(); ++i) {
+ swizzled[i] = base[s.components()[i]];
+ }
+ return swizzled;
+}
+
+Value SkVMGenerator::writeTernaryExpression(const TernaryExpression& t) {
+ skvm::I32 test = i32(this->writeExpression(*t.test()));
+ Value ifTrue, ifFalse;
+
+ {
+ ScopedCondition m(this, test);
+ ifTrue = this->writeExpression(*t.ifTrue());
+ }
+ {
+ ScopedCondition m(this, ~test);
+ ifFalse = this->writeExpression(*t.ifFalse());
+ }
+
+ size_t nslots = ifTrue.slots();
+ SkASSERT(nslots == ifFalse.slots());
+
+ Value result(nslots);
+ for (size_t i = 0; i < nslots; ++i) {
+ result[i] = skvm::select(test, i32(ifTrue[i]), i32(ifFalse[i]));
+ }
+ return result;
+}
+
+Value SkVMGenerator::writeExpression(const Expression& e) {
+ switch (e.kind()) {
+ case Expression::Kind::kBinary:
+ return this->writeBinaryExpression(e.as<BinaryExpression>());
+ case Expression::Kind::kChildCall:
+ return this->writeChildCall(e.as<ChildCall>());
+ case Expression::Kind::kConstructorArray:
+ case Expression::Kind::kConstructorCompound:
+ case Expression::Kind::kConstructorStruct:
+ return this->writeAggregationConstructor(e.asAnyConstructor());
+ case Expression::Kind::kConstructorArrayCast:
+ return this->writeExpression(*e.as<ConstructorArrayCast>().argument());
+ case Expression::Kind::kConstructorDiagonalMatrix:
+ return this->writeConstructorDiagonalMatrix(e.as<ConstructorDiagonalMatrix>());
+ case Expression::Kind::kConstructorMatrixResize:
+ return this->writeConstructorMatrixResize(e.as<ConstructorMatrixResize>());
+ case Expression::Kind::kConstructorScalarCast:
+ case Expression::Kind::kConstructorCompoundCast:
+ return this->writeConstructorCast(e.asAnyConstructor());
+ case Expression::Kind::kConstructorSplat:
+ return this->writeConstructorSplat(e.as<ConstructorSplat>());
+ case Expression::Kind::kFieldAccess:
+ return this->writeFieldAccess(e.as<FieldAccess>());
+ case Expression::Kind::kIndex:
+ return this->writeIndexExpression(e.as<IndexExpression>());
+ case Expression::Kind::kVariableReference:
+ return this->writeVariableExpression(e.as<VariableReference>());
+ case Expression::Kind::kLiteral:
+ return this->writeLiteral(e.as<Literal>());
+ case Expression::Kind::kFunctionCall:
+ return this->writeFunctionCall(e.as<FunctionCall>());
+ case Expression::Kind::kPrefix:
+ return this->writePrefixExpression(e.as<PrefixExpression>());
+ case Expression::Kind::kPostfix:
+ return this->writePostfixExpression(e.as<PostfixExpression>());
+ case Expression::Kind::kSwizzle:
+ return this->writeSwizzle(e.as<Swizzle>());
+ case Expression::Kind::kTernary:
+ return this->writeTernaryExpression(e.as<TernaryExpression>());
+ default:
+ SkDEBUGFAIL("Unsupported expression");
+ return {};
+ }
+}
+
+Value SkVMGenerator::writeStore(const Expression& lhs, const Value& rhs) {
+ SkASSERTF(rhs.slots() == lhs.type().slotCount(),
+ "lhs=%s (%s)\nrhs=%zu slot",
+ lhs.type().description().c_str(), lhs.description().c_str(), rhs.slots());
+
+ // We need to figure out the collection of slots that we're storing into. The l-value (lhs)
+ // is always a VariableReference, possibly wrapped by one or more Swizzle, FieldAccess, or
+ // IndexExpressions. The underlying VariableReference has a range of slots for its storage,
+ // and each expression wrapped around that selects a sub-set of those slots (Field/Index),
+ // or rearranges them (Swizzle).
+ SkSTArray<4, size_t, true> slots;
+ slots.resize(rhs.slots());
+
+ // Start with the identity slot map - this basically says that the values from rhs belong in
+ // slots [0, 1, 2 ... N] of the lhs.
+ for (int i = 0; i < slots.size(); ++i) {
+ slots[i] = i;
+ }
+
+ // Now, as we peel off each outer expression, adjust 'slots' to be the locations relative to
+ // the next (inner) expression:
+ const Expression* expr = &lhs;
+ while (!expr->is<VariableReference>()) {
+ switch (expr->kind()) {
+ case Expression::Kind::kFieldAccess: {
+ const FieldAccess& fld = expr->as<FieldAccess>();
+ size_t offset = fld.initialSlot();
+ for (size_t& s : slots) {
+ s += offset;
+ }
+ expr = fld.base().get();
+ } break;
+ case Expression::Kind::kIndex: {
+ const IndexExpression& idx = expr->as<IndexExpression>();
+ size_t offset = this->indexSlotOffset(idx);
+ for (size_t& s : slots) {
+ s += offset;
+ }
+ expr = idx.base().get();
+ } break;
+ case Expression::Kind::kSwizzle: {
+ const Swizzle& swz = expr->as<Swizzle>();
+ for (size_t& s : slots) {
+ s = swz.components()[s];
+ }
+ expr = swz.base().get();
+ } break;
+ default:
+ // No other kinds of expressions are valid in lvalues. (see Analysis::IsAssignable)
+ SkDEBUGFAIL("Invalid expression type");
+ return {};
+ }
+ }
+
+ // When we get here, 'slots' are all relative to the first slot holding 'var's storage
+ const Variable& var = *expr->as<VariableReference>().variable();
+ size_t varSlot = this->getSlot(var);
+ for (size_t& slot : slots) {
+ SkASSERT(slot < var.type().slotCount());
+ slot += varSlot;
+ }
+
+ // `slots` are now absolute indices into `fSlots`.
+ skvm::I32 mask = this->mask();
+ for (size_t i = 0; i < rhs.slots(); ++i) {
+ int slotNum = slots[i];
+ skvm::Val conditionalStore = this->writeConditionalStore(fSlots[slotNum].val, rhs[i], mask);
+ this->writeToSlot(slotNum, conditionalStore);
+ }
+
+ return rhs;
+}
+
+skvm::Val SkVMGenerator::writeConditionalStore(skvm::Val lhs, skvm::Val rhs, skvm::I32 mask) {
+ return select(mask, f32(rhs), f32(lhs)).id;
+}
+
+void SkVMGenerator::writeBlock(const Block& b) {
+ skvm::I32 mask = this->mask();
+ if (b.blockKind() == Block::Kind::kCompoundStatement) {
+ this->emitTraceLine(this->getLine(b.fPosition));
+ ++fInsideCompoundStatement;
+ } else {
+ this->emitTraceScope(mask, +1);
+ }
+
+ for (const std::unique_ptr<Statement>& stmt : b.children()) {
+ this->writeStatement(*stmt);
+ }
+
+ if (b.blockKind() == Block::Kind::kCompoundStatement) {
+ --fInsideCompoundStatement;
+ } else {
+ this->emitTraceScope(mask, -1);
+ }
+}
+
+void SkVMGenerator::writeBreakStatement() {
+ // Any active lanes stop executing for the duration of the current loop
+ fLoopMask &= ~this->mask();
+}
+
+void SkVMGenerator::writeContinueStatement() {
+ // Any active lanes stop executing for the current iteration.
+ // Remember them in fContinueMask, to be re-enabled later.
+ skvm::I32 mask = this->mask();
+ fLoopMask &= ~mask;
+ fContinueMask |= mask;
+}
+
+void SkVMGenerator::writeForStatement(const ForStatement& f) {
+ // We require that all loops be ES2-compliant (unrollable), and actually unroll them here
+ SkASSERT(f.unrollInfo());
+ const LoopUnrollInfo& loop = *f.unrollInfo();
+ SkASSERT(loop.fIndex->type().slotCount() == 1);
+
+ size_t indexSlot = this->getSlot(*loop.fIndex);
+ double val = loop.fStart;
+
+ const skvm::I32 zero = fBuilder->splat(0);
+ skvm::I32 oldLoopMask = fLoopMask,
+ oldContinueMask = fContinueMask;
+
+ const Type::NumberKind indexKind = base_number_kind(loop.fIndex->type());
+
+ // We want the loop index to disappear at the end of the loop, so wrap the for statement in a
+ // trace scope.
+ if (loop.fCount > 0) {
+ int line = this->getLine(f.test() ? f.test()->fPosition : f.fPosition);
+ skvm::I32 mask = this->mask();
+ this->emitTraceScope(mask, +1);
+
+ for (int i = 0; i < loop.fCount; ++i) {
+ this->writeToSlot(indexSlot, (indexKind == Type::NumberKind::kFloat)
+ ? fBuilder->splat(static_cast<float>(val)).id
+ : fBuilder->splat(static_cast<int>(val)).id);
+
+ fContinueMask = zero;
+ this->writeStatement(*f.statement());
+ fLoopMask |= fContinueMask;
+
+ this->emitTraceLine(line);
+ val += loop.fDelta;
+ }
+
+ this->emitTraceScope(mask, -1);
+ }
+
+ fLoopMask = oldLoopMask;
+ fContinueMask = oldContinueMask;
+}
+
+void SkVMGenerator::writeIfStatement(const IfStatement& i) {
+ Value test = this->writeExpression(*i.test());
+ {
+ ScopedCondition ifTrue(this, i32(test));
+ this->writeStatement(*i.ifTrue());
+ }
+ if (i.ifFalse()) {
+ ScopedCondition ifFalse(this, ~i32(test));
+ this->writeStatement(*i.ifFalse());
+ }
+}
+
+void SkVMGenerator::writeReturnStatement(const ReturnStatement& r) {
+ skvm::I32 returnsHere = this->mask();
+
+ if (r.expression()) {
+ Value val = this->writeExpression(*r.expression());
+
+ size_t slot = currentFunction().fReturnSlot;
+ size_t nslots = r.expression()->type().slotCount();
+ for (size_t i = 0; i < nslots; ++i) {
+ fSlots[slot + i].writtenTo = false;
+ skvm::Val conditionalStore = this->writeConditionalStore(fSlots[slot + i].val, val[i],
+ returnsHere);
+ this->writeToSlot(slot + i, conditionalStore);
+ }
+ }
+
+ currentFunction().fReturned |= returnsHere;
+}
+
+void SkVMGenerator::writeSwitchStatement(const SwitchStatement& s) {
+ skvm::I32 falseValue = fBuilder->splat( 0);
+ skvm::I32 trueValue = fBuilder->splat(~0);
+
+ // Create a "switchFallthough" scratch variable, initialized to false.
+ skvm::I32 switchFallthrough = falseValue;
+
+ // Loop masks behave just like for statements. When a break is encountered, it masks off all
+ // lanes for the rest of the body of the switch.
+ skvm::I32 oldLoopMask = fLoopMask;
+ Value switchValue = this->writeExpression(*s.value());
+
+ for (const std::unique_ptr<Statement>& stmt : s.cases()) {
+ const SwitchCase& c = stmt->as<SwitchCase>();
+ if (!c.isDefault()) {
+ Value caseValue = fBuilder->splat((int) c.value());
+
+ // We want to execute this switch case if we're falling through from a previous case, or
+ // if the case value matches.
+ ScopedCondition conditionalCaseBlock(
+ this,
+ switchFallthrough | (i32(caseValue) == i32(switchValue)));
+ this->writeStatement(*c.statement());
+
+ // If we are inside the case block, we set the fallthrough flag to true (`break` still
+ // works to stop the flow of execution regardless, since it zeroes out the loop-mask).
+ switchFallthrough.id = this->writeConditionalStore(switchFallthrough.id, trueValue.id,
+ this->mask());
+ } else {
+ // This is the default case. Since it's always last, we can just dump in the code.
+ this->writeStatement(*c.statement());
+ }
+ }
+
+ // Restore state.
+ fLoopMask = oldLoopMask;
+}
+
+void SkVMGenerator::writeVarDeclaration(const VarDeclaration& decl) {
+ size_t slot = this->getSlot(*decl.var()),
+ nslots = decl.var()->type().slotCount();
+
+ Value val = decl.value() ? this->writeExpression(*decl.value()) : Value{};
+ for (size_t i = 0; i < nslots; ++i) {
+ fSlots[slot + i].writtenTo = false;
+ this->writeToSlot(slot + i, val ? val[i] : fBuilder->splat(0.0f).id);
+ }
+}
+
+void SkVMGenerator::emitTraceLine(int line) {
+ if (fDebugTrace && line > 0 && fInsideCompoundStatement == 0) {
+ fBuilder->trace_line(fTraceHookID, this->mask(), fTraceMask, line);
+ }
+}
+
+void SkVMGenerator::emitTraceScope(skvm::I32 executionMask, int delta) {
+ if (fDebugTrace) {
+ fBuilder->trace_scope(fTraceHookID, executionMask, fTraceMask, delta);
+ }
+}
+
+void SkVMGenerator::writeStatement(const Statement& s) {
+ // The debugger should stop on all types of statements, except for Blocks.
+ if (!s.is<Block>()) {
+ this->emitTraceLine(this->getLine(s.fPosition));
+ }
+
+ switch (s.kind()) {
+ case Statement::Kind::kBlock:
+ this->writeBlock(s.as<Block>());
+ break;
+ case Statement::Kind::kBreak:
+ this->writeBreakStatement();
+ break;
+ case Statement::Kind::kContinue:
+ this->writeContinueStatement();
+ break;
+ case Statement::Kind::kExpression:
+ this->writeExpression(*s.as<ExpressionStatement>().expression());
+ break;
+ case Statement::Kind::kFor:
+ this->writeForStatement(s.as<ForStatement>());
+ break;
+ case Statement::Kind::kIf:
+ this->writeIfStatement(s.as<IfStatement>());
+ break;
+ case Statement::Kind::kReturn:
+ this->writeReturnStatement(s.as<ReturnStatement>());
+ break;
+ case Statement::Kind::kSwitch:
+ this->writeSwitchStatement(s.as<SwitchStatement>());
+ break;
+ case Statement::Kind::kVarDeclaration:
+ this->writeVarDeclaration(s.as<VarDeclaration>());
+ break;
+ case Statement::Kind::kDiscard:
+ case Statement::Kind::kDo:
+ SkDEBUGFAIL("Unsupported control flow");
+ break;
+ case Statement::Kind::kNop:
+ break;
+ default:
+ SkDEBUGFAIL("Unrecognized statement");
+ break;
+ }
+}
+
+skvm::Color ProgramToSkVM(const Program& program,
+ const FunctionDefinition& function,
+ skvm::Builder* builder,
+ SkVMDebugTrace* debugTrace,
+ SkSpan<skvm::Val> uniforms,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color inputColor,
+ skvm::Color destColor,
+ SkVMCallbacks* callbacks) {
+ skvm::Val zero = builder->splat(0.0f).id;
+ skvm::Val result[4] = {zero,zero,zero,zero};
+
+ skvm::Val args[8]; // At most 8 arguments (half4 srcColor, half4 dstColor)
+ size_t argSlots = 0;
+ for (const SkSL::Variable* param : function.declaration().parameters()) {
+ switch (param->modifiers().fLayout.fBuiltin) {
+ case SK_MAIN_COORDS_BUILTIN:
+ SkASSERT(param->type().slotCount() == 2);
+ SkASSERT((argSlots + 2) <= std::size(args));
+ args[argSlots++] = local.x.id;
+ args[argSlots++] = local.y.id;
+ break;
+ case SK_INPUT_COLOR_BUILTIN:
+ SkASSERT(param->type().slotCount() == 4);
+ SkASSERT((argSlots + 4) <= std::size(args));
+ args[argSlots++] = inputColor.r.id;
+ args[argSlots++] = inputColor.g.id;
+ args[argSlots++] = inputColor.b.id;
+ args[argSlots++] = inputColor.a.id;
+ break;
+ case SK_DEST_COLOR_BUILTIN:
+ SkASSERT(param->type().slotCount() == 4);
+ SkASSERT((argSlots + 4) <= std::size(args));
+ args[argSlots++] = destColor.r.id;
+ args[argSlots++] = destColor.g.id;
+ args[argSlots++] = destColor.b.id;
+ args[argSlots++] = destColor.a.id;
+ break;
+ default:
+ SkDEBUGFAIL("Invalid parameter to main()");
+ return {};
+ }
+ }
+ SkASSERT(argSlots <= std::size(args));
+
+ // Make sure that the SkVMDebugTrace starts from a clean slate.
+ if (debugTrace) {
+ debugTrace->fSlotInfo.clear();
+ debugTrace->fFuncInfo.clear();
+ debugTrace->fTraceInfo.clear();
+ }
+
+ SkVMGenerator generator(program, builder, debugTrace, callbacks);
+ generator.writeProgram(uniforms, device, function, {args, argSlots}, SkSpan(result));
+
+ return skvm::Color{{builder, result[0]},
+ {builder, result[1]},
+ {builder, result[2]},
+ {builder, result[3]}};
+}
+
+bool ProgramToSkVM(const Program& program,
+ const FunctionDefinition& function,
+ skvm::Builder* b,
+ SkVMDebugTrace* debugTrace,
+ SkSpan<skvm::Val> uniforms,
+ SkVMSignature* outSignature) {
+ SkVMSignature ignored,
+ *signature = outSignature ? outSignature : &ignored;
+
+ std::vector<skvm::Ptr> argPtrs;
+ std::vector<skvm::Val> argVals;
+
+ for (const Variable* p : function.declaration().parameters()) {
+ size_t slots = p->type().slotCount();
+ signature->fParameterSlots += slots;
+ for (size_t i = 0; i < slots; ++i) {
+ argPtrs.push_back(b->varying<float>());
+ argVals.push_back(b->loadF(argPtrs.back()).id);
+ }
+ }
+
+ std::vector<skvm::Ptr> returnPtrs;
+ std::vector<skvm::Val> returnVals;
+
+ signature->fReturnSlots = function.declaration().returnType().slotCount();
+ for (size_t i = 0; i < signature->fReturnSlots; ++i) {
+ returnPtrs.push_back(b->varying<float>());
+ returnVals.push_back(b->splat(0.0f).id);
+ }
+
+ class Callbacks : public SkVMCallbacks {
+ public:
+ Callbacks(skvm::Color color) : fColor(color) {}
+
+ skvm::Color sampleShader(int, skvm::Coord) override {
+ fUsedUnsupportedFeatures = true;
+ return fColor;
+ }
+ skvm::Color sampleColorFilter(int, skvm::Color) override {
+ fUsedUnsupportedFeatures = true;
+ return fColor;
+ }
+ skvm::Color sampleBlender(int, skvm::Color, skvm::Color) override {
+ fUsedUnsupportedFeatures = true;
+ return fColor;
+ }
+
+ skvm::Color toLinearSrgb(skvm::Color) override {
+ fUsedUnsupportedFeatures = true;
+ return fColor;
+ }
+ skvm::Color fromLinearSrgb(skvm::Color) override {
+ fUsedUnsupportedFeatures = true;
+ return fColor;
+ }
+
+ bool fUsedUnsupportedFeatures = false;
+ const skvm::Color fColor;
+ };
+
+ // Set up device coordinates so that the rightmost evaluated pixel will be centered on (0, 0).
+ // (If the coordinates aren't used, dead-code elimination will optimize this away.)
+ skvm::F32 pixelCenter = b->splat(0.5f);
+ skvm::Coord device = {pixelCenter, pixelCenter};
+ device.x += to_F32(b->splat(1) - b->index());
+
+ skvm::F32 zero = b->splat(0.0f);
+ skvm::Color sampledColor{zero, zero, zero, zero};
+ Callbacks callbacks(sampledColor);
+
+ SkVMGenerator generator(program, b, debugTrace, &callbacks);
+ generator.writeProgram(uniforms, device, function, SkSpan(argVals), SkSpan(returnVals));
+
+ // If the SkSL tried to use any shader, colorFilter, or blender objects - we don't have a
+ // mechanism (yet) for binding to those.
+ if (callbacks.fUsedUnsupportedFeatures) {
+ return false;
+ }
+
+ // generateCode has updated the contents of 'argVals' for any 'out' or 'inout' parameters.
+ // Propagate those changes back to our varying buffers:
+ size_t argIdx = 0;
+ for (const Variable* p : function.declaration().parameters()) {
+ size_t nslots = p->type().slotCount();
+ if (p->modifiers().fFlags & Modifiers::kOut_Flag) {
+ for (size_t i = 0; i < nslots; ++i) {
+ b->storeF(argPtrs[argIdx + i], skvm::F32{b, argVals[argIdx + i]});
+ }
+ }
+ argIdx += nslots;
+ }
+
+ // It's also updated the contents of 'returnVals' with the return value of the entry point.
+ // Store that as well:
+ for (size_t i = 0; i < signature->fReturnSlots; ++i) {
+ b->storeF(returnPtrs[i], skvm::F32{b, returnVals[i]});
+ }
+
+ return true;
+}
+
+/*
+ * Testing utility function that emits program's "main" with a minimal harness. Used to create
+ * representative skvm op sequences for SkSL tests.
+ */
+bool testingOnly_ProgramToSkVMShader(const Program& program,
+ skvm::Builder* builder,
+ SkVMDebugTrace* debugTrace) {
+ const SkSL::FunctionDeclaration* main = program.getFunction("main");
+ if (!main) {
+ return false;
+ }
+
+ size_t uniformSlots = 0;
+ int childSlots = 0;
+ for (const SkSL::ProgramElement* e : program.elements()) {
+ if (e->is<GlobalVarDeclaration>()) {
+ const GlobalVarDeclaration& decl = e->as<GlobalVarDeclaration>();
+ const Variable& var = *decl.varDeclaration().var();
+ if (var.type().isEffectChild()) {
+ childSlots++;
+ } else if (is_uniform(var)) {
+ uniformSlots += var.type().slotCount();
+ }
+ }
+ }
+
+ skvm::Uniforms uniforms(builder->uniform(), 0);
+
+ auto new_uni = [&]() { return builder->uniformF(uniforms.pushF(0.0f)); };
+
+ // Assume identity CTM
+ skvm::Coord device = {pun_to_F32(builder->index()), new_uni()};
+ // Position device coords at pixel centers, so debug traces will trigger
+ device.x += 0.5f;
+ device.y += 0.5f;
+ skvm::Coord local = device;
+
+ class Callbacks : public SkVMCallbacks {
+ public:
+ Callbacks(skvm::Builder* builder, skvm::Uniforms* uniforms, int numChildren) {
+ for (int i = 0; i < numChildren; ++i) {
+ fChildren.push_back(
+ {uniforms->pushPtr(nullptr), builder->uniform32(uniforms->push(0))});
+ }
+ }
+
+ skvm::Color sampleShader(int i, skvm::Coord coord) override {
+ skvm::PixelFormat pixelFormat = skvm::SkColorType_to_PixelFormat(kRGBA_F32_SkColorType);
+ skvm::I32 index = trunc(coord.x);
+ index += trunc(coord.y) * fChildren[i].rowBytesAsPixels;
+ return gather(pixelFormat, fChildren[i].addr, index);
+ }
+
+ skvm::Color sampleColorFilter(int i, skvm::Color color) override {
+ return color;
+ }
+
+ skvm::Color sampleBlender(int i, skvm::Color src, skvm::Color dst) override {
+ return blend(SkBlendMode::kSrcOver, src, dst);
+ }
+
+ // TODO(skia:10479): Make these actually convert to/from something like sRGB, for use in
+ // test files.
+ skvm::Color toLinearSrgb(skvm::Color color) override {
+ return color;
+ }
+ skvm::Color fromLinearSrgb(skvm::Color color) override {
+ return color;
+ }
+
+ struct Child {
+ skvm::Uniform addr;
+ skvm::I32 rowBytesAsPixels;
+ };
+ std::vector<Child> fChildren;
+ };
+ Callbacks callbacks(builder, &uniforms, childSlots);
+
+ std::vector<skvm::Val> uniformVals;
+ for (size_t i = 0; i < uniformSlots; ++i) {
+ uniformVals.push_back(new_uni().id);
+ }
+
+ skvm::Color inColor = builder->uniformColor(SkColors::kWhite, &uniforms);
+ skvm::Color destColor = builder->uniformColor(SkColors::kBlack, &uniforms);
+
+ skvm::Color result = SkSL::ProgramToSkVM(program, *main->definition(), builder, debugTrace,
+ SkSpan(uniformVals), device, local, inColor,
+ destColor, &callbacks);
+
+ storeF(builder->varying<float>(), result.r);
+ storeF(builder->varying<float>(), result.g);
+ storeF(builder->varying<float>(), result.b);
+ storeF(builder->varying<float>(), result.a);
+
+ return true;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLVMCodeGenerator.h b/gfx/skia/skia/src/sksl/codegen/SkSLVMCodeGenerator.h
new file mode 100644
index 0000000000..cfff7477bf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLVMCodeGenerator.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VMGENERATOR
+#define SKSL_VMGENERATOR
+
+#include "src/core/SkVM.h"
+
+#include <cstddef>
+
+template <typename T> class SkSpan;
+
+namespace SkSL {
+
+class FunctionDefinition;
+struct Program;
+class SkVMDebugTrace;
+
+class SkVMCallbacks {
+public:
+ virtual ~SkVMCallbacks() = default;
+
+ virtual skvm::Color sampleShader(int index, skvm::Coord coord) = 0;
+ virtual skvm::Color sampleColorFilter(int index, skvm::Color color) = 0;
+ virtual skvm::Color sampleBlender(int index, skvm::Color src, skvm::Color dst) = 0;
+
+ virtual skvm::Color toLinearSrgb(skvm::Color color) = 0;
+ virtual skvm::Color fromLinearSrgb(skvm::Color color) = 0;
+};
+
+// Convert 'function' to skvm instructions in 'builder', for use by blends, shaders, & color filters
+skvm::Color ProgramToSkVM(const Program& program,
+ const FunctionDefinition& function,
+ skvm::Builder* builder,
+ SkVMDebugTrace* debugTrace,
+ SkSpan<skvm::Val> uniforms,
+ skvm::Coord device,
+ skvm::Coord local,
+ skvm::Color inputColor,
+ skvm::Color destColor,
+ SkVMCallbacks* callbacks);
+
+struct SkVMSignature {
+ size_t fParameterSlots = 0;
+ size_t fReturnSlots = 0;
+};
+
+/*
+ * Converts 'function' to skvm instructions in 'builder'. Always adds one arg per value in the
+ * parameter list, then one per value in the return type. For example:
+ *
+ * float2 fn(float2 a, float b) { ... }
+ *
+ * ... is mapped so that it can be called as:
+ *
+ * p.eval(N, &a.x, &a.y, &b, &return.x, &return.y);
+ *
+ * The number of parameter and return slots (pointers) is placed in 'outSignature', if provided.
+ * If the program declares any uniforms, 'uniforms' should contain the IDs of each individual value
+ * (eg, one ID per component of a vector).
+ */
+bool ProgramToSkVM(const Program& program,
+ const FunctionDefinition& function,
+ skvm::Builder* b,
+ SkVMDebugTrace* debugTrace,
+ SkSpan<skvm::Val> uniforms,
+ SkVMSignature* outSignature = nullptr);
+
+bool testingOnly_ProgramToSkVMShader(const Program& program,
+ skvm::Builder* builder,
+ SkVMDebugTrace* debugTrace);
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLWGSLCodeGenerator.cpp b/gfx/skia/skia/src/sksl/codegen/SkSLWGSLCodeGenerator.cpp
new file mode 100644
index 0000000000..f5f593b33c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLWGSLCodeGenerator.cpp
@@ -0,0 +1,1939 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/codegen/SkSLWGSLCodeGenerator.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkBitmaskEnum.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLString.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTo.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLOutputStream.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorMatrixResize.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLStructDefinition.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+// TODO(skia:13092): This is a temporary debug feature. Remove when the implementation is
+// complete and this is no longer needed.
+#define DUMP_SRC_IR 0
+
+namespace SkSL {
+
+enum class ProgramKind : int8_t;
+
+namespace {
+
+// See https://www.w3.org/TR/WGSL/#memory-view-types
+enum class PtrAddressSpace {
+ kFunction,
+ kPrivate,
+ kStorage,
+};
+
+std::string_view pipeline_struct_prefix(ProgramKind kind) {
+ if (ProgramConfig::IsVertex(kind)) {
+ return "VS";
+ }
+ if (ProgramConfig::IsFragment(kind)) {
+ return "FS";
+ }
+ return "";
+}
+
+std::string_view address_space_to_str(PtrAddressSpace addressSpace) {
+ switch (addressSpace) {
+ case PtrAddressSpace::kFunction:
+ return "function";
+ case PtrAddressSpace::kPrivate:
+ return "private";
+ case PtrAddressSpace::kStorage:
+ return "storage";
+ }
+ SkDEBUGFAIL("unsupported ptr address space");
+ return "unsupported";
+}
+
+std::string_view to_scalar_type(const Type& type) {
+ SkASSERT(type.typeKind() == Type::TypeKind::kScalar);
+ switch (type.numberKind()) {
+ // Floating-point numbers in WebGPU currently always have 32-bit footprint and
+ // relaxed-precision is not supported without extensions. f32 is the only floating-point
+ // number type in WGSL (see the discussion on https://github.com/gpuweb/gpuweb/issues/658).
+ case Type::NumberKind::kFloat:
+ return "f32";
+ case Type::NumberKind::kSigned:
+ return "i32";
+ case Type::NumberKind::kUnsigned:
+ return "u32";
+ case Type::NumberKind::kBoolean:
+ return "bool";
+ case Type::NumberKind::kNonnumeric:
+ [[fallthrough]];
+ default:
+ break;
+ }
+ return type.name();
+}
+
+// Convert a SkSL type to a WGSL type. Handles all plain types except structure types
+// (see https://www.w3.org/TR/WGSL/#plain-types-section).
+std::string to_wgsl_type(const Type& type) {
+ switch (type.typeKind()) {
+ case Type::TypeKind::kScalar:
+ return std::string(to_scalar_type(type));
+ case Type::TypeKind::kVector: {
+ std::string_view ct = to_scalar_type(type.componentType());
+ return String::printf("vec%d<%.*s>", type.columns(), (int)ct.length(), ct.data());
+ }
+ case Type::TypeKind::kMatrix: {
+ std::string_view ct = to_scalar_type(type.componentType());
+ return String::printf(
+ "mat%dx%d<%.*s>", type.columns(), type.rows(), (int)ct.length(), ct.data());
+ }
+ case Type::TypeKind::kArray: {
+ std::string elementType = to_wgsl_type(type.componentType());
+ if (type.isUnsizedArray()) {
+ return String::printf("array<%s>", elementType.c_str());
+ }
+ return String::printf("array<%s, %d>", elementType.c_str(), type.columns());
+ }
+ default:
+ break;
+ }
+ return std::string(type.name());
+}
+
+// Create a mangled WGSL type name that can be used in function and variable declarations (regular
+// type names cannot be used in this manner since they may contain tokens that are not allowed in
+// symbol names).
+std::string to_mangled_wgsl_type_name(const Type& type) {
+ switch (type.typeKind()) {
+ case Type::TypeKind::kScalar:
+ return std::string(to_scalar_type(type));
+ case Type::TypeKind::kVector: {
+ std::string_view ct = to_scalar_type(type.componentType());
+ return String::printf("vec%d%.*s", type.columns(), (int)ct.length(), ct.data());
+ }
+ case Type::TypeKind::kMatrix: {
+ std::string_view ct = to_scalar_type(type.componentType());
+ return String::printf(
+ "mat%dx%d%.*s", type.columns(), type.rows(), (int)ct.length(), ct.data());
+ }
+ case Type::TypeKind::kArray: {
+ std::string elementType = to_wgsl_type(type.componentType());
+ if (type.isUnsizedArray()) {
+ return String::printf("arrayof%s", elementType.c_str());
+ }
+ return String::printf("array%dof%s", type.columns(), elementType.c_str());
+ }
+ default:
+ break;
+ }
+ return std::string(type.name());
+}
+
+std::string to_ptr_type(const Type& type,
+ PtrAddressSpace addressSpace = PtrAddressSpace::kFunction) {
+ return "ptr<" + std::string(address_space_to_str(addressSpace)) + ", " + to_wgsl_type(type) +
+ ">";
+}
+
+std::string_view wgsl_builtin_name(WGSLCodeGenerator::Builtin builtin) {
+ using Builtin = WGSLCodeGenerator::Builtin;
+ switch (builtin) {
+ case Builtin::kVertexIndex:
+ return "vertex_index";
+ case Builtin::kInstanceIndex:
+ return "instance_index";
+ case Builtin::kPosition:
+ return "position";
+ case Builtin::kFrontFacing:
+ return "front_facing";
+ case Builtin::kSampleIndex:
+ return "sample_index";
+ case Builtin::kFragDepth:
+ return "frag_depth";
+ case Builtin::kSampleMask:
+ return "sample_mask";
+ case Builtin::kLocalInvocationId:
+ return "local_invocation_id";
+ case Builtin::kLocalInvocationIndex:
+ return "local_invocation_index";
+ case Builtin::kGlobalInvocationId:
+ return "global_invocation_id";
+ case Builtin::kWorkgroupId:
+ return "workgroup_id";
+ case Builtin::kNumWorkgroups:
+ return "num_workgroups";
+ default:
+ break;
+ }
+
+ SkDEBUGFAIL("unsupported builtin");
+ return "unsupported";
+}
+
+std::string_view wgsl_builtin_type(WGSLCodeGenerator::Builtin builtin) {
+ using Builtin = WGSLCodeGenerator::Builtin;
+ switch (builtin) {
+ case Builtin::kVertexIndex:
+ return "u32";
+ case Builtin::kInstanceIndex:
+ return "u32";
+ case Builtin::kPosition:
+ return "vec4<f32>";
+ case Builtin::kFrontFacing:
+ return "bool";
+ case Builtin::kSampleIndex:
+ return "u32";
+ case Builtin::kFragDepth:
+ return "f32";
+ case Builtin::kSampleMask:
+ return "u32";
+ case Builtin::kLocalInvocationId:
+ return "vec3<u32>";
+ case Builtin::kLocalInvocationIndex:
+ return "u32";
+ case Builtin::kGlobalInvocationId:
+ return "vec3<u32>";
+ case Builtin::kWorkgroupId:
+ return "vec3<u32>";
+ case Builtin::kNumWorkgroups:
+ return "vec3<u32>";
+ default:
+ break;
+ }
+
+ SkDEBUGFAIL("unsupported builtin");
+ return "unsupported";
+}
+
+// Some built-in variables have a type that differs from their SkSL counterpart (e.g. signed vs
+// unsigned integer). We handle these cases with an explicit type conversion during a variable
+// reference. Returns the WGSL type of the conversion target if conversion is needed, otherwise
+// returns std::nullopt.
+std::optional<std::string_view> needs_builtin_type_conversion(const Variable& v) {
+ switch (v.modifiers().fLayout.fBuiltin) {
+ case SK_VERTEXID_BUILTIN:
+ case SK_INSTANCEID_BUILTIN:
+ return {"i32"};
+ default:
+ break;
+ }
+ return std::nullopt;
+}
+
+// Map a SkSL builtin flag to a WGSL builtin kind. Returns std::nullopt if `builtin` is not
+// not supported for WGSL.
+//
+// Also see //src/sksl/sksl_vert.sksl and //src/sksl/sksl_frag.sksl for supported built-ins.
+std::optional<WGSLCodeGenerator::Builtin> builtin_from_sksl_name(int builtin) {
+ using Builtin = WGSLCodeGenerator::Builtin;
+ switch (builtin) {
+ case SK_POSITION_BUILTIN:
+ [[fallthrough]];
+ case SK_FRAGCOORD_BUILTIN:
+ return {Builtin::kPosition};
+ case SK_VERTEXID_BUILTIN:
+ return {Builtin::kVertexIndex};
+ case SK_INSTANCEID_BUILTIN:
+ return {Builtin::kInstanceIndex};
+ case SK_CLOCKWISE_BUILTIN:
+ // TODO(skia:13092): While `front_facing` is the corresponding built-in, it does not
+ // imply a particular winding order. We correctly compute the face orientation based
+ // on how Skia configured the render pipeline for all references to this built-in
+ // variable (see `SkSL::Program::Inputs::fUseFlipRTUniform`).
+ return {Builtin::kFrontFacing};
+ default:
+ break;
+ }
+ return std::nullopt;
+}
+
+const SymbolTable* top_level_symbol_table(const FunctionDefinition& f) {
+ return f.body()->as<Block>().symbolTable()->fParent.get();
+}
+
+const char* delimiter_to_str(WGSLCodeGenerator::Delimiter delimiter) {
+ using Delim = WGSLCodeGenerator::Delimiter;
+ switch (delimiter) {
+ case Delim::kComma:
+ return ",";
+ case Delim::kSemicolon:
+ return ";";
+ case Delim::kNone:
+ default:
+ break;
+ }
+ return "";
+}
+
+// FunctionDependencyResolver visits the IR tree rooted at a particular function definition and
+// computes that function's dependencies on pipeline stage IO parameters. These are later used to
+// synthesize arguments when writing out function definitions.
+class FunctionDependencyResolver : public ProgramVisitor {
+public:
+ using Deps = WGSLCodeGenerator::FunctionDependencies;
+ using DepsMap = WGSLCodeGenerator::ProgramRequirements::DepsMap;
+
+ FunctionDependencyResolver(const Program* p,
+ const FunctionDeclaration* f,
+ DepsMap* programDependencyMap)
+ : fProgram(p), fFunction(f), fDependencyMap(programDependencyMap) {}
+
+ Deps resolve() {
+ fDeps = Deps::kNone;
+ this->visit(*fProgram);
+ return fDeps;
+ }
+
+private:
+ bool visitProgramElement(const ProgramElement& p) override {
+ // Only visit the program that matches the requested function.
+ if (p.is<FunctionDefinition>() && &p.as<FunctionDefinition>().declaration() == fFunction) {
+ return INHERITED::visitProgramElement(p);
+ }
+ // Continue visiting other program elements.
+ return false;
+ }
+
+ bool visitExpression(const Expression& e) override {
+ if (e.is<VariableReference>()) {
+ const VariableReference& v = e.as<VariableReference>();
+ const Modifiers& modifiers = v.variable()->modifiers();
+ if (v.variable()->storage() == Variable::Storage::kGlobal) {
+ if (modifiers.fFlags & Modifiers::kIn_Flag) {
+ fDeps |= Deps::kPipelineInputs;
+ }
+ if (modifiers.fFlags & Modifiers::kOut_Flag) {
+ fDeps |= Deps::kPipelineOutputs;
+ }
+ }
+ } else if (e.is<FunctionCall>()) {
+ // The current function that we're processing (`fFunction`) inherits the dependencies of
+ // functions that it makes calls to, because the pipeline stage IO parameters need to be
+ // passed down as an argument.
+ const FunctionCall& callee = e.as<FunctionCall>();
+
+ // Don't process a function again if we have already resolved it.
+ Deps* found = fDependencyMap->find(&callee.function());
+ if (found) {
+ fDeps |= *found;
+ } else {
+ // Store the dependencies that have been discovered for the current function so far.
+ // If `callee` directly or indirectly calls the current function, then this value
+ // will prevent an infinite recursion.
+ fDependencyMap->set(fFunction, fDeps);
+
+ // Separately traverse the called function's definition and determine its
+ // dependencies.
+ FunctionDependencyResolver resolver(fProgram, &callee.function(), fDependencyMap);
+ Deps calleeDeps = resolver.resolve();
+
+ // Store the callee's dependencies in the global map to avoid processing
+ // the function again for future calls.
+ fDependencyMap->set(&callee.function(), calleeDeps);
+
+ // Add to the current function's dependencies.
+ fDeps |= calleeDeps;
+ }
+ }
+ return INHERITED::visitExpression(e);
+ }
+
+ const Program* const fProgram;
+ const FunctionDeclaration* const fFunction;
+ DepsMap* const fDependencyMap;
+ Deps fDeps = Deps::kNone;
+
+ using INHERITED = ProgramVisitor;
+};
+
+WGSLCodeGenerator::ProgramRequirements resolve_program_requirements(const Program* program) {
+ bool mainNeedsCoordsArgument = false;
+ WGSLCodeGenerator::ProgramRequirements::DepsMap dependencies;
+
+ for (const ProgramElement* e : program->elements()) {
+ if (!e->is<FunctionDefinition>()) {
+ continue;
+ }
+
+ const FunctionDeclaration& decl = e->as<FunctionDefinition>().declaration();
+ if (decl.isMain()) {
+ for (const Variable* v : decl.parameters()) {
+ if (v->modifiers().fLayout.fBuiltin == SK_MAIN_COORDS_BUILTIN) {
+ mainNeedsCoordsArgument = true;
+ break;
+ }
+ }
+ }
+
+ FunctionDependencyResolver resolver(program, &decl, &dependencies);
+ dependencies.set(&decl, resolver.resolve());
+ }
+
+ return WGSLCodeGenerator::ProgramRequirements(std::move(dependencies), mainNeedsCoordsArgument);
+}
+
+int count_pipeline_inputs(const Program* program) {
+ int inputCount = 0;
+ for (const ProgramElement* e : program->elements()) {
+ if (e->is<GlobalVarDeclaration>()) {
+ const Variable* v = e->as<GlobalVarDeclaration>().varDeclaration().var();
+ if (v->modifiers().fFlags & Modifiers::kIn_Flag) {
+ inputCount++;
+ }
+ } else if (e->is<InterfaceBlock>()) {
+ const Variable* v = e->as<InterfaceBlock>().var();
+ if (v->modifiers().fFlags & Modifiers::kIn_Flag) {
+ inputCount++;
+ }
+ }
+ }
+ return inputCount;
+}
+
+static bool is_in_global_uniforms(const Variable& var) {
+ SkASSERT(var.storage() == VariableStorage::kGlobal);
+ return var.modifiers().fFlags & Modifiers::kUniform_Flag && !var.type().isOpaque();
+}
+
+} // namespace
+
// Emits the complete WGSL program and returns true when no errors were
// reported. The header (pipeline I/O structs, test uniforms) and body
// (translated program elements) are buffered separately because helper
// functions in fExtraFunctions are synthesized lazily while the body is
// written, yet must appear before it in the final output.
bool WGSLCodeGenerator::generateCode() {
    // The resources of a WGSL program are structured in the following way:
    // - Vertex and fragment stage attribute inputs and outputs are bundled
    //   inside synthetic structs called VSIn/VSOut/FSIn/FSOut.
    // - All uniform and storage type resources are declared in global scope.
    this->preprocessProgram();

    StringStream header;
    {
        AutoOutputStream outputToHeader(this, &header, &fIndentation);
        // TODO(skia:13092): Implement the following:
        // - global uniform/storage resource declarations, including interface blocks.
        this->writeStageInputStruct();
        this->writeStageOutputStruct();
        this->writeNonBlockUniformsForTests();
    }
    StringStream body;
    {
        AutoOutputStream outputToBody(this, &body, &fIndentation);
        for (const ProgramElement* e : fProgram.elements()) {
            this->writeProgramElement(*e);
        }

// TODO(skia:13092): This is a temporary debug feature. Remove when the implementation is
// complete and this is no longer needed.
#if DUMP_SRC_IR
        this->writeLine("\n----------");
        this->writeLine("Source IR:\n");
        for (const ProgramElement* e : fProgram.elements()) {
            this->writeLine(e->description().c_str());
        }
#endif
    }

    // Final output order: header, then any synthesized helpers, then the body.
    write_stringstream(header, *fOut);
    write_stringstream(fExtraFunctions, *fOut);
    write_stringstream(body, *fOut);
    return fContext.fErrors->errorCount() == 0;
}
+
+void WGSLCodeGenerator::preprocessProgram() {
+ fRequirements = resolve_program_requirements(&fProgram);
+ fPipelineInputCount = count_pipeline_inputs(&fProgram);
+}
+
+void WGSLCodeGenerator::write(std::string_view s) {
+ if (s.empty()) {
+ return;
+ }
+ if (fAtLineStart) {
+ for (int i = 0; i < fIndentation; i++) {
+ fOut->writeText(" ");
+ }
+ }
+ fOut->writeText(std::string(s).c_str());
+ fAtLineStart = false;
+}
+
+void WGSLCodeGenerator::writeLine(std::string_view s) {
+ this->write(s);
+ fOut->writeText("\n");
+ fAtLineStart = true;
+}
+
+void WGSLCodeGenerator::finishLine() {
+ if (!fAtLineStart) {
+ this->writeLine();
+ }
+}
+
+void WGSLCodeGenerator::writeName(std::string_view name) {
+ // Add underscore before name to avoid conflict with reserved words.
+ if (fReservedWords.contains(name)) {
+ this->write("_");
+ }
+ this->write(name);
+}
+
+void WGSLCodeGenerator::writeVariableDecl(const Type& type,
+ std::string_view name,
+ Delimiter delimiter) {
+ this->writeName(name);
+ this->write(": " + to_wgsl_type(type));
+ this->writeLine(delimiter_to_str(delimiter));
+}
+
+void WGSLCodeGenerator::writePipelineIODeclaration(Modifiers modifiers,
+ const Type& type,
+ std::string_view name,
+ Delimiter delimiter) {
+ // In WGSL, an entry-point IO parameter is "one of either a built-in value or
+ // assigned a location". However, some SkSL declarations, specifically sk_FragColor, can
+ // contain both a location and a builtin modifier. In addition, WGSL doesn't have a built-in
+ // equivalent for sk_FragColor as it relies on the user-defined location for a render
+ // target.
+ //
+ // Instead of special-casing sk_FragColor, we just give higher precedence to a location
+ // modifier if a declaration happens to both have a location and it's a built-in.
+ //
+ // Also see:
+ // https://www.w3.org/TR/WGSL/#input-output-locations
+ // https://www.w3.org/TR/WGSL/#attribute-location
+ // https://www.w3.org/TR/WGSL/#builtin-inputs-outputs
+ int location = modifiers.fLayout.fLocation;
+ if (location >= 0) {
+ this->writeUserDefinedIODecl(type, name, location, delimiter);
+ } else if (modifiers.fLayout.fBuiltin >= 0) {
+ auto builtin = builtin_from_sksl_name(modifiers.fLayout.fBuiltin);
+ if (builtin.has_value()) {
+ this->writeBuiltinIODecl(type, name, *builtin, delimiter);
+ }
+ }
+}
+
+void WGSLCodeGenerator::writeUserDefinedIODecl(const Type& type,
+ std::string_view name,
+ int location,
+ Delimiter delimiter) {
+ this->write("@location(" + std::to_string(location) + ") ");
+
+ // "User-defined IO of scalar or vector integer type must always be specified as
+ // @interpolate(flat)" (see https://www.w3.org/TR/WGSL/#interpolation)
+ if (type.isInteger() || (type.isVector() && type.componentType().isInteger())) {
+ this->write("@interpolate(flat) ");
+ }
+
+ this->writeVariableDecl(type, name, delimiter);
+}
+
+void WGSLCodeGenerator::writeBuiltinIODecl(const Type& type,
+ std::string_view name,
+ Builtin builtin,
+ Delimiter delimiter) {
+ this->write("@builtin(");
+ this->write(wgsl_builtin_name(builtin));
+ this->write(") ");
+
+ this->writeName(name);
+ this->write(": ");
+ this->write(wgsl_builtin_type(builtin));
+ this->writeLine(delimiter_to_str(delimiter));
+}
+
+void WGSLCodeGenerator::writeFunction(const FunctionDefinition& f) {
+ this->writeFunctionDeclaration(f.declaration());
+ this->write(" ");
+ this->writeBlock(f.body()->as<Block>());
+
+ if (f.declaration().isMain()) {
+ // We just emitted the user-defined main function. Next, we generate a program entry point
+ // that calls the user-defined main.
+ this->writeEntryPoint(f);
+ }
+}
+
+void WGSLCodeGenerator::writeFunctionDeclaration(const FunctionDeclaration& f) {
+ this->write("fn ");
+ this->write(f.mangledName());
+ this->write("(");
+ auto separator = SkSL::String::Separator();
+ if (this->writeFunctionDependencyParams(f)) {
+ separator(); // update the separator as parameters have been written
+ }
+ for (const Variable* param : f.parameters()) {
+ this->write(separator());
+ this->writeName(param->mangledName());
+ this->write(": ");
+
+ // Declare an "out" function parameter as a pointer.
+ if (param->modifiers().fFlags & Modifiers::kOut_Flag) {
+ this->write(to_ptr_type(param->type()));
+ } else {
+ this->write(to_wgsl_type(param->type()));
+ }
+ }
+ this->write(")");
+ if (!f.returnType().isVoid()) {
+ this->write(" -> ");
+ this->write(to_wgsl_type(f.returnType()));
+ }
+}
+
// Emits the WGSL @vertex/@fragment entry point that wraps the user-defined
// main. The entry point declares the synthesized stage-output struct, invokes
// main with whatever stage-I/O arguments the dependency analysis recorded,
// and returns the output struct.
void WGSLCodeGenerator::writeEntryPoint(const FunctionDefinition& main) {
    SkASSERT(main.declaration().isMain());

    // The input and output parameters for a vertex/fragment stage entry point function have the
    // FSIn/FSOut/VSIn/VSOut struct types that have been synthesized in generateCode(). An entry
    // point always has the same signature and acts as a trampoline to the user-defined main
    // function.
    std::string outputType;
    if (ProgramConfig::IsVertex(fProgram.fConfig->fKind)) {
        this->write("@vertex fn vertexMain(");
        // The stage-input parameter is omitted when the program has no pipeline inputs.
        if (fPipelineInputCount > 0) {
            this->write("_stageIn: VSIn");
        }
        this->writeLine(") -> VSOut {");
        outputType = "VSOut";
    } else if (ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
        this->write("@fragment fn fragmentMain(");
        if (fPipelineInputCount > 0) {
            this->write("_stageIn: FSIn");
        }
        this->writeLine(") -> FSOut {");
        outputType = "FSOut";
    } else {
        // Compute (and any other) program kinds are not supported yet.
        fContext.fErrors->error(Position(), "program kind not supported");
        return;
    }

    // Declare the stage output struct.
    fIndentation++;
    this->write("var _stageOut: ");
    this->write(outputType);
    this->writeLine(";");

    // Generate assignment to sk_FragColor built-in if the user-defined main returns a color.
    if (ProgramConfig::IsFragment(fProgram.fConfig->fKind)) {
        const SymbolTable* symbolTable = top_level_symbol_table(main);
        const Symbol* symbol = symbolTable->find("sk_FragColor");
        SkASSERT(symbol);
        if (main.declaration().returnType().matches(symbol->type())) {
            this->write("_stageOut.sk_FragColor = ");
        }
    }

    // Generate the function call to the user-defined main:
    this->write(main.declaration().mangledName());
    this->write("(");
    auto separator = SkSL::String::Separator();
    // Forward the synthesized stage-I/O arguments that main (or a transitive callee) requires.
    FunctionDependencies* deps = fRequirements.dependencies.find(&main.declaration());
    if (deps) {
        if ((*deps & FunctionDependencies::kPipelineInputs) != FunctionDependencies::kNone) {
            this->write(separator());
            this->write("_stageIn");
        }
        if ((*deps & FunctionDependencies::kPipelineOutputs) != FunctionDependencies::kNone) {
            this->write(separator());
            // The output struct is passed by pointer so main can write to it.
            this->write("&_stageOut");
        }
    }
    // TODO(armansito): Handle arbitrary parameters.
    if (main.declaration().parameters().size() != 0) {
        // Only a single float2 "main coordinates" parameter is supported so far.
        const Variable* v = main.declaration().parameters()[0];
        const Type& type = v->type();
        if (v->modifiers().fLayout.fBuiltin == SK_MAIN_COORDS_BUILTIN) {
            if (!type.matches(*fContext.fTypes.fFloat2)) {
                fContext.fErrors->error(
                        main.fPosition,
                        "main function has unsupported parameter: " + type.description());
                return;
            }

            this->write(separator());
            this->write("_stageIn.sk_FragCoord.xy");
        }
    }
    this->writeLine(");");
    this->writeLine("return _stageOut;");

    fIndentation--;
    this->writeLine("}");
}
+
+void WGSLCodeGenerator::writeStatement(const Statement& s) {
+ switch (s.kind()) {
+ case Statement::Kind::kBlock:
+ this->writeBlock(s.as<Block>());
+ break;
+ case Statement::Kind::kExpression:
+ this->writeExpressionStatement(s.as<ExpressionStatement>());
+ break;
+ case Statement::Kind::kIf:
+ this->writeIfStatement(s.as<IfStatement>());
+ break;
+ case Statement::Kind::kReturn:
+ this->writeReturnStatement(s.as<ReturnStatement>());
+ break;
+ case Statement::Kind::kVarDeclaration:
+ this->writeVarDeclaration(s.as<VarDeclaration>());
+ break;
+ default:
+ SkDEBUGFAILF("unsupported statement (kind: %d) %s",
+ static_cast<int>(s.kind()), s.description().c_str());
+ break;
+ }
+}
+
+void WGSLCodeGenerator::writeStatements(const StatementArray& statements) {
+ for (const auto& s : statements) {
+ if (!s->isEmpty()) {
+ this->writeStatement(*s);
+ this->finishLine();
+ }
+ }
+}
+
+void WGSLCodeGenerator::writeBlock(const Block& b) {
+ // Write scope markers if this block is a scope, or if the block is empty (since we need to emit
+ // something here to make the code valid).
+ bool isScope = b.isScope() || b.isEmpty();
+ if (isScope) {
+ this->writeLine("{");
+ fIndentation++;
+ }
+ this->writeStatements(b.children());
+ if (isScope) {
+ fIndentation--;
+ this->writeLine("}");
+ }
+}
+
+void WGSLCodeGenerator::writeExpressionStatement(const ExpressionStatement& s) {
+ if (Analysis::HasSideEffects(*s.expression())) {
+ this->writeExpression(*s.expression(), Precedence::kTopLevel);
+ this->write(";");
+ }
+}
+
+void WGSLCodeGenerator::writeIfStatement(const IfStatement& s) {
+ this->write("if (");
+ this->writeExpression(*s.test(), Precedence::kTopLevel);
+ this->write(") ");
+ this->writeStatement(*s.ifTrue());
+ if (s.ifFalse()) {
+ this->write("else ");
+ this->writeStatement(*s.ifFalse());
+ }
+}
+
+void WGSLCodeGenerator::writeReturnStatement(const ReturnStatement& s) {
+ this->write("return");
+ if (s.expression()) {
+ this->write(" ");
+ this->writeExpression(*s.expression(), Precedence::kTopLevel);
+ }
+ this->write(";");
+}
+
+void WGSLCodeGenerator::writeVarDeclaration(const VarDeclaration& varDecl) {
+ bool isConst = varDecl.var()->modifiers().fFlags & Modifiers::kConst_Flag;
+ if (isConst) {
+ this->write("let ");
+ } else {
+ this->write("var ");
+ }
+ this->writeName(varDecl.var()->mangledName());
+ this->write(": ");
+ this->write(to_wgsl_type(varDecl.var()->type()));
+
+ if (varDecl.value()) {
+ this->write(" = ");
+ this->writeExpression(*varDecl.value(), Precedence::kTopLevel);
+ } else if (isConst) {
+ SkDEBUGFAILF("A let-declared constant must specify a value");
+ }
+
+ this->write(";");
+}
+
// Dispatches an expression to its kind-specific emitter. `parentPrecedence` is
// forwarded so emitters can decide whether parenthesization is required.
// Expression kinds not yet supported trigger a debug failure (and emit nothing
// in release builds).
void WGSLCodeGenerator::writeExpression(const Expression& e, Precedence parentPrecedence) {
    switch (e.kind()) {
        case Expression::Kind::kBinary:
            this->writeBinaryExpression(e.as<BinaryExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorCompound:
            this->writeConstructorCompound(e.as<ConstructorCompound>(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorCompoundCast:
        case Expression::Kind::kConstructorScalarCast:
        case Expression::Kind::kConstructorSplat:
            // These casts/splats map directly onto WGSL's type-constructor syntax.
            this->writeAnyConstructor(e.asAnyConstructor(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorDiagonalMatrix:
            this->writeConstructorDiagonalMatrix(e.as<ConstructorDiagonalMatrix>(),
                                                 parentPrecedence);
            break;
        case Expression::Kind::kConstructorMatrixResize:
            this->writeConstructorMatrixResize(e.as<ConstructorMatrixResize>(), parentPrecedence);
            break;
        case Expression::Kind::kFieldAccess:
            this->writeFieldAccess(e.as<FieldAccess>());
            break;
        case Expression::Kind::kFunctionCall:
            this->writeFunctionCall(e.as<FunctionCall>());
            break;
        case Expression::Kind::kIndex:
            this->writeIndexExpression(e.as<IndexExpression>());
            break;
        case Expression::Kind::kLiteral:
            this->writeLiteral(e.as<Literal>());
            break;
        case Expression::Kind::kSwizzle:
            this->writeSwizzle(e.as<Swizzle>());
            break;
        case Expression::Kind::kTernary:
            this->writeTernaryExpression(e.as<TernaryExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kVariableReference:
            this->writeVariableReference(e.as<VariableReference>());
            break;
        default:
            SkDEBUGFAILF("unsupported expression (kind: %d) %s",
                         static_cast<int>(e.kind()),
                         e.description().c_str());
            break;
    }
}
+
// Emits a binary expression. Matrix equality is routed through a synthesized
// helper; vector equality is wrapped in all()/any() to reduce WGSL's
// component-wise result to a single bool.
void WGSLCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
                                              Precedence parentPrecedence) {
    const Expression& left = *b.left();
    const Expression& right = *b.right();
    Operator op = b.getOperator();

    // The equality and comparison operators are only supported for scalar and vector types.
    if (op.isEquality() && !left.type().isScalar() && !left.type().isVector()) {
        if (left.type().isMatrix()) {
            // `!=` is implemented as the negation of the synthesized `==` helper.
            if (op.kind() == OperatorKind::NEQ) {
                this->write("!");
            }
            this->writeMatrixEquality(left, right);
            return;
        }

        // TODO(skia:13092): Synthesize helper functions for structs and arrays.
        return;
    }

    Precedence precedence = op.getBinaryPrecedence();
    bool needParens = precedence >= parentPrecedence;

    // The equality operators ('=='/'!=') in WGSL apply component-wise to vectors and result in a
    // vector. We need to reduce the value to a boolean.
    if (left.type().isVector()) {
        if (op.kind() == Operator::Kind::EQEQ) {
            this->write("all");
            needParens = true;  // all()/any() require the parenthesized argument list
        } else if (op.kind() == Operator::Kind::NEQ) {
            this->write("any");
            needParens = true;
        }
    }

    if (needParens) {
        this->write("(");
    }

    // TODO(skia:13092): Correctly handle the case when lhs is a pointer.

    this->writeExpression(left, precedence);
    this->write(op.operatorName());
    this->writeExpression(right, precedence);

    if (needParens) {
        this->write(")");
    }
}
+
// Emits a field access. Regular struct fields use dot syntax on the base
// expression; fields of anonymous interface blocks are rewritten to reference
// the synthesized stage-I/O structs.
void WGSLCodeGenerator::writeFieldAccess(const FieldAccess& f) {
    const Type::Field* field = &f.base()->type().fields()[f.fieldIndex()];
    if (FieldAccess::OwnerKind::kDefault == f.ownerKind()) {
        this->writeExpression(*f.base(), Precedence::kPostfix);
        this->write(".");
    } else {
        // We are accessing a field in an anonymous interface block. If the field refers to a
        // pipeline IO parameter, then we access it via the synthesized IO structs. We make an
        // explicit exception for `sk_PointSize` which we declare as a placeholder variable in
        // global scope as it is not supported by WebGPU as a pipeline IO parameter (see comments
        // in `writeStageOutputStruct`).
        const Variable& v = *f.base()->as<VariableReference>().variable();
        if (v.modifiers().fFlags & Modifiers::kIn_Flag) {
            this->write("_stageIn.");
        } else if (v.modifiers().fFlags & Modifiers::kOut_Flag &&
                   field->fModifiers.fLayout.fBuiltin != SK_POINTSIZE_BUILTIN) {
            // The output struct is passed to functions by pointer, hence the dereference.
            this->write("(*_stageOut).");
        } else {
            // TODO(skia:13092): Reference the variable using the base name used for its
            // uniform/storage block global declaration.
        }
    }
    this->writeName(field->fName);
}
+
// Emits a function call. Calls with out-parameter arguments are redirected
// through a synthesized helper that implements GLSL copy-in/copy-out
// semantics; otherwise the callee is invoked directly, passing out-arguments
// by address.
void WGSLCodeGenerator::writeFunctionCall(const FunctionCall& c) {
    const FunctionDeclaration& func = c.function();

    // TODO(skia:13092): Handle intrinsic call as many of them need to be rewritten.

    // We implement function out-parameters by declaring them as pointers. SkSL follows GLSL's
    // out-parameter semantics, in which out-parameters are only written back to the original
    // variable after the function's execution is complete (see
    // https://www.khronos.org/opengl/wiki/Core_Language_(GLSL)#Parameters).
    //
    // In addition, SkSL supports swizzles and array index expressions to be passed into
    // out-parameters however WGSL does not allow taking their address into a pointer.
    //
    // We support these by wrapping each function call in a special helper, which internally stores
    // all out parameters in temporaries.

    // First detect which arguments are passed to out-parameters.
    const ExpressionArray& args = c.arguments();
    const std::vector<Variable*>& params = func.parameters();
    SkASSERT(SkToSizeT(args.size()) == params.size());

    // outVars[i] is the variable receiving argument i, or null for non-out arguments.
    bool foundOutParam = false;
    SkSTArray<16, VariableReference*> outVars;
    outVars.push_back_n(args.size(), static_cast<VariableReference*>(nullptr));

    for (int i = 0; i < args.size(); ++i) {
        if (params[i]->modifiers().fFlags & Modifiers::kOut_Flag) {
            // Find the expression's inner variable being written to. Assignability was verified at
            // IR generation time, so this should always succeed.
            Analysis::AssignmentInfo info;
            SkAssertResult(Analysis::IsAssignable(*args[i], &info));
            outVars[i] = info.fAssignedVar;
            foundOutParam = true;
        }
    }

    if (foundOutParam) {
        // The helper's (possibly underscore-prefixed) name replaces the callee's.
        this->writeName(this->writeOutParamHelper(c, args, outVars));
    } else {
        this->writeName(func.mangledName());
    }

    this->write("(");
    auto separator = SkSL::String::Separator();
    if (this->writeFunctionDependencyArgs(func)) {
        separator();
    }
    for (int i = 0; i < args.size(); ++i) {
        this->write(separator());
        if (outVars[i]) {
            // We need to take the address of the variable and pass it down as a pointer.
            this->write("&");
            this->writeExpression(*outVars[i], Precedence::kSequence);
        } else {
            this->writeExpression(*args[i], Precedence::kSequence);
        }
    }
    this->write(")");
}
+
+void WGSLCodeGenerator::writeIndexExpression(const IndexExpression& i) {
+ this->writeExpression(*i.base(), Precedence::kPostfix);
+ this->write("[");
+ this->writeExpression(*i.index(), Precedence::kTopLevel);
+ this->write("]");
+}
+
+void WGSLCodeGenerator::writeLiteral(const Literal& l) {
+ const Type& type = l.type();
+ if (type.isFloat() || type.isBoolean()) {
+ this->write(l.description(OperatorPrecedence::kTopLevel));
+ return;
+ }
+ SkASSERT(type.isInteger());
+ if (type.matches(*fContext.fTypes.fUInt)) {
+ this->write(std::to_string(l.intValue() & 0xffffffff));
+ this->write("u");
+ } else if (type.matches(*fContext.fTypes.fUShort)) {
+ this->write(std::to_string(l.intValue() & 0xffff));
+ this->write("u");
+ } else {
+ this->write(std::to_string(l.intValue()));
+ }
+}
+
+void WGSLCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
+ this->writeExpression(*swizzle.base(), Precedence::kPostfix);
+ this->write(".");
+ for (int c : swizzle.components()) {
+ SkASSERT(c >= 0 && c <= 3);
+ this->write(&("x\0y\0z\0w\0"[c * 2]));
+ }
+}
+
+void WGSLCodeGenerator::writeTernaryExpression(const TernaryExpression& t,
+ Precedence parentPrecedence) {
+ bool needParens = Precedence::kTernary >= parentPrecedence;
+ if (needParens) {
+ this->write("(");
+ }
+
+ // The trivial case is when neither branch has side effects and evaluate to a scalar or vector
+ // type. This can be represented with a call to the WGSL `select` intrinsic although it doesn't
+ // support short-circuiting.
+ if ((t.type().isScalar() || t.type().isVector()) && !Analysis::HasSideEffects(*t.ifTrue()) &&
+ !Analysis::HasSideEffects(*t.ifFalse())) {
+ this->write("select(");
+ this->writeExpression(*t.ifFalse(), Precedence::kTernary);
+ this->write(", ");
+ this->writeExpression(*t.ifTrue(), Precedence::kTernary);
+ this->write(", ");
+
+ bool isVector = t.type().isVector();
+ if (isVector) {
+ // Splat the condition expression into a vector.
+ this->write(String::printf("vec%d<bool>(", t.type().columns()));
+ }
+ this->writeExpression(*t.test(), Precedence::kTernary);
+ if (isVector) {
+ this->write(")");
+ }
+ this->write(")");
+ if (needParens) {
+ this->write(")");
+ }
+ return;
+ }
+
+ // TODO(skia:13092): WGSL does not support ternary expressions. To replicate the required
+ // short-circuting behavior we need to hoist the expression out into the surrounding block,
+ // convert it into an if statement that writes the result to a synthesized variable, and replace
+ // the original expression with a reference to that variable.
+ //
+ // Once hoisting is supported, we may want to use that for vector type expressions as well,
+ // since select above does a component-wise select
+}
+
// Emits a reference to a variable, inserting any required builtin type
// conversion, stage-I/O / global-uniform struct qualification, and pointer
// dereferencing for out-parameters.
void WGSLCodeGenerator::writeVariableReference(const VariableReference& r) {
    // TODO(skia:13092): Correctly handle RTflip for built-ins.
    const Variable& v = *r.variable();

    // Insert a conversion expression if this is a built-in variable whose type differs from the
    // SkSL.
    std::optional<std::string_view> conversion = needs_builtin_type_conversion(v);
    if (conversion.has_value()) {
        this->write(*conversion);
        this->write("(");
    }

    bool needsDeref = false;
    bool isSynthesizedOutParamArg = fOutParamArgVars.contains(&v);

    // When a variable is referenced in the context of a synthesized out-parameter helper argument,
    // two special rules apply:
    //     1. If it's accessed via a pipeline I/O or global uniforms struct, it should instead
    //        be referenced by name (since it's actually referring to a function parameter).
    //     2. Its type should be treated as a pointer and should be dereferenced as such.
    if (v.storage() == Variable::Storage::kGlobal && !isSynthesizedOutParamArg) {
        if (v.modifiers().fFlags & Modifiers::kIn_Flag) {
            this->write("_stageIn.");
        } else if (v.modifiers().fFlags & Modifiers::kOut_Flag) {
            this->write("(*_stageOut).");
        } else if (is_in_global_uniforms(v)) {
            this->write("_globalUniforms.");
        }
    } else if ((v.storage() == Variable::Storage::kParameter &&
                v.modifiers().fFlags & Modifiers::kOut_Flag) ||
               isSynthesizedOutParamArg) {
        // This is an out-parameter and its type is a pointer, which we need to dereference.
        // We wrap the dereference in parentheses in case the value is used in an access expression
        // later.
        needsDeref = true;
        this->write("(*");
    }

    this->writeName(v.mangledName());
    if (needsDeref) {
        this->write(")");
    }
    if (conversion.has_value()) {
        // Close the builtin type-conversion call opened above.
        this->write(")");
    }
}
+
+void WGSLCodeGenerator::writeAnyConstructor(const AnyConstructor& c, Precedence parentPrecedence) {
+ this->write(to_wgsl_type(c.type()));
+ this->write("(");
+ auto separator = SkSL::String::Separator();
+ for (const auto& e : c.argumentSpan()) {
+ this->write(separator());
+ this->writeExpression(*e, Precedence::kSequence);
+ }
+ this->write(")");
+}
+
+void WGSLCodeGenerator::writeConstructorCompound(const ConstructorCompound& c,
+ Precedence parentPrecedence) {
+ if (c.type().isVector()) {
+ this->writeConstructorCompoundVector(c, parentPrecedence);
+ } else if (c.type().isMatrix()) {
+ this->writeConstructorCompoundMatrix(c, parentPrecedence);
+ } else {
+ fContext.fErrors->error(c.fPosition, "unsupported compound constructor");
+ }
+}
+
// Emits a vector compound constructor. Most cases map directly onto WGSL's
// constructor syntax; the SkSL-only vec4(mat2x2) form is routed through a
// synthesized helper.
void WGSLCodeGenerator::writeConstructorCompoundVector(const ConstructorCompound& c,
                                                       Precedence parentPrecedence) {
    // WGSL supports constructing vectors from a mix of scalars and vectors but
    // not matrices (see https://www.w3.org/TR/WGSL/#type-constructor-expr).
    //
    // SkSL supports vec4(mat2x2) which we handle specially.
    if (c.type().columns() == 4 && c.argumentSpan().size() == 1) {
        const Expression& arg = *c.argumentSpan().front();
        if (arg.type().isMatrix()) {
            // This is the vec4(mat2x2) case.
            SkASSERT(arg.type().columns() == 2);
            SkASSERT(arg.type().rows() == 2);

            // Generate a helper so that the argument expression gets evaluated once.
            std::string name = String::printf("%s_from_%s",
                                              to_mangled_wgsl_type_name(c.type()).c_str(),
                                              to_mangled_wgsl_type_name(arg.type()).c_str());
            // fHelpers memoizes synthesized functions so each is emitted only once.
            if (!fHelpers.contains(name)) {
                fHelpers.add(name);
                std::string returnType = to_wgsl_type(c.type());
                std::string argType = to_wgsl_type(arg.type());
                fExtraFunctions.printf(
                        "fn %s(x: %s) -> %s {\n    return %s(x[0].xy, x[1].xy);\n}\n",
                        name.c_str(),
                        argType.c_str(),
                        returnType.c_str(),
                        returnType.c_str());
            }
            this->write(name);
            this->write("(");
            this->writeExpression(arg, Precedence::kSequence);
            this->write(")");
            return;
        }
    }
    this->writeAnyConstructor(c, parentPrecedence);
}
+
// Emits a matrix compound constructor. Argument lists that WGSL cannot accept
// directly are routed through a synthesized helper; otherwise the scalars and
// short vectors are grouped into per-column vector constructors inline.
void WGSLCodeGenerator::writeConstructorCompoundMatrix(const ConstructorCompound& c,
                                                       Precedence parentPrecedence) {
    SkASSERT(c.type().isMatrix());

    // Emit and invoke a matrix-constructor helper method if one is necessary.
    if (this->isMatrixConstructorHelperNeeded(c)) {
        this->write(this->getMatrixConstructorHelper(c));
        this->write("(");
        auto separator = String::Separator();
        for (const std::unique_ptr<Expression>& expr : c.arguments()) {
            this->write(separator());
            this->writeExpression(*expr, Precedence::kSequence);
        }
        this->write(")");
        return;
    }

    // WGSL doesn't allow creating matrices by passing in scalars and vectors in a jumble; it
    // requires your scalars to be grouped up into columns. As `isMatrixConstructorHelperNeeded`
    // returned false, we know that none of our scalars/vectors "wrap" across a column, so we
    // can group our inputs up and synthesize a constructor for each column.
    const Type& matrixType = c.type();
    const Type& columnType = matrixType.componentType().toCompound(
            fContext, /*columns=*/matrixType.rows(), /*rows=*/1);

    this->write(to_wgsl_type(matrixType));
    this->write("(");
    auto separator = String::Separator();
    // scalarCount tracks how far into the current column we are; zero means no
    // column-constructor is currently open.
    int scalarCount = 0;
    for (const std::unique_ptr<Expression>& arg : c.arguments()) {
        this->write(separator());
        if (arg->type().columns() < matrixType.rows()) {
            // Write a `floatN(` constructor to group scalars and smaller vectors together.
            if (!scalarCount) {
                this->write(to_wgsl_type(columnType));
                this->write("(");
            }
            scalarCount += arg->type().columns();
        }
        this->writeExpression(*arg, Precedence::kSequence);
        if (scalarCount && scalarCount == matrixType.rows()) {
            // Close our `floatN(...` constructor block from above.
            this->write(")");
            scalarCount = 0;
        }
    }
    this->write(")");
}
+
// Emits a diagonal-matrix constructor (e.g. mat3(x)). WGSL has no diagonal
// constructor, so a helper is synthesized that expands the scalar into the
// diagonal cells and fills the rest with zeros.
void WGSLCodeGenerator::writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c,
                                                       Precedence parentPrecedence) {
    const Type& type = c.type();
    SkASSERT(type.isMatrix());
    SkASSERT(c.argument()->type().isScalar());

    // Generate a helper so that the argument expression gets evaluated once.
    std::string name = String::printf("%s_diagonal", to_mangled_wgsl_type_name(type).c_str());
    if (!fHelpers.contains(name)) {
        fHelpers.add(name);

        std::string typeName = to_wgsl_type(type);
        fExtraFunctions.printf("fn %s(x: %s) -> %s {\n",
                               name.c_str(),
                               to_wgsl_type(c.argument()->type()).c_str(),
                               typeName.c_str());
        fExtraFunctions.printf("    return %s(", typeName.c_str());
        auto separator = String::Separator();
        // Column-major emission: `x` on the diagonal (col == row), 0.0 elsewhere.
        for (int col = 0; col < type.columns(); ++col) {
            for (int row = 0; row < type.rows(); ++row) {
                fExtraFunctions.printf("%s%s", separator().c_str(), (col == row) ? "x" : "0.0");
            }
        }
        fExtraFunctions.printf(");\n}\n");
    }
    this->write(name);
    this->write("(");
    this->writeExpression(*c.argument(), Precedence::kSequence);
    this->write(")");
}
+
+void WGSLCodeGenerator::writeConstructorMatrixResize(const ConstructorMatrixResize& c,
+ Precedence parentPrecedence) {
+ this->write(this->getMatrixConstructorHelper(c));
+ this->write("(");
+ this->writeExpression(*c.argument(), Precedence::kSequence);
+ this->write(")");
+}
+
// Returns true when a matrix compound constructor cannot be emitted inline and
// needs a synthesized helper function instead.
bool WGSLCodeGenerator::isMatrixConstructorHelperNeeded(const ConstructorCompound& c) {
    // WGSL supports 3 categories of matrix constructors:
    //     1. Identity construction from a matrix of identical dimensions (handled as
    //        ConstructorCompoundCast);
    //     2. Column-major construction by elements (scalars);
    //     3. Column-by-column construction from vectors.
    //
    // WGSL does not have a diagonal constructor. In addition, SkSL (like GLSL) supports free-form
    // inputs that combine vectors, matrices, and scalars.
    //
    // Some cases are simple to translate and so we handle those inline--e.g. a list of scalars can
    // be constructed trivially. In more complex cases, we generate a helper function that converts
    // our inputs into a properly-shaped matrix.
    //
    // A matrix constructor helper method is always used if any input argument is a matrix.
    // Helper methods are also necessary when any argument would span multiple rows. For instance:
    //
    // float2 x = (1, 2);
    // float3x2(x, 3, 4, 5, 6) = | 1 3 5 | = no helper needed; conversion can be done inline
    //                           | 2 4 6 |
    //
    // float2 x = (2, 3);
    // float3x2(1, x, 4, 5, 6) = | 1 3 5 | = x spans multiple rows; a helper method will be used
    //                           | 2 4 6 |
    //
    // float4 x = (1, 2, 3, 4);
    // float2x2(x) = | 1 3 | = x spans multiple rows; a helper method will be used
    //               | 2 4 |
    //

    // `position` counts scalars consumed within the current column; it is compared against the
    // matrix's row count (the length of one column in column-major storage).
    int position = 0;
    for (const std::unique_ptr<Expression>& expr : c.arguments()) {
        if (expr->type().isMatrix()) {
            return true;
        }
        position += expr->type().columns();
        if (position > c.type().rows()) {
            // An input argument would span multiple rows; a helper function is required.
            return true;
        }
        if (position == c.type().rows()) {
            // We've advanced to the end of a row. Wrap to the start of the next row.
            position = 0;
        }
    }
    return false;
}
+
// Returns the name of a synthesized helper function that assembles the matrix
// `c.type()` from the constructor's argument list, emitting the helper into
// fExtraFunctions the first time each unique signature is requested.
std::string WGSLCodeGenerator::getMatrixConstructorHelper(const AnyConstructor& c) {
    const Type& type = c.type();
    int columns = type.columns();
    int rows = type.rows();
    auto args = c.argumentSpan();
    std::string typeName = to_wgsl_type(type);

    // Create the helper-method name and use it as our lookup key.
    // It encodes the result type and every argument type, e.g. "mat2x2f_from_f32_vec3f".
    std::string name = String::printf("%s_from", to_mangled_wgsl_type_name(type).c_str());
    for (const std::unique_ptr<Expression>& expr : args) {
        String::appendf(&name, "_%s", to_mangled_wgsl_type_name(expr->type()).c_str());
    }

    // If a helper-method has not been synthesized yet, create it now.
    if (!fHelpers.contains(name)) {
        fHelpers.add(name);

        fExtraFunctions.printf("fn %s(", name.c_str());

        // Parameters are named positionally: x0, x1, ...
        auto separator = String::Separator();
        for (size_t i = 0; i < args.size(); ++i) {
            fExtraFunctions.printf(
                    "%sx%zu: %s", separator().c_str(), i, to_wgsl_type(args[i]->type()).c_str());
        }

        fExtraFunctions.printf(") -> %s {\n    return %s(", typeName.c_str(), typeName.c_str());

        // A single matrix argument is a resize; anything else is a scalar/vector mix.
        if (args.size() == 1 && args.front()->type().isMatrix()) {
            this->writeMatrixFromMatrixArgs(args.front()->type(), columns, rows);
        } else {
            this->writeMatrixFromScalarAndVectorArgs(c, columns, rows);
        }

        fExtraFunctions.writeText(");\n}\n");
    }
    return name;
}
+
// Assembles a matrix by resizing another matrix named `x0`.
// Cells that don't exist in the source matrix will be populated with identity-matrix values.
// The output is written into fExtraFunctions as the interior of a constructor
// call (getMatrixConstructorHelper supplies the surrounding text).
void WGSLCodeGenerator::writeMatrixFromMatrixArgs(const Type& sourceMatrix, int columns, int rows) {
    SkASSERT(rows <= 4);
    SkASSERT(columns <= 4);

    // `separator` both closes the previous column's vector constructor and separates columns.
    const char* separator = "";
    std::string matrixType = to_wgsl_type(sourceMatrix.componentType());
    for (int c = 0; c < columns; ++c) {
        fExtraFunctions.printf("%svec%d<%s>(", separator, rows, matrixType.c_str());
        separator = "), ";

        // Determine how many values to take from the source matrix for this row.
        int swizzleLength = 0;
        if (c < sourceMatrix.columns()) {
            swizzleLength = std::min<>(rows, sourceMatrix.rows());
        }

        // Emit all the values from the source matrix row.
        bool firstItem;
        switch (swizzleLength) {
            case 0:
                firstItem = true;
                break;
            case 1:
                firstItem = false;
                fExtraFunctions.printf("x0[%d].x", c);
                break;
            case 2:
                firstItem = false;
                fExtraFunctions.printf("x0[%d].xy", c);
                break;
            case 3:
                firstItem = false;
                fExtraFunctions.printf("x0[%d].xyz", c);
                break;
            case 4:
                firstItem = false;
                fExtraFunctions.printf("x0[%d].xyzw", c);
                break;
            default:
                SkUNREACHABLE;
        }

        // Emit the placeholder identity-matrix cells: 1.0 on the diagonal, 0.0 elsewhere.
        for (int r = swizzleLength; r < rows; ++r) {
            fExtraFunctions.printf("%s%s", firstItem ? "" : ", ", (r == c) ? "1.0" : "0.0");
            firstItem = false;
        }
    }

    // Close the final column's vector constructor.
    fExtraFunctions.writeText(")");
}
+
// Assembles a matrix of type by concatenating an arbitrary mix of scalar and vector values, named
// `x0`, `x1`, etc. An error is written if the expression list doesn't contain exactly C*R scalars.
// Like writeMatrixFromMatrixArgs, this emits only the constructor interior
// into fExtraFunctions.
void WGSLCodeGenerator::writeMatrixFromScalarAndVectorArgs(const AnyConstructor& ctor,
                                                           int columns,
                                                           int rows) {
    SkASSERT(rows <= 4);
    SkASSERT(columns <= 4);

    std::string matrixType = to_wgsl_type(ctor.type().componentType());
    // argIndex selects the current argument (x0, x1, ...); argPosition is the scalar offset
    // consumed within that argument so vectors/matrices can be split across columns.
    size_t argIndex = 0;
    int argPosition = 0;
    auto args = ctor.argumentSpan();

    static constexpr char kSwizzle[] = "xyzw";
    const char* separator = "";
    for (int c = 0; c < columns; ++c) {
        fExtraFunctions.printf("%svec%d<%s>(", separator, rows, matrixType.c_str());
        separator = "), ";

        auto columnSeparator = String::Separator();
        // `r` advances inside the case bodies below, one step per scalar emitted.
        for (int r = 0; r < rows;) {
            fExtraFunctions.writeText(columnSeparator().c_str());
            if (argIndex < args.size()) {
                const Type& argType = args[argIndex]->type();
                switch (argType.typeKind()) {
                    case Type::TypeKind::kScalar: {
                        fExtraFunctions.printf("x%zu", argIndex);
                        ++r;
                        ++argPosition;
                        break;
                    }
                    case Type::TypeKind::kVector: {
                        // Swizzle out as many components as this column still needs.
                        fExtraFunctions.printf("x%zu.", argIndex);
                        do {
                            fExtraFunctions.write8(kSwizzle[argPosition]);
                            ++r;
                            ++argPosition;
                        } while (r < rows && argPosition < argType.columns());
                        break;
                    }
                    case Type::TypeKind::kMatrix: {
                        // Index into the source matrix column, then swizzle within it.
                        fExtraFunctions.printf("x%zu[%d].", argIndex, argPosition / argType.rows());
                        do {
                            fExtraFunctions.write8(kSwizzle[argPosition]);
                            ++r;
                            ++argPosition;
                        } while (r < rows && (argPosition % argType.rows()) != 0);
                        break;
                    }
                    default: {
                        SkDEBUGFAIL("incorrect type of argument for matrix constructor");
                        fExtraFunctions.writeText("<error>");
                        break;
                    }
                }

                // Move on to the next argument once this one is fully consumed.
                if (argPosition >= argType.columns() * argType.rows()) {
                    ++argIndex;
                    argPosition = 0;
                }
            } else {
                SkDEBUGFAIL("not enough arguments for matrix constructor");
                fExtraFunctions.writeText("<error>");
            }
        }
    }

    // Any leftover argument data means the caller supplied too many scalars.
    if (argPosition != 0 || argIndex != args.size()) {
        SkDEBUGFAIL("incorrect number of arguments for matrix constructor");
        fExtraFunctions.writeText(", <error>");
    }

    fExtraFunctions.writeText(")");
}
+
+// Writes a call to a synthesized helper that compares two same-shaped matrices column by column
+// via `all(left[i] == right[i])`, since matrix equality is not directly expressible in WGSL.
+// The helper is emitted into fExtraFunctions at most once per type pair (memoized in fHelpers).
+void WGSLCodeGenerator::writeMatrixEquality(const Expression& left, const Expression& right) {
+    const Type& leftType = left.type();
+    const Type& rightType = right.type();
+    SkASSERT(leftType.isMatrix());
+    SkASSERT(rightType.isMatrix());
+    SkASSERT(leftType.rows() == rightType.rows());
+    SkASSERT(leftType.columns() == rightType.columns());
+
+    std::string name = String::printf("%s_eq_%s",
+                                      to_mangled_wgsl_type_name(leftType).c_str(),
+                                      to_mangled_wgsl_type_name(rightType).c_str());
+    if (!fHelpers.contains(name)) {
+        fHelpers.add(name);
+        fExtraFunctions.printf("fn %s(left: %s, right: %s) -> bool {\n    return ",
+                               name.c_str(),
+                               to_wgsl_type(leftType).c_str(),
+                               to_wgsl_type(rightType).c_str());
+        const char* separator = "";
+        for (int i = 0; i < leftType.columns(); ++i) {
+            fExtraFunctions.printf("%sall(left[%d] == right[%d])", separator, i, i);
+            separator = " &&\n           ";
+        }
+        fExtraFunctions.printf(";\n}\n");
+    }
+    this->write(name);
+    this->write("(");
+    this->writeExpression(left, Precedence::kSequence);
+    this->write(", ");
+    this->writeExpression(right, Precedence::kSequence);
+    this->write(")");
+}
+
+// Dispatches one top-level program element to its writer. Elements that are handled elsewhere
+// (interface blocks, pipeline I/O) or have no WGSL equivalent (prototypes, extensions) are
+// deliberately skipped here.
+void WGSLCodeGenerator::writeProgramElement(const ProgramElement& e) {
+    switch (e.kind()) {
+        case ProgramElement::Kind::kExtension:
+            // TODO(skia:13092): WGSL supports extensions via the "enable" directive
+            // (https://www.w3.org/TR/WGSL/#language-extensions). While we could easily emit this
+            // directive, we should first ensure that all possible SkSL extension names are
+            // converted to their appropriate WGSL extension. Currently there are no known supported
+            // WGSL extensions aside from the hypotheticals listed in the spec.
+            break;
+        case ProgramElement::Kind::kGlobalVar:
+            this->writeGlobalVarDeclaration(e.as<GlobalVarDeclaration>());
+            break;
+        case ProgramElement::Kind::kInterfaceBlock:
+            // All interface block declarations are handled explicitly as the "program header" in
+            // generateCode().
+            break;
+        case ProgramElement::Kind::kStructDefinition:
+            this->writeStructDefinition(e.as<StructDefinition>());
+            break;
+        case ProgramElement::Kind::kFunctionPrototype:
+            // A WGSL function declaration must contain its body and the function name is in scope
+            // for the entire program (see https://www.w3.org/TR/WGSL/#function-declaration and
+            // https://www.w3.org/TR/WGSL/#declaration-and-scope).
+            //
+            // As such, we don't emit function prototypes.
+            break;
+        case ProgramElement::Kind::kFunction:
+            this->writeFunction(e.as<FunctionDefinition>());
+            break;
+        default:
+            SkDEBUGFAILF("unsupported program element: %s\n", e.description().c_str());
+            break;
+    }
+}
+
+// Emits a module-scope variable as `var<private>`. Pipeline I/O variables and top-level
+// uniforms are skipped; those are declared by the pipeline-struct / uniform-struct writers.
+void WGSLCodeGenerator::writeGlobalVarDeclaration(const GlobalVarDeclaration& d) {
+    const Variable& var = *d.declaration()->as<VarDeclaration>().var();
+    if ((var.modifiers().fFlags & (Modifiers::kIn_Flag | Modifiers::kOut_Flag)) ||
+        is_in_global_uniforms(var)) {
+        // Pipeline stage I/O parameters and top-level (non-block) uniforms are handled specially
+        // in generateCode().
+        return;
+    }
+
+    // TODO(skia:13092): Implement workgroup variable decoration
+    this->write("var<private> ");
+    this->writeVariableDecl(var.type(), var.name(), Delimiter::kSemicolon);
+}
+
+// Emits a WGSL `struct` declaration for a user-defined SkSL struct type.
+void WGSLCodeGenerator::writeStructDefinition(const StructDefinition& s) {
+    const Type& type = s.type();
+    this->writeLine("struct " + type.displayName() + " {");
+    fIndentation++;
+    this->writeFields(SkSpan(type.fields()), type.fPosition);
+    fIndentation--;
+    this->writeLine("};");
+}
+
+// Emits the comma-delimited member declarations of a struct or interface block.
+// The MemoryLayout parameter is currently accepted but unused (see TODO below).
+void WGSLCodeGenerator::writeFields(SkSpan<const Type::Field> fields,
+                                    Position parentPos,
+                                    const MemoryLayout*) {
+    // TODO(skia:13092): Check alignment against `layout` constraints, if present. A layout
+    // constraint will be specified for interface blocks and for structs that appear in a block.
+    for (const Type::Field& field : fields) {
+        const Type* fieldType = field.fType;
+        this->writeVariableDecl(*fieldType, field.fName, Delimiter::kComma);
+    }
+}
+
+// Declares the `<prefix>In` struct that bundles all pipeline-stage input variables (both bare
+// `in` globals and the members of `in` interface blocks), synthesizing the sk_FragCoord builtin
+// if main needs it but no declaration provided it.
+void WGSLCodeGenerator::writeStageInputStruct() {
+    std::string_view structNamePrefix = pipeline_struct_prefix(fProgram.fConfig->fKind);
+    if (structNamePrefix.empty()) {
+        // There's no need to declare pipeline stage inputs.
+        return;
+    }
+
+    // It is illegal to declare a struct with no members.
+    if (fPipelineInputCount < 1) {
+        return;
+    }
+
+    this->write("struct ");
+    this->write(structNamePrefix);
+    this->writeLine("In {");
+    fIndentation++;
+
+    bool declaredFragCoordsBuiltin = false;
+    for (const ProgramElement* e : fProgram.elements()) {
+        if (e->is<GlobalVarDeclaration>()) {
+            const Variable* v = e->as<GlobalVarDeclaration>().declaration()
+                    ->as<VarDeclaration>().var();
+            if (v->modifiers().fFlags & Modifiers::kIn_Flag) {
+                this->writePipelineIODeclaration(v->modifiers(), v->type(), v->mangledName(),
+                                                 Delimiter::kComma);
+                if (v->modifiers().fLayout.fBuiltin == SK_FRAGCOORD_BUILTIN) {
+                    declaredFragCoordsBuiltin = true;
+                }
+            }
+        } else if (e->is<InterfaceBlock>()) {
+            const Variable* v = e->as<InterfaceBlock>().var();
+            // Merge all the members of `in` interface blocks to the input struct, which are
+            // specified as either "builtin" or with a "layout(location=".
+            //
+            // TODO(armansito): Is it legal to have an interface block without a storage qualifier
+            // but with members that have individual storage qualifiers?
+            if (v->modifiers().fFlags & Modifiers::kIn_Flag) {
+                for (const auto& f : v->type().fields()) {
+                    this->writePipelineIODeclaration(f.fModifiers, *f.fType, f.fName,
+                                                     Delimiter::kComma);
+                    if (f.fModifiers.fLayout.fBuiltin == SK_FRAGCOORD_BUILTIN) {
+                        declaredFragCoordsBuiltin = true;
+                    }
+                }
+            }
+        }
+    }
+
+    // Fragment shaders that read coordinates need sk_FragCoord even if it was never declared.
+    if (ProgramConfig::IsFragment(fProgram.fConfig->fKind) &&
+        fRequirements.mainNeedsCoordsArgument && !declaredFragCoordsBuiltin) {
+        this->writeLine("@builtin(position) sk_FragCoord: vec4<f32>,");
+    }
+
+    fIndentation--;
+    this->writeLine("};");
+}
+
+// Declares the `<prefix>Out` struct that bundles all pipeline-stage output variables, plus the
+// mandatory `@builtin(position)` for vertex programs and a placeholder for sk_PointSize.
+void WGSLCodeGenerator::writeStageOutputStruct() {
+    std::string_view structNamePrefix = pipeline_struct_prefix(fProgram.fConfig->fKind);
+    if (structNamePrefix.empty()) {
+        // There's no need to declare pipeline stage outputs.
+        return;
+    }
+
+    this->write("struct ");
+    this->write(structNamePrefix);
+    this->writeLine("Out {");
+    fIndentation++;
+
+    // TODO(skia:13092): Remember all variables that are added to the output struct here so they
+    // can be referenced correctly when handling variable references.
+    bool declaredPositionBuiltin = false;
+    bool requiresPointSizeBuiltin = false;
+    for (const ProgramElement* e : fProgram.elements()) {
+        if (e->is<GlobalVarDeclaration>()) {
+            const Variable* v = e->as<GlobalVarDeclaration>().declaration()
+                    ->as<VarDeclaration>().var();
+            if (v->modifiers().fFlags & Modifiers::kOut_Flag) {
+                this->writePipelineIODeclaration(v->modifiers(), v->type(), v->mangledName(),
+                                                 Delimiter::kComma);
+            }
+        } else if (e->is<InterfaceBlock>()) {
+            const Variable* v = e->as<InterfaceBlock>().var();
+            // Merge all the members of `out` interface blocks to the output struct, which are
+            // specified as either "builtin" or with a "layout(location=".
+            //
+            // TODO(armansito): Is it legal to have an interface block without a storage qualifier
+            // but with members that have individual storage qualifiers?
+            if (v->modifiers().fFlags & Modifiers::kOut_Flag) {
+                for (const auto& f : v->type().fields()) {
+                    this->writePipelineIODeclaration(f.fModifiers, *f.fType, f.fName,
+                                                     Delimiter::kComma);
+                    if (f.fModifiers.fLayout.fBuiltin == SK_POSITION_BUILTIN) {
+                        declaredPositionBuiltin = true;
+                    } else if (f.fModifiers.fLayout.fBuiltin == SK_POINTSIZE_BUILTIN) {
+                        // sk_PointSize is explicitly not supported by `builtin_from_sksl_name` so
+                        // writePipelineIODeclaration will never write it. We mark it here if the
+                        // declaration is needed so we can synthesize it below.
+                        requiresPointSizeBuiltin = true;
+                    }
+                }
+            }
+        }
+    }
+
+    // A vertex program must include the `position` builtin in its entry point return type.
+    // NOTE(review): declaredPositionBuiltin is only set for interface-block members above, not
+    // for bare global `out` variables — presumably sk_Position always arrives via an interface
+    // block; confirm.
+    if (ProgramConfig::IsVertex(fProgram.fConfig->fKind) && !declaredPositionBuiltin) {
+        this->writeLine("@builtin(position) sk_Position: vec4<f32>,");
+    }
+
+    fIndentation--;
+    this->writeLine("};");
+
+    // In WebGPU/WGSL, the vertex stage does not support a point-size output and the size
+    // of a point primitive is always 1 pixel (see https://github.com/gpuweb/gpuweb/issues/332).
+    //
+    // There isn't anything we can do to emulate this correctly at this stage so we
+    // synthesize a placeholder variable that has no effect. Programs should not rely on
+    // sk_PointSize when using the Dawn backend.
+    if (ProgramConfig::IsVertex(fProgram.fConfig->fKind) && requiresPointSizeBuiltin) {
+        this->writeLine("/* unsupported */ var<private> sk_PointSize: f32;");
+    }
+}
+
+// Bundles all bare (non-interface-block) global uniforms into a synthesized `_GlobalUniforms`
+// struct bound at the program's default uniform binding/set. The struct declaration is opened
+// lazily on the first qualifying uniform and closed only if one was found.
+void WGSLCodeGenerator::writeNonBlockUniformsForTests() {
+    for (const ProgramElement* e : fProgram.elements()) {
+        if (e->is<GlobalVarDeclaration>()) {
+            const GlobalVarDeclaration& decls = e->as<GlobalVarDeclaration>();
+            const Variable& var = *decls.varDeclaration().var();
+            if (is_in_global_uniforms(var)) {
+                if (!fDeclaredUniformsStruct) {
+                    this->write("struct _GlobalUniforms {\n");
+                    fDeclaredUniformsStruct = true;
+                }
+                this->write("    ");
+                this->writeVariableDecl(var.type(), var.mangledName(), Delimiter::kComma);
+            }
+        }
+    }
+    if (fDeclaredUniformsStruct) {
+        int binding = fProgram.fConfig->fSettings.fDefaultUniformBinding;
+        int set = fProgram.fConfig->fSettings.fDefaultUniformSet;
+        this->write("};\n");
+        this->write("@binding(" + std::to_string(binding) + ") ");
+        this->write("@group(" + std::to_string(set) + ") ");
+        this->writeLine("var<uniform> _globalUniforms: _GlobalUniforms;");
+    }
+}
+
+// At a call site, forwards the implicit `_stageIn`/`_stageOut` arguments required by the callee's
+// pre-computed pipeline dependencies. Returns true if any argument was written.
+bool WGSLCodeGenerator::writeFunctionDependencyArgs(const FunctionDeclaration& f) {
+    FunctionDependencies* deps = fRequirements.dependencies.find(&f);
+    if (!deps || *deps == FunctionDependencies::kNone) {
+        return false;
+    }
+
+    const char* separator = "";
+    if ((*deps & FunctionDependencies::kPipelineInputs) != FunctionDependencies::kNone) {
+        this->write("_stageIn");
+        separator = ", ";
+    }
+    if ((*deps & FunctionDependencies::kPipelineOutputs) != FunctionDependencies::kNone) {
+        this->write(separator);
+        this->write("_stageOut");
+    }
+    return true;
+}
+
+// In a function signature, declares the implicit pipeline I/O parameters matching
+// writeFunctionDependencyArgs: `_stageIn` by value, `_stageOut` as a function-space pointer.
+// Returns true if any parameter was written.
+bool WGSLCodeGenerator::writeFunctionDependencyParams(const FunctionDeclaration& f) {
+    FunctionDependencies* deps = fRequirements.dependencies.find(&f);
+    if (!deps || *deps == FunctionDependencies::kNone) {
+        return false;
+    }
+
+    std::string_view structNamePrefix = pipeline_struct_prefix(fProgram.fConfig->fKind);
+    if (structNamePrefix.empty()) {
+        return false;
+    }
+    const char* separator = "";
+    if ((*deps & FunctionDependencies::kPipelineInputs) != FunctionDependencies::kNone) {
+        this->write("_stageIn: ");
+        separator = ", ";
+        this->write(structNamePrefix);
+        this->write("In");
+    }
+    if ((*deps & FunctionDependencies::kPipelineOutputs) != FunctionDependencies::kNone) {
+        this->write(separator);
+        this->write("_stageOut: ptr<function, ");
+        this->write(structNamePrefix);
+        this->write("Out>");
+    }
+    return true;
+}
+
+// Synthesizes (into fExtraFunctions) a wrapper function for a call that passes out-parameters,
+// since WGSL out-params must be pointers: the wrapper copies inputs into locals, calls the real
+// function with addresses of those locals, copies results back into the caller's lvalues, and
+// returns the call's result. Returns the wrapper's name for the caller to emit.
+std::string WGSLCodeGenerator::writeOutParamHelper(const FunctionCall& c,
+                                                   const ExpressionArray& args,
+                                                   const SkTArray<VariableReference*>& outVars) {
+    // It's possible for out-param function arguments to contain an out-param function call
+    // expression. Emit the function into a temporary stream to prevent the nested helper from
+    // clobbering the current helper as we recursively evaluate argument expressions.
+    StringStream tmpStream;
+    AutoOutputStream outputToExtraFunctions(this, &tmpStream, &fIndentation);
+
+    // Reset the line start state while the AutoOutputStream is active. We restore it later before
+    // the function returns.
+    bool atLineStart = fAtLineStart;
+    fAtLineStart = false;
+    const FunctionDeclaration& func = c.function();
+
+    // Synthesize a helper function that takes the same inputs as `function`, except in places where
+    // `outVars` is non-null; in those places, we take the type of the VariableReference.
+    //
+    // float _outParamHelper_0_originalFuncName(float _var0, float _var1, float& outParam) {
+    std::string name =
+            "_outParamHelper_" + std::to_string(fSwizzleHelperCount++) + "_" + func.mangledName();
+    auto separator = SkSL::String::Separator();
+    this->write("fn ");
+    this->write(name);
+    this->write("(");
+    if (this->writeFunctionDependencyParams(func)) {
+        separator();
+    }
+
+    SkASSERT(outVars.size() == args.size());
+    SkASSERT(SkToSizeT(outVars.size()) == func.parameters().size());
+
+    // We need to detect cases where the caller passes the same variable as an out-param more than
+    // once and avoid redeclaring the variable name. This is also a situation that is not permitted
+    // by WGSL aliasing rules (see https://www.w3.org/TR/WGSL/#aliasing). Because the parameter is
+    // redundant and we don't actually ever reference it, we give it a placeholder name.
+    auto parentOutParamArgVars = std::move(fOutParamArgVars);
+    SkASSERT(fOutParamArgVars.empty());
+
+    for (int i = 0; i < args.size(); ++i) {
+        this->write(separator());
+
+        if (outVars[i]) {
+            const Variable* var = outVars[i]->variable();
+            if (!fOutParamArgVars.contains(var)) {
+                fOutParamArgVars.add(var);
+                this->writeName(var->mangledName());
+            } else {
+                this->write("_unused");
+                this->write(std::to_string(i));
+            }
+        } else {
+            this->write("_var");
+            this->write(std::to_string(i));
+        }
+
+        this->write(": ");
+
+        // Declare the parameter using the type of argument variable. If the complete argument is an
+        // access or swizzle expression, the target assignment will be resolved below when we copy
+        // the value to the out-parameter.
+        const Type& type = outVars[i] ? outVars[i]->type() : args[i]->type();
+
+        // Declare an out-parameter as a pointer.
+        if (func.parameters()[i]->modifiers().fFlags & Modifiers::kOut_Flag) {
+            this->write(to_ptr_type(type));
+        } else {
+            this->write(to_wgsl_type(type));
+        }
+    }
+
+    this->write(")");
+    if (!func.returnType().isVoid()) {
+        this->write(" -> ");
+        this->write(to_wgsl_type(func.returnType()));
+    }
+    this->writeLine(" {");
+    ++fIndentation;
+
+    // Declare a temporary variable for each out-parameter.
+    for (int i = 0; i < outVars.size(); ++i) {
+        if (!outVars[i]) {
+            continue;
+        }
+        this->write("var ");
+        this->write("_var");
+        this->write(std::to_string(i));
+        this->write(": ");
+        this->write(to_wgsl_type(args[i]->type()));
+
+        // If this is an inout parameter then we need to copy the input argument into the parameter
+        // per https://www.khronos.org/opengl/wiki/Core_Language_(GLSL)#Parameters.
+        if (func.parameters()[i]->modifiers().fFlags & Modifiers::kIn_Flag) {
+            this->write(" = ");
+            this->writeExpression(*args[i], Precedence::kAssignment);
+        }
+
+        this->writeLine(";");
+    }
+
+    // Call the function we're wrapping. If it has a return type, then store it so it can be
+    // returned later.
+    bool hasReturn = !c.type().isVoid();
+    if (hasReturn) {
+        this->write("var _return: ");
+        this->write(to_wgsl_type(c.type()));
+        this->write(" = ");
+    }
+
+    // Write the function call.
+    this->writeName(func.mangledName());
+    this->write("(");
+    auto newSeparator = SkSL::String::Separator();
+    if (this->writeFunctionDependencyArgs(func)) {
+        newSeparator();
+    }
+    for (int i = 0; i < args.size(); ++i) {
+        this->write(newSeparator());
+        // All forwarded arguments now have a name that looks like "_var[i]" (e.g. _var0, _var1,
+        // etc.). All such variables should be of value type and those that have been passed in as
+        // inout should have been dereferenced when they were stored in a local temporary. We need
+        // to take their address again when forwarding to a pointer.
+        if (outVars[i]) {
+            this->write("&");
+        }
+        this->write("_var");
+        this->write(std::to_string(i));
+    }
+    this->writeLine(");");
+
+    // Copy the temporary variables back into the original out-parameters.
+    for (int i = 0; i < outVars.size(); ++i) {
+        if (!outVars[i]) {
+            continue;
+        }
+        // TODO(skia:13092): WGSL does not support assigning to a swizzle
+        // (see https://github.com/gpuweb/gpuweb/issues/737). These will require special treatment
+        // when they appear on the lhs of an assignment.
+        this->writeExpression(*args[i], Precedence::kAssignment);
+        this->write(" = _var");
+        this->write(std::to_string(i));
+        this->writeLine(";");
+    }
+
+    // Return
+    if (hasReturn) {
+        this->writeLine("return _return;");
+    }
+
+    --fIndentation;
+    this->writeLine("}");
+
+    // Write the function out to `fExtraFunctions`.
+    write_stringstream(tmpStream, fExtraFunctions);
+
+    // Restore any global state
+    fOutParamArgVars = std::move(parentOutParamArgVars);
+    fAtLineStart = atLineStart;
+    return name;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/codegen/SkSLWGSLCodeGenerator.h b/gfx/skia/skia/src/sksl/codegen/SkSLWGSLCodeGenerator.h
new file mode 100644
index 0000000000..9ec57ba17a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/codegen/SkSLWGSLCodeGenerator.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_WGSLCODEGENERATOR
+#define SKSL_WGSLCODEGENERATOR
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLStringStream.h"
+#include "src/sksl/codegen/SkSLCodeGenerator.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <initializer_list>
+#include <string>
+#include <string_view>
+#include <utility>
+
+namespace sknonstd {
+template <typename T> struct is_bitmask_enum;
+} // namespace sknonstd
+
+namespace SkSL {
+
+class AnyConstructor;
+class BinaryExpression;
+class Block;
+class Context;
+class ConstructorCompound;
+class ConstructorDiagonalMatrix;
+class ConstructorMatrixResize;
+class Expression;
+class ExpressionStatement;
+class FieldAccess;
+class FunctionCall;
+class FunctionDeclaration;
+class FunctionDefinition;
+class GlobalVarDeclaration;
+class IfStatement;
+class IndexExpression;
+class Literal;
+class MemoryLayout;
+class OutputStream;
+class Position;
+class ProgramElement;
+class ReturnStatement;
+class Statement;
+class StructDefinition;
+class Swizzle;
+class TernaryExpression;
+class VarDeclaration;
+class Variable;
+class VariableReference;
+enum class OperatorPrecedence : uint8_t;
+struct Modifiers;
+struct Program;
+
+/**
+ * Convert a Program into WGSL code.
+ */
+class WGSLCodeGenerator : public CodeGenerator {
+public:
+    // See https://www.w3.org/TR/WGSL/#builtin-values
+    enum class Builtin {
+        // Vertex stage:
+        kVertexIndex,    // input
+        kInstanceIndex,  // input
+        kPosition,       // output, fragment stage input
+
+        // Fragment stage:
+        kFrontFacing,  // input
+        kSampleIndex,  // input
+        kFragDepth,    // output
+        kSampleMask,   // input, output
+
+        // Compute stage:
+        kLocalInvocationId,     // input
+        kLocalInvocationIndex,  // input
+        kGlobalInvocationId,    // input
+        kWorkgroupId,           // input
+        kNumWorkgroups,         // input
+    };
+
+    // Represents a function's dependencies that are not accessible in global scope. For instance,
+    // pipeline stage input and output parameters must be passed in as an argument.
+    //
+    // This is a bitmask enum.
+    enum class FunctionDependencies : uint8_t {
+        kNone = 0,
+        kPipelineInputs = 1,
+        kPipelineOutputs = 2,
+    };
+
+    // Variable declarations can be terminated by:
+    //   - comma (","), e.g. in struct member declarations or function parameters
+    //   - semicolon (";"), e.g. in function scope variables
+    // A "none" option is provided to skip the delimiter when not needed, e.g. at the end of a list
+    // of declarations.
+    enum class Delimiter {
+        kComma,
+        kSemicolon,
+        kNone,
+    };
+
+    struct ProgramRequirements {
+        using DepsMap = SkTHashMap<const FunctionDeclaration*, FunctionDependencies>;
+
+        ProgramRequirements() = default;
+        ProgramRequirements(DepsMap dependencies, bool mainNeedsCoordsArgument)
+                : dependencies(std::move(dependencies))
+                , mainNeedsCoordsArgument(mainNeedsCoordsArgument) {}
+
+        // Mappings used to synthesize function parameters according to dependencies on pipeline
+        // input/output variables.
+        DepsMap dependencies;
+
+        // True, if the main function takes a coordinate parameter. This is used to ensure that
+        // sk_FragCoord is declared as part of pipeline inputs.
+        bool mainNeedsCoordsArgument;
+    };
+
+    WGSLCodeGenerator(const Context* context, const Program* program, OutputStream* out)
+            : INHERITED(context, program, out)
+            , fReservedWords({"array",
+                              "FSIn",
+                              "FSOut",
+                              "_globalUniforms",
+                              "_GlobalUniforms",
+                              "_return",
+                              "_stageIn",
+                              "_stageOut",
+                              "VSIn",
+                              "VSOut"}) {}
+
+    bool generateCode() override;
+
+private:
+    using INHERITED = CodeGenerator;
+    using Precedence = OperatorPrecedence;
+
+    // Called by generateCode() as the first step.
+    void preprocessProgram();
+
+    // Write output content while correctly handling indentation.
+    void write(std::string_view s);
+    void writeLine(std::string_view s = std::string_view());
+    void finishLine();
+    void writeName(std::string_view name);
+    void writeVariableDecl(const Type& type, std::string_view name, Delimiter delimiter);
+
+    // Helpers to declare a pipeline stage IO parameter declaration.
+    void writePipelineIODeclaration(Modifiers modifiers,
+                                    const Type& type,
+                                    std::string_view name,
+                                    Delimiter delimiter);
+    void writeUserDefinedIODecl(const Type& type,
+                                std::string_view name,
+                                int location,
+                                Delimiter delimiter);
+    void writeBuiltinIODecl(const Type& type,
+                            std::string_view name,
+                            Builtin builtin,
+                            Delimiter delimiter);
+
+    // Write a function definition.
+    void writeFunction(const FunctionDefinition& f);
+    void writeFunctionDeclaration(const FunctionDeclaration& f);
+
+    // Write the program entry point.
+    void writeEntryPoint(const FunctionDefinition& f);
+
+    // Writers for supported statement types.
+    void writeStatement(const Statement& s);
+    void writeStatements(const StatementArray& statements);
+    void writeBlock(const Block& b);
+    void writeExpressionStatement(const ExpressionStatement& s);
+    void writeIfStatement(const IfStatement& s);
+    void writeReturnStatement(const ReturnStatement& s);
+    void writeVarDeclaration(const VarDeclaration& varDecl);
+
+    // Writers for expressions.
+    void writeExpression(const Expression& e, Precedence parentPrecedence);
+    void writeBinaryExpression(const BinaryExpression& b, Precedence parentPrecedence);
+    void writeFieldAccess(const FieldAccess& f);
+    void writeFunctionCall(const FunctionCall&);
+    void writeIndexExpression(const IndexExpression& i);
+    void writeLiteral(const Literal& l);
+    void writeSwizzle(const Swizzle& swizzle);
+    void writeTernaryExpression(const TernaryExpression& t, Precedence parentPrecedence);
+    void writeVariableReference(const VariableReference& r);
+
+    // Constructor expressions
+    void writeAnyConstructor(const AnyConstructor& c, Precedence parentPrecedence);
+    void writeConstructorCompound(const ConstructorCompound& c, Precedence parentPrecedence);
+    void writeConstructorCompoundVector(const ConstructorCompound& c, Precedence parentPrecedence);
+    void writeConstructorCompoundMatrix(const ConstructorCompound& c, Precedence parentPrecedence);
+    void writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c,
+                                        Precedence parentPrecedence);
+    void writeConstructorMatrixResize(const ConstructorMatrixResize& c,
+                                      Precedence parentPrecedence);
+
+    // Matrix constructor helpers.
+    bool isMatrixConstructorHelperNeeded(const ConstructorCompound& c);
+    std::string getMatrixConstructorHelper(const AnyConstructor& c);
+    void writeMatrixFromMatrixArgs(const Type& sourceMatrix, int columns, int rows);
+    void writeMatrixFromScalarAndVectorArgs(const AnyConstructor& ctor, int columns, int rows);
+
+    // Synthesized helper functions for comparison operators that are not supported by WGSL.
+    void writeMatrixEquality(const Expression& left, const Expression& right);
+
+    // Generic recursive ProgramElement visitor.
+    void writeProgramElement(const ProgramElement& e);
+    void writeGlobalVarDeclaration(const GlobalVarDeclaration& d);
+    void writeStructDefinition(const StructDefinition& s);
+
+    // Writes the WGSL struct fields for SkSL structs and interface blocks. Enforces WGSL address
+    // space layout constraints
+    // (https://www.w3.org/TR/WGSL/#address-space-layout-constraints) if a `layout` is
+    // provided. A struct that does not need to be host-shareable does not require a `layout`.
+    void writeFields(SkSpan<const Type::Field> fields,
+                     Position parentPos,
+                     const MemoryLayout* layout = nullptr);
+
+    // We bundle all varying pipeline stage inputs and outputs in a struct.
+    void writeStageInputStruct();
+    void writeStageOutputStruct();
+
+    // Writes all top-level non-opaque global uniform declarations (i.e. not part of an interface
+    // block) into a single uniform block binding.
+    //
+    // In complete fragment/vertex/compute programs, uniforms will be declared only as interface
+    // blocks and global opaque types (like textures and samplers) which we expect to be declared
+    // with a unique binding and descriptor set index. However, test files that are declared as RTE
+    // programs may contain OpenGL-style global uniform declarations with no clear binding index to
+    // use for the containing synthesized block.
+    //
+    // Since we are handling these variables only to generate gold files from RTEs and never run
+    // them, we always declare them at the default bind group and binding index.
+    void writeNonBlockUniformsForTests();
+
+    // For a given function declaration, writes out any implicitly required pipeline stage arguments
+    // based on the function's pre-determined dependencies. These are expected to be written out as
+    // the first parameters for a function that requires them. Returns true if any arguments were
+    // written.
+    bool writeFunctionDependencyArgs(const FunctionDeclaration&);
+    bool writeFunctionDependencyParams(const FunctionDeclaration&);
+
+    // Generate an out-parameter helper function for the given call and return its name.
+    std::string writeOutParamHelper(const FunctionCall&,
+                                    const ExpressionArray& args,
+                                    const SkTArray<VariableReference*>& outVars);
+
+    // Stores the disallowed identifier names.
+    SkTHashSet<std::string_view> fReservedWords;
+    // Pipeline I/O and coordinate-argument requirements computed by preprocessProgram().
+    ProgramRequirements fRequirements;
+    // Number of pipeline input variables; used to decide whether to emit the input struct.
+    int fPipelineInputCount = 0;
+    bool fDeclaredUniformsStruct = false;
+
+    // Out-parameters to functions are declared as pointers. While we process the arguments to a
+    // out-parameter helper function, we need to temporarily track that they are re-declared as
+    // pointer-parameters in the helper, so that expression-tree processing can know to correctly
+    // dereference them when the variable is referenced. The contents of this set are expected to
+    // be uniquely scoped for each out-param helper and will be cleared every time a new out-param
+    // helper function has been emitted.
+    SkTHashSet<const Variable*> fOutParamArgVars;
+
+    // Output processing state.
+    int fIndentation = 0;
+    bool fAtLineStart = false;
+
+    // NOTE(review): despite the name, this counter also numbers out-param helper functions
+    // (see writeOutParamHelper) — consider renaming upstream.
+    int fSwizzleHelperCount = 0;
+    StringStream fExtraFunctions;      // all internally synthesized helpers are written here
+    SkTHashSet<std::string> fHelpers;  // all synthesized helper functions, by name
+};
+
+} // namespace SkSL
+
+namespace sknonstd {
+// Opt FunctionDependencies into the bitwise-operator overloads provided for bitmask enums.
+template <>
+struct is_bitmask_enum<SkSL::WGSLCodeGenerator::FunctionDependencies> : std::true_type {};
+}  // namespace sknonstd
+
+#endif // SKSL_WGSLCODEGENERATOR
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLBlock.cpp b/gfx/skia/skia/src/sksl/dsl/DSLBlock.cpp
new file mode 100644
index 0000000000..9a4a478e74
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLBlock.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLBlock.h"
+
+#include "include/sksl/DSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLBlock.h"
+
+#include <utility>
+
+namespace SkSL {
+
+namespace dsl {
+
+// Wraps an already-built StatementArray, taking ownership of the statements and symbol table.
+DSLBlock::DSLBlock(SkSL::StatementArray statements,
+                   std::shared_ptr<SymbolTable> symbols,
+                   Position pos)
+        : fStatements(std::move(statements))
+        , fSymbols(std::move(symbols))
+        , fPosition(pos) {}
+
+// Builds a block from DSL statement wrappers, releasing each into the owned StatementArray.
+DSLBlock::DSLBlock(SkTArray<DSLStatement> statements,
+                   std::shared_ptr<SymbolTable> symbols,
+                   Position pos)
+        : fSymbols(std::move(symbols))
+        , fPosition(pos) {
+    fStatements.reserve_back(statements.size());
+    for (DSLStatement& s : statements) {
+        fStatements.push_back(s.release());
+    }
+}
+
+// Converts this DSL block into an IR Block (braced scope), transferring ownership of the
+// statements and symbol table; the DSLBlock is left in a moved-from state.
+std::unique_ptr<SkSL::Block> DSLBlock::release() {
+    return std::make_unique<SkSL::Block>(fPosition, std::move(fStatements),
+                                         Block::Kind::kBracedScope, std::move(fSymbols));
+}
+
+// Appends one statement to the block, taking ownership of its IR node.
+void DSLBlock::append(DSLStatement stmt) {
+    fStatements.push_back(stmt.release());
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLCase.cpp b/gfx/skia/skia/src/sksl/dsl/DSLCase.cpp
new file mode 100644
index 0000000000..4730894824
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLCase.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLCase.h"
+
+namespace SkSL {
+
+namespace dsl {
+
+// Builds a switch case from a case value and an already-built StatementArray.
+DSLCase::DSLCase(DSLExpression value, SkSL::StatementArray statements, Position pos)
+        : fValue(std::move(value))
+        , fStatements(std::move(statements))
+        , fPosition(pos) {}
+
+// Builds a switch case from DSL statement wrappers, releasing each into the owned array.
+DSLCase::DSLCase(DSLExpression value, SkTArray<DSLStatement> statements, Position pos)
+        : fValue(std::move(value))
+        , fPosition(pos) {
+    fStatements.reserve_back(statements.size());
+    for (DSLStatement& stmt : statements) {
+        fStatements.push_back(stmt.release());
+    }
+}
+
+// Move constructor.
+// NOTE(review): fPosition is not taken from `other` and is left default-initialized; the
+// move-assignment operator below also skips it — confirm this is intentional.
+DSLCase::DSLCase(DSLCase&& other)
+        : fValue(std::move(other.fValue))
+        , fStatements(std::move(other.fStatements)) {}
+
+// Out-of-line destructor (keeps member destructors out of the header).
+DSLCase::~DSLCase() {}
+
+// Move assignment; transfers the case value and statements (fPosition is not copied).
+DSLCase& DSLCase::operator=(DSLCase&& other) {
+    fValue.assign(std::move(other.fValue));
+    fStatements = std::move(other.fStatements);
+    return *this;
+}
+
+// Appends one statement to this case, taking ownership of its IR node.
+void DSLCase::append(DSLStatement stmt) {
+    fStatements.push_back(stmt.release());
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLCore.cpp b/gfx/skia/skia/src/sksl/dsl/DSLCore.cpp
new file mode 100644
index 0000000000..21dfca49cf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLCore.cpp
@@ -0,0 +1,615 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLCore.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/DSLModifiers.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/DSLVar.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLModifiersPool.h" // IWYU pragma: keep
+#include "src/sksl/SkSLPool.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/dsl/priv/DSLWriter.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLBreakStatement.h"
+#include "src/sksl/ir/SkSLContinueStatement.h"
+#include "src/sksl/ir/SkSLDiscardStatement.h"
+#include "src/sksl/ir/SkSLDoStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExtension.h"
+#include "src/sksl/ir/SkSLForStatement.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLModifiersDeclaration.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+#include <vector>
+
+namespace SkSL {
+
+class Variable;
+
+namespace dsl {
+
+void Start(SkSL::Compiler* compiler, ProgramKind kind) {
+ Start(compiler, kind, ProgramSettings());
+}
+
+void Start(SkSL::Compiler* compiler, ProgramKind kind, const ProgramSettings& settings) {
+ ThreadContext::SetInstance(std::make_unique<ThreadContext>(compiler, kind, settings,
+ compiler->moduleForProgramKind(kind),
+ /*isModule=*/false));
+}
+
+void StartModule(SkSL::Compiler* compiler,
+ ProgramKind kind,
+ const ProgramSettings& settings,
+ const SkSL::Module* parent) {
+ ThreadContext::SetInstance(std::make_unique<ThreadContext>(compiler, kind, settings,
+ parent, /*isModule=*/true));
+}
+
+void End() {
+ ThreadContext::SetInstance(nullptr);
+}
+
+ErrorReporter& GetErrorReporter() {
+ return ThreadContext::GetErrorReporter();
+}
+
+void SetErrorReporter(ErrorReporter* errorReporter) {
+ SkASSERT(errorReporter);
+ ThreadContext::SetErrorReporter(errorReporter);
+}
+
+class DSLCore {
+public:
+ static std::unique_ptr<SkSL::Program> ReleaseProgram(std::unique_ptr<std::string> source) {
+ ThreadContext& instance = ThreadContext::Instance();
+ SkSL::Compiler& compiler = *instance.fCompiler;
+ Pool* pool = instance.fPool.get();
+ auto result = std::make_unique<SkSL::Program>(std::move(source),
+ std::move(instance.fConfig),
+ compiler.fContext,
+ std::move(instance.fProgramElements),
+ std::move(instance.fSharedElements),
+ std::move(instance.fModifiersPool),
+ std::move(compiler.fSymbolTable),
+ std::move(instance.fPool),
+ instance.fInputs);
+ bool success = false;
+ if (!compiler.finalize(*result)) {
+ // Do not return programs that failed to compile.
+ } else if (!compiler.optimize(*result)) {
+ // Do not return programs that failed to optimize.
+ } else {
+ // We have a successful program!
+ success = true;
+ }
+ if (pool) {
+ pool->detachFromThread();
+ }
+ SkASSERT(instance.fProgramElements.empty());
+ SkASSERT(!ThreadContext::SymbolTable());
+ return success ? std::move(result) : nullptr;
+ }
+
+ template <typename... Args>
+ static DSLExpression Call(const char* name, Position pos, Args... args) {
+ SkSL::ExpressionArray argArray;
+ argArray.reserve_back(sizeof...(args));
+ ((void)argArray.push_back(args.release()), ...);
+
+ return DSLExpression(SkSL::FunctionCall::Convert(ThreadContext::Context(), pos,
+ ThreadContext::Compiler().convertIdentifier(Position(), name),
+ std::move(argArray)));
+ }
+
+ static DSLStatement Break(Position pos) {
+ return SkSL::BreakStatement::Make(pos);
+ }
+
+ static DSLStatement Continue(Position pos) {
+ return SkSL::ContinueStatement::Make(pos);
+ }
+
+ static void Declare(const DSLModifiers& modifiers) {
+ ThreadContext::ProgramElements().push_back(std::make_unique<SkSL::ModifiersDeclaration>(
+ ThreadContext::Modifiers(modifiers.fModifiers)));
+ }
+
+ static DSLStatement Declare(DSLVar& var, Position pos) {
+ return DSLWriter::Declaration(var);
+ }
+
+ static DSLStatement Declare(SkTArray<DSLVar>& vars, Position pos) {
+ StatementArray statements;
+ for (DSLVar& v : vars) {
+ statements.push_back(Declare(v, pos).release());
+ }
+ return SkSL::Block::Make(pos, std::move(statements), Block::Kind::kCompoundStatement);
+ }
+
+ static void Declare(DSLGlobalVar& var, Position pos) {
+ std::unique_ptr<SkSL::Statement> stmt = DSLWriter::Declaration(var);
+ if (stmt && !stmt->isEmpty()) {
+ ThreadContext::ProgramElements().push_back(
+ std::make_unique<SkSL::GlobalVarDeclaration>(std::move(stmt)));
+ }
+ }
+
+ static void Declare(SkTArray<DSLGlobalVar>& vars, Position pos) {
+ for (DSLGlobalVar& v : vars) {
+ Declare(v, pos);
+ }
+ }
+
+ static DSLStatement Discard(Position pos) {
+ return DSLStatement(SkSL::DiscardStatement::Convert(ThreadContext::Context(), pos), pos);
+ }
+
+ static DSLStatement Do(DSLStatement stmt, DSLExpression test, Position pos) {
+ return DSLStatement(DoStatement::Convert(ThreadContext::Context(), pos, stmt.release(),
+ test.release()), pos);
+ }
+
+ static DSLStatement For(DSLStatement initializer, DSLExpression test,
+ DSLExpression next, DSLStatement stmt, Position pos,
+ const ForLoopPositions& forLoopPositions) {
+ return DSLStatement(ForStatement::Convert(ThreadContext::Context(), pos, forLoopPositions,
+ initializer.releaseIfPossible(),
+ test.releaseIfPossible(),
+ next.releaseIfPossible(),
+ stmt.release(),
+ ThreadContext::SymbolTable()), pos);
+ }
+
+ static DSLStatement If(DSLExpression test, DSLStatement ifTrue, DSLStatement ifFalse,
+ Position pos) {
+ return DSLStatement(IfStatement::Convert(ThreadContext::Context(),
+ pos,
+ test.release(),
+ ifTrue.release(),
+ ifFalse.releaseIfPossible()), pos);
+ }
+
+ static DSLExpression InterfaceBlock(const DSLModifiers& modifiers, std::string_view typeName,
+ SkTArray<DSLField> fields, std::string_view varName,
+ int arraySize, Position pos) {
+ // Build a struct type corresponding to the passed-in fields and array size.
+ DSLType varType = StructType(typeName, fields, /*interfaceBlock=*/true, pos);
+ if (arraySize > 0) {
+ varType = Array(varType, arraySize);
+ }
+
+ // Create a global variable to attach our interface block to. (The variable doesn't actually
+ // get a program element, though; the interface block does instead.)
+ DSLGlobalVar var(modifiers, varType, varName, DSLExpression(), pos);
+ if (SkSL::Variable* skslVar = DSLWriter::Var(var)) {
+ // Add an InterfaceBlock program element to the program.
+ if (std::unique_ptr<SkSL::InterfaceBlock> intf = SkSL::InterfaceBlock::Convert(
+ ThreadContext::Context(), pos, skslVar, ThreadContext::SymbolTable())) {
+ ThreadContext::ProgramElements().push_back(std::move(intf));
+ // Return a VariableReference to the global variable tied to the interface block.
+ return DSLExpression(var);
+ }
+ }
+
+ // The InterfaceBlock couldn't be created; return poison.
+ return DSLExpression(nullptr);
+ }
+
+ static DSLStatement Return(DSLExpression value, Position pos) {
+ // Note that because Return is called before the function in which it resides exists, at
+ // this point we do not know the function's return type. We therefore do not check for
+ // errors, or coerce the value to the correct type, until the return statement is actually
+ // added to a function. (This is done in FunctionDefinition::Convert.)
+ return SkSL::ReturnStatement::Make(pos, value.releaseIfPossible());
+ }
+
+ static DSLExpression Swizzle(DSLExpression base, SkSL::SwizzleComponent::Type a,
+ Position pos, Position maskPos) {
+ return DSLExpression(Swizzle::Convert(ThreadContext::Context(), pos, maskPos,
+ base.release(), ComponentArray{a}),
+ pos);
+ }
+
+ static DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ Position pos,
+ Position maskPos) {
+ return DSLExpression(Swizzle::Convert(ThreadContext::Context(), pos, maskPos,
+ base.release(), ComponentArray{a, b}),
+ pos);
+ }
+
+ static DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ SkSL::SwizzleComponent::Type c,
+ Position pos,
+ Position maskPos) {
+ return DSLExpression(Swizzle::Convert(ThreadContext::Context(), pos, maskPos,
+ base.release(), ComponentArray{a, b, c}),
+ pos);
+ }
+
+ static DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ SkSL::SwizzleComponent::Type c,
+ SkSL::SwizzleComponent::Type d,
+ Position pos,
+ Position maskPos) {
+ return DSLExpression(Swizzle::Convert(ThreadContext::Context(), pos, maskPos,
+ base.release(), ComponentArray{a, b, c, d}),
+ pos);
+ }
+
+ static DSLExpression Select(DSLExpression test, DSLExpression ifTrue, DSLExpression ifFalse,
+ Position pos) {
+ auto result = TernaryExpression::Convert(ThreadContext::Context(), pos, test.release(),
+ ifTrue.release(), ifFalse.release());
+ SkASSERT(!result || result->fPosition == pos);
+ return DSLExpression(std::move(result), pos);
+ }
+
+ static DSLStatement Switch(DSLExpression value, SkTArray<DSLCase> cases, Position pos) {
+ ExpressionArray values;
+ values.reserve_back(cases.size());
+ StatementArray caseBlocks;
+ caseBlocks.reserve_back(cases.size());
+ for (DSLCase& c : cases) {
+ values.push_back(c.fValue.releaseIfPossible());
+ caseBlocks.push_back(SkSL::Block::Make(Position(), std::move(c.fStatements),
+ Block::Kind::kUnbracedBlock));
+ }
+ return DSLStatement(SwitchStatement::Convert(ThreadContext::Context(), pos,
+ value.release(),
+ std::move(values),
+ std::move(caseBlocks),
+ ThreadContext::SymbolTable()), pos);
+ }
+
+ static DSLStatement While(DSLExpression test, DSLStatement stmt, Position pos) {
+ return DSLStatement(ForStatement::ConvertWhile(ThreadContext::Context(), pos,
+ test.release(),
+ stmt.release(),
+ ThreadContext::SymbolTable()), pos);
+ }
+};
+
+std::unique_ptr<SkSL::Program> ReleaseProgram(std::unique_ptr<std::string> source) {
+ return DSLCore::ReleaseProgram(std::move(source));
+}
+
+void AddExtension(std::string_view name, Position pos) {
+ ThreadContext::ProgramElements().push_back(std::make_unique<SkSL::Extension>(pos, name));
+}
+
+DSLStatement Break(Position pos) {
+ return DSLCore::Break(pos);
+}
+
+DSLStatement Continue(Position pos) {
+ return DSLCore::Continue(pos);
+}
+
+void Declare(const DSLModifiers& modifiers, Position pos) {
+ SkSL::ProgramKind kind = ThreadContext::GetProgramConfig()->fKind;
+ if (!ProgramConfig::IsFragment(kind) &&
+ !ProgramConfig::IsVertex(kind)) {
+ ThreadContext::ReportError("layout qualifiers are not allowed in this kind of program",
+ pos);
+ return;
+ }
+ DSLCore::Declare(modifiers);
+}
+
+// Logically, we'd want the variable's initial value to appear on here in Declare, since that
+// matches how we actually write code (and in fact that was what our first attempt looked like).
+// Unfortunately, C++ doesn't guarantee execution order between arguments, and Declare() can appear
+// as a function argument in constructs like Block(Declare(x, 0), foo(x)). If these are executed out
+// of order, we will evaluate the reference to x before we evaluate Declare(x, 0), and thus the
+// variable's initial value is unknown at the point of reference. There are probably some other
+// issues with this as well, but it is particularly dangerous when x is const, since SkSL will
+// expect its value to be known when it is referenced and will end up asserting, dereferencing a
+// null pointer, or possibly doing something else awful.
+//
+// So, we put the initial value onto the Var itself instead of the Declare to guarantee that it is
+// always executed in the correct order.
+DSLStatement Declare(DSLVar& var, Position pos) {
+ return DSLCore::Declare(var, pos);
+}
+
+DSLStatement Declare(SkTArray<DSLVar>& vars, Position pos) {
+ return DSLCore::Declare(vars, pos);
+}
+
+void Declare(DSLGlobalVar& var, Position pos) {
+ DSLCore::Declare(var, pos);
+}
+
+void Declare(SkTArray<DSLGlobalVar>& vars, Position pos) {
+ DSLCore::Declare(vars, pos);
+}
+
+DSLStatement Discard(Position pos) {
+ return DSLCore::Discard(pos);
+}
+
+DSLStatement Do(DSLStatement stmt, DSLExpression test, Position pos) {
+ return DSLCore::Do(std::move(stmt), std::move(test), pos);
+}
+
+DSLStatement For(DSLStatement initializer, DSLExpression test, DSLExpression next,
+ DSLStatement stmt, Position pos, ForLoopPositions forLoopPositions) {
+ return DSLCore::For(std::move(initializer), std::move(test), std::move(next),
+ std::move(stmt), pos, forLoopPositions);
+}
+
+DSLStatement If(DSLExpression test, DSLStatement ifTrue, DSLStatement ifFalse, Position pos) {
+ return DSLCore::If(std::move(test), std::move(ifTrue), std::move(ifFalse), pos);
+}
+
+DSLExpression InterfaceBlock(const DSLModifiers& modifiers, std::string_view typeName,
+ SkTArray<DSLField> fields, std::string_view varName, int arraySize,
+ Position pos) {
+ return DSLCore::InterfaceBlock(modifiers, typeName, std::move(fields), varName, arraySize, pos);
+}
+
+DSLStatement Return(DSLExpression expr, Position pos) {
+ return DSLCore::Return(std::move(expr), pos);
+}
+
+DSLExpression Select(DSLExpression test, DSLExpression ifTrue, DSLExpression ifFalse,
+ Position pos) {
+ return DSLCore::Select(std::move(test), std::move(ifTrue), std::move(ifFalse), pos);
+}
+
+DSLStatement Switch(DSLExpression value, SkTArray<DSLCase> cases, Position pos) {
+ return DSLCore::Switch(std::move(value), std::move(cases), pos);
+}
+
+DSLStatement While(DSLExpression test, DSLStatement stmt, Position pos) {
+ return DSLCore::While(std::move(test), std::move(stmt), pos);
+}
+
+DSLExpression Abs(DSLExpression x, Position pos) {
+ return DSLCore::Call("abs", pos, std::move(x));
+}
+
+DSLExpression All(DSLExpression x, Position pos) {
+ return DSLCore::Call("all", pos, std::move(x));
+}
+
+DSLExpression Any(DSLExpression x, Position pos) {
+ return DSLCore::Call("any", pos, std::move(x));
+}
+
+DSLExpression Atan(DSLExpression y_over_x, Position pos) {
+ return DSLCore::Call("atan", pos, std::move(y_over_x));
+}
+
+DSLExpression Atan(DSLExpression y, DSLExpression x, Position pos) {
+ return DSLCore::Call("atan", pos, std::move(y), std::move(x));
+}
+
+DSLExpression Ceil(DSLExpression x, Position pos) {
+ return DSLCore::Call("ceil", pos, std::move(x));
+}
+
+DSLExpression Clamp(DSLExpression x, DSLExpression min, DSLExpression max, Position pos) {
+ return DSLCore::Call("clamp", pos, std::move(x), std::move(min), std::move(max));
+}
+
+DSLExpression Cos(DSLExpression x, Position pos) {
+ return DSLCore::Call("cos", pos, std::move(x));
+}
+
+DSLExpression Cross(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("cross", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Degrees(DSLExpression x, Position pos) {
+ return DSLCore::Call("degrees", pos, std::move(x));
+}
+
+DSLExpression Distance(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("distance", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Dot(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("dot", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Equal(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("equal", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Exp(DSLExpression x, Position pos) {
+ return DSLCore::Call("exp", pos, std::move(x));
+}
+
+DSLExpression Exp2(DSLExpression x, Position pos) {
+ return DSLCore::Call("exp2", pos, std::move(x));
+}
+
+DSLExpression Faceforward(DSLExpression n, DSLExpression i, DSLExpression nref, Position pos) {
+ return DSLCore::Call("faceforward", pos, std::move(n), std::move(i), std::move(nref));
+}
+
+DSLExpression Fract(DSLExpression x, Position pos) {
+ return DSLCore::Call("fract", pos, std::move(x));
+}
+
+DSLExpression Floor(DSLExpression x, Position pos) {
+ return DSLCore::Call("floor", pos, std::move(x));
+}
+
+DSLExpression GreaterThan(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("greaterThan", pos, std::move(x), std::move(y));
+}
+
+DSLExpression GreaterThanEqual(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("greaterThanEqual", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Inverse(DSLExpression x, Position pos) {
+ return DSLCore::Call("inverse", pos, std::move(x));
+}
+
+DSLExpression Inversesqrt(DSLExpression x, Position pos) {
+ return DSLCore::Call("inversesqrt", pos, std::move(x));
+}
+
+DSLExpression Length(DSLExpression x, Position pos) {
+ return DSLCore::Call("length", pos, std::move(x));
+}
+
+DSLExpression LessThan(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("lessThan", pos, std::move(x), std::move(y));
+}
+
+DSLExpression LessThanEqual(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("lessThanEqual", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Log(DSLExpression x, Position pos) {
+ return DSLCore::Call("log", pos, std::move(x));
+}
+
+DSLExpression Log2(DSLExpression x, Position pos) {
+ return DSLCore::Call("log2", pos, std::move(x));
+}
+
+DSLExpression Max(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("max", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Min(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("min", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Mix(DSLExpression x, DSLExpression y, DSLExpression a, Position pos) {
+ return DSLCore::Call("mix", pos, std::move(x), std::move(y), std::move(a));
+}
+
+DSLExpression Mod(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("mod", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Normalize(DSLExpression x, Position pos) {
+ return DSLCore::Call("normalize", pos, std::move(x));
+}
+
+DSLExpression NotEqual(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("notEqual", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Pow(DSLExpression x, DSLExpression y, Position pos) {
+ return DSLCore::Call("pow", pos, std::move(x), std::move(y));
+}
+
+DSLExpression Radians(DSLExpression x, Position pos) {
+ return DSLCore::Call("radians", pos, std::move(x));
+}
+
+DSLExpression Reflect(DSLExpression i, DSLExpression n, Position pos) {
+ return DSLCore::Call("reflect", pos, std::move(i), std::move(n));
+}
+
+DSLExpression Refract(DSLExpression i, DSLExpression n, DSLExpression eta, Position pos) {
+ return DSLCore::Call("refract", pos, std::move(i), std::move(n), std::move(eta));
+}
+
+DSLExpression Round(DSLExpression x, Position pos) {
+ return DSLCore::Call("round", pos, std::move(x));
+}
+
+DSLExpression Saturate(DSLExpression x, Position pos) {
+ return DSLCore::Call("saturate", pos, std::move(x));
+}
+
+DSLExpression Sign(DSLExpression x, Position pos) {
+ return DSLCore::Call("sign", pos, std::move(x));
+}
+
+DSLExpression Sin(DSLExpression x, Position pos) {
+ return DSLCore::Call("sin", pos, std::move(x));
+}
+
+DSLExpression Smoothstep(DSLExpression edge1, DSLExpression edge2, DSLExpression x,
+ Position pos) {
+ return DSLCore::Call("smoothstep", pos, std::move(edge1), std::move(edge2), std::move(x));
+}
+
+DSLExpression Sqrt(DSLExpression x, Position pos) {
+ return DSLCore::Call("sqrt", pos, std::move(x));
+}
+
+DSLExpression Step(DSLExpression edge, DSLExpression x, Position pos) {
+ return DSLCore::Call("step", pos, std::move(edge), std::move(x));
+}
+
+DSLExpression Swizzle(DSLExpression base, SkSL::SwizzleComponent::Type a,
+ Position pos, Position maskPos) {
+ return DSLCore::Swizzle(std::move(base), a, pos, maskPos);
+}
+
+DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ Position pos,
+ Position maskPos) {
+ return DSLCore::Swizzle(std::move(base), a, b, pos, maskPos);
+}
+
+DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ SkSL::SwizzleComponent::Type c,
+ Position pos,
+ Position maskPos) {
+ return DSLCore::Swizzle(std::move(base), a, b, c, pos, maskPos);
+}
+
+DSLExpression Swizzle(DSLExpression base,
+ SkSL::SwizzleComponent::Type a,
+ SkSL::SwizzleComponent::Type b,
+ SkSL::SwizzleComponent::Type c,
+ SkSL::SwizzleComponent::Type d,
+ Position pos,
+ Position maskPos) {
+ return DSLCore::Swizzle(std::move(base), a, b, c, d, pos, maskPos);
+}
+
+DSLExpression Tan(DSLExpression x, Position pos) {
+ return DSLCore::Call("tan", pos, std::move(x));
+}
+
+DSLExpression Unpremul(DSLExpression x, Position pos) {
+ return DSLCore::Call("unpremul", pos, std::move(x));
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLExpression.cpp b/gfx/skia/skia/src/sksl/dsl/DSLExpression.cpp
new file mode 100644
index 0000000000..937f58432b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLExpression.cpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLExpression.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/sksl/DSLCore.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/DSLVar.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/dsl/priv/DSLWriter.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLPoison.h"
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include <utility>
+
+namespace SkSL {
+
+namespace dsl {
+
+DSLExpression::DSLExpression() {}
+
+DSLExpression::DSLExpression(DSLExpression&& other)
+ : fExpression(std::move(other.fExpression)) {}
+
+DSLExpression::DSLExpression(std::unique_ptr<SkSL::Expression> expression, Position pos)
+ : fExpression(expression ? std::move(expression)
+ : SkSL::Poison::Make(pos, ThreadContext::Context())) {
+ // If a position was passed in, it must match the expression's position.
+ SkASSERTF(!pos.valid() || this->position() == pos,
+ "expected expression position (%d-%d), but received (%d-%d)",
+ pos.startOffset(), pos.endOffset(),
+ this->position().startOffset(), this->position().endOffset());
+}
+
+DSLExpression::DSLExpression(float value, Position pos)
+ : fExpression(SkSL::Literal::MakeFloat(ThreadContext::Context(),
+ pos,
+ value)) {}
+
+DSLExpression::DSLExpression(int value, Position pos)
+ : fExpression(SkSL::Literal::MakeInt(ThreadContext::Context(),
+ pos,
+ value)) {}
+
+DSLExpression::DSLExpression(int64_t value, Position pos)
+ : fExpression(SkSL::Literal::MakeInt(ThreadContext::Context(),
+ pos,
+ value)) {}
+
+DSLExpression::DSLExpression(unsigned int value, Position pos)
+ : fExpression(SkSL::Literal::MakeInt(ThreadContext::Context(),
+ pos,
+ value)) {}
+
+DSLExpression::DSLExpression(bool value, Position pos)
+ : fExpression(SkSL::Literal::MakeBool(ThreadContext::Context(),
+ pos,
+ value)) {}
+
+DSLExpression::DSLExpression(DSLVarBase& var, Position pos)
+ : fExpression(std::make_unique<SkSL::VariableReference>(
+ pos, DSLWriter::Var(var), SkSL::VariableReference::RefKind::kRead)) {}
+
+DSLExpression::DSLExpression(DSLVarBase&& var, Position pos)
+ : DSLExpression(var) {}
+
+DSLExpression::~DSLExpression() {}
+
+DSLExpression DSLExpression::Poison(Position pos) {
+ return DSLExpression(SkSL::Poison::Make(pos, ThreadContext::Context()));
+}
+
+bool DSLExpression::isValid() const {
+ return this->hasValue() && !fExpression->is<SkSL::Poison>();
+}
+
+void DSLExpression::swap(DSLExpression& other) {
+ std::swap(fExpression, other.fExpression);
+}
+
+std::unique_ptr<SkSL::Expression> DSLExpression::release() {
+ SkASSERT(this->hasValue());
+ return std::move(fExpression);
+}
+
+std::unique_ptr<SkSL::Expression> DSLExpression::releaseIfPossible() {
+ return std::move(fExpression);
+}
+
+DSLType DSLExpression::type() const {
+ if (!this->hasValue()) {
+ return kVoid_Type;
+ }
+ return &fExpression->type();
+}
+
+std::string DSLExpression::description() const {
+ SkASSERT(this->hasValue());
+ return fExpression->description();
+}
+
+Position DSLExpression::position() const {
+ SkASSERT(this->hasValue());
+ return fExpression->fPosition;
+}
+
+void DSLExpression::setPosition(Position pos) {
+ SkASSERT(this->hasValue());
+ fExpression->fPosition = pos;
+}
+
+DSLExpression DSLExpression::x(Position pos) {
+ return Swizzle(std::move(*this), X, pos);
+}
+
+DSLExpression DSLExpression::y(Position pos) {
+ return Swizzle(std::move(*this), Y, pos);
+}
+
+DSLExpression DSLExpression::z(Position pos) {
+ return Swizzle(std::move(*this), Z, pos);
+}
+
+DSLExpression DSLExpression::w(Position pos) {
+ return Swizzle(std::move(*this), W, pos);
+}
+
+DSLExpression DSLExpression::r(Position pos) {
+ return Swizzle(std::move(*this), R, pos);
+}
+
+DSLExpression DSLExpression::g(Position pos) {
+ return Swizzle(std::move(*this), G, pos);
+}
+
+DSLExpression DSLExpression::b(Position pos) {
+ return Swizzle(std::move(*this), B, pos);
+}
+
+DSLExpression DSLExpression::a(Position pos) {
+ return Swizzle(std::move(*this), A, pos);
+}
+
+DSLExpression DSLExpression::field(std::string_view name, Position pos) {
+ return DSLExpression(FieldAccess::Convert(ThreadContext::Context(), pos,
+ *ThreadContext::SymbolTable(), this->release(), name), pos);
+}
+
+DSLExpression DSLExpression::assign(DSLExpression right) {
+ Position pos = this->position().rangeThrough(right.position());
+ return DSLExpression(BinaryExpression::Convert(ThreadContext::Context(), pos, this->release(),
+ SkSL::Operator::Kind::EQ, right.release()));
+}
+
+DSLExpression DSLExpression::operator[](DSLExpression right) {
+ Position pos = this->position().rangeThrough(right.position());
+ return DSLExpression(IndexExpression::Convert(ThreadContext::Context(),
+ *ThreadContext::SymbolTable(), pos,
+ this->release(), right.release()));
+}
+
+DSLExpression DSLExpression::index(DSLExpression index, Position pos) {
+ std::unique_ptr<SkSL::Expression> result = IndexExpression::Convert(ThreadContext::Context(),
+ *ThreadContext::SymbolTable(), pos, this->release(), index.release());
+ return DSLExpression(std::move(result), pos);
+}
+
+DSLExpression DSLExpression::operator()(SkTArray<DSLExpression> args, Position pos) {
+ ExpressionArray converted;
+ converted.reserve_back(args.size());
+ for (DSLExpression& arg : args) {
+ converted.push_back(arg.release());
+ }
+ return (*this)(std::move(converted), pos);
+}
+
+DSLExpression DSLExpression::operator()(ExpressionArray args, Position pos) {
+ return DSLExpression(SkSL::FunctionCall::Convert(ThreadContext::Context(), pos, this->release(),
+ std::move(args)), pos);
+}
+
+DSLExpression DSLExpression::prefix(Operator::Kind op, Position pos) {
+ std::unique_ptr<SkSL::Expression> result = PrefixExpression::Convert(ThreadContext::Context(),
+ pos, op, this->release());
+ return DSLExpression(std::move(result), pos);
+}
+
+DSLExpression DSLExpression::postfix(Operator::Kind op, Position pos) {
+ std::unique_ptr<SkSL::Expression> result = PostfixExpression::Convert(ThreadContext::Context(),
+ pos, this->release(), op);
+ return DSLExpression(std::move(result), pos);
+}
+
+DSLExpression DSLExpression::binary(Operator::Kind op, DSLExpression right, Position pos) {
+ std::unique_ptr<SkSL::Expression> result = BinaryExpression::Convert(ThreadContext::Context(),
+ pos, this->release(), op, right.release());
+ return DSLExpression(std::move(result), pos);
+}
+
+#define OP(op, token) \
+DSLExpression operator op(DSLExpression left, DSLExpression right) { \
+ return DSLExpression(BinaryExpression::Convert(ThreadContext::Context(), \
+ Position(), \
+ left.release(), \
+ Operator::Kind::token, \
+ right.release())); \
+}
+
+#define PREFIXOP(op, token) \
+DSLExpression operator op(DSLExpression expr) { \
+ return DSLExpression(PrefixExpression::Convert(ThreadContext::Context(), \
+ Position(), \
+ Operator::Kind::token, \
+ expr.release())); \
+}
+
+#define POSTFIXOP(op, token) \
+DSLExpression operator op(DSLExpression expr, int) { \
+ return DSLExpression(PostfixExpression::Convert(ThreadContext::Context(), \
+ Position(), \
+ expr.release(), \
+ Operator::Kind::token)); \
+}
+
+OP(+, PLUS)
+OP(+=, PLUSEQ)
+OP(-, MINUS)
+OP(-=, MINUSEQ)
+OP(*, STAR)
+OP(*=, STAREQ)
+OP(/, SLASH)
+OP(/=, SLASHEQ)
+OP(%, PERCENT)
+OP(%=, PERCENTEQ)
+OP(<<, SHL)
+OP(<<=, SHLEQ)
+OP(>>, SHR)
+OP(>>=, SHREQ)
+OP(&&, LOGICALAND)
+OP(||, LOGICALOR)
+OP(&, BITWISEAND)
+OP(&=, BITWISEANDEQ)
+OP(|, BITWISEOR)
+OP(|=, BITWISEOREQ)
+OP(^, BITWISEXOR)
+OP(^=, BITWISEXOREQ)
+DSLExpression LogicalXor(DSLExpression left, DSLExpression right) {
+ return DSLExpression(BinaryExpression::Convert(ThreadContext::Context(),
+ Position(),
+ left.release(),
+ SkSL::Operator::Kind::LOGICALXOR,
+ right.release()));
+}
+OP(==, EQEQ)
+OP(!=, NEQ)
+OP(>, GT)
+OP(<, LT)
+OP(>=, GTEQ)
+OP(<=, LTEQ)
+
+PREFIXOP(+, PLUS)
+PREFIXOP(-, MINUS)
+PREFIXOP(!, LOGICALNOT)
+PREFIXOP(~, BITWISENOT)
+PREFIXOP(++, PLUSPLUS)
+POSTFIXOP(++, PLUSPLUS)
+PREFIXOP(--, MINUSMINUS)
+POSTFIXOP(--, MINUSMINUS)
+
+DSLExpression operator,(DSLExpression left, DSLExpression right) {
+ return DSLExpression(BinaryExpression::Convert(ThreadContext::Context(),
+ Position(),
+ left.release(),
+ SkSL::Operator::Kind::COMMA,
+ right.release()));
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLFunction.cpp b/gfx/skia/skia/src/sksl/dsl/DSLFunction.cpp
new file mode 100644
index 0000000000..1a3836ce5a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLFunction.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLFunction.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLString.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/DSLVar.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/dsl/priv/DSLWriter.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLFunctionPrototype.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace SkSL {
+
+namespace dsl {
+
+static bool is_intrinsic_in_module(const Context& context, std::string_view name) {
+ return context.fConfig->fIsBuiltinCode && SkSL::FindIntrinsicKind(name) != kNotIntrinsic;
+}
+
+void DSLFunction::init(DSLModifiers modifiers, const DSLType& returnType, std::string_view name,
+ SkSpan<DSLParameter*> params, Position pos) {
+ fPosition = pos;
+
+ const Context& context = ThreadContext::Context();
+ if (context.fConfig->fSettings.fForceNoInline) {
+ // Apply the `noinline` modifier to every function. This allows us to test Runtime
+ // Effects without any inlining, even when the code is later added to a paint.
+ modifiers.fModifiers.fFlags &= ~Modifiers::kInline_Flag;
+ modifiers.fModifiers.fFlags |= Modifiers::kNoInline_Flag;
+ }
+
+ std::vector<std::unique_ptr<Variable>> paramVars;
+ paramVars.reserve(params.size());
+ for (DSLParameter* param : params) {
+ SkASSERT(!param->fInitialValue.hasValue());
+ SkASSERT(!param->fDeclaration);
+ std::unique_ptr<SkSL::Variable> paramVar = DSLWriter::CreateParameterVar(*param);
+ if (!paramVar) {
+ return;
+ }
+ paramVars.push_back(std::move(paramVar));
+ }
+ SkASSERT(paramVars.size() == params.size());
+ fDecl = SkSL::FunctionDeclaration::Convert(context,
+ *ThreadContext::SymbolTable(),
+ pos,
+ modifiers.fPosition,
+ context.fModifiersPool->add(modifiers.fModifiers),
+ name,
+ std::move(paramVars),
+ pos,
+ &returnType.skslType());
+ if (fDecl) {
+ for (size_t i = 0; i < params.size(); ++i) {
+ params[i]->fVar = fDecl->parameters()[i];
+ params[i]->fInitialized = true;
+ }
+ }
+}
+
+void DSLFunction::prototype() {
+ if (!fDecl) {
+ // We failed to create the declaration; error should already have been reported.
+ return;
+ }
+ ThreadContext::ProgramElements().push_back(std::make_unique<SkSL::FunctionPrototype>(
+ fDecl->fPosition, fDecl, ThreadContext::IsModule()));
+}
+
+void DSLFunction::define(DSLBlock block, Position pos) {
+ std::unique_ptr<SkSL::Block> body = block.release();
+ body->fPosition = pos;
+ if (!fDecl) {
+ // We failed to create the declaration; error should already have been reported.
+ return;
+ }
+ // We don't allow modules to define actual functions with intrinsic names. (Those should be
+ // reserved for actual intrinsics.)
+ const Context& context = ThreadContext::Context();
+ if (is_intrinsic_in_module(context, fDecl->name())) {
+ ThreadContext::ReportError(
+ SkSL::String::printf("Intrinsic function '%.*s' should not have a definition",
+ (int)fDecl->name().size(),
+ fDecl->name().data()),
+ fDecl->fPosition);
+ return;
+ }
+
+ if (fDecl->definition()) {
+ ThreadContext::ReportError(SkSL::String::printf("function '%s' was already defined",
+ fDecl->description().c_str()),
+ fDecl->fPosition);
+ return;
+ }
+ std::unique_ptr<FunctionDefinition> function = FunctionDefinition::Convert(
+ ThreadContext::Context(),
+ pos,
+ *fDecl,
+ std::move(body),
+ /*builtin=*/false);
+ fDecl->setDefinition(function.get());
+ ThreadContext::ProgramElements().push_back(std::move(function));
+}
+
+DSLExpression DSLFunction::call(SkSpan<DSLExpression> args, Position pos) {
+ ExpressionArray released;
+ released.reserve_back(args.size());
+ for (DSLExpression& arg : args) {
+ released.push_back(arg.release());
+ }
+ return this->call(std::move(released));
+}
+
+DSLExpression DSLFunction::call(ExpressionArray args, Position pos) {
+ std::unique_ptr<SkSL::Expression> result =
+ SkSL::FunctionCall::Convert(ThreadContext::Context(), pos, *fDecl, std::move(args));
+ return DSLExpression(std::move(result), pos);
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLLayout.cpp b/gfx/skia/skia/src/sksl/dsl/DSLLayout.cpp
new file mode 100644
index 0000000000..4ef840f230
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLLayout.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLLayout.h"
+
+#include "src/sksl/SkSLThreadContext.h"
+
+#include <string>
+
+namespace SkSL {
+
+namespace dsl {
+
+DSLLayout& DSLLayout::flag(SkSL::Layout::Flag mask, const char* name, Position pos) {
+ if (fSkSLLayout.fFlags & mask) {
+ ThreadContext::ReportError("layout qualifier '" + std::string(name) +
+ "' appears more than once", pos);
+ }
+ fSkSLLayout.fFlags |= mask;
+ return *this;
+}
+
+DSLLayout& DSLLayout::intValue(int* target, int value, SkSL::Layout::Flag flag, const char* name,
+ Position pos) {
+ this->flag(flag, name, pos);
+ *target = value;
+ return *this;
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLStatement.cpp b/gfx/skia/skia/src/sksl/dsl/DSLStatement.cpp
new file mode 100644
index 0000000000..ed11acac80
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLStatement.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLStatement.h"
+
+#include "include/private/SkSLDefines.h"
+#include "include/sksl/DSLBlock.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLNop.h"
+
+namespace SkSL {
+
+namespace dsl {
+
+DSLStatement::DSLStatement() {}
+
+DSLStatement::DSLStatement(DSLBlock block)
+ : fStatement(block.release()) {}
+
+DSLStatement::DSLStatement(DSLExpression expr) {
+ std::unique_ptr<SkSL::Expression> skslExpr = expr.release();
+ if (skslExpr) {
+ fStatement = SkSL::ExpressionStatement::Convert(ThreadContext::Context(),
+ std::move(skslExpr));
+ }
+}
+
+DSLStatement::DSLStatement(std::unique_ptr<SkSL::Expression> expr)
+ : fStatement(SkSL::ExpressionStatement::Convert(ThreadContext::Context(), std::move(expr))) {
+ SkASSERT(this->hasValue());
+}
+
+DSLStatement::DSLStatement(std::unique_ptr<SkSL::Statement> stmt)
+ : fStatement(std::move(stmt)) {
+ SkASSERT(this->hasValue());
+}
+
+DSLStatement::DSLStatement(std::unique_ptr<SkSL::Statement> stmt, Position pos)
+ : fStatement(stmt ? std::move(stmt) : SkSL::Nop::Make()) {
+ if (pos.valid() && !fStatement->fPosition.valid()) {
+ fStatement->fPosition = pos;
+ }
+}
+
+DSLStatement::~DSLStatement() {}
+
+DSLStatement operator,(DSLStatement left, DSLStatement right) {
+ Position pos = left.fStatement->fPosition;
+ StatementArray stmts;
+ stmts.reserve_back(2);
+ stmts.push_back(left.release());
+ stmts.push_back(right.release());
+ return DSLStatement(SkSL::Block::Make(pos, std::move(stmts), Block::Kind::kCompoundStatement));
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLType.cpp b/gfx/skia/skia/src/sksl/dsl/DSLType.cpp
new file mode 100644
index 0000000000..82c000e5ab
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLType.cpp
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLType.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLString.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLStructDefinition.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace SkSL {
+
+struct Modifiers;
+
+namespace dsl {
+
+static const SkSL::Type* verify_type(const Context& context,
+ const SkSL::Type* type,
+ bool allowGenericTypes,
+ Position pos) {
+ if (!context.fConfig->fIsBuiltinCode && type) {
+ if (!allowGenericTypes && (type->isGeneric() || type->isLiteral())) {
+ context.fErrors->error(pos, "type '" + std::string(type->name()) + "' is generic");
+ return context.fTypes.fPoison.get();
+ }
+ if (!type->isAllowedInES2(context)) {
+ context.fErrors->error(pos, "type '" + std::string(type->name()) +"' is not supported");
+ return context.fTypes.fPoison.get();
+ }
+ }
+ return type;
+}
+
+static const SkSL::Type* find_type(const Context& context,
+ Position pos,
+ std::string_view name) {
+ const Symbol* symbol = ThreadContext::SymbolTable()->find(name);
+ if (!symbol) {
+ context.fErrors->error(pos, String::printf("no symbol named '%.*s'",
+ (int)name.length(), name.data()));
+ return context.fTypes.fPoison.get();
+ }
+ if (!symbol->is<SkSL::Type>()) {
+ context.fErrors->error(pos, String::printf("symbol '%.*s' is not a type",
+ (int)name.length(), name.data()));
+ return context.fTypes.fPoison.get();
+ }
+ const SkSL::Type* type = &symbol->as<SkSL::Type>();
+ return verify_type(context, type, /*allowGenericTypes=*/false, pos);
+}
+
+static const SkSL::Type* find_type(const Context& context,
+ Position overallPos,
+ std::string_view name,
+ Position modifiersPos,
+ Modifiers* modifiers) {
+ const auto* type = find_type(context, overallPos, name);
+ return type->applyQualifiers(context, modifiers, ThreadContext::SymbolTable().get(),
+ modifiersPos);
+}
+
+static const SkSL::Type* get_type_from_type_constant(TypeConstant tc) {
+ const Context& context = ThreadContext::Context();
+ switch (tc) {
+ case kBool_Type:
+ return context.fTypes.fBool.get();
+ case kBool2_Type:
+ return context.fTypes.fBool2.get();
+ case kBool3_Type:
+ return context.fTypes.fBool3.get();
+ case kBool4_Type:
+ return context.fTypes.fBool4.get();
+ case kHalf_Type:
+ return context.fTypes.fHalf.get();
+ case kHalf2_Type:
+ return context.fTypes.fHalf2.get();
+ case kHalf3_Type:
+ return context.fTypes.fHalf3.get();
+ case kHalf4_Type:
+ return context.fTypes.fHalf4.get();
+ case kHalf2x2_Type:
+ return context.fTypes.fHalf2x2.get();
+ case kHalf3x2_Type:
+ return context.fTypes.fHalf3x2.get();
+ case kHalf4x2_Type:
+ return context.fTypes.fHalf4x2.get();
+ case kHalf2x3_Type:
+ return context.fTypes.fHalf2x3.get();
+ case kHalf3x3_Type:
+ return context.fTypes.fHalf3x3.get();
+ case kHalf4x3_Type:
+ return context.fTypes.fHalf4x3.get();
+ case kHalf2x4_Type:
+ return context.fTypes.fHalf2x4.get();
+ case kHalf3x4_Type:
+ return context.fTypes.fHalf3x4.get();
+ case kHalf4x4_Type:
+ return context.fTypes.fHalf4x4.get();
+ case kFloat_Type:
+ return context.fTypes.fFloat.get();
+ case kFloat2_Type:
+ return context.fTypes.fFloat2.get();
+ case kFloat3_Type:
+ return context.fTypes.fFloat3.get();
+ case kFloat4_Type:
+ return context.fTypes.fFloat4.get();
+ case kFloat2x2_Type:
+ return context.fTypes.fFloat2x2.get();
+ case kFloat3x2_Type:
+ return context.fTypes.fFloat3x2.get();
+ case kFloat4x2_Type:
+ return context.fTypes.fFloat4x2.get();
+ case kFloat2x3_Type:
+ return context.fTypes.fFloat2x3.get();
+ case kFloat3x3_Type:
+ return context.fTypes.fFloat3x3.get();
+ case kFloat4x3_Type:
+ return context.fTypes.fFloat4x3.get();
+ case kFloat2x4_Type:
+ return context.fTypes.fFloat2x4.get();
+ case kFloat3x4_Type:
+ return context.fTypes.fFloat3x4.get();
+ case kFloat4x4_Type:
+ return context.fTypes.fFloat4x4.get();
+ case kInt_Type:
+ return context.fTypes.fInt.get();
+ case kInt2_Type:
+ return context.fTypes.fInt2.get();
+ case kInt3_Type:
+ return context.fTypes.fInt3.get();
+ case kInt4_Type:
+ return context.fTypes.fInt4.get();
+ case kShader_Type:
+ return context.fTypes.fShader.get();
+ case kShort_Type:
+ return context.fTypes.fShort.get();
+ case kShort2_Type:
+ return context.fTypes.fShort2.get();
+ case kShort3_Type:
+ return context.fTypes.fShort3.get();
+ case kShort4_Type:
+ return context.fTypes.fShort4.get();
+ case kUInt_Type:
+ return context.fTypes.fUInt.get();
+ case kUInt2_Type:
+ return context.fTypes.fUInt2.get();
+ case kUInt3_Type:
+ return context.fTypes.fUInt3.get();
+ case kUInt4_Type:
+ return context.fTypes.fUInt4.get();
+ case kUShort_Type:
+ return context.fTypes.fUShort.get();
+ case kUShort2_Type:
+ return context.fTypes.fUShort2.get();
+ case kUShort3_Type:
+ return context.fTypes.fUShort3.get();
+ case kUShort4_Type:
+ return context.fTypes.fUShort4.get();
+ case kVoid_Type:
+ return context.fTypes.fVoid.get();
+ case kPoison_Type:
+ return context.fTypes.fPoison.get();
+ default:
+ SkUNREACHABLE;
+ }
+}
+
+DSLType::DSLType(TypeConstant tc, Position pos)
+ : fSkSLType(verify_type(ThreadContext::Context(),
+ get_type_from_type_constant(tc),
+ /*allowGenericTypes=*/false,
+ pos)) {}
+
+DSLType::DSLType(std::string_view name, Position pos)
+ : fSkSLType(find_type(ThreadContext::Context(), pos, name)) {}
+
+DSLType::DSLType(std::string_view name, DSLModifiers* modifiers, Position pos)
+ : fSkSLType(find_type(ThreadContext::Context(),
+ pos,
+ name,
+ modifiers->fPosition,
+ &modifiers->fModifiers)) {}
+
+DSLType::DSLType(const SkSL::Type* type, Position pos)
+ : fSkSLType(verify_type(ThreadContext::Context(), type, /*allowGenericTypes=*/true, pos)) {}
+
+DSLType DSLType::Invalid() {
+ return DSLType(ThreadContext::Context().fTypes.fInvalid.get(), Position());
+}
+
+bool DSLType::isBoolean() const {
+ return this->skslType().isBoolean();
+}
+
+bool DSLType::isNumber() const {
+ return this->skslType().isNumber();
+}
+
+bool DSLType::isFloat() const {
+ return this->skslType().isFloat();
+}
+
+bool DSLType::isSigned() const {
+ return this->skslType().isSigned();
+}
+
+bool DSLType::isUnsigned() const {
+ return this->skslType().isUnsigned();
+}
+
+bool DSLType::isInteger() const {
+ return this->skslType().isInteger();
+}
+
+bool DSLType::isScalar() const {
+ return this->skslType().isScalar();
+}
+
+bool DSLType::isVector() const {
+ return this->skslType().isVector();
+}
+
+bool DSLType::isMatrix() const {
+ return this->skslType().isMatrix();
+}
+
+bool DSLType::isArray() const {
+ return this->skslType().isArray();
+}
+
+bool DSLType::isStruct() const {
+ return this->skslType().isStruct();
+}
+
+bool DSLType::isInterfaceBlock() const {
+ return this->skslType().isInterfaceBlock();
+}
+
+bool DSLType::isEffectChild() const {
+ return this->skslType().isEffectChild();
+}
+
+DSLExpression DSLType::Construct(DSLType type, SkSpan<DSLExpression> argArray) {
+ SkSL::ExpressionArray skslArgs;
+ skslArgs.reserve_back(argArray.size());
+
+ for (DSLExpression& arg : argArray) {
+ if (!arg.hasValue()) {
+ return DSLExpression();
+ }
+ skslArgs.push_back(arg.release());
+ }
+ return DSLExpression(SkSL::Constructor::Convert(ThreadContext::Context(), Position(),
+ type.skslType(), std::move(skslArgs)));
+}
+
+DSLType Array(const DSLType& base, int count, Position pos) {
+ count = base.skslType().convertArraySize(ThreadContext::Context(), pos,
+ DSLExpression(count, pos).release());
+ if (!count) {
+ return DSLType(kPoison_Type);
+ }
+ return DSLType(ThreadContext::SymbolTable()->addArrayDimension(&base.skslType(), count), pos);
+}
+
+DSLType UnsizedArray(const DSLType& base, Position pos) {
+ if (!base.skslType().checkIfUsableInArray(ThreadContext::Context(), pos)) {
+ return DSLType(kPoison_Type);
+ }
+ return ThreadContext::SymbolTable()->addArrayDimension(&base.skslType(),
+ SkSL::Type::kUnsizedArray);
+}
+
+DSLType StructType(std::string_view name,
+ SkSpan<DSLField> fields,
+ bool interfaceBlock,
+ Position pos) {
+ std::vector<SkSL::Type::Field> skslFields;
+ skslFields.reserve(fields.size());
+ for (const DSLField& field : fields) {
+ skslFields.emplace_back(field.fPosition, field.fModifiers.fModifiers, field.fName,
+ &field.fType.skslType());
+ }
+ auto newType = SkSL::Type::MakeStructType(ThreadContext::Context(), pos, name,
+ std::move(skslFields), interfaceBlock);
+ return DSLType(ThreadContext::SymbolTable()->add(std::move(newType)), pos);
+}
+
+DSLType Struct(std::string_view name, SkSpan<DSLField> fields, Position pos) {
+ DSLType result = StructType(name, fields, /*interfaceBlock=*/false, pos);
+ ThreadContext::ProgramElements().push_back(
+ std::make_unique<SkSL::StructDefinition>(pos, result.skslType()));
+ return result;
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/DSLVar.cpp b/gfx/skia/skia/src/sksl/dsl/DSLVar.cpp
new file mode 100644
index 0000000000..299aa27a11
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/DSLVar.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/sksl/DSLVar.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/sksl/DSLModifiers.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLFunctionCall.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <utility>
+
+namespace SkSL {
+
+namespace dsl {
+
+/**
+ * DSLVarBase
+ */
+
+DSLVarBase::DSLVarBase(VariableStorage storage, DSLType type, std::string_view name,
+ DSLExpression initialValue, Position pos, Position namePos)
+ : DSLVarBase(storage, DSLModifiers(), std::move(type), name, std::move(initialValue),
+ pos, namePos) {}
+
+DSLVarBase::DSLVarBase(VariableStorage storage, const DSLModifiers& modifiers, DSLType type,
+ std::string_view name, DSLExpression initialValue, Position pos,
+ Position namePos)
+ : fModifiers(std::move(modifiers))
+ , fType(std::move(type))
+ , fNamePosition(namePos)
+ , fName(name)
+ , fInitialValue(std::move(initialValue))
+ , fPosition(pos)
+ , fStorage(storage) {}
+
+void DSLVarBase::swap(DSLVarBase& other) {
+ SkASSERT(this->storage() == other.storage());
+ std::swap(fModifiers, other.fModifiers);
+ std::swap(fType, other.fType);
+ std::swap(fDeclaration, other.fDeclaration);
+ std::swap(fVar, other.fVar);
+ std::swap(fNamePosition, other.fNamePosition);
+ std::swap(fName, other.fName);
+ std::swap(fInitialValue.fExpression, other.fInitialValue.fExpression);
+ std::swap(fInitialized, other.fInitialized);
+ std::swap(fPosition, other.fPosition);
+}
+
+DSLExpression DSLVarBase::operator[](DSLExpression&& index) {
+ return DSLExpression(*this)[std::move(index)];
+}
+
+DSLExpression DSLVarBase::assignExpression(DSLExpression expr) {
+ return DSLExpression(BinaryExpression::Convert(ThreadContext::Context(), Position(),
+ DSLExpression(*this, Position()).release(), SkSL::Operator::Kind::EQ,
+ expr.release()));
+}
+
+/**
+ * DSLVar
+ */
+
+DSLVar::DSLVar() : DSLVarBase(SkSL::VariableStorage::kLocal) {}
+
+DSLVar::DSLVar(DSLType type, std::string_view name, DSLExpression initialValue,
+ Position pos, Position namePos)
+ : INHERITED(SkSL::VariableStorage::kLocal, type, name, std::move(initialValue),
+ pos, namePos) {}
+
+DSLVar::DSLVar(const DSLModifiers& modifiers, DSLType type, std::string_view name,
+ DSLExpression initialValue, Position pos, Position namePos)
+ : INHERITED(SkSL::VariableStorage::kLocal, modifiers, type, name, std::move(initialValue),
+ pos, namePos) {}
+
+void DSLVar::swap(DSLVar& other) {
+ INHERITED::swap(other);
+}
+
+/**
+ * DSLGlobalVar
+ */
+
+DSLGlobalVar::DSLGlobalVar() : DSLVarBase(SkSL::VariableStorage::kGlobal) {}
+
+DSLGlobalVar::DSLGlobalVar(DSLType type, std::string_view name, DSLExpression initialValue,
+ Position pos, Position namePos)
+ : INHERITED(SkSL::VariableStorage::kGlobal, type, name, std::move(initialValue),
+ pos, namePos) {}
+
+DSLGlobalVar::DSLGlobalVar(const DSLModifiers& modifiers, DSLType type, std::string_view name,
+ DSLExpression initialValue, Position pos, Position namePos)
+ : INHERITED(SkSL::VariableStorage::kGlobal, modifiers, type, name, std::move(initialValue),
+ pos, namePos) {}
+
+DSLGlobalVar::DSLGlobalVar(const char* name)
+ : INHERITED(SkSL::VariableStorage::kGlobal, kVoid_Type, name, DSLExpression(),
+ Position(), Position()) {
+ fName = name;
+ SkSL::SymbolTable* symbolTable = ThreadContext::SymbolTable().get();
+ SkSL::Symbol* result = symbolTable->findMutable(fName);
+ SkASSERTF(result, "could not find '%.*s' in symbol table", (int)fName.length(), fName.data());
+ fVar = &result->as<SkSL::Variable>();
+ fInitialized = true;
+}
+
+void DSLGlobalVar::swap(DSLGlobalVar& other) {
+ INHERITED::swap(other);
+}
+
+std::unique_ptr<SkSL::Expression> DSLGlobalVar::methodCall(std::string_view methodName,
+ Position pos) {
+ if (!this->fType.isEffectChild()) {
+ ThreadContext::ReportError("type does not support method calls", pos);
+ return nullptr;
+ }
+ return FieldAccess::Convert(ThreadContext::Context(), pos, *ThreadContext::SymbolTable(),
+ DSLExpression(*this, pos).release(), methodName);
+}
+
+DSLExpression DSLGlobalVar::eval(ExpressionArray args, Position pos) {
+ auto method = this->methodCall("eval", pos);
+ return DSLExpression(
+ method ? SkSL::FunctionCall::Convert(ThreadContext::Context(), pos, std::move(method),
+ std::move(args))
+ : nullptr,
+ pos);
+}
+
+DSLExpression DSLGlobalVar::eval(DSLExpression x, Position pos) {
+ ExpressionArray converted;
+ converted.push_back(x.release());
+ return this->eval(std::move(converted), pos);
+}
+
+DSLExpression DSLGlobalVar::eval(DSLExpression x, DSLExpression y, Position pos) {
+ ExpressionArray converted;
+ converted.push_back(x.release());
+ converted.push_back(y.release());
+ return this->eval(std::move(converted), pos);
+}
+
+/**
+ * DSLParameter
+ */
+
+DSLParameter::DSLParameter() : DSLVarBase(SkSL::VariableStorage::kParameter) {}
+
+DSLParameter::DSLParameter(DSLType type, std::string_view name, Position pos, Position namePos)
+ : INHERITED(SkSL::VariableStorage::kParameter, type, name, DSLExpression(), pos, namePos) {}
+
+DSLParameter::DSLParameter(const DSLModifiers& modifiers, DSLType type, std::string_view name,
+ Position pos, Position namePos)
+ : INHERITED(SkSL::VariableStorage::kParameter, modifiers, type, name, DSLExpression(),
+ pos, namePos) {}
+
+void DSLParameter::swap(DSLParameter& other) {
+ INHERITED::swap(other);
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/priv/DSLWriter.cpp b/gfx/skia/skia/src/sksl/dsl/priv/DSLWriter.cpp
new file mode 100644
index 0000000000..9885db21f8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/priv/DSLWriter.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/dsl/priv/DSLWriter.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/DSLCore.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLModifiers.h"
+#include "include/sksl/DSLStatement.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/DSLVar.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+
+namespace dsl {
+
+SkSL::Variable* DSLWriter::Var(DSLVarBase& var) {
+ // fInitialized is true if we have attempted to create a var, whether or not we actually
+ // succeeded. If it's true, we don't want to try again, to avoid reporting the same error
+ // multiple times.
+ if (!var.fInitialized) {
+ // We haven't even attempted to create a var yet, so fVar ought to be null
+ SkASSERT(!var.fVar);
+ var.fInitialized = true;
+ if (var.storage() != SkSL::VariableStorage::kParameter) {
+ const SkSL::Type* baseType = &var.fType.skslType();
+ if (baseType->isArray()) {
+ baseType = &baseType->componentType();
+ }
+ }
+ std::unique_ptr<SkSL::Variable> skslvar = SkSL::Variable::Convert(ThreadContext::Context(),
+ var.fPosition,
+ var.fModifiers.fPosition,
+ var.fModifiers.fModifiers,
+ &var.fType.skslType(),
+ var.fNamePosition,
+ var.fName,
+ /*isArray=*/false,
+ /*arraySize=*/nullptr,
+ var.storage());
+ SkSL::Variable* varPtr = skslvar.get();
+ if (var.storage() != SkSL::VariableStorage::kParameter) {
+ var.fDeclaration = VarDeclaration::Convert(ThreadContext::Context(),
+ std::move(skslvar),
+ var.fInitialValue.releaseIfPossible(),
+ /*addToSymbolTable=*/false);
+ if (var.fDeclaration) {
+ var.fVar = varPtr;
+ var.fInitialized = true;
+ }
+ }
+ }
+ return var.fVar;
+}
+
+std::unique_ptr<SkSL::Variable> DSLWriter::CreateParameterVar(DSLParameter& var) {
+ // This should only be called on undeclared parameter variables, but we allow the creation to go
+ // ahead regardless so we don't have to worry about null pointers potentially sneaking in and
+ // breaking things. DSLFunction is responsible for reporting errors for invalid parameters.
+ return SkSL::Variable::Convert(ThreadContext::Context(),
+ var.fPosition,
+ var.fModifiers.fPosition,
+ var.fModifiers.fModifiers,
+ &var.fType.skslType(),
+ var.fNamePosition,
+ var.fName,
+ /*isArray=*/false,
+ /*arraySize=*/nullptr,
+ var.storage());
+}
+
+std::unique_ptr<SkSL::Statement> DSLWriter::Declaration(DSLVarBase& var) {
+ Var(var);
+ if (!var.fDeclaration) {
+ // We should have already reported an error before ending up here, just clean up the
+ // initial value so it doesn't assert and return a nop.
+ var.fInitialValue.releaseIfPossible();
+ return SkSL::Nop::Make();
+ }
+ return std::move(var.fDeclaration);
+}
+
+void DSLWriter::AddVarDeclaration(DSLStatement& existing, DSLVar& additional) {
+ if (existing.fStatement->is<Block>()) {
+ SkSL::Block& block = existing.fStatement->as<Block>();
+ SkASSERT(!block.isScope());
+ block.children().push_back(Declare(additional).release());
+ } else if (existing.fStatement->is<VarDeclaration>()) {
+ Position pos = existing.fStatement->fPosition;
+ StatementArray stmts;
+ stmts.reserve_back(2);
+ stmts.push_back(std::move(existing.fStatement));
+ stmts.push_back(Declare(additional).release());
+ existing.fStatement = SkSL::Block::Make(pos, std::move(stmts),
+ Block::Kind::kCompoundStatement);
+ } else if (existing.fStatement->isEmpty()) {
+ // If the variable declaration generated an error, we can end up with a Nop statement here.
+ existing.fStatement = Declare(additional).release();
+ }
+}
+
+void DSLWriter::Reset() {
+ SymbolTable::Pop(&ThreadContext::SymbolTable());
+ SymbolTable::Push(&ThreadContext::SymbolTable());
+ ThreadContext::ProgramElements().clear();
+ ThreadContext::GetModifiersPool()->clear();
+}
+
+} // namespace dsl
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/dsl/priv/DSLWriter.h b/gfx/skia/skia/src/sksl/dsl/priv/DSLWriter.h
new file mode 100644
index 0000000000..798c642eab
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/priv/DSLWriter.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSLWRITER
+#define SKSL_DSLWRITER
+
+#include "include/core/SkTypes.h"
+
+#include <memory>
+
+namespace SkSL {
+
+class Variable;
+class Statement;
+
+namespace dsl {
+
+class DSLParameter;
+class DSLStatement;
+class DSLVarBase;
+class DSLVar;
+
+/**
+ * Various utility methods needed by DSL code.
+ */
+class DSLWriter {
+public:
+ /**
+ * Returns the SkSL variable corresponding to a DSL var.
+ */
+ static SkSL::Variable* Var(DSLVarBase& var);
+
+ /**
+ * Creates an SkSL variable corresponding to a DSLParameter.
+ */
+ static std::unique_ptr<SkSL::Variable> CreateParameterVar(DSLParameter& var);
+
+ /**
+ * Returns the SkSL declaration corresponding to a DSLVar.
+ */
+ static std::unique_ptr<SkSL::Statement> Declaration(DSLVarBase& var);
+
+ /**
+ * Adds a new declaration into an existing declaration statement. This either turns the original
+ * declaration into an unscoped block or, if it already was, appends a new statement to the end
+ * of it.
+ */
+ static void AddVarDeclaration(DSLStatement& existing, DSLVar& additional);
+
+ /**
+ * Clears any elements or symbols which have been output.
+ */
+ static void Reset();
+};
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/dsl/priv/DSL_priv.h b/gfx/skia/skia/src/sksl/dsl/priv/DSL_priv.h
new file mode 100644
index 0000000000..4967291e7e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/dsl/priv/DSL_priv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DSL_PRIV
+#define SKSL_DSL_PRIV
+
+#include "include/private/SkSLProgramKind.h"
+
+namespace SkSL {
+
+class Compiler;
+struct ProgramSettings;
+
+namespace dsl {
+
+/**
+ * Initializes the DSL for compiling modules (SkSL include files).
+ */
+void StartModule(SkSL::Compiler* compiler,
+ SkSL::ProgramKind kind,
+ const SkSL::ProgramSettings& settings,
+ const SkSL::Module* parent);
+
+} // namespace dsl
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_compute.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_compute.minified.sksl
new file mode 100644
index 0000000000..f0322f9ae2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_compute.minified.sksl
@@ -0,0 +1,7 @@
+static constexpr char SKSL_MINIFIED_sksl_compute[] =
+"layout(builtin=24)in uint3 sk_NumWorkgroups;layout(builtin=26)in uint3 sk_WorkgroupID"
+";layout(builtin=27)in uint3 sk_LocalInvocationID;layout(builtin=28)in uint3"
+" sk_GlobalInvocationID;layout(builtin=29)in uint sk_LocalInvocationIndex;$pure"
+" half4 read($readableTexture2D,uint2);void write($writableTexture2D,uint2,half4"
+");$pure uint width($genTexture2D);$pure uint height($genTexture2D);void workgroupBarrier"
+"();void storageBarrier();";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_compute.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_compute.unoptimized.sksl
new file mode 100644
index 0000000000..7f7d211176
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_compute.unoptimized.sksl
@@ -0,0 +1,7 @@
+static constexpr char SKSL_MINIFIED_sksl_compute[] =
+"layout(builtin=24)in uint3 sk_NumWorkgroups;layout(builtin=26)in uint3 sk_WorkgroupID"
+";layout(builtin=27)in uint3 sk_LocalInvocationID;layout(builtin=28)in uint3"
+" sk_GlobalInvocationID;layout(builtin=29)in uint sk_LocalInvocationIndex;$pure"
+" half4 read($readableTexture2D t,uint2 pos);void write($writableTexture2D t"
+",uint2 pos,half4 color);$pure uint width($genTexture2D t);$pure uint height"
+"($genTexture2D t);void workgroupBarrier();void storageBarrier();";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_frag.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_frag.minified.sksl
new file mode 100644
index 0000000000..f0d4a792d0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_frag.minified.sksl
@@ -0,0 +1,5 @@
+static constexpr char SKSL_MINIFIED_sksl_frag[] =
+"layout(builtin=15)in float4 sk_FragCoord;layout(builtin=17)in bool sk_Clockwise"
+";layout(location=0,index=0,builtin=10001)out half4 sk_FragColor;layout(builtin"
+"=10008)half4 sk_LastFragColor;layout(builtin=10012)out half4 sk_SecondaryFragColor"
+";";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_frag.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_frag.unoptimized.sksl
new file mode 100644
index 0000000000..f0d4a792d0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_frag.unoptimized.sksl
@@ -0,0 +1,5 @@
+static constexpr char SKSL_MINIFIED_sksl_frag[] =
+"layout(builtin=15)in float4 sk_FragCoord;layout(builtin=17)in bool sk_Clockwise"
+";layout(location=0,index=0,builtin=10001)out half4 sk_FragColor;layout(builtin"
+"=10008)half4 sk_LastFragColor;layout(builtin=10012)out half4 sk_SecondaryFragColor"
+";";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_gpu.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_gpu.minified.sksl
new file mode 100644
index 0000000000..beb7f44c00
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_gpu.minified.sksl
@@ -0,0 +1,85 @@
+static constexpr char SKSL_MINIFIED_sksl_gpu[] =
+"$pure $genIType mix($genIType,$genIType,$genBType);$pure $genBType mix($genBType"
+",$genBType,$genBType);$pure $genType fma($genType,$genType,$genType);$pure $genHType"
+" fma($genHType,$genHType,$genHType);$genType frexp($genType,out $genIType);"
+"$genHType frexp($genHType,out $genIType);$pure $genType ldexp($genType,$genIType"
+");$pure $genHType ldexp($genHType,$genIType);$pure uint packSnorm2x16(float2"
+");$pure uint packUnorm4x8(float4);$pure uint packSnorm4x8(float4);$pure float2"
+" unpackSnorm2x16(uint);$pure float4 unpackUnorm4x8(uint);$pure float4 unpackSnorm4x8"
+"(uint);$pure uint packHalf2x16(float2);$pure float2 unpackHalf2x16(uint);$pure"
+" $genIType bitCount($genIType);$pure $genIType bitCount($genUType);$pure $genIType"
+" findLSB($genIType);$pure $genIType findLSB($genUType);$pure $genIType findMSB"
+"($genIType);$pure $genIType findMSB($genUType);$pure sampler2D makeSampler2D"
+"(texture2D,sampler);$pure half4 sample(sampler2D,float2);$pure half4 sample"
+"(sampler2D,float3);$pure half4 sample(sampler2D,float3,float);$pure half4 sample"
+"(samplerExternalOES,float2);$pure half4 sample(samplerExternalOES,float2,float"
+");$pure half4 sample(sampler2DRect,float2);$pure half4 sample(sampler2DRect"
+",float3);$pure half4 sampleLod(sampler2D,float2,float);$pure half4 sampleLod"
+"(sampler2D,float3,float);$pure half4 sampleGrad(sampler2D,float2,float2,float2"
+");$pure half4 subpassLoad(subpassInput);$pure half4 subpassLoad(subpassInputMS"
+",int);$pure uint atomicLoad(atomicUint);void atomicStore(atomicUint,uint);uint"
+" atomicAdd(atomicUint,uint);$pure half4 blend_clear(half4 a,half4 b){return"
+" half4(0.);}$pure half4 blend_src(half4 a,half4 b){return a;}$pure half4 blend_dst"
+"(half4 a,half4 b){return b;}$pure half4 blend_src_over(half4 a,half4 b){return"
+" a+(1.-a.w)*b;}$pure half4 blend_dst_over(half4 a,half4 b){return(1.-b.w)*a"
+"+b;}$pure half4 blend_src_in(half4 a,half4 b){return a*b.w;}$pure half4 blend_dst_in"
+"(half4 a,half4 b){return b*a.w;}$pure half4 blend_src_out(half4 a,half4 b){"
+"return(1.-b.w)*a;}$pure half4 blend_dst_out(half4 a,half4 b){return(1.-a.w)"
+"*b;}$pure half4 blend_src_atop(half4 a,half4 b){return b.w*a+(1.-a.w)*b;}$pure"
+" half4 blend_dst_atop(half4 a,half4 b){return(1.-b.w)*a+a.w*b;}$pure half4 blend_xor"
+"(half4 a,half4 b){return(1.-b.w)*a+(1.-a.w)*b;}$pure half4 blend_plus(half4"
+" a,half4 b){return min(a+b,1.);}$pure half4 blend_porter_duff(half4 a,half4"
+" b,half4 c){half2 d=a.xy+a.zw*(half2(c.w,b.w)+min(a.zw,0.));return min(half4"
+"(1.),b*d.x+c*d.y);}$pure half4 blend_modulate(half4 a,half4 b){return a*b;}"
+"$pure half4 blend_screen(half4 a,half4 b){return a+(1.-a)*b;}$pure half $b("
+"half2 a,half2 b){return 2.*b.x<=b.y?(2.*a.x)*b.x:a.y*b.y-(2.*(b.y-b.x))*(a."
+"y-a.x);}$pure half4 blend_overlay(half4 a,half4 b){half4 c=half4($b(a.xw,b."
+"xw),$b(a.yw,b.yw),$b(a.zw,b.zw),a.w+(1.-a.w)*b.w);c.xyz+=b.xyz*(1.-a.w)+a.xyz"
+"*(1.-b.w);return c;}$pure half4 blend_overlay(half c,half4 d,half4 e){return"
+" blend_overlay(bool(c)?e:d,bool(c)?d:e);}$pure half4 blend_lighten(half4 a,"
+"half4 b){half4 c=blend_src_over(a,b);c.xyz=max(c.xyz,(1.-b.w)*a.xyz+b.xyz);"
+"return c;}$pure half4 blend_darken(half c,half4 d,half4 e){half4 f=blend_src_over"
+"(d,e);half3 g=(1.-e.w)*d.xyz+e.xyz;f.xyz=c*min(f.xyz*c,g*c);return f;}$pure"
+" half4 blend_darken(half4 a,half4 b){return blend_darken(1.,a,b);}const half"
+" $kGuardedDivideEpsilon=half(sk_Caps.mustGuardDivisionEvenAfterExplicitZeroCheck"
+"?1e-08:0.);$pure inline half $c(half a,half b){return a/(b+$kGuardedDivideEpsilon"
+");}$pure inline half3 $c(half3 a,half b){return a/(b+$kGuardedDivideEpsilon"
+");}$pure half $d(half2 a,half2 b){if(b.x==0.){return a.x*(1.-b.y);}else{half"
+" c=a.y-a.x;if(c==0.){return(a.y*b.y+a.x*(1.-b.y))+b.x*(1.-a.y);}else{c=min("
+"b.y,$c(b.x*a.y,c));return(c*a.y+a.x*(1.-b.y))+b.x*(1.-a.y);}}}$pure half4 blend_color_dodge"
+"(half4 a,half4 b){return half4($d(a.xw,b.xw),$d(a.yw,b.yw),$d(a.zw,b.zw),a."
+"w+(1.-a.w)*b.w);}$pure half $e(half2 a,half2 b){if(b.y==b.x){return(a.y*b.y"
+"+a.x*(1.-b.y))+b.x*(1.-a.y);}else if(a.x==0.){return b.x*(1.-a.y);}else{half"
+" c=max(0.,b.y-$c((b.y-b.x)*a.y,a.x));return(c*a.y+a.x*(1.-b.y))+b.x*(1.-a.y"
+");}}$pure half4 blend_color_burn(half4 a,half4 b){return half4($e(a.xw,b.xw"
+"),$e(a.yw,b.yw),$e(a.zw,b.zw),a.w+(1.-a.w)*b.w);}$pure half4 blend_hard_light"
+"(half4 a,half4 b){return blend_overlay(b,a);}$pure half $f(half2 a,half2 b)"
+"{if(2.*a.x<=a.y){return($c((b.x*b.x)*(a.y-2.*a.x),b.y)+(1.-b.y)*a.x)+b.x*(("
+"-a.y+2.*a.x)+1.);}else if(4.*b.x<=b.y){half c=b.x*b.x;half e=c*b.x;half f=b"
+".y*b.y;half g=f*b.y;return $c(((f*(a.x-b.x*((3.*a.y-6.*a.x)-1.))+((12.*b.y)"
+"*c)*(a.y-2.*a.x))-(16.*e)*(a.y-2.*a.x))-g*a.x,f);}else{return((b.x*((a.y-2."
+"*a.x)+1.)+a.x)-sqrt(b.y*b.x)*(a.y-2.*a.x))-b.y*a.x;}}$pure half4 blend_soft_light"
+"(half4 a,half4 b){return b.w==0.?a:half4($f(a.xw,b.xw),$f(a.yw,b.yw),$f(a.zw"
+",b.zw),a.w+(1.-a.w)*b.w);}$pure half4 blend_difference(half4 a,half4 b){return"
+" half4((a.xyz+b.xyz)-2.*min(a.xyz*b.w,b.xyz*a.w),a.w+(1.-a.w)*b.w);}$pure half4"
+" blend_exclusion(half4 a,half4 b){return half4((b.xyz+a.xyz)-(2.*b.xyz)*a.xyz"
+",a.w+(1.-a.w)*b.w);}$pure half4 blend_multiply(half4 a,half4 b){return half4"
+"(((1.-a.w)*b.xyz+(1.-b.w)*a.xyz)+a.xyz*b.xyz,a.w+(1.-a.w)*b.w);}$pure half $g"
+"(half3 a){return dot(half3(.3,.59,.11),a);}$pure half3 $h(half3 a,half b,half3"
+" c){half d=$g(c);half3 e=(d-$g(a))+a;half f=min(min(e.x,e.y),e.z);half g=max"
+"(max(e.x,e.y),e.z);if(f<0.&&d!=f){e=d+(e-d)*$c(d,d-f);}if(g>b&&g!=d){e=d+$c"
+"((e-d)*(b-d),g-d);}return e;}$pure half $i(half3 a){return max(max(a.x,a.y)"
+",a.z)-min(min(a.x,a.y),a.z);}$pure half3 $j(half3 a,half3 b){half c=min(min"
+"(a.x,a.y),a.z);half d=max(max(a.x,a.y),a.z);return d>c?((a-c)*$i(b))/(d-c):"
+"half3(0.);}$pure half4 blend_hslc(half2 a,half4 b,half4 c){half d=c.w*b.w;half3"
+" e=b.xyz*c.w;half3 f=c.xyz*b.w;half3 g=bool(a.x)?f:e;half3 h=bool(a.x)?e:f;"
+"if(bool(a.y)){g=$j(g,h);h=f;}return half4(((($h(g,d,h)+c.xyz)-f)+b.xyz)-e,("
+"b.w+c.w)-d);}$pure half4 blend_hue(half4 a,half4 b){return blend_hslc(half2"
+"(0.,1.),a,b);}$pure half4 blend_saturation(half4 a,half4 b){return blend_hslc"
+"(half2(1.),a,b);}$pure half4 blend_color(half4 a,half4 b){return blend_hslc"
+"(half2(0.),a,b);}$pure half4 blend_luminosity(half4 a,half4 b){return blend_hslc"
+"(half2(1.,0.),a,b);}$pure float2 proj(float3 a){return a.xy/a.z;}$pure float"
+" cross_length_2d(float2 c,float2 d){return determinant(float2x2(c,d));}$pure"
+" half cross_length_2d(half2 c,half2 d){return determinant(half2x2(c,d));}$pure"
+" float2 perp(float2 a){return float2(-a.y,a.x);}$pure half2 perp(half2 a){return"
+" half2(-a.y,a.x);}$pure float coverage_bias(float a){return 1.-.5*a;}";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_gpu.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_gpu.unoptimized.sksl
new file mode 100644
index 0000000000..0b09c7d461
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_gpu.unoptimized.sksl
@@ -0,0 +1,107 @@
+static constexpr char SKSL_MINIFIED_sksl_gpu[] =
+"$pure $genIType mix($genIType x,$genIType y,$genBType a);$pure $genBType mix"
+"($genBType x,$genBType y,$genBType a);$pure $genType fma($genType a,$genType"
+" b,$genType c);$pure $genHType fma($genHType a,$genHType b,$genHType c);$genType"
+" frexp($genType x,out $genIType exp);$genHType frexp($genHType x,out $genIType"
+" exp);$pure $genType ldexp($genType x,$genIType exp);$pure $genHType ldexp("
+"$genHType x,$genIType exp);$pure uint packSnorm2x16(float2 v);$pure uint packUnorm4x8"
+"(float4 v);$pure uint packSnorm4x8(float4 v);$pure float2 unpackSnorm2x16(uint"
+" p);$pure float4 unpackUnorm4x8(uint p);$pure float4 unpackSnorm4x8(uint p)"
+";$pure uint packHalf2x16(float2 v);$pure float2 unpackHalf2x16(uint v);$pure"
+" $genIType bitCount($genIType value);$pure $genIType bitCount($genUType value"
+");$pure $genIType findLSB($genIType value);$pure $genIType findLSB($genUType"
+" value);$pure $genIType findMSB($genIType value);$pure $genIType findMSB($genUType"
+" value);$pure sampler2D makeSampler2D(texture2D texture,sampler s);$pure half4"
+" sample(sampler2D s,float2 P);$pure half4 sample(sampler2D s,float3 P);$pure"
+" half4 sample(sampler2D s,float3 P,float bias);$pure half4 sample(samplerExternalOES"
+" s,float2 P);$pure half4 sample(samplerExternalOES s,float2 P,float bias);$pure"
+" half4 sample(sampler2DRect s,float2 P);$pure half4 sample(sampler2DRect s,"
+"float3 P);$pure half4 sampleLod(sampler2D s,float2 P,float lod);$pure half4"
+" sampleLod(sampler2D s,float3 P,float lod);$pure half4 sampleGrad(sampler2D"
+" s,float2,float2 dPdx,float2 dPdy);$pure half4 subpassLoad(subpassInput subpass"
+");$pure half4 subpassLoad(subpassInputMS subpass,int sample);$pure uint atomicLoad"
+"(atomicUint a);void atomicStore(atomicUint a,uint value);uint atomicAdd(atomicUint"
+" a,uint value);$pure half4 blend_clear(half4 src,half4 dst){return half4(0."
+");}$pure half4 blend_src(half4 src,half4 dst){return src;}$pure half4 blend_dst"
+"(half4 src,half4 dst){return dst;}$pure half4 blend_src_over(half4 src,half4"
+" dst){return src+(1.-src.w)*dst;}$pure half4 blend_dst_over(half4 src,half4"
+" dst){return(1.-dst.w)*src+dst;}$pure half4 blend_src_in(half4 src,half4 dst"
+"){return src*dst.w;}$pure half4 blend_dst_in(half4 src,half4 dst){return dst"
+"*src.w;}$pure half4 blend_src_out(half4 src,half4 dst){return(1.-dst.w)*src"
+";}$pure half4 blend_dst_out(half4 src,half4 dst){return(1.-src.w)*dst;}$pure"
+" half4 blend_src_atop(half4 src,half4 dst){return dst.w*src+(1.-src.w)*dst;"
+"}$pure half4 blend_dst_atop(half4 src,half4 dst){return(1.-dst.w)*src+src.w"
+"*dst;}$pure half4 blend_xor(half4 src,half4 dst){return(1.-dst.w)*src+(1.-src"
+".w)*dst;}$pure half4 blend_plus(half4 src,half4 dst){return min(src+dst,1.)"
+";}$pure half4 blend_porter_duff(half4 blendOp,half4 src,half4 dst){half2 coeff"
+"=blendOp.xy+blendOp.zw*(half2(dst.w,src.w)+min(blendOp.zw,0.));return min(half4"
+"(1.),src*coeff.x+dst*coeff.y);}$pure half4 blend_modulate(half4 src,half4 dst"
+"){return src*dst;}$pure half4 blend_screen(half4 src,half4 dst){return src+"
+"(1.-src)*dst;}$pure half $blend_overlay_component(half2 s,half2 d){return 2."
+"*d.x<=d.y?(2.*s.x)*d.x:s.y*d.y-(2.*(d.y-d.x))*(s.y-s.x);}$pure half4 blend_overlay"
+"(half4 src,half4 dst){half4 result=half4($blend_overlay_component(src.xw,dst"
+".xw),$blend_overlay_component(src.yw,dst.yw),$blend_overlay_component(src.zw"
+",dst.zw),src.w+(1.-src.w)*dst.w);result.xyz+=dst.xyz*(1.-src.w)+src.xyz*(1."
+"-dst.w);return result;}$pure half4 blend_overlay(half flip,half4 a,half4 b)"
+"{return blend_overlay(bool(flip)?b:a,bool(flip)?a:b);}$pure half4 blend_lighten"
+"(half4 src,half4 dst){half4 result=blend_src_over(src,dst);result.xyz=max(result"
+".xyz,(1.-dst.w)*src.xyz+dst.xyz);return result;}$pure half4 blend_darken(half"
+" mode,half4 src,half4 dst){half4 a=blend_src_over(src,dst);half3 b=(1.-dst."
+"w)*src.xyz+dst.xyz;a.xyz=mode*min(a.xyz*mode,b*mode);return a;}$pure half4 blend_darken"
+"(half4 src,half4 dst){return blend_darken(1.,src,dst);}const half $kGuardedDivideEpsilon"
+"=half(sk_Caps.mustGuardDivisionEvenAfterExplicitZeroCheck?1e-08:0.);$pure inline"
+" half $guarded_divide(half n,half d){return n/(d+$kGuardedDivideEpsilon);}$pure"
+" inline half3 $guarded_divide(half3 n,half d){return n/(d+$kGuardedDivideEpsilon"
+");}$pure half $color_dodge_component(half2 s,half2 d){if(d.x==0.){return s."
+"x*(1.-d.y);}else{half delta=s.y-s.x;if(delta==0.){return(s.y*d.y+s.x*(1.-d."
+"y))+d.x*(1.-s.y);}else{delta=min(d.y,$guarded_divide(d.x*s.y,delta));return"
+"(delta*s.y+s.x*(1.-d.y))+d.x*(1.-s.y);}}}$pure half4 blend_color_dodge(half4"
+" src,half4 dst){return half4($color_dodge_component(src.xw,dst.xw),$color_dodge_component"
+"(src.yw,dst.yw),$color_dodge_component(src.zw,dst.zw),src.w+(1.-src.w)*dst."
+"w);}$pure half $color_burn_component(half2 s,half2 d){if(d.y==d.x){return(s"
+".y*d.y+s.x*(1.-d.y))+d.x*(1.-s.y);}else if(s.x==0.){return d.x*(1.-s.y);}else"
+"{half delta=max(0.,d.y-$guarded_divide((d.y-d.x)*s.y,s.x));return(delta*s.y"
+"+s.x*(1.-d.y))+d.x*(1.-s.y);}}$pure half4 blend_color_burn(half4 src,half4 dst"
+"){return half4($color_burn_component(src.xw,dst.xw),$color_burn_component(src"
+".yw,dst.yw),$color_burn_component(src.zw,dst.zw),src.w+(1.-src.w)*dst.w);}$pure"
+" half4 blend_hard_light(half4 src,half4 dst){return blend_overlay(dst,src);"
+"}$pure half $soft_light_component(half2 s,half2 d){if(2.*s.x<=s.y){return($guarded_divide"
+"((d.x*d.x)*(s.y-2.*s.x),d.y)+(1.-d.y)*s.x)+d.x*((-s.y+2.*s.x)+1.);}else if("
+"4.*d.x<=d.y){half DSqd=d.x*d.x;half DCub=DSqd*d.x;half DaSqd=d.y*d.y;half DaCub"
+"=DaSqd*d.y;return $guarded_divide(((DaSqd*(s.x-d.x*((3.*s.y-6.*s.x)-1.))+(("
+"12.*d.y)*DSqd)*(s.y-2.*s.x))-(16.*DCub)*(s.y-2.*s.x))-DaCub*s.x,DaSqd);}else"
+"{return((d.x*((s.y-2.*s.x)+1.)+s.x)-sqrt(d.y*d.x)*(s.y-2.*s.x))-d.y*s.x;}}$pure"
+" half4 blend_soft_light(half4 src,half4 dst){return dst.w==0.?src:half4($soft_light_component"
+"(src.xw,dst.xw),$soft_light_component(src.yw,dst.yw),$soft_light_component("
+"src.zw,dst.zw),src.w+(1.-src.w)*dst.w);}$pure half4 blend_difference(half4 src"
+",half4 dst){return half4((src.xyz+dst.xyz)-2.*min(src.xyz*dst.w,dst.xyz*src"
+".w),src.w+(1.-src.w)*dst.w);}$pure half4 blend_exclusion(half4 src,half4 dst"
+"){return half4((dst.xyz+src.xyz)-(2.*dst.xyz)*src.xyz,src.w+(1.-src.w)*dst."
+"w);}$pure half4 blend_multiply(half4 src,half4 dst){return half4(((1.-src.w"
+")*dst.xyz+(1.-dst.w)*src.xyz)+src.xyz*dst.xyz,src.w+(1.-src.w)*dst.w);}$pure"
+" half $blend_color_luminance(half3 color){return dot(half3(.3,.59,.11),color"
+");}$pure half3 $blend_set_color_luminance(half3 hueSatColor,half alpha,half3"
+" lumColor){half lum=$blend_color_luminance(lumColor);half3 result=(lum-$blend_color_luminance"
+"(hueSatColor))+hueSatColor;half minComp=min(min(result.x,result.y),result.z"
+");half maxComp=max(max(result.x,result.y),result.z);if(minComp<0.&&lum!=minComp"
+"){result=lum+(result-lum)*$guarded_divide(lum,lum-minComp);}if(maxComp>alpha"
+"&&maxComp!=lum){result=lum+$guarded_divide((result-lum)*(alpha-lum),maxComp"
+"-lum);}return result;}$pure half $blend_color_saturation(half3 color){return"
+" max(max(color.x,color.y),color.z)-min(min(color.x,color.y),color.z);}$pure"
+" half3 $blend_set_color_saturation(half3 color,half3 satColor){half mn=min("
+"min(color.x,color.y),color.z);half mx=max(max(color.x,color.y),color.z);return"
+" mx>mn?((color-mn)*$blend_color_saturation(satColor))/(mx-mn):half3(0.);}$pure"
+" half4 blend_hslc(half2 flipSat,half4 src,half4 dst){half alpha=dst.w*src.w"
+";half3 sda=src.xyz*dst.w;half3 dsa=dst.xyz*src.w;half3 l=bool(flipSat.x)?dsa"
+":sda;half3 r=bool(flipSat.x)?sda:dsa;if(bool(flipSat.y)){l=$blend_set_color_saturation"
+"(l,r);r=dsa;}return half4(((($blend_set_color_luminance(l,alpha,r)+dst.xyz)"
+"-dsa)+src.xyz)-sda,(src.w+dst.w)-alpha);}$pure half4 blend_hue(half4 src,half4"
+" dst){return blend_hslc(half2(0.,1.),src,dst);}$pure half4 blend_saturation"
+"(half4 src,half4 dst){return blend_hslc(half2(1.),src,dst);}$pure half4 blend_color"
+"(half4 src,half4 dst){return blend_hslc(half2(0.),src,dst);}$pure half4 blend_luminosity"
+"(half4 src,half4 dst){return blend_hslc(half2(1.,0.),src,dst);}$pure float2"
+" proj(float3 p){return p.xy/p.z;}$pure float cross_length_2d(float2 a,float2"
+" b){return determinant(float2x2(a,b));}$pure half cross_length_2d(half2 a,half2"
+" b){return determinant(half2x2(a,b));}$pure float2 perp(float2 v){return float2"
+"(-v.y,v.x);}$pure half2 perp(half2 v){return half2(-v.y,v.x);}$pure float coverage_bias"
+"(float scale){return 1.-.5*scale;}";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.dehydrated.sksl b/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.dehydrated.sksl
new file mode 100644
index 0000000000..93d4e47b8a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.dehydrated.sksl
@@ -0,0 +1,3119 @@
+static constexpr uint8_t SKSL_INCLUDE_sksl_graphite_frag[] = {14,0,27,6,
+8,115,107,95,101,114,114,111,114,
+5,104,97,108,102,52,
+5,99,111,108,111,114,
+14,115,107,95,112,97,115,115,116,104,114,111,117,103,104,
+10,99,111,108,111,114,80,97,114,97,109,
+6,102,108,111,97,116,52,
+15,115,107,95,115,111,108,105,100,95,115,104,97,100,101,114,
+2,116,109,
+3,105,110,116,
+1,102,
+5,102,108,111,97,116,
+3,108,111,119,
+4,104,105,103,104,
+5,36,116,105,108,101,
+6,99,111,111,114,100,115,
+6,102,108,111,97,116,50,
+6,115,117,98,115,101,116,
+3,116,109,88,
+3,116,109,89,
+8,105,109,103,87,105,100,116,104,
+9,105,109,103,72,101,105,103,104,116,
+1,115,
+9,115,97,109,112,108,101,114,50,68,
+15,115,107,95,105,109,97,103,101,95,115,104,97,100,101,114,
+8,116,105,108,101,77,111,100,101,
+1,116,
+10,36,116,105,108,101,95,103,114,97,100,
+11,99,111,108,111,114,115,80,97,114,97,109,
+12,111,102,102,115,101,116,115,80,97,114,97,109,
+16,36,99,111,108,111,114,105,122,101,95,103,114,97,100,95,52,
+16,36,99,111,108,111,114,105,122,101,95,103,114,97,100,95,56,
+11,112,111,105,110,116,48,80,97,114,97,109,
+11,112,111,105,110,116,49,80,97,114,97,109,
+3,112,111,115,
+19,36,108,105,110,101,97,114,95,103,114,97,100,95,108,97,121,111,117,116,
+11,99,101,110,116,101,114,80,97,114,97,109,
+11,114,97,100,105,117,115,80,97,114,97,109,
+19,36,114,97,100,105,97,108,95,103,114,97,100,95,108,97,121,111,117,116,
+9,98,105,97,115,80,97,114,97,109,
+10,115,99,97,108,101,80,97,114,97,109,
+18,36,115,119,101,101,112,95,103,114,97,100,95,108,97,121,111,117,116,
+2,112,48,
+2,112,49,
+14,36,109,97,112,95,116,111,95,117,110,105,116,95,120,
+8,102,108,111,97,116,51,120,51,
+12,114,97,100,105,117,115,48,80,97,114,97,109,
+12,114,97,100,105,117,115,49,80,97,114,97,109,
+20,36,99,111,110,105,99,97,108,95,103,114,97,100,95,108,97,121,111,117,116,
+23,115,107,95,108,105,110,101,97,114,95,103,114,97,100,95,52,95,115,104,97,100,101,114,
+23,115,107,95,108,105,110,101,97,114,95,103,114,97,100,95,56,95,115,104,97,100,101,114,
+23,115,107,95,114,97,100,105,97,108,95,103,114,97,100,95,52,95,115,104,97,100,101,114,
+23,115,107,95,114,97,100,105,97,108,95,103,114,97,100,95,56,95,115,104,97,100,101,114,
+22,115,107,95,115,119,101,101,112,95,103,114,97,100,95,52,95,115,104,97,100,101,114,
+22,115,107,95,115,119,101,101,112,95,103,114,97,100,95,56,95,115,104,97,100,101,114,
+24,115,107,95,99,111,110,105,99,97,108,95,103,114,97,100,95,52,95,115,104,97,100,101,114,
+24,115,107,95,99,111,110,105,99,97,108,95,103,114,97,100,95,56,95,115,104,97,100,101,114,
+7,99,111,108,111,114,73,110,
+1,109,
+8,102,108,111,97,116,52,120,52,
+1,118,
+6,105,110,72,83,76,65,
+21,115,107,95,109,97,116,114,105,120,95,99,111,108,111,114,102,105,108,116,101,114,
+9,98,108,101,110,100,77,111,100,101,
+3,115,114,99,
+3,100,115,116,
+8,115,107,95,98,108,101,110,100,
+15,115,107,95,98,108,101,110,100,95,115,104,97,100,101,114,
+8,100,115,116,67,111,108,111,114,
+8,115,114,99,67,111,108,111,114,
+20,115,107,95,98,108,101,110,100,95,99,111,108,111,114,102,105,108,116,101,114,
+7,105,110,67,111,108,111,114,
+20,115,107,95,116,97,98,108,101,95,99,111,108,111,114,102,105,108,116,101,114,
+23,115,107,95,103,97,117,115,115,105,97,110,95,99,111,108,111,114,102,105,108,116,101,114,
+4,104,97,108,102,
+6,107,67,108,97,109,112,
+7,107,82,101,112,101,97,116,
+13,107,77,105,114,114,111,114,82,101,112,101,97,116,
+5,99,108,97,109,112,
+6,108,101,110,103,116,104,
+3,109,111,100,
+7,108,101,110,103,116,104,50,
+3,116,109,112,
+3,109,105,120,
+4,115,116,101,112,
+10,105,110,115,101,116,67,108,97,109,112,
+10,99,108,97,109,112,101,100,80,111,115,
+5,102,108,111,111,114,
+4,99,101,105,108,
+6,115,97,109,112,108,101,
+5,102,114,97,99,116,
+3,116,95,49,
+26,109,117,115,116,68,111,79,112,66,101,116,119,101,101,110,70,108,111,111,114,65,110,100,65,98,115,
+3,97,98,115,
+11,36,105,110,116,76,105,116,101,114,97,108,
+5,100,101,108,116,97,
+3,100,111,116,
+8,100,105,115,116,97,110,99,101,
+5,97,110,103,108,101,
+28,97,116,97,110,50,73,109,112,108,101,109,101,110,116,101,100,65,115,65,116,97,110,89,79,118,101,114,88,
+4,97,116,97,110,
+7,105,110,118,101,114,115,101,
+19,83,75,95,83,99,97,108,97,114,78,101,97,114,108,121,90,101,114,111,
+7,100,67,101,110,116,101,114,
+7,100,82,97,100,105,117,115,
+6,114,97,100,105,97,108,
+4,98,111,111,108,
+5,115,116,114,105,112,
+5,115,99,97,108,101,
+9,115,99,97,108,101,83,105,103,110,
+4,98,105,97,115,
+2,112,116,
+4,115,105,103,110,
+9,116,114,97,110,115,102,111,114,109,
+1,114,
+3,114,95,50,
+6,102,108,111,97,116,51,
+4,115,113,114,116,
+9,105,115,83,119,97,112,112,101,100,
+2,67,102,
+6,115,99,97,108,101,88,
+6,115,99,97,108,101,89,
+2,114,49,
+15,105,115,70,111,99,97,108,79,110,67,105,114,99,108,101,
+5,105,110,118,82,49,
+11,100,82,97,100,105,117,115,83,105,103,110,
+13,105,115,87,101,108,108,66,101,104,97,118,101,100,
+3,120,95,116,
+5,116,109,112,80,116,
+4,116,101,109,112,
+8,99,111,108,111,114,79,117,116,
+11,36,114,103,98,95,116,111,95,104,115,108,
+8,117,110,112,114,101,109,117,108,
+11,36,104,115,108,95,116,111,95,114,103,98,
+8,115,97,116,117,114,97,116,101,
+11,98,108,101,110,100,95,99,108,101,97,114,
+9,98,108,101,110,100,95,115,114,99,
+9,98,108,101,110,100,95,100,115,116,
+17,98,108,101,110,100,95,112,111,114,116,101,114,95,100,117,102,102,
+14,98,108,101,110,100,95,109,111,100,117,108,97,116,101,
+12,98,108,101,110,100,95,115,99,114,101,101,110,
+13,98,108,101,110,100,95,111,118,101,114,108,97,121,
+12,98,108,101,110,100,95,100,97,114,107,101,110,
+17,98,108,101,110,100,95,99,111,108,111,114,95,100,111,100,103,101,
+16,98,108,101,110,100,95,99,111,108,111,114,95,98,117,114,110,
+16,98,108,101,110,100,95,115,111,102,116,95,108,105,103,104,116,
+16,98,108,101,110,100,95,100,105,102,102,101,114,101,110,99,101,
+15,98,108,101,110,100,95,101,120,99,108,117,115,105,111,110,
+14,98,108,101,110,100,95,109,117,108,116,105,112,108,121,
+10,98,108,101,110,100,95,104,115,108,99,
+5,104,97,108,102,50,
+6,102,97,99,116,111,114,
+3,101,120,112,
+52,1,139,0,
+28,1,0,
+39,
+16,0,64,0,0,2,0,0,
+51,255,255,11,0,
+54,2,0,
+17,17,0,
+51,255,255,11,0,3,
+28,3,0,
+39,
+16,0,64,0,0,23,0,1,
+51,2,0,
+51,255,255,11,0,
+54,4,0,
+17,38,0,
+51,255,255,49,0,3,
+28,5,0,
+39,
+16,0,64,0,0,56,0,1,
+51,4,0,
+51,255,255,11,0,
+54,6,0,
+17,72,0,
+51,255,255,75,0,3,
+54,7,0,
+17,79,0,
+51,255,255,81,0,3,
+54,8,0,
+17,87,0,
+51,255,255,81,0,3,
+54,9,0,
+17,91,0,
+51,255,255,81,0,3,
+28,10,0,
+39,
+16,0,64,0,0,96,0,4,
+51,6,0,
+51,7,0,
+51,8,0,
+51,9,0,
+51,255,255,81,0,
+54,11,0,
+17,102,0,
+51,255,255,109,0,3,
+54,12,0,
+17,116,0,
+51,255,255,49,0,3,
+54,13,0,
+17,123,0,
+51,255,255,75,0,3,
+54,14,0,
+17,127,0,
+51,255,255,75,0,3,
+54,15,0,
+17,131,0,
+51,255,255,75,0,3,
+54,16,0,
+17,140,0,
+51,255,255,75,0,3,
+54,17,0,
+17,150,0,
+51,255,255,152,0,3,
+28,18,0,
+39,
+16,0,64,0,0,162,0,7,
+51,11,0,
+51,12,0,
+51,13,0,
+51,14,0,
+51,15,0,
+51,16,0,
+51,17,0,
+51,255,255,11,0,
+54,19,0,
+17,178,0,
+51,255,255,75,0,3,
+54,20,0,
+17,187,0,
+51,255,255,109,0,3,
+28,21,0,
+39,
+16,0,64,0,0,189,0,2,
+51,19,0,
+51,20,0,
+51,255,255,109,0,
+0,22,0,
+51,255,255,49,0,4,
+0,23,0,
+51,255,255,81,0,4,
+54,24,0,
+17,200,0,
+51,22,0,3,
+54,25,0,
+17,212,0,
+51,23,0,3,
+54,26,0,
+17,187,0,
+51,255,255,109,0,3,
+28,27,0,
+39,
+16,0,64,0,0,225,0,3,
+51,24,0,
+51,25,0,
+51,26,0,
+51,255,255,11,0,
+0,28,0,
+51,255,255,49,0,8,
+0,29,0,
+51,255,255,81,0,8,
+54,30,0,
+17,200,0,
+51,28,0,3,
+54,31,0,
+17,212,0,
+51,29,0,3,
+54,32,0,
+17,187,0,
+51,255,255,109,0,3,
+28,33,0,
+39,
+16,0,64,0,0,242,0,3,
+51,30,0,
+51,31,0,
+51,32,0,
+51,255,255,11,0,
+54,34,0,
+17,3,1,
+51,255,255,109,0,3,
+54,35,0,
+17,15,1,
+51,255,255,109,0,3,
+54,36,0,
+17,27,1,
+51,255,255,109,0,3,
+28,37,0,
+39,
+16,0,64,0,0,31,1,3,
+51,34,0,
+51,35,0,
+51,36,0,
+51,255,255,109,0,
+54,38,0,
+17,51,1,
+51,255,255,109,0,3,
+54,39,0,
+17,63,1,
+51,255,255,81,0,3,
+54,40,0,
+17,27,1,
+51,255,255,109,0,3,
+28,41,0,
+39,
+16,0,64,0,0,75,1,3,
+51,38,0,
+51,39,0,
+51,40,0,
+51,255,255,109,0,
+54,42,0,
+17,51,1,
+51,255,255,109,0,3,
+54,43,0,
+17,95,1,
+51,255,255,81,0,3,
+54,44,0,
+17,105,1,
+51,255,255,81,0,3,
+54,45,0,
+17,27,1,
+51,255,255,109,0,3,
+28,46,0,
+39,
+16,0,64,0,0,116,1,4,
+51,42,0,
+51,43,0,
+51,44,0,
+51,45,0,
+51,255,255,109,0,
+54,47,0,
+17,135,1,
+51,255,255,109,0,3,
+54,48,0,
+17,138,1,
+51,255,255,109,0,3,
+28,49,0,
+39,
+16,0,64,0,0,141,1,2,
+51,47,0,
+51,48,0,
+51,255,255,156,1,
+54,50,0,
+17,3,1,
+51,255,255,109,0,3,
+54,51,0,
+17,15,1,
+51,255,255,109,0,3,
+54,52,0,
+17,165,1,
+51,255,255,81,0,3,
+54,53,0,
+17,178,1,
+51,255,255,81,0,3,
+54,54,0,
+17,27,1,
+51,255,255,109,0,3,
+28,55,0,
+39,
+16,0,64,0,0,191,1,5,
+51,50,0,
+51,51,0,
+51,52,0,
+51,53,0,
+51,54,0,
+51,255,255,109,0,
+54,56,0,
+17,102,0,
+51,255,255,109,0,3,
+54,57,0,
+17,200,0,
+51,22,0,3,
+54,58,0,
+17,212,0,
+51,23,0,3,
+54,59,0,
+17,3,1,
+51,255,255,109,0,3,
+54,60,0,
+17,15,1,
+51,255,255,109,0,3,
+54,61,0,
+17,178,0,
+51,255,255,75,0,3,
+28,62,0,
+39,
+16,0,64,0,0,212,1,6,
+51,56,0,
+51,57,0,
+51,58,0,
+51,59,0,
+51,60,0,
+51,61,0,
+51,255,255,11,0,
+54,63,0,
+17,102,0,
+51,255,255,109,0,3,
+54,64,0,
+17,200,0,
+51,28,0,3,
+54,65,0,
+17,212,0,
+51,29,0,3,
+54,66,0,
+17,3,1,
+51,255,255,109,0,3,
+54,67,0,
+17,15,1,
+51,255,255,109,0,3,
+54,68,0,
+17,178,0,
+51,255,255,75,0,3,
+28,69,0,
+39,
+16,0,64,0,0,236,1,6,
+51,63,0,
+51,64,0,
+51,65,0,
+51,66,0,
+51,67,0,
+51,68,0,
+51,255,255,11,0,
+54,70,0,
+17,102,0,
+51,255,255,109,0,3,
+54,71,0,
+17,200,0,
+51,22,0,3,
+54,72,0,
+17,212,0,
+51,23,0,3,
+54,73,0,
+17,51,1,
+51,255,255,109,0,3,
+54,74,0,
+17,63,1,
+51,255,255,81,0,3,
+54,75,0,
+17,178,0,
+51,255,255,75,0,3,
+28,76,0,
+39,
+16,0,64,0,0,4,2,6,
+51,70,0,
+51,71,0,
+51,72,0,
+51,73,0,
+51,74,0,
+51,75,0,
+51,255,255,11,0,
+54,77,0,
+17,102,0,
+51,255,255,109,0,3,
+54,78,0,
+17,200,0,
+51,28,0,3,
+54,79,0,
+17,212,0,
+51,29,0,3,
+54,80,0,
+17,51,1,
+51,255,255,109,0,3,
+54,81,0,
+17,63,1,
+51,255,255,81,0,3,
+54,82,0,
+17,178,0,
+51,255,255,75,0,3,
+28,83,0,
+39,
+16,0,64,0,0,28,2,6,
+51,77,0,
+51,78,0,
+51,79,0,
+51,80,0,
+51,81,0,
+51,82,0,
+51,255,255,11,0,
+54,84,0,
+17,102,0,
+51,255,255,109,0,3,
+54,85,0,
+17,200,0,
+51,22,0,3,
+54,86,0,
+17,212,0,
+51,23,0,3,
+54,87,0,
+17,51,1,
+51,255,255,109,0,3,
+54,88,0,
+17,95,1,
+51,255,255,81,0,3,
+54,89,0,
+17,105,1,
+51,255,255,81,0,3,
+54,90,0,
+17,178,0,
+51,255,255,75,0,3,
+28,91,0,
+39,
+16,0,64,0,0,52,2,7,
+51,84,0,
+51,85,0,
+51,86,0,
+51,87,0,
+51,88,0,
+51,89,0,
+51,90,0,
+51,255,255,11,0,
+54,92,0,
+17,102,0,
+51,255,255,109,0,3,
+54,93,0,
+17,200,0,
+51,28,0,3,
+54,94,0,
+17,212,0,
+51,29,0,3,
+54,95,0,
+17,51,1,
+51,255,255,109,0,3,
+54,96,0,
+17,95,1,
+51,255,255,81,0,3,
+54,97,0,
+17,105,1,
+51,255,255,81,0,3,
+54,98,0,
+17,178,0,
+51,255,255,75,0,3,
+28,99,0,
+39,
+16,0,64,0,0,75,2,7,
+51,92,0,
+51,93,0,
+51,94,0,
+51,95,0,
+51,96,0,
+51,97,0,
+51,98,0,
+51,255,255,11,0,
+54,100,0,
+17,102,0,
+51,255,255,109,0,3,
+54,101,0,
+17,200,0,
+51,22,0,3,
+54,102,0,
+17,212,0,
+51,23,0,3,
+54,103,0,
+17,3,1,
+51,255,255,109,0,3,
+54,104,0,
+17,15,1,
+51,255,255,109,0,3,
+54,105,0,
+17,165,1,
+51,255,255,81,0,3,
+54,106,0,
+17,178,1,
+51,255,255,81,0,3,
+54,107,0,
+17,178,0,
+51,255,255,75,0,3,
+28,108,0,
+39,
+16,0,64,0,0,98,2,8,
+51,100,0,
+51,101,0,
+51,102,0,
+51,103,0,
+51,104,0,
+51,105,0,
+51,106,0,
+51,107,0,
+51,255,255,11,0,
+54,109,0,
+17,102,0,
+51,255,255,109,0,3,
+54,110,0,
+17,200,0,
+51,28,0,3,
+54,111,0,
+17,212,0,
+51,29,0,3,
+54,112,0,
+17,3,1,
+51,255,255,109,0,3,
+54,113,0,
+17,15,1,
+51,255,255,109,0,3,
+54,114,0,
+17,165,1,
+51,255,255,81,0,3,
+54,115,0,
+17,178,1,
+51,255,255,81,0,3,
+54,116,0,
+17,178,0,
+51,255,255,75,0,3,
+28,117,0,
+39,
+16,0,64,0,0,123,2,8,
+51,109,0,
+51,110,0,
+51,111,0,
+51,112,0,
+51,113,0,
+51,114,0,
+51,115,0,
+51,116,0,
+51,255,255,11,0,
+54,118,0,
+17,148,2,
+51,255,255,11,0,3,
+54,119,0,
+17,156,2,
+51,255,255,158,2,3,
+54,120,0,
+17,167,2,
+51,255,255,49,0,3,
+54,121,0,
+17,169,2,
+51,255,255,75,0,3,
+28,122,0,
+39,
+16,0,64,0,0,176,2,4,
+51,118,0,
+51,119,0,
+51,120,0,
+51,121,0,
+51,255,255,11,0,
+54,123,0,
+17,198,2,
+51,255,255,75,0,3,
+54,124,0,
+17,208,2,
+51,255,255,11,0,3,
+54,125,0,
+17,212,2,
+51,255,255,11,0,3,
+28,126,0,
+39,
+16,0,64,0,0,216,2,3,
+51,123,0,
+51,124,0,
+51,125,0,
+51,255,255,11,0,
+54,127,0,
+17,198,2,
+51,255,255,75,0,3,
+54,128,0,
+17,208,2,
+51,255,255,11,0,3,
+54,129,0,
+17,212,2,
+51,255,255,11,0,3,
+28,130,0,
+39,
+16,0,64,0,0,225,2,3,
+51,127,0,
+51,128,0,
+51,129,0,
+51,255,255,11,0,
+54,131,0,
+17,241,2,
+51,255,255,11,0,3,
+54,132,0,
+17,198,2,
+51,255,255,75,0,3,
+54,133,0,
+17,250,2,
+51,255,255,49,0,3,
+28,134,0,
+39,
+16,0,64,0,0,3,3,3,
+51,131,0,
+51,132,0,
+51,133,0,
+51,255,255,11,0,
+54,135,0,
+17,24,3,
+51,255,255,11,0,3,
+54,136,0,
+17,150,0,
+51,255,255,152,0,3,
+28,137,0,
+39,
+16,0,64,0,0,32,3,2,
+51,135,0,
+51,136,0,
+51,255,255,11,0,
+54,138,0,
+17,24,3,
+51,255,255,11,0,3,
+28,139,0,
+39,
+16,0,64,0,0,53,3,1,
+51,138,0,
+51,255,255,11,0,31,0,
+26,0,
+32,0,
+54,0,
+36,0,
+48,0,
+40,0,
+45,0,
+9,0,
+20,0,
+21,0,
+27,0,
+22,0,
+28,0,
+125,0,
+133,0,
+129,0,
+107,0,
+116,0,
+0,0,
+138,0,
+17,0,
+61,0,
+68,0,
+121,0,
+2,0,
+75,0,
+82,0,
+4,0,
+90,0,
+98,0,
+136,0,
+20,
+29,1,0,
+2,
+52,1,0,0,0,0,1,
+44,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,128,63,1,
+29,3,0,
+2,
+52,1,0,0,0,0,1,
+44,
+56,2,0,0,1,
+29,5,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+56,4,0,0,1,
+29,10,0,
+2,
+52,1,3,0,
+54,140,0,
+38,
+16,4,82,3,
+51,255,255,75,0,2,
+54,141,0,
+38,
+16,4,89,3,
+51,255,255,75,0,2,
+54,142,0,
+38,
+16,4,97,3,
+51,255,255,75,0,2,3,0,
+0,0,
+2,0,
+1,0,4,
+55,140,0,
+51,255,255,75,0,0,
+36,
+51,255,255,75,0,0,0,0,0,
+55,141,0,
+51,255,255,75,0,0,
+36,
+51,255,255,75,0,1,0,0,0,
+55,142,0,
+51,255,255,75,0,0,
+36,
+51,255,255,75,0,2,0,0,0,
+32,0,
+1,
+56,6,0,0,16,
+56,140,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,81,0,255,255,111,3,3,
+56,7,0,0,
+56,8,0,0,
+56,9,0,0,1,
+32,0,
+1,
+56,6,0,0,16,
+56,141,0,0,
+2,
+52,1,1,0,
+54,143,0,
+17,117,3,
+51,255,255,81,0,2,1,0,
+0,0,2,
+55,143,0,
+51,255,255,81,0,0,
+1,
+56,9,0,0,1,
+56,8,0,0,
+44,
+1,
+27,
+51,255,255,81,0,255,255,124,3,2,
+1,
+56,7,0,0,1,
+56,8,0,0,
+56,143,0,0,0,
+56,8,0,0,1,
+32,0,
+1,
+56,6,0,0,16,
+56,142,0,0,
+2,
+52,1,3,0,
+54,144,0,
+17,117,3,
+51,255,255,81,0,2,
+54,145,0,
+17,128,3,
+51,255,255,81,0,2,
+54,146,0,
+17,136,3,
+51,255,255,81,0,2,3,0,
+0,0,
+1,0,
+2,0,4,
+55,144,0,
+51,255,255,81,0,0,
+1,
+56,9,0,0,1,
+56,8,0,0,
+55,145,0,
+51,255,255,81,0,0,
+1,
+25,
+51,255,255,81,0,0,0,0,64,2,
+56,144,0,0,
+55,146,0,
+51,255,255,81,0,0,
+27,
+51,255,255,81,0,255,255,124,3,2,
+1,
+56,7,0,0,1,
+56,8,0,0,
+56,145,0,0,
+44,
+1,
+27,
+51,255,255,81,0,255,255,140,3,3,
+56,146,0,0,
+1,
+56,145,0,0,1,
+56,146,0,0,
+27,
+51,255,255,81,0,255,255,144,3,2,
+56,144,0,0,
+56,146,0,0,0,
+56,8,0,0,1,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,81,0,255,255,111,3,3,
+56,7,0,0,
+56,8,0,0,
+56,9,0,0,1,1,
+29,18,0,
+2,
+52,1,2,0,
+54,147,0,
+17,149,3,
+51,255,255,49,0,2,
+54,148,0,
+17,160,3,
+51,255,255,109,0,2,2,0,
+1,0,
+0,0,5,
+22,
+1,
+50,
+56,11,0,1,1,0,15,
+27,
+51,255,255,81,0,10,0,4,
+56,13,0,0,
+50,
+56,11,0,0,1,0,
+50,
+56,12,0,0,1,0,
+50,
+56,12,0,0,1,2,
+22,
+1,
+50,
+56,11,0,1,1,1,15,
+27,
+51,255,255,81,0,10,0,4,
+56,14,0,0,
+50,
+56,11,0,0,1,1,
+50,
+56,12,0,0,1,1,
+50,
+56,12,0,0,1,3,
+55,147,0,
+51,255,255,49,0,0,
+8,
+51,255,255,49,0,2,
+1,
+27,
+51,255,255,109,0,255,255,171,3,1,
+50,
+56,12,0,0,2,0,1,0,
+25,
+51,255,255,81,0,0,0,0,63,
+1,
+27,
+51,255,255,109,0,255,255,177,3,1,
+50,
+56,12,0,0,2,2,3,1,
+25,
+51,255,255,81,0,0,0,0,63,
+55,148,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,255,255,111,3,3,
+56,11,0,0,
+50,
+56,147,0,0,2,0,1,
+50,
+56,147,0,0,2,2,3,
+44,
+27,
+51,255,255,11,0,255,255,182,3,2,
+56,17,0,0,
+1,
+56,148,0,0,3,
+8,
+51,255,255,109,0,2,
+12,
+51,255,255,81,0,1,
+56,15,0,0,
+12,
+51,255,255,81,0,1,
+56,16,0,0,1,
+29,21,0,
+2,
+52,1,0,0,0,0,2,
+49,0,
+52,1,0,0,0,0,
+56,19,0,0,4,0,0,0,0,0,
+2,
+57,2,
+22,
+1,
+50,
+56,20,0,1,1,0,15,
+27,
+51,255,255,81,0,255,255,111,3,3,
+50,
+56,20,0,0,1,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,63,
+4,0,0,1,0,0,0,
+2,
+57,2,
+22,
+1,
+50,
+56,20,0,1,1,0,15,
+27,
+51,255,255,81,0,255,255,189,3,1,
+50,
+56,20,0,0,1,0,
+4,0,0,2,0,0,0,
+2,
+52,1,1,0,
+54,149,0,
+17,195,3,
+51,255,255,81,0,2,1,0,
+0,0,5,
+55,149,0,
+51,255,255,81,0,0,
+1,
+50,
+56,20,0,0,1,0,1,
+25,
+51,255,255,81,0,0,0,128,63,
+22,
+1,
+50,
+56,20,0,1,1,0,15,
+1,
+1,
+56,149,0,0,1,
+1,
+25,
+51,255,255,81,0,0,0,0,64,2,
+27,
+51,255,255,81,0,255,255,171,3,1,
+1,
+56,149,0,0,2,
+25,
+51,255,255,81,0,0,0,0,63,1,
+25,
+51,255,255,81,0,0,0,128,63,
+32,0,
+45,199,3,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+50,
+56,20,0,1,1,0,15,
+27,
+51,255,255,81,0,255,255,111,3,3,
+50,
+56,20,0,0,1,0,
+25,
+51,255,255,81,0,0,0,128,191,
+25,
+51,255,255,81,0,0,0,128,63,1,
+57,
+22,
+1,
+50,
+56,20,0,1,1,0,15,
+27,
+51,255,255,81,0,255,255,226,3,1,
+50,
+56,20,0,0,1,0,
+4,1,0,3,0,0,0,
+2,
+57,2,
+32,0,
+1,
+1,
+50,
+56,20,0,0,1,0,18,
+25,
+51,255,255,81,0,0,0,0,0,9,
+1,
+50,
+56,20,0,0,1,0,19,
+25,
+51,255,255,81,0,0,0,128,63,
+2,
+52,1,0,0,0,0,1,
+44,
+8,
+51,255,255,109,0,2,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,191,1,
+57,
+4,0,
+44,
+56,20,0,0,1,
+29,27,0,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+50,
+56,26,0,0,1,1,18,
+25,
+51,255,255,81,0,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+13,
+51,255,255,11,0,1,
+25,
+51,255,255,77,3,0,0,0,0,1,
+32,0,
+1,
+50,
+56,26,0,0,1,0,20,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+33,
+56,24,0,0,
+36,
+51,255,255,230,3,0,0,0,0,1,
+32,0,
+1,
+50,
+56,26,0,0,1,0,18,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,1,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,24,0,0,
+36,
+51,255,255,230,3,0,0,0,0,
+33,
+56,24,0,0,
+36,
+51,255,255,230,3,1,0,0,0,
+1,
+1,
+50,
+56,26,0,0,1,0,1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,0,0,0,0,3,
+1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,1,0,0,0,1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,0,0,0,0,1,
+32,0,
+1,
+50,
+56,26,0,0,1,0,18,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,2,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,24,0,0,
+36,
+51,255,255,230,3,1,0,0,0,
+33,
+56,24,0,0,
+36,
+51,255,255,230,3,2,0,0,0,
+1,
+1,
+50,
+56,26,0,0,1,0,1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,1,0,0,0,3,
+1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,2,0,0,0,1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,1,0,0,0,1,
+32,0,
+1,
+50,
+56,26,0,0,1,0,18,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,3,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,24,0,0,
+36,
+51,255,255,230,3,2,0,0,0,
+33,
+56,24,0,0,
+36,
+51,255,255,230,3,3,0,0,0,
+1,
+1,
+50,
+56,26,0,0,1,0,1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,2,0,0,0,3,
+1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,3,0,0,0,1,
+33,
+56,25,0,0,
+36,
+51,255,255,230,3,2,0,0,0,1,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+33,
+56,24,0,0,
+36,
+51,255,255,230,3,3,0,0,0,1,1,
+29,33,0,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,1,18,
+25,
+51,255,255,81,0,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+13,
+51,255,255,11,0,1,
+25,
+51,255,255,77,3,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,0,18,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,4,0,0,0,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,0,18,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,2,0,0,0,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,0,20,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,0,18,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,1,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,0,0,0,0,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,1,0,0,0,
+1,
+1,
+50,
+56,32,0,0,1,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,0,0,0,0,3,
+1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,1,0,0,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,0,0,0,0,1,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,1,0,0,0,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,2,0,0,0,
+1,
+1,
+50,
+56,32,0,0,1,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,1,0,0,0,3,
+1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,2,0,0,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,1,0,0,0,1,1,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,0,18,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,3,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,2,0,0,0,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,3,0,0,0,
+1,
+1,
+50,
+56,32,0,0,1,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,2,0,0,0,3,
+1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,3,0,0,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,2,0,0,0,1,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,3,0,0,0,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,4,0,0,0,
+1,
+1,
+50,
+56,32,0,0,1,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,3,0,0,0,3,
+1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,4,0,0,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,3,0,0,0,1,1,1,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,0,18,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,6,0,0,0,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,0,18,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,5,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,4,0,0,0,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,5,0,0,0,
+1,
+1,
+50,
+56,32,0,0,1,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,4,0,0,0,3,
+1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,5,0,0,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,4,0,0,0,1,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,5,0,0,0,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,6,0,0,0,
+1,
+1,
+50,
+56,32,0,0,1,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,5,0,0,0,3,
+1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,6,0,0,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,5,0,0,0,1,1,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+50,
+56,32,0,0,1,0,18,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,7,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+27,
+51,255,255,49,0,255,255,140,3,3,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,6,0,0,0,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,7,0,0,0,
+1,
+1,
+50,
+56,32,0,0,1,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,6,0,0,0,3,
+1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,7,0,0,0,1,
+33,
+56,31,0,0,
+36,
+51,255,255,230,3,6,0,0,0,1,
+2,
+52,1,0,0,0,0,1,
+44,
+9,
+51,255,255,11,0,1,
+33,
+56,30,0,0,
+36,
+51,255,255,230,3,7,0,0,0,1,1,1,1,
+29,37,0,
+2,
+52,1,2,0,
+54,150,0,
+17,242,3,
+51,255,255,109,0,2,
+54,151,0,
+17,187,0,
+51,255,255,81,0,2,2,0,
+0,0,
+1,0,4,
+22,
+1,
+56,36,0,2,23,
+56,34,0,0,
+55,150,0,
+51,255,255,109,0,0,
+1,
+56,35,0,0,1,
+56,34,0,0,
+55,151,0,
+51,255,255,81,0,0,
+1,
+27,
+51,255,255,81,0,255,255,248,3,2,
+56,36,0,0,
+56,150,0,0,3,
+27,
+51,255,255,81,0,255,255,248,3,2,
+56,150,0,0,
+56,150,0,0,
+44,
+8,
+51,255,255,109,0,2,
+56,151,0,0,
+25,
+51,255,255,81,0,0,0,128,63,1,
+29,41,0,
+2,
+52,1,1,0,
+54,152,0,
+17,187,0,
+51,255,255,81,0,2,1,0,
+0,0,2,
+55,152,0,
+51,255,255,81,0,0,
+1,
+27,
+51,255,255,81,0,255,255,252,3,2,
+56,40,0,0,
+56,38,0,0,3,
+56,39,0,0,
+44,
+8,
+51,255,255,109,0,2,
+56,152,0,0,
+25,
+51,255,255,81,0,0,0,128,63,1,
+29,46,0,
+2,
+52,1,2,0,
+54,153,0,
+17,5,4,
+51,255,255,81,0,2,
+54,154,0,
+17,187,0,
+51,255,255,81,0,2,2,0,
+0,0,
+1,0,4,
+22,
+1,
+56,45,0,2,23,
+56,42,0,0,
+55,153,0,
+51,255,255,81,0,0,
+53,
+45,11,4,
+1,
+25,
+51,255,255,81,0,0,0,0,64,2,
+27,
+51,255,255,81,0,255,255,40,4,2,
+42,1,
+50,
+56,45,0,0,1,1,
+1,
+27,
+51,255,255,81,0,255,255,117,3,1,
+56,45,0,0,1,
+50,
+56,45,0,0,1,0,
+27,
+51,255,255,81,0,255,255,40,4,2,
+42,1,
+50,
+56,45,0,0,1,1,
+42,1,
+50,
+56,45,0,0,1,0,
+55,154,0,
+51,255,255,81,0,0,
+1,
+1,
+1,
+1,
+56,153,0,0,2,
+25,
+51,255,255,81,0,131,249,34,62,0,
+25,
+51,255,255,81,0,0,0,0,63,0,
+56,43,0,0,2,
+56,44,0,0,
+44,
+8,
+51,255,255,109,0,2,
+56,154,0,0,
+25,
+51,255,255,81,0,0,0,128,63,1,
+29,49,0,
+2,
+52,1,0,0,0,0,1,
+44,
+1,
+8,
+51,255,255,156,1,9,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,191,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,63,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,63,2,
+27,
+51,255,255,156,1,255,255,45,4,1,
+8,
+51,255,255,156,1,9,
+1,
+50,
+56,48,0,0,1,1,1,
+50,
+56,47,0,0,1,1,
+1,
+50,
+56,47,0,0,1,0,1,
+50,
+56,48,0,0,1,0,
+25,
+51,255,255,81,0,0,0,0,0,
+1,
+50,
+56,48,0,0,1,0,1,
+50,
+56,47,0,0,1,0,
+1,
+50,
+56,48,0,0,1,1,1,
+50,
+56,47,0,0,1,1,
+25,
+51,255,255,81,0,0,0,0,0,
+50,
+56,47,0,0,1,0,
+50,
+56,47,0,0,1,1,
+25,
+51,255,255,81,0,0,0,128,63,1,
+29,55,0,
+2,
+52,1,5,0,
+54,155,0,
+38,
+16,4,53,4,
+51,255,255,81,0,2,
+54,156,0,
+17,73,4,
+51,255,255,81,0,2,
+54,157,0,
+17,81,4,
+51,255,255,81,0,2,
+54,158,0,
+17,89,4,
+51,255,255,96,4,2,
+54,159,0,
+17,101,4,
+51,255,255,96,4,2,5,0,
+0,0,
+1,0,
+2,0,
+3,0,
+4,0,6,
+55,155,0,
+51,255,255,81,0,0,
+25,
+51,255,255,81,0,0,0,128,57,
+55,156,0,
+51,255,255,81,0,0,
+27,
+51,255,255,81,0,255,255,252,3,2,
+56,50,0,0,
+56,51,0,0,
+55,157,0,
+51,255,255,81,0,0,
+1,
+56,53,0,0,1,
+56,52,0,0,
+55,158,0,
+51,255,255,96,4,0,
+1,
+56,156,0,0,18,
+56,155,0,0,
+55,159,0,
+51,255,255,96,4,0,
+1,
+27,
+51,255,255,81,0,255,255,226,3,1,
+56,157,0,0,18,
+56,155,0,0,
+32,0,
+56,158,0,0,
+2,
+52,1,5,0,
+54,160,0,
+17,107,4,
+51,255,255,81,0,2,
+54,161,0,
+17,113,4,
+51,255,255,81,0,2,
+54,162,0,
+17,123,4,
+51,255,255,81,0,2,
+54,163,0,
+17,128,4,
+51,255,255,109,0,2,
+54,164,0,
+17,187,0,
+51,255,255,81,0,2,5,0,
+2,0,
+3,0,
+0,0,
+1,0,
+4,0,7,
+32,0,
+56,159,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+8,
+51,255,255,109,0,2,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,191,1,
+57,
+55,160,0,
+51,255,255,81,0,0,
+1,
+25,
+51,255,255,81,0,0,0,128,63,3,
+56,157,0,0,
+55,161,0,
+51,255,255,81,0,0,
+27,
+51,255,255,81,0,255,255,131,4,1,
+56,157,0,0,
+55,162,0,
+51,255,255,81,0,0,
+1,
+56,52,0,0,3,
+56,157,0,0,
+55,163,0,
+51,255,255,109,0,0,
+1,
+1,
+56,54,0,0,1,
+56,50,0,0,2,
+56,160,0,0,
+55,164,0,
+51,255,255,81,0,0,
+1,
+1,
+27,
+51,255,255,81,0,255,255,117,3,1,
+56,163,0,0,2,
+56,161,0,0,1,
+56,162,0,0,
+44,
+8,
+51,255,255,109,0,2,
+56,164,0,0,
+25,
+51,255,255,81,0,0,0,128,63,1,
+32,0,
+56,159,0,0,
+2,
+52,1,5,0,
+54,165,0,
+17,136,4,
+51,255,255,156,1,2,
+54,166,0,
+17,146,4,
+51,255,255,81,0,2,
+54,167,0,
+17,148,4,
+51,255,255,81,0,2,
+54,168,0,
+17,128,4,
+51,255,255,109,0,2,
+54,169,0,
+17,187,0,
+51,255,255,81,0,2,5,0,
+3,0,
+1,0,
+2,0,
+4,0,
+0,0,8,
+55,165,0,
+51,255,255,156,1,0,
+27,
+51,255,255,156,1,49,0,2,
+56,50,0,0,
+56,51,0,0,
+55,166,0,
+51,255,255,81,0,0,
+1,
+56,52,0,0,3,
+56,156,0,0,
+55,167,0,
+51,255,255,81,0,0,
+1,
+56,166,0,0,2,
+56,166,0,0,
+55,168,0,
+51,255,255,109,0,0,
+50,
+1,
+56,165,0,0,2,
+8,
+51,255,255,152,4,2,
+56,54,0,0,
+25,
+51,255,255,81,0,0,0,128,63,2,0,1,
+55,169,0,
+51,255,255,81,0,0,
+1,
+56,167,0,0,1,
+1,
+50,
+56,168,0,0,1,1,2,
+50,
+56,168,0,0,1,1,
+32,0,
+1,
+56,169,0,0,18,
+25,
+51,255,255,81,0,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+8,
+51,255,255,109,0,2,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,191,1,
+57,
+22,
+1,
+56,169,0,1,15,
+1,
+50,
+56,168,0,0,1,0,0,
+27,
+51,255,255,81,0,255,255,159,4,1,
+56,169,0,0,
+44,
+8,
+51,255,255,109,0,2,
+56,169,0,0,
+25,
+51,255,255,81,0,0,0,128,63,1,
+2,
+52,1,14,0,
+54,170,0,
+17,79,0,
+51,255,255,81,0,2,
+54,171,0,
+17,164,4,
+51,255,255,96,4,2,
+54,172,0,
+17,174,4,
+51,255,255,109,0,2,
+54,173,0,
+17,136,4,
+51,255,255,156,1,2,
+54,174,0,
+17,177,4,
+51,255,255,81,0,2,
+54,175,0,
+17,184,4,
+51,255,255,81,0,2,
+54,176,0,
+17,191,4,
+51,255,255,81,0,2,
+54,177,0,
+17,194,4,
+51,255,255,96,4,2,
+54,178,0,
+17,128,4,
+51,255,255,109,0,2,
+54,179,0,
+17,210,4,
+51,255,255,81,0,2,
+54,180,0,
+17,216,4,
+51,255,255,81,0,2,
+54,181,0,
+17,228,4,
+51,255,255,96,4,2,
+54,182,0,
+17,242,4,
+51,255,255,81,0,2,
+54,183,0,
+17,187,0,
+51,255,255,81,0,2,14,0,
+2,0,
+10,0,
+0,0,
+9,0,
+7,0,
+1,0,
+11,0,
+8,0,
+6,0,
+4,0,
+5,0,
+13,0,
+3,0,
+12,0,21,
+55,170,0,
+51,255,255,81,0,0,
+1,
+56,52,0,0,3,
+1,
+56,52,0,0,1,
+56,53,0,0,
+55,171,0,
+51,255,255,96,4,0,
+1,
+27,
+51,255,255,81,0,255,255,226,3,1,
+1,
+56,170,0,0,1,
+25,
+51,255,255,81,0,0,0,128,63,18,
+56,155,0,0,
+32,0,
+56,171,0,0,
+2,
+52,1,1,0,
+54,184,0,
+17,246,4,
+51,255,255,109,0,2,1,0,
+0,0,4,
+55,184,0,
+51,255,255,109,0,0,
+56,50,0,0,
+22,
+1,
+56,50,0,1,15,
+56,51,0,0,
+22,
+1,
+56,51,0,1,15,
+56,184,0,0,
+22,
+1,
+56,170,0,1,15,
+25,
+51,255,255,81,0,0,0,0,0,1,
+57,
+55,172,0,
+51,255,255,109,0,0,
+1,
+1,
+56,50,0,0,2,
+1,
+25,
+51,255,255,81,0,0,0,128,63,1,
+56,170,0,0,0,
+1,
+56,51,0,0,2,
+56,170,0,0,
+55,173,0,
+51,255,255,156,1,0,
+27,
+51,255,255,156,1,49,0,2,
+56,172,0,0,
+56,51,0,0,
+55,174,0,
+51,255,255,81,0,0,
+27,
+51,255,255,81,0,255,255,226,3,1,
+1,
+25,
+51,255,255,81,0,0,0,128,63,1,
+56,170,0,0,
+55,175,0,
+51,255,255,81,0,0,
+56,174,0,0,
+55,176,0,
+51,255,255,81,0,0,
+1,
+27,
+51,255,255,81,0,255,255,226,3,1,
+1,
+56,53,0,0,1,
+56,52,0,0,3,
+56,156,0,0,
+55,177,0,
+51,255,255,96,4,0,
+1,
+27,
+51,255,255,81,0,255,255,226,3,1,
+1,
+56,176,0,0,1,
+25,
+51,255,255,81,0,0,0,128,63,18,
+56,155,0,0,
+32,0,
+56,177,0,0,
+2,
+52,1,0,0,0,0,2,
+22,
+1,
+56,174,0,2,24,
+25,
+51,255,255,81,0,0,0,0,63,
+22,
+1,
+56,175,0,2,24,
+25,
+51,255,255,81,0,0,0,0,63,1,
+2,
+52,1,0,0,0,0,2,
+22,
+1,
+56,174,0,2,24,
+1,
+56,176,0,0,3,
+1,
+1,
+56,176,0,0,2,
+56,176,0,0,1,
+25,
+51,255,255,81,0,0,0,128,63,
+22,
+1,
+56,175,0,2,25,
+27,
+51,255,255,81,0,255,255,159,4,1,
+27,
+51,255,255,81,0,255,255,226,3,1,
+1,
+1,
+56,176,0,0,2,
+56,176,0,0,1,
+25,
+51,255,255,81,0,0,0,128,63,1,
+22,
+1,
+56,173,0,1,15,
+1,
+8,
+51,255,255,156,1,9,
+56,174,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+56,175,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,63,2,
+56,173,0,0,
+55,178,0,
+51,255,255,109,0,0,
+50,
+1,
+56,173,0,0,2,
+8,
+51,255,255,152,4,2,
+56,54,0,0,
+25,
+51,255,255,81,0,0,0,128,63,2,0,1,
+55,179,0,
+51,255,255,81,0,0,
+1,
+25,
+51,255,255,81,0,0,0,128,63,3,
+56,176,0,0,
+55,180,0,
+51,255,255,81,0,0,
+27,
+51,255,255,81,0,255,255,131,4,1,
+1,
+25,
+51,255,255,81,0,0,0,128,63,1,
+56,170,0,0,
+55,181,0,
+51,255,255,96,4,0,
+1,
+42,7,
+56,177,0,0,8,
+1,
+56,176,0,0,19,
+25,
+51,255,255,81,0,0,0,128,63,
+55,182,0,
+51,255,255,81,0,0,
+25,
+51,255,255,81,0,0,0,128,191,
+32,0,
+56,177,0,0,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+56,182,0,1,15,
+1,
+27,
+51,255,255,81,0,255,255,248,3,2,
+56,178,0,0,
+56,178,0,0,3,
+50,
+56,178,0,0,1,0,1,
+32,0,
+56,181,0,0,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+56,182,0,1,15,
+1,
+27,
+51,255,255,81,0,255,255,117,3,1,
+56,178,0,0,1,
+1,
+50,
+56,178,0,0,1,0,2,
+56,179,0,0,1,
+2,
+52,1,1,0,
+54,185,0,
+17,252,4,
+51,255,255,81,0,2,1,0,
+0,0,2,
+55,185,0,
+51,255,255,81,0,0,
+1,
+1,
+50,
+56,178,0,0,1,0,2,
+50,
+56,178,0,0,1,0,1,
+1,
+50,
+56,178,0,0,1,1,2,
+50,
+56,178,0,0,1,1,
+32,0,
+1,
+56,185,0,0,21,
+25,
+51,255,255,81,0,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+32,0,
+1,
+56,171,0,0,9,
+1,
+56,180,0,0,18,
+25,
+51,255,255,81,0,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+56,182,0,1,15,
+1,
+42,1,
+27,
+51,255,255,81,0,255,255,159,4,1,
+56,185,0,0,1,
+1,
+50,
+56,178,0,0,1,0,2,
+56,179,0,0,1,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+56,182,0,1,15,
+1,
+27,
+51,255,255,81,0,255,255,159,4,1,
+56,185,0,0,1,
+1,
+50,
+56,178,0,0,1,0,2,
+56,179,0,0,1,1,
+57,1,
+32,0,
+1,
+42,7,
+56,181,0,0,8,
+1,
+56,182,0,0,18,
+25,
+51,255,255,81,0,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+8,
+51,255,255,109,0,2,
+25,
+51,255,255,81,0,0,0,0,0,
+25,
+51,255,255,81,0,0,0,128,191,1,
+57,
+55,183,0,
+51,255,255,81,0,0,
+1,
+56,170,0,0,0,
+1,
+56,180,0,0,2,
+56,182,0,0,
+32,0,
+56,171,0,0,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+56,183,0,1,15,
+1,
+25,
+51,255,255,81,0,0,0,128,63,1,
+56,183,0,0,1,
+57,
+44,
+8,
+51,255,255,109,0,2,
+56,183,0,0,
+25,
+51,255,255,81,0,0,0,128,63,1,1,
+29,62,0,
+2,
+52,1,1,0,
+54,186,0,
+17,187,0,
+51,255,255,109,0,2,1,0,
+0,0,3,
+55,186,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,37,0,3,
+56,59,0,0,
+56,60,0,0,
+56,56,0,0,
+22,
+1,
+56,186,0,1,15,
+27,
+51,255,255,109,0,21,0,2,
+56,61,0,0,
+56,186,0,0,
+44,
+27,
+51,255,255,11,0,27,0,3,
+56,57,0,0,
+56,58,0,0,
+56,186,0,0,1,
+29,69,0,
+2,
+52,1,1,0,
+54,187,0,
+17,187,0,
+51,255,255,109,0,2,1,0,
+0,0,3,
+55,187,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,37,0,3,
+56,66,0,0,
+56,67,0,0,
+56,63,0,0,
+22,
+1,
+56,187,0,1,15,
+27,
+51,255,255,109,0,21,0,2,
+56,68,0,0,
+56,187,0,0,
+44,
+27,
+51,255,255,11,0,33,0,3,
+56,64,0,0,
+56,65,0,0,
+56,187,0,0,1,
+29,76,0,
+2,
+52,1,1,0,
+54,188,0,
+17,187,0,
+51,255,255,109,0,2,1,0,
+0,0,3,
+55,188,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,41,0,3,
+56,73,0,0,
+56,74,0,0,
+56,70,0,0,
+22,
+1,
+56,188,0,1,15,
+27,
+51,255,255,109,0,21,0,2,
+56,75,0,0,
+56,188,0,0,
+44,
+27,
+51,255,255,11,0,27,0,3,
+56,71,0,0,
+56,72,0,0,
+56,188,0,0,1,
+29,83,0,
+2,
+52,1,1,0,
+54,189,0,
+17,187,0,
+51,255,255,109,0,2,1,0,
+0,0,3,
+55,189,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,41,0,3,
+56,80,0,0,
+56,81,0,0,
+56,77,0,0,
+22,
+1,
+56,189,0,1,15,
+27,
+51,255,255,109,0,21,0,2,
+56,82,0,0,
+56,189,0,0,
+44,
+27,
+51,255,255,11,0,33,0,3,
+56,78,0,0,
+56,79,0,0,
+56,189,0,0,1,
+29,91,0,
+2,
+52,1,1,0,
+54,190,0,
+17,187,0,
+51,255,255,109,0,2,1,0,
+0,0,3,
+55,190,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,46,0,4,
+56,87,0,0,
+56,88,0,0,
+56,89,0,0,
+56,84,0,0,
+22,
+1,
+56,190,0,1,15,
+27,
+51,255,255,109,0,21,0,2,
+56,90,0,0,
+56,190,0,0,
+44,
+27,
+51,255,255,11,0,27,0,3,
+56,85,0,0,
+56,86,0,0,
+56,190,0,0,1,
+29,99,0,
+2,
+52,1,1,0,
+54,191,0,
+17,187,0,
+51,255,255,109,0,2,1,0,
+0,0,3,
+55,191,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,46,0,4,
+56,95,0,0,
+56,96,0,0,
+56,97,0,0,
+56,92,0,0,
+22,
+1,
+56,191,0,1,15,
+27,
+51,255,255,109,0,21,0,2,
+56,98,0,0,
+56,191,0,0,
+44,
+27,
+51,255,255,11,0,33,0,3,
+56,93,0,0,
+56,94,0,0,
+56,191,0,0,1,
+29,108,0,
+2,
+52,1,1,0,
+54,192,0,
+17,187,0,
+51,255,255,109,0,2,1,0,
+0,0,3,
+55,192,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,55,0,5,
+56,103,0,0,
+56,104,0,0,
+56,105,0,0,
+56,106,0,0,
+56,100,0,0,
+22,
+1,
+56,192,0,1,15,
+27,
+51,255,255,109,0,21,0,2,
+56,107,0,0,
+56,192,0,0,
+44,
+27,
+51,255,255,11,0,27,0,3,
+56,101,0,0,
+56,102,0,0,
+56,192,0,0,1,
+29,117,0,
+2,
+52,1,1,0,
+54,193,0,
+17,187,0,
+51,255,255,109,0,2,1,0,
+0,0,3,
+55,193,0,
+51,255,255,109,0,0,
+27,
+51,255,255,109,0,55,0,5,
+56,112,0,0,
+56,113,0,0,
+56,114,0,0,
+56,115,0,0,
+56,109,0,0,
+22,
+1,
+56,193,0,1,15,
+27,
+51,255,255,109,0,21,0,2,
+56,116,0,0,
+56,193,0,0,
+44,
+27,
+51,255,255,11,0,33,0,3,
+56,110,0,0,
+56,111,0,0,
+56,193,0,0,1,
+29,122,0,
+2,
+52,1,1,0,
+54,194,0,
+17,1,5,
+51,255,255,11,0,2,1,0,
+0,0,4,
+32,0,
+12,
+51,255,255,96,4,1,
+56,121,0,0,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+56,118,0,1,15,
+27,
+51,255,255,11,0,255,255,10,5,2,
+50,
+56,118,0,0,3,0,1,2,
+50,
+56,118,0,0,1,3,1,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+56,118,0,1,15,
+27,
+51,255,255,11,0,255,255,22,5,1,
+56,118,0,0,1,
+55,194,0,
+51,255,255,11,0,0,
+9,
+51,255,255,11,0,1,
+1,
+1,
+56,119,0,0,2,
+9,
+51,255,255,49,0,1,
+56,118,0,0,0,
+56,120,0,0,
+32,0,
+12,
+51,255,255,96,4,1,
+56,121,0,0,
+2,
+52,1,0,0,0,0,1,
+22,
+1,
+56,194,0,1,15,
+27,
+51,255,255,11,0,255,255,31,5,2,
+50,
+56,194,0,0,3,0,1,2,
+50,
+56,194,0,0,1,3,1,
+2,
+52,1,0,0,0,0,2,
+22,
+1,
+56,194,0,1,15,
+27,
+51,255,255,11,0,255,255,43,5,1,
+56,194,0,0,
+22,
+1,
+50,
+56,194,0,2,3,0,1,2,24,
+50,
+56,194,0,0,1,3,1,
+44,
+56,194,0,0,1,
+29,126,0,
+2,
+52,1,0,0,0,0,1,
+49,0,
+52,1,0,0,0,0,
+56,123,0,0,30,0,0,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,52,5,2,
+56,124,0,0,
+56,125,0,0,1,0,1,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,64,5,2,
+56,124,0,0,
+56,125,0,0,1,0,2,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,74,5,2,
+56,124,0,0,
+56,125,0,0,1,0,3,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,191,
+56,124,0,0,
+56,125,0,0,1,0,4,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,128,191,
+25,
+51,255,255,77,3,0,0,0,0,
+56,124,0,0,
+56,125,0,0,1,0,5,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,0,0,
+56,124,0,0,
+56,125,0,0,1,0,6,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,63,
+56,124,0,0,
+56,125,0,0,1,0,7,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,191,
+25,
+51,255,255,77,3,0,0,0,0,
+56,124,0,0,
+56,125,0,0,1,0,8,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,191,
+56,124,0,0,
+56,125,0,0,1,0,9,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,128,191,
+56,124,0,0,
+56,125,0,0,1,0,10,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,191,
+25,
+51,255,255,77,3,0,0,128,63,
+56,124,0,0,
+56,125,0,0,1,0,11,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,191,
+25,
+51,255,255,77,3,0,0,128,191,
+56,124,0,0,
+56,125,0,0,1,0,12,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,84,5,3,
+8,
+51,255,255,11,0,4,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,0,0,
+56,124,0,0,
+56,125,0,0,1,0,13,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,102,5,2,
+56,124,0,0,
+56,125,0,0,1,0,14,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,117,5,2,
+56,124,0,0,
+56,125,0,0,1,0,15,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,130,5,3,
+25,
+51,255,255,77,3,0,0,0,0,
+56,124,0,0,
+56,125,0,0,1,0,16,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,144,5,3,
+25,
+51,255,255,77,3,0,0,128,63,
+56,124,0,0,
+56,125,0,0,1,0,17,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,144,5,3,
+25,
+51,255,255,77,3,0,0,128,191,
+56,124,0,0,
+56,125,0,0,1,0,18,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,157,5,2,
+56,124,0,0,
+56,125,0,0,1,0,19,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,175,5,2,
+56,124,0,0,
+56,125,0,0,1,0,20,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,130,5,3,
+25,
+51,255,255,77,3,0,0,128,63,
+56,124,0,0,
+56,125,0,0,1,0,21,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,192,5,2,
+56,124,0,0,
+56,125,0,0,1,0,22,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,209,5,2,
+56,124,0,0,
+56,125,0,0,1,0,23,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,226,5,2,
+56,124,0,0,
+56,125,0,0,1,0,24,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,242,5,2,
+56,124,0,0,
+56,125,0,0,1,0,25,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,1,6,3,
+8,
+51,255,255,12,6,2,
+25,
+51,255,255,77,3,0,0,0,0,
+25,
+51,255,255,77,3,0,0,128,63,
+56,124,0,0,
+56,125,0,0,1,0,26,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,1,6,3,
+13,
+51,255,255,12,6,1,
+25,
+51,255,255,77,3,0,0,128,63,
+56,124,0,0,
+56,125,0,0,1,0,27,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,1,6,3,
+13,
+51,255,255,12,6,1,
+25,
+51,255,255,77,3,0,0,0,0,
+56,124,0,0,
+56,125,0,0,1,0,28,0,0,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,255,255,1,6,3,
+8,
+51,255,255,12,6,2,
+25,
+51,255,255,77,3,0,0,128,63,
+25,
+51,255,255,77,3,0,0,0,0,
+56,124,0,0,
+56,125,0,0,1,1,
+44,
+13,
+51,255,255,11,0,1,
+25,
+51,255,255,77,3,0,0,0,0,1,
+29,130,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,126,0,3,
+56,127,0,0,
+56,128,0,0,
+56,129,0,0,1,
+29,134,0,
+2,
+52,1,0,0,0,0,1,
+44,
+27,
+51,255,255,11,0,126,0,3,
+56,132,0,0,
+9,
+51,255,255,11,0,1,
+56,133,0,0,
+56,131,0,0,1,
+29,137,0,
+2,
+52,1,2,0,
+54,195,0,
+17,102,0,
+51,255,255,11,0,2,
+54,196,0,
+17,17,0,
+51,255,255,11,0,2,2,0,
+1,0,
+0,0,3,
+55,195,0,
+51,255,255,11,0,0,
+1,
+1,
+1,
+27,
+51,255,255,11,0,255,255,22,5,1,
+56,135,0,0,2,
+25,
+51,255,255,77,3,0,0,127,67,3,
+25,
+51,255,255,77,3,0,0,128,67,0,
+25,
+51,255,255,77,3,0,0,0,59,
+55,196,0,
+51,255,255,11,0,0,
+8,
+51,255,255,11,0,4,
+50,
+27,
+51,255,255,11,0,255,255,182,3,2,
+56,136,0,0,
+9,
+51,255,255,109,0,1,
+8,
+51,255,255,12,6,2,
+50,
+56,195,0,0,1,0,
+25,
+51,255,255,77,3,0,0,192,62,1,0,
+50,
+27,
+51,255,255,11,0,255,255,182,3,2,
+56,136,0,0,
+9,
+51,255,255,109,0,1,
+8,
+51,255,255,12,6,2,
+50,
+56,195,0,0,1,1,
+25,
+51,255,255,77,3,0,0,32,63,1,0,
+50,
+27,
+51,255,255,11,0,255,255,182,3,2,
+56,136,0,0,
+9,
+51,255,255,109,0,1,
+8,
+51,255,255,12,6,2,
+50,
+56,195,0,0,1,2,
+25,
+51,255,255,77,3,0,0,96,63,1,0,
+25,
+51,255,255,77,3,0,0,128,63,
+44,
+1,
+56,196,0,0,2,
+50,
+27,
+51,255,255,11,0,255,255,182,3,2,
+56,136,0,0,
+9,
+51,255,255,109,0,1,
+8,
+51,255,255,12,6,2,
+50,
+56,195,0,0,1,3,
+25,
+51,255,255,77,3,0,0,0,62,1,0,1,
+29,139,0,
+2,
+52,1,1,0,
+54,197,0,
+17,18,6,
+51,255,255,77,3,2,1,0,
+0,0,3,
+55,197,0,
+51,255,255,77,3,0,
+1,
+25,
+51,255,255,77,3,0,0,128,63,1,
+50,
+56,138,0,0,1,3,
+22,
+1,
+56,197,0,1,15,
+1,
+27,
+51,255,255,77,3,255,255,25,6,1,
+1,
+1,
+42,1,
+56,197,0,0,2,
+56,197,0,0,2,
+25,
+51,255,255,77,3,0,0,128,64,1,
+25,
+51,255,255,77,3,188,116,147,60,
+44,
+13,
+51,255,255,11,0,1,
+56,197,0,0,1,
+21,};
+static constexpr size_t SKSL_INCLUDE_sksl_graphite_frag_LENGTH = sizeof(SKSL_INCLUDE_sksl_graphite_frag);
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.minified.sksl
new file mode 100644
index 0000000000..464e53c517
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.minified.sksl
@@ -0,0 +1,179 @@
+static constexpr char SKSL_MINIFIED_sksl_graphite_frag[] =
+"$pure half4 sk_error(){return half4(1.,0.,1.,1.);}$pure half4 sk_passthrough"
+"(half4 a){return a;}$pure half4 sk_solid_shader(float4 a){return half4(a);}"
+"$pure half4 $k(int a,half4 b){half4 c=b;switch(a){case 0:break;case 1:c=half4"
+"(b.xyz,1.);break;case 2:c=b.xxxx;break;case 3:c=half4(b.xxx,1.);break;case 4"
+":c=b.zyxw;break;}return c;}$pure half $l(int a,half b,half[7]c){half d=c[0]"
+";half e=c[1];half f=c[2];half g=c[3];half h=c[4];half i=c[5];half j=c[6];half"
+" k=sign(b);b=abs(b);switch(a){case 1:b=b<h?g*b+j:pow(e*b+f,d)+i;break;case 2"
+":b=pow(max(e+f*pow(b,g),0.)/(h+i*pow(b,g)),j);break;case 3:b=b*e<=1.?pow(b*"
+"e,f):exp((b-i)*g)+h;b*=j+1.;break;case 4:b/=j+1.;b=b<=1.?e*pow(b,f):g*log(b"
+"-h)+i;break;}return k*b;}$pure half4 sk_color_space_transform(half4 a,int b"
+",int c,half[7]d,half3x3 e,int f,half[7]g){if(bool(b&1)){a=unpremul(a);}if(bool"
+"(b&2)){a.x=$l(c,a.x,d);a.y=$l(c,a.y,d);a.z=$l(c,a.z,d);}if(bool(b&4)){a.xyz"
+"=e*a.xyz;}if(bool(b&8)){a.x=$l(f,a.x,g);a.y=$l(f,a.y,g);a.z=$l(f,a.z,g);}if"
+"(bool(b&16)){a.xyz*=a.w;}return a;}$pure float $m(int a,float b,float c,float"
+" d){switch(a){case 0:return clamp(b,c,d);case 1:{float e=d-c;return mod(b-c"
+",e)+c;}case 2:{float e=d-c;float g=2.*e;float h=mod(b-c,g);return mix(h,g-h"
+",step(e,h))+c;}default:return b;}}$pure half4 $n(float2 a,float2 b,float4 c"
+",int d,int e,int f,int g,sampler2D h){if(d==3&&f==0){float i=floor(a.x)+.5;"
+"if(i<c.x||i>c.z){return half4(0.);}}if(e==3&&f==0){float i=floor(a.y)+.5;if"
+"(i<c.y||i>c.w){return half4(0.);}}a.x=$m(d,a.x,c.x,c.z);a.y=$m(e,a.y,c.y,c."
+"w);float4 i;if(f==0){i=float4(floor(c.xy)+.5,ceil(c.zw)-.5);}else{i=float4("
+"c.xy+.5,c.zw-.5);}float2 j=clamp(a,i.xy,i.zw);half4 k=sample(h,j/b);k=$k(g,"
+"k);if(f==1){half2 l=half2(a-j);half2 m=abs(l);bool n=d==1;bool o=e==1;if(n||"
+"o){float p;float q;half4 r;half4 t;if(n){p=l.x>0.?i.x:i.z;r=sample(h,float2"
+"(p,j.y)/b);r=$k(g,r);}if(o){q=l.y>0.?i.y:i.w;t=sample(h,float2(j.x,q)/b);t="
+"$k(g,t);}if(n&&o){half4 u=sample(h,float2(p,q)/b);u=$k(g,u);k=mix(mix(k,r,m"
+".x),mix(t,u,m.x),m.y);}else if(n){k=mix(k,r,m.x);}else if(o){k=mix(k,t,m.y)"
+";}}if(d==3){k*=max(1.-m.x,0.);}if(e==3){k*=max(1.-m.y,0.);}}return k;}$pure"
+" half4 $o(float2 a,float2 b,float4 c,int d,int e,float4x4 g,int h,sampler2D"
+" i){float2 j=fract(a-.5);a-=1.5;a=floor(a)+.5;float4 k=g*float4(1.,j.x,j.x*"
+"j.x,(j.x*j.x)*j.x);float4 l=g*float4(1.,j.y,j.y*j.y,(j.y*j.y)*j.y);float4 m"
+"=float4(0.);for(int n=0;n<4;++n){float4 o=float4(0.);for(int p=0;p<4;++p){o"
+"+=k[p]*float4($n(a+float2(float(p),float(n)),b,c,d,e,0,h,i));}m+=l[n]*o;}return"
+" half4(m);}$pure half4 sk_image_shader(float2 a,float2 b,float4 c,int d,int"
+" e,int f,int g,float4x4 h,int i,int j,int k,half[7]l,half3x3 m,int n,half[7"
+"]o,sampler2D p){half4 q=g!=0?$o(a,b,c,d,e,h,i,p):$n(a,b,c,d,e,f,i,p);return"
+" sk_color_space_transform(q,j,k,l,m,n,o);}$pure half4 sk_dither_shader(half4"
+" a,float2 b,float c,sampler2D d){half2 f=half2(half(b.x*.125),half(b.y*.125"
+"));half g=sample(d,float2(f)).x-.5;return half4(half3(clamp(float3(a.xyz)+float"
+"(g)*c,0.,float(a.w))),a.w);}$pure float2 $p(int a,float2 b){switch(a){case 0"
+":b.x=clamp(b.x,0.,1.);break;case 1:b.x=fract(b.x);break;case 2:{float c=b.x"
+"-1.;b.x=(c-2.*floor(c*.5))-1.;if(sk_Caps.mustDoOpBetweenFloorAndAbs){b.x=clamp"
+"(b.x,-1.,1.);}b.x=abs(b.x);break;}case 3:if(b.x<0.||b.x>1.){return float2(0."
+",-1.);}break;}return b;}$pure half4 $q(float4[4]a,float[4]b,float2 c){if(c."
+"y<0.){return half4(0.);}else if(c.x<=b[0]){return half4(a[0]);}else if(c.x<"
+"b[1]){return half4(mix(a[0],a[1],(c.x-b[0])/(b[1]-b[0])));}else if(c.x<b[2]"
+"){return half4(mix(a[1],a[2],(c.x-b[1])/(b[2]-b[1])));}else if(c.x<b[3]){return"
+" half4(mix(a[2],a[3],(c.x-b[2])/(b[3]-b[2])));}else{return half4(a[3]);}}$pure"
+" half4 $r(float4[8]a,float[8]b,float2 c){if(c.y<0.){return half4(0.);}else if"
+"(c.x<b[4]){if(c.x<b[2]){if(c.x<=b[0]){return half4(a[0]);}else if(c.x<b[1])"
+"{return half4(mix(a[0],a[1],(c.x-b[0])/(b[1]-b[0])));}else{return half4(mix"
+"(a[1],a[2],(c.x-b[1])/(b[2]-b[1])));}}else{if(c.x<b[3]){return half4(mix(a["
+"2],a[3],(c.x-b[2])/(b[3]-b[2])));}else{return half4(mix(a[3],a[4],(c.x-b[3]"
+")/(b[4]-b[3])));}}}else{if(c.x<b[6]){if(c.x<b[5]){return half4(mix(a[4],a[5"
+"],(c.x-b[4])/(b[5]-b[4])));}else{return half4(mix(a[5],a[6],(c.x-b[5])/(b[6"
+"]-b[5])));}}else{if(c.x<b[7]){return half4(mix(a[6],a[7],(c.x-b[6])/(b[7]-b"
+"[6])));}else{return half4(a[7]);}}}}half4 $s(sampler2D a,int b,float2 c){if"
+"(c.y<0.){return half4(0.);}else if(c.x==0.){return sampleLod(a,float2(0.,.25"
+"),0.);}else if(c.x==1.){return sampleLod(a,float2(1.,.25),0.);}else{int f=0"
+";int g=b;for(int h=1;h<b;h<<=1){int i=(f+g)/2;float j=(float(i)+.5)/float(b"
+");float2 k=float2(sampleLod(a,float2(j,.75),0.).xy);float l=ldexp(k.x,int(k"
+".y));if(c.x<l){g=i;}else{f=i;}}float h=(float(f)+.5)/float(b);float i=(float"
+"(f+1)+.5)/float(b);half4 j=sampleLod(a,float2(h,.25),0.);half4 k=sampleLod("
+"a,float2(i,.25),0.);float2 l=float2(sampleLod(a,float2(h,.75),0.).xy);float"
+" m=ldexp(l.x,int(l.y));l=float2(sampleLod(a,float2(i,.75),0.).xy);float n=ldexp"
+"(l.x,int(l.y));return half4(mix(float4(j),float4(k),(c.x-m)/(n-m)));}}$pure"
+" float2 $t(float2 a,float2 b,float2 c){c-=a;float2 d=b-a;float e=dot(c,d)/dot"
+"(d,d);return float2(e,1.);}$pure float2 $u(float2 a,float b,float2 c){float"
+" d=distance(c,a)/b;return float2(d,1.);}$pure float2 $v(float2 a,float b,float"
+" c,float2 d){d-=a;float e=sk_Caps.atan2ImplementedAsAtanYOverX?2.*atan(-d.y"
+",length(d)-d.x):atan(-d.y,-d.x);float f=((e*.159154937+.5)+b)*c;return float2"
+"(f,1.);}$pure float3x3 $w(float2 a,float2 b){return float3x3(0.,-1.,0.,1.,0."
+",0.,0.,0.,1.)*inverse(float3x3(b.y-a.y,a.x-b.x,0.,b.x-a.x,b.y-a.y,0.,a.x,a."
+"y,1.));}$pure float2 $x(float2 a,float2 b,float c,float d,float2 e){const float"
+" f=.000244140625;float g=distance(a,b);float h=d-c;bool i=g<f;bool j=abs(h)"
+"<f;if(i){if(j){return float2(0.,-1.);}float k=1./h;float l=sign(h);float m="
+"c/h;float2 n=(e-a)*k;float o=length(n)*l-m;return float2(o,1.);}else if(j){"
+"float3x3 k=$w(a,b);float l=c/g;float m=l*l;float2 n=(k*float3(e,1.)).xy;float"
+" o=m-n.y*n.y;if(o<0.){return float2(0.,-1.);}o=n.x+sqrt(o);return float2(o,"
+"1.);}else{float k=c/(c-d);bool l=abs(k-1.)<f;if(l){float2 m=a;a=b;b=m;k=0.;"
+"}float2 m=a*(1.-k)+b*k;float3x3 n=$w(m,b);float o=abs(1.-k);float p=o;float"
+" q=abs(d-c)/g;bool r=abs(q-1.)<f;if(r){o*=.5;p*=.5;}else{o*=q/(q*q-1.);p/=sqrt"
+"(abs(q*q-1.));}n=float3x3(o,0.,0.,0.,p,0.,0.,0.,1.)*n;float2 s=(n*float3(e,"
+"1.)).xy;float u=1./q;float v=sign(1.-k);bool w=!r&&q>1.;float x=-1.;if(r){x"
+"=dot(s,s)/s.x;}else if(w){x=length(s)-s.x*u;}else{float y=s.x*s.x-s.y*s.y;if"
+"(y>=0.){if(l||v<0.){x=-sqrt(y)-s.x*u;}else{x=sqrt(y)-s.x*u;}}}if(!w&&x<0.){"
+"return float2(0.,-1.);}float y=k+v*x;if(l){y=1.-y;}return float2(y,1.);}}$pure"
+" half4 sk_linear_grad_4_shader(float2 a,float4[4]b,float[4]c,float2 d,float2"
+" e,int f,int g,int h){float2 i=$t(d,e,a);i=$p(f,i);half4 j=$q(b,c,i);return"
+" $interpolated_to_rgb_unpremul(j,g,h);}$pure half4 sk_linear_grad_8_shader("
+"float2 a,float4[8]b,float[8]c,float2 d,float2 e,int f,int g,int h){float2 i"
+"=$t(d,e,a);i=$p(f,i);half4 j=$r(b,c,i);return $interpolated_to_rgb_unpremul"
+"(j,g,h);}$pure half4 sk_linear_grad_tex_shader(float2 a,float2 b,float2 c,int"
+" d,int e,int f,int g,sampler2D h){float2 i=$t(b,c,a);i=$p(e,i);half4 j=$s(h"
+",d,i);return $interpolated_to_rgb_unpremul(j,f,g);}$pure half4 sk_radial_grad_4_shader"
+"(float2 a,float4[4]b,float[4]c,float2 d,float e,int f,int g,int h){float2 i"
+"=$u(d,e,a);i=$p(f,i);half4 j=$q(b,c,i);return $interpolated_to_rgb_unpremul"
+"(j,g,h);}$pure half4 sk_radial_grad_8_shader(float2 a,float4[8]b,float[8]c,"
+"float2 d,float e,int f,int g,int h){float2 i=$u(d,e,a);i=$p(f,i);half4 j=$r"
+"(b,c,i);return $interpolated_to_rgb_unpremul(j,g,h);}$pure half4 sk_radial_grad_tex_shader"
+"(float2 a,float2 b,float c,int d,int e,int f,int g,sampler2D h){float2 i=$u"
+"(b,c,a);i=$p(e,i);half4 j=$s(h,d,i);return $interpolated_to_rgb_unpremul(j,"
+"f,g);}$pure half4 sk_sweep_grad_4_shader(float2 a,float4[4]b,float[4]c,float2"
+" d,float e,float f,int g,int h,int i){float2 j=$v(d,e,f,a);j=$p(g,j);half4 k"
+"=$q(b,c,j);return $interpolated_to_rgb_unpremul(k,h,i);}$pure half4 sk_sweep_grad_8_shader"
+"(float2 a,float4[8]b,float[8]c,float2 d,float e,float f,int g,int h,int i){"
+"float2 j=$v(d,e,f,a);j=$p(g,j);half4 k=$r(b,c,j);return $interpolated_to_rgb_unpremul"
+"(k,h,i);}$pure half4 sk_sweep_grad_tex_shader(float2 a,float2 b,float c,float"
+" d,int e,int f,int g,int h,sampler2D i){float2 j=$v(b,c,d,a);j=$p(f,j);half4"
+" k=$s(i,e,j);return $interpolated_to_rgb_unpremul(k,g,h);}$pure half4 sk_conical_grad_4_shader"
+"(float2 a,float4[4]b,float[4]c,float2 d,float2 e,float f,float g,int h,int i"
+",int j){float2 k=$x(d,e,f,g,a);k=$p(h,k);half4 l=$q(b,c,k);return $interpolated_to_rgb_unpremul"
+"(l,i,j);}$pure half4 sk_conical_grad_8_shader(float2 a,float4[8]b,float[8]c"
+",float2 d,float2 e,float f,float g,int h,int i,int j){float2 k=$x(d,e,f,g,a"
+");k=$p(h,k);half4 l=$r(b,c,k);return $interpolated_to_rgb_unpremul(l,i,j);}"
+"$pure half4 sk_conical_grad_tex_shader(float2 a,float2 b,float2 c,float d,float"
+" e,int f,int g,int h,int i,sampler2D j){float2 k=$x(b,c,d,e,a);k=$p(g,k);half4"
+" l=$s(j,f,k);return $interpolated_to_rgb_unpremul(l,h,i);}$pure half4 sk_matrix_colorfilter"
+"(half4 a,float4x4 b,float4 c,int d){if(bool(d)){a=$rgb_to_hsl(a.xyz,a.w);}else"
+"{a=unpremul(a);}half4 e=half4(b*float4(a)+c);if(bool(d)){e=$hsl_to_rgb(e.xyz"
+",e.w);}else{e=saturate(e);e.xyz*=e.w;}return e;}$pure half4 noise_helper(half2"
+" a,half2 b,int c,sampler2D d){half4 f;f.xy=floor(a);f.zw=f.xy+half2(1.);if("
+"bool(c)){if(f.x>=b.x){f.x-=b.x;}if(f.y>=b.y){f.y-=b.y;}if(f.z>=b.x){f.z-=b."
+"x;}if(f.w>=b.y){f.w-=b.y;}}half g=sample(d,float2(half2(f.x*.00390625,.5)))"
+".x;half h=sample(d,float2(half2(f.z*.00390625,.5))).x;half2 i=half2(g,h);i="
+"floor(i*half2(255.)+half2(.5))*half2(.003921569);half4 k=256.*i.xyxy+f.yyww"
+";k*=half4(.00390625);return k;}$pure half4 noise_function(half2 a,half4 b,sampler2D"
+" c){half2 d=fract(a);half2 e=(d*d)*(half2(3.)-2.*d);const half f=.00390625;"
+"half4 g;for(int h=0;h<4;h++){half i=(half(h)+.5)*.25;half4 j=sample(c,float2"
+"(half2(b.x,i)));half4 k=sample(c,float2(half2(b.y,i)));half4 l=sample(c,float2"
+"(half2(b.w,i)));half4 m=sample(c,float2(half2(b.z,i)));half2 n;half2 o=d;n."
+"x=dot((j.yw+j.xz*f)*2.-half2(1.),o);o.x-=1.;n.y=dot((k.yw+k.xz*f)*2.-half2("
+"1.),o);half2 p;p.x=mix(n.x,n.y,e.x);o.y-=1.;n.y=dot((l.yw+l.xz*f)*2.-half2("
+"1.),o);o.x+=1.;n.x=dot((m.yw+m.xz*f)*2.-half2(1.),o);p.y=mix(n.x,n.y,e.x);g"
+"[h]=mix(p.x,p.y,e.y);}return g;}$pure half4 perlin_noise_shader(float2 a,float2"
+" b,float2 c,int d,int e,int f,sampler2D g,sampler2D h){half2 k=half2(floor("
+"a)*b);half4 l=half4(0.);half2 m=half2(c);half n=1.;for(int o=0;o<e;++o){half4"
+" p=noise_helper(k,m,f,g);half4 q=noise_function(k,p,h);if(d!=0){q=abs(q);}q"
+"*=n;l+=q;k*=half2(2.);n*=.5;m*=half2(2.);}if(d==0){l=l*half4(.5)+half4(.5);"
+"}l=saturate(l);return half4(l.xyz*l.www,l.w);}$pure half4 sk_blend(int a,half4"
+" b,half4 c){switch(a){case 0:{return blend_clear(b,c);}case 1:{return blend_src"
+"(b,c);}case 2:{return blend_dst(b,c);}case 3:{return blend_porter_duff(half4"
+"(1.,0.,0.,-1.),b,c);}case 4:{return blend_porter_duff(half4(0.,1.,-1.,0.),b"
+",c);}case 5:{return blend_porter_duff(half4(0.,0.,1.,0.),b,c);}case 6:{return"
+" blend_porter_duff(half4(0.,0.,0.,1.),b,c);}case 7:{return blend_porter_duff"
+"(half4(0.,0.,-1.,0.),b,c);}case 8:{return blend_porter_duff(half4(0.,0.,0.,"
+"-1.),b,c);}case 9:{return blend_porter_duff(half4(0.,0.,1.,-1.),b,c);}case 10"
+":{return blend_porter_duff(half4(0.,0.,-1.,1.),b,c);}case 11:{return blend_porter_duff"
+"(half4(0.,0.,-1.,-1.),b,c);}case 12:{return blend_porter_duff(half4(1.,1.,0."
+",0.),b,c);}case 13:{return blend_modulate(b,c);}case 14:{return blend_screen"
+"(b,c);}case 15:{return blend_overlay(0.,b,c);}case 16:{return blend_darken("
+"1.,b,c);}case 17:{return blend_darken(-1.,b,c);}case 18:{return blend_color_dodge"
+"(b,c);}case 19:{return blend_color_burn(b,c);}case 20:{return blend_overlay"
+"(1.,b,c);}case 21:{return blend_soft_light(b,c);}case 22:{return blend_difference"
+"(b,c);}case 23:{return blend_exclusion(b,c);}case 24:{return blend_multiply"
+"(b,c);}case 25:{return blend_hslc(half2(0.,1.),b,c);}case 26:{return blend_hslc"
+"(half2(1.),b,c);}case 27:{return blend_hslc(half2(0.),b,c);}case 28:{return"
+" blend_hslc(half2(1.,0.),b,c);}default:return half4(0.);}}$pure half4 sk_blend_shader"
+"(int a,half4 b,half4 c){return sk_blend(a,c,b);}$pure half4 porter_duff_blend_shader"
+"(half4 a,half4 b,half4 c){return blend_porter_duff(a,c,b);}$pure half4 sk_blend_colorfilter"
+"(half4 a,int b,float4 c){return sk_blend(b,half4(c),a);}$pure half4 sk_table_colorfilter"
+"(half4 a,sampler2D b){half4 c=(unpremul(a)*255.)*.00390625+.001953125;half4"
+" d=half4(sample(b,float2(half2(c.x,.375))).x,sample(b,float2(half2(c.y,.625"
+"))).x,sample(b,float2(half2(c.z,.875))).x,1.);return d*sample(b,float2(half2"
+"(c.w,.125))).x;}$pure half4 sk_gaussian_colorfilter(half4 a){half b=1.-a.w;"
+"b=exp((-b*b)*4.)-.018;return half4(b);}$pure float inverse_grad_len(float2 a"
+",float2x2 b){float2 c=a*b;return inversesqrt(dot(c,c));}$pure float2 elliptical_distance"
+"(float2 a,float2 b,float c,float2x2 d){float2 e=1./(b*b+c*c);float2 g=e*a;float"
+" h=inverse_grad_len(g,d);float i=(.5*h)*(dot(a,g)-1.);float j=((b.x*c)*e.x)"
+"*h;return float2(j-i,j+i);}void corner_distance(inout float2 a,float2x2 b,float2"
+" c,float2 d,float2 e,float2 f){float2 g=f-d;if(g.x>0.&&g.y>0.){if(f.x>0.&&f"
+".y>0.||c.x>0.&&c.y<0.){float2 h=elliptical_distance(g*e,f,c.x,b);if(f.x-c.x"
+"<=0.){h.y=1.;}else{h.y*=-1.;}a=min(a,h);}else if(c.y==0.){float h=((c.x-g.x"
+")-g.y)*inverse_grad_len(e,b);a.x=min(a.x,h);}}}void corner_distances(inout float2"
+" a,float2x2 b,float2 c,float4 e,float4 f,float4 g){corner_distance(a,b,c,e."
+"xy,float2(-1.),float2(f.x,g.x));corner_distance(a,b,c,e.zy,float2(1.,-1.),float2"
+"(f.y,g.y));corner_distance(a,b,c,e.zw,float2(1.),float2(f.z,g.z));corner_distance"
+"(a,b,c,e.xw,float2(-1.,1.),float2(f.w,g.w));}";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.unoptimized.sksl
new file mode 100644
index 0000000000..3be44de02c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_graphite_frag.unoptimized.sksl
@@ -0,0 +1,314 @@
+static constexpr char SKSL_MINIFIED_sksl_graphite_frag[] =
+"const int $kTileModeClamp=0;const int $kTileModeRepeat=1;const int $kTileModeMirror"
+"=2;const int $kTileModeDecal=3;const int $kReadSwizzleNormalRGBA=0;const int"
+" $kReadSwizzleRGB1=1;const int $kReadSwizzleRRRR=2;const int $kReadSwizzleRRR1"
+"=3;const int $kReadSwizzleBGRA=4;const int $kFilterModeNearest=0;const int $kFilterModeLinear"
+"=1;const int $kTFTypeSRGB=1;const int $kTFTypePQ=2;const int $kTFTypeHLG=3;"
+"const int $kTFTypeHLGinv=4;const int $kColorSpaceXformFlagUnpremul=1;const int"
+" $kColorSpaceXformFlagLinearize=2;const int $kColorSpaceXformFlagGamutTransform"
+"=4;const int $kColorSpaceXformFlagEncode=8;const int $kColorSpaceXformFlagPremul"
+"=16;$pure half4 sk_error(){return half4(1.,0.,1.,1.);}$pure half4 sk_passthrough"
+"(half4 color){return color;}$pure half4 sk_solid_shader(float4 colorParam){"
+"return half4(colorParam);}$pure half4 $apply_swizzle(int swizzleType,half4 color"
+"){half4 resultantColor=color;switch(swizzleType){case 0:break;case 1:resultantColor"
+"=half4(color.xyz,1.);break;case 2:resultantColor=color.xxxx;break;case 3:resultantColor"
+"=half4(color.xxx,1.);break;case 4:resultantColor=color.zyxw;break;}return resultantColor"
+";}$pure half $apply_xfer_fn(int kind,half x,half[7]cs){half G=cs[0];half A="
+"cs[1];half B=cs[2];half C=cs[3];half D=cs[4];half E=cs[5];half F=cs[6];half"
+" s=sign(x);x=abs(x);switch(kind){case 1:x=x<D?C*x+F:pow(A*x+B,G)+E;break;case"
+" 2:x=pow(max(A+B*pow(x,C),0.)/(D+E*pow(x,C)),F);break;case 3:x=x*A<=1.?pow("
+"x*A,B):exp((x-E)*C)+D;x*=F+1.;break;case 4:x/=F+1.;x=x<=1.?A*pow(x,B):C*log"
+"(x-D)+E;break;}return s*x;}$pure half4 sk_color_space_transform(half4 color"
+",int flags,int srcKind,half[7]srcCoeffs,half3x3 gamutTransform,int dstKind,"
+"half[7]dstCoeffs){if(bool(flags&$kColorSpaceXformFlagUnpremul)){color=unpremul"
+"(color);}if(bool(flags&$kColorSpaceXformFlagLinearize)){color.x=$apply_xfer_fn"
+"(srcKind,color.x,srcCoeffs);color.y=$apply_xfer_fn(srcKind,color.y,srcCoeffs"
+");color.z=$apply_xfer_fn(srcKind,color.z,srcCoeffs);}if(bool(flags&$kColorSpaceXformFlagGamutTransform"
+")){color.xyz=gamutTransform*color.xyz;}if(bool(flags&$kColorSpaceXformFlagEncode"
+")){color.x=$apply_xfer_fn(dstKind,color.x,dstCoeffs);color.y=$apply_xfer_fn"
+"(dstKind,color.y,dstCoeffs);color.z=$apply_xfer_fn(dstKind,color.z,dstCoeffs"
+");}if(bool(flags&$kColorSpaceXformFlagPremul)){color.xyz*=color.w;}return color"
+";}$pure float $tile(int tileMode,float f,float low,float high){switch(tileMode"
+"){case 0:return clamp(f,low,high);case 1:{float length=high-low;return mod("
+"f-low,length)+low;}case 2:{float length=high-low;float length2=2.*length;float"
+" tmp=mod(f-low,length2);return mix(tmp,length2-tmp,step(length,tmp))+low;}default"
+":return f;}}$pure half4 $sample_image(float2 pos,float2 imgSize,float4 subset"
+",int tileModeX,int tileModeY,int filterMode,int readSwizzle,sampler2D s){if"
+"(tileModeX==$kTileModeDecal&&filterMode==$kFilterModeNearest){float snappedX"
+"=floor(pos.x)+.5;if(snappedX<subset.x||snappedX>subset.z){return half4(0.);"
+"}}if(tileModeY==$kTileModeDecal&&filterMode==$kFilterModeNearest){float snappedY"
+"=floor(pos.y)+.5;if(snappedY<subset.y||snappedY>subset.w){return half4(0.);"
+"}}pos.x=$tile(tileModeX,pos.x,subset.x,subset.z);pos.y=$tile(tileModeY,pos."
+"y,subset.y,subset.w);float4 insetClamp;if(filterMode==$kFilterModeNearest){"
+"insetClamp=float4(floor(subset.xy)+.5,ceil(subset.zw)-.5);}else{insetClamp="
+"float4(subset.xy+.5,subset.zw-.5);}float2 clampedPos=clamp(pos,insetClamp.xy"
+",insetClamp.zw);half4 color=sample(s,clampedPos/imgSize);color=$apply_swizzle"
+"(readSwizzle,color);if(filterMode==$kFilterModeLinear){half2 error=half2(pos"
+"-clampedPos);half2 absError=abs(error);bool sampleExtraX=tileModeX==$kTileModeRepeat"
+";bool sampleExtraY=tileModeY==$kTileModeRepeat;if(sampleExtraX||sampleExtraY"
+"){float extraCoordX;float extraCoordY;half4 extraColorX;half4 extraColorY;if"
+"(sampleExtraX){extraCoordX=error.x>0.?insetClamp.x:insetClamp.z;extraColorX"
+"=sample(s,float2(extraCoordX,clampedPos.y)/imgSize);extraColorX=$apply_swizzle"
+"(readSwizzle,extraColorX);}if(sampleExtraY){extraCoordY=error.y>0.?insetClamp"
+".y:insetClamp.w;extraColorY=sample(s,float2(clampedPos.x,extraCoordY)/imgSize"
+");extraColorY=$apply_swizzle(readSwizzle,extraColorY);}if(sampleExtraX&&sampleExtraY"
+"){half4 extraColorXY=sample(s,float2(extraCoordX,extraCoordY)/imgSize);extraColorXY"
+"=$apply_swizzle(readSwizzle,extraColorXY);color=mix(mix(color,extraColorX,absError"
+".x),mix(extraColorY,extraColorXY,absError.x),absError.y);}else if(sampleExtraX"
+"){color=mix(color,extraColorX,absError.x);}else if(sampleExtraY){color=mix("
+"color,extraColorY,absError.y);}}if(tileModeX==$kTileModeDecal){color*=max(1."
+"-absError.x,0.);}if(tileModeY==$kTileModeDecal){color*=max(1.-absError.y,0."
+");}}return color;}$pure half4 $cubic_filter_image(float2 pos,float2 imgSize"
+",float4 subset,int tileModeX,int tileModeY,float4x4 coeffs,int readSwizzle,"
+"sampler2D s){float2 f=fract(pos-.5);pos-=1.5;pos=floor(pos)+.5;float4 wx=coeffs"
+"*float4(1.,f.x,f.x*f.x,(f.x*f.x)*f.x);float4 wy=coeffs*float4(1.,f.y,f.y*f."
+"y,(f.y*f.y)*f.y);float4 color=float4(0.);for(int y=0;y<4;++y){float4 rowColor"
+"=float4(0.);for(int x=0;x<4;++x){rowColor+=wx[x]*float4($sample_image(pos+float2"
+"(float(x),float(y)),imgSize,subset,tileModeX,tileModeY,$kFilterModeNearest,"
+"readSwizzle,s));}color+=wy[y]*rowColor;}return half4(color);}$pure half4 sk_image_shader"
+"(float2 coords,float2 imgSize,float4 subset,int tileModeX,int tileModeY,int"
+" filterMode,int useCubic,float4x4 cubicCoeffs,int readSwizzle,int csXformFlags"
+",int csXformSrcKind,half[7]csXformSrcCoeffs,half3x3 csXformGamutTransform,int"
+" csXformDstKind,half[7]csXformDstCoeffs,sampler2D s){half4 sampleColor=useCubic"
+"!=0?$cubic_filter_image(coords,imgSize,subset,tileModeX,tileModeY,cubicCoeffs"
+",readSwizzle,s):$sample_image(coords,imgSize,subset,tileModeX,tileModeY,filterMode"
+",readSwizzle,s);return sk_color_space_transform(sampleColor,csXformFlags,csXformSrcKind"
+",csXformSrcCoeffs,csXformGamutTransform,csXformDstKind,csXformDstCoeffs);}$pure"
+" half4 sk_dither_shader(half4 colorIn,float2 coords,float range,sampler2D lut"
+"){const float kImgSize=8.;half2 lutCoords=half2(half(coords.x*.125),half(coords"
+".y*.125));half value=sample(lut,float2(lutCoords)).x-.5;return half4(half3("
+"clamp(float3(colorIn.xyz)+float(value)*range,0.,float(colorIn.w))),colorIn."
+"w);}$pure float2 $tile_grad(int tileMode,float2 t){switch(tileMode){case 0:"
+"t.x=clamp(t.x,0.,1.);break;case 1:t.x=fract(t.x);break;case 2:{float t_1=t."
+"x-1.;t.x=(t_1-2.*floor(t_1*.5))-1.;if(sk_Caps.mustDoOpBetweenFloorAndAbs){t"
+".x=clamp(t.x,-1.,1.);}t.x=abs(t.x);break;}case 3:if(t.x<0.||t.x>1.){return float2"
+"(0.,-1.);}break;}return t;}$pure half4 $colorize_grad_4(float4[4]colorsParam"
+",float[4]offsetsParam,float2 t){if(t.y<0.){return half4(0.);}else if(t.x<=offsetsParam"
+"[0]){return half4(colorsParam[0]);}else if(t.x<offsetsParam[1]){return half4"
+"(mix(colorsParam[0],colorsParam[1],(t.x-offsetsParam[0])/(offsetsParam[1]-offsetsParam"
+"[0])));}else if(t.x<offsetsParam[2]){return half4(mix(colorsParam[1],colorsParam"
+"[2],(t.x-offsetsParam[1])/(offsetsParam[2]-offsetsParam[1])));}else if(t.x<"
+"offsetsParam[3]){return half4(mix(colorsParam[2],colorsParam[3],(t.x-offsetsParam"
+"[2])/(offsetsParam[3]-offsetsParam[2])));}else{return half4(colorsParam[3])"
+";}}$pure half4 $colorize_grad_8(float4[8]colorsParam,float[8]offsetsParam,float2"
+" t){if(t.y<0.){return half4(0.);}else if(t.x<offsetsParam[4]){if(t.x<offsetsParam"
+"[2]){if(t.x<=offsetsParam[0]){return half4(colorsParam[0]);}else if(t.x<offsetsParam"
+"[1]){return half4(mix(colorsParam[0],colorsParam[1],(t.x-offsetsParam[0])/("
+"offsetsParam[1]-offsetsParam[0])));}else{return half4(mix(colorsParam[1],colorsParam"
+"[2],(t.x-offsetsParam[1])/(offsetsParam[2]-offsetsParam[1])));}}else{if(t.x"
+"<offsetsParam[3]){return half4(mix(colorsParam[2],colorsParam[3],(t.x-offsetsParam"
+"[2])/(offsetsParam[3]-offsetsParam[2])));}else{return half4(mix(colorsParam"
+"[3],colorsParam[4],(t.x-offsetsParam[3])/(offsetsParam[4]-offsetsParam[3]))"
+");}}}else{if(t.x<offsetsParam[6]){if(t.x<offsetsParam[5]){return half4(mix("
+"colorsParam[4],colorsParam[5],(t.x-offsetsParam[4])/(offsetsParam[5]-offsetsParam"
+"[4])));}else{return half4(mix(colorsParam[5],colorsParam[6],(t.x-offsetsParam"
+"[5])/(offsetsParam[6]-offsetsParam[5])));}}else{if(t.x<offsetsParam[7]){return"
+" half4(mix(colorsParam[6],colorsParam[7],(t.x-offsetsParam[6])/(offsetsParam"
+"[7]-offsetsParam[6])));}else{return half4(colorsParam[7]);}}}}half4 $colorize_grad_tex"
+"(sampler2D colorsAndOffsetsSampler,int numStops,float2 t){const float kColorCoord"
+"=.25;const float kOffsetCoord=.75;if(t.y<0.){return half4(0.);}else if(t.x=="
+"0.){return sampleLod(colorsAndOffsetsSampler,float2(0.,.25),0.);}else if(t."
+"x==1.){return sampleLod(colorsAndOffsetsSampler,float2(1.,.25),0.);}else{int"
+" low=0;int high=numStops;for(int loop=1;loop<numStops;loop<<=1){int mid=(low"
+"+high)/2;float midFlt=(float(mid)+.5)/float(numStops);float2 tmp=float2(sampleLod"
+"(colorsAndOffsetsSampler,float2(midFlt,.75),0.).xy);float offset=ldexp(tmp."
+"x,int(tmp.y));if(t.x<offset){high=mid;}else{low=mid;}}float lowFlt=(float(low"
+")+.5)/float(numStops);float highFlt=(float(low+1)+.5)/float(numStops);half4"
+" color0=sampleLod(colorsAndOffsetsSampler,float2(lowFlt,.25),0.);half4 color1"
+"=sampleLod(colorsAndOffsetsSampler,float2(highFlt,.25),0.);float2 tmp=float2"
+"(sampleLod(colorsAndOffsetsSampler,float2(lowFlt,.75),0.).xy);float offset0"
+"=ldexp(tmp.x,int(tmp.y));tmp=float2(sampleLod(colorsAndOffsetsSampler,float2"
+"(highFlt,.75),0.).xy);float offset1=ldexp(tmp.x,int(tmp.y));return half4(mix"
+"(float4(color0),float4(color1),(t.x-offset0)/(offset1-offset0)));}}$pure float2"
+" $linear_grad_layout(float2 point0Param,float2 point1Param,float2 pos){pos-="
+"point0Param;float2 delta=point1Param-point0Param;float t=dot(pos,delta)/dot"
+"(delta,delta);return float2(t,1.);}$pure float2 $radial_grad_layout(float2 centerParam"
+",float radiusParam,float2 pos){float t=distance(pos,centerParam)/radiusParam"
+";return float2(t,1.);}$pure float2 $sweep_grad_layout(float2 centerParam,float"
+" biasParam,float scaleParam,float2 pos){pos-=centerParam;float angle=sk_Caps"
+".atan2ImplementedAsAtanYOverX?2.*atan(-pos.y,length(pos)-pos.x):atan(-pos.y"
+",-pos.x);float t=((angle*.159154937+.5)+biasParam)*scaleParam;return float2"
+"(t,1.);}$pure float3x3 $map_to_unit_x(float2 p0,float2 p1){return float3x3("
+"0.,-1.,0.,1.,0.,0.,0.,0.,1.)*inverse(float3x3(p1.y-p0.y,p0.x-p1.x,0.,p1.x-p0"
+".x,p1.y-p0.y,0.,p0.x,p0.y,1.));}$pure float2 $conical_grad_layout(float2 point0Param"
+",float2 point1Param,float radius0Param,float radius1Param,float2 pos){const"
+" float SK_ScalarNearlyZero=.000244140625;float dCenter=distance(point0Param"
+",point1Param);float dRadius=radius1Param-radius0Param;bool radial=dCenter<SK_ScalarNearlyZero"
+";bool strip=abs(dRadius)<SK_ScalarNearlyZero;if(radial){if(strip){return float2"
+"(0.,-1.);}float scale=1./dRadius;float scaleSign=sign(dRadius);float bias=radius0Param"
+"/dRadius;float2 pt=(pos-point0Param)*scale;float t=length(pt)*scaleSign-bias"
+";return float2(t,1.);}else if(strip){float3x3 transform=$map_to_unit_x(point0Param"
+",point1Param);float r=radius0Param/dCenter;float r_2=r*r;float2 pt=(transform"
+"*float3(pos,1.)).xy;float t=r_2-pt.y*pt.y;if(t<0.){return float2(0.,-1.);}t"
+"=pt.x+sqrt(t);return float2(t,1.);}else{float f=radius0Param/(radius0Param-"
+"radius1Param);bool isSwapped=abs(f-1.)<SK_ScalarNearlyZero;if(isSwapped){float2"
+" tmpPt=point0Param;point0Param=point1Param;point1Param=tmpPt;f=0.;}float2 Cf"
+"=point0Param*(1.-f)+point1Param*f;float3x3 transform=$map_to_unit_x(Cf,point1Param"
+");float scaleX=abs(1.-f);float scaleY=scaleX;float r1=abs(radius1Param-radius0Param"
+")/dCenter;bool isFocalOnCircle=abs(r1-1.)<SK_ScalarNearlyZero;if(isFocalOnCircle"
+"){scaleX*=.5;scaleY*=.5;}else{scaleX*=r1/(r1*r1-1.);scaleY/=sqrt(abs(r1*r1-"
+"1.));}transform=float3x3(scaleX,0.,0.,0.,scaleY,0.,0.,0.,1.)*transform;float2"
+" pt=(transform*float3(pos,1.)).xy;float invR1=1./r1;float dRadiusSign=sign("
+"1.-f);bool isWellBehaved=!isFocalOnCircle&&r1>1.;float x_t=-1.;if(isFocalOnCircle"
+"){x_t=dot(pt,pt)/pt.x;}else if(isWellBehaved){x_t=length(pt)-pt.x*invR1;}else"
+"{float temp=pt.x*pt.x-pt.y*pt.y;if(temp>=0.){if(isSwapped||dRadiusSign<0.){"
+"x_t=-sqrt(temp)-pt.x*invR1;}else{x_t=sqrt(temp)-pt.x*invR1;}}}if(!isWellBehaved"
+"&&x_t<0.){return float2(0.,-1.);}float t=f+dRadiusSign*x_t;if(isSwapped){t="
+"1.-t;}return float2(t,1.);}}$pure half4 sk_linear_grad_4_shader(float2 coords"
+",float4[4]colorsParam,float[4]offsetsParam,float2 point0Param,float2 point1Param"
+",int tileMode,int colorSpace,int doUnpremul){float2 t=$linear_grad_layout(point0Param"
+",point1Param,coords);t=$tile_grad(tileMode,t);half4 color=$colorize_grad_4("
+"colorsParam,offsetsParam,t);return $interpolated_to_rgb_unpremul(color,colorSpace"
+",doUnpremul);}$pure half4 sk_linear_grad_8_shader(float2 coords,float4[8]colorsParam"
+",float[8]offsetsParam,float2 point0Param,float2 point1Param,int tileMode,int"
+" colorSpace,int doUnpremul){float2 t=$linear_grad_layout(point0Param,point1Param"
+",coords);t=$tile_grad(tileMode,t);half4 color=$colorize_grad_8(colorsParam,"
+"offsetsParam,t);return $interpolated_to_rgb_unpremul(color,colorSpace,doUnpremul"
+");}$pure half4 sk_linear_grad_tex_shader(float2 coords,float2 point0Param,float2"
+" point1Param,int numStops,int tileMode,int colorSpace,int doUnpremul,sampler2D"
+" colorAndOffsetSampler){float2 t=$linear_grad_layout(point0Param,point1Param"
+",coords);t=$tile_grad(tileMode,t);half4 color=$colorize_grad_tex(colorAndOffsetSampler"
+",numStops,t);return $interpolated_to_rgb_unpremul(color,colorSpace,doUnpremul"
+");}$pure half4 sk_radial_grad_4_shader(float2 coords,float4[4]colorsParam,float"
+"[4]offsetsParam,float2 centerParam,float radiusParam,int tileMode,int colorSpace"
+",int doUnpremul){float2 t=$radial_grad_layout(centerParam,radiusParam,coords"
+");t=$tile_grad(tileMode,t);half4 color=$colorize_grad_4(colorsParam,offsetsParam"
+",t);return $interpolated_to_rgb_unpremul(color,colorSpace,doUnpremul);}$pure"
+" half4 sk_radial_grad_8_shader(float2 coords,float4[8]colorsParam,float[8]offsetsParam"
+",float2 centerParam,float radiusParam,int tileMode,int colorSpace,int doUnpremul"
+"){float2 t=$radial_grad_layout(centerParam,radiusParam,coords);t=$tile_grad"
+"(tileMode,t);half4 color=$colorize_grad_8(colorsParam,offsetsParam,t);return"
+" $interpolated_to_rgb_unpremul(color,colorSpace,doUnpremul);}$pure half4 sk_radial_grad_tex_shader"
+"(float2 coords,float2 centerParam,float radiusParam,int numStops,int tileMode"
+",int colorSpace,int doUnpremul,sampler2D colorAndOffsetSampler){float2 t=$radial_grad_layout"
+"(centerParam,radiusParam,coords);t=$tile_grad(tileMode,t);half4 color=$colorize_grad_tex"
+"(colorAndOffsetSampler,numStops,t);return $interpolated_to_rgb_unpremul(color"
+",colorSpace,doUnpremul);}$pure half4 sk_sweep_grad_4_shader(float2 coords,float4"
+"[4]colorsParam,float[4]offsetsParam,float2 centerParam,float biasParam,float"
+" scaleParam,int tileMode,int colorSpace,int doUnpremul){float2 t=$sweep_grad_layout"
+"(centerParam,biasParam,scaleParam,coords);t=$tile_grad(tileMode,t);half4 color"
+"=$colorize_grad_4(colorsParam,offsetsParam,t);return $interpolated_to_rgb_unpremul"
+"(color,colorSpace,doUnpremul);}$pure half4 sk_sweep_grad_8_shader(float2 coords"
+",float4[8]colorsParam,float[8]offsetsParam,float2 centerParam,float biasParam"
+",float scaleParam,int tileMode,int colorSpace,int doUnpremul){float2 t=$sweep_grad_layout"
+"(centerParam,biasParam,scaleParam,coords);t=$tile_grad(tileMode,t);half4 color"
+"=$colorize_grad_8(colorsParam,offsetsParam,t);return $interpolated_to_rgb_unpremul"
+"(color,colorSpace,doUnpremul);}$pure half4 sk_sweep_grad_tex_shader(float2 coords"
+",float2 centerParam,float biasParam,float scaleParam,int numStops,int tileMode"
+",int colorSpace,int doUnpremul,sampler2D colorAndOffsetSampler){float2 t=$sweep_grad_layout"
+"(centerParam,biasParam,scaleParam,coords);t=$tile_grad(tileMode,t);half4 color"
+"=$colorize_grad_tex(colorAndOffsetSampler,numStops,t);return $interpolated_to_rgb_unpremul"
+"(color,colorSpace,doUnpremul);}$pure half4 sk_conical_grad_4_shader(float2 coords"
+",float4[4]colorsParam,float[4]offsetsParam,float2 point0Param,float2 point1Param"
+",float radius0Param,float radius1Param,int tileMode,int colorSpace,int doUnpremul"
+"){float2 t=$conical_grad_layout(point0Param,point1Param,radius0Param,radius1Param"
+",coords);t=$tile_grad(tileMode,t);half4 color=$colorize_grad_4(colorsParam,"
+"offsetsParam,t);return $interpolated_to_rgb_unpremul(color,colorSpace,doUnpremul"
+");}$pure half4 sk_conical_grad_8_shader(float2 coords,float4[8]colorsParam,"
+"float[8]offsetsParam,float2 point0Param,float2 point1Param,float radius0Param"
+",float radius1Param,int tileMode,int colorSpace,int doUnpremul){float2 t=$conical_grad_layout"
+"(point0Param,point1Param,radius0Param,radius1Param,coords);t=$tile_grad(tileMode"
+",t);half4 color=$colorize_grad_8(colorsParam,offsetsParam,t);return $interpolated_to_rgb_unpremul"
+"(color,colorSpace,doUnpremul);}$pure half4 sk_conical_grad_tex_shader(float2"
+" coords,float2 point0Param,float2 point1Param,float radius0Param,float radius1Param"
+",int numStops,int tileMode,int colorSpace,int doUnpremul,sampler2D colorAndOffsetSampler"
+"){float2 t=$conical_grad_layout(point0Param,point1Param,radius0Param,radius1Param"
+",coords);t=$tile_grad(tileMode,t);half4 color=$colorize_grad_tex(colorAndOffsetSampler"
+",numStops,t);return $interpolated_to_rgb_unpremul(color,colorSpace,doUnpremul"
+");}$pure half4 sk_matrix_colorfilter(half4 colorIn,float4x4 m,float4 v,int inHSLA"
+"){if(bool(inHSLA)){colorIn=$rgb_to_hsl(colorIn.xyz,colorIn.w);}else{colorIn"
+"=unpremul(colorIn);}half4 colorOut=half4(m*float4(colorIn)+v);if(bool(inHSLA"
+")){colorOut=$hsl_to_rgb(colorOut.xyz,colorOut.w);}else{colorOut=saturate(colorOut"
+");colorOut.xyz*=colorOut.w;}return colorOut;}$pure half4 noise_helper(half2"
+" noiseVec,half2 stitchData,int stitching,sampler2D permutationSampler){const"
+" half kBlockSize=256.;half4 floorVal;floorVal.xy=floor(noiseVec);floorVal.zw"
+"=floorVal.xy+half2(1.);if(bool(stitching)){if(floorVal.x>=stitchData.x){floorVal"
+".x-=stitchData.x;}{}if(floorVal.y>=stitchData.y){floorVal.y-=stitchData.y;}"
+"{}if(floorVal.z>=stitchData.x){floorVal.z-=stitchData.x;}{}if(floorVal.w>=stitchData"
+".y){floorVal.w-=stitchData.y;}{}}half sampleX=sample(permutationSampler,float2"
+"(half2(floorVal.x*.00390625,.5))).x;half sampleY=sample(permutationSampler,"
+"float2(half2(floorVal.z*.00390625,.5))).x;half2 latticeIdx=half2(sampleX,sampleY"
+");const half kInv255=.003921569;latticeIdx=floor(latticeIdx*half2(255.)+half2"
+"(.5))*half2(.003921569);half4 noiseXCoords=kBlockSize*latticeIdx.xyxy+floorVal"
+".yyww;noiseXCoords*=half4(.00390625);return noiseXCoords;}$pure half4 noise_function"
+"(half2 noiseVec,half4 noiseXCoords,sampler2D noiseSampler){half2 fractVal=fract"
+"(noiseVec);half2 noiseSmooth=(fractVal*fractVal)*(half2(3.)-2.*fractVal);const"
+" half kInv256=.00390625;half4 result;for(int channel=0;channel<4;channel++)"
+"{half chanCoord=(half(channel)+.5)*.25;half4 sampleA=sample(noiseSampler,float2"
+"(half2(noiseXCoords.x,chanCoord)));half4 sampleB=sample(noiseSampler,float2"
+"(half2(noiseXCoords.y,chanCoord)));half4 sampleC=sample(noiseSampler,float2"
+"(half2(noiseXCoords.w,chanCoord)));half4 sampleD=sample(noiseSampler,float2"
+"(half2(noiseXCoords.z,chanCoord)));half2 uv;half2 tmpFractVal=fractVal;uv.x"
+"=dot((sampleA.yw+sampleA.xz*kInv256)*2.-half2(1.),tmpFractVal);tmpFractVal."
+"x-=1.;uv.y=dot((sampleB.yw+sampleB.xz*kInv256)*2.-half2(1.),tmpFractVal);half2"
+" ab;ab.x=mix(uv.x,uv.y,noiseSmooth.x);tmpFractVal.y-=1.;uv.y=dot((sampleC.yw"
+"+sampleC.xz*kInv256)*2.-half2(1.),tmpFractVal);tmpFractVal.x+=1.;uv.x=dot(("
+"sampleD.yw+sampleD.xz*kInv256)*2.-half2(1.),tmpFractVal);ab.y=mix(uv.x,uv.y"
+",noiseSmooth.x);result[channel]=mix(ab.x,ab.y,noiseSmooth.y);}return result"
+";}$pure half4 perlin_noise_shader(float2 coords,float2 baseFrequency,float2"
+" stitchDataIn,int noiseType,int numOctaves,int stitching,sampler2D permutationSampler"
+",sampler2D noiseSampler){const int kFractalNoise_Type=0;const int kTurbulence_Type"
+"=1;half2 noiseVec=half2(floor(coords)*baseFrequency);half4 color=half4(0.);"
+"half2 stitchData=half2(stitchDataIn);half ratio=1.;for(int octave=0;octave<"
+"numOctaves;++octave){half4 noiseXCoords=noise_helper(noiseVec,stitchData,stitching"
+",permutationSampler);half4 tmp=noise_function(noiseVec,noiseXCoords,noiseSampler"
+");if(noiseType!=kFractalNoise_Type){tmp=abs(tmp);}tmp*=ratio;color+=tmp;noiseVec"
+"*=half2(2.);ratio*=.5;stitchData*=half2(2.);}if(noiseType==kFractalNoise_Type"
+"){color=color*half4(.5)+half4(.5);}color=saturate(color);return half4(color"
+".xyz*color.www,color.w);}$pure half4 sk_blend(int blendMode,half4 src,half4"
+" dst){const int kClear=0;const int kSrc=1;const int kDst=2;const int kSrcOver"
+"=3;const int kDstOver=4;const int kSrcIn=5;const int kDstIn=6;const int kSrcOut"
+"=7;const int kDstOut=8;const int kSrcATop=9;const int kDstATop=10;const int"
+" kXor=11;const int kPlus=12;const int kModulate=13;const int kScreen=14;const"
+" int kOverlay=15;const int kDarken=16;const int kLighten=17;const int kColorDodge"
+"=18;const int kColorBurn=19;const int kHardLight=20;const int kSoftLight=21"
+";const int kDifference=22;const int kExclusion=23;const int kMultiply=24;const"
+" int kHue=25;const int kSaturation=26;const int kColor=27;const int kLuminosity"
+"=28;switch(blendMode){case 0:{return blend_clear(src,dst);}case 1:{return blend_src"
+"(src,dst);}case 2:{return blend_dst(src,dst);}case 3:{return blend_porter_duff"
+"(half4(1.,0.,0.,-1.),src,dst);}case 4:{return blend_porter_duff(half4(0.,1."
+",-1.,0.),src,dst);}case 5:{return blend_porter_duff(half4(0.,0.,1.,0.),src,"
+"dst);}case 6:{return blend_porter_duff(half4(0.,0.,0.,1.),src,dst);}case 7:"
+"{return blend_porter_duff(half4(0.,0.,-1.,0.),src,dst);}case 8:{return blend_porter_duff"
+"(half4(0.,0.,0.,-1.),src,dst);}case 9:{return blend_porter_duff(half4(0.,0."
+",1.,-1.),src,dst);}case 10:{return blend_porter_duff(half4(0.,0.,-1.,1.),src"
+",dst);}case 11:{return blend_porter_duff(half4(0.,0.,-1.,-1.),src,dst);}case"
+" 12:{return blend_porter_duff(half4(1.,1.,0.,0.),src,dst);}case 13:{return blend_modulate"
+"(src,dst);}case 14:{return blend_screen(src,dst);}case 15:{return blend_overlay"
+"(0.,src,dst);}case 16:{return blend_darken(1.,src,dst);}case 17:{return blend_darken"
+"(-1.,src,dst);}case 18:{return blend_color_dodge(src,dst);}case 19:{return blend_color_burn"
+"(src,dst);}case 20:{return blend_overlay(1.,src,dst);}case 21:{return blend_soft_light"
+"(src,dst);}case 22:{return blend_difference(src,dst);}case 23:{return blend_exclusion"
+"(src,dst);}case 24:{return blend_multiply(src,dst);}case 25:{return blend_hslc"
+"(half2(0.,1.),src,dst);}case 26:{return blend_hslc(half2(1.),src,dst);}case"
+" 27:{return blend_hslc(half2(0.),src,dst);}case 28:{return blend_hslc(half2"
+"(1.,0.),src,dst);}default:return half4(0.);}}$pure half4 sk_blend_shader(int"
+" blendMode,half4 dst,half4 src){return sk_blend(blendMode,src,dst);}$pure half4"
+" porter_duff_blend_shader(half4 blendOp,half4 dst,half4 src){return blend_porter_duff"
+"(blendOp,src,dst);}$pure half4 sk_blend_colorfilter(half4 dstColor,int blendMode"
+",float4 srcColor){return sk_blend(blendMode,half4(srcColor),dstColor);}$pure"
+" half4 sk_table_colorfilter(half4 inColor,sampler2D s){half4 coords=(unpremul"
+"(inColor)*255.)*.00390625+.001953125;half4 color=half4(sample(s,float2(half2"
+"(coords.x,.375))).x,sample(s,float2(half2(coords.y,.625))).x,sample(s,float2"
+"(half2(coords.z,.875))).x,1.);return color*sample(s,float2(half2(coords.w,.125"
+"))).x;}$pure half4 sk_gaussian_colorfilter(half4 inColor){half factor=1.-inColor"
+".w;factor=exp((-factor*factor)*4.)-.018;return half4(factor);}$pure float inverse_grad_len"
+"(float2 localGrad,float2x2 jacobian){float2 devGrad=localGrad*jacobian;return"
+" inversesqrt(dot(devGrad,devGrad));}$pure float2 elliptical_distance(float2"
+" uv,float2 radii,float strokeRadius,float2x2 jacobian){float2 invR2=1./(radii"
+"*radii+strokeRadius*strokeRadius);float2 normUV=invR2*uv;float invGradLength"
+"=inverse_grad_len(normUV,jacobian);float f=(.5*invGradLength)*(dot(uv,normUV"
+")-1.);float width=((radii.x*strokeRadius)*invR2.x)*invGradLength;return float2"
+"(width-f,width+f);}void corner_distance(inout float2 dist,float2x2 jacobian"
+",float2 strokeParams,float2 cornerEdgeDist,float2 xyFlip,float2 radii){float2"
+" uv=radii-cornerEdgeDist;if(uv.x>0.&&uv.y>0.){if(radii.x>0.&&radii.y>0.||strokeParams"
+".x>0.&&strokeParams.y<0.){float2 d=elliptical_distance(uv*xyFlip,radii,strokeParams"
+".x,jacobian);if(radii.x-strokeParams.x<=0.){d.y=1.;}else{d.y*=-1.;}dist=min"
+"(dist,d);}else if(strokeParams.y==0.){float bevelDist=((strokeParams.x-uv.x"
+")-uv.y)*inverse_grad_len(xyFlip,jacobian);dist.x=min(dist.x,bevelDist);}}}void"
+" corner_distances(inout float2 d,float2x2 J,float2 stroke,float4 edgeDists,"
+"float4 xRadii,float4 yRadii){corner_distance(d,J,stroke,edgeDists.xy,float2"
+"(-1.),float2(xRadii.x,yRadii.x));corner_distance(d,J,stroke,edgeDists.zy,float2"
+"(1.,-1.),float2(xRadii.y,yRadii.y));corner_distance(d,J,stroke,edgeDists.zw"
+",float2(1.),float2(xRadii.z,yRadii.z));corner_distance(d,J,stroke,edgeDists"
+".xw,float2(-1.,1.),float2(xRadii.w,yRadii.w));}";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_graphite_vert.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_graphite_vert.minified.sksl
new file mode 100644
index 0000000000..99bded4db6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_graphite_vert.minified.sksl
@@ -0,0 +1,64 @@
+static constexpr char SKSL_MINIFIED_sksl_graphite_vert[] =
+"$pure float curve_type_using_inf_support(float4 a){if(isinf(a.z)){return 2."
+";}if(isinf(a.w)){return 1.;}return 0.;}$pure bool $k(float a){return a!=0.;"
+"}$pure bool $l(float a){return a==2.;}$pure float $m(float2 a,float2 b,float2"
+" c,float2 d,float2x2 e){float2 f=e*(fma(float2(-2.),b,c)+a);float2 g=e*(fma"
+"(float2(-2.),c,d)+b);return max(dot(f,f),dot(g,g));}$pure float $n(float2 a"
+",float2 b,float2 c,float2 d,float2x2 e){float f=$m(a,b,c,d,e);return max(ceil"
+"(sqrt(3.*sqrt(f))),1.);}$pure float $o(float2 a,float2 b,float2 c,float2 d,"
+"float2x2 e){float f=$m(a,b,c,d,e);return ceil(log2(max(9.*f,1.))*.25);}$pure"
+" float $p(float2 a,float2 b,float2 c,float d){float2 e=(min(min(a,b),c)+max"
+"(max(a,b),c))*.5;a-=e;b-=e;c-=e;float f=sqrt(max(max(dot(a,a),dot(b,b)),dot"
+"(c,c)));float2 g=fma(float2(-2.*d),b,a)+c;float h=abs(fma(-2.,d,2.));float i"
+"=max(0.,fma(f,4.,-1.));float j=length(g)*4.+i*h;float k=4.*min(d,1.);return"
+" j/k;}$pure float $q(float2 a,float2 b,float2 c,float d){float e=$p(a,b,c,d"
+");return max(ceil(sqrt(e)),1.);}$pure float $r(float2 a,float2 b,float2 c,float"
+" d){float e=$p(a,b,c,d);return ceil(log2(max(e,1.))*.5);}$pure float2 $s(float2"
+" c,float2 d){float2 e=c-d;if(e==float2(0.)){return float2(0.);}else{float f"
+"=1./max(abs(e.x),abs(e.y));return normalize(f*e);}}$pure float $t(float2 c,"
+"float2 d){return clamp(dot(c,d),-1.,1.);}$pure float $u(float a,float b){float"
+" c=fma(a,.5,.5);return(c*b)*b>=1.?inversesqrt(c):sqrt(c);}$pure float $v(float"
+" a){return.5/acos(max(1.-.25/a,-1.));}$pure float $w(float c,float d,float e"
+"){return fma(d-c,e,c);}$pure float2 $w(float2 c,float2 d,float e){return fma"
+"(d-c,float2(e),c);}$pure float4 $w(float4 c,float4 d,float4 e){return fma(d"
+"-c,e,c);}$pure float2 tessellate_filled_curve(float2x2 a,float b,float c,float4"
+" d,float4 e,float f){float2 g;if($l(f)){g=b!=0.?d.zw:(c!=0.?e.xy:d.xy);}else"
+"{float2 h=d.xy;float2 i=d.zw;float2 j=e.xy;float2 k=e.zw;float l=-1.;float m"
+";if($k(f)){l=k.x;m=$r(a*h,a*i,a*j,l);i*=l;k=j;}else{m=$o(h,i,j,k,a);}if(b>m"
+"){c=floor(ldexp(c,int(m-b)));b=m;}float n=floor(.5+ldexp(c,int(5.-b)));if(0."
+"<n&&n<32.){float o=n*.03125;float2 p=mix(h,i,o);float2 q=mix(i,j,o);float2 r"
+"=mix(j,k,o);float2 s=mix(p,q,o);float2 t=mix(q,r,o);float2 x=mix(s,t,o);float"
+" y=mix(1.,l,o);float z=(l+1.)-y;float A=mix(y,z,o);g=l<0.?x:s/A;}else{g=n=="
+"0.?h:k;}}return g;}$pure float4 tessellate_stroked_curve(float a,float b,float2x2"
+" c,float2 d,float e,float4 f,float4 g,float2 h,float2 i,float j){float2 k=f"
+".xy;float2 l=f.zw;float2 m=g.xy;float2 n=g.zw;float o=-1.;if($k(j)){o=n.x;n"
+"=m;}float p;if(o<0.){if(k==l&&m==n){p=1.;}else{p=$n(k,l,m,n,c);}}else{p=$q("
+"c*k,c*l,c*m,o);}float q=i.x;float r=i.y;bool s=i.x==0.;float t;if(s){t=$v(1."
+");q=.5;}else{t=$v(e*i.x);}if(s){k=c*k;l=c*l;m=c*m;n=c*n;h=c*h;}float2 u=$s("
+"k==l?(l==m?n:m):l,k);float2 v=$s(n,n==m?(m==l?k:l):m);if(u==float2(0.)){u=float2"
+"(1.,0.);v=float2(-1.,0.);}float x;if(r>=0.){x=(sign(r)+1.)+2.;}else{float2 y"
+"=$s(k,h);float z=acos($t(y,u));float A=max(ceil(z*t),1.);x=A+2.;x=min(x,b-2."
+");}float y=cross_length_2d(m-k,n-l);float z=abs(a)-x;if(z<0.){v=u;if(h!=k){"
+"u=$s(k,h);}y=cross_length_2d(u,v);}float A=$t(u,v);float B=acos(A);if(y<0.)"
+"{B=-B;}float C;float D=sign(a);if(z<0.){C=x-2.;p=1.;n=(m=(l=k));z+=C+1.;float"
+" E=.01;bool F=abs(y)*inversesqrt(dot(u,u)*dot(v,v))<E;if(!F||dot(u,v)<0.){if"
+"(z>=0.){D=y<0.?min(D,0.):max(D,0.);}}z=max(z,0.);}else{float E=(b-x)-1.;C=max"
+"(ceil(abs(B)*t),1.);C=min(C,E);p=min(p,(E-C)+1.);}float E=B/C;float F=(p+C)"
+"-1.;bool G=z>=F;if(z>F){D=0.;}if(abs(a)==2.&&r>0.){D*=$u(A,r);}float2 H;float2"
+" I;if(z!=0.&&!G){float2 J;float2 K;float2 L=l-k;float2 M=n-k;if(o>=0.){L*=o"
+";K=.5*M-L;J=(o-1.)*M;l*=o;}else{float2 N=m-l;K=N-L;J=fma(float2(-3.),N,M);}"
+"float2 N=K*(p*2.);float2 O=L*(p*p);float P=0.;float Q=min(p-1.,z);float R=-"
+"abs(E);float S=(1.+z)*abs(E);for(int U=4;U>=0;--U){float V=P+exp2(float(U))"
+";if(V<=Q){float2 W=fma(float2(V),J,N);W=fma(float2(V),W,O);float X=dot(normalize"
+"(W),u);float Y=fma(V,R,S);Y=min(Y,3.14159274);if(X>=cos(Y)){P=V;}}}float U="
+"P/p;float V=z-P;float W=acos(clamp(u.x,-1.,1.));W=u.y>=0.?W:-W;float X=fma("
+"V,E,W);H=float2(cos(X),sin(X));float2 Y=float2(-H.y,H.x);float Z=dot(Y,J);float"
+" aa=dot(Y,K);float ac=dot(Y,L);float ad=max(aa*aa-Z*ac,0.);float ae=sqrt(ad"
+");if(aa>0.){ae=-ae;}ae-=aa;float af=(-.5*ae)*Z;float2 ag=abs(fma(ae,ae,af))"
+"<abs(fma(Z,ac,af))?float2(ae,Z):float2(ac,ae);float ah=ag.y!=0.?ag.x/ag.y:0."
+";ah=clamp(ah,0.,1.);if(V==0.){ah=0.;}float ai=max(U,ah);float2 aj=$w(k,l,ai"
+");float2 ak=$w(l,m,ai);float2 al=$w(m,n,ai);float2 am=$w(aj,ak,ai);float2 an"
+"=$w(ak,al,ai);float2 ao=$w(am,an,ai);float ap=$w(1.,o,ai);float aq=(o+1.)-ap"
+";float ar=$w(ap,aq,ai);if(ai!=ah){H=o>=0.?$s(ak*ap,aj*aq):$s(an,am);}I=o>=0."
+"?am/ar:ao;}else{H=z==0.?u:v;I=z==0.?k:n;}float2 J=float2(H.y,-H.x);I+=J*(q*"
+"D);if(s){return float4(I+d,inverse(c)*I);}else{return float4(c*I+d,I);}}";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_graphite_vert.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_graphite_vert.unoptimized.sksl
new file mode 100644
index 0000000000..e2b4f80fc5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_graphite_vert.unoptimized.sksl
@@ -0,0 +1,121 @@
+static constexpr char SKSL_MINIFIED_sksl_graphite_vert[] =
+"const float $PI=3.14159274;const float $kCubicCurveType=0.;const float $kConicCurveType"
+"=1.;const float $kTriangularConicCurveType=2.;$pure float curve_type_using_inf_support"
+"(float4 p23){if(isinf(p23.z)){return $kTriangularConicCurveType;}if(isinf(p23"
+".w)){return $kConicCurveType;}return $kCubicCurveType;}$pure bool $is_conic_curve"
+"(float curveType){return curveType!=$kCubicCurveType;}$pure bool $is_triangular_conic_curve"
+"(float curveType){return curveType==$kTriangularConicCurveType;}const float"
+" $kDegree=3.;const float $kPrecision=4.;const float $kLengthTerm=3.;const float"
+" $kLengthTermPow2=9.;$pure float $wangs_formula_max_fdiff_p2(float2 p0,float2"
+" p1,float2 p2,float2 p3,float2x2 matrix){float2 d0=matrix*(fma(float2(-2.),"
+"p1,p2)+p0);float2 d1=matrix*(fma(float2(-2.),p2,p3)+p1);return max(dot(d0,d0"
+"),dot(d1,d1));}$pure float $wangs_formula_cubic(float2 p0,float2 p1,float2 p2"
+",float2 p3,float2x2 matrix){float m=$wangs_formula_max_fdiff_p2(p0,p1,p2,p3"
+",matrix);return max(ceil(sqrt($kLengthTerm*sqrt(m))),1.);}$pure float $wangs_formula_cubic_log2"
+"(float2 p0,float2 p1,float2 p2,float2 p3,float2x2 matrix){float m=$wangs_formula_max_fdiff_p2"
+"(p0,p1,p2,p3,matrix);return ceil(log2(max($kLengthTermPow2*m,1.))*.25);}$pure"
+" float $wangs_formula_conic_p2(float2 p0,float2 p1,float2 p2,float w){float2"
+" C=(min(min(p0,p1),p2)+max(max(p0,p1),p2))*.5;p0-=C;p1-=C;p2-=C;float m=sqrt"
+"(max(max(dot(p0,p0),dot(p1,p1)),dot(p2,p2)));float2 dp=fma(float2(-2.*w),p1"
+",p0)+p2;float dw=abs(fma(-2.,w,2.));float rp_minus_1=max(0.,fma(m,$kPrecision"
+",-1.));float numer=length(dp)*$kPrecision+rp_minus_1*dw;float denom=4.*min("
+"w,1.);return numer/denom;}$pure float $wangs_formula_conic(float2 p0,float2"
+" p1,float2 p2,float w){float n2=$wangs_formula_conic_p2(p0,p1,p2,w);return max"
+"(ceil(sqrt(n2)),1.);}$pure float $wangs_formula_conic_log2(float2 p0,float2"
+" p1,float2 p2,float w){float n2=$wangs_formula_conic_p2(p0,p1,p2,w);return ceil"
+"(log2(max(n2,1.))*.5);}$pure float2 $robust_normalize_diff(float2 a,float2 b"
+"){float2 diff=a-b;if(diff==float2(0.)){return float2(0.);}else{float invMag"
+"=1./max(abs(diff.x),abs(diff.y));return normalize(invMag*diff);}}$pure float"
+" $cosine_between_unit_vectors(float2 a,float2 b){return clamp(dot(a,b),-1.,"
+"1.);}$pure float $miter_extent(float cosTheta,float miterLimit){float x=fma"
+"(cosTheta,.5,.5);return(x*miterLimit)*miterLimit>=1.?inversesqrt(x):sqrt(x)"
+";}$pure float $num_radial_segments_per_radian(float approxDevStrokeRadius){"
+"return.5/acos(max(1.-.25/approxDevStrokeRadius,-1.));}$pure float $unchecked_mix"
+"(float a,float b,float T){return fma(b-a,T,a);}$pure float2 $unchecked_mix("
+"float2 a,float2 b,float T){return fma(b-a,float2(T),a);}$pure float4 $unchecked_mix"
+"(float4 a,float4 b,float4 T){return fma(b-a,T,a);}$pure float2 tessellate_filled_curve"
+"(float2x2 vectorXform,float resolveLevel,float idxInResolveLevel,float4 p01"
+",float4 p23,float curveType){float2 localcoord;if($is_triangular_conic_curve"
+"(curveType)){localcoord=resolveLevel!=0.?p01.zw:(idxInResolveLevel!=0.?p23."
+"xy:p01.xy);}else{float2 p0=p01.xy;float2 p1=p01.zw;float2 p2=p23.xy;float2 p3"
+"=p23.zw;float w=-1.;float maxResolveLevel;if($is_conic_curve(curveType)){w="
+"p3.x;maxResolveLevel=$wangs_formula_conic_log2(vectorXform*p0,vectorXform*p1"
+",vectorXform*p2,w);p1*=w;p3=p2;}else{maxResolveLevel=$wangs_formula_cubic_log2"
+"(p0,p1,p2,p3,vectorXform);}if(resolveLevel>maxResolveLevel){idxInResolveLevel"
+"=floor(ldexp(idxInResolveLevel,int(maxResolveLevel-resolveLevel)));resolveLevel"
+"=maxResolveLevel;}float fixedVertexID=floor(.5+ldexp(idxInResolveLevel,int("
+"5.-resolveLevel)));if(0.<fixedVertexID&&fixedVertexID<32.){float T=fixedVertexID"
+"*.03125;float2 ab=mix(p0,p1,T);float2 bc=mix(p1,p2,T);float2 cd=mix(p2,p3,T"
+");float2 abc=mix(ab,bc,T);float2 bcd=mix(bc,cd,T);float2 abcd=mix(abc,bcd,T"
+");float u=mix(1.,w,T);float v=(w+1.)-u;float uv=mix(u,v,T);localcoord=w<0.?"
+"abcd:abc/uv;}else{localcoord=fixedVertexID==0.?p0:p3;}}return localcoord;}$pure"
+" float4 tessellate_stroked_curve(float edgeID,float maxEdges,float2x2 affineMatrix"
+",float2 translate,float maxScale,float4 p01,float4 p23,float2 lastControlPoint"
+",float2 strokeParams,float curveType){float2 p0=p01.xy;float2 p1=p01.zw;float2"
+" p2=p23.xy;float2 p3=p23.zw;float w=-1.;if($is_conic_curve(curveType)){w=p3"
+".x;p3=p2;}float numParametricSegments;if(w<0.){if(p0==p1&&p2==p3){numParametricSegments"
+"=1.;}else{numParametricSegments=$wangs_formula_cubic(p0,p1,p2,p3,affineMatrix"
+");}}else{numParametricSegments=$wangs_formula_conic(affineMatrix*p0,affineMatrix"
+"*p1,affineMatrix*p2,w);}float strokeRadius=strokeParams.x;float joinType=strokeParams"
+".y;bool isHairline=strokeParams.x==0.;float numRadialSegmentsPerRadian;if(isHairline"
+"){numRadialSegmentsPerRadian=$num_radial_segments_per_radian(1.);strokeRadius"
+"=.5;}else{numRadialSegmentsPerRadian=$num_radial_segments_per_radian(maxScale"
+"*strokeParams.x);}if(isHairline){p0=affineMatrix*p0;p1=affineMatrix*p1;p2=affineMatrix"
+"*p2;p3=affineMatrix*p3;lastControlPoint=affineMatrix*lastControlPoint;}float2"
+" tan0=$robust_normalize_diff(p0==p1?(p1==p2?p3:p2):p1,p0);float2 tan1=$robust_normalize_diff"
+"(p3,p3==p2?(p2==p1?p0:p1):p2);if(tan0==float2(0.)){tan0=float2(1.,0.);tan1="
+"float2(-1.,0.);}float numEdgesInJoin;if(joinType>=0.){numEdgesInJoin=(sign("
+"joinType)+1.)+2.;}else{float2 prevTan=$robust_normalize_diff(p0,lastControlPoint"
+");float joinRads=acos($cosine_between_unit_vectors(prevTan,tan0));float numRadialSegmentsInJoin"
+"=max(ceil(joinRads*numRadialSegmentsPerRadian),1.);numEdgesInJoin=numRadialSegmentsInJoin"
+"+2.;numEdgesInJoin=min(numEdgesInJoin,maxEdges-2.);}float turn=cross_length_2d"
+"(p2-p0,p3-p1);float combinedEdgeID=abs(edgeID)-numEdgesInJoin;if(combinedEdgeID"
+"<0.){tan1=tan0;if(lastControlPoint!=p0){tan0=$robust_normalize_diff(p0,lastControlPoint"
+");}turn=cross_length_2d(tan0,tan1);}float cosTheta=$cosine_between_unit_vectors"
+"(tan0,tan1);float rotation=acos(cosTheta);if(turn<0.){rotation=-rotation;}float"
+" numRadialSegments;float strokeOutset=sign(edgeID);if(combinedEdgeID<0.){numRadialSegments"
+"=numEdgesInJoin-2.;numParametricSegments=1.;p3=(p2=(p1=p0));combinedEdgeID+="
+"numRadialSegments+1.;float sinEpsilon=.01;bool tangentsNearlyParallel=abs(turn"
+")*inversesqrt(dot(tan0,tan0)*dot(tan1,tan1))<sinEpsilon;if(!tangentsNearlyParallel"
+"||dot(tan0,tan1)<0.){if(combinedEdgeID>=0.){strokeOutset=turn<0.?min(strokeOutset"
+",0.):max(strokeOutset,0.);}}combinedEdgeID=max(combinedEdgeID,0.);}else{float"
+" maxCombinedSegments=(maxEdges-numEdgesInJoin)-1.;numRadialSegments=max(ceil"
+"(abs(rotation)*numRadialSegmentsPerRadian),1.);numRadialSegments=min(numRadialSegments"
+",maxCombinedSegments);numParametricSegments=min(numParametricSegments,(maxCombinedSegments"
+"-numRadialSegments)+1.);}float radsPerSegment=rotation/numRadialSegments;float"
+" numCombinedSegments=(numParametricSegments+numRadialSegments)-1.;bool isFinalEdge"
+"=combinedEdgeID>=numCombinedSegments;if(combinedEdgeID>numCombinedSegments)"
+"{strokeOutset=0.;}if(abs(edgeID)==2.&&joinType>0.){strokeOutset*=$miter_extent"
+"(cosTheta,joinType);}float2 tangent;float2 strokeCoord;if(combinedEdgeID!=0."
+"&&!isFinalEdge){float2 A;float2 B;float2 C=p1-p0;float2 D=p3-p0;if(w>=0.){C"
+"*=w;B=.5*D-C;A=(w-1.)*D;p1*=w;}else{float2 E=p2-p1;B=E-C;A=fma(float2(-3.),"
+"E,D);}float2 B_=B*(numParametricSegments*2.);float2 C_=C*(numParametricSegments"
+"*numParametricSegments);float lastParametricEdgeID=0.;float maxParametricEdgeID"
+"=min(numParametricSegments-1.,combinedEdgeID);float negAbsRadsPerSegment=-abs"
+"(radsPerSegment);float maxRotation0=(1.+combinedEdgeID)*abs(radsPerSegment)"
+";for(int exp=4;exp>=0;--exp){float testParametricID=lastParametricEdgeID+exp2"
+"(float(exp));if(testParametricID<=maxParametricEdgeID){float2 testTan=fma(float2"
+"(testParametricID),A,B_);testTan=fma(float2(testParametricID),testTan,C_);float"
+" cosRotation=dot(normalize(testTan),tan0);float maxRotation=fma(testParametricID"
+",negAbsRadsPerSegment,maxRotation0);maxRotation=min(maxRotation,$PI);if(cosRotation"
+">=cos(maxRotation)){lastParametricEdgeID=testParametricID;}}}float parametricT"
+"=lastParametricEdgeID/numParametricSegments;float lastRadialEdgeID=combinedEdgeID"
+"-lastParametricEdgeID;float angle0=acos(clamp(tan0.x,-1.,1.));angle0=tan0.y"
+">=0.?angle0:-angle0;float radialAngle=fma(lastRadialEdgeID,radsPerSegment,angle0"
+");tangent=float2(cos(radialAngle),sin(radialAngle));float2 norm=float2(-tangent"
+".y,tangent.x);float a=dot(norm,A);float b_over_2=dot(norm,B);float c=dot(norm"
+",C);float discr_over_4=max(b_over_2*b_over_2-a*c,0.);float q=sqrt(discr_over_4"
+");if(b_over_2>0.){q=-q;}q-=b_over_2;float _5qa=(-.5*q)*a;float2 root=abs(fma"
+"(q,q,_5qa))<abs(fma(a,c,_5qa))?float2(q,a):float2(c,q);float radialT=root.y"
+"!=0.?root.x/root.y:0.;radialT=clamp(radialT,0.,1.);if(lastRadialEdgeID==0.)"
+"{radialT=0.;}float T=max(parametricT,radialT);float2 ab=$unchecked_mix(p0,p1"
+",T);float2 bc=$unchecked_mix(p1,p2,T);float2 cd=$unchecked_mix(p2,p3,T);float2"
+" abc=$unchecked_mix(ab,bc,T);float2 bcd=$unchecked_mix(bc,cd,T);float2 abcd"
+"=$unchecked_mix(abc,bcd,T);float u=$unchecked_mix(1.,w,T);float v=(w+1.)-u;"
+"float uv=$unchecked_mix(u,v,T);if(T!=radialT){tangent=w>=0.?$robust_normalize_diff"
+"(bc*u,ab*v):$robust_normalize_diff(bcd,abc);}strokeCoord=w>=0.?abc/uv:abcd;"
+"}else{tangent=combinedEdgeID==0.?tan0:tan1;strokeCoord=combinedEdgeID==0.?p0"
+":p3;}float2 ortho=float2(tangent.y,-tangent.x);strokeCoord+=ortho*(strokeRadius"
+"*strokeOutset);if(isHairline){return float4(strokeCoord+translate,inverse(affineMatrix"
+")*strokeCoord);}else{return float4(affineMatrix*strokeCoord+translate,strokeCoord"
+");}}";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_public.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_public.minified.sksl
new file mode 100644
index 0000000000..9e07ced8b6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_public.minified.sksl
@@ -0,0 +1,4 @@
+static constexpr char SKSL_MINIFIED_sksl_public[] =
+"$pure half3 toLinearSrgb(half3);$pure half3 fromLinearSrgb(half3);half4 $eval"
+"(float2,shader);half4 $eval(half4,colorFilter);half4 $eval(half4,half4,blender"
+");";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_public.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_public.unoptimized.sksl
new file mode 100644
index 0000000000..d71770d441
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_public.unoptimized.sksl
@@ -0,0 +1,4 @@
+static constexpr char SKSL_MINIFIED_sksl_public[] =
+"$pure half3 toLinearSrgb(half3 color);$pure half3 fromLinearSrgb(half3 color"
+");half4 $eval(float2 coords,shader s);half4 $eval(half4 color,colorFilter f"
+");half4 $eval(half4 src,half4 dst,blender b);";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_rt_shader.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_rt_shader.minified.sksl
new file mode 100644
index 0000000000..70ca8dacc6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_rt_shader.minified.sksl
@@ -0,0 +1,2 @@
+static constexpr char SKSL_MINIFIED_sksl_rt_shader[] =
+"layout(builtin=15)float4 sk_FragCoord;";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_rt_shader.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_rt_shader.unoptimized.sksl
new file mode 100644
index 0000000000..70ca8dacc6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_rt_shader.unoptimized.sksl
@@ -0,0 +1,2 @@
+static constexpr char SKSL_MINIFIED_sksl_rt_shader[] =
+"layout(builtin=15)float4 sk_FragCoord;";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_shared.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_shared.minified.sksl
new file mode 100644
index 0000000000..7f2b17c64c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_shared.minified.sksl
@@ -0,0 +1,143 @@
+static constexpr char SKSL_MINIFIED_sksl_shared[] =
+"$pure $genType radians($genType);$pure $genHType radians($genHType);$pure $genType"
+" degrees($genType);$pure $genHType degrees($genHType);$pure $genType sin($genType"
+");$pure $genHType sin($genHType);$pure $genType cos($genType);$pure $genHType"
+" cos($genHType);$pure $genType tan($genType);$pure $genHType tan($genHType)"
+";$pure $genType asin($genType);$pure $genHType asin($genHType);$pure $genType"
+" acos($genType);$pure $genHType acos($genHType);$pure $genType atan($genType"
+",$genType);$pure $genHType atan($genHType,$genHType);$pure $genType atan($genType"
+");$pure $genHType atan($genHType);$es3 $pure $genType sinh($genType);$es3 $pure"
+" $genHType sinh($genHType);$es3 $pure $genType cosh($genType);$es3 $pure $genHType"
+" cosh($genHType);$es3 $pure $genType tanh($genType);$es3 $pure $genHType tanh"
+"($genHType);$es3 $pure $genType asinh($genType);$es3 $pure $genHType asinh("
+"$genHType);$es3 $pure $genType acosh($genType);$es3 $pure $genHType acosh($genHType"
+");$es3 $pure $genType atanh($genType);$es3 $pure $genHType atanh($genHType)"
+";$pure $genType pow($genType,$genType);$pure $genHType pow($genHType,$genHType"
+");$pure $genType exp($genType);$pure $genHType exp($genHType);$pure $genType"
+" log($genType);$pure $genHType log($genHType);$pure $genType exp2($genType)"
+";$pure $genHType exp2($genHType);$pure $genType log2($genType);$pure $genHType"
+" log2($genHType);$pure $genType sqrt($genType);$pure $genHType sqrt($genHType"
+");$pure $genType inversesqrt($genType);$pure $genHType inversesqrt($genHType"
+");$pure $genType abs($genType);$pure $genHType abs($genHType);$pure $genType"
+" sign($genType);$pure $genHType sign($genHType);$pure $genType floor($genType"
+");$pure $genHType floor($genHType);$pure $genType ceil($genType);$pure $genHType"
+" ceil($genHType);$pure $genType fract($genType);$pure $genHType fract($genHType"
+");$pure $genType mod($genType,float);$pure $genType mod($genType,$genType);"
+"$pure $genHType mod($genHType,half);$pure $genHType mod($genHType,$genHType"
+");$pure $genType min($genType,$genType);$pure $genType min($genType,float);"
+"$pure $genHType min($genHType,$genHType);$pure $genHType min($genHType,half"
+");$pure $genType max($genType,$genType);$pure $genType max($genType,float);"
+"$pure $genHType max($genHType,$genHType);$pure $genHType max($genHType,half"
+");$pure $genType clamp($genType,$genType,$genType);$pure $genType clamp($genType"
+",float,float);$pure $genHType clamp($genHType,$genHType,$genHType);$pure $genHType"
+" clamp($genHType,half,half);$pure $genType saturate($genType);$pure $genHType"
+" saturate($genHType);$pure $genType mix($genType,$genType,$genType);$pure $genType"
+" mix($genType,$genType,float);$pure $genHType mix($genHType,$genHType,$genHType"
+");$pure $genHType mix($genHType,$genHType,half);$pure $genType step($genType"
+",$genType);$pure $genType step(float,$genType);$pure $genHType step($genHType"
+",$genHType);$pure $genHType step(half,$genHType);$pure $genType smoothstep("
+"$genType,$genType,$genType);$pure $genType smoothstep(float,float,$genType)"
+";$pure $genHType smoothstep($genHType,$genHType,$genHType);$pure $genHType smoothstep"
+"(half,half,$genHType);$es3 $pure $genIType abs($genIType);$es3 $pure $genIType"
+" sign($genIType);$es3 $pure $genIType floatBitsToInt($genType);$es3 $pure $genUType"
+" floatBitsToUint($genType);$es3 $pure $genType intBitsToFloat($genIType);$es3"
+" $pure $genType uintBitsToFloat($genUType);$es3 $pure $genType trunc($genType"
+");$es3 $pure $genHType trunc($genHType);$es3 $pure $genType round($genType)"
+";$es3 $pure $genHType round($genHType);$es3 $pure $genType roundEven($genType"
+");$es3 $pure $genHType roundEven($genHType);$es3 $pure $genIType min($genIType"
+",$genIType);$es3 $pure $genIType min($genIType,int);$es3 $pure $genUType min"
+"($genUType,$genUType);$es3 $pure $genUType min($genUType,uint);$es3 $pure $genIType"
+" max($genIType,$genIType);$es3 $pure $genIType max($genIType,int);$es3 $pure"
+" $genUType max($genUType,$genUType);$es3 $pure $genUType max($genUType,uint"
+");$es3 $pure $genIType clamp($genIType,$genIType,$genIType);$es3 $pure $genIType"
+" clamp($genIType,int,int);$es3 $pure $genUType clamp($genUType,$genUType,$genUType"
+");$es3 $pure $genUType clamp($genUType,uint,uint);$es3 $pure $genType mix($genType"
+",$genType,$genBType);$es3 $pure $genHType mix($genHType,$genHType,$genBType"
+");$es3 $pure $genBType isnan($genType);$es3 $pure $genBType isnan($genHType"
+");$es3 $pure $genBType isinf($genType);$es3 $pure $genBType isinf($genHType"
+");$es3 $pure $genType modf($genType,out $genType);$es3 $pure $genHType modf"
+"($genHType,out $genHType);$es3 $pure uint packUnorm2x16(float2);$es3 $pure float2"
+" unpackUnorm2x16(uint);$pure float length($genType);$pure half length($genHType"
+");$pure float distance($genType,$genType);$pure half distance($genHType,$genHType"
+");$pure float dot($genType,$genType);$pure half dot($genHType,$genHType);$pure"
+" float3 cross(float3,float3);$pure half3 cross(half3,half3);$pure $genType normalize"
+"($genType);$pure $genHType normalize($genHType);$pure $genType faceforward("
+"$genType,$genType,$genType);$pure $genHType faceforward($genHType,$genHType"
+",$genHType);$pure $genType reflect($genType,$genType);$pure $genHType reflect"
+"($genHType,$genHType);$pure $genType refract($genType,$genType,float);$pure"
+" $genHType refract($genHType,$genHType,half);$pure $squareMat matrixCompMult"
+"($squareMat,$squareMat);$pure $squareHMat matrixCompMult($squareHMat,$squareHMat"
+");$es3 $pure $mat matrixCompMult($mat,$mat);$es3 $pure $hmat matrixCompMult"
+"($hmat,$hmat);$pure $squareMat inverse($squareMat);$pure $squareHMat inverse"
+"($squareHMat);$es3 $pure float determinant($squareMat);$es3 $pure half determinant"
+"($squareHMat);$es3 $pure $squareMat transpose($squareMat);$es3 $pure $squareHMat"
+" transpose($squareHMat);$es3 $pure float2x3 transpose(float3x2);$es3 $pure half2x3"
+" transpose(half3x2);$es3 $pure float2x4 transpose(float4x2);$es3 $pure half2x4"
+" transpose(half4x2);$es3 $pure float3x2 transpose(float2x3);$es3 $pure half3x2"
+" transpose(half2x3);$es3 $pure float3x4 transpose(float4x3);$es3 $pure half3x4"
+" transpose(half4x3);$es3 $pure float4x2 transpose(float2x4);$es3 $pure half4x2"
+" transpose(half2x4);$es3 $pure float4x3 transpose(float3x4);$es3 $pure half4x3"
+" transpose(half3x4);$es3 $pure $squareMat outerProduct($vec,$vec);$es3 $pure"
+" $squareHMat outerProduct($hvec,$hvec);$es3 $pure float2x3 outerProduct(float3"
+",float2);$es3 $pure half2x3 outerProduct(half3,half2);$es3 $pure float3x2 outerProduct"
+"(float2,float3);$es3 $pure half3x2 outerProduct(half2,half3);$es3 $pure float2x4"
+" outerProduct(float4,float2);$es3 $pure half2x4 outerProduct(half4,half2);$es3"
+" $pure float4x2 outerProduct(float2,float4);$es3 $pure half4x2 outerProduct"
+"(half2,half4);$es3 $pure float3x4 outerProduct(float4,float3);$es3 $pure half3x4"
+" outerProduct(half4,half3);$es3 $pure float4x3 outerProduct(float3,float4);"
+"$es3 $pure half4x3 outerProduct(half3,half4);$pure $bvec lessThan($vec,$vec"
+");$pure $bvec lessThan($hvec,$hvec);$pure $bvec lessThan($ivec,$ivec);$pure"
+" $bvec lessThan($svec,$svec);$pure $bvec lessThanEqual($vec,$vec);$pure $bvec"
+" lessThanEqual($hvec,$hvec);$pure $bvec lessThanEqual($ivec,$ivec);$pure $bvec"
+" lessThanEqual($svec,$svec);$pure $bvec greaterThan($vec,$vec);$pure $bvec greaterThan"
+"($hvec,$hvec);$pure $bvec greaterThan($ivec,$ivec);$pure $bvec greaterThan("
+"$svec,$svec);$pure $bvec greaterThanEqual($vec,$vec);$pure $bvec greaterThanEqual"
+"($hvec,$hvec);$pure $bvec greaterThanEqual($ivec,$ivec);$pure $bvec greaterThanEqual"
+"($svec,$svec);$pure $bvec equal($vec,$vec);$pure $bvec equal($hvec,$hvec);$pure"
+" $bvec equal($ivec,$ivec);$pure $bvec equal($svec,$svec);$pure $bvec equal("
+"$bvec,$bvec);$pure $bvec notEqual($vec,$vec);$pure $bvec notEqual($hvec,$hvec"
+");$pure $bvec notEqual($ivec,$ivec);$pure $bvec notEqual($svec,$svec);$pure"
+" $bvec notEqual($bvec,$bvec);$es3 $pure $bvec lessThan($usvec,$usvec);$es3 $pure"
+" $bvec lessThan($uvec,$uvec);$es3 $pure $bvec lessThanEqual($uvec,$uvec);$es3"
+" $pure $bvec lessThanEqual($usvec,$usvec);$es3 $pure $bvec greaterThan($uvec"
+",$uvec);$es3 $pure $bvec greaterThan($usvec,$usvec);$es3 $pure $bvec greaterThanEqual"
+"($uvec,$uvec);$es3 $pure $bvec greaterThanEqual($usvec,$usvec);$es3 $pure $bvec"
+" equal($uvec,$uvec);$es3 $pure $bvec equal($usvec,$usvec);$es3 $pure $bvec notEqual"
+"($uvec,$uvec);$es3 $pure $bvec notEqual($usvec,$usvec);$pure bool any($bvec"
+");$pure bool all($bvec);$pure $bvec not($bvec);$es3 $pure $genType dFdx($genType"
+");$es3 $pure $genType dFdy($genType);$es3 $pure $genHType dFdx($genHType);$es3"
+" $pure $genHType dFdy($genHType);$es3 $pure $genType fwidth($genType);$es3 $pure"
+" $genHType fwidth($genHType);$pure half4 unpremul(half4 a){return half4(a.xyz"
+"/max(a.w,.0001),a.w);}$pure float4 unpremul(float4 a){return float4(a.xyz/max"
+"(a.w,.0001),a.w);}$pure half4 $unpremul_polar(half4 a){return half4(a.x,a.yz"
+"/max(a.w,.0001),a.w);}$pure half4 $rgb_to_hsl(half3 b,half d){half4 e=b.y<b"
+".z?half4(b.zy,-1.,.6666667):half4(b.yz,0.,-.333333343);half4 f=b.x<e.x?half4"
+"(e.x,b.x,e.yw):half4(b.x,e.x,e.yz);half h=f.x;half i=h-min(f.y,f.z);half j="
+"h-i*.5;half k=abs(f.w+(f.y-f.z)/(i*6.+.0001));half l=i/((d+.0001)-abs(j*2.-"
+"d));half m=j/(d+.0001);return half4(k,l,m,d);}$pure half3 $hsl_to_rgb(half3"
+" a){half b=(1.-abs(2.*a.z-1.))*a.y;half3 c=a.xxx+half3(0.,.6666667,.333333343"
+");half3 d=saturate(abs(fract(c)*6.-3.)-1.);return(d-.5)*b+a.z;}$pure half4 $hsl_to_rgb"
+"(half3 b,half c){return saturate(half4($hsl_to_rgb(b)*c,c));}$pure half3 $css_lab_to_xyz"
+"(half3 a){half3 d;d.y=(a.x+16.)*.00862069;d.x=a.y*.002+d.y;d.z=d.y-a.z*.005"
+";half3 g=pow(d,half3(3.));half3 h=half3(g.x>.008856452?g.x:(116.*d.x-16.)*.00110705639"
+",a.x>8.000001?g.y:a.x*.00110705639,g.z>.008856452?g.z:(116.*d.z-16.)*.00110705639"
+");return h*half3(.9642956,1.,.825104535);}$pure half3 $a(half3 a){return half3"
+"(a.z,a.y*cos(radians(a.x)),a.y*sin(radians(a.x)));}$pure half3 $css_hcl_to_xyz"
+"(half3 a){return $css_lab_to_xyz($a(a));}$pure half3 $css_oklab_to_linear_srgb"
+"(half3 a){half b=(a.x+.396337777*a.y)+.215803757*a.z;half c=(a.x-.105561346"
+"*a.y)-.06385417*a.z;half d=(a.x-.08948418*a.y)-1.29148555*a.z;half e=(b*b)*"
+"b;half f=(c*c)*c;half g=(d*d)*d;return half3((4.0767417*e-3.3077116*f)+.230969936"
+"*g,(-1.268438*e+2.60975742*f)-.341319382*g,(-.00419608643*e-.7034186*f)+1.70761466"
+"*g);}$pure half3 $css_okhcl_to_linear_srgb(half3 a){return $css_oklab_to_linear_srgb"
+"($a(a));}$pure half3 $css_hsl_to_srgb(half3 b){b.x=mod(b.x,360.);if(b.x<0.)"
+"{b.x+=360.;}b.yz*=.01;half3 c=mod(half3(0.,8.,4.)+b.x*.0333333351,12.);half"
+" d=b.y*min(b.z,1.-b.z);return b.z-d*clamp(min(c-3.,9.-c),-1.,1.);}$pure half3"
+" $css_hwb_to_srgb(half3 a){a.yz*=.01;if(a.y+a.z>=1.){half b=a.y/(a.y+a.z);return"
+" half3(b);}half3 b=$css_hsl_to_srgb(half3(a.x,100.,50.));b*=(1.-a.y)-a.z;b+="
+"a.y;return b;}$pure half4 $interpolated_to_rgb_unpremul(half4 a,int b,int c"
+"){if(bool(c)){switch(b){case 2:;case 3:a=unpremul(a);break;case 4:;case 5:;"
+"case 7:;case 8:a=$unpremul_polar(a);break;}}switch(b){case 2:{a.xyz=$css_lab_to_xyz"
+"(a.xyz);break;}case 3:{a.xyz=$css_oklab_to_linear_srgb(a.xyz);break;}case 4"
+":{a.xyz=$css_hcl_to_xyz(a.xyz);break;}case 5:{a.xyz=$css_okhcl_to_linear_srgb"
+"(a.xyz);break;}case 7:{a.xyz=$css_hsl_to_srgb(a.xyz);break;}case 8:{a.xyz=$css_hwb_to_srgb"
+"(a.xyz);break;}}return a;}";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_shared.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_shared.unoptimized.sksl
new file mode 100644
index 0000000000..050060ed4b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_shared.unoptimized.sksl
@@ -0,0 +1,163 @@
+static constexpr char SKSL_MINIFIED_sksl_shared[] =
+"$pure $genType radians($genType degrees);$pure $genHType radians($genHType degrees"
+");$pure $genType degrees($genType radians);$pure $genHType degrees($genHType"
+" radians);$pure $genType sin($genType angle);$pure $genHType sin($genHType angle"
+");$pure $genType cos($genType angle);$pure $genHType cos($genHType angle);$pure"
+" $genType tan($genType angle);$pure $genHType tan($genHType angle);$pure $genType"
+" asin($genType x);$pure $genHType asin($genHType x);$pure $genType acos($genType"
+" x);$pure $genHType acos($genHType x);$pure $genType atan($genType y,$genType"
+" x);$pure $genHType atan($genHType y,$genHType x);$pure $genType atan($genType"
+" y_over_x);$pure $genHType atan($genHType y_over_x);$es3 $pure $genType sinh"
+"($genType x);$es3 $pure $genHType sinh($genHType x);$es3 $pure $genType cosh"
+"($genType x);$es3 $pure $genHType cosh($genHType x);$es3 $pure $genType tanh"
+"($genType x);$es3 $pure $genHType tanh($genHType x);$es3 $pure $genType asinh"
+"($genType x);$es3 $pure $genHType asinh($genHType x);$es3 $pure $genType acosh"
+"($genType x);$es3 $pure $genHType acosh($genHType x);$es3 $pure $genType atanh"
+"($genType x);$es3 $pure $genHType atanh($genHType x);$pure $genType pow($genType"
+" x,$genType y);$pure $genHType pow($genHType x,$genHType y);$pure $genType exp"
+"($genType x);$pure $genHType exp($genHType x);$pure $genType log($genType x"
+");$pure $genHType log($genHType x);$pure $genType exp2($genType x);$pure $genHType"
+" exp2($genHType x);$pure $genType log2($genType x);$pure $genHType log2($genHType"
+" x);$pure $genType sqrt($genType x);$pure $genHType sqrt($genHType x);$pure"
+" $genType inversesqrt($genType x);$pure $genHType inversesqrt($genHType x);"
+"$pure $genType abs($genType x);$pure $genHType abs($genHType x);$pure $genType"
+" sign($genType x);$pure $genHType sign($genHType x);$pure $genType floor($genType"
+" x);$pure $genHType floor($genHType x);$pure $genType ceil($genType x);$pure"
+" $genHType ceil($genHType x);$pure $genType fract($genType x);$pure $genHType"
+" fract($genHType x);$pure $genType mod($genType x,float y);$pure $genType mod"
+"($genType x,$genType y);$pure $genHType mod($genHType x,half y);$pure $genHType"
+" mod($genHType x,$genHType y);$pure $genType min($genType x,$genType y);$pure"
+" $genType min($genType x,float y);$pure $genHType min($genHType x,$genHType"
+" y);$pure $genHType min($genHType x,half y);$pure $genType max($genType x,$genType"
+" y);$pure $genType max($genType x,float y);$pure $genHType max($genHType x,"
+"$genHType y);$pure $genHType max($genHType x,half y);$pure $genType clamp($genType"
+" x,$genType minVal,$genType maxVal);$pure $genType clamp($genType x,float minVal"
+",float maxVal);$pure $genHType clamp($genHType x,$genHType minVal,$genHType"
+" maxVal);$pure $genHType clamp($genHType x,half minVal,half maxVal);$pure $genType"
+" saturate($genType x);$pure $genHType saturate($genHType x);$pure $genType mix"
+"($genType x,$genType y,$genType a);$pure $genType mix($genType x,$genType y"
+",float a);$pure $genHType mix($genHType x,$genHType y,$genHType a);$pure $genHType"
+" mix($genHType x,$genHType y,half a);$pure $genType step($genType edge,$genType"
+" x);$pure $genType step(float edge,$genType x);$pure $genHType step($genHType"
+" edge,$genHType x);$pure $genHType step(half edge,$genHType x);$pure $genType"
+" smoothstep($genType edge0,$genType edge1,$genType x);$pure $genType smoothstep"
+"(float edge0,float edge1,$genType x);$pure $genHType smoothstep($genHType edge0"
+",$genHType edge1,$genHType x);$pure $genHType smoothstep(half edge0,half edge1"
+",$genHType x);$es3 $pure $genIType abs($genIType x);$es3 $pure $genIType sign"
+"($genIType x);$es3 $pure $genIType floatBitsToInt($genType value);$es3 $pure"
+" $genUType floatBitsToUint($genType value);$es3 $pure $genType intBitsToFloat"
+"($genIType value);$es3 $pure $genType uintBitsToFloat($genUType value);$es3"
+" $pure $genType trunc($genType x);$es3 $pure $genHType trunc($genHType x);$es3"
+" $pure $genType round($genType x);$es3 $pure $genHType round($genHType x);$es3"
+" $pure $genType roundEven($genType x);$es3 $pure $genHType roundEven($genHType"
+" x);$es3 $pure $genIType min($genIType x,$genIType y);$es3 $pure $genIType min"
+"($genIType x,int y);$es3 $pure $genUType min($genUType x,$genUType y);$es3 $pure"
+" $genUType min($genUType x,uint y);$es3 $pure $genIType max($genIType x,$genIType"
+" y);$es3 $pure $genIType max($genIType x,int y);$es3 $pure $genUType max($genUType"
+" x,$genUType y);$es3 $pure $genUType max($genUType x,uint y);$es3 $pure $genIType"
+" clamp($genIType x,$genIType minVal,$genIType maxVal);$es3 $pure $genIType clamp"
+"($genIType x,int minVal,int maxVal);$es3 $pure $genUType clamp($genUType x,"
+"$genUType minVal,$genUType maxVal);$es3 $pure $genUType clamp($genUType x,uint"
+" minVal,uint maxVal);$es3 $pure $genType mix($genType x,$genType y,$genBType"
+" a);$es3 $pure $genHType mix($genHType x,$genHType y,$genBType a);$es3 $pure"
+" $genBType isnan($genType x);$es3 $pure $genBType isnan($genHType x);$es3 $pure"
+" $genBType isinf($genType x);$es3 $pure $genBType isinf($genHType x);$es3 $pure"
+" $genType modf($genType x,out $genType i);$es3 $pure $genHType modf($genHType"
+" x,out $genHType i);$es3 $pure uint packUnorm2x16(float2 v);$es3 $pure float2"
+" unpackUnorm2x16(uint p);$pure float length($genType x);$pure half length($genHType"
+" x);$pure float distance($genType p0,$genType p1);$pure half distance($genHType"
+" p0,$genHType p1);$pure float dot($genType x,$genType y);$pure half dot($genHType"
+" x,$genHType y);$pure float3 cross(float3 x,float3 y);$pure half3 cross(half3"
+" x,half3 y);$pure $genType normalize($genType x);$pure $genHType normalize("
+"$genHType x);$pure $genType faceforward($genType N,$genType I,$genType Nref"
+");$pure $genHType faceforward($genHType N,$genHType I,$genHType Nref);$pure"
+" $genType reflect($genType I,$genType N);$pure $genHType reflect($genHType I"
+",$genHType N);$pure $genType refract($genType I,$genType N,float eta);$pure"
+" $genHType refract($genHType I,$genHType N,half eta);$pure $squareMat matrixCompMult"
+"($squareMat x,$squareMat y);$pure $squareHMat matrixCompMult($squareHMat x,"
+"$squareHMat y);$es3 $pure $mat matrixCompMult($mat x,$mat y);$es3 $pure $hmat"
+" matrixCompMult($hmat x,$hmat y);$pure $squareMat inverse($squareMat m);$pure"
+" $squareHMat inverse($squareHMat m);$es3 $pure float determinant($squareMat"
+" m);$es3 $pure half determinant($squareHMat m);$es3 $pure $squareMat transpose"
+"($squareMat m);$es3 $pure $squareHMat transpose($squareHMat m);$es3 $pure float2x3"
+" transpose(float3x2 m);$es3 $pure half2x3 transpose(half3x2 m);$es3 $pure float2x4"
+" transpose(float4x2 m);$es3 $pure half2x4 transpose(half4x2 m);$es3 $pure float3x2"
+" transpose(float2x3 m);$es3 $pure half3x2 transpose(half2x3 m);$es3 $pure float3x4"
+" transpose(float4x3 m);$es3 $pure half3x4 transpose(half4x3 m);$es3 $pure float4x2"
+" transpose(float2x4 m);$es3 $pure half4x2 transpose(half2x4 m);$es3 $pure float4x3"
+" transpose(float3x4 m);$es3 $pure half4x3 transpose(half3x4 m);$es3 $pure $squareMat"
+" outerProduct($vec c,$vec r);$es3 $pure $squareHMat outerProduct($hvec c,$hvec"
+" r);$es3 $pure float2x3 outerProduct(float3 c,float2 r);$es3 $pure half2x3 outerProduct"
+"(half3 c,half2 r);$es3 $pure float3x2 outerProduct(float2 c,float3 r);$es3 $pure"
+" half3x2 outerProduct(half2 c,half3 r);$es3 $pure float2x4 outerProduct(float4"
+" c,float2 r);$es3 $pure half2x4 outerProduct(half4 c,half2 r);$es3 $pure float4x2"
+" outerProduct(float2 c,float4 r);$es3 $pure half4x2 outerProduct(half2 c,half4"
+" r);$es3 $pure float3x4 outerProduct(float4 c,float3 r);$es3 $pure half3x4 outerProduct"
+"(half4 c,half3 r);$es3 $pure float4x3 outerProduct(float3 c,float4 r);$es3 $pure"
+" half4x3 outerProduct(half3 c,half4 r);$pure $bvec lessThan($vec x,$vec y);"
+"$pure $bvec lessThan($hvec x,$hvec y);$pure $bvec lessThan($ivec x,$ivec y)"
+";$pure $bvec lessThan($svec x,$svec y);$pure $bvec lessThanEqual($vec x,$vec"
+" y);$pure $bvec lessThanEqual($hvec x,$hvec y);$pure $bvec lessThanEqual($ivec"
+" x,$ivec y);$pure $bvec lessThanEqual($svec x,$svec y);$pure $bvec greaterThan"
+"($vec x,$vec y);$pure $bvec greaterThan($hvec x,$hvec y);$pure $bvec greaterThan"
+"($ivec x,$ivec y);$pure $bvec greaterThan($svec x,$svec y);$pure $bvec greaterThanEqual"
+"($vec x,$vec y);$pure $bvec greaterThanEqual($hvec x,$hvec y);$pure $bvec greaterThanEqual"
+"($ivec x,$ivec y);$pure $bvec greaterThanEqual($svec x,$svec y);$pure $bvec"
+" equal($vec x,$vec y);$pure $bvec equal($hvec x,$hvec y);$pure $bvec equal("
+"$ivec x,$ivec y);$pure $bvec equal($svec x,$svec y);$pure $bvec equal($bvec"
+" x,$bvec y);$pure $bvec notEqual($vec x,$vec y);$pure $bvec notEqual($hvec x"
+",$hvec y);$pure $bvec notEqual($ivec x,$ivec y);$pure $bvec notEqual($svec x"
+",$svec y);$pure $bvec notEqual($bvec x,$bvec y);$es3 $pure $bvec lessThan($usvec"
+" x,$usvec y);$es3 $pure $bvec lessThan($uvec x,$uvec y);$es3 $pure $bvec lessThanEqual"
+"($uvec x,$uvec y);$es3 $pure $bvec lessThanEqual($usvec x,$usvec y);$es3 $pure"
+" $bvec greaterThan($uvec x,$uvec y);$es3 $pure $bvec greaterThan($usvec x,$usvec"
+" y);$es3 $pure $bvec greaterThanEqual($uvec x,$uvec y);$es3 $pure $bvec greaterThanEqual"
+"($usvec x,$usvec y);$es3 $pure $bvec equal($uvec x,$uvec y);$es3 $pure $bvec"
+" equal($usvec x,$usvec y);$es3 $pure $bvec notEqual($uvec x,$uvec y);$es3 $pure"
+" $bvec notEqual($usvec x,$usvec y);$pure bool any($bvec x);$pure bool all($bvec"
+" x);$pure $bvec not($bvec x);$es3 $pure $genType dFdx($genType p);$es3 $pure"
+" $genType dFdy($genType p);$es3 $pure $genHType dFdx($genHType p);$es3 $pure"
+" $genHType dFdy($genHType p);$es3 $pure $genType fwidth($genType p);$es3 $pure"
+" $genHType fwidth($genHType p);$pure half4 unpremul(half4 color){return half4"
+"(color.xyz/max(color.w,.0001),color.w);}$pure float4 unpremul(float4 color)"
+"{return float4(color.xyz/max(color.w,.0001),color.w);}$export $pure half4 $unpremul_polar"
+"(half4 color){return half4(color.x,color.yz/max(color.w,.0001),color.w);}$export"
+" $pure half4 $rgb_to_hsl(half3 c,half a){half4 p=c.y<c.z?half4(c.zy,-1.,.6666667"
+"):half4(c.yz,0.,-.333333343);half4 q=c.x<p.x?half4(p.x,c.x,p.yw):half4(c.x,"
+"p.x,p.yz);const half kEps=.0001;half pmV=q.x;half pmC=pmV-min(q.y,q.z);half"
+" pmL=pmV-pmC*.5;half H=abs(q.w+(q.y-q.z)/(pmC*6.+kEps));half S=pmC/((a+kEps"
+")-abs(pmL*2.-a));half L=pmL/(a+kEps);return half4(H,S,L,a);}$export $pure half3"
+" $hsl_to_rgb(half3 hsl){half C=(1.-abs(2.*hsl.z-1.))*hsl.y;half3 p=hsl.xxx+"
+"half3(0.,.6666667,.333333343);half3 q=saturate(abs(fract(p)*6.-3.)-1.);return"
+"(q-.5)*C+hsl.z;}$export $pure half4 $hsl_to_rgb(half3 hsl,half a){return saturate"
+"(half4($hsl_to_rgb(hsl)*a,a));}$export $pure half3 $css_lab_to_xyz(half3 lab"
+"){const half k=903.2963;const half e=.008856452;half3 f;f.y=(lab.x+16.)*.00862069"
+";f.x=lab.y*.002+f.y;f.z=f.y-lab.z*.005;half3 f_cubed=pow(f,half3(3.));half3"
+" xyz=half3(f_cubed.x>e?f_cubed.x:(116.*f.x-16.)*.00110705639,lab.x>8.000001"
+"?f_cubed.y:lab.x*.00110705639,f_cubed.z>e?f_cubed.z:(116.*f.z-16.)*.00110705639"
+");const half3 D50=half3(.9642956,1.,.825104535);return xyz*D50;}$pure half3"
+" $css_hcl_to_lab(half3 hcl){return half3(hcl.z,hcl.y*cos(radians(hcl.x)),hcl"
+".y*sin(radians(hcl.x)));}$export $pure half3 $css_hcl_to_xyz(half3 hcl){return"
+" $css_lab_to_xyz($css_hcl_to_lab(hcl));}$export $pure half3 $css_oklab_to_linear_srgb"
+"(half3 oklab){half l_=(oklab.x+.396337777*oklab.y)+.215803757*oklab.z;half m_"
+"=(oklab.x-.105561346*oklab.y)-.06385417*oklab.z;half s_=(oklab.x-.08948418*"
+"oklab.y)-1.29148555*oklab.z;half l=(l_*l_)*l_;half m=(m_*m_)*m_;half s=(s_*"
+"s_)*s_;return half3((4.0767417*l-3.3077116*m)+.230969936*s,(-1.268438*l+2.60975742"
+"*m)-.341319382*s,(-.00419608643*l-.7034186*m)+1.70761466*s);}$export $pure half3"
+" $css_okhcl_to_linear_srgb(half3 okhcl){return $css_oklab_to_linear_srgb($css_hcl_to_lab"
+"(okhcl));}$export $pure half3 $css_hsl_to_srgb(half3 hsl){hsl.x=mod(hsl.x,360."
+");if(hsl.x<0.){hsl.x+=360.;}hsl.yz*=.01;half3 k=mod(half3(0.,8.,4.)+hsl.x*.0333333351"
+",12.);half a=hsl.y*min(hsl.z,1.-hsl.z);return hsl.z-a*clamp(min(k-3.,9.-k),"
+"-1.,1.);}$export $pure half3 $css_hwb_to_srgb(half3 hwb){hwb.yz*=.01;if(hwb"
+".y+hwb.z>=1.){half gray=hwb.y/(hwb.y+hwb.z);return half3(gray);}half3 rgb=$css_hsl_to_srgb"
+"(half3(hwb.x,100.,50.));rgb*=(1.-hwb.y)-hwb.z;rgb+=hwb.y;return rgb;}$export"
+" $pure half4 $interpolated_to_rgb_unpremul(half4 color,int colorSpace,int doUnpremul"
+"){const int kDestination=0;const int kSRGBLinear=1;const int kLab=2;const int"
+" kOKLab=3;const int kLCH=4;const int kOKLCH=5;const int kSRGB=6;const int kHSL"
+"=7;const int kHWB=8;if(bool(doUnpremul)){switch(colorSpace){case 2:;case 3:"
+"color=unpremul(color);break;case 4:;case 5:;case 7:;case 8:color=$unpremul_polar"
+"(color);break;}}switch(colorSpace){case 2:{color.xyz=$css_lab_to_xyz(color."
+"xyz);break;}case 3:{color.xyz=$css_oklab_to_linear_srgb(color.xyz);break;}case"
+" 4:{color.xyz=$css_hcl_to_xyz(color.xyz);break;}case 5:{color.xyz=$css_okhcl_to_linear_srgb"
+"(color.xyz);break;}case 7:{color.xyz=$css_hsl_to_srgb(color.xyz);break;}case"
+" 8:{color.xyz=$css_hwb_to_srgb(color.xyz);break;}}return color;}";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_vert.minified.sksl b/gfx/skia/skia/src/sksl/generated/sksl_vert.minified.sksl
new file mode 100644
index 0000000000..ce8fd5f9a0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_vert.minified.sksl
@@ -0,0 +1,4 @@
+static constexpr char SKSL_MINIFIED_sksl_vert[] =
+"out sk_PerVertex{layout(builtin=0)float4 sk_Position;layout(builtin=1)float"
+" sk_PointSize;};layout(builtin=42)in int sk_VertexID;layout(builtin=43)in int"
+" sk_InstanceID;";
diff --git a/gfx/skia/skia/src/sksl/generated/sksl_vert.unoptimized.sksl b/gfx/skia/skia/src/sksl/generated/sksl_vert.unoptimized.sksl
new file mode 100644
index 0000000000..ce8fd5f9a0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/generated/sksl_vert.unoptimized.sksl
@@ -0,0 +1,4 @@
+static constexpr char SKSL_MINIFIED_sksl_vert[] =
+"out sk_PerVertex{layout(builtin=0)float4 sk_Position;layout(builtin=1)float"
+" sk_PointSize;};layout(builtin=42)in int sk_VertexID;layout(builtin=43)in int"
+" sk_InstanceID;";
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.cpp b/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.cpp
new file mode 100644
index 0000000000..fb9d544a40
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+
+#include "include/private/SkSLDefines.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
+static bool is_low_precision_matrix_vector_multiply(const Expression& left,
+ const Operator& op,
+ const Expression& right,
+ const Type& resultType) {
+ return !resultType.highPrecision() &&
+ op.kind() == Operator::Kind::STAR &&
+ left.type().isMatrix() &&
+ right.type().isVector() &&
+ left.type().rows() == right.type().columns() &&
+ Analysis::IsTrivialExpression(left) &&
+ Analysis::IsTrivialExpression(right);
+}
+
+static std::unique_ptr<Expression> rewrite_matrix_vector_multiply(const Context& context,
+ Position pos,
+ const Expression& left,
+ const Operator& op,
+ const Expression& right,
+ const Type& resultType) {
+ // Rewrite m33 * v3 as (m[0] * v[0] + m[1] * v[1] + m[2] * v[2])
+ std::unique_ptr<Expression> sum;
+ for (int n = 0; n < left.type().rows(); ++n) {
+ // Get mat[N] with an index expression.
+ std::unique_ptr<Expression> matN = IndexExpression::Make(
+ context, pos, left.clone(), Literal::MakeInt(context, left.fPosition, n));
+ // Get vec[N] with a swizzle expression.
+ std::unique_ptr<Expression> vecN = Swizzle::Make(context,
+ left.fPosition.rangeThrough(right.fPosition), right.clone(),
+ ComponentArray{(SkSL::SwizzleComponent::Type)n});
+ // Multiply them together.
+ const Type* matNType = &matN->type();
+ std::unique_ptr<Expression> product =
+ BinaryExpression::Make(context, pos, std::move(matN), op, std::move(vecN),
+ matNType);
+ // Sum all the components together.
+ if (!sum) {
+ sum = std::move(product);
+ } else {
+ sum = BinaryExpression::Make(context,
+ pos,
+ std::move(sum),
+ Operator(Operator::Kind::PLUS),
+ std::move(product),
+ matNType);
+ }
+ }
+
+ return sum;
+}
+
+std::unique_ptr<Expression> BinaryExpression::Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> left,
+ Operator op,
+ std::unique_ptr<Expression> right) {
+ if (!left || !right) {
+ return nullptr;
+ }
+ const Type* rawLeftType = (left->isIntLiteral() && right->type().isInteger())
+ ? &right->type()
+ : &left->type();
+ const Type* rawRightType = (right->isIntLiteral() && left->type().isInteger())
+ ? &left->type()
+ : &right->type();
+
+ bool isAssignment = op.isAssignment();
+ if (isAssignment &&
+ !Analysis::UpdateVariableRefKind(left.get(),
+ op.kind() != Operator::Kind::EQ
+ ? VariableReference::RefKind::kReadWrite
+ : VariableReference::RefKind::kWrite,
+ context.fErrors)) {
+ return nullptr;
+ }
+
+ const Type* leftType;
+ const Type* rightType;
+ const Type* resultType;
+ if (!op.determineBinaryType(context, *rawLeftType, *rawRightType,
+ &leftType, &rightType, &resultType)) {
+ context.fErrors->error(pos, "type mismatch: '" + std::string(op.tightOperatorName()) +
+ "' cannot operate on '" + left->type().displayName() + "', '" +
+ right->type().displayName() + "'");
+ return nullptr;
+ }
+
+ if (isAssignment && (leftType->componentType().isOpaque() || leftType->isOrContainsAtomic())) {
+ context.fErrors->error(pos, "assignments to opaque type '" + left->type().displayName() +
+ "' are not permitted");
+ return nullptr;
+ }
+ if (context.fConfig->strictES2Mode()) {
+ if (!op.isAllowedInStrictES2Mode()) {
+ context.fErrors->error(pos, "operator '" + std::string(op.tightOperatorName()) +
+ "' is not allowed");
+ return nullptr;
+ }
+ if (leftType->isOrContainsArray()) {
+ // Most operators are already rejected on arrays, but GLSL ES 1.0 is very explicit that
+ // the *only* operator allowed on arrays is subscripting (and the rules against
+ // assignment, comparison, and even sequence apply to structs containing arrays as well)
+ context.fErrors->error(pos, "operator '" + std::string(op.tightOperatorName()) +
+ "' can not operate on arrays (or structs containing arrays)");
+ return nullptr;
+ }
+ }
+
+ left = leftType->coerceExpression(std::move(left), context);
+ right = rightType->coerceExpression(std::move(right), context);
+ if (!left || !right) {
+ return nullptr;
+ }
+
+ return BinaryExpression::Make(context, pos, std::move(left), op, std::move(right), resultType);
+}
+
+// Creates a binary expression when the operand types are already known to be compatible.
+// Unlike Convert(), this performs no coercion and reports no errors; an invalid operand
+// pairing trips the SkAssertResult below instead.
+std::unique_ptr<Expression> BinaryExpression::Make(const Context& context,
+                                                   Position pos,
+                                                   std::unique_ptr<Expression> left,
+                                                   Operator op,
+                                                   std::unique_ptr<Expression> right) {
+    // Determine the result type of the binary expression.
+    const Type* leftType;
+    const Type* rightType;
+    const Type* resultType;
+    // determineBinaryType is expected to succeed here; SkAssertResult traps (in debug) if not.
+    SkAssertResult(op.determineBinaryType(context, left->type(), right->type(),
+                                          &leftType, &rightType, &resultType));
+
+    // Delegate to the overload that takes an explicit result type.
+    return BinaryExpression::Make(context, pos, std::move(left), op, std::move(right), resultType);
+}
+
+// Creates a binary expression with a caller-supplied result type. Inputs are assumed to have
+// already been validated (normally by Convert); violations are caught by the debug-only
+// SkASSERTs below. May return a simplified node (via constant folding) or a rewritten form
+// (for the matrix*vector caps workaround) rather than a literal BinaryExpression.
+std::unique_ptr<Expression> BinaryExpression::Make(const Context& context,
+                                                   Position pos,
+                                                   std::unique_ptr<Expression> left,
+                                                   Operator op,
+                                                   std::unique_ptr<Expression> right,
+                                                   const Type* resultType) {
+    // We should have detected non-ES2 compliant behavior in Convert.
+    SkASSERT(!context.fConfig->strictES2Mode() || op.isAllowedInStrictES2Mode());
+    SkASSERT(!context.fConfig->strictES2Mode() || !left->type().isOrContainsArray());
+
+    // We should have detected non-assignable assignment expressions in Convert.
+    SkASSERT(!op.isAssignment() || Analysis::IsAssignable(*left));
+    SkASSERT(!op.isAssignment() || !left->type().componentType().isOpaque());
+
+    // For simple assignments, detect and report out-of-range literal values.
+    if (op.kind() == Operator::Kind::EQ) {
+        left->type().checkForOutOfRangeLiteral(context, *right);
+    }
+
+    // Perform constant-folding on the expression.
+    if (std::unique_ptr<Expression> result = ConstantFolder::Simplify(context, pos, *left,
+                                                                      op, *right, *resultType)) {
+        return result;
+    }
+
+    // This rewrite is only attempted when optimizing user code, never for builtin modules.
+    if (context.fConfig->fSettings.fOptimize && !context.fConfig->fIsBuiltinCode) {
+        // When sk_Caps.rewriteMatrixVectorMultiply is set, we rewrite medium-precision
+        // matrix * vector multiplication as:
+        //   (sk_Caps.rewriteMatrixVectorMultiply ? (mat[0]*vec[0] + ... + mat[N]*vec[N])
+        //                                        : mat * vec)
+        if (is_low_precision_matrix_vector_multiply(*left, op, *right, *resultType)) {
+            // Look up `sk_Caps.rewriteMatrixVectorMultiply`.
+            auto caps = Setting::Make(context, pos, &ShaderCaps::fRewriteMatrixVectorMultiply);
+
+            // There are three possible outcomes from Setting::Convert:
+            // - If the ShaderCaps aren't known (fCaps in the Context is null), we will get back a
+            //   Setting IRNode. In practice, this should happen when compiling a module.
+            //   In this case, we generate a ternary expression which will be optimized away when
+            //   the module code is actually incorporated into a program.
+            // - If `rewriteMatrixVectorMultiply` is true in our shader caps, we will get back a
+            //   Literal set to true. When this happens, we always return the rewritten expression.
+            // - If `rewriteMatrixVectorMultiply` is false in our shader caps, we will get back a
+            //   Literal set to false. When this happens, we return the expression as-is.
+            bool capsBitIsTrue = caps->isBoolLiteral() && caps->as<Literal>().boolValue();
+            if (capsBitIsTrue || !caps->isBoolLiteral()) {
+                // Rewrite the multiplication as a sum of vector-scalar products.
+                std::unique_ptr<Expression> rewrite =
+                        rewrite_matrix_vector_multiply(context, pos, *left, op, *right,
+                                                       *resultType);
+
+                // If we know the caps bit is true, return the rewritten expression directly.
+                if (capsBitIsTrue) {
+                    return rewrite;
+                }
+
+                // Return a ternary expression:
+                //     sk_Caps.rewriteMatrixVectorMultiply ? (rewrite) : (mat * vec)
+                return TernaryExpression::Make(
+                        context,
+                        pos,
+                        std::move(caps),
+                        std::move(rewrite),
+                        std::make_unique<BinaryExpression>(pos, std::move(left), op,
+                                                           std::move(right), resultType));
+            }
+        }
+    }
+
+    // Default path: build the BinaryExpression node as-is.
+    return std::make_unique<BinaryExpression>(pos, std::move(left), op,
+                                              std::move(right), resultType);
+}
+
+// Returns true if `expr` is an assignment target whose underlying VariableReference has been
+// marked kWrite or kReadWrite. Recurses through field accesses, index expressions, swizzles,
+// and (both branches of) ternaries to reach the variable; anything else is not a valid
+// writable reference. Used by the constructor's SkASSERT to validate assignments.
+bool BinaryExpression::CheckRef(const Expression& expr) {
+    switch (expr.kind()) {
+        case Expression::Kind::kFieldAccess:
+            return CheckRef(*expr.as<FieldAccess>().base());
+
+        case Expression::Kind::kIndex:
+            return CheckRef(*expr.as<IndexExpression>().base());
+
+        case Expression::Kind::kSwizzle:
+            return CheckRef(*expr.as<Swizzle>().base());
+
+        case Expression::Kind::kTernary: {
+            // `(cond ? a : b) = x` writes to one of two targets; both must be writable.
+            const TernaryExpression& t = expr.as<TernaryExpression>();
+            return CheckRef(*t.ifTrue()) && CheckRef(*t.ifFalse());
+        }
+        case Expression::Kind::kVariableReference: {
+            const VariableReference& ref = expr.as<VariableReference>();
+            return ref.refKind() == VariableRefKind::kWrite ||
+                   ref.refKind() == VariableRefKind::kReadWrite;
+        }
+        default:
+            return false;
+    }
+}
+
+// Deep-copies this expression at the given position: both operand subexpressions are cloned;
+// the operator and the result-type pointer are shared with the original.
+std::unique_ptr<Expression> BinaryExpression::clone(Position pos) const {
+    return std::make_unique<BinaryExpression>(pos,
+                                              this->left()->clone(),
+                                              this->getOperator(),
+                                              this->right()->clone(),
+                                              &this->type());
+}
+
+// Builds a source-like string for this expression. Parentheses are emitted whenever this
+// operator binds no more tightly than the surrounding context (precedence >= parent's),
+// so the printed form preserves evaluation order.
+std::string BinaryExpression::description(OperatorPrecedence parentPrecedence) const {
+    OperatorPrecedence operatorPrecedence = this->getOperator().getBinaryPrecedence();
+    bool needsParens = (operatorPrecedence >= parentPrecedence);
+    return std::string(needsParens ? "(" : "") +
+           this->left()->description(operatorPrecedence) +
+           this->getOperator().operatorName() +
+           this->right()->description(operatorPrecedence) +
+           std::string(needsParens ? ")" : "");
+}
+
+// If this expression is an assignment (e.g. `a = 1`, `a += b`), returns the VariableReference
+// being written to; returns null for non-assignment operators or unassignable left-hand sides.
+// (Errors are deliberately suppressed here -- this is a query, not a validation pass.)
+VariableReference* BinaryExpression::isAssignmentIntoVariable() {
+    if (this->getOperator().isAssignment()) {
+        Analysis::AssignmentInfo assignmentInfo;
+        if (Analysis::IsAssignable(*this->left(), &assignmentInfo, /*errors=*/nullptr)) {
+            return assignmentInfo.fAssignedVar;
+        }
+    }
+    return nullptr;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h
new file mode 100644
index 0000000000..094af55ef2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBinaryExpression.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BINARYEXPRESSION
+#define SKSL_BINARYEXPRESSION
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+class VariableReference;
+
+/**
+ * A binary operation, e.g. `a + b` or `a += b`. Holds the two operand expressions and the
+ * operator; the node's type is the (already-determined) result type of the operation.
+ */
+class BinaryExpression final : public Expression {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kBinary;
+
+    BinaryExpression(Position pos, std::unique_ptr<Expression> left, Operator op,
+                     std::unique_ptr<Expression> right, const Type* type)
+        : INHERITED(pos, kIRNodeKind, type)
+        , fLeft(std::move(left))
+        , fOperator(op)
+        , fRight(std::move(right)) {
+        // If we are assigning to a VariableReference, ensure that it is set to Write or ReadWrite.
+        SkASSERT(!op.isAssignment() || CheckRef(*this->left()));
+    }
+
+    // Creates a potentially-simplified form of the expression. Determines the result type
+    // programmatically. Typechecks and coerces input expressions; reports errors via ErrorReporter.
+    static std::unique_ptr<Expression> Convert(const Context& context,
+                                               Position pos,
+                                               std::unique_ptr<Expression> left,
+                                               Operator op,
+                                               std::unique_ptr<Expression> right);
+
+    // Creates a potentially-simplified form of the expression. Determines the result type
+    // programmatically. Asserts if the expressions do not typecheck or are otherwise invalid.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            std::unique_ptr<Expression> left,
+                                            Operator op,
+                                            std::unique_ptr<Expression> right);
+
+    // Creates a potentially-simplified form of the expression. Result type is passed in.
+    // Asserts if the expressions do not typecheck or are otherwise invalid.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            std::unique_ptr<Expression> left,
+                                            Operator op,
+                                            std::unique_ptr<Expression> right,
+                                            const Type* resultType);
+
+    std::unique_ptr<Expression>& left() {
+        return fLeft;
+    }
+
+    const std::unique_ptr<Expression>& left() const {
+        return fLeft;
+    }
+
+    std::unique_ptr<Expression>& right() {
+        return fRight;
+    }
+
+    const std::unique_ptr<Expression>& right() const {
+        return fRight;
+    }
+
+    Operator getOperator() const {
+        return fOperator;
+    }
+
+    std::unique_ptr<Expression> clone(Position pos) const override;
+
+    std::string description(OperatorPrecedence parentPrecedence) const override;
+
+    /**
+     * If the expression is an assignment like `a = 1` or `a += sin(b)`, returns the
+     * VariableReference that will be written to. For other types of expressions, returns null.
+     * Complex expressions that contain inner assignments, like `(a = b) * 2`, will return null.
+     */
+    VariableReference* isAssignmentIntoVariable();
+
+private:
+    // Debug-only validation: verifies that an assignment target's VariableReference is
+    // flagged as writable. See the SkASSERT in the constructor.
+    static bool CheckRef(const Expression& expr);
+
+    std::unique_ptr<Expression> fLeft;
+    Operator fOperator;
+    std::unique_ptr<Expression> fRight;
+
+    using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBlock.cpp b/gfx/skia/skia/src/sksl/ir/SkSLBlock.cpp
new file mode 100644
index 0000000000..24531e2cf3
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBlock.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLBlock.h"
+
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#include <type_traits>
+
+namespace SkSL {
+
+// Creates a Statement from a statement array, simplifying where possible: braced scopes and
+// blocks carrying populated symbol tables always become real Blocks; otherwise an empty block
+// becomes a Nop and a block wrapping a single meaningful statement collapses to that statement.
+std::unique_ptr<Statement> Block::Make(Position pos,
+                                       StatementArray statements,
+                                       Kind kind,
+                                       std::shared_ptr<SymbolTable> symbols) {
+    // We can't simplify away braces or populated symbol tables.
+    if (kind == Kind::kBracedScope || (symbols && symbols->count())) {
+        return std::make_unique<Block>(pos, std::move(statements), kind, std::move(symbols));
+    }
+
+    // If the Block is completely empty, synthesize a Nop.
+    if (statements.empty()) {
+        return Nop::Make();
+    }
+
+    if (statements.size() > 1) {
+        // The statement array contains multiple statements, but some of those might be no-ops.
+        // If the statement array only contains one real statement, we can return that directly and
+        // avoid creating an additional Block node.
+        std::unique_ptr<Statement>* foundStatement = nullptr;
+        for (std::unique_ptr<Statement>& stmt : statements) {
+            if (!stmt->isEmpty()) {
+                if (!foundStatement) {
+                    // We found a single non-empty statement. Remember it and keep looking.
+                    foundStatement = &stmt;
+                    continue;
+                }
+                // We found more than one non-empty statement. We actually do need a Block.
+                return std::make_unique<Block>(pos, std::move(statements), kind,
+                                               /*symbols=*/nullptr);
+            }
+        }
+
+        // The array wrapped one valid Statement. Avoid allocating a Block by returning it directly.
+        if (foundStatement) {
+            return std::move(*foundStatement);
+        }
+
+        // The statement array contained nothing but empty statements!
+        // In this case, we don't actually need to allocate a Block.
+        // We can just return one of those empty statements. Fall through to...
+    }
+
+    // Exactly one wrapped statement (possibly an empty one): return it directly.
+    return std::move(statements.front());
+}
+
+// Always allocates a real Block node (never simplified away), for callers that specifically
+// require a Block -- e.g. a function body.
+std::unique_ptr<Block> Block::MakeBlock(Position pos,
+                                        StatementArray statements,
+                                        Kind kind,
+                                        std::shared_ptr<SymbolTable> symbols) {
+    // Nothing to optimize here--eliminating empty statements doesn't actually improve the generated
+    // code, and we promise to return a Block.
+    return std::make_unique<Block>(pos, std::move(statements), kind, std::move(symbols));
+}
+
+// Deep-copies the block: each child statement is cloned into a pre-reserved array, and the
+// symbol table is passed through SymbolTable::WrapIfBuiltin before being handed to the copy.
+std::unique_ptr<Statement> Block::clone() const {
+    StatementArray cloned;
+    cloned.reserve_back(this->children().size());
+    for (const std::unique_ptr<Statement>& stmt : this->children()) {
+        cloned.push_back(stmt->clone());
+    }
+    return std::make_unique<Block>(fPosition,
+                                   std::move(cloned),
+                                   fBlockKind,
+                                   SymbolTable::WrapIfBuiltin(this->symbolTable()));
+}
+
+// Emits the block as text: child statements separated by newlines, wrapped in `{ }` when the
+// block is a real scope -- or when it is empty, so the printed output stays syntactically valid.
+std::string Block::description() const {
+    std::string result;
+
+    // Write scope markers if this block is a scope, or if the block is empty (since we need to emit
+    // something here to make the code valid).
+    bool isScope = this->isScope() || this->isEmpty();
+    if (isScope) {
+        result += "{";
+    }
+    for (const std::unique_ptr<Statement>& stmt : this->children()) {
+        result += "\n";
+        result += stmt->description();
+    }
+    result += isScope ? "\n}\n" : "\n";
+    return result;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBlock.h b/gfx/skia/skia/src/sksl/ir/SkSLBlock.h
new file mode 100644
index 0000000000..7bdfdf8bf7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBlock.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BLOCK
+#define SKSL_BLOCK
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class SymbolTable;
+
+/**
+ * A block of multiple statements functioning as a single statement. Depending on its Kind,
+ * a Block may be a true language-level scope or merely a container used to pass several
+ * statements around as one unit.
+ */
+class Block final : public Statement {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kBlock;
+
+    // "kBracedScope" represents an actual language-level block. Other kinds of block are used to
+    // pass around multiple statements as if they were a single unit, with no semantic impact.
+    enum class Kind {
+        kUnbracedBlock,      // Represents a group of statements without curly braces.
+        kBracedScope,        // Represents a language-level Block, with curly braces.
+        kCompoundStatement,  // A block which conceptually represents a single statement, such as
+                             // `int a, b;`. (SkSL represents this internally as two statements:
+                             // `int a; int b;`) Allowed to optimize away to its interior Statement.
+                             // Treated as a single statement by the debugger.
+    };
+
+    Block(Position pos, StatementArray statements,
+          Kind kind = Kind::kBracedScope, const std::shared_ptr<SymbolTable> symbols = nullptr)
+        : INHERITED(pos, kIRNodeKind)
+        , fChildren(std::move(statements))
+        , fBlockKind(kind)
+        , fSymbolTable(std::move(symbols)) {}
+
+    // Make is allowed to simplify compound statements. For a single-statement unscoped Block,
+    // Make can return the Statement as-is. For an empty unscoped Block, Make can return Nop.
+    static std::unique_ptr<Statement> Make(Position pos,
+                                           StatementArray statements,
+                                           Kind kind = Kind::kBracedScope,
+                                           std::shared_ptr<SymbolTable> symbols = nullptr);
+
+    // MakeBlock always makes a real Block object. This is important because many callers rely on
+    // Blocks specifically; e.g. a function body must be a scoped Block, nothing else will do.
+    static std::unique_ptr<Block> MakeBlock(Position pos,
+                                            StatementArray statements,
+                                            Kind kind = Kind::kBracedScope,
+                                            std::shared_ptr<SymbolTable> symbols = nullptr);
+
+    const StatementArray& children() const {
+        return fChildren;
+    }
+
+    StatementArray& children() {
+        return fChildren;
+    }
+
+    // True only for language-level (braced) scopes.
+    bool isScope() const {
+        return fBlockKind == Kind::kBracedScope;
+    }
+
+    Kind blockKind() const {
+        return fBlockKind;
+    }
+
+    void setBlockKind(Kind kind) {
+        fBlockKind = kind;
+    }
+
+    std::shared_ptr<SymbolTable> symbolTable() const {
+        return fSymbolTable;
+    }
+
+    // A block is empty iff every child statement is empty; a block containing only Nops
+    // therefore counts as empty.
+    bool isEmpty() const override {
+        for (const std::unique_ptr<Statement>& stmt : this->children()) {
+            if (!stmt->isEmpty()) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    std::unique_ptr<Statement> clone() const override;
+
+    std::string description() const override;
+
+private:
+    StatementArray fChildren;
+    Kind fBlockKind;
+    // Symbols declared within this block; may be null when the block owns no symbols.
+    std::shared_ptr<SymbolTable> fSymbolTable;
+
+    using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h
new file mode 100644
index 0000000000..96cb700b14
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLBreakStatement.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_BREAKSTATEMENT
+#define SKSL_BREAKSTATEMENT
+
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A 'break' statement. Carries no payload beyond its source position, so Make/clone are
+ * trivial allocations.
+ */
+class BreakStatement final : public Statement {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kBreak;
+
+    BreakStatement(Position pos)
+        : INHERITED(pos, kIRNodeKind) {}
+
+    static std::unique_ptr<Statement> Make(Position pos) {
+        return std::make_unique<BreakStatement>(pos);
+    }
+
+    std::unique_ptr<Statement> clone() const override {
+        return std::make_unique<BreakStatement>(fPosition);
+    }
+
+    std::string description() const override {
+        return "break;";
+    }
+
+private:
+    using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLChildCall.cpp b/gfx/skia/skia/src/sksl/ir/SkSLChildCall.cpp
new file mode 100644
index 0000000000..e20a577051
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLChildCall.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLChildCall.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+namespace SkSL {
+
+// Deep-copies the call: the argument list is cloned; the child variable and result type
+// pointers are shared with the original.
+std::unique_ptr<Expression> ChildCall::clone(Position pos) const {
+    return std::make_unique<ChildCall>(pos, &this->type(), &this->child(),
+                                       this->arguments().clone());
+}
+
+// Formats the call as `childName.eval(arg1, arg2, ...)`.
+std::string ChildCall::description(OperatorPrecedence) const {
+    std::string result = std::string(this->child().name()) + ".eval(";
+    auto separator = SkSL::String::Separator();
+    for (const std::unique_ptr<Expression>& arg : this->arguments()) {
+        result += separator();
+        result += arg->description(OperatorPrecedence::kSequence);
+    }
+    result += ")";
+    return result;
+}
+
+// Debug-only helper: verifies that the argument count and types exactly match the signature
+// implied by the child's kind (blender: half4,half4 / colorFilter: half4 / shader: float2).
+// [[maybe_unused]] because its only call site is inside SkASSERT, which compiles away in
+// release builds.
+[[maybe_unused]] static bool call_signature_is_valid(const Context& context,
+                                                     const Variable& child,
+                                                     const ExpressionArray& arguments) {
+    const Type* half4 = context.fTypes.fHalf4.get();
+    const Type* float2 = context.fTypes.fFloat2.get();
+
+    // Select the expected parameter list for this child kind.
+    auto params = [&]() -> SkSTArray<2, const Type*> {
+        switch (child.type().typeKind()) {
+            case Type::TypeKind::kBlender:     return { half4, half4 };
+            case Type::TypeKind::kColorFilter: return { half4 };
+            case Type::TypeKind::kShader:      return { float2 };
+            default:
+                SkUNREACHABLE;
+        }
+    }();
+
+    if (params.size() != arguments.size()) {
+        return false;
+    }
+    // Each argument type must match (not merely coerce to) the expected parameter type.
+    for (int i = 0; i < arguments.size(); i++) {
+        if (!arguments[i]->type().matches(*params[i])) {
+            return false;
+        }
+    }
+    return true;
+}
+
+// Creates a call to a child effect. The signature is validated only via SkASSERT (debug
+// builds); callers are responsible for supplying well-formed arguments.
+std::unique_ptr<Expression> ChildCall::Make(const Context& context,
+                                            Position pos,
+                                            const Type* returnType,
+                                            const Variable& child,
+                                            ExpressionArray arguments) {
+    SkASSERT(call_signature_is_valid(context, child, arguments));
+    return std::make_unique<ChildCall>(pos, returnType, &child, std::move(arguments));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLChildCall.h b/gfx/skia/skia/src/sksl/ir/SkSLChildCall.h
new file mode 100644
index 0000000000..7d48a84d58
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLChildCall.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CHILDCALL
+#define SKSL_CHILDCALL
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+class Variable;
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * A call to a child effect object (shader, color filter, or blender). Holds a reference to
+ * the child variable plus the (owned) argument expressions.
+ */
+class ChildCall final : public Expression {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kChildCall;
+
+    ChildCall(Position pos, const Type* type, const Variable* child, ExpressionArray arguments)
+        : INHERITED(pos, kIRNodeKind, type)
+        , fChild(*child)
+        , fArguments(std::move(arguments)) {}
+
+    // Creates the child call; reports errors via ASSERT.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            const Type* returnType,
+                                            const Variable& child,
+                                            ExpressionArray arguments);
+
+    const Variable& child() const {
+        return fChild;
+    }
+
+    ExpressionArray& arguments() {
+        return fArguments;
+    }
+
+    const ExpressionArray& arguments() const {
+        return fArguments;
+    }
+
+    std::unique_ptr<Expression> clone(Position pos) const override;
+
+    std::string description(OperatorPrecedence) const override;
+
+private:
+    // Non-owning reference: the child variable is declared elsewhere in the program.
+    const Variable& fChild;
+    ExpressionArray fArguments;
+
+    using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructor.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructor.cpp
new file mode 100644
index 0000000000..5b505a6584
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructor.cpp
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2020 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructor.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLConstructorArray.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorCompoundCast.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorMatrixResize.h"
+#include "src/sksl/ir/SkSLConstructorScalarCast.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLConstructorStruct.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <vector>
+
+namespace SkSL {
+
+// Converts a constructor call for a vector or matrix type. Single-argument forms get special
+// treatment (splat, diagonal matrix, compound cast, matrix resize, 2x2-matrix-to-vec4);
+// otherwise each argument is coerced to the component type and the total scalar count is
+// checked. Reports errors via context.fErrors and returns null on failure.
+static std::unique_ptr<Expression> convert_compound_constructor(const Context& context,
+                                                                Position pos,
+                                                                const Type& type,
+                                                                ExpressionArray args) {
+    SkASSERT(type.isVector() || type.isMatrix());
+
+    // The meaning of a compound constructor containing a single argument varies significantly in
+    // GLSL/SkSL, depending on the argument type.
+    if (args.size() == 1) {
+        std::unique_ptr<Expression>& argument = args.front();
+        if (type.isVector() && argument->type().isVector() &&
+            argument->type().componentType().matches(type.componentType()) &&
+            argument->type().slotCount() > type.slotCount()) {
+            // Casting a vector-type into a smaller matching vector-type is a slice in GLSL.
+            // We don't allow those casts in SkSL; recommend a swizzle instead.
+            // Only `.xy` and `.xyz` are valid recommendations here, because `.x` would imply a
+            // scalar(vector) cast, and nothing has more slots than `.xyzw`.
+            const char* swizzleHint;
+            switch (type.slotCount()) {
+                case 2:  swizzleHint = "; use '.xy' instead"; break;
+                case 3:  swizzleHint = "; use '.xyz' instead"; break;
+                default: swizzleHint = ""; SkDEBUGFAIL("unexpected slicing cast"); break;
+            }
+
+            context.fErrors->error(pos, "'" + argument->type().displayName() +
+                    "' is not a valid parameter to '" + type.displayName() + "' constructor" +
+                    swizzleHint);
+            return nullptr;
+        }
+
+        if (argument->type().isScalar()) {
+            // A constructor containing a single scalar is a splat (for vectors) or diagonal matrix
+            // (for matrices). It's legal regardless of the scalar's type, so synthesize an explicit
+            // conversion to the proper type. (This cast is a no-op if it's unnecessary; it can fail
+            // if we're casting a literal that exceeds the limits of the type.)
+            std::unique_ptr<Expression> typecast = ConstructorScalarCast::Convert(
+                    context, pos, type.componentType(), std::move(args));
+            if (!typecast) {
+                return nullptr;
+            }
+
+            // Matrix-from-scalar creates a diagonal matrix; vector-from-scalar creates a splat.
+            return type.isMatrix()
+                    ? ConstructorDiagonalMatrix::Make(context, pos, type, std::move(typecast))
+                    : ConstructorSplat::Make(context, pos, type, std::move(typecast));
+        } else if (argument->type().isVector()) {
+            // A vector constructor containing a single vector with the same number of columns is a
+            // cast (e.g. float3 -> int3).
+            if (type.isVector() && argument->type().columns() == type.columns()) {
+                return ConstructorCompoundCast::Make(context, pos, type, std::move(argument));
+            }
+        } else if (argument->type().isMatrix()) {
+            // A matrix constructor containing a single matrix can be a resize, typecast, or both.
+            // GLSL lumps these into one category, but internally SkSL keeps them distinct.
+            if (type.isMatrix()) {
+                // First, handle type conversion. If the component types differ, synthesize the
+                // destination type with the argument's rows/columns. (This will be a no-op if it's
+                // already the right type.)
+                const Type& typecastType = type.componentType().toCompound(
+                        context,
+                        argument->type().columns(),
+                        argument->type().rows());
+                argument = ConstructorCompoundCast::Make(context, pos, typecastType,
+                                                         std::move(argument));
+
+                // Casting a matrix type into another matrix type is a resize.
+                return ConstructorMatrixResize::Make(context, pos, type,
+                                                     std::move(argument));
+            }
+
+            // A vector constructor containing a single matrix can be compound construction if the
+            // matrix is 2x2 and the vector is 4-slot.
+            if (type.isVector() && type.columns() == 4 && argument->type().slotCount() == 4) {
+                // Casting a 2x2 matrix to a vector is a form of compound construction.
+                // First, reshape the matrix into a 4-slot vector of the same type.
+                const Type& vectorType = argument->type().componentType().toCompound(context,
+                                                                                    /*columns=*/4,
+                                                                                    /*rows=*/1);
+                std::unique_ptr<Expression> vecCtor =
+                        ConstructorCompound::Make(context, pos, vectorType, std::move(args));
+
+                // Then, add a typecast to the result expression to ensure the types match.
+                // This will be a no-op if no typecasting is needed.
+                return ConstructorCompoundCast::Make(context, pos, type, std::move(vecCtor));
+            }
+        }
+    }
+
+    // For more complex cases, we walk the argument list and fix up the arguments as needed.
+    int expected = type.rows() * type.columns();
+    int actual = 0;
+    for (std::unique_ptr<Expression>& arg : args) {
+        if (!arg->type().isScalar() && !arg->type().isVector()) {
+            context.fErrors->error(pos, "'" + arg->type().displayName() +
+                    "' is not a valid parameter to '" + type.displayName() + "' constructor");
+            return nullptr;
+        }
+
+        // Rely on Constructor::Convert to force this subexpression to the proper type. If it's a
+        // literal, this will make sure it's the right type of literal. If an expression of matching
+        // type, the expression will be returned as-is. If it's an expression of mismatched type,
+        // this adds a cast.
+        const Type& ctorType = type.componentType().toCompound(context, arg->type().columns(),
+                                                               /*rows=*/1);
+        ExpressionArray ctorArg;
+        ctorArg.push_back(std::move(arg));
+        arg = Constructor::Convert(context, pos, ctorType, std::move(ctorArg));
+        if (!arg) {
+            return nullptr;
+        }
+        actual += ctorType.columns();
+    }
+
+    // The coerced arguments must supply exactly one scalar per slot of the constructed type.
+    if (actual != expected) {
+        context.fErrors->error(pos, "invalid arguments to '" + type.displayName() +
+                "' constructor (expected " + std::to_string(expected) +
+                " scalars, but found " + std::to_string(actual) + ")");
+        return nullptr;
+    }
+
+    return ConstructorCompound::Make(context, pos, type, std::move(args));
+}
+
+// Entry point for constructor conversion: dispatches on the constructed type (scalar,
+// vector/matrix, array, struct). Zero-length arrays and field-less structs fall through to
+// the "cannot construct" error. Reports errors via context.fErrors; returns null on failure.
+std::unique_ptr<Expression> Constructor::Convert(const Context& context,
+                                                 Position pos,
+                                                 const Type& type,
+                                                 ExpressionArray args) {
+    if (args.size() == 1 && args[0]->type().matches(type) && !type.componentType().isOpaque()) {
+        // Don't generate redundant casts; if the expression is already of the correct type, just
+        // return it as-is.
+        args[0]->fPosition = pos;
+        return std::move(args[0]);
+    }
+    if (type.isScalar()) {
+        return ConstructorScalarCast::Convert(context, pos, type, std::move(args));
+    }
+    if (type.isVector() || type.isMatrix()) {
+        return convert_compound_constructor(context, pos, type, std::move(args));
+    }
+    if (type.isArray() && type.columns() > 0) {
+        return ConstructorArray::Convert(context, pos, type, std::move(args));
+    }
+    if (type.isStruct() && type.fields().size() > 0) {
+        return ConstructorStruct::Convert(context, pos, type, std::move(args));
+    }
+
+    context.fErrors->error(pos, "cannot construct '" + type.displayName() + "'");
+    return nullptr;
+}
+
+// Returns the constant value occupying slot `n` of the constructed value, by walking the
+// argument list until the argument containing that slot is found and delegating to it.
+// The debug-fail path is reached only if the arguments' combined slot count is smaller than
+// the constructor type's slot count (a malformed node).
+std::optional<double> AnyConstructor::getConstantValue(int n) const {
+    SkASSERT(n >= 0 && n < (int)this->type().slotCount());
+    for (const std::unique_ptr<Expression>& arg : this->argumentSpan()) {
+        int argSlots = arg->type().slotCount();
+        if (n < argSlots) {
+            return arg->getConstantValue(n);
+        }
+        n -= argSlots;
+    }
+
+    SkDEBUGFAIL("argument-list slot count doesn't match constructor-type slot count");
+    return std::nullopt;
+}
+
+// Compares two constant expressions slot-by-slot. Returns kUnknown if the other expression
+// doesn't support constant values or if any slot on either side lacks a known value;
+// kNotEqual on the first differing slot; kEqual when all slots match.
+Expression::ComparisonResult AnyConstructor::compareConstant(const Expression& other) const {
+    SkASSERT(this->type().slotCount() == other.type().slotCount());
+
+    if (!other.supportsConstantValues()) {
+        return ComparisonResult::kUnknown;
+    }
+
+    int exprs = this->type().slotCount();
+    for (int n = 0; n < exprs; ++n) {
+        // Get the n'th subexpression from each side. If either one is null, return "unknown."
+        std::optional<double> left = this->getConstantValue(n);
+        if (!left.has_value()) {
+            return ComparisonResult::kUnknown;
+        }
+        std::optional<double> right = other.getConstantValue(n);
+        if (!right.has_value()) {
+            return ComparisonResult::kUnknown;
+        }
+        // Both sides are known and can be compared for equality directly.
+        if (*left != *right) {
+            return ComparisonResult::kNotEqual;
+        }
+    }
+    return ComparisonResult::kEqual;
+}
+
+// Checked downcasts from Expression to AnyConstructor: asserted in debug builds, then a
+// plain static_cast (no RTTI). Callers must already know the expression is a constructor.
+AnyConstructor& Expression::asAnyConstructor() {
+    SkASSERT(this->isAnyConstructor());
+    return static_cast<AnyConstructor&>(*this);
+}
+
+const AnyConstructor& Expression::asAnyConstructor() const {
+    SkASSERT(this->isAnyConstructor());
+    return static_cast<const AnyConstructor&>(*this);
+}
+
+// Formats the constructor as `TypeName(arg1, arg2, ...)`.
+std::string AnyConstructor::description(OperatorPrecedence) const {
+    std::string result = this->type().description() + "(";
+    auto separator = SkSL::String::Separator();
+    for (const std::unique_ptr<Expression>& arg : this->argumentSpan()) {
+        result += separator();
+        result += arg->description(OperatorPrecedence::kSequence);
+    }
+    result.push_back(')');
+    return result;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h
new file mode 100644
index 0000000000..7b3616e7bf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructor.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR
+#define SKSL_CONSTRUCTOR
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLDefines.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <memory>
+#include <optional>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * Base class representing a constructor with unknown arguments.
+ */
+class AnyConstructor : public Expression {
+public:
+    AnyConstructor(Position pos, Kind kind, const Type* type)
+            : INHERITED(pos, kind, type) {}
+
+    // Subclasses expose their argument(s) as a contiguous span, regardless of arity.
+    virtual SkSpan<std::unique_ptr<Expression>> argumentSpan() = 0;
+    virtual SkSpan<const std::unique_ptr<Expression>> argumentSpan() const = 0;
+
+    std::string description(OperatorPrecedence) const override;
+
+    // The scalar type of each slot (e.g. `float` for a `float4` constructor).
+    const Type& componentType() const {
+        return this->type().componentType();
+    }
+
+    // Constructors can report per-slot compile-time constant values when their arguments can.
+    bool supportsConstantValues() const override { return true; }
+    std::optional<double> getConstantValue(int n) const override;
+
+    ComparisonResult compareConstant(const Expression& other) const override;
+
+private:
+    using INHERITED = Expression;
+};
+
+/**
+ * Base class representing a constructor that takes a single argument.
+ */
+class SingleArgumentConstructor : public AnyConstructor {
+public:
+    SingleArgumentConstructor(Position pos, Kind kind, const Type* type,
+                              std::unique_ptr<Expression> argument)
+            : INHERITED(pos, kind, type)
+            , fArgument(std::move(argument)) {}
+
+    // Mutable access to the lone argument expression.
+    std::unique_ptr<Expression>& argument() {
+        return fArgument;
+    }
+
+    const std::unique_ptr<Expression>& argument() const {
+        return fArgument;
+    }
+
+    // Exposes the single argument as a one-element span (see AnyConstructor::argumentSpan).
+    SkSpan<std::unique_ptr<Expression>> argumentSpan() final {
+        return {&fArgument, 1};
+    }
+
+    SkSpan<const std::unique_ptr<Expression>> argumentSpan() const final {
+        return {&fArgument, 1};
+    }
+
+private:
+    std::unique_ptr<Expression> fArgument;
+
+    using INHERITED = AnyConstructor;
+};
+
+/**
+ * Base class representing a constructor that takes an array of arguments.
+ */
+class MultiArgumentConstructor : public AnyConstructor {
+public:
+    MultiArgumentConstructor(Position pos, Kind kind, const Type* type,
+                             ExpressionArray arguments)
+            : INHERITED(pos, kind, type)
+            , fArguments(std::move(arguments)) {}
+
+    ExpressionArray& arguments() {
+        return fArguments;
+    }
+
+    const ExpressionArray& arguments() const {
+        return fArguments;
+    }
+
+    // NOTE(review): `front()` presumes at least one argument; subclasses appear to guarantee a
+    // non-empty argument list (e.g. array constructors require columns() elements) — confirm no
+    // caller can create an empty multi-argument constructor.
+    SkSpan<std::unique_ptr<Expression>> argumentSpan() final {
+        return {&fArguments.front(), fArguments.size()};
+    }
+
+    SkSpan<const std::unique_ptr<Expression>> argumentSpan() const final {
+        return {&fArguments.front(), fArguments.size()};
+    }
+
+private:
+    ExpressionArray fArguments;
+
+    using INHERITED = AnyConstructor;
+};
+
+/**
+ * Converts any GLSL constructor, such as `float2(x, y)` or `mat3x3(otherMat)` or `int[2](0, i)`, to
+ * an SkSL expression.
+ *
+ * Vector constructors must always consist of either exactly 1 scalar, or a collection of vectors
+ * and scalars totaling exactly the right number of scalar components.
+ *
+ * Matrix constructors must always consist of either exactly 1 scalar, exactly 1 matrix, or a
+ * collection of vectors and scalars totaling exactly the right number of scalar components.
+ *
+ * Array constructors must always contain the proper number of array elements (matching the Type).
+ */
+namespace Constructor {
+    // Creates, typechecks and simplifies constructor expressions. Reports errors via the
+    // ErrorReporter. This can return null on error, so be careful. There are several different
+    // Constructor expression types; this function chooses the proper one based on context, e.g.
+    // `ConstructorCompound`, `ConstructorScalarCast`, or `ConstructorMatrixResize`.
+    std::unique_ptr<Expression> Convert(const Context& context,
+                                        Position pos,
+                                        const Type& type,
+                                        ExpressionArray args);
+}
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorArray.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorArray.cpp
new file mode 100644
index 0000000000..f88098cf3a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorArray.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorArray.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLString.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLConstructorArrayCast.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <algorithm>
+#include <string>
+
+namespace SkSL {
+
+// Typechecks and creates an array-constructor expression such as `int[3](a, b, c)`.
+// Returns null (after reporting via fErrors) when construction is invalid: strict-ES2 mode,
+// atomic element types, a wrong argument count, or arguments that cannot be coerced to the
+// array's component type. A single same-sized-array argument becomes an array cast instead.
+std::unique_ptr<Expression> ConstructorArray::Convert(const Context& context,
+                                                      Position pos,
+                                                      const Type& type,
+                                                      ExpressionArray args) {
+    SkASSERTF(type.isArray() && type.columns() > 0, "%s", type.description().c_str());
+
+    // ES2 doesn't support first-class array types.
+    if (context.fConfig->strictES2Mode()) {
+        context.fErrors->error(pos, "construction of array type '" + type.displayName() +
+                                    "' is not supported");
+        return nullptr;
+    }
+
+    // An array of atomics cannot be constructed.
+    if (type.isOrContainsAtomic()) {
+        context.fErrors->error(
+                pos,
+                String::printf("construction of array type '%s' with atomic member is not allowed",
+                               type.displayName().c_str()));
+        return nullptr;
+    }
+
+    // If there is a single argument containing an array of matching size and the types are
+    // coercible, this is actually a cast. i.e., `half[10](myFloat10Array)`. This isn't a GLSL
+    // feature, but the Pipeline stage code generator needs this functionality so that code which
+    // was originally compiled with "allow narrowing conversions" enabled can be later recompiled
+    // without narrowing conversions (we patch over these conversions with an explicit cast).
+    if (args.size() == 1) {
+        const Expression& expr = *args.front();
+        const Type& exprType = expr.type();
+
+        if (exprType.isArray() && exprType.canCoerceTo(type, /*allowNarrowing=*/true)) {
+            return ConstructorArrayCast::Make(context, pos, type, std::move(args.front()));
+        }
+    }
+
+    // Check that the number of constructor arguments matches the array size.
+    if (type.columns() != args.size()) {
+        context.fErrors->error(pos, String::printf("invalid arguments to '%s' constructor "
+                                                   "(expected %d elements, but found %d)",
+                                                   type.displayName().c_str(), type.columns(),
+                                                   args.size()));
+        return nullptr;
+    }
+
+    // Convert each constructor argument to the array's component type.
+    const Type& baseType = type.componentType();
+    for (std::unique_ptr<Expression>& argument : args) {
+        argument = baseType.coerceExpression(std::move(argument), context);
+        // coerceExpression reports its own error; bail out on failure.
+        if (!argument) {
+            return nullptr;
+        }
+    }
+
+    return ConstructorArray::Make(context, pos, type, std::move(args));
+}
+
+// Unchecked factory: asserts (rather than reporting) that the arguments are already valid.
+// Convert() is the error-checked entry point; Make() is for callers that have pre-validated.
+std::unique_ptr<Expression> ConstructorArray::Make(const Context& context,
+                                                   Position pos,
+                                                   const Type& type,
+                                                   ExpressionArray args) {
+    SkASSERT(!context.fConfig->strictES2Mode());
+    SkASSERT(type.isAllowedInES2(context));
+    SkASSERT(type.columns() == args.size());
+    SkASSERT(!type.isOrContainsAtomic());
+    // Every element must already exactly match the array's component type.
+    SkASSERT(std::all_of(args.begin(), args.end(), [&](const std::unique_ptr<Expression>& arg) {
+        return type.componentType().matches(arg->type());
+    }));
+
+    return std::make_unique<ConstructorArray>(pos, type, std::move(args));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorArray.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorArray.h
new file mode 100644
index 0000000000..8dfdb34db1
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorArray.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_ARRAY
+#define SKSL_CONSTRUCTOR_ARRAY
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+
+/**
+ * Represents the construction of an array type, such as "float[5](x, y, z, w, 1)".
+ */
+class ConstructorArray final : public MultiArgumentConstructor {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kConstructorArray;
+
+    ConstructorArray(Position pos, const Type& type, ExpressionArray arguments)
+            : INHERITED(pos, kIRNodeKind, &type, std::move(arguments)) {}
+
+    // ConstructorArray::Convert will typecheck and create array-constructor expressions.
+    // Reports errors via the ErrorReporter; returns null on error.
+    static std::unique_ptr<Expression> Convert(const Context& context,
+                                               Position pos,
+                                               const Type& type,
+                                               ExpressionArray args);
+
+    // ConstructorArray::Make creates array-constructor expressions; errors reported via SkASSERT.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            const Type& type,
+                                            ExpressionArray args);
+
+    // Deep-copies the node, re-homing it at `pos`.
+    std::unique_ptr<Expression> clone(Position pos) const override {
+        return std::make_unique<ConstructorArray>(pos, this->type(), this->arguments().clone());
+    }
+
+private:
+    using INHERITED = MultiArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorArrayCast.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorArrayCast.cpp
new file mode 100644
index 0000000000..a5f9039eb0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorArrayCast.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorArrayCast.h"
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/ir/SkSLConstructorArray.h"
+#include "src/sksl/ir/SkSLConstructorCompoundCast.h"
+#include "src/sksl/ir/SkSLConstructorScalarCast.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+// Rewrites a compile-time-constant ConstructorArray to `destType` by wrapping each element in
+// the appropriate cast (scalar cast for scalar elements, compound cast otherwise). The input
+// constructor is consumed; its arguments are moved into the new constructor.
+static std::unique_ptr<Expression> cast_constant_array(const Context& context,
+                                                       Position pos,
+                                                       const Type& destType,
+                                                       std::unique_ptr<Expression> constCtor) {
+    const Type& scalarType = destType.componentType();
+
+    // Create a ConstructorArray(...) which typecasts each argument inside.
+    auto inputArgs = constCtor->as<ConstructorArray>().argumentSpan();
+    ExpressionArray typecastArgs;
+    typecastArgs.reserve_back(inputArgs.size());
+    for (std::unique_ptr<Expression>& arg : inputArgs) {
+        // Capture the position before `arg` is moved from.
+        Position argPos = arg->fPosition;
+        if (arg->type().isScalar()) {
+            typecastArgs.push_back(ConstructorScalarCast::Make(context, argPos, scalarType,
+                                                               std::move(arg)));
+        } else {
+            typecastArgs.push_back(ConstructorCompoundCast::Make(context, argPos, scalarType,
+                                                                 std::move(arg)));
+        }
+    }
+
+    return ConstructorArray::Make(context, pos, destType, std::move(typecastArgs));
+}
+
+// Creates an array-cast expression (e.g. `half[2](myFloat2Array)`). No-op casts return the
+// argument unchanged; compile-time-constant arrays are cast eagerly at compile time.
+std::unique_ptr<Expression> ConstructorArrayCast::Make(const Context& context,
+                                                       Position pos,
+                                                       const Type& type,
+                                                       std::unique_ptr<Expression> arg) {
+    // Only arrays of the same size are allowed.
+    SkASSERT(type.isArray());
+    SkASSERT(type.isAllowedInES2(context));
+    SkASSERT(arg->type().isArray());
+    SkASSERT(type.columns() == arg->type().columns());
+
+    // If this is a no-op cast, return the expression as-is.
+    if (type.matches(arg->type())) {
+        arg->fPosition = pos;
+        return arg;
+    }
+
+    // Look up the value of constant variables. This allows constant-expressions like `myArray` to
+    // be replaced with the compile-time constant `int[2](0, 1)`.
+    arg = ConstantFolder::MakeConstantValueForVariable(pos, std::move(arg));
+
+    // We can cast a vector of compile-time constants at compile-time.
+    if (Analysis::IsCompileTimeConstant(*arg)) {
+        return cast_constant_array(context, pos, type, std::move(arg));
+    }
+    return std::make_unique<ConstructorArrayCast>(pos, type, std::move(arg));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorArrayCast.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorArrayCast.h
new file mode 100644
index 0000000000..7db825142b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorArrayCast.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_ARRAY_CAST
+#define SKSL_CONSTRUCTOR_ARRAY_CAST
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+
+/**
+ * Represents the typecasting of an array. Arrays cannot be directly casted in SkSL (or GLSL), but
+ * type narrowing can cause an array to be implicitly casted. For instance, the expression
+ * `myHalf2Array == float[2](a, b)` should be allowed when narrowing conversions are enabled; this
+ * constructor allows the necessary array-type conversion to be represented in IR.
+ *
+ * These always contain exactly 1 array of matching size, and are never constant.
+ */
+class ConstructorArrayCast final : public SingleArgumentConstructor {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kConstructorArrayCast;
+
+    ConstructorArrayCast(Position pos, const Type& type, std::unique_ptr<Expression> arg)
+            : INHERITED(pos, kIRNodeKind, &type, std::move(arg)) {}
+
+    // Factory; see SkSLConstructorArrayCast.cpp. No-op casts are elided, constant arrays are
+    // cast at compile time.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            const Type& type,
+                                            std::unique_ptr<Expression> arg);
+
+    // Deep-copies the node, re-homing it at `pos`.
+    std::unique_ptr<Expression> clone(Position pos) const override {
+        return std::make_unique<ConstructorArrayCast>(pos, this->type(), argument()->clone());
+    }
+
+private:
+    using INHERITED = SingleArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompound.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompound.cpp
new file mode 100644
index 0000000000..06bbd8a6d8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompound.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <numeric>
+#include <string>
+
+namespace SkSL {
+
+// Returns true when `type(arg)` is a no-op wrapper that can be replaced by `arg` itself:
+// either a scalar "compound" of a single matching scalar, or a vector constructor whose single
+// argument already has exactly the target type.
+static bool is_safe_to_eliminate(const Type& type, const Expression& arg) {
+    if (type.isScalar()) {
+        // A scalar "compound type" with a single scalar argument is a no-op and can be eliminated.
+        // (Pedantically, this isn't a compound at all, but it's harmless to allow and simplifies
+        // call sites which need to narrow a vector and may sometimes end up with a scalar.)
+        SkASSERTF(arg.type().matches(type), "Creating type '%s' from '%s'",
+                  type.description().c_str(), arg.type().description().c_str());
+        return true;
+    }
+    if (type.isVector() && arg.type().matches(type)) {
+        // A vector compound constructor containing a single argument of matching type can trivially
+        // be eliminated.
+        return true;
+    }
+    // This is a meaningful single-argument compound constructor (e.g. vector-from-matrix,
+    // matrix-from-vector).
+    return false;
+}
+
+// If every argument is the same scalar expression (either directly, or via a splat of that
+// scalar), returns that expression so the whole compound can be reduced to a single splat;
+// otherwise returns null. Matrices are never splattable.
+static const Expression* make_splat_from_arguments(const Type& type, const ExpressionArray& args) {
+    // Splats cannot represent a matrix.
+    if (type.isMatrix()) {
+        return nullptr;
+    }
+    const Expression* splatExpression = nullptr;
+    for (int index = 0; index < args.size(); ++index) {
+        // Arguments must only be scalars or a splat constructors (which can only contain scalars).
+        const Expression* expr;
+        if (args[index]->type().isScalar()) {
+            expr = args[index].get();
+        } else if (args[index]->is<ConstructorSplat>()) {
+            expr = args[index]->as<ConstructorSplat>().argument().get();
+        } else {
+            return nullptr;
+        }
+        // On the first iteration, just remember the expression we encountered.
+        if (index == 0) {
+            splatExpression = expr;
+            continue;
+        }
+        // On subsequent iterations, ensure that the expression we found matches the first one.
+        // (Note that IsSameExpressionTree will always reject an Expression with side effects.)
+        if (!Analysis::IsSameExpressionTree(*expr, *splatExpression)) {
+            return nullptr;
+        }
+    }
+
+    return splatExpression;
+}
+
+// Creates a vector/matrix compound constructor. Performs several simplifications:
+// eliminates no-op single-argument wrappers, flattens nested compounds (when optimizing),
+// folds constant variables into their values, and reduces repeated-scalar compounds to splats.
+std::unique_ptr<Expression> ConstructorCompound::Make(const Context& context,
+                                                      Position pos,
+                                                      const Type& type,
+                                                      ExpressionArray args) {
+    SkASSERT(type.isAllowedInES2(context));
+
+    // All the arguments must have matching component type.
+    SkASSERT(std::all_of(args.begin(), args.end(), [&](const std::unique_ptr<Expression>& arg) {
+        const Type& argType = arg->type();
+        return (argType.isScalar() || argType.isVector() || argType.isMatrix()) &&
+               (argType.componentType().matches(type.componentType()));
+    }));
+
+    // The slot count of the combined argument list must match the composite type's slot count.
+    SkASSERT(type.slotCount() ==
+             std::accumulate(args.begin(), args.end(), /*initial value*/ (size_t)0,
+                             [](size_t n, const std::unique_ptr<Expression>& arg) {
+                                 return n + arg->type().slotCount();
+                             }));
+    // No-op compound constructors (containing a single argument of the same type) are eliminated.
+    // (Even though this is a "compound constructor," we let scalars pass through here; it's
+    // harmless to allow and simplifies call sites which need to narrow a vector and may sometimes
+    // end up with a scalar.)
+    if (args.size() == 1 && is_safe_to_eliminate(type, *args.front())) {
+        args.front()->fPosition = pos;
+        return std::move(args.front());
+    }
+    // Beyond this point, the type must be a vector or matrix.
+    SkASSERT(type.isVector() || type.isMatrix());
+
+    if (context.fConfig->fSettings.fOptimize) {
+        // Find ConstructorCompounds embedded inside other ConstructorCompounds and flatten them.
+        //   -  float4(float2(1, 2), 3, 4)                -->  float4(1, 2, 3, 4)
+        //   -  float4(w, float3(sin(x), cos(y), tan(z))) -->  float4(w, sin(x), cos(y), tan(z))
+        //   -  mat2(float2(a, b), float2(c, d))          -->  mat2(a, b, c, d)
+
+        // See how many fields we would have if composite constructors were flattened out.
+        int fields = 0;
+        for (const std::unique_ptr<Expression>& arg : args) {
+            fields += arg->is<ConstructorCompound>()
+                              ? arg->as<ConstructorCompound>().arguments().size()
+                              : 1;
+        }
+
+        // If we added up more fields than we're starting with, we found at least one input that can
+        // be flattened out.
+        if (fields > args.size()) {
+            ExpressionArray flattened;
+            flattened.reserve_back(fields);
+            for (std::unique_ptr<Expression>& arg : args) {
+                // For non-ConstructorCompound fields, move them over as-is.
+                if (!arg->is<ConstructorCompound>()) {
+                    flattened.push_back(std::move(arg));
+                    continue;
+                }
+                // For ConstructorCompound fields, move over their inner arguments individually.
+                ConstructorCompound& compositeCtor = arg->as<ConstructorCompound>();
+                for (std::unique_ptr<Expression>& innerArg : compositeCtor.arguments()) {
+                    flattened.push_back(std::move(innerArg));
+                }
+            }
+            args = std::move(flattened);
+        }
+    }
+
+    // Replace constant variables with their corresponding values, so `float2(one, two)` can
+    // compile down to `float2(1.0, 2.0)` (the latter is a compile-time constant).
+    for (std::unique_ptr<Expression>& arg : args) {
+        arg = ConstantFolder::MakeConstantValueForVariable(pos, std::move(arg));
+    }
+
+    if (context.fConfig->fSettings.fOptimize) {
+        // Reduce compound constructors to splats where possible.
+        if (const Expression* splat = make_splat_from_arguments(type, args)) {
+            return ConstructorSplat::Make(context, pos, type, splat->clone());
+        }
+    }
+
+    return std::make_unique<ConstructorCompound>(pos, type, std::move(args));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompound.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompound.h
new file mode 100644
index 0000000000..5dfd93f63c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompound.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_COMPOUND
+#define SKSL_CONSTRUCTOR_COMPOUND
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+
+/**
+ * Represents a vector or matrix that is composed from other expressions, such as
+ * `half3(pos.xy, 1)` or `mat3(a.xyz, b.xyz, 0, 0, 1)`
+ *
+ * These can contain a mix of scalars and aggregates. The total number of scalar values inside the
+ * constructor must always match the type's slot count. (e.g. `pos.xy` consumes two slots.)
+ * The inner values must have the same component type as the vector/matrix.
+ */
+class ConstructorCompound final : public MultiArgumentConstructor {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kConstructorCompound;
+
+    ConstructorCompound(Position pos, const Type& type, ExpressionArray args)
+            : INHERITED(pos, kIRNodeKind, &type, std::move(args)) {}
+
+    // Simplifying factory; see SkSLConstructorCompound.cpp for the folding it performs.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            const Type& type,
+                                            ExpressionArray args);
+
+    // Deep-copies the node, re-homing it at `pos`.
+    std::unique_ptr<Expression> clone(Position pos) const override {
+        return std::make_unique<ConstructorCompound>(pos, this->type(), this->arguments().clone());
+    }
+
+private:
+    using INHERITED = MultiArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompoundCast.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompoundCast.cpp
new file mode 100644
index 0000000000..2e9d9fca4c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompoundCast.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorCompoundCast.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorScalarCast.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstddef>
+#include <optional>
+
+namespace SkSL {
+
+// Casts a compile-time-constant vector/matrix constructor to `destType` at compile time.
+// Splats and diagonal matrices are re-wrapped specially to keep the output compact; everything
+// else is expanded into a ConstructorCompound of per-slot literals.
+static std::unique_ptr<Expression> cast_constant_composite(const Context& context,
+                                                           Position pos,
+                                                           const Type& destType,
+                                                           std::unique_ptr<Expression> constCtor) {
+    const Type& scalarType = destType.componentType();
+
+    // We generate nicer code for splats and diagonal matrices by handling them separately instead
+    // of relying on the constant-subexpression code below. This is not truly necessary but it makes
+    // our output look a little better; human beings prefer `half4(0)` to `half4(0, 0, 0, 0)`.
+    if (constCtor->is<ConstructorSplat>()) {
+        // This is a typecast of a splat containing a constant value, e.g. `half4(7)`. We can
+        // replace it with a splat of a different type, e.g. `int4(7)`.
+        ConstructorSplat& splat = constCtor->as<ConstructorSplat>();
+        return ConstructorSplat::Make(
+                context, pos, destType,
+                ConstructorScalarCast::Make(context, pos, scalarType, std::move(splat.argument())));
+    }
+
+    if (constCtor->is<ConstructorDiagonalMatrix>() && destType.isMatrix()) {
+        // This is a typecast of a constant diagonal matrix, e.g. `float3x3(2)`. We can replace it
+        // with a diagonal matrix of a different type, e.g. `half3x3(2)`.
+        ConstructorDiagonalMatrix& matrixCtor = constCtor->as<ConstructorDiagonalMatrix>();
+        return ConstructorDiagonalMatrix::Make(
+                context, pos, destType,
+                ConstructorScalarCast::Make(context, pos, scalarType,
+                                            std::move(matrixCtor.argument())));
+    }
+
+    // Create a compound Constructor(literal, ...) which typecasts each scalar value inside.
+    size_t numSlots = destType.slotCount();
+    SkASSERT(numSlots == constCtor->type().slotCount());
+
+    ExpressionArray typecastArgs;
+    typecastArgs.reserve_back(numSlots);
+    for (size_t index = 0; index < numSlots; ++index) {
+        // The input is a compile-time constant, so every slot value must be known.
+        std::optional<double> slotVal = constCtor->getConstantValue(index);
+        if (scalarType.checkForOutOfRangeLiteral(context, *slotVal, constCtor->fPosition)) {
+            // We've reported an error because the literal is out of range for this type. Zero out
+            // the value to avoid a cascade of errors.
+            *slotVal = 0.0;
+        }
+        typecastArgs.push_back(Literal::Make(pos, *slotVal, &scalarType));
+    }
+
+    return ConstructorCompound::Make(context, pos, destType, std::move(typecastArgs));
+}
+
+// Creates a vector/matrix typecast (e.g. `half3(myInt3)`). No-op casts return the argument
+// unchanged; compile-time-constant operands are cast eagerly via cast_constant_composite.
+std::unique_ptr<Expression> ConstructorCompoundCast::Make(const Context& context,
+                                                          Position pos,
+                                                          const Type& type,
+                                                          std::unique_ptr<Expression> arg) {
+    // Only vectors or matrices of the same dimensions are allowed.
+    SkASSERT(type.isVector() || type.isMatrix());
+    SkASSERT(type.isAllowedInES2(context));
+    SkASSERT(arg->type().isVector() == type.isVector());
+    SkASSERT(arg->type().isMatrix() == type.isMatrix());
+    SkASSERT(type.columns() == arg->type().columns());
+    SkASSERT(type.rows() == arg->type().rows());
+
+    // If this is a no-op cast, return the expression as-is.
+    // NOTE(review): unlike ConstructorArrayCast::Make, this path does not update arg->fPosition
+    // to `pos` — confirm the asymmetry is intentional.
+    if (type.matches(arg->type())) {
+        return arg;
+    }
+    // Look up the value of constant variables. This allows constant-expressions like
+    // `int4(colorGreen)` to be replaced with the compile-time constant `int4(0, 1, 0, 1)`.
+    arg = ConstantFolder::MakeConstantValueForVariable(pos, std::move(arg));
+
+    // We can cast a vector of compile-time constants at compile-time.
+    if (Analysis::IsCompileTimeConstant(*arg)) {
+        return cast_constant_composite(context, pos, type, std::move(arg));
+    }
+    return std::make_unique<ConstructorCompoundCast>(pos, type, std::move(arg));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompoundCast.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompoundCast.h
new file mode 100644
index 0000000000..9dc08271d7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorCompoundCast.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_COMPOUND_CAST
+#define SKSL_CONSTRUCTOR_COMPOUND_CAST
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+
+/**
+ * Represents the construction of a vector/matrix typecast, such as `half3(myInt3)` or
+ * `float4x4(myHalf4x4)`. Matrix resizes are done in ConstructorMatrixResize, not here.
+ *
+ * These always contain exactly 1 vector or matrix of matching size, and are never constant.
+ */
+class ConstructorCompoundCast final : public SingleArgumentConstructor {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kConstructorCompoundCast;
+
+    ConstructorCompoundCast(Position pos, const Type& type, std::unique_ptr<Expression> arg)
+            : INHERITED(pos, kIRNodeKind, &type, std::move(arg)) {}
+
+    // Factory; see SkSLConstructorCompoundCast.cpp. No-op casts are elided, constant operands
+    // are cast at compile time.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            const Type& type,
+                                            std::unique_ptr<Expression> arg);
+
+    // Deep-copies the node, re-homing it at `pos`.
+    std::unique_ptr<Expression> clone(Position pos) const override {
+        return std::make_unique<ConstructorCompoundCast>(pos, this->type(), argument()->clone());
+    }
+
+private:
+    using INHERITED = SingleArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.cpp
new file mode 100644
index 0000000000..e863babfa9
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+
+#include "include/core/SkTypes.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+// Creates a diagonal-matrix constructor (e.g. `mat4(n)`). The single scalar argument must
+// already match the matrix's component type; constant variables are folded into their values.
+std::unique_ptr<Expression> ConstructorDiagonalMatrix::Make(const Context& context,
+                                                            Position pos,
+                                                            const Type& type,
+                                                            std::unique_ptr<Expression> arg) {
+    SkASSERT(type.isMatrix());
+    SkASSERT(type.isAllowedInES2(context));
+    SkASSERT(arg->type().isScalar());
+    SkASSERT(arg->type().matches(type.componentType()));
+
+    // Look up the value of constant variables. This allows constant-expressions like `mat4(five)`
+    // to be replaced with `mat4(5.0)`.
+    arg = ConstantFolder::MakeConstantValueForVariable(pos, std::move(arg));
+
+    return std::make_unique<ConstructorDiagonalMatrix>(pos, type, std::move(arg));
+}
+
+// Reports the constant value of slot `n`. Slots are decomposed with rows varying fastest
+// (row = n % rows, col = n / rows): diagonal cells take the scalar argument's value, every
+// other cell is zero.
+std::optional<double> ConstructorDiagonalMatrix::getConstantValue(int n) const {
+    int rows = this->type().rows();
+    int row = n % rows;
+    int col = n / rows;
+
+    SkASSERT(col >= 0);
+    SkASSERT(row >= 0);
+    SkASSERT(col < this->type().columns());
+    SkASSERT(row < this->type().rows());
+
+    return (col == row) ? this->argument()->getConstantValue(0) : 0.0;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.h
new file mode 100644
index 0000000000..65342d750c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorDiagonalMatrix.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_DIAGONAL_MATRIX
+#define SKSL_CONSTRUCTOR_DIAGONAL_MATRIX
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <optional>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+
+/**
+ * Represents the construction of a diagonal matrix, such as `half3x3(n)`.
+ *
+ * These always contain exactly 1 scalar.
+ */
+class ConstructorDiagonalMatrix final : public SingleArgumentConstructor {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kConstructorDiagonalMatrix;
+
+    ConstructorDiagonalMatrix(Position pos, const Type& type, std::unique_ptr<Expression> arg)
+            : INHERITED(pos, kIRNodeKind, &type, std::move(arg)) {}
+
+    // Factory; see SkSLConstructorDiagonalMatrix.cpp.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            const Type& type,
+                                            std::unique_ptr<Expression> arg);
+
+    // Deep-copies the node, re-homing it at `pos`.
+    std::unique_ptr<Expression> clone(Position pos) const override {
+        return std::make_unique<ConstructorDiagonalMatrix>(pos, this->type(), argument()->clone());
+    }
+
+    // Diagonal matrices have a known value for every slot (argument on the diagonal, else 0).
+    bool supportsConstantValues() const override { return true; }
+    std::optional<double> getConstantValue(int n) const override;
+
+private:
+    using INHERITED = SingleArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorMatrixResize.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorMatrixResize.cpp
new file mode 100644
index 0000000000..a015666e4b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorMatrixResize.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorMatrixResize.h"
+
+#include "include/core/SkTypes.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+std::unique_ptr<Expression> ConstructorMatrixResize::Make(const Context& context,
+ Position pos,
+ const Type& type,
+ std::unique_ptr<Expression> arg) {
+ SkASSERT(type.isMatrix());
+ SkASSERT(type.isAllowedInES2(context));
+ SkASSERT(arg->type().componentType().matches(type.componentType()));
+
+ // If the matrix isn't actually changing size, return it as-is.
+ if (type.rows() == arg->type().rows() && type.columns() == arg->type().columns()) {
+ return arg;
+ }
+
+ return std::make_unique<ConstructorMatrixResize>(pos, type, std::move(arg));
+}
+
+std::optional<double> ConstructorMatrixResize::getConstantValue(int n) const {
+ int rows = this->type().rows();
+ int row = n % rows;
+ int col = n / rows;
+
+ SkASSERT(col >= 0);
+ SkASSERT(row >= 0);
+ SkASSERT(col < this->type().columns());
+ SkASSERT(row < this->type().rows());
+
+ // GLSL resize matrices are of the form:
+ // |m m 0|
+ // |m m 0|
+ // |0 0 1|
+ // Where `m` is the matrix being wrapped, and other cells contain the identity matrix.
+
+ // Forward `getConstantValue` to the wrapped matrix if the position is in its bounds.
+ if (col < this->argument()->type().columns() && row < this->argument()->type().rows()) {
+ // Recalculate `n` in terms of the inner matrix's dimensions.
+ n = row + (col * this->argument()->type().rows());
+ return this->argument()->getConstantValue(n);
+ }
+
+ // Synthesize an identity matrix for out-of-bounds positions.
+ return (col == row) ? 1.0 : 0.0;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorMatrixResize.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorMatrixResize.h
new file mode 100644
index 0000000000..bf1d3f5897
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorMatrixResize.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_MATRIX_RESIZE
+#define SKSL_CONSTRUCTOR_MATRIX_RESIZE
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <optional>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+
+/**
+ * Represents the construction of a matrix resize operation, such as `mat4x4(myMat2x2)`.
+ *
+ * These always contain exactly 1 matrix of non-matching size. Cells that aren't present in the
+ * input matrix are populated with the identity matrix.
+ */
+class ConstructorMatrixResize final : public SingleArgumentConstructor {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kConstructorMatrixResize;
+
+ ConstructorMatrixResize(Position pos, const Type& type, std::unique_ptr<Expression> arg)
+ : INHERITED(pos, kIRNodeKind, &type, std::move(arg)) {}
+
+ static std::unique_ptr<Expression> Make(const Context& context,
+ Position pos,
+ const Type& type,
+ std::unique_ptr<Expression> arg);
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<ConstructorMatrixResize>(pos, this->type(), argument()->clone());
+ }
+
+ bool supportsConstantValues() const override { return true; }
+ std::optional<double> getConstantValue(int n) const override;
+
+private:
+ using INHERITED = SingleArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorScalarCast.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorScalarCast.cpp
new file mode 100644
index 0000000000..7b3074f31c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorScalarCast.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorScalarCast.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <string>
+
+namespace SkSL {
+
+std::unique_ptr<Expression> ConstructorScalarCast::Convert(const Context& context,
+ Position pos,
+ const Type& rawType,
+ ExpressionArray args) {
+ // As you might expect, scalar-cast constructors should only be created with scalar types.
+ const Type& type = rawType.scalarTypeForLiteral();
+ SkASSERT(type.isScalar());
+
+ if (args.size() != 1) {
+ context.fErrors->error(pos, "invalid arguments to '" + type.displayName() +
+ "' constructor, (expected exactly 1 argument, but found " +
+ std::to_string(args.size()) + ")");
+ return nullptr;
+ }
+
+ const Type& argType = args[0]->type();
+ if (!argType.isScalar()) {
+ // Casting a vector-type into its scalar component type is treated as a slice in GLSL.
+ // We don't allow those casts in SkSL; recommend a .x swizzle instead.
+ const char* swizzleHint = "";
+ if (argType.componentType().matches(type)) {
+ if (argType.isVector()) {
+ swizzleHint = "; use '.x' instead";
+ } else if (argType.isMatrix()) {
+ swizzleHint = "; use '[0][0]' instead";
+ }
+ }
+
+ context.fErrors->error(pos,
+ "'" + argType.displayName() + "' is not a valid parameter to '" +
+ type.displayName() + "' constructor" + swizzleHint);
+ return nullptr;
+ }
+ if (type.checkForOutOfRangeLiteral(context, *args[0])) {
+ return nullptr;
+ }
+
+ return ConstructorScalarCast::Make(context, pos, type, std::move(args[0]));
+}
+
+std::unique_ptr<Expression> ConstructorScalarCast::Make(const Context& context,
+ Position pos,
+ const Type& type,
+ std::unique_ptr<Expression> arg) {
+ SkASSERT(type.isScalar());
+ SkASSERT(type.isAllowedInES2(context));
+ SkASSERT(arg->type().isScalar());
+
+ // No cast required when the types match.
+ if (arg->type().matches(type)) {
+ return arg;
+ }
+ // Look up the value of constant variables. This allows constant-expressions like `int(zero)` to
+ // be replaced with a literal zero.
+ arg = ConstantFolder::MakeConstantValueForVariable(pos, std::move(arg));
+
+ // We can cast scalar literals at compile-time when possible. (If the resulting literal would be
+ // out of range for its type, we report an error and return zero to minimize error cascading.
+ // This can occur when code is inlined, so we can't necessarily catch it during Convert. As
+ // such, it's not safe to return null or assert.)
+ if (arg->is<Literal>()) {
+ double value = arg->as<Literal>().value();
+ if (type.checkForOutOfRangeLiteral(context, value, arg->fPosition)) {
+ value = 0.0;
+ }
+ return Literal::Make(pos, value, &type);
+ }
+ return std::make_unique<ConstructorScalarCast>(pos, type, std::move(arg));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorScalarCast.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorScalarCast.h
new file mode 100644
index 0000000000..295d19d959
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorScalarCast.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_SCALAR_CAST
+#define SKSL_CONSTRUCTOR_SCALAR_CAST
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class ExpressionArray;
+class Type;
+
+/**
+ * Represents the construction of a scalar cast, such as `float(intVariable)`.
+ *
+ * These always contain exactly 1 scalar of a differing type, and are never constant.
+ */
+class ConstructorScalarCast final : public SingleArgumentConstructor {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kConstructorScalarCast;
+
+ ConstructorScalarCast(Position pos, const Type& type, std::unique_ptr<Expression> arg)
+ : INHERITED(pos, kIRNodeKind, &type, std::move(arg)) {}
+
+ // ConstructorScalarCast::Convert will typecheck and create scalar-constructor expressions.
+ // Reports errors via the ErrorReporter; returns null on error.
+ static std::unique_ptr<Expression> Convert(const Context& context,
+ Position pos,
+ const Type& rawType,
+ ExpressionArray args);
+
+ // ConstructorScalarCast::Make casts a scalar expression. Casts that can be evaluated at
+ // compile-time will do so (e.g. `int(4.1)` --> `Literal(int 4)`). Errors reported via SkASSERT.
+ static std::unique_ptr<Expression> Make(const Context& context,
+ Position pos,
+ const Type& type,
+ std::unique_ptr<Expression> arg);
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<ConstructorScalarCast>(pos, this->type(), argument()->clone());
+ }
+
+private:
+ using INHERITED = SingleArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorSplat.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorSplat.cpp
new file mode 100644
index 0000000000..0d5110c279
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorSplat.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+
+#include "src/sksl/SkSLConstantFolder.h"
+
+namespace SkSL {
+
+std::unique_ptr<Expression> ConstructorSplat::Make(const Context& context,
+ Position pos,
+ const Type& type,
+ std::unique_ptr<Expression> arg) {
+ SkASSERT(type.isAllowedInES2(context));
+ SkASSERT(type.isScalar() || type.isVector());
+ SkASSERT(arg->type().scalarTypeForLiteral().matches(
+ type.componentType().scalarTypeForLiteral()));
+ SkASSERT(arg->type().isScalar());
+
+ // A "splat" to a scalar type is a no-op and can be eliminated.
+ if (type.isScalar()) {
+ arg->fPosition = pos;
+ return arg;
+ }
+
+ // Replace constant variables with their corresponding values, so `float3(five)` can compile
+ // down to `float3(5.0)` (the latter is a compile-time constant).
+ arg = ConstantFolder::MakeConstantValueForVariable(pos, std::move(arg));
+
+ SkASSERT(type.isVector());
+ return std::make_unique<ConstructorSplat>(pos, type, std::move(arg));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorSplat.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorSplat.h
new file mode 100644
index 0000000000..4b342d8d7a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorSplat.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_SPLAT
+#define SKSL_CONSTRUCTOR_SPLAT
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <memory>
+#include <optional>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * Represents the construction of a vector splat, such as `half3(n)`.
+ *
+ * These always contain exactly 1 scalar.
+ */
+class ConstructorSplat final : public SingleArgumentConstructor {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kConstructorSplat;
+
+ ConstructorSplat(Position pos, const Type& type, std::unique_ptr<Expression> arg)
+ : INHERITED(pos, kIRNodeKind, &type, std::move(arg)) {}
+
+ // The input argument must be scalar. A "splat" to a scalar type will be optimized into a no-op.
+ static std::unique_ptr<Expression> Make(const Context& context,
+ Position pos,
+ const Type& type,
+ std::unique_ptr<Expression> arg);
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<ConstructorSplat>(pos, this->type(), argument()->clone());
+ }
+
+ bool supportsConstantValues() const override {
+ return true;
+ }
+
+ std::optional<double> getConstantValue(int n) const override {
+ SkASSERT(n >= 0 && n < this->type().columns());
+ return this->argument()->getConstantValue(0);
+ }
+
+private:
+ using INHERITED = SingleArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorStruct.cpp b/gfx/skia/skia/src/sksl/ir/SkSLConstructorStruct.cpp
new file mode 100644
index 0000000000..d8c42b4abc
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorStruct.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLConstructorStruct.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTo.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <string>
+#include <vector>
+
+namespace SkSL {
+
+std::unique_ptr<Expression> ConstructorStruct::Convert(const Context& context,
+ Position pos,
+ const Type& type,
+ ExpressionArray args) {
+ SkASSERTF(type.isStruct() && type.fields().size() > 0, "%s", type.description().c_str());
+
+    // Check that the number of constructor arguments matches the number of struct fields.
+ if (type.fields().size() != SkToSizeT(args.size())) {
+ context.fErrors->error(pos,
+ String::printf("invalid arguments to '%s' constructor "
+ "(expected %zu elements, but found %d)",
+ type.displayName().c_str(), type.fields().size(),
+ args.size()));
+ return nullptr;
+ }
+
+ // A struct with atomic members cannot be constructed.
+ if (type.isOrContainsAtomic()) {
+ context.fErrors->error(
+ pos,
+ String::printf("construction of struct type '%s' with atomic member is not allowed",
+ type.displayName().c_str()));
+ return nullptr;
+ }
+
+ // Convert each constructor argument to the struct's field type.
+ for (int index=0; index<args.size(); ++index) {
+ std::unique_ptr<Expression>& argument = args[index];
+ const Type::Field& field = type.fields()[index];
+
+ argument = field.fType->coerceExpression(std::move(argument), context);
+ if (!argument) {
+ return nullptr;
+ }
+ }
+
+ return ConstructorStruct::Make(context, pos, type, std::move(args));
+}
+
+[[maybe_unused]] static bool arguments_match_field_types(const ExpressionArray& args,
+ const Type& type) {
+ SkASSERT(type.fields().size() == SkToSizeT(args.size()));
+
+ for (int index = 0; index < args.size(); ++index) {
+ const std::unique_ptr<Expression>& argument = args[index];
+ const Type::Field& field = type.fields()[index];
+ if (!argument->type().matches(*field.fType)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+std::unique_ptr<Expression> ConstructorStruct::Make(const Context& context,
+ Position pos,
+ const Type& type,
+ ExpressionArray args) {
+ SkASSERT(type.isAllowedInES2(context));
+ SkASSERT(arguments_match_field_types(args, type));
+ SkASSERT(!type.isOrContainsAtomic());
+ return std::make_unique<ConstructorStruct>(pos, type, std::move(args));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLConstructorStruct.h b/gfx/skia/skia/src/sksl/ir/SkSLConstructorStruct.h
new file mode 100644
index 0000000000..dab7c6c67d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLConstructorStruct.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONSTRUCTOR_STRUCT
+#define SKSL_CONSTRUCTOR_STRUCT
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Type;
+
+/**
+ * Represents the construction of a struct object, such as "Color(red, green, blue, 1)".
+ */
+class ConstructorStruct final : public MultiArgumentConstructor {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kConstructorStruct;
+
+ ConstructorStruct(Position pos, const Type& type, ExpressionArray arguments)
+ : INHERITED(pos, kIRNodeKind, &type, std::move(arguments)) {}
+
+ // ConstructorStruct::Convert will typecheck and create struct-constructor expressions.
+ // Reports errors via the ErrorReporter; returns null on error.
+ static std::unique_ptr<Expression> Convert(const Context& context,
+ Position pos,
+ const Type& type,
+ ExpressionArray args);
+
+ // ConstructorStruct::Make creates struct-constructor expressions; errors reported via SkASSERT.
+ static std::unique_ptr<Expression> Make(const Context& context,
+ Position pos,
+ const Type& type,
+ ExpressionArray args);
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<ConstructorStruct>(pos, this->type(), this->arguments().clone());
+ }
+
+private:
+ using INHERITED = MultiArgumentConstructor;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h
new file mode 100644
index 0000000000..64eadbe53d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLContinueStatement.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_CONTINUESTATEMENT
+#define SKSL_CONTINUESTATEMENT
+
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A 'continue' statement.
+ */
+class ContinueStatement final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kContinue;
+
+ ContinueStatement(Position pos)
+ : INHERITED(pos, kIRNodeKind) {}
+
+ static std::unique_ptr<Statement> Make(Position pos) {
+ return std::make_unique<ContinueStatement>(pos);
+ }
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::make_unique<ContinueStatement>(fPosition);
+ }
+
+ std::string description() const override {
+ return "continue;";
+ }
+
+private:
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.cpp b/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.cpp
new file mode 100644
index 0000000000..2f090219f2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLDiscardStatement.h"
+
+namespace SkSL {
+
+std::unique_ptr<Statement> DiscardStatement::Convert(const Context& context, Position pos) {
+ if (!ProgramConfig::IsFragment(context.fConfig->fKind)) {
+ context.fErrors->error(pos, "discard statement is only permitted in fragment shaders");
+ return nullptr;
+ }
+ return DiscardStatement::Make(context, pos);
+}
+
+std::unique_ptr<Statement> DiscardStatement::Make(const Context& context, Position pos) {
+ SkASSERT(ProgramConfig::IsFragment(context.fConfig->fKind));
+ return std::make_unique<DiscardStatement>(pos);
+}
+
+std::unique_ptr<Statement> DiscardStatement::clone() const {
+ return std::make_unique<DiscardStatement>(fPosition);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h
new file mode 100644
index 0000000000..1e947d7d0f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLDiscardStatement.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DISCARDSTATEMENT
+#define SKSL_DISCARDSTATEMENT
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+
+#include <memory>
+#include <string>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * A 'discard' statement.
+ */
+class DiscardStatement final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kDiscard;
+
+ DiscardStatement(Position pos) : INHERITED(pos, kIRNodeKind) {}
+
+ // Creates a discard-statement; reports errors via ErrorReporter.
+ static std::unique_ptr<Statement> Convert(const Context& context, Position pos);
+
+ // Creates a discard-statement; reports errors via SkASSERT.
+ static std::unique_ptr<Statement> Make(const Context& context, Position pos);
+
+ std::unique_ptr<Statement> clone() const override;
+
+ std::string description() const override {
+ return "discard;";
+ }
+
+private:
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.cpp b/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.cpp
new file mode 100644
index 0000000000..1a0a62e0aa
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLDoStatement.h"
+
+#include "include/core/SkTypes.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+std::unique_ptr<Statement> DoStatement::Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Statement> stmt,
+ std::unique_ptr<Expression> test) {
+ if (context.fConfig->strictES2Mode()) {
+ context.fErrors->error(pos, "do-while loops are not supported");
+ return nullptr;
+ }
+ test = context.fTypes.fBool->coerceExpression(std::move(test), context);
+ if (!test) {
+ return nullptr;
+ }
+ if (Analysis::DetectVarDeclarationWithoutScope(*stmt, context.fErrors)) {
+ return nullptr;
+ }
+ return DoStatement::Make(context, pos, std::move(stmt), std::move(test));
+}
+
+std::unique_ptr<Statement> DoStatement::Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Statement> stmt,
+ std::unique_ptr<Expression> test) {
+ SkASSERT(!context.fConfig->strictES2Mode());
+ SkASSERT(test->type().matches(*context.fTypes.fBool));
+ SkASSERT(!Analysis::DetectVarDeclarationWithoutScope(*stmt));
+ return std::make_unique<DoStatement>(pos, std::move(stmt), std::move(test));
+}
+
+std::unique_ptr<Statement> DoStatement::clone() const {
+ return std::make_unique<DoStatement>(fPosition, this->statement()->clone(),
+ this->test()->clone());
+}
+
+std::string DoStatement::description() const {
+ return "do " + this->statement()->description() +
+ " while (" + this->test()->description() + ");";
+}
+
+} // namespace SkSL
+
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h
new file mode 100644
index 0000000000..461252b38d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLDoStatement.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DOSTATEMENT
+#define SKSL_DOSTATEMENT
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * A 'do' statement.
+ */
+class DoStatement final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kDo;
+
+ DoStatement(Position pos, std::unique_ptr<Statement> statement,
+ std::unique_ptr<Expression> test)
+ : INHERITED(pos, kIRNodeKind)
+ , fStatement(std::move(statement))
+ , fTest(std::move(test)) {}
+
+ // Creates an SkSL do-while loop; uses the ErrorReporter to report errors.
+ static std::unique_ptr<Statement> Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Statement> stmt,
+ std::unique_ptr<Expression> test);
+
+ // Creates an SkSL do-while loop; reports errors via ASSERT.
+ static std::unique_ptr<Statement> Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Statement> stmt,
+ std::unique_ptr<Expression> test);
+
+ std::unique_ptr<Statement>& statement() {
+ return fStatement;
+ }
+
+ const std::unique_ptr<Statement>& statement() const {
+ return fStatement;
+ }
+
+ std::unique_ptr<Expression>& test() {
+ return fTest;
+ }
+
+ const std::unique_ptr<Expression>& test() const {
+ return fTest;
+ }
+
+ std::unique_ptr<Statement> clone() const override;
+
+ std::string description() const override;
+
+private:
+ std::unique_ptr<Statement> fStatement;
+ std::unique_ptr<Expression> fTest;
+
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExpression.cpp b/gfx/skia/skia/src/sksl/ir/SkSLExpression.cpp
new file mode 100644
index 0000000000..b6ebb7b0ec
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExpression.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include "include/private/SkSLDefines.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLContext.h"
+
+namespace SkSL {
+
+std::string Expression::description() const {
+ return this->description(OperatorPrecedence::kTopLevel);
+}
+
+bool Expression::isIncomplete(const Context& context) const {
+ switch (this->kind()) {
+ case Kind::kFunctionReference:
+ context.fErrors->error(fPosition.after(), "expected '(' to begin function call");
+ return true;
+
+ case Kind::kMethodReference:
+ context.fErrors->error(fPosition.after(), "expected '(' to begin method call");
+ return true;
+
+ case Kind::kTypeReference:
+ context.fErrors->error(fPosition.after(),
+ "expected '(' to begin constructor invocation");
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+ExpressionArray ExpressionArray::clone() const {
+ ExpressionArray cloned;
+ cloned.reserve_back(this->size());
+ for (const std::unique_ptr<Expression>& expr : *this) {
+ cloned.push_back(expr ? expr->clone() : nullptr);
+ }
+ return cloned;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLExpression.h
new file mode 100644
index 0000000000..6336b4a5ef
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExpression.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXPRESSION
+#define SKSL_EXPRESSION
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <memory>
+#include <optional>
+#include <string>
+
+namespace SkSL {
+
+class AnyConstructor;
+class Context;
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * Abstract supertype of all expressions.
+ */
+class Expression : public IRNode {
+public:
+ using Kind = ExpressionKind;
+
+ Expression(Position pos, Kind kind, const Type* type)
+ : INHERITED(pos, (int) kind)
+ , fType(type) {
+ SkASSERT(kind >= Kind::kFirst && kind <= Kind::kLast);
+ }
+
+ Kind kind() const {
+ return (Kind) fKind;
+ }
+
+ virtual const Type& type() const {
+ return *fType;
+ }
+
+ bool isAnyConstructor() const {
+ static_assert((int)Kind::kConstructorArray - 1 == (int)Kind::kChildCall);
+ static_assert((int)Kind::kConstructorStruct + 1 == (int)Kind::kFieldAccess);
+ return this->kind() >= Kind::kConstructorArray && this->kind() <= Kind::kConstructorStruct;
+ }
+
+ bool isIntLiteral() const {
+ return this->kind() == Kind::kLiteral && this->type().isInteger();
+ }
+
+ bool isFloatLiteral() const {
+ return this->kind() == Kind::kLiteral && this->type().isFloat();
+ }
+
+ bool isBoolLiteral() const {
+ return this->kind() == Kind::kLiteral && this->type().isBoolean();
+ }
+
+ AnyConstructor& asAnyConstructor();
+ const AnyConstructor& asAnyConstructor() const;
+
+ /**
+ * Returns true if this expression is incomplete. Specifically, dangling function/method-call
+ * references that were never invoked, or type references that were never constructed, are
+ * considered incomplete expressions and should result in an error.
+ */
+ bool isIncomplete(const Context& context) const;
+
+ /**
+ * Compares this constant expression against another constant expression. Returns kUnknown if
+ * we aren't able to deduce a result (an expression isn't actually constant, the types are
+ * mismatched, etc).
+ */
+ enum class ComparisonResult {
+ kUnknown = -1,
+ kNotEqual,
+ kEqual
+ };
+ virtual ComparisonResult compareConstant(const Expression& other) const {
+ return ComparisonResult::kUnknown;
+ }
+
+ CoercionCost coercionCost(const Type& target) const {
+ return this->type().coercionCost(target);
+ }
+
+ /**
+ * Returns true if this expression type supports `getConstantValue`. (This particular expression
+ * may or may not actually contain a constant value.) It's harmless to call `getConstantValue`
+ * on expressions which don't support constant values or don't contain any constant values, but
+ * if `supportsConstantValues` returns false, you can assume that `getConstantValue` will return
+ * nullopt for every slot of this expression. This allows for early-out opportunities in some
+ * cases. (Some expressions have tons of slots but never hold a constant value; e.g. a variable
+ * holding a very large array.)
+ */
+ virtual bool supportsConstantValues() const {
+ return false;
+ }
+
+ /**
+ * Returns the n'th compile-time constant value within a literal or constructor.
+ * Use Type::slotCount to determine the number of slots within an expression.
+ * Slots which do not contain compile-time constant values will return nullopt.
+ * `vec4(1, vec2(2), 3)` contains four compile-time constants: (1, 2, 2, 3)
+ * `mat2(f)` contains four slots, and two are constant: (nullopt, 0,
+ * 0, nullopt)
+ * All classes which override this function must also implement `supportsConstantValues`.
+ */
+ virtual std::optional<double> getConstantValue(int n) const {
+ SkASSERT(!this->supportsConstantValues());
+ return std::nullopt;
+ }
+
+ virtual std::unique_ptr<Expression> clone(Position pos) const = 0;
+
+ /**
+ * Returns a clone at the same position.
+ */
+ std::unique_ptr<Expression> clone() const { return this->clone(fPosition); }
+
+ /**
+ * Returns a description of the expression.
+ */
+ std::string description() const final;
+ virtual std::string description(OperatorPrecedence parentPrecedence) const = 0;
+
+
+private:
+ const Type* fType;
+
+ using INHERITED = IRNode;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.cpp b/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.cpp
new file mode 100644
index 0000000000..7f2831644f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+
+#include "include/core/SkTypes.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
+std::unique_ptr<Statement> ExpressionStatement::Convert(const Context& context,
+ std::unique_ptr<Expression> expr) {
+ // Expression-statements need to represent a complete expression.
+ // Report an error on intermediate expressions, like FunctionReference or TypeReference.
+ if (expr->isIncomplete(context)) {
+ return nullptr;
+ }
+ return ExpressionStatement::Make(context, std::move(expr));
+}
+
+std::unique_ptr<Statement> ExpressionStatement::Make(const Context& context,
+ std::unique_ptr<Expression> expr) {
+ SkASSERT(!expr->isIncomplete(context));
+
+ if (context.fConfig->fSettings.fOptimize) {
+ // Expression-statements without any side effect can be replaced with a Nop.
+ if (!Analysis::HasSideEffects(*expr)) {
+ return Nop::Make();
+ }
+
+ // If this is an assignment statement like `a += b;`, the ref-kind of `a` will be set as
+ // read-write; `a` is written-to by the +=, and read-from by the consumer of the expression.
+ // We can demote the ref-kind to "write" safely, because the result of the expression is
+ // discarded; that is, `a` is never actually read-from.
+ if (expr->is<BinaryExpression>()) {
+ BinaryExpression& binary = expr->as<BinaryExpression>();
+ if (VariableReference* assignedVar = binary.isAssignmentIntoVariable()) {
+ if (assignedVar->refKind() == VariableRefKind::kReadWrite) {
+ assignedVar->setRefKind(VariableRefKind::kWrite);
+ }
+ }
+ }
+ }
+
+ return std::make_unique<ExpressionStatement>(std::move(expr));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h
new file mode 100644
index 0000000000..d213ad230e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExpressionStatement.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXPRESSIONSTATEMENT
+#define SKSL_EXPRESSIONSTATEMENT
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * A lone expression being used as a statement.
+ */
+class ExpressionStatement final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kExpression;
+
+ ExpressionStatement(std::unique_ptr<Expression> expression)
+ : INHERITED(expression->fPosition, kIRNodeKind)
+ , fExpression(std::move(expression)) {}
+
+ // Creates an SkSL expression-statement; reports errors via ErrorReporter.
+ static std::unique_ptr<Statement> Convert(const Context& context,
+ std::unique_ptr<Expression> expr);
+
+ // Creates an SkSL expression-statement; reports errors via assertion.
+ static std::unique_ptr<Statement> Make(const Context& context,
+ std::unique_ptr<Expression> expr);
+
+ const std::unique_ptr<Expression>& expression() const {
+ return fExpression;
+ }
+
+ std::unique_ptr<Expression>& expression() {
+ return fExpression;
+ }
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::make_unique<ExpressionStatement>(this->expression()->clone());
+ }
+
+ std::string description() const override {
+ return this->expression()->description() + ";";
+ }
+
+private:
+ std::unique_ptr<Expression> fExpression;
+
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLExtension.h b/gfx/skia/skia/src/sksl/ir/SkSLExtension.h
new file mode 100644
index 0000000000..94c2dd933e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLExtension.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_EXTENSION
+#define SKSL_EXTENSION
+
+#include "include/private/SkSLProgramElement.h"
+
+namespace SkSL {
+
+/**
+ * An extension declaration.
+ */
+class Extension final : public ProgramElement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kExtension;
+
+ Extension(Position pos, std::string_view name)
+ : INHERITED(pos, kIRNodeKind)
+ , fName(name) {}
+
+ std::string_view name() const {
+ return fName;
+ }
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::unique_ptr<ProgramElement>(new Extension(fPosition, this->name()));
+ }
+
+ std::string description() const override {
+ return "#extension " + std::string(this->name()) + " : enable";
+ }
+
+private:
+ std::string_view fName;
+
+ using INHERITED = ProgramElement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLField.h b/gfx/skia/skia/src/sksl/ir/SkSLField.h
new file mode 100644
index 0000000000..a7fa6575cf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLField.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FIELD
+#define SKSL_FIELD
+
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLSymbol.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+namespace SkSL {
+
+/**
+ * A symbol which should be interpreted as a field access. Fields are added to the symbol table
+ * whenever a bare reference to an identifier should refer to a struct field; in GLSL, this is the
+ * result of declaring anonymous interface blocks.
+ */
+class Field final : public Symbol {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kField;
+
+ Field(Position pos, const Variable* owner, int fieldIndex)
+ : INHERITED(pos, kIRNodeKind, owner->type().fields()[fieldIndex].fName,
+ owner->type().fields()[fieldIndex].fType)
+ , fOwner(owner)
+ , fFieldIndex(fieldIndex) {}
+
+ int fieldIndex() const {
+ return fFieldIndex;
+ }
+
+ const Variable& owner() const {
+ return *fOwner;
+ }
+
+ std::string description() const override {
+ return this->owner().name().empty()
+ ? std::string(this->name())
+ : (this->owner().description() + "." + std::string(this->name()));
+ }
+
+private:
+ const Variable* fOwner;
+ int fFieldIndex;
+
+ using INHERITED = Symbol;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.cpp b/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.cpp
new file mode 100644
index 0000000000..5758280b7a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLFieldAccess.h"
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLConstructorStruct.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLMethodReference.h"
+#include "src/sksl/ir/SkSLSetting.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#include <cstddef>
+
+namespace SkSL {
+
+std::unique_ptr<Expression> FieldAccess::Convert(const Context& context,
+ Position pos,
+ SymbolTable& symbolTable,
+ std::unique_ptr<Expression> base,
+ std::string_view field) {
+ const Type& baseType = base->type();
+ if (baseType.isEffectChild()) {
+ // Turn the field name into a free function name, prefixed with '$':
+ std::string methodName = "$" + std::string(field);
+ const Symbol* result = symbolTable.find(methodName);
+ if (result && result->is<FunctionDeclaration>()) {
+ return std::make_unique<MethodReference>(context, pos, std::move(base),
+ &result->as<FunctionDeclaration>());
+ }
+ context.fErrors->error(pos, "type '" + baseType.displayName() + "' has no method named '" +
+ std::string(field) + "'");
+ return nullptr;
+ }
+ if (baseType.isStruct()) {
+ const std::vector<Type::Field>& fields = baseType.fields();
+ for (size_t i = 0; i < fields.size(); i++) {
+ if (fields[i].fName == field) {
+ return FieldAccess::Make(context, pos, std::move(base), (int) i);
+ }
+ }
+ }
+ if (baseType.matches(*context.fTypes.fSkCaps)) {
+ return Setting::Convert(context, pos, field);
+ }
+
+ context.fErrors->error(pos, "type '" + baseType.displayName() +
+ "' does not have a field named '" + std::string(field) + "'");
+ return nullptr;
+}
+
+static std::unique_ptr<Expression> extract_field(Position pos,
+ const ConstructorStruct& ctor,
+ int fieldIndex) {
+ // Confirm that the fields that are being removed are side-effect free.
+ const ExpressionArray& args = ctor.arguments();
+ int numFields = args.size();
+ for (int index = 0; index < numFields; ++index) {
+ if (fieldIndex == index) {
+ continue;
+ }
+ if (Analysis::HasSideEffects(*args[index])) {
+ return nullptr;
+ }
+ }
+
+ // Return the desired field.
+ return args[fieldIndex]->clone(pos);
+}
+
+std::unique_ptr<Expression> FieldAccess::Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> base,
+ int fieldIndex,
+ OwnerKind ownerKind) {
+ SkASSERT(base->type().isStruct());
+ SkASSERT(fieldIndex >= 0);
+ SkASSERT(fieldIndex < (int)base->type().fields().size());
+
+ // Replace `knownStruct.field` with the field's value if there are no side-effects involved.
+ const Expression* expr = ConstantFolder::GetConstantValueForVariable(*base);
+ if (expr->is<ConstructorStruct>()) {
+ if (std::unique_ptr<Expression> field = extract_field(pos, expr->as<ConstructorStruct>(),
+ fieldIndex)) {
+ return field;
+ }
+ }
+
+ return std::make_unique<FieldAccess>(pos, std::move(base), fieldIndex, ownerKind);
+}
+
+size_t FieldAccess::initialSlot() const {
+ SkSpan<const Type::Field> fields = this->base()->type().fields();
+ const int fieldIndex = this->fieldIndex();
+
+ size_t slot = 0;
+ for (int index = 0; index < fieldIndex; ++index) {
+ slot += fields[index].fType->slotCount();
+ }
+ return slot;
+}
+
+std::string FieldAccess::description(OperatorPrecedence) const {
+ std::string f = this->base()->description(OperatorPrecedence::kPostfix);
+ if (!f.empty()) {
+ f.push_back('.');
+ }
+ return f + std::string(this->base()->type().fields()[this->fieldIndex()].fName);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h b/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h
new file mode 100644
index 0000000000..8eca68fb38
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFieldAccess.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FIELDACCESS
+#define SKSL_FIELDACCESS
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+
+class Context;
+class SymbolTable;
+enum class OperatorPrecedence : uint8_t;
+
+enum class FieldAccessOwnerKind : int8_t {
+ kDefault,
+ // this field access is to a field of an anonymous interface block (and thus, the field name
+ // is actually in global scope, so only the field name needs to be written in GLSL)
+ kAnonymousInterfaceBlock
+};
+
+/**
+ * An expression which extracts a field from a struct, as in 'foo.bar'.
+ */
+class FieldAccess final : public Expression {
+public:
+ using OwnerKind = FieldAccessOwnerKind;
+
+ inline static constexpr Kind kIRNodeKind = Kind::kFieldAccess;
+
+ FieldAccess(Position pos, std::unique_ptr<Expression> base, int fieldIndex,
+ OwnerKind ownerKind = OwnerKind::kDefault)
+ : INHERITED(pos, kIRNodeKind, base->type().fields()[fieldIndex].fType)
+ , fFieldIndex(fieldIndex)
+ , fOwnerKind(ownerKind)
+ , fBase(std::move(base)) {}
+
+ // Returns a field-access expression; reports errors via the ErrorReporter.
+ static std::unique_ptr<Expression> Convert(const Context& context,
+ Position pos,
+ SymbolTable& symbolTable,
+ std::unique_ptr<Expression> base,
+ std::string_view field);
+
+ // Returns a field-access expression; reports errors via ASSERT.
+ static std::unique_ptr<Expression> Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> base,
+ int fieldIndex,
+ OwnerKind ownerKind = OwnerKind::kDefault);
+
+ std::unique_ptr<Expression>& base() {
+ return fBase;
+ }
+
+ const std::unique_ptr<Expression>& base() const {
+ return fBase;
+ }
+
+ int fieldIndex() const {
+ return fFieldIndex;
+ }
+
+ OwnerKind ownerKind() const {
+ return fOwnerKind;
+ }
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<FieldAccess>(pos,
+ this->base()->clone(),
+ this->fieldIndex(),
+ this->ownerKind());
+ }
+
+ size_t initialSlot() const;
+
+ std::string description(OperatorPrecedence) const override;
+
+private:
+ int fFieldIndex;
+ FieldAccessOwnerKind fOwnerKind;
+ std::unique_ptr<Expression> fBase;
+
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLForStatement.cpp b/gfx/skia/skia/src/sksl/ir/SkSLForStatement.cpp
new file mode 100644
index 0000000000..8777f6638c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLForStatement.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLForStatement.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+namespace SkSL {
+
+static bool is_vardecl_block_initializer(const Statement* stmt) {
+ if (!stmt) {
+ return false;
+ }
+ if (!stmt->is<SkSL::Block>()) {
+ return false;
+ }
+ const SkSL::Block& b = stmt->as<SkSL::Block>();
+ if (b.isScope()) {
+ return false;
+ }
+ for (const auto& child : b.children()) {
+ if (!child->is<SkSL::VarDeclaration>()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool is_simple_initializer(const Statement* stmt) {
+ return !stmt || stmt->isEmpty() || stmt->is<SkSL::VarDeclaration>() ||
+ stmt->is<SkSL::ExpressionStatement>();
+}
+
+std::unique_ptr<Statement> ForStatement::clone() const {
+ std::unique_ptr<LoopUnrollInfo> unrollInfo;
+ if (fUnrollInfo) {
+ unrollInfo = std::make_unique<LoopUnrollInfo>(*fUnrollInfo);
+ }
+
+ return std::make_unique<ForStatement>(
+ fPosition,
+ fForLoopPositions,
+ this->initializer() ? this->initializer()->clone() : nullptr,
+ this->test() ? this->test()->clone() : nullptr,
+ this->next() ? this->next()->clone() : nullptr,
+ this->statement()->clone(),
+ std::move(unrollInfo),
+ SymbolTable::WrapIfBuiltin(this->symbols()));
+}
+
+std::string ForStatement::description() const {
+ std::string result("for (");
+ if (this->initializer()) {
+ result += this->initializer()->description();
+ } else {
+ result += ";";
+ }
+ result += " ";
+ if (this->test()) {
+ result += this->test()->description();
+ }
+ result += "; ";
+ if (this->next()) {
+ result += this->next()->description();
+ }
+ result += ") " + this->statement()->description();
+ return result;
+}
+
+std::unique_ptr<Statement> ForStatement::Convert(const Context& context,
+ Position pos,
+ ForLoopPositions positions,
+ std::unique_ptr<Statement> initializer,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Expression> next,
+ std::unique_ptr<Statement> statement,
+ std::shared_ptr<SymbolTable> symbolTable) {
+ bool isSimpleInitializer = is_simple_initializer(initializer.get());
+ bool isVardeclBlockInitializer =
+ !isSimpleInitializer && is_vardecl_block_initializer(initializer.get());
+
+ if (!isSimpleInitializer && !isVardeclBlockInitializer) {
+ context.fErrors->error(initializer->fPosition, "invalid for loop initializer");
+ return nullptr;
+ }
+
+ if (test) {
+ test = context.fTypes.fBool->coerceExpression(std::move(test), context);
+ if (!test) {
+ return nullptr;
+ }
+ }
+
+ // The type of the next-expression doesn't matter, but it needs to be a complete expression.
+ // Report an error on intermediate expressions like FunctionReference or TypeReference.
+ if (next && next->isIncomplete(context)) {
+ return nullptr;
+ }
+
+ std::unique_ptr<LoopUnrollInfo> unrollInfo;
+ if (context.fConfig->strictES2Mode()) {
+ // In strict-ES2, loops must be unrollable or it's an error.
+ unrollInfo = Analysis::GetLoopUnrollInfo(pos, positions, initializer.get(), test.get(),
+ next.get(), statement.get(), context.fErrors);
+ if (!unrollInfo) {
+ return nullptr;
+ }
+ } else {
+ // In ES3, loops don't have to be unrollable, but we can use the unroll information for
+ // optimization purposes.
+ unrollInfo = Analysis::GetLoopUnrollInfo(pos, positions, initializer.get(), test.get(),
+ next.get(), statement.get(), /*errors=*/nullptr);
+ }
+
+ if (Analysis::DetectVarDeclarationWithoutScope(*statement, context.fErrors)) {
+ return nullptr;
+ }
+
+ if (isVardeclBlockInitializer) {
+ // If the initializer statement of a for loop contains multiple variables, this causes
+ // difficulties for several of our backends; e.g. Metal doesn't have a way to express arrays
+ // of different size in the same decl-stmt, because the array-size is part of the type. It's
+ // conceptually equivalent to synthesize a scope, declare the variables, and then emit a for
+ // statement with an empty init-stmt. (Note that we can't just do this transformation
+ // unilaterally for all for-statements, because the resulting for loop isn't ES2-compliant.)
+ StatementArray scope;
+ scope.push_back(std::move(initializer));
+ scope.push_back(ForStatement::Make(context, pos, positions, /*initializer=*/nullptr,
+ std::move(test), std::move(next), std::move(statement),
+ std::move(unrollInfo), /*symbolTable=*/nullptr));
+ return Block::Make(pos, std::move(scope), Block::Kind::kBracedScope,
+ std::move(symbolTable));
+ }
+
+ return ForStatement::Make(context, pos, positions, std::move(initializer), std::move(test),
+ std::move(next), std::move(statement), std::move(unrollInfo),
+ std::move(symbolTable));
+}
+
+std::unique_ptr<Statement> ForStatement::ConvertWhile(const Context& context, Position pos,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> statement,
+ std::shared_ptr<SymbolTable> symbolTable) {
+ if (context.fConfig->strictES2Mode()) {
+ context.fErrors->error(pos, "while loops are not supported");
+ return nullptr;
+ }
+ return ForStatement::Convert(context, pos, ForLoopPositions(), /*initializer=*/nullptr,
+ std::move(test), /*next=*/nullptr, std::move(statement), std::move(symbolTable));
+}
+
+std::unique_ptr<Statement> ForStatement::Make(const Context& context,
+ Position pos,
+ ForLoopPositions positions,
+ std::unique_ptr<Statement> initializer,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Expression> next,
+ std::unique_ptr<Statement> statement,
+ std::unique_ptr<LoopUnrollInfo> unrollInfo,
+ std::shared_ptr<SymbolTable> symbolTable) {
+ SkASSERT(is_simple_initializer(initializer.get()) ||
+ is_vardecl_block_initializer(initializer.get()));
+ SkASSERT(!test || test->type().matches(*context.fTypes.fBool));
+ SkASSERT(!Analysis::DetectVarDeclarationWithoutScope(*statement));
+ SkASSERT(unrollInfo || !context.fConfig->strictES2Mode());
+
+ // Unrollable loops are easy to optimize because we know initializer, test and next don't have
+ // interesting side effects.
+ if (unrollInfo) {
+ // A zero-iteration unrollable loop can be replaced with Nop.
+ // An unrollable loop with an empty body can be replaced with Nop.
+ if (unrollInfo->fCount <= 0 || statement->isEmpty()) {
+ return Nop::Make();
+ }
+ }
+
+ return std::make_unique<ForStatement>(pos, positions, std::move(initializer), std::move(test),
+ std::move(next), std::move(statement), std::move(unrollInfo), std::move(symbolTable));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h
new file mode 100644
index 0000000000..468df41cbc
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLForStatement.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FORSTATEMENT
+#define SKSL_FORSTATEMENT
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class SymbolTable;
+class Variable;
+
+/**
+ * The unrollability information for an ES2-compatible loop.
+ */
+struct LoopUnrollInfo {
+ const Variable* fIndex;
+ double fStart;
+ double fDelta;
+ int fCount;
+};
+
+/**
+ * A 'for' statement.
+ */
+class ForStatement final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kFor;
+
+ ForStatement(Position pos,
+ ForLoopPositions forLoopPositions,
+ std::unique_ptr<Statement> initializer,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Expression> next,
+ std::unique_ptr<Statement> statement,
+ std::unique_ptr<LoopUnrollInfo> unrollInfo,
+ std::shared_ptr<SymbolTable> symbols)
+ : INHERITED(pos, kIRNodeKind)
+ , fForLoopPositions(forLoopPositions)
+ , fSymbolTable(std::move(symbols))
+ , fInitializer(std::move(initializer))
+ , fTest(std::move(test))
+ , fNext(std::move(next))
+ , fStatement(std::move(statement))
+ , fUnrollInfo(std::move(unrollInfo)) {}
+
+ // Creates an SkSL for loop; handles type-coercion and uses the ErrorReporter to report errors.
+ static std::unique_ptr<Statement> Convert(const Context& context,
+ Position pos,
+ ForLoopPositions forLoopPositions,
+ std::unique_ptr<Statement> initializer,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Expression> next,
+ std::unique_ptr<Statement> statement,
+ std::shared_ptr<SymbolTable> symbolTable);
+
+ // Creates an SkSL while loop; handles type-coercion and uses the ErrorReporter for errors.
+ static std::unique_ptr<Statement> ConvertWhile(const Context& context, Position pos,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> statement,
+ std::shared_ptr<SymbolTable> symbolTable);
+
+ // Creates an SkSL for/while loop. Assumes properly coerced types and reports errors via assert.
+ static std::unique_ptr<Statement> Make(const Context& context,
+ Position pos,
+ ForLoopPositions forLoopPositions,
+ std::unique_ptr<Statement> initializer,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Expression> next,
+ std::unique_ptr<Statement> statement,
+ std::unique_ptr<LoopUnrollInfo> unrollInfo,
+ std::shared_ptr<SymbolTable> symbolTable);
+
+ ForLoopPositions forLoopPositions() const {
+ return fForLoopPositions;
+ }
+
+ std::unique_ptr<Statement>& initializer() {
+ return fInitializer;
+ }
+
+ const std::unique_ptr<Statement>& initializer() const {
+ return fInitializer;
+ }
+
+ std::unique_ptr<Expression>& test() {
+ return fTest;
+ }
+
+ const std::unique_ptr<Expression>& test() const {
+ return fTest;
+ }
+
+ std::unique_ptr<Expression>& next() {
+ return fNext;
+ }
+
+ const std::unique_ptr<Expression>& next() const {
+ return fNext;
+ }
+
+ std::unique_ptr<Statement>& statement() {
+ return fStatement;
+ }
+
+ const std::unique_ptr<Statement>& statement() const {
+ return fStatement;
+ }
+
+ const std::shared_ptr<SymbolTable>& symbols() const {
+ return fSymbolTable;
+ }
+
+ /** Loop-unroll information is only supported in strict-ES2 code. Null is returned in ES3+. */
+ const LoopUnrollInfo* unrollInfo() const {
+ return fUnrollInfo.get();
+ }
+
+ std::unique_ptr<Statement> clone() const override;
+
+ std::string description() const override;
+
+private:
+ ForLoopPositions fForLoopPositions;
+ std::shared_ptr<SymbolTable> fSymbolTable;
+ std::unique_ptr<Statement> fInitializer;
+ std::unique_ptr<Expression> fTest;
+ std::unique_ptr<Expression> fNext;
+ std::unique_ptr<Statement> fStatement;
+ std::unique_ptr<LoopUnrollInfo> fUnrollInfo;
+
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.cpp b/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.cpp
new file mode 100644
index 0000000000..69df07db9d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.cpp
@@ -0,0 +1,1056 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLFunctionCall.h"
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTo.h"
+#include "include/sksl/DSLCore.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/base/SkHalf.h"
+#include "src/core/SkMatrixInvert.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLChildCall.h"
+#include "src/sksl/ir/SkSLConstructor.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionReference.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLMethodReference.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLTypeReference.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+
+using IntrinsicArguments = std::array<const Expression*, 3>;
+
+static bool has_compile_time_constant_arguments(const ExpressionArray& arguments) {
+ for (const std::unique_ptr<Expression>& arg : arguments) {
+ const Expression* expr = ConstantFolder::GetConstantValueForVariable(*arg);
+ if (!Analysis::IsCompileTimeConstant(*expr)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <typename T>
+static void type_check_expression(const Expression& expr);
+
+template <>
+void type_check_expression<float>(const Expression& expr) {
+ SkASSERT(expr.type().componentType().isFloat());
+}
+
+template <>
+void type_check_expression<SKSL_INT>(const Expression& expr) {
+ SkASSERT(expr.type().componentType().isInteger());
+}
+
+template <>
+void type_check_expression<bool>(const Expression& expr) {
+ SkASSERT(expr.type().componentType().isBoolean());
+}
+
+static std::unique_ptr<Expression> assemble_compound(const Context& context,
+ Position pos,
+ const Type& returnType,
+ double value[]) {
+ int numSlots = returnType.slotCount();
+ ExpressionArray array;
+ array.reserve_back(numSlots);
+ for (int index = 0; index < numSlots; ++index) {
+ array.push_back(Literal::Make(pos, value[index], &returnType.componentType()));
+ }
+ return ConstructorCompound::Make(context, pos, returnType, std::move(array));
+}
+
+using CoalesceFn = double (*)(double, double, double);
+using FinalizeFn = double (*)(double);
+
+static std::unique_ptr<Expression> coalesce_n_way_vector(const Expression* arg0,
+ const Expression* arg1,
+ double startingState,
+ const Type& returnType,
+ CoalesceFn coalesce,
+ FinalizeFn finalize) {
+ // Takes up to two vector or scalar arguments and coalesces them in sequence:
+ // scalar = startingState;
+ // scalar = coalesce(scalar, arg0.x, arg1.x);
+ // scalar = coalesce(scalar, arg0.y, arg1.y);
+ // scalar = coalesce(scalar, arg0.z, arg1.z);
+ // scalar = coalesce(scalar, arg0.w, arg1.w);
+ // scalar = finalize(scalar);
+ //
+ // If an argument is null, zero is passed to the coalesce function. If the arguments are a mix
+    // of scalars and vectors, a scalar is interpreted as a vector containing the same value for
+ // every component.
+
+ Position pos = arg0->fPosition;
+ double minimumValue = returnType.componentType().minimumValue();
+ double maximumValue = returnType.componentType().maximumValue();
+
+ const Type& vecType = arg0->type().isVector() ? arg0->type() :
+ (arg1 && arg1->type().isVector()) ? arg1->type() :
+ arg0->type();
+ SkASSERT( arg0->type().componentType().matches(vecType.componentType()));
+ SkASSERT(!arg1 || arg1->type().componentType().matches(vecType.componentType()));
+
+ double value = startingState;
+ int arg0Index = 0;
+ int arg1Index = 0;
+ for (int index = 0; index < vecType.columns(); ++index) {
+ std::optional<double> arg0Value = arg0->getConstantValue(arg0Index);
+ arg0Index += arg0->type().isVector() ? 1 : 0;
+ SkASSERT(arg0Value.has_value());
+
+ std::optional<double> arg1Value = 0.0;
+ if (arg1) {
+ arg1Value = arg1->getConstantValue(arg1Index);
+ arg1Index += arg1->type().isVector() ? 1 : 0;
+ SkASSERT(arg1Value.has_value());
+ }
+
+ value = coalesce(value, *arg0Value, *arg1Value);
+
+ if (value >= minimumValue && value <= maximumValue) {
+ // This result will fit inside the return type.
+ } else {
+ // The value is outside the float range or is NaN (all if-checks fail); do not optimize.
+ return nullptr;
+ }
+ }
+
+ if (finalize) {
+ value = finalize(value);
+ }
+
+ return Literal::Make(pos, value, &returnType);
+}
+
+template <typename T>
+static std::unique_ptr<Expression> coalesce_vector(const IntrinsicArguments& arguments,
+ double startingState,
+ const Type& returnType,
+ CoalesceFn coalesce,
+ FinalizeFn finalize) {
+ SkASSERT(arguments[0]);
+ SkASSERT(!arguments[1]);
+ type_check_expression<T>(*arguments[0]);
+
+ return coalesce_n_way_vector(arguments[0], /*arg1=*/nullptr,
+ startingState, returnType, coalesce, finalize);
+}
+
+template <typename T>
+static std::unique_ptr<Expression> coalesce_pairwise_vectors(const IntrinsicArguments& arguments,
+ double startingState,
+ const Type& returnType,
+ CoalesceFn coalesce,
+ FinalizeFn finalize) {
+ SkASSERT(arguments[0]);
+ SkASSERT(arguments[1]);
+ SkASSERT(!arguments[2]);
+ type_check_expression<T>(*arguments[0]);
+ type_check_expression<T>(*arguments[1]);
+
+ return coalesce_n_way_vector(arguments[0], arguments[1],
+ startingState, returnType, coalesce, finalize);
+}
+
+using CompareFn = bool (*)(double, double);
+
+static std::unique_ptr<Expression> optimize_comparison(const Context& context,
+ const IntrinsicArguments& arguments,
+ CompareFn compare) {
+ const Expression* left = arguments[0];
+ const Expression* right = arguments[1];
+ SkASSERT(left);
+ SkASSERT(right);
+ SkASSERT(!arguments[2]);
+
+ const Type& type = left->type();
+ SkASSERT(type.isVector());
+ SkASSERT(type.componentType().isScalar());
+ SkASSERT(type.matches(right->type()));
+
+ double array[4];
+
+ for (int index = 0; index < type.columns(); ++index) {
+ std::optional<double> leftValue = left->getConstantValue(index);
+ std::optional<double> rightValue = right->getConstantValue(index);
+ SkASSERT(leftValue.has_value());
+ SkASSERT(rightValue.has_value());
+ array[index] = compare(*leftValue, *rightValue) ? 1.0 : 0.0;
+ }
+
+ const Type& bvecType = context.fTypes.fBool->toCompound(context, type.columns(), /*rows=*/1);
+ return assemble_compound(context, left->fPosition, bvecType, array);
+}
+
+using EvaluateFn = double (*)(double, double, double);
+
+static std::unique_ptr<Expression> evaluate_n_way_intrinsic(const Context& context,
+ const Expression* arg0,
+ const Expression* arg1,
+ const Expression* arg2,
+ const Type& returnType,
+ EvaluateFn eval) {
+ // Takes up to three arguments and evaluates all of them, left-to-right, in tandem.
+ // Equivalent to constructing a new compound value containing the results from:
+ // eval(arg0.x, arg1.x, arg2.x),
+ // eval(arg0.y, arg1.y, arg2.y),
+ // eval(arg0.z, arg1.z, arg2.z),
+ // eval(arg0.w, arg1.w, arg2.w)
+ //
+ // If an argument is null, zero is passed to the evaluation function. If the arguments are a mix
+ // of scalars and compounds, scalars are interpreted as a compound containing the same value for
+ // every component.
+
+ double minimumValue = returnType.componentType().minimumValue();
+ double maximumValue = returnType.componentType().maximumValue();
+ int slots = returnType.slotCount();
+ double array[16];
+
+ int arg0Index = 0;
+ int arg1Index = 0;
+ int arg2Index = 0;
+ for (int index = 0; index < slots; ++index) {
+ std::optional<double> arg0Value = arg0->getConstantValue(arg0Index);
+ arg0Index += arg0->type().isScalar() ? 0 : 1;
+ SkASSERT(arg0Value.has_value());
+
+ std::optional<double> arg1Value = 0.0;
+ if (arg1) {
+ arg1Value = arg1->getConstantValue(arg1Index);
+ arg1Index += arg1->type().isScalar() ? 0 : 1;
+ SkASSERT(arg1Value.has_value());
+ }
+
+ std::optional<double> arg2Value = 0.0;
+ if (arg2) {
+ arg2Value = arg2->getConstantValue(arg2Index);
+ arg2Index += arg2->type().isScalar() ? 0 : 1;
+ SkASSERT(arg2Value.has_value());
+ }
+
+ array[index] = eval(*arg0Value, *arg1Value, *arg2Value);
+
+ if (array[index] >= minimumValue && array[index] <= maximumValue) {
+ // This result will fit inside the return type.
+ } else {
+ // The value is outside the float range or is NaN (all if-checks fail); do not optimize.
+ return nullptr;
+ }
+ }
+
+ return assemble_compound(context, arg0->fPosition, returnType, array);
+}
+
+template <typename T>
+static std::unique_ptr<Expression> evaluate_intrinsic(const Context& context,
+ const IntrinsicArguments& arguments,
+ const Type& returnType,
+ EvaluateFn eval) {
+ SkASSERT(arguments[0]);
+ SkASSERT(!arguments[1]);
+ type_check_expression<T>(*arguments[0]);
+
+ return evaluate_n_way_intrinsic(context, arguments[0], /*arg1=*/nullptr, /*arg2=*/nullptr,
+ returnType, eval);
+}
+
+static std::unique_ptr<Expression> evaluate_intrinsic_numeric(const Context& context,
+ const IntrinsicArguments& arguments,
+ const Type& returnType,
+ EvaluateFn eval) {
+ SkASSERT(arguments[0]);
+ SkASSERT(!arguments[1]);
+ const Type& type = arguments[0]->type().componentType();
+
+ if (type.isFloat()) {
+ return evaluate_intrinsic<float>(context, arguments, returnType, eval);
+ }
+ if (type.isInteger()) {
+ return evaluate_intrinsic<SKSL_INT>(context, arguments, returnType, eval);
+ }
+
+ SkDEBUGFAILF("unsupported type %s", type.description().c_str());
+ return nullptr;
+}
+
+static std::unique_ptr<Expression> evaluate_pairwise_intrinsic(const Context& context,
+ const IntrinsicArguments& arguments,
+ const Type& returnType,
+ EvaluateFn eval) {
+ SkASSERT(arguments[0]);
+ SkASSERT(arguments[1]);
+ SkASSERT(!arguments[2]);
+ const Type& type = arguments[0]->type().componentType();
+
+ if (type.isFloat()) {
+ type_check_expression<float>(*arguments[0]);
+ type_check_expression<float>(*arguments[1]);
+ } else if (type.isInteger()) {
+ type_check_expression<SKSL_INT>(*arguments[0]);
+ type_check_expression<SKSL_INT>(*arguments[1]);
+ } else {
+ SkDEBUGFAILF("unsupported type %s", type.description().c_str());
+ return nullptr;
+ }
+
+ return evaluate_n_way_intrinsic(context, arguments[0], arguments[1], /*arg2=*/nullptr,
+ returnType, eval);
+}
+
+static std::unique_ptr<Expression> evaluate_3_way_intrinsic(const Context& context,
+ const IntrinsicArguments& arguments,
+ const Type& returnType,
+ EvaluateFn eval) {
+ SkASSERT(arguments[0]);
+ SkASSERT(arguments[1]);
+ SkASSERT(arguments[2]);
+ const Type& type = arguments[0]->type().componentType();
+
+ if (type.isFloat()) {
+ type_check_expression<float>(*arguments[0]);
+ type_check_expression<float>(*arguments[1]);
+ type_check_expression<float>(*arguments[2]);
+ } else if (type.isInteger()) {
+ type_check_expression<SKSL_INT>(*arguments[0]);
+ type_check_expression<SKSL_INT>(*arguments[1]);
+ type_check_expression<SKSL_INT>(*arguments[2]);
+ } else {
+ SkDEBUGFAILF("unsupported type %s", type.description().c_str());
+ return nullptr;
+ }
+
+ return evaluate_n_way_intrinsic(context, arguments[0], arguments[1], arguments[2],
+ returnType, eval);
+}
+
+template <typename T1, typename T2>
+static double pun_value(double val) {
+ // Interpret `val` as a value of type T1.
+ static_assert(sizeof(T1) == sizeof(T2));
+ T1 inputValue = (T1)val;
+ // Reinterpret those bits as a value of type T2.
+ T2 outputValue;
+ memcpy(&outputValue, &inputValue, sizeof(T2));
+ // Return the value-of-type-T2 as a double. (Non-finite values will prohibit optimization.)
+ return (double)outputValue;
+}
+
+// Helper functions for optimizing all of our intrinsics.
+namespace Intrinsics {
+namespace {
+
+double coalesce_length(double a, double b, double) { return a + (b * b); }
+double finalize_length(double a) { return std::sqrt(a); }
+
+double coalesce_distance(double a, double b, double c) { b -= c; return a + (b * b); }
+double finalize_distance(double a) { return std::sqrt(a); }
+
+double coalesce_dot(double a, double b, double c) { return a + (b * c); }
+double coalesce_any(double a, double b, double) { return a || b; }
+double coalesce_all(double a, double b, double) { return a && b; }
+
+bool compare_lessThan(double a, double b) { return a < b; }
+bool compare_lessThanEqual(double a, double b) { return a <= b; }
+bool compare_greaterThan(double a, double b) { return a > b; }
+bool compare_greaterThanEqual(double a, double b) { return a >= b; }
+bool compare_equal(double a, double b) { return a == b; }
+bool compare_notEqual(double a, double b) { return a != b; }
+
+double evaluate_radians(double a, double, double) { return a * 0.0174532925; }
+double evaluate_degrees(double a, double, double) { return a * 57.2957795; }
+double evaluate_sin(double a, double, double) { return std::sin(a); }
+double evaluate_cos(double a, double, double) { return std::cos(a); }
+double evaluate_tan(double a, double, double) { return std::tan(a); }
+double evaluate_asin(double a, double, double) { return std::asin(a); }
+double evaluate_acos(double a, double, double) { return std::acos(a); }
+double evaluate_atan(double a, double, double) { return std::atan(a); }
+double evaluate_atan2(double a, double b, double) { return std::atan2(a, b); }
+double evaluate_asinh(double a, double, double) { return std::asinh(a); }
+double evaluate_acosh(double a, double, double) { return std::acosh(a); }
+double evaluate_atanh(double a, double, double) { return std::atanh(a); }
+
+double evaluate_pow(double a, double b, double) { return std::pow(a, b); }
+double evaluate_exp(double a, double, double) { return std::exp(a); }
+double evaluate_log(double a, double, double) { return std::log(a); }
+double evaluate_exp2(double a, double, double) { return std::exp2(a); }
+double evaluate_log2(double a, double, double) { return std::log2(a); }
+double evaluate_sqrt(double a, double, double) { return std::sqrt(a); }
+double evaluate_inversesqrt(double a, double, double) {
+ return sk_ieee_double_divide(1.0, std::sqrt(a));
+}
+
+double evaluate_abs(double a, double, double) { return std::abs(a); }
+double evaluate_sign(double a, double, double) { return (a > 0) - (a < 0); }
+double evaluate_floor(double a, double, double) { return std::floor(a); }
+double evaluate_ceil(double a, double, double) { return std::ceil(a); }
+double evaluate_fract(double a, double, double) { return a - std::floor(a); }
+double evaluate_min(double a, double b, double) { return (a < b) ? a : b; }
+double evaluate_max(double a, double b, double) { return (a > b) ? a : b; }
+double evaluate_clamp(double x, double l, double h) { return (x < l) ? l : (x > h) ? h : x; }
+double evaluate_fma(double a, double b, double c) { return a * b + c; }
+double evaluate_saturate(double a, double, double) { return (a < 0) ? 0 : (a > 1) ? 1 : a; }
+double evaluate_mix(double x, double y, double a) { return x * (1 - a) + y * a; }
+double evaluate_step(double e, double x, double) { return (x < e) ? 0 : 1; }
+double evaluate_mod(double a, double b, double) {
+ return a - b * std::floor(sk_ieee_double_divide(a, b));
+}
+double evaluate_smoothstep(double edge0, double edge1, double x) {
+ double t = sk_ieee_double_divide(x - edge0, edge1 - edge0);
+ t = (t < 0) ? 0 : (t > 1) ? 1 : t;
+ return t * t * (3.0 - 2.0 * t);
+}
+
+double evaluate_matrixCompMult(double x, double y, double) { return x * y; }
+
+double evaluate_not(double a, double, double) { return !a; }
+double evaluate_sinh(double a, double, double) { return std::sinh(a); }
+double evaluate_cosh(double a, double, double) { return std::cosh(a); }
+double evaluate_tanh(double a, double, double) { return std::tanh(a); }
+double evaluate_trunc(double a, double, double) { return std::trunc(a); }
+double evaluate_round(double a, double, double) {
+ // The semantics of std::remainder guarantee a rounded-to-even result here, regardless of the
+ // current float-rounding mode.
+ return a - std::remainder(a, 1.0);
+}
+double evaluate_floatBitsToInt(double a, double, double) { return pun_value<float, int32_t> (a); }
+double evaluate_floatBitsToUint(double a, double, double) { return pun_value<float, uint32_t>(a); }
+double evaluate_intBitsToFloat(double a, double, double) { return pun_value<int32_t, float>(a); }
+double evaluate_uintBitsToFloat(double a, double, double) { return pun_value<uint32_t, float>(a); }
+
+} // namespace
+} // namespace Intrinsics
+
+static void extract_matrix(const Expression* expr, float mat[16]) {
+ size_t numSlots = expr->type().slotCount();
+ for (size_t index = 0; index < numSlots; ++index) {
+ mat[index] = *expr->getConstantValue(index);
+ }
+}
+
+static std::unique_ptr<Expression> optimize_intrinsic_call(const Context& context,
+ Position pos,
+ IntrinsicKind intrinsic,
+ const ExpressionArray& argArray,
+ const Type& returnType) {
+ // Replace constant variables with their literal values.
+ IntrinsicArguments arguments = {};
+ SkASSERT(SkToSizeT(argArray.size()) <= arguments.size());
+ for (int index = 0; index < argArray.size(); ++index) {
+ arguments[index] = ConstantFolder::GetConstantValueForVariable(*argArray[index]);
+ }
+
+ auto Get = [&](int idx, int col) -> float {
+ return *arguments[idx]->getConstantValue(col);
+ };
+
+ using namespace SkSL::dsl;
+ switch (intrinsic) {
+ // 8.1 : Angle and Trigonometry Functions
+ case k_radians_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_radians);
+ case k_degrees_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_degrees);
+ case k_sin_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_sin);
+ case k_cos_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_cos);
+ case k_tan_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_tan);
+ case k_sinh_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_sinh);
+ case k_cosh_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_cosh);
+ case k_tanh_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_tanh);
+ case k_asin_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_asin);
+ case k_acos_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_acos);
+ case k_atan_IntrinsicKind:
+ if (argArray.size() == 1) {
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_atan);
+ } else {
+ return evaluate_pairwise_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_atan2);
+ }
+ case k_asinh_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_asinh);
+
+ case k_acosh_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_acosh);
+ case k_atanh_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_atanh);
+ // 8.2 : Exponential Functions
+ case k_pow_IntrinsicKind:
+ return evaluate_pairwise_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_pow);
+ case k_exp_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_exp);
+ case k_log_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_log);
+ case k_exp2_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_exp2);
+ case k_log2_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_log2);
+ case k_sqrt_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_sqrt);
+ case k_inversesqrt_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_inversesqrt);
+ // 8.3 : Common Functions
+ case k_abs_IntrinsicKind:
+ return evaluate_intrinsic_numeric(context, arguments, returnType,
+ Intrinsics::evaluate_abs);
+ case k_sign_IntrinsicKind:
+ return evaluate_intrinsic_numeric(context, arguments, returnType,
+ Intrinsics::evaluate_sign);
+ case k_floor_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_floor);
+ case k_ceil_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_ceil);
+ case k_fract_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_fract);
+ case k_mod_IntrinsicKind:
+ return evaluate_pairwise_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_mod);
+ case k_min_IntrinsicKind:
+ return evaluate_pairwise_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_min);
+ case k_max_IntrinsicKind:
+ return evaluate_pairwise_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_max);
+ case k_clamp_IntrinsicKind:
+ return evaluate_3_way_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_clamp);
+ case k_fma_IntrinsicKind:
+ return evaluate_3_way_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_fma);
+ case k_saturate_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_saturate);
+ case k_mix_IntrinsicKind:
+ if (arguments[2]->type().componentType().isBoolean()) {
+ const SkSL::Type& numericType = arguments[0]->type().componentType();
+
+ if (numericType.isFloat()) {
+ type_check_expression<float>(*arguments[0]);
+ type_check_expression<float>(*arguments[1]);
+ } else if (numericType.isInteger()) {
+ type_check_expression<SKSL_INT>(*arguments[0]);
+ type_check_expression<SKSL_INT>(*arguments[1]);
+ } else if (numericType.isBoolean()) {
+ type_check_expression<bool>(*arguments[0]);
+ type_check_expression<bool>(*arguments[1]);
+ } else {
+ SkDEBUGFAILF("unsupported type %s", numericType.description().c_str());
+ return nullptr;
+ }
+ return evaluate_n_way_intrinsic(context, arguments[0], arguments[1], arguments[2],
+ returnType, Intrinsics::evaluate_mix);
+ } else {
+ return evaluate_3_way_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_mix);
+ }
+ case k_step_IntrinsicKind:
+ return evaluate_pairwise_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_step);
+ case k_smoothstep_IntrinsicKind:
+ return evaluate_3_way_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_smoothstep);
+ case k_trunc_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_trunc);
+ case k_round_IntrinsicKind: // GLSL `round` documents its rounding mode as unspecified
+ case k_roundEven_IntrinsicKind: // and is allowed to behave identically to `roundEven`.
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_round);
+ case k_floatBitsToInt_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_floatBitsToInt);
+ case k_floatBitsToUint_IntrinsicKind:
+ return evaluate_intrinsic<float>(context, arguments, returnType,
+ Intrinsics::evaluate_floatBitsToUint);
+ case k_intBitsToFloat_IntrinsicKind:
+ return evaluate_intrinsic<SKSL_INT>(context, arguments, returnType,
+ Intrinsics::evaluate_intBitsToFloat);
+ case k_uintBitsToFloat_IntrinsicKind:
+ return evaluate_intrinsic<SKSL_INT>(context, arguments, returnType,
+ Intrinsics::evaluate_uintBitsToFloat);
+ // 8.4 : Floating-Point Pack and Unpack Functions
+ case k_packUnorm2x16_IntrinsicKind: {
+ auto Pack = [&](int n) -> unsigned int {
+ float x = Get(0, n);
+ return (int)std::round(Intrinsics::evaluate_clamp(x, 0.0, 1.0) * 65535.0);
+ };
+ return UInt(((Pack(0) << 0) & 0x0000FFFF) |
+ ((Pack(1) << 16) & 0xFFFF0000)).release();
+ }
+ case k_packSnorm2x16_IntrinsicKind: {
+ auto Pack = [&](int n) -> unsigned int {
+ float x = Get(0, n);
+ return (int)std::round(Intrinsics::evaluate_clamp(x, -1.0, 1.0) * 32767.0);
+ };
+ return UInt(((Pack(0) << 0) & 0x0000FFFF) |
+ ((Pack(1) << 16) & 0xFFFF0000)).release();
+ }
+ case k_packHalf2x16_IntrinsicKind: {
+ auto Pack = [&](int n) -> unsigned int {
+ return SkFloatToHalf(Get(0, n));
+ };
+ return UInt(((Pack(0) << 0) & 0x0000FFFF) |
+ ((Pack(1) << 16) & 0xFFFF0000)).release();
+ }
+ case k_unpackUnorm2x16_IntrinsicKind: {
+ SKSL_INT x = *arguments[0]->getConstantValue(0);
+ uint16_t a = ((x >> 0) & 0x0000FFFF);
+ uint16_t b = ((x >> 16) & 0x0000FFFF);
+ return Float2(double(a) / 65535.0,
+ double(b) / 65535.0).release();
+ }
+ case k_unpackSnorm2x16_IntrinsicKind: {
+ SKSL_INT x = *arguments[0]->getConstantValue(0);
+ int16_t a = ((x >> 0) & 0x0000FFFF);
+ int16_t b = ((x >> 16) & 0x0000FFFF);
+ return Float2(Intrinsics::evaluate_clamp(double(a) / 32767.0, -1.0, 1.0),
+ Intrinsics::evaluate_clamp(double(b) / 32767.0, -1.0, 1.0)).release();
+ }
+ case k_unpackHalf2x16_IntrinsicKind: {
+ SKSL_INT x = *arguments[0]->getConstantValue(0);
+ uint16_t a = ((x >> 0) & 0x0000FFFF);
+ uint16_t b = ((x >> 16) & 0x0000FFFF);
+ return Float2(SkHalfToFloat(a),
+ SkHalfToFloat(b)).release();
+ }
+ // 8.5 : Geometric Functions
+ case k_length_IntrinsicKind:
+ return coalesce_vector<float>(arguments, /*startingState=*/0, returnType,
+ Intrinsics::coalesce_length,
+ Intrinsics::finalize_length);
+ case k_distance_IntrinsicKind:
+ return coalesce_pairwise_vectors<float>(arguments, /*startingState=*/0, returnType,
+ Intrinsics::coalesce_distance,
+ Intrinsics::finalize_distance);
+ case k_dot_IntrinsicKind:
+ return coalesce_pairwise_vectors<float>(arguments, /*startingState=*/0, returnType,
+ Intrinsics::coalesce_dot,
+ /*finalize=*/nullptr);
+ case k_cross_IntrinsicKind: {
+ auto X = [&](int n) -> float { return Get(0, n); };
+ auto Y = [&](int n) -> float { return Get(1, n); };
+ SkASSERT(arguments[0]->type().columns() == 3); // the vec2 form is not a real intrinsic
+
+ double vec[3] = {X(1) * Y(2) - Y(1) * X(2),
+ X(2) * Y(0) - Y(2) * X(0),
+ X(0) * Y(1) - Y(0) * X(1)};
+ return assemble_compound(context, arguments[0]->fPosition, returnType, vec);
+ }
+ case k_normalize_IntrinsicKind: {
+ auto Vec = [&] { return DSLExpression{arguments[0]->clone()}; };
+ return (Vec() / Length(Vec())).release();
+ }
+ case k_faceforward_IntrinsicKind: {
+ auto N = [&] { return DSLExpression{arguments[0]->clone()}; };
+ auto I = [&] { return DSLExpression{arguments[1]->clone()}; };
+ auto NRef = [&] { return DSLExpression{arguments[2]->clone()}; };
+ return (N() * Select(Dot(NRef(), I()) < 0, 1, -1)).release();
+ }
+ case k_reflect_IntrinsicKind: {
+ auto I = [&] { return DSLExpression{arguments[0]->clone()}; };
+ auto N = [&] { return DSLExpression{arguments[1]->clone()}; };
+ return (I() - 2.0 * Dot(N(), I()) * N()).release();
+ }
+ case k_refract_IntrinsicKind: {
+ // Refract uses its arguments out-of-order in such a way that we end up trying to create
+ // an invalid Position range, so we rewrite the arguments' positions to avoid that here.
+ auto clone = [&](const Expression* expr) {
+ return DSLExpression(expr->clone(pos));
+ };
+ auto I = [&] { return clone(arguments[0]); };
+ auto N = [&] { return clone(arguments[1]); };
+ auto Eta = [&] { return clone(arguments[2]); };
+
+ std::unique_ptr<Expression> k =
+ (1 - Pow(Eta(), 2) * (1 - Pow(Dot(N(), I()), 2))).release();
+ if (!k->is<Literal>()) {
+ return nullptr;
+ }
+ double kValue = k->as<Literal>().value();
+ return ((kValue < 0) ?
+ (0 * I()) :
+ (Eta() * I() - (Eta() * Dot(N(), I()) + std::sqrt(kValue)) * N())).release();
+ }
+
+ // 8.6 : Matrix Functions
+ case k_matrixCompMult_IntrinsicKind:
+ return evaluate_pairwise_intrinsic(context, arguments, returnType,
+ Intrinsics::evaluate_matrixCompMult);
+ case k_transpose_IntrinsicKind: {
+ double mat[16];
+ int index = 0;
+ for (int c = 0; c < returnType.columns(); ++c) {
+ for (int r = 0; r < returnType.rows(); ++r) {
+ mat[index++] = Get(0, (returnType.columns() * r) + c);
+ }
+ }
+ return assemble_compound(context, arguments[0]->fPosition, returnType, mat);
+ }
+ case k_outerProduct_IntrinsicKind: {
+ double mat[16];
+ int index = 0;
+ for (int c = 0; c < returnType.columns(); ++c) {
+ for (int r = 0; r < returnType.rows(); ++r) {
+ mat[index++] = Get(0, r) * Get(1, c);
+ }
+ }
+ return assemble_compound(context, arguments[0]->fPosition, returnType, mat);
+ }
+ case k_determinant_IntrinsicKind: {
+ float mat[16];
+ extract_matrix(arguments[0], mat);
+ float determinant;
+ switch (arguments[0]->type().slotCount()) {
+ case 4:
+ determinant = SkInvert2x2Matrix(mat, /*outMatrix=*/nullptr);
+ break;
+ case 9:
+ determinant = SkInvert3x3Matrix(mat, /*outMatrix=*/nullptr);
+ break;
+ case 16:
+ determinant = SkInvert4x4Matrix(mat, /*outMatrix=*/nullptr);
+ break;
+ default:
+ SkDEBUGFAILF("unsupported type %s", arguments[0]->type().description().c_str());
+ return nullptr;
+ }
+ return Literal::MakeFloat(arguments[0]->fPosition, determinant, &returnType);
+ }
+ case k_inverse_IntrinsicKind: {
+ float mat[16] = {};
+ extract_matrix(arguments[0], mat);
+ switch (arguments[0]->type().slotCount()) {
+ case 4:
+ if (SkInvert2x2Matrix(mat, mat) == 0.0f) {
+ return nullptr;
+ }
+ break;
+ case 9:
+ if (SkInvert3x3Matrix(mat, mat) == 0.0f) {
+ return nullptr;
+ }
+ break;
+ case 16:
+ if (SkInvert4x4Matrix(mat, mat) == 0.0f) {
+ return nullptr;
+ }
+ break;
+ default:
+ SkDEBUGFAILF("unsupported type %s", arguments[0]->type().description().c_str());
+ return nullptr;
+ }
+
+ double dmat[16];
+ std::copy(mat, mat + std::size(mat), dmat);
+ return assemble_compound(context, arguments[0]->fPosition, returnType, dmat);
+ }
+ // 8.7 : Vector Relational Functions
+ case k_lessThan_IntrinsicKind:
+ return optimize_comparison(context, arguments, Intrinsics::compare_lessThan);
+
+ case k_lessThanEqual_IntrinsicKind:
+ return optimize_comparison(context, arguments, Intrinsics::compare_lessThanEqual);
+
+ case k_greaterThan_IntrinsicKind:
+ return optimize_comparison(context, arguments, Intrinsics::compare_greaterThan);
+
+ case k_greaterThanEqual_IntrinsicKind:
+ return optimize_comparison(context, arguments, Intrinsics::compare_greaterThanEqual);
+
+ case k_equal_IntrinsicKind:
+ return optimize_comparison(context, arguments, Intrinsics::compare_equal);
+
+ case k_notEqual_IntrinsicKind:
+ return optimize_comparison(context, arguments, Intrinsics::compare_notEqual);
+
+ case k_any_IntrinsicKind:
+ return coalesce_vector<bool>(arguments, /*startingState=*/false, returnType,
+ Intrinsics::coalesce_any,
+ /*finalize=*/nullptr);
+ case k_all_IntrinsicKind:
+ return coalesce_vector<bool>(arguments, /*startingState=*/true, returnType,
+ Intrinsics::coalesce_all,
+ /*finalize=*/nullptr);
+ case k_not_IntrinsicKind:
+ return evaluate_intrinsic<bool>(context, arguments, returnType,
+ Intrinsics::evaluate_not);
+ default:
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Expression> FunctionCall::clone(Position pos) const {
+ return std::make_unique<FunctionCall>(pos, &this->type(), &this->function(),
+ this->arguments().clone());
+}
+
+std::string FunctionCall::description(OperatorPrecedence) const {
+ std::string result = std::string(this->function().name()) + "(";
+ auto separator = SkSL::String::Separator();
+ for (const std::unique_ptr<Expression>& arg : this->arguments()) {
+ result += separator();
+ result += arg->description(OperatorPrecedence::kSequence);
+ }
+ result += ")";
+ return result;
+}
+
+/**
+ * Determines the cost of coercing the arguments of a function to the required types. Cost has no
+ * particular meaning other than "lower costs are preferred". Returns CoercionCost::Impossible() if
+ * the call is not valid.
+ */
+static CoercionCost call_cost(const Context& context,
+ const FunctionDeclaration& function,
+ const ExpressionArray& arguments) {
+ if (context.fConfig->strictES2Mode() &&
+ (function.modifiers().fFlags & Modifiers::kES3_Flag)) {
+ return CoercionCost::Impossible();
+ }
+ if (function.parameters().size() != SkToSizeT(arguments.size())) {
+ return CoercionCost::Impossible();
+ }
+ FunctionDeclaration::ParamTypes types;
+ const Type* ignored;
+ if (!function.determineFinalTypes(arguments, &types, &ignored)) {
+ return CoercionCost::Impossible();
+ }
+ CoercionCost total = CoercionCost::Free();
+ for (int i = 0; i < arguments.size(); i++) {
+ total = total + arguments[i]->coercionCost(*types[i]);
+ }
+ return total;
+}
+
+const FunctionDeclaration* FunctionCall::FindBestFunctionForCall(
+ const Context& context,
+ const FunctionDeclaration* overloadChain,
+ const ExpressionArray& arguments) {
+ if (!overloadChain->nextOverload()) {
+ return overloadChain;
+ }
+ CoercionCost bestCost = CoercionCost::Impossible();
+ const FunctionDeclaration* best = nullptr;
+ for (const FunctionDeclaration* f = overloadChain; f != nullptr; f = f->nextOverload()) {
+ CoercionCost cost = call_cost(context, *f, arguments);
+ if (cost <= bestCost) {
+ bestCost = cost;
+ best = f;
+ }
+ }
+ return bestCost.fImpossible ? nullptr : best;
+}
+
+static std::string build_argument_type_list(SkSpan<const std::unique_ptr<Expression>> arguments) {
+ std::string result = "(";
+ auto separator = SkSL::String::Separator();
+ for (const std::unique_ptr<Expression>& arg : arguments) {
+ result += separator();
+ result += arg->type().displayName();
+ }
+ return result + ")";
+}
+
+std::unique_ptr<Expression> FunctionCall::Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> functionValue,
+ ExpressionArray arguments) {
+ switch (functionValue->kind()) {
+ case Expression::Kind::kTypeReference:
+ return Constructor::Convert(context,
+ pos,
+ functionValue->as<TypeReference>().value(),
+ std::move(arguments));
+ case Expression::Kind::kFunctionReference: {
+ const FunctionReference& ref = functionValue->as<FunctionReference>();
+ const FunctionDeclaration* best = FindBestFunctionForCall(context, ref.overloadChain(),
+ arguments);
+ if (best) {
+ return FunctionCall::Convert(context, pos, *best, std::move(arguments));
+ }
+ std::string msg = "no match for " + std::string(ref.overloadChain()->name()) +
+ build_argument_type_list(arguments);
+ context.fErrors->error(pos, msg);
+ return nullptr;
+ }
+ case Expression::Kind::kMethodReference: {
+ MethodReference& ref = functionValue->as<MethodReference>();
+ arguments.push_back(std::move(ref.self()));
+
+ const FunctionDeclaration* best = FindBestFunctionForCall(context, ref.overloadChain(),
+ arguments);
+ if (best) {
+ return FunctionCall::Convert(context, pos, *best, std::move(arguments));
+ }
+ std::string msg =
+ "no match for " + arguments.back()->type().displayName() +
+ "::" + std::string(ref.overloadChain()->name().substr(1)) +
+ build_argument_type_list(SkSpan(arguments).first(arguments.size() - 1));
+ context.fErrors->error(pos, msg);
+ return nullptr;
+ }
+ case Expression::Kind::kPoison:
+ functionValue->fPosition = pos;
+ return functionValue;
+ default:
+ context.fErrors->error(pos, "not a function");
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Expression> FunctionCall::Convert(const Context& context,
+ Position pos,
+ const FunctionDeclaration& function,
+ ExpressionArray arguments) {
+ // Reject ES3 function calls in strict ES2 mode.
+ if (context.fConfig->strictES2Mode() && (function.modifiers().fFlags & Modifiers::kES3_Flag)) {
+ context.fErrors->error(pos, "call to '" + function.description() + "' is not supported");
+ return nullptr;
+ }
+
+ // Reject function calls with the wrong number of arguments.
+ if (function.parameters().size() != SkToSizeT(arguments.size())) {
+ std::string msg = "call to '" + std::string(function.name()) + "' expected " +
+ std::to_string(function.parameters().size()) + " argument";
+ if (function.parameters().size() != 1) {
+ msg += "s";
+ }
+ msg += ", but found " + std::to_string(arguments.size());
+ context.fErrors->error(pos, msg);
+ return nullptr;
+ }
+
+ // Resolve generic types.
+ FunctionDeclaration::ParamTypes types;
+ const Type* returnType;
+ if (!function.determineFinalTypes(arguments, &types, &returnType)) {
+ std::string msg = "no match for " + std::string(function.name()) +
+ build_argument_type_list(arguments);
+ context.fErrors->error(pos, msg);
+ return nullptr;
+ }
+
+ for (int i = 0; i < arguments.size(); i++) {
+ // Coerce each argument to the proper type.
+ arguments[i] = types[i]->coerceExpression(std::move(arguments[i]), context);
+ if (!arguments[i]) {
+ return nullptr;
+ }
+ // Update the refKind on out-parameters, and ensure that they are actually assignable.
+ const Modifiers& paramModifiers = function.parameters()[i]->modifiers();
+ if (paramModifiers.fFlags & Modifiers::kOut_Flag) {
+ const VariableRefKind refKind = paramModifiers.fFlags & Modifiers::kIn_Flag
+ ? VariableReference::RefKind::kReadWrite
+ : VariableReference::RefKind::kPointer;
+ if (!Analysis::UpdateVariableRefKind(arguments[i].get(), refKind, context.fErrors)) {
+ return nullptr;
+ }
+ }
+ // TODO(skia:13609): Make sure that we don't pass writeonly objects to readonly parameters,
+ // or vice-versa.
+ }
+
+ if (function.isMain()) {
+ context.fErrors->error(pos, "call to 'main' is not allowed");
+ return nullptr;
+ }
+
+ if (function.intrinsicKind() == k_eval_IntrinsicKind) {
+ // This is a method call on an effect child. Translate it into a ChildCall, which simplifies
+ // handling in the generators and analysis code.
+ const Variable& child = *arguments.back()->as<VariableReference>().variable();
+ arguments.pop_back();
+ return ChildCall::Make(context, pos, returnType, child, std::move(arguments));
+ }
+
+ return Make(context, pos, returnType, function, std::move(arguments));
+}
+
+std::unique_ptr<Expression> FunctionCall::Make(const Context& context,
+ Position pos,
+ const Type* returnType,
+ const FunctionDeclaration& function,
+ ExpressionArray arguments) {
+ SkASSERT(function.parameters().size() == SkToSizeT(arguments.size()));
+
+ // We might be able to optimize built-in intrinsics.
+ if (function.isIntrinsic() && has_compile_time_constant_arguments(arguments)) {
+ // The function is an intrinsic and all inputs are compile-time constants. Optimize it.
+ if (std::unique_ptr<Expression> expr = optimize_intrinsic_call(context,
+ pos,
+ function.intrinsicKind(),
+ arguments,
+ *returnType)) {
+ expr->fPosition = pos;
+ return expr;
+ }
+ }
+
+ return std::make_unique<FunctionCall>(pos, returnType, &function, std::move(arguments));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h
new file mode 100644
index 0000000000..9f31a52772
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionCall.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONCALL
+#define SKSL_FUNCTIONCALL
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class FunctionDeclaration;
+class Type;
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * A function invocation.
+ */
+class FunctionCall final : public Expression {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kFunctionCall;
+
+ FunctionCall(Position pos, const Type* type, const FunctionDeclaration* function,
+ ExpressionArray arguments)
+ : INHERITED(pos, kIRNodeKind, type)
+ , fFunction(*function)
+ , fArguments(std::move(arguments)) {}
+
+ // Resolves generic types, performs type conversion on arguments, determines return type, and
+ // reports errors via the ErrorReporter.
+ static std::unique_ptr<Expression> Convert(const Context& context,
+ Position pos,
+ const FunctionDeclaration& function,
+ ExpressionArray arguments);
+
+ static std::unique_ptr<Expression> Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> functionValue,
+ ExpressionArray arguments);
+
+ // Creates the function call; reports errors via ASSERT.
+ static std::unique_ptr<Expression> Make(const Context& context,
+ Position pos,
+ const Type* returnType,
+ const FunctionDeclaration& function,
+ ExpressionArray arguments);
+
+ static const FunctionDeclaration* FindBestFunctionForCall(const Context& context,
+ const FunctionDeclaration* overloads,
+ const ExpressionArray& arguments);
+
+ const FunctionDeclaration& function() const {
+ return fFunction;
+ }
+
+ ExpressionArray& arguments() {
+ return fArguments;
+ }
+
+ const ExpressionArray& arguments() const {
+ return fArguments;
+ }
+
+ std::unique_ptr<Expression> clone(Position pos) const override;
+
+ std::string description(OperatorPrecedence) const override;
+
+private:
+ const FunctionDeclaration& fFunction;
+ ExpressionArray fArguments;
+
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.cpp b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.cpp
new file mode 100644
index 0000000000..036bfac02e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.cpp
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramKind.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTo.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkStringView.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <utility>
+
+namespace SkSL {
+
+static bool check_modifiers(const Context& context,
+ Position pos,
+ const Modifiers& modifiers) {
+ const int permitted = Modifiers::kInline_Flag |
+ Modifiers::kNoInline_Flag |
+ (context.fConfig->fIsBuiltinCode ? (Modifiers::kES3_Flag |
+ Modifiers::kPure_Flag |
+ Modifiers::kExport_Flag) : 0);
+ modifiers.checkPermitted(context, pos, permitted, /*permittedLayoutFlags=*/0);
+ if ((modifiers.fFlags & Modifiers::kInline_Flag) &&
+ (modifiers.fFlags & Modifiers::kNoInline_Flag)) {
+ context.fErrors->error(pos, "functions cannot be both 'inline' and 'noinline'");
+ return false;
+ }
+ return true;
+}
+
+static bool check_return_type(const Context& context, Position pos, const Type& returnType) {
+ ErrorReporter& errors = *context.fErrors;
+ if (returnType.isArray()) {
+ errors.error(pos, "functions may not return type '" + returnType.displayName() + "'");
+ return false;
+ }
+ if (context.fConfig->strictES2Mode() && returnType.isOrContainsArray()) {
+ errors.error(pos, "functions may not return structs containing arrays");
+ return false;
+ }
+ if (!context.fConfig->fIsBuiltinCode && returnType.componentType().isOpaque()) {
+ errors.error(pos, "functions may not return opaque type '" + returnType.displayName() +
+ "'");
+ return false;
+ }
+ return true;
+}
+
+static bool check_parameters(const Context& context,
+ std::vector<std::unique_ptr<Variable>>& parameters,
+ bool isMain) {
+ auto typeIsValidForColor = [&](const Type& type) {
+ return type.matches(*context.fTypes.fHalf4) || type.matches(*context.fTypes.fFloat4);
+ };
+
+ // The first color parameter passed to main() is the input color; the second is the dest color.
+ static constexpr int kBuiltinColorIDs[] = {SK_INPUT_COLOR_BUILTIN, SK_DEST_COLOR_BUILTIN};
+ unsigned int builtinColorIndex = 0;
+
+ // Check modifiers on each function parameter.
+ for (auto& param : parameters) {
+ const Type& type = param->type();
+ int permittedFlags = Modifiers::kConst_Flag | Modifiers::kIn_Flag;
+ if (!type.isOpaque()) {
+ permittedFlags |= Modifiers::kOut_Flag;
+ }
+ if (type.typeKind() == Type::TypeKind::kTexture) {
+ permittedFlags |= Modifiers::kReadOnly_Flag | Modifiers::kWriteOnly_Flag;
+ }
+ param->modifiers().checkPermitted(context,
+ param->modifiersPosition(),
+ permittedFlags,
+ /*permittedLayoutFlags=*/0);
+ // Only the (builtin) declarations of 'sample' are allowed to have shader/colorFilter or FP
+ // parameters. You can pass other opaque types to functions safely; this restriction is
+ // specific to "child" objects.
+ if (type.isEffectChild() && !context.fConfig->fIsBuiltinCode) {
+ context.fErrors->error(param->fPosition, "parameters of type '" + type.displayName() +
+ "' not allowed");
+ return false;
+ }
+
+ Modifiers m = param->modifiers();
+ bool modifiersChanged = false;
+
+ // The `in` modifier on function parameters is implicit, so we can replace `in float x` with
+ // `float x`. This prevents any ambiguity when matching a function by its param types.
+ if (Modifiers::kIn_Flag == (m.fFlags & (Modifiers::kOut_Flag | Modifiers::kIn_Flag))) {
+ m.fFlags &= ~(Modifiers::kOut_Flag | Modifiers::kIn_Flag);
+ modifiersChanged = true;
+ }
+
+ if (isMain) {
+ if (ProgramConfig::IsRuntimeEffect(context.fConfig->fKind) &&
+ context.fConfig->fKind != ProgramKind::kMeshFragment &&
+ context.fConfig->fKind != ProgramKind::kMeshVertex) {
+ // We verify that the signature is fully correct later. For now, if this is a
+ // runtime effect of any flavor, a float2 param is supposed to be the coords, and a
+ // half4/float parameter is supposed to be the input or destination color:
+ if (type.matches(*context.fTypes.fFloat2)) {
+ m.fLayout.fBuiltin = SK_MAIN_COORDS_BUILTIN;
+ modifiersChanged = true;
+ } else if (typeIsValidForColor(type) &&
+ builtinColorIndex < std::size(kBuiltinColorIDs)) {
+ m.fLayout.fBuiltin = kBuiltinColorIDs[builtinColorIndex++];
+ modifiersChanged = true;
+ }
+ } else if (ProgramConfig::IsFragment(context.fConfig->fKind)) {
+ // For testing purposes, we have .sksl inputs that are treated as both runtime
+ // effects and fragment shaders. To make that work, fragment shaders are allowed to
+ // have a coords parameter.
+ if (type.matches(*context.fTypes.fFloat2)) {
+ m.fLayout.fBuiltin = SK_MAIN_COORDS_BUILTIN;
+ modifiersChanged = true;
+ }
+ }
+ }
+
+ if (modifiersChanged) {
+ param->setModifiers(context.fModifiersPool->add(m));
+ }
+ }
+ return true;
+}
+
+static bool check_main_signature(const Context& context, Position pos, const Type& returnType,
+ std::vector<std::unique_ptr<Variable>>& parameters) {
+ ErrorReporter& errors = *context.fErrors;
+ ProgramKind kind = context.fConfig->fKind;
+
+ auto typeIsValidForColor = [&](const Type& type) {
+ return type.matches(*context.fTypes.fHalf4) || type.matches(*context.fTypes.fFloat4);
+ };
+
+ auto typeIsValidForAttributes = [&](const Type& type) {
+ return type.isStruct() && type.name() == "Attributes";
+ };
+
+ auto typeIsValidForVaryings = [&](const Type& type) {
+ return type.isStruct() && type.name() == "Varyings";
+ };
+
+ auto paramIsCoords = [&](int idx) {
+ const Variable& p = *parameters[idx];
+ return p.type().matches(*context.fTypes.fFloat2) &&
+ p.modifiers().fFlags == 0 &&
+ p.modifiers().fLayout.fBuiltin == SK_MAIN_COORDS_BUILTIN;
+ };
+
+ auto paramIsBuiltinColor = [&](int idx, int builtinID) {
+ const Variable& p = *parameters[idx];
+ return typeIsValidForColor(p.type()) &&
+ p.modifiers().fFlags == 0 &&
+ p.modifiers().fLayout.fBuiltin == builtinID;
+ };
+
+ auto paramIsConstInAttributes = [&](int idx) {
+ const Variable& p = *parameters[idx];
+ return typeIsValidForAttributes(p.type()) && p.modifiers().fFlags == Modifiers::kConst_Flag;
+ };
+
+ auto paramIsConstInVaryings = [&](int idx) {
+ const Variable& p = *parameters[idx];
+ return typeIsValidForVaryings(p.type()) && p.modifiers().fFlags == Modifiers::kConst_Flag;
+ };
+
+ auto paramIsOutColor = [&](int idx) {
+ const Variable& p = *parameters[idx];
+ return typeIsValidForColor(p.type()) && p.modifiers().fFlags == Modifiers::kOut_Flag;
+ };
+
+ auto paramIsInputColor = [&](int n) { return paramIsBuiltinColor(n, SK_INPUT_COLOR_BUILTIN); };
+ auto paramIsDestColor = [&](int n) { return paramIsBuiltinColor(n, SK_DEST_COLOR_BUILTIN); };
+
+ switch (kind) {
+ case ProgramKind::kRuntimeColorFilter:
+ case ProgramKind::kPrivateRuntimeColorFilter: {
+ // (half4|float4) main(half4|float4)
+ if (!typeIsValidForColor(returnType)) {
+ errors.error(pos, "'main' must return: 'vec4', 'float4', or 'half4'");
+ return false;
+ }
+ bool validParams = (parameters.size() == 1 && paramIsInputColor(0));
+ if (!validParams) {
+ errors.error(pos, "'main' parameter must be 'vec4', 'float4', or 'half4'");
+ return false;
+ }
+ break;
+ }
+ case ProgramKind::kRuntimeShader:
+ case ProgramKind::kPrivateRuntimeShader: {
+ // (half4|float4) main(float2)
+ if (!typeIsValidForColor(returnType)) {
+ errors.error(pos, "'main' must return: 'vec4', 'float4', or 'half4'");
+ return false;
+ }
+ if (!(parameters.size() == 1 && paramIsCoords(0))) {
+ errors.error(pos, "'main' parameter must be 'float2' or 'vec2'");
+ return false;
+ }
+ break;
+ }
+ case ProgramKind::kRuntimeBlender:
+ case ProgramKind::kPrivateRuntimeBlender: {
+ // (half4|float4) main(half4|float4, half4|float4)
+ if (!typeIsValidForColor(returnType)) {
+ errors.error(pos, "'main' must return: 'vec4', 'float4', or 'half4'");
+ return false;
+ }
+ if (!(parameters.size() == 2 &&
+ paramIsInputColor(0) &&
+ paramIsDestColor(1))) {
+ errors.error(pos, "'main' parameters must be (vec4|float4|half4, "
+ "vec4|float4|half4)");
+ return false;
+ }
+ break;
+ }
+ case ProgramKind::kMeshVertex: {
+ // Varyings main(const Attributes)
+ if (!typeIsValidForVaryings(returnType)) {
+ errors.error(pos, "'main' must return 'Varyings'.");
+ return false;
+ }
+ if (!(parameters.size() == 1 && paramIsConstInAttributes(0))) {
+ errors.error(pos, "'main' parameter must be 'const Attributes'.");
+ return false;
+ }
+ break;
+ }
+ case ProgramKind::kMeshFragment: {
+ // float2 main(const Varyings) -or- float2 main(const Varyings, out half4|float4)
+ if (!returnType.matches(*context.fTypes.fFloat2)) {
+ errors.error(pos, "'main' must return: 'vec2' or 'float2'");
+ return false;
+ }
+ if (!((parameters.size() == 1 && paramIsConstInVaryings(0)) ||
+ (parameters.size() == 2 && paramIsConstInVaryings(0) && paramIsOutColor(1)))) {
+ errors.error(pos,
+ "'main' parameters must be (const Varyings, (out (half4|float4))?)");
+ return false;
+ }
+ break;
+ }
+ case ProgramKind::kFragment:
+ case ProgramKind::kGraphiteFragment: {
+ bool validParams = (parameters.size() == 0) ||
+ (parameters.size() == 1 && paramIsCoords(0));
+ if (!validParams) {
+ errors.error(pos, "shader 'main' must be main() or main(float2)");
+ return false;
+ }
+ break;
+ }
+ case ProgramKind::kVertex:
+ case ProgramKind::kGraphiteVertex:
+ case ProgramKind::kCompute:
+ if (!returnType.matches(*context.fTypes.fVoid)) {
+ errors.error(pos, "'main' must return 'void'");
+ return false;
+ }
+ if (parameters.size()) {
+ errors.error(pos, "shader 'main' must have zero parameters");
+ return false;
+ }
+ break;
+ }
+ return true;
+}
+
+/**
+ * Given a concrete type (`float3`) and a generic type (`$genType`), returns the index of the
+ * concrete type within the generic type's typelist. Returns -1 if there is no match.
+ */
+static int find_generic_index(const Type& concreteType,
+ const Type& genericType,
+ bool allowNarrowing) {
+ SkSpan<const Type* const> genericTypes = genericType.coercibleTypes();
+ for (size_t index = 0; index < genericTypes.size(); ++index) {
+ if (concreteType.canCoerceTo(*genericTypes[index], allowNarrowing)) {
+ return index;
+ }
+ }
+ return -1;
+}
+
+/** Returns true if the types match, or if `concreteType` can be found in `maybeGenericType`. */
+static bool type_generically_matches(const Type& concreteType, const Type& maybeGenericType) {
+ return maybeGenericType.isGeneric()
+ ? find_generic_index(concreteType, maybeGenericType, /*allowNarrowing=*/false) != -1
+ : concreteType.matches(maybeGenericType);
+}
+
+/**
+ * Checks a parameter list (params) against the parameters of a function that was declared earlier
+ * (otherParams). Returns true if they match, even if the parameters in `otherParams` contain
+ * generic types.
+ */
+static bool parameters_match(const std::vector<std::unique_ptr<Variable>>& params,
+ const std::vector<Variable*>& otherParams) {
+ // If the param lists are different lengths, they're definitely not a match.
+ if (params.size() != otherParams.size()) {
+ return false;
+ }
+
+ // Figure out a consistent generic index (or bail if we find a contradiction).
+ int genericIndex = -1;
+ for (size_t i = 0; i < params.size(); ++i) {
+ const Type* paramType = &params[i]->type();
+ const Type* otherParamType = &otherParams[i]->type();
+
+ if (otherParamType->isGeneric()) {
+ int genericIndexForThisParam = find_generic_index(*paramType, *otherParamType,
+ /*allowNarrowing=*/false);
+ if (genericIndexForThisParam == -1) {
+ // The type wasn't a match for this generic at all; these params can't be a match.
+ return false;
+ }
+ if (genericIndex != -1 && genericIndex != genericIndexForThisParam) {
+ // The generic index mismatches from what we determined on a previous parameter.
+ return false;
+ }
+ genericIndex = genericIndexForThisParam;
+ }
+ }
+
+ // Now that we've determined a generic index (if we needed one), do a parameter check.
+ for (size_t i = 0; i < params.size(); i++) {
+ const Type* paramType = &params[i]->type();
+ const Type* otherParamType = &otherParams[i]->type();
+
+ // Make generic types concrete.
+ if (otherParamType->isGeneric()) {
+ SkASSERT(genericIndex != -1);
+ SkASSERT(genericIndex < (int)otherParamType->coercibleTypes().size());
+ otherParamType = otherParamType->coercibleTypes()[genericIndex];
+ }
+ // Detect type mismatches.
+ if (!paramType->matches(*otherParamType)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * Checks for a previously existing declaration of this function, reporting errors if there is an
+ * incompatible symbol. Returns true and sets outExistingDecl to point to the existing declaration
+ * (or null if none) on success, returns false on error.
+ */
+static bool find_existing_declaration(const Context& context,
+ SymbolTable& symbols,
+ Position pos,
+ const Modifiers* modifiers,
+ std::string_view name,
+ std::vector<std::unique_ptr<Variable>>& parameters,
+ Position returnTypePos,
+ const Type* returnType,
+ FunctionDeclaration** outExistingDecl) {
+ auto invalidDeclDescription = [&]() -> std::string {
+ std::vector<Variable*> paramPtrs;
+ paramPtrs.reserve(parameters.size());
+ for (std::unique_ptr<Variable>& param : parameters) {
+ paramPtrs.push_back(param.get());
+ }
+ return FunctionDeclaration(pos,
+ modifiers,
+ name,
+ std::move(paramPtrs),
+ returnType,
+ context.fConfig->fIsBuiltinCode)
+ .description();
+ };
+
+ ErrorReporter& errors = *context.fErrors;
+ Symbol* entry = symbols.findMutable(name);
+ *outExistingDecl = nullptr;
+ if (entry) {
+ if (!entry->is<FunctionDeclaration>()) {
+ errors.error(pos, "symbol '" + std::string(name) + "' was already defined");
+ return false;
+ }
+ for (FunctionDeclaration* other = &entry->as<FunctionDeclaration>(); other;
+ other = other->mutableNextOverload()) {
+ SkASSERT(name == other->name());
+ if (!parameters_match(parameters, other->parameters())) {
+ continue;
+ }
+ if (!type_generically_matches(*returnType, other->returnType())) {
+ errors.error(returnTypePos,
+ "functions '" + invalidDeclDescription() + "' and '" +
+ other->description() + "' differ only in return type");
+ return false;
+ }
+ for (size_t i = 0; i < parameters.size(); i++) {
+ if (parameters[i]->modifiers() != other->parameters()[i]->modifiers()) {
+ errors.error(parameters[i]->fPosition,
+ "modifiers on parameter " + std::to_string(i + 1) +
+ " differ between declaration and definition");
+ return false;
+ }
+ }
+ if (*modifiers != other->modifiers() || other->definition() || other->isIntrinsic()) {
+ errors.error(pos, "duplicate definition of '" + invalidDeclDescription() + "'");
+ return false;
+ }
+ *outExistingDecl = other;
+ break;
+ }
+ if (!*outExistingDecl && entry->as<FunctionDeclaration>().isMain()) {
+ errors.error(pos, "duplicate definition of 'main'");
+ return false;
+ }
+ }
+ return true;
+}
+
+FunctionDeclaration::FunctionDeclaration(Position pos,
+ const Modifiers* modifiers,
+ std::string_view name,
+ std::vector<Variable*> parameters,
+ const Type* returnType,
+ bool builtin)
+ : INHERITED(pos, kIRNodeKind, name, /*type=*/nullptr)
+ , fDefinition(nullptr)
+ , fModifiers(modifiers)
+ , fParameters(std::move(parameters))
+ , fReturnType(returnType)
+ , fBuiltin(builtin)
+ , fIsMain(name == "main")
+ , fIntrinsicKind(builtin ? FindIntrinsicKind(name) : kNotIntrinsic) {
+ // None of the parameters are allowed to be be null.
+ SkASSERT(std::count(fParameters.begin(), fParameters.end(), nullptr) == 0);
+}
+
+FunctionDeclaration* FunctionDeclaration::Convert(const Context& context,
+ SymbolTable& symbols,
+ Position pos,
+ Position modifiersPosition,
+ const Modifiers* modifiers,
+ std::string_view name,
+ std::vector<std::unique_ptr<Variable>> parameters,
+ Position returnTypePos,
+ const Type* returnType) {
+ bool isMain = (name == "main");
+
+ FunctionDeclaration* decl = nullptr;
+ if (!check_modifiers(context, modifiersPosition, *modifiers) ||
+ !check_return_type(context, returnTypePos, *returnType) ||
+ !check_parameters(context, parameters, isMain) ||
+ (isMain && !check_main_signature(context, pos, *returnType, parameters)) ||
+ !find_existing_declaration(context, symbols, pos, modifiers, name, parameters,
+ returnTypePos, returnType, &decl)) {
+ return nullptr;
+ }
+ std::vector<Variable*> finalParameters;
+ finalParameters.reserve(parameters.size());
+ for (std::unique_ptr<Variable>& param : parameters) {
+ finalParameters.push_back(symbols.takeOwnershipOfSymbol(std::move(param)));
+ }
+ if (decl) {
+ return decl;
+ }
+ auto result = std::make_unique<FunctionDeclaration>(pos,
+ modifiers,
+ name,
+ std::move(finalParameters),
+ returnType,
+ context.fConfig->fIsBuiltinCode);
+ return symbols.add(std::move(result));
+}
+
+std::string FunctionDeclaration::mangledName() const {
+ if ((this->isBuiltin() && !this->definition()) || this->isMain()) {
+ // Builtins without a definition (like `sin` or `sqrt`) must use their real names.
+ return std::string(this->name());
+ }
+ // Built-in functions can have a $ prefix, which will fail to compile in GLSL. Remove the
+ // $ and add a unique mangling specifier, so user code can't conflict with the name.
+ std::string_view name = this->name();
+ const char* builtinMarker = "";
+ if (skstd::starts_with(name, '$')) {
+ name.remove_prefix(1);
+ builtinMarker = "Q"; // a unique, otherwise-unused mangle character
+ }
+ // Rename function to `funcname_returntypeparamtypes`.
+ std::string result = std::string(name) + "_" + builtinMarker +
+ this->returnType().abbreviatedName();
+ for (const Variable* p : this->parameters()) {
+ result += p->type().abbreviatedName();
+ }
+ return result;
+}
+
+std::string FunctionDeclaration::description() const {
+ int modifierFlags = this->modifiers().fFlags;
+ std::string result =
+ (modifierFlags ? Modifiers::DescribeFlags(modifierFlags) + " " : std::string()) +
+ this->returnType().displayName() + " " + std::string(this->name()) + "(";
+ auto separator = SkSL::String::Separator();
+ for (const Variable* p : this->parameters()) {
+ result += separator();
+ // We can't just say `p->description()` here, because occasionally might have added layout
+ // flags onto parameters (like `layout(builtin=10009)`) and don't want to reproduce that.
+ if (p->modifiers().fFlags) {
+ result += Modifiers::DescribeFlags(p->modifiers().fFlags) + " ";
+ }
+ result += p->type().displayName();
+ result += " ";
+ result += p->name();
+ }
+ result += ")";
+ return result;
+}
+
+bool FunctionDeclaration::matches(const FunctionDeclaration& f) const {
+ if (this->name() != f.name()) {
+ return false;
+ }
+ const std::vector<Variable*>& parameters = this->parameters();
+ const std::vector<Variable*>& otherParameters = f.parameters();
+ if (parameters.size() != otherParameters.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < parameters.size(); i++) {
+ if (!parameters[i]->type().matches(otherParameters[i]->type())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool FunctionDeclaration::determineFinalTypes(const ExpressionArray& arguments,
+ ParamTypes* outParameterTypes,
+ const Type** outReturnType) const {
+ const std::vector<Variable*>& parameters = this->parameters();
+ SkASSERT(SkToSizeT(arguments.size()) == parameters.size());
+
+ outParameterTypes->reserve_back(arguments.size());
+ int genericIndex = -1;
+ for (int i = 0; i < arguments.size(); i++) {
+ // Non-generic parameters are final as-is.
+ const Type& parameterType = parameters[i]->type();
+ if (!parameterType.isGeneric()) {
+ outParameterTypes->push_back(&parameterType);
+ continue;
+ }
+ // We use the first generic parameter we find to lock in the generic index;
+ // e.g. if we find `float3` here, all `$genType`s will be assumed to be `float3`.
+ if (genericIndex == -1) {
+ genericIndex = find_generic_index(arguments[i]->type(), parameterType,
+ /*allowNarrowing=*/true);
+ if (genericIndex == -1) {
+ // The passed-in type wasn't a match for ANY of the generic possibilities.
+ // This function isn't a match at all.
+ return false;
+ }
+ }
+ outParameterTypes->push_back(parameterType.coercibleTypes()[genericIndex]);
+ }
+ // Apply the generic index to our return type.
+ const Type& returnType = this->returnType();
+ if (returnType.isGeneric()) {
+ if (genericIndex == -1) {
+ // We don't support functions with a generic return type and no other generics.
+ return false;
+ }
+ *outReturnType = returnType.coercibleTypes()[genericIndex];
+ } else {
+ *outReturnType = &returnType;
+ }
+ return true;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h
new file mode 100644
index 0000000000..462456c1ea
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDeclaration.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONDECLARATION
+#define SKSL_FUNCTIONDECLARATION
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/private/base/SkTArray.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+
+#include <memory>
+#include <string>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+
+class Context;
+class ExpressionArray;
+class FunctionDefinition;
+class Position;
+class SymbolTable;
+class Type;
+class Variable;
+
+struct Modifiers;
+
+/**
+ * A function declaration (not a definition -- does not contain a body).
+ */
+class FunctionDeclaration final : public Symbol {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kFunctionDeclaration;
+
+ FunctionDeclaration(Position pos,
+ const Modifiers* modifiers,
+ std::string_view name,
+ std::vector<Variable*> parameters,
+ const Type* returnType,
+ bool builtin);
+
+ static FunctionDeclaration* Convert(const Context& context,
+ SymbolTable& symbols,
+ Position pos,
+ Position modifiersPos,
+ const Modifiers* modifiers,
+ std::string_view name,
+ std::vector<std::unique_ptr<Variable>> parameters,
+ Position returnTypePos,
+ const Type* returnType);
+
+ const Modifiers& modifiers() const {
+ return *fModifiers;
+ }
+
+ void setModifiers(const Modifiers* m) {
+ fModifiers = m;
+ }
+
+ const FunctionDefinition* definition() const {
+ return fDefinition;
+ }
+
+ void setDefinition(const FunctionDefinition* definition) {
+ fDefinition = definition;
+ fIntrinsicKind = kNotIntrinsic;
+ }
+
+ void setNextOverload(FunctionDeclaration* overload) {
+ SkASSERT(!overload || overload->name() == this->name());
+ fNextOverload = overload;
+ }
+
+ const std::vector<Variable*>& parameters() const {
+ return fParameters;
+ }
+
+ const Type& returnType() const {
+ return *fReturnType;
+ }
+
+ bool isBuiltin() const {
+ return fBuiltin;
+ }
+
+ bool isMain() const {
+ return fIsMain;
+ }
+
+ IntrinsicKind intrinsicKind() const {
+ return fIntrinsicKind;
+ }
+
+ bool isIntrinsic() const {
+ return this->intrinsicKind() != kNotIntrinsic;
+ }
+
+ const FunctionDeclaration* nextOverload() const {
+ return fNextOverload;
+ }
+
+ FunctionDeclaration* mutableNextOverload() const {
+ return fNextOverload;
+ }
+
+ std::string mangledName() const;
+
+ std::string description() const override;
+
+ bool matches(const FunctionDeclaration& f) const;
+
+ /**
+ * Determine the effective types of this function's parameters and return value when called with
+ * the given arguments. This is relevant for functions with generic parameter types, where this
+ * will collapse the generic types down into specific concrete types.
+ *
+ * Returns true if it was able to select a concrete set of types for the generic function, false
+ * if there is no possible way this can match the argument types. Note that even a true return
+ * does not guarantee that the function can be successfully called with those arguments, merely
+ * indicates that an attempt should be made. If false is returned, the state of
+ * outParameterTypes and outReturnType are undefined.
+ *
+ * This always assumes narrowing conversions are *allowed*. The calling code needs to verify
+ * that each argument can actually be coerced to the final parameter type, respecting the
+ * narrowing-conversions flag. This is handled in callCost(), or in convertCall() (via coerce).
+ */
+ using ParamTypes = SkSTArray<8, const Type*>;
+ bool determineFinalTypes(const ExpressionArray& arguments,
+ ParamTypes* outParameterTypes,
+ const Type** outReturnType) const;
+
+private:
+ const FunctionDefinition* fDefinition;
+ FunctionDeclaration* fNextOverload = nullptr;
+ const Modifiers* fModifiers;
+ std::vector<Variable*> fParameters;
+ const Type* fReturnType;
+ bool fBuiltin;
+ bool fIsMain;
+ mutable IntrinsicKind fIntrinsicKind = kNotIntrinsic;
+
+ using INHERITED = Symbol;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.cpp b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.cpp
new file mode 100644
index 0000000000..b33a4352a6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.cpp
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/sksl/DSLCore.h"
+#include "include/sksl/DSLExpression.h"
+#include "include/sksl/DSLStatement.h"
+#include "include/sksl/DSLType.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/base/SkSafeMath.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLField.h"
+#include "src/sksl/ir/SkSLFieldAccess.h"
+#include "src/sksl/ir/SkSLReturnStatement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/transform/SkSLProgramWriter.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <forward_list>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+
+static void append_rtadjust_fixup_to_vertex_main(const Context& context,
+ const FunctionDeclaration& decl,
+ Block& body) {
+ using namespace SkSL::dsl;
+ using SkSL::dsl::Swizzle; // disambiguate from SkSL::Swizzle
+ using OwnerKind = SkSL::FieldAccess::OwnerKind;
+
+ // If this program uses RTAdjust...
+ ThreadContext::RTAdjustData& rtAdjust = ThreadContext::RTAdjustState();
+ if (rtAdjust.fVar || rtAdjust.fInterfaceBlock) {
+ // ...append a line to the end of the function body which fixes up sk_Position.
+ const SymbolTable* symbolTable = ThreadContext::SymbolTable().get();
+ const Field& skPositionField = symbolTable->find(Compiler::POSITION_NAME)->as<Field>();
+
+ auto Ref = [](const Variable* var) -> std::unique_ptr<Expression> {
+ return VariableReference::Make(Position(), var);
+ };
+ auto Field = [&](const Variable* var, int idx) -> std::unique_ptr<Expression> {
+ return FieldAccess::Make(context, Position(), Ref(var), idx,
+ OwnerKind::kAnonymousInterfaceBlock);
+ };
+ auto Pos = [&]() -> DSLExpression {
+ return DSLExpression(Field(&skPositionField.owner(), skPositionField.fieldIndex()));
+ };
+ auto Adjust = [&]() -> DSLExpression {
+ return DSLExpression(rtAdjust.fInterfaceBlock
+ ? Field(rtAdjust.fInterfaceBlock, rtAdjust.fFieldIndex)
+ : Ref(rtAdjust.fVar));
+ };
+
+ auto fixupStmt = DSLStatement(
+ Pos().assign(Float4(Swizzle(Pos(), X, Y) * Swizzle(Adjust(), X, Z) +
+ Swizzle(Pos(), W, W) * Swizzle(Adjust(), Y, W),
+ 0,
+ Pos().w()))
+ );
+
+ body.children().push_back(fixupStmt.release());
+ }
+}
+
+std::unique_ptr<FunctionDefinition> FunctionDefinition::Convert(const Context& context,
+ Position pos,
+ const FunctionDeclaration& function,
+ std::unique_ptr<Statement> body,
+ bool builtin) {
+ class Finalizer : public ProgramWriter {
+ public:
+ Finalizer(const Context& context, const FunctionDeclaration& function, Position pos)
+ : fContext(context)
+ , fFunction(function) {
+ // Function parameters count as local variables.
+ for (const Variable* var : function.parameters()) {
+ this->addLocalVariable(var, pos);
+ }
+ }
+
+ void addLocalVariable(const Variable* var, Position pos) {
+ // We count the number of slots used, but don't consider the precision of the base type.
+ // In practice, this reflects what GPUs actually do pretty well. (i.e., RelaxedPrecision
+ // math doesn't mean your variable takes less space.) We also don't attempt to reclaim
+ // slots at the end of a Block.
+ size_t prevSlotsUsed = fSlotsUsed;
+ fSlotsUsed = SkSafeMath::Add(fSlotsUsed, var->type().slotCount());
+ // To avoid overzealous error reporting, only trigger the error at the first
+ // place where the stack limit is exceeded.
+ if (prevSlotsUsed < kVariableSlotLimit && fSlotsUsed >= kVariableSlotLimit) {
+ fContext.fErrors->error(pos, "variable '" + std::string(var->name()) +
+ "' exceeds the stack size limit");
+ }
+ }
+
+ ~Finalizer() override {
+ SkASSERT(fBreakableLevel == 0);
+ SkASSERT(fContinuableLevel == std::forward_list<int>{0});
+ }
+
+ bool functionReturnsValue() const {
+ return !fFunction.returnType().isVoid();
+ }
+
+ bool visitExpression(Expression& expr) override {
+ // We don't need to scan expressions.
+ return false;
+ }
+
+ bool visitStatement(Statement& stmt) override {
+ switch (stmt.kind()) {
+ case Statement::Kind::kVarDeclaration: {
+ const Variable* var = stmt.as<VarDeclaration>().var();
+ if (var->type().isOrContainsUnsizedArray()) {
+ fContext.fErrors->error(stmt.fPosition,
+ "unsized arrays are not permitted here");
+ } else {
+ this->addLocalVariable(var, stmt.fPosition);
+ }
+ break;
+ }
+ case Statement::Kind::kReturn: {
+                    // Early returns from a vertex main() function would bypass sk_Position
+                    // normalization, so we report an error if we see one. If this becomes an
+                    // issue, we can add normalization before each return statement.
+ if (ProgramConfig::IsVertex(fContext.fConfig->fKind) && fFunction.isMain()) {
+ fContext.fErrors->error(
+ stmt.fPosition,
+ "early returns from vertex programs are not supported");
+ }
+
+ // Verify that the return statement matches the function's return type.
+ ReturnStatement& returnStmt = stmt.as<ReturnStatement>();
+ if (returnStmt.expression()) {
+ if (this->functionReturnsValue()) {
+ // Coerce return expression to the function's return type.
+ returnStmt.setExpression(fFunction.returnType().coerceExpression(
+ std::move(returnStmt.expression()), fContext));
+ } else {
+ // Returning something from a function with a void return type.
+ fContext.fErrors->error(returnStmt.expression()->fPosition,
+ "may not return a value from a void function");
+ returnStmt.setExpression(nullptr);
+ }
+ } else {
+ if (this->functionReturnsValue()) {
+ // Returning nothing from a function with a non-void return type.
+ fContext.fErrors->error(returnStmt.fPosition,
+ "expected function to return '" +
+ fFunction.returnType().displayName() + "'");
+ }
+ }
+ break;
+ }
+ case Statement::Kind::kDo:
+ case Statement::Kind::kFor: {
+ ++fBreakableLevel;
+ ++fContinuableLevel.front();
+ bool result = INHERITED::visitStatement(stmt);
+ --fContinuableLevel.front();
+ --fBreakableLevel;
+ return result;
+ }
+ case Statement::Kind::kSwitch: {
+ ++fBreakableLevel;
+ fContinuableLevel.push_front(0);
+ bool result = INHERITED::visitStatement(stmt);
+ fContinuableLevel.pop_front();
+ --fBreakableLevel;
+ return result;
+ }
+ case Statement::Kind::kBreak:
+ if (fBreakableLevel == 0) {
+ fContext.fErrors->error(stmt.fPosition,
+ "break statement must be inside a loop or switch");
+ }
+ break;
+ case Statement::Kind::kContinue:
+ if (fContinuableLevel.front() == 0) {
+ if (std::any_of(fContinuableLevel.begin(),
+ fContinuableLevel.end(),
+ [](int level) { return level > 0; })) {
+ fContext.fErrors->error(stmt.fPosition,
+ "continue statement cannot be used in a switch");
+ } else {
+ fContext.fErrors->error(stmt.fPosition,
+ "continue statement must be inside a loop");
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return INHERITED::visitStatement(stmt);
+ }
+
+ private:
+ const Context& fContext;
+ const FunctionDeclaration& fFunction;
+ // how deeply nested we are in breakable constructs (for, do, switch).
+ int fBreakableLevel = 0;
+ // number of slots consumed by all variables declared in the function
+ size_t fSlotsUsed = 0;
+ // how deeply nested we are in continuable constructs (for, do).
+ // We keep a stack (via a forward_list) in order to disallow continue inside of switch.
+ std::forward_list<int> fContinuableLevel{0};
+
+ using INHERITED = ProgramWriter;
+ };
+
+ Finalizer(context, function, pos).visitStatement(*body);
+ if (function.isMain() && ProgramConfig::IsVertex(context.fConfig->fKind)) {
+ append_rtadjust_fixup_to_vertex_main(context, function, body->as<Block>());
+ }
+
+ if (Analysis::CanExitWithoutReturningValue(function, *body)) {
+ context.fErrors->error(body->fPosition, "function '" + std::string(function.name()) +
+ "' can exit without returning a value");
+ }
+
+ SkASSERTF(!function.isIntrinsic(), "Intrinsic function '%.*s' should not have a definition",
+ (int)function.name().size(), function.name().data());
+ return std::make_unique<FunctionDefinition>(pos, &function, builtin, std::move(body));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h
new file mode 100644
index 0000000000..7b77b68a2b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionDefinition.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONDEFINITION
+#define SKSL_FUNCTIONDEFINITION
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * A function definition (a declaration plus an associated block of code).
+ */
+class FunctionDefinition final : public ProgramElement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kFunction;
+
+ FunctionDefinition(Position pos, const FunctionDeclaration* declaration, bool builtin,
+ std::unique_ptr<Statement> body)
+ : INHERITED(pos, kIRNodeKind)
+ , fDeclaration(declaration)
+ , fBuiltin(builtin)
+ , fBody(std::move(body)) {}
+
+ /**
+ * Coerces `return` statements to the return type of the function, and reports errors in the
+ * function that can't be detected at the individual statement level:
+ * - `break` and `continue` statements must be in reasonable places.
+ * - non-void functions are required to return a value on all paths.
+ * - vertex main() functions don't allow early returns.
+ *
+ * This will return a FunctionDefinition even if an error is detected; this leads to better
+ * diagnostics overall. (Returning null here leads to spurious "function 'f()' was not defined"
+ * errors when trying to call a function with an error in it.)
+ */
+ static std::unique_ptr<FunctionDefinition> Convert(const Context& context,
+ Position pos,
+ const FunctionDeclaration& function,
+ std::unique_ptr<Statement> body,
+ bool builtin);
+
+ const FunctionDeclaration& declaration() const {
+ return *fDeclaration;
+ }
+
+ bool isBuiltin() const {
+ return fBuiltin;
+ }
+
+ std::unique_ptr<Statement>& body() {
+ return fBody;
+ }
+
+ const std::unique_ptr<Statement>& body() const {
+ return fBody;
+ }
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::make_unique<FunctionDefinition>(fPosition, &this->declaration(),
+ /*builtin=*/false, this->body()->clone());
+ }
+
+ std::string description() const override {
+ return this->declaration().description() + " " + this->body()->description();
+ }
+
+private:
+ const FunctionDeclaration* fDeclaration;
+ bool fBuiltin;
+ std::unique_ptr<Statement> fBody;
+
+ using INHERITED = ProgramElement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionPrototype.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionPrototype.h
new file mode 100644
index 0000000000..934cf85c27
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionPrototype.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONPROTOTYPE
+#define SKSL_FUNCTIONPROTOTYPE
+
+#include "include/private/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * A function prototype (a function declaration as a top-level program element)
+ */
+class FunctionPrototype final : public ProgramElement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kFunctionPrototype;
+
+ FunctionPrototype(Position pos, const FunctionDeclaration* declaration, bool builtin)
+ : INHERITED(pos, kIRNodeKind)
+ , fDeclaration(declaration)
+ , fBuiltin(builtin) {}
+
+ const FunctionDeclaration& declaration() const {
+ return *fDeclaration;
+ }
+
+ bool isBuiltin() const {
+ return fBuiltin;
+ }
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::make_unique<FunctionPrototype>(fPosition, &this->declaration(),
+ /*builtin=*/false);
+ }
+
+ std::string description() const override {
+ return this->declaration().description() + ";";
+ }
+
+private:
+ const FunctionDeclaration* fDeclaration;
+ bool fBuiltin;
+
+ using INHERITED = ProgramElement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h b/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h
new file mode 100644
index 0000000000..4788dbb418
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLFunctionReference.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FUNCTIONREFERENCE
+#define SKSL_FUNCTIONREFERENCE
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+
+namespace SkSL {
+
+/**
+ * An identifier referring to a function name. This is an intermediate value: FunctionReferences are
+ * always eventually replaced by FunctionCalls in valid programs.
+ */
+class FunctionReference final : public Expression {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kFunctionReference;
+
+ FunctionReference(const Context& context, Position pos,
+ const FunctionDeclaration* overloadChain)
+ : INHERITED(pos, kIRNodeKind, context.fTypes.fInvalid.get())
+ , fOverloadChain(overloadChain) {}
+
+ const FunctionDeclaration* overloadChain() const {
+ return fOverloadChain;
+ }
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::unique_ptr<Expression>(new FunctionReference(pos, this->overloadChain(),
+ &this->type()));
+ }
+
+ std::string description(OperatorPrecedence) const override {
+ return "<function>";
+ }
+
+private:
+ FunctionReference(Position pos, const FunctionDeclaration* overloadChain, const Type* type)
+ : INHERITED(pos, kIRNodeKind, type)
+ , fOverloadChain(overloadChain) {}
+
+ const FunctionDeclaration* fOverloadChain;
+
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.cpp b/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.cpp
new file mode 100644
index 0000000000..7d6918629c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLIfStatement.h"
+
+#include "include/core/SkTypes.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+std::unique_ptr<Statement> IfStatement::clone() const {
+ return std::make_unique<IfStatement>(fPosition, this->test()->clone(), this->ifTrue()->clone(),
+ this->ifFalse() ? this->ifFalse()->clone() : nullptr);
+}
+
+std::string IfStatement::description() const {
+ std::string result;
+ result += "if (" + this->test()->description() + ") " + this->ifTrue()->description();
+ if (this->ifFalse()) {
+ result += " else " + this->ifFalse()->description();
+ }
+ return result;
+}
+
+std::unique_ptr<Statement> IfStatement::Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> ifTrue,
+ std::unique_ptr<Statement> ifFalse) {
+ test = context.fTypes.fBool->coerceExpression(std::move(test), context);
+ if (!test) {
+ return nullptr;
+ }
+ SkASSERT(ifTrue);
+ if (Analysis::DetectVarDeclarationWithoutScope(*ifTrue, context.fErrors)) {
+ return nullptr;
+ }
+ if (ifFalse && Analysis::DetectVarDeclarationWithoutScope(*ifFalse, context.fErrors)) {
+ return nullptr;
+ }
+ return IfStatement::Make(context, pos, std::move(test), std::move(ifTrue), std::move(ifFalse));
+}
+
+static std::unique_ptr<Statement> replace_empty_with_nop(std::unique_ptr<Statement> stmt,
+ bool isEmpty) {
+ return (stmt && (!isEmpty || stmt->is<Nop>())) ? std::move(stmt)
+ : Nop::Make();
+}
+
+std::unique_ptr<Statement> IfStatement::Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> ifTrue,
+ std::unique_ptr<Statement> ifFalse) {
+ SkASSERT(test->type().matches(*context.fTypes.fBool));
+ SkASSERT(!Analysis::DetectVarDeclarationWithoutScope(*ifTrue));
+ SkASSERT(!ifFalse || !Analysis::DetectVarDeclarationWithoutScope(*ifFalse));
+
+ const bool optimize = context.fConfig->fSettings.fOptimize;
+ bool trueIsEmpty = false;
+ bool falseIsEmpty = false;
+
+ if (optimize) {
+ // If both sides are empty, the if statement can be reduced to its test expression.
+ trueIsEmpty = ifTrue->isEmpty();
+ falseIsEmpty = !ifFalse || ifFalse->isEmpty();
+ if (trueIsEmpty && falseIsEmpty) {
+ return ExpressionStatement::Make(context, std::move(test));
+ }
+ }
+
+ if (optimize) {
+ // Static Boolean values can fold down to a single branch.
+ const Expression* testValue = ConstantFolder::GetConstantValueForVariable(*test);
+ if (testValue->isBoolLiteral()) {
+ if (testValue->as<Literal>().boolValue()) {
+ return replace_empty_with_nop(std::move(ifTrue), trueIsEmpty);
+ } else {
+ return replace_empty_with_nop(std::move(ifFalse), falseIsEmpty);
+ }
+ }
+ }
+
+ if (optimize) {
+        // Replace an empty if-true branch with a Nop; eliminate empty if-false branches entirely.
+ ifTrue = replace_empty_with_nop(std::move(ifTrue), trueIsEmpty);
+ if (falseIsEmpty) {
+ ifFalse = nullptr;
+ }
+ }
+
+ return std::make_unique<IfStatement>(
+ pos, std::move(test), std::move(ifTrue), std::move(ifFalse));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h
new file mode 100644
index 0000000000..379b8ad50d
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIfStatement.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_IFSTATEMENT
+#define SKSL_IFSTATEMENT
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * An 'if' statement.
+ */
+class IfStatement final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kIf;
+
+ IfStatement(Position pos, std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> ifTrue, std::unique_ptr<Statement> ifFalse)
+ : INHERITED(pos, kIRNodeKind)
+ , fTest(std::move(test))
+ , fIfTrue(std::move(ifTrue))
+ , fIfFalse(std::move(ifFalse)) {}
+
+ // Creates a potentially-simplified form of the if-statement. Typechecks and coerces the test
+ // expression; reports errors via ErrorReporter.
+ static std::unique_ptr<Statement> Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> ifTrue,
+ std::unique_ptr<Statement> ifFalse);
+
+ // Creates a potentially-simplified form of the if-statement; reports errors via ASSERT.
+ static std::unique_ptr<Statement> Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> test,
+ std::unique_ptr<Statement> ifTrue,
+ std::unique_ptr<Statement> ifFalse);
+
+ std::unique_ptr<Expression>& test() {
+ return fTest;
+ }
+
+ const std::unique_ptr<Expression>& test() const {
+ return fTest;
+ }
+
+ std::unique_ptr<Statement>& ifTrue() {
+ return fIfTrue;
+ }
+
+ const std::unique_ptr<Statement>& ifTrue() const {
+ return fIfTrue;
+ }
+
+ std::unique_ptr<Statement>& ifFalse() {
+ return fIfFalse;
+ }
+
+ const std::unique_ptr<Statement>& ifFalse() const {
+ return fIfFalse;
+ }
+
+ std::unique_ptr<Statement> clone() const override;
+
+ std::string description() const override;
+
+private:
+ std::unique_ptr<Expression> fTest;
+ std::unique_ptr<Statement> fIfTrue;
+ std::unique_ptr<Statement> fIfFalse;
+
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.cpp b/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.cpp
new file mode 100644
index 0000000000..b12f1b3726
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLIndexExpression.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLConstructorArray.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLTypeReference.h"
+
+#include <cstdint>
+#include <optional>
+
+namespace SkSL {
+
+static bool index_out_of_range(const Context& context, Position pos, SKSL_INT index,
+ const Expression& base) {
+ if (index >= 0) {
+ if (base.type().columns() == Type::kUnsizedArray) {
+ return false;
+ } else if (index < base.type().columns()) {
+ return false;
+ }
+ }
+ context.fErrors->error(pos, "index " + std::to_string(index) + " out of range for '" +
+ base.type().displayName() + "'");
+ return true;
+}
+
+const Type& IndexExpression::IndexType(const Context& context, const Type& type) {
+ if (type.isMatrix()) {
+ if (type.componentType().matches(*context.fTypes.fFloat)) {
+ switch (type.rows()) {
+ case 2: return *context.fTypes.fFloat2;
+ case 3: return *context.fTypes.fFloat3;
+ case 4: return *context.fTypes.fFloat4;
+ default: SkASSERT(false);
+ }
+ } else if (type.componentType().matches(*context.fTypes.fHalf)) {
+ switch (type.rows()) {
+ case 2: return *context.fTypes.fHalf2;
+ case 3: return *context.fTypes.fHalf3;
+ case 4: return *context.fTypes.fHalf4;
+ default: SkASSERT(false);
+ }
+ }
+ }
+ return type.componentType();
+}
+
+std::unique_ptr<Expression> IndexExpression::Convert(const Context& context,
+ SymbolTable& symbolTable,
+ Position pos,
+ std::unique_ptr<Expression> base,
+ std::unique_ptr<Expression> index) {
+ // Convert an array type reference: `int[10]`.
+ if (base->is<TypeReference>()) {
+ const Type& baseType = base->as<TypeReference>().value();
+ SKSL_INT arraySize = baseType.convertArraySize(context, pos, std::move(index));
+ if (!arraySize) {
+ return nullptr;
+ }
+ return TypeReference::Convert(context, pos,
+ symbolTable.addArrayDimension(&baseType, arraySize));
+ }
+ // Convert an index expression with an expression inside of it: `arr[a * 3]`.
+ const Type& baseType = base->type();
+ if (!baseType.isArray() && !baseType.isMatrix() && !baseType.isVector()) {
+ context.fErrors->error(base->fPosition,
+ "expected array, but found '" + baseType.displayName() + "'");
+ return nullptr;
+ }
+ if (!index->type().isInteger()) {
+ index = context.fTypes.fInt->coerceExpression(std::move(index), context);
+ if (!index) {
+ return nullptr;
+ }
+ }
+ // Perform compile-time bounds checking on constant-expression indices.
+ const Expression* indexExpr = ConstantFolder::GetConstantValueForVariable(*index);
+ if (indexExpr->isIntLiteral()) {
+ SKSL_INT indexValue = indexExpr->as<Literal>().intValue();
+ if (index_out_of_range(context, index->fPosition, indexValue, *base)) {
+ return nullptr;
+ }
+ }
+ return IndexExpression::Make(context, pos, std::move(base), std::move(index));
+}
+
+std::unique_ptr<Expression> IndexExpression::Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> base,
+ std::unique_ptr<Expression> index) {
+ const Type& baseType = base->type();
+ SkASSERT(baseType.isArray() || baseType.isMatrix() || baseType.isVector());
+ SkASSERT(index->type().isInteger());
+
+ const Expression* indexExpr = ConstantFolder::GetConstantValueForVariable(*index);
+ if (indexExpr->isIntLiteral()) {
+ SKSL_INT indexValue = indexExpr->as<Literal>().intValue();
+ if (!index_out_of_range(context, index->fPosition, indexValue, *base)) {
+ if (baseType.isVector()) {
+ // Constant array indexes on vectors can be converted to swizzles: `v[2]` --> `v.z`.
+ // Swizzling is harmless and can unlock further simplifications for some base types.
+ return Swizzle::Make(context, pos, std::move(base),
+ ComponentArray{(int8_t)indexValue});
+ }
+
+ if (baseType.isArray() && !Analysis::HasSideEffects(*base)) {
+                // Indexing a constant array constructor with a constant index can just pluck out
+ // the requested value from the array.
+ const Expression* baseExpr = ConstantFolder::GetConstantValueForVariable(*base);
+ if (baseExpr->is<ConstructorArray>()) {
+ const ConstructorArray& arrayCtor = baseExpr->as<ConstructorArray>();
+ const ExpressionArray& arguments = arrayCtor.arguments();
+ SkASSERT(arguments.size() == baseType.columns());
+
+ return arguments[indexValue]->clone(pos);
+ }
+ }
+
+ if (baseType.isMatrix() && !Analysis::HasSideEffects(*base)) {
+ // Matrices can be constructed with vectors that don't line up on column boundaries,
+ // so extracting out the values from the constructor can be tricky. Fortunately, we
+ // can reconstruct an equivalent vector using `getConstantValue`. If we
+ // can't extract the data using `getConstantValue`, it wasn't constant and
+ // we're not obligated to simplify anything.
+ const Expression* baseExpr = ConstantFolder::GetConstantValueForVariable(*base);
+ int vecWidth = baseType.rows();
+ const Type& scalarType = baseType.componentType();
+ const Type& vecType = scalarType.toCompound(context, vecWidth, /*rows=*/1);
+ indexValue *= vecWidth;
+
+ ExpressionArray ctorArgs;
+ ctorArgs.reserve_back(vecWidth);
+ for (int slot = 0; slot < vecWidth; ++slot) {
+ std::optional<double> slotVal = baseExpr->getConstantValue(indexValue + slot);
+ if (slotVal.has_value()) {
+ ctorArgs.push_back(Literal::Make(baseExpr->fPosition, *slotVal,
+ &scalarType));
+ } else {
+ ctorArgs.clear();
+ break;
+ }
+ }
+
+ if (!ctorArgs.empty()) {
+ return ConstructorCompound::Make(context, pos, vecType, std::move(ctorArgs));
+ }
+ }
+ }
+ }
+
+ return std::make_unique<IndexExpression>(context, pos, std::move(base), std::move(index));
+}
+
+std::string IndexExpression::description(OperatorPrecedence) const {
+ return this->base()->description(OperatorPrecedence::kPostfix) + "[" +
+ this->index()->description(OperatorPrecedence::kTopLevel) + "]";
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h
new file mode 100644
index 0000000000..222728e9eb
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLIndexExpression.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INDEX
+#define SKSL_INDEX
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class SymbolTable;
+class Type;
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * An expression which extracts a value from an array, vector or matrix, as in 'm[2]'.
+ */
+class IndexExpression final : public Expression {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kIndex;
+
+ IndexExpression(const Context& context, Position pos, std::unique_ptr<Expression> base,
+ std::unique_ptr<Expression> index)
+ : INHERITED(pos, kIRNodeKind, &IndexType(context, base->type()))
+ , fBase(std::move(base))
+ , fIndex(std::move(index)) {}
+
+ // Returns a simplified index-expression; reports errors via the ErrorReporter.
+ static std::unique_ptr<Expression> Convert(const Context& context,
+ SymbolTable& symbolTable,
+ Position pos,
+ std::unique_ptr<Expression> base,
+ std::unique_ptr<Expression> index);
+
+ // Returns a simplified index-expression; reports errors via ASSERT.
+ static std::unique_ptr<Expression> Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> base,
+ std::unique_ptr<Expression> index);
+
+ /**
+ * Given a type, returns the type that will result from extracting an array value from it.
+ */
+ static const Type& IndexType(const Context& context, const Type& type);
+
+ std::unique_ptr<Expression>& base() {
+ return fBase;
+ }
+
+ const std::unique_ptr<Expression>& base() const {
+ return fBase;
+ }
+
+ std::unique_ptr<Expression>& index() {
+ return fIndex;
+ }
+
+ const std::unique_ptr<Expression>& index() const {
+ return fIndex;
+ }
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::unique_ptr<Expression>(new IndexExpression(pos, this->base()->clone(),
+ this->index()->clone(),
+ &this->type()));
+ }
+
+ std::string description(OperatorPrecedence) const override;
+
+ using INHERITED = Expression;
+
+private:
+ IndexExpression(Position pos, std::unique_ptr<Expression> base,
+ std::unique_ptr<Expression> index, const Type* type)
+ : INHERITED(pos, Kind::kIndex, type)
+ , fBase(std::move(base))
+ , fIndex(std::move(index)) {}
+
+ std::unique_ptr<Expression> fBase;
+ std::unique_ptr<Expression> fIndex;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.cpp b/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.cpp
new file mode 100644
index 0000000000..f093624eff
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLString.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLField.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace SkSL {
+
+enum class ProgramKind : int8_t;
+
+InterfaceBlock::~InterfaceBlock() {
+ // Unhook this InterfaceBlock from its associated Variable, since we're being deleted.
+ if (fVariable) {
+ fVariable->detachDeadInterfaceBlock();
+ }
+}
+
+static std::optional<int> find_rt_adjust_index(SkSpan<const Type::Field> fields) {
+ for (size_t index = 0; index < fields.size(); ++index) {
+ const SkSL::Type::Field& f = fields[index];
+ if (f.fName == SkSL::Compiler::RTADJUST_NAME) {
+ return index;
+ }
+ }
+
+ return std::nullopt;
+}
+
// Validates an interface-block declaration, reporting problems through the
// ErrorReporter, then delegates construction to Make(). Returns null on error.
std::unique_ptr<InterfaceBlock> InterfaceBlock::Convert(const Context& context,
                                                        Position pos,
                                                        Variable* variable,
                                                        std::shared_ptr<SymbolTable> symbols) {
    // Interface blocks are only legal in fragment, vertex, and compute programs.
    if (SkSL::ProgramKind kind = context.fConfig->fKind; !ProgramConfig::IsFragment(kind) &&
                                                         !ProgramConfig::IsVertex(kind) &&
                                                         !ProgramConfig::IsCompute(kind)) {
        context.fErrors->error(pos, "interface blocks are not allowed in this kind of program");
        return nullptr;
    }

    // Find sk_RTAdjust and error out if it's not of type `float4`.
    SkSpan<const Type::Field> fields = variable->type().componentType().fields();
    std::optional<int> rtAdjustIndex = find_rt_adjust_index(fields);
    if (rtAdjustIndex.has_value()) {
        const Type::Field& rtAdjustField = fields[*rtAdjustIndex];
        if (!rtAdjustField.fType->matches(*context.fTypes.fFloat4)) {
            context.fErrors->error(rtAdjustField.fPosition, "sk_RTAdjust must have type 'float4'");
            return nullptr;
        }
    }
    return InterfaceBlock::Make(context, pos, variable, rtAdjustIndex, symbols);
}
+
// Constructs an InterfaceBlock without user-facing error reporting; preconditions are
// enforced with asserts. Also registers sk_RTAdjust (if present) with the per-thread
// RTAdjust state, and populates `symbols` with the block's variable or fields.
std::unique_ptr<InterfaceBlock> InterfaceBlock::Make(const Context& context,
                                                     Position pos,
                                                     Variable* variable,
                                                     std::optional<int> rtAdjustIndex,
                                                     std::shared_ptr<SymbolTable> symbols) {
    // Make() assumes Convert()-style validation has already been performed.
    SkASSERT(ProgramConfig::IsFragment(context.fConfig->fKind) ||
             ProgramConfig::IsVertex(context.fConfig->fKind) ||
             ProgramConfig::IsCompute(context.fConfig->fKind));

    SkASSERT(variable->type().componentType().isInterfaceBlock());
    SkSpan<const Type::Field> fields = variable->type().componentType().fields();

    if (rtAdjustIndex.has_value()) {
        // Debug-only re-check that the caller passed the correct sk_RTAdjust index.
        [[maybe_unused]] const Type::Field& rtAdjustField = fields[*rtAdjustIndex];
        SkASSERT(rtAdjustField.fName == SkSL::Compiler::RTADJUST_NAME);
        SkASSERT(rtAdjustField.fType->matches(*context.fTypes.fFloat4));

        // Record where sk_RTAdjust lives so later compilation stages can find it.
        ThreadContext::RTAdjustData& rtAdjustData = ThreadContext::RTAdjustState();
        rtAdjustData.fInterfaceBlock = variable;
        rtAdjustData.fFieldIndex = *rtAdjustIndex;
    }

    if (variable->name().empty()) {
        // This interface block is anonymous. Add each field to the top-level symbol table.
        for (size_t i = 0; i < fields.size(); ++i) {
            symbols->add(std::make_unique<SkSL::Field>(fields[i].fPosition, variable, i));
        }
    } else {
        // Add the global variable to the top-level symbol table.
        symbols->addWithoutOwnership(variable);
    }

    return std::make_unique<SkSL::InterfaceBlock>(pos, variable, symbols);
}
+
+std::unique_ptr<ProgramElement> InterfaceBlock::clone() const {
+ return std::make_unique<InterfaceBlock>(fPosition,
+ this->var(),
+ SymbolTable::WrapIfBuiltin(this->typeOwner()));
+}
+
+std::string InterfaceBlock::description() const {
+ std::string result = this->var()->modifiers().description() +
+ std::string(this->typeName()) + " {\n";
+ const Type* structType = &this->var()->type();
+ if (structType->isArray()) {
+ structType = &structType->componentType();
+ }
+ for (const auto& f : structType->fields()) {
+ result += f.description() + "\n";
+ }
+ result += "}";
+ if (!this->instanceName().empty()) {
+ result += " " + std::string(this->instanceName());
+ if (this->arraySize() > 0) {
+ String::appendf(&result, "[%d]", this->arraySize());
+ }
+ }
+ return result + ";";
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h b/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h
new file mode 100644
index 0000000000..a9447dd9cb
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLInterfaceBlock.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_INTERFACEBLOCK
+#define SKSL_INTERFACEBLOCK
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <memory>
+#include <optional>
+#include <string>
+#include <string_view>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class SymbolTable;
+
+/**
+ * An interface block, as in:
+ *
+ * out sk_PerVertex {
+ * layout(builtin=0) float4 sk_Position;
+ * layout(builtin=1) float sk_PointSize;
+ * };
+ *
+ * At the IR level, this is represented by a single variable of struct type.
+ */
class InterfaceBlock final : public ProgramElement {
public:
    inline static constexpr Kind kIRNodeKind = Kind::kInterfaceBlock;

    // `var` must be a variable whose (component) type is an interface block; the
    // constructor links the Variable back to this block, and the destructor undoes it.
    InterfaceBlock(Position pos,
                   Variable* var,
                   std::shared_ptr<SymbolTable> typeOwner)
            : INHERITED(pos, kIRNodeKind)
            , fVariable(var)
            , fTypeOwner(std::move(typeOwner)) {
        SkASSERT(fVariable->type().componentType().isInterfaceBlock());
        fVariable->setInterfaceBlock(this);
    }

    ~InterfaceBlock() override;

    // Returns an InterfaceBlock; errors are reported to the ErrorReporter.
    // The caller is responsible for adding the InterfaceBlock to the program elements.
    // The program's RTAdjustData will be updated if the InterfaceBlock contains sk_RTAdjust.
    // The passed-in symbol table will be updated with a reference to the interface block variable
    // (if it is named) or each of the interface block fields (if it is anonymous).
    static std::unique_ptr<InterfaceBlock> Convert(const Context& context,
                                                   Position pos,
                                                   Variable* variable,
                                                   std::shared_ptr<SymbolTable> symbols);

    // Returns an InterfaceBlock; errors are reported via SkASSERT.
    // The caller is responsible for adding the InterfaceBlock to the program elements.
    // If the InterfaceBlock contains sk_RTAdjust, the caller is responsible for passing its field
    // index in `rtAdjustIndex`.
    // The passed-in symbol table will be updated with a reference to the interface block variable
    // (if it is named) or each of the interface block fields (if it is anonymous).
    static std::unique_ptr<InterfaceBlock> Make(const Context& context,
                                                Position pos,
                                                Variable* variable,
                                                std::optional<int> rtAdjustIndex,
                                                std::shared_ptr<SymbolTable> symbols);

    // The variable that backs this block; may be null after detachDeadVariable().
    Variable* var() const {
        return fVariable;
    }

    // Clears the Variable pointer so it does not dangle — presumably invoked when the
    // Variable is destroyed before this block (mirror of ~InterfaceBlock's call to
    // Variable::detachDeadInterfaceBlock); confirm against SkSLVariable.
    void detachDeadVariable() {
        fVariable = nullptr;
    }

    // The name of the block's struct type (e.g. "sk_PerVertex").
    std::string_view typeName() const {
        return fVariable->type().componentType().name();
    }

    // The instance name; empty for an anonymous block.
    std::string_view instanceName() const {
        return fVariable->name();
    }

    // Symbol table that owns the block's type.
    const std::shared_ptr<SymbolTable>& typeOwner() const {
        return fTypeOwner;
    }

    // Array size if the block variable is arrayed; 0 for a non-array block.
    int arraySize() const {
        return fVariable->type().isArray() ? fVariable->type().columns() : 0;
    }

    std::unique_ptr<ProgramElement> clone() const override;

    std::string description() const override;

private:
    Variable* fVariable;
    std::shared_ptr<SymbolTable> fTypeOwner;

    using INHERITED = ProgramElement;
};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLLayout.cpp b/gfx/skia/skia/src/sksl/ir/SkSLLayout.cpp
new file mode 100644
index 0000000000..3274ab1185
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLLayout.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLString.h"
+
+namespace SkSL {
+
+std::string Layout::description() const {
+ std::string result;
+ auto separator = SkSL::String::Separator();
+ if (fLocation >= 0) {
+ result += separator() + "location = " + std::to_string(fLocation);
+ }
+ if (fOffset >= 0) {
+ result += separator() + "offset = " + std::to_string(fOffset);
+ }
+ if (fBinding >= 0) {
+ result += separator() + "binding = " + std::to_string(fBinding);
+ }
+ if (fTexture >= 0) {
+ result += separator() + "texture = " + std::to_string(fTexture);
+ }
+ if (fSampler >= 0) {
+ result += separator() + "sampler = " + std::to_string(fSampler);
+ }
+ if (fIndex >= 0) {
+ result += separator() + "index = " + std::to_string(fIndex);
+ }
+ if (fSet >= 0) {
+ result += separator() + "set = " + std::to_string(fSet);
+ }
+ if (fBuiltin >= 0) {
+ result += separator() + "builtin = " + std::to_string(fBuiltin);
+ }
+ if (fInputAttachmentIndex >= 0) {
+ result += separator() + "input_attachment_index = " +
+ std::to_string(fInputAttachmentIndex);
+ }
+ if (fFlags & kOriginUpperLeft_Flag) {
+ result += separator() + "origin_upper_left";
+ }
+ if (fFlags & kBlendSupportAllEquations_Flag) {
+ result += separator() + "blend_support_all_equations";
+ }
+ if (fFlags & kPushConstant_Flag) {
+ result += separator() + "push_constant";
+ }
+ if (fFlags & kColor_Flag) {
+ result += separator() + "color";
+ }
+ if (result.size() > 0) {
+ result = "layout (" + result + ")";
+ }
+ return result;
+}
+
+bool Layout::operator==(const Layout& other) const {
+ return fFlags == other.fFlags &&
+ fLocation == other.fLocation &&
+ fOffset == other.fOffset &&
+ fBinding == other.fBinding &&
+ fTexture == other.fTexture &&
+ fSampler == other.fSampler &&
+ fIndex == other.fIndex &&
+ fSet == other.fSet &&
+ fBuiltin == other.fBuiltin &&
+ fInputAttachmentIndex == other.fInputAttachmentIndex;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLLiteral.cpp b/gfx/skia/skia/src/sksl/ir/SkSLLiteral.cpp
new file mode 100644
index 0000000000..aa8c5c4440
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLLiteral.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLString.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+
+namespace SkSL {
+
+std::string Literal::description(OperatorPrecedence) const {
+ if (this->type().isBoolean()) {
+ return fValue ? "true" : "false";
+ }
+ if (this->type().isInteger()) {
+ return std::to_string(this->intValue());
+ }
+ return skstd::to_string(this->floatValue());
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLLiteral.h b/gfx/skia/skia/src/sksl/ir/SkSLLiteral.h
new file mode 100644
index 0000000000..d4b0bd1be6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLLiteral.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_FLOATLITERAL
+#define SKSL_FLOATLITERAL
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <cinttypes>
+#include <memory>
+#include <optional>
+#include <string>
+
+namespace SkSL {
+
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * A literal value. These can contain ints, floats, or booleans.
+ */
+
+class Literal : public Expression {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kLiteral;
+
+ Literal(Position pos, double value, const Type* type)
+ : INHERITED(pos, kIRNodeKind, type)
+ , fValue(value) {}
+
+ // Makes a literal of $floatLiteral type.
+ static std::unique_ptr<Literal> MakeFloat(const Context& context, Position pos,
+ float value) {
+ return std::make_unique<Literal>(pos, value, context.fTypes.fFloatLiteral.get());
+ }
+
+ // Makes a float literal of the specified type.
+ static std::unique_ptr<Literal> MakeFloat(Position pos, float value, const Type* type) {
+ SkASSERT(type->isFloat());
+ return std::make_unique<Literal>(pos, value, type);
+ }
+
+ // Makes a literal of $intLiteral type.
+ static std::unique_ptr<Literal> MakeInt(const Context& context, Position pos,
+ SKSL_INT value) {
+ return std::make_unique<Literal>(pos, value, context.fTypes.fIntLiteral.get());
+ }
+
+ // Makes an int literal of the specified type.
+ static std::unique_ptr<Literal> MakeInt(Position pos, SKSL_INT value, const Type* type) {
+ SkASSERT(type->isInteger());
+ SkASSERTF(value >= type->minimumValue(), "Value %" PRId64 " does not fit in type %s",
+ value, type->description().c_str());
+ SkASSERTF(value <= type->maximumValue(), "Value %" PRId64 " does not fit in type %s",
+ value, type->description().c_str());
+ return std::make_unique<Literal>(pos, value, type);
+ }
+
+ // Makes a literal of boolean type.
+ static std::unique_ptr<Literal> MakeBool(const Context& context, Position pos, bool value) {
+ return std::make_unique<Literal>(pos, value, context.fTypes.fBool.get());
+ }
+
+ // Makes a literal of boolean type. (Functionally identical to the above, but useful if you
+ // don't have access to the Context.)
+ static std::unique_ptr<Literal> MakeBool(Position pos, bool value, const Type* type) {
+ SkASSERT(type->isBoolean());
+ return std::make_unique<Literal>(pos, value, type);
+ }
+
+ // Makes a literal of the specified type, rounding as needed.
+ static std::unique_ptr<Literal> Make(Position pos, double value, const Type* type) {
+ if (type->isFloat()) {
+ return MakeFloat(pos, value, type);
+ }
+ if (type->isInteger()) {
+ return MakeInt(pos, value, type);
+ }
+ SkASSERT(type->isBoolean());
+ return MakeBool(pos, value, type);
+ }
+
+ float floatValue() const {
+ SkASSERT(this->type().isFloat());
+ return (SKSL_FLOAT)fValue;
+ }
+
+ SKSL_INT intValue() const {
+ SkASSERT(this->type().isInteger());
+ return (SKSL_INT)fValue;
+ }
+
+ SKSL_INT boolValue() const {
+ SkASSERT(this->type().isBoolean());
+ return (bool)fValue;
+ }
+
+ double value() const {
+ return fValue;
+ }
+
+ std::string description(OperatorPrecedence) const override;
+
+ ComparisonResult compareConstant(const Expression& other) const override {
+ if (!other.is<Literal>() || this->type().numberKind() != other.type().numberKind()) {
+ return ComparisonResult::kUnknown;
+ }
+ return this->value() == other.as<Literal>().value()
+ ? ComparisonResult::kEqual
+ : ComparisonResult::kNotEqual;
+ }
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<Literal>(pos, this->value(), &this->type());
+ }
+
+ bool supportsConstantValues() const override {
+ return true;
+ }
+
+ std::optional<double> getConstantValue(int n) const override {
+ SkASSERT(n == 0);
+ return fValue;
+ }
+
+private:
+ double fValue;
+
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLMethodReference.h b/gfx/skia/skia/src/sksl/ir/SkSLMethodReference.h
new file mode 100644
index 0000000000..c077ec3f9f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLMethodReference.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_METHODREFERENCE
+#define SKSL_METHODREFERENCE
+
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+class FunctionDeclaration;
+
+/**
+ * An identifier referring to a method name, along with an instance for the call.
+ * This is an intermediate value: MethodReferences are always eventually replaced by FunctionCalls
+ * in valid programs.
+ *
+ * Method calls are only supported on effect-child types, and they all resolve to intrinsics
+ * prefixed with '$', and taking the 'self' object as the last parameter. For example:
+ *
+ * uniform shader child;
+ * ...
+ * child.eval(xy) --> $eval(xy, child)
+ */
class MethodReference final : public Expression {
public:
    inline static constexpr Kind kIRNodeKind = Kind::kMethodReference;

    // A MethodReference has no meaningful type of its own; it is given the `invalid`
    // type and must be replaced by a FunctionCall before the program is complete.
    MethodReference(const Context& context,
                    Position pos,
                    std::unique_ptr<Expression> self,
                    const FunctionDeclaration* overloadChain)
        : INHERITED(pos, kIRNodeKind, context.fTypes.fInvalid.get())
        , fSelf(std::move(self))
        , fOverloadChain(overloadChain) {}

    // The receiver object (`child` in `child.eval(xy)`).
    std::unique_ptr<Expression>& self() { return fSelf; }
    const std::unique_ptr<Expression>& self() const { return fSelf; }

    // First declaration in the overload set for the referenced method name.
    const FunctionDeclaration* overloadChain() const { return fOverloadChain; }

    std::unique_ptr<Expression> clone(Position pos) const override {
        return std::unique_ptr<Expression>(new MethodReference(
                pos, this->self()->clone(), this->overloadChain(), &this->type()));
    }

    std::string description(OperatorPrecedence) const override {
        return "<method>";
    }

private:
    // Internal constructor used by clone(): carries the type directly so no Context
    // is needed.
    MethodReference(Position pos,
                    std::unique_ptr<Expression> self,
                    const FunctionDeclaration* overloadChain,
                    const Type* type)
        : INHERITED(pos, kIRNodeKind, type)
        , fSelf(std::move(self))
        , fOverloadChain(overloadChain) {}

    std::unique_ptr<Expression> fSelf;
    const FunctionDeclaration* fOverloadChain;

    using INHERITED = Expression;
};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLModifiers.cpp b/gfx/skia/skia/src/sksl/ir/SkSLModifiers.cpp
new file mode 100644
index 0000000000..548d38d2cd
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLModifiers.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLModifiers.h"
+
+#include "include/core/SkTypes.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/base/SkMathPriv.h"
+#include "src/sksl/SkSLContext.h"
+
+namespace SkSL {
+
// Validates this modifier set against the caller-supplied whitelists. Every
// disallowed modifier/layout flag produces its own error (so the user sees them all),
// and the function returns false if anything was reported.
bool Modifiers::checkPermitted(const Context& context,
                               Position pos,
                               int permittedModifierFlags,
                               int permittedLayoutFlags) const {
    // Maps each modifier bit to its user-facing keyword, for error messages.
    static constexpr struct { Modifiers::Flag flag; const char* name; } kModifierFlags[] = {
        { Modifiers::kConst_Flag,          "const" },
        { Modifiers::kIn_Flag,             "in" },
        { Modifiers::kOut_Flag,            "out" },
        { Modifiers::kUniform_Flag,        "uniform" },
        { Modifiers::kFlat_Flag,           "flat" },
        { Modifiers::kNoPerspective_Flag,  "noperspective" },
        { Modifiers::kPure_Flag,           "$pure" },
        { Modifiers::kInline_Flag,         "inline" },
        { Modifiers::kNoInline_Flag,       "noinline" },
        { Modifiers::kHighp_Flag,          "highp" },
        { Modifiers::kMediump_Flag,        "mediump" },
        { Modifiers::kLowp_Flag,           "lowp" },
        { Modifiers::kExport_Flag,         "$export" },
        { Modifiers::kES3_Flag,            "$es3" },
        { Modifiers::kWorkgroup_Flag,      "workgroup" },
        { Modifiers::kReadOnly_Flag,       "readonly" },
        { Modifiers::kWriteOnly_Flag,      "writeonly" },
        { Modifiers::kBuffer_Flag,         "buffer" },
    };

    bool success = true;
    int modifierFlags = fFlags;
    for (const auto& f : kModifierFlags) {
        if (modifierFlags & f.flag) {
            if (!(permittedModifierFlags & f.flag)) {
                context.fErrors->error(pos, "'" + std::string(f.name) + "' is not permitted here");
                success = false;
            }
            modifierFlags &= ~f.flag;
        }
    }
    // Every set bit must appear in the table above.
    SkASSERT(modifierFlags == 0);

    // At most one backend qualifier (spirv/metal/gl/wgsl) may be present.
    int backendFlags = fLayout.fFlags & Layout::kAllBackendFlagsMask;
    if (SkPopCount(backendFlags) > 1) {
        context.fErrors->error(pos, "only one backend qualifier can be used");
        success = false;
    }

    // Maps each layout bit to its user-facing keyword, for error messages.
    static constexpr struct { Layout::Flag flag; const char* name; } kLayoutFlags[] = {
        { Layout::kOriginUpperLeft_Flag,          "origin_upper_left"},
        { Layout::kPushConstant_Flag,             "push_constant"},
        { Layout::kBlendSupportAllEquations_Flag, "blend_support_all_equations"},
        { Layout::kColor_Flag,                    "color"},
        { Layout::kLocation_Flag,                 "location"},
        { Layout::kOffset_Flag,                   "offset"},
        { Layout::kBinding_Flag,                  "binding"},
        { Layout::kTexture_Flag,                  "texture"},
        { Layout::kSampler_Flag,                  "sampler"},
        { Layout::kIndex_Flag,                    "index"},
        { Layout::kSet_Flag,                      "set"},
        { Layout::kBuiltin_Flag,                  "builtin"},
        { Layout::kInputAttachmentIndex_Flag,     "input_attachment_index"},
        { Layout::kSPIRV_Flag,                    "spirv"},
        { Layout::kMetal_Flag,                    "metal"},
        { Layout::kGL_Flag,                       "gl"},
        { Layout::kWGSL_Flag,                     "wgsl"},
    };

    int layoutFlags = fLayout.fFlags;
    if ((layoutFlags & (Layout::kTexture_Flag | Layout::kSampler_Flag)) &&
        layoutFlags & Layout::kBinding_Flag) {
        context.fErrors->error(pos, "'binding' modifier cannot coexist with 'texture'/'sampler'");
        success = false;
    }
    // The `texture` and `sampler` flags are only allowed when explicitly targeting Metal and WGSL
    if (!(layoutFlags & (Layout::kMetal_Flag | Layout::kWGSL_Flag))) {
        permittedLayoutFlags &= ~Layout::kTexture_Flag;
        permittedLayoutFlags &= ~Layout::kSampler_Flag;
    }
    // The `set` flag is not allowed when explicitly targeting Metal and GLSL. It is currently
    // allowed when no backend flag is present.
    // TODO(skia:14023): Further restrict the `set` flag to SPIR-V and WGSL
    if (layoutFlags & (Layout::kMetal_Flag | Layout::kGL_Flag)) {
        permittedLayoutFlags &= ~Layout::kSet_Flag;
    }
    // TODO(skia:14023): Restrict the `push_constant` flag to SPIR-V and WGSL

    for (const auto& lf : kLayoutFlags) {
        if (layoutFlags & lf.flag) {
            if (!(permittedLayoutFlags & lf.flag)) {
                context.fErrors->error(pos, "layout qualifier '" + std::string(lf.name) +
                                            "' is not permitted here");
                success = false;
            }
            layoutFlags &= ~lf.flag;
        }
    }
    // Every set bit must appear in the table above.
    SkASSERT(layoutFlags == 0);
    return success;
}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLModifiersDeclaration.h b/gfx/skia/skia/src/sksl/ir/SkSLModifiersDeclaration.h
new file mode 100644
index 0000000000..8d9179ce89
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLModifiersDeclaration.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_MODIFIERDECLARATION
+#define SKSL_MODIFIERDECLARATION
+
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+
+namespace SkSL {
+
+/**
+ * A declaration that consists only of modifiers, e.g.:
+ *
+ * layout(blend_support_all_equations) out;
+ */
+class ModifiersDeclaration final : public ProgramElement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kModifiers;
+
+ ModifiersDeclaration(const Modifiers* modifiers)
+ : INHERITED(Position(), kIRNodeKind)
+ , fModifiers(modifiers) {}
+
+ const Modifiers& modifiers() const {
+ return *fModifiers;
+ }
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::make_unique<ModifiersDeclaration>(&this->modifiers());
+ }
+
+ std::string description() const override {
+ return this->modifiers().description() + ";";
+ }
+
+private:
+ const Modifiers* fModifiers;
+
+ using INHERITED = ProgramElement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLNop.h b/gfx/skia/skia/src/sksl/ir/SkSLNop.h
new file mode 100644
index 0000000000..f2810ef15f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLNop.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_NOP
+#define SKSL_NOP
+
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+namespace SkSL {
+
+/**
+ * A no-op statement that does nothing.
+ */
+class Nop final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kNop;
+
+ Nop()
+ : INHERITED(Position(), kIRNodeKind) {}
+
+ static std::unique_ptr<Statement> Make() {
+ return std::make_unique<Nop>();
+ }
+
+ bool isEmpty() const override {
+ return true;
+ }
+
+ std::string description() const override {
+ return ";";
+ }
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::make_unique<Nop>();
+ }
+
+private:
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPoison.h b/gfx/skia/skia/src/sksl/ir/SkSLPoison.h
new file mode 100644
index 0000000000..31bd850308
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPoison.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+
+namespace SkSL {
+
+class Poison : public Expression {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kPoison;
+
+ static std::unique_ptr<Expression> Make(Position pos, const Context& context) {
+ return std::make_unique<Poison>(pos, context.fTypes.fPoison.get());
+ }
+
+ Poison(Position pos, const Type* type)
+ : INHERITED(pos, kIRNodeKind, type) {}
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<Poison>(pos, &this->type());
+ }
+
+ std::string description(OperatorPrecedence) const override {
+ return Compiler::POISON_TAG;
+ }
+
+private:
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.cpp b/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.cpp
new file mode 100644
index 0000000000..83d5856faf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLPostfixExpression.h"
+
+#include "include/core/SkTypes.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
+std::unique_ptr<Expression> PostfixExpression::Convert(const Context& context, Position pos,
+ std::unique_ptr<Expression> base, Operator op) {
+ const Type& baseType = base->type();
+ if (!baseType.isNumber()) {
+ context.fErrors->error(pos, "'" + std::string(op.tightOperatorName()) +
+ "' cannot operate on '" + baseType.displayName() + "'");
+ return nullptr;
+ }
+ if (!Analysis::UpdateVariableRefKind(base.get(), VariableRefKind::kReadWrite,
+ context.fErrors)) {
+ return nullptr;
+ }
+ return PostfixExpression::Make(context, pos, std::move(base), op);
+}
+
// Builds a postfix expression without user-facing error reporting; Make() trusts that
// Convert()-style validation (numeric, assignable) has already happened.
std::unique_ptr<Expression> PostfixExpression::Make(const Context& context, Position pos,
                                                    std::unique_ptr<Expression> base, Operator op) {
    SkASSERT(base->type().isNumber());
    SkASSERT(Analysis::IsAssignable(*base));
    return std::make_unique<PostfixExpression>(pos, std::move(base), op);
}
+
+std::string PostfixExpression::description(OperatorPrecedence parentPrecedence) const {
+ bool needsParens = (OperatorPrecedence::kPostfix >= parentPrecedence);
+ return std::string(needsParens ? "(" : "") +
+ this->operand()->description(OperatorPrecedence::kPostfix) +
+ std::string(this->getOperator().tightOperatorName()) +
+ std::string(needsParens ? ")" : "");
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h
new file mode 100644
index 0000000000..a8d457a527
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPostfixExpression.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_POSTFIXEXPRESSION
+#define SKSL_POSTFIXEXPRESSION
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * An expression modified by a unary operator appearing after it, such as 'i++'.
+ */
class PostfixExpression final : public Expression {
public:
    inline static constexpr Kind kIRNodeKind = Kind::kPostfix;

    // The result type of a postfix expression is the operand's own type; note the
    // operand's type is read in the base-class initializer, before the move occurs.
    PostfixExpression(Position pos, std::unique_ptr<Expression> operand, Operator op)
        : INHERITED(pos, kIRNodeKind, &operand->type())
        , fOperand(std::move(operand))
        , fOperator(op) {}

    // Creates an SkSL postfix expression; uses the ErrorReporter to report errors.
    static std::unique_ptr<Expression> Convert(const Context& context,
                                               Position pos,
                                               std::unique_ptr<Expression> base,
                                               Operator op);

    // Creates an SkSL postfix expression; reports errors via ASSERT.
    static std::unique_ptr<Expression> Make(const Context& context,
                                            Position pos,
                                            std::unique_ptr<Expression> base,
                                            Operator op);

    // The postfix operator (++ or --).
    Operator getOperator() const {
        return fOperator;
    }

    // The expression being modified.
    std::unique_ptr<Expression>& operand() {
        return fOperand;
    }

    const std::unique_ptr<Expression>& operand() const {
        return fOperand;
    }

    std::unique_ptr<Expression> clone(Position pos) const override {
        return std::make_unique<PostfixExpression>(pos, this->operand()->clone(),
                                                   this->getOperator());
    }

    std::string description(OperatorPrecedence parentPrecedence) const override;

private:
    std::unique_ptr<Expression> fOperand;
    Operator fOperator;

    using INHERITED = Expression;
};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.cpp b/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.cpp
new file mode 100644
index 0000000000..a5ea728de2
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.cpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLConstructorArray.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+namespace SkSL {
+
+static ExpressionArray negate_operands(const Context& context,
+ Position pos,
+ const ExpressionArray& operands);
+
+static std::unique_ptr<Expression> simplify_negation(const Context& context,
+ Position pos,
+ const Expression& originalExpr) {
+ const Expression* value = ConstantFolder::GetConstantValueForVariable(originalExpr);
+ switch (value->kind()) {
+ case Expression::Kind::kLiteral: {
+ // Convert -literal(1) to literal(-1).
+ double negated = -value->as<Literal>().value();
+ // Don't simplify the expression if the type can't hold the negated value.
+ const Type& type = value->type();
+ if (type.checkForOutOfRangeLiteral(context, negated, pos)) {
+ return nullptr;
+ }
+ return Literal::Make(pos, negated, &type);
+ }
+ case Expression::Kind::kPrefix: {
+ // Convert `-(-expression)` into `expression`.
+ const PrefixExpression& prefix = value->as<PrefixExpression>();
+ if (prefix.getOperator().kind() == Operator::Kind::MINUS) {
+ return prefix.operand()->clone(pos);
+ }
+ break;
+ }
+ case Expression::Kind::kConstructorArray:
+ // Convert `-array[N](literal, ...)` into `array[N](-literal, ...)`.
+ if (Analysis::IsCompileTimeConstant(*value)) {
+ const ConstructorArray& ctor = value->as<ConstructorArray>();
+ return ConstructorArray::Make(context, pos, ctor.type(),
+ negate_operands(context, pos, ctor.arguments()));
+ }
+ break;
+
+ case Expression::Kind::kConstructorDiagonalMatrix:
+ // Convert `-matrix(literal)` into `matrix(-literal)`.
+ if (Analysis::IsCompileTimeConstant(*value)) {
+ const ConstructorDiagonalMatrix& ctor = value->as<ConstructorDiagonalMatrix>();
+ if (std::unique_ptr<Expression> simplified = simplify_negation(context,
+ pos,
+ *ctor.argument())) {
+ return ConstructorDiagonalMatrix::Make(context, pos, ctor.type(),
+ std::move(simplified));
+ }
+ }
+ break;
+
+ case Expression::Kind::kConstructorSplat:
+ // Convert `-vector(literal)` into `vector(-literal)`.
+ if (Analysis::IsCompileTimeConstant(*value)) {
+ const ConstructorSplat& ctor = value->as<ConstructorSplat>();
+ if (std::unique_ptr<Expression> simplified = simplify_negation(context,
+ pos,
+ *ctor.argument())) {
+ return ConstructorSplat::Make(context, pos, ctor.type(), std::move(simplified));
+ }
+ }
+ break;
+
+ case Expression::Kind::kConstructorCompound:
+ // Convert `-vecN(literal, ...)` into `vecN(-literal, ...)`.
+ if (Analysis::IsCompileTimeConstant(*value)) {
+ const ConstructorCompound& ctor = value->as<ConstructorCompound>();
+ return ConstructorCompound::Make(context, pos, ctor.type(),
+ negate_operands(context, pos, ctor.arguments()));
+ }
+ break;
+
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+static ExpressionArray negate_operands(const Context& context,
+ Position pos,
+ const ExpressionArray& array) {
+ ExpressionArray replacement;
+ replacement.reserve_back(array.size());
+ for (const std::unique_ptr<Expression>& expr : array) {
+ // The logic below is very similar to `negate_operand`, but with different ownership rules.
+ if (std::unique_ptr<Expression> simplified = simplify_negation(context, pos, *expr)) {
+ replacement.push_back(std::move(simplified));
+ } else {
+ replacement.push_back(std::make_unique<PrefixExpression>(pos, Operator::Kind::MINUS,
+ expr->clone()));
+ }
+ }
+ return replacement;
+}
+
+static std::unique_ptr<Expression> negate_operand(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> value) {
+ // Attempt to simplify this negation (e.g. eliminate double negation, literal values)
+ if (std::unique_ptr<Expression> simplified = simplify_negation(context, pos, *value)) {
+ return simplified;
+ }
+
+ // No simplified form; convert expression to Prefix(TK_MINUS, expression).
+ return std::make_unique<PrefixExpression>(pos, Operator::Kind::MINUS, std::move(value));
+}
+
+static std::unique_ptr<Expression> logical_not_operand(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> operand) {
+ const Expression* value = ConstantFolder::GetConstantValueForVariable(*operand);
+ switch (value->kind()) {
+ case Expression::Kind::kLiteral: {
+ // Convert !boolLiteral(true) to boolLiteral(false).
+ SkASSERT(value->type().isBoolean());
+ const Literal& b = value->as<Literal>();
+ return Literal::MakeBool(pos, !b.boolValue(), &operand->type());
+ }
+ case Expression::Kind::kPrefix: {
+ // Convert `!(!expression)` into `expression`.
+ PrefixExpression& prefix = operand->as<PrefixExpression>();
+ if (prefix.getOperator().kind() == Operator::Kind::LOGICALNOT) {
+ prefix.operand()->fPosition = pos;
+ return std::move(prefix.operand());
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ // No simplified form; convert expression to Prefix(TK_LOGICALNOT, expression).
+ return std::make_unique<PrefixExpression>(pos, Operator::Kind::LOGICALNOT, std::move(operand));
+}
+
+std::unique_ptr<Expression> PrefixExpression::Convert(const Context& context, Position pos,
+ Operator op, std::unique_ptr<Expression> base) {
+ const Type& baseType = base->type();
+ switch (op.kind()) {
+ case Operator::Kind::PLUS:
+ if (baseType.isArray() || !baseType.componentType().isNumber()) {
+ context.fErrors->error(pos,
+ "'+' cannot operate on '" + baseType.displayName() + "'");
+ return nullptr;
+ }
+ break;
+
+ case Operator::Kind::MINUS:
+ if (baseType.isArray() || !baseType.componentType().isNumber()) {
+ context.fErrors->error(pos,
+ "'-' cannot operate on '" + baseType.displayName() + "'");
+ return nullptr;
+ }
+ break;
+
+ case Operator::Kind::PLUSPLUS:
+ case Operator::Kind::MINUSMINUS:
+ if (!baseType.isNumber()) {
+ context.fErrors->error(pos,
+ "'" + std::string(op.tightOperatorName()) +
+ "' cannot operate on '" + baseType.displayName() + "'");
+ return nullptr;
+ }
+ if (!Analysis::UpdateVariableRefKind(base.get(), VariableReference::RefKind::kReadWrite,
+ context.fErrors)) {
+ return nullptr;
+ }
+ break;
+
+ case Operator::Kind::LOGICALNOT:
+ if (!baseType.isBoolean()) {
+ context.fErrors->error(pos,
+ "'" + std::string(op.tightOperatorName()) +
+ "' cannot operate on '" + baseType.displayName() + "'");
+ return nullptr;
+ }
+ break;
+
+ case Operator::Kind::BITWISENOT:
+ if (context.fConfig->strictES2Mode()) {
+ // GLSL ES 1.00, Section 5.1
+ context.fErrors->error(
+ pos,
+ "operator '" + std::string(op.tightOperatorName()) + "' is not allowed");
+ return nullptr;
+ }
+ if (baseType.isArray() || !baseType.componentType().isInteger()) {
+ context.fErrors->error(pos,
+ "'" + std::string(op.tightOperatorName()) +
+ "' cannot operate on '" + baseType.displayName() + "'");
+ return nullptr;
+ }
+ if (baseType.isLiteral()) {
+ // The expression `~123` is no longer a literal; coerce to the actual type.
+ base = baseType.scalarTypeForLiteral().coerceExpression(std::move(base), context);
+ if (!base) {
+ return nullptr;
+ }
+ }
+ break;
+
+ default:
+ SK_ABORT("unsupported prefix operator");
+ }
+
+ std::unique_ptr<Expression> result = PrefixExpression::Make(context, pos, op, std::move(base));
+ SkASSERT(result->fPosition == pos);
+ return result;
+}
+
+std::unique_ptr<Expression> PrefixExpression::Make(const Context& context, Position pos,
+ Operator op, std::unique_ptr<Expression> base) {
+ switch (op.kind()) {
+ case Operator::Kind::PLUS:
+ SkASSERT(!base->type().isArray());
+ SkASSERT(base->type().componentType().isNumber());
+ base->fPosition = pos;
+ return base;
+
+ case Operator::Kind::MINUS:
+ SkASSERT(!base->type().isArray());
+ SkASSERT(base->type().componentType().isNumber());
+ return negate_operand(context, pos, std::move(base));
+
+ case Operator::Kind::LOGICALNOT:
+ SkASSERT(base->type().isBoolean());
+ return logical_not_operand(context, pos, std::move(base));
+
+ case Operator::Kind::PLUSPLUS:
+ case Operator::Kind::MINUSMINUS:
+ SkASSERT(base->type().isNumber());
+ SkASSERT(Analysis::IsAssignable(*base));
+ break;
+
+ case Operator::Kind::BITWISENOT:
+ SkASSERT(!context.fConfig->strictES2Mode());
+ SkASSERT(!base->type().isArray());
+ SkASSERT(base->type().componentType().isInteger());
+ SkASSERT(!base->type().isLiteral());
+ break;
+
+ default:
+ SkDEBUGFAILF("unsupported prefix operator: %s", op.operatorName());
+ }
+
+ return std::make_unique<PrefixExpression>(pos, op, std::move(base));
+}
+
+std::string PrefixExpression::description(OperatorPrecedence parentPrecedence) const {
+ bool needsParens = (OperatorPrecedence::kPrefix >= parentPrecedence);
+ return std::string(needsParens ? "(" : "") +
+ std::string(this->getOperator().tightOperatorName()) +
+ this->operand()->description(OperatorPrecedence::kPrefix) +
+ std::string(needsParens ? ")" : "");
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h
new file mode 100644
index 0000000000..58c41d404a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLPrefixExpression.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PREFIXEXPRESSION
+#define SKSL_PREFIXEXPRESSION
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLOperator.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+
+/**
+ * An expression modified by a unary operator appearing before it, such as '!flag'.
+ */
+class PrefixExpression final : public Expression {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kPrefix;
+
+ // Use PrefixExpression::Make to automatically simplify various prefix expression types.
+ PrefixExpression(Position pos, Operator op, std::unique_ptr<Expression> operand)
+ : INHERITED(pos, kIRNodeKind, &operand->type())
+ , fOperator(op)
+ , fOperand(std::move(operand)) {}
+
+ // Creates an SkSL prefix expression; uses the ErrorReporter to report errors.
+ static std::unique_ptr<Expression> Convert(const Context& context, Position pos, Operator op,
+ std::unique_ptr<Expression> base);
+
+ // Creates an SkSL prefix expression; reports errors via ASSERT.
+ static std::unique_ptr<Expression> Make(const Context& context, Position pos, Operator op,
+ std::unique_ptr<Expression> base);
+
+ Operator getOperator() const {
+ return fOperator;
+ }
+
+ std::unique_ptr<Expression>& operand() {
+ return fOperand;
+ }
+
+ const std::unique_ptr<Expression>& operand() const {
+ return fOperand;
+ }
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<PrefixExpression>(pos, this->getOperator(),
+ this->operand()->clone());
+ }
+
+ std::string description(OperatorPrecedence parentPrecedence) const override;
+
+private:
+ Operator fOperator;
+ std::unique_ptr<Expression> fOperand;
+
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLProgram.cpp b/gfx/skia/skia/src/sksl/ir/SkSLProgram.cpp
new file mode 100644
index 0000000000..332e4a2ac4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLProgram.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLString.h"
+#include "include/private/SkSLSymbol.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/SkSLPool.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLSymbolTable.h" // IWYU pragma: keep
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <type_traits>
+#include <utility>
+
+namespace SkSL {
+
+Program::Program(std::unique_ptr<std::string> source,
+ std::unique_ptr<ProgramConfig> config,
+ std::shared_ptr<Context> context,
+ std::vector<std::unique_ptr<ProgramElement>> elements,
+ std::vector<const ProgramElement*> sharedElements,
+ std::unique_ptr<ModifiersPool> modifiers,
+ std::shared_ptr<SymbolTable> symbols,
+ std::unique_ptr<Pool> pool,
+ Inputs inputs)
+ : fSource(std::move(source))
+ , fConfig(std::move(config))
+ , fContext(context)
+ , fModifiers(std::move(modifiers))
+ , fSymbols(symbols)
+ , fPool(std::move(pool))
+ , fOwnedElements(std::move(elements))
+ , fSharedElements(std::move(sharedElements))
+ , fInputs(inputs) {
+ fUsage = Analysis::GetUsage(*this);
+}
+
+Program::~Program() {
+ // Some or all of the program elements are in the pool. To free them safely, we must attach
+ // the pool before destroying any program elements. (Otherwise, we may accidentally call
+ // delete on a pooled node.)
+ AutoAttachPoolToThread attach(fPool.get());
+
+ fOwnedElements.clear();
+ fContext.reset();
+ fSymbols.reset();
+ fModifiers.reset();
+}
+
+std::string Program::description() const {
+ std::string result = fConfig->versionDescription();
+ for (const ProgramElement* e : this->elements()) {
+ result += e->description();
+ }
+ return result;
+}
+
+const FunctionDeclaration* Program::getFunction(const char* functionName) const {
+ const Symbol* symbol = fSymbols->find(functionName);
+ bool valid = symbol && symbol->is<FunctionDeclaration>() &&
+ symbol->as<FunctionDeclaration>().definition();
+ return valid ? &symbol->as<FunctionDeclaration>() : nullptr;
+}
+
+static void gather_uniforms(UniformInfo* info, const Type& type, const std::string& name) {
+ switch (type.typeKind()) {
+ case Type::TypeKind::kStruct:
+ for (const auto& f : type.fields()) {
+ gather_uniforms(info, *f.fType, name + "." + std::string(f.fName));
+ }
+ break;
+ case Type::TypeKind::kArray:
+ for (int i = 0; i < type.columns(); ++i) {
+ gather_uniforms(info, type.componentType(),
+ String::printf("%s[%d]", name.c_str(), i));
+ }
+ break;
+ case Type::TypeKind::kScalar:
+ case Type::TypeKind::kVector:
+ case Type::TypeKind::kMatrix:
+ info->fUniforms.push_back({name, type.componentType().numberKind(),
+ type.rows(), type.columns(), info->fUniformSlotCount});
+ info->fUniformSlotCount += type.slotCount();
+ break;
+ default:
+ break;
+ }
+}
+
+std::unique_ptr<UniformInfo> Program::getUniformInfo() {
+ auto info = std::make_unique<UniformInfo>();
+ for (const ProgramElement* e : this->elements()) {
+ if (!e->is<GlobalVarDeclaration>()) {
+ continue;
+ }
+ const GlobalVarDeclaration& decl = e->as<GlobalVarDeclaration>();
+ const Variable& var = *decl.varDeclaration().var();
+ if (var.modifiers().fFlags & Modifiers::kUniform_Flag) {
+ gather_uniforms(info.get(), var.type(), std::string(var.name()));
+ }
+ }
+ return info;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLProgram.h b/gfx/skia/skia/src/sksl/ir/SkSLProgram.h
new file mode 100644
index 0000000000..fbdb936e49
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLProgram.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_PROGRAM
+#define SKSL_PROGRAM
+
+#include "src/sksl/ir/SkSLType.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+// name of the uniform used to handle features that are sensitive to whether Y is flipped.
+// TODO: find a better home for this constant
+#define SKSL_RTFLIP_NAME "u_skRTFlip"
+
+namespace SkSL {
+
+class Context;
+class FunctionDeclaration;
+class ModifiersPool;
+class Pool;
+class ProgramElement;
+class ProgramUsage;
+class SymbolTable;
+struct ProgramConfig;
+
+/** Represents a list the Uniforms contained within a Program. */
+struct UniformInfo {
+ struct Uniform {
+ std::string fName;
+ SkSL::Type::NumberKind fKind;
+ int fColumns;
+ int fRows;
+ int fSlot;
+ };
+ std::vector<Uniform> fUniforms;
+ int fUniformSlotCount = 0;
+};
+
+/**
+ * Represents a fully-digested program, ready for code generation.
+ */
+struct Program {
+ struct Inputs {
+ bool fUseFlipRTUniform = false;
+ bool operator==(const Inputs& that) const {
+ return fUseFlipRTUniform == that.fUseFlipRTUniform;
+ }
+ bool operator!=(const Inputs& that) const { return !(*this == that); }
+ };
+
+ Program(std::unique_ptr<std::string> source,
+ std::unique_ptr<ProgramConfig> config,
+ std::shared_ptr<Context> context,
+ std::vector<std::unique_ptr<ProgramElement>> elements,
+ std::vector<const ProgramElement*> sharedElements,
+ std::unique_ptr<ModifiersPool> modifiers,
+ std::shared_ptr<SymbolTable> symbols,
+ std::unique_ptr<Pool> pool,
+ Inputs inputs);
+
+ ~Program();
+
+ class ElementsCollection {
+ public:
+ class iterator {
+ public:
+ const ProgramElement* operator*() {
+ if (fShared != fSharedEnd) {
+ return *fShared;
+ } else {
+ return fOwned->get();
+ }
+ }
+
+ iterator& operator++() {
+ if (fShared != fSharedEnd) {
+ ++fShared;
+ } else {
+ ++fOwned;
+ }
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const {
+ return fOwned == other.fOwned && fShared == other.fShared;
+ }
+
+ bool operator!=(const iterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ using Owned = std::vector<std::unique_ptr<ProgramElement>>::const_iterator;
+ using Shared = std::vector<const ProgramElement*>::const_iterator;
+ friend class ElementsCollection;
+
+ iterator(Owned owned, Owned ownedEnd, Shared shared, Shared sharedEnd)
+ : fOwned(owned), fOwnedEnd(ownedEnd), fShared(shared), fSharedEnd(sharedEnd) {}
+
+ Owned fOwned;
+ Owned fOwnedEnd;
+ Shared fShared;
+ Shared fSharedEnd;
+ };
+
+ iterator begin() const {
+ return iterator(fProgram.fOwnedElements.begin(), fProgram.fOwnedElements.end(),
+ fProgram.fSharedElements.begin(), fProgram.fSharedElements.end());
+ }
+
+ iterator end() const {
+ return iterator(fProgram.fOwnedElements.end(), fProgram.fOwnedElements.end(),
+ fProgram.fSharedElements.end(), fProgram.fSharedElements.end());
+ }
+
+ private:
+ friend struct Program;
+
+ ElementsCollection(const Program& program) : fProgram(program) {}
+ const Program& fProgram;
+ };
+
+ /**
+ * Iterates over *all* elements in this Program, both owned and shared (builtin). The iterator's
+ * value type is `const ProgramElement*`, so it's clear that you *must not* modify anything (as
+ * you might be mutating shared data).
+ */
+ ElementsCollection elements() const { return ElementsCollection(*this); }
+
+ /**
+ * Returns a function declaration with the given name; null is returned if the function doesn't
+ * exist or has no definition. If the function might have overloads, you can use nextOverload()
+ * to search for the function with the expected parameter list.
+ */
+ const FunctionDeclaration* getFunction(const char* functionName) const;
+
+ /**
+ * Returns a list of uniforms used by this Program. The uniform list will exclude opaque types
+ * like textures, samplers, or child effects.
+ */
+ std::unique_ptr<UniformInfo> getUniformInfo();
+
+ std::string description() const;
+ const ProgramUsage* usage() const { return fUsage.get(); }
+
+ std::unique_ptr<std::string> fSource;
+ std::unique_ptr<ProgramConfig> fConfig;
+ std::shared_ptr<Context> fContext;
+ std::unique_ptr<ProgramUsage> fUsage;
+ std::unique_ptr<ModifiersPool> fModifiers;
+ // it's important to keep fOwnedElements defined after (and thus destroyed before) fSymbols,
+ // because destroying elements can modify reference counts in symbols
+ std::shared_ptr<SymbolTable> fSymbols;
+ std::unique_ptr<Pool> fPool;
+ // Contains *only* elements owned exclusively by this program.
+ std::vector<std::unique_ptr<ProgramElement>> fOwnedElements;
+ // Contains *only* elements owned by a built-in module that are included in this program.
+ // Use elements() to iterate over the combined set of owned + shared elements.
+ std::vector<const ProgramElement*> fSharedElements;
+ Inputs fInputs;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h
new file mode 100644
index 0000000000..3ee739fb79
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLReturnStatement.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_RETURNSTATEMENT
+#define SKSL_RETURNSTATEMENT
+
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+namespace SkSL {
+
+/**
+ * A 'return' statement.
+ */
+class ReturnStatement final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kReturn;
+
+ ReturnStatement(Position pos, std::unique_ptr<Expression> expression)
+ : INHERITED(pos, kIRNodeKind)
+ , fExpression(std::move(expression)) {}
+
+ static std::unique_ptr<Statement> Make(Position pos,
+ std::unique_ptr<Expression> expression) {
+ return std::make_unique<ReturnStatement>(pos, std::move(expression));
+ }
+
+ std::unique_ptr<Expression>& expression() {
+ return fExpression;
+ }
+
+ const std::unique_ptr<Expression>& expression() const {
+ return fExpression;
+ }
+
+ void setExpression(std::unique_ptr<Expression> expr) {
+ fExpression = std::move(expr);
+ }
+
+ std::unique_ptr<Statement> clone() const override {
+ return std::make_unique<ReturnStatement>(fPosition,
+ this->expression() ? this->expression()->clone() : nullptr);
+ }
+
+ std::string description() const override {
+ if (this->expression()) {
+ return "return " + this->expression()->description() + ";";
+ } else {
+ return "return;";
+ }
+ }
+
+private:
+ std::unique_ptr<Expression> fExpression;
+
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSetting.cpp b/gfx/skia/skia/src/sksl/ir/SkSLSetting.cpp
new file mode 100644
index 0000000000..303549bcf7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSetting.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLSetting.h"
+
+#include "include/core/SkTypes.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+
+#include <initializer_list>
+
+namespace SkSL {
+
+namespace {
+
+using CapsLookupTable = SkTHashMap<std::string_view, Setting::CapsPtr>;
+
+static const CapsLookupTable& caps_lookup_table() {
+ // Create a lookup table that converts strings into the equivalent ShaderCaps member-pointers.
+ static CapsLookupTable* sCapsLookupTable = new CapsLookupTable({
+ CapsLookupTable::Pair("mustDoOpBetweenFloorAndAbs",
+ &ShaderCaps::fMustDoOpBetweenFloorAndAbs),
+ CapsLookupTable::Pair("mustGuardDivisionEvenAfterExplicitZeroCheck",
+ &ShaderCaps::fMustGuardDivisionEvenAfterExplicitZeroCheck),
+ CapsLookupTable::Pair("atan2ImplementedAsAtanYOverX",
+ &ShaderCaps::fAtan2ImplementedAsAtanYOverX),
+ CapsLookupTable::Pair("floatIs32Bits",
+ &ShaderCaps::fFloatIs32Bits),
+ CapsLookupTable::Pair("integerSupport",
+ &ShaderCaps::fIntegerSupport),
+ CapsLookupTable::Pair("builtinDeterminantSupport",
+ &ShaderCaps::fBuiltinDeterminantSupport),
+ CapsLookupTable::Pair("rewriteMatrixVectorMultiply",
+ &ShaderCaps::fRewriteMatrixVectorMultiply),
+ });
+ return *sCapsLookupTable;
+}
+
+} // namespace
+
+std::string_view Setting::name() const {
+ for (const auto& [name, capsPtr] : caps_lookup_table()) {
+ if (capsPtr == fCapsPtr) {
+ return name;
+ }
+ }
+ SkUNREACHABLE;
+}
+
+std::unique_ptr<Expression> Setting::Convert(const Context& context,
+ Position pos,
+ const std::string_view& name) {
+ SkASSERT(context.fConfig);
+
+ if (ProgramConfig::IsRuntimeEffect(context.fConfig->fKind)) {
+ context.fErrors->error(pos, "name 'sk_Caps' is reserved");
+ return nullptr;
+ }
+
+ const CapsPtr* capsPtr = caps_lookup_table().find(name);
+ if (!capsPtr) {
+ context.fErrors->error(pos, "unknown capability flag '" + std::string(name) + "'");
+ return nullptr;
+ }
+
+ return Setting::Make(context, pos, *capsPtr);
+}
+
+std::unique_ptr<Expression> Setting::Make(const Context& context, Position pos, CapsPtr capsPtr) {
+ if (context.fCaps) {
+ // We know the caps values--return a boolean literal.
+ return Literal::MakeBool(context, pos, context.fCaps->*capsPtr);
+ }
+
+ // We don't know the caps values yet--generate a Setting IRNode.
+ return std::make_unique<Setting>(pos, capsPtr, context.fTypes.fBool.get());
+}
+
+std::unique_ptr<Expression> Setting::toLiteral(const Context& context) const {
+ SkASSERT(context.fCaps);
+ return Literal::MakeBool(fPosition, context.fCaps->*fCapsPtr, &this->type());
+}
+
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSetting.h b/gfx/skia/skia/src/sksl/ir/SkSLSetting.h
new file mode 100644
index 0000000000..6085744ad0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSetting.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SETTING
+#define SKSL_SETTING
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+
+
+namespace SkSL {
+
+class Context;
+class Type;
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * Represents a compile-time constant setting, such as sk_Caps.integerSupport. These IRNodes are
+ * used when assembling a module. These nodes are replaced with the value of the setting during
+ * compilation when ShaderCaps are available.
+ */
+class Setting final : public Expression {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kSetting;
+
+ using CapsPtr = const bool ShaderCaps::*;
+
+ Setting(Position pos, CapsPtr capsPtr, const Type* type)
+ : INHERITED(pos, kIRNodeKind, type)
+ , fCapsPtr(capsPtr) {}
+
+ // Creates the current value of the associated caps bit as a Literal if ShaderCaps are
+ // available, or a Setting IRNode when ShaderCaps are not known. Reports errors via the
+ // ErrorReporter.
+ static std::unique_ptr<Expression> Convert(const Context& context,
+ Position pos,
+ const std::string_view& name);
+
+ // Creates the current value of the passed-in caps bit as a Literal if ShaderCaps are
+ // available, or a Setting IRNode when ShaderCaps are not known.
+ static std::unique_ptr<Expression> Make(const Context& context, Position pos, CapsPtr capsPtr);
+
+ // Converts a Setting expression to its actual ShaderCaps value (boolean true/false).
+ std::unique_ptr<Expression> toLiteral(const Context& context) const;
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<Setting>(pos, fCapsPtr, &this->type());
+ }
+
+ std::string_view name() const;
+
+ std::string description(OperatorPrecedence) const override {
+ return "sk_Caps." + std::string(this->name());
+ }
+
+private:
+ CapsPtr fCapsPtr;
+
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLStructDefinition.h b/gfx/skia/skia/src/sksl/ir/SkSLStructDefinition.h
new file mode 100644
index 0000000000..4b6b5b81d7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLStructDefinition.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_STRUCTDEFINITION
+#define SKSL_STRUCTDEFINITION
+
+#include <memory>
+
+#include "include/private/SkSLProgramElement.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+/**
+ * A struct at global scope, as in:
+ *
+ * struct RenderData {
+ * float3 color;
+ * bool highQuality;
+ * };
+ */
+class StructDefinition final : public ProgramElement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kStructDefinition;
+
+ StructDefinition(Position pos, const Type& type)
+ : INHERITED(pos, kIRNodeKind)
+ , fType(&type) {}
+
+ const Type& type() const {
+ return *fType;
+ }
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::make_unique<StructDefinition>(fPosition, this->type());
+ }
+
+ std::string description() const override {
+ std::string s = "struct ";
+ s += this->type().name();
+ s += " { ";
+ for (const auto& f : this->type().fields()) {
+ s += f.fModifiers.description();
+ s += f.fType->description();
+ s += " ";
+ s += f.fName;
+ s += "; ";
+ }
+ s += "};";
+ return s;
+ }
+
+private:
+ const Type* fType = nullptr;
+
+ using INHERITED = ProgramElement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwitchCase.h b/gfx/skia/skia/src/sksl/ir/SkSLSwitchCase.h
new file mode 100644
index 0000000000..206693ceaf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwitchCase.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SWITCHCASE
+#define SKSL_SWITCHCASE
+
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLString.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <cinttypes>
+
+namespace SkSL {
+
+/**
+ * A single case of a 'switch' statement.
+ */
+class SwitchCase final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kSwitchCase;
+
+ static std::unique_ptr<SwitchCase> Make(Position pos, SKSL_INT value,
+ std::unique_ptr<Statement> statement) {
+ return std::unique_ptr<SwitchCase>(new SwitchCase(pos, /*isDefault=*/false, value,
+ std::move(statement)));
+ }
+
+ static std::unique_ptr<SwitchCase> MakeDefault(Position pos,
+ std::unique_ptr<Statement> statement) {
+ return std::unique_ptr<SwitchCase>(new SwitchCase(pos, /*isDefault=*/true, -1,
+ std::move(statement)));
+ }
+
+ bool isDefault() const {
+ return fDefault;
+ }
+
+ SKSL_INT value() const {
+ SkASSERT(!this->isDefault());
+ return fValue;
+ }
+
+ std::unique_ptr<Statement>& statement() {
+ return fStatement;
+ }
+
+ const std::unique_ptr<Statement>& statement() const {
+ return fStatement;
+ }
+
+ std::unique_ptr<Statement> clone() const override {
+ return fDefault ? SwitchCase::MakeDefault(fPosition, this->statement()->clone())
+ : SwitchCase::Make(fPosition, this->value(), this->statement()->clone());
+ }
+
+ std::string description() const override {
+ if (this->isDefault()) {
+ return String::printf("default:\n%s", fStatement->description().c_str());
+ } else {
+ return String::printf("case %" PRId64 ":\n%s",
+ (int64_t) this->value(),
+ fStatement->description().c_str());
+ }
+ }
+
+private:
+ SwitchCase(Position pos, bool isDefault, SKSL_INT value, std::unique_ptr<Statement> statement)
+ : INHERITED(pos, kIRNodeKind)
+ , fDefault(isDefault)
+ , fValue(std::move(value))
+ , fStatement(std::move(statement)) {}
+
+ bool fDefault;
+ SKSL_INT fValue;
+ std::unique_ptr<Statement> fStatement;
+
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.cpp b/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.cpp
new file mode 100644
index 0000000000..f0bb7d05ce
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <algorithm>
+#include <forward_list>
+#include <iterator>
+
+namespace SkSL {
+
+// Deep-copies this switch: the value expression, every case statement, and a
+// builtin-wrapped view of the symbol table are all cloned.
+std::unique_ptr<Statement> SwitchStatement::clone() const {
+ StatementArray cases;
+ cases.reserve_back(this->cases().size());
+ for (const std::unique_ptr<Statement>& stmt : this->cases()) {
+ cases.push_back(stmt->clone());
+ }
+ return std::make_unique<SwitchStatement>(fPosition,
+ this->value()->clone(),
+ std::move(cases),
+ SymbolTable::WrapIfBuiltin(this->symbols()));
+}
+
+// Renders the switch as SkSL-like text: `switch (<value>) {` followed by each
+// case's own description() and a closing brace.
+std::string SwitchStatement::description() const {
+ std::string result;
+ result += String::printf("switch (%s) {\n", this->value()->description().c_str());
+ for (const auto& c : this->cases()) {
+ result += c->description();
+ }
+ result += "}";
+ return result;
+}
+
+// Returns every case whose label duplicates an earlier one, including any second
+// (or later) `default:` label. A forward_list keeps the common "no duplicates"
+// result cheap to build and return.
+static std::forward_list<const SwitchCase*> find_duplicate_case_values(
+ const StatementArray& cases) {
+ std::forward_list<const SwitchCase*> duplicateCases;
+ SkTHashSet<SKSL_INT> intValues;
+ bool foundDefault = false;
+
+ for (const std::unique_ptr<Statement>& stmt : cases) {
+ const SwitchCase* sc = &stmt->as<SwitchCase>();
+ if (sc->isDefault()) {
+ // Only one default case is permitted per switch.
+ if (foundDefault) {
+ duplicateCases.push_front(sc);
+ continue;
+ }
+ foundDefault = true;
+ } else {
+ SKSL_INT value = sc->value();
+ if (intValues.contains(value)) {
+ duplicateCases.push_front(sc);
+ continue;
+ }
+ intValues.add(value);
+ }
+ }
+
+ // push_front leaves the list in reverse source order; the caller reverses it
+ // before reporting errors (see SwitchStatement::Convert).
+ return duplicateCases;
+}
+
+// Moves `stmt` into `target`, recursing into blocks and dropping any `break`
+// statements encountered along the way. Used when flattening a matched
+// switch-case into a plain block, where the trailing break becomes meaningless.
+static void move_all_but_break(std::unique_ptr<Statement>& stmt, StatementArray* target) {
+ switch (stmt->kind()) {
+ case Statement::Kind::kBlock: {
+ // Recurse into the block.
+ Block& block = stmt->as<Block>();
+
+ StatementArray blockStmts;
+ blockStmts.reserve_back(block.children().size());
+ for (std::unique_ptr<Statement>& blockStmt : block.children()) {
+ move_all_but_break(blockStmt, &blockStmts);
+ }
+
+ // Rebuild the block with the same kind and symbol table, minus any breaks.
+ target->push_back(Block::Make(block.fPosition, std::move(blockStmts), block.blockKind(),
+ block.symbolTable()));
+ break;
+ }
+
+ case Statement::Kind::kBreak:
+ // Do not append a break to the target.
+ break;
+
+ default:
+ // Append normal statements to the target.
+ target->push_back(std::move(stmt));
+ break;
+ }
+}
+
+// Extracts the statements that would execute if `caseToCapture` matched, as a
+// single block. Returns null (leaving `cases` untouched) if a conditional exit
+// makes the reduction unsafe; on success the captured statements are MOVED out
+// of the switch, which must then be discarded by the caller.
+std::unique_ptr<Statement> SwitchStatement::BlockForCase(StatementArray* cases,
+ SwitchCase* caseToCapture,
+ std::shared_ptr<SymbolTable> symbolTable) {
+ // We have to be careful to not move any of the pointers until after we're sure we're going to
+ // succeed, so before we make any changes at all, we check the switch-cases to decide on a plan
+ // of action. First, find the switch-case we are interested in.
+ auto iter = cases->begin();
+ for (; iter != cases->end(); ++iter) {
+ const SwitchCase& sc = (*iter)->as<SwitchCase>();
+ if (&sc == caseToCapture) {
+ break;
+ }
+ }
+
+ // Next, walk forward through the rest of the switch. If we find a conditional break, we're
+ // stuck and can't simplify at all. If we find an unconditional break, we have a range of
+ // statements that we can use for simplification.
+ auto startIter = iter;
+ Statement* stripBreakStmt = nullptr;
+ for (; iter != cases->end(); ++iter) {
+ std::unique_ptr<Statement>& stmt = (*iter)->as<SwitchCase>().statement();
+ if (Analysis::SwitchCaseContainsConditionalExit(*stmt)) {
+ // We can't reduce switch-cases to a block when they have conditional exits.
+ return nullptr;
+ }
+ if (Analysis::SwitchCaseContainsUnconditionalExit(*stmt)) {
+ // We found an unconditional exit. We can use this block, but we'll need to strip
+ // out the break statement if there is one.
+ stripBreakStmt = stmt.get();
+ break;
+ }
+ }
+
+ // We fell off the bottom of the switch or encountered a break. We know the range of statements
+ // that we need to move over, and we know it's safe to do so.
+ StatementArray caseStmts;
+ caseStmts.reserve_back(std::distance(startIter, iter) + 1);
+
+ // We can move over most of the statements as-is.
+ while (startIter != iter) {
+ caseStmts.push_back(std::move((*startIter)->as<SwitchCase>().statement()));
+ ++startIter;
+ }
+
+ // If we found an unconditional break at the end, we need to move what we can while avoiding
+ // that break.
+ if (stripBreakStmt != nullptr) {
+ SkASSERT((*startIter)->as<SwitchCase>().statement().get() == stripBreakStmt);
+ move_all_but_break((*startIter)->as<SwitchCase>().statement(), &caseStmts);
+ }
+
+ // Return our newly-synthesized block.
+ return Block::Make(caseToCapture->fPosition, std::move(caseStmts), Block::Kind::kBracedScope,
+ std::move(symbolTable));
+}
+
+// Front-end entry point: coerces the switch value to int, coerces/folds each case
+// value to a constant integer (a null case value denotes `default:`), rejects
+// duplicate labels via the ErrorReporter, then defers to Make() for optimization.
+std::unique_ptr<Statement> SwitchStatement::Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> value,
+ ExpressionArray caseValues,
+ StatementArray caseStatements,
+ std::shared_ptr<SymbolTable> symbolTable) {
+ SkASSERT(caseValues.size() == caseStatements.size());
+
+ value = context.fTypes.fInt->coerceExpression(std::move(value), context);
+ if (!value) {
+ return nullptr;
+ }
+
+ StatementArray cases;
+ for (int i = 0; i < caseValues.size(); ++i) {
+ if (caseValues[i]) {
+ Position casePos = caseValues[i]->fPosition;
+ // Case values must be constant integers of the same type as the switch value
+ std::unique_ptr<Expression> caseValue = value->type().coerceExpression(
+ std::move(caseValues[i]), context);
+ if (!caseValue) {
+ return nullptr;
+ }
+ SKSL_INT intValue;
+ if (!ConstantFolder::GetConstantInt(*caseValue, &intValue)) {
+ context.fErrors->error(casePos, "case value must be a constant integer");
+ return nullptr;
+ }
+ cases.push_back(SwitchCase::Make(casePos, intValue, std::move(caseStatements[i])));
+ } else {
+ // A null case value marks the `default:` label.
+ cases.push_back(SwitchCase::MakeDefault(pos, std::move(caseStatements[i])));
+ }
+ }
+
+ // Detect duplicate `case` labels and report an error.
+ // (Using forward_list here to optimize for the common case of no results.)
+ std::forward_list<const SwitchCase*> duplicateCases = find_duplicate_case_values(cases);
+ if (!duplicateCases.empty()) {
+ // Reverse so errors are reported in source order (the list was built front-first).
+ duplicateCases.reverse();
+ for (const SwitchCase* sc : duplicateCases) {
+ if (sc->isDefault()) {
+ context.fErrors->error(sc->fPosition, "duplicate default case");
+ } else {
+ context.fErrors->error(sc->fPosition, "duplicate case value '" +
+ std::to_string(sc->value()) + "'");
+ }
+ }
+ return nullptr;
+ }
+
+ return SwitchStatement::Make(
+ context, pos, std::move(value), std::move(cases), std::move(symbolTable));
+}
+
+// Builds the switch IR node. Preconditions (duplicate-free, well-typed SwitchCases)
+// are checked with asserts only. When optimizing and the switch value is a compile-
+// time constant, the whole statement folds to the matching case's block, or to a
+// no-op if nothing matches and there is no default.
+std::unique_ptr<Statement> SwitchStatement::Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> value,
+ StatementArray cases,
+ std::shared_ptr<SymbolTable> symbolTable) {
+ // Confirm that every statement in `cases` is a SwitchCase.
+ SkASSERT(std::all_of(cases.begin(), cases.end(), [&](const std::unique_ptr<Statement>& stmt) {
+ return stmt->is<SwitchCase>();
+ }));
+
+ // Confirm that every switch-case value is unique.
+ SkASSERT(find_duplicate_case_values(cases).empty());
+
+ // Flatten switch statements if we're optimizing, and the value is known
+ if (context.fConfig->fSettings.fOptimize) {
+ SKSL_INT switchValue;
+ if (ConstantFolder::GetConstantInt(*value, &switchValue)) {
+ SwitchCase* defaultCase = nullptr;
+ SwitchCase* matchingCase = nullptr;
+ for (const std::unique_ptr<Statement>& stmt : cases) {
+ SwitchCase& sc = stmt->as<SwitchCase>();
+ if (sc.isDefault()) {
+ defaultCase = &sc;
+ continue;
+ }
+
+ if (sc.value() == switchValue) {
+ matchingCase = &sc;
+ break;
+ }
+ }
+
+ if (!matchingCase) {
+ // No case value matches the switch value.
+ if (!defaultCase) {
+ // No default switch-case exists; the switch had no effect.
+ // We can eliminate the entire switch!
+ return Nop::Make();
+ }
+ // We had a default case; that's what we matched with.
+ matchingCase = defaultCase;
+ }
+
+ // Convert the switch-case that we matched with into a block.
+ // BlockForCase returns null if conditional exits prevent flattening.
+ std::unique_ptr<Statement> newBlock = BlockForCase(&cases, matchingCase, symbolTable);
+ if (newBlock) {
+ return newBlock;
+ }
+ }
+ }
+
+ // The switch couldn't be optimized away; emit it normally.
+ return std::make_unique<SwitchStatement>(
+ pos, std::move(value), std::move(cases), std::move(symbolTable));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.h b/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.h
new file mode 100644
index 0000000000..71b96aa229
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwitchStatement.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SWITCHSTATEMENT
+#define SKSL_SWITCHSTATEMENT
+
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLStatement.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class SwitchCase;
+class SymbolTable;
+
+/**
+ * A 'switch' statement.
+ */
+class SwitchStatement final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kSwitch;
+
+ // Direct constructor; most callers should use Convert() or Make(), which perform
+ // type coercion and optimization respectively.
+ SwitchStatement(Position pos, std::unique_ptr<Expression> value,
+ StatementArray cases, std::shared_ptr<SymbolTable> symbols)
+ : INHERITED(pos, kIRNodeKind)
+ , fValue(std::move(value))
+ , fCases(std::move(cases))
+ , fSymbols(std::move(symbols)) {}
+
+ // Create a `switch` statement with an array of case-values and case-statements.
+ // Coerces case values to the proper type and reports an error if cases are duplicated.
+ // Reports errors via the ErrorReporter.
+ static std::unique_ptr<Statement> Convert(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> value,
+ ExpressionArray caseValues,
+ StatementArray caseStatements,
+ std::shared_ptr<SymbolTable> symbolTable);
+
+ // Create a `switch` statement with an array of SwitchCases. The array of SwitchCases must
+ // already contain non-overlapping, correctly-typed case values. Reports errors via ASSERT.
+ static std::unique_ptr<Statement> Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> value,
+ StatementArray cases,
+ std::shared_ptr<SymbolTable> symbolTable);
+
+ // Returns a block containing all of the statements that will be run if the given case matches
+ // (which, owing to the statements being owned by unique_ptrs, means the switch itself will be
+ // disassembled by this call and must then be discarded).
+ // Returns null (and leaves the switch unmodified) if no such simple reduction is possible, such
+ // as when break statements appear inside conditionals.
+ static std::unique_ptr<Statement> BlockForCase(StatementArray* cases,
+ SwitchCase* caseToCapture,
+ std::shared_ptr<SymbolTable> symbolTable);
+
+ // The expression being switched on.
+ std::unique_ptr<Expression>& value() {
+ return fValue;
+ }
+
+ const std::unique_ptr<Expression>& value() const {
+ return fValue;
+ }
+
+ StatementArray& cases() {
+ return fCases;
+ }
+
+ const StatementArray& cases() const {
+ return fCases;
+ }
+
+ const std::shared_ptr<SymbolTable>& symbols() const {
+ return fSymbols;
+ }
+
+ std::unique_ptr<Statement> clone() const override;
+
+ std::string description() const override;
+
+private:
+ std::unique_ptr<Expression> fValue;
+ StatementArray fCases; // every Statement inside fCases must be a SwitchCase
+ std::shared_ptr<SymbolTable> fSymbols;
+
+ using INHERITED = Statement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.cpp b/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.cpp
new file mode 100644
index 0000000000..a19ff0274e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.cpp
@@ -0,0 +1,548 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLSwizzle.h"
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTArray.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLConstructorCompoundCast.h"
+#include "src/sksl/ir/SkSLConstructorScalarCast.h"
+#include "src/sksl/ir/SkSLConstructorSplat.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <optional>
+
+namespace SkSL {
+
+// Returns true if the swizzle mask draws all of its components from a single
+// domain (xyzw, rgba, stpq, or LTRB). ZERO/ONE constants are domain-neutral and
+// may mix with any domain; unknown components fail validation.
+static bool validate_swizzle_domain(const ComponentArray& fields) {
+ enum SwizzleDomain {
+ kCoordinate,
+ kColor,
+ kUV,
+ kRectangle,
+ };
+
+ // Unset until the first non-constant component fixes the domain.
+ std::optional<SwizzleDomain> domain;
+
+ for (int8_t field : fields) {
+ SwizzleDomain fieldDomain;
+ switch (field) {
+ case SwizzleComponent::X:
+ case SwizzleComponent::Y:
+ case SwizzleComponent::Z:
+ case SwizzleComponent::W:
+ fieldDomain = kCoordinate;
+ break;
+ case SwizzleComponent::R:
+ case SwizzleComponent::G:
+ case SwizzleComponent::B:
+ case SwizzleComponent::A:
+ fieldDomain = kColor;
+ break;
+ case SwizzleComponent::S:
+ case SwizzleComponent::T:
+ case SwizzleComponent::P:
+ case SwizzleComponent::Q:
+ fieldDomain = kUV;
+ break;
+ case SwizzleComponent::UL:
+ case SwizzleComponent::UT:
+ case SwizzleComponent::UR:
+ case SwizzleComponent::UB:
+ fieldDomain = kRectangle;
+ break;
+ case SwizzleComponent::ZERO:
+ case SwizzleComponent::ONE:
+ continue;
+ default:
+ return false;
+ }
+
+ if (!domain.has_value()) {
+ domain = fieldDomain;
+ } else if (domain != fieldDomain) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Maps a SwizzleComponent back to its single-character source form
+// (e.g. X -> 'x', UL -> 'L', ZERO -> '0'). Unknown components are unreachable.
+static char mask_char(int8_t component) {
+ switch (component) {
+ case SwizzleComponent::X: return 'x';
+ case SwizzleComponent::Y: return 'y';
+ case SwizzleComponent::Z: return 'z';
+ case SwizzleComponent::W: return 'w';
+ case SwizzleComponent::R: return 'r';
+ case SwizzleComponent::G: return 'g';
+ case SwizzleComponent::B: return 'b';
+ case SwizzleComponent::A: return 'a';
+ case SwizzleComponent::S: return 's';
+ case SwizzleComponent::T: return 't';
+ case SwizzleComponent::P: return 'p';
+ case SwizzleComponent::Q: return 'q';
+ case SwizzleComponent::UL: return 'L';
+ case SwizzleComponent::UT: return 'T';
+ case SwizzleComponent::UR: return 'R';
+ case SwizzleComponent::UB: return 'B';
+ case SwizzleComponent::ZERO: return '0';
+ case SwizzleComponent::ONE: return '1';
+ default: SkUNREACHABLE;
+ }
+}
+
+// Renders a component array as its textual mask, e.g. {X, ZERO, W} -> "x0w".
+// Used when reporting swizzle errors.
+static std::string mask_string(const ComponentArray& components) {
+ std::string result;
+ for (int8_t component : components) {
+ result += mask_char(component);
+ }
+ return result;
+}
+
+// Attempts to fold a swizzle of a compound constructor into a smaller constructor,
+// e.g. `half4(1, 2, 3, 4).yw` -> `half2(2, 4)`. Returns null when the fold is
+// unsafe (non-scalar/vector arguments, repeated non-trivial arguments, or
+// side-effecting arguments that would run zero or multiple times).
+static std::unique_ptr<Expression> optimize_constructor_swizzle(const Context& context,
+ Position pos,
+ const ConstructorCompound& base,
+ ComponentArray components) {
+ auto baseArguments = base.argumentSpan();
+ std::unique_ptr<Expression> replacement;
+ const Type& exprType = base.type();
+ const Type& componentType = exprType.componentType();
+ int swizzleSize = components.size();
+
+ // Swizzles can duplicate some elements and discard others, e.g.
+ // `half4(1, 2, 3, 4).xxz` --> `half3(1, 1, 3)`. However, there are constraints:
+ // - Expressions with side effects need to occur exactly once, even if they would otherwise be
+ // swizzle-eliminated
+ // - Non-trivial expressions should not be repeated, but elimination is OK.
+ //
+ // Look up the argument for the constructor at each index. This is typically simple but for
+ // weird cases like `half4(bar.yz, half2(foo))`, it can be harder than it seems. This example
+ // would result in:
+ // argMap[0] = {.fArgIndex = 0, .fComponent = 0} (bar.yz .x)
+ // argMap[1] = {.fArgIndex = 0, .fComponent = 1} (bar.yz .y)
+ // argMap[2] = {.fArgIndex = 1, .fComponent = 0} (half2(foo) .x)
+ // argMap[3] = {.fArgIndex = 1, .fComponent = 1} (half2(foo) .y)
+ struct ConstructorArgMap {
+ int8_t fArgIndex;
+ int8_t fComponent;
+ };
+
+ int numConstructorArgs = base.type().columns();
+ ConstructorArgMap argMap[4] = {};
+ int writeIdx = 0;
+ for (int argIdx = 0; argIdx < (int)baseArguments.size(); ++argIdx) {
+ const Expression& arg = *baseArguments[argIdx];
+ const Type& argType = arg.type();
+
+ // Only scalar and vector constructor arguments can be mapped slot-by-slot.
+ if (!argType.isScalar() && !argType.isVector()) {
+ return nullptr;
+ }
+
+ int argSlots = argType.slotCount();
+ for (int componentIdx = 0; componentIdx < argSlots; ++componentIdx) {
+ argMap[writeIdx].fArgIndex = argIdx;
+ argMap[writeIdx].fComponent = componentIdx;
+ ++writeIdx;
+ }
+ }
+ SkASSERT(writeIdx == numConstructorArgs);
+
+ // Count up the number of times each constructor argument is used by the swizzle.
+ // `half4(bar.yz, half2(foo)).xwxy` -> { 3, 1 }
+ // - bar.yz is referenced 3 times, by `.x_xy`
+ // - half(foo) is referenced 1 time, by `._w__`
+ int8_t exprUsed[4] = {};
+ for (int8_t c : components) {
+ exprUsed[argMap[c].fArgIndex]++;
+ }
+
+ for (int index = 0; index < numConstructorArgs; ++index) {
+ int8_t constructorArgIndex = argMap[index].fArgIndex;
+ const Expression& baseArg = *baseArguments[constructorArgIndex];
+
+ // Check that non-trivial expressions are not swizzled in more than once.
+ if (exprUsed[constructorArgIndex] > 1 && !Analysis::IsTrivialExpression(baseArg)) {
+ return nullptr;
+ }
+ // Check that side-effect-bearing expressions are swizzled in exactly once.
+ if (exprUsed[constructorArgIndex] != 1 && Analysis::HasSideEffects(baseArg)) {
+ return nullptr;
+ }
+ }
+
+ // Build the new argument order: consecutive components drawn from the same
+ // vector argument are coalesced into a single (inner-swizzled) argument.
+ struct ReorderedArgument {
+ int8_t fArgIndex;
+ ComponentArray fComponents;
+ };
+ SkSTArray<4, ReorderedArgument> reorderedArgs;
+ for (int8_t c : components) {
+ const ConstructorArgMap& argument = argMap[c];
+ const Expression& baseArg = *baseArguments[argument.fArgIndex];
+
+ if (baseArg.type().isScalar()) {
+ // This argument is a scalar; add it to the list as-is.
+ SkASSERT(argument.fComponent == 0);
+ reorderedArgs.push_back({argument.fArgIndex,
+ ComponentArray{}});
+ } else {
+ // This argument is a component from a vector.
+ SkASSERT(baseArg.type().isVector());
+ SkASSERT(argument.fComponent < baseArg.type().columns());
+ if (reorderedArgs.empty() ||
+ reorderedArgs.back().fArgIndex != argument.fArgIndex) {
+ // This can't be combined with the previous argument. Add a new one.
+ reorderedArgs.push_back({argument.fArgIndex,
+ ComponentArray{argument.fComponent}});
+ } else {
+ // Since we know this argument uses components, it should already have at least one
+ // component set.
+ SkASSERT(!reorderedArgs.back().fComponents.empty());
+ // Build up the current argument with one more component.
+ reorderedArgs.back().fComponents.push_back(argument.fComponent);
+ }
+ }
+ }
+
+ // Convert our reordered argument list to an actual array of expressions, with the new order and
+ // any new inner swizzles that need to be applied.
+ ExpressionArray newArgs;
+ newArgs.reserve_back(swizzleSize);
+ for (const ReorderedArgument& reorderedArg : reorderedArgs) {
+ std::unique_ptr<Expression> newArg =
+ baseArguments[reorderedArg.fArgIndex]->clone();
+
+ if (reorderedArg.fComponents.empty()) {
+ newArgs.push_back(std::move(newArg));
+ } else {
+ newArgs.push_back(Swizzle::Make(context, pos, std::move(newArg),
+ reorderedArg.fComponents));
+ }
+ }
+
+ // Wrap the new argument list in a compound constructor.
+ return ConstructorCompound::Make(context,
+ pos,
+ componentType.toCompound(context, swizzleSize, /*rows=*/1),
+ std::move(newArgs));
+}
+
+// Parses a textual swizzle mask (e.g. "x1w0") into a ComponentArray, reporting an
+// error (with a one-character source range) for any invalid character, then
+// forwards to the ComponentArray overload of Convert.
+std::unique_ptr<Expression> Swizzle::Convert(const Context& context,
+ Position pos,
+ Position maskPos,
+ std::unique_ptr<Expression> base,
+ std::string_view maskString) {
+ ComponentArray components;
+ for (size_t i = 0; i < maskString.length(); ++i) {
+ char field = maskString[i];
+ switch (field) {
+ case '0': components.push_back(SwizzleComponent::ZERO); break;
+ case '1': components.push_back(SwizzleComponent::ONE); break;
+ case 'x': components.push_back(SwizzleComponent::X); break;
+ case 'r': components.push_back(SwizzleComponent::R); break;
+ case 's': components.push_back(SwizzleComponent::S); break;
+ case 'L': components.push_back(SwizzleComponent::UL); break;
+ case 'y': components.push_back(SwizzleComponent::Y); break;
+ case 'g': components.push_back(SwizzleComponent::G); break;
+ case 't': components.push_back(SwizzleComponent::T); break;
+ case 'T': components.push_back(SwizzleComponent::UT); break;
+ case 'z': components.push_back(SwizzleComponent::Z); break;
+ case 'b': components.push_back(SwizzleComponent::B); break;
+ case 'p': components.push_back(SwizzleComponent::P); break;
+ case 'R': components.push_back(SwizzleComponent::UR); break;
+ case 'w': components.push_back(SwizzleComponent::W); break;
+ case 'a': components.push_back(SwizzleComponent::A); break;
+ case 'q': components.push_back(SwizzleComponent::Q); break;
+ case 'B': components.push_back(SwizzleComponent::UB); break;
+ default:
+ // Point the error at exactly the offending character within the mask.
+ context.fErrors->error(Position::Range(maskPos.startOffset() + i,
+ maskPos.startOffset() + i + 1),
+ String::printf("invalid swizzle component '%c'", field));
+ return nullptr;
+ }
+ }
+ return Convert(context, pos, maskPos, std::move(base), std::move(components));
+}
+
+// Swizzles are complicated due to constant components. The most difficult case is a mask like
+// '.x1w0'. A naive approach might turn that into 'float4(base.x, 1, base.w, 0)', but that evaluates
+// 'base' twice. We instead group the swizzle mask ('xw') and constants ('1, 0') together and use a
+// secondary swizzle to put them back into the right order, so in this case we end up with
+// 'float4(base.xw, 1, 0).xzyw'.
+std::unique_ptr<Expression> Swizzle::Convert(const Context& context,
+ Position pos,
+ Position rawMaskPos,
+ std::unique_ptr<Expression> base,
+ ComponentArray inComponents) {
+ Position maskPos = rawMaskPos.valid() ? rawMaskPos : pos;
+ // Reject masks that mix component domains (e.g. `.xg`).
+ if (!validate_swizzle_domain(inComponents)) {
+ context.fErrors->error(maskPos, "invalid swizzle mask '" + mask_string(inComponents) + "'");
+ return nullptr;
+ }
+
+ const Type& baseType = base->type().scalarTypeForLiteral();
+
+ if (!baseType.isVector() && !baseType.isScalar()) {
+ context.fErrors->error(
+ pos, "cannot swizzle value of type '" + baseType.displayName() + "'");
+ return nullptr;
+ }
+
+ if (inComponents.size() > 4) {
+ // Highlight the excess characters (everything past the fourth) when possible.
+ Position errorPos = rawMaskPos.valid() ? Position::Range(maskPos.startOffset() + 4,
+ maskPos.endOffset())
+ : pos;
+ context.fErrors->error(errorPos,
+ "too many components in swizzle mask '" + mask_string(inComponents) + "'");
+ return nullptr;
+ }
+
+ // Normalize each domain's components onto X/Y/Z/W, skipping ZERO/ONE for now.
+ // The [[fallthrough]] chains funnel out-of-range components (e.g. `.z` on a
+ // two-column vector) into the error case at the bottom.
+ ComponentArray maskComponents;
+ bool foundXYZW = false;
+ for (int i = 0; i < inComponents.size(); ++i) {
+ switch (inComponents[i]) {
+ case SwizzleComponent::ZERO:
+ case SwizzleComponent::ONE:
+ // Skip over constant fields for now.
+ break;
+ case SwizzleComponent::X:
+ case SwizzleComponent::R:
+ case SwizzleComponent::S:
+ case SwizzleComponent::UL:
+ foundXYZW = true;
+ maskComponents.push_back(SwizzleComponent::X);
+ break;
+ case SwizzleComponent::Y:
+ case SwizzleComponent::G:
+ case SwizzleComponent::T:
+ case SwizzleComponent::UT:
+ foundXYZW = true;
+ if (baseType.columns() >= 2) {
+ maskComponents.push_back(SwizzleComponent::Y);
+ break;
+ }
+ [[fallthrough]];
+ case SwizzleComponent::Z:
+ case SwizzleComponent::B:
+ case SwizzleComponent::P:
+ case SwizzleComponent::UR:
+ foundXYZW = true;
+ if (baseType.columns() >= 3) {
+ maskComponents.push_back(SwizzleComponent::Z);
+ break;
+ }
+ [[fallthrough]];
+ case SwizzleComponent::W:
+ case SwizzleComponent::A:
+ case SwizzleComponent::Q:
+ case SwizzleComponent::UB:
+ foundXYZW = true;
+ if (baseType.columns() >= 4) {
+ maskComponents.push_back(SwizzleComponent::W);
+ break;
+ }
+ [[fallthrough]];
+ default:
+ // The swizzle component references a field that doesn't exist in the base type.
+ context.fErrors->error(
+ Position::Range(maskPos.startOffset() + i,
+ maskPos.startOffset() + i + 1),
+ String::printf("invalid swizzle component '%c'",
+ mask_char(inComponents[i])));
+ return nullptr;
+ }
+ }
+
+ // A mask made only of constants (e.g. `.0011`) never reads the base expression.
+ if (!foundXYZW) {
+ context.fErrors->error(maskPos, "swizzle must refer to base expression");
+ return nullptr;
+ }
+
+ // Coerce literals in expressions such as `(12345).xxx` to their actual type.
+ base = baseType.coerceExpression(std::move(base), context);
+ if (!base) {
+ return nullptr;
+ }
+
+ // First, we need a vector expression that is the non-constant portion of the swizzle, packed:
+ // scalar.xxx -> type3(scalar)
+ // scalar.x0x0 -> type2(scalar)
+ // vector.zyx -> vector.zyx
+ // vector.x0y0 -> vector.xy
+ std::unique_ptr<Expression> expr = Swizzle::Make(context, pos, std::move(base), maskComponents);
+
+ // If we have processed the entire swizzle, we're done.
+ if (maskComponents.size() == inComponents.size()) {
+ return expr;
+ }
+
+ // Now we create a constructor that has the correct number of elements for the final swizzle,
+ // with all fields at the start. It's not finished yet; constants we need will be added below.
+ // scalar.x0x0 -> type4(type2(x), ...)
+ // vector.y111 -> type4(vector.y, ...)
+ // vector.z10x -> type4(vector.zx, ...)
+ //
+ // The constructor will have at most three arguments: { base expr, constant 0, constant 1 }
+ ExpressionArray constructorArgs;
+ constructorArgs.reserve_back(3);
+ constructorArgs.push_back(std::move(expr));
+
+ // Apply another swizzle to shuffle the constants into the correct place. Any constant values we
+ // need are also tacked on to the end of the constructor.
+ // scalar.x0x0 -> type4(type2(x), 0).xyxy
+ // vector.y111 -> type2(vector.y, 1).xyyy
+ // vector.z10x -> type4(vector.zx, 1, 0).xzwy
+ const Type* scalarType = &baseType.componentType();
+ ComponentArray swizzleComponents;
+ int maskFieldIdx = 0;
+ int constantFieldIdx = maskComponents.size();
+ int constantZeroIdx = -1, constantOneIdx = -1;
+
+ for (int i = 0; i < inComponents.size(); i++) {
+ switch (inComponents[i]) {
+ case SwizzleComponent::ZERO:
+ if (constantZeroIdx == -1) {
+ // Synthesize a '0' argument at the end of the constructor.
+ constructorArgs.push_back(Literal::Make(pos, /*value=*/0, scalarType));
+ constantZeroIdx = constantFieldIdx++;
+ }
+ swizzleComponents.push_back(constantZeroIdx);
+ break;
+ case SwizzleComponent::ONE:
+ if (constantOneIdx == -1) {
+ // Synthesize a '1' argument at the end of the constructor.
+ constructorArgs.push_back(Literal::Make(pos, /*value=*/1, scalarType));
+ constantOneIdx = constantFieldIdx++;
+ }
+ swizzleComponents.push_back(constantOneIdx);
+ break;
+ default:
+ // The non-constant fields are already in the expected order.
+ swizzleComponents.push_back(maskFieldIdx++);
+ break;
+ }
+ }
+
+ expr = ConstructorCompound::Make(context, pos,
+ scalarType->toCompound(context, constantFieldIdx, /*rows=*/1),
+ std::move(constructorArgs));
+
+ // Create (and potentially optimize-away) the resulting swizzle-expression.
+ return Swizzle::Make(context, pos, std::move(expr), swizzleComponents);
+}
+
+// Builds a swizzle over X/Y/Z/W components only, applying a series of
+// simplifications: scalar splats, identity masks, swizzle-of-swizzle folding,
+// and constant-expression rewrites (splat/cast/compound constructors).
+std::unique_ptr<Expression> Swizzle::Make(const Context& context,
+ Position pos,
+ std::unique_ptr<Expression> expr,
+ ComponentArray components) {
+ const Type& exprType = expr->type();
+ SkASSERTF(exprType.isVector() || exprType.isScalar(),
+ "cannot swizzle type '%s'", exprType.description().c_str());
+ SkASSERT(components.size() >= 1 && components.size() <= 4);
+
+ // Confirm that the component array only contains X/Y/Z/W. (Call MakeWith01 if you want support
+ // for ZERO and ONE. Once initial IR generation is complete, no swizzles should have zeros or
+ // ones in them.)
+ SkASSERT(std::all_of(components.begin(), components.end(), [](int8_t component) {
+ return component >= SwizzleComponent::X &&
+ component <= SwizzleComponent::W;
+ }));
+
+ // SkSL supports splatting a scalar via `scalar.xxxx`, but not all versions of GLSL allow this.
+ // Replace swizzles with equivalent splat constructors (`scalar.xxx` --> `half3(value)`).
+ if (exprType.isScalar()) {
+ return ConstructorSplat::Make(context, pos,
+ exprType.toCompound(context, components.size(), /*rows=*/1),
+ std::move(expr));
+ }
+
+ // Detect identity swizzles like `color.rgba` and optimize it away.
+ if (components.size() == exprType.columns()) {
+ bool identity = true;
+ for (int i = 0; i < components.size(); ++i) {
+ if (components[i] != i) {
+ identity = false;
+ break;
+ }
+ }
+ if (identity) {
+ // Keep the swizzle's position so diagnostics still point at the full expression.
+ expr->fPosition = pos;
+ return expr;
+ }
+ }
+
+ // Optimize swizzles of swizzles, e.g. replace `foo.argb.rggg` with `foo.arrr`.
+ if (expr->is<Swizzle>()) {
+ Swizzle& base = expr->as<Swizzle>();
+ ComponentArray combined;
+ for (int8_t c : components) {
+ combined.push_back(base.components()[c]);
+ }
+
+ // It may actually be possible to further simplify this swizzle. Go again.
+ // (e.g. `color.abgr.abgr` --> `color.rgba` --> `color`.)
+ return Swizzle::Make(context, pos, std::move(base.base()), combined);
+ }
+
+ // If we are swizzling a constant expression, we can use its value instead here (so that
+ // swizzles like `colorWhite.x` can be simplified to `1`).
+ const Expression* value = ConstantFolder::GetConstantValueForVariable(*expr);
+
+ // `half4(scalar).zyy` can be optimized to `half3(scalar)`, and `half3(scalar).y` can be
+ // optimized to just `scalar`. The swizzle components don't actually matter, as every field
+ // in a splat constructor holds the same value.
+ if (value->is<ConstructorSplat>()) {
+ const ConstructorSplat& splat = value->as<ConstructorSplat>();
+ return ConstructorSplat::Make(
+ context, pos,
+ splat.type().componentType().toCompound(context, components.size(), /*rows=*/1),
+ splat.argument()->clone());
+ }
+
+ // Swizzles on casts, like `half4(myFloat4).zyy`, can optimize to `half3(myFloat4.zyy)`.
+ if (value->is<ConstructorCompoundCast>()) {
+ const ConstructorCompoundCast& cast = value->as<ConstructorCompoundCast>();
+ const Type& castType = cast.type().componentType().toCompound(context, components.size(),
+ /*rows=*/1);
+ std::unique_ptr<Expression> swizzled = Swizzle::Make(context, pos, cast.argument()->clone(),
+ std::move(components));
+ // A one-component result needs a scalar cast rather than a compound cast.
+ return (castType.columns() > 1)
+ ? ConstructorCompoundCast::Make(context, pos, castType, std::move(swizzled))
+ : ConstructorScalarCast::Make(context, pos, castType, std::move(swizzled));
+ }
+
+ // Swizzles on compound constructors, like `half4(1, 2, 3, 4).yw`, can become `half2(2, 4)`.
+ if (value->is<ConstructorCompound>()) {
+ const ConstructorCompound& ctor = value->as<ConstructorCompound>();
+ if (auto replacement = optimize_constructor_swizzle(context, pos, ctor, components)) {
+ return replacement;
+ }
+ }
+
+ // The swizzle could not be simplified, so apply the requested swizzle to the base expression.
+ return std::make_unique<Swizzle>(context, pos, std::move(expr), components);
+}
+
+// Renders the swizzle as `<base>.<mask>`. Indexing "xyzw" is safe because Make()
+// asserts every stored component is in the X..W range.
+std::string Swizzle::description(OperatorPrecedence) const {
+ std::string result = this->base()->description(OperatorPrecedence::kPostfix) + ".";
+ for (int x : this->components()) {
+ result += "xyzw"[x];
+ }
+ return result;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h b/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h
new file mode 100644
index 0000000000..9911546103
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSwizzle.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SWIZZLE
+#define SKSL_SWIZZLE
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * Represents a vector swizzle operation such as 'float3(1, 2, 3).zyx'.
+ */
+class Swizzle final : public Expression {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kSwizzle;
+
+    // Builds a swizzle whose type is the component type of `base`, expanded to
+    // `components.size()` columns.
+    Swizzle(const Context& context, Position pos, std::unique_ptr<Expression> base,
+            const ComponentArray& components)
+            : INHERITED(pos, kIRNodeKind,
+                        &base->type().componentType().toCompound(context, components.size(), 1))
+            , fBase(std::move(base))
+            , fComponents(components) {
+        SkASSERT(this->components().size() >= 1 && this->components().size() <= 4);
+    }
+
+    // Swizzle::Convert permits component arrays containing ZERO or ONE, does typechecking, reports
+    // errors via ErrorReporter, and returns an expression that combines constructors and native
+    // swizzles (comprised solely of X/Y/Z/W).
+    static std::unique_ptr<Expression> Convert(const Context& context,
+                                               Position pos,
+                                               Position maskPos,
+                                               std::unique_ptr<Expression> base,
+                                               ComponentArray inComponents);
+
+    static std::unique_ptr<Expression> Convert(const Context& context,
+                                               Position pos,
+                                               Position maskPos,
+                                               std::unique_ptr<Expression> base,
+                                               std::string_view maskString);
+
+    // Swizzle::Make does not permit ZERO or ONE in the component array, just X/Y/Z/W; errors are
+    // reported via ASSERT.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            std::unique_ptr<Expression> expr,
+                                            ComponentArray inComponents);
+
+    std::unique_ptr<Expression>& base() {
+        return fBase;
+    }
+
+    const std::unique_ptr<Expression>& base() const {
+        return fBase;
+    }
+
+    const ComponentArray& components() const {
+        return fComponents;
+    }
+
+    // Clones via the private constructor below, so the already-computed type is reused as-is.
+    std::unique_ptr<Expression> clone(Position pos) const override {
+        return std::unique_ptr<Expression>(new Swizzle(pos, &this->type(), this->base()->clone(),
+                                                       this->components()));
+    }
+
+    std::string description(OperatorPrecedence) const override;
+
+private:
+    // Used by clone(); accepts the result type directly instead of deriving it from `base`.
+    Swizzle(Position pos, const Type* type, std::unique_ptr<Expression> base,
+            const ComponentArray& components)
+            : INHERITED(pos, kIRNodeKind, type)
+            , fBase(std::move(base))
+            , fComponents(components) {
+        SkASSERT(this->components().size() >= 1 && this->components().size() <= 4);
+    }
+
+    std::unique_ptr<Expression> fBase;
+    ComponentArray fComponents;
+
+    using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp
new file mode 100644
index 0000000000..e8771c9560
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLType.h"
+
+namespace SkSL {
+
+// Returns true if `name` resolves (in this table or any parent) to a Type symbol.
+bool SymbolTable::isType(std::string_view name) const {
+    const Symbol* symbol = this->find(name);
+    return symbol && symbol->is<Type>();
+}
+
+// Walks up the parent chain until a built-in table is reached, then checks whether `name` is a
+// type there. Returns false when no built-in ancestor exists.
+bool SymbolTable::isBuiltinType(std::string_view name) const {
+    if (!this->isBuiltin()) {
+        return fParent && fParent->isBuiltinType(name);
+    }
+    return this->isType(name);
+}
+
+// Walks up the parent chain until a built-in table is reached, then looks up `name` there.
+// Returns nullptr when no built-in ancestor exists or the name is not found.
+const Symbol* SymbolTable::findBuiltinSymbol(std::string_view name) const {
+    if (!this->isBuiltin()) {
+        return fParent ? fParent->findBuiltinSymbol(name) : nullptr;
+    }
+    return this->find(name);
+}
+
+// Looks up a symbol by pre-hashed key, searching this table first and then ascending the parent
+// chain; returns nullptr if the name is not found anywhere.
+Symbol* SymbolTable::lookup(const SymbolKey& key) const {
+    Symbol** symbolPPtr = fSymbols.find(key);
+    if (symbolPPtr) {
+        return *symbolPPtr;
+    }
+
+    // The symbol wasn't found; recurse into the parent symbol table.
+    return fParent ? fParent->lookup(key) : nullptr;
+}
+
+// Gives `symbol` a new name and registers it under that name via addWithoutOwnership. The old
+// name's entry is not removed, so the previous name continues to map to this symbol.
+void SymbolTable::renameSymbol(Symbol* symbol, std::string_view newName) {
+    if (symbol->is<FunctionDeclaration>()) {
+        // This is a function declaration, so we need to rename the entire overload set.
+        for (FunctionDeclaration* fn = &symbol->as<FunctionDeclaration>(); fn != nullptr;
+             fn = fn->mutableNextOverload()) {
+            fn->setName(newName);
+        }
+    } else {
+        // Other types of symbols don't allow multiple symbols with the same name.
+        symbol->setName(newName);
+    }
+
+    this->addWithoutOwnership(symbol);
+}
+
+// Stores `str` in this table so its lifetime matches the table's; returns a stable pointer.
+const std::string* SymbolTable::takeOwnershipOfString(std::string str) {
+    fOwnedStrings.push_front(std::move(str));
+    // Because fOwnedStrings is a linked list, pointers to elements are stable.
+    return &fOwnedStrings.front();
+}
+
+// Registers `symbol` under its name. Same-name function declarations are chained together as an
+// overload set. A duplicate non-function name -- or a global colliding with a parent-module
+// symbol when this table sits at a module boundary -- falls through to the error report below.
+void SymbolTable::addWithoutOwnership(Symbol* symbol) {
+    auto key = MakeSymbolKey(symbol->name());
+
+    // If this is a function declaration, we need to keep the overload chain in sync.
+    if (symbol->is<FunctionDeclaration>()) {
+        // If we have a function with the same name...
+        Symbol* existingSymbol = this->lookup(key);
+        if (existingSymbol && existingSymbol->is<FunctionDeclaration>()) {
+            // ... add the existing function as the next overload in the chain.
+            FunctionDeclaration* existingDecl = &existingSymbol->as<FunctionDeclaration>();
+            symbol->as<FunctionDeclaration>().setNextOverload(existingDecl);
+            fSymbols[key] = symbol;
+            return;
+        }
+    }
+
+    if (fAtModuleBoundary && fParent && fParent->lookup(key)) {
+        // We are attempting to declare a symbol at global scope that already exists in a parent
+        // module. This is a duplicate symbol and should be rejected.
+        // (Intentionally empty: fall through to the error report below.)
+    } else {
+        Symbol*& refInSymbolTable = fSymbols[key];
+
+        if (refInSymbolTable == nullptr) {
+            refInSymbolTable = symbol;
+            return;
+        }
+        // A symbol with this name already exists in this table; fall through to the error.
+    }
+
+    ThreadContext::ReportError("symbol '" + std::string(symbol->name()) + "' was already defined",
+                               symbol->fPosition);
+}
+
+// Unconditionally maps the symbol's name to `symbol`, replacing any existing entry in this table.
+void SymbolTable::injectWithoutOwnership(Symbol* symbol) {
+    auto key = MakeSymbolKey(symbol->name());
+    fSymbols[key] = symbol;
+}
+
+// Returns the array type `type[arraySize]`, creating and registering it if needed; a zero
+// arraySize returns `type` unchanged.
+const Type* SymbolTable::addArrayDimension(const Type* type, int arraySize) {
+    if (arraySize == 0) {
+        return type;
+    }
+    // If this is a builtin type, we add it as high as possible in the symbol table tree (at the
+    // module boundary), to enable additional reuse of the array-type.
+    if (type->isInBuiltinTypes() && fParent && !fAtModuleBoundary) {
+        return fParent->addArrayDimension(type, arraySize);
+    }
+    // Reuse an existing array type with this name if one already exists in our symbol table.
+    std::string arrayName = type->getArrayName(arraySize);
+    if (const Symbol* existingType = this->find(arrayName)) {
+        return &existingType->as<Type>();
+    }
+    // Add a new array type to the symbol table.
+    const std::string* arrayNamePtr = this->takeOwnershipOfString(std::move(arrayName));
+    return this->add(Type::MakeArrayType(*arrayNamePtr, *type, arraySize));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h
new file mode 100644
index 0000000000..47f619c82a
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLSymbolTable.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_SYMBOLTABLE
+#define SKSL_SYMBOLTABLE
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkOpts_spi.h"
+#include "include/private/SkSLSymbol.h"
+#include "src/core/SkTHash.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <forward_list>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+
+class Type;
+
+/**
+ * Maps identifiers to symbols.
+ */
+class SymbolTable {
+public:
+    explicit SymbolTable(bool builtin)
+            : fBuiltin(builtin) {}
+
+    explicit SymbolTable(std::shared_ptr<SymbolTable> parent, bool builtin)
+            : fParent(parent)
+            , fBuiltin(builtin) {}
+
+    /** Replaces the passed-in SymbolTable with a newly-created child symbol table. */
+    static void Push(std::shared_ptr<SymbolTable>* table) {
+        Push(table, (*table)->isBuiltin());
+    }
+    static void Push(std::shared_ptr<SymbolTable>* table, bool isBuiltin) {
+        *table = std::make_shared<SymbolTable>(*table, isBuiltin);
+    }
+
+    /**
+     * Replaces the passed-in SymbolTable with its parent. If the child symbol table is otherwise
+     * unreferenced, it will be deleted.
+     */
+    static void Pop(std::shared_ptr<SymbolTable>* table) {
+        *table = (*table)->fParent;
+    }
+
+    /**
+     * If the input is a built-in symbol table, returns a new empty symbol table as a child of the
+     * input table. If the input is not a built-in symbol table, returns it as-is. Built-in symbol
+     * tables must not be mutated after creation, so they must be wrapped if mutation is necessary.
+     */
+    static std::shared_ptr<SymbolTable> WrapIfBuiltin(std::shared_ptr<SymbolTable> symbolTable) {
+        if (!symbolTable) {
+            return nullptr;
+        }
+        if (!symbolTable->isBuiltin()) {
+            return symbolTable;
+        }
+        return std::make_shared<SymbolTable>(std::move(symbolTable), /*builtin=*/false);
+    }
+
+    /**
+     * Looks up the requested symbol and returns a const pointer.
+     */
+    const Symbol* find(std::string_view name) const {
+        return this->lookup(MakeSymbolKey(name));
+    }
+
+    /**
+     * Looks up the requested symbol, only searching the built-in symbol tables. Always const.
+     */
+    const Symbol* findBuiltinSymbol(std::string_view name) const;
+
+    /**
+     * Looks up the requested symbol and returns a mutable pointer. Use caution--mutating a symbol
+     * will have program-wide impact, and built-in symbol tables must never be mutated.
+     */
+    Symbol* findMutable(std::string_view name) const {
+        return this->lookup(MakeSymbolKey(name));
+    }
+
+    /**
+     * Assigns a new name to the passed-in symbol. The old name will continue to exist in the symbol
+     * table and point to the symbol.
+     */
+    void renameSymbol(Symbol* symbol, std::string_view newName);
+
+    /**
+     * Returns true if the name refers to a type (user or built-in) in the current symbol table.
+     */
+    bool isType(std::string_view name) const;
+
+    /**
+     * Returns true if the name refers to a builtin type.
+     */
+    bool isBuiltinType(std::string_view name) const;
+
+    /**
+     * Adds a symbol to this symbol table, without conferring ownership. The caller is responsible
+     * for keeping the Symbol alive throughout the lifetime of the program/module.
+     */
+    void addWithoutOwnership(Symbol* symbol);
+
+    /**
+     * Adds a symbol to this symbol table, conferring ownership.
+     */
+    template <typename T>
+    T* add(std::unique_ptr<T> symbol) {
+        T* ptr = symbol.get();
+        this->addWithoutOwnership(this->takeOwnershipOfSymbol(std::move(symbol)));
+        return ptr;
+    }
+
+    /**
+     * Forces a symbol into this symbol table, without conferring ownership. Replaces any existing
+     * symbol with the same name, if one exists.
+     */
+    void injectWithoutOwnership(Symbol* symbol);
+
+    /**
+     * Forces a symbol into this symbol table, conferring ownership. Replaces any existing symbol
+     * with the same name, if one exists.
+     */
+    template <typename T>
+    T* inject(std::unique_ptr<T> symbol) {
+        T* ptr = symbol.get();
+        this->injectWithoutOwnership(this->takeOwnershipOfSymbol(std::move(symbol)));
+        return ptr;
+    }
+
+    /**
+     * Confers ownership of a symbol without adding its name to the lookup table.
+     */
+    template <typename T>
+    T* takeOwnershipOfSymbol(std::unique_ptr<T> symbol) {
+        T* ptr = symbol.get();
+        fOwnedSymbols.push_back(std::move(symbol));
+        return ptr;
+    }
+
+    /**
+     * Given type = `float` and arraySize = 5, creates the array type `float[5]` in the symbol
+     * table. The created array type is returned. If zero is passed, the base type is returned
+     * unchanged.
+     */
+    const Type* addArrayDimension(const Type* type, int arraySize);
+
+    // Call fn for every symbol in the table. You may not mutate anything.
+    template <typename Fn>
+    void foreach(Fn&& fn) const {
+        fSymbols.foreach(
+                [&fn](const SymbolKey& key, const Symbol* symbol) { fn(key.fName, symbol); });
+    }
+
+    size_t count() {
+        return fSymbols.count();
+    }
+
+    /** Returns true if this is a built-in SymbolTable. */
+    bool isBuiltin() const {
+        return fBuiltin;
+    }
+
+    /** Stores `n` in this table; the returned pointer stays valid for the table's lifetime. */
+    const std::string* takeOwnershipOfString(std::string n);
+
+    /**
+     * Indicates that this symbol table's parent is in a different module than this one.
+     */
+    void markModuleBoundary() {
+        fAtModuleBoundary = true;
+    }
+
+    // Parent scope; lookup falls back to it when a name is not found in this table.
+    std::shared_ptr<SymbolTable> fParent;
+
+    // Symbols owned by this table (populated via takeOwnershipOfSymbol/add/inject).
+    std::vector<std::unique_ptr<const Symbol>> fOwnedSymbols;
+
+private:
+    // Hash-map key: a name plus its precomputed hash. Equality compares names only.
+    struct SymbolKey {
+        std::string_view fName;
+        uint32_t fHash;
+
+        bool operator==(const SymbolKey& that) const { return fName == that.fName; }
+        bool operator!=(const SymbolKey& that) const { return fName != that.fName; }
+        struct Hash {
+            uint32_t operator()(const SymbolKey& key) const { return key.fHash; }
+        };
+    };
+
+    // Builds a key for `name`, hashing it once up front.
+    static SymbolKey MakeSymbolKey(std::string_view name) {
+        return SymbolKey{name, SkOpts::hash_fn(name.data(), name.size(), 0)};
+    }
+
+    Symbol* lookup(const SymbolKey& key) const;
+
+    bool fBuiltin = false;                // true if this is a built-in (immutable) table
+    bool fAtModuleBoundary = false;       // set by markModuleBoundary()
+    std::forward_list<std::string> fOwnedStrings;
+    SkTHashMap<SymbolKey, Symbol*, SymbolKey::Hash> fSymbols;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.cpp b/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.cpp
new file mode 100644
index 0000000000..2447bc043f
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLTernaryExpression.h"
+
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLOperator.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLPrefixExpression.h"
+
+namespace SkSL {
+
+// Type-checks and coerces a ternary's operands, reporting problems through the ErrorReporter:
+// `test` must coerce to bool, opaque branch types are rejected, the two branches must share a
+// common type (found via the EQEQ operator's binary-type rules), and arrays are rejected under
+// strict ES2 mode. On success, defers to Make() for simplification.
+std::unique_ptr<Expression> TernaryExpression::Convert(const Context& context,
+                                                       Position pos,
+                                                       std::unique_ptr<Expression> test,
+                                                       std::unique_ptr<Expression> ifTrue,
+                                                       std::unique_ptr<Expression> ifFalse) {
+    test = context.fTypes.fBool->coerceExpression(std::move(test), context);
+    if (!test || !ifTrue || !ifFalse) {
+        return nullptr;
+    }
+    if (ifTrue->type().componentType().isOpaque()) {
+        context.fErrors->error(pos, "ternary expression of opaque type '" +
+                ifTrue->type().displayName() + "' not allowed");
+        return nullptr;
+    }
+    // Determine the common type for the true/false branches using `==` coercion rules.
+    const Type* trueType;
+    const Type* falseType;
+    const Type* resultType;
+    Operator equalityOp(Operator::Kind::EQEQ);
+    if (!equalityOp.determineBinaryType(context, ifTrue->type(), ifFalse->type(),
+                                        &trueType, &falseType, &resultType) ||
+        !trueType->matches(*falseType)) {
+        context.fErrors->error(ifTrue->fPosition.rangeThrough(ifFalse->fPosition),
+                "ternary operator result mismatch: '" + ifTrue->type().displayName() + "', '" +
+                ifFalse->type().displayName() + "'");
+        return nullptr;
+    }
+    if (context.fConfig->strictES2Mode() && trueType->isOrContainsArray()) {
+        context.fErrors->error(pos, "ternary operator result may not be an array (or struct "
+                "containing an array)");
+        return nullptr;
+    }
+    ifTrue = trueType->coerceExpression(std::move(ifTrue), context);
+    if (!ifTrue) {
+        return nullptr;
+    }
+    ifFalse = falseType->coerceExpression(std::move(ifFalse), context);
+    if (!ifFalse) {
+        return nullptr;
+    }
+    return TernaryExpression::Make(context, pos, std::move(test), std::move(ifTrue),
+                                   std::move(ifFalse));
+}
+
+// Creates a ternary, applying simplifications where possible: a constant `test` selects a branch
+// outright; when optimizing, identical branches drop the branch entirely (or become a comma
+// expression if `test` has side effects), and boolean-literal branches fold into &&, || or !.
+// Inputs must already be type-checked (enforced by the SkASSERTs below).
+std::unique_ptr<Expression> TernaryExpression::Make(const Context& context,
+                                                    Position pos,
+                                                    std::unique_ptr<Expression> test,
+                                                    std::unique_ptr<Expression> ifTrue,
+                                                    std::unique_ptr<Expression> ifFalse) {
+    SkASSERT(ifTrue->type().matches(ifFalse->type()));
+    SkASSERT(!ifTrue->type().componentType().isOpaque());
+    SkASSERT(!context.fConfig->strictES2Mode() || !ifTrue->type().isOrContainsArray());
+
+    const Expression* testExpr = ConstantFolder::GetConstantValueForVariable(*test);
+    if (testExpr->isBoolLiteral()) {
+        // static boolean test, just return one of the branches
+        if (testExpr->as<Literal>().boolValue()) {
+            ifTrue->fPosition = pos;
+            return ifTrue;
+        } else {
+            ifFalse->fPosition = pos;
+            return ifFalse;
+        }
+    }
+
+    if (context.fConfig->fSettings.fOptimize) {
+        const Expression* ifTrueExpr = ConstantFolder::GetConstantValueForVariable(*ifTrue);
+        const Expression* ifFalseExpr = ConstantFolder::GetConstantValueForVariable(*ifFalse);
+
+        // A ternary with matching true- and false-cases does not need to branch.
+        if (Analysis::IsSameExpressionTree(*ifTrueExpr, *ifFalseExpr)) {
+            // If `test` has no side-effects, we can eliminate it too, and just return `ifTrue`.
+            if (!Analysis::HasSideEffects(*test)) {
+                ifTrue->fPosition = pos;
+                return ifTrue;
+            }
+            // Return a comma-expression containing `(test, ifTrue)`.
+            return BinaryExpression::Make(context, pos, std::move(test),
+                                          Operator::Kind::COMMA, std::move(ifTrue));
+        }
+
+        // A ternary of the form `test ? expr : false` can be simplified to `test && expr`.
+        if (ifFalseExpr->isBoolLiteral() && !ifFalseExpr->as<Literal>().boolValue()) {
+            return BinaryExpression::Make(context, pos, std::move(test),
+                                          Operator::Kind::LOGICALAND, std::move(ifTrue));
+        }
+
+        // A ternary of the form `test ? true : expr` can be simplified to `test || expr`.
+        if (ifTrueExpr->isBoolLiteral() && ifTrueExpr->as<Literal>().boolValue()) {
+            return BinaryExpression::Make(context, pos, std::move(test),
+                                          Operator::Kind::LOGICALOR, std::move(ifFalse));
+        }
+
+        // A ternary of the form `test ? false : true` can be simplified to `!test`.
+        if (ifTrueExpr->isBoolLiteral() && !ifTrueExpr->as<Literal>().boolValue() &&
+            ifFalseExpr->isBoolLiteral() && ifFalseExpr->as<Literal>().boolValue()) {
+            return PrefixExpression::Make(context, pos, Operator::Kind::LOGICALNOT,
+                                          std::move(test));
+        }
+    }
+
+    return std::make_unique<TernaryExpression>(pos, std::move(test), std::move(ifTrue),
+                                               std::move(ifFalse));
+}
+
+// Prints `test ? ifTrue : ifFalse`, wrapped in parentheses whenever the parent context binds at
+// ternary precedence or tighter (kTernary >= parentPrecedence).
+std::string TernaryExpression::description(OperatorPrecedence parentPrecedence) const {
+    bool needsParens = (OperatorPrecedence::kTernary >= parentPrecedence);
+    return std::string(needsParens ? "(" : "") +
+           this->test()->description(OperatorPrecedence::kTernary) + " ? " +
+           this->ifTrue()->description(OperatorPrecedence::kTernary) + " : " +
+           this->ifFalse()->description(OperatorPrecedence::kTernary) +
+           std::string(needsParens ? ")" : "");
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h b/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h
new file mode 100644
index 0000000000..41f864e2c7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLTernaryExpression.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TERNARYEXPRESSION
+#define SKSL_TERNARYEXPRESSION
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * A ternary expression (test ? ifTrue : ifFalse).
+ */
+class TernaryExpression final : public Expression {
+public:
+    inline static constexpr Kind kIRNodeKind = Kind::kTernary;
+
+    // The expression's type is taken from `ifTrue`; both branches must have matching types
+    // (asserted below).
+    TernaryExpression(Position pos, std::unique_ptr<Expression> test,
+                      std::unique_ptr<Expression> ifTrue, std::unique_ptr<Expression> ifFalse)
+        : INHERITED(pos, kIRNodeKind, &ifTrue->type())
+        , fTest(std::move(test))
+        , fIfTrue(std::move(ifTrue))
+        , fIfFalse(std::move(ifFalse)) {
+        SkASSERT(this->ifTrue()->type().matches(this->ifFalse()->type()));
+    }
+
+    // Creates a potentially-simplified form of the ternary. Typechecks and coerces input
+    // expressions; reports errors via ErrorReporter.
+    static std::unique_ptr<Expression> Convert(const Context& context,
+                                               Position pos,
+                                               std::unique_ptr<Expression> test,
+                                               std::unique_ptr<Expression> ifTrue,
+                                               std::unique_ptr<Expression> ifFalse);
+
+    // Creates a potentially-simplified form of the ternary; reports errors via ASSERT.
+    static std::unique_ptr<Expression> Make(const Context& context,
+                                            Position pos,
+                                            std::unique_ptr<Expression> test,
+                                            std::unique_ptr<Expression> ifTrue,
+                                            std::unique_ptr<Expression> ifFalse);
+
+    std::unique_ptr<Expression>& test() {
+        return fTest;
+    }
+
+    const std::unique_ptr<Expression>& test() const {
+        return fTest;
+    }
+
+    std::unique_ptr<Expression>& ifTrue() {
+        return fIfTrue;
+    }
+
+    const std::unique_ptr<Expression>& ifTrue() const {
+        return fIfTrue;
+    }
+
+    std::unique_ptr<Expression>& ifFalse() {
+        return fIfFalse;
+    }
+
+    const std::unique_ptr<Expression>& ifFalse() const {
+        return fIfFalse;
+    }
+
+    // Performs a deep copy of all three operands.
+    std::unique_ptr<Expression> clone(Position pos) const override {
+        return std::make_unique<TernaryExpression>(pos, this->test()->clone(),
+                                                   this->ifTrue()->clone(),
+                                                   this->ifFalse()->clone());
+    }
+
+    std::string description(OperatorPrecedence parentPrecedence) const override;
+
+private:
+    std::unique_ptr<Expression> fTest;
+    std::unique_ptr<Expression> fIfTrue;
+    std::unique_ptr<Expression> fIfFalse;
+
+    using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLType.cpp b/gfx/skia/skia/src/sksl/ir/SkSLType.cpp
new file mode 100644
index 0000000000..265d49dcc8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLType.cpp
@@ -0,0 +1,1208 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLType.h"
+
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLString.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/base/SkMathPriv.h"
+#include "src/base/SkSafeMath.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLConstructorArrayCast.h"
+#include "src/sksl/ir/SkSLConstructorCompoundCast.h"
+#include "src/sksl/ir/SkSLConstructorScalarCast.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <optional>
+#include <string_view>
+#include <utility>
+
+namespace SkSL {
+
+static constexpr int kMaxStructDepth = 8;
+
+// A type that is simply an alternate name for another type. Every query is forwarded to the
+// aliased target; resolve() exposes the target so callers can strip the alias away.
+class AliasType final : public Type {
+public:
+    AliasType(std::string_view name, const Type& targetType)
+        : INHERITED(name, targetType.abbreviatedName(), targetType.typeKind())
+        , fTargetType(targetType) {}
+
+    // Returns the aliased type rather than this alias.
+    const Type& resolve() const override {
+        return fTargetType;
+    }
+
+    const Type& componentType() const override {
+        return fTargetType.componentType();
+    }
+
+    NumberKind numberKind() const override {
+        return fTargetType.numberKind();
+    }
+
+    int priority() const override {
+        return fTargetType.priority();
+    }
+
+    int columns() const override {
+        return fTargetType.columns();
+    }
+
+    int rows() const override {
+        return fTargetType.rows();
+    }
+
+    int bitWidth() const override {
+        return fTargetType.bitWidth();
+    }
+
+    bool isAllowedInES2() const override {
+        return fTargetType.isAllowedInES2();
+    }
+
+    size_t slotCount() const override {
+        return fTargetType.slotCount();
+    }
+
+    SpvDim_ dimensions() const override {
+        return fTargetType.dimensions();
+    }
+
+    bool isDepth() const override {
+        return fTargetType.isDepth();
+    }
+
+    bool isArrayedTexture() const override {
+        return fTargetType.isArrayedTexture();
+    }
+
+    bool isScalar() const override {
+        return fTargetType.isScalar();
+    }
+
+    bool isLiteral() const override {
+        return fTargetType.isLiteral();
+    }
+
+    bool isVector() const override {
+        return fTargetType.isVector();
+    }
+
+    bool isMatrix() const override {
+        return fTargetType.isMatrix();
+    }
+
+    bool isArray() const override {
+        return fTargetType.isArray();
+    }
+
+    bool isUnsizedArray() const override {
+        return fTargetType.isUnsizedArray();
+    }
+
+    bool isStruct() const override {
+        return fTargetType.isStruct();
+    }
+
+    bool isInterfaceBlock() const override {
+        return fTargetType.isInterfaceBlock();
+    }
+
+    bool isMultisampled() const override {
+        return fTargetType.isMultisampled();
+    }
+
+    TextureAccess textureAccess() const override {
+        return fTargetType.textureAccess();
+    }
+
+    SkSpan<const Type* const> coercibleTypes() const override {
+        return fTargetType.coercibleTypes();
+    }
+
+private:
+    using INHERITED = Type;
+
+    const Type& fTargetType;
+};
+
+// An array of a non-array component type; fCount is either a positive size or kUnsizedArray.
+class ArrayType final : public Type {
+public:
+    inline static constexpr TypeKind kTypeKind = TypeKind::kArray;
+
+    ArrayType(std::string_view name, const char* abbrev, const Type& componentType, int count)
+        : INHERITED(name, abbrev, kTypeKind)
+        , fComponentType(componentType)
+        , fCount(count) {
+        SkASSERT(count > 0 || count == kUnsizedArray);
+        // Disallow multi-dimensional arrays.
+        SkASSERT(!componentType.is<ArrayType>());
+    }
+
+    bool isArray() const override {
+        return true;
+    }
+
+    bool isUnsizedArray() const override {
+        return fCount == kUnsizedArray;
+    }
+
+    const Type& componentType() const override {
+        return fComponentType;
+    }
+
+    // For arrays, columns() reports the element count.
+    int columns() const override {
+        return fCount;
+    }
+
+    int bitWidth() const override {
+        return this->componentType().bitWidth();
+    }
+
+    bool isAllowedInES2() const override {
+        return fComponentType.isAllowedInES2();
+    }
+
+    // Only meaningful for sized arrays (asserted below).
+    size_t slotCount() const override {
+        SkASSERT(fCount != kUnsizedArray);
+        SkASSERT(fCount > 0);
+        return fCount * fComponentType.slotCount();
+    }
+
+private:
+    using INHERITED = Type;
+
+    const Type& fComponentType;
+    int fCount;
+};
+
+// A "generic" placeholder type standing for a fixed set of coercible concrete types
+// (at most 9, the capacity of fCoercibleTypes -- asserted in the constructor).
+class GenericType final : public Type {
+public:
+    inline static constexpr TypeKind kTypeKind = TypeKind::kGeneric;
+
+    GenericType(const char* name, SkSpan<const Type* const> coercibleTypes)
+            : INHERITED(name, "G", kTypeKind) {
+        fNumTypes = coercibleTypes.size();
+        SkASSERT(fNumTypes <= std::size(fCoercibleTypes));
+        std::copy(coercibleTypes.begin(), coercibleTypes.end(), fCoercibleTypes);
+    }
+
+    SkSpan<const Type* const> coercibleTypes() const override {
+        return SkSpan(fCoercibleTypes, fNumTypes);
+    }
+
+private:
+    using INHERITED = Type;
+
+    const Type* fCoercibleTypes[9];
+    size_t fNumTypes;
+};
+
+// The type of a numeric literal; behaves as a scalar and exposes (via scalarTypeForLiteral)
+// the concrete scalar type the literal resolves to.
+class LiteralType : public Type {
+public:
+    inline static constexpr TypeKind kTypeKind = TypeKind::kLiteral;
+
+    LiteralType(const char* name, const Type& scalarType, int8_t priority)
+        : INHERITED(name, "L", kTypeKind)
+        , fScalarType(scalarType)
+        , fPriority(priority) {}
+
+    const Type& scalarTypeForLiteral() const override {
+        return fScalarType;
+    }
+
+    int priority() const override {
+        return fPriority;
+    }
+
+    int columns() const override {
+        return 1;
+    }
+
+    int rows() const override {
+        return 1;
+    }
+
+    NumberKind numberKind() const override {
+        return fScalarType.numberKind();
+    }
+
+    int bitWidth() const override {
+        return fScalarType.bitWidth();
+    }
+
+    double minimumValue() const override {
+        return fScalarType.minimumValue();
+    }
+
+    double maximumValue() const override {
+        return fScalarType.maximumValue();
+    }
+
+    bool isScalar() const override {
+        return true;
+    }
+
+    bool isLiteral() const override {
+        return true;
+    }
+
+    size_t slotCount() const override {
+        return 1;
+    }
+
+private:
+    using INHERITED = Type;
+
+    const Type& fScalarType;
+    int8_t fPriority;
+};
+
+
+// A concrete scalar type (signed/unsigned integer or float) of a given bit width and
+// coercion priority. Note: unsigned scalars are not allowed in strict ES2 mode.
+class ScalarType final : public Type {
+public:
+    inline static constexpr TypeKind kTypeKind = TypeKind::kScalar;
+
+    ScalarType(std::string_view name, const char* abbrev, NumberKind numberKind, int8_t priority,
+               int8_t bitWidth)
+        : INHERITED(name, abbrev, kTypeKind)
+        , fNumberKind(numberKind)
+        , fPriority(priority)
+        , fBitWidth(bitWidth) {}
+
+    NumberKind numberKind() const override {
+        return fNumberKind;
+    }
+
+    int priority() const override {
+        return fPriority;
+    }
+
+    int bitWidth() const override {
+        return fBitWidth;
+    }
+
+    int columns() const override {
+        return 1;
+    }
+
+    int rows() const override {
+        return 1;
+    }
+
+    bool isScalar() const override {
+        return true;
+    }
+
+    bool isAllowedInES2() const override {
+        return fNumberKind != NumberKind::kUnsigned;
+    }
+
+    size_t slotCount() const override {
+        return 1;
+    }
+
+    using int_limits = std::numeric_limits<int32_t>;
+    using short_limits = std::numeric_limits<int16_t>;
+    using uint_limits = std::numeric_limits<uint32_t>;
+    using ushort_limits = std::numeric_limits<uint16_t>;
+    using float_limits = std::numeric_limits<float>;
+
+    /** Returns the minimum value that can fit in the type. */
+    double minimumValue() const override {
+        switch (this->numberKind()) {
+            case NumberKind::kSigned:
+                return this->highPrecision() ? int_limits::lowest()
+                                             : short_limits::lowest();
+
+            case NumberKind::kUnsigned:
+                return 0;
+
+            case NumberKind::kFloat:
+            default:
+                return float_limits::lowest();
+        }
+    }
+
+    /** Returns the maximum value that can fit in the type. */
+    double maximumValue() const override {
+        switch (this->numberKind()) {
+            case NumberKind::kSigned:
+                return this->highPrecision() ? int_limits::max()
+                                             : short_limits::max();
+
+            case NumberKind::kUnsigned:
+                return this->highPrecision() ? uint_limits::max()
+                                             : ushort_limits::max();
+
+            case NumberKind::kFloat:
+            default:
+                return float_limits::max();
+        }
+    }
+
+private:
+    using INHERITED = Type;
+
+    NumberKind fNumberKind;
+    int8_t fPriority;
+    int8_t fBitWidth;
+};
+
+// An atomic type; disallowed under strict ES2 rules (isAllowedInES2 returns false).
+class AtomicType final : public Type {
+public:
+    inline static constexpr TypeKind kTypeKind = TypeKind::kAtomic;
+
+    AtomicType(std::string_view name, const char* abbrev) : INHERITED(name, abbrev, kTypeKind) {}
+
+    bool isAllowedInES2() const override { return false; }
+
+private:
+    using INHERITED = Type;
+};
+
+// A matrix type with 2-4 columns and 2-4 rows of a scalar component type; only square
+// matrices are permitted in ES2 (see isAllowedInES2).
+class MatrixType final : public Type {
+public:
+    inline static constexpr TypeKind kTypeKind = TypeKind::kMatrix;
+
+    MatrixType(std::string_view name, const char* abbrev, const Type& componentType,
+               int8_t columns, int8_t rows)
+        : INHERITED(name, abbrev, kTypeKind)
+        , fComponentType(componentType.as<ScalarType>())
+        , fColumns(columns)
+        , fRows(rows) {
+        SkASSERT(columns >= 2 && columns <= 4);
+        SkASSERT(rows >= 2 && rows <= 4);
+    }
+
+    const ScalarType& componentType() const override {
+        return fComponentType;
+    }
+
+    int columns() const override {
+        return fColumns;
+    }
+
+    int rows() const override {
+        return fRows;
+    }
+
+    int bitWidth() const override {
+        return this->componentType().bitWidth();
+    }
+
+    bool isMatrix() const override {
+        return true;
+    }
+
+    bool isAllowedInES2() const override {
+        return fColumns == fRows;
+    }
+
+    size_t slotCount() const override {
+        return fColumns * fRows;
+    }
+
+private:
+    using INHERITED = Type;
+
+    const ScalarType& fComponentType;
+    int8_t fColumns;
+    int8_t fRows;
+};
+
+// A texture type, carrying its dimensionality plus depth/arrayed/multisampled flags and
+// access mode.
+class TextureType final : public Type {
+public:
+    inline static constexpr TypeKind kTypeKind = TypeKind::kTexture;
+
+    TextureType(const char* name, SpvDim_ dimensions, bool isDepth, bool isArrayed,
+                bool isMultisampled, TextureAccess textureAccess)
+            : INHERITED(name, "T", kTypeKind)
+            , fDimensions(dimensions)
+            , fIsDepth(isDepth)
+            , fIsArrayed(isArrayed)
+            , fIsMultisampled(isMultisampled)
+            , fTextureAccess(textureAccess) {}
+
+    SpvDim_ dimensions() const override {
+        return fDimensions;
+    }
+
+    bool isDepth() const override {
+        return fIsDepth;
+    }
+
+    bool isArrayedTexture() const override {
+        return fIsArrayed;
+    }
+
+    bool isMultisampled() const override {
+        return fIsMultisampled;
+    }
+
+    TextureAccess textureAccess() const override {
+        return fTextureAccess;
+    }
+
+private:
+    using INHERITED = Type;
+
+    SpvDim_ fDimensions;
+    bool fIsDepth;
+    bool fIsArrayed;
+    bool fIsMultisampled;
+    TextureAccess fTextureAccess;
+};
+
+class SamplerType final : public Type {
+public:
+ inline static constexpr TypeKind kTypeKind = TypeKind::kSampler;
+
+ SamplerType(const char* name, const Type& textureType)
+ : INHERITED(name, "Z", kTypeKind)
+ , fTextureType(textureType.as<TextureType>()) {}
+
+ const TextureType& textureType() const override {
+ return fTextureType;
+ }
+
+ SpvDim_ dimensions() const override {
+ return fTextureType.dimensions();
+ }
+
+ bool isDepth() const override {
+ return fTextureType.isDepth();
+ }
+
+ bool isArrayedTexture() const override {
+ return fTextureType.isArrayedTexture();
+ }
+
+ bool isMultisampled() const override {
+ return fTextureType.isMultisampled();
+ }
+
+ TextureAccess textureAccess() const override {
+ return fTextureType.textureAccess();
+ }
+
+private:
+ using INHERITED = Type;
+
+ const TextureType& fTextureType;
+};
+
+class StructType final : public Type {
+public:
+ inline static constexpr TypeKind kTypeKind = TypeKind::kStruct;
+
+ StructType(Position pos, std::string_view name, std::vector<Field> fields, bool interfaceBlock)
+ : INHERITED(std::move(name), "S", kTypeKind, pos)
+ , fFields(std::move(fields))
+ , fInterfaceBlock(interfaceBlock) {}
+
+ const std::vector<Field>& fields() const override {
+ return fFields;
+ }
+
+ bool isStruct() const override {
+ return true;
+ }
+
+ bool isInterfaceBlock() const override {
+ return fInterfaceBlock;
+ }
+
+ bool isAllowedInES2() const override {
+ return std::all_of(fFields.begin(), fFields.end(), [](const Field& f) {
+ return f.fType->isAllowedInES2();
+ });
+ }
+
+ size_t slotCount() const override {
+ size_t slots = 0;
+ for (const Field& field : fFields) {
+ slots += field.fType->slotCount();
+ }
+ return slots;
+ }
+
+private:
+ using INHERITED = Type;
+
+ std::vector<Field> fFields;
+ bool fInterfaceBlock;
+};
+
+class VectorType final : public Type {
+public:
+ inline static constexpr TypeKind kTypeKind = TypeKind::kVector;
+
+ VectorType(std::string_view name, const char* abbrev, const Type& componentType,
+ int8_t columns)
+ : INHERITED(name, abbrev, kTypeKind)
+ , fComponentType(componentType.as<ScalarType>())
+ , fColumns(columns) {
+ SkASSERT(columns >= 2 && columns <= 4);
+ }
+
+ const ScalarType& componentType() const override {
+ return fComponentType;
+ }
+
+ int columns() const override {
+ return fColumns;
+ }
+
+ int rows() const override {
+ return 1;
+ }
+
+ int bitWidth() const override {
+ return this->componentType().bitWidth();
+ }
+
+ bool isVector() const override {
+ return true;
+ }
+
+ bool isAllowedInES2() const override {
+ return fComponentType.isAllowedInES2();
+ }
+
+ size_t slotCount() const override {
+ return fColumns;
+ }
+
+private:
+ using INHERITED = Type;
+
+ const ScalarType& fComponentType;
+ int8_t fColumns;
+};
+
+std::string Type::getArrayName(int arraySize) const {
+ std::string_view name = this->name();
+ if (arraySize == kUnsizedArray) {
+ return String::printf("%.*s[]", (int)name.size(), name.data());
+ }
+ return String::printf("%.*s[%d]", (int)name.size(), name.data(), arraySize);
+}
+
+std::unique_ptr<Type> Type::MakeAliasType(std::string_view name, const Type& targetType) {
+ return std::make_unique<AliasType>(std::move(name), targetType);
+}
+
+std::unique_ptr<Type> Type::MakeArrayType(std::string_view name, const Type& componentType,
+ int columns) {
+ return std::make_unique<ArrayType>(std::move(name), componentType.abbreviatedName(),
+ componentType, columns);
+}
+
+std::unique_ptr<Type> Type::MakeGenericType(const char* name, SkSpan<const Type* const> types) {
+ return std::make_unique<GenericType>(name, types);
+}
+
+std::unique_ptr<Type> Type::MakeLiteralType(const char* name, const Type& scalarType,
+ int8_t priority) {
+ return std::make_unique<LiteralType>(name, scalarType, priority);
+}
+
+std::unique_ptr<Type> Type::MakeMatrixType(std::string_view name, const char* abbrev,
+ const Type& componentType, int columns, int8_t rows) {
+ return std::make_unique<MatrixType>(name, abbrev, componentType, columns, rows);
+}
+
+std::unique_ptr<Type> Type::MakeSamplerType(const char* name, const Type& textureType) {
+ return std::make_unique<SamplerType>(name, textureType);
+}
+
+std::unique_ptr<Type> Type::MakeSpecialType(const char* name, const char* abbrev,
+ Type::TypeKind typeKind) {
+ return std::unique_ptr<Type>(new Type(name, abbrev, typeKind));
+}
+
+std::unique_ptr<Type> Type::MakeScalarType(std::string_view name, const char* abbrev,
+ Type::NumberKind numberKind, int8_t priority,
+ int8_t bitWidth) {
+ return std::make_unique<ScalarType>(name, abbrev, numberKind, priority, bitWidth);
+}
+
+std::unique_ptr<Type> Type::MakeAtomicType(std::string_view name, const char* abbrev) {
+ return std::make_unique<AtomicType>(name, abbrev);
+}
+
+static bool is_too_deeply_nested(const Type* t, int limit) {
+ if (limit <= 0) {
+ return true;
+ }
+
+ if (t->isStruct()) {
+ for (const Type::Field& f : t->fields()) {
+ if (is_too_deeply_nested(f.fType, limit - 1)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+std::unique_ptr<Type> Type::MakeStructType(const Context& context,
+ Position pos,
+ std::string_view name,
+ std::vector<Field> fields,
+ bool interfaceBlock) {
+ for (const Field& field : fields) {
+ if (field.fModifiers.fFlags != Modifiers::kNo_Flag) {
+ std::string desc = field.fModifiers.description();
+ desc.pop_back(); // remove trailing space
+ context.fErrors->error(field.fPosition,
+ "modifier '" + desc + "' is not permitted on a struct field");
+ }
+ if (field.fModifiers.fLayout.fFlags & Layout::kBinding_Flag) {
+ context.fErrors->error(field.fPosition,
+ "layout qualifier 'binding' is not permitted on a struct field");
+ }
+ if (field.fModifiers.fLayout.fFlags & Layout::kSet_Flag) {
+ context.fErrors->error(field.fPosition,
+ "layout qualifier 'set' is not permitted on a struct field");
+ }
+
+ if (field.fType->isVoid()) {
+ context.fErrors->error(field.fPosition, "type 'void' is not permitted in a struct");
+ }
+ if (field.fType->isOpaque() && !field.fType->isAtomic()) {
+ context.fErrors->error(field.fPosition, "opaque type '" + field.fType->displayName() +
+ "' is not permitted in a struct");
+ }
+ }
+ for (const Field& field : fields) {
+ if (is_too_deeply_nested(field.fType, kMaxStructDepth)) {
+ context.fErrors->error(pos, "struct '" + std::string(name) + "' is too deeply nested");
+ break;
+ }
+ }
+ size_t slots = 0;
+ for (const Field& field : fields) {
+ if (field.fType->isUnsizedArray()) {
+ continue;
+ }
+ slots = SkSafeMath::Add(slots, field.fType->slotCount());
+ if (slots >= kVariableSlotLimit) {
+ context.fErrors->error(pos, "struct is too large");
+ break;
+ }
+ }
+ return std::make_unique<StructType>(pos, name, std::move(fields), interfaceBlock);
+}
+
+std::unique_ptr<Type> Type::MakeTextureType(const char* name, SpvDim_ dimensions, bool isDepth,
+ bool isArrayedTexture, bool isMultisampled,
+ TextureAccess textureAccess) {
+ return std::make_unique<TextureType>(name, dimensions, isDepth, isArrayedTexture,
+ isMultisampled, textureAccess);
+}
+
+std::unique_ptr<Type> Type::MakeVectorType(std::string_view name, const char* abbrev,
+ const Type& componentType, int columns) {
+ return std::make_unique<VectorType>(name, abbrev, componentType, columns);
+}
+
+CoercionCost Type::coercionCost(const Type& other) const {
+ if (this->matches(other)) {
+ return CoercionCost::Free();
+ }
+ if (this->typeKind() == other.typeKind() &&
+ (this->isVector() || this->isMatrix() || this->isArray())) {
+ // Vectors/matrices/arrays of the same size can be coerced if their component type can be.
+ if (this->isMatrix() && (this->rows() != other.rows())) {
+ return CoercionCost::Impossible();
+ }
+ if (this->columns() != other.columns()) {
+ return CoercionCost::Impossible();
+ }
+ return this->componentType().coercionCost(other.componentType());
+ }
+ if (this->isNumber() && other.isNumber()) {
+ if (this->isLiteral() && this->isInteger()) {
+ return CoercionCost::Free();
+ } else if (this->numberKind() != other.numberKind()) {
+ return CoercionCost::Impossible();
+ } else if (other.priority() >= this->priority()) {
+ return CoercionCost::Normal(other.priority() - this->priority());
+ } else {
+ return CoercionCost::Narrowing(this->priority() - other.priority());
+ }
+ }
+ if (fTypeKind == TypeKind::kGeneric) {
+ SkSpan<const Type* const> types = this->coercibleTypes();
+ for (size_t i = 0; i < types.size(); i++) {
+ if (types[i]->matches(other)) {
+ return CoercionCost::Normal((int) i + 1);
+ }
+ }
+ }
+ return CoercionCost::Impossible();
+}
+
+const Type* Type::applyQualifiers(const Context& context,
+ Modifiers* modifiers,
+ SymbolTable* symbols,
+ Position pos) const {
+ const Type* type;
+ type = this->applyPrecisionQualifiers(context, modifiers, symbols, pos);
+ type = type->applyAccessQualifiers(context, modifiers, symbols, pos);
+ return type;
+}
+
+const Type* Type::applyPrecisionQualifiers(const Context& context,
+ Modifiers* modifiers,
+ SymbolTable* symbols,
+ Position pos) const {
+ int precisionQualifiers = modifiers->fFlags & (Modifiers::kHighp_Flag |
+ Modifiers::kMediump_Flag |
+ Modifiers::kLowp_Flag);
+ if (!precisionQualifiers) {
+ // No precision qualifiers here. Return the type as-is.
+ return this;
+ }
+
+ if (!ProgramConfig::IsRuntimeEffect(context.fConfig->fKind)) {
+ // We want to discourage precision modifiers internally. Instead, use the type that
+ // corresponds to the precision you need. (e.g. half vs float, short vs int)
+ context.fErrors->error(pos, "precision qualifiers are not allowed");
+ return context.fTypes.fPoison.get();
+ }
+
+ if (SkPopCount(precisionQualifiers) > 1) {
+ context.fErrors->error(pos, "only one precision qualifier can be used");
+ return context.fTypes.fPoison.get();
+ }
+
+ // We're going to return a whole new type, so the modifier bits can be cleared out.
+ modifiers->fFlags &= ~(Modifiers::kHighp_Flag |
+ Modifiers::kMediump_Flag |
+ Modifiers::kLowp_Flag);
+
+ const Type& component = this->componentType();
+ if (component.highPrecision()) {
+ if (precisionQualifiers & Modifiers::kHighp_Flag) {
+ // Type is already high precision, and we are requesting high precision. Return as-is.
+ return this;
+ }
+
+ // SkSL doesn't support low precision, so `lowp` is interpreted as medium precision.
+ // Ascertain the mediump equivalent type for this type, if any.
+ const Type* mediumpType;
+ switch (component.numberKind()) {
+ case Type::NumberKind::kFloat:
+ mediumpType = context.fTypes.fHalf.get();
+ break;
+
+ case Type::NumberKind::kSigned:
+ mediumpType = context.fTypes.fShort.get();
+ break;
+
+ case Type::NumberKind::kUnsigned:
+ mediumpType = context.fTypes.fUShort.get();
+ break;
+
+ default:
+ mediumpType = context.fTypes.fPoison.get();
+ break;
+ }
+
+ if (mediumpType) {
+ // Convert the mediump component type into the final vector/matrix/array type as needed.
+ return this->isArray()
+ ? symbols->addArrayDimension(mediumpType, this->columns())
+ : &mediumpType->toCompound(context, this->columns(), this->rows());
+ }
+ }
+
+ context.fErrors->error(pos, "type '" + this->displayName() +
+ "' does not support precision qualifiers");
+ return context.fTypes.fPoison.get();
+}
+
+const Type* Type::applyAccessQualifiers(const Context& context,
+ Modifiers* modifiers,
+ SymbolTable* symbols,
+ Position pos) const {
+ int accessQualifiers = modifiers->fFlags & (Modifiers::kReadOnly_Flag |
+ Modifiers::kWriteOnly_Flag);
+ if (!accessQualifiers) {
+ // No access qualifiers here. Return the type as-is.
+ return this;
+ }
+
+ // We're going to return a whole new type, so the modifier bits can be cleared out.
+ modifiers->fFlags &= ~(Modifiers::kReadOnly_Flag |
+ Modifiers::kWriteOnly_Flag);
+
+ if (this->matches(*context.fTypes.fReadWriteTexture2D)) {
+ switch (accessQualifiers) {
+ case Modifiers::kReadOnly_Flag:
+ return context.fTypes.fReadOnlyTexture2D.get();
+
+ case Modifiers::kWriteOnly_Flag:
+ return context.fTypes.fWriteOnlyTexture2D.get();
+
+ default:
+ context.fErrors->error(pos, "'readonly' and 'writeonly' qualifiers "
+ "cannot be combined");
+ return this;
+ }
+ }
+
+ context.fErrors->error(pos, "type '" + this->displayName() + "' does not support qualifier '" +
+ Modifiers::DescribeFlags(accessQualifiers) + "'");
+ return this;
+}
+
+const Type& Type::toCompound(const Context& context, int columns, int rows) const {
+ SkASSERT(this->isScalar());
+ if (columns == 1 && rows == 1) {
+ return *this;
+ }
+ if (this->matches(*context.fTypes.fFloat) || this->matches(*context.fTypes.fFloatLiteral)) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 1: return *context.fTypes.fFloat;
+ case 2: return *context.fTypes.fFloat2;
+ case 3: return *context.fTypes.fFloat3;
+ case 4: return *context.fTypes.fFloat4;
+ default: SK_ABORT("unsupported vector column count (%d)", columns);
+ }
+ case 2:
+ switch (columns) {
+ case 2: return *context.fTypes.fFloat2x2;
+ case 3: return *context.fTypes.fFloat3x2;
+ case 4: return *context.fTypes.fFloat4x2;
+ default: SK_ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 3:
+ switch (columns) {
+ case 2: return *context.fTypes.fFloat2x3;
+ case 3: return *context.fTypes.fFloat3x3;
+ case 4: return *context.fTypes.fFloat4x3;
+ default: SK_ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 4:
+ switch (columns) {
+ case 2: return *context.fTypes.fFloat2x4;
+ case 3: return *context.fTypes.fFloat3x4;
+ case 4: return *context.fTypes.fFloat4x4;
+ default: SK_ABORT("unsupported matrix column count (%d)", columns);
+ }
+ default: SK_ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (this->matches(*context.fTypes.fHalf)) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 1: return *context.fTypes.fHalf;
+ case 2: return *context.fTypes.fHalf2;
+ case 3: return *context.fTypes.fHalf3;
+ case 4: return *context.fTypes.fHalf4;
+ default: SK_ABORT("unsupported vector column count (%d)", columns);
+ }
+ case 2:
+ switch (columns) {
+ case 2: return *context.fTypes.fHalf2x2;
+ case 3: return *context.fTypes.fHalf3x2;
+ case 4: return *context.fTypes.fHalf4x2;
+ default: SK_ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 3:
+ switch (columns) {
+ case 2: return *context.fTypes.fHalf2x3;
+ case 3: return *context.fTypes.fHalf3x3;
+ case 4: return *context.fTypes.fHalf4x3;
+ default: SK_ABORT("unsupported matrix column count (%d)", columns);
+ }
+ case 4:
+ switch (columns) {
+ case 2: return *context.fTypes.fHalf2x4;
+ case 3: return *context.fTypes.fHalf3x4;
+ case 4: return *context.fTypes.fHalf4x4;
+ default: SK_ABORT("unsupported matrix column count (%d)", columns);
+ }
+ default: SK_ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (this->matches(*context.fTypes.fInt) || this->matches(*context.fTypes.fIntLiteral)) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 1: return *context.fTypes.fInt;
+ case 2: return *context.fTypes.fInt2;
+ case 3: return *context.fTypes.fInt3;
+ case 4: return *context.fTypes.fInt4;
+ default: SK_ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: SK_ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (this->matches(*context.fTypes.fShort)) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 1: return *context.fTypes.fShort;
+ case 2: return *context.fTypes.fShort2;
+ case 3: return *context.fTypes.fShort3;
+ case 4: return *context.fTypes.fShort4;
+ default: SK_ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: SK_ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (this->matches(*context.fTypes.fUInt)) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 1: return *context.fTypes.fUInt;
+ case 2: return *context.fTypes.fUInt2;
+ case 3: return *context.fTypes.fUInt3;
+ case 4: return *context.fTypes.fUInt4;
+ default: SK_ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: SK_ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (this->matches(*context.fTypes.fUShort)) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 1: return *context.fTypes.fUShort;
+ case 2: return *context.fTypes.fUShort2;
+ case 3: return *context.fTypes.fUShort3;
+ case 4: return *context.fTypes.fUShort4;
+ default: SK_ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: SK_ABORT("unsupported row count (%d)", rows);
+ }
+ } else if (this->matches(*context.fTypes.fBool)) {
+ switch (rows) {
+ case 1:
+ switch (columns) {
+ case 1: return *context.fTypes.fBool;
+ case 2: return *context.fTypes.fBool2;
+ case 3: return *context.fTypes.fBool3;
+ case 4: return *context.fTypes.fBool4;
+ default: SK_ABORT("unsupported vector column count (%d)", columns);
+ }
+ default: SK_ABORT("unsupported row count (%d)", rows);
+ }
+ }
+ SkDEBUGFAILF("unsupported toCompound type %s", this->description().c_str());
+ return *context.fTypes.fVoid;
+}
+
+const Type* Type::clone(SymbolTable* symbolTable) const {
+ // Many types are built-ins, and exist in every SymbolTable by default.
+ if (this->isInBuiltinTypes()) {
+ return this;
+ }
+ // Even if the type isn't a built-in, it might already exist in the SymbolTable.
+ const Symbol* clonedSymbol = symbolTable->find(this->name());
+ if (clonedSymbol != nullptr) {
+ const Type& clonedType = clonedSymbol->as<Type>();
+ SkASSERT(clonedType.typeKind() == this->typeKind());
+ return &clonedType;
+ }
+ // This type actually needs to be cloned into the destination SymbolTable.
+ switch (this->typeKind()) {
+ case TypeKind::kArray: {
+ return symbolTable->addArrayDimension(&this->componentType(), this->columns());
+ }
+ case TypeKind::kStruct: {
+ // We are cloning an existing struct, so there's no need to call MakeStructType and
+ // fully error-check it again.
+ const std::string* name = symbolTable->takeOwnershipOfString(std::string(this->name()));
+ return symbolTable->add(std::make_unique<StructType>(
+ this->fPosition, *name, this->fields(), this->isInterfaceBlock()));
+ }
+ default:
+ SkDEBUGFAILF("don't know how to clone type '%s'", this->description().c_str());
+ return nullptr;
+ }
+}
+
+std::unique_ptr<Expression> Type::coerceExpression(std::unique_ptr<Expression> expr,
+ const Context& context) const {
+ if (!expr || expr->isIncomplete(context)) {
+ return nullptr;
+ }
+ if (expr->type().matches(*this)) {
+ return expr;
+ }
+
+ const Position pos = expr->fPosition;
+ const ProgramSettings& settings = context.fConfig->fSettings;
+ if (!expr->coercionCost(*this).isPossible(settings.fAllowNarrowingConversions)) {
+ context.fErrors->error(pos, "expected '" + this->displayName() + "', but found '" +
+ expr->type().displayName() + "'");
+ return nullptr;
+ }
+
+ if (this->isScalar()) {
+ return ConstructorScalarCast::Make(context, pos, *this, std::move(expr));
+ }
+ if (this->isVector() || this->isMatrix()) {
+ return ConstructorCompoundCast::Make(context, pos, *this, std::move(expr));
+ }
+ if (this->isArray()) {
+ return ConstructorArrayCast::Make(context, pos, *this, std::move(expr));
+ }
+ context.fErrors->error(pos, "cannot construct '" + this->displayName() + "'");
+ return nullptr;
+}
+
+static bool is_or_contains_array(const Type* type, bool onlyMatchUnsizedArrays) {
+ if (type->isStruct()) {
+ for (const Type::Field& f : type->fields()) {
+ if (is_or_contains_array(f.fType, onlyMatchUnsizedArrays)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ if (type->isArray()) {
+ return onlyMatchUnsizedArrays
+ ? (type->isUnsizedArray() || is_or_contains_array(&type->componentType(), true))
+ : true;
+ }
+
+ return false;
+}
+
+bool Type::isOrContainsArray() const {
+ return is_or_contains_array(this, /*onlyMatchUnsizedArrays=*/false);
+}
+
+bool Type::isOrContainsUnsizedArray() const {
+ return is_or_contains_array(this, /*onlyMatchUnsizedArrays=*/true);
+}
+
+bool Type::isOrContainsAtomic() const {
+ if (this->isAtomic()) {
+ return true;
+ }
+
+ if (this->isArray() && this->componentType().isOrContainsAtomic()) {
+ return true;
+ }
+
+ if (this->isStruct()) {
+ for (const Field& f : this->fields()) {
+ if (f.fType->isOrContainsAtomic()) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+bool Type::isAllowedInES2(const Context& context) const {
+ return !context.fConfig->strictES2Mode() || this->isAllowedInES2();
+}
+
+bool Type::checkForOutOfRangeLiteral(const Context& context, const Expression& expr) const {
+ bool foundError = false;
+ const Type& baseType = this->componentType();
+ if (baseType.isNumber()) {
+ // Replace constant expressions with their corresponding values.
+ const Expression* valueExpr = ConstantFolder::GetConstantValueForVariable(expr);
+ if (valueExpr->supportsConstantValues()) {
+ // Iterate over every constant subexpression in the value.
+ int numSlots = valueExpr->type().slotCount();
+ for (int slot = 0; slot < numSlots; ++slot) {
+ std::optional<double> slotVal = valueExpr->getConstantValue(slot);
+ // Check for Literal values that are out of range for the base type.
+ if (slotVal.has_value() &&
+ baseType.checkForOutOfRangeLiteral(context, *slotVal, valueExpr->fPosition)) {
+ foundError = true;
+ }
+ }
+ }
+ }
+
+ // We don't need range checks for floats or booleans; any matched-type value is acceptable.
+ return foundError;
+}
+
+bool Type::checkForOutOfRangeLiteral(const Context& context, double value, Position pos) const {
+ SkASSERT(this->isScalar());
+ if (!this->isNumber()) {
+ return false;
+ }
+ if (value >= this->minimumValue() && value <= this->maximumValue()) {
+ return false;
+ }
+ // We found a value that can't fit in our type. Flag it as an error.
+ context.fErrors->error(pos, SkSL::String::printf("value is out of range for type '%s': %.0f",
+ this->displayName().c_str(),
+ value));
+ return true;
+}
+
+bool Type::checkIfUsableInArray(const Context& context, Position arrayPos) const {
+ if (this->isArray()) {
+ context.fErrors->error(arrayPos, "multi-dimensional arrays are not supported");
+ return false;
+ }
+ if (this->isVoid()) {
+ context.fErrors->error(arrayPos, "type 'void' may not be used in an array");
+ return false;
+ }
+ if (this->isOpaque() && !this->isAtomic()) {
+ context.fErrors->error(arrayPos, "opaque type '" + std::string(this->name()) +
+ "' may not be used in an array");
+ return false;
+ }
+ return true;
+}
+
+SKSL_INT Type::convertArraySize(const Context& context,
+ Position arrayPos,
+ std::unique_ptr<Expression> size) const {
+ size = context.fTypes.fInt->coerceExpression(std::move(size), context);
+ if (!size) {
+ return 0;
+ }
+ if (!this->checkIfUsableInArray(context, arrayPos)) {
+ return 0;
+ }
+ SKSL_INT count;
+ if (!ConstantFolder::GetConstantInt(*size, &count)) {
+ context.fErrors->error(size->fPosition, "array size must be an integer");
+ return 0;
+ }
+ if (count <= 0) {
+ context.fErrors->error(size->fPosition, "array size must be positive");
+ return 0;
+ }
+ if (SkSafeMath::Mul(this->slotCount(), count) > kVariableSlotLimit) {
+ context.fErrors->error(size->fPosition, "array size is too large");
+ return 0;
+ }
+ return static_cast<int>(count);
+}
+
+std::string Type::Field::description() const {
+ return fModifiers.description() + fType->displayName() + " " + std::string(fName) + ";";
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLType.h b/gfx/skia/skia/src/sksl/ir/SkSLType.h
new file mode 100644
index 0000000000..955381f8c9
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLType.h
@@ -0,0 +1,600 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TYPE
+#define SKSL_TYPE
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/spirv.h"
+
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <tuple>
+#include <vector>
+
+namespace SkSL {
+
+class Context;
+class Expression;
+class SymbolTable;
+
+struct CoercionCost {
+ static CoercionCost Free() { return { 0, 0, false }; }
+ static CoercionCost Normal(int cost) { return { cost, 0, false }; }
+ static CoercionCost Narrowing(int cost) { return { 0, cost, false }; }
+ static CoercionCost Impossible() { return { 0, 0, true }; }
+
+ bool isPossible(bool allowNarrowing) const {
+ return !fImpossible && (fNarrowingCost == 0 || allowNarrowing);
+ }
+
+ // Addition of two costs. Saturates at Impossible().
+ CoercionCost operator+(CoercionCost rhs) const {
+ if (fImpossible || rhs.fImpossible) {
+ return Impossible();
+ }
+ return { fNormalCost + rhs.fNormalCost, fNarrowingCost + rhs.fNarrowingCost, false };
+ }
+
+ bool operator<(CoercionCost rhs) const {
+ return std::tie( fImpossible, fNarrowingCost, fNormalCost) <
+ std::tie(rhs.fImpossible, rhs.fNarrowingCost, rhs.fNormalCost);
+ }
+
+ bool operator<=(CoercionCost rhs) const {
+ return std::tie( fImpossible, fNarrowingCost, fNormalCost) <=
+ std::tie(rhs.fImpossible, rhs.fNarrowingCost, rhs.fNormalCost);
+ }
+
+ int fNormalCost;
+ int fNarrowingCost;
+ bool fImpossible;
+};
+
+/**
+ * Represents a type, such as int or float4.
+ */
+class Type : public Symbol {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kType;
+ inline static constexpr int kMaxAbbrevLength = 3;
+ // Represents unspecified array dimensions, as in `int[]`.
+ inline static constexpr int kUnsizedArray = -1;
+ struct Field {
+ Field(Position pos, Modifiers modifiers, std::string_view name, const Type* type)
+ : fPosition(pos)
+ , fModifiers(modifiers)
+ , fName(name)
+ , fType(type) {}
+
+ std::string description() const;
+
+ Position fPosition;
+ Modifiers fModifiers;
+ std::string_view fName;
+ const Type* fType;
+ };
+
+ enum class TypeKind : int8_t {
+ kArray,
+ kAtomic,
+ kGeneric,
+ kLiteral,
+ kMatrix,
+ kOther,
+ kSampler,
+ kSeparateSampler,
+ kScalar,
+ kStruct,
+ kTexture,
+ kVector,
+ kVoid,
+
+ // Types that represent stages in the Skia pipeline
+ kColorFilter,
+ kShader,
+ kBlender,
+ };
+
+ enum class NumberKind : int8_t {
+ kFloat,
+ kSigned,
+ kUnsigned,
+ kBoolean,
+ kNonnumeric
+ };
+
+ enum class TextureAccess : int8_t {
+ kSample, // `kSample` access level allows both sampling and reading
+ kRead,
+ kWrite,
+ kReadWrite,
+ };
+
+ Type(const Type& other) = delete;
+
+ /** Creates an array type. `columns` may be kUnsizedArray. */
+ static std::unique_ptr<Type> MakeArrayType(std::string_view name, const Type& componentType,
+ int columns);
+
+ /** Converts a component type and a size (float, 10) into an array name ("float[10]"). */
+ std::string getArrayName(int arraySize) const;
+
+ /**
+ * Creates an alias which maps to another type.
+ */
+ static std::unique_ptr<Type> MakeAliasType(std::string_view name, const Type& targetType);
+
+ /**
+ * Create a generic type which maps to the listed types--e.g. $genType is a generic type which
+ * can match float, float2, float3 or float4.
+ */
+ static std::unique_ptr<Type> MakeGenericType(const char* name, SkSpan<const Type* const> types);
+
+ /** Create a type for literal scalars. */
+ static std::unique_ptr<Type> MakeLiteralType(const char* name, const Type& scalarType,
+ int8_t priority);
+
+ /** Create a matrix type. */
+ static std::unique_ptr<Type> MakeMatrixType(std::string_view name, const char* abbrev,
+ const Type& componentType, int columns,
+ int8_t rows);
+
+ /** Create a sampler type. */
+ static std::unique_ptr<Type> MakeSamplerType(const char* name, const Type& textureType);
+
+ /** Create a scalar type. */
+ static std::unique_ptr<Type> MakeScalarType(std::string_view name, const char* abbrev,
+ Type::NumberKind numberKind, int8_t priority,
+ int8_t bitWidth);
+
+ /**
+ * Create a "special" type with the given name, abbreviation, and TypeKind.
+ */
+ static std::unique_ptr<Type> MakeSpecialType(const char* name, const char* abbrev,
+ Type::TypeKind typeKind);
+
+ /**
+ * Creates a struct type with the given fields. Reports an error if the struct is not
+ * well-formed.
+ */
+ static std::unique_ptr<Type> MakeStructType(const Context& context,
+ Position pos,
+ std::string_view name,
+ std::vector<Field> fields,
+ bool interfaceBlock = false);
+
+ /** Create a texture type. */
+ static std::unique_ptr<Type> MakeTextureType(const char* name, SpvDim_ dimensions, bool isDepth,
+ bool isArrayedTexture, bool isMultisampled,
+ TextureAccess textureAccess);
+
+ /** Create a vector type. */
+ static std::unique_ptr<Type> MakeVectorType(std::string_view name, const char* abbrev,
+ const Type& componentType, int columns);
+
+ /** Create an atomic type. */
+ static std::unique_ptr<Type> MakeAtomicType(std::string_view name, const char* abbrev);
+
+ template <typename T>
+ bool is() const {
+ return this->typeKind() == T::kTypeKind;
+ }
+
+ template <typename T>
+ const T& as() const {
+ SkASSERT(this->is<T>());
+ return static_cast<const T&>(*this);
+ }
+
+ template <typename T>
+ T& as() {
+ SkASSERT(this->is<T>());
+ return static_cast<T&>(*this);
+ }
+
+ /** Creates a clone of this Type, if needed, and inserts it into a different symbol table. */
+ const Type* clone(SymbolTable* symbolTable) const;
+
+ /**
+ * Returns true if this type is known to come from BuiltinTypes. If this returns true, the Type
+ * will always be available in the root SymbolTable and never needs to be copied to migrate an
+ * Expression from one location to another. If it returns false, the Type might not exist in a
+ * separate SymbolTable and you'll need to consider copying it.
+ */
+ bool isInBuiltinTypes() const {
+ return !(this->isArray() || this->isStruct());
+ }
+
+ std::string displayName() const {
+ return std::string(this->scalarTypeForLiteral().name());
+ }
+
+ std::string description() const override {
+ return this->displayName();
+ }
+
+ /** Returns true if the program supports this type. Strict ES2 programs can't use ES3 types. */
+ bool isAllowedInES2(const Context& context) const;
+
+ /** Returns true if this type is legal to use in a strict-ES2 program. */
+ virtual bool isAllowedInES2() const {
+ return true;
+ }
+
+ /** If this is an alias, returns the underlying type, otherwise returns this. */
+ virtual const Type& resolve() const {
+ return *this;
+ }
+
+ /** Returns true if these types are equal after alias resolution. */
+ bool matches(const Type& other) const {
+ return this->resolve().name() == other.resolve().name();
+ }
+
+ /**
+ * Returns an abbreviated name of the type, meant for name-mangling. (e.g. float4x4 -> f44)
+ */
+ const char* abbreviatedName() const {
+ return fAbbreviatedName;
+ }
+
+ /**
+ * Returns the category (scalar, vector, matrix, etc.) of this type.
+ */
+ TypeKind typeKind() const {
+ return fTypeKind;
+ }
+
+ /**
+ * Returns the NumberKind of this type (always kNonnumeric for non-scalar values).
+ */
+ virtual NumberKind numberKind() const {
+ return NumberKind::kNonnumeric;
+ }
+
+ /**
+ * Returns true if this type is a bool.
+ */
+ bool isBoolean() const {
+ return this->numberKind() == NumberKind::kBoolean;
+ }
+
+ /**
+ * Returns true if this is a numeric scalar type.
+ */
+ bool isNumber() const {
+ switch (this->numberKind()) {
+ case NumberKind::kFloat:
+ case NumberKind::kSigned:
+ case NumberKind::kUnsigned:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /**
+ * Returns true if this is a floating-point scalar type (float or half).
+ */
+ bool isFloat() const {
+ return this->numberKind() == NumberKind::kFloat;
+ }
+
+ /**
+ * Returns true if this is a signed scalar type (int or short).
+ */
+ bool isSigned() const {
+ return this->numberKind() == NumberKind::kSigned;
+ }
+
+ /**
+ * Returns true if this is an unsigned scalar type (uint or ushort).
+ */
+ bool isUnsigned() const {
+ return this->numberKind() == NumberKind::kUnsigned;
+ }
+
+ /**
+ * Returns true if this is a signed or unsigned integer.
+ */
+ bool isInteger() const {
+ switch (this->numberKind()) {
+ case NumberKind::kSigned:
+ case NumberKind::kUnsigned:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /**
+ * Returns true if this is an "opaque type" (an external object which the shader references in
+ * some fashion). https://www.khronos.org/opengl/wiki/Data_Type_(GLSL)#Opaque_types
+ */
+ bool isOpaque() const {
+ switch (fTypeKind) {
+ case TypeKind::kAtomic:
+ case TypeKind::kBlender:
+ case TypeKind::kColorFilter:
+ case TypeKind::kSampler:
+ case TypeKind::kSeparateSampler:
+ case TypeKind::kShader:
+ case TypeKind::kTexture:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /**
+ * Returns the "priority" of a number type, in order of float > half > int > short.
+ * When operating on two number types, the result is the higher-priority type.
+ */
+ virtual int priority() const {
+ SkDEBUGFAIL("not a number type");
+ return -1;
+ }
+
+ /**
+ * Returns true if an instance of this type can be freely coerced (implicitly converted) to
+ * another type.
+ */
+ bool canCoerceTo(const Type& other, bool allowNarrowing) const {
+ return this->coercionCost(other).isPossible(allowNarrowing);
+ }
+
+ /**
+ * Determines the "cost" of coercing (implicitly converting) this type to another type. The cost
+ * is a number with no particular meaning other than that lower costs are preferable to higher
+ * costs. Returns INT_MAX if the coercion is not possible.
+ */
+ CoercionCost coercionCost(const Type& other) const;
+
+ /**
+ * For matrices and vectors, returns the type of individual cells (e.g. mat2 has a component
+ * type of Float). For arrays, returns the base type. For all other types, returns the type
+ * itself.
+ */
+ virtual const Type& componentType() const {
+ return *this;
+ }
+
+ /**
+ * For texture samplers, returns the type of texture it samples (e.g., sampler2D has
+ * a texture type of texture2D).
+ */
+ virtual const Type& textureType() const {
+ SkDEBUGFAIL("not a sampler type");
+ return *this;
+ }
+
+ /**
+ * For matrices and vectors, returns the number of columns (e.g. both mat3 and float3 return 3).
+ * For scalars, returns 1. For arrays, returns either the size of the array (if known) or -1.
+ * For all other types, causes an assertion failure.
+ */
+ virtual int columns() const {
+ SkDEBUGFAIL("type does not have columns");
+ return -1;
+ }
+
+ /**
+ * For matrices, returns the number of rows (e.g. mat2x4 returns 4). For vectors and scalars,
+ * returns 1. For all other types, causes an assertion failure.
+ */
+ virtual int rows() const {
+ SkDEBUGFAIL("type does not have rows");
+ return -1;
+ }
+
+ /** Returns the minimum value that can fit in the type. */
+ virtual double minimumValue() const {
+ SkDEBUGFAIL("type does not have a minimum value");
+ return -INFINITY;
+ }
+
+ virtual double maximumValue() const {
+ SkDEBUGFAIL("type does not have a maximum value");
+ return +INFINITY;
+ }
+
+ /**
+ * Returns the number of scalars needed to hold this type.
+ */
+ virtual size_t slotCount() const {
+ return 0;
+ }
+
+ virtual const std::vector<Field>& fields() const {
+ SK_ABORT("Internal error: not a struct");
+ }
+
+ /**
+ * For generic types, returns the types that this generic type can substitute for.
+ */
+ virtual SkSpan<const Type* const> coercibleTypes() const {
+ SkDEBUGFAIL("Internal error: not a generic type");
+ return {};
+ }
+
+ virtual SpvDim_ dimensions() const {
+ SkASSERT(false);
+ return SpvDim1D;
+ }
+
+ virtual bool isDepth() const {
+ SkASSERT(false);
+ return false;
+ }
+
+ virtual bool isArrayedTexture() const {
+ SkASSERT(false);
+ return false;
+ }
+
+ bool isVoid() const {
+ return fTypeKind == TypeKind::kVoid;
+ }
+
+ bool isGeneric() const {
+ return fTypeKind == TypeKind::kGeneric;
+ }
+
+ bool isAtomic() const { return this->typeKind() == TypeKind::kAtomic; }
+
+ virtual bool isScalar() const {
+ return false;
+ }
+
+ virtual bool isLiteral() const {
+ return false;
+ }
+
+ virtual const Type& scalarTypeForLiteral() const {
+ return *this;
+ }
+
+ virtual bool isVector() const {
+ return false;
+ }
+
+ virtual bool isMatrix() const {
+ return false;
+ }
+
+ virtual bool isArray() const {
+ return false;
+ }
+
+ virtual bool isUnsizedArray() const {
+ return false;
+ }
+
+ virtual bool isStruct() const {
+ return false;
+ }
+
+ virtual bool isInterfaceBlock() const {
+ return false;
+ }
+
+ // Is this type something that can be bound & sampled from an SkRuntimeEffect?
+ // Includes types that represent stages of the Skia pipeline (colorFilter, shader, blender).
+ bool isEffectChild() const {
+ return fTypeKind == TypeKind::kColorFilter ||
+ fTypeKind == TypeKind::kShader ||
+ fTypeKind == TypeKind::kBlender;
+ }
+
+ virtual bool isMultisampled() const {
+ SkDEBUGFAIL("not a texture type");
+ return false;
+ }
+
+ virtual TextureAccess textureAccess() const {
+ SkDEBUGFAIL("not a texture type");
+ return TextureAccess::kSample;
+ }
+
+ bool hasPrecision() const {
+ return this->componentType().isNumber() || fTypeKind == TypeKind::kSampler;
+ }
+
+ bool highPrecision() const {
+ return this->bitWidth() >= 32;
+ }
+
+ virtual int bitWidth() const {
+ return 0;
+ }
+
+ bool isOrContainsArray() const;
+ bool isOrContainsUnsizedArray() const;
+ bool isOrContainsAtomic() const;
+
+ /**
+ * Returns the corresponding vector or matrix type with the specified number of columns and
+ * rows.
+ */
+ const Type& toCompound(const Context& context, int columns, int rows) const;
+
+ /**
+ * Returns a type which honors the precision and access-level qualifiers set in Modifiers. e.g.:
+ * - Modifier `mediump` + Type `float2`: Type `half2`
+ * - Modifier `readonly` + Type `texture2D`: Type `readonlyTexture2D`
+ * Generates an error if the qualifiers don't make sense (`highp bool`, `writeonly MyStruct`)
+ */
+ const Type* applyQualifiers(const Context& context,
+ Modifiers* modifiers,
+ SymbolTable* symbols,
+ Position pos) const;
+
+ /**
+ * Coerces the passed-in expression to this type. If the types are incompatible, reports an
+ * error and returns null.
+ */
+ std::unique_ptr<Expression> coerceExpression(std::unique_ptr<Expression> expr,
+ const Context& context) const;
+
+ /** Detects any IntLiterals in the expression which can't fit in this type. */
+ bool checkForOutOfRangeLiteral(const Context& context, const Expression& expr) const;
+
+ /** Checks if `value` can fit in this type. The type must be scalar. */
+ bool checkForOutOfRangeLiteral(const Context& context, double value, Position pos) const;
+
+ /**
+ * Reports errors and returns false if this type cannot be used as the base type for an array.
+ */
+ bool checkIfUsableInArray(const Context& context, Position arrayPos) const;
+
+ /**
+ * Verifies that the expression is a valid constant array size for this type. Returns the array
+ * size, or reports errors and returns zero if the expression isn't a valid literal value.
+ */
+ SKSL_INT convertArraySize(const Context& context, Position arrayPos,
+ std::unique_ptr<Expression> size) const;
+
+protected:
+ Type(std::string_view name, const char* abbrev, TypeKind kind,
+ Position pos = Position())
+ : INHERITED(pos, kIRNodeKind, name)
+ , fTypeKind(kind) {
+ SkASSERT(strlen(abbrev) <= kMaxAbbrevLength);
+ strcpy(fAbbreviatedName, abbrev);
+ }
+
+ const Type* applyPrecisionQualifiers(const Context& context,
+ Modifiers* modifiers,
+ SymbolTable* symbols,
+ Position pos) const;
+
+ const Type* applyAccessQualifiers(const Context& context,
+ Modifiers* modifiers,
+ SymbolTable* symbols,
+ Position pos) const;
+
+private:
+ using INHERITED = Symbol;
+
+ char fAbbreviatedName[kMaxAbbrevLength + 1] = {};
+ TypeKind fTypeKind;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.cpp b/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.cpp
new file mode 100644
index 0000000000..c725b38bb6
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLTypeReference.h"
+
+#include "include/core/SkTypes.h"
+#include "include/sksl/SkSLErrorReporter.h"
+
+namespace SkSL {
+
+std::unique_ptr<TypeReference> TypeReference::Convert(const Context& context,
+ Position pos,
+ const Type* type) {
+ if (!type->isAllowedInES2(context)) {
+ context.fErrors->error(pos, "type '" + type->displayName() + "' is not supported");
+ return nullptr;
+ }
+ return TypeReference::Make(context, pos, type);
+}
+
+std::unique_ptr<TypeReference> TypeReference::Make(const Context& context,
+ Position pos,
+ const Type* type) {
+ SkASSERT(type->isAllowedInES2(context));
+ return std::make_unique<TypeReference>(context, pos, type);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h b/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h
new file mode 100644
index 0000000000..aaca8a8f64
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLTypeReference.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TYPEREFERENCE
+#define SKSL_TYPEREFERENCE
+
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+namespace SkSL {
+
+enum class OperatorPrecedence : uint8_t;
+
+/**
+ * Represents an identifier referring to a type. This is an intermediate value: TypeReferences are
+ * always eventually replaced by Constructors in valid programs.
+ */
+class TypeReference final : public Expression {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kTypeReference;
+
+ TypeReference(const Context& context, Position pos, const Type* value)
+ : TypeReference(pos, value, context.fTypes.fInvalid.get()) {}
+
+ // Creates a reference to an SkSL type; uses the ErrorReporter to report errors.
+ static std::unique_ptr<TypeReference> Convert(const Context& context,
+ Position pos,
+ const Type* type);
+
+ // Creates a reference to an SkSL type; reports errors via ASSERT.
+ static std::unique_ptr<TypeReference> Make(const Context& context, Position pos,
+ const Type* type);
+
+ const Type& value() const {
+ return fValue;
+ }
+
+ std::string description(OperatorPrecedence) const override {
+ return std::string(this->value().name());
+ }
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::unique_ptr<Expression>(new TypeReference(pos, &this->value(), &this->type()));
+ }
+
+private:
+ TypeReference(Position pos, const Type* value, const Type* type)
+ : INHERITED(pos, kIRNodeKind, type)
+ , fValue(*value) {}
+
+ const Type& fValue;
+
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.cpp b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.cpp
new file mode 100644
index 0000000000..8d698687bf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.cpp
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramKind.h"
+#include "include/private/SkSLString.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstddef>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+namespace {
+
+static bool check_valid_uniform_type(Position pos,
+ const Type* t,
+ const Context& context,
+ bool topLevel = true) {
+ const Type& ct = t->componentType();
+
+ // In RuntimeEffects we only allow a restricted set of types, namely shader/blender/colorFilter,
+ // 32-bit signed integers, 16-bit and 32-bit floats, and their composites.
+ {
+ bool error = false;
+ if (ProgramConfig::IsRuntimeEffect(context.fConfig->fKind)) {
+ // `shader`, `blender`, `colorFilter`
+ if (t->isEffectChild()) {
+ return true;
+ }
+
+ // `int`, `int2`, `int3`, `int4`
+ if (ct.isSigned() && ct.bitWidth() == 32 && (t->isScalar() || t->isVector())) {
+ return true;
+ }
+
+ // `float`, `float2`, `float3`, `float4`, `float2x2`, `float3x3`, `float4x4`
+ // `half`, `half2`, `half3`, `half4`, `half2x2`, `half3x3`, `half4x4`
+ if (ct.isFloat() &&
+ (t->isScalar() || t->isVector() || (t->isMatrix() && t->rows() == t->columns()))) {
+ return true;
+ }
+
+ // Everything else is an error.
+ error = true;
+ }
+
+ // We disallow boolean uniforms in SkSL since they are not well supported by backend
+ // platforms and drivers. We disallow atomic variables in uniforms as that doesn't map
+ // cleanly to all backends.
+ if (error || (ct.isBoolean() && (t->isScalar() || t->isVector())) || ct.isAtomic()) {
+ context.fErrors->error(
+ pos, "variables of type '" + t->displayName() + "' may not be uniform");
+ return false;
+ }
+ }
+
+ // In non-RTE SkSL we allow structs and interface blocks to be uniforms but we must make sure
+ // their fields are allowed.
+ if (t->isStruct()) {
+ for (const Type::Field& field : t->fields()) {
+ if (!check_valid_uniform_type(
+ field.fPosition, field.fType, context, /*topLevel=*/false)) {
+ // Emit a "caused by" line only for the top-level uniform type and not for any
+ // nested structs.
+ if (topLevel) {
+ context.fErrors->error(pos, "caused by:");
+ }
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+std::unique_ptr<Statement> VarDeclaration::clone() const {
+ // Cloning a VarDeclaration is inherently problematic, as we normally expect a one-to-one
+ // mapping between Variables and VarDeclarations and a straightforward clone would violate this
+ // assumption. We could of course theoretically clone the Variable as well, but that would
+ // require additional context and tracking, since for the whole process to work we would also
+ // have to fixup any subsequent VariableReference clones to point to the newly cloned Variables
+ // instead of the originals.
+ //
+ // Since the only reason we ever clone VarDeclarations is to support tests of clone() and we do
+ // not expect to ever need to do so otherwise, a full solution to this issue is unnecessary at
+ // the moment. We instead just keep track of whether a VarDeclaration is a clone so we can
+ // handle its cleanup properly. This allows clone() to work in the simple case that a
+ // VarDeclaration's clone does not outlive the original, which is adequate for testing. Since
+ // this leaves a sharp edge in place - destroying the original could cause a use-after-free in
+ // some circumstances - we also disable cloning altogether unless the
+ // fAllowVarDeclarationCloneForTesting ProgramSetting is enabled.
+ if (ThreadContext::Settings().fAllowVarDeclarationCloneForTesting) {
+ return std::make_unique<VarDeclaration>(this->var(),
+ &this->baseType(),
+ fArraySize,
+ this->value() ? this->value()->clone() : nullptr,
+ /*isClone=*/true);
+ } else {
+ SkDEBUGFAIL("VarDeclaration::clone() is unsupported");
+ return nullptr;
+ }
+}
+
+std::string VarDeclaration::description() const {
+ std::string result = this->var()->modifiers().description() + this->baseType().description() +
+ " " + std::string(this->var()->name());
+ if (this->arraySize() > 0) {
+ String::appendf(&result, "[%d]", this->arraySize());
+ }
+ if (this->value()) {
+ result += " = " + this->value()->description();
+ }
+ result += ";";
+ return result;
+}
+
+void VarDeclaration::ErrorCheck(const Context& context,
+ Position pos,
+ Position modifiersPosition,
+ const Modifiers& modifiers,
+ const Type* type,
+ Variable::Storage storage) {
+ const Type* baseType = type;
+ if (baseType->isArray()) {
+ baseType = &baseType->componentType();
+ }
+ SkASSERT(!baseType->isArray());
+
+ if (baseType->matches(*context.fTypes.fInvalid)) {
+ context.fErrors->error(pos, "invalid type");
+ return;
+ }
+ if (baseType->isVoid()) {
+ context.fErrors->error(pos, "variables of type 'void' are not allowed");
+ return;
+ }
+
+ if (baseType->componentType().isOpaque() && !baseType->componentType().isAtomic() &&
+ storage != Variable::Storage::kGlobal) {
+ context.fErrors->error(pos,
+ "variables of type '" + baseType->displayName() + "' must be global");
+ }
+ if ((modifiers.fFlags & Modifiers::kIn_Flag) && baseType->isMatrix()) {
+ context.fErrors->error(pos, "'in' variables may not have matrix type");
+ }
+ if ((modifiers.fFlags & Modifiers::kIn_Flag) && type->isUnsizedArray()) {
+ context.fErrors->error(pos, "'in' variables may not have unsized array type");
+ }
+ if ((modifiers.fFlags & Modifiers::kOut_Flag) && type->isUnsizedArray()) {
+ context.fErrors->error(pos, "'out' variables may not have unsized array type");
+ }
+ if ((modifiers.fFlags & Modifiers::kIn_Flag) && (modifiers.fFlags & Modifiers::kUniform_Flag)) {
+ context.fErrors->error(pos, "'in uniform' variables not permitted");
+ }
+ if ((modifiers.fFlags & Modifiers::kReadOnly_Flag) &&
+ (modifiers.fFlags & Modifiers::kWriteOnly_Flag)) {
+ context.fErrors->error(pos, "'readonly' and 'writeonly' qualifiers cannot be combined");
+ }
+ if ((modifiers.fFlags & Modifiers::kUniform_Flag) &&
+ (modifiers.fFlags & Modifiers::kBuffer_Flag)) {
+ context.fErrors->error(pos, "'uniform buffer' variables not permitted");
+ }
+ if ((modifiers.fFlags & Modifiers::kWorkgroup_Flag) &&
+ (modifiers.fFlags & (Modifiers::kIn_Flag | Modifiers::kOut_Flag))) {
+ context.fErrors->error(pos, "in / out variables may not be declared workgroup");
+ }
+ if ((modifiers.fFlags & Modifiers::kUniform_Flag)) {
+ check_valid_uniform_type(pos, baseType, context);
+ }
+ if (baseType->isEffectChild() && !(modifiers.fFlags & Modifiers::kUniform_Flag)) {
+ context.fErrors->error(pos,
+ "variables of type '" + baseType->displayName() + "' must be uniform");
+ }
+ if (baseType->isEffectChild() && (context.fConfig->fKind == ProgramKind::kMeshVertex ||
+ context.fConfig->fKind == ProgramKind::kMeshFragment)) {
+ context.fErrors->error(pos, "effects are not permitted in custom mesh shaders");
+ }
+ if (baseType->isOrContainsAtomic()) {
+ // An atomic variable (or a struct or an array that contains an atomic member) must be
+ // either:
+ // a. Declared as a workgroup-shared variable, OR
+ // b. Declared as the member of writable storage buffer block (i.e. has no readonly
+ // restriction).
+ //
+ // The checks below will enforce these two rules on all declarations. If the variable is not
+ // declared with the workgroup modifier, then it must be declared in the interface block
+ // storage. If this is the declaration for an interface block that contains an atomic
+ // member, then it must have the `buffer` modifier and no `readonly` modifier.
+ bool isWorkgroup = modifiers.fFlags & Modifiers::kWorkgroup_Flag;
+ bool isBlockMember = (storage == Variable::Storage::kInterfaceBlock);
+ bool isWritableStorageBuffer = modifiers.fFlags & Modifiers::kBuffer_Flag &&
+ !(modifiers.fFlags & Modifiers::kReadOnly_Flag);
+
+ if (!isWorkgroup &&
+ !(baseType->isInterfaceBlock() ? isWritableStorageBuffer : isBlockMember)) {
+ context.fErrors->error(pos,
+ "atomics are only permitted in workgroup variables and writable "
+ "storage blocks");
+ }
+ }
+ if (modifiers.fLayout.fFlags & Layout::kColor_Flag) {
+ if (!ProgramConfig::IsRuntimeEffect(context.fConfig->fKind)) {
+ context.fErrors->error(pos, "'layout(color)' is only permitted in runtime effects");
+ }
+ if (!(modifiers.fFlags & Modifiers::kUniform_Flag)) {
+ context.fErrors->error(pos,
+ "'layout(color)' is only permitted on 'uniform' variables");
+ }
+ auto validColorXformType = [](const Type& t) {
+ return t.isVector() && t.componentType().isFloat() &&
+ (t.columns() == 3 || t.columns() == 4);
+ };
+ if (!validColorXformType(*baseType)) {
+ context.fErrors->error(pos,
+ "'layout(color)' is not permitted on variables of type '" +
+ baseType->displayName() + "'");
+ }
+ }
+
+ int permitted = Modifiers::kConst_Flag | Modifiers::kHighp_Flag | Modifiers::kMediump_Flag |
+ Modifiers::kLowp_Flag;
+ if (storage == Variable::Storage::kGlobal) {
+ // Uniforms are allowed in all programs
+ permitted |= Modifiers::kUniform_Flag;
+
+ // No other modifiers are allowed in runtime effects.
+ if (!ProgramConfig::IsRuntimeEffect(context.fConfig->fKind)) {
+ if (baseType->isInterfaceBlock()) {
+ // Interface blocks allow `buffer`.
+ permitted |= Modifiers::kBuffer_Flag;
+
+ if (modifiers.fFlags & Modifiers::kBuffer_Flag) {
+ // Only storage blocks allow `readonly` and `writeonly`.
+ // (`readonly` and `writeonly` textures are converted to separate types via
+ // applyAccessQualifiers.)
+ permitted |= Modifiers::kReadOnly_Flag | Modifiers::kWriteOnly_Flag;
+ }
+
+ // It is an error for an unsized array to appear anywhere but the last member of a
+ // "buffer" block.
+ const auto& fields = baseType->fields();
+ const size_t illegalRangeEnd =
+ fields.size() - ((modifiers.fFlags & Modifiers::kBuffer_Flag) ? 1 : 0);
+ for (size_t i = 0; i < illegalRangeEnd; ++i) {
+ if (fields[i].fType->isUnsizedArray()) {
+ context.fErrors->error(
+ fields[i].fPosition,
+ "unsized array must be the last member of a storage block");
+ }
+ }
+ }
+
+ if (!baseType->isOpaque()) {
+ // Only non-opaque types allow `in` and `out`.
+ permitted |= Modifiers::kIn_Flag | Modifiers::kOut_Flag;
+ }
+ if (ProgramConfig::IsCompute(context.fConfig->fKind)) {
+ // Only compute shaders allow `workgroup`.
+ if (!baseType->isOpaque() || baseType->isAtomic()) {
+ permitted |= Modifiers::kWorkgroup_Flag;
+ }
+ } else {
+ // Only vertex/fragment shaders allow `flat` and `noperspective`.
+ permitted |= Modifiers::kFlat_Flag | Modifiers::kNoPerspective_Flag;
+ }
+ }
+ }
+
+ int permittedLayoutFlags = ~0;
+
+ // The `texture` and `sampler` modifiers can be present respectively on a texture and sampler or
+ // simultaneously on a combined image-sampler but they are not permitted on any other type.
+ switch (baseType->typeKind()) {
+ case Type::TypeKind::kSampler:
+ // Both texture and sampler flags are permitted
+ break;
+ case Type::TypeKind::kTexture:
+ permittedLayoutFlags &= ~Layout::kSampler_Flag;
+ break;
+ case Type::TypeKind::kSeparateSampler:
+ permittedLayoutFlags &= ~Layout::kTexture_Flag;
+ break;
+ default:
+ permittedLayoutFlags &= ~(Layout::kTexture_Flag | Layout::kSampler_Flag);
+ break;
+ }
+
+ // We don't allow 'binding' or 'set' on normal uniform variables, only on textures, samplers,
+ // and interface blocks (holding uniform variables). They're also only allowed at global scope,
+ // not on interface block fields (or locals/parameters).
+ bool permitBindingAndSet = baseType->typeKind() == Type::TypeKind::kSampler ||
+ baseType->typeKind() == Type::TypeKind::kSeparateSampler ||
+ baseType->typeKind() == Type::TypeKind::kTexture ||
+ baseType->isInterfaceBlock();
+ if (storage != Variable::Storage::kGlobal ||
+ ((modifiers.fFlags & Modifiers::kUniform_Flag) && !permitBindingAndSet)) {
+ permittedLayoutFlags &= ~Layout::kBinding_Flag;
+ permittedLayoutFlags &= ~Layout::kSet_Flag;
+ permittedLayoutFlags &= ~Layout::kSPIRV_Flag;
+ permittedLayoutFlags &= ~Layout::kMetal_Flag;
+ permittedLayoutFlags &= ~Layout::kWGSL_Flag;
+ permittedLayoutFlags &= ~Layout::kGL_Flag;
+ }
+ if (ProgramConfig::IsRuntimeEffect(context.fConfig->fKind)) {
+ // Disallow all layout flags except 'color' in runtime effects
+ permittedLayoutFlags &= Layout::kColor_Flag;
+ }
+ modifiers.checkPermitted(context, modifiersPosition, permitted, permittedLayoutFlags);
+}
+
+bool VarDeclaration::ErrorCheckAndCoerce(const Context& context, const Variable& var,
+ std::unique_ptr<Expression>& value) {
+ ErrorCheck(context, var.fPosition, var.modifiersPosition(), var.modifiers(), &var.type(),
+ var.storage());
+ if (value) {
+ if (var.type().isOpaque()) {
+ context.fErrors->error(value->fPosition, "opaque type '" + var.type().displayName() +
+ "' cannot use initializer expressions");
+ return false;
+ }
+ if (var.modifiers().fFlags & Modifiers::kIn_Flag) {
+ context.fErrors->error(value->fPosition,
+ "'in' variables cannot use initializer expressions");
+ return false;
+ }
+ if (var.modifiers().fFlags & Modifiers::kUniform_Flag) {
+ context.fErrors->error(value->fPosition,
+ "'uniform' variables cannot use initializer expressions");
+ return false;
+ }
+ if (var.storage() == Variable::Storage::kInterfaceBlock) {
+ context.fErrors->error(value->fPosition,
+ "initializers are not permitted on interface block fields");
+ return false;
+ }
+ value = var.type().coerceExpression(std::move(value), context);
+ if (!value) {
+ return false;
+ }
+ }
+ if (var.modifiers().fFlags & Modifiers::kConst_Flag) {
+ if (!value) {
+ context.fErrors->error(var.fPosition, "'const' variables must be initialized");
+ return false;
+ }
+ if (!Analysis::IsConstantExpression(*value)) {
+ context.fErrors->error(value->fPosition,
+ "'const' variable initializer must be a constant expression");
+ return false;
+ }
+ }
+ if (var.storage() == Variable::Storage::kInterfaceBlock) {
+ if (var.type().isOpaque()) {
+ context.fErrors->error(var.fPosition, "opaque type '" + var.type().displayName() +
+ "' is not permitted in an interface block");
+ return false;
+ }
+ }
+ if (var.storage() == Variable::Storage::kGlobal) {
+ if (value && !Analysis::IsConstantExpression(*value)) {
+ context.fErrors->error(value->fPosition,
+ "global variable initializer must be a constant expression");
+ return false;
+ }
+ }
+ return true;
+}
+
+std::unique_ptr<Statement> VarDeclaration::Convert(const Context& context,
+ std::unique_ptr<Variable> var,
+ std::unique_ptr<Expression> value,
+ bool addToSymbolTable) {
+ if (!ErrorCheckAndCoerce(context, *var, value)) {
+ return nullptr;
+ }
+ const Type* baseType = &var->type();
+ int arraySize = 0;
+ if (baseType->isArray()) {
+ arraySize = baseType->columns();
+ baseType = &baseType->componentType();
+ }
+ std::unique_ptr<Statement> varDecl = VarDeclaration::Make(context, var.get(), baseType,
+ arraySize, std::move(value));
+ if (!varDecl) {
+ return nullptr;
+ }
+
+ SymbolTable* symbols = ThreadContext::SymbolTable().get();
+ if (var->storage() == Variable::Storage::kGlobal ||
+ var->storage() == Variable::Storage::kInterfaceBlock) {
+ // Check if this globally-scoped variable name overlaps an existing symbol name.
+ if (symbols->find(var->name())) {
+ context.fErrors->error(var->fPosition,
+ "symbol '" + std::string(var->name()) + "' was already defined");
+ return nullptr;
+ }
+
+ // `sk_RTAdjust` is special, and makes the IR generator emit position-fixup expressions.
+ if (var->name() == Compiler::RTADJUST_NAME) {
+ if (ThreadContext::RTAdjustState().fVar ||
+ ThreadContext::RTAdjustState().fInterfaceBlock) {
+ context.fErrors->error(var->fPosition, "duplicate definition of 'sk_RTAdjust'");
+ return nullptr;
+ }
+ if (!var->type().matches(*context.fTypes.fFloat4)) {
+ context.fErrors->error(var->fPosition, "sk_RTAdjust must have type 'float4'");
+ return nullptr;
+ }
+ ThreadContext::RTAdjustState().fVar = var.get();
+ }
+ }
+
+ if (addToSymbolTable) {
+ symbols->add(std::move(var));
+ } else {
+ symbols->takeOwnershipOfSymbol(std::move(var));
+ }
+ return varDecl;
+}
+
+std::unique_ptr<Statement> VarDeclaration::Make(const Context& context, Variable* var,
+ const Type* baseType, int arraySize, std::unique_ptr<Expression> value) {
+ SkASSERT(!baseType->isArray());
+ // function parameters cannot have variable declarations
+ SkASSERT(var->storage() != Variable::Storage::kParameter);
+ // 'const' variables must be initialized
+ SkASSERT(!(var->modifiers().fFlags & Modifiers::kConst_Flag) || value);
+ // 'const' variable initializer must be a constant expression
+ SkASSERT(!(var->modifiers().fFlags & Modifiers::kConst_Flag) ||
+ Analysis::IsConstantExpression(*value));
+ // global variable initializer must be a constant expression
+ SkASSERT(!(value && var->storage() == Variable::Storage::kGlobal &&
+ !Analysis::IsConstantExpression(*value)));
+ // opaque type not permitted on an interface block
+ SkASSERT(!(var->storage() == Variable::Storage::kInterfaceBlock && var->type().isOpaque()));
+ // initializers are not permitted on interface block fields
+ SkASSERT(!(var->storage() == Variable::Storage::kInterfaceBlock && value));
+ // opaque type cannot use initializer expressions
+ SkASSERT(!(value && var->type().isOpaque()));
+ // 'in' variables cannot use initializer expressions
+ SkASSERT(!(value && (var->modifiers().fFlags & Modifiers::kIn_Flag)));
+ // 'uniform' variables cannot use initializer expressions
+ SkASSERT(!(value && (var->modifiers().fFlags & Modifiers::kUniform_Flag)));
+
+ auto result = std::make_unique<VarDeclaration>(var, baseType, arraySize, std::move(value));
+ var->setVarDeclaration(result.get());
+ return std::move(result);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.h b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.h
new file mode 100644
index 0000000000..b90528732e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVarDeclarations.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARDECLARATIONS
+#define SKSL_VARDECLARATIONS
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace SkSL {
+
+class Context;
+class Position;
+class Type;
+
+struct Modifiers;
+
+/**
+ * A single variable declaration statement. Multiple variables declared together are expanded to
+ * separate (sequential) statements. For instance, the SkSL 'int x = 2, y[3];' produces two
+ * VarDeclaration instances (wrapped in an unscoped Block).
+ */
+class VarDeclaration final : public Statement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kVarDeclaration;
+
+ VarDeclaration(Variable* var,
+ const Type* baseType,
+ int arraySize,
+ std::unique_ptr<Expression> value,
+ bool isClone = false)
+ : INHERITED(var->fPosition, kIRNodeKind)
+ , fVar(var)
+ , fBaseType(*baseType)
+ , fArraySize(arraySize)
+ , fValue(std::move(value))
+ , fIsClone(isClone) {}
+
+ ~VarDeclaration() override {
+ // Unhook this VarDeclaration from its associated Variable, since we're being deleted.
+ if (fVar && !fIsClone) {
+ fVar->detachDeadVarDeclaration();
+ }
+ }
+
+ // Checks the modifiers, baseType, and storage for compatibility with one another and reports
+ // errors if needed. This method is implicitly called during Convert(), but is also explicitly
+ // called while processing interface block fields.
+ static void ErrorCheck(const Context& context, Position pos, Position modifiersPosition,
+ const Modifiers& modifiers, const Type* type, Variable::Storage storage);
+
+ // Does proper error checking and type coercion; reports errors via ErrorReporter.
+ static std::unique_ptr<Statement> Convert(const Context& context, std::unique_ptr<Variable> var,
+ std::unique_ptr<Expression> value, bool addToSymbolTable = true);
+
+ // Reports errors via ASSERT.
+ static std::unique_ptr<Statement> Make(const Context& context,
+ Variable* var,
+ const Type* baseType,
+ int arraySize,
+ std::unique_ptr<Expression> value);
+ const Type& baseType() const {
+ return fBaseType;
+ }
+
+ Variable* var() const {
+ return fVar;
+ }
+
+ void detachDeadVariable() {
+ fVar = nullptr;
+ }
+
+ int arraySize() const {
+ return fArraySize;
+ }
+
+ std::unique_ptr<Expression>& value() {
+ return fValue;
+ }
+
+ const std::unique_ptr<Expression>& value() const {
+ return fValue;
+ }
+
+ std::unique_ptr<Statement> clone() const override;
+
+ std::string description() const override;
+
+private:
+ static bool ErrorCheckAndCoerce(const Context& context,
+ const Variable& var,
+ std::unique_ptr<Expression>& value);
+
+ Variable* fVar;
+ const Type& fBaseType;
+ int fArraySize; // zero means "not an array"
+ std::unique_ptr<Expression> fValue;
+ // if this VarDeclaration is a clone, it doesn't actually own the associated variable
+ bool fIsClone;
+
+ using INHERITED = Statement;
+};
+
+/**
+ * A variable declaration appearing at global scope. A global declaration like 'int x, y;' produces
+ * two GlobalVarDeclaration elements, each containing the declaration of one variable.
+ */
+class GlobalVarDeclaration final : public ProgramElement {
+public:
+ inline static constexpr Kind kIRNodeKind = Kind::kGlobalVar;
+
+ GlobalVarDeclaration(std::unique_ptr<Statement> decl)
+ : INHERITED(decl->fPosition, kIRNodeKind)
+ , fDeclaration(std::move(decl)) {
+ SkASSERT(this->declaration()->is<VarDeclaration>());
+ this->varDeclaration().var()->setGlobalVarDeclaration(this);
+ }
+
+ std::unique_ptr<Statement>& declaration() {
+ return fDeclaration;
+ }
+
+ const std::unique_ptr<Statement>& declaration() const {
+ return fDeclaration;
+ }
+
+ VarDeclaration& varDeclaration() {
+ return fDeclaration->as<VarDeclaration>();
+ }
+
+ const VarDeclaration& varDeclaration() const {
+ return fDeclaration->as<VarDeclaration>();
+ }
+
+ std::unique_ptr<ProgramElement> clone() const override {
+ return std::make_unique<GlobalVarDeclaration>(this->declaration()->clone());
+ }
+
+ std::string description() const override {
+ return this->declaration()->description();
+ }
+
+private:
+ std::unique_ptr<Statement> fDeclaration;
+
+ using INHERITED = ProgramElement;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariable.cpp b/gfx/skia/skia/src/sksl/ir/SkSLVariable.cpp
new file mode 100644
index 0000000000..95c292a8ad
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariable.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLVariable.h"
+
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/sksl/SkSLErrorReporter.h"
+#include "src/base/SkStringView.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLMangler.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLThreadContext.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+
+#include <type_traits>
+#include <utility>
+
+namespace SkSL {
+
+Variable::~Variable() {
+ // Unhook this Variable from its associated VarDeclaration, since we're being deleted.
+ if (VarDeclaration* declaration = this->varDeclaration()) {
+ declaration->detachDeadVariable();
+ }
+}
+
+InterfaceBlockVariable::~InterfaceBlockVariable() {
+ // Unhook this Variable from its associated InterfaceBlock, since we're being deleted.
+ if (fInterfaceBlockElement) {
+ fInterfaceBlockElement->detachDeadVariable();
+ }
+}
+
+const Expression* Variable::initialValue() const {
+ VarDeclaration* declaration = this->varDeclaration();
+ return declaration ? declaration->value().get() : nullptr;
+}
+
+VarDeclaration* Variable::varDeclaration() const {
+ if (!fDeclaringElement) {
+ return nullptr;
+ }
+ SkASSERT(fDeclaringElement->is<VarDeclaration>() ||
+ fDeclaringElement->is<GlobalVarDeclaration>());
+ return fDeclaringElement->is<GlobalVarDeclaration>()
+ ? &fDeclaringElement->as<GlobalVarDeclaration>().varDeclaration()
+ : &fDeclaringElement->as<VarDeclaration>();
+}
+
+GlobalVarDeclaration* Variable::globalVarDeclaration() const {
+ if (!fDeclaringElement) {
+ return nullptr;
+ }
+ SkASSERT(fDeclaringElement->is<VarDeclaration>() ||
+ fDeclaringElement->is<GlobalVarDeclaration>());
+ return fDeclaringElement->is<GlobalVarDeclaration>()
+ ? &fDeclaringElement->as<GlobalVarDeclaration>()
+ : nullptr;
+}
+
+void Variable::setVarDeclaration(VarDeclaration* declaration) {
+ SkASSERT(!fDeclaringElement || this == declaration->var());
+ if (!fDeclaringElement) {
+ fDeclaringElement = declaration;
+ }
+}
+
+void Variable::setGlobalVarDeclaration(GlobalVarDeclaration* global) {
+ SkASSERT(!fDeclaringElement || this == global->varDeclaration().var());
+ fDeclaringElement = global;
+}
+
+std::string Variable::mangledName() const {
+ // Only private variables need to use name mangling.
+ std::string_view name = this->name();
+ if (!skstd::starts_with(name, '$')) {
+ return std::string(name);
+ }
+
+ // The $ prefix will fail to compile in GLSL, so replace it with `sk_Priv`.
+ name.remove_prefix(1);
+ return "sk_Priv" + std::string(name);
+}
+
+std::unique_ptr<Variable> Variable::Convert(const Context& context,
+ Position pos,
+ Position modifiersPos,
+ const Modifiers& modifiers,
+ const Type* baseType,
+ Position namePos,
+ std::string_view name,
+ bool isArray,
+ std::unique_ptr<Expression> arraySize,
+ Variable::Storage storage) {
+ if (modifiers.fLayout.fLocation == 0 && modifiers.fLayout.fIndex == 0 &&
+ (modifiers.fFlags & Modifiers::kOut_Flag) &&
+ ProgramConfig::IsFragment(context.fConfig->fKind) && name != Compiler::FRAGCOLOR_NAME) {
+ context.fErrors->error(modifiersPos,
+ "out location=0, index=0 is reserved for sk_FragColor");
+ }
+ if (baseType->isUnsizedArray() && storage != Variable::Storage::kInterfaceBlock) {
+ context.fErrors->error(pos, "unsized arrays are not permitted here");
+ }
+ if (ProgramConfig::IsCompute(ThreadContext::Context().fConfig->fKind) &&
+ modifiers.fLayout.fBuiltin == -1) {
+ if (storage == Variable::Storage::kGlobal) {
+ if (modifiers.fFlags & Modifiers::kIn_Flag) {
+ context.fErrors->error(pos, "pipeline inputs not permitted in compute shaders");
+ } else if (modifiers.fFlags & Modifiers::kOut_Flag) {
+ context.fErrors->error(pos, "pipeline outputs not permitted in compute shaders");
+ }
+ }
+ }
+
+ return Make(context, pos, modifiersPos, modifiers, baseType, name, isArray,
+ std::move(arraySize), storage);
+}
+
+std::unique_ptr<Variable> Variable::Make(const Context& context,
+ Position pos,
+ Position modifiersPos,
+ const Modifiers& modifiers,
+ const Type* baseType,
+ std::string_view name,
+ bool isArray,
+ std::unique_ptr<Expression> arraySize,
+ Variable::Storage storage) {
+ const Type* type = baseType;
+ int arraySizeValue = 0;
+ if (isArray) {
+ SkASSERT(arraySize);
+ arraySizeValue = type->convertArraySize(context, pos, std::move(arraySize));
+ if (!arraySizeValue) {
+ return nullptr;
+ }
+ type = ThreadContext::SymbolTable()->addArrayDimension(type, arraySizeValue);
+ }
+ if (type->componentType().isInterfaceBlock()) {
+ return std::make_unique<InterfaceBlockVariable>(pos,
+ modifiersPos,
+ context.fModifiersPool->add(modifiers),
+ name,
+ type,
+ context.fConfig->fIsBuiltinCode,
+ storage);
+ } else {
+ return std::make_unique<Variable>(pos,
+ modifiersPos,
+ context.fModifiersPool->add(modifiers),
+ name,
+ type,
+ context.fConfig->fIsBuiltinCode,
+ storage);
+ }
+}
+
+Variable::ScratchVariable Variable::MakeScratchVariable(const Context& context,
+ Mangler& mangler,
+ std::string_view baseName,
+ const Type* type,
+ const Modifiers& modifiers,
+ SymbolTable* symbolTable,
+ std::unique_ptr<Expression> initialValue) {
+ // $floatLiteral or $intLiteral aren't real types that we can use for scratch variables, so
+ // replace them if they ever appear here. If this happens, we likely forgot to coerce a type
+ // somewhere during compilation.
+ if (type->isLiteral()) {
+ SkDEBUGFAIL("found a $literal type in MakeScratchVariable");
+ type = &type->scalarTypeForLiteral();
+ }
+
+ // Out-parameters aren't supported.
+ SkASSERT(!(modifiers.fFlags & Modifiers::kOut_Flag));
+
+ // Provide our new variable with a unique name, and add it to our symbol table.
+ const std::string* name =
+ symbolTable->takeOwnershipOfString(mangler.uniqueName(baseName, symbolTable));
+
+ // Create our new variable and add it to the symbol table.
+ ScratchVariable result;
+ auto var = std::make_unique<Variable>(initialValue ? initialValue->fPosition : Position(),
+ /*modifiersPosition=*/Position(),
+ context.fModifiersPool->add(Modifiers{}),
+ name->c_str(),
+ type,
+ symbolTable->isBuiltin(),
+ Variable::Storage::kLocal);
+
+ // If we are creating an array type, reduce it to base type plus array-size.
+ int arraySize = 0;
+ if (type->isArray()) {
+ arraySize = type->columns();
+ type = &type->componentType();
+ }
+ // Create our variable declaration.
+ result.fVarDecl = VarDeclaration::Make(context, var.get(), type, arraySize,
+ std::move(initialValue));
+ result.fVarSymbol = symbolTable->add(std::move(var));
+ return result;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariable.h b/gfx/skia/skia/src/sksl/ir/SkSLVariable.h
new file mode 100644
index 0000000000..a94292873b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariable.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARIABLE
+#define SKSL_VARIABLE
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLSymbol.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+
+namespace SkSL {
+
+class Context;
+class Expression;
+class GlobalVarDeclaration;
+class InterfaceBlock;
+class Mangler;
+class SymbolTable;
+class VarDeclaration;
+
+enum class VariableStorage : int8_t {
+ kGlobal,
+ kInterfaceBlock,
+ kLocal,
+ kParameter,
+};
+
+/**
+ * Represents a variable, whether local, global, or a function parameter. This represents the
+ * variable itself (the storage location), which is shared between all VariableReferences which
+ * read or write that storage location.
+ */
+class Variable : public Symbol {
+public:
+ using Storage = VariableStorage;
+
+ inline static constexpr Kind kIRNodeKind = Kind::kVariable;
+
+ Variable(Position pos, Position modifiersPosition, const Modifiers* modifiers,
+ std::string_view name, const Type* type, bool builtin, Storage storage)
+ : INHERITED(pos, kIRNodeKind, name, type)
+ , fModifiersPosition(modifiersPosition)
+ , fModifiers(modifiers)
+ , fStorage(storage)
+ , fBuiltin(builtin) {}
+
+ ~Variable() override;
+
+ static std::unique_ptr<Variable> Convert(const Context& context, Position pos,
+ Position modifiersPos, const Modifiers& modifiers, const Type* baseType,
+ Position namePos, std::string_view name, bool isArray,
+ std::unique_ptr<Expression> arraySize, Variable::Storage storage);
+
+ static std::unique_ptr<Variable> Make(const Context& context, Position pos,
+ Position modifiersPos, const Modifiers& modifiers, const Type* baseType,
+ std::string_view name, bool isArray, std::unique_ptr<Expression> arraySize,
+ Variable::Storage storage);
+
+ /**
+ * Creates a local scratch variable and the associated VarDeclaration statement.
+ * Useful when doing IR rewrites, e.g. inlining a function call.
+ */
+ struct ScratchVariable {
+ const Variable* fVarSymbol;
+ std::unique_ptr<Statement> fVarDecl;
+ };
+ static ScratchVariable MakeScratchVariable(const Context& context,
+ Mangler& mangler,
+ std::string_view baseName,
+ const Type* type,
+ const Modifiers& modifiers,
+ SymbolTable* symbolTable,
+ std::unique_ptr<Expression> initialValue);
+ const Modifiers& modifiers() const {
+ return *fModifiers;
+ }
+
+ void setModifiers(const Modifiers* modifiers) {
+ fModifiers = modifiers;
+ }
+
+ Position modifiersPosition() const {
+ return fModifiersPosition;
+ }
+
+ bool isBuiltin() const {
+ return fBuiltin;
+ }
+
+ Storage storage() const {
+ return fStorage;
+ }
+
+ const Expression* initialValue() const;
+
+ VarDeclaration* varDeclaration() const;
+
+ void setVarDeclaration(VarDeclaration* declaration);
+
+ GlobalVarDeclaration* globalVarDeclaration() const;
+
+ void setGlobalVarDeclaration(GlobalVarDeclaration* global);
+
+ void detachDeadVarDeclaration() {
+ // The VarDeclaration is being deleted, so our reference to it has become stale.
+ fDeclaringElement = nullptr;
+ }
+
+ // The interfaceBlock methods are no-op stubs here. They have proper implementations in
+ // InterfaceBlockVariable, declared below this class, which dedicates extra space to store the
+ // pointer back to the InterfaceBlock.
+ virtual InterfaceBlock* interfaceBlock() const { return nullptr; }
+
+ virtual void setInterfaceBlock(InterfaceBlock*) { SkUNREACHABLE; }
+
+ virtual void detachDeadInterfaceBlock() {}
+
+ std::string description() const override {
+ return this->modifiers().description() + this->type().displayName() + " " +
+ std::string(this->name());
+ }
+
+ std::string mangledName() const;
+
+private:
+ IRNode* fDeclaringElement = nullptr;
+ // We don't store the position in the Modifiers object itself because they are pooled
+ Position fModifiersPosition;
+ const Modifiers* fModifiers;
+ VariableStorage fStorage;
+ bool fBuiltin;
+
+ using INHERITED = Symbol;
+};
+
+/**
+ * This represents a Variable associated with an InterfaceBlock. Mostly a normal variable, but also
+ * has an extra pointer back to the InterfaceBlock element that owns it.
+ */
+class InterfaceBlockVariable final : public Variable {
+public:
+ using Variable::Variable;
+
+ ~InterfaceBlockVariable() override;
+
+ InterfaceBlock* interfaceBlock() const override { return fInterfaceBlockElement; }
+
+ void setInterfaceBlock(InterfaceBlock* elem) override {
+ SkASSERT(!fInterfaceBlockElement);
+ fInterfaceBlockElement = elem;
+ }
+
+ void detachDeadInterfaceBlock() override {
+ // The InterfaceBlock is being deleted, so our reference to it has become stale.
+ fInterfaceBlockElement = nullptr;
+ }
+
+private:
+ InterfaceBlock* fInterfaceBlockElement = nullptr;
+
+ using INHERITED = Variable;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.cpp b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.cpp
new file mode 100644
index 0000000000..6eca3ddb89
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/ir/SkSLVariableReference.h"
+
+#include "src/sksl/ir/SkSLVariable.h"
+
+namespace SkSL {
+
+VariableReference::VariableReference(Position pos, const Variable* variable, RefKind refKind)
+ : INHERITED(pos, kIRNodeKind, &variable->type())
+ , fVariable(variable)
+ , fRefKind(refKind) {
+ SkASSERT(this->variable());
+}
+
+std::string VariableReference::description(OperatorPrecedence) const {
+ return std::string(this->variable()->name());
+}
+
+void VariableReference::setRefKind(RefKind refKind) {
+ fRefKind = refKind;
+}
+
+void VariableReference::setVariable(const Variable* variable) {
+ fVariable = variable;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h
new file mode 100644
index 0000000000..d33c569314
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/ir/SkSLVariableReference.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_VARIABLEREFERENCE
+#define SKSL_VARIABLEREFERENCE
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/sksl/SkSLPosition.h"
+#include "src/sksl/ir/SkSLExpression.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+
+namespace SkSL {
+
+class Variable;
+enum class OperatorPrecedence : uint8_t;
+
+enum class VariableRefKind : int8_t {
+ kRead,
+ kWrite,
+ kReadWrite,
+ // taking the address of a variable - we consider this a read & write but don't complain if
+ // the variable was not previously assigned
+ kPointer
+};
+
+/**
+ * A reference to a variable, through which it can be read or written. In the statement:
+ *
+ * x = x + 1;
+ *
+ * there is only one Variable 'x', but two VariableReferences to it.
+ */
+class VariableReference final : public Expression {
+public:
+ using RefKind = VariableRefKind;
+
+ inline static constexpr Kind kIRNodeKind = Kind::kVariableReference;
+
+ VariableReference(Position pos, const Variable* variable, RefKind refKind);
+
+ // Creates a VariableReference. There isn't much in the way of error-checking or optimization
+ // opportunities here.
+ static std::unique_ptr<Expression> Make(Position pos,
+ const Variable* variable,
+ RefKind refKind = RefKind::kRead) {
+ SkASSERT(variable);
+ return std::make_unique<VariableReference>(pos, variable, refKind);
+ }
+
+ VariableReference(const VariableReference&) = delete;
+ VariableReference& operator=(const VariableReference&) = delete;
+
+ const Variable* variable() const {
+ return fVariable;
+ }
+
+ RefKind refKind() const {
+ return fRefKind;
+ }
+
+ void setRefKind(RefKind refKind);
+ void setVariable(const Variable* variable);
+
+ std::unique_ptr<Expression> clone(Position pos) const override {
+ return std::make_unique<VariableReference>(pos, this->variable(), this->refKind());
+ }
+
+ std::string description(OperatorPrecedence) const override;
+
+private:
+ const Variable* fVariable;
+ VariableRefKind fRefKind;
+
+ using INHERITED = Expression;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/DFA.h b/gfx/skia/skia/src/sksl/lex/DFA.h
new file mode 100644
index 0000000000..1fab51f921
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/DFA.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DFA
+#define SKSL_DFA
+
+#include <string>
+#include <vector>
+
+/**
+ * Tables representing a deterministic finite automaton for matching regular expressions.
+ */
+struct DFA {
+ DFA(std::vector<int> charMappings, std::vector<std::vector<int>> transitions,
+ std::vector<int> accepts)
+ : fCharMappings(charMappings)
+ , fTransitions(transitions)
+ , fAccepts(accepts) {}
+
+ // maps chars to the row index of fTransitions, as multiple characters may map to the same row.
+ // starting from state s and looking at char c, the new state is
+ // fTransitions[fCharMappings[c]][s].
+ std::vector<int> fCharMappings;
+
+ // one row per character mapping, one column per state
+ std::vector<std::vector<int>> fTransitions;
+
+ // contains, for each state, the token id we should report when matching ends in that state (-1
+ // for no match)
+ std::vector<int> fAccepts;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/DFAState.h b/gfx/skia/skia/src/sksl/lex/DFAState.h
new file mode 100644
index 0000000000..a09d7ba673
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/DFAState.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_DFASTATE
+#define SKSL_DFASTATE
+
+#include "src/sksl/lex/LexUtil.h"
+
+#include <vector>
+#include <string>
+
+struct DFAState {
+ struct Label {
+ std::vector<int> fStates;
+
+ Label(std::vector<int> states)
+ : fStates(std::move(states)) {}
+
+ bool operator==(const Label& other) const {
+ return fStates == other.fStates;
+ }
+
+ bool operator!=(const Label& other) const {
+ return !(*this == other);
+ }
+
+#ifdef SK_DEBUG
+ std::string description() const {
+ std::string result = "<";
+ const char* separator = "";
+ for (int s : fStates) {
+ result += separator;
+ result += std::to_string(s);
+ separator = ", ";
+ }
+ result += ">";
+ return result;
+ }
+#endif
+ };
+
+ DFAState()
+ : fId(INVALID)
+ , fLabel({}) {}
+
+ DFAState(int id, Label label)
+ : fId(id)
+ , fLabel(std::move(label)) {}
+
+ DFAState(const DFAState& other) = delete;
+
+ int fId;
+
+ Label fLabel;
+
+ bool fIsScanned = false;
+};
+
+namespace std {
+ template<> struct hash<DFAState::Label> {
+ size_t operator()(const DFAState::Label& s) const {
+ size_t result = 0;
+ for (int i : s.fStates) {
+ result = result * 101 + i;
+ }
+ return result;
+ }
+ };
+} // namespace
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/LexUtil.h b/gfx/skia/skia/src/sksl/lex/LexUtil.h
new file mode 100644
index 0000000000..65692cb21b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/LexUtil.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_LEXUTIL
+#define SKSL_LEXUTIL
+
+#include <cstdlib>
+
+#define INVALID -1
+
+#define SK_ABORT(...) (fprintf(stderr, __VA_ARGS__), abort())
+#define SkASSERT(x) \
+ (void)((x) || (SK_ABORT("failed SkASSERT(%s): %s:%d\n", #x, __FILE__, __LINE__), 0))
+#define SkUNREACHABLE (SK_ABORT("unreachable"))
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/Main.cpp b/gfx/skia/skia/src/sksl/lex/Main.cpp
new file mode 100644
index 0000000000..ab4e3a618b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/Main.cpp
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/DFA.h"
+#include "src/sksl/lex/LexUtil.h"
+#include "src/sksl/lex/NFA.h"
+#include "src/sksl/lex/NFAtoDFA.h"
+#include "src/sksl/lex/RegexNode.h"
+#include "src/sksl/lex/RegexParser.h"
+#include "src/sksl/lex/TransitionTable.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <algorithm>
+#include <sstream>
+#include <string>
+#include <vector>
+
+/**
+ * Processes a .lex file and produces .h and .cpp files which implement a lexical analyzer. The .lex
+ * file is a text file with one token definition per line. Each line is of the form:
+ * <TOKEN_NAME> = <pattern>
+ * where <pattern> is either a regular expression (e.g [0-9]) or a double-quoted literal string.
+ */
+
+static constexpr const char HEADER[] =
+ "/*\n"
+ " * Copyright 2017 Google Inc.\n"
+ " *\n"
+ " * Use of this source code is governed by a BSD-style license that can be\n"
+ " * found in the LICENSE file.\n"
+ " */\n"
+ "/*****************************************************************************************\n"
+ " ******************** This file was generated by sksllex. Do not edit. *******************\n"
+ " *****************************************************************************************/\n";
+
+static void writeH(const DFA& dfa, const char* lexer, const char* token,
+ const std::vector<std::string>& tokens, const char* hPath) {
+ std::ofstream out(hPath);
+ SkASSERT(out.good());
+ out << HEADER;
+ out << "#ifndef SKSL_" << lexer << "\n";
+ out << "#define SKSL_" << lexer << "\n";
+ out << "#include <cstdint>\n";
+ out << "#include <string_view>\n";
+ out << "namespace SkSL {\n";
+ out << "\n";
+ out << "struct " << token << " {\n";
+ out << " enum class Kind {\n";
+ for (const std::string& t : tokens) {
+ out << " TK_" << t << ",\n";
+ }
+ out << " TK_NONE,";
+ out << R"(
+ };
+
+ )" << token << "() {}";
+
+ out << token << R"((Kind kind, int32_t offset, int32_t length)
+ : fKind(kind)
+ , fOffset(offset)
+ , fLength(length) {}
+
+ Kind fKind = Kind::TK_NONE;
+ int32_t fOffset = -1;
+ int32_t fLength = -1;
+};
+
+class )" << lexer << R"( {
+public:
+ void start(std::string_view text) {
+ fText = text;
+ fOffset = 0;
+ }
+
+ )" << token << R"( next();
+
+ struct Checkpoint {
+ int32_t fOffset;
+ };
+
+ Checkpoint getCheckpoint() const {
+ return {fOffset};
+ }
+
+ void rewindToCheckpoint(Checkpoint checkpoint) {
+ fOffset = checkpoint.fOffset;
+ }
+
+private:
+ std::string_view fText;
+ int32_t fOffset;
+};
+
+} // namespace
+#endif
+)";
+}
+
+static void writeCPP(const DFA& dfa, const char* lexer, const char* token, const char* include,
+ const char* cppPath) {
+ std::ofstream out(cppPath);
+ SkASSERT(out.good());
+ out << HEADER;
+ out << "#include \"" << include << "\"\n";
+ out << "\n";
+ out << "namespace SkSL {\n";
+ out << "\n";
+
+ size_t states = 0;
+ for (const auto& row : dfa.fTransitions) {
+ states = std::max(states, row.size());
+ }
+ out << "using State = " << (states <= 256 ? "uint8_t" : "uint16_t") << ";\n";
+
+ // Find the first character mapped in our DFA.
+ size_t startChar = 0;
+ for (; startChar < dfa.fCharMappings.size(); ++startChar) {
+ if (dfa.fCharMappings[startChar] != 0) {
+ break;
+ }
+ }
+
+ // Arbitrarily-chosen character which is greater than startChar, and should not appear in actual
+ // input.
+ SkASSERT(startChar < 18);
+ out << "static constexpr uint8_t kInvalidChar = 18;";
+ out << "static constexpr int8_t kMappings[" << dfa.fCharMappings.size() - startChar << "] = {\n"
+ " ";
+ const char* separator = "";
+ for (size_t index = startChar; index < dfa.fCharMappings.size(); ++index) {
+ out << separator << std::to_string(dfa.fCharMappings[index]);
+ separator = ", ";
+ }
+ out << "\n};\n";
+
+ WriteTransitionTable(out, dfa, states);
+
+ out << "static const int8_t kAccepts[" << states << "] = {";
+ for (size_t i = 0; i < states; ++i) {
+ if (i < dfa.fAccepts.size()) {
+ out << " " << dfa.fAccepts[i] << ",";
+ } else {
+ out << " " << INVALID << ",";
+ }
+ }
+ out << " };\n";
+ out << "\n";
+
+ out << token << " " << lexer << "::next() {";
+ out << R"(
+ // note that we cheat here: normally a lexer needs to worry about the case
+ // where a token has a prefix which is not itself a valid token - for instance,
+ // maybe we have a valid token 'while', but 'w', 'wh', etc. are not valid
+ // tokens. Our grammar doesn't have this property, so we can simplify the logic
+ // a bit.
+ int32_t startOffset = fOffset;
+ State state = 1;
+ for (;;) {
+ if (fOffset >= (int32_t)fText.length()) {
+ if (startOffset == (int32_t)fText.length() || kAccepts[state] == -1) {
+ return )" << token << "(" << token << R"(::Kind::TK_END_OF_FILE, startOffset, 0);
+ }
+ break;
+ }
+ uint8_t c = (uint8_t)(fText[fOffset] - )" << startChar << R"();
+ if (c >= )" << dfa.fCharMappings.size() - startChar << R"() {
+ c = kInvalidChar;
+ }
+ State newState = get_transition(kMappings[c], state);
+ if (!newState) {
+ break;
+ }
+ state = newState;
+ ++fOffset;
+ }
+ Token::Kind kind = ()" << token << R"(::Kind) kAccepts[state];
+ return )" << token << R"((kind, startOffset, fOffset - startOffset);
+}
+
+} // namespace
+)";
+}
+
+static void process(const char* inPath, const char* lexer, const char* token, const char* hPath,
+ const char* cppPath) {
+ NFA nfa;
+ std::vector<std::string> tokens;
+ tokens.push_back("END_OF_FILE");
+ std::string line;
+ std::ifstream in(inPath);
+ while (std::getline(in, line)) {
+ if (line.length() == 0) {
+ continue;
+ }
+ if (line.length() >= 2 && line[0] == '/' && line[1] == '/') {
+ continue;
+ }
+ std::istringstream split(line);
+ std::string name, delimiter, pattern;
+ if (split >> name >> delimiter >> pattern) {
+ SkASSERT(split.eof());
+ SkASSERT(name != "");
+ SkASSERT(delimiter == "=");
+ SkASSERT(pattern != "");
+ tokens.push_back(name);
+ if (pattern[0] == '"') {
+ SkASSERT(pattern.size() > 2 && pattern[pattern.size() - 1] == '"');
+ RegexNode node = RegexNode(RegexNode::kChar_Kind, pattern[1]);
+ for (size_t i = 2; i < pattern.size() - 1; ++i) {
+ node = RegexNode(RegexNode::kConcat_Kind, node,
+ RegexNode(RegexNode::kChar_Kind, pattern[i]));
+ }
+ nfa.addRegex(node);
+ }
+ else {
+ nfa.addRegex(RegexParser().parse(pattern));
+ }
+ }
+ }
+ NFAtoDFA converter(&nfa);
+ DFA dfa = converter.convert();
+ writeH(dfa, lexer, token, tokens, hPath);
+ writeCPP(dfa, lexer, token, (std::string("src/sksl/SkSL") + lexer + ".h").c_str(), cppPath);
+}
+
+int main(int argc, const char** argv) {
+ if (argc != 6) {
+ printf("usage: sksllex <input.lex> <lexername> <tokenname> <output.h> <output.cpp>\n");
+ exit(1);
+ }
+ process(argv[1], argv[2], argv[3], argv[4], argv[5]);
+ return 0;
+}
diff --git a/gfx/skia/skia/src/sksl/lex/NFA.cpp b/gfx/skia/skia/src/sksl/lex/NFA.cpp
new file mode 100644
index 0000000000..e73fc154d7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/NFA.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/NFA.h"
+
+#include "src/sksl/lex/LexUtil.h"
+#include <string>
+
+int NFA::match(std::string s) const {
+ std::vector<int> states = fStartStates;
+ for (size_t i = 0; i < s.size(); ++i) {
+ std::vector<int> next;
+ for (int id : states) {
+ if (fStates[id].accept(s[i])) {
+ for (int nextId : fStates[id].fNext) {
+ if (fStates[nextId].fKind != NFAState::kRemapped_Kind) {
+ next.push_back(nextId);
+ } else {
+ next.insert(next.end(), fStates[nextId].fData.begin(),
+ fStates[nextId].fData.end());
+ }
+ }
+ }
+ }
+ if (!next.size()) {
+ return INVALID;
+ }
+ states = next;
+ }
+ int accept = INVALID;
+ for (int id : states) {
+ if (fStates[id].fKind == NFAState::kAccept_Kind) {
+ int result = fStates[id].fData[0];
+ if (accept == INVALID || result < accept) {
+ accept = result;
+ }
+ }
+ }
+ return accept;
+}
diff --git a/gfx/skia/skia/src/sksl/lex/NFA.h b/gfx/skia/skia/src/sksl/lex/NFA.h
new file mode 100644
index 0000000000..368fb3ec19
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/NFA.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_NFA
+#define SKSL_NFA
+
+#include "src/sksl/lex/NFAState.h"
+#include "src/sksl/lex/RegexNode.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+/**
+ * A nondeterministic finite automaton for matching regular expressions. The NFA is initialized with
+ * a number of regular expressions, and then matches a string against all of them simultaneously.
+ */
+struct NFA {
+ /**
+ * Adds a new regular expression to the set of expressions matched by this automaton, returning
+ * its index.
+ */
+ int addRegex(const RegexNode& regex) {
+ std::vector<int> accept;
+ // we reserve token 0 for END_OF_FILE, so this starts at 1
+ accept.push_back(this->addState(NFAState(++fRegexCount)));
+ std::vector<int> startStates = regex.createStates(this, accept);
+ fStartStates.insert(fStartStates.end(), startStates.begin(), startStates.end());
+ return fStartStates.size() - 1;
+ }
+
+ /**
+ * Adds a new state to the NFA, returning its index.
+ */
+ int addState(NFAState s) {
+ fStates.push_back(std::move(s));
+ return fStates.size() - 1;
+ }
+
+ /**
+ * Matches a string against all of the regexes added to this NFA. Returns the index of the first
+ * (in addRegex order) matching expression, or -1 if no match. This is relatively slow and used
+ * only for debugging purposes; the NFA should be converted to a DFA before actual use.
+ */
+ int match(std::string s) const;
+
+ int fRegexCount = 0;
+
+ std::vector<NFAState> fStates;
+
+ std::vector<int> fStartStates;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/NFAState.h b/gfx/skia/skia/src/sksl/lex/NFAState.h
new file mode 100644
index 0000000000..848a6f11ee
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/NFAState.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_NFASTATE
+#define SKSL_NFASTATE
+
+#include <string>
+#include <vector>
+
+#include "src/sksl/lex/LexUtil.h"
+
+struct NFAState {
+ enum Kind {
+ // represents an accept state - if the NFA ends up in this state, we have successfully
+ // matched the token indicated by fData[0]
+ kAccept_Kind,
+ // matches the single character fChar
+ kChar_Kind,
+ // the regex '.'; matches any char but '\n'
+ kDot_Kind,
+ // a state which serves as a placeholder for the states indicated in fData. When we
+ // transition to this state, we instead transition to all of the fData states.
+ kRemapped_Kind,
+ // contains a list of true/false values in fData. fData[c] tells us whether we accept the
+ // character c.
+ kTable_Kind
+ };
+
+ NFAState(Kind kind, std::vector<int> next)
+ : fKind(kind)
+ , fNext(std::move(next)) {}
+
+ NFAState(char c, std::vector<int> next)
+ : fKind(kChar_Kind)
+ , fChar(c)
+ , fNext(std::move(next)) {}
+
+ NFAState(std::vector<int> states)
+ : fKind(kRemapped_Kind)
+ , fData(std::move(states)) {}
+
+ NFAState(bool inverse, std::vector<bool> accepts, std::vector<int> next)
+ : fKind(kTable_Kind)
+ , fInverse(inverse)
+ , fNext(std::move(next)) {
+ for (bool b : accepts) {
+ fData.push_back(b);
+ }
+ }
+
+ NFAState(int token)
+ : fKind(kAccept_Kind) {
+ fData.push_back(token);
+ }
+
+ bool accept(char c) const {
+ switch (fKind) {
+ case kAccept_Kind:
+ return false;
+ case kChar_Kind:
+ return c == fChar;
+ case kDot_Kind:
+ return c != '\n';
+ case kTable_Kind: {
+ bool value;
+ if ((size_t) c < fData.size()) {
+ value = fData[c];
+ } else {
+ value = false;
+ }
+ return value != fInverse;
+ }
+ default:
+ SkUNREACHABLE;
+ }
+ }
+
+#ifdef SK_DEBUG
+ std::string description() const {
+ switch (fKind) {
+ case kAccept_Kind:
+ return "Accept(" + std::to_string(fData[0]) + ")";
+ case kChar_Kind: {
+ std::string result = "Char('" + std::string(1, fChar) + "'";
+ for (int v : fNext) {
+ result += ", ";
+ result += std::to_string(v);
+ }
+ result += ")";
+ return result;
+ }
+ case kDot_Kind: {
+ std::string result = "Dot(";
+ const char* separator = "";
+ for (int v : fNext) {
+ result += separator;
+ result += std::to_string(v);
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+ case kRemapped_Kind: {
+ std::string result = "Remapped(";
+ const char* separator = "";
+ for (int v : fData) {
+ result += separator;
+ result += std::to_string(v);
+ separator = ", ";
+ }
+ result += ")";
+ return result;
+ }
+ case kTable_Kind: {
+ std::string result = std::string("Table(") + (fInverse ? "true" : "false") + ", [";
+ const char* separator = "";
+ for (int v : fData) {
+ result += separator;
+ result += v ? "true" : "false";
+ separator = ", ";
+ }
+ result += "]";
+ for (int n : fNext) {
+ result += ", ";
+ result += std::to_string(n);
+ }
+ result += ")";
+ return result;
+ }
+ default:
+ SkUNREACHABLE;
+ }
+ }
+#endif
+
+ Kind fKind;
+
+ char fChar = 0;
+
+ bool fInverse = false;
+
+ std::vector<int> fData;
+
+    // states we transition to upon a successful match from this state
+ std::vector<int> fNext;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/NFAtoDFA.h b/gfx/skia/skia/src/sksl/lex/NFAtoDFA.h
new file mode 100644
index 0000000000..5331042985
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/NFAtoDFA.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef NFAtoDFA_DEFINED
+#define NFAtoDFA_DEFINED
+
+#include "src/sksl/lex/DFA.h"
+#include "src/sksl/lex/DFAState.h"
+#include "src/sksl/lex/NFA.h"
+#include "src/sksl/lex/NFAState.h"
+
+#include <algorithm>
+#include <climits>
+#include <memory>
+#include <unordered_map>
+#include <set>
+#include <vector>
+
+/**
+ * Converts a nondeterministic finite automaton to a deterministic finite automaton. Since NFAs and
+ * DFAs differ only in that an NFA allows multiple states at the same time, we can find each
+ * possible combination of simultaneous NFA states and give this combination a label. These labelled
+ * nodes are our DFA nodes, since we can only be in one such unique set of NFA states at a time.
+ *
+ * As an NFA can end up in multiple accept states at the same time (for instance, the token "while"
+ * is valid for both WHILE and IDENTIFIER), we disambiguate by preferring the first matching regex
+ * (in terms of the order in which they were added to the NFA).
+ */
+class NFAtoDFA {
+public:
+ inline static constexpr char START_CHAR = 9;
+ inline static constexpr char END_CHAR = 126;
+
+ NFAtoDFA(NFA* nfa)
+ : fNFA(*nfa) {}
+
+ /**
+ * Returns a DFA created from the NFA.
+ */
+ DFA convert() {
+ // create state 0, the "reject" state
+ getState(DFAState::Label({}));
+ // create a state representing being in all of the NFA's start states at once
+ std::vector<int> startStates = fNFA.fStartStates;
+ std::sort(startStates.begin(), startStates.end());
+ // this becomes state 1, our start state
+ DFAState* start = getState(DFAState::Label(startStates));
+ this->scanState(start);
+
+ this->computeMappings();
+
+ int stateCount = 0;
+ for (const auto& row : fTransitions) {
+ stateCount = std::max(stateCount, (int) row.size());
+ }
+ return DFA(fCharMappings, fTransitions, fAccepts);
+ }
+
+private:
+ /**
+ * Returns an existing state with the given label, or creates a new one and returns it.
+ */
+ DFAState* getState(DFAState::Label label) {
+ auto found = fStates.find(label);
+ if (found == fStates.end()) {
+ int id = fStates.size();
+ fStates[label] = std::unique_ptr<DFAState>(new DFAState(id, label));
+ return fStates[label].get();
+ }
+ return found->second.get();
+ }
+
+ void add(int nfaState, std::vector<int>* states) {
+ NFAState state = fNFA.fStates[nfaState];
+ if (state.fKind == NFAState::kRemapped_Kind) {
+ for (int next : state.fData) {
+ this->add(next, states);
+ }
+ } else {
+ for (int entry : *states) {
+ if (nfaState == entry) {
+ return;
+ }
+ }
+ states->push_back(nfaState);
+ }
+ }
+
+ void addTransition(char c, int start, int next) {
+ while (fTransitions.size() <= (size_t) c) {
+ fTransitions.push_back(std::vector<int>());
+ }
+ std::vector<int>& row = fTransitions[c];
+ while (row.size() <= (size_t) start) {
+ row.push_back(INVALID);
+ }
+ row[start] = next;
+ }
+
+ void scanState(DFAState* state) {
+ state->fIsScanned = true;
+ for (char c = START_CHAR; c <= END_CHAR; ++c) {
+ std::vector<int> next;
+ int bestAccept = INT_MAX;
+ for (int idx : state->fLabel.fStates) {
+ const NFAState& nfaState = fNFA.fStates[idx];
+ if (nfaState.accept(c)) {
+ for (int nextState : nfaState.fNext) {
+ if (fNFA.fStates[nextState].fKind == NFAState::kAccept_Kind) {
+ bestAccept = std::min(bestAccept, fNFA.fStates[nextState].fData[0]);
+ }
+ this->add(nextState, &next);
+ }
+ }
+ }
+ std::sort(next.begin(), next.end());
+ DFAState* nextState = this->getState(DFAState::Label(next));
+ this->addTransition(c, state->fId, nextState->fId);
+ if (bestAccept != INT_MAX) {
+ while (fAccepts.size() <= (size_t) nextState->fId) {
+ fAccepts.push_back(INVALID);
+ }
+ fAccepts[nextState->fId] = bestAccept;
+ }
+ if (!nextState->fIsScanned) {
+ this->scanState(nextState);
+ }
+ }
+ }
+
+ // collapse rows with the same transitions to a single row. This is common, as each row
+ // represents a character and often there are many characters for which all transitions are
+ // identical (e.g. [0-9] are treated the same way by all lexer rules)
+ void computeMappings() {
+ // mappings[<input row>] = <output row>
+ std::vector<std::vector<int>*> uniques;
+ // this could be done more efficiently, but O(n^2) is plenty fast for our purposes
+ for (size_t i = 0; i < fTransitions.size(); ++i) {
+ int found = -1;
+ for (size_t j = 0; j < uniques.size(); ++j) {
+ if (*uniques[j] == fTransitions[i]) {
+ found = j;
+ break;
+ }
+ }
+ if (found == -1) {
+ found = (int) uniques.size();
+ uniques.push_back(&fTransitions[i]);
+ }
+ fCharMappings.push_back(found);
+ }
+ std::vector<std::vector<int>> newTransitions;
+ for (std::vector<int>* row : uniques) {
+ newTransitions.push_back(*row);
+ }
+ fTransitions = newTransitions;
+ }
+
+ const NFA& fNFA;
+ std::unordered_map<DFAState::Label, std::unique_ptr<DFAState>> fStates;
+ std::vector<std::vector<int>> fTransitions;
+ std::vector<int> fCharMappings;
+ std::vector<int> fAccepts;
+};
+#endif // NFAtoDFA_DEFINED
diff --git a/gfx/skia/skia/src/sksl/lex/RegexNode.cpp b/gfx/skia/skia/src/sksl/lex/RegexNode.cpp
new file mode 100644
index 0000000000..aa4e23a8b9
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/RegexNode.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/RegexNode.h"
+
+#include "src/sksl/lex/LexUtil.h"
+#include "src/sksl/lex/NFA.h"
+#include "src/sksl/lex/NFAState.h"
+
+#include <string>
+
+std::vector<int> RegexNode::createStates(NFA* nfa, const std::vector<int>& accept) const {
+ std::vector<int> result;
+ switch (fKind) {
+ case kChar_Kind:
+ result.push_back(nfa->addState(NFAState(fPayload.fChar, accept)));
+ break;
+ case kCharset_Kind: {
+ std::vector<bool> chars;
+ for (const RegexNode& child : fChildren) {
+ if (child.fKind == kChar_Kind) {
+ while (chars.size() <= (size_t) child.fPayload.fChar) {
+ chars.push_back(false);
+ }
+ chars[child.fPayload.fChar] = true;
+ } else {
+ SkASSERT(child.fKind == kRange_Kind);
+ while (chars.size() <= (size_t) child.fChildren[1].fPayload.fChar) {
+ chars.push_back(false);
+ }
+ for (char c = child.fChildren[0].fPayload.fChar;
+ c <= child.fChildren[1].fPayload.fChar;
+ ++c) {
+ chars[c] = true;
+ }
+ }
+ }
+ result.push_back(nfa->addState(NFAState(fPayload.fBool, chars, accept)));
+ break;
+ }
+ case kConcat_Kind: {
+ std::vector<int> right = fChildren[1].createStates(nfa, accept);
+ result = fChildren[0].createStates(nfa, right);
+ break;
+ }
+ case kDot_Kind:
+ result.push_back(nfa->addState(NFAState(NFAState::kDot_Kind, accept)));
+ break;
+ case kOr_Kind: {
+ std::vector<int> states = fChildren[0].createStates(nfa, accept);
+ result.insert(result.end(), states.begin(), states.end());
+ states = fChildren[1].createStates(nfa, accept);
+ result.insert(result.end(), states.begin(), states.end());
+ break;
+ }
+ case kPlus_Kind: {
+ std::vector<int> next = accept;
+ std::vector<int> placeholder;
+ int id = nfa->addState(NFAState(placeholder));
+ next.push_back(id);
+ result = fChildren[0].createStates(nfa, next);
+ nfa->fStates[id] = NFAState(result);
+ break;
+ }
+ case kQuestion_Kind:
+ result = fChildren[0].createStates(nfa, accept);
+ result.insert(result.end(), accept.begin(), accept.end());
+ break;
+ case kRange_Kind:
+ SkUNREACHABLE;
+ case kStar_Kind: {
+ std::vector<int> next = accept;
+ std::vector<int> placeholder;
+ int id = nfa->addState(NFAState(placeholder));
+ next.push_back(id);
+ result = fChildren[0].createStates(nfa, next);
+ result.insert(result.end(), accept.begin(), accept.end());
+ nfa->fStates[id] = NFAState(result);
+ break;
+ }
+ }
+ return result;
+}
+
+#ifdef SK_DEBUG
+std::string RegexNode::description() const {
+ switch (fKind) {
+ case kChar_Kind:
+ return std::string(1, fPayload.fChar);
+ case kCharset_Kind: {
+ std::string result("[");
+ if (fPayload.fBool) {
+ result += "^";
+ }
+ for (const RegexNode& c : fChildren) {
+ result += c.description();
+ }
+ result += "]";
+ return result;
+ }
+ case kConcat_Kind:
+ return fChildren[0].description() + fChildren[1].description();
+ case kDot_Kind:
+ return ".";
+ case kOr_Kind:
+ return "(" + fChildren[0].description() + "|" + fChildren[1].description() + ")";
+ case kPlus_Kind:
+ return fChildren[0].description() + "+";
+ case kQuestion_Kind:
+ return fChildren[0].description() + "?";
+ case kRange_Kind:
+ return fChildren[0].description() + "-" + fChildren[1].description();
+ case kStar_Kind:
+ return fChildren[0].description() + "*";
+ default:
+ return "<" + std::to_string(fKind) + ">";
+ }
+}
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/RegexNode.h b/gfx/skia/skia/src/sksl/lex/RegexNode.h
new file mode 100644
index 0000000000..20c294744e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/RegexNode.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_REGEXNODE
+#define SKSL_REGEXNODE
+
+#include <string>
+#include <utility>
+#include <vector>
+
+struct NFA;
+
+/**
+ * Represents a node in the parse tree of a regular expression.
+ */
+struct RegexNode {
+ enum Kind {
+ kChar_Kind,
+ kCharset_Kind,
+ kConcat_Kind,
+ kDot_Kind,
+ kOr_Kind,
+ kPlus_Kind,
+ kRange_Kind,
+ kQuestion_Kind,
+ kStar_Kind
+ };
+
+ RegexNode(Kind kind)
+ : fKind(kind) {}
+
+ RegexNode(Kind kind, char payload)
+ : fKind(kind) {
+ fPayload.fChar = payload;
+ }
+
+ RegexNode(Kind kind, const char* children)
+ : fKind(kind) {
+ fPayload.fBool = false;
+ while (*children != '\0') {
+ fChildren.emplace_back(kChar_Kind, *children);
+ ++children;
+ }
+ }
+
+ RegexNode(Kind kind, RegexNode child)
+ : fKind(kind) {
+ fChildren.push_back(std::move(child));
+ }
+
+ RegexNode(Kind kind, RegexNode child1, RegexNode child2)
+ : fKind(kind) {
+ fChildren.push_back(std::move(child1));
+ fChildren.push_back(std::move(child2));
+ }
+
+ /**
+ * Creates NFA states for this node, with a successful match against this node resulting in a
+ * transition to all of the states in the accept vector.
+ */
+ std::vector<int> createStates(NFA* nfa, const std::vector<int>& accept) const;
+
+ std::string description() const;
+
+ Kind fKind;
+
+ union Payload {
+ char fChar;
+ bool fBool;
+ } fPayload;
+
+ std::vector<RegexNode> fChildren;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/RegexParser.cpp b/gfx/skia/skia/src/sksl/lex/RegexParser.cpp
new file mode 100644
index 0000000000..27c499e66b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/RegexParser.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/RegexParser.h"
+
+#include "src/sksl/lex/LexUtil.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <utility>
+#include <vector>
+
+RegexNode RegexParser::parse(std::string source) {
+ fSource = source;
+ fIndex = 0;
+ SkASSERT(fStack.size() == 0);
+ this->regex();
+ SkASSERT(fStack.size() == 1);
+ SkASSERT(fIndex == source.size());
+ return this->pop();
+}
+
+char RegexParser::peek() {
+ if (fIndex >= fSource.size()) {
+ return END;
+ }
+ return fSource[fIndex];
+}
+
+void RegexParser::expect(char c) {
+ if (this->peek() != c) {
+ printf("expected '%c' at index %d, but found '%c'", c, (int) fIndex, this->peek());
+ exit(1);
+ }
+ ++fIndex;
+}
+
+RegexNode RegexParser::pop() {
+ RegexNode result = fStack.top();
+ fStack.pop();
+ return result;
+}
+
+void RegexParser::term() {
+ switch (this->peek()) {
+ case '(': this->group(); break;
+ case '[': this->set(); break;
+ case '.': this->dot(); break;
+ default: this->literal(); break;
+ }
+}
+
+void RegexParser::quantifiedTerm() {
+ this->term();
+ switch (this->peek()) {
+ case '*': fStack.push(RegexNode(RegexNode::kStar_Kind, this->pop())); ++fIndex; break;
+ case '+': fStack.push(RegexNode(RegexNode::kPlus_Kind, this->pop())); ++fIndex; break;
+ case '?': fStack.push(RegexNode(RegexNode::kQuestion_Kind, this->pop())); ++fIndex; break;
+ default: break;
+ }
+}
+
+void RegexParser::sequence() {
+ this->quantifiedTerm();
+ for (;;) {
+ switch (this->peek()) {
+ case END: [[fallthrough]];
+ case '|': [[fallthrough]];
+ case ')': return;
+ default:
+ this->sequence();
+ RegexNode right = this->pop();
+ RegexNode left = this->pop();
+ fStack.emplace(RegexNode::kConcat_Kind, std::move(left), std::move(right));
+ break;
+ }
+ }
+}
+
+RegexNode RegexParser::escapeSequence(char c) {
+ switch (c) {
+ case 'n': return RegexNode(RegexNode::kChar_Kind, '\n');
+ case 'r': return RegexNode(RegexNode::kChar_Kind, '\r');
+ case 't': return RegexNode(RegexNode::kChar_Kind, '\t');
+ case 's': return RegexNode(RegexNode::kCharset_Kind, " \t\n\r");
+ default: return RegexNode(RegexNode::kChar_Kind, c);
+ }
+}
+
+void RegexParser::literal() {
+ char c = this->peek();
+ if (c == '\\') {
+ ++fIndex;
+ fStack.push(this->escapeSequence(peek()));
+ ++fIndex;
+ }
+ else {
+ fStack.push(RegexNode(RegexNode::kChar_Kind, c));
+ ++fIndex;
+ }
+}
+
+void RegexParser::dot() {
+ this->expect('.');
+ fStack.push(RegexNode(RegexNode::kDot_Kind));
+}
+
+void RegexParser::group() {
+ this->expect('(');
+ this->regex();
+ this->expect(')');
+}
+
+void RegexParser::setItem() {
+ this->literal();
+ if (this->peek() == '-') {
+ ++fIndex;
+ if (peek() == ']') {
+ fStack.push(RegexNode(RegexNode::kChar_Kind, '-'));
+ }
+ else {
+ literal();
+ RegexNode end = this->pop();
+ SkASSERT(end.fKind == RegexNode::kChar_Kind);
+ RegexNode start = this->pop();
+ SkASSERT(start.fKind == RegexNode::kChar_Kind);
+ fStack.push(RegexNode(RegexNode::kRange_Kind, std::move(start), std::move(end)));
+ }
+ }
+}
+
+void RegexParser::set() {
+ expect('[');
+ size_t depth = fStack.size();
+ RegexNode set(RegexNode::kCharset_Kind);
+ if (this->peek() == '^') {
+ ++fIndex;
+ set.fPayload.fBool = true;
+ }
+ else {
+ set.fPayload.fBool = false;
+ }
+ for (;;) {
+ switch (this->peek()) {
+ case ']':
+ ++fIndex;
+ while (fStack.size() > depth) {
+ set.fChildren.push_back(this->pop());
+ }
+ fStack.push(std::move(set));
+ return;
+ case END:
+ printf("unterminated character set\n");
+ exit(1);
+ default:
+ this->setItem();
+ break;
+ }
+ }
+}
+
+void RegexParser::regex() {
+ this->sequence();
+ switch (this->peek()) {
+ case '|': {
+ ++fIndex;
+ this->regex();
+ RegexNode right = this->pop();
+ RegexNode left = this->pop();
+ fStack.push(RegexNode(RegexNode::kOr_Kind, left, right));
+ break;
+ }
+ case END: // fall through
+ case ')':
+ return;
+ default:
+ SkASSERT(false);
+ }
+}
diff --git a/gfx/skia/skia/src/sksl/lex/RegexParser.h b/gfx/skia/skia/src/sksl/lex/RegexParser.h
new file mode 100644
index 0000000000..b8f4f1ffb4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/RegexParser.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_REGEXPARSER
+#define SKSL_REGEXPARSER
+
+#include "src/sksl/lex/RegexNode.h"
+
+#include <stack>
+#include <string>
+
+/**
+ * Turns a simple regular expression into a parse tree. The regular expression syntax supports only
+ * the basic quantifiers ('*', '+', and '?'), alternation ('|'), character sets ('[a-z]'), and
+ * groups ('()').
+ */
+class RegexParser {
+public:
+ RegexNode parse(std::string source);
+
+private:
+ inline static constexpr char END = '\0';
+
+ char peek();
+
+ void expect(char c);
+
+ RegexNode pop();
+
+ /**
+ * Matches a char literal, parenthesized group, character set, or dot ('.').
+ */
+ void term();
+
+ /**
+ * Matches a term followed by an optional quantifier ('*', '+', or '?').
+ */
+ void quantifiedTerm();
+
+ /**
+ * Matches a sequence of quantifiedTerms.
+ */
+ void sequence();
+
+ /**
+ * Returns a node representing the given escape character (e.g. escapeSequence('n') returns a
+ * node which matches a newline character).
+ */
+ RegexNode escapeSequence(char c);
+
+ /**
+ * Matches a literal character or escape sequence.
+ */
+ void literal();
+
+ /**
+ * Matches a dot ('.').
+ */
+ void dot();
+
+ /**
+ * Matches a parenthesized group.
+ */
+ void group();
+
+ /**
+ * Matches a literal character, escape sequence, or character range from a character set.
+ */
+ void setItem();
+
+ /**
+ * Matches a character set.
+ */
+ void set();
+
+ void regex();
+
+ std::string fSource;
+
+ size_t fIndex;
+
+ std::stack<RegexNode> fStack;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/lex/TransitionTable.cpp b/gfx/skia/skia/src/sksl/lex/TransitionTable.cpp
new file mode 100644
index 0000000000..83720fca1c
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/TransitionTable.cpp
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/lex/DFA.h"
+#include "src/sksl/lex/TransitionTable.h"
+
+#include <array>
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+namespace {
+
+// The number of bits to use per entry in our compact transition table. This is customizable:
+// - 1-bit: reasonable in theory. Doesn't actually pack many slices.
+// - 2-bit: best fit for our data. Packs extremely well.
+// - 4-bit: packs all but one slice, but doesn't save as much space overall.
+// - 8-bit: way too large (an 8-bit LUT plus an 8-bit data table is as big as a 16-bit table)
+// Other values don't divide cleanly into a byte and do not work.
+constexpr int kNumBits = 2;
+
+// These values are derived from kNumBits and shouldn't need to change.
+constexpr int kNumValues = (1 << kNumBits) - 1;
+constexpr int kDataPerByte = 8 / kNumBits;
+
+enum IndexType {
+ kCompactEntry = 0,
+ kFullEntry,
+};
+struct IndexEntry {
+ IndexType type;
+ int pos;
+};
+struct CompactEntry {
+ std::array<int, kNumValues> v = {};
+ std::vector<int> data;
+};
+struct FullEntry {
+ std::vector<int> data;
+};
+
+using TransitionSet = std::unordered_set<int>;
+
+static int add_compact_entry(const TransitionSet& transitionSet,
+ const std::vector<int>& data,
+ std::vector<CompactEntry>* entries) {
+ // Create a compact entry with the unique values from the transition set, padded out with zeros
+ // and sorted.
+ CompactEntry result{};
+ assert(transitionSet.size() <= result.v.size());
+ std::copy(transitionSet.begin(), transitionSet.end(), result.v.begin());
+ std::sort(result.v.rbegin(), result.v.rend());
+
+ // Create a mapping from real values to small values.
+ std::unordered_map<int, int> translationTable;
+ for (size_t index = 0; index < result.v.size(); ++index) {
+ translationTable[result.v[index]] = index;
+ }
+ translationTable[0] = result.v.size();
+
+ // Convert the real values into small values.
+ for (size_t index = 0; index < data.size(); ++index) {
+ int value = data[index];
+ assert(translationTable.find(value) != translationTable.end());
+ result.data.push_back(translationTable[value]);
+ }
+
+ // Look for an existing entry that exactly matches this one.
+ for (size_t index = 0; index < entries->size(); ++index) {
+ if (entries->at(index).v == result.v && entries->at(index).data == result.data) {
+ return index;
+ }
+ }
+
+ // Add this as a new entry.
+ entries->push_back(std::move(result));
+ return (int)(entries->size() - 1);
+}
+
+static int add_full_entry(const TransitionSet& transitionMap,
+ const std::vector<int>& data,
+ std::vector<FullEntry>* entries) {
+ // Create a full entry with this data.
+ FullEntry result{};
+ result.data = std::vector<int>(data.begin(), data.end());
+
+ // Look for an existing entry that exactly matches this one.
+ for (size_t index = 0; index < entries->size(); ++index) {
+ if (entries->at(index).data == result.data) {
+ return index;
+ }
+ }
+
+ // Add this as a new entry.
+ entries->push_back(std::move(result));
+ return (int)(entries->size() - 1);
+}
+
+} // namespace
+
+void WriteTransitionTable(std::ofstream& out, const DFA& dfa, size_t states) {
+ int numTransitions = dfa.fTransitions.size();
+
+ // Assemble our compact and full data tables, and an index into them.
+ std::vector<CompactEntry> compactEntries;
+ std::vector<FullEntry> fullEntries;
+ std::vector<IndexEntry> indices;
+ for (size_t s = 0; s < states; ++s) {
+ // Copy all the transitions for this state into a flat array, and into a histogram (counting
+ // the number of unique state-transition values). Most states only transition to a few
+ // possible new states.
+ TransitionSet transitionSet;
+ std::vector<int> data(numTransitions);
+ for (int t = 0; t < numTransitions; ++t) {
+ if ((size_t) t < dfa.fTransitions.size() && s < dfa.fTransitions[t].size()) {
+ int value = dfa.fTransitions[t][s];
+ assert(value >= 0 && value < (int)states);
+ data[t] = value;
+ transitionSet.insert(value);
+ }
+ }
+
+ transitionSet.erase(0);
+ if (transitionSet.size() <= kNumValues) {
+ // This table only contained a small number of unique nonzero values.
+ // Use a compact representation that squishes each value down to a few bits.
+ int index = add_compact_entry(transitionSet, data, &compactEntries);
+ indices.push_back(IndexEntry{kCompactEntry, index});
+ } else {
+ // This table contained a large number of values. We can't compact it.
+ int index = add_full_entry(transitionSet, data, &fullEntries);
+ indices.push_back(IndexEntry{kFullEntry, index});
+ }
+ }
+
+ // Find the largest value for each compact-entry slot.
+ int maxValue = 0;
+ for (const CompactEntry& entry : compactEntries) {
+ for (int index=0; index < kNumValues; ++index) {
+ maxValue = std::max(maxValue, entry.v[index]);
+ }
+ }
+
+ // Figure out how many bits we need to store our max value.
+ int bitsPerValue = std::ceil(std::log2(maxValue));
+ maxValue = (1 << bitsPerValue) - 1;
+
+ // If we exceed 10 bits per value, three values would overflow 32 bits. If this happens, we'll
+ // need to pack our values another way.
+ assert(bitsPerValue <= 10);
+
+ // Emit all the structs our transition table will use.
+ out << "using IndexEntry = int16_t;\n"
+ << "struct FullEntry {\n"
+ << " State data[" << numTransitions << "];\n"
+ << "};\n";
+
+ // Emit the compact-entry structure. We store all three values in `v`. If kNumBits were to
+ // change, we would need to adjust the packing algorithm.
+ static_assert(kNumBits == 2);
+ out << "struct CompactEntry {\n"
+ << " uint32_t values;\n"
+ << " uint8_t data[" << std::ceil(float(numTransitions) / float(kDataPerByte)) << "];\n"
+ << "};\n";
+
+ // Emit the full-table data.
+ out << "static constexpr FullEntry kFull[] = {\n";
+ for (const FullEntry& entry : fullEntries) {
+ out << " {";
+ for (int value : entry.data) {
+ out << value << ", ";
+ }
+ out << "},\n";
+ }
+ out << "};\n";
+
+ // Emit the compact-table data.
+ out << "static constexpr CompactEntry kCompact[] = {\n";
+ for (const CompactEntry& entry : compactEntries) {
+ out << " {";
+
+ // We pack all three values into `v`. If kNumBits were to change, we would need to adjust
+ // this packing algorithm.
+ static_assert(kNumBits == 2);
+ out << entry.v[0];
+ if (entry.v[1]) {
+ out << " | (" << entry.v[1] << " << " << bitsPerValue << ")";
+ }
+ if (entry.v[2]) {
+ out << " | (" << entry.v[2] << " << " << (2 * bitsPerValue) << ")";
+ }
+ out << ", {";
+
+ unsigned int shiftBits = 0, combinedBits = 0;
+ for (int index = 0; index < numTransitions; index++) {
+ combinedBits |= entry.data[index] << shiftBits;
+ shiftBits += kNumBits;
+ assert(shiftBits <= 8);
+ if (shiftBits == 8) {
+ out << combinedBits << ", ";
+ shiftBits = 0;
+ combinedBits = 0;
+ }
+ }
+ if (shiftBits > 0) {
+ // Flush any partial values.
+ out << combinedBits;
+ }
+ out << "}},\n";
+ }
+ out << "};\n"
+ << "static constexpr IndexEntry kIndices[] = {\n";
+ for (const IndexEntry& entry : indices) {
+ if (entry.type == kFullEntry) {
+ // Bit-not is used so that full entries start at -1 and go down from there.
+ out << ~entry.pos << ", ";
+ } else {
+ // Compact entries start at 0 and go up from there.
+ out << entry.pos << ", ";
+ }
+ }
+ out << "};\n"
+ << "State get_transition(int transition, int state) {\n"
+ << " IndexEntry index = kIndices[state];\n"
+ << " if (index < 0) { return kFull[~index].data[transition]; }\n"
+ << " const CompactEntry& entry = kCompact[index];\n"
+ << " int v = entry.data[transition >> " << std::log2(kDataPerByte) << "];\n"
+ << " v >>= " << kNumBits << " * (transition & " << kDataPerByte - 1 << ");\n"
+ << " v &= " << kNumValues << ";\n"
+ << " v *= " << bitsPerValue << ";\n"
+ << " return (entry.values >> v) & " << maxValue << ";\n"
+ << "}\n";
+}
diff --git a/gfx/skia/skia/src/sksl/lex/TransitionTable.h b/gfx/skia/skia/src/sksl/lex/TransitionTable.h
new file mode 100644
index 0000000000..6ac6986fae
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/TransitionTable.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TRANSITIONTABLE
+#define SKSL_TRANSITIONTABLE
+
+#include <stddef.h>
+#include <fstream>
+
+struct DFA;
+
+void WriteTransitionTable(std::ofstream& out, const DFA& dfa, size_t states);
+
+#endif // SKSL_TRANSITIONTABLE
diff --git a/gfx/skia/skia/src/sksl/lex/sksl.lex b/gfx/skia/skia/src/sksl/lex/sksl.lex
new file mode 100644
index 0000000000..02a946a350
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/lex/sksl.lex
@@ -0,0 +1,102 @@
+// *****************
+// *** IMPORTANT ***
+// *****************
+//
+// 1. This file is only used when gn arg skia_lex is set to true. It is used to regenerate the
+//    SkSLLexer.h and SkSLLexer.cpp files.
+// 2. Since token IDs are used to identify operators and baked into the .dehydrated.sksl files,
+//    after modifying this file it is likely everything will break until you update the dehydrated
+//    binaries. If things break after updating the lexer, set REHYDRATE in SkSLCompiler.cpp to 0,
+//    rebuild, and then set it back to 1.
+
+// Each rule below has the form TOKEN_NAME = pattern, where the pattern is
+// either a quoted literal or a regular expression.
+FLOAT_LITERAL = [0-9]*\.[0-9]+([eE][+-]?[0-9]+)?|[0-9]+\.[0-9]*([eE][+-]?[0-9]+)?|[0-9]+([eE][+-]?[0-9]+)
+INT_LITERAL = ([1-9][0-9]*|0[0-7]*|0[xX][0-9a-fA-F]+)[uU]?
+// 0-prefixed numbers with digits outside 0-7 are not valid octal; matched as a
+// distinct token, presumably so they can be rejected with a clear diagnostic —
+// confirm in the lexer generator / compiler.
+BAD_OCTAL = (0[0-9]+)[uU]?
+TRUE_LITERAL = "true"
+FALSE_LITERAL = "false"
+IF = "if"
+ELSE = "else"
+FOR = "for"
+WHILE = "while"
+DO = "do"
+SWITCH = "switch"
+CASE = "case"
+DEFAULT = "default"
+BREAK = "break"
+CONTINUE = "continue"
+DISCARD = "discard"
+RETURN = "return"
+IN = "in"
+OUT = "out"
+INOUT = "inout"
+UNIFORM = "uniform"
+CONST = "const"
+FLAT = "flat"
+NOPERSPECTIVE = "noperspective"
+INLINE = "inline"
+NOINLINE = "noinline"
+PURE = "$pure"
+READONLY = "readonly"
+WRITEONLY = "writeonly"
+BUFFER = "buffer"
+STRUCT = "struct"
+LAYOUT = "layout"
+HIGHP = "highp"
+MEDIUMP = "mediump"
+LOWP = "lowp"
+ES3 = "$es3"
+EXPORT = "$export"
+WORKGROUP = "workgroup"
+RESERVED = atomic|attribute|varying|precision|invariant|asm|class|union|enum|typedef|template|this|packed|goto|volatile|public|static|extern|external|interface|long|double|fixed|unsigned|superp|input|output|hvec[234]|dvec[234]|fvec[234]|sampler[13]D|sampler[12]DShadow|sampler3DRect|sampler2DRectShadow|samplerCube|sizeof|cast|namespace|using|gl_[0-9a-zA-Z_]*
+PRIVATE_IDENTIFIER = $[0-9a-zA-Z_]*
+IDENTIFIER = [a-zA-Z_][0-9a-zA-Z_]*
+DIRECTIVE = #[a-zA-Z_][0-9a-zA-Z_]*
+LPAREN = "("
+RPAREN = ")"
+LBRACE = "{"
+RBRACE = "}"
+LBRACKET = "["
+RBRACKET = "]"
+DOT = "."
+COMMA = ","
+PLUSPLUS = "++"
+MINUSMINUS = "--"
+PLUS = "+"
+MINUS = "-"
+STAR = "*"
+SLASH = "/"
+PERCENT = "%"
+SHL = "<<"
+SHR = ">>"
+BITWISEOR = "|"
+BITWISEXOR = "^"
+BITWISEAND = "&"
+BITWISENOT = "~"
+LOGICALOR = "||"
+LOGICALXOR = "^^"
+LOGICALAND = "&&"
+LOGICALNOT = "!"
+QUESTION = "?"
+COLON = ":"
+EQ = "="
+EQEQ = "=="
+NEQ = "!="
+GT = ">"
+LT = "<"
+GTEQ = ">="
+LTEQ = "<="
+PLUSEQ = "+="
+MINUSEQ = "-="
+STAREQ = "*="
+SLASHEQ = "/="
+PERCENTEQ = "%="
+SHLEQ = "<<="
+SHREQ = ">>="
+BITWISEOREQ = "|="
+BITWISEXOREQ = "^="
+BITWISEANDEQ = "&="
+SEMICOLON = ";"
+WHITESPACE = \s+
+LINE_COMMENT = //.*
+BLOCK_COMMENT = /\*([^*]|\*[^/])*\*/
+// Fallback: matches any single otherwise-unhandled character (presumably the
+// generator gives the rules above priority — confirm).
+INVALID = .
diff --git a/gfx/skia/skia/src/sksl/sksl_compute.sksl b/gfx/skia/skia/src/sksl/sksl_compute.sksl
new file mode 100644
index 0000000000..b06cb7b38b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_compute.sksl
@@ -0,0 +1,21 @@
+// defines built-in interfaces supported by SkSL compute shaders
+
+// The builtin=N values correspond to SPIR-V BuiltIn ids (see "enum SpvBuiltIn_"
+// in spirv.h, referenced from sksl_frag.sksl).
+layout(builtin=24) in uint3 sk_NumWorkgroups;
+layout(builtin=26) in uint3 sk_WorkgroupID;
+layout(builtin=27) in uint3 sk_LocalInvocationID;
+layout(builtin=28) in uint3 sk_GlobalInvocationID;
+layout(builtin=29) in uint sk_LocalInvocationIndex;
+
+// '$'-prefixed types/functions are private to SkSL (see PRIVATE_IDENTIFIER in
+// sksl.lex) and are not visible to user code.
+$pure half4 read($readableTexture2D t, uint2 pos);
+void write($writableTexture2D t, uint2 pos, half4 color);
+
+$pure uint width($genTexture2D t);
+$pure uint height($genTexture2D t);
+
+// Control-barrier with memory-ordering constraints applied to
+// workgroup shared memory only.
+void workgroupBarrier();
+
+// Control-barrier with memory-ordering constraints applied to
+// uniform and storage-buffer memory.
+void storageBarrier();
diff --git a/gfx/skia/skia/src/sksl/sksl_frag.sksl b/gfx/skia/skia/src/sksl/sksl_frag.sksl
new file mode 100644
index 0000000000..aa1e09d645
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_frag.sksl
@@ -0,0 +1,9 @@
+// defines built-in interfaces supported by SkSL fragment shaders
+
+// See "enum SpvBuiltIn_" in ./spirv.h
+layout(builtin=15) in float4 sk_FragCoord;
+layout(builtin=17) in bool sk_Clockwise; // Similar to gl_FrontFacing, but defined in device space.
+
+// Builtin ids of 10000+ are not SPIR-V builtins; they appear to be
+// SkSL-internal identifiers — TODO confirm against the compiler.
+layout(location=0,index=0,builtin=10001) out half4 sk_FragColor;
+// NOTE(review): declared with neither `in` nor `out`, unlike its neighbors —
+// presumably access is synthesized by the compiler; confirm.
+layout(builtin=10008) half4 sk_LastFragColor;
+layout(builtin=10012) out half4 sk_SecondaryFragColor;
diff --git a/gfx/skia/skia/src/sksl/sksl_gpu.sksl b/gfx/skia/skia/src/sksl/sksl_gpu.sksl
new file mode 100644
index 0000000000..80ac013a55
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_gpu.sksl
@@ -0,0 +1,324 @@
+// Not exposed in shared module
+
+// Intrinsic declarations. The $gen*Type names are generic placeholders that
+// expand over the scalar/vector widths of a type family ($genType ~ float,
+// $genHType ~ half, $genIType ~ int, $genUType ~ uint, $genBType ~ bool —
+// TODO confirm the exact expansion rules).
+$pure $genIType mix($genIType x, $genIType y, $genBType a);
+$pure $genBType mix($genBType x, $genBType y, $genBType a);
+$pure $genType fma($genType a, $genType b, $genType c);
+$pure $genHType fma($genHType a, $genHType b, $genHType c);
+      $genType frexp($genType x, out $genIType exp);
+      $genHType frexp($genHType x, out $genIType exp);
+$pure $genType ldexp($genType x, in $genIType exp);
+$pure $genHType ldexp($genHType x, in $genIType exp);
+
+// Scalar pack/unpack conversions, mirroring the GLSL intrinsics of the same
+// names.
+$pure uint packSnorm2x16(float2 v);
+$pure uint packUnorm4x8(float4 v);
+$pure uint packSnorm4x8(float4 v);
+$pure float2 unpackSnorm2x16(uint p);
+$pure float4 unpackUnorm4x8(uint p);
+$pure float4 unpackSnorm4x8(uint p);
+$pure uint packHalf2x16(float2 v);
+$pure float2 unpackHalf2x16(uint v);
+
+// Integer bit-manipulation intrinsics.
+$pure $genIType bitCount($genIType value);
+$pure $genIType bitCount($genUType value);
+$pure $genIType findLSB($genIType value);
+$pure $genIType findLSB($genUType value);
+$pure $genIType findMSB($genIType value);
+$pure $genIType findMSB($genUType value);
+
+$pure sampler2D makeSampler2D(texture2D texture, sampler s);
+
+// Texture sampling declarations.
+$pure half4 sample(sampler2D s, float2 P);
+$pure half4 sample(sampler2D s, float3 P);
+$pure half4 sample(sampler2D s, float3 P, float bias);
+
+$pure half4 sample(samplerExternalOES s, float2 P);
+$pure half4 sample(samplerExternalOES s, float2 P, float bias);
+
+$pure half4 sample(sampler2DRect s, float2 P);
+$pure half4 sample(sampler2DRect s, float3 P);
+
+$pure half4 sampleLod(sampler2D s, float2 P, float lod);
+$pure half4 sampleLod(sampler2D s, float3 P, float lod);
+
+$pure half4 sampleGrad(sampler2D s, float2, float2 dPdx, float2 dPdy);
+
+// Currently we do not support the generic types of loading subpassInput so we have some explicit
+// versions that we currently use
+$pure half4 subpassLoad(subpassInput subpass);
+$pure half4 subpassLoad(subpassInputMS subpass, int sample);
+
+/** Atomically loads the value from `a` and returns it. */
+$pure uint atomicLoad(atomicUint a);
+
+/** Atomically stores the value of `value` to `a` */
+void atomicStore(atomicUint a, uint value);
+
+/**
+ * Performs an atomic addition of `value` to the contents of `a` and returns the original contents
+ * of `a` from before the addition occurred.
+ */
+uint atomicAdd(atomicUint a, uint value);
+
+// Definitions of functions implementing all of the SkBlendMode blends.
+// All inputs and outputs are premultiplied-alpha colors (e.g. src-over below is
+// the premultiplied form src + (1 - src.a)*dst).
+
+$pure half4 blend_clear(half4 src, half4 dst) { return half4(0); }
+
+$pure half4 blend_src(half4 src, half4 dst) { return src; }
+
+$pure half4 blend_dst(half4 src, half4 dst) { return dst; }
+
+$pure half4 blend_src_over(half4 src, half4 dst) { return src + (1 - src.a)*dst; }
+
+$pure half4 blend_dst_over(half4 src, half4 dst) { return (1 - dst.a)*src + dst; }
+
+$pure half4 blend_src_in(half4 src, half4 dst) { return src*dst.a; }
+
+$pure half4 blend_dst_in(half4 src, half4 dst) { return dst*src.a; }
+
+$pure half4 blend_src_out(half4 src, half4 dst) { return (1 - dst.a)*src; }
+
+$pure half4 blend_dst_out(half4 src, half4 dst) { return (1 - src.a)*dst; }
+
+$pure half4 blend_src_atop(half4 src, half4 dst) { return dst.a*src + (1 - src.a)*dst; }
+
+$pure half4 blend_dst_atop(half4 src, half4 dst) { return (1 - dst.a) * src + src.a*dst; }
+
+$pure half4 blend_xor(half4 src, half4 dst) { return (1 - dst.a)*src + (1 - src.a)*dst; }
+
+$pure half4 blend_plus(half4 src, half4 dst) { return min(src + dst, 1); }
+
+// This multi-purpose Porter-Duff blend function can perform any of the thirteen blends above,
+// when passed one of the following values for BlendOp:
+// - Clear: half4(0, 0, 0, 0)
+// - Src: half4(1, 0, 0, 0)
+// - Dst: half4(0, 1, 0, 0)
+// - SrcOver: half4(1, 0, 0, -1)
+// - DstOver: half4(0, 1, -1, 0)
+// - SrcIn: half4(0, 0, 1, 0)
+// - DstIn: half4(0, 0, 0, 1)
+// - SrcOut: half4(0, 0, -1, 0)
+// - DstOut: half4(0, 0, 0, -1)
+// - SrcATop: half4(0, 0, 1, -1)
+// - DstATop: half4(0, 0, -1, 1)
+// - Xor: half4(0, 0, -1, -1)
+// - Plus: half4(1, 1, 0, 0)
+$pure half4 blend_porter_duff(half4 blendOp, half4 src, half4 dst) {
+    // coeff.x/.y become the src/dst weights. A z/w value of +1 selects
+    // dst.a/src.a directly; -1 selects (1 - dst.a)/(1 - src.a): the
+    // min(blendOp.zw, 0) term supplies the "-1" offset so -1*(alpha - 1)
+    // equals (1 - alpha).
+    half2 coeff = blendOp.xy + (blendOp.zw * (half2(dst.a, src.a) + min(blendOp.zw, 0)));
+    return min(half4(1), src * coeff.x + dst * coeff.y);
+}
+
+$pure half4 blend_modulate(half4 src, half4 dst) { return src*dst; }
+
+$pure half4 blend_screen(half4 src, half4 dst) { return src + (1 - src)*dst; }
+
+// Channel-wise overlay. s and d each pack one color channel in .x with its
+// alpha in .y (callers pass .ra/.ga/.ba).
+$pure half $blend_overlay_component(half2 s, half2 d) {
+    return (2*d.x <= d.y) ? 2*s.x*d.x
+                          : s.y*d.y - 2*(d.y - d.x)*(s.y - s.x);
+}
+
+$pure half4 blend_overlay(half4 src, half4 dst) {
+    half4 result = half4($blend_overlay_component(src.ra, dst.ra),
+                         $blend_overlay_component(src.ga, dst.ga),
+                         $blend_overlay_component(src.ba, dst.ba),
+                         src.a + (1 - src.a)*dst.a);
+    result.rgb += dst.rgb*(1 - src.a) + src.rgb*(1 - dst.a);
+    return result;
+}
+
+// flip != 0 swaps the two inputs before blending.
+$pure half4 blend_overlay(half flip, half4 a, half4 b) {
+    return blend_overlay(bool(flip) ? b : a, bool(flip) ? a : b);
+}
+
+$pure half4 blend_lighten(half4 src, half4 dst) {
+    half4 result = blend_src_over(src, dst);
+    result.rgb = max(result.rgb, (1 - dst.a)*src.rgb + dst.rgb);
+    return result;
+}
+
+$pure half4 blend_darken(half mode /* darken: 1, lighten: -1 */, half4 src, half4 dst) {
+    half4 a = blend_src_over(src, dst);
+    half3 b = (1 - dst.a) * src.rgb + dst.rgb; // DstOver.rgb
+    // Multiplying by mode on both sides of min() selects min for darken (+1)
+    // and max for lighten (-1).
+    a.rgb = mode * min(a.rgb * mode, b.rgb * mode);
+    return a;
+}
+
+$pure half4 blend_darken(half4 src, half4 dst) {
+    return blend_darken(1, src, dst);
+}
+
+// Some GPUs can still produce Inf/NaN from n/d even when d was explicitly
+// checked against zero; on such devices bias the denominator by a tiny epsilon
+// (zero elsewhere, making $guarded_divide an ordinary divide).
+const half $kGuardedDivideEpsilon = sk_Caps.mustGuardDivisionEvenAfterExplicitZeroCheck
+                                            ? 0.00000001
+                                            : 0.0;
+
+$pure inline half $guarded_divide(half n, half d) {
+    return n / (d + $kGuardedDivideEpsilon);
+}
+
+$pure inline half3 $guarded_divide(half3 n, half d) {
+    return n / (d + $kGuardedDivideEpsilon);
+}
+
+// Channel-wise color-dodge. s and d pack one color channel in .x with its alpha
+// in .y (callers pass .ra/.ga/.ba).
+$pure half $color_dodge_component(half2 s, half2 d) {
+    if (d.x == 0) {
+        return s.x*(1 - d.y);
+    } else {
+        half delta = s.y - s.x;
+        if (delta == 0) {
+            return s.y*d.y + s.x*(1 - d.y) + d.x*(1 - s.y);
+        } else {
+            delta = min(d.y, $guarded_divide(d.x*s.y, delta));
+            return delta*s.y + s.x*(1 - d.y) + d.x*(1 - s.y);
+        }
+    }
+}
+
+$pure half4 blend_color_dodge(half4 src, half4 dst) {
+    return half4($color_dodge_component(src.ra, dst.ra),
+                 $color_dodge_component(src.ga, dst.ga),
+                 $color_dodge_component(src.ba, dst.ba),
+                 src.a + (1 - src.a)*dst.a);
+}
+
+// Channel-wise color-burn; same (channel, alpha) packing as above.
+$pure half $color_burn_component(half2 s, half2 d) {
+    if (d.y == d.x) {
+        return s.y*d.y + s.x*(1 - d.y) + d.x*(1 - s.y);
+    } else if (s.x == 0) {
+        return d.x*(1 - s.y);
+    } else {
+        half delta = max(0, d.y - $guarded_divide((d.y - d.x)*s.y, s.x));
+        return delta*s.y + s.x*(1 - d.y) + d.x*(1 - s.y);
+    }
+}
+
+$pure half4 blend_color_burn(half4 src, half4 dst) {
+    return half4($color_burn_component(src.ra, dst.ra),
+                 $color_burn_component(src.ga, dst.ga),
+                 $color_burn_component(src.ba, dst.ba),
+                 src.a + (1 - src.a)*dst.a);
+}
+
+// Hard-light is overlay with the src and dst arguments swapped.
+$pure half4 blend_hard_light(half4 src, half4 dst) {
+    return blend_overlay(dst, src);
+}
+
+// Channel-wise soft-light. s and d pack one color channel in .x with its alpha
+// in .y.
+$pure half $soft_light_component(half2 s, half2 d) {
+    if (2*s.x <= s.y) {
+        return $guarded_divide(d.x*d.x*(s.y - 2*s.x), d.y) + (1 - d.y)*s.x + d.x*(-s.y + 2*s.x + 1);
+    } else if (4.0 * d.x <= d.y) {
+        half DSqd = d.x*d.x;
+        half DCub = DSqd*d.x;
+        half DaSqd = d.y*d.y;
+        half DaCub = DaSqd*d.y;
+        return $guarded_divide(DaSqd*(s.x - d.x*(3*s.y - 6*s.x - 1)) + 12*d.y*DSqd*(s.y - 2*s.x)
+                               - 16*DCub * (s.y - 2*s.x) - DaCub*s.x, DaSqd);
+    } else {
+        return d.x*(s.y - 2*s.x + 1) + s.x - sqrt(d.y*d.x)*(s.y - 2*s.x) - d.y*s.x;
+    }
+}
+
+$pure half4 blend_soft_light(half4 src, half4 dst) {
+    // A fully transparent dst leaves src unchanged.
+    return (dst.a == 0) ? src : half4($soft_light_component(src.ra, dst.ra),
+                                      $soft_light_component(src.ga, dst.ga),
+                                      $soft_light_component(src.ba, dst.ba),
+                                      src.a + (1 - src.a)*dst.a);
+}
+
+$pure half4 blend_difference(half4 src, half4 dst) {
+    return half4(src.rgb + dst.rgb - 2*min(src.rgb*dst.a, dst.rgb*src.a),
+                 src.a + (1 - src.a)*dst.a);
+}
+
+$pure half4 blend_exclusion(half4 src, half4 dst) {
+    return half4(dst.rgb + src.rgb - 2*dst.rgb*src.rgb, src.a + (1 - src.a)*dst.a);
+}
+
+$pure half4 blend_multiply(half4 src, half4 dst) {
+    return half4((1 - src.a)*dst.rgb + (1 - dst.a)*src.rgb + src.rgb*dst.rgb,
+                 src.a + (1 - src.a)*dst.a);
+}
+
+// Weighted luminance of a color, using 0.30/0.59/0.11 luma weights.
+$pure half $blend_color_luminance(half3 color) { return dot(half3(0.3, 0.59, 0.11), color); }
+
+// Replaces hueSatColor's luminance with lumColor's, then pulls any resulting
+// out-of-range components back toward the luminance so the result stays within
+// [0, alpha].
+$pure half3 $blend_set_color_luminance(half3 hueSatColor, half alpha, half3 lumColor) {
+    half lum = $blend_color_luminance(lumColor);
+    half3 result = lum - $blend_color_luminance(hueSatColor) + hueSatColor;
+    half minComp = min(min(result.r, result.g), result.b);
+    half maxComp = max(max(result.r, result.g), result.b);
+    if (minComp < 0 && lum != minComp) {
+        result = lum + (result - lum) * $guarded_divide(lum, (lum - minComp));
+    }
+    if (maxComp > alpha && maxComp != lum) {
+        result = lum + $guarded_divide((result - lum) * (alpha - lum), (maxComp - lum));
+    }
+    return result;
+}
+
+// Saturation as the spread between the largest and smallest channel.
+$pure half $blend_color_saturation(half3 color) {
+    return max(max(color.r, color.g), color.b) - min(min(color.r, color.g), color.b);
+}
+
+// Rescales color so its channel spread matches satColor's saturation, or
+// returns black when color has no spread.
+$pure half3 $blend_set_color_saturation(half3 color, half3 satColor) {
+    half mn = min(min(color.r, color.g), color.b);
+    half mx = max(max(color.r, color.g), color.b);
+
+    return (mx > mn) ? ((color - mn) * $blend_color_saturation(satColor)) / (mx - mn)
+                     : half3(0);
+}
+
+// Shared implementation of the hue/saturation/color/luminosity blends below.
+// flipSat.x != 0 swaps the src/dst roles; flipSat.y != 0 additionally applies
+// the saturation transfer before the luminance transfer.
+$pure half4 blend_hslc(half2 flipSat, half4 src, half4 dst) {
+    half alpha = dst.a * src.a;
+    half3 sda = src.rgb * dst.a;
+    half3 dsa = dst.rgb * src.a;
+    half3 l = bool(flipSat.x) ? dsa : sda;
+    half3 r = bool(flipSat.x) ? sda : dsa;
+    if (bool(flipSat.y)) {
+        l = $blend_set_color_saturation(l, r);
+        r = dsa;
+    }
+    return half4($blend_set_color_luminance(l, alpha, r) + dst.rgb - dsa + src.rgb - sda,
+                 src.a + dst.a - alpha);
+}
+
+$pure half4 blend_hue(half4 src, half4 dst) {
+    return blend_hslc(half2(0, 1), src, dst);
+}
+
+$pure half4 blend_saturation(half4 src, half4 dst) {
+    return blend_hslc(half2(1), src, dst);
+}
+
+$pure half4 blend_color(half4 src, half4 dst) {
+    return blend_hslc(half2(0), src, dst);
+}
+
+$pure half4 blend_luminosity(half4 src, half4 dst) {
+    return blend_hslc(half2(1, 0), src, dst);
+}
+
+// Perspective divide: maps homogeneous p to its 2-D projection.
+$pure float2 proj(float3 p) { return p.xy / p.z; }
+
+// Implement cross() as a determinant to communicate our intent more clearly to the compiler.
+// NOTE: Due to precision issues, it might be the case that cross(a, a) != 0.
+$pure float cross_length_2d(float2 a, float2 b) {
+    return determinant(float2x2(a, b));
+}
+
+$pure half cross_length_2d(half2 a, half2 b) {
+    return determinant(half2x2(a, b));
+}
+
+// Rotates v by 90 degrees: (x, y) -> (-y, x).
+$pure float2 perp(float2 v) {
+    return float2(-v.y, v.x);
+}
+
+$pure half2 perp(half2 v) {
+    return half2(-v.y, v.x);
+}
+
+// Returns a bias given a scale factor, such that 'scale * (dist + bias)' converts the distance to
+// a per-pixel coverage value, automatically widening the visible coverage ramp for subpixel
+// dimensions. The 'scale' must already be equal to the narrowest dimension of the shape and clamped
+// to [0, 1.0].
+$pure float coverage_bias(float scale) {
+    return 1.0 - 0.5 * scale;
+}
diff --git a/gfx/skia/skia/src/sksl/sksl_graphite_frag.sksl b/gfx/skia/skia/src/sksl/sksl_graphite_frag.sksl
new file mode 100644
index 0000000000..7cd6080959
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_graphite_frag.sksl
@@ -0,0 +1,1135 @@
+// Graphite-specific fragment shader code
+
+// Integer constants that presumably mirror the corresponding C++-side enums and
+// flag bits (tile modes, read swizzles, filter modes, transfer-function kinds,
+// and color-space transform stages) — keep in sync with the CPU code.
+const int $kTileModeClamp = 0;
+const int $kTileModeRepeat = 1;
+const int $kTileModeMirror = 2;
+const int $kTileModeDecal = 3;
+
+const int $kReadSwizzleNormalRGBA = 0;
+const int $kReadSwizzleRGB1 = 1;
+const int $kReadSwizzleRRRR = 2;
+const int $kReadSwizzleRRR1 = 3;
+const int $kReadSwizzleBGRA = 4;
+
+const int $kFilterModeNearest = 0;
+const int $kFilterModeLinear = 1;
+
+const int $kTFTypeSRGB = 1;
+const int $kTFTypePQ = 2;
+const int $kTFTypeHLG = 3;
+const int $kTFTypeHLGinv = 4;
+
+// Bit flags selecting the stages of sk_color_space_transform.
+const int $kColorSpaceXformFlagUnpremul = 0x1;
+const int $kColorSpaceXformFlagLinearize = 0x2;
+const int $kColorSpaceXformFlagGamutTransform = 0x4;
+const int $kColorSpaceXformFlagEncode = 0x8;
+const int $kColorSpaceXformFlagPremul = 0x10;
+
+// Solid magenta, used so shader errors are visually obvious.
+$pure half4 sk_error() {
+    return half4(1.0, 0.0, 1.0, 1.0);
+}
+
+$pure half4 sk_passthrough(half4 color) {
+    return color;
+}
+
+$pure half4 sk_solid_shader(float4 colorParam) {
+    return half4(colorParam);
+}
+
+// Reorders/replicates color channels according to a $kReadSwizzle* value;
+// unrecognized values leave the color unchanged.
+$pure half4 $apply_swizzle(int swizzleType, half4 color) {
+    half4 resultantColor = color;
+    switch (swizzleType) {
+        case $kReadSwizzleNormalRGBA:
+            break;
+        case $kReadSwizzleRGB1:
+            resultantColor = color.rgb1;
+            break;
+        case $kReadSwizzleRRRR:
+            resultantColor = color.rrrr;
+            break;
+        case $kReadSwizzleRRR1:
+            resultantColor = color.rrr1;
+            break;
+        case $kReadSwizzleBGRA:
+            resultantColor = color.bgra;
+            break;
+    }
+    return resultantColor;
+}
+
+// Applies a parametric transfer function of the given $kTFType* kind to a
+// single channel. cs holds the seven coefficients {G, A, B, C, D, E, F}. The
+// curve is applied to |x| and the original sign restored afterwards, so
+// negative inputs are mirrored.
+$pure half $apply_xfer_fn(int kind, half x, half cs[7]) {
+    half G = cs[0], A = cs[1], B = cs[2], C = cs[3], D = cs[4], E = cs[5], F = cs[6];
+    half s = sign(x);
+    x = abs(x);
+    switch (kind) {
+        case $kTFTypeSRGB:
+            x = (x < D) ? (C * x) + F
+                        : pow(A * x + B, G) + E;
+            break;
+        case $kTFTypePQ:
+            x = pow(max(A + B * pow(x, C), 0) / (D + E * pow(x, C)), F);
+            break;
+        case $kTFTypeHLG:
+            x = (x * A <= 1) ? pow(x * A, B)
+                             : exp((x - E) * C) + D;
+            x *= (F + 1);
+            break;
+        case $kTFTypeHLGinv:
+            x /= (F + 1);
+            x = (x <= 1) ? A * pow(x, B)
+                         : C * log(x - D) + E;
+            break;
+    }
+    return s * x;
+}
+
+// TODO(b/239548614) need to plumb Graphite equivalent of fColorSpaceMathNeedsFloat.
+// This would change 'color' from half4 to float4
+//
+// Applies the color-space conversion stages selected by 'flags', in order:
+// unpremul -> linearize (src transfer fn) -> 3x3 gamut matrix -> encode
+// (dst transfer fn) -> premul. Alpha is only touched by the premul stages.
+$pure half4 sk_color_space_transform(half4 color,
+                                     int flags,
+                                     int srcKind,
+                                     half srcCoeffs[7],
+                                     half3x3 gamutTransform,
+                                     int dstKind,
+                                     half dstCoeffs[7]) {
+    if (bool(flags & $kColorSpaceXformFlagUnpremul)) {
+        color = unpremul(color);
+    }
+
+    if (bool(flags & $kColorSpaceXformFlagLinearize)) {
+        color.r = $apply_xfer_fn(srcKind, color.r, srcCoeffs);
+        color.g = $apply_xfer_fn(srcKind, color.g, srcCoeffs);
+        color.b = $apply_xfer_fn(srcKind, color.b, srcCoeffs);
+    }
+    if (bool(flags & $kColorSpaceXformFlagGamutTransform)) {
+        color.rgb = gamutTransform * color.rgb;
+    }
+    if (bool(flags & $kColorSpaceXformFlagEncode)) {
+        color.r = $apply_xfer_fn(dstKind, color.r, dstCoeffs);
+        color.g = $apply_xfer_fn(dstKind, color.g, dstCoeffs);
+        color.b = $apply_xfer_fn(dstKind, color.b, dstCoeffs);
+    }
+
+    if (bool(flags & $kColorSpaceXformFlagPremul)) {
+        color.rgb *= color.a;
+    }
+    return color;
+}
+
+// Maps coordinate f into [low, high] according to tileMode. Decal coordinates
+// are returned unchanged here; out-of-bounds rejection for decal happens in
+// the callers.
+$pure float $tile(int tileMode, float f, float low, float high) {
+    switch (tileMode) {
+        case $kTileModeClamp:
+            return clamp(f, low, high);
+
+        case $kTileModeRepeat: {
+            float length = high - low;
+            return (mod(f - low, length) + low);
+        }
+        case $kTileModeMirror: {
+            // Reflect every other period of length (high - low).
+            float length = high - low;
+            float length2 = 2 * length;
+            float tmp = mod(f - low, length2);
+            return (mix(tmp, length2 - tmp, step(length, tmp)) + low);
+        }
+        default: // $kTileModeDecal
+            // Decal is handled later.
+            return f;
+    }
+}
+
+// Samples `s` at `pos` (given in texel coordinates), applying subset clamping
+// and shader-based tiling/filtering. `subset` is (left, top, right, bottom) in
+// texels; `imgSize` converts texel coordinates to normalized coordinates at
+// the sample() calls.
+$pure half4 $sample_image(float2 pos,
+                          float2 imgSize,
+                          float4 subset,
+                          int tileModeX,
+                          int tileModeY,
+                          int filterMode,
+                          int readSwizzle,
+                          sampler2D s) {
+    // Do hard-edge shader transitions to the border color for nearest-neighbor decal tiling at the
+    // subset boundaries. Snap the input coordinates to nearest neighbor before comparing to the
+    // subset rect, to avoid GPU interpolation errors. See https://crbug.com/skia/10403.
+    if (tileModeX == $kTileModeDecal && filterMode == $kFilterModeNearest) {
+        float snappedX = floor(pos.x) + 0.5;
+        if (snappedX < subset.x || snappedX > subset.z) {
+            return half4(0);
+        }
+    }
+    if (tileModeY == $kTileModeDecal && filterMode == $kFilterModeNearest) {
+        float snappedY = floor(pos.y) + 0.5;
+        if (snappedY < subset.y || snappedY > subset.w) {
+            return half4(0);
+        }
+    }
+
+    pos.x = $tile(tileModeX, pos.x, subset.x, subset.z);
+    pos.y = $tile(tileModeY, pos.y, subset.y, subset.w);
+
+    // Clamp to an inset subset to prevent sampling neighboring texels when coords fall exactly at
+    // texel boundaries.
+    float4 insetClamp;
+    if (filterMode == $kFilterModeNearest) {
+        insetClamp = float4(floor(subset.xy) + 0.5, ceil(subset.zw) - 0.5);
+    } else {
+        insetClamp = float4(subset.xy + 0.5, subset.zw - 0.5);
+    }
+    float2 clampedPos = clamp(pos, insetClamp.xy, insetClamp.zw);
+    half4 color = sample(s, clampedPos / imgSize);
+    color = $apply_swizzle(readSwizzle, color);
+
+    if (filterMode == $kFilterModeLinear) {
+        // Remember the amount the coord moved for clamping. This is used to implement shader-based
+        // filtering for repeat and decal tiling.
+        half2 error = half2(pos - clampedPos);
+        half2 absError = abs(error);
+
+        // Do 1 or 3 more texture reads depending on whether both x and y tiling modes are repeat
+        // and whether we're near a single subset edge or a corner. Then blend the multiple reads
+        // using the error values calculated above.
+        bool sampleExtraX = tileModeX == $kTileModeRepeat;
+        bool sampleExtraY = tileModeY == $kTileModeRepeat;
+        if (sampleExtraX || sampleExtraY) {
+            float extraCoordX;
+            float extraCoordY;
+            half4 extraColorX;
+            half4 extraColorY;
+            if (sampleExtraX) {
+                // Wrap to the opposite edge of the inset subset.
+                extraCoordX = error.x > 0 ? insetClamp.x : insetClamp.z;
+                extraColorX = sample(s, float2(extraCoordX, clampedPos.y) / imgSize);
+                extraColorX = $apply_swizzle(readSwizzle, extraColorX);
+            }
+            if (sampleExtraY) {
+                extraCoordY = error.y > 0 ? insetClamp.y : insetClamp.w;
+                extraColorY = sample(s, float2(clampedPos.x, extraCoordY) / imgSize);
+                extraColorY = $apply_swizzle(readSwizzle, extraColorY);
+            }
+            if (sampleExtraX && sampleExtraY) {
+                half4 extraColorXY = sample(s, float2(extraCoordX, extraCoordY) / imgSize);
+                extraColorXY = $apply_swizzle(readSwizzle, extraColorXY);
+                color = mix(mix(color, extraColorX, absError.x),
+                            mix(extraColorY, extraColorXY, absError.x),
+                            absError.y);
+            } else if (sampleExtraX) {
+                color = mix(color, extraColorX, absError.x);
+            } else if (sampleExtraY) {
+                color = mix(color, extraColorY, absError.y);
+            }
+        }
+
+        // Do soft edge shader filtering for decal tiling and linear filtering using the error
+        // values calculated above.
+        if (tileModeX == $kTileModeDecal) {
+            color *= max(1 - absError.x, 0);
+        }
+        if (tileModeY == $kTileModeDecal) {
+            color *= max(1 - absError.y, 0);
+        }
+    }
+
+    return color;
+}
+
+// Bicubic image filtering: takes 16 nearest-neighbor taps on a 4x4 texel grid
+// around pos, weighting rows/columns by cubic polynomials of the fractional
+// offset (polynomial coefficients come from `coeffs`). Each tap goes through
+// $sample_image with nearest filtering, so subset and tiling rules still apply.
+$pure half4 $cubic_filter_image(float2 pos,
+                                float2 imgSize,
+                                float4 subset,
+                                int tileModeX,
+                                int tileModeY,
+                                float4x4 coeffs,
+                                int readSwizzle,
+                                sampler2D s) {
+    // Determine pos's fractional offset f between texel centers.
+    float2 f = fract(pos - 0.5);
+    // Sample 16 points at 1-pixel intervals from [p - 1.5 ... p + 1.5].
+    pos -= 1.5;
+    // Snap to texel centers to prevent sampling neighboring texels.
+    pos = floor(pos) + 0.5;
+
+    float4 wx = coeffs * float4(1.0, f.x, f.x * f.x, f.x * f.x * f.x);
+    float4 wy = coeffs * float4(1.0, f.y, f.y * f.y, f.y * f.y * f.y);
+    float4 color = float4(0);
+    for (int y = 0; y < 4; ++y) {
+        float4 rowColor = float4(0);
+        for (int x = 0; x < 4; ++x) {
+            rowColor += wx[x] * $sample_image(pos + float2(x, y), imgSize, subset,
+                                              tileModeX, tileModeY, $kFilterModeNearest,
+                                              readSwizzle, s);
+        }
+        color += wy[y] * rowColor;
+    }
+    return half4(color);
+}
+
+// Image shader entry point: samples via the bicubic path when useCubic != 0,
+// otherwise via the standard path, then applies the color-space transform.
+$pure half4 sk_image_shader(float2 coords,
+                            float2 imgSize,
+                            float4 subset,
+                            int tileModeX,
+                            int tileModeY,
+                            int filterMode,
+                            int useCubic,
+                            float4x4 cubicCoeffs,
+                            int readSwizzle,
+                            int csXformFlags,
+                            int csXformSrcKind,
+                            half csXformSrcCoeffs[7],
+                            half3x3 csXformGamutTransform,
+                            int csXformDstKind,
+                            half csXformDstCoeffs[7],
+                            sampler2D s) {
+    half4 sampleColor = (useCubic != 0)
+        ? $cubic_filter_image(coords, imgSize, subset, tileModeX, tileModeY, cubicCoeffs,
+                              readSwizzle, s)
+        : $sample_image(coords, imgSize, subset, tileModeX, tileModeY, filterMode, readSwizzle, s);
+    return sk_color_space_transform(sampleColor, csXformFlags, csXformSrcKind, csXformSrcCoeffs,
+                                    csXformGamutTransform, csXformDstKind, csXformDstCoeffs);
+}
+
+// Adds a per-pixel offset from an 8x8 lookup table to each color channel,
+// scaled by `range`, clamping each channel to [0, alpha] so the result stays
+// premultiplied.
+$pure half4 sk_dither_shader(half4 colorIn,
+                             float2 coords,
+                             float range,
+                             sampler2D lut) {
+    const float kImgSize = 8;
+
+    half2 lutCoords = half2(coords.x/kImgSize, coords.y/kImgSize);
+    half value = sample(lut, lutCoords).r - 0.5; // undo the bias in the table
+    // For each color channel, add the random offset to the channel value and then clamp
+    // between 0 and alpha to keep the color premultiplied.
+    return half4(clamp(colorIn.rgb + value * range, 0.0, colorIn.a), colorIn.a);
+}
+
+// Tiles the gradient parameter t.x into [0, 1] according to tileMode. For
+// decal, an out-of-range coordinate returns (0, -1); the colorize functions
+// treat a negative .y as "no color" (transparent black).
+$pure float2 $tile_grad(int tileMode, float2 t) {
+    switch (tileMode) {
+        case $kTileModeClamp:
+            t.x = clamp(t.x, 0, 1);
+            break;
+
+        case $kTileModeRepeat:
+            t.x = fract(t.x);
+            break;
+
+        case $kTileModeMirror: {
+            float t_1 = t.x - 1;
+            t.x = t_1 - 2 * floor(t_1 * 0.5) - 1;
+            if (sk_Caps.mustDoOpBetweenFloorAndAbs) {
+                // At this point the expected value of tiled_t should be between -1 and 1, so this
+                // clamp has no effect other than to break up the floor and abs calls and make sure
+                // the compiler doesn't merge them back together.
+                t.x = clamp(t.x, -1, 1);
+            }
+            t.x = abs(t.x);
+            break;
+        }
+
+        case $kTileModeDecal:
+            if (t.x < 0 || t.x > 1) {
+                return float2(0, -1);
+            }
+            break;
+    }
+
+    return t;
+}
+
+// Piecewise-linear interpolation across four color stops. A negative t.y
+// (decal miss) yields transparent black.
+$pure half4 $colorize_grad_4(float4 colorsParam[4], float offsetsParam[4], float2 t) {
+    if (t.y < 0) {
+        return half4(0);
+
+    } else if (t.x <= offsetsParam[0]) {
+        return half4(colorsParam[0]);
+    } else if (t.x < offsetsParam[1]) {
+        return half4(mix(colorsParam[0], colorsParam[1], (t.x - offsetsParam[0]) /
+                                                         (offsetsParam[1] - offsetsParam[0])));
+    } else if (t.x < offsetsParam[2]) {
+        return half4(mix(colorsParam[1], colorsParam[2], (t.x - offsetsParam[1]) /
+                                                         (offsetsParam[2] - offsetsParam[1])));
+    } else if (t.x < offsetsParam[3]) {
+        return half4(mix(colorsParam[2], colorsParam[3], (t.x - offsetsParam[2]) /
+                                                         (offsetsParam[3] - offsetsParam[2])));
+    } else {
+        return half4(colorsParam[3]);
+    }
+}
+
+// Eight-stop variant of $colorize_grad_4, written as an unrolled binary search.
+$pure half4 $colorize_grad_8(float4 colorsParam[8], float offsetsParam[8], float2 t) {
+    if (t.y < 0) {
+        return half4(0);
+
+    // Unrolled binary search through intervals
+    // ( .. 0), (0 .. 1), (1 .. 2), (2 .. 3), (3 .. 4), (4 .. 5), (5 .. 6), (6 .. 7), (7 .. ).
+    } else if (t.x < offsetsParam[4]) {
+        if (t.x < offsetsParam[2]) {
+            if (t.x <= offsetsParam[0]) {
+                return half4(colorsParam[0]);
+            } else if (t.x < offsetsParam[1]) {
+                return half4(mix(colorsParam[0], colorsParam[1],
+                                 (t.x - offsetsParam[0]) /
+                                 (offsetsParam[1] - offsetsParam[0])));
+            } else {
+                return half4(mix(colorsParam[1], colorsParam[2],
+                                 (t.x - offsetsParam[1]) /
+                                 (offsetsParam[2] - offsetsParam[1])));
+            }
+        } else {
+            if (t.x < offsetsParam[3]) {
+                return half4(mix(colorsParam[2], colorsParam[3],
+                                 (t.x - offsetsParam[2]) /
+                                 (offsetsParam[3] - offsetsParam[2])));
+            } else {
+                return half4(mix(colorsParam[3], colorsParam[4],
+                                 (t.x - offsetsParam[3]) /
+                                 (offsetsParam[4] - offsetsParam[3])));
+            }
+        }
+    } else {
+        if (t.x < offsetsParam[6]) {
+            if (t.x < offsetsParam[5]) {
+                return half4(mix(colorsParam[4], colorsParam[5],
+                                 (t.x - offsetsParam[4]) /
+                                 (offsetsParam[5] - offsetsParam[4])));
+            } else {
+                return half4(mix(colorsParam[5], colorsParam[6],
+                                 (t.x - offsetsParam[5]) /
+                                 (offsetsParam[6] - offsetsParam[5])));
+            }
+        } else {
+            if (t.x < offsetsParam[7]) {
+                return half4(mix(colorsParam[6], colorsParam[7],
+                                 (t.x - offsetsParam[6]) /
+                                 (offsetsParam[7] - offsetsParam[6])));
+            } else {
+                return half4(colorsParam[7]);
+            }
+        }
+    }
+}
+
+// Texture-backed colorizer for arbitrary stop counts. The texture stores stop
+// colors on one row (sampled at y = 0.25) and stop offsets on another
+// (y = 0.75); offsets are stored as (mantissa, exponent) pairs and recombined
+// with ldexp. Binary-searches the offsets for the interval containing t.x,
+// then lerps between the two bracketing stops.
+// NOTE(review): unlike its siblings this is not marked $pure — confirm intent.
+half4 $colorize_grad_tex(sampler2D colorsAndOffsetsSampler, int numStops, float2 t) {
+    const float kColorCoord = 0.25;
+    const float kOffsetCoord = 0.75;
+
+    if (t.y < 0) {
+        return half4(0);
+    } else if (t.x == 0) {
+        return sampleLod(colorsAndOffsetsSampler, float2(0, kColorCoord), 0);
+    } else if (t.x == 1) {
+        return sampleLod(colorsAndOffsetsSampler, float2(1, kColorCoord), 0);
+    } else {
+        // Binary search for the pair of stops bracketing t.x.
+        int low = 0;
+        int high = numStops;
+        for (int loop = 1; loop < numStops; loop <<= 1) {
+            int mid = (low + high) / 2;
+            float midFlt = (float(mid) + 0.5) / float(numStops);
+
+            float2 tmp = sampleLod(colorsAndOffsetsSampler, float2(midFlt, kOffsetCoord), 0).xy;
+            float offset = ldexp(tmp.x, int(tmp.y));
+
+            if (t.x < offset) {
+                high = mid;
+            } else {
+                low = mid;
+            }
+        }
+
+        // Lerp between stops `low` and `low + 1`.
+        float lowFlt = (float(low) + 0.5) / float(numStops);
+        float highFlt = (float(low + 1) + 0.5) / float(numStops);
+        half4 color0 = sampleLod(colorsAndOffsetsSampler, float2(lowFlt, kColorCoord), 0);
+        half4 color1 = sampleLod(colorsAndOffsetsSampler, float2(highFlt, kColorCoord), 0);
+
+        float2 tmp = sampleLod(colorsAndOffsetsSampler, float2(lowFlt, kOffsetCoord), 0).xy;
+        float offset0 = ldexp(tmp.x, int(tmp.y));
+
+        tmp = sampleLod(colorsAndOffsetsSampler, float2(highFlt, kOffsetCoord), 0).xy;
+        float offset1 = ldexp(tmp.x, int(tmp.y));
+
+        return half4(mix(color0, color1,
+                         (t.x - offset0) /
+                         (offset1 - offset0)));
+    }
+}
+
+// Linear gradient: projects pos onto the p0->p1 segment; t = 0 at point0Param
+// and t = 1 at point1Param. The returned .y == 1 marks the coordinate valid.
+$pure float2 $linear_grad_layout(float2 point0Param, float2 point1Param, float2 pos) {
+    pos -= point0Param;
+    float2 delta = point1Param - point0Param;
+    float t = dot(pos, delta) / dot(delta, delta);
+    return float2(t, 1);
+}
+
+// Radial gradient: t is the distance from the center divided by the radius.
+$pure float2 $radial_grad_layout(float2 centerParam, float radiusParam, float2 pos) {
+    float t = distance(pos, centerParam) / radiusParam;
+    return float2(t, 1);
+}
+
+// Sweep gradient: t is the angle around the center, mapped to [0, 1] per turn,
+// then biased and scaled.
+$pure float2 $sweep_grad_layout(float2 centerParam, float biasParam, float scaleParam, float2 pos) {
+    pos -= centerParam;
+
+    // Some devices incorrectly implement atan2(y,x) as atan(y/x). In actuality it is
+    // atan2(y,x) = 2 * atan(y / (sqrt(x^2 + y^2) + x)). To work around this we pass in
+    // (sqrt(x^2 + y^2) + x) as the second parameter to atan2 in these cases. We let the device
+    // handle the undefined behavior if the second parameter is 0, instead of doing the divide
+    // ourselves and calling atan with the quotient.
+    float angle = sk_Caps.atan2ImplementedAsAtanYOverX ? 2 * atan(-pos.y, length(pos) - pos.x)
+                                                       : atan(-pos.y, -pos.x);
+
+    // 0.1591549430918 is 1/(2*pi), used since atan returns values [-pi, pi]
+    float t = (angle * 0.1591549430918 + 0.5 + biasParam) * scaleParam;
+    return float2(t, 1);
+}
+
+$pure float3x3 $map_to_unit_x(float2 p0, float2 p1) {
+    // Returns a matrix that maps [p0, p1] to [(0, 0), (1, 0)]. Results are undefined if p0 = p1.
+    // From skia/src/core/SkMatrix.cpp, SkMatrix::setPolyToPoly.
+    return float3x3(
+        0, -1, 0,
+        1, 0, 0,
+        0, 0, 1
+    ) * inverse(float3x3(
+        p1.y - p0.y, p0.x - p1.x, 0,
+        p1.x - p0.x, p1.y - p0.y, 0,
+        p0.x, p0.y, 1
+    ));
+}
+
+// Two-point conical gradient layout: returns (t, 1) for a valid position or
+// (0, -1) when the position has no defined gradient value. Degenerate radial
+// (p0 == p1) and strip (r0 == r1) configurations are handled separately from
+// the general focal-point algorithm.
+$pure float2 $conical_grad_layout(float2 point0Param,
+                                  float2 point1Param,
+                                  float radius0Param,
+                                  float radius1Param,
+                                  float2 pos) {
+    const float SK_ScalarNearlyZero = 1.0 / (1 << 12);
+    float dCenter = distance(point0Param, point1Param);
+    float dRadius = radius1Param - radius0Param;
+
+    // Degenerate case: a radial gradient (p0 = p1).
+    bool radial = dCenter < SK_ScalarNearlyZero;
+
+    // Degenerate case: a strip with bandwidth 2r (r0 = r1).
+    bool strip = abs(dRadius) < SK_ScalarNearlyZero;
+
+    if (radial) {
+        if (strip) {
+            // The start and end inputs are the same in both position and radius.
+            // We don't expect to see this input, but just in case we avoid dividing by zero.
+            return float2(0, -1);
+        }
+
+        float scale = 1 / dRadius;
+        float scaleSign = sign(dRadius);
+        float bias = radius0Param / dRadius;
+
+        float2 pt = (pos - point0Param) * scale;
+        float t = length(pt) * scaleSign - bias;
+        return float2(t, 1);
+
+    } else if (strip) {
+        float3x3 transform = $map_to_unit_x(point0Param, point1Param);
+        float r = radius0Param / dCenter;
+        float r_2 = r * r;
+
+        float2 pt = (transform * pos.xy1).xy;
+        float t = r_2 - pt.y * pt.y;
+        if (t < 0) {
+            // Outside the strip's band: no gradient value.
+            return float2(0, -1);
+        }
+        t = pt.x + sqrt(t);
+        return float2(t, 1);
+
+    } else {
+        // See https://skia.org/docs/dev/design/conical/ for details on how this algorithm works.
+        // Calculate f and swap inputs if necessary (steps 1 and 2).
+        float f = radius0Param / (radius0Param - radius1Param);
+
+        bool isSwapped = abs(f - 1) < SK_ScalarNearlyZero;
+        if (isSwapped) {
+            float2 tmpPt = point0Param;
+            point0Param = point1Param;
+            point1Param = tmpPt;
+            f = 0;
+        }
+
+        // Apply mapping from [Cf, C1] to unit x, and apply the precalculations from steps 3 and 4,
+        // all in the same transformation.
+        float2 Cf = point0Param * (1 - f) + point1Param * f;
+        float3x3 transform = $map_to_unit_x(Cf, point1Param);
+
+        float scaleX = abs(1 - f);
+        float scaleY = scaleX;
+        float r1 = abs(radius1Param - radius0Param) / dCenter;
+        bool isFocalOnCircle = abs(r1 - 1) < SK_ScalarNearlyZero;
+        if (isFocalOnCircle) {
+            scaleX *= 0.5;
+            scaleY *= 0.5;
+        } else {
+            scaleX *= r1 / (r1 * r1 - 1);
+            scaleY /= sqrt(abs(r1 * r1 - 1));
+        }
+        transform = float3x3(
+            scaleX, 0, 0,
+            0, scaleY, 0,
+            0, 0, 1
+        ) * transform;
+
+        float2 pt = (transform * pos.xy1).xy;
+
+        // Continue with step 5 onward.
+        float invR1 = 1 / r1;
+        float dRadiusSign = sign(1 - f);
+        bool isWellBehaved = !isFocalOnCircle && r1 > 1;
+
+        float x_t = -1;
+        if (isFocalOnCircle) {
+            x_t = dot(pt, pt) / pt.x;
+        } else if (isWellBehaved) {
+            x_t = length(pt) - pt.x * invR1;
+        } else {
+            float temp = pt.x * pt.x - pt.y * pt.y;
+            if (temp >= 0) {
+                if (isSwapped || dRadiusSign < 0) {
+                    x_t = -sqrt(temp) - pt.x * invR1;
+                } else {
+                    x_t = sqrt(temp) - pt.x * invR1;
+                }
+            }
+        }
+
+        if (!isWellBehaved && x_t < 0) {
+            return float2(0, -1);
+        }
+
+        float t = f + dRadiusSign * x_t;
+        if (isSwapped) {
+            t = 1 - t;
+        }
+        return float2(t, 1);
+    }
+}
+
+$pure half4 sk_linear_grad_4_shader(float2 coords,
+ float4 colorsParam[4],
+ float offsetsParam[4],
+ float2 point0Param,
+ float2 point1Param,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul) {
+ float2 t = $linear_grad_layout(point0Param, point1Param, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_4(colorsParam, offsetsParam, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_linear_grad_8_shader(float2 coords,
+ float4 colorsParam[8],
+ float offsetsParam[8],
+ float2 point0Param,
+ float2 point1Param,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul) {
+ float2 t = $linear_grad_layout(point0Param, point1Param, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_8(colorsParam, offsetsParam, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_linear_grad_tex_shader(float2 coords,
+ float2 point0Param,
+ float2 point1Param,
+ int numStops,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul,
+ sampler2D colorAndOffsetSampler) {
+ float2 t = $linear_grad_layout(point0Param, point1Param, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_tex(colorAndOffsetSampler, numStops, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_radial_grad_4_shader(float2 coords,
+ float4 colorsParam[4],
+ float offsetsParam[4],
+ float2 centerParam,
+ float radiusParam,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul) {
+ float2 t = $radial_grad_layout(centerParam, radiusParam, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_4(colorsParam, offsetsParam, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_radial_grad_8_shader(float2 coords,
+ float4 colorsParam[8],
+ float offsetsParam[8],
+ float2 centerParam,
+ float radiusParam,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul) {
+ float2 t = $radial_grad_layout(centerParam, radiusParam, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_8(colorsParam, offsetsParam, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_radial_grad_tex_shader(float2 coords,
+ float2 centerParam,
+ float radiusParam,
+ int numStops,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul,
+ sampler2D colorAndOffsetSampler) {
+ float2 t = $radial_grad_layout(centerParam, radiusParam, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_tex(colorAndOffsetSampler, numStops, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_sweep_grad_4_shader(float2 coords,
+ float4 colorsParam[4],
+ float offsetsParam[4],
+ float2 centerParam,
+ float biasParam,
+ float scaleParam,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul) {
+ float2 t = $sweep_grad_layout(centerParam, biasParam, scaleParam, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_4(colorsParam, offsetsParam, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_sweep_grad_8_shader(float2 coords,
+ float4 colorsParam[8],
+ float offsetsParam[8],
+ float2 centerParam,
+ float biasParam,
+ float scaleParam,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul) {
+ float2 t = $sweep_grad_layout(centerParam, biasParam, scaleParam, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_8(colorsParam, offsetsParam, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_sweep_grad_tex_shader(float2 coords,
+ float2 centerParam,
+ float biasParam,
+ float scaleParam,
+ int numStops,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul,
+ sampler2D colorAndOffsetSampler) {
+ float2 t = $sweep_grad_layout(centerParam, biasParam, scaleParam, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_tex(colorAndOffsetSampler, numStops, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_conical_grad_4_shader(float2 coords,
+ float4 colorsParam[4],
+ float offsetsParam[4],
+ float2 point0Param,
+ float2 point1Param,
+ float radius0Param,
+ float radius1Param,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul) {
+ float2 t = $conical_grad_layout(point0Param, point1Param, radius0Param, radius1Param, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_4(colorsParam, offsetsParam, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_conical_grad_8_shader(float2 coords,
+ float4 colorsParam[8],
+ float offsetsParam[8],
+ float2 point0Param,
+ float2 point1Param,
+ float radius0Param,
+ float radius1Param,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul) {
+ float2 t = $conical_grad_layout(point0Param, point1Param, radius0Param, radius1Param, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_8(colorsParam, offsetsParam, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_conical_grad_tex_shader(float2 coords,
+ float2 point0Param,
+ float2 point1Param,
+ float radius0Param,
+ float radius1Param,
+ int numStops,
+ int tileMode,
+ int colorSpace,
+ int doUnpremul,
+ sampler2D colorAndOffsetSampler) {
+ float2 t = $conical_grad_layout(point0Param, point1Param, radius0Param, radius1Param, coords);
+ t = $tile_grad(tileMode, t);
+ half4 color = $colorize_grad_tex(colorAndOffsetSampler, numStops, t);
+ return $interpolated_to_rgb_unpremul(color, colorSpace, doUnpremul);
+}
+
+$pure half4 sk_matrix_colorfilter(half4 colorIn, float4x4 m, float4 v, int inHSLA) {
+ if (bool(inHSLA)) {
+ colorIn = $rgb_to_hsl(colorIn.rgb, colorIn.a); // includes unpremul
+ } else {
+ colorIn = unpremul(colorIn);
+ }
+
+ half4 colorOut = half4((m * colorIn) + v);
+
+ if (bool(inHSLA)) {
+ colorOut = $hsl_to_rgb(colorOut.rgb, colorOut.a); // includes clamp and premul
+ } else {
+ colorOut = saturate(colorOut);
+ colorOut.rgb *= colorOut.a;
+ }
+
+ return colorOut;
+}
+
+// This method computes the 4 x-coordinates ([0..1]) that should be used to look
+// up in the Perlin noise shader's noise table.
+$pure half4 noise_helper(half2 noiseVec,
+ half2 stitchData,
+ int stitching,
+ sampler2D permutationSampler) {
+ const half kBlockSize = 256.0;
+
+ half4 floorVal;
+ floorVal.xy = floor(noiseVec);
+ floorVal.zw = floorVal.xy + half2(1);
+
+ // Adjust frequencies if we're stitching tiles
+ if (bool(stitching)) {
+ if (floorVal.x >= stitchData.x) { floorVal.x -= stitchData.x; };
+ if (floorVal.y >= stitchData.y) { floorVal.y -= stitchData.y; };
+ if (floorVal.z >= stitchData.x) { floorVal.z -= stitchData.x; };
+ if (floorVal.w >= stitchData.y) { floorVal.w -= stitchData.y; };
+ }
+
+ half sampleX = sample(permutationSampler, half2(floorVal.x/kBlockSize, 0.5)).r;
+ half sampleY = sample(permutationSampler, half2(floorVal.z/kBlockSize, 0.5)).r;
+
+ half2 latticeIdx = half2(sampleX, sampleY);
+
+ const half kInv255 = 0.003921569; // 1.0 / 255.0
+
+ // Aggressively round to the nearest exact (N / 255) floating point values.
+ // This prevents rounding errors on some platforms (e.g., Tegras)
+ latticeIdx = floor(latticeIdx * half2(255.0) + half2(0.5)) * half2(kInv255);
+
+ // Get (x,y) coordinates with the permuted x
+ half4 noiseXCoords = kBlockSize*latticeIdx.xyxy + floorVal.yyww;
+
+ noiseXCoords /= half4(kBlockSize);
+ return noiseXCoords;
+}
+
+// TODO: Move this to sksl_shared.sksl and try to share with Ganesh
+$pure half4 noise_function(half2 noiseVec,
+ half4 noiseXCoords,
+ sampler2D noiseSampler) {
+
+ half2 fractVal = fract(noiseVec);
+
+ // smooth curve : t^2*(3 - 2*t)
+ half2 noiseSmooth = fractVal*fractVal*(half2(3) - 2*fractVal);
+
+ // This is used to convert the two 16bit integers packed into rgba 8 bit input into
+ // a [-1,1] vector
+ const half kInv256 = 0.00390625; // 1.0 / 256.0
+
+ half4 result;
+
+ for (int channel = 0; channel < 4; channel++) {
+
+ // There are 4 lines in the noise texture, put y coords at center of each.
+ half chanCoord = (half(channel) + 0.5) / 4.0;
+
+ half4 sampleA = sample(noiseSampler, half2(noiseXCoords.x, chanCoord));
+ half4 sampleB = sample(noiseSampler, half2(noiseXCoords.y, chanCoord));
+ half4 sampleC = sample(noiseSampler, half2(noiseXCoords.w, chanCoord));
+ half4 sampleD = sample(noiseSampler, half2(noiseXCoords.z, chanCoord));
+
+ half2 uv;
+ half2 tmpFractVal = fractVal;
+
+ // Compute u, at offset (0,0)
+ uv.x = dot((sampleA.ga + sampleA.rb*kInv256)*2 - half2(1), tmpFractVal);
+
+ // Compute v, at offset (-1,0)
+ tmpFractVal.x -= 1.0;
+ uv.y = dot((sampleB.ga + sampleB.rb*kInv256)*2 - half2(1), tmpFractVal);
+
+ // Compute 'a' as a linear interpolation of 'u' and 'v'
+ half2 ab;
+ ab.x = mix(uv.x, uv.y, noiseSmooth.x);
+
+ // Compute v, at offset (-1,-1)
+ tmpFractVal.y -= 1.0;
+ uv.y = dot((sampleC.ga + sampleC.rb*kInv256)*2 - half2(1), tmpFractVal);
+
+ // Compute u, at offset (0,-1)
+ tmpFractVal.x += 1.0;
+ uv.x = dot((sampleD.ga + sampleD.rb*kInv256)*2 - half2(1), tmpFractVal);
+
+ // Compute 'b' as a linear interpolation of 'u' and 'v'
+ ab.y = mix(uv.x, uv.y, noiseSmooth.x);
+
+ // Compute the noise as a linear interpolation of 'a' and 'b'
+ result[channel] = mix(ab.x, ab.y, noiseSmooth.y);
+ }
+
+ return result;
+}
+
+// permutationSampler is [kBlockSize x 1] A8
+// noiseSampler is [kBlockSize x 4] RGBA8 premul
+$pure half4 perlin_noise_shader(float2 coords,
+ float2 baseFrequency,
+ float2 stitchDataIn,
+ int noiseType,
+ int numOctaves,
+ int stitching,
+ sampler2D permutationSampler,
+ sampler2D noiseSampler) {
+ const int kFractalNoise_Type = 0;
+ const int kTurbulence_Type = 1;
+
+ // There are rounding errors if the floor operation is not performed here
+ half2 noiseVec = half2(floor(coords.xy) * baseFrequency);
+
+ // Clear the color accumulator
+ half4 color = half4(0);
+
+ half2 stitchData = half2(stitchDataIn);
+
+ half ratio = 1.0;
+
+ // Loop over all octaves
+ for (int octave = 0; octave < numOctaves; ++octave) {
+ half4 noiseXCoords = noise_helper(noiseVec, stitchData, stitching, permutationSampler);
+
+ half4 tmp = noise_function(noiseVec, noiseXCoords, noiseSampler);
+
+ if (noiseType != kFractalNoise_Type) {
+ // For kTurbulence_Type the result is: abs(noise[-1,1])
+ tmp = abs(tmp);
+ }
+
+ tmp *= ratio;
+ color += tmp;
+
+ noiseVec *= half2(2.0);
+ ratio *= 0.5;
+ stitchData *= half2(2.0);
+ }
+
+ if (noiseType == kFractalNoise_Type) {
+ // For kFractalNoise_Type the result is: noise[-1,1] * 0.5 + 0.5
+ color = color * half4(0.5) + half4(0.5);
+ }
+
+ // Clamp values
+ color = saturate(color);
+
+ // Pre-multiply the result
+ return half4(color.rgb * color.aaa, color.a);
+}
+
+$pure half4 sk_blend(int blendMode, half4 src, half4 dst) {
+ const int kClear = 0;
+ const int kSrc = 1;
+ const int kDst = 2;
+ const int kSrcOver = 3;
+ const int kDstOver = 4;
+ const int kSrcIn = 5;
+ const int kDstIn = 6;
+ const int kSrcOut = 7;
+ const int kDstOut = 8;
+ const int kSrcATop = 9;
+ const int kDstATop = 10;
+ const int kXor = 11;
+ const int kPlus = 12;
+ const int kModulate = 13;
+ const int kScreen = 14;
+ const int kOverlay = 15;
+ const int kDarken = 16;
+ const int kLighten = 17;
+ const int kColorDodge = 18;
+ const int kColorBurn = 19;
+ const int kHardLight = 20;
+ const int kSoftLight = 21;
+ const int kDifference = 22;
+ const int kExclusion = 23;
+ const int kMultiply = 24;
+ const int kHue = 25;
+ const int kSaturation = 26;
+ const int kColor = 27;
+ const int kLuminosity = 28;
+
+ switch (blendMode) {
+ case kClear: { return blend_clear(src, dst); }
+ case kSrc: { return blend_src(src, dst); }
+ case kDst: { return blend_dst(src, dst); }
+ case kSrcOver: { return blend_porter_duff(half4(1, 0, 0, -1), src, dst); }
+ case kDstOver: { return blend_porter_duff(half4(0, 1, -1, 0), src, dst); }
+ case kSrcIn: { return blend_porter_duff(half4(0, 0, 1, 0), src, dst); }
+ case kDstIn: { return blend_porter_duff(half4(0, 0, 0, 1), src, dst); }
+ case kSrcOut: { return blend_porter_duff(half4(0, 0, -1, 0), src, dst); }
+ case kDstOut: { return blend_porter_duff(half4(0, 0, 0, -1), src, dst); }
+ case kSrcATop: { return blend_porter_duff(half4(0, 0, 1, -1), src, dst); }
+ case kDstATop: { return blend_porter_duff(half4(0, 0, -1, 1), src, dst); }
+ case kXor: { return blend_porter_duff(half4(0, 0, -1, -1), src, dst); }
+ case kPlus: { return blend_porter_duff(half4(1, 1, 0, 0), src, dst); }
+ case kModulate: { return blend_modulate(src, dst); }
+ case kScreen: { return blend_screen(src, dst); }
+ case kOverlay: { return blend_overlay(/*flip=*/0, src, dst); }
+ case kDarken: { return blend_darken(/*mode=*/1, src, dst); }
+ case kLighten: { return blend_darken(/*mode=*/-1, src, dst); }
+ case kColorDodge: { return blend_color_dodge(src, dst); }
+ case kColorBurn: { return blend_color_burn(src, dst); }
+ case kHardLight: { return blend_overlay(/*flip=*/1, src, dst); }
+ case kSoftLight: { return blend_soft_light(src, dst); }
+ case kDifference: { return blend_difference(src, dst); }
+ case kExclusion: { return blend_exclusion(src, dst); }
+ case kMultiply: { return blend_multiply(src, dst); }
+ case kHue: { return blend_hslc(/*flipSat=*/half2(0, 1), src, dst); }
+ case kSaturation: { return blend_hslc(/*flipSat=*/half2(1), src, dst); }
+ case kColor: { return blend_hslc(/*flipSat=*/half2(0), src, dst); }
+ case kLuminosity: { return blend_hslc(/*flipSat=*/half2(1, 0), src, dst); }
+ default: return half4(0); // Avoids 'blend can exit without returning a value' error
+ }
+}
+
+$pure half4 sk_blend_shader(int blendMode, half4 dst, half4 src) {
+ return sk_blend(blendMode, src, dst);
+}
+
+$pure half4 porter_duff_blend_shader(half4 blendOp, half4 dst, half4 src) {
+ return blend_porter_duff(blendOp, src, dst);
+}
+
+$pure half4 sk_blend_colorfilter(half4 dstColor, int blendMode, float4 srcColor) {
+ return sk_blend(blendMode, half4(srcColor), dstColor);
+}
+
+$pure half4 sk_table_colorfilter(half4 inColor, sampler2D s) {
+ half4 coords = unpremul(inColor) * 255.0/256.0 + 0.5/256.0;
+ half4 color = half4(sample(s, half2(coords.r, 3.0/8.0)).r,
+ sample(s, half2(coords.g, 5.0/8.0)).r,
+ sample(s, half2(coords.b, 7.0/8.0)).r,
+ 1);
+ return color * sample(s, half2(coords.a, 1.0/8.0)).r;
+}
+
+$pure half4 sk_gaussian_colorfilter(half4 inColor) {
+ half factor = 1 - inColor.a;
+ factor = exp(-factor * factor * 4) - 0.018;
+ return half4(factor);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Support functions for analytic round rectangles
+
+// Calculates 1/|∇| in device space by applying the chain rule to a local gradient vector and the
+// 2x2 Jacobian describing the transform from local-to-device space. For non-perspective, this is
+// equivalent to the "normal matrix", or the inverse transpose. For perspective, J should be
+// W(u,v) [m00' - m20'u m01' - m21'u] derived from the first two columns of the 3x3 inverse.
+// [m10' - m20'v m11' - m21'v]
+$pure float inverse_grad_len(float2 localGrad, float2x2 jacobian) {
+ // NOTE: By chain rule, the local gradient is on the left side of the Jacobian matrix
+ float2 devGrad = localGrad * jacobian;
+ // NOTE: This uses the L2 norm, which is more accurate than the L1 norm used by fwidth().
+ // TODO: Switch to L1 since it is a 2x perf improvement according to Xcode with little visual
+ // impact, but start with L2 to measure the change separately from the algorithmic update.
+ // return 1.0 / (abs(devGrad.x) + abs(devGrad.y));
+ return inversesqrt(dot(devGrad, devGrad));
+}
+
+// Returns distance from both sides of a stroked circle or ellipse. Elliptical coverage is
+// only accurate if strokeRadius = 0. A positive value represents the interior of the stroke.
+$pure float2 elliptical_distance(float2 uv, float2 radii, float strokeRadius, float2x2 jacobian) {
+ // We do need to evaluate up to two circle equations: one with
+ // R = cornerRadius(r)+strokeRadius(s), and another with R = r-s.
+ // This can be consolidated into a common evaluation against a circle of radius sqrt(r^2+s^2):
+ // (x/(r+/-s))^2 + (y/(r+/-s))^2 = 1
+ // x^2 + y^2 = (r+/-s)^2
+ // x^2 + y^2 = r^2 + s^2 +/- 2rs
+    //    (x/sqrt(r^2+s^2))^2 + (y/sqrt(r^2+s^2))^2 = 1 +/- 2rs/(r^2+s^2)
+ // The 2rs/(r^2+s^2) is the "width" that adjusts the implicit function to the outer or inner
+ // edge of the stroke. For fills and hairlines, s = 0, which means these operations remain valid
+ // for elliptical corners where radii holds the different X and Y corner radii.
+ float2 invR2 = 1.0 / (radii * radii + strokeRadius*strokeRadius);
+ float2 normUV = invR2 * uv;
+ float invGradLength = inverse_grad_len(normUV, jacobian);
+
+ // Since normUV already includes 1/r^2 in the denominator, dot with just 'uv' instead.
+ float f = 0.5 * invGradLength * (dot(uv, normUV) - 1.0);
+
+ // This is 0 for fills/hairlines, which are the only types that allow
+ // elliptical corners (strokeRadius == 0). For regular strokes just use X.
+ float width = radii.x * strokeRadius * invR2.x * invGradLength;
+ return float2(width - f, width + f);
+}
+
+// Accumulates the minimum (and negative maximum) of the outer and inner corner distances in 'dist'
+// for a possibly elliptical corner with 'radii' and relative pixel location specified by
+// 'cornerEdgeDist'. The corner's basis relative to the jacobian is defined in 'xyFlip'.
+void corner_distance(inout float2 dist,
+ float2x2 jacobian,
+ float2 strokeParams,
+ float2 cornerEdgeDist,
+ float2 xyFlip,
+ float2 radii) {
+ float2 uv = radii - cornerEdgeDist;
+ // NOTE: For mitered corners uv > 0 only if it's stroked, and in that case the
+ // subsequent conditions skip calculating anything.
+ if (uv.x > 0.0 && uv.y > 0.0) {
+ if ((radii.x > 0.0 && radii.y > 0.0) ||
+ (strokeParams.x > 0.0 && strokeParams.y < 0.0 /* round-join */)) {
+ // A rounded corner so incorporate outer elliptical distance if we're within the
+ // quarter circle.
+ float2 d = elliptical_distance(uv * xyFlip, radii, strokeParams.x, jacobian);
+ if (radii.x - strokeParams.x <= 0.0) {
+ d.y = 1.0; // disregard inner curve since it's collapsed into an inner miter.
+ } else {
+ d.y *= -1.0; // Negate so that "min" accumulates the maximum value instead
+ }
+ dist = min(dist, d);
+ } else if (strokeParams.y == 0.0 /* bevel-join */) {
+ // Bevels are--by construction--interior mitered, so inner distance is based
+ // purely on the edge distance calculations, but the outer distance is to a 45-degree
+ // line and not the vertical/horizontal lines of the other edges.
+ float bevelDist = (strokeParams.x - uv.x - uv.y) * inverse_grad_len(xyFlip, jacobian);
+ dist.x = min(dist.x, bevelDist);
+ } // Else it's a miter so both inner and outer distances are unmodified
+ } // Else we're not affected by the corner so leave distances unmodified
+}
+
+// Accumulates the minimum (and negative maximum) of the outer and inner corner distances into 'd',
+// for all four corners of a [round] rectangle. 'edgeDists' should be ordered LTRB with positive
+// distance representing the interior of the edge. 'xRadii' and 'yRadii' should hold the per-corner
+// elliptical radii, ordered TL, TR, BR, BL.
+void corner_distances(inout float2 d,
+ float2x2 J,
+ float2 stroke, // {radii, joinStyle}, see StrokeStyle struct definition
+ float4 edgeDists,
+ float4 xRadii,
+ float4 yRadii) {
+ corner_distance(d, J, stroke, edgeDists.xy, float2(-1.0, -1.0), float2(xRadii[0], yRadii[0]));
+ corner_distance(d, J, stroke, edgeDists.zy, float2( 1.0, -1.0), float2(xRadii[1], yRadii[1]));
+ corner_distance(d, J, stroke, edgeDists.zw, float2( 1.0, 1.0), float2(xRadii[2], yRadii[2]));
+ corner_distance(d, J, stroke, edgeDists.xw, float2(-1.0, 1.0), float2(xRadii[3], yRadii[3]));
+}
diff --git a/gfx/skia/skia/src/sksl/sksl_graphite_vert.sksl b/gfx/skia/skia/src/sksl/sksl_graphite_vert.sksl
new file mode 100644
index 0000000000..4f20c0bca5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_graphite_vert.sksl
@@ -0,0 +1,535 @@
+// Graphite-specific vertex shader code
+
+const float $PI = 3.141592653589793238;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Support functions for tessellating path renderers
+
+const float $kCubicCurveType = 0; // skgpu::tess::kCubicCurveType
+const float $kConicCurveType = 1; // skgpu::tess::kConicCurveType
+const float $kTriangularConicCurveType = 2; // skgpu::tess::kTriangularConicCurveType
+
+// This function can be used on GPUs with infinity support to infer the curve type from the specific
+// path control-point encoding used by tessellating path renderers. Calling this function on a
+// platform that lacks infinity support may result in a shader compilation error.
+$pure float curve_type_using_inf_support(float4 p23) {
+ if (isinf(p23.z)) {
+ return $kTriangularConicCurveType;
+ }
+ if (isinf(p23.w)) {
+ return $kConicCurveType;
+ }
+ return $kCubicCurveType;
+}
+
+$pure bool $is_conic_curve(float curveType) {
+ return curveType != $kCubicCurveType;
+}
+
+$pure bool $is_triangular_conic_curve(float curveType) {
+ return curveType == $kTriangularConicCurveType;
+}
+
+// Wang's formula gives the minimum number of evenly spaced (in the parametric sense) line segments
+// that a bezier curve must be chopped into in order to guarantee all lines stay within a distance
+// of "1/precision" pixels from the true curve. Its definition for a bezier curve of degree "n" is
+// as follows:
+//
+// maxLength = max([length(p[i+2] - 2p[i+1] + p[i]) for (0 <= i <= n-2)])
+// numParametricSegments = sqrt(maxLength * precision * n*(n - 1)/8)
+//
+// (Goldman, Ron. (2003). 5.6.3 Wang's Formula. "Pyramid Algorithms: A Dynamic Programming Approach
+// to Curves and Surfaces for Geometric Modeling". Morgan Kaufmann Publishers.)
+
+const float $kDegree = 3;
+const float $kPrecision = 4; // Must match skgpu::tess::kPrecision
+const float $kLengthTerm = ($kDegree * ($kDegree - 1) / 8.0) * $kPrecision;
+const float $kLengthTermPow2 = (($kDegree * $kDegree) * (($kDegree - 1) * ($kDegree - 1)) / 64.0) *
+ ($kPrecision * $kPrecision);
+
+// Returns the length squared of the largest forward difference from Wang's cubic formula.
+$pure float $wangs_formula_max_fdiff_p2(float2 p0, float2 p1, float2 p2, float2 p3,
+ float2x2 matrix) {
+ float2 d0 = matrix * (fma(float2(-2), p1, p2) + p0);
+ float2 d1 = matrix * (fma(float2(-2), p2, p3) + p1);
+ return max(dot(d0,d0), dot(d1,d1));
+}
+
+$pure float $wangs_formula_cubic(float2 p0, float2 p1, float2 p2, float2 p3,
+ float2x2 matrix) {
+ float m = $wangs_formula_max_fdiff_p2(p0, p1, p2, p3, matrix);
+ return max(ceil(sqrt($kLengthTerm * sqrt(m))), 1.0);
+}
+
+$pure float $wangs_formula_cubic_log2(float2 p0, float2 p1, float2 p2, float2 p3,
+ float2x2 matrix) {
+ float m = $wangs_formula_max_fdiff_p2(p0, p1, p2, p3, matrix);
+ return ceil(log2(max($kLengthTermPow2 * m, 1.0)) * .25);
+}
+
+$pure float $wangs_formula_conic_p2(float2 p0, float2 p1, float2 p2, float w) {
+ // Translate the bounding box center to the origin.
+ float2 C = (min(min(p0, p1), p2) + max(max(p0, p1), p2)) * 0.5;
+ p0 -= C;
+ p1 -= C;
+ p2 -= C;
+
+ // Compute max length.
+ float m = sqrt(max(max(dot(p0,p0), dot(p1,p1)), dot(p2,p2)));
+
+ // Compute forward differences.
+ float2 dp = fma(float2(-2.0 * w), p1, p0) + p2;
+ float dw = abs(fma(-2.0, w, 2.0));
+
+ // Compute numerator and denominator for parametric step size of linearization. Here, the
+ // epsilon referenced from the cited paper is 1/precision.
+ float rp_minus_1 = max(0.0, fma(m, $kPrecision, -1.0));
+ float numer = length(dp) * $kPrecision + rp_minus_1 * dw;
+ float denom = 4 * min(w, 1.0);
+
+ return numer/denom;
+}
+
+$pure float $wangs_formula_conic(float2 p0, float2 p1, float2 p2, float w) {
+ float n2 = $wangs_formula_conic_p2(p0, p1, p2, w);
+ return max(ceil(sqrt(n2)), 1.0);
+}
+
+$pure float $wangs_formula_conic_log2(float2 p0, float2 p1, float2 p2, float w) {
+ float n2 = $wangs_formula_conic_p2(p0, p1, p2, w);
+ return ceil(log2(max(n2, 1.0)) * .5);
+}
+
+// Returns the normalized difference between a and b, i.e. normalize(a - b), with care taken for
+// the case where 'a' and/or 'b' have large coordinates.
+$pure float2 $robust_normalize_diff(float2 a, float2 b) {
+ float2 diff = a - b;
+ if (diff == float2(0.0)) {
+ return float2(0.0);
+ } else {
+ float invMag = 1.0 / max(abs(diff.x), abs(diff.y));
+ return normalize(invMag * diff);
+ }
+}
+
+// Returns the cosine of the angle between a and b, assuming a and b are unit vectors already.
+// Guaranteed to be between [-1, 1].
+$pure float $cosine_between_unit_vectors(float2 a, float2 b) {
+ // Since a and b are assumed to be normalized, the cosine is equal to the dot product, although
+ // we clamp that to ensure it falls within the expected range of [-1, 1].
+ return clamp(dot(a, b), -1.0, 1.0);
+}
+
+// Extends the middle radius to either the miter point, or the bevel edge if we surpassed the
+// miter limit and need to revert to a bevel join.
+$pure float $miter_extent(float cosTheta, float miterLimit) {
+ float x = fma(cosTheta, .5, .5);
+ return (x * miterLimit * miterLimit >= 1.0) ? inversesqrt(x) : sqrt(x);
+}
+
+// Returns the number of radial segments required for each radian of rotation, in order for the
+// curve to appear "smooth" as defined by the approximate device-space stroke radius.
+$pure float $num_radial_segments_per_radian(float approxDevStrokeRadius) {
+ return .5 / acos(max(1.0 - (1.0 / $kPrecision) / approxDevStrokeRadius, -1.0));
+}
+
+// Unlike mix(), this does not return b when t==1. But it otherwise seems to get better
+// precision than "a*(1 - t) + b*t" for things like chopping cubics on exact cusp points.
+// We override this result anyway when t==1 so it shouldn't be a problem.
+$pure float $unchecked_mix(float a, float b, float T) {
+ return fma(b - a, T, a);
+}
+$pure float2 $unchecked_mix(float2 a, float2 b, float T) {
+ return fma(b - a, float2(T), a);
+}
+$pure float4 $unchecked_mix(float4 a, float4 b, float4 T) {
+ return fma(b - a, T, a);
+}
+
+// Compute a vertex position for the curve described by p01 and p23 packed control points,
+// tessellated to the given resolve level, and assuming it will be drawn as a filled curve.
+$pure float2 tessellate_filled_curve(float2x2 vectorXform,
+ float resolveLevel, float idxInResolveLevel,
+ float4 p01, float4 p23,
+ float curveType) {
+ float2 localcoord;
+ if ($is_triangular_conic_curve(curveType)) {
+ // This patch is an exact triangle.
+ localcoord = (resolveLevel != 0) ? p01.zw
+ : (idxInResolveLevel != 0) ? p23.xy
+ : p01.xy;
+ } else {
+ float2 p0=p01.xy, p1=p01.zw, p2=p23.xy, p3=p23.zw;
+ float w = -1; // w < 0 tells us to treat the instance as an integral cubic.
+ float maxResolveLevel;
+ if ($is_conic_curve(curveType)) {
+ // Conics are 3 points, with the weight in p3.
+ w = p3.x;
+ maxResolveLevel = $wangs_formula_conic_log2(vectorXform*p0,
+ vectorXform*p1,
+ vectorXform*p2, w);
+ p1 *= w; // Unproject p1.
+ p3 = p2; // Duplicate the endpoint for shared code that also runs on cubics.
+ } else {
+ // The patch is an integral cubic.
+ maxResolveLevel = $wangs_formula_cubic_log2(p0, p1, p2, p3, vectorXform);
+ }
+ if (resolveLevel > maxResolveLevel) {
+ // This vertex is at a higher resolve level than we need. Demote to a lower
+ // resolveLevel, which will produce a degenerate triangle.
+ idxInResolveLevel = floor(ldexp(idxInResolveLevel,
+ int(maxResolveLevel - resolveLevel)));
+ resolveLevel = maxResolveLevel;
+ }
+ // Promote our location to a discrete position in the maximum fixed resolve level.
+ // This is extra paranoia to ensure we get the exact same fp32 coordinates for
+ // colocated points from different resolve levels (e.g., the vertices T=3/4 and
+ // T=6/8 should be exactly colocated).
+ float fixedVertexID = floor(.5 + ldexp(idxInResolveLevel, int(5 - resolveLevel)));
+ if (0 < fixedVertexID && fixedVertexID < 32) {
+ float T = fixedVertexID * (1 / 32.0);
+
+ // Evaluate at T. Use De Casteljau's for its accuracy and stability.
+ float2 ab = mix(p0, p1, T);
+ float2 bc = mix(p1, p2, T);
+ float2 cd = mix(p2, p3, T);
+ float2 abc = mix(ab, bc, T);
+ float2 bcd = mix(bc, cd, T);
+ float2 abcd = mix(abc, bcd, T);
+
+ // Evaluate the conic weight at T.
+ float u = mix(1.0, w, T);
+ float v = w + 1 - u; // == mix(w, 1, T)
+ float uv = mix(u, v, T);
+
+ localcoord = (w < 0) ? /*cubic*/ abcd : /*conic*/ abc/uv;
+ } else {
+ localcoord = (fixedVertexID == 0) ? p0.xy : p3.xy;
+ }
+ }
+ return localcoord;
+}
+
+// Device coords are in xy, local coords are in zw, since for now perspective isn't supported.
+$pure float4 tessellate_stroked_curve(float edgeID, float maxEdges,
+ float2x2 affineMatrix,
+ float2 translate,
+ float maxScale /* derived from affineMatrix */,
+ float4 p01, float4 p23,
+ float2 lastControlPoint,
+ float2 strokeParams,
+ float curveType) {
+ float2 p0=p01.xy, p1=p01.zw, p2=p23.xy, p3=p23.zw;
+ float w = -1; // w<0 means the curve is an integral cubic.
+ if ($is_conic_curve(curveType)) {
+ // Conics are 3 points, with the weight in p3.
+ w = p3.x;
+ p3 = p2; // Setting p3 equal to p2 works for the remaining rotational logic.
+ }
+
+ // Call Wang's formula to determine parametric segments before transforming points for
+ // hairlines so that it is consistent with how the CPU tested the control points for chopping.
+ float numParametricSegments;
+ if (w < 0) {
+ if (p0 == p1 && p2 == p3) {
+ numParametricSegments = 1; // a line
+ } else {
+ numParametricSegments = $wangs_formula_cubic(p0, p1, p2, p3, affineMatrix);
+ }
+ } else {
+ numParametricSegments = $wangs_formula_conic(affineMatrix * p0,
+ affineMatrix * p1,
+ affineMatrix * p2, w);
+ }
+
+ // Matches skgpu::tess::StrokeParams
+ float strokeRadius = strokeParams.x;
+ float joinType = strokeParams.y; // <0 = round join, ==0 = bevel join, >0 encodes miter limit
+ bool isHairline = strokeParams.x == 0.0;
+ float numRadialSegmentsPerRadian;
+ if (isHairline) {
+ numRadialSegmentsPerRadian = $num_radial_segments_per_radian(1.0);
+ strokeRadius = 0.5;
+ } else {
+ numRadialSegmentsPerRadian = $num_radial_segments_per_radian(maxScale * strokeParams.x);
+ }
+
+ if (isHairline) {
+ // Hairline case. Transform the points before tessellation. We can still hold off on the
+ // translate until the end; we just need to perform the scale and skew right now.
+ p0 = affineMatrix * p0;
+ p1 = affineMatrix * p1;
+ p2 = affineMatrix * p2;
+ p3 = affineMatrix * p3;
+ lastControlPoint = affineMatrix * lastControlPoint;
+ }
+
+ // Find the starting and ending tangents.
+ float2 tan0 = $robust_normalize_diff((p0 == p1) ? ((p1 == p2) ? p3 : p2) : p1, p0);
+ float2 tan1 = $robust_normalize_diff(p3, (p3 == p2) ? ((p2 == p1) ? p0 : p1) : p2);
+ if (tan0 == float2(0)) {
+ // The stroke is a point. This special case tells us to draw a stroke-width circle as a
+ // 180 degree point stroke instead.
+ tan0 = float2(1,0);
+ tan1 = float2(-1,0);
+ }
+
+ // Determine how many edges to give to the join. We emit the first and final edges
+ // of the join twice: once full width and once restricted to half width. This guarantees
+ // perfect seaming by matching the vertices from the join as well as from the strokes on
+ // either side.
+ float numEdgesInJoin;
+ if (joinType >= 0 /*Is the join not a round type?*/) {
+ // Bevel(0) and miter(+) joins get 1 and 2 segments respectively.
+ // +2 because we emit the beginning and ending edges twice (see above comments).
+ numEdgesInJoin = sign(joinType) + 1 + 2;
+ } else {
+ float2 prevTan = $robust_normalize_diff(p0, lastControlPoint);
+ float joinRads = acos($cosine_between_unit_vectors(prevTan, tan0));
+ float numRadialSegmentsInJoin = max(ceil(joinRads * numRadialSegmentsPerRadian), 1);
+ // +2 because we emit the beginning and ending edges twice (see above comment).
+ numEdgesInJoin = numRadialSegmentsInJoin + 2;
+ // The stroke section needs at least two edges. Don't assign more to the join than
+ // "maxEdges - 2". (This is only relevant when the ideal max edge count calculated
+ // on the CPU had to be limited to maxEdges in the draw call).
+ numEdgesInJoin = min(numEdgesInJoin, maxEdges - 2);
+ }
+
+ // Find which direction the curve turns.
+ // NOTE: Since the curve is not allowed to inflect, we can just check F'(.5) x F''(.5).
+ // NOTE: F'(.5) x F''(.5) has the same sign as (P2 - P0) x (P3 - P1)
+ float turn = cross_length_2d(p2 - p0, p3 - p1);
+ float combinedEdgeID = abs(edgeID) - numEdgesInJoin;
+ if (combinedEdgeID < 0) {
+ tan1 = tan0;
+ // Don't let tan0 become zero. The code as-is isn't built to handle that case. tan0=0
+ // means the join is disabled, and to disable it with the existing code we can leave
+ // tan0 equal to tan1.
+ if (lastControlPoint != p0) {
+ tan0 = $robust_normalize_diff(p0, lastControlPoint);
+ }
+ turn = cross_length_2d(tan0, tan1);
+ }
+
+ // Calculate the curve's starting angle and rotation.
+ float cosTheta = $cosine_between_unit_vectors(tan0, tan1);
+ float rotation = acos(cosTheta);
+ if (turn < 0) {
+ // Adjust sign of rotation to match the direction the curve turns.
+ rotation = -rotation;
+ }
+
+ float numRadialSegments;
+ float strokeOutset = sign(edgeID); // +1 or -1; selects which side of the centerline to offset toward.
+ if (combinedEdgeID < 0) {
+ // We belong to the preceding join. The first and final edges get duplicated, so we only
+ // have "numEdgesInJoin - 2" segments.
+ numRadialSegments = numEdgesInJoin - 2;
+ numParametricSegments = 1; // Joins don't have parametric segments.
+ p3 = p2 = p1 = p0; // Colocate all points on the junction point.
+ // Shift combinedEdgeID to the range [-1, numRadialSegments]. This duplicates the first
+ // edge and lands one edge at the very end of the join. (The duplicated final edge will
+ // actually come from the section of our strip that belongs to the stroke.)
+ combinedEdgeID += numRadialSegments + 1;
+ // We normally restrict the join on one side of the junction, but if the tangents are
+ // nearly equivalent this could theoretically result in bad seaming and/or cracks on the
+ // side we don't put it on. If the tangents are nearly equivalent then we leave the join
+ // double-sided.
+ float sinEpsilon = 1e-2; // ~= sin(180deg / 3000)
+ bool tangentsNearlyParallel =
+ (abs(turn) * inversesqrt(dot(tan0, tan0) * dot(tan1, tan1))) < sinEpsilon;
+ if (!tangentsNearlyParallel || dot(tan0, tan1) < 0) {
+ // There are two edges colocated at the beginning. Leave the first one double sided
+ // for seaming with the previous stroke. (The double sided edge at the end will
+ // actually come from the section of our strip that belongs to the stroke.)
+ if (combinedEdgeID >= 0) {
+ strokeOutset = (turn < 0) ? min(strokeOutset, 0) : max(strokeOutset, 0);
+ }
+ }
+ combinedEdgeID = max(combinedEdgeID, 0);
+ } else {
+ // We belong to the stroke. Unless numRadialSegmentsPerRadian is incredibly high,
+ // clamping to maxCombinedSegments will be a no-op because the draw call was invoked with
+ // sufficient vertices to cover the worst case scenario of 180 degree rotation.
+ float maxCombinedSegments = maxEdges - numEdgesInJoin - 1;
+ numRadialSegments = max(ceil(abs(rotation) * numRadialSegmentsPerRadian), 1);
+ numRadialSegments = min(numRadialSegments, maxCombinedSegments);
+ numParametricSegments = min(numParametricSegments,
+ maxCombinedSegments - numRadialSegments + 1);
+ }
+
+ // Additional parameters for final tessellation evaluation.
+ float radsPerSegment = rotation / numRadialSegments;
+ float numCombinedSegments = numParametricSegments + numRadialSegments - 1;
+ bool isFinalEdge = (combinedEdgeID >= numCombinedSegments);
+ if (combinedEdgeID > numCombinedSegments) {
+ strokeOutset = 0; // The strip has more edges than we need. Drop this one.
+ }
+ // Edge #2 extends to the miter point.
+ if (abs(edgeID) == 2 && joinType > 0/*Is the join a miter type?*/) {
+ strokeOutset *= $miter_extent(cosTheta, joinType/*miterLimit*/);
+ }
+
+ float2 tangent, strokeCoord;
+ if (combinedEdgeID != 0 && !isFinalEdge) {
+ // Compute the location and tangent direction of the stroke edge with the integral id
+ // "combinedEdgeID", where combinedEdgeID is the sorted-order index of parametric and radial
+ // edges. Start by finding the tangent function's power basis coefficients. These define a
+ // tangent direction (scaled by some uniform value) as:
+ // |T^2|
+ // Tangent_Direction(T) = dx,dy = |A 2B C| * |T |
+ // |. . .| |1 |
+ float2 A, B, C = p1 - p0;
+ float2 D = p3 - p0;
+ if (w >= 0.0) {
+ // P0..P2 represent a conic and P3==P2. The derivative of a conic has a cumbersome
+ // order-4 denominator. However, this isn't necessary if we are only interested in a
+ // vector in the same *direction* as a given tangent line. Since the denominator scales
+ // dx and dy uniformly, we can throw it out completely after evaluating the derivative
+ // with the standard quotient rule. This leaves us with a simpler quadratic function
+ // that we use to find a tangent.
+ C *= w;
+ B = .5*D - C;
+ A = (w - 1.0) * D;
+ p1 *= w;
+ } else {
+ float2 E = p2 - p1;
+ B = E - C;
+ A = fma(float2(-3), E, D);
+ }
+ // FIXME(crbug.com/800804,skbug.com/11268): Consider normalizing the exponents in A,B,C at
+ // this point in order to prevent fp32 overflow.
+
+ // Now find the coefficients that give a tangent direction from a parametric edge ID:
+ //
+ // |parametricEdgeID^2|
+ // Tangent_Direction(parametricEdgeID) = dx,dy = |A B_ C_| * |parametricEdgeID |
+ // |. . .| |1 |
+ //
+ float2 B_ = B * (numParametricSegments * 2.0);
+ float2 C_ = C * (numParametricSegments * numParametricSegments);
+
+ // Run a binary search to determine the highest parametric edge that is located on or before
+ // the combinedEdgeID. A combined ID is determined by the sum of complete parametric and
+ // radial segments behind it. i.e., find the highest parametric edge where:
+ //
+ // parametricEdgeID + floor(numRadialSegmentsAtParametricT) <= combinedEdgeID
+ //
+ float lastParametricEdgeID = 0.0;
+ float maxParametricEdgeID = min(numParametricSegments - 1.0, combinedEdgeID);
+ float negAbsRadsPerSegment = -abs(radsPerSegment);
+ float maxRotation0 = (1.0 + combinedEdgeID) * abs(radsPerSegment);
+ for (int exp = 5 /*max resolve level*/ - 1; exp >= 0; --exp) {
+ // Test the parametric edge at lastParametricEdgeID + 2^exp.
+ float testParametricID = lastParametricEdgeID + exp2(float(exp));
+ if (testParametricID <= maxParametricEdgeID) {
+ float2 testTan = fma(float2(testParametricID), A, B_);
+ testTan = fma(float2(testParametricID), testTan, C_);
+ float cosRotation = dot(normalize(testTan), tan0);
+ float maxRotation = fma(testParametricID, negAbsRadsPerSegment, maxRotation0);
+ maxRotation = min(maxRotation, $PI);
+ // Is rotation <= maxRotation? (i.e., is the number of complete radial segments
+ // behind testT, + testParametricID <= combinedEdgeID?)
+ if (cosRotation >= cos(maxRotation)) {
+ // testParametricID is on or before the combinedEdgeID. Keep it!
+ lastParametricEdgeID = testParametricID;
+ }
+ }
+ }
+
+ // Find the T value of the parametric edge at lastParametricEdgeID.
+ float parametricT = lastParametricEdgeID / numParametricSegments;
+
+ // Now that we've identified the highest parametric edge on or before the
+ // combinedEdgeID, the highest radial edge is easy:
+ float lastRadialEdgeID = combinedEdgeID - lastParametricEdgeID;
+
+ // Find the angle of tan0, i.e. the angle between tan0 and the positive x axis.
+ float angle0 = acos(clamp(tan0.x, -1.0, 1.0));
+ angle0 = tan0.y >= 0.0 ? angle0 : -angle0;
+
+ // Find the tangent vector on the edge at lastRadialEdgeID. By construction it is already
+ // normalized.
+ float radialAngle = fma(lastRadialEdgeID, radsPerSegment, angle0);
+ tangent = float2(cos(radialAngle), sin(radialAngle));
+ float2 norm = float2(-tangent.y, tangent.x);
+
+ // Find the T value where the tangent is orthogonal to norm. This is a quadratic:
+ //
+ // dot(norm, Tangent_Direction(T)) == 0
+ //
+ // |T^2|
+ // norm * |A 2B C| * |T | == 0
+ // |. . .| |1 |
+ //
+ float a=dot(norm,A), b_over_2=dot(norm,B), c=dot(norm,C);
+ // Guard against a (numerically) negative discriminant before the sqrt.
+ float discr_over_4 = max(b_over_2*b_over_2 - a*c, 0.0);
+ float q = sqrt(discr_over_4);
+ if (b_over_2 > 0.0) {
+ q = -q;
+ }
+ q -= b_over_2;
+
+ // Roots are q/a and c/q. Since each curve section does not inflect or rotate more than 180
+ // degrees, there can only be one tangent orthogonal to "norm" inside 0..1. Pick the root
+ // nearest .5.
+ float _5qa = -.5*q*a; // == -(q*a)/2; common term when comparing the two roots against .5.
+ float2 root = (abs(fma(q,q,_5qa)) < abs(fma(a,c,_5qa))) ? float2(q,a) : float2(c,q);
+ float radialT = (root.t != 0.0) ? root.s / root.t : 0.0;
+ radialT = clamp(radialT, 0.0, 1.0);
+
+ if (lastRadialEdgeID == 0.0) {
+ // The root finder above can become unstable when lastRadialEdgeID == 0 (e.g., if
+ // there are roots at exactly 0 and 1 both). radialT should always == 0 in this case.
+ radialT = 0.0;
+ }
+
+ // Now that we've identified the T values of the last parametric and radial edges, our final
+ // T value for combinedEdgeID is whichever is larger.
+ float T = max(parametricT, radialT);
+
+ // Evaluate the cubic at T. Use De Casteljau's for its accuracy and stability.
+ float2 ab = $unchecked_mix(p0, p1, T);
+ float2 bc = $unchecked_mix(p1, p2, T);
+ float2 cd = $unchecked_mix(p2, p3, T);
+ float2 abc = $unchecked_mix(ab, bc, T);
+ float2 bcd = $unchecked_mix(bc, cd, T);
+ float2 abcd = $unchecked_mix(abc, bcd, T);
+
+ // Evaluate the conic weight at T.
+ float u = $unchecked_mix(1.0, w, T);
+ float v = w + 1 - u; // == mix(w, 1, T)
+ float uv = $unchecked_mix(u, v, T);
+
+ // If we went with T=parametricT, then update the tangent. Otherwise leave it at the radial
+ // tangent found previously. (In the event that parametricT == radialT, we keep the radial
+ // tangent.)
+ if (T != radialT) {
+ // We must re-normalize here because the tangent is determined by the curve coefficients
+ tangent = w >= 0.0 ? $robust_normalize_diff(bc*u, ab*v)
+ : $robust_normalize_diff(bcd, abc);
+ }
+
+ strokeCoord = (w >= 0.0) ? abc/uv : abcd;
+ } else {
+ // Edges at the beginning and end of the strip use exact endpoints and tangents. This
+ // ensures crack-free seaming between instances.
+ tangent = (combinedEdgeID == 0) ? tan0 : tan1;
+ strokeCoord = (combinedEdgeID == 0) ? p0 : p3;
+ }
+
+ // At this point 'tangent' is normalized, so the orthogonal vector is also normalized.
+ float2 ortho = float2(tangent.y, -tangent.x);
+ strokeCoord += ortho * (strokeRadius * strokeOutset);
+
+ if (isHairline) {
+ // Hairline case. The scale and skew already happened before tessellation.
+ // TODO: There's probably a more efficient way to tessellate the hairline that lets us
+ // avoid inverting the affine matrix to get back to local coords, but it's just a 2x2 so
+ // this works for now.
+ return float4(strokeCoord + translate, inverse(affineMatrix) * strokeCoord);
+ } else {
+ // Normal case. Do the transform after tessellation.
+ return float4(affineMatrix * strokeCoord + translate, strokeCoord);
+ }
+}
diff --git a/gfx/skia/skia/src/sksl/sksl_public.sksl b/gfx/skia/skia/src/sksl/sksl_public.sksl
new file mode 100644
index 0000000000..1168612c70
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_public.sksl
@@ -0,0 +1,10 @@
+// SkSL intrinsics that are not part of GLSL
+
+// Color space transformation, between the working (destination) space and fixed (known) spaces:
+$pure half3 toLinearSrgb(half3 color);
+$pure half3 fromLinearSrgb(half3 color);
+
+// SkSL intrinsics that reflect Skia's C++ object model:
+ half4 $eval(float2 coords, shader s);
+ half4 $eval(half4 color, colorFilter f);
+ half4 $eval(half4 src, half4 dst, blender b);
diff --git a/gfx/skia/skia/src/sksl/sksl_rt_shader.sksl b/gfx/skia/skia/src/sksl/sksl_rt_shader.sksl
new file mode 100644
index 0000000000..abae14745b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_rt_shader.sksl
@@ -0,0 +1 @@
+layout(builtin=15) float4 sk_FragCoord;
diff --git a/gfx/skia/skia/src/sksl/sksl_shared.sksl b/gfx/skia/skia/src/sksl/sksl_shared.sksl
new file mode 100644
index 0000000000..3720e4c872
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_shared.sksl
@@ -0,0 +1,449 @@
+// Intrinsics that are available to public SkSL (SkRuntimeEffect)
+
+// See "The OpenGL ES Shading Language, Section 8"
+
+// 8.1 : Angle and Trigonometry Functions
+$pure $genType radians($genType degrees);
+$pure $genHType radians($genHType degrees);
+$pure $genType degrees($genType radians);
+$pure $genHType degrees($genHType radians);
+
+$pure $genType sin($genType angle);
+$pure $genHType sin($genHType angle);
+$pure $genType cos($genType angle);
+$pure $genHType cos($genHType angle);
+$pure $genType tan($genType angle);
+$pure $genHType tan($genHType angle);
+
+$pure $genType asin($genType x);
+$pure $genHType asin($genHType x);
+$pure $genType acos($genType x);
+$pure $genHType acos($genHType x);
+$pure $genType atan($genType y, $genType x);
+$pure $genHType atan($genHType y, $genHType x);
+$pure $genType atan($genType y_over_x);
+$pure $genHType atan($genHType y_over_x);
+
+// 8.1 : Angle and Trigonometry Functions (GLSL ES 3.0)
+$pure $es3 $genType sinh($genType x);
+$pure $es3 $genHType sinh($genHType x);
+$pure $es3 $genType cosh($genType x);
+$pure $es3 $genHType cosh($genHType x);
+$pure $es3 $genType tanh($genType x);
+$pure $es3 $genHType tanh($genHType x);
+$pure $es3 $genType asinh($genType x);
+$pure $es3 $genHType asinh($genHType x);
+$pure $es3 $genType acosh($genType x);
+$pure $es3 $genHType acosh($genHType x);
+$pure $es3 $genType atanh($genType x);
+$pure $es3 $genHType atanh($genHType x);
+
+// 8.2 : Exponential Functions
+$pure $genType pow($genType x, $genType y);
+$pure $genHType pow($genHType x, $genHType y);
+$pure $genType exp($genType x);
+$pure $genHType exp($genHType x);
+$pure $genType log($genType x);
+$pure $genHType log($genHType x);
+$pure $genType exp2($genType x);
+$pure $genHType exp2($genHType x);
+$pure $genType log2($genType x);
+$pure $genHType log2($genHType x);
+
+$pure $genType sqrt($genType x);
+$pure $genHType sqrt($genHType x);
+$pure $genType inversesqrt($genType x);
+$pure $genHType inversesqrt($genHType x);
+
+// 8.3 : Common Functions
+$pure $genType abs($genType x);
+$pure $genHType abs($genHType x);
+$pure $genType sign($genType x);
+$pure $genHType sign($genHType x);
+$pure $genType floor($genType x);
+$pure $genHType floor($genHType x);
+$pure $genType ceil($genType x);
+$pure $genHType ceil($genHType x);
+$pure $genType fract($genType x);
+$pure $genHType fract($genHType x);
+$pure $genType mod($genType x, float y);
+$pure $genType mod($genType x, $genType y);
+$pure $genHType mod($genHType x, half y);
+$pure $genHType mod($genHType x, $genHType y);
+
+$pure $genType min($genType x, $genType y);
+$pure $genType min($genType x, float y);
+$pure $genHType min($genHType x, $genHType y);
+$pure $genHType min($genHType x, half y);
+$pure $genType max($genType x, $genType y);
+$pure $genType max($genType x, float y);
+$pure $genHType max($genHType x, $genHType y);
+$pure $genHType max($genHType x, half y);
+$pure $genType clamp($genType x, $genType minVal, $genType maxVal);
+$pure $genType clamp($genType x, float minVal, float maxVal);
+$pure $genHType clamp($genHType x, $genHType minVal, $genHType maxVal);
+$pure $genHType clamp($genHType x, half minVal, half maxVal);
+$pure $genType saturate($genType x); // SkSL extension
+$pure $genHType saturate($genHType x); // SkSL extension
+$pure $genType mix($genType x, $genType y, $genType a);
+$pure $genType mix($genType x, $genType y, float a);
+$pure $genHType mix($genHType x, $genHType y, $genHType a);
+$pure $genHType mix($genHType x, $genHType y, half a);
+$pure $genType step($genType edge, $genType x);
+$pure $genType step(float edge, $genType x);
+$pure $genHType step($genHType edge, $genHType x);
+$pure $genHType step(half edge, $genHType x);
+$pure $genType smoothstep($genType edge0, $genType edge1, $genType x);
+$pure $genType smoothstep(float edge0, float edge1, $genType x);
+$pure $genHType smoothstep($genHType edge0, $genHType edge1, $genHType x);
+$pure $genHType smoothstep(half edge0, half edge1, $genHType x);
+
+// 8.3 : Common Functions (GLSL ES 3.0)
+$pure $es3 $genIType abs($genIType x);
+$pure $es3 $genIType sign($genIType x);
+$pure $es3 $genIType floatBitsToInt ($genType value);
+$pure $es3 $genUType floatBitsToUint($genType value);
+$pure $es3 $genType intBitsToFloat ($genIType value);
+$pure $es3 $genType uintBitsToFloat($genUType value);
+$pure $es3 $genType trunc($genType x);
+$pure $es3 $genHType trunc($genHType x);
+$pure $es3 $genType round($genType x);
+$pure $es3 $genHType round($genHType x);
+$pure $es3 $genType roundEven($genType x);
+$pure $es3 $genHType roundEven($genHType x);
+$pure $es3 $genIType min($genIType x, $genIType y);
+$pure $es3 $genIType min($genIType x, int y);
+$pure $es3 $genUType min($genUType x, $genUType y);
+$pure $es3 $genUType min($genUType x, uint y);
+$pure $es3 $genIType max($genIType x, $genIType y);
+$pure $es3 $genIType max($genIType x, int y);
+$pure $es3 $genUType max($genUType x, $genUType y);
+$pure $es3 $genUType max($genUType x, uint y);
+$pure $es3 $genIType clamp($genIType x, $genIType minVal, $genIType maxVal);
+$pure $es3 $genIType clamp($genIType x, int minVal, int maxVal);
+$pure $es3 $genUType clamp($genUType x, $genUType minVal, $genUType maxVal);
+$pure $es3 $genUType clamp($genUType x, uint minVal, uint maxVal);
+$pure $es3 $genType mix($genType x, $genType y, $genBType a);
+$pure $es3 $genHType mix($genHType x, $genHType y, $genBType a);
+
+// 8.3 : Common Functions (GLSL ES 3.0) -- cannot be used in constant-expressions
+$pure $es3 $genBType isnan($genType x);
+$pure $es3 $genBType isnan($genHType x);
+$pure $es3 $genBType isinf($genType x);
+$pure $es3 $genBType isinf($genHType x);
+$pure $es3 $genType modf($genType x, out $genType i);
+$pure $es3 $genHType modf($genHType x, out $genHType i);
+
+// 8.4 : Floating-Point Pack and Unpack Functions (GLSL ES 3.0)
+$pure $es3 uint packUnorm2x16(float2 v);
+$pure $es3 float2 unpackUnorm2x16(uint p);
+
+// 8.5 : Geometric Functions
+$pure float length($genType x);
+$pure half length($genHType x);
+$pure float distance($genType p0, $genType p1);
+$pure half distance($genHType p0, $genHType p1);
+$pure float dot($genType x, $genType y);
+$pure half dot($genHType x, $genHType y);
+$pure float3 cross(float3 x, float3 y);
+$pure half3 cross(half3 x, half3 y);
+$pure $genType normalize($genType x);
+$pure $genHType normalize($genHType x);
+$pure $genType faceforward($genType N, $genType I, $genType Nref);
+$pure $genHType faceforward($genHType N, $genHType I, $genHType Nref);
+$pure $genType reflect($genType I, $genType N);
+$pure $genHType reflect($genHType I, $genHType N);
+$pure $genType refract($genType I, $genType N, float eta);
+$pure $genHType refract($genHType I, $genHType N, half eta);
+
+// 8.6 : Matrix Functions
+$pure $squareMat matrixCompMult($squareMat x, $squareMat y);
+$pure $squareHMat matrixCompMult($squareHMat x, $squareHMat y);
+$pure $es3 $mat matrixCompMult($mat x, $mat y);
+$pure $es3 $hmat matrixCompMult($hmat x, $hmat y);
+
+// 8.6 : Matrix Functions (GLSL 1.4, poly-filled by SkSL as needed)
+$pure $squareMat inverse($squareMat m);
+$pure $squareHMat inverse($squareHMat m);
+
+// 8.6 : Matrix Functions (GLSL ES 3.0)
+$pure $es3 float determinant($squareMat m);
+$pure $es3 half determinant($squareHMat m);
+$pure $es3 $squareMat transpose($squareMat m);
+$pure $es3 $squareHMat transpose($squareHMat m);
+$pure $es3 float2x3 transpose(float3x2 m);
+$pure $es3 half2x3 transpose(half3x2 m);
+$pure $es3 float2x4 transpose(float4x2 m);
+$pure $es3 half2x4 transpose(half4x2 m);
+$pure $es3 float3x2 transpose(float2x3 m);
+$pure $es3 half3x2 transpose(half2x3 m);
+$pure $es3 float3x4 transpose(float4x3 m);
+$pure $es3 half3x4 transpose(half4x3 m);
+$pure $es3 float4x2 transpose(float2x4 m);
+$pure $es3 half4x2 transpose(half2x4 m);
+$pure $es3 float4x3 transpose(float3x4 m);
+$pure $es3 half4x3 transpose(half3x4 m);
+$pure $es3 $squareMat outerProduct($vec c, $vec r);
+$pure $es3 $squareHMat outerProduct($hvec c, $hvec r);
+$pure $es3 float2x3 outerProduct(float3 c, float2 r);
+$pure $es3 half2x3 outerProduct(half3 c, half2 r);
+$pure $es3 float3x2 outerProduct(float2 c, float3 r);
+$pure $es3 half3x2 outerProduct(half2 c, half3 r);
+$pure $es3 float2x4 outerProduct(float4 c, float2 r);
+$pure $es3 half2x4 outerProduct(half4 c, half2 r);
+$pure $es3 float4x2 outerProduct(float2 c, float4 r);
+$pure $es3 half4x2 outerProduct(half2 c, half4 r);
+$pure $es3 float3x4 outerProduct(float4 c, float3 r);
+$pure $es3 half3x4 outerProduct(half4 c, half3 r);
+$pure $es3 float4x3 outerProduct(float3 c, float4 r);
+$pure $es3 half4x3 outerProduct(half3 c, half4 r);
+
+// 8.7 : Vector Relational Functions
+$pure $bvec lessThan($vec x, $vec y);
+$pure $bvec lessThan($hvec x, $hvec y);
+$pure $bvec lessThan($ivec x, $ivec y);
+$pure $bvec lessThan($svec x, $svec y);
+$pure $bvec lessThanEqual($vec x, $vec y);
+$pure $bvec lessThanEqual($hvec x, $hvec y);
+$pure $bvec lessThanEqual($ivec x, $ivec y);
+$pure $bvec lessThanEqual($svec x, $svec y);
+$pure $bvec greaterThan($vec x, $vec y);
+$pure $bvec greaterThan($hvec x, $hvec y);
+$pure $bvec greaterThan($ivec x, $ivec y);
+$pure $bvec greaterThan($svec x, $svec y);
+$pure $bvec greaterThanEqual($vec x, $vec y);
+$pure $bvec greaterThanEqual($hvec x, $hvec y);
+$pure $bvec greaterThanEqual($ivec x, $ivec y);
+$pure $bvec greaterThanEqual($svec x, $svec y);
+$pure $bvec equal($vec x, $vec y);
+$pure $bvec equal($hvec x, $hvec y);
+$pure $bvec equal($ivec x, $ivec y);
+$pure $bvec equal($svec x, $svec y);
+$pure $bvec equal($bvec x, $bvec y);
+$pure $bvec notEqual($vec x, $vec y);
+$pure $bvec notEqual($hvec x, $hvec y);
+$pure $bvec notEqual($ivec x, $ivec y);
+$pure $bvec notEqual($svec x, $svec y);
+$pure $bvec notEqual($bvec x, $bvec y);
+
+$pure $es3 $bvec lessThan($usvec x, $usvec y);
+$pure $es3 $bvec lessThan($uvec x, $uvec y);
+$pure $es3 $bvec lessThanEqual($uvec x, $uvec y);
+$pure $es3 $bvec lessThanEqual($usvec x, $usvec y);
+$pure $es3 $bvec greaterThan($uvec x, $uvec y);
+$pure $es3 $bvec greaterThan($usvec x, $usvec y);
+$pure $es3 $bvec greaterThanEqual($uvec x, $uvec y);
+$pure $es3 $bvec greaterThanEqual($usvec x, $usvec y);
+$pure $es3 $bvec equal($uvec x, $uvec y);
+$pure $es3 $bvec equal($usvec x, $usvec y);
+$pure $es3 $bvec notEqual($uvec x, $uvec y);
+$pure $es3 $bvec notEqual($usvec x, $usvec y);
+
+$pure bool any($bvec x);
+$pure bool all($bvec x);
+$pure $bvec not($bvec x);
+
+// 8.9 : Fragment Processing Functions (GLSL ES 3.0)
+$pure $es3 $genType dFdx($genType p);
+$pure $es3 $genType dFdy($genType p);
+$pure $es3 $genHType dFdx($genHType p);
+$pure $es3 $genHType dFdy($genHType p);
+$pure $es3 $genType fwidth($genType p);
+$pure $es3 $genHType fwidth($genHType p);
+
+
+// SkSL utility functions
+
+// Divide the color channels by alpha to unpremultiply. The max() guards against
+// division by zero when the incoming color is transparent black.
+$pure half4 unpremul(half4 color) {
+ half alpha = max(color.a, 0.0001);
+ return half4(color.rgb / alpha, color.a);
+}
+$pure float4 unpremul(float4 color) {
+ float alpha = max(color.a, 0.0001);
+ return float4(color.rgb / alpha, color.a);
+}
+
+// Unpremultiply for polar-space CSS colors: Skia stores these hue-first (see the
+// $css_hcl_to_lab comment), so the hue channel (r) is left untouched and only the
+// gb channels are divided by alpha. max() guards transparent black as in unpremul().
+$export $pure half4 $unpremul_polar(half4 color) {
+ half alpha = max(color.a, 0.0001);
+ return half4(color.r, color.gb / alpha, color.a);
+}
+
+// Convert RGBA -> HSLA (including unpremul).
+//
+// Based on work by Sam Hocevar, Emil Persson, and Ian Taylor [1][2][3]. High-level ideas:
+//
+// - minimize the number of branches by sorting and computing the hue phase in parallel (vec4s)
+//
+// - trade the third sorting branch for a potentially faster std::min and leaving 2nd/3rd
+// channels unsorted (based on the observation that swapping both the channels and the bias sign
+// has no effect under abs)
+//
+// - use epsilon offsets for denominators, to avoid explicit zero-checks
+//
+// An additional trick we employ is deferring premul->unpremul conversion until the very end: the
+// alpha factor gets naturally simplified for H and S, and only L requires a dedicated unpremul
+// division (so we trade three divs for one).
+//
+// [1] http://lolengine.net/blog/2013/01/13/fast-rgb-to-hsv
+// [2] http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl
+// [3] http://www.chilliant.com/rgb2hsv.html
+
+$export $pure half4 $rgb_to_hsl(half3 c, half a) {
+ half4 p = (c.g < c.b) ? half4(c.bg, -1, 2/3.0)
+ : half4(c.gb, 0, -1/3.0);
+ half4 q = (c.r < p.x) ? half4(p.x, c.r, p.yw)
+ : half4(c.r, p.x, p.yz);
+
+ // q.x -> max channel value
+ // q.yz -> 2nd/3rd channel values (unsorted)
+ // q.w -> bias value dependent on max channel selection
+
+ // kEps keeps the denominators nonzero (see header comment), avoiding explicit zero checks.
+ const half kEps = 0.0001;
+ // "pm" prefix = premultiplied: V(alue/max), C(hroma), L(ightness). Per the header
+ // comment, the alpha factor cancels out of H and S; only L gets a dedicated division.
+ half pmV = q.x;
+ half pmC = pmV - min(q.y, q.z);
+ half pmL = pmV - pmC * 0.5;
+ half H = abs(q.w + (q.y - q.z) / (pmC * 6 + kEps));
+ half S = pmC / (a + kEps - abs(pmL * 2 - a));
+ half L = pmL / (a + kEps);
+
+ return half4(H, S, L, a);
+}
+
+// Convert HSLA -> RGBA (including clamp and premul).
+//
+// Based on work by Sam Hocevar, Emil Persson, and Ian Taylor [1][2][3].
+//
+// [1] http://lolengine.net/blog/2013/01/13/fast-rgb-to-hsv
+// [2] http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl
+// [3] http://www.chilliant.com/rgb2hsv.html
+
+$export $pure half3 $hsl_to_rgb(half3 hsl) {
+ // Chroma shrinks to zero at both lightness extremes.
+ half chroma = (1 - abs(2 * hsl.z - 1)) * hsl.y;
+ // Per-channel hue phases, offset by 0, 2/3, and 1/3 of a revolution.
+ half3 phase = hsl.xxx + half3(0, 2/3.0, 1/3.0);
+ // Triangle-wave ramp in [0,1] for each channel.
+ half3 ramp = saturate(abs(fract(phase) * 6 - 3) - 1);
+
+ return (ramp - 0.5) * chroma + hsl.z;
+}
+
+$export $pure half4 $hsl_to_rgb(half3 hsl, half a) {
+ // Convert, premultiply by alpha, then clamp everything to [0,1].
+ half3 rgb = $hsl_to_rgb(hsl) * a;
+ return saturate(half4(rgb, a));
+}
+
+// Color conversion functions used in gradient interpolation, based on
+// https://www.w3.org/TR/css-color-4/#color-conversion-code
+// TODO(skia:13108): For all of these, we can eliminate any linear math at the beginning
+// (by removing the corresponding linear math at the end of the CPU code).
+$export $pure half3 $css_lab_to_xyz(half3 lab) {
+ // CIE Lab constants: k = kappa (24389/27), e = epsilon (216/24389).
+ const half k = 24389 / 27.0;
+ const half e = 216 / 24389.0;
+
+ // f = (f_x, f_y, f_z), the intermediate values of the inverse Lab transfer function;
+ // f_y derives from L, and f_x/f_z offset it by a/500 and b/200.
+ half3 f;
+ f[1] = (lab[0] + 16) / 116;
+ f[0] = (lab[1] / 500) + f[1];
+ f[2] = f[1] - (lab[2] / 200);
+
+ half3 f_cubed = pow(f, half3(3));
+
+ // Per channel: use the cubed value when above the threshold, otherwise the linear branch
+ // (matches the CSS Color 4 reference conversion code cited above).
+ half3 xyz = half3(
+ f_cubed[0] > e ? f_cubed[0] : (116 * f[0] - 16) / k,
+ lab[0] > k * e ? f_cubed[1] : lab[0] / k,
+ f_cubed[2] > e ? f_cubed[2] : (116 * f[2] - 16) / k
+ );
+
+ // Scale by the D50 reference white, expressed from its xy chromaticity (0.3457, 0.3585).
+ const half3 D50 = half3(0.3457 / 0.3585, 1.0, (1.0 - 0.3457 - 0.3585) / 0.3585);
+ return xyz * D50;
+}
+
+// Skia stores all polar colors with hue in the first component, so this "LCH -> Lab" transform
+// actually takes "HCL". This is also used to do the same polar transform for OkHCL to OkLAB.
+// See similar comments & logic in SkGradientShaderBase.cpp.
+$pure half3 $css_hcl_to_lab(half3 hcl) {
+ // Input order is (hue, chroma, lightness); output is Lab order (L, a, b).
+ half hue = radians(hcl[0]);
+ half chroma = hcl[1];
+ return half3(hcl[2], chroma * cos(hue), chroma * sin(hue));
+}
+
+$export $pure half3 $css_hcl_to_xyz(half3 hcl) {
+ // Two-step conversion: polar HCL -> rectangular Lab -> XYZ.
+ half3 lab = $css_hcl_to_lab(hcl);
+ return $css_lab_to_xyz(lab);
+}
+
+$export $pure half3 $css_oklab_to_linear_srgb(half3 oklab) {
+ // OKLab -> cube-root LMS (l_, m_, s_) using the fixed matrix from the OKLab definition.
+ half l_ = oklab.x + 0.3963377774 * oklab.y + 0.2158037573 * oklab.z,
+ m_ = oklab.x - 0.1055613458 * oklab.y - 0.0638541728 * oklab.z,
+ s_ = oklab.x - 0.0894841775 * oklab.y - 1.2914855480 * oklab.z;
+
+ // Cube to undo OKLab's cube-root nonlinearity.
+ half l = l_*l_*l_,
+ m = m_*m_*m_,
+ s = s_*s_*s_;
+
+ // LMS -> linear sRGB, again a fixed matrix from the OKLab reference implementation.
+ return half3(
+ +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s,
+ -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s,
+ -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s
+ );
+}
+
+$export $pure half3 $css_okhcl_to_linear_srgb(half3 okhcl) {
+ // Polar OkHCL -> rectangular OKLab (same polar transform as Lab), then to linear sRGB.
+ half3 oklab = $css_hcl_to_lab(okhcl);
+ return $css_oklab_to_linear_srgb(oklab);
+}
+
+// TODO(skia:13108): Use our optimized version (though it has different range)
+// Doing so might require fixing (re-deriving?) the math for the HWB version below
+$export $pure half3 $css_hsl_to_srgb(half3 hsl) {
+ // Wrap hue into [0, 360). mod() can yield a negative result for a negative hue,
+ // so a second correction is needed.
+ hsl.x = mod(hsl.x, 360);
+ if (hsl.x < 0) {
+ hsl.x += 360;
+ }
+
+ // Saturation and lightness arrive as percentages.
+ hsl.yz /= 100;
+
+ // CSS Color 4 reference formula, evaluated for n = (0, 8, 4) across the three channels.
+ half3 k = mod(half3(0, 8, 4) + hsl.x/30, 12);
+ half a = hsl.y * min(hsl.z, 1 - hsl.z);
+ return hsl.z - a * clamp(min(k - 3, 9 - k), -1, 1);
+}
+
+$export $pure half3 $css_hwb_to_srgb(half3 hwb) {
+ // Whiteness and blackness arrive as percentages.
+ hwb.yz /= 100;
+ half whiteness = hwb.y;
+ half blackness = hwb.z;
+ if (whiteness + blackness >= 1) {
+ // Fully washed out: the result is a gray determined by the whiteness:blackness ratio.
+ half gray = whiteness / (whiteness + blackness);
+ return half3(gray);
+ }
+ // Start from the fully-saturated hue (S=100, L=50), then interpolate toward white/black.
+ half3 rgb = $css_hsl_to_srgb(half3(hwb.x, 100, 50));
+ return rgb * (1 - whiteness - blackness) + whiteness;
+}
+
+/*
+ * The actual output color space of this function depends on the input color space
+ * (it might be sRGB, linear sRGB, or linear XYZ). The actual space is what's stored
+ * in the gradient/SkColor4fXformer's fIntermediateColorSpace.
+ */
+$export $pure half4 $interpolated_to_rgb_unpremul(half4 color, int colorSpace, int doUnpremul) {
+ // NOTE(review): these constants look like they mirror a CPU-side interpolation
+ // color-space enum (the header mentions SkColor4fXformer) -- confirm the ordering
+ // stays in sync with that code before changing anything here.
+ const int kDestination = 0;
+ const int kSRGBLinear = 1;
+ const int kLab = 2;
+ const int kOKLab = 3;
+ const int kLCH = 4;
+ const int kOKLCH = 5;
+ const int kSRGB = 6;
+ const int kHSL = 7;
+ const int kHWB = 8;
+
+ if (bool(doUnpremul)) {
+ switch (colorSpace) {
+ // Rectangular spaces unpremultiply all three channels...
+ case kLab:
+ case kOKLab: color = unpremul(color); break;
+ // ...while polar (hue-leading) spaces leave the hue channel untouched.
+ case kLCH:
+ case kOKLCH:
+ case kHSL:
+ case kHWB: color = $unpremul_polar(color); break;
+ }
+ }
+ // Convert into the intermediate space (sRGB, linear sRGB, or XYZ -- see header comment).
+ // kDestination and kSRGBLinear need no channel transform here.
+ switch (colorSpace) {
+ case kLab: { color.rgb = $css_lab_to_xyz(color.rgb); break; }
+ case kOKLab: { color.rgb = $css_oklab_to_linear_srgb(color.rgb); break; }
+ case kLCH: { color.rgb = $css_hcl_to_xyz(color.rgb); break; }
+ case kOKLCH: { color.rgb = $css_okhcl_to_linear_srgb(color.rgb); break; }
+ case kHSL: { color.rgb = $css_hsl_to_srgb(color.rgb); break; }
+ case kHWB: { color.rgb = $css_hwb_to_srgb(color.rgb); break; }
+ }
+ return color;
+}
diff --git a/gfx/skia/skia/src/sksl/sksl_vert.sksl b/gfx/skia/skia/src/sksl/sksl_vert.sksl
new file mode 100644
index 0000000000..de730b3fbf
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/sksl_vert.sksl
@@ -0,0 +1,9 @@
+// defines built-in interfaces supported by SkSL vertex shaders
+
+out sk_PerVertex {
+ layout(builtin=0) float4 sk_Position;
+ layout(builtin=1) float sk_PointSize;
+};
+
+layout(builtin=42) in int sk_VertexID;
+layout(builtin=43) in int sk_InstanceID;
diff --git a/gfx/skia/skia/src/sksl/spirv.h b/gfx/skia/skia/src/sksl/spirv.h
new file mode 100644
index 0000000000..22821ed862
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/spirv.h
@@ -0,0 +1,870 @@
+/*
+** Copyright (c) 2014-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and/or associated documentation files (the "Materials"),
+** to deal in the Materials without restriction, including without limitation
+** the rights to use, copy, modify, merge, publish, distribute, sublicense,
+** and/or sell copies of the Materials, and to permit persons to whom the
+** Materials are furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Materials.
+**
+** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+** IN THE MATERIALS.
+*/
+
+/*
+** This header is automatically generated by the same tool that creates
+** the Binary Section of the SPIR-V specification.
+*/
+
+/*
+** Enumeration tokens for SPIR-V, in various styles:
+** C, C++, C++11, JSON, Lua, Python
+**
+** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL
+** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL
+** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL
+** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL
+** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL']
+**
+** Some tokens act like mask values, which can be OR'd together,
+** while others are mutually exclusive. The mask-like ones have
+** "Mask" in their name, and a parallel enum that has the shift
+** amount (1 << x) for each corresponding enumerant.
+*/
+
+#ifndef spirv_H
+#define spirv_H
+
+typedef unsigned int SpvId;
+
+#define SPV_VERSION 0x10000
+#define SPV_REVISION 4
+
+static const unsigned int SpvMagicNumber = 0x07230203;
+static const unsigned int SpvVersion = 0x00010000;
+static const unsigned int SpvRevision = 4;
+static const unsigned int SpvOpCodeMask = 0xffff;
+static const unsigned int SpvWordCountShift = 16;
+
+typedef enum SpvSourceLanguage_ {
+ SpvSourceLanguageUnknown = 0,
+ SpvSourceLanguageESSL = 1,
+ SpvSourceLanguageGLSL = 2,
+ SpvSourceLanguageOpenCL_C = 3,
+ SpvSourceLanguageOpenCL_CPP = 4,
+} SpvSourceLanguage;
+
+typedef enum SpvExecutionModel_ {
+ SpvExecutionModelVertex = 0,
+ SpvExecutionModelTessellationControl = 1,
+ SpvExecutionModelTessellationEvaluation = 2,
+ SpvExecutionModelGeometry = 3,
+ SpvExecutionModelFragment = 4,
+ SpvExecutionModelGLCompute = 5,
+ SpvExecutionModelKernel = 6,
+} SpvExecutionModel;
+
+typedef enum SpvAddressingModel_ {
+ SpvAddressingModelLogical = 0,
+ SpvAddressingModelPhysical32 = 1,
+ SpvAddressingModelPhysical64 = 2,
+} SpvAddressingModel;
+
+typedef enum SpvMemoryModel_ {
+ SpvMemoryModelSimple = 0,
+ SpvMemoryModelGLSL450 = 1,
+ SpvMemoryModelOpenCL = 2,
+} SpvMemoryModel;
+
+typedef enum SpvExecutionMode_ {
+ SpvExecutionModeInvocations = 0,
+ SpvExecutionModeSpacingEqual = 1,
+ SpvExecutionModeSpacingFractionalEven = 2,
+ SpvExecutionModeSpacingFractionalOdd = 3,
+ SpvExecutionModeVertexOrderCw = 4,
+ SpvExecutionModeVertexOrderCcw = 5,
+ SpvExecutionModePixelCenterInteger = 6,
+ SpvExecutionModeOriginUpperLeft = 7,
+ SpvExecutionModeOriginLowerLeft = 8,
+ SpvExecutionModeEarlyFragmentTests = 9,
+ SpvExecutionModePointMode = 10,
+ SpvExecutionModeXfb = 11,
+ SpvExecutionModeDepthReplacing = 12,
+ SpvExecutionModeDepthGreater = 14,
+ SpvExecutionModeDepthLess = 15,
+ SpvExecutionModeDepthUnchanged = 16,
+ SpvExecutionModeLocalSize = 17,
+ SpvExecutionModeLocalSizeHint = 18,
+ SpvExecutionModeInputPoints = 19,
+ SpvExecutionModeInputLines = 20,
+ SpvExecutionModeInputLinesAdjacency = 21,
+ SpvExecutionModeTriangles = 22,
+ SpvExecutionModeInputTrianglesAdjacency = 23,
+ SpvExecutionModeQuads = 24,
+ SpvExecutionModeIsolines = 25,
+ SpvExecutionModeOutputVertices = 26,
+ SpvExecutionModeOutputPoints = 27,
+ SpvExecutionModeOutputLineStrip = 28,
+ SpvExecutionModeOutputTriangleStrip = 29,
+ SpvExecutionModeVecTypeHint = 30,
+ SpvExecutionModeContractionOff = 31,
+} SpvExecutionMode;
+
+typedef enum SpvStorageClass_ {
+ SpvStorageClassUniformConstant = 0,
+ SpvStorageClassInput = 1,
+ SpvStorageClassUniform = 2,
+ SpvStorageClassOutput = 3,
+ SpvStorageClassWorkgroup = 4,
+ SpvStorageClassCrossWorkgroup = 5,
+ SpvStorageClassPrivate = 6,
+ SpvStorageClassFunction = 7,
+ SpvStorageClassGeneric = 8,
+ SpvStorageClassPushConstant = 9,
+ SpvStorageClassAtomicCounter = 10,
+ SpvStorageClassImage = 11,
+} SpvStorageClass;
+
+typedef enum SpvDim_ {
+ SpvDim1D = 0,
+ SpvDim2D = 1,
+ SpvDim3D = 2,
+ SpvDimCube = 3,
+ SpvDimRect = 4,
+ SpvDimBuffer = 5,
+ SpvDimSubpassData = 6,
+} SpvDim;
+
+typedef enum SpvSamplerAddressingMode_ {
+ SpvSamplerAddressingModeNone = 0,
+ SpvSamplerAddressingModeClampToEdge = 1,
+ SpvSamplerAddressingModeClamp = 2,
+ SpvSamplerAddressingModeRepeat = 3,
+ SpvSamplerAddressingModeRepeatMirrored = 4,
+} SpvSamplerAddressingMode;
+
+typedef enum SpvSamplerFilterMode_ {
+ SpvSamplerFilterModeNearest = 0,
+ SpvSamplerFilterModeLinear = 1,
+} SpvSamplerFilterMode;
+
+typedef enum SpvImageFormat_ {
+ SpvImageFormatUnknown = 0,
+ SpvImageFormatRgba32f = 1,
+ SpvImageFormatRgba16f = 2,
+ SpvImageFormatR32f = 3,
+ SpvImageFormatRgba8 = 4,
+ SpvImageFormatRgba8Snorm = 5,
+ SpvImageFormatRg32f = 6,
+ SpvImageFormatRg16f = 7,
+ SpvImageFormatR11fG11fB10f = 8,
+ SpvImageFormatR16f = 9,
+ SpvImageFormatRgba16 = 10,
+ SpvImageFormatRgb10A2 = 11,
+ SpvImageFormatRg16 = 12,
+ SpvImageFormatRg8 = 13,
+ SpvImageFormatR16 = 14,
+ SpvImageFormatR8 = 15,
+ SpvImageFormatRgba16Snorm = 16,
+ SpvImageFormatRg16Snorm = 17,
+ SpvImageFormatRg8Snorm = 18,
+ SpvImageFormatR16Snorm = 19,
+ SpvImageFormatR8Snorm = 20,
+ SpvImageFormatRgba32i = 21,
+ SpvImageFormatRgba16i = 22,
+ SpvImageFormatRgba8i = 23,
+ SpvImageFormatR32i = 24,
+ SpvImageFormatRg32i = 25,
+ SpvImageFormatRg16i = 26,
+ SpvImageFormatRg8i = 27,
+ SpvImageFormatR16i = 28,
+ SpvImageFormatR8i = 29,
+ SpvImageFormatRgba32ui = 30,
+ SpvImageFormatRgba16ui = 31,
+ SpvImageFormatRgba8ui = 32,
+ SpvImageFormatR32ui = 33,
+ SpvImageFormatRgb10a2ui = 34,
+ SpvImageFormatRg32ui = 35,
+ SpvImageFormatRg16ui = 36,
+ SpvImageFormatRg8ui = 37,
+ SpvImageFormatR16ui = 38,
+ SpvImageFormatR8ui = 39,
+} SpvImageFormat;
+
+typedef enum SpvImageChannelOrder_ {
+ SpvImageChannelOrderR = 0,
+ SpvImageChannelOrderA = 1,
+ SpvImageChannelOrderRG = 2,
+ SpvImageChannelOrderRA = 3,
+ SpvImageChannelOrderRGB = 4,
+ SpvImageChannelOrderRGBA = 5,
+ SpvImageChannelOrderBGRA = 6,
+ SpvImageChannelOrderARGB = 7,
+ SpvImageChannelOrderIntensity = 8,
+ SpvImageChannelOrderLuminance = 9,
+ SpvImageChannelOrderRx = 10,
+ SpvImageChannelOrderRGx = 11,
+ SpvImageChannelOrderRGBx = 12,
+ SpvImageChannelOrderDepth = 13,
+ SpvImageChannelOrderDepthStencil = 14,
+ SpvImageChannelOrdersRGB = 15,
+ SpvImageChannelOrdersRGBx = 16,
+ SpvImageChannelOrdersRGBA = 17,
+ SpvImageChannelOrdersBGRA = 18,
+} SpvImageChannelOrder;
+
+typedef enum SpvImageChannelDataType_ {
+ SpvImageChannelDataTypeSnormInt8 = 0,
+ SpvImageChannelDataTypeSnormInt16 = 1,
+ SpvImageChannelDataTypeUnormInt8 = 2,
+ SpvImageChannelDataTypeUnormInt16 = 3,
+ SpvImageChannelDataTypeUnormShort565 = 4,
+ SpvImageChannelDataTypeUnormShort555 = 5,
+ SpvImageChannelDataTypeUnormInt101010 = 6,
+ SpvImageChannelDataTypeSignedInt8 = 7,
+ SpvImageChannelDataTypeSignedInt16 = 8,
+ SpvImageChannelDataTypeSignedInt32 = 9,
+ SpvImageChannelDataTypeUnsignedInt8 = 10,
+ SpvImageChannelDataTypeUnsignedInt16 = 11,
+ SpvImageChannelDataTypeUnsignedInt32 = 12,
+ SpvImageChannelDataTypeHalfFloat = 13,
+ SpvImageChannelDataTypeFloat = 14,
+ SpvImageChannelDataTypeUnormInt24 = 15,
+ SpvImageChannelDataTypeUnormInt101010_2 = 16,
+} SpvImageChannelDataType;
+
+typedef enum SpvImageOperandsShift_ {
+ SpvImageOperandsBiasShift = 0,
+ SpvImageOperandsLodShift = 1,
+ SpvImageOperandsGradShift = 2,
+ SpvImageOperandsConstOffsetShift = 3,
+ SpvImageOperandsOffsetShift = 4,
+ SpvImageOperandsConstOffsetsShift = 5,
+ SpvImageOperandsSampleShift = 6,
+ SpvImageOperandsMinLodShift = 7,
+} SpvImageOperandsShift;
+
+typedef enum SpvImageOperandsMask_ {
+ SpvImageOperandsMaskNone = 0,
+ SpvImageOperandsBiasMask = 0x00000001,
+ SpvImageOperandsLodMask = 0x00000002,
+ SpvImageOperandsGradMask = 0x00000004,
+ SpvImageOperandsConstOffsetMask = 0x00000008,
+ SpvImageOperandsOffsetMask = 0x00000010,
+ SpvImageOperandsConstOffsetsMask = 0x00000020,
+ SpvImageOperandsSampleMask = 0x00000040,
+ SpvImageOperandsMinLodMask = 0x00000080,
+} SpvImageOperandsMask;
+
+typedef enum SpvFPFastMathModeShift_ {
+ SpvFPFastMathModeNotNaNShift = 0,
+ SpvFPFastMathModeNotInfShift = 1,
+ SpvFPFastMathModeNSZShift = 2,
+ SpvFPFastMathModeAllowRecipShift = 3,
+ SpvFPFastMathModeFastShift = 4,
+} SpvFPFastMathModeShift;
+
+typedef enum SpvFPFastMathModeMask_ {
+ SpvFPFastMathModeMaskNone = 0,
+ SpvFPFastMathModeNotNaNMask = 0x00000001,
+ SpvFPFastMathModeNotInfMask = 0x00000002,
+ SpvFPFastMathModeNSZMask = 0x00000004,
+ SpvFPFastMathModeAllowRecipMask = 0x00000008,
+ SpvFPFastMathModeFastMask = 0x00000010,
+} SpvFPFastMathModeMask;
+
+typedef enum SpvFPRoundingMode_ {
+ SpvFPRoundingModeRTE = 0,
+ SpvFPRoundingModeRTZ = 1,
+ SpvFPRoundingModeRTP = 2,
+ SpvFPRoundingModeRTN = 3,
+} SpvFPRoundingMode;
+
+typedef enum SpvLinkageType_ {
+ SpvLinkageTypeExport = 0,
+ SpvLinkageTypeImport = 1,
+} SpvLinkageType;
+
+typedef enum SpvAccessQualifier_ {
+ SpvAccessQualifierReadOnly = 0,
+ SpvAccessQualifierWriteOnly = 1,
+ SpvAccessQualifierReadWrite = 2,
+} SpvAccessQualifier;
+
+typedef enum SpvFunctionParameterAttribute_ {
+ SpvFunctionParameterAttributeZext = 0,
+ SpvFunctionParameterAttributeSext = 1,
+ SpvFunctionParameterAttributeByVal = 2,
+ SpvFunctionParameterAttributeSret = 3,
+ SpvFunctionParameterAttributeNoAlias = 4,
+ SpvFunctionParameterAttributeNoCapture = 5,
+ SpvFunctionParameterAttributeNoWrite = 6,
+ SpvFunctionParameterAttributeNoReadWrite = 7,
+} SpvFunctionParameterAttribute;
+
+typedef enum SpvDecoration_ {
+ SpvDecorationRelaxedPrecision = 0,
+ SpvDecorationSpecId = 1,
+ SpvDecorationBlock = 2,
+ SpvDecorationBufferBlock = 3,
+ SpvDecorationRowMajor = 4,
+ SpvDecorationColMajor = 5,
+ SpvDecorationArrayStride = 6,
+ SpvDecorationMatrixStride = 7,
+ SpvDecorationGLSLShared = 8,
+ SpvDecorationGLSLPacked = 9,
+ SpvDecorationCPacked = 10,
+ SpvDecorationBuiltIn = 11,
+ SpvDecorationNoPerspective = 13,
+ SpvDecorationFlat = 14,
+ SpvDecorationPatch = 15,
+ SpvDecorationCentroid = 16,
+ SpvDecorationSample = 17,
+ SpvDecorationInvariant = 18,
+ SpvDecorationRestrict = 19,
+ SpvDecorationAliased = 20,
+ SpvDecorationVolatile = 21,
+ SpvDecorationConstant = 22,
+ SpvDecorationCoherent = 23,
+ SpvDecorationNonWritable = 24,
+ SpvDecorationNonReadable = 25,
+ SpvDecorationUniform = 26,
+ SpvDecorationSaturatedConversion = 28,
+ SpvDecorationStream = 29,
+ SpvDecorationLocation = 30,
+ SpvDecorationComponent = 31,
+ SpvDecorationIndex = 32,
+ SpvDecorationBinding = 33,
+ SpvDecorationDescriptorSet = 34,
+ SpvDecorationOffset = 35,
+ SpvDecorationXfbBuffer = 36,
+ SpvDecorationXfbStride = 37,
+ SpvDecorationFuncParamAttr = 38,
+ SpvDecorationFPRoundingMode = 39,
+ SpvDecorationFPFastMathMode = 40,
+ SpvDecorationLinkageAttributes = 41,
+ SpvDecorationNoContraction = 42,
+ SpvDecorationInputAttachmentIndex = 43,
+ SpvDecorationAlignment = 44,
+} SpvDecoration;
+
+typedef enum SpvBuiltIn_ {
+ SpvBuiltInPosition = 0,
+ SpvBuiltInPointSize = 1,
+ SpvBuiltInClipDistance = 3,
+ SpvBuiltInCullDistance = 4,
+ SpvBuiltInVertexId = 5,
+ SpvBuiltInInstanceId = 6,
+ SpvBuiltInPrimitiveId = 7,
+ SpvBuiltInInvocationId = 8,
+ SpvBuiltInLayer = 9,
+ SpvBuiltInViewportIndex = 10,
+ SpvBuiltInTessLevelOuter = 11,
+ SpvBuiltInTessLevelInner = 12,
+ SpvBuiltInTessCoord = 13,
+ SpvBuiltInPatchVertices = 14,
+ SpvBuiltInFragCoord = 15,
+ SpvBuiltInPointCoord = 16,
+ SpvBuiltInFrontFacing = 17,
+ SpvBuiltInSampleId = 18,
+ SpvBuiltInSamplePosition = 19,
+ SpvBuiltInSampleMask = 20,
+ SpvBuiltInFragDepth = 22,
+ SpvBuiltInHelperInvocation = 23,
+ SpvBuiltInNumWorkgroups = 24,
+ SpvBuiltInWorkgroupSize = 25,
+ SpvBuiltInWorkgroupId = 26,
+ SpvBuiltInLocalInvocationId = 27,
+ SpvBuiltInGlobalInvocationId = 28,
+ SpvBuiltInLocalInvocationIndex = 29,
+ SpvBuiltInWorkDim = 30,
+ SpvBuiltInGlobalSize = 31,
+ SpvBuiltInEnqueuedWorkgroupSize = 32,
+ SpvBuiltInGlobalOffset = 33,
+ SpvBuiltInGlobalLinearId = 34,
+ SpvBuiltInSubgroupSize = 36,
+ SpvBuiltInSubgroupMaxSize = 37,
+ SpvBuiltInNumSubgroups = 38,
+ SpvBuiltInNumEnqueuedSubgroups = 39,
+ SpvBuiltInSubgroupId = 40,
+ SpvBuiltInSubgroupLocalInvocationId = 41,
+ SpvBuiltInVertexIndex = 42,
+ SpvBuiltInInstanceIndex = 43,
+} SpvBuiltIn;
+
+typedef enum SpvSelectionControlShift_ {
+ SpvSelectionControlFlattenShift = 0,
+ SpvSelectionControlDontFlattenShift = 1,
+} SpvSelectionControlShift;
+
+typedef enum SpvSelectionControlMask_ {
+ SpvSelectionControlMaskNone = 0,
+ SpvSelectionControlFlattenMask = 0x00000001,
+ SpvSelectionControlDontFlattenMask = 0x00000002,
+} SpvSelectionControlMask;
+
+typedef enum SpvLoopControlShift_ {
+ SpvLoopControlUnrollShift = 0,
+ SpvLoopControlDontUnrollShift = 1,
+} SpvLoopControlShift;
+
+typedef enum SpvLoopControlMask_ {
+ SpvLoopControlMaskNone = 0,
+ SpvLoopControlUnrollMask = 0x00000001,
+ SpvLoopControlDontUnrollMask = 0x00000002,
+} SpvLoopControlMask;
+
+typedef enum SpvFunctionControlShift_ {
+ SpvFunctionControlInlineShift = 0,
+ SpvFunctionControlDontInlineShift = 1,
+ SpvFunctionControlPureShift = 2,
+ SpvFunctionControlConstShift = 3,
+} SpvFunctionControlShift;
+
+typedef enum SpvFunctionControlMask_ {
+ SpvFunctionControlMaskNone = 0,
+ SpvFunctionControlInlineMask = 0x00000001,
+ SpvFunctionControlDontInlineMask = 0x00000002,
+ SpvFunctionControlPureMask = 0x00000004,
+ SpvFunctionControlConstMask = 0x00000008,
+} SpvFunctionControlMask;
+
+typedef enum SpvMemorySemanticsShift_ {
+ SpvMemorySemanticsAcquireShift = 1,
+ SpvMemorySemanticsReleaseShift = 2,
+ SpvMemorySemanticsAcquireReleaseShift = 3,
+ SpvMemorySemanticsSequentiallyConsistentShift = 4,
+ SpvMemorySemanticsUniformMemoryShift = 6,
+ SpvMemorySemanticsSubgroupMemoryShift = 7,
+ SpvMemorySemanticsWorkgroupMemoryShift = 8,
+ SpvMemorySemanticsCrossWorkgroupMemoryShift = 9,
+ SpvMemorySemanticsAtomicCounterMemoryShift = 10,
+ SpvMemorySemanticsImageMemoryShift = 11,
+} SpvMemorySemanticsShift;
+
+typedef enum SpvMemorySemanticsMask_ {
+ SpvMemorySemanticsMaskNone = 0,
+ SpvMemorySemanticsAcquireMask = 0x00000002,
+ SpvMemorySemanticsReleaseMask = 0x00000004,
+ SpvMemorySemanticsAcquireReleaseMask = 0x00000008,
+ SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010,
+ SpvMemorySemanticsUniformMemoryMask = 0x00000040,
+ SpvMemorySemanticsSubgroupMemoryMask = 0x00000080,
+ SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100,
+ SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
+ SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400,
+ SpvMemorySemanticsImageMemoryMask = 0x00000800,
+} SpvMemorySemanticsMask;
+
+typedef enum SpvMemoryAccessShift_ {
+ SpvMemoryAccessVolatileShift = 0,
+ SpvMemoryAccessAlignedShift = 1,
+ SpvMemoryAccessNontemporalShift = 2,
+} SpvMemoryAccessShift;
+
+typedef enum SpvMemoryAccessMask_ {
+ SpvMemoryAccessMaskNone = 0,
+ SpvMemoryAccessVolatileMask = 0x00000001,
+ SpvMemoryAccessAlignedMask = 0x00000002,
+ SpvMemoryAccessNontemporalMask = 0x00000004,
+} SpvMemoryAccessMask;
+
+typedef enum SpvScope_ {
+ SpvScopeCrossDevice = 0,
+ SpvScopeDevice = 1,
+ SpvScopeWorkgroup = 2,
+ SpvScopeSubgroup = 3,
+ SpvScopeInvocation = 4,
+} SpvScope;
+
+typedef enum SpvGroupOperation_ {
+ SpvGroupOperationReduce = 0,
+ SpvGroupOperationInclusiveScan = 1,
+ SpvGroupOperationExclusiveScan = 2,
+} SpvGroupOperation;
+
+typedef enum SpvKernelEnqueueFlags_ {
+ SpvKernelEnqueueFlagsNoWait = 0,
+ SpvKernelEnqueueFlagsWaitKernel = 1,
+ SpvKernelEnqueueFlagsWaitWorkGroup = 2,
+} SpvKernelEnqueueFlags;
+
+typedef enum SpvKernelProfilingInfoShift_ {
+ SpvKernelProfilingInfoCmdExecTimeShift = 0,
+} SpvKernelProfilingInfoShift;
+
+typedef enum SpvKernelProfilingInfoMask_ {
+ SpvKernelProfilingInfoMaskNone = 0,
+ SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001,
+} SpvKernelProfilingInfoMask;
+
+typedef enum SpvCapability_ {
+ SpvCapabilityMatrix = 0,
+ SpvCapabilityShader = 1,
+ SpvCapabilityGeometry = 2,
+ SpvCapabilityTessellation = 3,
+ SpvCapabilityAddresses = 4,
+ SpvCapabilityLinkage = 5,
+ SpvCapabilityKernel = 6,
+ SpvCapabilityVector16 = 7,
+ SpvCapabilityFloat16Buffer = 8,
+ SpvCapabilityFloat16 = 9,
+ SpvCapabilityFloat64 = 10,
+ SpvCapabilityInt64 = 11,
+ SpvCapabilityInt64Atomics = 12,
+ SpvCapabilityImageBasic = 13,
+ SpvCapabilityImageReadWrite = 14,
+ SpvCapabilityImageMipmap = 15,
+ SpvCapabilityPipes = 17,
+ SpvCapabilityGroups = 18,
+ SpvCapabilityDeviceEnqueue = 19,
+ SpvCapabilityLiteralSampler = 20,
+ SpvCapabilityAtomicStorage = 21,
+ SpvCapabilityInt16 = 22,
+ SpvCapabilityTessellationPointSize = 23,
+ SpvCapabilityGeometryPointSize = 24,
+ SpvCapabilityImageGatherExtended = 25,
+ SpvCapabilityStorageImageMultisample = 27,
+ SpvCapabilityUniformBufferArrayDynamicIndexing = 28,
+ SpvCapabilitySampledImageArrayDynamicIndexing = 29,
+ SpvCapabilityStorageBufferArrayDynamicIndexing = 30,
+ SpvCapabilityStorageImageArrayDynamicIndexing = 31,
+ SpvCapabilityClipDistance = 32,
+ SpvCapabilityCullDistance = 33,
+ SpvCapabilityImageCubeArray = 34,
+ SpvCapabilitySampleRateShading = 35,
+ SpvCapabilityImageRect = 36,
+ SpvCapabilitySampledRect = 37,
+ SpvCapabilityGenericPointer = 38,
+ SpvCapabilityInt8 = 39,
+ SpvCapabilityInputAttachment = 40,
+ SpvCapabilitySparseResidency = 41,
+ SpvCapabilityMinLod = 42,
+ SpvCapabilitySampled1D = 43,
+ SpvCapabilityImage1D = 44,
+ SpvCapabilitySampledCubeArray = 45,
+ SpvCapabilitySampledBuffer = 46,
+ SpvCapabilityImageBuffer = 47,
+ SpvCapabilityImageMSArray = 48,
+ SpvCapabilityStorageImageExtendedFormats = 49,
+ SpvCapabilityImageQuery = 50,
+ SpvCapabilityDerivativeControl = 51,
+ SpvCapabilityInterpolationFunction = 52,
+ SpvCapabilityTransformFeedback = 53,
+ SpvCapabilityGeometryStreams = 54,
+ SpvCapabilityStorageImageReadWithoutFormat = 55,
+ SpvCapabilityStorageImageWriteWithoutFormat = 56,
+ SpvCapabilityMultiViewport = 57,
+} SpvCapability;
+
+typedef enum SpvOp_ {
+ SpvOpNop = 0,
+ SpvOpUndef = 1,
+ SpvOpSourceContinued = 2,
+ SpvOpSource = 3,
+ SpvOpSourceExtension = 4,
+ SpvOpName = 5,
+ SpvOpMemberName = 6,
+ SpvOpString = 7,
+ SpvOpLine = 8,
+ SpvOpExtension = 10,
+ SpvOpExtInstImport = 11,
+ SpvOpExtInst = 12,
+ SpvOpMemoryModel = 14,
+ SpvOpEntryPoint = 15,
+ SpvOpExecutionMode = 16,
+ SpvOpCapability = 17,
+ SpvOpTypeVoid = 19,
+ SpvOpTypeBool = 20,
+ SpvOpTypeInt = 21,
+ SpvOpTypeFloat = 22,
+ SpvOpTypeVector = 23,
+ SpvOpTypeMatrix = 24,
+ SpvOpTypeImage = 25,
+ SpvOpTypeSampler = 26,
+ SpvOpTypeSampledImage = 27,
+ SpvOpTypeArray = 28,
+ SpvOpTypeRuntimeArray = 29,
+ SpvOpTypeStruct = 30,
+ SpvOpTypeOpaque = 31,
+ SpvOpTypePointer = 32,
+ SpvOpTypeFunction = 33,
+ SpvOpTypeEvent = 34,
+ SpvOpTypeDeviceEvent = 35,
+ SpvOpTypeReserveId = 36,
+ SpvOpTypeQueue = 37,
+ SpvOpTypePipe = 38,
+ SpvOpTypeForwardPointer = 39,
+ SpvOpConstantTrue = 41,
+ SpvOpConstantFalse = 42,
+ SpvOpConstant = 43,
+ SpvOpConstantComposite = 44,
+ SpvOpConstantSampler = 45,
+ SpvOpConstantNull = 46,
+ SpvOpSpecConstantTrue = 48,
+ SpvOpSpecConstantFalse = 49,
+ SpvOpSpecConstant = 50,
+ SpvOpSpecConstantComposite = 51,
+ SpvOpSpecConstantOp = 52,
+ SpvOpFunction = 54,
+ SpvOpFunctionParameter = 55,
+ SpvOpFunctionEnd = 56,
+ SpvOpFunctionCall = 57,
+ SpvOpVariable = 59,
+ SpvOpImageTexelPointer = 60,
+ SpvOpLoad = 61,
+ SpvOpStore = 62,
+ SpvOpCopyMemory = 63,
+ SpvOpCopyMemorySized = 64,
+ SpvOpAccessChain = 65,
+ SpvOpInBoundsAccessChain = 66,
+ SpvOpPtrAccessChain = 67,
+ SpvOpArrayLength = 68,
+ SpvOpGenericPtrMemSemantics = 69,
+ SpvOpInBoundsPtrAccessChain = 70,
+ SpvOpDecorate = 71,
+ SpvOpMemberDecorate = 72,
+ SpvOpDecorationGroup = 73,
+ SpvOpGroupDecorate = 74,
+ SpvOpGroupMemberDecorate = 75,
+ SpvOpVectorExtractDynamic = 77,
+ SpvOpVectorInsertDynamic = 78,
+ SpvOpVectorShuffle = 79,
+ SpvOpCompositeConstruct = 80,
+ SpvOpCompositeExtract = 81,
+ SpvOpCompositeInsert = 82,
+ SpvOpCopyObject = 83,
+ SpvOpTranspose = 84,
+ SpvOpSampledImage = 86,
+ SpvOpImageSampleImplicitLod = 87,
+ SpvOpImageSampleExplicitLod = 88,
+ SpvOpImageSampleDrefImplicitLod = 89,
+ SpvOpImageSampleDrefExplicitLod = 90,
+ SpvOpImageSampleProjImplicitLod = 91,
+ SpvOpImageSampleProjExplicitLod = 92,
+ SpvOpImageSampleProjDrefImplicitLod = 93,
+ SpvOpImageSampleProjDrefExplicitLod = 94,
+ SpvOpImageFetch = 95,
+ SpvOpImageGather = 96,
+ SpvOpImageDrefGather = 97,
+ SpvOpImageRead = 98,
+ SpvOpImageWrite = 99,
+ SpvOpImage = 100,
+ SpvOpImageQueryFormat = 101,
+ SpvOpImageQueryOrder = 102,
+ SpvOpImageQuerySizeLod = 103,
+ SpvOpImageQuerySize = 104,
+ SpvOpImageQueryLod = 105,
+ SpvOpImageQueryLevels = 106,
+ SpvOpImageQuerySamples = 107,
+ SpvOpConvertFToU = 109,
+ SpvOpConvertFToS = 110,
+ SpvOpConvertSToF = 111,
+ SpvOpConvertUToF = 112,
+ SpvOpUConvert = 113,
+ SpvOpSConvert = 114,
+ SpvOpFConvert = 115,
+ SpvOpQuantizeToF16 = 116,
+ SpvOpConvertPtrToU = 117,
+ SpvOpSatConvertSToU = 118,
+ SpvOpSatConvertUToS = 119,
+ SpvOpConvertUToPtr = 120,
+ SpvOpPtrCastToGeneric = 121,
+ SpvOpGenericCastToPtr = 122,
+ SpvOpGenericCastToPtrExplicit = 123,
+ SpvOpBitcast = 124,
+ SpvOpSNegate = 126,
+ SpvOpFNegate = 127,
+ SpvOpIAdd = 128,
+ SpvOpFAdd = 129,
+ SpvOpISub = 130,
+ SpvOpFSub = 131,
+ SpvOpIMul = 132,
+ SpvOpFMul = 133,
+ SpvOpUDiv = 134,
+ SpvOpSDiv = 135,
+ SpvOpFDiv = 136,
+ SpvOpUMod = 137,
+ SpvOpSRem = 138,
+ SpvOpSMod = 139,
+ SpvOpFRem = 140,
+ SpvOpFMod = 141,
+ SpvOpVectorTimesScalar = 142,
+ SpvOpMatrixTimesScalar = 143,
+ SpvOpVectorTimesMatrix = 144,
+ SpvOpMatrixTimesVector = 145,
+ SpvOpMatrixTimesMatrix = 146,
+ SpvOpOuterProduct = 147,
+ SpvOpDot = 148,
+ SpvOpIAddCarry = 149,
+ SpvOpISubBorrow = 150,
+ SpvOpUMulExtended = 151,
+ SpvOpSMulExtended = 152,
+ SpvOpAny = 154,
+ SpvOpAll = 155,
+ SpvOpIsNan = 156,
+ SpvOpIsInf = 157,
+ SpvOpIsFinite = 158,
+ SpvOpIsNormal = 159,
+ SpvOpSignBitSet = 160,
+ SpvOpLessOrGreater = 161,
+ SpvOpOrdered = 162,
+ SpvOpUnordered = 163,
+ SpvOpLogicalEqual = 164,
+ SpvOpLogicalNotEqual = 165,
+ SpvOpLogicalOr = 166,
+ SpvOpLogicalAnd = 167,
+ SpvOpLogicalNot = 168,
+ SpvOpSelect = 169,
+ SpvOpIEqual = 170,
+ SpvOpINotEqual = 171,
+ SpvOpUGreaterThan = 172,
+ SpvOpSGreaterThan = 173,
+ SpvOpUGreaterThanEqual = 174,
+ SpvOpSGreaterThanEqual = 175,
+ SpvOpULessThan = 176,
+ SpvOpSLessThan = 177,
+ SpvOpULessThanEqual = 178,
+ SpvOpSLessThanEqual = 179,
+ SpvOpFOrdEqual = 180,
+ SpvOpFUnordEqual = 181,
+ SpvOpFOrdNotEqual = 182,
+ SpvOpFUnordNotEqual = 183,
+ SpvOpFOrdLessThan = 184,
+ SpvOpFUnordLessThan = 185,
+ SpvOpFOrdGreaterThan = 186,
+ SpvOpFUnordGreaterThan = 187,
+ SpvOpFOrdLessThanEqual = 188,
+ SpvOpFUnordLessThanEqual = 189,
+ SpvOpFOrdGreaterThanEqual = 190,
+ SpvOpFUnordGreaterThanEqual = 191,
+ SpvOpShiftRightLogical = 194,
+ SpvOpShiftRightArithmetic = 195,
+ SpvOpShiftLeftLogical = 196,
+ SpvOpBitwiseOr = 197,
+ SpvOpBitwiseXor = 198,
+ SpvOpBitwiseAnd = 199,
+ SpvOpNot = 200,
+ SpvOpBitFieldInsert = 201,
+ SpvOpBitFieldSExtract = 202,
+ SpvOpBitFieldUExtract = 203,
+ SpvOpBitReverse = 204,
+ SpvOpBitCount = 205,
+ SpvOpDPdx = 207,
+ SpvOpDPdy = 208,
+ SpvOpFwidth = 209,
+ SpvOpDPdxFine = 210,
+ SpvOpDPdyFine = 211,
+ SpvOpFwidthFine = 212,
+ SpvOpDPdxCoarse = 213,
+ SpvOpDPdyCoarse = 214,
+ SpvOpFwidthCoarse = 215,
+ SpvOpEmitVertex = 218,
+ SpvOpEndPrimitive = 219,
+ SpvOpEmitStreamVertex = 220,
+ SpvOpEndStreamPrimitive = 221,
+ SpvOpControlBarrier = 224,
+ SpvOpMemoryBarrier = 225,
+ SpvOpAtomicLoad = 227,
+ SpvOpAtomicStore = 228,
+ SpvOpAtomicExchange = 229,
+ SpvOpAtomicCompareExchange = 230,
+ SpvOpAtomicCompareExchangeWeak = 231,
+ SpvOpAtomicIIncrement = 232,
+ SpvOpAtomicIDecrement = 233,
+ SpvOpAtomicIAdd = 234,
+ SpvOpAtomicISub = 235,
+ SpvOpAtomicSMin = 236,
+ SpvOpAtomicUMin = 237,
+ SpvOpAtomicSMax = 238,
+ SpvOpAtomicUMax = 239,
+ SpvOpAtomicAnd = 240,
+ SpvOpAtomicOr = 241,
+ SpvOpAtomicXor = 242,
+ SpvOpPhi = 245,
+ SpvOpLoopMerge = 246,
+ SpvOpSelectionMerge = 247,
+ SpvOpLabel = 248,
+ SpvOpBranch = 249,
+ SpvOpBranchConditional = 250,
+ SpvOpSwitch = 251,
+ SpvOpKill = 252,
+ SpvOpReturn = 253,
+ SpvOpReturnValue = 254,
+ SpvOpUnreachable = 255,
+ SpvOpLifetimeStart = 256,
+ SpvOpLifetimeStop = 257,
+ SpvOpGroupAsyncCopy = 259,
+ SpvOpGroupWaitEvents = 260,
+ SpvOpGroupAll = 261,
+ SpvOpGroupAny = 262,
+ SpvOpGroupBroadcast = 263,
+ SpvOpGroupIAdd = 264,
+ SpvOpGroupFAdd = 265,
+ SpvOpGroupFMin = 266,
+ SpvOpGroupUMin = 267,
+ SpvOpGroupSMin = 268,
+ SpvOpGroupFMax = 269,
+ SpvOpGroupUMax = 270,
+ SpvOpGroupSMax = 271,
+ SpvOpReadPipe = 274,
+ SpvOpWritePipe = 275,
+ SpvOpReservedReadPipe = 276,
+ SpvOpReservedWritePipe = 277,
+ SpvOpReserveReadPipePackets = 278,
+ SpvOpReserveWritePipePackets = 279,
+ SpvOpCommitReadPipe = 280,
+ SpvOpCommitWritePipe = 281,
+ SpvOpIsValidReserveId = 282,
+ SpvOpGetNumPipePackets = 283,
+ SpvOpGetMaxPipePackets = 284,
+ SpvOpGroupReserveReadPipePackets = 285,
+ SpvOpGroupReserveWritePipePackets = 286,
+ SpvOpGroupCommitReadPipe = 287,
+ SpvOpGroupCommitWritePipe = 288,
+ SpvOpEnqueueMarker = 291,
+ SpvOpEnqueueKernel = 292,
+ SpvOpGetKernelNDrangeSubGroupCount = 293,
+ SpvOpGetKernelNDrangeMaxSubGroupSize = 294,
+ SpvOpGetKernelWorkGroupSize = 295,
+ SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296,
+ SpvOpRetainEvent = 297,
+ SpvOpReleaseEvent = 298,
+ SpvOpCreateUserEvent = 299,
+ SpvOpIsValidEvent = 300,
+ SpvOpSetUserEventStatus = 301,
+ SpvOpCaptureEventProfilingInfo = 302,
+ SpvOpGetDefaultQueue = 303,
+ SpvOpBuildNDRange = 304,
+ SpvOpImageSparseSampleImplicitLod = 305,
+ SpvOpImageSparseSampleExplicitLod = 306,
+ SpvOpImageSparseSampleDrefImplicitLod = 307,
+ SpvOpImageSparseSampleDrefExplicitLod = 308,
+ SpvOpImageSparseSampleProjImplicitLod = 309,
+ SpvOpImageSparseSampleProjExplicitLod = 310,
+ SpvOpImageSparseSampleProjDrefImplicitLod = 311,
+ SpvOpImageSparseSampleProjDrefExplicitLod = 312,
+ SpvOpImageSparseFetch = 313,
+ SpvOpImageSparseGather = 314,
+ SpvOpImageSparseDrefGather = 315,
+ SpvOpImageSparseTexelsResident = 316,
+ SpvOpNoLine = 317,
+ SpvOpAtomicFlagTestAndSet = 318,
+ SpvOpAtomicFlagClear = 319,
+ SpvOpImageSparseRead = 320,
+} SpvOp;
+
+#endif // #ifndef spirv_H
diff --git a/gfx/skia/skia/src/sksl/tracing/SkRPDebugTrace.cpp b/gfx/skia/skia/src/sksl/tracing/SkRPDebugTrace.cpp
new file mode 100644
index 0000000000..9e3e6254f7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkRPDebugTrace.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/tracing/SkRPDebugTrace.h"
+
+#include <sstream>
+#include <utility>
+
+namespace SkSL {
+
+void SkRPDebugTrace::writeTrace(SkWStream* o) const {
+ // Not yet implemented.
+}
+
+void SkRPDebugTrace::dump(SkWStream* o) const {
+ // Not yet implemented.
+}
+
+void SkRPDebugTrace::setSource(std::string source) {
+ fSource.clear();
+ std::stringstream stream{std::move(source)};
+ while (stream.good()) {
+ fSource.push_back({});
+ std::getline(stream, fSource.back(), '\n');
+ }
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/tracing/SkRPDebugTrace.h b/gfx/skia/skia/src/sksl/tracing/SkRPDebugTrace.h
new file mode 100644
index 0000000000..1c3fa3bc54
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkRPDebugTrace.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKRPDEBUGTRACE
+#define SKRPDEBUGTRACE
+
+#include "include/sksl/SkSLDebugTrace.h"
+#include "src/sksl/tracing/SkSLDebugInfo.h"
+
+#include <string>
+#include <vector>
+
+class SkWStream;
+
+namespace SkSL {
+
+class SkRPDebugTrace : public DebugTrace {
+public:
+ /** Serializes a debug trace to JSON which can be parsed by our debugger. */
+ void writeTrace(SkWStream* o) const override;
+
+ /** Generates a human-readable dump of the debug trace. */
+ void dump(SkWStream* o) const override;
+
+ /** Attaches the SkSL source to be debugged. */
+ void setSource(std::string source);
+
+ /** A 1:1 mapping of slot numbers to debug information. */
+ std::vector<SlotDebugInfo> fSlotInfo;
+ std::vector<FunctionDebugInfo> fFuncInfo;
+
+ /** The SkSL debug trace. */
+ std::vector<TraceInfo> fTraceInfo;
+
+    /** SkVM uniforms live in fSlotInfo; SkRP has a dedicated uniform slot map in fUniformInfo. */
+ std::vector<SlotDebugInfo> fUniformInfo;
+
+ /** The SkSL code, split line-by-line. */
+ std::vector<std::string> fSource;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/tracing/SkSLDebugInfo.h b/gfx/skia/skia/src/sksl/tracing/SkSLDebugInfo.h
new file mode 100644
index 0000000000..74b7a430d4
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkSLDebugInfo.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSLDEBUGINFO
+#define SKSLDEBUGINFO
+
+#include "src/sksl/ir/SkSLType.h"
+
+#include <cstdint>
+#include <string>
+
+namespace SkSL {
+
+struct TraceInfo {
+ enum class Op {
+ kLine, /** data: line number, (unused) */
+ kVar, /** data: slot, value */
+ kEnter, /** data: function index, (unused) */
+ kExit, /** data: function index, (unused) */
+ kScope, /** data: scope delta, (unused) */
+ };
+ Op op;
+ int32_t data[2];
+};
+
+struct SlotDebugInfo {
+ /** The full name of this variable (without component), e.g. `myArray[3].myStruct.myVector` */
+ std::string name;
+ /** The dimensions of this variable: 1x1 is a scalar, Nx1 is a vector, NxM is a matrix. */
+ uint8_t columns = 1, rows = 1;
+ /** Which component of the variable is this slot? (e.g. `vec4.z` is component 2) */
+ uint8_t componentIndex = 0;
+ /** Complex types (arrays/structs) can be tracked as a "group" of adjacent slots. */
+ int groupIndex = 0;
+ /** What kind of numbers belong in this slot? */
+ SkSL::Type::NumberKind numberKind = SkSL::Type::NumberKind::kNonnumeric;
+ /** Where is this variable located in the program? */
+ int line = 0;
+ Position pos = {};
+ /** If this slot holds a function's return value, contains 1; if not, -1. */
+ int fnReturnValue = -1;
+};
+
+struct FunctionDebugInfo {
+    /** Full function declaration: `float myFunction(half4 color)` */
+ std::string name;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/tracing/SkSLTraceHook.cpp b/gfx/skia/skia/src/sksl/tracing/SkSLTraceHook.cpp
new file mode 100644
index 0000000000..c394f7e0a7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkSLTraceHook.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/tracing/SkSLDebugInfo.h"
+#include "src/sksl/tracing/SkSLTraceHook.h"
+
+namespace SkSL {
+
+std::unique_ptr<Tracer> Tracer::Make(std::vector<TraceInfo>* traceInfo) {
+ auto hook = std::make_unique<Tracer>();
+ hook->fTraceInfo = traceInfo;
+ return hook;
+}
+
+void Tracer::line(int lineNum) {
+ fTraceInfo->push_back({TraceInfo::Op::kLine, /*data=*/{lineNum, 0}});
+}
+void Tracer::var(int slot, int32_t val) {
+ fTraceInfo->push_back({TraceInfo::Op::kVar, /*data=*/{slot, val}});
+}
+void Tracer::enter(int fnIdx) {
+ fTraceInfo->push_back({TraceInfo::Op::kEnter, /*data=*/{fnIdx, 0}});
+}
+void Tracer::exit(int fnIdx) {
+ fTraceInfo->push_back({TraceInfo::Op::kExit, /*data=*/{fnIdx, 0}});
+}
+void Tracer::scope(int delta) {
+ fTraceInfo->push_back({TraceInfo::Op::kScope, /*data=*/{delta, 0}});
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/tracing/SkSLTraceHook.h b/gfx/skia/skia/src/sksl/tracing/SkSLTraceHook.h
new file mode 100644
index 0000000000..404e1be229
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkSLTraceHook.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSLTRACEHOOK
+#define SKSLTRACEHOOK
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+struct TraceInfo;
+
+class TraceHook {
+public:
+ virtual ~TraceHook() = default;
+ virtual void line(int lineNum) = 0;
+ virtual void var(int slot, int32_t val) = 0;
+ virtual void enter(int fnIdx) = 0;
+ virtual void exit(int fnIdx) = 0;
+ virtual void scope(int delta) = 0;
+};
+
+class Tracer : public TraceHook {
+public:
+ static std::unique_ptr<Tracer> Make(std::vector<TraceInfo>* traceInfo);
+
+ void line(int lineNum) override;
+ void var(int slot, int32_t val) override;
+ void enter(int fnIdx) override;
+ void exit(int fnIdx) override;
+ void scope(int delta) override;
+
+private:
+ std::vector<TraceInfo>* fTraceInfo;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/tracing/SkVMDebugTrace.cpp b/gfx/skia/skia/src/sksl/tracing/SkVMDebugTrace.cpp
new file mode 100644
index 0000000000..091bd73720
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkVMDebugTrace.cpp
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/tracing/SkVMDebugTrace.h"
+
+#ifdef SKSL_ENABLE_TRACING
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkStreamPriv.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/utils/SkJSON.h"
+#include "src/utils/SkJSONWriter.h"
+
+#include <cstdio>
+#include <cstring>
+#include <sstream>
+#include <string>
+#include <string_view>
+#include <utility>
+
+static constexpr char kTraceVersion[] = "20220209";
+
+namespace SkSL {
+
+std::string SkVMDebugTrace::getSlotComponentSuffix(int slotIndex) const {
+ const SkSL::SlotDebugInfo& slot = fSlotInfo[slotIndex];
+
+ if (slot.rows > 1) {
+ return "[" + std::to_string(slot.componentIndex / slot.rows) +
+ "][" + std::to_string(slot.componentIndex % slot.rows) +
+ "]";
+ }
+ if (slot.columns > 1) {
+ switch (slot.componentIndex) {
+ case 0: return ".x";
+ case 1: return ".y";
+ case 2: return ".z";
+ case 3: return ".w";
+ default: return "[???]";
+ }
+ }
+ return {};
+}
+
+double SkVMDebugTrace::interpretValueBits(int slotIndex, int32_t valueBits) const {
+ SkASSERT(slotIndex >= 0);
+ SkASSERT((size_t)slotIndex < fSlotInfo.size());
+ switch (fSlotInfo[slotIndex].numberKind) {
+ case SkSL::Type::NumberKind::kUnsigned: {
+ uint32_t uintValue;
+ static_assert(sizeof(uintValue) == sizeof(valueBits));
+ memcpy(&uintValue, &valueBits, sizeof(uintValue));
+ return uintValue;
+ }
+ case SkSL::Type::NumberKind::kFloat: {
+ float floatValue;
+ static_assert(sizeof(floatValue) == sizeof(valueBits));
+ memcpy(&floatValue, &valueBits, sizeof(floatValue));
+ return floatValue;
+ }
+ default: {
+ return valueBits;
+ }
+ }
+}
+
+std::string SkVMDebugTrace::slotValueToString(int slotIndex, double value) const {
+ SkASSERT(slotIndex >= 0);
+ SkASSERT((size_t)slotIndex < fSlotInfo.size());
+ switch (fSlotInfo[slotIndex].numberKind) {
+ case SkSL::Type::NumberKind::kBoolean: {
+ return value ? "true" : "false";
+ }
+ default: {
+ char buffer[32];
+ snprintf(buffer, std::size(buffer), "%.8g", value);
+ return buffer;
+ }
+ }
+}
+
+std::string SkVMDebugTrace::getSlotValue(int slotIndex, int32_t valueBits) const {
+ return this->slotValueToString(slotIndex, this->interpretValueBits(slotIndex, valueBits));
+}
+
+void SkVMDebugTrace::setTraceCoord(const SkIPoint& coord) {
+ fTraceCoord = coord;
+}
+
+void SkVMDebugTrace::setSource(std::string source) {
+ fSource.clear();
+ std::stringstream stream{std::move(source)};
+ while (stream.good()) {
+ fSource.push_back({});
+ std::getline(stream, fSource.back(), '\n');
+ }
+}
+
+void SkVMDebugTrace::dump(SkWStream* o) const {
+ for (size_t index = 0; index < fSlotInfo.size(); ++index) {
+ const SlotDebugInfo& info = fSlotInfo[index];
+
+ o->writeText("$");
+ o->writeDecAsText(index);
+ o->writeText(" = ");
+ o->writeText(info.name.c_str());
+ o->writeText(" (");
+ switch (info.numberKind) {
+ case Type::NumberKind::kFloat: o->writeText("float"); break;
+ case Type::NumberKind::kSigned: o->writeText("int"); break;
+ case Type::NumberKind::kUnsigned: o->writeText("uint"); break;
+ case Type::NumberKind::kBoolean: o->writeText("bool"); break;
+ case Type::NumberKind::kNonnumeric: o->writeText("???"); break;
+ }
+ if (info.rows * info.columns > 1) {
+ o->writeDecAsText(info.columns);
+ if (info.rows != 1) {
+ o->writeText("x");
+ o->writeDecAsText(info.rows);
+ }
+ o->writeText(" : ");
+ o->writeText("slot ");
+ o->writeDecAsText(info.componentIndex + 1);
+ o->writeText("/");
+ o->writeDecAsText(info.rows * info.columns);
+ }
+ o->writeText(", L");
+ o->writeDecAsText(info.line);
+ o->writeText(")");
+ o->newline();
+ }
+
+ for (size_t index = 0; index < fFuncInfo.size(); ++index) {
+ const FunctionDebugInfo& info = fFuncInfo[index];
+
+ o->writeText("F");
+ o->writeDecAsText(index);
+ o->writeText(" = ");
+ o->writeText(info.name.c_str());
+ o->newline();
+ }
+
+ o->newline();
+
+ if (!fTraceInfo.empty()) {
+ std::string indent = "";
+ for (const SkSL::TraceInfo& traceInfo : fTraceInfo) {
+ int data0 = traceInfo.data[0];
+ int data1 = traceInfo.data[1];
+ switch (traceInfo.op) {
+ case SkSL::TraceInfo::Op::kLine:
+ o->writeText(indent.c_str());
+ o->writeText("line ");
+ o->writeDecAsText(data0);
+ break;
+
+ case SkSL::TraceInfo::Op::kVar: {
+ const SlotDebugInfo& slot = fSlotInfo[data0];
+ o->writeText(indent.c_str());
+ o->writeText(slot.name.c_str());
+ o->writeText(this->getSlotComponentSuffix(data0).c_str());
+ o->writeText(" = ");
+ o->writeText(this->getSlotValue(data0, data1).c_str());
+ break;
+ }
+ case SkSL::TraceInfo::Op::kEnter:
+ o->writeText(indent.c_str());
+ o->writeText("enter ");
+ o->writeText(fFuncInfo[data0].name.c_str());
+ indent += " ";
+ break;
+
+ case SkSL::TraceInfo::Op::kExit:
+ indent.resize(indent.size() - 2);
+ o->writeText(indent.c_str());
+ o->writeText("exit ");
+ o->writeText(fFuncInfo[data0].name.c_str());
+ break;
+
+ case SkSL::TraceInfo::Op::kScope:
+ for (int delta = data0; delta < 0; ++delta) {
+ indent.pop_back();
+ }
+ o->writeText(indent.c_str());
+ o->writeText("scope ");
+ o->writeText((data0 >= 0) ? "+" : "");
+ o->writeDecAsText(data0);
+ for (int delta = data0; delta > 0; --delta) {
+ indent.push_back(' ');
+ }
+ break;
+ }
+ o->newline();
+ }
+ }
+}
+
+void SkVMDebugTrace::writeTrace(SkWStream* w) const {
+ SkJSONWriter json(w);
+
+ json.beginObject(); // root
+ json.appendNString("version", kTraceVersion);
+ json.beginArray("source");
+
+ for (const std::string& line : fSource) {
+ json.appendString(line);
+ }
+
+ json.endArray(); // code
+ json.beginArray("slots");
+
+ for (size_t index = 0; index < fSlotInfo.size(); ++index) {
+ const SlotDebugInfo& info = fSlotInfo[index];
+
+ json.beginObject();
+ json.appendString("name", info.name.data(), info.name.size());
+ json.appendS32("columns", info.columns);
+ json.appendS32("rows", info.rows);
+ json.appendS32("index", info.componentIndex);
+ if (info.groupIndex != info.componentIndex) {
+ json.appendS32("groupIdx", info.groupIndex);
+ }
+ json.appendS32("kind", (int)info.numberKind);
+ json.appendS32("line", info.line);
+ if (info.fnReturnValue >= 0) {
+ json.appendS32("retval", info.fnReturnValue);
+ }
+ json.endObject();
+ }
+
+ json.endArray(); // slots
+ json.beginArray("functions");
+
+ for (size_t index = 0; index < fFuncInfo.size(); ++index) {
+ const FunctionDebugInfo& info = fFuncInfo[index];
+
+ json.beginObject();
+ json.appendString("name", info.name);
+ json.endObject();
+ }
+
+ json.endArray(); // functions
+ json.beginArray("trace");
+
+ for (size_t index = 0; index < fTraceInfo.size(); ++index) {
+ const TraceInfo& trace = fTraceInfo[index];
+ json.beginArray();
+ json.appendS32((int)trace.op);
+
+ // Skip trailing zeros in the data (since most ops only use one value).
+ int lastDataIdx = std::size(trace.data) - 1;
+ while (lastDataIdx >= 0 && !trace.data[lastDataIdx]) {
+ --lastDataIdx;
+ }
+ for (int dataIdx = 0; dataIdx <= lastDataIdx; ++dataIdx) {
+ json.appendS32(trace.data[dataIdx]);
+ }
+ json.endArray();
+ }
+
+ json.endArray(); // trace
+ json.endObject(); // root
+ json.flush();
+}
+
+bool SkVMDebugTrace::readTrace(SkStream* r) {
+ sk_sp<SkData> data = SkCopyStreamToData(r);
+ skjson::DOM json(reinterpret_cast<const char*>(data->bytes()), data->size());
+ const skjson::ObjectValue* root = json.root();
+ if (!root) {
+ return false;
+ }
+
+ const skjson::StringValue* version = (*root)["version"];
+ if (!version || version->str() != kTraceVersion) {
+ return false;
+ }
+
+ const skjson::ArrayValue* source = (*root)["source"];
+ if (!source) {
+ return false;
+ }
+
+ fSource.clear();
+ for (const skjson::StringValue* line : *source) {
+ if (!line) {
+ return false;
+ }
+ fSource.push_back(line->begin());
+ }
+
+ const skjson::ArrayValue* slots = (*root)["slots"];
+ if (!slots) {
+ return false;
+ }
+
+ fSlotInfo.clear();
+ for (const skjson::ObjectValue* element : *slots) {
+ if (!element) {
+ return false;
+ }
+
+ // Grow the slot array to hold this element.
+ fSlotInfo.push_back({});
+ SlotDebugInfo& info = fSlotInfo.back();
+
+ // Populate the SlotInfo with our JSON data.
+ const skjson::StringValue* name = (*element)["name"];
+ const skjson::NumberValue* columns = (*element)["columns"];
+ const skjson::NumberValue* rows = (*element)["rows"];
+ const skjson::NumberValue* index = (*element)["index"];
+ const skjson::NumberValue* groupIdx = (*element)["groupIdx"];
+ const skjson::NumberValue* kind = (*element)["kind"];
+ const skjson::NumberValue* line = (*element)["line"];
+ const skjson::NumberValue* retval = (*element)["retval"];
+ if (!name || !columns || !rows || !index || !kind || !line) {
+ return false;
+ }
+
+ info.name = name->begin();
+ info.columns = **columns;
+ info.rows = **rows;
+ info.componentIndex = **index;
+ info.groupIndex = groupIdx ? **groupIdx : info.componentIndex;
+ info.numberKind = (SkSL::Type::NumberKind)(int)**kind;
+ info.line = **line;
+ info.fnReturnValue = retval ? **retval : -1;
+ }
+
+ const skjson::ArrayValue* functions = (*root)["functions"];
+ if (!functions) {
+ return false;
+ }
+
+ fFuncInfo.clear();
+ for (const skjson::ObjectValue* element : *functions) {
+ if (!element) {
+ return false;
+ }
+
+ // Grow the function array to hold this element.
+ fFuncInfo.push_back({});
+ FunctionDebugInfo& info = fFuncInfo.back();
+
+ // Populate the FunctionInfo with our JSON data.
+ const skjson::StringValue* name = (*element)["name"];
+ if (!name) {
+ return false;
+ }
+
+ info.name = name->begin();
+ }
+
+ const skjson::ArrayValue* trace = (*root)["trace"];
+ if (!trace) {
+ return false;
+ }
+
+ fTraceInfo.clear();
+ fTraceInfo.reserve(trace->size());
+ for (const skjson::ArrayValue* element : *trace) {
+ fTraceInfo.push_back(TraceInfo{});
+ TraceInfo& info = fTraceInfo.back();
+
+ if (!element || element->size() < 1 || element->size() > (1 + std::size(info.data))) {
+ return false;
+ }
+ const skjson::NumberValue* opVal = (*element)[0];
+ if (!opVal) {
+ return false;
+ }
+ info.op = (TraceInfo::Op)(int)**opVal;
+ for (size_t elemIdx = 1; elemIdx < element->size(); ++elemIdx) {
+ const skjson::NumberValue* dataVal = (*element)[elemIdx];
+ if (!dataVal) {
+ return false;
+ }
+ info.data[elemIdx - 1] = **dataVal;
+ }
+ }
+
+ return true;
+}
+
+} // namespace SkSL
+
+#else // SKSL_ENABLE_TRACING
+
+#include <string>
+
+namespace SkSL {
+ void SkVMDebugTrace::setTraceCoord(const SkIPoint &coord) {}
+
+ void SkVMDebugTrace::setSource(std::string source) {}
+
+ bool SkVMDebugTrace::readTrace(SkStream *r) { return false; }
+
+ void SkVMDebugTrace::writeTrace(SkWStream *w) const {}
+
+ void SkVMDebugTrace::dump(SkWStream *o) const {}
+
+ std::string SkVMDebugTrace::getSlotComponentSuffix(int slotIndex) const { return ""; }
+
+ std::string SkVMDebugTrace::getSlotValue(int slotIndex, int32_t value) const { return ""; }
+
+ double SkVMDebugTrace::interpretValueBits(int slotIndex, int32_t valueBits) const { return 0; }
+
+ std::string SkVMDebugTrace::slotValueToString(int slotIndex, double value) const { return ""; }
+}
+#endif
diff --git a/gfx/skia/skia/src/sksl/tracing/SkVMDebugTrace.h b/gfx/skia/skia/src/sksl/tracing/SkVMDebugTrace.h
new file mode 100644
index 0000000000..c9b3525fe7
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkVMDebugTrace.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKVMDEBUGTRACE
+#define SKVMDEBUGTRACE
+
+#include "include/sksl/SkSLDebugTrace.h"
+
+#include "include/core/SkPoint.h"
+#include "src/sksl/tracing/SkSLDebugInfo.h"
+#include "src/sksl/tracing/SkSLTraceHook.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+class SkStream;
+class SkWStream;
+
+namespace SkSL {
+
+class SkVMDebugTrace : public DebugTrace {
+public:
+ /**
+ * Sets the device-coordinate pixel to trace. If it's not set, the point at (0, 0) will be used.
+ */
+ void setTraceCoord(const SkIPoint& coord);
+
+ /** Attaches the SkSL source to be debugged. */
+ void setSource(std::string source);
+
+ /** Serializes a debug trace to JSON which can be parsed by our debugger. */
+ bool readTrace(SkStream* r);
+ void writeTrace(SkWStream* w) const override;
+
+ /** Generates a human-readable dump of the debug trace. */
+ void dump(SkWStream* o) const override;
+
+ /** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". */
+ std::string getSlotComponentSuffix(int slotIndex) const;
+
+ /** Bit-casts a slot's value, then converts to text, e.g. "3.14" or "true" or "12345". */
+ std::string getSlotValue(int slotIndex, int32_t value) const;
+
+ /** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */
+ double interpretValueBits(int slotIndex, int32_t valueBits) const;
+
+ /** Converts a numeric value into text, based on the slot's NumberKind. */
+ std::string slotValueToString(int slotIndex, double value) const;
+
+ /** The device-coordinate pixel to trace (controlled by setTraceCoord) */
+ SkIPoint fTraceCoord = {};
+
+ /** A 1:1 mapping of slot numbers to debug information. */
+ std::vector<SlotDebugInfo> fSlotInfo;
+ std::vector<FunctionDebugInfo> fFuncInfo;
+
+ /** The SkSL debug trace. */
+ std::vector<TraceInfo> fTraceInfo;
+
+ /** The SkSL code, split line-by-line. */
+ std::vector<std::string> fSource;
+
+ /**
+ * A trace hook which populates fTraceInfo during SkVM program evaluation. This will be created
+ * automatically by the SkSLVMCodeGenerator.
+ */
+ std::unique_ptr<SkSL::TraceHook> fTraceHook;
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/tracing/SkVMDebugTracePlayer.cpp b/gfx/skia/skia/src/sksl/tracing/SkVMDebugTracePlayer.cpp
new file mode 100644
index 0000000000..7ae9e4638b
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkVMDebugTracePlayer.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/tracing/SkSLDebugInfo.h"
+#include "src/sksl/tracing/SkVMDebugTracePlayer.h"
+
+#include <limits.h>
+#include <algorithm>
+#include <utility>
+
+namespace SkSL {
+
+void SkVMDebugTracePlayer::reset(sk_sp<SkVMDebugTrace> debugTrace) {
+ size_t nslots = debugTrace ? debugTrace->fSlotInfo.size() : 0;
+ fDebugTrace = debugTrace;
+ fCursor = 0;
+ fScope = 0;
+ fSlots.clear();
+ fSlots.resize(nslots, {/*fValue=*/0,
+ /*fScope=*/INT_MAX,
+ /*fWriteTime=*/0});
+ fStack.clear();
+ fStack.push_back({/*fFunction=*/-1,
+ /*fLine=*/-1,
+ /*fDisplayMask=*/SkBitSet(nslots)});
+ fDirtyMask.emplace(nslots);
+ fReturnValues.emplace(nslots);
+
+ if (fDebugTrace) {
+ for (size_t slotIdx = 0; slotIdx < nslots; ++slotIdx) {
+ if (fDebugTrace->fSlotInfo[slotIdx].fnReturnValue >= 0) {
+ fReturnValues->set(slotIdx);
+ }
+ }
+
+ for (const TraceInfo& trace : fDebugTrace->fTraceInfo) {
+ if (trace.op == TraceInfo::Op::kLine) {
+ fLineNumbers[trace.data[0]] += 1;
+ }
+ }
+ }
+}
+
+void SkVMDebugTracePlayer::step() {
+ this->tidyState();
+ while (!this->traceHasCompleted()) {
+ if (this->execute(fCursor++)) {
+ break;
+ }
+ }
+}
+
+void SkVMDebugTracePlayer::stepOver() {
+ this->tidyState();
+ size_t initialStackDepth = fStack.size();
+ while (!this->traceHasCompleted()) {
+ bool canEscapeFromThisStackDepth = (fStack.size() <= initialStackDepth);
+ if (this->execute(fCursor++)) {
+ if (canEscapeFromThisStackDepth || this->atBreakpoint()) {
+ break;
+ }
+ }
+ }
+}
+
+void SkVMDebugTracePlayer::stepOut() {
+ this->tidyState();
+ size_t initialStackDepth = fStack.size();
+ while (!this->traceHasCompleted()) {
+ if (this->execute(fCursor++)) {
+ bool hasEscapedFromInitialStackDepth = (fStack.size() < initialStackDepth);
+ if (hasEscapedFromInitialStackDepth || this->atBreakpoint()) {
+ break;
+ }
+ }
+ }
+}
+
+void SkVMDebugTracePlayer::run() {
+ this->tidyState();
+ while (!this->traceHasCompleted()) {
+ if (this->execute(fCursor++)) {
+ if (this->atBreakpoint()) {
+ break;
+ }
+ }
+ }
+}
+
+void SkVMDebugTracePlayer::tidyState() {
+ fDirtyMask->reset();
+
+ // Conceptually this is `fStack.back().fDisplayMask &= ~fReturnValues`, but SkBitSet doesn't
+ // support masking one set of bits against another.
+ fReturnValues->forEachSetIndex([&](int slot) {
+ fStack.back().fDisplayMask.reset(slot);
+ });
+}
+
+bool SkVMDebugTracePlayer::traceHasCompleted() const {
+ return !fDebugTrace || fCursor >= fDebugTrace->fTraceInfo.size();
+}
+
+int32_t SkVMDebugTracePlayer::getCurrentLine() const {
+ SkASSERT(!fStack.empty());
+ return fStack.back().fLine;
+}
+
+int32_t SkVMDebugTracePlayer::getCurrentLineInStackFrame(int stackFrameIndex) const {
+ // The first entry on the stack is the "global" frame before we enter main, so offset our index
+ // by one to account for it.
+ ++stackFrameIndex;
+ SkASSERT(stackFrameIndex > 0);
+ SkASSERT((size_t)stackFrameIndex < fStack.size());
+ return fStack[stackFrameIndex].fLine;
+}
+
+bool SkVMDebugTracePlayer::atBreakpoint() const {
+ return fBreakpointLines.count(this->getCurrentLine());
+}
+
+void SkVMDebugTracePlayer::setBreakpoints(std::unordered_set<int> breakpointLines) {
+ fBreakpointLines = std::move(breakpointLines);
+}
+
+void SkVMDebugTracePlayer::addBreakpoint(int line) {
+ fBreakpointLines.insert(line);
+}
+
+void SkVMDebugTracePlayer::removeBreakpoint(int line) {
+ fBreakpointLines.erase(line);
+}
+
+std::vector<int> SkVMDebugTracePlayer::getCallStack() const {
+ SkASSERT(!fStack.empty());
+ std::vector<int> funcs;
+ funcs.reserve(fStack.size() - 1);
+ for (size_t index = 1; index < fStack.size(); ++index) {
+ funcs.push_back(fStack[index].fFunction);
+ }
+ return funcs;
+}
+
+int SkVMDebugTracePlayer::getStackDepth() const {
+ SkASSERT(!fStack.empty());
+ return fStack.size() - 1;
+}
+
+std::vector<SkVMDebugTracePlayer::VariableData> SkVMDebugTracePlayer::getVariablesForDisplayMask(
+ const SkBitSet& displayMask) const {
+ SkASSERT(displayMask.size() == fSlots.size());
+
+ std::vector<VariableData> vars;
+ displayMask.forEachSetIndex([&](int slot) {
+ double typedValue = fDebugTrace->interpretValueBits(slot, fSlots[slot].fValue);
+ vars.push_back({slot, fDirtyMask->test(slot), typedValue});
+ });
+ // Order the variable list so that the most recently-written variables are shown at the top.
+ std::stable_sort(vars.begin(), vars.end(), [&](const VariableData& a, const VariableData& b) {
+ return fSlots[a.fSlotIndex].fWriteTime > fSlots[b.fSlotIndex].fWriteTime;
+ });
+ return vars;
+}
+
+std::vector<SkVMDebugTracePlayer::VariableData> SkVMDebugTracePlayer::getLocalVariables(
+ int stackFrameIndex) const {
+ // The first entry on the stack is the "global" frame before we enter main, so offset our index
+ // by one to account for it.
+ ++stackFrameIndex;
+ if (stackFrameIndex <= 0 || (size_t)stackFrameIndex >= fStack.size()) {
+ SkDEBUGFAILF("stack frame %d doesn't exist", stackFrameIndex - 1);
+ return {};
+ }
+ return this->getVariablesForDisplayMask(fStack[stackFrameIndex].fDisplayMask);
+}
+
+std::vector<SkVMDebugTracePlayer::VariableData> SkVMDebugTracePlayer::getGlobalVariables() const {
+ if (fStack.empty()) {
+ return {};
+ }
+ return this->getVariablesForDisplayMask(fStack.front().fDisplayMask);
+}
+
+void SkVMDebugTracePlayer::updateVariableWriteTime(int slotIdx, size_t cursor) {
+ // The slotIdx could point to any slot within a variable.
+ // We want to update the write time on EVERY slot associated with this variable.
+ // The SlotInfo's groupIndex gives us enough information to find the affected range.
+ const SkSL::SlotDebugInfo& changedSlot = fDebugTrace->fSlotInfo[slotIdx];
+ slotIdx -= changedSlot.groupIndex;
+ SkASSERT(slotIdx >= 0);
+ SkASSERT(slotIdx < (int)fDebugTrace->fSlotInfo.size());
+
+ for (;;) {
+ fSlots[slotIdx++].fWriteTime = cursor;
+
+ // Stop if we've reached the final slot.
+ if (slotIdx >= (int)fDebugTrace->fSlotInfo.size()) {
+ break;
+ }
+ // Each separate variable-group starts with a groupIndex of 0; stop when we detect this.
+ if (fDebugTrace->fSlotInfo[slotIdx].groupIndex == 0) {
+ break;
+ }
+ }
+}
+
+bool SkVMDebugTracePlayer::execute(size_t position) {
+ if (position >= fDebugTrace->fTraceInfo.size()) {
+ SkDEBUGFAILF("position %zu out of range", position);
+ return true;
+ }
+
+ const TraceInfo& trace = fDebugTrace->fTraceInfo[position];
+ switch (trace.op) {
+ case TraceInfo::Op::kLine: { // data: line number, (unused)
+ SkASSERT(!fStack.empty());
+ int lineNumber = trace.data[0];
+ SkASSERT(lineNumber >= 0);
+ SkASSERT((size_t)lineNumber < fDebugTrace->fSource.size());
+ SkASSERT(fLineNumbers[lineNumber] > 0);
+ fStack.back().fLine = lineNumber;
+ fLineNumbers[lineNumber] -= 1;
+ return true;
+ }
+ case TraceInfo::Op::kVar: { // data: slot, value
+ int slotIdx = trace.data[0];
+ int value = trace.data[1];
+ SkASSERT(slotIdx >= 0);
+ SkASSERT((size_t)slotIdx < fDebugTrace->fSlotInfo.size());
+ fSlots[slotIdx].fValue = value;
+ fSlots[slotIdx].fScope = std::min<>(fSlots[slotIdx].fScope, fScope);
+ this->updateVariableWriteTime(slotIdx, position);
+ if (fDebugTrace->fSlotInfo[slotIdx].fnReturnValue < 0) {
+ // Normal variables are associated with the current function.
+ SkASSERT(fStack.size() > 0);
+ fStack.rbegin()[0].fDisplayMask.set(slotIdx);
+ } else {
+ // Return values are associated with the parent function (since the current function
+ // is exiting and we won't see them there).
+ SkASSERT(fStack.size() > 1);
+ fStack.rbegin()[1].fDisplayMask.set(slotIdx);
+ }
+ fDirtyMask->set(slotIdx);
+ break;
+ }
+ case TraceInfo::Op::kEnter: { // data: function index, (unused)
+ int fnIdx = trace.data[0];
+ SkASSERT(fnIdx >= 0);
+ SkASSERT((size_t)fnIdx < fDebugTrace->fFuncInfo.size());
+ fStack.push_back({/*fFunction=*/fnIdx,
+ /*fLine=*/-1,
+ /*fDisplayMask=*/SkBitSet(fDebugTrace->fSlotInfo.size())});
+ break;
+ }
+ case TraceInfo::Op::kExit: { // data: function index, (unused)
+ SkASSERT(!fStack.empty());
+ SkASSERT(fStack.back().fFunction == trace.data[0]);
+ fStack.pop_back();
+ return true;
+ }
+ case TraceInfo::Op::kScope: { // data: scope delta, (unused)
+ SkASSERT(!fStack.empty());
+ fScope += trace.data[0];
+ if (trace.data[0] < 0) {
+ // If the scope is being reduced, discard variables that are now out of scope.
+ for (size_t slotIdx = 0; slotIdx < fSlots.size(); ++slotIdx) {
+ if (fScope < fSlots[slotIdx].fScope) {
+ fSlots[slotIdx].fScope = INT_MAX;
+ fStack.back().fDisplayMask.reset(slotIdx);
+ }
+ }
+ }
+ return false;
+ }
+ }
+
+ return false;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/tracing/SkVMDebugTracePlayer.h b/gfx/skia/skia/src/sksl/tracing/SkVMDebugTracePlayer.h
new file mode 100644
index 0000000000..13ee7b7bd8
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/tracing/SkVMDebugTracePlayer.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/sksl/tracing/SkVMDebugTrace.h"
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "src/utils/SkBitSet.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <optional>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+namespace SkSL {
+
+/**
+ * Plays back a SkVM debug trace, allowing its contents to be viewed like a traditional debugger.
+ */
+class SkVMDebugTracePlayer {
+public:
+ /** Resets playback to the start of the trace. Breakpoints are not cleared. */
+ void reset(sk_sp<SkVMDebugTrace> trace);
+
+ /** Advances the simulation to the next Line op. */
+ void step();
+
+ /**
+ * Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs.
+ * Breakpoints will also stop the simulation even if we haven't reached an Exit.
+ */
+ void stepOver();
+
+ /**
+ * Advances the simulation until we exit from the current stack frame.
+ * Breakpoints will also stop the simulation even if we haven't left the stack frame.
+ */
+ void stepOut();
+
+ /** Advances the simulation until we hit a breakpoint, or the trace completes. */
+ void run();
+
+ /** Breakpoints will force the simulation to stop whenever a desired line is reached. */
+ void setBreakpoints(std::unordered_set<int> breakpointLines);
+ void addBreakpoint(int line);
+ void removeBreakpoint(int line);
+ using BreakpointSet = std::unordered_set<int>;
+ const BreakpointSet& getBreakpoints() { return fBreakpointLines; }
+
+ /** Returns true if we have reached the end of the trace. */
+ bool traceHasCompleted() const;
+
+ /** Returns true if there is a breakpoint set at the current line. */
+ bool atBreakpoint() const;
+
+ /** Retrieves the cursor position. */
+ size_t cursor() { return fCursor; }
+
+ /** Retrieves the current line. */
+ int32_t getCurrentLine() const;
+
+ /** Retrieves the current line for a given stack frame. */
+ int32_t getCurrentLineInStackFrame(int stackFrameIndex) const;
+
+ /** Returns the call stack as an array of FunctionInfo indices. */
+ std::vector<int> getCallStack() const;
+
+ /** Returns the size of the call stack. */
+ int getStackDepth() const;
+
+ /**
+ * Returns every line number reached inside this debug trace, along with the remaining number of
+ * times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice.
+ */
+ using LineNumberMap = std::unordered_map<int, int>;
+ const LineNumberMap& getLineNumbersReached() const { return fLineNumbers; }
+
+ /** Returns variables from a stack frame, or from global scope. */
+ struct VariableData {
+ int fSlotIndex;
+ bool fDirty; // has this slot been written-to since the last step call?
+ double fValue; // value in slot (with type-conversion applied)
+ };
+ std::vector<VariableData> getLocalVariables(int stackFrameIndex) const;
+ std::vector<VariableData> getGlobalVariables() const;
+
+private:
+ /**
+ * Executes the trace op at the passed-in cursor position. Returns true if we've reached a line
+ * or exit trace op, which indicate a stopping point.
+ */
+ bool execute(size_t position);
+
+ /**
+ * Cleans up temporary state between steps, such as the dirty mask and function return values.
+ */
+ void tidyState();
+
+ /** Updates fWriteTime for the entire variable at a given slot. */
+ void updateVariableWriteTime(int slotIdx, size_t writeTime);
+
+ /** Returns a vector of the indices and values of each slot that is enabled in `bits`. */
+ std::vector<VariableData> getVariablesForDisplayMask(const SkBitSet& bits) const;
+
+ struct StackFrame {
+ int32_t fFunction; // from fFuncInfo
+ int32_t fLine; // our current line number within the function
+ SkBitSet fDisplayMask; // the variable slots which have been touched in this function
+ };
+ struct Slot {
+ int32_t fValue; // values in each slot
+ int fScope; // the scope value of each slot
+ size_t fWriteTime; // when was the variable in this slot most recently written?
+ // (by cursor position)
+ };
+ sk_sp<SkVMDebugTrace> fDebugTrace;
+ size_t fCursor = 0; // position of the read head
+ int fScope = 0; // the current scope depth (as tracked by
+ // trace_scope)
+ std::vector<Slot> fSlots; // the array of all slots
+ std::vector<StackFrame> fStack; // the execution stack
+ std::optional<SkBitSet> fDirtyMask; // variable slots touched during the most-recently
+ // executed step
+ std::optional<SkBitSet> fReturnValues; // variable slots containing return values
+ LineNumberMap fLineNumbers; // holds [line number, the remaining number of
+ // times to reach this line during the trace]
+ BreakpointSet fBreakpointLines; // all breakpoints set by setBreakpointLines
+};
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLAddConstToVarModifiers.cpp b/gfx/skia/skia/src/sksl/transform/SkSLAddConstToVarModifiers.cpp
new file mode 100644
index 0000000000..f024d7d681
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLAddConstToVarModifiers.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLModifiers.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+namespace SkSL {
+
+class Expression;
+
+const Modifiers* Transform::AddConstToVarModifiers(const Context& context,
+ const Variable& var,
+ const Expression* initialValue,
+ const ProgramUsage* usage) {
+ // If the variable is already marked as `const`, keep our existing modifiers.
+ const Modifiers* modifiers = &var.modifiers();
+ if (modifiers->fFlags & Modifiers::kConst_Flag) {
+ return modifiers;
+ }
+ // If the variable doesn't have a compile-time-constant initial value, we can't `const` it.
+ if (!initialValue || !Analysis::IsCompileTimeConstant(*initialValue)) {
+ return modifiers;
+ }
+ // This only works for variables that are written-to a single time.
+ ProgramUsage::VariableCounts counts = usage->get(var);
+ if (counts.fWrite != 1) {
+ return modifiers;
+ }
+ // Add `const` to our variable's modifiers, making it eligible for constant-folding.
+ Modifiers constModifiers = *modifiers;
+ constModifiers.fFlags |= Modifiers::kConst_Flag;
+ return context.fModifiersPool->add(constModifiers);
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadFunctions.cpp b/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadFunctions.cpp
new file mode 100644
index 0000000000..6332a9b716
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadFunctions.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLProgramElement.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+// Predicate for std::remove_if: returns true when `element` is a function definition that is
+// neither main() nor called anywhere (per ProgramUsage). As a side effect, removes the doomed
+// element from `usage` so the usage counts stay accurate after erasure.
+static bool dead_function_predicate(const ProgramElement* element, ProgramUsage* usage) {
+    if (!element->is<FunctionDefinition>()) {
+        return false;
+    }
+    const FunctionDefinition& fn = element->as<FunctionDefinition>();
+    // main() is always a root; any function with a nonzero call count is still live.
+    if (fn.declaration().isMain() || usage->get(fn.declaration()) > 0) {
+        return false;
+    }
+    // This function is about to be eliminated by remove_if; update ProgramUsage accordingly.
+    usage->remove(*element);
+    return true;
+}
+
+// Erases uncalled, non-main function definitions from both the program's owned and shared
+// element lists (gated on the fRemoveDeadFunctions setting). Returns true if any element
+// was removed.
+bool Transform::EliminateDeadFunctions(Program& program) {
+    ProgramUsage* usage = program.fUsage.get();
+
+    // Capture the starting sizes so we can report whether anything was eliminated.
+    size_t numOwnedElements = program.fOwnedElements.size();
+    size_t numSharedElements = program.fSharedElements.size();
+
+    if (program.fConfig->fSettings.fRemoveDeadFunctions) {
+        // Standard erase-remove idiom over each element list.
+        program.fOwnedElements.erase(std::remove_if(program.fOwnedElements.begin(),
+                                                    program.fOwnedElements.end(),
+                                                    [&](const std::unique_ptr<ProgramElement>& pe) {
+                                                        return dead_function_predicate(pe.get(),
+                                                                                       usage);
+                                                    }),
+                                     program.fOwnedElements.end());
+        program.fSharedElements.erase(std::remove_if(program.fSharedElements.begin(),
+                                                     program.fSharedElements.end(),
+                                                     [&](const ProgramElement* pe) {
+                                                         return dead_function_predicate(pe, usage);
+                                                     }),
+                                      program.fSharedElements.end());
+    }
+    return program.fOwnedElements.size() < numOwnedElements ||
+           program.fSharedElements.size() < numSharedElements;
+}
+
+// Module-level overload: erases uncalled, non-main function definitions from a Module's
+// element list (gated on the fRemoveDeadFunctions setting). Returns true if any element
+// was removed.
+bool Transform::EliminateDeadFunctions(const Context& context,
+                                       Module& module,
+                                       ProgramUsage* usage) {
+    size_t numElements = module.fElements.size();
+
+    if (context.fConfig->fSettings.fRemoveDeadFunctions) {
+        module.fElements.erase(std::remove_if(module.fElements.begin(),
+                                              module.fElements.end(),
+                                              [&](const std::unique_ptr<ProgramElement>& pe) {
+                                                  return dead_function_predicate(pe.get(), usage);
+                                              }),
+                               module.fElements.end());
+    }
+    return module.fElements.size() < numElements;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadGlobalVariables.cpp b/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadGlobalVariables.cpp
new file mode 100644
index 0000000000..700e176ca5
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadGlobalVariables.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLProgramElement.h"
+#include "src/base/SkStringView.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+// Predicate for std::remove_if: returns true when `element` is a global variable declaration
+// whose variable is dead per ProgramUsage. When `onlyPrivateGlobals` is set, only variables
+// whose names start with '$' (presumably compiler-private builtins -- TODO confirm) are
+// considered. As a side effect, removes the doomed declaration from `usage`.
+static bool is_dead_variable(const ProgramElement& element,
+                             ProgramUsage* usage,
+                             bool onlyPrivateGlobals) {
+    if (!element.is<GlobalVarDeclaration>()) {
+        return false;
+    }
+    const GlobalVarDeclaration& global = element.as<GlobalVarDeclaration>();
+    const VarDeclaration& varDecl = global.varDeclaration();
+    // If restricted to private globals, skip any variable not named with a '$' prefix.
+    if (onlyPrivateGlobals && !skstd::starts_with(varDecl.var()->name(), '$')) {
+        return false;
+    }
+    if (!usage->isDead(*varDecl.var())) {
+        return false;
+    }
+    // This declaration is about to be eliminated by remove_if; update ProgramUsage accordingly.
+    usage->remove(&varDecl);
+    return true;
+}
+
+// Erases dead global variable declarations from a Module's element list (gated on the
+// fRemoveDeadVariables setting). `onlyPrivateGlobals` restricts elimination to '$'-prefixed
+// names. Returns true if any element was removed.
+bool Transform::EliminateDeadGlobalVariables(const Context& context,
+                                             Module& module,
+                                             ProgramUsage* usage,
+                                             bool onlyPrivateGlobals) {
+    auto isDeadVariable = [&](const ProgramElement& element) {
+        return is_dead_variable(element, usage, onlyPrivateGlobals);
+    };
+
+    // Capture the starting size so we can report whether anything was eliminated.
+    size_t numElements = module.fElements.size();
+    if (context.fConfig->fSettings.fRemoveDeadVariables) {
+        module.fElements.erase(std::remove_if(module.fElements.begin(),
+                                              module.fElements.end(),
+                                              [&](const std::unique_ptr<ProgramElement>& pe) {
+                                                  return isDeadVariable(*pe);
+                                              }),
+                               module.fElements.end());
+    }
+    return module.fElements.size() < numElements;
+}
+
+// Program-level overload: erases dead global variable declarations from both the owned and
+// shared element lists (gated on the fRemoveDeadVariables setting). All globals are eligible,
+// not just '$'-prefixed ones. Returns true if any element was removed.
+bool Transform::EliminateDeadGlobalVariables(Program& program) {
+    auto isDeadVariable = [&](const ProgramElement& element) {
+        return is_dead_variable(element, program.fUsage.get(), /*onlyPrivateGlobals=*/false);
+    };
+
+    // Capture the starting sizes so we can report whether anything was eliminated.
+    size_t numOwnedElements = program.fOwnedElements.size();
+    size_t numSharedElements = program.fSharedElements.size();
+    if (program.fConfig->fSettings.fRemoveDeadVariables) {
+        // Standard erase-remove idiom over each element list.
+        program.fOwnedElements.erase(std::remove_if(program.fOwnedElements.begin(),
+                                                    program.fOwnedElements.end(),
+                                                    [&](const std::unique_ptr<ProgramElement>& pe) {
+                                                        return isDeadVariable(*pe);
+                                                    }),
+                                     program.fOwnedElements.end());
+        program.fSharedElements.erase(std::remove_if(program.fSharedElements.begin(),
+                                                     program.fSharedElements.end(),
+                                                     [&](const ProgramElement* pe) {
+                                                         return isDeadVariable(*pe);
+                                                     }),
+                                      program.fSharedElements.end());
+    }
+    return program.fOwnedElements.size() < numOwnedElements ||
+           program.fSharedElements.size() < numSharedElements;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadLocalVariables.cpp b/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadLocalVariables.cpp
new file mode 100644
index 0000000000..8329cc90c0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLEliminateDeadLocalVariables.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLBinaryExpression.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLExpressionStatement.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/transform/SkSLProgramWriter.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+
+class Context;
+
+// Shared implementation for Transform::EliminateDeadLocalVariables. Scans ProgramUsage for
+// local variables that exist but are never read; if any are found, rewrites every function
+// definition in `elements` to remove their declarations and assignments (preserving any
+// side-effecting initializer expressions). Returns true if any change was made.
+static bool eliminate_dead_local_variables(const Context& context,
+                                           SkSpan<std::unique_ptr<ProgramElement>> elements,
+                                           ProgramUsage* usage) {
+    // Mutating visitor that strips out declarations of, and assignments into, dead locals.
+    class DeadLocalVariableEliminator : public ProgramWriter {
+    public:
+        DeadLocalVariableEliminator(const Context& context, ProgramUsage* usage)
+                : fContext(context)
+                , fUsage(usage) {}
+
+        using ProgramWriter::visitProgramElement;
+
+        bool visitExpressionPtr(std::unique_ptr<Expression>& expr) override {
+            if (expr->is<BinaryExpression>()) {
+                // Search for expressions of the form `deadVar = anyExpression`.
+                BinaryExpression& binary = expr->as<BinaryExpression>();
+                if (VariableReference* assignedVar = binary.isAssignmentIntoVariable()) {
+                    if (fDeadVariables.contains(assignedVar->variable())) {
+                        // Replace `deadVar = anyExpression` with `anyExpression`.
+                        fUsage->remove(binary.left().get());
+                        expr = std::move(binary.right());
+
+                        // If `anyExpression` is now a lone ExpressionStatement, it's highly likely
+                        // that we can eliminate it entirely. This flag will let us know to check.
+                        fAssignmentWasEliminated = true;
+
+                        // Re-process the newly cleaned-up expression. This lets us fully clean up
+                        // gnarly assignments like `a = b = 123;` where both `a` and `b` are dead,
+                        // or silly double-assignments like `a = a = 123;`.
+                        return this->visitExpressionPtr(expr);
+                    }
+                }
+            }
+            if (expr->is<VariableReference>()) {
+                // A read of a dead variable should be impossible here: dead variables have no
+                // reads by definition, and writes were handled in the branch above.
+                SkASSERT(!fDeadVariables.contains(expr->as<VariableReference>().variable()));
+            }
+            return INHERITED::visitExpressionPtr(expr);
+        }
+
+        bool visitStatementPtr(std::unique_ptr<Statement>& stmt) override {
+            if (stmt->is<VarDeclaration>()) {
+                VarDeclaration& varDecl = stmt->as<VarDeclaration>();
+                const Variable* var = varDecl.var();
+                ProgramUsage::VariableCounts* counts = fUsage->fVariableCounts.find(var);
+                SkASSERT(counts);
+                SkASSERT(counts->fVarExists);
+                if (CanEliminate(var, *counts)) {
+                    fDeadVariables.add(var);
+                    if (var->initialValue()) {
+                        // The variable has an initial-value expression, which might have side
+                        // effects. ExpressionStatement::Make will preserve side effects, but
+                        // replaces pure expressions with Nop.
+                        fUsage->remove(stmt.get());
+                        stmt = ExpressionStatement::Make(fContext, std::move(varDecl.value()));
+                        fUsage->add(stmt.get());
+                    } else {
+                        // The variable has no initial-value and can be cleanly eliminated.
+                        fUsage->remove(stmt.get());
+                        stmt = Nop::Make();
+                    }
+                    fMadeChanges = true;
+
+                    // Re-process the newly cleaned-up statement. This lets us fully clean up
+                    // gnarly assignments like `a = b = 123;` where both `a` and `b` are dead,
+                    // or silly double-assignments like `a = a = 123;`.
+                    return this->visitStatementPtr(stmt);
+                }
+            }
+
+            bool result = INHERITED::visitStatementPtr(stmt);
+
+            // If we eliminated an assignment above, we may have left behind an inert
+            // ExpressionStatement.
+            if (fAssignmentWasEliminated) {
+                fAssignmentWasEliminated = false;
+                if (stmt->is<ExpressionStatement>()) {
+                    ExpressionStatement& exprStmt = stmt->as<ExpressionStatement>();
+                    if (!Analysis::HasSideEffects(*exprStmt.expression())) {
+                        // The expression-statement was inert; eliminate it entirely.
+                        fUsage->remove(&exprStmt);
+                        stmt = Nop::Make();
+                    }
+                }
+            }
+
+            return result;
+        }
+
+        // A local variable is dead if it exists but is never read. (Writes alone don't keep
+        // it alive.)
+        static bool CanEliminate(const Variable* var, const ProgramUsage::VariableCounts& counts) {
+            return counts.fVarExists && !counts.fRead && var->storage() == VariableStorage::kLocal;
+        }
+
+        bool fMadeChanges = false;
+        const Context& fContext;
+        ProgramUsage* fUsage;
+        // Set of variables identified as dead; assignments into these are stripped.
+        SkTHashSet<const Variable*> fDeadVariables;
+        bool fAssignmentWasEliminated = false;
+
+        using INHERITED = ProgramWriter;
+    };
+
+    DeadLocalVariableEliminator visitor{context, usage};
+
+    // Cheap pre-pass: only walk the program if at least one dead local actually exists.
+    for (auto& [var, counts] : usage->fVariableCounts) {
+        if (DeadLocalVariableEliminator::CanEliminate(var, counts)) {
+            // This program contains at least one dead local variable.
+            // Scan the program for any dead local variables and eliminate them all.
+            for (std::unique_ptr<ProgramElement>& pe : elements) {
+                if (pe->is<FunctionDefinition>()) {
+                    visitor.visitProgramElement(*pe);
+                }
+            }
+            break;
+        }
+    }
+
+    return visitor.fMadeChanges;
+}
+
+// Module-level entry point: eliminates dead local variables unconditionally (no settings gate).
+bool Transform::EliminateDeadLocalVariables(const Context& context,
+                                            Module& module,
+                                            ProgramUsage* usage) {
+    return eliminate_dead_local_variables(context, SkSpan(module.fElements), usage);
+}
+
+// Program-level entry point: eliminates dead local variables only when the program settings
+// enable fRemoveDeadVariables; otherwise reports no change.
+bool Transform::EliminateDeadLocalVariables(Program& program) {
+    return program.fConfig->fSettings.fRemoveDeadVariables
+                   ? eliminate_dead_local_variables(*program.fContext,
+                                                    SkSpan(program.fOwnedElements),
+                                                    program.fUsage.get())
+                   : false;
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLEliminateEmptyStatements.cpp b/gfx/skia/skia/src/sksl/transform/SkSLEliminateEmptyStatements.cpp
new file mode 100644
index 0000000000..e36867bd5e
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLEliminateEmptyStatements.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSpan.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/ir/SkSLBlock.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/transform/SkSLProgramWriter.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+class Expression;
+
+// Removes empty statements (per Statement::isEmpty) from every Block inside each function
+// definition in `elements`, working bottom-up so emptied inner blocks are cleaned before
+// their parents are inspected.
+static void eliminate_empty_statements(SkSpan<std::unique_ptr<ProgramElement>> elements) {
+    class EmptyStatementEliminator : public ProgramWriter {
+    public:
+        bool visitExpressionPtr(std::unique_ptr<Expression>& expr) override {
+            // We don't need to look inside expressions at all.
+            return false;
+        }
+
+        bool visitStatementPtr(std::unique_ptr<Statement>& stmt) override {
+            // Work from the innermost blocks to the outermost.
+            INHERITED::visitStatementPtr(stmt);
+
+            if (stmt->is<Block>()) {
+                // Compact the block's children, dropping any empty statements.
+                StatementArray& children = stmt->as<Block>().children();
+                auto iter = std::remove_if(children.begin(), children.end(),
+                                           [](std::unique_ptr<Statement>& stmt) {
+                                               return stmt->isEmpty();
+                                           });
+                children.resize(std::distance(children.begin(), iter));
+            }
+
+            // We always check the entire program.
+            return false;
+        }
+
+        using INHERITED = ProgramWriter;
+    };
+
+    for (std::unique_ptr<ProgramElement>& pe : elements) {
+        if (pe->is<FunctionDefinition>()) {
+            EmptyStatementEliminator visitor;
+            visitor.visitStatementPtr(pe->as<FunctionDefinition>().body());
+        }
+    }
+}
+
+// Public entry point: strips empty statements from every function in the module.
+void Transform::EliminateEmptyStatements(Module& module) {
+    return eliminate_empty_statements(SkSpan(module.fElements));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLEliminateUnreachableCode.cpp b/gfx/skia/skia/src/sksl/transform/SkSLEliminateUnreachableCode.cpp
new file mode 100644
index 0000000000..23d1b39be0
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLEliminateUnreachableCode.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLDefines.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/base/SkTArray.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLIfStatement.h"
+#include "src/sksl/ir/SkSLNop.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLSwitchCase.h"
+#include "src/sksl/ir/SkSLSwitchStatement.h"
+#include "src/sksl/transform/SkSLProgramWriter.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+class Expression;
+
+// Replaces statements that can never execute with Nops, by tracking whether a function exit
+// (return/discard) or block exit (break/continue) has already been seen on the current path.
+// The two stacks (fFoundFunctionExit / fFoundBlockExit) carry one entry per nested control
+// region, so exits only propagate outward where control flow guarantees they occur.
+static void eliminate_unreachable_code(SkSpan<std::unique_ptr<ProgramElement>> elements,
+                                       ProgramUsage* usage) {
+    class UnreachableCodeEliminator : public ProgramWriter {
+    public:
+        UnreachableCodeEliminator(ProgramUsage* usage) : fUsage(usage) {
+            // Seed both stacks with the function's outermost scope.
+            fFoundFunctionExit.push_back(false);
+            fFoundBlockExit.push_back(false);
+        }
+
+        bool visitExpressionPtr(std::unique_ptr<Expression>& expr) override {
+            // We don't need to look inside expressions at all.
+            return false;
+        }
+
+        bool visitStatementPtr(std::unique_ptr<Statement>& stmt) override {
+            if (fFoundFunctionExit.back() || fFoundBlockExit.back()) {
+                // If we already found an exit in this section, anything beyond it is dead code.
+                if (!stmt->is<Nop>()) {
+                    // Eliminate the dead statement by substituting a Nop.
+                    fUsage->remove(stmt.get());
+                    stmt = Nop::Make();
+                }
+                return false;
+            }
+
+            switch (stmt->kind()) {
+                case Statement::Kind::kReturn:
+                case Statement::Kind::kDiscard:
+                    // We found a function exit on this path.
+                    fFoundFunctionExit.back() = true;
+                    break;
+
+                case Statement::Kind::kBreak:
+                    // A `break` statement can either be breaking out of a loop or terminating an
+                    // individual switch case. We treat both cases the same way: they only apply
+                    // to the statements associated with the parent statement (i.e. enclosing loop
+                    // block / preceding case label).
+                case Statement::Kind::kContinue:
+                    fFoundBlockExit.back() = true;
+                    break;
+
+                case Statement::Kind::kExpression:
+                case Statement::Kind::kNop:
+                case Statement::Kind::kVarDeclaration:
+                    // These statements don't affect control flow.
+                    break;
+
+                case Statement::Kind::kBlock:
+                    // Blocks are on the straight-line path and don't affect control flow.
+                    return INHERITED::visitStatementPtr(stmt);
+
+                case Statement::Kind::kDo: {
+                    // Function-exits are allowed to propagate outside of a do-loop, because it
+                    // always executes its body at least once.
+                    fFoundBlockExit.push_back(false);
+                    bool result = INHERITED::visitStatementPtr(stmt);
+                    fFoundBlockExit.pop_back();
+                    return result;
+                }
+                case Statement::Kind::kFor: {
+                    // Function-exits are not allowed to propagate out, because a for-loop or while-
+                    // loop could potentially run zero times.
+                    fFoundFunctionExit.push_back(false);
+                    fFoundBlockExit.push_back(false);
+                    bool result = INHERITED::visitStatementPtr(stmt);
+                    fFoundBlockExit.pop_back();
+                    fFoundFunctionExit.pop_back();
+                    return result;
+                }
+                case Statement::Kind::kIf: {
+                    // This statement is conditional and encloses two inner sections of code.
+                    // If both sides contain a function-exit or loop-exit, that exit is allowed to
+                    // propagate out.
+                    IfStatement& ifStmt = stmt->as<IfStatement>();
+
+                    // Visit the true branch in its own exit scope.
+                    fFoundFunctionExit.push_back(false);
+                    fFoundBlockExit.push_back(false);
+                    bool result = (ifStmt.ifTrue() && this->visitStatementPtr(ifStmt.ifTrue()));
+                    bool foundFunctionExitOnTrue = fFoundFunctionExit.back();
+                    bool foundLoopExitOnTrue = fFoundBlockExit.back();
+                    fFoundFunctionExit.pop_back();
+                    fFoundBlockExit.pop_back();
+
+                    // Visit the false branch in its own exit scope.
+                    fFoundFunctionExit.push_back(false);
+                    fFoundBlockExit.push_back(false);
+                    result |= (ifStmt.ifFalse() && this->visitStatementPtr(ifStmt.ifFalse()));
+                    bool foundFunctionExitOnFalse = fFoundFunctionExit.back();
+                    bool foundLoopExitOnFalse = fFoundBlockExit.back();
+                    fFoundFunctionExit.pop_back();
+                    fFoundBlockExit.pop_back();
+
+                    // An exit escapes the `if` only when BOTH branches are guaranteed to exit.
+                    fFoundFunctionExit.back() |= foundFunctionExitOnTrue &&
+                                                 foundFunctionExitOnFalse;
+                    fFoundBlockExit.back() |= foundLoopExitOnTrue &&
+                                              foundLoopExitOnFalse;
+                    return result;
+                }
+                case Statement::Kind::kSwitch: {
+                    // In switch statements we consider unreachable code on a per-case basis.
+                    SwitchStatement& sw = stmt->as<SwitchStatement>();
+                    bool result = false;
+
+                    // Tracks whether we found at least one case that doesn't lead to a return
+                    // statement (potentially via fallthrough).
+                    bool foundCaseWithoutReturn = false;
+                    bool hasDefault = false;
+                    for (std::unique_ptr<Statement>& c : sw.cases()) {
+                        // We eliminate unreachable code within the statements of the individual
+                        // case. Breaks are not allowed to propagate outside the case statement
+                        // itself. Function returns are allowed to propagate out only if all cases
+                        // have a return AND one of the cases is default (so that we know at least
+                        // one of the branches will be taken). This is similar to how we handle if
+                        // statements above.
+                        fFoundFunctionExit.push_back(false);
+                        fFoundBlockExit.push_back(false);
+
+                        SwitchCase& sc = c->as<SwitchCase>();
+                        result |= this->visitStatementPtr(sc.statement());
+
+                        // When considering whether a case has a return we can propagate, we
+                        // assume the following:
+                        //   1. The default case is always placed last in a switch statement and
+                        //      it is the last possible label reachable via fallthrough. Thus if
+                        //      it does not contain a return statement, then we don't propagate a
+                        //      function return.
+                        //   2. In all other cases we prevent the return from propagating only if
+                        //      we encounter a break statement. If no return or break is found,
+                        //      we defer the decision to the fallthrough case. We won't propagate
+                        //      a return unless we eventually encounter a default label.
+                        //
+                        // See resources/sksl/shared/SwitchWithEarlyReturn.sksl for test cases that
+                        // exercise this.
+                        if (sc.isDefault()) {
+                            foundCaseWithoutReturn |= !fFoundFunctionExit.back();
+                            hasDefault = true;
+                        } else {
+                            // We can only be sure that a case does not lead to a return if it
+                            // doesn't fallthrough.
+                            foundCaseWithoutReturn |=
+                                    (!fFoundFunctionExit.back() && fFoundBlockExit.back());
+                        }
+
+                        fFoundFunctionExit.pop_back();
+                        fFoundBlockExit.pop_back();
+                    }
+
+                    fFoundFunctionExit.back() |= !foundCaseWithoutReturn && hasDefault;
+                    return result;
+                }
+                case Statement::Kind::kSwitchCase:
+                    // We should never hit this case as switch cases are handled in the previous
+                    // case.
+                    SkUNREACHABLE;
+            }
+
+            return false;
+        }
+
+        ProgramUsage* fUsage;
+        // One entry per nested control region; .back() describes the innermost region.
+        SkSTArray<32, bool> fFoundFunctionExit;
+        SkSTArray<32, bool> fFoundBlockExit;
+
+        using INHERITED = ProgramWriter;
+    };
+
+    for (std::unique_ptr<ProgramElement>& pe : elements) {
+        if (pe->is<FunctionDefinition>()) {
+            // Each function gets a fresh eliminator so exit state can't leak across functions.
+            UnreachableCodeEliminator visitor{usage};
+            visitor.visitStatementPtr(pe->as<FunctionDefinition>().body());
+        }
+    }
+}
+
+// Module-level entry point for unreachable-code elimination.
+void Transform::EliminateUnreachableCode(Module& module, ProgramUsage* usage) {
+    return eliminate_unreachable_code(SkSpan(module.fElements), usage);
+}
+
+// Program-level entry point for unreachable-code elimination (owned elements only).
+void Transform::EliminateUnreachableCode(Program& program) {
+    return eliminate_unreachable_code(SkSpan(program.fOwnedElements), program.fUsage.get());
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLFindAndDeclareBuiltinFunctions.cpp b/gfx/skia/skia/src/sksl/transform/SkSLFindAndDeclareBuiltinFunctions.cpp
new file mode 100644
index 0000000000..cb937dec49
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLFindAndDeclareBuiltinFunctions.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLIntrinsicList.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+
+class ProgramElement;
+
+// Finds every built-in function referenced by the program (directly or transitively, since
+// newly-added builtins may themselves call other builtins) and prepends their definitions to
+// the program's shared elements in a deterministic order. Also sets the RTFlip program input
+// when the `dFdy` intrinsic is used.
+void Transform::FindAndDeclareBuiltinFunctions(Program& program) {
+    ProgramUsage* usage = program.fUsage.get();
+    Context& context = *program.fContext;
+
+    std::vector<const FunctionDefinition*> addedBuiltins;
+    // Iterate to a fixed point: each pass may add builtins whose bodies call further builtins.
+    for (;;) {
+        // Find all the built-ins referenced by the program but not yet included in the code.
+        size_t numBuiltinsAtStart = addedBuiltins.size();
+        for (const auto& [fn, count] : usage->fCallCounts) {
+            if (!fn->isBuiltin() || count == 0) {
+                // Not a built-in; skip it.
+                continue;
+            }
+            if (fn->intrinsicKind() == k_dFdy_IntrinsicKind) {
+                // Programs that invoke the `dFdy` intrinsic will need the RTFlip input.
+                program.fInputs.fUseFlipRTUniform = !context.fConfig->fSettings.fForceNoRTFlip;
+            }
+            if (const FunctionDefinition* builtinDef = fn->definition()) {
+                // Make sure we only add a built-in function once. We rarely add more than a handful
+                // of builtin functions, so linear search here is good enough.
+                if (std::find(addedBuiltins.begin(), addedBuiltins.end(), builtinDef) ==
+                    addedBuiltins.end()) {
+                    addedBuiltins.push_back(builtinDef);
+                }
+            }
+        }
+
+        if (addedBuiltins.size() == numBuiltinsAtStart) {
+            // If we didn't reference any more built-in functions than before, we're done.
+            break;
+        }
+
+        // Sort the referenced builtin functions into a consistent order; otherwise our output will
+        // become non-deterministic. The exact order isn't particularly important; we sort backwards
+        // because we add elements to the shared-elements in reverse order at the end.
+        std::sort(addedBuiltins.begin() + numBuiltinsAtStart,
+                  addedBuiltins.end(),
+                  [](const FunctionDefinition* aDefinition, const FunctionDefinition* bDefinition) {
+                      const FunctionDeclaration& a = aDefinition->declaration();
+                      const FunctionDeclaration& b = bDefinition->declaration();
+                      if (a.name() != b.name()) {
+                          return a.name() > b.name();
+                      }
+                      // Overloads share a name; fall back to the full description as tiebreaker.
+                      return a.description() > b.description();
+                  });
+
+        // Update the ProgramUsage to track all these newly discovered functions.
+        int usageCallCounts = usage->fCallCounts.count();
+
+        for (size_t index = numBuiltinsAtStart; index < addedBuiltins.size(); ++index) {
+            usage->add(*addedBuiltins[index]);
+        }
+
+        if (usage->fCallCounts.count() == usageCallCounts) {
+            // If we aren't making any more unique function calls than before, we're done.
+            break;
+        }
+    }
+
+    // Insert the new functions into the program's shared elements, right at the front.
+    // They are added in reverse so that the deepest dependencies are added to the top.
+    program.fSharedElements.insert(program.fSharedElements.begin(),
+                                   addedBuiltins.rbegin(), addedBuiltins.rend());
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLFindAndDeclareBuiltinVariables.cpp b/gfx/skia/skia/src/sksl/transform/SkSLFindAndDeclareBuiltinVariables.cpp
new file mode 100644
index 0000000000..bbc68fa7af
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLFindAndDeclareBuiltinVariables.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLLayout.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLSymbol.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/SkSLUtil.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLInterfaceBlock.h"
+#include "src/sksl/ir/SkSLProgram.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <algorithm>
+#include <memory>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+namespace Transform {
+namespace {
+
+// Collects the program elements (global var declarations / interface blocks) that declare
+// built-in variables referenced by a program, de-duplicated, so they can be prepended to the
+// program's shared elements.
+class BuiltinVariableScanner {
+public:
+    BuiltinVariableScanner(const Context& context, const SymbolTable& symbols)
+            : fContext(context)
+            , fSymbols(symbols) {}
+
+    // Records the declaring element for later insertion, skipping duplicates.
+    void addDeclaringElement(const ProgramElement* decl) {
+        // Make sure we only add a built-in variable once. We only have a small handful of built-in
+        // variables to declare, so linear search here is good enough.
+        if (std::find(fNewElements.begin(), fNewElements.end(), decl) == fNewElements.end()) {
+            fNewElements.push_back(decl);
+        }
+    }
+
+    // Overload that resolves a symbol to its declaring global-var or interface-block element.
+    // Null or non-variable symbols are ignored.
+    void addDeclaringElement(const Symbol* symbol) {
+        if (!symbol || !symbol->is<Variable>()) {
+            return;
+        }
+        const Variable& var = symbol->as<Variable>();
+        if (const GlobalVarDeclaration* decl = var.globalVarDeclaration()) {
+            this->addDeclaringElement(decl);
+        } else if (const InterfaceBlock* block = var.interfaceBlock()) {
+            this->addDeclaringElement(block);
+        } else {
+            // Double-check that this variable isn't associated with a global or an interface block.
+            // (Locals and parameters will come along naturally as part of the associated function.)
+            SkASSERTF(var.storage() != VariableStorage::kGlobal &&
+                      var.storage() != VariableStorage::kInterfaceBlock,
+                      "%.*s", (int)var.name().size(), var.name().data());
+        }
+    }
+
+    // If main() returns half4, treat that as an implicit write to sk_FragColor and declare it.
+    void addImplicitFragColorWrite(SkSpan<const std::unique_ptr<ProgramElement>> elements) {
+        for (const std::unique_ptr<ProgramElement>& pe : elements) {
+            if (!pe->is<FunctionDefinition>()) {
+                continue;
+            }
+            const FunctionDefinition& funcDef = pe->as<FunctionDefinition>();
+            if (funcDef.declaration().isMain()) {
+                if (funcDef.declaration().returnType().matches(*fContext.fTypes.fHalf4)) {
+                    // We synthesize writes to sk_FragColor if main() returns a color, even if it's
+                    // otherwise unreferenced.
+                    this->addDeclaringElement(fSymbols.findBuiltinSymbol(Compiler::FRAGCOLOR_NAME));
+                }
+                // Now that main() has been found, we can stop scanning.
+                break;
+            }
+        }
+    }
+
+    // Sort key for kGlobalVar elements: the declared variable's name.
+    static std::string_view GlobalVarBuiltinName(const ProgramElement& elem) {
+        return elem.as<GlobalVarDeclaration>().varDeclaration().var()->name();
+    }
+
+    // Sort key for kInterfaceBlock elements: the block's instance name.
+    static std::string_view InterfaceBlockName(const ProgramElement& elem) {
+        return elem.as<InterfaceBlock>().instanceName();
+    }
+
+    // Sorts collected elements by kind, then by name, for deterministic output ordering.
+    void sortNewElements() {
+        std::sort(fNewElements.begin(),
+                  fNewElements.end(),
+                  [](const ProgramElement* a, const ProgramElement* b) {
+                      if (a->kind() != b->kind()) {
+                          return a->kind() < b->kind();
+                      }
+                      switch (a->kind()) {
+                          case ProgramElement::Kind::kGlobalVar:
+                              SkASSERT(GlobalVarBuiltinName(*a) != GlobalVarBuiltinName(*b));
+                              return GlobalVarBuiltinName(*a) < GlobalVarBuiltinName(*b);
+
+                          case ProgramElement::Kind::kInterfaceBlock:
+                              SkASSERT(InterfaceBlockName(*a) != InterfaceBlockName(*b));
+                              return InterfaceBlockName(*a) < InterfaceBlockName(*b);
+
+                          default:
+                              SkUNREACHABLE;
+                      }
+                  });
+    }
+
+    const Context& fContext;
+    const SymbolTable& fSymbols;
+    // De-duplicated declaring elements, in discovery order until sortNewElements() runs.
+    std::vector<const ProgramElement*> fNewElements;
+};
+
+} // namespace
+
+// Scans the program for referenced built-in variables, prepends their declaring elements to
+// the shared elements (sorted deterministically), and updates ProgramUsage to match. Also
+// sets the RTFlip program input when sk_FragCoord or sk_Clockwise is used, and force-declares
+// certain builtins for fragment programs.
+void FindAndDeclareBuiltinVariables(Program& program) {
+    const Context& context = *program.fContext;
+    const SymbolTable& symbols = *program.fSymbols;
+    BuiltinVariableScanner scanner(context, symbols);
+
+    if (ProgramConfig::IsFragment(program.fConfig->fKind)) {
+        // Find main() in the program and check its return type.
+        // If it's half4, we treat that as an implicit write to sk_FragColor and add a reference.
+        scanner.addImplicitFragColorWrite(program.fOwnedElements);
+
+        // Vulkan requires certain builtin variables be present, even if they're unused. At one
+        // time, validation errors would result if sk_Clockwise was missing. Now, it's just (Adreno)
+        // driver bugs that drop or corrupt draws if they're missing.
+        scanner.addDeclaringElement(symbols.findBuiltinSymbol("sk_Clockwise"));
+    }
+
+    // Scan all the variables used by the program and declare any built-ins.
+    for (const auto& [var, counts] : program.fUsage->fVariableCounts) {
+        if (var->isBuiltin()) {
+            scanner.addDeclaringElement(var);
+
+            // Set the FlipRT program input if we find sk_FragCoord or sk_Clockwise.
+            switch (var->modifiers().fLayout.fBuiltin) {
+                case SK_FRAGCOORD_BUILTIN:
+                    // sk_FragCoord only needs the flip uniform when the caps allow FragCoord use.
+                    if (context.fCaps->fCanUseFragCoord) {
+                        program.fInputs.fUseFlipRTUniform =
+                                !context.fConfig->fSettings.fForceNoRTFlip;
+                    }
+                    break;
+
+                case SK_CLOCKWISE_BUILTIN:
+                    program.fInputs.fUseFlipRTUniform = !context.fConfig->fSettings.fForceNoRTFlip;
+                    break;
+            }
+        }
+    }
+
+    // Sort the referenced builtin functions into a consistent order; otherwise our output will
+    // become non-deterministic. The exact order isn't particularly important.
+    scanner.sortNewElements();
+
+    // Add all the newly-declared elements to the program, and update ProgramUsage to match.
+    program.fSharedElements.insert(program.fSharedElements.begin(),
+                                   scanner.fNewElements.begin(),
+                                   scanner.fNewElements.end());
+
+    for (const ProgramElement* element : scanner.fNewElements) {
+        program.fUsage->add(*element);
+    }
+}
+
+} // namespace Transform
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLProgramWriter.h b/gfx/skia/skia/src/sksl/transform/SkSLProgramWriter.h
new file mode 100644
index 0000000000..6e5988aa92
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLProgramWriter.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkSLProgramWriter_DEFINED
+#define SkSLProgramWriter_DEFINED
+
+#include "src/sksl/analysis/SkSLProgramVisitor.h"
+
+namespace SkSL {
+
+struct ProgramWriterTypes {
+ using Program = SkSL::Program;
+ using Expression = SkSL::Expression;
+ using Statement = SkSL::Statement;
+ using ProgramElement = SkSL::ProgramElement;
+ using UniquePtrExpression = std::unique_ptr<SkSL::Expression>;
+ using UniquePtrStatement = std::unique_ptr<SkSL::Statement>;
+};
+
+extern template class TProgramVisitor<ProgramWriterTypes>;
+
+class ProgramWriter : public TProgramVisitor<ProgramWriterTypes> {
+public:
+ // Subclass these methods if you want access to the unique_ptrs of IRNodes in a program.
+ // This will allow statements or expressions to be replaced during a visit.
+ bool visitExpressionPtr(std::unique_ptr<Expression>& e) override {
+ return this->visitExpression(*e);
+ }
+ bool visitStatementPtr(std::unique_ptr<Statement>& s) override {
+ return this->visitStatement(*s);
+ }
+};
+
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLRenamePrivateSymbols.cpp b/gfx/skia/skia/src/sksl/transform/SkSLRenamePrivateSymbols.cpp
new file mode 100644
index 0000000000..50bb88baf9
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLRenamePrivateSymbols.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLIRNode.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "include/private/SkSLStatement.h"
+#include "include/private/SkSLSymbol.h"
+#include "src/base/SkStringView.h"
+#include "src/sksl/SkSLAnalysis.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/SkSLModifiersPool.h"
+#include "src/sksl/SkSLProgramSettings.h"
+#include "src/sksl/ir/SkSLFunctionDeclaration.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLFunctionPrototype.h"
+#include "src/sksl/ir/SkSLSymbolTable.h"
+#include "src/sksl/ir/SkSLVarDeclarations.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/transform/SkSLProgramWriter.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <utility>
+#include <vector>
+
+namespace SkSL {
+
+class ProgramUsage;
+enum class ProgramKind : int8_t;
+
+static void strip_export_flag(Context& context,
+ const FunctionDeclaration* funcDecl,
+ SymbolTable* symbols) {
+ // Remove `$export` from every overload of this function.
+ Symbol* mutableSym = symbols->findMutable(funcDecl->name());
+ while (mutableSym) {
+ FunctionDeclaration* mutableDecl = &mutableSym->as<FunctionDeclaration>();
+
+ Modifiers modifiers = mutableDecl->modifiers();
+ modifiers.fFlags &= ~Modifiers::kExport_Flag;
+ mutableDecl->setModifiers(context.fModifiersPool->add(modifiers));
+
+ mutableSym = mutableDecl->mutableNextOverload();
+ }
+}
+
+void Transform::RenamePrivateSymbols(Context& context,
+ Module& module,
+ ProgramUsage* usage,
+ ProgramKind kind) {
+ class SymbolRenamer : public ProgramWriter {
+ public:
+ SymbolRenamer(Context& context,
+ ProgramUsage* usage,
+ std::shared_ptr<SymbolTable> symbolBase,
+ ProgramKind kind)
+ : fContext(context)
+ , fUsage(usage)
+ , fSymbolTableStack({std::move(symbolBase)})
+ , fKind(kind) {}
+
+ static std::string FindShortNameForSymbol(const Symbol* sym,
+ const SymbolTable* symbolTable,
+ std::string namePrefix) {
+ static constexpr std::string_view kLetters[] = {
+ "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
+ "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
+ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
+ "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"};
+
+ // Try any single-letter option.
+ for (std::string_view letter : kLetters) {
+ std::string name = namePrefix + std::string(letter);
+ if (symbolTable->find(name) == nullptr) {
+ return name;
+ }
+ }
+
+ // Try every two-letter option.
+ for (std::string_view letterA : kLetters) {
+ for (std::string_view letterB : kLetters) {
+ std::string name = namePrefix + std::string(letterA) + std::string(letterB);
+ if (symbolTable->find(name) == nullptr) {
+ return name;
+ }
+ }
+ }
+
+ // We struck out. Somehow, all 2700 two-letter names have been claimed.
+ SkDEBUGFAILF("Unable to find unique name for '%s'", std::string(sym->name()).c_str());
+ return std::string(sym->name());
+ }
+
+ void minifyVariableName(const Variable* var) {
+ // Some variables are associated with anonymous parameters--these don't have names and
+ // aren't present in the symbol table. Their names are already empty so there's no way
+ // to shrink them further.
+ if (var->name().empty()) {
+ return;
+ }
+
+ // Ensure that this variable is properly set up in the symbol table.
+ SymbolTable* symbols = fSymbolTableStack.back().get();
+ Symbol* mutableSym = symbols->findMutable(var->name());
+ SkASSERTF(mutableSym != nullptr,
+ "symbol table missing '%.*s'", (int)var->name().size(), var->name().data());
+ SkASSERTF(mutableSym == var,
+ "wrong symbol found for '%.*s'", (int)var->name().size(), var->name().data());
+
+ // Look for a new name for this symbol.
+ // Note: we always rename _every_ variable, even ones with single-letter names. This is
+ // a safeguard: if we claimed a name like `i`, and then the program itself contained an
+ // `i` later on, in a nested SymbolTable, the two names would clash. By always renaming
+ // everything, we can ignore that problem.
+ std::string shortName = FindShortNameForSymbol(var, symbols, "");
+ SkASSERT(symbols->findMutable(shortName) == nullptr);
+
+ // Update the symbol's name.
+ const std::string* ownedName = symbols->takeOwnershipOfString(std::move(shortName));
+ symbols->renameSymbol(mutableSym, *ownedName);
+ }
+
+ void minifyFunctionName(const FunctionDeclaration* funcDecl) {
+ // Look for a new name for this function.
+ std::string namePrefix = ProgramConfig::IsRuntimeEffect(fKind) ? "" : "$";
+ SymbolTable* symbols = fSymbolTableStack.back().get();
+ std::string shortName = FindShortNameForSymbol(funcDecl, symbols,
+ std::move(namePrefix));
+ SkASSERT(symbols->findMutable(shortName) == nullptr);
+
+ if (shortName.size() < funcDecl->name().size()) {
+ // Update the function's name. (If the function has overloads, this will rename all
+ // of them at once.)
+ Symbol* mutableSym = symbols->findMutable(funcDecl->name());
+ const std::string* ownedName = symbols->takeOwnershipOfString(std::move(shortName));
+ symbols->renameSymbol(mutableSym, *ownedName);
+ }
+ }
+
+ bool functionNameCanBeMinifiedSafely(const FunctionDeclaration& funcDecl) const {
+ if (ProgramConfig::IsRuntimeEffect(fKind)) {
+ // The only externally-accessible function in a runtime effect is main().
+ return !funcDecl.isMain();
+ } else {
+ // We will only minify $private_functions, and only ones not marked as $export.
+ return skstd::starts_with(funcDecl.name(), '$') &&
+ !(funcDecl.modifiers().fFlags & Modifiers::kExport_Flag);
+ }
+ }
+
+ void minifyFunction(FunctionDefinition& def) {
+ // If the function is private, minify its name.
+ const FunctionDeclaration* funcDecl = &def.declaration();
+ if (this->functionNameCanBeMinifiedSafely(*funcDecl)) {
+ this->minifyFunctionName(funcDecl);
+ }
+
+ // Minify the names of each function parameter.
+ Analysis::SymbolTableStackBuilder symbolTableStackBuilder(def.body().get(),
+ &fSymbolTableStack);
+ for (Variable* param : funcDecl->parameters()) {
+ this->minifyVariableName(param);
+ }
+ }
+
+ void minifyPrototype(FunctionPrototype& proto) {
+ const FunctionDeclaration* funcDecl = &proto.declaration();
+ if (funcDecl->definition()) {
+ // This function is defined somewhere; this isn't just a loose prototype.
+ return;
+ }
+
+ // Eliminate the names of each function parameter.
+ // The parameter names aren't in the symbol table's name lookup map at all.
+ // All we need to do is blank out their names.
+ for (Variable* param : funcDecl->parameters()) {
+ param->setName("");
+ }
+ }
+
+ bool visitProgramElement(ProgramElement& elem) override {
+ switch (elem.kind()) {
+ case ProgramElement::Kind::kFunction:
+ this->minifyFunction(elem.as<FunctionDefinition>());
+ return INHERITED::visitProgramElement(elem);
+
+ case ProgramElement::Kind::kFunctionPrototype:
+ this->minifyPrototype(elem.as<FunctionPrototype>());
+ return INHERITED::visitProgramElement(elem);
+
+ default:
+ return false;
+ }
+ }
+
+ bool visitStatementPtr(std::unique_ptr<Statement>& stmt) override {
+ Analysis::SymbolTableStackBuilder symbolTableStackBuilder(stmt.get(),
+ &fSymbolTableStack);
+ if (stmt->is<VarDeclaration>()) {
+ // Minify the variable's name.
+ VarDeclaration& decl = stmt->as<VarDeclaration>();
+ this->minifyVariableName(decl.var());
+ }
+
+ return INHERITED::visitStatementPtr(stmt);
+ }
+
+ Context& fContext;
+ ProgramUsage* fUsage;
+ std::vector<std::shared_ptr<SymbolTable>> fSymbolTableStack;
+ ProgramKind fKind;
+ using INHERITED = ProgramWriter;
+ };
+
+ // Rename local variables and private functions.
+ SymbolRenamer renamer{context, usage, module.fSymbols, kind};
+ for (std::unique_ptr<ProgramElement>& pe : module.fElements) {
+ renamer.visitProgramElement(*pe);
+ }
+
+ // Strip off modifier `$export` from every function. (Only the minifier checks this flag, so we
+ // can remove it without affecting the meaning of the code.)
+ for (std::unique_ptr<ProgramElement>& pe : module.fElements) {
+ if (pe->is<FunctionDefinition>()) {
+ const FunctionDeclaration* funcDecl = &pe->as<FunctionDefinition>().declaration();
+ if (funcDecl->modifiers().fFlags & Modifiers::kExport_Flag) {
+ strip_export_flag(context, funcDecl, module.fSymbols.get());
+ }
+ }
+ }
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLReplaceConstVarsWithLiterals.cpp b/gfx/skia/skia/src/sksl/transform/SkSLReplaceConstVarsWithLiterals.cpp
new file mode 100644
index 0000000000..e484104a33
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLReplaceConstVarsWithLiterals.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#include "include/private/SkSLModifiers.h"
+#include "include/private/SkSLProgramElement.h"
+#include "src/core/SkTHash.h"
+#include "src/sksl/SkSLCompiler.h"
+#include "src/sksl/SkSLConstantFolder.h"
+#include "src/sksl/analysis/SkSLProgramUsage.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLFunctionDefinition.h"
+#include "src/sksl/ir/SkSLVariable.h"
+#include "src/sksl/ir/SkSLVariableReference.h"
+#include "src/sksl/transform/SkSLProgramWriter.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <vector>
+
+namespace SkSL {
+
+void Transform::ReplaceConstVarsWithLiterals(Module& module, ProgramUsage* usage) {
+ class ConstVarReplacer : public ProgramWriter {
+ public:
+ ConstVarReplacer(ProgramUsage* usage) : fUsage(usage) {}
+
+ using ProgramWriter::visitProgramElement;
+
+ bool visitExpressionPtr(std::unique_ptr<Expression>& expr) override {
+ // If this is a variable...
+ if (expr->is<VariableReference>()) {
+ VariableReference& var = expr->as<VariableReference>();
+ // ... and it's a candidate for size reduction...
+ if (fCandidates.contains(var.variable())) {
+ // ... get its constant value...
+ if (const Expression* value =
+ ConstantFolder::GetConstantValueOrNullForVariable(var)) {
+ // ... and replace it with that value.
+ fUsage->remove(expr.get());
+ expr = value->clone();
+ fUsage->add(expr.get());
+ return false;
+ }
+ }
+ }
+ return INHERITED::visitExpressionPtr(expr);
+ }
+
+ ProgramUsage* fUsage;
+ SkTHashSet<const Variable*> fCandidates;
+
+ using INHERITED = ProgramWriter;
+ };
+
+ ConstVarReplacer visitor{usage};
+
+ for (const auto& [var, count] : usage->fVariableCounts) {
+ // We can only replace const variables that still exist, and that have initial values.
+ if (!count.fVarExists || count.fWrite != 1) {
+ continue;
+ }
+ if (!(var->modifiers().fFlags & Modifiers::kConst_Flag)) {
+ continue;
+ }
+ if (!var->initialValue()) {
+ continue;
+ }
+ // The current size is:
+ // strlen("const type varname=initialvalue;`") + count*strlen("varname").
+ size_t initialvalueSize = ConstantFolder::GetConstantValueForVariable(*var->initialValue())
+ ->description()
+ .size();
+ size_t totalOldSize = var->description().size() + // const type varname
+ 1 + // =
+ initialvalueSize + // initialvalue
+ 1 + // ;
+ count.fRead * var->name().size(); // count * varname
+ // If we replace varname with initialvalue everywhere, the new size would be:
+ // count*strlen("initialvalue")
+ size_t totalNewSize = count.fRead * initialvalueSize; // count * initialvalue
+
+ if (totalNewSize <= totalOldSize) {
+ visitor.fCandidates.add(var);
+ }
+ }
+
+ if (!visitor.fCandidates.empty()) {
+ for (std::unique_ptr<ProgramElement>& pe : module.fElements) {
+ if (pe->is<FunctionDefinition>()) {
+ visitor.visitProgramElement(*pe);
+ }
+ }
+ }
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLRewriteIndexedSwizzle.cpp b/gfx/skia/skia/src/sksl/transform/SkSLRewriteIndexedSwizzle.cpp
new file mode 100644
index 0000000000..21c68d97b1
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLRewriteIndexedSwizzle.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/private/SkSLDefines.h"
+#include "src/sksl/SkSLBuiltinTypes.h"
+#include "src/sksl/SkSLContext.h"
+#include "src/sksl/ir/SkSLConstructorCompound.h"
+#include "src/sksl/ir/SkSLExpression.h"
+#include "src/sksl/ir/SkSLIndexExpression.h"
+#include "src/sksl/ir/SkSLLiteral.h"
+#include "src/sksl/ir/SkSLSwizzle.h"
+#include "src/sksl/ir/SkSLType.h"
+#include "src/sksl/transform/SkSLTransform.h"
+
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+namespace SkSL {
+
+std::unique_ptr<Expression> Transform::RewriteIndexedSwizzle(const Context& context,
+ const IndexExpression& indexExpr) {
+ // The index expression _must_ have a swizzle base for this transformation to be valid.
+ if (!indexExpr.base()->is<Swizzle>()) {
+ return nullptr;
+ }
+ const Swizzle& swizzle = indexExpr.base()->as<Swizzle>();
+
+ // Convert the swizzle components to a literal array.
+ ExpressionArray vecArray;
+ vecArray.reserve(swizzle.components().size());
+ for (int8_t comp : swizzle.components()) {
+ vecArray.push_back(Literal::Make(indexExpr.fPosition, comp, context.fTypes.fInt.get()));
+ }
+
+ // Make a compound constructor with the literal array.
+ const Type& vecType = context.fTypes.fInt->toCompound(context, vecArray.size(), /*rows=*/1);
+ std::unique_ptr<Expression> vec =
+ ConstructorCompound::Make(context, indexExpr.fPosition, vecType, std::move(vecArray));
+
+ // Create a rewritten inner-expression corresponding to `vec(1,2,3)[originalIndex]`.
+ std::unique_ptr<Expression> innerExpr = IndexExpression::Make(
+ context, indexExpr.fPosition, std::move(vec), indexExpr.index()->clone());
+
+ // Return a rewritten outer-expression corresponding to `base[vec(1,2,3)[originalIndex]]`.
+ return IndexExpression::Make(
+ context, indexExpr.fPosition, swizzle.base()->clone(), std::move(innerExpr));
+}
+
+} // namespace SkSL
diff --git a/gfx/skia/skia/src/sksl/transform/SkSLTransform.h b/gfx/skia/skia/src/sksl/transform/SkSLTransform.h
new file mode 100644
index 0000000000..e051ec8086
--- /dev/null
+++ b/gfx/skia/skia/src/sksl/transform/SkSLTransform.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SKSL_TRANSFORM
+#define SKSL_TRANSFORM
+
+#include "include/core/SkSpan.h"
+#include <memory>
+#include <vector>
+
+namespace SkSL {
+
+class Context;
+class Expression;
+class IndexExpression;
+struct Modifiers;
+struct Module;
+struct Program;
+class ProgramElement;
+class ProgramUsage;
+class Statement;
+class Variable;
+enum class ProgramKind : int8_t;
+
+namespace Transform {
+
+/**
+ * Checks to see if it would be safe to add `const` to the modifiers of a variable. If so, returns
+ * the modifiers with `const` applied; if not, returns the existing modifiers as-is. Adding `const`
+ * allows the inliner to fold away more values and generate tighter code.
+ */
+const Modifiers* AddConstToVarModifiers(const Context& context,
+ const Variable& var,
+ const Expression* initialValue,
+ const ProgramUsage* usage);
+
+/**
+ * Rewrites indexed swizzles of the form `myVec.zyx[i]` by replacing the swizzle with a lookup into
+ * a constant vector. e.g., the above expression would be rewritten as `myVec[vec3(2, 1, 0)[i]]`.
+ * This roughly matches glslang's handling of the code.
+ */
+std::unique_ptr<Expression> RewriteIndexedSwizzle(const Context& context,
+ const IndexExpression& swizzle);
+
+/**
+ * Copies built-in functions from modules into the program. Relies on ProgramUsage to determine
+ * which functions are necessary.
+ */
+void FindAndDeclareBuiltinFunctions(Program& program);
+
+/**
+ * Scans the finished program for built-in variables like `sk_FragColor` and adds them to the
+ * program's shared elements.
+ */
+void FindAndDeclareBuiltinVariables(Program& program);
+
+/**
+ * Eliminates statements in a block which cannot be reached; for example, a statement
+ * immediately after a `return` or `continue` can safely be eliminated.
+ */
+void EliminateUnreachableCode(Module& module, ProgramUsage* usage);
+void EliminateUnreachableCode(Program& program);
+
+/**
+ * Eliminates empty statements in a module (Nops, or blocks holding only Nops). Not implemented for
+ * Programs because Nops are harmless, but they waste space in long-lived module IR.
+ */
+void EliminateEmptyStatements(Module& module);
+
+/**
+ * Eliminates functions in a program which are never called. Returns true if any changes were made.
+ */
+bool EliminateDeadFunctions(const Context& context, Module& module, ProgramUsage* usage);
+bool EliminateDeadFunctions(Program& program);
+
+/**
+ * Eliminates variables in a program which are never read or written (past their initializer).
+ * Preserves side effects from initializers, if any. Returns true if any changes were made.
+ */
+bool EliminateDeadLocalVariables(const Context& context,
+ Module& module,
+ ProgramUsage* usage);
+bool EliminateDeadLocalVariables(Program& program);
+bool EliminateDeadGlobalVariables(const Context& context,
+ Module& module,
+ ProgramUsage* usage,
+ bool onlyPrivateGlobals);
+bool EliminateDeadGlobalVariables(Program& program);
+
+/** Renames private functions and function-local variables to minimize code size. */
+void RenamePrivateSymbols(Context& context, Module& module, ProgramUsage* usage, ProgramKind kind);
+
+/** Replaces constant variables in a program with their equivalent values. */
+void ReplaceConstVarsWithLiterals(Module& module, ProgramUsage* usage);
+
+} // namespace Transform
+} // namespace SkSL
+
+#endif
diff --git a/gfx/skia/skia/src/text/GlyphRun.cpp b/gfx/skia/skia/src/text/GlyphRun.cpp
new file mode 100644
index 0000000000..bc511bf40f
--- /dev/null
+++ b/gfx/skia/skia/src/text/GlyphRun.cpp
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/text/GlyphRun.h"
+
+#include "include/core/SkFont.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkTextBlob.h"
+#include "src/base/SkUtils.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkFontPriv.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkStrikeSpec.h"
+#include "src/core/SkTextBlobPriv.h"
+
+namespace sktext {
+// -- GlyphRun -------------------------------------------------------------------------------------
+GlyphRun::GlyphRun(const SkFont& font,
+ SkSpan<const SkPoint> positions,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const char> text,
+ SkSpan<const uint32_t> clusters,
+ SkSpan<const SkVector> scaledRotations)
+ : fSource{SkMakeZip(glyphIDs, positions)}
+ , fText{text}
+ , fClusters{clusters}
+ , fScaledRotations{scaledRotations}
+ , fFont{font} {}
+
+GlyphRun::GlyphRun(const GlyphRun& that, const SkFont& font)
+ : fSource{that.fSource}
+ , fText{that.fText}
+ , fClusters{that.fClusters}
+ , fFont{font} {}
+
+// -- GlyphRunList ---------------------------------------------------------------------------------
+GlyphRunList::GlyphRunList(const SkTextBlob* blob,
+ SkRect bounds,
+ SkPoint origin,
+ SkSpan<const GlyphRun> glyphRunList,
+ GlyphRunBuilder* builder)
+ : fGlyphRuns{glyphRunList}
+ , fOriginalTextBlob{blob}
+ , fSourceBounds{bounds}
+ , fOrigin{origin}
+ , fBuilder{builder} {}
+
+GlyphRunList::GlyphRunList(const GlyphRun& glyphRun,
+ const SkRect& bounds,
+ SkPoint origin,
+ GlyphRunBuilder* builder)
+ : fGlyphRuns{SkSpan<const GlyphRun>{&glyphRun, 1}}
+ , fOriginalTextBlob{nullptr}
+ , fSourceBounds{bounds}
+ , fOrigin{origin}
+ , fBuilder{builder} {}
+
+uint64_t GlyphRunList::uniqueID() const {
+ return fOriginalTextBlob != nullptr ? fOriginalTextBlob->uniqueID()
+ : SK_InvalidUniqueID;
+}
+
+bool GlyphRunList::anyRunsLCD() const {
+ for (const auto& r : fGlyphRuns) {
+ if (r.font().getEdging() == SkFont::Edging::kSubpixelAntiAlias) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void GlyphRunList::temporaryShuntBlobNotifyAddedToCache(uint32_t cacheID) const {
+ SkASSERT(fOriginalTextBlob != nullptr);
+ fOriginalTextBlob->notifyAddedToCache(cacheID);
+}
+
+sk_sp<SkTextBlob> GlyphRunList::makeBlob() const {
+ SkTextBlobBuilder builder;
+ for (auto& run : *this) {
+ SkTextBlobBuilder::RunBuffer buffer;
+ if (run.scaledRotations().empty()) {
+ if (run.text().empty()) {
+ buffer = builder.allocRunPos(run.font(), run.runSize(), nullptr);
+ } else {
+ buffer = builder.allocRunTextPos(run.font(), run.runSize(), run.text().size(), nullptr);
+ auto text = run.text();
+ memcpy(buffer.utf8text, text.data(), text.size_bytes());
+ auto clusters = run.clusters();
+ memcpy(buffer.clusters, clusters.data(), clusters.size_bytes());
+ }
+ auto positions = run.positions();
+ memcpy(buffer.points(), positions.data(), positions.size_bytes());
+ } else {
+ buffer = builder.allocRunRSXform(run.font(), run.runSize());
+ for (auto [xform, pos, sr] : SkMakeZip(buffer.xforms(),
+ run.positions(),
+ run.scaledRotations())) {
+ xform = SkRSXform::Make(sr.x(), sr.y(), pos.x(), pos.y());
+ }
+ }
+ auto glyphIDs = run.glyphsIDs();
+ memcpy(buffer.glyphs, glyphIDs.data(), glyphIDs.size_bytes());
+ }
+ return builder.make();
+}
+
+// -- GlyphRunBuilder ------------------------------------------------------------------------------
+static SkRect glyphrun_source_bounds(
+ const SkFont& font,
+ const SkPaint& paint,
+ SkZip<const SkGlyphID, const SkPoint> source,
+ SkSpan<const SkVector> scaledRotations) {
+ SkASSERT(source.size() > 0);
+ const SkRect fontBounds = SkFontPriv::GetFontBounds(font);
+
+ SkSpan<const SkGlyphID> glyphIDs = source.get<0>();
+ SkSpan<const SkPoint> positions = source.get<1>();
+
+ if (fontBounds.isEmpty()) {
+ // Empty font bounds are likely a font bug. TightBounds has a better chance of
+ // producing useful results in this case.
+ auto [strikeSpec, strikeToSourceScale] = SkStrikeSpec::MakeCanonicalized(font, &paint);
+ SkBulkGlyphMetrics metrics{strikeSpec};
+ SkSpan<const SkGlyph*> glyphs = metrics.glyphs(glyphIDs);
+ if (scaledRotations.empty()) {
+ // No RSXForm data - glyphs x/y aligned.
+ auto scaleAndTranslateRect =
+ [scale = strikeToSourceScale](const SkRect& in, const SkPoint& pos) {
+ return SkRect::MakeLTRB(in.left() * scale + pos.x(),
+ in.top() * scale + pos.y(),
+ in.right() * scale + pos.x(),
+ in.bottom() * scale + pos.y());
+ };
+
+ SkRect bounds = SkRect::MakeEmpty();
+ for (auto [pos, glyph] : SkMakeZip(positions, glyphs)) {
+ if (SkRect r = glyph->rect(); !r.isEmpty()) {
+ bounds.join(scaleAndTranslateRect(r, pos));
+ }
+ }
+ return bounds;
+ } else {
+ // RSXForm - glyphs can be any scale or rotation.
+ SkRect bounds = SkRect::MakeEmpty();
+ for (auto [pos, scaleRotate, glyph] : SkMakeZip(positions, scaledRotations, glyphs)) {
+ if (!glyph->rect().isEmpty()) {
+ SkMatrix xform = SkMatrix().setRSXform(
+ SkRSXform{pos.x(), pos.y(), scaleRotate.x(), scaleRotate.y()});
+ xform.preScale(strikeToSourceScale, strikeToSourceScale);
+ bounds.join(xform.mapRect(glyph->rect()));
+ }
+ }
+ return bounds;
+ }
+ }
+
+ // Use conservative bounds. All glyph have a box of fontBounds size.
+ if (scaledRotations.empty()) {
+ SkRect bounds;
+ bounds.setBounds(positions.data(), SkCount(positions));
+ bounds.fLeft += fontBounds.left();
+ bounds.fTop += fontBounds.top();
+ bounds.fRight += fontBounds.right();
+ bounds.fBottom += fontBounds.bottom();
+ return bounds;
+ } else {
+ // RSXForm case glyphs can be any scale or rotation.
+ SkRect bounds;
+ bounds.setEmpty();
+ for (auto [pos, scaleRotate] : SkMakeZip(positions, scaledRotations)) {
+ const SkRSXform xform{pos.x(), pos.y(), scaleRotate.x(), scaleRotate.y()};
+ bounds.join(SkMatrix().setRSXform(xform).mapRect(fontBounds));
+ }
+ return bounds;
+ }
+}
+
+GlyphRunList GlyphRunBuilder::makeGlyphRunList(
+ const GlyphRun& run, const SkPaint& paint, SkPoint origin) {
+ const SkRect bounds =
+ glyphrun_source_bounds(run.font(), paint, run.source(), run.scaledRotations());
+ return GlyphRunList{run, bounds, origin, this};
+}
+
+static SkSpan<const SkPoint> draw_text_positions(
+ const SkFont& font, SkSpan<const SkGlyphID> glyphIDs, SkPoint origin, SkPoint* buffer) {
+ SkStrikeSpec strikeSpec = SkStrikeSpec::MakeWithNoDevice(font);
+ SkBulkGlyphMetrics storage{strikeSpec};
+ auto glyphs = storage.glyphs(glyphIDs);
+
+ SkPoint* positionCursor = buffer;
+ SkPoint endOfLastGlyph = origin;
+ for (auto glyph : glyphs) {
+ *positionCursor++ = endOfLastGlyph;
+ endOfLastGlyph += glyph->advanceVector();
+ }
+ return SkSpan(buffer, glyphIDs.size());
+}
+
+const GlyphRunList& GlyphRunBuilder::textToGlyphRunList(
+ const SkFont& font, const SkPaint& paint,
+ const void* bytes, size_t byteLength, SkPoint origin,
+ SkTextEncoding encoding) {
+ auto glyphIDs = textToGlyphIDs(font, bytes, byteLength, encoding);
+ SkRect bounds = SkRect::MakeEmpty();
+ this->prepareBuffers(glyphIDs.size(), 0);
+ if (!glyphIDs.empty()) {
+ SkSpan<const SkPoint> positions = draw_text_positions(font, glyphIDs, {0, 0}, fPositions);
+ this->makeGlyphRun(font,
+ glyphIDs,
+ positions,
+ SkSpan<const char>{},
+ SkSpan<const uint32_t>{},
+ SkSpan<const SkVector>{});
+ auto run = fGlyphRunListStorage.front();
+ bounds = glyphrun_source_bounds(run.font(), paint, run.source(), run.scaledRotations());
+ }
+
+ return this->setGlyphRunList(nullptr, bounds, origin);
+}
+
+const GlyphRunList& sktext::GlyphRunBuilder::blobToGlyphRunList(
+ const SkTextBlob& blob, SkPoint origin) {
+ // Pre-size all the buffers, so they don't move during processing.
+ this->initialize(blob);
+
+ SkPoint* positionCursor = fPositions;
+ SkVector* scaledRotationsCursor = fScaledRotations;
+ for (SkTextBlobRunIterator it(&blob); !it.done(); it.next()) {
+ size_t runSize = it.glyphCount();
+ if (runSize == 0 || !SkFontPriv::IsFinite(it.font())) {
+ // If no glyphs or the font is not finite, don't add the run.
+ continue;
+ }
+
+ const SkFont& font = it.font();
+ auto glyphIDs = SkSpan<const SkGlyphID>{it.glyphs(), runSize};
+
+ SkSpan<const SkPoint> positions;
+ SkSpan<const SkVector> scaledRotations;
+ switch (it.positioning()) {
+ case SkTextBlobRunIterator::kDefault_Positioning: {
+ positions = draw_text_positions(font, glyphIDs, it.offset(), positionCursor);
+ positionCursor += positions.size();
+ break;
+ }
+ case SkTextBlobRunIterator::kHorizontal_Positioning: {
+ positions = SkSpan(positionCursor, runSize);
+ for (auto x : SkSpan<const SkScalar>{it.pos(), glyphIDs.size()}) {
+ *positionCursor++ = SkPoint::Make(x, it.offset().y());
+ }
+ break;
+ }
+ case SkTextBlobRunIterator::kFull_Positioning: {
+ positions = SkSpan(it.points(), runSize);
+ break;
+ }
+ case SkTextBlobRunIterator::kRSXform_Positioning: {
+ positions = SkSpan(positionCursor, runSize);
+ scaledRotations = SkSpan(scaledRotationsCursor, runSize);
+ for (const SkRSXform& xform : SkSpan(it.xforms(), runSize)) {
+ *positionCursor++ = {xform.fTx, xform.fTy};
+ *scaledRotationsCursor++ = {xform.fSCos, xform.fSSin};
+ }
+ break;
+ }
+ }
+
+ const uint32_t* clusters = it.clusters();
+ this->makeGlyphRun(
+ font,
+ glyphIDs,
+ positions,
+ SkSpan<const char>(it.text(), it.textSize()),
+ SkSpan<const uint32_t>(clusters, clusters ? runSize : 0),
+ scaledRotations);
+ }
+
+ return this->setGlyphRunList(&blob, blob.bounds(), origin);
+}
+
+std::tuple<SkSpan<const SkPoint>, SkSpan<const SkVector>>
+GlyphRunBuilder::convertRSXForm(SkSpan<const SkRSXform> xforms) {
+ const int count = SkCount(xforms);
+ this->prepareBuffers(count, count);
+ auto positions = SkSpan(fPositions.get(), count);
+ auto scaledRotations = SkSpan(fScaledRotations.get(), count);
+ for (auto [pos, sr, xform] : SkMakeZip(positions, scaledRotations, xforms)) {
+ auto [scos, ssin, tx, ty] = xform;
+ pos = {tx, ty};
+ sr = {scos, ssin};
+ }
+ return {positions, scaledRotations};
+}
+
+void GlyphRunBuilder::initialize(const SkTextBlob& blob) {
+ int positionCount = 0;
+ int rsxFormCount = 0;
+ for (SkTextBlobRunIterator it(&blob); !it.done(); it.next()) {
+ if (it.positioning() != SkTextBlobRunIterator::kFull_Positioning) {
+ positionCount += it.glyphCount();
+ }
+ if (it.positioning() == SkTextBlobRunIterator::kRSXform_Positioning) {
+ rsxFormCount += it.glyphCount();
+ }
+ }
+
+ prepareBuffers(positionCount, rsxFormCount);
+}
+
+void GlyphRunBuilder::prepareBuffers(int positionCount, int RSXFormCount) {
+ if (positionCount > fMaxTotalRunSize) {
+ fMaxTotalRunSize = positionCount;
+ fPositions.reset(fMaxTotalRunSize);
+ }
+
+ if (RSXFormCount > fMaxScaledRotations) {
+ fMaxScaledRotations = RSXFormCount;
+ fScaledRotations.reset(RSXFormCount);
+ }
+
+ fGlyphRunListStorage.clear();
+}
+
+SkSpan<const SkGlyphID> GlyphRunBuilder::textToGlyphIDs(
+ const SkFont& font, const void* bytes, size_t byteLength, SkTextEncoding encoding) {
+ if (encoding != SkTextEncoding::kGlyphID) {
+ int count = font.countText(bytes, byteLength, encoding);
+ if (count > 0) {
+ fScratchGlyphIDs.resize(count);
+ font.textToGlyphs(bytes, byteLength, encoding, fScratchGlyphIDs.data(), count);
+ return SkSpan(fScratchGlyphIDs);
+ } else {
+ return SkSpan<const SkGlyphID>();
+ }
+ } else {
+ return SkSpan<const SkGlyphID>((const SkGlyphID*)bytes, byteLength / 2);
+ }
+}
+
+void GlyphRunBuilder::makeGlyphRun(
+ const SkFont& font,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const SkPoint> positions,
+ SkSpan<const char> text,
+ SkSpan<const uint32_t> clusters,
+ SkSpan<const SkVector> scaledRotations) {
+
+ // Ignore empty runs.
+ if (!glyphIDs.empty()) {
+ fGlyphRunListStorage.emplace_back(
+ font,
+ positions,
+ glyphIDs,
+ text,
+ clusters,
+ scaledRotations);
+ }
+}
+
+const GlyphRunList& sktext::GlyphRunBuilder::setGlyphRunList(
+ const SkTextBlob* blob, const SkRect& bounds, SkPoint origin) {
+ fGlyphRunList.emplace(blob, bounds, origin, SkSpan(fGlyphRunListStorage), this);
+ return fGlyphRunList.value();
+}
+} // namespace sktext
diff --git a/gfx/skia/skia/src/text/GlyphRun.h b/gfx/skia/skia/src/text/GlyphRun.h
new file mode 100644
index 0000000000..cc8980f824
--- /dev/null
+++ b/gfx/skia/skia/src/text/GlyphRun.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkGlyphRun_DEFINED
+#define SkGlyphRun_DEFINED
+
+#include <functional>
+#include <optional>
+#include <vector>
+
+#include "include/core/SkFont.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRSXform.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkZip.h"
+
+class SkBaseDevice;
+class SkCanvas;
+class SkGlyph;
+class SkTextBlob;
+class SkTextBlobRunIterator;
+
+namespace sktext {
+class GlyphRunBuilder;
+class GlyphRunList;
+
+class GlyphRun {
+public:
+ GlyphRun(const SkFont& font,
+ SkSpan<const SkPoint> positions,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const char> text,
+ SkSpan<const uint32_t> clusters,
+ SkSpan<const SkVector> scaledRotations);
+
+ GlyphRun(const GlyphRun& glyphRun, const SkFont& font);
+
+ size_t runSize() const { return fSource.size(); }
+ SkSpan<const SkPoint> positions() const { return fSource.get<1>(); }
+ SkSpan<const SkGlyphID> glyphsIDs() const { return fSource.get<0>(); }
+ SkZip<const SkGlyphID, const SkPoint> source() const { return fSource; }
+ const SkFont& font() const { return fFont; }
+ SkSpan<const uint32_t> clusters() const { return fClusters; }
+ SkSpan<const char> text() const { return fText; }
+ SkSpan<const SkVector> scaledRotations() const { return fScaledRotations; }
+
+private:
+ // GlyphIDs and positions.
+ const SkZip<const SkGlyphID, const SkPoint> fSource;
+    // Original text from SkTextBlob if present. Will be empty if not present.
+ const SkSpan<const char> fText;
+ // Original clusters from SkTextBlob if present. Will be empty if not present.
+ const SkSpan<const uint32_t> fClusters;
+ // Possible RSXForm information
+ const SkSpan<const SkVector> fScaledRotations;
+ // Font for this run modified to have glyph encoding and left alignment.
+ SkFont fFont;
+};
+
+class GlyphRunList {
+ SkSpan<const GlyphRun> fGlyphRuns;
+
+public:
+    // Blob may be null.
+ GlyphRunList(const SkTextBlob* blob,
+ SkRect bounds,
+ SkPoint origin,
+ SkSpan<const GlyphRun> glyphRunList,
+ GlyphRunBuilder* builder);
+
+ GlyphRunList(const GlyphRun& glyphRun,
+ const SkRect& bounds,
+ SkPoint origin,
+ GlyphRunBuilder* builder);
+ uint64_t uniqueID() const;
+ bool anyRunsLCD() const;
+ void temporaryShuntBlobNotifyAddedToCache(uint32_t cacheID) const;
+
+ bool canCache() const { return fOriginalTextBlob != nullptr; }
+ size_t runCount() const { return fGlyphRuns.size(); }
+ size_t totalGlyphCount() const {
+ size_t glyphCount = 0;
+ for (const GlyphRun& run : *this) {
+ glyphCount += run.runSize();
+ }
+ return glyphCount;
+ }
+ size_t maxGlyphRunSize() const {
+ size_t size = 0;
+ for (const GlyphRun& run : *this) {
+ size = std::max(run.runSize(), size);
+ }
+ return size;
+ }
+
+ bool hasRSXForm() const {
+ for (const GlyphRun& run : *this) {
+ if (!run.scaledRotations().empty()) { return true; }
+ }
+ return false;
+ }
+
+ sk_sp<SkTextBlob> makeBlob() const;
+
+ SkPoint origin() const { return fOrigin; }
+ SkRect sourceBounds() const { return fSourceBounds; }
+ SkRect sourceBoundsWithOrigin() const { return fSourceBounds.makeOffset(fOrigin); }
+ const SkTextBlob* blob() const { return fOriginalTextBlob; }
+ GlyphRunBuilder* builder() const { return fBuilder; }
+
+ auto begin() -> decltype(fGlyphRuns.begin()) { return fGlyphRuns.begin(); }
+ auto end() -> decltype(fGlyphRuns.end()) { return fGlyphRuns.end(); }
+ auto begin() const -> decltype(std::cbegin(fGlyphRuns)) { return std::cbegin(fGlyphRuns); }
+ auto end() const -> decltype(std::cend(fGlyphRuns)) { return std::cend(fGlyphRuns); }
+ auto size() const -> decltype(fGlyphRuns.size()) { return fGlyphRuns.size(); }
+ auto empty() const -> decltype(fGlyphRuns.empty()) { return fGlyphRuns.empty(); }
+ auto operator [] (size_t i) const -> decltype(fGlyphRuns[i]) { return fGlyphRuns[i]; }
+
+private:
+ // The text blob is needed to hook up the call back that the SkTextBlob destructor calls. It
+ // should be used for nothing else.
+ const SkTextBlob* fOriginalTextBlob{nullptr};
+ const SkRect fSourceBounds{SkRect::MakeEmpty()};
+ const SkPoint fOrigin = {0, 0};
+ GlyphRunBuilder* const fBuilder;
+};
+
+class GlyphRunBuilder {
+public:
+ GlyphRunList makeGlyphRunList(const GlyphRun& run, const SkPaint& paint, SkPoint origin);
+ const GlyphRunList& textToGlyphRunList(const SkFont& font,
+ const SkPaint& paint,
+ const void* bytes,
+ size_t byteLength,
+ SkPoint origin,
+ SkTextEncoding encoding = SkTextEncoding::kUTF8);
+ const GlyphRunList& blobToGlyphRunList(const SkTextBlob& blob, SkPoint origin);
+ std::tuple<SkSpan<const SkPoint>, SkSpan<const SkVector>>
+ convertRSXForm(SkSpan<const SkRSXform> xforms);
+
+ bool empty() const { return fGlyphRunListStorage.empty(); }
+
+private:
+ void initialize(const SkTextBlob& blob);
+ void prepareBuffers(int positionCount, int RSXFormCount);
+
+ SkSpan<const SkGlyphID> textToGlyphIDs(
+ const SkFont& font, const void* bytes, size_t byteLength, SkTextEncoding);
+
+ void makeGlyphRun(
+ const SkFont& font,
+ SkSpan<const SkGlyphID> glyphIDs,
+ SkSpan<const SkPoint> positions,
+ SkSpan<const char> text,
+ SkSpan<const uint32_t> clusters,
+ SkSpan<const SkVector> scaledRotations);
+
+ const GlyphRunList& setGlyphRunList(
+ const SkTextBlob* blob, const SkRect& bounds, SkPoint origin);
+
+ int fMaxTotalRunSize{0};
+ skia_private::AutoTMalloc<SkPoint> fPositions;
+ int fMaxScaledRotations{0};
+ skia_private::AutoTMalloc<SkVector> fScaledRotations;
+
+ std::vector<GlyphRun> fGlyphRunListStorage;
+ std::optional<GlyphRunList> fGlyphRunList; // Defaults to no value;
+
+ // Used as a temporary for preparing using utfN text. This implies that only one run of
+ // glyph ids will ever be needed because blobs are already glyph based.
+ std::vector<SkGlyphID> fScratchGlyphIDs;
+
+};
+} // namespace sktext
+
+#endif // SkGlyphRun_DEFINED
diff --git a/gfx/skia/skia/src/text/StrikeForGPU.cpp b/gfx/skia/skia/src/text/StrikeForGPU.cpp
new file mode 100644
index 0000000000..c2f8b03ccc
--- /dev/null
+++ b/gfx/skia/skia/src/text/StrikeForGPU.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/text/StrikeForGPU.h"
+
+#include <memory>
+#include <utility>
+
+#include "include/private/chromium/SkChromeRemoteGlyphCache.h"
+#include "src/core/SkDescriptor.h"
+#include "src/core/SkReadBuffer.h"
+#include "src/core/SkStrike.h"
+#include "src/core/SkStrikeCache.h"
+#include "src/core/SkWriteBuffer.h"
+
+namespace sktext {
+// -- SkStrikePromise ------------------------------------------------------------------------------
+SkStrikePromise::SkStrikePromise(sktext::SkStrikePromise&&) = default;
+SkStrikePromise& SkStrikePromise::operator=(sktext::SkStrikePromise&&) = default;
+SkStrikePromise::SkStrikePromise(sk_sp<SkStrike>&& strike)
+ : fStrikeOrSpec{std::move(strike)} {}
+SkStrikePromise::SkStrikePromise(const SkStrikeSpec& spec)
+ : fStrikeOrSpec{std::make_unique<SkStrikeSpec>(spec)} {}
+
+SkStrike* SkStrikePromise::strike() {
+ if (std::holds_alternative<std::unique_ptr<SkStrikeSpec>>(fStrikeOrSpec)) {
+ // Turn the strike spec into a strike.
+ std::unique_ptr<SkStrikeSpec> spec =
+ std::exchange(std::get<std::unique_ptr<SkStrikeSpec>>(fStrikeOrSpec), nullptr);
+
+ fStrikeOrSpec = SkStrikeCache::GlobalStrikeCache()->findOrCreateStrike(*spec);
+ }
+ return std::get<sk_sp<SkStrike>>(fStrikeOrSpec).get();
+}
+
+void SkStrikePromise::resetStrike() {
+ fStrikeOrSpec = sk_sp<SkStrike>();
+}
+
+const SkDescriptor& SkStrikePromise::descriptor() const {
+ if (std::holds_alternative<std::unique_ptr<SkStrikeSpec>>(fStrikeOrSpec)) {
+ return std::get<std::unique_ptr<SkStrikeSpec>>(fStrikeOrSpec)->descriptor();
+ }
+
+ return std::get<sk_sp<SkStrike>>(fStrikeOrSpec)->getDescriptor();
+}
+
+void SkStrikePromise::flatten(SkWriteBuffer& buffer) const {
+ this->descriptor().flatten(buffer);
+}
+
+std::optional<SkStrikePromise> SkStrikePromise::MakeFromBuffer(
+ SkReadBuffer& buffer, const SkStrikeClient* client, SkStrikeCache* strikeCache) {
+ std::optional<SkAutoDescriptor> descriptor = SkAutoDescriptor::MakeFromBuffer(buffer);
+ if (!buffer.validate(descriptor.has_value())) {
+ return std::nullopt;
+ }
+
+    // If there is a client, then this is from a different process. Translate the SkTypefaceID from
+ // the strike server (Renderer) process to strike client (GPU) process.
+ if (client != nullptr) {
+ if (!client->translateTypefaceID(&descriptor.value())) {
+ return std::nullopt;
+ }
+ }
+
+ sk_sp<SkStrike> strike = strikeCache->findStrike(*descriptor->getDesc());
+ SkASSERT(strike != nullptr);
+ if (!buffer.validate(strike != nullptr)) {
+ return std::nullopt;
+ }
+
+ return SkStrikePromise{std::move(strike)};
+}
+
+// -- StrikeMutationMonitor ------------------------------------------------------------------------
+StrikeMutationMonitor::StrikeMutationMonitor(StrikeForGPU* strike)
+ : fStrike{strike} {
+ fStrike->lock();
+}
+
+StrikeMutationMonitor::~StrikeMutationMonitor() {
+ fStrike->unlock();
+}
+} // namespace sktext
diff --git a/gfx/skia/skia/src/text/StrikeForGPU.h b/gfx/skia/skia/src/text/StrikeForGPU.h
new file mode 100644
index 0000000000..378d74c43b
--- /dev/null
+++ b/gfx/skia/skia/src/text/StrikeForGPU.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef sktext_StrikeForGPU_DEFINED
+#define sktext_StrikeForGPU_DEFINED
+
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkSpan.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkGlyph.h"
+
+#include <memory>
+#include <optional>
+#include <variant>
+
+class SkDescriptor;
+class SkDrawableGlyphBuffer;
+class SkReadBuffer;
+class SkSourceGlyphBuffer;
+class SkStrike;
+class SkStrikeCache;
+class SkStrikeClient;
+class SkStrikeSpec;
+class SkWriteBuffer;
+struct SkGlyphPositionRoundingSpec;
+struct SkScalerContextEffects;
+
+namespace sktext {
+// -- SkStrikePromise ------------------------------------------------------------------------------
+// SkStrikePromise produces an SkStrike when needed by GPU glyph rendering. In ordinary
+// operation, it just wraps an SkStrike. When used for remote glyph cache operation, the promise is
+// serialized to an SkDescriptor. When SkStrikePromise is deserialized, it uses the descriptor to
+// look up the SkStrike.
+//
+// When deserializing some care must be taken; if the needed SkStrike is removed from the cache,
+// then looking up using the descriptor will fail resulting in a deserialization failure. The
+// Renderer/GPU system solves this problem by pinning all the strikes needed into the cache.
+class SkStrikePromise {
+public:
+ SkStrikePromise() = delete;
+ SkStrikePromise(const SkStrikePromise&) = delete;
+ SkStrikePromise& operator=(const SkStrikePromise&) = delete;
+ SkStrikePromise(SkStrikePromise&&);
+ SkStrikePromise& operator=(SkStrikePromise&&);
+
+ explicit SkStrikePromise(sk_sp<SkStrike>&& strike);
+ explicit SkStrikePromise(const SkStrikeSpec& spec);
+
+ static std::optional<SkStrikePromise> MakeFromBuffer(SkReadBuffer& buffer,
+ const SkStrikeClient* client,
+ SkStrikeCache* strikeCache);
+ void flatten(SkWriteBuffer& buffer) const;
+
+ // Do what is needed to return a strike.
+ SkStrike* strike();
+
+ // Reset the sk_sp<SkStrike> to nullptr.
+ void resetStrike();
+
+ // Return a descriptor used to look up the SkStrike.
+ const SkDescriptor& descriptor() const;
+
+private:
+ std::variant<sk_sp<SkStrike>, std::unique_ptr<SkStrikeSpec>> fStrikeOrSpec;
+};
+
+// -- StrikeForGPU ---------------------------------------------------------------------------------
+class StrikeForGPU : public SkRefCnt {
+public:
+ virtual void lock() = 0;
+ virtual void unlock() = 0;
+
+    // Generate a digest for a given packed glyph ID as drawn using the given action type.
+ virtual SkGlyphDigest digestFor(skglyph::ActionType, SkPackedGlyphID) = 0;
+
+ // Prepare the glyph to draw an image, and return if the image exists.
+ virtual bool prepareForImage(SkGlyph*) = 0;
+
+ // Prepare the glyph to draw a path, and return if the path exists.
+ virtual bool prepareForPath(SkGlyph*) = 0;
+
+ // Prepare the glyph to draw a drawable, and return if the drawable exists.
+ virtual bool prepareForDrawable(SkGlyph*) = 0;
+
+
+ virtual const SkDescriptor& getDescriptor() const = 0;
+
+ virtual const SkGlyphPositionRoundingSpec& roundingSpec() const = 0;
+
+ // Return a strike promise.
+ virtual SkStrikePromise strikePromise() = 0;
+};
+
+// prepareForPathDrawing uses this union to convert glyph ids to paths.
+union IDOrPath {
+ IDOrPath() {}
+ IDOrPath(SkGlyphID glyphID) : fGlyphID{glyphID} {}
+
+ // PathOpSubmitter takes care of destroying the paths.
+ ~IDOrPath() {}
+ SkGlyphID fGlyphID;
+ SkPath fPath;
+};
+
+// prepareForDrawableDrawing uses this union to convert glyph ids to drawables.
+union IDOrDrawable {
+ SkGlyphID fGlyphID;
+ SkDrawable* fDrawable;
+};
+
+// -- StrikeMutationMonitor ------------------------------------------------------------------------
+class StrikeMutationMonitor {
+public:
+ StrikeMutationMonitor(StrikeForGPU* strike);
+ ~StrikeMutationMonitor();
+
+private:
+ StrikeForGPU* fStrike;
+};
+
+// -- StrikeForGPUCacheInterface -------------------------------------------------------------------
+class StrikeForGPUCacheInterface {
+public:
+ virtual ~StrikeForGPUCacheInterface() = default;
+ virtual sk_sp<StrikeForGPU> findOrCreateScopedStrike(const SkStrikeSpec& strikeSpec) = 0;
+};
+} // namespace sktext
+#endif // sktext_StrikeForGPU_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkAnimCodecPlayer.cpp b/gfx/skia/skia/src/utils/SkAnimCodecPlayer.cpp
new file mode 100644
index 0000000000..f296a4081d
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkAnimCodecPlayer.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkAnimCodecPlayer.h"
+
+#include "include/codec/SkCodec.h"
+#include "include/codec/SkEncodedOrigin.h"
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkImage.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSamplingOptions.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "src/codec/SkCodecImageGenerator.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <memory>
+#include <utility>
+#include <vector>
+
+SkAnimCodecPlayer::SkAnimCodecPlayer(std::unique_ptr<SkCodec> codec) : fCodec(std::move(codec)) {
+ fImageInfo = fCodec->getInfo();
+ fFrameInfos = fCodec->getFrameInfo();
+ fImages.resize(fFrameInfos.size());
+
+    // change the interpretation of fDuration to an end-time for that frame
+ size_t dur = 0;
+ for (auto& f : fFrameInfos) {
+ dur += f.fDuration;
+ f.fDuration = dur;
+ }
+ fTotalDuration = dur;
+
+ if (!fTotalDuration) {
+ // Static image -- may or may not have returned a single frame info.
+ fFrameInfos.clear();
+ fImages.clear();
+ fImages.push_back(SkImage::MakeFromGenerator(
+ SkCodecImageGenerator::MakeFromCodec(std::move(fCodec))));
+ }
+}
+
+SkAnimCodecPlayer::~SkAnimCodecPlayer() {}
+
+SkISize SkAnimCodecPlayer::dimensions() const {
+ if (!fCodec) {
+ auto image = fImages.front();
+ return image ? image->dimensions() : SkISize::MakeEmpty();
+ }
+ if (SkEncodedOriginSwapsWidthHeight(fCodec->getOrigin())) {
+ return { fImageInfo.height(), fImageInfo.width() };
+ }
+ return { fImageInfo.width(), fImageInfo.height() };
+}
+
+sk_sp<SkImage> SkAnimCodecPlayer::getFrameAt(int index) {
+ SkASSERT((unsigned)index < fFrameInfos.size());
+
+ if (fImages[index]) {
+ return fImages[index];
+ }
+
+ size_t rb = fImageInfo.minRowBytes();
+ size_t size = fImageInfo.computeByteSize(rb);
+ auto data = SkData::MakeUninitialized(size);
+
+ SkCodec::Options opts;
+ opts.fFrameIndex = index;
+
+ const auto origin = fCodec->getOrigin();
+ const auto orientedDims = this->dimensions();
+ const auto originMatrix = SkEncodedOriginToMatrix(origin, orientedDims.width(),
+ orientedDims.height());
+
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+
+ auto imageInfo = fImageInfo;
+ if (fFrameInfos[index].fAlphaType != kOpaque_SkAlphaType && imageInfo.isOpaque()) {
+ imageInfo = imageInfo.makeAlphaType(kPremul_SkAlphaType);
+ }
+ const int requiredFrame = fFrameInfos[index].fRequiredFrame;
+ if (requiredFrame != SkCodec::kNoFrame && fImages[requiredFrame]) {
+ auto requiredImage = fImages[requiredFrame];
+ auto canvas = SkCanvas::MakeRasterDirect(imageInfo, data->writable_data(), rb);
+ if (origin != kDefault_SkEncodedOrigin) {
+ // The required frame is stored after applying the origin. Undo that,
+ // because the codec decodes prior to applying the origin.
+ // FIXME: Another approach would be to decode the frame's delta on top
+ // of transparent black, and then draw that through the origin matrix
+ // onto the required frame. To do that, SkCodec needs to expose the
+ // rectangle of the delta and the blend mode, so we can handle
+ // kRestoreBGColor frames and Blend::kSrc.
+ SkMatrix inverse;
+ SkAssertResult(originMatrix.invert(&inverse));
+ canvas->concat(inverse);
+ }
+ canvas->drawImage(requiredImage, 0, 0, SkSamplingOptions(), &paint);
+ opts.fPriorFrame = requiredFrame;
+ }
+
+ if (SkCodec::kSuccess != fCodec->getPixels(imageInfo, data->writable_data(), rb, &opts)) {
+ return nullptr;
+ }
+
+ auto image = SkImage::MakeRasterData(imageInfo, std::move(data), rb);
+ if (origin != kDefault_SkEncodedOrigin) {
+ imageInfo = imageInfo.makeDimensions(orientedDims);
+ rb = imageInfo.minRowBytes();
+ size = imageInfo.computeByteSize(rb);
+ data = SkData::MakeUninitialized(size);
+ auto canvas = SkCanvas::MakeRasterDirect(imageInfo, data->writable_data(), rb);
+ canvas->concat(originMatrix);
+ canvas->drawImage(image, 0, 0, SkSamplingOptions(), &paint);
+ image = SkImage::MakeRasterData(imageInfo, std::move(data), rb);
+ }
+ return fImages[index] = image;
+}
+
+sk_sp<SkImage> SkAnimCodecPlayer::getFrame() {
+ SkASSERT(fTotalDuration > 0 || fImages.size() == 1);
+
+ return fTotalDuration > 0
+ ? this->getFrameAt(fCurrIndex)
+ : fImages.front();
+}
+
+bool SkAnimCodecPlayer::seek(uint32_t msec) {
+ if (!fTotalDuration) {
+ return false;
+ }
+
+ msec %= fTotalDuration;
+
+ auto lower = std::lower_bound(fFrameInfos.begin(), fFrameInfos.end(), msec,
+ [](const SkCodec::FrameInfo& info, uint32_t msec) {
+ return (uint32_t)info.fDuration <= msec;
+ });
+ int prevIndex = fCurrIndex;
+ fCurrIndex = lower - fFrameInfos.begin();
+ return fCurrIndex != prevIndex;
+}
+
+
diff --git a/gfx/skia/skia/src/utils/SkBase64.cpp b/gfx/skia/skia/src/utils/SkBase64.cpp
new file mode 100644
index 0000000000..4ebb9d7ae2
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBase64.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "include/utils/SkBase64.h"
+
+#include <cstdint>
+
+#define DecodePad -2
+#define EncodePad 64
+
+static const char default_encode[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/=";
+
+static const signed char decodeData[] = {
+ 62, -1, -1, -1, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, DecodePad, -1, -1,
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
+ -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51
+};
+
+#if defined _WIN32 // disable 'two', etc. may be used without having been initialized
+#pragma warning ( push )
+#pragma warning ( disable : 4701 )
+#endif
+
+SkBase64::Error SkBase64::Decode(const void* srcv, size_t srcLength, void* dstv, size_t* dstLength){
+ const unsigned char* src = static_cast<const unsigned char*>(srcv);
+ unsigned char* dst = static_cast<unsigned char*>(dstv);
+
+ int i = 0;
+ bool padTwo = false;
+ bool padThree = false;
+ char unsigned const * const end = src + srcLength;
+ while (src < end) {
+ unsigned char bytes[4];
+ int byte = 0;
+ do {
+ unsigned char srcByte = *src++;
+ if (srcByte == 0)
+ goto goHome;
+ if (srcByte <= ' ')
+ continue; // treat as white space
+ if (srcByte < '+' || srcByte > 'z')
+ return kBadCharError;
+ signed char decoded = decodeData[srcByte - '+'];
+ bytes[byte] = decoded;
+ if (decoded < 0) {
+ if (decoded == DecodePad)
+ goto handlePad;
+ return kBadCharError;
+ } else
+ byte++;
+ if (*src)
+ continue;
+ if (byte == 0)
+ goto goHome;
+ if (byte == 4)
+ break;
+handlePad:
+ if (byte < 2)
+ return kPadError;
+ padThree = true;
+ if (byte == 2)
+ padTwo = true;
+ break;
+ } while (byte < 4);
+ int two = 0;
+ int three = 0;
+ if (dst) {
+ int one = (uint8_t) (bytes[0] << 2);
+ two = bytes[1];
+ one |= two >> 4;
+ two = (uint8_t) ((two << 4) & 0xFF);
+ three = bytes[2];
+ two |= three >> 2;
+ three = (uint8_t) ((three << 6) & 0xFF);
+ three |= bytes[3];
+ SkASSERT(one < 256 && two < 256 && three < 256);
+ dst[i] = (unsigned char) one;
+ }
+ i++;
+ if (padTwo)
+ break;
+ if (dst)
+ dst[i] = (unsigned char) two;
+ i++;
+ if (padThree)
+ break;
+ if (dst)
+ dst[i] = (unsigned char) three;
+ i++;
+ }
+goHome:
+ *dstLength = i;
+ return kNoError;
+}
+
+#if defined _WIN32
+#pragma warning ( pop )
+#endif
+
+size_t SkBase64::Encode(const void* srcv, size_t length, void* dstv, const char* encodeMap) {
+ const unsigned char* src = static_cast<const unsigned char*>(srcv);
+ unsigned char* dst = static_cast<unsigned char*>(dstv);
+
+ const char* encode;
+ if (nullptr == encodeMap) {
+ encode = default_encode;
+ } else {
+ encode = encodeMap;
+ }
+ if (dst) {
+ size_t remainder = length % 3;
+ char unsigned const * const end = &src[length - remainder];
+ while (src < end) {
+ unsigned a = *src++;
+ unsigned b = *src++;
+ unsigned c = *src++;
+ int d = c & 0x3F;
+ c = (c >> 6 | b << 2) & 0x3F;
+ b = (b >> 4 | a << 4) & 0x3F;
+ a = a >> 2;
+ *dst++ = encode[a];
+ *dst++ = encode[b];
+ *dst++ = encode[c];
+ *dst++ = encode[d];
+ }
+ if (remainder > 0) {
+ int k1 = 0;
+ int k2 = EncodePad;
+ int a = (uint8_t) *src++;
+ if (remainder == 2)
+ {
+ int b = *src++;
+ k1 = b >> 4;
+ k2 = (b << 2) & 0x3F;
+ }
+ *dst++ = encode[a >> 2];
+ *dst++ = encode[(k1 | a << 4) & 0x3F];
+ *dst++ = encode[k2];
+ *dst++ = encode[EncodePad];
+ }
+ }
+ return (length + 2) / 3 * 4;
+}
diff --git a/gfx/skia/skia/src/utils/SkBitSet.h b/gfx/skia/skia/src/utils/SkBitSet.h
new file mode 100644
index 0000000000..4a333bc539
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBitSet.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBitSet_DEFINED
+#define SkBitSet_DEFINED
+
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkMathPriv.h"
+
+#include <climits>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <optional>
+
+class SkBitSet {
+public:
+ explicit SkBitSet(size_t size)
+ : fSize(size)
+ , fChunks((Chunk*)sk_calloc_throw(NumChunksFor(fSize) * sizeof(Chunk))) {}
+
+ SkBitSet(const SkBitSet&) = delete;
+ SkBitSet& operator=(const SkBitSet&) = delete;
+ SkBitSet(SkBitSet&& that) { *this = std::move(that); }
+ SkBitSet& operator=(SkBitSet&& that) {
+ if (this != &that) {
+ this->fSize = that.fSize;
+ this->fChunks = std::move(that.fChunks);
+ that.fSize = 0;
+ }
+ return *this;
+ }
+ ~SkBitSet() = default;
+
+ /** Set the value of the index-th bit to true. */
+ void set(size_t index) {
+ SkASSERT(index < fSize);
+ *this->chunkFor(index) |= ChunkMaskFor(index);
+ }
+
+ /** Sets every bit in the bitset to true. */
+ void set() {
+ Chunk* chunks = fChunks.get();
+ const size_t numChunks = NumChunksFor(fSize);
+ std::memset(chunks, 0xFF, sizeof(Chunk) * numChunks);
+ }
+
+ /** Set the value of the index-th bit to false. */
+ void reset(size_t index) {
+ SkASSERT(index < fSize);
+ *this->chunkFor(index) &= ~ChunkMaskFor(index);
+ }
+
+ /** Sets every bit in the bitset to false. */
+ void reset() {
+ Chunk* chunks = fChunks.get();
+ const size_t numChunks = NumChunksFor(fSize);
+ std::memset(chunks, 0, sizeof(Chunk) * numChunks);
+ }
+
+ bool test(size_t index) const {
+ SkASSERT(index < fSize);
+ return SkToBool(*this->chunkFor(index) & ChunkMaskFor(index));
+ }
+
+ size_t size() const {
+ return fSize;
+ }
+
+ // Calls f(size_t index) for each set index.
+ template<typename FN>
+ void forEachSetIndex(FN f) const {
+ const Chunk* chunks = fChunks.get();
+ const size_t numChunks = NumChunksFor(fSize);
+ for (size_t i = 0; i < numChunks; ++i) {
+ if (Chunk chunk = chunks[i]) { // There are set bits
+ const size_t index = i * kChunkBits;
+ for (size_t j = 0; j < kChunkBits; ++j) {
+ if (0x1 & (chunk >> j)) {
+ f(index + j);
+ }
+ }
+ }
+ }
+ }
+
+ using OptionalIndex = std::optional<size_t>;
+
+ // If any bits are set, returns the index of the first.
+ OptionalIndex findFirst() {
+ const Chunk* chunks = fChunks.get();
+ const size_t numChunks = NumChunksFor(fSize);
+ for (size_t i = 0; i < numChunks; ++i) {
+ if (Chunk chunk = chunks[i]) { // There are set bits
+ static_assert(kChunkBits <= std::numeric_limits<uint32_t>::digits, "SkCTZ");
+ const size_t bitIndex = i * kChunkBits + SkCTZ(chunk);
+ return OptionalIndex(bitIndex);
+ }
+ }
+ return OptionalIndex();
+ }
+
+ // If any bits are not set, returns the index of the first.
+ OptionalIndex findFirstUnset() {
+ const Chunk* chunks = fChunks.get();
+ const size_t numChunks = NumChunksFor(fSize);
+ for (size_t i = 0; i < numChunks; ++i) {
+ if (Chunk chunk = ~chunks[i]) { // if there are unset bits ...
+ static_assert(kChunkBits <= std::numeric_limits<uint32_t>::digits, "SkCTZ");
+ const size_t bitIndex = i * kChunkBits + SkCTZ(chunk);
+ if (bitIndex >= fSize) {
+ break;
+ }
+ return OptionalIndex(bitIndex);
+ }
+ }
+ return OptionalIndex();
+ }
+
+private:
+ size_t fSize;
+
+ using Chunk = uint32_t;
+ static_assert(std::numeric_limits<Chunk>::radix == 2);
+ inline static constexpr size_t kChunkBits = std::numeric_limits<Chunk>::digits;
+ static_assert(kChunkBits == sizeof(Chunk)*CHAR_BIT, "SkBitSet must use every bit in a Chunk");
+ std::unique_ptr<Chunk, SkOverloadedFunctionObject<void(void*), sk_free>> fChunks;
+
+ Chunk* chunkFor(size_t index) const {
+ return fChunks.get() + (index / kChunkBits);
+ }
+
+ static constexpr Chunk ChunkMaskFor(size_t index) {
+ return (Chunk)1 << (index & (kChunkBits-1));
+ }
+
+ static constexpr size_t NumChunksFor(size_t size) {
+ return (size + (kChunkBits-1)) / kChunkBits;
+ }
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkBlitterTrace.h b/gfx/skia/skia/src/utils/SkBlitterTrace.h
new file mode 100644
index 0000000000..e350c0f8c0
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBlitterTrace.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * This is an experimental (and probably temporary) solution that allows
+ * to compare performance SkVM blitters vs RasterPipeline blitters.
+ * In addition to measuring performance (which is questionable) it also produces
+ * other counts (pixels, scanlines) and more detailed traces that
+ * can explain the current results (SkVM is slower) and help improve it.
+ * The entire code is hidden under build flag skia_compare_vm_vs_rp=true
+ * and will not appear at all without it.
+ *
+ * In order to collect the tracing information SkVM Blitters should run with SKVM_BLITTER_TRACE_ON
+ * and RasterPipeline Blitters with RASTER_PIPELINE_BLITTER_TRACE_ON.
+ */
+
+#ifndef SkBlitterTrace_DEFINED
+#define SkBlitterTrace_DEFINED
+
+#include <inttypes.h>
+#include <unordered_map>
+#include "src/utils/SkBlitterTraceCommon.h"
+
+#ifdef SKIA_COMPARE_VM_VS_RP
+
+#if !defined(SK_BLITTER_TRACE_IS_SKVM) && !defined(SK_BLITTER_TRACE_IS_RASTER_PIPELINE)
+#error "One blitter trace type should be defined if we have flag skia_compare_vm_vs_rp flag = true."
+#endif
+
+#if defined(SK_BLITTER_TRACE_IS_SKVM) && defined(SK_BLITTER_TRACE_IS_RASTER_PIPELINE)
+#error "Only one blitter trace type should be defined."
+#endif
+
+#ifdef SK_BLITTER_TRACE_IS_SKVM
+SkBlitterTrace gSkVMBlitterTrace("VM", false);
+#define SK_BLITTER_TRACE_STEP(name, trace, scanlines, pixels) \
+ SkBlitterTrace::Step name(trace ? &gSkVMBlitterTrace : nullptr, \
+ #name, \
+ scanlines, \
+ pixels)
+#endif
+
+#ifdef SK_BLITTER_TRACE_IS_RASTER_PIPELINE
+SkBlitterTrace gSkRPBlitterTrace("RP", false);
+#define SK_BLITTER_TRACE_STEP(name, trace, scanlines, pixels) \
+ SkBlitterTrace::Step name(trace ? &gSkRPBlitterTrace : nullptr, \
+ #name, \
+ scanlines, \
+ pixels)
+#endif
+
+#define SK_BLITTER_TRACE_STEP_ACCUMULATE(step, pixels) \
+ step.add(/*scanlines=*/0, /*pixels=*/run)
+#else
+#define INITIATE_BLITTER_TRACE(type) SK_BLITTER_TRACE_NO_CODE
+#define SK_BLITTER_TRACE_STEP(name, trace, scanlines, pixels) SK_BLITTER_TRACE_NO_CODE
+#define SK_BLITTER_TRACE_STEP_ACCUMULATE(step, pixels) SK_BLITTER_TRACE_NO_CODE
+#endif
+
+#endif // SkBlitterTrace_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkBlitterTraceCommon.h b/gfx/skia/skia/src/utils/SkBlitterTraceCommon.h
new file mode 100644
index 0000000000..b4adb5ce77
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkBlitterTraceCommon.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * This is an experimental (and probably temporary) solution that allows
+ * comparing the performance of SkVM blitters vs RasterPipeline blitters.
+ * In addition to measuring performance (which is questionable) it also produces
+ * other counts (pixels, scanlines) and more detailed traces that
+ * can explain the current results (SkVM is slower) and help improve it.
+ * The entire code is hidden under build flag skia_compare_vm_vs_rp=true
+ * and will not appear at all without it.
+ */
+
+#ifndef SkBlitterTraceCommon_DEFINED
+#define SkBlitterTraceCommon_DEFINED
+
+#include <inttypes.h>
+#include <unordered_map>
+
+#define SK_BLITTER_TRACE_NO_CODE do {} while (0)
+
+#ifdef SKIA_COMPARE_VM_VS_RP
+
+#include "src/utils/SkCycles.h"
+
+class SkBlitterTrace {
+/*
+ * This class collects information for RasterPipeLine vs SkVM
+ * performance comparison.
+ * How to get the comparison table:
+ * 1. Add to your Release/args.gn an argument: skia_compare_vm_vs_rp=true
+ * Build nanobench.
+ * 2. Run nanobench for SkVM:
+ * []/Release/nanobench
+ * --csv --config 8888 --skvm --loops 100 --samples 1
+ * --match $(ls skps | grep --invert-match svg ) 2>&1 | tee VM.data
+ * 3. Run nanobench for RasterPipeLine:
+ * []/Release/nanobench
+ * --csv --config 8888 --forceRasterPipeline --loops 100
+ * --samples 1 --match $(ls skps | grep --invert-match svg )
+ * 2>&1 | tee RP.data
+ * 4. Extract the information side-by-side:
+ * awk 'BEGIN {OFS=","; fileNum = 0} ($2 ~ /MB/) && fileNum == 0
+ * {vmvmcycles[$3] = $6; vmvmscan[$3] = $8; vmvmpixels[$3] = $10;
+ * vmvminterp[$3] = $11; vmrpcycle[$3] = $14; vmrpscan[$3] = $16;
+ * vmrppixels[$3] = $18} ($2 ~ /MB/) && fileNum == 1 {print $3,
+ * vmvmcycles[$3], vmvmscan[$3], vmvmpixels[$3], vmvminterp[$3], $6, $8,
+ * $10, $11, $14, $16, $18} ENDFILE {fileNum += 1}'
+ * VM.data RP.data > compare.csv
+ * 5. Open the compare.csv table in Google Spreadsheets.
+ * You will get columns [A:P]. Add 4 more columns with formulas:
+ * Q: =B/M-1
+ * R: =N-C
+ * S: =O-D
+ * T: =2*(S<>0)+(R<>0)
+ * To be honest R, S, T columns are here for checking only (they all
+ * supposed to have zero values in them)
+ * Column Q shows the actual performance difference. Negative value means
+ *    that SkVM wins; a positive value means RasterPipeLine wins.
+ */
+public:
+ SkBlitterTrace(const char* header, bool traceSteps = false)
+ : fHeader(header), fTraceSteps(traceSteps) { }
+
+ SkBlitterTrace& operator= (const SkBlitterTrace&) = default;
+
+ void addTrace(const char* name, uint64_t cycles, uint64_t scanLines, uint64_t pixels) {
+ fCycles += cycles;
+ fScanlines += scanLines;
+ fPixels += pixels;
+ if (fTraceSteps) {
+ printIncrements(name, cycles, scanLines, pixels);
+ }
+ }
+
+ void reset() {
+ fCycles = 0ul;
+ fScanlines = 0ul;
+ fPixels = 0ul;
+ }
+
+ void printIncrements(const char* name,
+ uint64_t cycles,
+ uint64_t scanLines,
+ uint64_t pixels) const {
+ SkDebugf("%s %s: cycles=%" PRIu64 "+%" PRIu64
+ " scanlines=%" PRIu64 "+%" PRIu64 " pixels=%" PRIu64,
+ fHeader, name,
+ fCycles - cycles, cycles,
+ fScanlines - scanLines, scanLines,
+ fPixels);
+ SkDebugf("\n");
+ }
+
+ void printCounts(const char* name) const {
+ SkDebugf("%s cycles: %" PRIu64 " "
+ " scanlines: %" PRIu64 " pixels: %" PRIu64,
+ fHeader,
+ fCycles,
+ fScanlines,
+ fPixels);
+ SkDebugf(" ");
+ }
+
+ uint64_t getCycles() const { return fCycles; }
+ uint64_t getScanlines() const { return fScanlines; }
+ uint64_t getPixels() const { return fPixels; }
+
+ class Step {
+ public:
+ Step(SkBlitterTrace* trace,
+ const char* name,
+ uint64_t scanlines,
+ uint64_t pixels)
+ : fTrace(trace)
+ , fName(name)
+ , fScanlines(scanlines)
+ , fPixels(pixels) {
+ fStartTime = SkCycles::Now();
+ }
+ void add(uint64_t scanlines, uint64_t pixels) {
+ fScanlines += scanlines;
+ fPixels += pixels;
+ }
+ ~Step() {
+ if (fTrace == nullptr) {
+ return;
+ }
+ auto endTime = SkCycles::Now() - fStartTime;
+ fTrace->addTrace(/*name=*/fName,
+ /*cycles=*/endTime,
+ /*scanlines=*/fScanlines,
+ /*pixels=*/fPixels);
+ }
+ private:
+ SkBlitterTrace* fTrace = nullptr;
+ const char* fName = "";
+ uint64_t fStartTime = 0ul;
+ uint64_t fScanlines = 0ul;
+ uint64_t fPixels = 0ul;
+ };
+
+private:
+ const char* fHeader = "";
+ bool fTraceSteps = false;
+ uint64_t fCycles = 0ul;
+ uint64_t fScanlines = 0ul;
+ uint64_t fPixels = 0ul;
+};
+
+#define SK_BLITTER_TRACE_INIT \
+extern SkBlitterTrace gSkVMBlitterTrace; \
+extern SkBlitterTrace gSkRPBlitterTrace; \
+ \
+static SkBlitterTrace gSkVMBlitterTraceCapture("VM", false); \
+static SkBlitterTrace gSkRPBlitterTraceCapture("RP", false);
+
+#define SK_BLITTER_TRACE_LOCAL_SETUP \
+ gSkVMBlitterTrace.reset(); \
+ gSkRPBlitterTrace.reset()
+
+#define SK_BLITTER_TRACE_LOCAL_TEARDOWN \
+ gSkVMBlitterTraceCapture = gSkVMBlitterTrace; \
+ gSkRPBlitterTraceCapture = gSkRPBlitterTrace
+
+#define SK_BLITTER_TRACE_PRINT \
+ gSkVMBlitterTraceCapture.printCounts("Total"); \
+ SkDebugf("0 "); \
+ gSkRPBlitterTraceCapture.printCounts("Total")
+
+#else
+#define SK_BLITTER_TRACE_INIT
+#define SK_BLITTER_TRACE_LOCAL_SETUP SK_BLITTER_TRACE_NO_CODE
+#define SK_BLITTER_TRACE_LOCAL_TEARDOWN SK_BLITTER_TRACE_NO_CODE
+#define SK_BLITTER_TRACE_PRINT SK_BLITTER_TRACE_NO_CODE
+#endif // SKIA_COMPARE_VM_VS_RP
+
+#endif // SkBlitterTraceCommon_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkCallableTraits.h b/gfx/skia/skia/src/utils/SkCallableTraits.h
new file mode 100644
index 0000000000..003db21280
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCallableTraits.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCallableTraits_DEFINED
+#define SkCallableTraits_DEFINED
+
+#include <type_traits>
+#include <tuple>
+
+template <typename R, typename... Args> struct sk_base_callable_traits {
+ using return_type = R;
+ static constexpr std::size_t arity = sizeof...(Args);
+ template <std::size_t N> struct argument {
+ static_assert(N < arity, "");
+ using type = typename std::tuple_element<N, std::tuple<Args...>>::type;
+ };
+};
+
+#define SK_CALLABLE_TRAITS__COMMA ,
+
+#define SK_CALLABLE_TRAITS__VARARGS(quals, _) \
+SK_CALLABLE_TRAITS__INSTANCE(quals,) \
+SK_CALLABLE_TRAITS__INSTANCE(quals, SK_CALLABLE_TRAITS__COMMA ...)
+
+#ifdef __cpp_noexcept_function_type
+#define SK_CALLABLE_TRAITS__NE_VARARGS(quals, _) \
+SK_CALLABLE_TRAITS__VARARGS(quals,) \
+SK_CALLABLE_TRAITS__VARARGS(quals noexcept,)
+#else
+#define SK_CALLABLE_TRAITS__NE_VARARGS(quals, _) \
+SK_CALLABLE_TRAITS__VARARGS(quals,)
+#endif
+
+#define SK_CALLABLE_TRAITS__REF_NE_VARARGS(quals, _) \
+SK_CALLABLE_TRAITS__NE_VARARGS(quals,) \
+SK_CALLABLE_TRAITS__NE_VARARGS(quals &,) \
+SK_CALLABLE_TRAITS__NE_VARARGS(quals &&,)
+
+#define SK_CALLABLE_TRAITS__CV_REF_NE_VARARGS() \
+SK_CALLABLE_TRAITS__REF_NE_VARARGS(,) \
+SK_CALLABLE_TRAITS__REF_NE_VARARGS(const,) \
+SK_CALLABLE_TRAITS__REF_NE_VARARGS(volatile,) \
+SK_CALLABLE_TRAITS__REF_NE_VARARGS(const volatile,)
+
+/** Infer the return_type and argument<N> of a callable type T. */
+template <typename T> struct SkCallableTraits : SkCallableTraits<decltype(&T::operator())> {};
+
+// function (..., (const, volatile), (&, &&), noexcept)
+#define SK_CALLABLE_TRAITS__INSTANCE(quals, varargs) \
+template <typename R, typename... Args> \
+struct SkCallableTraits<R(Args... varargs) quals> : sk_base_callable_traits<R, Args...> {};
+
+SK_CALLABLE_TRAITS__CV_REF_NE_VARARGS()
+#undef SK_CALLABLE_TRAITS__INSTANCE
+
+// pointer to function (..., noexcept)
+#define SK_CALLABLE_TRAITS__INSTANCE(quals, varargs) \
+template <typename R, typename... Args> \
+struct SkCallableTraits<R(*)(Args... varargs) quals> : sk_base_callable_traits<R, Args...> {};
+
+SK_CALLABLE_TRAITS__NE_VARARGS(,)
+#undef SK_CALLABLE_TRAITS__INSTANCE
+
+// pointer to method (..., (const, volatile), (&, &&), noexcept)
+#define SK_CALLABLE_TRAITS__INSTANCE(quals, varargs) \
+template <typename T, typename R, typename... Args> \
+struct SkCallableTraits<R(T::*)(Args... varargs) quals> : sk_base_callable_traits<R, Args...> {};
+
+SK_CALLABLE_TRAITS__CV_REF_NE_VARARGS()
+#undef SK_CALLABLE_TRAITS__INSTANCE
+
+// pointer to field
+template <typename T, typename R>
+struct SkCallableTraits<R T::*> : sk_base_callable_traits<typename std::add_lvalue_reference<R>::type> {};
+
+#undef SK_CALLABLE_TRAITS__CV_REF_NE_VARARGS
+#undef SK_CALLABLE_TRAITS__REF_NE_VARARGS
+#undef SK_CALLABLE_TRAITS__NE_VARARGS
+#undef SK_CALLABLE_TRAITS__VARARGS
+#undef SK_CALLABLE_TRAITS__COMMA
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkCamera.cpp b/gfx/skia/skia/src/utils/SkCamera.cpp
new file mode 100644
index 0000000000..45e216edb2
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCamera.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkCamera.h"
+
+static SkScalar SkScalarDotDiv(int count, const SkScalar a[], int step_a,
+ const SkScalar b[], int step_b,
+ SkScalar denom) {
+ SkScalar prod = 0;
+ for (int i = 0; i < count; i++) {
+ prod += a[0] * b[0];
+ a += step_a;
+ b += step_b;
+ }
+ return prod / denom;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkPatch3D::SkPatch3D() {
+ this->reset();
+}
+
+void SkPatch3D::reset() {
+ fOrigin = {0, 0, 0};
+ fU = {SK_Scalar1, 0, 0};
+ fV = {0, -SK_Scalar1, 0};
+}
+
+void SkPatch3D::transform(const SkM44& m, SkPatch3D* dst) const {
+ if (dst == nullptr) {
+ dst = (SkPatch3D*)this;
+ }
+ dst->fU = m * fU;
+ dst->fV = m * fV;
+ auto [x,y,z,_] = m.map(fOrigin.x, fOrigin.y, fOrigin.z, 1);
+ dst->fOrigin = {x, y, z};
+}
+
+SkScalar SkPatch3D::dotWith(SkScalar dx, SkScalar dy, SkScalar dz) const {
+ SkScalar cx = fU.y * fV.z - fU.z * fV.y;
+ SkScalar cy = fU.z * fV.x - fU.x * fV.y;
+ SkScalar cz = fU.x * fV.y - fU.y * fV.x;
+
+ return cx * dx + cy * dy + cz * dz;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkCamera3D::SkCamera3D() {
+ this->reset();
+}
+
+void SkCamera3D::reset() {
+ fLocation = {0, 0, -SkIntToScalar(576)}; // 8 inches backward
+ fAxis = {0, 0, SK_Scalar1}; // forward
+ fZenith = {0, -SK_Scalar1, 0}; // up
+
+ fObserver = {0, 0, fLocation.z};
+
+ fNeedToUpdate = true;
+}
+
+void SkCamera3D::update() {
+ fNeedToUpdate = true;
+}
+
+void SkCamera3D::doUpdate() const {
+ SkV3 axis, zenith, cross;
+
+    // construct an orthonormal basis of cross (x), zenith (y), and axis (z)
+ axis = fAxis.normalize();
+
+ zenith = fZenith - (axis * fZenith) * axis;
+ zenith = zenith.normalize();
+
+ cross = axis.cross(zenith);
+
+ {
+ SkMatrix* orien = &fOrientation;
+ auto [x, y, z] = fObserver;
+
+ // Looking along the view axis we have:
+ //
+ // /|\ zenith
+ // |
+ // |
+ // | * observer (projected on XY plane)
+ // |
+ // |____________\ cross
+ // /
+ //
+ // So this does a z-shear along the view axis based on the observer's x and y values,
+ // and scales in x and y relative to the negative of the observer's z value
+ // (the observer is in the negative z direction).
+
+ orien->set(SkMatrix::kMScaleX, x * axis.x - z * cross.x);
+ orien->set(SkMatrix::kMSkewX, x * axis.y - z * cross.y);
+ orien->set(SkMatrix::kMTransX, x * axis.z - z * cross.z);
+ orien->set(SkMatrix::kMSkewY, y * axis.x - z * zenith.x);
+ orien->set(SkMatrix::kMScaleY, y * axis.y - z * zenith.y);
+ orien->set(SkMatrix::kMTransY, y * axis.z - z * zenith.z);
+ orien->set(SkMatrix::kMPersp0, axis.x);
+ orien->set(SkMatrix::kMPersp1, axis.y);
+ orien->set(SkMatrix::kMPersp2, axis.z);
+ }
+}
+
+void SkCamera3D::patchToMatrix(const SkPatch3D& quilt, SkMatrix* matrix) const {
+ if (fNeedToUpdate) {
+ this->doUpdate();
+ fNeedToUpdate = false;
+ }
+
+ const SkScalar* mapPtr = (const SkScalar*)(const void*)&fOrientation;
+ const SkScalar* patchPtr;
+
+ SkV3 diff = quilt.fOrigin - fLocation;
+ SkScalar dot = diff.dot({mapPtr[6], mapPtr[7], mapPtr[8]});
+
+ // This multiplies fOrientation by the matrix [quilt.fU quilt.fV diff] -- U, V, and diff are
+ // column vectors in the matrix -- then divides by the length of the projection of diff onto
+ // the view axis (which is 'dot'). This transforms the patch (which transforms from local path
+ // space to world space) into view space (since fOrientation transforms from world space to
+ // view space).
+ //
+ // The divide by 'dot' isn't strictly necessary as the homogeneous divide would do much the
+ // same thing (it's just scaling the entire matrix by 1/dot). It looks like it's normalizing
+ // the matrix into some canonical space.
+ patchPtr = (const SkScalar*)&quilt;
+ matrix->set(SkMatrix::kMScaleX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMSkewY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp0, SkScalarDotDiv(3, patchPtr, 1, mapPtr+6, 1, dot));
+
+ patchPtr += 3;
+ matrix->set(SkMatrix::kMSkewX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMScaleY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp1, SkScalarDotDiv(3, patchPtr, 1, mapPtr+6, 1, dot));
+
+ patchPtr = (const SkScalar*)(const void*)&diff;
+ matrix->set(SkMatrix::kMTransX, SkScalarDotDiv(3, patchPtr, 1, mapPtr, 1, dot));
+ matrix->set(SkMatrix::kMTransY, SkScalarDotDiv(3, patchPtr, 1, mapPtr+3, 1, dot));
+ matrix->set(SkMatrix::kMPersp2, SK_Scalar1);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+Sk3DView::Sk3DView() {
+ fRec = &fInitialRec;
+}
+
+Sk3DView::~Sk3DView() {
+ Rec* rec = fRec;
+ while (rec != &fInitialRec) {
+ Rec* next = rec->fNext;
+ delete rec;
+ rec = next;
+ }
+}
+
+void Sk3DView::save() {
+ Rec* rec = new Rec;
+ rec->fNext = fRec;
+ rec->fMatrix = fRec->fMatrix;
+ fRec = rec;
+}
+
+void Sk3DView::restore() {
+ SkASSERT(fRec != &fInitialRec);
+ Rec* next = fRec->fNext;
+ delete fRec;
+ fRec = next;
+}
+
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+void Sk3DView::setCameraLocation(SkScalar x, SkScalar y, SkScalar z) {
+ // the camera location is passed in inches, set in pt
+ SkScalar lz = z * 72.0f;
+ fCamera.fLocation = {x * 72.0f, y * 72.0f, lz};
+ fCamera.fObserver = {0, 0, lz};
+ fCamera.update();
+
+}
+
+SkScalar Sk3DView::getCameraLocationX() const {
+ return fCamera.fLocation.x / 72.0f;
+}
+
+SkScalar Sk3DView::getCameraLocationY() const {
+ return fCamera.fLocation.y / 72.0f;
+}
+
+SkScalar Sk3DView::getCameraLocationZ() const {
+ return fCamera.fLocation.z / 72.0f;
+}
+#endif
+
+void Sk3DView::translate(SkScalar x, SkScalar y, SkScalar z) {
+ fRec->fMatrix.preTranslate(x, y, z);
+}
+
+void Sk3DView::rotateX(SkScalar deg) {
+ fRec->fMatrix.preConcat(SkM44::Rotate({1, 0, 0}, deg * SK_ScalarPI / 180));
+}
+
+void Sk3DView::rotateY(SkScalar deg) {
+ fRec->fMatrix.preConcat(SkM44::Rotate({0,-1, 0}, deg * SK_ScalarPI / 180));
+}
+
+void Sk3DView::rotateZ(SkScalar deg) {
+ fRec->fMatrix.preConcat(SkM44::Rotate({0, 0, 1}, deg * SK_ScalarPI / 180));
+}
+
+SkScalar Sk3DView::dotWithNormal(SkScalar x, SkScalar y, SkScalar z) const {
+ SkPatch3D patch;
+ patch.transform(fRec->fMatrix);
+ return patch.dotWith(x, y, z);
+}
+
+void Sk3DView::getMatrix(SkMatrix* matrix) const {
+ if (matrix != nullptr) {
+ SkPatch3D patch;
+ patch.transform(fRec->fMatrix);
+ fCamera.patchToMatrix(patch, matrix);
+ }
+}
+
+#include "include/core/SkCanvas.h"
+
+void Sk3DView::applyToCanvas(SkCanvas* canvas) const {
+ SkMatrix matrix;
+
+ this->getMatrix(&matrix);
+ canvas->concat(matrix);
+}
diff --git a/gfx/skia/skia/src/utils/SkCanvasStack.cpp b/gfx/skia/skia/src/utils/SkCanvasStack.cpp
new file mode 100644
index 0000000000..ea3918b8bc
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStack.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkCanvasStack.h"
+
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "include/private/base/SkTDArray.h"
+#include <utility>
+
+class SkPath;
+class SkRRect;
+
+SkCanvasStack::SkCanvasStack(int width, int height)
+ : INHERITED(width, height) {}
+
+SkCanvasStack::~SkCanvasStack() {
+ this->removeAll();
+}
+
+void SkCanvasStack::pushCanvas(std::unique_ptr<SkCanvas> canvas, const SkIPoint& origin) {
+ if (canvas) {
+ // compute the bounds of this canvas
+ const SkIRect canvasBounds = SkIRect::MakeSize(canvas->getBaseLayerSize());
+
+ // push the canvas onto the stack
+ this->INHERITED::addCanvas(canvas.get());
+
+ // push the canvas data onto the stack
+ CanvasData* data = &fCanvasData.push_back();
+ data->origin = origin;
+ data->requiredClip.setRect(canvasBounds);
+ data->ownedCanvas = std::move(canvas);
+
+ // subtract this region from the canvas objects already on the stack.
+ // This ensures they do not draw into the space occupied by the layers
+ // above them.
+ for (int i = fList.size() - 1; i > 0; --i) {
+ SkIRect localBounds = canvasBounds;
+ localBounds.offset(origin - fCanvasData[i-1].origin);
+
+ fCanvasData[i-1].requiredClip.op(localBounds, SkRegion::kDifference_Op);
+ fList[i-1]->clipRegion(fCanvasData[i-1].requiredClip);
+ }
+ }
+ SkASSERT(fList.size() == fCanvasData.size());
+}
+
+void SkCanvasStack::removeAll() {
+ this->INHERITED::removeAll(); // call the baseclass *before* we actually delete the canvases
+ fCanvasData.clear();
+}
+
+/**
+ * Traverse all canvases (e.g. layers) in the stack and ensure that they are clipped
+ * to their bounds and that the area covered by any canvas higher in the stack is
+ * also clipped out.
+ */
+void SkCanvasStack::clipToZOrderedBounds() {
+ SkASSERT(fList.size() == fCanvasData.size());
+ for (int i = 0; i < fList.size(); ++i) {
+ fList[i]->clipRegion(fCanvasData[i].requiredClip);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * We need to handle setMatrix specially as it overwrites the matrix in each
+ * canvas unlike all other matrix operations (i.e. translate, scale, etc) which
+ * just pre-concatenate with the existing matrix.
+ */
+void SkCanvasStack::didSetM44(const SkM44& mx) {
+ SkASSERT(fList.size() == fCanvasData.size());
+ for (int i = 0; i < fList.size(); ++i) {
+ fList[i]->setMatrix(SkM44::Translate(SkIntToScalar(-fCanvasData[i].origin.x()),
+ SkIntToScalar(-fCanvasData[i].origin.y())) * mx);
+ }
+ this->SkCanvas::didSetM44(mx);
+}
+
+void SkCanvasStack::onClipRect(const SkRect& r, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipRect(r, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipRRect(const SkRRect& rr, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipRRect(rr, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipPath(const SkPath& p, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ this->INHERITED::onClipPath(p, op, edgeStyle);
+ this->clipToZOrderedBounds();
+}
+
+void SkCanvasStack::onClipShader(sk_sp<SkShader> cs, SkClipOp op) {
+ this->INHERITED::onClipShader(std::move(cs), op);
+ // we don't change the "bounds" of the clip, so we don't need to update zorder
+}
+
+void SkCanvasStack::onClipRegion(const SkRegion& deviceRgn, SkClipOp op) {
+ SkASSERT(fList.size() == fCanvasData.size());
+ for (int i = 0; i < fList.size(); ++i) {
+ SkRegion tempRegion;
+ deviceRgn.translate(-fCanvasData[i].origin.x(),
+ -fCanvasData[i].origin.y(), &tempRegion);
+ tempRegion.op(fCanvasData[i].requiredClip, SkRegion::kIntersect_Op);
+ fList[i]->clipRegion(tempRegion, op);
+ }
+ this->SkCanvas::onClipRegion(deviceRgn, op);
+}
diff --git a/gfx/skia/skia/src/utils/SkCanvasStack.h b/gfx/skia/skia/src/utils/SkCanvasStack.h
new file mode 100644
index 0000000000..4c5f82353f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStack.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCanvasStack_DEFINED
+#define SkCanvasStack_DEFINED
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkM44.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkRegion.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTypeTraits.h"
+#include "include/utils/SkNWayCanvas.h"
+
+#include <memory>
+#include <type_traits>
+
+class SkPath;
+class SkRRect;
+class SkShader;
+enum class SkClipOp;
+struct SkRect;
+
+/**
+ * Like NWayCanvas, in that it forwards all canvas methods to each sub-canvas that is "pushed".
+ *
+ * Unlike NWayCanvas, this takes ownership of each subcanvas, and deletes them when this canvas
+ * is deleted.
+ */
+class SkCanvasStack : public SkNWayCanvas {
+public:
+ SkCanvasStack(int width, int height);
+ ~SkCanvasStack() override;
+
+ void pushCanvas(std::unique_ptr<SkCanvas>, const SkIPoint& origin);
+ void removeAll() override;
+
+ /*
+ * The following add/remove canvas methods are overrides from SkNWayCanvas
+ * that do not make sense in the context of our CanvasStack, but since we
+ * can share most of the other implementation of NWay we override those
+ * methods to be no-ops.
+ */
+ void addCanvas(SkCanvas*) override { SkDEBUGFAIL("Invalid Op"); }
+ void removeCanvas(SkCanvas*) override { SkDEBUGFAIL("Invalid Op"); }
+
+protected:
+ void didSetM44(const SkM44&) override;
+
+ void onClipRect(const SkRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipRRect(const SkRRect&, SkClipOp, ClipEdgeStyle) override;
+ void onClipPath(const SkPath&, SkClipOp, ClipEdgeStyle) override;
+ void onClipShader(sk_sp<SkShader>, SkClipOp) override;
+ void onClipRegion(const SkRegion&, SkClipOp) override;
+
+private:
+ void clipToZOrderedBounds();
+
+ struct CanvasData {
+ SkIPoint origin;
+ SkRegion requiredClip;
+ std::unique_ptr<SkCanvas> ownedCanvas;
+
+ static_assert(::sk_is_trivially_relocatable<decltype(origin)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(requiredClip)>::value);
+ static_assert(::sk_is_trivially_relocatable<decltype(ownedCanvas)>::value);
+
+ using sk_is_trivially_relocatable = std::true_type;
+ };
+
+ SkTArray<CanvasData> fCanvasData;
+
+ using INHERITED = SkNWayCanvas;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp b/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp
new file mode 100644
index 0000000000..da73ed5656
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCanvasStateUtils.cpp
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkCanvasStateUtils.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkBitmap.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSize.h"
+#include "include/private/base/SkMalloc.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkWriter32.h"
+#include "src/utils/SkCanvasStack.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+/*
+ * WARNING: The structs below are part of a stable ABI and as such we explicitly
+ * use unambiguous primitives (e.g. int32_t instead of an enum).
+ *
+ * ANY CHANGES TO THE STRUCTS BELOW THAT IMPACT THE ABI SHOULD RESULT IN A
+ * NEW SUBCLASS OF SkCanvasState. SUCH CHANGES SHOULD ONLY BE MADE IF ABSOLUTELY
+ * NECESSARY!
+ *
+ * In order to test changes, run the CanvasState tests. gyp/canvas_state_lib.gyp
+ * describes how to create a library to pass to the CanvasState tests. The tests
+ * should succeed when building the library with your changes and passing that to
+ * the tests running in the unchanged Skia.
+ */
+enum RasterConfigs {
+ kUnknown_RasterConfig = 0,
+ kRGB_565_RasterConfig = 1,
+ kARGB_8888_RasterConfig = 2
+};
+typedef int32_t RasterConfig;
+
+enum CanvasBackends {
+ kUnknown_CanvasBackend = 0,
+ kRaster_CanvasBackend = 1,
+ kGPU_CanvasBackend = 2,
+ kPDF_CanvasBackend = 3
+};
+typedef int32_t CanvasBackend;
+
+struct ClipRect {
+ int32_t left, top, right, bottom;
+};
+
+struct SkMCState {
+ float matrix[9];
+ // NOTE: this only works for non-antialiased clips
+ int32_t clipRectCount;
+ ClipRect* clipRects;
+};
+
+// NOTE: If you add more members, create a new subclass of SkCanvasState with a
+// new CanvasState::version.
+struct SkCanvasLayerState {
+ CanvasBackend type;
+ int32_t x, y;
+ int32_t width;
+ int32_t height;
+
+ SkMCState mcState;
+
+ union {
+ struct {
+ RasterConfig config; // pixel format: a value from RasterConfigs.
+ uint64_t rowBytes; // Number of bytes from start of one line to next.
+ void* pixels; // The pixels, all (height * rowBytes) of them.
+ } raster;
+ struct {
+ int32_t textureID;
+ } gpu;
+ };
+};
+
+class SkCanvasState {
+public:
+ SkCanvasState(int32_t version, SkCanvas* canvas) {
+ SkASSERT(canvas);
+ this->version = version;
+ width = canvas->getBaseLayerSize().width();
+ height = canvas->getBaseLayerSize().height();
+
+ }
+
+ /**
+ * The version this struct was built with. This field must always appear
+ * first in the struct so that when the versions don't match (and the
+ * remaining contents and size are potentially different) we can still
+ * compare the version numbers.
+ */
+ int32_t version;
+ int32_t width;
+ int32_t height;
+ int32_t alignmentPadding;
+};
+
+class SkCanvasState_v1 : public SkCanvasState {
+public:
+ static const int32_t kVersion = 1;
+
+ SkCanvasState_v1(SkCanvas* canvas) : INHERITED(kVersion, canvas) {
+ layerCount = 0;
+ layers = nullptr;
+ mcState.clipRectCount = 0;
+ mcState.clipRects = nullptr;
+ originalCanvas = canvas;
+ }
+
+ ~SkCanvasState_v1() {
+ // loop through the layers and free the data allocated to the clipRects.
+ // See setup_MC_state, clipRects is only allocated when the clip isn't empty; and empty
+ // is implicitly represented as clipRectCount == 0.
+ for (int i = 0; i < layerCount; ++i) {
+ if (layers[i].mcState.clipRectCount > 0) {
+ sk_free(layers[i].mcState.clipRects);
+ }
+ }
+
+ if (mcState.clipRectCount > 0) {
+ sk_free(mcState.clipRects);
+ }
+
+ // layers is always allocated, even if it's with sk_malloc(0), so this is safe.
+ sk_free(layers);
+ }
+
+ SkMCState mcState;
+
+ int32_t layerCount;
+ SkCanvasLayerState* layers;
+private:
+ SkCanvas* originalCanvas;
+ using INHERITED = SkCanvasState;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+static void setup_MC_state(SkMCState* state, const SkMatrix& matrix, const SkIRect& clip) {
+ // initialize the struct
+ state->clipRectCount = 0;
+
+ // capture the matrix
+ for (int i = 0; i < 9; i++) {
+ state->matrix[i] = matrix.get(i);
+ }
+
+ /*
+ * We only support a single clipRect, so we take the clip's bounds. Clients have long made
+ * this assumption anyway, so this restriction is fine.
+ */
+ SkSWriter32<sizeof(ClipRect)> clipWriter;
+
+ if (!clip.isEmpty()) {
+ state->clipRectCount = 1;
+ state->clipRects = (ClipRect*)sk_malloc_throw(sizeof(ClipRect));
+ state->clipRects->left = clip.fLeft;
+ state->clipRects->top = clip.fTop;
+ state->clipRects->right = clip.fRight;
+ state->clipRects->bottom = clip.fBottom;
+ }
+}
+
+SkCanvasState* SkCanvasStateUtils::CaptureCanvasState(SkCanvas* canvas) {
+ SkASSERT(canvas);
+
+ // Check the clip can be decomposed into rectangles (i.e. no soft clips).
+ if (canvas->androidFramework_isClipAA()) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkCanvasState_v1> canvasState(new SkCanvasState_v1(canvas));
+
+ setup_MC_state(&canvasState->mcState, canvas->getTotalMatrix(), canvas->getDeviceClipBounds());
+
+ // Historically, the canvas state could report multiple top-level layers because SkCanvas
+ // supported unclipped layers. With that feature removed, all required information is contained
+ // by the canvas' top-most device.
+ SkBaseDevice* device = canvas->topDevice();
+ SkASSERT(device);
+
+ SkSWriter32<sizeof(SkCanvasLayerState)> layerWriter;
+ // we currently only work for bitmap backed devices
+ SkPixmap pmap;
+ if (!device->accessPixels(&pmap) || 0 == pmap.width() || 0 == pmap.height()) {
+ return nullptr;
+ }
+ // and for axis-aligned devices (so not transformed for an image filter)
+ if (!device->isPixelAlignedToGlobal()) {
+ return nullptr;
+ }
+
+ SkIPoint origin = device->getOrigin(); // safe since it's pixel aligned
+
+ SkCanvasLayerState* layerState =
+ (SkCanvasLayerState*) layerWriter.reserve(sizeof(SkCanvasLayerState));
+ layerState->type = kRaster_CanvasBackend;
+ layerState->x = origin.x();
+ layerState->y = origin.y();
+ layerState->width = pmap.width();
+ layerState->height = pmap.height();
+
+ switch (pmap.colorType()) {
+ case kN32_SkColorType:
+ layerState->raster.config = kARGB_8888_RasterConfig;
+ break;
+ case kRGB_565_SkColorType:
+ layerState->raster.config = kRGB_565_RasterConfig;
+ break;
+ default:
+ return nullptr;
+ }
+ layerState->raster.rowBytes = pmap.rowBytes();
+ layerState->raster.pixels = pmap.writable_addr();
+
+ setup_MC_state(&layerState->mcState, device->localToDevice(), device->devClipBounds());
+
+ // allocate memory for the layers and then and copy them to the struct
+ SkASSERT(layerWriter.bytesWritten() == sizeof(SkCanvasLayerState));
+ canvasState->layerCount = 1;
+ canvasState->layers = (SkCanvasLayerState*) sk_malloc_throw(layerWriter.bytesWritten());
+ layerWriter.flatten(canvasState->layers);
+
+ return canvasState.release();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static void setup_canvas_from_MC_state(const SkMCState& state, SkCanvas* canvas) {
+ // reconstruct the matrix
+ SkMatrix matrix;
+ for (int i = 0; i < 9; i++) {
+ matrix.set(i, state.matrix[i]);
+ }
+
+    // only really support 1 rect, so if the caller (legacy?) sent us more, we just take the bounds
+ // of what they sent.
+ SkIRect bounds = SkIRect::MakeEmpty();
+ if (state.clipRectCount > 0) {
+ bounds.setLTRB(state.clipRects[0].left,
+ state.clipRects[0].top,
+ state.clipRects[0].right,
+ state.clipRects[0].bottom);
+ for (int i = 1; i < state.clipRectCount; ++i) {
+ bounds.join({state.clipRects[i].left,
+ state.clipRects[i].top,
+ state.clipRects[i].right,
+ state.clipRects[i].bottom});
+ }
+ }
+
+ canvas->clipRect(SkRect::Make(bounds));
+ canvas->concat(matrix);
+}
+
+static std::unique_ptr<SkCanvas>
+make_canvas_from_canvas_layer(const SkCanvasLayerState& layerState) {
+ SkASSERT(kRaster_CanvasBackend == layerState.type);
+
+ SkBitmap bitmap;
+ SkColorType colorType =
+ layerState.raster.config == kARGB_8888_RasterConfig ? kN32_SkColorType :
+ layerState.raster.config == kRGB_565_RasterConfig ? kRGB_565_SkColorType :
+ kUnknown_SkColorType;
+
+ if (colorType == kUnknown_SkColorType) {
+ return nullptr;
+ }
+
+ bitmap.installPixels(SkImageInfo::Make(layerState.width, layerState.height,
+ colorType, kPremul_SkAlphaType),
+ layerState.raster.pixels, (size_t) layerState.raster.rowBytes);
+
+ SkASSERT(!bitmap.empty());
+ SkASSERT(!bitmap.isNull());
+
+ std::unique_ptr<SkCanvas> canvas(new SkCanvas(bitmap));
+
+ // setup the matrix and clip
+ setup_canvas_from_MC_state(layerState.mcState, canvas.get());
+
+ return canvas;
+}
+
+std::unique_ptr<SkCanvas> SkCanvasStateUtils::MakeFromCanvasState(const SkCanvasState* state) {
+ SkASSERT(state);
+ // Currently there is only one possible version.
+ SkASSERT(SkCanvasState_v1::kVersion == state->version);
+
+ const SkCanvasState_v1* state_v1 = static_cast<const SkCanvasState_v1*>(state);
+
+ if (state_v1->layerCount < 1) {
+ return nullptr;
+ }
+
+ std::unique_ptr<SkCanvasStack> canvas(new SkCanvasStack(state->width, state->height));
+
+ // setup the matrix and clip on the n-way canvas
+ setup_canvas_from_MC_state(state_v1->mcState, canvas.get());
+
+ // Iterate over the layers and add them to the n-way canvas. New clients will only send one
+ // layer since unclipped layers are no longer supported, but old canvas clients may still
+ // create them.
+ for (int i = state_v1->layerCount - 1; i >= 0; --i) {
+ std::unique_ptr<SkCanvas> canvasLayer = make_canvas_from_canvas_layer(state_v1->layers[i]);
+ if (!canvasLayer) {
+ return nullptr;
+ }
+ canvas->pushCanvas(std::move(canvasLayer), SkIPoint::Make(state_v1->layers[i].x,
+ state_v1->layers[i].y));
+ }
+
+ return std::move(canvas);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void SkCanvasStateUtils::ReleaseCanvasState(SkCanvasState* state) {
+ SkASSERT(!state || SkCanvasState_v1::kVersion == state->version);
+ // Upcast to the correct version of SkCanvasState. This avoids having a virtual destructor on
+ // SkCanvasState. That would be strange since SkCanvasState has no other virtual functions, and
+ // instead uses the field "version" to determine how to behave.
+ delete static_cast<SkCanvasState_v1*>(state);
+}
diff --git a/gfx/skia/skia/src/utils/SkCharToGlyphCache.cpp b/gfx/skia/skia/src/utils/SkCharToGlyphCache.cpp
new file mode 100644
index 0000000000..6d6ecc1376
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCharToGlyphCache.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkCharToGlyphCache.h"
+
+SkCharToGlyphCache::SkCharToGlyphCache() {
+ this->reset();
+}
+
+SkCharToGlyphCache::~SkCharToGlyphCache() {}
+
+void SkCharToGlyphCache::reset() {
+ fK32.reset();
+ fV16.reset();
+
+ // Add sentinels so we can always rely on these to stop linear searches (in either direction)
+ // Neither is a legal unichar, so we don't care what glyphID we use.
+ //
+ *fK32.append() = 0x80000000; *fV16.append() = 0;
+ *fK32.append() = 0x7FFFFFFF; *fV16.append() = 0;
+
+ fDenom = 0;
+}
+
+// Determined experimentally. For N much larger, the slope technique is faster.
+// For N much smaller, a simple search is faster.
+//
+constexpr int kSmallCountLimit = 16;
+
+// To use slope technique we need at least 2 real entries (+2 sentinels) hence the min of 4
+//
+constexpr int kMinCountForSlope = 4;
+
+static int find_simple(const SkUnichar base[], int count, SkUnichar value) {
+ int index;
+ for (index = 0;; ++index) {
+ if (value <= base[index]) {
+ if (value < base[index]) {
+ index = ~index; // not found
+ }
+ break;
+ }
+ }
+ return index;
+}
+
+static int find_with_slope(const SkUnichar base[], int count, SkUnichar value, double denom) {
+ SkASSERT(count >= kMinCountForSlope);
+
+ int index;
+ if (value <= base[1]) {
+ index = 1;
+ if (value < base[index]) {
+ index = ~index;
+ }
+ } else if (value >= base[count - 2]) {
+ index = count - 2;
+ if (value > base[index]) {
+ index = ~(index + 1);
+ }
+ } else {
+ // make our guess based on the "slope" of the current values
+// index = 1 + (int64_t)(count - 2) * (value - base[1]) / (base[count - 2] - base[1]);
+ index = 1 + (int)(denom * (count - 2) * (value - base[1]));
+ SkASSERT(index >= 1 && index <= count - 2);
+
+ if (value >= base[index]) {
+ for (;; ++index) {
+ if (value <= base[index]) {
+ if (value < base[index]) {
+ index = ~index; // not found
+ }
+ break;
+ }
+ }
+ } else {
+ for (--index;; --index) {
+ SkASSERT(index >= 0);
+ if (value >= base[index]) {
+ if (value > base[index]) {
+ index = ~(index + 1);
+ }
+ break;
+ }
+ }
+ }
+ }
+ return index;
+}
+
+int SkCharToGlyphCache::findGlyphIndex(SkUnichar unichar) const {
+ const int count = fK32.size();
+ int index;
+ if (count <= kSmallCountLimit) {
+ index = find_simple(fK32.begin(), count, unichar);
+ } else {
+ index = find_with_slope(fK32.begin(), count, unichar, fDenom);
+ }
+ if (index >= 0) {
+ return fV16[index];
+ }
+ return index;
+}
+
+void SkCharToGlyphCache::insertCharAndGlyph(int index, SkUnichar unichar, SkGlyphID glyph) {
+ SkASSERT(fK32.size() == fV16.size());
+ SkASSERT(index < fK32.size());
+ SkASSERT(unichar < fK32[index]);
+
+ *fK32.insert(index) = unichar;
+ *fV16.insert(index) = glyph;
+
+ // if we've changed the first [1] or last [count-2] entry, recompute our slope
+ const int count = fK32.size();
+ if (count >= kMinCountForSlope && (index == 1 || index == count - 2)) {
+ SkASSERT(index >= 1 && index <= count - 2);
+ fDenom = 1.0 / ((double)fK32[count - 2] - fK32[1]);
+ }
+
+#ifdef SK_DEBUG
+ for (int i = 1; i < fK32.size(); ++i) {
+ SkASSERT(fK32[i-1] < fK32[i]);
+ }
+#endif
+}
diff --git a/gfx/skia/skia/src/utils/SkCharToGlyphCache.h b/gfx/skia/skia/src/utils/SkCharToGlyphCache.h
new file mode 100644
index 0000000000..8137131587
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCharToGlyphCache.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCharToGlyphCache_DEFINED
+#define SkCharToGlyphCache_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTo.h"
+
+#include <cstdint>
+
+class SkCharToGlyphCache {
+public:
+ SkCharToGlyphCache();
+ ~SkCharToGlyphCache();
+
+ // return number of unichars cached
+ int count() const {
+ return fK32.size();
+ }
+
+ void reset(); // forget all cache entries (to save memory)
+
+ /**
+ * Given a unichar, return its glyphID (if the return value is positive), else return
+ * ~index of where to insert the computed glyphID.
+ *
+ * int result = cache.charToGlyph(unichar);
+ * if (result >= 0) {
+ * glyphID = result;
+ * } else {
+ * glyphID = compute_glyph_using_typeface(unichar);
+ * cache.insertCharAndGlyph(~result, unichar, glyphID);
+ * }
+ */
+ int findGlyphIndex(SkUnichar c) const;
+
+ /**
+ * Insert a new char/glyph pair into the cache at the specified index.
+ * See charToGlyph() for how to compute the bit-not of the index.
+ */
+ void insertCharAndGlyph(int index, SkUnichar, SkGlyphID);
+
+ // helper to pre-seed an entry in the cache
+ void addCharAndGlyph(SkUnichar unichar, SkGlyphID glyph) {
+ int index = this->findGlyphIndex(unichar);
+ if (index >= 0) {
+ SkASSERT(SkToU16(index) == glyph);
+ } else {
+ this->insertCharAndGlyph(~index, unichar, glyph);
+ }
+ }
+
+private:
+ SkTDArray<int32_t> fK32;
+ SkTDArray<uint16_t> fV16;
+ double fDenom;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkClipStackUtils.cpp b/gfx/skia/skia/src/utils/SkClipStackUtils.cpp
new file mode 100644
index 0000000000..0080f7fb3a
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkClipStackUtils.cpp
@@ -0,0 +1,42 @@
+/*
+* Copyright 2019 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "src/utils/SkClipStackUtils.h"
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPathTypes.h"
+#include "include/pathops/SkPathOps.h"
+#include "src/core/SkClipStack.h"
+
+enum class SkClipOp;
+
+void SkClipStack_AsPath(const SkClipStack& cs, SkPath* path) {
+ path->reset();
+ path->setFillType(SkPathFillType::kInverseEvenOdd);
+
+ SkClipStack::Iter iter(cs, SkClipStack::Iter::kBottom_IterStart);
+ while (const SkClipStack::Element* element = iter.next()) {
+ if (element->getDeviceSpaceType() == SkClipStack::Element::DeviceSpaceType::kShader) {
+ // TODO: Handle DeviceSpaceType::kShader somehow; it can't be turned into an SkPath
+ // but perhaps the pdf backend can apply shaders in another way.
+ continue;
+ }
+ SkPath operand;
+ if (element->getDeviceSpaceType() != SkClipStack::Element::DeviceSpaceType::kEmpty) {
+ element->asDeviceSpacePath(&operand);
+ }
+
+ SkClipOp elementOp = element->getOp();
+ if (element->isReplaceOp()) {
+ *path = operand;
+ // TODO: Once expanding clip ops are removed, we can switch the iterator to be top
+ // to bottom, which allows us to break here on encountering a replace op.
+ } else {
+ Op(*path, operand, (SkPathOp)elementOp, path);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkClipStackUtils.h b/gfx/skia/skia/src/utils/SkClipStackUtils.h
new file mode 100644
index 0000000000..cc28ef384f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkClipStackUtils.h
@@ -0,0 +1,21 @@
+/*
+* Copyright 2019 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkClipStackUtils_DEFINED
+#define SkClipStackUtils_DEFINED
+
+#include "include/core/SkTypes.h"
+
+class SkClipStack;
+class SkPath;
+
+// Return the resolved clipstack as a single path.
+// Note: uses SkPathOps as part of its implementation.
+//
+void SkClipStack_AsPath(const SkClipStack& cs, SkPath* path);
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkCustomTypeface.cpp b/gfx/skia/skia/src/utils/SkCustomTypeface.cpp
new file mode 100644
index 0000000000..be731f954d
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCustomTypeface.cpp
@@ -0,0 +1,523 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkCustomTypeface.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDrawable.h"
+#include "include/core/SkFontArguments.h"
+#include "include/core/SkFontMetrics.h"
+#include "include/core/SkFontParameters.h"
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkFontTypes.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypeface.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkFixed.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkAdvancedTypefaceMetrics.h" // IWYU pragma: keep
+#include "src/core/SkFontDescriptor.h"
+#include "src/core/SkGlyph.h"
+#include "src/core/SkMask.h"
+#include "src/core/SkScalerContext.h"
+#include "src/core/SkStreamPriv.h"
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <utility>
+#include <vector>
+
+class SkArenaAlloc;
+class SkDescriptor;
+
+namespace {
+static inline const constexpr bool kSkShowTextBlitCoverage = false;
+}
+
+static SkFontMetrics scale_fontmetrics(const SkFontMetrics& src, float sx, float sy) {
+ SkFontMetrics dst = src;
+
+ #define SCALE_X(field) dst.field *= sx
+ #define SCALE_Y(field) dst.field *= sy
+
+ SCALE_X(fAvgCharWidth);
+ SCALE_X(fMaxCharWidth);
+ SCALE_X(fXMin);
+ SCALE_X(fXMax);
+
+ SCALE_Y(fTop);
+ SCALE_Y(fAscent);
+ SCALE_Y(fDescent);
+ SCALE_Y(fBottom);
+ SCALE_Y(fLeading);
+ SCALE_Y(fXHeight);
+ SCALE_Y(fCapHeight);
+ SCALE_Y(fUnderlineThickness);
+ SCALE_Y(fUnderlinePosition);
+ SCALE_Y(fStrikeoutThickness);
+ SCALE_Y(fStrikeoutPosition);
+
+ #undef SCALE_X
+ #undef SCALE_Y
+
+ return dst;
+}
+
+class SkUserTypeface final : public SkTypeface {
+private:
+ friend class SkCustomTypefaceBuilder;
+ friend class SkUserScalerContext;
+
+ explicit SkUserTypeface(SkFontStyle style, const SkFontMetrics& metrics,
+ std::vector<SkCustomTypefaceBuilder::GlyphRec>&& recs)
+ : SkTypeface(style)
+ , fGlyphRecs(std::move(recs))
+ , fMetrics(metrics)
+ {}
+
+ const std::vector<SkCustomTypefaceBuilder::GlyphRec> fGlyphRecs;
+ const SkFontMetrics fMetrics;
+
+ std::unique_ptr<SkScalerContext> onCreateScalerContext(const SkScalerContextEffects&,
+ const SkDescriptor* desc) const override;
+ void onFilterRec(SkScalerContextRec* rec) const override;
+ void getGlyphToUnicodeMap(SkUnichar* glyphToUnicode) const override;
+ std::unique_ptr<SkAdvancedTypefaceMetrics> onGetAdvancedMetrics() const override;
+
+ void onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const override;
+
+ void onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const override;
+
+ void onGetFamilyName(SkString* familyName) const override;
+ bool onGetPostScriptName(SkString*) const override;
+ SkTypeface::LocalizedStrings* onCreateFamilyNameIterator() const override;
+
+ std::unique_ptr<SkStreamAsset> onOpenStream(int*) const override;
+
+ // trivial
+
+ std::unique_ptr<SkStreamAsset> onOpenExistingStream(int*) const override { return nullptr; }
+
+ sk_sp<SkTypeface> onMakeClone(const SkFontArguments& args) const override {
+ return sk_ref_sp(this);
+ }
+ int onCountGlyphs() const override { return this->glyphCount(); }
+ int onGetUPEM() const override { return 2048; /* ?? */ }
+ bool onComputeBounds(SkRect* bounds) const override {
+ bounds->setLTRB(fMetrics.fXMin, fMetrics.fTop, fMetrics.fXMax, fMetrics.fBottom);
+ return true;
+ }
+
+ // noops
+
+ void getPostScriptGlyphNames(SkString*) const override {}
+ bool onGlyphMaskNeedsCurrentColor() const override { return false; }
+ int onGetVariationDesignPosition(SkFontArguments::VariationPosition::Coordinate[],
+ int) const override { return 0; }
+ int onGetVariationDesignParameters(SkFontParameters::Variation::Axis[],
+ int) const override { return 0; }
+ int onGetTableTags(SkFontTableTag tags[]) const override { return 0; }
+ size_t onGetTableData(SkFontTableTag, size_t, size_t, void*) const override { return 0; }
+
+ int glyphCount() const {
+ return SkToInt(fGlyphRecs.size());
+ }
+};
+
+SkCustomTypefaceBuilder::SkCustomTypefaceBuilder() {
+ sk_bzero(&fMetrics, sizeof(fMetrics));
+}
+
+void SkCustomTypefaceBuilder::setMetrics(const SkFontMetrics& fm, float scale) {
+ fMetrics = scale_fontmetrics(fm, scale, scale);
+}
+
+void SkCustomTypefaceBuilder::setFontStyle(SkFontStyle style) {
+ fStyle = style;
+}
+
+SkCustomTypefaceBuilder::GlyphRec& SkCustomTypefaceBuilder::ensureStorage(SkGlyphID index) {
+ if (index >= fGlyphRecs.size()) {
+ fGlyphRecs.resize(SkToSizeT(index) + 1);
+ }
+
+ return fGlyphRecs[index];
+}
+
+void SkCustomTypefaceBuilder::setGlyph(SkGlyphID index, float advance, const SkPath& path) {
+ auto& rec = this->ensureStorage(index);
+ rec.fAdvance = advance;
+ rec.fPath = path;
+ rec.fDrawable = nullptr;
+}
+
+void SkCustomTypefaceBuilder::setGlyph(SkGlyphID index, float advance,
+ sk_sp<SkDrawable> drawable, const SkRect& bounds) {
+ auto& rec = this->ensureStorage(index);
+ rec.fAdvance = advance;
+ rec.fDrawable = std::move(drawable);
+ rec.fBounds = bounds;
+ rec.fPath.reset();
+}
+
+sk_sp<SkTypeface> SkCustomTypefaceBuilder::detach() {
+ if (fGlyphRecs.empty()) return nullptr;
+
+ // initially inverted, so that any "union" will overwrite the first time
+ SkRect bounds = {SK_ScalarMax, SK_ScalarMax, -SK_ScalarMax, -SK_ScalarMax};
+
+ for (const auto& rec : fGlyphRecs) {
+ bounds.join(rec.isDrawable()
+ ? rec.fBounds
+ : rec.fPath.getBounds());
+ }
+
+ fMetrics.fTop = bounds.top();
+ fMetrics.fBottom = bounds.bottom();
+ fMetrics.fXMin = bounds.left();
+ fMetrics.fXMax = bounds.right();
+
+ return sk_sp<SkUserTypeface>(new SkUserTypeface(fStyle, fMetrics, std::move(fGlyphRecs)));
+}
+
+/////////////
+
+void SkUserTypeface::onFilterRec(SkScalerContextRec* rec) const {
+ rec->setHinting(SkFontHinting::kNone);
+}
+
+void SkUserTypeface::getGlyphToUnicodeMap(SkUnichar* glyphToUnicode) const {
+ for (int gid = 0; gid < this->glyphCount(); ++gid) {
+ glyphToUnicode[gid] = SkTo<SkUnichar>(gid);
+ }
+}
+
+std::unique_ptr<SkAdvancedTypefaceMetrics> SkUserTypeface::onGetAdvancedMetrics() const {
+ return nullptr;
+}
+
+void SkUserTypeface::onGetFontDescriptor(SkFontDescriptor* desc, bool* isLocal) const {
+ desc->setFactoryId(SkCustomTypefaceBuilder::FactoryId);
+ *isLocal = true;
+}
+
+void SkUserTypeface::onCharsToGlyphs(const SkUnichar* chars, int count, SkGlyphID glyphs[]) const {
+ for (int i = 0; i < count; ++i) {
+ glyphs[i] = chars[i] < this->glyphCount() ? SkTo<SkGlyphID>(chars[i]) : 0;
+ }
+}
+
+void SkUserTypeface::onGetFamilyName(SkString* familyName) const {
+ *familyName = "";
+}
+
+bool SkUserTypeface::onGetPostScriptName(SkString*) const {
+ return false;
+}
+
+SkTypeface::LocalizedStrings* SkUserTypeface::onCreateFamilyNameIterator() const {
+ return nullptr;
+}
+
+//////////////
+
+class SkUserScalerContext : public SkScalerContext {
+public:
+ SkUserScalerContext(sk_sp<SkUserTypeface> face,
+ const SkScalerContextEffects& effects,
+ const SkDescriptor* desc)
+ : SkScalerContext(std::move(face), effects, desc) {
+ fRec.getSingleMatrix(&fMatrix);
+ this->forceGenerateImageFromPath();
+ }
+
+ const SkUserTypeface* userTF() const {
+ return static_cast<SkUserTypeface*>(this->getTypeface());
+ }
+
+protected:
+ bool generateAdvance(SkGlyph* glyph) override {
+ const SkUserTypeface* tf = this->userTF();
+ auto advance = fMatrix.mapXY(tf->fGlyphRecs[glyph->getGlyphID()].fAdvance, 0);
+
+ glyph->fAdvanceX = advance.fX;
+ glyph->fAdvanceY = advance.fY;
+ return true;
+ }
+
+ void generateMetrics(SkGlyph* glyph, SkArenaAlloc* alloc) override {
+ glyph->zeroMetrics();
+ this->generateAdvance(glyph);
+
+ const auto& rec = this->userTF()->fGlyphRecs[glyph->getGlyphID()];
+ if (rec.isDrawable()) {
+ glyph->fMaskFormat = SkMask::kARGB32_Format;
+
+ SkRect bounds = fMatrix.mapRect(rec.fBounds);
+ bounds.offset(SkFixedToScalar(glyph->getSubXFixed()),
+ SkFixedToScalar(glyph->getSubYFixed()));
+
+ SkIRect ibounds;
+ bounds.roundOut(&ibounds);
+ glyph->fLeft = ibounds.fLeft;
+ glyph->fTop = ibounds.fTop;
+ glyph->fWidth = ibounds.width();
+ glyph->fHeight = ibounds.height();
+
+ // These do not have an outline path.
+ glyph->setPath(alloc, nullptr, false);
+ }
+ }
+
+ void generateImage(const SkGlyph& glyph) override {
+ const auto& rec = this->userTF()->fGlyphRecs[glyph.getGlyphID()];
+ SkASSERTF(rec.isDrawable(), "Only drawable-backed glyphs should reach generateImage.");
+
+ auto canvas = SkCanvas::MakeRasterDirectN32(glyph.fWidth, glyph.fHeight,
+ static_cast<SkPMColor*>(glyph.fImage),
+ glyph.rowBytes());
+ if constexpr (kSkShowTextBlitCoverage) {
+ canvas->clear(0x33FF0000);
+ } else {
+ canvas->clear(SK_ColorTRANSPARENT);
+ }
+
+ canvas->translate(-glyph.fLeft, -glyph.fTop);
+ canvas->translate(SkFixedToScalar(glyph.getSubXFixed()),
+ SkFixedToScalar(glyph.getSubYFixed()));
+ canvas->drawDrawable(rec.fDrawable.get(), &fMatrix);
+ }
+
+ bool generatePath(const SkGlyph& glyph, SkPath* path) override {
+ const auto& rec = this->userTF()->fGlyphRecs[glyph.getGlyphID()];
+
+ SkASSERT(!rec.isDrawable());
+
+ rec.fPath.transform(fMatrix, path);
+
+ return true;
+ }
+
+ sk_sp<SkDrawable> generateDrawable(const SkGlyph& glyph) override {
+ class DrawableMatrixWrapper final : public SkDrawable {
+ public:
+ DrawableMatrixWrapper(sk_sp<SkDrawable> drawable, const SkMatrix& m)
+ : fDrawable(std::move(drawable))
+ , fMatrix(m)
+ {}
+
+ SkRect onGetBounds() override {
+ return fMatrix.mapRect(fDrawable->getBounds());
+ }
+
+ size_t onApproximateBytesUsed() override {
+ return fDrawable->approximateBytesUsed() + sizeof(DrawableMatrixWrapper);
+ }
+
+ void onDraw(SkCanvas* canvas) override {
+ if constexpr (kSkShowTextBlitCoverage) {
+ SkPaint paint;
+ paint.setColor(0x3300FF00);
+ paint.setStyle(SkPaint::kFill_Style);
+ canvas->drawRect(this->onGetBounds(), paint);
+ }
+ canvas->drawDrawable(fDrawable.get(), &fMatrix);
+ }
+ private:
+ const sk_sp<SkDrawable> fDrawable;
+ const SkMatrix fMatrix;
+ };
+
+ const auto& rec = this->userTF()->fGlyphRecs[glyph.getGlyphID()];
+
+ return rec.fDrawable
+ ? sk_make_sp<DrawableMatrixWrapper>(rec.fDrawable, fMatrix)
+ : nullptr;
+ }
+
+ void generateFontMetrics(SkFontMetrics* metrics) override {
+ auto [sx, sy] = fMatrix.mapXY(1, 1);
+ *metrics = scale_fontmetrics(this->userTF()->fMetrics, sx, sy);
+ }
+
+private:
+ SkMatrix fMatrix;
+};
+
+std::unique_ptr<SkScalerContext> SkUserTypeface::onCreateScalerContext(
+ const SkScalerContextEffects& effects, const SkDescriptor* desc) const
+{
+ return std::make_unique<SkUserScalerContext>(
+ sk_ref_sp(const_cast<SkUserTypeface*>(this)), effects, desc);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+static constexpr int kMaxGlyphCount = 65536;
+static constexpr size_t kHeaderSize = 16;
+static const char gHeaderString[] = "SkUserTypeface01";
+static_assert(sizeof(gHeaderString) == 1 + kHeaderSize, "need header to be 16 bytes");
+
+enum GlyphType : uint32_t { kPath, kDrawable };
+
+std::unique_ptr<SkStreamAsset> SkUserTypeface::onOpenStream(int* ttcIndex) const {
+ SkDynamicMemoryWStream wstream;
+
+ wstream.write(gHeaderString, kHeaderSize);
+
+ wstream.write(&fMetrics, sizeof(fMetrics));
+
+ SkFontStyle style = this->fontStyle();
+ wstream.write(&style, sizeof(style));
+
+ wstream.write32(this->glyphCount());
+
+ for (const auto& rec : fGlyphRecs) {
+ wstream.write32(rec.isDrawable() ? GlyphType::kDrawable : GlyphType::kPath);
+
+ wstream.writeScalar(rec.fAdvance);
+
+ wstream.write(&rec.fBounds, sizeof(rec.fBounds));
+
+ auto data = rec.isDrawable()
+ ? rec.fDrawable->serialize()
+ : rec.fPath.serialize();
+
+ const size_t sz = data->size();
+ SkASSERT(SkIsAlign4(sz));
+ wstream.write(&sz, sizeof(sz));
+ wstream.write(data->data(), sz);
+ }
+
+ *ttcIndex = 0;
+ return wstream.detachAsStream();
+}
+
+class AutoRestorePosition {
+ SkStream* fStream;
+ size_t fPosition;
+public:
+ AutoRestorePosition(SkStream* stream) : fStream(stream) {
+ fPosition = stream->getPosition();
+ }
+
+ ~AutoRestorePosition() {
+ if (fStream) {
+ fStream->seek(fPosition);
+ }
+ }
+
+ // So we don't restore the position
+ void markDone() { fStream = nullptr; }
+};
+
+sk_sp<SkTypeface> SkCustomTypefaceBuilder::Deserialize(SkStream* stream) {
+ AutoRestorePosition arp(stream);
+
+ char header[kHeaderSize];
+ if (stream->read(header, kHeaderSize) != kHeaderSize ||
+ 0 != memcmp(header, gHeaderString, kHeaderSize))
+ {
+ return nullptr;
+ }
+
+ SkFontMetrics metrics;
+ if (stream->read(&metrics, sizeof(metrics)) != sizeof(metrics)) {
+ return nullptr;
+ }
+
+ SkFontStyle style;
+ if (stream->read(&style, sizeof(style)) != sizeof(style)) {
+ return nullptr;
+ }
+
+ int glyphCount;
+ if (!stream->readS32(&glyphCount) || glyphCount < 0 || glyphCount > kMaxGlyphCount) {
+ return nullptr;
+ }
+
+ SkCustomTypefaceBuilder builder;
+
+ builder.setMetrics(metrics);
+ builder.setFontStyle(style);
+
+ for (int i = 0; i < glyphCount; ++i) {
+ uint32_t gtype;
+ if (!stream->readU32(&gtype) ||
+ (gtype != GlyphType::kDrawable && gtype != GlyphType::kPath)) {
+ return nullptr;
+ }
+
+ float advance;
+ if (!stream->readScalar(&advance)) {
+ return nullptr;
+ }
+
+ SkRect bounds;
+ if (stream->read(&bounds, sizeof(bounds)) != sizeof(bounds) || !bounds.isFinite()) {
+ return nullptr;
+ }
+
+ // SkPath and SkDrawable cannot read from a stream, so we have to page them into ram
+ size_t sz;
+ if (stream->read(&sz, sizeof(sz)) != sizeof(sz)) {
+ return nullptr;
+ }
+
+ // The amount of bytes in the stream must be at least as big as sz, otherwise
+ // sz is invalid.
+ if (StreamRemainingLengthIsBelow(stream, sz)) {
+ return nullptr;
+ }
+
+ auto data = SkData::MakeUninitialized(sz);
+ if (stream->read(data->writable_data(), sz) != sz) {
+ return nullptr;
+ }
+
+ switch (gtype) {
+ case GlyphType::kDrawable: {
+ auto drawable = SkDrawable::Deserialize(data->data(), data->size());
+ if (!drawable) {
+ return nullptr;
+ }
+ builder.setGlyph(i, advance, std::move(drawable), bounds);
+ } break;
+ case GlyphType::kPath: {
+ SkPath path;
+ if (path.readFromMemory(data->data(), data->size()) != data->size()) {
+ return nullptr;
+ }
+
+ builder.setGlyph(i, advance, path);
+ } break;
+ default:
+ return nullptr;
+ }
+ }
+
+ arp.markDone();
+ return builder.detach();
+}
+
+sk_sp<SkTypeface> SkCustomTypefaceBuilder::MakeFromStream(std::unique_ptr<SkStreamAsset> stream,
+ const SkFontArguments&) {
+ return Deserialize(stream.get());
+}
diff --git a/gfx/skia/skia/src/utils/SkCycles.h b/gfx/skia/skia/src/utils/SkCycles.h
new file mode 100644
index 0000000000..93385a52e1
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkCycles.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2022 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * This is an experimental (and probably temporary) solution that allows
+ * to compare performance SkVM blitters vs RasterPipeline blitters.
+ * In addition to measuring performance (which is questionable) it also produces
+ * other counts (pixels, scanlines) and more detailed traces that
+ * can explain the current results (SkVM is slower) and help improve it.
+ * The entire code is hidden under build flag skia_compare_vm_vs_rp=true
+ * and will not appear at all without it.
+ */
+#ifndef SkCycles_DEFINED
+#define SkCycles_DEFINED
+#include <cstdint>
+#include <x86intrin.h>
+class SkCycles {
+public:
+ static uint64_t Now() {
+ #ifndef SKIA_COMPARE_VM_VS_RP
+ {
+ return 0ul;
+ }
+ #elif defined(SK_BUILD_FOR_WIN)
+ {
+ return 0ul;
+ }
+ #elif defined(SK_BUILD_FOR_IOS)
+ {
+ return 0ul;
+ }
+ #elif defined(SK_BUILD_FOR_ANDROID)
+ {
+ return 0ul;
+ }
+ #elif defined(SK_CPU_X86)
+ {
+ unsigned aux;
+ return __rdtscp(&aux);
+ }
+ #elif defined(SK_CPU_ARM64)
+ {
+ int64_t cycles;
+ asm volatile("mrs %0, cntvct_el0" : "=r"(cycles));
+ return cycles;
+ }
+ #else
+ {
+ return 0ul;
+ }
+ #endif
+ }
+};
+#endif // SkCycles_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkDashPath.cpp b/gfx/skia/skia/src/utils/SkDashPath.cpp
new file mode 100644
index 0000000000..c0b0c8b086
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDashPath.cpp
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkDashPathPriv.h"
+
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPathEffect.h"
+#include "include/core/SkPathMeasure.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStrokeRec.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkAlign.h"
+#include "include/private/base/SkPathEnums.h"
+#include "include/private/base/SkTo.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkPointPriv.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <iterator>
+
+static inline int is_even(int x) {
+ return !(x & 1);
+}
+
+static SkScalar find_first_interval(const SkScalar intervals[], SkScalar phase,
+ int32_t* index, int count) {
+ for (int i = 0; i < count; ++i) {
+ SkScalar gap = intervals[i];
+ if (phase > gap || (phase == gap && gap)) {
+ phase -= gap;
+ } else {
+ *index = i;
+ return gap - phase;
+ }
+ }
+ // If we get here, phase "appears" to be larger than our length. This
+ // shouldn't happen with perfect precision, but we can accumulate errors
+ // during the initial length computation (rounding can make our sum be too
+ // big or too small. In that event, we just have to eat the error here.
+ *index = 0;
+ return intervals[0];
+}
+
+void SkDashPath::CalcDashParameters(SkScalar phase, const SkScalar intervals[], int32_t count,
+ SkScalar* initialDashLength, int32_t* initialDashIndex,
+ SkScalar* intervalLength, SkScalar* adjustedPhase) {
+ SkScalar len = 0;
+ for (int i = 0; i < count; i++) {
+ len += intervals[i];
+ }
+ *intervalLength = len;
+ // Adjust phase to be between 0 and len, "flipping" phase if negative.
+ // e.g., if len is 100, then phase of -20 (or -120) is equivalent to 80
+ if (adjustedPhase) {
+ if (phase < 0) {
+ phase = -phase;
+ if (phase > len) {
+ phase = SkScalarMod(phase, len);
+ }
+ phase = len - phase;
+
+ // Due to finite precision, it's possible that phase == len,
+ // even after the subtract (if len >>> phase), so fix that here.
+ // This fixes http://crbug.com/124652 .
+ SkASSERT(phase <= len);
+ if (phase == len) {
+ phase = 0;
+ }
+ } else if (phase >= len) {
+ phase = SkScalarMod(phase, len);
+ }
+ *adjustedPhase = phase;
+ }
+ SkASSERT(phase >= 0 && phase < len);
+
+ *initialDashLength = find_first_interval(intervals, phase,
+ initialDashIndex, count);
+
+ SkASSERT(*initialDashLength >= 0);
+ SkASSERT(*initialDashIndex >= 0 && *initialDashIndex < count);
+}
+
+static void outset_for_stroke(SkRect* rect, const SkStrokeRec& rec) {
+ SkScalar radius = SkScalarHalf(rec.getWidth());
+ if (0 == radius) {
+ radius = SK_Scalar1; // hairlines
+ }
+ if (SkPaint::kMiter_Join == rec.getJoin()) {
+ radius *= rec.getMiter();
+ }
+ rect->outset(radius, radius);
+}
+
+// If line is zero-length, bump out the end by a tiny amount
+// to draw endcaps. The bump factor is sized so that
+// SkPoint::Distance() computes a non-zero length.
+// Offsets SK_ScalarNearlyZero or smaller create empty paths when Iter measures length.
+// Large values are scaled by SK_ScalarNearlyZero so significant bits change.
+static void adjust_zero_length_line(SkPoint pts[2]) {
+ SkASSERT(pts[0] == pts[1]);
+ pts[1].fX += std::max(1.001f, pts[1].fX) * SK_ScalarNearlyZero;
+}
+
+static bool clip_line(SkPoint pts[2], const SkRect& bounds, SkScalar intervalLength,
+ SkScalar priorPhase) {
+ SkVector dxy = pts[1] - pts[0];
+
+ // only horizontal or vertical lines
+ if (dxy.fX && dxy.fY) {
+ return false;
+ }
+ int xyOffset = SkToBool(dxy.fY); // 0 to adjust horizontal, 1 to adjust vertical
+
+ SkScalar minXY = (&pts[0].fX)[xyOffset];
+ SkScalar maxXY = (&pts[1].fX)[xyOffset];
+ bool swapped = maxXY < minXY;
+ if (swapped) {
+ using std::swap;
+ swap(minXY, maxXY);
+ }
+
+ SkASSERT(minXY <= maxXY);
+ SkScalar leftTop = (&bounds.fLeft)[xyOffset];
+ SkScalar rightBottom = (&bounds.fRight)[xyOffset];
+ if (maxXY < leftTop || minXY > rightBottom) {
+ return false;
+ }
+
+ // Now we actually perform the chop, removing the excess to the left/top and
+ // right/bottom of the bounds (keeping our new line "in phase" with the dash,
+ // hence the (mod intervalLength).
+
+ if (minXY < leftTop) {
+ minXY = leftTop - SkScalarMod(leftTop - minXY, intervalLength);
+ if (!swapped) {
+ minXY -= priorPhase; // for rectangles, adjust by prior phase
+ }
+ }
+ if (maxXY > rightBottom) {
+ maxXY = rightBottom + SkScalarMod(maxXY - rightBottom, intervalLength);
+ if (swapped) {
+ maxXY += priorPhase; // for rectangles, adjust by prior phase
+ }
+ }
+
+ SkASSERT(maxXY >= minXY);
+ if (swapped) {
+ using std::swap;
+ swap(minXY, maxXY);
+ }
+ (&pts[0].fX)[xyOffset] = minXY;
+ (&pts[1].fX)[xyOffset] = maxXY;
+
+ if (minXY == maxXY) {
+ adjust_zero_length_line(pts);
+ }
+ return true;
+}
+
+// Handles only lines and rects.
+// If cull_path() returns true, dstPath is the new smaller path,
+// otherwise dstPath may have been changed but you should ignore it.
+static bool cull_path(const SkPath& srcPath, const SkStrokeRec& rec,
+ const SkRect* cullRect, SkScalar intervalLength, SkPath* dstPath) {
+ if (!cullRect) {
+ SkPoint pts[2];
+ if (srcPath.isLine(pts) && pts[0] == pts[1]) {
+ adjust_zero_length_line(pts);
+ dstPath->moveTo(pts[0]);
+ dstPath->lineTo(pts[1]);
+ return true;
+ }
+ return false;
+ }
+
+ SkRect bounds;
+ bounds = *cullRect;
+ outset_for_stroke(&bounds, rec);
+
+ {
+ SkPoint pts[2];
+ if (srcPath.isLine(pts)) {
+ if (clip_line(pts, bounds, intervalLength, 0)) {
+ dstPath->moveTo(pts[0]);
+ dstPath->lineTo(pts[1]);
+ return true;
+ }
+ return false;
+ }
+ }
+
+ if (srcPath.isRect(nullptr)) {
+ // We'll break the rect into four lines, culling each separately.
+ SkPath::Iter iter(srcPath, false);
+
+ SkPoint pts[4]; // Rects are all moveTo and lineTo, so we'll only use pts[0] and pts[1].
+ SkAssertResult(SkPath::kMove_Verb == iter.next(pts));
+
+ double accum = 0; // Sum of unculled edge lengths to keep the phase correct.
+ // Intentionally a double to minimize the risk of overflow and drift.
+ while (iter.next(pts) == SkPath::kLine_Verb) {
+ // Notice this vector v and accum work with the original unclipped length.
+ SkVector v = pts[1] - pts[0];
+
+ if (clip_line(pts, bounds, intervalLength, std::fmod(accum, intervalLength))) {
+ // pts[0] may have just been changed by clip_line().
+ // If that's not where we ended the previous lineTo(), we need to moveTo() there.
+ SkPoint last;
+ if (!dstPath->getLastPt(&last) || last != pts[0]) {
+ dstPath->moveTo(pts[0]);
+ }
+ dstPath->lineTo(pts[1]);
+ }
+
+ // We either just traveled v.fX horizontally or v.fY vertically.
+ SkASSERT(v.fX == 0 || v.fY == 0);
+ accum += SkScalarAbs(v.fX + v.fY);
+ }
+ return !dstPath->isEmpty();
+ }
+
+ return false;
+}
+
+class SpecialLineRec {
+public:
+ bool init(const SkPath& src, SkPath* dst, SkStrokeRec* rec,
+ int intervalCount, SkScalar intervalLength) {
+ if (rec->isHairlineStyle() || !src.isLine(fPts)) {
+ return false;
+ }
+
+ // can relax this in the future, if we handle square and round caps
+ if (SkPaint::kButt_Cap != rec->getCap()) {
+ return false;
+ }
+
+ SkScalar pathLength = SkPoint::Distance(fPts[0], fPts[1]);
+
+ fTangent = fPts[1] - fPts[0];
+ if (fTangent.isZero()) {
+ return false;
+ }
+
+ fPathLength = pathLength;
+ fTangent.scale(SkScalarInvert(pathLength));
+ SkPointPriv::RotateCCW(fTangent, &fNormal);
+ fNormal.scale(SkScalarHalf(rec->getWidth()));
+
+ // now estimate how many quads will be added to the path
+ // resulting segments = pathLen * intervalCount / intervalLen
+ // resulting points = 4 * segments
+
+ SkScalar ptCount = pathLength * intervalCount / (float)intervalLength;
+ ptCount = std::min(ptCount, SkDashPath::kMaxDashCount);
+ if (SkScalarIsNaN(ptCount)) {
+ return false;
+ }
+ int n = SkScalarCeilToInt(ptCount) << 2;
+ dst->incReserve(n);
+
+ // we will take care of the stroking
+ rec->setFillStyle();
+ return true;
+ }
+
+ void addSegment(SkScalar d0, SkScalar d1, SkPath* path) const {
+ SkASSERT(d0 <= fPathLength);
+ // clamp the segment to our length
+ if (d1 > fPathLength) {
+ d1 = fPathLength;
+ }
+
+ SkScalar x0 = fPts[0].fX + fTangent.fX * d0;
+ SkScalar x1 = fPts[0].fX + fTangent.fX * d1;
+ SkScalar y0 = fPts[0].fY + fTangent.fY * d0;
+ SkScalar y1 = fPts[0].fY + fTangent.fY * d1;
+
+ SkPoint pts[4];
+ pts[0].set(x0 + fNormal.fX, y0 + fNormal.fY); // moveTo
+ pts[1].set(x1 + fNormal.fX, y1 + fNormal.fY); // lineTo
+ pts[2].set(x1 - fNormal.fX, y1 - fNormal.fY); // lineTo
+ pts[3].set(x0 - fNormal.fX, y0 - fNormal.fY); // lineTo
+
+ path->addPoly(pts, std::size(pts), false);
+ }
+
+private:
+ SkPoint fPts[2];
+ SkVector fTangent;
+ SkVector fNormal;
+ SkScalar fPathLength;
+};
+
+
+bool SkDashPath::InternalFilter(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkScalar aIntervals[],
+ int32_t count, SkScalar initialDashLength, int32_t initialDashIndex,
+ SkScalar intervalLength, SkScalar startPhase,
+ StrokeRecApplication strokeRecApplication) {
+ // we must always have an even number of intervals
+ SkASSERT(is_even(count));
+
+ // we do nothing if the src wants to be filled
+ SkStrokeRec::Style style = rec->getStyle();
+ if (SkStrokeRec::kFill_Style == style || SkStrokeRec::kStrokeAndFill_Style == style) {
+ return false;
+ }
+
+ const SkScalar* intervals = aIntervals;
+ SkScalar dashCount = 0;
+ int segCount = 0;
+
+ SkPath cullPathStorage;
+ const SkPath* srcPtr = &src;
+ if (cull_path(src, *rec, cullRect, intervalLength, &cullPathStorage)) {
+ // if rect is closed, starts in a dash, and ends in a dash, add the initial join
+ // potentially a better fix is described here: bug.skia.org/7445
+ if (src.isRect(nullptr) && src.isLastContourClosed() && is_even(initialDashIndex)) {
+ SkScalar pathLength = SkPathMeasure(src, false, rec->getResScale()).getLength();
+#if defined(SK_LEGACY_RECT_DASHING_BUG)
+ SkScalar endPhase = SkScalarMod(pathLength + initialDashLength, intervalLength);
+#else
+ SkScalar endPhase = SkScalarMod(pathLength + startPhase, intervalLength);
+#endif
+ int index = 0;
+ while (endPhase > intervals[index]) {
+ endPhase -= intervals[index++];
+ SkASSERT(index <= count);
+ if (index == count) {
+ // We have run out of intervals. endPhase "should" never get to this point,
+ // but it could if the subtracts underflowed. Hence we will pin it as if it
+ // perfectly ran through the intervals.
+ // See crbug.com/875494 (and skbug.com/8274)
+ endPhase = 0;
+ break;
+ }
+ }
+ // if dash ends inside "on", or ends at beginning of "off"
+ if (is_even(index) == (endPhase > 0)) {
+ SkPoint midPoint = src.getPoint(0);
+ // get vector at end of rect
+ int last = src.countPoints() - 1;
+ while (midPoint == src.getPoint(last)) {
+ --last;
+ SkASSERT(last >= 0);
+ }
+ // get vector at start of rect
+ int next = 1;
+ while (midPoint == src.getPoint(next)) {
+ ++next;
+ SkASSERT(next < last);
+ }
+ SkVector v = midPoint - src.getPoint(last);
+ const SkScalar kTinyOffset = SK_ScalarNearlyZero;
+ // scale vector to make start of tiny right angle
+ v *= kTinyOffset;
+ cullPathStorage.moveTo(midPoint - v);
+ cullPathStorage.lineTo(midPoint);
+ v = midPoint - src.getPoint(next);
+ // scale vector to make end of tiny right angle
+ v *= kTinyOffset;
+ cullPathStorage.lineTo(midPoint - v);
+ }
+ }
+ srcPtr = &cullPathStorage;
+ }
+
+ SpecialLineRec lineRec;
+ bool specialLine = (StrokeRecApplication::kAllow == strokeRecApplication) &&
+ lineRec.init(*srcPtr, dst, rec, count >> 1, intervalLength);
+
+ SkPathMeasure meas(*srcPtr, false, rec->getResScale());
+
+ do {
+ bool skipFirstSegment = meas.isClosed();
+ bool addedSegment = false;
+ SkScalar length = meas.getLength();
+ int index = initialDashIndex;
+
+ // Since the path length / dash length ratio may be arbitrarily large, we can exert
+ // significant memory pressure while attempting to build the filtered path. To avoid this,
+ // we simply give up dashing beyond a certain threshold.
+ //
+ // The original bug report (http://crbug.com/165432) is based on a path yielding more than
+ // 90 million dash segments and crashing the memory allocator. A limit of 1 million
+ // segments seems reasonable: at 2 verbs per segment * 9 bytes per verb, this caps the
+ // maximum dash memory overhead at roughly 17MB per path.
+ dashCount += length * (count >> 1) / intervalLength;
+ if (dashCount > kMaxDashCount) {
+ dst->reset();
+ return false;
+ }
+
+ // Using double precision to avoid looping indefinitely due to single precision rounding
+ // (for extreme path_length/dash_length ratios). See test_infinite_dash() unittest.
+ double distance = 0;
+ double dlen = initialDashLength;
+
+ while (distance < length) {
+ SkASSERT(dlen >= 0);
+ addedSegment = false;
+ if (is_even(index) && !skipFirstSegment) {
+ addedSegment = true;
+ ++segCount;
+
+ if (specialLine) {
+ lineRec.addSegment(SkDoubleToScalar(distance),
+ SkDoubleToScalar(distance + dlen),
+ dst);
+ } else {
+ meas.getSegment(SkDoubleToScalar(distance),
+ SkDoubleToScalar(distance + dlen),
+ dst, true);
+ }
+ }
+ distance += dlen;
+
+ // clear this so we only respect it the first time around
+ skipFirstSegment = false;
+
+ // wrap around our intervals array if necessary
+ index += 1;
+ SkASSERT(index <= count);
+ if (index == count) {
+ index = 0;
+ }
+
+ // fetch our next dlen
+ dlen = intervals[index];
+ }
+
+ // extend if we ended on a segment and we need to join up with the (skipped) initial segment
+ if (meas.isClosed() && is_even(initialDashIndex) &&
+ initialDashLength >= 0) {
+ meas.getSegment(0, initialDashLength, dst, !addedSegment);
+ ++segCount;
+ }
+ } while (meas.nextContour());
+
+ // TODO: do we still need this?
+ if (segCount > 1) {
+ SkPathPriv::SetConvexity(*dst, SkPathConvexity::kConcave);
+ }
+
+ return true;
+}
+
+bool SkDashPath::FilterDashPath(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkPathEffect::DashInfo& info) {
+ if (!ValidDashPath(info.fPhase, info.fIntervals, info.fCount)) {
+ return false;
+ }
+ SkScalar initialDashLength = 0;
+ int32_t initialDashIndex = 0;
+ SkScalar intervalLength = 0;
+ CalcDashParameters(info.fPhase, info.fIntervals, info.fCount,
+ &initialDashLength, &initialDashIndex, &intervalLength);
+ return InternalFilter(dst, src, rec, cullRect, info.fIntervals, info.fCount, initialDashLength,
+ initialDashIndex, intervalLength, info.fPhase);
+}
+
+bool SkDashPath::ValidDashPath(SkScalar phase, const SkScalar intervals[], int32_t count) {
+ if (count < 2 || !SkIsAlign2(count)) {
+ return false;
+ }
+ SkScalar length = 0;
+ for (int i = 0; i < count; i++) {
+ if (intervals[i] < 0) {
+ return false;
+ }
+ length += intervals[i];
+ }
+ // watch out for values that might make us go out of bounds
+ return length > 0 && SkScalarIsFinite(phase) && SkScalarIsFinite(length);
+}
diff --git a/gfx/skia/skia/src/utils/SkDashPathPriv.h b/gfx/skia/skia/src/utils/SkDashPathPriv.h
new file mode 100644
index 0000000000..d29a95e50d
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkDashPathPriv.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDashPathPriv_DEFINED
+#define SkDashPathPriv_DEFINED
+
+#include "include/core/SkPathEffect.h"
+
+namespace SkDashPath {
+ /**
+ * Calculates the initialDashLength, initialDashIndex, and intervalLength based on the
+ * inputed phase and intervals. If adjustedPhase is passed in, then the phase will be
+ * adjusted to be between 0 and intervalLength. The result will be stored in adjustedPhase.
+ * If adjustedPhase is nullptr then it is assumed phase is already between 0 and intervalLength
+ *
+ * Caller should have already used ValidDashPath to exclude invalid data.
+ */
+ void CalcDashParameters(SkScalar phase, const SkScalar intervals[], int32_t count,
+ SkScalar* initialDashLength, int32_t* initialDashIndex,
+ SkScalar* intervalLength, SkScalar* adjustedPhase = nullptr);
+
+ bool FilterDashPath(SkPath* dst, const SkPath& src, SkStrokeRec*, const SkRect*,
+ const SkPathEffect::DashInfo& info);
+
+#ifdef SK_BUILD_FOR_FUZZER
+ const SkScalar kMaxDashCount = 10000;
+#else
+ const SkScalar kMaxDashCount = 1000000;
+#endif
+
+ /** See comments for InternalFilter */
+ enum class StrokeRecApplication {
+ kDisallow,
+ kAllow,
+ };
+
+ /**
+ * Caller should have already used ValidDashPath to exclude invalid data. Typically, this leaves
+ * the strokeRec unmodified. However, for some simple shapes (e.g. a line) it may directly
+ * evaluate the dash and stroke to produce a stroked output path with a fill strokeRec. Passing
+ * true for disallowStrokeRecApplication turns this behavior off.
+ */
+ bool InternalFilter(SkPath* dst, const SkPath& src, SkStrokeRec* rec,
+ const SkRect* cullRect, const SkScalar aIntervals[],
+ int32_t count, SkScalar initialDashLength, int32_t initialDashIndex,
+ SkScalar intervalLength, SkScalar startPhase,
+ StrokeRecApplication = StrokeRecApplication::kAllow);
+
+ bool ValidDashPath(SkScalar phase, const SkScalar intervals[], int32_t count);
+} // namespace SkDashPath
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkEventTracer.cpp b/gfx/skia/skia/src/utils/SkEventTracer.cpp
new file mode 100644
index 0000000000..dcc35dcd08
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkEventTracer.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkEventTracer.h"
+
+#include "include/private/base/SkOnce.h"
+
+#include <stdlib.h>
+#include <atomic>
+
+class SkDefaultEventTracer : public SkEventTracer {
+ SkEventTracer::Handle
+ addTraceEvent(char phase,
+ const uint8_t* categoryEnabledFlag,
+ const char* name,
+ uint64_t id,
+ int numArgs,
+ const char** argNames,
+ const uint8_t* argTypes,
+ const uint64_t* argValues,
+ uint8_t flags) override { return 0; }
+
+ void
+ updateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name,
+ SkEventTracer::Handle handle) override {}
+
+ const uint8_t* getCategoryGroupEnabled(const char* name) override {
+ static uint8_t no = 0;
+ return &no;
+ }
+ const char* getCategoryGroupName(
+ const uint8_t* categoryEnabledFlag) override {
+ static const char* stub = "stub";
+ return stub;
+ }
+
+ // The default tracer does not yet support splitting up trace output into sections.
+ void newTracingSection(const char* name) override {}
+};
+
+// We prefer gUserTracer if it's been set, otherwise we fall back on a default tracer.
+static std::atomic<SkEventTracer*> gUserTracer{nullptr};
+
+bool SkEventTracer::SetInstance(SkEventTracer* tracer, bool leakTracer) {
+ SkEventTracer* expected = nullptr;
+ if (!gUserTracer.compare_exchange_strong(expected, tracer)) {
+ delete tracer;
+ return false;
+ }
+ // If leaking the tracer is accepted then there is no need to install
+ // the atexit.
+ if (!leakTracer) {
+ atexit([]() { delete gUserTracer.load(); });
+ }
+ return true;
+}
+
+SkEventTracer* SkEventTracer::GetInstance() {
+ if (auto tracer = gUserTracer.load(std::memory_order_acquire)) {
+ return tracer;
+ }
+ static SkOnce once;
+ static SkDefaultEventTracer* defaultTracer;
+ once([] { defaultTracer = new SkDefaultEventTracer; });
+ return defaultTracer;
+}
diff --git a/gfx/skia/skia/src/utils/SkFloatToDecimal.cpp b/gfx/skia/skia/src/utils/SkFloatToDecimal.cpp
new file mode 100644
index 0000000000..32bbf27605
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFloatToDecimal.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkFloatToDecimal.h"
+
+#include "include/core/SkTypes.h"
+
+#include <cfloat>
+#include <cmath>
+
+#ifdef SK_DEBUG
+#include <limits.h>
+#endif
+
+// returns `value * pow(base, e)`, assuming `e` is positive.
+static double pow_by_squaring(double value, double base, int e) {
+ // https://en.wikipedia.org/wiki/Exponentiation_by_squaring
+ SkASSERT(e > 0);
+ while (true) {
+ if (e & 1) {
+ value *= base;
+ }
+ e >>= 1;
+ if (0 == e) {
+ return value;
+ }
+ base *= base;
+ }
+}
+
+// Return pow(10.0, e), optimized for common cases.
+static double pow10(int e) {
+ switch (e) {
+ case 0: return 1.0; // common cases
+ case 1: return 10.0;
+ case 2: return 100.0;
+ case 3: return 1e+03;
+ case 4: return 1e+04;
+ case 5: return 1e+05;
+ case 6: return 1e+06;
+ case 7: return 1e+07;
+ case 8: return 1e+08;
+ case 9: return 1e+09;
+ case 10: return 1e+10;
+ case 11: return 1e+11;
+ case 12: return 1e+12;
+ case 13: return 1e+13;
+ case 14: return 1e+14;
+ case 15: return 1e+15;
+ default:
+ if (e > 15) {
+ return pow_by_squaring(1e+15, 10.0, e - 15);
+ } else {
+ SkASSERT(e < 0);
+ return pow_by_squaring(1.0, 0.1, -e);
+ }
+ }
+}
+
+/** Write a string into output, including a terminating '\0' (for
+ unit testing). Return strlen(output) (for SkWStream::write) The
+    resulting string will be in the form /[-]?([0-9]*\.)?[0-9]+/ and
+ sscanf(output, "%f", &x) will return the original value iff the
+ value is finite. This function accepts all possible input values.
+
+ Motivation: "PDF does not support [numbers] in exponential format
+ (such as 6.02e23)." Otherwise, this function would rely on a
+ sprintf-type function from the standard library. */
+unsigned SkFloatToDecimal(float value, char output[kMaximumSkFloatToDecimalLength]) {
+ /* The longest result is -FLT_MIN.
+ We serialize it as "-.0000000000000000000000000000000000000117549435"
+ which has 48 characters plus a terminating '\0'. */
+
+ static_assert(kMaximumSkFloatToDecimalLength == 49, "");
+ // 3 = '-', '.', and '\0' characters.
+ // 9 = number of significant digits
+ // abs(FLT_MIN_10_EXP) = number of zeros in FLT_MIN
+ static_assert(kMaximumSkFloatToDecimalLength == 3 + 9 - FLT_MIN_10_EXP, "");
+
+ /* section C.1 of the PDF1.4 spec (http://goo.gl/0SCswJ) says that
+ most PDF rasterizers will use fixed-point scalars that lack the
+ dynamic range of floats. Even if this is the case, I want to
+ serialize these (uncommon) very small and very large scalar
+ values with enough precision to allow a floating-point
+ rasterizer to read them in with perfect accuracy.
+ Experimentally, rasterizers such as pdfium do seem to benefit
+ from this. Rasterizers that rely on fixed-point scalars should
+ gracefully ignore these values that they can not parse. */
+ char* output_ptr = &output[0];
+ const char* const end = &output[kMaximumSkFloatToDecimalLength - 1];
+ // subtract one to leave space for '\0'.
+
+ /* This function is written to accept any possible input value,
+ including non-finite values such as INF and NAN. In that case,
+       we ignore value-correctness and output a syntactically-valid
+ number. */
+ if (value == INFINITY) {
+ value = FLT_MAX; // nearest finite float.
+ }
+ if (value == -INFINITY) {
+ value = -FLT_MAX; // nearest finite float.
+ }
+ if (!std::isfinite(value) || value == 0.0f) {
+ // NAN is unsupported in PDF. Always output a valid number.
+ // Also catch zero here, as a special case.
+ *output_ptr++ = '0';
+ *output_ptr = '\0';
+ return static_cast<unsigned>(output_ptr - output);
+ }
+ if (value < 0.0) {
+ *output_ptr++ = '-';
+ value = -value;
+ }
+ SkASSERT(value >= 0.0f);
+
+ int binaryExponent;
+ (void)std::frexp(value, &binaryExponent);
+ static const double kLog2 = 0.3010299956639812; // log10(2.0);
+ int decimalExponent = static_cast<int>(std::floor(kLog2 * binaryExponent));
+ int decimalShift = decimalExponent - 8;
+ double power = pow10(-decimalShift);
+ SkASSERT(value * power <= (double)INT_MAX);
+ int d = static_cast<int>(value * power + 0.5);
+ // SkASSERT(value == (float)(d * pow(10.0, decimalShift)));
+ SkASSERT(d <= 999999999);
+ if (d > 167772159) { // floor(pow(10,1+log10(1<<24)))
+ // need one fewer decimal digits for 24-bit precision.
+ decimalShift = decimalExponent - 7;
+        // SkASSERT(power * 0.1 == pow10(-decimalShift));
+ // recalculate to get rounding right.
+ d = static_cast<int>(value * (power * 0.1) + 0.5);
+ SkASSERT(d <= 99999999);
+ }
+ while (d % 10 == 0) {
+ d /= 10;
+ ++decimalShift;
+ }
+ SkASSERT(d > 0);
+ // SkASSERT(value == (float)(d * pow(10.0, decimalShift)));
+ unsigned char buffer[9]; // decimal value buffer.
+ int bufferIndex = 0;
+ do {
+ buffer[bufferIndex++] = d % 10;
+ d /= 10;
+ } while (d != 0);
+ SkASSERT(bufferIndex <= (int)sizeof(buffer) && bufferIndex > 0);
+ if (decimalShift >= 0) {
+ do {
+ --bufferIndex;
+ *output_ptr++ = '0' + buffer[bufferIndex];
+ } while (bufferIndex);
+ for (int i = 0; i < decimalShift; ++i) {
+ *output_ptr++ = '0';
+ }
+ } else {
+ int placesBeforeDecimal = bufferIndex + decimalShift;
+ if (placesBeforeDecimal > 0) {
+ while (placesBeforeDecimal-- > 0) {
+ --bufferIndex;
+ *output_ptr++ = '0' + buffer[bufferIndex];
+ }
+ *output_ptr++ = '.';
+ } else {
+ *output_ptr++ = '.';
+ int placesAfterDecimal = -placesBeforeDecimal;
+ while (placesAfterDecimal-- > 0) {
+ *output_ptr++ = '0';
+ }
+ }
+ while (bufferIndex > 0) {
+ --bufferIndex;
+ *output_ptr++ = '0' + buffer[bufferIndex];
+ if (output_ptr == end) {
+ break; // denormalized: don't need extra precision.
+ // Note: denormalized numbers will not have the same number of
+ // significantDigits, but do not need them to round-trip.
+ }
+ }
+ }
+ SkASSERT(output_ptr <= end);
+ *output_ptr = '\0';
+ return static_cast<unsigned>(output_ptr - output);
+}
diff --git a/gfx/skia/skia/src/utils/SkFloatToDecimal.h b/gfx/skia/skia/src/utils/SkFloatToDecimal.h
new file mode 100644
index 0000000000..ac1042dbfb
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFloatToDecimal.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatToDecimal_DEFINED
+#define SkFloatToDecimal_DEFINED
+
+constexpr unsigned kMaximumSkFloatToDecimalLength = 49;
+
+/** \fn SkFloatToDecimal
+ Convert a float into a decimal string.
+
+ The resulting string will be in the form `[-]?([0-9]*\.)?[0-9]+` (It does
+ not use scientific notation.) and `sscanf(output, "%f", &x)` will return
+ the original value if the value is finite. This function accepts all
+ possible input values.
+
+ INFINITY and -INFINITY are rounded to FLT_MAX and -FLT_MAX.
+
+ NAN values are converted to 0.
+
+ This function will always add a terminating '\0' to the output.
+
+ @param value Any floating-point number
+ @param output The buffer to write the string into. Must be non-null.
+
+ @return strlen(output)
+*/
+unsigned SkFloatToDecimal(float value, char output[kMaximumSkFloatToDecimalLength]);
+
+#endif // SkFloatToDecimal_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkFloatUtils.h b/gfx/skia/skia/src/utils/SkFloatUtils.h
new file mode 100644
index 0000000000..f89a77254e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkFloatUtils.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkFloatUtils_DEFINED
+#define SkFloatUtils_DEFINED
+
+#include "include/core/SkTypes.h"
+#include <limits.h>
+#include <float.h>
+
+template <size_t size>
+class SkTypeWithSize {
+public:
+ // Prevents using SkTypeWithSize<N> with non-specialized N.
+ typedef void UInt;
+};
+
+template <>
+class SkTypeWithSize<32> {
+public:
+ typedef uint32_t UInt;
+};
+
+template <>
+class SkTypeWithSize<64> {
+public:
+ typedef uint64_t UInt;
+};
+
+template <typename RawType>
+struct SkNumericLimits {
+ static const int digits = 0;
+};
+
+template <>
+struct SkNumericLimits<double> {
+ static const int digits = DBL_MANT_DIG;
+};
+
+template <>
+struct SkNumericLimits<float> {
+ static const int digits = FLT_MANT_DIG;
+};
+
+//See
+//http://stackoverflow.com/questions/17333/most-effective-way-for-float-and-double-comparison/3423299#3423299
+//http://code.google.com/p/googletest/source/browse/trunk/include/gtest/internal/gtest-internal.h
+//http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+
+template <typename RawType, unsigned int ULPs>
+class SkFloatingPoint {
+public:
+    /** Bits is an unsigned integer the same size as the floating point number. */
+ typedef typename SkTypeWithSize<sizeof(RawType) * CHAR_BIT>::UInt Bits;
+
+ /** # of bits in a number. */
+ static const size_t kBitCount = CHAR_BIT * sizeof(RawType);
+
+ /** # of fraction bits in a number. */
+ static const size_t kFractionBitCount = SkNumericLimits<RawType>::digits - 1;
+
+ /** # of exponent bits in a number. */
+ static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+ /** The mask for the sign bit. */
+ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+ /** The mask for the fraction bits. */
+ static const Bits kFractionBitMask =
+ ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+ /** The mask for the exponent bits. */
+ static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+ /** How many ULP's (Units in the Last Place) to tolerate when comparing. */
+ static const size_t kMaxUlps = ULPs;
+
+ /**
+ * Constructs a FloatingPoint from a raw floating-point number.
+ *
+ * On an Intel CPU, passing a non-normalized NAN (Not a Number)
+ * around may change its bits, although the new value is guaranteed
+ * to be also a NAN. Therefore, don't expect this constructor to
+ * preserve the bits in x when x is a NAN.
+ */
+ explicit SkFloatingPoint(const RawType& x) { fU.value = x; }
+
+ /** Returns the exponent bits of this number. */
+ Bits exponent_bits() const { return kExponentBitMask & fU.bits; }
+
+ /** Returns the fraction bits of this number. */
+ Bits fraction_bits() const { return kFractionBitMask & fU.bits; }
+
+ /** Returns true iff this is NAN (not a number). */
+ bool is_nan() const {
+        // It's a NAN if both of the following are true:
+ // * the exponent bits are all ones
+ // * the fraction bits are not all zero.
+ return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+ }
+
+ /**
+     * Returns true iff this number is at most kMaxUlps ULP's away from rhs.
+ * In particular, this function:
+ * - returns false if either number is (or both are) NAN.
+ * - treats really large numbers as almost equal to infinity.
+     *   - thinks +0.0 and -0.0 are 0 ULP's apart.
+ */
+ bool AlmostEquals(const SkFloatingPoint& rhs) const {
+ // Any comparison operation involving a NAN must return false.
+ if (is_nan() || rhs.is_nan()) return false;
+
+ const Bits dist = DistanceBetweenSignAndMagnitudeNumbers(fU.bits,
+ rhs.fU.bits);
+ //SkDEBUGF("(%f, %f, %d) ", u_.value_, rhs.u_.value_, dist);
+ return dist <= kMaxUlps;
+ }
+
+private:
+ /** The data type used to store the actual floating-point number. */
+ union FloatingPointUnion {
+ /** The raw floating-point number. */
+ RawType value;
+ /** The bits that represent the number. */
+ Bits bits;
+ };
+
+ /**
+ * Converts an integer from the sign-and-magnitude representation to
+ * the biased representation. More precisely, let N be 2 to the
+ * power of (kBitCount - 1), an integer x is represented by the
+ * unsigned number x + N.
+ *
+ * For instance,
+ *
+ * -N + 1 (the most negative number representable using
+ * sign-and-magnitude) is represented by 1;
+ * 0 is represented by N; and
+ * N - 1 (the biggest number representable using
+ * sign-and-magnitude) is represented by 2N - 1.
+ *
+ * Read http://en.wikipedia.org/wiki/Signed_number_representations
+ * for more details on signed number representations.
+ */
+ static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+ if (kSignBitMask & sam) {
+ // sam represents a negative number.
+ return ~sam + 1;
+ } else {
+ // sam represents a positive number.
+ return kSignBitMask | sam;
+ }
+ }
+
+ /**
+ * Given two numbers in the sign-and-magnitude representation,
+ * returns the distance between them as an unsigned number.
+ */
+ static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+ const Bits &sam2) {
+ const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+ const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+ return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+ }
+
+ FloatingPointUnion fU;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkGaussianColorFilter.cpp b/gfx/skia/skia/src/utils/SkGaussianColorFilter.cpp
new file mode 100644
index 0000000000..1eeeeefee6
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkGaussianColorFilter.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkFlattenable.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkTypes.h"
+#include "src/core/SkColorFilterBase.h"
+#include "src/core/SkColorFilterPriv.h"
+#include "src/core/SkEffectPriv.h"
+#include "src/core/SkRasterPipeline.h"
+#include "src/core/SkRasterPipelineOpList.h"
+#include "src/core/SkVM.h"
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrFragmentProcessor.h"
+// This shouldn't be needed but IWYU needs both (identical) defs of GrFPResult.
+#include "src/shaders/SkShaderBase.h"
+#include <memory>
+#include <utility>
+
+class GrColorInfo;
+class GrRecordingContext;
+class SkSurfaceProps;
+#endif
+
+class SkArenaAlloc;
+class SkColorInfo;
+class SkReadBuffer;
+class SkWriteBuffer;
+
+#if defined(SK_GRAPHITE)
+#include "src/gpu/graphite/KeyContext.h"
+#include "src/gpu/graphite/KeyHelpers.h"
+#include "src/gpu/graphite/PaintParamsKey.h"
+
+namespace skgpu::graphite {
+class PipelineDataGatherer;
+}
+#endif
+
+/**
+ * Remaps the input color's alpha to a Gaussian ramp and then outputs premul white using the
+ * remapped alpha.
+ */
+class SkGaussianColorFilter final : public SkColorFilterBase {
+public:
+ SkGaussianColorFilter() : SkColorFilterBase() {}
+
+ bool appendStages(const SkStageRec& rec, bool shaderIsOpaque) const override {
+ rec.fPipeline->append(SkRasterPipelineOp::gauss_a_to_rgba);
+ return true;
+ }
+
+#if defined(SK_GANESH)
+ GrFPResult asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext*,
+ const GrColorInfo&,
+ const SkSurfaceProps&) const override;
+#endif
+
+#if defined(SK_GRAPHITE)
+ void addToKey(const skgpu::graphite::KeyContext&,
+ skgpu::graphite::PaintParamsKeyBuilder*,
+ skgpu::graphite::PipelineDataGatherer*) const override;
+#endif
+
+protected:
+ void flatten(SkWriteBuffer&) const override {}
+
+ skvm::Color onProgram(skvm::Builder* p, skvm::Color c, const SkColorInfo& dst, skvm::Uniforms*,
+ SkArenaAlloc*) const override {
+ // x = 1 - x;
+ // exp(-x * x * 4) - 0.018f;
+ // ... now approximate with quartic
+ //
+ skvm::F32 x = p->splat(-2.26661229133605957031f);
+ x = c.a * x + 2.89795351028442382812f;
+ x = c.a * x + 0.21345567703247070312f;
+ x = c.a * x + 0.15489584207534790039f;
+ x = c.a * x + 0.00030726194381713867f;
+ return {x, x, x, x};
+ }
+
+private:
+ SK_FLATTENABLE_HOOKS(SkGaussianColorFilter)
+};
+
+sk_sp<SkFlattenable> SkGaussianColorFilter::CreateProc(SkReadBuffer&) {
+ return SkColorFilterPriv::MakeGaussian();
+}
+
+#if defined(SK_GANESH)
+
+#include "include/effects/SkRuntimeEffect.h"
+#include "src/core/SkRuntimeEffectPriv.h"
+#include "src/gpu/ganesh/effects/GrSkSLFP.h"
+
+GrFPResult SkGaussianColorFilter::asFragmentProcessor(std::unique_ptr<GrFragmentProcessor> inputFP,
+ GrRecordingContext*,
+ const GrColorInfo&,
+ const SkSurfaceProps&) const {
+ static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForColorFilter,
+ "half4 main(half4 inColor) {"
+ "half factor = 1 - inColor.a;"
+ "factor = exp(-factor * factor * 4) - 0.018;"
+ "return half4(factor);"
+ "}"
+ );
+ SkASSERT(SkRuntimeEffectPriv::SupportsConstantOutputForConstantInput(effect));
+ return GrFPSuccess(GrSkSLFP::Make(effect, "gaussian_fp", std::move(inputFP),
+ GrSkSLFP::OptFlags::kNone));
+}
+#endif
+
+#if defined(SK_GRAPHITE)
+
+void SkGaussianColorFilter::addToKey(const skgpu::graphite::KeyContext& keyContext,
+ skgpu::graphite::PaintParamsKeyBuilder* builder,
+ skgpu::graphite::PipelineDataGatherer* gatherer) const {
+ using namespace skgpu::graphite;
+
+ GaussianColorFilterBlock::BeginBlock(keyContext, builder, gatherer);
+ builder->endBlock();
+}
+
+#endif
+
+sk_sp<SkColorFilter> SkColorFilterPriv::MakeGaussian() {
+ return sk_sp<SkColorFilter>(new SkGaussianColorFilter);
+}
diff --git a/gfx/skia/skia/src/utils/SkJSON.cpp b/gfx/skia/skia/src/utils/SkJSON.cpp
new file mode 100644
index 0000000000..1d237f8d67
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkJSON.cpp
@@ -0,0 +1,933 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkJSON.h"
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTo.h"
+#include "include/utils/SkParse.h"
+#include "src/base/SkUTF.h"
+
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <new>
+#include <tuple>
+#include <vector>
+
+namespace skjson {
+
+// #define SK_JSON_REPORT_ERRORS
+
+static_assert( sizeof(Value) == 8, "");
+static_assert(alignof(Value) == 8, "");
+
+static constexpr size_t kRecAlign = alignof(Value);
+
+// Zero the whole 8-byte record, then stamp the tag into the low bits of the
+// first byte.  Zero-fill doubles as the \0 terminator for short strings.
+void Value::init_tagged(Tag t) {
+    memset(fData8, 0, sizeof(fData8));
+    fData8[0] = SkTo<uint8_t>(t);
+    SkASSERT(this->getTag() == t);
+}
+
+// Pointer values store a type (in the lower kTagBits bits) and a pointer.
+void Value::init_tagged_pointer(Tag t, void* p) {
+    if (sizeof(Value) == sizeof(uintptr_t)) {
+        *this->cast<uintptr_t>() = reinterpret_cast<uintptr_t>(p);
+        // For 64-bit, we rely on the pointer lower bits being zero.
+        SkASSERT(!(fData8[0] & kTagMask));
+        fData8[0] |= SkTo<uint8_t>(t);
+    } else {
+        // For 32-bit, we store the pointer in the upper word
+        SkASSERT(sizeof(Value) == sizeof(uintptr_t) * 2);
+        this->init_tagged(t);
+        *this->cast<uintptr_t>() = reinterpret_cast<uintptr_t>(p);
+    }
+
+    SkASSERT(this->getTag() == t);
+    SkASSERT(this->ptr<void>() == p);
+}
+
+// Inline-payload constructors: the tag byte plus the value itself live
+// directly inside the 8-byte record (no external allocation).
+NullValue::NullValue() {
+    this->init_tagged(Tag::kNull);
+    SkASSERT(this->getTag() == Tag::kNull);
+}
+
+BoolValue::BoolValue(bool b) {
+    this->init_tagged(Tag::kBool);
+    *this->cast<bool>() = b;
+    SkASSERT(this->getTag() == Tag::kBool);
+}
+
+NumberValue::NumberValue(int32_t i) {
+    this->init_tagged(Tag::kInt);
+    *this->cast<int32_t>() = i;
+    SkASSERT(this->getTag() == Tag::kInt);
+}
+
+NumberValue::NumberValue(float f) {
+    this->init_tagged(Tag::kFloat);
+    *this->cast<float>() = f;
+    SkASSERT(this->getTag() == Tag::kFloat);
+}
+
+// Vector recs point to externally allocated slabs with the following layout:
+//
+//   [size_t n] [REC_0] ... [REC_n-1] [optional extra trailing storage]
+//
+// Long strings use extra_alloc_size == 1 to store the \0 terminator.
+//
+// Returns a pointer to the slab header (the size_t count word).
+template <typename T, size_t extra_alloc_size = 0>
+static void* MakeVector(const void* src, size_t size, SkArenaAlloc& alloc) {
+    // The Ts are already in memory, so their size should be safe.
+    const auto total_size = sizeof(size_t) + size * sizeof(T) + extra_alloc_size;
+    auto* size_ptr = reinterpret_cast<size_t*>(alloc.makeBytesAlignedTo(total_size, kRecAlign));
+
+    *size_ptr = size;
+    sk_careful_memcpy(size_ptr + 1, src, size * sizeof(T));
+
+    return size_ptr;
+}
+
+ArrayValue::ArrayValue(const Value* src, size_t size, SkArenaAlloc& alloc) {
+    this->init_tagged_pointer(Tag::kArray, MakeVector<Value>(src, size, alloc));
+    SkASSERT(this->getTag() == Tag::kArray);
+}
+
+// Strings have two flavors:
+//
+// -- short strings (len <= 7) -> these are stored inline, in the record
+// (one byte reserved for null terminator/type):
+//
+// [str] [\0]|[max_len - actual_len]
+//
+// Storing [max_len - actual_len] allows the 'len' field to double-up as a
+// null terminator when size == max_len (this works 'cause kShortString == 0).
+//
+// -- long strings (len > 7) -> these are externally allocated vectors (VectorRec<char>).
+//
+// The string data plus a null-char terminator are copied over.
+//
+namespace {
+
+// An internal string builder with a fast 8 byte short string load path
+// (for the common case where the string is not at the end of the stream).
+class FastString final : public Value {
+public:
+    // src .. start of the string payload; eos .. one-past-last *readable* char
+    // of the input buffer, used to decide whether the 8-byte fast load is safe.
+    FastString(const char* src, size_t size, const char* eos, SkArenaAlloc& alloc) {
+        SkASSERT(src <= eos);
+
+        if (size > kMaxInlineStringSize) {
+            this->initLongString(src, size, alloc);
+            SkASSERT(this->getTag() == Tag::kString);
+            return;
+        }
+
+        // initFastShortString is faster (doh), but requires access to 6 chars past src.
+        if (src && src + 6 <= eos) {
+            this->initFastShortString(src, size);
+        } else {
+            this->initShortString(src, size);
+        }
+
+        SkASSERT(this->getTag() == Tag::kShortString);
+    }
+
+private:
+    // first byte reserved for tagging, \0 terminator => 6 usable chars
+    inline static constexpr size_t kMaxInlineStringSize = sizeof(Value) - 2;
+
+    // Long strings: arena-allocated char vector, with one extra byte reserved
+    // by MakeVector<char, 1> for the trailing \0 written here.
+    void initLongString(const char* src, size_t size, SkArenaAlloc& alloc) {
+        SkASSERT(size > kMaxInlineStringSize);
+
+        this->init_tagged_pointer(Tag::kString, MakeVector<char, 1>(src, size, alloc));
+
+        auto* data = this->cast<VectorValue<char, Value::Type::kString>>()->begin();
+        const_cast<char*>(data)[size] = '\0';
+    }
+
+    // Short strings, safe byte-by-byte copy path.
+    void initShortString(const char* src, size_t size) {
+        SkASSERT(size <= kMaxInlineStringSize);
+
+        this->init_tagged(Tag::kShortString);
+        sk_careful_memcpy(this->cast<char>(), src, size);
+        // Null terminator provided by init_tagged() above (fData8 is zero-initialized).
+    }
+
+    // Short strings, single 8-byte load path (requires readable bytes around src).
+    void initFastShortString(const char* src, size_t size) {
+        SkASSERT(size <= kMaxInlineStringSize);
+
+        uint64_t* s64 = this->cast<uint64_t>();
+
+        // Load 8 chars and mask out the tag and \0 terminator.
+        // Note: we picked kShortString == 0 to avoid setting explicitly below.
+        static_assert(SkToU8(Tag::kShortString) == 0, "please don't break this");
+
+        // Since the first byte is occupied by the tag, we want the string chars [0..5] to land
+        // on bytes [1..6] => the fastest way is to read8 @(src - 1) (always safe, because the
+        // string requires a " prefix at the very least).
+        memcpy(s64, src - 1, 8);
+
+#if defined(SK_CPU_LENDIAN)
+        // The mask for a max-length string (6), with a leading tag and trailing \0 is
+        // 0x00ffffffffffff00.  Accounting for the final left-shift, this becomes
+        // 0x0000ffffffffffff.
+        *s64 &= (0x0000ffffffffffffULL >> ((kMaxInlineStringSize - size) * 8)) // trailing \0s
+                 << 8;                                                         // tag byte
+#else
+        static_assert(false, "Big-endian builds are not supported at this time.");
+#endif
+    }
+};
+
+} // namespace
+
+// Passing eos == src disables the fast 8-byte load and forces the safe
+// (non-overreading) short-string path.
+StringValue::StringValue(const char* src, size_t size, SkArenaAlloc& alloc) {
+    new (this) FastString(src, size, src, alloc);
+}
+
+ObjectValue::ObjectValue(const Member* src, size_t size, SkArenaAlloc& alloc) {
+    this->init_tagged_pointer(Tag::kObject, MakeVector<Member>(src, size, alloc));
+    SkASSERT(this->getTag() == Tag::kObject);
+}
+
+
+// Boring public Value glue.
+
+// Equality-only comparison: returns 0 iff the two C strings are identical,
+// nonzero otherwise.  Unlike strcmp, the sign carries no ordering information.
+static int inline_strcmp(const char a[], const char b[]) {
+    for (;;) {
+        char c = *a++;
+        if (c == 0) {
+            break;
+        }
+        if (c != *b++) {
+            return 1;
+        }
+    }
+    return *b != 0;
+}
+
+// Key lookup; on duplicate keys the *last* member wins (reverse scan).
+// Returns a shared static null value when the key is absent.
+const Value& ObjectValue::operator[](const char* key) const {
+    // Reverse search for duplicates resolution (policy: return last).
+    const auto* begin = this->begin();
+    const auto* member = this->end();
+
+    while (member > begin) {
+        --member;
+        if (0 == inline_strcmp(key, member->fKey.as<StringValue>().begin())) {
+            return member->fValue;
+        }
+    }
+
+    static const Value g_null = NullValue();
+    return g_null;
+}
+
+namespace {
+
+// Lexer/parser inspired by rapidjson [1], sajson [2] and pjson [3].
+//
+// [1] https://github.com/Tencent/rapidjson/
+// [2] https://github.com/chadaustin/sajson
+// [3] https://pastebin.com/hnhSTL3h
+
+
+// bit 0 (0x01) - plain ASCII string character
+// bit 1 (0x02) - whitespace
+// bit 2 (0x04) - string terminator (" \\ \0 [control chars] **AND } ]** <- see matchString notes)
+// bit 3 (0x08) - 0-9
+// bit 4 (0x10) - 0-9 e E .
+// bit 5 (0x20) - scope terminator (} ])
+// Per-character classification flags, indexed by the raw byte value.
+// Multiple bits may be set per char (e.g. digits are 0x19 = ASCII|digit|numeric).
+static constexpr uint8_t g_token_flags[256] = {
+ // 0    1    2    3    4    5    6    7      8    9    A    B    C    D    E    F
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 6, 6, 4, 4, 6, 4, 4, // 0
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, // 1
+    3, 1, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0x11,1, // 2
+ 0x19,0x19,0x19,0x19,0x19,0x19,0x19,0x19, 0x19,0x19, 1, 1, 1, 1, 1, 1, // 3
+    1, 1, 1, 1, 1, 0x11,1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4,0x25, 1, 1, // 5
+    1, 1, 1, 1, 1, 0x11,1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,0x25, 1, 1, // 7
+
+    // 128-255
+    0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,
+    0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0
+};
+
+// Single-bit probes into g_token_flags (see bit legend above the table).
+static inline bool is_ws(char c)       { return g_token_flags[static_cast<uint8_t>(c)] & 0x02; }
+static inline bool is_eostring(char c) { return g_token_flags[static_cast<uint8_t>(c)] & 0x04; }
+static inline bool is_digit(char c)    { return g_token_flags[static_cast<uint8_t>(c)] & 0x08; }
+static inline bool is_numeric(char c)  { return g_token_flags[static_cast<uint8_t>(c)] & 0x10; }
+static inline bool is_eoscope(char c)  { return g_token_flags[static_cast<uint8_t>(c)] & 0x20; }
+
+// Advances past consecutive whitespace; relies on \0 not being whitespace to stop.
+static inline const char* skip_ws(const char* p) {
+    while (is_ws(*p)) ++p;
+    return p;
+}
+
+// Table-driven 10^exp for exp in [-31, 31]; exponents below the table range
+// fall back to std::pow.  Only exp <= 0 is supported (see assert below).
+static inline float pow10(int32_t exp) {
+    static constexpr float g_pow10_table[63] =
+    {
+       1.e-031f, 1.e-030f, 1.e-029f, 1.e-028f, 1.e-027f, 1.e-026f, 1.e-025f, 1.e-024f,
+       1.e-023f, 1.e-022f, 1.e-021f, 1.e-020f, 1.e-019f, 1.e-018f, 1.e-017f, 1.e-016f,
+       1.e-015f, 1.e-014f, 1.e-013f, 1.e-012f, 1.e-011f, 1.e-010f, 1.e-009f, 1.e-008f,
+       1.e-007f, 1.e-006f, 1.e-005f, 1.e-004f, 1.e-003f, 1.e-002f, 1.e-001f, 1.e+000f,
+       1.e+001f, 1.e+002f, 1.e+003f, 1.e+004f, 1.e+005f, 1.e+006f, 1.e+007f, 1.e+008f,
+       1.e+009f, 1.e+010f, 1.e+011f, 1.e+012f, 1.e+013f, 1.e+014f, 1.e+015f, 1.e+016f,
+       1.e+017f, 1.e+018f, 1.e+019f, 1.e+020f, 1.e+021f, 1.e+022f, 1.e+023f, 1.e+024f,
+       1.e+025f, 1.e+026f, 1.e+027f, 1.e+028f, 1.e+029f, 1.e+030f, 1.e+031f
+    };
+
+    static constexpr int32_t k_exp_offset = std::size(g_pow10_table) / 2;
+
+    // We only support negative exponents for now.
+    SkASSERT(exp <= 0);
+
+    return (exp >= -k_exp_offset) ? g_pow10_table[exp + k_exp_offset]
+                                  : std::pow(10.0f, static_cast<float>(exp));
+}
+
+// Single-pass, goto-driven JSON DOM parser.  Values are accumulated on an
+// explicit stack (fValueStack) and collapsed into arena-backed vectors when
+// their enclosing object/array scope closes.
+class DOMParser {
+public:
+    explicit DOMParser(SkArenaAlloc& alloc)
+        : fAlloc(alloc) {
+        fValueStack.reserve(kValueStackReserve);
+        fUnescapeBuffer.reserve(kUnescapeBufferReserve);
+    }
+
+    // Parses [p, p + size) and returns the root value, or NullValue() on error.
+    // Only object/array roots are accepted (see the is_eoscope check below).
+    Value parse(const char* p, size_t size) {
+        if (!size) {
+            return this->error(NullValue(), p, "invalid empty input");
+        }
+
+        const char* p_stop = p + size - 1;
+
+        // We're only checking for end-of-stream on object/array close('}',']'),
+        // so we must trim any whitespace from the buffer tail.
+        while (p_stop > p && is_ws(*p_stop)) --p_stop;
+
+        SkASSERT(p_stop >= p && p_stop < p + size);
+        if (!is_eoscope(*p_stop)) {
+            return this->error(NullValue(), p_stop, "invalid top-level value");
+        }
+
+        p = skip_ws(p);
+
+        switch (*p) {
+        case '{':
+            goto match_object;
+        case '[':
+            goto match_array;
+        default:
+            return this->error(NullValue(), p, "invalid top-level value");
+        }
+
+    match_object:
+        SkASSERT(*p == '{');
+        p = skip_ws(p + 1);
+
+        this->pushObjectScope();
+
+        if (*p == '}') goto pop_object;
+
+        // goto match_object_key;
+    match_object_key:
+        p = skip_ws(p);
+        if (*p != '"') return this->error(NullValue(), p, "expected object key");
+
+        p = this->matchString(p, p_stop, [this](const char* key, size_t size, const char* eos) {
+            this->pushObjectKey(key, size, eos);
+        });
+        if (!p) return NullValue();
+
+        p = skip_ws(p);
+        if (*p != ':') return this->error(NullValue(), p, "expected ':' separator");
+
+        ++p;
+
+        // goto match_value;
+    match_value:
+        p = skip_ws(p);
+
+        switch (*p) {
+        case '\0':
+            return this->error(NullValue(), p, "unexpected input end");
+        case '"':
+            p = this->matchString(p, p_stop, [this](const char* str, size_t size, const char* eos) {
+                this->pushString(str, size, eos);
+            });
+            break;
+        case '[':
+            goto match_array;
+        case 'f':
+            p = this->matchFalse(p);
+            break;
+        case 'n':
+            p = this->matchNull(p);
+            break;
+        case 't':
+            p = this->matchTrue(p);
+            break;
+        case '{':
+            goto match_object;
+        default:
+            p = this->matchNumber(p);
+            break;
+        }
+
+        if (!p) return NullValue();
+
+        // goto match_post_value;
+    match_post_value:
+        SkASSERT(!this->inTopLevelScope());
+
+        p = skip_ws(p);
+        switch (*p) {
+        case ',':
+            ++p;
+            if (this->inObjectScope()) {
+                goto match_object_key;
+            } else {
+                SkASSERT(this->inArrayScope());
+                goto match_value;
+            }
+        case ']':
+            goto pop_array;
+        case '}':
+            goto pop_object;
+        default:
+            return this->error(NullValue(), p - 1, "unexpected value-trailing token");
+        }
+
+        // unreachable
+        SkASSERT(false);
+
+    pop_object:
+        SkASSERT(*p == '}');
+
+        if (this->inArrayScope()) {
+            return this->error(NullValue(), p, "unexpected object terminator");
+        }
+
+        this->popObjectScope();
+
+        // goto pop_common
+    pop_common:
+        SkASSERT(is_eoscope(*p));
+
+        if (this->inTopLevelScope()) {
+            SkASSERT(fValueStack.size() == 1);
+
+            // Success condition: parsed the top level element and reached the stop token.
+            return p == p_stop
+                ? fValueStack.front()
+                : this->error(NullValue(), p + 1, "trailing root garbage");
+        }
+
+        if (p == p_stop) {
+            return this->error(NullValue(), p, "unexpected end-of-input");
+        }
+
+        ++p;
+
+        goto match_post_value;
+
+    match_array:
+        SkASSERT(*p == '[');
+        p = skip_ws(p + 1);
+
+        this->pushArrayScope();
+
+        if (*p != ']') goto match_value;
+
+        // goto pop_array;
+    pop_array:
+        SkASSERT(*p == ']');
+
+        if (this->inObjectScope()) {
+            return this->error(NullValue(), p, "unexpected array terminator");
+        }
+
+        this->popArrayScope();
+
+        goto pop_common;
+
+        SkASSERT(false);
+        return NullValue();
+    }
+
+    // (token, message) of the last recorded error; only populated when
+    // SK_JSON_REPORT_ERRORS is defined (see error() below).
+    std::tuple<const char*, const SkString> getError() const {
+        return std::make_tuple(fErrorToken, fErrorMessage);
+    }
+
+private:
+    SkArenaAlloc& fAlloc;
+
+    // Pending values stack.
+    inline static constexpr size_t kValueStackReserve = 256;
+    std::vector<Value> fValueStack;
+
+    // String unescape buffer.
+    inline static constexpr size_t kUnescapeBufferReserve = 512;
+    std::vector<char> fUnescapeBuffer;
+
+    // Tracks the current object/array scope, as an index into fStack:
+    //
+    //   - for objects: fScopeIndex =  (index of first value in scope)
+    //   - for arrays : fScopeIndex = -(index of first value in scope)
+    //
+    // fScopeIndex == 0 IFF we are at the top level (no current/active scope).
+    intptr_t fScopeIndex = 0;
+
+    // Error reporting.
+    const char* fErrorToken = nullptr;
+    SkString fErrorMessage;
+
+    bool inTopLevelScope() const { return fScopeIndex == 0; }
+    bool inObjectScope() const { return fScopeIndex > 0; }
+    bool inArrayScope() const { return fScopeIndex < 0; }
+
+    // Helper for masquerading raw primitive types as Values (bypassing tagging, etc).
+    template <typename T>
+    class RawValue final : public Value {
+    public:
+        explicit RawValue(T v) {
+            static_assert(sizeof(T) <= sizeof(Value), "");
+            *this->cast<T>() = v;
+        }
+
+        T operator *() const { return *this->cast<T>(); }
+    };
+
+    // Collapses all values pushed since scope_start into a single arena-backed
+    // VectorT, which replaces the scope's placeholder slot on the stack.
+    template <typename VectorT>
+    void popScopeAsVec(size_t scope_start) {
+        SkASSERT(scope_start > 0);
+        SkASSERT(scope_start <= fValueStack.size());
+
+        using T = typename VectorT::ValueT;
+        static_assert( sizeof(T) >= sizeof(Value), "");
+        static_assert( sizeof(T) % sizeof(Value) == 0, "");
+        static_assert(alignof(T) == alignof(Value), "");
+
+        const auto scope_count = fValueStack.size() - scope_start,
+                         count = scope_count / (sizeof(T) / sizeof(Value));
+        SkASSERT(scope_count % (sizeof(T) / sizeof(Value)) == 0);
+
+        const auto* begin = reinterpret_cast<const T*>(fValueStack.data() + scope_start);
+
+        // Restore the previous scope index from saved placeholder value,
+        // and instantiate as a vector of values in scope.
+        auto& placeholder = fValueStack[scope_start - 1];
+        fScopeIndex = *static_cast<RawValue<intptr_t>&>(placeholder);
+        placeholder = VectorT(begin, count, fAlloc);
+
+        // Drop the (consumed) values in scope.
+        fValueStack.resize(scope_start);
+    }
+
+    void pushObjectScope() {
+        // Save a scope index now, and then later we'll overwrite this value as the Object itself.
+        fValueStack.push_back(RawValue<intptr_t>(fScopeIndex));
+
+        // New object scope.
+        fScopeIndex = SkTo<intptr_t>(fValueStack.size());
+    }
+
+    void popObjectScope() {
+        SkASSERT(this->inObjectScope());
+        this->popScopeAsVec<ObjectValue>(SkTo<size_t>(fScopeIndex));
+
+        SkDEBUGCODE(
+            const auto& obj = fValueStack.back().as<ObjectValue>();
+            SkASSERT(obj.is<ObjectValue>());
+            for (const auto& member : obj) {
+                SkASSERT(member.fKey.is<StringValue>());
+            }
+        )
+    }
+
+    void pushArrayScope() {
+        // Save a scope index now, and then later we'll overwrite this value as the Array itself.
+        fValueStack.push_back(RawValue<intptr_t>(fScopeIndex));
+
+        // New array scope.
+        fScopeIndex = -SkTo<intptr_t>(fValueStack.size());
+    }
+
+    void popArrayScope() {
+        SkASSERT(this->inArrayScope());
+        this->popScopeAsVec<ArrayValue>(SkTo<size_t>(-fScopeIndex));
+
+        SkDEBUGCODE(
+            const auto& arr = fValueStack.back().as<ArrayValue>();
+            SkASSERT(arr.is<ArrayValue>());
+        )
+    }
+
+    // Keys are plain strings; the assert enforces the key/value alternation
+    // invariant within the current object scope.
+    void pushObjectKey(const char* key, size_t size, const char* eos) {
+        SkASSERT(this->inObjectScope());
+        SkASSERT(fValueStack.size() >= SkTo<size_t>(fScopeIndex));
+        SkASSERT(!((fValueStack.size() - SkTo<size_t>(fScopeIndex)) & 1));
+        this->pushString(key, size, eos);
+    }
+
+    void pushTrue() {
+        fValueStack.push_back(BoolValue(true));
+    }
+
+    void pushFalse() {
+        fValueStack.push_back(BoolValue(false));
+    }
+
+    void pushNull() {
+        fValueStack.push_back(NullValue());
+    }
+
+    void pushString(const char* s, size_t size, const char* eos) {
+        fValueStack.push_back(FastString(s, size, eos, fAlloc));
+    }
+
+    void pushInt32(int32_t i) {
+        fValueStack.push_back(NumberValue(i));
+    }
+
+    void pushFloat(float f) {
+        fValueStack.push_back(NumberValue(f));
+    }
+
+    // Returns ret_val unchanged; records (token, message) only when
+    // SK_JSON_REPORT_ERRORS is defined.
+    template <typename T>
+    T error(T&& ret_val, const char* p, const char* msg) {
+#if defined(SK_JSON_REPORT_ERRORS)
+        fErrorToken = p;
+        fErrorMessage.set(msg);
+#endif
+        return ret_val;
+    }
+
+    const char* matchTrue(const char* p) {
+        SkASSERT(p[0] == 't');
+
+        if (p[1] == 'r' && p[2] == 'u' && p[3] == 'e') {
+            this->pushTrue();
+            return p + 4;
+        }
+
+        return this->error(nullptr, p, "invalid token");
+    }
+
+    const char* matchFalse(const char* p) {
+        SkASSERT(p[0] == 'f');
+
+        if (p[1] == 'a' && p[2] == 'l' && p[3] == 's' && p[4] == 'e') {
+            this->pushFalse();
+            return p + 5;
+        }
+
+        return this->error(nullptr, p, "invalid token");
+    }
+
+    const char* matchNull(const char* p) {
+        SkASSERT(p[0] == 'n');
+
+        if (p[1] == 'u' && p[2] == 'l' && p[3] == 'l') {
+            this->pushNull();
+            return p + 4;
+        }
+
+        return this->error(nullptr, p, "invalid token");
+    }
+
+    // Resolves backslash escapes from [begin, end) into fUnescapeBuffer.
+    // Returns nullptr on a malformed/unsupported escape sequence.
+    const std::vector<char>* unescapeString(const char* begin, const char* end) {
+        fUnescapeBuffer.clear();
+
+        for (const auto* p = begin; p != end; ++p) {
+            if (*p != '\\') {
+                fUnescapeBuffer.push_back(*p);
+                continue;
+            }
+
+            if (++p == end) {
+                return nullptr;
+            }
+
+            switch (*p) {
+            case '"': fUnescapeBuffer.push_back( '"'); break;
+            case '\\': fUnescapeBuffer.push_back('\\'); break;
+            case '/': fUnescapeBuffer.push_back( '/'); break;
+            case 'b': fUnescapeBuffer.push_back('\b'); break;
+            case 'f': fUnescapeBuffer.push_back('\f'); break;
+            case 'n': fUnescapeBuffer.push_back('\n'); break;
+            case 'r': fUnescapeBuffer.push_back('\r'); break;
+            case 't': fUnescapeBuffer.push_back('\t'); break;
+            case 'u': {
+                if (p + 4 >= end) {
+                    return nullptr;
+                }
+
+                uint32_t hexed;
+                const char hex_str[] = {p[1], p[2], p[3], p[4], '\0'};
+                const auto* eos = SkParse::FindHex(hex_str, &hexed);
+                if (!eos || *eos) {
+                    return nullptr;
+                }
+
+                char utf8[SkUTF::kMaxBytesInUTF8Sequence];
+                const auto utf8_len = SkUTF::ToUTF8(SkTo<SkUnichar>(hexed), utf8);
+                fUnescapeBuffer.insert(fUnescapeBuffer.end(), utf8, utf8 + utf8_len);
+                p += 4;
+            } break;
+            default: return nullptr;
+            }
+        }
+
+        return &fUnescapeBuffer;
+    }
+
+    // Matches a quoted string starting at p (*p == '"').  On success, invokes
+    // func(start, len, eos) with the (possibly unescaped) payload and returns
+    // the char past the closing quote; returns nullptr on failure.
+    template <typename MatchFunc>
+    const char* matchString(const char* p, const char* p_stop, MatchFunc&& func) {
+        SkASSERT(*p == '"');
+        const auto* s_begin = p + 1;
+        bool requires_unescape = false;
+
+        do {
+            // Consume string chars.
+            // This is the fast path, and hopefully we only hit it once then quick-exit below.
+            for (p = p + 1; !is_eostring(*p); ++p);
+
+            if (*p == '"') {
+                // Valid string found.
+                if (!requires_unescape) {
+                    func(s_begin, p - s_begin, p_stop);
+                } else {
+                    // Slow unescape.  We could avoid this extra copy with some effort,
+                    // but in practice escaped strings should be rare.
+                    const auto* buf = this->unescapeString(s_begin, p);
+                    if (!buf) {
+                        break;
+                    }
+
+                    SkASSERT(!buf->empty());
+                    func(buf->data(), buf->size(), buf->data() + buf->size() - 1);
+                }
+                return p + 1;
+            }
+
+            if (*p == '\\') {
+                requires_unescape = true;
+                ++p;
+                continue;
+            }
+
+            // End-of-scope chars are special: we use them to tag the end of the input.
+            // Thus they cannot be consumed indiscriminately -- we need to check if we hit the
+            // end of the input.  To that effect, we treat them as string terminators above,
+            // then we catch them here.
+            if (is_eoscope(*p)) {
+                continue;
+            }
+
+            // Invalid/unexpected char.
+            break;
+        } while (p != p_stop);
+
+        // Premature end-of-input, or illegal string char.
+        return this->error(nullptr, s_begin - 1, "invalid string");
+    }
+
+    // Continues a float match past the decimal point; exp counts the number of
+    // fractional digits consumed so far (as a non-positive power of ten).
+    const char* matchFastFloatDecimalPart(const char* p, int sign, float f, int exp) {
+        SkASSERT(exp <= 0);
+
+        for (;;) {
+            if (!is_digit(*p)) break;
+            f = f * 10.f + (*p++ - '0'); --exp;
+            if (!is_digit(*p)) break;
+            f = f * 10.f + (*p++ - '0'); --exp;
+        }
+
+        const auto decimal_scale = pow10(exp);
+        if (is_numeric(*p) || !decimal_scale) {
+            SkASSERT((*p == '.' || *p == 'e' || *p == 'E') || !decimal_scale);
+            // Malformed input, or an (unsupported) exponent, or a collapsed decimal factor.
+            return nullptr;
+        }
+
+        this->pushFloat(sign * f * decimal_scale);
+
+        return p;
+    }
+
+    const char* matchFastFloatPart(const char* p, int sign, float f) {
+        for (;;) {
+            if (!is_digit(*p)) break;
+            f = f * 10.f + (*p++ - '0');
+            if (!is_digit(*p)) break;
+            f = f * 10.f + (*p++ - '0');
+        }
+
+        if (!is_numeric(*p)) {
+            // Matched (integral) float.
+            this->pushFloat(sign * f);
+            return p;
+        }
+
+        return (*p == '.') ? this->matchFastFloatDecimalPart(p + 1, sign, f, 0)
+                           : nullptr;
+    }
+
+    // Fast-path number scanner: pushes an int32 when the value fits, else a
+    // single-precision float.  Returns nullptr to defer to the strtof fallback
+    // (e.g. for exponent notation or overflow).
+    const char* matchFast32OrFloat(const char* p) {
+        int sign = 1;
+        if (*p == '-') {
+            sign = -1;
+            ++p;
+        }
+
+        const auto* digits_start = p;
+
+        int32_t n32 = 0;
+
+        // This is the largest absolute int32 value we can handle before
+        // risking overflow *on the next digit* (214748363).
+        static constexpr int32_t kMaxInt32 = (std::numeric_limits<int32_t>::max() - 9) / 10;
+
+        if (is_digit(*p)) {
+            n32 = (*p++ - '0');
+            for (;;) {
+                if (!is_digit(*p) || n32 > kMaxInt32) break;
+                n32 = n32 * 10 + (*p++ - '0');
+            }
+        }
+
+        if (!is_numeric(*p)) {
+            // Did we actually match any digits?
+            if (p > digits_start) {
+                this->pushInt32(sign * n32);
+                return p;
+            }
+            return nullptr;
+        }
+
+        if (*p == '.') {
+            const auto* decimals_start = ++p;
+
+            int exp = 0;
+
+            for (;;) {
+                if (!is_digit(*p) || n32 > kMaxInt32) break;
+                n32 = n32 * 10 + (*p++ - '0'); --exp;
+                if (!is_digit(*p) || n32 > kMaxInt32) break;
+                n32 = n32 * 10 + (*p++ - '0'); --exp;
+            }
+
+            if (!is_numeric(*p)) {
+                // Did we actually match any digits?
+                if (p > decimals_start) {
+                    this->pushFloat(sign * n32 * pow10(exp));
+                    return p;
+                }
+                return nullptr;
+            }
+
+            if (n32 > kMaxInt32) {
+                // we ran out on n32 bits
+                return this->matchFastFloatDecimalPart(p, sign, n32, exp);
+            }
+        }
+
+        return this->matchFastFloatPart(p, sign, n32);
+    }
+
+    const char* matchNumber(const char* p) {
+        if (const auto* fast = this->matchFast32OrFloat(p)) return fast;
+
+        // slow fallback
+        char* matched;
+        float f = strtof(p, &matched);
+        if (matched > p) {
+            this->pushFloat(f);
+            return matched;
+        }
+        return this->error(nullptr, p, "invalid numeric token");
+    }
+};
+
+// Recursively serializes v as minified JSON (no whitespace).
+// NOTE(review): string payloads are emitted verbatim -- special characters are
+// not re-escaped on output; confirm stored strings are escape-free.
+void Write(const Value& v, SkWStream* stream) {
+    switch (v.getType()) {
+    case Value::Type::kNull:
+        stream->writeText("null");
+        break;
+    case Value::Type::kBool:
+        stream->writeText(*v.as<BoolValue>() ? "true" : "false");
+        break;
+    case Value::Type::kNumber:
+        stream->writeScalarAsText(*v.as<NumberValue>());
+        break;
+    case Value::Type::kString:
+        stream->writeText("\"");
+        stream->writeText(v.as<StringValue>().begin());
+        stream->writeText("\"");
+        break;
+    case Value::Type::kArray: {
+        const auto& array = v.as<ArrayValue>();
+        stream->writeText("[");
+        bool first_value = true;
+        for (const auto& entry : array) {
+            if (!first_value) stream->writeText(",");
+            Write(entry, stream);
+            first_value = false;
+        }
+        stream->writeText("]");
+        break;
+    }
+    case Value::Type::kObject:
+        const auto& object = v.as<ObjectValue>();
+        stream->writeText("{");
+        bool first_member = true;
+        for (const auto& member : object) {
+            SkASSERT(member.fKey.getType() == Value::Type::kString);
+            if (!first_member) stream->writeText(",");
+            Write(member.fKey, stream);
+            stream->writeText(":");
+            Write(member.fValue, stream);
+            first_member = false;
+        }
+        stream->writeText("}");
+        break;
+    }
+}
+
+} // namespace
+
+// Serializes this value to an SkString via an in-memory stream.
+SkString Value::toString() const {
+    SkDynamicMemoryWStream wstream;
+    Write(*this, &wstream);
+    const auto data = wstream.detachAsData();
+    // TODO: is there a better way to pass data around without copying?
+    return SkString(static_cast<const char*>(data->data()), data->size());
+}
+
+static constexpr size_t kMinChunkSize = 4096;
+
+// Parsing happens eagerly at construction; on failure fRoot is a NullValue.
+DOM::DOM(const char* data, size_t size)
+    : fAlloc(kMinChunkSize) {
+    DOMParser parser(fAlloc);
+
+    fRoot = parser.parse(data, size);
+}
+
+void DOM::write(SkWStream* stream) const {
+    Write(fRoot, stream);
+}
+
+} // namespace skjson
diff --git a/gfx/skia/skia/src/utils/SkJSON.h b/gfx/skia/skia/src/utils/SkJSON.h
new file mode 100644
index 0000000000..96161baf8d
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkJSON.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJSON_DEFINED
+#define SkJSON_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "src/base/SkArenaAlloc.h"
+
+#include <cstdint>
+#include <cstring>
+#include <string_view>
+
+class SkString;
+class SkWStream;
+
+namespace skjson {
+
+/**
+ * A fast and likely non-conforming JSON parser.
+ *
+ * Some known limitations/compromises:
+ *
+ * -- single-precision FP numbers
+ *
+ *   -- string unescaping is limited to the common single-char escapes and 4-hex-digit \u sequences
+ *
+ *
+ * Values are opaque, fixed-size (64 bits), immutable records.
+ *
+ * They can be converted to facade types for type-specific functionality.
+ *
+ * E.g.:
+ *
+ * if (v.is<ArrayValue>()) {
+ * for (const auto& item : v.as<ArrayValue>()) {
+ * if (const NumberValue* n = item) {
+ * printf("Found number: %f", **n);
+ * }
+ * }
+ * }
+ *
+ * if (v.is<ObjectValue>()) {
+ * const StringValue* id = v.as<ObjectValue>()["id"];
+ * if (id) {
+ * printf("Found object ID: %s", id->begin());
+ * } else {
+ * printf("Missing object ID");
+ * }
+ * }
+ */
+class alignas(8) Value {
+public:
+    enum class Type {
+        kNull,
+        kBool,
+        kNumber,
+        kString,
+        kArray,
+        kObject,
+    };
+
+    /**
+     * @return The type of this value.
+     */
+    Type getType() const;
+
+    /**
+     * @return True if the record matches the facade type T.
+     */
+    template <typename T>
+    bool is() const { return this->getType() == T::kType; }
+
+    /**
+     * Unguarded conversion to facade types.
+     *
+     * @return The record cast as facade type T&.
+     */
+    template <typename T>
+    const T& as() const {
+        SkASSERT(this->is<T>());
+        return *reinterpret_cast<const T*>(this);
+    }
+
+    /**
+     * Guarded conversion to facade types.
+     *
+     * @return The record cast as facade type T*.
+     */
+    template <typename T>
+    operator const T*() const {
+        return this->is<T>() ? &this->as<T>() : nullptr;
+    }
+
+    /**
+     * @return The string representation of this value.
+     */
+    SkString toString() const;
+
+protected:
+    /*
+      Value implementation notes:
+
+        -- fixed 64-bit size
+
+        -- 8-byte aligned
+
+        -- union of:
+
+             bool
+             int32
+             float
+             char[8] (short string storage)
+             external payload (tagged) pointer
+
+        -- lowest 3 bits reserved for tag storage
+
+     */
+    enum class Tag : uint8_t {
+        // n.b.: we picked kShortString == 0 on purpose,
+        // to enable certain short-string optimizations.
+        kShortString = 0b00000000,  // inline payload
+        kNull        = 0b00000001,  // no payload
+        kBool        = 0b00000010,  // inline payload
+        kInt         = 0b00000011,  // inline payload
+        kFloat       = 0b00000100,  // inline payload
+        kString      = 0b00000101,  // ptr to external storage
+        kArray       = 0b00000110,  // ptr to external storage
+        kObject      = 0b00000111,  // ptr to external storage
+    };
+    // Low 3 bits of the first byte hold the tag.
+    inline static constexpr uint8_t kTagMask = 0b00000111;
+
+    void init_tagged(Tag);
+    void init_tagged_pointer(Tag, void*);
+
+    Tag getTag() const {
+        return static_cast<Tag>(fData8[0] & kTagMask);
+    }
+
+    // Access the record payload as T.
+    //
+    // Since the tag is stored in the lower bits, we skip the first word whenever feasible.
+    //
+    // E.g. (U == unused)
+    //
+    //   uint8_t
+    //   -----------------------------------------------------------------------
+    //   |TAG| U | val8 | U | U | U | U | U | U |
+    //   -----------------------------------------------------------------------
+    //
+    //   uint16_t
+    //   -----------------------------------------------------------------------
+    //   |TAG| U |    val16    | U | U |
+    //   -----------------------------------------------------------------------
+    //
+    //   uint32_t
+    //   -----------------------------------------------------------------------
+    //   |TAG| U |         val32         |
+    //   -----------------------------------------------------------------------
+    //
+    //   T* (32b)
+    //   -----------------------------------------------------------------------
+    //   |TAG| U |       T* (32bits)     |
+    //   -----------------------------------------------------------------------
+    //
+    //   T* (64b)
+    //   -----------------------------------------------------------------------
+    //   |TAG|                        T* (61bits)                              |
+    //   -----------------------------------------------------------------------
+    //
+    template <typename T>
+    const T* cast() const {
+        static_assert(sizeof (T) <=  sizeof(Value), "");
+        static_assert(alignof(T) <= alignof(Value), "");
+
+        return (sizeof(T) > sizeof(*this) / 2)
+            ? reinterpret_cast<const T*>(this) + 0 // need all the bits
+            : reinterpret_cast<const T*>(this) + 1; // skip the first word (where the tag lives)
+    }
+
+    template <typename T>
+    T* cast() { return const_cast<T*>(const_cast<const Value*>(this)->cast<T>()); }
+
+    // Access the pointer payload.
+    template <typename T>
+    const T* ptr() const {
+        static_assert(sizeof(uintptr_t)     == sizeof(Value) ||
+                      sizeof(uintptr_t) * 2 == sizeof(Value), "");
+
+        return (sizeof(uintptr_t) < sizeof(Value))
+            // For 32-bit, pointers are stored unmodified.
+            ? *this->cast<const T*>()
+            // For 64-bit, we use the lower bits of the pointer as tag storage.
+            : reinterpret_cast<T*>(*this->cast<uintptr_t>() & ~static_cast<uintptr_t>(kTagMask));
+    }
+
+private:
+    // Raw 64-bit record storage; interpretation depends on the tag.
+    inline static constexpr size_t kValueSize = 8;
+
+    uint8_t fData8[kValueSize];
+
+#if !defined(SK_CPU_LENDIAN)
+    // The current value layout assumes LE and will take some tweaking for BE.
+    static_assert(false, "Big-endian builds are not supported at this time.");
+#endif
+};
+
+class NullValue final : public Value {
+public:
+    inline static constexpr Type kType = Type::kNull;
+
+    NullValue();
+};
+
+class BoolValue final : public Value {
+public:
+    inline static constexpr Type kType = Type::kBool;
+
+    explicit BoolValue(bool);
+
+    bool operator *() const {
+        SkASSERT(this->getTag() == Tag::kBool);
+        return *this->cast<bool>();
+    }
+};
+
+class NumberValue final : public Value {
+public:
+    inline static constexpr Type kType = Type::kNumber;
+
+    explicit NumberValue(int32_t);
+    explicit NumberValue(float);
+
+    // Numbers are stored internally as int32 or float; both read back as double.
+    double operator *() const {
+        SkASSERT(this->getTag() == Tag::kInt ||
+                 this->getTag() == Tag::kFloat);
+
+        return this->getTag() == Tag::kInt
+            ? static_cast<double>(*this->cast<int32_t>())
+            : static_cast<double>(*this->cast<float>());
+    }
+};
+
+// Facade for arena-backed vector payloads laid out as [size_t n][T_0 .. T_n-1].
+template <typename T, Value::Type vtype>
+class VectorValue : public Value {
+public:
+    using ValueT = T;
+    inline static constexpr Type kType = vtype;
+
+    size_t size() const {
+        SkASSERT(this->getType() == kType);
+        return *this->ptr<size_t>();
+    }
+
+    const T* begin() const {
+        SkASSERT(this->getType() == kType);
+        const auto* size_ptr = this->ptr<size_t>();
+        return reinterpret_cast<const T*>(size_ptr + 1);
+    }
+
+    const T* end() const {
+        SkASSERT(this->getType() == kType);
+        const auto* size_ptr = this->ptr<size_t>();
+        return reinterpret_cast<const T*>(size_ptr + 1) + *size_ptr;
+    }
+
+    const T& operator[](size_t i) const {
+        SkASSERT(this->getType() == kType);
+        SkASSERT(i < this->size());
+
+        return *(this->begin() + i);
+    }
+};
+
+class ArrayValue final : public VectorValue<Value, Value::Type::kArray> {
+public:
+    ArrayValue(const Value* src, size_t size, SkArenaAlloc& alloc);
+};
+
+class StringValue final : public Value {
+public:
+    inline static constexpr Type kType = Type::kString;
+
+    StringValue();
+    StringValue(const char* src, size_t size, SkArenaAlloc& alloc);
+
+    size_t size() const {
+        switch (this->getTag()) {
+        case Tag::kShortString:
+            // We don't bother storing a length for short strings on the assumption
+            // that strlen is fast in this case.  If this becomes problematic, we
+            // can either go back to storing (7-len) in the tag byte or write a fast
+            // short_strlen.
+            return strlen(this->cast<char>());
+        case Tag::kString:
+            return this->cast<VectorValue<char, Value::Type::kString>>()->size();
+        default:
+            return 0;
+        }
+    }
+
+    const char* begin() const {
+        return this->getTag() == Tag::kShortString
+            ? this->cast<char>()
+            : this->cast<VectorValue<char, Value::Type::kString>>()->begin();
+    }
+
+    const char* end() const {
+        // Both flavors are \0-terminated, but only the long form stores a length.
+        return this->getTag() == Tag::kShortString
+            ? strchr(this->cast<char>(), '\0')
+            : this->cast<VectorValue<char, Value::Type::kString>>()->end();
+    }
+
+    std::string_view str() const {
+        return std::string_view(this->begin(), this->size());
+    }
+};
+
+// One key/value pair of an object; exactly two Value records wide.
+struct Member {
+    StringValue fKey;
+          Value fValue;
+};
+
+class ObjectValue final : public VectorValue<Member, Value::Type::kObject> {
+public:
+    ObjectValue(const Member* src, size_t size, SkArenaAlloc& alloc);
+
+    const Value& operator[](const char*) const;
+
+    const Member& operator[](size_t i) const {
+        return this->VectorValue::operator[](i);
+    }
+};
+
+// Owns the arena backing every parsed value; parsing happens at construction,
+// and root() is a null value when parsing failed.
+class DOM final : public SkNoncopyable {
+public:
+    DOM(const char*, size_t);
+
+    const Value& root() const { return fRoot; }
+
+    void write(SkWStream*) const;
+
+private:
+    SkArenaAlloc fAlloc;
+    Value        fRoot;
+};
+
+// Collapses internal tags onto the coarser public Type enum (int and float
+// both map to kNumber; both string flavors map to kString).
+inline Value::Type Value::getType() const {
+    switch (this->getTag()) {
+    case Tag::kNull:        return Type::kNull;
+    case Tag::kBool:        return Type::kBool;
+    case Tag::kInt:         return Type::kNumber;
+    case Tag::kFloat:       return Type::kNumber;
+    case Tag::kShortString: return Type::kString;
+    case Tag::kString:      return Type::kString;
+    case Tag::kArray:       return Type::kArray;
+    case Tag::kObject:      return Type::kObject;
+    }
+
+    SkASSERT(false); // unreachable
+    return Type::kNull;
+}
+
+} // namespace skjson
+
+#endif // SkJSON_DEFINED
+
diff --git a/gfx/skia/skia/src/utils/SkJSONWriter.cpp b/gfx/skia/skia/src/utils/SkJSONWriter.cpp
new file mode 100644
index 0000000000..3038f2e7cf
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkJSONWriter.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Make sure that the PRI format string macros are defined
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include "src/utils/SkJSONWriter.h"
+
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+void SkJSONWriter::appendS64(int64_t value) {
+ this->beginValue();
+ this->appendf("%" PRId64, value);
+}
+
+void SkJSONWriter::appendU64(uint64_t value) {
+ this->beginValue();
+ this->appendf("%" PRIu64, value);
+}
+
+void SkJSONWriter::appendHexU64(uint64_t value) {
+ this->beginValue();
+ this->appendf("\"0x%" PRIx64 "\"", value);
+}
+
+void SkJSONWriter::appendf(const char* fmt, ...) {
+ const int kBufferSize = 1024;
+ char buffer[kBufferSize];
+ va_list argp;
+ va_start(argp, fmt);
+#ifdef SK_BUILD_FOR_WIN
+ int length = _vsnprintf_s(buffer, kBufferSize, _TRUNCATE, fmt, argp);
+#else
+ int length = vsnprintf(buffer, kBufferSize, fmt, argp);
+#endif
+ SkASSERT(length >= 0 && length < kBufferSize);
+ va_end(argp);
+ this->write(buffer, length);
+}
diff --git a/gfx/skia/skia/src/utils/SkJSONWriter.h b/gfx/skia/skia/src/utils/SkJSONWriter.h
new file mode 100644
index 0000000000..a12b977cfb
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkJSONWriter.h
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkJSONWriter_DEFINED
+#define SkJSONWriter_DEFINED
+
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkNoncopyable.h"
+#include "include/private/base/SkTArray.h"
+#include "src/base/SkUTF.h"
+
+#include <cstring>
+#include <cstdint>
+#include <string>
+#include <type_traits>
+
+/**
+ * Lightweight class for writing properly structured JSON data. No random-access, everything must
+ * be generated in-order. The resulting JSON is written directly to the SkWStream supplied at
+ * construction time. Output is buffered, so writing to disk (via an SkFILEWStream) is ideal.
+ *
+ * There is a basic state machine to ensure that JSON is structured correctly, and to allow for
+ * (optional) pretty formatting.
+ *
+ * This class adheres to the RFC-4627 usage of JSON (not ECMA-404). In other words, all JSON
+ * created with this class must have a top-level object or array. Free-floating values of other
+ * types are not considered valid.
+ *
+ * Note that all error checking is in the form of asserts - invalid usage in a non-debug build
+ * will simply produce invalid JSON.
+ */
+class SkJSONWriter : SkNoncopyable {
+public:
+ enum class Mode {
+ /**
+ * Output the minimal amount of text. No additional whitespace (including newlines) is
+ * generated. The resulting JSON is suitable for fast parsing and machine consumption.
+ */
+ kFast,
+
+ /**
+ * Output human-readable JSON, with indented objects and arrays, and one value per line.
+ * Slightly slower than kFast, and produces data that is somewhat larger.
+ */
+ kPretty
+ };
+
+ /**
+ * Construct a JSON writer that will serialize all the generated JSON to 'stream'.
+ */
+ SkJSONWriter(SkWStream* stream, Mode mode = Mode::kFast)
+ : fBlock(new char[kBlockSize])
+ , fWrite(fBlock)
+ , fBlockEnd(fBlock + kBlockSize)
+ , fStream(stream)
+ , fMode(mode)
+ , fState(State::kStart) {
+ fScopeStack.push_back(Scope::kNone);
+ fNewlineStack.push_back(true);
+ }
+
+ ~SkJSONWriter() {
+ this->flush();
+ delete[] fBlock;
+ SkASSERT(fScopeStack.size() == 1);
+ SkASSERT(fNewlineStack.size() == 1);
+ }
+
+ /**
+ * Force all buffered output to be flushed to the underlying stream.
+ */
+ void flush() {
+ if (fWrite != fBlock) {
+ fStream->write(fBlock, fWrite - fBlock);
+ fWrite = fBlock;
+ }
+ }
+
+ /**
+ * Append the name (key) portion of an object member. Must be called between beginObject() and
+ * endObject(). If you have both the name and value of an object member, you can simply call
+ * the two argument versions of the other append functions.
+ */
+ void appendName(const char* name) {
+ if (!name) {
+ return;
+ }
+ SkASSERT(Scope::kObject == this->scope());
+ SkASSERT(State::kObjectBegin == fState || State::kObjectValue == fState);
+ if (State::kObjectValue == fState) {
+ this->write(",", 1);
+ }
+ this->separator(this->multiline());
+ this->write("\"", 1);
+ this->write(name, strlen(name));
+ this->write("\":", 2);
+ fState = State::kObjectName;
+ }
+
+ /**
+ * Adds a new object. A name must be supplied when called between beginObject() and
+ * endObject(). Calls to beginObject() must be balanced by corresponding calls to endObject().
+ * By default, objects are written out with one named value per line (when in kPretty mode).
+ * This can be overridden for a particular object by passing false for multiline, this will
+ * keep the entire object on a single line. This can help with readability in some situations.
+ * In kFast mode, this parameter is ignored.
+ */
+ void beginObject(const char* name = nullptr, bool multiline = true) {
+ this->appendName(name);
+ this->beginValue(true);
+ this->write("{", 1);
+ fScopeStack.push_back(Scope::kObject);
+ fNewlineStack.push_back(multiline);
+ fState = State::kObjectBegin;
+ }
+
+ /**
+ * Ends an object that was previously started with beginObject().
+ */
+ void endObject() {
+ SkASSERT(Scope::kObject == this->scope());
+ SkASSERT(State::kObjectBegin == fState || State::kObjectValue == fState);
+ bool emptyObject = State::kObjectBegin == fState;
+ bool wasMultiline = this->multiline();
+ this->popScope();
+ if (!emptyObject) {
+ this->separator(wasMultiline);
+ }
+ this->write("}", 1);
+ }
+
+ /**
+ * Adds a new array. A name must be supplied when called between beginObject() and
+ * endObject(). Calls to beginArray() must be balanced by corresponding calls to endArray().
+ * By default, arrays are written out with one value per line (when in kPretty mode).
+ * This can be overridden for a particular array by passing false for multiline, this will
+ * keep the entire array on a single line. This can help with readability in some situations.
+ * In kFast mode, this parameter is ignored.
+ */
+ void beginArray(const char* name = nullptr, bool multiline = true) {
+ this->appendName(name);
+ this->beginValue(true);
+ this->write("[", 1);
+ fScopeStack.push_back(Scope::kArray);
+ fNewlineStack.push_back(multiline);
+ fState = State::kArrayBegin;
+ }
+
+ /**
+     * Ends an array that was previously started with beginArray().
+ */
+ void endArray() {
+ SkASSERT(Scope::kArray == this->scope());
+ SkASSERT(State::kArrayBegin == fState || State::kArrayValue == fState);
+ bool emptyArray = State::kArrayBegin == fState;
+ bool wasMultiline = this->multiline();
+ this->popScope();
+ if (!emptyArray) {
+ this->separator(wasMultiline);
+ }
+ this->write("]", 1);
+ }
+
+ /**
+ * Functions for adding values of various types. The single argument versions add un-named
+ * values, so must be called either
+ * - Between beginArray() and endArray() -or-
+ * - Between beginObject() and endObject(), after calling appendName()
+ */
+ void appendString(const char* value, size_t size) {
+ this->beginValue();
+ this->write("\"", 1);
+ if (value) {
+ char const * const end = value + size;
+ while (value < end) {
+ char const * next = value;
+ SkUnichar u = SkUTF::NextUTF8(&next, end);
+ switch (u) {
+ case '"': this->write("\\\"", 2); break;
+ case '\\': this->write("\\\\", 2); break;
+ case '\b': this->write("\\b", 2); break;
+ case '\f': this->write("\\f", 2); break;
+ case '\n': this->write("\\n", 2); break;
+ case '\r': this->write("\\r", 2); break;
+ case '\t': this->write("\\t", 2); break;
+ default: {
+ if (u < 0) {
+ next = value + 1;
+ SkString s("\\u");
+ s.appendHex((unsigned char)*value, 4);
+ this->write(s.c_str(), s.size());
+ } else if (u < 0x20) {
+ SkString s("\\u");
+ s.appendHex(u, 4);
+ this->write(s.c_str(), s.size());
+ } else {
+ this->write(value, next - value);
+ }
+ } break;
+ }
+ value = next;
+ }
+ }
+ this->write("\"", 1);
+ }
+ void appendString(const SkString& value) {
+ this->appendString(value.c_str(), value.size());
+ }
+ // Avoid the non-explicit converting constructor from char*
+ template <class T, std::enable_if_t<std::is_same_v<T,std::string>,bool> = false>
+ void appendString(const T& value) {
+ this->appendString(value.data(), value.size());
+ }
+ template <size_t N> inline void appendNString(char const (&value)[N]) {
+ static_assert(N > 0);
+ this->appendString(value, N-1);
+ }
+ void appendCString(const char* value) {
+ this->appendString(value, value ? strlen(value) : 0);
+ }
+
+ void appendPointer(const void* value) { this->beginValue(); this->appendf("\"%p\"", value); }
+ void appendBool(bool value) {
+ this->beginValue();
+ if (value) {
+ this->write("true", 4);
+ } else {
+ this->write("false", 5);
+ }
+ }
+ void appendS32(int32_t value) { this->beginValue(); this->appendf("%d", value); }
+ void appendS64(int64_t value);
+ void appendU32(uint32_t value) { this->beginValue(); this->appendf("%u", value); }
+ void appendU64(uint64_t value);
+ void appendFloat(float value) { this->beginValue(); this->appendf("%g", value); }
+ void appendDouble(double value) { this->beginValue(); this->appendf("%g", value); }
+ void appendFloatDigits(float value, int digits) {
+ this->beginValue();
+ this->appendf("%.*g", digits, value);
+ }
+ void appendDoubleDigits(double value, int digits) {
+ this->beginValue();
+ this->appendf("%.*g", digits, value);
+ }
+ void appendHexU32(uint32_t value) { this->beginValue(); this->appendf("\"0x%x\"", value); }
+ void appendHexU64(uint64_t value);
+
+ void appendString(const char* name, const char* value, size_t size) {
+ this->appendName(name);
+ this->appendString(value, size);
+ }
+ void appendString(const char* name, const SkString& value) {
+ this->appendName(name);
+ this->appendString(value.c_str(), value.size());
+ }
+ // Avoid the non-explicit converting constructor from char*
+ template <class T, std::enable_if_t<std::is_same_v<T,std::string>,bool> = false>
+ void appendString(const char* name, const T& value) {
+ this->appendName(name);
+ this->appendString(value.data(), value.size());
+ }
+ template <size_t N> inline void appendNString(const char* name, char const (&value)[N]) {
+ static_assert(N > 0);
+ this->appendName(name);
+ this->appendString(value, N-1);
+ }
+ void appendCString(const char* name, const char* value) {
+ this->appendName(name);
+ this->appendString(value, value ? strlen(value) : 0);
+ }
+#define DEFINE_NAMED_APPEND(function, type) \
+ void function(const char* name, type value) { this->appendName(name); this->function(value); }
+
+ /**
+ * Functions for adding named values of various types. These add a name field, so must be
+ * called between beginObject() and endObject().
+ */
+ DEFINE_NAMED_APPEND(appendPointer, const void *)
+ DEFINE_NAMED_APPEND(appendBool, bool)
+ DEFINE_NAMED_APPEND(appendS32, int32_t)
+ DEFINE_NAMED_APPEND(appendS64, int64_t)
+ DEFINE_NAMED_APPEND(appendU32, uint32_t)
+ DEFINE_NAMED_APPEND(appendU64, uint64_t)
+ DEFINE_NAMED_APPEND(appendFloat, float)
+ DEFINE_NAMED_APPEND(appendDouble, double)
+ DEFINE_NAMED_APPEND(appendHexU32, uint32_t)
+ DEFINE_NAMED_APPEND(appendHexU64, uint64_t)
+
+#undef DEFINE_NAMED_APPEND
+
+ void appendFloatDigits(const char* name, float value, int digits) {
+ this->appendName(name);
+ this->appendFloatDigits(value, digits);
+ }
+ void appendDoubleDigits(const char* name, double value, int digits) {
+ this->appendName(name);
+ this->appendDoubleDigits(value, digits);
+ }
+
+private:
+ enum {
+        // Using a 32k scratch block gives big performance wins, but we see diminishing returns going
+ // any larger. Even with a 1MB block, time to write a large (~300 MB) JSON file only drops
+ // another ~10%.
+ kBlockSize = 32 * 1024,
+ };
+
+ enum class Scope {
+ kNone,
+ kObject,
+ kArray
+ };
+
+ enum class State {
+ kStart,
+ kEnd,
+ kObjectBegin,
+ kObjectName,
+ kObjectValue,
+ kArrayBegin,
+ kArrayValue,
+ };
+
+ void appendf(const char* fmt, ...) SK_PRINTF_LIKE(2, 3);
+
+ void beginValue(bool structure = false) {
+ SkASSERT(State::kObjectName == fState ||
+ State::kArrayBegin == fState ||
+ State::kArrayValue == fState ||
+ (structure && State::kStart == fState));
+ if (State::kArrayValue == fState) {
+ this->write(",", 1);
+ }
+ if (Scope::kArray == this->scope()) {
+ this->separator(this->multiline());
+ } else if (Scope::kObject == this->scope() && Mode::kPretty == fMode) {
+ this->write(" ", 1);
+ }
+ // We haven't added the value yet, but all (non-structure) callers emit something
+ // immediately, so transition state, to simplify the calling code.
+ if (!structure) {
+ fState = Scope::kArray == this->scope() ? State::kArrayValue : State::kObjectValue;
+ }
+ }
+
+ void separator(bool multiline) {
+ if (Mode::kPretty == fMode) {
+ if (multiline) {
+ this->write("\n", 1);
+ for (int i = 0; i < fScopeStack.size() - 1; ++i) {
+ this->write(" ", 3);
+ }
+ } else {
+ this->write(" ", 1);
+ }
+ }
+ }
+
+ void write(const char* buf, size_t length) {
+ if (static_cast<size_t>(fBlockEnd - fWrite) < length) {
+ // Don't worry about splitting writes that overflow our block.
+ this->flush();
+ }
+ if (length > kBlockSize) {
+ // Send particularly large writes straight through to the stream (unbuffered).
+ fStream->write(buf, length);
+ } else {
+ memcpy(fWrite, buf, length);
+ fWrite += length;
+ }
+ }
+
+ Scope scope() const {
+ SkASSERT(!fScopeStack.empty());
+ return fScopeStack.back();
+ }
+
+ bool multiline() const {
+ SkASSERT(!fNewlineStack.empty());
+ return fNewlineStack.back();
+ }
+
+ void popScope() {
+ fScopeStack.pop_back();
+ fNewlineStack.pop_back();
+ switch (this->scope()) {
+ case Scope::kNone:
+ fState = State::kEnd;
+ break;
+ case Scope::kObject:
+ fState = State::kObjectValue;
+ break;
+ case Scope::kArray:
+ fState = State::kArrayValue;
+ break;
+ default:
+ SkDEBUGFAIL("Invalid scope");
+ break;
+ }
+ }
+
+ char* fBlock;
+ char* fWrite;
+ char* fBlockEnd;
+
+ SkWStream* fStream;
+ Mode fMode;
+ State fState;
+ SkSTArray<16, Scope, true> fScopeStack;
+ SkSTArray<16, bool, true> fNewlineStack;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkMatrix22.cpp b/gfx/skia/skia/src/utils/SkMatrix22.cpp
new file mode 100644
index 0000000000..25f3d60358
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMatrix22.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkMatrix22.h"
+
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+
+void SkComputeGivensRotation(const SkVector& h, SkMatrix* G) {
+ const SkScalar& a = h.fX;
+ const SkScalar& b = h.fY;
+ SkScalar c, s;
+ if (0 == b) {
+ c = SkScalarCopySign(SK_Scalar1, a);
+ s = 0;
+ //r = SkScalarAbs(a);
+ } else if (0 == a) {
+ c = 0;
+ s = -SkScalarCopySign(SK_Scalar1, b);
+ //r = SkScalarAbs(b);
+ } else if (SkScalarAbs(b) > SkScalarAbs(a)) {
+ SkScalar t = a / b;
+ SkScalar u = SkScalarCopySign(SkScalarSqrt(SK_Scalar1 + t*t), b);
+ s = -SK_Scalar1 / u;
+ c = -s * t;
+ //r = b * u;
+ } else {
+ SkScalar t = b / a;
+ SkScalar u = SkScalarCopySign(SkScalarSqrt(SK_Scalar1 + t*t), a);
+ c = SK_Scalar1 / u;
+ s = -c * t;
+ //r = a * u;
+ }
+
+ G->setSinCos(s, c);
+}
diff --git a/gfx/skia/skia/src/utils/SkMatrix22.h b/gfx/skia/skia/src/utils/SkMatrix22.h
new file mode 100644
index 0000000000..c8bcd5f6bd
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMatrix22.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMatrix22_DEFINED
+#define SkMatrix22_DEFINED
+
+#include "include/core/SkPoint.h"
+
+class SkMatrix;
+
+/** Find the Givens matrix G, which is the rotational matrix
+ * that rotates the vector h to the positive horizontal axis.
+ * G * h = [hypot(h), 0]
+ *
+ * This is equivalent to
+ *
+ * SkScalar r = h.length();
+ * SkScalar r_inv = r ? SkScalarInvert(r) : 0;
+ * h.scale(r_inv);
+ * G->setSinCos(-h.fY, h.fX);
+ *
+ * but has better numerical stability by using (partial) hypot,
+ * and saves a multiply by not computing r.
+ */
+void SkComputeGivensRotation(const SkVector& h, SkMatrix* G);
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp b/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp
new file mode 100644
index 0000000000..39fc8ab90e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocument.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkMultiPictureDocument.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkData.h"
+#include "include/core/SkDocument.h"
+#include "include/core/SkPicture.h"
+#include "include/core/SkPictureRecorder.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSerialProcs.h"
+#include "include/core/SkStream.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTo.h"
+#include "include/utils/SkNWayCanvas.h"
+#include "src/utils/SkMultiPictureDocumentPriv.h"
+
+#include <algorithm>
+#include <climits>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+
+/*
+ File format:
+ BEGINNING_OF_FILE:
+ kMagic
+ uint32_t version_number (==2)
+ uint32_t page_count
+ {
+ float sizeX
+ float sizeY
+ } * page_count
+ skp file
+*/
+
+namespace {
+// The unique file signature for this file type.
+static constexpr char kMagic[] = "Skia Multi-Picture Doc\n\n";
+
+static constexpr char kEndPage[] = "SkMultiPictureEndPage";
+
+const uint32_t kVersion = 2;
+
+static SkSize join(const SkTArray<SkSize>& sizes) {
+ SkSize joined = {0, 0};
+ for (SkSize s : sizes) {
+ joined = SkSize{std::max(joined.width(), s.width()), std::max(joined.height(), s.height())};
+ }
+ return joined;
+}
+
+struct MultiPictureDocument final : public SkDocument {
+ const SkSerialProcs fProcs;
+ SkPictureRecorder fPictureRecorder;
+ SkSize fCurrentPageSize;
+ SkTArray<sk_sp<SkPicture>> fPages;
+ SkTArray<SkSize> fSizes;
+ std::function<void(const SkPicture*)> fOnEndPage;
+ MultiPictureDocument(SkWStream* s, const SkSerialProcs* procs,
+ std::function<void(const SkPicture*)> onEndPage)
+ : SkDocument(s)
+ , fProcs(procs ? *procs : SkSerialProcs())
+ , fOnEndPage(onEndPage)
+ {}
+ ~MultiPictureDocument() override { this->close(); }
+
+ SkCanvas* onBeginPage(SkScalar w, SkScalar h) override {
+ fCurrentPageSize.set(w, h);
+ return fPictureRecorder.beginRecording(w, h);
+ }
+ void onEndPage() override {
+ fSizes.push_back(fCurrentPageSize);
+ sk_sp<SkPicture> lastPage = fPictureRecorder.finishRecordingAsPicture();
+ fPages.push_back(lastPage);
+ if (fOnEndPage) {
+ fOnEndPage(lastPage.get());
+ }
+ }
+ void onClose(SkWStream* wStream) override {
+ SkASSERT(wStream);
+ SkASSERT(wStream->bytesWritten() == 0);
+ wStream->writeText(kMagic);
+ wStream->write32(kVersion);
+ wStream->write32(SkToU32(fPages.size()));
+ for (SkSize s : fSizes) {
+ wStream->write(&s, sizeof(s));
+ }
+ SkSize bigsize = join(fSizes);
+ SkCanvas* c = fPictureRecorder.beginRecording(SkRect::MakeSize(bigsize));
+ for (const sk_sp<SkPicture>& page : fPages) {
+ c->drawPicture(page);
+ // Annotations must include some data.
+ c->drawAnnotation(SkRect::MakeEmpty(), kEndPage, SkData::MakeWithCString("X"));
+ }
+ sk_sp<SkPicture> p = fPictureRecorder.finishRecordingAsPicture();
+ p->serialize(wStream, &fProcs);
+ fPages.clear();
+ fSizes.clear();
+ return;
+ }
+ void onAbort() override {
+ fPages.clear();
+ fSizes.clear();
+ }
+};
+} // namespace
+
+sk_sp<SkDocument> SkMakeMultiPictureDocument(SkWStream* wStream, const SkSerialProcs* procs,
+ std::function<void(const SkPicture*)> onEndPage) {
+ return sk_make_sp<MultiPictureDocument>(wStream, procs, onEndPage);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+int SkMultiPictureDocumentReadPageCount(SkStreamSeekable* stream) {
+ if (!stream) {
+ return 0;
+ }
+ stream->seek(0);
+ const size_t size = sizeof(kMagic) - 1;
+ char buffer[size];
+ if (size != stream->read(buffer, size) || 0 != memcmp(kMagic, buffer, size)) {
+ stream = nullptr;
+ return 0;
+ }
+ uint32_t versionNumber;
+ if (!stream->readU32(&versionNumber) || versionNumber != kVersion) {
+ return 0;
+ }
+ uint32_t pageCount;
+ if (!stream->readU32(&pageCount) || pageCount > INT_MAX) {
+ return 0;
+ }
+ // leave stream position right here.
+ return SkTo<int>(pageCount);
+}
+
+bool SkMultiPictureDocumentReadPageSizes(SkStreamSeekable* stream,
+ SkDocumentPage* dstArray,
+ int dstArrayCount) {
+ if (!dstArray || dstArrayCount < 1) {
+ return false;
+ }
+ int pageCount = SkMultiPictureDocumentReadPageCount(stream);
+ if (pageCount < 1 || pageCount != dstArrayCount) {
+ return false;
+ }
+ for (int i = 0; i < pageCount; ++i) {
+ SkSize& s = dstArray[i].fSize;
+ if (sizeof(s) != stream->read(&s, sizeof(s))) {
+ return false;
+ }
+ }
+ // leave stream position right here.
+ return true;
+}
+
+namespace {
+struct PagerCanvas : public SkNWayCanvas {
+ SkPictureRecorder fRecorder;
+ SkDocumentPage* fDst;
+ int fCount;
+ int fIndex = 0;
+ PagerCanvas(SkISize wh, SkDocumentPage* dst, int count)
+ : SkNWayCanvas(wh.width(), wh.height()), fDst(dst), fCount(count) {
+ this->nextCanvas();
+ }
+ void nextCanvas() {
+ if (fIndex < fCount) {
+ SkRect bounds = SkRect::MakeSize(fDst[fIndex].fSize);
+ this->addCanvas(fRecorder.beginRecording(bounds));
+ }
+ }
+ void onDrawAnnotation(const SkRect& r, const char* key, SkData* d) override {
+ if (0 == strcmp(key, kEndPage)) {
+ this->removeAll();
+ if (fIndex < fCount) {
+ fDst[fIndex].fPicture = fRecorder.finishRecordingAsPicture();
+ ++fIndex;
+ }
+ this->nextCanvas();
+ } else {
+ this->SkNWayCanvas::onDrawAnnotation(r, key, d);
+ }
+ }
+};
+} // namespace
+
+bool SkMultiPictureDocumentRead(SkStreamSeekable* stream,
+ SkDocumentPage* dstArray,
+ int dstArrayCount,
+ const SkDeserialProcs* procs) {
+ if (!SkMultiPictureDocumentReadPageSizes(stream, dstArray, dstArrayCount)) {
+ return false;
+ }
+ SkSize joined = {0.0f, 0.0f};
+ for (int i = 0; i < dstArrayCount; ++i) {
+ joined = SkSize{std::max(joined.width(), dstArray[i].fSize.width()),
+ std::max(joined.height(), dstArray[i].fSize.height())};
+ }
+
+ auto picture = SkPicture::MakeFromStream(stream, procs);
+ if (!picture) {
+ return false;
+ }
+
+ PagerCanvas canvas(joined.toCeil(), dstArray, dstArrayCount);
+ // Must call playback(), not drawPicture() to reach
+ // PagerCanvas::onDrawAnnotation().
+ picture->playback(&canvas);
+ if (canvas.fIndex != dstArrayCount) {
+ SkDEBUGF("Malformed SkMultiPictureDocument: canvas.fIndex=%d dstArrayCount=%d\n",
+ canvas.fIndex, dstArrayCount);
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocument.h b/gfx/skia/skia/src/utils/SkMultiPictureDocument.h
new file mode 100644
index 0000000000..7076a2fe5b
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocument.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMultiPictureDocument_DEFINED
+#define SkMultiPictureDocument_DEFINED
+
+#include "include/core/SkPicture.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+
+#include <functional>
+
+class SkDocument;
+class SkStreamSeekable;
+class SkWStream;
+struct SkDeserialProcs;
+struct SkSerialProcs;
+
+/**
+ * Writes into a file format that is similar to SkPicture::serialize()
+ * Accepts a callback for endPage behavior
+ */
+SK_SPI sk_sp<SkDocument> SkMakeMultiPictureDocument(SkWStream* dst, const SkSerialProcs* = nullptr,
+ std::function<void(const SkPicture*)> onEndPage = nullptr);
+
+struct SkDocumentPage {
+ sk_sp<SkPicture> fPicture;
+ SkSize fSize;
+};
+
+/**
+ * Returns the number of pages in the SkMultiPictureDocument.
+ */
+SK_SPI int SkMultiPictureDocumentReadPageCount(SkStreamSeekable* src);
+
+/**
+ * Read the SkMultiPictureDocument into the provided array of pages.
+ * dstArrayCount must equal SkMultiPictureDocumentReadPageCount().
+ * Return false on error.
+ */
+SK_SPI bool SkMultiPictureDocumentRead(SkStreamSeekable* src,
+ SkDocumentPage* dstArray,
+ int dstArrayCount,
+ const SkDeserialProcs* = nullptr);
+
+#endif // SkMultiPictureDocument_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h b/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h
new file mode 100644
index 0000000000..a33bcd99c1
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkMultiPictureDocumentPriv.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkMultiPictureDocumentPriv_DEFINED
+#define SkMultiPictureDocumentPriv_DEFINED
+
+#include "src/utils/SkMultiPictureDocument.h"
+
+/**
+ * Additional API allows one to read the array of page-sizes without parsing
+ * the entire file. Used by DM.
+ */
+bool SkMultiPictureDocumentReadPageSizes(SkStreamSeekable* src,
+ SkDocumentPage* dstArray,
+ int dstArrayCount);
+
+#endif // SkMultiPictureDocumentPriv_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkNWayCanvas.cpp b/gfx/skia/skia/src/utils/SkNWayCanvas.cpp
new file mode 100644
index 0000000000..2b85fc1b54
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkNWayCanvas.cpp
@@ -0,0 +1,414 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkNWayCanvas.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColor.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkShader.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/utils/SkNoDrawCanvas.h"
+#include "src/core/SkCanvasPriv.h"
+
+#include <algorithm>
+#include <iterator>
+#include <utility>
+
+namespace sktext {
+class GlyphRunList;
+}
+
+class SkData;
+class SkDrawable;
+class SkImage;
+class SkPaint;
+class SkPath;
+class SkPicture;
+class SkRRect;
+class SkRegion;
+class SkTextBlob;
+class SkVertices;
+enum class SkBlendMode;
+enum class SkClipOp;
+struct SkDrawShadowRec;
+
+SkNWayCanvas::SkNWayCanvas(int width, int height) : INHERITED(width, height) {}
+
+SkNWayCanvas::~SkNWayCanvas() {
+ this->removeAll();
+}
+
+void SkNWayCanvas::addCanvas(SkCanvas* canvas) {
+ if (!fList.empty()) {
+ // We are using the nway canvas as a wrapper for the originally added canvas, and the device
+ // on the nway may contradict calls for the device on this canvas. So, to add a second
+ // canvas, the devices on the first canvas, and the nway base device must be different.
+ SkASSERT(fList[0]->baseDevice() != this->baseDevice());
+ }
+ if (canvas) {
+ *fList.append() = canvas;
+ }
+}
+
+void SkNWayCanvas::removeCanvas(SkCanvas* canvas) {
+ auto found = std::find(fList.begin(), fList.end(), canvas);
+ if (found != fList.end()) {
+ fList.removeShuffle(std::distance(fList.begin(), found));
+ }
+}
+
+void SkNWayCanvas::removeAll() {
+ fList.reset();
+}
+
+///////////////////////////////////////////////////////////////////////////
+// These are forwarded to the N canvases we're referencing
+
+class SkNWayCanvas::Iter {
+public:
+ Iter(const SkTDArray<SkCanvas*>& list) : fList(list) {
+ fIndex = 0;
+ }
+ bool next() {
+ if (fIndex < fList.size()) {
+ fCanvas = fList[fIndex++];
+ return true;
+ }
+ return false;
+ }
+ SkCanvas* operator->() { return fCanvas; }
+ SkCanvas* get() const { return fCanvas; }
+
+private:
+ const SkTDArray<SkCanvas*>& fList;
+ int fIndex;
+ SkCanvas* fCanvas;
+};
+
+void SkNWayCanvas::willSave() {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->save();
+ }
+
+ this->INHERITED::willSave();
+}
+
+SkCanvas::SaveLayerStrategy SkNWayCanvas::getSaveLayerStrategy(const SaveLayerRec& rec) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->saveLayer(rec);
+ }
+
+ this->INHERITED::getSaveLayerStrategy(rec);
+ // No need for a layer.
+ return kNoLayer_SaveLayerStrategy;
+}
+
+bool SkNWayCanvas::onDoSaveBehind(const SkRect* bounds) {
+ Iter iter(fList);
+ while (iter.next()) {
+ SkCanvasPriv::SaveBehind(iter.get(), bounds);
+ }
+ this->INHERITED::onDoSaveBehind(bounds);
+ return false;
+}
+
+void SkNWayCanvas::willRestore() {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->restore();
+ }
+ this->INHERITED::willRestore();
+}
+
+void SkNWayCanvas::didConcat44(const SkM44& m) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->concat(m);
+ }
+}
+
+void SkNWayCanvas::didSetM44(const SkM44& matrix) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->setMatrix(matrix);
+ }
+}
+
+void SkNWayCanvas::didTranslate(SkScalar x, SkScalar y) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->translate(x, y);
+ }
+}
+
+void SkNWayCanvas::didScale(SkScalar x, SkScalar y) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->scale(x, y);
+ }
+}
+
+void SkNWayCanvas::onClipRect(const SkRect& rect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRect(rect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipRect(rect, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipRRect(const SkRRect& rrect, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRRect(rrect, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipRRect(rrect, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipPath(const SkPath& path, SkClipOp op, ClipEdgeStyle edgeStyle) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipPath(path, op, kSoft_ClipEdgeStyle == edgeStyle);
+ }
+ this->INHERITED::onClipPath(path, op, edgeStyle);
+}
+
+void SkNWayCanvas::onClipShader(sk_sp<SkShader> sh, SkClipOp op) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipShader(sh, op);
+ }
+ this->INHERITED::onClipShader(std::move(sh), op);
+}
+
+void SkNWayCanvas::onClipRegion(const SkRegion& deviceRgn, SkClipOp op) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->clipRegion(deviceRgn, op);
+ }
+ this->INHERITED::onClipRegion(deviceRgn, op);
+}
+
+void SkNWayCanvas::onResetClip() {
+ Iter iter(fList);
+ while (iter.next()) {
+ SkCanvasPriv::ResetClip(iter.get());
+ }
+ this->INHERITED::onResetClip();
+}
+
+void SkNWayCanvas::onDrawPaint(const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPaint(paint);
+ }
+}
+
+void SkNWayCanvas::onDrawBehind(const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ SkCanvasPriv::DrawBehind(iter.get(), paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPoints(mode, count, pts, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawRect(rect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawRegion(region, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawOval(rect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawArc(rect, startAngle, sweepAngle, useCenter, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawRRect(rrect, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawDRRect(outer, inner, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPath(path, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawImage2(const SkImage* image, SkScalar left, SkScalar top,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawImage(image, left, top, sampling, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawImageRect2(const SkImage* image, const SkRect& src, const SkRect& dst,
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawImageRect(image, src, dst, sampling, paint, constraint);
+ }
+}
+
+void SkNWayCanvas::onDrawImageLattice2(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, SkFilterMode filter,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawImageLattice(image, lattice, dst, filter, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawAtlas2(const SkImage* image, const SkRSXform xform[], const SkRect tex[],
+ const SkColor colors[], int count, SkBlendMode bmode,
+ const SkSamplingOptions& sampling, const SkRect* cull,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawAtlas(image, xform, tex, colors, count, bmode, sampling, cull, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawGlyphRunList(const sktext::GlyphRunList& list,
+ const SkPaint &paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->onDrawGlyphRunList(list, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint &paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawTextBlob(blob, x, y, paint);
+ }
+}
+
+#if defined(SK_GANESH)
+void SkNWayCanvas::onDrawSlug(const sktext::gpu::Slug* slug) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawSlug(slug);
+ }
+}
+#endif
+
+void SkNWayCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* matrix,
+ const SkPaint* paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPicture(picture, matrix, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawDrawable(drawable, matrix);
+ }
+}
+
+void SkNWayCanvas::onDrawVerticesObject(const SkVertices* vertices,
+ SkBlendMode bmode, const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawVertices(vertices, bmode, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawPatch(cubics, colors, texCoords, bmode, paint);
+ }
+}
+
+void SkNWayCanvas::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->private_draw_shadow_rec(path, rec);
+ }
+}
+
+void SkNWayCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* data) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->drawAnnotation(rect, key, data);
+ }
+}
+
+void SkNWayCanvas::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aa, const SkColor4f& color, SkBlendMode mode) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->experimental_DrawEdgeAAQuad(rect, clip, aa, color, mode);
+ }
+}
+
+void SkNWayCanvas::onDrawEdgeAAImageSet2(const ImageSetEntry set[], int count,
+ const SkPoint dstClips[], const SkMatrix preViewMatrices[],
+ const SkSamplingOptions& sampling, const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->experimental_DrawEdgeAAImageSet(
+ set, count, dstClips, preViewMatrices, sampling, paint, constraint);
+ }
+}
+
+void SkNWayCanvas::onFlush() {
+ Iter iter(fList);
+ while (iter.next()) {
+ iter->flush();
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkNullCanvas.cpp b/gfx/skia/skia/src/utils/SkNullCanvas.cpp
new file mode 100644
index 0000000000..3179282156
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkNullCanvas.cpp
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkNullCanvas.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/utils/SkNWayCanvas.h"
+
+std::unique_ptr<SkCanvas> SkMakeNullCanvas() {
+ // An N-Way canvas forwards calls to N canvas's. When N == 0 it's
+ // effectively a null canvas.
+ return std::unique_ptr<SkCanvas>(new SkNWayCanvas(0, 0));
+}
diff --git a/gfx/skia/skia/src/utils/SkOSPath.cpp b/gfx/skia/skia/src/utils/SkOSPath.cpp
new file mode 100644
index 0000000000..f960d1fde4
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkOSPath.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkOSPath.h"
+
+#include "include/core/SkTypes.h"
+
+#include <string.h>
+
+SkString SkOSPath::Join(const char *rootPath, const char *relativePath) {
+ SkString result(rootPath);
+ if (!result.endsWith(SEPARATOR) && !result.isEmpty()) {
+ result.appendUnichar(SEPARATOR);
+ }
+ result.append(relativePath);
+ return result;
+}
+
+SkString SkOSPath::Basename(const char* fullPath) {
+ if (!fullPath) {
+ return SkString();
+ }
+ const char* filename = strrchr(fullPath, SEPARATOR);
+ if (nullptr == filename) {
+ filename = fullPath;
+ } else {
+ ++filename;
+ }
+ return SkString(filename);
+}
+
+SkString SkOSPath::Dirname(const char* fullPath) {
+ if (!fullPath) {
+ return SkString();
+ }
+ const char* end = strrchr(fullPath, SEPARATOR);
+ if (nullptr == end) {
+ return SkString();
+ }
+ if (end == fullPath) {
+ SkASSERT(fullPath[0] == SEPARATOR);
+ ++end;
+ }
+ return SkString(fullPath, end - fullPath);
+}
diff --git a/gfx/skia/skia/src/utils/SkOSPath.h b/gfx/skia/skia/src/utils/SkOSPath.h
new file mode 100644
index 0000000000..783e985fd6
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkOSPath.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOSPath_DEFINED
+#define SkOSPath_DEFINED
+
+#include "include/core/SkString.h"
+
+/**
+ * Functions for modifying SkStrings which represent paths on the filesystem.
+ */
+class SkOSPath {
+public:
+#ifdef _WIN32
+ static constexpr char SEPARATOR = '\\';
+#else
+ static constexpr char SEPARATOR = '/';
+#endif
+
+ /**
+ * Assembles rootPath and relativePath into a single path, like this:
+ * rootPath/relativePath.
+ * It is okay to call with a NULL rootPath and/or relativePath. A path
+ * separator will still be inserted.
+ *
+ * Uses SkPATH_SEPARATOR, to work on all platforms.
+ */
+ static SkString Join(const char* rootPath, const char* relativePath);
+
+ /**
+ * Return the name of the file, ignoring the directory structure.
+ * Behaves like python's os.path.basename. If the fullPath is
+ * /dir/subdir/, an empty string is returned.
+ * @param fullPath Full path to the file.
+ * @return SkString The basename of the file - anything beyond the
+ * final slash, or the full name if there is no slash.
+ */
+ static SkString Basename(const char* fullPath);
+
+ /**
+ * Given a qualified file name returns the directory.
+ * Behaves like python's os.path.dirname. If the fullPath is
+ * /dir/subdir/ the return will be /dir/subdir/
+ * @param fullPath Full path to the file.
+ * @return SkString The dir containing the file - anything preceding the
+ * final slash, or the full name if ending in a slash.
+ */
+ static SkString Dirname(const char* fullPath);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkOrderedFontMgr.cpp b/gfx/skia/skia/src/utils/SkOrderedFontMgr.cpp
new file mode 100644
index 0000000000..c4ff5a60ef
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkOrderedFontMgr.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkOrderedFontMgr.h"
+
+#include "include/core/SkData.h" // IWYU pragma: keep
+#include "include/core/SkFontStyle.h"
+#include "include/core/SkStream.h" // IWYU pragma: keep
+#include "include/core/SkTypeface.h" // IWYU pragma: keep
+
+#include <utility>
+
+class SkString;
+struct SkFontArguments;
+
+SkOrderedFontMgr::SkOrderedFontMgr() {}
+SkOrderedFontMgr::~SkOrderedFontMgr() {}
+
+void SkOrderedFontMgr::append(sk_sp<SkFontMgr> fm) {
+ fList.push_back(std::move(fm));
+}
+
+int SkOrderedFontMgr::onCountFamilies() const {
+ int count = 0;
+ for (const auto& fm : fList) {
+ count += fm->countFamilies();
+ }
+ return count;
+}
+
+void SkOrderedFontMgr::onGetFamilyName(int index, SkString* familyName) const {
+ for (const auto& fm : fList) {
+ const int count = fm->countFamilies();
+ if (index < count) {
+ return fm->getFamilyName(index, familyName);
+ }
+ index -= count;
+ }
+}
+
+SkFontStyleSet* SkOrderedFontMgr::onCreateStyleSet(int index) const {
+ for (const auto& fm : fList) {
+ const int count = fm->countFamilies();
+ if (index < count) {
+ return fm->createStyleSet(index);
+ }
+ index -= count;
+ }
+ return nullptr;
+}
+
+SkFontStyleSet* SkOrderedFontMgr::onMatchFamily(const char familyName[]) const {
+ for (const auto& fm : fList) {
+ if (auto fs = fm->matchFamily(familyName)) {
+ return fs;
+ }
+ }
+ return nullptr;
+}
+
+SkTypeface* SkOrderedFontMgr::onMatchFamilyStyle(const char family[],
+ const SkFontStyle& style) const {
+ for (const auto& fm : fList) {
+ if (auto tf = fm->matchFamilyStyle(family, style)) {
+ return tf;
+ }
+ }
+ return nullptr;
+}
+
+SkTypeface* SkOrderedFontMgr::onMatchFamilyStyleCharacter(const char familyName[],
+ const SkFontStyle& style,
+ const char* bcp47[], int bcp47Count,
+ SkUnichar uni) const {
+ for (const auto& fm : fList) {
+ if (auto tf = fm->matchFamilyStyleCharacter(familyName, style, bcp47, bcp47Count, uni)) {
+ return tf;
+ }
+ }
+ return nullptr;
+}
+
+// All of these are defined to fail by returning null
+
+sk_sp<SkTypeface> SkOrderedFontMgr::onMakeFromData(sk_sp<SkData>, int ttcIndex) const {
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkOrderedFontMgr::onMakeFromStreamIndex(std::unique_ptr<SkStreamAsset>,
+ int ttcIndex) const {
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkOrderedFontMgr::onMakeFromStreamArgs(std::unique_ptr<SkStreamAsset>,
+ const SkFontArguments&) const {
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkOrderedFontMgr::onMakeFromFile(const char path[], int ttcIndex) const {
+ return nullptr;
+}
+
+sk_sp<SkTypeface> SkOrderedFontMgr::onLegacyMakeTypeface(const char family[], SkFontStyle) const {
+ return nullptr;
+}
diff --git a/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp b/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp
new file mode 100644
index 0000000000..e1abe333ef
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPaintFilterCanvas.cpp
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkPaintFilterCanvas.h"
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPixmap.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkSurface.h" // IWYU pragma: keep
+#include "include/core/SkSurfaceProps.h"
+
+#include <optional>
+
+class SkData;
+class SkDrawable;
+class SkImage;
+class SkPath;
+class SkPicture;
+class SkRRect;
+class SkRegion;
+class SkTextBlob;
+class SkVertices;
+struct SkDrawShadowRec;
+
+class SkPaintFilterCanvas::AutoPaintFilter {
+public:
+ AutoPaintFilter(const SkPaintFilterCanvas* canvas, const SkPaint* paint)
+ : fPaint(paint ? *paint : SkPaint()) {
+ fShouldDraw = canvas->onFilter(fPaint);
+ }
+
+ AutoPaintFilter(const SkPaintFilterCanvas* canvas, const SkPaint& paint)
+ : AutoPaintFilter(canvas, &paint) { }
+
+ const SkPaint& paint() const { return fPaint; }
+
+ bool shouldDraw() const { return fShouldDraw; }
+
+private:
+ SkPaint fPaint;
+ bool fShouldDraw;
+};
+
+SkPaintFilterCanvas::SkPaintFilterCanvas(SkCanvas *canvas)
+ : SkCanvasVirtualEnforcer<SkNWayCanvas>(canvas->imageInfo().width(),
+ canvas->imageInfo().height()) {
+
+ // Transfer matrix & clip state before adding the target canvas.
+ this->clipRect(SkRect::Make(canvas->getDeviceClipBounds()));
+ this->setMatrix(canvas->getLocalToDevice());
+
+ this->addCanvas(canvas);
+}
+
+void SkPaintFilterCanvas::onDrawPaint(const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPaint(apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawBehind(const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawBehind(apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPoints(PointMode mode, size_t count, const SkPoint pts[],
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPoints(mode, count, pts, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawRect(const SkRect& rect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawRect(rect, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawRRect(const SkRRect& rrect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawRRect(rrect, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawDRRect(const SkRRect& outer, const SkRRect& inner,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawDRRect(outer, inner, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawRegion(const SkRegion& region, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawRegion(region, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawOval(const SkRect& rect, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawOval(rect, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawArc(const SkRect& rect, SkScalar startAngle, SkScalar sweepAngle,
+ bool useCenter, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawArc(rect, startAngle, sweepAngle, useCenter, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPath(const SkPath& path, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPath(path, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImage2(const SkImage* image, SkScalar left, SkScalar top,
+ const SkSamplingOptions& sampling, const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawImage2(image, left, top, sampling, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImageRect2(const SkImage* image, const SkRect& src,
+ const SkRect& dst, const SkSamplingOptions& sampling,
+ const SkPaint* paint, SrcRectConstraint constraint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawImageRect2(image, src, dst, sampling, &apf.paint(), constraint);
+ }
+}
+
+void SkPaintFilterCanvas::onDrawImageLattice2(const SkImage* image, const Lattice& lattice,
+ const SkRect& dst, SkFilterMode filter,
+ const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawImageLattice2(image, lattice, dst, filter, &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawVerticesObject(const SkVertices* vertices,
+ SkBlendMode bmode, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawVerticesObject(vertices, bmode, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPatch(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], SkBlendMode bmode,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawPatch(cubics, colors, texCoords, bmode, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawPicture(const SkPicture* picture, const SkMatrix* m,
+ const SkPaint* originalPaint) {
+ AutoPaintFilter apf(this, originalPaint);
+ if (apf.shouldDraw()) {
+ const SkPaint* newPaint = &apf.paint();
+
+ // Passing a paint (-vs- passing null) makes drawPicture draw into a layer...
+ // much slower, and can produce different blending. Thus we should only do this
+ // if the filter's effect actually impacts the picture.
+ if (originalPaint == nullptr) {
+ if ( newPaint->getAlphaf() == 1.0f
+ && newPaint->getColorFilter() == nullptr
+ && newPaint->getImageFilter() == nullptr
+ && newPaint->asBlendMode() == SkBlendMode::kSrcOver) {
+ // restore the original nullptr
+ newPaint = nullptr;
+ }
+ }
+ this->SkNWayCanvas::onDrawPicture(picture, m, newPaint);
+ }
+}
+
+void SkPaintFilterCanvas::onDrawDrawable(SkDrawable* drawable, const SkMatrix* matrix) {
+ // There is no paint to filter in this case, but we can still filter on type.
+ // Subclasses need to unroll the drawable explicity (by overriding this method) in
+ // order to actually filter nested content.
+ AutoPaintFilter apf(this, nullptr);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawDrawable(drawable, matrix);
+ }
+}
+
+void SkPaintFilterCanvas::onDrawGlyphRunList(
+ const sktext::GlyphRunList& list, const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawGlyphRunList(list, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawTextBlob(const SkTextBlob* blob, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawTextBlob(blob, x, y, apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawAtlas2(const SkImage* image, const SkRSXform xform[],
+ const SkRect tex[], const SkColor colors[], int count,
+ SkBlendMode bmode, const SkSamplingOptions& sampling,
+ const SkRect* cull, const SkPaint* paint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawAtlas2(image, xform, tex, colors, count, bmode, sampling, cull,
+ &apf.paint());
+ }
+}
+
+void SkPaintFilterCanvas::onDrawAnnotation(const SkRect& rect, const char key[], SkData* value) {
+ this->SkNWayCanvas::onDrawAnnotation(rect, key, value);
+}
+
+void SkPaintFilterCanvas::onDrawShadowRec(const SkPath& path, const SkDrawShadowRec& rec) {
+ this->SkNWayCanvas::onDrawShadowRec(path, rec);
+}
+
+void SkPaintFilterCanvas::onDrawEdgeAAQuad(const SkRect& rect, const SkPoint clip[4],
+ QuadAAFlags aa, const SkColor4f& color, SkBlendMode mode) {
+ SkPaint paint;
+ paint.setColor(color);
+ paint.setBlendMode(mode);
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawEdgeAAQuad(rect, clip, aa, apf.paint().getColor4f(),
+ apf.paint().getBlendMode_or(SkBlendMode::kSrcOver));
+ }
+}
+
+void SkPaintFilterCanvas::onDrawEdgeAAImageSet2(const ImageSetEntry set[], int count,
+ const SkPoint dstClips[],
+ const SkMatrix preViewMatrices[],
+ const SkSamplingOptions& sampling,
+ const SkPaint* paint,
+ SrcRectConstraint constraint) {
+ AutoPaintFilter apf(this, paint);
+ if (apf.shouldDraw()) {
+ this->SkNWayCanvas::onDrawEdgeAAImageSet2(
+ set, count, dstClips, preViewMatrices, sampling, &apf.paint(), constraint);
+ }
+}
+
+sk_sp<SkSurface> SkPaintFilterCanvas::onNewSurface(const SkImageInfo& info,
+ const SkSurfaceProps& props) {
+ return this->proxy()->makeSurface(info, &props);
+}
+
+bool SkPaintFilterCanvas::onPeekPixels(SkPixmap* pixmap) {
+ return this->proxy()->peekPixels(pixmap);
+}
+
+bool SkPaintFilterCanvas::onAccessTopLayerPixels(SkPixmap* pixmap) {
+ SkImageInfo info;
+ size_t rowBytes;
+
+ void* addr = this->proxy()->accessTopLayerPixels(&info, &rowBytes);
+ if (!addr) {
+ return false;
+ }
+
+ pixmap->reset(info, addr, rowBytes);
+ return true;
+}
+
+SkImageInfo SkPaintFilterCanvas::onImageInfo() const {
+ return this->proxy()->imageInfo();
+}
+
+bool SkPaintFilterCanvas::onGetProps(SkSurfaceProps* props, bool top) const {
+ if (props) {
+ *props = top ? this->proxy()->getTopProps() : this->proxy()->getBaseProps();
+ }
+ return true;
+}
diff --git a/gfx/skia/skia/src/utils/SkParse.cpp b/gfx/skia/skia/src/utils/SkParse.cpp
new file mode 100644
index 0000000000..dce251411a
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParse.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkScalar.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTo.h"
+#include "include/utils/SkParse.h"
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <string>
+
+static inline bool is_between(int c, int min, int max)
+{
+ return (unsigned)(c - min) <= (unsigned)(max - min);
+}
+
+static inline bool is_ws(int c)
+{
+ return is_between(c, 1, 32);
+}
+
+static inline bool is_digit(int c)
+{
+ return is_between(c, '0', '9');
+}
+
+static inline bool is_sep(int c)
+{
+ return is_ws(c) || c == ',' || c == ';';
+}
+
+static int to_hex(int c)
+{
+ if (is_digit(c))
+ return c - '0';
+
+ c |= 0x20; // make us lower-case
+ if (is_between(c, 'a', 'f'))
+ return c + 10 - 'a';
+ else
+ return -1;
+}
+
+static inline bool is_hex(int c)
+{
+ return to_hex(c) >= 0;
+}
+
+static const char* skip_ws(const char str[])
+{
+ SkASSERT(str);
+ while (is_ws(*str))
+ str++;
+ return str;
+}
+
+static const char* skip_sep(const char str[])
+{
+ SkASSERT(str);
+ while (is_sep(*str))
+ str++;
+ return str;
+}
+
+int SkParse::Count(const char str[])
+{
+ char c;
+ int count = 0;
+ goto skipLeading;
+ do {
+ count++;
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (is_sep(c) == false);
+skipLeading:
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (is_sep(c));
+ } while (true);
+goHome:
+ return count;
+}
+
+int SkParse::Count(const char str[], char separator)
+{
+ char c;
+ int count = 0;
+ goto skipLeading;
+ do {
+ count++;
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (c != separator);
+skipLeading:
+ do {
+ if ((c = *str++) == '\0')
+ goto goHome;
+ } while (c == separator);
+ } while (true);
+goHome:
+ return count;
+}
+
+const char* SkParse::FindHex(const char str[], uint32_t* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ if (!is_hex(*str))
+ return nullptr;
+
+ uint32_t n = 0;
+ int max_digits = 8;
+ int digit;
+
+ while ((digit = to_hex(*str)) >= 0)
+ {
+ if (--max_digits < 0)
+ return nullptr;
+ n = (n << 4) | digit;
+ str += 1;
+ }
+
+ if (*str == 0 || is_ws(*str))
+ {
+ if (value)
+ *value = n;
+ return str;
+ }
+ return nullptr;
+}
+
+const char* SkParse::FindS32(const char str[], int32_t* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ int sign = 1;
+ int64_t maxAbsValue = std::numeric_limits<int>::max();
+ if (*str == '-')
+ {
+ sign = -1;
+ maxAbsValue = -static_cast<int64_t>(std::numeric_limits<int>::min());
+ str += 1;
+ }
+
+ if (!is_digit(*str)) {
+ return nullptr;
+ }
+
+ int64_t n = 0;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ if (n > maxAbsValue) {
+ return nullptr;
+ }
+
+ str += 1;
+ }
+ if (value) {
+ *value = SkToS32(sign*n);
+ }
+ return str;
+}
+
+const char* SkParse::FindMSec(const char str[], SkMSec* value)
+{
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ int sign = 0;
+ if (*str == '-')
+ {
+ sign = -1;
+ str += 1;
+ }
+
+ if (!is_digit(*str))
+ return nullptr;
+
+ int n = 0;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ str += 1;
+ }
+ int remaining10s = 3;
+ if (*str == '.') {
+ str++;
+ while (is_digit(*str))
+ {
+ n = 10*n + *str - '0';
+ str += 1;
+ if (--remaining10s == 0)
+ break;
+ }
+ }
+ while (--remaining10s >= 0)
+ n *= 10;
+ if (value)
+ *value = (n ^ sign) - sign;
+ return str;
+}
+
+const char* SkParse::FindScalar(const char str[], SkScalar* value) {
+ SkASSERT(str);
+ str = skip_ws(str);
+
+ char* stop;
+ float v = (float)strtod(str, &stop);
+ if (str == stop) {
+ return nullptr;
+ }
+ if (value) {
+ *value = v;
+ }
+ return stop;
+}
+
+const char* SkParse::FindScalars(const char str[], SkScalar value[], int count)
+{
+ SkASSERT(count >= 0);
+
+ if (count > 0)
+ {
+ for (;;)
+ {
+ str = SkParse::FindScalar(str, value);
+ if (--count == 0 || str == nullptr)
+ break;
+
+ // keep going
+ str = skip_sep(str);
+ if (value)
+ value += 1;
+ }
+ }
+ return str;
+}
+
+static bool lookup_str(const char str[], const char** table, int count)
+{
+ while (--count >= 0)
+ if (!strcmp(str, table[count]))
+ return true;
+ return false;
+}
+
+bool SkParse::FindBool(const char str[], bool* value)
+{
+ static const char* gYes[] = { "yes", "1", "true" };
+ static const char* gNo[] = { "no", "0", "false" };
+
+ if (lookup_str(str, gYes, std::size(gYes)))
+ {
+ if (value) *value = true;
+ return true;
+ }
+ else if (lookup_str(str, gNo, std::size(gNo)))
+ {
+ if (value) *value = false;
+ return true;
+ }
+ return false;
+}
+
+int SkParse::FindList(const char target[], const char list[])
+{
+ size_t len = strlen(target);
+ int index = 0;
+
+ for (;;)
+ {
+ const char* end = strchr(list, ',');
+ size_t entryLen;
+
+ if (end == nullptr) // last entry
+ entryLen = strlen(list);
+ else
+ entryLen = end - list;
+
+ if (entryLen == len && memcmp(target, list, len) == 0)
+ return index;
+ if (end == nullptr)
+ break;
+
+ list = end + 1; // skip the ','
+ index += 1;
+ }
+ return -1;
+}
diff --git a/gfx/skia/skia/src/utils/SkParseColor.cpp b/gfx/skia/skia/src/utils/SkParseColor.cpp
new file mode 100644
index 0000000000..0101632c56
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParseColor.cpp
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2006 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/utils/SkParse.h"
+
+#include "include/core/SkColor.h"
+#include "include/core/SkTypes.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <string>
+
+static constexpr const char* gColorNames[] = {
+ "aliceblue",
+ "antiquewhite",
+ "aqua",
+ "aquamarine",
+ "azure",
+ "beige",
+ "bisque",
+ "black",
+ "blanchedalmond",
+ "blue",
+ "blueviolet",
+ "brown",
+ "burlywood",
+ "cadetblue",
+ "chartreuse",
+ "chocolate",
+ "coral",
+ "cornflowerblue",
+ "cornsilk",
+ "crimson",
+ "cyan",
+ "darkblue",
+ "darkcyan",
+ "darkgoldenrod",
+ "darkgray",
+ "darkgreen",
+ "darkkhaki",
+ "darkmagenta",
+ "darkolivegreen",
+ "darkorange",
+ "darkorchid",
+ "darkred",
+ "darksalmon",
+ "darkseagreen",
+ "darkslateblue",
+ "darkslategray",
+ "darkturquoise",
+ "darkviolet",
+ "deeppink",
+ "deepskyblue",
+ "dimgray",
+ "dodgerblue",
+ "firebrick",
+ "floralwhite",
+ "forestgreen",
+ "fuchsia",
+ "gainsboro",
+ "ghostwhite",
+ "gold",
+ "goldenrod",
+ "gray",
+ "green",
+ "greenyellow",
+ "honeydew",
+ "hotpink",
+ "indianred",
+ "indigo",
+ "ivory",
+ "khaki",
+ "lavender",
+ "lavenderblush",
+ "lawngreen",
+ "lemonchiffon",
+ "lightblue",
+ "lightcoral",
+ "lightcyan",
+ "lightgoldenrodyellow",
+ "lightgreen",
+ "lightgrey",
+ "lightpink",
+ "lightsalmon",
+ "lightseagreen",
+ "lightskyblue",
+ "lightslategray",
+ "lightsteelblue",
+ "lightyellow",
+ "lime",
+ "limegreen",
+ "linen",
+ "magenta",
+ "maroon",
+ "mediumaquamarine",
+ "mediumblue",
+ "mediumorchid",
+ "mediumpurple",
+ "mediumseagreen",
+ "mediumslateblue",
+ "mediumspringgreen",
+ "mediumturquoise",
+ "mediumvioletred",
+ "midnightblue",
+ "mintcream",
+ "mistyrose",
+ "moccasin",
+ "navajowhite",
+ "navy",
+ "oldlace",
+ "olive",
+ "olivedrab",
+ "orange",
+ "orangered",
+ "orchid",
+ "palegoldenrod",
+ "palegreen",
+ "paleturquoise",
+ "palevioletred",
+ "papayawhip",
+ "peachpuff",
+ "peru",
+ "pink",
+ "plum",
+ "powderblue",
+ "purple",
+ "red",
+ "rosybrown",
+ "royalblue",
+ "saddlebrown",
+ "salmon",
+ "sandybrown",
+ "seagreen",
+ "seashell",
+ "sienna",
+ "silver",
+ "skyblue",
+ "slateblue",
+ "slategray",
+ "snow",
+ "springgreen",
+ "steelblue",
+ "tan",
+ "teal",
+ "thistle",
+ "tomato",
+ "turquoise",
+ "violet",
+ "wheat",
+ "white",
+ "whitesmoke",
+ "yellow",
+ "yellowgreen",
+};
+
+static constexpr struct ColorRec {
+ uint8_t r, g, b;
+} gColors[] = {
+ { 0xf0,0xf8,0xff }, // aliceblue
+ { 0xfa,0xeb,0xd7 }, // antiquewhite
+ { 0x00,0xff,0xff }, // aqua
+ { 0x7f,0xff,0xd4 }, // aquamarine
+ { 0xf0,0xff,0xff }, // azure
+ { 0xf5,0xf5,0xdc }, // beige
+ { 0xff,0xe4,0xc4 }, // bisque
+ { 0x00,0x00,0x00 }, // black
+ { 0xff,0xeb,0xcd }, // blanchedalmond
+ { 0x00,0x00,0xff }, // blue
+ { 0x8a,0x2b,0xe2 }, // blueviolet
+ { 0xa5,0x2a,0x2a }, // brown
+ { 0xde,0xb8,0x87 }, // burlywood
+ { 0x5f,0x9e,0xa0 }, // cadetblue
+ { 0x7f,0xff,0x00 }, // chartreuse
+ { 0xd2,0x69,0x1e }, // chocolate
+ { 0xff,0x7f,0x50 }, // coral
+ { 0x64,0x95,0xed }, // cornflowerblue
+ { 0xff,0xf8,0xdc }, // cornsilk
+ { 0xdc,0x14,0x3c }, // crimson
+ { 0x00,0xff,0xff }, // cyan
+ { 0x00,0x00,0x8b }, // darkblue
+ { 0x00,0x8b,0x8b }, // darkcyan
+ { 0xb8,0x86,0x0b }, // darkgoldenrod
+ { 0xa9,0xa9,0xa9 }, // darkgray
+ { 0x00,0x64,0x00 }, // darkgreen
+ { 0xbd,0xb7,0x6b }, // darkkhaki
+ { 0x8b,0x00,0x8b }, // darkmagenta
+ { 0x55,0x6b,0x2f }, // darkolivegreen
+ { 0xff,0x8c,0x00 }, // darkorange
+ { 0x99,0x32,0xcc }, // darkorchid
+ { 0x8b,0x00,0x00 }, // darkred
+ { 0xe9,0x96,0x7a }, // darksalmon
+ { 0x8f,0xbc,0x8f }, // darkseagreen
+ { 0x48,0x3d,0x8b }, // darkslateblue
+ { 0x2f,0x4f,0x4f }, // darkslategray
+ { 0x00,0xce,0xd1 }, // darkturquoise
+ { 0x94,0x00,0xd3 }, // darkviolet
+ { 0xff,0x14,0x93 }, // deeppink
+ { 0x00,0xbf,0xff }, // deepskyblue
+ { 0x69,0x69,0x69 }, // dimgray
+ { 0x1e,0x90,0xff }, // dodgerblue
+ { 0xb2,0x22,0x22 }, // firebrick
+ { 0xff,0xfa,0xf0 }, // floralwhite
+ { 0x22,0x8b,0x22 }, // forestgreen
+ { 0xff,0x00,0xff }, // fuchsia
+ { 0xdc,0xdc,0xdc }, // gainsboro
+ { 0xf8,0xf8,0xff }, // ghostwhite
+ { 0xff,0xd7,0x00 }, // gold
+ { 0xda,0xa5,0x20 }, // goldenrod
+ { 0x80,0x80,0x80 }, // gray
+ { 0x00,0x80,0x00 }, // green
+ { 0xad,0xff,0x2f }, // greenyellow
+ { 0xf0,0xff,0xf0 }, // honeydew
+ { 0xff,0x69,0xb4 }, // hotpink
+ { 0xcd,0x5c,0x5c }, // indianred
+ { 0x4b,0x00,0x82 }, // indigo
+ { 0xff,0xff,0xf0 }, // ivory
+ { 0xf0,0xe6,0x8c }, // khaki
+ { 0xe6,0xe6,0xfa }, // lavender
+ { 0xff,0xf0,0xf5 }, // lavenderblush
+ { 0x7c,0xfc,0x00 }, // lawngreen
+ { 0xff,0xfa,0xcd }, // lemonchiffon
+ { 0xad,0xd8,0xe6 }, // lightblue
+ { 0xf0,0x80,0x80 }, // lightcoral
+ { 0xe0,0xff,0xff }, // lightcyan
+ { 0xfa,0xfa,0xd2 }, // lightgoldenrodyellow
+ { 0x90,0xee,0x90 }, // lightgreen
+ { 0xd3,0xd3,0xd3 }, // lightgrey
+ { 0xff,0xb6,0xc1 }, // lightpink
+ { 0xff,0xa0,0x7a }, // lightsalmon
+ { 0x20,0xb2,0xaa }, // lightseagreen
+ { 0x87,0xce,0xfa }, // lightskyblue
+ { 0x77,0x88,0x99 }, // lightslategray
+ { 0xb0,0xc4,0xde }, // lightsteelblue
+ { 0xff,0xff,0xe0 }, // lightyellow
+ { 0x00,0xff,0x00 }, // lime
+ { 0x32,0xcd,0x32 }, // limegreen
+ { 0xfa,0xf0,0xe6 }, // linen
+ { 0xff,0x00,0xff }, // magenta
+ { 0x80,0x00,0x00 }, // maroon
+ { 0x66,0xcd,0xaa }, // mediumaquamarine
+ { 0x00,0x00,0xcd }, // mediumblue
+ { 0xba,0x55,0xd3 }, // mediumorchid
+ { 0x93,0x70,0xdb }, // mediumpurple
+ { 0x3c,0xb3,0x71 }, // mediumseagreen
+ { 0x7b,0x68,0xee }, // mediumslateblue
+ { 0x00,0xfa,0x9a }, // mediumspringgreen
+ { 0x48,0xd1,0xcc }, // mediumturquoise
+ { 0xc7,0x15,0x85 }, // mediumvioletred
+ { 0x19,0x19,0x70 }, // midnightblue
+ { 0xf5,0xff,0xfa }, // mintcream
+ { 0xff,0xe4,0xe1 }, // mistyrose
+ { 0xff,0xe4,0xb5 }, // moccasin
+ { 0xff,0xde,0xad }, // navajowhite
+ { 0x00,0x00,0x80 }, // navy
+ { 0xfd,0xf5,0xe6 }, // oldlace
+ { 0x80,0x80,0x00 }, // olive
+ { 0x6b,0x8e,0x23 }, // olivedrab
+ { 0xff,0xa5,0x00 }, // orange
+ { 0xff,0x45,0x00 }, // orangered
+ { 0xda,0x70,0xd6 }, // orchid
+ { 0xee,0xe8,0xaa }, // palegoldenrod
+ { 0x98,0xfb,0x98 }, // palegreen
+ { 0xaf,0xee,0xee }, // paleturquoise
+ { 0xdb,0x70,0x93 }, // palevioletred
+ { 0xff,0xef,0xd5 }, // papayawhip
+ { 0xff,0xda,0xb9 }, // peachpuff
+ { 0xcd,0x85,0x3f }, // peru
+ { 0xff,0xc0,0xcb }, // pink
+ { 0xdd,0xa0,0xdd }, // plum
+ { 0xb0,0xe0,0xe6 }, // powderblue
+ { 0x80,0x00,0x80 }, // purple
+ { 0xff,0x00,0x00 }, // red
+ { 0xbc,0x8f,0x8f }, // rosybrown
+ { 0x41,0x69,0xe1 }, // royalblue
+ { 0x8b,0x45,0x13 }, // saddlebrown
+ { 0xfa,0x80,0x72 }, // salmon
+ { 0xf4,0xa4,0x60 }, // sandybrown
+ { 0x2e,0x8b,0x57 }, // seagreen
+ { 0xff,0xf5,0xee }, // seashell
+ { 0xa0,0x52,0x2d }, // sienna
+ { 0xc0,0xc0,0xc0 }, // silver
+ { 0x87,0xce,0xeb }, // skyblue
+ { 0x6a,0x5a,0xcd }, // slateblue
+ { 0x70,0x80,0x90 }, // slategray
+ { 0xff,0xfa,0xfa }, // snow
+ { 0x00,0xff,0x7f }, // springgreen
+ { 0x46,0x82,0xb4 }, // steelblue
+ { 0xd2,0xb4,0x8c }, // tan
+ { 0x00,0x80,0x80 }, // teal
+ { 0xd8,0xbf,0xd8 }, // thistle
+ { 0xff,0x63,0x47 }, // tomato
+ { 0x40,0xe0,0xd0 }, // turquoise
+ { 0xee,0x82,0xee }, // violet
+ { 0xf5,0xde,0xb3 }, // wheat
+ { 0xff,0xff,0xff }, // white
+ { 0xf5,0xf5,0xf5 }, // whitesmoke
+ { 0xff,0xff,0x00 }, // yellow
+ { 0x9a,0xcd,0x32 }, // yellowgreen
+};
+
+const char* SkParse::FindNamedColor(const char* name, size_t len, SkColor* color) { // binary-search the named-color table; NOTE(review): 'len' is unused in this body
+ const auto rec = std::lower_bound(std::begin(gColorNames),
+ std::end (gColorNames),
+ name, // key: the color name to look up
+ [](const char* name, const char* key) {
+ return strcmp(name, key) < 0; // gColorNames is sorted in strcmp order
+ });
+
+ if (rec == std::end(gColorNames) || 0 != strcmp(name, *rec)) { // lower_bound returns first entry >= name; require an exact match
+ return nullptr; // unknown color name
+ }
+
+ if (color) {
+ int index = rec - gColorNames; // gColors is the parallel RGB array for gColorNames
+ *color = SkColorSetRGB(gColors[index].r, gColors[index].g, gColors[index].b);
+ }
+
+ return name + strlen(*rec); // advance the caller's cursor past the matched name
+}
+
+// !!! move to char utilities
+//static int count_separators(const char* str, const char* sep) {
+// char c;
+// int separators = 0;
+// while ((c = *str++) != '\0') {
+// if (strchr(sep, c) == nullptr)
+// continue;
+// do {
+// if ((c = *str++) == '\0')
+// goto goHome;
+// } while (strchr(sep, c) != nullptr);
+// separators++;
+// }
+//goHome:
+// return separators;
+//}
+
+static inline unsigned nib2byte(unsigned n) // expand a 4-bit nibble to 8 bits, e.g. 0xA -> 0xAA
+{
+ SkASSERT((n & ~0xF) == 0); // caller must pass a value in [0, 15]
+ return (n << 4) | n; // replicate the nibble into the high and low halves
+}
+
+const char* SkParse::FindColor(const char* value, SkColor* colorPtr) {
+ unsigned int oldAlpha = SkColorGetA(*colorPtr); // keep the caller's alpha when the input supplies none
+ if (value[0] == '#') { // hex forms: #rgb, #argb, #rrggbb, #aarrggbb
+ uint32_t hex;
+ const char* end = SkParse::FindHex(value + 1, &hex);
+// SkASSERT(end);
+ if (end == nullptr)
+ return end; // not valid hex digits
+ size_t len = end - value - 1; // number of hex digits parsed
+ if (len == 3 || len == 4) { // shorthand: one nibble per channel
+ unsigned a = len == 4 ? nib2byte(hex >> 12) : oldAlpha;
+ unsigned r = nib2byte((hex >> 8) & 0xF);
+ unsigned g = nib2byte((hex >> 4) & 0xF);
+ unsigned b = nib2byte(hex & 0xF);
+ *colorPtr = SkColorSetARGB(a, r, g, b);
+ return end;
+ } else if (len == 6 || len == 8) { // full form: parsed hex already matches ARGB layout
+ if (len == 6)
+ hex |= oldAlpha << 24; // no alpha digits supplied: reuse the previous alpha
+ *colorPtr = hex;
+ return end;
+ } else {
+// SkASSERT(0);
+ return nullptr; // unsupported number of hex digits
+ }
+// } else if (strchr(value, ',')) {
+// SkScalar array[4];
+// int count = count_separators(value, ",") + 1; // !!! count commas, add 1
+// SkASSERT(count == 3 || count == 4);
+// array[0] = SK_Scalar1 * 255;
+// const char* end = SkParse::FindScalars(value, &array[4 - count], count);
+// if (end == nullptr)
+// return nullptr;
+ // !!! range check for errors?
+// *colorPtr = SkColorSetARGB(SkScalarRoundToInt(array[0]), SkScalarRoundToInt(array[1]),
+// SkScalarRoundToInt(array[2]), SkScalarRoundToInt(array[3]));
+// return end;
+ } else
+ return FindNamedColor(value, strlen(value), colorPtr); // fall back to the named-color table
+}
diff --git a/gfx/skia/skia/src/utils/SkParsePath.cpp b/gfx/skia/skia/src/utils/SkParsePath.cpp
new file mode 100644
index 0000000000..c059f36625
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkParsePath.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkStream.h"
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/utils/SkParse.h"
+#include "include/utils/SkParsePath.h"
+#include "src/core/SkGeometry.h"
+
+#include <cstdio>
+
+enum class SkPathDirection;
+
+static inline bool is_between(int c, int min, int max) { // true iff min <= c <= max
+ return (unsigned)(c - min) <= (unsigned)(max - min); // single unsigned compare covers both bounds
+}
+
+static inline bool is_ws(int c) { // any control character or space counts as whitespace
+ return is_between(c, 1, 32);
+}
+
+static inline bool is_digit(int c) { // ASCII decimal digit
+ return is_between(c, '0', '9');
+}
+
+static inline bool is_sep(int c) { // SVG path values may be separated by whitespace or commas
+ return is_ws(c) || c == ',';
+}
+
+static inline bool is_lower(int c) { // ASCII lowercase letter
+ return is_between(c, 'a', 'z');
+}
+
+static inline int to_upper(int c) { // caller guarantees is_lower(c)
+ return c - 'a' + 'A';
+}
+
+static const char* skip_ws(const char str[]) { // advance past leading whitespace; str must be non-null
+ SkASSERT(str);
+ while (is_ws(*str))
+ str++;
+ return str;
+}
+
+static const char* skip_sep(const char str[]) { // advance past separators; tolerates null (an earlier parse failure)
+ if (!str) {
+ return nullptr;
+ }
+ while (is_sep(*str))
+ str++;
+ return str;
+}
+
+static const char* find_points(const char str[], SkPoint value[], int count,
+ bool isRelative, SkPoint* relative) { // parse 'count' points (2*count scalars); returns nullptr on failure
+ str = SkParse::FindScalars(str, &value[0].fX, count * 2);
+ if (isRelative) { // relative commands offset every parsed point by the current point
+ for (int index = 0; index < count; index++) {
+ value[index].fX += relative->fX;
+ value[index].fY += relative->fY;
+ }
+ }
+ return str;
+}
+
+static const char* find_scalar(const char str[], SkScalar* value,
+ bool isRelative, SkScalar relative) { // parse one scalar; returns position past trailing separators, or nullptr
+ str = SkParse::FindScalar(str, value);
+ if (!str) {
+ return nullptr; // not a number
+ }
+ if (isRelative) {
+ *value += relative; // offset by the current coordinate
+ }
+ str = skip_sep(str);
+ return str;
+}
+
+// https://www.w3.org/TR/SVG11/paths.html#PathDataBNF
+//
+// flag:
+// "0" | "1"
+static const char* find_flag(const char str[], bool* value) { // parse an arc flag per the SVG grammar above
+ if (!str) {
+ return nullptr; // propagate an earlier parse failure
+ }
+ if (str[0] != '1' && str[0] != '0') {
+ return nullptr; // a flag must be a single '0' or '1'
+ }
+ *value = str[0] != '0';
+ str = skip_sep(str + 1);
+ return str;
+}
+
+bool SkParsePath::FromSVGString(const char data[], SkPath* result) { // parse SVG path data; returns false on malformed input
+ SkPath path;
+ SkPoint first = {0, 0}; // start point of the current contour (restored on 'Z')
+ SkPoint c = {0, 0}; // current point
+ SkPoint lastc = {0, 0}; // last control point, used for 'S'/'T' reflection
+ SkPoint points[3];
+ char op = '\0';
+ char previousOp = '\0';
+ bool relative = false;
+ for (;;) {
+ if (!data) {
+ // Truncated data
+ return false;
+ }
+ data = skip_ws(data);
+ if (data[0] == '\0') {
+ break;
+ }
+ char ch = data[0];
+ if (is_digit(ch) || ch == '-' || ch == '+' || ch == '.') { // a bare number repeats the previous command
+ if (op == '\0' || op == 'Z') {
+ return false; // coordinates may not precede the first command or follow a close
+ }
+ } else if (is_sep(ch)) {
+ data = skip_sep(data);
+ } else {
+ op = ch;
+ relative = false;
+ if (is_lower(op)) { // lower-case SVG commands use relative coordinates
+ op = (char) to_upper(op);
+ relative = true;
+ }
+ data++;
+ data = skip_sep(data);
+ }
+ switch (op) {
+ case 'M':
+ data = find_points(data, points, 1, relative, &c);
+ path.moveTo(points[0]);
+ previousOp = '\0';
+ op = 'L'; // extra coordinate pairs after a moveTo are implicit lineTo's
+ c = points[0];
+ break;
+ case 'L':
+ data = find_points(data, points, 1, relative, &c);
+ path.lineTo(points[0]);
+ c = points[0];
+ break;
+ case 'H': { // horizontal line: only x is supplied
+ SkScalar x;
+ data = find_scalar(data, &x, relative, c.fX);
+ path.lineTo(x, c.fY);
+ c.fX = x;
+ } break;
+ case 'V': { // vertical line: only y is supplied
+ SkScalar y;
+ data = find_scalar(data, &y, relative, c.fY);
+ path.lineTo(c.fX, y);
+ c.fY = y;
+ } break;
+ case 'C':
+ data = find_points(data, points, 3, relative, &c);
+ goto cubicCommon;
+ case 'S': // smooth cubic: first control point is derived, not parsed
+ data = find_points(data, &points[1], 2, relative, &c);
+ points[0] = c;
+ if (previousOp == 'C' || previousOp == 'S') {
+ points[0].fX -= lastc.fX - c.fX; // reflect the previous control point across the current point
+ points[0].fY -= lastc.fY - c.fY;
+ }
+ cubicCommon:
+ path.cubicTo(points[0], points[1], points[2]);
+ lastc = points[1]; // remember the second control point for a following 'S'
+ c = points[2];
+ break;
+ case 'Q': // Quadratic Bezier Curve
+ data = find_points(data, points, 2, relative, &c);
+ goto quadraticCommon;
+ case 'T': // smooth quadratic: control point reflected from the previous quadratic
+ data = find_points(data, &points[1], 1, relative, &c);
+ points[0] = c;
+ if (previousOp == 'Q' || previousOp == 'T') {
+ points[0].fX -= lastc.fX - c.fX;
+ points[0].fY -= lastc.fY - c.fY;
+ }
+ quadraticCommon:
+ path.quadTo(points[0], points[1]);
+ lastc = points[0];
+ c = points[1];
+ break;
+ case 'A': { // elliptical arc: rx ry rotation large-arc-flag sweep-flag x y
+ SkPoint radii;
+ SkScalar angle;
+ bool largeArc, sweep;
+ if ((data = find_points(data, &radii, 1, false, nullptr))
+ && (data = skip_sep(data))
+ && (data = find_scalar(data, &angle, false, 0))
+ && (data = skip_sep(data))
+ && (data = find_flag(data, &largeArc))
+ && (data = skip_sep(data))
+ && (data = find_flag(data, &sweep))
+ && (data = skip_sep(data))
+ && (data = find_points(data, &points[0], 1, relative, &c))) {
+ path.arcTo(radii, angle, (SkPath::ArcSize) largeArc,
+ (SkPathDirection) !sweep, points[0]); // note the '!' mapping SVG's sweep flag onto SkPathDirection
+ path.getLastPt(&c);
+ }
+ } break;
+ case 'Z':
+ path.close();
+ c = first; // closing returns the current point to the contour start
+ break;
+ case '~': { // not part of SVG: treated as an absolute moveTo + lineTo pair
+ SkPoint args[2];
+ data = find_points(data, args, 2, false, nullptr);
+ path.moveTo(args[0].fX, args[0].fY);
+ path.lineTo(args[1].fX, args[1].fY);
+ } break;
+ default:
+ return false; // unrecognized command letter
+ }
+ if (previousOp == 0) {
+ first = c; // remember the contour's start point after its first command
+ }
+ previousOp = op;
+ }
+ // we're good, go ahead and swap in the result
+ result->swap(path);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void write_scalar(SkWStream* stream, SkScalar value) { // append a scalar to the stream in "%g" text form
+ char buffer[64];
+ int len = snprintf(buffer, sizeof(buffer), "%g", value); // shortest general form, e.g. "0.5" or "1e+06"
+ char* stop = buffer + len;
+ stream->write(buffer, stop - buffer);
+}
+
+SkString SkParsePath::ToSVGString(const SkPath& path, PathEncoding encoding) { // serialize a path as SVG path data
+ SkDynamicMemoryWStream stream;
+
+ SkPoint current_point{0,0};
+ const auto rel_selector = encoding == PathEncoding::Relative; // 1 when emitting relative commands, else 0
+
+ const auto append_command = [&](char cmd, const SkPoint pts[], size_t count) {
+ // Use lower case cmds for relative encoding.
+ cmd += 32 * rel_selector; // 'A' + 32 == 'a' in ASCII
+ stream.write(&cmd, 1);
+
+ for (size_t i = 0; i < count; ++i) {
+ const auto pt = pts[i] - current_point; // relative: offset from current point; absolute: current_point stays {0,0}
+ if (i > 0) {
+ stream.write(" ", 1);
+ }
+ write_scalar(&stream, pt.fX);
+ stream.write(" ", 1);
+ write_scalar(&stream, pt.fY);
+ }
+
+ SkASSERT(count > 0);
+ // For relative encoding, track the current point (otherwise == origin).
+ current_point = pts[count - 1] * rel_selector;
+ };
+
+ SkPath::Iter iter(path, false);
+ SkPoint pts[4];
+
+ for (;;) {
+ switch (iter.next(pts)) {
+ case SkPath::kConic_Verb: { // SVG has no conic: approximate with quads
+ const SkScalar tol = SK_Scalar1 / 1024; // how close to a quad
+ SkAutoConicToQuads quadder;
+ const SkPoint* quadPts = quadder.computeQuads(pts, iter.conicWeight(), tol);
+ for (int i = 0; i < quadder.countQuads(); ++i) {
+ append_command('Q', &quadPts[i*2 + 1], 2); // consecutive quads share an endpoint, hence the i*2 stride
+ }
+ } break;
+ case SkPath::kMove_Verb:
+ append_command('M', &pts[0], 1);
+ break;
+ case SkPath::kLine_Verb:
+ append_command('L', &pts[1], 1); // pts[0] is the current point; emit only the endpoint
+ break;
+ case SkPath::kQuad_Verb:
+ append_command('Q', &pts[1], 2);
+ break;
+ case SkPath::kCubic_Verb:
+ append_command('C', &pts[1], 3);
+ break;
+ case SkPath::kClose_Verb:
+ stream.write("Z", 1);
+ break;
+ case SkPath::kDone_Verb: { // only exit from the loop
+ SkString str;
+ str.resize(stream.bytesWritten());
+ stream.copyTo(str.data());
+ return str;
+ }
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkPatchUtils.cpp b/gfx/skia/skia/src/utils/SkPatchUtils.cpp
new file mode 100644
index 0000000000..eee7bae4eb
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPatchUtils.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkPatchUtils.h"
+
+#include "include/core/SkAlphaType.h"
+#include "include/core/SkColorSpace.h"
+#include "include/core/SkColorType.h"
+#include "include/core/SkImageInfo.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkSize.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkMath.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkArenaAlloc.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkColorSpacePriv.h"
+#include "src/core/SkConvertPixels.h"
+#include "src/core/SkGeometry.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+
+namespace {
+ enum CubicCtrlPts { // indices into the clockwise 12-point patch control array
+ kTopP0_CubicCtrlPts = 0,
+ kTopP1_CubicCtrlPts = 1,
+ kTopP2_CubicCtrlPts = 2,
+ kTopP3_CubicCtrlPts = 3,
+
+ kRightP0_CubicCtrlPts = 3, // shares point 3 with the top cubic
+ kRightP1_CubicCtrlPts = 4,
+ kRightP2_CubicCtrlPts = 5,
+ kRightP3_CubicCtrlPts = 6,
+
+ kBottomP0_CubicCtrlPts = 9, // indices reversed relative to the clockwise storage order
+ kBottomP1_CubicCtrlPts = 8,
+ kBottomP2_CubicCtrlPts = 7,
+ kBottomP3_CubicCtrlPts = 6, // shares point 6 with the right cubic
+
+ kLeftP0_CubicCtrlPts = 0, // shares point 0 with the top cubic
+ kLeftP1_CubicCtrlPts = 11,
+ kLeftP2_CubicCtrlPts = 10,
+ kLeftP3_CubicCtrlPts = 9, // shares point 9 with the bottom cubic
+ };
+
+ // Corner indices, also in clockwise order.
+ enum Corner {
+ kTopLeft_Corner = 0,
+ kTopRight_Corner,
+ kBottomRight_Corner,
+ kBottomLeft_Corner
+ };
+} // namespace
+
+/**
+ * Evaluator to sample the values of a cubic bezier using forward differences.
+ * Forward differences is a method for evaluating a nth degree polynomial at a uniform step by only
+ * adding precalculated values.
+ * For a linear example we have the function f(t) = m*t+b, then the value of that function at t+h
+ * would be f(t+h) = m*(t+h)+b. If we want to know the uniform step that we must add to the first
+ * evaluation f(t) then we need to substract f(t+h) - f(t) = m*t + m*h + b - m*t + b = mh. After
+ * obtaining this value (mh) we could just add this constant step to our first sampled point
+ * to compute the next one.
+ *
+ * For the cubic case the first difference gives as a result a quadratic polynomial to which we can
+ * apply again forward differences and get a linear function to which we can apply again forward
+ * differences to get a constant difference. This is why we keep an array of size 4, the 0th
+ * position keeps the sampled value while the next ones keep the quadratic, linear and constant
+ * difference values.
+ */
+
+class FwDCubicEvaluator {
+
+public:
+
+ /**
+ * Receives the 4 control points of the cubic bezier.
+ */
+
+ explicit FwDCubicEvaluator(const SkPoint points[4])
+ : fCoefs(points) { // fCoefs exposes the cubic's polynomial coefficients fA..fD
+ memcpy(fPoints, points, 4 * sizeof(SkPoint));
+
+ this->restart(1);
+ }
+
+ /**
+ * Restarts the forward differences evaluator to the first value of t = 0.
+ */
+ void restart(int divisions) {
+ fDivisions = divisions;
+ fCurrent = 0;
+ fMax = fDivisions + 1; // one past the last sample index; t spans [0, 1] in fDivisions uniform steps
+ skvx::float2 h = 1.f / fDivisions; // uniform step in t
+ skvx::float2 h2 = h * h;
+ skvx::float2 h3 = h2 * h;
+ skvx::float2 fwDiff3 = 6 * fCoefs.fA * h3; // the third forward difference of a cubic is constant
+ fFwDiff[3] = to_point(fwDiff3);
+ fFwDiff[2] = to_point(fwDiff3 + times_2(fCoefs.fB) * h2); // second difference at t = 0
+ fFwDiff[1] = to_point(fCoefs.fA * h3 + fCoefs.fB * h2 + fCoefs.fC * h); // first difference at t = 0
+ fFwDiff[0] = to_point(fCoefs.fD); // f(0) == D
+ }
+
+ /**
+ * Check if the evaluator is still within the range of 0<=t<=1
+ */
+ bool done() const {
+ return fCurrent > fMax;
+ }
+
+ /**
+ * Call next to obtain the SkPoint sampled and move to the next one.
+ */
+ SkPoint next() {
+ SkPoint point = fFwDiff[0]; // current sample
+ fFwDiff[0] += fFwDiff[1]; // advance by cascading the difference table
+ fFwDiff[1] += fFwDiff[2];
+ fFwDiff[2] += fFwDiff[3];
+ fCurrent++;
+ return point;
+ }
+
+ const SkPoint* getCtrlPoints() const {
+ return fPoints;
+ }
+
+private:
+ SkCubicCoeff fCoefs; // polynomial form of the cubic: f(t) = A*t^3 + B*t^2 + C*t + D
+ int fMax, fCurrent, fDivisions;
+ SkPoint fFwDiff[4], fPoints[4]; // fFwDiff[0] = current value, [1..3] = 1st..3rd forward differences
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+// size in pixels of each partition per axis, adjust this knob
+static const int kPartitionSize = 10;
+
+/**
+ * Calculate the approximate arc length given a bezier curve's control points.
+ * Returns -1 if bad calc (i.e. non-finite)
+ */
+static SkScalar approx_arc_length(const SkPoint points[], int count) { // sums control-polygon chord lengths
+ if (count < 2) {
+ return 0;
+ }
+ SkScalar arcLength = 0;
+ for (int i = 0; i < count - 1; i++) {
+ arcLength += SkPoint::Distance(points[i], points[i + 1]);
+ }
+ return SkScalarIsFinite(arcLength) ? arcLength : -1; // -1 signals a non-finite result
+}
+
+static SkScalar bilerp(SkScalar tx, SkScalar ty, SkScalar c00, SkScalar c10, SkScalar c01,
+ SkScalar c11) { // bilinear interpolation of four scalar corner values
+ SkScalar a = c00 * (1.f - tx) + c10 * tx; // lerp along x at y = 0
+ SkScalar b = c01 * (1.f - tx) + c11 * tx; // lerp along x at y = 1
+ return a * (1.f - ty) + b * ty; // lerp the two results along y
+}
+
+static skvx::float4 bilerp(SkScalar tx, SkScalar ty,
+ const skvx::float4& c00,
+ const skvx::float4& c10,
+ const skvx::float4& c01,
+ const skvx::float4& c11) { // four-lane overload, used for RGBA corner colors
+ auto a = c00 * (1.f - tx) + c10 * tx;
+ auto b = c01 * (1.f - tx) + c11 * tx;
+ return a * (1.f - ty) + b * ty;
+}
+
+SkISize SkPatchUtils::GetLevelOfDetail(const SkPoint cubics[12], const SkMatrix* matrix) {
+ // Approximate length of each cubic.
+ SkPoint pts[kNumPtsCubic];
+ SkPatchUtils::GetTopCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic); // measure in mapped space so LOD tracks the on-screen size
+ SkScalar topLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::GetBottomCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar bottomLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::GetLeftCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar leftLength = approx_arc_length(pts, kNumPtsCubic);
+
+ SkPatchUtils::GetRightCubic(cubics, pts);
+ matrix->mapPoints(pts, kNumPtsCubic);
+ SkScalar rightLength = approx_arc_length(pts, kNumPtsCubic);
+
+ if (topLength < 0 || bottomLength < 0 || leftLength < 0 || rightLength < 0) {
+ return {0, 0}; // negative length is a sentinel for bad length (i.e. non-finite)
+ }
+
+ // Level of detail per axis, based on the larger side between top and bottom or left and right
+ int lodX = static_cast<int>(std::max(topLength, bottomLength) / kPartitionSize);
+ int lodY = static_cast<int>(std::max(leftLength, rightLength) / kPartitionSize);
+
+ return SkISize::Make(std::max(8, lodX), std::max(8, lodY)); // at least 8 subdivisions per axis
+}
+
+void SkPatchUtils::GetTopCubic(const SkPoint cubics[12], SkPoint points[4]) { // copy the top edge's 4 control points
+ points[0] = cubics[kTopP0_CubicCtrlPts];
+ points[1] = cubics[kTopP1_CubicCtrlPts];
+ points[2] = cubics[kTopP2_CubicCtrlPts];
+ points[3] = cubics[kTopP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::GetBottomCubic(const SkPoint cubics[12], SkPoint points[4]) { // copy the bottom edge's 4 control points
+ points[0] = cubics[kBottomP0_CubicCtrlPts];
+ points[1] = cubics[kBottomP1_CubicCtrlPts];
+ points[2] = cubics[kBottomP2_CubicCtrlPts];
+ points[3] = cubics[kBottomP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::GetLeftCubic(const SkPoint cubics[12], SkPoint points[4]) { // copy the left edge's 4 control points
+ points[0] = cubics[kLeftP0_CubicCtrlPts];
+ points[1] = cubics[kLeftP1_CubicCtrlPts];
+ points[2] = cubics[kLeftP2_CubicCtrlPts];
+ points[3] = cubics[kLeftP3_CubicCtrlPts];
+}
+
+void SkPatchUtils::GetRightCubic(const SkPoint cubics[12], SkPoint points[4]) { // copy the right edge's 4 control points
+ points[0] = cubics[kRightP0_CubicCtrlPts];
+ points[1] = cubics[kRightP1_CubicCtrlPts];
+ points[2] = cubics[kRightP2_CubicCtrlPts];
+ points[3] = cubics[kRightP3_CubicCtrlPts];
+}
+
+static void skcolor_to_float(SkPMColor4f* dst, const SkColor* src, int count, SkColorSpace* dstCS) { // unpremul sRGB SkColors -> premul float colors in dstCS
+ SkImageInfo srcInfo = SkImageInfo::Make(count, 1, kBGRA_8888_SkColorType,
+ kUnpremul_SkAlphaType, SkColorSpace::MakeSRGB());
+ SkImageInfo dstInfo = SkImageInfo::Make(count, 1, kRGBA_F32_SkColorType,
+ kPremul_SkAlphaType, sk_ref_sp(dstCS));
+ SkAssertResult(SkConvertPixels(dstInfo, dst, 0, srcInfo, src, 0)); // treats the arrays as count x 1 images
+}
+
+static void float_to_skcolor(SkColor* dst, const SkPMColor4f* src, int count, SkColorSpace* srcCS) { // inverse of skcolor_to_float
+ SkImageInfo srcInfo = SkImageInfo::Make(count, 1, kRGBA_F32_SkColorType,
+ kPremul_SkAlphaType, sk_ref_sp(srcCS));
+ SkImageInfo dstInfo = SkImageInfo::Make(count, 1, kBGRA_8888_SkColorType,
+ kUnpremul_SkAlphaType, SkColorSpace::MakeSRGB());
+ SkAssertResult(SkConvertPixels(dstInfo, dst, 0, srcInfo, src, 0));
+}
+
+sk_sp<SkVertices> SkPatchUtils::MakeVertices(const SkPoint cubics[12], const SkColor srcColors[4],
+ const SkPoint srcTexCoords[4], int lodX, int lodY,
+ SkColorSpace* colorSpace) {
+ if (lodX < 1 || lodY < 1 || nullptr == cubics) {
+ return nullptr;
+ }
+
+ // check for overflow in multiplication
+ const int64_t lodX64 = (lodX + 1),
+ lodY64 = (lodY + 1),
+ mult64 = lodX64 * lodY64;
+ if (mult64 > SK_MaxS32) {
+ return nullptr;
+ }
+
+ // Treat null interpolation space as sRGB.
+ if (!colorSpace) {
+ colorSpace = sk_srgb_singleton();
+ }
+
+ int vertexCount = SkToS32(mult64); // (lodX + 1) * (lodY + 1) grid vertices
+ // it is recommended to generate draw calls of no more than 65536 indices, so we never generate
+ // more than 60000 indices. To accomplish that we resize the LOD and vertex count
+ if (vertexCount > 10000 || lodX > 200 || lodY > 200) {
+ float weightX = static_cast<float>(lodX) / (lodX + lodY); // preserve the lodX:lodY ratio while shrinking
+ float weightY = static_cast<float>(lodY) / (lodX + lodY);
+
+ // 200 comes from the 100 * 2 which is the max value of vertices because of the limit of
+ // 60000 indices ( sqrt(60000 / 6) that comes from data->fIndexCount = lodX * lodY * 6)
+ // Need a min of 1 since we later divide by lod
+ lodX = std::max(1, sk_float_floor2int_no_saturate(weightX * 200));
+ lodY = std::max(1, sk_float_floor2int_no_saturate(weightY * 200));
+ vertexCount = (lodX + 1) * (lodY + 1);
+ }
+ const int indexCount = lodX * lodY * 6; // two triangles (6 indices) per grid cell
+ uint32_t flags = 0;
+ if (srcTexCoords) {
+ flags |= SkVertices::kHasTexCoords_BuilderFlag;
+ }
+ if (srcColors) {
+ flags |= SkVertices::kHasColors_BuilderFlag;
+ }
+
+ SkSTArenaAlloc<2048> alloc; // scratch storage for the float-color conversion buffers
+ SkPMColor4f* cornerColors = srcColors ? alloc.makeArray<SkPMColor4f>(4) : nullptr;
+ SkPMColor4f* tmpColors = srcColors ? alloc.makeArray<SkPMColor4f>(vertexCount) : nullptr;
+
+ SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, vertexCount, indexCount, flags);
+ SkPoint* pos = builder.positions();
+ SkPoint* texs = builder.texCoords(); // null unless kHasTexCoords_BuilderFlag was set
+ uint16_t* indices = builder.indices();
+
+ if (cornerColors) {
+ skcolor_to_float(cornerColors, srcColors, kNumCorners, colorSpace); // interpolate colors in float precision
+ }
+
+ SkPoint pts[kNumPtsCubic];
+ SkPatchUtils::GetBottomCubic(cubics, pts);
+ FwDCubicEvaluator fBottom(pts);
+ SkPatchUtils::GetTopCubic(cubics, pts);
+ FwDCubicEvaluator fTop(pts);
+ SkPatchUtils::GetLeftCubic(cubics, pts);
+ FwDCubicEvaluator fLeft(pts);
+ SkPatchUtils::GetRightCubic(cubics, pts);
+ FwDCubicEvaluator fRight(pts);
+
+ fBottom.restart(lodX); // top/bottom edges are sampled along x
+ fTop.restart(lodX);
+
+ SkScalar u = 0.0f;
+ int stride = lodY + 1; // vertices per column
+ for (int x = 0; x <= lodX; x++) {
+ SkPoint bottom = fBottom.next(), top = fTop.next();
+ fLeft.restart(lodY); // left/right edges re-sampled along y for every column
+ fRight.restart(lodY);
+ SkScalar v = 0.f;
+ for (int y = 0; y <= lodY; y++) {
+ int dataIndex = x * (lodY + 1) + y; // column-major vertex layout
+
+ SkPoint left = fLeft.next(), right = fRight.next();
+
+ SkPoint s0 = SkPoint::Make((1.0f - v) * top.x() + v * bottom.x(), // lerp between top and bottom samples
+ (1.0f - v) * top.y() + v * bottom.y());
+ SkPoint s1 = SkPoint::Make((1.0f - u) * left.x() + u * right.x(), // lerp between left and right samples
+ (1.0f - u) * left.y() + u * right.y());
+ SkPoint s2 = SkPoint::Make( // bilinear blend of the four corners (correction term)
+ (1.0f - v) * ((1.0f - u) * fTop.getCtrlPoints()[0].x()
+ + u * fTop.getCtrlPoints()[3].x())
+ + v * ((1.0f - u) * fBottom.getCtrlPoints()[0].x()
+ + u * fBottom.getCtrlPoints()[3].x()),
+ (1.0f - v) * ((1.0f - u) * fTop.getCtrlPoints()[0].y()
+ + u * fTop.getCtrlPoints()[3].y())
+ + v * ((1.0f - u) * fBottom.getCtrlPoints()[0].y()
+ + u * fBottom.getCtrlPoints()[3].y()));
+ pos[dataIndex] = s0 + s1 - s2; // Coons-patch formula: edge blends minus the corner bilerp
+
+ if (cornerColors) {
+ bilerp(u, v, skvx::float4::Load(cornerColors[kTopLeft_Corner].vec()),
+ skvx::float4::Load(cornerColors[kTopRight_Corner].vec()),
+ skvx::float4::Load(cornerColors[kBottomLeft_Corner].vec()),
+ skvx::float4::Load(cornerColors[kBottomRight_Corner].vec()))
+ .store(tmpColors[dataIndex].vec());
+ }
+
+ if (texs) {
+ texs[dataIndex] = SkPoint::Make(bilerp(u, v, srcTexCoords[kTopLeft_Corner].x(),
+ srcTexCoords[kTopRight_Corner].x(),
+ srcTexCoords[kBottomLeft_Corner].x(),
+ srcTexCoords[kBottomRight_Corner].x()),
+ bilerp(u, v, srcTexCoords[kTopLeft_Corner].y(),
+ srcTexCoords[kTopRight_Corner].y(),
+ srcTexCoords[kBottomLeft_Corner].y(),
+ srcTexCoords[kBottomRight_Corner].y()));
+
+ }
+
+ if(x < lodX && y < lodY) { // emit the two triangles of the cell whose top-left vertex this is
+ int i = 6 * (x * lodY + y);
+ indices[i] = x * stride + y;
+ indices[i + 1] = x * stride + 1 + y;
+ indices[i + 2] = (x + 1) * stride + 1 + y;
+ indices[i + 3] = indices[i];
+ indices[i + 4] = indices[i + 2];
+ indices[i + 5] = (x + 1) * stride + y;
+ }
+ v = SkTPin(v + 1.f / lodY, 0.0f, 1.0f); // pin guards against float error pushing past 1
+ }
+ u = SkTPin(u + 1.f / lodX, 0.0f, 1.0f);
+ }
+
+ if (tmpColors) {
+ float_to_skcolor(builder.colors(), tmpColors, vertexCount, colorSpace); // convert back to 8888 vertex colors
+ }
+ return builder.detach();
+}
diff --git a/gfx/skia/skia/src/utils/SkPatchUtils.h b/gfx/skia/skia/src/utils/SkPatchUtils.h
new file mode 100644
index 0000000000..6b89d84d9e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPatchUtils.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPatchUtils_DEFINED
+#define SkPatchUtils_DEFINED
+
+#include "include/core/SkColor.h"
+#include "include/core/SkRefCnt.h"
+
+class SkColorSpace;
+class SkMatrix;
+class SkVertices;
+struct SkISize;
+struct SkPoint;
+
+class SkPatchUtils {
+
+public:
+ // Enums for control points based on the order specified in the constructor (clockwise).
+ enum {
+ kNumCtrlPts = 12, // four edge cubics sharing their corner points
+ kNumCorners = 4,
+ kNumPtsCubic = 4
+ };
+
+ /**
+ * Get the points corresponding to the top cubic of cubics.
+ */
+ static void GetTopCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the bottom cubic of cubics.
+ */
+ static void GetBottomCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the left cubic of cubics.
+ */
+ static void GetLeftCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Get the points corresponding to the right cubic of cubics.
+ */
+ static void GetRightCubic(const SkPoint cubics[12], SkPoint points[4]);
+
+ /**
+ * Method that calculates a level of detail (number of subdivisions) for a patch in both axis.
+ */
+ static SkISize GetLevelOfDetail(const SkPoint cubics[12], const SkMatrix* matrix);
+
+ static sk_sp<SkVertices> MakeVertices(const SkPoint cubics[12], const SkColor colors[4],
+ const SkPoint texCoords[4], int lodX, int lodY,
+ SkColorSpace* colorSpace = nullptr); // colors/texCoords may be null; null colorSpace means sRGB
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkPolyUtils.cpp b/gfx/skia/skia/src/utils/SkPolyUtils.cpp
new file mode 100644
index 0000000000..334fa7576b
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPolyUtils.cpp
@@ -0,0 +1,1774 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkPolyUtils.h"
+
+#include "include/core/SkRect.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkDebug.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkMalloc.h"
+#include "include/private/base/SkTArray.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/base/SkTDPQueue.h"
+#include "src/base/SkTInternalLList.h"
+#include "src/base/SkVx.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkRectPriv.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <new>
+
+using namespace skia_private;
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+//////////////////////////////////////////////////////////////////////////////////
+// Helper data structures and functions
+
+struct OffsetSegment {
+ SkPoint fP0;
+ SkVector fV;
+};
+
+constexpr SkScalar kCrossTolerance = SK_ScalarNearlyZero * SK_ScalarNearlyZero;
+
+// Computes perpDot for point p compared to segment defined by origin p0 and vector v.
+// A positive value means the point is to the left of the segment,
+// negative is to the right, 0 is collinear.
+static int compute_side(const SkPoint& p0, const SkVector& v, const SkPoint& p) {
+ SkVector w = p - p0;
+ SkScalar perpDot = v.cross(w);
+ if (!SkScalarNearlyZero(perpDot, kCrossTolerance)) {
+ return ((perpDot > 0) ? 1 : -1);
+ }
+
+ return 0;
+}
+
+// Returns 1 for ccw, -1 for cw and 0 if zero signed area (either degenerate or self-intersecting)
+int SkGetPolygonWinding(const SkPoint* polygonVerts, int polygonSize) {
+ if (polygonSize < 3) {
+ return 0;
+ }
+
+ // compute area and use sign to determine winding
+ SkScalar quadArea = 0;
+ SkVector v0 = polygonVerts[1] - polygonVerts[0];
+ for (int curr = 2; curr < polygonSize; ++curr) {
+ SkVector v1 = polygonVerts[curr] - polygonVerts[0];
+ quadArea += v0.cross(v1);
+ v0 = v1;
+ }
+ if (SkScalarNearlyZero(quadArea, kCrossTolerance)) {
+ return 0;
+ }
+ // 1 == ccw, -1 == cw
+ return (quadArea > 0) ? 1 : -1;
+}
+
+// Compute difference vector to offset p0-p1 'offset' units in direction specified by 'side'
+bool compute_offset_vector(const SkPoint& p0, const SkPoint& p1, SkScalar offset, int side,
+ SkPoint* vector) {
+ SkASSERT(side == -1 || side == 1);
+    // build the perpendicular to p0->p1, scaled to length 'offset' and flipped to the requested side
+ SkVector perp = SkVector::Make(p0.fY - p1.fY, p1.fX - p0.fX);
+ if (!perp.setLength(offset*side)) {
+ return false;
+ }
+ *vector = perp;
+ return true;
+}
+
+// check interval to see if intersection is in segment
+static inline bool outside_interval(SkScalar numer, SkScalar denom, bool denomPositive) {
+ return (denomPositive && (numer < 0 || numer > denom)) ||
+ (!denomPositive && (numer > 0 || numer < denom));
+}
+
+// special zero-length test when we're using vdotv as a denominator
+static inline bool zero_length(const SkPoint& v, SkScalar vdotv) {
+ return !(SkScalarsAreFinite(v.fX, v.fY) && vdotv);
+}
+
+// Compute the intersection 'p' between segments s0 and s1, if any.
+// 's' is the parametric value for the intersection along 's0' & 't' is the same for 's1'.
+// Returns false if there is no intersection.
+// If the length squared of a segment is 0, then we treat the segment as degenerate
+// and use only the first endpoint for tests.
+static bool compute_intersection(const OffsetSegment& s0, const OffsetSegment& s1,
+ SkPoint* p, SkScalar* s, SkScalar* t) {
+ const SkVector& v0 = s0.fV;
+ const SkVector& v1 = s1.fV;
+ SkVector w = s1.fP0 - s0.fP0;
+ SkScalar denom = v0.cross(v1);
+ bool denomPositive = (denom > 0);
+ SkScalar sNumer, tNumer;
+ if (SkScalarNearlyZero(denom, kCrossTolerance)) {
+ // segments are parallel, but not collinear
+ if (!SkScalarNearlyZero(w.cross(v0), kCrossTolerance) ||
+ !SkScalarNearlyZero(w.cross(v1), kCrossTolerance)) {
+ return false;
+ }
+
+ // Check for zero-length segments
+ SkScalar v0dotv0 = v0.dot(v0);
+ if (zero_length(v0, v0dotv0)) {
+ // Both are zero-length
+ SkScalar v1dotv1 = v1.dot(v1);
+ if (zero_length(v1, v1dotv1)) {
+ // Check if they're the same point
+ if (!SkPointPriv::CanNormalize(w.fX, w.fY)) {
+ *p = s0.fP0;
+ *s = 0;
+ *t = 0;
+ return true;
+ } else {
+ // Intersection is indeterminate
+ return false;
+ }
+ }
+ // Otherwise project segment0's origin onto segment1
+ tNumer = v1.dot(-w);
+ denom = v1dotv1;
+ if (outside_interval(tNumer, denom, true)) {
+ return false;
+ }
+ sNumer = 0;
+ } else {
+ // Project segment1's endpoints onto segment0
+ sNumer = v0.dot(w);
+ denom = v0dotv0;
+ tNumer = 0;
+ if (outside_interval(sNumer, denom, true)) {
+ // The first endpoint doesn't lie on segment0
+ // If segment1 is degenerate, then there's no collision
+ SkScalar v1dotv1 = v1.dot(v1);
+ if (zero_length(v1, v1dotv1)) {
+ return false;
+ }
+
+ // Otherwise try the other one
+ SkScalar oldSNumer = sNumer;
+ sNumer = v0.dot(w + v1);
+ tNumer = denom;
+ if (outside_interval(sNumer, denom, true)) {
+ // it's possible that segment1's interval surrounds segment0
+ // this is false if params have the same signs, and in that case no collision
+ if (sNumer*oldSNumer > 0) {
+ return false;
+ }
+ // otherwise project segment0's endpoint onto segment1 instead
+ sNumer = 0;
+ tNumer = v1.dot(-w);
+ denom = v1dotv1;
+ }
+ }
+ }
+ } else {
+ sNumer = w.cross(v1);
+ if (outside_interval(sNumer, denom, denomPositive)) {
+ return false;
+ }
+ tNumer = w.cross(v0);
+ if (outside_interval(tNumer, denom, denomPositive)) {
+ return false;
+ }
+ }
+
+ SkScalar localS = sNumer/denom;
+ SkScalar localT = tNumer/denom;
+
+ *p = s0.fP0 + v0*localS;
+ *s = localS;
+ *t = localT;
+
+ return true;
+}
+
+bool SkIsConvexPolygon(const SkPoint* polygonVerts, int polygonSize) {
+ if (polygonSize < 3) {
+ return false;
+ }
+
+ SkScalar lastPerpDot = 0;
+ int xSignChangeCount = 0;
+ int ySignChangeCount = 0;
+
+ int prevIndex = polygonSize - 1;
+ int currIndex = 0;
+ int nextIndex = 1;
+ SkVector v0 = polygonVerts[currIndex] - polygonVerts[prevIndex];
+ SkScalar lastVx = v0.fX;
+ SkScalar lastVy = v0.fY;
+ SkVector v1 = polygonVerts[nextIndex] - polygonVerts[currIndex];
+ for (int i = 0; i < polygonSize; ++i) {
+ if (!polygonVerts[i].isFinite()) {
+ return false;
+ }
+
+ // Check that winding direction is always the same (otherwise we have a reflex vertex)
+ SkScalar perpDot = v0.cross(v1);
+ if (lastPerpDot*perpDot < 0) {
+ return false;
+ }
+ if (0 != perpDot) {
+ lastPerpDot = perpDot;
+ }
+
+ // Check that the signs of the edge vectors don't change more than twice per coordinate
+ if (lastVx*v1.fX < 0) {
+ xSignChangeCount++;
+ }
+ if (lastVy*v1.fY < 0) {
+ ySignChangeCount++;
+ }
+ if (xSignChangeCount > 2 || ySignChangeCount > 2) {
+ return false;
+ }
+ prevIndex = currIndex;
+ currIndex = nextIndex;
+ nextIndex = (currIndex + 1) % polygonSize;
+ if (v1.fX != 0) {
+ lastVx = v1.fX;
+ }
+ if (v1.fY != 0) {
+ lastVy = v1.fY;
+ }
+ v0 = v1;
+ v1 = polygonVerts[nextIndex] - polygonVerts[currIndex];
+ }
+
+ return true;
+}
+
+struct OffsetEdge {
+ OffsetEdge* fPrev;
+ OffsetEdge* fNext;
+ OffsetSegment fOffset;
+ SkPoint fIntersection;
+ SkScalar fTValue;
+ uint16_t fIndex;
+ uint16_t fEnd;
+
+ void init(uint16_t start = 0, uint16_t end = 0) {
+ fIntersection = fOffset.fP0;
+ fTValue = SK_ScalarMin;
+ fIndex = start;
+ fEnd = end;
+ }
+
+ // special intersection check that looks for endpoint intersection
+ bool checkIntersection(const OffsetEdge* that,
+ SkPoint* p, SkScalar* s, SkScalar* t) {
+ if (this->fEnd == that->fIndex) {
+ SkPoint p1 = this->fOffset.fP0 + this->fOffset.fV;
+ if (SkPointPriv::EqualsWithinTolerance(p1, that->fOffset.fP0)) {
+ *p = p1;
+ *s = SK_Scalar1;
+ *t = 0;
+ return true;
+ }
+ }
+
+ return compute_intersection(this->fOffset, that->fOffset, p, s, t);
+ }
+
+ // computes the line intersection and then the "distance" from that to this
+ // this is really a signed squared distance, where negative means that
+ // the intersection lies inside this->fOffset
+ SkScalar computeCrossingDistance(const OffsetEdge* that) {
+ const OffsetSegment& s0 = this->fOffset;
+ const OffsetSegment& s1 = that->fOffset;
+ const SkVector& v0 = s0.fV;
+ const SkVector& v1 = s1.fV;
+
+ SkScalar denom = v0.cross(v1);
+ if (SkScalarNearlyZero(denom, kCrossTolerance)) {
+ // segments are parallel
+ return SK_ScalarMax;
+ }
+
+ SkVector w = s1.fP0 - s0.fP0;
+ SkScalar localS = w.cross(v1) / denom;
+ if (localS < 0) {
+ localS = -localS;
+ } else {
+ localS -= SK_Scalar1;
+ }
+
+ localS *= SkScalarAbs(localS);
+ localS *= v0.dot(v0);
+
+ return localS;
+ }
+
+};
+
+static void remove_node(const OffsetEdge* node, OffsetEdge** head) {
+ // remove from linked list
+ node->fPrev->fNext = node->fNext;
+ node->fNext->fPrev = node->fPrev;
+ if (node == *head) {
+ *head = (node->fNext == node) ? nullptr : node->fNext;
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////
+
+// The objective here is to inset all of the edges by the given distance, and then
+// remove any invalid inset edges by detecting right-hand turns. In a ccw polygon,
+// we should only be making left-hand turns (for cw polygons, we use the winding
+// parameter to reverse this). We detect this by checking whether the second intersection
+// on an edge is closer to its tail than the first one.
+//
+// We might also have the case that there is no intersection between two neighboring inset edges.
+// In this case, one edge will lie to the right of the other and should be discarded along with
+// its previous intersection (if any).
+//
+// Note: the assumption is that inputPolygon is convex and has no coincident points.
+//
+bool SkInsetConvexPolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+ SkScalar inset, SkTDArray<SkPoint>* insetPolygon) {
+ if (inputPolygonSize < 3) {
+ return false;
+ }
+
+ // restrict this to match other routines
+ // practically we don't want anything bigger than this anyway
+ if (inputPolygonSize > std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+
+ // can't inset by a negative or non-finite amount
+ if (inset < -SK_ScalarNearlyZero || !SkScalarIsFinite(inset)) {
+ return false;
+ }
+
+ // insetting close to zero just returns the original poly
+ if (inset <= SK_ScalarNearlyZero) {
+ for (int i = 0; i < inputPolygonSize; ++i) {
+ *insetPolygon->append() = inputPolygonVerts[i];
+ }
+ return true;
+ }
+
+ // get winding direction
+ int winding = SkGetPolygonWinding(inputPolygonVerts, inputPolygonSize);
+ if (0 == winding) {
+ return false;
+ }
+
+ // set up
+ AutoSTMalloc<64, OffsetEdge> edgeData(inputPolygonSize);
+ int prev = inputPolygonSize - 1;
+ for (int curr = 0; curr < inputPolygonSize; prev = curr, ++curr) {
+ int next = (curr + 1) % inputPolygonSize;
+ if (!inputPolygonVerts[curr].isFinite()) {
+ return false;
+ }
+ // check for convexity just to be sure
+ if (compute_side(inputPolygonVerts[prev], inputPolygonVerts[curr] - inputPolygonVerts[prev],
+ inputPolygonVerts[next])*winding < 0) {
+ return false;
+ }
+ SkVector v = inputPolygonVerts[next] - inputPolygonVerts[curr];
+ SkVector perp = SkVector::Make(-v.fY, v.fX);
+ perp.setLength(inset*winding);
+ edgeData[curr].fPrev = &edgeData[prev];
+ edgeData[curr].fNext = &edgeData[next];
+ edgeData[curr].fOffset.fP0 = inputPolygonVerts[curr] + perp;
+ edgeData[curr].fOffset.fV = v;
+ edgeData[curr].init();
+ }
+
+ OffsetEdge* head = &edgeData[0];
+ OffsetEdge* currEdge = head;
+ OffsetEdge* prevEdge = currEdge->fPrev;
+ int insetVertexCount = inputPolygonSize;
+ unsigned int iterations = 0;
+ unsigned int maxIterations = inputPolygonSize * inputPolygonSize;
+ while (head && prevEdge != currEdge) {
+ ++iterations;
+ // we should check each edge against each other edge at most once
+ if (iterations > maxIterations) {
+ return false;
+ }
+
+ SkScalar s, t;
+ SkPoint intersection;
+ if (compute_intersection(prevEdge->fOffset, currEdge->fOffset,
+ &intersection, &s, &t)) {
+ // if new intersection is further back on previous inset from the prior intersection
+ if (s < prevEdge->fTValue) {
+ // no point in considering this one again
+ remove_node(prevEdge, &head);
+ --insetVertexCount;
+ // go back one segment
+ prevEdge = prevEdge->fPrev;
+ // we've already considered this intersection, we're done
+ } else if (currEdge->fTValue > SK_ScalarMin &&
+ SkPointPriv::EqualsWithinTolerance(intersection,
+ currEdge->fIntersection,
+ 1.0e-6f)) {
+ break;
+ } else {
+ // add intersection
+ currEdge->fIntersection = intersection;
+ currEdge->fTValue = t;
+
+ // go to next segment
+ prevEdge = currEdge;
+ currEdge = currEdge->fNext;
+ }
+ } else {
+ // if prev to right side of curr
+ int side = winding*compute_side(currEdge->fOffset.fP0,
+ currEdge->fOffset.fV,
+ prevEdge->fOffset.fP0);
+ if (side < 0 &&
+ side == winding*compute_side(currEdge->fOffset.fP0,
+ currEdge->fOffset.fV,
+ prevEdge->fOffset.fP0 + prevEdge->fOffset.fV)) {
+ // no point in considering this one again
+ remove_node(prevEdge, &head);
+ --insetVertexCount;
+ // go back one segment
+ prevEdge = prevEdge->fPrev;
+ } else {
+ // move to next segment
+ remove_node(currEdge, &head);
+ --insetVertexCount;
+ currEdge = currEdge->fNext;
+ }
+ }
+ }
+
+ // store all the valid intersections that aren't nearly coincident
+ // TODO: look at the main algorithm and see if we can detect these better
+ insetPolygon->reset();
+ if (!head) {
+ return false;
+ }
+
+ static constexpr SkScalar kCleanupTolerance = 0.01f;
+ if (insetVertexCount >= 0) {
+ insetPolygon->reserve(insetVertexCount);
+ }
+ int currIndex = 0;
+ *insetPolygon->append() = head->fIntersection;
+ currEdge = head->fNext;
+ while (currEdge != head) {
+ if (!SkPointPriv::EqualsWithinTolerance(currEdge->fIntersection,
+ (*insetPolygon)[currIndex],
+ kCleanupTolerance)) {
+ *insetPolygon->append() = currEdge->fIntersection;
+ currIndex++;
+ }
+ currEdge = currEdge->fNext;
+ }
+ // make sure the first and last points aren't coincident
+ if (currIndex >= 1 &&
+ SkPointPriv::EqualsWithinTolerance((*insetPolygon)[0], (*insetPolygon)[currIndex],
+ kCleanupTolerance)) {
+ insetPolygon->pop_back();
+ }
+
+ return SkIsConvexPolygon(insetPolygon->begin(), insetPolygon->size());
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// compute the number of points needed for a circular join when offsetting a reflex vertex
+bool SkComputeRadialSteps(const SkVector& v1, const SkVector& v2, SkScalar offset,
+ SkScalar* rotSin, SkScalar* rotCos, int* n) {
+ const SkScalar kRecipPixelsPerArcSegment = 0.25f;
+
+ SkScalar rCos = v1.dot(v2);
+ if (!SkScalarIsFinite(rCos)) {
+ return false;
+ }
+ SkScalar rSin = v1.cross(v2);
+ if (!SkScalarIsFinite(rSin)) {
+ return false;
+ }
+ SkScalar theta = SkScalarATan2(rSin, rCos);
+
+ SkScalar floatSteps = SkScalarAbs(offset*theta*kRecipPixelsPerArcSegment);
+ // limit the number of steps to at most max uint16_t (that's all we can index)
+ // knock one value off the top to account for rounding
+ if (floatSteps >= std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+ int steps = SkScalarRoundToInt(floatSteps);
+
+ SkScalar dTheta = steps > 0 ? theta / steps : 0;
+ *rotSin = SkScalarSin(dTheta);
+ *rotCos = SkScalarCos(dTheta);
+ // Our offset may be so large that we end up with a tiny dTheta, in which case we
+ // lose precision when computing rotSin and rotCos.
+ if (steps > 0 && (*rotSin == 0 || *rotCos == 1)) {
+ return false;
+ }
+ *n = steps;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// a point is "left" to another if its x-coord is less, or if equal, its y-coord is greater
+static bool left(const SkPoint& p0, const SkPoint& p1) {
+ return p0.fX < p1.fX || (!(p0.fX > p1.fX) && p0.fY > p1.fY);
+}
+
+// a point is "right" to another if its x-coord is greater, or if equal, its y-coord is less
+static bool right(const SkPoint& p0, const SkPoint& p1) {
+ return p0.fX > p1.fX || (!(p0.fX < p1.fX) && p0.fY < p1.fY);
+}
+
+struct Vertex {
+ static bool Left(const Vertex& qv0, const Vertex& qv1) {
+ return left(qv0.fPosition, qv1.fPosition);
+ }
+
+    // packed to fit into 16 bytes (so several Vertex structs share a typical cache line)
+ SkPoint fPosition;
+ uint16_t fIndex; // index in unsorted polygon
+ uint16_t fPrevIndex; // indices for previous and next vertex in unsorted polygon
+ uint16_t fNextIndex;
+ uint16_t fFlags;
+};
+
+enum VertexFlags {
+ kPrevLeft_VertexFlag = 0x1,
+ kNextLeft_VertexFlag = 0x2,
+};
+
+struct ActiveEdge {
+ ActiveEdge() : fChild{ nullptr, nullptr }, fAbove(nullptr), fBelow(nullptr), fRed(false) {}
+ ActiveEdge(const SkPoint& p0, const SkVector& v, uint16_t index0, uint16_t index1)
+ : fSegment({ p0, v })
+ , fIndex0(index0)
+ , fIndex1(index1)
+ , fAbove(nullptr)
+ , fBelow(nullptr)
+ , fRed(true) {
+ fChild[0] = nullptr;
+ fChild[1] = nullptr;
+ }
+
+ // Returns true if "this" is above "that", assuming this->p0 is to the left of that->p0
+ // This is only used to verify the edgelist -- the actual test for insertion/deletion is much
+ // simpler because we can make certain assumptions then.
+ bool aboveIfLeft(const ActiveEdge* that) const {
+ const SkPoint& p0 = this->fSegment.fP0;
+ const SkPoint& q0 = that->fSegment.fP0;
+ SkASSERT(p0.fX <= q0.fX);
+ SkVector d = q0 - p0;
+ const SkVector& v = this->fSegment.fV;
+ const SkVector& w = that->fSegment.fV;
+ // The idea here is that if the vector between the origins of the two segments (d)
+ // rotates counterclockwise up to the vector representing the "this" segment (v),
+ // then we know that "this" is above "that". If the result is clockwise we say it's below.
+ if (this->fIndex0 != that->fIndex0) {
+ SkScalar cross = d.cross(v);
+ if (cross > kCrossTolerance) {
+ return true;
+ } else if (cross < -kCrossTolerance) {
+ return false;
+ }
+ } else if (this->fIndex1 == that->fIndex1) {
+ return false;
+ }
+ // At this point either the two origins are nearly equal or the origin of "that"
+ // lies on dv. So then we try the same for the vector from the tail of "this"
+ // to the head of "that". Again, ccw means "this" is above "that".
+ // d = that.P1 - this.P0
+ // = that.fP0 + that.fV - this.fP0
+ // = that.fP0 - this.fP0 + that.fV
+ // = old_d + that.fV
+ d += w;
+ SkScalar cross = d.cross(v);
+ if (cross > kCrossTolerance) {
+ return true;
+ } else if (cross < -kCrossTolerance) {
+ return false;
+ }
+ // If the previous check fails, the two segments are nearly collinear
+ // First check y-coord of first endpoints
+ if (p0.fX < q0.fX) {
+ return (p0.fY >= q0.fY);
+ } else if (p0.fY > q0.fY) {
+ return true;
+ } else if (p0.fY < q0.fY) {
+ return false;
+ }
+ // The first endpoints are the same, so check the other endpoint
+ SkPoint p1 = p0 + v;
+ SkPoint q1 = q0 + w;
+ if (p1.fX < q1.fX) {
+ return (p1.fY >= q1.fY);
+ } else {
+ return (p1.fY > q1.fY);
+ }
+ }
+
+ // same as leftAndAbove(), but generalized
+ bool above(const ActiveEdge* that) const {
+ const SkPoint& p0 = this->fSegment.fP0;
+ const SkPoint& q0 = that->fSegment.fP0;
+ if (right(p0, q0)) {
+ return !that->aboveIfLeft(this);
+ } else {
+ return this->aboveIfLeft(that);
+ }
+ }
+
+ bool intersect(const SkPoint& q0, const SkVector& w, uint16_t index0, uint16_t index1) const {
+ // check first to see if these edges are neighbors in the polygon
+ if (this->fIndex0 == index0 || this->fIndex1 == index0 ||
+ this->fIndex0 == index1 || this->fIndex1 == index1) {
+ return false;
+ }
+
+ // We don't need the exact intersection point so we can do a simpler test here.
+ const SkPoint& p0 = this->fSegment.fP0;
+ const SkVector& v = this->fSegment.fV;
+ SkPoint p1 = p0 + v;
+ SkPoint q1 = q0 + w;
+
+ // We assume some x-overlap due to how the edgelist works
+ // This allows us to simplify our test
+ // We need some slop here because storing the vector and recomputing the second endpoint
+        // doesn't necessarily give us the original result in floating point.
+ // TODO: Store vector as double? Store endpoint as well?
+ SkASSERT(q0.fX <= p1.fX + SK_ScalarNearlyZero);
+
+ // if each segment straddles the other (i.e., the endpoints have different sides)
+ // then they intersect
+ bool result;
+ if (p0.fX < q0.fX) {
+ if (q1.fX < p1.fX) {
+ result = (compute_side(p0, v, q0)*compute_side(p0, v, q1) < 0);
+ } else {
+ result = (compute_side(p0, v, q0)*compute_side(q0, w, p1) > 0);
+ }
+ } else {
+ if (p1.fX < q1.fX) {
+ result = (compute_side(q0, w, p0)*compute_side(q0, w, p1) < 0);
+ } else {
+ result = (compute_side(q0, w, p0)*compute_side(p0, v, q1) > 0);
+ }
+ }
+ return result;
+ }
+
+ bool intersect(const ActiveEdge* edge) {
+ return this->intersect(edge->fSegment.fP0, edge->fSegment.fV, edge->fIndex0, edge->fIndex1);
+ }
+
+ bool lessThan(const ActiveEdge* that) const {
+ SkASSERT(!this->above(this));
+ SkASSERT(!that->above(that));
+ SkASSERT(!(this->above(that) && that->above(this)));
+ return this->above(that);
+ }
+
+ bool equals(uint16_t index0, uint16_t index1) const {
+ return (this->fIndex0 == index0 && this->fIndex1 == index1);
+ }
+
+ OffsetSegment fSegment;
+    uint16_t fIndex0;   // polygon indices of this edge's two endpoints (fIndex0 -> fIndex1)
+ uint16_t fIndex1;
+ ActiveEdge* fChild[2];
+ ActiveEdge* fAbove;
+ ActiveEdge* fBelow;
+ int32_t fRed;
+};
+
+class ActiveEdgeList {
+public:
+ ActiveEdgeList(int maxEdges) {
+ fAllocation = (char*) sk_malloc_throw(sizeof(ActiveEdge)*maxEdges);
+ fCurrFree = 0;
+ fMaxFree = maxEdges;
+ }
+ ~ActiveEdgeList() {
+ fTreeHead.fChild[1] = nullptr;
+ sk_free(fAllocation);
+ }
+
+ bool insert(const SkPoint& p0, const SkPoint& p1, uint16_t index0, uint16_t index1) {
+ SkVector v = p1 - p0;
+ if (!v.isFinite()) {
+ return false;
+ }
+ // empty tree case -- easy
+ if (!fTreeHead.fChild[1]) {
+ ActiveEdge* root = fTreeHead.fChild[1] = this->allocate(p0, v, index0, index1);
+ SkASSERT(root);
+ if (!root) {
+ return false;
+ }
+ root->fRed = false;
+ return true;
+ }
+
+ // set up helpers
+ ActiveEdge* top = &fTreeHead;
+ ActiveEdge *grandparent = nullptr;
+ ActiveEdge *parent = nullptr;
+ ActiveEdge *curr = top->fChild[1];
+ int dir = 0;
+        int last = 0; // direction taken at the parent on the previous step (selects single vs double rotation)
+ // predecessor and successor, for intersection check
+ ActiveEdge* pred = nullptr;
+ ActiveEdge* succ = nullptr;
+
+ // search down the tree
+ while (true) {
+ if (!curr) {
+ // check for intersection with predecessor and successor
+ if ((pred && pred->intersect(p0, v, index0, index1)) ||
+ (succ && succ->intersect(p0, v, index0, index1))) {
+ return false;
+ }
+ // insert new node at bottom
+ parent->fChild[dir] = curr = this->allocate(p0, v, index0, index1);
+ SkASSERT(curr);
+ if (!curr) {
+ return false;
+ }
+ curr->fAbove = pred;
+ curr->fBelow = succ;
+ if (pred) {
+ if (pred->fSegment.fP0 == curr->fSegment.fP0 &&
+ pred->fSegment.fV == curr->fSegment.fV) {
+ return false;
+ }
+ pred->fBelow = curr;
+ }
+ if (succ) {
+ if (succ->fSegment.fP0 == curr->fSegment.fP0 &&
+ succ->fSegment.fV == curr->fSegment.fV) {
+ return false;
+ }
+ succ->fAbove = curr;
+ }
+ if (IsRed(parent)) {
+ int dir2 = (top->fChild[1] == grandparent);
+ if (curr == parent->fChild[last]) {
+ top->fChild[dir2] = SingleRotation(grandparent, !last);
+ } else {
+ top->fChild[dir2] = DoubleRotation(grandparent, !last);
+ }
+ }
+ break;
+ } else if (IsRed(curr->fChild[0]) && IsRed(curr->fChild[1])) {
+ // color flip
+ curr->fRed = true;
+ curr->fChild[0]->fRed = false;
+ curr->fChild[1]->fRed = false;
+ if (IsRed(parent)) {
+ int dir2 = (top->fChild[1] == grandparent);
+ if (curr == parent->fChild[last]) {
+ top->fChild[dir2] = SingleRotation(grandparent, !last);
+ } else {
+ top->fChild[dir2] = DoubleRotation(grandparent, !last);
+ }
+ }
+ }
+
+ last = dir;
+ int side;
+ // check to see if segment is above or below
+ if (curr->fIndex0 == index0) {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p1);
+ } else {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p0);
+ }
+ if (0 == side) {
+ return false;
+ }
+ dir = (side < 0);
+
+ if (0 == dir) {
+ succ = curr;
+ } else {
+ pred = curr;
+ }
+
+ // update helpers
+ if (grandparent) {
+ top = grandparent;
+ }
+ grandparent = parent;
+ parent = curr;
+ curr = curr->fChild[dir];
+ }
+
+ // update root and make it black
+ fTreeHead.fChild[1]->fRed = false;
+
+ SkDEBUGCODE(VerifyTree(fTreeHead.fChild[1]));
+
+ return true;
+ }
+
+ // replaces edge p0p1 with p1p2
+ bool replace(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ uint16_t index0, uint16_t index1, uint16_t index2) {
+ if (!fTreeHead.fChild[1]) {
+ return false;
+ }
+
+ SkVector v = p2 - p1;
+ ActiveEdge* curr = &fTreeHead;
+ ActiveEdge* found = nullptr;
+ int dir = 1;
+
+ // search
+ while (curr->fChild[dir] != nullptr) {
+ // update helpers
+ curr = curr->fChild[dir];
+ // save found node
+ if (curr->equals(index0, index1)) {
+ found = curr;
+ break;
+ } else {
+ // check to see if segment is above or below
+ int side;
+ if (curr->fIndex1 == index1) {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p0);
+ } else {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p1);
+ }
+ if (0 == side) {
+ return false;
+ }
+ dir = (side < 0);
+ }
+ }
+
+ if (!found) {
+ return false;
+ }
+
+ // replace if found
+ ActiveEdge* pred = found->fAbove;
+ ActiveEdge* succ = found->fBelow;
+ // check deletion and insert intersection cases
+ if (pred && (pred->intersect(found) || pred->intersect(p1, v, index1, index2))) {
+ return false;
+ }
+ if (succ && (succ->intersect(found) || succ->intersect(p1, v, index1, index2))) {
+ return false;
+ }
+ found->fSegment.fP0 = p1;
+ found->fSegment.fV = v;
+ found->fIndex0 = index1;
+ found->fIndex1 = index2;
+ // above and below should stay the same
+
+ SkDEBUGCODE(VerifyTree(fTreeHead.fChild[1]));
+
+ return true;
+ }
+
+ bool remove(const SkPoint& p0, const SkPoint& p1, uint16_t index0, uint16_t index1) {
+ if (!fTreeHead.fChild[1]) {
+ return false;
+ }
+
+ ActiveEdge* curr = &fTreeHead;
+ ActiveEdge* parent = nullptr;
+ ActiveEdge* grandparent = nullptr;
+ ActiveEdge* found = nullptr;
+ int dir = 1;
+
+ // search and push a red node down
+ while (curr->fChild[dir] != nullptr) {
+ int last = dir;
+
+ // update helpers
+ grandparent = parent;
+ parent = curr;
+ curr = curr->fChild[dir];
+ // save found node
+ if (curr->equals(index0, index1)) {
+ found = curr;
+ dir = 0;
+ } else {
+ // check to see if segment is above or below
+ int side;
+ if (curr->fIndex1 == index1) {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p0);
+ } else {
+ side = compute_side(curr->fSegment.fP0, curr->fSegment.fV, p1);
+ }
+ if (0 == side) {
+ return false;
+ }
+ dir = (side < 0);
+ }
+
+ // push the red node down
+ if (!IsRed(curr) && !IsRed(curr->fChild[dir])) {
+ if (IsRed(curr->fChild[!dir])) {
+ parent = parent->fChild[last] = SingleRotation(curr, dir);
+ } else {
+ ActiveEdge *s = parent->fChild[!last];
+
+ if (s != nullptr) {
+ if (!IsRed(s->fChild[!last]) && !IsRed(s->fChild[last])) {
+ // color flip
+ parent->fRed = false;
+ s->fRed = true;
+ curr->fRed = true;
+ } else {
+ int dir2 = (grandparent->fChild[1] == parent);
+
+ if (IsRed(s->fChild[last])) {
+ grandparent->fChild[dir2] = DoubleRotation(parent, last);
+ } else if (IsRed(s->fChild[!last])) {
+ grandparent->fChild[dir2] = SingleRotation(parent, last);
+ }
+
+ // ensure correct coloring
+ curr->fRed = grandparent->fChild[dir2]->fRed = true;
+ grandparent->fChild[dir2]->fChild[0]->fRed = false;
+ grandparent->fChild[dir2]->fChild[1]->fRed = false;
+ }
+ }
+ }
+ }
+ }
+
+ // replace and remove if found
+ if (found) {
+ ActiveEdge* pred = found->fAbove;
+ ActiveEdge* succ = found->fBelow;
+ if ((pred && pred->intersect(found)) || (succ && succ->intersect(found))) {
+ return false;
+ }
+ if (found != curr) {
+ found->fSegment = curr->fSegment;
+ found->fIndex0 = curr->fIndex0;
+ found->fIndex1 = curr->fIndex1;
+ found->fAbove = curr->fAbove;
+ pred = found->fAbove;
+ // we don't need to set found->fBelow here
+ } else {
+ if (succ) {
+ succ->fAbove = pred;
+ }
+ }
+ if (pred) {
+ pred->fBelow = curr->fBelow;
+ }
+ parent->fChild[parent->fChild[1] == curr] = curr->fChild[!curr->fChild[0]];
+
+ // no need to delete
+ curr->fAbove = reinterpret_cast<ActiveEdge*>(0xdeadbeefll);
+ curr->fBelow = reinterpret_cast<ActiveEdge*>(0xdeadbeefll);
+ if (fTreeHead.fChild[1]) {
+ fTreeHead.fChild[1]->fRed = false;
+ }
+ }
+
+ // update root and make it black
+ if (fTreeHead.fChild[1]) {
+ fTreeHead.fChild[1]->fRed = false;
+ }
+
+ SkDEBUGCODE(VerifyTree(fTreeHead.fChild[1]));
+
+ return true;
+ }
+
+private:
+ // allocator
+ ActiveEdge * allocate(const SkPoint& p0, const SkPoint& p1, uint16_t index0, uint16_t index1) {
+ if (fCurrFree >= fMaxFree) {
+ return nullptr;
+ }
+ char* bytes = fAllocation + sizeof(ActiveEdge)*fCurrFree;
+ ++fCurrFree;
+ return new(bytes) ActiveEdge(p0, p1, index0, index1);
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////
+ // Red-black tree methods
+ ///////////////////////////////////////////////////////////////////////////////////
+ static bool IsRed(const ActiveEdge* node) {
+ return node && node->fRed;
+ }
+
+ static ActiveEdge* SingleRotation(ActiveEdge* node, int dir) {
+ ActiveEdge* tmp = node->fChild[!dir];
+
+ node->fChild[!dir] = tmp->fChild[dir];
+ tmp->fChild[dir] = node;
+
+ node->fRed = true;
+ tmp->fRed = false;
+
+ return tmp;
+ }
+
+ static ActiveEdge* DoubleRotation(ActiveEdge* node, int dir) {
+ node->fChild[!dir] = SingleRotation(node->fChild[!dir], !dir);
+
+ return SingleRotation(node, dir);
+ }
+
+ // returns black link count
+ static int VerifyTree(const ActiveEdge* tree) {
+ if (!tree) {
+ return 1;
+ }
+
+ const ActiveEdge* left = tree->fChild[0];
+ const ActiveEdge* right = tree->fChild[1];
+
+ // no consecutive red links
+ if (IsRed(tree) && (IsRed(left) || IsRed(right))) {
+ SkASSERT(false);
+ return 0;
+ }
+
+ // check secondary links
+ if (tree->fAbove) {
+ SkASSERT(tree->fAbove->fBelow == tree);
+ SkASSERT(tree->fAbove->lessThan(tree));
+ }
+ if (tree->fBelow) {
+ SkASSERT(tree->fBelow->fAbove == tree);
+ SkASSERT(tree->lessThan(tree->fBelow));
+ }
+
+ // violates binary tree order
+ if ((left && tree->lessThan(left)) || (right && right->lessThan(tree))) {
+ SkASSERT(false);
+ return 0;
+ }
+
+ int leftCount = VerifyTree(left);
+ int rightCount = VerifyTree(right);
+
+ // return black link count
+ if (leftCount != 0 && rightCount != 0) {
+ // black height mismatch
+ if (leftCount != rightCount) {
+ SkASSERT(false);
+ return 0;
+ }
+ return IsRed(tree) ? leftCount : leftCount + 1;
+ } else {
+ return 0;
+ }
+ }
+
+ ActiveEdge fTreeHead;
+ char* fAllocation;
+ int fCurrFree;
+ int fMaxFree;
+};
+
+// Here we implement a sweep line algorithm to determine whether the provided points
+// represent a simple polygon, i.e., the polygon is non-self-intersecting.
+// We first insert the vertices into a priority queue sorting horizontally from left to right.
+// Then as we pop the vertices from the queue we generate events which indicate that an edge
+// should be added or removed from an edge list. If any intersections are detected in the edge
+// list, then we know the polygon is self-intersecting and hence not simple.
+bool SkIsSimplePolygon(const SkPoint* polygon, int polygonSize) {
+ if (polygonSize < 3) {
+ return false;
+ }
+
+ // If it's convex, it's simple
+ if (SkIsConvexPolygon(polygon, polygonSize)) {
+ return true;
+ }
+
+ // practically speaking, it takes too long to process large polygons
+ if (polygonSize > 2048) {
+ return false;
+ }
+
+ SkTDPQueue <Vertex, Vertex::Left> vertexQueue(polygonSize);
+ for (int i = 0; i < polygonSize; ++i) {
+ Vertex newVertex;
+ if (!polygon[i].isFinite()) {
+ return false;
+ }
+ newVertex.fPosition = polygon[i];
+ newVertex.fIndex = i;
+ newVertex.fPrevIndex = (i - 1 + polygonSize) % polygonSize;
+ newVertex.fNextIndex = (i + 1) % polygonSize;
+ newVertex.fFlags = 0;
+ // The two edges adjacent to this vertex are the same, so polygon is not simple
+ if (polygon[newVertex.fPrevIndex] == polygon[newVertex.fNextIndex]) {
+ return false;
+ }
+ if (left(polygon[newVertex.fPrevIndex], polygon[i])) {
+ newVertex.fFlags |= kPrevLeft_VertexFlag;
+ }
+ if (left(polygon[newVertex.fNextIndex], polygon[i])) {
+ newVertex.fFlags |= kNextLeft_VertexFlag;
+ }
+ vertexQueue.insert(newVertex);
+ }
+
+ // pop each vertex from the queue and generate events depending on
+ // where it lies relative to its neighboring edges
+ ActiveEdgeList sweepLine(polygonSize);
+ while (vertexQueue.count() > 0) {
+ const Vertex& v = vertexQueue.peek();
+
+ // both to the right -- insert both
+ if (v.fFlags == 0) {
+ if (!sweepLine.insert(v.fPosition, polygon[v.fPrevIndex], v.fIndex, v.fPrevIndex)) {
+ break;
+ }
+ if (!sweepLine.insert(v.fPosition, polygon[v.fNextIndex], v.fIndex, v.fNextIndex)) {
+ break;
+ }
+ // both to the left -- remove both
+ } else if (v.fFlags == (kPrevLeft_VertexFlag | kNextLeft_VertexFlag)) {
+ if (!sweepLine.remove(polygon[v.fPrevIndex], v.fPosition, v.fPrevIndex, v.fIndex)) {
+ break;
+ }
+ if (!sweepLine.remove(polygon[v.fNextIndex], v.fPosition, v.fNextIndex, v.fIndex)) {
+ break;
+ }
+ // one to left and right -- replace one with another
+ } else {
+ if (v.fFlags & kPrevLeft_VertexFlag) {
+ if (!sweepLine.replace(polygon[v.fPrevIndex], v.fPosition, polygon[v.fNextIndex],
+ v.fPrevIndex, v.fIndex, v.fNextIndex)) {
+ break;
+ }
+ } else {
+ SkASSERT(v.fFlags & kNextLeft_VertexFlag);
+ if (!sweepLine.replace(polygon[v.fNextIndex], v.fPosition, polygon[v.fPrevIndex],
+ v.fNextIndex, v.fIndex, v.fPrevIndex)) {
+ break;
+ }
+ }
+ }
+
+ vertexQueue.pop();
+ }
+
+ return (vertexQueue.count() == 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// helper function for SkOffsetSimplePolygon
+static void setup_offset_edge(OffsetEdge* currEdge,
+ const SkPoint& endpoint0, const SkPoint& endpoint1,
+ uint16_t startIndex, uint16_t endIndex) {
+ currEdge->fOffset.fP0 = endpoint0;
+ currEdge->fOffset.fV = endpoint1 - endpoint0;
+ currEdge->init(startIndex, endIndex);
+}
+
+static bool is_reflex_vertex(const SkPoint* inputPolygonVerts, int winding, SkScalar offset,
+ uint16_t prevIndex, uint16_t currIndex, uint16_t nextIndex) {
+ int side = compute_side(inputPolygonVerts[prevIndex],
+ inputPolygonVerts[currIndex] - inputPolygonVerts[prevIndex],
+ inputPolygonVerts[nextIndex]);
+ // if reflex point, we need to add extra edges
+ return (side*winding*offset < 0);
+}
+
+bool SkOffsetSimplePolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+ const SkRect& bounds, SkScalar offset,
+ SkTDArray<SkPoint>* offsetPolygon, SkTDArray<int>* polygonIndices) {
+ if (inputPolygonSize < 3) {
+ return false;
+ }
+
+ // need to be able to represent all the vertices in the 16-bit indices
+ if (inputPolygonSize >= std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+
+ if (!SkScalarIsFinite(offset)) {
+ return false;
+ }
+
+ // can't inset more than the half bounds of the polygon
+ if (offset > std::min(SkTAbs(SkRectPriv::HalfWidth(bounds)),
+ SkTAbs(SkRectPriv::HalfHeight(bounds)))) {
+ return false;
+ }
+
+ // offsetting close to zero just returns the original poly
+ if (SkScalarNearlyZero(offset)) {
+ for (int i = 0; i < inputPolygonSize; ++i) {
+ *offsetPolygon->append() = inputPolygonVerts[i];
+ if (polygonIndices) {
+ *polygonIndices->append() = i;
+ }
+ }
+ return true;
+ }
+
+ // get winding direction
+ int winding = SkGetPolygonWinding(inputPolygonVerts, inputPolygonSize);
+ if (0 == winding) {
+ return false;
+ }
+
+ // build normals
+ AutoSTMalloc<64, SkVector> normals(inputPolygonSize);
+ unsigned int numEdges = 0;
+ for (int currIndex = 0, prevIndex = inputPolygonSize - 1;
+ currIndex < inputPolygonSize;
+ prevIndex = currIndex, ++currIndex) {
+ if (!inputPolygonVerts[currIndex].isFinite()) {
+ return false;
+ }
+ int nextIndex = (currIndex + 1) % inputPolygonSize;
+ if (!compute_offset_vector(inputPolygonVerts[currIndex], inputPolygonVerts[nextIndex],
+ offset, winding, &normals[currIndex])) {
+ return false;
+ }
+ if (currIndex > 0) {
+ // if reflex point, we need to add extra edges
+ if (is_reflex_vertex(inputPolygonVerts, winding, offset,
+ prevIndex, currIndex, nextIndex)) {
+ SkScalar rotSin, rotCos;
+ int numSteps;
+ if (!SkComputeRadialSteps(normals[prevIndex], normals[currIndex], offset,
+ &rotSin, &rotCos, &numSteps)) {
+ return false;
+ }
+ numEdges += std::max(numSteps, 1);
+ }
+ }
+ numEdges++;
+ }
+ // finish up the edge counting
+ if (is_reflex_vertex(inputPolygonVerts, winding, offset, inputPolygonSize-1, 0, 1)) {
+ SkScalar rotSin, rotCos;
+ int numSteps;
+ if (!SkComputeRadialSteps(normals[inputPolygonSize-1], normals[0], offset,
+ &rotSin, &rotCos, &numSteps)) {
+ return false;
+ }
+ numEdges += std::max(numSteps, 1);
+ }
+
+ // Make sure we don't overflow the max array count.
+ // We shouldn't overflow numEdges, as SkComputeRadialSteps returns a max of 2^16-1,
+ // and we have a max of 2^16-1 original vertices.
+ if (numEdges > (unsigned int)std::numeric_limits<int32_t>::max()) {
+ return false;
+ }
+
+ // build initial offset edge list
+ SkSTArray<64, OffsetEdge> edgeData(numEdges);
+ OffsetEdge* prevEdge = nullptr;
+ for (int currIndex = 0, prevIndex = inputPolygonSize - 1;
+ currIndex < inputPolygonSize;
+ prevIndex = currIndex, ++currIndex) {
+ int nextIndex = (currIndex + 1) % inputPolygonSize;
+ // if reflex point, fill in curve
+ if (is_reflex_vertex(inputPolygonVerts, winding, offset,
+ prevIndex, currIndex, nextIndex)) {
+ SkScalar rotSin, rotCos;
+ int numSteps;
+ SkVector prevNormal = normals[prevIndex];
+ if (!SkComputeRadialSteps(prevNormal, normals[currIndex], offset,
+ &rotSin, &rotCos, &numSteps)) {
+ return false;
+ }
+ auto currEdge = edgeData.push_back_n(std::max(numSteps, 1));
+ for (int i = 0; i < numSteps - 1; ++i) {
+ SkVector currNormal = SkVector::Make(prevNormal.fX*rotCos - prevNormal.fY*rotSin,
+ prevNormal.fY*rotCos + prevNormal.fX*rotSin);
+ setup_offset_edge(currEdge,
+ inputPolygonVerts[currIndex] + prevNormal,
+ inputPolygonVerts[currIndex] + currNormal,
+ currIndex, currIndex);
+ prevNormal = currNormal;
+ currEdge->fPrev = prevEdge;
+ if (prevEdge) {
+ prevEdge->fNext = currEdge;
+ }
+ prevEdge = currEdge;
+ ++currEdge;
+ }
+ setup_offset_edge(currEdge,
+ inputPolygonVerts[currIndex] + prevNormal,
+ inputPolygonVerts[currIndex] + normals[currIndex],
+ currIndex, currIndex);
+ currEdge->fPrev = prevEdge;
+ if (prevEdge) {
+ prevEdge->fNext = currEdge;
+ }
+ prevEdge = currEdge;
+ }
+
+ // Add the edge
+ auto currEdge = edgeData.push_back_n(1);
+ setup_offset_edge(currEdge,
+ inputPolygonVerts[currIndex] + normals[currIndex],
+ inputPolygonVerts[nextIndex] + normals[currIndex],
+ currIndex, nextIndex);
+ currEdge->fPrev = prevEdge;
+ if (prevEdge) {
+ prevEdge->fNext = currEdge;
+ }
+ prevEdge = currEdge;
+ }
+ // close up the linked list
+ SkASSERT(prevEdge);
+ prevEdge->fNext = &edgeData[0];
+ edgeData[0].fPrev = prevEdge;
+
+ // now clip edges
+ SkASSERT(edgeData.size() == (int)numEdges);
+ auto head = &edgeData[0];
+ auto currEdge = head;
+ unsigned int offsetVertexCount = numEdges;
+ unsigned long long iterations = 0;
+ unsigned long long maxIterations = (unsigned long long)(numEdges) * numEdges;
+ while (head && prevEdge != currEdge && offsetVertexCount > 0) {
+ ++iterations;
+ // we should check each edge against each other edge at most once
+ if (iterations > maxIterations) {
+ return false;
+ }
+
+ SkScalar s, t;
+ SkPoint intersection;
+ if (prevEdge->checkIntersection(currEdge, &intersection, &s, &t)) {
+ // if new intersection is further back on previous inset from the prior intersection
+ if (s < prevEdge->fTValue) {
+ // no point in considering this one again
+ remove_node(prevEdge, &head);
+ --offsetVertexCount;
+ // go back one segment
+ prevEdge = prevEdge->fPrev;
+ // we've already considered this intersection, we're done
+ } else if (currEdge->fTValue > SK_ScalarMin &&
+ SkPointPriv::EqualsWithinTolerance(intersection,
+ currEdge->fIntersection,
+ 1.0e-6f)) {
+ break;
+ } else {
+ // add intersection
+ currEdge->fIntersection = intersection;
+ currEdge->fTValue = t;
+ currEdge->fIndex = prevEdge->fEnd;
+
+ // go to next segment
+ prevEdge = currEdge;
+ currEdge = currEdge->fNext;
+ }
+ } else {
+ // If there is no intersection, we want to minimize the distance between
+ // the point where the segment lines cross and the segments themselves.
+ OffsetEdge* prevPrevEdge = prevEdge->fPrev;
+ OffsetEdge* currNextEdge = currEdge->fNext;
+ SkScalar dist0 = currEdge->computeCrossingDistance(prevPrevEdge);
+ SkScalar dist1 = prevEdge->computeCrossingDistance(currNextEdge);
+ // if both lead to direct collision
+ if (dist0 < 0 && dist1 < 0) {
+ // check first to see if either represent parts of one contour
+ SkPoint p1 = prevPrevEdge->fOffset.fP0 + prevPrevEdge->fOffset.fV;
+ bool prevSameContour = SkPointPriv::EqualsWithinTolerance(p1,
+ prevEdge->fOffset.fP0);
+ p1 = currEdge->fOffset.fP0 + currEdge->fOffset.fV;
+ bool currSameContour = SkPointPriv::EqualsWithinTolerance(p1,
+ currNextEdge->fOffset.fP0);
+
+ // want to step along contour to find intersections rather than jump to new one
+ if (currSameContour && !prevSameContour) {
+ remove_node(currEdge, &head);
+ currEdge = currNextEdge;
+ --offsetVertexCount;
+ continue;
+ } else if (prevSameContour && !currSameContour) {
+ remove_node(prevEdge, &head);
+ prevEdge = prevPrevEdge;
+ --offsetVertexCount;
+ continue;
+ }
+ }
+
+ // otherwise minimize collision distance along segment
+ if (dist0 < dist1) {
+ remove_node(prevEdge, &head);
+ prevEdge = prevPrevEdge;
+ } else {
+ remove_node(currEdge, &head);
+ currEdge = currNextEdge;
+ }
+ --offsetVertexCount;
+ }
+ }
+
+ // store all the valid intersections that aren't nearly coincident
+ // TODO: look at the main algorithm and see if we can detect these better
+ offsetPolygon->reset();
+ if (!head || offsetVertexCount == 0 ||
+ offsetVertexCount >= std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+
+ static constexpr SkScalar kCleanupTolerance = 0.01f;
+ offsetPolygon->reserve(offsetVertexCount);
+ int currIndex = 0;
+ *offsetPolygon->append() = head->fIntersection;
+ if (polygonIndices) {
+ *polygonIndices->append() = head->fIndex;
+ }
+ currEdge = head->fNext;
+ while (currEdge != head) {
+ if (!SkPointPriv::EqualsWithinTolerance(currEdge->fIntersection,
+ (*offsetPolygon)[currIndex],
+ kCleanupTolerance)) {
+ *offsetPolygon->append() = currEdge->fIntersection;
+ if (polygonIndices) {
+ *polygonIndices->append() = currEdge->fIndex;
+ }
+ currIndex++;
+ }
+ currEdge = currEdge->fNext;
+ }
+ // make sure the first and last points aren't coincident
+ if (currIndex >= 1 &&
+ SkPointPriv::EqualsWithinTolerance((*offsetPolygon)[0], (*offsetPolygon)[currIndex],
+ kCleanupTolerance)) {
+ offsetPolygon->pop_back();
+ if (polygonIndices) {
+ polygonIndices->pop_back();
+ }
+ }
+
+ // check winding of offset polygon (it should be same as the original polygon)
+ SkScalar offsetWinding = SkGetPolygonWinding(offsetPolygon->begin(), offsetPolygon->size());
+
+ return (winding*offsetWinding > 0 &&
+ SkIsSimplePolygon(offsetPolygon->begin(), offsetPolygon->size()));
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+struct TriangulationVertex {
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(TriangulationVertex);
+
+ enum class VertexType { kConvex, kReflex };
+
+ SkPoint fPosition;
+ VertexType fVertexType;
+ uint16_t fIndex;
+ uint16_t fPrevIndex;
+ uint16_t fNextIndex;
+};
+
+static void compute_triangle_bounds(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ SkRect* bounds) {
+ skvx::float4 min, max;
+ min = max = skvx::float4(p0.fX, p0.fY, p0.fX, p0.fY);
+ skvx::float4 xy(p1.fX, p1.fY, p2.fX, p2.fY);
+ min = skvx::min(min, xy);
+ max = skvx::max(max, xy);
+ bounds->setLTRB(std::min(min[0], min[2]), std::min(min[1], min[3]),
+ std::max(max[0], max[2]), std::max(max[1], max[3]));
+}
+
+// test to see if point p is in triangle p0p1p2.
+// for now assuming strictly inside -- if on the edge it's outside
+static bool point_in_triangle(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ const SkPoint& p) {
+ SkVector v0 = p1 - p0;
+ SkVector v1 = p2 - p1;
+ SkScalar n = v0.cross(v1);
+
+ SkVector w0 = p - p0;
+ if (n*v0.cross(w0) < SK_ScalarNearlyZero) {
+ return false;
+ }
+
+ SkVector w1 = p - p1;
+ if (n*v1.cross(w1) < SK_ScalarNearlyZero) {
+ return false;
+ }
+
+ SkVector v2 = p0 - p2;
+ SkVector w2 = p - p2;
+ if (n*v2.cross(w2) < SK_ScalarNearlyZero) {
+ return false;
+ }
+
+ return true;
+}
+
+// Data structure to track reflex vertices and check whether any are inside a given triangle
+class ReflexHash {
+public:
+ bool init(const SkRect& bounds, int vertexCount) {
+ fBounds = bounds;
+ fNumVerts = 0;
+ SkScalar width = bounds.width();
+ SkScalar height = bounds.height();
+ if (!SkScalarIsFinite(width) || !SkScalarIsFinite(height)) {
+ return false;
+ }
+
+ // We want vertexCount grid cells, roughly distributed to match the bounds ratio
+ SkScalar hCount = SkScalarSqrt(sk_ieee_float_divide(vertexCount*width, height));
+ if (!SkScalarIsFinite(hCount)) {
+ return false;
+ }
+ fHCount = std::max(std::min(SkScalarRoundToInt(hCount), vertexCount), 1);
+ fVCount = vertexCount/fHCount;
+ fGridConversion.set(sk_ieee_float_divide(fHCount - 0.001f, width),
+ sk_ieee_float_divide(fVCount - 0.001f, height));
+ if (!fGridConversion.isFinite()) {
+ return false;
+ }
+
+ fGrid.resize(fHCount*fVCount);
+ for (int i = 0; i < fGrid.size(); ++i) {
+ fGrid[i].reset();
+ }
+
+ return true;
+ }
+
+ void add(TriangulationVertex* v) {
+ int index = hash(v);
+ fGrid[index].addToTail(v);
+ ++fNumVerts;
+ }
+
+ void remove(TriangulationVertex* v) {
+ int index = hash(v);
+ fGrid[index].remove(v);
+ --fNumVerts;
+ }
+
+ bool checkTriangle(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2,
+ uint16_t ignoreIndex0, uint16_t ignoreIndex1) const {
+ if (!fNumVerts) {
+ return false;
+ }
+
+ SkRect triBounds;
+ compute_triangle_bounds(p0, p1, p2, &triBounds);
+ int h0 = (triBounds.fLeft - fBounds.fLeft)*fGridConversion.fX;
+ int h1 = (triBounds.fRight - fBounds.fLeft)*fGridConversion.fX;
+ int v0 = (triBounds.fTop - fBounds.fTop)*fGridConversion.fY;
+ int v1 = (triBounds.fBottom - fBounds.fTop)*fGridConversion.fY;
+
+ for (int v = v0; v <= v1; ++v) {
+ for (int h = h0; h <= h1; ++h) {
+ int i = v * fHCount + h;
+ for (SkTInternalLList<TriangulationVertex>::Iter reflexIter = fGrid[i].begin();
+ reflexIter != fGrid[i].end(); ++reflexIter) {
+ TriangulationVertex* reflexVertex = *reflexIter;
+ if (reflexVertex->fIndex != ignoreIndex0 &&
+ reflexVertex->fIndex != ignoreIndex1 &&
+ point_in_triangle(p0, p1, p2, reflexVertex->fPosition)) {
+ return true;
+ }
+ }
+
+ }
+ }
+
+ return false;
+ }
+
+private:
+ int hash(TriangulationVertex* vert) const {
+ int h = (vert->fPosition.fX - fBounds.fLeft)*fGridConversion.fX;
+ int v = (vert->fPosition.fY - fBounds.fTop)*fGridConversion.fY;
+ SkASSERT(v*fHCount + h >= 0);
+ return v*fHCount + h;
+ }
+
+ SkRect fBounds;
+ int fHCount;
+ int fVCount;
+ int fNumVerts;
+ // converts distance from the origin to a grid location (when cast to int)
+ SkVector fGridConversion;
+ SkTDArray<SkTInternalLList<TriangulationVertex>> fGrid;
+};
+
+// Check to see if a reflex vertex has become a convex vertex after clipping an ear
+static void reclassify_vertex(TriangulationVertex* p, const SkPoint* polygonVerts,
+ int winding, ReflexHash* reflexHash,
+ SkTInternalLList<TriangulationVertex>* convexList) {
+ if (TriangulationVertex::VertexType::kReflex == p->fVertexType) {
+ SkVector v0 = p->fPosition - polygonVerts[p->fPrevIndex];
+ SkVector v1 = polygonVerts[p->fNextIndex] - p->fPosition;
+ if (winding*v0.cross(v1) > SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ p->fVertexType = TriangulationVertex::VertexType::kConvex;
+ reflexHash->remove(p);
+ p->fPrev = p->fNext = nullptr;
+ convexList->addToTail(p);
+ }
+ }
+}
+
+bool SkTriangulateSimplePolygon(const SkPoint* polygonVerts, uint16_t* indexMap, int polygonSize,
+ SkTDArray<uint16_t>* triangleIndices) {
+ if (polygonSize < 3) {
+ return false;
+ }
+ // need to be able to represent all the vertices in the 16-bit indices
+ if (polygonSize >= std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
+
+ // get bounds
+ SkRect bounds;
+ if (!bounds.setBoundsCheck(polygonVerts, polygonSize)) {
+ return false;
+ }
+ // get winding direction
+ // TODO: we do this for all the polygon routines -- might be better to have the client
+ // compute it and pass it in
+ int winding = SkGetPolygonWinding(polygonVerts, polygonSize);
+ if (0 == winding) {
+ return false;
+ }
+
+ // Set up vertices
+ AutoSTArray<64, TriangulationVertex> triangulationVertices(polygonSize);
+ int prevIndex = polygonSize - 1;
+ SkVector v0 = polygonVerts[0] - polygonVerts[prevIndex];
+ for (int currIndex = 0; currIndex < polygonSize; ++currIndex) {
+ int nextIndex = (currIndex + 1) % polygonSize;
+
+ triangulationVertices[currIndex] = TriangulationVertex{};
+ triangulationVertices[currIndex].fPosition = polygonVerts[currIndex];
+ triangulationVertices[currIndex].fIndex = currIndex;
+ triangulationVertices[currIndex].fPrevIndex = prevIndex;
+ triangulationVertices[currIndex].fNextIndex = nextIndex;
+ SkVector v1 = polygonVerts[nextIndex] - polygonVerts[currIndex];
+ if (winding*v0.cross(v1) > SK_ScalarNearlyZero*SK_ScalarNearlyZero) {
+ triangulationVertices[currIndex].fVertexType = TriangulationVertex::VertexType::kConvex;
+ } else {
+ triangulationVertices[currIndex].fVertexType = TriangulationVertex::VertexType::kReflex;
+ }
+
+ prevIndex = currIndex;
+ v0 = v1;
+ }
+
+ // Classify initial vertices into a list of convex vertices and a hash of reflex vertices
+ // TODO: possibly sort the convexList in some way to get better triangles
+ SkTInternalLList<TriangulationVertex> convexList;
+ ReflexHash reflexHash;
+ if (!reflexHash.init(bounds, polygonSize)) {
+ return false;
+ }
+ prevIndex = polygonSize - 1;
+ for (int currIndex = 0; currIndex < polygonSize; prevIndex = currIndex, ++currIndex) {
+ TriangulationVertex::VertexType currType = triangulationVertices[currIndex].fVertexType;
+ if (TriangulationVertex::VertexType::kConvex == currType) {
+ int nextIndex = (currIndex + 1) % polygonSize;
+ TriangulationVertex::VertexType prevType = triangulationVertices[prevIndex].fVertexType;
+ TriangulationVertex::VertexType nextType = triangulationVertices[nextIndex].fVertexType;
+ // We prioritize clipping vertices with neighboring reflex vertices.
+ // The intent here is that it will cull reflex vertices more quickly.
+ if (TriangulationVertex::VertexType::kReflex == prevType ||
+ TriangulationVertex::VertexType::kReflex == nextType) {
+ convexList.addToHead(&triangulationVertices[currIndex]);
+ } else {
+ convexList.addToTail(&triangulationVertices[currIndex]);
+ }
+ } else {
+ // We treat near collinear vertices as reflex
+ reflexHash.add(&triangulationVertices[currIndex]);
+ }
+ }
+
+ // The general concept: We are trying to find three neighboring vertices where
+ // no other vertex lies inside the triangle (an "ear"). If we find one, we clip
+ // that ear off, and then repeat on the new polygon. Once we get down to three vertices
+ // we have triangulated the entire polygon.
+ // In the worst case this is an n^2 algorithm. We can cut down the search space somewhat by
+ // noting that only convex vertices can be potential ears, and we only need to check whether
+ // any reflex vertices lie inside the ear.
+ triangleIndices->reserve(triangleIndices->size() + 3 * (polygonSize - 2));
+ int vertexCount = polygonSize;
+ while (vertexCount > 3) {
+ bool success = false;
+ TriangulationVertex* earVertex = nullptr;
+ TriangulationVertex* p0 = nullptr;
+ TriangulationVertex* p2 = nullptr;
+ // find a convex vertex to clip
+ for (SkTInternalLList<TriangulationVertex>::Iter convexIter = convexList.begin();
+ convexIter != convexList.end(); ++convexIter) {
+ earVertex = *convexIter;
+ SkASSERT(TriangulationVertex::VertexType::kReflex != earVertex->fVertexType);
+
+ p0 = &triangulationVertices[earVertex->fPrevIndex];
+ p2 = &triangulationVertices[earVertex->fNextIndex];
+
+ // see if any reflex vertices are inside the ear
+ bool failed = reflexHash.checkTriangle(p0->fPosition, earVertex->fPosition,
+ p2->fPosition, p0->fIndex, p2->fIndex);
+ if (failed) {
+ continue;
+ }
+
+ // found one we can clip
+ success = true;
+ break;
+ }
+ // If we can't find any ears to clip, this probably isn't a simple polygon
+ if (!success) {
+ return false;
+ }
+
+ // add indices
+ auto indices = triangleIndices->append(3);
+ indices[0] = indexMap[p0->fIndex];
+ indices[1] = indexMap[earVertex->fIndex];
+ indices[2] = indexMap[p2->fIndex];
+
+ // clip the ear
+ convexList.remove(earVertex);
+ --vertexCount;
+
+ // reclassify reflex verts
+ p0->fNextIndex = earVertex->fNextIndex;
+ reclassify_vertex(p0, polygonVerts, winding, &reflexHash, &convexList);
+
+ p2->fPrevIndex = earVertex->fPrevIndex;
+ reclassify_vertex(p2, polygonVerts, winding, &reflexHash, &convexList);
+ }
+
+ // output indices
+ for (SkTInternalLList<TriangulationVertex>::Iter vertexIter = convexList.begin();
+ vertexIter != convexList.end(); ++vertexIter) {
+ TriangulationVertex* vertex = *vertexIter;
+ *triangleIndices->append() = indexMap[vertex->fIndex];
+ }
+
+ return true;
+}
+
+#endif // !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
diff --git a/gfx/skia/skia/src/utils/SkPolyUtils.h b/gfx/skia/skia/src/utils/SkPolyUtils.h
new file mode 100644
index 0000000000..5e2474cb18
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkPolyUtils.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkOffsetPolygon_DEFINED
+#define SkOffsetPolygon_DEFINED
+
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+
+#include <cstdint>
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+struct SkRect;
+template <typename T> class SkTDArray;
+
+/**
+ * Generates a polygon that is inset a constant from the boundary of a given convex polygon.
+ * The input polygon is expected to have values clamped to the nearest 1/16th.
+ *
+ * @param inputPolygonVerts Array of points representing the vertices of the original polygon.
+ * It should be convex and have no coincident points.
+ * @param inputPolygonSize Number of vertices in the original polygon.
+ * @param inset How far we wish to inset the polygon. This should be a positive value.
+ * @param insetPolygon The resulting inset polygon, if any.
+ * @return true if an inset polygon exists, false otherwise.
+ */
+bool SkInsetConvexPolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+ SkScalar inset, SkTDArray<SkPoint>* insetPolygon);
+
+/**
+ * Generates a simple polygon (if possible) that is offset a constant distance from the boundary
+ * of a given simple polygon.
+ * The input polygon must be simple, have no coincident vertices or collinear edges, and have
+ * values clamped to the nearest 1/16th.
+ *
+ * @param inputPolygonVerts Array of points representing the vertices of the original polygon.
+ * @param inputPolygonSize Number of vertices in the original polygon.
+ * @param bounds Bounding rectangle for the original polygon.
+ * @param offset How far we wish to offset the polygon.
+ * Positive values indicate insetting, negative values outsetting.
+ * @param offsetPolgon The resulting offset polygon, if any.
+ * @param polygonIndices The indices of the original polygon that map to the new one.
+ * @return true if an offset simple polygon exists, false otherwise.
+ */
+bool SkOffsetSimplePolygon(const SkPoint* inputPolygonVerts, int inputPolygonSize,
+ const SkRect& bounds, SkScalar offset, SkTDArray<SkPoint>* offsetPolygon,
+ SkTDArray<int>* polygonIndices = nullptr);
+
+/**
+ * Compute the number of points needed for a circular join when offsetting a vertex.
+ * The lengths of offset0 and offset1 don't have to equal |offset| -- only the direction matters.
+ * The segment lengths will be approximately four pixels.
+ *
+ * @param offset0 Starting offset vector direction.
+ * @param offset1 Ending offset vector direction.
+ * @param offset Offset value (can be negative).
+ * @param rotSin Sine of rotation delta per step.
+ * @param rotCos Cosine of rotation delta per step.
+ * @param n Number of steps to fill out the arc.
+ * @return true for success, false otherwise
+ */
+bool SkComputeRadialSteps(const SkVector& offset0, const SkVector& offset1, SkScalar offset,
+ SkScalar* rotSin, SkScalar* rotCos, int* n);
+
+/**
+ * Determine winding direction for a polygon.
+ * The input polygon must be simple or the result will be meaningless.
+ *
+ * @param polygonVerts Array of points representing the vertices of the polygon.
+ * @param polygonSize Number of vertices in the polygon.
+ * @return 1 for cw, -1 for ccw, and 0 if zero signed area (either degenerate or self-intersecting).
+ * The y-axis is assumed to be pointing down.
+ */
+int SkGetPolygonWinding(const SkPoint* polygonVerts, int polygonSize);
+
+/**
+ * Determine whether a polygon is convex or not.
+ *
+ * @param polygonVerts Array of points representing the vertices of the polygon.
+ * @param polygonSize Number of vertices in the polygon.
+ * @return true if the polygon is convex, false otherwise.
+ */
+bool SkIsConvexPolygon(const SkPoint* polygonVerts, int polygonSize);
+
+/**
+ * Determine whether a polygon is simple (i.e., not self-intersecting) or not.
+ * The input polygon must have no coincident vertices or the test will fail.
+ * The polygon is also expected to have values clamped to the nearest 1/16th.
+ *
+ * @param polygonVerts Array of points representing the vertices of the polygon.
+ * @param polygonSize Number of vertices in the polygon.
+ * @return true if the polygon is simple, false otherwise.
+ */
+ bool SkIsSimplePolygon(const SkPoint* polygonVerts, int polygonSize);
+
+ /**
+ * Compute indices to triangulate the given polygon.
+ * The input polygon must be simple (i.e. it is not self-intersecting)
+ * and have no coincident vertices or collinear edges.
+ *
+ * @param polygonVerts Array of points representing the vertices of the polygon.
+ * @param indexMap Mapping from index in the given array to the final index in the triangulation.
+ * @param polygonSize Number of vertices in the polygon.
+ * @param triangleIndices Indices of the resulting triangulation.
+ * @return true if successful, false otherwise.
+ */
+ bool SkTriangulateSimplePolygon(const SkPoint* polygonVerts, uint16_t* indexMap, int polygonSize,
+ SkTDArray<uint16_t>* triangleIndices);
+
+#endif // !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkShaderUtils.cpp b/gfx/skia/skia/src/utils/SkShaderUtils.cpp
new file mode 100644
index 0000000000..43b0b8d633
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShaderUtils.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/utils/SkShaderUtils.h"
+
+#include "include/core/SkString.h"
+#include "include/private/SkSLString.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkStringUtils.h"
+#include "src/sksl/SkSLProgramSettings.h"
+
+#include <cstddef>
+
+namespace SkShaderUtils {
+
+class GLSLPrettyPrint {
+public:
+ GLSLPrettyPrint() {}
+
+ std::string prettify(const std::string& string) {
+ fTabs = 0;
+ fFreshline = true;
+
+ // If a string breaks while in the middle 'parse until' we need to continue parsing on the
+ // next string
+ fInParseUntilNewline = false;
+ fInParseUntil = false;
+
+ int parensDepth = 0;
+
+ // setup pretty state
+ fIndex = 0;
+ fLength = string.length();
+ fInput = string.c_str();
+
+ while (fLength > fIndex) {
+ /* the heart and soul of our prettification algorithm. The rules should hopefully
+ * be self explanatory. For '#' and '//' tokens we parse until we reach a newline.
+ *
+ * For long style comments like this one, we search for the ending token. We also
+ * preserve whitespace in these comments WITH THE CAVEAT that we do the newlines
+ * ourselves. This allows us to remain in control of line numbers, and matching
+ * tabs Existing tabs in the input string are copied over too, but this will look
+ * funny
+ *
+ * '{' and '}' are handled in basically the same way. We add a newline if we aren't
+ * on a fresh line, dirty the line, then add a second newline, ie braces are always
+ * on their own lines indented properly. The one funkiness here is structs print
+ * with the semicolon on its own line. Its not a problem for a glsl compiler though
+ *
+ * '(' and ')' are basically ignored, except as a sign we need to ignore ';' ala
+ * in for loops.
+ *
+ * ';' means add a new line
+ *
+ * '\t' and '\n' are ignored in general parsing for backwards compatability with
+ * existing shader code and we also have a special case for handling whitespace
+ * at the beginning of fresh lines.
+ *
+ * Otherwise just add the new character to the pretty string, indenting if
+ * necessary.
+ */
+ if (fInParseUntilNewline) {
+ this->parseUntilNewline();
+ } else if (fInParseUntil) {
+ this->parseUntil(fInParseUntilToken);
+ } else if (this->hasToken("#") || this->hasToken("//")) {
+ this->parseUntilNewline();
+ } else if (this->hasToken("/*")) {
+ this->parseUntil("*/");
+ } else if ('{' == fInput[fIndex]) {
+ this->newline();
+ this->appendChar('{');
+ fTabs++;
+ this->newline();
+ } else if ('}' == fInput[fIndex]) {
+ fTabs--;
+ this->newline();
+ this->appendChar('}');
+ this->newline();
+ } else if (this->hasToken(")")) {
+ parensDepth--;
+ } else if (this->hasToken("(")) {
+ parensDepth++;
+ } else if (!parensDepth && this->hasToken(";")) {
+ this->newline();
+ } else if ('\t' == fInput[fIndex] || '\n' == fInput[fIndex] ||
+ (fFreshline && ' ' == fInput[fIndex])) {
+ fIndex++;
+ } else {
+ this->appendChar(fInput[fIndex]);
+ }
+ }
+
+ return fPretty;
+ }
+
+private:
+ void appendChar(char c) {
+ this->tabString();
+ SkSL::String::appendf(&fPretty, "%c", fInput[fIndex++]);
+ fFreshline = false;
+ }
+
+ // hasToken automatically consumes the next token, if it is a match, and then tabs
+ // if necessary, before inserting the token into the pretty string
+ bool hasToken(const char* token) {
+ size_t i = fIndex;
+ for (size_t j = 0; token[j] && fLength > i; i++, j++) {
+ if (token[j] != fInput[i]) {
+ return false;
+ }
+ }
+ this->tabString();
+ fIndex = i;
+ fPretty.append(token);
+ fFreshline = false;
+ return true;
+ }
+
+ void parseUntilNewline() {
+ while (fLength > fIndex) {
+ if ('\n' == fInput[fIndex]) {
+ fIndex++;
+ this->newline();
+ fInParseUntilNewline = false;
+ break;
+ }
+ SkSL::String::appendf(&fPretty, "%c", fInput[fIndex++]);
+ fInParseUntilNewline = true;
+ }
+ }
+
+ // this code assumes it is not actually searching for a newline. If you need to search for a
+ // newline, then use the function above. If you do search for a newline with this function
+ // it will consume the entire string and the output will certainly not be prettified
+ void parseUntil(const char* token) {
+ while (fLength > fIndex) {
+ // For embedded newlines, this code will make sure to embed the newline in the
+ // pretty string, increase the linecount, and tab out the next line to the appropriate
+ // place
+ if ('\n' == fInput[fIndex]) {
+ this->newline();
+ this->tabString();
+ fIndex++;
+ }
+ if (this->hasToken(token)) {
+ fInParseUntil = false;
+ break;
+ }
+ fFreshline = false;
+ SkSL::String::appendf(&fPretty, "%c", fInput[fIndex++]);
+ fInParseUntil = true;
+ fInParseUntilToken = token;
+ }
+ }
+
+ // We only tab if on a newline, otherwise consider the line tabbed
+ void tabString() {
+ if (fFreshline) {
+ for (int t = 0; t < fTabs; t++) {
+ fPretty.append("\t");
+ }
+ }
+ }
+
+ // newline is really a request to add a newline, if we are on a fresh line there is no reason
+ // to add another newline
+ void newline() {
+ if (!fFreshline) {
+ fFreshline = true;
+ fPretty.append("\n");
+ }
+ }
+
+ bool fFreshline;
+ int fTabs;
+ size_t fIndex, fLength;
+ const char* fInput;
+ std::string fPretty;
+
+ // Some helpers for parseUntil when we go over a string length
+ bool fInParseUntilNewline;
+ bool fInParseUntil;
+ const char* fInParseUntilToken;
+};
+
+std::string PrettyPrint(const std::string& string) {
+ GLSLPrettyPrint pp;
+ return pp.prettify(string);
+}
+
+void VisitLineByLine(const std::string& text,
+ const std::function<void(int lineNumber, const char* lineText)>& visitFn) {
+ SkTArray<SkString> lines;
+ SkStrSplit(text.c_str(), "\n", kStrict_SkStrSplitMode, &lines);
+ for (int i = 0; i < lines.size(); ++i) {
+ visitFn(i + 1, lines[i].c_str());
+ }
+}
+
+std::string BuildShaderErrorMessage(const char* shader, const char* errors) {
+ std::string abortText{"Shader compilation error\n"
+ "------------------------\n"};
+ VisitLineByLine(shader, [&](int lineNumber, const char* lineText) {
+ SkSL::String::appendf(&abortText, "%4i\t%s\n", lineNumber, lineText);
+ });
+ SkSL::String::appendf(&abortText, "Errors:\n%s", errors);
+ return abortText;
+}
+
+void PrintShaderBanner(SkSL::ProgramKind programKind) {
+ const char* typeName = "Unknown";
+ if (SkSL::ProgramConfig::IsVertex(programKind)) {
+ typeName = "Vertex";
+ } else if (SkSL::ProgramConfig::IsFragment(programKind)) {
+ typeName = "Fragment";
+ }
+ SkDebugf("---- %s shader ----------------------------------------------------\n", typeName);
+}
+
+} // namespace SkShaderUtils
diff --git a/gfx/skia/skia/src/utils/SkShaderUtils.h b/gfx/skia/skia/src/utils/SkShaderUtils.h
new file mode 100644
index 0000000000..a3e8b40ba5
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShaderUtils.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShaderUtils_DEFINED
+#define SkShaderUtils_DEFINED
+
+#include "include/private/base/SkDebug.h"
+
+#include <cstdint>
+#include <functional>
+#include <string>
+
+namespace SkSL { enum class ProgramKind : int8_t; }
+
+namespace SkShaderUtils {
+
+std::string PrettyPrint(const std::string& string);
+
+void VisitLineByLine(const std::string& text,
+ const std::function<void(int lineNumber, const char* lineText)>&);
+
+// Prints shaders one line at the time. This ensures they don't get truncated by the adb log.
+inline void PrintLineByLine(const std::string& text) {
+ VisitLineByLine(text, [](int lineNumber, const char* lineText) {
+ SkDebugf("%4i\t%s\n", lineNumber, lineText);
+ });
+}
+
+// Combines raw shader and error text into an easier-to-read error message with line numbers.
+std::string BuildShaderErrorMessage(const char* shader, const char* errors);
+
+void PrintShaderBanner(SkSL::ProgramKind programKind);
+
+} // namespace SkShaderUtils
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkShadowTessellator.cpp b/gfx/skia/skia/src/utils/SkShadowTessellator.cpp
new file mode 100644
index 0000000000..095a35747f
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShadowTessellator.cpp
@@ -0,0 +1,1191 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "src/utils/SkShadowTessellator.h"
+
+#include "include/core/SkColor.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkTypes.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkFloatingPoint.h"
+#include "include/private/base/SkTDArray.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkGeometry.h"
+#include "src/core/SkPointPriv.h"
+#include "src/core/SkRectPriv.h"
+#include "src/utils/SkPolyUtils.h"
+
+#include <algorithm>
+#include <cstdint>
+
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/geometry/GrPathUtils.h"
+#endif
+
+using namespace skia_private;
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+/**
+ * Base class
+ */
+class SkBaseShadowTessellator {
+public:
+ SkBaseShadowTessellator(const SkPoint3& zPlaneParams, const SkRect& bounds, bool transparent);
+ virtual ~SkBaseShadowTessellator() {}
+
+ sk_sp<SkVertices> releaseVertices() {
+ if (!fSucceeded) {
+ return nullptr;
+ }
+ return SkVertices::MakeCopy(SkVertices::kTriangles_VertexMode, this->vertexCount(),
+ fPositions.begin(), nullptr, fColors.begin(),
+ this->indexCount(), fIndices.begin());
+ }
+
+protected:
+ inline static constexpr auto kMinHeight = 0.1f;
+ inline static constexpr auto kPenumbraColor = SK_ColorTRANSPARENT;
+ inline static constexpr auto kUmbraColor = SK_ColorBLACK;
+
+ int vertexCount() const { return fPositions.size(); }
+ int indexCount() const { return fIndices.size(); }
+
+ // initialization methods
+ bool accumulateCentroid(const SkPoint& c, const SkPoint& n);
+ bool checkConvexity(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2);
+ void finishPathPolygon();
+
+ // convex shadow methods
+ bool computeConvexShadow(SkScalar inset, SkScalar outset, bool doClip);
+ void computeClipVectorsAndTestCentroid();
+ bool clipUmbraPoint(const SkPoint& umbraPoint, const SkPoint& centroid, SkPoint* clipPoint);
+ void addEdge(const SkVector& nextPoint, const SkVector& nextNormal, SkColor umbraColor,
+ const SkTDArray<SkPoint>& umbraPolygon, bool lastEdge, bool doClip);
+ bool addInnerPoint(const SkPoint& pathPoint, SkColor umbraColor,
+ const SkTDArray<SkPoint>& umbraPolygon, int* currUmbraIndex);
+ int getClosestUmbraIndex(const SkPoint& point, const SkTDArray<SkPoint>& umbraPolygon);
+
+ // concave shadow methods
+ bool computeConcaveShadow(SkScalar inset, SkScalar outset);
+ void stitchConcaveRings(const SkTDArray<SkPoint>& umbraPolygon,
+ SkTDArray<int>* umbraIndices,
+ const SkTDArray<SkPoint>& penumbraPolygon,
+ SkTDArray<int>* penumbraIndices);
+
+ void handleLine(const SkPoint& p);
+ void handleLine(const SkMatrix& m, SkPoint* p);
+
+ void handleQuad(const SkPoint pts[3]);
+ void handleQuad(const SkMatrix& m, SkPoint pts[3]);
+
+ void handleCubic(const SkMatrix& m, SkPoint pts[4]);
+
+ void handleConic(const SkMatrix& m, SkPoint pts[3], SkScalar w);
+
+ bool addArc(const SkVector& nextNormal, SkScalar offset, bool finishArc);
+
+ void appendTriangle(uint16_t index0, uint16_t index1, uint16_t index2);
+ void appendQuad(uint16_t index0, uint16_t index1, uint16_t index2, uint16_t index3);
+
+ SkScalar heightFunc(SkScalar x, SkScalar y) {
+ return fZPlaneParams.fX*x + fZPlaneParams.fY*y + fZPlaneParams.fZ;
+ }
+
+ SkPoint3 fZPlaneParams;
+
+ // temporary buffer
+ SkTDArray<SkPoint> fPointBuffer;
+
+ SkTDArray<SkPoint> fPositions;
+ SkTDArray<SkColor> fColors;
+ SkTDArray<uint16_t> fIndices;
+
+ SkTDArray<SkPoint> fPathPolygon;
+ SkTDArray<SkPoint> fClipPolygon;
+ SkTDArray<SkVector> fClipVectors;
+
+ SkRect fPathBounds;
+ SkPoint fCentroid;
+ SkScalar fArea;
+ SkScalar fLastArea;
+ SkScalar fLastCross;
+
+ int fFirstVertexIndex;
+ SkVector fFirstOutset;
+ SkPoint fFirstPoint;
+
+ bool fSucceeded;
+ bool fTransparent;
+ bool fIsConvex;
+ bool fValidUmbra;
+
+ SkScalar fDirection;
+ int fPrevUmbraIndex;
+ int fCurrUmbraIndex;
+ int fCurrClipIndex;
+ bool fPrevUmbraOutside;
+ bool fFirstUmbraOutside;
+ SkVector fPrevOutset;
+ SkPoint fPrevPoint;
+};
+
+static bool compute_normal(const SkPoint& p0, const SkPoint& p1, SkScalar dir,
+ SkVector* newNormal) {
+ SkVector normal;
+ // compute perpendicular
+ normal.fX = p0.fY - p1.fY;
+ normal.fY = p1.fX - p0.fX;
+ normal *= dir;
+ if (!normal.normalize()) {
+ return false;
+ }
+ *newNormal = normal;
+ return true;
+}
+
+static bool duplicate_pt(const SkPoint& p0, const SkPoint& p1) {
+ static constexpr SkScalar kClose = (SK_Scalar1 / 16);
+ static constexpr SkScalar kCloseSqd = kClose * kClose;
+
+ SkScalar distSq = SkPointPriv::DistanceToSqd(p0, p1);
+ return distSq < kCloseSqd;
+}
+
+static SkScalar perp_dot(const SkPoint& p0, const SkPoint& p1, const SkPoint& p2) {
+ SkVector v0 = p1 - p0;
+ SkVector v1 = p2 - p1;
+ return v0.cross(v1);
+}
+
+SkBaseShadowTessellator::SkBaseShadowTessellator(const SkPoint3& zPlaneParams, const SkRect& bounds,
+ bool transparent)
+ : fZPlaneParams(zPlaneParams)
+ , fPathBounds(bounds)
+ , fCentroid({0, 0})
+ , fArea(0)
+ , fLastArea(0)
+ , fLastCross(0)
+ , fFirstVertexIndex(-1)
+ , fSucceeded(false)
+ , fTransparent(transparent)
+ , fIsConvex(true)
+ , fValidUmbra(true)
+ , fDirection(1)
+ , fPrevUmbraIndex(-1)
+ , fCurrUmbraIndex(0)
+ , fCurrClipIndex(0)
+ , fPrevUmbraOutside(false)
+ , fFirstUmbraOutside(false) {
+ // child classes will set reserve for positions, colors and indices
+}
+
+bool SkBaseShadowTessellator::accumulateCentroid(const SkPoint& curr, const SkPoint& next) {
+ if (duplicate_pt(curr, next)) {
+ return false;
+ }
+
+ SkASSERT(!fPathPolygon.empty());
+ SkVector v0 = curr - fPathPolygon[0];
+ SkVector v1 = next - fPathPolygon[0];
+ SkScalar quadArea = v0.cross(v1);
+ fCentroid.fX += (v0.fX + v1.fX) * quadArea;
+ fCentroid.fY += (v0.fY + v1.fY) * quadArea;
+ fArea += quadArea;
+ // convexity check
+ if (quadArea*fLastArea < 0) {
+ fIsConvex = false;
+ }
+ if (0 != quadArea) {
+ fLastArea = quadArea;
+ }
+
+ return true;
+}
+
+bool SkBaseShadowTessellator::checkConvexity(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2) {
+ SkScalar cross = perp_dot(p0, p1, p2);
+ // skip collinear point
+ if (SkScalarNearlyZero(cross)) {
+ return false;
+ }
+
+ // check for convexity
+ if (fLastCross*cross < 0) {
+ fIsConvex = false;
+ }
+ if (0 != cross) {
+ fLastCross = cross;
+ }
+
+ return true;
+}
+
+void SkBaseShadowTessellator::finishPathPolygon() {
+ if (fPathPolygon.size() > 1) {
+ if (!this->accumulateCentroid(fPathPolygon[fPathPolygon.size() - 1], fPathPolygon[0])) {
+ // remove coincident point
+ fPathPolygon.pop_back();
+ }
+ }
+
+ if (fPathPolygon.size() > 2) {
+ // do this before the final convexity check, so we use the correct fPathPolygon[0]
+ fCentroid *= sk_ieee_float_divide(1, 3 * fArea);
+ fCentroid += fPathPolygon[0];
+ if (!checkConvexity(fPathPolygon[fPathPolygon.size() - 2],
+ fPathPolygon[fPathPolygon.size() - 1],
+ fPathPolygon[0])) {
+ // remove collinear point
+ fPathPolygon[0] = fPathPolygon[fPathPolygon.size() - 1];
+ fPathPolygon.pop_back();
+ }
+ }
+
+ // if area is positive, winding is ccw
+ fDirection = fArea > 0 ? -1 : 1;
+}
+
+bool SkBaseShadowTessellator::computeConvexShadow(SkScalar inset, SkScalar outset, bool doClip) {
+ if (doClip) {
+ this->computeClipVectorsAndTestCentroid();
+ }
+
+ // adjust inset distance and umbra color if necessary
+ auto umbraColor = kUmbraColor;
+ SkScalar minDistSq = SkPointPriv::DistanceToLineSegmentBetweenSqd(fCentroid,
+ fPathPolygon[0],
+ fPathPolygon[1]);
+ SkRect bounds;
+ bounds.setBounds(&fPathPolygon[0], fPathPolygon.size());
+ for (int i = 1; i < fPathPolygon.size(); ++i) {
+ int j = i + 1;
+ if (i == fPathPolygon.size() - 1) {
+ j = 0;
+ }
+ SkPoint currPoint = fPathPolygon[i];
+ SkPoint nextPoint = fPathPolygon[j];
+ SkScalar distSq = SkPointPriv::DistanceToLineSegmentBetweenSqd(fCentroid, currPoint,
+ nextPoint);
+ if (distSq < minDistSq) {
+ minDistSq = distSq;
+ }
+ }
+
+ SkTDArray<SkPoint> insetPolygon;
+ if (inset > SK_ScalarNearlyZero) {
+ static constexpr auto kTolerance = 1.0e-2f;
+ if (minDistSq < (inset + kTolerance)*(inset + kTolerance)) {
+ // if the umbra would collapse, we back off a bit on inner blur and adjust the alpha
+ auto newInset = SkScalarSqrt(minDistSq) - kTolerance;
+ auto ratio = 128 * (newInset / inset + 1);
+ SkASSERT(SkScalarIsFinite(ratio));
+ // they aren't PMColors, but the interpolation algorithm is the same
+ umbraColor = SkPMLerp(kUmbraColor, kPenumbraColor, (unsigned)ratio);
+ inset = newInset;
+ }
+
+ // generate inner ring
+ if (!SkInsetConvexPolygon(&fPathPolygon[0], fPathPolygon.size(), inset,
+ &insetPolygon)) {
+ // not ideal, but in this case we'll inset using the centroid
+ fValidUmbra = false;
+ }
+ }
+ const SkTDArray<SkPoint>& umbraPolygon = (inset > SK_ScalarNearlyZero) ? insetPolygon
+ : fPathPolygon;
+
+ // walk around the path polygon, generate outer ring and connect to inner ring
+ if (fTransparent) {
+ fPositions.push_back(fCentroid);
+ fColors.push_back(umbraColor);
+ }
+ fCurrUmbraIndex = 0;
+
+ // initial setup
+ // add first quad
+ int polyCount = fPathPolygon.size();
+ if (!compute_normal(fPathPolygon[polyCount - 1], fPathPolygon[0], fDirection, &fFirstOutset)) {
+ // polygon should be sanitized by this point, so this is unrecoverable
+ return false;
+ }
+
+ fFirstOutset *= outset;
+ fFirstPoint = fPathPolygon[polyCount - 1];
+ fFirstVertexIndex = fPositions.size();
+ fPrevOutset = fFirstOutset;
+ fPrevPoint = fFirstPoint;
+ fPrevUmbraIndex = -1;
+
+ this->addInnerPoint(fFirstPoint, umbraColor, umbraPolygon, &fPrevUmbraIndex);
+
+ if (!fTransparent && doClip) {
+ SkPoint clipPoint;
+ bool isOutside = this->clipUmbraPoint(fPositions[fFirstVertexIndex],
+ fCentroid, &clipPoint);
+ if (isOutside) {
+ fPositions.push_back(clipPoint);
+ fColors.push_back(umbraColor);
+ }
+ fPrevUmbraOutside = isOutside;
+ fFirstUmbraOutside = isOutside;
+ }
+
+ SkPoint newPoint = fFirstPoint + fFirstOutset;
+ fPositions.push_back(newPoint);
+ fColors.push_back(kPenumbraColor);
+ this->addEdge(fPathPolygon[0], fFirstOutset, umbraColor, umbraPolygon, false, doClip);
+
+ for (int i = 1; i < polyCount; ++i) {
+ SkVector normal;
+ if (!compute_normal(fPrevPoint, fPathPolygon[i], fDirection, &normal)) {
+ return false;
+ }
+ normal *= outset;
+ this->addArc(normal, outset, true);
+ this->addEdge(fPathPolygon[i], normal, umbraColor, umbraPolygon,
+ i == polyCount - 1, doClip);
+ }
+ SkASSERT(this->indexCount());
+
+ // final fan
+ SkASSERT(fPositions.size() >= 3);
+ if (this->addArc(fFirstOutset, outset, false)) {
+ if (fFirstUmbraOutside) {
+ this->appendTriangle(fFirstVertexIndex, fPositions.size() - 1,
+ fFirstVertexIndex + 2);
+ } else {
+ this->appendTriangle(fFirstVertexIndex, fPositions.size() - 1,
+ fFirstVertexIndex + 1);
+ }
+ } else {
+ // no arc added, fix up by setting first penumbra point position to last one
+ if (fFirstUmbraOutside) {
+ fPositions[fFirstVertexIndex + 2] = fPositions[fPositions.size() - 1];
+ } else {
+ fPositions[fFirstVertexIndex + 1] = fPositions[fPositions.size() - 1];
+ }
+ }
+
+ return true;
+}
+
+void SkBaseShadowTessellator::computeClipVectorsAndTestCentroid() {
+ SkASSERT(fClipPolygon.size() >= 3);
+ fCurrClipIndex = fClipPolygon.size() - 1;
+
+ // init clip vectors
+ SkVector v0 = fClipPolygon[1] - fClipPolygon[0];
+ SkVector v1 = fClipPolygon[2] - fClipPolygon[0];
+ fClipVectors.push_back(v0);
+
+ // init centroid check
+ bool hiddenCentroid = true;
+ v1 = fCentroid - fClipPolygon[0];
+ SkScalar initCross = v0.cross(v1);
+
+ for (int p = 1; p < fClipPolygon.size(); ++p) {
+ // add to clip vectors
+ v0 = fClipPolygon[(p + 1) % fClipPolygon.size()] - fClipPolygon[p];
+ fClipVectors.push_back(v0);
+ // Determine if transformed centroid is inside clipPolygon.
+ v1 = fCentroid - fClipPolygon[p];
+ if (initCross*v0.cross(v1) <= 0) {
+ hiddenCentroid = false;
+ }
+ }
+ SkASSERT(fClipVectors.size() == fClipPolygon.size());
+
+ fTransparent = fTransparent || !hiddenCentroid;
+}
+
+void SkBaseShadowTessellator::addEdge(const SkPoint& nextPoint, const SkVector& nextNormal,
+ SkColor umbraColor, const SkTDArray<SkPoint>& umbraPolygon,
+ bool lastEdge, bool doClip) {
+ // add next umbra point
+ int currUmbraIndex;
+ bool duplicate;
+ if (lastEdge) {
+ duplicate = false;
+ currUmbraIndex = fFirstVertexIndex;
+ fPrevPoint = nextPoint;
+ } else {
+ duplicate = this->addInnerPoint(nextPoint, umbraColor, umbraPolygon, &currUmbraIndex);
+ }
+ int prevPenumbraIndex = duplicate || (currUmbraIndex == fFirstVertexIndex)
+ ? fPositions.size() - 1
+ : fPositions.size() - 2;
+ if (!duplicate) {
+ // add to center fan if transparent or centroid showing
+ if (fTransparent) {
+ this->appendTriangle(0, fPrevUmbraIndex, currUmbraIndex);
+ // otherwise add to clip ring
+ } else if (doClip) {
+ SkPoint clipPoint;
+ bool isOutside = lastEdge ? fFirstUmbraOutside
+ : this->clipUmbraPoint(fPositions[currUmbraIndex], fCentroid,
+ &clipPoint);
+ if (isOutside) {
+ if (!lastEdge) {
+ fPositions.push_back(clipPoint);
+ fColors.push_back(umbraColor);
+ }
+ this->appendTriangle(fPrevUmbraIndex, currUmbraIndex, currUmbraIndex + 1);
+ if (fPrevUmbraOutside) {
+ // fill out quad
+ this->appendTriangle(fPrevUmbraIndex, currUmbraIndex + 1,
+ fPrevUmbraIndex + 1);
+ }
+ } else if (fPrevUmbraOutside) {
+ // add tri
+ this->appendTriangle(fPrevUmbraIndex, currUmbraIndex, fPrevUmbraIndex + 1);
+ }
+
+ fPrevUmbraOutside = isOutside;
+ }
+ }
+
+ // add next penumbra point and quad
+ SkPoint newPoint = nextPoint + nextNormal;
+ fPositions.push_back(newPoint);
+ fColors.push_back(kPenumbraColor);
+
+ if (!duplicate) {
+ this->appendTriangle(fPrevUmbraIndex, prevPenumbraIndex, currUmbraIndex);
+ }
+ this->appendTriangle(prevPenumbraIndex, fPositions.size() - 1, currUmbraIndex);
+
+ fPrevUmbraIndex = currUmbraIndex;
+ fPrevOutset = nextNormal;
+}
+
+bool SkBaseShadowTessellator::clipUmbraPoint(const SkPoint& umbraPoint, const SkPoint& centroid,
+ SkPoint* clipPoint) {
+ SkVector segmentVector = centroid - umbraPoint;
+
+ int startClipPoint = fCurrClipIndex;
+ do {
+ SkVector dp = umbraPoint - fClipPolygon[fCurrClipIndex];
+ SkScalar denom = fClipVectors[fCurrClipIndex].cross(segmentVector);
+ SkScalar t_num = dp.cross(segmentVector);
+ // if line segments are nearly parallel
+ if (SkScalarNearlyZero(denom)) {
+ // and collinear
+ if (SkScalarNearlyZero(t_num)) {
+ return false;
+ }
+ // otherwise are separate, will try the next poly segment
+ // else if crossing lies within poly segment
+ } else if (t_num >= 0 && t_num <= denom) {
+ SkScalar s_num = dp.cross(fClipVectors[fCurrClipIndex]);
+ // if umbra point is inside the clip polygon
+ if (s_num >= 0 && s_num <= denom) {
+ segmentVector *= s_num / denom;
+ *clipPoint = umbraPoint + segmentVector;
+ return true;
+ }
+ }
+ fCurrClipIndex = (fCurrClipIndex + 1) % fClipPolygon.size();
+ } while (fCurrClipIndex != startClipPoint);
+
+ return false;
+}
+
+bool SkBaseShadowTessellator::addInnerPoint(const SkPoint& pathPoint, SkColor umbraColor,
+ const SkTDArray<SkPoint>& umbraPolygon,
+ int* currUmbraIndex) {
+ SkPoint umbraPoint;
+ if (!fValidUmbra) {
+ SkVector v = fCentroid - pathPoint;
+ v *= 0.95f;
+ umbraPoint = pathPoint + v;
+ } else {
+ umbraPoint = umbraPolygon[this->getClosestUmbraIndex(pathPoint, umbraPolygon)];
+ }
+
+ fPrevPoint = pathPoint;
+
+ // merge "close" points
+ if (fPrevUmbraIndex == -1 ||
+ !duplicate_pt(umbraPoint, fPositions[fPrevUmbraIndex])) {
+ // if we've wrapped around, don't add a new point
+ if (fPrevUmbraIndex >= 0 && duplicate_pt(umbraPoint, fPositions[fFirstVertexIndex])) {
+ *currUmbraIndex = fFirstVertexIndex;
+ } else {
+ *currUmbraIndex = fPositions.size();
+ fPositions.push_back(umbraPoint);
+ fColors.push_back(umbraColor);
+ }
+ return false;
+ } else {
+ *currUmbraIndex = fPrevUmbraIndex;
+ return true;
+ }
+}
+
+int SkBaseShadowTessellator::getClosestUmbraIndex(const SkPoint& p,
+ const SkTDArray<SkPoint>& umbraPolygon) {
+ SkScalar minDistance = SkPointPriv::DistanceToSqd(p, umbraPolygon[fCurrUmbraIndex]);
+ int index = fCurrUmbraIndex;
+ int dir = 1;
+ int next = (index + dir) % umbraPolygon.size();
+
+ // init travel direction
+ SkScalar distance = SkPointPriv::DistanceToSqd(p, umbraPolygon[next]);
+ if (distance < minDistance) {
+ index = next;
+ minDistance = distance;
+ } else {
+ dir = umbraPolygon.size() - 1;
+ }
+
+ // iterate until we find a point that increases the distance
+ next = (index + dir) % umbraPolygon.size();
+ distance = SkPointPriv::DistanceToSqd(p, umbraPolygon[next]);
+ while (distance < minDistance) {
+ index = next;
+ minDistance = distance;
+ next = (index + dir) % umbraPolygon.size();
+ distance = SkPointPriv::DistanceToSqd(p, umbraPolygon[next]);
+ }
+
+ fCurrUmbraIndex = index;
+ return index;
+}
+
+bool SkBaseShadowTessellator::computeConcaveShadow(SkScalar inset, SkScalar outset) {
+ if (!SkIsSimplePolygon(&fPathPolygon[0], fPathPolygon.size())) {
+ return false;
+ }
+
+ // shouldn't inset more than the half bounds of the polygon
+ inset = std::min(inset, std::min(SkTAbs(SkRectPriv::HalfWidth(fPathBounds)),
+ SkTAbs(SkRectPriv::HalfHeight(fPathBounds))));
+ // generate inner ring
+ SkTDArray<SkPoint> umbraPolygon;
+ SkTDArray<int> umbraIndices;
+ umbraIndices.reserve(fPathPolygon.size());
+ if (!SkOffsetSimplePolygon(&fPathPolygon[0], fPathPolygon.size(), fPathBounds, inset,
+ &umbraPolygon, &umbraIndices)) {
+ // TODO: figure out how to handle this case
+ return false;
+ }
+
+ // generate outer ring
+ SkTDArray<SkPoint> penumbraPolygon;
+ SkTDArray<int> penumbraIndices;
+ penumbraPolygon.reserve(umbraPolygon.size());
+ penumbraIndices.reserve(umbraPolygon.size());
+ if (!SkOffsetSimplePolygon(&fPathPolygon[0], fPathPolygon.size(), fPathBounds, -outset,
+ &penumbraPolygon, &penumbraIndices)) {
+ // TODO: figure out how to handle this case
+ return false;
+ }
+
+ if (umbraPolygon.empty() || penumbraPolygon.empty()) {
+ return false;
+ }
+
+ // attach the rings together
+ this->stitchConcaveRings(umbraPolygon, &umbraIndices, penumbraPolygon, &penumbraIndices);
+
+ return true;
+}
+
+void SkBaseShadowTessellator::stitchConcaveRings(const SkTDArray<SkPoint>& umbraPolygon,
+ SkTDArray<int>* umbraIndices,
+ const SkTDArray<SkPoint>& penumbraPolygon,
+ SkTDArray<int>* penumbraIndices) {
+ // TODO: only create and fill indexMap when fTransparent is true?
+ AutoSTMalloc<64, uint16_t> indexMap(umbraPolygon.size());
+
+ // find minimum indices
+ int minIndex = 0;
+ int min = (*penumbraIndices)[0];
+ for (int i = 1; i < (*penumbraIndices).size(); ++i) {
+ if ((*penumbraIndices)[i] < min) {
+ min = (*penumbraIndices)[i];
+ minIndex = i;
+ }
+ }
+ int currPenumbra = minIndex;
+
+ minIndex = 0;
+ min = (*umbraIndices)[0];
+ for (int i = 1; i < (*umbraIndices).size(); ++i) {
+ if ((*umbraIndices)[i] < min) {
+ min = (*umbraIndices)[i];
+ minIndex = i;
+ }
+ }
+ int currUmbra = minIndex;
+
+ // now find a case where the indices are equal (there should be at least one)
+ int maxPenumbraIndex = fPathPolygon.size() - 1;
+ int maxUmbraIndex = fPathPolygon.size() - 1;
+ while ((*penumbraIndices)[currPenumbra] != (*umbraIndices)[currUmbra]) {
+ if ((*penumbraIndices)[currPenumbra] < (*umbraIndices)[currUmbra]) {
+ (*penumbraIndices)[currPenumbra] += fPathPolygon.size();
+ maxPenumbraIndex = (*penumbraIndices)[currPenumbra];
+ currPenumbra = (currPenumbra + 1) % penumbraPolygon.size();
+ } else {
+ (*umbraIndices)[currUmbra] += fPathPolygon.size();
+ maxUmbraIndex = (*umbraIndices)[currUmbra];
+ currUmbra = (currUmbra + 1) % umbraPolygon.size();
+ }
+ }
+
+ fPositions.push_back(penumbraPolygon[currPenumbra]);
+ fColors.push_back(kPenumbraColor);
+ int prevPenumbraIndex = 0;
+ fPositions.push_back(umbraPolygon[currUmbra]);
+ fColors.push_back(kUmbraColor);
+ fPrevUmbraIndex = 1;
+ indexMap[currUmbra] = 1;
+
+ int nextPenumbra = (currPenumbra + 1) % penumbraPolygon.size();
+ int nextUmbra = (currUmbra + 1) % umbraPolygon.size();
+ while ((*penumbraIndices)[nextPenumbra] <= maxPenumbraIndex ||
+ (*umbraIndices)[nextUmbra] <= maxUmbraIndex) {
+
+ if ((*umbraIndices)[nextUmbra] == (*penumbraIndices)[nextPenumbra]) {
+ // advance both one step
+ fPositions.push_back(penumbraPolygon[nextPenumbra]);
+ fColors.push_back(kPenumbraColor);
+ int currPenumbraIndex = fPositions.size() - 1;
+
+ fPositions.push_back(umbraPolygon[nextUmbra]);
+ fColors.push_back(kUmbraColor);
+ int currUmbraIndex = fPositions.size() - 1;
+ indexMap[nextUmbra] = currUmbraIndex;
+
+ this->appendQuad(prevPenumbraIndex, currPenumbraIndex,
+ fPrevUmbraIndex, currUmbraIndex);
+
+ prevPenumbraIndex = currPenumbraIndex;
+ (*penumbraIndices)[currPenumbra] += fPathPolygon.size();
+ currPenumbra = nextPenumbra;
+ nextPenumbra = (currPenumbra + 1) % penumbraPolygon.size();
+
+ fPrevUmbraIndex = currUmbraIndex;
+ (*umbraIndices)[currUmbra] += fPathPolygon.size();
+ currUmbra = nextUmbra;
+ nextUmbra = (currUmbra + 1) % umbraPolygon.size();
+ }
+
+ while ((*penumbraIndices)[nextPenumbra] < (*umbraIndices)[nextUmbra] &&
+ (*penumbraIndices)[nextPenumbra] <= maxPenumbraIndex) {
+ // fill out penumbra arc
+ fPositions.push_back(penumbraPolygon[nextPenumbra]);
+ fColors.push_back(kPenumbraColor);
+ int currPenumbraIndex = fPositions.size() - 1;
+
+ this->appendTriangle(prevPenumbraIndex, currPenumbraIndex, fPrevUmbraIndex);
+
+ prevPenumbraIndex = currPenumbraIndex;
+ // this ensures the ordering when we wrap around
+ (*penumbraIndices)[currPenumbra] += fPathPolygon.size();
+ currPenumbra = nextPenumbra;
+ nextPenumbra = (currPenumbra + 1) % penumbraPolygon.size();
+ }
+
+ while ((*umbraIndices)[nextUmbra] < (*penumbraIndices)[nextPenumbra] &&
+ (*umbraIndices)[nextUmbra] <= maxUmbraIndex) {
+ // fill out umbra arc
+ fPositions.push_back(umbraPolygon[nextUmbra]);
+ fColors.push_back(kUmbraColor);
+ int currUmbraIndex = fPositions.size() - 1;
+ indexMap[nextUmbra] = currUmbraIndex;
+
+ this->appendTriangle(fPrevUmbraIndex, prevPenumbraIndex, currUmbraIndex);
+
+ fPrevUmbraIndex = currUmbraIndex;
+ // this ensures the ordering when we wrap around
+ (*umbraIndices)[currUmbra] += fPathPolygon.size();
+ currUmbra = nextUmbra;
+ nextUmbra = (currUmbra + 1) % umbraPolygon.size();
+ }
+ }
+ // finish up by advancing both one step
+ fPositions.push_back(penumbraPolygon[nextPenumbra]);
+ fColors.push_back(kPenumbraColor);
+ int currPenumbraIndex = fPositions.size() - 1;
+
+ fPositions.push_back(umbraPolygon[nextUmbra]);
+ fColors.push_back(kUmbraColor);
+ int currUmbraIndex = fPositions.size() - 1;
+ indexMap[nextUmbra] = currUmbraIndex;
+
+ this->appendQuad(prevPenumbraIndex, currPenumbraIndex,
+ fPrevUmbraIndex, currUmbraIndex);
+
+ if (fTransparent) {
+ SkTriangulateSimplePolygon(umbraPolygon.begin(), indexMap, umbraPolygon.size(),
+ &fIndices);
+ }
+}
+
+
+// tesselation tolerance values, in device space pixels
+#if defined(SK_GANESH)
+static constexpr SkScalar kQuadTolerance = 0.2f;
+static constexpr SkScalar kCubicTolerance = 0.2f;
+static constexpr SkScalar kQuadToleranceSqd = kQuadTolerance * kQuadTolerance;
+static constexpr SkScalar kCubicToleranceSqd = kCubicTolerance * kCubicTolerance;
+#endif
+static constexpr SkScalar kConicTolerance = 0.25f;
+
+// clamps the point to the nearest 16th of a pixel
+static void sanitize_point(const SkPoint& in, SkPoint* out) {
+ out->fX = SkScalarRoundToScalar(16.f*in.fX)*0.0625f;
+ out->fY = SkScalarRoundToScalar(16.f*in.fY)*0.0625f;
+}
+
+void SkBaseShadowTessellator::handleLine(const SkPoint& p) {
+ SkPoint pSanitized;
+ sanitize_point(p, &pSanitized);
+
+ if (!fPathPolygon.empty()) {
+ if (!this->accumulateCentroid(fPathPolygon[fPathPolygon.size() - 1], pSanitized)) {
+ // skip coincident point
+ return;
+ }
+ }
+
+ if (fPathPolygon.size() > 1) {
+ if (!checkConvexity(fPathPolygon[fPathPolygon.size() - 2],
+ fPathPolygon[fPathPolygon.size() - 1],
+ pSanitized)) {
+ // remove collinear point
+ fPathPolygon.pop_back();
+ // it's possible that the previous point is coincident with the new one now
+ if (duplicate_pt(fPathPolygon[fPathPolygon.size() - 1], pSanitized)) {
+ fPathPolygon.pop_back();
+ }
+ }
+ }
+
+ fPathPolygon.push_back(pSanitized);
+}
+
+void SkBaseShadowTessellator::handleLine(const SkMatrix& m, SkPoint* p) {
+ m.mapPoints(p, 1);
+
+ this->handleLine(*p);
+}
+
+void SkBaseShadowTessellator::handleQuad(const SkPoint pts[3]) {
+#if defined(SK_GANESH)
+ // check for degeneracy
+ SkVector v0 = pts[1] - pts[0];
+ SkVector v1 = pts[2] - pts[0];
+ if (SkScalarNearlyZero(v0.cross(v1))) {
+ return;
+ }
+ // TODO: Pull PathUtils out of Ganesh?
+ int maxCount = GrPathUtils::quadraticPointCount(pts, kQuadTolerance);
+ fPointBuffer.resize(maxCount);
+ SkPoint* target = fPointBuffer.begin();
+ int count = GrPathUtils::generateQuadraticPoints(pts[0], pts[1], pts[2],
+ kQuadToleranceSqd, &target, maxCount);
+ fPointBuffer.resize(count);
+ for (int i = 0; i < count; i++) {
+ this->handleLine(fPointBuffer[i]);
+ }
+#else
+ // for now, just to draw something
+ this->handleLine(pts[1]);
+ this->handleLine(pts[2]);
+#endif
+}
+
+void SkBaseShadowTessellator::handleQuad(const SkMatrix& m, SkPoint pts[3]) {
+ m.mapPoints(pts, 3);
+ this->handleQuad(pts);
+}
+
+void SkBaseShadowTessellator::handleCubic(const SkMatrix& m, SkPoint pts[4]) {
+ m.mapPoints(pts, 4);
+#if defined(SK_GANESH)
+ // TODO: Pull PathUtils out of Ganesh?
+ int maxCount = GrPathUtils::cubicPointCount(pts, kCubicTolerance);
+ fPointBuffer.resize(maxCount);
+ SkPoint* target = fPointBuffer.begin();
+ int count = GrPathUtils::generateCubicPoints(pts[0], pts[1], pts[2], pts[3],
+ kCubicToleranceSqd, &target, maxCount);
+ fPointBuffer.resize(count);
+ for (int i = 0; i < count; i++) {
+ this->handleLine(fPointBuffer[i]);
+ }
+#else
+ // for now, just to draw something
+ this->handleLine(pts[1]);
+ this->handleLine(pts[2]);
+ this->handleLine(pts[3]);
+#endif
+}
+
+void SkBaseShadowTessellator::handleConic(const SkMatrix& m, SkPoint pts[3], SkScalar w) {
+ if (m.hasPerspective()) {
+ w = SkConic::TransformW(pts, w, m);
+ }
+ m.mapPoints(pts, 3);
+ SkAutoConicToQuads quadder;
+ const SkPoint* quads = quadder.computeQuads(pts, w, kConicTolerance);
+ SkPoint lastPoint = *(quads++);
+ int count = quadder.countQuads();
+ for (int i = 0; i < count; ++i) {
+ SkPoint quadPts[3];
+ quadPts[0] = lastPoint;
+ quadPts[1] = quads[0];
+ quadPts[2] = i == count - 1 ? pts[2] : quads[1];
+ this->handleQuad(quadPts);
+ lastPoint = quadPts[2];
+ quads += 2;
+ }
+}
+
+bool SkBaseShadowTessellator::addArc(const SkVector& nextNormal, SkScalar offset, bool finishArc) {
+ // fill in fan from previous quad
+ SkScalar rotSin, rotCos;
+ int numSteps;
+ if (!SkComputeRadialSteps(fPrevOutset, nextNormal, offset, &rotSin, &rotCos, &numSteps)) {
+ // recover as best we can
+ numSteps = 0;
+ }
+ SkVector prevNormal = fPrevOutset;
+ for (int i = 0; i < numSteps-1; ++i) {
+ SkVector currNormal;
+ currNormal.fX = prevNormal.fX*rotCos - prevNormal.fY*rotSin;
+ currNormal.fY = prevNormal.fY*rotCos + prevNormal.fX*rotSin;
+ fPositions.push_back(fPrevPoint + currNormal);
+ fColors.push_back(kPenumbraColor);
+ this->appendTriangle(fPrevUmbraIndex, fPositions.size() - 1, fPositions.size() - 2);
+
+ prevNormal = currNormal;
+ }
+ if (finishArc && numSteps) {
+ fPositions.push_back(fPrevPoint + nextNormal);
+ fColors.push_back(kPenumbraColor);
+ this->appendTriangle(fPrevUmbraIndex, fPositions.size() - 1, fPositions.size() - 2);
+ }
+ fPrevOutset = nextNormal;
+
+ return (numSteps > 0);
+}
+
+void SkBaseShadowTessellator::appendTriangle(uint16_t index0, uint16_t index1, uint16_t index2) {
+ auto indices = fIndices.append(3);
+
+ indices[0] = index0;
+ indices[1] = index1;
+ indices[2] = index2;
+}
+
+void SkBaseShadowTessellator::appendQuad(uint16_t index0, uint16_t index1,
+ uint16_t index2, uint16_t index3) {
+ auto indices = fIndices.append(6);
+
+ indices[0] = index0;
+ indices[1] = index1;
+ indices[2] = index2;
+
+ indices[3] = index2;
+ indices[4] = index1;
+ indices[5] = index3;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkAmbientShadowTessellator : public SkBaseShadowTessellator {
+public:
+ SkAmbientShadowTessellator(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlaneParams, bool transparent);
+
+private:
+ bool computePathPolygon(const SkPath& path, const SkMatrix& ctm);
+
+ using INHERITED = SkBaseShadowTessellator;
+};
+
+SkAmbientShadowTessellator::SkAmbientShadowTessellator(const SkPath& path,
+ const SkMatrix& ctm,
+ const SkPoint3& zPlaneParams,
+ bool transparent)
+ : INHERITED(zPlaneParams, path.getBounds(), transparent) {
+ // Set base colors
+ auto baseZ = heightFunc(fPathBounds.centerX(), fPathBounds.centerY());
+ // umbraColor is the interior value, penumbraColor the exterior value.
+ auto outset = SkDrawShadowMetrics::AmbientBlurRadius(baseZ);
+ auto inset = outset * SkDrawShadowMetrics::AmbientRecipAlpha(baseZ) - outset;
+
+ if (!this->computePathPolygon(path, ctm)) {
+ return;
+ }
+ if (fPathPolygon.size() < 3 || !SkScalarIsFinite(fArea)) {
+ fSucceeded = true; // We don't want to try to blur these cases, so we will
+ // return an empty SkVertices instead.
+ return;
+ }
+
+ // Outer ring: 3*numPts
+ // Middle ring: numPts
+ fPositions.reserve(4 * path.countPoints());
+ fColors.reserve(4 * path.countPoints());
+ // Outer ring: 12*numPts
+ // Middle ring: 0
+ fIndices.reserve(12 * path.countPoints());
+
+ if (fIsConvex) {
+ fSucceeded = this->computeConvexShadow(inset, outset, false);
+ } else {
+ fSucceeded = this->computeConcaveShadow(inset, outset);
+ }
+}
+
+bool SkAmbientShadowTessellator::computePathPolygon(const SkPath& path, const SkMatrix& ctm) {
+ fPathPolygon.reserve(path.countPoints());
+
+ // walk around the path, tessellate and generate outer ring
+ // if original path is transparent, will accumulate sum of points for centroid
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPath::Verb verb;
+ bool verbSeen = false;
+ bool closeSeen = false;
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ if (closeSeen) {
+ return false;
+ }
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ this->handleLine(ctm, &pts[1]);
+ break;
+ case SkPath::kQuad_Verb:
+ this->handleQuad(ctm, pts);
+ break;
+ case SkPath::kCubic_Verb:
+ this->handleCubic(ctm, pts);
+ break;
+ case SkPath::kConic_Verb:
+ this->handleConic(ctm, pts, iter.conicWeight());
+ break;
+ case SkPath::kMove_Verb:
+ if (verbSeen) {
+ return false;
+ }
+ break;
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ closeSeen = true;
+ break;
+ }
+ verbSeen = true;
+ }
+
+ this->finishPathPolygon();
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+class SkSpotShadowTessellator : public SkBaseShadowTessellator {
+public:
+ SkSpotShadowTessellator(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlaneParams, const SkPoint3& lightPos,
+ SkScalar lightRadius, bool transparent, bool directional);
+
+private:
+ bool computeClipAndPathPolygons(const SkPath& path, const SkMatrix& ctm,
+ const SkMatrix& shadowTransform);
+ void addToClip(const SkVector& nextPoint);
+
+ using INHERITED = SkBaseShadowTessellator;
+};
+
+SkSpotShadowTessellator::SkSpotShadowTessellator(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlaneParams,
+ const SkPoint3& lightPos, SkScalar lightRadius,
+ bool transparent, bool directional)
+ : INHERITED(zPlaneParams, path.getBounds(), transparent) {
+
+ // Compute the blur radius, scale and translation for the spot shadow.
+ SkMatrix shadowTransform;
+ SkScalar outset;
+ if (!SkDrawShadowMetrics::GetSpotShadowTransform(lightPos, lightRadius, ctm, zPlaneParams,
+ path.getBounds(), directional,
+ &shadowTransform, &outset)) {
+ return;
+ }
+ SkScalar inset = outset;
+
+ // compute rough clip bounds for umbra, plus offset polygon, plus centroid
+ if (!this->computeClipAndPathPolygons(path, ctm, shadowTransform)) {
+ return;
+ }
+ if (fClipPolygon.size() < 3 || fPathPolygon.size() < 3 || !SkScalarIsFinite(fArea)) {
+ fSucceeded = true; // We don't want to try to blur these cases, so we will
+ // return an empty SkVertices instead.
+ return;
+ }
+
+ // TODO: calculate these reserves better
+ // Penumbra ring: 3*numPts
+ // Umbra ring: numPts
+ // Inner ring: numPts
+ fPositions.reserve(5 * path.countPoints());
+ fColors.reserve(5 * path.countPoints());
+ // Penumbra ring: 12*numPts
+ // Umbra ring: 3*numPts
+ fIndices.reserve(15 * path.countPoints());
+
+ if (fIsConvex) {
+ fSucceeded = this->computeConvexShadow(inset, outset, true);
+ } else {
+ fSucceeded = this->computeConcaveShadow(inset, outset);
+ }
+
+ if (!fSucceeded) {
+ return;
+ }
+
+ fSucceeded = true;
+}
+
+bool SkSpotShadowTessellator::computeClipAndPathPolygons(const SkPath& path, const SkMatrix& ctm,
+ const SkMatrix& shadowTransform) {
+
+ fPathPolygon.reserve(path.countPoints());
+ fClipPolygon.reserve(path.countPoints());
+
+ // Walk around the path and compute clip polygon and path polygon.
+ // Will also accumulate sum of areas for centroid.
+ // For Bezier curves, we compute additional interior points on curve.
+ SkPath::Iter iter(path, true);
+ SkPoint pts[4];
+ SkPoint clipPts[4];
+ SkPath::Verb verb;
+
+ // coefficients to compute cubic Bezier at t = 5/16
+ static constexpr SkScalar kA = 0.32495117187f;
+ static constexpr SkScalar kB = 0.44311523437f;
+ static constexpr SkScalar kC = 0.20141601562f;
+ static constexpr SkScalar kD = 0.03051757812f;
+
+ SkPoint curvePoint;
+ SkScalar w;
+ bool closeSeen = false;
+ bool verbSeen = false;
+ while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+ if (closeSeen) {
+ return false;
+ }
+ switch (verb) {
+ case SkPath::kLine_Verb:
+ ctm.mapPoints(clipPts, &pts[1], 1);
+ this->addToClip(clipPts[0]);
+ this->handleLine(shadowTransform, &pts[1]);
+ break;
+ case SkPath::kQuad_Verb:
+ ctm.mapPoints(clipPts, pts, 3);
+ // point at t = 1/2
+ curvePoint.fX = 0.25f*clipPts[0].fX + 0.5f*clipPts[1].fX + 0.25f*clipPts[2].fX;
+ curvePoint.fY = 0.25f*clipPts[0].fY + 0.5f*clipPts[1].fY + 0.25f*clipPts[2].fY;
+ this->addToClip(curvePoint);
+ this->addToClip(clipPts[2]);
+ this->handleQuad(shadowTransform, pts);
+ break;
+ case SkPath::kConic_Verb:
+ ctm.mapPoints(clipPts, pts, 3);
+ w = iter.conicWeight();
+ // point at t = 1/2
+ curvePoint.fX = 0.25f*clipPts[0].fX + w*0.5f*clipPts[1].fX + 0.25f*clipPts[2].fX;
+ curvePoint.fY = 0.25f*clipPts[0].fY + w*0.5f*clipPts[1].fY + 0.25f*clipPts[2].fY;
+ curvePoint *= SkScalarInvert(0.5f + 0.5f*w);
+ this->addToClip(curvePoint);
+ this->addToClip(clipPts[2]);
+ this->handleConic(shadowTransform, pts, w);
+ break;
+ case SkPath::kCubic_Verb:
+ ctm.mapPoints(clipPts, pts, 4);
+ // point at t = 5/16
+ curvePoint.fX = kA*clipPts[0].fX + kB*clipPts[1].fX
+ + kC*clipPts[2].fX + kD*clipPts[3].fX;
+ curvePoint.fY = kA*clipPts[0].fY + kB*clipPts[1].fY
+ + kC*clipPts[2].fY + kD*clipPts[3].fY;
+ this->addToClip(curvePoint);
+ // point at t = 11/16
+ curvePoint.fX = kD*clipPts[0].fX + kC*clipPts[1].fX
+ + kB*clipPts[2].fX + kA*clipPts[3].fX;
+ curvePoint.fY = kD*clipPts[0].fY + kC*clipPts[1].fY
+ + kB*clipPts[2].fY + kA*clipPts[3].fY;
+ this->addToClip(curvePoint);
+ this->addToClip(clipPts[3]);
+ this->handleCubic(shadowTransform, pts);
+ break;
+ case SkPath::kMove_Verb:
+ if (verbSeen) {
+ return false;
+ }
+ break;
+ case SkPath::kClose_Verb:
+ case SkPath::kDone_Verb:
+ closeSeen = true;
+ break;
+ default:
+ SkDEBUGFAIL("unknown verb");
+ }
+ verbSeen = true;
+ }
+
+ this->finishPathPolygon();
+ return true;
+}
+
+void SkSpotShadowTessellator::addToClip(const SkPoint& point) {
+ if (fClipPolygon.empty() || !duplicate_pt(point, fClipPolygon[fClipPolygon.size() - 1])) {
+ fClipPolygon.push_back(point);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+sk_sp<SkVertices> SkShadowTessellator::MakeAmbient(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlane, bool transparent) {
+ if (!ctm.mapRect(path.getBounds()).isFinite() || !zPlane.isFinite()) {
+ return nullptr;
+ }
+ SkAmbientShadowTessellator ambientTess(path, ctm, zPlane, transparent);
+ return ambientTess.releaseVertices();
+}
+
+sk_sp<SkVertices> SkShadowTessellator::MakeSpot(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlane, const SkPoint3& lightPos,
+ SkScalar lightRadius, bool transparent,
+ bool directional) {
+ if (!ctm.mapRect(path.getBounds()).isFinite() || !zPlane.isFinite() ||
+ !lightPos.isFinite() || !(lightPos.fZ >= SK_ScalarNearlyZero) ||
+ !SkScalarIsFinite(lightRadius) || !(lightRadius >= SK_ScalarNearlyZero)) {
+ return nullptr;
+ }
+ SkSpotShadowTessellator spotTess(path, ctm, zPlane, lightPos, lightRadius, transparent,
+ directional);
+ return spotTess.releaseVertices();
+}
+
+#endif // !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
diff --git a/gfx/skia/skia/src/utils/SkShadowTessellator.h b/gfx/skia/skia/src/utils/SkShadowTessellator.h
new file mode 100644
index 0000000000..607386db51
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShadowTessellator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkShadowTessellator_DEFINED
+#define SkShadowTessellator_DEFINED
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkScalar.h"
+
+#include <functional>
+
+class SkMatrix;
+class SkPath;
+class SkVertices;
+struct SkPoint3;
+
+namespace SkShadowTessellator {
+
+typedef std::function<SkScalar(SkScalar, SkScalar)> HeightFunc;
+
+/**
+ * This function generates an ambient shadow mesh for a path by walking the path, outsetting by
+ * the radius, and setting inner and outer colors to umbraColor and penumbraColor, respectively.
+ * If transparent is true, then the center of the ambient shadow will be filled in.
+ */
+sk_sp<SkVertices> MakeAmbient(const SkPath& path, const SkMatrix& ctm,
+ const SkPoint3& zPlane, bool transparent);
+
+/**
+ * This function generates a spot shadow mesh for a path by walking the transformed path,
+ * further transforming by the scale and translation, and outsetting and insetting by a radius.
+ * The center will be clipped against the original path unless transparent is true.
+ */
+sk_sp<SkVertices> MakeSpot(const SkPath& path, const SkMatrix& ctm, const SkPoint3& zPlane,
+ const SkPoint3& lightPos, SkScalar lightRadius, bool transparent,
+ bool directional);
+
+
+} // namespace SkShadowTessellator
+
+#endif // SK_ENABLE_OPTIMIZE_SIZE
+
+#endif
diff --git a/gfx/skia/skia/src/utils/SkShadowUtils.cpp b/gfx/skia/skia/src/utils/SkShadowUtils.cpp
new file mode 100644
index 0000000000..ddd287ad0c
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkShadowUtils.cpp
@@ -0,0 +1,844 @@
+/*
+* Copyright 2017 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "include/utils/SkShadowUtils.h"
+
+#include "include/core/SkBlendMode.h"
+#include "include/core/SkBlender.h"
+#include "include/core/SkBlurTypes.h"
+#include "include/core/SkCanvas.h"
+#include "include/core/SkColorFilter.h"
+#include "include/core/SkMaskFilter.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPaint.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkPoint3.h"
+#include "include/core/SkRect.h"
+#include "include/core/SkRefCnt.h"
+#include "include/core/SkVertices.h"
+#include "include/private/SkIDChangeListener.h"
+#include "include/private/base/SkTPin.h"
+#include "include/private/base/SkTemplates.h"
+#include "include/private/base/SkTo.h"
+#include "src/base/SkRandom.h"
+#include "src/core/SkBlurMask.h"
+#include "src/core/SkColorFilterPriv.h"
+#include "src/core/SkDevice.h"
+#include "src/core/SkDrawShadowInfo.h"
+#include "src/core/SkPathPriv.h"
+#include "src/core/SkResourceCache.h"
+#include "src/core/SkVerticesPriv.h"
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+#include "src/utils/SkShadowTessellator.h"
+#endif
+
+#if defined(SK_GANESH)
+#include "src/gpu/ganesh/GrStyle.h"
+#include "src/gpu/ganesh/geometry/GrStyledShape.h"
+#endif
+
+#include <algorithm>
+#include <cstring>
+#include <functional>
+#include <memory>
+#include <new>
+#include <utility>
+
+using namespace skia_private;
+
+class SkRRect;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+namespace {
+
+uint64_t resource_cache_shared_id() {
+ return 0x2020776f64616873llu; // 'shadow '
+}
+
+/** Factory for an ambient shadow mesh with particular shadow properties. */
+struct AmbientVerticesFactory {
+ SkScalar fOccluderHeight = SK_ScalarNaN; // NaN so that isCompatible will fail until init'ed.
+ bool fTransparent;
+ SkVector fOffset;
+
+ bool isCompatible(const AmbientVerticesFactory& that, SkVector* translate) const {
+ if (fOccluderHeight != that.fOccluderHeight || fTransparent != that.fTransparent) {
+ return false;
+ }
+ *translate = that.fOffset;
+ return true;
+ }
+
+ sk_sp<SkVertices> makeVertices(const SkPath& path, const SkMatrix& ctm,
+ SkVector* translate) const {
+ SkPoint3 zParams = SkPoint3::Make(0, 0, fOccluderHeight);
+ // pick a canonical place to generate shadow
+ SkMatrix noTrans(ctm);
+ if (!ctm.hasPerspective()) {
+ noTrans[SkMatrix::kMTransX] = 0;
+ noTrans[SkMatrix::kMTransY] = 0;
+ }
+ *translate = fOffset;
+ return SkShadowTessellator::MakeAmbient(path, noTrans, zParams, fTransparent);
+ }
+};
+
+/** Factory for an spot shadow mesh with particular shadow properties. */
+struct SpotVerticesFactory {
+ enum class OccluderType {
+ // The umbra cannot be dropped out because either the occluder is not opaque,
+ // or the center of the umbra is visible. Uses point light.
+ kPointTransparent,
+ // The umbra can be dropped where it is occluded. Uses point light.
+ kPointOpaquePartialUmbra,
+ // It is known that the entire umbra is occluded. Uses point light.
+ kPointOpaqueNoUmbra,
+ // Uses directional light.
+ kDirectional,
+ // The umbra can't be dropped out. Uses directional light.
+ kDirectionalTransparent,
+ };
+
+ SkVector fOffset;
+ SkPoint fLocalCenter;
+ SkScalar fOccluderHeight = SK_ScalarNaN; // NaN so that isCompatible will fail until init'ed.
+ SkPoint3 fDevLightPos;
+ SkScalar fLightRadius;
+ OccluderType fOccluderType;
+
+ bool isCompatible(const SpotVerticesFactory& that, SkVector* translate) const {
+ if (fOccluderHeight != that.fOccluderHeight || fDevLightPos.fZ != that.fDevLightPos.fZ ||
+ fLightRadius != that.fLightRadius || fOccluderType != that.fOccluderType) {
+ return false;
+ }
+ switch (fOccluderType) {
+ case OccluderType::kPointTransparent:
+ case OccluderType::kPointOpaqueNoUmbra:
+ // 'this' and 'that' will either both have no umbra removed or both have all the
+ // umbra removed.
+ *translate = that.fOffset;
+ return true;
+ case OccluderType::kPointOpaquePartialUmbra:
+ // In this case we partially remove the umbra differently for 'this' and 'that'
+ // if the offsets don't match.
+ if (fOffset == that.fOffset) {
+ translate->set(0, 0);
+ return true;
+ }
+ return false;
+ case OccluderType::kDirectional:
+ case OccluderType::kDirectionalTransparent:
+ *translate = that.fOffset - fOffset;
+ return true;
+ }
+ SK_ABORT("Uninitialized occluder type?");
+ }
+
+ sk_sp<SkVertices> makeVertices(const SkPath& path, const SkMatrix& ctm,
+ SkVector* translate) const {
+ bool transparent = fOccluderType == OccluderType::kPointTransparent ||
+ fOccluderType == OccluderType::kDirectionalTransparent;
+ bool directional = fOccluderType == OccluderType::kDirectional ||
+ fOccluderType == OccluderType::kDirectionalTransparent;
+ SkPoint3 zParams = SkPoint3::Make(0, 0, fOccluderHeight);
+ if (directional) {
+ translate->set(0, 0);
+ return SkShadowTessellator::MakeSpot(path, ctm, zParams, fDevLightPos, fLightRadius,
+ transparent, true);
+ } else if (ctm.hasPerspective() || OccluderType::kPointOpaquePartialUmbra == fOccluderType) {
+ translate->set(0, 0);
+ return SkShadowTessellator::MakeSpot(path, ctm, zParams, fDevLightPos, fLightRadius,
+ transparent, false);
+ } else {
+ // pick a canonical place to generate shadow, with light centered over path
+ SkMatrix noTrans(ctm);
+ noTrans[SkMatrix::kMTransX] = 0;
+ noTrans[SkMatrix::kMTransY] = 0;
+ SkPoint devCenter(fLocalCenter);
+ noTrans.mapPoints(&devCenter, 1);
+ SkPoint3 centerLightPos = SkPoint3::Make(devCenter.fX, devCenter.fY, fDevLightPos.fZ);
+ *translate = fOffset;
+ return SkShadowTessellator::MakeSpot(path, noTrans, zParams,
+ centerLightPos, fLightRadius, transparent, false);
+ }
+ }
+};
+
+/**
+ * This manages a set of tessellations for a given shape in the cache. Because SkResourceCache
+ * records are immutable this is not itself a Rec. When we need to update it we return this on
+ * the FindVisitor and let the cache destroy the Rec. We'll update the tessellations and then add
+ * a new Rec with an adjusted size for any deletions/additions.
+ */
+class CachedTessellations : public SkRefCnt {
+public:
+ size_t size() const { return fAmbientSet.size() + fSpotSet.size(); }
+
+ sk_sp<SkVertices> find(const AmbientVerticesFactory& ambient, const SkMatrix& matrix,
+ SkVector* translate) const {
+ return fAmbientSet.find(ambient, matrix, translate);
+ }
+
+ sk_sp<SkVertices> add(const SkPath& devPath, const AmbientVerticesFactory& ambient,
+ const SkMatrix& matrix, SkVector* translate) {
+ return fAmbientSet.add(devPath, ambient, matrix, translate);
+ }
+
+ sk_sp<SkVertices> find(const SpotVerticesFactory& spot, const SkMatrix& matrix,
+ SkVector* translate) const {
+ return fSpotSet.find(spot, matrix, translate);
+ }
+
+ sk_sp<SkVertices> add(const SkPath& devPath, const SpotVerticesFactory& spot,
+ const SkMatrix& matrix, SkVector* translate) {
+ return fSpotSet.add(devPath, spot, matrix, translate);
+ }
+
+private:
+ template <typename FACTORY, int MAX_ENTRIES>
+ class Set {
+ public:
+ size_t size() const { return fSize; }
+
+ sk_sp<SkVertices> find(const FACTORY& factory, const SkMatrix& matrix,
+ SkVector* translate) const {
+ for (int i = 0; i < MAX_ENTRIES; ++i) {
+ if (fEntries[i].fFactory.isCompatible(factory, translate)) {
+ const SkMatrix& m = fEntries[i].fMatrix;
+ if (matrix.hasPerspective() || m.hasPerspective()) {
+ if (matrix != fEntries[i].fMatrix) {
+ continue;
+ }
+ } else if (matrix.getScaleX() != m.getScaleX() ||
+ matrix.getSkewX() != m.getSkewX() ||
+ matrix.getScaleY() != m.getScaleY() ||
+ matrix.getSkewY() != m.getSkewY()) {
+ continue;
+ }
+ return fEntries[i].fVertices;
+ }
+ }
+ return nullptr;
+ }
+
+ sk_sp<SkVertices> add(const SkPath& path, const FACTORY& factory, const SkMatrix& matrix,
+ SkVector* translate) {
+ sk_sp<SkVertices> vertices = factory.makeVertices(path, matrix, translate);
+ if (!vertices) {
+ return nullptr;
+ }
+ int i;
+ if (fCount < MAX_ENTRIES) {
+ i = fCount++;
+ } else {
+ i = fRandom.nextULessThan(MAX_ENTRIES);
+ fSize -= fEntries[i].fVertices->approximateSize();
+ }
+ fEntries[i].fFactory = factory;
+ fEntries[i].fVertices = vertices;
+ fEntries[i].fMatrix = matrix;
+ fSize += vertices->approximateSize();
+ return vertices;
+ }
+
+ private:
+ struct Entry {
+ FACTORY fFactory;
+ sk_sp<SkVertices> fVertices;
+ SkMatrix fMatrix;
+ };
+ Entry fEntries[MAX_ENTRIES];
+ int fCount = 0;
+ size_t fSize = 0;
+ SkRandom fRandom;
+ };
+
+ Set<AmbientVerticesFactory, 4> fAmbientSet;
+ Set<SpotVerticesFactory, 4> fSpotSet;
+};
+
+/**
+ * A record of shadow vertices stored in SkResourceCache of CachedTessellations for a particular
+ * path. The key represents the path's geometry and not any shadow params.
+ */
+class CachedTessellationsRec : public SkResourceCache::Rec {
+public:
+ CachedTessellationsRec(const SkResourceCache::Key& key,
+ sk_sp<CachedTessellations> tessellations)
+ : fTessellations(std::move(tessellations)) {
+ fKey.reset(new uint8_t[key.size()]);
+ memcpy(fKey.get(), &key, key.size());
+ }
+
+ const Key& getKey() const override {
+ return *reinterpret_cast<SkResourceCache::Key*>(fKey.get());
+ }
+
+ size_t bytesUsed() const override { return fTessellations->size(); }
+
+ const char* getCategory() const override { return "tessellated shadow masks"; }
+
+ sk_sp<CachedTessellations> refTessellations() const { return fTessellations; }
+
+ template <typename FACTORY>
+ sk_sp<SkVertices> find(const FACTORY& factory, const SkMatrix& matrix,
+ SkVector* translate) const {
+ return fTessellations->find(factory, matrix, translate);
+ }
+
+private:
+ std::unique_ptr<uint8_t[]> fKey;
+ sk_sp<CachedTessellations> fTessellations;
+};
+
+/**
+ * Used by FindVisitor to determine whether a cache entry can be reused and if so returns the
+ * vertices and a translation vector. If the CachedTessellations does not contain a suitable
+ * mesh then we inform SkResourceCache to destroy the Rec and we return the CachedTessellations
+ * to the caller. The caller will update it and reinsert it back into the cache.
+ */
+template <typename FACTORY>
+struct FindContext {
+ FindContext(const SkMatrix* viewMatrix, const FACTORY* factory)
+ : fViewMatrix(viewMatrix), fFactory(factory) {}
+ const SkMatrix* const fViewMatrix;
+ // If this is valid after Find is called then we found the vertices and they should be drawn
+ // with fTranslate applied.
+ sk_sp<SkVertices> fVertices;
+ SkVector fTranslate = {0, 0};
+
+ // If this is valid after Find then the caller should add the vertices to the tessellation set
+ // and create a new CachedTessellationsRec and insert it into SkResourceCache.
+ sk_sp<CachedTessellations> fTessellationsOnFailure;
+
+ const FACTORY* fFactory;
+};
+
+/**
+ * Function called by SkResourceCache when a matching cache key is found. The FACTORY and matrix of
+ * the FindContext are used to determine if the vertices are reusable. If so the vertices and
+ * necessary translation vector are set on the FindContext.
+ */
+template <typename FACTORY>
+bool FindVisitor(const SkResourceCache::Rec& baseRec, void* ctx) {
+ FindContext<FACTORY>* findContext = (FindContext<FACTORY>*)ctx;
+ const CachedTessellationsRec& rec = static_cast<const CachedTessellationsRec&>(baseRec);
+ findContext->fVertices =
+ rec.find(*findContext->fFactory, *findContext->fViewMatrix, &findContext->fTranslate);
+ if (findContext->fVertices) {
+ return true;
+ }
+ // We ref the tessellations and let the cache destroy the Rec. Once the tessellations have been
+ // manipulated we will add a new Rec.
+ findContext->fTessellationsOnFailure = rec.refTessellations();
+ return false;
+}
+
+class ShadowedPath {
+public:
+ ShadowedPath(const SkPath* path, const SkMatrix* viewMatrix)
+ : fPath(path)
+ , fViewMatrix(viewMatrix)
+#if defined(SK_GANESH)
+ , fShapeForKey(*path, GrStyle::SimpleFill())
+#endif
+ {}
+
+ const SkPath& path() const { return *fPath; }
+ const SkMatrix& viewMatrix() const { return *fViewMatrix; }
+#if defined(SK_GANESH)
+ /** Negative means the vertices should not be cached for this path. */
+ int keyBytes() const { return fShapeForKey.unstyledKeySize() * sizeof(uint32_t); }
+ void writeKey(void* key) const {
+ fShapeForKey.writeUnstyledKey(reinterpret_cast<uint32_t*>(key));
+ }
+ bool isRRect(SkRRect* rrect) { return fShapeForKey.asRRect(rrect, nullptr, nullptr, nullptr); }
+#else
+ int keyBytes() const { return -1; }
+ void writeKey(void* key) const { SK_ABORT("Should never be called"); }
+ bool isRRect(SkRRect* rrect) { return false; }
+#endif
+
+private:
+ const SkPath* fPath;
+ const SkMatrix* fViewMatrix;
+#if defined(SK_GANESH)
+ GrStyledShape fShapeForKey;
+#endif
+};
+
+// This creates a domain of keys in SkResourceCache used by this file.
+static void* kNamespace;
+
+// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
+class ShadowInvalidator : public SkIDChangeListener {
+public:
+ ShadowInvalidator(const SkResourceCache::Key& key) {
+ fKey.reset(new uint8_t[key.size()]);
+ memcpy(fKey.get(), &key, key.size());
+ }
+
+private:
+ const SkResourceCache::Key& getKey() const {
+ return *reinterpret_cast<SkResourceCache::Key*>(fKey.get());
+ }
+
+ // always purge
+ static bool FindVisitor(const SkResourceCache::Rec&, void*) {
+ return false;
+ }
+
+ void changed() override {
+ SkResourceCache::Find(this->getKey(), ShadowInvalidator::FindVisitor, nullptr);
+ }
+
+ std::unique_ptr<uint8_t[]> fKey;
+};
+
+/**
+ * Draws a shadow to 'canvas'. The vertices used to draw the shadow are created by 'factory' unless
+ * they are first found in SkResourceCache.
+ */
+template <typename FACTORY>
+bool draw_shadow(const FACTORY& factory,
+ std::function<void(const SkVertices*, SkBlendMode, const SkPaint&,
+ SkScalar tx, SkScalar ty, bool)> drawProc, ShadowedPath& path, SkColor color) {
+ FindContext<FACTORY> context(&path.viewMatrix(), &factory);
+
+ SkResourceCache::Key* key = nullptr;
+ AutoSTArray<32 * 4, uint8_t> keyStorage;
+ int keyDataBytes = path.keyBytes();
+ if (keyDataBytes >= 0) {
+ keyStorage.reset(keyDataBytes + sizeof(SkResourceCache::Key));
+ key = new (keyStorage.begin()) SkResourceCache::Key();
+ path.writeKey((uint32_t*)(keyStorage.begin() + sizeof(*key)));
+ key->init(&kNamespace, resource_cache_shared_id(), keyDataBytes);
+ SkResourceCache::Find(*key, FindVisitor<FACTORY>, &context);
+ }
+
+ sk_sp<SkVertices> vertices;
+ bool foundInCache = SkToBool(context.fVertices);
+ if (foundInCache) {
+ vertices = std::move(context.fVertices);
+ } else {
+ // TODO: handle transforming the path as part of the tessellator
+ if (key) {
+ // Update or initialize a tessellation set and add it to the cache.
+ sk_sp<CachedTessellations> tessellations;
+ if (context.fTessellationsOnFailure) {
+ tessellations = std::move(context.fTessellationsOnFailure);
+ } else {
+ tessellations.reset(new CachedTessellations());
+ }
+ vertices = tessellations->add(path.path(), factory, path.viewMatrix(),
+ &context.fTranslate);
+ if (!vertices) {
+ return false;
+ }
+ auto rec = new CachedTessellationsRec(*key, std::move(tessellations));
+ SkPathPriv::AddGenIDChangeListener(path.path(), sk_make_sp<ShadowInvalidator>(*key));
+ SkResourceCache::Add(rec);
+ } else {
+ vertices = factory.makeVertices(path.path(), path.viewMatrix(),
+ &context.fTranslate);
+ if (!vertices) {
+ return false;
+ }
+ }
+ }
+
+ SkPaint paint;
+ // Run the vertex color through a GaussianColorFilter and then modulate the grayscale result of
+ // that against our 'color' param.
+ paint.setColorFilter(
+ SkColorFilters::Blend(color, SkBlendMode::kModulate)->makeComposed(
+ SkColorFilterPriv::MakeGaussian()));
+
+ drawProc(vertices.get(), SkBlendMode::kModulate, paint,
+ context.fTranslate.fX, context.fTranslate.fY, path.viewMatrix().hasPerspective());
+
+ return true;
+}
+} // namespace
+
+static bool tilted(const SkPoint3& zPlaneParams) {
+ return !SkScalarNearlyZero(zPlaneParams.fX) || !SkScalarNearlyZero(zPlaneParams.fY);
+}
+#endif // SK_ENABLE_OPTIMIZE_SIZE
+
+void SkShadowUtils::ComputeTonalColors(SkColor inAmbientColor, SkColor inSpotColor,
+ SkColor* outAmbientColor, SkColor* outSpotColor) {
+ // For tonal color we only compute color values for the spot shadow.
+ // The ambient shadow is greyscale only.
+
+ // Ambient
+ *outAmbientColor = SkColorSetARGB(SkColorGetA(inAmbientColor), 0, 0, 0);
+
+ // Spot
+ int spotR = SkColorGetR(inSpotColor);
+ int spotG = SkColorGetG(inSpotColor);
+ int spotB = SkColorGetB(inSpotColor);
+ int max = std::max(std::max(spotR, spotG), spotB);
+ int min = std::min(std::min(spotR, spotG), spotB);
+ SkScalar luminance = 0.5f*(max + min)/255.f;
+ SkScalar origA = SkColorGetA(inSpotColor)/255.f;
+
+ // We compute a color alpha value based on the luminance of the color, scaled by an
+ // adjusted alpha value. We want the following properties to match the UX examples
+ // (assuming a = 0.25) and to ensure that we have reasonable results when the color
+ // is black and/or the alpha is 0:
+ // f(0, a) = 0
+ // f(luminance, 0) = 0
+ // f(1, 0.25) = .5
+ // f(0.5, 0.25) = .4
+ // f(1, 1) = 1
+ // The following functions match this as closely as possible.
+ SkScalar alphaAdjust = (2.6f + (-2.66667f + 1.06667f*origA)*origA)*origA;
+ SkScalar colorAlpha = (3.544762f + (-4.891428f + 2.3466f*luminance)*luminance)*luminance;
+ colorAlpha = SkTPin(alphaAdjust*colorAlpha, 0.0f, 1.0f);
+
+ // Similarly, we set the greyscale alpha based on luminance and alpha so that
+ // f(0, a) = a
+ // f(luminance, 0) = 0
+ // f(1, 0.25) = 0.15
+ SkScalar greyscaleAlpha = SkTPin(origA*(1 - 0.4f*luminance), 0.0f, 1.0f);
+
+ // The final color we want to emulate is generated by rendering a color shadow (C_rgb) using an
+ // alpha computed from the color's luminance (C_a), and then a black shadow with alpha (S_a)
+ // which is an adjusted value of 'a'. Assuming SrcOver, a background color of B_rgb, and
+ // ignoring edge falloff, this becomes
+ //
+ // (C_a - S_a*C_a)*C_rgb + (1 - (S_a + C_a - S_a*C_a))*B_rgb
+ //
+ // Assuming premultiplied alpha, this means we scale the color by (C_a - S_a*C_a) and
+ // set the alpha to (S_a + C_a - S_a*C_a).
+ SkScalar colorScale = colorAlpha*(SK_Scalar1 - greyscaleAlpha);
+ SkScalar tonalAlpha = colorScale + greyscaleAlpha;
+ SkScalar unPremulScale = colorScale / tonalAlpha;
+ *outSpotColor = SkColorSetARGB(tonalAlpha*255.999f,
+ unPremulScale*spotR,
+ unPremulScale*spotG,
+ unPremulScale*spotB);
+}
+
+static bool fill_shadow_rec(const SkPath& path, const SkPoint3& zPlaneParams,
+ const SkPoint3& lightPos, SkScalar lightRadius,
+ SkColor ambientColor, SkColor spotColor,
+ uint32_t flags, const SkMatrix& ctm, SkDrawShadowRec* rec) {
+ SkPoint pt = { lightPos.fX, lightPos.fY };
+ if (!SkToBool(flags & kDirectionalLight_ShadowFlag)) {
+ // If light position is in device space, need to transform to local space
+ // before applying to SkCanvas.
+ SkMatrix inverse;
+ if (!ctm.invert(&inverse)) {
+ return false;
+ }
+ inverse.mapPoints(&pt, 1);
+ }
+
+ rec->fZPlaneParams = zPlaneParams;
+ rec->fLightPos = { pt.fX, pt.fY, lightPos.fZ };
+ rec->fLightRadius = lightRadius;
+ rec->fAmbientColor = ambientColor;
+ rec->fSpotColor = spotColor;
+ rec->fFlags = flags;
+
+ return true;
+}
+
+// Draw an offset spot shadow and outlining ambient shadow for the given path.
+void SkShadowUtils::DrawShadow(SkCanvas* canvas, const SkPath& path, const SkPoint3& zPlaneParams,
+ const SkPoint3& lightPos, SkScalar lightRadius,
+ SkColor ambientColor, SkColor spotColor,
+ uint32_t flags) {
+ SkDrawShadowRec rec;
+ if (!fill_shadow_rec(path, zPlaneParams, lightPos, lightRadius, ambientColor, spotColor,
+ flags, canvas->getTotalMatrix(), &rec)) {
+ return;
+ }
+
+ canvas->private_draw_shadow_rec(path, rec);
+}
+
+bool SkShadowUtils::GetLocalBounds(const SkMatrix& ctm, const SkPath& path,
+ const SkPoint3& zPlaneParams, const SkPoint3& lightPos,
+ SkScalar lightRadius, uint32_t flags, SkRect* bounds) {
+ SkDrawShadowRec rec;
+ if (!fill_shadow_rec(path, zPlaneParams, lightPos, lightRadius, SK_ColorBLACK, SK_ColorBLACK,
+ flags, ctm, &rec)) {
+ return false;
+ }
+
+ SkDrawShadowMetrics::GetLocalBounds(path, rec, ctm, bounds);
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+
+static bool validate_rec(const SkDrawShadowRec& rec) {
+ return rec.fLightPos.isFinite() && rec.fZPlaneParams.isFinite() &&
+ SkScalarIsFinite(rec.fLightRadius);
+}
+
+void SkBaseDevice::drawShadow(const SkPath& path, const SkDrawShadowRec& rec) {
+ if (!validate_rec(rec)) {
+ return;
+ }
+
+ SkMatrix viewMatrix = this->localToDevice();
+ SkAutoDeviceTransformRestore adr(this, SkMatrix::I());
+
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ auto drawVertsProc = [this](const SkVertices* vertices, SkBlendMode mode, const SkPaint& paint,
+ SkScalar tx, SkScalar ty, bool hasPerspective) {
+ if (vertices->priv().vertexCount()) {
+ // For perspective shadows we've already computed the shadow in world space,
+ // and we can't translate it without changing it. Otherwise we concat the
+ // change in translation from the cached version.
+ SkAutoDeviceTransformRestore adr(
+ this,
+ hasPerspective ? SkMatrix::I()
+ : this->localToDevice() * SkMatrix::Translate(tx, ty));
+ // The vertex colors for a tesselated shadow polygon are always either opaque black
+ // or transparent and their real contribution to the final blended color is via
+ // their alpha. We can skip expensive per-vertex color conversion for this.
+ this->drawVertices(vertices, SkBlender::Mode(mode), paint, /*skipColorXform=*/true);
+ }
+ };
+
+ ShadowedPath shadowedPath(&path, &viewMatrix);
+
+ bool tiltZPlane = tilted(rec.fZPlaneParams);
+ bool transparent = SkToBool(rec.fFlags & SkShadowFlags::kTransparentOccluder_ShadowFlag);
+ bool useBlur = SkToBool(rec.fFlags & SkShadowFlags::kConcaveBlurOnly_ShadowFlag) &&
+ !path.isConvex();
+ bool uncached = tiltZPlane || path.isVolatile();
+#endif
+ bool directional = SkToBool(rec.fFlags & SkShadowFlags::kDirectionalLight_ShadowFlag);
+
+ SkPoint3 zPlaneParams = rec.fZPlaneParams;
+ SkPoint3 devLightPos = rec.fLightPos;
+ if (!directional) {
+ viewMatrix.mapPoints((SkPoint*)&devLightPos.fX, 1);
+ }
+ float lightRadius = rec.fLightRadius;
+
+ if (SkColorGetA(rec.fAmbientColor) > 0) {
+ bool success = false;
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ if (uncached && !useBlur) {
+ sk_sp<SkVertices> vertices = SkShadowTessellator::MakeAmbient(path, viewMatrix,
+ zPlaneParams,
+ transparent);
+ if (vertices) {
+ SkPaint paint;
+ // Run the vertex color through a GaussianColorFilter and then modulate the
+ // grayscale result of that against our 'color' param.
+ paint.setColorFilter(
+ SkColorFilters::Blend(rec.fAmbientColor,
+ SkBlendMode::kModulate)->makeComposed(
+ SkColorFilterPriv::MakeGaussian()));
+ // The vertex colors for a tesselated shadow polygon are always either opaque black
+ // or transparent and their real contribution to the final blended color is via
+ // their alpha. We can skip expensive per-vertex color conversion for this.
+ this->drawVertices(vertices.get(),
+ SkBlender::Mode(SkBlendMode::kModulate),
+ paint,
+ /*skipColorXform=*/true);
+ success = true;
+ }
+ }
+
+ if (!success && !useBlur) {
+ AmbientVerticesFactory factory;
+ factory.fOccluderHeight = zPlaneParams.fZ;
+ factory.fTransparent = transparent;
+ if (viewMatrix.hasPerspective()) {
+ factory.fOffset.set(0, 0);
+ } else {
+ factory.fOffset.fX = viewMatrix.getTranslateX();
+ factory.fOffset.fY = viewMatrix.getTranslateY();
+ }
+
+ success = draw_shadow(factory, drawVertsProc, shadowedPath, rec.fAmbientColor);
+ }
+#endif // !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+ // All else has failed, draw with blur
+ if (!success) {
+ // Pretransform the path to avoid transforming the stroke, below.
+ SkPath devSpacePath;
+ path.transform(viewMatrix, &devSpacePath);
+ devSpacePath.setIsVolatile(true);
+
+ // The tesselator outsets by AmbientBlurRadius (or 'r') to get the outer ring of
+ // the tesselation, and sets the alpha on the path to 1/AmbientRecipAlpha (or 'a').
+ //
+ // We want to emulate this with a blur. The full blur width (2*blurRadius or 'f')
+ // can be calculated by interpolating:
+ //
+ // original edge outer edge
+ // | |<---------- r ------>|
+ // |<------|--- f -------------->|
+ // | | |
+ // alpha = 1 alpha = a alpha = 0
+ //
+ // Taking ratios, f/1 = r/a, so f = r/a and blurRadius = f/2.
+ //
+ // We now need to outset the path to place the new edge in the center of the
+ // blur region:
+ //
+ // original new
+ // | |<------|--- r ------>|
+ // |<------|--- f -|------------>|
+ // | |<- o ->|<--- f/2 --->|
+ //
+ // r = o + f/2, so o = r - f/2
+ //
+ // We outset by using the stroker, so the strokeWidth is o/2.
+ //
+ SkScalar devSpaceOutset = SkDrawShadowMetrics::AmbientBlurRadius(zPlaneParams.fZ);
+ SkScalar oneOverA = SkDrawShadowMetrics::AmbientRecipAlpha(zPlaneParams.fZ);
+ SkScalar blurRadius = 0.5f*devSpaceOutset*oneOverA;
+ SkScalar strokeWidth = 0.5f*(devSpaceOutset - blurRadius);
+
+ // Now draw with blur
+ SkPaint paint;
+ paint.setColor(rec.fAmbientColor);
+ paint.setStrokeWidth(strokeWidth);
+ paint.setStyle(SkPaint::kStrokeAndFill_Style);
+ SkScalar sigma = SkBlurMask::ConvertRadiusToSigma(blurRadius);
+ bool respectCTM = false;
+ paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, sigma, respectCTM));
+ this->drawPath(devSpacePath, paint);
+ }
+ }
+
+ if (SkColorGetA(rec.fSpotColor) > 0) {
+ bool success = false;
+#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
+ if (uncached && !useBlur) {
+ sk_sp<SkVertices> vertices = SkShadowTessellator::MakeSpot(path, viewMatrix,
+ zPlaneParams,
+ devLightPos, lightRadius,
+ transparent,
+ directional);
+ if (vertices) {
+ SkPaint paint;
+ // Run the vertex color through a GaussianColorFilter and then modulate the
+ // grayscale result of that against our 'color' param.
+ paint.setColorFilter(
+ SkColorFilters::Blend(rec.fSpotColor,
+ SkBlendMode::kModulate)->makeComposed(
+ SkColorFilterPriv::MakeGaussian()));
+ // The vertex colors for a tesselated shadow polygon are always either opaque black
+ // or transparent and their real contribution to the final blended color is via
+ // their alpha. We can skip expensive per-vertex color conversion for this.
+ this->drawVertices(vertices.get(),
+ SkBlender::Mode(SkBlendMode::kModulate),
+ paint,
+ /*skipColorXform=*/true);
+ success = true;
+ }
+ }
+
+ if (!success && !useBlur) {
+ SpotVerticesFactory factory;
+ factory.fOccluderHeight = zPlaneParams.fZ;
+ factory.fDevLightPos = devLightPos;
+ factory.fLightRadius = lightRadius;
+
+ SkPoint center = SkPoint::Make(path.getBounds().centerX(), path.getBounds().centerY());
+ factory.fLocalCenter = center;
+ viewMatrix.mapPoints(&center, 1);
+ SkScalar radius, scale;
+ if (SkToBool(rec.fFlags & kDirectionalLight_ShadowFlag)) {
+ SkDrawShadowMetrics::GetDirectionalParams(zPlaneParams.fZ, devLightPos.fX,
+ devLightPos.fY, devLightPos.fZ,
+ lightRadius, &radius, &scale,
+ &factory.fOffset);
+ } else {
+ SkDrawShadowMetrics::GetSpotParams(zPlaneParams.fZ, devLightPos.fX - center.fX,
+ devLightPos.fY - center.fY, devLightPos.fZ,
+ lightRadius, &radius, &scale, &factory.fOffset);
+ }
+
+ SkRect devBounds;
+ viewMatrix.mapRect(&devBounds, path.getBounds());
+ if (transparent ||
+ SkTAbs(factory.fOffset.fX) > 0.5f*devBounds.width() ||
+ SkTAbs(factory.fOffset.fY) > 0.5f*devBounds.height()) {
+ // if the translation of the shadow is big enough we're going to end up
+ // filling the entire umbra, we can treat these as all the same
+ if (directional) {
+ factory.fOccluderType =
+ SpotVerticesFactory::OccluderType::kDirectionalTransparent;
+ } else {
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kPointTransparent;
+ }
+ } else if (directional) {
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kDirectional;
+ } else if (factory.fOffset.length()*scale + scale < radius) {
+ // if we don't translate more than the blur distance, can assume umbra is covered
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kPointOpaqueNoUmbra;
+ } else if (path.isConvex()) {
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kPointOpaquePartialUmbra;
+ } else {
+ factory.fOccluderType = SpotVerticesFactory::OccluderType::kPointTransparent;
+ }
+ // need to add this after we classify the shadow
+ factory.fOffset.fX += viewMatrix.getTranslateX();
+ factory.fOffset.fY += viewMatrix.getTranslateY();
+
+ SkColor color = rec.fSpotColor;
+#ifdef DEBUG_SHADOW_CHECKS
+ switch (factory.fOccluderType) {
+ case SpotVerticesFactory::OccluderType::kPointTransparent:
+ color = 0xFFD2B48C; // tan for transparent
+ break;
+ case SpotVerticesFactory::OccluderType::kPointOpaquePartialUmbra:
+ color = 0xFFFFA500; // orange for opaque
+ break;
+ case SpotVerticesFactory::OccluderType::kPointOpaqueNoUmbra:
+ color = 0xFFE5E500; // corn yellow for covered
+ break;
+ case SpotVerticesFactory::OccluderType::kDirectional:
+ case SpotVerticesFactory::OccluderType::kDirectionalTransparent:
+ color = 0xFF550000; // dark red for directional
+ break;
+ }
+#endif
+ success = draw_shadow(factory, drawVertsProc, shadowedPath, color);
+ }
+#endif // !defined(SK_ENABLE_OPTIMIZE_SIZE)
+
+ // All else has failed, draw with blur
+ if (!success) {
+ SkMatrix shadowMatrix;
+ SkScalar radius;
+ if (!SkDrawShadowMetrics::GetSpotShadowTransform(devLightPos, lightRadius,
+ viewMatrix, zPlaneParams,
+ path.getBounds(), directional,
+ &shadowMatrix, &radius)) {
+ return;
+ }
+ SkAutoDeviceTransformRestore adr2(this, shadowMatrix);
+
+ SkPaint paint;
+ paint.setColor(rec.fSpotColor);
+ SkScalar sigma = SkBlurMask::ConvertRadiusToSigma(radius);
+ bool respectCTM = false;
+ paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle, sigma, respectCTM));
+ this->drawPath(path, paint);
+ }
+ }
+}
diff --git a/gfx/skia/skia/src/utils/SkTestCanvas.h b/gfx/skia/skia/src/utils/SkTestCanvas.h
new file mode 100644
index 0000000000..56d5c0cf5e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTestCanvas.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// SkTestCanvas is a simple way to make a testing canvas which is allowed to use private
+// facilities of SkCanvas without having to add a friend to SkCanvas.h.
+//
+// You create a Key (a simple empty struct) to make a template specialization class. You need to
+// make a key for each of the different Canvases you need. The implementations of the canvases
+// are in SkCanvas.cpp, which allows the use of helper classes.
+
+#ifndef SkTestCanvas_DEFINED
+#define SkTestCanvas_DEFINED
+
+#include "include/core/SkSize.h"
+#include "include/private/chromium/SkChromeRemoteGlyphCache.h"
+#include "include/utils/SkNWayCanvas.h"
+#include "src/core/SkDevice.h"
+#include "src/text/GlyphRun.h"
+
+// You can only make template specializations of SkTestCanvas.
+template <typename Key> class SkTestCanvas;
+
+// A test canvas to test using slug rendering instead of text blob rendering.
+struct SkSlugTestKey {};
+template <>
+class SkTestCanvas<SkSlugTestKey> : public SkCanvas {
+public:
+ SkTestCanvas(SkCanvas* canvas);
+ void onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) override;
+};
+
+struct SkSerializeSlugTestKey {};
+template <>
+class SkTestCanvas<SkSerializeSlugTestKey> : public SkCanvas {
+public:
+ SkTestCanvas(SkCanvas* canvas);
+ void onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) override;
+};
+
+struct SkRemoteSlugTestKey {};
+template <>
+class SkTestCanvas<SkRemoteSlugTestKey> : public SkCanvas {
+public:
+ SkTestCanvas(SkCanvas* canvas);
+ ~SkTestCanvas() override;
+ void onDrawGlyphRunList(
+ const sktext::GlyphRunList& glyphRunList, const SkPaint& paint) override;
+
+private:
+ std::unique_ptr<SkStrikeServer::DiscardableHandleManager> fServerHandleManager;
+ sk_sp<SkStrikeClient::DiscardableHandleManager> fClientHandleManager;
+ SkStrikeServer fStrikeServer;
+ SkStrikeClient fStrikeClient;
+};
+
+#endif // SkTestCanvas_DEFINED
diff --git a/gfx/skia/skia/src/utils/SkTextUtils.cpp b/gfx/skia/skia/src/utils/SkTextUtils.cpp
new file mode 100644
index 0000000000..f7639b8104
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkTextUtils.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/utils/SkTextUtils.h"
+
+#include "include/core/SkCanvas.h"
+#include "include/core/SkFont.h"
+#include "include/core/SkMatrix.h"
+#include "include/core/SkPath.h"
+#include "include/core/SkPoint.h"
+#include "include/core/SkScalar.h"
+#include "include/core/SkTextBlob.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/core/SkFontPriv.h"
+
+using namespace skia_private;
+
+class SkPaint;
+
+void SkTextUtils::Draw(SkCanvas* canvas, const void* text, size_t size, SkTextEncoding encoding,
+ SkScalar x, SkScalar y, const SkFont& font, const SkPaint& paint,
+ Align align) {
+ if (align != kLeft_Align) {
+ SkScalar width = font.measureText(text, size, encoding);
+ if (align == kCenter_Align) {
+ width *= 0.5f;
+ }
+ x -= width;
+ }
+
+ canvas->drawTextBlob(SkTextBlob::MakeFromText(text, size, font, encoding), x, y, paint);
+}
+
+void SkTextUtils::GetPath(const void* text, size_t length, SkTextEncoding encoding,
+ SkScalar x, SkScalar y, const SkFont& font, SkPath* path) {
+ SkAutoToGlyphs ag(font, text, length, encoding);
+ AutoTArray<SkPoint> pos(ag.count());
+ font.getPos(ag.glyphs(), ag.count(), pos.get(), {x, y});
+
+ struct Rec {
+ SkPath* fDst;
+ const SkPoint* fPos;
+ } rec = { path, pos.get() };
+
+ path->reset();
+ font.getPaths(ag.glyphs(), ag.count(), [](const SkPath* src, const SkMatrix& mx, void* ctx) {
+ Rec* rec = (Rec*)ctx;
+ if (src) {
+ SkMatrix m(mx);
+ m.postTranslate(rec->fPos->fX, rec->fPos->fY);
+ rec->fDst->addPath(*src, m);
+ }
+ rec->fPos += 1;
+ }, &rec);
+}
+
diff --git a/gfx/skia/skia/src/utils/SkVMVisualizer.cpp b/gfx/skia/skia/src/utils/SkVMVisualizer.cpp
new file mode 100644
index 0000000000..cee4735bfd
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkVMVisualizer.cpp
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "src/utils/SkVMVisualizer.h"
+
+#include "include/core/SkStream.h"
+#include "include/private/SkOpts_spi.h"
+#include "src/core/SkStreamPriv.h"
+
+#if defined(SK_ENABLE_SKSL)
+#include "src/sksl/tracing/SkSLDebugInfo.h"
+#include "src/sksl/tracing/SkVMDebugTrace.h"
+#endif
+
+#include <cstdarg>
+#include <cstring>
+#include <string>
+#include <utility>
+
+namespace skvm::viz {
+
+#if defined(SK_ENABLE_SKSL)
+Visualizer::Visualizer(SkSL::SkVMDebugTrace* debugInfo) : fDebugInfo(debugInfo), fOutput(nullptr) {}
+#else
+Visualizer::Visualizer(SkSL::SkVMDebugTrace* debugInfo) : fOutput(nullptr) {}
+#endif
+
+bool Instruction::operator == (const Instruction& o) const {
+ return this->kind == o.kind &&
+ this->instructionIndex == o.instructionIndex &&
+ this->instruction == o.instruction &&
+ this->duplicates == o.duplicates;
+}
+
+SkString Instruction::classes() const {
+ SkString result((kind & InstructionFlags::kDead) ? "dead" : "normal");
+ if (duplicates > 0) result += " origin";
+ if (duplicates < 0) result += " deduped";
+ return result;
+}
+
+uint32_t InstructionHash::operator()(const Instruction& i) const {
+ uint32_t hash = 0;
+ hash = SkOpts::hash_fn(&i.kind, sizeof(i.kind), hash);
+ hash = SkOpts::hash_fn(&i.instructionIndex, sizeof(i.instructionIndex), hash);
+ hash = SkOpts::hash_fn(&i.instruction, sizeof(i.instruction), hash);
+ return hash;
+}
+
+void Visualizer::dump(SkWStream* output) {
+ SkDebugfStream stream;
+ fOutput = output ? output : &stream;
+ this->dumpHead();
+ for (int id = 0; id < fInstructions.size(); ++id) {
+ this->dumpInstruction(id);
+ }
+ this->dumpTail();
+}
+
+void Visualizer::markAsDeadCode(std::vector<bool>& live, const std::vector<int>& newIds) {
+ for (int id = 0; id < fInstructions.size(); ++id) {
+ Instruction& instruction = fInstructions[id];
+ if (instruction.instructionIndex < 0) {
+ // We skip commands that are duplicates of some other commands
+ // They either will be dead or alive together with the origin
+ continue;
+ }
+ SkASSERT(instruction.instructionIndex < (int)live.size());
+ if (live[instruction.instructionIndex]) {
+ instruction.instructionIndex = newIds[instruction.instructionIndex];
+ fToDisassembler[instruction.instructionIndex] = id;
+ } else {
+ instruction.kind
+ = static_cast<InstructionFlags>(instruction.kind | InstructionFlags::kDead);
+ fToDisassembler[instruction.instructionIndex] = -1;
+ // Anything negative meaning the command is duplicate/dead
+ instruction.instructionIndex = -2;
+ }
+ }
+}
+
+void Visualizer::addInstructions(std::vector<skvm::Instruction>& program) {
+ for (Val id = 0; id < (Val)program.size(); id++) {
+ skvm::Instruction& instr = program[id];
+ auto isDuplicate = instr.op == Op::duplicate;
+ if (isDuplicate) {
+ this->markAsDuplicate(instr.immA, id);
+ instr = program[instr.immA];
+ }
+ this->addInstruction({viz::InstructionFlags::kNormal, id, isDuplicate ? -1 : 0, instr});
+ }
+}
+
+void Visualizer::addInstruction(Instruction skvm) {
+ if (!touches_varying_memory(skvm.instruction.op)) {
+ if (auto found = fIndex.find(skvm)) {
+ auto& instruction = fInstructions[*found];
+ ++(instruction.duplicates);
+ return;
+ }
+ }
+ fIndex.set(skvm, fInstructions.size());
+ fToDisassembler.set(skvm.instructionIndex, fInstructions.size());
+ fInstructions.emplace_back(std::move(skvm));
+}
+
+void Visualizer::finalize(const std::vector<skvm::Instruction>& all,
+ const std::vector<skvm::OptimizedInstruction>& optimized) {
+ for (Val id = 0; id < (Val)all.size(); id++) {
+ if (optimized[id].can_hoist) {
+ size_t found = fToDisassembler[id];
+ Instruction& instruction = fInstructions[found];
+ instruction.kind =
+ static_cast<InstructionFlags>(instruction.kind | InstructionFlags::kHoisted);
+ }
+ }
+}
+
+SkString Visualizer::V(int reg) const {
+ if (reg == -1) {
+ return SkString("{optimized}");
+ } else if (reg == -2) {
+ return SkString("{dead code}");
+ } else {
+ return SkStringPrintf("v%d", reg);
+ }
+}
+
+void Visualizer::formatVV(const char* op, int v1, int v2) const {
+ this->writeText("%s %s, %s", op, V(v1).c_str(), V(v2).c_str());
+}
+void Visualizer::formatPV(const char* op, int imm, int v1) const {
+ this->writeText("%s Ptr%d, %s", op, imm, V(v1).c_str());
+}
+void Visualizer::formatPVV(const char* op, int imm, int v1, int v2) const {
+ this->writeText("%s Ptr%d, %s, %s", op, imm, V(v1).c_str(), V(v2).c_str());
+}
+void Visualizer::formatPVVVV(const char* op, int imm, int v1, int v2, int v3, int v4) const {
+ this->writeText("%s Ptr%d, %s, %s, %s, %s",
+ op, imm, V(v1).c_str(), V(v2).c_str(), V(v3).c_str(), V(v4).c_str());
+}
+void Visualizer::formatA_(int id, const char* op) const {
+ writeText("%s = %s", V(id).c_str(), op);
+}
+void Visualizer::formatA_P(int id, const char* op, int imm) const {
+ this->writeText("%s = %s Ptr%d", V(id).c_str(), op, imm);
+}
+void Visualizer::formatA_PH(int id, const char* op, int immA, int immB) const {
+ this->writeText("%s = %s Ptr%d, %x", V(id).c_str(), op, immA, immB);
+}
+void Visualizer::formatA_PHH(int id, const char* op, int immA, int immB, int immC) const {
+ this->writeText("%s = %s Ptr%d, %x, %x", V(id).c_str(), op, immA, immB, immC);
+}
+void Visualizer::formatA_PHV(int id, const char* op, int immA, int immB, int v) const {
+ this->writeText("%s = %s Ptr%d, %x, %s", V(id).c_str(), op, immA, immB, V(v).c_str());
+}
+void Visualizer::formatA_S(int id, const char* op, int imm) const {
+ float f;
+ memcpy(&f, &imm, 4);
+ char buffer[kSkStrAppendScalar_MaxSize];
+ char* stop = SkStrAppendScalar(buffer, f);
+ this->writeText("%s = %s %x (", V(id).c_str(), op, imm);
+ fOutput->write(buffer, stop - buffer);
+ this->writeText(")");
+}
+void Visualizer::formatA_V(int id, const char* op, int v) const {
+ this->writeText("%s = %s %s", V(id).c_str(), op, V(v).c_str());
+}
+void Visualizer::formatA_VV(int id, const char* op, int v1, int v2) const {
+ this->writeText("%s = %s %s, %s", V(id).c_str(), op, V(v1).c_str(), V(v2).c_str());
+}
+void Visualizer::formatA_VVV(int id, const char* op, int v1, int v2, int v3) const {
+ this->writeText(
+ "%s = %s %s, %s, %s", V(id).c_str(), op, V(v1).c_str(), V(v2).c_str(), V(v3).c_str());
+}
+void Visualizer::formatA_VC(int id, const char* op, int v, int imm) const {
+ this->writeText("%s = %s %s, %d", V(id).c_str(), op, V(v).c_str(), imm);
+}
+
+void Visualizer::writeText(const char* format, ...) const {
+ SkString message;
+ va_list argp;
+ va_start(argp, format);
+ message.appendVAList(format, argp);
+ va_end(argp);
+ fOutput->writeText(message.c_str());
+}
+
+void Visualizer::dumpInstruction(int id0) const {
+ const Instruction& instruction = fInstructions[id0];
+ const int id = instruction.instructionIndex;
+ const int x = instruction.instruction.x,
+ y = instruction.instruction.y,
+ z = instruction.instruction.z,
+ w = instruction.instruction.w;
+ const int immA = instruction.instruction.immA,
+ immB = instruction.instruction.immB,
+ immC = instruction.instruction.immC;
+#if defined(SK_ENABLE_SKSL)
+ if (instruction.instruction.op == skvm::Op::trace_line) {
+ SkASSERT(fDebugInfo != nullptr);
+ SkASSERT(immA >= 0 && immB <= (int)fDebugInfo->fSource.size());
+ this->writeText("<tr class='source'><td class='mask'></td><td colspan=2>// %s</td></tr>\n",
+ fDebugInfo->fSource[immB].c_str());
+ return;
+ } else if (instruction.instruction.op == skvm::Op::trace_var ||
+ instruction.instruction.op == skvm::Op::trace_scope) {
+ // TODO: We can add some visualization here
+ return;
+ } else if (instruction.instruction.op == skvm::Op::trace_enter) {
+ SkASSERT(fDebugInfo != nullptr);
+ SkASSERT(immA >= 0 && immA <= (int)fDebugInfo->fFuncInfo.size());
+ std::string& func = fDebugInfo->fFuncInfo[immA].name;
+ SkString mask;
+ mask.printf(immC == 1 ? "%s(-1)" : "%s", V(x).c_str());
+ this->writeText(
+ "<tr class='source'><td class='mask'>&#8618;%s</td><td colspan=2>%s</td></tr>\n",
+ mask.c_str(),
+ func.c_str());
+ return;
+ } else if (instruction.instruction.op == skvm::Op::trace_exit) {
+ SkASSERT(fDebugInfo != nullptr);
+ SkASSERT(immA >= 0 && immA <= (int)fDebugInfo->fFuncInfo.size());
+ std::string& func = fDebugInfo->fFuncInfo[immA].name;
+ SkString mask;
+ mask.printf(immC == 1 ? "%s(-1)" : "%s", V(x).c_str());
+ this->writeText(
+ "<tr class='source'><td class='mask'>&#8617;%s</td><td colspan=2>%s</td></tr>\n",
+ mask.c_str(),
+ func.c_str());
+ return;
+ }
+#endif // defined(SK_ENABLE_SKSL)
+ // No label, to the operation
+ SkString label;
+ if ((instruction.kind & InstructionFlags::kHoisted) != 0) {
+ label.set("&#8593;&#8593;&#8593; ");
+ }
+ if (instruction.duplicates > 0) {
+ label.appendf("*%d", instruction.duplicates + 1);
+ }
+ SkString classes = instruction.classes();
+ this->writeText("<tr class='%s'><td>%s</td><td>", classes.c_str(), label.c_str());
+ // Operation
+ switch (instruction.instruction.op) {
+ case skvm::Op::assert_true: formatVV("assert_true", x, y); break;
+ case skvm::Op::store8: formatPV("store8", immA, x); break;
+ case skvm::Op::store16: formatPV("store16", immA, x); break;
+ case skvm::Op::store32: formatPV("store32", immA, x); break;
+ case skvm::Op::store64: formatPVV("store64", immA, x, y); break;
+ case skvm::Op::store128: formatPVVVV("store128", immA, x, y, z, w); break;
+ case skvm::Op::index: formatA_(id, "index"); break;
+ case skvm::Op::load8: formatA_P(id, "load8", immA); break;
+ case skvm::Op::load16: formatA_P(id, "load16", immA); break;
+ case skvm::Op::load32: formatA_P(id, "load32", immA); break;
+ case skvm::Op::load64: formatA_PH(id, "load64", immA, immB); break;
+ case skvm::Op::load128: formatA_PH(id, "load128", immA, immB); break;
+ case skvm::Op::gather8: formatA_PHV(id, "gather8", immA, immB, x); break;
+ case skvm::Op::gather16: formatA_PHV(id, "gather16", immA, immB, x); break;
+ case skvm::Op::gather32: formatA_PHV(id, "gather32", immA, immB, x); break;
+ case skvm::Op::uniform32: formatA_PH(id, "uniform32", immA, immB); break;
+ case skvm::Op::array32: formatA_PHH(id, "array32", immA, immB, immC); break;
+ case skvm::Op::splat: formatA_S(id, "splat", immA); break;
+ case skvm::Op:: add_f32: formatA_VV(id, "add_f32", x, y); break;
+ case skvm::Op:: sub_f32: formatA_VV(id, "sub_f32", x, y); break;
+ case skvm::Op:: mul_f32: formatA_VV(id, "mul_f32", x, y); break;
+ case skvm::Op:: div_f32: formatA_VV(id, "div_f32", x, y); break;
+ case skvm::Op:: min_f32: formatA_VV(id, "min_f32", x, y); break;
+ case skvm::Op:: max_f32: formatA_VV(id, "max_f32", x, y); break;
+ case skvm::Op:: fma_f32: formatA_VVV(id, "fma_f32", x, y, z); break;
+ case skvm::Op:: fms_f32: formatA_VVV(id, "fms_f32", x, y, z); break;
+ case skvm::Op::fnma_f32: formatA_VVV(id, "fnma_f32", x, y, z); break;
+ case skvm::Op::sqrt_f32: formatA_V(id, "sqrt_f32", x); break;
+ case skvm::Op:: eq_f32: formatA_VV(id, "eq_f32", x, y); break;
+ case skvm::Op::neq_f32: formatA_VV(id, "neq_f32", x, y); break;
+ case skvm::Op:: gt_f32: formatA_VV(id, "gt_f32", x, y); break;
+ case skvm::Op::gte_f32: formatA_VV(id, "gte_f32", x, y); break;
+ case skvm::Op::add_i32: formatA_VV(id, "add_i32", x, y); break;
+ case skvm::Op::sub_i32: formatA_VV(id, "sub_i32", x, y); break;
+ case skvm::Op::mul_i32: formatA_VV(id, "mul_i32", x, y); break;
+ case skvm::Op::shl_i32: formatA_VC(id, "shl_i32", x, immA); break;
+ case skvm::Op::shr_i32: formatA_VC(id, "shr_i32", x, immA); break;
+ case skvm::Op::sra_i32: formatA_VC(id, "sra_i32", x, immA); break;
+ case skvm::Op::eq_i32: formatA_VV(id, "eq_i32", x, y); break;
+ case skvm::Op::gt_i32: formatA_VV(id, "gt_i32", x, y); break;
+ case skvm::Op::bit_and: formatA_VV(id, "bit_and", x, y); break;
+ case skvm::Op::bit_or: formatA_VV(id, "bit_or", x, y); break;
+ case skvm::Op::bit_xor: formatA_VV(id, "bit_xor", x, y); break;
+ case skvm::Op::bit_clear: formatA_VV(id, "bit_clear", x, y); break;
+ case skvm::Op::select: formatA_VVV(id, "select", x, y, z); break;
+ case skvm::Op::ceil: formatA_V(id, "ceil", x); break;
+ case skvm::Op::floor: formatA_V(id, "floor", x); break;
+ case skvm::Op::to_f32: formatA_V(id, "to_f32", x); break;
+ case skvm::Op::to_fp16: formatA_V(id, "to_fp16", x); break;
+ case skvm::Op::from_fp16: formatA_V(id, "from_fp16", x); break;
+ case skvm::Op::trunc: formatA_V(id, "trunc", x); break;
+ case skvm::Op::round: formatA_V(id, "round", x); break;
+ default: SkASSERT(false);
+ }
+ this->writeText("</td></tr>\n");
+}
+
+void Visualizer::dumpHead() const {
+ this->writeText("%s",
+ "<html>\n"
+ "<head>\n"
+ " <title>SkVM Disassembler Output</title>\n"
+ " <style>\n"
+ " button { border-style: none; font-size: 10px; background-color: lightpink; }\n"
+ " table { text-align: left; }\n"
+ " table th { background-color: lightgray; }\n"
+ " .dead, .dead1 { color: lightgray; text-decoration: line-through; }\n"
+ " .normal, .normal1 { }\n"
+ " .origin, .origin1 { font-weight: bold; }\n"
+ " .source, .source1 { color: darkblue; }\n"
+ " .mask, .mask1 { color: green; }\n"
+ " .comments, .comments1 { }\n"
+ " </style>\n"
+ " <script>\n"
+ " function initializeButton(className) {\n"
+ " var btn = document.getElementById(className);\n"
+ " var elems = document.getElementsByClassName(className);\n"
+ " if (elems == undefined || elems.length == 0) {\n"
+ " btn.disabled = true;\n"
+ " btn.innerText = \"None\";\n"
+ " btn.style.background = \"lightgray\";\n"
+ " return;\n"
+ " }\n"
+ " };\n"
+ " function initialize() {\n"
+ " initializeButton('normal');\n"
+ " initializeButton('source');\n"
+ " initializeButton('dead');\n"
+ " };\n"
+ " </script>\n"
+ "</head>\n"
+ "<body onload='initialize();'>\n"
+ " <script>\n"
+ " function toggle(btn, className) {\n"
+ " var elems = document.getElementsByClassName(className);\n"
+ " for (var i = 0; i < elems.length; i++) {\n"
+ " var elem = elems.item(i);\n"
+ " if (elem.style.display === \"\") {\n"
+ " elem.style.display = \"none\";\n"
+ " btn.innerText = \"Show\";\n"
+ " btn.style.background = \"lightcyan\";\n"
+ " } else {\n"
+ " elem.style.display = \"\";\n"
+ " btn.innerText = \"Hide\";\n"
+ " btn.style.background = \"lightpink\";\n"
+ " }\n"
+ " }\n"
+ " };\n"
+ " </script>"
+ " <table border=\"0\" style='font-family:\"monospace\"; font-size: 10px;'>\n"
+ " <caption style='font-family:Roboto; font-size:15px; text-align:left;'>Legend</caption>\n"
+ " <tr>\n"
+ " <th style=\"min-width:100px;\"><u>Kind</u></th>\n"
+ " <th style=\"width:35%;\"><u>Example</u></th>\n"
+ " <th style=\"width: 5%; min-width:50px;\"><u></u></th>\n"
+ " <th style=\"width:60%;\"><u>Description</u></th>\n"
+ " </tr>\n"
+ " <tr class='normal1'>"
+ "<td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</td>"
+ "<td>v1 = load32 Ptr1</td>"
+ "<td><button id='normal' onclick=\"toggle(this, 'normal')\">Hide</button></td>"
+ "<td>A regular SkVM command</td></tr>\n"
+ " <tr class='normal1 origin1'><td>*{N}</td>"
+ "<td>v9 = gt_f32 v0, v1</td>"
+ "<td><button id='dead' onclick=\"toggle(this, 'deduped')\">Hide</button></td>"
+ "<td>A {N} times deduped SkVM command</td></tr>\n"
+ " <tr class='normal1'><td>&#8593;&#8593;&#8593; &nbsp;&nbsp;&nbsp;</td>"
+ "<td>v22 = splat 3f800000 (1)</td><td></td>"
+ "<td>A hoisted SkVM command</td></tr>\n"
+ " <tr class='source1'><td class='mask'>mask&#8618;v{N}(-1)</td>"
+ "<td>// C++ source line</td><td></td>"
+ "<td>Enter into the procedure with mask v{N} (which has a constant value -1)"
+ "</td></tr>\n"
+ " <tr class='source1'><td class='mask'>mask&#8617;v{N}</td>"
+ "<td>// C++ source line</td><td>"
+ "</td><td>Exit the procedure with mask v{N}</td></tr>\n"
+ " <tr class='source1'><td class='mask'></td><td>// C++ source line</td>"
+ "<td><button id='source' onclick=\"toggle(this, 'source')\">Hide</button></td>"
+ "<td>Line trace back to C++ code</td></tr>\n"
+ " <tr class='dead1'><td></td><td>{dead code} = mul_f32 v1, v18</td>"
+ "<td><button id='dead' onclick=\"toggle(this, 'dead')\">Hide</button></td>"
+ "<td>An eliminated \"dead code\" SkVM command</td></tr>\n"
+ " </table>\n"
+ " <table border = \"0\"style='font-family:\"monospace\"; font-size: 10px;'>\n"
+ " <caption style='font-family:Roboto;font-size:15px;text-align:left;'>SkVM Code</caption>\n"
+ " <tr>\n"
+ " <th style=\"min-width:100px;\"><u>Kind</u></th>\n"
+ " <th style=\"width:40%;min-width:100px;\"><u>Command</u></th>\n"
+ " <th style=\"width:60%;\"><u>Comments</u></th>\n"
+ " </tr>");
+}
+void Visualizer::dumpTail() const {
+ this->writeText(
+ " </table>\n"
+ "</body>\n"
+ "</html>"
+ );
+}
+} // namespace skvm::viz
diff --git a/gfx/skia/skia/src/utils/SkVMVisualizer.h b/gfx/skia/skia/src/utils/SkVMVisualizer.h
new file mode 100644
index 0000000000..24b8a56719
--- /dev/null
+++ b/gfx/skia/skia/src/utils/SkVMVisualizer.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2021 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef SkVMVisualizer_DEFINED
+#define SkVMVisualizer_DEFINED
+
+#include "include/core/SkString.h"
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTArray.h"
+#include "src/core/SkTHash.h"
+#include "src/core/SkVM.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+#include <vector>
+
+class SkWStream;
+namespace SkSL { class SkVMDebugTrace; }
+
+namespace skvm::viz {
+ enum InstructionFlags : uint8_t {
+ kNormal = 0x00,
+ kHoisted = 0x01,
+ kDead = 0x02,
+ };
+
+ struct Instruction {
+ InstructionFlags kind = InstructionFlags::kNormal;
+ int instructionIndex; // index in the actual instructions list
+ int duplicates = 0; // number of duplicates;
+ // -1 means it's a duplicate itself; 0 - it does not have dups
+ skvm::Instruction instruction;
+ bool operator == (const Instruction& o) const;
+ SkString classes() const;
+ };
+
+ struct InstructionHash {
+ uint32_t operator()(const Instruction& i) const;
+ };
+
+ class Visualizer {
+ public:
+ explicit Visualizer(SkSL::SkVMDebugTrace* debugInfo);
+ ~Visualizer() = default;
+ void dump(SkWStream* output);
+ void markAsDeadCode(std::vector<bool>& live, const std::vector<int>& newIds);
+ void finalize(const std::vector<skvm::Instruction>& all,
+ const std::vector<skvm::OptimizedInstruction>& optimized);
+ void addInstructions(std::vector<skvm::Instruction>& program);
+ void markAsDuplicate(int origin, int id) {
+ ++fInstructions[origin].duplicates;
+ }
+ void addInstruction(Instruction skvm);
+ SkString V(int reg) const;
+ private:
+ void dumpInstruction(int id) const;
+ void dumpHead() const;
+ void dumpTail() const;
+ void formatVV(const char* op, int v1, int v2) const;
+ void formatPV(const char* op, int imm, int v1) const;
+ void formatPVV(const char* op, int imm, int v1, int v2) const;
+ void formatPVVVV(const char* op, int imm, int v1, int v2, int v3, int v4) const;
+ void formatA_(int id, const char* op) const;
+ void formatA_P(int id, const char* op, int imm) const;
+ void formatA_PH(int id, const char* op, int immA, int immB) const;
+ void formatA_PHH(int id, const char* op, int immA, int immB, int immC) const;
+ void formatA_PHV(int id, const char* op, int immA, int immB, int v) const;
+ void formatA_S(int id, const char* op, int imm) const;
+ void formatA_V(int id, const char* op, int v) const;
+ void formatA_VV(int id, const char* op, int v1, int v2) const;
+ void formatA_VVV(int id, const char* op, int v1, int v2, int v3) const;
+ void formatA_VC(int id, const char* op, int v, int imm) const;
+
+ void writeText(const char* format, ...) const SK_PRINTF_LIKE(2, 3);
+#if defined(SK_ENABLE_SKSL)
+ SkSL::SkVMDebugTrace* fDebugInfo;
+#endif
+ SkTHashMap<Instruction, size_t, InstructionHash> fIndex;
+ SkTArray<Instruction> fInstructions;
+ SkWStream* fOutput;
+ SkTHashMap<int, size_t> fToDisassembler;
+ };
+} // namespace skvm::viz
+
+namespace sknonstd {
+template <typename T> struct is_bitmask_enum;
+template <> struct is_bitmask_enum<skvm::viz::InstructionFlags> : std::true_type {};
+} // namespace sknonstd
+
+#endif // SkVMVisualizer_DEFINED
diff --git a/gfx/skia/skia/src/utils/mac/SkCGBase.h b/gfx/skia/skia/src/utils/mac/SkCGBase.h
new file mode 100644
index 0000000000..a7b0ed06d0
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkCGBase.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCGBase_DEFINED
+#define SkCGBase_DEFINED
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#endif
+
+// Skia extensions for types in CGBase.h
+
+static inline CGFloat SkScalarToCGFloat(SkScalar scalar) {
+ return CGFLOAT_IS_DOUBLE ? SkScalarToDouble(scalar) : SkScalarToFloat(scalar);
+}
+
+static inline SkScalar SkScalarFromCGFloat(CGFloat cgFloat) {
+ return CGFLOAT_IS_DOUBLE ? SkDoubleToScalar(cgFloat) : SkFloatToScalar(cgFloat);
+}
+
+static inline float SkFloatFromCGFloat(CGFloat cgFloat) {
+ return CGFLOAT_IS_DOUBLE ? static_cast<float>(cgFloat) : cgFloat;
+}
+
+#endif
+#endif //SkCGBase_DEFINED
diff --git a/gfx/skia/skia/src/utils/mac/SkCGGeometry.h b/gfx/skia/skia/src/utils/mac/SkCGGeometry.h
new file mode 100644
index 0000000000..04b1263bcd
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkCGGeometry.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCGGeometry_DEFINED
+#define SkCGGeometry_DEFINED
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#endif
+
+// Skia extensions for types in CGGeometry.h
+
+// Inline versions of these CGRect helpers.
+// The CG versions require making a call and a copy of the CGRect on the stack.
+
+static inline bool SkCGRectIsEmpty(const CGRect& rect) {
+ return rect.size.width <= 0 || rect.size.height <= 0;
+}
+
+static inline CGFloat SkCGRectGetMinX(const CGRect& rect) {
+ return rect.origin.x;
+}
+
+static inline CGFloat SkCGRectGetMaxX(const CGRect& rect) {
+ return rect.origin.x + rect.size.width;
+}
+
+static inline CGFloat SkCGRectGetMinY(const CGRect& rect) {
+ return rect.origin.y;
+}
+
+static inline CGFloat SkCGRectGetMaxY(const CGRect& rect) {
+ return rect.origin.y + rect.size.height;
+}
+
+static inline CGFloat SkCGRectGetWidth(const CGRect& rect) {
+ return rect.size.width;
+}
+
+#endif
+#endif //SkCGGeometry_DEFINED
diff --git a/gfx/skia/skia/src/utils/mac/SkCTFont.cpp b/gfx/skia/skia/src/utils/mac/SkCTFont.cpp
new file mode 100644
index 0000000000..d678374fdd
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkCTFont.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkData.h"
+#include "include/core/SkRefCnt.h"
+#include "include/private/base/SkOnce.h"
+#include "src/sfnt/SkOTTable_OS_2.h"
+#include "src/sfnt/SkSFNTHeader.h"
+#include "src/utils/mac/SkCTFont.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreText/CoreText.h>
+#include <CoreText/CTFontManager.h>
+#include <CoreGraphics/CoreGraphics.h>
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include <dlfcn.h>
+
+static constexpr CGBitmapInfo kBitmapInfoRGB = ((CGBitmapInfo)kCGImageAlphaNoneSkipFirst |
+ kCGBitmapByteOrder32Host);
+
+/** Drawn in FontForge, reduced with fonttools ttx, converted by xxd -i,
+ * this TrueType font contains a glyph of the spider.
+ *
+ * To re-forge the original bytes of the TrueType font file,
+ * remove all ',|( +0x)' from this definition,
+ * copy the data to the clipboard,
+ * run 'pbpaste | xxd -p -r - spider.ttf'.
+ */
+static constexpr const uint8_t kSpiderSymbol_ttf[] = {
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x80, 0x00, 0x03, 0x00, 0x40,
+ 0x47, 0x44, 0x45, 0x46, 0x00, 0x14, 0x00, 0x14, 0x00, 0x00, 0x07, 0xa8,
+ 0x00, 0x00, 0x00, 0x18, 0x4f, 0x53, 0x2f, 0x32, 0x8a, 0xf4, 0xfb, 0xdb,
+ 0x00, 0x00, 0x01, 0x48, 0x00, 0x00, 0x00, 0x60, 0x63, 0x6d, 0x61, 0x70,
+ 0xe0, 0x7f, 0x10, 0x7e, 0x00, 0x00, 0x01, 0xb8, 0x00, 0x00, 0x00, 0x54,
+ 0x67, 0x61, 0x73, 0x70, 0xff, 0xff, 0x00, 0x03, 0x00, 0x00, 0x07, 0xa0,
+ 0x00, 0x00, 0x00, 0x08, 0x67, 0x6c, 0x79, 0x66, 0x97, 0x0b, 0x6a, 0xf6,
+ 0x00, 0x00, 0x02, 0x18, 0x00, 0x00, 0x03, 0x40, 0x68, 0x65, 0x61, 0x64,
+ 0x0f, 0xa2, 0x24, 0x1a, 0x00, 0x00, 0x00, 0xcc, 0x00, 0x00, 0x00, 0x36,
+ 0x68, 0x68, 0x65, 0x61, 0x0e, 0xd3, 0x07, 0x3f, 0x00, 0x00, 0x01, 0x04,
+ 0x00, 0x00, 0x00, 0x24, 0x68, 0x6d, 0x74, 0x78, 0x10, 0x03, 0x00, 0x44,
+ 0x00, 0x00, 0x01, 0xa8, 0x00, 0x00, 0x00, 0x0e, 0x6c, 0x6f, 0x63, 0x61,
+ 0x01, 0xb4, 0x00, 0x28, 0x00, 0x00, 0x02, 0x0c, 0x00, 0x00, 0x00, 0x0a,
+ 0x6d, 0x61, 0x78, 0x70, 0x00, 0x4a, 0x01, 0x4d, 0x00, 0x00, 0x01, 0x28,
+ 0x00, 0x00, 0x00, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0xc3, 0xe5, 0x39, 0xd4,
+ 0x00, 0x00, 0x05, 0x58, 0x00, 0x00, 0x02, 0x28, 0x70, 0x6f, 0x73, 0x74,
+ 0xff, 0x03, 0x00, 0x67, 0x00, 0x00, 0x07, 0x80, 0x00, 0x00, 0x00, 0x20,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0b, 0x0f, 0x08, 0x1d,
+ 0x5f, 0x0f, 0x3c, 0xf5, 0x00, 0x0b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd1, 0x97, 0xa8, 0x5a, 0x00, 0x00, 0x00, 0x00, 0xd6, 0xe8, 0x32, 0x33,
+ 0x00, 0x03, 0xff, 0x3b, 0x08, 0x00, 0x05, 0x55, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x05, 0x55, 0xff, 0x3b, 0x01, 0x79, 0x08, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x04, 0x01, 0x1c, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x2e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x08, 0x00, 0x01, 0x90, 0x00, 0x05,
+ 0x00, 0x00, 0x05, 0x33, 0x05, 0x99, 0x00, 0x00, 0x01, 0x1e, 0x05, 0x33,
+ 0x05, 0x99, 0x00, 0x00, 0x03, 0xd7, 0x00, 0x66, 0x02, 0x12, 0x00, 0x00,
+ 0x05, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x73, 0x6b, 0x69, 0x61, 0x00, 0xc0, 0x00, 0x00, 0xf0, 0x21,
+ 0x06, 0x66, 0xfe, 0x66, 0x01, 0x79, 0x05, 0x55, 0x00, 0xc5, 0x80, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x20, 0x00, 0x01, 0x08, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x04, 0x00, 0x48,
+ 0x00, 0x00, 0x00, 0x0e, 0x00, 0x08, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x09, 0x00, 0x0d, 0x00, 0x1d, 0x00, 0x21, 0xf0, 0x21, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0d, 0x00, 0x1d, 0x00, 0x21,
+ 0xf0, 0x21, 0xff, 0xff, 0x00, 0x01, 0xff, 0xf9, 0xff, 0xf5, 0xff, 0xe4,
+ 0xff, 0xe2, 0x0f, 0xe2, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14,
+ 0x00, 0x14, 0x00, 0x14, 0x01, 0xa0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x44,
+ 0x00, 0x00, 0x02, 0x64, 0x05, 0x55, 0x00, 0x03, 0x00, 0x07, 0x00, 0x00,
+ 0x33, 0x11, 0x21, 0x11, 0x25, 0x21, 0x11, 0x21, 0x44, 0x02, 0x20, 0xfe,
+ 0x24, 0x01, 0x98, 0xfe, 0x68, 0x05, 0x55, 0xfa, 0xab, 0x44, 0x04, 0xcd,
+ 0x00, 0x04, 0x00, 0x03, 0xff, 0x3b, 0x08, 0x00, 0x05, 0x4c, 0x00, 0x15,
+ 0x00, 0x1d, 0x00, 0x25, 0x01, 0x1b, 0x00, 0x00, 0x01, 0x36, 0x37, 0x36,
+ 0x27, 0x26, 0x07, 0x06, 0x06, 0x23, 0x22, 0x27, 0x26, 0x27, 0x26, 0x07,
+ 0x06, 0x17, 0x16, 0x17, 0x16, 0x32, 0x37, 0x32, 0x35, 0x34, 0x23, 0x22,
+ 0x15, 0x14, 0x27, 0x32, 0x35, 0x34, 0x23, 0x22, 0x15, 0x14, 0x03, 0x32,
+ 0x17, 0x30, 0x17, 0x31, 0x36, 0x37, 0x36, 0x37, 0x36, 0x37, 0x36, 0x33,
+ 0x32, 0x33, 0x16, 0x33, 0x32, 0x17, 0x16, 0x07, 0x06, 0x23, 0x22, 0x27,
+ 0x26, 0x27, 0x26, 0x23, 0x22, 0x07, 0x07, 0x06, 0x07, 0x06, 0x07, 0x06,
+ 0x1f, 0x02, 0x37, 0x36, 0x37, 0x36, 0x33, 0x32, 0x17, 0x17, 0x16, 0x33,
+ 0x16, 0x17, 0x16, 0x07, 0x06, 0x23, 0x22, 0x27, 0x27, 0x26, 0x23, 0x22,
+ 0x07, 0x06, 0x07, 0x06, 0x17, 0x16, 0x17, 0x16, 0x33, 0x32, 0x33, 0x32,
+ 0x37, 0x36, 0x37, 0x36, 0x17, 0x16, 0x1f, 0x02, 0x16, 0x17, 0x16, 0x15,
+ 0x14, 0x23, 0x22, 0x27, 0x27, 0x26, 0x27, 0x27, 0x26, 0x27, 0x26, 0x07,
+ 0x06, 0x07, 0x06, 0x17, 0x16, 0x17, 0x16, 0x15, 0x14, 0x07, 0x06, 0x07,
+ 0x06, 0x23, 0x22, 0x27, 0x26, 0x07, 0x06, 0x07, 0x06, 0x15, 0x14, 0x17,
+ 0x16, 0x17, 0x16, 0x15, 0x14, 0x07, 0x06, 0x23, 0x22, 0x27, 0x26, 0x27,
+ 0x26, 0x35, 0x34, 0x37, 0x36, 0x37, 0x36, 0x37, 0x34, 0x27, 0x26, 0x07,
+ 0x06, 0x07, 0x06, 0x0f, 0x02, 0x06, 0x23, 0x22, 0x27, 0x26, 0x35, 0x34,
+ 0x37, 0x37, 0x36, 0x37, 0x36, 0x37, 0x36, 0x37, 0x36, 0x27, 0x26, 0x27,
+ 0x26, 0x07, 0x06, 0x07, 0x06, 0x07, 0x06, 0x07, 0x07, 0x06, 0x23, 0x22,
+ 0x27, 0x26, 0x35, 0x34, 0x37, 0x36, 0x37, 0x37, 0x36, 0x37, 0x37, 0x36,
+ 0x37, 0x36, 0x37, 0x36, 0x35, 0x34, 0x27, 0x26, 0x27, 0x26, 0x27, 0x26,
+ 0x23, 0x22, 0x07, 0x06, 0x07, 0x06, 0x07, 0x06, 0x27, 0x26, 0x27, 0x26,
+ 0x27, 0x26, 0x35, 0x34, 0x37, 0x36, 0x37, 0x36, 0x37, 0x36, 0x33, 0x32,
+ 0x17, 0x16, 0x33, 0x32, 0x37, 0x36, 0x35, 0x34, 0x37, 0x36, 0x37, 0x36,
+ 0x33, 0x04, 0xf5, 0x23, 0x13, 0x11, 0x14, 0x16, 0x1d, 0x1b, 0x4c, 0x1f,
+ 0x0e, 0x2d, 0x23, 0x14, 0x2c, 0x13, 0x18, 0x25, 0x2c, 0x10, 0x3c, 0x71,
+ 0x1d, 0x5c, 0x5c, 0x3f, 0xae, 0x5c, 0x5c, 0x3f, 0x6a, 0x27, 0x31, 0x5b,
+ 0x09, 0x27, 0x36, 0x03, 0x0a, 0x26, 0x35, 0x2e, 0x09, 0x08, 0xc6, 0x13,
+ 0x81, 0x17, 0x20, 0x18, 0x21, 0x1e, 0x04, 0x04, 0x15, 0x5c, 0x22, 0x26,
+ 0x48, 0x56, 0x3b, 0x10, 0x21, 0x01, 0x0c, 0x06, 0x06, 0x0f, 0x31, 0x44,
+ 0x3c, 0x52, 0x4a, 0x1d, 0x11, 0x3f, 0xb4, 0x71, 0x01, 0x26, 0x06, 0x0d,
+ 0x15, 0x1a, 0x2a, 0x13, 0x53, 0xaa, 0x42, 0x1d, 0x0a, 0x33, 0x20, 0x21,
+ 0x2b, 0x01, 0x02, 0x3e, 0x21, 0x09, 0x02, 0x02, 0x0f, 0x2d, 0x4b, 0x0a,
+ 0x22, 0x15, 0x20, 0x1f, 0x72, 0x8b, 0x2d, 0x2f, 0x1d, 0x1f, 0x0e, 0x25,
+ 0x3f, 0x4d, 0x1b, 0x63, 0x2a, 0x2c, 0x14, 0x22, 0x18, 0x1c, 0x0f, 0x08,
+ 0x2a, 0x08, 0x08, 0x0d, 0x3b, 0x4c, 0x52, 0x74, 0x27, 0x71, 0x2e, 0x01,
+ 0x0c, 0x10, 0x15, 0x0d, 0x06, 0x0d, 0x05, 0x01, 0x06, 0x2c, 0x28, 0x14,
+ 0x1b, 0x05, 0x04, 0x10, 0x06, 0x12, 0x08, 0x0a, 0x16, 0x27, 0x03, 0x0d,
+ 0x30, 0x4c, 0x4c, 0x4b, 0x1f, 0x0b, 0x22, 0x26, 0x0d, 0x15, 0x0d, 0x2d,
+ 0x68, 0x34, 0x14, 0x3c, 0x25, 0x12, 0x04, 0x10, 0x18, 0x0b, 0x09, 0x30,
+ 0x2b, 0x44, 0x66, 0x14, 0x47, 0x47, 0x59, 0x73, 0x25, 0x05, 0x03, 0x1f,
+ 0x01, 0x08, 0x3f, 0x48, 0x4b, 0x4b, 0x76, 0x2f, 0x49, 0x2d, 0x22, 0x24,
+ 0x0c, 0x15, 0x08, 0x0e, 0x33, 0x03, 0x44, 0x4c, 0x10, 0x46, 0x13, 0x1f,
+ 0x27, 0x1b, 0x1d, 0x13, 0x02, 0x24, 0x08, 0x02, 0x42, 0x0e, 0x4d, 0x3c,
+ 0x19, 0x1b, 0x40, 0x2b, 0x2b, 0x1e, 0x16, 0x11, 0x04, 0x1f, 0x11, 0x04,
+ 0x18, 0x11, 0x35, 0x01, 0xa3, 0x13, 0x24, 0x1f, 0x0b, 0x0c, 0x19, 0x19,
+ 0x18, 0x13, 0x0f, 0x0c, 0x1a, 0x18, 0x1f, 0x19, 0x1e, 0x07, 0x1a, 0xc3,
+ 0x54, 0x51, 0x54, 0x51, 0x04, 0x53, 0x51, 0x54, 0x50, 0x02, 0x48, 0x1a,
+ 0x31, 0x18, 0x55, 0x74, 0x04, 0x0e, 0x09, 0x0d, 0x06, 0x10, 0x16, 0x1b,
+ 0x24, 0x01, 0x04, 0x0b, 0x04, 0x10, 0x3f, 0x0a, 0x41, 0x02, 0x41, 0x20,
+ 0x06, 0x12, 0x16, 0x21, 0x17, 0x2a, 0x1e, 0x15, 0x40, 0x27, 0x11, 0x0e,
+ 0x1e, 0x11, 0x15, 0x1f, 0x43, 0x13, 0x1a, 0x10, 0x15, 0x1b, 0x04, 0x09,
+ 0x4d, 0x2a, 0x0f, 0x19, 0x0a, 0x0a, 0x03, 0x05, 0x15, 0x3c, 0x64, 0x21,
+ 0x4b, 0x2e, 0x21, 0x28, 0x13, 0x47, 0x44, 0x19, 0x3f, 0x11, 0x18, 0x0b,
+ 0x0a, 0x07, 0x18, 0x0d, 0x07, 0x24, 0x2c, 0x2b, 0x21, 0x32, 0x10, 0x48,
+ 0x2a, 0x2d, 0x1e, 0x1a, 0x01, 0x0c, 0x43, 0x59, 0x28, 0x4e, 0x1c, 0x0d,
+ 0x5d, 0x24, 0x14, 0x0a, 0x05, 0x1f, 0x24, 0x32, 0x46, 0x3e, 0x5f, 0x3e,
+ 0x44, 0x1a, 0x30, 0x15, 0x0d, 0x07, 0x18, 0x2b, 0x03, 0x0d, 0x1a, 0x28,
+ 0x28, 0x57, 0xb2, 0x29, 0x27, 0x40, 0x2c, 0x23, 0x16, 0x63, 0x58, 0x1a,
+ 0x0a, 0x18, 0x11, 0x23, 0x08, 0x1b, 0x29, 0x05, 0x04, 0x0b, 0x15, 0x0d,
+ 0x14, 0x0b, 0x2a, 0x29, 0x5a, 0x62, 0x01, 0x19, 0x1e, 0x05, 0x05, 0x26,
+ 0x42, 0x42, 0x2a, 0x2a, 0x3f, 0x0d, 0x0f, 0x09, 0x05, 0x07, 0x01, 0x0b,
+ 0x25, 0x3e, 0x0d, 0x17, 0x11, 0x01, 0x03, 0x0d, 0x13, 0x20, 0x19, 0x11,
+ 0x03, 0x02, 0x01, 0x04, 0x11, 0x04, 0x05, 0x1b, 0x3d, 0x10, 0x29, 0x20,
+ 0x04, 0x04, 0x0a, 0x07, 0x04, 0x1f, 0x15, 0x20, 0x3e, 0x0f, 0x2a, 0x1e,
+ 0x00, 0x00, 0x00, 0x1b, 0x01, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x0c, 0x00, 0x1b, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x07, 0x00, 0x27, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x0c, 0x00, 0x1b, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x00, 0x0c, 0x00, 0x1b, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x05, 0x00, 0x02, 0x00, 0x2e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x0c, 0x00, 0x1b, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0d, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0e, 0x00, 0x1a, 0x00, 0x30, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x00, 0x00, 0x36, 0x00, 0x4a, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x01, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x02, 0x00, 0x0e, 0x00, 0x98, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x03, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x04, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x05, 0x00, 0x04, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x06, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x0d, 0x00, 0x36, 0x00, 0x4a, 0x00, 0x03, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x0e, 0x00, 0x34, 0x00, 0xaa, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x00, 0x00, 0x36, 0x00, 0x4a, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x01, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x02, 0x00, 0x0e, 0x00, 0x98, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x03, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x04, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x05, 0x00, 0x04, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x06, 0x00, 0x18, 0x00, 0x80, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x0d, 0x00, 0x36, 0x00, 0x4a, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
+ 0x00, 0x0e, 0x00, 0x34, 0x00, 0xaa, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69,
+ 0x67, 0x68, 0x74, 0x20, 0x28, 0x63, 0x29, 0x20, 0x32, 0x30, 0x31, 0x35,
+ 0x2c, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x53, 0x70, 0x69,
+ 0x64, 0x65, 0x72, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x52, 0x65, 0x67,
+ 0x75, 0x6c, 0x61, 0x72, 0x56, 0x31, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f,
+ 0x2f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x2e, 0x73, 0x69, 0x6c,
+ 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x4f, 0x46, 0x4c, 0x00, 0x43, 0x00, 0x6f,
+ 0x00, 0x70, 0x00, 0x79, 0x00, 0x72, 0x00, 0x69, 0x00, 0x67, 0x00, 0x68,
+ 0x00, 0x74, 0x00, 0x20, 0x00, 0x28, 0x00, 0x63, 0x00, 0x29, 0x00, 0x20,
+ 0x00, 0x32, 0x00, 0x30, 0x00, 0x31, 0x00, 0x35, 0x00, 0x2c, 0x00, 0x20,
+ 0x00, 0x47, 0x00, 0x6f, 0x00, 0x6f, 0x00, 0x67, 0x00, 0x6c, 0x00, 0x65,
+ 0x00, 0x2e, 0x00, 0x53, 0x00, 0x70, 0x00, 0x69, 0x00, 0x64, 0x00, 0x65,
+ 0x00, 0x72, 0x00, 0x53, 0x00, 0x79, 0x00, 0x6d, 0x00, 0x62, 0x00, 0x6f,
+ 0x00, 0x6c, 0x00, 0x52, 0x00, 0x65, 0x00, 0x67, 0x00, 0x75, 0x00, 0x6c,
+ 0x00, 0x61, 0x00, 0x72, 0x00, 0x56, 0x00, 0x31, 0x00, 0x68, 0x00, 0x74,
+ 0x00, 0x74, 0x00, 0x70, 0x00, 0x3a, 0x00, 0x2f, 0x00, 0x2f, 0x00, 0x73,
+ 0x00, 0x63, 0x00, 0x72, 0x00, 0x69, 0x00, 0x70, 0x00, 0x74, 0x00, 0x73,
+ 0x00, 0x2e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6c, 0x00, 0x2e, 0x00, 0x6f,
+ 0x00, 0x72, 0x00, 0x67, 0x00, 0x2f, 0x00, 0x4f, 0x00, 0x46, 0x00, 0x4c,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x66,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0xff, 0xff, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0c, 0x00, 0x14, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x00
+};
+
+/**
+ * There does not appear to be a publicly accessible API for determining if lcd
+ * font smoothing will be applied if we request it. The main issue is that if
+ * smoothing is applied a gamma of 2.0 will be used, if not a gamma of 1.0.
+ */
+SkCTFontSmoothBehavior SkCTFontGetSmoothBehavior() {
+ static SkCTFontSmoothBehavior gSmoothBehavior = []{
+ uint32_t noSmoothBitmap[16][16] = {};
+ uint32_t smoothBitmap[16][16] = {};
+
+ SkUniqueCFRef<CGColorSpaceRef> colorspace(CGColorSpaceCreateDeviceRGB());
+ SkUniqueCFRef<CGContextRef> noSmoothContext(
+ CGBitmapContextCreate(&noSmoothBitmap, 16, 16, 8, 16*4,
+ colorspace.get(), kBitmapInfoRGB));
+ SkUniqueCFRef<CGContextRef> smoothContext(
+ CGBitmapContextCreate(&smoothBitmap, 16, 16, 8, 16*4,
+ colorspace.get(), kBitmapInfoRGB));
+
+ SkUniqueCFRef<CFDataRef> data(CFDataCreateWithBytesNoCopy(
+ kCFAllocatorDefault, kSpiderSymbol_ttf, std::size(kSpiderSymbol_ttf),
+ kCFAllocatorNull));
+ SkUniqueCFRef<CTFontDescriptorRef> desc(
+ CTFontManagerCreateFontDescriptorFromData(data.get()));
+ SkUniqueCFRef<CTFontRef> ctFont(CTFontCreateWithFontDescriptor(desc.get(), 16, nullptr));
+ SkASSERT(ctFont);
+
+ CGContextSetShouldSmoothFonts(noSmoothContext.get(), false);
+ CGContextSetShouldAntialias(noSmoothContext.get(), true);
+ CGContextSetTextDrawingMode(noSmoothContext.get(), kCGTextFill);
+ CGContextSetGrayFillColor(noSmoothContext.get(), 1, 1);
+
+ CGContextSetShouldSmoothFonts(smoothContext.get(), true);
+ CGContextSetShouldAntialias(smoothContext.get(), true);
+ CGContextSetTextDrawingMode(smoothContext.get(), kCGTextFill);
+ CGContextSetGrayFillColor(smoothContext.get(), 1, 1);
+
+ CGPoint point = CGPointMake(0, 3);
+ CGGlyph spiderGlyph = 3;
+ CTFontDrawGlyphs(ctFont.get(), &spiderGlyph, &point, 1, noSmoothContext.get());
+ CTFontDrawGlyphs(ctFont.get(), &spiderGlyph, &point, 1, smoothContext.get());
+
+ // For debugging.
+        //SkUniqueCFRef<CGImageRef> image(CGBitmapContextCreateImage(noSmoothContext.get()));
+        //SkUniqueCFRef<CGImageRef> image(CGBitmapContextCreateImage(smoothContext.get()));
+
+ SkCTFontSmoothBehavior smoothBehavior = SkCTFontSmoothBehavior::none;
+ for (int x = 0; x < 16; ++x) {
+ for (int y = 0; y < 16; ++y) {
+ uint32_t smoothPixel = smoothBitmap[x][y];
+ uint32_t r = (smoothPixel >> 16) & 0xFF;
+ uint32_t g = (smoothPixel >> 8) & 0xFF;
+ uint32_t b = (smoothPixel >> 0) & 0xFF;
+ if (r != g || r != b) {
+ return SkCTFontSmoothBehavior::subpixel;
+ }
+ if (noSmoothBitmap[x][y] != smoothPixel) {
+ smoothBehavior = SkCTFontSmoothBehavior::some;
+ }
+ }
+ }
+ return smoothBehavior;
+ }();
+ return gSmoothBehavior;
+}
+
+SkCTFontWeightMapping& SkCTFontGetNSFontWeightMapping() {
+ // In the event something goes wrong finding the real values, use this mapping.
+ static constexpr CGFloat defaultNSFontWeights[] =
+ { -1.00, -0.80, -0.60, -0.40, 0.00, 0.23, 0.30, 0.40, 0.56, 0.62, 1.00 };
+
+ // Declarations in <AppKit/AppKit.h> on macOS, <UIKit/UIKit.h> on iOS
+#ifdef SK_BUILD_FOR_MAC
+# define SK_KIT_FONT_WEIGHT_PREFIX "NS"
+#endif
+#ifdef SK_BUILD_FOR_IOS
+# define SK_KIT_FONT_WEIGHT_PREFIX "UI"
+#endif
+ static constexpr const char* nsFontWeightNames[] = {
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightUltraLight",
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightThin",
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightLight",
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightRegular",
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightMedium",
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightSemibold",
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightBold",
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightHeavy",
+ SK_KIT_FONT_WEIGHT_PREFIX "FontWeightBlack",
+ };
+ static_assert(std::size(nsFontWeightNames) == 9, "");
+
+ static CGFloat nsFontWeights[11];
+ static const CGFloat (*selectedNSFontWeights)[11] = &defaultNSFontWeights;
+ static SkOnce once;
+ once([&] {
+ size_t i = 0;
+ nsFontWeights[i++] = -1.00;
+ for (const char* nsFontWeightName : nsFontWeightNames) {
+ void* nsFontWeightValuePtr = dlsym(RTLD_DEFAULT, nsFontWeightName);
+ if (nsFontWeightValuePtr) {
+ nsFontWeights[i++] = *(static_cast<CGFloat*>(nsFontWeightValuePtr));
+ } else {
+ return;
+ }
+ }
+ nsFontWeights[i++] = 1.00;
+ selectedNSFontWeights = &nsFontWeights;
+ });
+ return *selectedNSFontWeights;
+}
+
+SkCTFontWeightMapping& SkCTFontGetDataFontWeightMapping() {
+ // In the event something goes wrong finding the real values, use this mapping.
+ // These were the values from macOS 10.13 to 10.15.
+ static constexpr CGFloat defaultDataFontWeights[] =
+ { -1.00, -0.70, -0.50, -0.23, 0.00, 0.20, 0.30, 0.40, 0.60, 0.80, 1.00 };
+
+ static const CGFloat (*selectedDataFontWeights)[11] = &defaultDataFontWeights;
+ static CGFloat dataFontWeights[11];
+ static SkOnce once;
+ once([&] {
+ constexpr size_t dataSize = std::size(kSpiderSymbol_ttf);
+ sk_sp<SkData> data = SkData::MakeWithCopy(kSpiderSymbol_ttf, dataSize);
+ const SkSFNTHeader* sfntHeader = reinterpret_cast<const SkSFNTHeader*>(data->data());
+ const SkSFNTHeader::TableDirectoryEntry* tableEntry =
+ SkTAfter<const SkSFNTHeader::TableDirectoryEntry>(sfntHeader);
+ const SkSFNTHeader::TableDirectoryEntry* os2TableEntry = nullptr;
+ int numTables = SkEndian_SwapBE16(sfntHeader->numTables);
+ for (int tableEntryIndex = 0; tableEntryIndex < numTables; ++tableEntryIndex) {
+ if (SkOTTableOS2::TAG == tableEntry[tableEntryIndex].tag) {
+ os2TableEntry = tableEntry + tableEntryIndex;
+ break;
+ }
+ }
+ if (!os2TableEntry) {
+ return;
+ }
+ size_t os2TableOffset = SkEndian_SwapBE32(os2TableEntry->offset);
+ SkOTTableOS2_V0* os2Table = SkTAddOffset<SkOTTableOS2_V0>(data->writable_data(),
+ os2TableOffset);
+
+ CGFloat previousWeight = -CGFLOAT_MAX;
+ for (int i = 0; i < 11; ++i) {
+ os2Table->usWeightClass.value = SkEndian_SwapBE16(i * 100);
+
+ // On macOS 10.14 and earlier it appears that the CFDataGetBytePtr is used somehow in
+ // font caching. Creating a slightly modified font with data at the same address seems
+ // to in some ways act like a font previously created at that address. As a result,
+ // always make a copy of the data.
+ SkUniqueCFRef<CFDataRef> cfData(
+ CFDataCreate(kCFAllocatorDefault, (const UInt8 *)data->data(), data->size()));
+ if (!cfData) {
+ return;
+ }
+ SkUniqueCFRef<CTFontDescriptorRef> desc(
+ CTFontManagerCreateFontDescriptorFromData(cfData.get()));
+ if (!desc) {
+ return;
+ }
+
+ // On macOS 10.14 and earlier, the CTFontDescriptorRef returned from
+ // CTFontManagerCreateFontDescriptorFromData is incomplete and does not have the
+ // correct traits. It is necessary to create the CTFont and then get the descriptor
+ // off of it.
+ SkUniqueCFRef<CTFontRef> ctFont(CTFontCreateWithFontDescriptor(desc.get(), 9, nullptr));
+ if (!ctFont) {
+ return;
+ }
+ SkUniqueCFRef<CTFontDescriptorRef> desc2(CTFontCopyFontDescriptor(ctFont.get()));
+ if (!desc2) {
+ return;
+ }
+
+ SkUniqueCFRef<CFTypeRef> traitsRef(
+ CTFontDescriptorCopyAttribute(desc2.get(), kCTFontTraitsAttribute));
+ if (!traitsRef || CFGetTypeID(traitsRef.get()) != CFDictionaryGetTypeID()) {
+ return;
+ }
+ CFDictionaryRef fontTraitsDict = static_cast<CFDictionaryRef>(traitsRef.get());
+
+ CFTypeRef weightRef;
+ if (!CFDictionaryGetValueIfPresent(fontTraitsDict, kCTFontWeightTrait, &weightRef) ||
+ !weightRef)
+ {
+ return;
+ }
+
+ // It is possible there is a kCTFontWeightTrait entry, but it is not a CFNumberRef.
+ // This is usually due to a bug with the handling of 0, so set the default to 0.
+ // See https://crbug.com/1372420
+ CGFloat weight = 0;
+ if (CFGetTypeID(weightRef) == CFNumberGetTypeID()) {
+ CFNumberRef weightNumber = static_cast<CFNumberRef>(weightRef);
+ if (!CFNumberIsFloatType(weightNumber) ||
+ !CFNumberGetValue(weightNumber, kCFNumberCGFloatType, &weight))
+ {
+ // CFNumberGetValue may modify `weight` even when returning `false`.
+ weight = 0;
+ }
+ }
+
+ // It is expected that the weights will be strictly monotonically increasing.
+ if (weight <= previousWeight) {
+ return;
+ }
+ previousWeight = weight;
+ dataFontWeights[i] = weight;
+ }
+ selectedDataFontWeights = &dataFontWeights;
+ });
+ return *selectedDataFontWeights;
+}
+
+#endif
diff --git a/gfx/skia/skia/src/utils/mac/SkCTFont.h b/gfx/skia/skia/src/utils/mac/SkCTFont.h
new file mode 100644
index 0000000000..b3836b07ac
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkCTFont.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkCTFont_DEFINED
+#define SkCTFont_DEFINED
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#ifdef SK_BUILD_FOR_MAC
+#import <ApplicationServices/ApplicationServices.h>
+#endif
+
+#ifdef SK_BUILD_FOR_IOS
+#include <CoreGraphics/CoreGraphics.h>
+#endif
+
+enum class SkCTFontSmoothBehavior {
+ none, // SmoothFonts produces no effect.
+ some, // SmoothFonts produces some effect, but not subpixel coverage.
+ subpixel, // SmoothFonts produces some effect and provides subpixel coverage.
+};
+
+SkCTFontSmoothBehavior SkCTFontGetSmoothBehavior();
+
+using SkCTFontWeightMapping = const CGFloat[11];
+
+/** Returns the [-1, 1] CTFontDescriptor weights for the
+ * <0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000> CSS weights.
+ *
+ * It is assumed that the values will be interpolated linearly between these points.
+ * NSFontWeightXXX were added in 10.11, appear in 10.10, but do not appear in 10.9.
+ * The actual values appear to be stable, but they may change without notice.
+ * These values are valid for system fonts only.
+ */
+SkCTFontWeightMapping& SkCTFontGetNSFontWeightMapping();
+
+/** Returns the [-1, 1] CTFontDescriptor weights for the
+ * <0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000> CSS weights.
+ *
+ * It is assumed that the values will be interpolated linearly between these points.
+ * The actual values appear to be stable, but they may change without notice.
+ * These values are valid for fonts created from data only.
+ */
+SkCTFontWeightMapping& SkCTFontGetDataFontWeightMapping();
+
+#endif
+#endif // SkCTFont_DEFINED
diff --git a/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp b/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp
new file mode 100644
index 0000000000..6a5298cf29
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkCreateCGImageRef.cpp
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/core/SkBitmap.h"
+#include "include/private/SkColorData.h"
+#include "include/private/base/SkMacros.h"
+#include "include/private/base/SkTo.h"
+#include "include/utils/mac/SkCGUtils.h"
+#include "src/utils/mac/SkUniqueCFRef.h"
+
+#include <climits>
+#include <memory>
+
+static CGBitmapInfo compute_cgalpha_info_rgba(SkAlphaType at) {
+ CGBitmapInfo info = kCGBitmapByteOrder32Big;
+ switch (at) {
+ case kUnknown_SkAlphaType: break;
+ case kOpaque_SkAlphaType: info |= kCGImageAlphaNoneSkipLast; break;
+ case kPremul_SkAlphaType: info |= kCGImageAlphaPremultipliedLast; break;
+ case kUnpremul_SkAlphaType: info |= kCGImageAlphaLast; break;
+ }
+ return info;
+}
+
+static CGBitmapInfo compute_cgalpha_info_bgra(SkAlphaType at) {
+ CGBitmapInfo info = kCGBitmapByteOrder32Little;
+ switch (at) {
+ case kUnknown_SkAlphaType: break;
+ case kOpaque_SkAlphaType: info |= kCGImageAlphaNoneSkipFirst; break;
+ case kPremul_SkAlphaType: info |= kCGImageAlphaPremultipliedFirst; break;
+ case kUnpremul_SkAlphaType: info |= kCGImageAlphaFirst; break;
+ }
+ return info;
+}
+static CGBitmapInfo compute_cgalpha_info_4444(SkAlphaType at) {
+ CGBitmapInfo info = kCGBitmapByteOrder16Little;
+ switch (at) {
+ case kOpaque_SkAlphaType: info |= kCGImageAlphaNoneSkipLast; break;
+ default: info |= kCGImageAlphaPremultipliedLast; break;
+ }
+ return info;
+}
+
+static bool get_bitmap_info(SkColorType skColorType,
+ SkAlphaType skAlphaType,
+ size_t* bitsPerComponent,
+ CGBitmapInfo* info,
+ bool* upscaleTo32) {
+ if (upscaleTo32) {
+ *upscaleTo32 = false;
+ }
+ switch (skColorType) {
+ case kRGB_565_SkColorType:
+ if (upscaleTo32) {
+ *upscaleTo32 = true;
+ }
+ // now treat like RGBA
+ *bitsPerComponent = 8;
+ *info = compute_cgalpha_info_rgba(kOpaque_SkAlphaType);
+ break;
+ case kRGBA_8888_SkColorType:
+ *bitsPerComponent = 8;
+ *info = compute_cgalpha_info_rgba(skAlphaType);
+ break;
+ case kBGRA_8888_SkColorType:
+ *bitsPerComponent = 8;
+ *info = compute_cgalpha_info_bgra(skAlphaType);
+ break;
+ case kARGB_4444_SkColorType:
+ *bitsPerComponent = 4;
+ *info = compute_cgalpha_info_4444(skAlphaType);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static std::unique_ptr<SkBitmap> prepare_for_image_ref(const SkBitmap& bm,
+ size_t* bitsPerComponent,
+ CGBitmapInfo* info) {
+ bool upscaleTo32;
+ if (!get_bitmap_info(bm.colorType(), bm.alphaType(), bitsPerComponent, info, &upscaleTo32)) {
+ return nullptr;
+ }
+ if (upscaleTo32) {
+ std::unique_ptr<SkBitmap> copy(new SkBitmap);
+ // here we make a deep copy of the pixels, since CG won't take our
+ // 565 directly, so we always go to RGBA
+ copy->allocPixels(bm.info().makeColorType(kRGBA_8888_SkColorType));
+ bm.readPixels(copy->info(), copy->getPixels(), copy->rowBytes(), 0, 0);
+ return copy;
+ }
+ return std::make_unique<SkBitmap>(bm);
+}
+
+CGImageRef SkCreateCGImageRefWithColorspace(const SkBitmap& bm,
+ CGColorSpaceRef colorSpace) {
+ if (bm.drawsNothing()) {
+ return nullptr;
+ }
+ size_t bitsPerComponent SK_INIT_TO_AVOID_WARNING;
+ CGBitmapInfo info SK_INIT_TO_AVOID_WARNING;
+
+ std::unique_ptr<SkBitmap> bitmap = prepare_for_image_ref(bm, &bitsPerComponent, &info);
+ if (nullptr == bitmap) {
+ return nullptr;
+ }
+
+ SkPixmap pm = bitmap->pixmap(); // Copy bitmap info before releasing it.
+ const size_t s = bitmap->computeByteSize();
+ void* pixels = bitmap->getPixels();
+
+ // our provider "owns" the bitmap*, and will take care of deleting it
+ SkUniqueCFRef<CGDataProviderRef> dataRef(CGDataProviderCreateWithData(
+ bitmap.release(), pixels, s,
+ [](void* p, const void*, size_t) { delete reinterpret_cast<SkBitmap*>(p); }));
+
+ SkUniqueCFRef<CGColorSpaceRef> rgb;
+ if (nullptr == colorSpace) {
+ rgb.reset(CGColorSpaceCreateDeviceRGB());
+ colorSpace = rgb.get();
+ }
+ return CGImageCreate(pm.width(), pm.height(), bitsPerComponent,
+ pm.info().bytesPerPixel() * CHAR_BIT, pm.rowBytes(), colorSpace,
+ info, dataRef.get(), nullptr, false, kCGRenderingIntentDefault);
+}
+
+void SkCGDrawBitmap(CGContextRef cg, const SkBitmap& bm, float x, float y) {
+ SkUniqueCFRef<CGImageRef> img(SkCreateCGImageRef(bm));
+
+ if (img) {
+ CGRect r = CGRectMake(0, 0, bm.width(), bm.height());
+
+ CGContextSaveGState(cg);
+ CGContextTranslateCTM(cg, x, r.size.height + y);
+ CGContextScaleCTM(cg, 1, -1);
+
+ CGContextDrawImage(cg, r, img.get());
+
+ CGContextRestoreGState(cg);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+CGContextRef SkCreateCGContext(const SkPixmap& pmap) {
+ CGBitmapInfo cg_bitmap_info = 0;
+ size_t bitsPerComponent = 0;
+ switch (pmap.colorType()) {
+ case kRGBA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = compute_cgalpha_info_rgba(pmap.alphaType());
+ break;
+ case kBGRA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = compute_cgalpha_info_bgra(pmap.alphaType());
+ break;
+ default:
+ return nullptr; // no other colortypes are supported (for now)
+ }
+
+ size_t rb = pmap.addr() ? pmap.rowBytes() : 0;
+ SkUniqueCFRef<CGColorSpaceRef> cs(CGColorSpaceCreateDeviceRGB());
+ CGContextRef cg = CGBitmapContextCreate(pmap.writable_addr(), pmap.width(), pmap.height(),
+ bitsPerComponent, rb, cs.get(), cg_bitmap_info);
+ return cg;
+}
+
+bool SkCopyPixelsFromCGImage(const SkImageInfo& info, size_t rowBytes, void* pixels,
+ CGImageRef image) {
+ CGBitmapInfo cg_bitmap_info = 0;
+ size_t bitsPerComponent = 0;
+ switch (info.colorType()) {
+ case kRGBA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = compute_cgalpha_info_rgba(info.alphaType());
+ break;
+ case kBGRA_8888_SkColorType:
+ bitsPerComponent = 8;
+ cg_bitmap_info = compute_cgalpha_info_bgra(info.alphaType());
+ break;
+ default:
+ return false; // no other colortypes are supported (for now)
+ }
+
+ SkUniqueCFRef<CGColorSpaceRef> cs(CGColorSpaceCreateDeviceRGB());
+ SkUniqueCFRef<CGContextRef> cg(CGBitmapContextCreate(
+ pixels, info.width(), info.height(), bitsPerComponent,
+ rowBytes, cs.get(), cg_bitmap_info));
+ if (!cg) {
+ return false;
+ }
+
+ // use this blend mode, to avoid having to erase the pixels first, and to avoid CG performing
+ // any blending (which could introduce errors and be slower).
+ CGContextSetBlendMode(cg.get(), kCGBlendModeCopy);
+
+ CGContextDrawImage(cg.get(), CGRectMake(0, 0, info.width(), info.height()), image);
+ return true;
+}
+
+bool SkCreateBitmapFromCGImage(SkBitmap* dst, CGImageRef image) {
+ const int width = SkToInt(CGImageGetWidth(image));
+ const int height = SkToInt(CGImageGetHeight(image));
+ SkImageInfo info = SkImageInfo::MakeN32Premul(width, height);
+
+ SkBitmap tmp;
+ if (!tmp.tryAllocPixels(info)) {
+ return false;
+ }
+
+ if (!SkCopyPixelsFromCGImage(tmp.info(), tmp.rowBytes(), tmp.getPixels(), image)) {
+ return false;
+ }
+
+ CGImageAlphaInfo cgInfo = CGImageGetAlphaInfo(image);
+ switch (cgInfo) {
+ case kCGImageAlphaNone:
+ case kCGImageAlphaNoneSkipLast:
+ case kCGImageAlphaNoneSkipFirst:
+ SkASSERT(SkBitmap::ComputeIsOpaque(tmp));
+ tmp.setAlphaType(kOpaque_SkAlphaType);
+ break;
+ default:
+ // we don't know if we're opaque or not, so compute it.
+ if (SkBitmap::ComputeIsOpaque(tmp)) {
+ tmp.setAlphaType(kOpaque_SkAlphaType);
+ }
+ }
+
+ *dst = tmp;
+ return true;
+}
+
+sk_sp<SkImage> SkMakeImageFromCGImage(CGImageRef src) {
+ SkBitmap bm;
+ if (!SkCreateBitmapFromCGImage(&bm, src)) {
+ return nullptr;
+ }
+
+ bm.setImmutable();
+ return bm.asImage();
+}
+
+#endif//defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
diff --git a/gfx/skia/skia/src/utils/mac/SkUniqueCFRef.h b/gfx/skia/skia/src/utils/mac/SkUniqueCFRef.h
new file mode 100644
index 0000000000..894a42adec
--- /dev/null
+++ b/gfx/skia/skia/src/utils/mac/SkUniqueCFRef.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkUniqueCFRef_DEFINED
+#define SkUniqueCFRef_DEFINED
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_IOS)
+
+#include "include/private/base/SkTemplates.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <memory>
+#include <type_traits>
+
+template <typename CFRef> using SkUniqueCFRef =
+ std::unique_ptr<std::remove_pointer_t<CFRef>, SkFunctionObject<CFRelease>>;
+
+#endif
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp
new file mode 100644
index 0000000000..5b0f3f9745
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/utils/win/SkAutoCoInitialize.h"
+
+#include <objbase.h>
+#include <winerror.h>
+
+SkAutoCoInitialize::SkAutoCoInitialize() :
+ fHR(
+ CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE)
+ )
+{ }
+
+SkAutoCoInitialize::~SkAutoCoInitialize() {
+ if (SUCCEEDED(this->fHR)) {
+ CoUninitialize();
+ }
+}
+
+bool SkAutoCoInitialize::succeeded() {
+ return SUCCEEDED(this->fHR) || RPC_E_CHANGED_MODE == this->fHR;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h
new file mode 100644
index 0000000000..24a0548613
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkAutoCoInitialize.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAutoCo_DEFINED
+#define SkAutoCo_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include "include/private/base/SkNoncopyable.h"
+#include "src/base/SkLeanWindows.h"
+
+/**
+ * An instance of this class initializes COM on creation
+ * and closes the COM library on destruction.
+ */
+class SkAutoCoInitialize : SkNoncopyable {
+private:
+ HRESULT fHR;
+public:
+ SkAutoCoInitialize();
+ ~SkAutoCoInitialize();
+ bool succeeded();
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkAutoCo_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkDWrite.cpp b/gfx/skia/skia/src/utils/win/SkDWrite.cpp
new file mode 100644
index 0000000000..30460fe5f6
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWrite.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkString.h"
+#include "include/private/base/SkOnce.h"
+#include "src/utils/win/SkDWrite.h"
+#include "src/utils/win/SkHRESULT.h"
+
+#include <dwrite.h>
+
+#if defined(__clang__)
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wcast-function-type"
+#endif
+
+static IDWriteFactory* gDWriteFactory = nullptr;
+
+static void release_dwrite_factory() {
+ if (gDWriteFactory) {
+ gDWriteFactory->Release();
+ }
+}
+
+static void create_dwrite_factory(IDWriteFactory** factory) {
+ typedef decltype(DWriteCreateFactory)* DWriteCreateFactoryProc;
+ DWriteCreateFactoryProc dWriteCreateFactoryProc = reinterpret_cast<DWriteCreateFactoryProc>(
+ GetProcAddress(LoadLibraryW(L"dwrite.dll"), "DWriteCreateFactory"));
+
+ if (!dWriteCreateFactoryProc) {
+ HRESULT hr = HRESULT_FROM_WIN32(GetLastError());
+ if (!IS_ERROR(hr)) {
+ hr = ERROR_PROC_NOT_FOUND;
+ }
+ HRVM(hr, "Could not get DWriteCreateFactory proc.");
+ }
+
+ HRVM(dWriteCreateFactoryProc(DWRITE_FACTORY_TYPE_SHARED,
+ __uuidof(IDWriteFactory),
+ reinterpret_cast<IUnknown**>(factory)),
+ "Could not create DirectWrite factory.");
+ atexit(release_dwrite_factory);
+}
+
+
+IDWriteFactory* sk_get_dwrite_factory() {
+ static SkOnce once;
+ once(create_dwrite_factory, &gDWriteFactory);
+ return gDWriteFactory;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// String conversion
+
+/** Converts a utf8 string to a WCHAR string. */
+HRESULT sk_cstring_to_wchar(const char* skname, SkSMallocWCHAR* name) {
+ int wlen = MultiByteToWideChar(CP_UTF8, 0, skname, -1, nullptr, 0);
+ if (0 == wlen) {
+ HRM(HRESULT_FROM_WIN32(GetLastError()),
+ "Could not get length for wchar to utf-8 conversion.");
+ }
+ name->reset(wlen);
+ wlen = MultiByteToWideChar(CP_UTF8, 0, skname, -1, name->get(), wlen);
+ if (0 == wlen) {
+ HRM(HRESULT_FROM_WIN32(GetLastError()), "Could not convert wchar to utf-8.");
+ }
+ return S_OK;
+}
+
+/** Converts a WCHAR string to a utf8 string. */
+HRESULT sk_wchar_to_skstring(WCHAR* name, int nameLen, SkString* skname) {
+ int len = WideCharToMultiByte(CP_UTF8, 0, name, nameLen, nullptr, 0, nullptr, nullptr);
+ if (0 == len) {
+ if (nameLen <= 0) {
+ skname->reset();
+ return S_OK;
+ }
+ HRM(HRESULT_FROM_WIN32(GetLastError()),
+ "Could not get length for utf-8 to wchar conversion.");
+ }
+ skname->resize(len);
+
+ len = WideCharToMultiByte(CP_UTF8, 0, name, nameLen, skname->data(), len, nullptr, nullptr);
+ if (0 == len) {
+ HRM(HRESULT_FROM_WIN32(GetLastError()), "Could not convert utf-8 to wchar.");
+ }
+ return S_OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Locale
+
+HRESULT sk_get_locale_string(IDWriteLocalizedStrings* names, const WCHAR* preferedLocale,
+ SkString* skname) {
+ UINT32 nameIndex = 0;
+ if (preferedLocale) {
+ // Ignore any errors and continue with index 0 if there is a problem.
+ BOOL nameExists = FALSE;
+ (void)names->FindLocaleName(preferedLocale, &nameIndex, &nameExists);
+ if (!nameExists) {
+ nameIndex = 0;
+ }
+ }
+
+ UINT32 nameLen;
+ HRM(names->GetStringLength(nameIndex, &nameLen), "Could not get name length.");
+
+ SkSMallocWCHAR name(nameLen + 1);
+ HRM(names->GetString(nameIndex, name.get(), nameLen + 1), "Could not get string.");
+
+ HR(sk_wchar_to_skstring(name.get(), nameLen, skname));
+ return S_OK;
+}
+
+HRESULT SkGetGetUserDefaultLocaleNameProc(SkGetUserDefaultLocaleNameProc* proc) {
+ *proc = reinterpret_cast<SkGetUserDefaultLocaleNameProc>(
+ GetProcAddress(LoadLibraryW(L"Kernel32.dll"), "GetUserDefaultLocaleName")
+ );
+ if (!*proc) {
+ HRESULT hr = HRESULT_FROM_WIN32(GetLastError());
+ if (!IS_ERROR(hr)) {
+ hr = ERROR_PROC_NOT_FOUND;
+ }
+ return hr;
+ }
+ return S_OK;
+}
+
+#if defined(__clang__)
+ #pragma clang diagnostic pop
+#endif
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkDWrite.h b/gfx/skia/skia/src/utils/win/SkDWrite.h
new file mode 100644
index 0000000000..4c447c2a7d
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWrite.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWrite_DEFINED
+#define SkDWrite_DEFINED
+
+#include "include/core/SkFontStyle.h"
+#include "include/private/base/SkTemplates.h"
+
+#include <dwrite.h>
+#include <winsdkver.h>
+
+class SkString;
+
+////////////////////////////////////////////////////////////////////////////////
+// Factory
+
+IDWriteFactory* sk_get_dwrite_factory();
+
+////////////////////////////////////////////////////////////////////////////////
+// String conversion
+
+/** Prefer to use this type to prevent template proliferation. */
+typedef skia_private::AutoSTMalloc<16, WCHAR> SkSMallocWCHAR;
+
+/** Converts a utf8 string to a WCHAR string. */
+HRESULT sk_cstring_to_wchar(const char* skname, SkSMallocWCHAR* name);
+
+/** Converts a WCHAR string to a utf8 string.
+ * @param nameLen the number of WCHARs in the name.
+ */
+HRESULT sk_wchar_to_skstring(WCHAR* name, int nameLen, SkString* skname);
+
+////////////////////////////////////////////////////////////////////////////////
+// Locale
+
+HRESULT sk_get_locale_string(IDWriteLocalizedStrings* names, const WCHAR* preferedLocale,
+ SkString* skname);
+
+typedef int (WINAPI *SkGetUserDefaultLocaleNameProc)(LPWSTR, int);
+HRESULT SkGetGetUserDefaultLocaleNameProc(SkGetUserDefaultLocaleNameProc* proc);
+
+////////////////////////////////////////////////////////////////////////////////
+// Table handling
+
+class AutoDWriteTable {
+public:
+ AutoDWriteTable(IDWriteFontFace* fontFace, UINT32 beTag) : fExists(FALSE), fFontFace(fontFace) {
+ // Any errors are ignored, user must check fExists anyway.
+ fontFace->TryGetFontTable(beTag,
+ reinterpret_cast<const void **>(&fData), &fSize, &fLock, &fExists);
+ }
+ ~AutoDWriteTable() {
+ if (fExists) {
+ fFontFace->ReleaseFontTable(fLock);
+ }
+ }
+
+ const uint8_t* fData;
+ UINT32 fSize;
+ BOOL fExists;
+private:
+ // Borrowed reference, the user must ensure the fontFace stays alive.
+ IDWriteFontFace* fFontFace;
+ void* fLock;
+};
+template<typename T> class AutoTDWriteTable : public AutoDWriteTable {
+public:
+ static const UINT32 tag = DWRITE_MAKE_OPENTYPE_TAG(T::TAG0, T::TAG1, T::TAG2, T::TAG3);
+ AutoTDWriteTable(IDWriteFontFace* fontFace) : AutoDWriteTable(fontFace, tag) { }
+
+ const T* get() const { return reinterpret_cast<const T*>(fData); }
+ const T* operator->() const { return reinterpret_cast<const T*>(fData); }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Style conversion
+
+struct DWriteStyle {
+ explicit DWriteStyle(const SkFontStyle& pattern) {
+ fWeight = (DWRITE_FONT_WEIGHT)pattern.weight();
+ fWidth = (DWRITE_FONT_STRETCH)pattern.width();
+ switch (pattern.slant()) {
+ case SkFontStyle::kUpright_Slant: fSlant = DWRITE_FONT_STYLE_NORMAL ; break;
+ case SkFontStyle::kItalic_Slant: fSlant = DWRITE_FONT_STYLE_ITALIC ; break;
+ case SkFontStyle::kOblique_Slant: fSlant = DWRITE_FONT_STYLE_OBLIQUE; break;
+ default: SkASSERT(false); break;
+ }
+ }
+ DWRITE_FONT_WEIGHT fWeight;
+ DWRITE_FONT_STRETCH fWidth;
+ DWRITE_FONT_STYLE fSlant;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp
new file mode 100644
index 0000000000..e574cf1351
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkTypes.h"
+#include "include/private/base/SkTFitsIn.h"
+#include "include/private/base/SkTemplates.h"
+#include "src/utils/win/SkDWriteFontFileStream.h"
+#include "src/utils/win/SkHRESULT.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+
+using namespace skia_private;
+
+///////////////////////////////////////////////////////////////////////////////
+// SkIDWriteFontFileStream
+
+SkDWriteFontFileStream::SkDWriteFontFileStream(IDWriteFontFileStream* fontFileStream)
+ : fFontFileStream(SkRefComPtr(fontFileStream))
+ , fPos(0)
+ , fLockedMemory(nullptr)
+ , fFragmentLock(nullptr) {
+}
+
+SkDWriteFontFileStream::~SkDWriteFontFileStream() {
+ if (fFragmentLock) {
+ fFontFileStream->ReleaseFileFragment(fFragmentLock);
+ }
+}
+
+size_t SkDWriteFontFileStream::read(void* buffer, size_t size) {
+ HRESULT hr = S_OK;
+
+ if (nullptr == buffer) {
+ size_t fileSize = this->getLength();
+
+ if (fPos + size > fileSize) {
+ size_t skipped = fileSize - fPos;
+ fPos = fileSize;
+ return skipped;
+ } else {
+ fPos += size;
+ return size;
+ }
+ }
+
+ const void* start;
+ void* fragmentLock;
+ hr = fFontFileStream->ReadFileFragment(&start, fPos, size, &fragmentLock);
+ if (SUCCEEDED(hr)) {
+ memcpy(buffer, start, size);
+ fFontFileStream->ReleaseFileFragment(fragmentLock);
+ fPos += size;
+ return size;
+ }
+
+ //The read may have failed because we asked for too much data.
+ size_t fileSize = this->getLength();
+ if (fPos + size <= fileSize) {
+ //This means we were within bounds, but failed for some other reason.
+ return 0;
+ }
+
+ size_t read = fileSize - fPos;
+ hr = fFontFileStream->ReadFileFragment(&start, fPos, read, &fragmentLock);
+ if (SUCCEEDED(hr)) {
+ memcpy(buffer, start, read);
+ fFontFileStream->ReleaseFileFragment(fragmentLock);
+ fPos = fileSize;
+ return read;
+ }
+
+ return 0;
+}
+
+bool SkDWriteFontFileStream::isAtEnd() const {
+ return fPos == this->getLength();
+}
+
+bool SkDWriteFontFileStream::rewind() {
+ fPos = 0;
+ return true;
+}
+
+SkDWriteFontFileStream* SkDWriteFontFileStream::onDuplicate() const {
+ return new SkDWriteFontFileStream(fFontFileStream.get());
+}
+
+size_t SkDWriteFontFileStream::getPosition() const {
+ return fPos;
+}
+
+bool SkDWriteFontFileStream::seek(size_t position) {
+ size_t length = this->getLength();
+ fPos = (position > length) ? length : position;
+ return true;
+}
+
+bool SkDWriteFontFileStream::move(long offset) {
+ return seek(fPos + offset);
+}
+
+SkDWriteFontFileStream* SkDWriteFontFileStream::onFork() const {
+ std::unique_ptr<SkDWriteFontFileStream> that(this->duplicate());
+ that->seek(fPos);
+ return that.release();
+}
+
+size_t SkDWriteFontFileStream::getLength() const {
+ UINT64 realFileSize = 0;
+ fFontFileStream->GetFileSize(&realFileSize);
+ if (!SkTFitsIn<size_t>(realFileSize)) {
+ return 0;
+ }
+ return static_cast<size_t>(realFileSize);
+}
+
+const void* SkDWriteFontFileStream::getMemoryBase() {
+ if (fLockedMemory) {
+ return fLockedMemory;
+ }
+
+ UINT64 fileSize;
+ HRNM(fFontFileStream->GetFileSize(&fileSize), "Could not get file size");
+ HRNM(fFontFileStream->ReadFileFragment(&fLockedMemory, 0, fileSize, &fFragmentLock),
+ "Could not lock file fragment.");
+ return fLockedMemory;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SkIDWriteFontFileStreamWrapper
+
+HRESULT SkDWriteFontFileStreamWrapper::Create(SkStreamAsset* stream,
+ SkDWriteFontFileStreamWrapper** streamFontFileStream)
+{
+ *streamFontFileStream = new SkDWriteFontFileStreamWrapper(stream);
+ if (nullptr == *streamFontFileStream) {
+ return E_OUTOFMEMORY;
+ }
+ return S_OK;
+}
+
+SkDWriteFontFileStreamWrapper::SkDWriteFontFileStreamWrapper(SkStreamAsset* stream)
+ : fRefCount(1), fStream(stream) {
+}
+
+SK_STDMETHODIMP SkDWriteFontFileStreamWrapper::QueryInterface(REFIID iid, void** ppvObject) {
+ if (iid == IID_IUnknown || iid == __uuidof(IDWriteFontFileStream)) {
+ *ppvObject = this;
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) SkDWriteFontFileStreamWrapper::AddRef() {
+ return InterlockedIncrement(&fRefCount);
+}
+
+SK_STDMETHODIMP_(ULONG) SkDWriteFontFileStreamWrapper::Release() {
+ ULONG newCount = InterlockedDecrement(&fRefCount);
+ if (0 == newCount) {
+ delete this;
+ }
+ return newCount;
+}
+
+SK_STDMETHODIMP SkDWriteFontFileStreamWrapper::ReadFileFragment(
+ void const** fragmentStart,
+ UINT64 fileOffset,
+ UINT64 fragmentSize,
+ void** fragmentContext)
+{
+ // The loader is responsible for doing a bounds check.
+ UINT64 fileSize;
+ this->GetFileSize(&fileSize);
+ if (fileOffset > fileSize || fragmentSize > fileSize - fileOffset) {
+ *fragmentStart = nullptr;
+ *fragmentContext = nullptr;
+ return E_FAIL;
+ }
+
+ if (!SkTFitsIn<size_t>(fileOffset + fragmentSize)) {
+ return E_FAIL;
+ }
+
+ const void* data = fStream->getMemoryBase();
+ if (data) {
+ *fragmentStart = static_cast<BYTE const*>(data) + static_cast<size_t>(fileOffset);
+ *fragmentContext = nullptr;
+
+ } else {
+ // May be called from multiple threads.
+ SkAutoMutexExclusive ama(fStreamMutex);
+
+ *fragmentStart = nullptr;
+ *fragmentContext = nullptr;
+
+ if (!fStream->seek(static_cast<size_t>(fileOffset))) {
+ return E_FAIL;
+ }
+ AutoTMalloc<uint8_t> streamData(static_cast<size_t>(fragmentSize));
+ if (fStream->read(streamData.get(), static_cast<size_t>(fragmentSize)) != fragmentSize) {
+ return E_FAIL;
+ }
+
+ *fragmentStart = streamData.get();
+ *fragmentContext = streamData.release();
+ }
+ return S_OK;
+}
+
+SK_STDMETHODIMP_(void) SkDWriteFontFileStreamWrapper::ReleaseFileFragment(void* fragmentContext) {
+ sk_free(fragmentContext);
+}
+
+SK_STDMETHODIMP SkDWriteFontFileStreamWrapper::GetFileSize(UINT64* fileSize) {
+ *fileSize = fStream->getLength();
+ return S_OK;
+}
+
+SK_STDMETHODIMP SkDWriteFontFileStreamWrapper::GetLastWriteTime(UINT64* lastWriteTime) {
+ // The concept of last write time does not apply to this loader.
+ *lastWriteTime = 0;
+ return E_NOTIMPL;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h
new file mode 100644
index 0000000000..71ef04c165
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteFontFileStream.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWriteFontFileStream_DEFINED
+#define SkDWriteFontFileStream_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#include "include/core/SkStream.h"
+#include "include/private/base/SkMutex.h"
+#include "src/utils/win/SkObjBase.h"
+#include "src/utils/win/SkTScopedComPtr.h"
+
+#include <dwrite.h>
+
+/**
+ * An SkStream backed by an IDWriteFontFileStream.
+ * This allows Skia code to read an IDWriteFontFileStream.
+ */
+class SkDWriteFontFileStream : public SkStreamMemory {
+public:
+ explicit SkDWriteFontFileStream(IDWriteFontFileStream* fontFileStream);
+ ~SkDWriteFontFileStream() override;
+
+ size_t read(void* buffer, size_t size) override;
+ bool isAtEnd() const override;
+ bool rewind() override;
+ size_t getPosition() const override;
+ bool seek(size_t position) override;
+ bool move(long offset) override;
+ size_t getLength() const override;
+ const void* getMemoryBase() override;
+
+ std::unique_ptr<SkDWriteFontFileStream> duplicate() const {
+ return std::unique_ptr<SkDWriteFontFileStream>(this->onDuplicate());
+ }
+ std::unique_ptr<SkDWriteFontFileStream> fork() const {
+ return std::unique_ptr<SkDWriteFontFileStream>(this->onFork());
+ }
+
+private:
+ SkDWriteFontFileStream* onDuplicate() const override;
+ SkDWriteFontFileStream* onFork() const override;
+
+ SkTScopedComPtr<IDWriteFontFileStream> fFontFileStream;
+ size_t fPos;
+ const void* fLockedMemory;
+ void* fFragmentLock;
+};
+
+/**
+ * An IDWriteFontFileStream backed by an SkStream.
+ * This allows DirectWrite to read an SkStream.
+ */
+class SkDWriteFontFileStreamWrapper : public IDWriteFontFileStream {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // IDWriteFontFileStream methods
+ SK_STDMETHODIMP ReadFileFragment(
+ void const** fragmentStart,
+ UINT64 fileOffset,
+ UINT64 fragmentSize,
+ void** fragmentContext) override;
+
+ SK_STDMETHODIMP_(void) ReleaseFileFragment(void* fragmentContext) override;
+ SK_STDMETHODIMP GetFileSize(UINT64* fileSize) override;
+ SK_STDMETHODIMP GetLastWriteTime(UINT64* lastWriteTime) override;
+
+ static HRESULT Create(SkStreamAsset* stream,
+ SkDWriteFontFileStreamWrapper** streamFontFileStream);
+
+private:
+ explicit SkDWriteFontFileStreamWrapper(SkStreamAsset* stream);
+ virtual ~SkDWriteFontFileStreamWrapper() { }
+
+ ULONG fRefCount;
+ std::unique_ptr<SkStreamAsset> fStream;
+ SkMutex fStreamMutex;
+};
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp
new file mode 100644
index 0000000000..cd5ae7cef9
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkPath.h"
+#include "src/utils/SkFloatUtils.h"
+#include "src/utils/win/SkDWriteGeometrySink.h"
+#include "src/utils/win/SkObjBase.h"
+
+#include <dwrite.h>
+#include <d2d1.h>
+
+SkDWriteGeometrySink::SkDWriteGeometrySink(SkPath* path)
+ : fRefCount{1}, fPath{path}, fStarted{false}, fCurrent{0,0} {}
+
+SkDWriteGeometrySink::~SkDWriteGeometrySink() { }
+
+SK_STDMETHODIMP SkDWriteGeometrySink::QueryInterface(REFIID iid, void **object) {
+ if (nullptr == object) {
+ return E_INVALIDARG;
+ }
+ if (iid == __uuidof(IUnknown) || iid == __uuidof(IDWriteGeometrySink)) {
+ *object = static_cast<IDWriteGeometrySink*>(this);
+ this->AddRef();
+ return S_OK;
+ } else {
+ *object = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) SkDWriteGeometrySink::AddRef(void) {
+ return static_cast<ULONG>(InterlockedIncrement(&fRefCount));
+}
+
+SK_STDMETHODIMP_(ULONG) SkDWriteGeometrySink::Release(void) {
+ ULONG res = static_cast<ULONG>(InterlockedDecrement(&fRefCount));
+ if (0 == res) {
+ delete this;
+ }
+ return res;
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::SetFillMode(D2D1_FILL_MODE fillMode) {
+ switch (fillMode) {
+ case D2D1_FILL_MODE_ALTERNATE:
+ fPath->setFillType(SkPathFillType::kEvenOdd);
+ break;
+ case D2D1_FILL_MODE_WINDING:
+ fPath->setFillType(SkPathFillType::kWinding);
+ break;
+ default:
+ SkDEBUGFAIL("Unknown D2D1_FILL_MODE.");
+ break;
+ }
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::SetSegmentFlags(D2D1_PATH_SEGMENT vertexFlags) {
+ if (vertexFlags == D2D1_PATH_SEGMENT_NONE || vertexFlags == D2D1_PATH_SEGMENT_FORCE_ROUND_LINE_JOIN) {
+ SkDEBUGFAIL("Invalid D2D1_PATH_SEGMENT value.");
+ }
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::BeginFigure(D2D1_POINT_2F startPoint, D2D1_FIGURE_BEGIN figureBegin) {
+ if (figureBegin == D2D1_FIGURE_BEGIN_HOLLOW) {
+ SkDEBUGFAIL("Invalid D2D1_FIGURE_BEGIN value.");
+ }
+ fStarted = false;
+ fCurrent = startPoint;
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::AddLines(const D2D1_POINT_2F *points, UINT pointsCount) {
+ for (const D2D1_POINT_2F *end = &points[pointsCount]; points < end; ++points) {
+ if (this->currentIsNot(*points)) {
+ this->goingTo(*points);
+ fPath->lineTo(points->x, points->y);
+ }
+ }
+}
+
+static bool approximately_equal(float a, float b) {
+ const SkFloatingPoint<float, 10> lhs(a), rhs(b);
+ return lhs.AlmostEquals(rhs);
+}
+
+typedef struct {
+ float x;
+ float y;
+} Cubic[4], Point;
+
+static bool check_quadratic(const Cubic& cubic, Point& quadraticP1) {
+ float dx10 = cubic[1].x - cubic[0].x;
+ float dx23 = cubic[2].x - cubic[3].x;
+ float midX = cubic[0].x + dx10 * 3 / 2;
+ //NOTE: !approximately_equal(midX - cubic[3].x, dx23 * 3 / 2)
+ //does not work as subnormals get in between the left side and 0.
+ if (!approximately_equal(midX, (dx23 * 3 / 2) + cubic[3].x)) {
+ return false;
+ }
+ float dy10 = cubic[1].y - cubic[0].y;
+ float dy23 = cubic[2].y - cubic[3].y;
+ float midY = cubic[0].y + dy10 * 3 / 2;
+ if (!approximately_equal(midY, (dy23 * 3 / 2) + cubic[3].y)) {
+ return false;
+ }
+ quadraticP1 = {midX, midY};
+ return true;
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::AddBeziers(const D2D1_BEZIER_SEGMENT *beziers, UINT beziersCount) {
+ for (const D2D1_BEZIER_SEGMENT *end = &beziers[beziersCount]; beziers < end; ++beziers) {
+ if (this->currentIsNot(beziers->point1) ||
+ this->currentIsNot(beziers->point2) ||
+ this->currentIsNot(beziers->point3))
+ {
+ Cubic cubic = { { fCurrent.x, fCurrent.y },
+ { beziers->point1.x, beziers->point1.y },
+ { beziers->point2.x, beziers->point2.y },
+ { beziers->point3.x, beziers->point3.y }, };
+ this->goingTo(beziers->point3);
+ Point quadraticP1;
+ if (check_quadratic(cubic, quadraticP1)) {
+ fPath->quadTo( quadraticP1.x, quadraticP1.y,
+ beziers->point3.x, beziers->point3.y);
+ } else {
+ fPath->cubicTo(beziers->point1.x, beziers->point1.y,
+ beziers->point2.x, beziers->point2.y,
+ beziers->point3.x, beziers->point3.y);
+ }
+ }
+ }
+}
+
+SK_STDMETHODIMP_(void) SkDWriteGeometrySink::EndFigure(D2D1_FIGURE_END figureEnd) {
+ if (fStarted) {
+ fPath->close();
+ }
+}
+
+SK_STDMETHODIMP SkDWriteGeometrySink::Close() {
+ return S_OK;
+}
+
+HRESULT SkDWriteGeometrySink::Create(SkPath* path, IDWriteGeometrySink** geometryToPath) {
+ *geometryToPath = new SkDWriteGeometrySink(path);
+ return S_OK;
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h
new file mode 100644
index 0000000000..af4909aaaf
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteGeometrySink.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWriteToPath_DEFINED
+#define SkDWriteToPath_DEFINED
+
+#include "include/core/SkTypes.h"
+#include "src/utils/win/SkObjBase.h"
+
+class SkPath;
+
+#include <dwrite.h>
+#include <d2d1.h>
+
+class SkDWriteGeometrySink : public IDWriteGeometrySink {
+private:
+ LONG fRefCount;
+ SkPath* fPath;
+ bool fStarted;
+ D2D1_POINT_2F fCurrent;
+
+ void goingTo(const D2D1_POINT_2F pt) {
+ if (!fStarted) {
+ fStarted = true;
+ fPath->moveTo(fCurrent.x, fCurrent.y);
+ }
+ fCurrent = pt;
+ }
+
+ bool currentIsNot(const D2D1_POINT_2F pt) {
+ return fCurrent.x != pt.x || fCurrent.y != pt.y;
+ }
+
+protected:
+ explicit SkDWriteGeometrySink(SkPath* path);
+ virtual ~SkDWriteGeometrySink();
+
+public:
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void **object) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ SK_STDMETHODIMP_(void) SetFillMode(D2D1_FILL_MODE fillMode) override;
+ SK_STDMETHODIMP_(void) SetSegmentFlags(D2D1_PATH_SEGMENT vertexFlags) override;
+ SK_STDMETHODIMP_(void) BeginFigure(D2D1_POINT_2F startPoint, D2D1_FIGURE_BEGIN figureBegin) override;
+ SK_STDMETHODIMP_(void) AddLines(const D2D1_POINT_2F *points, UINT pointsCount) override;
+ SK_STDMETHODIMP_(void) AddBeziers(const D2D1_BEZIER_SEGMENT *beziers, UINT beziersCount) override;
+ SK_STDMETHODIMP_(void) EndFigure(D2D1_FIGURE_END figureEnd) override;
+ SK_STDMETHODIMP Close() override;
+
+ static HRESULT Create(SkPath* path, IDWriteGeometrySink** geometryToPath);
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkDWriteNTDDI_VERSION.h b/gfx/skia/skia/src/utils/win/SkDWriteNTDDI_VERSION.h
new file mode 100644
index 0000000000..d95486b328
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkDWriteNTDDI_VERSION.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkDWriteNTDDI_VERSION_DEFINED
+#define SkDWriteNTDDI_VERSION_DEFINED
+
+// More strictly, this header should be the first thing in a translation unit,
+// since it is effectively negating build flags.
+#if defined(_WINDOWS_) || defined(DWRITE_3_H_INCLUDED)
+#error Must include SkDWriteNTDDI_VERSION.h before any Windows or DWrite headers.
+#endif
+
+// If the build defines NTDDI_VERSION, pretend it didn't.
+// This also requires resetting _WIN32_WINNT and WINVER.
+// dwrite_3.h guards enum, macro, and interface declarations behind NTDDI_VERSION,
+// but it is not clear this is correct since these are all immutable.
+#if defined(NTDDI_VERSION)
+# undef NTDDI_VERSION
+# if defined(_WIN32_WINNT)
+# undef _WIN32_WINNT
+# endif
+# if defined(WINVER)
+# undef WINVER
+# endif
+#endif
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkHRESULT.cpp b/gfx/skia/skia/src/utils/win/SkHRESULT.cpp
new file mode 100644
index 0000000000..43de3d6e8e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkHRESULT.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "src/utils/win/SkHRESULT.h"
+
+void SkTraceHR(const char* file, unsigned long line, HRESULT hr, const char* msg) {
+ if (msg) {
+ SkDebugf("%s\n", msg);
+ }
+ SkDebugf("%s(%lu) : error 0x%lx: ", file, line, hr);
+
+ LPSTR errorText = nullptr;
+ FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr,
+ hr,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR) &errorText,
+ 0,
+ nullptr
+ );
+
+ if (nullptr == errorText) {
+ SkDebugf("<unknown>\n");
+ } else {
+ SkDebugf("%s", errorText);
+ LocalFree(errorText);
+ errorText = nullptr;
+ }
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkHRESULT.h b/gfx/skia/skia/src/utils/win/SkHRESULT.h
new file mode 100644
index 0000000000..fbf312c771
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkHRESULT.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkHRESULT_DEFINED
+#define SkHRESULT_DEFINED
+
+#include "include/core/SkTypes.h"
+#ifdef SK_BUILD_FOR_WIN
+
+#include "src/base/SkLeanWindows.h"
+
+void SkTraceHR(const char* file, unsigned long line,
+ HRESULT hr, const char* msg);
+
+#ifdef SK_DEBUG
+#define SK_TRACEHR(_hr, _msg) SkTraceHR(__FILE__, __LINE__, _hr, _msg)
+#else
+#define SK_TRACEHR(_hr, _msg) sk_ignore_unused_variable(_hr)
+#endif
+
+#define HR_GENERAL(_ex, _msg, _ret) do {\
+ HRESULT _hr = _ex;\
+ if (FAILED(_hr)) {\
+ SK_TRACEHR(_hr, _msg);\
+ return _ret;\
+ }\
+} while(false)
+
+//@{
+/**
+These macros are for reporting HRESULT errors.
+The expression will be evaluated.
+If the resulting HRESULT SUCCEEDED then execution will continue normally.
+If the HRESULT FAILED then the macro will return from the current function.
+In variants ending with 'M' the given message will be traced when FAILED.
+The HR variants will return the HRESULT when FAILED.
+The HRB variants will return false when FAILED.
+The HRN variants will return nullptr when FAILED.
+The HRV variants will simply return when FAILED.
+The HRZ variants will return 0 when FAILED.
+*/
+#define HR(ex) HR_GENERAL(ex, nullptr, _hr)
+#define HRM(ex, msg) HR_GENERAL(ex, msg, _hr)
+
+#define HRB(ex) HR_GENERAL(ex, nullptr, false)
+#define HRBM(ex, msg) HR_GENERAL(ex, msg, false)
+
+#define HRN(ex) HR_GENERAL(ex, nullptr, nullptr)
+#define HRNM(ex, msg) HR_GENERAL(ex, msg, nullptr)
+
+#define HRV(ex) HR_GENERAL(ex, nullptr, )
+#define HRVM(ex, msg) HR_GENERAL(ex, msg, )
+
+#define HRZ(ex) HR_GENERAL(ex, nullptr, 0)
+#define HRZM(ex, msg) HR_GENERAL(ex, msg, 0)
+//@}
+#endif // SK_BUILD_FOR_WIN
+#endif // SkHRESULT_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkIStream.cpp b/gfx/skia/skia/src/utils/win/SkIStream.cpp
new file mode 100644
index 0000000000..93b483bf78
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkIStream.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN)
+
+#include "include/core/SkStream.h"
+#include "src/utils/win/SkIStream.h"
+
+/**
+ * SkBaseIStream
+ */
+SkBaseIStream::SkBaseIStream() : _refcount(1) { }
+SkBaseIStream::~SkBaseIStream() { }
+
+SK_STDMETHODIMP SkBaseIStream::QueryInterface(REFIID iid, void ** ppvObject) {
+ if (nullptr == ppvObject) {
+ return E_INVALIDARG;
+ }
+ if (iid == __uuidof(IUnknown)
+ || iid == __uuidof(IStream)
+ || iid == __uuidof(ISequentialStream))
+ {
+ *ppvObject = static_cast<IStream*>(this);
+ AddRef();
+ return S_OK;
+ } else {
+ *ppvObject = nullptr;
+ return E_NOINTERFACE;
+ }
+}
+
+SK_STDMETHODIMP_(ULONG) SkBaseIStream::AddRef() {
+ return (ULONG)InterlockedIncrement(&_refcount);
+}
+
+SK_STDMETHODIMP_(ULONG) SkBaseIStream::Release() {
+ ULONG res = (ULONG) InterlockedDecrement(&_refcount);
+ if (0 == res) {
+ delete this;
+ }
+ return res;
+}
+
+// ISequentialStream Interface
+SK_STDMETHODIMP SkBaseIStream::Read(void* pv, ULONG cb, ULONG* pcbRead)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Write(void const* pv, ULONG cb, ULONG* pcbWritten)
+{ return E_NOTIMPL; }
+
+// IStream Interface
+SK_STDMETHODIMP SkBaseIStream::SetSize(ULARGE_INTEGER)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::CopyTo(IStream*, ULARGE_INTEGER, ULARGE_INTEGER*, ULARGE_INTEGER*)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Commit(DWORD)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Revert()
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::LockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::UnlockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Clone(IStream**)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Seek(LARGE_INTEGER liDistanceToMove,
+ DWORD dwOrigin,
+ ULARGE_INTEGER* lpNewFilePointer)
+{ return E_NOTIMPL; }
+
+SK_STDMETHODIMP SkBaseIStream::Stat(STATSTG* pStatstg, DWORD grfStatFlag)
+{ return E_NOTIMPL; }
+
+
+/**
+ * SkIStream
+ */
+SkIStream::SkIStream(std::unique_ptr<SkStreamAsset> stream)
+ : SkBaseIStream()
+ , fSkStream(std::move(stream))
+ , fLocation()
+{
+ this->fSkStream->rewind();
+}
+
+SkIStream::~SkIStream() {}
+
+HRESULT SkIStream::CreateFromSkStream(std::unique_ptr<SkStreamAsset> stream, IStream** ppStream) {
+ if (nullptr == stream) {
+ return E_INVALIDARG;
+ }
+ *ppStream = new SkIStream(std::move(stream));
+ return S_OK;
+}
+
+// ISequentialStream Interface
+SK_STDMETHODIMP SkIStream::Read(void* pv, ULONG cb, ULONG* pcbRead) {
+ *pcbRead = static_cast<ULONG>(this->fSkStream->read(pv, cb));
+ this->fLocation.QuadPart += *pcbRead;
+ return (*pcbRead == cb) ? S_OK : S_FALSE;
+}
+
+SK_STDMETHODIMP SkIStream::Write(void const* pv, ULONG cb, ULONG* pcbWritten) {
+ return STG_E_CANTSAVE;
+}
+
+// IStream Interface
+SK_STDMETHODIMP SkIStream::Seek(LARGE_INTEGER liDistanceToMove,
+ DWORD dwOrigin,
+ ULARGE_INTEGER* lpNewFilePointer)
+{
+ HRESULT hr = S_OK;
+
+ switch(dwOrigin) {
+ case STREAM_SEEK_SET: {
+ if (!this->fSkStream->rewind()) {
+ hr = E_FAIL;
+ } else {
+ size_t skip = static_cast<size_t>(liDistanceToMove.QuadPart);
+ size_t skipped = this->fSkStream->skip(skip);
+ this->fLocation.QuadPart = skipped;
+ if (skipped != skip) {
+ hr = E_FAIL;
+ }
+ }
+ break;
+ }
+ case STREAM_SEEK_CUR: {
+ size_t skip = static_cast<size_t>(liDistanceToMove.QuadPart);
+ size_t skipped = this->fSkStream->skip(skip);
+ this->fLocation.QuadPart += skipped;
+ if (skipped != skip) {
+ hr = E_FAIL;
+ }
+ break;
+ }
+ case STREAM_SEEK_END: {
+ if (!this->fSkStream->rewind()) {
+ hr = E_FAIL;
+ } else {
+ size_t skip = static_cast<size_t>(this->fSkStream->getLength() +
+ liDistanceToMove.QuadPart);
+ size_t skipped = this->fSkStream->skip(skip);
+ this->fLocation.QuadPart = skipped;
+ if (skipped != skip) {
+ hr = E_FAIL;
+ }
+ }
+ break;
+ }
+ default:
+ hr = STG_E_INVALIDFUNCTION;
+ break;
+ }
+
+ if (lpNewFilePointer) {
+ lpNewFilePointer->QuadPart = this->fLocation.QuadPart;
+ }
+ return hr;
+}
+
+SK_STDMETHODIMP SkIStream::Stat(STATSTG* pStatstg, DWORD grfStatFlag) {
+ if (0 == (grfStatFlag & STATFLAG_NONAME)) {
+ return STG_E_INVALIDFLAG;
+ }
+ pStatstg->pwcsName = nullptr;
+ pStatstg->cbSize.QuadPart = this->fSkStream->getLength();
+ pStatstg->clsid = CLSID_NULL;
+ pStatstg->type = STGTY_STREAM;
+ pStatstg->grfMode = STGM_READ;
+ return S_OK;
+}
+
+
+/**
+ * SkIWStream
+ */
+SkWIStream::SkWIStream(SkWStream* stream)
+ : SkBaseIStream()
+ , fSkWStream(stream)
+{ }
+
+SkWIStream::~SkWIStream() {
+ if (this->fSkWStream) {
+ this->fSkWStream->flush();
+ }
+}
+
+HRESULT SkWIStream::CreateFromSkWStream(SkWStream* stream, IStream ** ppStream) {
+ *ppStream = new SkWIStream(stream);
+ return S_OK;
+}
+
+// ISequentialStream Interface
+SK_STDMETHODIMP SkWIStream::Write(void const* pv, ULONG cb, ULONG* pcbWritten) {
+ HRESULT hr = S_OK;
+ bool wrote = this->fSkWStream->write(pv, cb);
+ if (wrote) {
+ *pcbWritten = cb;
+ } else {
+ *pcbWritten = 0;
+ hr = S_FALSE;
+ }
+ return hr;
+}
+
+// IStream Interface
+SK_STDMETHODIMP SkWIStream::Commit(DWORD) {
+ this->fSkWStream->flush();
+ return S_OK;
+}
+
+SK_STDMETHODIMP SkWIStream::Stat(STATSTG* pStatstg, DWORD grfStatFlag) {
+ if (0 == (grfStatFlag & STATFLAG_NONAME)) {
+ return STG_E_INVALIDFLAG;
+ }
+ pStatstg->pwcsName = nullptr;
+ pStatstg->cbSize.QuadPart = 0;
+ pStatstg->clsid = CLSID_NULL;
+ pStatstg->type = STGTY_STREAM;
+ pStatstg->grfMode = STGM_WRITE;
+ return S_OK;
+}
+#endif//defined(SK_BUILD_FOR_WIN)
diff --git a/gfx/skia/skia/src/utils/win/SkIStream.h b/gfx/skia/skia/src/utils/win/SkIStream.h
new file mode 100644
index 0000000000..37d9b02283
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkIStream.h
@@ -0,0 +1,101 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef SkIStream_DEFINED
+#define SkIStream_DEFINED
+
+#include "include/core/SkTypes.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+#include "src/base/SkLeanWindows.h"
+#include "src/utils/win/SkObjBase.h"
+#include <ole2.h>
+
+class SkStream;
+class SkWStream;
+
+/**
+ * A bare IStream implementation which properly reference counts
+ * but returns E_NOTIMPL for all ISequentialStream and IStream methods.
+ */
+class SkBaseIStream : public IStream {
+public:
+ // IUnknown methods
+ SK_STDMETHODIMP QueryInterface(REFIID iid, void ** ppvObject) override;
+ SK_STDMETHODIMP_(ULONG) AddRef() override;
+ SK_STDMETHODIMP_(ULONG) Release() override;
+
+ // ISequentialStream methods
+ SK_STDMETHODIMP Read(void* pv, ULONG cb, ULONG* pcbRead) override;
+ SK_STDMETHODIMP Write(void const* pv, ULONG cb, ULONG* pcbWritten) override;
+
+ // IStream methods
+ SK_STDMETHODIMP SetSize(ULARGE_INTEGER) override;
+ SK_STDMETHODIMP CopyTo(IStream*, ULARGE_INTEGER, ULARGE_INTEGER*, ULARGE_INTEGER*) override;
+ SK_STDMETHODIMP Commit(DWORD) override;
+ SK_STDMETHODIMP Revert() override;
+ SK_STDMETHODIMP LockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override;
+ SK_STDMETHODIMP UnlockRegion(ULARGE_INTEGER, ULARGE_INTEGER, DWORD) override;
+ SK_STDMETHODIMP Clone(IStream**) override;
+ SK_STDMETHODIMP Seek(LARGE_INTEGER liDistanceToMove,
+ DWORD dwOrigin,
+ ULARGE_INTEGER* lpNewFilePointer) override;
+ SK_STDMETHODIMP Stat(STATSTG* pStatstg, DWORD grfStatFlag) override;
+
+protected:
+ explicit SkBaseIStream();
+ virtual ~SkBaseIStream();
+
+private:
+ LONG _refcount;
+};
+
+/**
+ * A minimal read-only IStream implementation which wraps an SkStream.
+ */
+class SkIStream : public SkBaseIStream {
+public:
+ HRESULT static CreateFromSkStream(std::unique_ptr<SkStreamAsset>, IStream** ppStream);
+
+ SK_STDMETHODIMP Read(void* pv, ULONG cb, ULONG* pcbRead) override;
+ SK_STDMETHODIMP Write(void const* pv, ULONG cb, ULONG* pcbWritten) override;
+ SK_STDMETHODIMP Seek(LARGE_INTEGER liDistanceToMove,
+ DWORD dwOrigin,
+ ULARGE_INTEGER* lpNewFilePointer) override;
+ SK_STDMETHODIMP Stat(STATSTG* pStatstg, DWORD grfStatFlag) override;
+
+private:
+ const std::unique_ptr<SkStream> fSkStream;
+ ULARGE_INTEGER fLocation;
+
+ explicit SkIStream(std::unique_ptr<SkStreamAsset>);
+ ~SkIStream() override;
+};
+
+/**
+ * A minimal write-only IStream implementation which wraps an SkWIStream.
+ */
+class SkWIStream : public SkBaseIStream {
+public:
+ HRESULT static CreateFromSkWStream(SkWStream* stream, IStream ** ppStream);
+
+ SK_STDMETHODIMP Write(void const* pv, ULONG cb, ULONG* pcbWritten) override;
+ SK_STDMETHODIMP Commit(DWORD) override;
+ SK_STDMETHODIMP Stat(STATSTG* pStatstg, DWORD grfStatFlag) override;
+
+private:
+ SkWStream *fSkWStream;
+
+ SkWIStream(SkWStream* stream);
+ ~SkWIStream() override;
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkIStream_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkObjBase.h b/gfx/skia/skia/src/utils/win/SkObjBase.h
new file mode 100644
index 0000000000..1e713ae8bc
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkObjBase.h
@@ -0,0 +1,25 @@
+/*
+* Copyright 2019 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef SkObjBase_DEFINED
+#define SkObjBase_DEFINED
+
+#include "src/base/SkLeanWindows.h"
+#include <objbase.h>
+
+// STDMETHOD uses COM_DECLSPEC_NOTHROW, but STDMETHODIMP does not. This leads to attribute mismatch
+// between interfaces and implementations which produces warnings. In theory a COM component should
+// never throw a c++ exception, but COM_DECLSPEC_NOTHROW allows tweaking that (as it may be useful
+// for internal only implementations within a single project). The behavior of the attribute nothrow
+// and the keyword noexcept are slightly different, so use COM_DECLSPEC_NOTHROW instead of noexcept.
+// Older interfaces like IUnknown and IStream do not currently specify COM_DECLSPEC_NOTHROW, but it
+// is not harmful to mark the implementation more exception strict than the interface.
+
+#define SK_STDMETHODIMP COM_DECLSPEC_NOTHROW STDMETHODIMP
+#define SK_STDMETHODIMP_(type) COM_DECLSPEC_NOTHROW STDMETHODIMP_(type)
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h b/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h
new file mode 100644
index 0000000000..f638ca868e
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkTScopedComPtr.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTScopedComPtr_DEFINED
+#define SkTScopedComPtr_DEFINED
+
+#include "src/base/SkLeanWindows.h"
+#include "src/utils/win/SkObjBase.h"
+
+#ifdef SK_BUILD_FOR_WIN
+
+template<typename T> T* SkRefComPtr(T* ptr) {
+ ptr->AddRef();
+ return ptr;
+}
+
+template<typename T> T* SkSafeRefComPtr(T* ptr) {
+ if (ptr) {
+ ptr->AddRef();
+ }
+ return ptr;
+}
+
+template<typename T>
+class SkTScopedComPtr {
+private:
+ T *fPtr;
+
+public:
+ constexpr SkTScopedComPtr() : fPtr(nullptr) {}
+ constexpr SkTScopedComPtr(std::nullptr_t) : fPtr(nullptr) {}
+ explicit SkTScopedComPtr(T *ptr) : fPtr(ptr) {}
+ SkTScopedComPtr(SkTScopedComPtr&& that) : fPtr(that.release()) {}
+ SkTScopedComPtr(const SkTScopedComPtr&) = delete;
+
+ ~SkTScopedComPtr() { this->reset();}
+
+ SkTScopedComPtr& operator=(SkTScopedComPtr&& that) {
+ this->reset(that.release());
+ return *this;
+ }
+ SkTScopedComPtr& operator=(const SkTScopedComPtr&) = delete;
+ SkTScopedComPtr& operator=(std::nullptr_t) { this->reset(); return *this; }
+
+ T &operator*() const { SkASSERT(fPtr != nullptr); return *fPtr; }
+
+ explicit operator bool() const { return fPtr != nullptr; }
+
+ T *operator->() const { return fPtr; }
+
+ /**
+ * Returns the address of the underlying pointer.
+ * This is dangerous -- it breaks encapsulation and the reference escapes.
+ * Must only be used on instances currently pointing to NULL,
+ * and only to initialize the instance.
+ */
+ T **operator&() { SkASSERT(fPtr == nullptr); return &fPtr; }
+
+ T *get() const { return fPtr; }
+
+ void reset(T* ptr = nullptr) {
+ if (fPtr) {
+ fPtr->Release();
+ }
+ fPtr = ptr;
+ }
+
+ void swap(SkTScopedComPtr<T>& that) {
+ T* temp = this->fPtr;
+ this->fPtr = that.fPtr;
+ that.fPtr = temp;
+ }
+
+ T* release() {
+ T* temp = this->fPtr;
+ this->fPtr = nullptr;
+ return temp;
+ }
+};
+
+#endif // SK_BUILD_FOR_WIN
+#endif // SkTScopedComPtr_DEFINED
diff --git a/gfx/skia/skia/src/utils/win/SkWGL.h b/gfx/skia/skia/src/utils/win/SkWGL.h
new file mode 100644
index 0000000000..e1d97aea35
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkWGL.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkRefCnt.h"
+
+#ifndef SkWGL_DEFINED
+#define SkWGL_DEFINED
+
+#include "src/base/SkLeanWindows.h"
+
+/**
+ * Working with WGL extensions can be a pain. Among the reasons is that you must
+ * have a GL context to get the proc addresses, but you want to use the procs to
+ * create a context in the first place. So you have to create a placeholder GL
+ * ctx to get the proc addresses.
+ *
+ * This file helps by providing SkCreateWGLInterface(). It returns a struct of
+ * function pointers that it initializes. It also has a helper function to query
+ * for WGL extensions. It handles the fact that wglGetExtensionsString is itself
+ * an extension.
+ */
+
+#define SK_WGL_DRAW_TO_WINDOW 0x2001
+#define SK_WGL_ACCELERATION 0x2003
+#define SK_WGL_SUPPORT_OPENGL 0x2010
+#define SK_WGL_DOUBLE_BUFFER 0x2011
+#define SK_WGL_COLOR_BITS 0x2014
+#define SK_WGL_RED_BITS 0x2015
+#define SK_WGL_GREEN_BITS 0x2017
+#define SK_WGL_BLUE_BITS 0x2019
+#define SK_WGL_ALPHA_BITS 0x201B
+#define SK_WGL_STENCIL_BITS 0x2023
+#define SK_WGL_FULL_ACCELERATION 0x2027
+#define SK_WGL_SAMPLE_BUFFERS 0x2041
+#define SK_WGL_SAMPLES 0x2042
+#define SK_WGL_CONTEXT_MAJOR_VERSION 0x2091
+#define SK_WGL_CONTEXT_MINOR_VERSION 0x2092
+#define SK_WGL_CONTEXT_LAYER_PLANE 0x2093
+#define SK_WGL_CONTEXT_FLAGS 0x2094
+#define SK_WGL_CONTEXT_PROFILE_MASK 0x9126
+#define SK_WGL_CONTEXT_DEBUG_BIT 0x0001
+#define SK_WGL_CONTEXT_FORWARD_COMPATIBLE_BIT 0x0002
+#define SK_WGL_CONTEXT_CORE_PROFILE_BIT 0x00000001
+#define SK_WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002
+#define SK_WGL_CONTEXT_ES2_PROFILE_BIT 0x00000004
+#define SK_ERROR_INVALID_VERSION 0x2095
+#define SK_ERROR_INVALID_PROFILE 0x2096
+
+DECLARE_HANDLE(HPBUFFER);
+
+class SkWGLExtensions {
+public:
+ SkWGLExtensions();
+ /**
+ * Determines if an extensions is available for a given DC.
+ * WGL_extensions_string is considered a prerequisite for all other
+ * extensions. It is necessary to check this before calling other class
+ * functions.
+ */
+ bool hasExtension(HDC dc, const char* ext) const;
+
+ const char* getExtensionsString(HDC hdc) const;
+ BOOL choosePixelFormat(HDC hdc, const int*, const FLOAT*, UINT, int*, UINT*) const;
+ BOOL getPixelFormatAttribiv(HDC, int, int, UINT, const int*, int*) const;
+ BOOL getPixelFormatAttribfv(HDC hdc, int, int, UINT, const int*, FLOAT*) const;
+ HGLRC createContextAttribs(HDC, HGLRC, const int *) const;
+
+ BOOL swapInterval(int interval) const;
+
+ HPBUFFER createPbuffer(HDC, int , int, int, const int*) const;
+ HDC getPbufferDC(HPBUFFER) const;
+ int releasePbufferDC(HPBUFFER, HDC) const;
+ BOOL destroyPbuffer(HPBUFFER) const;
+
+ /**
+ * WGL doesn't have precise rules for the ordering of formats returned
+ * by wglChoosePixelFormat. This function helps choose among the set of
+ * formats returned by wglChoosePixelFormat. The rules in decreasing
+ * priority are:
+ * * Choose formats with the smallest sample count that is >=
+ * desiredSampleCount (or the largest sample count if all formats have
+ * fewer samples than desiredSampleCount.) If desiredSampleCount is 1 then
+ * all msaa formats are excluded from consideration.
+ * * Choose formats with the fewest color samples when coverage sampling
+ * is available.
+ * * If the above rules leave multiple formats, choose the one that
+ * appears first in the formats array parameter.
+ */
+ int selectFormat(const int formats[],
+ int formatCount,
+ HDC dc,
+ int desiredSampleCount) const;
+private:
+ typedef const char* (WINAPI *GetExtensionsStringProc)(HDC);
+ typedef BOOL (WINAPI *ChoosePixelFormatProc)(HDC, const int *, const FLOAT *, UINT, int *, UINT *);
+ typedef BOOL (WINAPI *GetPixelFormatAttribivProc)(HDC, int, int, UINT, const int*, int*);
+ typedef BOOL (WINAPI *GetPixelFormatAttribfvProc)(HDC, int, int, UINT, const int*, FLOAT*);
+ typedef HGLRC (WINAPI *CreateContextAttribsProc)(HDC, HGLRC, const int *);
+ typedef BOOL (WINAPI* SwapIntervalProc)(int);
+ typedef HPBUFFER (WINAPI* CreatePbufferProc)(HDC, int , int, int, const int*);
+ typedef HDC (WINAPI* GetPbufferDCProc)(HPBUFFER);
+ typedef int (WINAPI* ReleasePbufferDCProc)(HPBUFFER, HDC);
+ typedef BOOL (WINAPI* DestroyPbufferProc)(HPBUFFER);
+
+ static GetExtensionsStringProc fGetExtensionsString;
+ static ChoosePixelFormatProc fChoosePixelFormat;
+ static GetPixelFormatAttribfvProc fGetPixelFormatAttribfv;
+ static GetPixelFormatAttribivProc fGetPixelFormatAttribiv;
+ static CreateContextAttribsProc fCreateContextAttribs;
+ static SwapIntervalProc fSwapInterval;
+ static CreatePbufferProc fCreatePbuffer;
+ static GetPbufferDCProc fGetPbufferDC;
+ static ReleasePbufferDCProc fReleasePbufferDC;
+ static DestroyPbufferProc fDestroyPbuffer;
+};
+
+enum SkWGLContextRequest {
+ /** Requests to create core profile context if possible, otherwise
+ compatibility profile. */
+ kGLPreferCoreProfile_SkWGLContextRequest,
+ /** Requests to create compatibility profile context if possible, otherwise
+ core profile. */
+ kGLPreferCompatibilityProfile_SkWGLContextRequest,
+ /** Requests to create GL ES profile context. */
+ kGLES_SkWGLContextRequest
+};
+/**
+ * Helper to create an OpenGL context for a DC using WGL. Configs with a sample count >= to
+ * msaaSampleCount are preferred but if none is available then a context with a lower sample count
+ * (including non-MSAA) will be created. If msaaSampleCount is 1 then this will fail if a non-msaa
+ * context cannot be created. If preferCoreProfile is true but a core profile cannot be created
+ * then a compatible profile context will be created.
+ */
+HGLRC SkCreateWGLContext(HDC dc, int msaaSampleCount, bool deepColor, SkWGLContextRequest context,
+ HGLRC shareContext = nullptr);
+
+/**
+ * Helper class for creating a pbuffer context and deleting all the handles when finished. This
+ * requires that a device context has been created. However, the pbuffer gets its own device
+ * context. The original device context can be released once the pbuffer context is created.
+ */
+class SkWGLPbufferContext : public SkRefCnt {
+public:
+ static sk_sp<SkWGLPbufferContext> Create(HDC parentDC, SkWGLContextRequest contextType,
+ HGLRC shareContext);
+
+ ~SkWGLPbufferContext() override;
+
+ HDC getDC() const { return fDC; }
+ HGLRC getGLRC() const { return fGLRC; }
+
+private:
+ SkWGLPbufferContext(HPBUFFER pbuffer, HDC dc, HGLRC glrc);
+
+ HPBUFFER fPbuffer;
+ HDC fDC;
+ HGLRC fGLRC;
+ SkWGLExtensions fExtensions;
+};
+
+#endif
diff --git a/gfx/skia/skia/src/utils/win/SkWGL_win.cpp b/gfx/skia/skia/src/utils/win/SkWGL_win.cpp
new file mode 100644
index 0000000000..4a78c87dc2
--- /dev/null
+++ b/gfx/skia/skia/src/utils/win/SkWGL_win.cpp
@@ -0,0 +1,513 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "include/core/SkTypes.h"
+#if defined(SK_BUILD_FOR_WIN) && !defined(_M_ARM64) && !defined(WINUWP)
+
+#include "src/utils/win/SkWGL.h"
+
+#include "include/private/base/SkOnce.h"
+#include "include/private/base/SkTDArray.h"
+#include "src/base/SkTSearch.h"
+#include "src/base/SkTSort.h"
+
+bool SkWGLExtensions::hasExtension(HDC dc, const char* ext) const {
+ if (nullptr == this->fGetExtensionsString) {
+ return false;
+ }
+ if (!strcmp("WGL_ARB_extensions_string", ext)) {
+ return true;
+ }
+ const char* extensionString = this->getExtensionsString(dc);
+ size_t extLength = strlen(ext);
+
+ while (true) {
+ size_t n = strcspn(extensionString, " ");
+ if (n == extLength && 0 == strncmp(ext, extensionString, n)) {
+ return true;
+ }
+ if (0 == extensionString[n]) {
+ return false;
+ }
+ extensionString += n+1;
+ }
+}
+
+const char* SkWGLExtensions::getExtensionsString(HDC hdc) const {
+ return fGetExtensionsString(hdc);
+}
+
+BOOL SkWGLExtensions::choosePixelFormat(HDC hdc,
+ const int* piAttribIList,
+ const FLOAT* pfAttribFList,
+ UINT nMaxFormats,
+ int* piFormats,
+ UINT* nNumFormats) const {
+ return fChoosePixelFormat(hdc, piAttribIList, pfAttribFList,
+ nMaxFormats, piFormats, nNumFormats);
+}
+
+BOOL SkWGLExtensions::getPixelFormatAttribiv(HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nAttributes,
+ const int *piAttributes,
+ int *piValues) const {
+ return fGetPixelFormatAttribiv(hdc, iPixelFormat, iLayerPlane,
+ nAttributes, piAttributes, piValues);
+}
+
+BOOL SkWGLExtensions::getPixelFormatAttribfv(HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nAttributes,
+ const int *piAttributes,
+ float *pfValues) const {
+ return fGetPixelFormatAttribfv(hdc, iPixelFormat, iLayerPlane,
+ nAttributes, piAttributes, pfValues);
+}
+HGLRC SkWGLExtensions::createContextAttribs(HDC hDC,
+ HGLRC hShareContext,
+ const int *attribList) const {
+ return fCreateContextAttribs(hDC, hShareContext, attribList);
+}
+
+BOOL SkWGLExtensions::swapInterval(int interval) const {
+ return fSwapInterval(interval);
+}
+
+HPBUFFER SkWGLExtensions::createPbuffer(HDC hDC,
+ int iPixelFormat,
+ int iWidth,
+ int iHeight,
+ const int *piAttribList) const {
+ return fCreatePbuffer(hDC, iPixelFormat, iWidth, iHeight, piAttribList);
+}
+
+HDC SkWGLExtensions::getPbufferDC(HPBUFFER hPbuffer) const {
+ return fGetPbufferDC(hPbuffer);
+}
+
+int SkWGLExtensions::releasePbufferDC(HPBUFFER hPbuffer, HDC hDC) const {
+ return fReleasePbufferDC(hPbuffer, hDC);
+}
+
+BOOL SkWGLExtensions::destroyPbuffer(HPBUFFER hPbuffer) const {
+ return fDestroyPbuffer(hPbuffer);
+}
+
+namespace {
+
+struct PixelFormat {
+ int fFormat;
+ int fSampleCnt;
+ int fChoosePixelFormatRank;
+};
+
+bool pf_less(const PixelFormat& a, const PixelFormat& b) {
+ if (a.fSampleCnt < b.fSampleCnt) {
+ return true;
+ } else if (b.fSampleCnt < a.fSampleCnt) {
+ return false;
+ } else if (a.fChoosePixelFormatRank < b.fChoosePixelFormatRank) {
+ return true;
+ }
+ return false;
+}
+}
+
+int SkWGLExtensions::selectFormat(const int formats[],
+ int formatCount,
+ HDC dc,
+ int desiredSampleCount) const {
+ SkASSERT(desiredSampleCount >= 1);
+ if (formatCount <= 0) {
+ return -1;
+ }
+ PixelFormat desiredFormat = {
+ 0,
+ desiredSampleCount,
+ 0,
+ };
+ SkTDArray<PixelFormat> rankedFormats;
+ rankedFormats.resize(formatCount);
+ for (int i = 0; i < formatCount; ++i) {
+ static const int kQueryAttr = SK_WGL_SAMPLES;
+ int numSamples;
+ this->getPixelFormatAttribiv(dc,
+ formats[i],
+ 0,
+ 1,
+ &kQueryAttr,
+ &numSamples);
+ rankedFormats[i].fFormat = formats[i];
+ rankedFormats[i].fSampleCnt = std::max(1, numSamples);
+ rankedFormats[i].fChoosePixelFormatRank = i;
+ }
+ SkTQSort(rankedFormats.begin(), rankedFormats.end(), pf_less);
+ int idx = SkTSearch<PixelFormat, pf_less>(rankedFormats.begin(),
+ rankedFormats.size(),
+ desiredFormat,
+ sizeof(PixelFormat));
+ if (idx < 0) {
+ idx = ~idx;
+ }
+ // If the caller asked for non-MSAA fail if the closest format has MSAA.
+ if (desiredSampleCount == 1 && rankedFormats[idx].fSampleCnt != 1) {
+ return -1;
+ }
+ return rankedFormats[idx].fFormat;
+}
+
+
+namespace {
+
+#if defined(UNICODE)
+ #define STR_LIT(X) L## #X
+#else
+ #define STR_LIT(X) #X
+#endif
+
+#define TEMP_CLASS STR_LIT("TempClass")
+
+HWND create_temp_window() {
+ HMODULE module = GetModuleHandle(nullptr);
+ HWND wnd;
+ RECT windowRect;
+ windowRect.left = 0;
+ windowRect.right = 8;
+ windowRect.top = 0;
+ windowRect.bottom = 8;
+
+ WNDCLASS wc;
+
+ wc.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
+ wc.lpfnWndProc = (WNDPROC) DefWindowProc;
+ wc.cbClsExtra = 0;
+ wc.cbWndExtra = 0;
+ wc.hInstance = module;
+ wc.hIcon = LoadIcon(nullptr, IDI_WINLOGO);
+ wc.hCursor = LoadCursor(nullptr, IDC_ARROW);
+ wc.hbrBackground = nullptr;
+ wc.lpszMenuName = nullptr;
+ wc.lpszClassName = TEMP_CLASS;
+
+ if(!RegisterClass(&wc)) {
+ return 0;
+ }
+
+ DWORD style, exStyle;
+ exStyle = WS_EX_CLIENTEDGE;
+ style = WS_SYSMENU;
+
+ AdjustWindowRectEx(&windowRect, style, false, exStyle);
+ if(!(wnd = CreateWindowEx(exStyle,
+ TEMP_CLASS,
+ STR_LIT("PlaceholderWindow"),
+ WS_CLIPSIBLINGS | WS_CLIPCHILDREN | style,
+ 0, 0,
+ windowRect.right-windowRect.left,
+ windowRect.bottom-windowRect.top,
+ nullptr, nullptr,
+ module,
+ nullptr))) {
+ UnregisterClass(TEMP_CLASS, module);
+ return nullptr;
+ }
+ ShowWindow(wnd, SW_HIDE);
+
+ return wnd;
+}
+
+void destroy_temp_window(HWND wnd) {
+ DestroyWindow(wnd);
+ HMODULE module = GetModuleHandle(nullptr);
+ UnregisterClass(TEMP_CLASS, module);
+}
+}
+
+#define GET_PROC(NAME, SUFFIX) f##NAME = \
+ (NAME##Proc) wglGetProcAddress("wgl" #NAME #SUFFIX)
+
+
+SkWGLExtensions::GetExtensionsStringProc SkWGLExtensions::fGetExtensionsString = nullptr;
+SkWGLExtensions::ChoosePixelFormatProc SkWGLExtensions::fChoosePixelFormat = nullptr;
+SkWGLExtensions::GetPixelFormatAttribfvProc SkWGLExtensions::fGetPixelFormatAttribfv = nullptr;
+SkWGLExtensions::GetPixelFormatAttribivProc SkWGLExtensions::fGetPixelFormatAttribiv = nullptr;
+SkWGLExtensions::CreateContextAttribsProc SkWGLExtensions::fCreateContextAttribs = nullptr;
+SkWGLExtensions::SwapIntervalProc SkWGLExtensions::fSwapInterval = nullptr;
+SkWGLExtensions::CreatePbufferProc SkWGLExtensions::fCreatePbuffer = nullptr;
+SkWGLExtensions::GetPbufferDCProc SkWGLExtensions::fGetPbufferDC = nullptr;
+SkWGLExtensions::ReleasePbufferDCProc SkWGLExtensions::fReleasePbufferDC = nullptr;
+SkWGLExtensions::DestroyPbufferProc SkWGLExtensions::fDestroyPbuffer = nullptr;
+
+SkWGLExtensions::SkWGLExtensions() {
+ // We cache these function pointers once, and then reuse them. That's possibly incorrect if
+ // there are multiple GPUs, or if we intend to use these for rendering contexts of different
+ // pixel formats (where wglGetProcAddress is not guaranteed to return the same pointer).
+ static SkOnce once;
+ once([] {
+ HDC prevDC = wglGetCurrentDC();
+ HGLRC prevGLRC = wglGetCurrentContext();
+
+ PIXELFORMATDESCRIPTOR tempPFD;
+
+ ZeroMemory(&tempPFD, sizeof(tempPFD));
+ tempPFD.nSize = sizeof(tempPFD);
+ tempPFD.nVersion = 1;
+ tempPFD.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL;
+ tempPFD.iPixelType = PFD_TYPE_RGBA;
+ tempPFD.cColorBits = 32;
+ tempPFD.cDepthBits = 0;
+ tempPFD.cStencilBits = 8;
+ tempPFD.iLayerType = PFD_MAIN_PLANE;
+ HWND tempWND = create_temp_window();
+ if (tempWND) {
+ HDC tempDC = GetDC(tempWND);
+ int tempFormat = ChoosePixelFormat(tempDC, &tempPFD);
+ SetPixelFormat(tempDC, tempFormat, &tempPFD);
+ HGLRC tempGLRC = wglCreateContext(tempDC);
+ SkASSERT(tempGLRC);
+ wglMakeCurrent(tempDC, tempGLRC);
+
+ #if defined(__clang__)
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wcast-function-type"
+ #endif
+
+ GET_PROC(GetExtensionsString, ARB);
+ GET_PROC(ChoosePixelFormat, ARB);
+ GET_PROC(GetPixelFormatAttribiv, ARB);
+ GET_PROC(GetPixelFormatAttribfv, ARB);
+ GET_PROC(CreateContextAttribs, ARB);
+ GET_PROC(SwapInterval, EXT);
+ GET_PROC(CreatePbuffer, ARB);
+ GET_PROC(GetPbufferDC, ARB);
+ GET_PROC(ReleasePbufferDC, ARB);
+ GET_PROC(DestroyPbuffer, ARB);
+
+ #if defined(__clang__)
+ #pragma clang diagnostic pop
+ #endif
+
+ wglMakeCurrent(tempDC, nullptr);
+ wglDeleteContext(tempGLRC);
+ destroy_temp_window(tempWND);
+ }
+
+ wglMakeCurrent(prevDC, prevGLRC);
+ });
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void get_pixel_formats_to_try(HDC dc, const SkWGLExtensions& extensions,
+ bool doubleBuffered, int msaaSampleCount, bool deepColor,
+ int formatsToTry[2]) {
+ auto appendAttr = [](SkTDArray<int>& attrs, int attr, int value) {
+ attrs.push_back(attr);
+ attrs.push_back(value);
+ };
+
+ SkTDArray<int> iAttrs;
+ appendAttr(iAttrs, SK_WGL_DRAW_TO_WINDOW, TRUE);
+ appendAttr(iAttrs, SK_WGL_DOUBLE_BUFFER, (doubleBuffered ? TRUE : FALSE));
+ appendAttr(iAttrs, SK_WGL_ACCELERATION, SK_WGL_FULL_ACCELERATION);
+ appendAttr(iAttrs, SK_WGL_SUPPORT_OPENGL, TRUE);
+ if (deepColor) {
+ appendAttr(iAttrs, SK_WGL_RED_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_GREEN_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_BLUE_BITS, 10);
+ appendAttr(iAttrs, SK_WGL_ALPHA_BITS, 2);
+ } else {
+ appendAttr(iAttrs, SK_WGL_COLOR_BITS, 24);
+ appendAttr(iAttrs, SK_WGL_ALPHA_BITS, 8);
+ }
+ appendAttr(iAttrs, SK_WGL_STENCIL_BITS, 8);
+
+ float fAttrs[] = {0, 0};
+
+ // Get a MSAA format if requested and possible.
+ if (msaaSampleCount > 0 &&
+ extensions.hasExtension(dc, "WGL_ARB_multisample")) {
+ SkTDArray<int> msaaIAttrs = iAttrs;
+ appendAttr(msaaIAttrs, SK_WGL_SAMPLE_BUFFERS, TRUE);
+ appendAttr(msaaIAttrs, SK_WGL_SAMPLES, msaaSampleCount);
+ appendAttr(msaaIAttrs, 0, 0);
+ unsigned int num;
+ int formats[64];
+ extensions.choosePixelFormat(dc, msaaIAttrs.begin(), fAttrs, 64, formats, &num);
+ num = std::min(num, 64U);
+ formatsToTry[0] = extensions.selectFormat(formats, num, dc, msaaSampleCount);
+ }
+
+ // Get a non-MSAA format
+ int* format = -1 == formatsToTry[0] ? &formatsToTry[0] : &formatsToTry[1];
+ unsigned int num;
+ appendAttr(iAttrs, 0, 0);
+ extensions.choosePixelFormat(dc, iAttrs.begin(), fAttrs, 1, format, &num);
+}
+
+static HGLRC create_gl_context(HDC dc, const SkWGLExtensions& extensions,
+ SkWGLContextRequest contextType, HGLRC shareContext) {
+ HDC prevDC = wglGetCurrentDC();
+ HGLRC prevGLRC = wglGetCurrentContext();
+
+ HGLRC glrc = nullptr;
+ if (kGLES_SkWGLContextRequest == contextType) {
+ if (!extensions.hasExtension(dc, "WGL_EXT_create_context_es2_profile")) {
+ wglMakeCurrent(prevDC, prevGLRC);
+ return nullptr;
+ }
+ static const int glesAttribs[] = {
+ SK_WGL_CONTEXT_MAJOR_VERSION, 3,
+ SK_WGL_CONTEXT_MINOR_VERSION, 0,
+ SK_WGL_CONTEXT_PROFILE_MASK, SK_WGL_CONTEXT_ES2_PROFILE_BIT,
+ 0,
+ };
+ glrc = extensions.createContextAttribs(dc, shareContext, glesAttribs);
+ if (nullptr == glrc) {
+ wglMakeCurrent(prevDC, prevGLRC);
+ return nullptr;
+ }
+ } else {
+ if (kGLPreferCoreProfile_SkWGLContextRequest == contextType &&
+ extensions.hasExtension(dc, "WGL_ARB_create_context")) {
+ static const int kCoreGLVersions[] = {
+ 4, 3,
+ 4, 2,
+ 4, 1,
+ 4, 0,
+ 3, 3,
+ 3, 2,
+ };
+ int coreProfileAttribs[] = {
+ SK_WGL_CONTEXT_MAJOR_VERSION, -1,
+ SK_WGL_CONTEXT_MINOR_VERSION, -1,
+ SK_WGL_CONTEXT_PROFILE_MASK, SK_WGL_CONTEXT_CORE_PROFILE_BIT,
+ 0,
+ };
+ for (size_t v = 0; v < std::size(kCoreGLVersions) / 2; ++v) {
+ coreProfileAttribs[1] = kCoreGLVersions[2 * v];
+ coreProfileAttribs[3] = kCoreGLVersions[2 * v + 1];
+ glrc = extensions.createContextAttribs(dc, shareContext, coreProfileAttribs);
+ if (glrc) {
+ break;
+ }
+ }
+ }
+ }
+
+ if (nullptr == glrc) {
+ glrc = wglCreateContext(dc);
+ if (shareContext) {
+ if (!wglShareLists(shareContext, glrc)) {
+ wglDeleteContext(glrc);
+ return nullptr;
+ }
+ }
+ }
+ SkASSERT(glrc);
+
+ wglMakeCurrent(prevDC, prevGLRC);
+
+ return glrc;
+}
+
+HGLRC SkCreateWGLContext(HDC dc, int msaaSampleCount, bool deepColor,
+ SkWGLContextRequest contextType, HGLRC shareContext) {
+ SkWGLExtensions extensions;
+ if (!extensions.hasExtension(dc, "WGL_ARB_pixel_format")) {
+ return nullptr;
+ }
+
+ BOOL set = FALSE;
+
+ int pixelFormatsToTry[] = { -1, -1 };
+ get_pixel_formats_to_try(dc, extensions, true, msaaSampleCount, deepColor, pixelFormatsToTry);
+ for (size_t f = 0;
+ !set && -1 != pixelFormatsToTry[f] && f < std::size(pixelFormatsToTry);
+ ++f) {
+ PIXELFORMATDESCRIPTOR pfd;
+ DescribePixelFormat(dc, pixelFormatsToTry[f], sizeof(pfd), &pfd);
+ set = SetPixelFormat(dc, pixelFormatsToTry[f], &pfd);
+ }
+
+ if (!set) {
+ return nullptr;
+ }
+
+ return create_gl_context(dc, extensions, contextType, shareContext);
+}
+
+sk_sp<SkWGLPbufferContext> SkWGLPbufferContext::Create(HDC parentDC,
+ SkWGLContextRequest contextType,
+ HGLRC shareContext) {
+ SkWGLExtensions extensions;
+ if (!extensions.hasExtension(parentDC, "WGL_ARB_pixel_format") ||
+ !extensions.hasExtension(parentDC, "WGL_ARB_pbuffer")) {
+ return nullptr;
+ }
+
+ // We cache the pixel formats once, and then reuse them. That's possibly incorrect if
+ // there are multiple GPUs, but this function is always called with a freshly made,
+ // identically constructed HDC (see WinGLTestContext).
+ //
+ // We only store two potential pixel formats, one for single buffer, one for double buffer.
+ // We never ask for MSAA, so we don't need the second pixel format for each buffering state.
+ static int gPixelFormats[2] = { -1, -1 };
+ static SkOnce once;
+ once([=] {
+ {
+ // Single buffer
+ int pixelFormatsToTry[2] = { -1, -1 };
+ get_pixel_formats_to_try(parentDC, extensions, false, 0, false, pixelFormatsToTry);
+ gPixelFormats[0] = pixelFormatsToTry[0];
+ }
+ {
+ // Double buffer
+ int pixelFormatsToTry[2] = { -1, -1 };
+ get_pixel_formats_to_try(parentDC, extensions, true, 0, false, pixelFormatsToTry);
+ gPixelFormats[1] = pixelFormatsToTry[0];
+ }
+ });
+
+ // try for single buffer first
+ for (int pixelFormat : gPixelFormats) {
+ if (-1 == pixelFormat) {
+ continue;
+ }
+ HPBUFFER pbuf = extensions.createPbuffer(parentDC, pixelFormat, 1, 1, nullptr);
+ if (0 != pbuf) {
+ HDC dc = extensions.getPbufferDC(pbuf);
+ if (dc) {
+ HGLRC glrc = create_gl_context(dc, extensions, contextType, shareContext);
+ if (glrc) {
+ return sk_sp<SkWGLPbufferContext>(new SkWGLPbufferContext(pbuf, dc, glrc));
+ }
+ extensions.releasePbufferDC(pbuf, dc);
+ }
+ extensions.destroyPbuffer(pbuf);
+ }
+ }
+ return nullptr;
+}
+
+SkWGLPbufferContext::~SkWGLPbufferContext() {
+ // The pbuffer extension must still be present: the DC and pbuffer below
+ // are released through fExtensions' entry points.
+ SkASSERT(fExtensions.hasExtension(fDC, "WGL_ARB_pbuffer"));
+ // Tear down in reverse order of acquisition in Create(): GL context first,
+ // then the pbuffer's DC, then the pbuffer itself.
+ wglDeleteContext(fGLRC);
+ fExtensions.releasePbufferDC(fPbuffer, fDC);
+ fExtensions.destroyPbuffer(fPbuffer);
+}
+
+// Takes ownership of the pbuffer, its device context, and the GL context;
+// all three are released by the destructor. Create() (above) constructs
+// instances only after validating each handle.
+SkWGLPbufferContext::SkWGLPbufferContext(HPBUFFER pbuffer, HDC dc, HGLRC glrc)
+ : fPbuffer(pbuffer)
+ , fDC(dc)
+ , fGLRC(glrc) {
+}
+
+#endif//defined(SK_BUILD_FOR_WIN)